Diffstat:
-rw-r--r-- .mailmap | 1
-rw-r--r-- Documentation/00-INDEX | 4
-rw-r--r-- Documentation/ABI/testing/devlink-resource-mlxsw | 33
-rw-r--r-- Documentation/ABI/testing/evm | 54
-rw-r--r-- Documentation/ABI/testing/ima_policy | 3
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio | 14
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32 | 16
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd | 25
-rw-r--r-- Documentation/ABI/testing/sysfs-class-led-trigger-netdev | 45
-rw-r--r-- Documentation/ABI/testing/sysfs-class-net | 24
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-coredump | 10
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-system-cpu | 16
-rw-r--r-- Documentation/ABI/testing/sysfs-fs-f2fs | 6
-rw-r--r-- Documentation/ABI/testing/sysfs-kernel-livepatch | 26
-rw-r--r-- Documentation/IRQ-domain.txt | 36
-rw-r--r-- Documentation/RCU/Design/Data-Structures/Data-Structures.html | 49
-rw-r--r-- Documentation/RCU/Design/Requirements/Requirements.html | 7
-rw-r--r-- Documentation/RCU/rcu_dereference.txt | 6
-rw-r--r-- Documentation/RCU/stallwarn.txt | 10
-rw-r--r-- Documentation/RCU/whatisRCU.txt | 3
-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 79
-rw-r--r-- Documentation/admin-guide/mono.rst | 6
-rw-r--r-- Documentation/arm64/cpu-feature-registers.txt | 4
-rw-r--r-- Documentation/arm64/elf_hwcaps.txt | 4
-rw-r--r-- Documentation/arm64/silicon-errata.txt | 2
-rw-r--r-- Documentation/bpf/bpf_devel_QA.txt | 519
-rw-r--r-- Documentation/cgroup-v1/cgroups.txt | 7
-rw-r--r-- Documentation/cgroup-v1/memory.txt | 4
-rw-r--r-- Documentation/cgroup-v2.txt | 77
-rw-r--r-- Documentation/circular-buffers.txt | 3
-rw-r--r-- Documentation/conf.py | 1
-rw-r--r-- Documentation/core-api/errseq.rst (renamed from Documentation/errseq.rst) | 20
-rw-r--r-- Documentation/core-api/index.rst | 3
-rw-r--r-- Documentation/core-api/kernel-api.rst | 15
-rw-r--r-- Documentation/core-api/printk-formats.rst (renamed from Documentation/printk-formats.txt) | 227
-rw-r--r-- Documentation/core-api/refcount-vs-atomic.rst | 150
-rw-r--r-- Documentation/device-mapper/cache-policies.txt | 4
-rw-r--r-- Documentation/device-mapper/cache.txt | 9
-rw-r--r-- Documentation/device-mapper/dm-raid.txt | 5
-rw-r--r-- Documentation/device-mapper/snapshot.txt | 4
-rw-r--r-- Documentation/device-mapper/thin-provisioning.txt | 14
-rw-r--r-- Documentation/device-mapper/unstriped.txt | 124
-rw-r--r-- Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/arm/firmware/sdei.txt | 42
-rw-r--r-- Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt | 19
-rw-r--r-- Documentation/devicetree/bindings/crypto/arm-cryptocell.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt | 19
-rw-r--r-- Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/gpio/gpio-axp209.txt | 49
-rw-r--r-- Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt | 14
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt | 13
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt | 128
-rw-r--r-- Documentation/devicetree/bindings/iio/health/max30102.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/iio/light/uvis25.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/input/hid-over-i2c.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/leds/leds-lm3692x.txt | 49
-rw-r--r-- Documentation/devicetree/bindings/leds/leds-lp8860.txt | 32
-rw-r--r-- Documentation/devicetree/bindings/mfd/mc13xxx.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/mfd/syscon.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/mmc/mtk-sd.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/mmc/tmio_mmc.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/mtd/fsl-quadspi.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/mtd/gpmc-onenand.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/mtd/marvell-nand.txt | 123
-rw-r--r-- Documentation/devicetree/bindings/mtd/mtk-nand.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/mtd/nand.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/net/can/can-transceiver.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/net/can/fsl-flexcan.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/net/can/m_can.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/net/can/rcar_can.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt | 92
-rw-r--r-- Documentation/devicetree/bindings/net/fsl-fec.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/net/ieee802154/adf7242.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/mediatek-net.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/phy.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/net/sff,sfp.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt | 48
-rw-r--r-- Documentation/devicetree/bindings/net/socionext-netsec.txt | 53
-rw-r--r-- Documentation/devicetree/bindings/net/ti-bluetooth.txt (renamed from Documentation/devicetree/bindings/net/ti,wilink-st.txt) | 18
-rw-r--r-- Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt | 32
-rw-r--r-- Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/opp/opp.txt | 13
-rw-r--r-- Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt | 63
-rw-r--r-- Documentation/devicetree/bindings/power/power_domain.txt | 65
-rw-r--r-- Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/power/supply/bq27xxx.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/regulator/regulator.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt | 43
-rw-r--r-- Documentation/devicetree/bindings/rng/brcm,bcm2835.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/rng/brcm,bcm6368.txt | 17
-rw-r--r-- Documentation/devicetree/bindings/scsi/hisilicon-sas.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/serial/fsl-imx-uart.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/serial/fsl-lpuart.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/serial/ingenic,uart.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/serial/maxim,max310x.txt | 18
-rw-r--r-- Documentation/devicetree/bindings/serial/mvebu-uart.txt | 50
-rw-r--r-- Documentation/devicetree/bindings/serial/omap_serial.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/serial/rs485.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/sound/dmic.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/max98373.txt | 40
-rw-r--r-- Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt | 266
-rw-r--r-- Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt | 33
-rw-r--r-- Documentation/devicetree/bindings/sound/nau8825.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/sound/pcm186x.txt | 42
-rw-r--r-- Documentation/devicetree/bindings/sound/renesas,rsnd.txt | 15
-rw-r--r-- Documentation/devicetree/bindings/sound/simple-card.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt | 63
-rw-r--r-- Documentation/devicetree/bindings/sound/st,stm32-sai.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/sound/sun4i-i2s.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/tas5720.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/sound/tfa9879.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/sound/ti,tas6424.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/sound/tlv320aic31xx.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/sound/tlv320aic3x.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/sound/tscs42xx.txt | 16
-rw-r--r-- Documentation/devicetree/bindings/sound/uniphier,evea.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/spi/sh-msiof.txt | 16
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-meson.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-orion.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-xilinx.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/timer/actions,owl-timer.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/timer/spreadtrum,sprd-timer.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/usb/dwc3.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt | 18
-rw-r--r-- Documentation/devicetree/bindings/usb/mediatek,mtu3.txt | 15
-rw-r--r-- Documentation/devicetree/bindings/usb/renesas_usbhs.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/usb-device.txt | 68
-rw-r--r-- Documentation/devicetree/bindings/usb/usb-xhci.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt | 39
-rw-r--r-- Documentation/doc-guide/kernel-doc.rst | 360
-rw-r--r-- Documentation/driver-api/basics.rst | 21
-rw-r--r-- Documentation/driver-api/dmaengine/provider.rst | 38
-rw-r--r-- Documentation/driver-api/firmware/built-in-fw.rst | 7
-rw-r--r-- Documentation/driver-api/firmware/fallback-mechanisms.rst | 2
-rw-r--r-- Documentation/driver-api/iio/hw-consumer.rst | 51
-rw-r--r-- Documentation/driver-api/iio/index.rst | 1
-rw-r--r-- Documentation/driver-api/pm/devices.rst | 54
-rw-r--r-- Documentation/driver-api/scsi.rst | 10
-rw-r--r-- Documentation/driver-api/usb/usb3-debug-port.rst | 52
-rw-r--r-- Documentation/driver-api/usb/writing_usb_driver.rst | 2
-rw-r--r-- Documentation/driver-model/devres.txt | 3
-rw-r--r-- Documentation/fault-injection/fault-injection.txt | 70
-rw-r--r-- Documentation/features/debug/KASAN/arch-support.txt | 2
-rw-r--r-- Documentation/features/debug/stackprotector/arch-support.txt | 2
-rw-r--r-- Documentation/filesystems/ext2.txt | 2
-rw-r--r-- Documentation/filesystems/ext4.txt | 7
-rw-r--r-- Documentation/filesystems/nfs/Exporting | 27
-rw-r--r-- Documentation/filesystems/nilfs2.txt | 4
-rw-r--r-- Documentation/filesystems/vfat.txt | 2
-rw-r--r-- Documentation/gpio/board.txt | 14
-rw-r--r-- Documentation/gpio/consumer.txt | 107
-rw-r--r-- Documentation/gpio/driver.txt | 4
-rw-r--r-- Documentation/gpio/sysfs.txt | 11
-rw-r--r-- Documentation/gpu/i915.rst | 5
-rw-r--r-- Documentation/hwmon/lm25066 | 20
-rw-r--r-- Documentation/hwmon/max31785 | 15
-rw-r--r-- Documentation/hwmon/w83773g | 33
-rw-r--r-- Documentation/i2c/dev-interface | 17
-rw-r--r-- Documentation/index.rst | 13
-rw-r--r-- Documentation/input/multi-touch-protocol.rst | 9
-rw-r--r-- Documentation/kbuild/kconfig-language.txt | 44
-rw-r--r-- Documentation/kernel-doc-nano-HOWTO.txt | 322
-rw-r--r-- Documentation/kernel-hacking/hacking.rst | 6
-rw-r--r-- Documentation/livepatch/livepatch.txt | 114
-rw-r--r-- Documentation/locking/locktorture.txt | 5
-rw-r--r-- Documentation/maintainer/conf.py | 10
-rw-r--r-- Documentation/maintainer/configure-git.rst | 34
-rw-r--r-- Documentation/maintainer/index.rst | 14
-rw-r--r-- Documentation/maintainer/pull-requests.rst | 178
-rw-r--r-- Documentation/md/raid5-ppl.txt | 7
-rw-r--r-- Documentation/memory-barriers.txt | 22
-rw-r--r-- Documentation/mtd/spi-nor.txt | 3
-rw-r--r-- Documentation/networking/00-INDEX | 4
-rw-r--r-- Documentation/networking/batman-adv.rst | 2
-rw-r--r-- Documentation/networking/can.rst | 1437
-rw-r--r-- Documentation/networking/can.txt | 1308
-rw-r--r-- Documentation/networking/dsa/dsa.txt | 5
-rw-r--r-- Documentation/networking/filter.txt | 2
-rw-r--r-- Documentation/networking/ieee802154.txt | 40
-rw-r--r-- Documentation/networking/index.rst | 3
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 1
-rw-r--r-- Documentation/networking/kapi.rst | 24
-rw-r--r-- Documentation/networking/msg_zerocopy.rst | 4
-rw-r--r-- Documentation/networking/netdev-features.txt | 9
-rw-r--r-- Documentation/networking/pktgen.txt | 19
-rw-r--r-- Documentation/networking/xfrm_device.txt | 135
-rw-r--r-- Documentation/networking/xfrm_proc.txt | 20
-rw-r--r-- Documentation/perf/arm_dsu_pmu.txt | 28
-rw-r--r-- Documentation/power/pci.txt | 11
-rw-r--r-- Documentation/power/regulator/machine.txt | 36
-rw-r--r-- Documentation/process/kernel-enforcement-statement.rst | 4
-rw-r--r-- Documentation/process/license-rules.rst | 370
-rw-r--r-- Documentation/process/submit-checklist.rst | 4
-rw-r--r-- Documentation/security/credentials.rst | 7
-rw-r--r-- Documentation/security/self-protection.rst | 15
-rw-r--r-- Documentation/sparc/oradax/dax-hv-api.txt | 1433
-rw-r--r-- Documentation/sparc/oradax/oracle-dax.txt | 429
-rw-r--r-- Documentation/sphinx/kfigure.py | 6
-rw-r--r-- Documentation/sysctl/kernel.txt | 17
-rw-r--r-- Documentation/sysctl/net.txt | 4
-rw-r--r-- Documentation/sysctl/vm.txt | 25
-rw-r--r-- Documentation/thermal/cpu-cooling-api.txt | 115
-rw-r--r-- Documentation/trace/ftrace-uses.rst | 60
-rw-r--r-- Documentation/usb/chipidea.txt | 12
-rw-r--r-- Documentation/usb/gadget-testing.txt | 2
-rw-r--r-- Documentation/usb/usbip_protocol.txt | 1
-rw-r--r-- Documentation/virtual/kvm/api.txt | 46
-rw-r--r-- Documentation/vm/hugetlbpage.txt | 27
-rw-r--r-- Documentation/vm/hwpoison.txt | 2
-rw-r--r-- Documentation/w1/masters/w1-gpio | 17
-rw-r--r-- Documentation/w1/w1.generic | 2
-rw-r--r-- Documentation/x86/intel_rdt_ui.txt | 13
-rw-r--r-- Documentation/x86/microcode.txt | 5
-rw-r--r-- Documentation/x86/pti.txt | 186
-rw-r--r-- Documentation/x86/x86_64/mm.txt | 18
-rw-r--r-- Documentation/xtensa/mmu.txt | 78
-rw-r--r-- LICENSES/exceptions/Linux-syscall-note | 25
-rw-r--r-- LICENSES/other/GPL-1.0 | 260
-rw-r--r-- LICENSES/other/MPL-1.1 | 478
-rw-r--r-- LICENSES/preferred/BSD-2-Clause | 32
-rw-r--r-- LICENSES/preferred/BSD-3-Clause | 36
-rw-r--r-- LICENSES/preferred/BSD-3-Clause-Clear | 41
-rw-r--r-- LICENSES/preferred/GPL-2.0 | 353
-rw-r--r-- LICENSES/preferred/LGPL-2.0 | 487
-rw-r--r-- LICENSES/preferred/LGPL-2.1 | 503
-rw-r--r-- LICENSES/preferred/MIT | 30
-rw-r--r-- MAINTAINERS | 251
-rw-r--r-- Makefile | 45
-rw-r--r-- arch/Kconfig | 11
-rw-r--r-- arch/alpha/Kconfig | 1
-rw-r--r-- arch/alpha/include/asm/asm-prototypes.h | 2
-rw-r--r-- arch/alpha/include/asm/thread_info.h | 3
-rw-r--r-- arch/alpha/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/alpha/include/uapi/asm/poll.h | 2
-rw-r--r-- arch/alpha/kernel/osf_sys.c | 72
-rw-r--r-- arch/alpha/kernel/sys_sio.c | 35
-rw-r--r-- arch/alpha/lib/ev6-memset.S | 12
-rw-r--r-- arch/arc/Kconfig | 3
-rw-r--r-- arch/arc/boot/dts/axc003.dtsi | 8
-rw-r--r-- arch/arc/boot/dts/axc003_idu.dtsi | 8
-rw-r--r-- arch/arc/boot/dts/hsdk.dts | 8
-rw-r--r-- arch/arc/configs/axs101_defconfig | 1
-rw-r--r-- arch/arc/configs/axs103_defconfig | 1
-rw-r--r-- arch/arc/configs/axs103_smp_defconfig | 1
-rw-r--r-- arch/arc/configs/haps_hs_defconfig | 1
-rw-r--r-- arch/arc/configs/haps_hs_smp_defconfig | 1
-rw-r--r-- arch/arc/configs/hsdk_defconfig | 6
-rw-r--r-- arch/arc/configs/nsim_700_defconfig | 1
-rw-r--r-- arch/arc/configs/nsim_hs_defconfig | 1
-rw-r--r-- arch/arc/configs/nsim_hs_smp_defconfig | 1
-rw-r--r-- arch/arc/configs/nsimosci_defconfig | 1
-rw-r--r-- arch/arc/configs/nsimosci_hs_defconfig | 1
-rw-r--r-- arch/arc/configs/nsimosci_hs_smp_defconfig | 1
-rw-r--r-- arch/arc/configs/tb10x_defconfig | 1
-rw-r--r-- arch/arc/configs/vdk_hs38_defconfig | 1
-rw-r--r-- arch/arc/configs/vdk_hs38_smp_defconfig | 1
-rw-r--r-- arch/arc/include/asm/dma-mapping.h | 7
-rw-r--r-- arch/arc/include/asm/hugepage.h | 3
-rw-r--r-- arch/arc/include/asm/thread_info.h | 3
-rw-r--r-- arch/arc/include/asm/uaccess.h | 5
-rw-r--r-- arch/arc/kernel/setup.c | 2
-rw-r--r-- arch/arc/kernel/stacktrace.c | 2
-rw-r--r-- arch/arc/kernel/traps.c | 28
-rw-r--r-- arch/arc/kernel/troubleshoot.c | 3
-rw-r--r-- arch/arc/mm/dma.c | 14
-rw-r--r-- arch/arc/plat-axs10x/axs10x.c | 18
-rw-r--r-- arch/arc/plat-hsdk/platform.c | 42
-rw-r--r-- arch/arm/Kconfig | 3
-rw-r--r-- arch/arm/boot/dts/aspeed-g4.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/at91-tse850-3.dts | 1
-rw-r--r-- arch/arm/boot/dts/bcm2836.dtsi | 14
-rw-r--r-- arch/arm/boot/dts/bcm2837.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/bcm283x.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/da850-lcdk.dts | 4
-rw-r--r-- arch/arm/boot/dts/da850-lego-ev3.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos5800-peach-pi.dts | 4
-rw-r--r-- arch/arm/boot/dts/imx25.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx28.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx35.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx53.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx6q-b450v3.dts | 52
-rw-r--r-- arch/arm/boot/dts/imx6q-b650v3.dts | 52
-rw-r--r-- arch/arm/boot/dts/imx6q-b850v3.dts | 75
-rw-r--r-- arch/arm/boot/dts/imx6q-bx50v3.dtsi | 62
-rw-r--r-- arch/arm/boot/dts/imx6ul.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/kirkwood-openblocks_a7.dts | 10
-rw-r--r-- arch/arm/boot/dts/ls1021a-qds.dts | 18
-rw-r--r-- arch/arm/boot/dts/ls1021a-twr.dts | 18
-rw-r--r-- arch/arm/boot/dts/ls1021a.dtsi | 36
-rw-r--r-- arch/arm/boot/dts/omap2420-n8x0-common.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/omap3-igep.dtsi | 30
-rw-r--r-- arch/arm/boot/dts/omap3-n900.dts | 1
-rw-r--r-- arch/arm/boot/dts/omap3-n950-n9.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/omap3430-sdp.dts | 1
-rw-r--r-- arch/arm/boot/dts/rk3066a-marsboard.dts | 4
-rw-r--r-- arch/arm/boot/dts/rk3288.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun4i-a10.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/sun5i-a10s.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/sun6i-a31.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/sun7i-a20.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts | 1
-rw-r--r-- arch/arm/boot/dts/tango4-common.dtsi | 1
-rw-r--r-- arch/arm/configs/aspeed_g4_defconfig | 1
-rw-r--r-- arch/arm/configs/aspeed_g5_defconfig | 1
-rw-r--r-- arch/arm/configs/cns3420vb_defconfig | 1
-rw-r--r-- arch/arm/configs/hisi_defconfig | 1
-rw-r--r-- arch/arm/configs/magician_defconfig | 1
-rw-r--r-- arch/arm/configs/mini2440_defconfig | 1
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 1
-rw-r--r-- arch/arm/configs/mv78xx0_defconfig | 1
-rw-r--r-- arch/arm/configs/mvebu_v7_defconfig | 3
-rw-r--r-- arch/arm/configs/mxs_defconfig | 1
-rw-r--r-- arch/arm/configs/orion5x_defconfig | 1
-rw-r--r-- arch/arm/configs/pxa_defconfig | 1
-rw-r--r-- arch/arm/configs/sama5_defconfig | 1
-rw-r--r-- arch/arm/configs/sunxi_defconfig | 2
-rw-r--r-- arch/arm/configs/tegra_defconfig | 2
-rw-r--r-- arch/arm/configs/vf610m4_defconfig | 1
-rw-r--r-- arch/arm/configs/vt8500_v6_v7_defconfig | 1
-rw-r--r-- arch/arm/crypto/aes-neonbs-glue.c | 10
-rw-r--r-- arch/arm/crypto/crc32-ce-glue.c | 2
-rw-r--r-- arch/arm/include/asm/dma-direct.h | 36
-rw-r--r-- arch/arm/include/asm/dma-mapping.h | 35
-rw-r--r-- arch/arm/include/asm/kvm_host.h | 5
-rw-r--r-- arch/arm/include/asm/kvm_mmu.h | 17
-rw-r--r-- arch/arm/include/asm/pgtable-3level.h | 3
-rw-r--r-- arch/arm/include/asm/thread_info.h | 3
-rw-r--r-- arch/arm/include/uapi/asm/siginfo.h | 13
-rw-r--r-- arch/arm/kernel/ptrace.c | 8
-rw-r--r-- arch/arm/kernel/traps.c | 1
-rw-r--r-- arch/arm/mach-davinci/da830.c | 4
-rw-r--r-- arch/arm/mach-davinci/da850.c | 4
-rw-r--r-- arch/arm/mach-davinci/dm365.c | 29
-rw-r--r-- arch/arm/mach-ixp4xx/vulcan-setup.c | 13
-rw-r--r-- arch/arm/mach-omap2/Makefile | 3
-rw-r--r-- arch/arm/mach-omap2/gpmc-onenand.c | 409
-rw-r--r-- arch/arm/mach-pxa/raumfeld.c | 16
-rw-r--r-- arch/arm/mach-pxa/sharpsl_pm.c | 4
-rw-r--r-- arch/arm/mm/dma-mapping-nommu.c | 13
-rw-r--r-- arch/arm/net/bpf_jit_32.c | 235
-rw-r--r-- arch/arm/vfp/vfpmodule.c | 2
-rw-r--r-- arch/arm64/Kconfig | 94
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts | 1
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 1
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts | 3
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi | 11
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts | 2
-rw-r--r-- arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi | 13
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi | 9
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 2
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8173.dtsi | 12
-rw-r--r-- arch/arm64/boot/dts/renesas/salvator-common.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/renesas/ulcb.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | 2
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 11
-rw-r--r-- arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi | 4
-rw-r--r-- arch/arm64/configs/defconfig | 2
-rw-r--r-- arch/arm64/crypto/Kconfig | 18
-rw-r--r-- arch/arm64/crypto/Makefile | 11
-rw-r--r-- arch/arm64/crypto/aes-ce-core.S | 87
-rw-r--r-- arch/arm64/crypto/aes-ce-glue.c (renamed from arch/arm64/crypto/aes-ce-cipher.c) | 115
-rw-r--r-- arch/arm64/crypto/aes-cipher-core.S | 19
-rw-r--r-- arch/arm64/crypto/aes-glue.c | 1
-rw-r--r-- arch/arm64/crypto/aes-neon.S | 8
-rw-r--r-- arch/arm64/crypto/crc32-ce-core.S | 7
-rw-r--r-- arch/arm64/crypto/crc32-ce-glue.c | 2
-rw-r--r-- arch/arm64/crypto/crct10dif-ce-core.S | 17
-rw-r--r-- arch/arm64/crypto/sha1-ce-core.S | 20
-rw-r--r-- arch/arm64/crypto/sha2-ce-core.S | 4
-rw-r--r-- arch/arm64/crypto/sha3-ce-core.S | 210
-rw-r--r-- arch/arm64/crypto/sha3-ce-glue.c | 161
-rw-r--r-- arch/arm64/crypto/sha512-ce-core.S | 204
-rw-r--r-- arch/arm64/crypto/sha512-ce-glue.c | 119
-rw-r--r-- arch/arm64/crypto/sha512-glue.c | 1
-rw-r--r-- arch/arm64/crypto/sm3-ce-core.S | 141
-rw-r--r-- arch/arm64/crypto/sm3-ce-glue.c | 92
-rw-r--r-- arch/arm64/include/asm/alternative.h | 2
-rw-r--r-- arch/arm64/include/asm/arm_dsu_pmu.h | 129
-rw-r--r-- arch/arm64/include/asm/asm-uaccess.h | 42
-rw-r--r-- arch/arm64/include/asm/assembler.h | 75
-rw-r--r-- arch/arm64/include/asm/compat.h | 64
-rw-r--r-- arch/arm64/include/asm/cpucaps.h | 6
-rw-r--r-- arch/arm64/include/asm/cputype.h | 9
-rw-r--r-- arch/arm64/include/asm/dma-mapping.h | 35
-rw-r--r-- arch/arm64/include/asm/efi.h | 12
-rw-r--r-- arch/arm64/include/asm/esr.h | 20
-rw-r--r-- arch/arm64/include/asm/exception.h | 14
-rw-r--r-- arch/arm64/include/asm/fixmap.h | 5
-rw-r--r-- arch/arm64/include/asm/fpsimd.h | 2
-rw-r--r-- arch/arm64/include/asm/kernel-pgtable.h | 59
-rw-r--r-- arch/arm64/include/asm/kvm_arm.h | 2
-rw-r--r-- arch/arm64/include/asm/kvm_asm.h | 2
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 17
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 19
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 59
-rw-r--r-- arch/arm64/include/asm/mmu.h | 49
-rw-r--r-- arch/arm64/include/asm/mmu_context.h | 27
-rw-r--r-- arch/arm64/include/asm/percpu.h | 11
-rw-r--r-- arch/arm64/include/asm/pgalloc.h | 6
-rw-r--r-- arch/arm64/include/asm/pgtable-hwdef.h | 32
-rw-r--r-- arch/arm64/include/asm/pgtable-prot.h | 21
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 64
-rw-r--r-- arch/arm64/include/asm/proc-fns.h | 6
-rw-r--r-- arch/arm64/include/asm/processor.h | 1
-rw-r--r-- arch/arm64/include/asm/sdei.h | 57
-rw-r--r-- arch/arm64/include/asm/sections.h | 1
-rw-r--r-- arch/arm64/include/asm/sparsemem.h | 2
-rw-r--r-- arch/arm64/include/asm/stacktrace.h | 3
-rw-r--r-- arch/arm64/include/asm/sysreg.h | 92
-rw-r--r-- arch/arm64/include/asm/thread_info.h | 2
-rw-r--r-- arch/arm64/include/asm/tlbflush.h | 16
-rw-r--r-- arch/arm64/include/asm/traps.h | 54
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 40
-rw-r--r-- arch/arm64/include/asm/vmap_stack.h | 28
-rw-r--r-- arch/arm64/include/uapi/asm/hwcap.h | 1
-rw-r--r-- arch/arm64/include/uapi/asm/siginfo.h | 21
-rw-r--r-- arch/arm64/kernel/Makefile | 5
-rw-r--r-- arch/arm64/kernel/acpi.c | 2
-rw-r--r-- arch/arm64/kernel/alternative.c | 9
-rw-r--r-- arch/arm64/kernel/asm-offsets.c | 12
-rw-r--r-- arch/arm64/kernel/bpi.S | 87
-rw-r--r-- arch/arm64/kernel/cpu_errata.c | 192
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 146
-rw-r--r-- arch/arm64/kernel/cpuidle.c | 8
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 1
-rw-r--r-- arch/arm64/kernel/debug-monitors.c | 13
-rw-r--r-- arch/arm64/kernel/efi.c | 4
-rw-r--r-- arch/arm64/kernel/entry.S | 396
-rw-r--r-- arch/arm64/kernel/fpsimd.c | 6
-rw-r--r-- arch/arm64/kernel/head.S | 245
-rw-r--r-- arch/arm64/kernel/hibernate-asm.S | 12
-rw-r--r-- arch/arm64/kernel/hibernate.c | 5
-rw-r--r-- arch/arm64/kernel/irq.c | 13
-rw-r--r-- arch/arm64/kernel/process.c | 12
-rw-r--r-- arch/arm64/kernel/ptrace.c | 42
-rw-r--r-- arch/arm64/kernel/sdei.c | 235
-rw-r--r-- arch/arm64/kernel/signal.c | 7
-rw-r--r-- arch/arm64/kernel/signal32.c | 85
-rw-r--r-- arch/arm64/kernel/smp.c | 11
-rw-r--r-- arch/arm64/kernel/suspend.c | 4
-rw-r--r-- arch/arm64/kernel/topology.c | 16
-rw-r--r-- arch/arm64/kernel/traps.c | 51
-rw-r--r-- arch/arm64/kernel/vmlinux.lds.S | 27
-rw-r--r-- arch/arm64/kvm/handle_exit.c | 36
-rw-r--r-- arch/arm64/kvm/hyp-init.S | 30
-rw-r--r-- arch/arm64/kvm/hyp/entry.S | 35
-rw-r--r-- arch/arm64/kvm/hyp/hyp-entry.S | 18
-rw-r--r-- arch/arm64/kvm/hyp/s2-setup.c | 2
-rw-r--r-- arch/arm64/kvm/hyp/switch.c | 60
-rw-r--r-- arch/arm64/kvm/hyp/sysreg-sr.c | 22
-rw-r--r-- arch/arm64/kvm/inject_fault.c | 13
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 11
-rw-r--r-- arch/arm64/lib/clear_user.S | 4
-rw-r--r-- arch/arm64/lib/copy_from_user.S | 4
-rw-r--r-- arch/arm64/lib/copy_in_user.S | 4
-rw-r--r-- arch/arm64/lib/copy_to_user.S | 4
-rw-r--r-- arch/arm64/lib/tishift.S | 8
-rw-r--r-- arch/arm64/mm/cache.S | 4
-rw-r--r-- arch/arm64/mm/context.c | 67
-rw-r--r-- arch/arm64/mm/dma-mapping.c | 54
-rw-r--r-- arch/arm64/mm/fault.c | 131
-rw-r--r-- arch/arm64/mm/init.c | 62
-rw-r--r-- arch/arm64/mm/mmu.c | 47
-rw-r--r-- arch/arm64/mm/pgd.c | 8
-rw-r--r-- arch/arm64/mm/proc.S | 66
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 106
-rw-r--r-- arch/arm64/xen/hypercall.S | 4
-rw-r--r-- arch/blackfin/include/asm/thread_info.h | 2
-rw-r--r-- arch/blackfin/include/uapi/asm/poll.h | 21
-rw-r--r-- arch/blackfin/include/uapi/asm/siginfo.h | 34
-rw-r--r-- arch/c6x/include/asm/thread_info.h | 3
-rw-r--r-- arch/cris/Kconfig | 4
-rw-r--r-- arch/cris/arch-v10/drivers/gpio.c | 6
-rw-r--r-- arch/cris/arch-v10/drivers/sync_serial.c | 8
-rw-r--r-- arch/cris/arch-v32/drivers/cryptocop.c | 23
-rw-r--r-- arch/cris/arch-v32/drivers/pci/Makefile | 2
-rw-r--r-- arch/cris/arch-v32/drivers/pci/dma.c | 80
-rw-r--r-- arch/cris/arch-v32/drivers/sync_serial.c | 8
-rw-r--r-- arch/cris/include/asm/Kbuild | 1
-rw-r--r-- arch/cris/include/asm/dma-mapping.h | 20
-rw-r--r-- arch/cris/include/asm/processor.h | 9
-rw-r--r-- arch/cris/include/asm/thread_info.h | 9
-rw-r--r-- arch/cris/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/frv/include/asm/thread_info.h | 3
-rw-r--r-- arch/frv/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/frv/include/uapi/asm/poll.h | 21
-rw-r--r-- arch/frv/include/uapi/asm/siginfo.h | 13
-rw-r--r-- arch/h8300/Kconfig | 1
-rw-r--r-- arch/h8300/include/asm/Kbuild | 1
-rw-r--r-- arch/h8300/include/asm/dma-mapping.h | 12
-rw-r--r-- arch/h8300/include/asm/thread_info.h | 3
-rw-r--r-- arch/h8300/kernel/Makefile | 2
-rw-r--r-- arch/h8300/kernel/dma.c | 69
-rw-r--r-- arch/hexagon/include/asm/dma-mapping.h | 7
-rw-r--r-- arch/hexagon/include/asm/io.h | 2
-rw-r--r-- arch/hexagon/include/asm/thread_info.h | 3
-rw-r--r-- arch/hexagon/kernel/dma.c | 1
-rw-r--r-- arch/hexagon/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/ia64/Kconfig | 10
-rw-r--r-- arch/ia64/Makefile | 2
-rw-r--r-- arch/ia64/hp/common/hwsw_iommu.c | 2
-rw-r--r-- arch/ia64/include/asm/asm-prototypes.h | 2
-rw-r--r-- arch/ia64/include/asm/atomic.h | 37
-rw-r--r-- arch/ia64/include/asm/dma-mapping.h | 19
-rw-r--r-- arch/ia64/include/asm/dma.h | 2
-rw-r--r-- arch/ia64/include/asm/swiotlb.h | 18
-rw-r--r-- arch/ia64/include/asm/thread_info.h | 4
-rw-r--r-- arch/ia64/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/ia64/include/uapi/asm/poll.h | 2
-rw-r--r-- arch/ia64/include/uapi/asm/siginfo.h | 96
-rw-r--r-- arch/ia64/kernel/Makefile | 2
-rw-r--r-- arch/ia64/kernel/acpi.c | 5
-rw-r--r-- arch/ia64/kernel/dma-mapping.c | 9
-rw-r--r-- arch/ia64/kernel/init_task.c | 44
-rw-r--r-- arch/ia64/kernel/pci-dma.c | 19
-rw-r--r-- arch/ia64/kernel/pci-swiotlb.c | 68
-rw-r--r-- arch/ia64/kernel/perfmon.c | 4
-rw-r--r-- arch/ia64/kernel/signal.c | 52
-rw-r--r-- arch/ia64/kernel/time.c | 2
-rw-r--r-- arch/ia64/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/ia64/mm/contig.c | 4
-rw-r--r-- arch/ia64/mm/discontig.c | 8
-rw-r--r-- arch/m32r/Kconfig | 2
-rw-r--r-- arch/m32r/include/asm/Kbuild | 1
-rw-r--r-- arch/m32r/include/asm/dma-mapping.h | 24
-rw-r--r-- arch/m32r/include/asm/io.h | 2
-rw-r--r-- arch/m32r/include/asm/thread_info.h | 3
-rw-r--r-- arch/m32r/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/m32r/include/uapi/asm/poll.h | 2
-rw-r--r-- arch/m32r/kernel/traps.c | 9
-rw-r--r-- arch/m68k/configs/amiga_defconfig | 5
-rw-r--r-- arch/m68k/configs/apollo_defconfig | 5
-rw-r--r-- arch/m68k/configs/atari_defconfig | 5
-rw-r--r-- arch/m68k/configs/bvme6000_defconfig | 5
-rw-r--r-- arch/m68k/configs/hp300_defconfig | 5
-rw-r--r-- arch/m68k/configs/mac_defconfig | 5
-rw-r--r-- arch/m68k/configs/multi_defconfig | 5
-rw-r--r-- arch/m68k/configs/mvme147_defconfig | 5
-rw-r--r-- arch/m68k/configs/mvme16x_defconfig | 5
-rw-r--r-- arch/m68k/configs/q40_defconfig | 5
-rw-r--r-- arch/m68k/configs/sun3_defconfig | 5
-rw-r--r-- arch/m68k/configs/sun3x_defconfig | 5
-rw-r--r-- arch/m68k/include/asm/macintosh.h | 9
-rw-r--r-- arch/m68k/include/asm/thread_info.h | 4
-rw-r--r-- arch/m68k/include/uapi/asm/poll.h | 19
-rw-r--r-- arch/m68k/kernel/dma.c | 2
-rw-r--r-- arch/m68k/mac/config.c | 110
-rw-r--r-- arch/m68k/mac/oss.c | 67
-rw-r--r-- arch/m68k/mm/fault.c | 3
-rw-r--r-- arch/metag/include/asm/thread_info.h | 3
-rw-r--r-- arch/metag/include/uapi/asm/siginfo.h | 7
-rw-r--r-- arch/metag/kernel/traps.c | 2
-rw-r--r-- arch/microblaze/include/asm/dma-mapping.h | 4
-rw-r--r-- arch/microblaze/include/asm/thread_info.h | 3
-rw-r--r-- arch/microblaze/kernel/dma.c | 78
-rw-r--r-- arch/mips/Kconfig | 14
-rw-r--r-- arch/mips/Kconfig.debug | 14
-rw-r--r-- arch/mips/ar7/platform.c | 2
-rw-r--r-- arch/mips/ath25/devices.c | 2
-rw-r--r-- arch/mips/bcm63xx/dev-enet.c | 8
-rw-r--r-- arch/mips/cavium-octeon/Kconfig | 1
-rw-r--r-- arch/mips/cavium-octeon/dma-octeon.c | 29
-rw-r--r-- arch/mips/configs/ar7_defconfig | 1
-rw-r--r-- arch/mips/configs/ath25_defconfig | 1
-rw-r--r-- arch/mips/configs/ath79_defconfig | 1
-rw-r--r-- arch/mips/configs/pic32mzda_defconfig | 1
-rw-r--r-- arch/mips/configs/qi_lb60_defconfig | 1
-rw-r--r-- arch/mips/configs/rm200_defconfig | 9
-rw-r--r-- arch/mips/configs/rt305x_defconfig | 1
-rw-r--r-- arch/mips/configs/xway_defconfig | 1
-rw-r--r-- arch/mips/include/asm/asm-prototypes.h | 2
-rw-r--r-- arch/mips/include/asm/compat.h | 73
-rw-r--r-- arch/mips/include/asm/dma-direct.h | 1
-rw-r--r-- arch/mips/include/asm/dma-mapping.h | 10
-rw-r--r-- arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h | 4
-rw-r--r-- arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h | 8
-rw-r--r-- arch/mips/include/asm/mach-generic/dma-coherence.h | 12
-rw-r--r-- arch/mips/include/asm/mach-loongson64/dma-coherence.h | 8
-rw-r--r-- arch/mips/include/asm/netlogic/common.h | 3
-rw-r--r-- arch/mips/include/asm/pgtable.h | 3
-rw-r--r-- arch/mips/include/asm/thread_info.h | 3
-rw-r--r-- arch/mips/include/uapi/asm/poll.h | 19
-rw-r--r-- arch/mips/include/uapi/asm/siginfo.h | 86
-rw-r--r-- arch/mips/kernel/cps-vec.S | 2
-rw-r--r-- arch/mips/kernel/mips-cm.c | 1
-rw-r--r-- arch/mips/kernel/process.c | 12
-rw-r--r-- arch/mips/kernel/ptrace.c | 147
-rw-r--r-- arch/mips/kernel/rtlx.c | 4
-rw-r--r-- arch/mips/kernel/signal32.c | 67
-rw-r--r-- arch/mips/kernel/traps.c | 29
-rw-r--r-- arch/mips/lib/Makefile | 3
-rw-r--r-- arch/mips/lib/libgcc.h | 17
-rw-r--r-- arch/mips/lib/multi3.c | 54
-rw-r--r-- arch/mips/loongson64/Kconfig | 1
-rw-r--r-- arch/mips/loongson64/common/dma-swiotlb.c | 24
-rw-r--r-- arch/mips/mm/dma-default.c | 3
-rw-r--r-- arch/mips/mm/uasm-micromips.c | 2
-rw-r--r-- arch/mips/net/bpf_jit.c | 2
-rw-r--r-- arch/mips/net/ebpf_jit.c | 33
-rw-r--r-- arch/mips/netlogic/Kconfig | 5
-rw-r--r-- arch/mips/netlogic/common/Makefile | 1
-rw-r--r-- arch/mips/netlogic/common/nlm-dma.c | 97
-rw-r--r-- arch/mips/ralink/timer.c | 4
-rw-r--r-- arch/mips/rb532/Makefile | 4
-rw-r--r-- arch/mips/rb532/devices.c | 4
-rw-r--r-- arch/mn10300/configs/asb2364_defconfig | 1
-rw-r--r-- arch/mn10300/include/asm/thread_info.h | 2
-rw-r--r-- arch/mn10300/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/mn10300/include/uapi/asm/poll.h | 2
-rw-r--r-- arch/mn10300/kernel/mn10300-serial.c | 7
-rw-r--r-- arch/mn10300/mm/dma-alloc.c | 3
-rw-r--r-- arch/mn10300/mm/misalignment.c | 2
-rw-r--r-- arch/nios2/include/asm/thread_info.h | 3
-rw-r--r-- arch/nios2/mm/dma-mapping.c | 3
-rw-r--r-- arch/openrisc/include/asm/processor.h | 2
-rw-r--r-- arch/openrisc/include/asm/thread_info.h | 2
-rw-r--r-- arch/openrisc/kernel/traps.c | 10
-rw-r--r-- arch/openrisc/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/parisc/include/asm/compat.h | 64
-rw-r--r-- arch/parisc/include/asm/ldcw.h | 2
-rw-r--r-- arch/parisc/include/asm/thread_info.h | 3
-rw-r--r-- arch/parisc/include/uapi/asm/siginfo.h | 7
-rw-r--r-- arch/parisc/kernel/drivers.c | 2
-rw-r--r-- arch/parisc/kernel/entry.S | 13
-rw-r--r-- arch/parisc/kernel/pacache.S | 9
-rw-r--r-- arch/parisc/kernel/pci-dma.c | 7
-rw-r--r-- arch/parisc/kernel/pdt.c | 2
-rw-r--r-- arch/parisc/kernel/process.c | 39
-rw-r--r-- arch/parisc/kernel/signal32.c | 106
-rw-r--r-- arch/parisc/kernel/signal32.h | 3
-rw-r--r-- arch/parisc/kernel/traps.c | 2
-rw-r--r-- arch/parisc/mm/init.c | 10
-rw-r--r-- arch/powerpc/Kconfig | 3
-rw-r--r-- arch/powerpc/boot/dts/fsl/p1010si-post.dtsi | 2
-rw-r--r-- arch/powerpc/configs/44x/warp_defconfig | 1
-rw-r--r-- arch/powerpc/configs/c2k_defconfig | 12
-rw-r--r-- arch/powerpc/configs/fsl-emb-nonhw.config | 1
-rw-r--r-- arch/powerpc/configs/g5_defconfig | 12
-rw-r--r-- arch/powerpc/configs/maple_defconfig | 12
-rw-r--r-- arch/powerpc/configs/mpc512x_defconfig | 1
-rw-r--r-- arch/powerpc/configs/pmac32_defconfig | 12
-rw-r--r-- arch/powerpc/configs/powernv_defconfig | 1
-rw-r--r-- arch/powerpc/configs/ppc64_defconfig | 1
-rw-r--r-- arch/powerpc/configs/ppc6xx_defconfig | 1
-rw-r--r-- arch/powerpc/configs/ps3_defconfig | 1
-rw-r--r-- arch/powerpc/configs/pseries_defconfig | 1
-rw-r--r-- arch/powerpc/configs/wii_defconfig | 1
-rw-r--r-- arch/powerpc/crypto/crc32c-vpmsum_glue.c | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-4k.h | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-64k.h | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 13
-rw-r--r-- arch/powerpc/include/asm/book3s/64/radix.h | 6
-rw-r--r-- arch/powerpc/include/asm/compat.h | 65
-rw-r--r-- arch/powerpc/include/asm/debug.h | 2
-rw-r--r-- arch/powerpc/include/asm/dma-direct.h | 29
-rw-r--r-- arch/powerpc/include/asm/dma-mapping.h | 36
-rw-r--r-- arch/powerpc/include/asm/exception-64e.h | 6
-rw-r--r-- arch/powerpc/include/asm/exception-64s.h | 57
-rw-r--r-- arch/powerpc/include/asm/feature-fixups.h | 13
-rw-r--r-- arch/powerpc/include/asm/hvcall.h | 18
-rw-r--r-- arch/powerpc/include/asm/paca.h | 10
-rw-r--r-- arch/powerpc/include/asm/plpar_wrappers.h | 14
-rw-r--r-- arch/powerpc/include/asm/setup.h | 13
-rw-r--r-- arch/powerpc/include/asm/swiotlb.h | 4
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 3
-rw-r--r-- arch/powerpc/include/uapi/asm/kvm.h | 25
-rw-r--r-- arch/powerpc/include/uapi/asm/siginfo.h | 16
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 5
-rw-r--r-- arch/powerpc/kernel/dma-iommu.c | 2
-rw-r--r-- arch/powerpc/kernel/dma-swiotlb.c | 12
-rw-r--r-- arch/powerpc/kernel/dma.c | 73
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 44
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 137
-rw-r--r-- arch/powerpc/kernel/mce.c | 2
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 2
-rw-r--r-- arch/powerpc/kernel/process.c | 13
-rw-r--r-- arch/powerpc/kernel/rtasd.c | 2
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 13
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 139
-rw-r--r-- arch/powerpc/kernel/signal.c | 6
-rw-r--r-- arch/powerpc/kernel/signal_32.c | 66
-rw-r--r-- arch/powerpc/kernel/traps.c | 22
-rw-r--r-- arch/powerpc/kernel/vmlinux.lds.S | 9
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu.c | 1
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 90
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 9
-rw-r--r-- arch/powerpc/kvm/book3s_pr.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_rmhandlers.S | 7
-rw-r--r-- arch/powerpc/kvm/book3s_segment.S | 4
-rw-r--r-- arch/powerpc/kvm/book3s_xive.c | 2
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 131
-rw-r--r-- arch/powerpc/lib/feature-fixups.c | 41
-rw-r--r-- arch/powerpc/mm/fault.c | 7
-rw-r--r-- arch/powerpc/mm/pgtable-book3s64.c | 7
-rw-r--r-- arch/powerpc/mm/pgtable-hash64.c | 22
-rw-r--r-- arch/powerpc/net/bpf_jit_comp.c | 2
-rw-r--r-- arch/powerpc/net/bpf_jit_comp64.c | 12
-rw-r--r-- arch/powerpc/platforms/cell/iommu.c | 28
-rw-r--r-- arch/powerpc/platforms/cell/spufs/backing_ops.c | 6
-rw-r--r-- arch/powerpc/platforms/cell/spufs/file.c | 16
-rw-r--r-- arch/powerpc/platforms/cell/spufs/hw_ops.c | 5
-rw-r--r-- arch/powerpc/platforms/cell/spufs/spufs.h | 3
-rw-r--r-- arch/powerpc/platforms/pasemi/iommu.c | 2
-rw-r--r-- arch/powerpc/platforms/pasemi/setup.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal-memory-errors.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal-prd.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 4
-rw-r--r-- arch/powerpc/platforms/powernv/setup.c | 49
-rw-r--r-- arch/powerpc/platforms/pseries/dlpar.c | 21
-rw-r--r-- arch/powerpc/platforms/pseries/iommu.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/pseries.h | 2
-rw-r--r-- arch/powerpc/platforms/pseries/ras.c | 3
-rw-r--r-- arch/powerpc/platforms/pseries/setup.c | 35
-rw-r--r-- arch/powerpc/platforms/pseries/vio.c | 2
-rw-r--r-- arch/powerpc/sysdev/dart_iommu.c | 4
-rw-r--r-- arch/powerpc/sysdev/fsl_pci.c | 4
-rw-r--r-- arch/powerpc/xmon/xmon.c | 26
-rw-r--r-- arch/riscv/Kconfig | 2
-rw-r--r-- arch/riscv/configs/defconfig | 75
-rw-r--r-- arch/riscv/include/asm/Kbuild | 1
-rw-r--r-- arch/riscv/include/asm/csr.h | 8
-rw-r--r-- arch/riscv/include/asm/dma-mapping.h | 38
-rw-r--r-- arch/riscv/include/asm/io.h | 4
-rw-r--r-- arch/riscv/include/asm/irqflags.h | 10
-rw-r--r-- arch/riscv/include/asm/pgtable.h | 4
-rw-r--r-- arch/riscv/include/asm/ptrace.h | 2
-rw-r--r-- arch/riscv/include/asm/thread_info.h | 2
-rw-r--r-- arch/riscv/include/asm/tlbflush.h | 4
-rw-r--r-- arch/riscv/include/asm/uaccess.h | 12
-rw-r--r-- arch/riscv/include/asm/unistd.h | 1
-rw-r--r-- arch/riscv/include/asm/vdso-syscalls.h | 28
-rw-r--r-- arch/riscv/include/uapi/asm/syscalls.h | 26
-rw-r--r-- arch/riscv/kernel/entry.S | 8
-rw-r--r-- arch/riscv/kernel/process.c | 6
-rw-r--r-- arch/riscv/kernel/syscall_table.c | 1
-rw-r--r-- arch/riscv/kernel/vdso/flush_icache.S | 1
-rw-r--r-- arch/riscv/mm/fault.c | 3
-rw-r--r-- arch/s390/Kconfig | 3
-rw-r--r-- arch/s390/Makefile | 14
-rw-r--r-- arch/s390/boot/compressed/Makefile | 1
-rw-r--r-- arch/s390/boot/compressed/vmlinux.lds.S | 12
-rw-r--r-- arch/s390/configs/zfcpdump_defconfig | 1
-rw-r--r-- arch/s390/crypto/crc32-vx.c | 3
-rw-r--r-- arch/s390/include/asm/Kbuild | 6
-rw-r--r-- arch/s390/include/asm/compat.h | 73
-rw-r--r-- arch/s390/include/asm/diag.h | 44
-rw-r--r-- arch/s390/include/asm/dis.h | 2
-rw-r--r-- arch/s390/include/asm/dma-mapping.h | 26
-rw-r--r-- arch/s390/include/asm/dwarf.h | 37
-rw-r--r-- arch/s390/include/asm/facility.h | 2
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 3
-rw-r--r-- arch/s390/include/asm/pci_dma.h | 3
-rw-r--r-- arch/s390/include/asm/pgtable.h | 4
-rw-r--r-- arch/s390/include/asm/thread_info.h | 2
-rw-r--r-- arch/s390/include/asm/unistd.h | 1
-rw-r--r-- arch/s390/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/s390/include/uapi/asm/kvm.h | 5
-rw-r--r-- arch/s390/include/uapi/asm/unistd.h | 401
-rw-r--r-- arch/s390/kernel/compat_linux.c | 8
-rw-r--r-- arch/s390/kernel/compat_signal.c | 100
-rw-r--r-- arch/s390/kernel/entry.S | 4
-rw-r--r-- arch/s390/kernel/head.S | 10
-rw-r--r-- arch/s390/kernel/ipl.c | 12
-rw-r--r-- arch/s390/kernel/kprobes.c | 1
-rw-r--r-- arch/s390/kernel/smp.c | 2
-rw-r--r-- arch/s390/kernel/syscalls.S | 392
-rw-r--r-- arch/s390/kernel/syscalls/Makefile | 52
-rw-r--r-- arch/s390/kernel/syscalls/syscall.tbl | 390
-rwxr-xr-x arch/s390/kernel/syscalls/syscalltbl | 232
-rw-r--r-- arch/s390/kernel/topology.c | 3
-rw-r--r-- arch/s390/kernel/vdso32/Makefile | 3
-rw-r--r-- arch/s390/kernel/vdso32/clock_getres.S | 5
-rw-r--r-- arch/s390/kernel/vdso32/clock_gettime.S | 17
-rw-r--r-- arch/s390/kernel/vdso32/getcpu.S | 5
-rw-r--r-- arch/s390/kernel/vdso32/gettimeofday.S | 9
-rw-r--r-- arch/s390/kernel/vdso64/Makefile | 3
-rw-r--r-- arch/s390/kernel/vdso64/clock_getres.S | 5
-rw-r--r-- arch/s390/kernel/vdso64/clock_gettime.S | 21
-rw-r--r-- arch/s390/kernel/vdso64/getcpu.S | 5
-rw-r--r-- arch/s390/kernel/vdso64/gettimeofday.S | 9
-rw-r--r-- arch/s390/kernel/vmlinux.lds.S | 3
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 39
-rw-r--r-- arch/s390/kvm/priv.c | 2
-rw-r--r-- arch/s390/kvm/vsie.c | 10
-rw-r--r-- arch/s390/lib/uaccess.c | 2
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 14
-rw-r--r-- arch/s390/pci/pci_dma.c | 21
-rw-r--r-- arch/s390/pci/pci_insn.c | 3
-rw-r--r-- arch/s390/tools/.gitignore | 1
-rw-r--r-- arch/s390/tools/Makefile | 23
-rw-r--r-- arch/s390/tools/gen_facilities.c | 4
-rw-r--r-- arch/s390/tools/gen_opcode_table.c | 4
-rw-r--r-- arch/score/include/asm/thread_info.h | 3
-rw-r--r-- arch/score/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/score/include/uapi/asm/poll.h | 7
-rw-r--r-- arch/sh/boards/board-espt.c | 1
-rw-r--r-- arch/sh/boards/board-sh7757lcr.c | 4
-rw-r--r-- arch/sh/boards/mach-ecovec24/setup.c | 1
-rw-r--r-- arch/sh/boards/mach-se/770x/setup.c | 24
-rw-r--r-- arch/sh/boards/mach-se/7724/setup.c | 1
-rw-r--r-- arch/sh/boards/mach-sh7763rdp/setup.c | 1
-rw-r--r-- arch/sh/configs/polaris_defconfig | 1
-rw-r--r-- arch/sh/drivers/push-switch.c | 2
-rw-r--r-- arch/sh/include/asm/thread_info.h | 3
-rw-r--r-- arch/sh/include/mach-se/mach/se.h | 1
-rw-r--r-- arch/sh/kernel/cpu/sh2/setup-sh7619.c | 1
-rw-r--r-- arch/sh/kernel/traps_32.c | 3
-rw-r--r-- arch/sparc/crypto/Makefile | 2
-rw-r--r-- arch/sparc/crypto/crc32c_glue.c | 1
-rw-r--r-- arch/sparc/include/asm/asm-prototypes.h | 2
-rw-r--r-- arch/sparc/include/asm/compat.h | 59
-rw-r--r-- arch/sparc/include/asm/hypervisor.h | 138
-rw-r--r-- arch/sparc/include/asm/pgtable_64.h | 2
-rw-r--r-- arch/sparc/include/asm/thread_info_32.h | 3
-rw-r--r-- arch/sparc/include/asm/thread_info_64.h | 3
-rw-r--r-- arch/sparc/include/uapi/asm/oradax.h | 91
-rw-r--r-- arch/sparc/include/uapi/asm/poll.h | 28
-rw-r--r-- arch/sparc/kernel/hvapi.c | 1
-rw-r--r-- arch/sparc/kernel/hvcalls.S | 57
-rw-r--r-- arch/sparc/kernel/signal32.c | 69
-rw-r--r-- arch/sparc/kernel/signal_64.c | 3
-rw-r--r-- arch/sparc/mm/tlb.c | 23
-rw-r--r-- arch/sparc/net/bpf_jit_comp_32.c | 2
-rw-r--r-- arch/sparc/net/bpf_jit_comp_64.c | 66
-rw-r--r-- arch/sparc/vdso/vma.c | 2
-rw-r--r-- arch/tile/Kconfig | 3
-rw-r--r-- arch/tile/configs/tilegx_defconfig | 1
-rw-r--r-- arch/tile/configs/tilepro_defconfig | 1
-rw-r--r-- arch/tile/include/asm/compat.h | 62
-rw-r--r-- arch/tile/include/asm/dma-mapping.h | 20
-rw-r--r-- arch/tile/include/asm/thread_info.h | 3
-rw-r--r-- arch/tile/include/uapi/asm/siginfo.h | 8
-rw-r--r-- arch/tile/kernel/compat_signal.c | 73
-rw-r--r-- arch/tile/kernel/pci-dma.c | 38
-rw-r--r-- arch/tile/kernel/setup.c | 8
-rw-r--r-- arch/tile/kernel/single_step.c | 24
-rw-r--r-- arch/tile/kernel/sysfs.c | 12
-rw-r--r-- arch/tile/kernel/traps.c | 4
-rw-r--r-- arch/tile/kernel/unaligned.c | 46
-rw-r--r-- arch/um/Makefile | 7
-rw-r--r-- arch/um/drivers/hostaudio_kern.c | 6
-rw-r--r-- arch/um/include/asm/processor-generic.h | 5
-rw-r--r-- arch/um/include/asm/thread_info.h | 9
-rw-r--r-- arch/um/include/asm/vmlinux.lds.h | 2
-rw-r--r-- arch/um/kernel/dyn.lds.S | 3
-rw-r--r-- arch/um/kernel/trap.c | 2
-rw-r--r-- arch/um/kernel/um_arch.c | 2
-rw-r--r-- arch/um/kernel/uml.lds.S | 2
-rw-r--r-- arch/unicore32/include/asm/dma-mapping.h | 29
-rw-r--r-- arch/unicore32/include/asm/thread_info.h | 3
-rw-r--r-- arch/unicore32/kernel/traps.c | 1
-rw-r--r-- arch/unicore32/mm/Kconfig | 1
-rw-r--r-- arch/unicore32/mm/Makefile | 2
-rw-r--r-- arch/unicore32/mm/dma-swiotlb.c | 48
-rw-r--r-- arch/x86/Kconfig | 33
-rw-r--r-- arch/x86/Kconfig.debug | 8
-rw-r--r-- arch/x86/Makefile | 8
-rw-r--r-- arch/x86/boot/compressed/eboot.c | 1
-rw-r--r-- arch/x86/crypto/aesni-intel_asm.S | 204
-rw-r--r-- arch/x86/crypto/aesni-intel_glue.c | 70
-rw-r--r-- arch/x86/crypto/camellia-aesni-avx-asm_64.S | 3
-rw-r--r-- arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 3
-rw-r--r-- arch/x86/crypto/chacha20_glue.c | 1
-rw-r--r-- arch/x86/crypto/crc32-pclmul_glue.c | 1
-rw-r--r-- arch/x86/crypto/crc32c-intel_glue.c | 1
-rw-r--r-- arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 3
-rw-r--r-- arch/x86/crypto/poly1305_glue.c | 2
-rw-r--r-- arch/x86/crypto/salsa20-i586-asm_32.S | 184
-rw-r--r-- arch/x86/crypto/salsa20-x86_64-asm_64.S | 114
-rw-r--r-- arch/x86/crypto/salsa20_glue.c | 105
-rw-r--r-- arch/x86/crypto/twofish-x86_64-asm_64-3way.S | 112
-rw-r--r-- arch/x86/entry/calling.h | 36
-rw-r--r-- arch/x86/entry/common.c | 6
-rw-r--r-- arch/x86/entry/entry_32.S | 17
-rw-r--r-- arch/x86/entry/entry_64.S | 26
-rw-r--r-- arch/x86/entry/entry_64_compat.S | 13
-rw-r--r-- arch/x86/events/amd/power.c | 2
-rw-r--r-- arch/x86/events/intel/bts.c | 18
-rw-r--r-- arch/x86/events/intel/ds.c | 49
-rw-r--r-- arch/x86/events/intel/rapl.c | 4
-rw-r--r-- arch/x86/events/msr.c | 70
-rw-r--r-- arch/x86/hyperv/mmu.c | 12
-rw-r--r-- arch/x86/include/asm/acpi.h | 2
-rw-r--r-- arch/x86/include/asm/alternative.h | 4
-rw-r--r-- arch/x86/include/asm/apic.h | 1
-rw-r--r-- arch/x86/include/asm/asm-prototypes.h | 27
-rw-r--r-- arch/x86/include/asm/compat.h | 86
-rw-r--r-- arch/x86/include/asm/cpufeature.h | 7
-rw-r--r-- arch/x86/include/asm/cpufeatures.h | 26
-rw-r--r-- arch/x86/include/asm/disabled-features.h | 3
-rw-r--r-- arch/x86/include/asm/dma-direct.h | 30
-rw-r--r-- arch/x86/include/asm/dma-mapping.h | 29
-rw-r--r-- arch/x86/include/asm/error-injection.h | 13
-rw-r--r-- arch/x86/include/asm/fpu/signal.h | 6
-rw-r--r-- arch/x86/include/asm/hypervisor.h | 1
-rw-r--r-- arch/x86/include/asm/i8259.h | 5
-rw-r--r-- arch/x86/include/asm/jailhouse_para.h | 26
-rw-r--r-- arch/x86/include/asm/kprobes.h | 2
-rw-r--r-- arch/x86/include/asm/mce.h | 2
-rw-r--r-- arch/x86/include/asm/mem_encrypt.h | 4
-rw-r--r-- arch/x86/include/asm/mpspec_def.h | 14
-rw-r--r-- arch/x86/include/asm/mshyperv.h | 18
-rw-r--r-- arch/x86/include/asm/msr-index.h | 15
-rw-r--r-- arch/x86/include/asm/nospec-branch.h | 174
-rw-r--r-- arch/x86/include/asm/pci_x86.h | 1
-rw-r--r-- arch/x86/include/asm/pgtable-3level.h | 37
-rw-r--r-- arch/x86/include/asm/pgtable.h | 15
-rw-r--r-- arch/x86/include/asm/pgtable_64_types.h | 14
-rw-r--r-- arch/x86/include/asm/processor-flags.h | 2
-rw-r--r-- arch/x86/include/asm/processor.h | 3
-rw-r--r-- arch/x86/include/asm/ptrace.h | 5
-rw-r--r-- arch/x86/include/asm/required-features.h | 3
-rw-r--r-- arch/x86/include/asm/swiotlb.h | 2
-rw-r--r-- arch/x86/include/asm/thread_info.h | 2
-rw-r--r-- arch/x86/include/asm/tlbflush.h | 6
-rw-r--r-- arch/x86/include/asm/traps.h | 1
-rw-r--r-- arch/x86/include/asm/unwind.h | 17
-rw-r--r-- arch/x86/include/asm/uprobes.h | 4
-rw-r--r-- arch/x86/include/asm/uv/uv_bau.h | 1
-rw-r--r-- arch/x86/include/asm/uv/uv_hub.h | 14
-rw-r--r-- arch/x86/include/asm/uv/uv_mmrs.h | 749
-rw-r--r-- arch/x86/include/asm/x86_init.h | 1
-rw-r--r-- arch/x86/include/asm/xen/hypercall.h | 5
-rw-r--r-- arch/x86/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/x86/include/uapi/asm/bootparam.h | 22
-rw-r--r-- arch/x86/include/uapi/asm/poll.h | 1
-rw-r--r-- arch/x86/kernel/Makefile | 7
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 35
-rw-r--r-- arch/x86/kernel/acpi/sleep.c | 2
-rw-r--r-- arch/x86/kernel/alternative.c | 21
-rw-r--r-- arch/x86/kernel/amd_gart_64.c | 1
-rw-r--r-- arch/x86/kernel/aperture_64.c | 46
-rw-r--r-- arch/x86/kernel/apic/apic.c | 49
-rw-r--r-- arch/x86/kernel/apic/apic_flat_64.c | 24
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 20
-rw-r--r-- arch/x86/kernel/apic/vector.c | 7
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 84
-rw-r--r-- arch/x86/kernel/apm_32.c | 2
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 28
-rw-r--r-- arch/x86/kernel/cpu/bugs.c | 257
-rw-r--r-- arch/x86/kernel/cpu/centaur.c | 4
-rw-r--r-- arch/x86/kernel/cpu/common.c | 46
-rw-r--r-- arch/x86/kernel/cpu/hypervisor.c | 4
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 77
-rw-r--r-- arch/x86/kernel/cpu/intel_rdt.c | 76
-rw-r--r-- arch/x86/kernel/cpu/intel_rdt.h | 5
-rw-r--r-- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 117
-rw-r--r-- arch/x86/kernel/cpu/mcheck/dev-mcelog.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce-severity.c | 26
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 28
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce_amd.c | 29
-rw-r--r-- arch/x86/kernel/cpu/microcode/core.c | 4
-rw-r--r-- arch/x86/kernel/cpu/microcode/intel.c | 29
-rw-r--r-- arch/x86/kernel/cpu/scattered.c | 4
-rw-r--r-- arch/x86/kernel/dumpstack.c | 31
-rw-r--r-- arch/x86/kernel/ftrace_32.S | 6
-rw-r--r-- arch/x86/kernel/ftrace_64.S | 34
-rw-r--r-- arch/x86/kernel/head64.c | 4
-rw-r--r-- arch/x86/kernel/idt.c | 12
-rw-r--r-- arch/x86/kernel/irq_32.c | 9
-rw-r--r-- arch/x86/kernel/irqinit.c | 3
-rw-r--r-- arch/x86/kernel/itmt.c | 1
-rw-r--r-- arch/x86/kernel/jailhouse.c | 211
-rw-r--r-- arch/x86/kernel/kprobes/opt.c | 23
-rw-r--r-- arch/x86/kernel/mpparse.c | 23
-rw-r--r-- arch/x86/kernel/pci-dma.c | 23
-rw-r--r-- arch/x86/kernel/pci-nommu.c | 2
-rw-r--r-- arch/x86/kernel/pci-swiotlb.c | 8
-rw-r--r-- arch/x86/kernel/platform-quirks.c | 1
-rw-r--r-- arch/x86/kernel/process.c | 28
-rw-r--r-- arch/x86/kernel/setup.c | 16
-rw-r--r-- arch/x86/kernel/signal_compat.c | 123
-rw-r--r-- arch/x86/kernel/smpboot.c | 5
-rw-r--r-- arch/x86/kernel/stacktrace.c | 2
-rw-r--r-- arch/x86/kernel/tboot.c | 11
-rw-r--r-- arch/x86/kernel/time.c | 9
-rw-r--r-- arch/x86/kernel/tsc.c | 70
-rw-r--r-- arch/x86/kernel/unwind_orc.c | 48
-rw-r--r-- arch/x86/kernel/uprobes.c | 107
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 6
-rw-r--r-- arch/x86/kvm/emulate.c | 9
-rw-r--r-- arch/x86/kvm/mmu.c | 19
-rw-r--r-- arch/x86/kvm/svm.c | 32
-rw-r--r-- arch/x86/kvm/vmx.c | 39
-rw-r--r-- arch/x86/kvm/x86.c | 4
-rw-r--r-- arch/x86/lib/Makefile | 3
-rw-r--r-- arch/x86/lib/checksum_32.S | 7
-rw-r--r-- arch/x86/lib/delay.c | 2
-rw-r--r-- arch/x86/lib/error-inject.c | 19
-rw-r--r-- arch/x86/lib/retpoline.S | 104
-rw-r--r-- arch/x86/mm/dump_pagetables.c | 2
-rw-r--r-- arch/x86/mm/extable.c | 34
-rw-r--r-- arch/x86/mm/fault.c | 29
-rw-r--r-- arch/x86/mm/init.c | 2
-rw-r--r-- arch/x86/mm/kasan_init_64.c | 24
-rw-r--r-- arch/x86/mm/kaslr.c | 32
-rw-r--r-- arch/x86/mm/mem_encrypt.c | 358
-rw-r--r-- arch/x86/mm/mem_encrypt_boot.S | 80
-rw-r--r-- arch/x86/mm/pti.c | 41
-rw-r--r-- arch/x86/mm/tlb.c | 34
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 106
-rw-r--r-- arch/x86/pci/common.c | 5
-rw-r--r-- arch/x86/pci/fixup.c | 43
-rw-r--r-- arch/x86/pci/intel_mid_pci.c | 1
-rw-r--r-- arch/x86/pci/sta2x11-fixup.c | 1
-rw-r--r-- arch/x86/platform/efi/efi_64.c | 3
-rw-r--r-- arch/x86/platform/efi/quirks.c | 13
-rw-r--r-- arch/x86/platform/intel-mid/device_libs/platform_bt.c | 2
-rw-r--r-- arch/x86/platform/intel-mid/intel-mid.c | 2
-rw-r--r-- arch/x86/platform/intel-mid/sfi.c | 5
-rw-r--r-- arch/x86/platform/uv/tlb_uv.c | 3
-rw-r--r-- arch/x86/tools/Makefile | 12
-rw-r--r-- arch/x86/tools/insn_decoder_test.c (renamed from arch/x86/tools/test_get_len.c) | 43
-rw-r--r-- arch/x86/tools/objdump_reformat.awk (renamed from arch/x86/tools/distill.awk) | 4
-rw-r--r-- arch/x86/xen/mmu_hvm.c | 2
-rw-r--r-- arch/x86/xen/mmu_pv.c | 8
-rw-r--r-- arch/x86/xen/spinlock.c | 2
-rw-r--r-- arch/x86/xen/xen-ops.h | 2
-rw-r--r-- arch/xtensa/Kconfig | 7
-rw-r--r-- arch/xtensa/Makefile | 7
-rw-r--r-- arch/xtensa/boot/boot-redboot/bootstrap.S | 1
-rw-r--r-- arch/xtensa/boot/lib/Makefile | 6
-rw-r--r-- arch/xtensa/configs/audio_kc705_defconfig | 1
-rw-r--r-- arch/xtensa/configs/cadence_csp_defconfig | 1
-rw-r--r-- arch/xtensa/configs/generic_kc705_defconfig | 1
-rw-r--r-- arch/xtensa/configs/nommu_kc705_defconfig | 1
-rw-r--r-- arch/xtensa/configs/smp_lx200_defconfig | 1
-rw-r--r-- arch/xtensa/include/asm/asmmacro.h | 40
-rw-r--r-- arch/xtensa/include/asm/current.h | 4
-rw-r--r-- arch/xtensa/include/asm/dma-mapping.h | 10
-rw-r--r-- arch/xtensa/include/asm/fixmap.h | 4
-rw-r--r-- arch/xtensa/include/asm/futex.h | 23
-rw-r--r-- arch/xtensa/include/asm/highmem.h | 2
-rw-r--r-- arch/xtensa/include/asm/kasan.h | 37
-rw-r--r-- arch/xtensa/include/asm/kmem_layout.h | 7
-rw-r--r-- arch/xtensa/include/asm/linkage.h | 9
-rw-r--r-- arch/xtensa/include/asm/mmu_context.h | 1
-rw-r--r-- arch/xtensa/include/asm/nommu_context.h | 4
-rw-r--r-- arch/xtensa/include/asm/page.h | 2
-rw-r--r-- arch/xtensa/include/asm/pgtable.h | 3
-rw-r--r-- arch/xtensa/include/asm/ptrace.h | 15
-rw-r--r-- arch/xtensa/include/asm/regs.h | 1
-rw-r--r-- arch/xtensa/include/asm/stackprotector.h | 40
-rw-r--r-- arch/xtensa/include/asm/string.h | 23
-rw-r--r-- arch/xtensa/include/asm/thread_info.h | 16
-rw-r--r-- arch/xtensa/include/asm/traps.h | 35
-rw-r--r-- arch/xtensa/include/asm/uaccess.h | 9
-rw-r--r-- arch/xtensa/include/uapi/asm/poll.h | 21
-rw-r--r-- arch/xtensa/kernel/Makefile | 3
-rw-r--r-- arch/xtensa/kernel/align.S | 7
-rw-r--r-- arch/xtensa/kernel/asm-offsets.c | 16
-rw-r--r-- arch/xtensa/kernel/coprocessor.S | 3
-rw-r--r-- arch/xtensa/kernel/entry.S | 103
-rw-r--r-- arch/xtensa/kernel/head.S | 10
-rw-r--r-- arch/xtensa/kernel/module.c | 19
-rw-r--r-- arch/xtensa/kernel/pci.c | 30
-rw-r--r-- arch/xtensa/kernel/process.c | 6
-rw-r--r-- arch/xtensa/kernel/ptrace.c | 8
-rw-r--r-- arch/xtensa/kernel/setup.c | 49
-rw-r--r-- arch/xtensa/kernel/signal.c | 8
-rw-r--r-- arch/xtensa/kernel/traps.c | 64
-rw-r--r-- arch/xtensa/kernel/vectors.S | 17
-rw-r--r-- arch/xtensa/kernel/vmlinux.lds.S | 90
-rw-r--r-- arch/xtensa/kernel/xtensa_ksyms.c | 5
-rw-r--r-- arch/xtensa/lib/checksum.S | 74
-rw-r--r-- arch/xtensa/lib/memcopy.S | 81
-rw-r--r-- arch/xtensa/lib/memset.S | 45
-rw-r--r-- arch/xtensa/lib/pci-auto.c | 45
-rw-r--r-- arch/xtensa/lib/strncpy_user.S | 60
-rw-r--r-- arch/xtensa/lib/strnlen_user.S | 28
-rw-r--r-- arch/xtensa/lib/usercopy.S | 138
-rw-r--r-- arch/xtensa/mm/Makefile | 5
-rw-r--r-- arch/xtensa/mm/cache.c | 3
-rw-r--r-- arch/xtensa/mm/fault.c | 22
-rw-r--r-- arch/xtensa/mm/init.c | 36
-rw-r--r-- arch/xtensa/mm/kasan_init.c | 95
-rw-r--r-- arch/xtensa/mm/mmu.c | 31
-rw-r--r-- arch/xtensa/mm/tlb.c | 6
-rw-r--r-- arch/xtensa/platforms/iss/console.c | 4
-rw-r--r-- arch/xtensa/platforms/iss/network.c | 14
-rw-r--r-- block/bfq-cgroup.c | 7
-rw-r--r-- block/bfq-iosched.c | 529
-rw-r--r-- block/bfq-iosched.h | 19
-rw-r--r-- block/bfq-wf2q.c | 7
-rw-r--r-- block/bio-integrity.c | 1
-rw-r--r-- block/bio.c | 30
-rw-r--r-- block/blk-core.c | 96
-rw-r--r-- block/blk-exec.c | 2
-rw-r--r-- block/blk-lib.c | 12
-rw-r--r-- block/blk-map.c | 4
-rw-r--r-- block/blk-merge.c | 13
-rw-r--r-- block/blk-mq-debugfs.c | 22
-rw-r--r-- block/blk-mq-sched.c | 3
-rw-r--r-- block/blk-mq-sched.h | 2
-rw-r--r-- block/blk-mq-sysfs.c | 9
-rw-r--r-- block/blk-mq-tag.c | 13
-rw-r--r-- block/blk-mq.c | 669
-rw-r--r-- block/blk-mq.h | 52
-rw-r--r-- block/blk-sysfs.c | 47
-rw-r--r-- block/blk-throttle.c | 146
-rw-r--r-- block/blk-timeout.c | 26
-rw-r--r-- block/blk-zoned.c | 42
-rw-r--r-- block/blk.h | 48
-rw-r--r-- block/bounce.c | 33
-rw-r--r-- block/bsg-lib.c | 3
-rw-r--r-- block/bsg.c | 44
-rw-r--r-- block/deadline-iosched.c | 114
-rw-r--r-- block/elevator.c | 12
-rw-r--r-- block/genhd.c | 23
-rw-r--r-- block/mq-deadline.c | 141
-rw-r--r-- block/partitions/msdos.c | 4
-rw-r--r-- block/scsi_ioctl.c | 34
-rw-r--r-- crypto/Kconfig | 5
-rw-r--r-- crypto/Makefile | 1
-rw-r--r-- crypto/ablk_helper.c | 5
-rw-r--r-- crypto/aead.c | 19
-rw-r--r-- crypto/af_alg.c | 18
-rw-r--r-- crypto/ahash.c | 33
-rw-r--r-- crypto/algapi.c | 25
-rw-r--r-- crypto/algif_aead.c | 17
-rw-r--r-- crypto/algif_hash.c | 52
-rw-r--r-- crypto/algif_skcipher.c | 62
-rw-r--r-- crypto/api.c | 6
-rw-r--r-- crypto/authenc.c | 4
-rw-r--r-- crypto/authencesn.c | 4
-rw-r--r-- crypto/blkcipher.c | 1
-rw-r--r-- crypto/camellia_generic.c | 3
-rw-r--r-- crypto/cast5_generic.c | 3
-rw-r--r-- crypto/cast6_generic.c | 3
-rw-r--r-- crypto/chacha20_generic.c | 33
-rw-r--r-- crypto/chacha20poly1305.c | 6
-rw-r--r-- crypto/crc32_generic.c | 1
-rw-r--r-- crypto/crc32c_generic.c | 1
-rw-r--r-- crypto/cryptd.c | 17
-rw-r--r-- crypto/crypto_user.c | 4
-rw-r--r-- crypto/ecc.c | 2
-rw-r--r-- crypto/echainiv.c | 5
-rw-r--r-- crypto/gcm.c | 4
-rw-r--r-- crypto/gf128mul.c | 2
-rw-r--r-- crypto/ghash-generic.c | 6
-rw-r--r-- crypto/internal.h | 8
-rw-r--r-- crypto/keywrap.c | 4
-rw-r--r-- crypto/mcryptd.c | 11
-rw-r--r-- crypto/pcrypt.c | 19
-rw-r--r-- crypto/poly1305_generic.c | 27
-rw-r--r-- crypto/proc.c | 2
-rw-r--r-- crypto/salsa20_generic.c | 240
-rw-r--r-- crypto/scompress.c | 51
-rw-r--r-- crypto/seqiv.c | 5
-rw-r--r-- crypto/sha3_generic.c | 332
-rw-r--r-- crypto/shash.c | 25
-rw-r--r-- crypto/simd.c | 4
-rw-r--r-- crypto/skcipher.c | 30
-rw-r--r-- crypto/tcrypt.c | 1083
-rw-r--r-- crypto/testmgr.c | 41
-rw-r--r-- crypto/testmgr.h | 550
-rw-r--r-- crypto/twofish_common.c | 5
-rw-r--r-- crypto/twofish_generic.c | 5
-rw-r--r-- crypto/xcbc.c | 3
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/acpi/Kconfig | 32
-rw-r--r-- drivers/acpi/acpi_dbg.c | 4
-rw-r--r-- drivers/acpi/acpi_lpss.c | 139
-rw-r--r-- drivers/acpi/acpi_video.c | 14
-rw-r--r-- drivers/acpi/acpica/acapps.h | 3
-rw-r--r-- drivers/acpi/acpica/acdebug.h | 4
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 82
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 15
-rw-r--r-- drivers/acpi/acpica/acmacros.h | 2
-rw-r--r-- drivers/acpi/acpica/acnamesp.h | 3
-rw-r--r-- drivers/acpi/acpica/acutils.h | 23
-rw-r--r-- drivers/acpi/acpica/dbexec.c | 110
-rw-r--r-- drivers/acpi/acpica/dbfileio.c | 4
-rw-r--r-- drivers/acpi/acpica/dbinput.c | 145
-rw-r--r-- drivers/acpi/acpica/dscontrol.c | 18
-rw-r--r-- drivers/acpi/acpica/dsfield.c | 28
-rw-r--r-- drivers/acpi/acpica/dsobject.c | 4
-rw-r--r-- drivers/acpi/acpica/dspkginit.c | 21
-rw-r--r-- drivers/acpi/acpica/dsutils.c | 3
-rw-r--r-- drivers/acpi/acpica/dswload.c | 6
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 13
-rw-r--r-- drivers/acpi/acpica/evregion.c | 10
-rw-r--r-- drivers/acpi/acpica/exdump.c | 11
-rw-r--r-- drivers/acpi/acpica/hwtimer.c | 29
-rw-r--r-- drivers/acpi/acpica/hwvalid.c | 14
-rw-r--r-- drivers/acpi/acpica/nsaccess.c | 13
-rw-r--r-- drivers/acpi/acpica/nsconvert.c | 3
-rw-r--r-- drivers/acpi/acpica/nsnames.c | 149
-rw-r--r-- drivers/acpi/acpica/nssearch.c | 1
-rw-r--r-- drivers/acpi/acpica/nsxfeval.c | 9
-rw-r--r-- drivers/acpi/acpica/psargs.c | 2
-rw-r--r-- drivers/acpi/acpica/psobject.c | 10
-rw-r--r-- drivers/acpi/acpica/psutils.c | 14
-rw-r--r-- drivers/acpi/acpica/utdebug.c | 18
-rw-r--r-- drivers/acpi/acpica/utdecode.c | 11
-rw-r--r-- drivers/acpi/acpica/uterror.c | 73
-rw-r--r-- drivers/acpi/acpica/utinit.c | 1
-rw-r--r-- drivers/acpi/acpica/utmath.c | 4
-rw-r--r-- drivers/acpi/acpica/utmutex.c | 9
-rw-r--r-- drivers/acpi/acpica/utnonansi.c | 11
-rw-r--r-- drivers/acpi/acpica/utosi.c | 2
-rw-r--r-- drivers/acpi/acpica/utstrsuppt.c | 42
-rw-r--r-- drivers/acpi/acpica/uttrack.c | 6
-rw-r--r-- drivers/acpi/acpica/utxferror.c | 8
-rw-r--r-- drivers/acpi/apei/ghes.c | 81
-rw-r--r-- drivers/acpi/battery.c | 36
-rw-r--r-- drivers/acpi/bus.c | 18
-rw-r--r-- drivers/acpi/button.c | 22
-rw-r--r-- drivers/acpi/device_pm.c | 27
-rw-r--r-- drivers/acpi/device_sysfs.c | 6
-rw-r--r-- drivers/acpi/ec.c | 2
-rw-r--r-- drivers/acpi/ec_sys.c | 2
-rw-r--r-- drivers/acpi/evged.c | 47
-rw-r--r-- drivers/acpi/internal.h | 2
-rw-r--r-- drivers/acpi/numa.c | 3
-rw-r--r-- drivers/acpi/pci_link.c | 2
-rw-r--r-- drivers/acpi/pmic/intel_pmic_bxtwc.c | 9
-rw-r--r-- drivers/acpi/pmic/intel_pmic_chtdc_ti.c | 5
-rw-r--r-- drivers/acpi/pmic/intel_pmic_chtwc.c | 9
-rw-r--r-- drivers/acpi/pmic/intel_pmic_crc.c | 7
-rw-r--r-- drivers/acpi/pmic/intel_pmic_xpower.c | 7
-rw-r--r-- drivers/acpi/processor_idle.c | 1
-rw-r--r-- drivers/acpi/property.c | 8
-rw-r--r-- drivers/acpi/sleep.c | 16
-rw-r--r-- drivers/acpi/sysfs.c | 26
-rw-r--r-- drivers/acpi/utils.c | 41
-rw-r--r-- drivers/android/binder.c | 2
-rw-r--r-- drivers/ata/Kconfig | 28
-rw-r--r-- drivers/ata/Makefile | 1
-rw-r--r-- drivers/ata/ahci.c | 94
-rw-r--r-- drivers/ata/ahci.h | 3
-rw-r--r-- drivers/ata/ahci_brcm.c | 120
-rw-r--r-- drivers/ata/ata_piix.c | 2
-rw-r--r-- drivers/ata/libata-core.c | 1
-rw-r--r-- drivers/ata/pata_at32.c | 400
-rw-r--r-- drivers/ata/pata_atiixp.c | 4
-rw-r--r-- drivers/ata/pata_it821x.c | 2
-rw-r--r-- drivers/ata/pata_pdc2027x.c | 4
-rw-r--r-- drivers/ata/sata_mv.c | 2
-rw-r--r-- drivers/atm/eni.c | 18
-rw-r--r-- drivers/base/Kconfig | 31
-rw-r--r-- drivers/base/arch_topology.c | 8
-rw-r--r-- drivers/base/attribute_container.c | 3
-rw-r--r-- drivers/base/bus.c | 6
-rw-r--r-- drivers/base/cacheinfo.c | 13
-rw-r--r-- drivers/base/class.c | 4
-rw-r--r-- drivers/base/component.c | 83
-rw-r--r-- drivers/base/container.c | 5
-rw-r--r-- drivers/base/core.c | 6
-rw-r--r-- drivers/base/cpu.c | 49
-rw-r--r-- drivers/base/dd.c | 43
-rw-r--r-- drivers/base/devcoredump.c | 17
-rw-r--r-- drivers/base/devres.c | 3
-rw-r--r-- drivers/base/dma-contiguous.c | 6
-rw-r--r-- drivers/base/dma-mapping.c | 3
-rw-r--r-- drivers/base/driver.c | 6
-rw-r--r-- drivers/base/firmware.c | 3
-rw-r--r-- drivers/base/firmware_class.c | 859
-rw-r--r-- drivers/base/hypervisor.c | 3
-rw-r--r-- drivers/base/init.c | 3
-rw-r--r-- drivers/base/isa.c | 1
-rw-r--r-- drivers/base/map.c | 2
-rw-r--r-- drivers/base/memory.c | 2
-rw-r--r-- drivers/base/module.c | 4
-rw-r--r-- drivers/base/pinctrl.c | 3
-rw-r--r-- drivers/base/platform-msi.c | 13
-rw-r--r-- drivers/base/platform.c | 3
-rw-r--r-- drivers/base/power/domain.c | 69
-rw-r--r-- drivers/base/power/main.c | 415
-rw-r--r-- drivers/base/power/power.h | 11
-rw-r--r-- drivers/base/power/runtime.c | 84
-rw-r--r-- drivers/base/power/sysfs.c | 182
-rw-r--r-- drivers/base/power/wakeirq.c | 8
-rw-r--r-- drivers/base/power/wakeup.c | 26
-rw-r--r-- drivers/base/property.c | 184
-rw-r--r-- drivers/base/regmap/Kconfig | 6
-rw-r--r-- drivers/base/regmap/Makefile | 1
-rw-r--r-- drivers/base/regmap/internal.h | 8
-rw-r--r-- drivers/base/regmap/regcache-flat.c | 15
-rw-r--r-- drivers/base/regmap/regmap-debugfs.c | 12
-rw-r--r-- drivers/base/regmap/regmap-sdw.c | 88
-rw-r--r-- drivers/base/regmap/regmap.c | 47
-rw-r--r-- drivers/base/soc.c | 2
-rw-r--r-- drivers/base/syscore.c | 3
-rw-r--r-- drivers/base/test/test_async_driver_probe.c | 10
-rw-r--r-- drivers/base/topology.c | 17
-rw-r--r-- drivers/base/transport_class.c | 3
-rw-r--r-- drivers/bcma/Kconfig | 2
-rw-r--r-- drivers/bcma/driver_pcie2.c | 3
-rw-r--r-- drivers/block/DAC960.c | 160
-rw-r--r-- drivers/block/Kconfig | 4
-rw-r--r-- drivers/block/aoe/aoe.h | 3
-rw-r--r-- drivers/block/aoe/aoecmd.c | 48
-rw-r--r-- drivers/block/drbd/drbd_bitmap.c | 2
-rw-r--r-- drivers/block/drbd/drbd_main.c | 8
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 3
-rw-r--r-- drivers/block/loop.c | 10
-rw-r--r-- drivers/block/null_blk.c | 290
-rw-r--r-- drivers/block/pktcdvd.c | 12
-rw-r--r-- drivers/block/rbd.c | 18
-rw-r--r-- drivers/block/smart1,2.h | 278
-rw-r--r-- drivers/block/zram/zram_drv.c | 2
-rw-r--r-- drivers/bluetooth/Kconfig | 14
-rw-r--r-- drivers/bluetooth/bluecard_cs.c | 8
-rw-r--r-- drivers/bluetooth/bpa10x.c | 2
-rw-r--r-- drivers/bluetooth/btbcm.c | 1
-rw-r--r-- drivers/bluetooth/btbcm.h | 2
-rw-r--r-- drivers/bluetooth/btintel.c | 157
-rw-r--r-- drivers/bluetooth/btintel.h | 33
-rw-r--r-- drivers/bluetooth/btqcomsmd.c | 3
-rw-r--r-- drivers/bluetooth/btsdio.c | 9
-rw-r--r-- drivers/bluetooth/btusb.c | 162
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 243
-rw-r--r-- drivers/bluetooth/hci_intel.c | 186
-rw-r--r-- drivers/bluetooth/hci_ldisc.c | 2
-rw-r--r-- drivers/bluetooth/hci_ll.c | 107
-rw-r--r-- drivers/bluetooth/hci_qca.c | 3
-rw-r--r-- drivers/bluetooth/hci_serdev.c | 1
-rw-r--r-- drivers/bluetooth/hci_vhci.c | 2
-rw-r--r-- drivers/bus/Kconfig | 2
-rw-r--r-- drivers/bus/sunxi-rsb.c | 1
-rw-r--r-- drivers/char/apm-emulation.c | 2
-rw-r--r-- drivers/char/dsp56k.c | 2
-rw-r--r-- drivers/char/dtlk.c | 6
-rw-r--r-- drivers/char/hpet.c | 2
-rw-r--r-- drivers/char/hw_random/Kconfig | 45
-rw-r--r-- drivers/char/hw_random/Makefile | 3
-rw-r--r-- drivers/char/hw_random/bcm2835-rng.c | 169
-rw-r--r-- drivers/char/hw_random/bcm63xx-rng.c | 154
-rw-r--r-- drivers/char/hw_random/core.c | 4
-rw-r--r-- drivers/char/hw_random/exynos-trng.c | 235
-rw-r--r-- drivers/char/hw_random/imx-rngc.c | 13
-rw-r--r--drivers/char/hw_random/mtk-rng.c1
-rw-r--r--drivers/char/hw_random/tpm-rng.c50
-rw-r--r--drivers/char/ipmi/bt-bmc.c4
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c4
-rw-r--r--drivers/char/ipmi/ipmi_dmi.c5
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c19
-rw-r--r--drivers/char/ipmi/ipmi_powernv.c6
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c13
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c2
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c3
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c8
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c4
-rw-r--r--drivers/char/ppdev.c4
-rw-r--r--drivers/char/random.c28
-rw-r--r--drivers/char/rtc.c4
-rw-r--r--drivers/char/snsc.c4
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/char/tpm/Kconfig11
-rw-r--r--drivers/char/tpm/Makefile5
-rw-r--r--drivers/char/tpm/tpm-chip.c67
-rw-r--r--drivers/char/tpm/tpm-interface.c231
-rw-r--r--drivers/char/tpm/tpm.h52
-rw-r--r--drivers/char/tpm/tpm1_eventlog.c13
-rw-r--r--drivers/char/tpm/tpm2-cmd.c12
-rw-r--r--drivers/char/tpm/tpm2_eventlog.c2
-rw-r--r--drivers/char/tpm/tpm_eventlog_acpi.c (renamed from drivers/char/tpm/tpm_acpi.c)4
-rw-r--r--drivers/char/tpm/tpm_eventlog_efi.c66
-rw-r--r--drivers/char/tpm/tpm_eventlog_of.c (renamed from drivers/char/tpm/tpm_of.c)6
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c27
-rw-r--r--drivers/char/tpm/tpm_tis.c108
-rw-r--r--drivers/char/tpm/tpm_tis_core.c193
-rw-r--r--drivers/char/tpm/tpm_tis_core.h16
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c4
-rw-r--r--drivers/char/tpm/xen-tpmfront.c61
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/char/xillybus/xillybus_core.c4
-rw-r--r--drivers/clocksource/Kconfig8
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/owl-timer.c5
-rw-r--r--drivers/clocksource/tcb_clksrc.c2
-rw-r--r--drivers/clocksource/timer-of.c84
-rw-r--r--drivers/clocksource/timer-of.h1
-rw-r--r--drivers/clocksource/timer-sprd.c159
-rw-r--r--drivers/clocksource/timer-stm32.c358
-rw-r--r--drivers/cpufreq/Kconfig.arm88
-rw-r--r--drivers/cpufreq/Makefile9
-rw-r--r--drivers/cpufreq/arm_big_little.c23
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c241
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c8
-rw-r--r--drivers/cpufreq/cpufreq-dt.c27
-rw-r--r--drivers/cpufreq/cpufreq.c55
-rw-r--r--drivers/cpufreq/cpufreq_stats.c3
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c171
-rw-r--r--drivers/cpufreq/intel_pstate.c14
-rw-r--r--drivers/cpufreq/longhaul.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c23
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c16
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c143
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c14
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c193
-rw-r--r--drivers/cpufreq/ti-cpufreq.c51
-rw-r--r--drivers/cpuidle/governor.c5
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c6
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c131
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h4
-rw-r--r--drivers/crypto/amcc/crypto4xx_reg_def.h4
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.c2
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c8
-rw-r--r--drivers/crypto/bcm/cipher.c1
-rw-r--r--drivers/crypto/bfin_crc.c3
-rw-r--r--drivers/crypto/caam/caamalg.c120
-rw-r--r--drivers/crypto/caam/caamalg_desc.c182
-rw-r--r--drivers/crypto/caam/caamalg_desc.h10
-rw-r--r--drivers/crypto/caam/caamalg_qi.c68
-rw-r--r--drivers/crypto/caam/caamhash.c73
-rw-r--r--drivers/crypto/caam/ctrl.c4
-rw-r--r--drivers/crypto/caam/desc.h29
-rw-r--r--drivers/crypto/caam/desc_constr.h51
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/caam/key_gen.c30
-rw-r--r--drivers/crypto/caam/key_gen.h30
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c1
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c1
-rw-r--r--drivers/crypto/chelsio/Kconfig11
-rw-r--r--drivers/crypto/chelsio/Makefile1
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c540
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h15
-rw-r--r--drivers/crypto/chelsio/chcr_core.c14
-rw-r--r--drivers/crypto/chelsio/chcr_core.h38
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h76
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c654
-rw-r--r--drivers/crypto/exynos-rng.c108
-rw-r--r--drivers/crypto/hifn_795x.c1
-rw-r--r--drivers/crypto/inside-secure/safexcel.c371
-rw-r--r--drivers/crypto/inside-secure/safexcel.h173
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c136
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c214
-rw-r--r--drivers/crypto/ixp4xx_crypto.c7
-rw-r--r--drivers/crypto/marvell/cesa.c20
-rw-r--r--drivers/crypto/n2_core.c3
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c4
-rw-r--r--drivers/crypto/picoxcell_crypto.c27
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c133
-rw-r--r--drivers/crypto/s5p-sss.c26
-rw-r--r--drivers/crypto/stm32/Kconfig13
-rw-r--r--drivers/crypto/stm32/Makefile5
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c1170
-rw-r--r--drivers/crypto/stm32/stm32_crc32.c2
-rw-r--r--drivers/devfreq/devfreq.c5
-rw-r--r--drivers/dma-buf/dma-buf.c6
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/dma/amba-pl08x.c11
-rw-r--r--drivers/dma/bcm2835-dma.c10
-rw-r--r--drivers/dma/cppi41.c2
-rw-r--r--drivers/dma/dma-jz4780.c10
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/edma.c7
-rw-r--r--drivers/dma/img-mdc-dma.c17
-rw-r--r--drivers/dma/imx-sdma.c6
-rw-r--r--drivers/dma/ioat/dma.c2
-rw-r--r--drivers/dma/k3dma.c10
-rw-r--r--drivers/dma/mic_x100_dma.c4
-rw-r--r--drivers/dma/omap-dma.c2
-rw-r--r--drivers/dma/qcom/hidma.c41
-rw-r--r--drivers/dma/qcom/hidma_ll.c9
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c61
-rw-r--r--drivers/dma/s3c24xx-dma.c11
-rw-r--r--drivers/dma/sh/rcar-dmac.c68
-rw-r--r--drivers/dma/sprd-dma.c2
-rw-r--r--drivers/dma/stm32-dmamux.c3
-rw-r--r--drivers/dma/tegra20-apb-dma.c19
-rw-r--r--drivers/dma/ti-dma-crossbar.c10
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/dma/virt-dma.c5
-rw-r--r--drivers/dma/virt-dma.h44
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c302
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c179
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/mv64x60_edac.c2
-rw-r--r--drivers/edac/octeon_edac-lmc.c1
-rw-r--r--drivers/edac/ti_edac.c341
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firewire/nosy.c4
-rw-r--r--drivers/firmware/Kconfig8
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/arm_sdei.c1092
-rw-r--r--drivers/firmware/efi/Kconfig10
-rw-r--r--drivers/firmware/efi/Makefile3
-rw-r--r--drivers/firmware/efi/capsule-loader.c47
-rw-r--r--drivers/firmware/efi/cper-arm.c356
-rw-r--r--drivers/firmware/efi/cper.c122
-rw-r--r--drivers/firmware/efi/efi.c6
-rw-r--r--drivers/firmware/efi/libstub/Makefile3
-rw-r--r--drivers/firmware/efi/libstub/tpm.c81
-rw-r--r--drivers/firmware/efi/tpm.c40
-rw-r--r--drivers/firmware/psci.c2
-rw-r--r--drivers/firmware/psci_checker.c46
-rw-r--r--drivers/gpio/Kconfig32
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/devres.c42
-rw-r--r--drivers/gpio/gpio-74x164.c5
-rw-r--r--drivers/gpio/gpio-adp5520.c3
-rw-r--r--drivers/gpio/gpio-adp5588.c2
-rw-r--r--drivers/gpio/gpio-altera.c3
-rw-r--r--drivers/gpio/gpio-amd8111.c2
-rw-r--r--drivers/gpio/gpio-arizona.c2
-rw-r--r--drivers/gpio/gpio-aspeed.c41
-rw-r--r--drivers/gpio/gpio-ath79.c3
-rw-r--r--drivers/gpio/gpio-axp209.c188
-rw-r--r--drivers/gpio/gpio-bcm-kona.c8
-rw-r--r--drivers/gpio/gpio-brcmstb.c1
-rw-r--r--drivers/gpio/gpio-bt8xx.c2
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-cs5535.c2
-rw-r--r--drivers/gpio/gpio-da9052.c2
-rw-r--r--drivers/gpio/gpio-da9055.c2
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-ftgpio010.c4
-rw-r--r--drivers/gpio/gpio-iop.c4
-rw-r--r--drivers/gpio/gpio-it87.c2
-rw-r--r--drivers/gpio/gpio-max732x.c6
-rw-r--r--drivers/gpio/gpio-merrifield.c11
-rw-r--r--drivers/gpio/gpio-mmio.c30
-rw-r--r--drivers/gpio/gpio-mockup.c288
-rw-r--r--drivers/gpio/gpio-omap.c39
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c447
-rw-r--r--drivers/gpio/gpio-stmpe.c54
-rw-r--r--drivers/gpio/gpio-thunderx.c4
-rw-r--r--drivers/gpio/gpio-winbond.c732
-rw-r--r--drivers/gpio/gpiolib-acpi.c57
-rw-r--r--drivers/gpio/gpiolib-of.c119
-rw-r--r--drivers/gpio/gpiolib-sysfs.c33
-rw-r--r--drivers/gpio/gpiolib.c369
-rw-r--r--drivers/gpio/gpiolib.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c47
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h2
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c38
-rw-r--r--drivers/gpu/drm/drm_file.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c12
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c35
-rw-r--r--drivers/gpu/drm/i915/intel_display.c317
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c3
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c16
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c11
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c83
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c46
-rw-r--r--drivers/gpu/drm/r128/r128_state.c23
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c9
-rw-r--r--drivers/gpu/drm/tegra/sor.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c33
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c41
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/Kconfig12
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-asus.c41
-rw-r--r--drivers/hid/hid-core.c932
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-elecom.c78
-rw-r--r--drivers/hid/hid-elo.c6
-rw-r--r--drivers/hid/hid-generic.c68
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-jabra.c58
-rw-r--r--drivers/hid/hid-multitouch.c137
-rw-r--r--drivers/hid/hid-quirks.c1276
-rw-r--r--drivers/hid/hid-rmi.c1
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c2
-rw-r--r--drivers/hid/hid-roccat.c2
-rw-r--r--drivers/hid/hid-sensor-custom.c4
-rw-r--r--drivers/hid/hid-sony.c93
-rw-r--r--drivers/hid/hidraw.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c18
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/uhid.c2
-rw-r--r--drivers/hid/usbhid/Makefile2
-rw-r--r--drivers/hid/usbhid/hid-core.c11
-rw-r--r--drivers/hid/usbhid/hid-quirks.c402
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hid/wacom_sys.c134
-rw-r--r--drivers/hid/wacom_wac.c67
-rw-r--r--drivers/hid/wacom_wac.h6
-rw-r--r--drivers/hsi/clients/cmt_speech.c8
-rw-r--r--drivers/hv/hv_utils_transport.c2
-rw-r--r--drivers/hv/ring_buffer.c23
-rw-r--r--drivers/hwmon/Kconfig26
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c22
-rw-r--r--drivers/hwmon/coretemp.c3
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c54
-rw-r--r--drivers/hwmon/hih6130.c2
-rw-r--r--drivers/hwmon/hwmon.c12
-rw-r--r--drivers/hwmon/iio_hwmon.c3
-rw-r--r--drivers/hwmon/ina2xx.c90
-rw-r--r--drivers/hwmon/k10temp.c1
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig1
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c288
-rw-r--r--drivers/hwmon/pmbus/ir35221.c189
-rw-r--r--drivers/hwmon/pmbus/lm25066.c67
-rw-r--r--drivers/hwmon/pmbus/max31785.c294
-rw-r--r--drivers/hwmon/pmbus/pmbus.h41
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c273
-rw-r--r--drivers/hwmon/sht15.c1
-rw-r--r--drivers/hwmon/sht21.c2
-rw-r--r--drivers/hwmon/sht3x.c7
-rw-r--r--drivers/hwmon/w83773g.c329
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c15
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c39
-rw-r--r--drivers/i2c/i2c-core-base.c6
-rw-r--r--drivers/i2c/i2c-core-smbus.c13
-rw-r--r--drivers/ide/ide-taskfile.c1
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c12
-rw-r--r--drivers/iio/accel/da280.c31
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c3
-rw-r--r--drivers/iio/accel/mma8452.c20
-rw-r--r--drivers/iio/accel/st_accel_core.c5
-rw-r--r--drivers/iio/adc/Kconfig40
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/aspeed_adc.c25
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c456
-rw-r--r--drivers/iio/adc/at91_adc.c4
-rw-r--r--drivers/iio/adc/axp288_adc.c20
-rw-r--r--drivers/iio/adc/hx711.c134
-rw-r--r--drivers/iio/adc/ina2xx-adc.c317
-rw-r--r--drivers/iio/adc/meson_saradc.c60
-rw-r--r--drivers/iio/adc/qcom-vadc-common.c4
-rw-r--r--drivers/iio/adc/sd_adc_modulator.c68
-rw-r--r--drivers/iio/adc/stm32-adc-core.c14
-rw-r--r--drivers/iio/adc/stm32-adc-core.h14
-rw-r--r--drivers/iio/adc/stm32-adc.c199
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c1205
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c302
-rw-r--r--drivers/iio/adc/stm32-dfsdm.h310
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c2
-rw-r--r--drivers/iio/buffer/Kconfig10
-rw-r--r--drivers/iio/buffer/Makefile1
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c11
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c247
-rw-r--r--drivers/iio/chemical/ccs811.c13
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c2
-rw-r--r--drivers/iio/common/ssp_sensors/ssp.h2
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c2
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_spi.c5
-rw-r--r--drivers/iio/counter/stm32-lptimer-cnt.c2
-rw-r--r--drivers/iio/dac/mcp4725.c2
-rw-r--r--drivers/iio/dac/stm32-dac-core.c14
-rw-r--r--drivers/iio/dac/stm32-dac-core.h15
-rw-r--r--drivers/iio/dac/stm32-dac.c15
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c2
-rw-r--r--drivers/iio/gyro/adis16136.c15
-rw-r--r--drivers/iio/gyro/bmg160_core.c1
-rw-r--r--drivers/iio/health/max30102.c308
-rw-r--r--drivers/iio/humidity/hts221.h3
-rw-r--r--drivers/iio/humidity/hts221_core.c18
-rw-r--r--drivers/iio/humidity/hts221_i2c.c18
-rw-r--r--drivers/iio/humidity/hts221_spi.c18
-rw-r--r--drivers/iio/iio_core.h2
-rw-r--r--drivers/iio/imu/adis16480.c28
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h39
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c107
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c115
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c55
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c70
-rw-r--r--drivers/iio/industrialio-buffer.c17
-rw-r--r--drivers/iio/industrialio-core.c1
-rw-r--r--drivers/iio/industrialio-event.c4
-rw-r--r--drivers/iio/inkern.c17
-rw-r--r--drivers/iio/light/Kconfig34
-rw-r--r--drivers/iio/light/Makefile4
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c2
-rw-r--r--drivers/iio/light/st_uvis25.h37
-rw-r--r--drivers/iio/light/st_uvis25_core.c359
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c69
-rw-r--r--drivers/iio/light/st_uvis25_spi.c68
-rw-r--r--drivers/iio/light/zopt2201.c568
-rw-r--r--drivers/iio/magnetometer/ak8975.c1
-rw-r--r--drivers/iio/pressure/bmp280-core.c203
-rw-r--r--drivers/iio/proximity/sx9500.c1
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c3
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/addr.c65
-rw-r--r--drivers/infiniband/core/cache.c23
-rw-r--r--drivers/infiniband/core/cm.c227
-rw-r--r--drivers/infiniband/core/cma.c252
-rw-r--r--drivers/infiniband/core/cma_configfs.c2
-rw-r--r--drivers/infiniband/core/core_priv.h54
-rw-r--r--drivers/infiniband/core/cq.c39
-rw-r--r--drivers/infiniband/core/device.c60
-rw-r--r--drivers/infiniband/core/fmr_pool.c12
-rw-r--r--drivers/infiniband/core/iwpm_util.c1
-rw-r--r--drivers/infiniband/core/mad.c1
-rw-r--r--drivers/infiniband/core/netlink.c10
-rw-r--r--drivers/infiniband/core/nldev.c448
-rw-r--r--drivers/infiniband/core/restrack.c164
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c13
-rw-r--r--drivers/infiniband/core/sa_query.c18
-rw-r--r--drivers/infiniband/core/security.c10
-rw-r--r--drivers/infiniband/core/sysfs.c1
-rw-r--r--drivers/infiniband/core/ucm.c77
-rw-r--r--drivers/infiniband/core/ucma.c23
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/user_mad.c127
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c14
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c19
-rw-r--r--drivers/infiniband/core/uverbs_main.c103
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c3
-rw-r--r--drivers/infiniband/core/verbs.c312
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h43
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c145
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h39
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c404
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h20
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c251
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c463
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h78
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c9
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c141
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h91
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h127
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c27
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c36
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h4
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c87
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h2
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c16
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c22
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c64
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h25
-rw-r--r--drivers/infiniband/hw/hfi1/init.c2
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c6
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c1
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c10
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c12
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c1
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c1
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c2
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c6
-rw-r--r--drivers/infiniband/hw/hns/Makefile2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c19
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h103
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c759
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.h134
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c758
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h44
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c1837
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h283
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c72
-rw-r--r--drivers/infiniband/hw/i40iw/Kconfig1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c68
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h8
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c25
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c18
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c50
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c19
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c20
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c83
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c23
-rw-r--r--drivers/infiniband/hw/mlx5/main.c1356
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h111
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c3
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c9
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c437
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_user.h112
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c19
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c10
-rw-r--r--drivers/infiniband/hw/qedr/main.c2
-rw-r--r--drivers/infiniband/hw/qedr/qedr_hsi_rdma.h125
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c15
-rw-r--r--drivers/infiniband/hw/qib/qib.h8
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c82
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c235
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c1
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c13
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c21
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c11
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c15
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c28
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c16
-rw-r--r--drivers/infiniband/sw/rdmavt/trace.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_qp.h42
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c10
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.h6
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig4
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c123
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c16
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c8
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h1
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c795
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h43
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c965
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h100
-rw-r--r--drivers/input/evdev.c4
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joydev.c2
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/joystick/xpad.c19
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c2
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/twl4030-vibra.c6
-rw-r--r--drivers/input/misc/twl6040-vibra.c3
-rw-r--r--drivers/input/misc/uinput.c2
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/alps.c23
-rw-r--r--drivers/input/mouse/alps.h10
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/mouse/trackpoint.c245
-rw-r--r--drivers/input/mouse/trackpoint.h34
-rw-r--r--drivers/input/mousedev.c4
-rw-r--r--drivers/input/rmi4/rmi_driver.c4
-rw-r--r--drivers/input/rmi4/rmi_f01.c12
-rw-r--r--drivers/input/serio/serio_raw.c4
-rw-r--r--drivers/input/serio/userio.c2
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c16
-rw-r--r--drivers/input/touchscreen/elants_i2c.c12
-rw-r--r--drivers/input/touchscreen/hideep.c3
-rw-r--r--drivers/input/touchscreen/of_touchscreen.c4
-rw-r--r--drivers/input/touchscreen/s6sy761.c15
-rw-r--r--drivers/input/touchscreen/stmfts.c15
-rw-r--r--drivers/iommu/amd_iommu_v2.c1
-rw-r--r--drivers/iommu/arm-smmu-v3.c17
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/intel-svm.c1
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm2836.c46
-rw-r--r--drivers/irqchip/irq-gic-v3.c40
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c139
-rw-r--r--drivers/irqchip/irq-ompic.c4
-rw-r--r--drivers/isdn/capi/capi.c4
-rw-r--r--drivers/isdn/divert/divert_procfs.c4
-rw-r--r--drivers/isdn/hardware/eicon/divamnt.c4
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c4
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c4
-rw-r--r--drivers/isdn/i4l/isdn_common.c4
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c4
-rw-r--r--drivers/isdn/i4l/isdn_ppp.h2
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c22
-rw-r--r--drivers/isdn/mISDN/timerdev.c4
-rw-r--r--drivers/leds/Kconfig9
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-core.c3
-rw-r--r--drivers/leds/leds-as3645a.c7
-rw-r--r--drivers/leds/leds-blinkm.c4
-rw-r--r--drivers/leds/leds-lm3692x.c393
-rw-r--r--drivers/leds/leds-lp8860.c66
-rw-r--r--drivers/leds/leds-pm8058.c2
-rw-r--r--drivers/leds/leds-pwm.c1
-rw-r--r--drivers/leds/trigger/Kconfig9
-rw-r--r--drivers/leds/trigger/Makefile1
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c496
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c33
-rw-r--r--drivers/leds/uleds.c2
-rw-r--r--drivers/lightnvm/Kconfig7
-rw-r--r--drivers/lightnvm/Makefile1
-rw-r--r--drivers/lightnvm/core.c462
-rw-r--r--drivers/lightnvm/pblk-cache.c5
-rw-r--r--drivers/lightnvm/pblk-core.c55
-rw-r--r--drivers/lightnvm/pblk-gc.c23
-rw-r--r--drivers/lightnvm/pblk-init.c104
-rw-r--r--drivers/lightnvm/pblk-map.c2
-rw-r--r--drivers/lightnvm/pblk-rb.c111
-rw-r--r--drivers/lightnvm/pblk-read.c35
-rw-r--r--drivers/lightnvm/pblk-recovery.c43
-rw-r--r--drivers/lightnvm/pblk-rl.c54
-rw-r--r--drivers/lightnvm/pblk-sysfs.c15
-rw-r--r--drivers/lightnvm/pblk-write.c23
-rw-r--r--drivers/lightnvm/pblk.h163
-rw-r--r--drivers/lightnvm/rrpc.c1625
-rw-r--r--drivers/lightnvm/rrpc.h290
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/macintosh/via-pmu.c4
-rw-r--r--drivers/mailbox/mailbox-test.c2
-rw-r--r--drivers/md/Kconfig7
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bcache/alloc.c19
-rw-r--r--drivers/md/bcache/bcache.h24
-rw-r--r--drivers/md/bcache/btree.c10
-rw-r--r--drivers/md/bcache/closure.c47
-rw-r--r--drivers/md/bcache/closure.h60
-rw-r--r--drivers/md/bcache/debug.c7
-rw-r--r--drivers/md/bcache/io.c13
-rw-r--r--drivers/md/bcache/movinggc.c2
-rw-r--r--drivers/md/bcache/request.c29
-rw-r--r--drivers/md/bcache/super.c27
-rw-r--r--drivers/md/bcache/util.c34
-rw-r--r--drivers/md/bcache/util.h1
-rw-r--r--drivers/md/bcache/writeback.c203
-rw-r--r--drivers/md/bcache/writeback.h12
-rw-r--r--drivers/md/dm-bufio.c37
-rw-r--r--drivers/md/dm-core.h5
-rw-r--r--drivers/md/dm-crypt.c26
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-flakey.c5
-rw-r--r--drivers/md/dm-integrity.c49
-rw-r--r--drivers/md/dm-io.c3
-rw-r--r--drivers/md/dm-ioctl.c4
-rw-r--r--drivers/md/dm-kcopyd.c6
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-mpath.c316
-rw-r--r--drivers/md/dm-queue-length.c6
-rw-r--r--drivers/md/dm-raid.c389
-rw-r--r--drivers/md/dm-rq.c34
-rw-r--r--drivers/md/dm-service-time.c6
-rw-r--r--drivers/md/dm-snap.c84
-rw-r--r--drivers/md/dm-stats.c1
-rw-r--r--drivers/md/dm-table.c114
-rw-r--r--drivers/md/dm-thin-metadata.c6
-rw-r--r--drivers/md/dm-thin.c9
-rw-r--r--drivers/md/dm-unstripe.c219
-rw-r--r--drivers/md/dm-zoned-metadata.c3
-rw-r--r--drivers/md/dm-zoned-target.c3
-rw-r--r--drivers/md/dm.c680
-rw-r--r--drivers/md/dm.h4
-rw-r--r--drivers/md/md.c35
-rw-r--r--drivers/md/md.h9
-rw-r--r--drivers/md/persistent-data/dm-btree.c19
-rw-r--r--drivers/md/raid1.c11
-rw-r--r--drivers/md/raid10.c12
-rw-r--r--drivers/md/raid5-cache.c31
-rw-r--r--drivers/md/raid5-log.h30
-rw-r--r--drivers/md/raid5-ppl.c167
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/cec/cec-api.c4
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c8
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c7
-rw-r--r--drivers/media/dvb-core/dmxdev.c8
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c4
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c2
-rw-r--r--drivers/media/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/media-devnode.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c12
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c8
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c10
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.h4
-rw-r--r--drivers/media/pci/meye/meye.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c4
-rw-r--r--drivers/media/pci/ttpci/av7110_av.c8
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.c4
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c4
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c4
-rw-r--r--drivers/media/platform/fsl-viu.c6
-rw-r--r--drivers/media/platform/m2m-deinterlace.c4
-rw-r--r--drivers/media/platform/mx2_emmaprp.c4
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c4
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c4
-rw-r--r--drivers/media/platform/sh_veu.c2
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c4
-rw-r--r--drivers/media/platform/via-camera.c2
-rw-r--r--drivers/media/platform/vivid/vivid-core.c2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.c2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.h2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.c2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.h2
-rw-r--r--drivers/media/radio/radio-cadet.c6
-rw-r--r--drivers/media/radio/radio-si476x.c6
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c6
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/usb/cpia2/cpia2.h2
-rw-r--r--drivers/media/usb/cpia2/cpia2_core.c4
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c6
-rw-r--r--drivers/media/usb/gspca/gspca.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c10
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c4
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c57
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c6
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c10
-rw-r--r--drivers/memory/omap-gpmc.c163
-rw-r--r--drivers/memstick/host/Kconfig4
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c2
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c2
-rw-r--r--drivers/message/fusion/mptbase.c59
-rw-r--r--drivers/message/fusion/mptctl.c25
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mfd/Kconfig41
-rw-r--r--drivers/mfd/Makefile7
-rw-r--r--drivers/mfd/ab8500-debugfs.c439
-rw-r--r--drivers/mfd/atmel-flexcom.c63
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/cros_ec.c4
-rw-r--r--drivers/mfd/cros_ec_dev.c (renamed from drivers/platform/chrome/cros_ec_dev.c)8
-rw-r--r--drivers/mfd/cros_ec_dev.h (renamed from drivers/platform/chrome/cros_ec_dev.h)0
-rw-r--r--drivers/mfd/cros_ec_spi.c25
-rw-r--r--drivers/mfd/intel-lpss.c6
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c1
-rw-r--r--drivers/mfd/kempld-core.c2
-rw-r--r--drivers/mfd/lpc_ich.c5
-rw-r--r--drivers/mfd/max77843.c1
-rw-r--r--drivers/mfd/palmas.c10
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/rave-sp.c710
-rw-r--r--drivers/mfd/stm32-lptimer.c6
-rw-r--r--drivers/mfd/stm32-timers.c4
-rw-r--r--drivers/mfd/syscon.c19
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c2
-rw-r--r--drivers/mfd/tmio_core.c20
-rw-r--r--drivers/misc/Kconfig5
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cardreader/Kconfig20
-rw-r--r--drivers/misc/cardreader/Makefile4
-rw-r--r--drivers/misc/cardreader/rtl8411.c (renamed from drivers/mfd/rtl8411.c)2
-rw-r--r--drivers/misc/cardreader/rts5209.c (renamed from drivers/mfd/rts5209.c)2
-rw-r--r--drivers/misc/cardreader/rts5227.c (renamed from drivers/mfd/rts5227.c)2
-rw-r--r--drivers/misc/cardreader/rts5229.c (renamed from drivers/mfd/rts5229.c)2
-rw-r--r--drivers/misc/cardreader/rts5249.c (renamed from drivers/mfd/rts5249.c)3
-rw-r--r--drivers/misc/cardreader/rts5260.c748
-rw-r--r--drivers/misc/cardreader/rts5260.h45
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c (renamed from drivers/mfd/rtsx_pcr.c)128
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h (renamed from drivers/mfd/rtsx_pcr.h)12
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c (renamed from drivers/mfd/rtsx_usb.c)2
-rw-r--r--drivers/misc/cxl/api.c2
-rw-r--r--drivers/misc/cxl/cxl.h2
-rw-r--r--drivers/misc/cxl/file.c4
-rw-r--r--drivers/misc/cxl/vphb.c2
-rw-r--r--drivers/misc/hpilo.c2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/mei/main.c6
-rw-r--r--drivers/misc/mic/scif/scif_api.c7
-rw-r--r--drivers/misc/mic/scif/scif_epd.h2
-rw-r--r--drivers/misc/mic/scif/scif_fd.c2
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c4
-rw-r--r--drivers/misc/phantom.c4
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c179
-rw-r--r--drivers/mmc/core/block.c1385
-rw-r--r--drivers/mmc/core/block.h12
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/core.c228
-rw-r--r--drivers/mmc/core/core.h47
-rw-r--r--drivers/mmc/core/host.h6
-rw-r--r--drivers/mmc/core/mmc_test.c221
-rw-r--r--drivers/mmc/core/queue.c504
-rw-r--r--drivers/mmc/core/queue.h64
-rw-r--r--drivers/mmc/core/slot-gpio.c22
-rw-r--r--drivers/mmc/host/Kconfig26
-rw-r--r--drivers/mmc/host/Makefile11
-rw-r--r--drivers/mmc/host/android-goldfish.c2
-rw-r--r--drivers/mmc/host/cqhci.c1150
-rw-r--r--drivers/mmc/host/cqhci.h240
-rw-r--r--drivers/mmc/host/davinci_mmc.c17
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c2
-rw-r--r--drivers/mmc/host/mmci.c127
-rw-r--r--drivers/mmc/host/mmci.h6
-rw-r--r--drivers/mmc/host/renesas_sdhi.h22
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c52
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c15
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c37
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c8
-rw-r--r--drivers/mmc/host/sdhci-acpi.c132
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c36
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c149
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c8
-rw-r--r--drivers/mmc/host/sdhci-pci-arasan.c331
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c218
-rw-r--r--drivers/mmc/host/sdhci-pci.h7
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci-xenon.c7
-rw-r--r--drivers/mmc/host/sdhci.c57
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c52
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.c23
-rw-r--r--drivers/mmc/host/tmio_mmc.h43
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c152
-rw-r--r--drivers/mtd/devices/docg3.c70
-rw-r--r--drivers/mtd/devices/m25p80.c9
-rw-r--r--drivers/mtd/devices/mchp23k256.c18
-rw-r--r--drivers/mtd/mtdcore.c36
-rw-r--r--drivers/mtd/mtdpart.c43
-rw-r--r--drivers/mtd/mtdswap.c5
-rw-r--r--drivers/mtd/nand/Kconfig17
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c9
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c6
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c38
-rw-r--r--drivers/mtd/nand/cafe_nand.c52
-rw-r--r--drivers/mtd/nand/denali.c84
-rw-r--r--drivers/mtd/nand/denali.h4
-rw-r--r--drivers/mtd/nand/denali_pci.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/docg4.c21
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c10
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c13
-rw-r--r--drivers/mtd/nand/fsmc_nand.c9
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c111
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h46
-rw-r--r--drivers/mtd/nand/hisi504_nand.c9
-rw-r--r--drivers/mtd/nand/jz4740_nand.c16
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c7
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c33
-rw-r--r--drivers/mtd/nand/marvell_nand.c2896
-rw-r--r--drivers/mtd/nand/mtk_ecc.c126
-rw-r--r--drivers/mtd/nand/mtk_ecc.h3
-rw-r--r--drivers/mtd/nand/mtk_nand.c76
-rw-r--r--drivers/mtd/nand/nand_base.c2309
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_hynix.c129
-rw-r--r--drivers/mtd/nand/nand_micron.c83
-rw-r--r--drivers/mtd/nand/nand_samsung.c19
-rw-r--r--drivers/mtd/nand/nand_timings.c21
-rw-r--r--drivers/mtd/nand/omap2.c28
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c15
-rw-r--r--drivers/mtd/nand/qcom_nandc.c31
-rw-r--r--drivers/mtd/nand/r852.c11
-rw-r--r--drivers/mtd/nand/sh_flctl.c6
-rw-r--r--drivers/mtd/nand/sm_common.h2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c111
-rw-r--r--drivers/mtd/nand/tango_nand.c27
-rw-r--r--drivers/mtd/nand/tmio_nand.c5
-rw-r--r--drivers/mtd/nand/vf610_nfc.c6
-rw-r--r--drivers/mtd/onenand/Kconfig7
-rw-r--r--drivers/mtd/onenand/omap2.c577
-rw-r--r--drivers/mtd/onenand/onenand_base.c81
-rw-r--r--drivers/mtd/onenand/samsung.c185
-rw-r--r--drivers/mtd/parsers/sharpslpart.c6
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c55
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c8
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c6
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c240
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c75
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/tests/oobtest.c21
-rw-r--r--drivers/mtd/ubi/block.c42
-rw-r--r--drivers/mtd/ubi/build.c13
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c2
-rw-r--r--drivers/mtd/ubi/fastmap.c5
-rw-r--r--drivers/mtd/ubi/vmt.c15
-rw-r--r--drivers/mtd/ubi/wl.c87
-rw-r--r--drivers/mtd/ubi/wl.h2
-rw-r--r--drivers/mux/core.c4
-rw-r--r--drivers/net/Kconfig11
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bonding/bond_options.c6
-rw-r--r--drivers/net/caif/caif_hsi.c1
-rw-r--r--drivers/net/can/c_can/c_can_pci.c4
-rw-r--r--drivers/net/can/dev.c47
-rw-r--r--drivers/net/can/flexcan.c243
-rw-r--r--drivers/net/can/m_can/m_can.c183
-rw-r--r--drivers/net/can/rx-offload.c2
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/spi/mcp251x.c7
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/gs_usb.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c9
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c55
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h7
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c30
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c9
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c18
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/b53/b53_common.c19
-rw-r--r--drivers/net/dsa/b53/b53_priv.h7
-rw-r--r--drivers/net/dsa/bcm_sf2.c11
-rw-r--r--drivers/net/dsa/dsa_loop.c9
-rw-r--r--drivers/net/dsa/lan9303-core.c138
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c12
-rw-r--r--drivers/net/dsa/mt7530.c288
-rw-r--r--drivers/net/dsa/mt7530.h83
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c94
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h16
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c87
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c74
-rw-r--r--drivers/net/dummy.c215
-rw-r--r--drivers/net/ethernet/3com/3c59x.c90
-rw-r--r--drivers/net/ethernet/8390/mac8390.c33
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/alteon/acenic.c14
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c11
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c113
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c24
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h39
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h79
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c110
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c290
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h62
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h45
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c413
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_utils.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c472
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h65
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c506
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h28
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h64
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c1326
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h544
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h1521
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c346
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h133
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c184
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h6
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c27
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c12
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c814
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h89
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c32
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h11936
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c223
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c7
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c21
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h14
-rw-r--r--drivers/net/ethernet/cadence/macb.h170
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c758
-rw-r--r--drivers/net/ethernet/cavium/Kconfig13
-rw-r--r--drivers/net/ethernet/cavium/Makefile1
-rw-r--r--drivers/net/ethernet/cavium/common/Makefile1
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c353
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.h70
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c7
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h36
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c58
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c31
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c297
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c41
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h6
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c31
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_xcv.c2
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h156
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c1173
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c82
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h70
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c117
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c306
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c107
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c140
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c607
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c323
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c286
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h45
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h164
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h52
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c28
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c4
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c12
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c5
-rw-r--r--drivers/net/ethernet/cortina/Kconfig23
-rw-r--r--drivers/net/ethernet/cortina/Makefile4
-rw-r--r--drivers/net/ethernet/cortina/gemini.c2593
-rw-r--r--drivers/net/ethernet/cortina/gemini.h958
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c8
-rw-r--r--drivers/net/ethernet/freescale/fec.h5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c35
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c16
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c9
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig28
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c109
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h88
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h32
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c)2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c)405
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h)28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c)347
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h106
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1588
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h129
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c418
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c64
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c342
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h248
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c1505
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h164
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c181
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c26
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h17
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.c10
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c43
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c38
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c148
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c14
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c54
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c9
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h67
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c74
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c192
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c255
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c143
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h51
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c110
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h54
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c15
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h30
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c31
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_status.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c92
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h54
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h9
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c28
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c228
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c83
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c32
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c72
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c121
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c941
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h93
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c76
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c589
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h61
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c372
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c34
-rw-r--r--drivers/net/ethernet/marvell/Kconfig2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c687
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c253
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c35
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c339
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c156
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c341
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c282
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c548
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c147
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c150
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c214
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c613
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c359
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c558
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c300
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c13
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile3
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c453
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h157
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c988
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c248
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h207
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c198
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c159
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c79
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c75
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h71
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c156
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h16
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c127
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c57
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h84
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c95
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h28
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c113
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c135
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h210
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c811
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c111
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c76
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c89
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c62
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c72
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c94
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c1167
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h10603
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c68
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c1091
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c58
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c119
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h190
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c47
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c33
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c18
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.h3
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c7
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c64
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h3
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c70
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h32
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c17
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c309
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h12
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c7
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c142
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c55
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c1
-rw-r--r--drivers/net/ethernet/sfc/ef10.c363
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h46
-rw-r--r--drivers/net/ethernet/sfc/efx.c66
-rw-r--r--drivers/net/ethernet/sfc/efx.h9
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c6
-rw-r--r--drivers/net/ethernet/sfc/farch.c26
-rw-r--r--drivers/net/ethernet/sfc/io.h19
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h3
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h2453
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c168
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h33
-rw-r--r--drivers/net/ethernet/sfc/nic.h28
-rw-r--r--drivers/net/ethernet/sfc/ptp.c370
-rw-r--r--drivers/net/ethernet/sfc/siena.c12
-rw-r--r--drivers/net/ethernet/sfc/tx.c21
-rw-r--r--drivers/net/ethernet/socionext/Kconfig34
-rw-r--r--drivers/net/ethernet/socionext/Makefile6
-rw-r--r--drivers/net/ethernet/socionext/netsec.c1777
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c1736
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c113
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c67
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c24
-rw-r--r--drivers/net/ethernet/ti/cpsw.c96
-rw-r--r--drivers/net/ethernet/ti/cpsw.h23
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c109
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c20
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c6
-rw-r--r--drivers/net/geneve.c38
-rw-r--r--drivers/net/hyperv/hyperv_net.h60
-rw-r--r--drivers/net/hyperv/netvsc.c73
-rw-r--r--drivers/net/hyperv/netvsc_drv.c111
-rw-r--r--drivers/net/hyperv/rndis_filter.c44
-rw-r--r--drivers/net/ieee802154/adf7242.c90
-rw-r--r--drivers/net/ieee802154/ca8210.c4
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c17
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c33
-rw-r--r--drivers/net/macsec.c68
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/netdevsim/Makefile11
-rw-r--r--drivers/net/netdevsim/bpf.c643
-rw-r--r--drivers/net/netdevsim/netdev.c504
-rw-r--r--drivers/net/netdevsim/netdevsim.h109
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/at803x.c44
-rw-r--r--drivers/net/phy/bcm-cygnus.c2
-rw-r--r--drivers/net/phy/bcm63xx.c4
-rw-r--r--drivers/net/phy/bcm7xxx.c6
-rw-r--r--drivers/net/phy/broadcom.c69
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/phy/davicom.c5
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/dp83822.c2
-rw-r--r--drivers/net/phy/dp83848.c2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/fixed_phy.c31
-rw-r--r--drivers/net/phy/icplus.c4
-rw-r--r--drivers/net/phy/intel-xway.c12
-rw-r--r--drivers/net/phy/lxt.c5
-rw-r--r--drivers/net/phy/marvell.c600
-rw-r--r--drivers/net/phy/marvell10g.c111
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c2
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/mdio_bus.c99
-rw-r--r--drivers/net/phy/mdio_device.c34
-rw-r--r--drivers/net/phy/meson-gxl.c186
-rw-r--r--drivers/net/phy/micrel.c24
-rw-r--r--drivers/net/phy/microchip.c1
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy-c45.c33
-rw-r--r--drivers/net/phy/phy-core.c258
-rw-r--r--drivers/net/phy/phy.c56
-rw-r--r--drivers/net/phy/phy_device.c108
-rw-r--r--drivers/net/phy/phylink.c453
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/realtek.c105
-rw-r--r--drivers/net/phy/rockchip.c1
-rw-r--r--drivers/net/phy/sfp-bus.c227
-rw-r--r--drivers/net/phy/sfp.c119
-rw-r--r--drivers/net/phy/smsc.c11
-rw-r--r--drivers/net/phy/ste10Xp.c4
-rw-r--r--drivers/net/phy/uPD60620.c1
-rw-r--r--drivers/net/phy/vitesse.c12
-rw-r--r--drivers/net/ppp/ppp_async.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c9
-rw-r--r--drivers/net/ppp/ppp_synctty.c2
-rw-r--r--drivers/net/ppp/pppoe.c11
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/tap.c46
-rw-r--r--drivers/net/tun.c462
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/qmi_wwan.c6
-rw-r--r--drivers/net/usb/r8152.c13
-rw-r--r--drivers/net/usb/usbnet.c8
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c235
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h8
-rw-r--r--drivers/net/vrf.c5
-rw-r--r--drivers/net/vxlan.c32
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wimax/i2400m/sysfs.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c636
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h61
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c342
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h36
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c993
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h225
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c293
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h40
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h165
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c365
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c600
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h23
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c72
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c230
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h52
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h11
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c146
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h115
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c74
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig14
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c133
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h15
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c46
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h107
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c39
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c126
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/boot_loader.h9
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c85
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c151
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c15
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h18
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c234
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c30
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c304
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c20
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c192
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c121
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h112
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h15
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c635
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h117
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c531
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c24
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c487
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h73
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c462
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h262
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c232
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c314
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c234
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c128
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h68
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c97
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c61
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c7
-rw-r--r--drivers/net/wireless/mediatek/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig10
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile15
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c258
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c78
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c459
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c112
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c505
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h432
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h228
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_core.c88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c133
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c506
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.c184
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.h68
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c664
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h185
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init.c875
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.c839
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.h190
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_main.c577
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c453
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h155
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_pci.c110
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy.c740
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_regs.h587
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_trace.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_trace.h144
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx.c260
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.h71
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c511
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c78
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h44
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c191
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.h19
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c581
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c152
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h23
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c80
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h351
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c78
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.c8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.h4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c32
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c12
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c164
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c203
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c269
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c206
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c285
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c177
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c663
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h252
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c325
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c52
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c483
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c94
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c57
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c34
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h68
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c41
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c4
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/nubus/Makefile2
-rw-r--r--drivers/nubus/bus.c117
-rw-r--r--drivers/nubus/nubus.c542
-rw-r--r--drivers/nubus/proc.c281
-rw-r--r--drivers/nvme/host/Makefile4
-rw-r--r--drivers/nvme/host/core.c151
-rw-r--r--drivers/nvme/host/fabrics.c23
-rw-r--r--drivers/nvme/host/fabrics.h2
-rw-r--r--drivers/nvme/host/fc.c7
-rw-r--r--drivers/nvme/host/lightnvm.c185
-rw-r--r--drivers/nvme/host/multipath.c44
-rw-r--r--drivers/nvme/host/nvme.h21
-rw-r--r--drivers/nvme/host/pci.c282
-rw-r--r--drivers/nvme/host/rdma.c20
-rw-r--r--drivers/nvme/host/trace.c130
-rw-r--r--drivers/nvme/host/trace.h165
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/core.c14
-rw-r--r--drivers/nvme/target/fabrics-cmd.c2
-rw-r--r--drivers/nvme/target/fc.c60
-rw-r--r--drivers/nvme/target/fcloop.c246
-rw-r--r--drivers/nvme/target/loop.c3
-rw-r--r--drivers/nvme/target/rdma.c83
-rw-r--r--drivers/of/base.c26
-rw-r--r--drivers/of/of_mdio.c14
-rw-r--r--drivers/of/property.c8
-rw-r--r--drivers/opp/Makefile1
-rw-r--r--drivers/opp/ti-opp-supply.c425
-rw-r--r--drivers/parisc/dino.c10
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/pci/pci-driver.c21
-rw-r--r--drivers/pci/pci.c25
-rw-r--r--drivers/pci/pcie/portdrv_pci.c3
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm_dsu_pmu.c843
-rw-r--r--drivers/perf/arm_pmu_platform.c15
-rw-r--r--drivers/perf/arm_spe_pmu.c9
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c31
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c22
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c4
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c35
-rw-r--r--drivers/phy/phy-core.c4
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c183
-rw-r--r--drivers/pinctrl/Kconfig10
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c476
-rw-r--r--drivers/platform/chrome/Kconfig10
-rw-r--r--drivers/platform/chrome/Makefile7
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c9
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.h27
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c5
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c1
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c4
-rw-r--r--drivers/platform/x86/compal-laptop.c18
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/surfacepro3_button.c4
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/pnp/pnpbios/core.c5
-rw-r--r--drivers/pnp/quirks.c1
-rw-r--r--drivers/power/avs/rockchip-io-domain.c24
-rw-r--r--drivers/power/reset/Kconfig9
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c4
-rw-r--r--drivers/power/reset/imx-snvs-poweroff.c66
-rw-r--r--drivers/power/reset/msm-poweroff.c7
-rw-r--r--drivers/power/reset/zx-reboot.c4
-rw-r--r--drivers/power/supply/ab8500_charger.c6
-rw-r--r--drivers/power/supply/axp20x_ac_power.c8
-rw-r--r--drivers/power/supply/axp288_charger.c290
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c203
-rw-r--r--drivers/power/supply/bq24190_charger.c126
-rw-r--r--drivers/power/supply/bq27xxx_battery.c39
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c2
-rw-r--r--drivers/power/supply/charger-manager.c2
-rw-r--r--drivers/power/supply/cpcap-battery.c4
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c25
-rw-r--r--drivers/power/supply/max17042_battery.c54
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/powercap/intel_rapl.c99
-rw-r--r--drivers/powercap/powercap_sys.c6
-rw-r--r--drivers/pps/pps.c2
-rw-r--r--drivers/ptp/ptp_chardev.c2
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/regulator/Kconfig7
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c383
-rw-r--r--drivers/regulator/internal.h27
-rw-r--r--drivers/regulator/of_regulator.c34
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c84
-rw-r--r--drivers/regulator/sc2731-regulator.c256
-rw-r--r--drivers/regulator/tps65218-regulator.c5
-rw-r--r--drivers/rpmsg/qcom_smd.c4
-rw-r--r--drivers/rpmsg/rpmsg_char.c4
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/rpmsg_internal.h2
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/s390/block/dasd.c12
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c7
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/char/sclp_early_core.c4
-rw-r--r--drivers/s390/cio/css.c8
-rw-r--r--drivers/s390/cio/device.c10
-rw-r--r--drivers/s390/crypto/ap_card.c2
-rw-r--r--drivers/s390/net/Kconfig3
-rw-r--r--drivers/s390/net/lcs.c10
-rw-r--r--drivers/s390/net/lcs.h3
-rw-r--r--drivers/s390/net/qeth_core.h42
-rw-r--r--drivers/s390/net/qeth_core_main.c105
-rw-r--r--drivers/s390/net/qeth_core_mpc.h13
-rw-r--r--drivers/s390/net/qeth_l2.h3
-rw-r--r--drivers/s390/net/qeth_l2_main.c92
-rw-r--r--drivers/s390/net/qeth_l3.h15
-rw-r--r--drivers/s390/net/qeth_l3_main.c510
-rw-r--r--drivers/s390/net/qeth_l3_sys.c114
-rw-r--r--drivers/sbus/char/Kconfig8
-rw-r--r--drivers/sbus/char/Makefile1
-rw-r--r--drivers/sbus/char/oradax.c1005
-rw-r--r--drivers/scsi/3w-9xxx.c26
-rw-r--r--drivers/scsi/3w-9xxx.h2
-rw-r--r--drivers/scsi/3w-sas.c15
-rw-r--r--drivers/scsi/aacraid/aachba.c473
-rw-r--r--drivers/scsi/aacraid/aacraid.h54
-rw-r--r--drivers/scsi/aacraid/commctrl.c6
-rw-r--r--drivers/scsi/aacraid/comminit.c49
-rw-r--r--drivers/scsi/aacraid/commsup.c220
-rw-r--r--drivers/scsi/aacraid/linit.c26
-rw-r--r--drivers/scsi/aacraid/sa.c32
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h567
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1318
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/bfa/bfa_core.c2
-rw-r--r--drivers/scsi/bfa/bfa_cs.h6
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h3
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c8
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c3
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h4
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c78
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c62
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c10
-rw-r--r--drivers/scsi/bfa/bfa_port.c15
-rw-r--r--drivers/scsi/bfa/bfa_port.h2
-rw-r--r--drivers/scsi/bfa/bfa_svc.c51
-rw-r--r--drivers/scsi/bfa/bfa_svc.h2
-rw-r--r--drivers/scsi/bfa/bfad.c23
-rw-r--r--drivers/scsi/bfa/bfad_attr.c5
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c10
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c8
-rw-r--r--drivers/scsi/bfa/bfad_im.h32
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c60
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c19
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.h1
-rw-r--r--drivers/scsi/csiostor/csio_mb.c6
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--drivers/scsi/cxlflash/Makefile2
-rw-r--r--drivers/scsi/cxlflash/backend.h41
-rw-r--r--drivers/scsi/cxlflash/common.h8
-rw-r--r--drivers/scsi/cxlflash/cxl_hw.c168
-rw-r--r--drivers/scsi/cxlflash/main.c100
-rw-r--r--drivers/scsi/cxlflash/superpipe.c64
-rw-r--r--drivers/scsi/cxlflash/superpipe.h4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c37
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c22
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c4
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c2
-rw-r--r--drivers/scsi/fnic/fnic_stats.h4
-rw-r--r--drivers/scsi/fnic/fnic_trace.c58
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h46
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c267
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c194
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c332
-rw-r--r--drivers/scsi/hosts.c6
-rw-r--r--drivers/scsi/hpsa.c28
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c34
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c320
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/libiscsi.c28
-rw-r--r--drivers/scsi/libiscsi_tcp.c9
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/libsas/sas_discover.c34
-rw-r--r--drivers/scsi/libsas/sas_event.c86
-rw-r--r--drivers/scsi/libsas/sas_expander.c12
-rw-r--r--drivers/scsi/libsas/sas_init.c107
-rw-r--r--drivers/scsi/libsas/sas_internal.h7
-rw-r--r--drivers/scsi/libsas/sas_phy.c69
-rw-r--r--drivers/scsi/libsas/sas_port.c25
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c37
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c123
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c65
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c29
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c294
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c88
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c356
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c71
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c193
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h37
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c175
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c99
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c207
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h29
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c33
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c332
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c33
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/ppa.c4
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.c35
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/qedf.h4
-rw-r--r--drivers/scsi/qedf/qedf_els.c2
-rw-r--r--drivers/scsi/qedf/qedf_hsi.h68
-rw-r--r--drivers/scsi/qedf/qedf_io.c35
-rw-r--r--drivers/scsi/qedf/qedf_main.c15
-rw-r--r--drivers/scsi/qedf/qedf_version.h8
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c4
-rw-r--r--drivers/scsi/qedi/qedi_fw.c61
-rw-r--r--drivers/scsi/qedi/qedi_fw_api.c139
-rw-r--r--drivers/scsi/qedi/qedi_fw_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h5
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c9
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_main.c75
-rw-r--r--drivers/scsi/qedi/qedi_version.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h173
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c39
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c1497
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1044
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c65
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c56
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c151
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c528
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c761
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c21
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c12
-rw-r--r--drivers/scsi/scsi_common.c14
-rw-r--r--drivers/scsi/scsi_debug.c724
-rw-r--r--drivers/scsi/scsi_devinfo.c39
-rw-r--r--drivers/scsi/scsi_dh.c5
-rw-r--r--drivers/scsi/scsi_error.c21
-rw-r--r--drivers/scsi/scsi_lib.c50
-rw-r--r--drivers/scsi/scsi_priv.h15
-rw-r--r--drivers/scsi/scsi_sysfs.c3
-rw-r--r--drivers/scsi/scsi_transport_fc.c4
-rw-r--r--drivers/scsi/scsi_transport_spi.c16
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/ses.c11
-rw-r--r--drivers/scsi/sg.c6
-rw-r--r--drivers/scsi/smartpqi/Makefile2
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/ufs/ufshci.h16
-rw-r--r--drivers/scsi/wd719x.c4
-rw-r--r--drivers/spi/spi-armada-3700.c110
-rw-r--r--drivers/spi/spi-atmel.c113
-rw-r--r--drivers/spi/spi-bcm53xx.c26
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dw.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c9
-rw-r--r--drivers/spi/spi-imx.c26
-rw-r--r--drivers/spi/spi-jcore.c4
-rw-r--r--drivers/spi/spi-meson-spicc.c1
-rw-r--r--drivers/spi/spi-orion.c21
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c18
-rw-r--r--drivers/spi/spi-sh-msiof.c128
-rw-r--r--drivers/spi/spi-sirf.c4
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-xilinx.c1
-rw-r--r--drivers/ssb/Kconfig11
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/android/ashmem.c39
-rw-r--r--drivers/staging/android/ashmem.h1
-rw-r--r--drivers/staging/android/ion/ion-ioctl.c16
-rw-r--r--drivers/staging/android/ion/ion.c40
-rw-r--r--drivers/staging/android/ion/ion.h16
-rw-r--r--drivers/staging/android/ion/ion_carveout_heap.c11
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c11
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c11
-rw-r--r--drivers/staging/android/ion/ion_heap.c16
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c11
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c13
-rw-r--r--drivers/staging/android/uapi/ashmem.h1
-rw-r--r--drivers/staging/android/uapi/ion.h11
-rw-r--r--drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt27
-rw-r--r--drivers/staging/ccree/Kconfig2
-rw-r--r--drivers/staging/ccree/Makefile8
-rw-r--r--drivers/staging/ccree/TODO22
-rw-r--r--drivers/staging/ccree/cc_aead.c (renamed from drivers/staging/ccree/ssi_aead.c)1119
-rw-r--r--drivers/staging/ccree/cc_aead.h (renamed from drivers/staging/ccree/ssi_aead.h)56
-rw-r--r--drivers/staging/ccree/cc_buffer_mgr.c (renamed from drivers/staging/ccree/ssi_buffer_mgr.c)1085
-rw-r--r--drivers/staging/ccree/cc_buffer_mgr.h74
-rw-r--r--drivers/staging/ccree/cc_cipher.c (renamed from drivers/staging/ccree/ssi_cipher.c)715
-rw-r--r--drivers/staging/ccree/cc_cipher.h74
-rw-r--r--drivers/staging/ccree/cc_crypto_ctx.h38
-rw-r--r--drivers/staging/ccree/cc_debugfs.c101
-rw-r--r--drivers/staging/ccree/cc_debugfs.h32
-rw-r--r--drivers/staging/ccree/cc_driver.c (renamed from drivers/staging/ccree/ssi_driver.c)346
-rw-r--r--drivers/staging/ccree/cc_driver.h194
-rw-r--r--drivers/staging/ccree/cc_fips.c (renamed from drivers/staging/ccree/ssi_fips.c)45
-rw-r--r--drivers/staging/ccree/cc_fips.h37
-rw-r--r--drivers/staging/ccree/cc_hash.c (renamed from drivers/staging/ccree/ssi_hash.c)1820
-rw-r--r--drivers/staging/ccree/cc_hash.h114
-rw-r--r--drivers/staging/ccree/cc_host_regs.h142
-rw-r--r--drivers/staging/ccree/cc_hw_queue_defs.h40
-rw-r--r--drivers/staging/ccree/cc_ivgen.c (renamed from drivers/staging/ccree/ssi_ivgen.c)139
-rw-r--r--drivers/staging/ccree/cc_ivgen.h55
-rw-r--r--drivers/staging/ccree/cc_kernel_regs.h167
-rw-r--r--drivers/staging/ccree/cc_lli_defs.h19
-rw-r--r--drivers/staging/ccree/cc_pm.c122
-rw-r--r--drivers/staging/ccree/cc_pm.h57
-rw-r--r--drivers/staging/ccree/cc_request_mgr.c713
-rw-r--r--drivers/staging/ccree/cc_request_mgr.h51
-rw-r--r--drivers/staging/ccree/cc_sram_mgr.c (renamed from drivers/staging/ccree/ssi_sram_mgr.c)70
-rw-r--r--drivers/staging/ccree/cc_sram_mgr.h65
-rw-r--r--drivers/staging/ccree/dx_crys_kernel.h180
-rw-r--r--drivers/staging/ccree/dx_host.h155
-rw-r--r--drivers/staging/ccree/dx_reg_common.h26
-rw-r--r--drivers/staging/ccree/hash_defs.h36
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.h91
-rw-r--r--drivers/staging/ccree/ssi_cipher.h84
-rw-r--r--drivers/staging/ccree/ssi_config.h36
-rw-r--r--drivers/staging/ccree/ssi_driver.h206
-rw-r--r--drivers/staging/ccree/ssi_fips.h49
-rw-r--r--drivers/staging/ccree/ssi_hash.h103
-rw-r--r--drivers/staging/ccree/ssi_ivgen.h71
-rw-r--r--drivers/staging/ccree/ssi_pm.c145
-rw-r--r--drivers/staging/ccree/ssi_pm.h43
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.c610
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.h60
-rw-r--r--drivers/staging/ccree/ssi_sram_mgr.h79
-rw-r--r--drivers/staging/ccree/ssi_sysfs.c172
-rw-r--r--drivers/staging/ccree/ssi_sysfs.h55
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c13
-rw-r--r--drivers/staging/comedi/comedi.h11
-rw-r--r--drivers/staging/comedi/comedi_buf.c11
-rw-r--r--drivers/staging/comedi/comedi_compat32.c11
-rw-r--r--drivers/staging/comedi/comedi_compat32.h11
-rw-r--r--drivers/staging/comedi/comedi_fops.c15
-rw-r--r--drivers/staging/comedi/comedi_pci.c11
-rw-r--r--drivers/staging/comedi/comedi_pci.h11
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.c11
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.h11
-rw-r--r--drivers/staging/comedi/comedi_usb.c11
-rw-r--r--drivers/staging/comedi/comedi_usb.h11
-rw-r--r--drivers/staging/comedi/comedidev.h51
-rw-r--r--drivers/staging/comedi/comedilib.h11
-rw-r--r--drivers/staging/comedi/drivers.c11
-rw-r--r--drivers/staging/comedi/drivers/8255.c11
-rw-r--r--drivers/staging/comedi/drivers/8255.h11
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.c11
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c11
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c11
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c11
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c11
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c1
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c11
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c1
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1720.c11
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c11
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1724.c11
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1760.c13
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c1
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c11
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.h11
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_pci.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.h11
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236_common.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci236.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c11
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c11
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c11
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c11
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c11
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c11
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c11
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c11
-rw-r--r--drivers/staging/comedi/drivers/comedi_8254.c11
-rw-r--r--drivers/staging/comedi/drivers/comedi_8254.h11
-rw-r--r--drivers/staging/comedi/drivers/comedi_8255.c11
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c11
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.c11
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.h11
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c11
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c11
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c11
-rw-r--r--drivers/staging/comedi/drivers/dac02.c11
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c11
-rw-r--r--drivers/staging/comedi/drivers/das08.c11
-rw-r--r--drivers/staging/comedi/drivers/das08.h11
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c11
-rw-r--r--drivers/staging/comedi/drivers/das08_isa.c11
-rw-r--r--drivers/staging/comedi/drivers/das08_pci.c11
-rw-r--r--drivers/staging/comedi/drivers/das16.c11
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c11
-rw-r--r--drivers/staging/comedi/drivers/das1800.c11
-rw-r--r--drivers/staging/comedi/drivers/das6402.c11
-rw-r--r--drivers/staging/comedi/drivers/das800.c11
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c11
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c1
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c13
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c11
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c11
-rw-r--r--drivers/staging/comedi/drivers/dt2817.c11
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c11
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c11
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c11
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c11
-rw-r--r--drivers/staging/comedi/drivers/fl512.c11
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c11
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c11
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c1
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c11
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c11
-rw-r--r--drivers/staging/comedi/drivers/me4000.c11
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c11
-rw-r--r--drivers/staging/comedi/drivers/mf6x4.c11
-rw-r--r--drivers/staging/comedi/drivers/mite.c11
-rw-r--r--drivers/staging/comedi/drivers/mite.h11
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c11
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.h11
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_common.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_pci.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h11
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h11
-rw-r--r--drivers/staging/comedi/drivers/ni_tio_internal.h11
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c11
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c11
-rw-r--r--drivers/staging/comedi/drivers/pcl724.c1
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c11
-rw-r--r--drivers/staging/comedi/drivers/pcl730.c1
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c1
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c1
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c1
-rw-r--r--drivers/staging/comedi/drivers/pcm3724.c1
-rw-r--r--drivers/staging/comedi/drivers/pcmad.c11
-rw-r--r--drivers/staging/comedi/drivers/pcmda12.c11
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c11
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c11
-rw-r--r--drivers/staging/comedi/drivers/plx9052.h11
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h6
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c11
-rw-r--r--drivers/staging/comedi/drivers/rti800.c11
-rw-r--r--drivers/staging/comedi/drivers/rti802.c11
-rw-r--r--drivers/staging/comedi/drivers/s526.c11
-rw-r--r--drivers/staging/comedi/drivers/s626.c20
-rw-r--r--drivers/staging/comedi/drivers/s626.h11
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c14
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c11
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c14
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c11
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c11
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c11
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c11
-rw-r--r--drivers/staging/comedi/proc.c11
-rw-r--r--drivers/staging/comedi/range.c11
-rw-r--r--drivers/staging/dgnc/Makefile3
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c38
-rw-r--r--drivers/staging/dgnc/dgnc_cls.h11
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c160
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h52
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c248
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.h26
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c1690
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h175
-rw-r--r--drivers/staging/dgnc/dgnc_pci.h69
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c20
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h11
-rw-r--r--drivers/staging/dgnc/dgnc_utils.c17
-rw-r--r--drivers/staging/dgnc/dgnc_utils.h7
-rw-r--r--drivers/staging/dgnc/digi.h88
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c10
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h10
-rw-r--r--drivers/staging/fbtft/Kconfig6
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c11
-rw-r--r--drivers/staging/fbtft/fb_bd663474.c11
-rw-r--r--drivers/staging/fbtft/fb_hx8340bn.c11
-rw-r--r--drivers/staging/fbtft/fb_hx8347d.c11
-rw-r--r--drivers/staging/fbtft/fb_hx8353d.c11
-rw-r--r--drivers/staging/fbtft/fb_hx8357d.c11
-rw-r--r--drivers/staging/fbtft/fb_hx8357d.h1
-rw-r--r--drivers/staging/fbtft/fb_ili9163.c11
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c11
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c11
-rw-r--r--drivers/staging/fbtft/fb_ili9340.c11
-rw-r--r--drivers/staging/fbtft/fb_ili9341.c11
-rw-r--r--drivers/staging/fbtft/fb_ili9481.c11
-rw-r--r--drivers/staging/fbtft/fb_ili9486.c11
-rw-r--r--drivers/staging/fbtft/fb_pcd8544.c11
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c11
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c11
-rw-r--r--drivers/staging/fbtft/fb_s6d1121.c11
-rw-r--r--drivers/staging/fbtft/fb_sh1106.c11
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c11
-rw-r--r--drivers/staging/fbtft/fb_ssd1305.c11
-rw-r--r--drivers/staging/fbtft/fb_ssd1306.c11
-rw-r--r--drivers/staging/fbtft/fb_ssd1325.c11
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c1
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c1
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c11
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c11
-rw-r--r--drivers/staging/fbtft/fb_tinylcd.c11
-rw-r--r--drivers/staging/fbtft/fb_tls8204.c11
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c11
-rw-r--r--drivers/staging/fbtft/fb_uc1701.c11
-rw-r--r--drivers/staging/fbtft/fb_upd161704.c11
-rw-r--r--drivers/staging/fbtft/fb_watterott.c11
-rw-r--r--drivers/staging/fbtft/fbtft-core.c27
-rw-r--r--drivers/staging/fbtft/fbtft.h15
-rw-r--r--drivers/staging/fbtft/fbtft_device.c11
-rw-r--r--drivers/staging/fbtft/flexfb.c11
-rw-r--r--drivers/staging/fbtft/internal.h16
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c165
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h35
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c1
-rw-r--r--drivers/staging/fsl-mc/Kconfig1
-rw-r--r--drivers/staging/fsl-mc/Makefile1
-rw-r--r--drivers/staging/fsl-mc/README.txt386
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig3
-rw-r--r--drivers/staging/fsl-mc/bus/Makefile3
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp-cmd.h28
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c44
-rw-r--r--drivers/staging/fsl-mc/bus/dpcon-cmd.h34
-rw-r--r--drivers/staging/fsl-mc/bus/dpcon.c115
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/Makefile1
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h28
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-driver.c27
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-service.c129
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio.c28
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio.h28
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/qbman-portal.c27
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/qbman-portal.h27
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp-cmd.h56
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c63
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.h60
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng-cmd.h58
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h451
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c82
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c268
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.h268
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c117
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-bus.c112
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c19
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-private.h379
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c34
-rw-r--r--drivers/staging/fsl-mc/bus/mc-io.c62
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c37
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-fd.h27
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-global.h27
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-io.h33
-rw-r--r--drivers/staging/fsl-mc/include/dpbp.h29
-rw-r--r--drivers/staging/fsl-mc/include/dpcon.h42
-rw-r--r--drivers/staging/fsl-mc/include/mc.h61
-rw-r--r--drivers/staging/fsl-mc/overview.rst404
-rw-r--r--drivers/staging/fwserial/dma_fifo.c11
-rw-r--r--drivers/staging/fwserial/dma_fifo.h11
-rw-r--r--drivers/staging/fwserial/fwserial.c11
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c39
-rw-r--r--drivers/staging/greybus/arche-platform.c21
-rw-r--r--drivers/staging/greybus/audio_codec.c23
-rw-r--r--drivers/staging/greybus/audio_codec.h12
-rw-r--r--drivers/staging/greybus/authentication.c2
-rw-r--r--drivers/staging/greybus/camera.c16
-rw-r--r--drivers/staging/gs_fpgaboot/io.h5
-rw-r--r--drivers/staging/iio/adc/ad7192.c2
-rw-r--r--drivers/staging/iio/cdc/ad7152.c25
-rw-r--r--drivers/staging/iio/cdc/ad7746.c17
-rw-r--r--drivers/staging/iio/light/tsl2x7x.c9
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c19
-rw-r--r--drivers/staging/ipx/Kconfig (renamed from net/ipx/Kconfig)1
-rw-r--r--drivers/staging/ipx/Makefile (renamed from net/ipx/Makefile)0
-rw-r--r--drivers/staging/ipx/TODO4
-rw-r--r--drivers/staging/ipx/af_ipx.c (renamed from net/ipx/af_ipx.c)0
-rw-r--r--drivers/staging/ipx/ipx_proc.c (renamed from net/ipx/ipx_proc.c)3
-rw-r--r--drivers/staging/ipx/ipx_route.c (renamed from net/ipx/ipx_route.c)0
-rw-r--r--drivers/staging/ipx/pe2.c (renamed from net/ipx/pe2.c)0
-rw-r--r--drivers/staging/ipx/sysctl_net_ipx.c (renamed from net/ipx/sysctl_net_ipx.c)0
-rw-r--r--drivers/staging/irda/drivers/pxaficp_ir.c1
-rw-r--r--drivers/staging/irda/net/af_irda.c4
-rw-r--r--drivers/staging/irda/net/irlmp.c4
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.c8
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.h2
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h13
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h130
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h107
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h63
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c160
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c10
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c4
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c61
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c8
-rw-r--r--drivers/staging/lustre/lnet/libcfs/Makefile2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c100
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c24
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c32
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c151
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c36
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c137
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c466
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c36
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c53
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c15
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c25
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c20
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c38
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c48
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c60
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c42
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c191
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c10
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c83
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c40
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c48
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c96
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h46
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c13
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h4
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h305
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c18
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c14
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c20
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h12
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c11
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c23
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h12
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c10
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c21
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c24
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c63
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c8
-rw-r--r--drivers/staging/media/imx/imx-media-fim.c30
-rw-r--r--drivers/staging/media/imx/imx-media.h2
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt313
-rw-r--r--drivers/staging/most/Documentation/driver_usage.txt192
-rw-r--r--drivers/staging/most/Kconfig27
-rw-r--r--drivers/staging/most/Makefile19
-rw-r--r--drivers/staging/most/aim-cdev/Makefile4
-rw-r--r--drivers/staging/most/aim-network/Makefile4
-rw-r--r--drivers/staging/most/aim-sound/Makefile4
-rw-r--r--drivers/staging/most/aim-v4l2/Makefile5
-rw-r--r--drivers/staging/most/cdev/Kconfig (renamed from drivers/staging/most/aim-cdev/Kconfig)6
-rw-r--r--drivers/staging/most/cdev/Makefile4
-rw-r--r--drivers/staging/most/cdev/cdev.c (renamed from drivers/staging/most/aim-cdev/cdev.c)196
-rw-r--r--drivers/staging/most/core.c1603
-rw-r--r--drivers/staging/most/core.h (renamed from drivers/staging/most/mostcore/mostcore.h)77
-rw-r--r--drivers/staging/most/dim2/Kconfig (renamed from drivers/staging/most/hdm-dim2/Kconfig)6
-rw-r--r--drivers/staging/most/dim2/Makefile4
-rw-r--r--drivers/staging/most/dim2/dim2.c (renamed from drivers/staging/most/hdm-dim2/dim2_hdm.c)45
-rw-r--r--drivers/staging/most/dim2/dim2.h (renamed from drivers/staging/most/hdm-dim2/dim2_hdm.h)10
-rw-r--r--drivers/staging/most/dim2/errors.h (renamed from drivers/staging/most/hdm-dim2/dim2_errors.h)10
-rw-r--r--drivers/staging/most/dim2/hal.c (renamed from drivers/staging/most/hdm-dim2/dim2_hal.c)16
-rw-r--r--drivers/staging/most/dim2/hal.h (renamed from drivers/staging/most/hdm-dim2/dim2_hal.h)12
-rw-r--r--drivers/staging/most/dim2/reg.h (renamed from drivers/staging/most/hdm-dim2/dim2_reg.h)10
-rw-r--r--drivers/staging/most/dim2/sysfs.c49
-rw-r--r--drivers/staging/most/dim2/sysfs.h30
-rw-r--r--drivers/staging/most/hdm-dim2/Makefile5
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_sysfs.c115
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_sysfs.h36
-rw-r--r--drivers/staging/most/hdm-i2c/Makefile3
-rw-r--r--drivers/staging/most/hdm-usb/Makefile4
-rw-r--r--drivers/staging/most/i2c/Kconfig (renamed from drivers/staging/most/hdm-i2c/Kconfig)6
-rw-r--r--drivers/staging/most/i2c/Makefile4
-rw-r--r--drivers/staging/most/i2c/i2c.c (renamed from drivers/staging/most/hdm-i2c/hdm_i2c.c)19
-rw-r--r--drivers/staging/most/mostcore/Kconfig14
-rw-r--r--drivers/staging/most/mostcore/Makefile3
-rw-r--r--drivers/staging/most/mostcore/core.c1949
-rw-r--r--drivers/staging/most/net/Kconfig (renamed from drivers/staging/most/aim-network/Kconfig)6
-rw-r--r--drivers/staging/most/net/Makefile4
-rw-r--r--drivers/staging/most/net/net.c (renamed from drivers/staging/most/aim-network/networking.c)71
-rw-r--r--drivers/staging/most/sound/Kconfig (renamed from drivers/staging/most/aim-sound/Kconfig)6
-rw-r--r--drivers/staging/most/sound/Makefile4
-rw-r--r--drivers/staging/most/sound/sound.c (renamed from drivers/staging/most/aim-sound/sound.c)33
-rw-r--r--drivers/staging/most/usb/Kconfig (renamed from drivers/staging/most/hdm-usb/Kconfig)7
-rw-r--r--drivers/staging/most/usb/Makefile4
-rw-r--r--drivers/staging/most/usb/usb.c (renamed from drivers/staging/most/hdm-usb/hdm_usb.c)276
-rw-r--r--drivers/staging/most/video/Kconfig (renamed from drivers/staging/most/aim-v4l2/Kconfig)6
-rw-r--r--drivers/staging/most/video/Makefile4
-rw-r--r--drivers/staging/most/video/video.c (renamed from drivers/staging/most/aim-v4l2/video.c)169
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c5
-rw-r--r--drivers/staging/ncpfs/Kconfig (renamed from fs/ncpfs/Kconfig)0
-rw-r--r--drivers/staging/ncpfs/Makefile (renamed from fs/ncpfs/Makefile)0
-rw-r--r--drivers/staging/ncpfs/TODO4
-rw-r--r--drivers/staging/ncpfs/dir.c (renamed from fs/ncpfs/dir.c)0
-rw-r--r--drivers/staging/ncpfs/file.c (renamed from fs/ncpfs/file.c)0
-rw-r--r--drivers/staging/ncpfs/getopt.c (renamed from fs/ncpfs/getopt.c)0
-rw-r--r--drivers/staging/ncpfs/getopt.h (renamed from fs/ncpfs/getopt.h)0
-rw-r--r--drivers/staging/ncpfs/inode.c (renamed from fs/ncpfs/inode.c)3
-rw-r--r--drivers/staging/ncpfs/ioctl.c (renamed from fs/ncpfs/ioctl.c)0
-rw-r--r--drivers/staging/ncpfs/mmap.c (renamed from fs/ncpfs/mmap.c)0
-rw-r--r--drivers/staging/ncpfs/ncp_fs.h (renamed from fs/ncpfs/ncp_fs.h)0
-rw-r--r--drivers/staging/ncpfs/ncp_fs_i.h (renamed from fs/ncpfs/ncp_fs_i.h)0
-rw-r--r--drivers/staging/ncpfs/ncp_fs_sb.h (renamed from fs/ncpfs/ncp_fs_sb.h)0
-rw-r--r--drivers/staging/ncpfs/ncplib_kernel.c (renamed from fs/ncpfs/ncplib_kernel.c)0
-rw-r--r--drivers/staging/ncpfs/ncplib_kernel.h (renamed from fs/ncpfs/ncplib_kernel.h)0
-rw-r--r--drivers/staging/ncpfs/ncpsign_kernel.c (renamed from fs/ncpfs/ncpsign_kernel.c)0
-rw-r--r--drivers/staging/ncpfs/ncpsign_kernel.h (renamed from fs/ncpfs/ncpsign_kernel.h)0
-rw-r--r--drivers/staging/ncpfs/sock.c (renamed from fs/ncpfs/sock.c)3
-rw-r--r--drivers/staging/ncpfs/symlink.c (renamed from fs/ncpfs/symlink.c)0
-rw-r--r--drivers/staging/nvec/nvec-keytable.h14
-rw-r--r--drivers/staging/nvec/nvec.c6
-rw-r--r--drivers/staging/nvec/nvec.h6
-rw-r--r--drivers/staging/nvec/nvec_kbd.c6
-rw-r--r--drivers/staging/nvec/nvec_paz00.c6
-rw-r--r--drivers/staging/nvec/nvec_power.c6
-rw-r--r--drivers/staging/nvec/nvec_ps2.c6
-rw-r--r--drivers/staging/octeon/Makefile4
-rw-r--r--drivers/staging/octeon/ethernet-defines.h5
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c5
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h5
-rw-r--r--drivers/staging/octeon/ethernet-mem.c5
-rw-r--r--drivers/staging/octeon/ethernet-mem.h5
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c5
-rw-r--r--drivers/staging/octeon/ethernet-rx.c5
-rw-r--r--drivers/staging/octeon/ethernet-rx.h5
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c5
-rw-r--r--drivers/staging/octeon/ethernet-spi.c5
-rw-r--r--drivers/staging/octeon/ethernet-tx.c5
-rw-r--r--drivers/staging/octeon/ethernet-tx.h5
-rw-r--r--drivers/staging/octeon/ethernet-util.h5
-rw-r--r--drivers/staging/octeon/ethernet.c5
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h5
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c30
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h30
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c2
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts32
-rw-r--r--drivers/staging/pi433/Documentation/pi433.txt18
-rw-r--r--drivers/staging/pi433/pi433_if.c607
-rw-r--r--drivers/staging/pi433/pi433_if.h38
-rw-r--r--drivers/staging/pi433/rf69.c905
-rw-r--r--drivers/staging/pi433/rf69.h37
-rw-r--r--drivers/staging/pi433/rf69_enum.h298
-rw-r--r--drivers/staging/pi433/rf69_registers.h48
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c1
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c12
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/rtl8712/hal_init.c1
-rw-r--r--drivers/staging/rtl8712/ieee80211.c6
-rw-r--r--drivers/staging/rtl8712/os_intfs.c5
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c18
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c55
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c36
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c106
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c72
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c9
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c44
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c16
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c768
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c4
-rw-r--r--drivers/staging/rtlwifi/Kconfig10
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c20
-rw-r--r--drivers/staging/rtlwifi/cam.c4
-rw-r--r--drivers/staging/rtlwifi/core.c10
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c2
-rw-r--r--drivers/staging/rtlwifi/wifi.h2
-rw-r--r--drivers/staging/rts5208/ms.c3
-rw-r--r--drivers/staging/rts5208/rtsx.c17
-rw-r--r--drivers/staging/rts5208/rtsx.h2
-rw-r--r--drivers/staging/rts5208/sd.h2
-rw-r--r--drivers/staging/sm750fb/TODO4
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c8
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h2
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c14
-rw-r--r--drivers/staging/speakup/buffers.c1
-rw-r--r--drivers/staging/speakup/fakekey.c11
-rw-r--r--drivers/staging/speakup/keyhelp.c11
-rw-r--r--drivers/staging/speakup/kobjects.c1
-rw-r--r--drivers/staging/speakup/main.c11
-rw-r--r--drivers/staging/speakup/selection.c9
-rw-r--r--drivers/staging/speakup/serialio.c1
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c10
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c11
-rw-r--r--drivers/staging/speakup/speakup_apollo.c11
-rw-r--r--drivers/staging/speakup/speakup_audptr.c11
-rw-r--r--drivers/staging/speakup/speakup_bns.c11
-rw-r--r--drivers/staging/speakup/speakup_decext.c11
-rw-r--r--drivers/staging/speakup/speakup_decpc.c11
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c11
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c11
-rw-r--r--drivers/staging/speakup/speakup_dummy.c11
-rw-r--r--drivers/staging/speakup/speakup_keypc.c11
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c11
-rw-r--r--drivers/staging/speakup/speakup_soft.c16
-rw-r--r--drivers/staging/speakup/speakup_spkout.c11
-rw-r--r--drivers/staging/speakup/speakup_txprt.c11
-rw-r--r--drivers/staging/speakup/spk_priv.h11
-rw-r--r--drivers/staging/speakup/spk_priv_keyinfo.h11
-rw-r--r--drivers/staging/speakup/spk_ttyio.c1
-rw-r--r--drivers/staging/speakup/synth.c1
-rw-r--r--drivers/staging/speakup/thread.c1
-rw-r--r--drivers/staging/speakup/varhandlers.c1
-rw-r--r--drivers/staging/typec/tcpci.c28
-rw-r--r--drivers/staging/typec/tcpci.h11
-rw-r--r--drivers/staging/unisys/Kconfig4
-rw-r--r--drivers/staging/unisys/Makefile1
-rw-r--r--drivers/staging/unisys/include/iochannel.h15
-rw-r--r--drivers/staging/unisys/include/visorchannel.h189
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c14
-rw-r--r--drivers/staging/unisys/visorinput/ultrainputreport.h11
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c13
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c13
-rw-r--r--drivers/staging/vboxvideo/vbox_fb.c2
-rw-r--r--drivers/staging/vboxvideo/vbox_main.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c15
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c15
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c15
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c16
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h15
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/vc_vchi_audioserv_defs.h15
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c61
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h7
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-common.h5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-encodings.h5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg-common.h5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg-format.h5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c12
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c9
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c50
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c3
-rw-r--r--drivers/staging/vme/devices/vme_user.c26
-rw-r--r--drivers/staging/vt6655/baseband.c11
-rw-r--r--drivers/staging/vt6655/baseband.h11
-rw-r--r--drivers/staging/vt6655/card.c11
-rw-r--r--drivers/staging/vt6655/card.h11
-rw-r--r--drivers/staging/vt6655/channel.c11
-rw-r--r--drivers/staging/vt6655/channel.h12
-rw-r--r--drivers/staging/vt6655/desc.h11
-rw-r--r--drivers/staging/vt6655/device.h11
-rw-r--r--drivers/staging/vt6655/device_cfg.h11
-rw-r--r--drivers/staging/vt6655/device_main.c19
-rw-r--r--drivers/staging/vt6655/dpc.c11
-rw-r--r--drivers/staging/vt6655/dpc.h11
-rw-r--r--drivers/staging/vt6655/key.c11
-rw-r--r--drivers/staging/vt6655/key.h11
-rw-r--r--drivers/staging/vt6655/mac.c11
-rw-r--r--drivers/staging/vt6655/mac.h11
-rw-r--r--drivers/staging/vt6655/power.c11
-rw-r--r--drivers/staging/vt6655/power.h11
-rw-r--r--drivers/staging/vt6655/rf.c11
-rw-r--r--drivers/staging/vt6655/rf.h11
-rw-r--r--drivers/staging/vt6655/rxtx.c11
-rw-r--r--drivers/staging/vt6655/rxtx.h11
-rw-r--r--drivers/staging/vt6655/srom.c11
-rw-r--r--drivers/staging/vt6655/srom.h11
-rw-r--r--drivers/staging/vt6655/tmacro.h11
-rw-r--r--drivers/staging/vt6655/upc.h11
-rw-r--r--drivers/staging/vt6656/baseband.c12
-rw-r--r--drivers/staging/vt6656/baseband.h12
-rw-r--r--drivers/staging/vt6656/card.c12
-rw-r--r--drivers/staging/vt6656/card.h12
-rw-r--r--drivers/staging/vt6656/channel.c12
-rw-r--r--drivers/staging/vt6656/channel.h12
-rw-r--r--drivers/staging/vt6656/desc.h12
-rw-r--r--drivers/staging/vt6656/device.h12
-rw-r--r--drivers/staging/vt6656/dpc.c12
-rw-r--r--drivers/staging/vt6656/dpc.h12
-rw-r--r--drivers/staging/vt6656/firmware.c12
-rw-r--r--drivers/staging/vt6656/firmware.h12
-rw-r--r--drivers/staging/vt6656/int.c12
-rw-r--r--drivers/staging/vt6656/int.h12
-rw-r--r--drivers/staging/vt6656/key.c12
-rw-r--r--drivers/staging/vt6656/key.h12
-rw-r--r--drivers/staging/vt6656/mac.c12
-rw-r--r--drivers/staging/vt6656/mac.h12
-rw-r--r--drivers/staging/vt6656/main_usb.c17
-rw-r--r--drivers/staging/vt6656/power.c12
-rw-r--r--drivers/staging/vt6656/power.h12
-rw-r--r--drivers/staging/vt6656/rf.c12
-rw-r--r--drivers/staging/vt6656/rf.h12
-rw-r--r--drivers/staging/vt6656/rxtx.c12
-rw-r--r--drivers/staging/vt6656/rxtx.h12
-rw-r--r--drivers/staging/vt6656/usbpipe.c12
-rw-r--r--drivers/staging/vt6656/usbpipe.h12
-rw-r--r--drivers/staging/vt6656/wcmd.c12
-rw-r--r--drivers/staging/vt6656/wcmd.h12
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c2
-rw-r--r--drivers/staging/wilc1000/host_interface.c40
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c11
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c1
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c5
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c21
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c566
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h29
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c9
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c6
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h100
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h1
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c7
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c1
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h1
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h1
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h1
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h1
-rw-r--r--drivers/staging/wlan-ng/p80211metastruct.h1
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h1
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h1
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h1
-rw-r--r--drivers/staging/wlan-ng/p80211req.c1
-rw-r--r--drivers/staging/wlan-ng/p80211req.h1
-rw-r--r--drivers/staging/wlan-ng/p80211types.h1
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c1
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c1
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c1
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h1
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c1
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c1
-rw-r--r--drivers/staging/xgifb/XGI_main.h13
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c224
-rw-r--r--drivers/staging/xgifb/vb_setmode.c81
-rw-r--r--drivers/staging/xgifb/vb_table.h4
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/target_core_transport.c46
-rw-r--r--drivers/thermal/cpu_cooling.c201
-rw-r--r--drivers/thermal/thermal_sysfs.c17
-rw-r--r--drivers/tty/Kconfig6
-rw-r--r--drivers/tty/goldfish.c2
-rw-r--r--drivers/tty/isicom.c6
-rw-r--r--drivers/tty/moxa.c17
-rw-r--r--drivers/tty/moxa.h2
-rw-r--r--drivers/tty/n_gsm.c23
-rw-r--r--drivers/tty/n_hdlc.c6
-rw-r--r--drivers/tty/n_r3964.c6
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/serdev/core.c118
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c32
-rw-r--r--drivers/tty/serial/8250/8250_dw.c33
-rw-r--r--drivers/tty/serial/8250/8250_exar.c34
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c17
-rw-r--r--drivers/tty/serial/8250/8250_of.c5
-rw-r--r--drivers/tty/serial/8250/8250_omap.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c26
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c5
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/Kconfig14
-rw-r--r--drivers/tty/serial/amba-pl011.c12
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c15
-rw-r--r--drivers/tty/serial/imx.c93
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c6
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c6
-rw-r--r--drivers/tty/serial/max310x.c267
-rw-r--r--drivers/tty/serial/meson_uart.c31
-rw-r--r--drivers/tty/serial/mxs-auart.c11
-rw-r--r--drivers/tty/serial/omap-serial.c18
-rw-r--r--drivers/tty/serial/serial_core.c35
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/tty_io.c18
-rw-r--r--drivers/tty/tty_ldisc.c4
-rw-r--r--drivers/tty/vt/selection.c6
-rw-r--r--drivers/tty/vt/vc_screen.c4
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/atm/cxacru.c18
-rw-r--r--drivers/usb/atm/ueagle-atm.c18
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c2
-rw-r--r--drivers/usb/chipidea/core.c6
-rw-r--r--drivers/usb/chipidea/otg_fsm.c23
-rw-r--r--drivers/usb/class/cdc-acm.c17
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/class/usblp.c8
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/devio.c10
-rw-r--r--drivers/usb/core/driver.c10
-rw-r--r--drivers/usb/core/hub.c47
-rw-r--r--drivers/usb/core/ledtrig-usbport.c10
-rw-r--r--drivers/usb/core/message.c50
-rw-r--r--drivers/usb/core/of.c95
-rw-r--r--drivers/usb/core/urb.c3
-rw-r--r--drivers/usb/core/usb.c3
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/hcd.c7
-rw-r--r--drivers/usb/dwc2/hcd.h9
-rw-r--r--drivers/usb/dwc2/hcd_intr.c20
-rw-r--r--drivers/usb/dwc2/hcd_queue.c81
-rw-r--r--drivers/usb/dwc2/platform.c10
-rw-r--r--drivers/usb/dwc3/core.c5
-rw-r--r--drivers/usb/dwc3/core.h5
-rw-r--r--drivers/usb/dwc3/debug.h9
-rw-r--r--drivers/usb/dwc3/ep0.c6
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/dwc3/trace.h4
-rw-r--r--drivers/usb/early/xhci-dbc.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c16
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c30
-rw-r--r--drivers/usb/gadget/function/f_printer.c4
-rw-r--r--drivers/usb/gadget/function/u_serial.c192
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c5
-rw-r--r--drivers/usb/gadget/legacy/ncm.c6
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c16
-rw-r--r--drivers/usb/gadget/udc/core.c38
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c16
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c3
-rw-r--r--drivers/usb/gadget/udc/trace.h6
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c2
-rw-r--r--drivers/usb/host/Kconfig8
-rw-r--r--drivers/usb/host/Makefile5
-rw-r--r--drivers/usb/host/ehci-omap.c3
-rw-r--r--drivers/usb/host/ehci-sysfs.c12
-rw-r--r--drivers/usb/host/fotg210-hcd.c14
-rw-r--r--drivers/usb/host/ohci-da8xx.c2
-rw-r--r--drivers/usb/host/uhci-hcd.c3
-rw-r--r--drivers/usb/host/uhci-hcd.h3
-rw-r--r--drivers/usb/host/uhci-platform.c23
-rw-r--r--drivers/usb/host/uhci-q.c3
-rw-r--r--drivers/usb/host/whci/asl.c2
-rw-r--r--drivers/usb/host/xhci-dbg.c261
-rw-r--r--drivers/usb/host/xhci-dbgcap.c996
-rw-r--r--drivers/usb/host/xhci-dbgcap.h229
-rw-r--r--drivers/usb/host/xhci-dbgtty.c497
-rw-r--r--drivers/usb/host/xhci-hub.c8
-rw-r--r--drivers/usb/host/xhci-mem.c132
-rw-r--r--drivers/usb/host/xhci-mtk.c186
-rw-r--r--drivers/usb/host/xhci-mtk.h6
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-plat.c5
-rw-r--r--drivers/usb/host/xhci-ring.c14
-rw-r--r--drivers/usb/host/xhci-trace.h69
-rw-r--r--drivers/usb/host/xhci.c48
-rw-r--r--drivers/usb/host/xhci.h30
-rw-r--r--drivers/usb/misc/chaoskey.c6
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c12
-rw-r--r--drivers/usb/misc/cytherm.c45
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/misc/ldusb.c4
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/misc/trancevibrator.c6
-rw-r--r--drivers/usb/misc/usb3503.c2
-rw-r--r--drivers/usb/misc/usbsevseg.c24
-rw-r--r--drivers/usb/misc/usbtest.c70
-rw-r--r--drivers/usb/mon/mon_bin.c12
-rw-r--r--drivers/usb/mtu3/mtu3.h11
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h10
-rw-r--r--drivers/usb/mtu3/mtu3_host.c115
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c10
-rw-r--r--drivers/usb/musb/da8xx.c2
-rw-r--r--drivers/usb/musb/musb_core.c17
-rw-r--r--drivers/usb/musb/musb_host.c5
-rw-r--r--drivers/usb/phy/phy-mv-usb.c18
-rw-r--r--drivers/usb/phy/phy-tahvo.c6
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c4
-rw-r--r--drivers/usb/phy/phy.c14
-rw-r--r--drivers/usb/renesas_usbhs/Makefile2
-rw-r--r--drivers/usb/renesas_usbhs/common.c22
-rw-r--r--drivers/usb/renesas_usbhs/common.h7
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c21
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c19
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h2
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c35
-rw-r--r--drivers/usb/renesas_usbhs/rza.c52
-rw-r--r--drivers/usb/renesas_usbhs/rza.h4
-rw-r--r--drivers/usb/serial/Kconfig79
-rw-r--r--drivers/usb/serial/ark3116.c49
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/f81534.c392
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/io_edgeport.c1
-rw-r--r--drivers/usb/serial/iuu_phoenix.c1
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/storage/debug.c3
-rw-r--r--drivers/usb/storage/debug.h3
-rw-r--r--drivers/usb/storage/initializers.c3
-rw-r--r--drivers/usb/storage/initializers.h3
-rw-r--r--drivers/usb/storage/protocol.c3
-rw-r--r--drivers/usb/storage/protocol.h3
-rw-r--r--drivers/usb/storage/scsiglue.c3
-rw-r--r--drivers/usb/storage/scsiglue.h3
-rw-r--r--drivers/usb/storage/sierra_ms.c4
-rw-r--r--drivers/usb/storage/transport.c3
-rw-r--r--drivers/usb/storage/transport.h3
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/storage/unusual_devs.h8
-rw-r--r--drivers/usb/storage/unusual_uas.h70
-rw-r--r--drivers/usb/storage/usb.c11
-rw-r--r--drivers/usb/storage/usb.h3
-rw-r--r--drivers/usb/storage/usual-tables.c3
-rw-r--r--drivers/usb/typec/fusb302/fusb302.c39
-rw-r--r--drivers/usb/typec/tcpm.c293
-rw-r--r--drivers/usb/typec/typec_wcove.c4
-rw-r--r--drivers/usb/usbip/stub_dev.c4
-rw-r--r--drivers/usb/usbip/stub_rx.c3
-rw-r--r--drivers/usb/usbip/usbip_common.c18
-rw-r--r--drivers/usb/usbip/vhci_rx.c2
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c20
-rw-r--r--drivers/usb/usbip/vudc_rx.c19
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c4
-rw-r--r--drivers/usb/usbip/vudc_tx.c11
-rw-r--r--drivers/uwb/Kconfig1
-rw-r--r--drivers/uwb/address.c14
-rw-r--r--drivers/uwb/drp-ie.c13
-rw-r--r--drivers/vfio/virqfd.c4
-rw-r--r--drivers/vhost/net.c70
-rw-r--r--drivers/vhost/vhost.c25
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/video/backlight/apple_bl.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c2
-rw-r--r--drivers/video/fbdev/auo_k190x.c4
-rw-r--r--drivers/video/fbdev/macfb.c10
-rw-r--r--drivers/video/fbdev/w100fb.c4
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/visorbus/Kconfig (renamed from drivers/staging/unisys/visorbus/Kconfig)2
-rw-r--r--drivers/visorbus/Makefile (renamed from drivers/staging/unisys/visorbus/Makefile)2
-rw-r--r--drivers/visorbus/controlvmchannel.h (renamed from drivers/staging/unisys/visorbus/controlvmchannel.h)14
-rw-r--r--drivers/visorbus/vbuschannel.h (renamed from drivers/staging/unisys/visorbus/vbuschannel.h)13
-rw-r--r--drivers/visorbus/visorbus_main.c (renamed from drivers/staging/unisys/visorbus/visorbus_main.c)13
-rw-r--r--drivers/visorbus/visorbus_private.h (renamed from drivers/staging/unisys/visorbus/visorbus_private.h)13
-rw-r--r--drivers/visorbus/visorchannel.c (renamed from drivers/staging/unisys/visorbus/visorchannel.c)13
-rw-r--r--drivers/visorbus/visorchipset.c (renamed from drivers/staging/unisys/visorbus/visorchipset.c)20
-rw-r--r--drivers/w1/masters/w1-gpio.c149
-rw-r--r--drivers/w1/w1_netlink.h6
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/rave-sp-wdt.c337
-rw-r--r--drivers/xen/evtchn.c4
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/mcelog.c2
-rw-r--r--drivers/xen/pvcalls-front.c12
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--fs/9p/Kconfig3
-rw-r--r--fs/Kconfig7
-rw-r--r--fs/Makefile1
-rw-r--r--fs/affs/amigaffs.c5
-rw-r--r--fs/affs/dir.c5
-rw-r--r--fs/affs/super.c3
-rw-r--r--fs/afs/dir.c37
-rw-r--r--fs/afs/fsclient.c3
-rw-r--r--fs/afs/inode.c9
-rw-r--r--fs/afs/rxrpc.c2
-rw-r--r--fs/afs/write.c8
-rw-r--r--fs/autofs4/waitq.c4
-rw-r--r--fs/btrfs/Kconfig3
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/backref.c3
-rw-r--r--fs/btrfs/compression.c141
-rw-r--r--fs/btrfs/compression.h4
-rw-r--r--fs/btrfs/ctree.c56
-rw-r--r--fs/btrfs/ctree.h12
-rw-r--r--fs/btrfs/delayed-inode.c137
-rw-r--r--fs/btrfs/delayed-ref.c2
-rw-r--r--fs/btrfs/delayed-ref.h2
-rw-r--r--fs/btrfs/dev-replace.c30
-rw-r--r--fs/btrfs/dir-item.c108
-rw-r--r--fs/btrfs/disk-io.c77
-rw-r--r--fs/btrfs/disk-io.h3
-rw-r--r--fs/btrfs/export.c5
-rw-r--r--fs/btrfs/extent-tree.c21
-rw-r--r--fs/btrfs/extent_io.c154
-rw-r--r--fs/btrfs/extent_io.h50
-rw-r--r--fs/btrfs/extent_map.c132
-rw-r--r--fs/btrfs/extent_map.h3
-rw-r--r--fs/btrfs/file.c392
-rw-r--r--fs/btrfs/free-space-cache.c15
-rw-r--r--fs/btrfs/inode.c298
-rw-r--r--fs/btrfs/ioctl.c45
-rw-r--r--fs/btrfs/props.c13
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/raid56.c119
-rw-r--r--fs/btrfs/ref-verify.c6
-rw-r--r--fs/btrfs/root-tree.c7
-rw-r--r--fs/btrfs/scrub.c95
-rw-r--r--fs/btrfs/send.c6
-rw-r--r--fs/btrfs/super.c348
-rw-r--r--fs/btrfs/sysfs.c2
-rw-r--r--fs/btrfs/tests/btrfs-tests.c3
-rw-r--r--fs/btrfs/tests/btrfs-tests.h1
-rw-r--r--fs/btrfs/tests/extent-map-tests.c366
-rw-r--r--fs/btrfs/tests/inode-tests.c17
-rw-r--r--fs/btrfs/transaction.c22
-rw-r--r--fs/btrfs/transaction.h11
-rw-r--r--fs/btrfs/tree-checker.c142
-rw-r--r--fs/btrfs/tree-log.c58
-rw-r--r--fs/btrfs/volumes.c690
-rw-r--r--fs/btrfs/volumes.h45
-rw-r--r--fs/btrfs/xattr.c7
-rw-r--r--fs/btrfs/zstd.c132
-rw-r--r--fs/buffer.c12
-rw-r--r--fs/cachefiles/daemon.c6
-rw-r--r--fs/ceph/Kconfig3
-rw-r--r--fs/cifs/Kconfig23
-rw-r--r--fs/cifs/Makefile2
-rw-r--r--fs/cifs/cifs_debug.c199
-rw-r--r--fs/cifs/cifsacl.c2
-rw-r--r--fs/cifs/cifsencrypt.c3
-rw-r--r--fs/cifs/cifsfs.c8
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h35
-rw-r--r--fs/cifs/cifsproto.h4
-rw-r--r--fs/cifs/cifssmb.c22
-rw-r--r--fs/cifs/connect.c251
-rw-r--r--fs/cifs/file.c43
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/cifs/misc.c14
-rw-r--r--fs/cifs/smb1ops.c4
-rw-r--r--fs/cifs/smb2file.c2
-rw-r--r--fs/cifs/smb2misc.c2
-rw-r--r--fs/cifs/smb2ops.c77
-rw-r--r--fs/cifs/smb2pdu.c578
-rw-r--r--fs/cifs/smb2pdu.h60
-rw-r--r--fs/cifs/smb2proto.h3
-rw-r--r--fs/cifs/smbdirect.c2610
-rw-r--r--fs/cifs/smbdirect.h338
-rw-r--r--fs/cifs/transport.c69
-rw-r--r--fs/coda/psdev.c4
-rw-r--r--fs/dax.c21
-rw-r--r--fs/dcache.c65
-rw-r--r--fs/debugfs/file.c4
-rw-r--r--fs/devpts/inode.c4
-rw-r--r--fs/direct-io.c24
-rw-r--r--fs/dlm/lowcomms.c4
-rw-r--r--fs/dlm/plock.c4
-rw-r--r--fs/dlm/user.c2
-rw-r--r--fs/ecryptfs/miscdev.c4
-rw-r--r--fs/eventfd.c131
-rw-r--r--fs/eventpoll.c28
-rw-r--r--fs/exec.c9
-rw-r--r--fs/exofs/dir.c9
-rw-r--r--fs/exofs/super.c3
-rw-r--r--fs/ext2/Kconfig6
-rw-r--r--fs/ext2/dir.c9
-rw-r--r--fs/ext2/super.c5
-rw-r--r--fs/ext4/Kconfig3
-rw-r--r--fs/ext4/dir.c9
-rw-r--r--fs/ext4/inline.c7
-rw-r--r--fs/ext4/inode.c13
-rw-r--r--fs/ext4/ioctl.c3
-rw-r--r--fs/ext4/namei.c5
-rw-r--r--fs/ext4/super.c3
-rw-r--r--fs/ext4/xattr.c5
-rw-r--r--fs/f2fs/Kconfig6
-rw-r--r--fs/f2fs/acl.c2
-rw-r--r--fs/f2fs/checkpoint.c10
-rw-r--r--fs/f2fs/data.c301
-rw-r--r--fs/f2fs/debug.c12
-rw-r--r--fs/f2fs/dir.c6
-rw-r--r--fs/f2fs/f2fs.h201
-rw-r--r--fs/f2fs/file.c248
-rw-r--r--fs/f2fs/gc.c18
-rw-r--r--fs/f2fs/gc.h2
-rw-r--r--fs/f2fs/inode.c34
-rw-r--r--fs/f2fs/namei.c67
-rw-r--r--fs/f2fs/node.c149
-rw-r--r--fs/f2fs/node.h4
-rw-r--r--fs/f2fs/recovery.c27
-rw-r--r--fs/f2fs/segment.c129
-rw-r--r--fs/f2fs/segment.h92
-rw-r--r--fs/f2fs/super.c142
-rw-r--r--fs/f2fs/sysfs.c14
-rw-r--r--fs/f2fs/trace.c12
-rw-r--r--fs/f2fs/xattr.c12
-rw-r--r--fs/fat/dir.c3
-rw-r--r--fs/fat/inode.c9
-rw-r--r--fs/fat/namei_msdos.c7
-rw-r--r--fs/fat/namei_vfat.c22
-rw-r--r--fs/fcntl.c7
-rw-r--r--fs/file.c7
-rw-r--r--fs/file_table.c1
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fuse/dev.c4
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/gfs2/Kconfig2
-rw-r--r--fs/gfs2/aops.c35
-rw-r--r--fs/gfs2/bmap.c583
-rw-r--r--fs/gfs2/bmap.h1
-rw-r--r--fs/gfs2/dir.c3
-rw-r--r--fs/gfs2/file.c23
-rw-r--r--fs/gfs2/glock.c7
-rw-r--r--fs/gfs2/glops.c19
-rw-r--r--fs/gfs2/incore.h6
-rw-r--r--fs/gfs2/inode.c11
-rw-r--r--fs/gfs2/lock_dlm.c4
-rw-r--r--fs/gfs2/log.c124
-rw-r--r--fs/gfs2/log.h10
-rw-r--r--fs/gfs2/lops.c18
-rw-r--r--fs/gfs2/lops.h3
-rw-r--r--fs/gfs2/main.c90
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/quota.c3
-rw-r--r--fs/gfs2/recovery.c110
-rw-r--r--fs/gfs2/rgrp.c38
-rw-r--r--fs/gfs2/super.c19
-rw-r--r--fs/gfs2/sys.c4
-rw-r--r--fs/gfs2/trace_gfs2.h11
-rw-r--r--fs/gfs2/trans.c4
-rw-r--r--fs/hfsplus/Kconfig3
-rw-r--r--fs/hugetlbfs/inode.c39
-rw-r--r--fs/inode.c11
-rw-r--r--fs/iomap.c14
-rw-r--r--fs/jffs2/Kconfig6
-rw-r--r--fs/jffs2/fs.c1
-rw-r--r--fs/jfs/Kconfig3
-rw-r--r--fs/kernfs/file.c2
-rw-r--r--fs/lockd/clntproc.c14
-rw-r--r--fs/lockd/host.c22
-rw-r--r--fs/lockd/mon.c14
-rw-r--r--fs/lockd/svcproc.c2
-rw-r--r--fs/namei.c95
-rw-r--r--fs/nfs/blocklayout/blocklayout.c94
-rw-r--r--fs/nfs/blocklayout/blocklayout.h7
-rw-r--r--fs/nfs/blocklayout/dev.c7
-rw-r--r--fs/nfs/delegation.c3
-rw-r--r--fs/nfs/direct.c4
-rw-r--r--fs/nfs/export.c5
-rw-r--r--fs/nfs/filelayout/filelayout.c4
-rw-r--r--fs/nfs/fscache-index.c5
-rw-r--r--fs/nfs/inode.c71
-rw-r--r--fs/nfs/io.c2
-rw-r--r--fs/nfs/nfs4client.c24
-rw-r--r--fs/nfs/nfs4file.c1
-rw-r--r--fs/nfs/nfs4idmap.c6
-rw-r--r--fs/nfs/nfs4namespace.c2
-rw-r--r--fs/nfs/nfs4proc.c52
-rw-r--r--fs/nfs/nfs4state.c5
-rw-r--r--fs/nfs/nfs4sysctl.c2
-rw-r--r--fs/nfs/nfs4xdr.c64
-rw-r--r--fs/nfs/nfstrace.h27
-rw-r--r--fs/nfs/pagelist.c8
-rw-r--r--fs/nfs/pnfs.c6
-rw-r--r--fs/nfs/pnfs.h6
-rw-r--r--fs/nfs/pnfs_dev.c1
-rw-r--r--fs/nfs/write.c10
-rw-r--r--fs/nfsd/auth.c6
-rw-r--r--fs/nfsd/nfsfh.h3
-rw-r--r--fs/notify/fanotify/fanotify_user.c4
-rw-r--r--fs/notify/inotify/inotify_user.c4
-rw-r--r--fs/nsfs.c29
-rw-r--r--fs/ntfs/inode.c9
-rw-r--r--fs/ntfs/mft.c6
-rw-r--r--fs/ocfs2/acl.c6
-rw-r--r--fs/ocfs2/alloc.c261
-rw-r--r--fs/ocfs2/alloc.h1
-rw-r--r--fs/ocfs2/aops.c10
-rw-r--r--fs/ocfs2/cluster/quorum.c5
-rw-r--r--fs/ocfs2/cluster/tcp.c3
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h2
-rw-r--r--fs/ocfs2/dir.c17
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c7
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/dlmglue.c136
-rw-r--r--fs/ocfs2/dlmglue.h35
-rw-r--r--fs/ocfs2/extent_map.c45
-rw-r--r--fs/ocfs2/extent_map.h3
-rw-r--r--fs/ocfs2/file.c101
-rw-r--r--fs/ocfs2/inode.c3
-rw-r--r--fs/ocfs2/journal.c23
-rw-r--r--fs/ocfs2/mmap.c2
-rw-r--r--fs/ocfs2/namei.c3
-rw-r--r--fs/ocfs2/ocfs2.h1
-rw-r--r--fs/ocfs2/ocfs2_lockid.h5
-rw-r--r--fs/ocfs2/ocfs2_trace.h10
-rw-r--r--fs/ocfs2/quota_global.c3
-rw-r--r--fs/ocfs2/suballoc.c8
-rw-r--r--fs/ocfs2/super.c13
-rw-r--r--fs/ocfs2/xattr.c5
-rw-r--r--fs/orangefs/devorangefs-req.c9
-rw-r--r--fs/orangefs/file.c7
-rw-r--r--fs/orangefs/orangefs-kernel.h11
-rw-r--r--fs/orangefs/waitqueue.c4
-rw-r--r--fs/pipe.c4
-rw-r--r--fs/posix_acl.c6
-rw-r--r--fs/proc/array.c7
-rw-r--r--fs/proc/inode.c6
-rw-r--r--fs/proc/kmsg.c2
-rw-r--r--fs/proc/proc_sysctl.c4
-rw-r--r--fs/proc/task_mmu.c19
-rw-r--r--fs/proc_namespace.c4
-rw-r--r--fs/reiserfs/Kconfig6
-rw-r--r--fs/select.c27
-rw-r--r--fs/signalfd.c4
-rw-r--r--fs/super.c8
-rw-r--r--fs/sysfs/dir.c9
-rw-r--r--fs/sysfs/file.c3
-rw-r--r--fs/sysfs/group.c6
-rw-r--r--fs/sysfs/mount.c5
-rw-r--r--fs/sysfs/symlink.c3
-rw-r--r--fs/sysfs/sysfs.h3
-rw-r--r--fs/timerfd.c4
-rw-r--r--fs/ubifs/dir.c43
-rw-r--r--fs/ubifs/file.c41
-rw-r--r--fs/ubifs/tnc.c21
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--fs/ufs/dir.c9
-rw-r--r--fs/ufs/inode.c3
-rw-r--r--fs/ufs/super.c3
-rw-r--r--fs/userfaultfd.c99
-rw-r--r--fs/xfs/Kconfig3
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c124
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h10
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c47
-rw-r--r--fs/xfs/libxfs/xfs_attr.c4
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c148
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h1
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c104
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c120
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c58
-rw-r--r--fs/xfs/libxfs/xfs_btree.c117
-rw-r--r--fs/xfs/libxfs/xfs_btree.h16
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c70
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h6
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c5
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c39
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c208
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c89
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c89
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h12
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c30
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c174
-rw-r--r--fs/xfs/libxfs/xfs_fs.h7
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c143
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h6
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c65
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c135
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h4
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c152
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h14
-rw-r--r--fs/xfs/libxfs/xfs_log_rlimit.c2
-rw-r--r--fs/xfs/libxfs/xfs_quota_defs.h9
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c19
-rw-r--r--fs/xfs/libxfs/xfs_refcount.h3
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c40
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c67
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h5
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c40
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c21
-rw-r--r--fs/xfs/libxfs/xfs_sb.c113
-rw-r--r--fs/xfs/libxfs/xfs_sb.h4
-rw-r--r--fs/xfs/libxfs/xfs_shared.h4
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c75
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c199
-rw-r--r--fs/xfs/scrub/agheader.c340
-rw-r--r--fs/xfs/scrub/alloc.c81
-rw-r--r--fs/xfs/scrub/bmap.c219
-rw-r--r--fs/xfs/scrub/btree.c184
-rw-r--r--fs/xfs/scrub/btree.h9
-rw-r--r--fs/xfs/scrub/common.c255
-rw-r--r--fs/xfs/scrub/common.h23
-rw-r--r--fs/xfs/scrub/dabtree.c22
-rw-r--r--fs/xfs/scrub/dir.c44
-rw-r--r--fs/xfs/scrub/ialloc.c194
-rw-r--r--fs/xfs/scrub/inode.c178
-rw-r--r--fs/xfs/scrub/parent.c8
-rw-r--r--fs/xfs/scrub/quota.c7
-rw-r--r--fs/xfs/scrub/refcount.c420
-rw-r--r--fs/xfs/scrub/rmap.c123
-rw-r--r--fs/xfs/scrub/rtbitmap.c35
-rw-r--r--fs/xfs/scrub/scrub.c203
-rw-r--r--fs/xfs/scrub/scrub.h37
-rw-r--r--fs/xfs/scrub/trace.h44
-rw-r--r--fs/xfs/xfs_aops.c19
-rw-r--r--fs/xfs/xfs_bmap_util.c4
-rw-r--r--fs/xfs/xfs_buf.c22
-rw-r--r--fs/xfs/xfs_buf.h8
-rw-r--r--fs/xfs/xfs_buf_item.c156
-rw-r--r--fs/xfs/xfs_buf_item.h7
-rw-r--r--fs/xfs/xfs_dir2_readdir.c4
-rw-r--r--fs/xfs/xfs_dquot.c62
-rw-r--r--fs/xfs/xfs_dquot_item.c9
-rw-r--r--fs/xfs/xfs_error.c64
-rw-r--r--fs/xfs/xfs_error.h14
-rw-r--r--fs/xfs/xfs_fsops.c79
-rw-r--r--fs/xfs/xfs_fsops.h1
-rw-r--r--fs/xfs/xfs_icache.c75
-rw-r--r--fs/xfs/xfs_inode.c107
-rw-r--r--fs/xfs/xfs_inode.h5
-rw-r--r--fs/xfs/xfs_inode_item.c44
-rw-r--r--fs/xfs/xfs_ioctl.c5
-rw-r--r--fs/xfs/xfs_ioctl32.c3
-rw-r--r--fs/xfs/xfs_iomap.c2
-rw-r--r--fs/xfs/xfs_linux.h14
-rw-r--r--fs/xfs/xfs_log.c17
-rw-r--r--fs/xfs/xfs_log_recover.c58
-rw-r--r--fs/xfs/xfs_mount.c3
-rw-r--r--fs/xfs/xfs_qm.c79
-rw-r--r--fs/xfs/xfs_reflink.c95
-rw-r--r--fs/xfs/xfs_rtalloc.h4
-rw-r--r--fs/xfs/xfs_super.c14
-rw-r--r--fs/xfs/xfs_trace.h68
-rw-r--r--fs/xfs/xfs_trans.c22
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--fs/xfs/xfs_trans_buf.c98
-rw-r--r--fs/xfs/xfs_trans_inode.c16
-rw-r--r--include/acpi/acconfig.h4
-rw-r--r--include/acpi/acexcep.h12
-rw-r--r--include/acpi/acpi_bus.h3
-rw-r--r--include/acpi/acpixf.h6
-rw-r--r--include/acpi/actbl1.h159
-rw-r--r--include/acpi/actbl2.h15
-rw-r--r--include/acpi/actypes.h4
-rw-r--r--include/asm-generic/dma-mapping.h10
-rw-r--r--include/asm-generic/error-injection.h35
-rw-r--r--include/asm-generic/pgtable.h25
-rw-r--r--include/asm-generic/vmlinux.lds.h14
-rw-r--r--include/crypto/aead.h10
-rw-r--r--include/crypto/chacha20.h3
-rw-r--r--include/crypto/hash.h46
-rw-r--r--include/crypto/if_alg.h7
-rw-r--r--include/crypto/internal/hash.h2
-rw-r--r--include/crypto/internal/scompress.h11
-rw-r--r--include/crypto/null.h10
-rw-r--r--include/crypto/poly1305.h2
-rw-r--r--include/crypto/salsa20.h27
-rw-r--r--include/crypto/sha3.h6
-rw-r--r--include/crypto/skcipher.h11
-rw-r--r--include/drm/drm_file.h2
-rw-r--r--include/dt-bindings/gpio/gpio.h6
-rw-r--r--include/linux/acpi.h21
-rw-r--r--include/linux/arch_topology.h2
-rw-r--r--include/linux/arm_sdei.h79
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/backing-dev.h2
-rw-r--r--include/linux/bio.h24
-rw-r--r--include/linux/bitfield.h46
-rw-r--r--include/linux/blk-cgroup.h8
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h28
-rw-r--r--include/linux/blkdev.h172
-rw-r--r--include/linux/bpf.h130
-rw-r--r--include/linux/bpf_types.h2
-rw-r--r--include/linux/bpf_verifier.h63
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/bvec.h9
-rw-r--r--include/linux/can/dev.h7
-rw-r--r--include/linux/cgroup-defs.h2
-rw-r--r--include/linux/compat.h100
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/completion.h1
-rw-r--r--include/linux/cper.h48
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpu_cooling.h75
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpuidle.h40
-rw-r--r--include/linux/crash_core.h2
-rw-r--r--include/linux/crc-ccitt.h7
-rw-r--r--include/linux/crypto.h10
-rw-r--r--include/linux/delayacct.h8
-rw-r--r--include/linux/device-mapper.h56
-rw-r--r--include/linux/device.h8
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/dma-direct.h47
-rw-r--r--include/linux/dma-mapping.h23
-rw-r--r--include/linux/dsa/lan9303.h3
-rw-r--r--include/linux/efi.h50
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/error-injection.h27
-rw-r--r--include/linux/errseq.h2
-rw-r--r--include/linux/eventfd.h14
-rw-r--r--include/linux/f2fs_fs.h14
-rw-r--r--include/linux/filter.h40
-rw-r--r--include/linux/fs.h34
-rw-r--r--include/linux/fscache.h2
-rw-r--r--include/linux/ftrace.h14
-rw-r--r--include/linux/fwnode.h4
-rw-r--r--include/linux/genetlink.h3
-rw-r--r--include/linux/genhd.h5
-rw-r--r--include/linux/gpio.h10
-rw-r--r--include/linux/gpio/consumer.h25
-rw-r--r--include/linux/gpio/driver.h3
-rw-r--r--include/linux/gpio/machine.h4
-rw-r--r--include/linux/hid.h22
-rw-r--r--include/linux/hrtimer.h113
-rw-r--r--include/linux/hugetlb.h21
-rw-r--r--include/linux/hyperv.h22
-rw-r--r--include/linux/if_link.h2
-rw-r--r--include/linux/if_macvlan.h2
-rw-r--r--include/linux/if_tap.h6
-rw-r--r--include/linux/if_tun.h21
-rw-r--r--include/linux/iio/adc/stm32-dfsdm-adc.h18
-rw-r--r--include/linux/iio/consumer.h37
-rw-r--r--include/linux/iio/hw-consumer.h21
-rw-r--r--include/linux/iio/iio.h32
-rw-r--r--include/linux/iio/machine.h7
-rw-r--r--include/linux/iio/trigger.h3
-rw-r--r--include/linux/iio/types.h28
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/init_task.h258
-rw-r--r--include/linux/integrity.h1
-rw-r--r--include/linux/irq_work.h11
-rw-r--r--include/linux/irqflags.h6
-rw-r--r--include/linux/iversion.h337
-rw-r--r--include/linux/jump_label.h7
-rw-r--r--include/linux/kobject.h3
-rw-r--r--include/linux/kobject_ns.h3
-rw-r--r--include/linux/lightnvm.h125
-rw-r--r--include/linux/livepatch.h4
-rw-r--r--include/linux/lockd/lockd.h9
-rw-r--r--include/linux/lockdep.h9
-rw-r--r--include/linux/mdio.h8
-rw-r--r--include/linux/memcontrol.h165
-rw-r--r--include/linux/mfd/cros_ec.h4
-rw-r--r--include/linux/mfd/palmas.h3
-rw-r--r--include/linux/mfd/rave-sp.h60
-rw-r--r--include/linux/mfd/stm32-lptimer.h6
-rw-r--r--include/linux/mfd/stm32-timers.h4
-rw-r--r--include/linux/mfd/tmio.h20
-rw-r--r--include/linux/mlx5/device.h16
-rw-r--r--include/linux/mlx5/driver.h62
-rw-r--r--include/linux/mlx5/fs.h4
-rw-r--r--include/linux/mlx5/mlx5_ifc.h76
-rw-r--r--include/linux/mlx5/qp.h12
-rw-r--r--include/linux/mlx5/transobj.h23
-rw-r--r--include/linux/mlx5/vport.h4
-rw-r--r--include/linux/mm.h30
-rw-r--r--include/linux/mm_types.h154
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmc/slot-gpio.h1
-rw-r--r--include/linux/mmu_notifier.h30
-rw-r--r--include/linux/mmzone.h12
-rw-r--r--include/linux/module.h15
-rw-r--r--include/linux/mtd/map.h130
-rw-r--r--include/linux/mtd/mtd.h28
-rw-r--r--include/linux/mtd/rawnand.h443
-rw-r--r--include/linux/mtd/spi-nor.h12
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/net_dim.h380
-rw-r--r--include/linux/netdev_features.h3
-rw-r--r--include/linux/netdevice.h54
-rw-r--r--include/linux/netfilter.h116
-rw-r--r--include/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/linux/netfilter/ipset/ip_set_counter.h25
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/x_tables.h2
-rw-r--r--include/linux/netfilter_defs.h12
-rw-r--r--include/linux/netfilter_ipv4.h46
-rw-r--r--include/linux/netfilter_ipv6.h19
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nfs4.h12
-rw-r--r--include/linux/nubus.h189
-rw-r--r--include/linux/nvme.h22
-rw-r--r--include/linux/of.h7
-rw-r--r--include/linux/of_gpio.h2
-rw-r--r--include/linux/omap-gpmc.h28
-rw-r--r--include/linux/page-flags.h5
-rw-r--r--include/linux/pagevec.h6
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/percpu-refcount.h6
-rw-r--r--include/linux/percpu_counter.h6
-rw-r--r--include/linux/phy.h145
-rw-r--r--include/linux/phy_fixed.h9
-rw-r--r--include/linux/phylink.h201
-rw-r--r--include/linux/pinctrl/pinconf-generic.h2
-rw-r--r--include/linux/platform_data/mtd-onenand-omap2.h34
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h6
-rw-r--r--include/linux/pm.h16
-rw-r--r--include/linux/pm_wakeup.h7
-rw-r--r--include/linux/poll.h10
-rw-r--r--include/linux/posix-clock.h2
-rw-r--r--include/linux/posix-timers.h25
-rw-r--r--include/linux/posix_acl.h7
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/proc_ns.h3
-rw-r--r--include/linux/property.h23
-rw-r--r--include/linux/psci.h4
-rw-r--r--include/linux/ptr_ring.h71
-rw-r--r--include/linux/qed/common_hsi.h1264
-rw-r--r--include/linux/qed/eth_common.h396
-rw-r--r--include/linux/qed/fcoe_common.h940
-rw-r--r--include/linux/qed/iscsi_common.h1585
-rw-r--r--include/linux/qed/iwarp_common.h17
-rw-r--r--include/linux/qed/qed_eth_if.h38
-rw-r--r--include/linux/qed/qed_if.h36
-rw-r--r--include/linux/qed/qed_iscsi_if.h2
-rw-r--r--include/linux/qed/qed_ll2_if.h2
-rw-r--r--include/linux/qed/rdma_common.h25
-rw-r--r--include/linux/qed/roce_common.h15
-rw-r--r--include/linux/qed/storage_common.h91
-rw-r--r--include/linux/qed/tcp_common.h165
-rw-r--r--include/linux/rcupdate.h25
-rw-r--r--include/linux/rcutiny.h1
-rw-r--r--include/linux/rcutree.h1
-rw-r--r--include/linux/refcount.h2
-rw-r--r--include/linux/regmap.h50
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/regulator/machine.h37
-rw-r--r--include/linux/rhashtable.h38
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rpmsg.h4
-rw-r--r--include/linux/rtnetlink.h18
-rw-r--r--include/linux/rtsx_common.h (renamed from include/linux/mfd/rtsx_common.h)0
-rw-r--r--include/linux/rtsx_pci.h (renamed from include/linux/mfd/rtsx_pci.h)236
-rw-r--r--include/linux/rtsx_usb.h (renamed from include/linux/mfd/rtsx_usb.h)0
-rw-r--r--include/linux/scatterlist.h11
-rw-r--r--include/linux/sched.h14
-rw-r--r--include/linux/sched/cpufreq.h2
-rw-r--r--include/linux/sched/mm.h24
-rw-r--r--include/linux/sched/signal.h28
-rw-r--r--include/linux/sched/task_stack.h2
-rw-r--r--include/linux/sched/topology.h12
-rw-r--r--include/linux/scif.h4
-rw-r--r--include/linux/sctp.h37
-rw-r--r--include/linux/seccomp.h8
-rw-r--r--include/linux/seqlock.h3
-rw-r--r--include/linux/serdev.h17
-rw-r--r--include/linux/serial_core.h8
-rw-r--r--include/linux/sfp.h94
-rw-r--r--include/linux/sh_eth.h4
-rw-r--r--include/linux/shmem_fs.h6
-rw-r--r--include/linux/signal.h15
-rw-r--r--include/linux/skb_array.h7
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/linux/sound.h2
-rw-r--r--include/linux/spinlock.h6
-rw-r--r--include/linux/srcu.h4
-rw-r--r--include/linux/srcutree.h8
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/xprtrdma.h2
-rw-r--r--include/linux/suspend.h28
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/swapops.h21
-rw-r--r--include/linux/swiotlb.h12
-rw-r--r--include/linux/sysfs.h14
-rw-r--r--include/linux/tcp.h13
-rw-r--r--include/linux/torture.h8
-rw-r--r--include/linux/tpm.h39
-rw-r--r--include/linux/tpm_eventlog.h (renamed from drivers/char/tpm/tpm_eventlog.h)34
-rw-r--r--include/linux/trace_events.h7
-rw-r--r--include/linux/tracepoint.h5
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/tty_ldisc.h2
-rw-r--r--include/linux/usb.h7
-rw-r--r--include/linux/usb/gadget.h2
-rw-r--r--include/linux/usb/of.h21
-rw-r--r--include/linux/usb/pd.h2
-rw-r--r--include/linux/usb/pd_vdo.h2
-rw-r--r--include/linux/usb/renesas_usbhs.h9
-rw-r--r--include/linux/usb/tcpm.h16
-rw-r--r--include/linux/visorbus.h (renamed from drivers/staging/unisys/include/visorbus.h)178
-rw-r--r--include/linux/vmstat.h17
-rw-r--r--include/linux/w1-gpio.h9
-rw-r--r--include/linux/wait.h10
-rw-r--r--include/linux/zpool.h2
-rw-r--r--include/media/lirc_dev.h2
-rw-r--r--include/media/media-devnode.h2
-rw-r--r--include/media/soc_camera.h2
-rw-r--r--include/media/v4l2-ctrls.h2
-rw-r--r--include/media/v4l2-dev.h2
-rw-r--r--include/media/v4l2-mem2mem.h4
-rw-r--r--include/media/videobuf-core.h2
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/media/videobuf2-v4l2.h5
-rw-r--r--include/misc/cxl.h2
-rw-r--r--include/net/act_api.h15
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/arp.h3
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/caif/cfpkt.h27
-rw-r--r--include/net/cfg80211.h19
-rw-r--r--include/net/devlink.h115
-rw-r--r--include/net/dn_route.h1
-rw-r--r--include/net/dsa.h61
-rw-r--r--include/net/dst.h47
-rw-r--r--include/net/erspan.h240
-rw-r--r--include/net/gen_stats.h3
-rw-r--r--include/net/inet_connection_sock.h4
-rw-r--r--include/net/inet_hashtables.h29
-rw-r--r--include/net/inet_sock.h25
-rw-r--r--include/net/inet_timewait_sock.h4
-rw-r--r--include/net/ip.h9
-rw-r--r--include/net/ip6_fib.h20
-rw-r--r--include/net/ip6_route.h11
-rw-r--r--include/net/ip6_tunnel.h4
-rw-r--r--include/net/ip_tunnels.h5
-rw-r--r--include/net/ip_vs.h3
-rw-r--r--include/net/ipv6.h20
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--include/net/mac80211.h10
-rw-r--r--include/net/net_namespace.h18
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h12
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h12
-rw-r--r--include/net/netfilter/nf_conntrack_count.h17
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h19
-rw-r--r--include/net/netfilter/nf_flow_table.h122
-rw-r--r--include/net/netfilter/nf_queue.h2
-rw-r--r--include/net/netfilter/nf_tables.h129
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h27
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h29
-rw-r--r--include/net/netns/can.h4
-rw-r--r--include/net/netns/core.h5
-rw-r--r--include/net/netns/netfilter.h12
-rw-r--r--include/net/netns/nftables.h8
-rw-r--r--include/net/netns/sctp.h5
-rw-r--r--include/net/pkt_cls.h115
-rw-r--r--include/net/pkt_sched.h17
-rw-r--r--include/net/route.h2
-rw-r--r--include/net/rtnetlink.h4
-rw-r--r--include/net/sch_generic.h152
-rw-r--r--include/net/sctp/constants.h9
-rw-r--r--include/net/sctp/sctp.h8
-rw-r--r--include/net/sctp/sm.h18
-rw-r--r--include/net/sctp/stream_interleave.h61
-rw-r--r--include/net/sctp/structs.h70
-rw-r--r--include/net/sctp/ulpevent.h23
-rw-r--r--include/net/sctp/ulpqueue.h10
-rw-r--r--include/net/sock.h63
-rw-r--r--include/net/tc_act/tc_csum.h16
-rw-r--r--include/net/tc_act/tc_mirred.h6
-rw-r--r--include/net/tcp.h54
-rw-r--r--include/net/tls.h4
-rw-r--r--include/net/udp.h2
-rw-r--r--include/net/vxlan.h4
-rw-r--r--include/net/wext.h4
-rw-r--r--include/net/xdp.h48
-rw-r--r--include/net/xfrm.h79
-rw-r--r--include/rdma/ib_addr.h38
-rw-r--r--include/rdma/ib_sa.h10
-rw-r--r--include/rdma/ib_verbs.h64
-rw-r--r--include/rdma/opa_addr.h16
-rw-r--r--include/rdma/rdma_cm.h19
-rw-r--r--include/rdma/rdma_cm_ib.h8
-rw-r--r--include/rdma/rdma_vt.h31
-rw-r--r--include/rdma/restrack.h157
-rw-r--r--include/scsi/libsas.h30
-rw-r--r--include/scsi/scsi_cmnd.h1
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/scsi/scsi_proto.h1
-rw-r--r--include/scsi/scsi_transport_fc.h4
-rw-r--r--include/scsi/scsi_transport_sas.h1
-rw-r--r--include/scsi/srp.h17
-rw-r--r--include/sound/hdaudio_ext.h4
-rw-r--r--include/sound/hwdep.h2
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/pcm.h8
-rw-r--r--include/sound/rt5514.h2
-rw-r--r--include/sound/rt5645.h3
-rw-r--r--include/sound/soc-acpi-intel-match.h1
-rw-r--r--include/sound/soc-acpi.h14
-rw-r--r--include/sound/soc-dai.h5
-rw-r--r--include/sound/soc.h9
-rw-r--r--include/trace/events/bridge.h4
-rw-r--r--include/trace/events/btrfs.h1
-rw-r--r--include/trace/events/f2fs.h3
-rw-r--r--include/trace/events/net_probe_common.h44
-rw-r--r--include/trace/events/rcu.h75
-rw-r--r--include/trace/events/rdma.h129
-rw-r--r--include/trace/events/rpcrdma.h890
-rw-r--r--include/trace/events/sctp.h99
-rw-r--r--include/trace/events/sock.h117
-rw-r--r--include/trace/events/sunrpc.h12
-rw-r--r--include/trace/events/tcp.h76
-rw-r--r--include/trace/events/thermal.h10
-rw-r--r--include/trace/events/timer.h37
-rw-r--r--include/trace/events/vmscan.h23
-rw-r--r--include/uapi/asm-generic/poll.h44
-rw-r--r--include/uapi/asm-generic/siginfo.h109
-rw-r--r--include/uapi/linux/arm_sdei.h73
-rw-r--r--include/uapi/linux/batadv_packet.h (renamed from net/batman-adv/packet.h)269
-rw-r--r--include/uapi/linux/batman_adv.h27
-rw-r--r--include/uapi/linux/bpf.h116
-rw-r--r--include/uapi/linux/bpf_common.h7
-rw-r--r--include/uapi/linux/btrfs.h11
-rw-r--r--include/uapi/linux/btrfs_tree.h2
-rw-r--r--include/uapi/linux/can/netlink.h1
-rw-r--r--include/uapi/linux/devlink.h25
-rw-r--r--include/uapi/linux/erspan.h52
-rw-r--r--include/uapi/linux/ethtool.h1
-rw-r--r--include/uapi/linux/fs.h6
-rw-r--r--include/uapi/linux/gfs2_ondisk.h62
-rw-r--r--include/uapi/linux/if_ether.h4
-rw-r--r--include/uapi/linux/if_link.h5
-rw-r--r--include/uapi/linux/if_macsec.h9
-rw-r--r--include/uapi/linux/if_tun.h2
-rw-r--r--include/uapi/linux/if_tunnel.h3
-rw-r--r--include/uapi/linux/inet_diag.h2
-rw-r--r--include/uapi/linux/kvm.h4
-rw-r--r--include/uapi/linux/l2tp.h6
-rw-r--r--include/uapi/linux/libc-compat.h61
-rw-r--r--include/uapi/linux/lightnvm.h9
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h8
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h76
-rw-r--r--include/uapi/linux/netfilter/xt_connlimit.h2
-rw-r--r--include/uapi/linux/netfilter_arp.h3
-rw-r--r--include/uapi/linux/netfilter_decnet.h4
-rw-r--r--include/uapi/linux/netfilter_ipv4.h1
-rw-r--r--include/uapi/linux/netfilter_ipv6.h1
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_srh.h57
-rw-r--r--include/uapi/linux/nfs.h1
-rw-r--r--include/uapi/linux/nl80211.h4
-rw-r--r--include/uapi/linux/nubus.h23
-rw-r--r--include/uapi/linux/openvswitch.h2
-rw-r--r--include/uapi/linux/perf_event.h32
-rw-r--r--include/uapi/linux/ptrace.h6
-rw-r--r--include/uapi/linux/rtnetlink.h12
-rw-r--r--include/uapi/linux/sched.h5
-rw-r--r--include/uapi/linux/sctp.h3
-rw-r--r--include/uapi/linux/tipc.h7
-rw-r--r--include/uapi/linux/types.h6
-rw-r--r--include/uapi/linux/usb/ch9.h4
-rw-r--r--include/uapi/linux/usbdevice_fs.h2
-rw-r--r--include/uapi/linux/virtio_net.h13
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h9
-rw-r--r--include/uapi/rdma/ib_user_verbs.h11
-rw-r--r--include/uapi/rdma/mlx4-abi.h7
-rw-r--r--include/uapi/rdma/mlx5-abi.h53
-rw-r--r--include/uapi/rdma/rdma_netlink.h49
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h12
-rw-r--r--include/uapi/sound/asound.h9
-rw-r--r--include/uapi/sound/snd_sst_tokens.h17
-rw-r--r--init/Kconfig8
-rw-r--r--init/Makefile2
-rw-r--r--init/init_task.c170
-rw-r--r--ipc/mqueue.c246
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/bpf/Makefile2
-rw-r--r--kernel/bpf/arraymap.c104
-rw-r--r--kernel/bpf/cgroup.c15
-rw-r--r--kernel/bpf/core.c422
-rw-r--r--kernel/bpf/cpumap.c31
-rw-r--r--kernel/bpf/devmap.c8
-rw-r--r--kernel/bpf/disasm.c63
-rw-r--r--kernel/bpf/disasm.h29
-rw-r--r--kernel/bpf/hashtab.c103
-rw-r--r--kernel/bpf/inode.c90
-rw-r--r--kernel/bpf/lpm_trie.c98
-rw-r--r--kernel/bpf/offload.c430
-rw-r--r--kernel/bpf/sockmap.c27
-rw-r--r--kernel/bpf/stackmap.c34
-rw-r--r--kernel/bpf/syscall.c216
-rw-r--r--kernel/bpf/verifier.c1540
-rw-r--r--kernel/cgroup/cgroup-v1.c6
-rw-r--r--kernel/cgroup/cgroup.c21
-rw-r--r--kernel/configs/nopm.config15
-rw-r--r--kernel/crash_core.c2
-rw-r--r--kernel/debug/kdb/kdb_main.c10
-rw-r--r--kernel/debug/kdb/kdb_private.h2
-rw-r--r--kernel/delayacct.c42
-rw-r--r--kernel/events/callchain.c15
-rw-r--r--kernel/events/core.c116
-rw-r--r--kernel/events/internal.h4
-rw-r--r--kernel/events/uprobes.c12
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/fail_function.c349
-rw-r--r--kernel/fork.c448
-rw-r--r--kernel/futex.c92
-rw-r--r--kernel/irq/Kconfig10
-rw-r--r--kernel/irq/affinity.c30
-rw-r--r--kernel/irq/irqdomain.c118
-rw-r--r--kernel/irq/matrix.c20
-rw-r--r--kernel/irq_work.c2
-rw-r--r--kernel/jump_label.c12
-rw-r--r--kernel/livepatch/core.c76
-rw-r--r--kernel/livepatch/transition.c116
-rw-r--r--kernel/livepatch/transition.h2
-rw-r--r--kernel/locking/lockdep.c91
-rw-r--r--kernel/locking/locktorture.c108
-rw-r--r--kernel/locking/qspinlock.c12
-rw-r--r--kernel/locking/rtmutex.c26
-rw-r--r--kernel/locking/rtmutex_common.h1
-rw-r--r--kernel/module.c17
-rw-r--r--kernel/padata.c1
-rw-r--r--kernel/pid.c22
-rw-r--r--kernel/power/main.c29
-rw-r--r--kernel/power/snapshot.c6
-rw-r--r--kernel/power/swap.c6
-rw-r--r--kernel/printk/printk.c4
-rw-r--r--kernel/ptrace.c9
-rw-r--r--kernel/rcu/rcu.h27
-rw-r--r--kernel/rcu/rcuperf.c6
-rw-r--r--kernel/rcu/rcutorture.c12
-rw-r--r--kernel/rcu/srcutree.c109
-rw-r--r--kernel/rcu/tree.c355
-rw-r--r--kernel/rcu/tree.h5
-rw-r--r--kernel/rcu/tree_plugin.h13
-rw-r--r--kernel/rcu/update.c2
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/sched/completion.c5
-rw-r--r--kernel/sched/core.c76
-rw-r--r--kernel/sched/cpufreq_schedutil.c93
-rw-r--r--kernel/sched/deadline.c143
-rw-r--r--kernel/sched/fair.c43
-rw-r--r--kernel/sched/membarrier.c2
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h112
-rw-r--r--kernel/seccomp.c108
-rw-r--r--kernel/signal.c354
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/sysctl.c7
-rw-r--r--kernel/time/hrtimer.c654
-rw-r--r--kernel/time/posix-clock.c6
-rw-r--r--kernel/time/posix-cpu-timers.c25
-rw-r--r--kernel/time/posix-timers.c2
-rw-r--r--kernel/time/tick-internal.h13
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/timer.c92
-rw-r--r--kernel/torture.c39
-rw-r--r--kernel/trace/Kconfig11
-rw-r--r--kernel/trace/bpf_trace.c59
-rw-r--r--kernel/trace/ftrace.c29
-rw-r--r--kernel/trace/ring_buffer.c63
-rw-r--r--kernel/trace/trace.c51
-rw-r--r--kernel/trace/trace_benchmark.c2
-rw-r--r--kernel/trace/trace_events.c16
-rw-r--r--kernel/trace/trace_events_trigger.c13
-rw-r--r--kernel/trace/trace_functions.c49
-rw-r--r--kernel/trace/trace_kprobe.c61
-rw-r--r--kernel/trace/trace_probe.h12
-rw-r--r--kernel/tracepoint.c9
-rw-r--r--kernel/workqueue.c80
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Kconfig.debug16
-rw-r--r--lib/Makefile5
-rw-r--r--lib/assoc_array.c37
-rw-r--r--lib/bucket_locks.c54
-rw-r--r--lib/chacha20.c71
-rw-r--r--lib/crc-ccitt.c58
-rw-r--r--lib/dma-direct.c156
-rw-r--r--lib/dma-noop.c68
-rw-r--r--lib/error-inject.c242
-rw-r--r--lib/errseq.c37
-rw-r--r--lib/kobject.c6
-rw-r--r--lib/kobject_uevent.c3
-rw-r--r--lib/mpi/longlong.h18
-rw-r--r--lib/percpu-refcount.c8
-rw-r--r--lib/rhashtable.c160
-rw-r--r--lib/sbitmap.c2
-rw-r--r--lib/scatterlist.c127
-rw-r--r--lib/swiotlb.c205
-rw-r--r--lib/test_bpf.c123
-rw-r--r--lib/test_firmware.c17
-rw-r--r--lib/test_kmod.c14
-rw-r--r--lib/test_rhashtable.c6
-rw-r--r--lib/usercopy.c2
-rw-r--r--lib/uuid.c34
-rw-r--r--lib/vsprintf.c5
-rw-r--r--mm/Kconfig7
-rw-r--r--mm/compaction.c2
-rw-r--r--mm/debug.c28
-rw-r--r--mm/fadvise.c10
-rw-r--r--mm/filemap.c1
-rw-r--r--mm/gup.c46
-rw-r--r--mm/hmm.c4
-rw-r--r--mm/huge_memory.c82
-rw-r--r--mm/hugetlb.c377
-rw-r--r--mm/hwpoison-inject.c2
-rw-r--r--mm/interval_tree.c2
-rw-r--r--mm/khugepaged.c15
-rw-r--r--mm/kmemleak.c3
-rw-r--r--mm/ksm.c9
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memcontrol.c273
-rw-r--r--mm/memory-failure.c48
-rw-r--r--mm/memory.c90
-rw-r--r--mm/memory_hotplug.c9
-rw-r--r--mm/mempolicy.c39
-rw-r--r--mm/migrate.c3
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/mmu_notifier.c31
-rw-r--r--mm/mprotect.c11
-rw-r--r--mm/nommu.c7
-rw-r--r--mm/oom_kill.c21
-rw-r--r--mm/page_alloc.c176
-rw-r--r--mm/page_ext.c2
-rw-r--r--mm/page_io.c4
-rw-r--r--mm/page_owner.c21
-rw-r--r--mm/page_vma_mapped.c66
-rw-r--r--mm/pgtable-generic.c6
-rw-r--r--mm/shmem.c59
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slab.h3
-rw-r--r--mm/slab_common.c56
-rw-r--r--mm/slub.c12
-rw-r--r--mm/sparse.c8
-rw-r--r--mm/swap.c27
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/truncate.c23
-rw-r--r--mm/util.c36
-rw-r--r--mm/vmscan.c89
-rw-r--r--mm/zpool.c25
-rw-r--r--mm/zsmalloc.c32
-rw-r--r--mm/zswap.c91
-rw-r--r--net/8021q/vlan.c7
-rw-r--r--net/8021q/vlanproc.c2
-rw-r--r--net/9p/trans_fd.c60
-rw-r--r--net/9p/trans_xen.c4
-rw-r--r--net/Kconfig19
-rw-r--r--net/Makefile1
-rw-r--r--net/appletalk/aarp.c1
-rw-r--r--net/appletalk/atalk_proc.c3
-rw-r--r--net/atm/br2684.c1
-rw-r--r--net/atm/common.c6
-rw-r--r--net/atm/common.h2
-rw-r--r--net/atm/lec.c1
-rw-r--r--net/atm/mpc.c9
-rw-r--r--net/atm/mpoa_caches.c48
-rw-r--r--net/atm/mpoa_caches.h9
-rw-r--r--net/atm/mpoa_proc.c16
-rw-r--r--net/atm/proc.c1
-rw-r--r--net/ax25/af_ax25.c1
-rw-r--r--net/ax25/ax25_route.c1
-rw-r--r--net/ax25/ax25_uid.c1
-rw-r--r--net/batman-adv/Kconfig17
-rw-r--r--net/batman-adv/Makefile2
-rw-r--r--net/batman-adv/bat_algo.c35
-rw-r--r--net/batman-adv/bat_algo.h1
-rw-r--r--net/batman-adv/bat_iv_ogm.c107
-rw-r--r--net/batman-adv/bat_iv_ogm.h1
-rw-r--r--net/batman-adv/bat_v.c51
-rw-r--r--net/batman-adv/bat_v.h1
-rw-r--r--net/batman-adv/bat_v_elp.c29
-rw-r--r--net/batman-adv/bat_v_elp.h1
-rw-r--r--net/batman-adv/bat_v_ogm.c39
-rw-r--r--net/batman-adv/bat_v_ogm.h1
-rw-r--r--net/batman-adv/bitarray.c3
-rw-r--r--net/batman-adv/bitarray.h10
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c114
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h5
-rw-r--r--net/batman-adv/debugfs.c30
-rw-r--r--net/batman-adv/debugfs.h1
-rw-r--r--net/batman-adv/distributed-arp-table.c80
-rw-r--r--net/batman-adv/distributed-arp-table.h9
-rw-r--r--net/batman-adv/fragmentation.c25
-rw-r--r--net/batman-adv/fragmentation.h3
-rw-r--r--net/batman-adv/gateway_client.c67
-rw-r--r--net/batman-adv/gateway_client.h1
-rw-r--r--net/batman-adv/gateway_common.c30
-rw-r--r--net/batman-adv/gateway_common.h6
-rw-r--r--net/batman-adv/hard-interface.c67
-rw-r--r--net/batman-adv/hard-interface.h59
-rw-r--r--net/batman-adv/hash.c20
-rw-r--r--net/batman-adv/hash.h28
-rw-r--r--net/batman-adv/icmp_socket.c19
-rw-r--r--net/batman-adv/icmp_socket.h1
-rw-r--r--net/batman-adv/log.c21
-rw-r--r--net/batman-adv/log.h62
-rw-r--r--net/batman-adv/main.c54
-rw-r--r--net/batman-adv/main.h127
-rw-r--r--net/batman-adv/multicast.c83
-rw-r--r--net/batman-adv/multicast.h17
-rw-r--r--net/batman-adv/netlink.c27
-rw-r--r--net/batman-adv/netlink.h1
-rw-r--r--net/batman-adv/network-coding.c126
-rw-r--r--net/batman-adv/network-coding.h1
-rw-r--r--net/batman-adv/originator.c154
-rw-r--r--net/batman-adv/originator.h47
-rw-r--r--net/batman-adv/routing.c56
-rw-r--r--net/batman-adv/routing.h1
-rw-r--r--net/batman-adv/send.c66
-rw-r--r--net/batman-adv/send.h8
-rw-r--r--net/batman-adv/soft-interface.c66
-rw-r--r--net/batman-adv/soft-interface.h1
-rw-r--r--net/batman-adv/sysfs.c58
-rw-r--r--net/batman-adv/sysfs.h14
-rw-r--r--net/batman-adv/tp_meter.c77
-rw-r--r--net/batman-adv/tp_meter.h1
-rw-r--r--net/batman-adv/translation-table.c234
-rw-r--r--net/batman-adv/translation-table.h1
-rw-r--r--net/batman-adv/tvlv.c43
-rw-r--r--net/batman-adv/tvlv.h1
-rw-r--r--net/batman-adv/types.h1996
-rw-r--r--net/bluetooth/af_bluetooth.c44
-rw-r--r--net/bluetooth/cmtp/capi.c1
-rw-r--r--net/bluetooth/hci_debugfs.c201
-rw-r--r--net/bluetooth/hci_request.c64
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bluetooth/l2cap_core.c20
-rw-r--r--net/bridge/br_device.c10
-rw-r--r--net/bridge/br_fdb.c392
-rw-r--r--net/bridge/br_mdb.c6
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_nf_core.c1
-rw-r--r--net/bridge/br_private.h18
-rw-r--r--net/bridge/br_switchdev.c8
-rw-r--r--net/bridge/br_sysfs_br.c13
-rw-r--r--net/bridge/netfilter/Kconfig2
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/bridge/netfilter/nf_tables_bridge.c120
-rw-r--r--net/caif/caif_dev.c5
-rw-r--r--net/caif/caif_socket.c4
-rw-r--r--net/caif/caif_usb.c4
-rw-r--r--net/caif/cfcnfg.c10
-rw-r--r--net/caif/cfctrl.c54
-rw-r--r--net/caif/cfpkt_skbuff.c1
-rw-r--r--net/caif/chnl_net.c1
-rw-r--r--net/can/Kconfig2
-rw-r--r--net/can/af_can.c56
-rw-r--r--net/can/af_can.h2
-rw-r--r--net/can/bcm.c1
-rw-r--r--net/can/gw.c14
-rw-r--r--net/can/proc.c14
-rw-r--r--net/can/raw.c2
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c8
-rw-r--r--net/core/dev.c293
-rw-r--r--net/core/dev_ioctl.c132
-rw-r--r--net/core/devlink.c596
-rw-r--r--net/core/dst.c14
-rw-r--r--net/core/ethtool.c29
-rw-r--r--net/core/filter.c367
-rw-r--r--net/core/flow_dissector.c72
-rw-r--r--net/core/gen_estimator.c4
-rw-r--r--net/core/gen_stats.c9
-rw-r--r--net/core/link_watch.c2
-rw-r--r--net/core/neighbour.c5
-rw-r--r--net/core/net-procfs.c4
-rw-r--r--net/core/net-sysfs.c56
-rw-r--r--net/core/net_namespace.c83
-rw-r--r--net/core/pktgen.c281
-rw-r--r--net/core/rtnetlink.c492
-rw-r--r--net/core/skbuff.c14
-rw-r--r--net/core/sock.c58
-rw-r--r--net/core/sock_diag.c2
-rw-r--r--net/core/sock_reuseport.c4
-rw-r--r--net/core/sysctl_net_core.c58
-rw-r--r--net/core/xdp.c73
-rw-r--r--net/dccp/Kconfig17
-rw-r--r--net/dccp/Makefile5
-rw-r--r--net/dccp/ackvec.c2
-rw-r--r--net/dccp/ccids/ccid2.c3
-rw-r--r--net/dccp/dccp.h2
-rw-r--r--net/dccp/minisocks.c7
-rw-r--r--net/dccp/probe.c203
-rw-r--r--net/dccp/proto.c11
-rw-r--r--net/dccp/trace.h84
-rw-r--r--net/decnet/af_decnet.c5
-rw-r--r--net/decnet/dn_dev.c10
-rw-r--r--net/decnet/dn_fib.c6
-rw-r--r--net/decnet/dn_neigh.c1
-rw-r--r--net/decnet/dn_route.c43
-rw-r--r--net/dsa/Kconfig9
-rw-r--r--net/dsa/Makefile3
-rw-r--r--net/dsa/dsa2.c9
-rw-r--r--net/dsa/dsa_priv.h13
-rw-r--r--net/dsa/legacy.c24
-rw-r--r--net/dsa/port.c103
-rw-r--r--net/dsa/slave.c25
-rw-r--r--net/dsa/switch.c111
-rw-r--r--net/dsa/tag_brcm.c12
-rw-r--r--net/dsa/tag_mtk.c38
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c45
-rw-r--r--net/ipv4/arp.c8
-rw-r--r--net/ipv4/devinet.c61
-rw-r--r--net/ipv4/esp4.c37
-rw-r--r--net/ipv4/esp4_offload.c77
-rw-r--r--net/ipv4/fib_frontend.c8
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c8
-rw-r--r--net/ipv4/inet_diag.c8
-rw-r--r--net/ipv4/inet_hashtables.c186
-rw-r--r--net/ipv4/inet_timewait_sock.c31
-rw-r--r--net/ipv4/ip_gre.c169
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv4/ip_tunnel.c16
-rw-r--r--net/ipv4/ip_vti.c2
-rw-r--r--net/ipv4/ipconfig.c48
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/netfilter.c62
-rw-r--r--net/ipv4/netfilter/Kconfig13
-rw-r--r--net/ipv4/netfilter/Makefile9
-rw-r--r--net/ipv4/netfilter/arp_tables.c34
-rw-r--r--net/ipv4/netfilter/ip_tables.c34
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c1
-rw-r--r--net/ipv4/netfilter/iptable_filter.c6
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c5
-rw-r--r--net/ipv4/netfilter/iptable_nat.c4
-rw-r--r--net/ipv4/netfilter/iptable_raw.c37
-rw-r--r--net/ipv4/netfilter/iptable_security.c6
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c7
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c4
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nf_flow_table_ipv4.c284
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c10
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.asn1177
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c1286
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic_main.c235
-rw-r--r--net/ipv4/netfilter/nf_tables_arp.c62
-rw-r--r--net/ipv4/netfilter/nf_tables_ipv4.c83
-rw-r--r--net/ipv4/netfilter/nft_chain_nat_ipv4.c3
-rw-r--r--net/ipv4/netfilter/nft_chain_route_ipv4.c8
-rw-r--r--net/ipv4/proc.c3
-rw-r--r--net/ipv4/raw.c20
-rw-r--r--net/ipv4/route.c6
-rw-r--r--net/ipv4/tcp.c80
-rw-r--r--net/ipv4/tcp_bbr.c3
-rw-r--r--net/ipv4/tcp_diag.c2
-rw-r--r--net/ipv4/tcp_fastopen.c30
-rw-r--r--net/ipv4/tcp_input.c58
-rw-r--r--net/ipv4/tcp_ipv4.c5
-rw-r--r--net/ipv4/tcp_metrics.c7
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/tcp_nv.c4
-rw-r--r--net/ipv4/tcp_offload.c3
-rw-r--r--net/ipv4/tcp_output.c18
-rw-r--r--net/ipv4/tcp_probe.c301
-rw-r--r--net/ipv4/tcp_timer.c39
-rw-r--r--net/ipv4/udp.c66
-rw-r--r--net/ipv4/udp_offload.c3
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c8
-rw-r--r--net/ipv6/addrconf.c90
-rw-r--r--net/ipv6/addrlabel.c25
-rw-r--r--net/ipv6/af_inet6.c11
-rw-r--r--net/ipv6/anycast.c1
-rw-r--r--net/ipv6/datagram.c3
-rw-r--r--net/ipv6/esp6.c39
-rw-r--r--net/ipv6/esp6_offload.c84
-rw-r--r--net/ipv6/exthdrs.c9
-rw-r--r--net/ipv6/ila/ila_xlat.c4
-rw-r--r--net/ipv6/inet6_hashtables.c77
-rw-r--r--net/ipv6/ip6_fib.c149
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_gre.c652
-rw-r--r--net/ipv6/ip6_output.c27
-rw-r--r--net/ipv6/ip6_tunnel.c26
-rw-r--r--net/ipv6/ip6_vti.c22
-rw-r--r--net/ipv6/ip6mr.c12
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/mcast.c4
-rw-r--r--net/ipv6/ndisc.c5
-rw-r--r--net/ipv6/netfilter.c44
-rw-r--r--net/ipv6/netfilter/Kconfig18
-rw-r--r--net/ipv6/netfilter/Makefile4
-rw-r--r--net/ipv6/netfilter/ip6_tables.c34
-rw-r--r--net/ipv6/netfilter/ip6t_srh.c161
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c8
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c4
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c31
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c7
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c4
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c15
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c3
-rw-r--r--net/ipv6/netfilter/nf_flow_table_ipv6.c277
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c8
-rw-r--r--net/ipv6/netfilter/nf_tables_ipv6.c82
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c3
-rw-r--r--net/ipv6/netfilter/nft_chain_route_ipv6.c3
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c12
-rw-r--r--net/ipv6/proc.c3
-rw-r--r--net/ipv6/raw.c1
-rw-r--r--net/ipv6/route.c567
-rw-r--r--net/ipv6/seg6.c4
-rw-r--r--net/ipv6/seg6_local.c2
-rw-r--r--net/ipv6/sit.c4
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/ipv6/tcpv6_offload.c3
-rw-r--r--net/ipv6/udp.c55
-rw-r--r--net/ipv6/udp_offload.c3
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c8
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/iucv/af_iucv.c6
-rw-r--r--net/kcm/kcmproc.c2
-rw-r--r--net/kcm/kcmsock.c25
-rw-r--r--net/key/af_key.c12
-rw-r--r--net/l2tp/l2tp_core.c54
-rw-r--r--net/l2tp/l2tp_core.h16
-rw-r--r--net/l2tp/l2tp_debugfs.c4
-rw-r--r--net/l2tp/l2tp_netlink.c39
-rw-r--r--net/l2tp/l2tp_ppp.c1
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/mac80211/agg-rx.c26
-rw-r--r--net/mac80211/agg-tx.c34
-rw-r--r--net/mac80211/cfg.c31
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/debugfs_sta.c4
-rw-r--r--net/mac80211/driver-ops.h3
-rw-r--r--net/mac80211/ht.c1
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/key.c12
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/mac80211/mesh_hwmp.c1
-rw-r--r--net/mac80211/mesh_pathtbl.c34
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c10
-rw-r--r--net/mac80211/offchannel.c4
-rw-r--r--net/mac80211/rx.c19
-rw-r--r--net/mac80211/tdls.c6
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/mac80211/util.c19
-rw-r--r--net/mac80211/wme.c1
-rw-r--r--net/mac80211/wpa.c16
-rw-r--r--net/mpls/af_mpls.c15
-rw-r--r--net/ncsi/ncsi-aen.c35
-rw-r--r--net/netfilter/Kconfig33
-rw-r--r--net/netfilter/Makefile9
-rw-r--r--net/netfilter/core.c263
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h10
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c8
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c8
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c8
-rw-r--r--net/netfilter/ipset/ip_set_core.c36
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h38
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c21
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c9
-rw-r--r--net/netfilter/nf_conncount.c373
-rw-r--r--net/netfilter/nf_conntrack_core.c28
-rw-r--r--net/netfilter/nf_conntrack_expect.c1
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c40
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c77
-rw-r--r--net/netfilter/nf_conntrack_netlink.c22
-rw-r--r--net/netfilter/nf_conntrack_proto.c18
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c21
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c21
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c25
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c10
-rw-r--r--net/netfilter/nf_conntrack_standalone.c14
-rw-r--r--net/netfilter/nf_flow_table.c429
-rw-r--r--net/netfilter/nf_flow_table_inet.c48
-rw-r--r--net/netfilter/nf_internals.h2
-rw-r--r--net/netfilter/nf_log.c1
-rw-r--r--net/netfilter/nf_queue.c96
-rw-r--r--net/netfilter/nf_synproxy_core.c1
-rw-r--r--net/netfilter/nf_tables_api.c1664
-rw-r--r--net/netfilter/nf_tables_inet.c88
-rw-r--r--net/netfilter/nf_tables_netdev.c87
-rw-r--r--net/netfilter/nfnetlink.c4
-rw-r--r--net/netfilter/nfnetlink_acct.c2
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c2
-rw-r--r--net/netfilter/nfnetlink_log.c1
-rw-r--r--net/netfilter/nfnetlink_queue.c10
-rw-r--r--net/netfilter/nft_cmp.c2
-rw-r--r--net/netfilter/nft_compat.c26
-rw-r--r--net/netfilter/nft_ct.c16
-rw-r--r--net/netfilter/nft_dynset.c4
-rw-r--r--net/netfilter/nft_flow_offload.c264
-rw-r--r--net/netfilter/nft_log.c4
-rw-r--r--net/netfilter/nft_masq.c2
-rw-r--r--net/netfilter/nft_meta.c45
-rw-r--r--net/netfilter/nft_nat.c2
-rw-r--r--net/netfilter/nft_redir.c2
-rw-r--r--net/netfilter/nft_rt.c15
-rw-r--r--net/netfilter/nft_set_hash.c10
-rw-r--r--net/netfilter/utils.c90
-rw-r--r--net/netfilter/x_tables.c41
-rw-r--r--net/netfilter/xt_TCPMSS.c5
-rw-r--r--net/netfilter/xt_addrtype.c15
-rw-r--r--net/netfilter/xt_bpf.c14
-rw-r--r--net/netfilter/xt_connlimit.c369
-rw-r--r--net/netfilter/xt_hashlimit.c5
-rw-r--r--net/netfilter/xt_ipcomp.c2
-rw-r--r--net/netfilter/xt_policy.c3
-rw-r--r--net/netfilter/xt_set.c119
-rw-r--r--net/netlink/af_netlink.c75
-rw-r--r--net/netlink/diag.c8
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/netrom/nr_route.c2
-rw-r--r--net/nfc/llcp_sock.c6
-rw-r--r--net/nfc/nci/uart.c2
-rw-r--r--net/openvswitch/conntrack.c4
-rw-r--r--net/openvswitch/flow.c6
-rw-r--r--net/openvswitch/flow_netlink.c47
-rw-r--r--net/openvswitch/meter.c2
-rw-r--r--net/openvswitch/vport-internal_dev.c10
-rw-r--r--net/packet/af_packet.c8
-rw-r--r--net/phonet/pn_netlink.c21
-rw-r--r--net/phonet/socket.c6
-rw-r--r--net/qrtr/qrtr.c8
-rw-r--r--net/rds/af_rds.c4
-rw-r--r--net/rds/bind.c1
-rw-r--r--net/rds/cong.c10
-rw-r--r--net/rds/connection.c27
-rw-r--r--net/rds/ib.c6
-rw-r--r--net/rds/rdma.c4
-rw-r--r--net/rds/rds.h10
-rw-r--r--net/rds/send.c37
-rw-r--r--net/rds/tcp.c86
-rw-r--r--net/rds/tcp.h3
-rw-r--r--net/rds/tcp_connect.c2
-rw-r--r--net/rds/tcp_recv.c8
-rw-r--r--net/rds/tcp_send.c9
-rw-r--r--net/rds/threads.c20
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/rose/af_rose.c1
-rw-r--r--net/rose/rose_route.c3
-rw-r--r--net/rxrpc/af_rxrpc.c4
-rw-r--r--net/rxrpc/proc.c2
-rw-r--r--net/sched/Kconfig3
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/act_bpf.c10
-rw-r--r--net/sched/act_connmark.c8
-rw-r--r--net/sched/act_csum.c74
-rw-r--r--net/sched/act_gact.c10
-rw-r--r--net/sched/act_ife.c18
-rw-r--r--net/sched/act_ipt.c18
-rw-r--r--net/sched/act_mirred.c29
-rw-r--r--net/sched/act_nat.c8
-rw-r--r--net/sched/act_pedit.c10
-rw-r--r--net/sched/act_police.c12
-rw-r--r--net/sched/act_sample.c10
-rw-r--r--net/sched/act_simple.c10
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/act_skbmod.c10
-rw-r--r--net/sched/act_tunnel_key.c10
-rw-r--r--net/sched/act_vlan.c10
-rw-r--r--net/sched/cls_api.c695
-rw-r--r--net/sched/cls_basic.c16
-rw-r--r--net/sched/cls_bpf.c67
-rw-r--r--net/sched/cls_cgroup.c12
-rw-r--r--net/sched/cls_flow.c12
-rw-r--r--net/sched/cls_flower.c54
-rw-r--r--net/sched/cls_fw.c19
-rw-r--r--net/sched/cls_matchall.c35
-rw-r--r--net/sched/cls_route.c16
-rw-r--r--net/sched/cls_rsvp.h9
-rw-r--r--net/sched/cls_tcindex.c17
-rw-r--r--net/sched/cls_u32.c133
-rw-r--r--net/sched/em_nbyte.c2
-rw-r--r--net/sched/sch_api.c314
-rw-r--r--net/sched/sch_atm.c23
-rw-r--r--net/sched/sch_cbq.c78
-rw-r--r--net/sched/sch_cbs.c31
-rw-r--r--net/sched/sch_choke.c8
-rw-r--r--net/sched/sch_codel.c8
-rw-r--r--net/sched/sch_drr.c40
-rw-r--r--net/sched/sch_dsmark.c19
-rw-r--r--net/sched/sch_fifo.c11
-rw-r--r--net/sched/sch_fq.c8
-rw-r--r--net/sched/sch_fq_codel.c13
-rw-r--r--net/sched/sch_generic.c563
-rw-r--r--net/sched/sch_gred.c13
-rw-r--r--net/sched/sch_hfsc.c28
-rw-r--r--net/sched/sch_hhf.c8
-rw-r--r--net/sched/sch_htb.c29
-rw-r--r--net/sched/sch_ingress.c110
-rw-r--r--net/sched/sch_mq.c42
-rw-r--r--net/sched/sch_mqprio.c76
-rw-r--r--net/sched/sch_multiq.c19
-rw-r--r--net/sched/sch_netem.c10
-rw-r--r--net/sched/sch_pie.c8
-rw-r--r--net/sched/sch_plug.c6
-rw-r--r--net/sched/sch_prio.c81
-rw-r--r--net/sched/sch_qfq.c22
-rw-r--r--net/sched/sch_red.c65
-rw-r--r--net/sched/sch_sfb.c20
-rw-r--r--net/sched/sch_sfq.c8
-rw-r--r--net/sched/sch_tbf.c21
-rw-r--r--net/sched/sch_teql.c3
-rw-r--r--net/sctp/Kconfig12
-rw-r--r--net/sctp/Makefile5
-rw-r--r--net/sctp/associola.c2
-rw-r--r--net/sctp/chunk.c8
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/input.c28
-rw-r--r--net/sctp/ipv6.c1
-rw-r--r--net/sctp/offload.c3
-rw-r--r--net/sctp/output.c5
-rw-r--r--net/sctp/outqueue.c16
-rw-r--r--net/sctp/probe.c244
-rw-r--r--net/sctp/proc.c7
-rw-r--r--net/sctp/sm_make_chunk.c72
-rw-r--r--net/sctp/sm_sideeffect.c51
-rw-r--r--net/sctp/sm_statefuns.c50
-rw-r--r--net/sctp/sm_statetable.c5
-rw-r--r--net/sctp/socket.c291
-rw-r--r--net/sctp/stream.c69
-rw-r--r--net/sctp/stream_interleave.c1334
-rw-r--r--net/sctp/stream_sched.c3
-rw-r--r--net/sctp/sysctl.c7
-rw-r--r--net/sctp/transport.c29
-rw-r--r--net/sctp/ulpevent.c15
-rw-r--r--net/sctp/ulpqueue.c23
-rw-r--r--net/smc/af_smc.c237
-rw-r--r--net/smc/smc.h5
-rw-r--r--net/smc/smc_cdc.c52
-rw-r--r--net/smc/smc_cdc.h1
-rw-r--r--net/smc/smc_clc.c102
-rw-r--r--net/smc/smc_clc.h34
-rw-r--r--net/smc/smc_close.c208
-rw-r--r--net/smc/smc_close.h2
-rw-r--r--net/smc/smc_core.c17
-rw-r--r--net/smc/smc_diag.c6
-rw-r--r--net/smc/smc_ib.c38
-rw-r--r--net/smc/smc_rx.c5
-rw-r--r--net/smc/smc_tx.c32
-rw-r--r--net/smc/smc_wr.c50
-rw-r--r--net/smc/smc_wr.h2
-rw-r--r--net/socket.c302
-rw-r--r--net/sunrpc/cache.c8
-rw-r--r--net/sunrpc/clnt.c16
-rw-r--r--net/sunrpc/rpc_pipe.c4
-rw-r--r--net/sunrpc/sched.c26
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c78
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c157
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c329
-rw-r--r--net/sunrpc/xprtrdma/module.c12
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c162
-rw-r--r--net/sunrpc/xprtrdma/transport.c128
-rw-r--r--net/sunrpc/xprtrdma/verbs.c280
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h116
-rw-r--r--net/sunrpc/xprtsock.c36
-rw-r--r--net/tipc/bcast.c12
-rw-r--r--net/tipc/core.h1
-rw-r--r--net/tipc/group.c387
-rw-r--r--net/tipc/group.h10
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/msg.c51
-rw-r--r--net/tipc/msg.h3
-rw-r--r--net/tipc/name_table.c57
-rw-r--r--net/tipc/name_table.h9
-rw-r--r--net/tipc/node.c26
-rw-r--r--net/tipc/server.c80
-rw-r--r--net/tipc/server.h13
-rw-r--r--net/tipc/socket.c118
-rw-r--r--net/tipc/subscr.c35
-rw-r--r--net/tipc/subscr.h2
-rw-r--r--net/tls/tls_main.c17
-rw-r--r--net/tls/tls_sw.c26
-rw-r--r--net/unix/af_unix.c16
-rw-r--r--net/vmw_vsock/af_vsock.c6
-rw-r--r--net/wireless/core.c8
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/ibss.c5
-rw-r--r--net/wireless/mlme.c6
-rw-r--r--net/wireless/nl80211.c84
-rw-r--r--net/wireless/reg.c3
-rw-r--r--net/wireless/scan.c5
-rw-r--r--net/wireless/trace.h12
-rw-r--r--net/wireless/wext-compat.c3
-rw-r--r--net/wireless/wext-core.c13
-rw-r--r--net/wireless/wext-proc.c1
-rw-r--r--net/xfrm/xfrm_device.c200
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--net/xfrm/xfrm_output.c2
-rw-r--r--net/xfrm/xfrm_policy.c152
-rw-r--r--net/xfrm/xfrm_proc.c1
-rw-r--r--net/xfrm/xfrm_replay.c5
-rw-r--r--net/xfrm/xfrm_state.c30
-rw-r--r--net/xfrm/xfrm_user.c18
-rw-r--r--samples/bpf/Makefile14
-rw-r--r--samples/bpf/tcbpf2_kern.c170
-rw-r--r--samples/bpf/test_cgrp2_attach2.c36
-rwxr-xr-xsamples/bpf/test_override_return.sh15
-rwxr-xr-xsamples/bpf/test_tunnel_bpf.sh128
-rw-r--r--samples/bpf/tracex7_kern.c16
-rw-r--r--samples/bpf/tracex7_user.c28
-rwxr-xr-xsamples/bpf/xdp2skb_meta.sh220
-rw-r--r--samples/bpf/xdp2skb_meta_kern.c105
-rw-r--r--samples/bpf/xdp_monitor_kern.c96
-rw-r--r--samples/bpf/xdp_monitor_user.c416
-rw-r--r--samples/bpf/xdp_rxq_info_kern.c96
-rw-r--r--samples/bpf/xdp_rxq_info_user.c531
-rw-r--r--samples/kobject/kobject-example.c4
-rw-r--r--samples/kobject/kset-example.c4
-rw-r--r--samples/livepatch/livepatch-callbacks-demo.c15
-rw-r--r--samples/livepatch/livepatch-sample.c15
-rw-r--r--samples/livepatch/livepatch-shadow-fix1.c15
-rw-r--r--samples/livepatch/livepatch-shadow-fix2.c15
-rw-r--r--samples/sockmap/sockmap_user.c392
-rw-r--r--scripts/Makefile.build14
-rwxr-xr-xscripts/checkpatch.pl6
-rwxr-xr-xscripts/decodecode20
-rw-r--r--scripts/gdb/linux/tasks.py2
-rw-r--r--scripts/genksyms/.gitignore1
-rw-r--r--scripts/kconfig/expr.c5
-rwxr-xr-xscripts/kernel-doc1506
-rw-r--r--scripts/mod/modpost.c9
-rwxr-xr-xscripts/tags.sh2
-rw-r--r--security/Kconfig3
-rw-r--r--security/apparmor/apparmorfs.c4
-rw-r--r--security/apparmor/domain.c9
-rw-r--r--security/apparmor/include/perms.h3
-rw-r--r--security/apparmor/ipc.c53
-rw-r--r--security/apparmor/mount.c12
-rw-r--r--security/commoncap.c21
-rw-r--r--security/integrity/evm/evm.h9
-rw-r--r--security/integrity/evm/evm_crypto.c75
-rw-r--r--security/integrity/evm/evm_main.c67
-rw-r--r--security/integrity/evm/evm_secfs.c20
-rw-r--r--security/integrity/iint.c4
-rw-r--r--security/integrity/ima/ima_api.c5
-rw-r--r--security/integrity/ima/ima_appraise.c46
-rw-r--r--security/integrity/ima/ima_crypto.c2
-rw-r--r--security/integrity/ima/ima_init.c2
-rw-r--r--security/integrity/ima/ima_main.c96
-rw-r--r--security/integrity/ima/ima_policy.c32
-rw-r--r--security/integrity/ima/ima_queue.c2
-rw-r--r--security/integrity/ima/ima_template.c11
-rw-r--r--security/integrity/integrity.h41
-rw-r--r--security/keys/keyring.c7
-rw-r--r--security/keys/trusted.c35
-rw-r--r--security/selinux/include/netlabel.h3
-rw-r--r--security/selinux/netlabel.c3
-rw-r--r--security/selinux/ss/services.c21
-rw-r--r--security/selinux/xfrm.c2
-rw-r--r--security/smack/smack.h1
-rw-r--r--security/smack/smack_access.c40
-rw-r--r--security/smack/smack_lsm.c10
-rw-r--r--security/tomoyo/audit.c2
-rw-r--r--security/tomoyo/common.c4
-rw-r--r--security/tomoyo/common.h6
-rw-r--r--security/tomoyo/securityfs_if.c2
-rw-r--r--sound/core/compress_offload.c6
-rw-r--r--sound/core/control.c19
-rw-r--r--sound/core/hwdep.c4
-rw-r--r--sound/core/info.c4
-rw-r--r--sound/core/init.c2
-rw-r--r--sound/core/oss/pcm_oss.c57
-rw-r--r--sound/core/oss/pcm_plugin.c14
-rw-r--r--sound/core/pcm_lib.c9
-rw-r--r--sound/core/pcm_misc.c19
-rw-r--r--sound/core/pcm_native.c19
-rw-r--r--sound/core/rawmidi.c4
-rw-r--r--sound/core/seq/oss/seq_oss.c4
-rw-r--r--sound/core/seq/oss/seq_oss_device.h2
-rw-r--r--sound/core/seq/oss/seq_oss_rw.c4
-rw-r--r--sound/core/seq/seq_clientmgr.c7
-rw-r--r--sound/core/seq/seq_clientmgr.h1
-rw-r--r--sound/core/seq/seq_queue.c4
-rw-r--r--sound/core/seq/seq_timer.c13
-rw-r--r--sound/core/seq/seq_timer.h2
-rw-r--r--sound/core/timer.c4
-rw-r--r--sound/drivers/aloop.c98
-rw-r--r--sound/drivers/dummy.c29
-rw-r--r--sound/firewire/bebob/bebob_hwdep.c4
-rw-r--r--sound/firewire/dice/dice-hwdep.c4
-rw-r--r--sound/firewire/digi00x/digi00x-hwdep.c4
-rw-r--r--sound/firewire/fireface/ff-hwdep.c4
-rw-r--r--sound/firewire/fireworks/fireworks_hwdep.c4
-rw-r--r--sound/firewire/motu/motu-hwdep.c4
-rw-r--r--sound/firewire/oxfw/oxfw-hwdep.c4
-rw-r--r--sound/firewire/tascam/tascam-hwdep.c4
-rw-r--r--sound/hda/ext/hdac_ext_bus.c2
-rw-r--r--sound/isa/gus/gus_dma.c5
-rw-r--r--sound/mips/hal2.c2
-rw-r--r--sound/mips/sgio2audio.c2
-rw-r--r--sound/oss/dmasound/dmasound_core.c4
-rw-r--r--sound/pci/hda/Kconfig1
-rw-r--r--sound/pci/hda/patch_cirrus.c1
-rw-r--r--sound/pci/hda/patch_realtek.c138
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c131
-rw-r--r--sound/pci/korg1212/korg1212.c1
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/amd/acp-pcm-dma.c35
-rw-r--r--sound/soc/atmel/atmel-classd.c6
-rw-r--r--sound/soc/au1x/ac97c.c6
-rw-r--r--sound/soc/bcm/bcm2835-i2s.c20
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c6
-rw-r--r--sound/soc/codecs/88pm860x-codec.c9
-rw-r--r--sound/soc/codecs/Kconfig46
-rw-r--r--sound/soc/codecs/Makefile13
-rw-r--r--sound/soc/codecs/cq93vc.c10
-rw-r--r--sound/soc/codecs/cs35l32.c18
-rw-r--r--sound/soc/codecs/cs35l34.c19
-rw-r--r--sound/soc/codecs/cs42l52.c13
-rw-r--r--sound/soc/codecs/cs42l56.c13
-rw-r--r--sound/soc/codecs/cs42l73.c15
-rw-r--r--sound/soc/codecs/cs47l24.c12
-rw-r--r--sound/soc/codecs/cx20442.c46
-rw-r--r--sound/soc/codecs/da7213.c7
-rw-r--r--sound/soc/codecs/da7218.c9
-rw-r--r--sound/soc/codecs/dmic.c24
-rw-r--r--sound/soc/codecs/hdac_hdmi.c358
-rw-r--r--sound/soc/codecs/max98373.c976
-rw-r--r--sound/soc/codecs/max98373.h212
-rw-r--r--sound/soc/codecs/max98926.c2
-rw-r--r--sound/soc/codecs/max98927.c1
-rw-r--r--sound/soc/codecs/mc13783.c9
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c8
-rw-r--r--sound/soc/codecs/nau8540.c98
-rw-r--r--sound/soc/codecs/nau8540.h20
-rw-r--r--sound/soc/codecs/nau8824.c18
-rw-r--r--sound/soc/codecs/nau8825.c101
-rw-r--r--sound/soc/codecs/nau8825.h3
-rw-r--r--sound/soc/codecs/pcm186x-i2c.c69
-rw-r--r--sound/soc/codecs/pcm186x-spi.c69
-rw-r--r--sound/soc/codecs/pcm186x.c719
-rw-r--r--sound/soc/codecs/pcm186x.h220
-rw-r--r--sound/soc/codecs/pcm512x-spi.c4
-rw-r--r--sound/soc/codecs/rl6231.c93
-rw-r--r--sound/soc/codecs/rt5514-spi.c1
-rw-r--r--sound/soc/codecs/rt5514.c85
-rw-r--r--sound/soc/codecs/rt5514.h5
-rw-r--r--sound/soc/codecs/rt5645.c187
-rw-r--r--sound/soc/codecs/rt5645.h6
-rw-r--r--sound/soc/codecs/sgtl5000.c5
-rw-r--r--sound/soc/codecs/si476x.c9
-rw-r--r--sound/soc/codecs/sn95031.c936
-rw-r--r--sound/soc/codecs/sn95031.h133
-rw-r--r--sound/soc/codecs/spdif_receiver.c5
-rw-r--r--sound/soc/codecs/spdif_transmitter.c5
-rw-r--r--sound/soc/codecs/tas5720.c61
-rw-r--r--sound/soc/codecs/tas5720.h31
-rw-r--r--sound/soc/codecs/tas6424.c707
-rw-r--r--sound/soc/codecs/tas6424.h144
-rw-r--r--sound/soc/codecs/tfa9879.c1
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c310
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h335
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c182
-rw-r--r--sound/soc/codecs/tlv320aic32x4.h308
-rw-r--r--sound/soc/codecs/tlv320aic3x.c15
-rw-r--r--sound/soc/codecs/tlv320dac33.c32
-rw-r--r--sound/soc/codecs/ts3a227e.c2
-rw-r--r--sound/soc/codecs/tscs42xx.c1456
-rw-r--r--sound/soc/codecs/tscs42xx.h2693
-rw-r--r--sound/soc/codecs/twl4030.c11
-rw-r--r--sound/soc/codecs/twl6040.c20
-rw-r--r--sound/soc/codecs/uda1380.c42
-rw-r--r--sound/soc/codecs/wm0010.c5
-rw-r--r--sound/soc/codecs/wm2000.c6
-rw-r--r--sound/soc/codecs/wm2200.c9
-rw-r--r--sound/soc/codecs/wm5102.c11
-rw-r--r--sound/soc/codecs/wm5110.c12
-rw-r--r--sound/soc/codecs/wm8350.c10
-rw-r--r--sound/soc/codecs/wm8400.c9
-rw-r--r--sound/soc/codecs/wm8903.c12
-rw-r--r--sound/soc/codecs/wm8994.c10
-rw-r--r--sound/soc/codecs/wm8997.c11
-rw-r--r--sound/soc/codecs/wm8998.c12
-rw-r--r--sound/soc/davinci/davinci-mcasp.c19
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c1
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c4
-rw-r--r--sound/soc/fsl/fsl_asrc.h2
-rw-r--r--sound/soc/fsl/fsl_dma.c4
-rw-r--r--sound/soc/fsl/fsl_ssi.c1367
-rw-r--r--sound/soc/fsl/fsl_ssi.h427
-rw-r--r--sound/soc/fsl/fsl_ssi_dbg.c59
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c1
-rw-r--r--sound/soc/intel/Kconfig115
-rw-r--r--sound/soc/intel/Makefile2
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c3
-rw-r--r--sound/soc/intel/atom/sst/sst_stream.c8
-rw-r--r--sound/soc/intel/boards/Kconfig195
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c4
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c26
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c50
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c13
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c4
-rw-r--r--sound/soc/intel/boards/haswell.c2
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c2
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c2
-rw-r--r--sound/soc/intel/boards/mfld_machine.c428
-rw-r--r--sound/soc/intel/common/sst-dsp.c4
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c2
-rw-r--r--sound/soc/intel/skylake/cnl-sst.c2
-rw-r--r--sound/soc/intel/skylake/skl-i2s.h64
-rw-r--r--sound/soc/intel/skylake/skl-messages.c22
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c158
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c14
-rw-r--r--sound/soc/intel/skylake/skl-ssp-clk.h79
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c14
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h4
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c6
-rw-r--r--sound/soc/intel/skylake/skl-sst.c2
-rw-r--r--sound/soc/intel/skylake/skl-topology.c45
-rw-r--r--sound/soc/intel/skylake/skl.c150
-rw-r--r--sound/soc/intel/skylake/skl.h22
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c552
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h15
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-common.h87
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c213
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-reg.h42
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c2
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c25
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c11
-rw-r--r--sound/soc/omap/ams-delta.c4
-rw-r--r--sound/soc/omap/mcbsp.c4
-rw-r--r--sound/soc/qcom/apq8016_sbc.c10
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c3
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c11
-rw-r--r--sound/soc/samsung/bells.c40
-rw-r--r--sound/soc/sh/rcar/core.c143
-rw-r--r--sound/soc/sh/rcar/dma.c18
-rw-r--r--sound/soc/sh/rcar/rsnd.h15
-rw-r--r--sound/soc/sh/rcar/ssi.c163
-rw-r--r--sound/soc/soc-acpi.c73
-rw-r--r--sound/soc/soc-compress.c4
-rw-r--r--sound/soc/soc-core.c145
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/soc/soc-io.c6
-rw-r--r--sound/soc/soc-ops.c4
-rw-r--r--sound/soc/soc-utils.c2
-rw-r--r--sound/soc/stm/Kconfig12
-rw-r--r--sound/soc/stm/Makefile3
-rw-r--r--sound/soc/stm/stm32_adfsdm.c347
-rw-r--r--sound/soc/stm/stm32_sai.c114
-rw-r--r--sound/soc/sunxi/sun4i-codec.c29
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c57
-rw-r--r--sound/soc/uniphier/Kconfig19
-rw-r--r--sound/soc/uniphier/Makefile3
-rw-r--r--sound/soc/uniphier/evea.c567
-rw-r--r--sound/soc/ux500/mop500.c4
-rw-r--r--sound/soc/ux500/ux500_pcm.c5
-rw-r--r--sound/sound_core.c42
-rw-r--r--sound/usb/card.c21
-rw-r--r--sound/usb/mixer.c8
-rw-r--r--sound/usb/mixer_quirks.c84
-rw-r--r--sound/usb/quirks-table.h48
-rw-r--r--sound/usb/usx2y/us122l.c47
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c32
-rw-r--r--tools/arch/alpha/include/uapi/asm/errno.h128
-rw-r--r--tools/arch/mips/include/asm/errno.h17
-rw-r--r--tools/arch/mips/include/uapi/asm/errno.h130
-rw-r--r--tools/arch/parisc/include/uapi/asm/errno.h128
-rw-r--r--tools/arch/powerpc/include/uapi/asm/errno.h10
-rw-r--r--tools/arch/s390/include/uapi/asm/unistd.h412
-rw-r--r--tools/arch/sparc/include/uapi/asm/errno.h118
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h4
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h8
-rw-r--r--tools/arch/x86/include/uapi/asm/errno.h1
-rw-r--r--tools/bpf/Makefile29
-rw-r--r--tools/bpf/bpf_jit_disasm.c14
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile30
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst118
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst16
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst12
-rw-r--r--tools/bpf/bpftool/Makefile90
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool8
-rw-r--r--tools/bpf/bpftool/cgroup.c308
-rw-r--r--tools/bpf/bpftool/common.c195
-rw-r--r--tools/bpf/bpftool/jit_disasm.c23
-rw-r--r--tools/bpf/bpftool/main.c16
-rw-r--r--tools/bpf/bpftool/main.h9
-rw-r--r--tools/bpf/bpftool/map.c11
-rw-r--r--tools/bpf/bpftool/prog.c229
-rw-r--r--tools/build/Makefile.feature4
-rw-r--r--tools/build/feature/Makefile15
-rw-r--r--tools/build/feature/test-all.c10
-rw-r--r--tools/build/feature/test-disassembler-four-args.c15
-rw-r--r--tools/build/feature/test-libopencsd.c8
-rw-r--r--tools/build/feature/test-pthread-barrier.c12
-rw-r--r--tools/gpio/gpio-event-mon.c10
-rw-r--r--tools/include/uapi/asm-generic/errno-base.h40
-rw-r--r--tools/include/uapi/asm-generic/errno.h123
-rw-r--r--tools/include/uapi/linux/bpf.h116
-rw-r--r--tools/include/uapi/linux/perf_event.h32
-rw-r--r--tools/lib/bpf/Makefile24
-rw-r--r--tools/lib/bpf/bpf.h2
-rw-r--r--tools/lib/bpf/libbpf.c200
-rw-r--r--tools/lib/traceevent/event-parse.c62
-rw-r--r--tools/lib/traceevent/event-plugin.c24
-rw-r--r--tools/lib/traceevent/kbuffer-parse.c4
-rw-r--r--tools/lib/traceevent/parse-filter.c22
-rw-r--r--tools/objtool/Makefile2
-rw-r--r--tools/objtool/check.c69
-rw-r--r--tools/objtool/check.h2
-rw-r--r--tools/objtool/elf.c4
-rw-r--r--tools/perf/Build4
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt3
-rw-r--r--tools/perf/Documentation/perf-evlist.txt4
-rw-r--r--tools/perf/Documentation/perf-inject.txt4
-rw-r--r--tools/perf/Documentation/perf-lock.txt4
-rw-r--r--tools/perf/Documentation/perf-probe.txt18
-rw-r--r--tools/perf/Documentation/perf-record.txt3
-rw-r--r--tools/perf/Documentation/perf-report.txt37
-rw-r--r--tools/perf/Documentation/perf-sched.txt4
-rw-r--r--tools/perf/Documentation/perf-script.txt47
-rw-r--r--tools/perf/Documentation/perf-timechart.txt4
-rw-r--r--tools/perf/Documentation/perf-top.txt6
-rw-r--r--tools/perf/Documentation/perf-trace.txt20
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt27
-rw-r--r--tools/perf/Documentation/tips.txt2
-rw-r--r--tools/perf/Makefile.config65
-rw-r--r--tools/perf/Makefile.perf17
-rw-r--r--tools/perf/arch/arm/util/auxtrace.c77
-rw-r--r--tools/perf/arch/arm/util/pmu.c6
-rw-r--r--tools/perf/arch/arm64/util/Build5
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c225
-rw-r--r--tools/perf/arch/arm64/util/header.c65
-rw-r--r--tools/perf/arch/arm64/util/sym-handling.c22
-rw-r--r--tools/perf/arch/common.c44
-rw-r--r--tools/perf/arch/common.h1
-rw-r--r--tools/perf/arch/powerpc/util/header.c2
-rw-r--r--tools/perf/arch/powerpc/util/sym-handling.c8
-rw-r--r--tools/perf/arch/s390/Makefile21
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c3
-rwxr-xr-xtools/perf/arch/s390/entry/syscalls/mksyscalltbl36
-rw-r--r--tools/perf/arch/x86/tests/perf-time-to-tsc.c2
-rw-r--r--tools/perf/arch/x86/util/header.c4
-rw-r--r--tools/perf/arch/x86/util/unwind-libunwind.c2
-rw-r--r--tools/perf/bench/futex-hash.c20
-rw-r--r--tools/perf/bench/futex-lock-pi.c23
-rw-r--r--tools/perf/bench/futex-requeue.c22
-rw-r--r--tools/perf/bench/futex-wake-parallel.c46
-rw-r--r--tools/perf/bench/futex-wake.c18
-rw-r--r--tools/perf/builtin-buildid-cache.c4
-rw-r--r--tools/perf/builtin-c2c.c16
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-inject.c3
-rw-r--r--tools/perf/builtin-kvm.c13
-rw-r--r--tools/perf/builtin-record.c63
-rw-r--r--tools/perf/builtin-report.c283
-rw-r--r--tools/perf/builtin-script.c266
-rw-r--r--tools/perf/builtin-stat.c230
-rw-r--r--tools/perf/builtin-top.c10
-rw-r--r--tools/perf/builtin-trace.c122
-rwxr-xr-xtools/perf/check-headers.sh11
-rw-r--r--tools/perf/perf-completion.sh47
-rw-r--r--tools/perf/perf.c4
-rw-r--r--tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json62
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv15
-rw-r--r--tools/perf/pmu-events/arch/powerpc/mapfile.csv12
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/cache.json5
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/frontend.json7
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/marked.json27
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/other.json276
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pipeline.json14
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pmc.json2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/translation.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json555
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json210
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json1156
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json389
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/frontend.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/memory.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json1156
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json383
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/frontend.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json1156
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json1050
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json280
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/other.json54
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json506
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json365
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/frontend.json132
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json21
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json981
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json212
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json377
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/floating-point.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/frontend.json132
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json981
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json212
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json243
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/frontend.json122
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/memory.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json812
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/cache.json236
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/frontend.json122
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/memory.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/pipeline.json812
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv5
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/cache.json4180
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/floating-point.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json232
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/memory.json2008
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json973
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json262
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json257
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/floating-point.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json48
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/memory.json231
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/other.json94
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json42
-rw-r--r--tools/perf/pmu-events/jevents.c39
-rw-r--r--tools/perf/scripts/python/bin/mem-phys-addr-record19
-rw-r--r--tools/perf/scripts/python/bin/mem-phys-addr-report3
-rw-r--r--tools/perf/scripts/python/mem-phys-addr.py95
-rw-r--r--tools/perf/tests/attr.c6
-rw-r--r--tools/perf/tests/backward-ring-buffer.c6
-rw-r--r--tools/perf/tests/bp_signal.c2
-rw-r--r--tools/perf/tests/bpf-script-example.c4
-rw-r--r--tools/perf/tests/bpf.c68
-rw-r--r--tools/perf/tests/builtin-test.c10
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/tests/keep-tracking.c2
-rw-r--r--tools/perf/tests/mmap-basic.c2
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c5
-rw-r--r--tools/perf/tests/parse-events.c1
-rw-r--r--tools/perf/tests/perf-record.c2
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh7
-rw-r--r--tools/perf/tests/sw-clock.c2
-rw-r--r--tools/perf/tests/switch-tracking.c2
-rw-r--r--tools/perf/tests/task-exit.c2
-rw-r--r--tools/perf/tests/thread-map.c2
-rw-r--r--tools/perf/trace/beauty/Build1
-rw-r--r--tools/perf/trace/beauty/arch_errno_names.c1
-rwxr-xr-xtools/perf/trace/beauty/arch_errno_names.sh100
-rw-r--r--tools/perf/trace/beauty/beauty.h5
-rw-r--r--tools/perf/trace/beauty/flock.c10
-rw-r--r--tools/perf/trace/beauty/futex_val3.c18
-rw-r--r--tools/perf/ui/browsers/annotate.c399
-rw-r--r--tools/perf/ui/gtk/annotate.c25
-rw-r--r--tools/perf/util/Build10
-rw-r--r--tools/perf/util/annotate.c652
-rw-r--r--tools/perf/util/annotate.h76
-rw-r--r--tools/perf/util/arm-spe-pkt-decoder.c462
-rw-r--r--tools/perf/util/arm-spe-pkt-decoder.h43
-rw-r--r--tools/perf/util/arm-spe.c231
-rw-r--r--tools/perf/util/arm-spe.h31
-rw-r--r--tools/perf/util/auxtrace.c8
-rw-r--r--tools/perf/util/auxtrace.h1
-rw-r--r--tools/perf/util/bpf-loader.c4
-rw-r--r--tools/perf/util/callchain.c10
-rw-r--r--tools/perf/util/callchain.h2
-rw-r--r--tools/perf/util/cgroup.c3
-rw-r--r--tools/perf/util/cs-etm-decoder/Build1
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c513
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h105
-rw-r--r--tools/perf/util/cs-etm.c1023
-rw-r--r--tools/perf/util/cs-etm.h18
-rw-r--r--tools/perf/util/data.c10
-rw-r--r--tools/perf/util/dso.c2
-rw-r--r--tools/perf/util/env.c47
-rw-r--r--tools/perf/util/env.h2
-rw-r--r--tools/perf/util/event.c8
-rw-r--r--tools/perf/util/event.h4
-rw-r--r--tools/perf/util/evlist.c70
-rw-r--r--tools/perf/util/evlist.h15
-rw-r--r--tools/perf/util/evsel.c227
-rw-r--r--tools/perf/util/evsel.h12
-rwxr-xr-xtools/perf/util/generate-cmdlist.sh2
-rw-r--r--tools/perf/util/header.c130
-rw-r--r--tools/perf/util/header.h9
-rw-r--r--tools/perf/util/intel-bts.c6
-rw-r--r--tools/perf/util/intel-pt-decoder/Build24
-rw-r--r--tools/perf/util/intel-pt.c11
-rw-r--r--tools/perf/util/machine.c4
-rw-r--r--tools/perf/util/map.c2
-rw-r--r--tools/perf/util/metricgroup.c10
-rw-r--r--tools/perf/util/mmap.c73
-rw-r--r--tools/perf/util/mmap.h4
-rw-r--r--tools/perf/util/ordered-events.c3
-rw-r--r--tools/perf/util/ordered-events.h2
-rw-r--r--tools/perf/util/parse-events.c3
-rw-r--r--tools/perf/util/path.c14
-rw-r--r--tools/perf/util/path.h3
-rw-r--r--tools/perf/util/pmu.c87
-rw-r--r--tools/perf/util/pmu.h2
-rw-r--r--tools/perf/util/probe-event.c85
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/rblist.c19
-rw-r--r--tools/perf/util/rblist.h1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c3
-rw-r--r--tools/perf/util/session.c54
-rw-r--r--tools/perf/util/session.h2
-rw-r--r--tools/perf/util/sort.c20
-rw-r--r--tools/perf/util/srcline.c9
-rw-r--r--tools/perf/util/srcline.h5
-rw-r--r--tools/perf/util/stat-shadow.c426
-rw-r--r--tools/perf/util/stat.c15
-rw-r--r--tools/perf/util/stat.h63
-rw-r--r--tools/perf/util/string.c46
-rw-r--r--tools/perf/util/string2.h2
-rw-r--r--tools/perf/util/symbol.c5
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/syscalltbl.c4
-rw-r--r--tools/perf/util/target.h7
-rw-r--r--tools/perf/util/thread_map.c27
-rw-r--r--tools/perf/util/thread_map.h3
-rw-r--r--tools/perf/util/time-utils.c301
-rw-r--r--tools/perf/util/time-utils.h8
-rw-r--r--tools/perf/util/tool.h1
-rw-r--r--tools/perf/util/unwind-libunwind-local.c9
-rw-r--r--tools/perf/util/unwind-libunwind.c4
-rw-r--r--tools/perf/util/util.c2
-rw-r--r--tools/perf/util/util.h10
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c28
-rw-r--r--tools/power/cpupower/lib/cpufreq.h4
-rwxr-xr-xtools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py15
-rw-r--r--tools/scripts/Makefile.include1
-rw-r--r--tools/testing/selftests/bpf/.gitignore7
-rw-r--r--tools/testing/selftests/bpf/Makefile20
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h5
-rw-r--r--tools/testing/selftests/bpf/config2
-rw-r--r--tools/testing/selftests/bpf/sample_map_ret0.c34
-rw-r--r--tools/testing/selftests/bpf/sample_ret0.c7
-rwxr-xr-xtools/testing/selftests/bpf/tcp_client.py51
-rwxr-xr-xtools/testing/selftests/bpf/tcp_server.py83
-rw-r--r--tools/testing/selftests/bpf/test_align.c174
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c11
-rw-r--r--tools/testing/selftests/bpf/test_l4lb_noinline.c473
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c217
-rw-r--r--tools/testing/selftests/bpf/test_maps.c32
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py1085
-rw-r--r--tools/testing/selftests/bpf/test_progs.c355
-rw-r--r--tools/testing/selftests/bpf/test_stacktrace_map.c62
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf.h16
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_kern.c115
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_user.c126
-rw-r--r--tools/testing/selftests/bpf/test_tracepoint.c26
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c2522
-rw-r--r--tools/testing/selftests/bpf/test_xdp_noinline.c833
-rwxr-xr-xtools/testing/selftests/firmware/fw_fallback.sh172
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh5
-rw-r--r--tools/testing/selftests/memfd/Makefile5
-rw-r--r--tools/testing/selftests/memfd/common.c46
-rw-r--r--tools/testing/selftests/memfd/common.h9
-rw-r--r--tools/testing/selftests/memfd/fuse_test.c44
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c212
-rwxr-xr-xtools/testing/selftests/memfd/run_fuse_test.sh2
-rwxr-xr-xtools/testing/selftests/memfd/run_tests.sh1
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh429
-rw-r--r--tools/testing/selftests/net/msg_zerocopy.c21
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh229
-rw-r--r--tools/testing/selftests/ptp/testptp.c4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config2frag.sh25
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh6
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh42
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-torture.sh2
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh1
-rw-r--r--tools/testing/selftests/vm/Makefile5
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests11
-rw-r--r--tools/testing/selftests/vm/va_128TBswitch.c297
-rw-r--r--tools/testing/selftests/x86/5lvl.c177
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c500
-rw-r--r--tools/usb/usbip/libsrc/usbip_device_driver.c7
-rw-r--r--tools/usb/usbip/src/usbip_bind.c9
-rw-r--r--tools/usb/usbip/src/usbip_list.c9
-rw-r--r--tools/usb/usbip/src/usbipd.c2
-rw-r--r--tools/virtio/linux/kernel.h2
-rw-r--r--tools/virtio/linux/thread_info.h1
-rw-r--r--tools/virtio/ringtest/main.h59
-rw-r--r--tools/virtio/ringtest/ptr_ring.c2
-rw-r--r--tools/vm/page-types.c28
-rw-r--r--virt/kvm/arm/arm.c35
-rw-r--r--virt/kvm/arm/mmu.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-v4.c2
-rw-r--r--virt/kvm/eventfd.c4
-rw-r--r--virt/kvm/kvm_main.c44
6499 files changed, 283822 insertions, 140792 deletions
diff --git a/.mailmap b/.mailmap
index 1469ff0d3f4d..e18cab73e209 100644
--- a/.mailmap
+++ b/.mailmap
@@ -107,6 +107,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Mark Brown <broonie@sirena.org.uk>
+Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Matthieu CASTET <castet.matthieu@free.fr>
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 3bec49c33bbb..7f3a0728ccf2 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -228,8 +228,6 @@ isdn/
- directory with info on the Linux ISDN support, and supported cards.
kbuild/
- directory with info about the kernel build process.
-kernel-doc-nano-HOWTO.txt
- - outdated info about kernel-doc documentation.
kdump/
- directory with mini HowTo on getting the crash dump code to work.
doc-guide/
@@ -346,8 +344,6 @@ prctl/
- directory with info on the privilege control subsystem
preempt-locking.txt
- info on locking under a preemptive kernel.
-printk-formats.txt
- - how to get printk format specifiers right
process/
- how to work with the mainline kernel development process.
pps/
diff --git a/Documentation/ABI/testing/devlink-resource-mlxsw b/Documentation/ABI/testing/devlink-resource-mlxsw
new file mode 100644
index 000000000000..259ed2948ec0
--- /dev/null
+++ b/Documentation/ABI/testing/devlink-resource-mlxsw
@@ -0,0 +1,33 @@
+What: /kvd/
+Date: 08-Jan-2018
+KernelVersion: v4.16
+Contact: mlxsw@mellanox.com
+Description: The main database in the Spectrum device is a centralized
+ KVD database used for many of the tables used to configure
+		the chip, including the L2 FDB, L3 LPM, ECMP and more. The KVD
+		is divided into two sections: the first is a hash-based table
+		and the second is a linear access table. The division
+		between the linear and hash-based sections is static and
+		requires a reload before the changes take effect.
+
+What: /kvd/linear
+Date: 08-Jan-2018
+KernelVersion: v4.16
+Contact: mlxsw@mellanox.com
+Description: The linear section of the KVD is managed by software as a
+ flat memory accessed using an index.
+
+What: /kvd/hash_single
+Date: 08-Jan-2018
+KernelVersion: v4.16
+Contact: mlxsw@mellanox.com
+Description:	The hash-based section of the KVD is managed by the switch
+		device. Used when the key size is smaller than or equal
+		to 64 bits.
+
+What: /kvd/hash_double
+Date: 08-Jan-2018
+KernelVersion: v4.16
+Contact: mlxsw@mellanox.com
+Description:	The hash-based section of the KVD is managed by the switch
+		device. Used when the key is larger than 64 bits.
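+
+		A hypothetical resizing flow using the devlink tool (the
+		device name and size below are illustrative):
+
+		  devlink resource set pci/0000:03:00.0 path /kvd/linear size 98304
+		  devlink dev reload pci/0000:03:00.0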
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
index 9578247e1792..d12cb2eae9ee 100644
--- a/Documentation/ABI/testing/evm
+++ b/Documentation/ABI/testing/evm
@@ -14,30 +14,46 @@ Description:
generated either locally or remotely using an
asymmetric key. These keys are loaded onto root's
keyring using keyctl, and EVM is then enabled by
- echoing a value to <securityfs>/evm:
+ echoing a value to <securityfs>/evm made up of the
+ following bits:
- 1: enable HMAC validation and creation
- 2: enable digital signature validation
- 3: enable HMAC and digital signature validation and HMAC
- creation
+ Bit Effect
+ 0 Enable HMAC validation and creation
+ 1 Enable digital signature validation
+ 2 Permit modification of EVM-protected metadata at
+ runtime. Not supported if HMAC validation and
+ creation is enabled.
+ 31 Disable further runtime modification of EVM policy
- Further writes will be blocked if HMAC support is enabled or
- if bit 32 is set:
+ For example:
- echo 0x80000002 ><securityfs>/evm
+ echo 1 ><securityfs>/evm
- will enable digital signature validation and block
- further writes to <securityfs>/evm.
+		will enable HMAC validation and creation.
- Until this is done, EVM can not create or validate the
- 'security.evm' xattr, but returns INTEGRITY_UNKNOWN.
- Loading keys and signaling EVM should be done as early
- as possible. Normally this is done in the initramfs,
- which has already been measured as part of the trusted
- boot. For more information on creating and loading
- existing trusted/encrypted keys, refer to:
+ echo 0x80000003 ><securityfs>/evm
- Documentation/security/keys/trusted-encrypted.rst. Both dracut
- (via 97masterkey and 98integrity) and systemd (via
+ will enable HMAC and digital signature validation and
+ HMAC creation and disable all further modification of policy.
+
+ echo 0x80000006 ><securityfs>/evm
+
+ will enable digital signature validation, permit
+ modification of EVM-protected metadata and
+		disable all further modification of policy.
+
+ Note that once a key has been loaded, it will no longer be
+ possible to enable metadata modification.
+
+		Until key loading has been signaled, EVM cannot create
+ or validate the 'security.evm' xattr, but returns
+ INTEGRITY_UNKNOWN. Loading keys and signaling EVM
+ should be done as early as possible. Normally this is
+ done in the initramfs, which has already been measured
+ as part of the trusted boot. For more information on
+ creating and loading existing trusted/encrypted keys,
+ refer to:
+ Documentation/security/keys/trusted-encrypted.rst. Both
+ dracut (via 97masterkey and 98integrity) and systemd (via
core/ima-setup) have support for loading keys at boot
time.
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index e76432b9954d..2028f2d093b2 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -17,7 +17,8 @@ Description:
rule format: action [condition ...]
- action: measure | dont_measure | appraise | dont_appraise | audit
+ action: measure | dont_measure | appraise | dont_appraise |
+ audit | hash | dont_hash
condition:= base | lsm [option]
base: [[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
[euid=] [fowner=]]
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 2e3f919485f4..6a5f34b4d5b9 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -32,7 +32,7 @@ Description:
Description of the physical chip / device for device X.
Typically a part number.
-What: /sys/bus/iio/devices/iio:deviceX/timestamp_clock
+What: /sys/bus/iio/devices/iio:deviceX/current_timestamp_clock
KernelVersion: 4.5
Contact: linux-iio@vger.kernel.org
Description:
@@ -1290,7 +1290,7 @@ KernelVersion: 3.4
Contact: linux-iio@vger.kernel.org
Description:
Unit-less light intensity. Modifiers both and ir indicate
- that measurements contains visible and infrared light
+ that measurements contain visible and infrared light
components or just infrared light, respectively. Modifier uv indicates
that measurements contain ultraviolet light components.
@@ -1413,6 +1413,16 @@ Description:
the available samples after the timeout expires and thus have a
maximum delay guarantee.
+What: /sys/bus/iio/devices/iio:deviceX/buffer/data_available
+KernelVersion: 4.16
+Contact: linux-iio@vger.kernel.org
+Description:
+ A read-only value indicating the bytes of data available in the
+ buffer. In the case of an output buffer, this indicates the
+ amount of empty space available to write data to. In the case of
+ an input buffer, this indicates the amount of data available for
+ reading.
+
What: /sys/bus/iio/devices/iio:deviceX/buffer/hwfifo_enabled
KernelVersion: 4.2
Contact: linux-iio@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32 b/Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32
new file mode 100644
index 000000000000..da9822309f07
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32
@@ -0,0 +1,16 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_spi_clk_freq
+KernelVersion: 4.14
+Contact: arnaud.pouliquen@st.com
+Description:
+		For audio purposes only.
+		Used by the audio driver to set/get the SPI input frequency.
+		This is mandatory if the DFSDM is a slave on the SPI bus, to
+		provide information on the SPI clock frequency at runtime.
+		Note that the SPI frequency should be a multiple of the
+		sample frequency to ensure precision.
+		If the DFSDM input is SPI master:
+		reading returns the SPI clkout frequency;
+		writing returns an error.
+		If the DFSDM input is SPI slave:
+		reading returns the value previously set;
+		writing sets the value, which must be done before starting conversions.
\ No newline at end of file
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd b/Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd
new file mode 100644
index 000000000000..0088aba4caa8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-drivers-xhci_hcd
@@ -0,0 +1,25 @@
+What: /sys/bus/pci/drivers/xhci_hcd/.../dbc
+Date: June 2017
+Contact: Lu Baolu <baolu.lu@linux.intel.com>
+Description:
+		xHCI-compatible USB host controllers (i.e. super-speed
+		USB3 controllers) are often implemented with the Debug
+		Capability (DbC). It can present a debug device which
+		is fully compliant with the USB framework and provides
+		the equivalent of a very high-performance full-duplex
+		serial link for debugging purposes.
+
+		The DbC debug device shares a root port with the xHCI host.
+ When the DbC is enabled, the root port will be assigned
+ to the Debug Capability. Otherwise, it will be assigned
+ to xHCI.
+
+		Writing "enable" to this attribute will enable the DbC
+		functionality, and the shared root port will be assigned
+		to the DbC device. Writing "disable" to this attribute
+		will disable the DbC functionality, and the shared root
+		port will be assigned back to the xHCI.
+
+ Reading this attribute gives the state of the DbC. It
+ can be one of the following states: disabled, enabled,
+ initialized, connected, configured and stalled.
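+
+		A usage sketch (the "..." in the path stands for the
+		device-specific PCI address and is left elided here):
+
+		  echo enable > /sys/bus/pci/drivers/xhci_hcd/.../dbc
+		  cat /sys/bus/pci/drivers/xhci_hcd/.../dbc
+		  echo disable > /sys/bus/pci/drivers/xhci_hcd/.../dbc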
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-netdev b/Documentation/ABI/testing/sysfs-class-led-trigger-netdev
new file mode 100644
index 000000000000..451af6d6768c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-netdev
@@ -0,0 +1,45 @@
+What: /sys/class/leds/<led>/device_name
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Specifies the network device name to monitor.
+
+What: /sys/class/leds/<led>/interval
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Specifies the duration of the LED blink in milliseconds.
+ Defaults to 50 ms.
+
+What: /sys/class/leds/<led>/link
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Signal the link state of the named network device.
+ If set to 0 (default), the LED's normal state is off.
+ If set to 1, the LED's normal state reflects the link state
+ of the named network device.
+ Setting this value also immediately changes the LED state.
+
+What: /sys/class/leds/<led>/tx
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Signal transmission of data on the named network device.
+ If set to 0 (default), the LED will not blink on transmission.
+ If set to 1, the LED will blink for the milliseconds specified
+ in interval to signal transmission.
+
+What: /sys/class/leds/<led>/rx
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Signal reception of data on the named network device.
+ If set to 0 (default), the LED will not blink on reception.
+ If set to 1, the LED will blink for the milliseconds specified
+ in interval to signal reception.
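+
+		A usage sketch (the LED and interface names are
+		illustrative; the netdev trigger must be selected first):
+
+		  echo netdev > /sys/class/leds/<led>/trigger
+		  echo eth0 > /sys/class/leds/<led>/device_name
+		  echo 1 > /sys/class/leds/<led>/link
+		  echo 1 > /sys/class/leds/<led>/tx
+		  echo 1 > /sys/class/leds/<led>/rx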
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index 6856da99b6f7..2f1788111cd9 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -259,3 +259,27 @@ Contact: netdev@vger.kernel.org
Description:
Symbolic link to the PHY device this network device is attached
to.
+
+What: /sys/class/net/<iface>/carrier_changes
+Date: Mar 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ 32-bit unsigned integer counting the number of times the link has
+ seen a change from UP to DOWN and vice versa
+
+What: /sys/class/net/<iface>/carrier_up_count
+Date: Jan 2018
+KernelVersion: 4.16
+Contact: netdev@vger.kernel.org
+Description:
+ 32-bit unsigned integer counting the number of times the link has
+ been up
+
+What: /sys/class/net/<iface>/carrier_down_count
+Date: Jan 2018
+KernelVersion: 4.16
+Contact: netdev@vger.kernel.org
+Description:
+ 32-bit unsigned integer counting the number of times the link has
+ been down
diff --git a/Documentation/ABI/testing/sysfs-devices-coredump b/Documentation/ABI/testing/sysfs-devices-coredump
new file mode 100644
index 000000000000..e459368533a4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-coredump
@@ -0,0 +1,10 @@
+What: /sys/devices/.../coredump
+Date: December 2017
+Contact: Arend van Spriel <aspriel@gmail.com>
+Description:
+ The /sys/devices/.../coredump attribute is only present when the
+		device is bound to a driver which provides the .coredump()
+		callback. The attribute is write-only. Anything written to this
+ file will trigger the .coredump() callback.
+
+ Available when CONFIG_DEV_COREDUMP is enabled.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index d6d862db3b5d..bfd29bc8d37a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -375,3 +375,19 @@ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: information about CPUs heterogeneity.
cpu_capacity: capacity of cpu#.
+
+What: /sys/devices/system/cpu/vulnerabilities
+ /sys/devices/system/cpu/vulnerabilities/meltdown
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
+Date: January 2018
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description: Information about CPU vulnerabilities
+
+ The files are named after the code names of CPU
+ vulnerabilities. The output of those files reflects the
+ state of the CPUs in the system. Possible output values:
+
+ "Not affected" CPU is not affected by the vulnerability
+ "Vulnerable" CPU is affected and no mitigation in effect
+ "Mitigation: $M" CPU is affected and mitigation $M is in effect
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index a7799c2fca28..d870b5514d15 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -186,3 +186,9 @@ Date: August 2017
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
Controls sleep time of GC urgent mode
+
+What: /sys/fs/f2fs/<disk>/readdir_ra
+Date: November 2017
+Contact: "Sheng Yong" <shengyong1@huawei.com>
+Description:
+		Controls readahead of inode blocks in readdir.
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
index d5d39748382f..dac7e1e62a8b 100644
--- a/Documentation/ABI/testing/sysfs-kernel-livepatch
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
@@ -33,6 +33,32 @@ Description:
An attribute which indicates whether the patch is currently in
transition.
+What: /sys/kernel/livepatch/<patch>/signal
+Date: Nov 2017
+KernelVersion: 4.15.0
+Contact: live-patching@vger.kernel.org
+Description:
+		A writable attribute that allows the administrator to affect the
+ course of an existing transition. Writing 1 sends a fake
+ signal to all remaining blocking tasks. The fake signal
+ means that no proper signal is delivered (there is no data in
+ signal pending structures). Tasks are interrupted or woken up,
+ and forced to change their patched state.
+
+What: /sys/kernel/livepatch/<patch>/force
+Date: Nov 2017
+KernelVersion: 4.15.0
+Contact: live-patching@vger.kernel.org
+Description:
+		A writable attribute that allows the administrator to affect the
+ course of an existing transition. Writing 1 clears
+ TIF_PATCH_PENDING flag of all tasks and thus forces the tasks to
+		the patched or unpatched state. The administrator should not
+		use this feature without clearance from a patch
+ distributor. Removal (rmmod) of patch modules is permanently
+ disabled when the feature is used. See
+ Documentation/livepatch/livepatch.txt for more information.
+
What: /sys/kernel/livepatch/<patch>/<object>
Date: Nov 2014
KernelVersion: 3.19.0
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
index 4a1cd7645d85..507775cce753 100644
--- a/Documentation/IRQ-domain.txt
+++ b/Documentation/IRQ-domain.txt
@@ -265,37 +265,5 @@ support other architectures, such as ARM, ARM64 etc.
=== Debugging ===
-If you switch on CONFIG_IRQ_DOMAIN_DEBUG (which depends on
-CONFIG_IRQ_DOMAIN and CONFIG_DEBUG_FS), you will find a new file in
-your debugfs mount point, called irq_domain_mapping. This file
-contains a live snapshot of all the IRQ domains in the system:
-
- name mapped linear-max direct-max devtree-node
- pl061 8 8 0 /smb/gpio@e0080000
- pl061 8 8 0 /smb/gpio@e1050000
- pMSI 0 0 0 /interrupt-controller@e1101000/v2m@e0080000
- MSI 37 0 0 /interrupt-controller@e1101000/v2m@e0080000
- GICv2m 37 0 0 /interrupt-controller@e1101000/v2m@e0080000
- GICv2 448 448 0 /interrupt-controller@e1101000
-
-it also iterates over the interrupts to display their mapping in the
-domains, and makes the domain stacking visible:
-
-
-irq hwirq chip name chip data active type domain
- 1 0x00019 GICv2 0xffff00000916bfd8 * LINEAR GICv2
- 2 0x0001d GICv2 0xffff00000916bfd8 LINEAR GICv2
- 3 0x0001e GICv2 0xffff00000916bfd8 * LINEAR GICv2
- 4 0x0001b GICv2 0xffff00000916bfd8 * LINEAR GICv2
- 5 0x0001a GICv2 0xffff00000916bfd8 LINEAR GICv2
-[...]
- 96 0x81808 MSI 0x (null) RADIX MSI
- 96+ 0x00063 GICv2m 0xffff8003ee116980 RADIX GICv2m
- 96+ 0x00063 GICv2 0xffff00000916bfd8 LINEAR GICv2
- 97 0x08800 MSI 0x (null) * RADIX MSI
- 97+ 0x00064 GICv2m 0xffff8003ee116980 * RADIX GICv2m
- 97+ 0x00064 GICv2 0xffff00000916bfd8 * LINEAR GICv2
-
-Here, interrupts 1-5 are only using a single domain, while 96 and 97
-are build out of a stack of three domain, each level performing a
-particular function.
+Most of the internals of the IRQ subsystem are exposed in debugfs by
+turning CONFIG_GENERIC_IRQ_DEBUGFS on.
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 38d6d800761f..6c06e10bd04b 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -1097,7 +1097,8 @@ will cause the CPU to disregard the values of its counters on
its next exit from idle.
Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
cases where a given operation has resulted in a quiescent state
-for all flavors of RCU, for example, <tt>cond_resched_rcu_qs()</tt>.
+for all flavors of RCU, for example, <tt>cond_resched()</tt>
+when RCU has indicated a need for quiescent states.
<h5>RCU Callback Handling</h5>
@@ -1182,8 +1183,8 @@ CPU (and from tracing) unless otherwise stated.
Its fields are as follows:
<pre>
- 1 int dynticks_nesting;
- 2 int dynticks_nmi_nesting;
+ 1 long dynticks_nesting;
+ 2 long dynticks_nmi_nesting;
3 atomic_t dynticks;
4 bool rcu_need_heavy_qs;
5 unsigned long rcu_qs_ctr;
@@ -1191,15 +1192,31 @@ Its fields are as follows:
</pre>
<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
-nesting depth of normal interrupts.
-In addition, this counter is incremented when exiting dyntick-idle
-mode and decremented when entering it.
+nesting depth of process execution, so that in normal circumstances
+this counter has value zero or one.
+NMIs, irqs, and tracers are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
+field.
+Because NMIs cannot be masked, changes to this variable have to be
+undertaken carefully using an algorithm provided by Andy Lutomirski.
+The initial transition from idle adds one, and nested transitions
+add two, so that a nesting level of five is represented by a
+<tt>-&gt;dynticks_nmi_nesting</tt> value of nine.
This counter can therefore be thought of as counting the number
of reasons why this CPU cannot be permitted to enter dyntick-idle
-mode, aside from non-maskable interrupts (NMIs).
-NMIs are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
-field, except that NMIs that interrupt non-dyntick-idle execution
-are not counted.
+mode, aside from process-level transitions.
+
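+<p>As a worked example of the increments described above, a nesting
+level of three is represented by a value of
+1&nbsp;+&nbsp;2&nbsp;+&nbsp;2&nbsp;=&nbsp;5; in general, a nesting
+level of <i>n</i> is represented by a
+<tt>-&gt;dynticks_nmi_nesting</tt> value of 2<i>n</i>&nbsp;-&nbsp;1.
+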
+<p>However, it turns out that when running in non-idle kernel context,
+the Linux kernel is fully capable of entering interrupt handlers that
+never exit and perhaps also vice versa.
+Therefore, whenever the <tt>-&gt;dynticks_nesting</tt> field is
+incremented up from zero, the <tt>-&gt;dynticks_nmi_nesting</tt> field
+is set to a large positive number, and whenever the
+<tt>-&gt;dynticks_nesting</tt> field is decremented down to zero,
+the <tt>-&gt;dynticks_nmi_nesting</tt> field is set to zero.
+Assuming that the number of misnested interrupts is not sufficient
+to overflow the counter, this approach corrects the
+<tt>-&gt;dynticks_nmi_nesting</tt> field every time the corresponding
+CPU enters the idle loop from process context.
</p><p>The <tt>-&gt;dynticks</tt> field counts the corresponding
CPU's transitions to and from dyntick-idle mode, so that this counter
@@ -1231,14 +1248,16 @@ in response.
<tr><th>&nbsp;</th></tr>
<tr><th align="left">Quick Quiz:</th></tr>
<tr><td>
- Why not just count all NMIs?
- Wouldn't that be simpler and less error prone?
+ Why not simply combine the <tt>-&gt;dynticks_nesting</tt>
+ and <tt>-&gt;dynticks_nmi_nesting</tt> counters into a
+ single counter that just counts the number of reasons that
+ the corresponding CPU is non-idle?
</td></tr>
<tr><th align="left">Answer:</th></tr>
<tr><td bgcolor="#ffffff"><font color="ffffff">
- It seems simpler only until you think hard about how to go about
- updating the <tt>rcu_dynticks</tt> structure's
- <tt>-&gt;dynticks</tt> field.
+ Because this would fail in the presence of interrupts whose
+ handlers never return and of handlers that manage to return
+ from a made-up interrupt.
</font></td></tr>
<tr><td>&nbsp;</td></tr>
</table>
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index 62e847bcdcdd..49690228b1c6 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -581,7 +581,8 @@ This guarantee was only partially premeditated.
DYNIX/ptx used an explicit memory barrier for publication, but had nothing
resembling <tt>rcu_dereference()</tt> for subscription, nor did it
have anything resembling the <tt>smp_read_barrier_depends()</tt>
-that was later subsumed into <tt>rcu_dereference()</tt>.
+that was later subsumed into <tt>rcu_dereference()</tt> and later
+still into <tt>READ_ONCE()</tt>.
The need for these operations made itself known quite suddenly at a
late-1990s meeting with the DEC Alpha architects, back in the days when
DEC was still a free-standing company.
@@ -2797,7 +2798,7 @@ RCU must avoid degrading real-time response for CPU-bound threads, whether
executing in usermode (which is one use case for
<tt>CONFIG_NO_HZ_FULL=y</tt>) or in the kernel.
That said, CPU-bound loops in the kernel must execute
-<tt>cond_resched_rcu_qs()</tt> at least once per few tens of milliseconds
+<tt>cond_resched()</tt> at least once per few tens of milliseconds
in order to avoid receiving an IPI from RCU.
<p>
@@ -3128,7 +3129,7 @@ The solution, in the form of
is to have implicit
read-side critical sections that are delimited by voluntary context
switches, that is, calls to <tt>schedule()</tt>,
-<tt>cond_resched_rcu_qs()</tt>, and
+<tt>cond_resched()</tt>, and
<tt>synchronize_rcu_tasks()</tt>.
In addition, transitions to and from userspace execution also delimit
tasks-RCU read-side critical sections.
diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt
index 1acb26b09b48..ab96227bad42 100644
--- a/Documentation/RCU/rcu_dereference.txt
+++ b/Documentation/RCU/rcu_dereference.txt
@@ -122,11 +122,7 @@ o Be very careful about comparing pointers obtained from
Note that if checks for being within an RCU read-side
critical section are not required and the pointer is never
dereferenced, rcu_access_pointer() should be used in place
- of rcu_dereference(). The rcu_access_pointer() primitive
- does not require an enclosing read-side critical section,
- and also omits the smp_read_barrier_depends() included in
- rcu_dereference(), which in turn should provide a small
- performance gain in some CPUs (e.g., the DEC Alpha).
+ of rcu_dereference().
o The comparison is against a pointer that references memory
that was initialized "a long time ago." The reason
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index a08f928c8557..4259f95c3261 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -23,12 +23,10 @@ o A CPU looping with preemption disabled. This condition can
o A CPU looping with bottom halves disabled. This condition can
result in RCU-sched and RCU-bh stalls.
-o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
- kernel without invoking schedule(). Note that cond_resched()
- does not necessarily prevent RCU CPU stall warnings. Therefore,
- if the looping in the kernel is really expected and desirable
- behavior, you might need to replace some of the cond_resched()
- calls with calls to cond_resched_rcu_qs().
+o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
+ without invoking schedule(). If the looping in the kernel is
+ really expected and desirable behavior, you might need to add
+ some calls to cond_resched().
o Booting Linux using a console connection that is too slow to
keep up with the boot-time console-message rate. For example,
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index df62466da4e0..a27fbfb0efb8 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -600,8 +600,7 @@ don't forget about them when submitting patches making use of RCU!]
#define rcu_dereference(p) \
({ \
- typeof(p) _________p1 = p; \
- smp_read_barrier_depends(); \
+ typeof(p) _________p1 = READ_ONCE(p); \
(_________p1); \
})
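+
+A minimal usage sketch of rcu_dereference() (the pointer and field
+names here are illustrative, not taken from the kernel):
+
+	rcu_read_lock();
+	p = rcu_dereference(gp);
+	if (p)
+		do_something_with(p->a);
+	rcu_read_unlock();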
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index af7104aaffd9..dc63e5bddf78 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -114,7 +114,6 @@
This facility can be used to prevent such uncontrolled
GPE floodings.
Format: <int>
- Support masking of GPEs numbered from 0x00 to 0x7f.
acpi_no_auto_serialize [HW,ACPI]
Disable auto-serialization of AML methods
@@ -223,7 +222,7 @@
acpi_sleep= [HW,ACPI] Sleep options
Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
- old_ordering, nonvs, sci_force_enable }
+ old_ordering, nonvs, sci_force_enable, nobl }
See Documentation/power/video.txt for information on
s3_bios and s3_mode.
s3_beep is for debugging; it makes the PC's speaker beep
@@ -239,6 +238,9 @@
sci_force_enable causes the kernel to set SCI_EN directly
on resume from S1/S3 (which is against the ACPI spec,
but some broken systems don't work without it).
+			nobl causes the internal blacklist of systems known
+			to behave incorrectly during system suspend and
+			resume to be ignored (use wisely).
acpi_use_timer_override [HW,ACPI]
Use timer override. For some broken Nvidia NF5 boards
@@ -713,9 +715,6 @@
It will be ignored when crashkernel=X,high is not used
or memory reserved is below 4G.
- crossrelease_fullstack
- [KNL] Allow to record full stack trace in cross-release
-
cryptomgr.notests
[KNL] Disable crypto self-tests
@@ -2053,9 +2052,6 @@
This tests the locking primitive's ability to
transition abruptly to and from idle.
- locktorture.torture_runnable= [BOOT]
- Start locktorture running at boot time.
-
locktorture.torture_type= [KNL]
Specify the locking implementation to test.
@@ -2542,6 +2538,9 @@
This is useful when you use a panic=... timeout and
need the box quickly up again.
+ These settings can be accessed at runtime via
+ the nmi_watchdog and hardlockup_panic sysctls.
+
netpoll.carrier_timeout=
[NET] Specifies amount of time (in seconds) that
netpoll should wait for a carrier. By default netpoll
@@ -2626,6 +2625,11 @@
nosmt [KNL,S390] Disable symmetric multithreading (SMT).
Equivalent to smt=1.
+ nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
+ (indirect branch prediction) vulnerability. System may
+ allow data leaks with this option, which is equivalent
+ to spectre_v2=off.
+
noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fallback to
enabling legacy floating-point and sse state.
@@ -2712,8 +2716,6 @@
steal time is computed, but won't influence scheduler
behaviour
- nopti [X86-64] Disable kernel page table isolation
-
nolapic [X86-32,APIC] Do not enable or use the local APIC.
nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
@@ -3100,6 +3102,12 @@
pcie_scan_all Scan all possible PCIe devices. Otherwise we
only look for one device below a PCIe downstream
port.
+		big_root_window	Try to add a big 64-bit memory window to the PCIe
+ root complex on AMD CPUs. Some GFX hardware
+ can resize a BAR to allow access to all VRAM.
+ Adding the window is slightly risky (it may
+ conflict with unreported devices), so this
+ taints the kernel.
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
@@ -3288,11 +3296,20 @@
pt. [PARIDE]
See Documentation/blockdev/paride.txt.
- pti= [X86_64]
- Control user/kernel address space isolation:
- on - enable
- off - disable
- auto - default setting
+ pti= [X86_64] Control Page Table Isolation of user and
+ kernel address spaces. Disabling this feature
+ removes hardening, but improves performance of
+ system calls and interrupts.
+
+ on - unconditionally enable
+ off - unconditionally disable
+ auto - kernel detects whether your CPU model is
+ vulnerable to issues that PTI mitigates
+
+ Not specifying this option is equivalent to pti=auto.
+
+ nopti [X86_64]
+ Equivalent to pti=off
pty.legacy_count=
[KNL] Number of legacy pty's. Overwrites compiled-in
@@ -3471,9 +3488,6 @@
the same as for rcuperf.nreaders.
N, where N is the number of CPUs
- rcuperf.perf_runnable= [BOOT]
- Start rcuperf running at boot time.
-
rcuperf.perf_type= [KNL]
Specify the RCU implementation to test.
@@ -3607,9 +3621,6 @@
Test RCU's dyntick-idle handling. See also the
rcutorture.shuffle_interval parameter.
- rcutorture.torture_runnable= [BOOT]
- Start rcutorture running at boot time.
-
rcutorture.torture_type= [KNL]
Specify the RCU implementation to test.
@@ -3667,7 +3678,8 @@
rdt= [HW,X86,RDT]
Turn on/off individual RDT features. List is:
- cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, mba.
+ cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
+ mba.
E.g. to turn on cmt and turn off mba use:
rdt=cmt,!mba
@@ -3943,6 +3955,29 @@
sonypi.*= [HW] Sony Programmable I/O Control Device driver
See Documentation/laptops/sonypi.txt
+ spectre_v2= [X86] Control mitigation of Spectre variant 2
+ (indirect branch speculation) vulnerability.
+
+ on - unconditionally enable
+ off - unconditionally disable
+ auto - kernel detects whether your CPU model is
+ vulnerable
+
+ Selecting 'on' will, and 'auto' may, choose a
+ mitigation method at run time according to the
+ CPU, the available microcode, the setting of the
+ CONFIG_RETPOLINE configuration option, and the
+ compiler with which the kernel was built.
+
+ Specific mitigations can also be selected manually:
+
+ retpoline - replace indirect branches
+			retpoline,generic - Google's original retpoline
+ retpoline,amd - AMD-specific minimal thunk
+
+ Not specifying this option is equivalent to
+ spectre_v2=auto.
+
spia_io_base= [HW,MTD]
spia_fio_base=
spia_pedr=
diff --git a/Documentation/admin-guide/mono.rst b/Documentation/admin-guide/mono.rst
index cdddc099af64..59e6d59f0ed9 100644
--- a/Documentation/admin-guide/mono.rst
+++ b/Documentation/admin-guide/mono.rst
@@ -9,14 +9,14 @@ This will allow you to execute Mono-based .NET binaries just like any
other program after you have done the following:
1) You MUST FIRST install the Mono CLR support, either by downloading
- a binary package, a source tarball or by installing from CVS. Binary
+ a binary package, a source tarball or by installing from Git. Binary
packages for several distributions can be found at:
- http://go-mono.com/download.html
+ http://www.mono-project.com/download/
Instructions for compiling Mono can be found at:
- http://www.go-mono.com/compiling.html
+ http://www.mono-project.com/docs/compiling-mono/linux/
Once the Mono CLR support has been installed, just check that
``/usr/bin/mono`` (which could be located elsewhere, for example
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
index bd9b3faab2c4..a70090b28b07 100644
--- a/Documentation/arm64/cpu-feature-registers.txt
+++ b/Documentation/arm64/cpu-feature-registers.txt
@@ -110,7 +110,9 @@ infrastructure:
x--------------------------------------------------x
| Name | bits | visible |
|--------------------------------------------------|
- | RES0 | [63-48] | n |
+ | RES0 | [63-52] | n |
+ |--------------------------------------------------|
+ | FHM | [51-48] | y |
|--------------------------------------------------|
| DP | [47-44] | y |
|--------------------------------------------------|
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index 89edba12a9e0..57324ee55ecc 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -158,3 +158,7 @@ HWCAP_SHA512
HWCAP_SVE
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001.
+
+HWCAP_ASIMDFHM
+
+ Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001.
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index fc1c884fea10..c1d520de6dfe 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -72,7 +72,7 @@ stable kernels.
| Hisilicon | Hip0{6,7} | #161010701 | N/A |
| Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 |
| | | | |
-| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
+| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 |
diff --git a/Documentation/bpf/bpf_devel_QA.txt b/Documentation/bpf/bpf_devel_QA.txt
new file mode 100644
index 000000000000..cefef855dea4
--- /dev/null
+++ b/Documentation/bpf/bpf_devel_QA.txt
@@ -0,0 +1,519 @@
+This document provides information for the BPF subsystem about various
+workflows related to reporting bugs, submitting patches, and queueing
+patches for stable kernels.
+
+For general information about submitting patches, please refer to
+Documentation/process/. This document only describes additional specifics
+related to BPF.
+
+Reporting bugs:
+---------------
+
+Q: How do I report bugs for BPF kernel code?
+
+A: Since all BPF kernel development as well as bpftool and iproute2 BPF
+ loader development happens through the netdev kernel mailing list,
+ please report any found issues around BPF to the following mailing
+ list:
+
+ netdev@vger.kernel.org
+
+ This may also include issues related to XDP, BPF tracing, etc.
+
+ Given netdev has a high volume of traffic, please also add the BPF
+ maintainers to Cc (from kernel MAINTAINERS file):
+
+ Alexei Starovoitov <ast@kernel.org>
+ Daniel Borkmann <daniel@iogearbox.net>
+
+ In case a buggy commit has already been identified, make sure to keep
+ the actual commit authors in Cc as well for the report. They can
+ typically be identified through the kernel's git tree.
+
+ Please do *not* report BPF issues to bugzilla.kernel.org since it
+ is a guarantee that the reported issue will be overlooked.
+
+Submitting patches:
+-------------------
+
+Q: To which mailing list do I need to submit my BPF patches?
+
+A: Please submit your BPF patches to the netdev kernel mailing list:
+
+ netdev@vger.kernel.org
+
+ Historically, BPF came out of networking and has always been maintained
+ by the kernel networking community. Although these days BPF touches
+ many other subsystems as well, the patches are still routed mainly
+ through the networking community.
+
+ In case your patch has changes in various different subsystems (e.g.
+ tracing, security, etc), make sure to Cc the related kernel mailing
+ lists and maintainers from there as well, so they are able to review
+ the changes and provide their Acked-by's to the patches.
+
+Q: Where can I find patches currently under discussion for BPF subsystem?
+
+A: All patches that are Cc'ed to netdev are queued for review under the netdev
+ patchwork project:
+
+ http://patchwork.ozlabs.org/project/netdev/list/
+
+   Those patches which target BPF are assigned to a 'bpf' delegate for
+ further processing from BPF maintainers. The current queue with
+ patches under review can be found at:
+
+ https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
+
+ Once the patches have been reviewed by the BPF community as a whole
+ and approved by the BPF maintainers, their status in patchwork will be
+ changed to 'Accepted' and the submitter will be notified by mail. This
+ means that the patches look good from a BPF perspective and have been
+ applied to one of the two BPF kernel trees.
+
+ In case feedback from the community requires a respin of the patches,
+   their status in patchwork will be set to 'Changes Requested', and they
+   will be purged from the current review queue. Likewise for cases where patches would
+ get rejected or are not applicable to the BPF trees (but assigned to
+ the 'bpf' delegate).
+
+Q: How do the changes make their way into Linux?
+
+A: There are two BPF kernel trees (git repositories). Once patches have
+ been accepted by the BPF maintainers, they will be applied to one
+ of the two BPF trees:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git/
+ https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/
+
+   The bpf tree itself is for fixes only, whereas bpf-next is for
+   features, cleanups and other kinds of improvements ("next-like"
+   content). This is analogous to the net and net-next trees for
+   networking. Both bpf and bpf-next will only have a master branch in
+   order to make it obvious which branch patches should be rebased against.
+
+ Accumulated BPF patches in the bpf tree will regularly get pulled
+ into the net kernel tree. Likewise, accumulated BPF patches accepted
+ into the bpf-next tree will make their way into net-next tree. net and
+ net-next are both run by David S. Miller. From there, they will go
+ into the kernel mainline tree run by Linus Torvalds. To read up on the
+ process of net and net-next being merged into the mainline tree, see
+ the netdev FAQ under:
+
+ Documentation/networking/netdev-FAQ.txt
+
+ Occasionally, to prevent merge conflicts, we might send pull requests
+ to other trees (e.g. tracing) with a small subset of the patches, but
+ net and net-next are always the main trees targeted for integration.
+
+ The pull requests will contain a high-level summary of the accumulated
+ patches and can be searched on netdev kernel mailing list through the
+ following subject lines (yyyy-mm-dd is the date of the pull request):
+
+ pull-request: bpf yyyy-mm-dd
+ pull-request: bpf-next yyyy-mm-dd
+
+Q: How do I indicate which tree (bpf vs. bpf-next) my patch should be
+ applied to?
+
+A: The process is the very same as described in the netdev FAQ, so
+ please read up on it. The subject line must indicate whether the
+ patch is a fix or rather "next-like" content in order to let the
+ maintainers know whether it is targeted at bpf or bpf-next.
+
+ For fixes eventually landing in bpf -> net tree, the subject must
+ look like:
+
+ git format-patch --subject-prefix='PATCH bpf' start..finish
+
+ For features/improvements/etc that should eventually land in
+ bpf-next -> net-next, the subject must look like:
+
+ git format-patch --subject-prefix='PATCH bpf-next' start..finish
+
+   If you are unsure whether the patch or patch series should go into
+   bpf or net directly, or bpf-next or net-next directly, it is not a
+   problem if the subject line says net or net-next as the target.
+ It is eventually up to the maintainers to do the delegation of
+ the patches.
+
+ If it is clear that patches should go into bpf or bpf-next tree,
+ please make sure to rebase the patches against those trees in
+ order to reduce potential conflicts.
+
+ In case the patch or patch series has to be reworked and sent out
+ again in a second or later revision, it is also required to add a
+ version number (v2, v3, ...) into the subject prefix:
+
+ git format-patch --subject-prefix='PATCH net-next v2' start..finish
+
+ When changes have been requested to the patch series, always send the
+ whole patch series again with the feedback incorporated (never send
+ individual diffs on top of the old series).
+
+Q: What does it mean when a patch gets applied to bpf or bpf-next tree?
+
+A: It means that the patch looks good for mainline inclusion from
+ a BPF point of view.
+
+ Be aware that this is not a final verdict that the patch will
+ automatically get accepted into net or net-next trees eventually:
+
+ On the netdev kernel mailing list reviews can come in at any point
+   in time. If discussions around a patch conclude that it cannot
+   be included as-is, we will either apply a follow-up fix or drop
+   it from the trees entirely. Therefore, we also reserve the right to
+   rebase the trees when deemed necessary. After all, the purpose of the
+   tree is to i) accumulate and stage BPF patches for integration into
+   trees like net and net-next, and ii) run the extensive BPF test suite and
+ workloads on the patches before they make their way any further.
+
+   Once the BPF pull request has been accepted by David S. Miller,
+ the patches end up in net or net-next tree, respectively, and
+ make their way from there further into mainline. Again, see the
+ netdev FAQ for additional information e.g. on how often they are
+ merged to mainline.
+
+Q: How long do I need to wait for feedback on my BPF patches?
+
+A: We try to keep the latency low. The usual time to feedback will
+ be around 2 or 3 business days. It may vary depending on the
+ complexity of changes and current patch load.
+
+Q: How often do you send pull requests to major kernel trees like
+ net or net-next?
+
+A: Pull requests will be sent out rather often in order to not
+ accumulate too many patches in bpf or bpf-next.
+
+ As a rule of thumb, expect pull requests for each tree regularly
+   at the end of the week. In some cases pull requests could also
+   come in the middle of the week, depending on the current patch
+ load or urgency.
+
+Q: Are patches applied to bpf-next when the merge window is open?
+
+A: While the merge window is open, bpf-next will not be processed.
+   This is roughly analogous to net-next patch processing,
+ so feel free to read up on the netdev FAQ about further details.
+
+ During those two weeks of merge window, we might ask you to resend
+   your patch series once bpf-next is open again. Once Linus has released
+   a v*-rc1 after the merge window, processing of bpf-next continues.
+
+ For non-subscribers to kernel mailing lists, there is also a status
+ page run by David S. Miller on net-next that provides guidance:
+
+ http://vger.kernel.org/~davem/net-next.html
+
+Q: I made a BPF verifier change, do I need to add test cases for
+ BPF kernel selftests?
+
+A: If the patch has changes to the behavior of the verifier, then yes,
+ it is absolutely necessary to add test cases to the BPF kernel
+ selftests suite. If they are not present and we think they are
+ needed, then we might ask for them before accepting any changes.
+
+ In particular, test_verifier.c is tracking a high number of BPF test
+   cases, including a lot of corner cases that the LLVM BPF back end may
+ generate out of the restricted C code. Thus, adding test cases is
+ absolutely crucial to make sure future changes do not accidentally
+   affect prior use-cases. In other words, treat those test cases as a
+   contract: verifier behavior that is not tracked in test_verifier.c
+   could potentially be subject to change.
+
+Q: When should I add code to samples/bpf/ and when to BPF kernel
+ selftests?
+
+A: In general, we prefer additions to BPF kernel selftests rather than
+ samples/bpf/. The rationale is very simple: kernel selftests are
+ regularly run by various bots to test for kernel regressions.
+
+ The more test cases we add to BPF selftests, the better the coverage
+ and the less likely it is that those could accidentally break. It is
+   not to say that BPF kernel selftests cannot demo how a specific feature can
+ be used.
+
+ That said, samples/bpf/ may be a good place for people to get started,
+ so it might be advisable that simple demos of features could go into
+   samples/bpf/, while advanced functional and corner-case testing
+   should rather go
+ into kernel selftests.
+
+ If your sample looks like a test case, then go for BPF kernel selftests
+ instead!
+
+Q: When should I add code to the bpftool?
+
+A: The main purpose of bpftool (under tools/bpf/bpftool/) is to provide
+ a central user space tool for debugging and introspection of BPF programs
+ and maps that are active in the kernel. If UAPI changes related to BPF
+   make it possible to dump additional information about programs or
+   maps, then bpftool should be extended to support dumping it as well.
+
+Q: When should I add code to iproute2's BPF loader?
+
+A: For UAPI changes related to the XDP or tc layer (e.g. cls_bpf), the
+ convention is that those control-path related changes are added to
+   iproute2's BPF loader on the user space side as well. This is not only
+ useful to have UAPI changes properly designed to be usable, but also
+ to make those changes available to a wider user base of major
+ downstream distributions.
+
+Q: Do you accept patches as well for iproute2's BPF loader?
+
+A: Patches for iproute2's BPF loader have to be sent to:
+
+ netdev@vger.kernel.org
+
+ While those patches are not processed by the BPF kernel maintainers,
+ please keep them in Cc as well, so they can be reviewed.
+
+ The official git repository for iproute2 is run by Stephen Hemminger
+ and can be found at:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/shemminger/iproute2.git/
+
+ The patches need to have a subject prefix of '[PATCH iproute2 master]'
+ or '[PATCH iproute2 net-next]'. 'master' or 'net-next' describes the
+   target branch the patch should be applied to. Meaning, if kernel
+ changes went into the net-next kernel tree, then the related iproute2
+ changes need to go into the iproute2 net-next branch, otherwise they
+ can be targeted at master branch. The iproute2 net-next branch will get
+ merged into the master branch after the current iproute2 version from
+ master has been released.
+
+ Like BPF, the patches end up in patchwork under the netdev project and
+ are delegated to 'shemminger' for further processing:
+
+ http://patchwork.ozlabs.org/project/netdev/list/?delegate=389
+
+Q: What is the minimum requirement before I submit my BPF patches?
+
+A: When submitting patches, always take the time and properly test your
+ patches *prior* to submission. Never rush them! If maintainers find
+ that your patches have not been properly tested, it is a good way to
+ get them grumpy. Testing patch submissions is a hard requirement!
+
+ Note, fixes that go to bpf tree *must* have a Fixes: tag included. The
+ same applies to fixes that target bpf-next, where the affected commit
+ is in net-next (or in some cases bpf-next). The Fixes: tag is crucial
+   in order to identify follow-up commits and tremendously helps people
+   who have to do backporting, so it is a must-have!
+
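+   An illustrative Fixes: tag (the commit id here is made up):
+
+     Fixes: 1234567890ab ("bpf: one-line summary of the broken commit")
+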
+ We also don't accept patches with an empty commit message. Take your
+ time and properly write up a high quality commit message, it is
+ essential!
+
+ Think about it this way: other developers looking at your code a month
+ from now need to understand *why* a certain change has been done that
+ way, and whether there have been flaws in the analysis or assumptions
+ that the original author did. Thus providing a proper rationale and
+   that the original author made. Thus, providing a proper rationale and
+
+ Patch submissions with >1 patch must have a cover letter which includes
+ a high level description of the series. This high level summary will
+ then be placed into the merge commit by the BPF maintainers such that
+ it is also accessible from the git log for future reference.
+
+Q: What do I need to consider when adding a new instruction or feature
+ that would require BPF JIT and/or LLVM integration as well?
+
+A: We try hard to keep all BPF JITs up to date such that the same user
+ experience can be guaranteed when running BPF programs on different
+ architectures without having the program punt to the less efficient
+ interpreter in case the in-kernel BPF JIT is enabled.
+
+ If you are unable to implement or test the required JIT changes for
+ certain architectures, please work together with the related BPF JIT
+ developers in order to get the feature implemented in a timely manner.
+ Please refer to the git log (arch/*/net/) to locate the necessary
+ people for helping out.
+
+ Also always make sure to add BPF test cases (e.g. test_bpf.c and
+ test_verifier.c) for new instructions, so that they can receive
+ broad test coverage and help with run-time testing of the various BPF JITs.
+
+ In case of new BPF instructions, once the changes have been accepted
+ into the Linux kernel, please implement support in LLVM's BPF back
+ end. See the LLVM section below for further information.
+
+Stable submission:
+------------------
+
+Q: I need a specific BPF commit in stable kernels. What should I do?
+
+A: In case you need a specific fix in stable kernels, first check whether
+ the commit has already been applied in the related linux-*.y branches:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/
+
+ If that is not the case, then send an email to the BPF maintainers with
+ the netdev kernel mailing list in Cc and ask for the fix to be queued up:
+
+ netdev@vger.kernel.org
+
+ The process in general is the same as on netdev itself, see also the
+ netdev FAQ document.
+
+Q: Do you also backport to kernels not currently maintained as stable?
+
+A: No. If you need a specific BPF commit in kernels that are currently not
+ maintained by the stable maintainers, then you are on your own.
+
+ The current stable and longterm stable kernels are all listed here:
+
+ https://www.kernel.org/
+
+Q: The BPF patch I am about to submit needs to go to stable as well. What
+ should I do?
+
+A: The same rules apply as with netdev patch submissions in general, see
+ netdev FAQ under:
+
+ Documentation/networking/netdev-FAQ.txt
+
+ Never add "Cc: stable@vger.kernel.org" to the patch description, but
+ ask the BPF maintainers to queue the patches instead. This can be done
+ with a note, for example, under the "---" part of the patch which does
+ not go into the git log. Alternatively, this can be done as a simple
+ request by mail instead.
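+
+ A hypothetical example of such a note (everything below the "---"
+ marker does not end up in the git log):
+
+ Signed-off-by: John Doe <john.doe@example.com>
+ ---
+ Hi maintainers, note that this fix is also needed in stable,
+ please queue it up.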
+
+Q: Where do I find currently queued BPF patches that will be submitted
+ to stable?
+
+A: Once patches that fix critical bugs got applied into the bpf tree, they
+ are queued up for stable submission under:
+
+ http://patchwork.ozlabs.org/bundle/bpf/stable/?state=*
+
+ They will be on hold there at minimum until the related commit has
+ made its way into the mainline kernel tree.
+
+ After having been under broader exposure, the queued patches will be
+ submitted by the BPF maintainers to the stable maintainers.
+
+Testing patches:
+----------------
+
+Q: Which BPF kernel selftests version should I run my kernel against?
+
+A: If you run a kernel xyz, then always run the BPF kernel selftests from
+ that kernel xyz as well. Do not expect that the BPF selftests from
+ the latest mainline tree will pass all the time.
+
+ In particular, test_bpf.c and test_verifier.c have a large number of
+ test cases and are constantly updated with new BPF test sequences, or
+ existing ones are adapted to verifier changes, e.g. due to the verifier
+ becoming smarter and being able to better track certain things.
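+
+ A common way to build and run the BPF selftests from the tree of the
+ kernel under test (assuming the usual kselftest invocation) is:
+
+ $ make -C tools/testing/selftests TARGETS=bpf run_tests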
+
+LLVM:
+-----
+
+Q: Where do I find LLVM with BPF support?
+
+A: The BPF back end for LLVM is upstream in LLVM since version 3.7.1.
+
+ All major distributions these days ship LLVM with the BPF back end
+ enabled, so for the majority of use-cases it is not required to compile
+ LLVM by hand anymore; just install the distribution-provided package.
+
+ LLVM's static compiler lists the supported targets through 'llc --version';
+ make sure the BPF targets are listed. Example:
+
+ $ llc --version
+ LLVM (http://llvm.org/):
+ LLVM version 6.0.0svn
+ Optimized build.
+ Default target: x86_64-unknown-linux-gnu
+ Host CPU: skylake
+
+ Registered Targets:
+ bpf - BPF (host endian)
+ bpfeb - BPF (big endian)
+ bpfel - BPF (little endian)
+ x86 - 32-bit X86: Pentium-Pro and above
+ x86-64 - 64-bit X86: EM64T and AMD64
+
+ In order to utilize the latest features added to LLVM's BPF back end,
+ developers are advised to run the latest LLVM releases. Support for
+ new BPF kernel features such as additions to the BPF instruction set
+ is often developed together with the kernel changes.
+
+ All LLVM releases can be found at: http://releases.llvm.org/
+
+Q: Got it, so how do I build LLVM manually anyway?
+
+A: You need cmake and gcc-c++ as build requisites for LLVM. Once you have
+ that set up, proceed with building the latest LLVM and clang version
+ from the git repositories:
+
+ $ git clone http://llvm.org/git/llvm.git
+ $ cd llvm/tools
+ $ git clone --depth 1 http://llvm.org/git/clang.git
+ $ cd ..; mkdir build; cd build
+ $ cmake .. -DLLVM_TARGETS_TO_BUILD="BPF;X86" \
+ -DBUILD_SHARED_LIBS=OFF \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DLLVM_BUILD_RUNTIME=OFF
+ $ make -j $(getconf _NPROCESSORS_ONLN)
+
+ The built binaries can then be found in the build/bin/ directory, to
+ which you can point your PATH variable.
+
+Q: Should I notify BPF kernel maintainers about issues in LLVM's BPF code
+ generation back end or about LLVM generated code that the verifier
+ refuses to accept?
+
+A: Yes, please do! LLVM's BPF back end is a key piece of the whole BPF
+ infrastructure and it ties deeply into verification of programs from the
+ kernel side. Therefore, any issues on either side need to be investigated
+ and fixed whenever necessary.
+
+ Please make sure to bring them up at the netdev kernel mailing
+ list and Cc the BPF maintainers for both the LLVM and kernel bits:
+
+ Yonghong Song <yhs@fb.com>
+ Alexei Starovoitov <ast@kernel.org>
+ Daniel Borkmann <daniel@iogearbox.net>
+
+ LLVM also has an issue tracker where BPF related bugs can be found:
+
+ https://bugs.llvm.org/buglist.cgi?quicksearch=bpf
+
+ However, it is better to reach out through the mailing list with the
+ maintainers in Cc.
+
+Q: I have added a new BPF instruction to the kernel, how can I integrate
+ it into LLVM?
+
+A: LLVM has a -mcpu selector for the BPF back end in order to allow the
+ selection of BPF instruction set extensions. By default the 'generic'
+ processor target is used, which is the base instruction set (v1) of BPF.
+
+ LLVM has an option to select -mcpu=probe where it will probe the host
+ kernel for supported BPF instruction set extensions and select the
+ optimal set automatically.
+
+ For cross-compilation, a specific version can be selected manually as well:
+
+ $ llc -march bpf -mcpu=help
+ Available CPUs for this target:
+
+ generic - Select the generic processor.
+ probe - Select the probe processor.
+ v1 - Select the v1 processor.
+ v2 - Select the v2 processor.
+ [...]
+
+ BPF instructions newly added to the Linux kernel need to follow the same
+ scheme: bump the instruction set version and implement probing for the
+ extensions, such that -mcpu=probe users can benefit from the optimization
+ transparently when upgrading their kernels.
+
+ If you are unable to implement support for the newly added BPF
+ instruction, please reach out to BPF developers for help.
+
+ By the way, the BPF kernel selftests run with -mcpu=probe for better
+ test coverage.
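+
+ As an illustration, a program can be compiled for a specific (or
+ probed) instruction set version like this (file names are examples):
+
+ $ clang -O2 -emit-llvm -c prog.c -o - | \
+ llc -march=bpf -mcpu=probe -filetype=obj -o prog.o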
+
+Happy BPF hacking!
diff --git a/Documentation/cgroup-v1/cgroups.txt b/Documentation/cgroup-v1/cgroups.txt
index 308e5ff7207a..059f7063eea6 100644
--- a/Documentation/cgroup-v1/cgroups.txt
+++ b/Documentation/cgroup-v1/cgroups.txt
@@ -523,12 +523,7 @@ Accessing a task's cgroup pointer may be done in the following ways:
Each subsystem should:
- add an entry in linux/cgroup_subsys.h
-- define a cgroup_subsys object called <name>_subsys
-
-If a subsystem can be compiled as a module, it should also have in its
-module initcall a call to cgroup_load_subsys(), and in its exitcall a
-call to cgroup_unload_subsys(). It should also set its_subsys.module =
-THIS_MODULE in its .c file.
+- define a cgroup_subsys object called <name>_cgrp_subsys
Each subsystem may export the following methods. The only mandatory
methods are css_alloc/free. Any others that are null are presumed to
diff --git a/Documentation/cgroup-v1/memory.txt b/Documentation/cgroup-v1/memory.txt
index cefb63639070..a4af2e124e24 100644
--- a/Documentation/cgroup-v1/memory.txt
+++ b/Documentation/cgroup-v1/memory.txt
@@ -524,9 +524,9 @@ Note:
Only anonymous and swap cache memory is listed as part of 'rss' stat.
This should not be confused with the true 'resident set size' or the
amount of physical memory used by the cgroup.
- 'rss + file_mapped" will give you resident set size of cgroup.
+ 'rss + mapped_file" will give you resident set size of cgroup.
(Note: file and shmem may be shared among other cgroups. In that case,
- file_mapped is accounted only when the memory cgroup is owner of page
+ mapped_file is accounted only when the memory cgroup is owner of page
cache.)
5.3 swappiness
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 2cddab7efb20..74cdeaed9f7a 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -53,10 +53,14 @@ v1 is available under Documentation/cgroup-v1/.
5-3-2. Writeback
5-4. PID
5-4-1. PID Interface Files
- 5-5. RDMA
- 5-5-1. RDMA Interface Files
- 5-6. Misc
- 5-6-1. perf_event
+ 5-5. Device
+ 5-6. RDMA
+ 5-6-1. RDMA Interface Files
+ 5-7. Misc
+ 5-7-1. perf_event
+ 5-N. Non-normative information
+ 5-N-1. CPU controller root cgroup process behaviour
+ 5-N-2. IO controller root cgroup process behaviour
6. Namespace
6-1. Basics
6-2. The Root and Views
@@ -279,7 +283,7 @@ thread mode, the following conditions must be met.
exempt from this requirement.
Topology-wise, a cgroup can be in an invalid state. Please consider
-the following toplogy::
+the following topology::
A (threaded domain) - B (threaded) - C (domain, just created)
@@ -420,7 +424,9 @@ The root cgroup is exempt from this restriction. Root contains
processes and anonymous resource consumption which can't be associated
with any other cgroups and requires special treatment from most
controllers. How resource consumption in the root cgroup is governed
-is up to each controller.
+is up to each controller (for more information on this topic please
+refer to the Non-normative information section in the Controllers
+chapter).
Note that the restriction doesn't get in the way if there is no
enabled controller in the cgroup's "cgroup.subtree_control". This is
@@ -1063,10 +1069,10 @@ PAGE_SIZE multiple when read back.
reached the limit and allocation was about to fail.
Depending on context result could be invocation of OOM
- killer and retrying allocation or failing alloction.
+ killer and retrying allocation or failing allocation.
Failed allocation in its turn could be returned into
- userspace as -ENOMEM or siletly ignored in cases like
+ userspace as -ENOMEM or silently ignored in cases like
disk readahead. For now OOM in memory cgroup kills
tasks iff shortage has happened inside page fault.
@@ -1191,7 +1197,7 @@ PAGE_SIZE multiple when read back.
cgroups. The default is "max".
Swap usage hard limit. If a cgroup's swap usage reaches this
- limit, anonymous meomry of the cgroup will not be swapped out.
+ limit, anonymous memory of the cgroup will not be swapped out.
Usage Guidelines
@@ -1429,6 +1435,30 @@ through fork() or clone(). These will return -EAGAIN if the creation
of a new process would cause a cgroup policy to be violated.
+Device controller
+-----------------
+
+The device controller manages access to device files. It covers both
+the creation of new device files (using mknod) and access to existing
+device files.
+
+The cgroup v2 device controller has no interface files and is
+implemented on top of cgroup BPF. To control access to device files, a
+user may create BPF programs of the BPF_CGROUP_DEVICE type and attach
+them to cgroups. On an attempt to access a device file, the
+corresponding BPF programs will be executed, and depending on the
+return value the attempt will succeed or fail with -EPERM.
+
+A BPF_CGROUP_DEVICE program takes a pointer to the bpf_cgroup_dev_ctx
+structure, which describes the device access attempt: access type
+(mknod/read/write) and device (type, major and minor numbers).
+If the program returns 0, the attempt fails with -EPERM, otherwise
+it succeeds.
+
+An example of a BPF_CGROUP_DEVICE program may be found in the kernel
+source tree in the tools/testing/selftests/bpf/dev_cgroup.c file.
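+
+A minimal sketch of such a program, assuming the bpf_cgroup_dev_ctx
+layout from include/uapi/linux/bpf.h and the SEC() macro convention
+used by the selftests (the device numbers below are just an example),
+could look like::
+
+  #include <linux/bpf.h>
+
+  #define SEC(name) __attribute__((section(name), used))
+
+  SEC("cgroup/dev")
+  int allow_dev_null(struct bpf_cgroup_dev_ctx *ctx)
+  {
+          /* low 16 bits encode the device type, high bits the access */
+          short type = ctx->access_type & 0xFFFF;
+
+          /* permit the character device 1:3 (/dev/null), deny the rest */
+          if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 3)
+                  return 1;       /* access allowed */
+          return 0;               /* access fails with -EPERM */
+  }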
+
+
RDMA
----
@@ -1481,6 +1511,35 @@ always be filtered by cgroup v2 path. The controller can still be
moved to a legacy hierarchy after v2 hierarchy is populated.
+Non-normative information
+-------------------------
+
+This section contains information that isn't considered to be a part of
+the stable kernel API and so is subject to change.
+
+
+CPU controller root cgroup process behaviour
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When distributing CPU cycles in the root cgroup each thread in this
+cgroup is treated as if it were hosted in a separate child cgroup of
+the root cgroup. The weight of this child cgroup depends on the
+thread's nice level.
+
+For details of this mapping see the sched_prio_to_weight array in
+kernel/sched/core.c (values from this array should be scaled
+appropriately so that the neutral - nice 0 - value is 100 instead of
+1024).
+
+
+IO controller root cgroup process behaviour
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Root cgroup processes are hosted in an implicit leaf child node.
+When distributing IO resources this implicit child node is taken into
+account as if it were a normal child cgroup of the root cgroup with a
+weight value of 200.
+
+
Namespace
=========
diff --git a/Documentation/circular-buffers.txt b/Documentation/circular-buffers.txt
index d4628174b7c5..53e51caa3347 100644
--- a/Documentation/circular-buffers.txt
+++ b/Documentation/circular-buffers.txt
@@ -220,8 +220,7 @@ before it writes the new tail pointer, which will erase the item.
Note the use of READ_ONCE() and smp_load_acquire() to read the
opposition index. This prevents the compiler from discarding and
-reloading its cached value - which some compilers will do across
-smp_read_barrier_depends(). This isn't strictly needed if you can
+reloading its cached value. This isn't strictly needed if you can
be sure that the opposition index will _only_ be used the once.
The smp_load_acquire() additionally forces the CPU to order against
subsequent memory references. Similarly, smp_store_release() is used
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 63857d33778c..62ac5a9f3a9f 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -88,7 +88,6 @@ finally:
if makefile_version and makefile_patchlevel:
version = release = makefile_version + '.' + makefile_patchlevel
else:
- sys.stderr.write('Warning: Could not extract kernel version\n')
version = release = "unknown version"
# The language for content autogenerated by Sphinx. Refer to documentation
diff --git a/Documentation/errseq.rst b/Documentation/core-api/errseq.rst
index 4c29bd5afbc5..ff332e272405 100644
--- a/Documentation/errseq.rst
+++ b/Documentation/core-api/errseq.rst
@@ -1,5 +1,7 @@
+=====================
The errseq_t datatype
=====================
+
An errseq_t is a way of recording errors in one place, and allowing any
number of "subscribers" to tell whether it has changed since a previous
point where it was sampled.
@@ -21,12 +23,13 @@ a flag to tell whether the value has been sampled since a new value was
recorded. That allows us to avoid bumping the counter if no one has
sampled it since the last time an error was recorded.
-Thus we end up with a value that looks something like this::
+Thus we end up with a value that looks something like this:
- bit: 31..13 12 11..0
- +-----------------+----+----------------+
- | counter | SF | errno |
- +-----------------+----+----------------+
++--------------------------------------+----+------------------------+
+| 31..13 | 12 | 11..0 |
++--------------------------------------+----+------------------------+
+| counter | SF | errno |
++--------------------------------------+----+------------------------+
The general idea is for "watchers" to sample an errseq_t value and keep
it as a running cursor. That value can later be used to tell whether
@@ -42,6 +45,7 @@ has ever been an error set since it was first initialized.
API usage
=========
+
Let me tell you a story about a worker drone. Now, he's a good worker
overall, but the company is a little...management heavy. He has to
report to 77 supervisors today, and tomorrow the "big boss" is coming in
@@ -125,6 +129,7 @@ not usable by anyone else.
Serializing errseq_t cursor updates
===================================
+
Note that the errseq_t API does not protect the errseq_t cursor during a
check_and_advance_operation. Only the canonical error code is handled
atomically. In a situation where more than one task might be using the
@@ -147,3 +152,8 @@ errseq_check_and_advance after taking the lock. e.g.::
That avoids the spinlock in the common case where nothing has changed
since the last time it was checked.
+
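+As a compressed sketch of the core calls (the -EIO value here is just
+an example)::
+
+	errseq_t es = 0;	/* shared error record */
+	errseq_t since;
+
+	since = errseq_sample(&es);	/* watcher samples the cursor */
+
+	errseq_set(&es, -EIO);		/* a recorder notes an error */
+
+	if (errseq_check(&es, since))
+		;	/* an error was recorded since we sampled */
+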
+Functions
+=========
+
+.. kernel-doc:: lib/errseq.c
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index d5bbe035316d..1b1fd01990b5 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -14,6 +14,7 @@ Core utilities
kernel-api
assoc_array
atomic_ops
+ refcount-vs-atomic
cpu_hotplug
local_ops
workqueue
@@ -21,6 +22,8 @@ Core utilities
flexible-arrays
librs
genalloc
+ errseq
+ printk-formats
Interfaces for kernel debugging
===============================
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 2d9da6c40a4d..e7fadf02c511 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -139,6 +139,21 @@ Division Functions
.. kernel-doc:: lib/gcd.c
:export:
+Sorting
+-------
+
+.. kernel-doc:: lib/sort.c
+ :export:
+
+.. kernel-doc:: lib/list_sort.c
+ :export:
+
+UUID/GUID
+---------
+
+.. kernel-doc:: lib/uuid.c
+ :export:
+
Memory Management in Linux
==========================
diff --git a/Documentation/printk-formats.txt b/Documentation/core-api/printk-formats.rst
index aa0a776c817a..258b46435320 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/core-api/printk-formats.rst
@@ -5,6 +5,7 @@ How to get printk format specifiers right
:Author: Randy Dunlap <rdunlap@infradead.org>
:Author: Andrew Murray <amurray@mpc-data.co.uk>
+
Integer types
=============
@@ -25,39 +26,45 @@ Integer types
s64 %lld or %llx
u64 %llu or %llx
-If <type> is dependent on a config option for its size (e.g., ``sector_t``,
-``blkcnt_t``) or is architecture-dependent for its size (e.g., ``tcflag_t``),
-use a format specifier of its largest possible type and explicitly cast to it.
+
+If <type> is dependent on a config option for its size (e.g., sector_t,
+blkcnt_t) or is architecture-dependent for its size (e.g., tcflag_t), use a
+format specifier of its largest possible type and explicitly cast to it.
Example::
printk("test: sector number/total blocks: %llu/%llu\n",
(unsigned long long)sector, (unsigned long long)blockcount);
-Reminder: ``sizeof()`` result is of type ``size_t``.
+Reminder: sizeof() returns type size_t.
-The kernel's printf does not support ``%n``. For obvious reasons, floating
-point formats (``%e, %f, %g, %a``) are also not recognized. Use of any
+The kernel's printf does not support %n. Floating point formats (%e, %f,
+%g, %a) are also not recognized, for obvious reasons. Use of any
unsupported specifier or length qualifier results in a WARN and early
-return from vsnprintf.
-
-Raw pointer value SHOULD be printed with %p. The kernel supports
-the following extended format specifiers for pointer types:
+return from vsnprintf().
-Pointer Types
+Pointer types
=============
-Pointers printed without a specifier extension (i.e unadorned %p) are
-hashed to give a unique identifier without leaking kernel addresses to user
-space. On 64 bit machines the first 32 bits are zeroed. If you _really_
-want the address see %px below.
+A raw pointer value may be printed with %p which will hash the address
+before printing. The kernel also supports extended specifiers for printing
+pointers of different types.
+
+Plain Pointers
+--------------
::
%p abcdef12 or 00000000abcdef12
+Pointers printed without a specifier extension (i.e. unadorned %p) are
+hashed to prevent leaking information about the kernel memory layout. This
+has the added benefit of providing a unique identifier. On 64-bit machines
+the first 32 bits are zeroed. If you *really* want the address see %px
+below.
+
Symbols/Function Pointers
-=========================
+-------------------------
::
@@ -69,6 +76,7 @@ Symbols/Function Pointers
%ps versatile_init
%pB prev_fn_of_versatile_init+0x88/0x88
+
The ``F`` and ``f`` specifiers are for printing function pointers,
for example, f->func, &gettimeofday. They have the same result as
``S`` and ``s`` specifiers. But they do an extra conversion on
@@ -77,14 +85,14 @@ are actually function descriptors.
The ``S`` and ``s`` specifiers can be used for printing symbols
from direct addresses, for example, __builtin_return_address(0),
-(void *)regs->ip. They result in the symbol name with (``S``) or
-without (``s``) offsets. If KALLSYMS are disabled then the symbol
+(void *)regs->ip. They result in the symbol name with (S) or
+without (s) offsets. If KALLSYMS are disabled then the symbol
address is printed instead.
The ``B`` specifier results in the symbol name with offsets and should be
used when printing stack backtraces. The specifier takes into
consideration the effect of compiler optimisations which may occur
-when tail-call``s are used and marked with the noreturn GCC attribute.
+when tail-calls are used and marked with the noreturn GCC attribute.
Examples::
@@ -97,33 +105,32 @@ Examples::
printk(" %s%pB\n", (reliable ? "" : "? "), (void *)*stack);
Kernel Pointers
-===============
+---------------
::
%pK 01234567 or 0123456789abcdef
For printing kernel pointers which should be hidden from unprivileged
-users. The behaviour of ``%pK`` depends on the ``kptr_restrict sysctl`` - see
+users. The behaviour of %pK depends on the kptr_restrict sysctl - see
Documentation/sysctl/kernel.txt for more details.
Unmodified Addresses
-====================
+--------------------
::
%px 01234567 or 0123456789abcdef
-For printing pointers when you _really_ want to print the address. Please
+For printing pointers when you *really* want to print the address. Please
consider whether or not you are leaking sensitive information about the
-Kernel layout in memory before printing pointers with %px. %px is
-functionally equivalent to %lx. %px is preferred to %lx because it is more
-uniquely grep'able. If, in the future, we need to modify the way the Kernel
-handles printing pointers it will be nice to be able to find the call
-sites.
+kernel memory layout before printing pointers with %px. %px is functionally
+equivalent to %lx (or %lu). %px is preferred because it is more uniquely
+grep'able. If in the future we need to modify the way the kernel handles
+printing pointers we will be better equipped to find the call sites.
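+
+For example, a quick sketch contrasting the two (``ptr`` here is an
+arbitrary pointer variable)::
+
+	pr_info("hashed: %p raw: %px\n", ptr, ptr);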
Struct Resources
-================
+----------------
::
@@ -133,32 +140,37 @@ Struct Resources
[mem 0x0000000060000000-0x000000006fffffff pref]
For printing struct resources. The ``R`` and ``r`` specifiers result in a
-printed resource with (``R``) or without (``r``) a decoded flags member.
+printed resource with (R) or without (r) a decoded flags member.
+
Passed by reference.
-Physical addresses types ``phys_addr_t``
-========================================
+Physical address types phys_addr_t
+----------------------------------
::
%pa[p] 0x01234567 or 0x0123456789abcdef
-For printing a ``phys_addr_t`` type (and its derivatives, such as
-``resource_size_t``) which can vary based on build options, regardless of
-the width of the CPU data path. Passed by reference.
+For printing a phys_addr_t type (and its derivatives, such as
+resource_size_t) which can vary based on build options, regardless of the
+width of the CPU data path.
+
+Passed by reference.
-DMA addresses types ``dma_addr_t``
-==================================
+DMA address types dma_addr_t
+----------------------------
::
%pad 0x01234567 or 0x0123456789abcdef
-For printing a ``dma_addr_t`` type which can vary based on build options,
-regardless of the width of the CPU data path. Passed by reference.
+For printing a dma_addr_t type which can vary based on build options,
+regardless of the width of the CPU data path.
+
+Passed by reference.
Raw buffer as an escaped string
-===============================
+-------------------------------
::
@@ -168,8 +180,8 @@ For printing raw buffer as an escaped string. For the following buffer::
1b 62 20 5c 43 07 22 90 0d 5d
-few examples show how the conversion would be done (the result string
-without surrounding quotes)::
+A few examples show how the conversion would be done (excluding surrounding
+quotes)::
%*pE "\eb \C\a"\220\r]"
%*pEhp "\x1bb \C\x07"\x90\x0d]"
@@ -179,23 +191,23 @@ The conversion rules are applied according to an optional combination
of flags (see :c:func:`string_escape_mem` kernel documentation for the
details):
- - ``a`` - ESCAPE_ANY
- - ``c`` - ESCAPE_SPECIAL
- - ``h`` - ESCAPE_HEX
- - ``n`` - ESCAPE_NULL
- - ``o`` - ESCAPE_OCTAL
- - ``p`` - ESCAPE_NP
- - ``s`` - ESCAPE_SPACE
+ - a - ESCAPE_ANY
+ - c - ESCAPE_SPECIAL
+ - h - ESCAPE_HEX
+ - n - ESCAPE_NULL
+ - o - ESCAPE_OCTAL
+ - p - ESCAPE_NP
+ - s - ESCAPE_SPACE
By default ESCAPE_ANY_NP is used.
ESCAPE_ANY_NP is the sane choice for many cases, in particularly for
printing SSIDs.
-If field width is omitted the 1 byte only will be escaped.
+If the field width is omitted, then only 1 byte will be escaped.
Raw buffer as a hex string
-==========================
+--------------------------
::
@@ -204,12 +216,12 @@ Raw buffer as a hex string
%*phD 00-01-02- ... -3f
%*phN 000102 ... 3f
-For printing a small buffers (up to 64 bytes long) as a hex string with
-certain separator. For the larger buffers consider to use
+For printing small buffers (up to 64 bytes long) as a hex string with a
+certain separator. For larger buffers consider using
:c:func:`print_hex_dump`.
MAC/FDDI addresses
-==================
+------------------
::
@@ -220,11 +232,11 @@ MAC/FDDI addresses
%pmR 050403020100
For printing 6-byte MAC/FDDI addresses in hex notation. The ``M`` and ``m``
-specifiers result in a printed address with (``M``) or without (``m``) byte
-separators. The default byte separator is the colon (``:``).
+specifiers result in a printed address with (M) or without (m) byte
+separators. The default byte separator is the colon (:).
Where FDDI addresses are concerned the ``F`` specifier can be used after
-the ``M`` specifier to use dash (``-``) separators instead of the default
+the ``M`` specifier to use dash (-) separators instead of the default
separator.
For Bluetooth addresses the ``R`` specifier shall be used after the ``M``
@@ -234,7 +246,7 @@ of Bluetooth addresses which are in the little endian order.
Passed by reference.
IPv4 addresses
-==============
+--------------
::
@@ -243,8 +255,8 @@ IPv4 addresses
%p[Ii]4[hnbl]
For printing IPv4 dot-separated decimal addresses. The ``I4`` and ``i4``
-specifiers result in a printed address with (``i4``) or without (``I4``)
-leading zeros.
+specifiers result in a printed address with (i4) or without (I4) leading
+zeros.
The additional ``h``, ``n``, ``b``, and ``l`` specifiers are used to specify
host, network, big or little endian order addresses respectively. Where
@@ -253,7 +265,7 @@ no specifier is provided the default network/big endian order is used.
Passed by reference.
IPv6 addresses
-==============
+--------------
::
@@ -262,7 +274,7 @@ IPv6 addresses
%pI6c 1:2:3:4:5:6:7:8
For printing IPv6 network-order 16-bit hex addresses. The ``I6`` and ``i6``
-specifiers result in a printed address with (``I6``) or without (``i6``)
+specifiers result in a printed address with (I6) or without (i6)
colon-separators. Leading zeros are always used.
The additional ``c`` specifier can be used with the ``I`` specifier to
@@ -272,7 +284,7 @@ http://tools.ietf.org/html/rfc5952
Passed by reference.
IPv4/IPv6 addresses (generic, with port, flowinfo, scope)
-=========================================================
+---------------------------------------------------------
::
@@ -282,8 +294,8 @@ IPv4/IPv6 addresses (generic, with port, flowinfo, scope)
%pISpc 1.2.3.4:12345 or [1:2:3:4:5:6:7:8]:12345
%p[Ii]S[pfschnbl]
-For printing an IP address without the need to distinguish whether it``s
-of type AF_INET or AF_INET6, a pointer to a valid ``struct sockaddr``,
+For printing an IP address without the need to distinguish whether it's of
+type AF_INET or AF_INET6. A pointer to a valid struct sockaddr,
specified through ``IS`` or ``iS``, can be passed to this format specifier.
The additional ``p``, ``f``, and ``s`` specifiers are used to specify port
@@ -309,7 +321,7 @@ Further examples::
%pISpfc 1.2.3.4:12345 or [1:2:3:4:5:6:7:8]:12345/123456789
UUID/GUID addresses
-===================
+-------------------
::
@@ -318,33 +330,33 @@ UUID/GUID addresses
%pUl 03020100-0504-0706-0809-0a0b0c0e0e0f
%pUL 03020100-0504-0706-0809-0A0B0C0E0E0F
-For printing 16-byte UUID/GUIDs addresses. The additional 'l', 'L',
-'b' and 'B' specifiers are used to specify a little endian order in
-lower ('l') or upper case ('L') hex characters - and big endian order
-in lower ('b') or upper case ('B') hex characters.
+For printing 16-byte UUID/GUIDs addresses. The additional ``l``, ``L``,
+``b`` and ``B`` specifiers are used to specify a little endian order in
+lower (l) or upper case (L) hex notation - and big endian order in lower (b)
+or upper case (B) hex notation.
Where no additional specifiers are used the default big endian
-order with lower case hex characters will be printed.
+order with lower case hex notation will be printed.
Passed by reference.
dentry names
-============
+------------
::
%pd{,2,3,4}
%pD{,2,3,4}
-For printing dentry name; if we race with :c:func:`d_move`, the name might be
-a mix of old and new ones, but it won't oops. ``%pd`` dentry is a safer
-equivalent of ``%s`` ``dentry->d_name.name`` we used to use, ``%pd<n>`` prints
-``n`` last components. ``%pD`` does the same thing for struct file.
+For printing dentry name; if we race with :c:func:`d_move`, the name might
+be a mix of old and new ones, but it won't oops. %pd dentry is a safer
+equivalent of %s dentry->d_name.name we used to use, %pd<n> prints ``n``
+last components. %pD does the same thing for struct file.
Passed by reference.
block_device names
-==================
+------------------
::
@@ -353,7 +365,7 @@ block_device names
For printing name of block_device pointers.
struct va_format
-================
+----------------
::
@@ -375,31 +387,27 @@ correctness of the format string and va_list arguments.
Passed by reference.
kobjects
-========
+--------
::
- %pO
+ %pOF[fnpPcCF]
- Base specifier for kobject based structs. Must be followed with
- character for specific type of kobject as listed below:
- Device tree nodes:
+For printing kobject based structs (device nodes). Default behaviour is
+equivalent to %pOFf.
- %pOF[fnpPcCF]
+ - f - device node full_name
+ - n - device node name
+ - p - device node phandle
+ - P - device node path spec (name + @unit)
+ - F - device node flags
+ - c - major compatible string
+ - C - full compatible string
- For printing device tree nodes. The optional arguments are:
- f device node full_name
- n device node name
- p device node phandle
- P device node path spec (name + @unit)
- F device node flags
- c major compatible string
- C full compatible string
- Without any arguments prints full_name (same as %pOFf)
- The separator when using multiple arguments is ':'
+The separator when using multiple arguments is ':'
- Examples:
+Examples::
%pOF /foo/bar@0 - Node full name
%pOFf /foo/bar@0 - Same as above
@@ -412,11 +420,10 @@ kobjects
P - Populated
B - Populated bus
- Passed by reference.
-
+Passed by reference.
struct clk
-==========
+----------
::
@@ -424,14 +431,14 @@ struct clk
%pCn pll1
%pCr 1560000000
-For printing struct clk structures. ``%pC`` and ``%pCn`` print the name
+For printing struct clk structures. %pC and %pCn print the name
(Common Clock Framework) or address (legacy clock framework) of the
-structure; ``%pCr`` prints the current clock rate.
+structure; %pCr prints the current clock rate.
Passed by reference.
bitmap and its derivatives such as cpumask and nodemask
-=======================================================
+-------------------------------------------------------
::
@@ -439,13 +446,13 @@ bitmap and its derivatives such as cpumask and nodemask
%*pbl 0,3-6,8-10
For printing bitmap and its derivatives such as cpumask and nodemask,
-``%*pb`` output the bitmap with field width as the number of bits and ``%*pbl``
+%*pb outputs the bitmap with field width as the number of bits and %*pbl
output the bitmap as range list with field width as the number of bits.
Passed by reference.
Flags bitfields such as page flags, gfp_flags
-=============================================
+---------------------------------------------
::
@@ -459,14 +466,14 @@ character. Currently supported are [p]age flags, [v]ma_flags (both
expect ``unsigned long *``) and [g]fp_flags (expects ``gfp_t *``). The flag
names and print order depends on the particular type.
-Note that this format should not be used directly in :c:func:`TP_printk()` part
-of a tracepoint. Instead, use the ``show_*_flags()`` functions from
-<trace/events/mmflags.h>.
+Note that this format should not be used directly in the
+:c:func:`TP_printk()` part of a tracepoint. Instead, use the show_*_flags()
+functions from <trace/events/mmflags.h>.
Passed by reference.
Network device features
-=======================
+-----------------------
::
@@ -476,8 +483,10 @@ For printing netdev_features_t.
Passed by reference.
-If you add other ``%p`` extensions, please extend lib/test_printf.c with
-one or more test cases, if at all feasible.
+Thanks
+======
+
+If you add other %p extensions, please extend <lib/test_printf.c> with
+one or more test cases, if at all feasible.
Thank you for your cooperation and attention.
diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst
new file mode 100644
index 000000000000..83351c258cdb
--- /dev/null
+++ b/Documentation/core-api/refcount-vs-atomic.rst
@@ -0,0 +1,150 @@
+===================================
+refcount_t API compared to atomic_t
+===================================
+
+.. contents:: :local:
+
+Introduction
+============
+
+The goal of the refcount_t API is to provide a minimal API for implementing
+an object's reference counters. While a generic architecture-independent
+implementation from lib/refcount.c uses atomic operations underneath,
+there are a number of differences between some of the ``refcount_*()`` and
+``atomic_*()`` functions with regards to the memory ordering guarantees.
+This document outlines the differences and provides respective examples
+in order to help maintainers validate their code against the change in
+these memory ordering guarantees.
+
+The terms used throughout this document try to follow the formal LKMM defined in
+github.com/aparri/memory-model/blob/master/Documentation/explanation.txt
+
+memory-barriers.txt and atomic_t.txt provide more background to the
+memory ordering in general and for atomic operations specifically.
+
+Relevant types of memory ordering
+=================================
+
+.. note:: The following section only covers some of the memory
+ ordering types that are relevant for the atomics and reference
+ counters and used throughout this document. For a much broader picture
+ please consult the memory-barriers.txt document.
+
+In the absence of any memory ordering guarantees (i.e. fully unordered)
+atomics & refcounters only provide atomicity and
+program order (po) relation (on the same CPU). This guarantees that
+each ``atomic_*()`` and ``refcount_*()`` operation is atomic and that
+instructions are executed in program order on a single CPU.
+This is implemented using :c:func:`READ_ONCE`/:c:func:`WRITE_ONCE` and
+compare-and-swap primitives.
+
+A strong (full) memory ordering guarantees that all prior loads and
+stores (all po-earlier instructions) on the same CPU are completed
+before any po-later instruction is executed on the same CPU.
+It also guarantees that all po-earlier stores on the same CPU
+and all propagated stores from other CPUs must propagate to all
+other CPUs before any po-later instruction is executed on the original
+CPU (A-cumulative property). This is implemented using :c:func:`smp_mb`.
+
+A RELEASE memory ordering guarantees that all prior loads and
+stores (all po-earlier instructions) on the same CPU are completed
+before the operation. It also guarantees that all po-earlier
+stores on the same CPU and all propagated stores from other CPUs
+must propagate to all other CPUs before the release operation
+(A-cumulative property). This is implemented using
+:c:func:`smp_store_release`.
+
+A control dependency (on success) for refcounters guarantees that
+if a reference for an object was successfully obtained (reference
+counter increment or addition happened, function returned true),
+then further stores are ordered against this operation.
+Control dependencies on stores are not implemented using any explicit
+barriers, but rely on the CPU not speculating on stores. This is only
+a single-CPU relation and provides no guarantees for other CPUs.
+
+
+Comparison of functions
+=======================
+
+case 1) - non-"Read/Modify/Write" (RMW) ops
+-------------------------------------------
+
+Function changes:
+
+ * :c:func:`atomic_set` --> :c:func:`refcount_set`
+ * :c:func:`atomic_read` --> :c:func:`refcount_read`
+
+Memory ordering guarantee changes:
+
+ * none (both fully unordered)
+
+
+case 2) - increment-based ops that return no value
+--------------------------------------------------
+
+Function changes:
+
+ * :c:func:`atomic_inc` --> :c:func:`refcount_inc`
+ * :c:func:`atomic_add` --> :c:func:`refcount_add`
+
+Memory ordering guarantee changes:
+
+ * none (both fully unordered)
+
+case 3) - decrement-based RMW ops that return no value
+------------------------------------------------------
+
+Function changes:
+
+ * :c:func:`atomic_dec` --> :c:func:`refcount_dec`
+
+Memory ordering guarantee changes:
+
+ * fully unordered --> RELEASE ordering
+
+
+case 4) - increment-based RMW ops that return a value
+-----------------------------------------------------
+
+Function changes:
+
+ * :c:func:`atomic_inc_not_zero` --> :c:func:`refcount_inc_not_zero`
+ * no atomic counterpart --> :c:func:`refcount_add_not_zero`
+
+Memory ordering guarantees changes:
+
+ * fully ordered --> control dependency on success for stores
+
+.. note:: We really assume here that necessary ordering is provided as a
+ result of obtaining the pointer to the object!
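+
+As an illustrative sketch (not taken from kernel code), the lookup-side
+pattern these operations typically guard looks like::
+
+  if (!refcount_inc_not_zero(&obj->refs))
+          return NULL;    /* object is already being torn down */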
+
+
+case 5) - decrement-based RMW ops that return a value
+-----------------------------------------------------
+
+Function changes:
+
+ * :c:func:`atomic_dec_and_test` --> :c:func:`refcount_dec_and_test`
+ * :c:func:`atomic_sub_and_test` --> :c:func:`refcount_sub_and_test`
+ * no atomic counterpart --> :c:func:`refcount_dec_if_one`
+ * ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)``
+
+Memory ordering guarantees changes:
+
+ * fully ordered --> RELEASE ordering + control dependency
+
+.. note:: :c:func:`atomic_add_unless` only provides full order on success.
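+
+A sketch of the release-side pattern these operations guard (``struct
+foo`` and ``foo_put()`` are illustrative, not kernel API)::
+
+  struct foo {
+          refcount_t refs;
+          void *data;
+  };
+
+  void foo_put(struct foo *f)
+  {
+          /* RELEASE ordering makes all prior stores to *f visible
+           * before the decrement; the control dependency on success
+           * orders kfree() after the final test.
+           */
+          if (refcount_dec_and_test(&f->refs))
+                  kfree(f);
+  }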
+
+
+case 6) - lock-based RMW
+------------------------
+
+Function changes:
+
+ * :c:func:`atomic_dec_and_lock` --> :c:func:`refcount_dec_and_lock`
+ * :c:func:`atomic_dec_and_mutex_lock` --> :c:func:`refcount_dec_and_mutex_lock`
+
+Memory ordering guarantees changes:
+
+ * fully ordered --> RELEASE ordering + control dependency + hold
+ :c:func:`spin_lock` on success
diff --git a/Documentation/device-mapper/cache-policies.txt b/Documentation/device-mapper/cache-policies.txt
index d3ca8af21a31..86786d87d9a8 100644
--- a/Documentation/device-mapper/cache-policies.txt
+++ b/Documentation/device-mapper/cache-policies.txt
@@ -60,7 +60,7 @@ Memory usage:
The mq policy used a lot of memory; 88 bytes per cache block on a 64
bit machine.
-smq uses 28bit indexes to implement it's data structures rather than
+smq uses 28bit indexes to implement its data structures rather than
pointers. It avoids storing an explicit hit count for each block. It
has a 'hotspot' queue, rather than a pre-cache, which uses a quarter of
the entries (each hotspot block covers a larger area than a single
@@ -84,7 +84,7 @@ resulting in better promotion/demotion decisions.
Adaptability:
The mq policy maintained a hit count for each cache block. For a
-different block to get promoted to the cache it's hit count has to
+different block to get promoted to the cache its hit count has to
exceed the lowest currently in the cache. This meant it could take a
long time for the cache to adapt between varying IO patterns.
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index cdfd0feb294e..ff0841711fd5 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -59,7 +59,7 @@ Fixed block size
The origin is divided up into blocks of a fixed size. This block size
is configurable when you first create the cache. Typically we've been
using block sizes of 256KB - 1024KB. The block size must be between 64
-(32KB) and 2097152 (1GB) and a multiple of 64 (32KB).
+sectors (32KB) and 2097152 sectors (1GB) and a multiple of 64 sectors (32KB).
Having a fixed block size simplifies the target a lot. But it is
something of a compromise. For instance, a small part of a block may be
@@ -119,7 +119,7 @@ doing here to avoid migrating during those peak io moments.
For the time being, a message "migration_threshold <#sectors>"
can be used to set the maximum number of sectors being migrated,
-the default being 204800 sectors (or 100MB).
+the default being 2048 sectors (1MB).
Updating on-disk metadata
-------------------------
@@ -143,11 +143,6 @@ the policy how big this chunk is, but it should be kept small. Like the
dirty flags this data is lost if there's a crash so a safe fallback
value should always be possible.
-For instance, the 'mq' policy, which is currently the default policy,
-uses this facility to store the hit count of the cache blocks. If
-there's a crash this information will be lost, which means the cache
-may be less efficient until those hit counts are regenerated.
-
Policy hints affect performance, not correctness.
Policy messaging
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 32df07e29f68..390c145f01d7 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -343,5 +343,8 @@ Version History
1.11.0 Fix table line argument order
(wrong raid10_copies/raid10_format sequence)
1.11.1 Add raid4/5/6 journal write-back support via journal_mode option
-1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available
+1.12.1 Fix for MD deadlock between mddev_suspend() and md_write_start() available
1.13.0 Fix dev_health status at end of "recover" (was 'a', now 'A')
+1.13.1 Fix deadlock caused by early md_stop_writes(). Also fix size and
+ state races.
+1.13.2 Fix raid redundancy validation and avoid keeping raid set frozen
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt
index ad6949bff2e3..b8bbb516f989 100644
--- a/Documentation/device-mapper/snapshot.txt
+++ b/Documentation/device-mapper/snapshot.txt
@@ -49,6 +49,10 @@ The difference between persistent and transient is with transient
snapshots less metadata must be saved on disk - they can be kept in
memory by the kernel.
+When loading or unloading the snapshot target, the corresponding
+snapshot-origin or snapshot-merge target must be suspended. A failure to
+suspend the origin target could result in data corruption.
+
* snapshot-merge <origin> <COW device> <persistent> <chunksize>
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 1699a55b7b70..4bcd4b7f79f9 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If
free space on the data device drops below this level then a dm event
will be triggered which a userspace daemon should catch allowing it to
extend the pool device. Only one such event will be sent.
-Resuming a device with a new table itself triggers an event so the
-userspace daemon can use this to detect a situation where a new table
-already exceeds the threshold.
+
+No special event is triggered if a just-resumed device's free space is
+below the low water mark. However, resuming a device always triggers an
+event; a userspace daemon should verify that free space exceeds the low
+water mark when handling this event.
A low water mark for the metadata device is maintained in the kernel and
will trigger a dm event if free space on the metadata device drops below
@@ -274,7 +276,8 @@ ii) Status
<transaction id> <used metadata blocks>/<total metadata blocks>
<used data blocks>/<total data blocks> <held metadata root>
- [no_]discard_passdown ro|rw
+ ro|rw|out_of_data_space [no_]discard_passdown [error|queue]_if_no_space
+ needs_check|-
transaction id:
A 64-bit number used by userspace to help synchronise with metadata
@@ -394,3 +397,6 @@ ii) Status
If the pool has encountered device errors and failed, the status
will just contain the string 'Fail'. The userspace recovery
tools should then be used.
+
+ In the case where <nr mapped sectors> is 0, there is no highest
+ mapped sector and the value of <highest mapped sector> is unspecified.
diff --git a/Documentation/device-mapper/unstriped.txt b/Documentation/device-mapper/unstriped.txt
new file mode 100644
index 000000000000..0b2a306c54ee
--- /dev/null
+++ b/Documentation/device-mapper/unstriped.txt
@@ -0,0 +1,124 @@
+Introduction
+============
+
+The device-mapper "unstriped" target provides a transparent mechanism to
+unstripe a device-mapper "striped" target to access the underlying disks
+without having to touch the true backing block-device. It can also be
+used to unstripe a hardware RAID-0 to access backing disks.
+
+Parameters:
+<number of stripes> <chunk size> <stripe #> <dev_path> <offset>
+
+<number of stripes>
+ The number of stripes in the RAID 0.
+
+<chunk size>
+ The number of 512B sectors in the chunk striping.
+
+<stripe #>
+ The stripe number within the device that corresponds to the physical
+ drive you wish to unstripe. This must be 0 indexed.
+
+<dev_path>
+ The block device you wish to unstripe.
+
+
+Why use this module?
+====================
+
+An example of undoing an existing dm-stripe
+-------------------------------------------
+
+This small bash script will set up 4 loop devices and use the existing
+striped target to combine the 4 devices into one. It then will use
+the unstriped target on top of the striped device to access the
+individual backing loop devices. We write data to the newly exposed
+unstriped devices and verify the data written matches the correct
+underlying device on the striped array.
+
+#!/bin/bash
+
+MEMBER_SIZE=$((128 * 1024 * 1024))
+NUM=4
+SEQ_END=$((${NUM}-1))
+CHUNK=256
+BS=4096
+
+RAID_SIZE=$((${MEMBER_SIZE}*${NUM}/512))
+DM_PARMS="0 ${RAID_SIZE} striped ${NUM} ${CHUNK}"
+COUNT=$((${MEMBER_SIZE} / ${BS}))
+
+for i in $(seq 0 ${SEQ_END}); do
+ dd if=/dev/zero of=member-${i} bs=${MEMBER_SIZE} count=1 oflag=direct
+ losetup /dev/loop${i} member-${i}
+ DM_PARMS+=" /dev/loop${i} 0"
+done
+
+echo $DM_PARMS | dmsetup create raid0
+for i in $(seq 0 ${SEQ_END}); do
+ echo "0 1 unstriped ${NUM} ${CHUNK} ${i} /dev/mapper/raid0 0" | dmsetup create set-${i}
+done;
+
+for i in $(seq 0 ${SEQ_END}); do
+ dd if=/dev/urandom of=/dev/mapper/set-${i} bs=${BS} count=${COUNT} oflag=direct
+ diff /dev/mapper/set-${i} member-${i}
+done;
+
+for i in $(seq 0 ${SEQ_END}); do
+ dmsetup remove set-${i}
+done
+
+dmsetup remove raid0
+
+for i in $(seq 0 ${SEQ_END}); do
+ losetup -d /dev/loop${i}
+ rm -f member-${i}
+done
+
+Another example
+---------------
+
+Intel NVMe drives contain two cores on the physical device.
+Each core of the drive has segregated access to its LBA range.
+The current LBA model has a RAID 0 128k chunk on each core, resulting
+in a 256k stripe across the two cores:
+
+ Core 0: Core 1:
+ __________ __________
+ | LBA 512| | LBA 768|
+ | LBA 0 | | LBA 256|
+ ---------- ----------
+
+The purpose of this unstriping is to provide better QoS in noisy
+neighbor environments. When two partitions are created on the
+aggregate drive without this unstriping, reads on one partition
+can affect writes on another partition. This is because the partitions
+are striped across the two cores. When we unstripe this hardware RAID 0
+and make partitions on each new exposed device the two partitions are now
+physically separated.
+
+With the dm-unstriped target we're able to segregate an fio script that
+has read and write jobs that are independent of each other. Compared to
+when we run the test on a combined drive with partitions, we were able
+to get a 92% reduction in read latency using this device mapper target.
+
+
+Example dmsetup usage
+=====================
+
+unstriped on top of an Intel NVMe device that has 2 cores
+---------------------------------------------------------
+dmsetup create nvmset0 --table '0 512 unstriped 2 256 0 /dev/nvme0n1 0'
+dmsetup create nvmset1 --table '0 512 unstriped 2 256 1 /dev/nvme0n1 0'
+
+There will now be two devices that expose Intel NVMe core 0 and 1
+respectively:
+/dev/mapper/nvmset0
+/dev/mapper/nvmset1
+
+unstriped on top of striped with 4 drives using 128K chunk size
+---------------------------------------------------------------
+dmsetup create raid_disk0 --table '0 512 unstriped 4 256 0 /dev/mapper/striped 0'
+dmsetup create raid_disk1 --table '0 512 unstriped 4 256 1 /dev/mapper/striped 0'
+dmsetup create raid_disk2 --table '0 512 unstriped 4 256 2 /dev/mapper/striped 0'
+dmsetup create raid_disk3 --table '0 512 unstriped 4 256 3 /dev/mapper/striped 0'
diff --git a/Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt b/Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt
new file mode 100644
index 000000000000..6efabba530f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt
@@ -0,0 +1,27 @@
+* ARM DynamIQ Shared Unit (DSU) Performance Monitor Unit (PMU)
+
+ARM DynamIQ Shared Unit (DSU) integrates one or more CPU cores
+with a shared L3 memory system, control logic and external interfaces to
+form a multicore cluster. The PMU enables gathering various statistics on
+the operations of the DSU. The PMU provides independent 32bit counters that
+can count any of the supported events, along with a 64bit cycle counter.
+The PMU is accessed via CPU system registers and has no MMIO component.
+
+** DSU PMU required properties:
+
+- compatible : should be one of :
+
+ "arm,dsu-pmu"
+
+- interrupts : Exactly 1 SPI must be listed.
+
+- cpus : List of phandles for the CPUs connected to this DSU instance.
+
+
+** Example:
+
+dsu-pmu-0 {
+ compatible = "arm,dsu-pmu";
+ interrupts = <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>;
+ cpus = <&cpu_0>, <&cpu_1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/firmware/sdei.txt b/Documentation/devicetree/bindings/arm/firmware/sdei.txt
new file mode 100644
index 000000000000..ee3f0ff49889
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/firmware/sdei.txt
@@ -0,0 +1,42 @@
+* Software Delegated Exception Interface (SDEI)
+
+Firmware implementing the SDEI functions described in ARM document number
+ARM DEN 0054A ("Software Delegated Exception Interface") can be used by
+Linux to receive notification of events such as those generated by
+firmware-first error handling, or from an IRQ that has been promoted to
+a firmware-assisted NMI.
+
+The interface provides a number of API functions for registering callbacks
+and enabling/disabling events. Functions are invoked by trapping to the
+privilege level of the SDEI firmware (specified as part of the binding
+below) and passing arguments in a manner specified by the "SMC Calling
+Convention" (ARM DEN 0028B):
+
+ r0 => 32-bit Function ID / return value
+ {r1 - r3} => Parameters
+
+Note that the immediate field of the trapping instruction must be set
+to #0.
+
+The SDEI_EVENT_REGISTER function registers a callback in the kernel
+text to handle the specified event number.
+
+The sdei node should be a child node of '/firmware' and have required
+properties:
+
+ - compatible : should contain:
+ * "arm,sdei-1.0" : For implementations complying to SDEI version 1.x.
+
+ - method : The method of calling the SDEI firmware. Permitted
+ values are:
+ * "smc" : SMC #0, with the register assignments specified in this
+ binding.
+ * "hvc" : HVC #0, with the register assignments specified in this
+ binding.
+Example:
+ firmware {
+ sdei {
+ compatible = "arm,sdei-1.0";
+ method = "smc";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
index 51336e5fc761..35c3c3460d17 100644
--- a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
@@ -14,3 +14,22 @@ following property before the previous one:
Example:
compatible = "marvell,armada-3720-db", "marvell,armada3720", "marvell,armada3710";
+
+
+Power management
+----------------
+
+For power management (particularly DVFS and AVS), the North Bridge
+Power Management component is needed:
+
+Required properties:
+- compatible : should contain "marvell,armada-3700-nb-pm", "syscon";
+- reg : the register start and length for the North Bridge
+ Power Management
+
+Example:
+
+nb_pm: syscon@14000 {
+ compatible = "marvell,armada-3700-nb-pm", "syscon";
+ reg = <0x14000 0x60>;
+};
diff --git a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
new file mode 100644
index 000000000000..cec8d5d74e26
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
@@ -0,0 +1,22 @@
+Arm TrustZone CryptoCell cryptographic engine
+
+Required properties:
+- compatible: Should be "arm,cryptocell-712-ree".
+- reg: Base physical address of the engine and length of memory mapped region.
+- interrupts: Interrupt number for the device.
+
+Optional properties:
+- interrupt-parent: The phandle for the interrupt controller that services
+ interrupts for this device.
+- clocks: Reference to the crypto engine clock.
+- dma-coherent: Present if dma operations are coherent.
+
+Examples:
+
+ arm_cc712: crypto@80000000 {
+ compatible = "arm,cryptocell-712-ree";
+ interrupt-parent = <&intc>;
+ interrupts = < 0 30 4 >;
+ reg = < 0x80000000 0x10000 >;
+
+ };
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index fbc07d12322f..30c3ce6b502e 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -1,7 +1,8 @@
Inside Secure SafeXcel cryptographic engine
Required properties:
-- compatible: Should be "inside-secure,safexcel-eip197".
+- compatible: Should be "inside-secure,safexcel-eip197" or
+ "inside-secure,safexcel-eip97".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt numbers for the rings and engine.
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
diff --git a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
index 4ca8dd4d7e66..a13fbdb4bd88 100644
--- a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+++ b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
@@ -2,7 +2,9 @@ Exynos Pseudo Random Number Generator
Required properties:
-- compatible : Should be "samsung,exynos4-rng".
+- compatible : One of:
+ - "samsung,exynos4-rng" for Exynos4210 and Exynos4412
+ - "samsung,exynos5250-prng" for Exynos5250+
- reg : Specifies base physical address and size of the registers map.
- clocks : Phandle to clock-controller plus clock-specifier pair.
- clock-names : "secss" as a clock name.
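+
+Example (an illustrative node for the new Exynos5250+ compatible; the register
+address and clock specifier are placeholders):
+
+	rng@10830400 {
+		compatible = "samsung,exynos5250-prng";
+		reg = <0x10830400 0x200>;
+		clocks = <&clock CLK_SSS>;
+		clock-names = "secss";
+	};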
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt
new file mode 100644
index 000000000000..970487fa40b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt
@@ -0,0 +1,19 @@
+* STMicroelectronics STM32 CRYP
+
+Required properties:
+- compatible: Should be "st,stm32f756-cryp".
+- reg: The address and length of the peripheral registers space
+- clocks: The input clock of the CRYP instance
+- interrupts: The CRYP interrupt
+
+Optional properties:
+- resets: The input reset of the CRYP instance
+
+Example:
+crypto@50060000 {
+ compatible = "st,stm32f756-cryp";
+ reg = <0x50060000 0x400>;
+ interrupts = <79>;
+ clocks = <&rcc 0 STM32F7_AHB2_CLOCK(CRYP)>;
+ resets = <&rcc STM32F7_AHB2_RESET(CRYP)>;
+};
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
index b3408cc57be6..1ae4748730a8 100644
--- a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -47,8 +47,8 @@ When the OS is not in control of the management interface (i.e. it's a guest),
the channel nodes appear on their own, not under a management node.
Required properties:
-- compatible: must contain "qcom,hidma-1.0" for initial HW or "qcom,hidma-1.1"
-for MSI capable HW.
+- compatible: must contain "qcom,hidma-1.0" for initial HW or
+ "qcom,hidma-1.1"/"qcom,hidma-1.2" for MSI capable HW.
- reg: Addresses for the transfer and event channel
- interrupts: Should contain the event interrupt
- desc-count: Number of asynchronous requests this channel can handle
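+
+Example (a guest-side channel node sketch for MSI capable HW; the addresses,
+interrupt number and label are placeholders):
+
+	hidma_24: dma-controller@5c050000 {
+		compatible = "qcom,hidma-1.1";
+		reg = <0x5c050000 0x1000>,
+		      <0x5c0b0000 0x1000>;
+		interrupts = <0 389 0>;
+		desc-count = <10>;
+	};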
diff --git a/Documentation/devicetree/bindings/gpio/gpio-axp209.txt b/Documentation/devicetree/bindings/gpio/gpio-axp209.txt
index a6611304dd3c..fc42b2caa06d 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-axp209.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-axp209.txt
@@ -1,10 +1,17 @@
-AXP209 GPIO controller
+AXP209 GPIO & pinctrl controller
This driver follows the usual GPIO bindings found in
Documentation/devicetree/bindings/gpio/gpio.txt
+This driver follows the usual pinctrl bindings found in
+Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+This driver employs the per-pin muxing pattern.
+
Required properties:
-- compatible: Should be "x-powers,axp209-gpio"
+- compatible: Should be one of:
+ - "x-powers,axp209-gpio"
+ - "x-powers,axp813-gpio"
- #gpio-cells: Should be two. The first cell is the pin number and the
second is the GPIO flags.
- gpio-controller: Marks the device node as a GPIO controller.
@@ -28,3 +35,41 @@ axp209: pmic@34 {
#gpio-cells = <2>;
};
};
+
+The GPIOs can be muxed to other functions; the mux configuration must
+therefore be described in a subnode of axp_gpio.
+
+Example:
+
+&axp_gpio {
+ gpio0_adc: gpio0-adc {
+ pins = "GPIO0";
+ function = "adc";
+ };
+};
+
+&example_node {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio0_adc>;
+};
+
+GPIOs and their functions
+-------------------------
+
+Each GPIO is independent of the others (i.e. GPIO0 in gpio_in function does
+not force GPIO1 and GPIO2 into gpio_in function as well).
+
+axp209
+------
+GPIO | Functions
+------------------------
+GPIO0 | gpio_in, gpio_out, ldo, adc
+GPIO1 | gpio_in, gpio_out, ldo, adc
+GPIO2 | gpio_in, gpio_out
+
+axp813
+------
+GPIO | Functions
+------------------------
+GPIO0 | gpio_in, gpio_out, ldo, adc
+GPIO1 | gpio_in, gpio_out, ldo
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index a7ac460ad657..9474138d776e 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -5,7 +5,7 @@ Required Properties:
- compatible: should contain one or more of the following:
- "renesas,gpio-r8a7743": for R8A7743 (RZ/G1M) compatible GPIO controller.
- "renesas,gpio-r8a7745": for R8A7745 (RZ/G1E) compatible GPIO controller.
- - "renesas,gpio-r8a7778": for R8A7778 (R-Mobile M1) compatible GPIO controller.
+ - "renesas,gpio-r8a7778": for R8A7778 (R-Car M1) compatible GPIO controller.
- "renesas,gpio-r8a7779": for R8A7779 (R-Car H1) compatible GPIO controller.
- "renesas,gpio-r8a7790": for R8A7790 (R-Car H2) compatible GPIO controller.
- "renesas,gpio-r8a7791": for R8A7791 (R-Car M2-W) compatible GPIO controller.
diff --git a/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt b/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt
index 367c8203213b..3ac02988a1a5 100644
--- a/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt
+++ b/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt
@@ -22,8 +22,9 @@ Required properties for pwm-tacho node:
- compatible : should be "aspeed,ast2400-pwm-tacho" for AST2400 and
"aspeed,ast2500-pwm-tacho" for AST2500.
-- clocks : a fixed clock providing input clock frequency(PWM
- and Fan Tach clock)
+- clocks : phandle to clock provider with the clock number in the second cell
+
+- resets : phandle to reset controller with the reset number in the second cell
fan subnode format:
===================
@@ -48,19 +49,14 @@ Required properties for each child node:
Examples:
-pwm_tacho_fixed_clk: fixedclk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
-};
-
pwm_tacho: pwmtachocontroller@1e786000 {
#address-cells = <1>;
#size-cells = <1>;
#cooling-cells = <2>;
reg = <0x1E786000 0x1000>;
compatible = "aspeed,ast2500-pwm-tacho";
- clocks = <&pwm_tacho_fixed_clk>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ resets = <&syscon ASPEED_RESET_PWM>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default>;
diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
index f413e82c8b83..1e6ee3deb4fa 100644
--- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
@@ -15,7 +15,6 @@ Required properties:
- "clkin" for the reference clock (typically XTAL)
- "core" for the SAR ADC core clock
optional clocks:
- - "sana" for the analog clock
- "adc_clk" for the ADC (sampling) clock
- "adc_sel" for the ADC (sampling) clock mux
- vref-supply: the regulator supply for the ADC reference voltage
diff --git a/Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt b/Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt
index 674e133b7cd7..034fc2ba100e 100644
--- a/Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt
@@ -8,6 +8,7 @@ Required properties:
- reg: memory window mapping address and length
- clocks: Input clock used to derive the sample clock. Expected to be the
SoC's APB clock.
+- resets: Reset controller phandle
- #io-channel-cells: Must be set to <1> to indicate channels are selected
by index.
@@ -15,6 +16,7 @@ Example:
adc@1e6e9000 {
compatible = "aspeed,ast2400-adc";
reg = <0x1e6e9000 0xb0>;
- clocks = <&clk_apb>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ resets = <&syscon ASPEED_RESET_ADC>;
#io-channel-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
index 552e7a83951d..6469a4cd2a6d 100644
--- a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
@@ -17,6 +17,11 @@ Required properties:
This property uses the IRQ edge types values: IRQ_TYPE_EDGE_RISING ,
IRQ_TYPE_EDGE_FALLING or IRQ_TYPE_EDGE_BOTH
+Optional properties:
+ - dmas: Phandle to dma channel for the ADC.
+ - dma-names: Must be "rx" when dmas property is being used.
+ See ../../dma/dma.txt for details.
+
Example:
adc: adc@fc030000 {
@@ -31,4 +36,6 @@ adc: adc@fc030000 {
vddana-supply = <&vdd_3v3_lp_reg>;
vref-supply = <&vdd_3v3_lp_reg>;
atmel,trigger-edge-type = <IRQ_TYPE_EDGE_BOTH>;
+ dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(25))>;
+ dma-names = "rx";
}
diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
new file mode 100644
index 000000000000..e9ebb8a20e0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
@@ -0,0 +1,13 @@
+Device-Tree bindings for sigma delta modulator
+
+Required properties:
+- compatible: should be "ads1201", "sd-modulator". "sd-modulator" can be use
+ as a generic SD modulator if modulator not specified in compatible list.
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
+
+Example node:
+
+ ads1202: adc@0 {
+ compatible = "sd-modulator";
+ #io-channel-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
index 48bfcaa3ffcd..e8bb8243e92c 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -62,6 +62,15 @@ Required properties:
- st,adc-channels: List of single-ended channels muxed for this ADC.
It can have up to 16 channels on stm32f4 or 20 channels on stm32h7, numbered
from 0 to 15 or 19 (resp. for in0..in15 or in0..in19).
+- st,adc-diff-channels: List of differential channels muxed for this ADC.
+  Depending on the part used, some channels can be configured as differential
+  instead of single-ended (e.g. on stm32h7). List positive and negative input
+  pairs as <vinp vinn>, <vinp vinn>,... vinp and vinn are numbered from 0 to
+  19 on stm32h7.
+  Note: At least one of "st,adc-channels" or "st,adc-diff-channels" is
+  required. Both properties can be used together: some channels may be used
+  as single-ended and others as differential (mixed). But a channel cannot
+  be configured both as single-ended and differential (invalid).
- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
Documentation/devicetree/bindings/iio/iio-bindings.txt
@@ -111,3 +120,18 @@ Example:
...
other adc child nodes follow...
};
+
+Example to setup:
+- channel 1 as single-ended
+- channels 2 & 3 as differential (with resp. 6 & 7 negative inputs)
+
+ adc: adc@40022000 {
+ compatible = "st,stm32h7-adc-core";
+ ...
+ adc1: adc@0 {
+ compatible = "st,stm32h7-adc";
+ ...
+ st,adc-channels = <1>;
+ st,adc-diff-channels = <2 6>, <3 7>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
new file mode 100644
index 000000000000..911492da48f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
@@ -0,0 +1,128 @@
+STMicroelectronics STM32 DFSDM ADC device driver
+
+STM32 DFSDM ADC is a sigma delta analog-to-digital converter dedicated to
+interfacing external sigma delta modulators with STM32 microcontrollers.
+It is mainly targeted for:
+- Sigma delta modulators (motor control, metering...)
+- PDM microphones (audio digital microphone)
+
+It features up to 8 serial digital interfaces (SPI or Manchester) and
+up to 4 filters on stm32h7.
+
+Each child node corresponds to a filter instance.
+
+Contents of a STM32 DFSDM root node:
+------------------------------------
+Required properties:
+- compatible: Should be "st,stm32h7-dfsdm".
+- reg: Offset and length of the DFSDM block register set.
+- clocks: IP and serial interfaces clocking. Should be set according
+ to rcc clock ID and "clock-names".
+- clock-names: Input clock name "dfsdm" must be defined; "audio" is
+  optional. If "audio" is defined, CLKOUT is based on the audio clock;
+  otherwise the "dfsdm" clock is used.
+- #interrupt-cells = <1>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- spi-max-frequency: Required only for SPI master mode.
+  SPI clock OUT frequency (Hz). This clock must be set according
+  to the "clocks" property. The frequency must be a multiple of the rcc
+  clock frequency; if not, the SPI CLKOUT frequency will not be
+  accurate.
+
+Contents of a STM32 DFSDM child nodes:
+--------------------------------------
+
+Required properties:
+- compatible: Must be:
+ "st,stm32-dfsdm-adc" for sigma delta ADCs
+ "st,stm32-dfsdm-dmic" for audio digital microphone.
+- reg: Specifies the DFSDM filter instance used.
+- interrupts: IRQ lines connected to each DFSDM filter instance.
+- st,adc-channels: List of single-ended channels muxed for this ADC.
+ valid values:
+ "st,stm32h7-dfsdm" compatibility: 0 to 7.
+- st,adc-channel-names: List of single-ended channel names.
+- st,filter-order: SinC filter order from 0 to 5.
+ 0: FastSinC
+ [1-5]: order 1 to 5.
+  For audio purposes it is recommended to use order 3 to 5.
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
+
+Required properties for "st,stm32-dfsdm-adc" compatibility:
+- io-channels: From common IIO binding. Used to pipe external sigma delta
+ modulator or internal ADC output to DFSDM channel.
+  This is not required for "st,stm32-dfsdm-dmic" compatibility, as the
+  PDM microphone is bound in the audio DT node.
+
+Required properties for "st,stm32-dfsdm-pdm" compatibility:
+- #sound-dai-cells: Must be set to 0.
+- dma: DMA controller phandle and DMA request line associated to the
+ filter instance (specified by the field "reg")
+- dma-names: Must be "rx"
+
+Optional properties:
+- st,adc-channel-types: Single-ended channel input type.
+ - "SPI_R": SPI with data on rising edge (default)
+ - "SPI_F": SPI with data on falling edge
+ - "MANCH_R": manchester codec, rising edge = logic 0
+ - "MANCH_F": manchester codec, falling edge = logic 1
+- st,adc-channel-clk-src: Conversion clock source.
+ - "CLKIN": external SPI clock (CLKIN x)
+ - "CLKOUT": internal SPI clock (CLKOUT) (default)
+ - "CLKOUT_F": internal SPI clock divided by 2 (falling edge).
+ - "CLKOUT_R": internal SPI clock divided by 2 (rising edge).
+
+- st,adc-alt-channel: Must be defined if two sigma delta modulators are
+  connected to the same SPI input.
+ If not set, channel n is connected to SPI input n.
+ If set, channel n is connected to SPI input n + 1.
+
+- st,filter0-sync: Set to 1 to synchronize with DFSDM filter instance 0.
+  Used for multi-microphone synchronization.
+
+Example of a sigma delta ADC connected on DFSDM SPI port 0
+and a PDM microphone connected on DFSDM SPI port 1:
+
+ ads1202: simple_sd_adc@0 {
+ compatible = "ads1202";
+ #io-channel-cells = <1>;
+ };
+
+ dfsdm: dfsdm@40017000 {
+ compatible = "st,stm32h7-dfsdm";
+ reg = <0x40017000 0x400>;
+ clocks = <&rcc DFSDM1_CK>;
+ clock-names = "dfsdm";
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dfsdm_adc0: filter@0 {
+ compatible = "st,stm32-dfsdm-adc";
+ #io-channel-cells = <1>;
+ reg = <0>;
+ interrupts = <110>;
+ st,adc-channels = <0>;
+ st,adc-channel-names = "sd_adc0";
+ st,adc-channel-types = "SPI_F";
+ st,adc-channel-clk-src = "CLKOUT";
+ io-channels = <&ads1202 0>;
+ st,filter-order = <3>;
+ };
+ dfsdm_pdm1: filter@1 {
+ compatible = "st,stm32-dfsdm-dmic";
+ reg = <1>;
+ interrupts = <111>;
+ dmas = <&dmamux1 102 0x400 0x00>;
+ dma-names = "rx";
+ st,adc-channels = <1>;
+ st,adc-channel-names = "dmic1";
+ st,adc-channel-types = "SPI_R";
+ st,adc-channel-clk-src = "CLKOUT";
+ st,filter-order = <5>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/health/max30102.txt b/Documentation/devicetree/bindings/iio/health/max30102.txt
index 8629c18b0e78..ef2ca0a0306f 100644
--- a/Documentation/devicetree/bindings/iio/health/max30102.txt
+++ b/Documentation/devicetree/bindings/iio/health/max30102.txt
@@ -1,9 +1,11 @@
Maxim MAX30102 heart rate and pulse oximeter sensor
+Maxim MAX30105 optical particle-sensing module
* https://datasheets.maximintegrated.com/en/ds/MAX30102.pdf
+* https://datasheets.maximintegrated.com/en/ds/MAX30105.pdf
Required properties:
- - compatible: must be "maxim,max30102"
+ - compatible: must be "maxim,max30102" or "maxim,max30105"
- reg: the I2C address of the sensor
- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
@@ -12,8 +14,10 @@ Required properties:
interrupt client node bindings.
Optional properties:
- - maxim,red-led-current-microamp: configuration for RED LED current
+ - maxim,red-led-current-microamp: configuration for red LED current
- maxim,ir-led-current-microamp: configuration for IR LED current
+ - maxim,green-led-current-microamp: configuration for green LED current
+ (max30105 only)
Note that each step is approximately 200 microamps, ranging from 0 uA to
50800 uA.
diff --git a/Documentation/devicetree/bindings/iio/light/uvis25.txt b/Documentation/devicetree/bindings/iio/light/uvis25.txt
new file mode 100644
index 000000000000..3041207e3f3c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/uvis25.txt
@@ -0,0 +1,23 @@
+* ST UVIS25 UV sensor
+
+Required properties:
+- compatible: should be "st,uvis25"
+- reg: i2c address of the sensor / spi cs line
+
+Optional properties:
+- interrupt-parent: should be the phandle for the interrupt controller
+- interrupts: interrupt mapping for IRQ. It should be configured with
+ flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
+ IRQ_TYPE_EDGE_FALLING.
+
+ Refer to interrupt-controller/interrupts.txt for generic interrupt
+ client node bindings.
+
+Example:
+
+uvis25@47 {
+ compatible = "st,uvis25";
+ reg = <0x47>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+};
diff --git a/Documentation/devicetree/bindings/input/hid-over-i2c.txt b/Documentation/devicetree/bindings/input/hid-over-i2c.txt
index 28e8bd8b7d64..4d3da9d91de4 100644
--- a/Documentation/devicetree/bindings/input/hid-over-i2c.txt
+++ b/Documentation/devicetree/bindings/input/hid-over-i2c.txt
@@ -31,7 +31,7 @@ device-specific compatible properties, which should be used in addition to the
- vdd-supply: phandle of the regulator that provides the supply voltage.
- post-power-on-delay-ms: time required by the device after enabling its regulators
- before it is ready for communication. Must be used with 'vdd-supply'.
+ or powering it on, before it is ready for communication.
Example:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
index f320dcd6e69b..8ced1696c325 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
@@ -12,7 +12,7 @@ Required properties:
registers
- interrupt-controller: Identifies the node as an interrupt controller
- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value shall be 1
+ interrupt source. The value shall be 2
Please refer to interrupts.txt in this directory for details of the common
Interrupt Controllers bindings used by client devices.
@@ -32,6 +32,6 @@ local_intc: local_intc {
compatible = "brcm,bcm2836-l1-intc";
reg = <0x40000000 0x100>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
interrupt-parent = <&local_intc>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt
new file mode 100644
index 000000000000..35f752706e7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt
@@ -0,0 +1,30 @@
+Android Goldfish PIC
+
+Android Goldfish programmable interrupt device used by the Android
+emulator.
+
+Required properties:
+
+- compatible : should contain "google,goldfish-pic"
+- reg : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example for MIPS when used in cascade mode:
+
+ cpuintc {
+ #interrupt-cells = <0x1>;
+ #address-cells = <0>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ interrupt-controller@1f000000 {
+ compatible = "google,goldfish-pic";
+ reg = <0x1f000000 0x1000>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <0x2>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
new file mode 100644
index 000000000000..6c9074f84a51
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
@@ -0,0 +1,49 @@
+* Texas Instruments - LM3692x Highly Efficient White LED Driver
+
+The LM3692x is an ultra-compact, highly efficient
+white-LED driver designed for LCD display backlighting.
+
+The main difference between the LM36922 and LM36923 is the number of
+LED strings supported. The LM36922 supports two strings while the LM36923
+supports three.
+
+Required properties:
+ - compatible:
+ "ti,lm36922"
+ "ti,lm36923"
+ - reg : I2C slave address
+ - #address-cells : 1
+ - #size-cells : 0
+
+Optional properties:
+ - enable-gpios : gpio pin to enable/disable the device.
+ - vled-supply : LED supply
+
+Required child properties:
+ - reg : 0
+
+Optional child properties:
+ - label : see Documentation/devicetree/bindings/leds/common.txt
+ - linux,default-trigger :
+ see Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+
+led-controller@36 {
+ compatible = "ti,lm3692x";
+ reg = <0x36>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ vled-supply = <&vbatt>;
+
+ led@0 {
+ reg = <0>;
+ label = "white:backlight_cluster";
+ linux,default-trigger = "backlight";
+ };
+};
+
+For more product information please see the link below:
+http://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
diff --git a/Documentation/devicetree/bindings/leds/leds-lp8860.txt b/Documentation/devicetree/bindings/leds/leds-lp8860.txt
index aad38dd94d4b..5f0e892ad759 100644
--- a/Documentation/devicetree/bindings/leds/leds-lp8860.txt
+++ b/Documentation/devicetree/bindings/leds/leds-lp8860.txt
@@ -6,23 +6,39 @@ current sinks that can be controlled by a PWM input
signal, a SPI/I2C master, or both.
Required properties:
- - compatible:
+ - compatible :
"ti,lp8860"
- - reg - I2C slave address
- - label - Used for naming LEDs
+ - reg : I2C slave address
+ - #address-cells : 1
+ - #size-cells : 0
Optional properties:
- - enable-gpio - gpio pin to enable/disable the device.
- - supply - "vled" - LED supply
+ - enable-gpios : gpio pin to enable (active high)/disable the device.
+ - vled-supply : LED supply
+
+Required child properties:
+ - reg : 0
+
+Optional child properties:
+ - label : see Documentation/devicetree/bindings/leds/common.txt
+ - linux,default-trigger :
+ see Documentation/devicetree/bindings/leds/common.txt
Example:
-leds: leds@6 {
+led-controller@2d {
compatible = "ti,lp8860";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0x2d>;
- label = "display_cluster";
- enable-gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
vled-supply = <&vbatt>;
+
+ led@0 {
+ reg = <0>;
+ label = "white:backlight";
+ linux,default-trigger = "backlight";
+ };
}
For more product information please see the link below:
diff --git a/Documentation/devicetree/bindings/mfd/mc13xxx.txt b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
index ac235fe385fc..8261ea73278a 100644
--- a/Documentation/devicetree/bindings/mfd/mc13xxx.txt
+++ b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
@@ -130,7 +130,7 @@ ecspi@70010000 { /* ECSPI1 */
#size-cells = <0>;
led-control = <0x000 0x000 0x0e0 0x000>;
- sysled {
+ sysled@3 {
reg = <3>;
label = "system:red:live";
linux,default-trigger = "heartbeat";
diff --git a/Documentation/devicetree/bindings/mfd/syscon.txt b/Documentation/devicetree/bindings/mfd/syscon.txt
index 8b92d4576c42..25d9e9c2fd53 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.txt
+++ b/Documentation/devicetree/bindings/mfd/syscon.txt
@@ -16,9 +16,17 @@ Required properties:
Optional property:
- reg-io-width: the size (in bytes) of the IO accesses that should be
performed on the device.
+- hwlocks: reference to a phandle of a hardware spinlock provider node.
Examples:
gpr: iomuxc-gpr@20e0000 {
compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
reg = <0x020e0000 0x38>;
+ hwlocks = <&hwlock1 1>;
+};
+
+hwlock1: hwspinlock@40500000 {
+ ...
+ reg = <0x40500000 0x1000>;
+ #hwlock-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index 72d2a734ab85..9b8017670870 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -12,6 +12,8 @@ Required properties:
"mediatek,mt8173-mmc": for mmc host ip compatible with mt8173
"mediatek,mt2701-mmc": for mmc host ip compatible with mt2701
"mediatek,mt2712-mmc": for mmc host ip compatible with mt2712
+ "mediatek,mt7623-mmc", "mediatek,mt2701-mmc": for MT7623 SoC
+
- reg: physical base address of the controller and length
- interrupts: Should contain MSDC interrupt number
- clocks: Should contain phandle for the clock feeding the MMC controller
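+
+Example (a sketch of an MT7623 node using the MT2701 fallback; the addresses,
+interrupt and clock specifiers are placeholders):
+
+	mmc0: mmc@11230000 {
+		compatible = "mediatek,mt7623-mmc", "mediatek,mt2701-mmc";
+		reg = <0x11230000 0x1000>;
+		interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&pericfg CLK_PERI_MSDC30_0>,
+			 <&topckgen CLK_TOP_MSDC30_0_SEL>;
+		clock-names = "source", "hclk";
+	};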
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 3c6762430fd9..d8685cb83325 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -26,6 +26,7 @@ Required properties:
"renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
"renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
"renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
+ "renesas,sdhi-r8a77995" - SDHI IP on R8A77995 SoC
"renesas,sdhi-shmobile" - a generic sh-mobile SDHI controller
"renesas,rcar-gen1-sdhi" - a generic R-Car Gen1 SDHI controller
"renesas,rcar-gen2-sdhi" - a generic R-Car Gen2 or RZ/G1
diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
index c34aa6f8a424..63d4d626fbd5 100644
--- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
@@ -12,7 +12,7 @@ Required properties:
- reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory"
- interrupts : Should contain the interrupt for the device
- clocks : The clocks needed by the QuadSPI controller
- - clock-names : the name of the clocks
+ - clock-names : Should contain the names of the clocks: "qspi_en" and "qspi".
Optional properties:
- fsl,qspi-has-second-chip: The controller has two buses, bus A and bus B.
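+
+Example (an illustrative node showing the clocks/clock-names pairing; the
+Vybrid addresses and clock specifiers are placeholders):
+
+	qspi0: quadspi@40044000 {
+		compatible = "fsl,vf610-qspi";
+		reg = <0x40044000 0x1000>, <0x20000000 0x10000000>;
+		reg-names = "QuadSPI", "QuadSPI-memory";
+		interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clks VF610_CLK_QSPI0_EN>, <&clks VF610_CLK_QSPI0>;
+		clock-names = "qspi_en", "qspi";
+	};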
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt b/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
index b6e8bfd024f4..e9f01a963a0a 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
@@ -9,13 +9,14 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
Required properties:
+ - compatible: "ti,omap2-onenand"
- reg: The CS line the peripheral is connected to
- - gpmc,device-width Width of the ONENAND device connected to the GPMC
+ - gpmc,device-width: Width of the ONENAND device connected to the GPMC
in bytes. Must be 1 or 2.
Optional properties:
- - dma-channel: DMA Channel index
+ - int-gpios: GPIO specifier for the INT pin.
For inline partition table parsing (optional):
@@ -35,6 +36,7 @@ Example for an OMAP3430 board:
#size-cells = <1>;
onenand@0 {
+ compatible = "ti,omap2-onenand";
reg = <0 0 0>; /* CS0, offset 0 */
gpmc,device-width = <2>;
diff --git a/Documentation/devicetree/bindings/mtd/marvell-nand.txt b/Documentation/devicetree/bindings/mtd/marvell-nand.txt
new file mode 100644
index 000000000000..c08fb477b3c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/marvell-nand.txt
@@ -0,0 +1,123 @@
+Marvell NAND Flash Controller (NFC)
+
+Required properties:
+- compatible: can be one of the following:
+ * "marvell,armada-8k-nand-controller"
+ * "marvell,armada370-nand-controller"
+ * "marvell,pxa3xx-nand-controller"
+ * "marvell,armada-8k-nand" (deprecated)
+ * "marvell,armada370-nand" (deprecated)
+ * "marvell,pxa3xx-nand" (deprecated)
+ Compatibles marked deprecated support only the old bindings described
+ at the bottom.
+- reg: NAND flash controller memory area.
+- #address-cells: shall be set to 1. Encode the NAND CS.
+- #size-cells: shall be set to 0.
+- interrupts: shall define the NAND controller interrupt.
+- clocks: shall reference the NAND controller clock.
+- marvell,system-controller: Set to retrieve the syscon node that handles
+ NAND controller related registers (only required with the
+ "marvell,armada-8k-nand[-controller]" compatibles).
+
+Optional properties:
+- label: see partition.txt. New platforms shall omit this property.
+- dmas: shall reference DMA channel associated to the NAND controller.
+ This property is only used with "marvell,pxa3xx-nand[-controller]"
+ compatible strings.
+- dma-names: shall be "rxtx".
+ This property is only used with "marvell,pxa3xx-nand[-controller]"
+ compatible strings.
+
+Optional children nodes:
+Children nodes represent the available NAND chips.
+
+Required properties:
+- reg: shall contain the native Chip Select ids (0-3).
+- nand-rb: see nand.txt (0-1).
+
+Optional properties:
+- marvell,nand-keep-config: orders the driver not to take the timings
+  from the core and to leave them completely untouched. Bootloader
+  timings will then be used.
+- label: MTD name.
+- nand-on-flash-bbt: see nand.txt.
+- nand-ecc-mode: see nand.txt. Will use hardware ECC if not specified.
+- nand-ecc-algo: see nand.txt. This property is mainly useful when not
+  using hardware ECC. However, it may be added when using hardware ECC
+  for clarification, but it will be ignored by the driver because the
+  ECC mode is chosen depending on the page size and the strength required
+  by the NAND chip. This value may be overwritten with the
+  nand-ecc-strength property.
+- nand-ecc-strength: see nand.txt.
+- nand-ecc-step-size: see nand.txt. Marvell's NAND flash controller uses
+  a fixed strength (1-bit for Hamming, 16-bit for BCH), so the actual
+  step size will shrink or grow in order to fit the required strength.
+  Step sizes are not completely arbitrary and follow certain patterns
+  described in AN-379, "Marvell SoC NFC ECC".
+
+See Documentation/devicetree/bindings/mtd/nand.txt for more details on
+generic bindings.
+
+
+Example:
+nand_controller: nand-controller@d0000 {
+ compatible = "marvell,armada370-nand-controller";
+ reg = <0xd0000 0x54>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&coredivclk 0>;
+
+ nand@0 {
+ reg = <0>;
+ label = "main-storage";
+ nand-rb = <0>;
+ nand-ecc-mode = "hw";
+ marvell,nand-keep-config;
+ nand-on-flash-bbt;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "Rootfs";
+ reg = <0x00000000 0x40000000>;
+ };
+ };
+ };
+};
+
+
+Note on legacy bindings: In not-yet-updated device trees, one can find
+bindings slightly different from those described above, with the legacy
+properties described below as well as the partitions node at the root of
+a so-called "nand" node (without a clear controller/chip separation).
+
+Legacy properties:
+- marvell,nand-enable-arbiter: To enable the arbiter. All boards blindly
+  used it; this bit was set by the bootloader for many boards and, even
+  though it is marked reserved in several datasheets, it might be needed
+  to set it (otherwise it is harmless). So whether or not this property
+  is set, the bit is selected by the driver.
+- num-cs: Number of chip-select lines to use. All boards blindly set this
+  to 1, and for a reason: other values would have failed. The value of
+  this property is ignored.
+
+Example:
+
+ nand0: nand@43100000 {
+ compatible = "marvell,pxa3xx-nand";
+ reg = <0x43100000 90>;
+ interrupts = <45>;
+ dmas = <&pdma 97 0>;
+ dma-names = "rxtx";
+ #address-cells = <1>;
+ marvell,nand-keep-config;
+ marvell,nand-enable-arbiter;
+ num-cs = <1>;
+ /* Partitions (optional) */
+ };
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
index 0431841de781..1c88526dedfc 100644
--- a/Documentation/devicetree/bindings/mtd/mtk-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -12,8 +12,10 @@ tree nodes.
The first part of NFC is NAND Controller Interface (NFI) HW.
Required NFI properties:
-- compatible: Should be one of "mediatek,mt2701-nfc",
- "mediatek,mt2712-nfc".
+- compatible: Should be one of
+ "mediatek,mt2701-nfc",
+ "mediatek,mt2712-nfc",
+ "mediatek,mt7622-nfc".
- reg: Base physical address and size of NFI.
- interrupts: Interrupts of NFI.
- clocks: NFI required clocks.
@@ -142,7 +144,10 @@ Example:
==============
Required BCH properties:
-- compatible: Should be one of "mediatek,mt2701-ecc", "mediatek,mt2712-ecc".
+- compatible: Should be one of
+ "mediatek,mt2701-ecc",
+ "mediatek,mt2712-ecc",
+ "mediatek,mt7622-ecc".
- reg: Base physical address and size of ECC.
- interrupts: Interrupts of ECC.
- clocks: ECC required clocks.
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index 133f3813719c..8bb11d809429 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -43,6 +43,7 @@ Optional NAND chip properties:
This is particularly useful when only the in-band area is
used by the upper layers, and you want to make your NAND
as reliable as possible.
+- nand-rb: shall contain the native Ready/Busy ids.
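+
+A minimal chip-node sketch showing nand-rb (illustrative only; controller
+specific properties are omitted and the Ready/Busy id is a placeholder):
+
+	nand@0 {
+		reg = <0>;
+		nand-rb = <0>;
+	};
+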
The ECC strength and ECC step size properties define the correction capability
of a controller. Together, they say a controller can correct "{strength} bit
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index 9a734d808aa7..b7336b9d6a3c 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -2,7 +2,10 @@
Required properties:
-- compatible: should be "brcm,bcm7445-switch-v4.0" or "brcm,bcm7278-switch-v4.0"
+- compatible: should be one of
+ "brcm,bcm7445-switch-v4.0"
+ "brcm,bcm7278-switch-v4.0"
+ "brcm,bcm7278-switch-v4.8"
- reg: addresses and length of the register sets for the device, must be 6
pairs of register addresses and lengths
- interrupts: interrupts for the devices, must be two interrupts
diff --git a/Documentation/devicetree/bindings/net/can/can-transceiver.txt b/Documentation/devicetree/bindings/net/can/can-transceiver.txt
new file mode 100644
index 000000000000..0011f53ff159
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/can-transceiver.txt
@@ -0,0 +1,24 @@
+Generic CAN transceiver Device Tree binding
+-------------------------------------------
+
+A CAN transceiver typically limits the maximum bit rate in standard CAN
+and CAN FD modes. Typically these limitations are static and the
+transceivers themselves provide no way to detect this limitation at
+runtime. For this situation, the "can-transceiver" node can be used.
+
+Required Properties:
+	max-bitrate: a positive non-zero value that determines the maximum
+		     bit rate at which CAN/CAN FD can run. Any other value
+		     will be ignored.
+
+Examples:
+
+Based on Texas Instrument's TCAN1042HGV CAN Transceiver
+
+m_can0 {
+ ....
+ can-transceiver {
+ max-bitrate = <5000000>;
+ };
+ ...
+};
diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
index 56d6cc336e1c..bfc0c433654f 100644
--- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
+++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
@@ -18,6 +18,12 @@ Optional properties:
- xceiver-supply: Regulator that powers the CAN transceiver
+- big-endian: This optional property indicates that the registers of the
+  FlexCAN controller are big endian. If this property is not present in
+  the device tree node, the controller is assumed to be little endian;
+  if it is present, the controller is assumed to be big endian.
+
Example:
can@1c000 {
diff --git a/Documentation/devicetree/bindings/net/can/m_can.txt b/Documentation/devicetree/bindings/net/can/m_can.txt
index 63e90421d029..ed614383af9c 100644
--- a/Documentation/devicetree/bindings/net/can/m_can.txt
+++ b/Documentation/devicetree/bindings/net/can/m_can.txt
@@ -43,6 +43,11 @@ Required properties:
Please refer to 2.4.1 Message RAM Configuration in
Bosch M_CAN user manual for details.
+Optional Subnode:
+- can-transceiver : can-transceiver subnode describing the maximum bit rate
+  that can be used for CAN/CAN-FD modes. See
+  Documentation/devicetree/bindings/net/can/can-transceiver.txt for details.
Example:
SoC dtsi:
m_can1: can@20e8000 {
@@ -63,4 +68,8 @@ Board dts:
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_m_can1>;
status = "enabled";
+
+ can-transceiver {
+ max-bitrate = <5000000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index 06bb7cc334c8..94a7f33ac5e9 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -2,7 +2,9 @@ Renesas R-Car CAN controller Device Tree Bindings
-------------------------------------------------
Required properties:
-- compatible: "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
+- compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
+ "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
+ "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
"renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
"renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
"renesas,can-r8a7791" if CAN controller is a part of R8A7791 SoC.
@@ -12,7 +14,8 @@ Required properties:
"renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
"renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
"renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
- "renesas,rcar-gen2-can" for a generic R-Car Gen2 compatible device.
+ "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
+ compatible device.
"renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
diff --git a/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt b/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
new file mode 100644
index 000000000000..6c559981d110
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
@@ -0,0 +1,92 @@
+Cortina Systems Gemini Ethernet Controller
+==========================================
+
+This ethernet controller is found in the Gemini SoC family:
+StorLink SL3512 and SL3516, also known as Cortina Systems
+CS3512 and CS3516.
+
+Required properties:
+- compatible: must be "cortina,gemini-ethernet"
+- reg: must contain the global registers and the V-bit and A-bit
+ memory areas, in total three register sets.
+- syscon: a phandle to the system controller
+- #address-cells: must be specified, must be <1>
+- #size-cells: must be specified, must be <1>
+- ranges: should be stated (empty), giving a 1:1 address translation
+  for the subnodes
+
+The subnodes represent the two ethernet ports in this device.
+They are not independent of each other since they share resources
+in the parent node, and are thus children.
+
+Required subnodes:
+- port0: contains the resources for ethernet port 0
+- port1: contains the resources for ethernet port 1
+
+Required subnode properties:
+- compatible: must be "cortina,gemini-ethernet-port"
+- reg: must contain two register areas: the DMA/TOE memory and
+ the GMAC memory area of the port
+- interrupts: should contain the interrupt line of the port.
+  This is nominally a level interrupt, active high.
+- resets: this must provide an SoC-integrated reset line for
+ the port.
+- clocks: this should contain a handle to the PCLK clock for
+ clocking the silicon in this port
+- clock-names: must be "PCLK"
+
+Optional subnode properties:
+- phy-mode: see ethernet.txt
+- phy-handle: see ethernet.txt
+
+Example:
+
+mdio-bus {
+ (...)
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ device_type = "ethernet-phy";
+ };
+ phy1: ethernet-phy@3 {
+ reg = <3>;
+ device_type = "ethernet-phy";
+ };
+};
+
+
+ethernet@60000000 {
+ compatible = "cortina,gemini-ethernet";
+ reg = <0x60000000 0x4000>, /* Global registers, queue */
+ <0x60004000 0x2000>, /* V-bit */
+ <0x60006000 0x2000>; /* A-bit */
+ syscon = <&syscon>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gmac0: ethernet-port@0 {
+ compatible = "cortina,gemini-ethernet-port";
+ reg = <0x60008000 0x2000>, /* Port 0 DMA/TOE */
+ <0x6000a000 0x2000>; /* Port 0 GMAC */
+ interrupt-parent = <&intcon>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&syscon GEMINI_RESET_GMAC0>;
+ clocks = <&syscon GEMINI_CLK_GATE_GMAC0>;
+ clock-names = "PCLK";
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ };
+
+ gmac1: ethernet-port@1 {
+ compatible = "cortina,gemini-ethernet-port";
+ reg = <0x6000c000 0x2000>, /* Port 1 DMA/TOE */
+ <0x6000e000 0x2000>; /* Port 1 GMAC */
+ interrupt-parent = <&intcon>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&syscon GEMINI_RESET_GMAC1>;
+ clocks = <&syscon GEMINI_CLK_GATE_GMAC1>;
+ clock-names = "PCLK";
+ phy-mode = "rgmii";
+ phy-handle = <&phy1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index f0dc94409107..2d41fb96ce0a 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -59,7 +59,7 @@ ethernet@83fec000 {
reg = <0x83fec000 0x4000>;
interrupts = <87>;
phy-mode = "mii";
- phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
+ phy-reset-gpios = <&gpio2 14 GPIO_ACTIVE_LOW>; /* GPIO2_14 */
local-mac-address = [00 04 9F 01 1B B9];
phy-supply = <&reg_fec_supply>;
};
@@ -71,7 +71,7 @@ ethernet@83fec000 {
reg = <0x83fec000 0x4000>;
interrupts = <87>;
phy-mode = "mii";
- phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
+ phy-reset-gpios = <&gpio2 14 GPIO_ACTIVE_LOW>; /* GPIO2_14 */
local-mac-address = [00 04 9F 01 1B B9];
phy-supply = <&reg_fec_supply>;
phy-handle = <&ethphy>;
diff --git a/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt b/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
index dea5124cdc52..d24172cc6d32 100644
--- a/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
+++ b/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
@@ -1,7 +1,7 @@
* ADF7242 IEEE 802.15.4 *
Required properties:
- - compatible: should be "adi,adf7242"
+ - compatible: should be "adi,adf7242", "adi,adf7241"
- spi-max-frequency: maximal bus speed (12.5 MHz)
- reg: the chipselect index
- interrupts: the interrupt generated by the device via pin IRQ1.
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 214eaa9a6683..53c13ee384a4 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -28,7 +28,7 @@ Required properties:
- mediatek,sgmiisys: phandle to the syscon node that handles the SGMII setup
which is required for those SoCs equipped with SGMII such as MT7622 SoC.
- mediatek,pctl: phandle to the syscon node that handles the ports slew rate
- and driver current
+ and driver current: only for MT2701 and MT7623 SoC
Optional properties:
- interrupt-parent: Should be the phandle for the interrupt controller
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 77d0b2a61ffa..d2169a56f5e3 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -53,6 +53,14 @@ Optional Properties:
to ensure the integrated PHY is used. The absence of this property indicates
the muxers should be configured so that the external PHY is used.
+- reset-gpios: The GPIO phandle and specifier for the PHY reset signal.
+
+- reset-assert-us: Delay after the reset was asserted in microseconds.
+ If this property is missing the delay will be skipped.
+
+- reset-deassert-us: Delay after the reset was deasserted in microseconds.
+ If this property is missing the delay will be skipped.
+
Example:
ethernet-phy@0 {
@@ -60,4 +68,8 @@ ethernet-phy@0 {
interrupt-parent = <&PIC>;
interrupts = <35 IRQ_TYPE_EDGE_RISING>;
reg = <0>;
+
+ reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <1000>;
+ reset-deassert-us = <2000>;
};
diff --git a/Documentation/devicetree/bindings/net/sff,sfp.txt b/Documentation/devicetree/bindings/net/sff,sfp.txt
index 60e970ce10ee..f1c441bedf68 100644
--- a/Documentation/devicetree/bindings/net/sff,sfp.txt
+++ b/Documentation/devicetree/bindings/net/sff,sfp.txt
@@ -3,7 +3,9 @@ Transceiver
Required properties:
-- compatible : must be "sff,sfp"
+- compatible : must be one of
+ "sff,sfp" for SFP modules
+ "sff,sff" for soldered down SFF modules
Optional Properties:
@@ -11,7 +13,8 @@ Optional Properties:
interface
- mod-def0-gpios : GPIO phandle and a specifier of the MOD-DEF0 (AKA Mod_ABS)
- module presence input gpio signal, active (module absent) high
+ module presence input gpio signal, active (module absent) high. Must
+ not be present for SFF modules
- los-gpios : GPIO phandle and a specifier of the Receiver Loss of Signal
Indication input gpio signal, active (signal lost) high
@@ -24,10 +27,11 @@ Optional Properties:
- rate-select0-gpios : GPIO phandle and a specifier of the Rx Signaling Rate
Select (AKA RS0) output gpio signal, low: low Rx rate, high: high Rx rate
+ Must not be present for SFF modules
- rate-select1-gpios : GPIO phandle and a specifier of the Tx Signaling Rate
Select (AKA RS1) output gpio signal (SFP+ only), low: low Tx rate, high:
- high Tx rate
+ high Tx rate. Must not be present for SFF modules
Example #1: Direct serdes to SFP connection
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
new file mode 100644
index 000000000000..270ea4efff13
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
@@ -0,0 +1,48 @@
+* Socionext AVE ethernet controller
+
+This describes the devicetree bindings for AVE ethernet controller
+implemented on Socionext UniPhier SoCs.
+
+Required properties:
+ - compatible: Should be
+ - "socionext,uniphier-pro4-ave4" : for Pro4 SoC
+ - "socionext,uniphier-pxs2-ave4" : for PXs2 SoC
+ - "socionext,uniphier-ld11-ave4" : for LD11 SoC
+ - "socionext,uniphier-ld20-ave4" : for LD20 SoC
+ - reg: Address where registers are mapped and size of region.
+ - interrupts: Should contain the MAC interrupt.
+ - phy-mode: See ethernet.txt in the same directory. Allows choosing
+	"rgmii", "rmii", or "mii" according to the PHY.
+ - phy-handle: Should point to the external phy device.
+ See ethernet.txt file in the same directory.
+ - clocks: A phandle to the clock for the MAC.
+
+Optional properties:
+ - resets: A phandle to the reset control for the MAC.
+ - local-mac-address: See ethernet.txt in the same directory.
+
+Required subnode:
+ - mdio: A container for child nodes representing phy nodes.
+ See phy.txt in the same directory.
+
+Example:
+
+ ether: ethernet@65000000 {
+ compatible = "socionext,uniphier-ld20-ave4";
+ reg = <0x65000000 0x8500>;
+ interrupts = <0 66 4>;
+ phy-mode = "rgmii";
+ phy-handle = <&ethphy>;
+ clocks = <&sys_clk 6>;
+ resets = <&sys_rst 6>;
+ local-mac-address = [00 00 00 00 00 00];
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy: ethphy@1 {
+ reg = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt
new file mode 100644
index 000000000000..0cff94fb0433
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt
@@ -0,0 +1,53 @@
+* Socionext NetSec Ethernet Controller IP
+
+Required properties:
+- compatible: Should be "socionext,synquacer-netsec"
+- reg: Address and length of the control register area, followed by the
+ address and length of the EEPROM holding the MAC address and
+ microengine firmware
+- interrupts: Should contain ethernet controller interrupt
+- clocks: phandle to the PHY reference clock
+- clock-names: Should be "phy_ref_clk"
+- phy-mode: See ethernet.txt file in the same directory
+- phy-handle: See ethernet.txt in the same directory.
+
+- mdio device tree subnode: When the NetSec has a PHY connected to its
+	local mdio, there must be a device tree subnode with the following
+	required properties:
+
+ - #address-cells: Must be <1>.
+ - #size-cells: Must be <0>.
+
+ For each phy on the mdio bus, there must be a node with the following
+ fields:
+ - compatible: Refer to phy.txt
+ - reg: phy id used to communicate to phy.
+
+Optional properties: (See ethernet.txt file in the same directory)
+- dma-coherent: Boolean property, must only be present if memory
+ accesses performed by the device are cache coherent.
+- local-mac-address: See ethernet.txt in the same directory.
+- mac-address: See ethernet.txt in the same directory.
+- max-speed: See ethernet.txt in the same directory.
+- max-frame-size: See ethernet.txt in the same directory.
+
+Example:
+ eth0: ethernet@522d0000 {
+ compatible = "socionext,synquacer-netsec";
+ reg = <0 0x522d0000 0x0 0x10000>, <0 0x10000000 0x0 0x10000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_netsec>;
+ clock-names = "phy_ref_clk";
+ phy-mode = "rgmii";
+ max-speed = <1000>;
+ max-frame-size = <9000>;
+ phy-handle = <&phy1>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ti,wilink-st.txt b/Documentation/devicetree/bindings/net/ti-bluetooth.txt
index 1649c1f66b07..6d03ff8c7068 100644
--- a/Documentation/devicetree/bindings/net/ti,wilink-st.txt
+++ b/Documentation/devicetree/bindings/net/ti-bluetooth.txt
@@ -1,10 +1,18 @@
-TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices
+Texas Instruments Bluetooth Chips
+---------------------------------
+
+This documents the binding structure and common properties for serial
+attached TI Bluetooth devices. The following chips are included in this
+binding:
+
+* TI CC256x Bluetooth devices
+* TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices
TI WiLink devices have a UART interface for providing Bluetooth, FM radio,
and GPS over what's called "shared transport". The shared transport is
standard BT HCI protocol with additional channels for the other functions.
-These devices also have a separate WiFi interface as described in
+TI WiLink devices also have a separate WiFi interface as described in
wireless/ti,wlcore.txt.
This bindings follows the UART slave device binding in
@@ -12,6 +20,7 @@ This bindings follows the UART slave device binding in
Required properties:
- compatible: should be one of the following:
+ "ti,cc2560"
"ti,wl1271-st"
"ti,wl1273-st"
"ti,wl1281-st"
@@ -32,6 +41,9 @@ Optional properties:
See ../clocks/clock-bindings.txt for details.
- clock-names : Must include the following entry:
"ext_clock" (External clock provided to the TI combo chip).
+ - nvmem-cells: phandle to nvmem data cell that contains a 6 byte BD address
+ with the most significant byte first (big-endian).
+ - nvmem-cell-names: "bd-address" (required when nvmem-cells is specified)
Example:
@@ -43,5 +55,7 @@ Example:
enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
clocks = <&clk32k_wl18xx>;
clock-names = "ext_clock";
+ nvmem-cells = <&bd_address>;
+ nvmem-cell-names = "bd-address";
};
};
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
new file mode 100644
index 000000000000..0c17a0ec9b7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
@@ -0,0 +1,32 @@
+* MediaTek mt76xx devices
+
+This node provides properties for configuring the MediaTek mt76xx wireless
+device. The node is expected to be specified as a child node of the PCI
+controller to which the wireless chip is connected.
+
+Optional properties:
+
+- mac-address: See ethernet.txt in the parent directory
+- local-mac-address: See ethernet.txt in the parent directory
+- ieee80211-freq-limit: See ieee80211.txt
+- mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data
+
+Optional nodes:
+- led: Properties for a connected LED
+ Optional properties:
+ - led-sources: See Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+
+&pcie {
+ pcie0 {
+ wifi@0,0 {
+ compatible = "mediatek,mt76";
+ reg = <0x0000 0 0 0 0>;
+ ieee80211-freq-limit = <5000000 6000000>;
+ mediatek,mtd-eeprom = <&factory 0x8000>;
+
+ led {
+ led-sources = <2>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 74d7f0af209c..3d2a031217da 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -41,6 +41,9 @@ Optional properties:
- qcom,msi_addr: MSI interrupt address.
- qcom,msi_base: Base value to add before writing MSI data into
MSI address register.
+- qcom,ath10k-calibration-variant: string to search for in the board-2.bin
+ variant list with the same bus and device
+ specific ids
- qcom,ath10k-calibration-data : calibration data + board specific data
as an array, the length can vary between
hw versions.
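+
+Example (a fragment showing only the new variant property; the compatible
+string and the variant name are placeholders):
+
+	wifi@a000000 {
+		compatible = "qcom,ipq4019-wifi";
+		...
+		qcom,ath10k-calibration-variant = "RT-AC58U";
+	};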
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
index 9d733af26be7..4e4f30288c8b 100644
--- a/Documentation/devicetree/bindings/opp/opp.txt
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -45,6 +45,11 @@ Devices supporting OPPs must set their "operating-points-v2" property with
phandle to a OPP table in their DT node. The OPP core will use this phandle to
find the operating points for the device.
+This can contain more than one phandle for power domain providers that provide
+multiple power domains. That is, one phandle for each power domain. If only one
+phandle is available, then the same OPP table will be used for all power domains
+provided by the power domain provider.
+
If required, this can be extended for SoC vendor specific bindings. Such bindings
should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
and should have a compatible description like: "operating-points-v2-<vendor>".
@@ -154,6 +159,14 @@ Optional properties:
- status: Marks the node enabled/disabled.
+- required-opp: This contains phandle to an OPP node in another device's OPP
+  table. It may contain an array of phandles, where each phandle points to an
+  OPP of a different device. It should not contain multiple phandles to OPP
+  nodes in the same OPP table. This specifies the minimum OPP required from
+  each device whose OPP phandle is present in this property, for the current
+  device to function at the current OPP (the OPP where this property is
+  present).
+
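+An illustrative OPP node using it (a sketch; the phandle name is
+hypothetical):
+
+	opp-1000000000 {
+		opp-hz = /bits/ 64 <1000000000>;
+		required-opp = <&domain_opp_2>;
+	};
+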
Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
/ {
diff --git a/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt b/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
new file mode 100644
index 000000000000..832346e489a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
@@ -0,0 +1,63 @@
+Texas Instruments OMAP compatible OPP supply description
+
+OMAP5, DRA7, and AM57 family of SoCs have Class0 AVS eFuse registers which
+contain data that can be used to adjust voltages programmed for some of their
+supplies for more efficient operation. This binding provides the information
+needed to read these values and use them to program the main regulator during
+an OPP transition.
+
+Also, some supplies may have an associated vbb-supply which is an Adaptive Body
+Bias regulator which must be transitioned in a specific sequence with regard
+to the vdd-supply and clk when making an OPP transition. By supplying two
+regulators to the device that will undergo OPP transitions we can make use
+of the multi regulator binding that is part of the OPP core described here [1]
+to describe both regulators needed by the platform.
+
+[1] Documentation/devicetree/bindings/opp/opp.txt
+
+Required Properties for Device Node:
+- vdd-supply: phandle to regulator controlling VDD supply
+- vbb-supply: phandle to regulator controlling Body Bias supply
+ (Usually Adaptive Body Bias regulator)
+
+Required Properties for opp-supply node:
+- compatible: Should be one of:
+ "ti,omap-opp-supply" - basic OPP supply controlling VDD and VBB
+	"ti,omap5-opp-supply" - OMAP5+ optimized voltages in efuse (class0) VDD
+			    along with VBB.
+	"ti,omap5-core-opp-supply" - OMAP5+ optimized voltages in efuse (class0)
+			    VDD but no VBB.
+- reg: Address and length of the efuse register set for the device (mandatory
+ only for "ti,omap5-opp-supply")
+- ti,efuse-settings: An array of u32 tuple items providing information about
+ optimized efuse configuration. Each item consists of the following:
+ volt: voltage in uV - reference voltage (OPP voltage)
+	efuse_offset: efuse offset from reg where the optimized voltage is stored.
+- ti,absolute-max-voltage-uv: absolute maximum voltage for the OPP supply.
+
+Example:
+
+/* Device Node (CPU) */
+cpus {
+ cpu0: cpu@0 {
+ device_type = "cpu";
+
+ ...
+
+ vdd-supply = <&vcc>;
+ vbb-supply = <&abb_mpu>;
+ };
+};
+
+/* OMAP OPP Supply with Class0 registers */
+opp_supply_mpu: opp_supply@4a003b20 {
+ compatible = "ti,omap5-opp-supply";
+ reg = <0x4a003b20 0x8>;
+ ti,efuse-settings = <
+ /* uV offset */
+ 1060000 0x0
+ 1160000 0x4
+ 1210000 0x8
+ >;
+ ti,absolute-max-voltage-uv = <1500000>;
+};
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 14bd9e945ff6..f3355313c020 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -40,6 +40,12 @@ Optional properties:
domain's idle states. In the absence of this property, the domain would be
considered as capable of being powered-on or powered-off.
+- operating-points-v2 : Phandles to the OPP tables of power domains provided by
+ a power domain provider. If the provider provides a single power domain only
+ or all the power domains provided by the provider have identical OPP tables,
+ then this shall contain a single phandle. Refer to ../opp/opp.txt for more
+ information.
+
Example:
power: power-controller@12340000 {
@@ -120,4 +126,63 @@ The node above defines a typical PM domain consumer device, which is located
inside a PM domain with index 0 of a power controller represented by a node
with the label "power".
+Optional properties:
+- required-opp: This contains a phandle to an OPP node in another device's OPP
+  table. It may contain an array of phandles, where each phandle points to an
+  OPP of a different device. It should not contain multiple phandles to OPP
+  nodes in the same OPP table. For each device whose OPP phandle is listed
+  here, this specifies the minimum OPP required for the current device to
+  function at the OPP where this property is present (see the example below).
+
+Example:
+- OPP table for domain provider that provides two domains.
+
+ domain0_opp_table: opp-table0 {
+ compatible = "operating-points-v2";
+
+ domain0_opp_0: opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <975000 970000 985000>;
+ };
+ domain0_opp_1: opp-1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <1000000 980000 1010000>;
+ };
+ };
+
+ domain1_opp_table: opp-table1 {
+ compatible = "operating-points-v2";
+
+ domain1_opp_0: opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <975000 970000 985000>;
+ };
+ domain1_opp_1: opp-1300000000 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1000000 980000 1010000>;
+ };
+ };
+
+ power: power-controller@12340000 {
+ compatible = "foo,power-controller";
+ reg = <0x12340000 0x1000>;
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&domain0_opp_table>, <&domain1_opp_table>;
+ };
+
+ leaky-device0@12350000 {
+ compatible = "foo,i-leak-current";
+ reg = <0x12350000 0x1000>;
+ power-domains = <&power 0>;
+ required-opp = <&domain0_opp_0>;
+ };
+
+ leaky-device1@12350000 {
+ compatible = "foo,i-leak-current";
+ reg = <0x12350000 0x1000>;
+ power-domains = <&power 1>;
+ required-opp = <&domain1_opp_1>;
+ };
+
[1]. Documentation/devicetree/bindings/power/domain-idle-state.txt
diff --git a/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt b/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt
deleted file mode 100644
index 1b81fcd9fb72..000000000000
--- a/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-i.mx6 Poweroff Driver
-
-SNVS_LPCR in SNVS module can power off the whole system by pull
-PMIC_ON_REQ low if PMIC_ON_REQ is connected with external PMIC.
-If you don't want to use PMIC_ON_REQ as power on/off control,
-please set status='disabled' to disable this driver.
-
-Required Properties:
--compatible: "fsl,sec-v4.0-poweroff"
--reg: Specifies the physical address of the SNVS_LPCR register
-
-Example:
- snvs@20cc000 {
- compatible = "fsl,sec-v4.0-mon", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x020cc000 0x4000>;
- .....
- snvs_poweroff: snvs-poweroff@38 {
- compatible = "fsl,sec-v4.0-poweroff";
- reg = <0x38 0x4>;
- };
- }
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
index 6858e1a804ad..615c1cb6889f 100644
--- a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
@@ -15,6 +15,7 @@ Required properties:
* "ti,bq27520g2" - BQ27520-g2
* "ti,bq27520g3" - BQ27520-g3
* "ti,bq27520g4" - BQ27520-g4
+ * "ti,bq27521" - BQ27521
* "ti,bq27530" - BQ27530
* "ti,bq27531" - BQ27531
* "ti,bq27541" - BQ27541
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
index 4ccb2cd5df94..d096cf461d81 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
@@ -195,4 +195,4 @@ External interrupts:
fsl,mpc5200-mscan nodes
-----------------------
-See file can.txt in this directory.
+See file Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index 3cbf56ce66ea..2babe15b618d 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -42,8 +42,16 @@ Optional properties:
- regulator-state-[mem/disk] node has following common properties:
- regulator-on-in-suspend: regulator should be on in suspend state.
- regulator-off-in-suspend: regulator should be off in suspend state.
- - regulator-suspend-microvolt: regulator should be set to this voltage
- in suspend.
+	 - regulator-suspend-min-microvolt: minimum voltage that may be set in
+	   the suspend state.
+	 - regulator-suspend-max-microvolt: maximum voltage that may be set in
+	   the suspend state.
+	 - regulator-suspend-microvolt: the default voltage to which the
+	   regulator is set in suspend. This property is now deprecated;
+	   setting the suspend voltage via the API the regulator driver
+	   provides is recommended instead.
+	 - regulator-changeable-in-suspend: whether the default voltage and
+	   the regulator on/off in suspend can be changed at runtime.
- regulator-mode: operating mode in the given suspend state.
The set of possible operating modes depends on the capabilities of
every hardware so the valid modes are documented on each regulator
diff --git a/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt b/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt
new file mode 100644
index 000000000000..63dc07877cd6
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt
@@ -0,0 +1,43 @@
+Spreadtrum SC2731 Voltage regulators
+
+The SC2731 integrates low-voltage and low quiescent current DCDCs/LDOs.
+14 LDOs and 3 DCDCs are designed for external use. All DCDCs/LDOs have
+their own bypass (power-down) control signals. External tantalum or MLCC
+ceramic capacitors are recommended for use with these LDOs.
+
+Required properties:
+ - compatible: should be "sprd,sc27xx-regulator".
+
+Below is the list of regulators provided by this controller. Each regulator
+is named according to its type, BUCK_<name> or LDO_<name>. Each of these
+nodes is defined using the standard binding for regulators at
+Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are:
+BUCK:
+ BUCK_CPU0, BUCK_CPU1, BUCK_RF
+LDO:
+ LDO_CAMA0, LDO_CAMA1, LDO_CAMMOT, LDO_VLDO, LDO_EMMCCORE, LDO_SDCORE,
+ LDO_SDIO, LDO_WIFIPA, LDO_USB33, LDO_CAMD0, LDO_CAMD1, LDO_CON,
+ LDO_CAMIO, LDO_SRAM
+
+Example:
+ regulators {
+ compatible = "sprd,sc27xx-regulator";
+
+ vddarm0: BUCK_CPU0 {
+ regulator-name = "vddarm0";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1996875>;
+ regulator-ramp-delay = <25000>;
+ regulator-always-on;
+ };
+
+ vddcama0: LDO_CAMA0 {
+ regulator-name = "vddcama0";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3750000>;
+ regulator-enable-ramp-delay = <100>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt b/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
index 26542690b578..627b29531a32 100644
--- a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
+++ b/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
@@ -1,11 +1,19 @@
-BCM2835 Random number generator
+BCM2835/6368 Random number generator
Required properties:
-- compatible : should be "brcm,bcm2835-rng" or "brcm,bcm-nsp-rng" or
- "brcm,bcm5301x-rng"
+- compatible : should be one of
+ "brcm,bcm2835-rng"
+ "brcm,bcm-nsp-rng"
+ "brcm,bcm5301x-rng" or
+ "brcm,bcm6368-rng"
- reg : Specifies base physical address and size of the registers.
+Optional properties:
+
+- clocks : phandle to clock-controller plus clock-specifier pair
+- clock-names : "ipsec" as a clock name
+
Example:
rng {
@@ -17,3 +25,11 @@ rng@18033000 {
compatible = "brcm,bcm-nsp-rng";
reg = <0x18033000 0x14>;
};
+
+random: rng@10004180 {
+ compatible = "brcm,bcm6368-rng";
+ reg = <0x10004180 0x14>;
+
+ clocks = <&periph_clk 18>;
+ clock-names = "ipsec";
+};
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt b/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt
deleted file mode 100644
index 4b5ac600bfbd..000000000000
--- a/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-BCM6368 Random number generator
-
-Required properties:
-
-- compatible : should be "brcm,bcm6368-rng"
-- reg : Specifies base physical address and size of the registers
-- clocks : phandle to clock-controller plus clock-specifier pair
-- clock-names : "ipsec" as a clock name
-
-Example:
- random: rng@10004180 {
- compatible = "brcm,bcm6368-rng";
- reg = <0x10004180 0x14>;
-
- clocks = <&periph_clk 18>;
- clock-names = "ipsec";
- };
diff --git a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
index b6a869f97715..df3bef7998fa 100644
--- a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
+++ b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
@@ -8,7 +8,10 @@ Main node required properties:
(b) "hisilicon,hip06-sas-v2" for v2 hw in hip06 chipset
(c) "hisilicon,hip07-sas-v2" for v2 hw in hip07 chipset
- sas-addr : array of 8 bytes for host SAS address
- - reg : Address and length of the SAS register
+  - reg : Contains two regions. The first is the address and length of the
+          SAS registers. The second is the address and length of the CPLD
+          register used for SGPIO control; it is optional, and should be set
+          only when a CPLD is used for directly attached disk LED control
+          (see the sketch below).
- hisilicon,sas-syscon: phandle of syscon used for sas control
- ctrl-reset-reg : offset to controller reset register in ctrl reg
- ctrl-reset-sts-reg : offset to controller reset status register in ctrl reg
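+
+A minimal sketch of a node with the two-region reg (addresses and sizes are
+illustrative; the second region is the optional CPLD region):
+
+	sas0: sas@c1000000 {
+		compatible = "hisilicon,hip06-sas-v2";
+		reg = <0xc1000000 0x10000>,
+		      <0x94a80000 0x800>;
+		...
+	};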
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index 860a9559839a..afcfbc34e243 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -6,10 +6,10 @@ Required properties:
- interrupts : Should contain uart interrupt
Optional properties:
-- fsl,irda-mode : Indicate the uart supports irda mode
- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
in DCE mode by default.
-- rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt
+- rs485-rts-delay, rs485-rts-active-low, rs485-rx-during-tx,
+ linux,rs485-enabled-at-boot-time: see rs485.txt
Please check Documentation/devicetree/bindings/serial/serial.txt
for the complete list of generic properties.
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
index 59567b51cf09..6bd3f2e93d61 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
@@ -16,7 +16,8 @@ Required properties:
Optional properties:
- dmas: A list of two dma specifiers, one for each entry in dma-names.
- dma-names: should contain "tx" and "rx".
-- rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt
+- rs485-rts-delay, rs485-rts-active-low, rs485-rx-during-tx,
+ linux,rs485-enabled-at-boot-time: see rs485.txt
Note: Optional properties for DMA support. Write them both or both not.
diff --git a/Documentation/devicetree/bindings/serial/ingenic,uart.txt b/Documentation/devicetree/bindings/serial/ingenic,uart.txt
index 02cb7fe59cb7..c3c6406d5cfe 100644
--- a/Documentation/devicetree/bindings/serial/ingenic,uart.txt
+++ b/Documentation/devicetree/bindings/serial/ingenic,uart.txt
@@ -1,8 +1,12 @@
* Ingenic SoC UART
Required properties:
-- compatible : "ingenic,jz4740-uart", "ingenic,jz4760-uart",
- "ingenic,jz4775-uart" or "ingenic,jz4780-uart"
+- compatible : One of:
+ - "ingenic,jz4740-uart",
+ - "ingenic,jz4760-uart",
+ - "ingenic,jz4770-uart",
+ - "ingenic,jz4775-uart",
+ - "ingenic,jz4780-uart".
- reg : offset and length of the register set for the device.
- interrupts : should contain uart interrupt.
- clocks : phandles to the module & baud clocks.
diff --git a/Documentation/devicetree/bindings/serial/maxim,max310x.txt b/Documentation/devicetree/bindings/serial/maxim,max310x.txt
index 83a919c241b0..823f77dd7978 100644
--- a/Documentation/devicetree/bindings/serial/maxim,max310x.txt
+++ b/Documentation/devicetree/bindings/serial/maxim,max310x.txt
@@ -24,13 +24,27 @@ Optional properties:
1 = active low.
Example:
+
+/ {
+ clocks {
+ spi_uart_clk: osc_max14830 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <3686400>;
+ };
+
+ };
+};
+
+&spi0 {
max14830: max14830@0 {
compatible = "maxim,max14830";
reg = <0>;
- clocks = <&clk20m>;
+ clocks = <&spi_uart_clk>;
clock-names = "osc";
interrupt-parent = <&gpio3>;
- interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
gpio-controller;
#gpio-cells = <2>;
};
+};
diff --git a/Documentation/devicetree/bindings/serial/mvebu-uart.txt b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
index d37fabe17bd1..2ae2fee7e023 100644
--- a/Documentation/devicetree/bindings/serial/mvebu-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
@@ -1,13 +1,53 @@
-* Marvell UART : Non standard UART used in some of Marvell EBU SoCs (e.g., Armada-3700)
+* Marvell UART : Non-standard UART used in some Marvell EBU SoCs,
+  e.g., Armada-3700.
Required properties:
-- compatible: "marvell,armada-3700-uart"
+- compatible:
+ - "marvell,armada-3700-uart" for the standard variant of the UART
+ (32 bytes FIFO, no DMA, level interrupts, 8-bit access to the
+ FIFO, baudrate limited to 230400).
+    - "marvell,armada-3700-uart-ext" for the extended variant of the
+      UART (128 bytes FIFO, DMA, edge interrupts, 8-bit or 32-bit
+      accesses to the FIFO, baudrate not limited by the dividers).
- reg: offset and length of the register set for the device.
-- interrupts: device interrupt
+- clocks: UART reference clock used to derive the baudrate. If no clock
+ is provided (possible only with the "marvell,armada-3700-uart"
+ compatible string for backward compatibility), it will only work
+ if the baudrate was initialized by the bootloader and no baudrate
+ change will then be possible.
+- interrupts:
+ - Must contain three elements for the standard variant of the IP
+ (marvell,armada-3700-uart): "uart-sum", "uart-tx" and "uart-rx",
+ respectively the UART sum interrupt, the UART TX interrupt and
+ UART RX interrupt. A corresponding interrupt-names property must
+ be defined.
+ - Must contain two elements for the extended variant of the IP
+ (marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx",
+ respectively the UART TX interrupt and the UART RX interrupt. A
+      corresponding interrupt-names property must be defined.
+ - For backward compatibility reasons, a single element interrupts
+ property is also supported for the standard variant of the IP,
+ containing only the UART sum interrupt. This form is deprecated
+ and should no longer be used.
Example:
- serial@12000 {
+ uart0: serial@12000 {
compatible = "marvell,armada-3700-uart";
reg = <0x12000 0x200>;
- interrupts = <43>;
+ clocks = <&xtalclk>;
+ interrupts =
+ <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uart-sum", "uart-tx", "uart-rx";
+ };
+
+ uart1: serial@12200 {
+ compatible = "marvell,armada-3700-uart-ext";
+ reg = <0x12200 0x30>;
+ clocks = <&xtalclk>;
+ interrupts =
+ <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "uart-tx", "uart-rx";
};
diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt
index 43eac675f21f..4b0f05adb228 100644
--- a/Documentation/devicetree/bindings/serial/omap_serial.txt
+++ b/Documentation/devicetree/bindings/serial/omap_serial.txt
@@ -20,6 +20,7 @@ Optional properties:
node and a DMA channel number.
- dma-names : "rx" for receive channel, "tx" for transmit channel.
- rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt
+- rs485-rts-active-high: drive RTS high when sending (default is low).
Example:
diff --git a/Documentation/devicetree/bindings/serial/rs485.txt b/Documentation/devicetree/bindings/serial/rs485.txt
index b8415936dfdb..b7c29f74ebb2 100644
--- a/Documentation/devicetree/bindings/serial/rs485.txt
+++ b/Documentation/devicetree/bindings/serial/rs485.txt
@@ -12,6 +12,7 @@ Optional properties:
* b is the delay between end of data sent and rts signal in milliseconds
it corresponds to the delay after sending data and actual release of the line.
If this property is not specified, <0 0> is assumed.
+- rs485-rts-active-low: drive RTS low when sending (default is high).
- linux,rs485-enabled-at-boot-time: empty property telling to enable the rs485
feature at boot time. It can be disabled later with proper ioctl.
- rs485-rx-during-tx: empty property that enables the receiving of data even
diff --git a/Documentation/devicetree/bindings/sound/dmic.txt b/Documentation/devicetree/bindings/sound/dmic.txt
index 54c8ef6498a8..f7bf65611453 100644
--- a/Documentation/devicetree/bindings/sound/dmic.txt
+++ b/Documentation/devicetree/bindings/sound/dmic.txt
@@ -7,10 +7,12 @@ Required properties:
Optional properties:
- dmicen-gpios: GPIO specifier for dmic to control start and stop
+ - num-channels: Number of microphones on this DAI
Example node:
dmic_codec: dmic@0 {
compatible = "dmic-codec";
dmicen-gpios = <&gpio4 3 GPIO_ACTIVE_HIGH>;
+ num-channels = <1>;
};
diff --git a/Documentation/devicetree/bindings/sound/max98373.txt b/Documentation/devicetree/bindings/sound/max98373.txt
new file mode 100644
index 000000000000..456cb1c59353
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98373.txt
@@ -0,0 +1,40 @@
+Maxim Integrated MAX98373 Speaker Amplifier
+
+This device supports I2C.
+
+Required properties:
+
+ - compatible : "maxim,max98373"
+
+ - reg : the I2C address of the device.
+
+Optional properties:
+
+ - maxim,vmon-slot-no : slot number used to send voltage information,
+                   or, in interleave mode, the slot used as the
+                   interleave slot.
+ slot range : 0 ~ 15, Default : 0
+
+ - maxim,imon-slot-no : slot number used to send current information
+ slot range : 0 ~ 15, Default : 0
+
+ - maxim,spkfb-slot-no : slot number used to send speaker feedback information
+ slot range : 0 ~ 15, Default : 0
+
+ - maxim,interleave-mode : For cases where a single combined channel
+ for the I/V sense data is not sufficient, the device can also be configured
+ to share a single data output channel on alternating frames.
+ In this configuration, the current and voltage data will be frame interleaved
+ on a single output channel.
+ Boolean, define to enable the interleave mode, Default : false
+
+Example:
+
+codec: max98373@31 {
+ compatible = "maxim,max98373";
+ reg = <0x31>;
+ maxim,vmon-slot-no = <0>;
+ maxim,imon-slot-no = <1>;
+ maxim,spkfb-slot-no = <2>;
+ maxim,interleave-mode;
+};
diff --git a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
index 77a57f84bed4..6df87b97f7cb 100644
--- a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
+++ b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
@@ -2,153 +2,143 @@ Mediatek AFE PCM controller for mt2701
Required properties:
- compatible = "mediatek,mt2701-audio";
-- reg: register location and size
- interrupts: should contain AFE and ASYS interrupts
- interrupt-names: should be "afe" and "asys"
- power-domains: should define the power domain
+- clocks: Must contain an entry for each entry in clock-names
+ See ../clocks/clock-bindings.txt for details
- clock-names: should have these clock names:
"infra_sys_audio_clk",
"top_audio_mux1_sel",
"top_audio_mux2_sel",
- "top_audio_mux1_div",
- "top_audio_mux2_div",
- "top_audio_48k_timing",
- "top_audio_44k_timing",
- "top_audpll_mux_sel",
- "top_apll_sel",
- "top_aud1_pll_98M",
- "top_aud2_pll_90M",
- "top_hadds2_pll_98M",
- "top_hadds2_pll_294M",
- "top_audpll",
- "top_audpll_d4",
- "top_audpll_d8",
- "top_audpll_d16",
- "top_audpll_d24",
- "top_audintbus_sel",
- "clk_26m",
- "top_syspll1_d4",
- "top_aud_k1_src_sel",
- "top_aud_k2_src_sel",
- "top_aud_k3_src_sel",
- "top_aud_k4_src_sel",
- "top_aud_k5_src_sel",
- "top_aud_k6_src_sel",
- "top_aud_k1_src_div",
- "top_aud_k2_src_div",
- "top_aud_k3_src_div",
- "top_aud_k4_src_div",
- "top_aud_k5_src_div",
- "top_aud_k6_src_div",
- "top_aud_i2s1_mclk",
- "top_aud_i2s2_mclk",
- "top_aud_i2s3_mclk",
- "top_aud_i2s4_mclk",
- "top_aud_i2s5_mclk",
- "top_aud_i2s6_mclk",
- "top_asm_m_sel",
- "top_asm_h_sel",
- "top_univpll2_d4",
- "top_univpll2_d2",
- "top_syspll_d5";
+ "top_audio_a1sys_hp",
+ "top_audio_a2sys_hp",
+ "i2s0_src_sel",
+ "i2s1_src_sel",
+ "i2s2_src_sel",
+ "i2s3_src_sel",
+ "i2s0_src_div",
+ "i2s1_src_div",
+ "i2s2_src_div",
+ "i2s3_src_div",
+ "i2s0_mclk_en",
+ "i2s1_mclk_en",
+ "i2s2_mclk_en",
+ "i2s3_mclk_en",
+ "i2so0_hop_ck",
+ "i2so1_hop_ck",
+ "i2so2_hop_ck",
+ "i2so3_hop_ck",
+ "i2si0_hop_ck",
+ "i2si1_hop_ck",
+ "i2si2_hop_ck",
+ "i2si3_hop_ck",
+ "asrc0_out_ck",
+ "asrc1_out_ck",
+ "asrc2_out_ck",
+ "asrc3_out_ck",
+ "audio_afe_pd",
+ "audio_afe_conn_pd",
+ "audio_a1sys_pd",
+ "audio_a2sys_pd",
+ "audio_mrgif_pd";
+- assigned-clocks: list of input clocks and dividers for the audio system.
+ See ../clocks/clock-bindings.txt for details.
+- assigned-clock-parents: parents of the input clocks of the assigned clocks.
+- assigned-clock-rates: list of clock frequencies of assigned clocks.
+
+The AFE node must be a subnode of the MediaTek audsys device tree node.
+See ../arm/mediatek/mediatek,audsys.txt for details about the parent node.
Example:
- afe: mt2701-afe-pcm@11220000 {
- compatible = "mediatek,mt2701-audio";
- reg = <0 0x11220000 0 0x2000>,
- <0 0x112A0000 0 0x20000>;
- interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
- <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
- interrupt-names = "afe", "asys";
- power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
- clocks = <&infracfg CLK_INFRA_AUDIO>,
- <&topckgen CLK_TOP_AUD_MUX1_SEL>,
- <&topckgen CLK_TOP_AUD_MUX2_SEL>,
- <&topckgen CLK_TOP_AUD_MUX1_DIV>,
- <&topckgen CLK_TOP_AUD_MUX2_DIV>,
- <&topckgen CLK_TOP_AUD_48K_TIMING>,
- <&topckgen CLK_TOP_AUD_44K_TIMING>,
- <&topckgen CLK_TOP_AUDPLL_MUX_SEL>,
- <&topckgen CLK_TOP_APLL_SEL>,
- <&topckgen CLK_TOP_AUD1PLL_98M>,
- <&topckgen CLK_TOP_AUD2PLL_90M>,
- <&topckgen CLK_TOP_HADDS2PLL_98M>,
- <&topckgen CLK_TOP_HADDS2PLL_294M>,
- <&topckgen CLK_TOP_AUDPLL>,
- <&topckgen CLK_TOP_AUDPLL_D4>,
- <&topckgen CLK_TOP_AUDPLL_D8>,
- <&topckgen CLK_TOP_AUDPLL_D16>,
- <&topckgen CLK_TOP_AUDPLL_D24>,
- <&topckgen CLK_TOP_AUDINTBUS_SEL>,
- <&clk26m>,
- <&topckgen CLK_TOP_SYSPLL1_D4>,
- <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K5_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K6_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K5_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K6_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S5_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S6_MCLK>,
- <&topckgen CLK_TOP_ASM_M_SEL>,
- <&topckgen CLK_TOP_ASM_H_SEL>,
- <&topckgen CLK_TOP_UNIVPLL2_D4>,
- <&topckgen CLK_TOP_UNIVPLL2_D2>,
- <&topckgen CLK_TOP_SYSPLL_D5>;
+ audsys: audio-subsystem@11220000 {
+ compatible = "mediatek,mt2701-audsys", "syscon", "simple-mfd";
+ ...
+
+ afe: audio-controller {
+ compatible = "mediatek,mt2701-audio";
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "afe", "asys";
+ power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+
+ clocks = <&infracfg CLK_INFRA_AUDIO>,
+ <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+ <&topckgen CLK_TOP_AUD_48K_TIMING>,
+ <&topckgen CLK_TOP_AUD_44K_TIMING>,
+ <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
+ <&audsys CLK_AUD_I2SO1>,
+ <&audsys CLK_AUD_I2SO2>,
+ <&audsys CLK_AUD_I2SO3>,
+ <&audsys CLK_AUD_I2SO4>,
+ <&audsys CLK_AUD_I2SIN1>,
+ <&audsys CLK_AUD_I2SIN2>,
+ <&audsys CLK_AUD_I2SIN3>,
+ <&audsys CLK_AUD_I2SIN4>,
+ <&audsys CLK_AUD_ASRCO1>,
+ <&audsys CLK_AUD_ASRCO2>,
+ <&audsys CLK_AUD_ASRCO3>,
+ <&audsys CLK_AUD_ASRCO4>,
+ <&audsys CLK_AUD_AFE>,
+ <&audsys CLK_AUD_AFE_CONN>,
+ <&audsys CLK_AUD_A1SYS>,
+ <&audsys CLK_AUD_A2SYS>,
+ <&audsys CLK_AUD_AFE_MRGIF>;
+
+ clock-names = "infra_sys_audio_clk",
+ "top_audio_mux1_sel",
+ "top_audio_mux2_sel",
+ "top_audio_a1sys_hp",
+ "top_audio_a2sys_hp",
+ "i2s0_src_sel",
+ "i2s1_src_sel",
+ "i2s2_src_sel",
+ "i2s3_src_sel",
+ "i2s0_src_div",
+ "i2s1_src_div",
+ "i2s2_src_div",
+ "i2s3_src_div",
+ "i2s0_mclk_en",
+ "i2s1_mclk_en",
+ "i2s2_mclk_en",
+ "i2s3_mclk_en",
+ "i2so0_hop_ck",
+ "i2so1_hop_ck",
+ "i2so2_hop_ck",
+ "i2so3_hop_ck",
+ "i2si0_hop_ck",
+ "i2si1_hop_ck",
+ "i2si2_hop_ck",
+ "i2si3_hop_ck",
+ "asrc0_out_ck",
+ "asrc1_out_ck",
+ "asrc2_out_ck",
+ "asrc3_out_ck",
+ "audio_afe_pd",
+ "audio_afe_conn_pd",
+ "audio_a1sys_pd",
+ "audio_a2sys_pd",
+ "audio_mrgif_pd";
- clock-names = "infra_sys_audio_clk",
- "top_audio_mux1_sel",
- "top_audio_mux2_sel",
- "top_audio_mux1_div",
- "top_audio_mux2_div",
- "top_audio_48k_timing",
- "top_audio_44k_timing",
- "top_audpll_mux_sel",
- "top_apll_sel",
- "top_aud1_pll_98M",
- "top_aud2_pll_90M",
- "top_hadds2_pll_98M",
- "top_hadds2_pll_294M",
- "top_audpll",
- "top_audpll_d4",
- "top_audpll_d8",
- "top_audpll_d16",
- "top_audpll_d24",
- "top_audintbus_sel",
- "clk_26m",
- "top_syspll1_d4",
- "top_aud_k1_src_sel",
- "top_aud_k2_src_sel",
- "top_aud_k3_src_sel",
- "top_aud_k4_src_sel",
- "top_aud_k5_src_sel",
- "top_aud_k6_src_sel",
- "top_aud_k1_src_div",
- "top_aud_k2_src_div",
- "top_aud_k3_src_div",
- "top_aud_k4_src_div",
- "top_aud_k5_src_div",
- "top_aud_k6_src_div",
- "top_aud_i2s1_mclk",
- "top_aud_i2s2_mclk",
- "top_aud_i2s3_mclk",
- "top_aud_i2s4_mclk",
- "top_aud_i2s5_mclk",
- "top_aud_i2s6_mclk",
- "top_asm_m_sel",
- "top_asm_h_sel",
- "top_univpll2_d4",
- "top_univpll2_d2",
- "top_syspll_d5";
+ assigned-clocks = <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX1_DIV>,
+ <&topckgen CLK_TOP_AUD_MUX2_DIV>;
+ assigned-clock-parents = <&topckgen CLK_TOP_AUD1PLL_98M>,
+ <&topckgen CLK_TOP_AUD2PLL_90M>;
+ assigned-clock-rates = <0>, <0>, <49152000>, <45158400>;
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt b/Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt
index 601c518eddaa..4eb980bd0287 100644
--- a/Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt
@@ -1,10 +1,31 @@
* Freescale MXS audio complex with SGTL5000 codec
Required properties:
-- compatible: "fsl,mxs-audio-sgtl5000"
-- model: The user-visible name of this sound complex
-- saif-controllers: The phandle list of the MXS SAIF controller
-- audio-codec: The phandle of the SGTL5000 audio codec
+- compatible : "fsl,mxs-audio-sgtl5000"
+- model : The user-visible name of this sound complex
+- saif-controllers : The phandle list of the MXS SAIF controller
+- audio-codec : The phandle of the SGTL5000 audio codec
+- audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, SGTL5000
+ pins, and the jacks on the board:
+
+ Power supplies:
+ * Mic Bias
+
+ SGTL5000 pins:
+ * MIC_IN
+ * LINE_IN
+ * HP_OUT
+ * LINE_OUT
+
+ Board connectors:
+ * Mic Jack
+ * Line In Jack
+ * Headphone Jack
+ * Line Out Jack
+ * Ext Spk
Example:
@@ -14,4 +35,8 @@ sound {
model = "imx28-evk-sgtl5000";
saif-controllers = <&saif0 &saif1>;
audio-codec = <&sgtl5000>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
};
diff --git a/Documentation/devicetree/bindings/sound/nau8825.txt b/Documentation/devicetree/bindings/sound/nau8825.txt
index 2f5e973285a6..d16d96839bcb 100644
--- a/Documentation/devicetree/bindings/sound/nau8825.txt
+++ b/Documentation/devicetree/bindings/sound/nau8825.txt
@@ -69,7 +69,7 @@ Optional properties:
- nuvoton,jack-insert-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
- nuvoton,jack-eject-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
- - nuvoton,crosstalk-bypass: make crosstalk function bypass if set.
+ - nuvoton,crosstalk-enable: enable the crosstalk function if set.
- clocks: list of phandle and clock specifier pairs according to common clock bindings for the
clocks described in clock-names
@@ -98,7 +98,7 @@ Example:
nuvoton,short-key-debounce = <2>;
nuvoton,jack-insert-debounce = <7>;
nuvoton,jack-eject-debounce = <7>;
- nuvoton,crosstalk-bypass;
+ nuvoton,crosstalk-enable;
clock-names = "mclk";
clocks = <&tegra_car TEGRA210_CLK_CLK_OUT_2>;
diff --git a/Documentation/devicetree/bindings/sound/pcm186x.txt b/Documentation/devicetree/bindings/sound/pcm186x.txt
new file mode 100644
index 000000000000..1087f4855980
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/pcm186x.txt
@@ -0,0 +1,42 @@
+Texas Instruments PCM186x Universal Audio ADC
+
+These devices support both I2C and SPI (configured with pin strapping
+on the board).
+
+Required properties:
+
+ - compatible : "ti,pcm1862",
+ "ti,pcm1863",
+ "ti,pcm1864",
+ "ti,pcm1865"
+
+ - reg : The I2C address of the device for I2C, the chip select
+ number for SPI.
+
+ - avdd-supply: Analog core power supply (3.3 V)
+ - dvdd-supply: Digital core power supply
+ - iovdd-supply: Digital IO power supply
+ See regulator/regulator.txt for more information
+
+CODEC input pins:
+ * VINL1
+ * VINR1
+ * VINL2
+ * VINR2
+ * VINL3
+ * VINR3
+ * VINL4
+ * VINR4
+
+These pins can be referenced in the sound node's audio-routing property.
+
+Example:
+
+ pcm186x: audio-codec@4a {
+ compatible = "ti,pcm1865";
+ reg = <0x4a>;
+
+ avdd-supply = <&reg_3v3_analog>;
+ dvdd-supply = <&reg_3v3>;
+ iovdd-supply = <&reg_1v8>;
+ };
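+
+A hypothetical SPI variant (controller label, chip select and frequency are
+illustrative; on SPI, reg is the chip-select number):
+
+	&spi0 {
+		pcm186x: audio-codec@0 {
+			compatible = "ti,pcm1865";
+			reg = <0>;
+			spi-max-frequency = <10000000>; /* illustrative */
+
+			avdd-supply = <&reg_3v3_analog>;
+			dvdd-supply = <&reg_3v3>;
+			iovdd-supply = <&reg_1v8>;
+		};
+	};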
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 085bec364caf..5bed9a595772 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -4,7 +4,7 @@ Renesas R-Car sound
* Modules
=============================================
-Renesas R-Car sound is constructed from below modules
+Renesas R-Car and RZ/G sound is constructed from the modules below
(for Gen2 or later)
SCU : Sampling Rate Converter Unit
@@ -197,12 +197,17 @@ Ex)
[MEM] -> [SRC2] -> [CTU03] -+
sound {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
compatible = "simple-scu-audio-card";
...
- simple-audio-card,cpu-0 {
+ simple-audio-card,cpu@0 {
+ reg = <0>;
sound-dai = <&rcar_sound 0>;
};
- simple-audio-card,cpu-1 {
+ simple-audio-card,cpu@1 {
+ reg = <1>;
sound-dai = <&rcar_sound 1>;
};
simple-audio-card,codec {
@@ -334,9 +339,11 @@ Required properties:
- compatible : "renesas,rcar_sound-<soctype>", fallbacks
"renesas,rcar_sound-gen1" if generation1, and
- "renesas,rcar_sound-gen2" if generation2
+ "renesas,rcar_sound-gen2" if generation2 (or RZ/G1)
"renesas,rcar_sound-gen3" if generation3
Examples with soctypes are:
+ - "renesas,rcar_sound-r8a7743" (RZ/G1M)
+ - "renesas,rcar_sound-r8a7745" (RZ/G1E)
- "renesas,rcar_sound-r8a7778" (R-Car M1A)
- "renesas,rcar_sound-r8a7779" (R-Car H1)
- "renesas,rcar_sound-r8a7790" (R-Car H2)
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index 166f2290233b..17c13e74667d 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -140,6 +140,7 @@ sound {
simple-audio-card,name = "Cubox Audio";
simple-audio-card,dai-link@0 { /* I2S - HDMI */
+ reg = <0>;
format = "i2s";
cpu {
sound-dai = <&audio1 0>;
@@ -150,6 +151,7 @@ sound {
};
simple-audio-card,dai-link@1 { /* S/PDIF - HDMI */
+ reg = <1>;
cpu {
sound-dai = <&audio1 1>;
};
@@ -159,6 +161,7 @@ sound {
};
simple-audio-card,dai-link@2 { /* S/PDIF - S/PDIF */
+ reg = <2>;
cpu {
sound-dai = <&audio1 1>;
};
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt b/Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt
new file mode 100644
index 000000000000..864f5b00b031
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt
@@ -0,0 +1,63 @@
+STMicroelectronics Audio Digital Filter Sigma Delta modulators (DFSDM)
+
+The DFSDM allows PDM microphone capture through an SPI interface. The audio
+interface is seen as a sub-block of the DFSDM device.
+For details on DFSDM bindings, refer to ../iio/adc/st,stm32-dfsdm-adc.txt.
+
+Required properties:
+ - compatible: "st,stm32h7-dfsdm-dai".
+
+ - #sound-dai-cells : Must be equal to 0
+
+ - io-channels : phandle to iio dfsdm instance node.
+
+Example of a sound card using the audio DFSDM node:
+
+ sound_card {
+ compatible = "audio-graph-card";
+
+ dais = <&cpu_port>;
+ };
+
+ dfsdm: dfsdm@40017000 {
+ compatible = "st,stm32h7-dfsdm";
+ reg = <0x40017000 0x400>;
+ clocks = <&rcc DFSDM1_CK>;
+ clock-names = "dfsdm";
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dfsdm_adc0: filter@0 {
+ compatible = "st,stm32-dfsdm-dmic";
+ reg = <0>;
+ interrupts = <110>;
+ dmas = <&dmamux1 101 0x400 0x00>;
+ dma-names = "rx";
+ st,adc-channels = <1>;
+ st,adc-channel-names = "dmic0";
+ st,adc-channel-types = "SPI_R";
+ st,adc-channel-clk-src = "CLKOUT";
+ st,filter-order = <5>;
+
+ dfsdm_dai0: dfsdm-dai {
+ compatible = "st,stm32h7-dfsdm-dai";
+ #sound-dai-cells = <0>;
+ io-channels = <&dfsdm_adc0 0>;
+ cpu_port: port {
+ dfsdm_endpoint: endpoint {
+ remote-endpoint = <&dmic0_endpoint>;
+ };
+ };
+ };
+ };
+
+ dmic0: dmic@0 {
+ compatible = "dmic-codec";
+ #sound-dai-cells = <0>;
+ port {
+ dmic0_endpoint: endpoint {
+ remote-endpoint = <&dfsdm_endpoint>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
index 1f9cd7095337..b1acc1a256ba 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -20,11 +20,6 @@ Required properties:
Optional properties:
- resets: Reference to a reset controller asserting the SAI
- - st,sync: specify synchronization mode.
- By default SAI sub-block is in asynchronous mode.
- This property sets SAI sub-block as slave of another SAI sub-block.
- Must contain the phandle and index of the sai sub-block providing
- the synchronization.
SAI subnodes:
Two subnodes corresponding to SAI sub-block instances A et B can be defined.
@@ -44,6 +39,13 @@ SAI subnodes required properties:
- pinctrl-names: should contain only value "default"
- pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt
+SAI subnodes optional properties:
+ - st,sync: specify synchronization mode.
+ By default SAI sub-block is in asynchronous mode.
+ This property sets SAI sub-block as slave of another SAI sub-block.
+ Must contain the phandle and index of the sai sub-block providing
+ the synchronization.
+
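+A hypothetical sub-block slaved to another one via st,sync (labels, unit
+address and index are illustrative):
+
+	sai1b: audio-controller@4015824 {
+		...
+		st,sync = <&sai1a 2>;
+	};
+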
The device node should contain one 'port' child node with one child 'endpoint'
node, according to the bindings defined in Documentation/devicetree/bindings/
graph.txt.
diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
index 05d7135a8d2f..b9d50d6cdef3 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
@@ -8,6 +8,7 @@ Required properties:
- compatible: should be one of the following:
- "allwinner,sun4i-a10-i2s"
- "allwinner,sun6i-a31-i2s"
+ - "allwinner,sun8i-a83t-i2s"
- "allwinner,sun8i-h3-i2s"
- reg: physical base address of the controller and length of memory mapped
region.
@@ -23,6 +24,7 @@ Required properties:
Required properties for the following compatibles:
- "allwinner,sun6i-a31-i2s"
+ - "allwinner,sun8i-a83t-i2s"
- "allwinner,sun8i-h3-i2s"
- resets: phandle to the reset line for this codec
diff --git a/Documentation/devicetree/bindings/sound/tas5720.txt b/Documentation/devicetree/bindings/sound/tas5720.txt
index 40d94f82beb3..7481653fe8e3 100644
--- a/Documentation/devicetree/bindings/sound/tas5720.txt
+++ b/Documentation/devicetree/bindings/sound/tas5720.txt
@@ -6,10 +6,12 @@ audio playback. For more product information please see the links below:
http://www.ti.com/product/TAS5720L
http://www.ti.com/product/TAS5720M
+http://www.ti.com/product/TAS5722L
Required properties:
-- compatible : "ti,tas5720"
+- compatible : "ti,tas5720",
+ "ti,tas5722"
- reg : I2C slave address
- dvdd-supply : phandle to a 3.3-V supply for the digital circuitry
- pvdd-supply : phandle to a supply used for the Class-D amp and the analog
diff --git a/Documentation/devicetree/bindings/sound/tfa9879.txt b/Documentation/devicetree/bindings/sound/tfa9879.txt
index 23ba522d9e2b..1620e6848436 100644
--- a/Documentation/devicetree/bindings/sound/tfa9879.txt
+++ b/Documentation/devicetree/bindings/sound/tfa9879.txt
@@ -6,18 +6,18 @@ Required properties:
- reg : the I2C address of the device
+- #sound-dai-cells : must be 0.
+
Example:
&i2c1 {
- clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
- status = "okay";
- codec: tfa9879@6c {
+ amp: amp@6c {
#sound-dai-cells = <0>;
compatible = "nxp,tfa9879";
reg = <0x6c>;
- };
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/ti,tas6424.txt b/Documentation/devicetree/bindings/sound/ti,tas6424.txt
new file mode 100644
index 000000000000..1c4ada0eef4e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,tas6424.txt
@@ -0,0 +1,20 @@
+Texas Instruments TAS6424 Quad-Channel Audio amplifier
+
+The TAS6424 serial control bus communicates through the I2C protocol.
+
+Required properties:
+ - compatible: "ti,tas6424" - TAS6424
+ - reg: I2C slave address
+ - #sound-dai-cells: must be equal to 0
+
+Example:
+
+tas6424: tas6424@6a {
+ compatible = "ti,tas6424";
+ reg = <0x6a>;
+
+ #sound-dai-cells = <0>;
+};
+
+For more product information please see the link below:
+http://www.ti.com/product/TAS6424-Q1
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 6fbba562eaa7..5b3c33bb99e5 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -22,7 +22,7 @@ Required properties:
Optional properties:
-- gpio-reset - gpio pin number used for codec reset
+- reset-gpios - GPIO specification for the active low RESET input.
- ai31xx-micbias-vg - MicBias Voltage setting
1 or MICBIAS_2_0V - MICBIAS output is powered to 2.0V
2 or MICBIAS_2_5V - MICBIAS output is powered to 2.5V
@@ -30,6 +30,10 @@ Optional properties:
If this node is not mentioned or if the value is unknown, then
micbias is set to 2.0V.
+Deprecated properties:
+
+- gpio-reset - gpio pin number used for codec reset
+
CODEC output pins:
* HPL
* HPR
@@ -48,6 +52,7 @@ CODEC input pins:
The pins can be used in referring sound node's audio-routing property.
Example:
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/sound/tlv320aic31xx-micbias.h>
tlv320aic31xx: tlv320aic31xx@18 {
@@ -56,6 +61,8 @@ tlv320aic31xx: tlv320aic31xx@18 {
ai31xx-micbias-vg = <MICBIAS_OFF>;
+ reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+
HPVDD-supply = <&regulator>;
SPRVDD-supply = <&regulator>;
SPLVDD-supply = <&regulator>;
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
index ba5b45c483f5..9796c4639262 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
@@ -17,7 +17,7 @@ Required properties:
Optional properties:
-- gpio-reset - gpio pin number used for codec reset
+- reset-gpios - GPIO specification for the active low RESET input.
- ai3x-gpio-func - <array of 2 int> - AIC3X_GPIO1 & AIC3X_GPIO2 Functionality
- Not supported on tlv320aic3104
- ai3x-micbias-vg - MicBias Voltage required.
@@ -34,6 +34,10 @@ Optional properties:
- AVDD-supply, IOVDD-supply, DRVDD-supply, DVDD-supply : power supplies for the
device as covered in Documentation/devicetree/bindings/regulator/regulator.txt
+Deprecated properties:
+
+- gpio-reset - gpio pin number used for codec reset
+
CODEC output pins:
* LLOUT
* RLOUT
@@ -61,10 +65,14 @@ The pins can be used in referring sound node's audio-routing property.
Example:
+#include <dt-bindings/gpio/gpio.h>
+
tlv320aic3x: tlv320aic3x@1b {
compatible = "ti,tlv320aic3x";
reg = <0x1b>;
+ reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+
AVDD-supply = <&regulator>;
IOVDD-supply = <&regulator>;
DRVDD-supply = <&regulator>;
diff --git a/Documentation/devicetree/bindings/sound/tscs42xx.txt b/Documentation/devicetree/bindings/sound/tscs42xx.txt
new file mode 100644
index 000000000000..2ac2f0996697
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tscs42xx.txt
@@ -0,0 +1,16 @@
+TSCS42XX Audio CODEC
+
+Required Properties:
+
+ - compatible : "tempo,tscs42A1" for analog mic
+ "tempo,tscs42A2" for digital mic
+
+ - reg : <0x71> for analog mic
+ <0x69> for digital mic
+
+Example:
+
+wookie: codec@69 {
+ compatible = "tempo,tscs42A2";
+ reg = <0x69>;
+};
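+
+A hypothetical analog-mic variant (the label is illustrative):
+
+chewie: codec@71 {
+	compatible = "tempo,tscs42A1";
+	reg = <0x71>;
+};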
diff --git a/Documentation/devicetree/bindings/sound/uniphier,evea.txt b/Documentation/devicetree/bindings/sound/uniphier,evea.txt
new file mode 100644
index 000000000000..3f31b235f18b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/uniphier,evea.txt
@@ -0,0 +1,26 @@
+Socionext EVEA - UniPhier SoC internal codec driver
+
+Required properties:
+- compatible : should be "socionext,uniphier-evea".
+- reg : offset and length of the register set for the device.
+- clock-names : should include the following entries:
+                "evea", "exiv"
+- clocks : a list of phandles; should contain an entry for each entry
+           in clock-names.
+- reset-names : should include the following entries:
+                "evea", "exiv", "adamv"
+- resets : a list of phandles; should contain an entry for each entry
+           in reset-names.
+- #sound-dai-cells: should be 1.
+
+Example:
+
+ codec {
+ compatible = "socionext,uniphier-evea";
+ reg = <0x57900000 0x1000>;
+ clock-names = "evea", "exiv";
+ clocks = <&sys_clk 41>, <&sys_clk 42>;
+ reset-names = "evea", "exiv", "adamv";
+ resets = <&sys_rst 41>, <&sys_rst 42>, <&adamv_rst 0>;
+ #sound-dai-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index bdd83959019c..80710f0f0448 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -36,7 +36,21 @@ Required properties:
Optional properties:
- clocks : Must contain a reference to the functional clock.
-- num-cs : Total number of chip-selects (default is 1)
+- num-cs : Total number of chip selects (default is 1).
+ Up to 3 native chip selects are supported:
+ 0: MSIOF_SYNC
+ 1: MSIOF_SS1
+ 2: MSIOF_SS2
+ Hardware limitations related to chip selects:
+ - Native chip selects are always deasserted in
+ between transfers that are part of the same
+ message. Use cs-gpios to work around this.
+ - All slaves using native chip selects must use the
+ same spi-cs-high configuration. Use cs-gpios to
+ work around this.
+                     - When using GPIO chip selects, at least one native
+                       chip select must be left unused, as it will be
+                       driven anyway (see the sketch after this list).
- dmas : Must contain a list of two references to DMA
specifiers, one for transmission, and one for
reception.
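+
+A hypothetical node using GPIO chip selects to avoid these limitations
+(controller label and GPIO lines are illustrative):
+
+	&msiof0 {
+		num-cs = <2>;
+		cs-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>,
+			   <&gpio1 11 GPIO_ACTIVE_LOW>;
+	};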
diff --git a/Documentation/devicetree/bindings/spi/spi-meson.txt b/Documentation/devicetree/bindings/spi/spi-meson.txt
index 825c39cae74a..b7f5e86fed22 100644
--- a/Documentation/devicetree/bindings/spi/spi-meson.txt
+++ b/Documentation/devicetree/bindings/spi/spi-meson.txt
@@ -27,7 +27,9 @@ The Meson SPICC is generic SPI controller for general purpose Full-Duplex
communications with dedicated 16 words RX/TX PIO FIFOs.
Required properties:
- - compatible: should be "amlogic,meson-gx-spicc" on Amlogic GX SoCs.
+ - compatible: should be:
+ "amlogic,meson-gx-spicc" on Amlogic GX and compatible SoCs.
+ "amlogic,meson-axg-spicc" on Amlogic AXG and compatible SoCs
- reg: physical base address and length of the controller registers
- interrupts: The interrupt specifier
- clock-names: Must contain "core"
diff --git a/Documentation/devicetree/bindings/spi/spi-orion.txt b/Documentation/devicetree/bindings/spi/spi-orion.txt
index df8ec31f2f07..8434a65fc12a 100644
--- a/Documentation/devicetree/bindings/spi/spi-orion.txt
+++ b/Documentation/devicetree/bindings/spi/spi-orion.txt
@@ -18,8 +18,17 @@ Required properties:
The eight register sets following the control registers refer to
chip-select lines 0 through 7 respectively.
- cell-index : Which of multiple SPI controllers is this.
+- clocks : phandles to the reference clocks for this device. The first
+	   one is the clock used for the SPI bus; the second one is
+	   optional and is the clock used for the functional part of
+	   the controller.
+
Optional properties:
- interrupts : Is currently not used.
+- clock-names : names of the used clocks, mandatory if the second clock
+		is used. The names must be "core" and "axi" (the latter
+		is only for Armada 7K/8K).
+
Example:
spi@10600 {
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
index c7b7856bd528..7bf61efc66c8 100644
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -2,7 +2,7 @@ Xilinx SPI controller Device Tree Bindings
-------------------------------------------------
Required properties:
-- compatible : Should be "xlnx,xps-spi-2.00.a" or "xlnx,xps-spi-2.00.b"
+- compatible : Should be "xlnx,xps-spi-2.00.a", "xlnx,xps-spi-2.00.b" or "xlnx,axi-quad-spi-1.00.a"
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
diff --git a/Documentation/devicetree/bindings/timer/actions,owl-timer.txt b/Documentation/devicetree/bindings/timer/actions,owl-timer.txt
index e3c28da80cb2..977054f87563 100644
--- a/Documentation/devicetree/bindings/timer/actions,owl-timer.txt
+++ b/Documentation/devicetree/bindings/timer/actions,owl-timer.txt
@@ -2,6 +2,7 @@ Actions Semi Owl Timer
Required properties:
- compatible : "actions,s500-timer" for S500
+ "actions,s700-timer" for S700
"actions,s900-timer" for S900
- reg : Offset and length of the register set for the device.
- interrupts : Should contain the interrupts.
diff --git a/Documentation/devicetree/bindings/timer/spreadtrum,sprd-timer.txt b/Documentation/devicetree/bindings/timer/spreadtrum,sprd-timer.txt
new file mode 100644
index 000000000000..6d97e7d0f6e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/spreadtrum,sprd-timer.txt
@@ -0,0 +1,20 @@
+Spreadtrum timers
+
+The Spreadtrum SC9860 platform provides 3 general-purpose timers.
+These timers support a 32-bit or 64-bit counter, as well as period mode
+or one-shot mode, and they can be a wakeup source during deep sleep.
+
+Required properties:
+- compatible: should be "sprd,sc9860-timer" for SC9860 platform.
+- reg: The register address of the timer device.
+- interrupts: Should contain the interrupt for the timer device.
+- clocks: The phandle to the source clock (usually a 32.768 kHz fixed clock).
+
+Example:
+ timer@40050000 {
+ compatible = "sprd,sc9860-timer";
+ reg = <0 0x40050000 0 0x20>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ext_32k>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 52fb41046b34..44e8bab159ad 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -47,6 +47,8 @@ Optional properties:
from P0 to P1/P2/P3 without delay.
- snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check
during HS transmit.
+ - snps,dis_metastability_quirk: when set, disable metastability workaround.
+   CAUTION: use only if you are absolutely sure about it.
- snps,is-utmi-l1-suspend: true when DWC3 asserts output signal
utmi_l1_suspend_n, false when asserts utmi_sleep_n
- snps,hird-threshold: HIRD threshold
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
index 30595964876a..88d9f4a4b280 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
@@ -35,10 +35,14 @@ Required properties:
- phys : a list of phandle + phy specifier pairs
Optional properties:
- - mediatek,wakeup-src : 1: ip sleep wakeup mode; 2: line state wakeup
- mode;
- - mediatek,syscon-wakeup : phandle to syscon used to access USB wakeup
- control register, it depends on "mediatek,wakeup-src".
+ - wakeup-source : enable USB remote wakeup;
+ - mediatek,syscon-wakeup : phandle to syscon used to access the register
+ of the USB wakeup glue layer between xHCI and SPM; it depends on
+ "wakeup-source", and has two arguments:
+ - the first one : register base address of the glue layer in syscon;
+ - the second one : hardware version of the glue layer
+ - 1 : used by mt8173 etc
+ - 2 : used by mt2712 etc
- mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
bit1 for u3port1, ... etc;
- vbus-supply : reference to the VBUS regulator;
@@ -46,6 +50,7 @@ Optional properties:
- pinctrl-names : a pinctrl state named "default" must be defined
- pinctrl-0 : pin control group
See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+ - imod-interval-ns: default interrupt moderation interval is 5000ns
Example:
usb30: usb@11270000 {
@@ -64,8 +69,9 @@ usb30: usb@11270000 {
vusb33-supply = <&mt6397_vusb_reg>;
vbus-supply = <&usb_p1_vbus>;
usb3-lpm-capable;
- mediatek,syscon-wakeup = <&pericfg>;
- mediatek,wakeup-src = <1>;
+ mediatek,syscon-wakeup = <&pericfg 0x400 1>;
+ wakeup-source;
+ imod-interval-ns = <10000>;
};
2nd: dual-role mode with xHCI driver
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
index b2271d8e6b50..d589a1ef96a1 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
@@ -42,9 +42,14 @@ Optional properties:
- enable-manual-drd : supports manual dual-role switch via debugfs; usually
used when receptacle is TYPE-A and also wants to support dual-role
mode.
- - mediatek,enable-wakeup : supports ip sleep wakeup used by host mode
- - mediatek,syscon-wakeup : phandle to syscon used to access USB wakeup
- control register, it depends on "mediatek,enable-wakeup".
+ - wakeup-source: enable USB remote wakeup of host mode.
+ - mediatek,syscon-wakeup : phandle to syscon used to access the register
+ of the USB wakeup glue layer between SSUSB and SPM; it depends on
+ "wakeup-source", and has two arguments:
+ - the first one : register base address of the glue layer in syscon;
+ - the second one : hardware version of the glue layer
+ - 1 : used by mt8173 etc
+ - 2 : used by mt2712 etc
- mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
bit1 for u3port1, ... etc;
@@ -71,8 +76,8 @@ ssusb: usb@11271000 {
vbus-supply = <&usb_p0_vbus>;
extcon = <&extcon_usb>;
dr_mode = "otg";
- mediatek,enable-wakeup;
- mediatek,syscon-wakeup = <&pericfg>;
+ wakeup-source;
+ mediatek,syscon-wakeup = <&pericfg 0x400 1>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index 47394ab788e3..d060172f1529 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -13,8 +13,10 @@ Required properties:
- "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
- "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
- "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
+ - "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
- "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices
- "renesas,rcar-gen3-usbhs" for R-Car Gen3 compatible device
+ - "renesas,rza1-usbhs" for RZ/A1 compatible device
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first followed
diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt
index 1b27cebb47f4..036be172b1ae 100644
--- a/Documentation/devicetree/bindings/usb/usb-device.txt
+++ b/Documentation/devicetree/bindings/usb/usb-device.txt
@@ -4,8 +4,49 @@ Usually, we only use device tree for hard wired USB device.
The reference binding doc is from:
http://www.devicetree.org/open-firmware/bindings/usb/usb-1_0.ps
+Four types of device-tree nodes are defined: "host-controller nodes"
+representing USB host controllers, "device nodes" representing USB devices,
+"interface nodes" representing USB interfaces and "combined nodes"
+representing simple USB devices.
-Required properties:
+A combined node shall be used instead of a device node and an interface node
+for devices of class 0 or 9 (hub) with a single configuration and a single
+interface.
+
+A "hub node" is a combined node or an interface node that represents a USB
+hub.
+
+
+Required properties for device nodes:
+- compatible: "usbVID,PID", where VID is the vendor id and PID the product id.
+ The textual representation of VID and PID shall be in lower case hexadecimal
+ with leading zeroes suppressed. The other compatible strings from the above
+ standard binding could also be used, but a device adhering to this binding
+ may leave out all except for "usbVID,PID".
+- reg: the number of the USB hub port or the USB host-controller port to which
+ this device is attached. The range is 1-255.
+
+
+Required properties for device nodes with interface nodes:
+- #address-cells: shall be 2
+- #size-cells: shall be 0
+
+
+Required properties for interface nodes:
+- compatible: "usbifVID,PID.configCN.IN", where VID is the vendor id, PID is
+ the product id, CN is the configuration value and IN is the interface
+ number. The textual representation of VID, PID, CN and IN shall be in lower
+ case hexadecimal with leading zeroes suppressed. The other compatible
+ strings from the above standard binding could also be used, but a device
+ adhering to this binding may leave out all except for
+ "usbifVID,PID.configCN.IN".
+- reg: the interface number and configuration value
+
+The configuration component is not included in the textual representation of
+an interface-node unit address for configuration 1.
+
+
+Required properties for combined nodes:
- compatible: "usbVID,PID", where VID is the vendor id and PID the product id.
The textual representation of VID and PID shall be in lower case hexadecimal
with leading zeroes suppressed. The other compatible strings from the above
@@ -31,8 +72,31 @@ Example:
#address-cells = <1>;
#size-cells = <0>;
- hub@1 { /* hub connected to port 1 */
+ hub@1 { /* hub connected to port 1 */
compatible = "usb5e3,608";
reg = <1>;
};
+
+ device@2 { /* device connected to port 2 */
+ compatible = "usb123,4567";
+ reg = <2>;
+ };
+
+ device@3 { /* device connected to port 3 */
+ compatible = "usb123,abcd";
+ reg = <3>;
+
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ interface@0 { /* interface 0 of configuration 1 */
+ compatible = "usbif123,abcd.config1.0";
+ reg = <0 1>;
+ };
+
+ interface@0,2 { /* interface 0 of configuration 2 */
+ compatible = "usbif123,abcd.config2.0";
+ reg = <0 2>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index ae6e484a8d7c..e2ea59bbca93 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -7,12 +7,14 @@ Required properties:
- "marvell,armada3700-xhci" for Armada 37xx SoCs
- "marvell,armada-375-xhci" for Armada 375 SoCs
- "marvell,armada-380-xhci" for Armada 38x SoCs
+ - "renesas,xhci-r8a7743" for r8a7743 SoC
- "renesas,xhci-r8a7790" for r8a7790 SoC
- "renesas,xhci-r8a7791" for r8a7791 SoC
- "renesas,xhci-r8a7793" for r8a7793 SoC
- "renesas,xhci-r8a7795" for r8a7795 SoC
- "renesas,xhci-r8a7796" for r8a7796 SoC
- - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 compatible device
+ - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
+ device
- "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
- "xhci-platform" (deprecated)
@@ -29,6 +31,7 @@ Optional properties:
- usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM
- usb3-lpm-capable: determines if platform is USB3 LPM capable
- quirk-broken-port-ped: set if the controller has broken port disable mechanism
+ - imod-interval-ns: interrupt moderation interval; the default is 5000ns
Example:
usb@f0931000 {
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 889d1c0f5050..6ec1a028a3a8 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -348,6 +348,7 @@ tcg Trusted Computing Group
tcl Toby Churchill Ltd.
technexion TechNexion
technologic Technologic Systems
+tempo Tempo Semiconductor
terasic Terasic Inc.
thine THine Electronics, Inc.
ti Texas Instruments
diff --git a/Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt b/Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt
new file mode 100644
index 000000000000..3de96186e92e
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt
@@ -0,0 +1,39 @@
+Zodiac Inflight Innovations RAVE Supervisory Processor Watchdog Bindings
+
+RAVE SP watchdog device is an "MFD cell" device corresponding to the
+watchdog functionality of the RAVE Supervisory Processor. It is expected
+that its Device Tree node is specified as a child of the node
+corresponding to the parent RAVE SP device (as documented in
+Documentation/devicetree/bindings/mfd/zii,rave-sp.txt).
+
+Required properties:
+
+- compatible: Depending on wire protocol implemented by RAVE SP
+ firmware, should be one of:
+ - "zii,rave-sp-watchdog"
+ - "zii,rave-sp-watchdog-legacy"
+
+Optional properties:
+
+- wdt-timeout: Two byte nvmem cell specified as per
+ Documentation/devicetree/bindings/nvmem/nvmem.txt
+
+Example:
+
+ rave-sp {
+ compatible = "zii,rave-sp-rdu1";
+ current-speed = <38400>;
+
+ eeprom {
+ wdt_timeout: wdt-timeout@8E {
+ reg = <0x8E 2>;
+ };
+ };
+
+ watchdog {
+ compatible = "zii,rave-sp-watchdog";
+ nvmem-cells = <&wdt_timeout>;
+ nvmem-cell-names = "wdt-timeout";
+ };
+ };
+
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index 0268335414ce..722d4525f7cf 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -112,16 +112,17 @@ Example kernel-doc function comment::
/**
* foobar() - Brief description of foobar.
- * @arg: Description of argument of foobar.
+ * @argument1: Description of parameter argument1 of foobar.
+ * @argument2: Description of parameter argument2 of foobar.
*
* Longer description of foobar.
*
* Return: Description of return value of foobar.
*/
- int foobar(int arg)
+ int foobar(int argument1, char *argument2)
The format is similar for documentation for structures, enums, paragraphs,
-etc. See the sections below for details.
+etc. See the sections below for specific details of each type.
The kernel-doc structure is extracted from the comments, and proper `Sphinx C
Domain`_ function and type descriptions with anchors are generated for them. The
@@ -130,6 +131,226 @@ cross-references. See below for details.
.. _Sphinx C Domain: http://www.sphinx-doc.org/en/stable/domains.html
+
+Parameters and member arguments
+-------------------------------
+
+The kernel-doc comments describe each parameter to a function or function
+typedef, or each member of a struct/union, in order, with the
+``@argument:`` descriptions. One ``@argument`` definition is needed for
+each non-private member.
+
+The ``@argument:`` descriptions begin on the very next line following
+the opening brief function description line, with no intervening blank
+comment lines.
+
+The ``@argument:`` descriptions may span multiple lines.
+
+.. note::
+
+ If the ``@argument`` description has multiple lines, the continuation
+ of the description should start at exactly the same column as the
+ previous line, e.g.::
+
+ * @argument: some long description
+ * that continues on next lines
+
+ or::
+
+ * @argument:
+ * some long description
+ * that continues on next lines
+
+If a function or typedef parameter is ``...`` (i.e. a variable
+number of arguments), its description should be listed in kernel-doc
+notation as::
+
+ * @...: description
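+
+For example, a printf-like logging helper (the function name and its
+parameters here are hypothetical) could be documented as::
+
+ /**
+ * log_event() - Log a formatted event message.
+ * @level: severity level of the event
+ * @fmt: printf-style format string
+ * @...: variable arguments for @fmt
+ */
+ int log_event(int level, const char *fmt, ...);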
+
+Private members
+~~~~~~~~~~~~~~~
+
+Inside a struct or union description, you can use the ``private:`` and
+``public:`` comment tags. Structure fields that are inside a ``private:``
+area are not listed in the generated output documentation.
+
+The ``private:`` and ``public:`` tags must begin immediately following a
+``/*`` comment marker. They may optionally include comments between the
+``:`` and the ending ``*/`` marker.
+
+Example::
+
+ /**
+ * struct my_struct - short description
+ * @a: first member
+ * @b: second member
+ * @d: fourth member
+ *
+ * Longer description
+ */
+ struct my_struct {
+ int a;
+ int b;
+ /* private: internal use only */
+ int c;
+ /* public: the next one is public */
+ int d;
+ };
+
+Function documentation
+----------------------
+
+The general format of a function and function-like macro kernel-doc comment is::
+
+ /**
+ * function_name() - Brief description of function.
+ * @arg1: Describe the first argument.
+ * @arg2: Describe the second argument.
+ * One can provide multiple line descriptions
+ * for arguments.
+ *
+ * A longer description, with more discussion of the function function_name()
+ * that might be useful to those using or modifying it. Begins with an
+ * empty comment line, and may include additional embedded empty
+ * comment lines.
+ *
+ * The longer description may have multiple paragraphs.
+ *
+ * Return: Describe the return value of function_name.
+ *
+ * The return value description can also have multiple paragraphs, and should
+ * be placed at the end of the comment block.
+ */
+
+The brief description following the function name may span multiple lines, and
+ends with an argument description, a blank comment line, or the end of the
+comment block.
+
+Return values
+~~~~~~~~~~~~~
+
+The return value, if any, should be described in a dedicated section
+named ``Return``.
+
+.. note::
+
+ #) The multi-line descriptive text you provide does *not* recognize
+ line breaks, so if you try to format some text nicely, as in::
+
+ * Return:
+ * 0 - OK
+ * -EINVAL - invalid argument
+ * -ENOMEM - out of memory
+
+ this will all run together and produce::
+
+ Return: 0 - OK -EINVAL - invalid argument -ENOMEM - out of memory
+
+ So, in order to produce the desired line breaks, you need to use a
+ ReST list, e.g.::
+
+ * Return:
+ * * 0 - OK to runtime suspend the device
+ * * -EBUSY - Device should not be runtime suspended
+
+ #) If the descriptive text you provide has lines that begin with
+ some phrase followed by a colon, each of those phrases will be taken
+ as a new section heading, which probably won't produce the desired
+ effect.
+
+Structure, union, and enumeration documentation
+-----------------------------------------------
+
+The general format of a struct, union, and enum kernel-doc comment is::
+
+ /**
+ * struct struct_name - Brief description.
+ * @argument: Description of member argument.
+ *
+ * Description of the structure.
+ */
+
+In the above, ``struct`` is used to mean structs. You can also use ``union``
+and ``enum`` to describe unions and enums. ``argument`` is used
+to mean struct and union member names as well as enumerations in an enum.
+
+The brief description following the structure name may span multiple lines, and
+ends with a member description, a blank comment line, or the end of the
+comment block.
+
+The kernel-doc data structure comments describe each member of the structure,
+in order, with the member descriptions.
+
+Nested structs/unions
+~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to document nested structs and unions, like::
+
+ /**
+ * struct nested_foobar - a struct with nested unions and structs
+ * @arg1: first argument of anonymous union/anonymous struct
+ * @arg2: second argument of anonymous union/anonymous struct
+ * @arg3: third argument of anonymous union/anonymous struct
+ * @arg4: fourth argument of anonymous union/anonymous struct
+ * @bar.st1.arg1: first argument of struct st1 on union bar
+ * @bar.st1.arg2: second argument of struct st1 on union bar
+ * @bar.st2.arg1: first argument of struct st2 on union bar
+ * @bar.st2.arg2: second argument of struct st2 on union bar
+ */
+ struct nested_foobar {
+   /* Anonymous union/struct */
+   union {
+     struct {
+       int arg1;
+       int arg2;
+     };
+     struct {
+       void *arg3;
+       int arg4;
+     };
+   };
+   union {
+     struct {
+       int arg1;
+       int arg2;
+     } st1;
+     struct {
+       void *arg1;
+       int arg2;
+     } st2;
+   } bar;
+ };
+
+.. note::
+
+ #) When documenting nested structs or unions, if the struct/union ``foo``
+ is named, the argument ``bar`` inside it should be documented as
+ ``@foo.bar:``.
+ #) When the nested struct/union is anonymous, the argument ``bar`` inside it
+ should be documented as ``@bar:``.
+
+Typedef documentation
+---------------------
+
+The general format of a typedef kernel-doc comment is::
+
+ /**
+ * typedef type_name - Brief description.
+ *
+ * Description of the type.
+ */
+
+Typedefs with function prototypes can also be documented::
+
+ /**
+ * typedef type_name - Brief description.
+ * @arg1: description of arg1
+ * @arg2: description of arg2
+ *
+ * Description of the type.
+ */
+ typedef void (*type_name)(struct v4l2_ctrl *arg1, void *arg2);
+
+
Highlights and cross-references
-------------------------------
@@ -201,70 +422,7 @@ cross-references.
For further details, please refer to the `Sphinx C Domain`_ documentation.
-Function documentation
-----------------------
-
-The general format of a function and function-like macro kernel-doc comment is::
-
- /**
- * function_name() - Brief description of function.
- * @arg1: Describe the first argument.
- * @arg2: Describe the second argument.
- * One can provide multiple line descriptions
- * for arguments.
- *
- * A longer description, with more discussion of the function function_name()
- * that might be useful to those using or modifying it. Begins with an
- * empty comment line, and may include additional embedded empty
- * comment lines.
- *
- * The longer description may have multiple paragraphs.
- *
- * Return: Describe the return value of foobar.
- *
- * The return value description can also have multiple paragraphs, and should
- * be placed at the end of the comment block.
- */
-
-The brief description following the function name may span multiple lines, and
-ends with an ``@argument:`` description, a blank comment line, or the end of the
-comment block.
-
-The kernel-doc function comments describe each parameter to the function, in
-order, with the ``@argument:`` descriptions. The ``@argument:`` descriptions
-must begin on the very next line following the opening brief function
-description line, with no intervening blank comment lines. The ``@argument:``
-descriptions may span multiple lines. The continuation lines may contain
-indentation. If a function parameter is ``...`` (varargs), it should be listed
-in kernel-doc notation as: ``@...:``.
-
-The return value, if any, should be described in a dedicated section at the end
-of the comment starting with "Return:".
-
-Structure, union, and enumeration documentation
------------------------------------------------
-
-The general format of a struct, union, and enum kernel-doc comment is::
-
- /**
- * struct struct_name - Brief description.
- * @member_name: Description of member member_name.
- *
- * Description of the structure.
- */
-
-Below, "struct" is used to mean structs, unions and enums, and "member" is used
-to mean struct and union members as well as enumerations in an enum.
-
-The brief description following the structure name may span multiple lines, and
-ends with a ``@member:`` description, a blank comment line, or the end of the
-comment block.
-The kernel-doc data structure comments describe each member of the structure, in
-order, with the ``@member:`` descriptions. The ``@member:`` descriptions must
-begin on the very next line following the opening brief function description
-line, with no intervening blank comment lines. The ``@member:`` descriptions may
-span multiple lines. The continuation lines may contain indentation.
In-line member documentation comments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -294,42 +452,6 @@ on a line of their own, like all other kernel-doc comments::
int foobar;
}
-Private members
-~~~~~~~~~~~~~~~
-
-Inside a struct description, you can use the "private:" and "public:" comment
-tags. Structure fields that are inside a "private:" area are not listed in the
-generated output documentation. The "private:" and "public:" tags must begin
-immediately following a ``/*`` comment marker. They may optionally include
-comments between the ``:`` and the ending ``*/`` marker.
-
-Example::
-
- /**
- * struct my_struct - short description
- * @a: first member
- * @b: second member
- *
- * Longer description
- */
- struct my_struct {
- int a;
- int b;
- /* private: internal use only */
- int c;
- };
-
-
-Typedef documentation
----------------------
-
-The general format of a typedef kernel-doc comment is::
-
- /**
- * typedef type_name - Brief description.
- *
- * Description of the type.
- */
Overview documentation comments
-------------------------------
@@ -376,3 +498,37 @@ file.
Data structures visible in kernel include files should also be documented using
kernel-doc formatted comments.
+
+How to use kernel-doc to generate man pages
+-------------------------------------------
+
+If you just want to use kernel-doc to generate man pages you can do this
+from the Kernel git tree::
+
+ $ scripts/kernel-doc -man $(git grep -l '/\*\*' |grep -v Documentation/) | ./split-man.pl /tmp/man
+
+Using the small ``split-man.pl`` script below::
+
+
+ #!/usr/bin/perl
+
+ if ($#ARGV < 0) {
+ die "where do I put the results?\n";
+ }
+
+ mkdir $ARGV[0],0777;
+ $state = 0;
+ while (<STDIN>) {
+ if (/^\.TH \"[^\"]*\" 9 \"([^\"]*)\"/) {
+ if ($state == 1) { close OUT }
+ $state = 1;
+ $fn = "$ARGV[0]/$1.9";
+ print STDERR "Creating $fn\n";
+ open OUT, ">$fn" or die "can't open $fn: $!\n";
+ print OUT $_;
+ } elsif ($state != 0) {
+ print OUT $_;
+ }
+ }
+
+ close OUT;
diff --git a/Documentation/driver-api/basics.rst b/Documentation/driver-api/basics.rst
index 73fa7d42bbba..826e85d50a16 100644
--- a/Documentation/driver-api/basics.rst
+++ b/Documentation/driver-api/basics.rst
@@ -13,12 +13,6 @@ Driver device table
.. kernel-doc:: include/linux/mod_devicetable.h
:internal:
-Atomic and pointer manipulation
--------------------------------
-
-.. kernel-doc:: arch/x86/include/asm/atomic.h
- :internal:
-
Delaying, scheduling, and timer routines
----------------------------------------
@@ -85,6 +79,21 @@ Internal Functions
.. kernel-doc:: kernel/kthread.c
:export:
+Reference counting
+------------------
+
+.. kernel-doc:: include/linux/refcount.h
+ :internal:
+
+.. kernel-doc:: lib/refcount.c
+ :export:
+
+Atomics
+-------
+
+.. kernel-doc:: arch/x86/include/asm/atomic.h
+ :internal:
+
Kernel objects manipulation
---------------------------
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index 814acb4d2294..dfc4486b5743 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -111,40 +111,36 @@ The first thing you need to do in your driver is to allocate this
structure. Any of the usual memory allocators will do, but you'll also
need to initialize a few fields in there:
-- channels: should be initialized as a list using the
+- ``channels``: should be initialized as a list using the
INIT_LIST_HEAD macro for example
-- src_addr_widths:
+- ``src_addr_widths``:
should contain a bitmask of the supported source transfer width
-- dst_addr_widths:
+- ``dst_addr_widths``:
should contain a bitmask of the supported destination transfer width
-- directions:
+- ``directions``:
should contain a bitmask of the supported slave directions
(i.e. excluding mem2mem transfers)
-- residue_granularity:
+- ``residue_granularity``:
+ granularity of the transfer residue reported to dma_set_residue.
+ This can be either:
- - Granularity of the transfer residue reported to dma_set_residue.
- This can be either:
+ - Descriptor:
+ your device doesn't support any kind of residue
+ reporting. The framework will only know that a particular
+ transaction descriptor is done.
- - Descriptor
+ - Segment:
+ your device is able to report which chunks have been transferred
- - Your device doesn't support any kind of residue
- reporting. The framework will only know that a particular
- transaction descriptor is done.
+ - Burst:
+ your device is able to report which bursts have been transferred
- - Segment
-
- - Your device is able to report which chunks have been transferred
-
- - Burst
-
- - Your device is able to report which burst have been transferred
-
- - dev: should hold the pointer to the ``struct device`` associated
- to your current driver instance.
+- ``dev``: should hold the pointer to the ``struct device`` associated
+ to your current driver instance.
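+
+As a rough sketch, this initialization could look like the following, assuming
+``dd`` points at the driver's ``struct dma_device`` and ``pdev`` is its
+platform device (both names hypothetical)::
+
+	INIT_LIST_HEAD(&dd->channels);
+	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dd->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	dd->dev = &pdev->dev;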
Supported transaction types
---------------------------
diff --git a/Documentation/driver-api/firmware/built-in-fw.rst b/Documentation/driver-api/firmware/built-in-fw.rst
index 7300e66857f8..396cdf591ac5 100644
--- a/Documentation/driver-api/firmware/built-in-fw.rst
+++ b/Documentation/driver-api/firmware/built-in-fw.rst
@@ -11,13 +11,8 @@ options:
* CONFIG_EXTRA_FIRMWARE
* CONFIG_EXTRA_FIRMWARE_DIR
-This should not be confused with CONFIG_FIRMWARE_IN_KERNEL, this is for drivers
-which enables firmware to be built as part of the kernel build process. This
-option, CONFIG_FIRMWARE_IN_KERNEL, will build all firmware for all drivers
-enabled which ship its firmware inside the Linux kernel source tree.
-
There are a few reasons why you might want to consider building your firmware
-into the kernel with CONFIG_EXTRA_FIRMWARE though:
+into the kernel with CONFIG_EXTRA_FIRMWARE:
* Speed
* Firmware is needed for accessing the boot device, and the user doesn't
diff --git a/Documentation/driver-api/firmware/fallback-mechanisms.rst b/Documentation/driver-api/firmware/fallback-mechanisms.rst
index d19354794e67..4055ac76b288 100644
--- a/Documentation/driver-api/firmware/fallback-mechanisms.rst
+++ b/Documentation/driver-api/firmware/fallback-mechanisms.rst
@@ -71,7 +71,7 @@ via fw_create_instance(). This call creates a new struct device named after
the firmware requested, and establishes it in the device hierarchy by
associating the device used to make the request as the device's parent.
The sysfs directory's file attributes are defined and controlled through
-the new device's class (firmare_class) and group (fw_dev_attr_groups).
+the new device's class (firmware_class) and group (fw_dev_attr_groups).
This is actually where the original firmware_class.c file name comes from,
as originally the only firmware loading mechanism available was the
mechanism we now use as a fallback mechanism.
diff --git a/Documentation/driver-api/iio/hw-consumer.rst b/Documentation/driver-api/iio/hw-consumer.rst
new file mode 100644
index 000000000000..8facce6a6733
--- /dev/null
+++ b/Documentation/driver-api/iio/hw-consumer.rst
@@ -0,0 +1,51 @@
+===========
+HW consumer
+===========
+An IIO device can be directly connected to another device in hardware. In
+this case the buffers between the IIO provider and the IIO consumer are
+handled by hardware. The Industrial I/O HW consumer offers a way to bond these
+IIO devices without a software buffer for the data. The implementation can be
+found under :file:`drivers/iio/buffer/industrialio-hw-consumer.c`
+
+
+* struct :c:type:`iio_hw_consumer` — Hardware consumer structure
+* :c:func:`iio_hw_consumer_alloc` — Allocate IIO hardware consumer
+* :c:func:`iio_hw_consumer_free` — Free IIO hardware consumer
+* :c:func:`iio_hw_consumer_enable` — Enable IIO hardware consumer
+* :c:func:`iio_hw_consumer_disable` — Disable IIO hardware consumer
+
+
+HW consumer setup
+=================
+
+As with a standard IIO device, the implementation is based on the IIO
+provider/consumer model.
+A typical IIO HW consumer setup looks like this::
+
+  static struct iio_hw_consumer *hwc;
+
+  static int adc_read_raw(struct iio_dev *indio_dev,
+                          struct iio_chan_spec const *chan, int *val,
+                          int *val2, long mask)
+  {
+          int ret;
+
+          ret = iio_hw_consumer_enable(hwc);
+          if (ret)
+                  return ret;
+
+          /* Acquire data into *val here */
+
+          iio_hw_consumer_disable(hwc);
+          return IIO_VAL_INT;
+  }
+
+  static const struct iio_info adc_info = {
+          .read_raw = adc_read_raw,
+  };
+
+  static int adc_probe(struct platform_device *pdev)
+  {
+          hwc = devm_iio_hw_consumer_alloc(&pdev->dev);
+          if (IS_ERR(hwc))
+                  return PTR_ERR(hwc);
+
+          return 0;
+  }
+
+More details
+============
+.. kernel-doc:: include/linux/iio/hw-consumer.h
+.. kernel-doc:: drivers/iio/buffer/industrialio-hw-consumer.c
+ :export:
+
diff --git a/Documentation/driver-api/iio/index.rst b/Documentation/driver-api/iio/index.rst
index e5c3922d1b6f..7fba341bd8b2 100644
--- a/Documentation/driver-api/iio/index.rst
+++ b/Documentation/driver-api/iio/index.rst
@@ -15,3 +15,4 @@ Contents:
buffers
triggers
triggered-buffers
+ hw-consumer
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index 53c1b0b06da5..1128705a5731 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -777,17 +777,51 @@ The driver can indicate that by setting ``DPM_FLAG_SMART_SUSPEND`` in
runtime suspend at the beginning of the ``suspend_late`` phase of system-wide
suspend (or in the ``poweroff_late`` phase of hibernation), when runtime PM
has been disabled for it, under the assumption that its state should not change
-after that point until the system-wide transition is over. If that happens, the
-driver's system-wide resume callbacks, if present, may still be invoked during
-the subsequent system-wide resume transition and the device's runtime power
-management status may be set to "active" before enabling runtime PM for it,
-so the driver must be prepared to cope with the invocation of its system-wide
-resume callbacks back-to-back with its ``->runtime_suspend`` one (without the
-intervening ``->runtime_resume`` and so on) and the final state of the device
-must reflect the "active" status for runtime PM in that case.
+after that point until the system-wide transition is over (the PM core itself
+does that for devices whose "noirq", "late" and "early" system-wide PM callbacks
+are executed directly by it). If that happens, the driver's system-wide resume
+callbacks, if present, may still be invoked during the subsequent system-wide
+resume transition and the device's runtime power management status may be set
+to "active" before enabling runtime PM for it, so the driver must be prepared to
+cope with the invocation of its system-wide resume callbacks back-to-back with
+its ``->runtime_suspend`` one (without the intervening ``->runtime_resume`` and
+so on) and the final state of the device must reflect the "active" runtime PM
+status in that case.
During system-wide resume from a sleep state it's easiest to put devices into
the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`.
-Refer to that document for more information regarding this particular issue as
+[Refer to that document for more information regarding this particular issue as
well as for information on the device runtime power management framework in
-general.
+general.]
+
+However, it often is desirable to leave devices in suspend after system
+transitions to the working state, especially if those devices had been in
+runtime suspend before the preceding system-wide suspend (or analogous)
+transition. Device drivers can use the ``DPM_FLAG_LEAVE_SUSPENDED`` flag to
+indicate to the PM core (and middle-layer code) that they prefer the specific
+devices handled by them to be left suspended and they have no problems with
+skipping their system-wide resume callbacks for this reason. Whether or not the
+devices will actually be left in suspend may depend on their state before the
+given system suspend-resume cycle and on the type of the system transition under
+way. In particular, devices are not left suspended if that transition is a
+restore from hibernation, as device states are not guaranteed to be reflected
+by the information stored in the hibernation image in that case.
+
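+As a minimal sketch, a driver could opt in from its probe path like this (the
+driver and function names are hypothetical)::
+
+	static int foo_probe(struct platform_device *pdev)
+	{
+		/* ... normal probe work ... */
+		dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
+						    DPM_FLAG_LEAVE_SUSPENDED);
+		return 0;
+	}
+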
+The middle-layer code involved in the handling of the device is expected to
+indicate to the PM core if the device may be left in suspend by setting its
+:c:member:`power.may_skip_resume` status bit which is checked by the PM core
+during the "noirq" phase of the preceding system-wide suspend (or analogous)
+transition. The middle layer is then responsible for handling the device as
+appropriate in its "noirq" resume callback, which is executed regardless of
+whether or not the device is left suspended, but the other resume callbacks
+(except for ``->complete``) will be skipped automatically by the PM core if the
+device really can be left in suspend.
+
+For devices whose "noirq", "late" and "early" driver callbacks are invoked
+directly by the PM core, all of the system-wide resume callbacks are skipped if
+``DPM_FLAG_LEAVE_SUSPENDED`` is set and the device is in runtime suspend during
+the ``suspend_noirq`` (or analogous) phase or the transition under way is a
+proper system suspend (rather than anything related to hibernation) and the
+device's wakeup settings are suitable for runtime PM (that is, it cannot
+generate wakeup signals at all or it is allowed to wake up the system from
+sleep).
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index 9ae03171daca..3ae337929721 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -224,6 +224,14 @@ mid to lowlevel SCSI driver interface
.. kernel-doc:: drivers/scsi/hosts.c
:export:
+drivers/scsi/scsi_common.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+general support functions
+
+.. kernel-doc:: drivers/scsi/scsi_common.c
+ :export:
+
Transport classes
-----------------
@@ -332,5 +340,5 @@ todo
~~~~
Parallel (fast/wide/ultra) SCSI, USB, SATA, SAS, Fibre Channel,
-FireWire, ATAPI devices, Infiniband, I20, iSCSI, Parallel ports,
+FireWire, ATAPI devices, Infiniband, I2O, iSCSI, Parallel ports,
netlink...
diff --git a/Documentation/driver-api/usb/usb3-debug-port.rst b/Documentation/driver-api/usb/usb3-debug-port.rst
index feb1a36a65b7..b9fd131f4723 100644
--- a/Documentation/driver-api/usb/usb3-debug-port.rst
+++ b/Documentation/driver-api/usb/usb3-debug-port.rst
@@ -98,3 +98,55 @@ you to check the sanity of the setup.
cat /dev/ttyUSB0
done
===== end of bash scripts ===============
+
+Serial TTY
+==========
+
+The DbC support has been added to the xHCI driver. You can get a
+debug device provided by the DbC at runtime.
+
+In order to use this, you need to make sure your kernel has been
+configured to support USB_XHCI_DBGCAP. A sysfs attribute under
+the xHCI device node is used to enable or disable DbC. By default,
+DbC is disabled::
+
+ root@target:/sys/bus/pci/devices/0000:00:14.0# cat dbc
+ disabled
+
+Enable DbC with the following command::
+
+ root@target:/sys/bus/pci/devices/0000:00:14.0# echo enable > dbc
+
+You can check the DbC state at any time::
+
+ root@target:/sys/bus/pci/devices/0000:00:14.0# cat dbc
+ enabled
+
+Connect the debug target to the debug host with a USB 3.0 super-
+speed A-to-A debugging cable. You can see /dev/ttyDBC0 created
+on the debug target, with kernel message lines like the below::
+
+ root@target: tail -f /var/log/kern.log
+ [ 182.730103] xhci_hcd 0000:00:14.0: DbC connected
+ [ 191.169420] xhci_hcd 0000:00:14.0: DbC configured
+ [ 191.169597] xhci_hcd 0000:00:14.0: DbC now attached to /dev/ttyDBC0
+
+Accordingly, the DbC state has been brought up to::
+
+ root@target:/sys/bus/pci/devices/0000:00:14.0# cat dbc
+ configured
+
+On the debug host, you will see that the debug device has been enumerated,
+with kernel message lines like the below::
+
+ root@host: tail -f /var/log/kern.log
+ [ 79.454780] usb 2-2.1: new SuperSpeed USB device number 3 using xhci_hcd
+ [ 79.475003] usb 2-2.1: LPM exit latency is zeroed, disabling LPM.
+ [ 79.475389] usb 2-2.1: New USB device found, idVendor=1d6b, idProduct=0010
+ [ 79.475390] usb 2-2.1: New USB device strings: Mfr=1, Product=2, SerialNumber=3
+ [ 79.475391] usb 2-2.1: Product: Linux USB Debug Target
+ [ 79.475392] usb 2-2.1: Manufacturer: Linux Foundation
+ [ 79.475393] usb 2-2.1: SerialNumber: 0001
+
+The debug device is now working. You can use any communication or debugging
+program to talk between the host and the target.
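+
+For example, with a serial terminal program on each side (the program choice
+is illustrative; the device nodes are as enumerated above)::
+
+	root@host: minicom -D /dev/ttyUSB0
+	root@target: minicom -D /dev/ttyDBC0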
diff --git a/Documentation/driver-api/usb/writing_usb_driver.rst b/Documentation/driver-api/usb/writing_usb_driver.rst
index 69f077dcdb78..4fe1c06b6a13 100644
--- a/Documentation/driver-api/usb/writing_usb_driver.rst
+++ b/Documentation/driver-api/usb/writing_usb_driver.rst
@@ -321,6 +321,6 @@ linux-usb-devel Mailing List Archives:
http://marc.theaimsgroup.com/?l=linux-usb-devel
Programming Guide for Linux USB Device Drivers:
-http://usb.cs.tum.edu/usbdoc
+http://lmu.web.psi.ch/docu/manuals/software_manuals/linux_sl/usb_linux_programming_guide.pdf
USB Home Page: http://www.usb.org
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index c180045eb43b..7c1bb3d0c222 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -384,6 +384,9 @@ RESET
devm_reset_control_get()
devm_reset_controller_register()
+SERDEV
+ devm_serdev_device_open()
+
SLAVE DMA ENGINE
devm_acpi_dma_controller_register()
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 918972babcd8..de1dc35fe500 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -1,7 +1,7 @@
Fault injection capabilities infrastructure
===========================================
-See also drivers/md/faulty.c and "every_nth" module option for scsi_debug.
+See also drivers/md/md-faulty.c and "every_nth" module option for scsi_debug.
Available fault injection capabilities
@@ -30,6 +30,12 @@ o fail_mmc_request
injects MMC data errors on devices permitted by setting
debugfs entries under /sys/kernel/debug/mmc0/fail_mmc_request
+o fail_function
+
+ injects error returns into specific functions, which are marked by
+ the ALLOW_ERROR_INJECTION() macro, by setting debugfs entries
+ under /sys/kernel/debug/fail_function. No boot option supported.
+
Configure fault-injection capabilities behavior
-----------------------------------------------
@@ -123,6 +129,29 @@ configuration of fault-injection capabilities.
default is 'N', setting it to 'Y' will disable failure injections
when dealing with private (address space) futexes.
+- /sys/kernel/debug/fail_function/inject:
+
+ Format: { 'function-name' | '!function-name' | '' }
+ specifies the target function of error injection by name.
+ If the function name is prefixed with '!', the given function is
+ removed from the injection list. If nothing is specified (''),
+ the injection list is cleared.
+
+- /sys/kernel/debug/fail_function/injectable:
+
+ (read only) shows the error-injectable functions and what type of
+ error values can be specified. The error type will be one of
+ the following:
+ - NULL: retval must be 0.
+ - ERRNO: retval must be -1 to -MAX_ERRNO (-4096).
+ - ERR_NULL: retval must be 0 or -1 to -MAX_ERRNO (-4096).
+
+- /sys/kernel/debug/fail_function/<function-name>/retval:
+
+ specifies the "error" return value to inject for the given
+ function. This file is created when the user specifies a new
+ injection entry.
+
o Boot option
In order to inject faults while debugfs is not available (early boot time),
@@ -268,6 +297,45 @@ trap "echo 0 > /sys/kernel/debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT
echo "Injecting errors into the module $module... (interrupt to stop)"
sleep 1000000
+------------------------------------------------------------------------------
+
+o Inject open_ctree error while btrfs mount
+
+#!/bin/bash
+
+rm -f testfile.img
+dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
+DEVICE=$(losetup --show -f testfile.img)
+mkfs.btrfs -f $DEVICE
+mkdir -p tmpmnt
+
+FAILTYPE=fail_function
+FAILFUNC=open_ctree
+echo $FAILFUNC > /sys/kernel/debug/$FAILTYPE/inject
+echo -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval
+echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+echo 100 > /sys/kernel/debug/$FAILTYPE/probability
+echo 0 > /sys/kernel/debug/$FAILTYPE/interval
+echo -1 > /sys/kernel/debug/$FAILTYPE/times
+echo 0 > /sys/kernel/debug/$FAILTYPE/space
+echo 1 > /sys/kernel/debug/$FAILTYPE/verbose
+
+mount -t btrfs $DEVICE tmpmnt
+if [ $? -ne 0 ]
+then
+ echo "SUCCESS!"
+else
+ echo "FAILED!"
+ umount tmpmnt
+fi
+
+echo > /sys/kernel/debug/$FAILTYPE/inject
+
+rmdir tmpmnt
+losetup -d $DEVICE
+rm testfile.img
+
+
Tool to run command with failslab or fail_page_alloc
----------------------------------------------------
In order to make it easier to accomplish the tasks mentioned above, we can use
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index f377290fe48e..3406fae833c3 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -35,5 +35,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok | 64-bit only
- | xtensa: | TODO |
+ | xtensa: | ok |
-----------------------
diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt
index d7acd7bd3619..59a4c9ffb7f3 100644
--- a/Documentation/features/debug/stackprotector/arch-support.txt
+++ b/Documentation/features/debug/stackprotector/arch-support.txt
@@ -35,5 +35,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
- | xtensa: | TODO |
+ | xtensa: | ok |
-----------------------
diff --git a/Documentation/filesystems/ext2.txt b/Documentation/filesystems/ext2.txt
index 55755395d3dc..81c0becab225 100644
--- a/Documentation/filesystems/ext2.txt
+++ b/Documentation/filesystems/ext2.txt
@@ -49,12 +49,10 @@ sb=n Use alternate superblock at this location.
user_xattr Enable "user." POSIX Extended Attributes
(requires CONFIG_EXT2_FS_XATTR).
- See also http://acl.bestbits.at
nouser_xattr Don't support "user." extended attributes.
acl Enable POSIX Access Control Lists support
(requires CONFIG_EXT2_FS_POSIX_ACL).
- See also http://acl.bestbits.at
noacl Don't support POSIX ACLs.
nobh Do not attach buffer_heads to file pagecache.
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 75236c0c2ac2..8cd63e16f171 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -202,15 +202,14 @@ inode_readahead_blks=n This tuning parameter controls the maximum
the buffer cache. The default value is 32 blocks.
nouser_xattr Disables Extended User Attributes. See the
- attr(5) manual page and http://acl.bestbits.at/
- for more information about extended attributes.
+ attr(5) manual page for more information about
+ extended attributes.
noacl This option disables POSIX Access Control List
support. If ACL support is enabled in the kernel
configuration (CONFIG_EXT4_FS_POSIX_ACL), ACL is
enabled by default on mount. See the acl(5) manual
- page and http://acl.bestbits.at/ for more information
- about acl.
+ page for more information about acl.
bsddf (*) Make 'df' act like BSD.
minixdf Make 'df' act like Minix.
diff --git a/Documentation/filesystems/nfs/Exporting b/Documentation/filesystems/nfs/Exporting
index 520a4becb75c..63889149f532 100644
--- a/Documentation/filesystems/nfs/Exporting
+++ b/Documentation/filesystems/nfs/Exporting
@@ -56,13 +56,25 @@ a/ A dentry flag DCACHE_DISCONNECTED which is set on
any dentry that might not be part of the proper prefix.
This is set when anonymous dentries are created, and cleared when a
dentry is noticed to be a child of a dentry which is in the proper
- prefix.
-
-b/ A per-superblock list "s_anon" of dentries which are the roots of
- subtrees that are not in the proper prefix. These dentries, as
- well as the proper prefix, need to be released at unmount time. As
- these dentries will not be hashed, they are linked together on the
- d_hash list_head.
+ prefix. If the refcount on a dentry with this flag set
+ becomes zero, the dentry is immediately discarded, rather than being
+ kept in the dcache. If a dentry that is not already in the dcache
+ is repeatedly accessed by filehandle (as NFSD might do), a new dentry
+ will be allocated for each access, and discarded at the end of
+ the access.
+
+ Note that such a dentry can acquire children, name, ancestors, etc.
+ without losing DCACHE_DISCONNECTED - that flag is only cleared when
+ the subtree is successfully reconnected to the root. Until then, dentries
+ in such a subtree are retained only as long as there are references;
+ a refcount reaching zero means immediate eviction, the same as for unhashed
+ dentries. That guarantees that we won't need to hunt them down upon
+ umount.
+
+b/ A primitive for creation of secondary roots - d_obtain_root(inode).
+ Those do _not_ bear DCACHE_DISCONNECTED. They are placed on the
+ per-superblock list (->s_roots), so they can be located at umount
+ time for eviction purposes.
c/ Helper routines to allocate anonymous dentries, and to help attach
loose directory dentries at lookup time. They are:
@@ -77,7 +89,6 @@ c/ Helper routines to allocate anonymous dentries, and to help attach
(such as an anonymous one created by d_obtain_alias), if appropriate.
It returns NULL when the passed-in dentry is used, following the calling
convention of ->lookup.
-
Filesystem Issues
-----------------
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index c0727dc36271..f2f3f8592a6f 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -25,8 +25,8 @@ available from the following download page. At least "mkfs.nilfs2",
cleaner or garbage collector) are required. Details on the tools are
described in the man pages included in the package.
-Project web page: http://nilfs.sourceforge.net/
-Download page: http://nilfs.sourceforge.net/en/download.html
+Project web page: https://nilfs.sourceforge.io/
+Download page: https://nilfs.sourceforge.io/en/download.html
List info: http://vger.kernel.org/vger-lists.html#linux-nilfs
Caveats
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index cf51360e3a9f..91031298beb1 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -344,4 +344,4 @@ the following:
characters in the final slot are set to Unicode 0xFFFF.
Finally, note that the extended name is stored in Unicode. Each Unicode
-character takes two bytes.
+character takes either two or four bytes, UTF-16LE encoded.
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index a0f61898d493..659bb19f5b3c 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -2,6 +2,7 @@ GPIO Mappings
=============
This document explains how GPIOs can be assigned to given devices and functions.
+
Note that it only applies to the new descriptor-based interface. For a
description of the deprecated integer-based GPIO interface please refer to
gpio-legacy.txt (actually, there is no real mapping possible with the old
@@ -49,7 +50,7 @@ This property will make GPIOs 15, 16 and 17 available to the driver under the
power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
-The led GPIOs will be active-high, while the power GPIO will be active-low (i.e.
+The led GPIOs will be active high, while the power GPIO will be active low (i.e.
gpiod_is_active_low(power) will be true).
The second parameter of the gpiod_get() functions, the con_id string, has to be
@@ -122,9 +123,14 @@ where
can be NULL, in which case it will match any function.
- idx is the index of the GPIO within the function.
- flags is defined to specify the following properties:
- * GPIOF_ACTIVE_LOW - to configure the GPIO as active-low
- * GPIOF_OPEN_DRAIN - GPIO pin is open drain type.
- * GPIOF_OPEN_SOURCE - GPIO pin is open source type.
+ * GPIO_ACTIVE_HIGH - GPIO line is active high
+ * GPIO_ACTIVE_LOW - GPIO line is active low
+ * GPIO_OPEN_DRAIN - GPIO line is set up as open drain
+ * GPIO_OPEN_SOURCE - GPIO line is set up as open source
+ * GPIO_PERSISTENT - GPIO line is persistent during
+ suspend/resume and maintains its value
+ * GPIO_TRANSITORY - GPIO line is transitory and may lose its
+ electrical state during suspend/resume
In the future, these flags might be extended to support more properties.
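
For example, a board file could register a lookup table applying these flags
(the chip label, offsets and consumer names are illustrative):

	static struct gpiod_lookup_table foo_gpios_table = {
		.dev_id = "foo.0",
		.table = {
			GPIO_LOOKUP("gpiochip0", 15, "power", GPIO_ACTIVE_LOW),
			GPIO_LOOKUP("gpiochip0", 16, "reset",
				    GPIO_ACTIVE_HIGH | GPIO_TRANSITORY),
			{ },
		},
	};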
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index 63e1bd1d88e3..d53e5b5cfc9c 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -66,6 +66,15 @@ for the GPIO. Values can be:
* GPIOD_IN to initialize the GPIO as input.
* GPIOD_OUT_LOW to initialize the GPIO as output with a value of 0.
* GPIOD_OUT_HIGH to initialize the GPIO as output with a value of 1.
+* GPIOD_OUT_LOW_OPEN_DRAIN same as GPIOD_OUT_LOW but also enforce the line
+ to be electrically used with open drain.
+* GPIOD_OUT_HIGH_OPEN_DRAIN same as GPIOD_OUT_HIGH but also enforce the line
+ to be electrically used with open drain.
+
+The last two flags are used for cases where open drain is mandatory, such
+as I2C: if the line is not already configured as open drain in the mappings
+(see board.txt), then open drain will be enforced anyway and a warning will be
+printed that the board configuration needs to be updated to match the use case.
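+
+For instance, an I2C-style consumer could request such a line as follows
+(the con_id "sda" is illustrative):
+
+	struct gpio_desc *sda;
+
+	sda = gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
+	if (IS_ERR(sda))
+		return PTR_ERR(sda);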
Both functions return either a valid GPIO descriptor, or an error code checkable
with IS_ERR() (they will never return a NULL pointer). -ENOENT will be returned
@@ -184,7 +193,7 @@ A driver can also query the current direction of a GPIO:
int gpiod_get_direction(const struct gpio_desc *desc)
-This function will return either GPIOF_DIR_IN or GPIOF_DIR_OUT.
+This function returns 0 for output, 1 for input, or an error code on failure.
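+
+A minimal sketch of checking the direction:
+
+	int dir = gpiod_get_direction(desc);
+
+	if (dir < 0)
+		return dir;	/* direction unknown or error */
+	if (dir == 0)
+		pr_info("line is configured as an output\n");
+	else
+		pr_info("line is configured as an input\n");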
Be aware that there is no default direction for GPIOs. Therefore, **using a GPIO
without setting its direction first is illegal and will result in undefined
@@ -240,59 +249,71 @@ that can't be accessed from hardIRQ handlers, these calls act the same as the
spinlock-safe calls.
-Active-low State and Raw GPIO Values
-------------------------------------
-Device drivers like to manage the logical state of a GPIO, i.e. the value their
-device will actually receive, no matter what lies between it and the GPIO line.
-In some cases, it might make sense to control the actual GPIO line value. The
-following set of calls ignore the active-low property of a GPIO and work on the
-raw line value:
-
- int gpiod_get_raw_value(const struct gpio_desc *desc)
- void gpiod_set_raw_value(struct gpio_desc *desc, int value)
- int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
- void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
- int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
-
-The active-low state of a GPIO can also be queried using the following call:
-
- int gpiod_is_active_low(const struct gpio_desc *desc)
-
-Note that these functions should only be used with great moderation ; a driver
-should not have to care about the physical line level.
-
-
-The active-low property
------------------------
-
-As a driver should not have to care about the physical line level, all of the
+The active low and open drain semantics
+---------------------------------------
+As a consumer should not have to care about the physical line level, all of the
gpiod_set_value_xxx() or gpiod_set_array_value_xxx() functions operate with
-the *logical* value. With this they take the active-low property into account.
-This means that they check whether the GPIO is configured to be active-low,
+the *logical* value. With this they take the active low property into account.
+This means that they check whether the GPIO is configured to be active low,
and if so, they manipulate the passed value before the physical line level is
driven.
+The same is applicable for open drain or open source output lines: those do not
+actively drive their output high (open drain) or low (open source), they just
+switch their output to a high impedance value. The consumer should not need to
+care. (For details read about open drain in driver.txt.)
+
With this, all the gpiod_set_(array)_value_xxx() functions interpret the
-parameter "value" as "active" ("1") or "inactive" ("0"). The physical line
+parameter "value" as "asserted" ("1") or "de-asserted" ("0"). The physical line
level will be driven accordingly.
-As an example, if the active-low property for a dedicated GPIO is set, and the
-gpiod_set_(array)_value_xxx() passes "active" ("1"), the physical line level
+As an example, if the active low property for a dedicated GPIO is set, and the
+gpiod_set_(array)_value_xxx() passes "asserted" ("1"), the physical line level
will be driven low.
To summarize:
-Function (example) active-low property physical line
-gpiod_set_raw_value(desc, 0); don't care low
-gpiod_set_raw_value(desc, 1); don't care high
-gpiod_set_value(desc, 0); default (active-high) low
-gpiod_set_value(desc, 1); default (active-high) high
-gpiod_set_value(desc, 0); active-low high
-gpiod_set_value(desc, 1); active-low low
-
-Please note again that the set_raw/get_raw functions should be avoided as much
-as possible, especially by drivers which should not care about the actual
-physical line level and worry about the logical value instead.
+Function (example) line property physical line
+gpiod_set_raw_value(desc, 0); don't care low
+gpiod_set_raw_value(desc, 1); don't care high
+gpiod_set_value(desc, 0); default (active high) low
+gpiod_set_value(desc, 1); default (active high) high
+gpiod_set_value(desc, 0); active low high
+gpiod_set_value(desc, 1); active low low
+gpiod_set_value(desc, 0); open drain low
+gpiod_set_value(desc, 1); open drain high impedance
+gpiod_set_value(desc, 0); open source high impedance
+gpiod_set_value(desc, 1); open source high
+
+It is possible to override these semantics using the set_raw/get_raw functions,
+but this should be avoided as much as possible, especially by system-agnostic
+drivers which should not need to care about the actual physical line level and
+should worry about the logical value instead.
+
+
+Accessing raw GPIO values
+-------------------------
+Consumers exist that need to manage the physical state of a GPIO line, i.e. the
+value their device will actually receive, no matter what lies between it and the
+GPIO line.
+
+The following set of calls ignore the active-low or open drain property of a GPIO and
+work on the raw line value:
+
+ int gpiod_get_raw_value(const struct gpio_desc *desc)
+ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+ int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
+ void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
+ int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
+
+The active low state of a GPIO can also be queried using the following call:
+
+ int gpiod_is_active_low(const struct gpio_desc *desc)
+
+Note that these functions should only be used with great moderation; a driver
+should not have to care about the physical line level or open drain semantics.
Access multiple GPIOs with a single function call
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
index d8de1c7de85a..3392a0fd4c23 100644
--- a/Documentation/gpio/driver.txt
+++ b/Documentation/gpio/driver.txt
@@ -88,6 +88,10 @@ ending up in the pin control back-end "behind" the GPIO controller, usually
closer to the actual pins. This way the pin controller can manage the below
listed GPIO configurations.
+If a pin controller back-end is used, the GPIO controller or hardware
+description needs to provide "GPIO ranges" mapping the GPIO line offsets to pin
+numbers on the pin controller so they can properly cross-reference each other.
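+
+For instance, a devicetree-based controller could express such a mapping with
+the standard "gpio-ranges" property (the numbers are illustrative):
+
+	/* GPIO line offsets 0..9 map to pins 20..29 of the pin controller */
+	gpio-ranges = <&pinctrl 0 20 10>;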
+
GPIOs with debounce support
---------------------------
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.txt
index aeab01aa4d00..6cdeab8650cd 100644
--- a/Documentation/gpio/sysfs.txt
+++ b/Documentation/gpio/sysfs.txt
@@ -1,6 +1,17 @@
GPIO Sysfs Interface for Userspace
==================================
+THIS ABI IS DEPRECATED, THE ABI DOCUMENTATION HAS BEEN MOVED TO
+Documentation/ABI/obsolete/sysfs-gpio AND NEW USERSPACE CONSUMERS
+ARE SUPPOSED TO USE THE CHARACTER DEVICE ABI. THIS OLD SYSFS ABI WILL
+NOT BE DEVELOPED (NO NEW FEATURES), IT WILL JUST BE MAINTAINED.
+
+Refer to the examples in tools/gpio/* for an introduction to the new
+character device ABI. Also see the userspace header in
+include/uapi/linux/gpio.h
+
+The deprecated sysfs ABI
+------------------------
Platforms which use the "gpiolib" implementors framework may choose to
configure a sysfs user interface to GPIOs. This is different from the
debugfs interface, since it provides control over GPIO direction and
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 2e7ee0313c1c..e94d3ac2bdd0 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -341,10 +341,7 @@ GuC
GuC-specific firmware loader
----------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
- :doc: GuC-specific firmware loader
-
-.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fw.c
:internal:
GuC-based command submission
diff --git a/Documentation/hwmon/lm25066 b/Documentation/hwmon/lm25066
index 3fa6bf820c88..51b32aa203a8 100644
--- a/Documentation/hwmon/lm25066
+++ b/Documentation/hwmon/lm25066
@@ -8,11 +8,6 @@ Supported chips:
Datasheets:
http://www.ti.com/lit/gpn/lm25056
http://www.ti.com/lit/gpn/lm25056a
- * TI LM25063
- Prefix: 'lm25063'
- Addresses scanned: -
- Datasheet:
- To be announced
* National Semiconductor LM25066
Prefix: 'lm25066'
Addresses scanned: -
@@ -42,7 +37,7 @@ Description
-----------
This driver supports hardware monitoring for National Semiconductor / TI LM25056,
-LM25063, LM25066, LM5064, and LM5066/LM5066I Power Management, Monitoring,
+LM25066, LM5064, and LM5066/LM5066I Power Management, Monitoring,
Control, and Protection ICs.
The driver is a client driver to the core PMBus driver. Please see
@@ -74,12 +69,8 @@ in1_input Measured input voltage.
in1_average Average measured input voltage.
in1_min Minimum input voltage.
in1_max Maximum input voltage.
-in1_crit Critical high input voltage (LM25063 only).
-in1_lcrit Critical low input voltage (LM25063 only).
in1_min_alarm Input voltage low alarm.
in1_max_alarm Input voltage high alarm.
-in1_lcrit_alarm Input voltage critical low alarm (LM25063 only).
-in1_crit_alarm Input voltage critical high alarm. (LM25063 only).
in2_label "vmon"
in2_input Measured voltage on VAUX pin
@@ -94,16 +85,12 @@ in3_input Measured output voltage.
in3_average Average measured output voltage.
in3_min Minimum output voltage.
in3_min_alarm Output voltage low alarm.
-in3_highest Historical minimum output voltage (LM25063 only).
-in3_lowest Historical maximum output voltage (LM25063 only).
curr1_label "iin"
curr1_input Measured input current.
curr1_average Average measured input current.
curr1_max Maximum input current.
-curr1_crit Critical input current (LM25063 only).
curr1_max_alarm Input current high alarm.
-curr1_crit_alarm Input current critical high alarm (LM25063 only).
power1_label "pin"
power1_input Measured input power.
@@ -113,11 +100,6 @@ power1_alarm Input power alarm
power1_input_highest Historical maximum power.
power1_reset_history Write any value to reset maximum power history.
-power2_label "pout". LM25063 only.
-power2_input Measured output power.
-power2_max Maximum output power limit.
-power2_crit Critical output power limit.
-
temp1_input Measured temperature.
temp1_max Maximum temperature.
temp1_crit Critical high temperature.
diff --git a/Documentation/hwmon/max31785 b/Documentation/hwmon/max31785
index 45fb6093dec2..270c5f865261 100644
--- a/Documentation/hwmon/max31785
+++ b/Documentation/hwmon/max31785
@@ -17,8 +17,9 @@ management with temperature and remote voltage sensing. Various fan control
features are provided, including PWM frequency control, temperature hysteresis,
dual tachometer measurements, and fan health monitoring.
-For dual rotor fan configuration, the MAX31785 exposes the slowest rotor of the
-two in the fan[1-4]_input attributes.
+For dual-rotor configurations, the MAX31785A exposes the second rotor
+tachometer readings in the fan[5-8]_input attributes. By contrast, the
+MAX31785 exposes only the slowest rotor measurement, and does so in the
+fan[1-4]_input attributes.
Usage Notes
-----------
@@ -31,7 +32,9 @@ Sysfs attributes
fan[1-4]_alarm Fan alarm.
fan[1-4]_fault Fan fault.
-fan[1-4]_input Fan RPM.
+fan[1-8]_input Fan RPM. On the MAX31785A, inputs 5-8 correspond to the
+ second rotor of fans 1-4.
+fan[1-4]_target Fan input target.
in[1-6]_crit Critical maximum output voltage
in[1-6]_crit_alarm Output voltage critical high alarm
@@ -44,6 +47,12 @@ in[1-6]_max_alarm Output voltage high alarm
in[1-6]_min Minimum output voltage
in[1-6]_min_alarm Output voltage low alarm
+pwm[1-4] Fan target duty cycle (0..255)
+pwm[1-4]_enable 0: Full-speed
+ 1: Manual PWM control
+ 2: Automatic PWM (tach-feedback RPM fan-control)
+ 3: Automatic closed-loop (temp-feedback fan-control)
+
temp[1-11]_crit Critical high temperature
temp[1-11]_crit_alarm Chip temperature critical high alarm
temp[1-11]_input Measured temperature
diff --git a/Documentation/hwmon/w83773g b/Documentation/hwmon/w83773g
new file mode 100644
index 000000000000..4cc6c0b8257f
--- /dev/null
+++ b/Documentation/hwmon/w83773g
@@ -0,0 +1,33 @@
+Kernel driver w83773g
+=====================
+
+Supported chips:
+ * Nuvoton W83773G
+ Prefix: 'w83773g'
+ Addresses scanned: I2C 0x4c and 0x4d
+ Datasheet: https://www.nuvoton.com/resource-files/W83773G_SG_DatasheetV1_2.pdf
+
+Authors:
+ Lei YU <mine260309@gmail.com>
+
+Description
+-----------
+
+This driver implements support for the Nuvoton W83773G temperature sensor
+chip. The chip implements one local and two remote sensors.
+It also features offsets for the two remote sensors which get added to
+the input readings. The chip does all the scaling by itself and the driver
+therefore reports true temperatures that don't need any user-space adjustments.
+Temperature is measured in degrees Celsius.
+The chip is wired over I2C/SMBus and specified over a temperature
+range of -40 to +125 degrees Celsius (for the local sensor) and -40 to +127
+degrees Celsius (for the remote sensors).
+Resolution for both the local and remote channels is 0.125 degree C.
+
+The chip supports only temperature measurement. The driver exports
+the temperature values via the following sysfs files:
+
+temp[1-3]_input
+temp[2-3]_fault
+temp[2-3]_offset
+update_interval
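+
+For example, reading the local temperature and setting an offset for the
+first remote sensor might look like this (the hwmon number and the values
+are illustrative; readings are in millidegrees Celsius):
+
+  # cat /sys/class/hwmon/hwmon0/temp1_input
+  27125
+  # echo 2000 > /sys/class/hwmon/hwmon0/temp2_offset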
diff --git a/Documentation/i2c/dev-interface b/Documentation/i2c/dev-interface
index 5ff19447ac44..d04e6e4964ee 100644
--- a/Documentation/i2c/dev-interface
+++ b/Documentation/i2c/dev-interface
@@ -17,13 +17,16 @@ i2c-10, ...). All 256 minor device numbers are reserved for i2c.
C example
=========
-So let's say you want to access an i2c adapter from a C program. The
-first thing to do is "#include <linux/i2c-dev.h>". Please note that
-there are two files named "i2c-dev.h" out there, one is distributed
-with the Linux kernel and is meant to be included from kernel
-driver code, the other one is distributed with i2c-tools and is
-meant to be included from user-space programs. You obviously want
-the second one here.
+So let's say you want to access an i2c adapter from a C program.
+First, you need to include these two headers:
+
+ #include <linux/i2c-dev.h>
+ #include <i2c/smbus.h>
+
+(Please note that there are two files named "i2c-dev.h" out there. One is
+distributed with the Linux kernel and the other one is included in the
+source tree of i2c-tools. They used to be different in content but since 2012
+they're identical. You should use "linux/i2c-dev.h".)
Now, you have to decide which adapter you want to access. You should
inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this.
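+
+As a minimal sketch (assuming adapter i2c-1 and a hypothetical device at
+address 0x50; error checking omitted), the resulting code could look like:
+
+  #include <fcntl.h>
+  #include <unistd.h>
+  #include <sys/ioctl.h>
+  #include <linux/i2c-dev.h>
+  #include <i2c/smbus.h>
+
+  int main(void)
+  {
+      int file = open("/dev/i2c-1", O_RDWR);
+
+      ioctl(file, I2C_SLAVE, 0x50);            /* select the target device */
+      i2c_smbus_read_byte_data(file, 0x00);    /* read register 0x00 */
+      close(file);
+      return 0;
+  }
+
+(When using the i2c_smbus_*() helpers from recent i2c-tools, link with -li2c.)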
diff --git a/Documentation/index.rst b/Documentation/index.rst
index cb7f1ba5b3b1..ef5080cbf009 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -13,6 +13,18 @@ documents into a coherent whole. Please note that improvements to the
documentation are welcome; join the linux-doc list at vger.kernel.org if
you want to help out.
+Licensing documentation
+-----------------------
+
+The following describes the license of the Linux kernel source code
+(GPLv2), how to properly mark the license of individual files in the source
+tree, as well as links to the full license text.
+
+.. toctree::
+ :maxdepth: 2
+
+ process/license-rules.rst
+
User-oriented documentation
---------------------------
@@ -52,6 +64,7 @@ merged much easier.
dev-tools/index
doc-guide/index
kernel-hacking/index
+ maintainer/index
Kernel API documentation
------------------------
diff --git a/Documentation/input/multi-touch-protocol.rst b/Documentation/input/multi-touch-protocol.rst
index 8035868c56bc..b51751a0cd5d 100644
--- a/Documentation/input/multi-touch-protocol.rst
+++ b/Documentation/input/multi-touch-protocol.rst
@@ -269,10 +269,11 @@ ABS_MT_ORIENTATION
The orientation of the touching ellipse. The value should describe a signed
quarter of a revolution clockwise around the touch center. The signed value
range is arbitrary, but zero should be returned for an ellipse aligned with
- the Y axis of the surface, a negative value when the ellipse is turned to
- the left, and a positive value when the ellipse is turned to the
- right. When completely aligned with the X axis, the range max should be
- returned.
+ the Y axis (north) of the surface, a negative value when the ellipse is
+ turned to the left, and a positive value when the ellipse is turned to the
+ right. When aligned with the X axis in the positive direction, the range
+ max should be returned; when aligned with the X axis in the negative
+ direction, the range -max should be returned.
Touch ellipses are symmetrical by default. For devices capable of true 360
degree orientation, the reported orientation must exceed the range max to
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 262722d8867b..f5b9493f04ad 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -77,6 +77,27 @@ applicable everywhere (see syntax).
Optionally, dependencies only for this default value can be added with
"if".
+ When no default is given, the value deliberately defaults to 'n' in order
+ to avoid bloating the build. With few exceptions, new config options
+ should not change this. The intent is for "make oldconfig" to add as
+ little as possible to the config from release to release.
+
+ Note:
+ Things that merit "default y/m" include:
+
+ a) A new Kconfig option for something that used to always be built
+ should be "default y".
+
+ b) A new gatekeeping Kconfig option that hides/shows other Kconfig
+ options (but does not generate any code of its own), should be
+ "default y" so people will see those other options.
+
+ c) Sub-driver behavior or similar options for a driver that is
+ "default n". This allows you to provide sane defaults.
+
+ d) Hardware or infrastructure that everybody expects, such as CONFIG_NET
+ or CONFIG_BLOCK. These are rare exceptions.
+
- type definition + default value:
"def_bool"/"def_tristate" <expr> ["if" <expr>]
This is a shorthand notation for a type definition plus a value.
@@ -200,10 +221,14 @@ module state. Dependency expressions have the following syntax:
<expr> ::= <symbol> (1)
<symbol> '=' <symbol> (2)
<symbol> '!=' <symbol> (3)
- '(' <expr> ')' (4)
- '!' <expr> (5)
- <expr> '&&' <expr> (6)
- <expr> '||' <expr> (7)
+ <symbol1> '<' <symbol2> (4)
+ <symbol1> '>' <symbol2> (4)
+ <symbol1> '<=' <symbol2> (4)
+ <symbol1> '>=' <symbol2> (4)
+ '(' <expr> ')' (5)
+ '!' <expr> (6)
+ <expr> '&&' <expr> (7)
+ <expr> '||' <expr> (8)
Expressions are listed in decreasing order of precedence.
@@ -214,10 +239,13 @@ Expressions are listed in decreasing order of precedence.
otherwise 'n'.
(3) If the values of both symbols are equal, it returns 'n',
otherwise 'y'.
-(4) Returns the value of the expression. Used to override precedence.
-(5) Returns the result of (2-/expr/).
-(6) Returns the result of min(/expr/, /expr/).
-(7) Returns the result of max(/expr/, /expr/).
+(4) If the value of <symbol1> is respectively lower than, greater than,
+    lower-or-equal to, or greater-or-equal to the value of <symbol2>,
+    it returns 'y', otherwise 'n' (see the example after this list).
+(5) Returns the value of the expression. Used to override precedence.
+(6) Returns the result of (2-/expr/).
+(7) Returns the result of min(/expr/, /expr/).
+(8) Returns the result of max(/expr/, /expr/).
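+
+For example, a hypothetical pair of options (the FOO_* names are made up
+for illustration) using the comparison operators:
+
+config FOO_BUFFER_KB
+        int "Foo buffer size in KiB"
+        default 64
+
+config FOO_LARGE_BUFFERS
+        bool "Enable large-buffer handling in foo"
+        depends on FOO_BUFFER_KB >= 256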
An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2
respectively for calculations). A menu entry becomes visible when its
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
deleted file mode 100644
index c23e2c5ab80d..000000000000
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ /dev/null
@@ -1,322 +0,0 @@
-NOTE: this document is outdated and will eventually be removed. See
-Documentation/doc-guide/ for current information.
-
-kernel-doc nano-HOWTO
-=====================
-
-How to format kernel-doc comments
----------------------------------
-
-In order to provide embedded, 'C' friendly, easy to maintain,
-but consistent and extractable documentation of the functions and
-data structures in the Linux kernel, the Linux kernel has adopted
-a consistent style for documenting functions and their parameters,
-and structures and their members.
-
-The format for this documentation is called the kernel-doc format.
-It is documented in this Documentation/kernel-doc-nano-HOWTO.txt file.
-
-This style embeds the documentation within the source files, using
-a few simple conventions. The scripts/kernel-doc perl script, the
-Documentation/sphinx/kerneldoc.py Sphinx extension and other tools understand
-these conventions, and are used to extract this embedded documentation
-into various documents.
-
-In order to provide good documentation of kernel functions and data
-structures, please use the following conventions to format your
-kernel-doc comments in Linux kernel source.
-
-We definitely need kernel-doc formatted documentation for functions
-that are exported to loadable modules using EXPORT_SYMBOL.
-
-We also look to provide kernel-doc formatted documentation for
-functions externally visible to other kernel files (not marked
-"static").
-
-We also recommend providing kernel-doc formatted documentation
-for private (file "static") routines, for consistency of kernel
-source code layout. But this is lower priority and at the
-discretion of the MAINTAINER of that kernel source file.
-
-Data structures visible in kernel include files should also be
-documented using kernel-doc formatted comments.
-
-The opening comment mark "/**" is reserved for kernel-doc comments.
-Only comments so marked will be considered by the kernel-doc scripts,
-and any comment so marked must be in kernel-doc format. Do not use
-"/**" to be begin a comment block unless the comment block contains
-kernel-doc formatted comments. The closing comment marker for
-kernel-doc comments can be either "*/" or "**/", but "*/" is
-preferred in the Linux kernel tree.
-
-Kernel-doc comments should be placed just before the function
-or data structure being described.
-
-Example kernel-doc function comment:
-
-/**
- * foobar() - short function description of foobar
- * @arg1: Describe the first argument to foobar.
- * @arg2: Describe the second argument to foobar.
- * One can provide multiple line descriptions
- * for arguments.
- *
- * A longer description, with more discussion of the function foobar()
- * that might be useful to those using or modifying it. Begins with
- * empty comment line, and may include additional embedded empty
- * comment lines.
- *
- * The longer description can have multiple paragraphs.
- *
- * Return: Describe the return value of foobar.
- */
-
-The short description following the subject can span multiple lines
-and ends with an @argument description, an empty line or the end of
-the comment block.
-
-The @argument descriptions must begin on the very next line following
-this opening short function description line, with no intervening
-empty comment lines.
-
-If a function parameter is "..." (varargs), it should be listed in
-kernel-doc notation as:
- * @...: description
-
-The return value, if any, should be described in a dedicated section
-named "Return".
-
-Example kernel-doc data structure comment.
-
-/**
- * struct blah - the basic blah structure
- * @mem1: describe the first member of struct blah
- * @mem2: describe the second member of struct blah,
- * perhaps with more lines and words.
- *
- * Longer description of this structure.
- */
-
-The kernel-doc function comments describe each parameter to the
-function, in order, with the @name lines.
-
-The kernel-doc data structure comments describe each structure member
-in the data structure, with the @name lines.
-
-The longer description formatting is "reflowed", losing your line
-breaks. So presenting carefully formatted lists within these
-descriptions won't work so well; derived documentation will lose
-the formatting.
-
-See the section below "How to add extractable documentation to your
-source files" for more details and notes on how to format kernel-doc
-comments.
-
-Components of the kernel-doc system
------------------------------------
-
-Many places in the source tree have extractable documentation in the
-form of block comments above functions. The components of this system
-are:
-
-- scripts/kernel-doc
-
- This is a perl script that hunts for the block comments and can mark
- them up directly into DocBook, ReST, man, text, and HTML. (No, not
- texinfo.)
-
-- scripts/docproc.c
-
- This is a program for converting SGML template files into SGML
- files. When a file is referenced it is searched for symbols
- exported (EXPORT_SYMBOL), to be able to distinguish between internal
- and external functions.
- It invokes kernel-doc, giving it the list of functions that
- are to be documented.
- Additionally it is used to scan the SGML template files to locate
- all the files referenced herein. This is used to generate dependency
- information as used by make.
-
-- Makefile
-
- The targets 'xmldocs', 'latexdocs', 'pdfdocs', 'epubdocs'and 'htmldocs'
- are used to build XML DocBook files, LaTeX files, PDF files,
- ePub files and html files in Documentation/.
-
-How to extract the documentation
---------------------------------
-
-If you just want to read the ready-made books on the various
-subsystems, just type 'make epubdocs', or 'make pdfdocs', or 'make htmldocs',
-depending on your preference. If you would rather read a different format,
-you can type 'make xmldocs' and then use DocBook tools to convert
-Documentation/output/*.xml to a format of your choice (for example,
-'db2html ...' if 'make htmldocs' was not defined).
-
-If you want to see man pages instead, you can do this:
-
-$ cd linux
-$ scripts/kernel-doc -man $(find -name '*.c') | split-man.pl /tmp/man
-$ scripts/kernel-doc -man $(find -name '*.h') | split-man.pl /tmp/man
-
-Here is split-man.pl:
-
--->
-#!/usr/bin/perl
-
-if ($#ARGV < 0) {
- die "where do I put the results?\n";
-}
-
-mkdir $ARGV[0],0777;
-$state = 0;
-while (<STDIN>) {
- if (/^\.TH \"[^\"]*\" 9 \"([^\"]*)\"/) {
- if ($state == 1) { close OUT }
- $state = 1;
- $fn = "$ARGV[0]/$1.9";
- print STDERR "Creating $fn\n";
- open OUT, ">$fn" or die "can't open $fn: $!\n";
- print OUT $_;
- } elsif ($state != 0) {
- print OUT $_;
- }
-}
-
-close OUT;
-<--
-
-If you just want to view the documentation for one function in one
-file, you can do this:
-
-$ scripts/kernel-doc -man -function fn file | nroff -man | less
-
-or this:
-
-$ scripts/kernel-doc -text -function fn file
-
-
-How to add extractable documentation to your source files
----------------------------------------------------------
-
-The format of the block comment is like this:
-
-/**
- * function_name(:)? (- short description)?
-(* @parameterx(space)*: (description of parameter x)?)*
-(* a blank line)?
- * (Description:)? (Description of function)?
- * (section header: (section description)? )*
-(*)?*/
-
-All "description" text can span multiple lines, although the
-function_name & its short description are traditionally on a single line.
-Description text may also contain blank lines (i.e., lines that contain
-only a "*").
-
-"section header:" names must be unique per function (or struct,
-union, typedef, enum).
-
-Use the section header "Return" for sections describing the return value
-of a function.
-
-Avoid putting a spurious blank line after the function name, or else the
-description will be repeated!
-
-All descriptive text is further processed, scanning for the following special
-patterns, which are highlighted appropriately.
-
-'funcname()' - function
-'$ENVVAR' - environment variable
-'&struct_name' - name of a structure (up to two words including 'struct')
-'@parameter' - name of a parameter
-'%CONST' - name of a constant.
-
-NOTE 1: The multi-line descriptive text you provide does *not* recognize
-line breaks, so if you try to format some text nicely, as in:
-
- Return:
- 0 - cool
- 1 - invalid arg
- 2 - out of memory
-
-this will all run together and produce:
-
- Return: 0 - cool 1 - invalid arg 2 - out of memory
-
-NOTE 2: If the descriptive text you provide has lines that begin with
-some phrase followed by a colon, each of those phrases will be taken as
-a new section heading, which means you should similarly try to avoid text
-like:
-
- Return:
- 0: cool
- 1: invalid arg
- 2: out of memory
-
-every line of which would start a new section. Again, probably not
-what you were after.
-
-Take a look around the source tree for examples.
-
-
-kernel-doc for structs, unions, enums, and typedefs
----------------------------------------------------
-
-Beside functions you can also write documentation for structs, unions,
-enums and typedefs. Instead of the function name you must write the name
-of the declaration; the struct/union/enum/typedef must always precede
-the name. Nesting of declarations is not supported.
-Use the argument mechanism to document members or constants.
-
-Inside a struct description, you can use the "private:" and "public:"
-comment tags. Structure fields that are inside a "private:" area
-are not listed in the generated output documentation. The "private:"
-and "public:" tags must begin immediately following a "/*" comment
-marker. They may optionally include comments between the ":" and the
-ending "*/" marker.
-
-Example:
-
-/**
- * struct my_struct - short description
- * @a: first member
- * @b: second member
- *
- * Longer description
- */
-struct my_struct {
- int a;
- int b;
-/* private: internal use only */
- int c;
-};
-
-
-Including documentation blocks in source files
-----------------------------------------------
-
-To facilitate having source code and comments close together, you can
-include kernel-doc documentation blocks that are free-form comments
-instead of being kernel-doc for functions, structures, unions,
-enums, or typedefs. This could be used for something like a
-theory of operation for a driver or library code, for example.
-
-This is done by using a DOC: section keyword with a section title. E.g.:
-
-/**
- * DOC: Theory of Operation
- *
- * The whizbang foobar is a dilly of a gizmo. It can do whatever you
- * want it to do, at any time. It reads your mind. Here's how it works.
- *
- * foo bar splat
- *
- * The only drawback to this gizmo is that is can sometimes damage
- * hardware, software, or its subject(s).
- */
-
-DOC: sections are used in ReST files.
-
-Tim.
-*/ <twaugh@redhat.com>
diff --git a/Documentation/kernel-hacking/hacking.rst b/Documentation/kernel-hacking/hacking.rst
index daf3883b2694..9999c8468293 100644
--- a/Documentation/kernel-hacking/hacking.rst
+++ b/Documentation/kernel-hacking/hacking.rst
@@ -523,7 +523,7 @@ this expression is true, or ``-ERESTARTSYS`` if a signal is received. The
Waking Up Queued Tasks
----------------------
-Call :c:func:`wake_up()` (``include/linux/wait.h``);, which will wake
+Call :c:func:`wake_up()` (``include/linux/wait.h``), which will wake
up every process in the queue. The exception is if one has
``TASK_EXCLUSIVE`` set, in which case the remainder of the queue will
not be woken. There are other variants of this basic function available
@@ -690,8 +690,8 @@ not provide the necessary runtime environment and the include files are
not tested for it. It is still possible, but not recommended. If you
really want to do this, forget about exceptions at least.
-NUMif
------
+#if
+---
It is generally considered cleaner to use macros in header files (or at
the top of .c files) to abstract away functions rather than using \`#if'
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index ecdb18104ab0..1ae2de758c08 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -72,8 +72,7 @@ example, they add a NULL pointer or a boundary check, fix a race by adding
a missing memory barrier, or add some locking around a critical section.
Most of these changes are self contained and the function presents itself
the same way to the rest of the system. In this case, the functions might
-be updated independently one by one. (This can be done by setting the
-'immediate' flag in the klp_patch struct.)
+be updated independently one by one.
But there are more complex fixes. For example, a patch might change
ordering of locking in multiple functions at the same time. Or a patch
@@ -125,12 +124,6 @@ safe to patch tasks:
b) Patching CPU-bound user tasks. If the task is highly CPU-bound
then it will get patched the next time it gets interrupted by an
IRQ.
- c) In the future it could be useful for applying patches for
- architectures which don't yet have HAVE_RELIABLE_STACKTRACE. In
- this case you would have to signal most of the tasks on the
- system. However this isn't supported yet because there's
- currently no way to patch kthreads without
- HAVE_RELIABLE_STACKTRACE.
3. For idle "swapper" tasks, since they don't ever exit the kernel, they
instead have a klp_update_patch_state() call in the idle loop which
@@ -138,27 +131,16 @@ safe to patch tasks:
(Note there's not yet such an approach for kthreads.)
-All the above approaches may be skipped by setting the 'immediate' flag
-in the 'klp_patch' struct, which will disable per-task consistency and
-patch all tasks immediately. This can be useful if the patch doesn't
-change any function or data semantics. Note that, even with this flag
-set, it's possible that some tasks may still be running with an old
-version of the function, until that function returns.
+Architectures which don't have HAVE_RELIABLE_STACKTRACE rely solely on
+the second approach. It is highly likely that some tasks are still
+running with an old version of the function until that function
+returns. In this case you would have to signal the tasks. This
+especially applies to kthreads. They may not be woken up and would need
+to be forced. See below for more information.
-There's also an 'immediate' flag in the 'klp_func' struct which allows
-you to specify that certain functions in the patch can be applied
-without per-task consistency. This might be useful if you want to patch
-a common function like schedule(), and the function change doesn't need
-consistency but the rest of the patch does.
-
-For architectures which don't have HAVE_RELIABLE_STACKTRACE, the user
-must set patch->immediate which causes all tasks to be patched
-immediately. This option should be used with care, only when the patch
-doesn't change any function or data semantics.
-
-In the future, architectures which don't have HAVE_RELIABLE_STACKTRACE
-may be allowed to use per-task consistency if we can come up with
-another way to patch kthreads.
+Unless we can come up with another way to patch kthreads, architectures
+without HAVE_RELIABLE_STACKTRACE are not considered fully supported by
+kernel livepatching.
The /sys/kernel/livepatch/<patch>/transition file shows whether a patch
is in transition. Only a single patch (the topmost patch on the stack)
@@ -176,8 +158,31 @@ If a patch is in transition, this file shows 0 to indicate the task is
unpatched and 1 to indicate it's patched. Otherwise, if no patch is in
transition, it shows -1. Any tasks which are blocking the transition
can be signaled with SIGSTOP and SIGCONT to force them to change their
-patched state.
-
+patched state. This may be harmful to the system though.
+The /sys/kernel/livepatch/<patch>/signal attribute provides a better alternative.
+Writing 1 to the attribute sends a fake signal to all remaining blocking
+tasks. No proper signal is actually delivered (there is no data in signal
+pending structures). Tasks are interrupted or woken up, and forced to change
+their patched state.
+
+An administrator can also affect a transition through the
+/sys/kernel/livepatch/<patch>/force attribute. Writing 1 there clears the
+TIF_PATCH_PENDING flag of all tasks and thus forces the tasks to the patched
+state. Important note! The force attribute is intended for cases when the
+transition gets stuck for a long time because of a blocking task. The
+administrator is expected to collect all necessary data (namely stack traces
+of such blocking tasks) and request clearance from a patch distributor to
+force the transition. Unauthorized usage may cause harm to the system. It
+depends on the nature of the patch, which functions are (un)patched, and
+which functions the blocking tasks are sleeping in (/proc/<pid>/stack may
+help here). Removal (rmmod) of patch modules is permanently disabled when
+the force feature is used. It cannot be guaranteed that there is no task
+sleeping in such a module. This would imply an unbounded reference count if
+a patch module were disabled and enabled in a loop.
+
+Moreover, the usage of force may also affect future applications of live
+patches and cause even more harm to the system. The administrator should
+first consider simply cancelling the transition (see above). If force is
+used, a reboot should be planned and no more live patches applied.
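+
+For example, for a hypothetical patch module named livepatch_sample, an
+administrator would first try the safer signal attribute and use force
+only as a last resort:
+
+  # echo 1 > /sys/kernel/livepatch/livepatch_sample/signal
+  # echo 1 > /sys/kernel/livepatch/livepatch_sample/force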
3.1 Adding consistency model support to new architectures
---------------------------------------------------------
@@ -216,13 +221,6 @@ few options:
a good backup option for those architectures which don't have
reliable stack traces yet.
-In the meantime, patches for such architectures can bypass the
-consistency model by setting klp_patch.immediate to true. This option
-is perfectly fine for patches which don't change the semantics of the
-patched functions. In practice, this is usable for ~90% of security
-fixes. Use of this option also means the patch can't be unloaded after
-it has been disabled.
-
4. Livepatch module
===================
@@ -278,9 +276,6 @@ into three levels:
only for a particular object ( vmlinux or a kernel module ). Note that
kallsyms allows for searching symbols according to the object name.
- There's also an 'immediate' flag which, when set, patches the
- function immediately, bypassing the consistency model safety checks.
-
+ struct klp_object defines an array of patched functions (struct
klp_func) in the same object. Where the object is either vmlinux
(NULL) or a module name.
@@ -299,9 +294,6 @@ into three levels:
symbols are found. The only exception are symbols from objects
(kernel modules) that have not been loaded yet.
- Setting the 'immediate' flag applies the patch to all tasks
- immediately, bypassing the consistency model safety checks.
-
For more details on how the patch is applied on a per-task basis,
see the "Consistency model" section.
@@ -316,14 +308,12 @@ section "Livepatch life-cycle" below for more details about these
two operations.
Module removal is only safe when there are no users of the underlying
-functions. The immediate consistency model is not able to detect this. The
-code just redirects the functions at the very beginning and it does not
-check if the functions are in use. In other words, it knows when the
-functions get called but it does not know when the functions return.
-Therefore it cannot be decided when the livepatch module can be safely
-removed. This is solved by a hybrid consistency model. When the system is
-transitioned to a new patch state (patched/unpatched) it is guaranteed that
-no task sleeps or runs in the old code.
+functions. This is the reason why the force feature permanently disables
+removal. The forced tasks entered the functions but we cannot say whether
+they have returned. Therefore it cannot be decided when the livepatch
+module can be safely removed. When the system is successfully transitioned
+to a new patch state (patched/unpatched) without being forced, it is
+guaranteed that no task sleeps or runs in the old code.
5. Livepatch life-cycle
@@ -337,19 +327,12 @@ First, the patch is applied only when all patched symbols for already
loaded objects are found. The error handling is much easier if this
check is done before particular functions get redirected.
-Second, the immediate consistency model does not guarantee that anyone is not
-sleeping in the new code after the patch is reverted. This means that the new
-code needs to stay around "forever". If the code is there, one could apply it
-again. Therefore it makes sense to separate the operations that might be done
-once and those that need to be repeated when the patch is enabled (applied)
-again.
-
-Third, it might take some time until the entire system is migrated
-when a more complex consistency model is used. The patch revert might
-block the livepatch module removal for too long. Therefore it is useful
-to revert the patch using a separate operation that might be called
-explicitly. But it does not make sense to remove all information
-until the livepatch module is really removed.
+Second, when the hybrid consistency model is used, it might take some time
+until the entire system is migrated. The patch revert might block
+the livepatch module removal for too long. Therefore it is useful to
+revert the patch using a separate operation that might be called
+explicitly. But it does not make sense to remove all information until
+the livepatch module is really removed.
5.1. Registration
@@ -435,6 +418,9 @@ Information about the registered patches can be found under
/sys/kernel/livepatch. The patches could be enabled and disabled
by writing there.
+/sys/kernel/livepatch/<patch>/signal and /sys/kernel/livepatch/<patch>/force
+attributes allow the administrator to affect a patching operation.
+
See Documentation/ABI/testing/sysfs-kernel-livepatch for more details.
diff --git a/Documentation/locking/locktorture.txt b/Documentation/locking/locktorture.txt
index a2ef3a929bf1..6a8df4cd19bf 100644
--- a/Documentation/locking/locktorture.txt
+++ b/Documentation/locking/locktorture.txt
@@ -57,11 +57,6 @@ torture_type Type of lock to torture. By default, only spinlocks will
o "rwsem_lock": read/write down() and up() semaphore pairs.
-torture_runnable Start locktorture at boot time in the case where the
- module is built into the kernel, otherwise wait for
- torture_runnable to be set via sysfs before starting.
- By default it will begin once the module is loaded.
-
** Torture-framework (RCU + locking) **
diff --git a/Documentation/maintainer/conf.py b/Documentation/maintainer/conf.py
new file mode 100644
index 000000000000..81e9eb7a7884
--- /dev/null
+++ b/Documentation/maintainer/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = 'Linux Kernel Development Documentation'
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'maintainer.tex', 'Linux Kernel Development Documentation',
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/maintainer/configure-git.rst b/Documentation/maintainer/configure-git.rst
new file mode 100644
index 000000000000..78bbbb0d2c84
--- /dev/null
+++ b/Documentation/maintainer/configure-git.rst
@@ -0,0 +1,34 @@
+.. _configuregit:
+
+Configure Git
+=============
+
+This chapter describes maintainer-level git configuration.
+
+Tagged branches used in :ref:`Documentation/maintainer/pull-requests.rst
+<pullrequests>` should be signed with the developer's public GPG key. Signed
+tags can be created by passing the ``-u`` flag to ``git tag``. However,
+since you would *usually* use the same key for the same project, you can
+set it once with
+::
+
+ git config user.signingkey "keyname"
+
+Alternatively, edit your ``.git/config`` or ``~/.gitconfig`` file by hand:
+::
+
+ [user]
+ name = Jane Developer
+ email = jd@domain.org
+ signingkey = jd@domain.org
+
+You may need to tell ``git`` to use ``gpg2``
+::
+
+ [gpg]
+ program = /path/to/gpg2
+
+You may also like to tell ``gpg`` which ``tty`` to use (add to your shell rc file)
+::
+
+ export GPG_TTY=$(tty)
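+
+To check that signing works end to end, you can create, verify and delete a
+throwaway signed tag (``test-signing`` is just an example name)
+::
+
+    git tag -s test-signing -m "test of signed tags"
+    git tag -v test-signing
+    git tag -d test-signing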
diff --git a/Documentation/maintainer/index.rst b/Documentation/maintainer/index.rst
new file mode 100644
index 000000000000..2a14916930cb
--- /dev/null
+++ b/Documentation/maintainer/index.rst
@@ -0,0 +1,14 @@
+==========================
+Kernel Maintainer Handbook
+==========================
+
+This document is the humble beginning of a manual for kernel maintainers.
+There is a lot yet to go here! Please feel free to propose (and write)
+additions to this manual.
+
+.. toctree::
+ :maxdepth: 2
+
+ configure-git
+ pull-requests
+
diff --git a/Documentation/maintainer/pull-requests.rst b/Documentation/maintainer/pull-requests.rst
new file mode 100644
index 000000000000..a19db3458b56
--- /dev/null
+++ b/Documentation/maintainer/pull-requests.rst
@@ -0,0 +1,178 @@
+.. _pullrequests:
+
+Creating Pull Requests
+======================
+
+This chapter describes how maintainers can create and submit pull requests
+to other maintainers. This is useful for transferring changes from one
+maintainer's tree to another maintainer's tree.
+
+This document was written by Tobin C. Harding (who, at that time, was not an
+experienced maintainer) primarily from comments made by Greg Kroah-Hartman
+and Linus Torvalds on LKML. Suggestions and fixes by Jonathan Corbet and
+Mauro Carvalho Chehab. Misrepresentation was unintentional but inevitable,
+please direct abuse to Tobin C. Harding <me@tobin.cc>.
+
+Original email thread::
+
+ http://lkml.kernel.org/r/20171114110500.GA21175@kroah.com
+
+
+Create Branch
+-------------
+
+To start with, you will need to have all the changes you wish to include in
+the pull request on a separate branch. Typically you will base this branch
+off of a branch in the tree of the developer to whom you intend to send the
+pull request.
+
+In order to create the pull request you must first tag the branch that you
+have just created. It is recommended that you choose a meaningful tag name,
+in a way that you and others can understand, even after some time. A good
+practice is to include in the name an indicator of the sybsystem of origin
+and the target kernel version.
+
+Greg offers the following example. A pull request with miscellaneous stuff
+for drivers/char, to be applied at kernel version 4.15-rc1, could be named
+``char-misc-4.15-rc1``. If such a tag were produced from a branch
+named ``char-misc-next``, you would use the following command::
+
+ git tag -s char-misc-4.15-rc1 char-misc-next
+
+that will create a signed tag called ``char-misc-4.15-rc1`` based on the
+last commit in the ``char-misc-next`` branch, and sign it with your gpg key
+(see :ref:`Documentation/maintainer/configure-git.rst <configuregit>`).
+
+Linus will only accept pull requests based on a signed tag. Other
+maintainers may differ.
+
+When you run the above command ``git`` will drop you into an editor and ask
+you to describe the tag. In this case, you are describing a pull request,
+so outline what is contained here, why it should be merged, and what, if
+any, testing has been done. All of this information will end up in the tag
+itself, and then in the merge commit that the maintainer makes if/when they
+merge the pull request. So write it up well, as it will be in the kernel
+tree forever.
+
+As said by Linus::
+
+ Anyway, at least to me, the important part is the *message*. I want
+ to understand what I'm pulling, and why I should pull it. I also
+ want to use that message as the message for the merge, so it should
+ not just make sense to me, but make sense as a historical record
+ too.
+
+ Note that if there is something odd about the pull request, that
+ should very much be in the explanation. If you're touching files
+ that you don't maintain, explain _why_. I will see it in the
+ diffstat anyway, and if you didn't mention it, I'll just be extra
+ suspicious. And when you send me new stuff after the merge window
+ (or even bug-fixes, but ones that look scary), explain not just
+ what they do and why they do it, but explain the _timing_. What
+ happened that this didn't go through the merge window..
+
+ I will take both what you write in the email pull request _and_ in
+ the signed tag, so depending on your workflow, you can either
+ describe your work in the signed tag (which will also automatically
+ make it into the pull request email), or you can make the signed
+ tag just a placeholder with nothing interesting in it, and describe
+ the work later when you actually send me the pull request.
+
+ And yes, I will edit the message. Partly because I tend to do just
+ trivial formatting (the whole indentation and quoting etc), but
+ partly because part of the message may make sense for me at pull
+ time (describing the conflicts and your personal issues for sending
+ it right now), but may not make sense in the context of a merge
+ commit message, so I will try to make it all make sense. I will
+ also fix any speeling mistaeks and bad grammar I notice,
+ particularly for non-native speakers (but also for native ones
+ ;^). But I may miss some, or even add some.
+
+ Linus
+
+Greg gives, as an example pull request::
+
+ Char/Misc patches for 4.15-rc1
+
+ Here is the big char/misc patch set for the 4.15-rc1 merge window.
+ Contained in here is the normal set of new functions added to all
+ of these crazy drivers, as well as the following brand new
+ subsystems:
+ - time_travel_controller: Finally a set of drivers for the
+ latest time travel bus architecture that provides i/o to
+ the CPU before it asked for it, allowing uninterrupted
+ processing
+ - relativity_shifters: due to the effect that the
+ time_travel_controllers have on the overall system, there
+ was a need for a new set of relativity shifter drivers to
+ accommodate the newly formed black holes that would
+ threaten to suck CPUs into them. This subsystem handles
+ this in a way to successfully neutralize the problems.
+ There is a Kconfig option to force these to be enabled
+ when needed, so problems should not occur.
+
+ All of these patches have been successfully tested in the latest
+ linux-next releases, and the original problems that it found have
+ all been resolved (apologies to anyone living near Canberra for the
+ lack of the Kconfig options in the earlier versions of the
+ linux-next tree creations.)
+
+ Signed-off-by: Your-name-here <your_email@domain>
+
+
+The tag message format is just like that of a git commit message. One line
+at the top for a "summary subject" and be sure to sign off at the bottom.
+
+Now that you have a local signed tag, you need to push it up to where it
+can be retrieved::
+
+ git push origin char-misc-4.15-rc1
+
+
+Create Pull Request
+-------------------
+
+The last thing to do is create the pull request message. ``git`` handily
+will do this for you with the ``git request-pull`` command, but it needs a
+bit of help determining what you want to pull, and what to base the pull
+request against (to show the correct changes to be pulled and the diffstat). The
+following command(s) will generate a pull request::
+
+ git request-pull master git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git/ char-misc-4.15-rc1
+
+Quoting Greg::
+
+ This is asking git to compare the difference from the
+ 'char-misc-4.15-rc1' tag location, to the head of the 'master'
+ branch (which in my case points to the last location in Linus's
+ tree that I diverged from, usually a -rc release) and to use the
+ git:// protocol to pull from. If you wish to use https://, that
+ can be used here instead as well (but note that some people behind
+ firewalls will have problems with https git pulls).
+
+ If the char-misc-4.15-rc1 tag is not present in the repo that I am
+ asking to be pulled from, git will complain saying it is not there,
+ a handy way to remember to actually push it to a public location.
+
+ The output of 'git request-pull' will contain the location of the
+ git tree and specific tag to pull from, and the full text
+ description of that tag (which is why you need to provide good
+ information in that tag). It will also create a diffstat of the
+ pull request, and a shortlog of the individual commits that the
+ pull request will provide.
+
+Linus responded that he tends to prefer the ``git://`` protocol. Other
+maintainers may have different preferences. Also, note that if you are
+creating pull requests without a signed tag then ``https://`` may be a
+better choice. Please see the original thread for the full discussion.
+
+
+Submit Pull Request
+-------------------
+
+A pull request is submitted in the same way as an ordinary patch. Send it as
+an inline email to the maintainer and CC LKML and any sub-system specific
+lists if required. Pull requests to Linus typically have a subject line
+something like::
+
+ [GIT PULL] <subsystem> changes for v4.15-rc1
diff --git a/Documentation/md/raid5-ppl.txt b/Documentation/md/raid5-ppl.txt
index 127072b09363..bfa092589e00 100644
--- a/Documentation/md/raid5-ppl.txt
+++ b/Documentation/md/raid5-ppl.txt
@@ -39,6 +39,7 @@ case the behavior is the same as in plain raid5.
PPL is available for md version-1 metadata and external (specifically IMSM)
metadata arrays. It can be enabled using mdadm option --consistency-policy=ppl.
-Currently, volatile write-back cache should be disabled on all member drives
-when using PPL. Otherwise it cannot guarantee consistency in case of power
-failure.
+There is a limitation of a maximum of 64 disks in the array for PPL. This
+keeps the data structures and implementation simple. RAID5 arrays with so
+many disks are unlikely anyway, due to the high risk of multiple disk
+failures, so this restriction should not be a real-life limitation.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 479ecec80593..a863009849a3 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -227,17 +227,20 @@ There are some minimal guarantees that may be expected of a CPU:
(*) On any given CPU, dependent memory accesses will be issued in order, with
respect to itself. This means that for:
- Q = READ_ONCE(P); smp_read_barrier_depends(); D = READ_ONCE(*Q);
+ Q = READ_ONCE(P); D = READ_ONCE(*Q);
the CPU will issue the following memory operations:
Q = LOAD P, D = LOAD *Q
- and always in that order. On most systems, smp_read_barrier_depends()
- does nothing, but it is required for DEC Alpha. The READ_ONCE()
- is required to prevent compiler mischief. Please note that you
- should normally use something like rcu_dereference() instead of
- open-coding smp_read_barrier_depends().
+ and always in that order. However, on DEC Alpha, READ_ONCE() also
+ emits a memory-barrier instruction, so that a DEC Alpha CPU will
+ instead issue the following memory operations:
+
+ Q = LOAD P, MEMORY_BARRIER, D = LOAD *Q, MEMORY_BARRIER
+
+ Whether on DEC Alpha or not, the READ_ONCE() also prevents compiler
+ mischief.
(*) Overlapping loads and stores within a particular CPU will appear to be
ordered within that CPU. This means that for:
@@ -1815,7 +1818,7 @@ The Linux kernel has eight basic CPU memory barriers:
GENERAL mb() smp_mb()
WRITE wmb() smp_wmb()
READ rmb() smp_rmb()
- DATA DEPENDENCY read_barrier_depends() smp_read_barrier_depends()
+ DATA DEPENDENCY READ_ONCE()
All memory barriers except the data dependency barriers imply a compiler
@@ -2864,7 +2867,10 @@ access depends on a read, not all do, so it may not be relied on.
Other CPUs may also have split caches, but must coordinate between the various
cachelets for normal memory accesses. The semantics of the Alpha removes the
-need for coordination in the absence of memory barriers.
+need for hardware coordination in the absence of memory barriers, which
+permitted Alpha to sport higher CPU clock rates back in the day. However,
+please note that smp_read_barrier_depends() should not be used except in
+Alpha arch-specific code and within the READ_ONCE() macro.
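+
+As an illustrative sketch (not taken from the kernel sources), the
+dependent-load pattern that this supports looks like the following,
+where the reader needs no explicit barrier of its own:
+
+	/* writer: initialize the structure, then publish the pointer */
+	p->a = 1;
+	smp_store_release(&gp, p);
+
+	/* reader: READ_ONCE() supplies the dependency barrier on Alpha */
+	q = READ_ONCE(gp);
+	if (q)
+		d = q->a; /* ordered after the load of gp */
+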
CACHE COHERENCY VS DMA
diff --git a/Documentation/mtd/spi-nor.txt b/Documentation/mtd/spi-nor.txt
index 548d6306ebca..da1fbff5a24c 100644
--- a/Documentation/mtd/spi-nor.txt
+++ b/Documentation/mtd/spi-nor.txt
@@ -60,3 +60,6 @@ The main API is spi_nor_scan(). Before you call the hook, a driver should
initialize the necessary fields for spi_nor{}. Please see
drivers/mtd/spi-nor/spi-nor.c for detail. Please also refer to fsl-quadspi.c
when you want to write a new driver for a SPI NOR controller.
+Another API is spi_nor_restore(). It is used to restore the state of the
+SPI flash chip, such as the addressing mode. Call it whenever the driver is
+detached from the device or the system is rebooted.
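+
+As a rough sketch (the driver structure and field names here are
+hypothetical), a controller driver might call it from its shutdown hook:
+
+  static void my_qspi_shutdown(struct platform_device *pdev)
+  {
+          struct my_qspi *q = platform_get_drvdata(pdev);
+
+          /* e.g. return the flash to 3-byte addressing before reboot */
+          spi_nor_restore(&q->nor);
+  }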
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 7a79b3587dd3..2b89d91b376f 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -36,8 +36,6 @@ bonding.txt
- Linux Ethernet Bonding Driver HOWTO: link aggregation in Linux.
bridge.txt
- where to get user space programs for ethernet bridging with Linux.
-can.txt
- - documentation on CAN protocol family.
cdc_mbim.txt
- 3G/LTE USB modem (Mobile Broadband Interface Model)
checksum-offloads.txt
@@ -228,6 +226,8 @@ x25.txt
- general info on X.25 development.
x25-iface.txt
- description of the X.25 Packet Layer to LAPB device interface.
+xfrm_device.txt
+ - description of XFRM offload API
xfrm_proc.txt
- description of the statistics package for XFRM.
xfrm_sync.txt
diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst
index a342b2cc3dc6..245fb6c0ab6f 100644
--- a/Documentation/networking/batman-adv.rst
+++ b/Documentation/networking/batman-adv.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
==========
batman-adv
==========
diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
new file mode 100644
index 000000000000..d23c51abf8c6
--- /dev/null
+++ b/Documentation/networking/can.rst
@@ -0,0 +1,1437 @@
+===================================
+SocketCAN - Controller Area Network
+===================================
+
+Overview / What is SocketCAN
+============================
+
+The socketcan package is an implementation of CAN protocols
+(Controller Area Network) for Linux. CAN is a networking technology
+which has widespread use in automation, embedded devices, and
+automotive fields. While there have been other CAN implementations
+for Linux based on character devices, SocketCAN uses the Berkeley
+socket API, the Linux network stack and implements the CAN device
+drivers as network interfaces. The CAN socket API has been designed
+as similar as possible to the TCP/IP protocols to allow programmers
+familiar with network programming to easily learn how to use CAN
+sockets.
+
+
+.. _socketcan-motivation:
+
+Motivation / Why Using the Socket API
+=====================================
+
+There have been CAN implementations for Linux before SocketCAN, so the
+question arises why we have started another project. Most existing
+implementations come as a device driver for some CAN hardware; they
+are based on character devices and provide comparatively little
+functionality. Usually, there is only a hardware-specific device
+driver which provides a character device interface to send and
+receive raw CAN frames, directly to/from the controller hardware.
+Queueing of frames and higher-level transport protocols like ISO-TP
+have to be implemented in user space applications. Also, most
+character-device implementations support only one single process to
+open the device at a time, similar to a serial interface. Exchanging
+the CAN controller requires the use of another device driver and often
+requires adapting large parts of the application to the new driver's
+API.
+
+SocketCAN was designed to overcome all of these limitations. A new
+protocol family has been implemented which provides a socket interface
+to user space applications and which builds upon the Linux network
+layer, enabling use of all the provided queueing functionality. A device
+driver for CAN controller hardware registers itself with the Linux
+network layer as a network device, so that CAN frames from the
+controller can be passed up to the network layer and on to the CAN
+protocol family module and also vice-versa. Also, the protocol family
+module provides an API for transport protocol modules to register, so
+that any number of transport protocols can be loaded or unloaded
+dynamically. In fact, the can core module alone does not provide any
+protocol and cannot be used without loading at least one additional
+protocol module. Multiple sockets can be opened at the same time,
+on different or the same protocol module and they can listen/send
+frames on different or the same CAN IDs. Several sockets listening on
+the same interface for frames with the same CAN ID are all passed the
+same received matching CAN frames. An application wishing to
+communicate using a specific transport protocol, e.g. ISO-TP, just
+selects that protocol when opening the socket, and then can read and
+write application data byte streams, without having to deal with
+CAN-IDs, frames, etc.
+
+Similar functionality visible from user-space could be provided by a
+character device, too, but this would lead to a technically inelegant
+solution for a couple of reasons:
+
+* **Intricate usage:** Instead of passing a protocol argument to
+ socket(2) and using bind(2) to select a CAN interface and CAN ID, an
+ application would have to do all these operations using ioctl(2)s.
+
+* **Code duplication:** A character device cannot make use of the Linux
+ network queueing code, so all that code would have to be duplicated
+ for CAN networking.
+
+* **Abstraction:** In most existing character-device implementations, the
+ hardware-specific device driver for a CAN controller directly
+ provides the character device for the application to work with.
+ This is at least very unusual in Unix systems for both char and
+ block devices. For example, you don't have a character device for a
+ certain UART of a serial interface, a certain sound chip in your
+ computer, a SCSI or IDE controller providing access to your hard
+ disk or tape streamer device. Instead, you have abstraction layers
+ which provide a unified character or block device interface to the
+ application on the one hand, and an interface for hardware-specific
+ device drivers on the other hand. These abstractions are provided
+ by subsystems like the tty layer, the audio subsystem or the SCSI
+ and IDE subsystems for the devices mentioned above.
+
+ The easiest way to implement a CAN device driver is as a character
+ device without such a (complete) abstraction layer, as is done by most
+ existing drivers. The right way, however, would be to add such a
+ layer with all the functionality like registering for certain CAN
+ IDs, supporting several open file descriptors and (de)multiplexing
+ CAN frames between them, (sophisticated) queueing of CAN frames, and
+ providing an API for device drivers to register with. However, then
+ it would be no more difficult, or maybe even easier, to use the
+ networking framework provided by the Linux kernel, and this is what
+ SocketCAN does.
+
+The use of the networking framework of the Linux kernel is just the
+natural and most appropriate way to implement CAN for Linux.
+
+
+.. _socketcan-concept:
+
+SocketCAN Concept
+=================
+
+As described in :ref:`socketcan-motivation` the main goal of SocketCAN is to
+provide a socket interface to user space applications which builds
+upon the Linux network layer. In contrast to the commonly known
+TCP/IP and ethernet networking, the CAN bus is a broadcast-only(!)
+medium that has no MAC-layer addressing like ethernet. The CAN-identifier
+(can_id) is used for arbitration on the CAN-bus. Therefore the CAN-IDs
+have to be chosen uniquely on the bus. When designing a CAN-ECU
+network the CAN-IDs are mapped to be sent by a specific ECU.
+For this reason a CAN-ID can be treated best as a kind of source address.
+
+
+.. _socketcan-receive-lists:
+
+Receive Lists
+-------------
+
+The network transparent access of multiple applications leads to the
+problem that different applications may be interested in the same
+CAN-IDs from the same CAN network interface. The SocketCAN core
+module - which implements the protocol family CAN - provides several
+highly efficient receive lists for this reason. If e.g. a user space
+application opens a CAN RAW socket, the raw protocol module itself
+requests from the SocketCAN core the (range of) CAN-IDs that are
+requested by the user. The subscription and unsubscription of
+CAN-IDs can be done for specific CAN interfaces or for all(!) known
+CAN interfaces with the can_rx_(un)register() functions provided to
+CAN protocol modules by the SocketCAN core (see :ref:`socketcan-core-module`).
+To optimize the CPU usage at runtime the receive lists are split up
+into several specific lists per device that match the requested
+filter complexity for a given use-case.
+
+
+.. _socketcan-local-loopback1:
+
+Local Loopback of Sent Frames
+-----------------------------
+
+As known from other networking concepts, the data exchanging
+applications may run on the same or different nodes without any
+change (except for the corresponding addressing information):
+
+.. code::
+
+ ___ ___ ___ _______ ___
+ | _ | | _ | | _ | | _ _ | | _ |
+ ||A|| ||B|| ||C|| ||A| |B|| ||C||
+ |___| |___| |___| |_______| |___|
+ | | | | |
+ -----------------(1)- CAN bus -(2)---------------
+
+To ensure that application A receives the same information in the
+example (2) as it would receive in example (1), there is a need for
+some kind of local loopback of the sent CAN frames on the appropriate
+node.
+
+The Linux network devices (by default) can only handle the
+transmission and reception of media dependent frames. Due to the
+arbitration on the CAN bus the transmission of a low prio CAN-ID
+may be delayed by the reception of a high prio CAN frame. To
+reflect the correct [*]_ traffic on the node the loopback of the sent
+data has to be performed right after a successful transmission. If
+the CAN network interface is not capable of performing the loopback for
+some reason the SocketCAN core can do this task as a fallback solution.
+See :ref:`socketcan-local-loopback2` for details (recommended).
+
+The loopback functionality is enabled by default to reflect standard
+networking behaviour for CAN applications. Due to some requests from
+the RT-SocketCAN group the loopback may optionally be disabled for each
+separate socket. See sockopts from the CAN RAW sockets in :ref:`socketcan-raw-sockets`.
+
+.. [*] You would really like to have this when you're running analyser
+ tools like 'candump' or 'cansniffer' on the (same) node.
+
+
+.. _socketcan-network-problem-notifications:
+
+Network Problem Notifications
+-----------------------------
+
+The use of the CAN bus may lead to several problems on the physical
+and media access control layer. Detecting and logging of these lower
+layer problems is a vital requirement for CAN users to identify
+hardware issues on the physical transceiver layer as well as
+arbitration problems and error frames caused by the different
+ECUs. The occurrence of detected errors is important for diagnosis
+and has to be logged together with the exact timestamp. For this
+reason the CAN interface driver can generate so-called Error Message
+Frames that can optionally be passed to the user application in the
+same way as other CAN frames. Whenever an error on the physical layer
+or the MAC layer is detected (e.g. by the CAN controller) the driver
+creates an appropriate error message frame. Error message frames can
+be requested by the user application using the common CAN filter
+mechanisms. Inside this filter definition the types of errors of
+interest may be selected. The reception of error messages is disabled
+by default. The format of the CAN error message frame is briefly
+described in the Linux header file "include/uapi/linux/can/error.h".
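+
+For example, a minimal sketch that subscribes to all error classes on a
+CAN_RAW socket (the all-classes mask is illustrative; individual CAN_ERR_*
+classes from error.h can be or-ed together instead):
+
+.. code-block:: C
+
+    #include <linux/can.h>
+    #include <linux/can/raw.h>
+
+    can_err_mask_t err_mask = CAN_ERR_MASK; /* all error classes */
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
+               &err_mask, sizeof(err_mask));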
+
+
+How to use SocketCAN
+====================
+
+Like TCP/IP, you first need to open a socket for communicating over a
+CAN network. Since SocketCAN implements a new protocol family, you
+need to pass PF_CAN as the first argument to the socket(2) system
+call. Currently, there are two CAN protocols to choose from, the raw
+socket protocol and the broadcast manager (BCM). So to open a socket,
+you would write::
+
+ s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+
+and::
+
+ s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
+
+respectively. After the successful creation of the socket, you would
+normally use the bind(2) system call to bind the socket to a CAN
+interface (which is different from TCP/IP due to different addressing
+- see :ref:`socketcan-concept`). After binding (CAN_RAW) or connecting (CAN_BCM)
+the socket, you can read(2) and write(2) from/to the socket or use
+send(2), sendto(2), sendmsg(2) and the recv* counterpart operations
+on the socket as usual. There are also CAN specific socket options
+described below.
+
+The basic CAN frame structure and the sockaddr structure are defined
+in include/linux/can.h:
+
+.. code-block:: C
+
+    struct can_frame {
+            canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
+            __u8    can_dlc; /* frame payload length in byte (0 .. 8) */
+            __u8    __pad;   /* padding */
+            __u8    __res0;  /* reserved / padding */
+            __u8    __res1;  /* reserved / padding */
+            __u8    data[8] __attribute__((aligned(8)));
+    };
+
+The alignment of the (linear) payload data[] to a 64bit boundary
+allows the user to define their own structs and unions to easily access
+the CAN payload. There is no given byteorder on the CAN bus by
+default. A read(2) system call on a CAN_RAW socket transfers a
+struct can_frame to the user space.
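+
+For example, a minimal sketch of such an overlay (the union layout, the
+signal names and the byteorder handling are hypothetical; 'frame' is a
+received struct can_frame):
+
+.. code-block:: C
+
+    /* hypothetical signal layout overlaying the 8 payload bytes */
+    union motor_status {
+            __u8 raw[8];
+            struct {
+                    __u16 rpm;   /* byteorder as defined by the application */
+                    __u16 temp;
+                    __u32 flags;
+            } sig;
+    };
+
+    union motor_status *ms = (union motor_status *)frame.data;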
+
+The sockaddr_can structure has an interface index like the
+PF_PACKET socket, that also binds to a specific interface:
+
+.. code-block:: C
+
+    struct sockaddr_can {
+            sa_family_t can_family;
+            int         can_ifindex;
+            union {
+                    /* transport protocol class address info (e.g. ISOTP) */
+                    struct { canid_t rx_id, tx_id; } tp;
+
+                    /* reserved for future CAN protocols address information */
+            } can_addr;
+    };
+
+To determine the interface index an appropriate ioctl() has to
+be used (example for CAN_RAW sockets without error checking):
+
+.. code-block:: C
+
+ int s;
+ struct sockaddr_can addr;
+ struct ifreq ifr;
+
+ s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+
+ strcpy(ifr.ifr_name, "can0" );
+ ioctl(s, SIOCGIFINDEX, &ifr);
+
+ addr.can_family = AF_CAN;
+ addr.can_ifindex = ifr.ifr_ifindex;
+
+ bind(s, (struct sockaddr *)&addr, sizeof(addr));
+
+ (..)
+
+To bind a socket to all(!) CAN interfaces the interface index must
+be 0 (zero). In this case the socket receives CAN frames from every
+enabled CAN interface. To determine the originating CAN interface
+the system call recvfrom(2) may be used instead of read(2). To send
+on a socket that is bound to 'any' interface sendto(2) is needed to
+specify the outgoing interface.
+
+Reading CAN frames from a bound CAN_RAW socket (see above) consists
+of reading a struct can_frame:
+
+.. code-block:: C
+
+    struct can_frame frame;
+
+    nbytes = read(s, &frame, sizeof(struct can_frame));
+
+    if (nbytes < 0) {
+            perror("can raw socket read");
+            return 1;
+    }
+
+    /* paranoid check ... */
+    if (nbytes < sizeof(struct can_frame)) {
+            fprintf(stderr, "read: incomplete CAN frame\n");
+            return 1;
+    }
+
+    /* do something with the received CAN frame */
+
+Writing CAN frames can be done similarly, with the write(2) system call::
+
+ nbytes = write(s, &frame, sizeof(struct can_frame));
+
+When the CAN interface is bound to 'any' existing CAN interface
+(addr.can_ifindex = 0) it is recommended to use recvfrom(2) if the
+information about the originating CAN interface is needed:
+
+.. code-block:: C
+
+ struct sockaddr_can addr;
+ struct ifreq ifr;
+ socklen_t len = sizeof(addr);
+ struct can_frame frame;
+
+ nbytes = recvfrom(s, &frame, sizeof(struct can_frame),
+ 0, (struct sockaddr*)&addr, &len);
+
+ /* get interface name of the received CAN frame */
+ ifr.ifr_ifindex = addr.can_ifindex;
+ ioctl(s, SIOCGIFNAME, &ifr);
+ printf("Received a CAN frame from interface %s", ifr.ifr_name);
+
+To write CAN frames on sockets bound to 'any' CAN interface the
+outgoing interface has to be defined explicitly:
+
+.. code-block:: C
+
+ strcpy(ifr.ifr_name, "can0");
+ ioctl(s, SIOCGIFINDEX, &ifr);
+ addr.can_ifindex = ifr.ifr_ifindex;
+ addr.can_family = AF_CAN;
+
+ nbytes = sendto(s, &frame, sizeof(struct can_frame),
+ 0, (struct sockaddr*)&addr, sizeof(addr));
+
+An accurate timestamp can be obtained with an ioctl(2) call after reading
+a message from the socket:
+
+.. code-block:: C
+
+ struct timeval tv;
+ ioctl(s, SIOCGSTAMP, &tv);
+
+The timestamp has a resolution of one microsecond and is set automatically
+at the reception of a CAN frame.
+
+Remark about CAN FD (flexible data rate) support:
+
+Generally the handling of CAN FD is very similar to the previously described
+examples. The new CAN FD capable CAN controllers support two different
+bitrates for the arbitration phase and the payload phase of the CAN FD frame
+and up to 64 bytes of payload. This extended payload length breaks all the
+kernel interfaces (ABI) which heavily rely on the CAN frame with fixed eight
+bytes of payload (struct can_frame) like the CAN_RAW socket. Therefore e.g.
+the CAN_RAW socket supports a new socket option CAN_RAW_FD_FRAMES that
+switches the socket into a mode that allows the handling of CAN FD frames
+and (legacy) CAN frames simultaneously (see :ref:`socketcan-rawfd`).
+
+The struct canfd_frame is defined in include/linux/can.h:
+
+.. code-block:: C
+
+    struct canfd_frame {
+            canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
+            __u8    len;     /* frame payload length in byte (0 .. 64) */
+            __u8    flags;   /* additional flags for CAN FD */
+            __u8    __res0;  /* reserved / padding */
+            __u8    __res1;  /* reserved / padding */
+            __u8    data[64] __attribute__((aligned(8)));
+    };
+
+The struct canfd_frame and the existing struct can_frame have the can_id,
+the payload length and the payload data at the same offset inside their
+structures. This allows the different structures to be handled very
+similarly. When the content of a struct can_frame is copied into a struct
+canfd_frame all structure elements can be used as-is - only the data[]
+becomes extended.
+
+When introducing the struct canfd_frame it turned out that the data length
+code (DLC) of the struct can_frame was used as a length information, as
+length and DLC have a 1:1 mapping in the range of 0 .. 8. To preserve
+the easy handling of the length information the canfd_frame.len element
+contains a plain length value from 0 .. 64. So both canfd_frame.len and
+can_frame.can_dlc are equal and contain a length information and no DLC.
+For details about the distinction of CAN and CAN FD capable devices and
+the mapping to the bus-relevant data length code (DLC), see :ref:`socketcan-can-fd-driver`.
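+
+To illustrate, a minimal sketch (no validity checks) of copying a legacy
+CAN frame into a CAN FD frame under these rules:
+
+.. code-block:: C
+
+    void cf_to_cfd(const struct can_frame *cf, struct canfd_frame *cfd)
+    {
+            memset(cfd, 0, sizeof(*cfd));
+
+            /* can_id and length keep their meaning - only data[] grows */
+            cfd->can_id = cf->can_id;
+            cfd->len    = cf->can_dlc;
+            memcpy(cfd->data, cf->data, cf->can_dlc);
+    }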
+
+The length of the two CAN(FD) frame structures define the maximum transfer
+unit (MTU) of the CAN(FD) network interface and skbuff data length. Two
+definitions are specified for CAN specific MTUs in include/linux/can.h:
+
+.. code-block:: C
+
+ #define CAN_MTU (sizeof(struct can_frame)) == 16 => 'legacy' CAN frame
+ #define CANFD_MTU (sizeof(struct canfd_frame)) == 72 => CAN FD frame
+
+
+.. _socketcan-raw-sockets:
+
+RAW Protocol Sockets with can_filters (SOCK_RAW)
+------------------------------------------------
+
+Using CAN_RAW sockets is largely comparable to the commonly
+known access to CAN character devices. To meet the new possibilities
+provided by the multi user SocketCAN approach, some reasonable
+defaults are set at RAW socket binding time:
+
+- The filters are set to exactly one filter receiving everything
+- The socket only receives valid data frames (=> no error message frames)
+- The loopback of sent CAN frames is enabled (see :ref:`socketcan-local-loopback2`)
+- The socket does not receive its own sent frames (in loopback mode)
+
+These default settings may be changed before or after binding the socket.
+To use the referenced definitions of the socket options for CAN_RAW
+sockets, include <linux/can/raw.h>.
+
+
+.. _socketcan-rawfilter:
+
+RAW socket option CAN_RAW_FILTER
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The reception of CAN frames using CAN_RAW sockets can be controlled
+by defining 0 .. n filters with the CAN_RAW_FILTER socket option.
+
+The CAN filter structure is defined in include/linux/can.h:
+
+.. code-block:: C
+
+    struct can_filter {
+            canid_t can_id;
+            canid_t can_mask;
+    };
+
+A filter matches, when:
+
+.. code-block:: C
+
+ <received_can_id> & mask == can_id & mask
+
+which is analogous to the semantics of well-known CAN controller
+hardware filters. The filter can be inverted in this semantic, when the
+CAN_INV_FILTER bit is set in the can_id element of the can_filter
+structure. In contrast to CAN controller hardware filters the user may
+set 0 .. n receive filters for each open socket separately:
+
+.. code-block:: C
+
+ struct can_filter rfilter[2];
+
+ rfilter[0].can_id = 0x123;
+ rfilter[0].can_mask = CAN_SFF_MASK;
+ rfilter[1].can_id = 0x200;
+ rfilter[1].can_mask = 0x700;
+
+ setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
+
+To disable the reception of CAN frames on the selected CAN_RAW socket:
+
+.. code-block:: C
+
+ setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0);
+
+Setting zero filters may seem pointless, as simply not reading the data
+would also cause the raw socket to discard the received CAN frames. But
+for this 'send only' use-case the receive list in the kernel can be
+removed to save a little (really a very little!) CPU usage.
+
+CAN Filter Usage Optimisation
+.............................
+
+The CAN filters are processed in per-device filter lists at CAN frame
+reception time. To reduce the number of checks that need to be performed
+while walking through the filter lists the CAN core provides an optimized
+filter handling when the filter subscription focuses on a single CAN ID.
+
+For the possible 2048 SFF CAN identifiers the identifier is used as an index
+to access the corresponding subscription list without any further checks.
+For the 2^29 possible EFF CAN identifiers a 10 bit XOR folding is used as
+hash function to retrieve the EFF table index.
+
+To benefit from the optimized filters for single CAN identifiers the
+CAN_SFF_MASK or CAN_EFF_MASK has to be set in can_filter.mask together
+with the CAN_EFF_FLAG and CAN_RTR_FLAG bits. A set CAN_EFF_FLAG bit in the
+can_filter.mask makes clear that it matters whether an SFF or EFF CAN ID is
+subscribed. E.g. in the example from above:
+
+.. code-block:: C
+
+ rfilter[0].can_id = 0x123;
+ rfilter[0].can_mask = CAN_SFF_MASK;
+
+both SFF frames with CAN ID 0x123 and EFF frames with 0xXXXXX123 can pass.
+
+To filter for only 0x123 (SFF) and 0x12345678 (EFF) CAN identifiers the
+filter has to be defined in this way to benefit from the optimized filters:
+
+.. code-block:: C
+
+ struct can_filter rfilter[2];
+
+ rfilter[0].can_id = 0x123;
+ rfilter[0].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_SFF_MASK);
+ rfilter[1].can_id = 0x12345678 | CAN_EFF_FLAG;
+ rfilter[1].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_EFF_MASK);
+
+ setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
+
+
+RAW Socket Option CAN_RAW_ERR_FILTER
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As described in :ref:`socketcan-network-problem-notifications` the CAN
+interface driver can generate so-called Error Message Frames that can
+optionally be passed to the user application in the same way as other
+CAN frames. The possible
+errors are divided into different error classes that may be filtered
+using the appropriate error mask. To register for every possible
+error condition CAN_ERR_MASK can be used as value for the error mask.
+The values for the error mask are defined in linux/can/error.h:
+
+.. code-block:: C
+
+ can_err_mask_t err_mask = ( CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF );
+
+ setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
+ &err_mask, sizeof(err_mask));
+
+
+RAW Socket Option CAN_RAW_LOOPBACK
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To meet multi user needs the local loopback is enabled by default
+(see :ref:`socketcan-local-loopback1` for details). But in some embedded use-cases
+(e.g. when only one application uses the CAN bus) this loopback
+functionality can be disabled (separately for each socket):
+
+.. code-block:: C
+
+ int loopback = 0; /* 0 = disabled, 1 = enabled (default) */
+
+ setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK, &loopback, sizeof(loopback));
+
+
+RAW socket option CAN_RAW_RECV_OWN_MSGS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the local loopback is enabled, all the sent CAN frames are
+looped back to the open CAN sockets that registered for the CAN
+frames' CAN-ID on this given interface to meet the multi user
+needs. The reception of the CAN frames on the same socket that was
+sending the CAN frame is assumed to be unwanted and therefore
+disabled by default. This default behaviour may be changed on
+demand:
+
+.. code-block:: C
+
+ int recv_own_msgs = 1; /* 0 = disabled (default), 1 = enabled */
+
+ setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
+ &recv_own_msgs, sizeof(recv_own_msgs));
+
+
+.. _socketcan-rawfd:
+
+RAW Socket Option CAN_RAW_FD_FRAMES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+CAN FD support in CAN_RAW sockets can be enabled with a new socket option
+CAN_RAW_FD_FRAMES which is off by default. When the new socket option is
+not supported by the CAN_RAW socket (e.g. on older kernels), switching the
+CAN_RAW_FD_FRAMES option returns the error -ENOPROTOOPT.
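+
+Enabling the option works like any other CAN_RAW socket option, e.g.
+(a minimal sketch without error checking):
+
+.. code-block:: C
+
+    int enable_canfd = 1;
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
+               &enable_canfd, sizeof(enable_canfd));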
+
+Once CAN_RAW_FD_FRAMES is enabled the application can send both CAN frames
+and CAN FD frames. On the other hand the application has to handle CAN and
+CAN FD frames when reading from the socket:
+
+.. code-block:: C
+
+ CAN_RAW_FD_FRAMES enabled: CAN_MTU and CANFD_MTU are allowed
+ CAN_RAW_FD_FRAMES disabled: only CAN_MTU is allowed (default)
+
+Example:
+
+.. code-block:: C
+
+    /* remember: CANFD_MTU == sizeof(struct canfd_frame) */
+
+    struct canfd_frame cfd;
+    int i, nbytes;
+
+    nbytes = read(s, &cfd, CANFD_MTU);
+
+    if (nbytes == CANFD_MTU) {
+            printf("got CAN FD frame with length %d\n", cfd.len);
+            /* cfd.flags contains valid data */
+    } else if (nbytes == CAN_MTU) {
+            printf("got legacy CAN frame with length %d\n", cfd.len);
+            /* cfd.flags is undefined */
+    } else {
+            fprintf(stderr, "read: invalid CAN(FD) frame\n");
+            return 1;
+    }
+
+    /* the content can be handled independently from the received MTU size */
+
+    printf("can_id: %X data length: %d data: ", cfd.can_id, cfd.len);
+    for (i = 0; i < cfd.len; i++)
+            printf("%02X ", cfd.data[i]);
+
+When a read(2) with size CANFD_MTU only returns CAN_MTU bytes, a legacy
+CAN frame has been read into the provided CAN FD structure. Note that
+the canfd_frame.flags data field is not specified in the struct
+can_frame and is therefore only valid in CANFD_MTU sized CAN FD frames.
+
+Implementation hint for new CAN applications:
+
+To build a CAN FD aware application use struct canfd_frame as the basic
+CAN data structure for CAN_RAW based applications. When the application
+is executed on an older Linux kernel and switching the CAN_RAW_FD_FRAMES
+socket option returns an error: No problem. You'll get legacy CAN frames
+or CAN FD frames and can process them the same way.
+
+When sending to CAN devices make sure that the device is capable of
+handling CAN FD frames by checking if the device maximum transfer unit
+is CANFD_MTU. The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU
+ioctl() syscall.
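+
+A minimal sketch of such a capability check (error handling omitted):
+
+.. code-block:: C
+
+    struct ifreq ifr;
+
+    strcpy(ifr.ifr_name, "can0");
+    ioctl(s, SIOCGIFMTU, &ifr);
+
+    if (ifr.ifr_mtu == CANFD_MTU)
+            printf("%s is CAN FD capable\n", ifr.ifr_name);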
+
+
+RAW socket option CAN_RAW_JOIN_FILTERS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CAN_RAW socket can set multiple CAN identifier specific filters that
+lead to multiple filters in the af_can.c filter processing. These filters
+are independent from each other which leads to logical OR'ed filters when
+applied (see :ref:`socketcan-rawfilter`).
+
+This socket option joins the given CAN filters in the way that only CAN
+frames are passed to user space that match *all* given CAN filters. The
+semantic for the applied filters is therefore changed to a logical AND.
+
+This is useful especially when the filterset is a combination of filters
+where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
+CAN ID ranges from the incoming traffic.
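+
+For example, a sketch (without error checking) that passes all frames
+except those with CAN ID 0x123, by joining a match-all filter with an
+inverted filter:
+
+.. code-block:: C
+
+    int join_filter = 1;
+    struct can_filter rfilter[2];
+
+    /* filter 1: match every CAN ID ... */
+    rfilter[0].can_id   = 0x000;
+    rfilter[0].can_mask = 0x000;
+
+    /* filter 2: ... except CAN ID 0x123 (inverted filter) */
+    rfilter[1].can_id   = 0x123 | CAN_INV_FILTER;
+    rfilter[1].can_mask = CAN_SFF_MASK;
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS,
+               &join_filter, sizeof(join_filter));
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));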
+
+
+RAW Socket Returned Message Flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using the recvmsg(2) call, msg->msg_flags may contain the following flags:
+
+MSG_DONTROUTE:
+ set when the received frame was created on the local host.
+
+MSG_CONFIRM:
+ set when the frame was sent via the socket it is received on.
+ This flag can be interpreted as a 'transmission confirmation' when the
+ CAN driver supports the echo of frames on driver level, see
+ :ref:`socketcan-local-loopback1` and :ref:`socketcan-local-loopback2`.
+ In order to receive such messages, CAN_RAW_RECV_OWN_MSGS must be set.
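+
+A minimal sketch (error handling omitted) of evaluating these flags with
+recvmsg(2):
+
+.. code-block:: C
+
+    struct can_frame frame;
+    struct iovec iov = { .iov_base = &frame, .iov_len = sizeof(frame) };
+    struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
+    int nbytes;
+
+    nbytes = recvmsg(s, &msg, 0);
+
+    if (nbytes >= 0 && (msg.msg_flags & MSG_CONFIRM))
+            printf("frame was sent via this very socket\n");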
+
+
+Broadcast Manager Protocol Sockets (SOCK_DGRAM)
+-----------------------------------------------
+
+The Broadcast Manager protocol provides a command based configuration
+interface to filter and send (e.g. cyclic) CAN messages in kernel space.
+
+Receive filters can be used to down-sample frequent messages, to detect
+events such as message content changes or packet length changes, and to
+do time-out monitoring of received messages.
+
+Periodic transmission tasks of CAN frames or a sequence of CAN frames can be
+created and modified at runtime; both the message content and the two
+possible transmit intervals can be altered.
+
+A BCM socket is not intended for sending individual CAN frames using the
+struct can_frame as known from the CAN_RAW socket. Instead a special BCM
+configuration message is defined. The basic BCM configuration message used
+to communicate with the broadcast manager and the available operations are
+defined in the linux/can/bcm.h include. The BCM message consists of a
+message header with a command ('opcode') followed by zero or more CAN frames.
+The broadcast manager sends responses to user space in the same form:
+
+.. code-block:: C
+
+    struct bcm_msg_head {
+            __u32 opcode;                /* command */
+            __u32 flags;                 /* special flags */
+            __u32 count;                 /* run 'count' times with ival1 */
+            struct timeval ival1, ival2; /* count and subsequent interval */
+            canid_t can_id;              /* unique can_id for task */
+            __u32 nframes;               /* number of can_frames following */
+            struct can_frame frames[0];
+    };
+
+The aligned payload 'frames' uses the same basic CAN frame structure
+defined at the beginning of "How to use SocketCAN" and in the
+include/linux/can.h include. All messages to the broadcast manager from
+user space have this structure.
+
+Note a CAN_BCM socket must be connected instead of bound after socket
+creation (example without error checking):
+
+.. code-block:: C
+
+ int s;
+ struct sockaddr_can addr;
+ struct ifreq ifr;
+
+ s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
+
+ strcpy(ifr.ifr_name, "can0");
+ ioctl(s, SIOCGIFINDEX, &ifr);
+
+ addr.can_family = AF_CAN;
+ addr.can_ifindex = ifr.ifr_ifindex;
+
+ connect(s, (struct sockaddr *)&addr, sizeof(addr));
+
+ (..)
+
+The broadcast manager socket is able to handle any number of in flight
+transmissions or receive filters concurrently. The different RX/TX jobs are
+distinguished by the unique can_id in each BCM message. However additional
+CAN_BCM sockets are recommended to communicate on multiple CAN interfaces.
+When the broadcast manager socket is bound to 'any' CAN interface (=> the
+interface index is set to zero) the configured receive filters apply to any
+CAN interface unless the sendto() syscall is used to overrule the 'any' CAN
+interface index. When using recvfrom() instead of read() to retrieve BCM
+socket messages the originating CAN interface is provided in can_ifindex.
+
+
+Broadcast Manager Operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The opcode defines the operation for the broadcast manager to carry out,
+or details the broadcast manager's response to several events, including
+user requests.
+
+Transmit Operations (user space to broadcast manager):
+
+TX_SETUP:
+ Create (cyclic) transmission task.
+
+TX_DELETE:
+ Remove (cyclic) transmission task, requires only can_id.
+
+TX_READ:
+ Read properties of (cyclic) transmission task for can_id.
+
+TX_SEND:
+ Send one CAN frame.
+
+Transmit Responses (broadcast manager to user space):
+
+TX_STATUS:
+ Reply to TX_READ request (transmission task configuration).
+
+TX_EXPIRED:
+ Notification when counter finishes sending at initial interval
+ 'ival1'. Requires the TX_COUNTEVT flag to be set at TX_SETUP.
+
+Receive Operations (user space to broadcast manager):
+
+RX_SETUP:
+ Create RX content filter subscription.
+
+RX_DELETE:
+ Remove RX content filter subscription, requires only can_id.
+
+RX_READ:
+ Read properties of RX content filter subscription for can_id.
+
+Receive Responses (broadcast manager to user space):
+
+RX_STATUS:
+ Reply to RX_READ request (filter task configuration).
+
+RX_TIMEOUT:
+ Cyclic message is detected to be absent (timer ival1 expired).
+
+RX_CHANGED:
+ BCM message with updated CAN frame (detected content change).
+ Sent on first message received or on receipt of revised CAN messages.
+
+
+Broadcast Manager Message Flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When sending a message to the broadcast manager the 'flags' element may
+contain the following flag definitions which influence the behaviour:
+
+SETTIMER:
+ Set the values of ival1, ival2 and count
+
+STARTTIMER:
+ Start the timer with the actual values of ival1, ival2
+ and count. Starting the timer also immediately emits a CAN frame.
+
+TX_COUNTEVT:
+ Create the message TX_EXPIRED when count expires
+
+TX_ANNOUNCE:
+ A change of data by the process is emitted immediately.
+
+TX_CP_CAN_ID:
+ Copies the can_id from the message header to each
+ subsequent frame in frames. This is intended as usage simplification. For
+ TX tasks the unique can_id from the message header may differ from the
+ can_id(s) stored for transmission in the subsequent struct can_frame(s).
+
+RX_FILTER_ID:
+ Filter by can_id alone, no frames required (nframes=0).
+
+RX_CHECK_DLC:
+ A change of the DLC leads to an RX_CHANGED.
+
+RX_NO_AUTOTIMER:
+ Prevent automatically starting the timeout monitor.
+
+RX_ANNOUNCE_RESUME:
+ If passed at RX_SETUP and a receive timeout occurred, an
+ RX_CHANGED message will be generated when the (cyclic) receive restarts.
+
+TX_RESET_MULTI_IDX:
+ Reset the index for the multiple frame transmission.
+
+RX_RTR_FRAME:
+ Send reply for RTR-request (placed in op->frames[0]).
+
+
+Broadcast Manager Transmission Timers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Periodic transmission configurations may use up to two interval timers.
+In this case the BCM sends a number of messages ('count') at an interval
+'ival1', then continues to send at another given interval 'ival2'. When
+only one timer is needed 'count' is set to zero and only 'ival2' is used.
+When the SETTIMER and STARTTIMER flags are set the timers are activated.
+The timer values can be altered at runtime when only SETTIMER is set.
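+
+For example, a sketch (without error checking) of a TX_SETUP that sends
+ten frames at 100 ms and then continues at a 1 second interval:
+
+.. code-block:: C
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame;
+    } msg;
+
+    msg.msg_head.opcode  = TX_SETUP;
+    msg.msg_head.can_id  = 0x42;
+    msg.msg_head.flags   = SETTIMER | STARTTIMER | TX_COUNTEVT;
+    msg.msg_head.count   = 10;
+    msg.msg_head.ival1.tv_sec  = 0;      /* 10 transmissions ... */
+    msg.msg_head.ival1.tv_usec = 100000; /* ... at 100 ms */
+    msg.msg_head.ival2.tv_sec  = 1;      /* then continue at 1 s */
+    msg.msg_head.ival2.tv_usec = 0;
+    msg.msg_head.nframes = 1;
+
+    msg.frame.can_id  = 0x42;
+    msg.frame.can_dlc = 2;
+    msg.frame.data[0] = 0x12;
+    msg.frame.data[1] = 0x34;
+
+    write(s, &msg, sizeof(msg));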
+
+
+Broadcast Manager message sequence transmission
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Up to 256 CAN frames can be transmitted in a sequence in the case of a cyclic
+TX task configuration. The number of CAN frames is provided in the 'nframes'
+element of the BCM message head. The defined number of CAN frames are added
+as array to the TX_SETUP BCM configuration message:
+
+.. code-block:: C
+
+    /* create a struct to set up a sequence of four CAN frames */
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame[4];
+    } mytxmsg;
+
+    (..)
+    mytxmsg.msg_head.nframes = 4;
+    (..)
+
+    write(s, &mytxmsg, sizeof(mytxmsg));
+
+With every transmission the index in the array of CAN frames is increased
+and set to zero at index overflow.
+
+
+Broadcast Manager Receive Filter Timers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The timer values ival1 or ival2 may be set to non-zero values at RX_SETUP.
+When the SETTIMER flag is set the timers are enabled:
+
+ival1:
+ Send RX_TIMEOUT when a received message is not received again within
+ the given time. When STARTTIMER is set at RX_SETUP the timeout detection
+ is activated directly - even without a former CAN frame reception.
+
+ival2:
+ Throttle the received message rate down to the value of ival2. This
+ is useful to reduce messages for the application when the signal inside the
+ CAN frame is stateless as state changes within the ival2 period may get
+ lost.
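+
+A sketch (without error checking) of a pure timeout monitor for CAN ID
+0x42, filtering by CAN ID only:
+
+.. code-block:: C
+
+    struct bcm_msg_head msg_head;
+
+    msg_head.opcode  = RX_SETUP;
+    msg_head.can_id  = 0x42;
+    msg_head.flags   = RX_FILTER_ID | SETTIMER | STARTTIMER;
+    msg_head.ival1.tv_sec  = 1;  /* RX_TIMEOUT if silent for 1 s */
+    msg_head.ival1.tv_usec = 0;
+    msg_head.ival2.tv_sec  = 0;  /* no throttling */
+    msg_head.ival2.tv_usec = 0;
+    msg_head.nframes = 0;
+
+    write(s, &msg_head, sizeof(msg_head));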
+
+Broadcast Manager Multiplex Message Receive Filter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To filter for content changes in multiplex message sequences an array of
+more than one CAN frame can be passed in a RX_SETUP configuration message.
+The data bytes of the first CAN frame contain the mask of relevant bits
+that have to match in the subsequent CAN frames with the received CAN
+frame. If one of the subsequent CAN frames matches, the bits set in that
+frame's data mark the relevant content to be compared with the previously
+received content. Up to 257 CAN frames (multiplex filter bit mask CAN
+frame plus 256 CAN filters) can be added as array to the RX_SETUP BCM
+configuration message:
+
+.. code-block:: C
+
+    /* usually used to clear CAN frame data[] - beware of endian problems! */
+    #define U64_DATA(p) (*(unsigned long long*)(p)->data)
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct can_frame frame[5];
+    } msg;
+
+    msg.msg_head.opcode  = RX_SETUP;
+    msg.msg_head.can_id  = 0x42;
+    msg.msg_head.flags   = 0;
+    msg.msg_head.nframes = 5;
+    U64_DATA(&msg.frame[0]) = 0xFF00000000000000ULL; /* MUX mask */
+    U64_DATA(&msg.frame[1]) = 0x01000000000000FFULL; /* data mask (MUX 0x01) */
+    U64_DATA(&msg.frame[2]) = 0x0200FFFF000000FFULL; /* data mask (MUX 0x02) */
+    U64_DATA(&msg.frame[3]) = 0x330000FFFFFF0003ULL; /* data mask (MUX 0x33) */
+    U64_DATA(&msg.frame[4]) = 0x4F07FC0FF0000000ULL; /* data mask (MUX 0x4F) */
+
+    write(s, &msg, sizeof(msg));
+
+
+Broadcast Manager CAN FD Support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The programming API of the CAN_BCM depends on struct can_frame which is
+given as array directly behind the bcm_msg_head structure. To follow this
+schema for the CAN FD frames a new flag 'CAN_FD_FRAME' in the bcm_msg_head
+flags indicates that the concatenated CAN frame structures behind the
+bcm_msg_head are defined as struct canfd_frame:
+
+.. code-block:: C
+
+    struct {
+            struct bcm_msg_head msg_head;
+            struct canfd_frame frame[5];
+    } msg;
+
+    msg.msg_head.opcode  = RX_SETUP;
+    msg.msg_head.can_id  = 0x42;
+    msg.msg_head.flags   = CAN_FD_FRAME;
+    msg.msg_head.nframes = 5;
+    (..)
+
+When using CAN FD frames for multiplex filtering the MUX mask is still
+expected in the first 64 bit of the struct canfd_frame data section.
+
+
+Connected Transport Protocols (SOCK_SEQPACKET)
+----------------------------------------------
+
+(to be written)
+
+
+Unconnected Transport Protocols (SOCK_DGRAM)
+--------------------------------------------
+
+(to be written)
+
+
+.. _socketcan-core-module:
+
+SocketCAN Core Module
+=====================
+
+The SocketCAN core module implements the protocol family
+PF_CAN. CAN protocol modules are loaded by the core module at
+runtime. The core module provides an interface for CAN protocol
+modules to subscribe needed CAN IDs (see :ref:`socketcan-receive-lists`).
+
+
+can.ko Module Params
+--------------------
+
+- **stats_timer**:
+  To calculate the SocketCAN core statistics
+  (e.g. current/maximum frames per second) this 1 second timer is
+  invoked at can.ko module start time by default. This timer can be
+  disabled by using stats_timer=0 on the module commandline.
+
+- **debug**:
+  (removed since SocketCAN SVN r546)
+
+
+procfs content
+--------------
+
+As described in :ref:`socketcan-receive-lists` the SocketCAN core uses
+several filter lists to deliver received CAN frames to CAN protocol
+modules. These receive lists, their filters and the count of filter
+matches can be checked in the appropriate procfs files. All entries
+contain the device and a protocol module identifier::
+
+ foo@bar:~$ cat /proc/net/can/rcvlist_all
+
+ receive list 'rx_all':
+ (vcan3: no entry)
+ (vcan2: no entry)
+ (vcan1: no entry)
+ device can_id can_mask function userdata matches ident
+ vcan0 000 00000000 f88e6370 f6c6f400 0 raw
+ (any: no entry)
+
+In this example an application requests any CAN traffic from vcan0.
+
+The available receive lists are::
+
+    rcvlist_all - list for unfiltered entries (no filter operations)
+    rcvlist_eff - list for single extended frame (EFF) entries
+    rcvlist_err - list for error message frames masks
+    rcvlist_fil - list for mask/value filters
+    rcvlist_inv - list for mask/value filters (inverse semantic)
+    rcvlist_sff - list for single standard frame (SFF) entries
+
+Additional procfs files in /proc/net/can::
+
+ stats - SocketCAN core statistics (rx/tx frames, match ratios, ...)
+ reset_stats - manual statistic reset
+ version - prints the SocketCAN core version and the ABI version
+
+
+Writing Own CAN Protocol Modules
+--------------------------------
+
+To implement a new protocol in the protocol family PF_CAN a new
+protocol has to be defined in include/linux/can.h.
+The prototypes and definitions to use the SocketCAN core can be
+accessed by including include/linux/can/core.h.
+In addition to functions that register the CAN protocol and the
+CAN device notifier chain there are functions to subscribe CAN
+frames received by CAN interfaces and to send CAN frames::
+
+ can_rx_register - subscribe CAN frames from a specific interface
+ can_rx_unregister - unsubscribe CAN frames from a specific interface
+ can_send - transmit a CAN frame (optional with local loopback)
+
+For details see the kerneldoc documentation in net/can/af_can.c or
+the source code of net/can/raw.c or net/can/bcm.c.
+
+
+CAN Network Drivers
+===================
+
+Writing a CAN network device driver is much easier than writing a
+CAN character device driver. Similar to other known network device
+drivers you mainly have to deal with:
+
+- TX: Put the CAN frame from the socket buffer to the CAN controller.
+- RX: Put the CAN frame from the CAN controller to the socket buffer.
+
+See e.g. Documentation/networking/netdevices.txt. The differences
+for writing a CAN network device driver are described below:
+
+
+General Settings
+----------------
+
+.. code-block:: C
+
+    dev->type  = ARPHRD_CAN; /* the netdevice hardware type */
+    dev->flags = IFF_NOARP;  /* CAN has no arp */
+
+    dev->mtu = CAN_MTU;   /* sizeof(struct can_frame) -> legacy CAN interface */
+
+    /* or alternatively, when the controller supports CAN with flexible data rate: */
+    dev->mtu = CANFD_MTU; /* sizeof(struct canfd_frame) -> CAN FD interface */
+
+The struct can_frame or struct canfd_frame is the payload of each socket
+buffer (skbuff) in the protocol family PF_CAN.
+
+
+.. _socketcan-local-loopback2:
+
+Local Loopback of Sent Frames
+-----------------------------
+
+As described in :ref:`socketcan-local-loopback1` the CAN network device driver should
+support a local loopback functionality similar to the local echo
+e.g. of tty devices. In this case the driver flag IFF_ECHO has to be
+set to prevent the PF_CAN core from locally echoing sent frames
+(aka loopback) as a fallback solution::
+
+ dev->flags = (IFF_NOARP | IFF_ECHO);
+
+
+CAN Controller Hardware Filters
+-------------------------------
+
+To reduce the interrupt load on deep embedded systems some CAN
+controllers support the filtering of CAN IDs or ranges of CAN IDs.
+These hardware filter capabilities vary from controller to
+controller and have to be identified as not feasible in a multi-user
+networking approach. The use of the very controller specific
+hardware filters could make sense in a very dedicated use-case, as a
+filter on driver level would affect all users in the multi-user
+system. The highly efficient filter sets inside the PF_CAN core allow
+multiple different filters to be set for each socket separately.
+Therefore the use of hardware filters goes into the category 'handmade
+tuning on deep embedded systems'. The author is running an MPC603e
+@133MHz with four SJA1000 CAN controllers from 2002 under heavy bus
+load without any problems ...
+
+
+The Virtual CAN Driver (vcan)
+-----------------------------
+
+Similar to the network loopback devices, vcan offers a virtual local
+CAN interface. A fully qualified address on CAN consists of
+
+- a unique CAN Identifier (CAN ID)
+- the CAN bus this CAN ID is transmitted on (e.g. can0)
+
+so in common use cases more than one virtual CAN interface is needed.
+
+The virtual CAN interfaces allow the transmission and reception of CAN
+frames without real CAN controller hardware. Virtual CAN network
+devices are usually named 'vcanX', like vcan0, vcan1, vcan2, etc.
+When compiled as a module the virtual CAN driver module is called vcan.ko.
+
+Since Linux Kernel version 2.6.24 the vcan driver supports the Kernel
+netlink interface to create vcan network devices. The creation and
+removal of vcan network devices can be managed with the ip(8) tool::
+
+ - Create a virtual CAN network interface:
+ $ ip link add type vcan
+
+ - Create a virtual CAN network interface with a specific name 'vcan42':
+ $ ip link add dev vcan42 type vcan
+
+ - Remove a (virtual CAN) network interface 'vcan42':
+ $ ip link del vcan42
+
+
+The CAN Network Device Driver Interface
+---------------------------------------
+
+The CAN network device driver interface provides a generic interface
+to setup, configure and monitor CAN network devices. The user can then
+configure the CAN device, like setting the bit-timing parameters, via
+the netlink interface using the program "ip" from the "IPROUTE2"
+utility suite. The following chapter describes briefly how to use it.
+Furthermore, the interface uses a common data structure and exports a
+set of common functions, which all real CAN network device drivers
+should use. Please have a look at the SJA1000 or MSCAN driver to
+understand how to use them. The name of the module is can-dev.ko.
+
+
+Netlink interface to set/get devices properties
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CAN device must be configured via the netlink interface. The supported
+netlink message types are defined and briefly described in
+"include/linux/can/netlink.h". CAN link support for the program "ip"
+of the IPROUTE2 utility suite is available and it can be used as shown
+below:
+
+Setting CAN device properties::
+
+ $ ip link set can0 type can help
+ Usage: ip link set DEVICE type can
+ [ bitrate BITRATE [ sample-point SAMPLE-POINT] ] |
+ [ tq TQ prop-seg PROP_SEG phase-seg1 PHASE-SEG1
+ phase-seg2 PHASE-SEG2 [ sjw SJW ] ]
+
+ [ dbitrate BITRATE [ dsample-point SAMPLE-POINT] ] |
+ [ dtq TQ dprop-seg PROP_SEG dphase-seg1 PHASE-SEG1
+ dphase-seg2 PHASE-SEG2 [ dsjw SJW ] ]
+
+ [ loopback { on | off } ]
+ [ listen-only { on | off } ]
+ [ triple-sampling { on | off } ]
+ [ one-shot { on | off } ]
+ [ berr-reporting { on | off } ]
+ [ fd { on | off } ]
+ [ fd-non-iso { on | off } ]
+ [ presume-ack { on | off } ]
+
+ [ restart-ms TIME-MS ]
+ [ restart ]
+
+ Where: BITRATE := { 1..1000000 }
+ SAMPLE-POINT := { 0.000..0.999 }
+ TQ := { NUMBER }
+ PROP-SEG := { 1..8 }
+ PHASE-SEG1 := { 1..8 }
+ PHASE-SEG2 := { 1..8 }
+ SJW := { 1..4 }
+ RESTART-MS := { 0 | NUMBER }
+
+Display CAN device details and statistics::
+
+ $ ip -details -statistics link show can0
+ 2: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 16 qdisc pfifo_fast state UP qlen 10
+ link/can
+ can <TRIPLE-SAMPLING> state ERROR-ACTIVE restart-ms 100
+ bitrate 125000 sample_point 0.875
+ tq 125 prop-seg 6 phase-seg1 7 phase-seg2 2 sjw 1
+ sja1000: tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
+ clock 8000000
+ re-started bus-errors arbit-lost error-warn error-pass bus-off
+ 41 17457 0 41 42 41
+ RX: bytes packets errors dropped overrun mcast
+ 140859 17608 17457 0 0 0
+ TX: bytes packets errors dropped carrier collsns
+ 861 112 0 41 0 0
+
+More information on the above output:
+
+"<TRIPLE-SAMPLING>"
+ Shows the list of selected CAN controller modes: LOOPBACK,
+ LISTEN-ONLY, or TRIPLE-SAMPLING.
+
+"state ERROR-ACTIVE"
+ The current state of the CAN controller: "ERROR-ACTIVE",
+ "ERROR-WARNING", "ERROR-PASSIVE", "BUS-OFF" or "STOPPED"
+
+"restart-ms 100"
+ Automatic restart delay time. If set to a non-zero value, a
+ restart of the CAN controller will be triggered automatically
+ in case of a bus-off condition after the specified delay time
+ in milliseconds. By default it's off.
+
+"bitrate 125000 sample-point 0.875"
+ Shows the real bit-rate in bits/sec and the sample-point in the
+ range 0.000..0.999. If the calculation of bit-timing parameters
+ is enabled in the kernel (CONFIG_CAN_CALC_BITTIMING=y), the
+ bit-timing can be defined by setting the "bitrate" argument.
+ Optionally the "sample-point" can be specified. By default it's
+ 0.000 assuming CIA-recommended sample-points.
+
+"tq 125 prop-seg 6 phase-seg1 7 phase-seg2 2 sjw 1"
+ Shows the time quanta in ns, propagation segment, phase buffer
+ segment 1 and 2 and the synchronisation jump width in units of
+ tq. They allow the CAN bit-timing to be defined in a hardware
+ independent format as proposed by the Bosch CAN 2.0 spec (see
+ chapter 8 of http://www.semiconductors.bosch.de/pdf/can2spec.pdf).
+
+"sja1000: tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1 clock 8000000"
+ Shows the bit-timing constants of the CAN controller, here the
+ "sja1000". The minimum and maximum values of the time segment 1
+ and 2, the synchronisation jump width in units of tq, the
+ bitrate pre-scaler and the CAN system clock frequency in Hz.
+ These constants could be used for user-defined (non-standard)
+ bit-timing calculation algorithms in user-space.
+
+"re-started bus-errors arbit-lost error-warn error-pass bus-off"
+ Shows the number of restarts, bus errors, arbitration lost errors,
+ and the state changes to the error-warning, error-passive and
+ bus-off state. RX overrun errors are listed in the "overrun"
+ field of the standard network statistics.
+
+Setting the CAN Bit-Timing
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CAN bit-timing parameters can always be defined in a hardware
+independent format as proposed in the Bosch CAN 2.0 specification
+specifying the arguments "tq", "prop_seg", "phase_seg1", "phase_seg2"
+and "sjw"::
+
+ $ ip link set canX type can tq 125 prop-seg 6 \
+ phase-seg1 7 phase-seg2 2 sjw 1
+
+If the kernel option CONFIG_CAN_CALC_BITTIMING is enabled, CIA
+recommended CAN bit-timing parameters will be calculated if the bit-
+rate is specified with the argument "bitrate"::
+
+ $ ip link set canX type can bitrate 125000
+
+Note that this works fine for the most common CAN controllers with
+standard bit-rates but may *fail* for exotic bit-rates or CAN system
+clock frequencies. Disabling CONFIG_CAN_CALC_BITTIMING saves some
+space and allows user-space tools to solely determine and set the
+bit-timing parameters. The CAN controller specific bit-timing
+constants can be used for that purpose. They are listed by the
+following command::
+
+ $ ip -details link show can0
+ ...
+ sja1000: clock 8000000 tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
+
+
+Starting and Stopping the CAN Network Device
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A CAN network device is started or stopped as usual with the command
+"ifconfig canX up/down" or "ip link set canX up/down". Be aware that
+you *must* define proper bit-timing parameters for real CAN devices
+before you can start them to avoid error-prone default settings::
+
+ $ ip link set canX up type can bitrate 125000
+
+A device may enter the "bus-off" state if too many errors occurred on
+the CAN bus. Then no more messages are received or sent. An automatic
+bus-off recovery can be enabled by setting the "restart-ms" to a
+non-zero value, e.g.::
+
+ $ ip link set canX type can restart-ms 100
+
+Alternatively, the application may realize the "bus-off" condition
+by monitoring CAN error message frames and do a restart when
+appropriate with the command::
+
+ $ ip link set canX type can restart
+
+Note that a restart will also create a CAN error message frame (see
+also :ref:`socketcan-network-problem-notifications`).
+
+
+.. _socketcan-can-fd-driver:
+
+CAN FD (Flexible Data Rate) Driver Support
+------------------------------------------
+
+CAN FD capable CAN controllers support two different bitrates for the
+arbitration phase and the payload phase of the CAN FD frame. Therefore a
+second bit timing has to be specified in order to enable the CAN FD bitrate.
+
+Additionally CAN FD capable CAN controllers support up to 64 bytes of
+payload. The representation of this length in can_frame.can_dlc and
+canfd_frame.len for userspace applications and inside the Linux network
+layer is a plain value from 0 .. 64 instead of the CAN 'data length code'.
+The data length code was a 1:1 mapping to the payload length in the legacy
+CAN frames anyway. The payload length to the bus-relevant DLC mapping is
+only performed inside the CAN drivers, preferably with the helper
+functions can_dlc2len() and can_len2dlc().
+
+The CAN netdevice driver capabilities can be distinguished by the network
+devices maximum transfer unit (MTU)::
+
+ MTU = 16 (CAN_MTU) => sizeof(struct can_frame) => 'legacy' CAN device
+ MTU = 72 (CANFD_MTU) => sizeof(struct canfd_frame) => CAN FD capable device
+
+The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
+N.B. CAN FD capable devices can also handle and send legacy CAN frames.
+
+When configuring CAN FD capable CAN controllers an additional 'data' bitrate
+has to be set. This bitrate for the data phase of the CAN FD frame has to be
+at least the bitrate which was configured for the arbitration phase. This
+second bitrate is specified analogously to the first bitrate but the bitrate
+setting keywords for the 'data' bitrate start with 'd' e.g. dbitrate,
+dsample-point, dsjw or dtq and similar settings. When a data bitrate is set
+within the configuration process the controller option "fd on" can be
+specified to enable the CAN FD mode in the CAN controller. This controller
+option also switches the device MTU to 72 (CANFD_MTU).
+
+The first CAN FD specification, presented as a whitepaper at the International
+CAN Conference 2012 needed to be improved for data integrity reasons.
+Therefore two CAN FD implementations have to be distinguished today:
+
+- ISO compliant: The ISO 11898-1:2015 CAN FD implementation (default)
+- non-ISO compliant: The CAN FD implementation following the 2012 whitepaper
+
+Finally there are three types of CAN FD controllers:
+
+1. ISO compliant (fixed)
+2. non-ISO compliant (fixed, like the M_CAN IP core v3.0.1 in m_can.c)
+3. ISO/non-ISO CAN FD controllers (switchable, like the PEAK PCAN-USB FD)
+
+The current ISO/non-ISO mode is announced by the CAN controller driver via
+netlink and displayed by the 'ip' tool (controller option FD-NON-ISO).
+The ISO/non-ISO-mode can be altered by setting 'fd-non-iso {on|off}' for
+switchable CAN FD controllers only.
+
+Example configuring 500 kbit/s arbitration bitrate and 4 Mbit/s data bitrate::
+
+ $ ip link set can0 up type can bitrate 500000 sample-point 0.75 \
+ dbitrate 4000000 dsample-point 0.8 fd on
+ $ ip -details link show can0
+ 5: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UNKNOWN \
+ mode DEFAULT group default qlen 10
+ link/can promiscuity 0
+ can <FD> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
+ bitrate 500000 sample-point 0.750
+ tq 50 prop-seg 14 phase-seg1 15 phase-seg2 10 sjw 1
+ pcan_usb_pro_fd: tseg1 1..64 tseg2 1..16 sjw 1..16 brp 1..1024 \
+ brp-inc 1
+ dbitrate 4000000 dsample-point 0.800
+ dtq 12 dprop-seg 7 dphase-seg1 8 dphase-seg2 4 dsjw 1
+ pcan_usb_pro_fd: dtseg1 1..16 dtseg2 1..8 dsjw 1..4 dbrp 1..1024 \
+ dbrp-inc 1
+ clock 80000000
+
+Example when 'fd-non-iso on' is added on this switchable CAN FD adapter::
+
+ can <FD,FD-NON-ISO> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
+
+
+Supported CAN Hardware
+----------------------
+
+Please check the "Kconfig" file in "drivers/net/can" to get an up-to-date
+list of the supported CAN hardware. On the SocketCAN project website
+(see :ref:`socketcan-resources`) there might be further drivers available, also for
+older kernel versions.
+
+
+.. _socketcan-resources:
+
+SocketCAN Resources
+===================
+
+The Linux CAN / SocketCAN project resources (project site / mailing list)
+are referenced in the MAINTAINERS file in the Linux source tree.
+Search for CAN NETWORK [LAYERS|DRIVERS].
+
+Credits
+=======
+
+- Oliver Hartkopp (PF_CAN core, filters, drivers, bcm, SJA1000 driver)
+- Urs Thuermann (PF_CAN core, kernel integration, socket interfaces, raw, vcan)
+- Jan Kizka (RT-SocketCAN core, Socket-API reconciliation)
+- Wolfgang Grandegger (RT-SocketCAN core & drivers, Raw Socket-API reviews, CAN device driver interface, MSCAN driver)
+- Robert Schwebel (design reviews, PTXdist integration)
+- Marc Kleine-Budde (design reviews, Kernel 2.6 cleanups, drivers)
+- Benedikt Spranger (reviews)
+- Thomas Gleixner (LKML reviews, coding style, posting hints)
+- Andrey Volkov (kernel subtree structure, ioctls, MSCAN driver)
+- Matthias Brukner (first SJA1000 CAN netdevice implementation Q2/2003)
+- Klaus Hitschler (PEAK driver integration)
+- Uwe Koppe (CAN netdevices with PF_PACKET approach)
+- Michael Schulze (driver layer loopback requirement, RT CAN drivers review)
+- Pavel Pisa (Bit-timing calculation)
+- Sascha Hauer (SJA1000 platform driver)
+- Sebastian Haas (SJA1000 EMS PCI driver)
+- Markus Plessing (SJA1000 EMS PCI driver)
+- Per Dalen (SJA1000 Kvaser PCI driver)
+- Sam Ravnborg (reviews, coding style, kbuild help)
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
deleted file mode 100644
index aa15b9ee2e70..000000000000
--- a/Documentation/networking/can.txt
+++ /dev/null
@@ -1,1308 +0,0 @@
-============================================================================
-
-can.txt
-
-Readme file for the Controller Area Network Protocol Family (aka SocketCAN)
-
-This file contains
-
- 1 Overview / What is SocketCAN
-
- 2 Motivation / Why using the socket API
-
- 3 SocketCAN concept
- 3.1 receive lists
- 3.2 local loopback of sent frames
- 3.3 network problem notifications
-
- 4 How to use SocketCAN
- 4.1 RAW protocol sockets with can_filters (SOCK_RAW)
- 4.1.1 RAW socket option CAN_RAW_FILTER
- 4.1.2 RAW socket option CAN_RAW_ERR_FILTER
- 4.1.3 RAW socket option CAN_RAW_LOOPBACK
- 4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
- 4.1.5 RAW socket option CAN_RAW_FD_FRAMES
- 4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
- 4.1.7 RAW socket returned message flags
- 4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
- 4.2.1 Broadcast Manager operations
- 4.2.2 Broadcast Manager message flags
- 4.2.3 Broadcast Manager transmission timers
- 4.2.4 Broadcast Manager message sequence transmission
- 4.2.5 Broadcast Manager receive filter timers
- 4.2.6 Broadcast Manager multiplex message receive filter
- 4.2.7 Broadcast Manager CAN FD support
- 4.3 connected transport protocols (SOCK_SEQPACKET)
- 4.4 unconnected transport protocols (SOCK_DGRAM)
-
- 5 SocketCAN core module
- 5.1 can.ko module params
- 5.2 procfs content
- 5.3 writing own CAN protocol modules
-
- 6 CAN network drivers
- 6.1 general settings
- 6.2 local loopback of sent frames
- 6.3 CAN controller hardware filters
- 6.4 The virtual CAN driver (vcan)
- 6.5 The CAN network device driver interface
- 6.5.1 Netlink interface to set/get devices properties
- 6.5.2 Setting the CAN bit-timing
- 6.5.3 Starting and stopping the CAN network device
- 6.6 CAN FD (flexible data rate) driver support
- 6.7 supported CAN hardware
-
- 7 SocketCAN resources
-
- 8 Credits
-
-============================================================================
-
-1. Overview / What is SocketCAN
---------------------------------
-
-The socketcan package is an implementation of CAN protocols
-(Controller Area Network) for Linux. CAN is a networking technology
-which has widespread use in automation, embedded devices, and
-automotive fields. While there have been other CAN implementations
-for Linux based on character devices, SocketCAN uses the Berkeley
-socket API, the Linux network stack and implements the CAN device
-drivers as network interfaces. The CAN socket API has been designed
-as similar as possible to the TCP/IP protocols to allow programmers,
-familiar with network programming, to easily learn how to use CAN
-sockets.
-
-2. Motivation / Why using the socket API
-----------------------------------------
-
-There have been CAN implementations for Linux before SocketCAN so the
-question arises, why we have started another project. Most existing
-implementations come as a device driver for some CAN hardware, they
-are based on character devices and provide comparatively little
-functionality. Usually, there is only a hardware-specific device
-driver which provides a character device interface to send and
-receive raw CAN frames, directly to/from the controller hardware.
-Queueing of frames and higher-level transport protocols like ISO-TP
-have to be implemented in user space applications. Also, most
-character-device implementations support only one single process to
-open the device at a time, similar to a serial interface. Exchanging
-the CAN controller requires employment of another device driver and
-often the need for adaption of large parts of the application to the
-new driver's API.
-
-SocketCAN was designed to overcome all of these limitations. A new
-protocol family has been implemented which provides a socket interface
-to user space applications and which builds upon the Linux network
-layer, enabling use all of the provided queueing functionality. A device
-driver for CAN controller hardware registers itself with the Linux
-network layer as a network device, so that CAN frames from the
-controller can be passed up to the network layer and on to the CAN
-protocol family module and also vice-versa. Also, the protocol family
-module provides an API for transport protocol modules to register, so
-that any number of transport protocols can be loaded or unloaded
-dynamically. In fact, the can core module alone does not provide any
-protocol and cannot be used without loading at least one additional
-protocol module. Multiple sockets can be opened at the same time,
-on different or the same protocol module and they can listen/send
-frames on different or the same CAN IDs. Several sockets listening on
-the same interface for frames with the same CAN ID are all passed the
-same received matching CAN frames. An application wishing to
-communicate using a specific transport protocol, e.g. ISO-TP, just
-selects that protocol when opening the socket, and then can read and
-write application data byte streams, without having to deal with
-CAN-IDs, frames, etc.
-
-Similar functionality visible from user-space could be provided by a
-character device, too, but this would lead to a technically inelegant
-solution for a couple of reasons:
-
-* Intricate usage. Instead of passing a protocol argument to
- socket(2) and using bind(2) to select a CAN interface and CAN ID, an
- application would have to do all these operations using ioctl(2)s.
-
-* Code duplication. A character device cannot make use of the Linux
- network queueing code, so all that code would have to be duplicated
- for CAN networking.
-
-* Abstraction. In most existing character-device implementations, the
- hardware-specific device driver for a CAN controller directly
- provides the character device for the application to work with.
- This is at least very unusual in Unix systems for both, char and
- block devices. For example you don't have a character device for a
- certain UART of a serial interface, a certain sound chip in your
- computer, a SCSI or IDE controller providing access to your hard
- disk or tape streamer device. Instead, you have abstraction layers
- which provide a unified character or block device interface to the
- application on the one hand, and a interface for hardware-specific
- device drivers on the other hand. These abstractions are provided
- by subsystems like the tty layer, the audio subsystem or the SCSI
- and IDE subsystems for the devices mentioned above.
-
- The easiest way to implement a CAN device driver is as a character
- device without such a (complete) abstraction layer, as is done by most
- existing drivers. The right way, however, would be to add such a
- layer with all the functionality like registering for certain CAN
- IDs, supporting several open file descriptors and (de)multiplexing
- CAN frames between them, (sophisticated) queueing of CAN frames, and
- providing an API for device drivers to register with. However, then
- it would be no more difficult, or may be even easier, to use the
- networking framework provided by the Linux kernel, and this is what
- SocketCAN does.
-
- The use of the networking framework of the Linux kernel is just the
- natural and most appropriate way to implement CAN for Linux.
-
-3. SocketCAN concept
----------------------
-
- As described in chapter 2 it is the main goal of SocketCAN to
- provide a socket interface to user space applications which builds
- upon the Linux network layer. In contrast to the commonly known
- TCP/IP and ethernet networking, the CAN bus is a broadcast-only(!)
- medium that has no MAC-layer addressing like ethernet. The CAN-identifier
- (can_id) is used for arbitration on the CAN-bus. Therefore the CAN-IDs
- have to be chosen uniquely on the bus. When designing a CAN-ECU
- network the CAN-IDs are mapped to be sent by a specific ECU.
- For this reason a CAN-ID can be treated best as a kind of source address.
-
- 3.1 receive lists
-
- The network transparent access of multiple applications leads to the
- problem that different applications may be interested in the same
- CAN-IDs from the same CAN network interface. The SocketCAN core
- module - which implements the protocol family CAN - provides several
- high efficient receive lists for this reason. If e.g. a user space
- application opens a CAN RAW socket, the raw protocol module itself
- requests the (range of) CAN-IDs from the SocketCAN core that are
- requested by the user. The subscription and unsubscription of
- CAN-IDs can be done for specific CAN interfaces or for all(!) known
- CAN interfaces with the can_rx_(un)register() functions provided to
- CAN protocol modules by the SocketCAN core (see chapter 5).
- To optimize the CPU usage at runtime the receive lists are split up
- into several specific lists per device that match the requested
- filter complexity for a given use-case.
-
- 3.2 local loopback of sent frames
-
- As known from other networking concepts the data exchanging
- applications may run on the same or different nodes without any
- change (except for the according addressing information):
-
- ___ ___ ___ _______ ___
- | _ | | _ | | _ | | _ _ | | _ |
- ||A|| ||B|| ||C|| ||A| |B|| ||C||
- |___| |___| |___| |_______| |___|
- | | | | |
- -----------------(1)- CAN bus -(2)---------------
-
- To ensure that application A receives the same information in the
- example (2) as it would receive in example (1) there is need for
- some kind of local loopback of the sent CAN frames on the appropriate
- node.
-
- The Linux network devices (by default) just can handle the
- transmission and reception of media dependent frames. Due to the
- arbitration on the CAN bus the transmission of a low prio CAN-ID
- may be delayed by the reception of a high prio CAN frame. To
- reflect the correct* traffic on the node the loopback of the sent
- data has to be performed right after a successful transmission. If
- the CAN network interface is not capable of performing the loopback for
- some reason the SocketCAN core can do this task as a fallback solution.
- See chapter 6.2 for details (recommended).
-
- The loopback functionality is enabled by default to reflect standard
- networking behaviour for CAN applications. Due to some requests from
- the RT-SocketCAN group the loopback optionally may be disabled for each
- separate socket. See sockopts from the CAN RAW sockets in chapter 4.1.
-
- * = you really like to have this when you're running analyser tools
- like 'candump' or 'cansniffer' on the (same) node.
-
- 3.3 network problem notifications
-
- The use of the CAN bus may lead to several problems on the physical
- and media access control layer. Detecting and logging of these lower
- layer problems is a vital requirement for CAN users to identify
- hardware issues on the physical transceiver layer as well as
- arbitration problems and error frames caused by the different
- ECUs. The occurrence of detected errors is important for diagnosis
- and has to be logged together with the exact timestamp. For this
- reason the CAN interface driver can generate so called Error Message
- Frames that can optionally be passed to the user application in the
- same way as other CAN frames. Whenever an error on the physical layer
- or the MAC layer is detected (e.g. by the CAN controller) the driver
- creates an appropriate error message frame. Error message frames can
- be requested by the user application using the common CAN filter
- mechanisms. Inside this filter definition the error types of
- interest may be selected. The reception of error messages is disabled
- by default. The format of the CAN error message frame is briefly
- described in the Linux header file "include/uapi/linux/can/error.h".
-
-4. How to use SocketCAN
-------------------------
-
- Like TCP/IP, you first need to open a socket for communicating over a
- CAN network. Since SocketCAN implements a new protocol family, you
- need to pass PF_CAN as the first argument to the socket(2) system
- call. Currently, there are two CAN protocols to choose from, the raw
- socket protocol and the broadcast manager (BCM). So to open a socket,
- you would write
-
- s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
-
- and
-
- s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
-
- respectively. After the successful creation of the socket, you would
- normally use the bind(2) system call to bind the socket to a CAN
- interface (which is different from TCP/IP due to different addressing
- - see chapter 3). After binding (CAN_RAW) or connecting (CAN_BCM)
- the socket, you can read(2) and write(2) from/to the socket or use
- send(2), sendto(2), sendmsg(2) and the recv* counterpart operations
- on the socket as usual. There are also CAN specific socket options
- described below.
-
- The basic CAN frame structure and the sockaddr structure are defined
- in include/linux/can.h:
-
- struct can_frame {
- canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
- __u8 can_dlc; /* frame payload length in byte (0 .. 8) */
- __u8 __pad; /* padding */
- __u8 __res0; /* reserved / padding */
- __u8 __res1; /* reserved / padding */
- __u8 data[8] __attribute__((aligned(8)));
- };
-
- The alignment of the (linear) payload data[] to a 64 bit boundary
- allows the user to define own structs and unions to easily access
- the CAN payload. There is no given byte order on the CAN bus by
- default. A read(2) system call on a CAN_RAW socket transfers a
- struct can_frame to the user space.
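-
- Since data[] is aligned to a 64 bit boundary, an application-defined
- overlay can be cast directly onto the payload. A minimal sketch (the
- 'motor_status' layout is a purely hypothetical example - the byte order
- of multi-byte signals is defined by the sending ECU, not by CAN itself):
-
- union motor_status { /* hypothetical application view of data[] */
- __u8 raw[8];
- struct {
- __u8 mode;
- __u8 flags;
- __u16 rpm; /* byte order as defined by the sending ECU */
- __u32 odometer;
- } fields;
- };
-
- union motor_status *status = (union motor_status *)frame.data;
- printf("rpm: %u\n", status->fields.rpm);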
-
- The sockaddr_can structure has an interface index like the
- PF_PACKET socket, which also binds to a specific interface:
-
- struct sockaddr_can {
- sa_family_t can_family;
- int can_ifindex;
- union {
- /* transport protocol class address info (e.g. ISOTP) */
- struct { canid_t rx_id, tx_id; } tp;
-
- /* reserved for future CAN protocols address information */
- } can_addr;
- };
-
- To determine the interface index an appropriate ioctl() has to
- be used (example for CAN_RAW sockets without error checking):
-
- int s;
- struct sockaddr_can addr;
- struct ifreq ifr;
-
- s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
-
- strcpy(ifr.ifr_name, "can0" );
- ioctl(s, SIOCGIFINDEX, &ifr);
-
- addr.can_family = AF_CAN;
- addr.can_ifindex = ifr.ifr_ifindex;
-
- bind(s, (struct sockaddr *)&addr, sizeof(addr));
-
- (..)
-
- To bind a socket to all(!) CAN interfaces the interface index must
- be 0 (zero). In this case the socket receives CAN frames from every
- enabled CAN interface. To determine the originating CAN interface
- the system call recvfrom(2) may be used instead of read(2). To send
- on a socket that is bound to 'any' interface sendto(2) is needed to
- specify the outgoing interface.
-
- Reading CAN frames from a bound CAN_RAW socket (see above) consists
- of reading a struct can_frame:
-
- struct can_frame frame;
-
- nbytes = read(s, &frame, sizeof(struct can_frame));
-
- if (nbytes < 0) {
- perror("can raw socket read");
- return 1;
- }
-
- /* paranoid check ... */
- if (nbytes < sizeof(struct can_frame)) {
- fprintf(stderr, "read: incomplete CAN frame\n");
- return 1;
- }
-
- /* do something with the received CAN frame */
-
- Writing CAN frames can be done similarly, with the write(2) system call:
-
- nbytes = write(s, &frame, sizeof(struct can_frame));
-
- When the CAN interface is bound to 'any' existing CAN interface
- (addr.can_ifindex = 0) it is recommended to use recvfrom(2) if the
- information about the originating CAN interface is needed:
-
- struct sockaddr_can addr;
- struct ifreq ifr;
- socklen_t len = sizeof(addr);
- struct can_frame frame;
-
- nbytes = recvfrom(s, &frame, sizeof(struct can_frame),
- 0, (struct sockaddr*)&addr, &len);
-
- /* get interface name of the received CAN frame */
- ifr.ifr_ifindex = addr.can_ifindex;
- ioctl(s, SIOCGIFNAME, &ifr);
- printf("Received a CAN frame from interface %s", ifr.ifr_name);
-
- To write CAN frames on sockets bound to 'any' CAN interface the
- outgoing interface has to be specified explicitly:
-
- strcpy(ifr.ifr_name, "can0");
- ioctl(s, SIOCGIFINDEX, &ifr);
- addr.can_ifindex = ifr.ifr_ifindex;
- addr.can_family = AF_CAN;
-
- nbytes = sendto(s, &frame, sizeof(struct can_frame),
- 0, (struct sockaddr*)&addr, sizeof(addr));
-
- An accurate timestamp can be obtained with an ioctl(2) call after reading
- a message from the socket:
-
- struct timeval tv;
- ioctl(s, SIOCGSTAMP, &tv);
-
- The timestamp has a resolution of one microsecond and is set automatically
- at the reception of a CAN frame.
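-
- E.g. to print the received timestamp in the usual seconds.microseconds
- notation:
-
- printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);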
-
- Remark about CAN FD (flexible data rate) support:
-
- Generally the handling of CAN FD is very similar to the examples
- described above. The new CAN FD capable CAN controllers support two
- different bitrates for the arbitration phase and the payload phase
- of the CAN FD frame
- and up to 64 bytes of payload. This extended payload length breaks all the
- kernel interfaces (ABI) which heavily rely on the CAN frame with fixed eight
- bytes of payload (struct can_frame) like the CAN_RAW socket. Therefore e.g.
- the CAN_RAW socket supports a new socket option CAN_RAW_FD_FRAMES that
- switches the socket into a mode that allows the handling of CAN FD frames
- and (legacy) CAN frames simultaneously (see section 4.1.5).
-
- The struct canfd_frame is defined in include/linux/can.h:
-
- struct canfd_frame {
- canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
- __u8 len; /* frame payload length in byte (0 .. 64) */
- __u8 flags; /* additional flags for CAN FD */
- __u8 __res0; /* reserved / padding */
- __u8 __res1; /* reserved / padding */
- __u8 data[64] __attribute__((aligned(8)));
- };
-
- The struct canfd_frame and the existing struct can_frame have the can_id,
- the payload length and the payload data at the same offset inside their
- structures. This allows the different structures to be handled in a very
- similar way. When the content of a struct can_frame is copied into a
- struct canfd_frame all structure elements can be used as-is - only the
- data[] becomes extended.
-
- When introducing the struct canfd_frame it turned out that the data length
- code (DLC) of the struct can_frame was already used as pure length
- information, as the length and the DLC have a 1:1 mapping in the range
- of 0 .. 8. To preserve the easy handling of the length information the
- canfd_frame.len element contains a plain length value from 0 .. 64. So
- both canfd_frame.len and can_frame.can_dlc are semantically equal and
- contain a plain length value and no DLC.
- For details about the distinction of CAN and CAN FD capable devices and
- the mapping to the bus-relevant data length code (DLC), see chapter 6.6.
-
- The lengths of the two CAN(FD) frame structures define the maximum transfer
- unit (MTU) of the CAN(FD) network interface and the skbuff data length. Two
- definitions are specified for CAN specific MTUs in include/linux/can.h:
-
- #define CAN_MTU (sizeof(struct can_frame)) == 16 => 'legacy' CAN frame
- #define CANFD_MTU (sizeof(struct canfd_frame)) == 72 => CAN FD frame
-
- 4.1 RAW protocol sockets with can_filters (SOCK_RAW)
-
- Using CAN_RAW sockets is largely comparable to the commonly
- known access to CAN character devices. To meet the new possibilities
- provided by the multi user SocketCAN approach, some reasonable
- defaults are set at RAW socket binding time:
-
- - The filters are set to exactly one filter receiving everything
- - The socket only receives valid data frames (=> no error message frames)
- - The loopback of sent CAN frames is enabled (see chapter 3.2)
- - The socket does not receive its own sent frames (in loopback mode)
-
- These default settings may be changed before or after binding the socket.
- To use the referenced definitions of the socket options for CAN_RAW
- sockets, include <linux/can/raw.h>.
-
- 4.1.1 RAW socket option CAN_RAW_FILTER
-
- The reception of CAN frames using CAN_RAW sockets can be controlled
- by defining 0 .. n filters with the CAN_RAW_FILTER socket option.
-
- The CAN filter structure is defined in include/linux/can.h:
-
- struct can_filter {
- canid_t can_id;
- canid_t can_mask;
- };
-
- A filter matches, when
-
- <received_can_id> & mask == can_id & mask
-
- which is analogous to the hardware filter semantics of known CAN
- controllers. The filter can be inverted in this semantic, when the
- CAN_INV_FILTER bit is set in the can_id element of the can_filter
- structure. In contrast to CAN controller hardware filters the user may
- set 0 .. n receive filters for each open socket separately:
-
- struct can_filter rfilter[2];
-
- rfilter[0].can_id = 0x123;
- rfilter[0].can_mask = CAN_SFF_MASK;
- rfilter[1].can_id = 0x200;
- rfilter[1].can_mask = 0x700;
-
- setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
-
- To disable the reception of CAN frames on the selected CAN_RAW socket:
-
- setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0);
-
- Setting an empty filter list is mostly pointless, as not reading the
- data causes the raw socket to discard the received CAN frames anyway.
- But for this 'send only' use-case the kernel can remove the receive
- list and save a little (really a very little!) CPU usage.
-
- 4.1.1.1 CAN filter usage optimisation
-
- The CAN filters are processed in per-device filter lists at CAN frame
- reception time. To reduce the number of checks that need to be performed
- while walking through the filter lists the CAN core provides an optimized
- filter handling when the filter subscription focusses on a single CAN ID.
-
- For the possible 2048 SFF CAN identifiers the identifier is used as an index
- to access the corresponding subscription list without any further checks.
- For the 2^29 possible EFF CAN identifiers a 10 bit XOR folding is used as
- hash function to retrieve the EFF table index.
-
- To benefit from the optimized filters for single CAN identifiers the
- CAN_SFF_MASK or CAN_EFF_MASK has to be set into can_filter.mask together
- with the CAN_EFF_FLAG and CAN_RTR_FLAG bits. A set CAN_EFF_FLAG bit in the
- can_filter.mask makes it explicit whether an SFF or an EFF CAN ID is
- subscribed. E.g. in the example from above
-
- rfilter[0].can_id = 0x123;
- rfilter[0].can_mask = CAN_SFF_MASK;
-
- both SFF frames with CAN ID 0x123 and EFF frames with 0xXXXXX123 can pass.
-
- To filter for only 0x123 (SFF) and 0x12345678 (EFF) CAN identifiers the
- filter has to be defined in this way to benefit from the optimized filters:
-
- struct can_filter rfilter[2];
-
- rfilter[0].can_id = 0x123;
- rfilter[0].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_SFF_MASK);
- rfilter[1].can_id = 0x12345678 | CAN_EFF_FLAG;
- rfilter[1].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_EFF_MASK);
-
- setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
-
- 4.1.2 RAW socket option CAN_RAW_ERR_FILTER
-
- As described in chapter 3.3 the CAN interface driver can generate so
- called Error Message Frames that can optionally be passed to the user
- application in the same way as other CAN frames. The possible
- errors are divided into different error classes that may be filtered
- using the appropriate error mask. To register for every possible
- error condition CAN_ERR_MASK can be used as value for the error mask.
- The values for the error mask are defined in linux/can/error.h .
-
- can_err_mask_t err_mask = ( CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF );
-
- setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
- &err_mask, sizeof(err_mask));
-
- 4.1.3 RAW socket option CAN_RAW_LOOPBACK
-
- To meet multi user needs the local loopback is enabled by default
- (see chapter 3.2 for details). But in some embedded use-cases
- (e.g. when only one application uses the CAN bus) this loopback
- functionality can be disabled (separately for each socket):
-
- int loopback = 0; /* 0 = disabled, 1 = enabled (default) */
-
- setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK, &loopback, sizeof(loopback));
-
- 4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
-
- When the local loopback is enabled, all the sent CAN frames are
- looped back to the open CAN sockets that registered for the CAN
- frames' CAN-ID on this given interface to meet the multi user
- needs. The reception of the CAN frames on the same socket that was
- sending the CAN frame is assumed to be unwanted and therefore
- disabled by default. This default behaviour may be changed on
- demand:
-
- int recv_own_msgs = 1; /* 0 = disabled (default), 1 = enabled */
-
- setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
- &recv_own_msgs, sizeof(recv_own_msgs));
-
- 4.1.5 RAW socket option CAN_RAW_FD_FRAMES
-
- CAN FD support in CAN_RAW sockets can be enabled with a new socket option
- CAN_RAW_FD_FRAMES which is off by default. When the new socket option is
- not supported by the CAN_RAW socket (e.g. on older kernels), switching the
- CAN_RAW_FD_FRAMES option returns the error -ENOPROTOOPT.
-
- Once CAN_RAW_FD_FRAMES is enabled the application can send both CAN frames
- and CAN FD frames. On the other hand, the application has to handle CAN
- and CAN FD frames when reading from the socket.
-
- CAN_RAW_FD_FRAMES enabled: CAN_MTU and CANFD_MTU are allowed
- CAN_RAW_FD_FRAMES disabled: only CAN_MTU is allowed (default)
-
- Example:
- [ remember: CANFD_MTU == sizeof(struct canfd_frame) ]
-
- struct canfd_frame cfd;
-
- nbytes = read(s, &cfd, CANFD_MTU);
-
- if (nbytes == CANFD_MTU) {
- printf("got CAN FD frame with length %d\n", cfd.len);
- /* cfd.flags contains valid data */
- } else if (nbytes == CAN_MTU) {
- printf("got legacy CAN frame with length %d\n", cfd.len);
- /* cfd.flags is undefined */
- } else {
- fprintf(stderr, "read: invalid CAN(FD) frame\n");
- return 1;
- }
-
- /* the content can be handled independently from the received MTU size */
-
- printf("can_id: %X data length: %d data: ", cfd.can_id, cfd.len);
- for (i = 0; i < cfd.len; i++)
- printf("%02X ", cfd.data[i]);
-
- When a read with size CANFD_MTU returns only CAN_MTU bytes, a legacy
- CAN frame has been read into the provided CAN FD structure. Note that
- the canfd_frame.flags data field is not specified in the struct
- can_frame and therefore it is only valid in CANFD_MTU sized CAN FD
- frames.
-
- Implementation hint for new CAN applications:
-
- To build a CAN FD aware application use struct canfd_frame as basic CAN
- data structure for CAN_RAW based applications. When the application is
- executed on an older Linux kernel and switching the CAN_RAW_FD_FRAMES
- socket option returns an error: No problem. You'll get legacy CAN frames
- or CAN FD frames and can process them the same way.
-
- When sending to CAN devices make sure that the device is capable of
- handling CAN FD frames by checking if the device maximum transfer unit
- is CANFD_MTU.
- The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
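-
- A minimal sketch of this check, reusing the ioctl() pattern from the
- examples above (again without error checking):
-
- struct ifreq ifr;
- int enable_canfd = 1;
-
- strcpy(ifr.ifr_name, "can0");
- ioctl(s, SIOCGIFMTU, &ifr);
-
- if (ifr.ifr_mtu == CANFD_MTU)
- setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
- &enable_canfd, sizeof(enable_canfd));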
-
- 4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
-
- The CAN_RAW socket can set multiple CAN identifier specific filters that
- lead to multiple filters in the af_can.c filter processing. These filters
- are independent from each other which leads to logically OR'ed filters when
- applied (see 4.1.1).
-
- This socket option joins the given CAN filters in the way that only CAN
- frames are passed to user space that matched *all* given CAN filters. The
- semantic for the applied filters is therefore changed to a logical AND.
-
- This is useful especially when the filterset is a combination of filters
- where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
- CAN ID ranges from the incoming traffic.
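-
- A minimal sketch of such a notch filter (the CAN IDs are arbitrary
- example values): pass the SFF range 0x100 - 0x1FF except CAN ID 0x123:
-
- struct can_filter rfilter[2];
- int join_filters = 1;
-
- rfilter[0].can_id = 0x100;
- rfilter[0].can_mask = 0x700; /* match 0x100 - 0x1FF ... */
- rfilter[1].can_id = 0x123 | CAN_INV_FILTER;
- rfilter[1].can_mask = CAN_SFF_MASK; /* ... but not CAN ID 0x123 */
-
- setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
- setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS,
- &join_filters, sizeof(join_filters));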
-
- 4.1.7 RAW socket returned message flags
-
- When using recvmsg() call, the msg->msg_flags may contain following flags:
-
- MSG_DONTROUTE: set when the received frame was created on the local host.
-
- MSG_CONFIRM: set when the frame was sent via the socket it is received on.
- This flag can be interpreted as a 'transmission confirmation' when the
- CAN driver supports the echo of frames on driver level, see 3.2 and 6.2.
- In order to receive such messages, CAN_RAW_RECV_OWN_MSGS must be set.
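-
- A minimal sketch checking these flags with recvmsg(2):
-
- struct can_frame frame;
- struct iovec iov = { .iov_base = &frame, .iov_len = sizeof(frame) };
- struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
-
- nbytes = recvmsg(s, &msg, 0);
-
- if (nbytes == sizeof(frame) && (msg.msg_flags & MSG_CONFIRM))
- printf("transmission confirmed on this socket\n");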
-
- 4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
-
- The Broadcast Manager protocol provides a command based configuration
- interface to filter and send (e.g. cyclic) CAN messages in kernel space.
-
- Receive filters can be used to down sample frequent messages, to detect
- events such as message content or packet length changes, and to do
- time-out monitoring of received messages.
-
- Periodic transmission tasks of CAN frames or a sequence of CAN frames can be
- created and modified at runtime; both the message content and the two
- possible transmit intervals can be altered.
-
- A BCM socket is not intended for sending individual CAN frames using the
- struct can_frame as known from the CAN_RAW socket. Instead a special BCM
- configuration message is defined. The basic BCM configuration message used
- to communicate with the broadcast manager and the available operations are
- defined in the linux/can/bcm.h include. The BCM message consists of a
- message header with a command ('opcode') followed by zero or more CAN frames.
- The broadcast manager sends responses to user space in the same form:
-
- struct bcm_msg_head {
- __u32 opcode; /* command */
- __u32 flags; /* special flags */
- __u32 count; /* run 'count' times with ival1 */
- struct timeval ival1, ival2; /* count and subsequent interval */
- canid_t can_id; /* unique can_id for task */
- __u32 nframes; /* number of can_frames following */
- struct can_frame frames[0];
- };
-
- The aligned payload 'frames' uses the same basic CAN frame structure defined
- at the beginning of section 4 and in the include/linux/can.h include. All
- messages to the broadcast manager from user space have this structure.
-
- Note a CAN_BCM socket must be connected instead of bound after socket
- creation (example without error checking):
-
- int s;
- struct sockaddr_can addr;
- struct ifreq ifr;
-
- s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
-
- strcpy(ifr.ifr_name, "can0");
- ioctl(s, SIOCGIFINDEX, &ifr);
-
- addr.can_family = AF_CAN;
- addr.can_ifindex = ifr.ifr_ifindex;
-
- connect(s, (struct sockaddr *)&addr, sizeof(addr));
-
- (..)
-
- The broadcast manager socket is able to handle any number of in flight
- transmissions or receive filters concurrently. The different RX/TX jobs are
- distinguished by the unique can_id in each BCM message. However additional
- CAN_BCM sockets are recommended to communicate on multiple CAN interfaces.
- When the broadcast manager socket is bound to 'any' CAN interface (=> the
- interface index is set to zero) the configured receive filters apply to any
- CAN interface unless the sendto() syscall is used to overrule the 'any' CAN
- interface index. When using recvfrom() instead of read() to retrieve BCM
- socket messages the originating CAN interface is provided in can_ifindex.
-
- 4.2.1 Broadcast Manager operations
-
- The opcode defines the operation for the broadcast manager to carry out,
- or details the broadcast manager's response to several events, including
- user requests.
-
- Transmit Operations (user space to broadcast manager):
-
- TX_SETUP: Create (cyclic) transmission task.
-
- TX_DELETE: Remove (cyclic) transmission task, requires only can_id.
-
- TX_READ: Read properties of (cyclic) transmission task for can_id.
-
- TX_SEND: Send one CAN frame.
-
- Transmit Responses (broadcast manager to user space):
-
- TX_STATUS: Reply to TX_READ request (transmission task configuration).
-
- TX_EXPIRED: Notification when counter finishes sending at initial interval
- 'ival1'. Requires the TX_COUNTEVT flag to be set at TX_SETUP.
-
- Receive Operations (user space to broadcast manager):
-
- RX_SETUP: Create RX content filter subscription.
-
- RX_DELETE: Remove RX content filter subscription, requires only can_id.
-
- RX_READ: Read properties of RX content filter subscription for can_id.
-
- Receive Responses (broadcast manager to user space):
-
- RX_STATUS: Reply to RX_READ request (filter task configuration).
-
- RX_TIMEOUT: Cyclic message is detected to be absent (timer ival1 expired).
-
- RX_CHANGED: BCM message with updated CAN frame (detected content change).
- Sent on first message received or on receipt of revised CAN messages.
-
- 4.2.2 Broadcast Manager message flags
-
- When sending a message to the broadcast manager the 'flags' element may
- contain the following flag definitions which influence the behaviour:
-
- SETTIMER: Set the values of ival1, ival2 and count
-
- STARTTIMER: Start the timer with the actual values of ival1, ival2
- and count. Starting the timer also immediately emits a CAN frame.
-
- TX_COUNTEVT: Create the message TX_EXPIRED when count expires
-
- TX_ANNOUNCE: A change of data by the process is emitted immediately.
-
- TX_CP_CAN_ID: Copies the can_id from the message header to each
- subsequent frame in frames. This is intended as usage simplification. For
- TX tasks the unique can_id from the message header may differ from the
- can_id(s) stored for transmission in the subsequent struct can_frame(s).
-
- RX_FILTER_ID: Filter by can_id alone, no frames required (nframes=0).
-
- RX_CHECK_DLC: A change of the DLC leads to an RX_CHANGED.
-
- RX_NO_AUTOTIMER: Prevent automatically starting the timeout monitor.
-
- RX_ANNOUNCE_RESUME: If passed at RX_SETUP and a receive timeout occurred, an
- RX_CHANGED message will be generated when the (cyclic) receive restarts.
-
- TX_RESET_MULTI_IDX: Reset the index for the multiple frame transmission.
-
- RX_RTR_FRAME: Send reply for RTR-request (placed in op->frames[0]).
-
- 4.2.3 Broadcast Manager transmission timers
-
- Periodic transmission configurations may use up to two interval timers.
- In this case the BCM sends a number of messages ('count') at an interval
- 'ival1', then continues to send at another given interval 'ival2'. When
- only one timer is needed 'count' is set to zero and only 'ival2' is used.
- When the SETTIMER and STARTTIMER flags are set the timers are activated.
- The timer values can be altered at runtime when only SETTIMER is set.
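-
- A minimal TX_SETUP sketch (all values are arbitrary examples): send ten
- frames with CAN ID 0x42 every 10 ms, then continue once a second:
-
- struct {
- struct bcm_msg_head msg_head;
- struct can_frame frame;
- } txmsg;
-
- memset(&txmsg, 0, sizeof(txmsg));
- txmsg.msg_head.opcode = TX_SETUP;
- txmsg.msg_head.can_id = 0x42;
- txmsg.msg_head.flags = SETTIMER | STARTTIMER | TX_COUNTEVT;
- txmsg.msg_head.count = 10;
- txmsg.msg_head.ival1.tv_usec = 10000; /* 10 ms for the first 10 frames */
- txmsg.msg_head.ival2.tv_sec = 1; /* then once a second */
- txmsg.msg_head.nframes = 1;
- /* fill txmsg.frame (can_id, can_dlc, data[]) before writing */
-
- write(s, &txmsg, sizeof(txmsg));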
-
- 4.2.4 Broadcast Manager message sequence transmission
-
- Up to 256 CAN frames can be transmitted in a sequence in the case of a cyclic
- TX task configuration. The number of CAN frames is provided in the 'nframes'
- element of the BCM message head. The defined number of CAN frames are added
- as array to the TX_SETUP BCM configuration message.
-
- /* create a struct to set up a sequence of four CAN frames */
- struct {
- struct bcm_msg_head msg_head;
- struct can_frame frame[4];
- } mytxmsg;
-
- (..)
- mytxmsg.msg_head.nframes = 4;
- (..)
-
- write(s, &mytxmsg, sizeof(mytxmsg));
-
- With every transmission the index in the array of CAN frames is increased
- and set to zero at index overflow.
-
- 4.2.5 Broadcast Manager receive filter timers
-
- The timer values ival1 or ival2 may be set to non-zero values at RX_SETUP.
- When the SETTIMER flag is set the timers are enabled:
-
- ival1: Send RX_TIMEOUT when a received message is not received again within
- the given time. When START_TIMER is set at RX_SETUP the timeout detection
- is activated directly - even without a former CAN frame reception.
-
- ival2: Throttle the received message rate down to the value of ival2. This
- is useful to reduce messages for the application when the signal inside the
- CAN frame is stateless as state changes within the ival2 period may get
- lost.
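-
- A minimal RX_SETUP sketch for pure timeout monitoring of CAN ID 0x42
- (an arbitrary example value), using RX_FILTER_ID so that no CAN frame
- has to be attached:
-
- struct bcm_msg_head rxmsg;
-
- memset(&rxmsg, 0, sizeof(rxmsg));
- rxmsg.opcode = RX_SETUP;
- rxmsg.can_id = 0x42;
- rxmsg.flags = SETTIMER | STARTTIMER | RX_FILTER_ID;
- rxmsg.ival1.tv_sec = 1; /* RX_TIMEOUT if 0x42 stays absent for 1 s */
- rxmsg.nframes = 0;
-
- write(s, &rxmsg, sizeof(rxmsg));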
-
- 4.2.6 Broadcast Manager multiplex message receive filter
-
- To filter for content changes in multiplex message sequences an array of
- more than one CAN frame can be passed in an RX_SETUP configuration message.
- The data bytes of the first CAN frame contain the mask of relevant bits
- that have to match in the subsequent CAN frames with the received CAN
- frame. If one of the subsequent CAN frames is matching, the bits in that
- frame data mark the relevant content to be compared with the previously
- received content. Up to 257 CAN frames (multiplex filter bit mask CAN
- frame plus 256 CAN filters) can be added as array to the RX_SETUP BCM
- configuration message.
-
- /* usually used to clear CAN frame data[] - beware of endian problems! */
- #define U64_DATA(p) (*(unsigned long long*)(p)->data)
-
- struct {
- struct bcm_msg_head msg_head;
- struct can_frame frame[5];
- } msg;
-
- msg.msg_head.opcode = RX_SETUP;
- msg.msg_head.can_id = 0x42;
- msg.msg_head.flags = 0;
- msg.msg_head.nframes = 5;
- U64_DATA(&msg.frame[0]) = 0xFF00000000000000ULL; /* MUX mask */
- U64_DATA(&msg.frame[1]) = 0x01000000000000FFULL; /* data mask (MUX 0x01) */
- U64_DATA(&msg.frame[2]) = 0x0200FFFF000000FFULL; /* data mask (MUX 0x02) */
- U64_DATA(&msg.frame[3]) = 0x330000FFFFFF0003ULL; /* data mask (MUX 0x33) */
- U64_DATA(&msg.frame[4]) = 0x4F07FC0FF0000000ULL; /* data mask (MUX 0x4F) */
-
- write(s, &msg, sizeof(msg));
-
- 4.2.7 Broadcast Manager CAN FD support
-
- The programming API of the CAN_BCM depends on struct can_frame which is
- given as array directly behind the bcm_msg_head structure. To follow this
- schema for the CAN FD frames a new flag 'CAN_FD_FRAME' in the bcm_msg_head
- flags indicates that the concatenated CAN frame structures behind the
- bcm_msg_head are defined as struct canfd_frame.
-
- struct {
- struct bcm_msg_head msg_head;
- struct canfd_frame frame[5];
- } msg;
-
- msg.msg_head.opcode = RX_SETUP;
- msg.msg_head.can_id = 0x42;
- msg.msg_head.flags = CAN_FD_FRAME;
- msg.msg_head.nframes = 5;
- (..)
-
- When using CAN FD frames for multiplex filtering the MUX mask is still
- expected in the first 64 bits of the struct canfd_frame data section.
-
- 4.3 connected transport protocols (SOCK_SEQPACKET)
- 4.4 unconnected transport protocols (SOCK_DGRAM)
-
-
-5. SocketCAN core module
--------------------------
-
- The SocketCAN core module implements the protocol family
- PF_CAN. CAN protocol modules are loaded by the core module at
- runtime. The core module provides an interface for CAN protocol
- modules to subscribe needed CAN IDs (see chapter 3.1).
-
- 5.1 can.ko module params
-
- - stats_timer: To calculate the SocketCAN core statistics
- (e.g. current/maximum frames per second) this 1 second timer is
- invoked at can.ko module start time by default. This timer can be
- disabled by using stats_timer=0 on the module command line.
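-
- E.g. to load the module with the statistics timer disabled:
-
- $ modprobe can stats_timer=0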
-
- - debug: (removed since SocketCAN SVN r546)
-
- 5.2 procfs content
-
- As described in chapter 3.1 the SocketCAN core uses several filter
- lists to deliver received CAN frames to CAN protocol modules. These
- receive lists, their filters and the count of filter matches can be
- checked in the appropriate procfs files. All entries contain the
- device and a protocol module identifier:
-
- foo@bar:~$ cat /proc/net/can/rcvlist_all
-
- receive list 'rx_all':
- (vcan3: no entry)
- (vcan2: no entry)
- (vcan1: no entry)
- device can_id can_mask function userdata matches ident
- vcan0 000 00000000 f88e6370 f6c6f400 0 raw
- (any: no entry)
-
- In this example an application requests any CAN traffic from vcan0.
-
- rcvlist_all - list for unfiltered entries (no filter operations)
- rcvlist_eff - list for single extended frame (EFF) entries
- rcvlist_err - list for error message frames masks
- rcvlist_fil - list for mask/value filters
- rcvlist_inv - list for mask/value filters (inverse semantic)
- rcvlist_sff - list for single standard frame (SFF) entries
-
- Additional procfs files in /proc/net/can
-
- stats - SocketCAN core statistics (rx/tx frames, match ratios, ...)
- reset_stats - manual statistic reset
- version - prints the SocketCAN core version and the ABI version
-
- 5.3 writing own CAN protocol modules
-
- To implement a new protocol in the protocol family PF_CAN a new
- protocol has to be defined in include/linux/can.h .
- The prototypes and definitions to use the SocketCAN core can be
- accessed by including include/linux/can/core.h .
- In addition to functions that register the CAN protocol and the
- CAN device notifier chain there are functions to subscribe CAN
- frames received by CAN interfaces and to send CAN frames:
-
- can_rx_register - subscribe CAN frames from a specific interface
- can_rx_unregister - unsubscribe CAN frames from a specific interface
- can_send - transmit a CAN frame (optional with local loopback)
-
- For details see the kerneldoc documentation in net/can/af_can.c or
- the source code of net/can/raw.c or net/can/bcm.c .
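-
- A minimal sketch of a CAN ID subscription inside a protocol module.
- Note that the exact can_rx_register() signature differs between kernel
- versions (check include/linux/can/core.h of the target kernel); net,
- dev and priv are assumed to be provided by the calling context:
-
- static void my_rx_handler(struct sk_buff *skb, void *data)
- {
- /* called for every CAN frame matching the given can_id/mask */
- }
-
- err = can_rx_register(net, dev, 0x123, CAN_SFF_MASK,
- my_rx_handler, priv, "myproto", NULL);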
-
-6. CAN network drivers
-----------------------
-
- Writing a CAN network device driver is much easier than writing a
- CAN character device driver. Similar to other known network device
- drivers you mainly have to deal with:
-
- - TX: Put the CAN frame from the socket buffer to the CAN controller.
- - RX: Put the CAN frame from the CAN controller to the socket buffer.
-
- See e.g. Documentation/networking/netdevices.txt. The differences
- for writing a CAN network device driver are described below:
-
- 6.1 general settings
-
- dev->type = ARPHRD_CAN; /* the netdevice hardware type */
- dev->flags = IFF_NOARP; /* CAN has no arp */
-
- dev->mtu = CAN_MTU; /* sizeof(struct can_frame) -> legacy CAN interface */
-
- or alternatively, when the controller supports CAN with flexible data rate:
- dev->mtu = CANFD_MTU; /* sizeof(struct canfd_frame) -> CAN FD interface */
-
- The struct can_frame or struct canfd_frame is the payload of each socket
- buffer (skbuff) in the protocol family PF_CAN.
-
- 6.2 local loopback of sent frames
-
- As described in chapter 3.2 the CAN network device driver should
- support a local loopback functionality similar to the local echo
- e.g. of tty devices. In this case the driver flag IFF_ECHO has to be
- set to prevent the PF_CAN core from locally echoing sent frames
- (aka loopback) as a fallback solution:
-
- dev->flags = (IFF_NOARP | IFF_ECHO);
-
- 6.3 CAN controller hardware filters
-
- To reduce the interrupt load on deep embedded systems some CAN
- controllers support the filtering of CAN IDs or ranges of CAN IDs.
- These hardware filter capabilities vary from controller to
- controller and are generally not feasible in a multi-user
- networking approach, as a filter on driver level would affect all
- users in the multi-user system. The use of the very controller
- specific hardware filters could make sense only in a very dedicated
- use-case. The highly efficient filter sets inside the PF_CAN core
- allow different multiple filters to be set for each socket
- separately. Therefore the use of hardware filters falls into the
- category 'handmade tuning on deep embedded systems'. The author is running an MPC603e
- @133MHz with four SJA1000 CAN controllers from 2002 under heavy bus
- load without any problems ...
-
- 6.4 The virtual CAN driver (vcan)
-
- Similar to the network loopback devices, vcan offers a virtual local
- CAN interface. A fully qualified address on CAN consists of
-
- - a unique CAN Identifier (CAN ID)
- - the CAN bus this CAN ID is transmitted on (e.g. can0)
-
- so in common use cases more than one virtual CAN interface is needed.
-
- The virtual CAN interfaces allow the transmission and reception of CAN
- frames without real CAN controller hardware. Virtual CAN network
- devices are usually named 'vcanX', like vcan0 vcan1 vcan2 ...
- When compiled as a module the virtual CAN driver module is called vcan.ko.
-
- Since Linux Kernel version 2.6.24 the vcan driver supports the Kernel
- netlink interface to create vcan network devices. The creation and
- removal of vcan network devices can be managed with the ip(8) tool:
-
- - Create a virtual CAN network interface:
- $ ip link add type vcan
-
- - Create a virtual CAN network interface with a specific name 'vcan42':
- $ ip link add dev vcan42 type vcan
-
- - Remove a (virtual CAN) network interface 'vcan42':
- $ ip link del vcan42
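-
- - Bring the (virtual CAN) network interface 'vcan42' up before use:
- $ ip link set vcan42 up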
-
- 6.5 The CAN network device driver interface
-
- The CAN network device driver interface provides a generic interface
- to set up, configure and monitor CAN network devices. The user can then
- configure the CAN device, like setting the bit-timing parameters, via
- the netlink interface using the program "ip" from the "IPROUTE2"
- utility suite. The following chapter describes briefly how to use it.
- Furthermore, the interface uses a common data structure and exports a
- set of common functions, which all real CAN network device drivers
- should use. Please have a look at the SJA1000 or MSCAN driver to
- understand how to use them. The name of the module is can-dev.ko.
-
- 6.5.1 Netlink interface to set/get devices properties
-
- The CAN device must be configured via the netlink interface. The supported
- netlink message types are defined and briefly described in
- "include/linux/can/netlink.h". CAN link support for the program "ip"
- of the IPROUTE2 utility suite is available and it can be used as shown
- below:
-
- - Setting CAN device properties:
-
- $ ip link set can0 type can help
- Usage: ip link set DEVICE type can
- [ bitrate BITRATE [ sample-point SAMPLE-POINT] ] |
- [ tq TQ prop-seg PROP_SEG phase-seg1 PHASE-SEG1
- phase-seg2 PHASE-SEG2 [ sjw SJW ] ]
-
- [ dbitrate BITRATE [ dsample-point SAMPLE-POINT] ] |
- [ dtq TQ dprop-seg PROP_SEG dphase-seg1 PHASE-SEG1
- dphase-seg2 PHASE-SEG2 [ dsjw SJW ] ]
-
- [ loopback { on | off } ]
- [ listen-only { on | off } ]
- [ triple-sampling { on | off } ]
- [ one-shot { on | off } ]
- [ berr-reporting { on | off } ]
- [ fd { on | off } ]
- [ fd-non-iso { on | off } ]
- [ presume-ack { on | off } ]
-
- [ restart-ms TIME-MS ]
- [ restart ]
-
- Where: BITRATE := { 1..1000000 }
- SAMPLE-POINT := { 0.000..0.999 }
- TQ := { NUMBER }
- PROP-SEG := { 1..8 }
- PHASE-SEG1 := { 1..8 }
- PHASE-SEG2 := { 1..8 }
- SJW := { 1..4 }
- RESTART-MS := { 0 | NUMBER }
-
- - Display CAN device details and statistics:
-
- $ ip -details -statistics link show can0
- 2: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 16 qdisc pfifo_fast state UP qlen 10
- link/can
- can <TRIPLE-SAMPLING> state ERROR-ACTIVE restart-ms 100
- bitrate 125000 sample_point 0.875
- tq 125 prop-seg 6 phase-seg1 7 phase-seg2 2 sjw 1
- sja1000: tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
- clock 8000000
- re-started bus-errors arbit-lost error-warn error-pass bus-off
- 41 17457 0 41 42 41
- RX: bytes packets errors dropped overrun mcast
- 140859 17608 17457 0 0 0
- TX: bytes packets errors dropped carrier collsns
- 861 112 0 41 0 0
-
- More info on the above output:
-
- "<TRIPLE-SAMPLING>"
- Shows the list of selected CAN controller modes: LOOPBACK,
- LISTEN-ONLY, or TRIPLE-SAMPLING.
-
- "state ERROR-ACTIVE"
- The current state of the CAN controller: "ERROR-ACTIVE",
- "ERROR-WARNING", "ERROR-PASSIVE", "BUS-OFF" or "STOPPED"
-
- "restart-ms 100"
- Automatic restart delay time. If set to a non-zero value, a
- restart of the CAN controller will be triggered automatically
- in case of a bus-off condition after the specified delay time
- in milliseconds. By default it's off.
-
- "bitrate 125000 sample-point 0.875"
- Shows the real bit-rate in bits/sec and the sample-point in the
- range 0.000..0.999. If the calculation of bit-timing parameters
- is enabled in the kernel (CONFIG_CAN_CALC_BITTIMING=y), the
- bit-timing can be defined by setting the "bitrate" argument.
- Optionally the "sample-point" can be specified. By default it's
- 0.000 assuming CIA-recommended sample-points.
-
- "tq 125 prop-seg 6 phase-seg1 7 phase-seg2 2 sjw 1"
- Shows the time quanta in ns, propagation segment, phase buffer
- segment 1 and 2 and the synchronisation jump width in units of
- tq. They allow to define the CAN bit-timing in a hardware
- independent format as proposed by the Bosch CAN 2.0 spec (see
- chapter 8 of http://www.semiconductors.bosch.de/pdf/can2spec.pdf).
-
- "sja1000: tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
- clock 8000000"
- Shows the bit-timing constants of the CAN controller, here the
- "sja1000". The minimum and maximum values of the time segment 1
- and 2, the synchronisation jump width in units of tq, the
- bitrate pre-scaler and the CAN system clock frequency in Hz.
- These constants could be used for user-defined (non-standard)
- bit-timing calculation algorithms in user-space.
-
- "re-started bus-errors arbit-lost error-warn error-pass bus-off"
- Shows the number of restarts, bus and arbitration lost errors,
- and the state changes to the error-warning, error-passive and
- bus-off state. RX overrun errors are listed in the "overrun"
- field of the standard network statistics.
-
- 6.5.2 Setting the CAN bit-timing
-
- The CAN bit-timing parameters can always be defined in a hardware
- independent format as proposed in the Bosch CAN 2.0 specification
- specifying the arguments "tq", "prop_seg", "phase_seg1", "phase_seg2"
- and "sjw":
-
- $ ip link set canX type can tq 125 prop-seg 6 \
- phase-seg1 7 phase-seg2 2 sjw 1
-
- If the kernel option CONFIG_CAN_CALC_BITTIMING is enabled, CIA
- recommended CAN bit-timing parameters will be calculated if the bit-
- rate is specified with the argument "bitrate":
-
- $ ip link set canX type can bitrate 125000
-
- Note that this works fine for the most common CAN controllers with
- standard bit-rates but may *fail* for exotic bit-rates or CAN system
- clock frequencies. Disabling CONFIG_CAN_CALC_BITTIMING saves some
- space and allows user-space tools to solely determine and set the
- bit-timing parameters. The CAN controller specific bit-timing
- constants can be used for that purpose. They are listed by the
- following command:
-
- $ ip -details link show can0
- ...
- sja1000: clock 8000000 tseg1 1..16 tseg2 1..8 sjw 1..4 brp 1..64 brp-inc 1
-
- 6.5.3 Starting and stopping the CAN network device
-
- A CAN network device is started or stopped as usual with the command
- "ifconfig canX up/down" or "ip link set canX up/down". Be aware that
- you *must* define proper bit-timing parameters for real CAN devices
- before you can start them, to avoid error-prone default settings:
-
- $ ip link set canX up type can bitrate 125000
-
- A device may enter the "bus-off" state if too many errors occur on
- the CAN bus. Then no more messages are received or sent. An automatic
- bus-off recovery can be enabled by setting the "restart-ms" to a
- non-zero value, e.g.:
-
- $ ip link set canX type can restart-ms 100
-
- Alternatively, the application may detect the "bus-off" condition
- by monitoring CAN error message frames and trigger a restart when
- appropriate with the command:
-
- $ ip link set canX type can restart
-
- Note that a restart will also create a CAN error message frame (see
- also chapter 3.3).
-
- 6.6 CAN FD (flexible data rate) driver support
-
- CAN FD capable CAN controllers support two different bitrates for the
- arbitration phase and the payload phase of the CAN FD frame. Therefore a
- second bit timing has to be specified in order to enable the CAN FD bitrate.
-
- Additionally CAN FD capable CAN controllers support up to 64 bytes of
- payload. The representation of this length in can_frame.can_dlc and
- canfd_frame.len for userspace applications and inside the Linux network
- layer is a plain value from 0 .. 64 instead of the CAN 'data length code'.
- The data length code was a 1:1 mapping to the payload length in the legacy
- CAN frames anyway. The payload length to the bus-relevant DLC mapping is
- only performed inside the CAN drivers, preferably with the helper
- functions can_dlc2len() and can_len2dlc().
-
- The CAN netdevice driver capabilities can be distinguished by the network
- devices maximum transfer unit (MTU):
-
- MTU = 16 (CAN_MTU) => sizeof(struct can_frame) => 'legacy' CAN device
- MTU = 72 (CANFD_MTU) => sizeof(struct canfd_frame) => CAN FD capable device
-
- The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
- N.B. CAN FD capable devices can also handle and send legacy CAN frames.
-
- When configuring CAN FD capable CAN controllers an additional 'data' bitrate
- has to be set. This bitrate for the data phase of the CAN FD frame has to be
- at least the bitrate which was configured for the arbitration phase. This
- second bitrate is specified analogously to the first bitrate, but the
- bitrate setting keywords for the 'data' bitrate start with 'd', e.g.
- dbitrate, dsample-point, dsjw or dtq. When a data bitrate is set
- within the configuration process the controller option "fd on" can be
- specified to enable the CAN FD mode in the CAN controller. This controller
- option also switches the device MTU to 72 (CANFD_MTU).
-
- The first CAN FD specification presented as whitepaper at the International
- CAN Conference 2012 needed to be improved for data integrity reasons.
- Therefore two CAN FD implementations have to be distinguished today:
-
- - ISO compliant: The ISO 11898-1:2015 CAN FD implementation (default)
- - non-ISO compliant: The CAN FD implementation following the 2012 whitepaper
-
- Finally there are three types of CAN FD controllers:
-
- 1. ISO compliant (fixed)
- 2. non-ISO compliant (fixed, like the M_CAN IP core v3.0.1 in m_can.c)
- 3. ISO/non-ISO CAN FD controllers (switchable, like the PEAK PCAN-USB FD)
-
- The current ISO/non-ISO mode is announced by the CAN controller driver via
- netlink and displayed by the 'ip' tool (controller option FD-NON-ISO).
- The ISO/non-ISO-mode can be altered by setting 'fd-non-iso {on|off}' for
- switchable CAN FD controllers only.
-
- Example configuring 500 kbit/s arbitration bitrate and 4 Mbit/s data bitrate:
-
- $ ip link set can0 up type can bitrate 500000 sample-point 0.75 \
- dbitrate 4000000 dsample-point 0.8 fd on
- $ ip -details link show can0
- 5: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UNKNOWN \
- mode DEFAULT group default qlen 10
- link/can promiscuity 0
- can <FD> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
- bitrate 500000 sample-point 0.750
- tq 50 prop-seg 14 phase-seg1 15 phase-seg2 10 sjw 1
- pcan_usb_pro_fd: tseg1 1..64 tseg2 1..16 sjw 1..16 brp 1..1024 \
- brp-inc 1
- dbitrate 4000000 dsample-point 0.800
- dtq 12 dprop-seg 7 dphase-seg1 8 dphase-seg2 4 dsjw 1
- pcan_usb_pro_fd: dtseg1 1..16 dtseg2 1..8 dsjw 1..4 dbrp 1..1024 \
- dbrp-inc 1
- clock 80000000
-
- Example when 'fd-non-iso on' is added on this switchable CAN FD adapter:
- can <FD,FD-NON-ISO> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
-
- 6.7 Supported CAN hardware
-
- Please check the "Kconfig" file in "drivers/net/can" to get an actual
- list of the support CAN hardware. On the SocketCAN project website
- (see chapter 7) there might be further drivers available, also for
- older kernel versions.
-
-7. SocketCAN resources
------------------------
-
- The Linux CAN / SocketCAN project resources (project site / mailing list)
- are referenced in the MAINTAINERS file in the Linux source tree.
- Search for CAN NETWORK [LAYERS|DRIVERS].
-
-8. Credits
-----------
-
- Oliver Hartkopp (PF_CAN core, filters, drivers, bcm, SJA1000 driver)
- Urs Thuermann (PF_CAN core, kernel integration, socket interfaces, raw, vcan)
- Jan Kizka (RT-SocketCAN core, Socket-API reconciliation)
- Wolfgang Grandegger (RT-SocketCAN core & drivers, Raw Socket-API reviews,
- CAN device driver interface, MSCAN driver)
- Robert Schwebel (design reviews, PTXdist integration)
- Marc Kleine-Budde (design reviews, Kernel 2.6 cleanups, drivers)
- Benedikt Spranger (reviews)
- Thomas Gleixner (LKML reviews, coding style, posting hints)
- Andrey Volkov (kernel subtree structure, ioctls, MSCAN driver)
- Matthias Brukner (first SJA1000 CAN netdevice implementation Q2/2003)
- Klaus Hitschler (PEAK driver integration)
- Uwe Koppe (CAN netdevices with PF_PACKET approach)
- Michael Schulze (driver layer loopback requirement, RT CAN drivers review)
- Pavel Pisa (Bit-timing calculation)
- Sascha Hauer (SJA1000 platform driver)
- Sebastian Haas (SJA1000 EMS PCI driver)
- Markus Plessing (SJA1000 EMS PCI driver)
- Per Dalen (SJA1000 Kvaser PCI driver)
- Sam Ravnborg (reviews, coding style, kbuild help)
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index b8b40753133e..25170ad7d25b 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -385,11 +385,6 @@ Switch configuration
avoid relying on what a previous software agent such as a bootloader/firmware
may have previously configured.
-- set_addr: Some switches require the programming of the management interface's
- Ethernet MAC address, switch drivers can also disable ageing of MAC addresses
- on the management interface and "hardcode"/"force" this MAC address for the
- CPU/management interface as an optimization
-
PHY devices and link management
-------------------------------
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 87814859cfc2..a4508ec1816b 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -1134,7 +1134,7 @@ The verifier's knowledge about the variable offset consists of:
mask and value; no bit should ever be 1 in both. For example, if a byte is read
into a register from memory, the register's top 56 bits are known zero, while
the low 8 are unknown - which is represented as the tnum (0x0; 0xff). If we
-then OR this with 0x40, we get (0x40; 0xcf), then if we add 1 we get (0x0;
+then OR this with 0x40, we get (0x40; 0xbf), then if we add 1 we get (0x0;
0x1ff), because of potential carries.
Besides arithmetic, the register state can also be updated by conditional
branches. For instance, if a SCALAR_VALUE is compared > 8, in the 'true' branch
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 057e9fdbfac9..e74d8e1da0e2 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -97,6 +97,46 @@ The include/net/mac802154.h defines following functions:
- void ieee802154_unregister_hw(struct ieee802154_hw *hw):
freeing registered PHY
+ - void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
+ u8 lqi):
+ telling the 802.15.4 module there is a new received frame in the skb with
+ the RF Link Quality Indicator (LQI) from the hardware device
+
+ - void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
+ bool ifs_handling):
+ telling the 802.15.4 module the frame in the skb has been or is going to
+ be transmitted through the hardware device
+
+The device driver must implement at least the following callbacks in the
+IEEE 802.15.4 operations structure:
+struct ieee802154_ops {
+ ...
+ int (*start)(struct ieee802154_hw *hw);
+ void (*stop)(struct ieee802154_hw *hw);
+ ...
+ int (*xmit_async)(struct ieee802154_hw *hw, struct sk_buff *skb);
+ int (*ed)(struct ieee802154_hw *hw, u8 *level);
+ int (*set_channel)(struct ieee802154_hw *hw, u8 page, u8 channel);
+ ...
+};
+
+ - int start(struct ieee802154_hw *hw):
+ handler that 802.15.4 module calls for the hardware device initialization.
+
+ - void stop(struct ieee802154_hw *hw):
+ handler that 802.15.4 module calls for the hardware device cleanup.
+
+ - int xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb):
+ handler that 802.15.4 module calls for each frame in the skb going to be
+ transmitted through the hardware device.
+
+ - int ed(struct ieee802154_hw *hw, u8 *level):
+ handler that 802.15.4 module calls for Energy Detection from the hardware
+ device.
+
+ - int set_channel(struct ieee802154_hw *hw, u8 page, u8 channel):
+ set radio for listening on specific channel of the hardware device.
+
Moreover IEEE 802.15.4 device operations structure should be filled.
Fake drivers
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 66e620866245..90966c2692d8 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -7,8 +7,10 @@ Contents:
:maxdepth: 2
batman-adv
+ can
kapi
z8530book
+ msg_zerocopy
.. only:: subproject
@@ -16,4 +18,3 @@ Contents:
=======
* :ref:`genindex`
-
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 46c7e1085efc..3f2c40d8e6aa 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -606,6 +606,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
This time period will grow exponentially when more blackhole issues
get detected right after Fastopen is re-enabled and will reset to
initial value when the blackhole issue goes away.
+ 0 to disable the blackhole detection.
By default, it is set to 1hr.
tcp_syn_retries - INTEGER
diff --git a/Documentation/networking/kapi.rst b/Documentation/networking/kapi.rst
index 580289f345da..f03ae64be8bc 100644
--- a/Documentation/networking/kapi.rst
+++ b/Documentation/networking/kapi.rst
@@ -145,3 +145,27 @@ PHY Support
.. kernel-doc:: drivers/net/phy/mdio_bus.c
:internal:
+
+PHYLINK
+-------
+
+ PHYLINK interfaces traditional network drivers with PHYLIB, fixed-links,
+ and SFF modules (eg, hot-pluggable SFP) that may contain PHYs. PHYLINK
+ provides management of the link state and link modes.
+
+.. kernel-doc:: include/linux/phylink.h
+ :internal:
+
+.. kernel-doc:: drivers/net/phy/phylink.c
+
+SFP support
+-----------
+
+.. kernel-doc:: drivers/net/phy/sfp-bus.c
+ :internal:
+
+.. kernel-doc:: include/linux/sfp.h
+ :internal:
+
+.. kernel-doc:: drivers/net/phy/sfp-bus.c
+ :export:
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index 77f6d7e25cfd..291a01264967 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -72,6 +72,10 @@ this flag, a process must first signal intent by setting a socket option:
if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
error(1, errno, "setsockopt zerocopy");
+Setting the socket option only works when the socket is in its initial
+(TCP_CLOSED) state. Trying to set the option for a socket returned by accept(),
+for example, will lead to an EBUSY error. In this case, the option should be set
+on the listening socket and it will be inherited by the accepted sockets.
Transmission
------------
diff --git a/Documentation/networking/netdev-features.txt b/Documentation/networking/netdev-features.txt
index 7413eb05223b..c77f9d57eb91 100644
--- a/Documentation/networking/netdev-features.txt
+++ b/Documentation/networking/netdev-features.txt
@@ -163,3 +163,12 @@ This requests that the NIC receive all possible frames, including errored
frames (such as bad FCS, etc). This can be helpful when sniffing a link with
bad packets on it. Some NICs may receive more packets if also put into normal
PROMISC mode.
+
+* rx-gro-hw
+
+This requests that the NIC enables Hardware GRO (generic receive offload).
+Hardware GRO is basically the exact reverse of TSO, and is generally
+stricter than Hardware LRO. A packet stream merged by Hardware GRO must
+be re-segmentable by GSO or TSO back to the exact original packet stream.
+Hardware GRO is dependent on RXCSUM since every packet successfully merged
+by hardware must also have the checksum verified by hardware.
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index 2c4e3354e128..d2fd78f85aa4 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -12,8 +12,8 @@ suitable sample script and configure that.
On a dual CPU:
ps aux | grep pkt
-root 129 0.3 0.0 0 0 ? SW 2003 523:20 [pktgen/0]
-root 130 0.3 0.0 0 0 ? SW 2003 509:50 [pktgen/1]
+root 129 0.3 0.0 0 0 ? SW 2003 523:20 [kpktgend_0]
+root 130 0.3 0.0 0 0 ? SW 2003 509:50 [kpktgend_1]
For monitoring and control pktgen creates:
@@ -113,9 +113,16 @@ Configuring devices
===================
This is done via the /proc interface, and most easily done via pgset
as defined in the sample scripts.
+You need to set the PGDEV environment variable to use functions from the
+sample scripts, e.g.:
+export PGDEV=/proc/net/pktgen/eth4@0
+source samples/pktgen/functions.sh
Examples:
+ pg_ctrl start starts injection.
+ pg_ctrl stop aborts injection. Also, ^C aborts generator.
+
pgset "clone_skb 1" sets the number of copies of the same packet
pgset "clone_skb 0" use single SKB for all transmits
pgset "burst 8" uses xmit_more API to queue 8 copies of the same
@@ -165,8 +172,12 @@ Examples:
IPSEC # IPsec encapsulation (needs CONFIG_XFRM)
NODE_ALLOC # node specific memory allocation
NO_TIMESTAMP # disable timestamping
+ pgset 'flag ![name]' Clear a flag to determine behaviour.
+ Note that you might need to use single quote in
+ interactive mode, so that your shell wouldn't expand
+ the specified flag as a history command.
- pgset spi SPI_VALUE Set specific SA used to transform packet.
+ pgset "spi [SPI_VALUE]" Set specific SA used to transform packet.
pgset "udp_src_min 9" set UDP source port min, If < udp_src_max, then
cycle through the port range.
@@ -207,8 +218,6 @@ Examples:
pgset "tos XX" set former IPv4 TOS field (e.g. "tos 28" for AF11 no ECN, default 00)
pgset "traffic_class XX" set former IPv6 TRAFFIC CLASS (e.g. "traffic_class B8" for EF no ECN, default 00)
- pgset stop aborts injection. Also, ^C aborts generator.
-
pgset "rate 300M" set rate to 300 Mb/s
pgset "ratep 1000000" set rate to 1Mpps
diff --git a/Documentation/networking/xfrm_device.txt b/Documentation/networking/xfrm_device.txt
new file mode 100644
index 000000000000..50c34ca65efe
--- /dev/null
+++ b/Documentation/networking/xfrm_device.txt
@@ -0,0 +1,135 @@
+
+===============================================
+XFRM device - offloading the IPsec computations
+===============================================
+Shannon Nelson <shannon.nelson@oracle.com>
+
+
+Overview
+========
+
+IPsec is a useful feature for securing network traffic, but the
+computational cost is high: a 10Gbps link can easily be brought down
+to under 1Gbps, depending on the traffic and link configuration.
+Luckily, there are NICs that offer a hardware based IPsec offload which
+can radically increase throughput and decrease CPU utilization. The XFRM
+Device interface allows NIC drivers to offer to the stack access to the
+hardware offload.
+
+Userland access to the offload is typically through a system such as
+libreswan or KAME/raccoon, but the iproute2 'ip xfrm' command set can
+be handy when experimenting. An example command might look something
+like this:
+
+ ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
+ reqid 0x07 replay-window 32 \
+ aead 'rfc4106(gcm(aes))' 0x44434241343332312423222114131211f4f3f2f1 128 \
+ sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
+ offload dev eth4 dir in
+
+Yes, that's ugly, but that's what shell scripts and/or libreswan are for.
+
+
+
+Callbacks to implement
+======================
+
+/* from include/linux/netdevice.h */
+struct xfrmdev_ops {
+ int (*xdo_dev_state_add) (struct xfrm_state *x);
+ void (*xdo_dev_state_delete) (struct xfrm_state *x);
+ void (*xdo_dev_state_free) (struct xfrm_state *x);
+ bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
+ struct xfrm_state *x);
+ void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+};
+
+The NIC driver offering IPsec offload will need to implement these
+callbacks to make the offload available to the network stack's
+XFRM subsystem. Additionally, the feature bits NETIF_F_HW_ESP and
+NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
+
+
+
+Flow
+====
+
+At probe time and before the call to register_netdev(), the driver should
+set up local data structures and XFRM callbacks, and set the feature bits.
+The XFRM code's listener will finish the setup on NETDEV_REGISTER.
+
+ adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
+ adapter->netdev->features |= NETIF_F_HW_ESP;
+ adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+When new SAs are set up with a request for "offload" feature, the
+driver's xdo_dev_state_add() will be given the new SA to be offloaded
+and an indication of whether it is for Rx or Tx. The driver should
+ - verify the algorithm is supported for offloads
+ - store the SA information (key, salt, target-ip, protocol, etc)
+ - enable the HW offload of the SA
+
+The driver can also set an offload_handle in the SA, an opaque void pointer
+that can be used to convey context into the fast-path offload requests.
+
+ xs->xso.offload_handle = context;
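+
+A minimal sketch of an xdo_dev_state_add() along these lines (struct
+my_priv, struct my_sa and the my_* helpers are hypothetical; the xfrm
+fields and the XFRM_OFFLOAD_INBOUND flag are from the API):
+
+ static int my_xdo_dev_state_add(struct xfrm_state *xs)
+ {
+	struct my_priv *priv = netdev_priv(xs->xso.dev);
+	struct my_sa *sa;
+
+	/* verify the algorithm is supported for offloads */
+	if (!xs->aead || xs->aead->alg_icv_len != 128)
+		return -EINVAL;
+
+	/* store the SA information (key, salt, target-ip, etc) */
+	sa = my_sa_alloc(priv);
+	if (!sa)
+		return -ENOSPC;
+	my_sa_fill(sa, xs);
+
+	/* enable the HW offload of the SA, Rx or Tx as indicated */
+	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
+		my_hw_enable_rx_sa(priv, sa);
+	else
+		my_hw_enable_tx_sa(priv, sa);
+
+	xs->xso.offload_handle = (unsigned long)sa;
+	return 0;
+ }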
+
+
+When the network stack is preparing an IPsec packet for an SA that has
+been set up for offload, it first calls into xdo_dev_offload_ok() with
+the skb and the intended offload state to ask the driver if the offload
+will be serviceable. This can check the packet information to be sure the
+offload can be supported (e.g. IPv4 or IPv6, no IPv4 options, etc) and
+return true or false to signify its support.
+
+When ready to send, the driver needs to inspect the Tx packet for the
+offload information, including the opaque context, and set up the packet
+send accordingly.
+
+ xs = xfrm_input_state(skb);
+ context = xs->xso.offload_handle;
+ set up HW for send
+
+The stack has already inserted the appropriate IPsec headers in the
+packet data, the offload just needs to do the encryption and fix up the
+header values.
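+
+Fleshed out slightly, the Tx inspection might look like this in the
+driver's transmit path (struct my_sa and my_hw_tx_ipsec() are
+hypothetical):
+
+ if (skb->sp && skb->sp->len) {
+	struct xfrm_state *xs = xfrm_input_state(skb);
+
+	if (xs && xs->xso.offload_handle) {
+		struct my_sa *sa = (struct my_sa *)xs->xso.offload_handle;
+
+		my_hw_tx_ipsec(tx_ring, skb, sa);
+	}
+ }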
+
+
+When a packet is received and the HW has indicated that it offloaded a
+decryption, the driver needs to add a reference to the decoded SA into
+the packet's skb. At this point the data should be decrypted but the
+IPsec headers are still in the packet data; they are removed later up
+the stack in xfrm_input().
+
+ find and hold the SA that was used for the Rx skb
+ get spi, protocol, and destination IP from packet headers
+ xs = find xs from (spi, protocol, dest_IP)
+ xfrm_state_hold(xs);
+
+ store the state information into the skb
+ skb->sp = secpath_dup(skb->sp);
+ skb->sp->xvec[skb->sp->len++] = xs;
+ skb->sp->olen++;
+
+ indicate the success and/or error status of the offload
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ xo->status = crypto_status;
+
+ hand the packet to napi_gro_receive() as usual
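+
+Put together as code, the sequence might look like this (the lookup
+helper and the priv/napi variables are hypothetical):
+
+ xs = my_find_rx_sa(priv, spi, proto, daddr);
+ if (xs) {
+	struct xfrm_offload *xo;
+
+	xfrm_state_hold(xs);
+	skb->sp = secpath_dup(skb->sp);
+	if (!skb->sp) {
+		xfrm_state_put(xs);	/* drop the hold on failure */
+	} else {
+		skb->sp->xvec[skb->sp->len++] = xs;
+		skb->sp->olen++;
+		xo = xfrm_offload(skb);
+		xo->flags = CRYPTO_DONE;
+		xo->status = CRYPTO_SUCCESS;
+	}
+ }
+ napi_gro_receive(&priv->napi, skb);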
+
+In ESN mode, xdo_dev_state_advance_esn() is called from xfrm_replay_advance_esn().
+The driver will check the packet sequence number and update the HW ESN state machine if needed.
+
+When the SA is removed by the user, the driver's xdo_dev_state_delete()
+is asked to disable the offload. Later, xdo_dev_state_free() is called
+from a garbage collection routine after all reference counts to the state
+have been dropped; at that point any remaining resources for the offload
+state can be cleared. How these are used by the driver will depend on
+specific hardware needs.
+
+When a netdev is set to DOWN the XFRM stack's netdev listener will call
+xdo_dev_state_delete() and xdo_dev_state_free() on any remaining offloaded
+states.
+
+
diff --git a/Documentation/networking/xfrm_proc.txt b/Documentation/networking/xfrm_proc.txt
index d0d8bafa9016..2eae619ab67b 100644
--- a/Documentation/networking/xfrm_proc.txt
+++ b/Documentation/networking/xfrm_proc.txt
@@ -5,13 +5,15 @@ Masahide NAKAMURA <nakam@linux-ipv6.org>
Transformation Statistics
-------------------------
-xfrm_proc is a statistics shown factor dropped by transformation
-for developer.
-It is a counter designed from current transformation source code
-and defined like linux private MIB.
-Inbound statistics
-~~~~~~~~~~~~~~~~~~
+The xfrm_proc interface is a set of statistics showing the numbers of packets
+dropped by the transformation code and why. These counters are defined
+as part of the Linux private MIB and can be viewed in
+/proc/net/xfrm_stat.
+
+
+Inbound errors
+~~~~~~~~~~~~~~
XfrmInError:
All errors which is not matched others
XfrmInBufferError:
@@ -46,6 +48,10 @@ XfrmInPolBlock:
Policy discards
XfrmInPolError:
Policy error
+XfrmAcquireError:
+ State hasn't been fully acquired before use
+XfrmFwdHdrError:
+ Forward routing of a packet is not allowed
Outbound errors
~~~~~~~~~~~~~~~
@@ -72,3 +78,5 @@ XfrmOutPolDead:
Policy is dead
XfrmOutPolError:
Policy error
+XfrmOutStateInvalid:
+ State is invalid, perhaps expired
diff --git a/Documentation/perf/arm_dsu_pmu.txt b/Documentation/perf/arm_dsu_pmu.txt
new file mode 100644
index 000000000000..d611e15f5add
--- /dev/null
+++ b/Documentation/perf/arm_dsu_pmu.txt
@@ -0,0 +1,28 @@
+ARM DynamIQ Shared Unit (DSU) PMU
+==================================
+
+The ARM DynamIQ Shared Unit integrates one or more cores with an L3 memory system,
+control logic and external interfaces to form a multicore cluster. The PMU
+allows counting the various events related to the L3 cache, Snoop Control Unit
+etc, using 32-bit independent counters. It also provides a 64-bit cycle counter.
+
+The PMU can only be accessed via CPU system registers and is common to the
+cores connected to the same DSU. Like most of the other uncore PMUs, the DSU
+PMU doesn't support process specific events and cannot be used in sampling mode.
+
+The DSU provides a bitmap for a subset of implemented events via hardware
+registers. There is no way for the driver to determine if the other events
+are available or not. Hence the driver exposes only those events advertised
+by the DSU, in "events" directory under :
+
+ /sys/bus/event_sources/devices/arm_dsu_<N>/
+
+The user should refer to the TRM of the product to figure out the supported events
+and use the raw event code for the unlisted events.
+
+The driver also exposes the CPUs connected to the DSU instance in "associated_cpus".
+
+
+e.g. usage:
+
+ perf stat -a -e arm_dsu_0/cycles/
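+
+For an event not advertised by the DSU, the raw event code from the TRM
+can be passed through the "event" format field (0x19 below is only an
+illustrative value):
+
+ perf stat -a -e arm_dsu_0/event=0x19/ sleep 1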
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 704cd36079b8..8eaf9ee24d43 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -994,6 +994,17 @@ into D0 going forward), but if it is in runtime suspend in pci_pm_thaw_noirq(),
the function will set the power.direct_complete flag for it (to make the PM core
skip the subsequent "thaw" callbacks for it) and return.
+Setting the DPM_FLAG_LEAVE_SUSPENDED flag means that the driver prefers the
+device to be left in suspend after system-wide transitions to the working state.
+This flag is checked by the PM core, but the PCI bus type informs the PM core
+which devices may be left in suspend from its perspective (that happens during
+the "noirq" phase of system-wide suspend and analogous transitions). Next, it
+uses the dev_pm_may_skip_resume() helper to decide whether or not to return from
+pci_pm_resume_noirq() early: if dev_pm_may_skip_resume() returns "true" for a
+device, the PM core will skip the remaining resume callbacks for it during the
+transition under way and will set its runtime PM status to "suspended".
+
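+For example, a PCI driver might express this preference at probe time; a
+minimal sketch, assuming the generic dev_pm_set_driver_flags() helper:
+
+	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_LEAVE_SUSPENDED);
+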
3.2. Device Runtime Power Management
------------------------------------
In addition to providing device power management callbacks PCI device drivers
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index 757e3b53dc11..eff4dcaaa252 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -23,16 +23,12 @@ struct regulator_consumer_supply {
e.g. for the machine above
static struct regulator_consumer_supply regulator1_consumers[] = {
-{
- .dev_name = "dev_name(consumer B)",
- .supply = "Vcc",
-},};
+ REGULATOR_SUPPLY("Vcc", "consumer B"),
+};
static struct regulator_consumer_supply regulator2_consumers[] = {
-{
- .dev = "dev_name(consumer A"),
- .supply = "Vcc",
-},};
+ REGULATOR_SUPPLY("Vcc", "consumer A"),
+};
This maps Regulator-1 to the 'Vcc' supply for Consumer B and maps Regulator-2
to the 'Vcc' supply for Consumer A.
@@ -78,20 +74,20 @@ static struct regulator_init_data regulator2_data = {
Finally the regulator devices must be registered in the usual manner.
static struct platform_device regulator_devices[] = {
-{
- .name = "regulator",
- .id = DCDC_1,
- .dev = {
- .platform_data = &regulator1_data,
+ {
+ .name = "regulator",
+ .id = DCDC_1,
+ .dev = {
+ .platform_data = &regulator1_data,
+ },
},
-},
-{
- .name = "regulator",
- .id = DCDC_2,
- .dev = {
- .platform_data = &regulator2_data,
+ {
+ .name = "regulator",
+ .id = DCDC_2,
+ .dev = {
+ .platform_data = &regulator2_data,
+ },
},
-},
};
/* register regulator 1 device */
platform_device_register(&regulator_devices[0]);
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
index b3170671a1df..6816c12d6956 100644
--- a/Documentation/process/kernel-enforcement-statement.rst
+++ b/Documentation/process/kernel-enforcement-statement.rst
@@ -68,6 +68,7 @@ we might work for today, have in the past, or will in the future.
- Paul Burton
- Javier Martinez Canillas
- Rob Clark
+ - Kees Cook (Google)
- Jonathan Corbet
- Dennis Dalessandro
- Vivien Didelot (Savoir-faire Linux)
@@ -118,6 +119,7 @@ we might work for today, have in the past, or will in the future.
- Mike Marshall
- Chris Mason
- Paul E. McKenney
+ - Arnaldo Carvalho de Melo
- David S. Miller
- Ingo Molnar
- Kuninori Morimoto
@@ -136,6 +138,7 @@ we might work for today, have in the past, or will in the future.
- Anna Schumaker
- Jes Sorensen
- K.Y. Srinivasan
+ - David Sterba (SUSE)
- Heiko Stuebner
- Jiri Kosina (SUSE)
- Willy Tarreau
@@ -143,6 +146,7 @@ we might work for today, have in the past, or will in the future.
- Linus Torvalds
- Thierry Reding
- Rik van Riel
+ - Luis R. Rodriguez
- Geert Uytterhoeven (Glider bvba)
- Eduardo Valentin (Amazon.com)
- Daniel Vetter
diff --git a/Documentation/process/license-rules.rst b/Documentation/process/license-rules.rst
new file mode 100644
index 000000000000..408f77dc6157
--- /dev/null
+++ b/Documentation/process/license-rules.rst
@@ -0,0 +1,370 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Linux kernel licensing rules
+============================
+
+The Linux Kernel is provided under the terms of the GNU General Public
+License version 2 only (GPL-2.0), as published by the Free Software
+Foundation, and provided in the COPYING file. This documentation file is
+not meant to replace the COPYING file, but describes how each source file
+should be annotated to make the license under which it is governed clear
+and unambiguous.
+
+The license in the COPYING file applies to the kernel source as a whole,
+though individual source files can have a different license which is
+required to be compatible with the GPL-2.0::
+
+ GPL-1.0+ : GNU General Public License v1.0 or later
+ GPL-2.0+ : GNU General Public License v2.0 or later
+ LGPL-2.0 : GNU Library General Public License v2 only
+ LGPL-2.0+ : GNU Library General Public License v2 or later
+ LGPL-2.1 : GNU Lesser General Public License v2.1 only
+ LGPL-2.1+ : GNU Lesser General Public License v2.1 or later
+
+Aside from that, individual files can be provided under a dual license,
+e.g. one of the compatible GPL variants and alternatively under a
+permissive license like BSD, MIT etc.
+
+The User-space API (UAPI) header files, which describe the interface of
+user-space programs to the kernel, are a special case. According to the
+note in the kernel COPYING file, the syscall interface is a clear boundary,
+which does not extend the GPL requirements to any software which uses it to
+communicate with the kernel. Because the UAPI headers must be includable
+into any source files which create an executable running on the Linux
+kernel, the exception must be documented by a special license expression.
+
+The common way of expressing the license of a source file is to add the
+matching boilerplate text into the top comment of the file. Due to
+formatting, typos etc. these "boilerplates" are hard to validate for
+tools which are used in the context of license compliance.
+
+An alternative to boilerplate text is the use of Software Package Data
+Exchange (SPDX) license identifiers in each source file. SPDX license
+identifiers are machine parsable and precise shorthands for the license
+under which the content of the file is contributed. SPDX license
+identifiers are managed by the SPDX Workgroup at the Linux Foundation and
+have been agreed on by partners throughout the industry, tool vendors, and
+legal teams. For further information see https://spdx.org/
+
+The Linux kernel requires the precise SPDX identifier in all source files.
+The valid identifiers used in the kernel are explained in the section
+`License identifiers`_ and have been retrieved from the official SPDX
+license list at https://spdx.org/licenses/ along with the license texts.
+
+License identifier syntax
+-------------------------
+
+1. Placement:
+
+ The SPDX license identifier in kernel files shall be added at the first
+ possible line in a file which can contain a comment. For the majority
+ of files this is the first line, except for scripts which require the
+ '#!PATH_TO_INTERPRETER' in the first line. For those scripts the SPDX
+ identifier goes into the second line.
+
+|
+
+2. Style:
+
+ The SPDX license identifier is added in form of a comment. The comment
+ style depends on the file type::
+
+ C source: // SPDX-License-Identifier: <SPDX License Expression>
+ C header: /* SPDX-License-Identifier: <SPDX License Expression> */
+ ASM: /* SPDX-License-Identifier: <SPDX License Expression> */
+ scripts: # SPDX-License-Identifier: <SPDX License Expression>
+ .rst: .. SPDX-License-Identifier: <SPDX License Expression>
+ .dts{i}: // SPDX-License-Identifier: <SPDX License Expression>
+
+ If a specific tool cannot handle the standard comment style, then the
+ appropriate comment mechanism which the tool accepts shall be used. This
+ is the reason for having the "/\* \*/" style comment in C header
+ files. There was build breakage observed with generated .lds files where
+ 'ld' failed to parse the C++ comment. This has been fixed by now, but
+ there are still older assembler tools which cannot handle C++ style
+ comments.
+
+|
+
+3. Syntax:
+
+ A <SPDX License Expression> is either an SPDX short form license
+ identifier found on the SPDX License List, or the combination of two
+ SPDX short form license identifiers separated by "WITH" when a license
+ exception applies. When multiple licenses apply, an expression consists
+ of keywords "AND", "OR" separating sub-expressions and surrounded by
+ "(", ")" .
+
+ License identifiers for licenses like [L]GPL with the 'or later' option
+ are constructed by using a "+" to indicate the 'or later' option.::
+
+ // SPDX-License-Identifier: GPL-2.0+
+ // SPDX-License-Identifier: LGPL-2.1+
+
+ WITH should be used when a modifier to a license is needed.
+ For example, the Linux kernel UAPI files use the expression::
+
+ // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ // SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note
+
+ Other examples using WITH exceptions found in the kernel are::
+
+ // SPDX-License-Identifier: GPL-2.0 WITH mif-exception
+ // SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0
+
+ Exceptions can only be used with particular License identifiers. The
+ valid License identifiers are listed in the tags of the exception text
+ file. For details see the point `Exceptions`_ in the chapter `License
+ identifiers`_.
+
+ OR should be used if the file is dual licensed and only one license is
+ to be selected. For example, some dtsi files are available under dual
+ licenses::
+
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+ Examples from the kernel for license expressions in dual licensed files::
+
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+ // SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ // SPDX-License-Identifier: GPL-2.0 OR MPL-1.1
+ // SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT
+ // SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause OR OpenSSL
+
+ AND should be used if the file has multiple licenses whose terms all
+ apply to the use of the file. For example, if code is inherited from another
+ project and permission has been given to put it in the kernel, but the
+ original license terms need to remain in effect::
+
+ // SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) AND MIT
+
+ Another example where both sets of license terms need to be
+ adhered to is::
+
+ // SPDX-License-Identifier: GPL-1.0+ AND LGPL-2.1+
+
+License identifiers
+-------------------
+
+The licenses currently used, as well as the licenses for code added to the
+kernel, can be broken down into:
+
+1. _`Preferred licenses`:
+
+ Whenever possible these licenses should be used as they are known to be
+ fully compatible and widely used. These licenses are available from the
+ directory::
+
+ LICENSES/preferred/
+
+ in the kernel source tree.
+
+ The files in this directory contain the full license text and
+ `Metatags`_. The file names are identical to the SPDX license
+ identifier which shall be used for the license in source files.
+
+ Examples::
+
+ LICENSES/preferred/GPL-2.0
+
+ Contains the GPL version 2 license text and the required metatags::
+
+ LICENSES/preferred/MIT
+
+ Contains the MIT license text and the required metatags
+
+ _`Metatags`:
+
+ The following meta tags must be available in a license file:
+
+ - Valid-License-Identifier:
+
+ One or more lines which declare which License Identifiers are valid
+ inside the project to reference this particular license text. Usually
+ this is a single valid identifier, but e.g. for licenses with the 'or
+ later' options two identifiers are valid.
+
+ - SPDX-URL:
+
+ The URL of the SPDX page which contains additional information related
+ to the license.
+
+ - Usage-Guide:
+
+ Freeform text for usage advice. The text must include correct examples
+ for the SPDX license identifiers as they should be put into source
+ files according to the `License identifier syntax`_ guidelines.
+
+ - License-Text:
+
+ All text after this tag is treated as the original license text.
+
+ File format examples::
+
+ Valid-License-Identifier: GPL-2.0
+ Valid-License-Identifier: GPL-2.0+
+ SPDX-URL: https://spdx.org/licenses/GPL-2.0.html
+ Usage-Guide:
+ To use this license in source code, put one of the following SPDX
+ tag/value pairs into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ For 'GNU General Public License (GPL) version 2 only' use:
+ SPDX-License-Identifier: GPL-2.0
+ For 'GNU General Public License (GPL) version 2 or any later version' use:
+ SPDX-License-Identifier: GPL-2.0+
+ License-Text:
+ Full license text
+
+ ::
+
+ SPDX-License-Identifier: MIT
+ SPDX-URL: https://spdx.org/licenses/MIT.html
+ Usage-Guide:
+ To use this license in source code, put the following SPDX
+ tag/value pair into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ SPDX-License-Identifier: MIT
+ License-Text:
+ Full license text
+
+|
+
+2. Not recommended licenses:
+
+ These licenses should only be used for existing code or for importing
+ code from a different project. These licenses are available from the
+ directory::
+
+ LICENSES/other/
+
+ in the kernel source tree.
+
+ The files in this directory contain the full license text and
+ `Metatags`_. The file names are identical to the SPDX license
+ identifier which shall be used for the license in source files.
+
+ Examples::
+
+ LICENSES/other/ISC
+
+ Contains the Internet Systems Consortium license text and the required
+ metatags::
+
+ LICENSES/other/ZLib
+
+ Contains the ZLIB license text and the required metatags.
+
+ Metatags:
+
+ The metatag requirements for 'other' licenses are identical to the
+ requirements of the `Preferred licenses`_.
+
+ File format example::
+
+ Valid-License-Identifier: ISC
+ SPDX-URL: https://spdx.org/licenses/ISC.html
+ Usage-Guide:
+ Usage of this license in the kernel for new code is discouraged
+ and it should solely be used for importing code from an already
+ existing project.
+ To use this license in source code, put the following SPDX
+ tag/value pair into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ SPDX-License-Identifier: ISC
+ License-Text:
+ Full license text
+
+|
+
+3. _`Exceptions`:
+
+ Some licenses can be amended with exceptions which grant certain rights
+ which the original license does not. These exceptions are available
+ from the directory::
+
+ LICENSES/exceptions/
+
+ in the kernel source tree. The files in this directory contain the full
+ exception text and the required `Exception Metatags`_.
+
+ Examples::
+
+ LICENSES/exceptions/Linux-syscall-note
+
+ Contains the Linux syscall exception as documented in the COPYING
+ file of the Linux kernel, which is used for UAPI header files.
+ e.g. /\* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note \*/::
+
+ LICENSES/exceptions/GCC-exception-2.0
+
+ Contains the GCC 'linking exception' which allows linking any binary,
+ independent of its license, against the compiled version of a file marked
+ with this exception. This is required for creating runnable executables
+ from source code which is not compatible with the GPL.
+
+ _`Exception Metatags`:
+
+ The following meta tags must be available in an exception file:
+
+ - SPDX-Exception-Identifier:
+
+ One exception identifier which can be used with SPDX license
+ identifiers.
+
+ - SPDX-URL:
+
+ The URL of the SPDX page which contains additional information related
+ to the exception.
+
+ - SPDX-Licenses:
+
+ A comma separated list of SPDX license identifiers for which the
+ exception can be used.
+
+ - Usage-Guide:
+
+ Freeform text for usage advice. The text must be followed by correct
+ examples for the SPDX license identifiers as they should be put into
+ source files according to the `License identifier syntax`_ guidelines.
+
+ - Exception-Text:
+
+ All text after this tag is treated as the original exception text.
+
+ File format examples::
+
+ SPDX-Exception-Identifier: Linux-syscall-note
+ SPDX-URL: https://spdx.org/licenses/Linux-syscall-note.html
+ SPDX-Licenses: GPL-2.0, GPL-2.0+, GPL-1.0+, LGPL-2.0, LGPL-2.0+, LGPL-2.1, LGPL-2.1+
+ Usage-Guide:
+ This exception is used together with one of the above SPDX-Licenses
+ to mark user-space API (uapi) header files so they can be included
+ into non GPL compliant user-space application code.
+ To use this exception add it with the keyword WITH to one of the
+ identifiers in the SPDX-Licenses tag:
+ SPDX-License-Identifier: <SPDX-License> WITH Linux-syscall-note
+ Exception-Text:
+ Full exception text
+
+ ::
+
+ SPDX-Exception-Identifier: GCC-exception-2.0
+ SPDX-URL: https://spdx.org/licenses/GCC-exception-2.0.html
+ SPDX-Licenses: GPL-2.0, GPL-2.0+
+ Usage-Guide:
+ The "GCC Runtime Library exception 2.0" is used together with one
+ of the above SPDX-Licenses for code imported from the GCC runtime
+ library.
+ To use this exception add it with the keyword WITH to one of the
+ identifiers in the SPDX-Licenses tag:
+ SPDX-License-Identifier: <SPDX-License> WITH GCC-exception-2.0
+ Exception-Text:
+ Full exception text
+
+
+All SPDX license identifiers and exceptions must have a corresponding file
+in the LICENSES subdirectories. This is required to allow tool
+verification (e.g. checkpatch.pl) and to have the licenses ready to read
+and extract right from the source, which is recommended by various FOSS
+organizations, e.g. the `FSFE REUSE initiative <https://reuse.software/>`_.
diff --git a/Documentation/process/submit-checklist.rst b/Documentation/process/submit-checklist.rst
index a0d9d34bfb6d..367353c54949 100644
--- a/Documentation/process/submit-checklist.rst
+++ b/Documentation/process/submit-checklist.rst
@@ -37,7 +37,9 @@ and elsewhere regarding submitting Linux kernel patches.
You should be able to justify all violations that remain in
your patch.
-6) Any new or modified ``CONFIG`` options don't muck up the config menu.
+6) Any new or modified ``CONFIG`` options do not muck up the config menu and
+ default to off unless they meet the exception criteria documented in
+ ``Documentation/kbuild/kconfig-language.txt`` Menu attributes: default value.
7) All new ``Kconfig`` options have help text.
diff --git a/Documentation/security/credentials.rst b/Documentation/security/credentials.rst
index 66a2e24939d8..5bb7125faeee 100644
--- a/Documentation/security/credentials.rst
+++ b/Documentation/security/credentials.rst
@@ -451,6 +451,13 @@ checks and hooks done. Both the current and the proposed sets of credentials
are available for this purpose as current_cred() will return the current set
still at this point.
+When replacing the group list, the new list must be sorted before it
+is added to the credential, as a binary search is used to test for
+membership. In practice, this means :c:func:`groups_sort` should be
+called before :c:func:`set_groups` or :c:func:`set_current_groups`.
+:c:func:`groups_sort` must not be called on a ``struct group_info`` which
+is shared as it may permute elements as part of the sorting process
+even if the array is already sorted.
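+
+A minimal sketch (error handling elided; the GID values are arbitrary)::
+
+	struct group_info *gi = groups_alloc(2);
+
+	gi->gid[0] = KGIDT_INIT(1000);
+	gi->gid[1] = KGIDT_INIT(50);
+	groups_sort(gi);	/* must run before set_groups() */
+	set_groups(new, gi);
+	put_group_info(gi);
+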
When the credential set is ready, it should be committed to the current process
by calling::
diff --git a/Documentation/security/self-protection.rst b/Documentation/security/self-protection.rst
index 60c8bd8b77bf..0f53826c78b9 100644
--- a/Documentation/security/self-protection.rst
+++ b/Documentation/security/self-protection.rst
@@ -270,6 +270,21 @@ attacks, it is important to defend against exposure of both kernel memory
addresses and kernel memory contents (since they may contain kernel
addresses or other sensitive things like canary values).
+Kernel addresses
+----------------
+
+Printing kernel addresses to userspace leaks sensitive information about
+the kernel memory layout. Care should be exercised when using any printk
+specifier that prints the raw address, currently %px, %p[ad] (and %p[sSb]
+in certain circumstances [*]). Any file written to using one of these
+specifiers should be readable only by privileged processes.
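+
+For instance, a raw pointer intended only for a root-readable debugfs
+file could use %px, while anything that may reach the ordinary log
+should rely on the hashed %p (illustrative only)::
+
+	pr_info("object at %p\n", ptr);		/* hashed on 4.15+ */
+	seq_printf(m, "object at %px\n", ptr);	/* raw; privileged-only file */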
+
+Kernels 4.14 and older printed the raw address using %p. As of 4.15-rc1
+addresses printed with the specifier %p are hashed before printing.
+
+[*] If KALLSYMS is enabled and symbol lookup fails, the raw address is
+printed. If KALLSYMS is not enabled the raw address is printed.
+
Unique identifiers
------------------
diff --git a/Documentation/sparc/oradax/dax-hv-api.txt b/Documentation/sparc/oradax/dax-hv-api.txt
new file mode 100644
index 000000000000..73e8d506cf64
--- /dev/null
+++ b/Documentation/sparc/oradax/dax-hv-api.txt
@@ -0,0 +1,1433 @@
+Excerpt from UltraSPARC Virtual Machine Specification
+Compiled from version 3.0.20+15
+Publication date 2017-09-25 08:21
+Copyright © 2008, 2015 Oracle and/or its affiliates. All rights reserved.
+Extracted via "pdftotext -f 547 -l 572 -layout sun4v_20170925.pdf"
+Authors:
+ Charles Kunzman
+ Sam Glidden
+ Mark Cianchetti
+
+
+Chapter 36. Coprocessor services
+ The following APIs provide access via the Hypervisor to hardware assisted data processing functionality.
+ These APIs may only be provided by certain platforms, and may not be available to all virtual machines
+ even on supported platforms. Restrictions on the use of these APIs may be imposed in order to support
+ live-migration and other system management activities.
+
+36.1. Data Analytics Accelerator
+ The Data Analytics Accelerator (DAX) functionality is a collection of hardware coprocessors that provide
+ high speed processing of database-centric operations. The coprocessors may support one or more of
+ the following data query operations: search, extraction, compression, decompression, and translation. The
+ functionality offered may vary by virtual machine implementation.
+
+ The DAX is a virtual device to sun4v guests, with supported data operations indicated by the virtual device
+ compatibility property. Functionality is accessed through the submission of Command Control Blocks
+ (CCBs) via the ccb_submit API function. The operations are processed asynchronously, with the status
+ of the submitted operations reported through a Completion Area linked to each CCB. Each CCB has a
+ separate Completion Area and, unless execution order is specifically restricted through the use of serial-
+ conditional flags, the execution order of submitted CCBs is arbitrary. Likewise, the time to completion
+ for a given CCB is never guaranteed.
+
+ Guest software may implement a software timeout on CCB operations, and if the timeout is exceeded, the
+ operation may be cancelled or killed via the ccb_kill API function. It is recommended for guest software
+ to implement a software timeout to account for certain RAS errors which may result in lost CCBs. It is
+ recommended that such an implementation use the ccb_info API function to check the status of a CCB prior to
+ killing it in order to determine if the CCB is still in queue, or may have been lost due to a RAS error.
+
+ There is no fixed limit on the number of outstanding CCBs guest software may have queued in the virtual
+ machine; however, internal resource limitations within the virtual machine can cause CCB submissions
+ to be temporarily rejected with EWOULDBLOCK. In such cases, guests should continue to attempt
+ submissions until they succeed; waiting for an outstanding CCB to complete is not necessary, and would
+ not be a guarantee that a future submission would succeed.
+
+ The availability of DAX coprocessor command service is indicated by the presence of the DAX virtual
+ device node in the guest MD (Section 8.24.17, “Database Analytics Accelerators (DAX) virtual-device
+ node”).
+
+36.1.1. DAX Compatibility Property
+ The query functionality may vary based on the compatibility property of the virtual device:
+
+36.1.1.1. "ORCL,sun4v-dax" Device Compatibility
+ Available CCB commands:
+
+ • No-op/Sync
+
+ • Extract
+
+ • Scan Value
+
+ • Inverted Scan Value
+
+ • Scan Range
+
+ • Inverted Scan Range
+
+ • Translate
+
+ • Inverted Translate
+
+ • Select
+
+ See Section 36.2.1, “Query CCB Command Formats” for the corresponding CCB input and output formats.
+
+ Only version 0 CCBs are available.
+
+36.1.1.2. "ORCL,sun4v-dax-fc" Device Compatibility
+ "ORCL,sun4v-dax-fc" is compatible with the "ORCL,sun4v-dax" interface, and includes additional CCB
+ bit fields and controls.
+
+36.1.1.3. "ORCL,sun4v-dax2" Device Compatibility
+ Available CCB commands:
+
+ • No-op/Sync
+
+ • Extract
+
+ • Scan Value
+
+ • Inverted Scan Value
+
+ • Scan Range
+
+ • Inverted Scan Range
+
+ • Translate
+
+ • Inverted Translate
+
+ • Select
+
+ See Section 36.2.1, “Query CCB Command Formats” for the corresponding CCB input and output formats.
+
+ Version 0 and 1 CCBs are available. Only version 0 CCBs may use Huffman encoded data, whereas only
+ version 1 CCBs may use OZIP.
+
+36.1.2. DAX Virtual Device Interrupts
+ The DAX virtual device has multiple interrupts associated with it which may be used by the guest if
+ desired. The number of device interrupts available to the guest is indicated in the virtual device node of the
+ guest MD (Section 8.24.17, “Database Analytics Accelerators (DAX) virtual-device node”). If the device
+ node indicates N interrupts available, the guest may use any value from 0 to N - 1 (inclusive) in a CCB
+ interrupt number field. Using values outside this range will result in the CCB being rejected for an invalid
+ field value.
+
+ The interrupts may be bound and managed using the standard sun4v device interrupts API (Chapter 16,
+ Device interrupt services). Sysino interrupts are not available for DAX devices.
+
+36.2. Coprocessor Control Block (CCB)
+ CCBs are either 64 or 128 bytes long, depending on the operation type. The exact contents of the CCB
+ are command specific, but all CCBs contain at least one memory buffer address. All memory locations
+referenced by a CCB must be pinned in memory until the CCB either completes execution or is killed
+via the ccb_kill API call. Changes in virtual address mappings occurring after CCB submission are not
+guaranteed to be visible, and as such all virtual address updates need to be synchronized with CCB
+execution.
+
+All CCBs begin with a common 32-bit header.
+
+Table 36.1. CCB Header Format
+Bits Field Description
+[31:28] CCB version. For API version 2.0: set to 1 if CCB uses OZIP encoding; set to 0 if the CCB
+ uses Huffman encoding; otherwise either 0 or 1. For API version 1.0: always set to 0.
+[27] When API version 2.0 is negotiated, this is the Pipeline Flag [512]. It is reserved in
+ API version 1.0
+[26] Long CCB flag [512]
+[25] Conditional synchronization flag [512]
+[24] Serial synchronization flag
+[23:16] CCB operation code:
+ 0x00 No Operation (No-op) or Sync
+ 0x01 Extract
+ 0x02 Scan Value
+ 0x12 Inverted Scan Value
+ 0x03 Scan Range
+ 0x13 Inverted Scan Range
+ 0x04 Translate
+ 0x14 Inverted Translate
+ 0x05 Select
+[15:13] Reserved
+[12:11] Table address type
+ 0b'00 No address
+ 0b'01 Alternate context virtual address
+ 0b'10 Real address
+ 0b'11 Primary context virtual address
+[10:8] Output/Destination address type
+ 0b'000 No address
+ 0b'001 Alternate context virtual address
+ 0b'010 Real address
+ 0b'011 Primary context virtual address
+ 0b'100 Reserved
+ 0b'101 Reserved
+ 0b'110 Reserved
+ 0b'111 Reserved
+[7:5] Secondary source address type
+ 0b'000 No address
+ 0b'001 Alternate context virtual address
+ 0b'010 Real address
+ 0b'011 Primary context virtual address
+ 0b'100 Reserved
+ 0b'101 Reserved
+ 0b'110 Reserved
+ 0b'111 Reserved
+[4:2] Primary source address type
+ 0b'000 No address
+ 0b'001 Alternate context virtual address
+ 0b'010 Real address
+ 0b'011 Primary context virtual address
+ 0b'100 Reserved
+ 0b'101 Reserved
+ 0b'110 Reserved
+ 0b'111 Reserved
+[1:0] Completion area address type
+ 0b'00 No address
+ 0b'01 Alternate context virtual address
+ 0b'10 Real address
+ 0b'11 Primary context virtual address
+
+The Long CCB flag indicates whether the submitted CCB is 64 or 128 bytes long; value is 0 for 64 bytes
+and 1 for 128 bytes.
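+
+(Editorial illustration, not part of the specification: the header layout
+of Table 36.1 could be packed like this in C, omitting the pipeline and
+table address-type fields for brevity.)
+
+    #include <stdint.h>
+    #include <stdbool.h>
+
+    /* Pack the common 32-bit CCB header per Table 36.1. */
+    static inline uint32_t ccb_header(uint8_t version, bool longccb,
+                                      bool cond, bool serial, uint8_t opcode,
+                                      uint8_t out_at, uint8_t pri_at,
+                                      uint8_t compl_at)
+    {
+            return ((uint32_t)(version & 0xf) << 28) |  /* [31:28] version */
+                   ((uint32_t)longccb << 26) |          /* [26] long CCB */
+                   ((uint32_t)cond << 25) |             /* [25] conditional */
+                   ((uint32_t)serial << 24) |           /* [24] serial */
+                   ((uint32_t)opcode << 16) |           /* [23:16] opcode */
+                   ((uint32_t)(out_at & 0x7) << 8) |    /* [10:8] output type */
+                   ((uint32_t)(pri_at & 0x7) << 2) |    /* [4:2] primary type */
+                   ((uint32_t)compl_at & 0x3);          /* [1:0] completion */
+    }
+
+    /* e.g. a short Extract CCB using primary context virtual addresses */
+    uint32_t hdr = ccb_header(0, false, false, false, 0x01, 3, 3, 3);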
+
+The Serial and Conditional flags allow simple relative ordering between CCBs. Any CCB with the Serial
+flag set will execute sequentially relative to any previous CCB that is also marked as Serial in the same
+CCB submission. CCBs without the Serial flag set execute independently, even if they are between CCBs
+with the Serial flag set. CCBs marked solely with the Serial flag will execute upon the completion of the
+previous Serial CCB, regardless of the completion status of that CCB. The Conditional flag allows CCBs
+to conditionally execute based on the successful execution of the closest CCB marked with the Serial flag.
+A CCB may only be conditional on exactly one CCB, however, a CCB may be marked both Conditional
+and Serial to allow execution chaining. The flags do NOT allow fan-out chaining, where multiple CCBs
+execute in parallel based on the completion of another CCB.
+
+The Pipeline flag is an optimization that directs the output of one CCB (the "source" CCB) directly to
+the input of the next CCB (the "target" CCB). The target CCB thus does not need to read the input from
+memory. The Pipeline flag is advisory and may be dropped.
+
+Both the Pipeline and Serial bits must be set in the source CCB. The Conditional bit must be set in the
+target CCB. Exactly one CCB must be made conditional on the source CCB; either 0 or 2 target CCBs
+is invalid. However, Pipelines can be extended beyond two CCBs: the sequence would start with a CCB
+with both the Pipeline and Serial bits set, proceed through CCBs with the Pipeline, Serial, and Conditional
+bits set, and terminate at a CCB that has the Conditional bit set, but not the Pipeline bit.
+
+ The input of the target CCB must start within 64 bytes of the output of the source CCB or the pipeline flag
+ will be ignored. All CCBs in a pipeline must be submitted in the same call to ccb_submit.
+
+ The various address type fields indicate how the various address values used in the CCB should be
+ interpreted by the virtual machine. Not all of the types specified are used by every CCB format. Types
+ which are not applicable to the given CCB command should be indicated as type 0 (No address). Virtual
+ addresses used in the CCB must have translation entries present in either the TLB or a configured TSB
+ for the submitting virtual processor. Virtual addresses which cannot be translated by the virtual machine
+ will result in the CCB submission being rejected, with the causal virtual address indicated. The CCB
+ may be resubmitted after inserting the translation, or the address may be translated by guest software and
+ resubmitted using the real address translation.
+
+36.2.1. Query CCB Command Formats
+36.2.1.1. Supported Data Formats, Element Sizes and Offsets
+ Data for query commands may be encoded in multiple possible formats. The data query commands use a
+ common set of values to indicate the encoding formats of the data being processed. Some encoding formats
+ require multiple data streams for processing, requiring the specification of both primary data formats (the
+ encoded data) and secondary data streams (meta-data for the encoded data).
+
+36.2.1.1.1. Primary Input Format
+
+ The primary input format code is a 4-bit field when it is used. There are 10 primary input formats available.
+ The packed formats are not endian neutral. Code values not listed below are reserved.
+
+ Code Format                          Description
+ 0x0  Fixed width byte packed         Up to 16 bytes
+ 0x1  Fixed width bit packed          Up to 15 bits (CCB version 0) or 23 bits (CCB
+                                      version 1); bits are read most significant bit
+                                      to least significant bit within a byte
+ 0x2  Variable width byte packed      Data stream of lengths must be provided as a
+                                      secondary input
+ 0x4  Fixed width byte packed with    Up to 16 bytes; data stream of run lengths must
+      run length encoding             be provided as a secondary input
+ 0x5  Fixed width bit packed with     Up to 15 bits (CCB version 0) or 23 bits (CCB
+      run length encoding             version 1); bits are read most significant bit
+                                      to least significant bit within a byte; data
+                                      stream of run lengths must be provided as a
+                                      secondary input
+ 0x8  Fixed width byte packed with    Up to 16 bytes before the encoding; compressed
+      Huffman (CCB version 0) or      stream bits are read most significant bit to
+      OZIP (CCB version 1) encoding   least significant bit within a byte; pointer to
+                                      the encoding table must be provided
+ 0x9  Fixed width bit packed with     Up to 15 bits (CCB version 0) or 23 bits (CCB
+      Huffman (CCB version 0) or      version 1); compressed stream bits are read most
+      OZIP (CCB version 1) encoding   significant bit to least significant bit within
+                                      a byte; pointer to the encoding table must be
+                                      provided
+ 0xA  Variable width byte packed with Up to 16 bytes before the encoding; compressed
+      Huffman (CCB version 0) or      stream bits are read most significant bit to
+      OZIP (CCB version 1) encoding   least significant bit within a byte; data stream
+                                      of lengths must be provided as a secondary
+                                      input; pointer to the encoding table must be
+                                      provided
+ 0xC  Fixed width byte packed with    Up to 16 bytes before the encoding; compressed
+      run length encoding, followed   stream bits are read most significant bit to
+      by Huffman (CCB version 0) or   least significant bit within a byte; data stream
+      OZIP (CCB version 1) encoding   of run lengths must be provided as a secondary
+                                      input; pointer to the encoding table must be
+                                      provided
+ 0xD  Fixed width bit packed with     Up to 15 bits (CCB version 0) or 23 bits (CCB
+      run length encoding, followed   version 1) before the encoding; compressed
+      by Huffman (CCB version 0) or   stream bits are read most significant bit to
+      OZIP (CCB version 1) encoding   least significant bit within a byte; data stream
+                                      of run lengths must be provided as a secondary
+                                      input; pointer to the encoding table must be
+                                      provided
+
+ If OZIP encoding is used, there must be no reserved bytes in the table.
+
+36.2.1.1.2. Primary Input Element Size
+
+ For primary input data streams with fixed size elements, the element size must be indicated in the CCB
+ command. The size is encoded as the number of bits or bytes, minus one. The valid value range for this
+ field depends on the input format selected, as listed in the table above.
+
+36.2.1.1.3. Secondary Input Format
+
+ For primary input data streams which require a secondary input stream, the secondary input stream is
+ always encoded in a fixed width, bit-packed format. The bits are read from most significant bit to least
+ significant bit within a byte. There are two encoding options for the secondary input stream data elements,
+ depending on whether the value of 0 is needed:
+
+ Secondary Input Format Code   Description
+ 0                             Element is stored as value minus 1
+                               (0 evaluates to 1, 1 evaluates to 2, etc)
+ 1                             Element is stored as value
+
+36.2.1.1.4. Secondary Input Element Size
+
+ Secondary input element size is encoded as a two bit field:
+
+ Secondary Input Size Code   Description
+ 0x0                         1 bit
+ 0x1                         2 bits
+ 0x2                         4 bits
+ 0x3                         8 bits
+
+36.2.1.1.5. Input Element Offsets
+
+ Bit-wise input data streams may have any alignment within the base addressed byte. The offset, specified
+ from most significant bit to least significant bit, is provided as a fixed 3 bit field for each input type. A
+ value of 0 indicates that the first input element begins at the most significant bit in the first byte, and a
+ value of 7 indicates it begins with the least significant bit.
+
+ This field should be zero for any byte-wise primary input data streams.
+
+36.2.1.1.6. Output Format
+
+ Query commands support multiple sizes and encodings for output data streams. There are four possible
+ output encodings, and up to four supported element sizes per encoding. Not all output encodings are
+ supported for every command. The format is indicated by a 4-bit field in the CCB:
+
+ Output Format Code Description
+ 0x0 Byte aligned, 1 byte elements
+ 0x1 Byte aligned, 2 byte elements
+ 0x2 Byte aligned, 4 byte elements
+ 0x3 Byte aligned, 8 byte elements
+ 0x4 16 byte aligned, 16 byte elements
+ 0x5 Reserved
+ 0x6 Reserved
+ 0x7 Reserved
+ 0x8 Packed vector of single bit elements
+ 0x9 Reserved
+ 0xA Reserved
+ 0xB Reserved
+ 0xC Reserved
+ 0xD 2 byte elements where each element is the index value of a bit,
+ from a bit vector, which was 1.
+ 0xE 4 byte elements where each element is the index value of a bit,
+ from a bit vector, which was 1.
+ 0xF Reserved
+
+36.2.1.1.7. Application Data Integrity (ADI)
+
+ On platforms which support ADI, the ADI version number may be specified for each separate memory
+ access type used in the CCB command. ADI checking only occurs when reading data. When writing data,
+ the specified ADI version number overwrites any existing ADI value in memory.
+
+ An ADI version value of 0 or 0xF indicates the ADI checking is disabled for that data access, even if it is
+ enabled in memory. By setting the appropriate flag in CCB_SUBMIT (Section 36.3.1, “ccb_submit”) it is
+ also possible to disable ADI checking for all inputs accessed via virtual address for all CCBs submitted
+ during that hypercall invocation.
+
+ The ADI value is only guaranteed to be checked on the first 64 bytes of each data access. Mismatches on
+ subsequent data chunks may not be detected, so guest software should be careful to use page size checking
+ to protect against buffer overruns.
+
+36.2.1.1.8. Page size checking
+
+ All data accesses used in CCB commands must be bounded within a single memory page. When addresses
+ are provided using a virtual address, the page size for checking is extracted from the TTE for that virtual
+ address. When using real addresses, the guest must supply the page size in the same field as the address
+ value. The page size must be one of the sizes supported by the underlying virtual machine. Using a value
+ that is not supported may result in the CCB submission being rejected or the generation of a CCB parsing
+ error in the completion area.
+
+36.2.1.2. Extract command
+
+ Converts an input vector in one format to an output vector in another format. All input format types are
+ supported.
+
+ The only supported output format is a padded, byte-aligned output stream, using output codes 0x0 - 0x4.
+ When the specified output element size is larger than the extracted input element size, zeros are padded to
+ the extracted input element. First, if the decompressed input size is not a whole number of bytes, 0 bits are
+ padded to the most significant bit side till the next byte boundary. Next, if the output element size is larger
+ than the byte padded input element, bytes of value 0 are added based on the Padding Direction bit in the
+ CCB. If the output element size is smaller than the byte-padded input element size, the input element is
+ truncated by dropping bytes from the least significant byte side until the selected output size is reached.
+
+ The return value of the CCB completion area is invalid. The “number of elements processed” field in the
+ CCB completion area will be valid.
+
+ The extract CCB is a 64-byte “short format” CCB.
+
+ The extract CCB command format can be specified by the following packed C structure for a big-endian
+ machine:
+
+
+ struct extract_ccb {
+ uint32_t header;
+ uint32_t control;
+ uint64_t completion;
+ uint64_t primary_input;
+ uint64_t data_access_control;
+ uint64_t secondary_input;
+ uint64_t reserved;
+ uint64_t output;
+ uint64_t table;
+ };
+
+
+ The exact field offsets, sizes, and composition are as follows:
+
+ Offset Size Field Description
+ 0 4 CCB header (Table 36.1, “CCB Header Format”)
+ 4 4 Command control
+ Bits Field Description
+ [31:28] Primary Input Format (see Section 36.2.1.1.1, “Primary Input
+ Format”)
+ [27:23] Primary Input Element Size (see Section 36.2.1.1.2, “Primary
+ Input Element Size”)
+ [22:20] Primary Input Starting Offset (see Section 36.2.1.1.5, “Input
+ Element Offsets”)
+ [19] Secondary Input Format (see Section 36.2.1.1.3, “Secondary
+ Input Format”)
+ [18:16] Secondary Input Starting Offset (see Section 36.2.1.1.5, “Input
+ Element Offsets”)
+ [15:14] Secondary Input Element Size (see Section 36.2.1.1.4,
+ “Secondary Input Element Size”)
+ [13:10] Output Format (see Section 36.2.1.1.6, “Output Format”)
+ [9] Padding Direction selector: A value of 1 causes padding bytes
+ to be added to the left side of output elements. A value of 0
+ causes padding bytes to be added to the right side of output
+ elements.
+ [8:0] Reserved
+8 8 Completion
+ Bits Field Description
+ [63:60] ADI version (see Section 36.2.1.1.7, “Application Data
+ Integrity (ADI)”)
+ [59] If set to 1, a virtual device interrupt will be generated using
+ the device interrupt number specified in the lower bits of this
+ completion word. If 0, the lower bits of this completion word
+ are ignored.
+ [58:6] Completion area address bits [58:6]. Address type is
+ determined by CCB header.
+ [5:0] Virtual device interrupt number for completion interrupt, if
+ enabled.
+16 8 Primary Input
+ Bits Field Description
+ [63:60] ADI version (see Section 36.2.1.1.7, “Application Data
+ Integrity (ADI)”)
+ [59:56] If using real address, these bits should be filled in with the
+ page size code for the page boundary checking the guest wants
+ the virtual machine to use when accessing this data stream
+ (checking is only guaranteed to be performed when using API
+ version 1.1 and later). If using a virtual address, this field will
+ be used as primary input address bits [59:56].
+ [55:0] Primary input address bits [55:0]. Address type is determined
+ by CCB header.
+24 8 Data Access Control
+ Bits Field Description
+ [63:62] Flow Control
+ Value Description
+ 0b'00 Disable flow control
+ 0b'01 Enable flow control (only valid with "ORCL,sun4v-
+ dax-fc" compatible virtual device variants)
+ 0b'10 Reserved
+ 0b'11 Reserved
+ [61:60] Reserved (API 1.0)
+ Pipeline target (API 2.0)
+ Value Description
+ 0b'00 Connect to primary input
+ 0b'01 Connect to secondary input
+ 0b'10 Reserved
+ 0b'11 Reserved
+ [59:40] Output buffer size given in units of 64 bytes, minus 1. Value of
+ 0 means 64 bytes, value of 1 means 128 bytes, etc. Buffer size is
+ only enforced if flow control is enabled in Flow Control field.
+ [39:32] Reserved
+ [31:30] Output Data Cache Allocation
+ Value Description
+ 0b'00 Do not allocate cache lines for output data stream.
+ 0b'01 Force cache lines for output data stream to be
+ allocated in the cache that is local to the submitting
+ virtual cpu.
+ 0b'10 Allocate cache lines for output data stream, but allow
+ existing cache lines associated with the data to remain
+ in their current cache instance. Any memory not
+ already in cache will be allocated in the cache local
+ to the submitting virtual cpu.
+ 0b'11 Reserved
+ [29:26] Reserved
+ [25:24] Primary Input Length Format
+ Value Description
+ 0b'00 Number of primary symbols
+ 0b'01 Number of primary bytes
+ 0b'10 Number of primary bits
+ 0b'11 Reserved
+ [23:0] Primary Input Length
+ Format Field Value
+ # of primary symbols Number of input elements to process,
+ minus 1. Command execution stops
+ once count is reached.
+ # of primary bytes Number of input bytes to process,
+ minus 1. Command execution stops
+ once count is reached. The count is
+ done before any decompression or
+ decoding.
+ # of primary bits Number of input bits to process,
+ minus 1. Command execution stops
+ once count is reached. The count is
+ done before any decompression or
+ decoding, and does not include any
+ bits skipped by the Primary Input
+ Offset field value of the command
+ control word.
+ 32 8 Secondary Input, if used by Primary Input Format. Same fields as Primary
+ Input.
+ 40 8 Reserved
+ 48 8 Output (same fields as Primary Input)
+ 56 8 Symbol Table (if used by Primary Input)
+ Bits Field Description
+ [63:60] ADI version (see Section 36.2.1.1.7, “Application Data
+ Integrity (ADI)”)
+ [59:56] If using real address, these bits should be filled in with the
+ page size code for the page boundary checking the guest wants
+ the virtual machine to use when accessing this data stream
+ (checking is only guaranteed to be performed when using API
+ version 1.1 and later). If using a virtual address, this field will
+ be used as symbol table address bits [59:56].
+ [55:4] Symbol table address bits [55:4]. Address type is determined
+ by CCB header.
+ [3:0] Symbol table version
+ Value Description
+ 0 Huffman encoding. Must use 64 byte aligned table
+ address. (Only available when using version 0 CCBs)
+ 1 OZIP encoding. Must use 16 byte aligned table
+ address. (Only available when using version 1 CCBs)
+
+
+36.2.1.3. Scan commands
+
+ The scan commands search a stream of input data elements for values which match the selection criteria.
+ All the input format types are supported. There are multiple formats for the scan commands, allowing the
+ scan to search for exact matches to one value, exact matches to either of two values, or any value within
+ a specified range. The specific type of scan is indicated by the command code in the CCB header. For the
+ scan range commands, the boundary conditions can be specified as greater-than-or-equal-to a value, less-
+ than-or-equal-to a value, or both by using two boundary values.
+
+ There are two supported formats for the output stream: the bit vector and index array formats (codes 0x8,
+ 0xD, and 0xE). For the standard scan command using the bit vector output, for each input element there
+ exists one bit in the vector that is set if the input element matched the scan criteria, or clear if not. The
+ inverted scan command inverts the polarity of the bits in the output. The most significant bit of the first
+ byte of the output stream corresponds to the first element in the input stream. The standard index array
+ output format contains one array entry for each input element that matched the scan criteria. Each array
+entry is the index of an input element that matched the scan criteria. An inverted scan command produces
+a similar array, but of all the input elements which did NOT match the scan criteria.
+
+The return value of the CCB completion area contains the number of input elements found which match
+the scan criteria (or number that did not match for the inverted scans). The “number of elements processed”
+field in the CCB completion area will be valid, indicating the number of input elements processed.
+
+These commands are 128-byte “long format” CCBs.
+
+The scan CCB command format can be specified by the following packed C structure for a big-endian
+machine:
+
+
+ struct scan_ccb {
+ uint32_t header;
+ uint32_t control;
+ uint64_t completion;
+ uint64_t primary_input;
+ uint64_t data_access_control;
+ uint64_t secondary_input;
+ uint64_t match_criteria0;
+ uint64_t output;
+ uint64_t table;
+ uint64_t match_criteria1;
+ uint64_t match_criteria2;
+ uint64_t match_criteria3;
+ uint64_t reserved[5];
+ };
+
+
+The exact field offsets, sizes, and composition are as follows:
+
+Offset Size Field Description
+0 4 CCB header (Table 36.1, “CCB Header Format”)
+4 4 Command control
+ Bits Field Description
+ [31:28] Primary Input Format (see Section 36.2.1.1.1, “Primary Input
+ Format”)
+ [27:23] Primary Input Element Size (see Section 36.2.1.1.2, “Primary
+ Input Element Size”)
+ [22:20] Primary Input Starting Offset (see Section 36.2.1.1.5, “Input
+ Element Offsets”)
+ [19] Secondary Input Format (see Section 36.2.1.1.3, “Secondary
+ Input Format”)
+ [18:16] Secondary Input Starting Offset (see Section 36.2.1.1.5, “Input
+ Element Offsets”)
+ [15:14] Secondary Input Element Size (see Section 36.2.1.1.4,
+ “Secondary Input Element Size”)
+ [13:10] Output Format (see Section 36.2.1.1.6, “Output Format”)
+ [9:5] Operand size for first scan criteria value. In a scan value
+ operation, this is one of two potential exact match values.
+ In a scan range operation, this is the size of the upper range
+ boundary. The value of this field is the number of bytes in the
+ operand, minus 1. Values 0xF-0x1E are reserved. A value of
+ 0x1F indicates this operand is not in use for this scan operation.
+ [4:0] Operand size for second scan criteria value. In a scan value
+ operation, this is one of two potential exact match values.
+ In a scan range operation, this is the size of the lower range
+ boundary. The value of this field is the number of bytes in the
+ operand, minus 1. Values 0xF-0x1E are reserved. A value of
+ 0x1F indicates this operand is not in use for this scan operation.
+8 8 Completion (same fields as Section 36.2.1.2, “Extract command”)
+16 8 Primary Input (same fields as Section 36.2.1.2, “Extract command”)
+24 8 Data Access Control (same fields as Section 36.2.1.2, “Extract command”)
+32 8 Secondary Input, if used by Primary Input Format. Same fields as Primary
+ Input.
+40 4 Most significant 4 bytes of first scan criteria operand. If first operand is less
+ than 4 bytes, the value is left-aligned to the lowest address bytes.
+44 4 Most significant 4 bytes of second scan criteria operand. If second operand
+ is less than 4 bytes, the value is left-aligned to the lowest address bytes.
+48 8 Output (same fields as Primary Input)
+56 8 Symbol Table (if used by Primary Input). Same fields as Section 36.2.1.2,
+ “Extract command”
+64 4 Next 4 most significant bytes of first scan criteria operand occurring after the
+ bytes specified at offset 40, if needed by the operand size. If first operand
+ is less than 8 bytes, the valid bytes are left-aligned to the lowest address.
+68 4 Next 4 most significant bytes of second scan criteria operand occurring after
+ the bytes specified at offset 44, if needed by the operand size. If second
+ operand is less than 8 bytes, the valid bytes are left-aligned to the lowest
+ address.
+72 4 Next 4 most significant bytes of first scan criteria operand occurring after the
+ bytes specified at offset 64, if needed by the operand size. If first operand
+ is less than 12 bytes, the valid bytes are left-aligned to the lowest address.
+76 4 Next 4 most significant bytes of second scan criteria operand occurring after
+ the bytes specified at offset 68, if needed by the operand size. If second
+ operand is less than 12 bytes, the valid bytes are left-aligned to the lowest
+ address.
+80 4 Next 4 most significant bytes of first scan criteria operand occurring after the
+ bytes specified at offset 72, if needed by the operand size. If first operand
+ is less than 16 bytes, the valid bytes are left-aligned to the lowest address.
+84 4 Next 4 most significant bytes of second scan criteria operand occuring after
+ the bytes specified at offset 76, if needed by the operand size. If second
+ operand is less than 16 bytes, the valid bytes are left-aligned to the lowest
+ address.
+
+
+36.2.1.4. Translate commands
+
+ The translate command takes an input array of indices, and a table of single-bit values indexed by those
+ indices, and outputs a bit vector or index array created by reading the table's bit value at each index in
+ the input array. The output should therefore contain exactly one bit per index in the input data stream,
+ when outputting as a bit vector. When outputting as an index array, the number of elements depends on the
+ values read in the bit table, but will always be less than, or equal to, the number of input elements. Only
+ a restricted subset of the possible input format types are supported. No variable width or Huffman/OZIP
+ encoded input streams are allowed. The primary input data element size must be 3 bytes or less.
+
+ The maximum table index size allowed is 15 bits; however, larger input elements may be used to provide
+ additional processing of the output values. If 2 or 3 byte values are used, the least significant 15 bits are
+ used as an index into the bit table. The most significant 9 bits (when using 3-byte input elements) or single
+ bit (when using 2-byte input elements) are compared against a fixed 9-bit test value provided in the CCB.
+ If the values match, the value from the bit table is used as the output element value. If the values do not
+ match, the output data element value is forced to 0.
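+
+ As an illustration only (this sketch is not part of the specification), the
+ lookup for a single 3-byte input element can be modeled in C as follows;
+ the bit ordering within each table byte is an assumption here:
+
+
+ unsigned int translate_element(const uint8_t *bit_table,
+                                uint32_t element, uint32_t test_value)
+ {
+         uint32_t index = element & 0x7fff; /* least significant 15 bits */
+         uint32_t upper = element >> 15;    /* most significant 9 bits */
+         /* read the table bit at the index, assuming MSB-first bit order */
+         unsigned int bit = (bit_table[index >> 3] >> (7 - (index & 7))) & 1;
+
+         /* a test-value mismatch forces the output element value to 0 */
+         return (upper == test_value) ? bit : 0;
+ }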
+
+ In the inverted translate operation, the bit value read from the bit table is inverted prior to its use. The
+ additional processing based on any additional non-index bits remains unchanged, and still forces the output
+ element value to 0 on a mismatch. The specific type of translate command is indicated by the command
+ code in the CCB header.
+
+ There are two supported formats for the output stream: the bit vector and index array formats (codes 0x8,
+ 0xD, and 0xE). The index array format is an array of indices of bits which would have been set if the
+ output format was a bit array.
+
+ The return value of the CCB completion area contains the number of bits set in the output bit vector,
+ or the number of elements in the output index array. The “number of elements processed” field in the CCB
+ completion area will be valid, indicating the number of input elements processed.
+
+ These commands are 64-byte “short format” CCBs.
+
+ The translate CCB command format can be specified by the following packed C structure for a big-endian
+ machine:
+
+
+ struct translate_ccb {
+ uint32_t header;
+ uint32_t control;
+ uint64_t completion;
+ uint64_t primary_input;
+ uint64_t data_access_control;
+ uint64_t secondary_input;
+ uint64_t reserved;
+ uint64_t output;
+ uint64_t table;
+ };
+
+
+ The exact field offsets, sizes, and composition are as follows:
+
+
+ Offset Size Field Description
+ 0 4 CCB header (Table 36.1, “CCB Header Format”)
+4 4 Command control
+ Bits Field Description
+ [31:28] Primary Input Format (see Section 36.2.1.1.1, “Primary Input
+ Format”)
+ [27:23] Primary Input Element Size (see Section 36.2.1.1.2, “Primary
+ Input Element Size”)
+ [22:20] Primary Input Starting Offset (see Section 36.2.1.1.5, “Input
+ Element Offsets”)
+ [19] Secondary Input Format (see Section 36.2.1.1.3, “Secondary
+ Input Format”)
+ [18:16] Secondary Input Starting Offset (see Section 36.2.1.1.5, “Input
+ Element Offsets”)
+ [15:14] Secondary Input Element Size (see Section 36.2.1.1.4,
+ “Secondary Input Element Size”)
+ [13:10] Output Format (see Section 36.2.1.1.6, “Output Format”)
+ [9] Reserved
+ [8:0] Test value used for comparison against the most significant bits
+ in the input values, when using 2 or 3 byte input elements.
+8 8 Completion (same fields as Section 36.2.1.2, “Extract command”)
+16 8 Primary Input (same fields as Section 36.2.1.2, “Extract command”)
+24 8 Data Access Control (same fields as Section 36.2.1.2, “Extract command”,
+ except Primary Input Length Format may not use the 0x0 value)
+32 8 Secondary Input, if used by Primary Input Format. Same fields as Primary
+ Input.
+40 8 Reserved
+48 8 Output (same fields as Primary Input)
+56 8 Bit Table
+ Bits Field Description
+ [63:60] ADI version (see Section 36.2.1.1.7, “Application Data
+ Integrity (ADI)”)
+ [59:56] If using a real address, these bits should be filled in with the
+ page size code for the page boundary checking the guest wants
+ the virtual machine to use when accessing this data stream
+ (checking is only guaranteed to be performed when using API
+ version 1.1 and later). If using a virtual address, this field will
+ be used as bit table address bits [59:56]
+ [55:4] Bit table address bits [55:4]. Address type is determined by
+ CCB header. Address must be 64-byte aligned (CCB version
+ 0) or 16-byte aligned (CCB version 1).
+ [3:0] Bit table version
+ Value Description
+ 0 4KB table size
+ 1 8KB table size
+
+
+36.2.1.5. Select command
+ The select command filters the primary input data stream by using a secondary input bit vector to determine
+ which input elements to include in the output. For each bit set at a given index N within the bit vector,
+ the Nth input element is included in the output. If the bit is not set, the element is not included. Only a
+ restricted subset of the possible input format types are supported. No variable width or run length encoded
+ input streams are allowed, since the secondary input stream is used for the filtering bit vector.
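+
+ As an illustration only (this sketch is not part of the specification), the
+ filtering semantics can be modeled in C as follows; the bit ordering within
+ the secondary input bit vector is an assumption here:
+
+
+ size_t select_filter(const uint8_t *in, size_t elem_size, size_t nelems,
+                      const uint8_t *bitvec, uint8_t *out)
+ {
+         size_t i, n = 0;
+
+         for (i = 0; i < nelems; i++) {
+                 /* include element i only when bit i is set (MSB-first) */
+                 if ((bitvec[i >> 3] >> (7 - (i & 7))) & 1) {
+                         memcpy(out + n * elem_size, in + i * elem_size,
+                                elem_size);
+                         n++;
+                 }
+         }
+         return n; /* number of elements written to the output */
+ }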
+
+ The only supported output format is a padded, byte-aligned output stream. The stream follows the same
+ rules and restrictions as the padded output stream described in Section 36.2.1.2, “Extract command”.
+
+ The return value of the CCB completion area contains the number of bits set in the input bit vector. The
+ "number of elements processed" field in the CCB completion area will be valid, indicating the number
+ of input elements processed.
+
+ The select CCB is a 64-byte “short format” CCB.
+
+ The select CCB command format can be specified by the following packed C structure for a big-endian
+ machine:
+
+
+ struct select_ccb {
+ uint32_t header;
+ uint32_t control;
+ uint64_t completion;
+ uint64_t primary_input;
+ uint64_t data_access_control;
+ uint64_t secondary_input;
+ uint64_t reserved;
+ uint64_t output;
+ uint64_t table;
+ };
+
+
+ The exact field offsets, sizes, and composition are as follows:
+
+ Offset Size Field Description
+ 0 4 CCB header (Table 36.1, “CCB Header Format”)
+ 4 4 Command control
+ Bits Field Description
+ [31:28] Primary Input Format (see Section 36.2.1.1.1, “Primary Input
+ Format”)
+ [27:23] Primary Input Element Size (see Section 36.2.1.1.2, “Primary
+ Input Element Size”)
+ [22:20] Primary Input Starting Offset (see Section 36.2.1.1.5, “Input
+ Element Offsets”)
+ [19] Secondary Input Format (see Section 36.2.1.1.3, “Secondary
+ Input Format”)
+ [18:16] Secondary Input Starting Offset (see Section 36.2.1.1.5, “Input
+ Element Offsets”)
+ [15:14] Secondary Input Element Size (see Section 36.2.1.1.4,
+ “Secondary Input Element Size”)
+ [13:10] Output Format (see Section 36.2.1.1.6, “Output Format”)
+ [9] Padding Direction selector: A value of 1 causes padding bytes
+ to be added to the left side of output elements. A value of 0
+ causes padding bytes to be added to the right side of output
+ elements.
+ [8:0] Reserved
+ 8 8 Completion (same fields as Section 36.2.1.2, “Extract command”)
+ 16 8 Primary Input (same fields as Section 36.2.1.2, “Extract command”)
+ 24 8 Data Access Control (same fields as Section 36.2.1.2, “Extract command”)
+ 32 8 Secondary Bit Vector Input. Same fields as Primary Input.
+ 40 8 Reserved
+ 48 8 Output (same fields as Primary Input)
+ 56 8 Symbol Table (if used by Primary Input). Same fields as Section 36.2.1.2,
+ “Extract command”.
+
+36.2.1.6. No-op and Sync commands
+ The no-op (no operation) command is a CCB which has no processing effect. The CCB, when processed
+ by the virtual machine, simply updates the completion area with its execution status. The CCB may have
+ the serial-conditional flags set in order to restrict when it executes.
+
+ The sync command is a variant of the no-op command with restricted execution timing. A sync
+ command CCB will only execute when all previous commands submitted in the same request have
+ completed. This is stronger than the conditional flag sequencing, which is only dependent on a single
+ previous serial CCB. While the relative ordering is guaranteed, virtual machine implementations with
+ shared hardware resources may cause the sync command to wait for longer than the minimum required
+ time.
+
+ The return value of the CCB completion area is invalid for these CCBs. The “number of elements
+ processed” field is also invalid for these CCBs.
+
+ These commands are 64-byte “short format” CCBs.
+
+ The no-op CCB command format can be specified by the following packed C structure for a big-endian
+ machine:
+
+
+ struct nop_ccb {
+ uint32_t header;
+ uint32_t control;
+ uint64_t completion;
+ uint64_t reserved[6];
+ };
+
+
+ The exact field offsets, sizes, and composition are as follows:
+
+ Offset Size Field Description
+ 0 4 CCB header (Table 36.1, “CCB Header Format”)
+ 4 4 Command control
+ Bits Field Description
+ [31] If set, this CCB functions as a Sync command. If clear, this
+ CCB functions as a No-op command.
+ [30:0] Reserved
+ 8 8 Completion (same fields as Section 36.2.1.2, “Extract command”)
+ 16 48 Reserved
+
+36.2.2. CCB Completion Area
+ All CCB commands use a common 128-byte Completion Area format, which can be specified by the
+ following packed C structure for a big-endian machine:
+
+
+ struct completion_area {
+ uint8_t status_flag;
+ uint8_t error_note;
+ uint8_t rsvd0[2];
+ uint32_t error_values;
+ uint32_t output_size;
+ uint32_t rsvd1;
+ uint64_t run_time;
+ uint64_t run_stats;
+ uint32_t elements;
+ uint8_t rsvd2[20];
+ uint64_t return_value;
+ uint64_t extra_return_value[8];
+ };
+
+
+ The Completion Area must be a 128-byte aligned memory location. The exact layout can be described
+ using byte offsets and sizes relative to the memory base:
+
+ Offset Size Field Description
+ 0 1 CCB execution status
+ 0x0 Command not yet completed
+ 0x1 Command ran and succeeded
+ 0x2 Command ran and failed (partial results may have been
+ produced)
+ 0x3 Command ran and was killed (partial execution may
+ have occurred)
+ 0x4 Command was not run
+ 0x5-0xF Reserved
+ 1 1 Error reason code
+ 0x0 Reserved
+ 0x1 Buffer overflow
+ 0x2 CCB decoding error
+ 0x3 Page overflow
+ 0x4-0x6 Reserved
+ 0x7 Command was killed
+ 0x8 Command execution timeout
+ 0x9 ADI miscompare error
+ 0xA Data format error
+ 0xB-0xD Reserved
+ 0xE Unexpected hardware error (Do not retry)
+ 0xF Unexpected hardware error (Retry is ok)
+ 0x10-0x7F Reserved
+ 0x80 Partial Symbol Warning
+ 0x81-0xFF Reserved
+2 2 Reserved
+4 4 If a partial symbol warning was generated, this field contains the number
+ of remaining bits which were not decoded.
+8 4 Number of bytes of output produced
+12 4 Reserved
+16 8 Runtime of command (unspecified time units)
+24 8 Reserved
+32 4 Number of elements processed
+36 20 Reserved
+56 8 Return value
+64 64 Extended return value
+
+The CCB completion area should be treated as read-only by guest software. The CCB execution status
+byte will be cleared by the Hypervisor to reflect the pending execution status when the CCB is submitted
+successfully. All other fields are considered invalid upon CCB submission until the CCB execution status
+byte becomes non-zero.
+
+CCBs which complete with status 0x2 or 0x3 may produce partial results and/or side effects due to partial
+execution of the CCB command. Some valid data may be accessible depending on the fault type; however,
+it is recommended that guest software treat the destination buffer as being in an unknown state. If a CCB
+completes with a status byte of 0x2, the error reason code byte can be read to determine what corrective
+action should be taken.
+
+A buffer overflow indicates that the results of the operation exceeded the size of the output buffer indicated
+in the CCB. The operation can be retried by resubmitting the CCB with a larger output buffer.
+
+A CCB decoding error indicates that the CCB contained some invalid field values. It may also be
+triggered if the CCB output is directed at a non-existent secondary input and the pipelining hint is followed.
+
+A page overflow error indicates that the operation required accessing a memory location beyond the page
+size associated with a given address. No data will have been read or written past the page boundary, but
+partial results may have been written to the destination buffer. The CCB can be resubmitted with a larger
+page size memory allocation to complete the operation.
+
+
+ In the case of pipelined CCBs, a page overflow error will be triggered if the output from the pipeline source
+ CCB ends before the input of the pipeline target CCB. Page boundaries are ignored when the pipeline
+ hint is followed.
+
+ Command kill indicates that the CCB execution was halted or prevented by use of the ccb_kill API call.
+
+ Command timeout indicates that the CCB execution began, but did not complete within a pre-determined
+ limit set by the virtual machine. The command may have produced some or no output. The CCB may be
+ resubmitted with no alterations.
+
+ ADI miscompare indicates that the memory buffer version specified in the CCB did not match the value
+ in memory when accessed by the virtual machine. Guest software should not attempt to resubmit the CCB
+ without determining the cause of the version mismatch.
+
+ A data format error indicates that the input data stream did not follow the specified data input formatting
+ selected in the CCB.
+
+ Some CCBs which encounter hardware errors may be resubmitted without change. Persistent hardware
+ errors may result in multiple failures until RAS software can identify and isolate the faulty component.
+
+ The output size field indicates the number of bytes of valid output in the destination buffer. This field is
+ not valid for all possible CCB commands.
+
+ The runtime field indicates the execution time of the CCB command once it leaves the internal virtual
+ machine queue. The time units are fixed, but unspecified, allowing only relative timing comparisons
+ by guest software. The time units may also vary by hardware platform, and should not be construed to
+ represent any absolute time value.
+
+ Some data query commands process data in units of elements. If applicable to the command, the number of
+ elements processed is indicated in the listed field. This field is not valid for all possible CCB commands.
+
+ The return value and extended return value fields are output locations for commands which do not use
+ a destination output buffer, or have secondary return results. The field is not valid for all possible CCB
+ commands.
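+
+ As an illustration only (this sketch is not part of the specification),
+ guest software might poll and decode a completion area along these lines,
+ using the packed structure defined above; the helper names are hypothetical:
+
+
+ volatile struct completion_area *ca = get_completion_area(); /* hypothetical */
+
+ while (ca->status_flag == 0x0)
+         ; /* command not yet completed; a real guest would mwait here */
+
+ if (ca->status_flag == 0x1) {
+         consume_output(ca->output_size, ca->return_value); /* hypothetical */
+ } else if (ca->status_flag == 0x2 && ca->error_note == 0x1) {
+         /* buffer overflow: resubmit the CCB with a larger output buffer */
+ }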
+
+36.3. Hypervisor API Functions
+36.3.1. ccb_submit
+ trap# FAST_TRAP
+ function# CCB_SUBMIT
+ arg0 address
+ arg1 length
+ arg2 flags
+ arg3 reserved
+ ret0 status
+ ret1 length
+ ret2 status data
+ ret3 reserved
+
+ Submit one or more coprocessor control blocks (CCBs) for evaluation and processing by the virtual
+ machine. The CCBs are passed in a linear array indicated by address. length indicates the size of
+ the array in bytes.
+
+
+ 528
+ Coprocessor services
+
+
+The address should be aligned to the size indicated by length, rounded up to the nearest power of
+two. Virtual machine implementations may reject submissions which do not adhere to that alignment.
+length must be a multiple of 64 bytes. If length is zero, the maximum supported array length will be
+returned as length in ret1. In all other cases, the length value in ret1 will reflect the number of bytes
+successfully consumed from the input CCB array.
+
+ Implementation note
+ Virtual machines should never reject submissions based on the alignment of address if the
+ entire array is contained within a single memory page of the smallest page size supported by the
+ virtual machine.
+
+A guest may choose to submit addresses used in this API function, including the CCB array address,
+as either real or virtual addresses, with the type of each address indicated in flags. Virtual addresses
+must be present in either the TLB or an active TSB to be processed. The translation context for virtual
+addresses is determined by a combination of CCB contents and the flags argument.
+
+The flags argument is divided into multiple fields defined as follows:
+
+
+Bits Field Description
+[63:16] Reserved
+[15] Disable ADI for VA reads (in API 2.0)
+ Reserved (in API 1.0)
+[14] Virtual addresses within CCBs are translated in privileged context
+[13:12] Alternate translation context for virtual addresses within CCBs:
+ 0b'00 CCBs requesting alternate context are rejected
+ 0b'01 Reserved
+ 0b'10 CCBs requesting alternate context use secondary context
+ 0b'11 CCBs requesting alternate context use nucleus context
+[11:9] Reserved
+[8] Queue info flag
+[7] All-or-nothing flag
+[6] If address is a virtual address, treat its translation context as privileged
+[5:4] Address type of address:
+ 0b'00 Real address
+ 0b'01 Virtual address in primary context
+ 0b'10 Virtual address in secondary context
+ 0b'11 Virtual address in nucleus context
+[3:2] Reserved
+[1:0] CCB command type:
+ 0b'00 Reserved
+ 0b'01 Reserved
+ 0b'10 Query command
+ 0b'11 Reserved
+
+
+
+ The CCB submission type and address type for the CCB array must be provided in the flags argument.
+ All other fields are optional values which change the default behavior of the CCB processing.
+
+ When set to one, the "Disable ADI for VA reads" bit will turn off ADI checking when using a virtual
+ address to load data. ADI checking will still be done when loading real-addressed memory. This bit is only
+ available when using major version 2 of the coprocessor API group; at major version 1 it is reserved. For
+ more information about using ADI and DAX, see Section 36.2.1.1.7, “Application Data Integrity (ADI)”.
+
+ By default, all virtual addresses are treated as user addresses. If the virtual address translations are
+ privileged, they must be marked as such in the appropriate flags field. The virtual addresses used within
+ the submitted CCBs must all be translated with the same privilege level.
+
+ By default, all virtual addresses used within the submitted CCBs are translated using the primary context
+ active at the time of the submission. The address type field within a CCB allows each address to request
+ translation in an alternate address context. The address context used when the alternate address context is
+ requested is selected in the flags argument.
+
+ The all-or-nothing flag specifies whether the virtual machine should allow partial submissions of the
+ input CCB array. When using CCBs with serial-conditional flags, it is strongly recommended to use
+ the all-or-nothing flag to avoid broken conditional chains. Using long CCB chains on a machine under
+ high coprocessor load may make this impractical, however, and require submitting without the flag.
+ When submitting serial-conditional CCBs without the all-or-nothing flag, guest software must manually
+ implement the serial-conditional behavior at any point where the chain was not submitted in a single API
+ call, and resubmission of the remaining CCBs should clear any conditional flag that might be set in the
+ first remaining CCB. Failure to do so will produce indeterminate CCB execution status and ordering.
+
+ When the all-or-nothing flag is not specified, callers should check the value of length in ret1 to determine
+ how many CCBs from the array were successfully submitted. Any remaining CCBs can be resubmitted
+ without modifications.
+
+ The value of length in ret1 is also valid when the API call returns an error, and callers should always
+ check its value to determine which CCBs in the array were already processed. This will additionally
+ identify which CCB encountered the processing error, and was not submitted successfully.
+
+ If the queue info flag is used during submission, and at least one CCB was successfully submitted, the
+ length value in ret1 will be a multi-field value defined as follows:
+ Bits Field Description
+ [63:48] DAX unit instance identifier
+ [47:32] DAX queue instance identifier
+ [31:16] Reserved
+ [15:0] Number of CCB bytes successfully submitted
+
+ The value of status data depends on the status value. See error status code descriptions for details.
+ The value is undefined for status values that do not specifically list a value for the status data.
+
+ The API has a reserved input and output register which will be used in subsequent minor versions of this
+ API function. Guest software implementations should treat that register as volatile across the function call
+ in order to maintain forward compatibility.
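+
+ As an illustration only, the flags value for a query-type submission of a
+ virtually addressed CCB array in the primary context, with all-or-nothing
+ behavior requested, could be composed from the field table above:
+
+
+ uint64_t flags = (0x2UL << 0)  /* [1:0] CCB command type: query command */
+                | (0x1UL << 4)  /* [5:4] address type: virtual, primary context */
+                | (0x1UL << 7); /* [7] all-or-nothing flag */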
+
+36.3.1.1. Errors
+ EOK One or more CCBs have been accepted and enqueued in the virtual machine
+ and no errors were encountered during submission. Some submitted
+ CCBs may not have been enqueued due to internal virtual machine limitations,
+ and may be resubmitted without changes.
+EWOULDBLOCK An internal resource conflict within the virtual machine has prevented it from
+ being able to complete the CCB submissions sufficiently quickly, requiring
+ it to abandon processing before it was complete. Some CCBs may have been
+ successfully enqueued prior to the block, and all remaining CCBs may be
+ resubmitted without changes.
+EBADALIGN CCB array is not on a 64-byte boundary, or the array length is not a multiple
+ of 64 bytes.
+ENORADDR A real address used either for the CCB array, or within one of the submitted
+ CCBs, is not valid for the guest. Some CCBs may have been enqueued prior
+ to the error being detected.
+ENOMAP A virtual address used either for the CCB array, or within one of the submitted
+ CCBs, could not be translated by the virtual machine using either the TLB
+ or TSB contents. The submission may be retried after adding the required
+ mapping, or by converting the virtual address into a real address. Due to the
+ shared nature of address translation resources, there is no theoretical limit on
+ the number of times the translation may fail, and it is recommended all guests
+ implement some real address based backup. The virtual address which failed
+ translation is returned as status data in ret2. Some CCBs may have been
+ enqueued prior to the error being detected.
+EINVAL The virtual machine detected an invalid CCB during submission, or invalid
+ input arguments, such as bad flag values. Note that not all invalid CCB values
+ will be detected during submission, and some may be reported as errors in the
+ completion area instead. Some CCBs may have been enqueued prior to the
+ error being detected. This error may be returned if the CCB version is invalid.
+ETOOMANY The request was submitted with the all-or-nothing flag set, and the array size is
+ greater than the virtual machine can support in a single request. The maximum
+ supported size for the current virtual machine can be queried by submitting a
+ request with a zero length array, as described above.
+ENOACCESS The guest does not have permission to submit CCBs, or an address used in a
+ CCB lacks sufficient permissions to perform the required operation (no write
+ permission on the destination buffer address, for example). A virtual address
+ which fails permission checking is returned as status data in ret2. Some
+ CCBs may have been enqueued prior to the error being detected.
+EUNAVAILABLE The requested CCB operation could not be performed at this time. The
+ restricted operation availability may apply only to the first unsuccessfully
+ submitted CCB, or may apply to a larger scope. The status should not be
+ interpreted as permanent, and the guest should later resubmit CCBs which
+ previously could not be performed. The status
+ data provides additional information about the scope of the restricted availability
+ as follows:
+ Value Description
+ 0 Processing for the exact CCB instance submitted was unavailable,
+ and it is recommended the guest emulate the operation. The
+ guest should continue to submit all other CCBs, and assume no
+ restrictions beyond this exact CCB instance.
+ 1 Processing is unavailable for all CCBs using the requested opcode,
+ and it is recommended the guest emulate the operation. The
+ guest should continue to submit all other CCBs that use different
+ opcodes, but can expect continued rejections of CCBs using the
+ same opcode in the near future.
+ 2 Processing is unavailable for all CCBs using the requested CCB
+ version, and it is recommended the guest emulate the operation.
+ The guest should continue to submit all other CCBs that use
+ different CCB versions, but can expect continued rejections of
+ CCBs using the same CCB version in the near future.
+ 3 Processing is unavailable for all CCBs on the submitting vcpu,
+ and it is recommended the guest emulate the operation or resubmit
+ the CCB on a different vcpu. The guest should continue to submit
+ CCBs on all other vcpus but can expect continued rejections of all
+ CCBs on this vcpu in the near future.
+ 4 Processing is unavailable for all CCBs, and it is recommended
+ the guest emulate the operation. The guest should expect all CCB
+ submissions to be similarly rejected in the near future.
+
+
+36.3.2. ccb_info
+
+ trap# FAST_TRAP
+ function# CCB_INFO
+ arg0 address
+ ret0 status
+ ret1 CCB state
+ ret2 position
+ ret3 dax
+ ret4 queue
+
+ Requests status information on a previously submitted CCB. The previously submitted CCB is identified
+ by the 64-byte aligned real address of the CCB's completion area.
+
+ A CCB can be in one of 4 states:
+
+
+ State Value Description
+ COMPLETED 0 The CCB has been fetched and executed, and is no longer active in
+ the virtual machine.
+ ENQUEUED 1 The requested CCB is currently in a queue awaiting execution.
+ INPROGRESS 2 The CCB has been fetched and is currently being executed. It may still
+ be possible to stop the execution using the ccb_kill hypercall.
+ NOTFOUND 3 The CCB could not be located in the virtual machine, and does not
+ appear to have been executed. This may occur if the CCB was lost
+ due to a hardware error, or the CCB may not have been successfully
+ submitted to the virtual machine in the first place.
+
+ Implementation note
+ Some platforms may not be able to report CCBs that are currently being processed, and therefore
+ guest software should invoke the ccb_kill hypercall prior to assuming the requested CCB will never
+ be executed because it was in the NOTFOUND state.
+
+ The position return value is only valid when the state is ENQUEUED. The value returned is the number
+ of other CCBs ahead of the requested CCB, to provide a relative estimate of when the CCB may execute.
+
+ The dax return value is only valid when the state is ENQUEUED. The value returned is the DAX unit
+ instance identifier for the DAX unit processing the queue where the requested CCB is located. The value
+ matches the value that would have been, or was, returned by ccb_submit using the queue info flag.
+
+ The queue return value is only valid when the state is ENQUEUED. The value returned is the DAX
+ queue instance identifier for the DAX unit processing the queue where the requested CCB is located. The
+ value matches the value that would have been, or was, returned by ccb_submit using the queue info flag.
+
+36.3.2.1. Errors
+
+ EOK The request was processed and the CCB state is valid.
+ EBADALIGN address is not 64-byte aligned.
+ ENORADDR The real address provided for address is not valid.
+ EINVAL The CCB completion area contents are not valid.
+ EWOULDBLOCK Internal resource constraints prevented the CCB state from being queried at this
+ time. The guest should retry the request.
+ ENOACCESS The guest does not have permission to access the coprocessor virtual device
+ functionality.
+
+36.3.3. ccb_kill
+
+ trap# FAST_TRAP
+ function# CCB_KILL
+ arg0 address
+ ret0 status
+ ret1 result
+
+ Request to stop execution of a previously submitted CCB. The previously submitted CCB is identified by
+ the 64-byte aligned real address of the CCB's completion area.
+
+ The kill attempt can produce one of several values in the result return value, reflecting the CCB state
+ and actions taken by the Hypervisor:
+
+ Result Value Description
+ COMPLETED 0 The CCB has been fetched and executed, and is no longer active in
+ the virtual machine. It could not be killed and no action was taken.
+ DEQUEUED 1 The requested CCB was still enqueued when the kill request was
+ submitted, and has been removed from the queue. Since the CCB
+ never began execution, no memory modifications were produced by
+ it, and the completion area will never be updated. The same CCB may
+ be submitted again, if desired, with no modifications required.
+ KILLED 2 The CCB had been fetched and was being executed when the kill
+ request was submitted. The CCB execution was stopped, and the CCB
+ is no longer active in the virtual machine. The CCB completion area
+ will reflect the killed status, with the subsequent implications that
+ partial results may have been produced. Partial results may include full
+ command execution if the command was stopped just prior to writing
+ to the completion area.
+ NOTFOUND 3 The CCB could not be located in the virtual machine, and does not
+ appear to have been executed. This may occur if the CCB was lost
+ due to a hardware error, or the CCB may not have been successfully
+ submitted to the virtual machine in the first place. CCBs in this state
+ are guaranteed to never execute in the future unless resubmitted.
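+
+ As an illustration only, privileged guest code might issue a kill and act
+ on the result along these lines; sun4v_ccb_kill is the Linux wrapper for
+ this hypercall, and its exact signature should be checked against
+ arch/sparc/include/asm/hypervisor.h:
+
+
+ unsigned long result;
+ unsigned long hv_rv = sun4v_ccb_kill(ca_real_addr, &result);
+
+ if (hv_rv == HV_EOK) {
+         if (result == 1)          /* DEQUEUED: never ran */
+                 resubmit_ccb();   /* hypothetical: safe to resubmit as-is */
+         else if (result == 2)     /* KILLED: partial results possible */
+                 discard_output(); /* hypothetical */
+ }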
+
+36.3.3.1. Interactions with Pipelined CCBs
+
+ If the pipeline target CCB is killed but the pipeline source CCB was skipped, the completion area of the
+ target CCB may contain status (4,0) "Command was not run" instead of (3,7) "Command was killed".
+
+ If the pipeline source CCB is killed, the pipeline target CCB's completion status may read (1,0) "Success".
+ This does not mean the target CCB was processed; since the source CCB was killed, there was no
+ meaningful output on which the target CCB could operate.
+
+36.3.3.2. Errors
+
+ EOK The request was processed and the result is valid.
+ EBADALIGN address is not 64-byte aligned.
+ ENORADDR The real address provided for address is not valid.
+ EINVAL The CCB completion area contents are not valid.
+ EWOULDBLOCK Internal resource constraints prevented the CCB from being killed at this time.
+ The guest should retry the request.
+ ENOACCESS The guest does not have permission to access the coprocessor virtual device
+ functionality.
+
+36.3.4. dax_info
+ trap# FAST_TRAP
+ function# DAX_INFO
+ ret0 status
+ ret1 Number of enabled DAX units
+ ret2 Number of disabled DAX units
+
+ Returns the number of DAX units that are enabled for the calling guest to submit CCBs. The number of
+ DAX units that are disabled for the calling guest is also returned. A disabled DAX unit would have been
+ available for CCB submission to the calling guest had it not been offlined.
+
+36.3.4.1. Errors
+
+ EOK The request was processed and the number of enabled/disabled DAX units
+ are valid.
+
+
diff --git a/Documentation/sparc/oradax/oracle-dax.txt b/Documentation/sparc/oradax/oracle-dax.txt
new file mode 100644
index 000000000000..9d53ac93286f
--- /dev/null
+++ b/Documentation/sparc/oradax/oracle-dax.txt
@@ -0,0 +1,429 @@
+Oracle Data Analytics Accelerator (DAX)
+---------------------------------------
+
+DAX is a coprocessor which resides on the SPARC M7 (DAX1) and M8
+(DAX2) processor chips, and has direct access to the CPU's L3 caches
+as well as physical memory. It can perform several operations on data
+streams with various input and output formats. A driver provides a
+transport mechanism and has limited knowledge of the various opcodes
+and data formats. A user space library provides high level services
+and translates these into low level commands which are then passed
+into the driver and subsequently the Hypervisor and the coprocessor.
+The library is the recommended way for applications to use the
+coprocessor, and the driver interface is not intended for general use.
+This document describes the general flow of the driver, its
+structures, and its programmatic interface. It also provides example
+code sufficient to write user or kernel applications that use DAX
+functionality.
+
+The user library is open source and available at:
+ https://oss.oracle.com/git/gitweb.cgi?p=libdax.git
+
+The Hypervisor interface to the coprocessor is described in detail in
+the accompanying document, dax-hv-api.txt, which is a plain text
+excerpt of the (Oracle internal) "UltraSPARC Virtual Machine
+Specification" version 3.0.20+15, dated 2017-09-25.
+
+
+High Level Overview
+-------------------
+
+A coprocessor request is described by a Command Control Block
+(CCB). The CCB contains an opcode and various parameters. The opcode
+specifies what operation is to be done, and the parameters specify
+options, flags, sizes, and addresses. The CCB (or an array of CCBs)
+is passed to the Hypervisor, which handles queueing and scheduling of
+requests to the available coprocessor execution units. A status code
+returned indicates if the request was submitted successfully or if
+there was an error. One of the addresses given in each CCB is a
+pointer to a "completion area", which is a 128 byte memory block that
+is written by the coprocessor to provide execution status. No
+interrupt is generated upon completion; the completion area must be
+polled by software to find out when a transaction has finished, but
+the M7 and later processors provide a mechanism to pause the virtual
+processor until the completion status has been updated by the
+coprocessor. This is done using the monitored load and mwait
+instructions, which are described in more detail later. The DAX
+coprocessor was designed so that after a request is submitted, the
+kernel is no longer involved in the processing of it. The polling is
+done at the user level, which results in almost zero latency between
+completion of a request and resumption of execution of the requesting
+thread.
+
+
+Addressing Memory
+-----------------
+
+The kernel does not have access to physical memory in the Sun4v
+architecture, as there is an additional level of memory virtualization
+present. This intermediate level is called "real" memory, and the
+kernel treats this as if it were physical. The Hypervisor handles the
+translations between real memory and physical so that each logical
+domain (LDOM) can have a partition of physical memory that is isolated
+from that of other LDOMs. When the kernel sets up a virtual mapping,
+it specifies a virtual address and the real address to which it should
+be mapped.
+
+The DAX coprocessor can only operate on physical memory, so before a
+request can be fed to the coprocessor, all the addresses in a CCB must
+be converted into physical addresses. The kernel cannot do this since
+it has no visibility into physical addresses. So a CCB may contain
+either the virtual or real addresses of the buffers or a combination
+of them. An "address type" field is available for each address that
+may be given in the CCB. In all cases, the Hypervisor will translate
+all the addresses to physical before dispatching to hardware. Address
+translations are performed using the context of the process initiating
+the request.
+
+
+The Driver API
+--------------
+
+An application makes requests to the driver via the write() system
+call, and gets results (if any) via read(). The completion areas are
+made accessible via mmap(), and are read-only for the application.
+
+The request may either be an immediate command or an array of CCBs to
+be submitted to the hardware.
+
+Each open instance of the device is exclusive to the thread that
+opened it, and must be used by that thread for all subsequent
+operations. The driver open function creates a new context for the
+thread and initializes it for use. This context contains pointers and
+values used internally by the driver to keep track of submitted
+requests. The completion area buffer is also allocated, and this is
+large enough to contain the completion areas for many concurrent
+requests. When the device is closed, any outstanding transactions are
+flushed and the context is cleaned up.
+
+On a DAX1 system (M7), the device will be called "oradax1", while on a
+DAX2 system (M8) it will be "oradax2". If an application requires one
+or the other, it should simply attempt to open the appropriate
+device. Only one of the devices will exist on any given system, so the
+name can be used to determine what the platform supports.
+
+The immediate commands are CCB_DEQUEUE, CCB_KILL, and CCB_INFO. For
+all of these, success is indicated by a return value from write()
+equal to the number of bytes given in the call. Otherwise -1 is
+returned and errno is set.
+
+CCB_DEQUEUE
+
+Tells the driver to clean up resources associated with past
+requests. Since no interrupt is generated upon the completion of a
+request, the driver must be told when it may reclaim resources. No
+further status information is returned, so the user should not
+subsequently call read().
+
+CCB_KILL
+
+Kills a CCB during execution. The CCB is guaranteed to not continue
+executing once this call returns successfully. On success, read() must
+be called to retrieve the result of the action.
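+
+A minimal sketch of issuing CCB_KILL through the driver follows. The
+dax_command and ccb_kill_result structures come from
+arch/sparc/include/uapi/asm/oradax.h; the ca_offset field (the offset of
+the target CCB's completion area within the mmap'd region) is an
+assumption to verify against that header:
+
+ struct dax_command cmd;
+ struct ccb_kill_result res;
+
+ cmd.command = CCB_KILL;
+ cmd.ca_offset = 0; /* completion area offset of the CCB to kill */
+ if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
+ /* bail out */
+ }
+ if (read(fd, &res, sizeof(res)) == sizeof(res)) {
+ /* res.action reports the action taken, e.g. dequeued or killed */
+ }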
+
+CCB_INFO
+
+Retrieves information about a currently executing CCB. Note that some
+Hypervisors might return 'notfound' when the CCB is in 'inprogress'
+state. To ensure a CCB in the 'notfound' state will never be executed,
+CCB_KILL must be invoked on that CCB. Upon success, read() must be
+called to retrieve the details of the action.
+
+Submission of an array of CCBs for execution
+
+A write() whose length is a multiple of the CCB size is treated as a
+submit operation. The file offset is treated as the index of the
+completion area to use, and may be set via lseek() or using the
+pwrite() system call. If -1 is returned then errno is set to indicate
+the error. Otherwise, the return value is the length of the array that
+was actually accepted by the coprocessor. If the accepted length is
+equal to the requested length, then the submission was completely
+successful and there is no further status needed; hence, the user
+should not subsequently call read(). Partial acceptance of the CCB
+array is indicated by a return value less than the requested length,
+and read() must be called to retrieve further status information. The
+status will reflect the error caused by the first CCB that was not
+accepted, and status_data will provide additional data in some cases.
+
+MMAP
+
+The mmap() function provides access to the completion area allocated
+in the driver. Note that the completion area is not writeable by the
+user process, and the mmap call must not specify PROT_WRITE.
+
+
+Completion of a Request
+-----------------------
+
+The first byte in each completion area is the command status which is
+updated by the coprocessor hardware. Software may take advantage of
+new M7/M8 processor capabilities to efficiently poll this status byte.
+First, a "monitored load" is achieved via a Load from Alternate Space
+(ldxa, lduba, etc.) with ASI 0x84 (ASI_MONITOR_PRIMARY). Second, a
+"monitored wait" is achieved via the mwait instruction (a write to
+%asr28). This instruction is like pause in that it suspends execution
+of the virtual processor for the given number of nanoseconds, but in
+addition will terminate early when one of several events occur. If the
+block of data containing the monitored location is modified, then the
+mwait terminates. This causes software to resume execution immediately
+(without a context switch or kernel to user transition) after a
+transaction completes. Thus the latency between transaction completion
+and resumption of execution may be just a few nanoseconds.
+
+
+Application Life Cycle of a DAX Submission
+------------------------------------------
+
+ - open dax device
+ - call mmap() to get the completion area address
+ - allocate a CCB and fill in the opcode, flags, parameters, addresses, etc.
+ - submit CCB via write() or pwrite()
+ - go into a loop executing monitored load + monitored wait and
+ terminate when the command status indicates the request is complete
+ (CCB_KILL or CCB_INFO may be used any time as necessary)
+ - perform a CCB_DEQUEUE
+ - call munmap() for completion area
+ - close the dax device
+
+
+Memory Constraints
+------------------
+
+The DAX hardware operates only on physical addresses. Therefore, it is
+not aware of virtual memory mappings and the discontiguities that may
+exist in the physical memory that a virtual buffer maps to. There is
+no I/O TLB or any scatter/gather mechanism. All buffers, whether input
+or output, must reside in a physically contiguous region of memory.
+
+The Hypervisor translates all addresses within a CCB to physical
+before handing off the CCB to DAX. The Hypervisor determines the
+virtual page size for each virtual address given, and uses this to
+program a size limit for each address. This prevents the coprocessor
+from reading or writing beyond the bound of the virtual page, even
+though it is accessing physical memory directly. A simpler way of
+saying this is that a DAX operation will never "cross" a virtual page
+boundary. If an 8k virtual page is used, then the data is strictly
+limited to 8k. If a user's buffer is larger than 8k, then a larger
+page size must be used, or the transaction size will be truncated to
+8k.
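+
+A small helper makes the constraint concrete; this is an illustrative
+sketch, not part of the driver API:
+
+ /* true if [buf, buf+len) lies within a single page of the given size */
+ int fits_in_page(const void *buf, size_t len, size_t page_size)
+ {
+ uintptr_t start = (uintptr_t)buf;
+
+ return (start / page_size) == ((start + len - 1) / page_size);
+ }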
+
+Huge pages. A user may allocate huge pages using standard interfaces.
+Memory buffers residing on huge pages may be used to achieve much
+larger DAX transaction sizes, but the rules must still be followed,
+and no transaction will cross a page boundary, even a huge page. A
+major caveat is that Linux on SPARC presents 8MB as one of the huge
+page sizes. SPARC does not actually provide an 8MB hardware page size,
+and this size is synthesized by pasting together two 4MB pages. The
+reasons for this are historical, and it creates an issue because only
+half of this 8MB page can actually be used for any given buffer in a
+DAX request, and it must be either the first half or the second half;
+it cannot be a 4MB chunk in the middle, since that crosses a
+(hardware) page boundary. Note that this entire issue may be hidden by
+higher level libraries.
+
+
+CCB Structure
+-------------
+A CCB is an array of 8 64-bit words. Several of these words provide
+command opcodes, parameters, flags, etc., and the rest are addresses
+for the completion area, output buffer, and various inputs:
+
+ struct ccb {
+ u64 control;
+ u64 completion;
+ u64 input0;
+ u64 access;
+ u64 input1;
+ u64 op_data;
+ u64 output;
+ u64 table;
+ };
+
+See libdax/common/sys/dax1/dax1_ccb.h for a detailed description of
+each of these fields, and see dax-hv-api.txt for a complete description
+of the Hypervisor API available to the guest OS (i.e., the Linux kernel).
+
+The first word (control) is examined by the driver for the following:
+ - CCB version, which must be consistent with hardware version
+ - Opcode, which must be one of the documented allowable commands
+ - Address types, which must be set to "virtual" for all the addresses
+ given by the user, thereby ensuring that the application can
+ only access memory that it owns
+
+
+Example Code
+------------
+
+The DAX is accessible to both user and kernel code. The kernel code
+can make hypercalls directly while the user code must use wrappers
+provided by the driver. The setup of the CCB is nearly identical for
+both; the only difference is in preparation of the completion area. An
+example of user code is given first, with kernel code afterwards.
+
+In order to program using the driver API, the file
+arch/sparc/include/uapi/asm/oradax.h must be included.
+
+First, the proper device must be opened. For M7 it will be
+/dev/oradax1 and for M8 it will be /dev/oradax2. The simplest
+procedure is to attempt to open both, as only one will succeed:
+
+ fd = open("/dev/oradax1", O_RDWR);
+ if (fd < 0)
+ fd = open("/dev/oradax2", O_RDWR);
+ if (fd < 0)
+ /* No DAX found */
+
+Next, the completion area must be mapped:
+
+ completion_area = mmap(NULL, DAX_MMAP_LEN, PROT_READ, MAP_SHARED, fd, 0);
+
+All input and output buffers must be fully contained in one hardware
+page, since as explained above, the DAX is strictly constrained by
+virtual page boundaries. In addition, the output buffer must be
+64-byte aligned and its size must be a multiple of 64 bytes because
+the coprocessor writes in units of cache lines.
+
+This example demonstrates the DAX Scan command, which takes as input a
+vector and a match value, and produces a bitmap as the output. For
+each input element that matches the value, the corresponding bit is
+set in the output.
+
+In this example, the input vector consists of a series of single bits,
+and the match value is 0. So each 0 bit in the input will produce a 1
+in the output, and vice versa, which produces an output bitmap which
+is the input bitmap inverted.
+
+For details of all the parameters and bits used in this CCB, please
+refer to section 36.2.1.3 of the DAX Hypervisor API document, which
+describes the Scan command in detail.
+
+ ccb->control = /* Table 36.1, CCB Header Format */
+ (2L << 48) /* command = Scan Value */
+ | (3L << 40) /* output address type = primary virtual */
+ | (3L << 34) /* primary input address type = primary virtual */
+ /* Section 36.2.1, Query CCB Command Formats */
+ | (1 << 28) /* 36.2.1.1.1 primary input format = fixed width bit packed */
+ | (0 << 23) /* 36.2.1.1.2 primary input element size = 0 (1 bit) */
+ | (8 << 10) /* 36.2.1.1.6 output format = bit vector */
+ | (0 << 5) /* 36.2.1.3 First scan criteria size = 0 (1 byte) */
+ | (31 << 0); /* 36.2.1.3 Disable second scan criteria */
+
+ ccb->completion = 0; /* Completion area address, to be filled in by driver */
+
+ ccb->input0 = (unsigned long) input; /* primary input address */
+
+ ccb->access = /* Section 36.2.1.2, Data Access Control */
+ (2 << 24) /* Primary input length format = bits */
+ | (nbits - 1); /* number of bits in primary input stream, minus 1 */
+
+ ccb->input1 = 0; /* secondary input address, unused */
+
+ ccb->op_data = 0; /* scan criteria (value to be matched) */
+
+ ccb->output = (unsigned long) output; /* output address */
+
+ ccb->table = 0; /* table address, unused */
+
+The CCB submission is a write() or pwrite() system call to the
+driver. If the call fails, then a read() must be used to retrieve the
+status:
+
+ if (pwrite(fd, ccb, 64, 0) != 64) {
+ struct ccb_exec_result status;
+ read(fd, &status, sizeof(status));
+ /* bail out */
+ }
+
+After a successful submission of the CCB, the completion area may be
+polled to determine when the DAX is finished. Detailed information on
+the contents of the completion area can be found in section 36.2.2 of
+the DAX HV API document.
+
+ while (1) {
+ /* Monitored Load */
+ __asm__ __volatile__("lduba [%1] 0x84, %0\n"
+ : "=r" (status)
+ : "r" (completion_area));
+
+ if (status) /* 0 indicates command in progress */
+ break;
+
+ /* MWAIT */
+ __asm__ __volatile__("wr %%g0, 1000, %%asr28\n" ::); /* 1000 ns */
+ }
+
+A completion area status of 1 indicates successful completion of the
+CCB and validity of the output bitmap, which may be used immediately.
+All other non-zero values indicate error conditions which are
+described in section 36.2.2.
+
+ if (completion_area[0] != 1) { /* section 36.2.2, 1 = command ran and succeeded */
+ /* completion_area[0] contains the completion status */
+ /* completion_area[1] contains an error code, see 36.2.2 */
+ }
+
+After the completion area has been processed, the driver must be
+notified that it can release any resources associated with the
+request. This is done via the dequeue operation:
+
+ struct dax_command cmd;
+ cmd.command = CCB_DEQUEUE;
+ if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
+ /* bail out */
+ }
+
+Finally, normal program cleanup should be done, i.e., unmapping
+completion area, closing the dax device, freeing memory etc.
+
+[Kernel example]
+
+The only difference in using the DAX in kernel code is the treatment
+of the completion area. Unlike user applications which mmap the
+completion area allocated by the driver, kernel code must allocate its
+own memory to use for the completion area, and this address and its
+type must be given in the CCB:
+
+ ccb->control |= /* Table 36.1, CCB Header Format */
+ (3L << 32); /* completion area address type = primary virtual */
+
+ ccb->completion = (unsigned long) completion_area; /* Completion area address */
+
+The dax submit hypercall is made directly. The flags used in the
+ccb_submit call are documented in the DAX HV API in section 36.3.1.
+
+#include <asm/hypervisor.h>
+
+ hv_rv = sun4v_ccb_submit((unsigned long)ccb, 64,
+ HV_CCB_QUERY_CMD |
+ HV_CCB_ARG0_PRIVILEGED | HV_CCB_ARG0_TYPE_PRIMARY |
+ HV_CCB_VA_PRIVILEGED,
+ 0, &bytes_accepted, &status_data);
+
+ if (hv_rv != HV_EOK) {
+ /* hv_rv is an error code, status_data contains */
+ /* potential additional status, see 36.3.1.1 */
+ }
+
+After the submission, the completion area polling code is identical to
+that in user land:
+
+ while (1) {
+ /* Monitored Load */
+ __asm__ __volatile__("lduba [%1] 0x84, %0\n"
+ : "=r" (status)
+ : "r" (completion_area));
+
+ if (status) /* 0 indicates command in progress */
+ break;
+
+ /* MWAIT */
+ __asm__ __volatile__("wr %%g0, 1000, %%asr28\n" ::); /* 1000 ns */
+ }
+
+ if (completion_area[0] != 1) { /* section 36.2.2, 1 = command ran and succeeded */
+ /* completion_area[0] contains the completion status */
+ /* completion_area[1] contains an error code, see 36.2.2 */
+ }
+
+The output bitmap is ready for consumption immediately after the
+completion status indicates success.
diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
index cef4ad19624c..b97228d2cc0e 100644
--- a/Documentation/sphinx/kfigure.py
+++ b/Documentation/sphinx/kfigure.py
@@ -81,7 +81,7 @@ __version__ = '1.0.0'
# -------------
def which(cmd):
- """Searches the ``cmd`` in the ``PATH`` enviroment.
+ """Searches the ``cmd`` in the ``PATH`` environment.
This *which* searches the PATH for executable ``cmd`` . First match is
returned, if nothing is found, ``None` is returned.
@@ -419,7 +419,7 @@ def visit_kernel_render(self, node):
tmp_ext = RENDER_MARKUP_EXT.get(srclang, None)
if tmp_ext is None:
- app.warn('kernel-render: "%s" unknow / include raw.' % (srclang))
+ app.warn('kernel-render: "%s" unknown / include raw.' % (srclang))
return
if not dot_cmd and tmp_ext == '.dot':
@@ -482,7 +482,7 @@ class KernelRender(Figure):
srclang = self.arguments[0].strip()
if srclang not in RENDER_MARKUP_EXT.keys():
return [self.state_machine.reporter.warning(
- 'Unknow source language "%s", use one of: %s.' % (
+ 'Unknown source language "%s", use one of: %s.' % (
srclang, ",".join(RENDER_MARKUP_EXT.keys())),
line=self.lineno)]
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 694968c7523c..412314eebda6 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -34,6 +34,7 @@ show up in /proc/sys/kernel:
- hostname
- hotplug
- hardlockup_all_cpu_backtrace
+- hardlockup_panic
- hung_task_panic
- hung_task_check_count
- hung_task_timeout_secs
@@ -313,6 +314,19 @@ will be initiated.
1: on detection capture more debug information.
==============================================================
+hardlockup_panic:
+
+This parameter can be used to control whether the kernel panics
+when a hard lockup is detected.
+
+ 0 - don't panic on hard lockup
+ 1 - panic on hard lockup
+
+See Documentation/lockup-watchdogs.txt for more information. This can
+also be set using the nmi_watchdog kernel parameter.
+
+==============================================================
+
hotplug:
Path for the hotplug policy agent.
@@ -377,7 +391,8 @@ kptr_restrict:
This toggle indicates whether restrictions are placed on
exposing kernel addresses via /proc and other interfaces.
-When kptr_restrict is set to (0), the default, there are no restrictions.
+When kptr_restrict is set to 0 (the default) the address is hashed before
+printing. (This is equivalent to %p.)
When kptr_restrict is set to (1), kernel pointers printed using the %pK
format specifier will be replaced with 0's unless the user has CAP_SYSLOG
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index b67044a2575f..35c62f522754 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -95,7 +95,9 @@ dev_weight
--------------
The maximum number of packets that kernel can handle on a NAPI interrupt,
-it's a Per-CPU variable.
+it's a Per-CPU variable. For drivers that support LRO or GRO_HW, a hardware
+aggregated packet is counted as one packet in this context.
+
Default: 64
dev_weight_rx_bias
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 5025ff9307e6..ff234d229cbb 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -30,7 +30,6 @@ Currently, these files are in /proc/sys/vm:
- dirty_writeback_centisecs
- drop_caches
- extfrag_threshold
-- hugepages_treat_as_movable
- hugetlb_shm_group
- laptop_mode
- legacy_va_layout
@@ -261,30 +260,6 @@ any throttling.
==============================================================
-hugepages_treat_as_movable
-
-This parameter controls whether we can allocate hugepages from ZONE_MOVABLE
-or not. If set to non-zero, hugepages can be allocated from ZONE_MOVABLE.
-ZONE_MOVABLE is created when kernel boot parameter kernelcore= is specified,
-so this parameter has no effect if used without kernelcore=.
-
-Hugepage migration is now available in some situations which depend on the
-architecture and/or the hugepage size. If a hugepage supports migration,
-allocation from ZONE_MOVABLE is always enabled for the hugepage regardless
-of the value of this parameter.
-IOW, this parameter affects only non-migratable hugepages.
-
-Assuming that hugepages are not migratable in your system, one usecase of
-this parameter is that users can make hugepage pool more extensible by
-enabling the allocation from ZONE_MOVABLE. This is because on ZONE_MOVABLE
-page reclaim/migration/compaction work more and you can get contiguous
-memory more likely. Note that using ZONE_MOVABLE for non-migratable
-hugepages can do harm to other features like memory hotremove (because
-memory hotremove expects that memory blocks on ZONE_MOVABLE are always
-removable,) so it's a trade-off responsible for the users.
-
-==============================================================
-
hugetlb_shm_group
hugetlb_shm_group contains group id that is allowed to create SysV
diff --git a/Documentation/thermal/cpu-cooling-api.txt b/Documentation/thermal/cpu-cooling-api.txt
index 71653584cd03..7df567eaea1a 100644
--- a/Documentation/thermal/cpu-cooling-api.txt
+++ b/Documentation/thermal/cpu-cooling-api.txt
@@ -26,39 +26,16 @@ the user. The registration APIs returns the cooling device pointer.
clip_cpus: cpumask of cpus where the frequency constraints will happen.
1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
- struct device_node *np, const struct cpumask *clip_cpus)
+ struct cpufreq_policy *policy)
This interface function registers the cpufreq cooling device with
the name "thermal-cpufreq-%x" linking it with a device tree node, in
order to bind it via the thermal DT code. This api can support multiple
instances of cpufreq cooling devices.
- np: pointer to the cooling device device tree node
- clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ policy: CPUFreq policy.
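+
+A rough usage sketch (illustrative only; the foo_ names are made up and
+a cpufreq driver would typically do this once the policy is ready):
+
+  static struct thermal_cooling_device *foo_cdev;
+
+  static void foo_cpufreq_ready(struct cpufreq_policy *policy)
+  {
+          foo_cdev = of_cpufreq_cooling_register(policy);
+          /* error handling elided; check the return value in real code */
+  }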
-1.1.3 struct thermal_cooling_device *cpufreq_power_cooling_register(
- const struct cpumask *clip_cpus, u32 capacitance,
- get_static_t plat_static_func)
-
-Similar to cpufreq_cooling_register, this function registers a cpufreq
-cooling device. Using this function, the cooling device will
-implement the power extensions by using a simple cpu power model. The
-cpus must have registered their OPPs using the OPP library.
-
-The additional parameters are needed for the power model (See 2. Power
-models). "capacitance" is the dynamic power coefficient (See 2.1
-Dynamic power). "plat_static_func" is a function to calculate the
-static power consumed by these cpus (See 2.2 Static power).
-
-1.1.4 struct thermal_cooling_device *of_cpufreq_power_cooling_register(
- struct device_node *np, const struct cpumask *clip_cpus, u32 capacitance,
- get_static_t plat_static_func)
-
-Similar to cpufreq_power_cooling_register, this function register a
-cpufreq cooling device with power extensions using the device tree
-information supplied by the np parameter.
-
-1.1.5 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
This interface function unregisters the "thermal-cpufreq-%x" cooling device.
@@ -67,20 +44,14 @@ information supplied by the np parameter.
2. Power models
The power API registration functions provide a simple power model for
-CPUs. The current power is calculated as dynamic + (optionally)
-static power. This power model requires that the operating-points of
+CPUs. The current power is calculated as dynamic power (static power is
+not currently supported). This power model requires that the operating-points of
the CPUs are registered using the kernel's opp library and the
`cpufreq_frequency_table` is assigned to the `struct device` of the
cpu. If you are using CONFIG_CPUFREQ_DT then the
`cpufreq_frequency_table` should already be assigned to the cpu
device.
-The `plat_static_func` parameter of `cpufreq_power_cooling_register()`
-and `of_cpufreq_power_cooling_register()` is optional. If you don't
-provide it, only dynamic power will be considered.
-
-2.1 Dynamic power
-
The dynamic power consumption of a processor depends on many factors.
For a given processor implementation the primary factors are:
@@ -119,79 +90,3 @@ mW/MHz/uVolt^2. Typical values for mobile CPUs might lie in range
from 100 to 500. For reference, the approximate values for the SoC in
ARM's Juno Development Platform are 530 for the Cortex-A57 cluster and
140 for the Cortex-A53 cluster.
-
-
-2.2 Static power
-
-Static leakage power consumption depends on a number of factors. For a
-given circuit implementation the primary factors are:
-
-- Time the circuit spends in each 'power state'
-- Temperature
-- Operating voltage
-- Process grade
-
-The time the circuit spends in each 'power state' for a given
-evaluation period at first order means OFF or ON. However,
-'retention' states can also be supported that reduce power during
-inactive periods without loss of context.
-
-Note: The visibility of state entries to the OS can vary, according to
-platform specifics, and this can then impact the accuracy of a model
-based on OS state information alone. It might be possible in some
-cases to extract more accurate information from system resources.
-
-The temperature, operating voltage and process 'grade' (slow to fast)
-of the circuit are all significant factors in static leakage power
-consumption. All of these have complex relationships to static power.
-
-Circuit implementation specific factors include the chosen silicon
-process as well as the type, number and size of transistors in both
-the logic gates and any RAM elements included.
-
-The static power consumption modelling must take into account the
-power managed regions that are implemented. Taking the example of an
-ARM processor cluster, the modelling would take into account whether
-each CPU can be powered OFF separately or if only a single power
-region is implemented for the complete cluster.
-
-In one view, there are others, a static power consumption model can
-then start from a set of reference values for each power managed
-region (e.g. CPU, Cluster/L2) in each state (e.g. ON, OFF) at an
-arbitrary process grade, voltage and temperature point. These values
-are then scaled for all of the following: the time in each state, the
-process grade, the current temperature and the operating voltage.
-However, since both implementation specific and complex relationships
-dominate the estimate, the appropriate interface to the model from the
-cpu cooling device is to provide a function callback that calculates
-the static power in this platform. When registering the cpu cooling
-device pass a function pointer that follows the `get_static_t`
-prototype:
-
- int plat_get_static(cpumask_t *cpumask, int interval,
- unsigned long voltage, u32 &power);
-
-`cpumask` is the cpumask of the cpus involved in the calculation.
-`voltage` is the voltage at which they are operating. The function
-should calculate the average static power for the last `interval`
-milliseconds. It returns 0 on success, -E* on error. If it
-succeeds, it should store the static power in `power`. Reading the
-temperature of the cpus described by `cpumask` is left for
-plat_get_static() to do as the platform knows best which thermal
-sensor is closest to the cpu.
-
-If `plat_static_func` is NULL, static power is considered to be
-negligible for this platform and only dynamic power is considered.
-
-The platform specific callback can then use any combination of tables
-and/or equations to permute the estimated value. Process grade
-information is not passed to the model since access to such data, from
-on-chip measurement capability or manufacture time data, is platform
-specific.
-
-Note: the significance of static power for CPUs in comparison to
-dynamic power is highly dependent on implementation. Given the
-potential complexity in implementation, the importance and accuracy of
-its inclusion when using cpu cooling devices should be assessed on a
-case by case basis.
-
diff --git a/Documentation/trace/ftrace-uses.rst b/Documentation/trace/ftrace-uses.rst
index 8494a801d341..3aed560a12ee 100644
--- a/Documentation/trace/ftrace-uses.rst
+++ b/Documentation/trace/ftrace-uses.rst
@@ -42,9 +42,9 @@ as well as what protections the callback will perform and not require
ftrace to handle.
There is only one field that is needed to be set when registering
-an ftrace_ops with ftrace::
+an ftrace_ops with ftrace:
-.. code-block: c
+.. code-block:: c
struct ftrace_ops ops = {
.func = my_callback_func,
@@ -81,12 +81,12 @@ may take some time to finish.
The callback function
=====================
-The prototype of the callback function is as follows (as of v4.14)::
+The prototype of the callback function is as follows (as of v4.14):
-.. code-block: c
+.. code-block:: c
- void callback_func(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs);
+ void callback_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs);
@ip
This is the instruction pointer of the function that is being traced.
@@ -176,10 +176,10 @@ Filtering which functions to trace
If a callback is only to be called from specific functions, a filter must be
set up. The filters are added by name, or ip if it is known.
-.. code-block: c
+.. code-block:: c
- int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
- int len, int reset);
+ int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
@ops
The ops to set the filter with
@@ -202,9 +202,9 @@ See Filter Commands in :file:`Documentation/trace/ftrace.txt`.
To just trace the schedule function:
-.. code-block: c
+.. code-block:: c
- ret = ftrace_set_filter(&ops, "schedule", strlen("schedule"), 0);
+ ret = ftrace_set_filter(&ops, "schedule", strlen("schedule"), 0);
To add more functions, call the ftrace_set_filter() more than once with the
@reset parameter set to zero. To remove the current filter set and replace it
@@ -212,17 +212,17 @@ with new functions defined by @buf, have @reset be non-zero.
To remove all the filtered functions and trace all functions:
-.. code-block: c
+.. code-block:: c
- ret = ftrace_set_filter(&ops, NULL, 0, 1);
+ ret = ftrace_set_filter(&ops, NULL, 0, 1);
Sometimes more than one function has the same name. To trace just a specific
function in this case, ftrace_set_filter_ip() can be used.
-.. code-block: c
+.. code-block:: c
- ret = ftrace_set_filter_ip(&ops, ip, 0, 0);
+ ret = ftrace_set_filter_ip(&ops, ip, 0, 0);
Note that the ip must be the address where the call to fentry or mcount is
located in the function. This function is used by perf and kprobes that
@@ -237,10 +237,10 @@ be called by any function.
An empty "notrace" list means to allow all functions defined by the filter
to be traced.
-.. code-block: c
+.. code-block:: c
- int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
- int len, int reset);
+ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
This takes the same parameters as ftrace_set_filter() but will add the
functions it finds to not be traced. This is a separate list from the
@@ -251,7 +251,7 @@ that match @buf to it.
Clearing the "notrace" list is the same as clearing the filter list
-.. code-block: c
+.. code-block:: c
ret = ftrace_set_notrace(&ops, NULL, 0, 1);
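+
+Putting the pieces together, a rough end-to-end sketch (illustrative
+names; a module init context is assumed):
+
+.. code-block:: c
+
+	static void my_callback_func(unsigned long ip, unsigned long parent_ip,
+				     struct ftrace_ops *op, struct pt_regs *regs)
+	{
+		/* called for each traced function not in the notrace list */
+	}
+
+	static struct ftrace_ops ops = {
+		.func = my_callback_func,
+	};
+
+	static int __init my_module_init(void)
+	{
+		ftrace_set_filter(&ops, "schedule*", strlen("schedule*"), 1);
+		ftrace_set_notrace(&ops, "schedule_timeout",
+				   strlen("schedule_timeout"), 0);
+		return register_ftrace_function(&ops);
+	}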
@@ -264,29 +264,29 @@ If a filter is in place, and the @reset is non-zero, and @buf contains a
matching glob to functions, the switch will happen during the time of
the ftrace_set_filter() call. At no time will all functions call the callback.
-.. code-block: c
+.. code-block:: c
- ftrace_set_filter(&ops, "schedule", strlen("schedule"), 1);
+ ftrace_set_filter(&ops, "schedule", strlen("schedule"), 1);
- register_ftrace_function(&ops);
+ register_ftrace_function(&ops);
- msleep(10);
+ msleep(10);
- ftrace_set_filter(&ops, "try_to_wake_up", strlen("try_to_wake_up"), 1);
+ ftrace_set_filter(&ops, "try_to_wake_up", strlen("try_to_wake_up"), 1);
is not the same as:
-.. code-block: c
+.. code-block:: c
- ftrace_set_filter(&ops, "schedule", strlen("schedule"), 1);
+ ftrace_set_filter(&ops, "schedule", strlen("schedule"), 1);
- register_ftrace_function(&ops);
+ register_ftrace_function(&ops);
- msleep(10);
+ msleep(10);
- ftrace_set_filter(&ops, NULL, 0, 1);
+ ftrace_set_filter(&ops, NULL, 0, 1);
- ftrace_set_filter(&ops, "try_to_wake_up", strlen("try_to_wake_up"), 0);
+ ftrace_set_filter(&ops, "try_to_wake_up", strlen("try_to_wake_up"), 0);
As the latter will have a short time where all functions will call
the callback, between the time of the reset, and the time of the
diff --git a/Documentation/usb/chipidea.txt b/Documentation/usb/chipidea.txt
index edf7cdfddc88..d1eedc01b00a 100644
--- a/Documentation/usb/chipidea.txt
+++ b/Documentation/usb/chipidea.txt
@@ -23,13 +23,13 @@ cat /sys/kernel/debug/ci_hdrc.0/registers
2) Connect 2 boards with a usb cable: one end a micro A plug, the other end
 a micro B plug.
- The A-device(with micro A plug inserted) should enumrate B-device.
+ The A-device (with micro A plug inserted) should enumerate B-device.
3) Role switch
On B-device:
echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
- B-device should take host role and enumrate A-device.
+ B-device should take host role and enumerate A-device.
4) A-device switch back to host.
On B-device:
@@ -40,13 +40,13 @@ cat /sys/kernel/debug/ci_hdrc.0/registers
side by answering the polling from B-Host, this can be done on A-device:
echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
- A-device should switch back to host and enumrate B-device.
+ A-device should switch back to host and enumerate B-device.
5) Remove B-device(unplug micro B plug) and insert again in 10 seconds,
- A-device should enumrate B-device again.
+ A-device should enumerate B-device again.
6) Remove B-device(unplug micro B plug) and insert again after 10 seconds,
- A-device should NOT enumrate B-device.
+ A-device should NOT enumerate B-device.
if A-device wants to use bus:
On A-device:
@@ -67,7 +67,7 @@ cat /sys/kernel/debug/ci_hdrc.0/registers
On B-device:
echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
- A-device should resume usb bus and enumrate B-device.
+ A-device should resume usb bus and enumerate B-device.
1.3 Reference document
----------------------
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index 441a4b9b666f..5908a21fddb6 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -693,7 +693,7 @@ such specification consists of a number of lines with an interval value
in each line. The rules stated above are best illustrated with an example:
# mkdir functions/uvc.usb0/control/header/h
-# cd functions/uvc.usb0/control/header/h
+# cd functions/uvc.usb0/control/
# ln -s header/h class/fs
# ln -s header/h class/ss
# mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p
diff --git a/Documentation/usb/usbip_protocol.txt b/Documentation/usb/usbip_protocol.txt
index 16b6fe27284c..c7a0f4c7e7f1 100644
--- a/Documentation/usb/usbip_protocol.txt
+++ b/Documentation/usb/usbip_protocol.txt
@@ -274,7 +274,6 @@ USBIP_CMD_SUBMIT: Submit an URB
URB_SHORT_NOT_OK | 0x00000001 | only in | only in | only in | no
URB_ISO_ASAP | 0x00000002 | no | no | no | yes
URB_NO_TRANSFER_DMA_MAP | 0x00000004 | yes | yes | yes | yes
- URB_NO_FSBR | 0x00000020 | yes | no | no | no
URB_ZERO_PACKET | 0x00000040 | no | no | only out | no
URB_NO_INTERRUPT | 0x00000080 | yes | yes | yes | yes
URB_FREE_BUFFER | 0x00000100 | yes | yes | yes | yes
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 57d3ee9e4bde..fc3ae951bc07 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3403,6 +3403,52 @@ invalid, if invalid pages are written to (e.g. after the end of memory)
or if no page table is present for the addresses (e.g. when using
hugepages).
+4.108 KVM_PPC_GET_CPU_CHAR
+
+Capability: KVM_CAP_PPC_GET_CPU_CHAR
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_ppc_cpu_char (out)
+Returns: 0 on successful completion
+ -EFAULT if struct kvm_ppc_cpu_char cannot be written
+
+This ioctl gives userspace information about certain characteristics
+of the CPU relating to speculative execution of instructions and
+possible information leakage resulting from speculative execution (see
+CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754). The information is
+returned in struct kvm_ppc_cpu_char, which looks like this:
+
+struct kvm_ppc_cpu_char {
+ __u64 character; /* characteristics of the CPU */
+ __u64 behaviour; /* recommended software behaviour */
+ __u64 character_mask; /* valid bits in character */
+ __u64 behaviour_mask; /* valid bits in behaviour */
+};
+
+For extensibility, the character_mask and behaviour_mask fields
+indicate which bits of character and behaviour have been filled in by
+the kernel. If the set of defined bits is extended in future then
+userspace will be able to tell whether it is running on a kernel that
+knows about the new bits.
+
+The character field describes attributes of the CPU which can help
+with preventing inadvertent information disclosure - specifically,
+whether there is an instruction to flash-invalidate the L1 data cache
+(ori 30,30,0 or mtspr SPRN_TRIG2,rN), whether the L1 data cache is set
+to a mode where entries can only be used by the thread that created
+them, whether the bcctr[l] instruction prevents speculation, and
+whether a speculation barrier instruction (ori 31,31,0) is provided.
+
+The behaviour field describes actions that software should take to
+prevent inadvertent information disclosure, and thus describes which
+vulnerabilities the hardware is subject to; specifically whether the
+L1 data cache should be flushed when returning to user mode from the
+kernel, and whether a speculation barrier should be placed between an
+array bounds check and the array access.
+
+These fields use the same bit definitions as the new
+H_GET_CPU_CHARACTERISTICS hypercall.
+
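+A minimal userspace sketch of calling this ioctl (illustrative only;
+vm_fd is assumed to be an open VM file descriptor and error handling
+is elided):
+
+	struct kvm_ppc_cpu_char cc;
+
+	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0) {
+		/* only bits set in the *_mask fields are defined */
+		cc.character &= cc.character_mask;
+		cc.behaviour &= cc.behaviour_mask;
+	}
+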
5. The kvm_run structure
------------------------
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index 59cbc803aad6..faf077d50d42 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -20,19 +20,20 @@ options.
The /proc/meminfo file provides information about the total number of
persistent hugetlb pages in the kernel's huge page pool. It also displays
-information about the number of free, reserved and surplus huge pages and the
-default huge page size. The huge page size is needed for generating the
-proper alignment and size of the arguments to system calls that map huge page
-regions.
+default huge page size and information about the number of free, reserved
+and surplus huge pages in the pool of huge pages of default size.
+The huge page size is needed for generating the proper alignment and
+size of the arguments to system calls that map huge page regions.
The output of "cat /proc/meminfo" will include lines like:
.....
-HugePages_Total: vvv
-HugePages_Free: www
-HugePages_Rsvd: xxx
-HugePages_Surp: yyy
-Hugepagesize: zzz kB
+HugePages_Total: uuu
+HugePages_Free: vvv
+HugePages_Rsvd: www
+HugePages_Surp: xxx
+Hugepagesize: yyy kB
+Hugetlb: zzz kB
where:
HugePages_Total is the size of the pool of huge pages.
@@ -47,6 +48,14 @@ HugePages_Surp is short for "surplus," and is the number of huge pages in
the pool above the value in /proc/sys/vm/nr_hugepages. The
maximum number of surplus huge pages is controlled by
/proc/sys/vm/nr_overcommit_hugepages.
+Hugepagesize is the default hugepage size (in kB).
+Hugetlb is the total amount of memory (in kB) consumed by huge
+ pages of all sizes.
+ If huge pages of different sizes are in use, this number
+ will exceed HugePages_Total * Hugepagesize. To get more
+ detailed information, please refer to
+ /sys/kernel/mm/hugepages (described below).
+
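+For example (hypothetical numbers): with a pool of 10 default-sized
+(2048 kB) huge pages plus two 1 GB (1048576 kB) huge pages, the
+product HugePages_Total * Hugepagesize is 20480 kB while Hugetlb
+reports 2117632 kB.
+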
/proc/filesystems should also show a filesystem of type "hugetlbfs" configured
in the kernel.
diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
index 6ae89a9edf2a..e912d7eee769 100644
--- a/Documentation/vm/hwpoison.txt
+++ b/Documentation/vm/hwpoison.txt
@@ -104,7 +104,7 @@ madvise(MADV_HWPOISON, ....)
hwpoison-inject module through debugfs
-/sys/debug/hwpoison/
+/sys/kernel/debug/hwpoison/
corrupt-pfn
diff --git a/Documentation/w1/masters/w1-gpio b/Documentation/w1/masters/w1-gpio
index af5d3b4aa851..623961d9e83f 100644
--- a/Documentation/w1/masters/w1-gpio
+++ b/Documentation/w1/masters/w1-gpio
@@ -8,17 +8,27 @@ Description
-----------
GPIO 1-wire bus master driver. The driver uses the GPIO API to control the
-wire and the GPIO pin can be specified using platform data.
+wire and the GPIO pin can be specified using GPIO machine descriptor tables.
+It is also possible to define the master using the device tree; see
+Documentation/devicetree/bindings/w1/w1-gpio.txt
Example (mach-at91)
-------------------
+#include <linux/gpio/machine.h>
#include <linux/w1-gpio.h>
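+
+/* Describe the data pin to gpiolib; .dev_id must match the name of the
+ * "w1-gpio" platform device so the driver can look the GPIO up. */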
+static struct gpiod_lookup_table foo_w1_gpiod_table = {
+ .dev_id = "w1-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("at91-gpio", AT91_PIN_PB20, NULL, 0,
+ GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN),
+ },
+};
+
static struct w1_gpio_platform_data foo_w1_gpio_pdata = {
- .pin = AT91_PIN_PB20,
- .is_open_drain = 1,
+ .ext_pullup_enable_pin = -EINVAL,
};
static struct platform_device foo_w1_device = {
@@ -30,4 +40,5 @@ static struct platform_device foo_w1_device = {
...
at91_set_GPIO_periph(foo_w1_gpio_pdata.pin, 1);
at91_set_multi_drive(foo_w1_gpio_pdata.pin, 1);
+ gpiod_add_lookup_table(&foo_w1_gpiod_table);
platform_device_register(&foo_w1_device);
diff --git a/Documentation/w1/w1.generic b/Documentation/w1/w1.generic
index b3ffaf8cfab2..c51b1ab012d0 100644
--- a/Documentation/w1/w1.generic
+++ b/Documentation/w1/w1.generic
@@ -76,7 +76,7 @@ See struct w1_bus_master definition in w1.h for details.
w1 master sysfs interface
------------------------------------------------------------------
-<xx-xxxxxxxxxxxxx> - A directory for a found device. The format is family-serial
+<xx-xxxxxxxxxxxx> - A directory for a found device. The format is family-serial
bus - (standard) symlink to the w1 bus
driver - (standard) symlink to the w1 driver
w1_master_add - (rw) manually register a slave device
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index 6851854cf69d..756fd76b78a6 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -7,15 +7,24 @@ Tony Luck <tony.luck@intel.com>
Vikas Shivappa <vikas.shivappa@intel.com>
This feature is enabled by the CONFIG_INTEL_RDT Kconfig and the
-X86 /proc/cpuinfo flag bits "rdt", "cqm", "cat_l3" and "cdp_l3".
+X86 /proc/cpuinfo flag bits:
+RDT (Resource Director Technology) Allocation - "rdt_a"
+CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
+CDP (Code and Data Prioritization) - "cdp_l3", "cdp_l2"
+CQM (Cache QoS Monitoring) - "cqm_llc", "cqm_occup_llc"
+MBM (Memory Bandwidth Monitoring) - "cqm_mbm_total", "cqm_mbm_local"
+MBA (Memory Bandwidth Allocation) - "mba"
To use the feature mount the file system:
- # mount -t resctrl resctrl [-o cdp] /sys/fs/resctrl
+ # mount -t resctrl resctrl [-o cdp[,cdpl2]] /sys/fs/resctrl
mount options are:
"cdp": Enable code/data prioritization in L3 cache allocations.
+"cdpl2": Enable code/data prioritization in L2 cache allocations.
+
+L2 and L3 CDP are controlled separately.
RDT features are orthogonal. A particular system may support only
monitoring, only control, or both monitoring and control.
diff --git a/Documentation/x86/microcode.txt b/Documentation/x86/microcode.txt
index f57e1b45e628..79fdb4a8148a 100644
--- a/Documentation/x86/microcode.txt
+++ b/Documentation/x86/microcode.txt
@@ -108,12 +108,11 @@ packages already put them there.
====================
The loader also supports loading of builtin microcode supplied through
-the regular firmware builtin method CONFIG_FIRMWARE_IN_KERNEL. Only
-64-bit is currently supported.
+the regular builtin firmware method CONFIG_EXTRA_FIRMWARE. Only 64-bit is
+currently supported.
Here's an example:
-CONFIG_FIRMWARE_IN_KERNEL=y
CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin"
CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
new file mode 100644
index 000000000000..5cd58439ad2d
--- /dev/null
+++ b/Documentation/x86/pti.txt
@@ -0,0 +1,186 @@
+Overview
+========
+
+Page Table Isolation (pti, previously known as KAISER[1]) is a
+countermeasure against attacks on the shared user/kernel address
+space such as the "Meltdown" approach[2].
+
+To mitigate this class of attacks, we create an independent set of
+page tables for use only when running userspace applications. When
+the kernel is entered via syscalls, interrupts or exceptions, the
+page tables are switched to the full "kernel" copy. When the system
+switches back to user mode, the user copy is used again.
+
+The userspace page tables contain only a minimal amount of kernel
+data: only what is needed to enter/exit the kernel such as the
+entry/exit functions themselves and the interrupt descriptor table
+(IDT). There are a few strictly unnecessary things that get mapped
+such as the first C function when entering an interrupt (see
+comments in pti.c).
+
+This approach helps to ensure that side-channel attacks leveraging
+the paging structures do not function when PTI is enabled. It can be
+enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time.
+Once enabled at compile-time, it can be disabled at boot with the
+'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt).
+
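+Whether PTI is active on a running system can be checked from
+userspace through sysfs; a minimal sketch (assuming the
+/sys/devices/system/cpu/vulnerabilities interface is present):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		char buf[64] = "";
+		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/meltdown", "r");
+
+		if (f) {
+			/* reports e.g. "Mitigation: PTI" when PTI is enabled */
+			if (fgets(buf, sizeof(buf), f))
+				printf("%s", buf);
+			fclose(f);
+		}
+		return 0;
+	}
+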
+Page Table Management
+=====================
+
+When PTI is enabled, the kernel manages two sets of page tables.
+The first set is very similar to the single set which is present in
+kernels without PTI. This includes a complete mapping of userspace
+that the kernel can use for things like copy_to_user().
+
+Although _complete_, the user portion of the kernel page tables is
+crippled by setting the NX bit in the top level. This ensures
+that any missed kernel->user CR3 switch will immediately crash
+userspace upon executing its first instruction.
+
+The userspace page tables map only the kernel data needed to enter
+and exit the kernel. This data is entirely contained in the 'struct
+cpu_entry_area' structure which is placed in the fixmap which gives
+each CPU's copy of the area a compile-time-fixed virtual address.
+
+For new userspace mappings, the kernel makes the entries in its
+page tables like normal. The only difference is when the kernel
+makes entries in the top (PGD) level. In addition to setting the
+entry in the main kernel PGD, a copy of the entry is made in the
+userspace page tables' PGD.
+
+This sharing at the PGD level also inherently shares all the lower
+layers of the page tables. This leaves a single, shared set of
+userspace page tables to manage. One PTE to lock, one set of
+accessed bits, dirty bits, etc...
+
+Overhead
+========
+
+Protection against side-channel attacks is important. But,
+this protection comes at a cost:
+
+1. Increased Memory Use
+ a. Each process now needs an order-1 PGD instead of order-0.
+ (Consumes an additional 4k per process).
+ b. The 'cpu_entry_area' structure must be 2MB in size and 2MB
+ aligned so that it can be mapped by setting a single PMD
+ entry. This consumes nearly 2MB of RAM once the kernel
+ is decompressed, but no space in the kernel image itself.
+
+2. Runtime Cost
+ a. CR3 manipulation to switch between the page table copies
+ must be done at interrupt, syscall, and exception entry
+ and exit (it can be skipped when the kernel is interrupted,
+ though.) Moves to CR3 are on the order of a hundred
+ cycles, and are required at every entry and exit.
+ b. A "trampoline" must be used for SYSCALL entry. This
+ trampoline depends on a smaller set of resources than the
+ non-PTI SYSCALL entry code, so requires mapping fewer
+ things into the userspace page tables. The downside is
+ that stacks must be switched at entry time.
+ c. Global pages are disabled for all kernel structures not
+ mapped into both kernel and userspace page tables. This
+ feature of the MMU allows different processes to share TLB
+ entries mapping the kernel. Losing the feature means more
+ TLB misses after a context switch. The actual loss of
+ performance is very small, however, never exceeding 1%.
+ d. Process Context IDentifiers (PCID) is a CPU feature that
+ allows us to skip flushing the entire TLB when switching page
+ tables by setting a special bit in CR3 when the page tables
+ are changed. This makes switching the page tables (at context
+ switch, or kernel entry/exit) cheaper. But, on systems with
+ PCID support, the context switch code must flush both the user
+ and kernel entries out of the TLB. The user PCID TLB flush is
+ deferred until the exit to userspace, minimizing the cost.
+ See intel.com/sdm for the gory PCID/INVPCID details.
+ e. The userspace page tables must be populated for each new
+ process. Even without PTI, the shared kernel mappings
+ are created by copying top-level (PGD) entries into each
+ new process. But, with PTI, there are now *two* kernel
+ mappings: one in the kernel page tables that maps everything
+ and one for the entry/exit structures. At fork(), we need to
+ copy both.
+ f. In addition to the fork()-time copying, there must also
+ be an update to the userspace PGD any time a set_pgd() is done
+ on a PGD used to map userspace. This ensures that the kernel
+ and userspace copies always map the same userspace
+ memory.
+ g. On systems without PCID support, each CR3 write flushes
+ the entire TLB. That means that each syscall, interrupt
+ or exception flushes the TLB.
+ h. INVPCID is a TLB-flushing instruction which allows flushing
+ of TLB entries for non-current PCIDs. Some systems support
+ PCIDs, but do not support INVPCID. On these systems, addresses
+ can only be flushed from the TLB for the current PCID. When
+ flushing a kernel address, we need to flush all PCIDs, so a
+ single kernel address flush will require a TLB-flushing CR3
+ write upon the next use of every PCID.
+
+Possible Future Work
+====================
+1. We can be more careful about not writing to CR3 unless its
+   value actually changes.
+2. Allow PTI to be enabled/disabled at runtime in addition to the
+ boot-time switching.
+
+Testing
+========
+
+To test stability of PTI, the following test procedure is recommended,
+ideally doing all of these in parallel:
+
+1. Set CONFIG_DEBUG_ENTRY=y
+2. Run several copies of all of the tools/testing/selftests/x86/ tests
+ (excluding MPX and protection_keys) in a loop on multiple CPUs for
+ several minutes. These tests frequently uncover corner cases in the
+ kernel entry code. In general, old kernels might cause these tests
+ themselves to crash, but they should never crash the kernel.
+3. Run the 'perf' tool in a mode (top or record) that generates many
+ frequent performance monitoring non-maskable interrupts (see "NMI"
+ in /proc/interrupts). This exercises the NMI entry/exit code which
+ is known to trigger bugs in code paths that did not expect to be
+ interrupted, including nested NMIs. Using "-c" boosts the rate of
+ NMIs, and using two -c with separate counters encourages nested NMIs
+ and less deterministic behavior.
+
+ while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done
+
+4. Launch a KVM virtual machine.
+5. Run 32-bit binaries on systems supporting the SYSCALL instruction.
+ This has been a lightly-tested code path and needs extra scrutiny.
+
+Debugging
+=========
+
+Bugs in PTI cause a few different signatures of crashes
+that are worth noting here.
+
+ * Failures of the selftests/x86 code. Usually a bug in one of the
+ more obscure corners of entry_64.S
+ * Crashes in early boot, especially around CPU bringup. Bugs
+ in the trampoline code or mappings cause these.
+ * Crashes at the first interrupt. Caused by bugs in entry_64.S,
+ like screwing up a page table switch. Also caused by
+ incorrectly mapping the IRQ handler entry code.
+ * Crashes at the first NMI. The NMI code is separate from main
+ interrupt handlers and can have bugs that do not affect
+ normal interrupts. Also caused by incorrectly mapping NMI
+ code. NMIs that interrupt the entry code must be very
+ careful and can be the cause of crashes that show up when
+ running perf.
+ * Kernel crashes at the first exit to userspace. entry_64.S
+ bugs, or failing to map some of the exit code.
+ * Crashes at first interrupt that interrupts userspace. The paths
+ in entry_64.S that return to userspace are sometimes separate
+ from the ones that return to the kernel.
+ * Double faults: overflowing the kernel stack because of page
+ faults upon page faults. Caused by touching non-pti-mapped
+ data in the entry code, or forgetting to switch to kernel
+ CR3 before calling into C functions which are not pti-mapped.
+ * Userspace segfaults early in boot, sometimes manifesting
+ as mount(8) failing to mount the rootfs. These have
+ tended to be TLB invalidation issues. Usually invalidating
+ the wrong PCID, or otherwise missing an invalidation.
+
+1. https://gruss.cc/files/kaiser.pdf
+2. https://meltdownattack.com/meltdown.pdf
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index ad41b3813f0a..ea91cb61a602 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -12,8 +12,9 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
... unused hole ...
ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
... unused hole ...
-fffffe0000000000 - fffffe7fffffffff (=39 bits) LDT remap for PTI
-fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
+ vaddr_end for KASLR
+fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
+fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
... unused hole ...
ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
@@ -37,13 +38,15 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
... unused hole ...
ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
... unused hole ...
-fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
+ vaddr_end for KASLR
+fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
+... unused hole ...
ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
... unused hole ...
ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
... unused hole ...
ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space
+ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
[fixmap start] - ffffffffff5fffff kernel-internal fixmap range
ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
@@ -67,9 +70,10 @@ memory window (this size is arbitrary, it can be raised later if needed).
The mappings are not part of any other kernel PGD and are only available
during EFI runtime calls.
-The module mapping space size changes based on the CONFIG requirements for the
-following fixmap section.
-
Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
physical memory, vmalloc/ioremap space and virtual memory map are randomized.
Their order is preserved but their base will be offset early at boot time.
+
+Be very careful vs. KASLR when changing anything here. The KASLR address
+range must not overlap with anything except the KASAN shadow area, which is
+correct as KASAN disables KASLR.
diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt
index 5de8715d5bec..318114de63f3 100644
--- a/Documentation/xtensa/mmu.txt
+++ b/Documentation/xtensa/mmu.txt
@@ -69,19 +69,10 @@ Default MMUv2-compatible layout.
| Userspace | 0x00000000 TASK_SIZE
+------------------+ 0x40000000
+------------------+
-| Page table | 0x80000000
-+------------------+ 0x80400000
+| Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE
+------------------+
-| KMAP area | PKMAP_BASE PTRS_PER_PTE *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-| | (4MB * DCACHE_N_COLORS)
-+------------------+
-| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
-| | NR_CPUS *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-+------------------+ FIXADDR_TOP 0xbffff000
+| KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE
++------------------+ 0x8e400000
+------------------+
| VMALLOC area | VMALLOC_START 0xc0000000 128MB - 64KB
+------------------+ VMALLOC_END
@@ -92,6 +83,17 @@ Default MMUv2-compatible layout.
| remap area 2 |
+------------------+
+------------------+
+| KMAP area | PKMAP_BASE PTRS_PER_PTE *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
+| | (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
+| | NR_CPUS *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
++------------------+ FIXADDR_TOP 0xcffff000
++------------------+
| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xd0000000 128MB
+------------------+
| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xd8000000 128MB
@@ -109,19 +111,10 @@ Default MMUv2-compatible layout.
| Userspace | 0x00000000 TASK_SIZE
+------------------+ 0x40000000
+------------------+
-| Page table | 0x80000000
-+------------------+ 0x80400000
+| Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE
+------------------+
-| KMAP area | PKMAP_BASE PTRS_PER_PTE *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-| | (4MB * DCACHE_N_COLORS)
-+------------------+
-| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
-| | NR_CPUS *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-+------------------+ FIXADDR_TOP 0x9ffff000
+| KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE
++------------------+ 0x8e400000
+------------------+
| VMALLOC area | VMALLOC_START 0xa0000000 128MB - 64KB
+------------------+ VMALLOC_END
@@ -132,6 +125,17 @@ Default MMUv2-compatible layout.
| remap area 2 |
+------------------+
+------------------+
+| KMAP area | PKMAP_BASE PTRS_PER_PTE *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
+| | (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
+| | NR_CPUS *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
++------------------+ FIXADDR_TOP 0xaffff000
++------------------+
| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xb0000000 256MB
+------------------+
| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 256MB
@@ -150,19 +154,10 @@ Default MMUv2-compatible layout.
| Userspace | 0x00000000 TASK_SIZE
+------------------+ 0x40000000
+------------------+
-| Page table | 0x80000000
-+------------------+ 0x80400000
+| Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE
+------------------+
-| KMAP area | PKMAP_BASE PTRS_PER_PTE *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-| | (4MB * DCACHE_N_COLORS)
-+------------------+
-| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
-| | NR_CPUS *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-+------------------+ FIXADDR_TOP 0x8ffff000
+| KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE
++------------------+ 0x8e400000
+------------------+
| VMALLOC area | VMALLOC_START 0x90000000 128MB - 64KB
+------------------+ VMALLOC_END
@@ -173,6 +168,17 @@ Default MMUv2-compatible layout.
| remap area 2 |
+------------------+
+------------------+
+| KMAP area | PKMAP_BASE PTRS_PER_PTE *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
+| | (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
+| | NR_CPUS *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
++------------------+ FIXADDR_TOP 0x9ffff000
++------------------+
| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xa0000000 512MB
+------------------+
| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 512MB
diff --git a/LICENSES/exceptions/Linux-syscall-note b/LICENSES/exceptions/Linux-syscall-note
new file mode 100644
index 000000000000..6b60b61be4e9
--- /dev/null
+++ b/LICENSES/exceptions/Linux-syscall-note
@@ -0,0 +1,25 @@
+SPDX-Exception-Identifier: Linux-syscall-note
+SPDX-URL: https://spdx.org/licenses/Linux-syscall-note.html
+SPDX-Licenses: GPL-2.0, GPL-2.0+, GPL-1.0+, LGPL-2.0, LGPL-2.0+, LGPL-2.1, LGPL-2.1+
+Usage-Guide:
+ This exception is used together with one of the above SPDX-Licenses
+ to mark user space API (uapi) header files so they can be included
+ into non-GPL-compliant user space application code.
+ To use this exception add it with the keyword WITH to one of the
+ identifiers in the SPDX-Licenses tag:
+ SPDX-License-Identifier: <SPDX-License> WITH Linux-syscall-note
+License-Text:
+
+ NOTE! This copyright does *not* cover user programs that use kernel
+ services by normal system calls - this is merely considered normal use
+ of the kernel, and does *not* fall under the heading of "derived work".
+ Also note that the GPL below is copyrighted by the Free Software
+ Foundation, but the instance of code that it refers to (the Linux
+ kernel) is copyrighted by me and others who actually wrote it.
+
+ Also note that the only valid version of the GPL as far as the kernel
+ is concerned is _this_ particular version of the license (ie v2, not
+ v2.2 or v3.x or whatever), unless explicitly otherwise stated.
+
+ Linus Torvalds
+
diff --git a/LICENSES/other/GPL-1.0 b/LICENSES/other/GPL-1.0
new file mode 100644
index 000000000000..3a4fa969e4c2
--- /dev/null
+++ b/LICENSES/other/GPL-1.0
@@ -0,0 +1,260 @@
+Valid-License-Identifier: GPL-1.0+
+SPDX-URL: https://spdx.org/licenses/GPL-1.0.html
+Usage-Guide:
+ The GNU General Public License (GPL) version 1 should not be used in new
+ code. For existing kernel code the 'or any later version' option is
+ required to be compatible with the general license of the project: GPLv2.
+ To use the license in source code, put the following SPDX tag/value pair
+ into a comment according to the placement guidelines in the licensing
+ rules documentation:
+ SPDX-License-Identifier: GPL-1.0+
+License-Text:
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 1, February 1989
+
+ Copyright (C) 1989 Free Software Foundation, Inc.
+ 675 Mass Ave, Cambridge, MA 02139, USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The license agreements of most software companies try to keep users
+at the mercy of those companies. By contrast, our General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. The
+General Public License applies to the Free Software Foundation's
+software and to any other program whose authors commit to using it.
+You can use it for your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Specifically, the General Public License is designed to make
+sure that you have the freedom to give away or sell copies of free
+software, that you receive source code or can get it if you want it,
+that you can change the software or use pieces of it in new free
+programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of a such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must tell them their rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any program or other work which
+contains a notice placed by the copyright holder saying it may be
+distributed under the terms of this General Public License. The
+"Program", below, refers to any such program or work, and a "work based
+on the Program" means either the Program or any work containing the
+Program or a portion of it, either verbatim or with modifications. Each
+licensee is addressed as "you".
+
+ 1. You may copy and distribute verbatim copies of the Program's source
+code as you receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice and
+disclaimer of warranty; keep intact all the notices that refer to this
+General Public License and to the absence of any warranty; and give any
+other recipients of the Program a copy of this General Public License
+along with the Program. You may charge a fee for the physical act of
+transferring a copy.
+
+ 2. You may modify your copy or copies of the Program or any portion of
+it, and copy and distribute such modifications under the terms of Paragraph
+1 above, provided that you also do the following:
+
+ a) cause the modified files to carry prominent notices stating that
+ you changed the files and the date of any change; and
+
+ b) cause the whole of any work that you distribute or publish, that
+ in whole or in part contains the Program or any part thereof, either
+ with or without modifications, to be licensed at no charge to all
+ third parties under the terms of this General Public License (except
+ that you may choose to grant warranty protection to some or all
+ third parties, at your option).
+
+ c) If the modified program normally reads commands interactively when
+ run, you must cause it, when started running for such interactive use
+ in the simplest and most usual way, to print or display an
+ announcement including an appropriate copyright notice and a notice
+ that there is no warranty (or else, saying that you provide a
+ warranty) and that users may redistribute the program under these
+ conditions, and telling the user how to view a copy of this General
+ Public License.
+
+ d) You may charge a fee for the physical act of transferring a
+ copy, and you may at your option offer warranty protection in
+ exchange for a fee.
+
+Mere aggregation of another independent work with the Program (or its
+derivative) on a volume of a storage or distribution medium does not bring
+the other work under the scope of these terms.
+
+ 3. You may copy and distribute the Program (or a portion or derivative of
+it, under Paragraph 2) in object code or executable form under the terms of
+Paragraphs 1 and 2 above provided that you also do one of the following:
+
+ a) accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of
+ Paragraphs 1 and 2 above; or,
+
+ b) accompany it with a written offer, valid for at least three
+ years, to give any third party free (except for a nominal charge
+ for the cost of distribution) a complete machine-readable copy of the
+ corresponding source code, to be distributed under the terms of
+ Paragraphs 1 and 2 above; or,
+
+ c) accompany it with the information you received as to where the
+ corresponding source code may be obtained. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form alone.)
+
+Source code for a work means the preferred form of the work for making
+modifications to it. For an executable file, complete source code means
+all the source code for all modules it contains; but, as a special
+exception, it need not include source code for modules which are standard
+libraries that accompany the operating system on which the executable
+file runs, or for standard header files or definitions files that
+accompany that operating system.
+
+ 4. You may not copy, modify, sublicense, distribute or transfer the
+Program except as expressly provided under this General Public License.
+Any attempt otherwise to copy, modify, sublicense, distribute or transfer
+the Program is void, and will automatically terminate your rights to use
+the Program under this License. However, parties who have received
+copies, or rights to use copies, from you under this General Public
+License will not have their licenses terminated so long as such parties
+remain in full compliance.
+
+ 5. By copying, distributing or modifying the Program (or any work based
+on the Program) you indicate your acceptance of this license to do so,
+and all its terms and conditions.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the original
+licensor to copy, distribute or modify the Program subject to these
+terms and conditions. You may not impose any further restrictions on the
+recipients' exercise of the rights granted herein.
+
+ 7. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of the license which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+the license, you may choose any version ever published by the Free Software
+Foundation.
+
+ 8. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 9. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 10. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ Appendix: How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to humanity, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these
+terms.
+
+ To do so, attach the following notices to the program. It is safest to
+attach them to the start of each source file to most effectively convey
+the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) 19yy <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 1, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) 19xx name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the
+appropriate parts of the General Public License. Of course, the
+commands you use may be called something other than `show w' and `show
+c'; they could even be mouse-clicks or menu items--whatever suits your
+program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ program `Gnomovision' (a program to direct compilers to make passes
+ at assemblers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/LICENSES/other/MPL-1.1 b/LICENSES/other/MPL-1.1
new file mode 100644
index 000000000000..568b6049efe6
--- /dev/null
+++ b/LICENSES/other/MPL-1.1
@@ -0,0 +1,478 @@
+Valid-License-Identifier: MPL-1.1
+SPDX-URL: https://spdx.org/licenses/MPL-1.1.html
+Usage-Guide:
+ To use the Mozilla Public License version 1.1 put the following SPDX
+ tag/value pair into a comment according to the placement guidelines in
+ the licensing rules documentation:
+ SPDX-License-Identifier: MPL-1.1
+License-Text:
+
+ MOZILLA PUBLIC LICENSE
+ Version 1.1
+
+ ---------------
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+ A. Any addition to or deletion from the contents of a file
+ containing Original Code or previous Modifications.
+
+ B. Any new file that contains any part of the Original Code or
+ previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or
+ hereafter acquired, including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by grantor.
+
+ 1.11. "Source Code" means the preferred form of the Covered Code for
+ making modifications to it, including all modules it contains, plus
+ any associated interface definition files, scripts used to control
+ compilation and installation of an Executable, or source code
+ differential comparisons against either the Original Code or another
+ well known, available Covered Code of the Contributor's choice. The
+ Source Code can be in a compressed or archival form, provided the
+ appropriate decompression or de-archiving software is widely available
+ for no charge.
+
+ 1.12. "You" (or "Your") means an individual or a legal entity
+ exercising rights under, and complying with all of the terms of, this
+ License or a future version of this License issued under Section 6.1.
+ For legal entities, "You" includes any entity which controls, is
+ controlled by, or is under common control with You. For purposes of
+ this definition, "control" means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by
+ contract or otherwise, or (b) ownership of more than fifty percent
+ (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+
+2. Source Code License.
+
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property
+ claims:
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Initial Developer to use, reproduce,
+ modify, display, perform, sublicense and distribute the Original
+ Code (or portions thereof) with or without Modifications, and/or
+ as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using or
+ selling of Original Code, to make, have made, use, practice,
+ sell, and offer for sale, and/or otherwise dispose of the
+ Original Code (or portions thereof).
+
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+ effective on the date Initial Developer first distributes
+ Original Code under the terms of this License.
+
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+ granted: 1) for code that You delete from the Original Code; 2)
+ separate from the Original Code; or 3) for infringements caused
+ by: i) the modification of the Original Code or ii) the
+ combination of the Original Code with other software or devices.
+
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor
+ hereby grants You a world-wide, royalty-free, non-exclusive license
+
+ (a) under intellectual property rights (other than patent or
+ trademark) Licensable by Contributor, to use, reproduce, modify,
+ display, perform, sublicense and distribute the Modifications
+ created by such Contributor (or portions thereof) either on an
+ unmodified basis, with other Modifications, as Covered Code
+ and/or as part of a Larger Work; and
+
+ (b) under Patent Claims infringed by the making, using, or
+ selling of Modifications made by that Contributor either alone
+ and/or in combination with its Contributor Version (or portions
+ of such combination), to make, use, sell, offer for sale, have
+ made, and/or otherwise dispose of: 1) Modifications made by that
+ Contributor (or portions thereof); and 2) the combination of
+ Modifications made by that Contributor with its Contributor
+ Version (or portions of such combination).
+
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+ effective on the date Contributor first makes Commercial Use of
+ the Covered Code.
+
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+ granted: 1) for any code that Contributor has deleted from the
+ Contributor Version; 2) separate from the Contributor Version;
+ 3) for infringements caused by: i) third party modifications of
+ Contributor Version or ii) the combination of Modifications made
+ by that Contributor with other software (except as part of the
+ Contributor Version) or other devices; or 4) under Patent Claims
+ infringed by Covered Code in the absence of Modifications made by
+ that Contributor.
+
+3. Distribution Obligations.
+
+ 3.1. Application of License.
+ The Modifications which You create or to which You contribute are
+ governed by the terms of this License, including without limitation
+ Section 2.2. The Source Code version of Covered Code may be
+ distributed only under the terms of this License or a future version
+ of this License released under Section 6.1, and You must include a
+ copy of this License with every copy of the Source Code You
+ distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this
+ License or the recipients' rights hereunder. However, You may include
+ an additional document offering the additional rights described in
+ Section 3.5.
+
+ 3.2. Availability of Source Code.
+ Any Modification which You create or to which You contribute must be
+ made available in Source Code form under the terms of this License
+ either on the same media as an Executable version or via an accepted
+ Electronic Distribution Mechanism to anyone to whom you made an
+ Executable version available; and if made available via Electronic
+ Distribution Mechanism, must remain available for at least twelve (12)
+ months after the date it initially became available, or at least six
+ (6) months after a subsequent version of that particular Modification
+ has been made available to such recipients. You are responsible for
+ ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+
+ 3.3. Description of Modifications.
+ You must cause all Covered Code to which You contribute to contain a
+ file documenting the changes You made to create that Covered Code and
+ the date of any change. You must include a prominent statement that
+ the Modification is derived, directly or indirectly, from Original
+ Code provided by the Initial Developer and including the name of the
+ Initial Developer in (a) the Source Code, and (b) in any notice in an
+ Executable version or related documentation in which You describe the
+ origin or ownership of the Covered Code.
+
+ 3.4. Intellectual Property Matters.
+ (a) Third Party Claims.
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+
+ (b) Contributor APIs.
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+
+ (c) Representations.
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+
+ 3.5. Required Notices.
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code. If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice. If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A. You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code. You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear that
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+
+ 3.6. Distribution of Executable Versions.
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+
+ 3.7. Larger Works.
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Application of this License.
+
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+ 6.1. New Versions.
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+
+ 6.2. Effect of New Versions.
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+
+ 6.3. Derivative Works.
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+ 8.1. This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+
+ 8.2. If You initiate litigation by asserting a patent infringement
+ claim (excluding declaratory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant") alleging that:
+
+ (a) such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless, within 60 days after receipt of notice, You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant. If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+
+ (b) any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+
+ 8.3. If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+
+ 8.4. In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+
+11. MISCELLANEOUS.
+
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed". "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the MPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A - Mozilla Public License.
+
+ ``The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ https://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+ License for the specific language governing rights and limitations
+ under the License.
+
+ The Original Code is ______________________________________.
+
+ The Initial Developer of the Original Code is ________________________.
+ Portions created by ______________________ are Copyright (C) ______
+ _______________________. All Rights Reserved.
+
+ Contributor(s): ______________________________________.
+
+ Alternatively, the contents of this file may be used under the terms
+ of the _____ license (the "[___] License"), in which case the
+ provisions of [______] License are applicable instead of those
+ above. If you wish to allow use of your version of this file only
+ under the terms of the [____] License and not to allow others to use
+ your version of this file under the MPL, indicate your decision by
+ deleting the provisions above and replace them with the notice and
+ other provisions required by the [___] License. If you do not delete
+ the provisions above, a recipient may use your version of this file
+ under either the MPL or the [___] License."
+
+ [NOTE: The text of this Exhibit A may differ slightly from the text of
+ the notices in the Source Code files of the Original Code. You should
+ use the text of this Exhibit A rather than the text found in the
+ Original Code Source Code for Your Modifications.]
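
To illustrate the Usage-Guide at the top of this file: the SPDX tag goes
into a comment at the top of the source file. A minimal sketch, assuming
the kernel convention that .c files carry the identifier as a C99-style
comment on the first line (the file contents are hypothetical):

    // SPDX-License-Identifier: MPL-1.1
    /* Hypothetical .c file whose code is covered by MPL 1.1. */
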
diff --git a/LICENSES/preferred/BSD-2-Clause b/LICENSES/preferred/BSD-2-Clause
new file mode 100644
index 000000000000..da366e2ce50b
--- /dev/null
+++ b/LICENSES/preferred/BSD-2-Clause
@@ -0,0 +1,32 @@
+Valid-License-Identifier: BSD-2-Clause
+SPDX-URL: https://spdx.org/licenses/BSD-2-Clause.html
+Usage-Guide:
+ To use the BSD 2-clause "Simplified" License put the following SPDX
+ tag/value pair into a comment according to the placement guidelines in
+ the licensing rules documentation:
+ SPDX-License-Identifier: BSD-2-Clause
+License-Text:
+
+Copyright (c) <year> <owner>. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
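
A minimal sketch of the corresponding tag placement, assuming the kernel
convention that header files use a classic C comment rather than the C99
style used in .c files (the guard name is hypothetical):

    /* SPDX-License-Identifier: BSD-2-Clause */
    #ifndef _EXAMPLE_H          /* hypothetical header under BSD-2-Clause */
    #define _EXAMPLE_H
    #endif /* _EXAMPLE_H */
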
diff --git a/LICENSES/preferred/BSD-3-Clause b/LICENSES/preferred/BSD-3-Clause
new file mode 100644
index 000000000000..34c7f057c8d5
--- /dev/null
+++ b/LICENSES/preferred/BSD-3-Clause
@@ -0,0 +1,36 @@
+Valid-License-Identifier: BSD-3-Clause
+SPDX-URL: https://spdx.org/licenses/BSD-3-Clause.html
+Usage-Guide:
+ To use the BSD 3-clause "New" or "Revised" License put the following SPDX
+ tag/value pair into a comment according to the placement guidelines in
+ the licensing rules documentation:
+ SPDX-License-Identifier: BSD-3-Clause
+License-Text:
+
+Copyright (c) <year> <owner>. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
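
BSD-3-Clause often appears in dual-licensed code. A sketch of a combined
license expression, assuming the OR syntax described by the licensing
rules documentation (the parenthesized form is an assumption, not a
requirement confirmed here):

    // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
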
diff --git a/LICENSES/preferred/BSD-3-Clause-Clear b/LICENSES/preferred/BSD-3-Clause-Clear
new file mode 100644
index 000000000000..e53b56092b90
--- /dev/null
+++ b/LICENSES/preferred/BSD-3-Clause-Clear
@@ -0,0 +1,41 @@
+Valid-License-Identifier: BSD-3-Clause-Clear
+SPDX-URL: https://spdx.org/licenses/BSD-3-Clause-Clear.html
+Usage-Guide:
+ To use the BSD 3-clause "Clear" License put the following SPDX
+ tag/value pair into a comment according to the placement guidelines in
+ the licensing rules documentation:
+ SPDX-License-Identifier: BSD-3-Clause-Clear
+License-Text:
+
+The Clear BSD License
+
+Copyright (c) [xxxx]-[xxxx] [Owner Organization]
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted (subject to the limitations in the disclaimer
+below) provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of [Owner Organization] nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
+THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
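
The tag value must match a Valid-License-Identifier line exactly, since
tooling resolves the identifier against the files in LICENSES/. A minimal
sketch for this license:

    // SPDX-License-Identifier: BSD-3-Clause-Clear
    /* The identifier above must match the Valid-License-Identifier
     * declared in LICENSES/preferred/BSD-3-Clause-Clear verbatim. */
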
diff --git a/LICENSES/preferred/GPL-2.0 b/LICENSES/preferred/GPL-2.0
new file mode 100644
index 000000000000..b8db91d3a1cb
--- /dev/null
+++ b/LICENSES/preferred/GPL-2.0
@@ -0,0 +1,353 @@
+Valid-License-Identifier: GPL-2.0
+Valid-License-Identifier: GPL-2.0+
+SPDX-URL: https://spdx.org/licenses/GPL-2.0.html
+Usage-Guide:
+ To use this license in source code, put one of the following SPDX
+ tag/value pairs into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ For 'GNU General Public License (GPL) version 2 only' use:
+ SPDX-License-Identifier: GPL-2.0
+ For 'GNU General Public License (GPL) version 2 or any later version' use:
+ SPDX-License-Identifier: GPL-2.0+
+License-Text:
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
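
The Usage-Guide at the top of this file distinguishes 'version 2 only'
from 'version 2 or any later version'; the difference is only the tag
suffix. A minimal sketch of both variants as first-line comments in
hypothetical .c files (the bracketed notes are annotations for this
sketch, not part of the tag):

    // SPDX-License-Identifier: GPL-2.0
        [GPL version 2 only]

    // SPDX-License-Identifier: GPL-2.0+
        [GPL version 2 or any later version]
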
diff --git a/LICENSES/preferred/LGPL-2.0 b/LICENSES/preferred/LGPL-2.0
new file mode 100644
index 000000000000..957d798fe037
--- /dev/null
+++ b/LICENSES/preferred/LGPL-2.0
@@ -0,0 +1,487 @@
+Valid-License-Identifier: LGPL-2.0
+Valid-License-Identifier: LGPL-2.0+
+SPDX-URL: https://spdx.org/licenses/LGPL-2.0.html
+Usage-Guide:
+ To use this license in source code, put one of the following SPDX
+ tag/value pairs into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ For 'GNU Library General Public License (LGPL) version 2.0 only' use:
+ SPDX-License-Identifier: LGPL-2.0
+ For 'GNU Library General Public License (LGPL) version 2.0 or any later
+ version' use:
+ SPDX-License-Identifier: LGPL-2.0+
+License-Text:
+
+GNU LIBRARY GENERAL PUBLIC LICENSE
+Version 2, June 1991
+
+Copyright (C) 1991 Free Software Foundation, Inc.
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL. It is numbered 2
+because it goes with version 2 of the ordinary GPL.]
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public Licenses are
+intended to guarantee your freedom to share and change free software--to
+make sure the software is free for all its users.
+
+This license, the Library General Public License, applies to some specially
+designated Free Software Foundation software, and to any other libraries
+whose authors decide to use it. You can use it for your libraries, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom
+to distribute copies of free software (and charge for this service if you
+wish), that you receive source code or can get it if you want it, that you
+can change the software or use pieces of it in new free programs; and that
+you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to
+deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you
+distribute copies of the library, or if you modify it.
+
+For example, if you distribute copies of the library, whether gratis or for
+a fee, you must give the recipients all the rights that we gave you. You
+must make sure that they, too, receive or can get the source code. If you
+link a program with the library, you must provide complete object files to
+the recipients so that they can relink them with the library, after making
+changes to the library and recompiling it. And you must show them these
+terms so they know their rights.
+
+Our method of protecting your rights has two steps: (1) copyright the
+library, and (2) offer you this license which gives you legal permission to
+copy, distribute and/or modify the library.
+
+Also, for each distributor's protection, we want to make certain that
+everyone understands that there is no warranty for this free library. If
+the library is modified by someone else and passed on, we want its
+recipients to know that what they have is not the original version, so that
+any problems introduced by others will not reflect on the original authors'
+reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that companies distributing free software will
+individually obtain patent licenses, thus in effect transforming the
+program into proprietary software. To prevent this, we have made it clear
+that any patent must be licensed for everyone's free use or not licensed at
+all.
+
+Most GNU software, including some libraries, is covered by the ordinary GNU
+General Public License, which was designed for utility programs. This
+license, the GNU Library General Public License, applies to certain
+designated libraries. This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.
+
+The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it. Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program. However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+Because of this blurred distinction, using the ordinary General Public
+License for libraries did not effectively promote software sharing, because
+most developers did not use the libraries. We concluded that weaker
+conditions might promote sharing better.
+
+However, unrestricted linking of non-free programs would deprive the users
+of those programs of all benefit from the free status of the libraries
+themselves. This Library General Public License is intended to permit
+developers of non-free programs to use free libraries, while preserving
+your freedom as a user of such programs to change the free libraries that
+are incorporated in them. (We have not seen how to achieve this as regards
+changes in header files, but we have achieved it as regards changes in the
+actual functions of the Library.) The hope is that this will lead to faster
+development of free libraries.
+
+The precise terms and conditions for copying, distribution and modification
+follow. Pay close attention to the difference between a "work based on the
+library" and a "work that uses the library". The former contains code
+derived from the library, while the latter only works together with the
+library.
+
+Note that it is possible for a library to be covered by the ordinary
+General Public License rather than by this special one.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License Agreement applies to any software library which contains a
+ notice placed by the copyright holder or other authorized party saying
+ it may be distributed under the terms of this Library General Public
+ License (also called "this License"). Each licensee is addressed as
+ "you".
+
+ A "library" means a collection of software functions and/or data
+ prepared so as to be conveniently linked with application programs
+ (which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work which
+ has been distributed under these terms. A "work based on the Library"
+ means either the Library or any derivative work under copyright law:
+ that is to say, a work containing the Library or a portion of it, either
+ verbatim or with modifications and/or translated straightforwardly into
+ another language. (Hereinafter, translation is included without
+ limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for making
+ modifications to it. For a library, complete source code means all the
+ source code for all modules it contains, plus any associated interface
+ definition files, plus the scripts used to control compilation and
+ installation of the library.
+
+ Activities other than copying, distribution and modification are not
+ covered by this License; they are outside its scope. The act of running
+ a program using the Library is not restricted, and output from such a
+ program is covered only if its contents constitute a work based on the
+ Library (independent of the use of the Library in a tool for writing
+ it). Whether that is true depends on what the Library does and what the
+ program that uses the Library does.
+
+1. You may copy and distribute verbatim copies of the Library's complete
+ source code as you receive it, in any medium, provided that you
+ conspicuously and appropriately publish on each copy an appropriate
+ copyright notice and disclaimer of warranty; keep intact all the notices
+ that refer to this License and to the absence of any warranty; and
+ distribute a copy of this License along with the Library.
+
+ You may charge a fee for the physical act of transferring a copy, and
+ you may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Library or any portion of it,
+ thus forming a work based on the Library, and copy and distribute such
+ modifications or work under the terms of Section 1 above, provided that
+ you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices stating
+ that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no charge to
+ all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a table
+ of data to be supplied by an application program that uses the
+ facility, other than as an argument passed when the facility is
+ invoked, then you must make a good faith effort to ensure that, in
+ the event an application does not supply such function or table, the
+ facility still operates, and performs whatever part of its purpose
+ remains meaningful.
+
+ (For example, a function in a library to compute square roots has a
+ purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must be
+ optional: if the application does not supply it, the square root
+ function must still compute square roots.)
+
+ These requirements apply to the modified work as a whole. If
+ identifiable sections of that work are not derived from the Library, and
+ can be reasonably considered independent and separate works in
+ themselves, then this License, and its terms, do not apply to those
+ sections when you distribute them as separate works. But when you
+ distribute the same sections as part of a whole which is a work based on
+ the Library, the distribution of the whole must be on the terms of this
+ License, whose permissions for other licensees extend to the entire
+ whole, and thus to each and every part regardless of who wrote it.
+
+ Thus, it is not the intent of this section to claim rights or contest
+ your rights to work written entirely by you; rather, the intent is to
+ exercise the right to control the distribution of derivative or
+ collective works based on the Library.
+
+ In addition, mere aggregation of another work not based on the Library
+ with the Library (or with a work based on the Library) on a volume of a
+ storage or distribution medium does not bring the other work under the
+ scope of this License.
+
+3. You may opt to apply the terms of the ordinary GNU General Public
+ License instead of this License to a given copy of the Library. To do
+ this, you must alter all the notices that refer to this License, so that
+ they refer to the ordinary GNU General Public License, version 2,
+ instead of to this License. (If a newer version than version 2 of the
+ ordinary GNU General Public License has appeared, then you can specify
+ that version instead if you wish.) Do not make any other change in these
+ notices.
+
+ Once this change is made in a given copy, it is irreversible for that
+ copy, so the ordinary GNU General Public License applies to all
+ subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of the
+ Library into a program that is not a library.
+
+4. You may copy and distribute the Library (or a portion or derivative of
+ it, under Section 2) in object code or executable form under the terms
+ of Sections 1 and 2 above provided that you accompany it with the
+ complete corresponding machine-readable source code, which must be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy from a
+ designated place, then offering equivalent access to copy the source
+ code from the same place satisfies the requirement to distribute the
+ source code, even though third parties are not compelled to copy the
+ source along with the object code.
+
+5. A program that contains no derivative of any portion of the Library, but
+ is designed to work with the Library by being compiled or linked with
+ it, is called a "work that uses the Library". Such a work, in isolation,
+ is not a derivative work of the Library, and therefore falls outside the
+ scope of this License.
+
+ However, linking a "work that uses the Library" with the Library creates
+ an executable that is a derivative of the Library (because it contains
+ portions of the Library), rather than a "work that uses the
+ library". The executable is therefore covered by this License. Section 6
+ states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+ that is part of the Library, the object code for the work may be a
+ derivative work of the Library even though the source code is
+ not. Whether this is true is especially significant if the work can be
+ linked without the Library, or if the work is itself a library. The
+ threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data structure
+ layouts and accessors, and small macros and small inline functions (ten
+ lines or less in length), then the use of the object file is
+ unrestricted, regardless of whether it is legally a derivative
+ work. (Executables containing this object code plus portions of the
+ Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+ distribute the object code for the work under the terms of Section
+ 6. Any executables containing that work also fall under Section 6,
+ whether or not they are linked directly with the Library itself.
+
+6. As an exception to the Sections above, you may also compile or link a
+ "work that uses the Library" with the Library to produce a work
+ containing portions of the Library, and distribute that work under terms
+ of your choice, provided that the terms permit modification of the work
+ for the customer's own use and reverse engineering for debugging such
+ modifications.
+
+ You must give prominent notice with each copy of the work that the
+ Library is used in it and that the Library and its use are covered by
+ this License. You must supply a copy of this License. If the work during
+ execution displays copyright notices, you must include the copyright
+ notice for the Library among them, as well as a reference directing the
+ user to the copy of this License. Also, you must do one of these things:
+
+ a) Accompany the work with the complete corresponding machine-readable
+ source code for the Library including whatever changes were used in
+ the work (which must be distributed under Sections 1 and 2 above);
+ and, if the work is an executable linked with the Library, with the
+ complete machine-readable "work that uses the Library", as object
+ code and/or source code, so that the user can modify the Library and
+ then relink to produce a modified executable containing the modified
+ Library. (It is understood that the user who changes the contents of
+ definitions files in the Library will not necessarily be able to
+ recompile the application to use the modified definitions.)
+
+ b) Accompany the work with a written offer, valid for at least three
+ years, to give the same user the materials specified in Subsection
+ 6a, above, for a charge no more than the cost of performing this
+ distribution.
+
+ c) If distribution of the work is made by offering access to copy from a
+ designated place, offer equivalent access to copy the above specified
+ materials from the same place.
+
+ d) Verify that the user has already received a copy of these materials
+ or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the Library"
+ must include any data and utility programs needed for reproducing the
+ executable from it. However, as a special exception, the source code
+ distributed need not include anything that is normally distributed (in
+ either source or binary form) with the major components (compiler,
+ kernel, and so on) of the operating system on which the executable runs,
+ unless that component itself accompanies the executable.
+
+ It may happen that this requirement contradicts the license restrictions
+ of other proprietary libraries that do not normally accompany the
+ operating system. Such a contradiction means you cannot use both them
+ and the Library together in an executable that you distribute.
+
+7. You may place library facilities that are a work based on the Library
+ side-by-side in a single library together with other library facilities
+ not covered by this License, and distribute such a combined library,
+ provided that the separate distribution of the work based on the Library
+ and of the other library facilities is otherwise permitted, and provided
+ that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work based on
+ the Library, uncombined with any other library facilities. This must
+ be distributed under the terms of the Sections above.
+
+ b) Give prominent notice with the combined library of the fact that part
+ of it is a work based on the Library, and explaining where to find
+ the accompanying uncombined form of the same work.
+
+8. You may not copy, modify, sublicense, link with, or distribute the
+ Library except as expressly provided under this License. Any attempt
+ otherwise to copy, modify, sublicense, link with, or distribute the
+ Library is void, and will automatically terminate your rights under this
+ License. However, parties who have received copies, or rights, from you
+ under this License will not have their licenses terminated so long as
+ such parties remain in full compliance.
+
+9. You are not required to accept this License, since you have not signed
+ it. However, nothing else grants you permission to modify or distribute
+ the Library or its derivative works. These actions are prohibited by law
+ if you do not accept this License. Therefore, by modifying or
+ distributing the Library (or any work based on the Library), you
+ indicate your acceptance of this License to do so, and all its terms and
+ conditions for copying, distributing or modifying the Library or works
+ based on it.
+
+10. Each time you redistribute the Library (or any work based on the
+ Library), the recipient automatically receives a license from the
+ original licensor to copy, distribute, link with or modify the Library
+ subject to these terms and conditions. You may not impose any further
+ restrictions on the recipients' exercise of the rights granted
+ herein. You are not responsible for enforcing compliance by third
+ parties to this License.
+
+11. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent issues),
+ conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot
+ distribute so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you
+ may not distribute the Library at all. For example, if a patent license
+ would not permit royalty-free redistribution of the Library by all
+ those who receive copies directly or indirectly through you, then the
+ only way you could satisfy both it and this License would be to refrain
+ entirely from distribution of the Library.
+
+ If any portion of this section is held invalid or unenforceable under
+ any particular circumstance, the balance of the section is intended to
+ apply, and the section as a whole is intended to apply in other
+ circumstances.
+
+ It is not the purpose of this section to induce you to infringe any
+ patents or other property right claims or to contest validity of any
+ such claims; this section has the sole purpose of protecting the
+ integrity of the free software distribution system which is implemented
+ by public license practices. Many people have made generous
+ contributions to the wide range of software distributed through that
+ system in reliance on consistent application of that system; it is up
+ to the author/donor to decide if he or she is willing to distribute
+ software through any other system and a licensee cannot impose that
+ choice.
+
+ This section is intended to make thoroughly clear what is believed to
+ be a consequence of the rest of this License.
+
+12. If the distribution and/or use of the Library is restricted in certain
+ countries either by patents or by copyrighted interfaces, the original
+ copyright holder who places the Library under this License may add an
+ explicit geographical distribution limitation excluding those
+ countries, so that distribution is permitted only in or among countries
+ not thus excluded. In such case, this License incorporates the
+ limitation as if written in the body of this License.
+
+13. The Free Software Foundation may publish revised and/or new versions of
+ the Library General Public License from time to time. Such new versions
+ will be similar in spirit to the present version, but may differ in
+ detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the Library
+ specifies a version number of this License which applies to it and "any
+ later version", you have the option of following the terms and
+ conditions either of that version or of any later version published by
+ the Free Software Foundation. If the Library does not specify a license
+ version number, you may choose any version ever published by the Free
+ Software Foundation.
+
+14. If you wish to incorporate parts of the Library into other free
+ programs whose distribution conditions are incompatible with these,
+ write to the author to ask for permission. For software which is
+ copyrighted by the Free Software Foundation, write to the Free Software
+ Foundation; we sometimes make exceptions for this. Our decision will be
+ guided by the two goals of preserving the free status of all
+ derivatives of our free software and of promoting the sharing and reuse
+ of software generally.
+
+NO WARRANTY
+
+15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+ FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+ OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+ PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
+ EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH
+ YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
+ NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+ REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
+ DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
+ DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY
+ (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
+ INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+ THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR
+ OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Libraries
+
+If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+one line to give the library's name and an idea of what it does.
+Copyright (C) year name of author
+
+This library is free software; you can redistribute it and/or modify it
+under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with this library; if not, write to the Free Software Foundation,
+Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in
+the library `Frob' (a library for tweaking knobs) written
+by James Random Hacker.
+
+signature of Ty Coon, 1 April 1990
+Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/LICENSES/preferred/LGPL-2.1 b/LICENSES/preferred/LGPL-2.1
new file mode 100644
index 000000000000..27bb4342a3e8
--- /dev/null
+++ b/LICENSES/preferred/LGPL-2.1
@@ -0,0 +1,503 @@
+Valid-License-Identifier: LGPL-2.1
+Valid-License-Identifier: LGPL-2.1+
+SPDX-URL: https://spdx.org/licenses/LGPL-2.1.html
+Usage-Guide:
+ To use this license in source code, put one of the following SPDX
+ tag/value pairs into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ For 'GNU Lesser General Public License (LGPL) version 2.1 only' use:
+ SPDX-License-Identifier: LGPL-2.1
+ For 'GNU Lesser General Public License (LGPL) version 2.1 or any later
+ version' use:
+ SPDX-License-Identifier: LGPL-2.1+
+License-Text:
+
+GNU LESSER GENERAL PUBLIC LICENSE
+Version 2.1, February 1999
+
+Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts as
+the successor of the GNU Library Public License, version 2, hence the
+version number 2.1.]
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public Licenses are
+intended to guarantee your freedom to share and change free software--to
+make sure the software is free for all its users.
+
+This license, the Lesser General Public License, applies to some specially
+designated software packages--typically libraries--of the Free Software
+Foundation and other authors who decide to use it. You can use it too, but
+we suggest you first think carefully about whether this license or the
+ordinary General Public License is the better strategy to use in any
+particular case, based on the explanations below.
+
+When we speak of free software, we are referring to freedom of use, not
+price. Our General Public Licenses are designed to make sure that you have
+the freedom to distribute copies of free software (and charge for this
+service if you wish); that you receive source code or can get it if you
+want it; that you can change the software and use pieces of it in new free
+programs; and that you are informed that you can do these things.
+
+To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for you if
+you distribute copies of the library or if you modify it.
+
+For example, if you distribute copies of the library, whether gratis or for
+a fee, you must give the recipients all the rights that we gave you. You
+must make sure that they, too, receive or can get the source code. If you
+link other code with the library, you must provide complete object files to
+the recipients, so that they can relink them with the library after making
+changes to the library and recompiling it. And you must show them these
+terms so they know their rights.
+
+We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+To protect each distributor, we want to make it very clear that there is no
+warranty for the free library. Also, if the library is modified by someone
+else and passed on, the recipients should know that what they have is not
+the original version, so that the original author's reputation will not be
+affected by problems that might be introduced by others.
+
+Finally, software patents pose a constant threat to the existence of any
+free program. We wish to make sure that a company cannot effectively
+restrict the users of a free program by obtaining a restrictive license
+from a patent holder. Therefore, we insist that any patent license obtained
+for a version of the library must be consistent with the full freedom of
+use specified in this license.
+
+Most GNU software, including some libraries, is covered by the ordinary GNU
+General Public License. This license, the GNU Lesser General Public
+License, applies to certain designated libraries, and is quite different
+from the ordinary General Public License. We use this license for certain
+libraries in order to permit linking those libraries into non-free
+programs.
+
+When a program is linked with a library, whether statically or using a
+shared library, the combination of the two is legally speaking a combined
+work, a derivative of the original library. The ordinary General Public
+License therefore permits such linking only if the entire combination fits
+its criteria of freedom. The Lesser General Public License permits more lax
+criteria for linking other code with the library.
+
+We call this license the "Lesser" General Public License because it does
+Less to protect the user's freedom than the ordinary General Public
+License. It also provides other free software developers Less of an
+advantage over competing non-free programs. These disadvantages are the
+reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+For example, on rare occasions, there may be a special need to encourage
+the widest possible use of a certain library, so that it becomes a de-facto
+standard. To achieve this, non-free programs must be allowed to use the
+library. A more frequent case is that a free library does the same job as
+widely used non-free libraries. In this case, there is little to gain by
+limiting the free library to free software only, so we use the Lesser
+General Public License.
+
+In other cases, permission to use a particular library in non-free programs
+enables a greater number of people to use a large body of free
+software. For example, permission to use the GNU C Library in non-free
+programs enables many more people to use the whole GNU operating system, as
+well as its variant, the GNU/Linux operating system.
+
+Although the Lesser General Public License is Less protective of the users'
+freedom, it does ensure that the user of a program that is linked with the
+Library has the freedom and the wherewithal to run that program using a
+modified version of the Library.
+
+The precise terms and conditions for copying, distribution and modification
+follow. Pay close attention to the difference between a "work based on the
+library" and a "work that uses the library". The former contains code
+derived from the library, whereas the latter must be combined with the
+library in order to run.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License Agreement applies to any software library or other program
+ which contains a notice placed by the copyright holder or other
+ authorized party saying it may be distributed under the terms of this
+ Lesser General Public License (also called "this License"). Each
+ licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+ prepared so as to be conveniently linked with application programs
+ (which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work which
+ has been distributed under these terms. A "work based on the Library"
+ means either the Library or any derivative work under copyright law:
+ that is to say, a work containing the Library or a portion of it, either
+ verbatim or with modifications and/or translated straightforwardly into
+ another language. (Hereinafter, translation is included without
+ limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for making
+ modifications to it. For a library, complete source code means all the
+ source code for all modules it contains, plus any associated interface
+ definition files, plus the scripts used to control compilation and
+ installation of the library.
+
+ Activities other than copying, distribution and modification are not
+ covered by this License; they are outside its scope. The act of running
+ a program using the Library is not restricted, and output from such a
+ program is covered only if its contents constitute a work based on the
+ Library (independent of the use of the Library in a tool for writing
+ it). Whether that is true depends on what the Library does and what the
+ program that uses the Library does.
+
+1. You may copy and distribute verbatim copies of the Library's complete
+ source code as you receive it, in any medium, provided that you
+ conspicuously and appropriately publish on each copy an appropriate
+ copyright notice and disclaimer of warranty; keep intact all the notices
+ that refer to this License and to the absence of any warranty; and
+ distribute a copy of this License along with the Library.
+
+ You may charge a fee for the physical act of transferring a copy, and
+ you may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Library or any portion of it,
+ thus forming a work based on the Library, and copy and distribute such
+ modifications or work under the terms of Section 1 above, provided that
+ you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices stating
+ that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no charge to
+ all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a table
+ of data to be supplied by an application program that uses the
+ facility, other than as an argument passed when the facility is
+ invoked, then you must make a good faith effort to ensure that, in
+ the event an application does not supply such function or table, the
+ facility still operates, and performs whatever part of its purpose
+ remains meaningful.
+
+ (For example, a function in a library to compute square roots has a
+ purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must be
+ optional: if the application does not supply it, the square root
+ function must still compute square roots.)
+
+ These requirements apply to the modified work as a whole. If
+ identifiable sections of that work are not derived from the Library, and
+ can be reasonably considered independent and separate works in
+ themselves, then this License, and its terms, do not apply to those
+ sections when you distribute them as separate works. But when you
+ distribute the same sections as part of a whole which is a work based on
+ the Library, the distribution of the whole must be on the terms of this
+ License, whose permissions for other licensees extend to the entire
+ whole, and thus to each and every part regardless of who wrote it.
+
+ Thus, it is not the intent of this section to claim rights or contest
+ your rights to work written entirely by you; rather, the intent is to
+ exercise the right to control the distribution of derivative or
+ collective works based on the Library.
+
+ In addition, mere aggregation of another work not based on the Library
+ with the Library (or with a work based on the Library) on a volume of a
+ storage or distribution medium does not bring the other work under the
+ scope of this License.
+
+3. You may opt to apply the terms of the ordinary GNU General Public
+ License instead of this License to a given copy of the Library. To do
+ this, you must alter all the notices that refer to this License, so that
+ they refer to the ordinary GNU General Public License, version 2,
+ instead of to this License. (If a newer version than version 2 of the
+ ordinary GNU General Public License has appeared, then you can specify
+ that version instead if you wish.) Do not make any other change in these
+ notices.
+
+ Once this change is made in a given copy, it is irreversible for that
+ copy, so the ordinary GNU General Public License applies to all
+ subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of the
+ Library into a program that is not a library.
+
+4. You may copy and distribute the Library (or a portion or derivative of
+ it, under Section 2) in object code or executable form under the terms
+ of Sections 1 and 2 above provided that you accompany it with the
+ complete corresponding machine-readable source code, which must be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy from a
+ designated place, then offering equivalent access to copy the source
+ code from the same place satisfies the requirement to distribute the
+ source code, even though third parties are not compelled to copy the
+ source along with the object code.
+
+5. A program that contains no derivative of any portion of the Library, but
+ is designed to work with the Library by being compiled or linked with
+ it, is called a "work that uses the Library". Such a work, in isolation,
+ is not a derivative work of the Library, and therefore falls outside the
+ scope of this License.
+
+ However, linking a "work that uses the Library" with the Library creates
+ an executable that is a derivative of the Library (because it contains
+ portions of the Library), rather than a "work that uses the
+ library". The executable is therefore covered by this License. Section 6
+ states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+ that is part of the Library, the object code for the work may be a
+ derivative work of the Library even though the source code is
+ not. Whether this is true is especially significant if the work can be
+ linked without the Library, or if the work is itself a library. The
+ threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data structure
+ layouts and accessors, and small macros and small inline functions (ten
+ lines or less in length), then the use of the object file is
+ unrestricted, regardless of whether it is legally a derivative
+ work. (Executables containing this object code plus portions of the
+ Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+ distribute the object code for the work under the terms of Section
+ 6. Any executables containing that work also fall under Section 6,
+ whether or not they are linked directly with the Library itself.
+
+6. As an exception to the Sections above, you may also combine or link a
+ "work that uses the Library" with the Library to produce a work
+ containing portions of the Library, and distribute that work under terms
+ of your choice, provided that the terms permit modification of the work
+ for the customer's own use and reverse engineering for debugging such
+ modifications.
+
+ You must give prominent notice with each copy of the work that the
+ Library is used in it and that the Library and its use are covered by
+ this License. You must supply a copy of this License. If the work during
+ execution displays copyright notices, you must include the copyright
+ notice for the Library among them, as well as a reference directing the
+ user to the copy of this License. Also, you must do one of these things:
+
+ a) Accompany the work with the complete corresponding machine-readable
+ source code for the Library including whatever changes were used in
+ the work (which must be distributed under Sections 1 and 2 above);
+ and, if the work is an executable linked with the Library, with the
+ complete machine-readable "work that uses the Library", as object
+ code and/or source code, so that the user can modify the Library and
+ then relink to produce a modified executable containing the modified
+ Library. (It is understood that the user who changes the contents of
+ definitions files in the Library will not necessarily be able to
+ recompile the application to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a copy
+ of the library already present on the user's computer system, rather
+ than copying library functions into the executable, and (2) will
+ operate properly with a modified version of the library, if the user
+ installs one, as long as the modified version is interface-compatible
+ with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at least three
+ years, to give the same user the materials specified in Subsection
+ 6a, above, for a charge no more than the cost of performing this
+ distribution.
+
+ d) If distribution of the work is made by offering access to copy from a
+ designated place, offer equivalent access to copy the above specified
+ materials from the same place.
+
+ e) Verify that the user has already received a copy of these materials
+ or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the Library"
+ must include any data and utility programs needed for reproducing the
+ executable from it. However, as a special exception, the materials to be
+ distributed need not include anything that is normally distributed (in
+ either source or binary form) with the major components (compiler,
+ kernel, and so on) of the operating system on which the executable runs,
+ unless that component itself accompanies the executable.
+
+ It may happen that this requirement contradicts the license restrictions
+ of other proprietary libraries that do not normally accompany the
+ operating system. Such a contradiction means you cannot use both them
+ and the Library together in an executable that you distribute.
+
+7. You may place library facilities that are a work based on the Library
+ side-by-side in a single library together with other library facilities
+ not covered by this License, and distribute such a combined library,
+ provided that the separate distribution of the work based on the Library
+ and of the other library facilities is otherwise permitted, and provided
+ that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work based on
+ the Library, uncombined with any other library facilities. This must
+ be distributed under the terms of the Sections above.
+
+ b) Give prominent notice with the combined library of the fact that part
+ of it is a work based on the Library, and explaining where to find
+ the accompanying uncombined form of the same work.
+
+8. You may not copy, modify, sublicense, link with, or distribute the
+ Library except as expressly provided under this License. Any attempt
+ otherwise to copy, modify, sublicense, link with, or distribute the
+ Library is void, and will automatically terminate your rights under this
+ License. However, parties who have received copies, or rights, from you
+ under this License will not have their licenses terminated so long as
+ such parties remain in full compliance.
+
+9. You are not required to accept this License, since you have not signed
+ it. However, nothing else grants you permission to modify or distribute
+ the Library or its derivative works. These actions are prohibited by law
+ if you do not accept this License. Therefore, by modifying or
+ distributing the Library (or any work based on the Library), you
+ indicate your acceptance of this License to do so, and all its terms and
+ conditions for copying, distributing or modifying the Library or works
+ based on it.
+
+10. Each time you redistribute the Library (or any work based on the
+ Library), the recipient automatically receives a license from the
+ original licensor to copy, distribute, link with or modify the Library
+ subject to these terms and conditions. You may not impose any further
+ restrictions on the recipients' exercise of the rights granted
+ herein. You are not responsible for enforcing compliance by third
+ parties with this License.
+
+11. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent issues),
+ conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot
+ distribute so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you
+ may not distribute the Library at all. For example, if a patent license
+ would not permit royalty-free redistribution of the Library by all
+ those who receive copies directly or indirectly through you, then the
+ only way you could satisfy both it and this License would be to refrain
+ entirely from distribution of the Library.
+
+ If any portion of this section is held invalid or unenforceable under
+ any particular circumstance, the balance of the section is intended to
+ apply, and the section as a whole is intended to apply in other
+ circumstances.
+
+ It is not the purpose of this section to induce you to infringe any
+ patents or other property right claims or to contest validity of any
+ such claims; this section has the sole purpose of protecting the
+ integrity of the free software distribution system which is implemented
+ by public license practices. Many people have made generous
+ contributions to the wide range of software distributed through that
+ system in reliance on consistent application of that system; it is up
+ to the author/donor to decide if he or she is willing to distribute
+ software through any other system and a licensee cannot impose that
+ choice.
+
+ This section is intended to make thoroughly clear what is believed to
+ be a consequence of the rest of this License.
+
+12. If the distribution and/or use of the Library is restricted in certain
+ countries either by patents or by copyrighted interfaces, the original
+ copyright holder who places the Library under this License may add an
+ explicit geographical distribution limitation excluding those
+ countries, so that distribution is permitted only in or among countries
+ not thus excluded. In such case, this License incorporates the
+ limitation as if written in the body of this License.
+
+13. The Free Software Foundation may publish revised and/or new versions of
+ the Lesser General Public License from time to time. Such new versions
+ will be similar in spirit to the present version, but may differ in
+ detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the Library
+ specifies a version number of this License which applies to it and "any
+ later version", you have the option of following the terms and
+ conditions either of that version or of any later version published by
+ the Free Software Foundation. If the Library does not specify a license
+ version number, you may choose any version ever published by the Free
+ Software Foundation.
+
+14. If you wish to incorporate parts of the Library into other free
+ programs whose distribution conditions are incompatible with these,
+ write to the author to ask for permission. For software which is
+ copyrighted by the Free Software Foundation, write to the Free Software
+ Foundation; we sometimes make exceptions for this. Our decision will be
+ guided by the two goals of preserving the free status of all
+ derivatives of our free software and of promoting the sharing and reuse
+ of software generally.
+
+NO WARRANTY
+
+15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+ FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+ OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+ PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
+ EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH
+ YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
+ NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+ REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
+ DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
+ DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY
+ (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
+ INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+ THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR
+ OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Libraries
+
+If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+one line to give the library's name and an idea of what it does.
+Copyright (C) year name of author
+
+This library is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 2.1 of the License, or (at
+your option) any later version.
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation,
+Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in
+the library `Frob' (a library for tweaking knobs) written
+by James Random Hacker.
+
+signature of Ty Coon, 1 April 1990
+Ty Coon, President of Vice
+
+That's all there is to it!
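
To make the Usage-Guide above concrete (an illustrative sketch only, not part of the license file itself): under the kernel's SPDX placement convention, the tag/value pair sits in a comment at the very top of each source file. For a hypothetical C source file available under LGPL-2.1 or any later version, the first line would read

    // SPDX-License-Identifier: LGPL-2.1+

while a C header file would carry the block-comment form instead:

    /* SPDX-License-Identifier: LGPL-2.1+ */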
diff --git a/LICENSES/preferred/MIT b/LICENSES/preferred/MIT
new file mode 100644
index 000000000000..f33a68ceb3ea
--- /dev/null
+++ b/LICENSES/preferred/MIT
@@ -0,0 +1,30 @@
+Valid-License-Identifier: MIT
+SPDX-URL: https://spdx.org/licenses/MIT.html
+Usage-Guide:
+ To use the MIT License put the following SPDX tag/value pair into a
+ comment according to the placement guidelines in the licensing rules
+ documentation:
+ SPDX-License-Identifier: MIT
+License-Text:
+
+MIT License
+
+Copyright (c) <year> <copyright holders>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
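
The MIT identifier also appears in SPDX license expressions when a file is dual-licensed, as the licensing rules documentation permits. A sketch of such a tag at the top of a C source file (the dual-licensing choice here is hypothetical):

    // SPDX-License-Identifier: GPL-2.0 OR MIT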
diff --git a/MAINTAINERS b/MAINTAINERS
index 76cd525b56fe..a2a25331e3b1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -62,7 +62,15 @@ trivial patch so apply some common sense.
7. When sending security related changes or reports to a maintainer
please Cc: security@kernel.org, especially if the maintainer
- does not respond.
+ does not respond. Please keep in mind that the security team is
+ a small set of people who can be efficient only when working on
+ verified bugs. Please only Cc: this list when you have identified
+ that the bug would present a short-term risk to other users if it
+ were publicly disclosed. For example, reports of address leaks do
+ not represent an immediate threat and are better handled publicly,
+ and ideally, should come with a patch proposal. Please do not send
+ automated reports to this list either. Such bugs will be handled
+ better and faster in the usual public places.
8. Happy hacking.
@@ -262,6 +270,7 @@ ACCES 104-QUAD-8 IIO DRIVER
M: William Breathitt Gray <vilhelm.gray@gmail.com>
L: linux-iio@vger.kernel.org
S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
F: drivers/iio/counter/104-quad-8.c
ACCES PCI-IDIO-16 GPIO DRIVER
@@ -270,6 +279,12 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-pci-idio-16.c
+ACCES PCIe-IDIO-24 GPIO DRIVER
+M: William Breathitt Gray <vilhelm.gray@gmail.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-pcie-idio-24.c
+
ACENIC DRIVER
M: Jes Sorensen <jes@trained-monkey.org>
L: linux-acenic@sunsite.dk
@@ -321,7 +336,7 @@ F: drivers/acpi/apei/
ACPI COMPONENT ARCHITECTURE (ACPICA)
M: Robert Moore <robert.moore@intel.com>
-M: Lv Zheng <lv.zheng@intel.com>
+M: Erik Schmauss <erik.schmauss@intel.com>
M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
L: linux-acpi@vger.kernel.org
L: devel@acpica.org
@@ -845,6 +860,8 @@ M: Michael Hennerich <Michael.Hennerich@analog.com>
W: http://wiki.analog.com/
W: http://ez.analog.com/community/linux-device-drivers
S: Supported
+F: Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523
+F: Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350
F: drivers/iio/*/ad*
F: drivers/iio/adc/ltc2497*
X: drivers/iio/*/adjd*
@@ -867,6 +884,12 @@ S: Supported
F: drivers/android/
F: drivers/staging/android/
+ANDROID GOLDFISH PIC DRIVER
+M: Miodrag Dinic <miodrag.dinic@mips.com>
+S: Supported
+F: Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt
+F: drivers/irqchip/irq-goldfish-pic.c
+
ANDROID GOLDFISH RTC DRIVER
M: Miodrag Dinic <miodrag.dinic@mips.com>
S: Supported
@@ -1313,7 +1336,8 @@ F: tools/perf/arch/arm/util/pmu.c
F: tools/perf/arch/arm/util/auxtrace.c
F: tools/perf/arch/arm/util/cs-etm.c
F: tools/perf/arch/arm/util/cs-etm.h
-F: tools/perf/util/cs-etm.h
+F: tools/perf/util/cs-etm.*
+F: tools/perf/util/cs-etm-decoder/*
ARM/CORGI MACHINE SUPPORT
M: Richard Purdie <rpurdie@rpsys.net>
@@ -1327,8 +1351,10 @@ T: git git://github.com/ulli-kroll/linux.git
S: Maintained
F: Documentation/devicetree/bindings/arm/gemini.txt
F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
+F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
F: arch/arm/mach-gemini/
+F: drivers/net/ethernet/cortina/gemini/*
F: drivers/pinctrl/pinctrl-gemini.c
F: drivers/rtc/rtc-ftrtc010.c
@@ -1583,6 +1609,7 @@ F: arch/arm/boot/dts/kirkwood*
F: arch/arm/configs/mvebu_*_defconfig
F: arch/arm/mach-mvebu/
F: arch/arm64/boot/dts/marvell/armada*
+F: drivers/cpufreq/armada-37xx-cpufreq.c
F: drivers/cpufreq/mvebu-cpufreq.c
F: drivers/irqchip/irq-armada-370-xp.c
F: drivers/irqchip/irq-mvebu-*
@@ -2383,13 +2410,6 @@ F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
F: drivers/input/touchscreen/atmel_mxt_ts.c
F: include/linux/platform_data/atmel_mxt_ts.h
-ATMEL NAND DRIVER
-M: Wenyou Yang <wenyou.yang@atmel.com>
-M: Josh Wu <rainyfeeling@outlook.com>
-L: linux-mtd@lists.infradead.org
-S: Supported
-F: drivers/mtd/nand/atmel/*
-
ATMEL SAMA5D2 ADC DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-iio@vger.kernel.org
@@ -2564,6 +2584,7 @@ S: Maintained
F: Documentation/ABI/testing/sysfs-class-net-batman-adv
F: Documentation/ABI/testing/sysfs-class-net-mesh
F: Documentation/networking/batman-adv.rst
+F: include/uapi/linux/batadv_packet.h
F: include/uapi/linux/batman_adv.h
F: net/batman-adv/
@@ -2687,7 +2708,6 @@ F: drivers/mtd/devices/block2mtd.c
BLUETOOTH DRIVERS
M: Marcel Holtmann <marcel@holtmann.org>
-M: Gustavo Padovan <gustavo@padovan.org>
M: Johan Hedberg <johan.hedberg@gmail.com>
L: linux-bluetooth@vger.kernel.org
W: http://www.bluez.org/
@@ -2698,7 +2718,6 @@ F: drivers/bluetooth/
BLUETOOTH SUBSYSTEM
M: Marcel Holtmann <marcel@holtmann.org>
-M: Gustavo Padovan <gustavo@padovan.org>
M: Johan Hedberg <johan.hedberg@gmail.com>
L: linux-bluetooth@vger.kernel.org
W: http://www.bluez.org/
@@ -2723,12 +2742,16 @@ M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
L: netdev@vger.kernel.org
L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
S: Supported
F: arch/x86/net/bpf_jit*
F: Documentation/networking/filter.txt
F: Documentation/bpf/
F: include/linux/bpf*
F: include/linux/filter.h
+F: include/trace/events/bpf.h
+F: include/trace/events/xdp.h
F: include/uapi/linux/bpf*
F: include/uapi/linux/filter.h
F: kernel/bpf/
@@ -3193,7 +3216,7 @@ W: https://github.com/linux-can
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
S: Maintained
-F: Documentation/networking/can.txt
+F: Documentation/networking/can.rst
F: net/can/
F: include/linux/can/core.h
F: include/uapi/linux/can.h
@@ -4125,6 +4148,7 @@ DEVANTECH SRF ULTRASONIC RANGER IIO DRIVER
M: Andreas Klinger <ak@it-klinger.de>
L: linux-iio@vger.kernel.org
S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-iio-distance-srf08
F: drivers/iio/proximity/srf*.c
DEVICE COREDUMP (DEV_COREDUMP)
@@ -4334,10 +4358,12 @@ T: git git://git.infradead.org/users/hch/dma-mapping.git
W: http://git.infradead.org/users/hch/dma-mapping.git
S: Supported
F: lib/dma-debug.c
-F: lib/dma-noop.c
+F: lib/dma-direct.c
F: lib/dma-virt.c
F: drivers/base/dma-mapping.c
F: drivers/base/dma-coherent.c
+F: include/asm-generic/dma-mapping.h
+F: include/linux/dma-direct.h
F: include/linux/dma-mapping.h
DME1737 HARDWARE MONITOR DRIVER
@@ -4939,6 +4965,11 @@ S: Maintained
F: lib/dynamic_debug.c
F: include/linux/dynamic_debug.h
+DYNAMIC INTERRUPT MODERATION
+M: Tal Gilboa <talgi@mellanox.com>
+S: Maintained
+F: include/linux/net_dim.h
+
DZ DECSTATION DZ11 SERIAL DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
@@ -5139,6 +5170,12 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/skx_edac.c
+EDAC-TI
+M: Tero Kristo <t-kristo@ti.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/ti_edac.c
+
EDIROL UA-101/UA-1000 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -5149,15 +5186,15 @@ F: sound/usb/misc/ua101.c
EFI TEST DRIVER
L: linux-efi@vger.kernel.org
M: Ivan Hu <ivan.hu@canonical.com>
-M: Matt Fleming <matt@codeblueprint.co.uk>
+M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
S: Maintained
F: drivers/firmware/efi/test/
EFI VARIABLE FILESYSTEM
M: Matthew Garrett <matthew.garrett@nebula.com>
M: Jeremy Kerr <jk@ozlabs.org>
-M: Matt Fleming <matt@codeblueprint.co.uk>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
+M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
L: linux-efi@vger.kernel.org
S: Maintained
F: fs/efivarfs/
@@ -5318,7 +5355,6 @@ S: Supported
F: security/integrity/evm/
EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M: Matt Fleming <matt@codeblueprint.co.uk>
M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
L: linux-efi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
@@ -5963,6 +5999,7 @@ F: drivers/media/rc/gpio-ir-tx.c
GPIO MOCKUP DRIVER
M: Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>
+R: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-mockup.c
@@ -6610,16 +6647,6 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-stub.c
-i386 BOOT CODE
-M: "H. Peter Anvin" <hpa@zytor.com>
-S: Maintained
-F: arch/x86/boot/
-
-i386 SETUP CODE / CPU ERRATA WORKAROUNDS
-M: "H. Peter Anvin" <hpa@zytor.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/hpa/linux-2.6-x86setup.git
-S: Maintained
-
IA64 (Itanium) PLATFORM
M: Tony Luck <tony.luck@intel.com>
M: Fenghua Yu <fenghua.yu@intel.com>
@@ -6827,6 +6854,8 @@ R: Peter Meerwald-Stadler <pmeerw@pmeerw.net>
L: linux-iio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git
S: Maintained
+F: Documentation/ABI/testing/configfs-iio*
+F: Documentation/ABI/testing/sysfs-bus-iio*
F: Documentation/devicetree/bindings/iio/
F: drivers/iio/
F: drivers/staging/iio/
@@ -6886,7 +6915,7 @@ M: Jason Gunthorpe <jgg@mellanox.com>
L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
S: Supported
F: Documentation/devicetree/bindings/infiniband/
F: Documentation/infiniband/
@@ -7308,17 +7337,16 @@ F: drivers/tty/ipwireless/
IPX NETWORK LAYER
L: netdev@vger.kernel.org
-S: Odd fixes
-F: include/net/ipx.h
+S: Obsolete
F: include/uapi/linux/ipx.h
-F: net/ipx/
+F: drivers/staging/ipx/
IRDA SUBSYSTEM
M: Samuel Ortiz <samuel@sortiz.org>
L: irda-users@lists.sourceforge.net (subscribers-only)
L: netdev@vger.kernel.org
W: http://irda.sourceforge.net/
-S: Maintained
+S: Obsolete
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
F: Documentation/networking/irda.txt
F: drivers/staging/irda/
@@ -8193,6 +8221,7 @@ F: arch/*/include/asm/rwsem.h
F: include/linux/seqlock.h
F: lib/locking*.[ch]
F: kernel/locking/
+X: kernel/locking/locktorture.c
LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
M: "Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -8408,6 +8437,13 @@ L: linux-wireless@vger.kernel.org
S: Odd Fixes
F: drivers/net/wireless/marvell/mwl8k.c
+MARVELL NAND CONTROLLER DRIVER
+M: Miquel Raynal <miquel.raynal@free-electrons.com>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: drivers/mtd/nand/marvell_nand.c
+F: Documentation/devicetree/bindings/mtd/marvell-nand.txt
+
MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
@@ -8722,6 +8758,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/mediatek/
+MEDIATEK SWITCH DRIVER
+M: Sean Wang <sean.wang@mediatek.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/dsa/mt7530.*
+F: net/dsa/tag_mtk.c
+
MEDIATEK JPEG DRIVER
M: Rick Chang <rick.chang@mediatek.com>
M: Bin Liu <bin.liu@mediatek.com>
@@ -8955,7 +8998,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.infradead.org/linux-mtd.git master
-T: git git://git.infradead.org/l2-mtd.git master
+T: git git://git.infradead.org/linux-mtd.git mtd/next
S: Maintained
F: Documentation/devicetree/bindings/mtd/
F: drivers/mtd/
@@ -9044,6 +9087,14 @@ F: drivers/media/platform/atmel/atmel-isc.c
F: drivers/media/platform/atmel/atmel-isc-regs.h
F: devicetree/bindings/media/atmel-isc.txt
+MICROCHIP / ATMEL NAND DRIVER
+M: Wenyou Yang <wenyou.yang@microchip.com>
+M: Josh Wu <rainyfeeling@outlook.com>
+L: linux-mtd@lists.infradead.org
+S: Supported
+F: drivers/mtd/nand/atmel/*
+F: Documentation/devicetree/bindings/mtd/atmel-nand.txt
+
MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
M: Woojung Huh <Woojung.Huh@microchip.com>
M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
@@ -9086,6 +9137,7 @@ F: drivers/usb/image/microtek.*
MIPS
M: Ralf Baechle <ralf@linux-mips.org>
+M: James Hogan <jhogan@kernel.org>
L: linux-mips@linux-mips.org
W: http://www.linux-mips.org/
T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
@@ -9343,7 +9395,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.infradead.org/linux-mtd.git nand/fixes
-T: git git://git.infradead.org/l2-mtd.git nand/next
+T: git git://git.infradead.org/linux-mtd.git nand/next
S: Maintained
F: drivers/mtd/nand/
F: include/linux/mtd/*nand*.h
@@ -9361,8 +9413,8 @@ F: drivers/net/ethernet/natsemi/natsemi.c
NCP FILESYSTEM
M: Petr Vandrovec <petr@vandrovec.name>
-S: Odd Fixes
-F: fs/ncpfs/
+S: Obsolete
+F: drivers/staging/ncpfs/
NCR 5380 SCSI DRIVERS
M: Finn Thain <fthain@telegraphics.com.au>
@@ -9598,6 +9650,11 @@ NETWORKING [WIRELESS]
L: linux-wireless@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-wireless/list/
+NETDEVSIM
+M: Jakub Kicinski <jakub.kicinski@netronome.com>
+S: Maintained
+F: drivers/net/netdevsim/*
+
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manish.chopra@cavium.com>
M: Rahul Verma <rahul.verma@cavium.com>
@@ -9639,8 +9696,8 @@ F: include/uapi/linux/sunrpc/
NILFS2 FILESYSTEM
M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
L: linux-nilfs@vger.kernel.org
-W: http://nilfs.sourceforge.net/
-W: http://nilfs.osdn.jp/
+W: https://nilfs.sourceforge.io/
+W: https://nilfs.osdn.jp/
T: git git://github.com/konis/nilfs2.git
S: Supported
F: Documentation/filesystems/nilfs2.txt
@@ -9744,6 +9801,15 @@ S: Supported
F: Documentation/filesystems/ntfs.txt
F: fs/ntfs/
+NUBUS SUBSYSTEM
+M: Finn Thain <fthain@telegraphics.com.au>
+L: linux-m68k@lists.linux-m68k.org
+S: Maintained
+F: arch/*/include/asm/nubus.h
+F: drivers/nubus/
+F: include/linux/nubus.h
+F: include/uapi/linux/nubus.h
+
NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
M: Antonino Daplas <adaplas@gmail.com>
L: linux-fbdev@vger.kernel.org
@@ -9804,6 +9870,7 @@ NXP TFA9879 DRIVER
M: Peter Rosin <peda@axentia.se>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/sound/tfa9879.txt
F: sound/soc/codecs/tfa9879*
NXP-NCI NFC DRIVER
@@ -10135,7 +10202,7 @@ F: drivers/irqchip/irq-ompic.c
F: drivers/irqchip/irq-or1k-*
OPENVSWITCH
-M: Pravin Shelar <pshelar@nicira.com>
+M: Pravin B Shelar <pshelar@ovn.org>
L: netdev@vger.kernel.org
L: dev@openvswitch.org
W: http://openvswitch.org
@@ -10890,6 +10957,7 @@ F: include/linux/pm.h
F: include/linux/pm_*
F: include/linux/powercap.h
F: drivers/powercap/
+F: kernel/configs/nopm.config
POWER STATE COORDINATION INTERFACE (PSCI)
M: Mark Rutland <mark.rutland@arm.com>
@@ -11184,7 +11252,8 @@ S: Maintained
F: drivers/firmware/qemu_fw_cfg.c
QIB DRIVER
-M: Mike Marciniszyn <infinipath@intel.com>
+M: Dennis Dalessandro <dennis.dalessandro@intel.com>
+M: Mike Marciniszyn <mike.marciniszyn@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/qib/
@@ -11211,7 +11280,6 @@ F: include/linux/qed/
F: drivers/net/ethernet/qlogic/qede/
QLOGIC QL4xxx RDMA DRIVER
-M: Ram Amrani <Ram.Amrani@cavium.com>
M: Michal Kalderon <Michal.Kalderon@cavium.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
L: linux-rdma@vger.kernel.org
@@ -11450,15 +11518,6 @@ L: linux-wireless@vger.kernel.org
S: Orphan
F: drivers/net/wireless/ray*
-RCUTORTURE MODULE
-M: Josh Triplett <josh@joshtriplett.org>
-M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-L: linux-kernel@vger.kernel.org
-S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
-F: Documentation/RCU/torture.txt
-F: kernel/rcu/rcutorture.c
-
RCUTORTURE TEST FRAMEWORK
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
@@ -11482,6 +11541,7 @@ F: drivers/net/ethernet/rdc/r6040.c
RDMAVT - RDMA verbs software
M: Dennis Dalessandro <dennis.dalessandro@intel.com>
+M: Mike Marciniszyn <mike.marciniszyn@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/sw/rdmavt
@@ -11651,8 +11711,8 @@ F: drivers/mtd/nand/r852.h
RISC-V ARCHITECTURE
M: Palmer Dabbelt <palmer@sifive.com>
M: Albert Ou <albert@sifive.com>
-L: patches@groups.riscv.org
-T: git https://github.com/riscv/riscv-linux
+L: linux-riscv@lists.infradead.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git
S: Supported
F: arch/riscv/
K: riscv
@@ -11759,15 +11819,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
S: Maintained
F: drivers/net/wireless/realtek/rtl818x/rtl8187/
-RTL8192CE WIRELESS DRIVER
-M: Larry Finger <Larry.Finger@lwfinger.net>
-M: Chaoming Li <chaoming_li@realsil.com.cn>
+REALTEK WIRELESS DRIVER (rtlwifi family)
+M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/realtek/rtlwifi/
-F: drivers/net/wireless/realtek/rtlwifi/rtl8192ce/
RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>
@@ -11931,6 +11989,13 @@ S: Maintained
F: drivers/crypto/exynos-rng.c
F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+SAMSUNG EXYNOS TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER
+M: Łukasz Stelmach <l.stelmach@samsung.com>
+L: linux-samsung-soc@vger.kernel.org
+S: Maintained
+F: drivers/char/hw_random/exynos-trng.c
+F: Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt
+
SAMSUNG FRAMEBUFFER DRIVER
M: Jingoo Han <jingoohan1@gmail.com>
L: linux-fbdev@vger.kernel.org
@@ -11993,6 +12058,7 @@ F: drivers/media/i2c/s5k5baf.c
SAMSUNG S5P Security SubSystem (SSS) DRIVER
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Vladimir Zapolskiy <vz@mleia.com>
+M: Kamil Konieczny <k.konieczny@partner.samsung.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -12233,7 +12299,7 @@ M: Security Officers <security@kernel.org>
S: Supported
SECURITY SUBSYSTEM
-M: James Morris <james.l.morris@oracle.com>
+M: James Morris <jmorris@namei.org>
M: "Serge E. Hallyn" <serge@hallyn.com>
L: linux-security-module@vger.kernel.org (suggested Cc:)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
@@ -12607,6 +12673,12 @@ F: include/media/soc*
F: drivers/media/i2c/soc_camera/
F: drivers/media/platform/soc_camera/
+SOCIONEXT UNIPHIER SOUND DRIVER
+M: Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: sound/soc/uniphier/
+
SOEKRIS NET48XX LED SUPPORT
M: Chris Boot <bootc@bootc.net>
S: Maintained
@@ -12631,6 +12703,15 @@ L: linux-media@vger.kernel.org
S: Supported
F: drivers/media/pci/solo6x10/
+SOFTWARE DELEGATED EXCEPTION INTERFACE (SDEI)
+M: James Morse <james.morse@arm.com>
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/arm/firmware/sdei.txt
+F: drivers/firmware/arm_sdei.c
+F: include/linux/sdei.h
+F: include/uapi/linux/sdei.h
+
SOFTWARE RAID (Multiple Disks) SUPPORT
M: Shaohua Li <shli@kernel.org>
L: linux-raid@vger.kernel.org
@@ -12643,6 +12724,13 @@ F: drivers/md/raid*
F: include/linux/raid/
F: include/uapi/linux/raid/
+SOCIONEXT (SNI) NETSEC NETWORK DRIVER
+M: Jassi Brar <jaswinder.singh@linaro.org>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/socionext/netsec.c
+F: Documentation/devicetree/bindings/net/socionext-netsec.txt
+
SONIC NETWORK DRIVER
M: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
L: netdev@vger.kernel.org
@@ -12805,7 +12893,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.infradead.org/linux-mtd.git spi-nor/fixes
-T: git git://git.infradead.org/l2-mtd.git spi-nor/next
+T: git git://git.infradead.org/linux-mtd.git spi-nor/next
S: Maintained
F: drivers/mtd/spi-nor/
F: include/linux/mtd/spi-nor.h
@@ -13058,7 +13146,7 @@ F: arch/x86/boot/video*
SWIOTLB SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L: linux-kernel@vger.kernel.org
+L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
S: Supported
F: lib/swiotlb.c
@@ -13123,6 +13211,11 @@ S: Supported
F: drivers/reset/reset-axs10x.c
F: Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt
+SYNOPSYS DESIGNWARE 8250 UART DRIVER
+R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S: Maintained
+F: drivers/tty/serial/8250/8250_dw.c
+
SYNOPSYS DESIGNWARE APB GPIO DRIVER
M: Hoan Tran <hotran@apm.com>
L: linux-gpio@vger.kernel.org
@@ -13792,6 +13885,18 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/topstar-laptop.c
+TORTURE-TEST MODULES
+M: Davidlohr Bueso <dave@stgolabs.net>
+M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: Josh Triplett <josh@joshtriplett.org>
+L: linux-kernel@vger.kernel.org
+S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+F: Documentation/RCU/torture.txt
+F: kernel/torture.c
+F: kernel/rcu/rcutorture.c
+F: kernel/locking/locktorture.c
+
TOSHIBA ACPI EXTRAS DRIVER
M: Azael Avalos <coproscefalo@gmail.com>
L: platform-driver-x86@vger.kernel.org
@@ -13834,9 +13939,10 @@ F: drivers/platform/x86/toshiba-wmi.c
TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de>
M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-R: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+R: Jason Gunthorpe <jgg@ziepe.ca>
L: linux-integrity@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-integrity/list/
+W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
T: git git://git.infradead.org/users/jjs/linux-tpmdd.git
S: Maintained
F: drivers/char/tpm/
@@ -13875,6 +13981,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
S: Maintained
K: ^Subject:.*(?i)trivial
+TEMPO SEMICONDUCTOR DRIVERS
+M: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+S: Maintained
+F: sound/soc/codecs/tscs*.c
+F: sound/soc/codecs/tscs*.h
+F: Documentation/devicetree/bindings/sound/tscs*.txt
+
TTY LAYER
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Jiri Slaby <jslaby@suse.com>
@@ -14036,6 +14149,8 @@ UNISYS S-PAR DRIVERS
M: David Kershner <david.kershner@unisys.com>
L: sparmaintainer@unisys.com (Unisys internal)
S: Supported
+F: include/linux/visorbus.h
+F: drivers/visorbus/
F: drivers/staging/unisys/
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
@@ -14682,6 +14797,7 @@ W: http://www.slimlogic.co.uk/?p=48
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git
S: Supported
F: Documentation/devicetree/bindings/regulator/
+F: Documentation/power/regulator/
F: drivers/regulator/
F: include/dt-bindings/regulator/
F: include/linux/regulator/
@@ -14779,9 +14895,9 @@ S: Maintained
F: drivers/hid/hid-wiimote*
WILOCITY WIL6210 WIRELESS DRIVER
-M: Maya Erez <qca_merez@qca.qualcomm.com>
+M: Maya Erez <merez@codeaurora.org>
L: linux-wireless@vger.kernel.org
-L: wil6210@qca.qualcomm.com
+L: wil6210@qti.qualcomm.com
S: Supported
W: http://wireless.kernel.org/en/users/Drivers/wil6210
F: drivers/net/wireless/ath/wil6210/
@@ -14875,6 +14991,12 @@ F: include/linux/workqueue.h
F: kernel/workqueue.c
F: Documentation/core-api/workqueue.rst
+X-POWERS AXP288 PMIC DRIVERS
+M: Hans de Goede <hdegoede@redhat.com>
+S: Maintained
+N: axp288
+F: drivers/acpi/pmic/intel_pmic_xpower.c
+
X-POWERS MULTIFUNCTION PMIC DEVICE DRIVERS
M: Chen-Yu Tsai <wens@csie.org>
L: linux-kernel@vger.kernel.org
@@ -14892,7 +15014,7 @@ F: net/x25/
X86 ARCHITECTURE (32-BIT AND 64-BIT)
M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
-M: "H. Peter Anvin" <hpa@zytor.com>
+R: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
@@ -15002,6 +15124,7 @@ F: include/xen/interface/io/vscsiif.h
XEN SWIOTLB SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
+L: iommu@lists.linux-foundation.org
S: Supported
F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
diff --git a/Makefile b/Makefile
index eb1f5973813e..c8b8e902d5a4 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -484,26 +484,6 @@ CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
-# Quiet clang warning: comparison of unsigned expression < 0 is always false
-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not on of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
-else
-
-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
endif
ifeq ($(config-targets),1)
@@ -716,6 +696,29 @@ ifdef CONFIG_CC_STACKPROTECTOR
endif
KBUILD_CFLAGS += $(stackp-flag)
+ifeq ($(cc-name),clang)
+KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+# Clang uses _MergedGlobals as an optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not one of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+else
+
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+endif
+
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
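Relocating this block below the stack-protector setup means every cc-option test inside it runs against the compiler actually selected. A minimal sketch of the resulting guard, reusing the kbuild helpers exactly as the hunk does (not the full flag list):

ifeq ($(cc-name),clang)
# clang-only: silence warnings that are noisy in a regular build
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
else
# gcc-only; re-enable with "make W=1" (see scripts/Makefile.extrawarn)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
endif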
diff --git a/arch/Kconfig b/arch/Kconfig
index 400b9e1b2f27..d007b2a15b22 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -196,6 +196,9 @@ config HAVE_OPTPROBES
config HAVE_KPROBES_ON_FTRACE
bool
+config HAVE_FUNCTION_ERROR_INJECTION
+ bool
+
config HAVE_NMI
bool
@@ -234,8 +237,8 @@ config ARCH_HAS_FORTIFY_SOURCE
config ARCH_HAS_SET_MEMORY
bool
-# Select if arch init_task initializer is different to init/init_task.c
-config ARCH_INIT_TASK
+# Select if arch init_task must go in the __init_task_data section
+config ARCH_TASK_STRUCT_ON_STACK
bool
# Select if arch has its private alloc_task_struct() function
@@ -938,6 +941,10 @@ config STRICT_MODULE_RWX
and non-text memory will be made non-executable. This provides
protection against certain security exploits (e.g. writing to text)
+# select if the architecture provides an asm/dma-direct.h header
+config ARCH_HAS_PHYS_TO_DMA
+ bool
+
config ARCH_HAS_REFCOUNT
bool
help
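An architecture opts in to the new symbol by selecting it from its top-level Kconfig entry and shipping an asm/dma-direct.h header; arm does exactly that later in this patch. The consumer side in sketch form:

config ARM
	select ARCH_HAS_PHYS_TO_DMA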
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b31b974a03cb..e96adcbcab41 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -209,6 +209,7 @@ config ALPHA_EIGER
config ALPHA_JENSEN
bool "Jensen"
+ depends on BROKEN
help
DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
of the first-generation Alpha systems. A number of these systems
diff --git a/arch/alpha/include/asm/asm-prototypes.h b/arch/alpha/include/asm/asm-prototypes.h
index d12c68ea340b..b34cc1f06720 100644
--- a/arch/alpha/include/asm/asm-prototypes.h
+++ b/arch/alpha/include/asm/asm-prototypes.h
@@ -4,7 +4,7 @@
#include <asm/console.h>
#include <asm/page.h>
#include <asm/string.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm-generic/asm-prototypes.h>
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 8c20c5e35432..807d7b9a1860 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -39,9 +39,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* How to get the thread information struct from C. */
register struct thread_info *__current_thread_info __asm__("$8");
#define current_thread_info() __current_thread_info
diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild
index 14a2e9af97e9..9afaba5e5503 100644
--- a/arch/alpha/include/uapi/asm/Kbuild
+++ b/arch/alpha/include/uapi/asm/Kbuild
@@ -2,3 +2,4 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
+generic-y += poll.h
diff --git a/arch/alpha/include/uapi/asm/poll.h b/arch/alpha/include/uapi/asm/poll.h
deleted file mode 100644
index b7132a305a47..000000000000
--- a/arch/alpha/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/poll.h>
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ce3a675c0c4b..fa1a392ca9a2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -950,22 +950,31 @@ struct itimerval32
};
static inline long
-get_tv32(struct timeval *o, struct timeval32 __user *i)
+get_tv32(struct timespec64 *o, struct timeval32 __user *i)
{
struct timeval32 tv;
if (copy_from_user(&tv, i, sizeof(struct timeval32)))
return -EFAULT;
o->tv_sec = tv.tv_sec;
- o->tv_usec = tv.tv_usec;
+ o->tv_nsec = tv.tv_usec * NSEC_PER_USEC;
return 0;
}
static inline long
-put_tv32(struct timeval32 __user *o, struct timeval *i)
+put_tv32(struct timeval32 __user *o, struct timespec64 *i)
{
return copy_to_user(o, &(struct timeval32){
- .tv_sec = o->tv_sec,
- .tv_usec = o->tv_usec},
+ .tv_sec = i->tv_sec,
+ .tv_usec = i->tv_nsec / NSEC_PER_USEC},
+ sizeof(struct timeval32));
+}
+
+static inline long
+put_tv_to_tv32(struct timeval32 __user *o, struct timeval *i)
+{
+ return copy_to_user(o, &(struct timeval32){
+ .tv_sec = i->tv_sec,
+ .tv_usec = i->tv_usec},
sizeof(struct timeval32));
}
@@ -1004,9 +1013,10 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
struct timezone __user *, tz)
{
if (tv) {
- struct timeval ktv;
- do_gettimeofday(&ktv);
- if (put_tv32(tv, &ktv))
+ struct timespec64 kts;
+
+ ktime_get_real_ts64(&kts);
+ if (put_tv32(tv, &kts))
return -EFAULT;
}
if (tz) {
@@ -1019,22 +1029,19 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
struct timezone __user *, tz)
{
- struct timespec64 kts64;
- struct timespec kts;
+ struct timespec64 kts;
struct timezone ktz;
if (tv) {
- if (get_tv32((struct timeval *)&kts, tv))
+ if (get_tv32(&kts, tv))
return -EFAULT;
- kts.tv_nsec *= 1000;
- kts64 = timespec_to_timespec64(kts);
}
if (tz) {
if (copy_from_user(&ktz, tz, sizeof(*tz)))
return -EFAULT;
}
- return do_sys_settimeofday64(tv ? &kts64 : NULL, tz ? &ktz : NULL);
+ return do_sys_settimeofday64(tv ? &kts : NULL, tz ? &ktz : NULL);
}
asmlinkage long sys_ni_posix_timers(void);
@@ -1083,22 +1090,16 @@ SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in,
SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
struct timeval32 __user *, tvs)
{
- struct timespec tv[2];
+ struct timespec64 tv[2];
if (tvs) {
- struct timeval ktvs[2];
- if (get_tv32(&ktvs[0], &tvs[0]) ||
- get_tv32(&ktvs[1], &tvs[1]))
+ if (get_tv32(&tv[0], &tvs[0]) ||
+ get_tv32(&tv[1], &tvs[1]))
return -EFAULT;
- if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 ||
- ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000)
+ if (tv[0].tv_nsec < 0 || tv[0].tv_nsec >= 1000000000 ||
+ tv[1].tv_nsec < 0 || tv[1].tv_nsec >= 1000000000)
return -EINVAL;
-
- tv[0].tv_sec = ktvs[0].tv_sec;
- tv[0].tv_nsec = 1000 * ktvs[0].tv_usec;
- tv[1].tv_sec = ktvs[1].tv_sec;
- tv[1].tv_nsec = 1000 * ktvs[1].tv_usec;
}
return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0);
@@ -1107,19 +1108,18 @@ SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
fd_set __user *, exp, struct timeval32 __user *, tvp)
{
- struct timespec end_time, *to = NULL;
+ struct timespec64 end_time, *to = NULL;
if (tvp) {
- struct timeval tv;
+ struct timespec64 tv;
to = &end_time;
if (get_tv32(&tv, tvp))
return -EFAULT;
- if (tv.tv_sec < 0 || tv.tv_usec < 0)
+ if (tv.tv_sec < 0 || tv.tv_nsec < 0)
return -EINVAL;
- if (poll_select_set_timeout(to, tv.tv_sec,
- tv.tv_usec * NSEC_PER_USEC))
+ if (poll_select_set_timeout(to, tv.tv_sec, tv.tv_nsec))
return -EINVAL;
}
@@ -1192,9 +1192,9 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
return -EFAULT;
if (!ur)
return err;
- if (put_tv32(&ur->ru_utime, &r.ru_utime))
+ if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
return -EFAULT;
- if (put_tv32(&ur->ru_stime, &r.ru_stime))
+ if (put_tv_to_tv32(&ur->ru_stime, &r.ru_stime))
return -EFAULT;
if (copy_to_user(&ur->ru_maxrss, &r.ru_maxrss,
sizeof(struct rusage32) - offsetof(struct rusage32, ru_maxrss)))
@@ -1210,18 +1210,18 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep,
struct timeval32 __user *, remain)
{
- struct timeval tmp;
+ struct timespec64 tmp;
unsigned long ticks;
if (get_tv32(&tmp, sleep))
goto fault;
- ticks = timeval_to_jiffies(&tmp);
+ ticks = timespec64_to_jiffies(&tmp);
ticks = schedule_timeout_interruptible(ticks);
if (remain) {
- jiffies_to_timeval(ticks, &tmp);
+ jiffies_to_timespec64(ticks, &tmp);
if (put_tv32(remain, &tmp))
goto fault;
}
@@ -1280,7 +1280,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
(copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
offsetof(struct timex32, tick))) ||
- (put_tv32(&txc_p->time, &txc.time)))
+ (put_tv_to_tv32(&txc_p->time, &txc.time)))
return -EFAULT;
return ret;
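All of the osf_* conversions above funnel through the same microsecond/nanosecond scaling, and put_tv32() now reads its input i rather than the output pointer o. A standalone sketch of the two directions, with struct layouts that merely mirror the 32-bit OSF ABI (hypothetical, not the kernel definitions):

#include <stdint.h>

#define NSEC_PER_USEC 1000L

struct timeval32  { int32_t tv_sec; int32_t tv_usec; };
struct timespec64 { int64_t tv_sec; long    tv_nsec; };

/* timeval32 -> timespec64, as get_tv32() does after this patch */
static void tv32_to_ts64(const struct timeval32 *in, struct timespec64 *out)
{
	out->tv_sec  = in->tv_sec;
	out->tv_nsec = (long)in->tv_usec * NSEC_PER_USEC;
}

/* timespec64 -> timeval32, as put_tv32() does after this patch */
static void ts64_to_tv32(const struct timespec64 *in, struct timeval32 *out)
{
	out->tv_sec  = (int32_t)in->tv_sec;	/* truncates past 2038 */
	out->tv_usec = (int32_t)(in->tv_nsec / NSEC_PER_USEC);
}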
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index 37bd6d9b8eb9..a6bdc1da47ad 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -102,6 +102,15 @@ sio_pci_route(void)
alpha_mv.sys.sio.route_tab);
}
+static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
+{
+ if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
+ (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+ return false;
+
+ return true;
+}
+
static unsigned int __init
sio_collect_irq_levels(void)
{
@@ -110,8 +119,7 @@ sio_collect_irq_levels(void)
/* Iterate through the devices, collecting IRQ levels. */
for_each_pci_dev(dev) {
- if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
- (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+ if (!sio_pci_dev_irq_needs_level(dev))
continue;
if (dev->irq)
@@ -120,8 +128,7 @@ sio_collect_irq_levels(void)
return level_bits;
}
-static void __init
-sio_fixup_irq_levels(unsigned int level_bits)
+static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
{
unsigned int old_level_bits;
@@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits)
*/
old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
- level_bits |= (old_level_bits & 0x71ff);
+ if (reset)
+ old_level_bits &= 0x71ff;
+
+ level_bits |= old_level_bits;
outb((level_bits >> 0) & 0xff, 0x4d0);
outb((level_bits >> 8) & 0xff, 0x4d1);
}
+static inline void
+sio_fixup_irq_levels(unsigned int level_bits)
+{
+ __sio_fixup_irq_levels(level_bits, true);
+}
+
static inline int
noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
@@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
int irq = COMMON_TABLE_LOOKUP, tmp;
tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
- return irq >= 0 ? tmp : -1;
+
+ irq = irq >= 0 ? tmp : -1;
+
+ /* Fixup IRQ level if an actual IRQ mapping is detected */
+ if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
+ __sio_fixup_irq_levels(1 << irq, false);
+
+ return irq;
}
static inline int
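The helper split lets noname_map_irq() reuse the same ELCR update that sio_collect_irq_levels() feeds: I/O ports 0x4d0/0x4d1 hold a 16-bit mask in which a set bit marks that ISA IRQ as level-triggered. The core of __sio_fixup_irq_levels() in sketch form, same ports and masking as above (assumes irq holds the routed interrupt number):

unsigned int elcr;

elcr  = inb(0x4d0) | (inb(0x4d1) << 8);	/* current edge(0)/level(1) bits */
elcr |= 1u << irq;			/* this IRQ is now level-triggered */
outb(elcr & 0xff, 0x4d0);
outb((elcr >> 8) & 0xff, 0x4d1);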
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index 316a99aa9efe..1cfcfbbea6f0 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -18,7 +18,7 @@
* The algorithm for the leading and trailing quadwords remains the same,
* however the loop has been unrolled to enable better memory throughput,
* and the code has been replicated for each of the entry points: __memset
- * and __memsetw to permit better scheduling to eliminate the stalling
+ * and __memset16 to permit better scheduling to eliminate the stalling
* encountered during the mask replication.
* A future enhancement might be to put in a byte store loop for really
* small (say < 32 bytes) memset()s. Whether or not that change would be
@@ -34,7 +34,7 @@
.globl memset
.globl __memset
.globl ___memset
- .globl __memsetw
+ .globl __memset16
.globl __constant_c_memset
.ent ___memset
@@ -415,9 +415,9 @@ end:
* to mask stalls. Note that entry point names also had to change
*/
.align 5
- .ent __memsetw
+ .ent __memset16
-__memsetw:
+__memset16:
.frame $30,0,$26,0
.prologue 0
@@ -596,8 +596,8 @@ end_w:
nop
ret $31,($26),1 # L0 :
- .end __memsetw
- EXPORT_SYMBOL(__memsetw)
+ .end __memset16
+ EXPORT_SYMBOL(__memset16)
memset = ___memset
__memset = ___memset
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 9d5fd00d9e91..f3a80cf164cc 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -463,9 +463,6 @@ config ARCH_PHYS_ADDR_T_64BIT
config ARCH_DMA_ADDR_T_64BIT
bool
-config ARC_PLAT_NEEDS_PHYS_TO_DMA
- bool
-
config ARC_KVADDR_SIZE
int "Kernel Virtual Address Space size (MB)"
range 0 512
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 4e6e9f57e790..dc91c663bcc0 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -35,6 +35,14 @@
reg = <0x80 0x10>, <0x100 0x10>;
#clock-cells = <0>;
clocks = <&input_clk>;
+
+ /*
+ * Set initial core pll output frequency to 90MHz.
+	 * It will be applied by the core pll driver
+	 * when it probes during early boot.
+ */
+ assigned-clocks = <&core_clk>;
+ assigned-clock-rates = <90000000>;
};
core_intc: archs-intc@cpu {
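assigned-clocks/assigned-clock-rates is the generic clock binding: the clock framework applies the listed rate automatically when the provider registers, so no platform code is needed (the hsdk change further down removes exactly such code). Standalone, the self-assignment pattern looks like this (node name hypothetical, rate is the 90MHz value from this file):

core_clk: core-clk@80 {
	#clock-cells = <0>;
	assigned-clocks = <&core_clk>;
	assigned-clock-rates = <90000000>;	/* applied at provider registration */
};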
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 63954a8b0100..69ff4895f2ba 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -35,6 +35,14 @@
reg = <0x80 0x10>, <0x100 0x10>;
#clock-cells = <0>;
clocks = <&input_clk>;
+
+ /*
+ * Set initial core pll output frequency to 100MHz.
+	 * It will be applied by the core pll driver
+	 * when it probes during early boot.
+ */
+ assigned-clocks = <&core_clk>;
+ assigned-clock-rates = <100000000>;
};
core_intc: archs-intc@cpu {
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 8f627c200d60..006aa3de5348 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -114,6 +114,14 @@
reg = <0x00 0x10>, <0x14B8 0x4>;
#clock-cells = <0>;
clocks = <&input_clk>;
+
+ /*
+ * Set initial core pll output frequency to 1GHz.
+	 * It will be applied by the core pll driver
+	 * when it probes during early boot.
+ */
+ assigned-clocks = <&core_clk>;
+ assigned-clock-rates = <1000000000>;
};
serial: serial@5000 {
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index ec7c849a5c8e..09f85154c5a4 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -44,7 +44,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 63d3cf69e0b0..09fed3ef22b6 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -44,7 +44,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index f613ecac14a7..ea2f6d817d1a 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index db04ea4dd2d9..ab231c040efe 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -40,7 +40,6 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index 3507be2af6fe..cf449cbf440d 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -43,7 +43,6 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 7b8f8faf8a24..1b54c72f4296 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -32,7 +32,6 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
@@ -49,10 +48,11 @@ CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_DRM_UDL=y
CONFIG_FB=y
-CONFIG_FB_UDL=y
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 6dff83a238b8..31c2c70b34a1 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -36,7 +36,6 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
CONFIG_ARC_EMAC=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index 31ee51b987e7..a578c721d50f 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -40,7 +40,6 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 8d3b1f67cae4..37d7395f3272 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -39,7 +39,6 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 6168ce2ac2ef..1e1470e2a7f0 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -35,7 +35,6 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index a70bdeb2b3fd..084a6e42685b 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -36,7 +36,6 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index ef96406c446e..f36d47990415 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -39,7 +39,6 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index f30182549395..1aca2e8fd1ba 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -42,7 +42,6 @@ CONFIG_IP_MULTICAST=y
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index 4fcf4f2503f6..f629493929ea 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -31,7 +31,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 7b71464f6c2f..21f0ca26a05d 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -34,7 +34,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 94285031c4fb..7a16824bfe98 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -11,13 +11,6 @@
#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H
-#ifndef CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA
-#define plat_dma_to_phys(dev, dma_handle) ((phys_addr_t)(dma_handle))
-#define plat_phys_to_dma(dev, paddr) ((dma_addr_t)(paddr))
-#else
-#include <plat/dma.h>
-#endif
-
extern const struct dma_map_ops arc_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index b18fcb606908..dc8ee011882f 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -74,4 +74,7 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
+/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
+#define pmdp_establish generic_pmdp_establish
+
#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 2d79e527fa50..c85947bac5e5 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -62,9 +62,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
static inline __attribute_const__ struct thread_info *current_thread_info(void)
{
register unsigned long sp asm("sp");
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index f35974ee7264..c9173c02081c 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
return 0;
__asm__ __volatile__(
+ " mov lp_count, %5 \n"
" lp 3f \n"
"1: ldb.ab %3, [%2, 1] \n"
" breq.d %3, 0, 3f \n"
@@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
" .word 1b, 4b \n"
" .previous \n"
: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
- : "g"(-EFAULT), "l"(count)
- : "memory");
+ : "g"(-EFAULT), "r"(count)
+ : "lp_count", "lp_start", "lp_end", "memory");
return res;
}
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 7ef7d9a8ff89..9d27331fe69a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -199,7 +199,7 @@ static void read_arc_build_cfg_regs(void)
unsigned int exec_ctrl;
READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
- cpu->extn.dual_enb = exec_ctrl & 1;
+ cpu->extn.dual_enb = !(exec_ctrl & 1);
/* dual issue always present for this core */
cpu->extn.dual = 1;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 74315f302971..bf40e06f3fb8 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -163,7 +163,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
*/
static int __print_sym(unsigned int address, void *unused)
{
- __print_symbol(" %s\n", address);
+ printk(" %pS\n", (void *)address);
return 0;
}
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index bcd7c9fc5d0f..b123558bf0bb 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -65,12 +65,14 @@ unhandled_exception(const char *str, struct pt_regs *regs, siginfo_t *info)
#define DO_ERROR_INFO(signr, str, name, sicode) \
int name(unsigned long address, struct pt_regs *regs) \
{ \
- siginfo_t info = { \
- .si_signo = signr, \
- .si_errno = 0, \
- .si_code = sicode, \
- .si_addr = (void __user *)address, \
- }; \
+ siginfo_t info; \
+ \
+ clear_siginfo(&info); \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void __user *)address; \
+ \
return unhandled_exception(str, regs, &info);\
}
@@ -83,6 +85,7 @@ DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR)
DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
+DO_ERROR_INFO(SIGSEGV, "gcc generated __builtin_trap", do_trap5_error, 0)
/*
* Entry Point for Misaligned Data access Exception, for emulating in software
@@ -115,6 +118,8 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
* Thus TRAP_S <n> can be used for specific purpose
* -1 used for software breakpointing (gdb)
* -2 used by kprobes
+ * -5 __builtin_trap() generated by gcc (2018.03 onwards) for toggles such as
+ * -fno-isolate-erroneous-paths-dereference
*/
void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
{
@@ -134,6 +139,9 @@ void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
kgdb_trap(regs);
break;
+ case 5:
+ do_trap5_error(address, regs);
+ break;
default:
break;
}
@@ -155,3 +163,11 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
insterror_is_error(address, regs);
}
+
+/*
+ * abort() call generated by older gcc for __builtin_trap()
+ */
+void abort(void)
+{
+ __asm__ __volatile__("trap_s 5\n");
+}
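Both compiler generations land in the same handler: gcc 2018.03+ emits trap_s 5 inline for __builtin_trap(), while older gcc emits a call to abort(), which the stub above re-raises as the same trap. A hypothetical snippet that would trigger it:

int deref(int *p)
{
	if (!p)
		__builtin_trap();	/* trap_s 5 on newer gcc, abort() call on older */
	return *p;
}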
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 7d8c1d6c2f60..6e9a0a9a6a04 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -163,6 +163,9 @@ static void show_ecr_verbose(struct pt_regs *regs)
else
pr_cont("Bus Error, check PRM\n");
#endif
+ } else if (vec == ECR_V_TRAP) {
+ if (regs->ecr_param == 5)
+ pr_cont("gcc generated __builtin_trap\n");
} else {
pr_cont("Check Programmer's Manual\n");
}
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index e9d93604ad0f..1dcc404b5aec 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -60,7 +60,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
/* This is linear addr (0x8000_0000 based) */
paddr = page_to_phys(page);
- *dma_handle = plat_phys_to_dma(dev, paddr);
+ *dma_handle = paddr;
/* This is kernel Virtual address (0x7000_0000 based) */
if (need_kvaddr) {
@@ -92,7 +92,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
+ phys_addr_t paddr = dma_handle;
struct page *page = virt_to_page(paddr);
int is_non_coh = 1;
@@ -111,7 +111,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
{
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+ unsigned long pfn = __phys_to_pfn(dma_addr);
unsigned long off = vma->vm_pgoff;
int ret = -ENXIO;
@@ -175,7 +175,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
_dma_cache_sync(paddr, size, dir);
- return plat_phys_to_dma(dev, paddr);
+ return paddr;
}
/*
@@ -190,7 +190,7 @@ static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- phys_addr_t paddr = plat_dma_to_phys(dev, handle);
+ phys_addr_t paddr = handle;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
_dma_cache_sync(paddr, size, dir);
@@ -224,13 +224,13 @@ static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
static void arc_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
+ _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
}
static void arc_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
+ _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}
static void arc_dma_sync_sg_for_cpu(struct device *dev,
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index f1ac6790da5f..46544e88492d 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -317,25 +317,23 @@ static void __init axs103_early_init(void)
* Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
* of fudging the freq in DT
*/
+#define AXS103_QUAD_CORE_CPU_FREQ_HZ 50000000
+
unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
if (num_cores > 2) {
- u32 freq = 50, orig;
- /*
- * TODO: use cpu node "cpu-freq" param instead of platform-specific
- * "/cpu_card/core_clk" as it works only if we use fixed-clock for cpu.
- */
+ u32 freq;
int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
const struct fdt_property *prop;
prop = fdt_get_property(initial_boot_params, off,
- "clock-frequency", NULL);
- orig = be32_to_cpu(*(u32*)(prop->data)) / 1000000;
+ "assigned-clock-rates", NULL);
+ freq = be32_to_cpu(*(u32 *)(prop->data));
/* Patching .dtb in-place with new core clock value */
- if (freq != orig ) {
- freq = cpu_to_be32(freq * 1000000);
+ if (freq != AXS103_QUAD_CORE_CPU_FREQ_HZ) {
+ freq = cpu_to_be32(AXS103_QUAD_CORE_CPU_FREQ_HZ);
fdt_setprop_inplace(initial_boot_params, off,
- "clock-frequency", &freq, sizeof(freq));
+ "assigned-clock-rates", &freq, sizeof(freq));
}
}
#endif
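The quad-core fixup now rewrites the property the clock framework actually consumes. The libfdt in-place update boils down to two calls, sketched here with the 50 MHz quad-core cap from the hunk:

int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
u32 freq = cpu_to_be32(50000000);	/* AXS103_QUAD_CORE_CPU_FREQ_HZ */

if (off >= 0)
	fdt_setprop_inplace(initial_boot_params, off,
			    "assigned-clock-rates", &freq, sizeof(freq));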
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index fd0ae5e38639..2958aedb649a 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -38,42 +38,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
#define CREG_PAE (CREG_BASE + 0x180)
#define CREG_PAE_UPDATE (CREG_BASE + 0x194)
-#define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8)
-#define CREG_CORE_IF_CLK_DIV_2 0x1
-#define CGU_BASE ARC_PERIPHERAL_BASE
-#define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4)
-#define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0)
-#define CGU_PLL_STATUS_LOCK BIT(0)
-#define CGU_PLL_STATUS_ERR BIT(1)
-#define CGU_PLL_CTRL_1GHZ 0x3A10
-#define HSDK_PLL_LOCK_TIMEOUT 500
-
-#define HSDK_PLL_LOCKED() \
- !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
-
-#define HSDK_PLL_ERR() \
- !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
-
-static void __init hsdk_set_cpu_freq_1ghz(void)
-{
- u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
-
- /*
- * As we set cpu clock which exceeds 500MHz, the divider for the interface
- * clock must be programmed to div-by-2.
- */
- iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
-
- /* Set cpu clock to 1GHz */
- iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
-
- while (!HSDK_PLL_LOCKED() && timeout--)
- cpu_relax();
-
- if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
- pr_err("Failed to setup CPU frequency to 1GHz!");
-}
-
#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
@@ -98,12 +62,6 @@ static void __init hsdk_init_early(void)
* minimum possible div-by-2.
*/
iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
-
- /*
- * Setup CPU frequency to 1GHz.
- * TODO: remove it after smart hsdk pll driver will be introduced.
- */
- hsdk_set_cpu_freq_1ghz();
}
static const char *hsdk_compat[] __initconst = {
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 51c8df561077..430a0aa710d6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -8,6 +8,7 @@ config ARM
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -24,7 +25,7 @@ config ARM
select CLONE_BACKWARDS
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
- select DMA_NOOP_OPS if !MMU
+ select DMA_DIRECT_OPS if !MMU
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
select GENERIC_ALLOCATOR
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 45d815a86d42..de08d9045cb8 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -219,7 +219,7 @@
compatible = "aspeed,ast2400-vuart";
reg = <0x1e787000 0x40>;
reg-shift = <2>;
- interrupts = <10>;
+ interrupts = <8>;
clocks = <&clk_uart>;
no-loopback-test;
status = "disabled";
diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts
index 5f29010cdbd8..9b82cc8843e1 100644
--- a/arch/arm/boot/dts/at91-tse850-3.dts
+++ b/arch/arm/boot/dts/at91-tse850-3.dts
@@ -221,6 +221,7 @@
jc42@18 {
compatible = "nxp,se97b", "jedec,jc-42.4-temp";
reg = <0x18>;
+ smbus-timeout-disable;
};
dpot: mcp4651-104@28 {
diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi
index 61e158003509..1dfd76442777 100644
--- a/arch/arm/boot/dts/bcm2836.dtsi
+++ b/arch/arm/boot/dts/bcm2836.dtsi
@@ -13,24 +13,24 @@
compatible = "brcm,bcm2836-l1-intc";
reg = <0x40000000 0x100>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
interrupt-parent = <&local_intc>;
};
arm-pmu {
compatible = "arm,cortex-a7-pmu";
interrupt-parent = <&local_intc>;
- interrupts = <9>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
};
};
timer {
compatible = "arm,armv7-timer";
interrupt-parent = <&local_intc>;
- interrupts = <0>, // PHYS_SECURE_PPI
- <1>, // PHYS_NONSECURE_PPI
- <3>, // VIRT_PPI
- <2>; // HYP_PPI
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>, // PHYS_SECURE_PPI
+ <1 IRQ_TYPE_LEVEL_HIGH>, // PHYS_NONSECURE_PPI
+ <3 IRQ_TYPE_LEVEL_HIGH>, // VIRT_PPI
+ <2 IRQ_TYPE_LEVEL_HIGH>; // HYP_PPI
always-on;
};
@@ -76,7 +76,7 @@
compatible = "brcm,bcm2836-armctrl-ic";
reg = <0x7e00b200 0x200>;
interrupt-parent = <&local_intc>;
- interrupts = <8>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
};
&cpu_thermal {
diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi
index bc1cca5cf43c..efa7d3387ab2 100644
--- a/arch/arm/boot/dts/bcm2837.dtsi
+++ b/arch/arm/boot/dts/bcm2837.dtsi
@@ -12,7 +12,7 @@
compatible = "brcm,bcm2836-l1-intc";
reg = <0x40000000 0x100>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
interrupt-parent = <&local_intc>;
};
};
@@ -20,10 +20,10 @@
timer {
compatible = "arm,armv7-timer";
interrupt-parent = <&local_intc>;
- interrupts = <0>, // PHYS_SECURE_PPI
- <1>, // PHYS_NONSECURE_PPI
- <3>, // VIRT_PPI
- <2>; // HYP_PPI
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>, // PHYS_SECURE_PPI
+ <1 IRQ_TYPE_LEVEL_HIGH>, // PHYS_NONSECURE_PPI
+ <3 IRQ_TYPE_LEVEL_HIGH>, // VIRT_PPI
+ <2 IRQ_TYPE_LEVEL_HIGH>; // HYP_PPI
always-on;
};
@@ -73,7 +73,7 @@
compatible = "brcm,bcm2836-armctrl-ic";
reg = <0x7e00b200 0x200>;
interrupt-parent = <&local_intc>;
- interrupts = <8>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
};
&cpu_thermal {
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index dcde93c85c2d..18db25a5a66e 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -2,6 +2,7 @@
#include <dt-bindings/clock/bcm2835.h>
#include <dt-bindings/clock/bcm2835-aux.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
/* firmware-provided startup stubs live here, where the secondary CPUs are
* spinning.
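The new include pairs with the #interrupt-cells = <2> bump in bcm2836/bcm2837: every interrupt specifier under local_intc now carries a trigger-type flag defined in that header. The two-cell form in isolation:

#include <dt-bindings/interrupt-controller/irq.h>

arm-pmu {
	interrupt-parent = <&local_intc>;
	interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;	/* <number trigger-type> */
};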
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index eed89e659143..a1f4d6d5a569 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -293,12 +293,12 @@
label = "u-boot env";
reg = <0 0x020000>;
};
- partition@0x020000 {
+ partition@20000 {
/* The LCDK defaults to booting from this partition */
label = "u-boot";
reg = <0x020000 0x080000>;
};
- partition@0x0a0000 {
+ partition@a0000 {
label = "free space";
reg = <0x0a0000 0>;
};
diff --git a/arch/arm/boot/dts/da850-lego-ev3.dts b/arch/arm/boot/dts/da850-lego-ev3.dts
index 413dbd5d9f64..81942ae83e1f 100644
--- a/arch/arm/boot/dts/da850-lego-ev3.dts
+++ b/arch/arm/boot/dts/da850-lego-ev3.dts
@@ -178,7 +178,7 @@
*/
battery {
pinctrl-names = "default";
- pintctrl-0 = <&battery_pins>;
+ pinctrl-0 = <&battery_pins>;
compatible = "lego,ev3-battery";
io-channels = <&adc 4>, <&adc 3>;
io-channel-names = "voltage", "current";
@@ -392,7 +392,7 @@
batt_volt_en {
gpio-hog;
gpios = <6 GPIO_ACTIVE_HIGH>;
- output-low;
+ output-high;
};
};
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index b2b95ff205e8..0029ec27819c 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -664,6 +664,10 @@
status = "okay";
};
+&mixer {
+ status = "okay";
+};
+
/* eMMC flash */
&mmc_0 {
status = "okay";
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 09ce8b81fafa..fcaff1c66bcb 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -122,7 +122,7 @@
};
can1: can@43f88000 {
- compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+ compatible = "fsl,imx25-flexcan";
reg = <0x43f88000 0x4000>;
interrupts = <43>;
clocks = <&clks 75>, <&clks 75>;
@@ -131,7 +131,7 @@
};
can2: can@43f8c000 {
- compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+ compatible = "fsl,imx25-flexcan";
reg = <0x43f8c000 0x4000>;
interrupts = <44>;
clocks = <&clks 76>, <&clks 76>;
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 2f4ebe0318d3..e52e05c0fe56 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -1038,7 +1038,7 @@
};
can0: can@80032000 {
- compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
+ compatible = "fsl,imx28-flexcan";
reg = <0x80032000 0x2000>;
interrupts = <8>;
clocks = <&clks 58>, <&clks 58>;
@@ -1047,7 +1047,7 @@
};
can1: can@80034000 {
- compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
+ compatible = "fsl,imx28-flexcan";
reg = <0x80034000 0x2000>;
interrupts = <9>;
clocks = <&clks 59>, <&clks 59>;
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 6d5e6a60bee7..1f0e2203b576 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -303,7 +303,7 @@
};
can1: can@53fe4000 {
- compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+ compatible = "fsl,imx35-flexcan";
reg = <0x53fe4000 0x1000>;
clocks = <&clks 33>, <&clks 33>;
clock-names = "ipg", "per";
@@ -312,7 +312,7 @@
};
can2: can@53fe8000 {
- compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+ compatible = "fsl,imx35-flexcan";
reg = <0x53fe8000 0x1000>;
clocks = <&clks 34>, <&clks 34>;
clock-names = "ipg", "per";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 84f17f7abb71..85071ff8c639 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -536,7 +536,7 @@
};
can1: can@53fc8000 {
- compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+ compatible = "fsl,imx53-flexcan";
reg = <0x53fc8000 0x4000>;
interrupts = <82>;
clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
@@ -546,7 +546,7 @@
};
can2: can@53fcc000 {
- compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+ compatible = "fsl,imx53-flexcan";
reg = <0x53fcc000 0x4000>;
interrupts = <83>;
clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts
index 404a93d9596b..3ec58500e9c2 100644
--- a/arch/arm/boot/dts/imx6q-b450v3.dts
+++ b/arch/arm/boot/dts/imx6q-b450v3.dts
@@ -112,3 +112,55 @@
line-name = "PCA9539-P07";
};
};
+
+&pci_root {
+ /* Intel Corporation I210 Gigabit Network Connection */
+ switch_nic: ethernet@3,0 {
+ compatible = "pci8086,1533";
+ reg = <0x00010000 0 0 0 0>;
+ };
+};
+
+&switch_ports {
+ port@0 {
+ reg = <0>;
+ label = "enacq";
+ phy-handle = <&switchphy0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "eneport1";
+ phy-handle = <&switchphy1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "enix";
+ phy-handle = <&switchphy2>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "enid";
+ phy-handle = <&switchphy3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "cpu";
+ ethernet = <&switch_nic>;
+ phy-handle = <&switchphy4>;
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "enembc";
+
+ /* connected to Ethernet MAC of AT91RM9200 in MII mode */
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts
index 7f9f176901d4..5650a9b11091 100644
--- a/arch/arm/boot/dts/imx6q-b650v3.dts
+++ b/arch/arm/boot/dts/imx6q-b650v3.dts
@@ -111,3 +111,55 @@
fsl,tx-cal-45-dp-ohms = <55>;
fsl,tx-d-cal = <100>;
};
+
+&pci_root {
+ /* Intel Corporation I210 Gigabit Network Connection */
+ switch_nic: ethernet@3,0 {
+ compatible = "pci8086,1533";
+ reg = <0x00010000 0 0 0 0>;
+ };
+};
+
+&switch_ports {
+ port@0 {
+ reg = <0>;
+ label = "enacq";
+ phy-handle = <&switchphy0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "eneport1";
+ phy-handle = <&switchphy1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "enix";
+ phy-handle = <&switchphy2>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "enid";
+ phy-handle = <&switchphy3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "cpu";
+ ethernet = <&switch_nic>;
+ phy-handle = <&switchphy4>;
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "enembc";
+
+ /* connected to Ethernet MAC of AT91RM9200 in MII mode */
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts
index 46bdc6722715..35edbdc7bcd1 100644
--- a/arch/arm/boot/dts/imx6q-b850v3.dts
+++ b/arch/arm/boot/dts/imx6q-b850v3.dts
@@ -212,3 +212,78 @@
};
};
};
+
+&pci_root {
+ /* PLX Technology, Inc. PEX 8605 PCI Express 4-port Gen2 Switch */
+ bridge@1,0 {
+ compatible = "pci10b5,8605";
+ reg = <0x00010000 0 0 0 0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ bridge@2,1 {
+ compatible = "pci10b5,8605";
+ reg = <0x00020800 0 0 0 0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ /* Intel Corporation I210 Gigabit Network Connection */
+ ethernet@3,0 {
+ compatible = "pci8086,1533";
+ reg = <0x00030000 0 0 0 0>;
+ };
+ };
+
+ bridge@2,2 {
+ compatible = "pci10b5,8605";
+ reg = <0x00021000 0 0 0 0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ /* Intel Corporation I210 Gigabit Network Connection */
+ switch_nic: ethernet@4,0 {
+ compatible = "pci8086,1533";
+ reg = <0x00040000 0 0 0 0>;
+ };
+ };
+ };
+};
+
+&switch_ports {
+ port@0 {
+ reg = <0>;
+ label = "eneport1";
+ phy-handle = <&switchphy0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "eneport2";
+ phy-handle = <&switchphy1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "enix";
+ phy-handle = <&switchphy2>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "enid";
+ phy-handle = <&switchphy3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "cpu";
+ ethernet = <&switch_nic>;
+ phy-handle = <&switchphy4>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
index b915837bbb5f..916ea94d75ca 100644
--- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
@@ -92,6 +92,56 @@
mux-int-port = <1>;
mux-ext-port = <4>;
};
+
+ aliases {
+ mdio-gpio0 = &mdio0;
+ };
+
+ mdio0: mdio-gpio {
+ compatible = "virtual,mdio-gpio";
+ gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>, /* mdc */
+ <&gpio2 7 GPIO_ACTIVE_HIGH>; /* mdio */
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@0 {
+ compatible = "marvell,mv88e6085"; /* 88e6240*/
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ switch_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switchphy0: switchphy@0 {
+ reg = <0>;
+ };
+
+ switchphy1: switchphy@1 {
+ reg = <1>;
+ };
+
+ switchphy2: switchphy@2 {
+ reg = <2>;
+ };
+
+ switchphy3: switchphy@3 {
+ reg = <3>;
+ };
+
+ switchphy4: switchphy@4 {
+ reg = <4>;
+ };
+ };
+ };
+ };
};
&ecspi5 {
@@ -326,3 +376,15 @@
tcxo-clock-frequency = <26000000>;
};
};
+
+&pcie {
+ /* Synopsys, Inc. Device */
+ pci_root: root@0,0 {
+ compatible = "pci16c3,abcd";
+ reg = <0x00000000 0 0 0 0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index d5181f85ca9c..963e1698fe1d 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -68,12 +68,14 @@
clock-latency = <61036>; /* two CLK32 periods */
operating-points = <
/* kHz uV */
+ 696000 1275000
528000 1175000
396000 1025000
198000 950000
>;
fsl,soc-operating-points = <
/* kHz uV */
+ 696000 1275000
528000 1175000
396000 1175000
198000 1175000
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
index cf2f5240e176..27cc913ca0f5 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
@@ -53,7 +53,8 @@
};
pinctrl: pin-controller@10000 {
- pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
+ pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
+ &pmx_gpio_header_gpo>;
pinctrl-names = "default";
pmx_uart0: pmx-uart0 {
@@ -85,11 +86,16 @@
* ground.
*/
pmx_gpio_header: pmx-gpio-header {
- marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
+ marvell,pins = "mpp17", "mpp29", "mpp28",
"mpp35", "mpp34", "mpp40";
marvell,function = "gpio";
};
+ pmx_gpio_header_gpo: pmx-gpio-header-gpo {
+ marvell,pins = "mpp7";
+ marvell,function = "gpo";
+ };
+
pmx_gpio_init: pmx-init {
marvell,pins = "mpp38";
marvell,function = "gpio";
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index 940875316d0f..7bb402d3e9d0 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -215,7 +215,7 @@
reg = <0x2a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
- clocks = <&sys_mclk 1>;
+ clocks = <&sys_mclk>;
};
};
};
@@ -331,3 +331,19 @@
&uart1 {
status = "okay";
};
+
+&can0 {
+ status = "okay";
+};
+
+&can1 {
+ status = "okay";
+};
+
+&can2 {
+ status = "disabled";
+};
+
+&can3 {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index a8b148ad1dd2..860b898141f0 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -187,7 +187,7 @@
reg = <0x0a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
- clocks = <&sys_mclk 1>;
+ clocks = <&sys_mclk>;
};
};
@@ -243,3 +243,19 @@
&uart1 {
status = "okay";
};
+
+&can0 {
+ status = "okay";
+};
+
+&can1 {
+ status = "okay";
+};
+
+&can2 {
+ status = "disabled";
+};
+
+&can3 {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 9319e1f0f1d8..7789031898b0 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -730,5 +730,41 @@
<0000 0 0 3 &gic GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 4 &gic GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ can0: can@2a70000 {
+ compatible = "fsl,ls1021ar2-flexcan";
+ reg = <0x0 0x2a70000 0x0 0x1000>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+ clock-names = "ipg", "per";
+ big-endian;
+ };
+
+ can1: can@2a80000 {
+ compatible = "fsl,ls1021ar2-flexcan";
+ reg = <0x0 0x2a80000 0x0 0x1000>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+ clock-names = "ipg", "per";
+ big-endian;
+ };
+
+ can2: can@2a90000 {
+ compatible = "fsl,ls1021ar2-flexcan";
+ reg = <0x0 0x2a90000 0x0 0x1000>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+ clock-names = "ipg", "per";
+ big-endian;
+ };
+
+ can3: can@2aa0000 {
+ compatible = "fsl,ls1021ar2-flexcan";
+ reg = <0x0 0x2aa0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+ clock-names = "ipg", "per";
+ big-endian;
+ };
};
};
diff --git a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
index 1df3ace3af92..63b0b4921e4e 100644
--- a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
+++ b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
@@ -52,6 +52,7 @@
onenand@0,0 {
#address-cells = <1>;
#size-cells = <1>;
+ compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
gpmc,sync-read;
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index 4ad7d5565906..f33cc80c9dbc 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -147,32 +147,32 @@
gpmc,sync-read;
gpmc,sync-write;
gpmc,burst-length = <16>;
- gpmc,burst-read;
gpmc,burst-wrap;
+ gpmc,burst-read;
gpmc,burst-write;
gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */
gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */
gpmc,cs-on-ns = <0>;
- gpmc,cs-rd-off-ns = <87>;
- gpmc,cs-wr-off-ns = <87>;
+ gpmc,cs-rd-off-ns = <96>;
+ gpmc,cs-wr-off-ns = <96>;
gpmc,adv-on-ns = <0>;
- gpmc,adv-rd-off-ns = <10>;
- gpmc,adv-wr-off-ns = <10>;
- gpmc,oe-on-ns = <15>;
- gpmc,oe-off-ns = <87>;
+ gpmc,adv-rd-off-ns = <12>;
+ gpmc,adv-wr-off-ns = <12>;
+ gpmc,oe-on-ns = <18>;
+ gpmc,oe-off-ns = <96>;
gpmc,we-on-ns = <0>;
- gpmc,we-off-ns = <87>;
- gpmc,rd-cycle-ns = <112>;
- gpmc,wr-cycle-ns = <112>;
- gpmc,access-ns = <81>;
- gpmc,page-burst-access-ns = <15>;
+ gpmc,we-off-ns = <96>;
+ gpmc,rd-cycle-ns = <114>;
+ gpmc,wr-cycle-ns = <114>;
+ gpmc,access-ns = <90>;
+ gpmc,page-burst-access-ns = <12>;
gpmc,bus-turnaround-ns = <0>;
gpmc,cycle2cycle-delay-ns = <0>;
gpmc,wait-monitoring-ns = <0>;
- gpmc,clk-activation-ns = <5>;
+ gpmc,clk-activation-ns = <6>;
gpmc,wr-data-mux-bus-ns = <30>;
- gpmc,wr-access-ns = <81>;
- gpmc,sync-clk-ps = <15000>;
+ gpmc,wr-access-ns = <90>;
+ gpmc,sync-clk-ps = <12000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 669c51c00c00..e7c7b8e50703 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -838,6 +838,7 @@
onenand@0,0 {
#address-cells = <1>;
#size-cells = <1>;
+ compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
gpmc,sync-read;
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 12fbb3da5fce..0d9b85317529 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -367,6 +367,7 @@
onenand@0,0 {
#address-cells = <1>;
#size-cells = <1>;
+ compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
gpmc,sync-read;
diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
index 908951eb5943..d652708f6bef 100644
--- a/arch/arm/boot/dts/omap3430-sdp.dts
+++ b/arch/arm/boot/dts/omap3430-sdp.dts
@@ -154,6 +154,7 @@
linux,mtd-name = "samsung,kfm2g16q2m-deb8";
#address-cells = <1>;
#size-cells = <1>;
+ compatible = "ti,omap2-onenand";
reg = <2 0 0x20000>; /* CS2, offset 0, IO size 128K */
gpmc,device-width = <2>;
diff --git a/arch/arm/boot/dts/rk3066a-marsboard.dts b/arch/arm/boot/dts/rk3066a-marsboard.dts
index c6d92c25df42..d23ee6d911ac 100644
--- a/arch/arm/boot/dts/rk3066a-marsboard.dts
+++ b/arch/arm/boot/dts/rk3066a-marsboard.dts
@@ -83,6 +83,10 @@
};
};
+&cpu0 {
+ cpu0-supply = <&vdd_arm>;
+};
+
&i2c1 {
status = "okay";
clock-frequency = <400000>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index cd24894ee5c6..6102e4e7f35c 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -956,7 +956,7 @@
iep_mmu: iommu@ff900800 {
compatible = "rockchip,iommu";
reg = <0x0 0xff900800 0x0 0x40>;
- interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "iep_mmu";
#iommu-cells = <0>;
status = "disabled";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index b91300d49a31..4f2f2eea0755 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -502,8 +502,8 @@
reg = <0x01c16000 0x1000>;
interrupts = <58>;
clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
- <&ccu 9>,
- <&ccu 18>;
+ <&ccu CLK_PLL_VIDEO0_2X>,
+ <&ccu CLK_PLL_VIDEO1_2X>;
clock-names = "ahb", "mod", "pll-0", "pll-1";
dmas = <&dma SUN4I_DMA_NORMAL 16>,
<&dma SUN4I_DMA_NORMAL 16>,
@@ -1104,7 +1104,7 @@
be1_out_tcon0: endpoint@0 {
reg = <0>;
- remote-endpoint = <&tcon1_in_be0>;
+ remote-endpoint = <&tcon0_in_be1>;
};
be1_out_tcon1: endpoint@1 {
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 6ae4d95e230e..316cb8b2945b 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -82,8 +82,8 @@
reg = <0x01c16000 0x1000>;
interrupts = <58>;
clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>,
- <&ccu 9>,
- <&ccu 16>;
+ <&ccu CLK_PLL_VIDEO0_2X>,
+ <&ccu CLK_PLL_VIDEO1_2X>;
clock-names = "ahb", "mod", "pll-0", "pll-1";
dmas = <&dma SUN4I_DMA_NORMAL 16>,
<&dma SUN4I_DMA_NORMAL 16>,
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 8bfa12b548e0..72d3fe44ecaf 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -429,8 +429,8 @@
interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_AHB1_HDMI>, <&ccu CLK_HDMI>,
<&ccu CLK_HDMI_DDC>,
- <&ccu 7>,
- <&ccu 13>;
+ <&ccu CLK_PLL_VIDEO0_2X>,
+ <&ccu CLK_PLL_VIDEO1_2X>;
clock-names = "ahb", "mod", "ddc", "pll-0", "pll-1";
resets = <&ccu RST_AHB1_HDMI>;
reset-names = "ahb";
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 68dfa82544fc..bd0cd3204273 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -581,8 +581,8 @@
reg = <0x01c16000 0x1000>;
interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
- <&ccu 9>,
- <&ccu 18>;
+ <&ccu CLK_PLL_VIDEO0_2X>,
+ <&ccu CLK_PLL_VIDEO1_2X>;
clock-names = "ahb", "mod", "pll-0", "pll-1";
dmas = <&dma SUN4I_DMA_NORMAL 16>,
<&dma SUN4I_DMA_NORMAL 16>,
@@ -1354,7 +1354,7 @@
be1_out_tcon0: endpoint@0 {
reg = <0>;
- remote-endpoint = <&tcon1_in_be0>;
+ remote-endpoint = <&tcon0_in_be1>;
};
be1_out_tcon1: endpoint@1 {
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index 98715538932f..a021ee6da396 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -146,6 +146,7 @@
status = "okay";
axp81x: pmic@3a3 {
+ compatible = "x-powers,axp813";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm/boot/dts/tango4-common.dtsi b/arch/arm/boot/dts/tango4-common.dtsi
index 0ec1b0a317b4..ff72a8efb73d 100644
--- a/arch/arm/boot/dts/tango4-common.dtsi
+++ b/arch/arm/boot/dts/tango4-common.dtsi
@@ -156,7 +156,6 @@
reg = <0x6e000 0x400>;
ranges = <0 0x6e000 0x400>;
interrupt-parent = <&gic>;
- interrupt-controller;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index d23b9d56a88b..95946dee9c77 100644
--- a/arch/arm/configs/aspeed_g4_defconfig
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -1,7 +1,6 @@
CONFIG_KERNEL_XZ=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index c0ad7b82086b..8c7ea033cdc2 100644
--- a/arch/arm/configs/aspeed_g5_defconfig
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -1,7 +1,6 @@
CONFIG_KERNEL_XZ=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/arm/configs/cns3420vb_defconfig b/arch/arm/configs/cns3420vb_defconfig
index 63a953d855a6..c6dcd6e4f4e6 100644
--- a/arch/arm/configs/cns3420vb_defconfig
+++ b/arch/arm/configs/cns3420vb_defconfig
@@ -28,7 +28,6 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyS0,38400 mem=128M root=/dev/mmcblk0p1 ro rootwait"
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig
index b2e340b272ee..74d611e41e02 100644
--- a/arch/arm/configs/hisi_defconfig
+++ b/arch/arm/configs/hisi_defconfig
@@ -1,4 +1,3 @@
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index ec5674c229a3..de5be2fc7306 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -54,7 +54,6 @@ CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
CONFIG_BT_HCIBTUSB=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig
index cf7dcb2c86e6..88ea02e7ba19 100644
--- a/arch/arm/configs/mini2440_defconfig
+++ b/arch/arm/configs/mini2440_defconfig
@@ -77,7 +77,6 @@ CONFIG_MAC80211=m
CONFIG_MAC80211_MESH=y
CONFIG_MAC80211_LEDS=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_CONNECTOR=m
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 61509c4b769f..b659244902cd 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1,6 +1,5 @@
CONFIG_SYSVIPC=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_CGROUPS=y
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index 752e2e74de5b..0448bd8075ac 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -37,7 +37,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index 69553704f2dc..ddaeda4f2e82 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -1,6 +1,5 @@
CONFIG_SYSVIPC=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
@@ -57,7 +56,7 @@ CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_PXA3xx=y
+CONFIG_MTD_NAND_MARVELL=y
CONFIG_MTD_SPI_NOR=y
CONFIG_SRAM=y
CONFIG_MTD_UBI=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index e5822ab01b7d..bbfb6759447b 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -46,7 +46,6 @@ CONFIG_CAN_FLEXCAN=m
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index b831baddae02..bf9046331f6e 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -60,7 +60,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_DSA=y
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 830e817a028a..837d0c9c8b0e 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 6529cb43e0fd..2080025556b5 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -2,7 +2,6 @@
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 5caaf971fb50..df433abfcb02 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -10,6 +10,7 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=8
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
+CONFIG_CMA=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CPU_FREQ=y
@@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_AHCI_SUNXI=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 6678f2929356..c7b99ebf5fcf 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -1,5 +1,4 @@
CONFIG_SYSVIPC=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
@@ -76,7 +75,6 @@ CONFIG_RFKILL_INPUT=y
CONFIG_RFKILL_GPIO=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=64
CONFIG_TEGRA_GMI=y
diff --git a/arch/arm/configs/vf610m4_defconfig b/arch/arm/configs/vf610m4_defconfig
index b7ecb83a95b6..a89f035c3b01 100644
--- a/arch/arm/configs/vf610m4_defconfig
+++ b/arch/arm/configs/vf610m4_defconfig
@@ -23,7 +23,6 @@ CONFIG_BINFMT_SHARED_FLAT=y
# CONFIG_UEVENT_HELPER is not set
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
diff --git a/arch/arm/configs/vt8500_v6_v7_defconfig b/arch/arm/configs/vt8500_v6_v7_defconfig
index 1bfaa7bfc392..9b85326ba287 100644
--- a/arch/arm/configs/vt8500_v6_v7_defconfig
+++ b/arch/arm/configs/vt8500_v6_v7_defconfig
@@ -1,4 +1,3 @@
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 18768f330449..07e31941dc67 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -181,9 +181,8 @@ static int cbc_init(struct crypto_tfm *tfm)
struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->enc_tfm))
- return PTR_ERR(ctx->enc_tfm);
- return 0;
+
+ return PTR_ERR_OR_ZERO(ctx->enc_tfm);
}
static void cbc_exit(struct crypto_tfm *tfm)
@@ -258,9 +257,8 @@ static int xts_init(struct crypto_tfm *tfm)
struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tweak_tfm))
- return PTR_ERR(ctx->tweak_tfm);
- return 0;
+
+ return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
}
static void xts_exit(struct crypto_tfm *tfm)
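The two hunks above are a straight substitution: PTR_ERR_OR_ZERO() from <linux/err.h> folds the IS_ERR()/PTR_ERR()/return 0 sequence into one expression. A minimal sketch of the equivalence (illustrative only, not part of the patch):

    #include <linux/err.h>

    /* open-coded form ... */
    if (IS_ERR(ptr))
            return PTR_ERR(ptr);    /* pointer encodes a negative errno */
    return 0;                       /* valid pointer: success */

    /* ... and the single-call helper that replaces it */
    return PTR_ERR_OR_ZERO(ptr);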
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index 1b0e0e86ee9c..96e62ec105d0 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
.base.cra_name = "crc32",
.base.cra_driver_name = "crc32-arm-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = 1,
.base.cra_module = THIS_MODULE,
}, {
@@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
.base.cra_name = "crc32c",
.base.cra_driver_name = "crc32c-arm-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = 1,
.base.cra_module = THIS_MODULE,
} };
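Setting CRYPTO_ALG_OPTIONAL_KEY marks these crc32/crc32c shashes as usable without a prior setkey(); for CRC algorithms the "key" is only an initial seed, so the core should not reject keyless use. A hedged consumer-side sketch (data and len are assumed placeholders, error handling trimmed):

    #include <crypto/hash.h>

    struct crypto_shash *tfm = crypto_alloc_shash("crc32c", 0, 0);
    u8 out[4];

    if (!IS_ERR(tfm)) {
            SHASH_DESC_ON_STACK(desc, tfm);
            desc->tfm = tfm;
            /* no crypto_shash_setkey() call: the default seed is used */
            crypto_shash_digest(desc, data, len, out);
            crypto_free_shash(tfm);
    }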
diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
new file mode 100644
index 000000000000..5b0a8a421894
--- /dev/null
+++ b/arch/arm/include/asm/dma-direct.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_ARM_DMA_DIRECT_H
+#define ASM_ARM_DMA_DIRECT_H 1
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ unsigned int offset = paddr & ~PAGE_MASK;
+ return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ unsigned int offset = dev_addr & ~PAGE_MASK;
+ return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ u64 limit, mask;
+
+ if (!dev->dma_mask)
+ return 0;
+
+ mask = *dev->dma_mask;
+
+ limit = (mask + 1) & ~mask;
+ if (limit && size > limit)
+ return 0;
+
+ if ((addr | (addr + size - 1)) & ~mask)
+ return 0;
+
+ return 1;
+}
+
+#endif /* ASM_ARM_DMA_DIRECT_H */
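In dma_capable() above, (mask + 1) & ~mask evaluates to the largest transfer size the mask can describe, and to 0 (meaning no size cap) when the mask saturates u64. A worked example under assumed values, not taken from the kernel:

    u64 mask  = 0xffffffffULL;          /* DMA_BIT_MASK(32) */
    u64 addr  = 0x80000000ULL;          /* buffer base */
    u64 size  = 64 << 20;               /* 64 MiB */
    u64 limit = (mask + 1) & ~mask;     /* 0x100000000: 4 GiB cap */
    /* addr + size - 1 == 0x83ffffff, still under the mask, */
    /* so both checks pass and the range is DMA-capable.    */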
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index daf837423a76..8436f6ade57d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops;
+ return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops;
}
#ifdef __arch_page_to_dma
@@ -109,39 +109,6 @@ static inline bool is_device_dma_coherent(struct device *dev)
return dev->archdata.dma_coherent;
}
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- unsigned int offset = paddr & ~PAGE_MASK;
- return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
- unsigned int offset = dev_addr & ~PAGE_MASK;
- return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- u64 limit, mask;
-
- if (!dev->dma_mask)
- return 0;
-
- mask = *dev->dma_mask;
-
- limit = (mask + 1) & ~mask;
- if (limit && size > limit)
- return 0;
-
- if ((addr | (addr + size - 1)) & ~mask)
- return 0;
-
- return 1;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) { }
-
/**
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index a9f7d3f47134..acbf9ec7b396 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -238,6 +238,9 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
+static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index) {}
+
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
@@ -301,4 +304,6 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
/* All host FP/SIMD state is restored on guest exit, so nothing to save: */
static inline void kvm_fpsimd_flush_cpu_state(void) {}
+static inline void kvm_arm_vhe_guest_enter(void) {}
+static inline void kvm_arm_vhe_guest_exit(void) {}
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index fa6f2174276b..a2d176a308bd 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -211,6 +211,11 @@ static inline bool __kvm_cpu_uses_extended_idmap(void)
return false;
}
+static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
+{
+ return PTRS_PER_PGD;
+}
+
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *hyp_pgd,
pgd_t *merged_hyp_pgd,
@@ -221,6 +226,18 @@ static inline unsigned int kvm_get_vmid_bits(void)
return 8;
}
+static inline void *kvm_get_hyp_vector(void)
+{
+ return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+ return 0;
+}
+
+#define kvm_phys_to_vttbr(addr) (addr)
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 1a7a17b2a1ba..2a4836087358 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -249,6 +249,9 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+/* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
+#define pmdp_establish generic_pmdp_establish
+
/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 776757d1604a..e71cc35de163 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -75,9 +75,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/*
* how to get the current stack pointer in C
*/
diff --git a/arch/arm/include/uapi/asm/siginfo.h b/arch/arm/include/uapi/asm/siginfo.h
new file mode 100644
index 000000000000..d0513880be21
--- /dev/null
+++ b/arch/arm/include/uapi/asm/siginfo.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_SIGINFO_H
+#define __ASM_SIGINFO_H
+
+#include <asm-generic/siginfo.h>
+
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 58e3771e4c5b..7724b0f661b3 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -390,7 +390,6 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
long num;
int i;
- siginfo_t info;
for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
if (current->thread.debug.hbp[i] == bp)
@@ -398,12 +397,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
- info.si_signo = SIGTRAP;
- info.si_errno = (int)num;
- info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)(bkpt->trigger);
-
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}
/*
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5cf04888c581..3e26c6f7a191 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -793,7 +793,6 @@ void abort(void)
/* if that doesn't kill us, halt */
panic("Oops failed to kill thread");
}
-EXPORT_SYMBOL(abort);
void __init trap_init(void)
{
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index bd88470f3e5c..7fcbdc800f68 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -417,7 +417,7 @@ static struct clk_lookup da830_clks[] = {
CLK("davinci-mcasp.0", NULL, &mcasp0_clk),
CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
CLK("davinci-mcasp.2", NULL, &mcasp2_clk),
- CLK("musb-da8xx", "usb20", &usb20_clk),
+ CLK("musb-da8xx", NULL, &usb20_clk),
CLK("cppi41-dmaengine", NULL, &cppi41_clk),
CLK(NULL, "aemif", &aemif_clk),
CLK(NULL, "aintc", &aintc_clk),
@@ -426,7 +426,7 @@ static struct clk_lookup da830_clks[] = {
CLK("davinci_mdio.0", "fck", &emac_clk),
CLK(NULL, "gpio", &gpio_clk),
CLK("i2c_davinci.2", NULL, &i2c1_clk),
- CLK("ohci-da8xx", "usb11", &usb11_clk),
+ CLK("ohci-da8xx", NULL, &usb11_clk),
CLK(NULL, "emif3", &emif3_clk),
CLK(NULL, "arm", &arm_clk),
CLK(NULL, "rmii", &rmii_clk),
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 07d6f0eb8c82..d37b5463cd8f 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -563,8 +563,8 @@ static struct clk_lookup da850_clks[] = {
CLK("da830-mmc.1", NULL, &mmcsd1_clk),
CLK("ti-aemif", NULL, &aemif_clk),
CLK("davinci-nand.0", "aemif", &aemif_nand_clk),
- CLK("ohci-da8xx", "usb11", &usb11_clk),
- CLK("musb-da8xx", "usb20", &usb20_clk),
+ CLK("ohci-da8xx", NULL, &usb11_clk),
+ CLK("musb-da8xx", NULL, &usb20_clk),
CLK("cppi41-dmaengine", NULL, &cppi41_clk),
CLK("spi_davinci.0", NULL, &spi0_clk),
CLK("spi_davinci.1", NULL, &spi1_clk),
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 8be04ec95adf..5ace9380626a 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -868,10 +868,10 @@ static const struct dma_slave_map dm365_edma_map[] = {
{ "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 17) },
{ "spi_davinci.3", "tx", EDMA_FILTER_PARAM(0, 18) },
{ "spi_davinci.3", "rx", EDMA_FILTER_PARAM(0, 19) },
- { "dm6441-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) },
- { "dm6441-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) },
- { "dm6441-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) },
- { "dm6441-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) },
+ { "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) },
+ { "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) },
+ { "da830-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) },
+ { "da830-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) },
};
static struct edma_soc_info dm365_edma_pdata = {
@@ -925,12 +925,14 @@ static struct resource edma_resources[] = {
/* not using TC*_ERR */
};
-static struct platform_device dm365_edma_device = {
- .name = "edma",
- .id = 0,
- .dev.platform_data = &dm365_edma_pdata,
- .num_resources = ARRAY_SIZE(edma_resources),
- .resource = edma_resources,
+static const struct platform_device_info dm365_edma_device __initconst = {
+ .name = "edma",
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+ .res = edma_resources,
+ .num_res = ARRAY_SIZE(edma_resources),
+ .data = &dm365_edma_pdata,
+ .size_data = sizeof(dm365_edma_pdata),
};
static struct resource dm365_asp_resources[] = {
@@ -1428,13 +1430,18 @@ int __init dm365_init_video(struct vpfe_config *vpfe_cfg,
static int __init dm365_init_devices(void)
{
+ struct platform_device *edma_pdev;
int ret = 0;
if (!cpu_is_davinci_dm365())
return 0;
davinci_cfg_reg(DM365_INT_EDMA_CC);
- platform_device_register(&dm365_edma_device);
+ edma_pdev = platform_device_register_full(&dm365_edma_device);
+ if (IS_ERR(edma_pdev)) {
+ pr_warn("%s: Failed to register eDMA\n", __func__);
+ return PTR_ERR(edma_pdev);
+ }
platform_device_register(&dm365_mdio_device);
platform_device_register(&dm365_emac_device);
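The dm365 eDMA conversion swaps a static struct platform_device for platform_device_register_full(), whose platform_device_info carries a dma_mask that the core installs before the driver probes; a static platform_device gets no DMA mask unless board code fills one in by hand. Reduced to its skeleton (the names here are illustrative):

    static const struct platform_device_info example_dev_info __initconst = {
            .name     = "example",
            .id       = 0,
            .dma_mask = DMA_BIT_MASK(32),       /* set up at registration */
            .res      = example_resources,      /* assumed to exist */
            .num_res  = ARRAY_SIZE(example_resources),
    };

    struct platform_device *pdev;

    pdev = platform_device_register_full(&example_dev_info);
    if (IS_ERR(pdev))           /* returns ERR_PTR(), never NULL */
            return PTR_ERR(pdev);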
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index 731fb2019ecb..2c03d2f6b647 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -16,6 +16,7 @@
#include <linux/serial_8250.h>
#include <linux/io.h>
#include <linux/w1-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/mtd/plat-ram.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -162,9 +163,16 @@ static struct platform_device vulcan_max6369 = {
.num_resources = 1,
};
+static struct gpiod_lookup_table vulcan_w1_gpiod_table = {
+ .dev_id = "w1-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", 14, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = {
- .pin = 14,
- .ext_pullup_enable_pin = -EINVAL,
+ /* Intentionally left blank */
};
static struct platform_device vulcan_w1_gpio = {
@@ -233,6 +241,7 @@ static void __init vulcan_init(void)
IXP4XX_EXP_BUS_WR_EN |
IXP4XX_EXP_BUS_BYTE_EN;
+ gpiod_add_lookup_table(&vulcan_w1_gpiod_table);
platform_add_devices(vulcan_devices, ARRAY_SIZE(vulcan_devices));
}
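Instead of a raw GPIO number in w1_gpio_platform_data, the board now registers a gpiod_lookup_table keyed on the consumer device name ("w1-gpio"), and the driver resolves the line as a descriptor. A sketch of the consumer side, assuming the driver uses the devm_gpiod_get() API (pdev being its platform device):

    struct gpio_desc *gpiod;

    /* a NULL con_id here matches the NULL con_id in the lookup table */
    gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
    if (IS_ERR(gpiod))
            return PTR_ERR(gpiod);
    /* GPIO_OPEN_DRAIN from the table is applied by gpiolib itself */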
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 2f722a805948..c15bbcad5f67 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -232,6 +232,3 @@ obj-y += $(omap-hsmmc-m) $(omap-hsmmc-y)
obj-y += omap_phy_internal.o
obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
-
-onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o
-obj-y += $(onenand-m) $(onenand-y)
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
deleted file mode 100644
index 2944af820558..000000000000
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ /dev/null
@@ -1,409 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/gpmc-onenand.c
- *
- * Copyright (C) 2006 - 2009 Nokia Corporation
- * Contacts: Juha Yrjola
- * Tony Lindgren
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/onenand_regs.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-#include <linux/err.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-#define ONENAND_IO_SIZE SZ_128K
-
-#define ONENAND_FLAG_SYNCREAD (1 << 0)
-#define ONENAND_FLAG_SYNCWRITE (1 << 1)
-#define ONENAND_FLAG_HF (1 << 2)
-#define ONENAND_FLAG_VHF (1 << 3)
-
-static unsigned onenand_flags;
-static unsigned latency;
-
-static struct omap_onenand_platform_data *gpmc_onenand_data;
-
-static struct resource gpmc_onenand_resource = {
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device gpmc_onenand_device = {
- .name = "omap2-onenand",
- .id = -1,
- .num_resources = 1,
- .resource = &gpmc_onenand_resource,
-};
-
-static struct gpmc_settings onenand_async = {
- .device_width = GPMC_DEVWIDTH_16BIT,
- .mux_add_data = GPMC_MUX_AD,
-};
-
-static struct gpmc_settings onenand_sync = {
- .burst_read = true,
- .burst_wrap = true,
- .burst_len = GPMC_BURST_16,
- .device_width = GPMC_DEVWIDTH_16BIT,
- .mux_add_data = GPMC_MUX_AD,
- .wait_pin = 0,
-};
-
-static void omap2_onenand_calc_async_timings(struct gpmc_timings *t)
-{
- struct gpmc_device_timings dev_t;
- const int t_cer = 15;
- const int t_avdp = 12;
- const int t_aavdh = 7;
- const int t_ce = 76;
- const int t_aa = 76;
- const int t_oe = 20;
- const int t_cez = 20; /* max of t_cez, t_oez */
- const int t_wpl = 40;
- const int t_wph = 30;
-
- memset(&dev_t, 0, sizeof(dev_t));
-
- dev_t.t_avdp_r = max_t(int, t_avdp, t_cer) * 1000;
- dev_t.t_avdp_w = dev_t.t_avdp_r;
- dev_t.t_aavdh = t_aavdh * 1000;
- dev_t.t_aa = t_aa * 1000;
- dev_t.t_ce = t_ce * 1000;
- dev_t.t_oe = t_oe * 1000;
- dev_t.t_cez_r = t_cez * 1000;
- dev_t.t_cez_w = dev_t.t_cez_r;
- dev_t.t_wpl = t_wpl * 1000;
- dev_t.t_wph = t_wph * 1000;
-
- gpmc_calc_timings(t, &onenand_async, &dev_t);
-}
-
-static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
-{
- u32 reg;
-
- /* Ensure sync read and sync write are disabled */
- reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
- reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE;
- writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
-}
-
-static void set_onenand_cfg(void __iomem *onenand_base)
-{
- u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
-
- reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
- ONENAND_SYS_CFG1_BL_16;
- if (onenand_flags & ONENAND_FLAG_SYNCREAD)
- reg |= ONENAND_SYS_CFG1_SYNC_READ;
- else
- reg &= ~ONENAND_SYS_CFG1_SYNC_READ;
- if (onenand_flags & ONENAND_FLAG_SYNCWRITE)
- reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
- else
- reg &= ~ONENAND_SYS_CFG1_SYNC_WRITE;
- if (onenand_flags & ONENAND_FLAG_HF)
- reg |= ONENAND_SYS_CFG1_HF;
- else
- reg &= ~ONENAND_SYS_CFG1_HF;
- if (onenand_flags & ONENAND_FLAG_VHF)
- reg |= ONENAND_SYS_CFG1_VHF;
- else
- reg &= ~ONENAND_SYS_CFG1_VHF;
-
- writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
-}
-
-static int omap2_onenand_get_freq(struct omap_onenand_platform_data *cfg,
- void __iomem *onenand_base)
-{
- u16 ver = readw(onenand_base + ONENAND_REG_VERSION_ID);
- int freq;
-
- switch ((ver >> 4) & 0xf) {
- case 0:
- freq = 40;
- break;
- case 1:
- freq = 54;
- break;
- case 2:
- freq = 66;
- break;
- case 3:
- freq = 83;
- break;
- case 4:
- freq = 104;
- break;
- default:
- pr_err("onenand rate not detected, bad GPMC async timings?\n");
- freq = 0;
- }
-
- return freq;
-}
-
-static void omap2_onenand_calc_sync_timings(struct gpmc_timings *t,
- unsigned int flags,
- int freq)
-{
- struct gpmc_device_timings dev_t;
- const int t_cer = 15;
- const int t_avdp = 12;
- const int t_cez = 20; /* max of t_cez, t_oez */
- const int t_wpl = 40;
- const int t_wph = 30;
- int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
- int div, gpmc_clk_ns;
-
- if (flags & ONENAND_SYNC_READ)
- onenand_flags = ONENAND_FLAG_SYNCREAD;
- else if (flags & ONENAND_SYNC_READWRITE)
- onenand_flags = ONENAND_FLAG_SYNCREAD | ONENAND_FLAG_SYNCWRITE;
-
- switch (freq) {
- case 104:
- min_gpmc_clk_period = 9600; /* 104 MHz */
- t_ces = 3;
- t_avds = 4;
- t_avdh = 2;
- t_ach = 3;
- t_aavdh = 6;
- t_rdyo = 6;
- break;
- case 83:
- min_gpmc_clk_period = 12000; /* 83 MHz */
- t_ces = 5;
- t_avds = 4;
- t_avdh = 2;
- t_ach = 6;
- t_aavdh = 6;
- t_rdyo = 9;
- break;
- case 66:
- min_gpmc_clk_period = 15000; /* 66 MHz */
- t_ces = 6;
- t_avds = 5;
- t_avdh = 2;
- t_ach = 6;
- t_aavdh = 6;
- t_rdyo = 11;
- break;
- default:
- min_gpmc_clk_period = 18500; /* 54 MHz */
- t_ces = 7;
- t_avds = 7;
- t_avdh = 7;
- t_ach = 9;
- t_aavdh = 7;
- t_rdyo = 15;
- onenand_flags &= ~ONENAND_FLAG_SYNCWRITE;
- break;
- }
-
- div = gpmc_calc_divider(min_gpmc_clk_period);
- gpmc_clk_ns = gpmc_ticks_to_ns(div);
- if (gpmc_clk_ns < 15) /* >66MHz */
- onenand_flags |= ONENAND_FLAG_HF;
- else
- onenand_flags &= ~ONENAND_FLAG_HF;
- if (gpmc_clk_ns < 12) /* >83MHz */
- onenand_flags |= ONENAND_FLAG_VHF;
- else
- onenand_flags &= ~ONENAND_FLAG_VHF;
- if (onenand_flags & ONENAND_FLAG_VHF)
- latency = 8;
- else if (onenand_flags & ONENAND_FLAG_HF)
- latency = 6;
- else if (gpmc_clk_ns >= 25) /* 40 MHz*/
- latency = 3;
- else
- latency = 4;
-
- /* Set synchronous read timings */
- memset(&dev_t, 0, sizeof(dev_t));
-
- if (onenand_flags & ONENAND_FLAG_SYNCREAD)
- onenand_sync.sync_read = true;
- if (onenand_flags & ONENAND_FLAG_SYNCWRITE) {
- onenand_sync.sync_write = true;
- onenand_sync.burst_write = true;
- } else {
- dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
- dev_t.t_wpl = t_wpl * 1000;
- dev_t.t_wph = t_wph * 1000;
- dev_t.t_aavdh = t_aavdh * 1000;
- }
- dev_t.ce_xdelay = true;
- dev_t.avd_xdelay = true;
- dev_t.oe_xdelay = true;
- dev_t.we_xdelay = true;
- dev_t.clk = min_gpmc_clk_period;
- dev_t.t_bacc = dev_t.clk;
- dev_t.t_ces = t_ces * 1000;
- dev_t.t_avds = t_avds * 1000;
- dev_t.t_avdh = t_avdh * 1000;
- dev_t.t_ach = t_ach * 1000;
- dev_t.cyc_iaa = (latency + 1);
- dev_t.t_cez_r = t_cez * 1000;
- dev_t.t_cez_w = dev_t.t_cez_r;
- dev_t.cyc_aavdh_oe = 1;
- dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
-
- gpmc_calc_timings(t, &onenand_sync, &dev_t);
-}
-
-static int omap2_onenand_setup_async(void __iomem *onenand_base)
-{
- struct gpmc_timings t;
- int ret;
-
- /*
- * Note that we need to keep sync_write set for the call to
- * omap2_onenand_set_async_mode() to work to detect the onenand
- * supported clock rate for the sync timings.
- */
- if (gpmc_onenand_data->of_node) {
- gpmc_read_settings_dt(gpmc_onenand_data->of_node,
- &onenand_async);
- if (onenand_async.sync_read || onenand_async.sync_write) {
- if (onenand_async.sync_write)
- gpmc_onenand_data->flags |=
- ONENAND_SYNC_READWRITE;
- else
- gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
- onenand_async.sync_read = false;
- }
- }
-
- onenand_async.sync_write = true;
- omap2_onenand_calc_async_timings(&t);
-
- ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
- if (ret < 0)
- return ret;
-
- ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t, &onenand_async);
- if (ret < 0)
- return ret;
-
- omap2_onenand_set_async_mode(onenand_base);
-
- return 0;
-}
-
-static int omap2_onenand_setup_sync(void __iomem *onenand_base, int *freq_ptr)
-{
- int ret, freq = *freq_ptr;
- struct gpmc_timings t;
-
- if (!freq) {
- /* Very first call freq is not known */
- freq = omap2_onenand_get_freq(gpmc_onenand_data, onenand_base);
- if (!freq)
- return -ENODEV;
- set_onenand_cfg(onenand_base);
- }
-
- if (gpmc_onenand_data->of_node) {
- gpmc_read_settings_dt(gpmc_onenand_data->of_node,
- &onenand_sync);
- } else {
- /*
- * FIXME: Appears to be legacy code from initial ONENAND commit.
- * Unclear what boards this is for and if this can be removed.
- */
- if (!cpu_is_omap34xx())
- onenand_sync.wait_on_read = true;
- }
-
- omap2_onenand_calc_sync_timings(&t, gpmc_onenand_data->flags, freq);
-
- ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_sync);
- if (ret < 0)
- return ret;
-
- ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t, &onenand_sync);
- if (ret < 0)
- return ret;
-
- set_onenand_cfg(onenand_base);
-
- *freq_ptr = freq;
-
- return 0;
-}
-
-static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
-{
- struct device *dev = &gpmc_onenand_device.dev;
- unsigned l = ONENAND_SYNC_READ | ONENAND_SYNC_READWRITE;
- int ret;
-
- ret = omap2_onenand_setup_async(onenand_base);
- if (ret) {
- dev_err(dev, "unable to set to async mode\n");
- return ret;
- }
-
- if (!(gpmc_onenand_data->flags & l))
- return 0;
-
- ret = omap2_onenand_setup_sync(onenand_base, freq_ptr);
- if (ret)
- dev_err(dev, "unable to set to sync mode\n");
- return ret;
-}
-
-int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
-{
- int err;
- struct device *dev = &gpmc_onenand_device.dev;
-
- gpmc_onenand_data = _onenand_data;
- gpmc_onenand_data->onenand_setup = gpmc_onenand_setup;
- gpmc_onenand_device.dev.platform_data = gpmc_onenand_data;
-
- if (cpu_is_omap24xx() &&
- (gpmc_onenand_data->flags & ONENAND_SYNC_READWRITE)) {
- dev_warn(dev, "OneNAND using only SYNC_READ on 24xx\n");
- gpmc_onenand_data->flags &= ~ONENAND_SYNC_READWRITE;
- gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
- }
-
- if (cpu_is_omap34xx())
- gpmc_onenand_data->flags |= ONENAND_IN_OMAP34XX;
- else
- gpmc_onenand_data->flags &= ~ONENAND_IN_OMAP34XX;
-
- err = gpmc_cs_request(gpmc_onenand_data->cs, ONENAND_IO_SIZE,
- (unsigned long *)&gpmc_onenand_resource.start);
- if (err < 0) {
- dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
- gpmc_onenand_data->cs, err);
- return err;
- }
-
- gpmc_onenand_resource.end = gpmc_onenand_resource.start +
- ONENAND_IO_SIZE - 1;
-
- err = platform_device_register(&gpmc_onenand_device);
- if (err) {
- dev_err(dev, "Unable to register OneNAND device\n");
- gpmc_cs_free(gpmc_onenand_data->cs);
- }
-
- return err;
-}
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 9d662fed03ec..feddca7f3540 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -506,11 +506,16 @@ static void w1_enable_external_pullup(int enable)
msleep(100);
}
+static struct gpiod_lookup_table raumfeld_w1_gpiod_table = {
+ .dev_id = "w1-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-pxa", GPIO_ONE_WIRE, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct w1_gpio_platform_data w1_gpio_platform_data = {
- .pin = GPIO_ONE_WIRE,
- .is_open_drain = 0,
- .enable_external_pullup = w1_enable_external_pullup,
- .ext_pullup_enable_pin = -EINVAL,
+ .enable_external_pullup = w1_enable_external_pullup,
};
static struct platform_device raumfeld_w1_gpio_device = {
@@ -523,13 +528,14 @@ static struct platform_device raumfeld_w1_gpio_device = {
static void __init raumfeld_w1_init(void)
{
int ret = gpio_request(GPIO_W1_PULLUP_ENABLE,
- "W1 external pullup enable");
+ "W1 external pullup enable");
if (ret < 0)
pr_warn("Unable to request GPIO_W1_PULLUP_ENABLE\n");
else
gpio_direction_output(GPIO_W1_PULLUP_ENABLE, 0);
+ gpiod_add_lookup_table(&raumfeld_w1_gpiod_table);
platform_device_register(&raumfeld_w1_gpio_device);
}
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 398ba9ba2632..ef9fd9b759cb 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -802,8 +802,8 @@ static ssize_t battery_voltage_show(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", sharpsl_pm.battstat.mainbat_voltage);
}
-static DEVICE_ATTR(battery_percentage, 0444, battery_percentage_show, NULL);
-static DEVICE_ATTR(battery_voltage, 0444, battery_voltage_show, NULL);
+static DEVICE_ATTR_RO(battery_percentage);
+static DEVICE_ATTR_RO(battery_voltage);
extern void (*apm_get_power_status)(struct apm_power_info *);
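DEVICE_ATTR_RO(name) is shorthand for a 0444 attribute bound to a show routine named name_show, which is why battery_percentage_show() and battery_voltage_show() can be reused unchanged. Roughly what the macro expands to (simplified):

    static struct device_attribute dev_attr_battery_voltage =
            __ATTR(battery_voltage, 0444, battery_voltage_show, NULL);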
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 6db5fc26d154..619f24a42d09 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -11,7 +11,7 @@
#include <linux/export.h>
#include <linux/mm.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <asm/cachetype.h>
@@ -22,7 +22,7 @@
#include "dma.h"
/*
- * dma_noop_ops is used if
+ * dma_direct_ops is used if
* - MMU/MPU is off
* - cpu is v7m w/o cache support
* - device is coherent
@@ -39,7 +39,6 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
unsigned long attrs)
{
- const struct dma_map_ops *ops = &dma_noop_ops;
void *ret;
/*
@@ -48,7 +47,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
*/
if (attrs & DMA_ATTR_NON_CONSISTENT)
- return ops->alloc(dev, size, dma_handle, gfp, attrs);
+ return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
ret = dma_alloc_from_global_coherent(size, dma_handle);
@@ -70,10 +69,8 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
unsigned long attrs)
{
- const struct dma_map_ops *ops = &dma_noop_ops;
-
if (attrs & DMA_ATTR_NON_CONSISTENT) {
- ops->free(dev, size, cpu_addr, dma_addr, attrs);
+ dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
} else {
int ret = dma_release_from_global_coherent(get_order(size),
cpu_addr);
@@ -213,7 +210,7 @@ EXPORT_SYMBOL(arm_nommu_dma_ops);
static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
- return coherent ? &dma_noop_ops : &arm_nommu_dma_ops;
+ return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c199990e12b6..b5030e1a41d8 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -25,16 +25,58 @@
#include "bpf_jit_32.h"
-int bpf_jit_enable __read_mostly;
+/*
+ * eBPF prog stack layout:
+ *
+ * high
+ * original ARM_SP => +-----+
+ * | | callee saved registers
+ * +-----+ <= (BPF_FP + SCRATCH_SIZE)
+ * | ... | eBPF JIT scratch space
+ * eBPF fp register => +-----+
+ * (BPF_FP) | ... | eBPF prog stack
+ * +-----+
+ * |RSVD | JIT scratchpad
+ * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
+ * | |
+ * | ... | Function call stack
+ * | |
+ * +-----+
+ * low
+ *
+ * Which callee-saved registers are pushed depends on whether frame pointers are enabled.
+ * With frame pointers (to be compliant with the ABI):
+ *
+ * high
+ * original ARM_SP => +------------------+ \
+ * | pc | |
+ * current ARM_FP => +------------------+ } callee saved registers
+ * |r4-r8,r10,fp,ip,lr| |
+ * +------------------+ /
+ * low
+ *
+ * Without frame pointers:
+ *
+ * high
+ * original ARM_SP => +------------------+
+ * | r4-r8,r10,fp,lr | callee saved registers
+ * current ARM_FP => +------------------+
+ * low
+ *
+ * When popping registers off the stack at the end of a BPF function, we
+ * reference them via the current ARM_FP register.
+ */
+#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
+ 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
+ 1 << ARM_FP)
+#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
+#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
#define STACK_OFFSET(k) (k)
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
-/* Flags used for JIT optimization */
-#define SEEN_CALL (1 << 0)
-
#define FLAG_IMM_OVERFLOW (1 << 0)
/*
@@ -95,7 +137,6 @@ static const u8 bpf2a32[][2] = {
* idx : index of current last JITed instruction.
* prologue_bytes : bytes used in prologue.
* epilogue_offset : offset of epilogue starting.
- * seen : bit mask used for JIT optimization.
* offsets : array of eBPF instruction offsets in
* JITed code.
* target : final JITed code.
@@ -110,7 +151,6 @@ struct jit_ctx {
unsigned int idx;
unsigned int prologue_bytes;
unsigned int epilogue_offset;
- u32 seen;
u32 flags;
u32 *offsets;
u32 *target;
@@ -179,8 +219,13 @@ static void jit_fill_hole(void *area, unsigned int size)
*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}
-/* Stack must be multiples of 16 Bytes */
-#define STACK_ALIGN(sz) (((sz) + 3) & ~3)
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+/* EABI requires the stack to be aligned to 64-bit boundaries */
+#define STACK_ALIGNMENT 8
+#else
+/* Stack must be aligned to 32-bit boundaries */
+#define STACK_ALIGNMENT 4
+#endif
/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
* BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
@@ -194,7 +239,7 @@ static void jit_fill_hole(void *area, unsigned int size)
+ SCRATCH_SIZE + \
+ 4 /* extra for skb_copy_bits buffer */)
-#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
+#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
/* Get the offset of eBPF REGISTERs stored on scratch space. */
#define STACK_VAR(off) (STACK_SIZE-off-4)
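ALIGN(x, a) rounds x up to the next multiple of a (a power of two), replacing the old open-coded mask, which aligned to 4 bytes despite its "multiples of 16 Bytes" comment. A simplified form of the kernel macro plus a worked value (input chosen for illustration):

    #define ALIGN_EX(x, a)  (((x) + (a) - 1) & ~((a) - 1))  /* kernel ALIGN(), simplified */

    /* e.g. ALIGN_EX(44, 8) == 48, while ALIGN_EX(44, 4) == 44 */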
@@ -285,16 +330,19 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
emit_mov_i_no8m(rd, val, ctx);
}
-static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
+static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
- ctx->seen |= SEEN_CALL;
-#if __LINUX_ARM_ARCH__ < 5
- emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
-
if (elf_hwcap & HWCAP_THUMB)
emit(ARM_BX(tgt_reg), ctx);
else
emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
+}
+
+static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
+{
+#if __LINUX_ARM_ARCH__ < 5
+ emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
+ emit_bx_r(tgt_reg, ctx);
#else
emit(ARM_BLX_R(tgt_reg), ctx);
#endif
@@ -315,15 +363,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
const u8 *tmp = bpf2a32[TMP_REG_1];
- s32 jmp_offset;
- /* checks if divisor is zero or not. If it is, then
- * exit directly.
- */
- emit(ARM_CMP_I(rn, 0), ctx);
- _emit(ARM_COND_EQ, ARM_MOV_I(ARM_R0, 0), ctx);
- jmp_offset = epilogue_offset(ctx);
- _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
#if __LINUX_ARM_ARCH__ == 7
if (elf_hwcap & HWCAP_IDIVA) {
if (op == BPF_DIV)
@@ -354,7 +394,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
}
/* Call appropriate function */
- ctx->seen |= SEEN_CALL;
emit_mov_i(ARM_IP, op == BPF_DIV ?
(u32)jit_udiv32 : (u32)jit_mod32, ctx);
emit_blx_r(ARM_IP, ctx);
@@ -620,8 +659,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
/* Do LSH operation */
emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
- /* As we are using ARM_LR */
- ctx->seen |= SEEN_CALL;
emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
@@ -656,8 +693,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
/* Do the ARSH operation */
emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
- /* As we are using ARM_LR */
- ctx->seen |= SEEN_CALL;
emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
_emit(ARM_COND_MI, ARM_B(0), ctx);
@@ -692,8 +727,6 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
/* Do LSH operation */
emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
- /* As we are using ARM_LR */
- ctx->seen |= SEEN_CALL;
emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
@@ -828,8 +861,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
/* Do Multiplication */
emit(ARM_MUL(ARM_IP, rd, rn), ctx);
emit(ARM_MUL(ARM_LR, rm, rt), ctx);
- /* As we are using ARM_LR */
- ctx->seen |= SEEN_CALL;
emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
@@ -872,33 +903,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
}
/* dst = *(size*)(src + off) */
-static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk,
- const s32 off, struct jit_ctx *ctx, const u8 sz){
+static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
+ s32 off, struct jit_ctx *ctx, const u8 sz){
const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rd = dstk ? tmp[1] : dst;
+ const u8 *rd = dstk ? tmp : dst;
u8 rm = src;
+ s32 off_max;
- if (off) {
+ if (sz == BPF_H)
+ off_max = 0xff;
+ else
+ off_max = 0xfff;
+
+ if (off < 0 || off > off_max) {
emit_a32_mov_i(tmp[0], off, false, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
rm = tmp[0];
+ off = 0;
+ } else if (rd[1] == rm) {
+ emit(ARM_MOV_R(tmp[0], rm), ctx);
+ rm = tmp[0];
}
switch (sz) {
- case BPF_W:
- /* Load a Word */
- emit(ARM_LDR_I(rd, rm, 0), ctx);
+ case BPF_B:
+ /* Load a Byte */
+ emit(ARM_LDRB_I(rd[1], rm, off), ctx);
+ emit_a32_mov_i(dst[0], 0, dstk, ctx);
break;
case BPF_H:
/* Load a HalfWord */
- emit(ARM_LDRH_I(rd, rm, 0), ctx);
+ emit(ARM_LDRH_I(rd[1], rm, off), ctx);
+ emit_a32_mov_i(dst[0], 0, dstk, ctx);
break;
- case BPF_B:
- /* Load a Byte */
- emit(ARM_LDRB_I(rd, rm, 0), ctx);
+ case BPF_W:
+ /* Load a Word */
+ emit(ARM_LDR_I(rd[1], rm, off), ctx);
+ emit_a32_mov_i(dst[0], 0, dstk, ctx);
+ break;
+ case BPF_DW:
+ /* Load a Double Word */
+ emit(ARM_LDR_I(rd[1], rm, off), ctx);
+ emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
break;
}
if (dstk)
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+ emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
+ if (dstk && sz == BPF_DW)
+ emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
}
/* Arithmetic Operation */
@@ -906,7 +957,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
const u8 rn, struct jit_ctx *ctx, u8 op) {
switch (op) {
case BPF_JSET:
- ctx->seen |= SEEN_CALL;
emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
@@ -945,7 +995,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
const u8 *tcc = bpf2a32[TCALL_CNT];
const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
-#define jmp_offset (out_offset - (cur_offset))
+#define jmp_offset (out_offset - (cur_offset) - 2)
u32 off, lo, hi;
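
The new -2 in jmp_offset compensates for the A32 branch encoding: the immediate is taken relative to PC, and PC reads as the address of the branch plus two instructions, so without the correction the conditional branches overshot the out label. The arithmetic, sketched:

    /* sketch: A32 branch immediates are relative to PC = insn + 2 words */
    static int a32_branch_imm(int target_idx, int branch_idx)
    {
            return target_idx - (branch_idx + 2);
    }
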
/* if (index >= array->map.max_entries)
@@ -956,7 +1006,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit_a32_mov_i(tmp[1], off, false, ctx);
emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
- /* index (64 bit) */
+ /* index is 32-bit for arrays */
emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
/* index >= array->map.max_entries */
emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
@@ -997,7 +1047,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit_a32_mov_i(tmp2[1], off, false, ctx);
emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
- emit(ARM_BX(tmp[1]), ctx);
+ emit_bx_r(tmp[1], ctx);
/* out: */
if (out_offset == -1)
@@ -1070,54 +1120,22 @@ static void build_prologue(struct jit_ctx *ctx)
const u8 r2 = bpf2a32[BPF_REG_1][1];
const u8 r3 = bpf2a32[BPF_REG_1][0];
const u8 r4 = bpf2a32[BPF_REG_6][1];
- const u8 r5 = bpf2a32[BPF_REG_6][0];
- const u8 r6 = bpf2a32[TMP_REG_1][1];
- const u8 r7 = bpf2a32[TMP_REG_1][0];
- const u8 r8 = bpf2a32[TMP_REG_2][1];
- const u8 r10 = bpf2a32[TMP_REG_2][0];
const u8 fplo = bpf2a32[BPF_REG_FP][1];
const u8 fphi = bpf2a32[BPF_REG_FP][0];
- const u8 sp = ARM_SP;
const u8 *tcc = bpf2a32[TCALL_CNT];
- u16 reg_set = 0;
-
- /*
- * eBPF prog stack layout
- *
- * high
- * original ARM_SP => +-----+ eBPF prologue
- * |FP/LR|
- * current ARM_FP => +-----+
- * | ... | callee saved registers
- * eBPF fp register => +-----+ <= (BPF_FP)
- * | ... | eBPF JIT scratch space
- * | | eBPF prog stack
- * +-----+
- * |RSVD | JIT scratchpad
- * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
- * | |
- * | ... | Function call stack
- * | |
- * +-----+
- * low
- */
-
/* Save callee saved registers. */
- reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
#ifdef CONFIG_FRAME_POINTER
- reg_set |= (1<<ARM_FP) | (1<<ARM_IP) | (1<<ARM_LR) | (1<<ARM_PC);
- emit(ARM_MOV_R(ARM_IP, sp), ctx);
+ u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
+ emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
emit(ARM_PUSH(reg_set), ctx);
emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
- /* Check if call instruction exists in BPF body */
- if (ctx->seen & SEEN_CALL)
- reg_set |= (1<<ARM_LR);
- emit(ARM_PUSH(reg_set), ctx);
+ emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
+ emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
#endif
/* Save frame pointer for later */
- emit(ARM_SUB_I(ARM_IP, sp, SCRATCH_SIZE), ctx);
+ emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);
ctx->stack_size = imm8m(STACK_SIZE);
@@ -1140,33 +1158,19 @@ static void build_prologue(struct jit_ctx *ctx)
/* end of prologue */
}
+/* restore callee saved registers. */
static void build_epilogue(struct jit_ctx *ctx)
{
- const u8 r4 = bpf2a32[BPF_REG_6][1];
- const u8 r5 = bpf2a32[BPF_REG_6][0];
- const u8 r6 = bpf2a32[TMP_REG_1][1];
- const u8 r7 = bpf2a32[TMP_REG_1][0];
- const u8 r8 = bpf2a32[TMP_REG_2][1];
- const u8 r10 = bpf2a32[TMP_REG_2][0];
- u16 reg_set = 0;
-
- /* unwind function call stack */
- emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
-
- /* restore callee saved registers. */
- reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
#ifdef CONFIG_FRAME_POINTER
- /* the first instruction of the prologue was: mov ip, sp */
- reg_set |= (1<<ARM_FP) | (1<<ARM_SP) | (1<<ARM_PC);
+ /* When using frame pointers, some additional registers need to
+ * be loaded. */
+ u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
+ emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
- if (ctx->seen & SEEN_CALL)
- reg_set |= (1<<ARM_PC);
/* Restore callee saved registers. */
- emit(ARM_POP(reg_set), ctx);
- /* Return back to the callee function */
- if (!(ctx->seen & SEEN_CALL))
- emit(ARM_BX(ARM_LR), ctx);
+ emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
+ emit(ARM_POP(CALLEE_POP_MASK), ctx);
#endif
}
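
With the SEEN_CALL bookkeeping gone, the prologue now unconditionally saves LR and the epilogue restores it into PC, which is why the "As we are using ARM_LR" updates disappear from the shift, multiply and JSET helpers above. The masks are defined earlier in the patch, outside these hunks; presumably something along these lines:

    /* assumed shape of the masks used above -- not shown in this hunk */
    #define CALLEE_MASK      (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
                              1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
                              1 << ARM_FP)
    #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
    #define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
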
@@ -1394,8 +1398,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
emit_rev32(rt, rt, ctx);
goto emit_bswap_uxt;
case 64:
- /* Because of the usage of ARM_LR */
- ctx->seen |= SEEN_CALL;
emit_rev32(ARM_LR, rt, ctx);
emit_rev32(rt, rd, ctx);
emit(ARM_MOV_R(rd, ARM_LR), ctx);
@@ -1448,22 +1450,7 @@ exit:
rn = sstk ? tmp2[1] : src_lo;
if (sstk)
emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
- switch (BPF_SIZE(code)) {
- case BPF_W:
- /* Load a Word */
- case BPF_H:
- /* Load a Half-Word */
- case BPF_B:
- /* Load a Byte */
- emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code));
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
- break;
- case BPF_DW:
- /* Load a double word */
- emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W);
- emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W);
- break;
- }
+ emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
break;
/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
case BPF_LD | BPF_ABS | BPF_W:
@@ -1824,7 +1811,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* If BPF JIT was not enabled then we must fall back to
* the interpreter.
*/
- if (!bpf_jit_enable)
+ if (!prog->jit_requested)
return orig_prog;
/* If constant blinding was enabled and we failed during blinding
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a71a48e71fff..03c6a3c72f9c 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -257,7 +257,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
if (exceptions == VFP_EXCEPTION_ERROR) {
vfp_panic("unhandled bounce", inst);
- vfp_raise_sigfpe(0, regs);
+ vfp_raise_sigfpe(FPE_FIXME, regs);
return;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c9a7e9e1414f..b2b95f79c746 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -59,6 +59,7 @@ config ARM64
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS
+ select DMA_DIRECT_OPS
select EDAC_SUPPORT
select FRAME_POINTER
select GENERIC_ALLOCATOR
@@ -227,7 +228,7 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
-config ZONE_DMA
+config ZONE_DMA32
def_bool y
config HAVE_GENERIC_GUP
@@ -522,20 +523,13 @@ config CAVIUM_ERRATUM_30115
config QCOM_FALKOR_ERRATUM_1003
bool "Falkor E1003: Incorrect translation due to ASID change"
default y
- select ARM64_PAN if ARM64_SW_TTBR0_PAN
help
On Falkor v1, an incorrect ASID may be cached in the TLB when ASID
- and BADDR are changed together in TTBRx_EL1. The workaround for this
- issue is to use a reserved ASID in cpu_do_switch_mm() before
- switching to the new ASID. Saying Y here selects ARM64_PAN if
- ARM64_SW_TTBR0_PAN is selected. This is done because implementing and
- maintaining the E1003 workaround in the software PAN emulation code
- would be an unnecessary complication. The affected Falkor v1 CPU
- implements ARMv8.1 hardware PAN support and using hardware PAN
- support versus software PAN emulation is mutually exclusive at
- runtime.
-
- If unsure, say Y.
+ and BADDR are changed together in TTBRx_EL1. Since we keep the ASID
+ in TTBR1_EL1, this situation only occurs in the entry trampoline and
+ then only for entries in the walk cache, since the leaf translation
+ is unchanged. Work around the erratum by invalidating the walk cache
+ entries for the trampoline before entering the kernel proper.
config QCOM_FALKOR_ERRATUM_1009
bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
@@ -656,6 +650,35 @@ config ARM64_VA_BITS
default 47 if ARM64_VA_BITS_47
default 48 if ARM64_VA_BITS_48
+choice
+ prompt "Physical address space size"
+ default ARM64_PA_BITS_48
+ help
+ Choose the maximum physical address range that the kernel will
+ support.
+
+config ARM64_PA_BITS_48
+ bool "48-bit"
+
+config ARM64_PA_BITS_52
+ bool "52-bit (ARMv8.2)"
+ depends on ARM64_64K_PAGES
+ depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
+ help
+ Enable support for a 52-bit physical address space, introduced as
+ part of the ARMv8.2-LPA extension.
+
+ With this enabled, the kernel will also continue to work on CPUs that
+ do not support ARMv8.2-LPA, but with some added memory overhead (and
+ minor performance overhead).
+
+endchoice
+
+config ARM64_PA_BITS
+ int
+ default 48 if ARM64_PA_BITS_48
+ default 52 if ARM64_PA_BITS_52
+
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
help
@@ -850,6 +873,35 @@ config FORCE_MAX_ZONEORDER
However, for 4K, we choose a higher default value, 11 as opposed to 10, giving us
4M allocations matching the default size used by generic code.
+config UNMAP_KERNEL_AT_EL0
+ bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+ default y
+ help
+ Speculation attacks against some high-performance processors can
+ be used to bypass MMU permission checks and leak kernel data to
+ userspace. This can be defended against by unmapping the kernel
+ when running in userspace, mapping it back in on exception entry
+ via a trampoline page in the vector table.
+
+ If unsure, say Y.
+
+config HARDEN_BRANCH_PREDICTOR
+ bool "Harden the branch predictor against aliasing attacks" if EXPERT
+ default y
+ help
+ Speculation attacks against some high-performance processors rely on
+ being able to manipulate the branch predictor for a victim context by
+ executing aliasing branches in the attacker context. Such attacks
+ can be partially mitigated by clearing internal branch
+ predictor state and limiting the prediction logic in some situations.
+
+ This config option will take CPU-specific actions to harden the
+ branch predictor against aliasing attacks and may rely on specific
+ instruction sequences or control bits being set by the system
+ firmware.
+
+ If unsure, say Y.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
@@ -1021,6 +1073,22 @@ config ARM64_PMEM
operations if DC CVAP is not supported (following the behaviour of
DC CVAP itself if the system does not define a point of persistence).
+config ARM64_RAS_EXTN
+ bool "Enable support for RAS CPU Extensions"
+ default y
+ help
+ CPUs that support the Reliability, Availability and Serviceability
+ (RAS) Extensions, part of ARMv8.2, are able to track faults and
+ errors, classify them and report them to software.
+
+ On CPUs with these extensions system software can use additional
+ barriers to determine if faults are pending and read the
+ classification from a new set of registers.
+
+ Selecting this feature will allow the kernel to use these barriers
+ and access the new registers if the system supports the extension.
+ Platform RAS features may additionally depend on firmware support.
+
endmenu
config ARM64_SVE
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 45bdbfb96126..4a8d3f83a36e 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -75,6 +75,7 @@
pinctrl-0 = <&rgmii_pins>;
phy-mode = "rgmii";
phy-handle = <&ext_rgmii_phy>;
+ phy-supply = <&reg_dc1sw>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index 806442d3e846..604cdaedac38 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -77,6 +77,7 @@
pinctrl-0 = <&rmii_pins>;
phy-mode = "rmii";
phy-handle = <&ext_rmii_phy1>;
+ phy-supply = <&reg_dc1sw>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index 0eb2acedf8c3..abe179de35d7 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -82,6 +82,7 @@
pinctrl-0 = <&rgmii_pins>;
phy-mode = "rgmii";
phy-handle = <&ext_rgmii_phy>;
+ phy-supply = <&reg_dc1sw>;
status = "okay";
};
@@ -95,7 +96,7 @@
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
- vmmc-supply = <&reg_vcc3v3>;
+ vmmc-supply = <&reg_dcdc1>;
vqmmc-supply = <&reg_vcc1v8>;
bus-width = <8>;
non-removable;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index a5da18a6f286..43418bd881d8 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -45,19 +45,10 @@
#include "sun50i-a64.dtsi"
-/ {
- reg_vcc3v3: vcc3v3 {
- compatible = "regulator-fixed";
- regulator-name = "vcc3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
- vmmc-supply = <&reg_vcc3v3>;
+ vmmc-supply = <&reg_dcdc1>;
non-removable;
disable-wp;
bus-width = <4>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
index b6b7a561df8c..a42fd79a62a3 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
@@ -71,7 +71,7 @@
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
vmmc-supply = <&reg_vcc3v3>;
bus-width = <4>;
- cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 7c9bdc7ab50b..9db19314c60c 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -66,6 +66,7 @@
<&cpu1>,
<&cpu2>,
<&cpu3>;
+ interrupt-parent = <&intc>;
};
psci {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index e3b64d03fbd8..9c7724e82aff 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -63,8 +63,10 @@
cpm_ethernet: ethernet@0 {
compatible = "marvell,armada-7k-pp22";
reg = <0x0 0x100000>, <0x129000 0xb000>;
- clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>;
- clock-names = "pp_clk", "gop_clk", "mg_clk";
+ clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>,
+ <&cpm_clk 1 5>, <&cpm_clk 1 18>;
+ clock-names = "pp_clk", "gop_clk",
+ "mg_clk","axi_clk";
marvell,system-controller = <&cpm_syscon0>;
status = "disabled";
dma-coherent;
@@ -155,7 +157,8 @@
#size-cells = <0>;
compatible = "marvell,orion-mdio";
reg = <0x12a200 0x10>;
- clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>;
+ clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>,
+ <&cpm_clk 1 6>, <&cpm_clk 1 18>;
status = "disabled";
};
@@ -338,8 +341,8 @@
compatible = "marvell,armada-cp110-sdhci";
reg = <0x780000 0x300>;
interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "core";
- clocks = <&cpm_clk 1 4>;
+ clock-names = "core","axi";
+ clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>;
dma-coherent;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 0d51096c69f8..87ac68b2cf37 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -63,8 +63,10 @@
cps_ethernet: ethernet@0 {
compatible = "marvell,armada-7k-pp22";
reg = <0x0 0x100000>, <0x129000 0xb000>;
- clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>;
- clock-names = "pp_clk", "gop_clk", "mg_clk";
+ clocks = <&cps_clk 1 3>, <&cps_clk 1 9>,
+ <&cps_clk 1 5>, <&cps_clk 1 18>;
+ clock-names = "pp_clk", "gop_clk",
+ "mg_clk", "axi_clk";
marvell,system-controller = <&cps_syscon0>;
status = "disabled";
dma-coherent;
@@ -155,7 +157,8 @@
#size-cells = <0>;
compatible = "marvell,orion-mdio";
reg = <0x12a200 0x10>;
- clocks = <&cps_clk 1 9>, <&cps_clk 1 5>;
+ clocks = <&cps_clk 1 9>, <&cps_clk 1 5>,
+ <&cps_clk 1 6>, <&cps_clk 1 18>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 1c3634fa94bf..0acba1f4e04e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -505,7 +505,7 @@
vbus-supply = <&usb_p0_vbus>;
extcon = <&extcon_usb>;
dr_mode = "otg";
- mediatek,enable-wakeup;
+ wakeup-source;
pinctrl-names = "default", "id_float", "id_ground";
pinctrl-0 = <&usb_id_pins_float>;
pinctrl-1 = <&usb_id_pins_float>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 26396ef53bde..9263d9f99aec 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -731,15 +731,9 @@
<&u3port0 PHY_TYPE_USB3>,
<&u2port1 PHY_TYPE_USB2>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
- clocks = <&topckgen CLK_TOP_USB30_SEL>,
- <&clk26m>,
- <&pericfg CLK_PERI_USB0>,
- <&pericfg CLK_PERI_USB1>;
- clock-names = "sys_ck",
- "ref_ck",
- "wakeup_deb_p0",
- "wakeup_deb_p1";
- mediatek,syscon-wakeup = <&pericfg>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>;
+ clock-names = "sys_ck", "ref_ck";
+ mediatek,syscon-wakeup = <&pericfg 0x400 1>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index a298df74ca6c..dbe2648649db 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -255,7 +255,6 @@
&avb {
pinctrl-0 = <&avb_pins>;
pinctrl-names = "default";
- renesas,no-ether-link;
phy-handle = <&phy0>;
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index 0d85b315ce71..73439cf48659 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -145,7 +145,6 @@
&avb {
pinctrl-0 = <&avb_pins>;
pinctrl-names = "default";
- renesas,no-ether-link;
phy-handle = <&phy0>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index d4f80786e7c2..3890468678ce 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -132,6 +132,8 @@
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>;
clock_in_out = "input";
+ /* shows instability at 1GBit right now */
+ max-speed = <100>;
phy-supply = <&vcc_io>;
phy-mode = "rgmii";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 41d61840fb99..2426da631938 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -514,7 +514,7 @@
tsadc: tsadc@ff250000 {
compatible = "rockchip,rk3328-tsadc";
reg = <0x0 0xff250000 0x0 0x100>;
- interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
assigned-clocks = <&cru SCLK_TSADC>;
assigned-clock-rates = <50000>;
clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 910628d18add..1fc5060d7027 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -155,17 +155,6 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
};
-
- vdd_log: vdd-log {
- compatible = "pwm-regulator";
- pwms = <&pwm2 0 25000 0>;
- regulator-name = "vdd_log";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1400000>;
- regulator-always-on;
- regulator-boot-on;
- status = "okay";
- };
};
&cpu_b0 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index 48e733136db4..0ac2ace82435 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -198,8 +198,8 @@
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&pinctrl 0 0 0>,
- <&pinctrl 96 0 0>,
- <&pinctrl 160 0 0>;
+ <&pinctrl 104 0 0>,
+ <&pinctrl 168 0 0>;
gpio-ranges-group-names = "gpio_range0",
"gpio_range1",
"gpio_range2";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 6356c6da34ea..b20fa9b31efe 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -161,7 +161,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_DENALI_DT=y
-CONFIG_MTD_NAND_PXA3xx=y
+CONFIG_MTD_NAND_MARVELL=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 70c517aa4501..285c36c7b408 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -29,6 +29,24 @@ config CRYPTO_SHA2_ARM64_CE
select CRYPTO_HASH
select CRYPTO_SHA256_ARM64
+config CRYPTO_SHA512_ARM64_CE
+ tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_SHA512_ARM64
+
+config CRYPTO_SHA3_ARM64
+ tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_SHA3
+
+config CRYPTO_SM3_ARM64_CE
+ tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_SM3
+
config CRYPTO_GHASH_ARM64_CE
tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
depends on KERNEL_MODE_NEON
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index b5edc5918c28..cee9b8d9830b 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -14,6 +14,15 @@ sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
+obj-$(CONFIG_CRYPTO_SHA512_ARM64_CE) += sha512-ce.o
+sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o
+sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o
+sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o
+
obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
@@ -24,7 +33,7 @@ obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o
crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
-CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
+aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
diff --git a/arch/arm64/crypto/aes-ce-core.S b/arch/arm64/crypto/aes-ce-core.S
new file mode 100644
index 000000000000..8efdfdade393
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-core.S
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .arch armv8-a+crypto
+
+ENTRY(__aes_ce_encrypt)
+ sub w3, w3, #2
+ ld1 {v0.16b}, [x2]
+ ld1 {v1.4s}, [x0], #16
+ cmp w3, #10
+ bmi 0f
+ bne 3f
+ mov v3.16b, v1.16b
+ b 2f
+0: mov v2.16b, v1.16b
+ ld1 {v3.4s}, [x0], #16
+1: aese v0.16b, v2.16b
+ aesmc v0.16b, v0.16b
+2: ld1 {v1.4s}, [x0], #16
+ aese v0.16b, v3.16b
+ aesmc v0.16b, v0.16b
+3: ld1 {v2.4s}, [x0], #16
+ subs w3, w3, #3
+ aese v0.16b, v1.16b
+ aesmc v0.16b, v0.16b
+ ld1 {v3.4s}, [x0], #16
+ bpl 1b
+ aese v0.16b, v2.16b
+ eor v0.16b, v0.16b, v3.16b
+ st1 {v0.16b}, [x1]
+ ret
+ENDPROC(__aes_ce_encrypt)
+
+ENTRY(__aes_ce_decrypt)
+ sub w3, w3, #2
+ ld1 {v0.16b}, [x2]
+ ld1 {v1.4s}, [x0], #16
+ cmp w3, #10
+ bmi 0f
+ bne 3f
+ mov v3.16b, v1.16b
+ b 2f
+0: mov v2.16b, v1.16b
+ ld1 {v3.4s}, [x0], #16
+1: aesd v0.16b, v2.16b
+ aesimc v0.16b, v0.16b
+2: ld1 {v1.4s}, [x0], #16
+ aesd v0.16b, v3.16b
+ aesimc v0.16b, v0.16b
+3: ld1 {v2.4s}, [x0], #16
+ subs w3, w3, #3
+ aesd v0.16b, v1.16b
+ aesimc v0.16b, v0.16b
+ ld1 {v3.4s}, [x0], #16
+ bpl 1b
+ aesd v0.16b, v2.16b
+ eor v0.16b, v0.16b, v3.16b
+ st1 {v0.16b}, [x1]
+ ret
+ENDPROC(__aes_ce_decrypt)
+
+/*
+ * __aes_ce_sub() - use the aese instruction to perform the AES sbox
+ * substitution on each byte in 'input'
+ */
+ENTRY(__aes_ce_sub)
+ dup v1.4s, w0
+ movi v0.16b, #0
+ aese v0.16b, v1.16b
+ umov w0, v0.s[0]
+ ret
+ENDPROC(__aes_ce_sub)
+
+ENTRY(__aes_ce_invert)
+ ld1 {v0.4s}, [x1]
+ aesimc v1.16b, v0.16b
+ st1 {v1.4s}, [x0]
+ ret
+ENDPROC(__aes_ce_invert)
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-glue.c
index 6a75cd75ed11..e6b3227bbf57 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -29,6 +29,13 @@ struct aes_block {
u8 b[AES_BLOCK_SIZE];
};
+asmlinkage void __aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
+asmlinkage u32 __aes_ce_sub(u32 l);
+asmlinkage void __aes_ce_invert(struct aes_block *out,
+ const struct aes_block *in);
+
static int num_rounds(struct crypto_aes_ctx *ctx)
{
/*
@@ -44,10 +51,6 @@ static int num_rounds(struct crypto_aes_ctx *ctx)
static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct aes_block *out = (struct aes_block *)dst;
- struct aes_block const *in = (struct aes_block *)src;
- void *dummy0;
- int dummy1;
if (!may_use_simd()) {
__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
@@ -55,49 +58,13 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
}
kernel_neon_begin();
-
- __asm__(" ld1 {v0.16b}, %[in] ;"
- " ld1 {v1.4s}, [%[key]], #16 ;"
- " cmp %w[rounds], #10 ;"
- " bmi 0f ;"
- " bne 3f ;"
- " mov v3.16b, v1.16b ;"
- " b 2f ;"
- "0: mov v2.16b, v1.16b ;"
- " ld1 {v3.4s}, [%[key]], #16 ;"
- "1: aese v0.16b, v2.16b ;"
- " aesmc v0.16b, v0.16b ;"
- "2: ld1 {v1.4s}, [%[key]], #16 ;"
- " aese v0.16b, v3.16b ;"
- " aesmc v0.16b, v0.16b ;"
- "3: ld1 {v2.4s}, [%[key]], #16 ;"
- " subs %w[rounds], %w[rounds], #3 ;"
- " aese v0.16b, v1.16b ;"
- " aesmc v0.16b, v0.16b ;"
- " ld1 {v3.4s}, [%[key]], #16 ;"
- " bpl 1b ;"
- " aese v0.16b, v2.16b ;"
- " eor v0.16b, v0.16b, v3.16b ;"
- " st1 {v0.16b}, %[out] ;"
-
- : [out] "=Q"(*out),
- [key] "=r"(dummy0),
- [rounds] "=r"(dummy1)
- : [in] "Q"(*in),
- "1"(ctx->key_enc),
- "2"(num_rounds(ctx) - 2)
- : "cc");
-
+ __aes_ce_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
kernel_neon_end();
}
static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct aes_block *out = (struct aes_block *)dst;
- struct aes_block const *in = (struct aes_block *)src;
- void *dummy0;
- int dummy1;
if (!may_use_simd()) {
__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
@@ -105,62 +72,10 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
}
kernel_neon_begin();
-
- __asm__(" ld1 {v0.16b}, %[in] ;"
- " ld1 {v1.4s}, [%[key]], #16 ;"
- " cmp %w[rounds], #10 ;"
- " bmi 0f ;"
- " bne 3f ;"
- " mov v3.16b, v1.16b ;"
- " b 2f ;"
- "0: mov v2.16b, v1.16b ;"
- " ld1 {v3.4s}, [%[key]], #16 ;"
- "1: aesd v0.16b, v2.16b ;"
- " aesimc v0.16b, v0.16b ;"
- "2: ld1 {v1.4s}, [%[key]], #16 ;"
- " aesd v0.16b, v3.16b ;"
- " aesimc v0.16b, v0.16b ;"
- "3: ld1 {v2.4s}, [%[key]], #16 ;"
- " subs %w[rounds], %w[rounds], #3 ;"
- " aesd v0.16b, v1.16b ;"
- " aesimc v0.16b, v0.16b ;"
- " ld1 {v3.4s}, [%[key]], #16 ;"
- " bpl 1b ;"
- " aesd v0.16b, v2.16b ;"
- " eor v0.16b, v0.16b, v3.16b ;"
- " st1 {v0.16b}, %[out] ;"
-
- : [out] "=Q"(*out),
- [key] "=r"(dummy0),
- [rounds] "=r"(dummy1)
- : [in] "Q"(*in),
- "1"(ctx->key_dec),
- "2"(num_rounds(ctx) - 2)
- : "cc");
-
+ __aes_ce_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
kernel_neon_end();
}
-/*
- * aes_sub() - use the aese instruction to perform the AES sbox substitution
- * on each byte in 'input'
- */
-static u32 aes_sub(u32 input)
-{
- u32 ret;
-
- __asm__("dup v1.4s, %w[in] ;"
- "movi v0.16b, #0 ;"
- "aese v0.16b, v1.16b ;"
- "umov %w[out], v0.4s[0] ;"
-
- : [out] "=r"(ret)
- : [in] "r"(input)
- : "v0","v1");
-
- return ret;
-}
-
int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
@@ -189,7 +104,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rko = rki + kwords;
- rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+ rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
rko[1] = rko[0] ^ rki[1];
rko[2] = rko[1] ^ rki[2];
rko[3] = rko[2] ^ rki[3];
@@ -202,7 +117,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
} else if (key_len == AES_KEYSIZE_256) {
if (i >= 6)
break;
- rko[4] = aes_sub(rko[3]) ^ rki[4];
+ rko[4] = __aes_ce_sub(rko[3]) ^ rki[4];
rko[5] = rko[4] ^ rki[5];
rko[6] = rko[5] ^ rki[6];
rko[7] = rko[6] ^ rki[7];
@@ -221,13 +136,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
key_dec[0] = key_enc[j];
for (i = 1, j--; j > 0; i++, j--)
- __asm__("ld1 {v0.4s}, %[in] ;"
- "aesimc v1.16b, v0.16b ;"
- "st1 {v1.4s}, %[out] ;"
-
- : [out] "=Q"(key_dec[i])
- : [in] "Q"(key_enc[j])
- : "v0","v1");
+ __aes_ce_invert(key_dec + i, key_enc + j);
key_dec[i] = key_enc[0];
kernel_neon_end();
diff --git a/arch/arm64/crypto/aes-cipher-core.S b/arch/arm64/crypto/aes-cipher-core.S
index 6d2445d603cc..3a44eada2347 100644
--- a/arch/arm64/crypto/aes-cipher-core.S
+++ b/arch/arm64/crypto/aes-cipher-core.S
@@ -125,6 +125,16 @@ CPU_BE( rev w7, w7 )
ret
.endm
+ENTRY(__aes_arm64_encrypt)
+ do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
+ENDPROC(__aes_arm64_encrypt)
+
+ .align 5
+ENTRY(__aes_arm64_decrypt)
+ do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0
+ENDPROC(__aes_arm64_decrypt)
+
+ .section ".rodata", "a"
.align L1_CACHE_SHIFT
.type __aes_arm64_inverse_sbox, %object
__aes_arm64_inverse_sbox:
@@ -161,12 +171,3 @@ __aes_arm64_inverse_sbox:
.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
.size __aes_arm64_inverse_sbox, . - __aes_arm64_inverse_sbox
-
-ENTRY(__aes_arm64_encrypt)
- do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
-ENDPROC(__aes_arm64_encrypt)
-
- .align 5
-ENTRY(__aes_arm64_decrypt)
- do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0
-ENDPROC(__aes_arm64_decrypt)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 998ba519a026..2fa850e86aa8 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -665,6 +665,7 @@ static int __init aes_init(void)
unregister_simds:
aes_exit();
+ return err;
unregister_ciphers:
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
return err;
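
The added return err stops control falling through from the unregister_simds unwind into unregister_ciphers: aes_exit() already tears everything down, ciphers included, so continuing would have unregistered the skciphers a second time. An abridged sketch of the surrounding control flow, assuming that behaviour of aes_exit():

    static int __init aes_init(void)        /* abridged sketch */
    {
            err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
            if (err)
                    return err;

            /* ... register simd wrappers, goto unregister_simds on error ... */
            return 0;

    unregister_simds:
            aes_exit();     /* full teardown, ciphers included */
            return err;     /* the fix: do not fall into the label below */
    unregister_ciphers:
            crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
            return err;
    }
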
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index f1e3aa2732f9..1c7b45b7268e 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -32,10 +32,10 @@
/* preload the entire Sbox */
.macro prepare, sbox, shiftrows, temp
- adr \temp, \sbox
movi v12.16b, #0x1b
- ldr q13, \shiftrows
- ldr q14, .Lror32by8
+ ldr_l q13, \shiftrows, \temp
+ ldr_l q14, .Lror32by8, \temp
+ adr_l \temp, \sbox
ld1 {v16.16b-v19.16b}, [\temp], #64
ld1 {v20.16b-v23.16b}, [\temp], #64
ld1 {v24.16b-v27.16b}, [\temp], #64
@@ -272,7 +272,7 @@
#include "aes-modes.S"
- .text
+ .section ".rodata", "a"
.align 6
.LForward_Sbox:
.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S
index 18f5a8442276..16ed3c7ebd37 100644
--- a/arch/arm64/crypto/crc32-ce-core.S
+++ b/arch/arm64/crypto/crc32-ce-core.S
@@ -50,7 +50,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
- .text
+ .section ".rodata", "a"
.align 6
.cpu generic+crypto+crc
@@ -115,12 +115,13 @@
* uint crc32_pmull_le(unsigned char const *buffer,
* size_t len, uint crc32)
*/
+ .text
ENTRY(crc32_pmull_le)
- adr x3, .Lcrc32_constants
+ adr_l x3, .Lcrc32_constants
b 0f
ENTRY(crc32c_pmull_le)
- adr x3, .Lcrc32c_constants
+ adr_l x3, .Lcrc32c_constants
0: bic LEN, LEN, #15
ld1 {v1.16b-v4.16b}, [BUF], #0x40
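
These adr to adr_l conversions are needed once the constant tables move out of .text: plain adr reaches only about +/-1 MB, while adr_l (a macro from asm/assembler.h) expands to an adrp/add pair and can reach any +/-4 GB offset. What the pair computes, sketched in C:

    /* sketch: the page + low-12-bits split behind adrp/add */
    static inline unsigned long adrp_add(unsigned long sym)
    {
            unsigned long page = sym & ~0xfffUL;    /* adrp: 4 KB page of sym */
            unsigned long lo12 = sym & 0xfffUL;     /* folded back in by add */

            return page + lo12;
    }
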
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
index 624f4137918c..34b4e3d46aab 100644
--- a/arch/arm64/crypto/crc32-ce-glue.c
+++ b/arch/arm64/crypto/crc32-ce-glue.c
@@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
.base.cra_name = "crc32",
.base.cra_driver_name = "crc32-arm64-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = 1,
.base.cra_module = THIS_MODULE,
}, {
@@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
.base.cra_name = "crc32c",
.base.cra_driver_name = "crc32c-arm64-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = 1,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index d5b5a8c038c8..f179c01bd55c 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -128,7 +128,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// XOR the initial_crc value
eor v0.16b, v0.16b, v10.16b
- ldr q10, rk3 // xmm10 has rk3 and rk4
+ ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4
// type of pmull instruction
// will determine which constant to use
@@ -184,13 +184,13 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
// fold the 8 vector registers to 1 vector register with different
// constants
- ldr q10, rk9
+ ldr_l q10, rk9, x8
.macro fold16, reg, rk
pmull v8.1q, \reg\().1d, v10.1d
pmull2 \reg\().1q, \reg\().2d, v10.2d
.ifnb \rk
- ldr q10, \rk
+ ldr_l q10, \rk, x8
.endif
eor v7.16b, v7.16b, v8.16b
eor v7.16b, v7.16b, \reg\().16b
@@ -251,7 +251,7 @@ CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
// get rid of the extra data that was loaded before
// load the shift constant
- adr x4, tbl_shf_table + 16
+ adr_l x4, tbl_shf_table + 16
sub x4, x4, arg3
ld1 {v0.16b}, [x4]
@@ -275,7 +275,7 @@ CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
_128_done:
// compute crc of a 128-bit value
- ldr q10, rk5 // rk5 and rk6 in xmm10
+ ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10
// 64b fold
ext v0.16b, vzr.16b, v7.16b, #8
@@ -291,7 +291,7 @@ _128_done:
// barrett reduction
_barrett:
- ldr q10, rk7
+ ldr_l q10, rk7, x8
mov v0.d[0], v7.d[1]
pmull v0.1q, v0.1d, v10.1d
@@ -321,7 +321,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
b.eq _128_done // exactly 16 left
b.lt _less_than_16_left
- ldr q10, rk1 // rk1 and rk2 in xmm10
+ ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10
// update the counter. subtract 32 instead of 16 to save one
// instruction from the loop
@@ -333,7 +333,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
_less_than_16_left:
// shl r9, 4
- adr x0, tbl_shf_table + 16
+ adr_l x0, tbl_shf_table + 16
sub x0, x0, arg3
ld1 {v0.16b}, [x0]
movi v9.16b, #0x80
@@ -345,6 +345,7 @@ ENDPROC(crc_t10dif_pmull)
// precomputed constants
// these constants are precomputed from the poly:
// 0x8bb70000 (0x8bb7 scaled to 32 bits)
+ .section ".rodata", "a"
.align 4
// Q = 0x18BB70000
// rk1 = 2^(32*3) mod Q << 32
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 8550408735a0..46049850727d 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -58,12 +58,11 @@
sha1su1 v\s0\().4s, v\s3\().4s
.endm
- /*
- * The SHA1 round constants
- */
- .align 4
-.Lsha1_rcon:
- .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
+ .macro loadrc, k, val, tmp
+ movz \tmp, :abs_g0_nc:\val
+ movk \tmp, :abs_g1:\val
+ dup \k, \tmp
+ .endm
/*
* void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
@@ -71,11 +70,10 @@
*/
ENTRY(sha1_ce_transform)
/* load round constants */
- adr x6, .Lsha1_rcon
- ld1r {k0.4s}, [x6], #4
- ld1r {k1.4s}, [x6], #4
- ld1r {k2.4s}, [x6], #4
- ld1r {k3.4s}, [x6]
+ loadrc k0.4s, 0x5a827999, w6
+ loadrc k1.4s, 0x6ed9eba1, w6
+ loadrc k2.4s, 0x8f1bbcdc, w6
+ loadrc k3.4s, 0xca62c1d6, w6
/* load state */
ld1 {dgav.4s}, [x0]
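
The loadrc macro replaces the literal-pool load of each SHA-1 round constant with a movz/movk pair, so no data needs to live in .text. The two relocations select 16-bit halves of the constant; in C terms:

    /* sketch: how the movz/movk pair rebuilds a 32-bit round constant */
    static inline u32 movz_movk(u32 val)
    {
            u32 r = val & 0xffff;           /* movz, the :abs_g0_nc: half */

            r |= val & 0xffff0000;          /* movk, the :abs_g1: half */
            return r;                       /* dup then splats it across the vector */
    }
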
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 679c6c002f4f..4c3c89b812ce 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -53,6 +53,7 @@
/*
* The SHA-256 round constants
*/
+ .section ".rodata", "a"
.align 4
.Lsha2_rcon:
.word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
@@ -76,9 +77,10 @@
* void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
* int blocks)
*/
+ .text
ENTRY(sha2_ce_transform)
/* load round constants */
- adr x8, .Lsha2_rcon
+ adr_l x8, .Lsha2_rcon
ld1 { v0.4s- v3.4s}, [x8], #64
ld1 { v4.4s- v7.4s}, [x8], #64
ld1 { v8.4s-v11.4s}, [x8], #64
diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S
new file mode 100644
index 000000000000..332ad7530690
--- /dev/null
+++ b/arch/arm64/crypto/sha3-ce-core.S
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha3-ce-core.S - core SHA-3 transform using v8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+ .set .Lv\b\().2d, \b
+ .set .Lv\b\().16b, \b
+ .endr
+
+ /*
+ * ARMv8.2 Crypto Extensions instructions
+ */
+ .macro eor3, rd, rn, rm, ra
+ .inst 0xce000000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
+ .endm
+
+ .macro rax1, rd, rn, rm
+ .inst 0xce608c00 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro bcax, rd, rn, rm, ra
+ .inst 0xce200000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
+ .endm
+
+ .macro xar, rd, rn, rm, imm6
+ .inst 0xce800000 | .L\rd | (.L\rn << 5) | ((\imm6) << 10) | (.L\rm << 16)
+ .endm
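
Older assemblers do not know the ARMv8.2 SHA-3 mnemonics, so these macros emit the opcodes directly with .inst, and the .irp/.set block above maps names such as v5.2d to their register number so they can be spliced into the encoding. One of the encoders, restated in C:

    /* sketch: the bit-splicing performed by the eor3 macro */
    static u32 encode_eor3(u32 rd, u32 rn, u32 rm, u32 ra)
    {
            return 0xce000000 | rd | (rn << 5) | (ra << 10) | (rm << 16);
    }
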
+
+ /*
+ * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
+ */
+ .text
+ENTRY(sha3_ce_transform)
+ /* load state */
+ add x8, x0, #32
+ ld1 { v0.1d- v3.1d}, [x0]
+ ld1 { v4.1d- v7.1d}, [x8], #32
+ ld1 { v8.1d-v11.1d}, [x8], #32
+ ld1 {v12.1d-v15.1d}, [x8], #32
+ ld1 {v16.1d-v19.1d}, [x8], #32
+ ld1 {v20.1d-v23.1d}, [x8], #32
+ ld1 {v24.1d}, [x8]
+
+0: sub w2, w2, #1
+ mov w8, #24
+ adr_l x9, .Lsha3_rcon
+
+ /* load input */
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ ld1 {v29.8b-v31.8b}, [x1], #24
+ eor v0.8b, v0.8b, v25.8b
+ eor v1.8b, v1.8b, v26.8b
+ eor v2.8b, v2.8b, v27.8b
+ eor v3.8b, v3.8b, v28.8b
+ eor v4.8b, v4.8b, v29.8b
+ eor v5.8b, v5.8b, v30.8b
+ eor v6.8b, v6.8b, v31.8b
+
+ tbnz x3, #6, 2f // SHA3-512
+
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ ld1 {v29.8b-v30.8b}, [x1], #16
+ eor v7.8b, v7.8b, v25.8b
+ eor v8.8b, v8.8b, v26.8b
+ eor v9.8b, v9.8b, v27.8b
+ eor v10.8b, v10.8b, v28.8b
+ eor v11.8b, v11.8b, v29.8b
+ eor v12.8b, v12.8b, v30.8b
+
+ tbnz x3, #4, 1f // SHA3-384 or SHA3-224
+
+ // SHA3-256
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ eor v13.8b, v13.8b, v25.8b
+ eor v14.8b, v14.8b, v26.8b
+ eor v15.8b, v15.8b, v27.8b
+ eor v16.8b, v16.8b, v28.8b
+ b 3f
+
+1: tbz x3, #2, 3f // bit 2 cleared? SHA3-384
+
+ // SHA3-224
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ ld1 {v29.8b}, [x1], #8
+ eor v13.8b, v13.8b, v25.8b
+ eor v14.8b, v14.8b, v26.8b
+ eor v15.8b, v15.8b, v27.8b
+ eor v16.8b, v16.8b, v28.8b
+ eor v17.8b, v17.8b, v29.8b
+ b 3f
+
+ // SHA3-512
+2: ld1 {v25.8b-v26.8b}, [x1], #16
+ eor v7.8b, v7.8b, v25.8b
+ eor v8.8b, v8.8b, v26.8b
+
+3: sub w8, w8, #1
+
+ eor3 v29.16b, v4.16b, v9.16b, v14.16b
+ eor3 v26.16b, v1.16b, v6.16b, v11.16b
+ eor3 v28.16b, v3.16b, v8.16b, v13.16b
+ eor3 v25.16b, v0.16b, v5.16b, v10.16b
+ eor3 v27.16b, v2.16b, v7.16b, v12.16b
+ eor3 v29.16b, v29.16b, v19.16b, v24.16b
+ eor3 v26.16b, v26.16b, v16.16b, v21.16b
+ eor3 v28.16b, v28.16b, v18.16b, v23.16b
+ eor3 v25.16b, v25.16b, v15.16b, v20.16b
+ eor3 v27.16b, v27.16b, v17.16b, v22.16b
+
+ rax1 v30.2d, v29.2d, v26.2d // bc[0]
+ rax1 v26.2d, v26.2d, v28.2d // bc[2]
+ rax1 v28.2d, v28.2d, v25.2d // bc[4]
+ rax1 v25.2d, v25.2d, v27.2d // bc[1]
+ rax1 v27.2d, v27.2d, v29.2d // bc[3]
+
+ eor v0.16b, v0.16b, v30.16b
+ xar v29.2d, v1.2d, v25.2d, (64 - 1)
+ xar v1.2d, v6.2d, v25.2d, (64 - 44)
+ xar v6.2d, v9.2d, v28.2d, (64 - 20)
+ xar v9.2d, v22.2d, v26.2d, (64 - 61)
+ xar v22.2d, v14.2d, v28.2d, (64 - 39)
+ xar v14.2d, v20.2d, v30.2d, (64 - 18)
+ xar v31.2d, v2.2d, v26.2d, (64 - 62)
+ xar v2.2d, v12.2d, v26.2d, (64 - 43)
+ xar v12.2d, v13.2d, v27.2d, (64 - 25)
+ xar v13.2d, v19.2d, v28.2d, (64 - 8)
+ xar v19.2d, v23.2d, v27.2d, (64 - 56)
+ xar v23.2d, v15.2d, v30.2d, (64 - 41)
+ xar v15.2d, v4.2d, v28.2d, (64 - 27)
+ xar v28.2d, v24.2d, v28.2d, (64 - 14)
+ xar v24.2d, v21.2d, v25.2d, (64 - 2)
+ xar v8.2d, v8.2d, v27.2d, (64 - 55)
+ xar v4.2d, v16.2d, v25.2d, (64 - 45)
+ xar v16.2d, v5.2d, v30.2d, (64 - 36)
+ xar v5.2d, v3.2d, v27.2d, (64 - 28)
+ xar v27.2d, v18.2d, v27.2d, (64 - 21)
+ xar v3.2d, v17.2d, v26.2d, (64 - 15)
+ xar v25.2d, v11.2d, v25.2d, (64 - 10)
+ xar v26.2d, v7.2d, v26.2d, (64 - 6)
+ xar v30.2d, v10.2d, v30.2d, (64 - 3)
+
+ bcax v20.16b, v31.16b, v22.16b, v8.16b
+ bcax v21.16b, v8.16b, v23.16b, v22.16b
+ bcax v22.16b, v22.16b, v24.16b, v23.16b
+ bcax v23.16b, v23.16b, v31.16b, v24.16b
+ bcax v24.16b, v24.16b, v8.16b, v31.16b
+
+ ld1r {v31.2d}, [x9], #8
+
+ bcax v17.16b, v25.16b, v19.16b, v3.16b
+ bcax v18.16b, v3.16b, v15.16b, v19.16b
+ bcax v19.16b, v19.16b, v16.16b, v15.16b
+ bcax v15.16b, v15.16b, v25.16b, v16.16b
+ bcax v16.16b, v16.16b, v3.16b, v25.16b
+
+ bcax v10.16b, v29.16b, v12.16b, v26.16b
+ bcax v11.16b, v26.16b, v13.16b, v12.16b
+ bcax v12.16b, v12.16b, v14.16b, v13.16b
+ bcax v13.16b, v13.16b, v29.16b, v14.16b
+ bcax v14.16b, v14.16b, v26.16b, v29.16b
+
+ bcax v7.16b, v30.16b, v9.16b, v4.16b
+ bcax v8.16b, v4.16b, v5.16b, v9.16b
+ bcax v9.16b, v9.16b, v6.16b, v5.16b
+ bcax v5.16b, v5.16b, v30.16b, v6.16b
+ bcax v6.16b, v6.16b, v4.16b, v30.16b
+
+ bcax v3.16b, v27.16b, v0.16b, v28.16b
+ bcax v4.16b, v28.16b, v1.16b, v0.16b
+ bcax v0.16b, v0.16b, v2.16b, v1.16b
+ bcax v1.16b, v1.16b, v27.16b, v2.16b
+ bcax v2.16b, v2.16b, v28.16b, v27.16b
+
+ eor v0.16b, v0.16b, v31.16b
+
+ cbnz w8, 3b
+ cbnz w2, 0b
+
+ /* save state */
+ st1 { v0.1d- v3.1d}, [x0], #32
+ st1 { v4.1d- v7.1d}, [x0], #32
+ st1 { v8.1d-v11.1d}, [x0], #32
+ st1 {v12.1d-v15.1d}, [x0], #32
+ st1 {v16.1d-v19.1d}, [x0], #32
+ st1 {v20.1d-v23.1d}, [x0], #32
+ st1 {v24.1d}, [x0]
+ ret
+ENDPROC(sha3_ce_transform)
+
+ .section ".rodata", "a"
+ .align 8
+.Lsha3_rcon:
+ .quad 0x0000000000000001, 0x0000000000008082, 0x800000000000808a
+ .quad 0x8000000080008000, 0x000000000000808b, 0x0000000080000001
+ .quad 0x8000000080008081, 0x8000000000008009, 0x000000000000008a
+ .quad 0x0000000000000088, 0x0000000080008009, 0x000000008000000a
+ .quad 0x000000008000808b, 0x800000000000008b, 0x8000000000008089
+ .quad 0x8000000000008003, 0x8000000000008002, 0x8000000000000080
+ .quad 0x000000000000800a, 0x800000008000000a, 0x8000000080008081
+ .quad 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
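
The absorb path always reads seven lanes and then tops up to the variant's rate, dispatching on bits of the digest size passed in x3. The rate follows directly from the Keccak capacity rule:

    /* sketch: rate in 64-bit lanes selected by the dg_size bit tests above */
    static int sha3_rate_lanes(int dg_size)
    {
            return (200 - 2 * dg_size) / 8; /* 18, 17, 13, 9 for 224/256/384/512 */
    }
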
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
new file mode 100644
index 000000000000..da8222e528bd
--- /dev/null
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha3-ce-glue.c - core SHA-3 transform using v8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha3.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
+ int md_len);
+
+static int sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+
+ if (!may_use_simd())
+ return crypto_sha3_update(desc, data, len);
+
+ if ((sctx->partial + len) >= sctx->rsiz) {
+ int blocks;
+
+ if (sctx->partial) {
+ int p = sctx->rsiz - sctx->partial;
+
+ memcpy(sctx->buf + sctx->partial, data, p);
+ kernel_neon_begin();
+ sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
+ kernel_neon_end();
+
+ data += p;
+ len -= p;
+ sctx->partial = 0;
+ }
+
+ blocks = len / sctx->rsiz;
+ len %= sctx->rsiz;
+
+ if (blocks) {
+ kernel_neon_begin();
+ sha3_ce_transform(sctx->st, data, blocks, digest_size);
+ kernel_neon_end();
+ data += blocks * sctx->rsiz;
+ }
+ }
+
+ if (len) {
+ memcpy(sctx->buf + sctx->partial, data, len);
+ sctx->partial += len;
+ }
+ return 0;
+}
+
+static int sha3_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ __le64 *digest = (__le64 *)out;
+ int i;
+
+ if (!may_use_simd())
+ return crypto_sha3_final(desc, out);
+
+ sctx->buf[sctx->partial++] = 0x06;
+ memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial);
+ sctx->buf[sctx->rsiz - 1] |= 0x80;
+
+ kernel_neon_begin();
+ sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
+ kernel_neon_end();
+
+ for (i = 0; i < digest_size / 8; i++)
+ put_unaligned_le64(sctx->st[i], digest++);
+
+ if (digest_size & 4)
+ put_unaligned_le32(sctx->st[i], (__le32 *)digest);
+
+ *sctx = (struct sha3_state){};
+ return 0;
+}
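
sha3_final applies the FIPS 202 padding by hand: 0x06 is the SHA-3 domain suffix '01' followed by the first bit of pad10*1, and the last byte of the block carries the closing '1' bit (0x80; the two merge into 0x86 when the message fills the block to one byte short). As a standalone sketch:

    /* sketch of the pad10*1 step performed above */
    static void sha3_pad(u8 *buf, unsigned int partial, unsigned int rsiz)
    {
            buf[partial] = 0x06;                    /* '01' suffix + first pad bit */
            memset(buf + partial + 1, 0, rsiz - partial - 1);
            buf[rsiz - 1] |= 0x80;                  /* closing pad bit */
    }
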
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-224",
+ .base.cra_driver_name = "sha3-224-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+}, {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-256",
+ .base.cra_driver_name = "sha3-256-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+}, {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "sha3-384-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+}, {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-512",
+ .base.cra_driver_name = "sha3-512-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+} };
+
+static int __init sha3_neon_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha3_neon_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(SHA3, sha3_neon_mod_init);
+module_exit(sha3_neon_mod_fini);
diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S
new file mode 100644
index 000000000000..7f3bca5c59a2
--- /dev/null
+++ b/arch/arm64/crypto/sha512-ce-core.S
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha512-ce-core.S - core SHA-384/SHA-512 transform using v8 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19
+ .set .Lq\b, \b
+ .set .Lv\b\().2d, \b
+ .endr
+
+ .macro sha512h, rd, rn, rm
+ .inst 0xce608000 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro sha512h2, rd, rn, rm
+ .inst 0xce608400 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro sha512su0, rd, rn
+ .inst 0xcec08000 | .L\rd | (.L\rn << 5)
+ .endm
+
+ .macro sha512su1, rd, rn, rm
+ .inst 0xce608800 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ /*
+ * The SHA-512 round constants
+ */
+ .section ".rodata", "a"
+ .align 4
+.Lsha512_rcon:
+ .quad 0x428a2f98d728ae22, 0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc
+ .quad 0x3956c25bf348b538, 0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b, 0xab1c5ed5da6d8118
+ .quad 0xd807aa98a3030242, 0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2
+ .quad 0x72be5d74f27b896f, 0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235, 0xc19bf174cf692694
+ .quad 0xe49b69c19ef14ad2, 0xefbe4786384f25e3
+ .quad 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275, 0x4a7484aa6ea6e483
+ .quad 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5
+ .quad 0x983e5152ee66dfab, 0xa831c66d2db43210
+ .quad 0xb00327c898fb213f, 0xbf597fc7beef0ee4
+ .quad 0xc6e00bf33da88fc2, 0xd5a79147930aa725
+ .quad 0x06ca6351e003826f, 0x142929670a0e6e70
+ .quad 0x27b70a8546d22ffc, 0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df
+ .quad 0x650a73548baf63de, 0x766a0abb3c77b2a8
+ .quad 0x81c2c92e47edaee6, 0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364, 0xa81a664bbc423001
+ .quad 0xc24b8b70d0f89791, 0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218, 0xd69906245565a910
+ .quad 0xf40e35855771202a, 0x106aa07032bbd1b8
+ .quad 0x19a4c116b8d2d0c8, 0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8
+ .quad 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3
+ .quad 0x748f82ee5defb2fc, 0x78a5636f43172f60
+ .quad 0x84c87814a1f0ab72, 0x8cc702081a6439ec
+ .quad 0x90befffa23631e28, 0xa4506cebde82bde9
+ .quad 0xbef9a3f7b2c67915, 0xc67178f2e372532b
+ .quad 0xca273eceea26619c, 0xd186b8c721c0c207
+ .quad 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178
+ .quad 0x06f067aa72176fba, 0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae, 0x1b710b35131c471b
+ .quad 0x28db77f523047d84, 0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c
+ .quad 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a
+ .quad 0x5fcb6fab3ad6faec, 0x6c44198c4a475817
+
+ .macro dround, i0, i1, i2, i3, i4, rc0, rc1, in0, in1, in2, in3, in4
+ .ifnb \rc1
+ ld1 {v\rc1\().2d}, [x4], #16
+ .endif
+ add v5.2d, v\rc0\().2d, v\in0\().2d
+ ext v6.16b, v\i2\().16b, v\i3\().16b, #8
+ ext v5.16b, v5.16b, v5.16b, #8
+ ext v7.16b, v\i1\().16b, v\i2\().16b, #8
+ add v\i3\().2d, v\i3\().2d, v5.2d
+ .ifnb \in1
+ ext v5.16b, v\in3\().16b, v\in4\().16b, #8
+ sha512su0 v\in0\().2d, v\in1\().2d
+ .endif
+ sha512h q\i3, q6, v7.2d
+ .ifnb \in1
+ sha512su1 v\in0\().2d, v\in2\().2d, v5.2d
+ .endif
+ add v\i4\().2d, v\i1\().2d, v\i3\().2d
+ sha512h2 q\i3, q\i1, v\i0\().2d
+ .endm
+
+ /*
+ * void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ * int blocks)
+ */
+ .text
+ENTRY(sha512_ce_transform)
+ /* load state */
+ ld1 {v8.2d-v11.2d}, [x0]
+
+ /* load first 4 round constants */
+ adr_l x3, .Lsha512_rcon
+ ld1 {v20.2d-v23.2d}, [x3], #64
+
+ /* load input */
+0: ld1 {v12.2d-v15.2d}, [x1], #64
+ ld1 {v16.2d-v19.2d}, [x1], #64
+ sub w2, w2, #1
+
+CPU_LE( rev64 v12.16b, v12.16b )
+CPU_LE( rev64 v13.16b, v13.16b )
+CPU_LE( rev64 v14.16b, v14.16b )
+CPU_LE( rev64 v15.16b, v15.16b )
+CPU_LE( rev64 v16.16b, v16.16b )
+CPU_LE( rev64 v17.16b, v17.16b )
+CPU_LE( rev64 v18.16b, v18.16b )
+CPU_LE( rev64 v19.16b, v19.16b )
+
+ mov x4, x3 // rc pointer
+
+ mov v0.16b, v8.16b
+ mov v1.16b, v9.16b
+ mov v2.16b, v10.16b
+ mov v3.16b, v11.16b
+
+ // v0 ab cd -- ef gh ab
+ // v1 cd -- ef gh ab cd
+ // v2 ef gh ab cd -- ef
+ // v3 gh ab cd -- ef gh
+ // v4 -- ef gh ab cd --
+
+ dround 0, 1, 2, 3, 4, 20, 24, 12, 13, 19, 16, 17
+ dround 3, 0, 4, 2, 1, 21, 25, 13, 14, 12, 17, 18
+ dround 2, 3, 1, 4, 0, 22, 26, 14, 15, 13, 18, 19
+ dround 4, 2, 0, 1, 3, 23, 27, 15, 16, 14, 19, 12
+ dround 1, 4, 3, 0, 2, 24, 28, 16, 17, 15, 12, 13
+
+ dround 0, 1, 2, 3, 4, 25, 29, 17, 18, 16, 13, 14
+ dround 3, 0, 4, 2, 1, 26, 30, 18, 19, 17, 14, 15
+ dround 2, 3, 1, 4, 0, 27, 31, 19, 12, 18, 15, 16
+ dround 4, 2, 0, 1, 3, 28, 24, 12, 13, 19, 16, 17
+ dround 1, 4, 3, 0, 2, 29, 25, 13, 14, 12, 17, 18
+
+ dround 0, 1, 2, 3, 4, 30, 26, 14, 15, 13, 18, 19
+ dround 3, 0, 4, 2, 1, 31, 27, 15, 16, 14, 19, 12
+ dround 2, 3, 1, 4, 0, 24, 28, 16, 17, 15, 12, 13
+ dround 4, 2, 0, 1, 3, 25, 29, 17, 18, 16, 13, 14
+ dround 1, 4, 3, 0, 2, 26, 30, 18, 19, 17, 14, 15
+
+ dround 0, 1, 2, 3, 4, 27, 31, 19, 12, 18, 15, 16
+ dround 3, 0, 4, 2, 1, 28, 24, 12, 13, 19, 16, 17
+ dround 2, 3, 1, 4, 0, 29, 25, 13, 14, 12, 17, 18
+ dround 4, 2, 0, 1, 3, 30, 26, 14, 15, 13, 18, 19
+ dround 1, 4, 3, 0, 2, 31, 27, 15, 16, 14, 19, 12
+
+ dround 0, 1, 2, 3, 4, 24, 28, 16, 17, 15, 12, 13
+ dround 3, 0, 4, 2, 1, 25, 29, 17, 18, 16, 13, 14
+ dround 2, 3, 1, 4, 0, 26, 30, 18, 19, 17, 14, 15
+ dround 4, 2, 0, 1, 3, 27, 31, 19, 12, 18, 15, 16
+ dround 1, 4, 3, 0, 2, 28, 24, 12, 13, 19, 16, 17
+
+ dround 0, 1, 2, 3, 4, 29, 25, 13, 14, 12, 17, 18
+ dround 3, 0, 4, 2, 1, 30, 26, 14, 15, 13, 18, 19
+ dround 2, 3, 1, 4, 0, 31, 27, 15, 16, 14, 19, 12
+ dround 4, 2, 0, 1, 3, 24, 28, 16, 17, 15, 12, 13
+ dround 1, 4, 3, 0, 2, 25, 29, 17, 18, 16, 13, 14
+
+ dround 0, 1, 2, 3, 4, 26, 30, 18, 19, 17, 14, 15
+ dround 3, 0, 4, 2, 1, 27, 31, 19, 12, 18, 15, 16
+ dround 2, 3, 1, 4, 0, 28, 24, 12
+ dround 4, 2, 0, 1, 3, 29, 25, 13
+ dround 1, 4, 3, 0, 2, 30, 26, 14
+
+ dround 0, 1, 2, 3, 4, 31, 27, 15
+ dround 3, 0, 4, 2, 1, 24, , 16
+ dround 2, 3, 1, 4, 0, 25, , 17
+ dround 4, 2, 0, 1, 3, 26, , 18
+ dround 1, 4, 3, 0, 2, 27, , 19
+
+ /* update state */
+ add v8.2d, v8.2d, v0.2d
+ add v9.2d, v9.2d, v1.2d
+ add v10.2d, v10.2d, v2.2d
+ add v11.2d, v11.2d, v3.2d
+
+ /* handled all input blocks? */
+ cbnz w2, 0b
+
+ /* store new state */
+3: st1 {v8.2d-v11.2d}, [x0]
+ ret
+ENDPROC(sha512_ce_transform)
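
Each dround interleaves message-schedule updates with the compression step, and the sha512h/sha512h2 pair computes two rounds of the standard FIPS 180-4 round function at a time. The scalar equivalent, for reference:

    #define ror64(x, n)     (((x) >> (n)) | ((x) << (64 - (n))))

    /* sketch: one scalar SHA-512 round (state s[0..7] = a..h) */
    static void sha512_round(u64 s[8], u64 k, u64 w)
    {
            u64 t1 = s[7] + (ror64(s[4], 14) ^ ror64(s[4], 18) ^ ror64(s[4], 41)) +
                     ((s[4] & s[5]) ^ (~s[4] & s[6])) + k + w;
            u64 t2 = (ror64(s[0], 28) ^ ror64(s[0], 34) ^ ror64(s[0], 39)) +
                     ((s[0] & s[1]) ^ (s[0] & s[2]) ^ (s[1] & s[2]));

            s[7] = s[6]; s[6] = s[5]; s[5] = s[4]; s[4] = s[3] + t1;
            s[3] = s[2]; s[2] = s[1]; s[1] = s[0]; s[0] = t1 + t2;
    }
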
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
new file mode 100644
index 000000000000..a77c8632a589
--- /dev/null
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha512-ce-glue.c - SHA-384/SHA-512 using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/sha512_base.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ int blocks);
+
+asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
+
+static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ if (!may_use_simd())
+ return sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order);
+
+ kernel_neon_begin();
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_ce_transform);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!may_use_simd()) {
+ if (len)
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc,
+ (sha512_block_fn *)sha512_block_data_order);
+ return sha512_base_finish(desc, out);
+ }
+
+ kernel_neon_begin();
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ kernel_neon_end();
+ return sha512_base_finish(desc, out);
+}
+
+static int sha512_ce_final(struct shash_desc *desc, u8 *out)
+{
+ if (!may_use_simd()) {
+ sha512_base_do_finalize(desc,
+ (sha512_block_fn *)sha512_block_data_order);
+ return sha512_base_finish(desc, out);
+ }
+
+ kernel_neon_begin();
+ sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ kernel_neon_end();
+ return sha512_base_finish(desc, out);
+}
+
+static struct shash_alg algs[] = { {
+ .init = sha384_base_init,
+ .update = sha512_ce_update,
+ .final = sha512_ce_final,
+ .finup = sha512_ce_finup,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA384_DIGEST_SIZE,
+ .base.cra_name = "sha384",
+ .base.cra_driver_name = "sha384-ce",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .init = sha512_base_init,
+ .update = sha512_ce_update,
+ .final = sha512_ce_final,
+ .finup = sha512_ce_finup,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA512_DIGEST_SIZE,
+ .base.cra_name = "sha512",
+ .base.cra_driver_name = "sha512-ce",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
+
+static int __init sha512_ce_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha512_ce_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(SHA512, sha512_ce_mod_init);
+module_exit(sha512_ce_mod_fini);
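
With both shash_alg entries registered, the hashes are reachable from user space through the kernel crypto API. A minimal sketch using AF_ALG (assumes CONFIG_CRYPTO_USER_API_HASH=y; error handling omitted for brevity):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha512",	/* resolves to sha512-ce if it wins on priority */
	};
	unsigned char digest[64];
	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	int op;

	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, NULL);

	write(op, "abc", 3);			/* hash the message "abc" */
	read(op, digest, sizeof(digest));	/* collect the 64-byte digest */

	for (int i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}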
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index aff35c9992a4..27db4851e380 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -27,6 +27,7 @@ MODULE_ALIAS_CRYPTO("sha512");
asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
+EXPORT_SYMBOL(sha512_block_data_order);
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
diff --git a/arch/arm64/crypto/sm3-ce-core.S b/arch/arm64/crypto/sm3-ce-core.S
new file mode 100644
index 000000000000..27169fe07a68
--- /dev/null
+++ b/arch/arm64/crypto/sm3-ce-core.S
@@ -0,0 +1,141 @@
+/*
+ * sm3-ce-core.S - SM3 secure hash using ARMv8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+ .set .Lv\b\().4s, \b
+ .endr
+
+ .macro sm3partw1, rd, rn, rm
+ .inst 0xce60c000 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro sm3partw2, rd, rn, rm
+ .inst 0xce60c400 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro sm3ss1, rd, rn, rm, ra
+ .inst 0xce400000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
+ .endm
+
+ .macro sm3tt1a, rd, rn, rm, imm2
+ .inst 0xce408000 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
+ .endm
+
+ .macro sm3tt1b, rd, rn, rm, imm2
+ .inst 0xce408400 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
+ .endm
+
+ .macro sm3tt2a, rd, rn, rm, imm2
+ .inst 0xce408800 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
+ .endm
+
+ .macro sm3tt2b, rd, rn, rm, imm2
+ .inst 0xce408c00 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
+ .endm
+
+ .macro round, ab, s0, t0, t1, i
+ sm3ss1 v5.4s, v8.4s, \t0\().4s, v9.4s
+ shl \t1\().4s, \t0\().4s, #1
+ sri \t1\().4s, \t0\().4s, #31
+ sm3tt1\ab v8.4s, v5.4s, v10.4s, \i
+ sm3tt2\ab v9.4s, v5.4s, \s0\().4s, \i
+ .endm
+
+ .macro qround, ab, s0, s1, s2, s3, s4
+ .ifnb \s4
+ ext \s4\().16b, \s1\().16b, \s2\().16b, #12
+ ext v6.16b, \s0\().16b, \s1\().16b, #12
+ ext v7.16b, \s2\().16b, \s3\().16b, #8
+ sm3partw1 \s4\().4s, \s0\().4s, \s3\().4s
+ .endif
+
+ eor v10.16b, \s0\().16b, \s1\().16b
+
+ round \ab, \s0, v11, v12, 0
+ round \ab, \s0, v12, v11, 1
+ round \ab, \s0, v11, v12, 2
+ round \ab, \s0, v12, v11, 3
+
+ .ifnb \s4
+ sm3partw2 \s4\().4s, v7.4s, v6.4s
+ .endif
+ .endm
+
+ /*
+ * void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
+ * int blocks)
+ */
+ .text
+ENTRY(sm3_ce_transform)
+ /* load state */
+ ld1 {v8.4s-v9.4s}, [x0]
+ rev64 v8.4s, v8.4s
+ rev64 v9.4s, v9.4s
+ ext v8.16b, v8.16b, v8.16b, #8
+ ext v9.16b, v9.16b, v9.16b, #8
+
+ adr_l x8, .Lt
+ ldp s13, s14, [x8]
+
+ /* load input */
+0: ld1 {v0.16b-v3.16b}, [x1], #64
+ sub w2, w2, #1
+
+ mov v15.16b, v8.16b
+ mov v16.16b, v9.16b
+
+CPU_LE( rev32 v0.16b, v0.16b )
+CPU_LE( rev32 v1.16b, v1.16b )
+CPU_LE( rev32 v2.16b, v2.16b )
+CPU_LE( rev32 v3.16b, v3.16b )
+
+ ext v11.16b, v13.16b, v13.16b, #4
+
+ qround a, v0, v1, v2, v3, v4
+ qround a, v1, v2, v3, v4, v0
+ qround a, v2, v3, v4, v0, v1
+ qround a, v3, v4, v0, v1, v2
+
+ ext v11.16b, v14.16b, v14.16b, #4
+
+ qround b, v4, v0, v1, v2, v3
+ qround b, v0, v1, v2, v3, v4
+ qround b, v1, v2, v3, v4, v0
+ qround b, v2, v3, v4, v0, v1
+ qround b, v3, v4, v0, v1, v2
+ qround b, v4, v0, v1, v2, v3
+ qround b, v0, v1, v2, v3, v4
+ qround b, v1, v2, v3, v4, v0
+ qround b, v2, v3, v4, v0, v1
+ qround b, v3, v4
+ qround b, v4, v0
+ qround b, v0, v1
+
+ eor v8.16b, v8.16b, v15.16b
+ eor v9.16b, v9.16b, v16.16b
+
+ /* handled all input blocks? */
+ cbnz w2, 0b
+
+ /* save state */
+ rev64 v8.4s, v8.4s
+ rev64 v9.4s, v9.4s
+ ext v8.16b, v8.16b, v8.16b, #8
+ ext v9.16b, v9.16b, v9.16b, #8
+ st1 {v8.4s-v9.4s}, [x0]
+ ret
+ENDPROC(sm3_ce_transform)
+
+ .section ".rodata", "a"
+ .align 3
+.Lt: .word 0x79cc4519, 0x9d8a7a87
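
The .inst macros above hand-assemble the ARMv8.2 SM3 instructions so that the file still builds with assemblers that lack the mnemonics: the .irp loop defines symbols .Lv0.4s through .Lv12.4s holding each register's number, which .L\rd then substitutes into the Rd/Rn/Rm fields of the fixed opcode words. A hedged C rendering of the same encoding arithmetic:

#include <stdint.h>

/* rd/rn/rm/ra are SIMD register numbers (this file only needs v0-v12); imm2 is 0-3 */
static uint32_t sm3ss1_opcode(unsigned rd, unsigned rn, unsigned rm, unsigned ra)
{
	return 0xce400000u | rd | (rn << 5) | (ra << 10) | (rm << 16);
}

static uint32_t sm3tt1a_opcode(unsigned rd, unsigned rn, unsigned rm, unsigned imm2)
{
	return 0xce408000u | rd | (rn << 5) | (imm2 << 12) | (rm << 16);
}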
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
new file mode 100644
index 000000000000..3b4948f7e26f
--- /dev/null
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -0,0 +1,92 @@
+/*
+ * sm3-ce-glue.c - SM3 secure hash using ARMv8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sm3.h>
+#include <crypto/sm3_base.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SM3 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
+ int blocks);
+
+static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ if (!may_use_simd())
+ return crypto_sm3_update(desc, data, len);
+
+ kernel_neon_begin();
+ sm3_base_do_update(desc, data, len, sm3_ce_transform);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sm3_ce_final(struct shash_desc *desc, u8 *out)
+{
+ if (!may_use_simd())
+ return crypto_sm3_finup(desc, NULL, 0, out);
+
+ kernel_neon_begin();
+ sm3_base_do_finalize(desc, sm3_ce_transform);
+ kernel_neon_end();
+
+ return sm3_base_finish(desc, out);
+}
+
+static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!may_use_simd())
+ return crypto_sm3_finup(desc, data, len, out);
+
+ kernel_neon_begin();
+ sm3_base_do_update(desc, data, len, sm3_ce_transform);
+ kernel_neon_end();
+
+ return sm3_ce_final(desc, out);
+}
+
+static struct shash_alg sm3_alg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .init = sm3_base_init,
+ .update = sm3_ce_update,
+ .final = sm3_ce_final,
+ .finup = sm3_ce_finup,
+ .descsize = sizeof(struct sm3_state),
+ .base.cra_name = "sm3",
+ .base.cra_driver_name = "sm3-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SM3_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+};
+
+static int __init sm3_ce_mod_init(void)
+{
+ return crypto_register_shash(&sm3_alg);
+}
+
+static void __exit sm3_ce_mod_fini(void)
+{
+ crypto_unregister_shash(&sm3_alg);
+}
+
+module_cpu_feature_match(SM3, sm3_ce_mod_init);
+module_exit(sm3_ce_mod_fini);
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 4a85c6952a22..669028172fd6 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -12,6 +12,8 @@
#include <linux/stddef.h>
#include <linux/stringify.h>
+extern int alternatives_applied;
+
struct alt_instr {
s32 orig_offset; /* offset to original instruction */
s32 alt_offset; /* offset to replacement instruction */
diff --git a/arch/arm64/include/asm/arm_dsu_pmu.h b/arch/arm64/include/asm/arm_dsu_pmu.h
new file mode 100644
index 000000000000..82e5cc3356bf
--- /dev/null
+++ b/arch/arm64/include/asm/arm_dsu_pmu.h
@@ -0,0 +1,129 @@
+/*
+ * ARM DynamIQ Shared Unit (DSU) PMU Low level register access routines.
+ *
+ * Copyright (C) ARM Limited, 2017.
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/sysreg.h>
+
+
+#define CLUSTERPMCR_EL1 sys_reg(3, 0, 15, 5, 0)
+#define CLUSTERPMCNTENSET_EL1 sys_reg(3, 0, 15, 5, 1)
+#define CLUSTERPMCNTENCLR_EL1 sys_reg(3, 0, 15, 5, 2)
+#define CLUSTERPMOVSSET_EL1 sys_reg(3, 0, 15, 5, 3)
+#define CLUSTERPMOVSCLR_EL1 sys_reg(3, 0, 15, 5, 4)
+#define CLUSTERPMSELR_EL1 sys_reg(3, 0, 15, 5, 5)
+#define CLUSTERPMINTENSET_EL1 sys_reg(3, 0, 15, 5, 6)
+#define CLUSTERPMINTENCLR_EL1 sys_reg(3, 0, 15, 5, 7)
+#define CLUSTERPMCCNTR_EL1 sys_reg(3, 0, 15, 6, 0)
+#define CLUSTERPMXEVTYPER_EL1 sys_reg(3, 0, 15, 6, 1)
+#define CLUSTERPMXEVCNTR_EL1 sys_reg(3, 0, 15, 6, 2)
+#define CLUSTERPMMDCR_EL1 sys_reg(3, 0, 15, 6, 3)
+#define CLUSTERPMCEID0_EL1 sys_reg(3, 0, 15, 6, 4)
+#define CLUSTERPMCEID1_EL1 sys_reg(3, 0, 15, 6, 5)
+
+static inline u32 __dsu_pmu_read_pmcr(void)
+{
+ return read_sysreg_s(CLUSTERPMCR_EL1);
+}
+
+static inline void __dsu_pmu_write_pmcr(u32 val)
+{
+ write_sysreg_s(val, CLUSTERPMCR_EL1);
+ isb();
+}
+
+static inline u32 __dsu_pmu_get_reset_overflow(void)
+{
+ u32 val = read_sysreg_s(CLUSTERPMOVSCLR_EL1);
+ /* Clear the bit */
+ write_sysreg_s(val, CLUSTERPMOVSCLR_EL1);
+ isb();
+ return val;
+}
+
+static inline void __dsu_pmu_select_counter(int counter)
+{
+ write_sysreg_s(counter, CLUSTERPMSELR_EL1);
+ isb();
+}
+
+static inline u64 __dsu_pmu_read_counter(int counter)
+{
+ __dsu_pmu_select_counter(counter);
+ return read_sysreg_s(CLUSTERPMXEVCNTR_EL1);
+}
+
+static inline void __dsu_pmu_write_counter(int counter, u64 val)
+{
+ __dsu_pmu_select_counter(counter);
+ write_sysreg_s(val, CLUSTERPMXEVCNTR_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_set_event(int counter, u32 event)
+{
+ __dsu_pmu_select_counter(counter);
+ write_sysreg_s(event, CLUSTERPMXEVTYPER_EL1);
+ isb();
+}
+
+static inline u64 __dsu_pmu_read_pmccntr(void)
+{
+ return read_sysreg_s(CLUSTERPMCCNTR_EL1);
+}
+
+static inline void __dsu_pmu_write_pmccntr(u64 val)
+{
+ write_sysreg_s(val, CLUSTERPMCCNTR_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_disable_counter(int counter)
+{
+ write_sysreg_s(BIT(counter), CLUSTERPMCNTENCLR_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_enable_counter(int counter)
+{
+ write_sysreg_s(BIT(counter), CLUSTERPMCNTENSET_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_counter_interrupt_enable(int counter)
+{
+ write_sysreg_s(BIT(counter), CLUSTERPMINTENSET_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_counter_interrupt_disable(int counter)
+{
+ write_sysreg_s(BIT(counter), CLUSTERPMINTENCLR_EL1);
+ isb();
+}
+
+
+static inline u32 __dsu_pmu_read_pmceid(int n)
+{
+ switch (n) {
+ case 0:
+ return read_sysreg_s(CLUSTERPMCEID0_EL1);
+ case 1:
+ return read_sysreg_s(CLUSTERPMCEID1_EL1);
+ default:
+ BUILD_BUG();
+ return 0;
+ }
+}
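
A sketch of how the accessors above chain together to count one event on one counter (a hypothetical helper, not part of the patch; the actual driver added by this series in drivers/perf/arm_dsu_pmu.c layers counter allocation, locking and overflow handling on top):

static u64 dsu_pmu_sample_event(int counter, u32 event)
{
	__dsu_pmu_set_event(counter, event);	/* bind the event to the counter */
	__dsu_pmu_write_counter(counter, 0);	/* start counting from zero */
	__dsu_pmu_enable_counter(counter);

	/* ... run the workload of interest ... */

	__dsu_pmu_disable_counter(counter);
	return __dsu_pmu_read_counter(counter);
}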
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index b3da6c886835..4128bec033f6 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -4,6 +4,7 @@
#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
+#include <asm/mmu.h>
#include <asm/sysreg.h>
#include <asm/assembler.h>
@@ -12,52 +13,63 @@
*/
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
.macro __uaccess_ttbr0_disable, tmp1
- mrs \tmp1, ttbr1_el1 // swapper_pg_dir
- add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
- msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
+ mrs \tmp1, ttbr1_el1 // swapper_pg_dir
+ bic \tmp1, \tmp1, #TTBR_ASID_MASK
+ sub \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir
+ msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
+ isb
+ add \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
+ msr ttbr1_el1, \tmp1 // set reserved ASID
isb
.endm
- .macro __uaccess_ttbr0_enable, tmp1
+ .macro __uaccess_ttbr0_enable, tmp1, tmp2
get_thread_info \tmp1
ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
+ mrs \tmp2, ttbr1_el1
+ extr \tmp2, \tmp2, \tmp1, #48
+ ror \tmp2, \tmp2, #16
+ msr ttbr1_el1, \tmp2 // set the active ASID
+ isb
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
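
The extr/ror pair deserves a note: extr computes ((ttbr1 : saved_ttbr0) >> 48), leaving saved_ttbr0's ASID field (bits 63:48) in the low 16 bits, and the ror #16 rotates it to the top, yielding ttbr1[47:0] with the user ASID in bits 63:48. In other words, the active ASID in TTBR1_EL1 is switched to the user's ASID before TTBR0_EL1 is pointed back at the user page tables.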
- .macro uaccess_ttbr0_disable, tmp1
+ .macro uaccess_ttbr0_disable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
+ save_and_disable_irq \tmp2 // avoid preemption
__uaccess_ttbr0_disable \tmp1
+ restore_irq \tmp2
alternative_else_nop_endif
.endm
- .macro uaccess_ttbr0_enable, tmp1, tmp2
+ .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
alternative_if_not ARM64_HAS_PAN
- save_and_disable_irq \tmp2 // avoid preemption
- __uaccess_ttbr0_enable \tmp1
- restore_irq \tmp2
+ save_and_disable_irq \tmp3 // avoid preemption
+ __uaccess_ttbr0_enable \tmp1, \tmp2
+ restore_irq \tmp3
alternative_else_nop_endif
.endm
#else
- .macro uaccess_ttbr0_disable, tmp1
+ .macro uaccess_ttbr0_disable, tmp1, tmp2
.endm
- .macro uaccess_ttbr0_enable, tmp1, tmp2
+ .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
.endm
#endif
/*
* These macros are no-ops when UAO is present.
*/
- .macro uaccess_disable_not_uao, tmp1
- uaccess_ttbr0_disable \tmp1
+ .macro uaccess_disable_not_uao, tmp1, tmp2
+ uaccess_ttbr0_disable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(1)
alternative_else_nop_endif
.endm
- .macro uaccess_enable_not_uao, tmp1, tmp2
- uaccess_ttbr0_enable \tmp1, \tmp2
+ .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3
+ uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0)
alternative_else_nop_endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8b168280976f..3873dd7b5a32 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -26,7 +26,6 @@
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
-#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
@@ -110,6 +109,13 @@
.endm
/*
+ * RAS Error Synchronization barrier
+ */
+ .macro esb
+ hint #16
+ .endm
+
+/*
* NOP sequence
*/
.macro nops, num
@@ -255,7 +261,11 @@ lr .req x30 // link register
#else
adr_l \dst, \sym
#endif
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
+alternative_else
+ mrs \tmp, tpidr_el2
+alternative_endif
add \dst, \dst, \tmp
.endm
@@ -266,7 +276,11 @@ lr .req x30 // link register
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
+alternative_else
+ mrs \tmp, tpidr_el2
+alternative_endif
ldr \dst, [\dst, \tmp]
.endm
@@ -344,10 +358,26 @@ alternative_endif
* tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
*/
.macro tcr_set_idmap_t0sz, valreg, tmpreg
-#ifndef CONFIG_ARM64_VA_BITS_48
ldr_l \tmpreg, idmap_t0sz
bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
-#endif
+ .endm
+
+/*
+ * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
+ * ID_AA64MMFR0_EL1.PARange value
+ *
+ * tcr: register with the TCR_ELx value to be updated
+ * pos: IPS or PS bitfield position
+ * tmp{0,1}: temporary registers
+ */
+ .macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
+ mrs \tmp0, ID_AA64MMFR0_EL1
+ // Narrow PARange to fit the PS field in TCR_ELx
+ ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
+ mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX
+ cmp \tmp0, \tmp1
+ csel \tmp0, \tmp1, \tmp0, hi
+ bfi \tcr, \tmp0, \pos, #3
.endm
/*
@@ -478,37 +508,18 @@ alternative_endif
.endm
/*
- * Errata workaround prior to TTBR0_EL1 update
+ * Arrange a physical address in a TTBR register, taking care of 52-bit
+ * addresses.
*
- * val: TTBR value with new BADDR, preserved
- * tmp0: temporary register, clobbered
- * tmp1: other temporary register, clobbered
+ * phys: physical address, preserved
+ * ttbr: returns the TTBR value
*/
- .macro pre_ttbr0_update_workaround, val, tmp0, tmp1
-#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
-alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
- mrs \tmp0, ttbr0_el1
- mov \tmp1, #FALKOR_RESERVED_ASID
- bfi \tmp0, \tmp1, #48, #16 // reserved ASID + old BADDR
- msr ttbr0_el1, \tmp0
- isb
- bfi \tmp0, \val, #0, #48 // reserved ASID + new BADDR
- msr ttbr0_el1, \tmp0
- isb
-alternative_else_nop_endif
-#endif
- .endm
-
-/*
- * Errata workaround post TTBR0_EL1 update.
- */
- .macro post_ttbr0_update_workaround
-#ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
- ic iallu
- dsb nsh
- isb
-alternative_else_nop_endif
+ .macro phys_to_ttbr, phys, ttbr
+#ifdef CONFIG_ARM64_PA_BITS_52
+ orr \ttbr, \phys, \phys, lsr #46
+ and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
+#else
+ mov \ttbr, \phys
#endif
.endm
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index a3c7f271ad4c..c00c62e1a4a3 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -150,70 +150,6 @@ typedef u32 compat_old_sigset_t;
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[128/sizeof(int) - 3];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid32_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid32_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- __compat_uid32_t _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- compat_uptr_t _addr; /* faulting insn/memory ref. */
- short _addr_lsb; /* LSB of the reported address */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- /* SIGSYS */
- struct {
- compat_uptr_t _call_addr; /* calling user insn */
- int _syscall; /* triggering system call number */
- compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 2ff7c5e8efab..bb263820de13 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -41,7 +41,11 @@
#define ARM64_WORKAROUND_CAVIUM_30115 20
#define ARM64_HAS_DCPOP 21
#define ARM64_SVE 22
+#define ARM64_UNMAP_KERNEL_AT_EL0 23
+#define ARM64_HARDEN_BRANCH_PREDICTOR 24
+#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
+#define ARM64_HAS_RAS_EXTN 26
-#define ARM64_NCAPS 23
+#define ARM64_NCAPS 27
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index cbf08d7cbf30..be7bd19c87ec 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -79,28 +79,37 @@
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
#define ARM_CPU_PART_CORTEX_A57 0xD07
+#define ARM_CPU_PART_CORTEX_A72 0xD08
#define ARM_CPU_PART_CORTEX_A53 0xD03
#define ARM_CPU_PART_CORTEX_A73 0xD09
+#define ARM_CPU_PART_CORTEX_A75 0xD0A
#define APM_CPU_PART_POTENZA 0x000
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
+#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
#define BRCM_CPU_PART_VULCAN 0x516
#define QCOM_CPU_PART_FALKOR_V1 0x800
#define QCOM_CPU_PART_FALKOR 0xC00
+#define QCOM_CPU_PART_KRYO 0x200
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
+#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 0df756b24863..b7847eb8a7bb 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -50,40 +50,5 @@ static inline bool is_device_dma_coherent(struct device *dev)
return dev->archdata.dma_coherent;
}
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- dma_addr_t dev_addr = (dma_addr_t)paddr;
-
- return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
- phys_addr_t paddr = (phys_addr_t)dev_addr;
-
- return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size)
-{
-}
-
-/* Override for dma_max_pfn() */
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
- dma_addr_t dma_max = (dma_addr_t)*dev->dma_mask;
-
- return (ulong)dma_to_phys(dev, dma_max) >> PAGE_SHIFT;
-}
-#define dma_max_pfn(dev) dma_max_pfn(dev)
-
#endif /* __KERNEL__ */
#endif /* __ASM_DMA_MAPPING_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index c4cd5081d78b..8389050328bb 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm_struct *mm)
if (mm != current->active_mm) {
/*
* Update the current thread's saved ttbr0 since it is
- * restored as part of a return from exception. Set
- * the hardware TTBR0_EL1 using cpu_switch_mm()
- * directly to enable potential errata workarounds.
+ * restored as part of a return from exception. Enable
+ * access to the valid TTBR0_EL1 and invoke the errata
+ * workaround directly since there is no return from
+ * exception when invoking the EFI run-time services.
*/
update_saved_ttbr0(current, mm);
- cpu_switch_mm(mm->pgd, mm);
+ uaccess_ttbr0_enable();
+ post_ttbr_update_workaround();
} else {
/*
* Defer the switch to the current thread's TTBR0_EL1
* until uaccess_enable(). Restore the current
* thread's saved ttbr0 corresponding to its active_mm
*/
- cpu_set_reserved_ttbr0();
+ uaccess_ttbr0_disable();
update_saved_ttbr0(current, current->active_mm);
}
}
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 014d7d8edcf9..803443d74926 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -86,6 +86,18 @@
#define ESR_ELx_WNR_SHIFT (6)
#define ESR_ELx_WNR (UL(1) << ESR_ELx_WNR_SHIFT)
+/* Asynchronous Error Type */
+#define ESR_ELx_IDS_SHIFT (24)
+#define ESR_ELx_IDS (UL(1) << ESR_ELx_IDS_SHIFT)
+#define ESR_ELx_AET_SHIFT (10)
+#define ESR_ELx_AET (UL(0x7) << ESR_ELx_AET_SHIFT)
+
+#define ESR_ELx_AET_UC (UL(0) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UEU (UL(1) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UEO (UL(2) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UER (UL(3) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT)
+
/* Shared ISS field definitions for Data/Instruction aborts */
#define ESR_ELx_SET_SHIFT (11)
#define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT)
@@ -100,6 +112,7 @@
#define ESR_ELx_FSC (0x3F)
#define ESR_ELx_FSC_TYPE (0x3C)
#define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_SERROR (0x11)
#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
@@ -127,6 +140,13 @@
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
+#define DISR_EL1_IDS (UL(1) << 24)
+/*
+ * DISR_EL1 and ESR_ELx share the bottom 13 bits, but the RES0 bits may mean
+ * different things in the future...
+ */
+#define DISR_EL1_ESR_MASK (ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)
+
/* ESR value templates for specific events */
/* BRK instruction trap from AArch64 state */
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 0c2eec490abf..bc30429d8e91 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -18,6 +18,8 @@
#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H
+#include <asm/esr.h>
+
#include <linux/interrupt.h>
#define __exception __attribute__((section(".exception.text")))
@@ -27,4 +29,16 @@
#define __exception_irq_entry __exception
#endif
+static inline u32 disr_to_esr(u64 disr)
+{
+ unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;
+
+ if ((disr & DISR_EL1_IDS) == 0)
+ esr |= (disr & DISR_EL1_ESR_MASK);
+ else
+ esr |= (disr & ESR_ELx_ISS_MASK);
+
+ return esr;
+}
+
#endif /* __ASM_EXCEPTION_H */
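
For illustration: with DISR_EL1.IDS clear, disr_to_esr() keeps only the AET, EA and DFSC fields defined above and prepends the SError exception class, so a deferred corrected error (AET = ESR_ELx_AET_CE, DFSC = ESR_ELx_FSC_SERROR) folds to (0x2f << 26) | (6 << 10) | 0x11 = 0xbc001811.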
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 4052ec39e8db..ec1e6d6fa14c 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -58,6 +58,11 @@ enum fixed_addresses {
FIX_APEI_GHES_NMI,
#endif /* CONFIG_ACPI_APEI_GHES */
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ FIX_ENTRY_TRAMP_DATA,
+ FIX_ENTRY_TRAMP_TEXT,
+#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
__end_of_permanent_fixed_addresses,
/*
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 74f34392a531..8857a0f0d0f7 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -71,7 +71,7 @@ extern void fpsimd_flush_thread(void);
extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
-extern void fpsimd_update_current_state(struct fpsimd_state *state);
+extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void sve_flush_cpu_state(void);
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7803343e5881..82386e860dd2 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -52,7 +52,52 @@
#define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT))
#endif
-#define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
+
+/*
+ * If KASLR is enabled, then an offset K is added to the kernel address
+ * space. The bottom 21 bits of this offset are zero to guarantee 2MB
+ * alignment for PA and VA.
+ *
+ * For each pagetable level of the swapper, we know that the shift will
+ * be larger than 21 (for the 4KB granule case we use section maps thus
+ * the smallest shift is actually 30) thus there is the possibility that
+ * KASLR can increase the number of pagetable entries by 1, so we make
+ * room for this extra entry.
+ *
+ * Note KASLR cannot increase the number of required entries for a level
+ * by more than one because it increments both the virtual start and end
+ * addresses equally (the extra entry comes from the case where the end
+ * address is just pushed over a boundary and the start address isn't).
+ */
+
+#ifdef CONFIG_RANDOMIZE_BASE
+#define EARLY_KASLR (1)
+#else
+#define EARLY_KASLR (0)
+#endif
+
+#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
+ - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+
+#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+
+#if SWAPPER_PGTABLE_LEVELS > 3
+#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
+#else
+#define EARLY_PUDS(vstart, vend) (0)
+#endif
+
+#if SWAPPER_PGTABLE_LEVELS > 2
+#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
+#else
+#define EARLY_PMDS(vstart, vend) (0)
+#endif
+
+#define EARLY_PAGES(vstart, vend) ( 1 /* PGDIR page */ \
+ + EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \
+ + EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
+ + EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
+#define SWAPPER_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
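
A worked example of the sizing (hedged: assuming a 4KB granule with a 48-bit VA, so SWAPPER_PGTABLE_LEVELS is 3, CONFIG_RANDOMIZE_BASE=y, and a kernel image that stays within one PGD entry and one 1GB SWAPPER_TABLE entry): EARLY_PGDS = 0 + 1 + 1 = 2, EARLY_PUDS = 0, EARLY_PMDS = 0 + 1 + 1 = 2, so EARLY_PAGES = 1 + 2 + 0 + 2 = 5 and SWAPPER_DIR_SIZE is five pages (20KB) instead of the old fixed three.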
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
@@ -78,8 +123,16 @@
/*
* Initial memory map attributes.
*/
-#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG)
+#define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
+#else
+#define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS
+#define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS
+#endif
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 715d395ef45b..b0c84171e6a3 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -23,6 +23,8 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+#define HCR_TEA (UL(1) << 37)
+#define HCR_TERR (UL(1) << 36)
#define HCR_E2H (UL(1) << 34)
#define HCR_ID (UL(1) << 33)
#define HCR_CD (UL(1) << 32)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ab4d0a926043..24961b732e65 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -68,6 +68,8 @@ extern u32 __kvm_get_mdcr_el2(void);
extern u32 __init_stage2_translation(void);
+extern void __qcom_hyp_sanitize_btac_predictors(void);
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5f28dfa14cee..413dc82b1e89 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -50,6 +50,13 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
if (is_kernel_in_hyp_mode())
vcpu->arch.hcr_el2 |= HCR_E2H;
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
+ /* route synchronous external abort exceptions to EL2 */
+ vcpu->arch.hcr_el2 |= HCR_TEA;
+ /* trap error record accesses */
+ vcpu->arch.hcr_el2 |= HCR_TERR;
+ }
+
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
vcpu->arch.hcr_el2 &= ~HCR_RW;
}
@@ -64,6 +71,11 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
vcpu->arch.hcr_el2 = hcr;
}
+static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
+{
+ vcpu->arch.vsesr_el2 = vsesr;
+}
+
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
@@ -171,6 +183,11 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
+static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.disr_el1;
+}
+
static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ea6cb5b24258..4485ae8e98de 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cpufeature.h>
+#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
@@ -89,6 +90,7 @@ struct kvm_vcpu_fault_info {
u32 esr_el2; /* Hyp Syndrome Register */
u64 far_el2; /* Hyp Fault Address Register */
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
+ u64 disr_el1; /* Deferred [SError] Status Register */
};
/*
@@ -120,6 +122,7 @@ enum vcpu_sysreg {
PAR_EL1, /* Physical Address Register */
MDSCR_EL1, /* Monitor Debug System Control Register */
MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
+ DISR_EL1, /* Deferred Interrupt Status Register */
/* Performance Monitors Registers */
PMCR_EL0, /* Control Register */
@@ -192,6 +195,8 @@ struct kvm_cpu_context {
u64 sys_regs[NR_SYS_REGS];
u32 copro[NR_COPRO_REGS];
};
+
+ struct kvm_vcpu *__hyp_running_vcpu;
};
typedef struct kvm_cpu_context kvm_cpu_context_t;
@@ -277,6 +282,9 @@ struct kvm_vcpu_arch {
/* Detect first run of a vcpu */
bool has_run_once;
+
+ /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
+ u64 vsesr_el2;
};
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
@@ -340,6 +348,8 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index);
int kvm_perf_init(void);
int kvm_perf_teardown(void);
@@ -396,4 +406,13 @@ static inline void kvm_fpsimd_flush_cpu_state(void)
sve_flush_cpu_state();
}
+static inline void kvm_arm_vhe_guest_enter(void)
+{
+ local_daif_mask();
+}
+
+static inline void kvm_arm_vhe_guest_exit(void)
+{
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+}
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 672c8684d5c2..72e279dbae5f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -273,15 +273,26 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
- return __cpu_uses_extended_idmap();
+ return __cpu_uses_extended_idmap_level();
}
+static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
+{
+ return idmap_ptrs_per_pgd;
+}
+
+/*
+ * Can't use pgd_populate here, because the extended idmap adds an extra level
+ * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
+ * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
+ */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *hyp_pgd,
pgd_t *merged_hyp_pgd,
unsigned long hyp_idmap_start)
{
int idmap_idx;
+ u64 pgd_addr;
/*
* Use the first entry to access the HYP mappings. It is
@@ -289,7 +300,8 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
* extended idmap.
*/
VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
- merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
+ pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
+ merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);
/*
* Create another extended level entry that points to the boot HYP map,
@@ -299,7 +311,8 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
*/
idmap_idx = hyp_idmap_start >> VA_BITS;
VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
- merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
+ pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
+ merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}
static inline unsigned int kvm_get_vmid_bits(void)
@@ -309,5 +322,45 @@ static inline unsigned int kvm_get_vmid_bits(void)
return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu.h>
+
+static inline void *kvm_get_hyp_vector(void)
+{
+ struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+ void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+
+ if (data->fn) {
+ vect = __bp_harden_hyp_vecs_start +
+ data->hyp_vectors_slot * SZ_2K;
+
+ if (!has_vhe())
+ vect = lm_alias(vect);
+ }
+
+ return vect;
+}
+
+static inline int kvm_map_vectors(void)
+{
+ return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+ kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+ PAGE_HYP_EXEC);
+}
+
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+ return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+ return 0;
+}
+#endif
+
+#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 0d34bf0a89c7..a050d4f3615d 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -17,6 +17,11 @@
#define __ASM_MMU_H
#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
+#define USER_ASID_BIT 48
+#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
+#define TTBR_ASID_MASK (UL(0xffff) << 48)
+
+#ifndef __ASSEMBLY__
typedef struct {
atomic64_t id;
@@ -31,6 +36,49 @@ typedef struct {
*/
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
+static inline bool arm64_kernel_unmapped_at_el0(void)
+{
+ return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+ cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+}
+
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+ int hyp_vectors_slot;
+ bp_hardening_cb_t fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return this_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+ struct bp_hardening_data *d;
+
+ if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+ return;
+
+ d = arm64_get_bp_hardening_data();
+ if (d->fn)
+ d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void) { }
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
@@ -41,4 +89,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
extern void mark_linear_text_alias_ro(void);
+#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 9d155fa9a507..8d3331985d2e 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -19,8 +19,6 @@
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H
-#define FALKOR_RESERVED_ASID 1
-
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
@@ -51,23 +49,39 @@ static inline void contextidr_thread_switch(struct task_struct *next)
*/
static inline void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbr = __pa_symbol(empty_zero_page);
+ unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
write_sysreg(ttbr, ttbr0_el1);
isb();
}
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+ BUG_ON(pgd == swapper_pg_dir);
+ cpu_set_reserved_ttbr0();
+ cpu_do_switch_mm(virt_to_phys(pgd),mm);
+}
+
/*
* TCR.T0SZ value to use when the ID map is active. Usually equals
* TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
* physical memory, in which case it will be smaller.
*/
extern u64 idmap_t0sz;
+extern u64 idmap_ptrs_per_pgd;
static inline bool __cpu_uses_extended_idmap(void)
{
- return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
- unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
+ return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
+}
+
+/*
+ * True if the extended ID map requires an extra level of translation table
+ * to be configured.
+ */
+static inline bool __cpu_uses_extended_idmap_level(void)
+{
+ return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}
/*
@@ -170,7 +184,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
else
ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
- task_thread_info(tsk)->ttbr0 = ttbr;
+ WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -225,6 +239,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#define activate_mm(prev,next) switch_mm(prev, next, current)
void verify_cpu_asid_bits(void);
+void post_ttbr_update_workaround(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 3bd498e4de4c..43393208229e 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,11 +16,15 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/alternative.h>
#include <asm/stack_pointer.h>
static inline void set_my_cpu_offset(unsigned long off)
{
- asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+ asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
+ "msr tpidr_el2, %0",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ :: "r" (off) : "memory");
}
static inline unsigned long __my_cpu_offset(void)
@@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void)
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrs %0, tpidr_el1" : "=r" (off) :
+ asm(ALTERNATIVE("mrs %0, tpidr_el1",
+ "mrs %0, tpidr_el2",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ : "=r" (off) :
"Q" (*(const unsigned long *)current_stack_pointer));
return off;
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 5ca6a573a701..e9d9f1b006ef 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
{
- set_pud(pud, __pud(pmd | prot));
+ set_pud(pud, __pud(__phys_to_pud_val(pmd) | prot));
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -73,7 +73,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
{
- set_pgd(pgdp, __pgd(pud | prot));
+ set_pgd(pgdp, __pgd(__phys_to_pgd_val(pud) | prot));
}
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
@@ -129,7 +129,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
pmdval_t prot)
{
- set_pmd(pmdp, __pmd(pte | prot));
+ set_pmd(pmdp, __pmd(__phys_to_pmd_val(pte) | prot));
}
/*
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index eb0c2bd90de9..f42836da8723 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PGTABLE_HWDEF_H
#define __ASM_PGTABLE_HWDEF_H
+#include <asm/memory.h>
+
/*
* Number of page-table levels required to address 'va_bits' wide
* address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
@@ -116,9 +118,9 @@
* Level 1 descriptor (PUD).
*/
#define PUD_TYPE_TABLE (_AT(pudval_t, 3) << 0)
-#define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1)
-#define PUD_TYPE_MASK (_AT(pgdval_t, 3) << 0)
-#define PUD_TYPE_SECT (_AT(pgdval_t, 1) << 0)
+#define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1)
+#define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0)
+#define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0)
/*
* Level 2 descriptor (PMD).
@@ -166,6 +168,14 @@
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
#define PTE_HYP_XN (_AT(pteval_t, 1) << 54) /* HYP XN */
+#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define PTE_ADDR_HIGH (_AT(pteval_t, 0xf) << 12)
+#define PTE_ADDR_MASK (PTE_ADDR_LOW | PTE_ADDR_HIGH)
+#else
+#define PTE_ADDR_MASK PTE_ADDR_LOW
+#endif
+
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
*/
@@ -196,7 +206,7 @@
/*
* Highest possible physical address supported.
*/
-#define PHYS_MASK_SHIFT (48)
+#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
/*
@@ -272,9 +282,23 @@
#define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
#define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)
+#define TCR_IPS_SHIFT 32
+#define TCR_IPS_MASK (UL(7) << TCR_IPS_SHIFT)
+#define TCR_A1 (UL(1) << 22)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
+/*
+ * TTBR.
+ */
+#ifdef CONFIG_ARM64_PA_BITS_52
+/*
+ * This should be GENMASK_ULL(47, 2).
+ * TTBR_ELx[1] is RES0 in this configuration.
+ */
+#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
+#endif
+
#endif
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 0a5635fb0ef9..22a926825e3f 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -34,8 +34,16 @@
#include <asm/pgtable-types.h>
-#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG)
+#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG)
+#else
+#define PROT_DEFAULT _PROT_DEFAULT
+#define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -48,6 +56,7 @@
#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG)
#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
@@ -55,15 +64,15 @@
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
-#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
-#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
-#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
+#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
+#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
+#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index bdcc7f1c9d06..094374c82db0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -59,9 +59,22 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
-#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
+/*
+ * Macros to convert between a physical address and its placement in a
+ * page table entry, taking care of 52-bit addresses.
+ */
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define __pte_to_phys(pte) \
+ ((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
+#define __phys_to_pte_val(phys) (((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
+#else
+#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK)
+#define __phys_to_pte_val(phys) (phys)
+#endif
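
A worked packing example (hedged: 52-bit PAs imply the 64KB granule, so PAGE_SHIFT is 16 and PTE bits [15:12] sit below the page offset): PA bits [51:48] are shifted down by 36 into PTE bits [15:12], e.g. __phys_to_pte_val(0x0009876543210000) = 0x0000876543219000, and __pte_to_phys() undoes it by shifting the PTE_ADDR_HIGH nibble back up by 36.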
-#define pfn_pte(pfn,prot) (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot) \
+ __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
@@ -292,6 +305,11 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
#define __HAVE_ARCH_PTE_SPECIAL
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+ return __pte(pgd_val(pgd));
+}
+
static inline pte_t pud_pte(pud_t pud)
{
return __pte(pud_val(pud));
@@ -357,15 +375,24 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
-#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
-#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
+#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
+#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#define pud_write(pud) pte_write(pud_pte(pud))
-#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+
+#define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud))
+#define __phys_to_pud_val(phys) __phys_to_pte_val(phys)
+#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
+#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+#define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd))
+#define __phys_to_pgd_val(phys) __phys_to_pte_val(phys)
+
#define __pgprot_modify(prot,mask,bits) \
__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
@@ -416,7 +443,7 @@ static inline void pmd_clear(pmd_t *pmdp)
static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
- return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
+ return __pmd_to_phys(pmd);
}
/* Find an entry in the third-level page table. */
@@ -434,7 +461,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap() clear_fixmap(FIX_PTE)
-#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))
/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
@@ -467,7 +494,7 @@ static inline void pud_clear(pud_t *pudp)
static inline phys_addr_t pud_page_paddr(pud_t pud)
{
- return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
+ return __pud_to_phys(pud);
}
/* Find an entry in the second-level page table. */
@@ -480,7 +507,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
#define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap() clear_fixmap(FIX_PMD)
-#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
+#define pud_page(pud) pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))
/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
@@ -519,7 +546,7 @@ static inline void pgd_clear(pgd_t *pgdp)
static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
- return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
+ return __pgd_to_phys(pgd);
}
/* Find an entry in the first-level page table. */
@@ -532,7 +559,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
#define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap() clear_fixmap(FIX_PUD)
-#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
+#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))
/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
@@ -679,10 +706,19 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
{
ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
+
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
+}
#endif
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pgd_t swapper_pg_end[];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a swap entry:
@@ -736,6 +772,12 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kc_vaddr_to_offset(v) ((v) & ~VA_START)
#define kc_offset_to_vaddr(o) ((o) | VA_START)
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
+#else
+#define phys_to_ttbr(addr) (addr)
+#endif
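
The TTBR packing follows the same pattern: shifting the PA right by 46 drops bits [51:48] onto bits [5:2], which is where TTBR_ELx holds BADDR[51:48] when 52-bit PAs are in use, and the AND against TTBR_BADDR_MASK_52 (effectively GENMASK(47, 2)) clears the leftover bits; for example, PA bit 48 lands at TTBR bit 2 (48 - 46).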
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 14ad6e4e87d1..16cef2e8449e 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
-#define cpu_switch_mm(pgd,mm) \
-do { \
- BUG_ON(pgd == swapper_pg_dir); \
- cpu_do_switch_mm(virt_to_phys(pgd),mm); \
-} while (0)
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 023cacb946c3..cee4ae25a5d1 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -216,6 +216,7 @@ static inline void spin_lock_prefetch(const void *ptr)
int cpu_enable_pan(void *__unused);
int cpu_enable_cache_maint_trap(void *__unused);
+int cpu_clear_disr(void *__unused);
/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
new file mode 100644
index 000000000000..e073e6886685
--- /dev/null
+++ b/arch/arm64/include/asm/sdei.h
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __ASM_SDEI_H
+#define __ASM_SDEI_H
+
+/* Values for sdei_exit_mode */
+#define SDEI_EXIT_HVC 0
+#define SDEI_EXIT_SMC 1
+
+#define SDEI_STACK_SIZE IRQ_STACK_SIZE
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#include <asm/virt.h>
+
+extern unsigned long sdei_exit_mode;
+
+/* Software Delegated Exception entry point from firmware */
+asmlinkage void __sdei_asm_handler(unsigned long event_num, unsigned long arg,
+ unsigned long pc, unsigned long pstate);
+
+/* and its CONFIG_UNMAP_KERNEL_AT_EL0 trampoline */
+asmlinkage void __sdei_asm_entry_trampoline(unsigned long event_num,
+ unsigned long arg,
+ unsigned long pc,
+ unsigned long pstate);
+
+/*
+ * The above entry point does the minimum to call C code. This function does
+ * anything else, before calling the driver.
+ */
+struct sdei_registered_event;
+asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
+unsigned long sdei_arch_get_entry_point(int conduit);
+#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
+
+bool _on_sdei_stack(unsigned long sp);
+static inline bool on_sdei_stack(unsigned long sp)
+{
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return false;
+ if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
+ return false;
+ if (in_nmi())
+ return _on_sdei_stack(sp);
+
+ return false;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_SDEI_H */
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 941267caa39c..caab039d6305 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -28,5 +28,6 @@ extern char __initdata_begin[], __initdata_end[];
extern char __inittext_begin[], __inittext_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
+extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
#endif /* __ASM_SECTIONS_H */
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 74a9d301819f..b299929fe56c 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -17,7 +17,7 @@
#define __ASM_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS 48
+#define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS
#define SECTION_SIZE_BITS 30
#endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 6ad30776e984..472ef944e932 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -22,6 +22,7 @@
#include <asm/memory.h>
#include <asm/ptrace.h>
+#include <asm/sdei.h>
struct stackframe {
unsigned long fp;
@@ -85,6 +86,8 @@ static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp
return true;
if (on_overflow_stack(sp))
return true;
+ if (on_sdei_stack(sp))
+ return true;
return false;
}
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 08cc88574659..0e1960c59197 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,6 +20,7 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H
+#include <asm/compiler.h>
#include <linux/stringify.h>
/*
@@ -175,6 +176,16 @@
#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1)
#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0)
+
+#define SYS_ERRIDR_EL1 sys_reg(3, 0, 5, 3, 0)
+#define SYS_ERRSELR_EL1 sys_reg(3, 0, 5, 3, 1)
+#define SYS_ERXFR_EL1 sys_reg(3, 0, 5, 4, 0)
+#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1)
+#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2)
+#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
+#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
+#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
+
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
@@ -278,6 +289,7 @@
#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
+#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1)
#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0)
#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1)
@@ -353,8 +365,10 @@
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
+#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
+#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0)
#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1)
@@ -398,27 +412,85 @@
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_ELx_IESB (1 << 21)
+#define SCTLR_ELx_WXN (1 << 19)
#define SCTLR_ELx_I (1 << 12)
#define SCTLR_ELx_SA (1 << 3)
#define SCTLR_ELx_C (1 << 2)
#define SCTLR_ELx_A (1 << 1)
#define SCTLR_ELx_M 1
+#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB)
+
+/* SCTLR_EL2 specific flags. */
#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
(1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
(1 << 29))
+#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
+ (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
+ (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
+ (1 << 27) | (1 << 30) | (1 << 31))
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ENDIAN_SET_EL2 SCTLR_ELx_EE
+#define ENDIAN_CLEAR_EL2 0
+#else
+#define ENDIAN_SET_EL2 0
+#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE
+#endif
+
+/* SCTLR_EL2 value used for the hyp-stub */
+#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
+#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
+ ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+
+/* Check all the bits are accounted for */
+#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
-#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
- SCTLR_ELx_SA | SCTLR_ELx_I)
/* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_UCI (1 << 26)
+#define SCTLR_EL1_E0E (1 << 24)
#define SCTLR_EL1_SPAN (1 << 23)
+#define SCTLR_EL1_NTWE (1 << 18)
+#define SCTLR_EL1_NTWI (1 << 16)
#define SCTLR_EL1_UCT (1 << 15)
+#define SCTLR_EL1_DZE (1 << 14)
+#define SCTLR_EL1_UMA (1 << 9)
#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_ITD (1 << 7)
#define SCTLR_EL1_CP15BEN (1 << 5)
+#define SCTLR_EL1_SA0 (1 << 4)
+
+#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
+ (1 << 29))
+#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
+ (1 << 27) | (1 << 30) | (1 << 31))
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+#define ENDIAN_CLEAR_EL1 0
+#else
+#define ENDIAN_SET_EL1 0
+#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+#endif
+
+#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
+ SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
+ SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_NTWI |\
+ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
+ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
+#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
+ SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
+ SCTLR_EL1_RES0)
+
+/* Check all the bits are accounted for */
+#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
/* id_aa64isar0 */
+#define ID_AA64ISAR0_FHM_SHIFT 48
#define ID_AA64ISAR0_DP_SHIFT 44
#define ID_AA64ISAR0_SM4_SHIFT 40
#define ID_AA64ISAR0_SM3_SHIFT 36
@@ -437,7 +509,10 @@
#define ID_AA64ISAR1_DPB_SHIFT 0
/* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV3_SHIFT 60
+#define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_SVE_SHIFT 32
+#define ID_AA64PFR0_RAS_SHIFT 28
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
@@ -447,6 +522,7 @@
#define ID_AA64PFR0_EL0_SHIFT 0
#define ID_AA64PFR0_SVE 0x1
+#define ID_AA64PFR0_RAS_V1 0x1
#define ID_AA64PFR0_FP_NI 0xf
#define ID_AA64PFR0_FP_SUPPORTED 0x0
#define ID_AA64PFR0_ASIMD_NI 0xf
@@ -471,6 +547,14 @@
#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
#define ID_AA64MMFR0_TGRAN16_NI 0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
+#define ID_AA64MMFR0_PARANGE_48 0x5
+#define ID_AA64MMFR0_PARANGE_52 0x6
+
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
+#else
+#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48
+#endif
/* id_aa64mmfr1 */
#define ID_AA64MMFR1_PAN_SHIFT 20
@@ -582,6 +666,7 @@
#else
+#include <linux/build_bug.h>
#include <linux/types.h>
asm(
@@ -638,6 +723,9 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
{
u32 val;
+ SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
+ SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
+
val = read_sysreg(sctlr_el1);
val &= ~clear;
val |= set;
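The SCTLR_EL{1,2}_BUILD_BUG_ON_MISSING_BITS checks wired into config_sctlr_el1() enforce that every SCTLR bit lands in exactly one of the new SET and CLEAR masks: if each bit differs between the two masks, their XOR is all-ones. A standalone restatement of that invariant (u32-sized purely for illustration):

#include <assert.h>
#include <stdint.h>

/*
 * XOR == all-ones holds iff every bit is set in exactly one of the
 * two masks, i.e. SET and CLEAR are disjoint and together complete.
 */
#define CHECK_SCTLR_COVERAGE(set, clear) \
	static_assert((uint32_t)((set) ^ (clear)) == ~(uint32_t)0, \
		      "each SCTLR bit must be in exactly one mask")

CHECK_SCTLR_COVERAGE(0xffff0000u, 0x0000ffffu);	/* passes */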
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index eb431286bacd..740aa03c5f0d 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -51,8 +51,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_stack (init_thread_union.stack)
-
#define thread_saved_pc(tsk) \
((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk) \
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index af1c76981911..9e82dd79c7db 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <asm/cputype.h>
+#include <asm/mmu.h>
/*
* Raw TLBI operations.
@@ -54,6 +55,11 @@
#define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0)
+#define __tlbi_user(op, arg) do { \
+ if (arm64_kernel_unmapped_at_el0()) \
+ __tlbi(op, (arg) | USER_ASID_FLAG); \
+} while (0)
+
/*
* TLB Management
* ==============
@@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
dsb(ishst);
__tlbi(aside1is, asid);
+ __tlbi_user(aside1is, asid);
dsb(ish);
}
@@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
dsb(ishst);
__tlbi(vale1is, addr);
+ __tlbi_user(vale1is, addr);
dsb(ish);
}
@@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
- if (last_level)
+ if (last_level) {
__tlbi(vale1is, addr);
- else
+ __tlbi_user(vale1is, addr);
+ } else {
__tlbi(vae1is, addr);
+ __tlbi_user(vae1is, addr);
+ }
}
dsb(ish);
}
@@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
__tlbi(vae1is, addr);
+ __tlbi_user(vae1is, addr);
dsb(ish);
}
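The __tlbi_user hunks exist because, once the kernel is unmapped at EL0, each mm runs under a pair of ASIDs: kernel context uses the even ASID and the EL0 trampoline return path uses ASID | 1, so every user-address invalidation must be issued for both. A rough model, assuming USER_ASID_FLAG is bit 48 of the TLBI argument (the bottom bit of its ASID field):

#include <stdint.h>
#include <stdio.h>

#define USER_ASID_FLAG	(1ULL << 48)	/* ASID bit 0 in the TLBI argument */

static int kernel_unmapped_at_el0 = 1;	/* stand-in for the cpucap test */

static void tlbi_vale1is(uint64_t arg)
{
	printf("tlbi vale1is, %#llx\n", (unsigned long long)arg);
}

/* Model of flush_tlb_page() after this patch: hit both ASIDs. */
static void flush_user_page(uint64_t addr_and_asid)
{
	tlbi_vale1is(addr_and_asid);				/* kernel ASID */
	if (kernel_unmapped_at_el0)
		tlbi_vale1is(addr_and_asid | USER_ASID_FLAG);	/* user ASID */
}

int main(void)
{
	/* Page at VA 0x1000 under ASID 42: (asid << 48) | (va >> 12). */
	flush_user_page((42ULL << 48) | (0x1000ULL >> 12));
	return 0;
}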
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 1696f9de9359..178e338d2889 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -19,6 +19,7 @@
#define __ASM_TRAP_H
#include <linux/list.h>
+#include <asm/esr.h>
#include <asm/sections.h>
struct pt_regs;
@@ -66,4 +67,57 @@ static inline int in_entry_text(unsigned long ptr)
return ptr >= (unsigned long)&__entry_text_start &&
ptr < (unsigned long)&__entry_text_end;
}
+
+/*
+ * CPUs with the RAS extensions have an Implementation-Defined-Syndrome bit
+ * to indicate whether this ESR has a RAS encoding. CPUs without this feature
+ * have an ISS-Valid bit in the same position.
+ * If this bit is set, we know it's not a RAS SError.
+ * If it's clear, we need to know if the CPU supports RAS. Uncategorized RAS
+ * errors share the same encoding as an all-zeros encoding from a CPU that
+ * doesn't support RAS.
+ */
+static inline bool arm64_is_ras_serror(u32 esr)
+{
+ WARN_ON(preemptible());
+
+ if (esr & ESR_ELx_IDS)
+ return false;
+
+ if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN))
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Return the AET bits from a RAS SError's ESR.
+ *
+ * It is implementation defined whether Uncategorized errors are containable.
+ * We treat them as Uncontainable.
+ * Non-RAS SErrors are reported as Uncontained/Uncategorized.
+ */
+static inline u32 arm64_ras_serror_get_severity(u32 esr)
+{
+ u32 aet = esr & ESR_ELx_AET;
+
+ if (!arm64_is_ras_serror(esr)) {
+ /* Not a RAS error, we can't interpret the ESR. */
+ return ESR_ELx_AET_UC;
+ }
+
+ /*
+ * AET is RES0 if 'the value returned in the DFSC field is not
+ * [ESR_ELx_FSC_SERROR]'
+ */
+ if ((esr & ESR_ELx_FSC) != ESR_ELx_FSC_SERROR) {
+ /* No severity information : Uncategorized */
+ return ESR_ELx_AET_UC;
+ }
+
+ return aet;
+}
+
+bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr);
+void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr);
#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index fc0f9eb66039..59fda5292936 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -105,17 +105,23 @@ static inline void set_fs(mm_segment_t fs)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
- unsigned long ttbr;
+ unsigned long flags, ttbr;
- /* reserved_ttbr0 placed at the end of swapper_pg_dir */
- ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
- write_sysreg(ttbr, ttbr0_el1);
+ local_irq_save(flags);
+ ttbr = read_sysreg(ttbr1_el1);
+ ttbr &= ~TTBR_ASID_MASK;
+ /* reserved_ttbr0 placed before swapper_pg_dir */
+ write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
+ isb();
+ /* Set reserved ASID */
+ write_sysreg(ttbr, ttbr1_el1);
isb();
+ local_irq_restore(flags);
}
static inline void __uaccess_ttbr0_enable(void)
{
- unsigned long flags;
+ unsigned long flags, ttbr0, ttbr1;
/*
* Disable interrupts to avoid preemption between reading the 'ttbr0'
@@ -123,7 +129,17 @@ static inline void __uaccess_ttbr0_enable(void)
* roll-over and an update of 'ttbr0'.
*/
local_irq_save(flags);
- write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+ ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
+
+ /* Restore active ASID */
+ ttbr1 = read_sysreg(ttbr1_el1);
+ ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */
+ ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+ write_sysreg(ttbr1, ttbr1_el1);
+ isb();
+
+ /* Restore user page table */
+ write_sysreg(ttbr0, ttbr0_el1);
isb();
local_irq_restore(flags);
}
@@ -155,6 +171,18 @@ static inline bool uaccess_ttbr0_enable(void)
}
#endif
+static inline void __uaccess_disable_hw_pan(void)
+{
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
+ CONFIG_ARM64_PAN));
+}
+
+static inline void __uaccess_enable_hw_pan(void)
+{
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+ CONFIG_ARM64_PAN));
+}
+
#define __uaccess_disable(alt) \
do { \
if (!uaccess_ttbr0_disable()) \
diff --git a/arch/arm64/include/asm/vmap_stack.h b/arch/arm64/include/asm/vmap_stack.h
new file mode 100644
index 000000000000..0b5ec6e08c10
--- /dev/null
+++ b/arch/arm64/include/asm/vmap_stack.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __ASM_VMAP_STACK_H
+#define __ASM_VMAP_STACK_H
+
+#include <linux/bug.h>
+#include <linux/gfp.h>
+#include <linux/kconfig.h>
+#include <linux/vmalloc.h>
+#include <asm/memory.h>
+#include <asm/pgtable.h>
+#include <asm/thread_info.h>
+
+/*
+ * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
+ * stacks need to have the same alignment.
+ */
+static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
+{
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));
+
+ return __vmalloc_node_range(stack_size, THREAD_ALIGN,
+ VMALLOC_START, VMALLOC_END,
+ THREADINFO_GFP, PAGE_KERNEL, 0, node,
+ __builtin_return_address(0));
+}
+
+#endif /* __ASM_VMAP_STACK_H */
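arch_alloc_vmap_stack() insists on THREAD_ALIGN (twice the stack size) so the entry code can spot an overflow with a single bit test: every sp inside a correctly placed stack has bit THREAD_SHIFT clear, and falling off the bottom sets it. A sketch of that property, assuming 16K stacks:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SHIFT	14			/* assumed: 16K stacks */
#define THREAD_SIZE	(1UL << THREAD_SHIFT)
#define THREAD_ALIGN	(2 * THREAD_SIZE)

/* C equivalent of entry.S's "tbnz x0, #THREAD_SHIFT" overflow check. */
static int stack_overflowed(uintptr_t sp)
{
	return (sp >> THREAD_SHIFT) & 1;
}

int main(void)
{
	uintptr_t base = 0x100000;	/* THREAD_ALIGN-aligned stack base */

	printf("%d %d\n", stack_overflowed(base + 16),	/* 0: in range  */
			  stack_overflowed(base - 16));	/* 1: overflow  */
	return 0;
}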
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index cda76fa8b9b2..f018c3deea3b 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -43,5 +43,6 @@
#define HWCAP_ASIMDDP (1 << 20)
#define HWCAP_SHA512 (1 << 21)
#define HWCAP_SVE (1 << 22)
+#define HWCAP_ASIMDFHM (1 << 23)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/siginfo.h b/arch/arm64/include/uapi/asm/siginfo.h
index 574d12f86039..9b4d91277742 100644
--- a/arch/arm64/include/uapi/asm/siginfo.h
+++ b/arch/arm64/include/uapi/asm/siginfo.h
@@ -21,4 +21,25 @@
#include <asm-generic/siginfo.h>
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
+/*
+ * SIGBUS si_codes
+ */
+#ifdef __KERNEL__
+#define BUS_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
+/*
+ * SIGTRAP si_codes
+ */
+#ifdef __KERNEL__
+#define TRAP_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 067baace74a0..b87541360f43 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -52,6 +52,11 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
+
+ifeq ($(CONFIG_KVM),y)
+arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
+endif
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index b3162715ed78..252396a96c78 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -117,7 +117,7 @@ bool __init acpi_psci_present(void)
}
/* Whether HVC must be used instead of SMC as the PSCI conduit */
-bool __init acpi_psci_use_hvc(void)
+bool acpi_psci_use_hvc(void)
{
return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
}
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 6dd0a3a3e5c9..414288a558c8 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -32,6 +32,8 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+int alternatives_applied;
+
struct alt_region {
struct alt_instr *begin;
struct alt_instr *end;
@@ -143,7 +145,6 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
*/
static int __apply_alternatives_multi_stop(void *unused)
{
- static int patched = 0;
struct alt_region region = {
.begin = (struct alt_instr *)__alt_instructions,
.end = (struct alt_instr *)__alt_instructions_end,
@@ -151,14 +152,14 @@ static int __apply_alternatives_multi_stop(void *unused)
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
- while (!READ_ONCE(patched))
+ while (!READ_ONCE(alternatives_applied))
cpu_relax();
isb();
} else {
- BUG_ON(patched);
+ BUG_ON(alternatives_applied);
__apply_alternatives(&region, true);
/* Barriers provided by the cache flushing */
- WRITE_ONCE(patched, 1);
+ WRITE_ONCE(alternatives_applied, 1);
}
return 0;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 71bf088f1e4b..1303e04110cd 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -18,12 +18,14 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm_sdei.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kvm_host.h>
#include <linux/suspend.h>
#include <asm/cpufeature.h>
+#include <asm/fixmap.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
@@ -130,6 +132,7 @@ int main(void)
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
+ DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
@@ -148,11 +151,18 @@ int main(void)
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
-
BLANK();
DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));
DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val));
+ BLANK();
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ DEFINE(TRAMP_VALIAS, TRAMP_VALIAS);
+#endif
+#ifdef CONFIG_ARM_SDE_INTERFACE
+ DEFINE(SDEI_EVENT_INTREGS, offsetof(struct sdei_registered_event, interrupted_regs));
+ DEFINE(SDEI_EVENT_PRIORITY, offsetof(struct sdei_registered_event, priority));
+#endif
return 0;
}
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
new file mode 100644
index 000000000000..76225c2611ea
--- /dev/null
+++ b/arch/arm64/kernel/bpi.S
@@ -0,0 +1,87 @@
+/*
+ * Contains CPU specific branch predictor invalidation sequences
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+.macro ventry target
+ .rept 31
+ nop
+ .endr
+ b \target
+.endm
+
+.macro vectors target
+ ventry \target + 0x000
+ ventry \target + 0x080
+ ventry \target + 0x100
+ ventry \target + 0x180
+
+ ventry \target + 0x200
+ ventry \target + 0x280
+ ventry \target + 0x300
+ ventry \target + 0x380
+
+ ventry \target + 0x400
+ ventry \target + 0x480
+ ventry \target + 0x500
+ ventry \target + 0x580
+
+ ventry \target + 0x600
+ ventry \target + 0x680
+ ventry \target + 0x700
+ ventry \target + 0x780
+.endm
+
+ .align 11
+ENTRY(__bp_harden_hyp_vecs_start)
+ .rept 4
+ vectors __kvm_hyp_vector
+ .endr
+ENTRY(__bp_harden_hyp_vecs_end)
+ENTRY(__psci_hyp_bp_inval_start)
+ sub sp, sp, #(8 * 18)
+ stp x16, x17, [sp, #(16 * 0)]
+ stp x14, x15, [sp, #(16 * 1)]
+ stp x12, x13, [sp, #(16 * 2)]
+ stp x10, x11, [sp, #(16 * 3)]
+ stp x8, x9, [sp, #(16 * 4)]
+ stp x6, x7, [sp, #(16 * 5)]
+ stp x4, x5, [sp, #(16 * 6)]
+ stp x2, x3, [sp, #(16 * 7)]
+ stp x0, x1, [sp, #(16 * 8)]
+ mov x0, #0x84000000
+ smc #0
+ ldp x16, x17, [sp, #(16 * 0)]
+ ldp x14, x15, [sp, #(16 * 1)]
+ ldp x12, x13, [sp, #(16 * 2)]
+ ldp x10, x11, [sp, #(16 * 3)]
+ ldp x8, x9, [sp, #(16 * 4)]
+ ldp x6, x7, [sp, #(16 * 5)]
+ ldp x4, x5, [sp, #(16 * 6)]
+ ldp x2, x3, [sp, #(16 * 7)]
+ ldp x0, x1, [sp, #(16 * 8)]
+ add sp, sp, #(8 * 18)
+ENTRY(__psci_hyp_bp_inval_end)
+
+ENTRY(__qcom_hyp_sanitize_link_stack_start)
+ stp x29, x30, [sp, #-16]!
+ .rept 16
+ bl . + 4
+ .endr
+ ldp x29, x30, [sp], #16
+ENTRY(__qcom_hyp_sanitize_link_stack_end)
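The nop padding in ventry is what makes the slot arithmetic elsewhere in this series line up: 31 nops plus a branch is 32 instructions of 4 bytes, i.e. 0x80 bytes per entry and 2K per 16-entry table, which is why __copy_hyp_vect_bpi() in cpu_errata.c below walks a SZ_2K slot in 0x80-byte strides. Restated as compile-time checks (values taken from the macros above):

#include <assert.h>

#define AARCH64_INSN_SIZE	4
#define VENTRY_INSNS		(31 + 1)	/* 31 nops + 1 branch */
#define VENTRY_BYTES		(VENTRY_INSNS * AARCH64_INSN_SIZE)
#define VECTOR_TABLE_BYTES	(16 * VENTRY_BYTES)

static_assert(VENTRY_BYTES == 0x80, "one vector entry spans 0x80 bytes");
static_assert(VECTOR_TABLE_BYTES == 0x800, "one table fills a 2K slot");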
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 0e27f86ee709..ed6881882231 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
entry->midr_range_max);
}
+static bool __maybe_unused
+is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ u32 model;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ model = read_cpuid_id();
+ model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
+ MIDR_ARCHITECTURE_MASK;
+
+ return model == entry->midr_model;
+}
+
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
int scope)
@@ -46,6 +60,127 @@ static int cpu_enable_trap_ctr_access(void *__unused)
return 0;
}
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+extern char __qcom_hyp_sanitize_link_stack_start[];
+extern char __qcom_hyp_sanitize_link_stack_end[];
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+ int i;
+
+ for (i = 0; i < SZ_2K; i += 0x80)
+ memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+ flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ static int last_slot = -1;
+ static DEFINE_SPINLOCK(bp_lock);
+ int cpu, slot = -1;
+
+ spin_lock(&bp_lock);
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ last_slot++;
+ BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+ / SZ_2K) <= last_slot);
+ slot = last_slot;
+ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+ }
+
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ spin_unlock(&bp_lock);
+}
+#else
+#define __psci_hyp_bp_inval_start NULL
+#define __psci_hyp_bp_inval_end NULL
+#define __qcom_hyp_sanitize_link_stack_start NULL
+#define __qcom_hyp_sanitize_link_stack_end NULL
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+ bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ u64 pfr0;
+
+ if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+ return;
+
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ return;
+
+ __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+}
+
+#include <linux/psci.h>
+
+static int enable_psci_bp_hardening(void *data)
+{
+ const struct arm64_cpu_capabilities *entry = data;
+
+ if (psci_ops.get_version)
+ install_bp_hardening_cb(entry,
+ (bp_hardening_cb_t)psci_ops.get_version,
+ __psci_hyp_bp_inval_start,
+ __psci_hyp_bp_inval_end);
+
+ return 0;
+}
+
+static void qcom_link_stack_sanitization(void)
+{
+ u64 tmp;
+
+ asm volatile("mov %0, x30 \n"
+ ".rept 16 \n"
+ "bl . + 4 \n"
+ ".endr \n"
+ "mov x30, %0 \n"
+ : "=&r" (tmp));
+}
+
+static int qcom_enable_link_stack_sanitization(void *data)
+{
+ const struct arm64_cpu_capabilities *entry = data;
+
+ install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
+ __qcom_hyp_sanitize_link_stack_start,
+ __qcom_hyp_sanitize_link_stack_end);
+
+ return 0;
+}
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
#define MIDR_RANGE(model, min, max) \
.def_scope = SCOPE_LOCAL_CPU, \
.matches = is_affected_midr_range, \
@@ -169,6 +304,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_CPU_VAR_REV(0, 0),
MIDR_CPU_VAR_REV(0, 0)),
},
+ {
+ .desc = "Qualcomm Technologies Kryo erratum 1003",
+ .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+ .def_scope = SCOPE_LOCAL_CPU,
+ .midr_model = MIDR_QCOM_KRYO,
+ .matches = is_kryo_midr,
+ },
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
{
@@ -187,6 +329,47 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
},
#endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+ .enable = qcom_enable_link_stack_sanitization,
+ },
+ {
+ .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ .enable = enable_psci_bp_hardening,
+ },
+#endif
{
}
};
@@ -200,15 +383,18 @@ void verify_local_cpu_errata_workarounds(void)
{
const struct arm64_cpu_capabilities *caps = arm64_errata;
- for (; caps->matches; caps++)
- if (!cpus_have_cap(caps->capability) &&
- caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ for (; caps->matches; caps++) {
+ if (cpus_have_cap(caps->capability)) {
+ if (caps->enable)
+ caps->enable((void *)caps);
+ } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
pr_crit("CPU%d: Requires work around for %s, not detected"
" at boot time\n",
smp_processor_id(),
caps->desc ? : "an erratum");
cpu_die_early();
}
+ }
}
void update_cpu_errata_workarounds(void)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a73a5928f09b..0fb6a3151443 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -123,6 +123,7 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
* sync with the documentation of the CPU feature register ABI.
*/
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
@@ -145,8 +146,11 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
@@ -846,6 +850,67 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
ID_AA64PFR0_FP_SHIFT) < 0;
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ /* Forced on command line? */
+ if (__kpti_forced) {
+ pr_info_once("kernel page table isolation forced %s by command line option\n",
+ __kpti_forced > 0 ? "ON" : "OFF");
+ return __kpti_forced > 0;
+ }
+
+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return true;
+
+ /* Don't force KPTI for CPUs that are not vulnerable */
+ switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
+ case MIDR_CAVIUM_THUNDERX2:
+ case MIDR_BRCM_VULCAN:
+ return false;
+ }
+
+ /* Defer to CPU feature registers */
+ return !cpuid_feature_extract_unsigned_field(pfr0,
+ ID_AA64PFR0_CSV3_SHIFT);
+}
+
+static int __init parse_kpti(char *str)
+{
+ bool enabled;
+ int ret = strtobool(str, &enabled);
+
+ if (ret)
+ return ret;
+
+ __kpti_forced = enabled ? 1 : -1;
+ return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+static int cpu_copy_el2regs(void *__unused)
+{
+ /*
+ * Copy register values that aren't redirected by hardware.
+ *
+ * Before code patching, we only set tpidr_el1, all CPUs need to copy
+ * this value to tpidr_el2 before we patch the code. Once we've done
+ * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
+ * do anything here.
+ */
+ if (!alternatives_applied)
+ write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+
+ return 0;
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -915,6 +980,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_VIRT_HOST_EXTN,
.def_scope = SCOPE_SYSTEM,
.matches = runs_at_el2,
+ .enable = cpu_copy_el2regs,
},
{
.desc = "32-bit EL0 Support",
@@ -932,6 +998,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.def_scope = SCOPE_SYSTEM,
.matches = hyp_offset_low,
},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ {
+ .desc = "Kernel page table isolation (KPTI)",
+ .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = unmap_kernel_at_el0,
+ },
+#endif
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
@@ -963,6 +1037,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.enable = sve_kernel_enable,
},
#endif /* CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_RAS_EXTN
+ {
+ .desc = "RAS Extension Support",
+ .capability = ARM64_HAS_RAS_EXTN,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_RAS_SHIFT,
+ .min_field_value = ID_AA64PFR0_RAS_V1,
+ .enable = cpu_clear_disr,
+ },
+#endif /* CONFIG_ARM64_RAS_EXTN */
{},
};
@@ -992,6 +1079,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
@@ -1071,6 +1159,25 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
cap_set_elf_hwcap(hwcaps);
}
+/*
+ * Check if the current CPU has a given feature capability.
+ * Should be called from non-preemptible context.
+ */
+static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+ unsigned int cap)
+{
+ const struct arm64_cpu_capabilities *caps;
+
+ if (WARN_ON(preemptible()))
+ return false;
+
+ for (caps = cap_array; caps->matches; caps++)
+ if (caps->capability == cap &&
+ caps->matches(caps, SCOPE_LOCAL_CPU))
+ return true;
+ return false;
+}
+
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info)
{
@@ -1106,7 +1213,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
* uses an IPI, giving us a PSTATE that disappears when
* we return.
*/
- stop_machine(caps->enable, NULL, cpu_online_mask);
+ stop_machine(caps->enable, (void *)caps, cpu_online_mask);
}
}
}
@@ -1134,8 +1241,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
}
static void
-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
+verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
{
+ const struct arm64_cpu_capabilities *caps = caps_list;
for (; caps->matches; caps++) {
if (!cpus_have_cap(caps->capability))
continue;
@@ -1143,13 +1251,13 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
* If the new CPU misses an advertised feature, we cannot proceed
* further, park the cpu.
*/
- if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ if (!__this_cpu_has_cap(caps_list, caps->capability)) {
pr_crit("CPU%d: missing feature: %s\n",
smp_processor_id(), caps->desc);
cpu_die_early();
}
if (caps->enable)
- caps->enable(NULL);
+ caps->enable((void *)caps);
}
}
@@ -1189,6 +1297,9 @@ static void verify_local_cpu_capabilities(void)
if (system_supports_sve())
verify_sve_features();
+
+ if (system_uses_ttbr0_pan())
+ pr_info("Emulating Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
}
void check_local_cpu_capabilities(void)
@@ -1225,25 +1336,6 @@ static void __init mark_const_caps_ready(void)
static_branch_enable(&arm64_const_caps_ready);
}
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
- unsigned int cap)
-{
- const struct arm64_cpu_capabilities *caps;
-
- if (WARN_ON(preemptible()))
- return false;
-
- for (caps = cap_array; caps->desc; caps++)
- if (caps->capability == cap && caps->matches)
- return caps->matches(caps, SCOPE_LOCAL_CPU);
-
- return false;
-}
-
extern const struct arm64_cpu_capabilities arm64_errata[];
bool this_cpu_has_cap(unsigned int cap)
@@ -1387,3 +1479,11 @@ static int __init enable_mrs_emulation(void)
}
core_initcall(enable_mrs_emulation);
+
+int cpu_clear_disr(void *__unused)
+{
+ /* Firmware may have left a deferred SError in this register. */
+ write_sysreg_s(0, SYS_DISR_EL1);
+
+ return 0;
+}
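For reference, parse_kpti() feeds strtobool(), so the new switch takes the usual boolean spellings on the kernel command line: for example kpti=off (or kpti=0) forces the trampoline off even on a CONFIG_RANDOMIZE_BASE=y kernel, while kpti=1 forces it on for CPUs the CSV3/MIDR checks would otherwise exempt. (Illustrative usage inferred from the parser above, not from separate documentation.)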
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index fd691087dc9a..f2d13810daa8 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -47,6 +47,8 @@ int arm_cpuidle_suspend(int index)
#include <acpi/processor.h>
+#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
+
int acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
return arm_cpuidle_init(cpu);
@@ -54,6 +56,10 @@ int acpi_processor_ffh_lpi_probe(unsigned int cpu)
int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
- return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
+ if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend,
+ lpi->index);
+ else
+ return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
}
#endif
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 1e2554543506..7f94623df8a5 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -76,6 +76,7 @@ static const char *const hwcap_str[] = {
"asimddp",
"sha512",
"sve",
+ "asimdfhm",
NULL
};
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index a88b6ccebbb4..53781f5687c5 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -209,12 +209,13 @@ NOKPROBE_SYMBOL(call_step_hook);
static void send_user_sigtrap(int si_code)
{
struct pt_regs *regs = current_pt_regs();
- siginfo_t info = {
- .si_signo = SIGTRAP,
- .si_errno = 0,
- .si_code = si_code,
- .si_addr = (void __user *)instruction_pointer(regs),
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void __user *)instruction_pointer(regs);
if (WARN_ON(!user_mode(regs)))
return;
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 82cd07592519..f85ac58d08a3 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -48,7 +48,9 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
return pgprot_val(PAGE_KERNEL_ROX);
/* RW- */
- if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
+ if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
+ EFI_MEMORY_XP) ||
+ type != EFI_RUNTIME_SERVICES_CODE)
return pgprot_val(PAGE_KERNEL);
/* RWX */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 6d14b8f29b5f..b34e717d7597 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,6 +28,8 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@@ -69,8 +71,21 @@
#define BAD_FIQ 2
#define BAD_ERROR 3
- .macro kernel_ventry label
+ .macro kernel_ventry, el, label, regsize = 64
.align 7
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+ .if \el == 0
+ .if \regsize == 64
+ mrs x30, tpidrro_el0
+ msr tpidrro_el0, xzr
+ .else
+ mov x30, xzr
+ .endif
+ .endif
+alternative_else_nop_endif
+#endif
+
sub sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
/*
@@ -82,7 +97,7 @@
tbnz x0, #THREAD_SHIFT, 0f
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
- b \label
+ b el\()\el\()_\label
0:
/*
@@ -114,7 +129,12 @@
sub sp, sp, x0
mrs x0, tpidrro_el0
#endif
- b \label
+ b el\()\el\()_\label
+ .endm
+
+ .macro tramp_alias, dst, sym
+ mov_q \dst, TRAMP_VALIAS
+ add \dst, \dst, #(\sym - .entry.tramp.text)
.endm
.macro kernel_entry, el, regsize = 64
@@ -185,7 +205,7 @@ alternative_else_nop_endif
.if \el != 0
mrs x21, ttbr0_el1
- tst x21, #0xffff << 48 // Check for the reserved ASID
+ tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
b.eq 1f // TTBR0 access already disabled
and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
@@ -248,7 +268,7 @@ alternative_else_nop_endif
tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
.endif
- __uaccess_ttbr0_enable x0
+ __uaccess_ttbr0_enable x0, x1
.if \el == 0
/*
@@ -257,7 +277,7 @@ alternative_else_nop_endif
* Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
* corruption).
*/
- post_ttbr0_update_workaround
+ bl post_ttbr_update_workaround
.endif
1:
.if \el != 0
@@ -269,18 +289,20 @@ alternative_else_nop_endif
.if \el == 0
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
+ tst x22, #PSR_MODE32_BIT // native task?
+ b.eq 3f
+
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
- tbz x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
mrs x29, contextidr_el1
msr contextidr_el1, x29
#else
msr contextidr_el1, xzr
#endif
-1:
alternative_else_nop_endif
#endif
+3:
.endif
msr elr_el1, x21 // set up the return data
@@ -302,7 +324,21 @@ alternative_else_nop_endif
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp
- eret // return to kernel
+
+ .if \el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ bne 4f
+ msr far_el1, x30
+ tramp_alias x30, tramp_exit_native
+ br x30
+4:
+ tramp_alias x30, tramp_exit_compat
+ br x30
+#endif
+ .else
+ eret
+ .endif
.endm
.macro irq_stack_entry
@@ -367,31 +403,31 @@ tsk .req x28 // current thread_info
.align 11
ENTRY(vectors)
- kernel_ventry el1_sync_invalid // Synchronous EL1t
- kernel_ventry el1_irq_invalid // IRQ EL1t
- kernel_ventry el1_fiq_invalid // FIQ EL1t
- kernel_ventry el1_error_invalid // Error EL1t
+ kernel_ventry 1, sync_invalid // Synchronous EL1t
+ kernel_ventry 1, irq_invalid // IRQ EL1t
+ kernel_ventry 1, fiq_invalid // FIQ EL1t
+ kernel_ventry 1, error_invalid // Error EL1t
- kernel_ventry el1_sync // Synchronous EL1h
- kernel_ventry el1_irq // IRQ EL1h
- kernel_ventry el1_fiq_invalid // FIQ EL1h
- kernel_ventry el1_error // Error EL1h
+ kernel_ventry 1, sync // Synchronous EL1h
+ kernel_ventry 1, irq // IRQ EL1h
+ kernel_ventry 1, fiq_invalid // FIQ EL1h
+ kernel_ventry 1, error // Error EL1h
- kernel_ventry el0_sync // Synchronous 64-bit EL0
- kernel_ventry el0_irq // IRQ 64-bit EL0
- kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
- kernel_ventry el0_error // Error 64-bit EL0
+ kernel_ventry 0, sync // Synchronous 64-bit EL0
+ kernel_ventry 0, irq // IRQ 64-bit EL0
+ kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
+ kernel_ventry 0, error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
- kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
- kernel_ventry el0_irq_compat // IRQ 32-bit EL0
- kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- kernel_ventry el0_error_compat // Error 32-bit EL0
+ kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
#else
- kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
- kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
- kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0
- kernel_ventry el0_error_invalid // Error 32-bit EL0
+ kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
#endif
END(vectors)
@@ -685,12 +721,15 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
- enable_daif
+ enable_da_f
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
- bl do_mem_abort
+ bl do_el0_ia_bp_hardening
b ret_to_user
el0_fpsimd_acc:
/*
@@ -943,6 +982,124 @@ __ni_sys_trace:
.popsection // .entry.text
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+ .pushsection ".entry.tramp.text", "ax"
+
+ .macro tramp_map_kernel, tmp
+ mrs \tmp, ttbr1_el1
+ add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+ bic \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ /* ASID already in \tmp[63:48] */
+ movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+ movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+ /* 2MB boundary containing the vectors, so we nobble the walk cache */
+ movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+ isb
+ tlbi vae1, \tmp
+ dsb nsh
+alternative_else_nop_endif
+#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
+ .endm
+
+ .macro tramp_unmap_kernel, tmp
+ mrs \tmp, ttbr1_el1
+ sub \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+ orr \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+ /*
+ * We avoid running the post_ttbr_update_workaround here because the
+ * user and kernel ASIDs don't have conflicting mappings, so any
+ * "blessing" as described in:
+ *
+ * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
+ *
+ * will not hurt correctness. Whilst this may partially defeat the
+ * point of using split ASIDs in the first place, it avoids
+ * the hit of invalidating the entire I-cache on every return to
+ * userspace.
+ */
+ .endm
+
+ .macro tramp_ventry, regsize = 64
+ .align 7
+1:
+ .if \regsize == 64
+ msr tpidrro_el0, x30 // Restored in kernel_ventry
+ .endif
+ /*
+ * Defend against branch aliasing attacks by pushing a dummy
+ * entry onto the return stack and using a RET instruction to
+ * enter the full-fat kernel vectors.
+ */
+ bl 2f
+ b .
+2:
+ tramp_map_kernel x30
+#ifdef CONFIG_RANDOMIZE_BASE
+ adr x30, tramp_vectors + PAGE_SIZE
+alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ ldr x30, [x30]
+#else
+ ldr x30, =vectors
+#endif
+ prfm plil1strm, [x30, #(1b - tramp_vectors)]
+ msr vbar_el1, x30
+ add x30, x30, #(1b - tramp_vectors)
+ isb
+ ret
+ .endm
+
+ .macro tramp_exit, regsize = 64
+ adr x30, tramp_vectors
+ msr vbar_el1, x30
+ tramp_unmap_kernel x30
+ .if \regsize == 64
+ mrs x30, far_el1
+ .endif
+ eret
+ .endm
+
+ .align 11
+ENTRY(tramp_vectors)
+ .space 0x400
+
+ tramp_ventry
+ tramp_ventry
+ tramp_ventry
+ tramp_ventry
+
+ tramp_ventry 32
+ tramp_ventry 32
+ tramp_ventry 32
+ tramp_ventry 32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+ tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+ tramp_exit 32
+END(tramp_exit_compat)
+
+ .ltorg
+ .popsection // .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+ .pushsection ".rodata", "a"
+ .align PAGE_SHIFT
+ .globl __entry_tramp_data_start
+__entry_tramp_data_start:
+ .quad vectors
+ .popsection // .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
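A note on the fixed offsets in tramp_map_kernel/tramp_unmap_kernel: they assume the linker script places the trampoline and swapper page-table directories back to back, so switching between them is pure TTBR1 arithmetic with no loads. Sketched layout (an assumption about vmlinux.lds.S, which this hunk does not show):

	tramp_pg_dir		/* PAGE_SIZE; live TTBR1 while at EL0   */
	reserved_ttbr0		/* RESERVED_TTBR0_SIZE, for SW PAN only */
	swapper_pg_dir		/* live TTBR1 once the kernel is mapped */

	swapper_pg_dir == tramp_pg_dir + PAGE_SIZE + RESERVED_TTBR0_SIZE

The x30 shuffle follows the same pattern: tramp_ventry parks the real x30 in tpidrro_el0, and the reworked kernel_ventry at the top of this file retrieves and clears it once the kernel mapping is back.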
+
/*
* Special system call wrappers.
*/
@@ -996,3 +1153,180 @@ ENTRY(ret_from_fork)
b ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+
+#include <asm/sdei.h>
+#include <uapi/linux/arm_sdei.h>
+
+.macro sdei_handler_exit exit_mode
+ /* On success, this call never returns... */
+ cmp \exit_mode, #SDEI_EXIT_SMC
+ b.ne 99f
+ smc #0
+ b .
+99: hvc #0
+ b .
+.endm
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * The regular SDEI entry point may have been unmapped along with the rest of
+ * the kernel. This trampoline restores the kernel mapping to make the x1 memory
+ * argument accessible.
+ *
+ * This clobbers x4, __sdei_handler() will restore this from firmware's
+ * copy.
+ */
+.ltorg
+.pushsection ".entry.tramp.text", "ax"
+ENTRY(__sdei_asm_entry_trampoline)
+ mrs x4, ttbr1_el1
+ tbz x4, #USER_ASID_BIT, 1f
+
+ tramp_map_kernel tmp=x4
+ isb
+ mov x4, xzr
+
+ /*
+ * Use reg->interrupted_regs.addr_limit to remember whether to unmap
+ * the kernel on exit.
+ */
+1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ adr x4, tramp_vectors + PAGE_SIZE
+ add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
+ ldr x4, [x4]
+#else
+ ldr x4, =__sdei_asm_handler
+#endif
+ br x4
+ENDPROC(__sdei_asm_entry_trampoline)
+NOKPROBE(__sdei_asm_entry_trampoline)
+
+/*
+ * Make the exit call and restore the original ttbr1_el1
+ *
+ * x0 & x1: setup for the exit API call
+ * x2: exit_mode
+ * x4: struct sdei_registered_event argument from registration time.
+ */
+ENTRY(__sdei_asm_exit_trampoline)
+ ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+ cbnz x4, 1f
+
+ tramp_unmap_kernel tmp=x4
+
+1: sdei_handler_exit exit_mode=x2
+ENDPROC(__sdei_asm_exit_trampoline)
+NOKPROBE(__sdei_asm_exit_trampoline)
+ .ltorg
+.popsection // .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+.pushsection ".rodata", "a"
+__sdei_asm_trampoline_next_handler:
+ .quad __sdei_asm_handler
+.popsection // .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+/*
+ * Software Delegated Exception entry point.
+ *
+ * x0: Event number
+ * x1: struct sdei_registered_event argument from registration time.
+ * x2: interrupted PC
+ * x3: interrupted PSTATE
+ * x4: maybe clobbered by the trampoline
+ *
+ * Firmware has preserved x0->x17 for us, we must save/restore the rest to
+ * follow SMC-CC. We save (or retrieve) all the registers as the handler may
+ * want them.
+ */
+ENTRY(__sdei_asm_handler)
+ stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
+ stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
+ stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
+ stp x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
+ stp x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
+ stp x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
+ stp x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
+ stp x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
+ stp x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
+ stp x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
+ stp x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
+ stp x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
+ stp x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
+ stp x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
+ mov x4, sp
+ stp lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
+
+ mov x19, x1
+
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * entry.S may have been using sp as a scratch register, find whether
+ * this is a normal or critical event and switch to the appropriate
+ * stack for this CPU.
+ */
+ ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
+ cbnz w4, 1f
+ ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
+ b 2f
+1: ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
+2: mov x6, #SDEI_STACK_SIZE
+ add x5, x5, x6
+ mov sp, x5
+#endif
+
+ /*
+ * We may have interrupted userspace, or a guest, or the exit from or
+ * return to either of these. We can't trust sp_el0, so restore it.
+ */
+ mrs x28, sp_el0
+ ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1
+ msr sp_el0, x0
+
+ /* If we interrupted the kernel point to the previous stack/frame. */
+ and x0, x3, #0xc
+ mrs x1, CurrentEL
+ cmp x0, x1
+ csel x29, x29, xzr, eq // fp, or zero
+ csel x4, x2, xzr, eq // elr, or zero
+
+ stp x29, x4, [sp, #-16]!
+ mov x29, sp
+
+ add x0, x19, #SDEI_EVENT_INTREGS
+ mov x1, x19
+ bl __sdei_handler
+
+ msr sp_el0, x28
+ /* restore regs >x17 that we clobbered */
+ mov x4, x19 // keep x4 for __sdei_asm_exit_trampoline
+ ldp x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
+ ldp x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
+ ldp lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
+ mov sp, x1
+
+ mov x1, x0 // address to complete_and_resume
+ /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
+ cmp x0, #1
+ mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
+ mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
+ csel x0, x2, x3, ls
+
+ ldr_l x2, sdei_exit_mode
+
+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ sdei_handler_exit exit_mode=x2
+alternative_else_nop_endif
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
+ br x5
+#endif
+ENDPROC(__sdei_asm_handler)
+NOKPROBE(__sdei_asm_handler)
+#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index fae81f7964b4..e7226c4c7493 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -867,7 +867,7 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
siginfo_t info;
- unsigned int si_code = 0;
+ unsigned int si_code = FPE_FIXME;
if (esr & FPEXC_IOF)
si_code = FPE_FLTINV;
@@ -1036,14 +1036,14 @@ void fpsimd_restore_current_state(void)
* flag that indicates that the FPSIMD register contents are the most recent
* FPSIMD state of 'current'
*/
-void fpsimd_update_current_state(struct fpsimd_state *state)
+void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
if (!system_supports_fpsimd())
return;
local_bh_disable();
- current->thread.fpsimd_state.user_fpsimd = state->user_fpsimd;
+ current->thread.fpsimd_state.user_fpsimd = *state;
if (system_supports_sve() && test_thread_flag(TIF_SVE))
fpsimd_to_sve(current);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index e3cb9fbf96b6..ba3ab04788dc 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -148,6 +148,26 @@ preserve_boot_args:
ENDPROC(preserve_boot_args)
/*
+ * Macro to arrange a physical address in a page table entry, taking care of
+ * 52-bit addresses.
+ *
+ * Preserves: phys
+ * Returns: pte
+ */
+ .macro phys_to_pte, phys, pte
+#ifdef CONFIG_ARM64_PA_BITS_52
+ /*
+ * We assume \phys is 64K aligned and this is guaranteed by only
+ * supporting this configuration with 64K pages.
+ */
+ orr \pte, \phys, \phys, lsr #36
+ and \pte, \pte, #PTE_ADDR_MASK
+#else
+ mov \pte, \phys
+#endif
+ .endm
+
+/*
* Macro to create a table entry to the next page.
*
* tbl: page table address
@@ -156,54 +176,124 @@ ENDPROC(preserve_boot_args)
* ptrs: #imm pointers per table page
*
* Preserves: virt
- * Corrupts: tmp1, tmp2
+ * Corrupts: ptrs, tmp1, tmp2
* Returns: tbl -> next level table page address
*/
.macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
- lsr \tmp1, \virt, #\shift
- and \tmp1, \tmp1, #\ptrs - 1 // table index
- add \tmp2, \tbl, #PAGE_SIZE
+ add \tmp1, \tbl, #PAGE_SIZE
+ phys_to_pte \tmp1, \tmp2
orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
+ lsr \tmp1, \virt, #\shift
+ sub \ptrs, \ptrs, #1
+ and \tmp1, \tmp1, \ptrs // table index
str \tmp2, [\tbl, \tmp1, lsl #3]
add \tbl, \tbl, #PAGE_SIZE // next level table page
.endm
/*
- * Macro to populate the PGD (and possibily PUD) for the corresponding
- * block entry in the next level (tbl) for the given virtual address.
+ * Macro to populate page table entries, these entries can be pointers to the next level
+ * or last level entries pointing to physical memory.
+ *
+ * tbl: page table address
+ * rtbl: pointer to page table or physical memory
+ * index: start index to write
+ * eindex: end index to write - [index, eindex] written to
+ * flags: flags for pagetable entry to or in
+ * inc: increment to rtbl between each entry
+ * tmp1: temporary variable
*
- * Preserves: tbl, next, virt
- * Corrupts: tmp1, tmp2
+ * Preserves: tbl, eindex, flags, inc
+ * Corrupts: index, tmp1
+ * Returns: rtbl
*/
- .macro create_pgd_entry, tbl, virt, tmp1, tmp2
- create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
-#if SWAPPER_PGTABLE_LEVELS > 3
- create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
-#endif
-#if SWAPPER_PGTABLE_LEVELS > 2
- create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
-#endif
+ .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
+.Lpe\@: phys_to_pte \rtbl, \tmp1
+ orr \tmp1, \tmp1, \flags // tmp1 = table entry
+ str \tmp1, [\tbl, \index, lsl #3]
+ add \rtbl, \rtbl, \inc // rtbl = pa next level
+ add \index, \index, #1
+ cmp \index, \eindex
+ b.ls .Lpe\@
+ .endm
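
In C, reusing phys_to_pte from the sketch above, the loop looks roughly like
this; note that the range is inclusive of eindex:

	static uint64_t populate_entries(uint64_t *tbl, uint64_t rtbl,
					 uint64_t index, uint64_t eindex,
					 uint64_t flags, uint64_t inc)
	{
		for (uint64_t i = index; i <= eindex; i++) {
			tbl[i] = phys_to_pte(rtbl) | flags;
			rtbl += inc;	/* pa of next table page or next block */
		}
		return rtbl;
	}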
+
+/*
+ * Compute indices of table entries from virtual address range. If multiple entries
+ * were needed in the previous page table level then the next page table level is assumed
+ * to be composed of multiple pages. (This effectively scales the end index).
+ *
+ * vstart: virtual address of start of range
+ * vend: virtual address of end of range
+ * shift: shift used to transform virtual address into index
+ * ptrs: number of entries in page table
+ * istart: index in table corresponding to vstart
+ * iend: index in table corresponding to vend
+ * count: On entry: how many extra entries were required in previous level, scales
+ * our end index.
+ * On exit: returns how many extra entries are required for the next page table level
+ *
+ * Preserves: vstart, vend, shift, ptrs
+ * Returns: istart, iend, count
+ */
+ .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
+ lsr \iend, \vend, \shift
+ mov \istart, \ptrs
+ sub \istart, \istart, #1
+ and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
+ mov \istart, \ptrs
+ mul \istart, \istart, \count
+ add \iend, \iend, \istart // iend += count * ptrs
+ // our entries span multiple tables
+
+ lsr \istart, \vstart, \shift
+ mov \count, \ptrs
+ sub \count, \count, #1
+ and \istart, \istart, \count
+
+ sub \count, \iend, \istart
.endm
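
A C sketch of the same computation, assuming 'count' on entry is the number
of extra entries the previous level needed (i.e. pages - 1 at this level):

	static uint64_t compute_indices(uint64_t vstart, uint64_t vend,
					unsigned int shift, uint64_t ptrs,
					uint64_t *istart, uint64_t *iend,
					uint64_t count)
	{
		*iend = ((vend >> shift) & (ptrs - 1)) + count * ptrs;
		*istart = (vstart >> shift) & (ptrs - 1);
		return *iend - *istart;	/* extra entries at the next level */
	}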
/*
- * Macro to populate block entries in the page table for the start..end
- * virtual range (inclusive).
+ * Map memory for the specified virtual address range. Each page table level
+ * may use multiple entries. If a level requires n entries, the next page
+ * table level is assumed to be formed from n pages.
+ *
+ * tbl: location of page table
+ * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
+ * vstart: start address to map
+ * vend: end address to map - we map [vstart, vend]
+ * flags: flags to use to map last level entries
+ * phys: physical address corresponding to vstart - physical memory is contiguous
+ * pgds: the number of pgd entries
*
- * Preserves: tbl, flags
- * Corrupts: phys, start, end, pstate
+ * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
+ * Preserves: vstart, vend, flags
+ * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
*/
- .macro create_block_map, tbl, flags, phys, start, end
- lsr \phys, \phys, #SWAPPER_BLOCK_SHIFT
- lsr \start, \start, #SWAPPER_BLOCK_SHIFT
- and \start, \start, #PTRS_PER_PTE - 1 // table index
- orr \phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT // table entry
- lsr \end, \end, #SWAPPER_BLOCK_SHIFT
- and \end, \end, #PTRS_PER_PTE - 1 // table end index
-9999: str \phys, [\tbl, \start, lsl #3] // store the entry
- add \start, \start, #1 // next entry
- add \phys, \phys, #SWAPPER_BLOCK_SIZE // next block
- cmp \start, \end
- b.ls 9999b
+ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
+ add \rtbl, \tbl, #PAGE_SIZE
+ mov \sv, \rtbl
+ mov \count, #0
+ compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
+ populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+ mov \tbl, \sv
+ mov \sv, \rtbl
+
+#if SWAPPER_PGTABLE_LEVELS > 3
+ compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
+ populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+ mov \tbl, \sv
+ mov \sv, \rtbl
+#endif
+
+#if SWAPPER_PGTABLE_LEVELS > 2
+ compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
+ populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+ mov \tbl, \sv
+#endif
+
+ compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
+ bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
+ populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
.endm
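
Putting the pieces together, a sketch of the walk for the PGD level plus the
final block level (the PUD/PMD stages repeat the first pattern; kernel
constants such as PGDIR_SHIFT, PTRS_PER_PTE, SWAPPER_BLOCK_SHIFT and
SWAPPER_BLOCK_SIZE are assumed in scope):

	static void map_memory(uint64_t *tbl, uint64_t vstart, uint64_t vend,
			       uint64_t flags, uint64_t phys, uint64_t pgds)
	{
		uint64_t rtbl = (uint64_t)tbl + PAGE_SIZE;	/* next free table page */
		uint64_t istart, iend, count = 0, next;

		/* PGD level: entries point at the table page(s) that follow */
		count = compute_indices(vstart, vend, PGDIR_SHIFT, pgds,
					&istart, &iend, count);
		next = rtbl;
		rtbl = populate_entries(tbl, rtbl, istart, iend,
					PMD_TYPE_TABLE, PAGE_SIZE);
		tbl = (uint64_t *)next;

		/* ... PUD and PMD levels repeat the pattern above ... */

		/* last level: map physical memory in SWAPPER_BLOCK_SIZE steps */
		count = compute_indices(vstart, vend, SWAPPER_BLOCK_SHIFT,
					PTRS_PER_PTE, &istart, &iend, count);
		populate_entries(tbl, phys & ~(SWAPPER_BLOCK_SIZE - 1ULL),
				 istart, iend, flags, SWAPPER_BLOCK_SIZE);
	}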
/*
@@ -221,14 +311,16 @@ __create_page_tables:
* dirty cache lines being evicted.
*/
adrp x0, idmap_pg_dir
- ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+ adrp x1, swapper_pg_end
+ sub x1, x1, x0
bl __inval_dcache_area
/*
* Clear the idmap and swapper page tables.
*/
adrp x0, idmap_pg_dir
- ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+ adrp x1, swapper_pg_end
+ sub x1, x1, x0
1: stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
@@ -244,26 +336,13 @@ __create_page_tables:
adrp x0, idmap_pg_dir
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
-#ifndef CONFIG_ARM64_VA_BITS_48
-#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
-#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT))
-
- /*
- * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
- * created that covers system RAM if that is located sufficiently high
- * in the physical address space. So for the ID map, use an extended
- * virtual range in that case, by configuring an additional translation
- * level.
- * First, we have to verify our assumption that the current value of
- * VA_BITS was chosen such that all translation levels are fully
- * utilised, and that lowering T0SZ will always result in an additional
- * translation level to be configured.
- */
-#if VA_BITS != EXTRA_SHIFT
-#error "Mismatch between VA_BITS and page size/number of translation levels"
-#endif
-
/*
+ * VA_BITS may be too small to allow for an ID mapping to be created
+ * that covers system RAM if that is located sufficiently high in the
+ * physical address space. So for the ID map, use an extended virtual
+ * range in that case, and configure an additional translation level
+ * if needed.
+ *
* Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
* entire ID map region can be mapped. As T0SZ == (64 - #bits used),
* this number conveniently equals the number of leading zeroes in
@@ -272,21 +351,44 @@ __create_page_tables:
adrp x5, __idmap_text_end
clz x5, x5
cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
- b.ge 1f // .. then skip additional level
+ b.ge 1f // .. then skip VA range extension
adr_l x6, idmap_t0sz
str x5, [x6]
dmb sy
dc ivac, x6 // Invalidate potentially stale cache line
- create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
-1:
+#if (VA_BITS < 48)
+#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define EXTRA_PTRS (1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
+
+ /*
+ * If VA_BITS < 48, we have to configure an additional table level.
+ * First, we have to verify our assumption that the current value of
+ * VA_BITS was chosen such that all translation levels are fully
+ * utilised, and that lowering T0SZ will always result in an additional
+ * translation level to be configured.
+ */
+#if VA_BITS != EXTRA_SHIFT
+#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif
- create_pgd_entry x0, x3, x5, x6
+ mov x4, EXTRA_PTRS
+ create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
+#else
+ /*
+ * If VA_BITS == 48, we don't have to configure an additional
+ * translation level, but the top-level table has more entries.
+ */
+ mov x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
+ str_l x4, idmap_ptrs_per_pgd, x5
+#endif
+1:
+ ldr_l x4, idmap_ptrs_per_pgd
mov x5, x3 // __pa(__idmap_text_start)
adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
- create_block_map x0, x7, x3, x5, x6
+
+ map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
/*
* Map the kernel image (starting with PHYS_OFFSET).
@@ -294,12 +396,13 @@ __create_page_tables:
adrp x0, swapper_pg_dir
mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
add x5, x5, x23 // add KASLR displacement
- create_pgd_entry x0, x5, x3, x6
+ mov x4, PTRS_PER_PGD
adrp x6, _end // runtime __pa(_end)
adrp x3, _text // runtime __pa(_text)
sub x6, x6, x3 // _end - _text
add x6, x6, x5 // runtime __va(_end)
- create_block_map x0, x7, x3, x5, x6
+
+ map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
/*
* Since the page tables have been populated with non-cacheable
@@ -307,7 +410,8 @@ __create_page_tables:
* tables again to remove any speculatively loaded cache lines.
*/
adrp x0, idmap_pg_dir
- ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+ adrp x1, swapper_pg_end
+ sub x1, x1, x0
dmb sy
bl __inval_dcache_area
@@ -388,17 +492,13 @@ ENTRY(el2_setup)
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq 1f
- mrs x0, sctlr_el1
-CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
-CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
+ mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
msr sctlr_el1, x0
mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
isb
ret
-1: mrs x0, sctlr_el2
-CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
-CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
+1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
msr sctlr_el2, x0
#ifdef CONFIG_ARM64_VHE
@@ -514,10 +614,7 @@ install_el2_stub:
* requires no configuration, and all non-hyp-specific EL2 setup
* will be done via the _EL1 system register aliases in __cpu_setup.
*/
- /* sctlr_el1 */
- mov x0, #0x0800 // Set/clear RES{1,0} bits
-CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
-CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
+ mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
msr sctlr_el1, x0
/* Coprocessor traps. */
@@ -679,8 +776,10 @@ ENTRY(__enable_mmu)
update_early_cpu_boot_status 0, x1, x2
adrp x1, idmap_pg_dir
adrp x2, swapper_pg_dir
- msr ttbr0_el1, x1 // load TTBR0
- msr ttbr1_el1, x2 // load TTBR1
+ phys_to_ttbr x1, x3
+ phys_to_ttbr x2, x4
+ msr ttbr0_el1, x3 // load TTBR0
+ msr ttbr1_el1, x4 // load TTBR1
isb
msr sctlr_el1, x0
isb
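
phys_to_ttbr is the TTBR analogue of phys_to_pte above: with 52-bit PAs,
TTBR baddr bits [5:2] hold PA bits [51:48]. A sketch under that assumption
(the mask value is illustrative, not quoted from this patch):

	#include <stdint.h>

	#define TTBR_BADDR_MASK_52	(((1ULL << 46) - 1) << 2)

	static uint64_t phys_to_ttbr52(uint64_t phys)
	{
		/* phys >> 46 moves PA bit 48 to bit 2, ..., bit 51 to bit 5 */
		return (phys | (phys >> 46)) & TTBR_BADDR_MASK_52;
	}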
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index e56d848b6466..84f5d52fddda 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -33,12 +33,14 @@
* Even switching to our copied tables will cause a changed output address at
* each stage of the walk.
*/
-.macro break_before_make_ttbr_switch zero_page, page_table
- msr ttbr1_el1, \zero_page
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+ phys_to_ttbr \zero_page, \tmp
+ msr ttbr1_el1, \tmp
isb
tlbi vmalle1
dsb nsh
- msr ttbr1_el1, \page_table
+ phys_to_ttbr \page_table, \tmp
+ msr ttbr1_el1, \tmp
isb
.endm
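
A C-level sketch of the break-before-make sequence, using the kernel's
write_sysreg() and the C phys_to_ttbr() seen later in this patch; illustrative
only, since the real sequence must run from the hibernate exit text while
translating via TTBR0:

	static void bbm_ttbr1_switch(u64 zero_page, u64 page_table)
	{
		write_sysreg(phys_to_ttbr(zero_page), ttbr1_el1);	/* break */
		isb();
		asm volatile("tlbi vmalle1");	/* discard old TTBR1 walks */
		asm volatile("dsb nsh");
		write_sysreg(phys_to_ttbr(page_table), ttbr1_el1);	/* make */
		isb();
	}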
@@ -78,7 +80,7 @@ ENTRY(swsusp_arch_suspend_exit)
* We execute from ttbr0, change ttbr1 to our copied linear map tables
* with a break-before-make via the zero page
*/
- break_before_make_ttbr_switch x5, x0
+ break_before_make_ttbr_switch x5, x0, x6
mov x21, x1
mov x30, x2
@@ -109,7 +111,7 @@ ENTRY(swsusp_arch_suspend_exit)
dsb ish /* wait for PoU cleaning to finish */
/* switch to the restored kernels page tables */
- break_before_make_ttbr_switch x25, x21
+ break_before_make_ttbr_switch x25, x21, x6
ic ialluis
dsb ish
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 3009b8b80f08..f20cf7e99249 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -247,8 +247,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
}
pte = pte_offset_kernel(pmd, dst_addr);
- set_pte(pte, __pte(virt_to_phys((void *)dst) |
- pgprot_val(PAGE_KERNEL_EXEC)));
+ set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
/*
* Load our new page tables. A strict BBM approach requires that we
@@ -264,7 +263,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
*/
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
- write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+ write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
isb();
*phys_dst_addr = virt_to_phys((void *)dst);
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 713561e5bcab..60e5fc661f74 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -29,6 +29,7 @@
#include <linux/irqchip.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <asm/vmap_stack.h>
unsigned long irq_err_count;
@@ -58,17 +59,7 @@ static void init_irq_stacks(void)
unsigned long *p;
for_each_possible_cpu(cpu) {
- /*
- * To ensure that VMAP'd stack overflow detection works
- * correctly, the IRQ stacks need to have the same
- * alignment as other stacks.
- */
- p = __vmalloc_node_range(IRQ_STACK_SIZE, THREAD_ALIGN,
- VMALLOC_START, VMALLOC_END,
- THREADINFO_GFP, PAGE_KERNEL,
- 0, cpu_to_node(cpu),
- __builtin_return_address(0));
-
+ p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
per_cpu(irq_stack_ptr, cpu) = p;
}
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6b7dcf4310ac..583fd8154695 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -370,16 +370,14 @@ void tls_preserve_current_state(void)
static void tls_thread_switch(struct task_struct *next)
{
- unsigned long tpidr, tpidrro;
-
tls_preserve_current_state();
- tpidr = *task_user_tls(next);
- tpidrro = is_compat_thread(task_thread_info(next)) ?
- next->thread.tp_value : 0;
+ if (is_compat_thread(task_thread_info(next)))
+ write_sysreg(next->thread.tp_value, tpidrro_el0);
+ else if (!arm64_kernel_unmapped_at_el0())
+ write_sysreg(0, tpidrro_el0);
- write_sysreg(tpidr, tpidr_el0);
- write_sysreg(tpidrro, tpidrro_el0);
+ write_sysreg(*task_user_tls(next), tpidr_el0);
}
/* Restore the UAO state depending on next's addr_limit */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 7c44658b316d..6618036ae6d4 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -180,34 +180,34 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct pt_regs *regs)
{
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
- siginfo_t info = {
- .si_signo = SIGTRAP,
- .si_errno = 0,
- .si_code = TRAP_HWBKPT,
- .si_addr = (void __user *)(bkpt->trigger),
- };
+ siginfo_t info;
-#ifdef CONFIG_COMPAT
- int i;
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_HWBKPT;
+ info.si_addr = (void __user *)(bkpt->trigger);
- if (!is_compat_task())
- goto send_sig;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ int si_errno = 0;
+ int i;
- for (i = 0; i < ARM_MAX_BRP; ++i) {
- if (current->thread.debug.hbp_break[i] == bp) {
- info.si_errno = (i << 1) + 1;
- break;
+ for (i = 0; i < ARM_MAX_BRP; ++i) {
+ if (current->thread.debug.hbp_break[i] == bp) {
+ si_errno = (i << 1) + 1;
+ break;
+ }
}
- }
- for (i = 0; i < ARM_MAX_WRP; ++i) {
- if (current->thread.debug.hbp_watch[i] == bp) {
- info.si_errno = -((i << 1) + 1);
- break;
+ for (i = 0; i < ARM_MAX_WRP; ++i) {
+ if (current->thread.debug.hbp_watch[i] == bp) {
+ si_errno = -((i << 1) + 1);
+ break;
+ }
}
+ force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger);
}
-
-send_sig:
#endif
force_sig_info(SIGTRAP, &info, current);
}
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
new file mode 100644
index 000000000000..6b8d90d5ceae
--- /dev/null
+++ b/arch/arm64/kernel/sdei.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <linux/arm_sdei.h>
+#include <linux/hardirq.h>
+#include <linux/irqflags.h>
+#include <linux/sched/task_stack.h>
+#include <linux/uaccess.h>
+
+#include <asm/alternative.h>
+#include <asm/kprobes.h>
+#include <asm/mmu.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/sysreg.h>
+#include <asm/vmap_stack.h>
+
+unsigned long sdei_exit_mode;
+
+/*
+ * VMAP'd stacks check for stack overflow on exception using sp as a scratch
+ * register, meaning SDEI has to switch to its own stack. We need two stacks as
+ * a critical event may interrupt a normal event that has just taken a
+ * synchronous exception, and is using sp as a scratch register. For a critical
+ * event interrupting a normal event, we can't reliably tell if we were on the
+ * sdei stack.
+ * For now, we allocate stacks when the driver is probed.
+ */
+DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
+DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
+
+#ifdef CONFIG_VMAP_STACK
+DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
+DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
+#endif
+
+static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
+{
+ unsigned long *p;
+
+ p = per_cpu(*ptr, cpu);
+ if (p) {
+ per_cpu(*ptr, cpu) = NULL;
+ vfree(p);
+ }
+}
+
+static void free_sdei_stacks(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ _free_sdei_stack(&sdei_stack_normal_ptr, cpu);
+ _free_sdei_stack(&sdei_stack_critical_ptr, cpu);
+ }
+}
+
+static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
+{
+ unsigned long *p;
+
+ p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
+ if (!p)
+ return -ENOMEM;
+ per_cpu(*ptr, cpu) = p;
+
+ return 0;
+}
+
+static int init_sdei_stacks(void)
+{
+ int cpu;
+ int err = 0;
+
+ for_each_possible_cpu(cpu) {
+ err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
+ if (err)
+ break;
+ err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
+ if (err)
+ break;
+ }
+
+ if (err)
+ free_sdei_stacks();
+
+ return err;
+}
+
+bool _on_sdei_stack(unsigned long sp)
+{
+ unsigned long low, high;
+
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return false;
+
+ low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
+ high = low + SDEI_STACK_SIZE;
+
+ if (low <= sp && sp < high)
+ return true;
+
+ low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
+ high = low + SDEI_STACK_SIZE;
+
+ return (low <= sp && sp < high);
+}
+
+unsigned long sdei_arch_get_entry_point(int conduit)
+{
+ /*
+ * SDEI works between adjacent exception levels. If we booted at EL1 we
+ * assume a hypervisor is marshalling events. If we booted at EL2 and
+ * dropped to EL1 because we don't support VHE, then we can't support
+ * SDEI.
+ */
+ if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
+ pr_err("Not supported on this hardware/boot configuration\n");
+ return 0;
+ }
+
+ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+ if (init_sdei_stacks())
+ return 0;
+ }
+
+ sdei_exit_mode = (conduit == CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ if (arm64_kernel_unmapped_at_el0()) {
+ unsigned long offset;
+
+ offset = (unsigned long)__sdei_asm_entry_trampoline -
+ (unsigned long)__entry_tramp_text_start;
+ return TRAMP_VALIAS + offset;
+ } else
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ return (unsigned long)__sdei_asm_handler;
+
+}
+
+/*
+ * __sdei_handler() returns one of:
+ * SDEI_EV_HANDLED - success, return to the interrupted context.
+ * SDEI_EV_FAILED - failure, return this error code to firmware.
+ * virtual-address - success, return to this address.
+ */
+static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
+{
+ u32 mode;
+ int i, err = 0;
+ int clobbered_registers = 4;
+ u64 elr = read_sysreg(elr_el1);
+ u32 kernel_mode = read_sysreg(CurrentEL) | 1; /* +SPSel */
+ unsigned long vbar = read_sysreg(vbar_el1);
+
+ if (arm64_kernel_unmapped_at_el0())
+ clobbered_registers++;
+
+	/* Retrieve the missing register values */
+ for (i = 0; i < clobbered_registers; i++) {
+ /* from within the handler, this call always succeeds */
+ sdei_api_event_context(i, &regs->regs[i]);
+ }
+
+ /*
+	 * We didn't take an exception to get here, so set PAN. UAO will be
+	 * cleared by sdei_event_handler()'s set_fs(USER_DS) call.
+ */
+ __uaccess_enable_hw_pan();
+
+ err = sdei_event_handler(regs, arg);
+ if (err)
+ return SDEI_EV_FAILED;
+
+ if (elr != read_sysreg(elr_el1)) {
+ /*
+ * We took a synchronous exception from the SDEI handler.
+ * This could deadlock, and if you interrupt KVM it will
+ * hyp-panic instead.
+ */
+ pr_warn("unsafe: exception during handler\n");
+ }
+
+ mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);
+
+ /*
+ * If we interrupted the kernel with interrupts masked, we always go
+ * back to wherever we came from.
+ */
+ if (mode == kernel_mode && !interrupts_enabled(regs))
+ return SDEI_EV_HANDLED;
+
+ /*
+	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
+	 * receive signals before we return to them, and lets KVM invoke its
+	 * world switch to do the same.
+ *
+ * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
+ * address'.
+ */
+ if (mode == kernel_mode)
+ return vbar + 0x280;
+ else if (mode & PSR_MODE32_BIT)
+ return vbar + 0x680;
+
+ return vbar + 0x480;
+}
+
+
+asmlinkage __kprobes notrace unsigned long
+__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
+{
+ unsigned long ret;
+ bool do_nmi_exit = false;
+
+ /*
+ * nmi_enter() deals with printk() re-entrance and use of RCU when
+ * RCU believed this CPU was idle. Because critical events can
+ * interrupt normal events, we may already be in_nmi().
+ */
+ if (!in_nmi()) {
+ nmi_enter();
+ do_nmi_exit = true;
+ }
+
+ ret = _sdei_handler(regs, arg);
+
+ if (do_nmi_exit)
+ nmi_exit();
+
+ return ret;
+}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index b120111a46be..f60c052e8d1c 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -178,7 +178,8 @@ static void __user *apply_user_offset(
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
- struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+ struct user_fpsimd_state const *fpsimd =
+ &current->thread.fpsimd_state.user_fpsimd;
int err;
/* copy the FP and status/control registers */
@@ -195,7 +196,7 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
- struct fpsimd_state fpsimd;
+ struct user_fpsimd_state fpsimd;
__u32 magic, size;
int err = 0;
@@ -266,7 +267,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
int err;
unsigned int vq;
- struct fpsimd_state fpsimd;
+ struct user_fpsimd_state fpsimd;
struct sve_context sve;
if (__copy_from_user(&sve, user->sve, sizeof(sve)))
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 22711ee8e36c..79feb861929b 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -125,86 +125,6 @@ static inline int get_sigset_t(sigset_t *set,
return 0;
}
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- int err;
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- * this code is fixed accordingly.
- * It should never copy any pad contained in the structure
- * to avoid security leaks, but must copy the generic
- * 3 ints plus the relevant union member.
- * This routine must convert siginfo from 64bit to 32bit as well
- * at the same time.
- */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad,
- SI_PAD_SIZE);
- else switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_FAULT:
- err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr,
- &to->si_addr);
-#ifdef BUS_MCEERR_AO
- /*
- * Other callers might not initialize the si_lsb field,
- * so check explicitly for the right codes here.
- */
- if (from->si_signo == SIGBUS &&
- (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
- err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
-#endif
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_status, &to->si_status);
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- break;
- case SIL_RT:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_SYS:
- err |= __put_user((compat_uptr_t)(unsigned long)
- from->si_call_addr, &to->si_call_addr);
- err |= __put_user(from->si_syscall, &to->si_syscall);
- err |= __put_user(from->si_arch, &to->si_arch);
- break;
- }
- return err;
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
- copy_from_user(to->_sifields._pad,
- from->_sifields._pad, SI_PAD_SIZE))
- return -EFAULT;
-
- return 0;
-}
-
/*
* VFP save/restore code.
*
@@ -228,7 +148,8 @@ union __fpsimd_vreg {
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
- struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+ struct user_fpsimd_state const *fpsimd =
+ &current->thread.fpsimd_state.user_fpsimd;
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr, fpexc;
@@ -277,7 +198,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
{
- struct fpsimd_state fpsimd;
+ struct user_fpsimd_state fpsimd;
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 551eb07c53b6..3b8ad7be9c33 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -18,6 +18,7 @@
*/
#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -836,6 +837,7 @@ static void ipi_cpu_stop(unsigned int cpu)
set_cpu_online(cpu, false);
local_daif_mask();
+ sdei_mask_local_cpu();
while (1)
cpu_relax();
@@ -853,6 +855,7 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
atomic_dec(&waiting_for_crash_ipi);
local_irq_disable();
+ sdei_mask_local_cpu();
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_ops[cpu]->cpu_die)
@@ -972,6 +975,8 @@ void smp_send_stop(void)
if (num_online_cpus() > 1)
pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(cpu_online_mask));
+
+ sdei_mask_local_cpu();
}
#ifdef CONFIG_KEXEC_CORE
@@ -990,8 +995,10 @@ void crash_smp_send_stop(void)
cpus_stopped = 1;
- if (num_online_cpus() == 1)
+ if (num_online_cpus() == 1) {
+ sdei_mask_local_cpu();
return;
+ }
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
@@ -1009,6 +1016,8 @@ void crash_smp_send_stop(void)
if (atomic_read(&waiting_for_crash_ipi) > 0)
pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(&mask));
+
+ sdei_mask_local_cpu();
}
bool smp_crash_stop_failed(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 3fe5ad884418..a307b9e13392 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -2,6 +2,7 @@
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
@@ -51,8 +52,7 @@ void notrace __cpu_suspend_exit(void)
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
*/
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
- CONFIG_ARM64_PAN));
+ __uaccess_enable_hw_pan();
uao_thread_switch(current);
/*
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 8d48b233e6ce..21868530018e 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -37,18 +37,14 @@ static int __init get_cpu_for_node(struct device_node *node)
if (!cpu_node)
return -1;
- for_each_possible_cpu(cpu) {
- if (of_get_cpu_node(cpu, NULL) == cpu_node) {
- topology_parse_cpu_capacity(cpu_node, cpu);
- of_node_put(cpu_node);
- return cpu;
- }
- }
-
- pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (cpu >= 0)
+ topology_parse_cpu_capacity(cpu_node, cpu);
+ else
+ pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
of_node_put(cpu_node);
- return -1;
+ return cpu;
}
static int __init parse_core(struct device_node *core, int cluster_id,
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 3d3588fcd1c7..bbb0fde2780e 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -662,17 +662,58 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
}
#endif
-asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
- nmi_enter();
-
console_verbose();
pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
smp_processor_id(), esr, esr_get_class_string(esr));
- __show_regs(regs);
+ if (regs)
+ __show_regs(regs);
+
+ nmi_panic(regs, "Asynchronous SError Interrupt");
+
+ cpu_park_loop();
+ unreachable();
+}
+
+bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
+{
+ u32 aet = arm64_ras_serror_get_severity(esr);
+
+ switch (aet) {
+ case ESR_ELx_AET_CE: /* corrected error */
+ case ESR_ELx_AET_UEO: /* restartable, not yet consumed */
+ /*
+ * The CPU can make progress. We may take UEO again as
+ * a more severe error.
+ */
+ return false;
+
+ case ESR_ELx_AET_UEU: /* Uncorrected Unrecoverable */
+ case ESR_ELx_AET_UER: /* Uncorrected Recoverable */
+ /*
+ * The CPU can't make progress. The exception may have
+ * been imprecise.
+ */
+ return true;
+
+ case ESR_ELx_AET_UC: /* Uncontainable or Uncategorized error */
+ default:
+ /* Error has been silently propagated */
+ arm64_serror_panic(regs, esr);
+ }
+}
+
+asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+{
+ nmi_enter();
+
+ /* non-RAS errors are not containable */
+ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
+ arm64_serror_panic(regs, esr);
- panic("Asynchronous SError Interrupt");
+ nmi_exit();
}
void __pte_error(const char *file, int line, unsigned long val)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 7da3e5c366a0..0221aca6493d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -57,6 +57,17 @@ jiffies = jiffies_64;
#define HIBERNATE_TEXT
#endif
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \
+ *(.entry.tramp.text) \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+#else
+#define TRAMP_TEXT
+#endif
+
/*
* The size of the PE/COFF section that covers the kernel image, which
* runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -113,6 +124,7 @@ SECTIONS
HYPERVISOR_TEXT
IDMAP_TEXT
HIBERNATE_TEXT
+ TRAMP_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
@@ -206,13 +218,19 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
idmap_pg_dir = .;
. += IDMAP_DIR_SIZE;
- swapper_pg_dir = .;
- . += SWAPPER_DIR_SIZE;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_pg_dir = .;
+ . += PAGE_SIZE;
+#endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
reserved_ttbr0 = .;
. += RESERVED_TTBR0_SIZE;
#endif
+ swapper_pg_dir = .;
+ . += SWAPPER_DIR_SIZE;
+ swapper_pg_end = .;
__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
_end = .;
@@ -234,7 +252,10 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
-
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+ "Entry trampoline text too big")
+#endif
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 304203fa9e33..520b0dad3c62 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -23,18 +23,26 @@
#include <linux/kvm_host.h>
#include <asm/esr.h>
+#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
#include <asm/debug-monitors.h>
+#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
+{
+ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
+ kvm_inject_vabt(vcpu);
+}
+
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
@@ -45,7 +53,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_psci_call(vcpu);
if (ret < 0) {
- kvm_inject_undefined(vcpu);
+ vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
}
@@ -54,7 +62,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- kvm_inject_undefined(vcpu);
+ vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
}
@@ -242,7 +250,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
*vcpu_pc(vcpu) -= adj;
}
- kvm_inject_vabt(vcpu);
return 1;
}
@@ -252,7 +259,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_EL1_SERROR:
- kvm_inject_vabt(vcpu);
/* We may still need to return for single-step */
if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
&& kvm_arm_handle_step_debug(vcpu, run))
@@ -275,3 +281,25 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
return 0;
}
}
+
+/* For exit types that need handling before we can be preempted */
+void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index)
+{
+ if (ARM_SERROR_PENDING(exception_index)) {
+ if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
+ u64 disr = kvm_vcpu_get_disr(vcpu);
+
+ kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
+ } else {
+ kvm_inject_vabt(vcpu);
+ }
+
+ return;
+ }
+
+ exception_index = ARM_EXCEPTION_CODE(exception_index);
+
+ if (exception_index == ARM_EXCEPTION_EL1_SERROR)
+ kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+}
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 870828c364c5..e086c6eff8c6 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -63,7 +63,8 @@ __do_hyp_init:
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
- msr ttbr0_el2, x0
+ phys_to_ttbr x0, x4
+ msr ttbr0_el2, x4
mrs x4, tcr_el1
ldr x5, =TCR_EL2_MASK
@@ -71,30 +72,27 @@ __do_hyp_init:
mov x5, #TCR_EL2_RES1
orr x4, x4, x5
-#ifndef CONFIG_ARM64_VA_BITS_48
/*
- * If we are running with VA_BITS < 48, we may be running with an extra
- * level of translation in the ID map. This is only the case if system
- * RAM is out of range for the currently configured page size and number
- * of translation levels, in which case we will also need the extra
- * level for the HYP ID map, or we won't be able to enable the EL2 MMU.
+ * The ID map may be configured to use an extended virtual address
+ * range. This is only the case if system RAM is out of range for the
+ * currently configured page size and VA_BITS, in which case we will
+ * also need the extended virtual range for the HYP ID map, or we won't
+ * be able to enable the EL2 MMU.
*
* However, at EL2, there is only one TTBR register, and we can't switch
* between translation tables *and* update TCR_EL2.T0SZ at the same
- * time. Bottom line: we need the extra level in *both* our translation
- * tables.
+ * time. Bottom line: we need to use the extended range with *both* our
+ * translation tables.
*
* So use the same T0SZ value we use for the ID map.
*/
ldr_l x5, idmap_t0sz
bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
-#endif
+
/*
- * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
- * TCR_EL2.
+ * Set the PS bits in TCR_EL2.
*/
- mrs x5, ID_AA64MMFR0_EL1
- bfi x4, x5, #16, #3
+ tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6
msr tcr_el2, x4
@@ -122,6 +120,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
kern_hyp_va x2
msr vbar_el2, x2
+ /* copy tpidr_el1 into tpidr_el2 for use by HYP */
+ mrs x1, tpidr_el1
+ msr tpidr_el2, x1
+
/* Hello, World! */
eret
ENDPROC(__kvm_hyp_init)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 12ee62d6d410..fdd1068ee3a5 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -62,8 +62,8 @@ ENTRY(__guest_enter)
// Store the host regs
save_callee_saved_regs x1
- // Store the host_ctxt for use at exit time
- str x1, [sp, #-16]!
+ // Store host_ctxt and vcpu for use at exit time
+ stp x1, x0, [sp, #-16]!
add x18, x0, #VCPU_CONTEXT
@@ -124,6 +124,17 @@ ENTRY(__guest_exit)
// Now restore the host regs
restore_callee_saved_regs x2
+alternative_if ARM64_HAS_RAS_EXTN
+ // If we have the RAS extensions we can consume a pending error
+ // without an unmask-SError and isb.
+ esb
+ mrs_s x2, SYS_DISR_EL1
+ str x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
+ cbz x2, 1f
+ msr_s SYS_DISR_EL1, xzr
+ orr x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
+1: ret
+alternative_else
// If we have a pending asynchronous abort, now is the
// time to find out. From your VAXorcist book, page 666:
// "Threaten me not, oh Evil one! For I speak with
@@ -134,7 +145,9 @@ ENTRY(__guest_exit)
mov x5, x0
dsb sy // Synchronize against in-flight ld/st
+ nop
msr daifclr, #4 // Unmask aborts
+alternative_endif
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
@@ -159,6 +172,10 @@ abort_guest_exit_end:
ENDPROC(__guest_exit)
ENTRY(__fpsimd_guest_restore)
+ // x0: esr
+ // x1: vcpu
+ // x2-x29,lr: vcpu regs
+ // vcpu x0-x1 on the stack
stp x2, x3, [sp, #-16]!
stp x4, lr, [sp, #-16]!
@@ -173,7 +190,7 @@ alternative_else
alternative_endif
isb
- mrs x3, tpidr_el2
+ mov x3, x1
ldr x0, [x3, #VCPU_HOST_CONTEXT]
kern_hyp_va x0
@@ -196,3 +213,15 @@ alternative_endif
eret
ENDPROC(__fpsimd_guest_restore)
+
+ENTRY(__qcom_hyp_sanitize_btac_predictors)
+ /**
+ * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
+ * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
+ * b15-b0: contains SiP functionID
+ */
+ movz x0, #0x1700
+ movk x0, #0xc200, lsl #16
+ smc #0
+ ret
+ENDPROC(__qcom_hyp_sanitize_btac_predictors)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 5170ce1021da..e4f37b9dd47c 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -104,6 +104,7 @@ el1_trap:
/*
* x0: ESR_EC
*/
+ ldr x1, [sp, #16 + 8] // vcpu stored by __guest_enter
/*
* We trap the first access to the FP/SIMD to save the host context
@@ -116,19 +117,18 @@ alternative_if_not ARM64_HAS_NO_FPSIMD
b.eq __fpsimd_guest_restore
alternative_else_nop_endif
- mrs x1, tpidr_el2
mov x0, #ARM_EXCEPTION_TRAP
b __guest_exit
el1_irq:
stp x0, x1, [sp, #-16]!
- mrs x1, tpidr_el2
+ ldr x1, [sp, #16 + 8]
mov x0, #ARM_EXCEPTION_IRQ
b __guest_exit
el1_error:
stp x0, x1, [sp, #-16]!
- mrs x1, tpidr_el2
+ ldr x1, [sp, #16 + 8]
mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit
@@ -163,6 +163,18 @@ ENTRY(__hyp_do_panic)
eret
ENDPROC(__hyp_do_panic)
+ENTRY(__hyp_panic)
+ /*
+	 * '=kvm_host_cpu_state' is a host VA from the constant pool; it may
+	 * not be accessible at this address from EL2. hyp_panic() converts
+	 * it with kern_hyp_va() before use.
+ */
+ ldr x0, =kvm_host_cpu_state
+ mrs x1, tpidr_el2
+ add x0, x0, x1
+ b hyp_panic
+ENDPROC(__hyp_panic)
+
.macro invalid_vector label, target = __hyp_panic
.align 2
\label:
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index a81f5e10fc8c..603e1ee83e89 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -32,6 +32,8 @@ u32 __hyp_text __init_stage2_translation(void)
* PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
*/
parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+ if (parange > ID_AA64MMFR0_PARANGE_MAX)
+ parange = ID_AA64MMFR0_PARANGE_MAX;
val |= parange << 16;
/* Compute the actual PARange... */
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index f7c651f3a8c0..036e1f3d77a6 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+#include <uapi/linux/psci.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
@@ -52,7 +53,7 @@ static void __hyp_text __activate_traps_vhe(void)
val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
write_sysreg(val, cpacr_el1);
- write_sysreg(__kvm_hyp_vector, vbar_el1);
+ write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
static void __hyp_text __activate_traps_nvhe(void)
@@ -93,6 +94,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
write_sysreg(val, hcr_el2);
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (val & HCR_VSE))
+ write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
+
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
/*
@@ -235,11 +239,12 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
- u64 esr = read_sysreg_el2(esr);
- u8 ec = ESR_ELx_EC(esr);
+ u8 ec;
+ u64 esr;
u64 hpfar, far;
- vcpu->arch.fault.esr_el2 = esr;
+ esr = vcpu->arch.fault.esr_el2;
+ ec = ESR_ELx_EC(esr);
if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
return true;
@@ -305,9 +310,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
u64 exit_code;
vcpu = kern_hyp_va(vcpu);
- write_sysreg(vcpu, tpidr_el2);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
__sysreg_save_host_state(host_ctxt);
@@ -332,6 +337,8 @@ again:
exit_code = __guest_enter(vcpu, host_ctxt);
/* And we're baaack! */
+ if (ARM_EXCEPTION_CODE(exit_code) != ARM_EXCEPTION_IRQ)
+ vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);
/*
* We're using the raw exception code in order to only process
* the trap if no SError is pending. We will come back to the
@@ -341,6 +348,18 @@ again:
if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
goto again;
+ if (exit_code == ARM_EXCEPTION_TRAP &&
+ (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
+ vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
+ u64 val = PSCI_RET_NOT_SUPPORTED;
+ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+ val = 2;
+
+ vcpu_set_reg(vcpu, 0, val);
+ goto again;
+ }
+
if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
exit_code == ARM_EXCEPTION_TRAP) {
bool valid;
@@ -393,6 +412,14 @@ again:
/* 0 falls through to be handled out of EL2 */
}
+ if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
+ u32 midr = read_cpuid_id();
+
+ /* Apply BTAC predictors mitigation to all Falkor chips */
+ if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
+ __qcom_hyp_sanitize_btac_predictors();
+ }
+
fp_enabled = __fpsimd_enabled();
__sysreg_save_guest_state(guest_ctxt);
@@ -422,7 +449,8 @@ again:
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
-static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
+static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
+ struct kvm_vcpu *vcpu)
{
unsigned long str_va;
@@ -436,35 +464,35 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
__hyp_do_panic(str_va,
spsr, elr,
read_sysreg(esr_el2), read_sysreg_el2(far),
- read_sysreg(hpfar_el2), par,
- (void *)read_sysreg(tpidr_el2));
+ read_sysreg(hpfar_el2), par, vcpu);
}
-static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
+static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
+ struct kvm_vcpu *vcpu)
{
panic(__hyp_panic_string,
spsr, elr,
read_sysreg_el2(esr), read_sysreg_el2(far),
- read_sysreg(hpfar_el2), par,
- (void *)read_sysreg(tpidr_el2));
+ read_sysreg(hpfar_el2), par, vcpu);
}
static hyp_alternate_select(__hyp_call_panic,
__hyp_call_panic_nvhe, __hyp_call_panic_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
-void __hyp_text __noreturn __hyp_panic(void)
+void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
{
+ struct kvm_vcpu *vcpu = NULL;
+
u64 spsr = read_sysreg_el2(spsr);
u64 elr = read_sysreg_el2(elr);
u64 par = read_sysreg(par_el1);
if (read_sysreg(vttbr_el2)) {
- struct kvm_vcpu *vcpu;
struct kvm_cpu_context *host_ctxt;
- vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = kern_hyp_va(__host_ctxt);
+ vcpu = host_ctxt->__hyp_running_vcpu;
__timer_disable_traps(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
@@ -472,7 +500,7 @@ void __hyp_text __noreturn __hyp_panic(void)
}
/* Call panic for real */
- __hyp_call_panic()(spsr, elr, par);
+ __hyp_call_panic()(spsr, elr, par, vcpu);
unreachable();
}
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 934137647837..2c17afd2be96 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
/*
* Non-VHE: Both host and guest must save everything.
*
- * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
- * pstate, and guest must save everything.
+ * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
+ * and guest must save everything.
*/
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
@@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
- ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
- ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
- ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
}
static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
@@ -62,10 +59,16 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
+ ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
+ ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
+ ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
+
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+ ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}
static hyp_alternate_select(__sysreg_call_save_host_state,
@@ -89,11 +92,8 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
- write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
- write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
- write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
}
static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
@@ -115,10 +115,16 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
+ write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
+ write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+ write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}
static hyp_alternate_select(__sysreg_call_restore_host_state,
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 8ecbcb40e317..60666a056944 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -164,14 +164,25 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
inject_undef64(vcpu);
}
+static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
+{
+ vcpu_set_vsesr(vcpu, esr);
+ vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+}
+
/**
* kvm_inject_vabt - inject an async abort / SError into the guest
* @vcpu: The VCPU to receive the exception
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
+ *
+ * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
+ * the remaining ISS all-zeros so that this error is not interpreted as an
+ * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
+ * value, so the CPU generates an imp-def value.
*/
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
- vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+ pend_guest_serror(vcpu, ESR_ELx_ISV);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 1830ebc227d1..50a43c7b97ca 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1159,6 +1159,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
+
+ { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
+
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
@@ -1169,6 +1179,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
+ { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index e88fb99c1561..3d69a8d41fa5 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -30,7 +30,7 @@
* Alignment fixed up by hardware.
*/
ENTRY(__clear_user)
- uaccess_enable_not_uao x2, x3
+ uaccess_enable_not_uao x2, x3, x4
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
@@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
b.mi 5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
- uaccess_disable_not_uao x2
+ uaccess_disable_not_uao x2, x3
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 4b5d826895ff..20305d485046 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -64,10 +64,10 @@
end .req x5
ENTRY(__arch_copy_from_user)
- uaccess_enable_not_uao x3, x4
+ uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3
+ uaccess_disable_not_uao x3, x4
mov x0, #0 // Nothing to copy
ret
ENDPROC(__arch_copy_from_user)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index b24a830419ad..fbb090f431a5 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -65,10 +65,10 @@
end .req x5
ENTRY(raw_copy_in_user)
- uaccess_enable_not_uao x3, x4
+ uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3
+ uaccess_disable_not_uao x3, x4
mov x0, #0
ret
ENDPROC(raw_copy_in_user)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 351f0766f7a6..fda6172d6b88 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -63,10 +63,10 @@
end .req x5
ENTRY(__arch_copy_to_user)
- uaccess_enable_not_uao x3, x4
+ uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3
+ uaccess_disable_not_uao x3, x4
mov x0, #0
ret
ENDPROC(__arch_copy_to_user)
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
index 0179a43cc045..d3db9b2cd479 100644
--- a/arch/arm64/lib/tishift.S
+++ b/arch/arm64/lib/tishift.S
@@ -38,19 +38,19 @@ ENTRY(__ashlti3)
ENDPROC(__ashlti3)
ENTRY(__ashrti3)
- cbz x2, 3f
+ cbz x2, 1f
mov x3, #64
sub x3, x3, x2
cmp x3, #0
- b.le 4f
+ b.le 2f
lsr x0, x0, x2
lsl x3, x1, x3
asr x2, x1, x2
orr x0, x0, x3
mov x1, x2
-3:
+1:
ret
-4:
+2:
neg w0, w3
asr x2, x1, #63
asr x0, x1, x0
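
For reference, a C sketch of the __ashrti3 algorithm: a 128-bit arithmetic
shift right built from two 64-bit halves (lo, hi) and a shift count 0..127:

	#include <stdint.h>

	static void ashrti3(uint64_t *lo, int64_t *hi, unsigned int n)
	{
		if (n == 0)
			return;
		if (n < 64) {
			/* bits shifted out of hi fill the top of lo */
			*lo = (*lo >> n) | ((uint64_t)*hi << (64 - n));
			*hi >>= n;		/* arithmetic: hi is signed */
		} else {
			*lo = (uint64_t)(*hi >> (n - 64));
			*hi >>= 63;		/* all sign bits */
		}
	}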
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 7f1dbe962cf5..91464e7f77cc 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -49,7 +49,7 @@ ENTRY(flush_icache_range)
* - end - virtual end address of region
*/
ENTRY(__flush_cache_user_range)
- uaccess_ttbr0_enable x2, x3
+ uaccess_ttbr0_enable x2, x3, x4
dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x0, x3
@@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
isb
mov x0, #0
1:
- uaccess_ttbr0_disable x1
+ uaccess_ttbr0_disable x1, x2
ret
9:
mov x0, #-EFAULT
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 6f4017046323..301417ae2ba8 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
-#define NUM_USER_ASIDS ASID_FIRST_VERSION
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
+#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
+#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
+#else
+#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
+#define asid2idx(asid) ((asid) & ~ASID_MASK)
+#define idx2asid(idx) asid2idx(idx)
+#endif
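
A quick illustration of the kpti pairing these macros implement: allocator
index i corresponds to the even/odd ASID pair {2i, 2i+1}, and asid2idx() maps
either member back to i, so allocating one member implicitly reserves its
sibling for the other half of the user/kernel pair. Stand-alone sketch
(generation bits and the ~ASID_MASK masking are dropped for clarity):

	#include <assert.h>
	#include <stdint.h>

	static inline uint64_t idx2asid(uint64_t idx)  { return idx << 1;  }
	static inline uint64_t asid2idx(uint64_t asid) { return asid >> 1; }

	int main(void)
	{
		assert(asid2idx(idx2asid(5)) == 5);	/* round trip */
		assert(asid2idx(idx2asid(5) | 1) == 5);	/* sibling shares the slot */
		return 0;
	}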
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
@@ -79,13 +88,6 @@ void verify_cpu_asid_bits(void)
}
}
-static void set_reserved_asid_bits(void)
-{
- if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) &&
- cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003))
- __set_bit(FALKOR_RESERVED_ASID, asid_map);
-}
-
static void flush_context(unsigned int cpu)
{
int i;
@@ -94,8 +96,6 @@ static void flush_context(unsigned int cpu)
/* Update the list of reserved ASIDs and the ASID bitmap. */
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
- set_reserved_asid_bits();
-
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
/*
@@ -107,7 +107,7 @@ static void flush_context(unsigned int cpu)
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
- __set_bit(asid & ~ASID_MASK, asid_map);
+ __set_bit(asid2idx(asid), asid_map);
per_cpu(reserved_asids, i) = asid;
}
@@ -162,16 +162,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
- asid &= ~ASID_MASK;
- if (!__test_and_set_bit(asid, asid_map))
+ if (!__test_and_set_bit(asid2idx(asid), asid_map))
return newasid;
}
/*
* Allocate a free ASID. If we can't find one, take a note of the
- * currently active ASIDs and mark the TLBs as requiring flushes.
- * We always count from ASID #1, as we use ASID #0 when setting a
- * reserved TTBR0 for the init_mm.
+ * currently active ASIDs and mark the TLBs as requiring flushes. We
+ * always count from ASID #2 (index 1), as we use ASID #0 when setting
+ * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+ * pairs.
*/
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
if (asid != NUM_USER_ASIDS)
@@ -188,32 +188,35 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
- return asid | generation;
+ return idx2asid(asid) | generation;
}
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
unsigned long flags;
- u64 asid;
+ u64 asid, old_active_asid;
asid = atomic64_read(&mm->context.id);
/*
* The memory ordering here is subtle.
- * If our ASID matches the current generation, then we update
- * our active_asids entry with a relaxed xchg. Racing with a
- * concurrent rollover means that either:
+ * If our active_asids is non-zero and the ASID matches the current
+ * generation, then we update the active_asids entry with a relaxed
+ * cmpxchg. Racing with a concurrent rollover means that either:
*
- * - We get a zero back from the xchg and end up waiting on the
+ * - We get a zero back from the cmpxchg and end up waiting on the
* lock. Taking the lock synchronises with the rollover and so
* we are forced to see the updated generation.
*
- * - We get a valid ASID back from the xchg, which means the
+ * - We get a valid ASID back from the cmpxchg, which means the
* relaxed xchg in flush_context will treat us as reserved
* because atomic RmWs are totally ordered for a given location.
*/
- if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
- && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
+ old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
+ if (old_active_asid &&
+ !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
+ atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
+ old_active_asid, asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
@@ -231,6 +234,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
+
+ arm64_apply_bp_hardening();
+
/*
* Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
* emulating PAN.
@@ -239,6 +245,15 @@ switch_mm_fastpath:
cpu_switch_mm(mm->pgd, mm);
}
+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+ asm(ALTERNATIVE("nop; nop; nop",
+ "ic iallu; dsb nsh; isb",
+ ARM64_WORKAROUND_CAVIUM_27456,
+ CONFIG_CAVIUM_ERRATUM_27456));
+}
+
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
@@ -254,8 +269,6 @@ static int asids_init(void)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
- set_reserved_asid_bits();
-
pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
return 0;
}
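
Note: with CONFIG_UNMAP_KERNEL_AT_EL0 (KPTI), hardware ASIDs are handed out in
even/odd pairs: each task owns the pair 2*i/2*i+1, one for its kernel-side
mappings and one for userspace, distinguished by ASID bit 0, which the entry
trampoline flips. The allocator bitmap therefore indexes pairs, which is why
NUM_USER_ASIDS halves and asid2idx()/idx2asid() appear. A small userspace demo
of the index mapping, reusing the macros from the hunk above:

	#include <stdint.h>
	#include <stdio.h>

	#define ASID_BITS	16
	#define ASID_MASK	(~((1ULL << ASID_BITS) - 1))
	#define asid2idx(asid)	(((asid) & ~ASID_MASK) >> 1)
	#define idx2asid(idx)	(((idx) << 1) & ~ASID_MASK)

	int main(void)
	{
		for (uint64_t idx = 1; idx <= 3; idx++)
			printf("idx %llu -> asid %llu -> idx %llu\n",
			       (unsigned long long)idx,
			       (unsigned long long)idx2asid(idx),
			       (unsigned long long)asid2idx(idx2asid(idx)));
		return 0;
	}
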
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b45c5bcaeccb..a96ec0181818 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -24,7 +24,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
@@ -91,46 +91,6 @@ static int __free_from_pool(void *start, size_t size)
return 1;
}
-static void *__dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
-{
- if (IS_ENABLED(CONFIG_ZONE_DMA) &&
- dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- flags |= GFP_DMA;
- if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
- struct page *page;
- void *addr;
-
- page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flags);
- if (!page)
- return NULL;
-
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- addr = page_address(page);
- memset(addr, 0, size);
- return addr;
- } else {
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
- }
-}
-
-static void __dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- bool freed;
- phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-
- freed = dma_release_from_contiguous(dev,
- phys_to_page(paddr),
- size >> PAGE_SHIFT);
- if (!freed)
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
static void *__dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs)
@@ -152,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
return addr;
}
- ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+ ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
if (!ptr)
goto no_mem;
@@ -173,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
return coherent_ptr;
no_map:
- __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+ swiotlb_free(dev, size, ptr, *dma_handle, attrs);
no_mem:
return NULL;
}
@@ -191,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
return;
vunmap(vaddr);
}
- __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+ swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
}
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
@@ -368,7 +328,7 @@ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
return 0;
}
-static const struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops arm64_swiotlb_dma_ops = {
.alloc = __dma_alloc,
.free = __dma_free,
.mmap = __swiotlb_mmap,
@@ -397,7 +357,7 @@ static int __init atomic_pool_init(void)
page = dma_alloc_from_contiguous(NULL, nr_pages,
pool_size_order, GFP_KERNEL);
else
- page = alloc_pages(GFP_DMA, pool_size_order);
+ page = alloc_pages(GFP_DMA32, pool_size_order);
if (page) {
int ret;
@@ -923,7 +883,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
if (!dev->dma_ops)
- dev->dma_ops = &swiotlb_dma_ops;
+ dev->dma_ops = &arm64_swiotlb_dma_ops;
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
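
Note: the deleted __dma_alloc_coherent()/__dma_free_coherent() pair is replaced
by the generic swiotlb_alloc()/swiotlb_free() helpers introduced elsewhere in
this series, which, as far as this hunk shows, keep the same CMA-then-swiotlb
fallback; the visible arm64 change is that coherent allocations and the atomic
pool now draw from GFP_DMA32 instead of GFP_DMA. A userspace illustration of
the mask-to-zone decision the old helper open-coded (device mask hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	#define DMA_BIT_MASK(n) ((n) == 64 ? ~0ULL : (1ULL << (n)) - 1)

	int main(void)
	{
		uint64_t coherent_mask = DMA_BIT_MASK(32); /* hypothetical */

		printf("mask %#llx -> %s\n", (unsigned long long)coherent_mask,
		       coherent_mask <= DMA_BIT_MASK(32) ?
		       "32-bit zone (GFP_DMA32)" : "any zone");
		return 0;
	}
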
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9b7f89df49db..ce441d29e7f6 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -596,7 +596,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
info.si_signo = SIGBUS;
info.si_errno = 0;
- info.si_code = 0;
+ info.si_code = BUS_FIXME;
if (esr & ESR_ELx_FnV)
info.si_addr = NULL;
else
@@ -607,70 +607,70 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
}
static const struct fault_info fault_info[] = {
- { do_bad, SIGBUS, 0, "ttbr address size fault" },
- { do_bad, SIGBUS, 0, "level 1 address size fault" },
- { do_bad, SIGBUS, 0, "level 2 address size fault" },
- { do_bad, SIGBUS, 0, "level 3 address size fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "ttbr address size fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "level 1 address size fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "level 2 address size fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "level 3 address size fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
- { do_bad, SIGBUS, 0, "unknown 8" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
- { do_bad, SIGBUS, 0, "unknown 12" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 12" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
- { do_sea, SIGBUS, 0, "synchronous external abort" },
- { do_bad, SIGBUS, 0, "unknown 17" },
- { do_bad, SIGBUS, 0, "unknown 18" },
- { do_bad, SIGBUS, 0, "unknown 19" },
- { do_sea, SIGBUS, 0, "level 0 (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 1 (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 2 (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 3 (translation table walk)" },
- { do_sea, SIGBUS, 0, "synchronous parity or ECC error" }, // Reserved when RAS is implemented
- { do_bad, SIGBUS, 0, "unknown 25" },
- { do_bad, SIGBUS, 0, "unknown 26" },
- { do_bad, SIGBUS, 0, "unknown 27" },
- { do_sea, SIGBUS, 0, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
- { do_sea, SIGBUS, 0, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
- { do_sea, SIGBUS, 0, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
- { do_sea, SIGBUS, 0, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
- { do_bad, SIGBUS, 0, "unknown 32" },
+ { do_sea, SIGBUS, BUS_FIXME, "synchronous external abort" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 17" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 18" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 19" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 0 (translation table walk)" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 1 (translation table walk)" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 2 (translation table walk)" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 3 (translation table walk)" },
+ { do_sea, SIGBUS, BUS_FIXME, "synchronous parity or ECC error" }, // Reserved when RAS is implemented
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 25" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 26" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 27" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, BUS_FIXME, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, BUS_FIXME, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, BUS_FIXME, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 32" },
{ do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
- { do_bad, SIGBUS, 0, "unknown 34" },
- { do_bad, SIGBUS, 0, "unknown 35" },
- { do_bad, SIGBUS, 0, "unknown 36" },
- { do_bad, SIGBUS, 0, "unknown 37" },
- { do_bad, SIGBUS, 0, "unknown 38" },
- { do_bad, SIGBUS, 0, "unknown 39" },
- { do_bad, SIGBUS, 0, "unknown 40" },
- { do_bad, SIGBUS, 0, "unknown 41" },
- { do_bad, SIGBUS, 0, "unknown 42" },
- { do_bad, SIGBUS, 0, "unknown 43" },
- { do_bad, SIGBUS, 0, "unknown 44" },
- { do_bad, SIGBUS, 0, "unknown 45" },
- { do_bad, SIGBUS, 0, "unknown 46" },
- { do_bad, SIGBUS, 0, "unknown 47" },
- { do_bad, SIGBUS, 0, "TLB conflict abort" },
- { do_bad, SIGBUS, 0, "Unsupported atomic hardware update fault" },
- { do_bad, SIGBUS, 0, "unknown 50" },
- { do_bad, SIGBUS, 0, "unknown 51" },
- { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
- { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" },
- { do_bad, SIGBUS, 0, "unknown 54" },
- { do_bad, SIGBUS, 0, "unknown 55" },
- { do_bad, SIGBUS, 0, "unknown 56" },
- { do_bad, SIGBUS, 0, "unknown 57" },
- { do_bad, SIGBUS, 0, "unknown 58" },
- { do_bad, SIGBUS, 0, "unknown 59" },
- { do_bad, SIGBUS, 0, "unknown 60" },
- { do_bad, SIGBUS, 0, "section domain fault" },
- { do_bad, SIGBUS, 0, "page domain fault" },
- { do_bad, SIGBUS, 0, "unknown 63" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 34" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 35" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 36" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 37" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 38" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 39" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 40" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 41" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 42" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 43" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 44" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 45" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 46" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 47" },
+ { do_bad, SIGBUS, BUS_FIXME, "TLB conflict abort" },
+ { do_bad, SIGBUS, BUS_FIXME, "Unsupported atomic hardware update fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 50" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 51" },
+ { do_bad, SIGBUS, BUS_FIXME, "implementation fault (lockdown abort)" },
+ { do_bad, SIGBUS, BUS_FIXME, "implementation fault (unsupported exclusive)" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 54" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 55" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 56" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 57" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 58" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 59" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 60" },
+ { do_bad, SIGBUS, BUS_FIXME, "section domain fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "page domain fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 63" },
};
int handle_guest_sea(phys_addr_t addr, unsigned int esr)
@@ -707,6 +707,23 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die("", regs, &info, esr);
}
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ /*
+ * We've taken an instruction abort from userspace and not yet
+ * re-enabled IRQs. If the address is a kernel address, apply
+ * BP hardening prior to enabling IRQs and pre-emption.
+ */
+ if (addr > TASK_SIZE)
+ arm64_apply_bp_hardening();
+
+ local_irq_enable();
+ do_mem_abort(addr, esr, regs);
+}
+
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
@@ -739,11 +756,11 @@ static struct fault_info __refdata debug_fault_info[] = {
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
- { do_bad, SIGBUS, 0, "unknown 3" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 3" },
{ do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
- { do_bad, SIGTRAP, 0, "aarch32 vector catch" },
+ { do_bad, SIGTRAP, TRAP_FIXME, "aarch32 vector catch" },
{ early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
- { do_bad, SIGBUS, 0, "unknown 7" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 7" },
};
void __init hook_debug_fault_code(int nr,
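
Note: BUS_FIXME and TRAP_FIXME come from the siginfo cleanup running alongside
this series: kernel-internal aliases for the ambiguous si_code value 0, used to
tag call sites that still need a real code assigned. A hedged paraphrase of the
definitions, shape only:

	/* include/uapi/asm-generic/siginfo.h, kernel-internal: */
	#ifdef __KERNEL__
	#define BUS_FIXME	0	/* Broken dup of SI_USER */
	#define TRAP_FIXME	0	/* Broken dup of SI_USER */
	#endif
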
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 00e7b900ca41..9f3c47acf8ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -217,7 +217,7 @@ static void __init reserve_elfcorehdr(void)
}
#endif /* CONFIG_CRASH_DUMP */
/*
- * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
@@ -233,8 +233,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
- if (IS_ENABLED(CONFIG_ZONE_DMA))
- max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
+ if (IS_ENABLED(CONFIG_ZONE_DMA32))
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
max_zone_pfns[ZONE_NORMAL] = max;
free_area_init_nodes(max_zone_pfns);
@@ -251,9 +251,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
memset(zone_size, 0, sizeof(zone_size));
/* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
max_dma = PFN_DOWN(arm64_dma_phys_limit);
- zone_size[ZONE_DMA] = max_dma - min;
+ zone_size[ZONE_DMA32] = max_dma - min;
#endif
zone_size[ZONE_NORMAL] = max - max_dma;
@@ -266,10 +266,10 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (start >= max)
continue;
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
if (start < max_dma) {
unsigned long dma_end = min(end, max_dma);
- zhole_size[ZONE_DMA] -= dma_end - start;
+ zhole_size[ZONE_DMA32] -= dma_end - start;
}
#endif
if (end > max_dma) {
@@ -366,6 +366,9 @@ void __init arm64_memblock_init(void)
/* Handle linux,usable-memory-range property */
fdt_enforce_memory_region();
+ /* Remove memory above our supported physical address size */
+ memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
+
/*
* Ensure that the linear region takes up exactly half of the kernel
* virtual address space. This way, we can distinguish a linear address
@@ -467,7 +470,7 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
/* 4GB maximum for 32-bit only capable devices */
- if (IS_ENABLED(CONFIG_ZONE_DMA))
+ if (IS_ENABLED(CONFIG_ZONE_DMA32))
arm64_dma_phys_limit = max_zone_dma_phys();
else
arm64_dma_phys_limit = PHYS_MASK + 1;
@@ -600,49 +603,6 @@ void __init mem_init(void)
mem_init_print_info(NULL);
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLG(b, t) b, t, ((t) - (b)) >> 30
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
- pr_notice("Virtual kernel memory layout:\n");
-#ifdef CONFIG_KASAN
- pr_notice(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
- MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
-#endif
- pr_notice(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
- MLM(MODULES_VADDR, MODULES_END));
- pr_notice(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
- MLG(VMALLOC_START, VMALLOC_END));
- pr_notice(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(_text, _etext));
- pr_notice(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(__start_rodata, __init_begin));
- pr_notice(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(__init_begin, __init_end));
- pr_notice(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(_sdata, _edata));
- pr_notice(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(__bss_start, __bss_stop));
- pr_notice(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
- MLK(FIXADDR_START, FIXADDR_TOP));
- pr_notice(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
- MLM(PCI_IO_START, PCI_IO_END));
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
- pr_notice(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
- MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
- pr_notice(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
- MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
- (unsigned long)virt_to_page(high_memory)));
-#endif
- pr_notice(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
- MLM(__phys_to_virt(memblock_start_of_DRAM()),
- (unsigned long)high_memory));
-
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
/*
* Check boundaries twice: Some fundamental inconsistencies can be
* detected at build time already.
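
Note: ZONE_DMA becomes ZONE_DMA32 throughout; the boundary computation itself
is unchanged, the lower of the end of RAM and 4 GiB. A userspace sketch of the
zone sizing with a hypothetical memory map:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

	int main(void)
	{
		uint64_t ram_end = 0x880000000ULL;	/* hypothetical */
		uint64_t dma32 = 1ULL << 32;
		uint64_t max_dma = ram_end < dma32 ? ram_end : dma32;

		printf("ZONE_DMA32 up to pfn %llu, ZONE_NORMAL up to pfn %llu\n",
		       (unsigned long long)PFN_DOWN(max_dma),
		       (unsigned long long)PFN_DOWN(ram_end));
		return 0;
	}
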
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 267d2b79d52d..b44992ec9643 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -50,6 +50,7 @@
#define NO_CONT_MAPPINGS BIT(1)
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
@@ -525,6 +526,35 @@ static int __init parse_rodata(char *arg)
}
early_param("rodata", parse_rodata);
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __init map_entry_trampoline(void)
+{
+ pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+ phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+
+ /* The trampoline is always mapped and can therefore be global */
+ pgprot_val(prot) &= ~PTE_NG;
+
+ /* Map only the text into the trampoline page table */
+ memset(tramp_pg_dir, 0, PGD_SIZE);
+ __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+ prot, pgd_pgtable_alloc, 0);
+
+ /* Map both the text and data into the kernel page table */
+ __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ extern char __entry_tramp_data_start[];
+
+ __set_fixmap(FIX_ENTRY_TRAMP_DATA,
+ __pa_symbol(__entry_tramp_data_start),
+ PAGE_KERNEL_RO);
+ }
+
+ return 0;
+}
+core_initcall(map_entry_trampoline);
+#endif
+
/*
* Create fine-grained mappings for the kernel.
*/
@@ -570,8 +600,8 @@ static void __init map_kernel(pgd_t *pgd)
* entry instead.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
- __pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
+ pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START),
+ lm_alias(bm_pmd));
pud_clear_fixmap();
} else {
BUG();
@@ -612,7 +642,8 @@ void __init paging_init(void)
* allocated with it.
*/
memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
- SWAPPER_DIR_SIZE - PAGE_SIZE);
+ __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
+ - PAGE_SIZE);
}
/*
@@ -686,7 +717,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
if (!p)
return -ENOMEM;
- set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
+ pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
} while (addr = next, addr != end);
@@ -879,15 +910,19 @@ int __init arch_ioremap_pmd_supported(void)
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
+ pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
BUG_ON(phys & ~PUD_MASK);
- set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot));
return 1;
}
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
+ pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
BUG_ON(phys & ~PMD_MASK);
- set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot));
return 1;
}
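
Note: pud_set_huge()/pmd_set_huge() now build descriptors through
pfn_pud()/pfn_pmd() rather than OR-ing in the raw physical address, so the
52-bit-PA packing introduced elsewhere in this series applies: output address
bits [51:48] are stored in descriptor bits [15:12]. A userspace sketch of that
packing (64K-page/52-bit configuration assumed, mask value hedged):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t phys = (0x8ULL << 48) | 0x40000000ULL; /* hypothetical */
		/* PA[51:48] -> descriptor[15:12] is a right shift by 36 */
		uint64_t desc = (phys | (phys >> 36)) & 0x0000fffffffff000ULL;

		printf("phys %#llx -> descriptor address bits %#llx\n",
		       (unsigned long long)phys, (unsigned long long)desc);
		return 0;
	}
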
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 051e71ec3335..289f9113a27a 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -49,6 +49,14 @@ void __init pgd_cache_init(void)
if (PGD_SIZE == PAGE_SIZE)
return;
+#ifdef CONFIG_ARM64_PA_BITS_52
+ /*
+ * With 52-bit physical addresses, the architecture requires the
+ * top-level table to be aligned to at least 64 bytes.
+ */
+ BUILD_BUG_ON(PGD_SIZE < 64);
+#endif
+
/*
* Naturally aligned pgds required by the architecture.
*/
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 95233dfc4c39..9f177aac6390 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -70,7 +70,11 @@ ENTRY(cpu_do_suspend)
mrs x8, mdscr_el1
mrs x9, oslsr_el1
mrs x10, sctlr_el1
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs x11, tpidr_el1
+alternative_else
+ mrs x11, tpidr_el2
+alternative_endif
mrs x12, sp_el0
stp x2, x3, [x0]
stp x4, xzr, [x0, #16]
@@ -116,7 +120,11 @@ ENTRY(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
msr tpidr_el1, x13
+alternative_else
+ msr tpidr_el2, x13
+alternative_endif
msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
@@ -124,6 +132,11 @@ ENTRY(cpu_do_resume)
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
+
+alternative_if ARM64_HAS_RAS_EXTN
+ msr_s SYS_DISR_EL1, xzr
+alternative_else_nop_endif
+
isb
ret
ENDPROC(cpu_do_resume)
@@ -138,13 +151,18 @@ ENDPROC(cpu_do_resume)
* - pgd_phys - physical address of new TTB
*/
ENTRY(cpu_do_switch_mm)
- pre_ttbr0_update_workaround x0, x2, x3
+ mrs x2, ttbr1_el1
mmid x1, x1 // get mm->context.id
- bfi x0, x1, #48, #16 // set the ASID
- msr ttbr0_el1, x0 // set TTBR0
+ phys_to_ttbr x0, x3
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ bfi x3, x1, #48, #16 // set the ASID field in TTBR0
+#endif
+ bfi x2, x1, #48, #16 // set the ASID
+ msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set)
isb
- post_ttbr0_update_workaround
- ret
+ msr ttbr0_el1, x3 // now update TTBR0
+ isb
+ b post_ttbr_update_workaround // Back to C code...
ENDPROC(cpu_do_switch_mm)
.pushsection ".idmap.text", "ax"
@@ -158,14 +176,16 @@ ENTRY(idmap_cpu_replace_ttbr1)
save_and_disable_daif flags=x2
adrp x1, empty_zero_page
- msr ttbr1_el1, x1
+ phys_to_ttbr x1, x3
+ msr ttbr1_el1, x3
isb
tlbi vmalle1
dsb nsh
isb
- msr ttbr1_el1, x0
+ phys_to_ttbr x0, x3
+ msr ttbr1_el1, x3
isb
restore_daif x2
@@ -214,25 +234,19 @@ ENTRY(__cpu_setup)
/*
* Prepare SCTLR
*/
- adr x5, crval
- ldp w5, w6, [x5]
- mrs x0, sctlr_el1
- bic x0, x0, x5 // clear bits
- orr x0, x0, x6 // set bits
+ mov_q x0, SCTLR_EL1_SET
/*
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
* both user and kernel.
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
- TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+ TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
tcr_set_idmap_t0sz x10, x9
/*
- * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
- * TCR_EL1.
+ * Set the IPS bits in TCR_EL1.
*/
- mrs x9, ID_AA64MMFR0_EL1
- bfi x10, x9, #32, #3
+ tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
/*
* Hardware update of the Access and Dirty bits.
@@ -249,21 +263,3 @@ ENTRY(__cpu_setup)
msr tcr_el1, x10
ret // return to head.S
ENDPROC(__cpu_setup)
-
- /*
- * We set the desired value explicitly, including those of the
- * reserved bits. The values of bits EE & E0E were set early in
- * el2_setup, which are left untouched below.
- *
- * n n T
- * U E WT T UD US IHBS
- * CE0 XWHW CZ ME TEEA S
- * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
- * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved
- * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings
- */
- .type crval, #object
-crval:
- .word 0xfcffffff // clear
- .word 0x34d5d91d // set
- .popsection
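
Note: with TCR_EL1.A1 set in the __cpu_setup hunk above, the current ASID is
taken from TTBR1_EL1[63:48] rather than TTBR0, so cpu_do_switch_mm installs it
there with the bfi (and, under SW TTBR0 PAN, mirrors it into TTBR0). A
userspace illustration of that field update:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors "bfi x2, x1, #48, #16": insert the 16-bit ASID into
	 * bits [63:48] of the TTBR value. */
	static uint64_t ttbr_set_asid(uint64_t ttbr, uint16_t asid)
	{
		ttbr &= ~(0xffffULL << 48);
		return ttbr | ((uint64_t)asid << 48);
	}

	int main(void)
	{
		uint64_t ttbr1 = 0x40001000ULL;	/* hypothetical baddr */

		printf("ttbr1 = %#llx\n", (unsigned long long)
		       ttbr_set_asid(ttbr1, 0x2a));
		return 0;
	}
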
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ba38d403abb2..1d4f1da7c58f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -31,8 +31,6 @@
#include "bpf_jit.h"
-int bpf_jit_enable __read_mostly;
-
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
@@ -99,6 +97,20 @@ static inline void emit_a64_mov_i64(const int reg, const u64 val,
}
}
+static inline void emit_addr_mov_i64(const int reg, const u64 val,
+ struct jit_ctx *ctx)
+{
+ u64 tmp = val;
+ int shift = 0;
+
+ emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
+ for (;shift < 48;) {
+ tmp >>= 16;
+ shift += 16;
+ emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
+ }
+}
+
static inline void emit_a64_mov_i(const int is64, const int reg,
const s32 val, struct jit_ctx *ctx)
{
@@ -148,7 +160,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
-#define PROLOGUE_OFFSET 8
+/* Tail call offset to jump into */
+#define PROLOGUE_OFFSET 7
static int build_prologue(struct jit_ctx *ctx)
{
@@ -200,19 +213,19 @@ static int build_prologue(struct jit_ctx *ctx)
/* Initialize tail_call_cnt */
emit(A64_MOVZ(1, tcc, 0, 0), ctx);
- /* 4 byte extra for skb_copy_bits buffer */
- ctx->stack_size = prog->aux->stack_depth + 4;
- ctx->stack_size = STACK_ALIGN(ctx->stack_size);
-
- /* Set up function call stack */
- emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
-
cur_offset = ctx->idx - idx0;
if (cur_offset != PROLOGUE_OFFSET) {
pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
cur_offset, PROLOGUE_OFFSET);
return -1;
}
+
+ /* 4 byte extra for skb_copy_bits buffer */
+ ctx->stack_size = prog->aux->stack_depth + 4;
+ ctx->stack_size = STACK_ALIGN(ctx->stack_size);
+
+ /* Set up function call stack */
+ emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
return 0;
}
@@ -260,11 +273,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit(A64_LDR64(prg, tmp, prg), ctx);
emit(A64_CBZ(1, prg, jmp_offset), ctx);
- /* goto *(prog->bpf_func + prologue_size); */
+ /* goto *(prog->bpf_func + prologue_offset); */
off = offsetof(struct bpf_prog, bpf_func);
emit_a64_mov_i64(tmp, off, ctx);
emit(A64_LDR64(tmp, prg, tmp), ctx);
emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
+ emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
emit(A64_BR(tmp), ctx);
/* out: */
@@ -376,18 +390,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_ALU64 | BPF_DIV | BPF_X:
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
- {
- const u8 r0 = bpf2a64[BPF_REG_0];
-
- /* if (src == 0) return 0 */
- jmp_offset = 3; /* skip ahead to else path */
- check_imm19(jmp_offset);
- emit(A64_CBNZ(is64, src, jmp_offset), ctx);
- emit(A64_MOVZ(1, r0, 0, 0), ctx);
- jmp_offset = epilogue_offset(ctx);
- check_imm26(jmp_offset);
- emit(A64_B(jmp_offset), ctx);
- /* else */
switch (BPF_OP(code)) {
case BPF_DIV:
emit(A64_UDIV(is64, dst, dst, src), ctx);
@@ -399,7 +401,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
break;
}
break;
- }
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
emit(A64_LSLV(is64, dst, dst, src), ctx);
@@ -603,7 +604,10 @@ emit_cond_jmp:
const u8 r0 = bpf2a64[BPF_REG_0];
const u64 func = (u64)__bpf_call_base + imm;
- emit_a64_mov_i64(tmp, func, ctx);
+ if (ctx->prog->is_func)
+ emit_addr_mov_i64(tmp, func, ctx);
+ else
+ emit_a64_mov_i64(tmp, func, ctx);
emit(A64_BLR(tmp), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
break;
@@ -835,16 +839,24 @@ static inline void bpf_flush_icache(void *start, void *end)
flush_icache_range((unsigned long)start, (unsigned long)end);
}
+struct arm64_jit_data {
+ struct bpf_binary_header *header;
+ u8 *image;
+ struct jit_ctx ctx;
+};
+
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_prog *tmp, *orig_prog = prog;
struct bpf_binary_header *header;
+ struct arm64_jit_data *jit_data;
bool tmp_blinded = false;
+ bool extra_pass = false;
struct jit_ctx ctx;
int image_size;
u8 *image_ptr;
- if (!bpf_jit_enable)
+ if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
@@ -858,13 +870,30 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
+ jit_data = prog->aux->jit_data;
+ if (!jit_data) {
+ jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+ if (!jit_data) {
+ prog = orig_prog;
+ goto out;
+ }
+ prog->aux->jit_data = jit_data;
+ }
+ if (jit_data->ctx.offset) {
+ ctx = jit_data->ctx;
+ image_ptr = jit_data->image;
+ header = jit_data->header;
+ extra_pass = true;
+ image_size = sizeof(u32) * ctx.idx;
+ goto skip_init_ctx;
+ }
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
- goto out;
+ goto out_off;
}
/* 1. Initial fake pass to compute ctx->idx. */
@@ -895,6 +924,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* 2. Now, the actual pass. */
ctx.image = (__le32 *)image_ptr;
+skip_init_ctx:
ctx.idx = 0;
build_prologue(&ctx);
@@ -920,13 +950,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_flush_icache(header, ctx.image + ctx.idx);
- bpf_jit_binary_lock_ro(header);
+ if (!prog->is_func || extra_pass) {
+ if (extra_pass && ctx.idx != jit_data->ctx.idx) {
+ pr_err_once("multi-func JIT bug %d != %d\n",
+ ctx.idx, jit_data->ctx.idx);
+ bpf_jit_binary_free(header);
+ prog->bpf_func = NULL;
+ prog->jited = 0;
+ goto out_off;
+ }
+ bpf_jit_binary_lock_ro(header);
+ } else {
+ jit_data->ctx = ctx;
+ jit_data->image = image_ptr;
+ jit_data->header = header;
+ }
prog->bpf_func = (void *)ctx.image;
prog->jited = 1;
prog->jited_len = image_size;
+ if (!prog->is_func || extra_pass) {
out_off:
- kfree(ctx.offset);
+ kfree(ctx.offset);
+ kfree(jit_data);
+ prog->aux->jit_data = NULL;
+ }
out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ?
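
Note: two details above are easy to miss. The tail-call path now deallocates
the stack frame (the added A64_ADD_I on A64_SP) before branching past the
callee's prologue, and the stack setup moves after the PROLOGUE_OFFSET
checkpoint so the target re-establishes its own frame. Separately,
emit_addr_mov_i64() always emits a fixed movz-plus-three-movk sequence, so a
call address occupies exactly four instructions regardless of its value; the
extra_pass image-size check (the ctx.idx comparison) relies on that stability.
A userspace sketch of the 16-bit slicing it performs:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors emit_addr_mov_i64(): one 16-bit chunk per instruction,
	 * at shifts 0, 16, 32 and 48 (movz, then three movk). */
	int main(void)
	{
		uint64_t val = 0xffff800010e4f000ULL; /* hypothetical address */

		for (int shift = 0; shift < 64; shift += 16)
			printf("chunk at shift %2d: %#06llx\n", shift,
			       (unsigned long long)((val >> shift) & 0xffff));
		return 0;
	}
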
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 401ceb71540c..c5f05c4a4d00 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -101,12 +101,12 @@ ENTRY(privcmd_call)
* need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
* is enabled (it implies that hardware UAO and PAN disabled).
*/
- uaccess_ttbr0_enable x6, x7
+ uaccess_ttbr0_enable x6, x7, x8
hvc XEN_IMM
/*
* Disable userspace access from kernel once the hyp call completed.
*/
- uaccess_ttbr0_disable x6
+ uaccess_ttbr0_disable x6, x7
ret
ENDPROC(privcmd_call);
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 2966b93850a1..a5aeab4e5f2d 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -56,8 +56,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
/* Given a task stack pointer, you can find its corresponding
* thread_info structure just by masking it to the THREAD_SIZE
diff --git a/arch/blackfin/include/uapi/asm/poll.h b/arch/blackfin/include/uapi/asm/poll.h
index 8b094d43e9b7..3b162f2d2970 100644
--- a/arch/blackfin/include/uapi/asm/poll.h
+++ b/arch/blackfin/include/uapi/asm/poll.h
@@ -9,8 +9,25 @@
#ifndef _UAPI__BFIN_POLL_H
#define _UAPI__BFIN_POLL_H
-#define POLLWRNORM 4 /* POLLOUT */
-#define POLLWRBAND 256
+#ifndef __KERNEL__
+#define POLLWRNORM POLLOUT
+#define POLLWRBAND (__force __poll_t)256
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
#include <asm-generic/poll.h>
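
Note: on this ABI, POLLWRNORM has historically been POLLOUT (bit 2) and
POLLWRBAND bit 8, while the generic layout puts POLLWRNORM at bit 8 and
POLLWRBAND at bit 9, so poll results must be translated at the syscall
boundary. A runnable userspace copy of the two helpers above; demangling is
not a strict inverse, because POLLOUT and POLLWRNORM share a bit here:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t mangle(uint16_t v)	/* generic -> bfin */
	{
		return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
	}

	static uint16_t demangle(uint16_t v)	/* bfin -> generic */
	{
		return (v & ~0x100) | ((v & 0x100) << 1) | ((v & 4) << 6);
	}

	int main(void)
	{
		uint16_t generic = 0x300;	/* POLLWRNORM | POLLWRBAND */
		uint16_t abi = mangle(generic);

		/* prints 0x300 -> 0x104 -> 0x304 (POLLOUT folded back in) */
		printf("%#x -> %#x -> %#x\n", generic, abi, demangle(abi));
		return 0;
	}
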
diff --git a/arch/blackfin/include/uapi/asm/siginfo.h b/arch/blackfin/include/uapi/asm/siginfo.h
index b1db506c8d2e..2dd8c9c39248 100644
--- a/arch/blackfin/include/uapi/asm/siginfo.h
+++ b/arch/blackfin/include/uapi/asm/siginfo.h
@@ -11,40 +11,6 @@
#include <linux/types.h>
#include <asm-generic/siginfo.h>
-#define UID16_SIGINFO_COMPAT_NEEDED
-
#define si_uid16 _sifields._kill._uid
-#define ILL_ILLPARAOP 2 /* illegal opcode combine ********** */
-#define ILL_ILLEXCPT 4 /* unrecoverable exception ********** */
-#define ILL_CPLB_VI 9 /* D/I CPLB protect violation ******** */
-#define ILL_CPLB_MISS 10 /* D/I CPLB miss ******** */
-#define ILL_CPLB_MULHIT 11 /* D/I CPLB multiple hit ******** */
-#undef NSIGILL
-#define NSIGILL 11
-
-/*
- * SIGBUS si_codes
- */
-#define BUS_OPFETCH 4 /* error from instruction fetch ******** */
-#undef NSIGBUS
-#define NSIGBUS 4
-
-/*
- * SIGTRAP si_codes
- */
-#define TRAP_STEP 1 /* single-step breakpoint************* */
-#define TRAP_TRACEFLOW 2 /* trace buffer overflow ************* */
-#define TRAP_WATCHPT 3 /* watchpoint match ************* */
-#define TRAP_ILLTRAP 4 /* illegal trap ************* */
-#undef NSIGTRAP
-#define NSIGTRAP 4
-
-/*
- * SIGSEGV si_codes
- */
-#define SEGV_STACKFLOW 3 /* stack overflow */
-#undef NSIGSEGV
-#define NSIGSEGV 3
-
#endif /* _UAPI_BFIN_SIGINFO_H */
diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h
index acc70c135ab8..59a5697fe0f3 100644
--- a/arch/c6x/include/asm/thread_info.h
+++ b/arch/c6x/include/asm/thread_info.h
@@ -60,9 +60,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* get the thread information struct of current task */
static inline __attribute__((const))
struct thread_info *current_thread_info(void)
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 54d3f426763b..cd5a0865c97f 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -33,6 +33,9 @@ config GENERIC_CALIBRATE_DELAY
config NO_IOPORT_MAP
def_bool y if !PCI
+config NO_DMA
+ def_bool y if !PCI
+
config FORCE_MAX_ZONEORDER
int
default 6
@@ -72,6 +75,7 @@ config CRIS
select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32
select HAVE_DEBUG_BUGVERBOSE if ETRAX_ARCH_V32
select HAVE_NMI
+ select DMA_DIRECT_OPS if PCI
config HZ
int
diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c
index 68dbe261dc57..a2986c60aaac 100644
--- a/arch/cris/arch-v10/drivers/gpio.c
+++ b/arch/cris/arch-v10/drivers/gpio.c
@@ -50,7 +50,7 @@ static ssize_t gpio_write(struct file *file, const char __user *buf,
size_t count, loff_t *off);
static int gpio_open(struct inode *inode, struct file *filp);
static int gpio_release(struct inode *inode, struct file *filp);
-static unsigned int gpio_poll(struct file *filp, struct poll_table_struct *wait);
+static __poll_t gpio_poll(struct file *filp, struct poll_table_struct *wait);
/* private data per open() of this driver */
@@ -141,9 +141,9 @@ static unsigned long dir_g_shadow; /* 1=output */
#define USE_PORTS(priv) ((priv)->minor <= GPIO_MINOR_B)
-static unsigned int gpio_poll(struct file *file, poll_table *wait)
+static __poll_t gpio_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct gpio_private *priv = file->private_data;
unsigned long data;
unsigned long flags;
diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c
index cfe9176f2205..177843c64071 100644
--- a/arch/cris/arch-v10/drivers/sync_serial.c
+++ b/arch/cris/arch-v10/drivers/sync_serial.c
@@ -157,7 +157,7 @@ static inline int sync_data_avail(struct sync_port *port);
static int sync_serial_open(struct inode *inode, struct file *file);
static int sync_serial_release(struct inode *inode, struct file *file);
-static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
+static __poll_t sync_serial_poll(struct file *filp, poll_table *wait);
static long sync_serial_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
@@ -654,12 +654,12 @@ static int sync_serial_release(struct inode *inode, struct file *file)
-static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
+static __poll_t sync_serial_poll(struct file *file, poll_table *wait)
{
int dev = MINOR(file_inode(file)->i_rdev);
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct sync_port *port;
- DEBUGPOLL(static unsigned int prev_mask = 0);
+ DEBUGPOLL(static __poll_t prev_mask = 0);
port = &ports[dev];
poll_wait(file, &port->out_wait_q, wait);
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index d688fe117dca..a3c353472a8c 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2717,37 +2717,28 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
}
}
- /* Acquire the mm page semaphore. */
- down_read(&current->mm->mmap_sem);
-
- err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
+ err = get_user_pages_fast((unsigned long)(oper.indata + prev_ix),
noinpages,
- 0, /* read access only for in data */
- inpages,
- NULL);
+ false, /* read access only for in data */
+ inpages);
if (err < 0) {
- up_read(&current->mm->mmap_sem);
nooutpages = noinpages = 0;
DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
goto error_cleanup;
}
noinpages = err;
- if (oper.do_cipher){
- err = get_user_pages((unsigned long int)oper.cipher_outdata,
+ if (oper.do_cipher) {
+ err = get_user_pages_fast((unsigned long)oper.cipher_outdata,
nooutpages,
- FOLL_WRITE, /* write access for out data */
- outpages,
- NULL);
- up_read(&current->mm->mmap_sem);
+ true, /* write access for out data */
+ outpages);
if (err < 0) {
nooutpages = 0;
DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
goto error_cleanup;
}
nooutpages = err;
- } else {
- up_read(&current->mm->mmap_sem);
}
/* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
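
Note on the conversion pattern used here: get_user_pages_fast() takes and
drops mmap_sem itself, so the explicit down_read()/up_read() bracket goes
away; in this tree its signature is (start, nr_pages, write, pages). A minimal
kernel-style sketch of the pin/unpin pairing, illustrative rather than
cryptocop-specific:

	#include <linux/mm.h>

	/* Returns the number of pages actually pinned (may be short of
	 * nr) or a negative errno; no mmap_sem handling in the caller. */
	static int pin_user_buffer(unsigned long start, int nr, bool write,
				   struct page **pages)
	{
		return get_user_pages_fast(start, nr, write, pages);
	}

	static void unpin_user_buffer(struct page **pages, int nr)
	{
		while (nr-- > 0)
			put_page(pages[nr]);
	}
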
diff --git a/arch/cris/arch-v32/drivers/pci/Makefile b/arch/cris/arch-v32/drivers/pci/Makefile
index bff7482f2444..93c8be6170b1 100644
--- a/arch/cris/arch-v32/drivers/pci/Makefile
+++ b/arch/cris/arch-v32/drivers/pci/Makefile
@@ -2,4 +2,4 @@
# Makefile for Etrax cardbus driver
#
-obj-$(CONFIG_ETRAX_CARDBUS) += bios.o dma.o
+obj-$(CONFIG_ETRAX_CARDBUS) += bios.o
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
deleted file mode 100644
index dbbd3816cc0b..000000000000
--- a/arch/cris/arch-v32/drivers/pci/dma.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Dynamic DMA mapping support.
- *
- * On cris there is no hardware dynamic DMA address translation,
- * so consistent alloc/free are merely page allocation/freeing.
- * The rest of the dynamic DMA mapping interface is implemented
- * in asm/pci.h.
- *
- * Borrowed from i386.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-
-static void *v32_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret;
-
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
-
- ret = (void *)__get_free_pages(gfp, get_order(size));
-
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = virt_to_phys(ret);
- }
- return ret;
-}
-
-static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static inline dma_addr_t v32_dma_map_page(struct device *dev,
- struct page *page, unsigned long offset, size_t size,
- enum dma_data_direction direction, unsigned long attrs)
-{
- return page_to_phys(page) + offset;
-}
-
-static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- printk("Map sg\n");
- return nents;
-}
-
-static inline int v32_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < 0x00ffffff)
- return 0;
- return 1;
-}
-
-const struct dma_map_ops v32_dma_ops = {
- .alloc = v32_dma_alloc,
- .free = v32_dma_free,
- .map_page = v32_dma_map_page,
- .map_sg = v32_dma_map_sg,
- .dma_supported = v32_dma_supported,
-};
-EXPORT_SYMBOL(v32_dma_ops);
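
Note: the ops deleted here are an open-coded direct mapping: coherent buffers
come straight from the page allocator and the bus address is just the physical
address. That is what the generic DMA_DIRECT_OPS code selected in the Kconfig
hunk above provides, so the per-arch copy can go. The identity at the heart of
it, as a trivial userspace illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Direct mapping: dma = phys (page_to_phys(page) + offset),
	 * assuming no IOMMU and no bus offset. */
	static uint64_t direct_map_page(uint64_t page_phys, uint64_t offset)
	{
		return page_phys + offset;
	}

	int main(void)
	{
		printf("dma = %#llx\n", (unsigned long long)
		       direct_map_page(0x1f000ULL, 0x80));
		return 0;
	}
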
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 8efcc1a899a8..e20e0b9a3a5c 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -178,7 +178,7 @@ static inline int sync_data_avail(struct sync_port *port);
static int sync_serial_open(struct inode *, struct file *);
static int sync_serial_release(struct inode *, struct file *);
-static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
+static __poll_t sync_serial_poll(struct file *filp, poll_table *wait);
static long sync_serial_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
@@ -555,13 +555,13 @@ static int sync_serial_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
+static __poll_t sync_serial_poll(struct file *file, poll_table *wait)
{
int dev = iminor(file_inode(file));
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct sync_port *port;
DEBUGPOLL(
- static unsigned int prev_mask;
+ static __poll_t prev_mask;
);
port = &ports[dev];
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 460349cb147f..8cf45ac30c1b 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += cmpxchg.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
deleted file mode 100644
index 1553bdb30a0c..000000000000
--- a/arch/cris/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_CRIS_DMA_MAPPING_H
-#define _ASM_CRIS_DMA_MAPPING_H
-
-#ifdef CONFIG_PCI
-extern const struct dma_map_ops v32_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &v32_dma_ops;
-}
-#else
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- BUG();
- return NULL;
-}
-#endif
-
-#endif
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 124dd5ec7f65..ee4d8b03d048 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -26,13 +26,6 @@ struct task_struct;
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
-/* THREAD_SIZE is the size of the thread_info/kernel_stack combo.
- * normally, the stack is found by doing something like p + THREAD_SIZE
- * in CRIS, a page is 8192 bytes, which seems like a sane size
- */
-#define THREAD_SIZE PAGE_SIZE
-#define THREAD_SIZE_ORDER (0)
-
/*
* At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack.
* This macro allows us to find those regs for a task.
@@ -59,8 +52,6 @@ static inline void release_thread(struct task_struct *dead_task)
/* Nothing needs to be done. */
}
-#define init_stack (init_thread_union.stack)
-
#define cpu_relax() barrier()
void default_idle(void);
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 472830c90997..996fef3be1d5 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -20,6 +20,13 @@
#endif
+/* THREAD_SIZE is the size of the thread_info/kernel_stack combo.
+ * normally, the stack is found by doing something like p + THREAD_SIZE
+ * in CRIS, a page is 8192 bytes, which seems like a sane size
+ */
+#define THREAD_SIZE PAGE_SIZE
+#define THREAD_SIZE_ORDER (0)
+
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
@@ -56,8 +63,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 6d1dbc1ba767..9b232e0f673e 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -11,6 +11,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
#ifdef CONFIG_ETRAX_VMEM_SIZE
#define __CONFIG_ETRAX_VMEM_SIZE CONFIG_ETRAX_VMEM_SIZE
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index ccba3b6ce918..0f950845fad9 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -64,9 +64,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
register struct thread_info *__current_thread_info asm("gr15");
diff --git a/arch/frv/include/uapi/asm/Kbuild b/arch/frv/include/uapi/asm/Kbuild
index 14a2e9af97e9..5354b0f84d41 100644
--- a/arch/frv/include/uapi/asm/Kbuild
+++ b/arch/frv/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += siginfo.h
generic-y += bpf_perf_event.h
diff --git a/arch/frv/include/uapi/asm/poll.h b/arch/frv/include/uapi/asm/poll.h
index 887b67288340..a44c8f0ebee7 100644
--- a/arch/frv/include/uapi/asm/poll.h
+++ b/arch/frv/include/uapi/asm/poll.h
@@ -2,12 +2,27 @@
#ifndef _ASM_POLL_H
#define _ASM_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 256
+#define POLLWRBAND (__force __poll_t)256
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
-#include <asm-generic/poll.h>
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
+#include <asm-generic/poll.h>
#undef POLLREMOVE
#endif
-
diff --git a/arch/frv/include/uapi/asm/siginfo.h b/arch/frv/include/uapi/asm/siginfo.h
deleted file mode 100644
index 4c8c975747ac..000000000000
--- a/arch/frv/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_SIGINFO_H
-#define _ASM_SIGINFO_H
-
-#include <linux/types.h>
-#include <asm-generic/siginfo.h>
-
-#define FPE_MDAOVF 9 /* media overflow */
-#undef NSIGFPE
-#define NSIGFPE 9
-
-#endif
-
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index f8d3fde08190..091d6d04b5e5 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -23,6 +23,7 @@ config H8300
select HAVE_ARCH_KGDB
select HAVE_ARCH_HASH
select CPU_NO_EFFICIENT_FFS
+ select DMA_DIRECT_OPS
config CPU_BIG_ENDIAN
def_bool y
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index bc077491d299..642752c94306 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += delay.h
generic-y += device.h
generic-y += div64.h
generic-y += dma.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h
deleted file mode 100644
index 21bb1fc3a6f1..000000000000
--- a/arch/h8300/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _H8300_DMA_MAPPING_H
-#define _H8300_DMA_MAPPING_H
-
-extern const struct dma_map_ops h8300_dma_map_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &h8300_dma_map_ops;
-}
-
-#endif
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index 072b92c0d8b5..0cdaa302d3d2 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -46,9 +46,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
index b62e830525c6..307aa51576dd 100644
--- a/arch/h8300/kernel/Makefile
+++ b/arch/h8300/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := vmlinux.lds
obj-y := process.o traps.o ptrace.o \
signal.o setup.o syscalls.o \
- irq.o entry.o dma.o
+ irq.o entry.o
obj-$(CONFIG_ROMKERNEL) += head_rom.o
obj-$(CONFIG_RAMKERNEL) += head_ram.o
diff --git a/arch/h8300/kernel/dma.c b/arch/h8300/kernel/dma.c
deleted file mode 100644
index 225dd0a188dc..000000000000
--- a/arch/h8300/kernel/dma.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-#include <linux/module.h>
-#include <asm/pgalloc.h>
-
-static void *dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret;
-
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
- if (dev == NULL || (*dev->dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
- ret = (void *)__get_free_pages(gfp, get_order(size));
-
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = virt_to_phys(ret);
- }
- return ret;
-}
-
-static void dma_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- return page_to_phys(page) + offset;
-}
-
-static int map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
- }
-
- return nents;
-}
-
-const struct dma_map_ops h8300_dma_map_ops = {
- .alloc = dma_alloc,
- .free = dma_free,
- .map_page = map_page,
- .map_sg = map_sg,
-};
-EXPORT_SYMBOL(h8300_dma_map_ops);
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 5208de242e79..263f6acbfb0f 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -37,11 +37,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return 0;
- return addr + size - 1 <= *dev->dma_mask;
-}
-
#endif
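
Note: same story as the ia64 hunk below: dma_capable() and the phys/dma
conversion helpers move into the new include/linux/dma-direct.h, so the
per-arch duplicates are dropped. The common definition has the same shape as
the code deleted here (hedged paraphrase):

	/* include/linux/dma-direct.h (as of this series, shape only): */
	static inline bool dma_capable(struct device *dev, dma_addr_t addr,
				       size_t size)
	{
		if (!dev->dma_mask)
			return false;
		return addr + size - 1 <= *dev->dma_mask;
	}
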
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 66f5e9a61efc..9e8621d94ee9 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -330,8 +330,6 @@ static inline void outsl(unsigned long port, const void *buffer, int count)
}
}
-#define flush_write_buffers() do { } while (0)
-
#endif /* __KERNEL__ */
#endif
diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h
index b80fe1db7b64..f41f9c6f0e31 100644
--- a/arch/hexagon/include/asm/thread_info.h
+++ b/arch/hexagon/include/asm/thread_info.h
@@ -84,9 +84,6 @@ struct thread_info {
.regs = NULL, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* Tacky preprocessor trickery */
#define qqstr(s) qstr(s)
#define qstr(s) #s
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 546792d176a4..ad8347c29dcf 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -19,6 +19,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <asm/dma-mapping.h>
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index ec87e67feb19..ad69d181c939 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -22,6 +22,8 @@
#include <asm/asm-offsets.h> /* Most of the kernel defines are here */
#include <asm/mem-layout.h> /* except for page_offset */
#include <asm/cache.h> /* and now we're pulling cache line size */
+#include <asm/thread_info.h> /* and we need THREAD_SIZE too */
+
OUTPUT_ARCH(hexagon)
ENTRY(stext)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 49583c5a5d44..bbe12a038d21 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -33,6 +33,7 @@ config IA64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
+ select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_HAS_SG_CHAIN
select VIRT_TO_BUS
select ARCH_DISCARD_MEMBLOCK
@@ -43,7 +44,7 @@ config IA64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD
- select ARCH_INIT_TASK
+ select ARCH_TASK_STRUCT_ON_STACK
select ARCH_TASK_STRUCT_ALLOCATOR
select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
@@ -65,7 +66,7 @@ config 64BIT
select ATA_NONSTANDARD if ATA
default y
-config ZONE_DMA
+config ZONE_DMA32
def_bool y
depends on !IA64_SGI_SN2
@@ -145,6 +146,7 @@ config IA64_GENERIC
bool "generic"
select NUMA
select ACPI_NUMA
+ select DMA_DIRECT_OPS
select SWIOTLB
select PCI_MSI
help
@@ -165,6 +167,7 @@ config IA64_GENERIC
config IA64_DIG
bool "DIG-compliant"
+ select DMA_DIRECT_OPS
select SWIOTLB
config IA64_DIG_VTD
@@ -180,6 +183,7 @@ config IA64_HP_ZX1
config IA64_HP_ZX1_SWIOTLB
bool "HP-zx1/sx1000 with software I/O TLB"
+ select DMA_DIRECT_OPS
select SWIOTLB
help
Build a kernel that runs on HP zx1 and sx1000 systems even when they
@@ -203,6 +207,7 @@ config IA64_SGI_UV
bool "SGI-UV"
select NUMA
select ACPI_NUMA
+ select DMA_DIRECT_OPS
select SWIOTLB
help
Selecting this option will optimize the kernel for use on UV based
@@ -213,6 +218,7 @@ config IA64_SGI_UV
config IA64_HP_SIM
bool "Ski-simulator"
+ select DMA_DIRECT_OPS
select SWIOTLB
depends on !PM
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index c100d780f1eb..2dd7f519ad0b 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -42,7 +42,7 @@ $(error Sorry, you need a newer version of the assembler, one that is built from

endif
KBUILD_CFLAGS += $(cflags-y)
-head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
+head-y := arch/ia64/kernel/head.o
libs-y += arch/ia64/lib/
core-y += arch/ia64/kernel/ arch/ia64/mm/
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 63d8e1d2477f..58969039bed2 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -19,7 +19,7 @@
#include <linux/export.h>
#include <asm/machvec.h>
-extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+extern const struct dma_map_ops sba_dma_ops;
/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);
diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h
index c711536674e3..a96689447a74 100644
--- a/arch/ia64/include/asm/asm-prototypes.h
+++ b/arch/ia64/include/asm/asm-prototypes.h
@@ -9,7 +9,7 @@
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/string.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unwind.h>
#include <asm/xor.h>
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 28e02c99be6d..762eeb0fcc1d 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -65,29 +65,30 @@ ia64_atomic_fetch_##op (int i, atomic_t *v) \
ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
-#define atomic_add_return(i,v) \
+#ifdef __OPTIMIZE__
+#define __ia64_atomic_const(i) __builtin_constant_p(i) ? \
+ ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
+ (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
+
+#define atomic_add_return(i, v) \
({ \
- int __ia64_aar_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
- || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
- || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
- || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
- ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
- : ia64_atomic_add(__ia64_aar_i, v); \
+ int __i = (i); \
+ static const int __ia64_atomic_p = __ia64_atomic_const(i); \
+ __ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) : \
+ ia64_atomic_add(__i, v); \
})
-#define atomic_sub_return(i,v) \
+#define atomic_sub_return(i, v) \
({ \
- int __ia64_asr_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
- || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
- || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
- || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
- ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
- : ia64_atomic_sub(__ia64_asr_i, v); \
+ int __i = (i); \
+ static const int __ia64_atomic_p = __ia64_atomic_const(i); \
+ __ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) : \
+ ia64_atomic_sub(__i, v); \
})
+#else
+#define atomic_add_return(i, v) ia64_atomic_add(i, v)
+#define atomic_sub_return(i, v) ia64_atomic_sub(i, v)
+#endif
#define atomic_fetch_add(i,v) \
({ \
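
[Note: the atomic.h hunk above folds the old open-coded constant tests into a single __ia64_atomic_const() helper. The point of the test is that ia64's fetchadd instruction can only encode the immediates +/-1, 4, 8 and 16, so only those increments may take the ia64_fetch_and_add() fast path; every other value falls back to the cmpxchg-based ia64_atomic_add()/_sub(). The #ifdef __OPTIMIZE__ guard is presumably needed because the static-const-initializer form of the test only folds to a constant when the compiler optimizes. A minimal user-space sketch of the same __builtin_constant_p() dispatch, with stand-in names rather than kernel code:]

#include <stdio.h>

static int cmpxchg_add(int i, int *v) { return *v += i; } /* stand-in for ia64_atomic_add() */
static int fetchadd(int i, int *v)    { return *v += i; } /* stand-in for ia64_fetch_and_add() */

/* True only for compile-time constants that fetchadd's immediate field
 * can encode. */
#define FETCHADD_CONST(i) (__builtin_constant_p(i) && \
        ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
         (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16))

#define add_return(i, v) \
        (FETCHADD_CONST(i) ? fetchadd(i, v) : cmpxchg_add(i, v))

int main(void)
{
        int counter = 0, n = 3;

        printf("%d\n", add_return(4, &counter)); /* constant 4: fast path */
        printf("%d\n", add_return(n, &counter)); /* runtime value: slow path */
        return 0;
}
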
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index c1bab526a046..76e4d6632d68 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -8,7 +8,6 @@
*/
#include <asm/machvec.h>
#include <linux/scatterlist.h>
-#include <asm/swiotlb.h>
#include <linux/dma-debug.h>
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -27,22 +26,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return platform_dma_get_ops(NULL);
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return 0;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr;
-}
-
#endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/arch/ia64/include/asm/dma.h b/arch/ia64/include/asm/dma.h
index 186850eec934..23604d6a2cb2 100644
--- a/arch/ia64/include/asm/dma.h
+++ b/arch/ia64/include/asm/dma.h
@@ -20,6 +20,4 @@ extern unsigned long MAX_DMA_ADDRESS;
#define free_dma(x)
-void dma_mark_clean(void *addr, size_t size);
-
#endif /* _ASM_IA64_DMA_H */
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
deleted file mode 100644
index 841e2c7d0b21..000000000000
--- a/arch/ia64/include/asm/swiotlb.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_IA64__SWIOTLB_H
-#define ASM_IA64__SWIOTLB_H
-
-#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
-
-#ifdef CONFIG_SWIOTLB
-extern int swiotlb;
-extern void pci_swiotlb_init(void);
-#else
-#define swiotlb 0
-static inline void pci_swiotlb_init(void)
-{
-}
-#endif
-
-#endif /* ASM_IA64__SWIOTLB_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 1d172a4119a7..64a1011f6812 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -12,6 +12,8 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
+#define THREAD_SIZE KERNEL_STACK_SIZE
+
#ifndef __ASSEMBLY__
/*
@@ -41,8 +43,6 @@ struct thread_info {
#endif
};
-#define THREAD_SIZE KERNEL_STACK_SIZE
-
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
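
[Note: this hunk pairs with the vmlinux.lds.S changes elsewhere in this diff, which now include asm/thread_info.h for THREAD_SIZE. The definition has to move in front of the #ifndef __ASSEMBLY__ guard so that assembler and linker-script includes, which cannot parse C declarations, still see the constant. A sketch of the pattern, with illustrative names only:]

/* thread_info_sketch.h -- illustrative pattern, not the kernel header */
#define THREAD_SIZE_SKETCH (4 * 4096)   /* plain constant: usable from C,
                                         * assembler and linker scripts */

#ifndef __ASSEMBLY__
/* C-only declarations stay behind the guard */
struct thread_info_sketch {
        unsigned long flags;
};
#endif
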
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index f5c6967a93bb..c0527cfc48f0 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -3,3 +3,4 @@ include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
generic-y += kvm_para.h
+generic-y += poll.h
diff --git a/arch/ia64/include/uapi/asm/poll.h b/arch/ia64/include/uapi/asm/poll.h
deleted file mode 100644
index b7132a305a47..000000000000
--- a/arch/ia64/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/poll.h>
diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h
index f3a02a10c3a3..5aa454ed89db 100644
--- a/arch/ia64/include/uapi/asm/siginfo.h
+++ b/arch/ia64/include/uapi/asm/siginfo.h
@@ -11,77 +11,8 @@
#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-#define HAVE_ARCH_SIGINFO_T
-#define HAVE_ARCH_COPY_SIGINFO_TO_USER
-
#include <asm-generic/siginfo.h>
-typedef struct siginfo {
- int si_signo;
- int si_errno;
- int si_code;
- int __pad0;
-
- union {
- int _pad[SI_PAD_SIZE];
-
- /* kill() */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- char _pad[sizeof(__ARCH_SI_UID_T) - sizeof(int)];
- sigval_t _sigval; /* must overlay ._rt._sigval! */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- pid_t _pid; /* which child */
- uid_t _uid; /* sender's uid */
- int _status; /* exit code */
- clock_t _utime;
- clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- void __user *_addr; /* faulting insn/memory ref. */
- int _imm; /* immediate value for "break" */
- unsigned int _flags; /* see below */
- unsigned long _isr; /* isr */
- short _addr_lsb; /* lsb of faulting address */
- union {
- /* used when si_code=SEGV_BNDERR */
- struct {
- void __user *_lower;
- void __user *_upper;
- } _addr_bnd;
- /* used when si_code=SEGV_PKUERR */
- __u32 _pkey;
- };
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- long _band; /* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */
- int _fd;
- } _sigpoll;
- } _sifields;
-} siginfo_t;
-
#define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */
#define si_flags _sifields._sigfault._flags
/*
@@ -97,37 +28,10 @@ typedef struct siginfo {
#define __ISR_VALID (1 << __ISR_VALID_BIT)
/*
- * SIGILL si_codes
- */
-#define ILL_BADIADDR 9 /* unimplemented instruction address */
-#define __ILL_BREAK 10 /* illegal break */
-#define __ILL_BNDMOD 11 /* bundle-update (modification) in progress */
-#undef NSIGILL
-#define NSIGILL 11
-
-/*
* SIGFPE si_codes
*/
#ifdef __KERNEL__
#define FPE_FIXME 0 /* Broken dup of SI_USER */
#endif /* __KERNEL__ */
-#define __FPE_DECOVF 9 /* decimal overflow */
-#define __FPE_DECDIV 10 /* decimal division by zero */
-#define __FPE_DECERR 11 /* packed decimal error */
-#define __FPE_INVASC 12 /* invalid ASCII digit */
-#define __FPE_INVDEC 13 /* invalid decimal digit */
-#undef NSIGFPE
-#define NSIGFPE 13
-
-/*
- * SIGSEGV si_codes
- */
-#define __SEGV_PSTKOVF 4 /* paragraph stack overflow */
-#undef NSIGSEGV
-#define NSIGSEGV 4
-
-#undef NSIGTRAP
-#define NSIGTRAP 4
-
#endif /* _UAPI_ASM_IA64_SIGINFO_H */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 14ad79f394e5..0b4c65a1af25 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -7,7 +7,7 @@ ifdef CONFIG_DYNAMIC_FTRACE
CFLAGS_REMOVE_ftrace.o = -pg
endif
-extra-y := head.o init_task.o vmlinux.lds
+extra-y := head.o vmlinux.lds
obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 1d29b2f8726b..1dacbf5e9e09 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -504,6 +504,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
return -1;
+ if (num_node_memblks >= NR_NODE_MEMBLKS) {
+ pr_err("NUMA: too many memblk ranges\n");
+ return -EINVAL;
+ }
+
/* record this node in proximity bitmap */
pxm_bit_set(pxm);
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 7a82c9259609..f2d57e66fd86 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
#include <linux/export.h>
/* Set this to 1 if there is a HW IOMMU in the system */
@@ -23,3 +24,11 @@ const struct dma_map_ops *dma_get_ops(struct device *dev)
return dma_ops;
}
EXPORT_SYMBOL(dma_get_ops);
+
+#ifdef CONFIG_SWIOTLB
+void __init swiotlb_dma_init(void)
+{
+ dma_ops = &swiotlb_dma_ops;
+ swiotlb_init(1);
+}
+#endif
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
deleted file mode 100644
index 8df9245e29d9..000000000000
--- a/arch/ia64/kernel/init_task.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This is where we statically allocate and initialize the initial
- * task.
- *
- * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init_task.h>
-#include <linux/mqueue.h>
-
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-/*
- * Initial task structure.
- *
- * We need to make sure that this is properly aligned due to the way process stacks are
- * handled. This is done by having a special ".data..init_task" section...
- */
-#define init_thread_info init_task_mem.s.thread_info
-#define init_stack init_task_mem.stack
-
-union {
- struct {
- struct task_struct task;
- struct thread_info thread_info;
- } s;
- unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem asm ("init_task") __init_task_data =
- {{
- .task = INIT_TASK(init_task_mem.s.task),
- .thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
-}};
-
-EXPORT_SYMBOL(init_task);
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 3ba87c22dfbc..b5df084c0af4 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -12,12 +12,7 @@
#include <asm/iommu.h>
#include <asm/machvec.h>
#include <linux/dma-mapping.h>
-
-
-#ifdef CONFIG_INTEL_IOMMU
-
#include <linux/kernel.h>
-
#include <asm/page.h>
dma_addr_t bad_dma_address __read_mostly;
@@ -104,8 +99,14 @@ void __init pci_iommu_alloc(void)
detect_intel_iommu();
#ifdef CONFIG_SWIOTLB
- pci_swiotlb_init();
-#endif
+ if (!iommu_detected) {
+#ifdef CONFIG_IA64_GENERIC
+ printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
+ machvec_init("dig");
+ swiotlb_dma_init();
+#else
+ panic("Unable to find Intel IOMMU");
+#endif /* CONFIG_IA64_GENERIC */
+ }
+#endif /* CONFIG_SWIOTLB */
}
-
-#endif
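
[Note: with pci-swiotlb.c gone (next hunk), the fallback logic lives directly in pci_iommu_alloc(): use a detected hardware IOMMU if present, otherwise fall back to swiotlb bounce buffering on generic kernels, or fail hard. A stand-alone sketch of that decision, with illustrative stand-ins rather than the kernel's symbols:]

#include <stdbool.h>
#include <stdio.h>

static bool iommu_detected;                 /* set by the probe in the kernel */

static void swiotlb_setup(void) { puts("using swiotlb bounce buffers"); }

static void dma_backend_init(bool generic_build)
{
        if (iommu_detected)
                return;                     /* hardware IOMMU wins */
        if (generic_build)
                swiotlb_setup();            /* machvec_init("dig") + swiotlb_dma_init() */
        else
                puts("panic: no usable DMA backend");
}

int main(void)
{
        dma_backend_init(true);
        return 0;
}
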
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
deleted file mode 100644
index 5e50939aa03e..000000000000
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Glue code to lib/swiotlb.c */
-
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/cache.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/swiotlb.h>
-#include <asm/dma.h>
-#include <asm/iommu.h>
-#include <asm/machvec.h>
-
-int swiotlb __read_mostly;
-EXPORT_SYMBOL(swiotlb);
-
-static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
- gfp |= GFP_DMA;
- return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-}
-
-static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
-}
-
-const struct dma_map_ops swiotlb_dma_ops = {
- .alloc = ia64_swiotlb_alloc_coherent,
- .free = ia64_swiotlb_free_coherent,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .dma_supported = swiotlb_dma_supported,
- .mapping_error = swiotlb_dma_mapping_error,
-};
-
-void __init swiotlb_dma_init(void)
-{
- dma_ops = &swiotlb_dma_ops;
- swiotlb_init(1);
-}
-
-void __init pci_swiotlb_init(void)
-{
- if (!iommu_detected) {
-#ifdef CONFIG_IA64_GENERIC
- swiotlb = 1;
- printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
- machvec_init("dig");
- swiotlb_init(1);
- dma_ops = &swiotlb_dma_ops;
-#else
- panic("Unable to find Intel IOMMU");
-#endif
- }
-}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 09f86ebfcc7b..c44f002e8f6b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1644,12 +1644,12 @@ pfm_write(struct file *file, const char __user *ubuf,
return -EINVAL;
}
-static unsigned int
+static __poll_t
pfm_poll(struct file *filp, poll_table * wait)
{
pfm_context_t *ctx;
unsigned long flags;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (PFM_IS_FILE(filp) == 0) {
printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index a254cc98f95c..54547c7cf8a2 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -105,58 +105,6 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
return err;
}
-int
-copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from)
-{
- if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
- return -EFAULT;
- if (from->si_code < 0) {
- if (__copy_to_user(to, from, sizeof(siginfo_t)))
- return -EFAULT;
- return 0;
- } else {
- int err;
-
- /*
- * If you change siginfo_t structure, please be sure this code is fixed
- * accordingly. It should never copy any pad contained in the structure
- * to avoid security leaks, but must copy the generic 3 ints plus the
- * relevant union member.
- */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_FAULT:
- err |= __put_user(from->si_flags, &to->si_flags);
- err |= __put_user(from->si_isr, &to->si_isr);
- case SIL_POLL:
- err |= __put_user(from->si_addr, &to->si_addr);
- err |= __put_user(from->si_imm, &to->si_imm);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_ptr, &to->si_ptr);
- break;
- case SIL_RT:
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_ptr, &to->si_ptr);
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- case SIL_KILL:
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_pid, &to->si_pid);
- break;
- }
- return err;
- }
-}
-
long
ia64_rt_sigreturn (struct sigscratch *scr)
{
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index c6ecb97151a2..9025699049ca 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk)
}
if (ti->softirq_time) {
- delta = cycle_to_nsec(ti->softirq_time));
+ delta = cycle_to_nsec(ti->softirq_time);
account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
}
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 58db59da0bd8..b0b2070e0591 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -3,6 +3,7 @@
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
+#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 52715a71aede..7d64b30913d1 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -237,9 +237,9 @@ paging_init (void)
unsigned long max_zone_pfns[MAX_NR_ZONES];
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- max_zone_pfns[ZONE_DMA] = max_dma;
+ max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 9b2d994cddf6..ac46f0d60b66 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -38,7 +38,7 @@ struct early_node_data {
struct ia64_node_data *node_data;
unsigned long pernode_addr;
unsigned long pernode_size;
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
unsigned long num_dma_physpages;
#endif
unsigned long min_pfn;
@@ -669,7 +669,7 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
@@ -724,8 +724,8 @@ void __init paging_init(void)
}
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = max_dma;
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
max_zone_pfns[ZONE_NORMAL] = max_pfn;
free_area_init_nodes(max_zone_pfns);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 498398d915c1..dd84ee194579 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -19,7 +19,7 @@ config M32R
select MODULES_USE_ELF_RELA
select HAVE_DEBUG_STACKOVERFLOW
select CPU_NO_EFFICIENT_FFS
- select DMA_NOOP_OPS
+ select DMA_DIRECT_OPS
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
config SBUS
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index 7e11b125c35e..ca83fda8177b 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += current.h
+generic-y += dma-mapping.h
generic-y += exec.h
generic-y += extable.h
generic-y += irq_work.h
diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h
deleted file mode 100644
index 336ffe60814b..000000000000
--- a/arch/m32r/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M32R_DMA_MAPPING_H
-#define _ASM_M32R_DMA_MAPPING_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/io.h>
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &dma_noop_ops;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-#endif /* _ASM_M32R_DMA_MAPPING_H */
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 1b653bb16f9a..a4272d8f0d9c 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -191,8 +191,6 @@ static inline void _writel(unsigned long l, unsigned long addr)
#define mmiowb()
-#define flush_write_buffers() do { } while (0) /* M32R_FIXME */
-
static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index b3a215b0ce0a..ba00f1032587 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -56,9 +56,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/m32r/include/uapi/asm/Kbuild b/arch/m32r/include/uapi/asm/Kbuild
index 451bf6071c6e..c3df55aeefe7 100644
--- a/arch/m32r/include/uapi/asm/Kbuild
+++ b/arch/m32r/include/uapi/asm/Kbuild
@@ -3,4 +3,5 @@ include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
generic-y += kvm_para.h
+generic-y += poll.h
generic-y += siginfo.h
diff --git a/arch/m32r/include/uapi/asm/poll.h b/arch/m32r/include/uapi/asm/poll.h
deleted file mode 100644
index b7132a305a47..000000000000
--- a/arch/m32r/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/poll.h>
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index cb79fba79d43..a6f300a208bd 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -115,15 +115,6 @@ static void set_eit_vector_entries(void)
_flush_cache_copyback_all();
}
-void abort(void)
-{
- BUG();
-
- /* if that doesn't kill us, halt */
- panic("Oops failed to kill thread");
-}
-EXPORT_SYMBOL(abort);
-
void __init trap_init(void)
{
set_eit_vector_entries();
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 5b5fa9831b4d..52fa7fbdefa2 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -313,7 +313,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
@@ -454,7 +453,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_FB_AMIGA=y
@@ -595,6 +593,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -624,6 +623,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -653,3 +653,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 72a7764b74ed..b00fe19e1c04 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -311,7 +311,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
@@ -422,7 +421,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -554,6 +552,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -583,6 +582,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -612,3 +612,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 884b43a2f0d9..2871d75b912f 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -311,7 +311,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
@@ -437,7 +436,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_ATARI=y
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -576,6 +574,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -605,6 +604,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -634,3 +634,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fcfa60d31499..2c90328433bf 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -309,7 +309,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
@@ -420,7 +419,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -546,6 +544,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -575,6 +574,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -604,3 +604,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 9d597bbbbbfe..177c91d63826 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -311,7 +311,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
@@ -425,7 +424,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -556,6 +554,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -585,6 +584,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -614,3 +614,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 45da20d1286c..efbcaffa30ed 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -313,7 +313,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_SWIM=m
@@ -447,7 +446,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_VALKYRIE=y
CONFIG_FB_MAC=y
@@ -578,6 +576,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -607,6 +606,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -636,3 +636,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index fda880c10861..e78a205d266a 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -323,7 +323,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
@@ -504,7 +503,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_FB_AMIGA=y
@@ -658,6 +656,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -687,6 +686,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -716,3 +716,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 7d5e4863efec..0d42ecacfd7a 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -308,7 +308,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
@@ -420,7 +419,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -546,6 +544,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -575,6 +574,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -604,3 +604,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 7763b71a9c49..8dc609c5b8aa 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -309,7 +309,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
@@ -420,7 +419,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -546,6 +544,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -575,6 +574,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -604,3 +604,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 17eaebfa3e19..11c96087fc5d 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -309,7 +309,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
@@ -437,7 +436,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -569,6 +567,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -598,6 +597,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -627,3 +627,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index d1cb7a04ae1d..ddba205ee1aa 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -306,7 +306,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
@@ -419,7 +418,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -548,6 +546,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -576,6 +575,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -605,3 +605,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index ea3a331c62d5..88d09a14c684 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -306,7 +306,6 @@ CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_LOOP=y
@@ -419,7 +418,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -548,6 +546,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -577,6 +576,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -606,3 +606,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index f42c27400dbc..9b840c03ebb7 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -33,7 +33,7 @@ struct mac_model
char ide_type;
char scc_type;
char ether_type;
- char nubus_type;
+ char expansion_type;
char floppy_type;
};
@@ -73,8 +73,11 @@ struct mac_model
#define MAC_ETHER_SONIC 1
#define MAC_ETHER_MACE 2
-#define MAC_NO_NUBUS 0
-#define MAC_NUBUS 1
+#define MAC_EXP_NONE 0
+#define MAC_EXP_PDS 1 /* Accepts only a PDS card */
+#define MAC_EXP_NUBUS 2 /* Accepts only NuBus card(s) */
+#define MAC_EXP_PDS_NUBUS 3 /* Accepts PDS card and/or NuBus card(s) */
+#define MAC_EXP_PDS_COMM 4 /* Accepts PDS card or Comm Slot card */
#define MAC_FLOPPY_IWM 0
#define MAC_FLOPPY_SWIM_ADDR1 1
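
[Note: replacing the boolean nubus_type with expansion_type lets the model table distinguish PDS-only, NuBus-only, mixed, and Comm Slot machines. The mac/config.c hunk further down uses this to register the macsonic driver whenever a Comm Slot could be carrying a SONIC card, even with ether_type unset. An illustrative sketch of that test, with assumed stand-in names:]

#include <stdbool.h>
#include <stdio.h>

enum mac_exp { EXP_NONE, EXP_PDS, EXP_NUBUS, EXP_PDS_NUBUS, EXP_PDS_COMM };

/* A SONIC device may be on-board (ether flag set) or sitting in a Comm
 * Slot, which can only be discovered by probing -- register the driver
 * in both cases. */
static bool want_sonic_probe(enum mac_exp exp, bool onboard_sonic)
{
        return onboard_sonic || exp == EXP_PDS_COMM;
}

int main(void)
{
        printf("%d\n", want_sonic_probe(EXP_PDS_COMM, false)); /* prints 1 */
        return 0;
}
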
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 928035591f2e..015f1ca38305 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -41,8 +41,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_stack (init_thread_union.stack)
-
#ifndef __ASSEMBLY__
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
@@ -58,8 +56,6 @@ static inline struct thread_info *current_thread_info(void)
}
#endif
-#define init_thread_info (init_thread_union.thread_info)
-
/* entry.S relies on these definitions!
* bits 0-7 are tested at every exception exit
* bits 8-15 are also tested at syscall exit
diff --git a/arch/m68k/include/uapi/asm/poll.h b/arch/m68k/include/uapi/asm/poll.h
index c3e3fcc15e1d..d8be239e8141 100644
--- a/arch/m68k/include/uapi/asm/poll.h
+++ b/arch/m68k/include/uapi/asm/poll.h
@@ -2,8 +2,25 @@
#ifndef __m68k_POLL_H
#define __m68k_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 256
+#define POLLWRBAND (__force __poll_t)256
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
#include <asm-generic/poll.h>
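
[Note: the mangle/demangle pair above exists because m68k's historical user ABI put POLLWRBAND at 0x100 and aliased POLLWRNORM to POLLOUT (0x004), while the generic layout uses 0x200 and 0x100. A quick stand-alone check of the bit moves -- plain C with no kernel types:]

#include <assert.h>
#include <stdint.h>

static uint16_t mangle(uint16_t v)   /* generic bits -> m68k user ABI */
{
        /* bit 9 -> bit 8, bit 8 -> bit 2 */
        return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
}

static uint16_t demangle(uint16_t v) /* m68k user ABI -> generic bits */
{
        /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
        return (v & ~0x100) | ((v & 0x100) << 1) | ((v & 4) << 6);
}

int main(void)
{
        assert(mangle(0x200) == 0x100);   /* POLLWRBAND */
        assert(mangle(0x100) == 0x004);   /* POLLWRNORM -> POLLOUT */
        assert(demangle(0x100) == 0x200); /* and back */
        assert(demangle(0x004) == 0x104); /* POLLOUT maps back to
                                           * POLLOUT|POLLWRNORM: the m68k
                                           * ABI cannot tell them apart */
        return 0;
}
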
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 87ef73a93856..c01b9b8f97bf 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -76,8 +76,6 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA;
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 16cd5cea5207..d3d435248a24 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -212,7 +212,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_IWM,
},
@@ -227,7 +227,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_IWM,
}, {
.ident = MAC_MODEL_IIX,
@@ -236,7 +236,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_IICX,
@@ -245,7 +245,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_SE30,
@@ -254,7 +254,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -272,7 +272,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_IIFX,
@@ -281,7 +281,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_IIFX,
.scc_type = MAC_SCC_IOP,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_IOP,
}, {
.ident = MAC_MODEL_IISI,
@@ -290,7 +290,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_IIVI,
@@ -299,7 +299,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_IIVX,
@@ -308,7 +308,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -323,7 +323,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_CCL,
@@ -332,7 +331,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_CCLII,
@@ -341,7 +340,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -356,7 +355,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_LCII,
@@ -365,7 +364,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_LCIII,
@@ -374,7 +373,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -395,7 +394,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q605_ACC,
@@ -404,7 +403,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q610,
@@ -414,7 +413,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q630,
@@ -424,8 +423,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.ide_type = MAC_IDE_QUADRA,
.scc_type = MAC_SCC_QUADRA,
- .ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_COMM,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q650,
@@ -435,7 +433,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
},
/* The Q700 does have a NS Sonic */
@@ -447,7 +445,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA2,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q800,
@@ -457,7 +455,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q840,
@@ -467,7 +465,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA3,
.scc_type = MAC_SCC_PSC,
.ether_type = MAC_ETHER_MACE,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_AV,
}, {
.ident = MAC_MODEL_Q900,
@@ -477,7 +475,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA2,
.scc_type = MAC_SCC_IOP,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_IOP,
}, {
.ident = MAC_MODEL_Q950,
@@ -487,7 +485,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA2,
.scc_type = MAC_SCC_IOP,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_IOP,
},
@@ -502,7 +500,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_P475,
@@ -511,7 +509,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_P475F,
@@ -520,7 +518,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_P520,
@@ -529,7 +527,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_P550,
@@ -538,7 +536,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
/* These have the comm slot, and therefore possibly SONIC ethernet */
@@ -549,8 +547,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_II,
- .ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_COMM,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_P588,
@@ -560,8 +557,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.ide_type = MAC_IDE_QUADRA,
.scc_type = MAC_SCC_II,
- .ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_COMM,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_TV,
@@ -570,7 +566,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_P600,
@@ -579,7 +574,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -596,7 +591,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_C650,
@@ -606,7 +601,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_C660,
@@ -616,7 +611,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA3,
.scc_type = MAC_SCC_PSC,
.ether_type = MAC_ETHER_MACE,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_AV,
},
@@ -633,7 +628,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB145,
@@ -642,7 +636,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB150,
@@ -652,7 +645,6 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_OLD,
.ide_type = MAC_IDE_PB,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB160,
@@ -661,7 +653,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB165,
@@ -670,7 +661,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB165C,
@@ -679,7 +669,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB170,
@@ -688,7 +677,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB180,
@@ -697,7 +685,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB180C,
@@ -706,7 +693,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB190,
@@ -716,7 +702,6 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_LATE,
.ide_type = MAC_IDE_BABOON,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB520,
@@ -726,7 +711,6 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_LATE,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -743,7 +727,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB230,
@@ -752,7 +736,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB250,
@@ -761,7 +745,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB270C,
@@ -770,7 +754,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB280,
@@ -779,7 +763,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB280C,
@@ -788,7 +772,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -1100,14 +1084,12 @@ int __init mac_platform_init(void)
* Ethernet device
*/
- switch (macintosh_config->ether_type) {
- case MAC_ETHER_SONIC:
+ if (macintosh_config->ether_type == MAC_ETHER_SONIC ||
+ macintosh_config->expansion_type == MAC_EXP_PDS_COMM)
platform_device_register_simple("macsonic", -1, NULL, 0);
- break;
- case MAC_ETHER_MACE:
+
+ if (macintosh_config->ether_type == MAC_ETHER_MACE)
platform_device_register_simple("macmace", -1, NULL, 0);
- break;
- }
return 0;
}
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 3f81892527ad..921e6c092f2c 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -53,56 +53,41 @@ void __init oss_init(void)
}
/*
- * Handle miscellaneous OSS interrupts.
+ * Handle OSS interrupts.
+ * XXX how do you clear a pending IRQ? is it even necessary?
*/
-static void oss_irq(struct irq_desc *desc)
+static void oss_iopism_irq(struct irq_desc *desc)
{
- int events = oss->irq_pending &
- (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
-
- if (events & OSS_IP_IOPSCC) {
- oss->irq_pending &= ~OSS_IP_IOPSCC;
- generic_handle_irq(IRQ_MAC_SCC);
- }
-
- if (events & OSS_IP_SCSI) {
- oss->irq_pending &= ~OSS_IP_SCSI;
- generic_handle_irq(IRQ_MAC_SCSI);
- }
-
- if (events & OSS_IP_IOPISM) {
- oss->irq_pending &= ~OSS_IP_IOPISM;
- generic_handle_irq(IRQ_MAC_ADB);
- }
+ generic_handle_irq(IRQ_MAC_ADB);
}
-/*
- * Nubus IRQ handler, OSS style
- *
- * Unlike the VIA/RBV this is on its own autovector interrupt level.
- */
+static void oss_scsi_irq(struct irq_desc *desc)
+{
+ generic_handle_irq(IRQ_MAC_SCSI);
+}
static void oss_nubus_irq(struct irq_desc *desc)
{
- int events, irq_bit, i;
+ u16 events, irq_bit;
+ int irq_num;
events = oss->irq_pending & OSS_IP_NUBUS;
- if (!events)
- return;
-
- /* There are only six slots on the OSS, not seven */
-
- i = 6;
- irq_bit = 0x40;
+ irq_num = NUBUS_SOURCE_BASE + 5;
+ irq_bit = OSS_IP_NUBUS5;
do {
- --i;
- irq_bit >>= 1;
if (events & irq_bit) {
- oss->irq_pending &= ~irq_bit;
- generic_handle_irq(NUBUS_SOURCE_BASE + i);
+ events &= ~irq_bit;
+ generic_handle_irq(irq_num);
}
- } while(events & (irq_bit - 1));
+ --irq_num;
+ irq_bit >>= 1;
+ } while (events);
+}
+
+static void oss_iopscc_irq(struct irq_desc *desc)
+{
+ generic_handle_irq(IRQ_MAC_SCC);
}
/*
@@ -122,14 +107,14 @@ static void oss_nubus_irq(struct irq_desc *desc)
void __init oss_register_interrupts(void)
{
- irq_set_chained_handler(OSS_IRQLEV_IOPISM, oss_irq);
- irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_irq);
+ irq_set_chained_handler(OSS_IRQLEV_IOPISM, oss_iopism_irq);
+ irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_scsi_irq);
irq_set_chained_handler(OSS_IRQLEV_NUBUS, oss_nubus_irq);
- irq_set_chained_handler(OSS_IRQLEV_IOPSCC, oss_irq);
+ irq_set_chained_handler(OSS_IRQLEV_IOPSCC, oss_iopscc_irq);
irq_set_chained_handler(OSS_IRQLEV_VIA1, via1_irq);
/* OSS_VIA1 gets enabled here because it has no machspec interrupt. */
- oss->irq_level[OSS_VIA1] = IRQ_AUTO_6;
+ oss->irq_level[OSS_VIA1] = OSS_IRQLEV_VIA1;
}
/*
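[Editor's note: the hunks above split the multiplexed oss_irq() into one handler per OSS interrupt level — each level already arrives on its own autovector, so testing irq_pending again buys nothing — and rewrite the NuBus handler as a descending scan from slot 5. A standalone sketch of that scan follows; the two constants are illustrative stand-ins, not the kernel's values.]

#include <stdio.h>

#define NUBUS_SOURCE_BASE 56	/* illustrative IRQ base */
#define OSS_IP_NUBUS5     0x20	/* illustrative: pending bit for slot 5 */

static void dispatch(unsigned int irq) { printf("irq %u\n", irq); }

/* Walk pending bits from the highest slot down; the caller guarantees
 * at least one bit is set, matching the do/while in the patch. */
static void nubus_scan(unsigned short events)
{
	unsigned int irq_num = NUBUS_SOURCE_BASE + 5;
	unsigned short irq_bit = OSS_IP_NUBUS5;

	do {
		if (events & irq_bit) {
			events &= ~irq_bit;
			dispatch(irq_num);
		}
		--irq_num;
		irq_bit >>= 1;
	} while (events);
}

int main(void) { nubus_scan(0x15); return 0; }	/* slots 4, 2, 0 */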
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 127d7c1f2090..03253c4f8e6a 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -21,8 +21,9 @@ extern void die_if_kernel(char *, struct pt_regs *, long);
int send_fault_sig(struct pt_regs *regs)
{
- siginfo_t siginfo = { 0, 0, 0, };
+ siginfo_t siginfo;
+ clear_siginfo(&siginfo);
siginfo.si_signo = current->thread.signo;
siginfo.si_code = current->thread.code;
siginfo.si_addr = (void *)current->thread.faddr;
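[Editor's note: the conversion above — and the matching ones in mips/kernel/traps.c later in this series — exists because a partial initializer such as `= { 0, 0, 0, }` zeroes only the named members; padding and the unused union space may keep stale kernel-stack bytes that would then be copied to userspace. clear_siginfo() closes that hole by clearing the whole object. A sketch of what the helper amounts to, written as standalone C:]

#include <signal.h>
#include <string.h>

static inline void clear_siginfo_sketch(siginfo_t *info)
{
	/* full memset: no union padding can leak, unlike a partial
	 * struct initializer */
	memset(info, 0, sizeof(*info));
}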
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index 554f73a77e6e..a1a9c7f5ca8c 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -74,9 +74,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("A0StP") __used;
diff --git a/arch/metag/include/uapi/asm/siginfo.h b/arch/metag/include/uapi/asm/siginfo.h
index b54ef7186ca3..9a3f6cde9487 100644
--- a/arch/metag/include/uapi/asm/siginfo.h
+++ b/arch/metag/include/uapi/asm/siginfo.h
@@ -6,4 +6,11 @@
#include <asm-generic/siginfo.h>
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
#endif
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index 444851e510d5..3b62b1b0c0b5 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -735,7 +735,7 @@ TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
else if (error_state & TXSTAT_FPE_INEXACT_BIT)
info.si_code = FPE_FLTRES;
else
- info.si_code = 0;
+ info.si_code = FPE_FIXME;
info.si_errno = 0;
info.si_addr = (__force void __user *)regs->ctx.CurrPC;
force_sig_info(SIGFPE, &info, current);
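[Editor's note: the point of FPE_FIXME is that a raw si_code of 0 collides with SI_USER, so a SIGFPE handler inspecting siginfo cannot tell a hardware FP trap from a plain kill(). The named constant keeps the known-broken value greppable until each user grows a real FPE_* code. The collision itself is easy to check from userspace:]

#include <assert.h>
#include <signal.h>

int main(void)
{
	assert(SI_USER == 0);	/* the value FPE_FIXME aliases */
	return 0;
}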
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 6b9ea39405b8..add50c1373bf 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -18,11 +18,11 @@
/*
* Available generic sets of operations
*/
-extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_nommu_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return &dma_direct_ops;
+ return &dma_nommu_ops;
}
#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index e7e8954e9815..9afe4b5bd6c8 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -86,9 +86,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 990bf9ea0ec6..c91e8cef98dd 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -15,42 +15,18 @@
#include <linux/bug.h>
#include <asm/cacheflush.h>
-#define NOT_COHERENT_CACHE
-
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
-#ifdef NOT_COHERENT_CACHE
return consistent_alloc(flag, size, dma_handle);
-#else
- void *ret;
- struct page *page;
- int node = dev_to_node(dev);
-
- /* ignore region specifiers */
- flag &= ~(__GFP_HIGHMEM);
-
- page = alloc_pages_node(node, flag, get_order(size));
- if (page == NULL)
- return NULL;
- ret = page_address(page);
- memset(ret, 0, size);
- *dma_handle = virt_to_phys(ret);
-
- return ret;
-#endif
}
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
-#ifdef NOT_COHERENT_CACHE
consistent_free(size, vaddr);
-#else
- free_pages((unsigned long)vaddr, get_order(size));
-#endif
}
static inline void __dma_sync(unsigned long paddr,
@@ -69,7 +45,7 @@ static inline void __dma_sync(unsigned long paddr,
}
}
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
@@ -89,12 +65,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
@@ -106,7 +77,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
return page_to_phys(page) + offset;
}
-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
@@ -122,7 +93,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
}
static inline void
-dma_direct_sync_single_for_cpu(struct device *dev,
+dma_nommu_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
@@ -136,7 +107,7 @@ dma_direct_sync_single_for_cpu(struct device *dev,
}
static inline void
-dma_direct_sync_single_for_device(struct device *dev,
+dma_nommu_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
@@ -150,7 +121,7 @@ dma_direct_sync_single_for_device(struct device *dev,
}
static inline void
-dma_direct_sync_sg_for_cpu(struct device *dev,
+dma_nommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
@@ -164,7 +135,7 @@ dma_direct_sync_sg_for_cpu(struct device *dev,
}
static inline void
-dma_direct_sync_sg_for_device(struct device *dev,
+dma_nommu_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
@@ -178,7 +149,7 @@ dma_direct_sync_sg_for_device(struct device *dev,
}
static
-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
@@ -191,12 +162,8 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
if (off >= count || user_count > (count - off))
return -ENXIO;
-#ifdef NOT_COHERENT_CACHE
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pfn = consistent_virt_to_pfn(cpu_addr);
-#else
- pfn = virt_to_pfn(cpu_addr);
-#endif
return remap_pfn_range(vma, vma->vm_start, pfn + off,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
@@ -204,20 +171,19 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
#endif
}
-const struct dma_map_ops dma_direct_ops = {
- .alloc = dma_direct_alloc_coherent,
- .free = dma_direct_free_coherent,
- .mmap = dma_direct_mmap_coherent,
- .map_sg = dma_direct_map_sg,
- .dma_supported = dma_direct_dma_supported,
- .map_page = dma_direct_map_page,
- .unmap_page = dma_direct_unmap_page,
- .sync_single_for_cpu = dma_direct_sync_single_for_cpu,
- .sync_single_for_device = dma_direct_sync_single_for_device,
- .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
- .sync_sg_for_device = dma_direct_sync_sg_for_device,
+const struct dma_map_ops dma_nommu_ops = {
+ .alloc = dma_nommu_alloc_coherent,
+ .free = dma_nommu_free_coherent,
+ .mmap = dma_nommu_mmap_coherent,
+ .map_sg = dma_nommu_map_sg,
+ .map_page = dma_nommu_map_page,
+ .unmap_page = dma_nommu_unmap_page,
+ .sync_single_for_cpu = dma_nommu_sync_single_for_cpu,
+ .sync_single_for_device = dma_nommu_sync_single_for_device,
+ .sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu,
+ .sync_sg_for_device = dma_nommu_sync_sg_for_device,
};
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
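[Editor's note: dropping the always-return-1 .dma_supported hook above is safe because the core treats a NULL hook as "every mask supported". A sketch of that dispatch, paraphrased from memory rather than quoted from the tree:]

static int dma_supported_sketch(struct device *dev,
				const struct dma_map_ops *ops, u64 mask)
{
	if (!ops->dma_supported)
		return 1;	/* NULL hook: every mask accepted */
	return ops->dma_supported(dev, mask);
}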
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 350a990fc719..ab98569994f0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -259,6 +259,7 @@ config BCM47XX
select LEDS_GPIO_REGISTER
select BCM47XX_NVRAM
select BCM47XX_SPROM
+ select BCM47XX_SSB if !BCM47XX_BCMA
help
Support for BCM47XX based boards
@@ -389,6 +390,7 @@ config LANTIQ
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_VPE_LOADER
select SYS_HAS_EARLY_PRINTK
select GPIOLIB
select SWAP_IO_SPACE
@@ -429,6 +431,7 @@ config MACH_LOONGSON32
config MACH_LOONGSON64
bool "Loongson-2/3 family of machines"
+ select ARCH_HAS_PHYS_TO_DMA
select SYS_SUPPORTS_ZBOOT
help
This enables the support of Loongson-2/3 family of machines.
@@ -516,6 +519,7 @@ config MIPS_MALTA
select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_MULTITHREADING
select SYS_SUPPORTS_SMARTMIPS
+ select SYS_SUPPORTS_VPE_LOADER
select SYS_SUPPORTS_ZBOOT
select SYS_SUPPORTS_RELOCATABLE
select USE_OF
@@ -877,6 +881,7 @@ config MIKROTIK_RB532
config CAVIUM_OCTEON_SOC
bool "Cavium Networks Octeon SoC based boards"
select CEVT_R4K
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_PHYS_ADDR_T_64BIT
select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
@@ -2281,9 +2286,16 @@ config MIPSR2_TO_R6_EMULATOR
The only reason this is a build-time option is to save ~14K from the
final kernel image.
+config SYS_SUPPORTS_VPE_LOADER
+ bool
+ depends on SYS_SUPPORTS_MULTITHREADING
+ help
+ Indicates that the platform supports the VPE loader, and provides
+ physical_memsize.
+
config MIPS_VPE_LOADER
bool "VPE loader support."
- depends on SYS_SUPPORTS_MULTITHREADING && MODULES
+ depends on SYS_SUPPORTS_VPE_LOADER && MODULES
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select MIPS_MT
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 464af5e025d6..0749c3724543 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -124,30 +124,36 @@ config SCACHE_DEBUGFS
If unsure, say N.
-menuconfig MIPS_CPS_NS16550
+menuconfig MIPS_CPS_NS16550_BOOL
bool "CPS SMP NS16550 UART output"
depends on MIPS_CPS
help
Output debug information via an ns16550 compatible UART if exceptions
occur early in the boot process of a secondary core.
-if MIPS_CPS_NS16550
+if MIPS_CPS_NS16550_BOOL
+
+config MIPS_CPS_NS16550
+ def_bool MIPS_CPS_NS16550_BASE != 0
config MIPS_CPS_NS16550_BASE
hex "UART Base Address"
default 0x1b0003f8 if MIPS_MALTA
+ default 0
help
The base address of the ns16550 compatible UART on which to output
debug information from the early stages of core startup.
+ This is only used if non-zero.
+
config MIPS_CPS_NS16550_SHIFT
int "UART Register Shift"
- default 0 if MIPS_MALTA
+ default 0
help
The number of bits to shift ns16550 register indices by in order to
form their addresses. That is, log base 2 of the span between
adjacent ns16550 registers in the system.
-endif # MIPS_CPS_NS16550
+endif # MIPS_CPS_NS16550_BOOL
endmenu
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 4674f1efbe7a..e1675c25d5d4 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void)
uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
- uart_port.flags = UPF_FIXED_TYPE;
+ uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
uart_port.regshift = 2;
uart_port.line = 0;
diff --git a/arch/mips/ath25/devices.c b/arch/mips/ath25/devices.c
index e1156347da53..301a9028273c 100644
--- a/arch/mips/ath25/devices.c
+++ b/arch/mips/ath25/devices.c
@@ -73,6 +73,7 @@ const char *get_system_type(void)
void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
struct uart_port s;
memset(&s, 0, sizeof(s));
@@ -85,6 +86,7 @@ void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
s.uartclk = uartclk;
early_serial_setup(&s);
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
}
int __init ath25_add_wmac(int nr, u32 base, int irq)
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index e8284771d620..07b4c65a88a4 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -265,6 +265,14 @@ int __init bcm63xx_enet_register(int unit,
dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
}
+ if (unit == 0) {
+ dpd->rx_chan = 0;
+ dpd->tx_chan = 1;
+ } else {
+ dpd->rx_chan = 2;
+ dpd->tx_chan = 3;
+ }
+
ret = platform_device_register(pdev);
if (ret)
return ret;
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 204a1670fd9b..b5eee1a57d6c 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -75,6 +75,7 @@ config NEED_SG_DMA_LENGTH
config SWIOTLB
def_bool y
+ select DMA_DIRECT_OPS
select IOMMU_HELPER
select NEED_SG_DMA_LENGTH
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index c64bd87f0b6e..c7bb8a407041 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -159,36 +159,13 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- void *ret;
-
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
- if (IS_ENABLED(CONFIG_ZONE_DMA) && dev == NULL)
- gfp |= __GFP_DMA;
- else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
- dev->coherent_dma_mask <= DMA_BIT_MASK(24))
- gfp |= __GFP_DMA;
- else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
- dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- gfp |= __GFP_DMA32;
-
- /* Don't invoke OOM killer */
- gfp |= __GFP_NORETRY;
-
- ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+ void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
mb();
return ret;
}
-static void octeon_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr;
@@ -228,7 +205,7 @@ EXPORT_SYMBOL(dma_to_phys);
static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
.dma_map_ops = {
.alloc = octeon_dma_alloc_coherent,
- .free = octeon_dma_free_coherent,
+ .free = swiotlb_free,
.map_page = octeon_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = octeon_dma_map_sg,
@@ -314,7 +291,7 @@ void __init plat_swiotlb_setup(void)
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
.dma_map_ops = {
.alloc = octeon_dma_alloc_coherent,
- .free = octeon_dma_free_coherent,
+ .free = swiotlb_free,
.map_page = octeon_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = octeon_dma_map_sg,
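[Editor's note: both Octeon hunks shrink to the same shape — GFP zone selection and the OOM-killer opt-out move into the generic swiotlb_alloc()/swiotlb_free() pulled in via DMA_DIRECT_OPS, leaving the platform hook with only the allocation plus its ordering barrier. A sketch of the resulting wrapper pattern, assuming the swiotlb_alloc() signature shown in the hunk:]

static void *platform_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	void *ret = swiotlb_alloc(dev, size, handle, gfp, attrs);

	mb();	/* order the zeroing/setup before the device can see it */
	return ret;
}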
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 92fca3c42eac..5651f4d8f45c 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -82,7 +82,6 @@ CONFIG_MAC80211=m
CONFIG_MAC80211_RC_PID=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
diff --git a/arch/mips/configs/ath25_defconfig b/arch/mips/configs/ath25_defconfig
index 2c829950be17..b8d48038e74f 100644
--- a/arch/mips/configs/ath25_defconfig
+++ b/arch/mips/configs/ath25_defconfig
@@ -38,7 +38,6 @@ CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_DEBUGFS=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 25ed914933e5..951c4231bdb8 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -39,7 +39,6 @@ CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_DEBUGFS=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
index 52192c632ae8..41190c2036e6 100644
--- a/arch/mips/configs/pic32mzda_defconfig
+++ b/arch/mips/configs/pic32mzda_defconfig
@@ -26,7 +26,6 @@ CONFIG_BINFMT_MISC=m
# CONFIG_SUSPEND is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_SCSI=y
diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
index 3f1333517405..3b02ff9a7c64 100644
--- a/arch/mips/configs/qi_lb60_defconfig
+++ b/arch/mips/configs/qi_lb60_defconfig
@@ -42,7 +42,6 @@ CONFIG_TCP_CONG_WESTWOOD=y
# CONFIG_TCP_CONG_HTCP is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 99679e514042..5f71aa598b06 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -325,15 +325,6 @@ CONFIG_USB_SERIAL_EDGEPORT=m
CONFIG_USB_SERIAL_EDGEPORT_TI=m
CONFIG_USB_SERIAL_KEYSPAN_PDA=m
CONFIG_USB_SERIAL_KEYSPAN=m
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_KLSI=m
CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
index c695b7b1c4ae..dbe6a4639d05 100644
--- a/arch/mips/configs/rt305x_defconfig
+++ b/arch/mips/configs/rt305x_defconfig
@@ -76,7 +76,6 @@ CONFIG_VLAN_8021Q=y
CONFIG_NET_SCHED=y
CONFIG_HAMRADIO=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
index 4365108bef77..fa750d501c11 100644
--- a/arch/mips/configs/xway_defconfig
+++ b/arch/mips/configs/xway_defconfig
@@ -75,7 +75,6 @@ CONFIG_VLAN_8021Q=y
CONFIG_NET_SCHED=y
CONFIG_HAMRADIO=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/mips/include/asm/asm-prototypes.h b/arch/mips/include/asm/asm-prototypes.h
index d60b57f34e92..576f1a62dea9 100644
--- a/arch/mips/include/asm/asm-prototypes.h
+++ b/arch/mips/include/asm/asm-prototypes.h
@@ -3,5 +3,5 @@
#include <asm/page.h>
#include <asm/fpu.h>
#include <asm-generic/asm-prototypes.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/ftrace.h>
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 49691331ada4..946681db8dc3 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -126,79 +126,6 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-/* Can't use the generic version because si_code and si_errno are swapped */
-
-#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_code;
- int si_errno;
-
- union {
- int _pad[128 / sizeof(int) - 3];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid32_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid32_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- __compat_uid32_t _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- compat_uptr_t _addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
- short _addr_lsb; /* LSB of the reported address */
- struct {
- compat_uptr_t _lower;
- compat_uptr_t _upper;
- } _addr_bnd;
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- struct {
- compat_uptr_t _call_addr; /* calling insn */
- int _syscall; /* triggering system call number */
- compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h
new file mode 100644
index 000000000000..f32f15530aba
--- /dev/null
+++ b/arch/mips/include/asm/dma-direct.h
@@ -0,0 +1 @@
+#include <asm/dma-coherence.h>
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 0d9418d264f9..886e75a383f2 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -17,16 +17,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return mips_dma_map_ops;
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size <= *dev->dma_mask;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
-
#define arch_setup_dma_ops arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
u64 size, const struct iommu_ops *iommu,
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
index c0bd47444cff..da39e4d326ba 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -55,6 +55,10 @@ struct bcm63xx_enet_platform_data {
/* DMA descriptor shift */
unsigned int dma_desc_shift;
+
+ /* dma channel ids */
+ int rx_chan;
+ int tx_chan;
};
/*
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
index 9110988b92a1..138edf6b5b48 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -61,6 +61,14 @@ static inline void plat_post_dma_flush(struct device *dev)
{
}
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
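[Editor's note: these per-platform dma_capable() copies also fix an off-by-one relative to the generic version removed from asm/dma-mapping.h below — a buffer ending exactly at the mask boundary is addressable, but `addr + size` overshoots the mask by one. A standalone check of the boundary case:]

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = 0xffffffffULL;			/* 32-bit DMA mask */
	uint64_t addr = 0xfffff000ULL, size = 0x1000;	/* ends at 4 GiB */

	assert(!(addr + size     <= mask));	/* old test: false negative */
	assert(  addr + size - 1 <= mask);	/* new test: correctly capable */
	return 0;
}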
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 61addb1677e9..8ad7a40ca786 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -70,16 +70,4 @@ static inline void plat_post_dma_flush(struct device *dev)
}
#endif
-#ifdef CONFIG_SWIOTLB
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr;
-}
-#endif
-
#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/dma-coherence.h b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
index 1602a9e9e8c2..b1b575f5c6c1 100644
--- a/arch/mips/include/asm/mach-loongson64/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
@@ -17,6 +17,14 @@
struct device;
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index a6e6cbebe046..57616649b4f3 100644
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -87,9 +87,6 @@ unsigned int nlm_get_cpu_frequency(void);
extern const struct plat_smp_ops nlm_smp_ops;
extern char nlm_reset_entry[], nlm_reset_entry_end[];
-/* SWIOTLB */
-extern const struct dma_map_ops nlm_swiotlb_dma_ops;
-
extern unsigned int nlm_threads_per_core;
extern cpumask_t nlm_cpumask;
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 1a508a74d48d..129e0328367f 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -534,6 +534,9 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
+#define pmdp_establish generic_pmdp_establish
+
#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);
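[Editor's note: generic_pmdp_establish is the fallback for architectures whose hardware does not track dirty/accessed bits in the pmd, so a plain read-then-write under the page-table lock loses nothing. A paraphrased sketch of what the generic helper does — treat the exact form and locking requirements as an assumption:]

static inline pmd_t pmdp_establish_sketch(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old_pmd = *pmdp;		/* caller holds the pmd lock */

	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	return old_pmd;
}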
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 5e8927f99a76..4993db40482c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -49,9 +49,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* How to get the thread information struct from C. */
register struct thread_info *__current_thread_info __asm__("$28");
diff --git a/arch/mips/include/uapi/asm/poll.h b/arch/mips/include/uapi/asm/poll.h
index ad289d7b7434..3173f8917128 100644
--- a/arch/mips/include/uapi/asm/poll.h
+++ b/arch/mips/include/uapi/asm/poll.h
@@ -2,8 +2,25 @@
#ifndef __ASM_POLL_H
#define __ASM_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 0x0100
+#define POLLWRBAND (__force __poll_t)0x0100
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
#include <asm-generic/poll.h>
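[Editor's note: the mangle/demangle pair above translates between the kernel-internal (generic) poll bit layout and the historical MIPS ABI, where POLLWRNORM aliases POLLOUT (bit 2) and POLLWRBAND sits at bit 8. The round trip can be checked standalone; the GEN_* constants below assume the generic layout:]

#include <assert.h>
#include <stdint.h>

#define GEN_POLLOUT	0x0004
#define GEN_POLLWRNORM	0x0100
#define GEN_POLLWRBAND	0x0200

static uint16_t mangle(uint16_t v)
{
	return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
}

static uint16_t demangle(uint16_t v)
{
	return (v & ~0x100) | ((v & 0x100) << 1) | ((v & 4) << 6);
}

int main(void)
{
	/* generic POLLWRBAND lands on the MIPS value 0x100 */
	assert(mangle(GEN_POLLWRBAND) == 0x0100);
	/* generic POLLWRNORM collapses onto MIPS POLLOUT */
	assert(mangle(GEN_POLLWRNORM | GEN_POLLOUT) == 0x0004);
	/* MIPS POLLOUT expands back to POLLOUT | POLLWRNORM */
	assert(demangle(0x0004) == (GEN_POLLOUT | GEN_POLLWRNORM));
	return 0;
}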
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index f17d8163dec6..262504bd59a5 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -14,8 +14,6 @@
#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int))
#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */
-#define HAVE_ARCH_SIGINFO_T
-
/*
* Careful to keep union _sifields from shifting ...
*/
@@ -27,92 +25,10 @@
#error _MIPS_SZLONG neither 32 nor 64
#endif
-#define __ARCH_SIGSYS
+#define __ARCH_HAS_SWAPPED_SIGINFO
#include <asm-generic/siginfo.h>
-/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
-typedef struct siginfo {
- int si_signo;
- int si_code;
- int si_errno;
- int __pad0[SI_MAX_SIZE / sizeof(int) - SI_PAD_SIZE - 3];
-
- union {
- int _pad[SI_PAD_SIZE];
-
- /* kill() */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- __kernel_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
- sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
- sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- __kernel_pid_t _pid; /* which child */
- __ARCH_SI_UID_T _uid; /* sender's uid */
- int _status; /* exit code */
- __kernel_clock_t _utime;
- __kernel_clock_t _stime;
- } _sigchld;
-
- /* IRIX SIGCHLD */
- struct {
- __kernel_pid_t _pid; /* which child */
- __kernel_clock_t _utime;
- int _status; /* exit code */
- __kernel_clock_t _stime;
- } _irix_sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- void __user *_addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
- short _addr_lsb;
- union {
- /* used when si_code=SEGV_BNDERR */
- struct {
- void __user *_lower;
- void __user *_upper;
- } _addr_bnd;
- /* used when si_code=SEGV_PKUERR */
- __u32 _pkey;
- };
- } _sigfault;
-
- /* SIGPOLL, SIGXFSZ (To do ...) */
- struct {
- __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- /* SIGSYS */
- struct {
- void __user *_call_addr; /* calling user insn */
- int _syscall; /* triggering system call number */
- unsigned int _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} siginfo_t;
-
/*
* si_code values
* Again these have been chosen to be IRIX compatible.
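[Editor's note: the hand-rolled siginfo_t deleted above could only go once the generic code learned, via __ARCH_HAS_SWAPPED_SIGINFO, what made MIPS special — relative to the generic layout, the second and third header fields trade places. Reduced to a sketch:]

struct generic_head { int si_signo; int si_errno; int si_code; };
struct mips_head    { int si_signo; int si_code;  int si_errno; };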
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index c7ed26029cbb..e68e6e04063a 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -235,6 +235,7 @@ LEAF(mips_cps_core_init)
has_mt t0, 3f
.set push
+ .set MIPS_ISA_LEVEL_RAW
.set mt
/* Only allow 1 TC per VPE to execute... */
@@ -388,6 +389,7 @@ LEAF(mips_cps_boot_vpes)
#elif defined(CONFIG_MIPS_MT)
.set push
+ .set MIPS_ISA_LEVEL_RAW
.set mt
/* If the core doesn't support MT then return */
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index dd5567b1e305..8f5bd04f320a 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
*this_cpu_ptr(&cm_core_lock_flags));
} else {
WARN_ON(cluster != 0);
- WARN_ON(vp != 0);
WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/*
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 45d0b6b037ee..57028d49c202 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
struct task_struct *t;
int max_users;
+ /* If nothing to change, return right away, successfully. */
+ if (value == mips_get_process_fp_mode(task))
+ return 0;
+
+ /* Only accept a mode change if 64-bit FP enabled for o32. */
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return -EOPNOTSUPP;
+
+ /* And only for o32 tasks. */
+ if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
+ return -EOPNOTSUPP;
+
/* Check the value is valid */
if (value & ~known_bits)
return -EOPNOTSUPP;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index efbd8df8b665..0b23b1ad99e6 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -419,63 +419,160 @@ static int gpr64_set(struct task_struct *target,
#endif /* CONFIG_64BIT */
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
+ * correspond 1:1 to buffer slots. Only general registers are copied.
+ */
+static int fpr_get_fpa(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ void **kbuf, void __user **ubuf)
+{
+ return user_regset_copyout(pos, count, kbuf, ubuf,
+ &target->thread.fpu,
+ 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
+ * general register slots are copied to buffer slots. Only general
+ * registers are copied.
+ */
+static int fpr_get_msa(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ void **kbuf, void __user **ubuf)
+{
+ unsigned int i;
+ u64 fpr_val;
+ int err;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+ err = user_regset_copyout(pos, count, kbuf, ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ */
static int fpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- unsigned i;
+ const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
int err;
- u64 fpr_val;
- /* XXX fcr31 */
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
+ else
+ err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
+ if (err)
+ return err;
- if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu,
- 0, sizeof(elf_fpregset_t));
+ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fcr31,
+ fcr31_pos, fcr31_pos + sizeof(u32));
- for (i = 0; i < NUM_FPU_REGS; i++) {
- fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
- err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &fpr_val, i * sizeof(elf_fpreg_t),
- (i + 1) * sizeof(elf_fpreg_t));
+ return err;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
+ * context's general register slots. Only general registers are copied.
+ */
+static int fpr_set_fpa(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ return user_regset_copyin(pos, count, kbuf, ubuf,
+ &target->thread.fpu,
+ 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
+ * bits only of FP context's general register slots. Only general
+ * registers are copied.
+ */
+static int fpr_set_msa(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ unsigned int i;
+ u64 fpr_val;
+ int err;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+ err = user_regset_copyin(pos, count, kbuf, ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
if (err)
return err;
+ set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
}
return 0;
}
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+ * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
+ * so that we can safely avoid preinitializing temporaries for
+ * partial register writes.
+ */
static int fpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- unsigned i;
+ const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+ u32 fcr31;
int err;
- u64 fpr_val;
- /* XXX fcr31 */
+ BUG_ON(count % sizeof(elf_fpreg_t));
+
+ if (pos + count > sizeof(elf_fpregset_t))
+ return -EIO;
init_fp_ctx(target);
- if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu,
- 0, sizeof(elf_fpregset_t));
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
+ else
+ err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
+ if (err)
+ return err;
- BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
- for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
+ if (count > 0) {
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &fpr_val, i * sizeof(elf_fpreg_t),
- (i + 1) * sizeof(elf_fpreg_t));
+ &fcr31,
+ fcr31_pos, fcr31_pos + sizeof(u32));
if (err)
return err;
- set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+
+ ptrace_setfcr31(target, fcr31);
}
- return 0;
+ return err;
}
enum mips_regset {
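[Editor's note: all four helpers above assume the same NT_PRFPREG layout — 32 general FP register slots of sizeof(elf_fpreg_t) bytes, then the 32-bit FCSR immediately after, which is what fcr31_pos computes. As a standalone calculation, assuming the usual 64-bit elf_fpreg_t:]

#include <stdio.h>

#define NUM_FPU_REGS 32
typedef unsigned long long elf_fpreg_t;	/* assumed 8 bytes */

int main(void)
{
	/* 32 slots of 8 bytes: FCSR lives at offset 256 in the regset */
	printf("fcr31 offset = %zu\n", NUM_FPU_REGS * sizeof(elf_fpreg_t));
	return 0;
}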
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index b80dd8b17a76..bbb0f4770c0d 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -336,10 +336,10 @@ static int file_release(struct inode *inode, struct file *filp)
return rtlx_release(iminor(inode));
}
-static unsigned int file_poll(struct file *file, poll_table *wait)
+static __poll_t file_poll(struct file *file, poll_table *wait)
{
int minor = iminor(file_inode(file));
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index cf5c7c05e5a3..c4db910a8794 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -76,70 +76,3 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *,
return ret;
}
-
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- int err;
-
- if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member.
- This routine must convert siginfo from 64bit to 32bit as well
- at the same time. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_FAULT:
- err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_RT:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_SYS:
- err |= __copy_to_user(&to->si_call_addr, &from->si_call_addr,
- sizeof(compat_uptr_t));
- err |= __put_user(from->si_syscall, &to->si_syscall);
- err |= __put_user(from->si_arch, &to->si_arch);
- break;
- }
- }
- return err;
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- if (copy_from_user(to, from, 3*sizeof(int)) ||
- copy_from_user(to->_sifields._pad,
- from->_sifields._pad, SI_PAD_SIZE32))
- return -EFAULT;
-
- return 0;
-}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 5d19ed07e99d..0ae4a731cc12 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -699,11 +699,12 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
asmlinkage void do_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
- siginfo_t info = {
- .si_signo = SIGFPE,
- .si_code = FPE_INTOVF,
- .si_addr = (void __user *)regs->cp0_epc,
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGFPE;
+ info.si_code = FPE_INTOVF;
+ info.si_addr = (void __user *)regs->cp0_epc;
prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);
@@ -721,7 +722,11 @@ asmlinkage void do_ov(struct pt_regs *regs)
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
struct task_struct *tsk)
{
- struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+ struct siginfo si;
+
+ clear_siginfo(&si);
+ si.si_addr = fault_addr;
+ si.si_signo = SIGFPE;
if (fcr31 & FPU_CSR_INV_X)
si.si_code = FPE_FLTINV;
@@ -739,9 +744,10 @@ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
- struct siginfo si = { 0 };
+ struct siginfo si;
struct vm_area_struct *vma;
+ clear_siginfo(&si);
switch (sig) {
case 0:
return 0;
@@ -890,9 +896,10 @@ out:
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
const char *str)
{
- siginfo_t info = { 0 };
+ siginfo_t info;
char b[40];
+ clear_siginfo(&info);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
SIGTRAP) == NOTIFY_STOP)
@@ -1499,9 +1506,13 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
- siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
+ siginfo_t info;
enum ctx_state prev_state;
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_code = TRAP_HWBKPT;
+
prev_state = exception_enter();
/*
* Clear WP (bit 22) bit of cause register so we don't loop
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 78c2affeabf8..e84e12655fa8 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -16,4 +16,5 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
# libgcc-style stuff needed in the kernel
-obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o
+obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \
+ ucmpdi2.o
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
index 28002ed90c2c..199a7f96282f 100644
--- a/arch/mips/lib/libgcc.h
+++ b/arch/mips/lib/libgcc.h
@@ -10,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__)));
struct DWstruct {
int high, low;
};
+
+struct TWstruct {
+ long long high, low;
+};
#elif defined(__LITTLE_ENDIAN)
struct DWstruct {
int low, high;
};
+
+struct TWstruct {
+ long long low, high;
+};
#else
#error I feel sick.
#endif
@@ -23,4 +31,13 @@ typedef union {
long long ll;
} DWunion;
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
+typedef int ti_type __attribute__((mode(TI)));
+
+typedef union {
+ struct TWstruct s;
+ ti_type ti;
+} TWunion;
+#endif
+
#endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
new file mode 100644
index 000000000000..111ad475aa0c
--- /dev/null
+++ b/arch/mips/lib/multi3.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+
+#include "libgcc.h"
+
+/*
+ * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
+ * specific case only we'll implement it here.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
+ */
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+
+/* multiply 64-bit values, low 64-bits returned */
+static inline long long notrace dmulu(long long a, long long b)
+{
+ long long res;
+
+ asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+ return res;
+}
+
+/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
+static inline long long notrace dmuhu(long long a, long long b)
+{
+ long long res;
+
+ asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+ return res;
+}
+
+/* multiply 128-bit values, low 128-bits returned */
+ti_type notrace __multi3(ti_type a, ti_type b)
+{
+ TWunion res, aa, bb;
+
+ aa.ti = a;
+ bb.ti = b;
+
+ /*
+ * a * b = (a.lo * b.lo)
+ * + 2^64 * (a.hi * b.lo + a.lo * b.hi)
+ * [+ 2^128 * (a.hi * b.hi)]
+ */
+ res.s.low = dmulu(aa.s.low, bb.s.low);
+ res.s.high = dmuhu(aa.s.low, bb.s.low);
+ res.s.high += dmulu(aa.s.high, bb.s.low);
+ res.s.high += dmulu(aa.s.low, bb.s.high);
+
+ return res.ti;
+}
+EXPORT_SYMBOL(__multi3);
+
+#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
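[Editor's note: the decomposition __multi3 uses — low product in full, plus the low 64 bits of each cross term folded into the high half, with the 2^128 term discarded — is exact modulo 2^128. It can be verified against a host compiler's native 128-bit multiply; this sketch assumes a 64-bit host with GCC/Clang __int128 support:]

#include <assert.h>

typedef unsigned __int128 u128;
typedef unsigned long long u64;

static u128 multi3_by_parts(u128 a, u128 b)
{
	u64 alo = (u64)a, ahi = (u64)(a >> 64);
	u64 blo = (u64)b, bhi = (u64)(b >> 64);
	u128 lo = (u128)alo * blo;		/* dmulu + dmuhu */
	u64 hi = (u64)(lo >> 64)		/* carry into high half */
		 + alo * bhi + ahi * blo;	/* cross terms, low 64 bits */

	return ((u128)hi << 64) | (u64)lo;
}

int main(void)
{
	u128 a = ((u128)0x0123456789abcdefULL << 64) | 0xfedcba9876543210ULL;
	u128 b = ((u128)0x0f1e2d3c4b5a6978ULL << 64) | 0x8796a5b4c3d2e1f0ULL;

	assert(multi3_by_parts(a, b) == a * b);
	return 0;
}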
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig
index 0d249fc3cfe9..6f109bb54cdb 100644
--- a/arch/mips/loongson64/Kconfig
+++ b/arch/mips/loongson64/Kconfig
@@ -136,6 +136,7 @@ config SWIOTLB
bool "Soft IOMMU Support for All-Memory DMA"
default y
depends on CPU_LOONGSON3
+ select DMA_DIRECT_OPS
select IOMMU_HELPER
select NEED_SG_DMA_LENGTH
select NEED_DMA_MAP_STATE
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index ef07740cee61..7bbcf89475f3 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -13,32 +13,12 @@
static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- void *ret;
+ void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
- if ((IS_ENABLED(CONFIG_ISA) && dev == NULL) ||
- (IS_ENABLED(CONFIG_ZONE_DMA) &&
- dev->coherent_dma_mask < DMA_BIT_MASK(32)))
- gfp |= __GFP_DMA;
- else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
- dev->coherent_dma_mask < DMA_BIT_MASK(40))
- gfp |= __GFP_DMA32;
-
- gfp |= __GFP_NORETRY;
-
- ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
mb();
return ret;
}
-static void loongson_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
@@ -109,7 +89,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
static const struct dma_map_ops loongson_dma_map_ops = {
.alloc = loongson_dma_alloc_coherent,
- .free = loongson_dma_free_coherent,
+ .free = swiotlb_free,
.map_page = loongson_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = loongson_dma_map_sg,
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e3e94d05f0fd..237532e89919 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -93,9 +93,6 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
gfp_t dma_flag;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
#ifdef CONFIG_ISA
if (dev == NULL)
dma_flag = __GFP_DMA;
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index cdb5a191b9d5..9bb6baa45da3 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -40,7 +40,7 @@
#include "uasm.c"
-static const struct insn const insn_table_MM[insn_invalid] = {
+static const struct insn insn_table_MM[insn_invalid] = {
[insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
[insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 44b925005dd3..4d8cb9bb8365 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1207,8 +1207,6 @@ jmp_cmp:
return 0;
}
-int bpf_jit_enable __read_mostly;
-
void bpf_jit_compile(struct bpf_prog *fp)
{
struct jit_ctx ctx;
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 962b0259b4b6..3e2798bfea4f 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -177,8 +177,6 @@ static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
(ctx->idx * 4) - 4;
}
-int bpf_jit_enable __read_mostly;
-
enum which_ebpf_reg {
src_reg,
src_reg_no_fp,
@@ -743,16 +741,11 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
+ if (insn->imm == 0)
+ return -EINVAL;
dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
if (dst < 0)
return dst;
- if (insn->imm == 0) { /* Div by zero */
- b_off = b_imm(exit_idx, ctx);
- if (is_bad_offset(b_off))
- return -E2BIG;
- emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
- emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
- }
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
/* sign extend */
@@ -772,19 +765,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */
case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */
+ if (insn->imm == 0)
+ return -EINVAL;
dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
if (dst < 0)
return dst;
- if (insn->imm == 0) { /* Div by zero */
- b_off = b_imm(exit_idx, ctx);
- if (is_bad_offset(b_off))
- return -E2BIG;
- emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
- emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
- }
if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
-
if (insn->imm == 1) {
/* div by 1 is a nop, mod by 1 is zero */
if (bpf_op == BPF_MOD)
@@ -862,11 +849,6 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_DIV:
case BPF_MOD:
- b_off = b_imm(exit_idx, ctx);
- if (is_bad_offset(b_off))
- return -E2BIG;
- emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
- emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
emit_instr(ctx, ddivu, dst, src);
if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst);
@@ -945,11 +927,6 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_DIV:
case BPF_MOD:
- b_off = b_imm(exit_idx, ctx);
- if (is_bad_offset(b_off))
- return -E2BIG;
- emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
- emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
emit_instr(ctx, divu, dst, src);
if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst);
@@ -1869,7 +1846,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
unsigned int image_size;
u8 *image_ptr;
- if (!bpf_jit_enable || !cpu_has_mips64r2)
+ if (!prog->jit_requested || !cpu_has_mips64r2)
return prog;
tmp = bpf_jit_blind_constants(prog);
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 8296b13affd2..7fcfc7fe9f14 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -89,9 +89,4 @@ config IOMMU_HELPER
config NEED_SG_DMA_LENGTH
bool
-config SWIOTLB
- def_bool y
- select NEED_SG_DMA_LENGTH
- select IOMMU_HELPER
-
endif
diff --git a/arch/mips/netlogic/common/Makefile b/arch/mips/netlogic/common/Makefile
index 60d00b5d748e..89f6e3f39fed 100644
--- a/arch/mips/netlogic/common/Makefile
+++ b/arch/mips/netlogic/common/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += irq.o time.o
-obj-y += nlm-dma.o
obj-y += reset.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_EARLY_PRINTK) += earlycons.o
diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c
deleted file mode 100644
index 0ec9d9da6d51..000000000000
--- a/arch/mips/netlogic/common/nlm-dma.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-* Copyright (C) 2003-2013 Broadcom Corporation
-* All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/bootmem.h>
-#include <linux/export.h>
-#include <linux/swiotlb.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-
-#include <asm/bootinfo.h>
-
-static char *nlm_swiotlb;
-
-static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
-#ifdef CONFIG_ZONE_DMA32
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- gfp |= __GFP_DMA32;
-#endif
-
- /* Don't invoke OOM killer */
- gfp |= __GFP_NORETRY;
-
- return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-}
-
-static void nlm_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-const struct dma_map_ops nlm_swiotlb_dma_ops = {
- .alloc = nlm_dma_alloc_coherent,
- .free = nlm_dma_free_coherent,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported
-};
-
-void __init plat_swiotlb_setup(void)
-{
- size_t swiotlbsize;
- unsigned long swiotlb_nslabs;
-
- swiotlbsize = 1 << 20; /* 1 MB for now */
- swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
- swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
- swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
-
- nlm_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
- swiotlb_init_with_tbl(nlm_swiotlb, swiotlb_nslabs, 1);
-}
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index d4469b20d176..4f46a4509f79 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -109,9 +109,9 @@ static int rt_timer_probe(struct platform_device *pdev)
}
rt->irq = platform_get_irq(pdev, 0);
- if (!rt->irq) {
+ if (rt->irq < 0) {
dev_err(&pdev->dev, "failed to load irq\n");
- return -ENOENT;
+ return rt->irq;
}
rt->membase = devm_ioremap_resource(&pdev->dev, res);
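[Editor's note: the ralink fix above adopts the standard convention — platform_get_irq() reports failure with a negative errno (possibly -EPROBE_DEFER), so probe code should test for < 0 and propagate that code instead of testing for zero and substituting -ENOENT. The pattern in isolation, as a kernel-style fragment:]

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq\n");
		return irq;	/* propagate -ENXIO, -EPROBE_DEFER, ... */
	}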
diff --git a/arch/mips/rb532/Makefile b/arch/mips/rb532/Makefile
index efdecdb6e3ea..8186afca2234 100644
--- a/arch/mips/rb532/Makefile
+++ b/arch/mips/rb532/Makefile
@@ -2,4 +2,6 @@
# Makefile for the RB532 board specific parts of the kernel
#
-obj-y += irq.o time.o setup.o serial.o prom.o gpio.o devices.o
+obj-$(CONFIG_SERIAL_8250_CONSOLE) += serial.o
+
+obj-y += irq.o time.o setup.o prom.o gpio.o devices.o
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 32ea3e6731d6..354d258396ff 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -310,6 +310,8 @@ static int __init plat_setup_devices(void)
return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
}
+#ifdef CONFIG_NET
+
static int __init setup_kmac(char *s)
{
printk(KERN_INFO "korina mac = %s\n", s);
@@ -322,4 +324,6 @@ static int __init setup_kmac(char *s)
__setup("kmac=", setup_kmac);
+#endif /* CONFIG_NET */
+
arch_initcall(plat_setup_devices);
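
For readers unfamiliar with the __setup() pattern wrapped in CONFIG_NET above: it registers a handler for a kernel command-line option, invoked during early parameter parsing, and the handler returns 1 to mark the option as consumed. A minimal sketch with a hypothetical option name:

    #include <linux/init.h>
    #include <linux/printk.h>

    static int __init setup_example(char *s)
    {
            /* s points just past "example=" on the command line */
            pr_info("example option = %s\n", s);
            return 1;       /* consumed; not passed on to init */
    }
    __setup("example=", setup_example);
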
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
index b1d80cee97ee..a84c3153f22a 100644
--- a/arch/mn10300/configs/asb2364_defconfig
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -44,7 +44,6 @@ CONFIG_IPV6=y
# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
# CONFIG_INET6_XFRM_MODE_BEET is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_DEBUG=y
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index f5f90bbf019d..1748a7b25bf8 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -79,8 +79,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
#define init_uregs \
((struct pt_regs *) \
((unsigned long) init_stack + THREAD_SIZE - sizeof(struct pt_regs)))
diff --git a/arch/mn10300/include/uapi/asm/Kbuild b/arch/mn10300/include/uapi/asm/Kbuild
index 81271d3af47c..b04fd1632051 100644
--- a/arch/mn10300/include/uapi/asm/Kbuild
+++ b/arch/mn10300/include/uapi/asm/Kbuild
@@ -2,4 +2,5 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
+generic-y += poll.h
generic-y += siginfo.h
diff --git a/arch/mn10300/include/uapi/asm/poll.h b/arch/mn10300/include/uapi/asm/poll.h
deleted file mode 100644
index b7132a305a47..000000000000
--- a/arch/mn10300/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/poll.h>
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index d7ef1232a82a..4994b570dfd9 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -550,7 +550,7 @@ try_again:
return;
}
- smp_read_barrier_depends();
+ /* READ_ONCE() enforces the dependency, but it is fragile when carried through an integer index */
ch = port->rx_buffer[ix++];
st = port->rx_buffer[ix++];
smp_mb();
@@ -1728,7 +1728,10 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port)
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
return NO_POLL_CHAR;
- smp_read_barrier_depends();
+ /*
+ * READ_ONCE() enforces the dependency, but it is fragile
+ * when carried through an integer index.
+ */
ch = port->rx_buffer[ix++];
st = port->rx_buffer[ix++];
smp_mb();
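
Background for the two comment changes above: smp_read_barrier_depends() was only ever a real barrier on Alpha, and READ_ONCE() now subsumes it. The caveat in the new comments is that dependency ordering is only reliable when carried through a pointer; here it flows through an integer ring index, which the compiler is free to break, hence the retained smp_mb(). A sketch of the pattern, with hypothetical field names:

    struct sketch_port {
            unsigned int out;       /* consumer index */
            u8 buf[64];             /* ring filled by the interrupt handler */
    };

    static u8 sketch_consume(struct sketch_port *p)
    {
            unsigned int ix = READ_ONCE(p->out);    /* dependency starts here */
            u8 ch = p->buf[ix];     /* carried through an integer: fragile, so
                                       the driver pairs it with an explicit smp_mb() */
            return ch;
    }
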
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index 86108d2496b3..e3910d4db102 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -37,9 +37,6 @@ static void *mn10300_dma_alloc(struct device *dev, size_t size,
goto done;
}
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
gfp |= GFP_DMA;
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index b39a388825ae..8ace89617c1c 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -437,7 +437,7 @@ transfer_failed:
info.si_signo = SIGSEGV;
info.si_errno = 0;
- info.si_code = 0;
+ info.si_code = SEGV_MAPERR;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGSEGV, &info, current);
return;
diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h
index d69c338bd19c..7349a4fa635b 100644
--- a/arch/nios2/include/asm/thread_info.h
+++ b/arch/nios2/include/asm/thread_info.h
@@ -63,9 +63,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index 7040c1adbb5e..4be815519dd4 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -63,9 +63,6 @@ static void *nios2_dma_alloc(struct device *dev, size_t size,
{
void *ret;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
/* optimized page clearing */
gfp |= __GFP_ZERO;
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index 396d8f306c21..af31a9fe736a 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -84,8 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
-#define init_stack (init_thread_union.stack)
-
#define cpu_relax() barrier()
#endif /* __ASSEMBLY__ */
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
index c229aa6bb502..5c15dfa2fd4f 100644
--- a/arch/openrisc/include/asm/thread_info.h
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -79,8 +79,6 @@ struct thread_info {
.ksp = 0, \
}
-#define init_thread_info (init_thread_union.thread_info)
-
/* how to get the thread information struct from C */
register struct thread_info *current_thread_info_reg asm("r10");
#define current_thread_info() (current_thread_info_reg)
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 4085d72fa5ae..9e38dc66c9e4 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -266,12 +266,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
siginfo_t info;
if (user_mode(regs)) {
- /* Send a SIGSEGV */
- info.si_signo = SIGSEGV;
+ /* Send a SIGBUS */
+ info.si_signo = SIGBUS;
info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void *)address;
- force_sig_info(SIGSEGV, &info, current);
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, current);
} else {
printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
show_registers(regs);
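
The fix above corrects both the signal number and the siginfo: an unaligned access is a SIGBUS with si_code BUS_ADRALN, not a SIGSEGV, and si_addr must be a __user pointer. One further hardening step common in this area (an aside, not part of this patch) is zeroing the siginfo first so uninitialized padding never leaks to userspace:

    siginfo_t info;

    memset(&info, 0, sizeof(info)); /* no stale stack bytes in the padding */
    info.si_signo = SIGBUS;
    info.si_code = BUS_ADRALN;      /* invalid address alignment */
    info.si_addr = (void __user *)address;
    force_sig_info(SIGBUS, &info, current);
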
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index 00ddb7804be4..953bdcd54efe 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -28,6 +28,7 @@
#include <asm/page.h>
#include <asm/cache.h>
+#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>
#ifdef __OR1K__
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index acf8aa07cbe0..c22db5323244 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -130,70 +130,6 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[128/sizeof(int) - 3];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- char _pad[sizeof(unsigned int) - sizeof(int)];
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- /* SIGSYS */
- struct {
- compat_uptr_t _call_addr; /* calling user insn */
- int _syscall; /* triggering system call number */
- compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
struct compat_ipc64_perm {
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index dd5a08aaa4da..3eb4bfc1fb36 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -12,6 +12,7 @@
for the semaphore. */
#define __PA_LDCW_ALIGNMENT 16
+#define __PA_LDCW_ALIGN_ORDER 4
#define __ldcw_align(a) ({ \
unsigned long __ret = (unsigned long) &(a)->lock[0]; \
__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
@@ -29,6 +30,7 @@
ldcd). */
#define __PA_LDCW_ALIGNMENT 4
+#define __PA_LDCW_ALIGN_ORDER 2
#define __ldcw_align(a) (&(a)->slock)
#define __LDCW "ldcw,co"
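
The new __PA_LDCW_ALIGN_ORDER is simply log2 of __PA_LDCW_ALIGNMENT (16 == 1 << 4, 4 == 1 << 2). The assembly in the next two files exploits this: add ALIGNMENT-1 and then clear the low ALIGN_ORDER bits with a single depi, rounding the lock address up to the required boundary. The equivalent C, as a sketch:

    static inline unsigned long ldcw_align(unsigned long addr)
    {
            /* same effect as "load32 addr + 15; depi 0,31,4" in the asm below */
            return (addr + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1UL);
    }
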
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 598c8d60fa5e..285757544cca 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -25,9 +25,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *)mfctl(30))
diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
index 4a1062e05aaf..be40331f757d 100644
--- a/arch/parisc/include/uapi/asm/siginfo.h
+++ b/arch/parisc/include/uapi/asm/siginfo.h
@@ -8,4 +8,11 @@
#include <asm-generic/siginfo.h>
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
#endif
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index d8f77358e2ba..29b99b8964aa 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -870,7 +870,7 @@ static void print_parisc_device(struct parisc_device *dev)
static int count;
print_pa_hwpath(dev, hw_path);
- printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+ printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index f3cecf5117cf..e95207c0565e 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -35,6 +35,7 @@
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
+#include <asm/ldcw.h>
#include <asm/thread_info.h>
#include <linux/linkage.h>
@@ -46,6 +47,14 @@
#endif
.import pa_tlb_lock,data
+ .macro load_pa_tlb_lock reg
+#if __PA_LDCW_ALIGNMENT > 4
+ load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
+ depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
+#else
+ load32 PA(pa_tlb_lock), \reg
+#endif
+ .endm
/* space_to_prot macro creates a prot id from a space id */
@@ -457,7 +466,7 @@
.macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
cmpib,COND(=),n 0,\spc,2f
- load32 PA(pa_tlb_lock),\tmp
+ load_pa_tlb_lock \tmp
1: LDCW 0(\tmp),\tmp1
cmpib,COND(=) 0,\tmp1,1b
nop
@@ -480,7 +489,7 @@
/* Release pa_tlb_lock lock. */
.macro tlb_unlock1 spc,tmp
#ifdef CONFIG_SMP
- load32 PA(pa_tlb_lock),\tmp
+ load_pa_tlb_lock \tmp
tlb_unlock0 \spc,\tmp
#endif
.endm
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index adf7187f8951..2d40c4ff3f69 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -36,6 +36,7 @@
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
+#include <asm/ldcw.h>
#include <linux/linkage.h>
.text
@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
.macro tlb_lock la,flags,tmp
#ifdef CONFIG_SMP
- ldil L%pa_tlb_lock,%r1
- ldo R%pa_tlb_lock(%r1),\la
+#if __PA_LDCW_ALIGNMENT > 4
+ load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
+ depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
+#else
+ load32 pa_tlb_lock, \la
+#endif
rsm PSW_SM_I,\flags
1: LDCW 0(\la),\tmp
cmpib,<>,n 0,\tmp,3f
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index c0dfd892f70c..91bc0cac03a1 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -75,11 +75,6 @@ void dump_resmap(void)
static inline void dump_resmap(void) {;}
#endif
-static int pa11_dma_supported( struct device *dev, u64 mask)
-{
- return 1;
-}
-
static inline int map_pte_uncached(pte_t * pte,
unsigned long vaddr,
unsigned long size, unsigned long *paddr_ptr)
@@ -579,7 +574,6 @@ static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
}
const struct dma_map_ops pcxl_dma_ops = {
- .dma_supported = pa11_dma_supported,
.alloc = pa11_dma_alloc,
.free = pa11_dma_free,
.map_page = pa11_dma_map_page,
@@ -616,7 +610,6 @@ static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
}
const struct dma_map_ops pcx_dma_ops = {
- .dma_supported = pa11_dma_supported,
.alloc = pcx_dma_alloc,
.free = pcx_dma_free,
.map_page = pa11_dma_map_page,
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
index e07eb34c8750..36434d4da381 100644
--- a/arch/parisc/kernel/pdt.c
+++ b/arch/parisc/kernel/pdt.c
@@ -325,7 +325,7 @@ static int pdt_mainloop(void *unused)
#ifdef CONFIG_MEMORY_FAILURE
if ((pde & PDT_ADDR_PERM_ERR) ||
((pde & PDT_ADDR_SINGLE_ERR) == 0))
- memory_failure(pde >> PAGE_SHIFT, 0, 0);
+ memory_failure(pde >> PAGE_SHIFT, 0);
else
soft_offline_page(
pfn_to_page(pde >> PAGE_SHIFT), 0);
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 30f92391a93e..cad3e8661cd6 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -39,6 +39,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
@@ -184,6 +185,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
}
/*
+ * Idle thread support
+ *
+ * Detect when running on QEMU with SeaBIOS PDC Firmware and let
+ * QEMU idle the host too.
+ */
+
+int running_on_qemu __read_mostly;
+
+void __cpuidle arch_cpu_idle_dead(void)
+{
+ /* nop on real hardware, qemu will offline CPU. */
+ asm volatile("or %%r31,%%r31,%%r31\n":::);
+}
+
+void __cpuidle arch_cpu_idle(void)
+{
+ local_irq_enable();
+
+ /* nop on real hardware, qemu will idle sleep. */
+ asm volatile("or %%r10,%%r10,%%r10\n":::);
+}
+
+static int __init parisc_idle_init(void)
+{
+ const char *marker;
+
+ /* check QEMU/SeaBIOS marker in PAGE0 */
+ marker = (char *) &PAGE0->pad0;
+ running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
+
+ if (!running_on_qemu)
+ cpu_idle_poll_ctrl(1);
+
+ return 0;
+}
+arch_initcall(parisc_idle_init);
+
+/*
* Copy architecture-specific thread state
*/
int
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 41afa9cd1f55..e8ef3eb69449 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -260,109 +260,3 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __
return err;
}
-
-int
-copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
-{
- compat_uptr_t addr;
- int err;
-
- if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- err = __get_user(to->si_signo, &from->si_signo);
- err |= __get_user(to->si_errno, &from->si_errno);
- err |= __get_user(to->si_code, &from->si_code);
-
- if (to->si_code < 0)
- err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(to->si_signo, to->si_code)) {
- case SIL_CHLD:
- err |= __get_user(to->si_utime, &from->si_utime);
- err |= __get_user(to->si_stime, &from->si_stime);
- err |= __get_user(to->si_status, &from->si_status);
- default:
- case SIL_KILL:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- break;
- case SIL_FAULT:
- err |= __get_user(addr, &from->si_addr);
- to->si_addr = compat_ptr(addr);
- break;
- case SIL_POLL:
- err |= __get_user(to->si_band, &from->si_band);
- err |= __get_user(to->si_fd, &from->si_fd);
- break;
- case SIL_RT:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- err |= __get_user(to->si_int, &from->si_int);
- break;
- }
- }
- return err;
-}
-
-int
-copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
-{
- compat_uptr_t addr;
- compat_int_t val;
- int err;
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member.
- This routine must convert siginfo from 64bit to 32bit as well
- at the same time. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_FAULT:
- addr = ptr_to_compat(from->si_addr);
- err |= __put_user(addr, &to->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- val = (compat_int_t)from->si_int;
- err |= __put_user(val, &to->si_int);
- break;
- case SIL_RT:
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_pid, &to->si_pid);
- val = (compat_int_t)from->si_int;
- err |= __put_user(val, &to->si_int);
- break;
- case SIL_SYS:
- err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
- err |= __put_user(from->si_syscall, &to->si_syscall);
- err |= __put_user(from->si_arch, &to->si_arch);
- break;
- }
- }
- return err;
-}
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
index 719e7417732c..a271dc0976ce 100644
--- a/arch/parisc/kernel/signal32.h
+++ b/arch/parisc/kernel/signal32.h
@@ -34,9 +34,6 @@ struct compat_ucontext {
/* ELF32 signal handling */
-int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from);
-int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from);
-
/* In a deft move of uber-hackery, we decide to carry the top half of all
* 64-bit registers in a non-portable, non-ABI, hidden structure.
* Userspace can read the hidden structure if it *wants* but is never
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 8453724b8009..c919e6c0a687 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -629,7 +629,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
si.si_signo = SIGFPE;
/* Set to zero, and let the userspace app figure it out from
the insn pointed to by si_addr */
- si.si_code = 0;
+ si.si_code = FPE_FIXME;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
return;
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 13f7854e0d49..48f41399fc0b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -631,11 +631,11 @@ void __init mem_init(void)
mem_init_print_info(NULL);
#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
printk("virtual kernel memory layout:\n"
- " vmalloc : 0x%p - 0x%p (%4ld MB)\n"
- " memory : 0x%p - 0x%p (%4ld MB)\n"
- " .init : 0x%p - 0x%p (%4ld kB)\n"
- " .data : 0x%p - 0x%p (%4ld kB)\n"
- " .text : 0x%p - 0x%p (%4ld kB)\n",
+ " vmalloc : 0x%px - 0x%px (%4ld MB)\n"
+ " memory : 0x%px - 0x%px (%4ld MB)\n"
+ " .init : 0x%px - 0x%px (%4ld kB)\n"
+ " .data : 0x%px - 0x%px (%4ld kB)\n"
+ " .text : 0x%px - 0x%px (%4ld kB)\n",
(void*)VMALLOC_START, (void*)VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
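
The %p to %px conversions in this file (and in drivers.c above) follow the kernel's pointer-hashing change: plain %p now prints a hashed value so logs do not leak kernel addresses, while %px deliberately prints the raw pointer and is reserved for output where the real address is the point, as in this memory-layout dump. For some pointer ptr:

    /* %p is hashed since the hardening change; %px opts back out */
    printk(KERN_DEBUG "hashed: %p raw: %px\n", ptr, ptr);
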
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c51e6ce42e7a..73fcf592ee91 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -139,6 +139,7 @@ config PPC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if PPC64
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select ARCH_HAS_SG_CHAIN
@@ -150,7 +151,6 @@ config PPC
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_WANT_IPC_PARSE_VERSION
@@ -166,6 +166,7 @@ config PPC
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_SMP_IDLE_THREAD
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index af12ead88c5f..1b4aafc1f6a2 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -137,12 +137,14 @@
compatible = "fsl,p1010-flexcan";
reg = <0x1c000 0x1000>;
interrupts = <48 0x2 0 0>;
+ big-endian;
};
can1: can@1d000 {
compatible = "fsl,p1010-flexcan";
reg = <0x1d000 0x1000>;
interrupts = <61 0x2 0 0>;
+ big-endian;
};
L2: l2-cache-controller@20000 {
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index b5c866073efd..6c02f53271cd 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -28,7 +28,6 @@ CONFIG_NETFILTER=y
CONFIG_VLAN_8021Q=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
index f1552af9eecc..4bb832a41d55 100644
--- a/arch/powerpc/configs/c2k_defconfig
+++ b/arch/powerpc/configs/c2k_defconfig
@@ -272,18 +272,6 @@ CONFIG_USB_SERIAL_EDGEPORT=m
CONFIG_USB_SERIAL_EDGEPORT_TI=m
CONFIG_USB_SERIAL_KEYSPAN_PDA=m
CONFIG_USB_SERIAL_KEYSPAN=m
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_KLSI=m
CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index cc49c95494da..e0567dc41968 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -71,7 +71,6 @@ CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_SCTP=m
CONFIG_IPV6=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_ISO9660_FS=m
CONFIG_JFFS2_FS_DEBUG=1
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 063817fee61c..67c39f4acede 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -189,18 +189,6 @@ CONFIG_USB_SERIAL_GARMIN=m
CONFIG_USB_SERIAL_IPW=m
CONFIG_USB_SERIAL_KEYSPAN_PDA=m
CONFIG_USB_SERIAL_KEYSPAN=m
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_KLSI=m
CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 078cdb427fc9..59e47ec85336 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -82,18 +82,6 @@ CONFIG_USB_SERIAL_CYPRESS_M8=m
CONFIG_USB_SERIAL_GARMIN=m
CONFIG_USB_SERIAL_IPW=m
CONFIG_USB_SERIAL_KEYSPAN=y
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_TI=m
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index 10be5773ad5d..c2b1c4404683 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -39,7 +39,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 1aab9a62a681..62948d198d7f 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -264,18 +264,6 @@ CONFIG_USB_SERIAL_VISOR=m
CONFIG_USB_SERIAL_IPAQ=m
CONFIG_USB_SERIAL_KEYSPAN_PDA=m
CONFIG_USB_SERIAL_KEYSPAN=m
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_APPLEDISPLAY=m
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_EXT2_FS=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 4891bbed6258..73dab7a37386 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -4,7 +4,6 @@ CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 6ddca80c52c3..5033e630afea 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -1,7 +1,6 @@
CONFIG_PPC64=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index da0e8d535eb8..7ee736f20774 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -347,7 +347,6 @@ CONFIG_MAC80211_DEBUGFS=y
CONFIG_NET_9P=m
CONFIG_NET_9P_VIRTIO=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_DEBUG_DEVRES=y
CONFIG_CONNECTOR=y
CONFIG_PARPORT=m
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 2efa025bf483..187e2f7c12c8 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -64,7 +64,6 @@ CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
# CONFIG_MAC80211_RC_MINSTREL is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65535
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index bde2cd1005a2..0dd5cf7b566d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -3,7 +3,6 @@ CONFIG_NR_CPUS=2048
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 9c7400a19e9d..0b0f78823a1b 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -43,7 +43,6 @@ CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index f058e0c3e4d4..fd1d6c83f0c0 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -141,6 +141,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-vpmsum",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 197ced1eaaa0..2d9df40446f6 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -101,8 +101,6 @@ extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-extern void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 8d40cf03cb67..cb46d1034f33 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -203,8 +203,6 @@ extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-extern void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 44697817ccc6..6ca1208cedcb 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1137,17 +1137,8 @@ static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
}
#define __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
-static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
-{
- if (radix_enabled())
- return radix__pmdp_huge_split_prepare(vma, address, pmdp);
- return hash__pmdp_huge_split_prepare(vma, address, pmdp);
-}
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
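
The prototype change above (pmdp_invalidate() now returns the old pmd) lets transparent-hugepage code fetch the prior entry atomically with the invalidation instead of re-reading it around the call, which is also why the separate pmdp_huge_split_prepare() hook is deleted here. A hedged usage sketch:

    pmd_t old = pmdp_invalidate(vma, address, pmdp);

    /* 'old' still carries the pre-invalidation dirty/accessed bits */
    if (pmd_dirty(old))
            SetPageDirty(page);
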
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 19c44e1495ae..365010f66570 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -269,12 +269,6 @@ static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
return __pmd(pmd_val(pmd) | _PAGE_PTE | R_PAGE_LARGE);
return __pmd(pmd_val(pmd) | _PAGE_PTE);
}
-static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
-{
- /* Nothing to do for radix. */
- return;
-}
extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, unsigned long clr,
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 8a2aecfe9b02..62168e1158f1 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -119,71 +119,6 @@ typedef u32 compat_old_sigset_t;
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[SI_PAD_SIZE32];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- __compat_uid_t _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- /* SIGSYS */
- struct {
- unsigned int _call_addr; /* calling insn */
- int _syscall; /* triggering system call number */
- unsigned int _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 14e71ff6579e..fc97404de0a3 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -49,7 +49,7 @@ void set_breakpoint(struct arch_hw_breakpoint *brk);
void __set_breakpoint(struct arch_hw_breakpoint *brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
- unsigned long error_code, int signal_code, int brkpt);
+ unsigned long error_code, int brkpt);
#else
extern void do_break(struct pt_regs *regs, unsigned long address,
diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
new file mode 100644
index 000000000000..a5b59c765426
--- /dev/null
+++ b/arch/powerpc/include/asm/dma-direct.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_POWERPC_DMA_DIRECT_H
+#define ASM_POWERPC_DMA_DIRECT_H 1
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+#ifdef CONFIG_SWIOTLB
+ struct dev_archdata *sd = &dev->archdata;
+
+ if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
+ return false;
+#endif
+
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + get_dma_offset(dev);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr - get_dma_offset(dev);
+}
+#endif /* ASM_POWERPC_DMA_DIRECT_H */
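
dma_capable() above is a pure range check against the device's DMA mask (plus the swiotlb ceiling when configured): the transfer is allowed only if its last byte is still addressable. A worked example, assuming a 32-bit mask:

    /* dma_mask = 0xffffffff (32-bit device) */
    dma_addr_t addr = 0xfffff000;
    size_t size = 0x1000;

    /* last byte is 0xffffffff <= mask, so this 4 KiB buffer passes;
       the same buffer starting at 0xfffff800 would end at 0x1000007ff
       and fail, forcing a bounce through swiotlb */
    bool ok = (addr + size - 1) <= 0xffffffffULL;
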
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 5a6cbe11db6f..8fa394520af6 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -19,13 +19,13 @@
#include <asm/swiotlb.h>
/* Some dma direct funcs must be visible for use in other dma_ops */
-extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs);
-extern void __dma_direct_free_coherent(struct device *dev, size_t size,
+extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs);
-extern int dma_direct_mmap_coherent(struct device *dev,
+extern int dma_nommu_mmap_coherent(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle,
size_t size, unsigned long attrs);
@@ -73,7 +73,7 @@ static inline unsigned long device_to_mask(struct device *dev)
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
-extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_nommu_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
@@ -107,39 +107,11 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
dev->archdata.dma_offset = off;
}
-/* this will be removed soon */
-#define flush_write_buffers()
-
#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-#ifdef CONFIG_SWIOTLB
- struct dev_archdata *sd = &dev->archdata;
-
- if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
- return false;
-#endif
-
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr + get_dma_offset(dev);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr - get_dma_offset(dev);
-}
-
#define ARCH_HAS_DMA_MMAP_COHERENT
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index a703452d67b6..555e22d5e07f 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -209,5 +209,11 @@ exc_##label##_book3e:
ori r3,r3,vector_offset@l; \
mtspr SPRN_IVOR##vector_number,r3;
+#define RFI_TO_KERNEL \
+ rfi
+
+#define RFI_TO_USER \
+ rfi
+
#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index b27205297e1d..7197b179c1b1 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -74,6 +74,59 @@
*/
#define EX_R3 EX_DAR
+/*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+ * The nop instructions allow us to insert one or more instructions to flush the
+ * L1-D cache when returning to userspace or a guest.
+ */
+#define RFI_FLUSH_SLOT \
+ RFI_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop
+
+#define RFI_TO_KERNEL \
+ rfid
+
+#define RFI_TO_USER \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+#define RFI_TO_USER_OR_KERNEL \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+#define RFI_TO_GUEST \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+#define HRFI_TO_KERNEL \
+ hrfid
+
+#define HRFI_TO_USER \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+#define HRFI_TO_USER_OR_KERNEL \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+#define HRFI_TO_GUEST \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+#define HRFI_TO_UNKNOWN \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
#ifdef CONFIG_RELOCATABLE
#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
@@ -218,7 +271,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mtspr SPRN_##h##SRR0,r12; \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
mtspr SPRN_##h##SRR1,r10; \
- h##rfid; \
+ h##RFI_TO_KERNEL; \
b . /* prevent speculative execution */
#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
__EXCEPTION_PROLOG_PSERIES_1(label, h)
@@ -232,7 +285,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mtspr SPRN_##h##SRR0,r12; \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
mtspr SPRN_##h##SRR1,r10; \
- h##rfid; \
+ h##RFI_TO_KERNEL; \
b . /* prevent speculative execution */
#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 8f88f771cc55..1e82eb3caabd 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -187,7 +187,20 @@ label##3: \
FTR_ENTRY_OFFSET label##1b-label##3b; \
.popsection;
+#define RFI_FLUSH_FIXUP_SECTION \
+951: \
+ .pushsection __rfi_flush_fixup,"a"; \
+ .align 2; \
+952: \
+ FTR_ENTRY_OFFSET 951b-952b; \
+ .popsection;
+
+
#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+
void apply_feature_fixups(void);
void setup_feature_keys(void);
#endif
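
RFI_FLUSH_FIXUP_SECTION records, for every RFI_FLUSH_SLOT, a self-relative offset in the new __rfi_flush_fixup section. At boot, do_rfi_flush_fixups() (added elsewhere in this series) walks the section and patches the three-nop slots with the chosen flush sequence. A sketch of that walk, as an assumption about the companion patch:

    void do_rfi_flush_fixups(enum l1d_flush_type types)
    {
            long *start = &__start___rfi_flush_fixup;
            long *end = &__stop___rfi_flush_fixup;
            long *curr;

            for (curr = start; curr < end; curr++) {
                    /* each entry is an offset back to its RFI_FLUSH_SLOT */
                    unsigned int *dest = (void *)curr + *curr;

                    /* patch up to 3 instructions chosen from 'types' over
                       the nops, e.g. the fallback branch or ori 30,30,30 */
            }
    }
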
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a409177be8bd..eca3f9c68907 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -241,6 +241,7 @@
#define H_GET_HCA_INFO 0x1B8
#define H_GET_PERF_COUNT 0x1BC
#define H_MANAGE_TRACE 0x1C0
+#define H_GET_CPU_CHARACTERISTICS 0x1C8
#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
#define H_QUERY_INT_STATE 0x1E4
#define H_POLL_PENDING 0x1D8
@@ -330,6 +331,17 @@
#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2
/* >= 0 values are CPU number */
+/* H_GET_CPU_CHARACTERISTICS return values */
+#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0
+#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1
+#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
+#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
+#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
+
+#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
+#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
+#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
+
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
#define PROC_TABLE_DEREG 0x10
@@ -341,6 +353,7 @@
#define PROC_TABLE_GTSE 0x01
#ifndef __ASSEMBLY__
+#include <linux/types.h>
/**
* plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
@@ -436,6 +449,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
}
}
+struct h_cpu_char_result {
+ u64 character;
+ u64 behaviour;
+};
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HVCALL_H */
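
A note on the "IBM bit N" comments above: Power architecture documents number bits from the most-significant end, so IBM bit 0 of a 64-bit doubleword is 1ull << 63. The kernel's PPC_BIT() macro encodes the same convention, effectively:

    /* IBM (MSB-0) bit n of a 64-bit value */
    #define PPC_BIT(n)      (1UL << (63 - (n)))

    /* so PPC_BIT(2) == 1ull << 61 == H_CPU_CHAR_L1D_FLUSH_ORI30 */
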
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 3892db93b837..23ac7fc0af23 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -232,6 +232,16 @@ struct paca_struct {
struct sibling_subcore_state *sibling_subcore_state;
#endif
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * rfi fallback flush must be in its own cacheline to prevent
+ * other paca data leaking into the L1d
+ */
+ u64 exrfi[EX_SIZE] __aligned(0x80);
+ void *rfi_flush_fallback_area;
+ u64 l1d_flush_congruence;
+ u64 l1d_flush_sets;
+#endif
};
extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 7f01b22fa6cb..55eddf50d149 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu)
return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
}
+static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
+ if (rc == H_SUCCESS) {
+ p->character = retbuf[0];
+ p->behaviour = retbuf[1];
+ }
+
+ return rc;
+}
+
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
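
Usage-wise, a non-H_SUCCESS return from plpar_get_cpu_characteristics() means the hypervisor gave no guidance (older firmware does not implement the hcall), and callers fall back to conservative defaults. A sketch of how the pseries setup code is expected to consume the result:

    struct h_cpu_char_result result;

    if (plpar_get_cpu_characteristics(&result) == H_SUCCESS) {
            if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
                    /* firmware exposes the mttrig flush method */;
            if (result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)
                    /* flush recommended when returning to userspace */;
    }
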
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index cf00ec26303a..469b7fdc9be4 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -39,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {}
static inline void pseries_little_endian_exceptions(void) {}
#endif /* CONFIG_PPC_PSERIES */
+void rfi_flush_enable(bool enable);
+
+/* These are bit flags */
+enum l1d_flush_type {
+ L1D_FLUSH_NONE = 0x1,
+ L1D_FLUSH_FALLBACK = 0x2,
+ L1D_FLUSH_ORI = 0x4,
+ L1D_FLUSH_MTTRIG = 0x8,
+};
+
+void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
+void do_rfi_flush_fixups(enum l1d_flush_type types);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */
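
As the comment says, the l1d_flush_type values are bit flags: a platform can report every flush method the firmware offers and let setup_rfi_flush() pick the best one. A sketch with hypothetical firmware predicates:

    enum l1d_flush_type types = L1D_FLUSH_FALLBACK;  /* always available */

    if (fw_offers_ori_flush)        /* hypothetical probe result */
            types |= L1D_FLUSH_ORI;
    if (fw_offers_mttrig_flush)
            types |= L1D_FLUSH_MTTRIG;

    setup_rfi_flush(types, /* enable = */ true);
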
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 01d45a5fd00b..f65ecf57b66c 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -13,9 +13,7 @@
#include <linux/swiotlb.h>
-extern const struct dma_map_ops swiotlb_dma_ops;
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
+extern const struct dma_map_ops powerpc_swiotlb_dma_ops;
extern unsigned int ppc_swiotlb_enable;
int __init swiotlb_setup_bus_notifier(void);
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index a264c3ad366b..4a12c00f8de3 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -58,9 +58,6 @@ struct thread_info {
.flags = 0, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
/* how to get the thread information struct from C */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 61d6049f4c1e..637b7263cb86 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -443,6 +443,31 @@ struct kvm_ppc_rmmu_info {
__u32 ap_encodings[8];
};
+/* For KVM_PPC_GET_CPU_CHAR */
+struct kvm_ppc_cpu_char {
+ __u64 character; /* characteristics of the CPU */
+ __u64 behaviour; /* recommended software behaviour */
+ __u64 character_mask; /* valid bits in character */
+ __u64 behaviour_mask; /* valid bits in behaviour */
+};
+
+/*
+ * Values for character and character_mask.
+ * These are identical to the values used by H_GET_CPU_CHARACTERISTICS.
+ */
+#define KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 (1ULL << 63)
+#define KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED (1ULL << 62)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 (1ULL << 61)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 (1ULL << 60)
+#define KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV (1ULL << 59)
+#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
+#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
+#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
+
+#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
+#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
+#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
+
/* Per-vcpu XICS interrupt controller state */
#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
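
For the new KVM_PPC_GET_CPU_CHAR interface, the *_mask fields tell userspace which bits are meaningful: a clear mask bit means "unknown", not "absent", so both words must be consulted. A hedged sketch of a QEMU-style consumer, assuming the vm ioctl from the companion KVM patch:

    struct kvm_ppc_cpu_char cc;

    if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0) {
            if ((cc.character_mask & KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30) &&
                (cc.character & KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30))
                    /* the ori 30,30,30 flush sequence is available */;
    }
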
diff --git a/arch/powerpc/include/uapi/asm/siginfo.h b/arch/powerpc/include/uapi/asm/siginfo.h
index 1a691141e49f..9f142451a01f 100644
--- a/arch/powerpc/include/uapi/asm/siginfo.h
+++ b/arch/powerpc/include/uapi/asm/siginfo.h
@@ -15,7 +15,19 @@
#include <asm-generic/siginfo.h>
-#undef NSIGTRAP
-#define NSIGTRAP 4
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
+/*
+ * SIGTRAP si_codes
+ */
+#ifdef __KERNEL__
+#define TRAP_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
#endif /* _ASM_POWERPC_SIGINFO_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6b958414b4e0..f390d57cf2e1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -237,6 +237,11 @@ int main(void)
OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
OFFSET(PACA_IN_MCE, paca_struct, in_mce);
OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
+ OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
+ OFFSET(PACA_EXRFI, paca_struct, exrfi);
+ OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
+ OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
+
#endif
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 66f33e7f8d40..f9fe2080ceb9 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -114,7 +114,7 @@ int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
- .mmap = dma_direct_mmap_coherent,
+ .mmap = dma_nommu_mmap_coherent,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index d0ea7860e02b..88f3963ca30f 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -46,10 +46,10 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
* map_page, and unmap_page on highmem, use normal dma_ops
* for everything else.
*/
-const struct dma_map_ops swiotlb_dma_ops = {
- .alloc = __dma_direct_alloc_coherent,
- .free = __dma_direct_free_coherent,
- .mmap = dma_direct_mmap_coherent,
+const struct dma_map_ops powerpc_swiotlb_dma_ops = {
+ .alloc = __dma_nommu_alloc_coherent,
+ .free = __dma_nommu_free_coherent,
+ .mmap = dma_nommu_mmap_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
@@ -89,7 +89,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
/* May need to bounce if the device can't address all of DRAM */
if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
- set_dma_ops(dev, &swiotlb_dma_ops);
+ set_dma_ops(dev, &powerpc_swiotlb_dma_ops);
return NOTIFY_DONE;
}
@@ -121,7 +121,7 @@ static int __init check_swiotlb_enabled(void)
if (ppc_swiotlb_enable)
swiotlb_print_info();
else
- swiotlb_free();
+ swiotlb_exit();
return 0;
}
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 4194bbbbdb10..da20569de9d4 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -33,14 +33,14 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev)
struct dev_archdata __maybe_unused *sd = &dev->archdata;
#ifdef CONFIG_SWIOTLB
- if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
+ if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops)
pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif
return pfn;
}
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
+static int dma_nommu_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
@@ -62,7 +62,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
#endif
}
-void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
@@ -105,9 +105,6 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
};
#endif /* CONFIG_FSL_SOC */
- /* ignore region specifiers */
- flag &= ~(__GFP_HIGHMEM);
-
page = alloc_pages_node(node, flag, get_order(size));
if (page == NULL)
return NULL;
@@ -119,7 +116,7 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
#endif
}
-void __dma_direct_free_coherent(struct device *dev, size_t size,
+void __dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
@@ -130,7 +127,7 @@ void __dma_direct_free_coherent(struct device *dev, size_t size,
#endif
}
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
@@ -139,8 +136,8 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
/* The coherent mask may be smaller than the real mask, check if
* we can really use the direct ops
*/
- if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
- return __dma_direct_alloc_coherent(dev, size, dma_handle,
+ if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
+ return __dma_nommu_alloc_coherent(dev, size, dma_handle,
flag, attrs);
/* Ok we can't ... do we have an iommu ? If not, fail */
@@ -154,15 +151,15 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dev_to_node(dev));
}
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
struct iommu_table *iommu;
- /* See comments in dma_direct_alloc_coherent() */
- if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
- return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
+ /* See comments in dma_nommu_alloc_coherent() */
+ if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
+ return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
attrs);
/* Maybe we used an iommu ... */
iommu = get_iommu_table_base(dev);
@@ -175,7 +172,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
iommu_free_coherent(iommu, size, vaddr, dma_handle);
}
-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
@@ -193,7 +190,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot);
}
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
@@ -213,13 +210,13 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
}
-static u64 dma_direct_get_required_mask(struct device *dev)
+static u64 dma_nommu_get_required_mask(struct device *dev)
{
u64 end, mask;
@@ -231,7 +228,7 @@ static u64 dma_direct_get_required_mask(struct device *dev)
return mask;
}
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
@@ -246,7 +243,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
return page_to_phys(page) + offset + get_dma_offset(dev);
}
-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
@@ -255,7 +252,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
}
#ifdef CONFIG_NOT_COHERENT_CACHE
-static inline void dma_direct_sync_sg(struct device *dev,
+static inline void dma_nommu_sync_sg(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
@@ -266,7 +263,7 @@ static inline void dma_direct_sync_sg(struct device *dev,
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
-static inline void dma_direct_sync_single(struct device *dev,
+static inline void dma_nommu_sync_single(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
@@ -274,24 +271,24 @@ static inline void dma_direct_sync_single(struct device *dev,
}
#endif
-const struct dma_map_ops dma_direct_ops = {
- .alloc = dma_direct_alloc_coherent,
- .free = dma_direct_free_coherent,
- .mmap = dma_direct_mmap_coherent,
- .map_sg = dma_direct_map_sg,
- .unmap_sg = dma_direct_unmap_sg,
- .dma_supported = dma_direct_dma_supported,
- .map_page = dma_direct_map_page,
- .unmap_page = dma_direct_unmap_page,
- .get_required_mask = dma_direct_get_required_mask,
+const struct dma_map_ops dma_nommu_ops = {
+ .alloc = dma_nommu_alloc_coherent,
+ .free = dma_nommu_free_coherent,
+ .mmap = dma_nommu_mmap_coherent,
+ .map_sg = dma_nommu_map_sg,
+ .unmap_sg = dma_nommu_unmap_sg,
+ .dma_supported = dma_nommu_dma_supported,
+ .map_page = dma_nommu_map_page,
+ .unmap_page = dma_nommu_unmap_page,
+ .get_required_mask = dma_nommu_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
- .sync_single_for_cpu = dma_direct_sync_single,
- .sync_single_for_device = dma_direct_sync_single,
- .sync_sg_for_cpu = dma_direct_sync_sg,
- .sync_sg_for_device = dma_direct_sync_sg,
+ .sync_single_for_cpu = dma_nommu_sync_single,
+ .sync_single_for_device = dma_nommu_sync_single,
+ .sync_sg_for_cpu = dma_nommu_sync_sg,
+ .sync_sg_for_device = dma_nommu_sync_sg,
#endif
};
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
@@ -302,7 +299,7 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
* is no dma_op->set_coherent_mask() so we have to do
* things the hard way:
*/
- if (get_dma_ops(dev) != &dma_direct_ops ||
+ if (get_dma_ops(dev) != &dma_nommu_ops ||
get_iommu_table_base(dev) == NULL ||
!dma_iommu_dma_supported(dev, mask))
return -EIO;
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3320bcac7192..2748584b767d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -37,6 +37,11 @@
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/exception-64s.h>
+#else
+#include <asm/exception-64e.h>
+#endif
/*
* System calls.
@@ -262,13 +267,23 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
+ ld r2,GPR2(r1)
+ ld r1,GPR1(r1)
+ mtlr r4
+ mtcr r5
+ mtspr SPRN_SRR0,r7
+ mtspr SPRN_SRR1,r8
+ RFI_TO_USER
+ b . /* prevent speculative execution */
+
+ /* exit to kernel */
1: ld r2,GPR2(r1)
ld r1,GPR1(r1)
mtlr r4
mtcr r5
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r8
- RFI
+ RFI_TO_KERNEL
b . /* prevent speculative execution */
.Lsyscall_error:
@@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
mtmsrd r10, 1
mtspr SPRN_SRR0, r11
mtspr SPRN_SRR1, r12
-
- rfid
+ RFI_TO_USER
b . /* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
@@ -878,7 +892,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
REST_GPR(13, r1)
-1:
+
mtspr SPRN_SRR1,r3
ld r2,_CCR(r1)
@@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r3,GPR3(r1)
ld r4,GPR4(r1)
ld r1,GPR1(r1)
+ RFI_TO_USER
+ b . /* prevent speculative execution */
- rfid
+1: mtspr SPRN_SRR1,r3
+
+ ld r2,_CCR(r1)
+ mtcrf 0xFF,r2
+ ld r2,_NIP(r1)
+ mtspr SPRN_SRR0,r2
+
+ ld r0,GPR0(r1)
+ ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r4,GPR4(r1)
+ ld r1,GPR1(r1)
+ RFI_TO_KERNEL
b . /* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3E */
@@ -1073,7 +1101,7 @@ __enter_rtas:
mtspr SPRN_SRR0,r5
mtspr SPRN_SRR1,r6
- rfid
+ RFI_TO_KERNEL
b . /* prevent speculative execution */
rtas_return_loc:
@@ -1098,7 +1126,7 @@ rtas_return_loc:
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
- rfid
+ RFI_TO_KERNEL
b . /* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
@@ -1171,7 +1199,7 @@ _GLOBAL(enter_prom)
LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
andc r11,r11,r12
mtsrr1 r11
- rfid
+ RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */
1: /* Return from OF */
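
The net effect of the entry_64.S changes above: every bare rfid is replaced by a macro that names the destination context, and the common exit path is duplicated so user and kernel returns go through separate instructions. Only exits that may drop privilege need the L1D flush; RFI_TO_KERNEL stays an ordinary return. A minimal C model of that decision table, as a sketch: the mapping is inferred from how the macros are used in this diff, and with the mitigation disabled every variant is patched back to a plain rfid/hrfid.

#include <stdbool.h>
#include <stdio.h>

enum rfi_dest {
	TO_KERNEL,
	TO_USER,
	TO_USER_OR_KERNEL,
	TO_GUEST,
	TO_UNKNOWN,
};

/* Flush whenever the exit may land in a less privileged context, or
 * when the destination cannot be determined statically. */
static bool needs_l1d_flush(enum rfi_dest d)
{
	return d != TO_KERNEL;
}

int main(void)
{
	static const char *names[] = {
		"RFI_TO_KERNEL", "RFI_TO_USER", "RFI_TO_USER_OR_KERNEL",
		"RFI_TO_GUEST", "HRFI_TO_UNKNOWN",
	};

	for (int d = TO_KERNEL; d <= TO_UNKNOWN; d++)
		printf("%-22s flush=%d\n", names[d],
		       needs_l1d_flush((enum rfi_dest)d));
	return 0;
}
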
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e441b469dc8f..2dc10bf646b8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -256,7 +256,7 @@ BEGIN_FTR_SECTION
LOAD_HANDLER(r12, machine_check_handle_early)
1: mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r11
- rfid
+ RFI_TO_KERNEL
b . /* prevent speculative execution */
2:
/* Stack overflow. Stay on emergency stack and panic.
@@ -445,7 +445,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
li r3,MSR_ME
andc r10,r10,r3 /* Turn off MSR_ME */
mtspr SPRN_SRR1,r10
- rfid
+ RFI_TO_KERNEL
b .
2:
/*
@@ -463,7 +463,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
*/
bl machine_check_queue_event
MACHINE_CHECK_HANDLER_WINDUP
- rfid
+ RFI_TO_USER_OR_KERNEL
9:
/* Deliver the machine check to host kernel in V mode. */
MACHINE_CHECK_HANDLER_WINDUP
@@ -598,6 +598,9 @@ EXC_COMMON_BEGIN(slb_miss_common)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+ andi. r9,r11,MSR_PR // Check for exception from userspace
+ cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later
+
/*
* Test MSR_RI before calling slb_allocate_realmode, because the
* MSR in r11 gets clobbered. However we still want to allocate
@@ -624,9 +627,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
/* All done -- return from exception. */
+ bne cr4,1f /* returning to kernel */
+
.machine push
.machine "power4"
mtcrf 0x80,r9
+ mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
mtcrf 0x02,r9 /* I/D indication is in cr6 */
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
@@ -640,9 +646,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
+ RFI_TO_USER
+ b . /* prevent speculative execution */
+1:
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+ mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
+ mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
+ mtcrf 0x02,r9 /* I/D indication is in cr6 */
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+.machine pop
+
+ RESTORE_CTR(r9, PACA_EXSLB)
+ RESTORE_PPR_PACA(PACA_EXSLB, r9)
+ mr r3,r12
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ RFI_TO_KERNEL
b . /* prevent speculative execution */
+
2: std r3,PACA_EXSLB+EX_DAR(r13)
mr r3,r12
mfspr r11,SPRN_SRR0
@@ -651,7 +678,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
- rfid
+ RFI_TO_KERNEL
b .
8: std r3,PACA_EXSLB+EX_DAR(r13)
@@ -662,7 +689,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
- rfid
+ RFI_TO_KERNEL
b .
EXC_COMMON_BEGIN(unrecov_slb)
@@ -901,7 +928,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
mtspr SPRN_SRR0,r10 ; \
ld r10,PACAKMSR(r13) ; \
mtspr SPRN_SRR1,r10 ; \
- rfid ; \
+ RFI_TO_KERNEL ; \
b . ; /* prevent speculative execution */
#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
@@ -917,7 +944,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
xori r12,r12,MSR_LE ; \
mtspr SPRN_SRR1,r12 ; \
mr r13,r9 ; \
- rfid ; /* return to userspace */ \
+ RFI_TO_USER ; /* return to userspace */ \
b . ; /* prevent speculative execution */
#else
#define SYSCALL_FASTENDIAN_TEST
@@ -1063,7 +1090,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
mtcr r11
REST_GPR(11, r1)
ld r1,GPR1(r1)
- hrfid
+ HRFI_TO_USER_OR_KERNEL
1: mtcr r11
REST_GPR(11, r1)
@@ -1314,7 +1341,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
- HRFID
+ HRFI_TO_UNKNOWN
b .
#endif
@@ -1418,10 +1445,94 @@ masked_##_H##interrupt: \
ld r10,PACA_EXGEN+EX_R10(r13); \
ld r11,PACA_EXGEN+EX_R11(r13); \
/* returns to kernel where r13 must be set up, so don't restore it */ \
- ##_H##rfid; \
+ ##_H##RFI_TO_KERNEL; \
b .; \
MASKED_DEC_HANDLER(_H)
+TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ std r12,PACA_EXRFI+EX_R12(r13)
+ std r8,PACA_EXRFI+EX_R13(r13)
+ mfctr r9
+ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+ ld r11,PACA_L1D_FLUSH_SETS(r13)
+ ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+ /*
+ * The load addresses are at staggered offsets within cachelines,
+ * which suits some pipelines better (on others it should not
+ * hurt).
+ */
+ addi r12,r12,8
+ mtctr r11
+ DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+ /* order ld/st prior to dcbt stop all streams with flushing */
+ sync
+1: li r8,0
+ .rept 8 /* 8-way set associative */
+ ldx r11,r10,r8
+ add r8,r8,r12
+ xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
+ add r8,r8,r11 // Add 0, this creates a dependency on the ldx
+ .endr
+ addi r10,r10,128 /* 128 byte cache line */
+ bdnz 1b
+
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ ld r12,PACA_EXRFI+EX_R12(r13)
+ ld r8,PACA_EXRFI+EX_R13(r13)
+ GET_SCRATCH0(r13);
+ rfid
+
+TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ std r12,PACA_EXRFI+EX_R12(r13)
+ std r8,PACA_EXRFI+EX_R13(r13)
+ mfctr r9
+ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+ ld r11,PACA_L1D_FLUSH_SETS(r13)
+ ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+ /*
+ * The load addresses are at staggered offsets within cachelines,
+ * which suits some pipelines better (on others it should not
+ * hurt).
+ */
+ addi r12,r12,8
+ mtctr r11
+ DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+ /* order ld/st prior to dcbt stop all streams with flushing */
+ sync
+1: li r8,0
+ .rept 8 /* 8-way set associative */
+ ldx r11,r10,r8
+ add r8,r8,r12
+ xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
+ add r8,r8,r11 // Add 0, this creates a dependency on the ldx
+ .endr
+ addi r10,r10,128 /* 128 byte cache line */
+ bdnz 1b
+
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ ld r12,PACA_EXRFI+EX_R12(r13)
+ ld r8,PACA_EXRFI+EX_R13(r13)
+ GET_SCRATCH0(r13);
+ hrfid
+
/*
* Real mode exceptions actually use this too, but alternate
* instruction code patches (which end up in the common .text area)
@@ -1441,7 +1552,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
addi r13, r13, 4
mtspr SPRN_SRR0, r13
GET_SCRATCH0(r13)
- rfid
+ RFI_TO_KERNEL
b .
TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
@@ -1453,7 +1564,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
addi r13, r13, 4
mtspr SPRN_HSRR0, r13
GET_SCRATCH0(r13)
- hrfid
+ HRFI_TO_KERNEL
b .
#endif
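
The two fallback trampolines above displace the L1D by loading from a dedicated buffer, touching all 8 ways of every set with staggered offsets before the real rfid/hrfid. A hedged userspace model of the same access pattern; the buffer, congruence and set count come from the PACA fields initialised in setup_64.c later in this diff, and the 64K size here is an assumption about the hardware:

#include <stdint.h>
#include <stdlib.h>

/* One load per (line, way): ways are `congruence` bytes apart plus an
 * 8-byte stagger, lines advance by 128 bytes, mirroring the asm loop. */
static void displacement_flush(const volatile uint8_t *area,
			       uint64_t congruence, uint64_t sets)
{
	uint64_t stride = congruence + 8;	/* addi r12,r12,8 */

	for (uint64_t line = 0; line < sets; line++) {
		uint64_t off = 0;		/* li r8,0 */
		for (int way = 0; way < 8; way++) {	/* .rept 8 */
			(void)area[off];	/* ldx r11,r10,r8 */
			off += stride;		/* add r8,r8,r12 */
		}
		area += 128;			/* 128 byte cache line */
	}
}

int main(void)
{
	uint64_t l1d = 65536;			/* assumed 64K, 8-way L1D */
	uint64_t congruence = l1d / 8;
	uint64_t sets = congruence / 128;
	uint8_t *area = calloc(2 * l1d, 1);	/* 2x for prefetch runoff */

	if (!area)
		return 1;
	displacement_flush(area, congruence, sets);
	free(area);
	return 0;
}
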
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 742e4658c5dc..71e8a1b8c86e 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -273,7 +273,7 @@ static void machine_process_ue_event(struct work_struct *work)
pfn = evt->u.ue_error.physical_address >>
PAGE_SHIFT;
- memory_failure(pfn, SIGBUS, 0);
+ memory_failure(pfn, 0);
} else
pr_warn("Failed to identify bad address from "
"where the uncorrectable error (UE) "
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0ac7aa346c69..590f4d0a6cb1 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -60,7 +60,7 @@ resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);
-static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+static const struct dma_map_ops *pci_dma_ops = &dma_nommu_ops;
void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 72be0c32e902..4208cbe2fb7f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -601,21 +601,16 @@ EXPORT_SYMBOL(flush_all_to_thread);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
- unsigned long error_code, int signal_code, int breakpt)
+ unsigned long error_code, int breakpt)
{
- siginfo_t info;
-
- current->thread.trap_nr = signal_code;
+ current->thread.trap_nr = TRAP_HWBKPT;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return;
/* Deliver the signal to userspace */
- info.si_signo = SIGTRAP;
- info.si_errno = breakpt; /* breakpoint or watchpoint id */
- info.si_code = signal_code;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
+ (void __user *)address);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break (struct pt_regs *regs, unsigned long address,
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 0f0b1b2f3b60..1da8b7d8c6ca 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -388,7 +388,7 @@ out:
return error;
}
-static unsigned int rtas_log_poll(struct file *file, poll_table * wait)
+static __poll_t rtas_log_poll(struct file *file, poll_table * wait)
{
poll_wait(file, &rtas_log_wait, wait);
if (rtas_log_size)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9d213542a48b..3f33869c6486 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -242,14 +242,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned short maj;
unsigned short min;
- /* We only show online cpus: disable preempt (overzealous, I
- * knew) to prevent cpu going down. */
- preempt_disable();
- if (!cpu_online(cpu_id)) {
- preempt_enable();
- return 0;
- }
-
#ifdef CONFIG_SMP
pvr = per_cpu(cpu_pvr, cpu_id);
#else
@@ -358,9 +350,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
seq_printf(m, "\n");
#endif
-
- preempt_enable();
-
/* If this is the last cpu, print the summary */
if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
show_cpuinfo_summary(m);
@@ -791,7 +780,7 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
{
pdev->archdata.dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
- set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_ops(&pdev->dev, &dma_nommu_ops);
}
static __init void print_system_info(void)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8956a9856604..e67413f4a8f0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -38,6 +38,7 @@
#include <linux/memory.h>
#include <linux/nmi.h>
+#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
@@ -801,3 +802,141 @@ static int __init disable_hardlockup_detector(void)
return 0;
}
early_initcall(disable_hardlockup_detector);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+static enum l1d_flush_type enabled_flush_types;
+static void *l1d_flush_fallback_area;
+static bool no_rfi_flush;
+bool rfi_flush;
+
+static int __init handle_no_rfi_flush(char *p)
+{
+ pr_info("rfi-flush: disabled on command line.");
+ no_rfi_flush = true;
+ return 0;
+}
+early_param("no_rfi_flush", handle_no_rfi_flush);
+
+/*
+ * The RFI flush is not KPTI, but because users will see documentation that
+ * says to use nopti, we hijack that option here to also disable the RFI flush.
+ */
+static int __init handle_no_pti(char *p)
+{
+ pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
+ handle_no_rfi_flush(NULL);
+ return 0;
+}
+early_param("nopti", handle_no_pti);
+
+static void do_nothing(void *unused)
+{
+ /*
+ * We don't need to do the flush explicitly; just entering and exiting the
+ * kernel is sufficient, the RFI exit handlers will do the right thing.
+ */
+}
+
+void rfi_flush_enable(bool enable)
+{
+ if (rfi_flush == enable)
+ return;
+
+ if (enable) {
+ do_rfi_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else
+ do_rfi_flush_fixups(L1D_FLUSH_NONE);
+
+ rfi_flush = enable;
+}
+
+static void init_fallback_flush(void)
+{
+ u64 l1d_size, limit;
+ int cpu;
+
+ l1d_size = ppc64_caches.l1d.size;
+ limit = min(safe_stack_limit(), ppc64_rma_size);
+
+ /*
+ * Align to L1d size, and size it at 2x L1d size, to catch possible
+ * hardware prefetch runoff. We don't have a recipe for load patterns to
+ * reliably avoid the prefetcher.
+ */
+ l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
+ memset(l1d_flush_fallback_area, 0, l1d_size * 2);
+
+ for_each_possible_cpu(cpu) {
+ /*
+ * The fallback flush is currently coded for 8-way
+ * associativity. Different associativity is possible, but it
+ * will be treated as 8-way and may not evict the lines as
+ * effectively.
+ *
+ * 128 byte lines are mandatory.
+ */
+ u64 c = l1d_size / 8;
+
+ paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
+ paca[cpu].l1d_flush_congruence = c;
+ paca[cpu].l1d_flush_sets = c / 128;
+ }
+}
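
A quick check of the sizing arithmetic above for two plausible L1D sizes; the concrete sizes are assumptions about the hardware, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sizes[] = { 32768, 65536 };

	for (int i = 0; i < 2; i++) {
		uint64_t l1d = sizes[i];
		uint64_t c = l1d / 8;		/* l1d_flush_congruence */

		printf("L1D %6lluB: congruence=%llu sets=%llu area=%lluB\n",
		       (unsigned long long)l1d,
		       (unsigned long long)c,
		       (unsigned long long)(c / 128),	/* l1d_flush_sets */
		       (unsigned long long)(2 * l1d));
	}
	return 0;
}
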
+
+void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
+{
+ if (types & L1D_FLUSH_FALLBACK) {
+ pr_info("rfi-flush: Using fallback displacement flush\n");
+ init_fallback_flush();
+ }
+
+ if (types & L1D_FLUSH_ORI)
+ pr_info("rfi-flush: Using ori type flush\n");
+
+ if (types & L1D_FLUSH_MTTRIG)
+ pr_info("rfi-flush: Using mttrig type flush\n");
+
+ enabled_flush_types = types;
+
+ if (!no_rfi_flush)
+ rfi_flush_enable(enable);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int rfi_flush_set(void *data, u64 val)
+{
+ if (val == 1)
+ rfi_flush_enable(true);
+ else if (val == 0)
+ rfi_flush_enable(false);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rfi_flush_get(void *data, u64 *val)
+{
+ *val = rfi_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+
+static __init int rfi_flush_debugfs_init(void)
+{
+ debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+ return 0;
+}
+device_initcall(rfi_flush_debugfs_init);
+#endif
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ if (rfi_flush)
+ return sprintf(buf, "Mitigation: RFI Flush\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
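
Taken together, the mitigation is controllable from three places: no_rfi_flush/nopti on the kernel command line, the debugfs switch, and read-only status via the generic vulnerabilities interface. A hedged sketch of the two runtime interfaces; the paths assume debugfs is mounted at /sys/kernel/debug and that powerpc_debugfs_root is its "powerpc" subdirectory:

#include <stdio.h>

static void write_flag(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(void)
{
	char buf[64] = "";
	FILE *f;

	/* Toggle the flush off and back on (root required). */
	write_flag("/sys/kernel/debug/powerpc/rfi_flush", "0\n");
	write_flag("/sys/kernel/debug/powerpc/rfi_flush", "1\n");

	/* Read the status exported by cpu_show_meltdown(). */
	f = fopen("/sys/devices/system/cpu/vulnerabilities/meltdown", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("meltdown: %s", buf);
	if (f)
		fclose(f);
	return 0;
}
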
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 3d7539b90010..61db86ecd318 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -153,6 +153,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
+ if (thread_info_flags & _TIF_PATCH_PENDING)
+ klp_update_patch_state(current);
+
if (thread_info_flags & _TIF_SIGPENDING) {
BUG_ON(regs != current->thread.regs);
do_signal(current);
@@ -163,9 +166,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
tracehook_notify_resume(regs);
}
- if (thread_info_flags & _TIF_PATCH_PENDING)
- klp_update_patch_state(current);
-
user_enter();
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 9ffd73296f64..aded81169648 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -873,75 +873,9 @@ static long restore_tm_user_regs(struct pt_regs *regs,
#endif
#ifdef CONFIG_PPC64
-int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
-{
- int err;
-
- if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- * this code is fixed accordingly.
- * It should never copy any pad contained in the structure
- * to avoid security leaks, but must copy the generic
- * 3 ints plus the relevant union member.
- * This routine must convert siginfo from 64bit to 32bit as well
- * at the same time.
- */
- err = __put_user(s->si_signo, &d->si_signo);
- err |= __put_user(s->si_errno, &d->si_errno);
- err |= __put_user(s->si_code, &d->si_code);
- if (s->si_code < 0)
- err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
- SI_PAD_SIZE32);
- else switch(siginfo_layout(s->si_signo, s->si_code)) {
- case SIL_CHLD:
- err |= __put_user(s->si_pid, &d->si_pid);
- err |= __put_user(s->si_uid, &d->si_uid);
- err |= __put_user(s->si_utime, &d->si_utime);
- err |= __put_user(s->si_stime, &d->si_stime);
- err |= __put_user(s->si_status, &d->si_status);
- break;
- case SIL_FAULT:
- err |= __put_user((unsigned int)(unsigned long)s->si_addr,
- &d->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(s->si_band, &d->si_band);
- err |= __put_user(s->si_fd, &d->si_fd);
- break;
- case SIL_TIMER:
- err |= __put_user(s->si_tid, &d->si_tid);
- err |= __put_user(s->si_overrun, &d->si_overrun);
- err |= __put_user(s->si_int, &d->si_int);
- break;
- case SIL_SYS:
- err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
- err |= __put_user(s->si_syscall, &d->si_syscall);
- err |= __put_user(s->si_arch, &d->si_arch);
- break;
- case SIL_RT:
- err |= __put_user(s->si_int, &d->si_int);
- /* fallthrough */
- case SIL_KILL:
- err |= __put_user(s->si_pid, &d->si_pid);
- err |= __put_user(s->si_uid, &d->si_uid);
- break;
- }
- return err;
-}
#define copy_siginfo_to_user copy_siginfo_to_user32
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
-{
- if (copy_from_user(to, from, 3*sizeof(int)) ||
- copy_from_user(to->_sifields._pad,
- from->_sifields._pad, SI_PAD_SIZE32))
- return -EFAULT;
-
- return 0;
-}
#endif /* CONFIG_PPC64 */
/*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f3eb61be0d30..c93f1e6a9fff 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -917,7 +917,7 @@ void unknown_exception(struct pt_regs *regs)
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
- _exception(SIGTRAP, regs, 0, 0);
+ _exception(SIGTRAP, regs, TRAP_FIXME, 0);
exception_exit(prev_state);
}
@@ -939,7 +939,7 @@ bail:
void RunModeException(struct pt_regs *regs)
{
- _exception(SIGTRAP, regs, 0, 0);
+ _exception(SIGTRAP, regs, TRAP_FIXME, 0);
}
void single_step_exception(struct pt_regs *regs)
@@ -978,7 +978,7 @@ static void emulate_single_step(struct pt_regs *regs)
static inline int __parse_fpscr(unsigned long fpscr)
{
- int ret = 0;
+ int ret = FPE_FIXME;
/* Invalid operation */
if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
@@ -1750,34 +1750,34 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
- do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
5);
changed |= 0x01;
} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
- do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
6);
changed |= 0x01;
} else if (debug_status & DBSR_IAC1) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
- do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
1);
changed |= 0x01;
} else if (debug_status & DBSR_IAC2) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
- do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
2);
changed |= 0x01;
} else if (debug_status & DBSR_IAC3) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
- do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
3);
changed |= 0x01;
} else if (debug_status & DBSR_IAC4) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
- do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
4);
changed |= 0x01;
}
@@ -1929,7 +1929,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
extern int do_spe_mathemu(struct pt_regs *regs);
unsigned long spefscr;
int fpexc_mode;
- int code = 0;
+ int code = FPE_FIXME;
int err;
flush_spe_to_thread(current);
@@ -1998,7 +1998,7 @@ void SPEFloatingPointRoundException(struct pt_regs *regs)
printk(KERN_ERR "unrecognized spe instruction "
"in %s at %lx\n", current->comm, regs->nip);
} else {
- _exception(SIGFPE, regs, 0, regs->nip);
+ _exception(SIGFPE, regs, FPE_FIXME, regs->nip);
return;
}
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0494e1566ee2..307843d23682 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -132,6 +132,15 @@ SECTIONS
/* Read-only data */
RO_DATA(PAGE_SIZE)
+#ifdef CONFIG_PPC64
+ . = ALIGN(8);
+ __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+ __start___rfi_flush_fixup = .;
+ *(__rfi_flush_fixup)
+ __stop___rfi_flush_fixup = .;
+ }
+#endif
+
EXCEPTION_TABLE(0)
NOTES :kernel :notes
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 29ebe2fd5867..a93d719edc90 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
gpte->may_read = true;
gpte->may_write = true;
gpte->page_size = MMU_PAGE_4K;
+ gpte->wimg = HPTE_R_M;
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 966097232d21..b73dbc9e797d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
u32 order;
/* These fields protected by kvm->lock */
+
+ /* Possible values and their usage:
+ * <0 an error occurred during allocation,
+ * -EBUSY allocation is in progress,
+ * 0 allocation completed successfully.
+ */
int error;
- bool prepare_done;
- /* Private to the work thread, until prepare_done is true,
- * then protected by kvm->resize_hpt_sem */
+ /* Private to the work thread, until error != -EBUSY,
+ * then protected by kvm->lock.
+ */
struct kvm_hpt_info hpt;
};
@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
* Reset all the reverse-mapping chains for all memslots
*/
kvmppc_rmap_reset(kvm);
- /* Ensure that each vcpu will flush its TLB on next entry. */
- cpumask_setall(&kvm->arch.need_tlb_flush);
err = 0;
goto out;
}
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
kvmppc_set_hpt(kvm, &info);
out:
+ if (err == 0)
+ /* Ensure that each vcpu will flush its TLB on next entry. */
+ cpumask_setall(&kvm->arch.need_tlb_flush);
+
mutex_unlock(&kvm->lock);
return err;
}
@@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
{
- BUG_ON(kvm->arch.resize_hpt != resize);
+ if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+ return;
if (!resize)
return;
- if (resize->hpt.virt)
- kvmppc_free_hpt(&resize->hpt);
+ if (resize->error != -EBUSY) {
+ if (resize->hpt.virt)
+ kvmppc_free_hpt(&resize->hpt);
+ kfree(resize);
+ }
- kvm->arch.resize_hpt = NULL;
- kfree(resize);
+ if (kvm->arch.resize_hpt == resize)
+ kvm->arch.resize_hpt = NULL;
}
static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work)
struct kvm_resize_hpt,
work);
struct kvm *kvm = resize->kvm;
- int err;
+ int err = 0;
- resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
- resize->order);
-
- err = resize_hpt_allocate(resize);
+ if (WARN_ON(resize->error != -EBUSY))
+ return;
mutex_lock(&kvm->lock);
+ /* Request is still current? */
+ if (kvm->arch.resize_hpt == resize) {
+ /* We may request large allocations here:
+ * avoid sleeping with kvm->lock held while they run.
+ */
+ mutex_unlock(&kvm->lock);
+
+ resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
+ resize->order);
+
+ err = resize_hpt_allocate(resize);
+
+ /* -EBUSY is reserved to mean "allocation in progress",
+ * so resize_hpt_allocate() must never return it.
+ */
+ if (WARN_ON(err == -EBUSY))
+ err = -EINPROGRESS;
+
+ mutex_lock(&kvm->lock);
+ /* It is possible that kvm->arch.resize_hpt != resize
+ * after we grab kvm->lock again.
+ */
+ }
+
resize->error = err;
- resize->prepare_done = true;
+
+ if (kvm->arch.resize_hpt != resize)
+ resize_hpt_release(kvm, resize);
mutex_unlock(&kvm->lock);
}
@@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
if (resize) {
if (resize->order == shift) {
- /* Suitable resize in progress */
- if (resize->prepare_done) {
- ret = resize->error;
- if (ret != 0)
- resize_hpt_release(kvm, resize);
- } else {
+ /* Suitable resize in progress? */
+ ret = resize->error;
+ if (ret == -EBUSY)
ret = 100; /* estimated time in ms */
- }
+ else if (ret)
+ resize_hpt_release(kvm, resize);
goto out;
}
@@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
ret = -ENOMEM;
goto out;
}
+
+ resize->error = -EBUSY;
resize->order = shift;
resize->kvm = kvm;
INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
if (!resize || (resize->order != shift))
goto out;
- ret = -EBUSY;
- if (!resize->prepare_done)
- goto out;
-
ret = resize->error;
- if (ret != 0)
+ if (ret)
goto out;
ret = resize_hpt_rehash(resize);
- if (ret != 0)
+ if (ret)
goto out;
resize_hpt_pivot(resize);
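
After this rework, resize->error alone carries the state that prepare_done used to track: -EBUSY means the allocation is still running, any other negative value is a terminal failure, and 0 means the prepared HPT is ready for the commit ioctl. A small model of the prepare ioctl's return, mirroring the hunk above:

#include <errno.h>
#include <stdio.h>

/* What kvm_vm_ioctl_resize_hpt_prepare() returns for an existing
 * request of the right order, under the new error semantics. */
static long prepare_result(int error)
{
	if (error == -EBUSY)
		return 100;	/* still allocating: estimated time in ms */
	if (error)
		return error;	/* terminal failure: request is released */
	return 0;		/* prepared HPT ready for the commit ioctl */
}

int main(void)
{
	int states[] = { -EBUSY, -ENOMEM, 0 };

	for (int i = 0; i < 3; i++)
		printf("error=%d -> ret=%ld\n", states[i],
		       prepare_result(states[i]));
	return 0;
}
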
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2659844784b8..9c61f736c75b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -79,7 +79,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
mtmsrd r0,1 /* clear RI in MSR */
mtsrr0 r5
mtsrr1 r6
- RFI
+ RFI_TO_KERNEL
kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
@@ -199,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtmsrd r6, 1 /* Clear RI in MSR */
mtsrr0 r8
mtsrr1 r7
- RFI
+ RFI_TO_KERNEL
/* Virtual-mode return */
.Lvirt_return:
@@ -1167,8 +1167,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r0, VCPU_GPR(R0)(r4)
ld r4, VCPU_GPR(R4)(r4)
-
- hrfid
+ HRFI_TO_GUEST
b .
secondary_too_late:
@@ -3320,7 +3319,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
ld r4, PACAKMSR(r13)
mtspr SPRN_SRR0, r3
mtspr SPRN_SRR1, r4
- rfid
+ RFI_TO_KERNEL
9: addi r3, r1, STACK_FRAME_OVERHEAD
bl kvmppc_bad_interrupt
b 9b
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d0dc8624198f..7deaeeb14b93 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
+#define HPTE_R_M _PAGE_COHERENT
#endif
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte.eaddr = eaddr;
pte.vpage = eaddr >> 12;
pte.page_size = MMU_PAGE_64K;
+ pte.wimg = HPTE_R_M;
}
switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 42a4b237df5f..34a5adeff084 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -46,6 +46,9 @@
#define FUNC(name) name
+#define RFI_TO_KERNEL RFI
+#define RFI_TO_GUEST RFI
+
.macro INTERRUPT_TRAMPOLINE intno
.global kvmppc_trampoline_\intno
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
GET_SCRATCH0(r13)
/* And get back into the code */
- RFI
+ RFI_TO_KERNEL
#endif
/*
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
ori r5, r5, MSR_EE
mtsrr0 r7
mtsrr1 r6
- RFI
+ RFI_TO_KERNEL
#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 2a2b96d53999..93a180ceefad 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -156,7 +156,7 @@ no_dcbz32_on:
PPC_LL r9, SVCPU_R9(r3)
PPC_LL r3, (SVCPU_R3)(r3)
- RFI
+ RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:
@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
beqa BOOK3S_INTERRUPT_DOORBELL
- RFI
+ RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 0d750d274c4e..6882bc94eba8 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -16,7 +16,7 @@
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1915e86cef6f..0a7c88786ec0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -39,6 +39,10 @@
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
+#ifdef CONFIG_PPC_PSERIES
+#include <asm/hvcall.h>
+#include <asm/plpar_wrappers.h>
+#endif
#include "timing.h"
#include "irq.h"
@@ -548,6 +552,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_KVM_XICS
case KVM_CAP_IRQ_XICS:
#endif
+ case KVM_CAP_PPC_GET_CPU_CHAR:
r = 1;
break;
@@ -1759,6 +1764,124 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
return r;
}
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * These functions check whether the underlying hardware is safe
+ * against attacks based on observing the effects of speculatively
+ * executed instructions, and whether it supplies instructions for
+ * use in workarounds. The information comes from firmware, either
+ * via the device tree on powernv platforms or from an hcall on
+ * pseries platforms.
+ */
+#ifdef CONFIG_PPC_PSERIES
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+ struct h_cpu_char_result c;
+ unsigned long rc;
+
+ if (!machine_is(pseries))
+ return -ENOTTY;
+
+ rc = plpar_get_cpu_characteristics(&c);
+ if (rc == H_SUCCESS) {
+ cp->character = c.character;
+ cp->behaviour = c.behaviour;
+ cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+ KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+ KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+ KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+ KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+ KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
+ KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
+ KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+ cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+ KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+ KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+ }
+ return 0;
+}
+#else
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+ return -ENOTTY;
+}
+#endif
+
+static inline bool have_fw_feat(struct device_node *fw_features,
+ const char *state, const char *name)
+{
+ struct device_node *np;
+ bool r = false;
+
+ np = of_get_child_by_name(fw_features, name);
+ if (np) {
+ r = of_property_read_bool(np, state);
+ of_node_put(np);
+ }
+ return r;
+}
+
+static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+ struct device_node *np, *fw_features;
+ int r;
+
+ memset(cp, 0, sizeof(*cp));
+ r = pseries_get_cpu_char(cp);
+ if (r != -ENOTTY)
+ return r;
+
+ np = of_find_node_by_name(NULL, "ibm,opal");
+ if (np) {
+ fw_features = of_get_child_by_name(np, "fw-features");
+ of_node_put(np);
+ if (!fw_features)
+ return 0;
+ if (have_fw_feat(fw_features, "enabled",
+ "inst-spec-barrier-ori31,31,0"))
+ cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
+ if (have_fw_feat(fw_features, "enabled",
+ "fw-bcctrl-serialized"))
+ cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
+ if (have_fw_feat(fw_features, "enabled",
+ "inst-l1d-flush-ori30,30,0"))
+ cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
+ if (have_fw_feat(fw_features, "enabled",
+ "inst-l1d-flush-trig2"))
+ cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
+ if (have_fw_feat(fw_features, "enabled",
+ "fw-l1d-thread-split"))
+ cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
+ if (have_fw_feat(fw_features, "enabled",
+ "fw-count-cache-disabled"))
+ cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+ cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+ KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+ KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+ KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+ KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+ KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+
+ if (have_fw_feat(fw_features, "enabled",
+ "speculation-policy-favor-security"))
+ cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
+ if (!have_fw_feat(fw_features, "disabled",
+ "needs-l1d-flush-msr-pr-0-to-1"))
+ cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
+ if (!have_fw_feat(fw_features, "disabled",
+ "needs-spec-barrier-for-bound-checks"))
+ cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+ cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+ KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+ KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+
+ of_node_put(fw_features);
+ }
+
+ return 0;
+}
+#endif
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -1861,6 +1984,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
break;
}
+ case KVM_PPC_GET_CPU_CHAR: {
+ struct kvm_ppc_cpu_char cpuchar;
+
+ r = kvmppc_get_cpu_char(&cpuchar);
+ if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
+ r = -EFAULT;
+ break;
+ }
default: {
struct kvm *kvm = filp->private_data;
r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
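
Userspace queries the new capability per VM. A hedged sketch of the call sequence; it assumes headers from a powerpc kernel carrying this series, where KVM_PPC_GET_CPU_CHAR and struct kvm_ppc_cpu_char are exported via <linux/kvm.h>, and error handling is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	struct kvm_ppc_cpu_char cc;

	if (ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_GET_CPU_CHAR) == 1 &&
	    ioctl(vm, KVM_PPC_GET_CPU_CHAR, &cc) == 0)
		printf("character=%llx/%llx behaviour=%llx/%llx\n",
		       (unsigned long long)cc.character,
		       (unsigned long long)cc.character_mask,
		       (unsigned long long)cc.behaviour,
		       (unsigned long long)cc.behaviour_mask);
	return 0;
}
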
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 41cf5ae273cf..a95ea007d654 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
+#ifdef CONFIG_PPC_BOOK3S_64
+void do_rfi_flush_fixups(enum l1d_flush_type types)
+{
+ unsigned int instrs[3], *dest;
+ long *start, *end;
+ int i;
+
+ start = PTRRELOC(&__start___rfi_flush_fixup);
+ end = PTRRELOC(&__stop___rfi_flush_fixup);
+
+ instrs[0] = 0x60000000; /* nop */
+ instrs[1] = 0x60000000; /* nop */
+ instrs[2] = 0x60000000; /* nop */
+
+ if (types & L1D_FLUSH_FALLBACK)
+ /* b .+16 to fallback flush */
+ instrs[0] = 0x48000010;
+
+ i = 0;
+ if (types & L1D_FLUSH_ORI) {
+ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ }
+
+ if (types & L1D_FLUSH_MTTRIG)
+ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ patch_instruction(dest, instrs[0]);
+ patch_instruction(dest + 1, instrs[1]);
+ patch_instruction(dest + 2, instrs[2]);
+ }
+
+ printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
long *start, *end;
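
do_rfi_flush_fixups() writes the same three instruction words into every __rfi_flush_fixup site. A model of the selection logic with the opcodes copied from the function above; the flag values here are stand-ins, the real enum l1d_flush_type lives in asm/setup.h:

#include <stdint.h>
#include <stdio.h>

/* Stand-in flag values; see asm/setup.h for the real enum. */
enum { FLUSH_NONE = 0, FLUSH_FALLBACK = 1, FLUSH_ORI = 2, FLUSH_MTTRIG = 4 };

static void pick_instrs(int types, uint32_t instrs[3])
{
	int i = 0;

	instrs[0] = instrs[1] = instrs[2] = 0x60000000;	/* nop */

	if (types & FLUSH_FALLBACK)
		instrs[0] = 0x48000010;	/* b .+16 to the fallback trampoline */

	if (types & FLUSH_ORI) {
		instrs[i++] = 0x63ff0000; /* ori 31,31,0: speculation barrier */
		instrs[i++] = 0x63de0000; /* ori 30,30,0: L1d flush */
	}

	if (types & FLUSH_MTTRIG)
		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
}

int main(void)
{
	int types[] = { FLUSH_NONE, FLUSH_FALLBACK, FLUSH_ORI, FLUSH_MTTRIG };
	uint32_t in[3];

	for (int t = 0; t < 4; t++) {
		pick_instrs(types[t], in);
		printf("types=%d: %08x %08x %08x\n",
		       types[t], in[0], in[1], in[2]);
	}
	return 0;
}
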
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 4797d08581ce..6e1e39035380 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
return __bad_area(regs, address, SEGV_MAPERR);
}
+static noinline int bad_access(struct pt_regs *regs, unsigned long address)
+{
+ return __bad_area(regs, address, SEGV_ACCERR);
+}
+
static int do_sigbus(struct pt_regs *regs, unsigned long address,
unsigned int fault)
{
@@ -490,7 +495,7 @@ retry:
good_area:
if (unlikely(access_error(is_write, is_exec, vma)))
- return bad_area(regs, address);
+ return bad_access(regs, address);
/*
* If for any reason at all we couldn't handle the fault,
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 3b65917785a5..422e80253a33 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -90,16 +90,19 @@ void serialize_against_pte_lookup(struct mm_struct *mm)
* We use this to invalidate a pmdp entry before switching from a
* hugepte to regular pmd entry.
*/
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
- pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+ unsigned long old_pmd;
+
+ old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
/*
* This ensures that generic code that rely on IRQ disabling
* to prevent a parallel THP split work as expected.
*/
serialize_against_pte_lookup(vma->vm_mm);
+ return __pmd(old_pmd);
}
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index ec277913e01b..469808e77e58 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -296,28 +296,6 @@ pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
return pgtable;
}
-void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
-{
- VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
- VM_BUG_ON(pmd_devmap(*pmdp));
-
- /*
- * We can't mark the pmd none here, because that will cause a race
- * against exit_mmap. We need to continue mark pmd TRANS HUGE, while
- * we spilt, but at the same time we wan't rest of the ppc64 code
- * not to insert hash pte on this, because we will be modifying
- * the deposited pgtable in the caller of this function. Hence
- * clear the _PAGE_USER so that we move the fault handling to
- * higher level function and that will serialize against ptl.
- * We need to flush existing hash pte entries here even though,
- * the translation is still valid, because we will withdraw
- * pgtable_t after this.
- */
- pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
-}
-
/*
* A linux hugepage PMD was changed and the corresponding hash table entries
need to be flushed.
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index f9941b3b5770..872d1f6dd11e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -18,8 +18,6 @@
#include "bpf_jit32.h"
-int bpf_jit_enable __read_mostly;
-
static inline void bpf_flush_icache(void *start, void *end)
{
smp_wmb();
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index d183b4801bdb..0a34b0cec7b7 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -21,8 +21,6 @@
#include "bpf_jit64.h"
-int bpf_jit_enable __read_mostly;
-
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
memset32(area, BREAKPOINT_INSTRUCTION, size/4);
@@ -383,10 +381,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
goto bpf_alu32_trunc;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
- PPC_CMPWI(src_reg, 0);
- PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_JMP(exit_addr);
if (BPF_OP(code) == BPF_MOD) {
PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
PPC_MULW(b2p[TMP_REG_1], src_reg,
@@ -397,10 +391,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
- PPC_CMPDI(src_reg, 0);
- PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_JMP(exit_addr);
if (BPF_OP(code) == BPF_MOD) {
PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
PPC_MULD(b2p[TMP_REG_1], src_reg,
@@ -995,7 +985,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
struct bpf_prog *tmp_fp;
bool bpf_blinded = false;
- if (!bpf_jit_enable)
+ if (!fp->jit_requested)
return org_fp;
tmp_fp = bpf_jit_blind_constants(org_fp);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 4b91ad08eefd..12352a58072a 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -541,7 +541,7 @@ static struct cbe_iommu *cell_iommu_for_node(int nid)
return NULL;
}
-static unsigned long cell_dma_direct_offset;
+static unsigned long cell_dma_nommu_offset;
static unsigned long dma_iommu_fixed_base;
@@ -580,7 +580,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
device_to_mask(dev), flag,
dev_to_node(dev));
else
- return dma_direct_ops.alloc(dev, size, dma_handle, flag,
+ return dma_nommu_ops.alloc(dev, size, dma_handle, flag,
attrs);
}
@@ -592,7 +592,7 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size,
iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
dma_handle);
else
- dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
+ dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs);
}
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
@@ -601,7 +601,7 @@ static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- return dma_direct_ops.map_page(dev, page, offset, size,
+ return dma_nommu_ops.map_page(dev, page, offset, size,
direction, attrs);
else
return iommu_map_page(dev, cell_get_iommu_table(dev), page,
@@ -614,7 +614,7 @@ static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
+ dma_nommu_ops.unmap_page(dev, dma_addr, size, direction,
attrs);
else
iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
@@ -626,7 +626,7 @@ static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
+ return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs);
else
return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
nents, device_to_mask(dev),
@@ -638,7 +638,7 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
+ dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs);
else
ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
direction, attrs);
@@ -661,8 +661,8 @@ static void cell_dma_dev_setup(struct device *dev)
{
if (get_pci_dma_ops() == &dma_iommu_ops)
set_iommu_table_base(dev, cell_get_iommu_table(dev));
- else if (get_pci_dma_ops() == &dma_direct_ops)
- set_dma_offset(dev, cell_dma_direct_offset);
+ else if (get_pci_dma_ops() == &dma_nommu_ops)
+ set_dma_offset(dev, cell_dma_nommu_offset);
else
BUG();
}
@@ -810,14 +810,14 @@ static int __init cell_iommu_init_disabled(void)
unsigned long base = 0, size;
/* When no iommu is present, we use direct DMA ops */
- set_pci_dma_ops(&dma_direct_ops);
+ set_pci_dma_ops(&dma_nommu_ops);
/* First make sure all IOC translation is turned off */
cell_disable_iommus();
/* If we have no Axon, we set up the spider DMA magic offset */
if (of_find_node_by_name(NULL, "axon") == NULL)
- cell_dma_direct_offset = SPIDER_DMA_OFFSET;
+ cell_dma_nommu_offset = SPIDER_DMA_OFFSET;
/* Now we need to check to see where the memory is mapped
* in PCI space. We assume that all busses use the same dma
@@ -851,13 +851,13 @@ static int __init cell_iommu_init_disabled(void)
return -ENODEV;
}
- cell_dma_direct_offset += base;
+ cell_dma_nommu_offset += base;
- if (cell_dma_direct_offset != 0)
+ if (cell_dma_nommu_offset != 0)
cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
printk("iommu: disabled, direct DMA offset is 0x%lx\n",
- cell_dma_direct_offset);
+ cell_dma_nommu_offset);
return 0;
}
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 6e8a9ef8590e..1a9a756b0b2f 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -86,10 +86,10 @@ static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
return ctx->csa.prob.mb_stat_R;
}
-static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
- unsigned int events)
+static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx,
+ __poll_t events)
{
- int ret;
+ __poll_t ret;
u32 stat;
ret = 0;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 5ffcdeb1eb17..fc7772c3d068 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -762,10 +762,10 @@ out:
return count;
}
-static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
+static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait)
{
struct spu_context *ctx = file->private_data;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &ctx->ibox_wq, wait);
@@ -898,10 +898,10 @@ out:
return count;
}
-static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
+static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait)
{
struct spu_context *ctx = file->private_data;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &ctx->wbox_wq, wait);
@@ -1690,11 +1690,11 @@ out:
return ret;
}
-static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
+static __poll_t spufs_mfc_poll(struct file *file,poll_table *wait)
{
struct spu_context *ctx = file->private_data;
u32 free_elements, tagstatus;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &ctx->mfc_wq, wait);
@@ -2455,11 +2455,11 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
return cnt == 0 ? error : cnt;
}
-static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
+static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait)
{
struct inode *inode = file_inode(file);
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int rc;
poll_wait(file, &ctx->switch_log->wait, wait);
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index 8655c4cbefc2..fff58198b5b6 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -56,11 +56,10 @@ static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
return in_be32(&ctx->spu->problem->mb_stat_R);
}
-static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
- unsigned int events)
+static __poll_t spu_hw_mbox_stat_poll(struct spu_context *ctx, __poll_t events)
{
struct spu *spu = ctx->spu;
- int ret = 0;
+ __poll_t ret = 0;
u32 stat;
spin_lock_irq(&spu->register_lock);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 5e59f80e95db..2d0479ad3af4 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -185,8 +185,7 @@ struct mfc_dma_command {
struct spu_context_ops {
int (*mbox_read) (struct spu_context * ctx, u32 * data);
u32(*mbox_stat_read) (struct spu_context * ctx);
- unsigned int (*mbox_stat_poll)(struct spu_context *ctx,
- unsigned int events);
+ __poll_t (*mbox_stat_poll)(struct spu_context *ctx, __poll_t events);
int (*ibox_read) (struct spu_context * ctx, u32 * data);
int (*wbox_write) (struct spu_context * ctx, u32 data);
u32(*signal1_read) (struct spu_context * ctx);
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 7fec04de27fc..78b80cbd9768 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
*/
if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) {
- dev->dev.dma_ops = &dma_direct_ops;
+ dev->dev.dma_ops = &dma_nommu_ops;
/*
* Set the coherent DMA mask to prevent the iommu
* being used unnecessarily
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index c4a3e93dc324..d0b8ae53660d 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
return 0;
/* We use the direct ops for localbus */
- dev->dma_ops = &dma_direct_ops;
+ dev->dma_ops = &dma_nommu_ops;
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
index d9916ea62305..8ddc1accf199 100644
--- a/arch/powerpc/platforms/powernv/opal-memory-errors.c
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -60,7 +60,7 @@ static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
}
for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) {
- memory_failure(paddr_start >> PAGE_SHIFT, 0, 0);
+ memory_failure(paddr_start >> PAGE_SHIFT, 0);
}
}
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index de4dd09f4a15..c18de0a9b1bd 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -147,7 +147,7 @@ static bool opal_msg_queue_empty(void)
return ret;
}
-static unsigned int opal_prd_poll(struct file *file,
+static __poll_t opal_prd_poll(struct file *file,
struct poll_table_struct *wait)
{
poll_wait(file, &opal_prd_msg_wait, wait);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 749055553064..9582aeb1fe4c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1850,7 +1850,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
if (bypass) {
dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
- set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_ops(&pdev->dev, &dma_nommu_ops);
} else {
/*
* If the device can't set the TCE bypass bit but still wants
@@ -1868,7 +1868,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
return rc;
/* 4GB offset bypasses 32-bit space */
set_dma_offset(&pdev->dev, (1ULL << 32));
- set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_ops(&pdev->dev, &dma_nommu_ops);
} else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) {
/*
* Fail the request if a DMA mask between 32 and 64 bits
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 1edfbc1e40f4..4fb21e17504a 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -37,13 +37,62 @@
#include <asm/kexec.h>
#include <asm/smp.h>
#include <asm/tm.h>
+#include <asm/setup.h>
#include "powernv.h"
+static void pnv_setup_rfi_flush(void)
+{
+ struct device_node *np, *fw_features;
+ enum l1d_flush_type type;
+ int enable;
+
+ /* Default to fallback in case fw-features are not available */
+ type = L1D_FLUSH_FALLBACK;
+ enable = 1;
+
+ np = of_find_node_by_name(NULL, "ibm,opal");
+ fw_features = of_get_child_by_name(np, "fw-features");
+ of_node_put(np);
+
+ if (fw_features) {
+ np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+ if (np && of_property_read_bool(np, "enabled"))
+ type = L1D_FLUSH_MTTRIG;
+
+ of_node_put(np);
+
+ np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
+ if (np && of_property_read_bool(np, "enabled"))
+ type = L1D_FLUSH_ORI;
+
+ of_node_put(np);
+
+ /* Enable unless firmware says NOT to */
+ enable = 2;
+ np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
+ if (np && of_property_read_bool(np, "disabled"))
+ enable--;
+
+ of_node_put(np);
+
+ np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
+ if (np && of_property_read_bool(np, "disabled"))
+ enable--;
+
+ of_node_put(np);
+ of_node_put(fw_features);
+ }
+
+ setup_rfi_flush(type, enable > 0);
+}
+
static void __init pnv_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+ pnv_setup_rfi_flush();
+
/* Initialize SMP */
pnv_smp_init();
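
The powernv probe above defaults to the fallback flush and treats the two needs-l1d-flush-* nodes as vetoes: enable starts at 2, each "disabled" property decrements it, so the flush is turned off only when firmware marks both transitions as not needing one. The counting logic, extracted into a testable sketch:

#include <stdbool.h>
#include <stdio.h>

/* enable starts at 2; each "disabled" firmware property is a veto. */
static bool pnv_flush_enabled(bool hv_to_nonhv_disabled,
			      bool pr0_to_pr1_disabled)
{
	int enable = 2;

	if (hv_to_nonhv_disabled)
		enable--;
	if (pr0_to_pr1_disabled)
		enable--;
	return enable > 0;
}

int main(void)
{
	for (int a = 0; a < 2; a++)
		for (int b = 0; b < 2; b++)
			printf("hv-disabled=%d pr-disabled=%d -> enable=%d\n",
			       a, b, pnv_flush_enabled(a, b));
	return 0;
}
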
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 6e35780c5962..a0b20c03f078 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -574,11 +574,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
static CLASS_ATTR_RW(dlpar);
-static int __init pseries_dlpar_init(void)
+int __init dlpar_workqueue_init(void)
{
+ if (pseries_hp_wq)
+ return 0;
+
pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
- WQ_UNBOUND, 1);
+ WQ_UNBOUND, 1);
+
+ return pseries_hp_wq ? 0 : -ENOMEM;
+}
+
+static int __init dlpar_sysfs_init(void)
+{
+ int rc;
+
+ rc = dlpar_workqueue_init();
+ if (rc)
+ return rc;
+
return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
-machine_device_initcall(pseries, pseries_dlpar_init);
+machine_device_initcall(pseries, dlpar_sysfs_init);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 69921f72e2da..eaa11334fc8c 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1231,7 +1231,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
if (dma_offset != 0) {
dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
set_dma_offset(dev, dma_offset);
- set_dma_ops(dev, &dma_direct_ops);
+ set_dma_ops(dev, &dma_nommu_ops);
ddw_enabled = true;
}
}
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 4470a3194311..1ae1d9f4dbe9 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void)
return CMO_PageSize;
}
+int dlpar_workqueue_init(void);
+
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 4923ffe230cf..81d8614e7379 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -69,7 +69,8 @@ static int __init init_ras_IRQ(void)
/* Hotplug Events */
np = of_find_node_by_path("/event-sources/hot-plug-events");
if (np != NULL) {
- request_event_sources_irqs(np, ras_hotplug_interrupt,
+ if (dlpar_workqueue_init() == 0)
+ request_event_sources_irqs(np, ras_hotplug_interrupt,
"RAS_HOTPLUG");
of_node_put(np);
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a8531e012658..ae4f596273b5 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void)
of_pci_check_probe_only();
}
+static void pseries_setup_rfi_flush(void)
+{
+ struct h_cpu_char_result result;
+ enum l1d_flush_type types;
+ bool enable;
+ long rc;
+
+ /* Enable by default */
+ enable = true;
+
+ rc = plpar_get_cpu_characteristics(&result);
+ if (rc == H_SUCCESS) {
+ types = L1D_FLUSH_NONE;
+
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+ types |= L1D_FLUSH_MTTRIG;
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+ types |= L1D_FLUSH_ORI;
+
+ /* Use fallback if nothing set in hcall */
+ if (types == L1D_FLUSH_NONE)
+ types = L1D_FLUSH_FALLBACK;
+
+ if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+ enable = false;
+ } else {
+ /* Default to fallback in case hcall is not available */
+ types = L1D_FLUSH_FALLBACK;
+ }
+
+ setup_rfi_flush(types, enable);
+}
+
static void __init pSeries_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void)
fwnmi_init();
+ pseries_setup_rfi_flush();
+
/* By default, only probe PCI (can be overridden by rtas_pci) */
pci_add_flags(PCI_PROBE_ONLY);
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index d86938260a86..49e04ec19238 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -618,7 +618,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
static const struct dma_map_ops vio_dma_mapping_ops = {
.alloc = vio_dma_iommu_alloc_coherent,
.free = vio_dma_iommu_free_coherent,
- .mmap = dma_direct_mmap_coherent,
+ .mmap = dma_nommu_mmap_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 3573d54b2770..a6198d4f0f03 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -402,7 +402,7 @@ static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
*/
if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) {
dev_info(dev, "Using 64-bit DMA iommu bypass\n");
- set_dma_ops(dev, &dma_direct_ops);
+ set_dma_ops(dev, &dma_nommu_ops);
} else {
dev_info(dev, "Using 32-bit DMA via iommu\n");
set_dma_ops(dev, &dma_iommu_ops);
@@ -446,7 +446,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
controller_ops->dma_bus_setup = NULL;
/* Setup pci_dma ops */
- set_pci_dma_ops(&dma_direct_ops);
+ set_pci_dma_ops(&dma_nommu_ops);
}
#ifdef CONFIG_PM
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 22d98057f773..61e07c78d64f 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -118,7 +118,7 @@ static void setup_swiotlb_ops(struct pci_controller *hose)
{
if (ppc_swiotlb_enable) {
hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
- set_pci_dma_ops(&swiotlb_dma_ops);
+ set_pci_dma_ops(&powerpc_swiotlb_dma_ops);
}
}
#else
@@ -135,7 +135,7 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
* mapping that allows addressing any RAM address from across PCI.
*/
if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
- set_dma_ops(dev, &dma_direct_ops);
+ set_dma_ops(dev, &dma_nommu_ops);
set_dma_offset(dev, pci64_dma_offset);
}
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index cab24f549e7c..0ddc7ac6c5f1 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2344,10 +2344,10 @@ static void dump_one_paca(int cpu)
DUMP(p, kernel_toc, "lx");
DUMP(p, kernelbase, "lx");
DUMP(p, kernel_msr, "lx");
- DUMP(p, emergency_sp, "p");
+ DUMP(p, emergency_sp, "px");
#ifdef CONFIG_PPC_BOOK3S_64
- DUMP(p, nmi_emergency_sp, "p");
- DUMP(p, mc_emergency_sp, "p");
+ DUMP(p, nmi_emergency_sp, "px");
+ DUMP(p, mc_emergency_sp, "px");
DUMP(p, in_nmi, "x");
DUMP(p, in_mce, "x");
DUMP(p, hmi_event_available, "x");
@@ -2375,17 +2375,21 @@ static void dump_one_paca(int cpu)
DUMP(p, slb_cache_ptr, "x");
for (i = 0; i < SLB_CACHE_ENTRIES; i++)
printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]);
+
+ DUMP(p, rfi_flush_fallback_area, "px");
+ DUMP(p, l1d_flush_congruence, "llx");
+ DUMP(p, l1d_flush_sets, "llx");
#endif
DUMP(p, dscr_default, "llx");
#ifdef CONFIG_PPC_BOOK3E
- DUMP(p, pgd, "p");
- DUMP(p, kernel_pgd, "p");
- DUMP(p, tcd_ptr, "p");
- DUMP(p, mc_kstack, "p");
- DUMP(p, crit_kstack, "p");
- DUMP(p, dbg_kstack, "p");
+ DUMP(p, pgd, "px");
+ DUMP(p, kernel_pgd, "px");
+ DUMP(p, tcd_ptr, "px");
+ DUMP(p, mc_kstack, "px");
+ DUMP(p, crit_kstack, "px");
+ DUMP(p, dbg_kstack, "px");
#endif
- DUMP(p, __current, "p");
+ DUMP(p, __current, "px");
DUMP(p, kstack, "lx");
printf(" kstack_base = 0x%016lx\n", p->kstack & ~(THREAD_SIZE - 1));
DUMP(p, stab_rr, "lx");
@@ -2403,7 +2407,7 @@ static void dump_one_paca(int cpu)
#endif
#ifdef CONFIG_PPC_POWERNV
- DUMP(p, core_idle_state_ptr, "p");
+ DUMP(p, core_idle_state_ptr, "px");
DUMP(p, thread_idle_state, "x");
DUMP(p, thread_mask, "x");
DUMP(p, subcore_sibling_mask, "x");
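
The "p" to "px" conversions above track this cycle's printk hardening: plain %p now prints a hashed pointer value, so a debugger such as xmon must opt into raw addresses with %px. A one-line sketch of the difference (kernel context assumed, ptr being any kernel pointer):

pr_info("hashed: %p raw: %px\n", ptr, ptr);	/* %p is obfuscated, %px prints the real address */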
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 2c6adf12713a..865e14f50c14 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -83,7 +83,7 @@ config PGTABLE_LEVELS
config HAVE_KPROBES
def_bool n
-config DMA_NOOP_OPS
+config DMA_DIRECT_OPS
def_bool y
menu "Platform type"
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index e69de29bb2d1..47dacf06c679 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -0,0 +1,75 @@
+CONFIG_SMP=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RAS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+# CONFIG_RCU_TRACE is not set
+CONFIG_CRYPTO_USER_API_HASH=y
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 970460a0b492..197460ccbf21 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += device.h
generic-y += div64.h
generic-y += dma.h
generic-y += dma-contiguous.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 0d64bc9f4f91..3c7a2c97e377 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -17,10 +17,10 @@
#include <linux/const.h>
/* Status register flags */
-#define SR_IE _AC(0x00000002, UL) /* Interrupt Enable */
-#define SR_PIE _AC(0x00000020, UL) /* Previous IE */
-#define SR_PS _AC(0x00000100, UL) /* Previously Supervisor */
-#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
+#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
#define SR_FS_OFF _AC(0x00000000, UL)
diff --git a/arch/riscv/include/asm/dma-mapping.h b/arch/riscv/include/asm/dma-mapping.h
deleted file mode 100644
index 3eec1000196d..000000000000
--- a/arch/riscv/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2003-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2012 ARM Ltd.
- * Copyright (C) 2016 SiFive, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_RISCV_DMA_MAPPING_H
-#define __ASM_RISCV_DMA_MAPPING_H
-
-/* Use ops->dma_mapping_error (if it exists) or assume success */
-// #undef DMA_ERROR_CODE
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &dma_noop_ops;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-#endif /* __ASM_RISCV_DMA_MAPPING_H */
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index a82ce599b639..b269451e7e85 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -21,8 +21,6 @@
#include <linux/types.h>
-#ifdef CONFIG_MMU
-
extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
/*
@@ -36,8 +34,6 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
-#endif /* CONFIG_MMU */
-
/* Generic IO read/write. These perform native-endian accesses. */
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 6fdc860d7f84..07a3c6d5706f 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -27,25 +27,25 @@ static inline unsigned long arch_local_save_flags(void)
/* unconditionally enable interrupts */
static inline void arch_local_irq_enable(void)
{
- csr_set(sstatus, SR_IE);
+ csr_set(sstatus, SR_SIE);
}
/* unconditionally disable interrupts */
static inline void arch_local_irq_disable(void)
{
- csr_clear(sstatus, SR_IE);
+ csr_clear(sstatus, SR_SIE);
}
/* get status and disable interrupts */
static inline unsigned long arch_local_irq_save(void)
{
- return csr_read_clear(sstatus, SR_IE);
+ return csr_read_clear(sstatus, SR_SIE);
}
/* test flags */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return !(flags & SR_IE);
+ return !(flags & SR_SIE);
}
/* test hardware interrupt enable bit */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
/* set interrupt enabled status */
static inline void arch_local_irq_restore(unsigned long flags)
{
- csr_set(sstatus, flags & SR_IE);
+ csr_set(sstatus, flags & SR_SIE);
}
#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 2cbd92ed1629..16301966d65b 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -20,8 +20,6 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_MMU
-
/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
@@ -413,8 +411,6 @@ static inline void pgtable_cache_init(void)
/* No page table caches to initialize */
}
-#endif /* CONFIG_MMU */
-
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index 93b8956e25e4..2c5df945d43c 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -66,7 +66,7 @@ struct pt_regs {
#define REG_FMT "%08lx"
#endif
-#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0)
+#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
/* Helpers for working with the instruction pointer */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 22c3536ed281..f8fa1cd2dad9 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -64,8 +64,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_stack (init_thread_union.stack)
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 715b0f10af58..7b9c24ebdf52 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -15,8 +15,6 @@
#ifndef _ASM_RISCV_TLBFLUSH_H
#define _ASM_RISCV_TLBFLUSH_H
-#ifdef CONFIG_MMU
-
#include <linux/mm_types.h>
/*
@@ -64,6 +62,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
flush_tlb_all();
}
-#endif /* CONFIG_MMU */
-
#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 27b90d64814b..14b0b22fb578 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -127,7 +127,6 @@ extern int fixup_exception(struct pt_regs *state);
* call.
*/
-#ifdef CONFIG_MMU
#define __get_user_asm(insn, x, ptr, err) \
do { \
uintptr_t __tmp; \
@@ -153,13 +152,11 @@ do { \
__disable_user_access(); \
(x) = __x; \
} while (0)
-#endif /* CONFIG_MMU */
#ifdef CONFIG_64BIT
#define __get_user_8(x, ptr, err) \
__get_user_asm("ld", x, ptr, err)
#else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
#define __get_user_8(x, ptr, err) \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
@@ -193,7 +190,6 @@ do { \
(x) = (__typeof__(x))((__typeof__((x)-(x)))( \
(((u64)__hi << 32) | __lo))); \
} while (0)
-#endif /* CONFIG_MMU */
#endif /* CONFIG_64BIT */
@@ -267,8 +263,6 @@ do { \
((x) = 0, -EFAULT); \
})
-
-#ifdef CONFIG_MMU
#define __put_user_asm(insn, x, ptr, err) \
do { \
uintptr_t __tmp; \
@@ -292,14 +286,11 @@ do { \
: "rJ" (__x), "i" (-EFAULT)); \
__disable_user_access(); \
} while (0)
-#endif /* CONFIG_MMU */
-
#ifdef CONFIG_64BIT
#define __put_user_8(x, ptr, err) \
__put_user_asm("sd", x, ptr, err)
#else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
#define __put_user_8(x, ptr, err) \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
@@ -329,7 +320,6 @@ do { \
: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
__disable_user_access(); \
} while (0)
-#endif /* CONFIG_MMU */
#endif /* CONFIG_64BIT */
@@ -438,7 +428,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
* will set "err" to -EFAULT, while successful accesses return the previous
* value.
*/
-#ifdef CONFIG_MMU
#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
({ \
__typeof__(ptr) __ptr = (ptr); \
@@ -508,6 +497,5 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
(err) = __err; \
__ret; \
})
-#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 9f250ed007cd..2f704a5c4196 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -14,3 +14,4 @@
#define __ARCH_HAVE_MMU
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
+#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/asm/vdso-syscalls.h b/arch/riscv/include/asm/vdso-syscalls.h
deleted file mode 100644
index a2ccf1894929..000000000000
--- a/arch/riscv/include/asm/vdso-syscalls.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
-#define _ASM_RISCV_VDSO_SYSCALLS_H
-
-#ifdef CONFIG_SMP
-
-/* These syscalls are only used by the vDSO and are not in the uapi. */
-#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
-__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
-
-#endif
-
-#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/syscalls.h
new file mode 100644
index 000000000000..818655b0d535
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/syscalls.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM__UAPI__SYSCALLS_H
+#define _ASM__UAPI__SYSCALLS_H
+
+/*
+ * Allows the instruction cache to be flushed from userspace. Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart. There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller. We don't currently do anything with the address range; it is
+ * there for forwards compatibility.
+ */
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+#endif
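
For illustration, userspace would reach this syscall through syscall(2) roughly as follows. This is a hedged sketch: the wrapper name is invented, and flags == 0 is assumed to mean "flush for all threads" per the comment above.

#include <unistd.h>
#include <sys/syscall.h>

/* hypothetical wrapper; __NR_riscv_flush_icache comes from this uapi header */
static void my_flush_icache(void *start, void *end)
{
	syscall(__NR_riscv_flush_icache, start, end, 0UL /* all threads */);
}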
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 20ee86f782a9..7404ec222406 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -196,7 +196,7 @@ handle_syscall:
addi s2, s2, 0x4
REG_S s2, PT_SEPC(sp)
/* System calls run with interrupts enabled */
- csrs sstatus, SR_IE
+ csrs sstatus, SR_SIE
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
andi t0, t0, _TIF_SYSCALL_TRACE
@@ -224,8 +224,8 @@ ret_from_syscall:
ret_from_exception:
REG_L s0, PT_SSTATUS(sp)
- csrc sstatus, SR_IE
- andi s0, s0, SR_PS
+ csrc sstatus, SR_SIE
+ andi s0, s0, SR_SPP
bnez s0, restore_all
resume_userspace:
@@ -255,7 +255,7 @@ work_pending:
bnez s1, work_resched
work_notifysig:
/* Handle pending signals and notify-resume requests */
- csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
+ csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
move a0, sp /* pt_regs */
move a1, s0 /* current_thread_info->flags */
tail do_notify_resume
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 0d90dcc1fbd3..d7c6ca7c95ae 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -25,9 +25,9 @@
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/ptrace.h>
+#include <linux/uaccess.h>
#include <asm/unistd.h>
-#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/csr.h>
#include <asm/string.h>
@@ -76,7 +76,7 @@ void show_regs(struct pt_regs *regs)
void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
- regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
+ regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
regs->sepc = pc;
regs->sp = sp;
set_fs(USER_DS);
@@ -110,7 +110,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
const register unsigned long gp __asm__ ("gp");
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gp = gp;
- childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
+ childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
p->thread.ra = (unsigned long)ret_from_kernel_thread;
p->thread.s[0] = usp; /* fn */
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index a5bd6401f95e..ade52b903a43 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -23,5 +23,4 @@
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd.h>
-#include <asm/vdso-syscalls.h>
};
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
index b0fbad74e873..023e4d4aef58 100644
--- a/arch/riscv/kernel/vdso/flush_icache.S
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/unistd.h>
-#include <asm/vdso-syscalls.h>
.text
/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index df2ca3c65048..ceebfc29305b 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -29,7 +29,6 @@
#include <asm/pgalloc.h>
#include <asm/ptrace.h>
-#include <asm/uaccess.h>
/*
* This routine handles page faults. It determines the address and the
@@ -63,7 +62,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
goto vmalloc_fault;
/* Enable interrupts if they were enabled in the parent context. */
- if (likely(regs->sstatus & SR_PIE))
+ if (likely(regs->sstatus & SR_SPIE))
local_irq_enable();
/*
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 829c67986db7..0105ce28e246 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -108,7 +108,6 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
@@ -140,7 +139,7 @@ config S390
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
- select DMA_NOOP_OPS
+ select DMA_DIRECT_OPS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index de54cfc6109d..fd691c4ff89e 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -88,9 +88,13 @@ KBUILD_CFLAGS += -DCC_USING_HOTPATCH
endif
endif
+# Test CFI features of binutils
+cfi := $(call as-instr,.cfi_startproc\n.cfi_val_offset 15$(comma)-160\n.cfi_endproc,-DCONFIG_AS_CFI_VAL_OFFSET=1)
+
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
-KBUILD_AFLAGS += $(aflags-y)
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables $(cfi)
+KBUILD_AFLAGS += $(aflags-y) $(cfi)
OBJCOPYFLAGS := -O binary
@@ -107,6 +111,7 @@ drivers-y += drivers/s390/
drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/
boot := arch/s390/boot
+syscalls := arch/s390/kernel/syscalls
tools := arch/s390/tools
all: image bzImage
@@ -128,9 +133,12 @@ archclean:
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=$(tools)
+archheaders:
+ $(Q)$(MAKE) $(build)=$(syscalls) uapi
+
archprepare:
- $(Q)$(MAKE) $(build)=$(tools) include/generated/facilities.h
- $(Q)$(MAKE) $(build)=$(tools) include/generated/dis.h
+ $(Q)$(MAKE) $(build)=$(syscalls) kapi
+ $(Q)$(MAKE) $(build)=$(tools) kapi
# Don't use tabs in echo arguments
define archhelp
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 29e3dc99b916..26d6a94f40f6 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -14,6 +14,7 @@ targets += misc.o piggy.o sizes.h head.o
KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index 88e46d4a7784..8150132b144f 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -17,15 +17,15 @@ SECTIONS
HEAD_TEXT
_ehead = . ;
}
- .rodata.compressed : {
- *(.rodata.compressed)
- }
.text : {
_text = .; /* Text */
*(.text)
*(.text.*)
_etext = . ;
}
+ .rodata.compressed : {
+ *(.rodata.compressed)
+ }
.rodata : {
_rodata = . ;
*(.rodata) /* read-only data */
@@ -48,4 +48,10 @@ SECTIONS
_ebss = .;
}
_end = .;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.eh_frame)
+ *(*__ksymtab*)
+ }
}
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 04e042edbab7..7dc7f58c4287 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -26,7 +26,6 @@ CONFIG_NET=y
# CONFIG_IUCV is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_BLK_DEV_RAM=y
# CONFIG_BLK_DEV_XPRAM is not set
# CONFIG_DCSSBLK is not set
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 436865926c26..423ee05887e6 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -239,6 +239,7 @@ static struct shash_alg crc32_vx_algs[] = {
.cra_name = "crc32",
.cra_driver_name = "crc32-vx",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CRC32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crc_ctx),
.cra_module = THIS_MODULE,
@@ -259,6 +260,7 @@ static struct shash_alg crc32_vx_algs[] = {
.cra_name = "crc32be",
.cra_driver_name = "crc32be-vx",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CRC32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crc_ctx),
.cra_module = THIS_MODULE,
@@ -279,6 +281,7 @@ static struct shash_alg crc32_vx_algs[] = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-vx",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CRC32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crc_ctx),
.cra_module = THIS_MODULE,
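
CRYPTO_ALG_OPTIONAL_KEY marks a keyed hash whose ->setkey() does not have to be called: for CRC32 the "key" is merely an optional seed with a sane default. Without the flag, the crypto core's needs-key check would refuse unkeyed use of these algorithms. A hedged sketch of an unkeyed user:

struct crypto_shash *tfm = crypto_alloc_shash("crc32-vx", 0, 0);

/* no crypto_shash_setkey() required; the default CRC seed applies */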
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 048450869328..f6c2ba93b21c 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,9 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
+generated-y += dis-defs.h
+generated-y += facility-defs.h
+generated-y += syscall_table.h
+generated-y += unistd_nr.h
+
generic-y += asm-offsets.h
generic-y += cacheflush.h
generic-y += clkdev.h
generic-y += device.h
generic-y += dma-contiguous.h
+generic-y += dma-mapping.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += export.h
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 5e6a63641a5f..9830fb6b076e 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -189,79 +189,6 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[128/sizeof(int) - 3];
-
- /* kill() */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- pid_t _pid; /* which child */
- uid_t _uid; /* sender's uid */
- int _status;/* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- __u32 _addr; /* faulting insn/memory ref. - pointer */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
-/*
- * How these fields are to be accessed.
- */
-#define si_pid _sifields._kill._pid
-#define si_uid _sifields._kill._uid
-#define si_status _sifields._sigchld._status
-#define si_utime _sifields._sigchld._utime
-#define si_stime _sifields._sigchld._stime
-#define si_value _sifields._rt._sigval
-#define si_int _sifields._rt._sigval.sival_int
-#define si_ptr _sifields._rt._sigval.sival_ptr
-#define si_addr _sifields._sigfault._addr
-#define si_band _sifields._sigpoll._band
-#define si_fd _sifields._sigpoll._fd
-#define si_tid _sifields._timer._tid
-#define si_overrun _sifields._timer._overrun
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 6db78567294c..cdbaad50c7c7 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -229,13 +229,55 @@ struct diag204_x_phys_block {
} __packed;
enum diag26c_sc {
+ DIAG26C_PORT_VNIC = 0x00000024,
DIAG26C_MAC_SERVICES = 0x00000030
};
enum diag26c_version {
- DIAG26C_VERSION2 = 0x00000002 /* z/VM 5.4.0 */
+ DIAG26C_VERSION2 = 0x00000002, /* z/VM 5.4.0 */
+ DIAG26C_VERSION6_VM65918 = 0x00020006 /* z/VM 6.4.0 + VM65918 */
};
+#define DIAG26C_VNIC_INFO 0x0002
+struct diag26c_vnic_req {
+ u32 resp_buf_len;
+ u32 resp_version;
+ u16 req_format;
+ u16 vlan_id;
+ u64 sys_name;
+ u8 res[2];
+ u16 devno;
+} __packed __aligned(8);
+
+#define VNIC_INFO_PROT_L3 1
+#define VNIC_INFO_PROT_L2 2
+/* Note: this is the bare minimum; use it for uninitialized VNICs only. */
+struct diag26c_vnic_resp {
+ u32 version;
+ u32 entry_cnt;
+ /* VNIC info: */
+ u32 next_entry;
+ u64 owner;
+ u16 devno;
+ u8 status;
+ u8 type;
+ u64 lan_owner;
+ u64 lan_name;
+ u64 port_name;
+ u8 port_type;
+ u8 ext_status:6;
+ u8 protocol:2;
+ u16 base_devno;
+ u32 port_num;
+ u32 ifindex;
+ u32 maxinfo;
+ u32 dev_count;
+ /* 3x device info: */
+ u8 dev_info1[28];
+ u8 dev_info2[28];
+ u8 dev_info3[28];
+} __packed __aligned(8);
+
#define DIAG26C_GET_MAC 0x0000
struct diag26c_mac_req {
u32 resp_buf_len;
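
A heavily hedged sketch of how a driver might drive the new VNIC_INFO subfunction, by analogy with the existing GET_MAC usage. diag26c() is the existing arch helper, but the exact calling convention for this subcode is an assumption:

struct diag26c_vnic_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
struct diag26c_vnic_resp *resp = kzalloc(sizeof(*resp), GFP_KERNEL);
int rc;

req->resp_buf_len = sizeof(*resp);
req->resp_version = DIAG26C_VERSION6_VM65918;
req->req_format = DIAG26C_VNIC_INFO;
req->devno = devno;				/* hypothetical device number */
rc = diag26c(req, resp, DIAG26C_PORT_VNIC);	/* assumed subcode pairing */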
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index b0480c60a8e1..c18ed6091914 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -9,7 +9,7 @@
#ifndef __ASM_S390_DIS_H__
#define __ASM_S390_DIS_H__
-#include <generated/dis.h>
+#include <asm/dis-defs.h>
static inline int insn_length(unsigned char code)
{
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
deleted file mode 100644
index eaf490f9c5bc..000000000000
--- a/arch/s390/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_DMA_MAPPING_H
-#define _ASM_S390_DMA_MAPPING_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/io.h>
-
-extern const struct dma_map_ops s390_pci_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &dma_noop_ops;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-#endif /* _ASM_S390_DMA_MAPPING_H */
diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h
new file mode 100644
index 000000000000..4f21ae561e4d
--- /dev/null
+++ b/arch/s390/include/asm/dwarf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_DWARF_H
+#define _ASM_S390_DWARF_H
+
+#ifdef __ASSEMBLY__
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
+#define CFI_RESTORE .cfi_restore
+
+#ifdef CONFIG_AS_CFI_VAL_OFFSET
+#define CFI_VAL_OFFSET .cfi_val_offset
+#else
+#define CFI_VAL_OFFSET #
+#endif
+
+#ifndef BUILD_VDSO
+ /*
+ * Emit CFI data in .debug_frame sections and not in .eh_frame
+ * sections. The .eh_frame CFI would only provide runtime unwind
+ * information, which the kernel does not use. Hence, vmlinux.lds.S
+ * can discard the .eh_frame sections.
+ */
+ .cfi_sections .debug_frame
+#else
+ /*
+ * For the vDSO, emit CFI data in both the .eh_frame and
+ * .debug_frame sections.
+ */
+ .cfi_sections .eh_frame, .debug_frame
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_DWARF_H */
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index f040644575b7..fbe0c4be3cd8 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -8,7 +8,7 @@
#ifndef __ASM_FACILITY_H
#define __ASM_FACILITY_H
-#include <generated/facilities.h>
+#include <asm/facility-defs.h>
#include <linux/string.h>
#include <linux/preempt.h>
#include <asm/lowcore.h>
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e14f381757f6..c1b0a9ac1dc8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -207,7 +207,8 @@ struct kvm_s390_sie_block {
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
- __u8 reserved60; /* 0x0060 */
+#define FPF_BPBC 0x20
+ __u8 fpf; /* 0x0060 */
#define ECB_GS 0x40
#define ECB_TE 0x10
#define ECB_SRSI 0x04
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index e8d9161fa17a..419fac7a62c0 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -201,4 +201,7 @@ void dma_cleanup_tables(unsigned long *);
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
+extern const struct dma_map_ops s390_pci_dma_ops;
+
+
#endif
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0a6b0286c32e..2d24d33bf188 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1505,12 +1505,12 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
}
#define __HAVE_ARCH_PMDP_INVALIDATE
-static inline void pmdp_invalidate(struct vm_area_struct *vma,
+static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
- pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
+ return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
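
Returning the old entry lets a caller fold the read and the invalidation into a single atomic transition, so dirty/young bits set by hardware cannot be lost in between. A caller-side sketch in the style of the generic THP code (not part of this patch):

pmd_t old_pmd = pmdp_invalidate(vma, addr, pmdp);

if (pmd_dirty(old_pmd))		/* no window for a racing hardware update */
	set_page_dirty(page);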
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 0880a37b6d3b..25d6ec3aaddd 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -42,8 +42,6 @@ struct thread_info {
.flags = 0, \
}
-#define init_stack (init_thread_union.stack)
-
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 7807093b73be..fd79c0d35dc4 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -8,6 +8,7 @@
#define _ASM_S390_UNISTD_H_
#include <uapi/asm/unistd.h>
+#include <asm/unistd_nr.h>
#define __IGNORE_time
#define __IGNORE_pkey_mprotect
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 92b7c9b3e641..faef3f7e8353 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -2,6 +2,9 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generated-y += unistd_32.h
+generated-y += unistd_64.h
+
generic-y += errno.h
generic-y += fcntl.h
generic-y += ioctl.h
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 38535a57fef8..4cdaa55fabfe 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -224,6 +224,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_RICCB (1UL << 7)
#define KVM_SYNC_FPRS (1UL << 8)
#define KVM_SYNC_GSCB (1UL << 9)
+#define KVM_SYNC_BPBC (1UL << 10)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)
@@ -247,7 +248,9 @@ struct kvm_sync_regs {
};
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
- __u8 padding1[52]; /* riccb needs to be 64byte aligned */
+ __u8 bpbc : 1; /* bp mode */
+ __u8 reserved2 : 7;
+ __u8 padding1[51]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
__u8 padding2[192]; /* sdnx needs to be 256byte aligned */
union {
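
From the VMM side, the new bit rides the existing sync-regs protocol. A hedged userspace sketch, where run is the vcpu's mmap'ed struct kvm_run and vcpu_fd is assumed:

run->s.regs.bpbc = 1;			/* request branch-prediction blocking */
run->kvm_dirty_regs |= KVM_SYNC_BPBC;
ioctl(vcpu_fd, KVM_RUN, 0);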
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 725120939051..01b5fe8b9db6 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -8,405 +8,10 @@
#ifndef _UAPI_ASM_S390_UNISTD_H_
#define _UAPI_ASM_S390_UNISTD_H_
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_restart_syscall 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_pause 29
-#define __NR_utime 30
-#define __NR_access 33
-#define __NR_nice 34
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_brk 45
-#define __NR_signal 48
-#define __NR_acct 51
-#define __NR_umount2 52
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_setpgid 57
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_symlink 83
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_lookup_dcookie 110
-#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_getdents 141
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_query_module 167
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
-#define __NR_getpmsg 188
-#define __NR_putpmsg 189
-#define __NR_vfork 190
-#define __NR_pivot_root 217
-#define __NR_mincore 218
-#define __NR_madvise 219
-#define __NR_getdents64 220
-#define __NR_readahead 222
-#define __NR_setxattr 224
-#define __NR_lsetxattr 225
-#define __NR_fsetxattr 226
-#define __NR_getxattr 227
-#define __NR_lgetxattr 228
-#define __NR_fgetxattr 229
-#define __NR_listxattr 230
-#define __NR_llistxattr 231
-#define __NR_flistxattr 232
-#define __NR_removexattr 233
-#define __NR_lremovexattr 234
-#define __NR_fremovexattr 235
-#define __NR_gettid 236
-#define __NR_tkill 237
-#define __NR_futex 238
-#define __NR_sched_setaffinity 239
-#define __NR_sched_getaffinity 240
-#define __NR_tgkill 241
-/* Number 242 is reserved for tux */
-#define __NR_io_setup 243
-#define __NR_io_destroy 244
-#define __NR_io_getevents 245
-#define __NR_io_submit 246
-#define __NR_io_cancel 247
-#define __NR_exit_group 248
-#define __NR_epoll_create 249
-#define __NR_epoll_ctl 250
-#define __NR_epoll_wait 251
-#define __NR_set_tid_address 252
-#define __NR_fadvise64 253
-#define __NR_timer_create 254
-#define __NR_timer_settime 255
-#define __NR_timer_gettime 256
-#define __NR_timer_getoverrun 257
-#define __NR_timer_delete 258
-#define __NR_clock_settime 259
-#define __NR_clock_gettime 260
-#define __NR_clock_getres 261
-#define __NR_clock_nanosleep 262
-/* Number 263 is reserved for vserver */
-#define __NR_statfs64 265
-#define __NR_fstatfs64 266
-#define __NR_remap_file_pages 267
-#define __NR_mbind 268
-#define __NR_get_mempolicy 269
-#define __NR_set_mempolicy 270
-#define __NR_mq_open 271
-#define __NR_mq_unlink 272
-#define __NR_mq_timedsend 273
-#define __NR_mq_timedreceive 274
-#define __NR_mq_notify 275
-#define __NR_mq_getsetattr 276
-#define __NR_kexec_load 277
-#define __NR_add_key 278
-#define __NR_request_key 279
-#define __NR_keyctl 280
-#define __NR_waitid 281
-#define __NR_ioprio_set 282
-#define __NR_ioprio_get 283
-#define __NR_inotify_init 284
-#define __NR_inotify_add_watch 285
-#define __NR_inotify_rm_watch 286
-#define __NR_migrate_pages 287
-#define __NR_openat 288
-#define __NR_mkdirat 289
-#define __NR_mknodat 290
-#define __NR_fchownat 291
-#define __NR_futimesat 292
-#define __NR_unlinkat 294
-#define __NR_renameat 295
-#define __NR_linkat 296
-#define __NR_symlinkat 297
-#define __NR_readlinkat 298
-#define __NR_fchmodat 299
-#define __NR_faccessat 300
-#define __NR_pselect6 301
-#define __NR_ppoll 302
-#define __NR_unshare 303
-#define __NR_set_robust_list 304
-#define __NR_get_robust_list 305
-#define __NR_splice 306
-#define __NR_sync_file_range 307
-#define __NR_tee 308
-#define __NR_vmsplice 309
-#define __NR_move_pages 310
-#define __NR_getcpu 311
-#define __NR_epoll_pwait 312
-#define __NR_utimes 313
-#define __NR_fallocate 314
-#define __NR_utimensat 315
-#define __NR_signalfd 316
-#define __NR_timerfd 317
-#define __NR_eventfd 318
-#define __NR_timerfd_create 319
-#define __NR_timerfd_settime 320
-#define __NR_timerfd_gettime 321
-#define __NR_signalfd4 322
-#define __NR_eventfd2 323
-#define __NR_inotify_init1 324
-#define __NR_pipe2 325
-#define __NR_dup3 326
-#define __NR_epoll_create1 327
-#define __NR_preadv 328
-#define __NR_pwritev 329
-#define __NR_rt_tgsigqueueinfo 330
-#define __NR_perf_event_open 331
-#define __NR_fanotify_init 332
-#define __NR_fanotify_mark 333
-#define __NR_prlimit64 334
-#define __NR_name_to_handle_at 335
-#define __NR_open_by_handle_at 336
-#define __NR_clock_adjtime 337
-#define __NR_syncfs 338
-#define __NR_setns 339
-#define __NR_process_vm_readv 340
-#define __NR_process_vm_writev 341
-#define __NR_s390_runtime_instr 342
-#define __NR_kcmp 343
-#define __NR_finit_module 344
-#define __NR_sched_setattr 345
-#define __NR_sched_getattr 346
-#define __NR_renameat2 347
-#define __NR_seccomp 348
-#define __NR_getrandom 349
-#define __NR_memfd_create 350
-#define __NR_bpf 351
-#define __NR_s390_pci_mmio_write 352
-#define __NR_s390_pci_mmio_read 353
-#define __NR_execveat 354
-#define __NR_userfaultfd 355
-#define __NR_membarrier 356
-#define __NR_recvmmsg 357
-#define __NR_sendmmsg 358
-#define __NR_socket 359
-#define __NR_socketpair 360
-#define __NR_bind 361
-#define __NR_connect 362
-#define __NR_listen 363
-#define __NR_accept4 364
-#define __NR_getsockopt 365
-#define __NR_setsockopt 366
-#define __NR_getsockname 367
-#define __NR_getpeername 368
-#define __NR_sendto 369
-#define __NR_sendmsg 370
-#define __NR_recvfrom 371
-#define __NR_recvmsg 372
-#define __NR_shutdown 373
-#define __NR_mlock2 374
-#define __NR_copy_file_range 375
-#define __NR_preadv2 376
-#define __NR_pwritev2 377
-#define __NR_s390_guarded_storage 378
-#define __NR_statx 379
-#define __NR_s390_sthyi 380
-#define NR_syscalls 381
-
-/*
- * There are some system calls that are not present on 64 bit, some
- * have a different name although they do the same (e.g. __NR_chown32
- * is __NR_chown on 64 bit).
- */
-#ifndef __s390x__
-
-#define __NR_time 13
-#define __NR_lchown 16
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_getrlimit 76
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_fchown 95
-#define __NR_ioperm 101
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR__newselect 142
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_chown 182
-#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_lchown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_chown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_fcntl64 221
-#define __NR_sendfile64 223
-#define __NR_fadvise64_64 264
-#define __NR_fstatat64 293
-
+#ifdef __s390x__
+#include <asm/unistd_64.h>
#else
-
-#define __NR_select 142
-#define __NR_getrlimit 191 /* SuS compliant getrlimit */
-#define __NR_lchown 198
-#define __NR_getuid 199
-#define __NR_getgid 200
-#define __NR_geteuid 201
-#define __NR_getegid 202
-#define __NR_setreuid 203
-#define __NR_setregid 204
-#define __NR_getgroups 205
-#define __NR_setgroups 206
-#define __NR_fchown 207
-#define __NR_setresuid 208
-#define __NR_getresuid 209
-#define __NR_setresgid 210
-#define __NR_getresgid 211
-#define __NR_chown 212
-#define __NR_setuid 213
-#define __NR_setgid 214
-#define __NR_setfsuid 215
-#define __NR_setfsgid 216
-#define __NR_newfstatat 293
-
+#include <asm/unistd_32.h>
#endif
#endif /* _UAPI_ASM_S390_UNISTD_H_ */
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 59eea9c65d3e..79b7a3438d54 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
{
- return sys_setgid((gid_t)gid);
+ return sys_setgid(low2highgid(gid));
}
COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
{
- return sys_setuid((uid_t)uid);
+ return sys_setuid(low2highuid(uid));
}
COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
{
- return sys_setfsuid((uid_t)uid);
+ return sys_setfsuid(low2highuid(uid));
}
COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
{
- return sys_setfsgid((gid_t)gid);
+ return sys_setfsgid(low2highgid(gid));
}
static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index ef246940b44c..18c1eeb847b2 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -50,106 +50,6 @@ typedef struct
struct ucontext32 uc;
} rt_sigframe32;
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- int err;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member.
- This routine must convert siginfo from 64bit to 32bit as well
- at the same time. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_RT:
- err |= __put_user(from->si_int, &to->si_int);
- /* fallthrough */
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- break;
- case SIL_FAULT:
- err |= __put_user((unsigned long) from->si_addr,
- &to->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- default:
- break;
- }
- }
- return err ? -EFAULT : 0;
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- int err;
- u32 tmp;
-
- err = __get_user(to->si_signo, &from->si_signo);
- err |= __get_user(to->si_errno, &from->si_errno);
- err |= __get_user(to->si_code, &from->si_code);
-
- if (to->si_code < 0)
- err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(to->si_signo, to->si_code)) {
- case SIL_RT:
- err |= __get_user(to->si_int, &from->si_int);
- /* fallthrough */
- case SIL_KILL:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- break;
- case SIL_CHLD:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- err |= __get_user(to->si_utime, &from->si_utime);
- err |= __get_user(to->si_stime, &from->si_stime);
- err |= __get_user(to->si_status, &from->si_status);
- break;
- case SIL_FAULT:
- err |= __get_user(tmp, &from->si_addr);
- to->si_addr = (void __force __user *)
- (u64) (tmp & PSW32_ADDR_INSN);
- break;
- case SIL_POLL:
- err |= __get_user(to->si_band, &from->si_band);
- err |= __get_user(to->si_fd, &from->si_fd);
- break;
- case SIL_TIMER:
- err |= __get_user(to->si_tid, &from->si_tid);
- err |= __get_user(to->si_overrun, &from->si_overrun);
- err |= __get_user(to->si_int, &from->si_int);
- break;
- default:
- break;
- }
- }
- return err ? -EFAULT : 0;
-}
-
/* Store registers needed to create the signal frame */
static void store_sigregs(void)
{
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 9e5f6cd8e4c2..6cd444d25545 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1392,7 +1392,7 @@ cleanup_critical:
#define SYSCALL(esame,emu) .long esame
.globl sys_call_table
sys_call_table:
-#include "syscalls.S"
+#include "asm/syscall_table.h"
#undef SYSCALL
#ifdef CONFIG_COMPAT
@@ -1400,6 +1400,6 @@ sys_call_table:
#define SYSCALL(esame,emu) .long emu
.globl sys_call_table_emu
sys_call_table_emu:
-#include "syscalls.S"
+#include "asm/syscall_table.h"
#undef SYSCALL
#endif
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index b86fa3ee4927..5c42f16a54c4 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -66,7 +66,7 @@ __HEAD
# subroutine to wait for end I/O
#
.Lirqwait:
- mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw
+ mvc __LC_IO_NEW_PSW(16),.Lnewpsw # set up IO interrupt psw
lpsw .Lwaitpsw
.Lioint:
br %r14
@@ -98,7 +98,7 @@ __HEAD
bnz .Llderr
.Lwait4irq:
bas %r14,.Lirqwait
- c %r1,0xb8 # compare subchannel number
+ c %r1,__LC_SUBCHANNEL_ID # compare subchannel number
bne .Lwait4irq
tsch 0(%r5)
@@ -156,9 +156,9 @@ iplstart:
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
sam31 # switch to 31 bit addressing mode
- lh %r1,0xb8 # test if subchannel number
+ lh %r1,__LC_SUBCHANNEL_ID # test if subchannel number
bct %r1,.Lnoload # is valid
- l %r1,0xb8 # load ipl subchannel number
+ l %r1,__LC_SUBCHANNEL_ID # load ipl subchannel number
la %r2,IPL_BS # load start address
bas %r14,.Lloader # load rest of ipl image
l %r12,.Lparm # pointer to parameter area
@@ -239,7 +239,7 @@ iplstart:
bz .Lnoreset
.Lwaitforirq:
bas %r14,.Lirqwait # wait for IO interrupt
- c %r1,0xb8 # compare subchannel number
+ c %r1,__LC_SUBCHANNEL_ID # compare subchannel number
bne .Lwaitforirq
la %r5,.Lirb
tsch 0(%r5)
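
Replacing the raw lowcore offsets (0xb8, 0x1f0) with __LC_SUBCHANNEL_ID and __LC_IO_NEW_PSW makes head.S self-documenting. Constants like these are normally derived from the C layout of struct lowcore by an asm-offsets style generator; a self-contained sketch under that assumption (field names and padding are illustrative):

#include <stddef.h>
#include <stdio.h>

struct lowcore {
	char pad0[0xb8];
	unsigned int subchannel_id;	/* offset 0xb8 */
	char pad1[0x1f0 - 0xbc];
	unsigned long io_new_psw[2];	/* offset 0x1f0, 16 bytes */
};

int main(void)
{
	/* the printed lines end up in a generated header used by .S files */
	printf("#define __LC_SUBCHANNEL_ID 0x%zx\n",
	       offsetof(struct lowcore, subchannel_id));
	printf("#define __LC_IO_NEW_PSW 0x%zx\n",
	       offsetof(struct lowcore, io_new_psw));
	return 0;
}
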
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 8ecb8726ac47..da5cc3b469aa 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -119,13 +119,9 @@ static char *dump_type_str(enum dump_type type)
}
}
-/*
- * Must be in data section since the bss section
- * is not cleared when these are accessed.
- */
-static u8 ipl_ssid __section(.data) = 0;
-static u16 ipl_devno __section(.data) = 0;
-u32 ipl_flags __section(.data) = 0;
+static u8 ipl_ssid;
+static u16 ipl_devno;
+u32 ipl_flags;
enum ipl_method {
REIPL_METHOD_CCW_CIO,
@@ -148,7 +144,7 @@ enum dump_method {
DUMP_METHOD_FCP_DIAG,
};
-static int diag308_set_works = 0;
+static int diag308_set_works;
static struct ipl_parameter_block ipl_block;
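
The deleted comment explained why these variables were pinned to .data: early boot code touched them before .bss was cleared. Dropping both the __section annotations and the explicit = 0 initializers means the variables now land in .bss and rely on it being zeroed before first use, which also matches kernel style for statics. A small C sketch of the placement rules assumed here:

/* zero-initialized statics go to .bss by default */
static int in_bss;
static int also_bss = 0;	/* an explicit = 0 still ends up in .bss */

/* forcing a variable into .data, as the removed annotations did */
static int in_data __attribute__((section(".data")));
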
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index af3722c28fd9..943d13e90c98 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -20,7 +20,6 @@
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
-#include <linux/uaccess.h>
#include <asm/dis.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b8c1a85bcf2d..a919b2f0141d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1151,7 +1151,7 @@ static ssize_t __ref rescan_store(struct device *dev,
rc = smp_rescan_cpus();
return rc ? rc : count;
}
-static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
+static DEVICE_ATTR_WO(rescan);
#endif /* CONFIG_HOTPLUG_CPU */
static int __init s390_smp_init(void)
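
DEVICE_ATTR_WO(rescan) encodes exactly the replaced triple: mode 0200, no show callback, and a store callback whose name is derived from the attribute name. Roughly what the macro expands to (a simplified sketch of the driver-core helper):

struct device_attribute dev_attr_rescan = {
	.attr  = { .name = "rescan", .mode = 0200 },
	.show  = NULL,
	.store = rescan_store,	/* <name>_store, i.e. rescan_store above */
};
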
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
deleted file mode 100644
index f7fc63385553..000000000000
--- a/arch/s390/kernel/syscalls.S
+++ /dev/null
@@ -1,392 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * definitions for sys_call_table, each line represents an
- * entry in the table in the form
- * SYSCALL(64 bit syscall, 31 bit emulated syscall)
- *
- * this file is meant to be included from entry.S
- */
-
-#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall)
-
-NI_SYSCALL /* 0 */
-SYSCALL(sys_exit,sys_exit)
-SYSCALL(sys_fork,sys_fork)
-SYSCALL(sys_read,compat_sys_s390_read)
-SYSCALL(sys_write,compat_sys_s390_write)
-SYSCALL(sys_open,compat_sys_open) /* 5 */
-SYSCALL(sys_close,sys_close)
-SYSCALL(sys_restart_syscall,sys_restart_syscall)
-SYSCALL(sys_creat,compat_sys_creat)
-SYSCALL(sys_link,compat_sys_link)
-SYSCALL(sys_unlink,compat_sys_unlink) /* 10 */
-SYSCALL(sys_execve,compat_sys_execve)
-SYSCALL(sys_chdir,compat_sys_chdir)
-SYSCALL(sys_ni_syscall,compat_sys_time) /* old time syscall */
-SYSCALL(sys_mknod,compat_sys_mknod)
-SYSCALL(sys_chmod,compat_sys_chmod) /* 15 */
-SYSCALL(sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/
-NI_SYSCALL /* old break syscall holder */
-NI_SYSCALL /* old stat syscall holder */
-SYSCALL(sys_lseek,compat_sys_lseek)
-SYSCALL(sys_getpid,sys_getpid) /* 20 */
-SYSCALL(sys_mount,compat_sys_mount)
-SYSCALL(sys_oldumount,compat_sys_oldumount)
-SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
-SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
-SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
-SYSCALL(sys_ptrace,compat_sys_ptrace)
-SYSCALL(sys_alarm,sys_alarm)
-NI_SYSCALL /* old fstat syscall */
-SYSCALL(sys_pause,sys_pause)
-SYSCALL(sys_utime,compat_sys_utime) /* 30 */
-NI_SYSCALL /* old stty syscall */
-NI_SYSCALL /* old gtty syscall */
-SYSCALL(sys_access,compat_sys_access)
-SYSCALL(sys_nice,sys_nice)
-NI_SYSCALL /* 35 old ftime syscall */
-SYSCALL(sys_sync,sys_sync)
-SYSCALL(sys_kill,sys_kill)
-SYSCALL(sys_rename,compat_sys_rename)
-SYSCALL(sys_mkdir,compat_sys_mkdir)
-SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */
-SYSCALL(sys_dup,sys_dup)
-SYSCALL(sys_pipe,compat_sys_pipe)
-SYSCALL(sys_times,compat_sys_times)
-NI_SYSCALL /* old prof syscall */
-SYSCALL(sys_brk,compat_sys_brk) /* 45 */
-SYSCALL(sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/
-SYSCALL(sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/
-SYSCALL(sys_signal,compat_sys_signal)
-SYSCALL(sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */
-SYSCALL(sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */
-SYSCALL(sys_acct,compat_sys_acct)
-SYSCALL(sys_umount,compat_sys_umount)
-NI_SYSCALL /* old lock syscall */
-SYSCALL(sys_ioctl,compat_sys_ioctl)
-SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */
-NI_SYSCALL /* intel mpx syscall */
-SYSCALL(sys_setpgid,sys_setpgid)
-NI_SYSCALL /* old ulimit syscall */
-NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_umask,sys_umask) /* 60 */
-SYSCALL(sys_chroot,compat_sys_chroot)
-SYSCALL(sys_ustat,compat_sys_ustat)
-SYSCALL(sys_dup2,sys_dup2)
-SYSCALL(sys_getppid,sys_getppid)
-SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */
-SYSCALL(sys_setsid,sys_setsid)
-SYSCALL(sys_sigaction,compat_sys_sigaction)
-NI_SYSCALL /* old sgetmask syscall*/
-NI_SYSCALL /* old ssetmask syscall*/
-SYSCALL(sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */
-SYSCALL(sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */
-SYSCALL(sys_sigsuspend,compat_sys_sigsuspend)
-SYSCALL(sys_sigpending,compat_sys_sigpending)
-SYSCALL(sys_sethostname,compat_sys_sethostname)
-SYSCALL(sys_setrlimit,compat_sys_setrlimit) /* 75 */
-SYSCALL(sys_getrlimit,compat_sys_old_getrlimit)
-SYSCALL(sys_getrusage,compat_sys_getrusage)
-SYSCALL(sys_gettimeofday,compat_sys_gettimeofday)
-SYSCALL(sys_settimeofday,compat_sys_settimeofday)
-SYSCALL(sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */
-SYSCALL(sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */
-NI_SYSCALL /* old select syscall */
-SYSCALL(sys_symlink,compat_sys_symlink)
-NI_SYSCALL /* old lstat syscall */
-SYSCALL(sys_readlink,compat_sys_readlink) /* 85 */
-SYSCALL(sys_uselib,compat_sys_uselib)
-SYSCALL(sys_swapon,compat_sys_swapon)
-SYSCALL(sys_reboot,compat_sys_reboot)
-SYSCALL(sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */
-SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
-SYSCALL(sys_munmap,compat_sys_munmap)
-SYSCALL(sys_truncate,compat_sys_truncate)
-SYSCALL(sys_ftruncate,compat_sys_ftruncate)
-SYSCALL(sys_fchmod,sys_fchmod)
-SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
-SYSCALL(sys_getpriority,sys_getpriority)
-SYSCALL(sys_setpriority,sys_setpriority)
-NI_SYSCALL /* old profil syscall */
-SYSCALL(sys_statfs,compat_sys_statfs)
-SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */
-NI_SYSCALL /* ioperm for i386 */
-SYSCALL(sys_socketcall,compat_sys_socketcall)
-SYSCALL(sys_syslog,compat_sys_syslog)
-SYSCALL(sys_setitimer,compat_sys_setitimer)
-SYSCALL(sys_getitimer,compat_sys_getitimer) /* 105 */
-SYSCALL(sys_newstat,compat_sys_newstat)
-SYSCALL(sys_newlstat,compat_sys_newlstat)
-SYSCALL(sys_newfstat,compat_sys_newfstat)
-NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
-SYSCALL(sys_vhangup,sys_vhangup)
-NI_SYSCALL /* old "idle" system call */
-NI_SYSCALL /* vm86old for i386 */
-SYSCALL(sys_wait4,compat_sys_wait4)
-SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */
-SYSCALL(sys_sysinfo,compat_sys_sysinfo)
-SYSCALL(sys_s390_ipc,compat_sys_s390_ipc)
-SYSCALL(sys_fsync,sys_fsync)
-SYSCALL(sys_sigreturn,compat_sys_sigreturn)
-SYSCALL(sys_clone,compat_sys_clone) /* 120 */
-SYSCALL(sys_setdomainname,compat_sys_setdomainname)
-SYSCALL(sys_newuname,compat_sys_newuname)
-NI_SYSCALL /* modify_ldt for i386 */
-SYSCALL(sys_adjtimex,compat_sys_adjtimex)
-SYSCALL(sys_mprotect,compat_sys_mprotect) /* 125 */
-SYSCALL(sys_sigprocmask,compat_sys_sigprocmask)
-NI_SYSCALL /* old "create module" */
-SYSCALL(sys_init_module,compat_sys_init_module)
-SYSCALL(sys_delete_module,compat_sys_delete_module)
-NI_SYSCALL /* 130: old get_kernel_syms */
-SYSCALL(sys_quotactl,compat_sys_quotactl)
-SYSCALL(sys_getpgid,sys_getpgid)
-SYSCALL(sys_fchdir,sys_fchdir)
-SYSCALL(sys_bdflush,compat_sys_bdflush)
-SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */
-SYSCALL(sys_s390_personality,sys_s390_personality)
-NI_SYSCALL /* for afs_syscall */
-SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
-SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
-SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */
-SYSCALL(sys_getdents,compat_sys_getdents)
-SYSCALL(sys_select,compat_sys_select)
-SYSCALL(sys_flock,sys_flock)
-SYSCALL(sys_msync,compat_sys_msync)
-SYSCALL(sys_readv,compat_sys_readv) /* 145 */
-SYSCALL(sys_writev,compat_sys_writev)
-SYSCALL(sys_getsid,sys_getsid)
-SYSCALL(sys_fdatasync,sys_fdatasync)
-SYSCALL(sys_sysctl,compat_sys_sysctl)
-SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */
-SYSCALL(sys_munlock,compat_sys_munlock)
-SYSCALL(sys_mlockall,sys_mlockall)
-SYSCALL(sys_munlockall,sys_munlockall)
-SYSCALL(sys_sched_setparam,compat_sys_sched_setparam)
-SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
-SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler)
-SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler)
-SYSCALL(sys_sched_yield,sys_sched_yield)
-SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max)
-SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min) /* 160 */
-SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
-SYSCALL(sys_nanosleep,compat_sys_nanosleep)
-SYSCALL(sys_mremap,compat_sys_mremap)
-SYSCALL(sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */
-SYSCALL(sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */
-NI_SYSCALL /* for vm86 */
-NI_SYSCALL /* old sys_query_module */
-SYSCALL(sys_poll,compat_sys_poll)
-NI_SYSCALL /* old nfsservctl */
-SYSCALL(sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */
-SYSCALL(sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */
-SYSCALL(sys_prctl,compat_sys_prctl)
-SYSCALL(sys_rt_sigreturn,compat_sys_rt_sigreturn)
-SYSCALL(sys_rt_sigaction,compat_sys_rt_sigaction)
-SYSCALL(sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
-SYSCALL(sys_rt_sigpending,compat_sys_rt_sigpending)
-SYSCALL(sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
-SYSCALL(sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
-SYSCALL(sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
-SYSCALL(sys_pread64,compat_sys_s390_pread64) /* 180 */
-SYSCALL(sys_pwrite64,compat_sys_s390_pwrite64)
-SYSCALL(sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */
-SYSCALL(sys_getcwd,compat_sys_getcwd)
-SYSCALL(sys_capget,compat_sys_capget)
-SYSCALL(sys_capset,compat_sys_capset) /* 185 */
-SYSCALL(sys_sigaltstack,compat_sys_sigaltstack)
-SYSCALL(sys_sendfile64,compat_sys_sendfile)
-NI_SYSCALL /* streams1 */
-NI_SYSCALL /* streams2 */
-SYSCALL(sys_vfork,sys_vfork) /* 190 */
-SYSCALL(sys_getrlimit,compat_sys_getrlimit)
-SYSCALL(sys_mmap2,compat_sys_s390_mmap2)
-SYSCALL(sys_ni_syscall,compat_sys_s390_truncate64)
-SYSCALL(sys_ni_syscall,compat_sys_s390_ftruncate64)
-SYSCALL(sys_ni_syscall,compat_sys_s390_stat64) /* 195 */
-SYSCALL(sys_ni_syscall,compat_sys_s390_lstat64)
-SYSCALL(sys_ni_syscall,compat_sys_s390_fstat64)
-SYSCALL(sys_lchown,compat_sys_lchown)
-SYSCALL(sys_getuid,sys_getuid)
-SYSCALL(sys_getgid,sys_getgid) /* 200 */
-SYSCALL(sys_geteuid,sys_geteuid)
-SYSCALL(sys_getegid,sys_getegid)
-SYSCALL(sys_setreuid,sys_setreuid)
-SYSCALL(sys_setregid,sys_setregid)
-SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */
-SYSCALL(sys_setgroups,compat_sys_setgroups)
-SYSCALL(sys_fchown,sys_fchown)
-SYSCALL(sys_setresuid,sys_setresuid)
-SYSCALL(sys_getresuid,compat_sys_getresuid)
-SYSCALL(sys_setresgid,sys_setresgid) /* 210 */
-SYSCALL(sys_getresgid,compat_sys_getresgid)
-SYSCALL(sys_chown,compat_sys_chown)
-SYSCALL(sys_setuid,sys_setuid)
-SYSCALL(sys_setgid,sys_setgid)
-SYSCALL(sys_setfsuid,sys_setfsuid) /* 215 */
-SYSCALL(sys_setfsgid,sys_setfsgid)
-SYSCALL(sys_pivot_root,compat_sys_pivot_root)
-SYSCALL(sys_mincore,compat_sys_mincore)
-SYSCALL(sys_madvise,compat_sys_madvise)
-SYSCALL(sys_getdents64,compat_sys_getdents64) /* 220 */
-SYSCALL(sys_ni_syscall,compat_sys_fcntl64)
-SYSCALL(sys_readahead,compat_sys_s390_readahead)
-SYSCALL(sys_ni_syscall,compat_sys_sendfile64)
-SYSCALL(sys_setxattr,compat_sys_setxattr)
-SYSCALL(sys_lsetxattr,compat_sys_lsetxattr) /* 225 */
-SYSCALL(sys_fsetxattr,compat_sys_fsetxattr)
-SYSCALL(sys_getxattr,compat_sys_getxattr)
-SYSCALL(sys_lgetxattr,compat_sys_lgetxattr)
-SYSCALL(sys_fgetxattr,compat_sys_fgetxattr)
-SYSCALL(sys_listxattr,compat_sys_listxattr) /* 230 */
-SYSCALL(sys_llistxattr,compat_sys_llistxattr)
-SYSCALL(sys_flistxattr,compat_sys_flistxattr)
-SYSCALL(sys_removexattr,compat_sys_removexattr)
-SYSCALL(sys_lremovexattr,compat_sys_lremovexattr)
-SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
-SYSCALL(sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,sys_tkill)
-SYSCALL(sys_futex,compat_sys_futex)
-SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity)
-SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
-SYSCALL(sys_tgkill,sys_tgkill)
-NI_SYSCALL /* reserved for TUX */
-SYSCALL(sys_io_setup,compat_sys_io_setup)
-SYSCALL(sys_io_destroy,compat_sys_io_destroy)
-SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */
-SYSCALL(sys_io_submit,compat_sys_io_submit)
-SYSCALL(sys_io_cancel,compat_sys_io_cancel)
-SYSCALL(sys_exit_group,sys_exit_group)
-SYSCALL(sys_epoll_create,sys_epoll_create)
-SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
-SYSCALL(sys_epoll_wait,compat_sys_epoll_wait)
-SYSCALL(sys_set_tid_address,compat_sys_set_tid_address)
-SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64)
-SYSCALL(sys_timer_create,compat_sys_timer_create)
-SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */
-SYSCALL(sys_timer_gettime,compat_sys_timer_gettime)
-SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun)
-SYSCALL(sys_timer_delete,sys_timer_delete)
-SYSCALL(sys_clock_settime,compat_sys_clock_settime)
-SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
-SYSCALL(sys_clock_getres,compat_sys_clock_getres)
-SYSCALL(sys_clock_nanosleep,compat_sys_clock_nanosleep)
-NI_SYSCALL /* reserved for vserver */
-SYSCALL(sys_ni_syscall,compat_sys_s390_fadvise64_64)
-SYSCALL(sys_statfs64,compat_sys_statfs64)
-SYSCALL(sys_fstatfs64,compat_sys_fstatfs64)
-SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages)
-SYSCALL(sys_mbind,compat_sys_mbind)
-SYSCALL(sys_get_mempolicy,compat_sys_get_mempolicy)
-SYSCALL(sys_set_mempolicy,compat_sys_set_mempolicy)
-SYSCALL(sys_mq_open,compat_sys_mq_open)
-SYSCALL(sys_mq_unlink,compat_sys_mq_unlink)
-SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend)
-SYSCALL(sys_mq_timedreceive,compat_sys_mq_timedreceive)
-SYSCALL(sys_mq_notify,compat_sys_mq_notify) /* 275 */
-SYSCALL(sys_mq_getsetattr,compat_sys_mq_getsetattr)
-SYSCALL(sys_kexec_load,compat_sys_kexec_load)
-SYSCALL(sys_add_key,compat_sys_add_key)
-SYSCALL(sys_request_key,compat_sys_request_key)
-SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */
-SYSCALL(sys_waitid,compat_sys_waitid)
-SYSCALL(sys_ioprio_set,sys_ioprio_set)
-SYSCALL(sys_ioprio_get,sys_ioprio_get)
-SYSCALL(sys_inotify_init,sys_inotify_init)
-SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
-SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch)
-SYSCALL(sys_migrate_pages,compat_sys_migrate_pages)
-SYSCALL(sys_openat,compat_sys_openat)
-SYSCALL(sys_mkdirat,compat_sys_mkdirat)
-SYSCALL(sys_mknodat,compat_sys_mknodat) /* 290 */
-SYSCALL(sys_fchownat,compat_sys_fchownat)
-SYSCALL(sys_futimesat,compat_sys_futimesat)
-SYSCALL(sys_newfstatat,compat_sys_s390_fstatat64)
-SYSCALL(sys_unlinkat,compat_sys_unlinkat)
-SYSCALL(sys_renameat,compat_sys_renameat) /* 295 */
-SYSCALL(sys_linkat,compat_sys_linkat)
-SYSCALL(sys_symlinkat,compat_sys_symlinkat)
-SYSCALL(sys_readlinkat,compat_sys_readlinkat)
-SYSCALL(sys_fchmodat,compat_sys_fchmodat)
-SYSCALL(sys_faccessat,compat_sys_faccessat) /* 300 */
-SYSCALL(sys_pselect6,compat_sys_pselect6)
-SYSCALL(sys_ppoll,compat_sys_ppoll)
-SYSCALL(sys_unshare,compat_sys_unshare)
-SYSCALL(sys_set_robust_list,compat_sys_set_robust_list)
-SYSCALL(sys_get_robust_list,compat_sys_get_robust_list)
-SYSCALL(sys_splice,compat_sys_splice)
-SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range)
-SYSCALL(sys_tee,compat_sys_tee)
-SYSCALL(sys_vmsplice,compat_sys_vmsplice)
-SYSCALL(sys_move_pages,compat_sys_move_pages)
-SYSCALL(sys_getcpu,compat_sys_getcpu)
-SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait)
-SYSCALL(sys_utimes,compat_sys_utimes)
-SYSCALL(sys_fallocate,compat_sys_s390_fallocate)
-SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */
-SYSCALL(sys_signalfd,compat_sys_signalfd)
-NI_SYSCALL /* 317 old sys_timer_fd */
-SYSCALL(sys_eventfd,sys_eventfd)
-SYSCALL(sys_timerfd_create,sys_timerfd_create)
-SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
-SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime)
-SYSCALL(sys_signalfd4,compat_sys_signalfd4)
-SYSCALL(sys_eventfd2,sys_eventfd2)
-SYSCALL(sys_inotify_init1,sys_inotify_init1)
-SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */
-SYSCALL(sys_dup3,sys_dup3)
-SYSCALL(sys_epoll_create1,sys_epoll_create1)
-SYSCALL(sys_preadv,compat_sys_preadv)
-SYSCALL(sys_pwritev,compat_sys_pwritev)
-SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
-SYSCALL(sys_perf_event_open,compat_sys_perf_event_open)
-SYSCALL(sys_fanotify_init,sys_fanotify_init)
-SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark)
-SYSCALL(sys_prlimit64,compat_sys_prlimit64)
-SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
-SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at)
-SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime)
-SYSCALL(sys_syncfs,sys_syncfs)
-SYSCALL(sys_setns,sys_setns)
-SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
-SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev)
-SYSCALL(sys_s390_runtime_instr,sys_s390_runtime_instr)
-SYSCALL(sys_kcmp,compat_sys_kcmp)
-SYSCALL(sys_finit_module,compat_sys_finit_module)
-SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
-SYSCALL(sys_sched_getattr,compat_sys_sched_getattr)
-SYSCALL(sys_renameat2,compat_sys_renameat2)
-SYSCALL(sys_seccomp,compat_sys_seccomp)
-SYSCALL(sys_getrandom,compat_sys_getrandom)
-SYSCALL(sys_memfd_create,compat_sys_memfd_create) /* 350 */
-SYSCALL(sys_bpf,compat_sys_bpf)
-SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
-SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
-SYSCALL(sys_execveat,compat_sys_execveat)
-SYSCALL(sys_userfaultfd,sys_userfaultfd) /* 355 */
-SYSCALL(sys_membarrier,sys_membarrier)
-SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
-SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
-SYSCALL(sys_socket,sys_socket)
-SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */
-SYSCALL(sys_bind,compat_sys_bind)
-SYSCALL(sys_connect,compat_sys_connect)
-SYSCALL(sys_listen,sys_listen)
-SYSCALL(sys_accept4,compat_sys_accept4)
-SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */
-SYSCALL(sys_setsockopt,compat_sys_setsockopt)
-SYSCALL(sys_getsockname,compat_sys_getsockname)
-SYSCALL(sys_getpeername,compat_sys_getpeername)
-SYSCALL(sys_sendto,compat_sys_sendto)
-SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */
-SYSCALL(sys_recvfrom,compat_sys_recvfrom)
-SYSCALL(sys_recvmsg,compat_sys_recvmsg)
-SYSCALL(sys_shutdown,sys_shutdown)
-SYSCALL(sys_mlock2,compat_sys_mlock2)
-SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
-SYSCALL(sys_preadv2,compat_sys_preadv2)
-SYSCALL(sys_pwritev2,compat_sys_pwritev2)
-SYSCALL(sys_s390_guarded_storage,compat_sys_s390_guarded_storage) /* 378 */
-SYSCALL(sys_statx,compat_sys_statx)
-SYSCALL(sys_s390_sthyi,compat_sys_s390_sthyi)
diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..8ff96c08955f
--- /dev/null
+++ b/arch/s390/kernel/syscalls/Makefile
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+
+gen := arch/$(ARCH)/include/generated
+kapi := $(gen)/asm
+uapi := $(gen)/uapi/asm
+
+syscall := $(srctree)/$(src)/syscall.tbl
+systbl := $(srctree)/$(src)/syscalltbl
+
+gen-y := $(kapi)/syscall_table.h
+kapi-hdrs-y := $(kapi)/unistd_nr.h
+uapi-hdrs-y := $(uapi)/unistd_32.h
+uapi-hdrs-y += $(uapi)/unistd_64.h
+
+targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
+
+PHONY += kapi uapi
+
+kapi: $(gen-y) $(kapi-hdrs-y)
+uapi: $(uapi-hdrs-y)
+
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+define filechk_syshdr
+ $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2"
+endef
+
+define filechk_sysnr
+ $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget))
+endef
+
+define filechk_syscalls
+ $(CONFIG_SHELL) '$(systbl)' -S
+endef
+
+syshdr_abi_unistd_32 := common,32
+$(uapi)/unistd_32.h: $(syscall) FORCE
+ $(call filechk,syshdr,$@)
+
+syshdr_abi_unistd_64 := common,64
+$(uapi)/unistd_64.h: $(syscall) FORCE
+ $(call filechk,syshdr,$@)
+
+$(kapi)/syscall_table.h: $(syscall) FORCE
+ $(call filechk,syscalls)
+
+sysnr_abi_unistd_nr := common,32,64
+$(kapi)/unistd_nr.h: $(syscall) FORCE
+ $(call filechk,sysnr)
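
Each filechk rule feeds syscall.tbl to the syscalltbl script below on standard input, with -H, -N or -S selecting the output flavor. For example, unistd_64.h is built from the common and 64 ABI rows; a plausible abridged shape of that generated header (note how the script's s/__/_/g collapses the leading double underscore in the include guard):

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_S390_UNISTD_64_H
#define _UAPI_ASM_S390_UNISTD_64_H

#define __NR_exit 1
#define __NR_fork 2
#define __NR_read 3
/* ... one define per common/64 row of syscall.tbl ... */

#endif /* _UAPI_ASM_S390_UNISTD_64_H */
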
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000000..b38d48464368
--- /dev/null
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -0,0 +1,390 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# System call table for s390
+#
+# Format:
+#
+# <nr> <abi> <syscall> <entry-64bit> <compat-entry>
+#
+# where <abi> can be common, 64, or 32 (a worked example follows the table)
+
+1 common exit sys_exit sys_exit
+2 common fork sys_fork sys_fork
+3 common read sys_read compat_sys_s390_read
+4 common write sys_write compat_sys_s390_write
+5 common open sys_open compat_sys_open
+6 common close sys_close sys_close
+7 common restart_syscall sys_restart_syscall sys_restart_syscall
+8 common creat sys_creat compat_sys_creat
+9 common link sys_link compat_sys_link
+10 common unlink sys_unlink compat_sys_unlink
+11 common execve sys_execve compat_sys_execve
+12 common chdir sys_chdir compat_sys_chdir
+13 32 time - compat_sys_time
+14 common mknod sys_mknod compat_sys_mknod
+15 common chmod sys_chmod compat_sys_chmod
+16 32 lchown - compat_sys_s390_lchown16
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid sys_getpid
+21 common mount sys_mount compat_sys_mount
+22 common umount sys_oldumount compat_sys_oldumount
+23 32 setuid - compat_sys_s390_setuid16
+24 32 getuid - compat_sys_s390_getuid16
+25 32 stime - compat_sys_stime
+26 common ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm sys_alarm
+29 common pause sys_pause sys_pause
+30 common utime sys_utime compat_sys_utime
+33 common access sys_access compat_sys_access
+34 common nice sys_nice sys_nice
+36 common sync sys_sync sys_sync
+37 common kill sys_kill sys_kill
+38 common rename sys_rename compat_sys_rename
+39 common mkdir sys_mkdir compat_sys_mkdir
+40 common rmdir sys_rmdir compat_sys_rmdir
+41 common dup sys_dup sys_dup
+42 common pipe sys_pipe compat_sys_pipe
+43 common times sys_times compat_sys_times
+45 common brk sys_brk compat_sys_brk
+46 32 setgid - compat_sys_s390_setgid16
+47 32 getgid - compat_sys_s390_getgid16
+48 common signal sys_signal compat_sys_signal
+49 32 geteuid - compat_sys_s390_geteuid16
+50 32 getegid - compat_sys_s390_getegid16
+51 common acct sys_acct compat_sys_acct
+52 common umount2 sys_umount compat_sys_umount
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common fcntl sys_fcntl compat_sys_fcntl
+57 common setpgid sys_setpgid sys_setpgid
+60 common umask sys_umask sys_umask
+61 common chroot sys_chroot compat_sys_chroot
+62 common ustat sys_ustat compat_sys_ustat
+63 common dup2 sys_dup2 sys_dup2
+64 common getppid sys_getppid sys_getppid
+65 common getpgrp sys_getpgrp sys_getpgrp
+66 common setsid sys_setsid sys_setsid
+67 common sigaction sys_sigaction compat_sys_sigaction
+70 32 setreuid - compat_sys_s390_setreuid16
+71 32 setregid - compat_sys_s390_setregid16
+72 common sigsuspend sys_sigsuspend compat_sys_sigsuspend
+73 common sigpending sys_sigpending compat_sys_sigpending
+74 common sethostname sys_sethostname compat_sys_sethostname
+75 common setrlimit sys_setrlimit compat_sys_setrlimit
+76 32 getrlimit - compat_sys_old_getrlimit
+77 common getrusage sys_getrusage compat_sys_getrusage
+78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 common settimeofday sys_settimeofday compat_sys_settimeofday
+80 32 getgroups - compat_sys_s390_getgroups16
+81 32 setgroups - compat_sys_s390_setgroups16
+83 common symlink sys_symlink compat_sys_symlink
+85 common readlink sys_readlink compat_sys_readlink
+86 common uselib sys_uselib compat_sys_uselib
+87 common swapon sys_swapon compat_sys_swapon
+88 common reboot sys_reboot compat_sys_reboot
+89 common readdir - compat_sys_old_readdir
+90 common mmap sys_old_mmap compat_sys_s390_old_mmap
+91 common munmap sys_munmap compat_sys_munmap
+92 common truncate sys_truncate compat_sys_truncate
+93 common ftruncate sys_ftruncate compat_sys_ftruncate
+94 common fchmod sys_fchmod sys_fchmod
+95 32 fchown - compat_sys_s390_fchown16
+96 common getpriority sys_getpriority sys_getpriority
+97 common setpriority sys_setpriority sys_setpriority
+99 common statfs sys_statfs compat_sys_statfs
+100 common fstatfs sys_fstatfs compat_sys_fstatfs
+101 32 ioperm - -
+102 common socketcall sys_socketcall compat_sys_socketcall
+103 common syslog sys_syslog compat_sys_syslog
+104 common setitimer sys_setitimer compat_sys_setitimer
+105 common getitimer sys_getitimer compat_sys_getitimer
+106 common stat sys_newstat compat_sys_newstat
+107 common lstat sys_newlstat compat_sys_newlstat
+108 common fstat sys_newfstat compat_sys_newfstat
+110 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+111 common vhangup sys_vhangup sys_vhangup
+112 common idle - -
+114 common wait4 sys_wait4 compat_sys_wait4
+115 common swapoff sys_swapoff compat_sys_swapoff
+116 common sysinfo sys_sysinfo compat_sys_sysinfo
+117 common ipc sys_s390_ipc compat_sys_s390_ipc
+118 common fsync sys_fsync sys_fsync
+119 common sigreturn sys_sigreturn compat_sys_sigreturn
+120 common clone sys_clone compat_sys_clone
+121 common setdomainname sys_setdomainname compat_sys_setdomainname
+122 common uname sys_newuname compat_sys_newuname
+124 common adjtimex sys_adjtimex compat_sys_adjtimex
+125 common mprotect sys_mprotect compat_sys_mprotect
+126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
+127 common create_module - -
+128 common init_module sys_init_module compat_sys_init_module
+129 common delete_module sys_delete_module compat_sys_delete_module
+130 common get_kernel_syms - -
+131 common quotactl sys_quotactl compat_sys_quotactl
+132 common getpgid sys_getpgid sys_getpgid
+133 common fchdir sys_fchdir sys_fchdir
+134 common bdflush sys_bdflush compat_sys_bdflush
+135 common sysfs sys_sysfs compat_sys_sysfs
+136 common personality sys_s390_personality sys_s390_personality
+137 common afs_syscall - -
+138 32 setfsuid - compat_sys_s390_setfsuid16
+139 32 setfsgid - compat_sys_s390_setfsgid16
+140 32 _llseek - compat_sys_llseek
+141 common getdents sys_getdents compat_sys_getdents
+142 32 _newselect - compat_sys_select
+142 64 select sys_select -
+143 common flock sys_flock sys_flock
+144 common msync sys_msync compat_sys_msync
+145 common readv sys_readv compat_sys_readv
+146 common writev sys_writev compat_sys_writev
+147 common getsid sys_getsid sys_getsid
+148 common fdatasync sys_fdatasync sys_fdatasync
+149 common _sysctl sys_sysctl compat_sys_sysctl
+150 common mlock sys_mlock compat_sys_mlock
+151 common munlock sys_munlock compat_sys_munlock
+152 common mlockall sys_mlockall sys_mlockall
+153 common munlockall sys_munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam compat_sys_sched_setparam
+155 common sched_getparam sys_sched_getparam compat_sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler compat_sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep compat_sys_nanosleep
+163 common mremap sys_mremap compat_sys_mremap
+164 32 setresuid - compat_sys_s390_setresuid16
+165 32 getresuid - compat_sys_s390_getresuid16
+167 common query_module - -
+168 common poll sys_poll compat_sys_poll
+169 common nfsservctl - -
+170 32 setresgid - compat_sys_s390_setresgid16
+171 32 getresgid - compat_sys_s390_getresgid16
+172 common prctl sys_prctl compat_sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+180 common pread64 sys_pread64 compat_sys_s390_pread64
+181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64
+182 32 chown - compat_sys_s390_chown16
+183 common getcwd sys_getcwd compat_sys_getcwd
+184 common capget sys_capget compat_sys_capget
+185 common capset sys_capset compat_sys_capset
+186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+187 common sendfile sys_sendfile64 compat_sys_sendfile
+188 common getpmsg - -
+189 common putpmsg - -
+190 common vfork sys_vfork sys_vfork
+191 32 ugetrlimit - compat_sys_getrlimit
+191 64 getrlimit sys_getrlimit -
+192 32 mmap2 - compat_sys_s390_mmap2
+193 32 truncate64 - compat_sys_s390_truncate64
+194 32 ftruncate64 - compat_sys_s390_ftruncate64
+195 32 stat64 - compat_sys_s390_stat64
+196 32 lstat64 - compat_sys_s390_lstat64
+197 32 fstat64 - compat_sys_s390_fstat64
+198 32 lchown32 - compat_sys_lchown
+198 64 lchown sys_lchown -
+199 32 getuid32 - sys_getuid
+199 64 getuid sys_getuid -
+200 32 getgid32 - sys_getgid
+200 64 getgid sys_getgid -
+201 32 geteuid32 - sys_geteuid
+201 64 geteuid sys_geteuid -
+202 32 getegid32 - sys_getegid
+202 64 getegid sys_getegid -
+203 32 setreuid32 - sys_setreuid
+203 64 setreuid sys_setreuid -
+204 32 setregid32 - sys_setregid
+204 64 setregid sys_setregid -
+205 32 getgroups32 - compat_sys_getgroups
+205 64 getgroups sys_getgroups -
+206 32 setgroups32 - compat_sys_setgroups
+206 64 setgroups sys_setgroups -
+207 32 fchown32 - sys_fchown
+207 64 fchown sys_fchown -
+208 32 setresuid32 - sys_setresuid
+208 64 setresuid sys_setresuid -
+209 32 getresuid32 - compat_sys_getresuid
+209 64 getresuid sys_getresuid -
+210 32 setresgid32 - sys_setresgid
+210 64 setresgid sys_setresgid -
+211 32 getresgid32 - compat_sys_getresgid
+211 64 getresgid sys_getresgid -
+212 32 chown32 - compat_sys_chown
+212 64 chown sys_chown -
+213 32 setuid32 - sys_setuid
+213 64 setuid sys_setuid -
+214 32 setgid32 - sys_setgid
+214 64 setgid sys_setgid -
+215 32 setfsuid32 - sys_setfsuid
+215 64 setfsuid sys_setfsuid -
+216 32 setfsgid32 - sys_setfsgid
+216 64 setfsgid sys_setfsgid -
+217 common pivot_root sys_pivot_root compat_sys_pivot_root
+218 common mincore sys_mincore compat_sys_mincore
+219 common madvise sys_madvise compat_sys_madvise
+220 common getdents64 sys_getdents64 compat_sys_getdents64
+221 32 fcntl64 - compat_sys_fcntl64
+222 common readahead sys_readahead compat_sys_s390_readahead
+223 32 sendfile64 - compat_sys_sendfile64
+224 common setxattr sys_setxattr compat_sys_setxattr
+225 common lsetxattr sys_lsetxattr compat_sys_lsetxattr
+226 common fsetxattr sys_fsetxattr compat_sys_fsetxattr
+227 common getxattr sys_getxattr compat_sys_getxattr
+228 common lgetxattr sys_lgetxattr compat_sys_lgetxattr
+229 common fgetxattr sys_fgetxattr compat_sys_fgetxattr
+230 common listxattr sys_listxattr compat_sys_listxattr
+231 common llistxattr sys_llistxattr compat_sys_llistxattr
+232 common flistxattr sys_flistxattr compat_sys_flistxattr
+233 common removexattr sys_removexattr compat_sys_removexattr
+234 common lremovexattr sys_lremovexattr compat_sys_lremovexattr
+235 common fremovexattr sys_fremovexattr compat_sys_fremovexattr
+236 common gettid sys_gettid sys_gettid
+237 common tkill sys_tkill sys_tkill
+238 common futex sys_futex compat_sys_futex
+239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+241 common tgkill sys_tgkill sys_tgkill
+243 common io_setup sys_io_setup compat_sys_io_setup
+244 common io_destroy sys_io_destroy compat_sys_io_destroy
+245 common io_getevents sys_io_getevents compat_sys_io_getevents
+246 common io_submit sys_io_submit compat_sys_io_submit
+247 common io_cancel sys_io_cancel compat_sys_io_cancel
+248 common exit_group sys_exit_group sys_exit_group
+249 common epoll_create sys_epoll_create sys_epoll_create
+250 common epoll_ctl sys_epoll_ctl compat_sys_epoll_ctl
+251 common epoll_wait sys_epoll_wait compat_sys_epoll_wait
+252 common set_tid_address sys_set_tid_address compat_sys_set_tid_address
+253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64
+254 common timer_create sys_timer_create compat_sys_timer_create
+255 common timer_settime sys_timer_settime compat_sys_timer_settime
+256 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun
+258 common timer_delete sys_timer_delete sys_timer_delete
+259 common clock_settime sys_clock_settime compat_sys_clock_settime
+260 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
+261 common clock_getres sys_clock_getres compat_sys_clock_getres
+262 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+264 32 fadvise64_64 - compat_sys_s390_fadvise64_64
+265 common statfs64 sys_statfs64 compat_sys_statfs64
+266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+267 common remap_file_pages sys_remap_file_pages compat_sys_remap_file_pages
+268 common mbind sys_mbind compat_sys_mbind
+269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+271 common mq_open sys_mq_open compat_sys_mq_open
+272 common mq_unlink sys_mq_unlink compat_sys_mq_unlink
+273 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
+274 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+275 common mq_notify sys_mq_notify compat_sys_mq_notify
+276 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+277 common kexec_load sys_kexec_load compat_sys_kexec_load
+278 common add_key sys_add_key compat_sys_add_key
+279 common request_key sys_request_key compat_sys_request_key
+280 common keyctl sys_keyctl compat_sys_keyctl
+281 common waitid sys_waitid compat_sys_waitid
+282 common ioprio_set sys_ioprio_set sys_ioprio_set
+283 common ioprio_get sys_ioprio_get sys_ioprio_get
+284 common inotify_init sys_inotify_init sys_inotify_init
+285 common inotify_add_watch sys_inotify_add_watch compat_sys_inotify_add_watch
+286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
+287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
+288 common openat sys_openat compat_sys_openat
+289 common mkdirat sys_mkdirat compat_sys_mkdirat
+290 common mknodat sys_mknodat compat_sys_mknodat
+291 common fchownat sys_fchownat compat_sys_fchownat
+292 common futimesat sys_futimesat compat_sys_futimesat
+293 32 fstatat64 - compat_sys_s390_fstatat64
+293 64 newfstatat sys_newfstatat -
+294 common unlinkat sys_unlinkat compat_sys_unlinkat
+295 common renameat sys_renameat compat_sys_renameat
+296 common linkat sys_linkat compat_sys_linkat
+297 common symlinkat sys_symlinkat compat_sys_symlinkat
+298 common readlinkat sys_readlinkat compat_sys_readlinkat
+299 common fchmodat sys_fchmodat compat_sys_fchmodat
+300 common faccessat sys_faccessat compat_sys_faccessat
+301 common pselect6 sys_pselect6 compat_sys_pselect6
+302 common ppoll sys_ppoll compat_sys_ppoll
+303 common unshare sys_unshare compat_sys_unshare
+304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+306 common splice sys_splice compat_sys_splice
+307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
+308 common tee sys_tee compat_sys_tee
+309 common vmsplice sys_vmsplice compat_sys_vmsplice
+310 common move_pages sys_move_pages compat_sys_move_pages
+311 common getcpu sys_getcpu compat_sys_getcpu
+312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+313 common utimes sys_utimes compat_sys_utimes
+314 common fallocate sys_fallocate compat_sys_s390_fallocate
+315 common utimensat sys_utimensat compat_sys_utimensat
+316 common signalfd sys_signalfd compat_sys_signalfd
+317 common timerfd - -
+318 common eventfd sys_eventfd sys_eventfd
+319 common timerfd_create sys_timerfd_create sys_timerfd_create
+320 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
+321 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+322 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+323 common eventfd2 sys_eventfd2 sys_eventfd2
+324 common inotify_init1 sys_inotify_init1 sys_inotify_init1
+325 common pipe2 sys_pipe2 compat_sys_pipe2
+326 common dup3 sys_dup3 sys_dup3
+327 common epoll_create1 sys_epoll_create1 sys_epoll_create1
+328 common preadv sys_preadv compat_sys_preadv
+329 common pwritev sys_pwritev compat_sys_pwritev
+330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+331 common perf_event_open sys_perf_event_open compat_sys_perf_event_open
+332 common fanotify_init sys_fanotify_init sys_fanotify_init
+333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+334 common prlimit64 sys_prlimit64 compat_sys_prlimit64
+335 common name_to_handle_at sys_name_to_handle_at compat_sys_name_to_handle_at
+336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+337 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+338 common syncfs sys_syncfs sys_syncfs
+339 common setns sys_setns sys_setns
+340 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
+341 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr
+343 common kcmp sys_kcmp compat_sys_kcmp
+344 common finit_module sys_finit_module compat_sys_finit_module
+345 common sched_setattr sys_sched_setattr compat_sys_sched_setattr
+346 common sched_getattr sys_sched_getattr compat_sys_sched_getattr
+347 common renameat2 sys_renameat2 compat_sys_renameat2
+348 common seccomp sys_seccomp compat_sys_seccomp
+349 common getrandom sys_getrandom compat_sys_getrandom
+350 common memfd_create sys_memfd_create compat_sys_memfd_create
+351 common bpf sys_bpf compat_sys_bpf
+352 common s390_pci_mmio_write sys_s390_pci_mmio_write compat_sys_s390_pci_mmio_write
+353 common s390_pci_mmio_read sys_s390_pci_mmio_read compat_sys_s390_pci_mmio_read
+354 common execveat sys_execveat compat_sys_execveat
+355 common userfaultfd sys_userfaultfd sys_userfaultfd
+356 common membarrier sys_membarrier sys_membarrier
+357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+359 common socket sys_socket sys_socket
+360 common socketpair sys_socketpair compat_sys_socketpair
+361 common bind sys_bind compat_sys_bind
+362 common connect sys_connect compat_sys_connect
+363 common listen sys_listen sys_listen
+364 common accept4 sys_accept4 compat_sys_accept4
+365 common getsockopt sys_getsockopt compat_sys_getsockopt
+366 common setsockopt sys_setsockopt compat_sys_setsockopt
+367 common getsockname sys_getsockname compat_sys_getsockname
+368 common getpeername sys_getpeername compat_sys_getpeername
+369 common sendto sys_sendto compat_sys_sendto
+370 common sendmsg sys_sendmsg compat_sys_sendmsg
+371 common recvfrom sys_recvfrom compat_sys_recvfrom
+372 common recvmsg sys_recvmsg compat_sys_recvmsg
+373 common shutdown sys_shutdown sys_shutdown
+374 common mlock2 sys_mlock2 compat_sys_mlock2
+375 common copy_file_range sys_copy_file_range compat_sys_copy_file_range
+376 common preadv2 sys_preadv2 compat_sys_preadv2
+377 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage
+379 common statx sys_statx compat_sys_statx
+380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
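
The worked example promised in the header comment: for the two rows that share number 142,

142  32  _newselect  -           compat_sys_select
142  64  select      sys_select  -

the generator below collapses the per-ABI rows into one table entry, while the headers keep their per-ABI names:

/* syscall_table.h gets the merged entry for number 142 */
SYSCALL(sys_select,compat_sys_select)

/* unistd_32.h */
#define __NR__newselect 142
/* unistd_64.h */
#define __NR_select 142

/* a 32-only row such as "13 32 time - compat_sys_time" keeps the
 * 64-bit slot wired to sys_ni_syscall in the merged table:       */
SYSCALL(sys_ni_syscall,compat_sys_time)
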
diff --git a/arch/s390/kernel/syscalls/syscalltbl b/arch/s390/kernel/syscalls/syscalltbl
new file mode 100755
index 000000000000..fbac1732f874
--- /dev/null
+++ b/arch/s390/kernel/syscalls/syscalltbl
@@ -0,0 +1,232 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table and header files
+#
+# Copyright IBM Corp. 2018
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+
+#
+# File path to the system call table definition.
+# You can set the path with the -i option. If omitted,
+# system call table definitions are read from standard input.
+#
+SYSCALL_TBL=""
+
+
+create_syscall_table_entries()
+{
+ local nr abi name entry64 entry32 _ignore
+ local temp=$(mktemp ${TMPDIR:-/tmp}/syscalltbl-common.XXXXXXXXX)
+
+ (
+ #
+ # Initialize with 0 to create an NI_SYSCALL for 0
+ #
+ local prev_nr=0 prev_32=sys_ni_syscall prev_64=sys_ni_syscall
+ while read nr abi name entry64 entry32 _ignore; do
+ test x$entry32 = x- && entry32=sys_ni_syscall
+ test x$entry64 = x- && entry64=sys_ni_syscall
+
+ if test $prev_nr -eq $nr; then
+ #
+ # Same syscall but different ABI; just update
+ # the respective entry point
+ #
+ case $abi in
+ 32)
+ prev_32=$entry32
+ ;;
+ 64)
+ prev_64=$entry64
+ ;;
+ esac
+ continue;
+ else
+ printf "%d\t%s\t%s\n" $prev_nr $prev_64 $prev_32
+ fi
+
+ prev_nr=$nr
+ prev_64=$entry64
+ prev_32=$entry32
+ done
+ printf "%d\t%s\t%s\n" $prev_nr $prev_64 $prev_32
+ ) >> $temp
+
+ #
+ # Check for duplicate syscall numbers
+ #
+ if [ -n "$(cat $temp | cut -f1 | uniq -d)" ]; then
+ echo "Error: generated system call table contains duplicate entries: $temp" >&2
+ exit 1
+ fi
+
+ #
+ # Generate syscall table
+ #
+ prev_nr=0
+ while read nr entry64 entry32; do
+ while test $prev_nr -lt $((nr - 1)); do
+ printf "NI_SYSCALL\n"
+ prev_nr=$((prev_nr + 1))
+ done
+ if test x$entry64 = xsys_ni_syscall &&
+ test x$entry32 = xsys_ni_syscall; then
+ printf "NI_SYSCALL\n"
+ else
+ printf "SYSCALL(%s,%s)\n" $entry64 $entry32
+ fi
+ prev_nr=$nr
+ done < $temp
+ rm $temp
+}
+
+generate_syscall_table()
+{
+ cat <<-EoHEADER
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /*
+ * Definitions for sys_call_table; each line represents an
+ * entry in the table in the form
+ * SYSCALL(64 bit syscall, 31 bit emulated syscall)
+ *
+ * This file is meant to be included from entry.S.
+ */
+
+ #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall)
+
+EoHEADER
+ grep -Ev '^(#|[[:blank:]]*$)' $SYSCALL_TBL \
+ |sort -k1 -n \
+ |create_syscall_table_entries
+}
+
+create_header_defines()
+{
+ local nr abi name _ignore
+
+ while read nr abi name _ignore; do
+ printf "#define __NR_%s %d\n" $name $nr
+ done
+}
+
+normalize_fileguard()
+{
+ local fileguard="$1"
+
+ echo "$fileguard" |tr '[[:lower:]]' '[[:upper:]]' \
+ |sed -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'
+}
+
+generate_syscall_header()
+{
+ local abis=$(echo "($1)" | tr ',' '|')
+ local filename="$2"
+ local fileguard suffix
+
+ if test "$filename"; then
+ fileguard=$(normalize_fileguard "__UAPI_ASM_S390_$2")
+ else
+ case "$abis" in
+ *64*) suffix=64 ;;
+ *32*) suffix=32 ;;
+ esac
+ fileguard=$(normalize_fileguard "__UAPI_ASM_S390_SYSCALLS_$suffix")
+ fi
+
+ cat <<-EoHEADER
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ #ifndef ${fileguard}
+ #define ${fileguard}
+
+EoHEADER
+
+ grep -E "^[[:digit:]]+[[:space:]]+${abis}" $SYSCALL_TBL \
+ |sort -k1 -n \
+ |create_header_defines
+
+ cat <<-EoFOOTER
+
+ #endif /* ${fileguard} */
+EoFOOTER
+}
+
+__max_syscall_nr()
+{
+ local abis=$(echo "($1)" | tr ',' '|')
+
+ grep -E "^[[:digit:]]+[[:space:]]+${abis}" $SYSCALL_TBL \
+ |sed -ne 's/^\([[:digit:]]*\)[[:space:]].*/\1/p' \
+ |sort -n \
+ |tail -1
+}
+
+
+generate_syscall_nr()
+{
+ local abis="$1"
+ local max_syscall_nr num_syscalls
+
+ max_syscall_nr=$(__max_syscall_nr "$abis")
+ num_syscalls=$((max_syscall_nr + 1))
+
+ cat <<-EoHEADER
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ #ifndef __ASM_S390_SYSCALLS_NR
+ #define __ASM_S390_SYSCALLS_NR
+
+ #define NR_syscalls ${num_syscalls}
+
+ #endif /* __ASM_S390_SYSCALLS_NR */
+EoHEADER
+}
+
+
+#
+# Parse command line arguments
+#
+do_syscall_header=""
+do_syscall_table=""
+do_syscall_nr=""
+output_file=""
+abi_list="common,64"
+filename=""
+while getopts ":HNSXi:a:f:" arg; do
+ case $arg in
+ a)
+ abi_list="$OPTARG"
+ ;;
+ i)
+ SYSCALL_TBL="$OPTARG"
+ ;;
+ f)
+ filename=${OPTARG##*/}
+ ;;
+ H)
+ do_syscall_header=1
+ ;;
+ N)
+ do_syscall_nr=1
+ ;;
+ S)
+ do_syscall_table=1
+ ;;
+ X)
+ set -x
+ ;;
+ :)
+ echo "Missing argument for -$OPTARG" >&2
+ exit 1
+ ;;
+ \?)
+ echo "Invalid option specified" >&2
+ exit 1
+ ;;
+ esac
+done
+
+test "$do_syscall_header" && generate_syscall_header "$abi_list" "$filename"
+test "$do_syscall_table" && generate_syscall_table
+test "$do_syscall_nr" && generate_syscall_nr "$abi_list"
+
+exit 0
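
To make the -N output concrete: the highest number in the table above is 380 (s390_sthyi), so generate_syscall_nr computes num_syscalls = 380 + 1 and emits a header of this shape:

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_S390_SYSCALLS_NR
#define __ASM_S390_SYSCALLS_NR

#define NR_syscalls 381

#endif /* __ASM_S390_SYSCALLS_NR */
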
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 4d5b65e527b5..4b6e0397f66d 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -404,8 +404,7 @@ out:
put_online_cpus();
return rc ? rc : count;
}
-static DEVICE_ATTR(dispatching, 0644, dispatching_show,
- dispatching_store);
+static DEVICE_ATTR_RW(dispatching);
static ssize_t cpu_polarization_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index 308564b9bf68..c5c856f320bc 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -10,6 +10,9 @@ obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o
targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
+KBUILD_AFLAGS += -DBUILD_VDSO
+KBUILD_CFLAGS += -DBUILD_VDSO
+
KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_31 += -m31 -s
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
index f61df5253c23..eaf9cf1417f6 100644
--- a/arch/s390/kernel/vdso32/clock_getres.S
+++ b/arch/s390/kernel/vdso32/clock_getres.S
@@ -9,13 +9,14 @@
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+#include <asm/dwarf.h>
.text
.align 4
.globl __kernel_clock_getres
.type __kernel_clock_getres,@function
__kernel_clock_getres:
- .cfi_startproc
+ CFI_STARTPROC
basr %r1,0
la %r1,4f-.(%r1)
chi %r2,__CLOCK_REALTIME
@@ -37,7 +38,7 @@ __kernel_clock_getres:
3: lhi %r1,__NR_clock_getres /* fallback to svc */
svc 0
br %r14
+ CFI_ENDPROC
4: .long __CLOCK_REALTIME_RES
5: .long __CLOCK_COARSE_RES
- .cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
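
The vDSO hunks swap raw .cfi_* directives for CFI_* macros, with BUILD_VDSO now defined in the vdso Makefiles above, so unwind annotation policy is controlled in one header. One plausible shape for such an asm/dwarf.h (a sketch only, assuming the macros alias the raw directives and BUILD_VDSO merely selects which DWARF sections are requested, which fits the .eh_frame discard added to vmlinux.lds.S below):

#ifdef __ASSEMBLY__

#define CFI_STARTPROC		.cfi_startproc
#define CFI_ENDPROC		.cfi_endproc
#define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
#define CFI_RESTORE		.cfi_restore
#define CFI_VAL_OFFSET		.cfi_val_offset

#ifndef BUILD_VDSO
	/* for the kernel image: ask for .debug_frame as well, since
	 * .eh_frame is discarded by the linker script */
	.cfi_sections .eh_frame, .debug_frame
#endif

#endif /* __ASSEMBLY__ */
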
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 2d6ec3abe095..a9418bf975db 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -9,14 +9,17 @@
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+#include <asm/dwarf.h>
.text
.align 4
.globl __kernel_clock_gettime
.type __kernel_clock_gettime,@function
__kernel_clock_gettime:
- .cfi_startproc
+ CFI_STARTPROC
ahi %r15,-16
+ CFI_DEF_CFA_OFFSET 176
+ CFI_VAL_OFFSET 15, -160
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
chi %r2,__CLOCK_REALTIME_COARSE
@@ -69,9 +72,13 @@ __kernel_clock_gettime:
st %r1,4(%r3) /* store tp->tv_nsec */
lhi %r2,0
ahi %r15,16
+ CFI_DEF_CFA_OFFSET 160
+ CFI_RESTORE 15
br %r14
/* CLOCK_MONOTONIC_COARSE */
+ CFI_DEF_CFA_OFFSET 176
+ CFI_VAL_OFFSET 15, -160
9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 9b
@@ -151,15 +158,21 @@ __kernel_clock_gettime:
st %r1,4(%r3) /* store tp->tv_nsec */
lhi %r2,0
ahi %r15,16
+ CFI_DEF_CFA_OFFSET 160
+ CFI_RESTORE 15
br %r14
/* Fallback to system call */
+ CFI_DEF_CFA_OFFSET 176
+ CFI_VAL_OFFSET 15, -160
19: lhi %r1,__NR_clock_gettime
svc 0
ahi %r15,16
+ CFI_DEF_CFA_OFFSET 160
+ CFI_RESTORE 15
br %r14
+ CFI_ENDPROC
20: .long 1000000000
21: .long _vdso_data - 0b
- .cfi_endproc
.size __kernel_clock_gettime,.-__kernel_clock_gettime
diff --git a/arch/s390/kernel/vdso32/getcpu.S b/arch/s390/kernel/vdso32/getcpu.S
index 5477a2c112fb..25515f3fbcea 100644
--- a/arch/s390/kernel/vdso32/getcpu.S
+++ b/arch/s390/kernel/vdso32/getcpu.S
@@ -8,13 +8,14 @@
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
+#include <asm/dwarf.h>
.text
.align 4
.globl __kernel_getcpu
.type __kernel_getcpu,@function
__kernel_getcpu:
- .cfi_startproc
+ CFI_STARTPROC
la %r4,0
sacf 256
l %r5,__VDSO_CPU_NR(%r4)
@@ -28,5 +29,5 @@ __kernel_getcpu:
st %r4,0(%r3)
3: lhi %r2,0
br %r14
- .cfi_endproc
+ CFI_ENDPROC
.size __kernel_getcpu,.-__kernel_getcpu
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index aa8bf13a2edb..3c0db0fa6ad9 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -9,14 +9,17 @@
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+#include <asm/dwarf.h>
.text
.align 4
.globl __kernel_gettimeofday
.type __kernel_gettimeofday,@function
__kernel_gettimeofday:
- .cfi_startproc
+ CFI_STARTPROC
ahi %r15,-16
+ CFI_ADJUST_CFA_OFFSET 16
+ CFI_VAL_OFFSET 15, -160
basr %r5,0
0: al %r5,13f-0b(%r5) /* get &_vdso_data */
1: ltr %r3,%r3 /* check if tz is NULL */
@@ -89,9 +92,11 @@ __kernel_gettimeofday:
st %r0,4(%r2) /* store tv->tv_usec */
10: slr %r2,%r2
ahi %r15,16
+ CFI_ADJUST_CFA_OFFSET -16
+ CFI_RESTORE 15
br %r14
+ CFI_ENDPROC
11: .long 1000000000
12: .long 274877907
13: .long _vdso_data - 0b
- .cfi_endproc
.size __kernel_gettimeofday,.-__kernel_gettimeofday
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index f81ae7998883..15b1ceafc4c1 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -10,6 +10,9 @@ obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o
targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
+KBUILD_AFLAGS += -DBUILD_VDSO
+KBUILD_CFLAGS += -DBUILD_VDSO
+
KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_64 += -m64 -s
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index faf5213b15df..081435398e0a 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -9,13 +9,14 @@
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+#include <asm/dwarf.h>
.text
.align 4
.globl __kernel_clock_getres
.type __kernel_clock_getres,@function
__kernel_clock_getres:
- .cfi_startproc
+ CFI_STARTPROC
larl %r1,4f
cghi %r2,__CLOCK_REALTIME_COARSE
je 0f
@@ -43,7 +44,7 @@ __kernel_clock_getres:
2: lghi %r1,__NR_clock_getres /* fallback to svc */
svc 0
br %r14
+ CFI_ENDPROC
3: .quad __CLOCK_REALTIME_RES
4: .quad __CLOCK_COARSE_RES
- .cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 6046b3bfca46..fac3ab5ec83a 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -9,14 +9,17 @@
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+#include <asm/dwarf.h>
.text
.align 4
.globl __kernel_clock_gettime
.type __kernel_clock_gettime,@function
__kernel_clock_gettime:
- .cfi_startproc
+ CFI_STARTPROC
aghi %r15,-16
+ CFI_DEF_CFA_OFFSET 176
+ CFI_VAL_OFFSET 15, -160
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME_COARSE
je 4f
@@ -53,9 +56,13 @@ __kernel_clock_gettime:
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
aghi %r15,16
+ CFI_DEF_CFA_OFFSET 160
+ CFI_RESTORE 15
br %r14
/* CLOCK_MONOTONIC_COARSE */
+ CFI_DEF_CFA_OFFSET 176
+ CFI_VAL_OFFSET 15, -160
3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 3b
@@ -108,9 +115,13 @@ __kernel_clock_gettime:
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
aghi %r15,16
+ CFI_DEF_CFA_OFFSET 160
+ CFI_RESTORE 15
br %r14
/* CPUCLOCK_VIRT for this thread */
+ CFI_DEF_CFA_OFFSET 176
+ CFI_VAL_OFFSET 15, -160
9: lghi %r4,0
icm %r0,15,__VDSO_ECTG_OK(%r5)
jz 12f
@@ -131,15 +142,21 @@ __kernel_clock_gettime:
stg %r4,8(%r3)
lghi %r2,0
aghi %r15,16
+ CFI_DEF_CFA_OFFSET 160
+ CFI_RESTORE 15
br %r14
/* Fallback to system call */
+ CFI_DEF_CFA_OFFSET 176
+ CFI_VAL_OFFSET 15, -160
12: lghi %r1,__NR_clock_gettime
svc 0
aghi %r15,16
+ CFI_DEF_CFA_OFFSET 160
+ CFI_RESTORE 15
br %r14
+ CFI_ENDPROC
13: .quad 1000000000
14: .quad 19342813113834067
- .cfi_endproc
.size __kernel_clock_gettime,.-__kernel_clock_gettime
diff --git a/arch/s390/kernel/vdso64/getcpu.S b/arch/s390/kernel/vdso64/getcpu.S
index e9c34364d97b..2446e9dac8ab 100644
--- a/arch/s390/kernel/vdso64/getcpu.S
+++ b/arch/s390/kernel/vdso64/getcpu.S
@@ -8,13 +8,14 @@
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
+#include <asm/dwarf.h>
.text
.align 4
.globl __kernel_getcpu
.type __kernel_getcpu,@function
__kernel_getcpu:
- .cfi_startproc
+ CFI_STARTPROC
la %r4,0
sacf 256
l %r5,__VDSO_CPU_NR(%r4)
@@ -28,5 +29,5 @@ __kernel_getcpu:
st %r4,0(%r3)
3: lghi %r2,0
br %r14
- .cfi_endproc
+ CFI_ENDPROC
.size __kernel_getcpu,.-__kernel_getcpu
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index cc9dbc27da6f..6e1f0b421695 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -9,14 +9,17 @@
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+#include <asm/dwarf.h>
.text
.align 4
.globl __kernel_gettimeofday
.type __kernel_gettimeofday,@function
__kernel_gettimeofday:
- .cfi_startproc
+ CFI_STARTPROC
aghi %r15,-16
+ CFI_ADJUST_CFA_OFFSET 16
+ CFI_VAL_OFFSET 15, -160
larl %r5,_vdso_data
0: ltgr %r3,%r3 /* check if tz is NULL */
je 1f
@@ -58,8 +61,10 @@ __kernel_gettimeofday:
stg %r0,8(%r2) /* store tv->tv_usec */
4: lghi %r2,0
aghi %r15,16
+ CFI_ADJUST_CFA_OFFSET -16
+ CFI_RESTORE 15
br %r14
+ CFI_ENDPROC
5: .quad 1000000000
.long 274877907
- .cfi_endproc
.size __kernel_gettimeofday,.-__kernel_gettimeofday
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a049ff005f03..608cf2987d19 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -141,4 +141,7 @@ SECTIONS
/* Sections to be discarded */
DISCARDS
+ /DISCARD/ : {
+ *(.eh_frame)
+ }
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ec8b68e97d3c..1371dff2b90d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -421,6 +421,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_GS:
r = test_facility(133);
break;
+ case KVM_CAP_S390_BPB:
+ r = test_facility(82);
+ break;
default:
r = 0;
}
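
With the capability wired up, userspace can probe it through the standard extension check before relying on branch prediction behavior control; a sketch (error handling elided):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vm_has_bpb(int vm_fd)
{
	/* > 0 iff the kernel reported facility 82, per the case above */
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_BPB) > 0;
}
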
@@ -766,7 +769,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
/*
* Must be called with kvm->srcu held to avoid races on memslots, and with
- * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
+ * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
*/
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
@@ -792,11 +795,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
if (kvm->arch.use_cmma) {
/*
- * Get the last slot. They should be sorted by base_gfn, so the
- * last slot is also the one at the end of the address space.
- * We have verified above that at least one slot is present.
+ * Get the first slot. They are reverse sorted by base_gfn, so
+ * the first slot is also the one at the end of the address
+ * space. We have verified above that at least one slot is
+ * present.
*/
- ms = slots->memslots + slots->used_slots - 1;
+ ms = slots->memslots;
/* round up so we only use full longs */
ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
/* allocate enough bytes to store all the bits */
@@ -821,7 +825,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
}
/*
- * Must be called with kvm->lock to avoid races with ourselves and
+ * Must be called with kvm->slots_lock to avoid races with ourselves and
* kvm_s390_vm_start_migration.
*/
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
@@ -836,6 +840,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
if (kvm->arch.use_cmma) {
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
+ /* We have to wait for the essa emulation to finish */
+ synchronize_srcu(&kvm->srcu);
vfree(mgs->pgste_bitmap);
}
kfree(mgs);
@@ -845,14 +851,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
static int kvm_s390_vm_set_migration(struct kvm *kvm,
struct kvm_device_attr *attr)
{
- int idx, res = -ENXIO;
+ int res = -ENXIO;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->slots_lock);
switch (attr->attr) {
case KVM_S390_VM_MIGRATION_START:
- idx = srcu_read_lock(&kvm->srcu);
res = kvm_s390_vm_start_migration(kvm);
- srcu_read_unlock(&kvm->srcu, idx);
break;
case KVM_S390_VM_MIGRATION_STOP:
res = kvm_s390_vm_stop_migration(kvm);
@@ -860,7 +864,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm,
default:
break;
}
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->slots_lock);
return res;
}
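
Both migration toggles, and further down the CMMA get/set ioctls, now serialize on kvm->slots_lock instead of kvm->lock, so the memslot layout they inspect cannot change underneath them and the explicit srcu_read_lock() becomes unnecessary here. The resulting convention, condensed into a hypothetical helper (the helper name is an illustration, not kernel code):

/* Illustrative only: migration state is guarded by slots_lock. */
static int kvm_s390_toggle_migration(struct kvm *kvm, bool start)
{
	int res;

	mutex_lock(&kvm->slots_lock);	/* also excludes memslot updates */
	res = start ? kvm_s390_vm_start_migration(kvm)
		    : kvm_s390_vm_stop_migration(kvm);
	mutex_unlock(&kvm->slots_lock);
	return res;
}
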
@@ -1750,7 +1754,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&args, argp, sizeof(args)))
break;
+ mutex_lock(&kvm->slots_lock);
r = kvm_s390_get_cmma_bits(kvm, &args);
+ mutex_unlock(&kvm->slots_lock);
if (!r) {
r = copy_to_user(argp, &args, sizeof(args));
if (r)
@@ -1764,7 +1770,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&args, argp, sizeof(args)))
break;
+ mutex_lock(&kvm->slots_lock);
r = kvm_s390_set_cmma_bits(kvm, &args);
+ mutex_unlock(&kvm->slots_lock);
break;
}
default:
@@ -2197,6 +2205,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm_s390_set_prefix(vcpu, 0);
if (test_kvm_facility(vcpu->kvm, 64))
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+ if (test_kvm_facility(vcpu->kvm, 82))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
if (test_kvm_facility(vcpu->kvm, 133))
vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
/* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2338,6 +2348,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
current->thread.fpu.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -3297,6 +3308,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
vcpu->arch.gs_enabled = 1;
}
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+ test_kvm_facility(vcpu->kvm, 82)) {
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+ }
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
/* save host (userspace) fprs/vrs */
@@ -3343,6 +3359,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
/* Save guest register state */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 572496c688cc..0714bfa56da0 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1006,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
cbrlo[entries] = gfn << PAGE_SHIFT;
}
- if (orc) {
+ if (orc && gfn < ms->bitmap_size) {
/* increment only if we are really flipping the bit to 1 */
if (!test_and_set_bit(gfn, ms->pgste_bitmap))
atomic64_inc(&ms->dirty_pages);
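
The new gfn < ms->bitmap_size guard matters because the PGSTE bitmap is sized from the memslots present when migration started; memory added afterwards would otherwise index past the allocation. A hypothetical helper making the guarded update explicit (struct name as used by kvm-s390, the helper itself is illustrative):

/* Illustrative: only touch bits that lie inside the migration bitmap. */
static void mark_gfn_dirty(struct kvm_s390_migration_state *ms,
			   unsigned long gfn)
{
	if (gfn >= ms->bitmap_size)	/* outside bitmap: added after start */
		return;
	/* increment only if we are really flipping the bit to 1 */
	if (!test_and_set_bit(gfn, ms->pgste_bitmap))
		atomic64_inc(&ms->dirty_pages);
}
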
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 5d6ae0326d9e..751348348477 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -223,6 +223,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
memcpy(scb_o->gcr, scb_s->gcr, 128);
scb_o->pp = scb_s->pp;
+ /* branch prediction */
+ if (test_kvm_facility(vcpu->kvm, 82)) {
+ scb_o->fpf &= ~FPF_BPBC;
+ scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+ }
+
/* interrupt intercept */
switch (scb_s->icptcode) {
case ICPT_PROGI:
@@ -265,6 +271,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->ecb3 = 0;
scb_s->ecd = 0;
scb_s->fac = 0;
+ scb_s->fpf = 0;
rc = prepare_cpuflags(vcpu, vsie_page);
if (rc)
@@ -324,6 +331,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
prefix_unmapped(vsie_page);
scb_s->ecb |= scb_o->ecb & ECB_TE;
}
+ /* branch prediction */
+ if (test_kvm_facility(vcpu->kvm, 82))
+ scb_s->fpf |= scb_o->fpf & FPF_BPBC;
/* SIMD */
if (test_kvm_facility(vcpu->kvm, 129)) {
scb_s->eca |= scb_o->eca & ECA_VX;
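
For nested (vSIE) guests the same rule applies as for ordinary guests: the shadow control block only passes the guest-controlled FPF_BPBC bit through when facility 82 is actually offered. Restated as a hypothetical helper (illustrative, not a function in the patch):

/* Illustrative: forward a guest flag into the shadow SCB only when
 * the corresponding facility is offered to the guest.
 */
static void shadow_bpbc(struct kvm_vcpu *vcpu,
			struct kvm_s390_sie_block *scb_s,
			struct kvm_s390_sie_block *scb_o)
{
	scb_s->fpf &= ~FPF_BPBC;
	if (test_kvm_facility(vcpu->kvm, 82))
		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
}
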
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index cae5a1e16cbd..c4f8039a35e8 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -89,11 +89,11 @@ EXPORT_SYMBOL(enable_sacf_uaccess);
void disable_sacf_uaccess(mm_segment_t old_fs)
{
+ current->thread.mm_segment = old_fs;
if (old_fs == USER_DS && test_facility(27)) {
__ctl_load(S390_lowcore.user_asce, 1, 1);
clear_cpu_flag(CIF_ASCE_PRIMARY);
}
- current->thread.mm_segment = old_fs;
}
EXPORT_SYMBOL(disable_sacf_uaccess);
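
Storing thread.mm_segment before reloading the primary ASCE closes a window in which an interrupt could observe the old segment while the control registers already describe the new one. The helpers are used in pairs; a minimal sketch of a caller (the function body stands in for any sacf-based user access):

/* Illustrative pairing of the sacf uaccess helpers. */
static void example_uaccess(void)
{
	mm_segment_t old_fs = enable_sacf_uaccess();

	/* ... access user memory in secondary-space mode ... */

	disable_sacf_uaccess(old_fs);	/* mm_segment updated before CR1 */
}
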
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9557d8b516df..78a19c93b380 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -28,8 +28,6 @@
#include <asm/set_memory.h>
#include "bpf_jit.h"
-int bpf_jit_enable __read_mostly;
-
struct bpf_jit {
u32 seen; /* Flags to remember seen eBPF instructions */
u32 seen_reg[16]; /* Array to remember which registers are used */
@@ -612,11 +610,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
- jit->seen |= SEEN_RET0;
- /* ltr %src,%src (if src == 0 goto fail) */
- EMIT2(0x1200, src_reg, src_reg);
- /* jz <ret0> */
- EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
/* lhi %w0,0 */
EMIT4_IMM(0xa7080000, REG_W0, 0);
/* lr %w1,%dst */
@@ -632,11 +625,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
- jit->seen |= SEEN_RET0;
- /* ltgr %src,%src (if src == 0 goto fail) */
- EMIT4(0xb9020000, src_reg, src_reg);
- /* jz <ret0> */
- EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
/* lghi %w0,0 */
EMIT4_IMM(0xa7090000, REG_W0, 0);
/* lgr %w1,%dst */
@@ -1299,7 +1287,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
struct bpf_jit jit;
int pass;
- if (!bpf_jit_enable)
+ if (!fp->jit_requested)
return orig_fp;
tmp = bpf_jit_blind_constants(fp);
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index f7aa5a77827e..2d15d84c20ed 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -181,6 +181,9 @@ out_unlock:
static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
size_t size, int flags)
{
+ unsigned long irqflags;
+ int ret;
+
/*
* With zdev->tlb_refresh == 0, rpcit is not required to establish new
* translations when previously invalid translation-table entries are
@@ -196,8 +199,22 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
return 0;
}
- return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
- PAGE_ALIGN(size));
+ ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
+ PAGE_ALIGN(size));
+ if (ret == -ENOMEM && !s390_iommu_strict) {
+ /* enable the hypervisor to free some resources */
+ if (zpci_refresh_global(zdev))
+ goto out;
+
+ spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
+ bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
+ zdev->lazy_bitmap, zdev->iommu_pages);
+ bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
+ ret = 0;
+ }
+out:
+ return ret;
}
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 19bcb3b45a70..f069929e8211 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -89,6 +89,9 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
if (cc)
zpci_err_insn(cc, status, addr, range);
+ if (cc == 1 && (status == 4 || status == 16))
+ return -ENOMEM;
+
return (cc) ? -EIO : 0;
}
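
Condition code 1 with status 4 or 16 means the hypervisor ran out of refresh resources; surfacing that as -ENOMEM is what lets __dma_purge_tlb() above fall back to a global refresh plus a flush of the lazily unmapped IOTLB entries. The caller-side contract, sketched as a hypothetical wrapper (the full recovery path in the patch also clears the lazy bitmap under the iommu lock):

/* Illustrative caller-side view of the new return-code contract. */
static int purge_range(struct zpci_dev *zdev, dma_addr_t dma_addr, size_t size)
{
	int ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				     PAGE_ALIGN(size));

	if (ret == -ENOMEM && !s390_iommu_strict)
		/* out of resources: global refresh, then flush lazy bitmap */
		ret = zpci_refresh_global(zdev);
	return ret;
}
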
diff --git a/arch/s390/tools/.gitignore b/arch/s390/tools/.gitignore
index 72a4b2cf1365..71bd6f8eebaf 100644
--- a/arch/s390/tools/.gitignore
+++ b/arch/s390/tools/.gitignore
@@ -1 +1,2 @@
gen_facilities
+gen_opcode_table
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index 2e70e25de07a..48cdac1143a9 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -3,22 +3,33 @@
# Makefile for s390 specific build tools
#
+kapi := arch/$(ARCH)/include/generated/asm
+kapi-hdrs-y := $(kapi)/facility-defs.h $(kapi)/dis-defs.h
+
+targets += $(addprefix ../../../,$(kapi-hdrs-y))
+PHONY += kapi
+
+kapi: $(kapi-hdrs-y)
+
hostprogs-y += gen_facilities
hostprogs-y += gen_opcode_table
HOSTCFLAGS_gen_facilities.o += -Wall $(LINUXINCLUDE)
HOSTCFLAGS_gen_opcode_table.o += -Wall $(LINUXINCLUDE)
-define filechk_facilities.h
+# Ensure output directory exists
+_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+define filechk_facility-defs.h
$(obj)/gen_facilities
endef
-define filechk_dis.h
+define filechk_dis-defs.h
( $(obj)/gen_opcode_table < $(srctree)/arch/$(ARCH)/tools/opcodes.txt )
endef
-include/generated/facilities.h: $(obj)/gen_facilities FORCE
- $(call filechk,facilities.h)
+$(kapi)/facility-defs.h: $(obj)/gen_facilities FORCE
+ $(call filechk,facility-defs.h)
-include/generated/dis.h: $(obj)/gen_opcode_table FORCE
- $(call filechk,dis.h)
+$(kapi)/dis-defs.h: $(obj)/gen_opcode_table FORCE
+ $(call filechk,dis-defs.h)
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 0373801d9860..424a1ba4f874 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -128,8 +128,8 @@ static void print_facility_lists(void)
int main(int argc, char **argv)
{
- printf("#ifndef __ASM_S390_FACILITIES__\n");
- printf("#define __ASM_S390_FACILITIES__\n");
+ printf("#ifndef __ASM_S390_FACILITY_DEFS__\n");
+ printf("#define __ASM_S390_FACILITY_DEFS__\n");
printf("/*\n");
printf(" * DO NOT MODIFY.\n");
printf(" *\n");
diff --git a/arch/s390/tools/gen_opcode_table.c b/arch/s390/tools/gen_opcode_table.c
index 357d42681cef..259aa0680d1a 100644
--- a/arch/s390/tools/gen_opcode_table.c
+++ b/arch/s390/tools/gen_opcode_table.c
@@ -321,8 +321,8 @@ int main(int argc, char **argv)
struct gen_opcode *desc = &_desc;
read_instructions(desc);
- printf("#ifndef __S390_GENERATED_DIS_H__\n");
- printf("#define __S390_GENERATED_DIS_H__\n");
+ printf("#ifndef __S390_GENERATED_DIS_DEFS_H__\n");
+ printf("#define __S390_GENERATED_DIS_DEFS_H__\n");
printf("/*\n");
printf(" * DO NOT MODIFY.\n");
printf(" *\n");
diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h
index ad51b56e51bd..bc4c7c90550f 100644
--- a/arch/score/include/asm/thread_info.h
+++ b/arch/score/include/asm/thread_info.h
@@ -58,9 +58,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* How to get the thread information struct from C. */
register struct thread_info *__current_thread_info __asm__("r28");
#define current_thread_info() __current_thread_info
diff --git a/arch/score/include/uapi/asm/Kbuild b/arch/score/include/uapi/asm/Kbuild
index 81271d3af47c..b04fd1632051 100644
--- a/arch/score/include/uapi/asm/Kbuild
+++ b/arch/score/include/uapi/asm/Kbuild
@@ -2,4 +2,5 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
+generic-y += poll.h
generic-y += siginfo.h
diff --git a/arch/score/include/uapi/asm/poll.h b/arch/score/include/uapi/asm/poll.h
deleted file mode 100644
index c636b85843cd..000000000000
--- a/arch/score/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_SCORE_POLL_H
-#define _ASM_SCORE_POLL_H
-
-#include <asm-generic/poll.h>
-
-#endif /* _ASM_SCORE_POLL_H */
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c
index 7291e2f11a47..4d6be53058d6 100644
--- a/arch/sh/boards/board-espt.c
+++ b/arch/sh/boards/board-espt.c
@@ -79,7 +79,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 0,
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 0104c8199c48..1bde08dc067d 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -76,7 +76,6 @@ static struct resource sh_eth0_resources[] = {
static struct sh_eth_plat_data sh7757_eth0_pdata = {
.phy = 1,
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
@@ -104,7 +103,6 @@ static struct resource sh_eth1_resources[] = {
static struct sh_eth_plat_data sh7757_eth1_pdata = {
.phy = 1,
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
@@ -148,7 +146,6 @@ static struct resource sh_eth_giga0_resources[] = {
static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
.phy = 18,
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
@@ -182,7 +179,6 @@ static struct resource sh_eth_giga1_resources[] = {
static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
.phy = 19,
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 1faf6cb93dcb..6f929abe0b50 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8700 */
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
.phy_interface = PHY_INTERFACE_MODE_MII,
.ether_link_active_low = 1
};
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 77c35350ee77..412326d59e6f 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -9,6 +9,7 @@
*/
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/sh_eth.h>
#include <mach-se/mach/se.h>
#include <mach-se/mach/mrshpc.h>
#include <asm/machvec.h>
@@ -115,13 +116,23 @@ static struct platform_device heartbeat_device = {
#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
defined(CONFIG_CPU_SUBTYPE_SH7712)
/* SH771X Ethernet driver */
+static struct sh_eth_plat_data sh_eth_plat = {
+ .phy = PHY_ID,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
static struct resource sh_eth0_resources[] = {
[0] = {
.start = SH_ETH0_BASE,
- .end = SH_ETH0_BASE + 0x1B8,
+ .end = SH_ETH0_BASE + 0x1B8 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
+ .start = SH_TSU_BASE,
+ .end = SH_TSU_BASE + 0x200 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
.start = SH_ETH0_IRQ,
.end = SH_ETH0_IRQ,
.flags = IORESOURCE_IRQ,
@@ -132,7 +143,7 @@ static struct platform_device sh_eth0_device = {
.name = "sh771x-ether",
.id = 0,
.dev = {
- .platform_data = PHY_ID,
+ .platform_data = &sh_eth_plat,
},
.num_resources = ARRAY_SIZE(sh_eth0_resources),
.resource = sh_eth0_resources,
@@ -141,10 +152,15 @@ static struct platform_device sh_eth0_device = {
static struct resource sh_eth1_resources[] = {
[0] = {
.start = SH_ETH1_BASE,
- .end = SH_ETH1_BASE + 0x1B8,
+ .end = SH_ETH1_BASE + 0x1B8 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
+ .start = SH_TSU_BASE,
+ .end = SH_TSU_BASE + 0x200 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
.start = SH_ETH1_IRQ,
.end = SH_ETH1_IRQ,
.flags = IORESOURCE_IRQ,
@@ -155,7 +171,7 @@ static struct platform_device sh_eth1_device = {
.name = "sh771x-ether",
.id = 1,
.dev = {
- .platform_data = PHY_ID,
+ .platform_data = &sh_eth_plat,
},
.num_resources = ARRAY_SIZE(sh_eth1_resources),
.resource = sh_eth1_resources,
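
Two fixes ride along in this board file: the platform data now carries a real sh_eth_plat_data instead of a bare PHY id cast into the pointer, and the memory windows account for struct resource's end being inclusive, so a 0x1B8-byte window must end at base + 0x1B8 - 1. Restated on a hypothetical resource (illustrative):

/* resource_size() == end - start + 1, hence the "- 1" above. */
static struct resource example_eth_res = {
	.start = SH_ETH0_BASE,
	.end   = SH_ETH0_BASE + 0x1B8 - 1,	/* inclusive last byte */
	.flags = IORESOURCE_MEM,
};
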
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index f1fecd395679..255952555656 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -374,7 +374,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8187 */
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c
index 2c8fb04685d4..6e62686b81b1 100644
--- a/arch/sh/boards/mach-sh7763rdp/setup.c
+++ b/arch/sh/boards/mach-sh7763rdp/setup.c
@@ -87,7 +87,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 1,
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig
index 0a432b5f50e7..87641b7d6c4e 100644
--- a/arch/sh/configs/polaris_defconfig
+++ b/arch/sh/configs/polaris_defconfig
@@ -38,7 +38,6 @@ CONFIG_IP_MULTICAST=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index a17181160233..762bc5619910 100644
--- a/arch/sh/drivers/push-switch.c
+++ b/arch/sh/drivers/push-switch.c
@@ -24,7 +24,7 @@ static ssize_t switch_show(struct device *dev,
struct push_switch_platform_info *psw_info = dev->platform_data;
return sprintf(buf, "%s\n", psw_info->name);
}
-static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL);
+static DEVICE_ATTR_RO(switch);
static void switch_timer(struct timer_list *t)
{
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index becb798f1b04..cf5c792bf70b 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -63,9 +63,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("r15") __used;
diff --git a/arch/sh/include/mach-se/mach/se.h b/arch/sh/include/mach-se/mach/se.h
index 4246ef9b07a3..aa83fe1ff0b1 100644
--- a/arch/sh/include/mach-se/mach/se.h
+++ b/arch/sh/include/mach-se/mach/se.h
@@ -100,6 +100,7 @@
/* Base address */
#define SH_ETH0_BASE 0xA7000000
#define SH_ETH1_BASE 0xA7000400
+#define SH_TSU_BASE 0xA7000800
/* PHY ID */
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
# define PHY_ID 0x00
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 95796ad00fbe..d08db08dec38 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -122,7 +122,6 @@ static struct platform_device scif2_device = {
static struct sh_eth_plat_data eth_platform_data = {
.phy = 1,
- .edmac_endian = EDMAC_LITTLE_ENDIAN,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 57cff00cad17..b3770bb26211 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -609,7 +609,8 @@ asmlinkage void do_divide_error(unsigned long r4)
break;
}
- force_sig_info(SIGFPE, &info, current);
+ info.si_signo = SIGFPE;
+ force_sig_info(info.si_signo, &info, current);
}
#endif
diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile
index 818d3aa5172e..d257186c27d1 100644
--- a/arch/sparc/crypto/Makefile
+++ b/arch/sparc/crypto/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
-obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o
+obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
index d1064e46efe8..8aa664638c3c 100644
--- a/arch/sparc/crypto/crc32c_glue.c
+++ b/arch/sparc/crypto/crc32c_glue.c
@@ -133,6 +133,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_alignmask = 7,
diff --git a/arch/sparc/include/asm/asm-prototypes.h b/arch/sparc/include/asm/asm-prototypes.h
index 96e0972e8dbf..4987c735ff56 100644
--- a/arch/sparc/include/asm/asm-prototypes.h
+++ b/arch/sparc/include/asm/asm-prototypes.h
@@ -6,7 +6,7 @@
#include <asm/xor.h>
#include <asm/checksum.h>
#include <asm/trap_block.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index fa38c78de0f0..615283e16f22 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -149,65 +149,6 @@ typedef u32 compat_old_sigset_t;
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[SI_PAD_SIZE32];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
- struct {
- u32 _addr; /* faulting insn/memory ref. */
- int _trapno;
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 89a0c57aed59..ab9c6b027b75 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -76,6 +76,10 @@
#define HV_ETOOMANY 15 /* Too many items specified */
#define HV_ECHANNEL 16 /* Invalid LDC channel */
#define HV_EBUSY 17 /* Resource busy */
+#define HV_EUNAVAILABLE 23 /* Resource or operation not
+ * currently available, but may
+ * become available in the future
+ */
/* mach_exit()
* TRAP: HV_FAST_TRAP
@@ -941,6 +945,139 @@ unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
*/
#define HV_FAST_MEM_SYNC 0x32
+/* Coprocessor services
+ *
+ * M7 and later processors provide an on-chip coprocessor which
+ * accelerates database operations, and is known internally as
+ * DAX.
+ */
+
+/* ccb_submit()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_CCB_SUBMIT
+ * ARG0: address of CCB array
+ * ARG1: size (in bytes) of CCB array being submitted
+ * ARG2: flags
+ * ARG3: reserved
+ * RET0: status (success or error code)
+ * RET1: size (in bytes) of CCB array that was accepted (might be less
+ * than arg1)
+ * RET2: status data
+ * if status == ENOMAP or ENOACCESS, identifies the VA in question
+ * if status == EUNAVAILABLE, unavailable code
+ * RET3: reserved
+ *
+ * ERRORS: EOK successful submission (check size)
+ * EWOULDBLOCK could not finish submissions, try again
+ * EBADALIGN array not 64B aligned or size not 64B multiple
+ * ENORADDR invalid RA for array or in CCB
+ * ENOMAP could not translate address (see status data)
+ * EINVAL invalid ccb or arguments
+ * ETOOMANY too many ccbs with all-or-nothing flag
+ * ENOACCESS guest has no access to submit ccbs or address
+ * in CCB does not have correct permissions (check
+ * status data)
+ * EUNAVAILABLE ccb operation could not be performed at this
+ * time (check status data)
+ * Status data codes:
+ * 0 - exact CCB could not be executed
+ * 1 - CCB opcode cannot be executed
+ * 2 - CCB version cannot be executed
+ * 3 - vcpu cannot execute CCBs
+ * 4 - no CCBs can be executed
+ */
+
+#define HV_CCB_SUBMIT 0x34
+#ifndef __ASSEMBLY__
+unsigned long sun4v_ccb_submit(unsigned long ccb_buf,
+ unsigned long len,
+ unsigned long flags,
+ unsigned long reserved,
+ void *submitted_len,
+ void *status_data);
+#endif
+
+/* flags (ARG2) */
+#define HV_CCB_QUERY_CMD BIT(1)
+#define HV_CCB_ARG0_TYPE_REAL 0UL
+#define HV_CCB_ARG0_TYPE_PRIMARY BIT(4)
+#define HV_CCB_ARG0_TYPE_SECONDARY BIT(5)
+#define HV_CCB_ARG0_TYPE_NUCLEUS GENMASK(5, 4)
+#define HV_CCB_ARG0_PRIVILEGED BIT(6)
+#define HV_CCB_ALL_OR_NOTHING BIT(7)
+#define HV_CCB_QUEUE_INFO BIT(8)
+#define HV_CCB_VA_REJECT 0UL
+#define HV_CCB_VA_SECONDARY BIT(13)
+#define HV_CCB_VA_NUCLEUS GENMASK(13, 12)
+#define HV_CCB_VA_PRIVILEGED BIT(14)
+#define HV_CCB_VA_READ_ADI_DISABLE BIT(15) /* DAX2 only */
+
+/* ccb_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_CCB_INFO
+ * ARG0: real address of CCB completion area
+ * RET0: status (success or error code)
+ * RET1: info array
+ * - RET1[0]: CCB state
+ * - RET1[1]: dax unit
+ * - RET1[2]: queue number
+ * - RET1[3]: queue position
+ *
+ * ERRORS: EOK operation successful
+ * EBADALIGN address not 64B aligned
+ * ENORADDR RA in address not valid
+ * EINVAL CA not valid
+ * EWOULDBLOCK info not available for this CCB currently, try
+ * again
+ * ENOACCESS guest cannot use dax
+ */
+
+#define HV_CCB_INFO 0x35
+#ifndef __ASSEMBLY__
+unsigned long sun4v_ccb_info(unsigned long ca,
+ void *info_arr);
+#endif
+
+/* info array byte offsets (RET1) */
+#define CCB_INFO_OFFSET_CCB_STATE 0
+#define CCB_INFO_OFFSET_DAX_UNIT 2
+#define CCB_INFO_OFFSET_QUEUE_NUM 4
+#define CCB_INFO_OFFSET_QUEUE_POS 6
+
+/* CCB state (RET1[0]) */
+#define HV_CCB_STATE_COMPLETED 0
+#define HV_CCB_STATE_ENQUEUED 1
+#define HV_CCB_STATE_INPROGRESS 2
+#define HV_CCB_STATE_NOTFOUND 3
+
+/* ccb_kill()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_CCB_KILL
+ * ARG0: real address of CCB completion area
+ * RET0: status (success or error code)
+ * RET1: CCB kill status
+ *
+ * ERRORS: EOK operation successful
+ * EBADALIGN address not 64B aligned
+ * ENORADDR RA in address not valid
+ * EINVAL CA not valid
+ * EWOULDBLOCK kill not available for this CCB currently, try
+ * again
+ * ENOACCESS guest cannot use dax
+ */
+
+#define HV_CCB_KILL 0x36
+#ifndef __ASSEMBLY__
+unsigned long sun4v_ccb_kill(unsigned long ca,
+ void *kill_status);
+#endif
+
+/* CCB kill status (RET1) */
+#define HV_CCB_KILL_COMPLETED 0
+#define HV_CCB_KILL_DEQUEUED 1
+#define HV_CCB_KILL_KILLED 2
+#define HV_CCB_KILL_NOTFOUND 3
+
/* Time of day services.
*
* The hypervisor maintains the time of day on a per-domain basis.
@@ -3355,6 +3492,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
#define HV_GRP_SDIO_ERR 0x0109
#define HV_GRP_REBOOT_DATA 0x0110
#define HV_GRP_ATU 0x0111
+#define HV_GRP_DAX 0x0113
#define HV_GRP_M7_PERF 0x0114
#define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201
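
For the coprocessor calls documented earlier in this header, ccb_info() hands back four 16-bit result fields packed at the CCB_INFO_OFFSET_* byte offsets. A hedged sketch of decoding them on the C side (the helper and its name are assumptions, not kernel code):

/* Illustrative: query whether a submitted CCB is still queued. */
static bool ccb_still_queued(unsigned long ca_ra)
{
	u16 info[4];	/* filled at the CCB_INFO_OFFSET_* byte offsets */

	if (sun4v_ccb_info(ca_ra, info) != HV_EOK)
		return false;
	return info[CCB_INFO_OFFSET_CCB_STATE / 2] == HV_CCB_STATE_ENQUEUED;
}
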
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 9937c5ff94a9..339920fdf9ed 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -1010,7 +1010,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);
#define __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
#define __HAVE_ARCH_PGTABLE_DEPOSIT
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index febaaeb1a0fe..548b366165dd 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -63,9 +63,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
register struct thread_info *current_thread_info_reg asm("g6");
#define current_thread_info() (current_thread_info_reg)
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index caf915321ba9..f7e7b0baec9f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -120,9 +120,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
register struct thread_info *current_thread_info_reg asm("g6");
#define current_thread_info() (current_thread_info_reg)
diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h
new file mode 100644
index 000000000000..722951908b0a
--- /dev/null
+++ b/arch/sparc/include/uapi/asm/oradax.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Oracle DAX driver API definitions
+ */
+
+#ifndef _ORADAX_H
+#define _ORADAX_H
+
+#include <linux/types.h>
+
+#define CCB_KILL 0
+#define CCB_INFO 1
+#define CCB_DEQUEUE 2
+
+struct dax_command {
+ __u16 command; /* CCB_KILL/INFO/DEQUEUE */
+ __u16 ca_offset; /* offset into mmapped completion area */
+};
+
+struct ccb_kill_result {
+ __u16 action; /* action taken to kill ccb */
+};
+
+struct ccb_info_result {
+ __u16 state; /* state of enqueued ccb */
+ __u16 inst_num; /* dax instance number of enqueued ccb */
+ __u16 q_num; /* queue number of enqueued ccb */
+ __u16 q_pos; /* ccb position in queue */
+};
+
+struct ccb_exec_result {
+ __u64 status_data; /* additional status data (e.g. bad VA) */
+ __u32 status; /* one of DAX_SUBMIT_* */
+};
+
+union ccb_result {
+ struct ccb_exec_result exec;
+ struct ccb_info_result info;
+ struct ccb_kill_result kill;
+};
+
+#define DAX_MMAP_LEN (16 * 1024)
+#define DAX_MAX_CCBS 15
+#define DAX_CCB_BUF_MAXLEN (DAX_MAX_CCBS * 64)
+#define DAX_NAME "oradax"
+
+/* CCB_EXEC status */
+#define DAX_SUBMIT_OK 0
+#define DAX_SUBMIT_ERR_RETRY 1
+#define DAX_SUBMIT_ERR_WOULDBLOCK 2
+#define DAX_SUBMIT_ERR_BUSY 3
+#define DAX_SUBMIT_ERR_THR_INIT 4
+#define DAX_SUBMIT_ERR_ARG_INVAL 5
+#define DAX_SUBMIT_ERR_CCB_INVAL 6
+#define DAX_SUBMIT_ERR_NO_CA_AVAIL 7
+#define DAX_SUBMIT_ERR_CCB_ARR_MMU_MISS 8
+#define DAX_SUBMIT_ERR_NOMAP 9
+#define DAX_SUBMIT_ERR_NOACCESS 10
+#define DAX_SUBMIT_ERR_TOOMANY 11
+#define DAX_SUBMIT_ERR_UNAVAIL 12
+#define DAX_SUBMIT_ERR_INTERNAL 13
+
+/* CCB_INFO states - must match HV_CCB_STATE_* definitions */
+#define DAX_CCB_COMPLETED 0
+#define DAX_CCB_ENQUEUED 1
+#define DAX_CCB_INPROGRESS 2
+#define DAX_CCB_NOTFOUND 3
+
+/* CCB_KILL actions - must match HV_CCB_KILL_* definitions */
+#define DAX_KILL_COMPLETED 0
+#define DAX_KILL_DEQUEUED 1
+#define DAX_KILL_KILLED 2
+#define DAX_KILL_NOTFOUND 3
+
+#endif /* _ORADAX_H */
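
User space is expected to drive the driver with struct dax_command requests and read back a union ccb_result; a hedged sketch of a CCB_INFO query (the write/read protocol, device handling, and helper name are assumptions this header alone does not spell out):

#include <unistd.h>
#include <asm/oradax.h>

/* Assumed protocol: write a dax_command, read back a ccb_result. */
static int query_ccb_state(int fd, __u16 ca_offset)
{
	struct dax_command cmd = {
		.command   = CCB_INFO,
		.ca_offset = ca_offset,	/* offset into mmapped completion area */
	};
	union ccb_result res;

	if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd))
		return -1;
	if (read(fd, &res, sizeof(res)) != sizeof(res))
		return -1;
	return res.info.state;		/* one of DAX_CCB_* */
}
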
diff --git a/arch/sparc/include/uapi/asm/poll.h b/arch/sparc/include/uapi/asm/poll.h
index 72356c999125..2a81e79aa3ea 100644
--- a/arch/sparc/include/uapi/asm/poll.h
+++ b/arch/sparc/include/uapi/asm/poll.h
@@ -2,11 +2,31 @@
#ifndef __SPARC_POLL_H
#define __SPARC_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 256
-#define POLLMSG 512
-#define POLLREMOVE 1024
-#define POLLRDHUP 2048
+#define POLLWRBAND (__force __poll_t)256
+#define POLLMSG (__force __poll_t)512
+#define POLLREMOVE (__force __poll_t)1024
+#define POLLRDHUP (__force __poll_t)2048
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2, bit 13 -> bit 11 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6) |
+ ((v & 0x2000) >> 2);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8, bit 11 -> bit 13 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6) | ((v & 0x800) << 2));
+}
+#endif
#include <asm-generic/poll.h>
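
Because sparc's historic POLLWRBAND/POLLMSG/POLLRDHUP values differ from the generic ABI, the kernel now keeps the generic bit layout internally and mangles it only at the user boundary. A quick self-check of two of the mappings, mirroring the arithmetic above on plain integers (userspace C, illustrative only):

#include <assert.h>

/* Mirrors the kernel's sparc mangle/demangle arithmetic. */
static unsigned short mangle(unsigned short v)
{
	return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6) |
	       ((v & 0x2000) >> 2);
}

static unsigned short demangle(unsigned short v)
{
	return (v & ~0x100) | ((v & 0x100) << 1) | ((v & 4) << 6) |
	       ((v & 0x800) << 2);
}

int main(void)
{
	assert(mangle(0x100) == 0x004);	  /* generic POLLWRNORM -> sparc POLLOUT */
	assert(mangle(0x200) == 0x100);	  /* generic POLLWRBAND -> sparc 256 */
	assert(demangle(0x100) == 0x200); /* sparc POLLWRBAND(256) -> generic */
	return 0;
}
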
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index a2bc1b2955b4..717ec7ef07f9 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -41,6 +41,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_SDIO_ERR, },
{ .group = HV_GRP_REBOOT_DATA, },
{ .group = HV_GRP_ATU, .flags = FLAG_PRE_API },
+ { .group = HV_GRP_DAX, },
{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
{ .group = HV_GRP_FIRE_PERF, },
{ .group = HV_GRP_N2_CPU, },
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index bdfd3d8c6707..2f865a464576 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -871,3 +871,60 @@ ENTRY(sun4v_m7_set_perfreg)
retl
nop
ENDPROC(sun4v_m7_set_perfreg)
+
+ /* %o0: address of CCB array
+ * %o1: size (in bytes) of CCB array
+ * %o2: flags
+ * %o3: reserved
+ *
+ * returns:
+ * %o0: status
+ * %o1: size (in bytes) of the CCB array that was accepted
+ * %o2: status data
+ * %o3: reserved
+ */
+ENTRY(sun4v_ccb_submit)
+ mov %o5, %g1
+ mov HV_CCB_SUBMIT, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ stx %o2, [%g1]
+ENDPROC(sun4v_ccb_submit)
+EXPORT_SYMBOL(sun4v_ccb_submit)
+
+ /* %o0: completion area ra for the ccb to get info
+ *
+ * returns:
+ * %o0: status
+ * %o1: CCB state
+ * %o2: position
+ * %o3: dax unit
+ * %o4: queue
+ */
+ENTRY(sun4v_ccb_info)
+ mov %o1, %g1
+ mov HV_CCB_INFO, %o5
+ ta HV_FAST_TRAP
+ sth %o1, [%g1 + CCB_INFO_OFFSET_CCB_STATE]
+ sth %o2, [%g1 + CCB_INFO_OFFSET_QUEUE_POS]
+ sth %o3, [%g1 + CCB_INFO_OFFSET_DAX_UNIT]
+ retl
+ sth %o4, [%g1 + CCB_INFO_OFFSET_QUEUE_NUM]
+ENDPROC(sun4v_ccb_info)
+EXPORT_SYMBOL(sun4v_ccb_info)
+
+ /* %o0: completion area ra for the ccb to kill
+ *
+ * returns:
+ * %o0: status
+ * %o1: result of the kill
+ */
+ENTRY(sun4v_ccb_kill)
+ mov %o1, %g1
+ mov HV_CCB_KILL, %o5
+ ta HV_FAST_TRAP
+ retl
+ sth %o1, [%g1]
+ENDPROC(sun4v_ccb_kill)
+EXPORT_SYMBOL(sun4v_ccb_kill)
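
Seen from C, sun4v_ccb_submit() returns the hypervisor status and writes the accepted byte count and status data through its two output pointers, matching the register protocol above. A hedged call sketch (the helper, its name, and the single-64-byte-CCB assumption are illustrative):

/* Illustrative: submit exactly one 64-byte CCB, all or nothing. */
static int submit_one_ccb(unsigned long ccb_ra)
{
	unsigned long accepted, status_data;
	unsigned long hv_err;

	hv_err = sun4v_ccb_submit(ccb_ra, 64, HV_CCB_ALL_OR_NOTHING, 0,
				  &accepted, &status_data);
	if (hv_err != HV_EOK)
		return -EIO;		/* status_data qualifies some errors */
	return accepted == 64 ? 0 : -EAGAIN;
}
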
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 54a6159b9cd8..44d379db3f64 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -70,75 +70,6 @@ struct rt_signal_frame32 {
/* __siginfo_rwin_t * */u32 rwin_save;
} __attribute__((aligned(8)));
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- int err;
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member.
- This routine must convert siginfo from 64bit to 32bit as well
- at the same time. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- default:
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_FAULT:
- err |= __put_user(from->si_trapno, &to->si_trapno);
- err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_RT:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- }
- }
- return err;
-}
-
-/* CAUTION: This is just a very minimalist implementation for the
- * sake of compat_sys_rt_sigqueueinfo()
- */
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- if (!access_ok(VERIFY_WRITE, from, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- if (copy_from_user(to, from, 3*sizeof(int)) ||
- copy_from_user(to->_sifields._pad, from->_sifields._pad,
- SI_PAD_SIZE))
- return -EFAULT;
-
- return 0;
-}
-
/* Checks if the fp is valid. We always build signal frames which are
* 16-byte aligned, therefore we can always enforce that the restore
* frame has that property as well.
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 20426a1c28f2..48366e5eb5b2 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -9,9 +9,6 @@
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#ifdef CONFIG_COMPAT
-#include <linux/compat.h> /* for compat_old_sigset_t */
-#endif
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 4ae86bc0d35c..847ddffbf38a 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -219,17 +219,28 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
}
}
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ pmd_t old;
+
+ do {
+ old = *pmdp;
+ } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+
+ return old;
+}
+
/*
* This routine is only called when splitting a THP
*/
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
- pmd_t entry = *pmdp;
-
- pmd_val(entry) &= ~_PAGE_VALID;
+ pmd_t old, entry;
- set_pmd_at(vma->vm_mm, address, pmdp, entry);
+ entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
+ old = pmdp_establish(vma, address, pmdp, entry);
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
/*
@@ -240,6 +251,8 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
!is_huge_zero_page(pmd_page(entry)))
(vma->vm_mm)->context.thp_pte_count--;
+
+ return old;
}
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
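
pmdp_establish() installs the new PMD with a cmpxchg loop and hands back the value it actually displaced, so concurrent hardware updates of the access/dirty bits are never lost and pmdp_invalidate() can now return the old entry to its callers. The same idiom on a plain integer (illustrative):

/* Lock-free replace: retry until the swap hits the value we read. */
static long replace_long(long *p, long new)
{
	long old;

	do {
		old = *p;
	} while (cmpxchg(p, old, new) != old);

	return old;	/* exactly what was displaced, racing writers included */
}
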
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index 09e318eb34ee..3bd8ca95e521 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -11,8 +11,6 @@
#include "bpf_jit_32.h"
-int bpf_jit_enable __read_mostly;
-
static inline bool is_simm13(unsigned int value)
{
return value + 0x1000 < 0x2000;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index ff5f9cb3039a..48a25869349b 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -12,8 +12,6 @@
#include "bpf_jit_64.h"
-int bpf_jit_enable __read_mostly;
-
static inline bool is_simm13(unsigned int value)
{
return value + 0x1000 < 0x2000;
@@ -969,31 +967,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
emit_alu(MULX, src, dst, ctx);
break;
case BPF_ALU | BPF_DIV | BPF_X:
- emit_cmp(src, G0, ctx);
- emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
- emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
emit_write_y(G0, ctx);
emit_alu(DIV, src, dst, ctx);
break;
-
case BPF_ALU64 | BPF_DIV | BPF_X:
- emit_cmp(src, G0, ctx);
- emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
- emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
emit_alu(UDIVX, src, dst, ctx);
break;
-
case BPF_ALU | BPF_MOD | BPF_X: {
const u8 tmp = bpf2sparc[TMP_REG_1];
ctx->tmp_1_used = true;
- emit_cmp(src, G0, ctx);
- emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
- emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
emit_write_y(G0, ctx);
emit_alu3(DIV, dst, src, tmp, ctx);
emit_alu3(MULX, tmp, src, tmp, ctx);
@@ -1005,10 +989,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
ctx->tmp_1_used = true;
- emit_cmp(src, G0, ctx);
- emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
- emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);
-
emit_alu3(UDIVX, dst, src, tmp, ctx);
emit_alu3(MULX, tmp, src, tmp, ctx);
emit_alu3(SUB, dst, tmp, dst, ctx);
@@ -1509,17 +1489,25 @@ static void jit_fill_hole(void *area, unsigned int size)
*ptr++ = 0x91d02005; /* ta 5 */
}
+struct sparc64_jit_data {
+ struct bpf_binary_header *header;
+ u8 *image;
+ struct jit_ctx ctx;
+};
+
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_prog *tmp, *orig_prog = prog;
+ struct sparc64_jit_data *jit_data;
struct bpf_binary_header *header;
bool tmp_blinded = false;
+ bool extra_pass = false;
struct jit_ctx ctx;
u32 image_size;
u8 *image_ptr;
int pass;
- if (!bpf_jit_enable)
+ if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
@@ -1533,13 +1521,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
+ jit_data = prog->aux->jit_data;
+ if (!jit_data) {
+ jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+ if (!jit_data) {
+ prog = orig_prog;
+ goto out;
+ }
+ prog->aux->jit_data = jit_data;
+ }
+ if (jit_data->ctx.offset) {
+ ctx = jit_data->ctx;
+ image_ptr = jit_data->image;
+ header = jit_data->header;
+ extra_pass = true;
+ image_size = sizeof(u32) * ctx.idx;
+ goto skip_init_ctx;
+ }
+
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
- goto out;
+ goto out_off;
}
/* Fake pass to detect features used, and get an accurate assessment
@@ -1562,7 +1568,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.image = (u32 *)image_ptr;
-
+skip_init_ctx:
for (pass = 1; pass < 3; pass++) {
ctx.idx = 0;
@@ -1593,14 +1599,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
- bpf_jit_binary_lock_ro(header);
+ if (!prog->is_func || extra_pass) {
+ bpf_jit_binary_lock_ro(header);
+ } else {
+ jit_data->ctx = ctx;
+ jit_data->image = image_ptr;
+ jit_data->header = header;
+ }
prog->bpf_func = (void *)ctx.image;
prog->jited = 1;
prog->jited_len = image_size;
+ if (!prog->is_func || extra_pass) {
out_off:
- kfree(ctx.offset);
+ kfree(ctx.offset);
+ kfree(jit_data);
+ prog->aux->jit_data = NULL;
+ }
out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ?
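
Two things change in the sparc64 JIT: the per-arch bpf_jit_enable knob is dropped in favor of the core's prog->jit_requested, and a jit_data stash enables the extra pass that bpf-to-bpf calls need once all function addresses are known. The finalize-or-stash decision, restated as a hypothetical helper (illustrative, not a function in the patch):

/* Illustrative: seal the image now, or park state for the extra pass. */
static void finish_or_stash(struct bpf_prog *prog,
			    struct sparc64_jit_data *jit_data,
			    struct bpf_binary_header *header,
			    u8 *image, struct jit_ctx *ctx, bool extra_pass)
{
	if (!prog->is_func || extra_pass) {
		bpf_jit_binary_lock_ro(header);	/* final: make image RO+X */
	} else {
		jit_data->ctx = *ctx;		/* first pass of a subprog */
		jit_data->image = image;
		jit_data->header = header;
	}
}
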
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index 0a6f50098e23..f51595f861b8 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -251,7 +251,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
else
return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
#else
- return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
+ return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#endif
}
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 02f269cfa538..ef9d403cbbe4 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -249,7 +249,7 @@ config HIGHMEM
If unsure, say "true".
-config ZONE_DMA
+config ZONE_DMA32
def_bool y
config IOMMU_HELPER
@@ -261,6 +261,7 @@ config NEED_SG_DMA_LENGTH
config SWIOTLB
bool
default TILEGX
+ select DMA_DIRECT_OPS
select IOMMU_HELPER
select NEED_SG_DMA_LENGTH
select ARCH_HAS_DMA_SET_COHERENT_MASK
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 9f94435cc44f..357a4c271ad4 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -159,7 +159,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index 1c5bd4f8ffca..da2858755fa1 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -289,7 +289,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 62a7b83025dd..769ff6ac0bf5 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -110,68 +110,6 @@ struct compat_flock64 {
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-#define COMPAT_SI_PAD_SIZE (128/sizeof(int) - 3)
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[COMPAT_SI_PAD_SIZE];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- int _overrun_incr; /* amount to add to overrun */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
struct compat_ipc64_perm {
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 97ad62878290..d25fce101fc0 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -44,26 +44,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
dev->archdata.dma_offset = off;
}
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return 0;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
#define HAVE_ARCH_DMA_SET_MASK 1
int dma_set_mask(struct device *dev, u64 mask);
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index b7659b8f1117..2adcacd85749 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -59,9 +59,6 @@ struct thread_info {
.align_ctl = 0, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
#endif /* !__ASSEMBLY__ */
#if PAGE_SIZE < 8192
diff --git a/arch/tile/include/uapi/asm/siginfo.h b/arch/tile/include/uapi/asm/siginfo.h
index f234d24fff55..a812fcbf4267 100644
--- a/arch/tile/include/uapi/asm/siginfo.h
+++ b/arch/tile/include/uapi/asm/siginfo.h
@@ -24,12 +24,4 @@
#include <asm-generic/siginfo.h>
-/*
- * Additional Tile-specific SIGILL si_codes
- */
-#define ILL_DBLFLT 9 /* double fault */
-#define ILL_HARDWALL 10 /* user networks hardwall violation */
-#undef NSIGILL
-#define NSIGILL 10
-
#endif /* _ASM_TILE_SIGINFO_H */
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 971d87a1d8cf..a703bd0e0488 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -50,79 +50,6 @@ struct compat_rt_sigframe {
struct compat_ucontext uc;
};
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from)
-{
- int err;
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(struct compat_siginfo)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please make sure that
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
-
- if (from->si_code < 0) {
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- } else {
- /*
- * First 32bits of unions are always present:
- * si_pid === si_band === si_tid === si_addr(LS half)
- */
- err |= __put_user(from->_sifields._pad[0],
- &to->_sifields._pad[0]);
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_FAULT:
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- /* FALL THROUGH */
- default:
- case SIL_KILL:
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_RT:
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- }
- }
- return err;
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
-{
- int err;
-
- if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
- return -EFAULT;
-
- err = __get_user(to->si_signo, &from->si_signo);
- err |= __get_user(to->si_errno, &from->si_errno);
- err |= __get_user(to->si_code, &from->si_code);
-
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- err |= __get_user(to->si_int, &from->si_int);
-
- return err;
-}
-
/* The assembly shim for this function arranges to ignore the return value. */
long compat_sys_rt_sigreturn(void)
{
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index f2abedc8a080..6a1efe5543fa 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -54,7 +54,7 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
* which case we will return NULL. But such devices are uncommon.
*/
if (dma_mask <= DMA_BIT_MASK(32)) {
- gfp |= GFP_DMA;
+ gfp |= GFP_DMA32;
node = 0;
}
@@ -509,39 +509,9 @@ EXPORT_SYMBOL(gx_pci_dma_map_ops);
/* PCI DMA mapping functions for legacy PCI devices */
#ifdef CONFIG_SWIOTLB
-static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- gfp |= GFP_DMA;
- return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-}
-
-static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
-}
-
-static const struct dma_map_ops pci_swiotlb_dma_ops = {
- .alloc = tile_swiotlb_alloc_coherent,
- .free = tile_swiotlb_free_coherent,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .dma_supported = swiotlb_dma_supported,
- .mapping_error = swiotlb_dma_mapping_error,
-};
-
static const struct dma_map_ops pci_hybrid_dma_ops = {
- .alloc = tile_swiotlb_alloc_coherent,
- .free = tile_swiotlb_free_coherent,
+ .alloc = swiotlb_alloc,
+ .free = swiotlb_free,
.map_page = tile_pci_dma_map_page,
.unmap_page = tile_pci_dma_unmap_page,
.map_sg = tile_pci_dma_map_sg,
@@ -552,7 +522,7 @@ static const struct dma_map_ops pci_hybrid_dma_ops = {
.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
};
-const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &swiotlb_dma_ops;
const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else
const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index ad83c1e66dbd..eb4e198f6f93 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -814,11 +814,11 @@ static void __init zone_sizes_init(void)
#endif
if (start < dma_end) {
- zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
+ zones_size[ZONE_DMA32] = min(zones_size[ZONE_NORMAL],
dma_end - start);
- zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
+ zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA32];
} else {
- zones_size[ZONE_DMA] = 0;
+ zones_size[ZONE_DMA32] = 0;
}
/* Take zone metadata from controller 0 if we're isolnode. */
@@ -830,7 +830,7 @@ static void __init zone_sizes_init(void)
PFN_UP(node_percpu[i]));
/* Track the type of memory on each node */
- if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
+ if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA32])
node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
if (end != start)
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index de3eae813e52..479d8033a801 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -163,11 +163,13 @@ static tilepro_bundle_bits rewrite_load_store_unaligned(
* actual bad address in an SPR, which it doesn't.
*/
if (align_ctl == 0) {
- siginfo_t info = {
- .si_signo = SIGBUS,
- .si_code = BUS_ADRALN,
- .si_addr = addr
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = addr;
+
trace_unhandled_signal("unaligned trap", regs,
(unsigned long)addr, SIGBUS);
force_sig_info(info.si_signo, &info, current);
@@ -210,11 +212,13 @@ static tilepro_bundle_bits rewrite_load_store_unaligned(
}
if (err) {
- siginfo_t info = {
- .si_signo = SIGBUS,
- .si_code = BUS_ADRALN,
- .si_addr = addr
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = addr;
+
trace_unhandled_signal("bad address for unaligned fixup", regs,
(unsigned long)addr, SIGBUS);
force_sig_info(info.si_signo, &info, current);
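These clear_siginfo() conversions are not just cosmetic: a designated initializer such as the removed "siginfo_t info = { ... }" zeroes the remaining named members, but the padding bytes and unused union arms of siginfo_t keep unspecified values, which force_sig_info() would then copy out to userspace. The helper being switched to is essentially a full memset; a minimal sketch of its likely shape (the real definition lives in the signal headers):

	#include <signal.h>
	#include <string.h>

	/* Zero every byte of the siginfo -- padding and all union arms
	 * included -- before filling in the fields for this signal. */
	static inline void clear_siginfo_sketch(siginfo_t *info)
	{
		memset(info, 0, sizeof(*info));
	}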
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index 825867c53853..b09456a3d77a 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -38,7 +38,7 @@ static ssize_t chip_width_show(struct device *dev,
{
return sprintf(page, "%u\n", smp_width);
}
-static DEVICE_ATTR(chip_width, 0444, chip_width_show, NULL);
+static DEVICE_ATTR_RO(chip_width);
static ssize_t chip_height_show(struct device *dev,
struct device_attribute *attr,
@@ -46,7 +46,7 @@ static ssize_t chip_height_show(struct device *dev,
{
return sprintf(page, "%u\n", smp_height);
}
-static DEVICE_ATTR(chip_height, 0444, chip_height_show, NULL);
+static DEVICE_ATTR_RO(chip_height);
static ssize_t chip_serial_show(struct device *dev,
struct device_attribute *attr,
@@ -54,7 +54,7 @@ static ssize_t chip_serial_show(struct device *dev,
{
return get_hv_confstr(page, HV_CONFSTR_CHIP_SERIAL_NUM);
}
-static DEVICE_ATTR(chip_serial, 0444, chip_serial_show, NULL);
+static DEVICE_ATTR_RO(chip_serial);
static ssize_t chip_revision_show(struct device *dev,
struct device_attribute *attr,
@@ -62,7 +62,7 @@ static ssize_t chip_revision_show(struct device *dev,
{
return get_hv_confstr(page, HV_CONFSTR_CHIP_REV);
}
-static DEVICE_ATTR(chip_revision, 0444, chip_revision_show, NULL);
+static DEVICE_ATTR_RO(chip_revision);
static ssize_t type_show(struct device *dev,
@@ -71,7 +71,7 @@ static ssize_t type_show(struct device *dev,
{
return sprintf(page, "tilera\n");
}
-static DEVICE_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR_RO(type);
#define HV_CONF_ATTR(name, conf) \
static ssize_t name ## _show(struct device *dev, \
@@ -184,7 +184,7 @@ static ssize_t hv_stats_store(struct device *dev,
return n < 0 ? n : count;
}
-static DEVICE_ATTR(hv_stats, 0644, hv_stats_show, hv_stats_store);
+static DEVICE_ATTR_RW(hv_stats);
static int hv_stats_device_add(struct device *dev, struct subsys_interface *sif)
{
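The DEVICE_ATTR_RO()/DEVICE_ATTR_RW() conversions are equivalent by macro expansion: the helpers derive the file mode and the callback names from the attribute name, so they only work because the show/store functions above already follow the <name>_show/<name>_store convention. Roughly (simplified from the real device.h macros):

	/* Simplified expansion, for illustration: */
	#define DEVICE_ATTR_RO(_name) \
		struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

	/* so DEVICE_ATTR_RO(chip_width) is equivalent to the open-coded
	 * DEVICE_ATTR(chip_width, 0444, chip_width_show, NULL) it replaces,
	 * and DEVICE_ATTR_RW(hv_stats) implies mode 0644 with both
	 * hv_stats_show and hv_stats_store wired up. */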
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 9b08c6055f15..83a7186198d7 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -256,12 +256,14 @@ static int do_bpt(struct pt_regs *regs)
void __kprobes do_trap(struct pt_regs *regs, int fault_num,
unsigned long reason)
{
- siginfo_t info = { 0 };
+ siginfo_t info;
int signo, code;
unsigned long address = 0;
tile_bundle_bits instr;
int is_kernel = !user_mode(regs);
+ clear_siginfo(&info);
+
/* Handle breakpoints, etc. */
if (is_kernel && fault_num == INT_ILL && do_bpt(regs))
return;
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index 8149c38f67b6..77a0b6b6a2a1 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -980,11 +980,13 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
}
if ((align_ctl == 0) || unexpected) {
- siginfo_t info = {
- .si_signo = SIGBUS,
- .si_code = BUS_ADRALN,
- .si_addr = (unsigned char __user *)0
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (unsigned char __user *)0;
+
if (unaligned_printk)
pr_info("Unalign bundle: unexp @%llx, %llx\n",
(unsigned long long)regs->pc,
@@ -1396,11 +1398,12 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
&frag, sizeof(frag));
if (status) {
/* Fail to copy JIT into user land. send SIGSEGV. */
- siginfo_t info = {
- .si_signo = SIGSEGV,
- .si_code = SEGV_MAPERR,
- .si_addr = (void __user *)&jit_code_area[idx]
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = (void __user *)&jit_code_area[idx];
pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
current->pid, current->comm,
@@ -1511,11 +1514,12 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
* If so, we will trigger SIGBUS.
*/
if ((regs->sp & 0x7) || (regs->ex1) || (align_ctl < 0)) {
- siginfo_t info = {
- .si_signo = SIGBUS,
- .si_code = BUS_ADRALN,
- .si_addr = (unsigned char __user *)0
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (unsigned char __user *)0;
if (unaligned_printk)
pr_info("Unalign fixup: %d %llx @%llx\n",
@@ -1535,11 +1539,13 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
pc = (tilegx_bundle_bits __user *)(regs->pc);
if (get_user(bundle, pc) != 0) {
/* Probably never be here since pc is valid user address.*/
- siginfo_t info = {
- .si_signo = SIGSEGV,
- .si_code = SEGV_MAPERR,
- .si_addr = (void __user *)pc
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = (void __user *)pc;
+
pr_err("Couldn't read instruction at %p trying to step\n", pc);
trace_unhandled_signal("segfault in unalign fixup", regs,
(unsigned long)info.si_addr, SIGSEGV);
diff --git a/arch/um/Makefile b/arch/um/Makefile
index b76fcce397a1..e54dda8a0363 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -116,8 +116,15 @@ endef
KBUILD_KCONFIG := $(HOST_DIR)/um/Kconfig
archheaders:
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
+ kbuild-file=$(HOST_DIR)/include/asm/Kbuild \
+ obj=$(HOST_DIR)/include/generated/asm
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
+ kbuild-file=$(HOST_DIR)/include/uapi/asm/Kbuild \
+ obj=$(HOST_DIR)/include/generated/uapi/asm
$(Q)$(MAKE) KBUILD_SRC= ARCH=$(HEADER_ARCH) archheaders
+
archprepare: include/generated/user_constants.h
LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 12bdb5996bf5..7f9dbdbc4eb7 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -119,10 +119,10 @@ static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
return err;
}
-static unsigned int hostaudio_poll(struct file *file,
- struct poll_table_struct *wait)
+static __poll_t hostaudio_poll(struct file *file,
+ struct poll_table_struct *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n");
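This hunk is part of the tree-wide move to the __poll_t bitwise type for poll return values, which lets sparse flag code that mixes poll bits with plain integers. A hedged sketch of the pattern (the readiness flag is hypothetical):

	/* Sketch: a poll handler under the __poll_t convention.  The mask
	 * is built only from POLL* bits; a bare int return would now be
	 * caught by sparse's bitwise checking. */
	static __poll_t sketch_poll(struct file *file,
				    struct poll_table_struct *wait)
	{
		__poll_t mask = 0;

		if (device_has_data)	/* hypothetical readiness flag */
			mask |= POLLIN | POLLRDNORM;
		return mask;
	}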
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 86942a492454..b58b746d3f2c 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -58,7 +58,10 @@ static inline void release_thread(struct task_struct *task)
{
}
-#define init_stack (init_thread_union.stack)
+static inline void mm_copy_segments(struct mm_struct *from_mm,
+ struct mm_struct *new_mm)
+{
+}
/*
* User space process size: 3GB (default).
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 9300f7630d2a..4eecd960ee8c 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -6,6 +6,9 @@
#ifndef __UM_THREAD_INFO_H
#define __UM_THREAD_INFO_H
+#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
+#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
@@ -37,10 +40,6 @@ struct thread_info {
.real_thread = NULL, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
@@ -53,8 +52,6 @@ static inline struct thread_info *current_thread_info(void)
return ti;
}
-#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
-
#endif
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
diff --git a/arch/um/include/asm/vmlinux.lds.h b/arch/um/include/asm/vmlinux.lds.h
new file mode 100644
index 000000000000..149494ae78ea
--- /dev/null
+++ b/arch/um/include/asm/vmlinux.lds.h
@@ -0,0 +1,2 @@
+#include <asm/thread_info.h>
+#include <asm-generic/vmlinux.lds.h>
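This two-line wrapper exists so that every UML linker script sees THREAD_SIZE before asm-generic/vmlinux.lds.h needs it: with init_thread_union/init_stack removed above, the generic script now carves out the initial task stack itself. A simplified sketch of the generic macro that creates the dependency (treat the exact symbol list as illustrative):

	/* Simplified from asm-generic/vmlinux.lds.h: the init stack is laid
	 * out by the linker script and sized with THREAD_SIZE, so
	 * THREAD_SIZE must be defined before this header is included. */
	#define INIT_TASK_DATA(align)				\
		. = ALIGN(align);				\
		init_stack = .;					\
		KEEP(*(.data..init_task))			\
		KEEP(*(.data..init_thread_info))		\
		. = init_stack + THREAD_SIZE;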
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index d417e3899700..5568cf882371 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -1,5 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
#include <asm/page.h>
OUTPUT_FORMAT(ELF_FORMAT)
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 428644175956..b2b02df9896e 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -306,7 +306,7 @@ void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
arch_examine_signal(sig, regs);
- memset(&clean_si, 0, sizeof(clean_si));
+ clear_siginfo(&clean_si);
clean_si.si_signo = si->si_signo;
clean_si.si_errno = si->si_errno;
clean_si.si_code = si->si_code;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index f433690b9b37..a818ccef30ca 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -54,7 +54,7 @@ struct cpuinfo_um boot_cpu_data = {
union thread_union cpu0_irqstack
__attribute__((__section__(".data..init_irqstack"))) =
- { INIT_THREAD_INFO(init_task) };
+ { .thread_info = INIT_THREAD_INFO(init_task) };
/* Changed in setup_arch, which is called in early boot */
static char host_info[(__NEW_UTS_LEN + 1) * 5];
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 3d6ed6ba5b78..36b07ec09742 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
#include <asm/page.h>
OUTPUT_FORMAT(ELF_FORMAT)
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
index ac608c2f6af6..790bc2ef4af2 100644
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -12,38 +12,11 @@
#ifndef __UNICORE_DMA_MAPPING_H__
#define __UNICORE_DMA_MAPPING_H__
-#ifdef __KERNEL__
-
-#include <linux/mm_types.h>
-#include <linux/scatterlist.h>
#include <linux/swiotlb.h>
-extern const struct dma_map_ops swiotlb_dma_map_ops;
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return &swiotlb_dma_map_ops;
+ return &swiotlb_dma_ops;
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (dev && dev->dma_mask)
- return addr + size - 1 <= *dev->dma_mask;
-
- return 1;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
-
-#endif /* __KERNEL__ */
#endif
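The helpers deleted here (dma_capable and the identity phys/dma converters) match what the generic DMA-direct code already provides, so nothing arch-specific is lost. For reference, a sketch close to the generic check of this era (illustrative):

	/* A mapping is usable only if its highest address fits under the
	 * device's DMA mask; unlike the removed copy, the generic version
	 * treats a missing mask as "not capable". */
	static inline bool dma_capable_sketch(struct device *dev,
					      dma_addr_t addr, size_t size)
	{
		if (!dev->dma_mask)
			return false;
		return addr + size - 1 <= *dev->dma_mask;
	}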
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index e79ad6d5b5b2..5fb728f3b49a 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -87,9 +87,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/*
* how to get the thread information struct from C
*/
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
index 5f25b39f04d4..c4ac6043ebb0 100644
--- a/arch/unicore32/kernel/traps.c
+++ b/arch/unicore32/kernel/traps.c
@@ -298,7 +298,6 @@ void abort(void)
/* if that doesn't kill us, halt */
panic("Oops failed to kill thread");
}
-EXPORT_SYMBOL(abort);
void __init trap_init(void)
{
diff --git a/arch/unicore32/mm/Kconfig b/arch/unicore32/mm/Kconfig
index c256460cd363..e9154a59d561 100644
--- a/arch/unicore32/mm/Kconfig
+++ b/arch/unicore32/mm/Kconfig
@@ -42,6 +42,7 @@ config CPU_TLB_SINGLE_ENTRY_DISABLE
config SWIOTLB
def_bool y
+ select DMA_DIRECT_OPS
config IOMMU_HELPER
def_bool SWIOTLB
diff --git a/arch/unicore32/mm/Makefile b/arch/unicore32/mm/Makefile
index 681c0ef5ec9e..8106260583ab 100644
--- a/arch/unicore32/mm/Makefile
+++ b/arch/unicore32/mm/Makefile
@@ -6,8 +6,6 @@
obj-y := extable.o fault.o init.o pgd.o mmu.o
obj-y += flush.o ioremap.o
-obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
-
obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c
deleted file mode 100644
index 525413d6690e..000000000000
--- a/arch/unicore32/mm/dma-swiotlb.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Contains routines needed to support swiotlb for UniCore32.
- *
- * Copyright (C) 2010 Guan Xuetao
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/pci.h>
-#include <linux/cache.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
-#include <linux/bootmem.h>
-
-#include <asm/dma.h>
-
-static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
-{
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
-}
-
-const struct dma_map_ops swiotlb_dma_map_ops = {
- .alloc = unicore_swiotlb_alloc_coherent,
- .free = unicore_swiotlb_free_coherent,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .dma_supported = swiotlb_dma_supported,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
-};
-EXPORT_SYMBOL(swiotlb_dma_map_ops);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d4fc98c50378..72d5149bcaa1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -54,8 +54,8 @@ config X86
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV if X86_64
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if X86_64
- # Causing hangs/crashes, see the commit that added this change for details.
select ARCH_HAS_REFCOUNT
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_SET_MEMORY
@@ -69,7 +69,6 @@ config X86
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
@@ -89,6 +88,7 @@ config X86
select GENERIC_CLOCKEVENTS_MIN_ADJUST
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_FIND_FIRST_BIT
select GENERIC_IOMAP
@@ -154,6 +154,7 @@ config X86
select HAVE_KERNEL_XZ
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
+ select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH if X86_64
@@ -429,6 +430,19 @@ config GOLDFISH
def_bool y
depends on X86_GOLDFISH
+config RETPOLINE
+ bool "Avoid speculative indirect branches in kernel"
+ default y
+ help
+ Compile kernel with the retpoline compiler options to guard against
+ kernel-to-user data leaks by avoiding speculative indirect
+ branches. Requires a compiler with -mindirect-branch=thunk-extern
+ support for full protection. The kernel may run slower.
+
+ Without compiler support, at least indirect branches in assembler
+ code are eliminated. Since this includes the syscall entry path,
+ it is not entirely pointless.
+
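For context: a retpoline replaces hardware indirect-branch prediction with a construct whose only speculative target is a harmless spin, closing the Spectre variant 2 channel. At the C level nothing changes; with -mindirect-branch=thunk-extern the compiler emits a call to a per-register thunk instead of "call *%reg", and the kernel supplies the thunk bodies. A hedged illustration:

	/* With CONFIG_RETPOLINE, the indirect call below compiles to
	 * "call __x86_indirect_thunk_rax" (the register varies with the
	 * compiler's allocation) rather than a bare indirect call. */
	static int dispatch(int (*handler)(int), int arg)
	{
		return handler(arg);
	}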
config INTEL_RDT
bool "Intel Resource Director Technology support"
default n
@@ -797,6 +811,15 @@ config PARAVIRT_TIME_ACCOUNTING
config PARAVIRT_CLOCK
bool
+config JAILHOUSE_GUEST
+ bool "Jailhouse non-root cell support"
+ depends on X86_64 && PCI
+ select X86_PM_TIMER
+ ---help---
+	  This option allows running Linux as a guest in a Jailhouse non-root
+ cell. You can leave this option disabled if you only want to start
+ Jailhouse and run Linux afterwards in the root cell.
+
endif #HYPERVISOR_GUEST
config NO_BOOTMEM
@@ -1242,9 +1265,9 @@ config MICROCODE
CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
initrd for microcode blobs.
- In addition, you can build-in the microcode into the kernel. For that you
- need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
- to the CONFIG_EXTRA_FIRMWARE config option.
+ In addition, you can build the microcode into the kernel. For that you
+ need to add the vendor-supplied microcode to the CONFIG_EXTRA_FIRMWARE
+ config option.
config MICROCODE_INTEL
bool "Intel microcode loading support"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 672441c008c7..192e4d2f9efc 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -169,14 +169,6 @@ config IOMMU_DEBUG
options. See Documentation/x86/x86_64/boot-options.txt for more
details.
-config IOMMU_STRESS
- bool "Enable IOMMU stress-test mode"
- ---help---
- This option disables various optimizations in IOMMU related
- code to do real stress testing of the IOMMU code. This option
- will cause a performance drop and should only be enabled for
- testing.
-
config IOMMU_LEAK
bool "IOMMU leak tracing"
depends on IOMMU_DEBUG && DMA_API_DEBUG
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 3e73bc255e4e..fad55160dcb9 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -230,6 +230,14 @@ KBUILD_CFLAGS += -Wno-sign-compare
#
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+# Avoid indirect branches in kernel to deal with Spectre
+ifdef CONFIG_RETPOLINE
+ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+ ifneq ($(RETPOLINE_CFLAGS),)
+ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+ endif
+endif
+
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index e56dbc67e837..353e20c3f114 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -999,6 +999,7 @@ struct boot_params *efi_main(struct efi_config *c,
/* Ask the firmware to clear memory on unclean shutdown */
efi_enable_reset_attack_mitigation(sys_table);
+ efi_retrieve_tpm2_eventlog(sys_table);
setup_graphics(boot_params);
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 16627fec80b2..12e8484a8ee7 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
#include <linux/linkage.h>
#include <asm/inst.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
/*
* The following macros are used to move an (un)aligned 16 byte value to/from
@@ -89,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000
-.section .rodata
-.align 16
-.type aad_shift_arr, @object
-.size aad_shift_arr, 272
-aad_shift_arr:
- .octa 0xffffffffffffffffffffffffffffffff
- .octa 0xffffffffffffffffffffffffffffff0C
- .octa 0xffffffffffffffffffffffffffff0D0C
- .octa 0xffffffffffffffffffffffffff0E0D0C
- .octa 0xffffffffffffffffffffffff0F0E0D0C
- .octa 0xffffffffffffffffffffff0C0B0A0908
- .octa 0xffffffffffffffffffff0D0C0B0A0908
- .octa 0xffffffffffffffffff0E0D0C0B0A0908
- .octa 0xffffffffffffffff0F0E0D0C0B0A0908
- .octa 0xffffffffffffff0C0B0A090807060504
- .octa 0xffffffffffff0D0C0B0A090807060504
- .octa 0xffffffffff0E0D0C0B0A090807060504
- .octa 0xffffffff0F0E0D0C0B0A090807060504
- .octa 0xffffff0C0B0A09080706050403020100
- .octa 0xffff0D0C0B0A09080706050403020100
- .octa 0xff0E0D0C0B0A09080706050403020100
- .octa 0x0F0E0D0C0B0A09080706050403020100
-
-
.text
@@ -256,6 +233,37 @@ aad_shift_arr:
pxor \TMP1, \GH # result is in TMP1
.endm
+# Reads DLEN bytes starting at DPTR and stores in XMMDst
+# where 0 < DLEN < 16
+# Clobbers %rax, DLEN and XMM1
+.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
+ cmp $8, \DLEN
+ jl _read_lt8_\@
+ mov (\DPTR), %rax
+ MOVQ_R64_XMM %rax, \XMMDst
+ sub $8, \DLEN
+ jz _done_read_partial_block_\@
+ xor %eax, %eax
+_read_next_byte_\@:
+ shl $8, %rax
+ mov 7(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_\@
+ MOVQ_R64_XMM %rax, \XMM1
+ pslldq $8, \XMM1
+ por \XMM1, \XMMDst
+ jmp _done_read_partial_block_\@
+_read_lt8_\@:
+ xor %eax, %eax
+_read_next_byte_lt8_\@:
+ shl $8, %rax
+ mov -1(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_lt8_\@
+ MOVQ_R64_XMM %rax, \XMMDst
+_done_read_partial_block_\@:
+.endm
+
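A C model of the macro may help: the point of READ_PARTIAL_BLOCK is to assemble a partial block byte-by-byte so that nothing at or past dptr[dlen] is ever dereferenced, replacing the old approach that over-read 4/8-byte chunks and repaired the result with the aad_shift_arr shuffle masks deleted above. A sketch of the memory-image equivalent (illustrative only; the assembly is authoritative):

	#include <string.h>

	/* Read 1..15 bytes from src into a zero-padded 16-byte block
	 * without touching src[len] or beyond -- the property the old
	 * shuffle-mask code lacked. */
	static void read_partial_block(const unsigned char *src, size_t len,
				       unsigned char block[16])
	{
		memset(block, 0, 16);
		memcpy(block, src, len);	/* 0 < len < 16 */
	}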
/*
* if a = number of total plaintext bytes
* b = floor(a/16)
@@ -272,62 +280,30 @@ aad_shift_arr:
XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
MOVADQ SHUF_MASK(%rip), %xmm14
mov arg7, %r10 # %r10 = AAD
- mov arg8, %r12 # %r12 = aadLen
- mov %r12, %r11
+ mov arg8, %r11 # %r11 = aadLen
pxor %xmm\i, %xmm\i
pxor \XMM2, \XMM2
cmp $16, %r11
- jl _get_AAD_rest8\num_initial_blocks\operation
+ jl _get_AAD_rest\num_initial_blocks\operation
_get_AAD_blocks\num_initial_blocks\operation:
movdqu (%r10), %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor %xmm\i, \XMM2
GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
add $16, %r10
- sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\num_initial_blocks\operation
movdqu \XMM2, %xmm\i
+
+ /* read the last <16B of AAD */
+_get_AAD_rest\num_initial_blocks\operation:
cmp $0, %r11
je _get_AAD_done\num_initial_blocks\operation
- pxor %xmm\i,%xmm\i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some CT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\num_initial_blocks\operation:
- cmp $4, %r11
- jle _get_AAD_rest4\num_initial_blocks\operation
- movq (%r10), \TMP1
- add $8, %r10
- sub $8, %r11
- pslldq $8, \TMP1
- psrldq $8, %xmm\i
- pxor \TMP1, %xmm\i
- jmp _get_AAD_rest8\num_initial_blocks\operation
-_get_AAD_rest4\num_initial_blocks\operation:
- cmp $0, %r11
- jle _get_AAD_rest0\num_initial_blocks\operation
- mov (%r10), %eax
- movq %rax, \TMP1
- add $4, %r10
- sub $4, %r10
- pslldq $12, \TMP1
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
-_get_AAD_rest0\num_initial_blocks\operation:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \TMP1
- PSHUFB_XMM \TMP1, %xmm\i
-_get_AAD_rest_final\num_initial_blocks\operation:
+ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor \XMM2, %xmm\i
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
@@ -531,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation:
XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
MOVADQ SHUF_MASK(%rip), %xmm14
mov arg7, %r10 # %r10 = AAD
- mov arg8, %r12 # %r12 = aadLen
- mov %r12, %r11
+ mov arg8, %r11 # %r11 = aadLen
pxor %xmm\i, %xmm\i
pxor \XMM2, \XMM2
cmp $16, %r11
- jl _get_AAD_rest8\num_initial_blocks\operation
+ jl _get_AAD_rest\num_initial_blocks\operation
_get_AAD_blocks\num_initial_blocks\operation:
movdqu (%r10), %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor %xmm\i, \XMM2
GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
add $16, %r10
- sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\num_initial_blocks\operation
movdqu \XMM2, %xmm\i
+
+ /* read the last <16B of AAD */
+_get_AAD_rest\num_initial_blocks\operation:
cmp $0, %r11
je _get_AAD_done\num_initial_blocks\operation
- pxor %xmm\i,%xmm\i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some PT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\num_initial_blocks\operation:
- cmp $4, %r11
- jle _get_AAD_rest4\num_initial_blocks\operation
- movq (%r10), \TMP1
- add $8, %r10
- sub $8, %r11
- pslldq $8, \TMP1
- psrldq $8, %xmm\i
- pxor \TMP1, %xmm\i
- jmp _get_AAD_rest8\num_initial_blocks\operation
-_get_AAD_rest4\num_initial_blocks\operation:
- cmp $0, %r11
- jle _get_AAD_rest0\num_initial_blocks\operation
- mov (%r10), %eax
- movq %rax, \TMP1
- add $4, %r10
- sub $4, %r10
- pslldq $12, \TMP1
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
-_get_AAD_rest0\num_initial_blocks\operation:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \TMP1
- PSHUFB_XMM \TMP1, %xmm\i
-_get_AAD_rest_final\num_initial_blocks\operation:
+ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor \XMM2, %xmm\i
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
@@ -1385,14 +1329,6 @@ _esb_loop_\@:
*
* AAD Format with 64-bit Extended Sequence Number
*
-* aadLen:
-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
-* The code supports 16 too but for other sizes, the code will fail.
-*
-* TLen:
-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
-* For other sizes, the code will fail.
-*
* poly = x^128 + x^127 + x^126 + x^121 + 1
*
*****************************************************************************/
@@ -1486,19 +1422,16 @@ _zero_cipher_left_decrypt:
PSHUFB_XMM %xmm10, %xmm0
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
- lea SHIFT_MASK+16(%rip), %r12
- sub %r13, %r12
-# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
-# (%r13 is the number of bytes in plaintext mod 16)
- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
+ lea (%arg3,%r11,1), %r10
+ mov %r13, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+
+ lea ALL_F+16(%rip), %r12
+ sub %r13, %r12
movdqa %xmm1, %xmm2
pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ movdqu (%r12), %xmm1
# get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
pand %xmm1, %xmm2
@@ -1507,9 +1440,6 @@ _zero_cipher_left_decrypt:
pxor %xmm2, %xmm8
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
- # GHASH computation for the last <16 byte block
- sub %r13, %r11
- add $16, %r11
# output %r13 bytes
MOVQ_R64_XMM %xmm0, %rax
@@ -1663,14 +1593,6 @@ ENDPROC(aesni_gcm_dec)
*
* AAD Format with 64-bit Extended Sequence Number
*
-* aadLen:
-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
-* The code supports 16 too but for other sizes, the code will fail.
-*
-* TLen:
-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
-* For other sizes, the code will fail.
-*
* poly = x^128 + x^127 + x^126 + x^121 + 1
***************************************************************************/
ENTRY(aesni_gcm_enc)
@@ -1763,19 +1685,16 @@ _zero_cipher_left_encrypt:
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0
-
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
- lea SHIFT_MASK+16(%rip), %r12
+
+ lea (%arg3,%r11,1), %r10
+ mov %r13, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+
+ lea ALL_F+16(%rip), %r12
sub %r13, %r12
- # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
- # (%r13 is the number of bytes in plaintext mod 16)
- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ movdqu (%r12), %xmm1
# get the appropriate mask to mask out top 16-r13 bytes of xmm0
pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
movdqa SHUF_MASK(%rip), %xmm10
@@ -1784,9 +1703,6 @@ _zero_cipher_left_encrypt:
pxor %xmm0, %xmm8
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
# GHASH computation for the last <16 byte block
- sub %r13, %r11
- add $16, %r11
-
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0
@@ -2884,7 +2800,7 @@ ENTRY(aesni_xts_crypt8)
pxor INC, STATE4
movdqu IV, 0x30(OUTP)
- call *%r11
+ CALL_NOSPEC %r11
movdqu 0x00(OUTP), INC
pxor INC, STATE1
@@ -2929,7 +2845,7 @@ ENTRY(aesni_xts_crypt8)
_aesni_gf128mul_x_ble()
movups IV, (IVP)
- call *%r11
+ CALL_NOSPEC %r11
movdqu 0x40(OUTP), INC
pxor INC, STATE1
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 3bf3dcf29825..34cf1c1f8c98 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -690,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
-static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
- unsigned int key_len)
+static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
+ unsigned int key_len)
{
struct cryptd_aead **ctx = crypto_aead_ctx(parent);
struct cryptd_aead *cryptd_tfm = *ctx;
@@ -716,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
/* This is the Integrity Check Value (aka the authentication tag) and can
 * be 8, 12 or 16 bytes long. */
-static int rfc4106_set_authsize(struct crypto_aead *parent,
- unsigned int authsize)
+static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
+ unsigned int authsize)
{
struct cryptd_aead **ctx = crypto_aead_ctx(parent);
struct cryptd_aead *cryptd_tfm = *ctx;
@@ -824,7 +824,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
if (sg_is_last(req->src) &&
(!PageHighMem(sg_page(req->src)) ||
req->src->offset + req->src->length <= PAGE_SIZE) &&
- sg_is_last(req->dst) &&
+ sg_is_last(req->dst) && req->dst->length &&
(!PageHighMem(sg_page(req->dst)) ||
req->dst->offset + req->dst->length <= PAGE_SIZE)) {
one_entry_in_sg = 1;
@@ -929,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
aes_ctx);
}
-static int rfc4106_encrypt(struct aead_request *req)
+static int gcmaes_wrapper_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
@@ -945,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req)
return crypto_aead_encrypt(req);
}
-static int rfc4106_decrypt(struct aead_request *req)
+static int gcmaes_wrapper_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
@@ -1117,7 +1117,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
{
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
@@ -1128,6 +1128,30 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
aes_ctx);
}
+static int generic_gcmaes_init(struct crypto_aead *aead)
+{
+ struct cryptd_aead *cryptd_tfm;
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
+ cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+
+ *ctx = cryptd_tfm;
+ crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
+
+ return 0;
+}
+
+static void generic_gcmaes_exit(struct crypto_aead *aead)
+{
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
+ cryptd_free_aead(*ctx);
+}
+
static struct aead_alg aesni_aead_algs[] = { {
.setkey = common_rfc4106_set_key,
.setauthsize = common_rfc4106_set_authsize,
@@ -1147,10 +1171,10 @@ static struct aead_alg aesni_aead_algs[] = { {
}, {
.init = rfc4106_init,
.exit = rfc4106_exit,
- .setkey = rfc4106_set_key,
- .setauthsize = rfc4106_set_authsize,
- .encrypt = rfc4106_encrypt,
- .decrypt = rfc4106_decrypt,
+ .setkey = gcmaes_wrapper_set_key,
+ .setauthsize = gcmaes_wrapper_set_authsize,
+ .encrypt = gcmaes_wrapper_encrypt,
+ .decrypt = gcmaes_wrapper_decrypt,
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.base = {
@@ -1170,13 +1194,31 @@ static struct aead_alg aesni_aead_algs[] = { {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = 16,
.base = {
+ .cra_name = "__generic-gcm-aes-aesni",
+ .cra_driver_name = "__driver-generic-gcm-aes-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
+ .cra_alignmask = AESNI_ALIGN - 1,
+ .cra_module = THIS_MODULE,
+ },
+}, {
+ .init = generic_gcmaes_init,
+ .exit = generic_gcmaes_exit,
+ .setkey = gcmaes_wrapper_set_key,
+ .setauthsize = gcmaes_wrapper_set_authsize,
+ .encrypt = gcmaes_wrapper_encrypt,
+ .decrypt = gcmaes_wrapper_decrypt,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "generic-gcm-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
- .cra_alignmask = AESNI_ALIGN - 1,
+ .cra_ctxsize = sizeof(struct cryptd_aead *),
.cra_module = THIS_MODULE,
},
} };
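The renamed gcmaes_wrapper_* helpers implement the usual cryptd delegation pattern, now shared by the rfc4106 template and the new outer gcm(aes) entry: the wrapper tfm stores only a cryptd handle and forwards each request, dropping to the synchronous inner tfm when the FPU is usable. A sketch of the forwarding step, assuming the cryptd AEAD API of this era (illustrative):

	/* Forward an AEAD request to the cryptd-backed inner tfm, taking
	 * the synchronous child directly when SIMD is safe to use in the
	 * current context. */
	static int wrapper_encrypt_sketch(struct aead_request *req)
	{
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
		struct cryptd_aead *cryptd_tfm = *ctx;

		tfm = &cryptd_tfm->base;
		if (irq_fpu_usable() &&
		    (!in_atomic() || !cryptd_aead_queued(cryptd_tfm)))
			tfm = cryptd_aead_child(cryptd_tfm);

		aead_request_set_tfm(req, tfm);
		return crypto_aead_encrypt(req);
	}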
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e2863c..a14af6eb09cb 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
#include <linux/linkage.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
vpxor 14 * 16(%rax), %xmm15, %xmm14;
vpxor 15 * 16(%rax), %xmm15, %xmm15;
- call *%r9;
+ CALL_NOSPEC %r9;
addq $(16 * 16), %rsp;
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index eee5b3982cfd..b66bbfa62f50 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way:
vpxor 14 * 32(%rax), %ymm15, %ymm14;
vpxor 15 * 32(%rax), %ymm15, %ymm15;
- call *%r9;
+ CALL_NOSPEC %r9;
addq $(16 * 32), %rsp;
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 1e6af1b35f7b..dce7c5d39c2f 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -107,7 +107,6 @@ static struct skcipher_alg alg = {
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_alignmask = sizeof(u32) - 1,
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA20_KEY_SIZE,
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 27226df3f7d8..c8d9cdacbf10 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -162,6 +162,7 @@ static struct shash_alg alg = {
.cra_name = "crc32",
.cra_driver_name = "crc32-pclmul",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index c194d5717ae5..5773e1161072 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -226,6 +226,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-intel",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 7a7de27c6f41..d9b734d0c8cc 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
#include <asm/inst.h>
#include <linux/linkage.h>
+#include <asm/nospec-branch.h>
## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
@@ -172,7 +173,7 @@ continue_block:
movzxw (bufp, %rax, 2), len
lea crc_array(%rip), bufp
lea (bufp, len, 1), bufp
- jmp *bufp
+ JMP_NOSPEC bufp
################################################################
## 2a) PROCESS FULL BLOCKS:
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index e32142bc071d..790377797544 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -164,14 +164,12 @@ static struct shash_alg alg = {
.init = poly1305_simd_init,
.update = poly1305_simd_update,
.final = crypto_poly1305_final,
- .setkey = crypto_poly1305_setkey,
.descsize = sizeof(struct poly1305_simd_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-simd",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_alignmask = sizeof(u32) - 1,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S
index 329452b8f794..6014b7b9e52a 100644
--- a/arch/x86/crypto/salsa20-i586-asm_32.S
+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
@@ -1,6 +1,7 @@
-# salsa20_pm.s version 20051229
-# D. J. Bernstein
-# Public domain.
+# Derived from:
+# salsa20_pm.s version 20051229
+# D. J. Bernstein
+# Public domain.
#include <linux/linkage.h>
@@ -935,180 +936,3 @@ ENTRY(salsa20_encrypt_bytes)
# goto bytesatleast1
jmp ._bytesatleast1
ENDPROC(salsa20_encrypt_bytes)
-
-# enter salsa20_keysetup
-ENTRY(salsa20_keysetup)
- mov %esp,%eax
- and $31,%eax
- add $256,%eax
- sub %eax,%esp
- # eax_stack = eax
- movl %eax,64(%esp)
- # ebx_stack = ebx
- movl %ebx,68(%esp)
- # esi_stack = esi
- movl %esi,72(%esp)
- # edi_stack = edi
- movl %edi,76(%esp)
- # ebp_stack = ebp
- movl %ebp,80(%esp)
- # k = arg2
- movl 8(%esp,%eax),%ecx
- # kbits = arg3
- movl 12(%esp,%eax),%edx
- # x = arg1
- movl 4(%esp,%eax),%eax
- # in1 = *(uint32 *) (k + 0)
- movl 0(%ecx),%ebx
- # in2 = *(uint32 *) (k + 4)
- movl 4(%ecx),%esi
- # in3 = *(uint32 *) (k + 8)
- movl 8(%ecx),%edi
- # in4 = *(uint32 *) (k + 12)
- movl 12(%ecx),%ebp
- # *(uint32 *) (x + 4) = in1
- movl %ebx,4(%eax)
- # *(uint32 *) (x + 8) = in2
- movl %esi,8(%eax)
- # *(uint32 *) (x + 12) = in3
- movl %edi,12(%eax)
- # *(uint32 *) (x + 16) = in4
- movl %ebp,16(%eax)
- # kbits - 256
- cmp $256,%edx
- # goto kbits128 if unsigned<
- jb ._kbits128
-._kbits256:
- # in11 = *(uint32 *) (k + 16)
- movl 16(%ecx),%edx
- # in12 = *(uint32 *) (k + 20)
- movl 20(%ecx),%ebx
- # in13 = *(uint32 *) (k + 24)
- movl 24(%ecx),%esi
- # in14 = *(uint32 *) (k + 28)
- movl 28(%ecx),%ecx
- # *(uint32 *) (x + 44) = in11
- movl %edx,44(%eax)
- # *(uint32 *) (x + 48) = in12
- movl %ebx,48(%eax)
- # *(uint32 *) (x + 52) = in13
- movl %esi,52(%eax)
- # *(uint32 *) (x + 56) = in14
- movl %ecx,56(%eax)
- # in0 = 1634760805
- mov $1634760805,%ecx
- # in5 = 857760878
- mov $857760878,%edx
- # in10 = 2036477234
- mov $2036477234,%ebx
- # in15 = 1797285236
- mov $1797285236,%esi
- # *(uint32 *) (x + 0) = in0
- movl %ecx,0(%eax)
- # *(uint32 *) (x + 20) = in5
- movl %edx,20(%eax)
- # *(uint32 *) (x + 40) = in10
- movl %ebx,40(%eax)
- # *(uint32 *) (x + 60) = in15
- movl %esi,60(%eax)
- # goto keysetupdone
- jmp ._keysetupdone
-._kbits128:
- # in11 = *(uint32 *) (k + 0)
- movl 0(%ecx),%edx
- # in12 = *(uint32 *) (k + 4)
- movl 4(%ecx),%ebx
- # in13 = *(uint32 *) (k + 8)
- movl 8(%ecx),%esi
- # in14 = *(uint32 *) (k + 12)
- movl 12(%ecx),%ecx
- # *(uint32 *) (x + 44) = in11
- movl %edx,44(%eax)
- # *(uint32 *) (x + 48) = in12
- movl %ebx,48(%eax)
- # *(uint32 *) (x + 52) = in13
- movl %esi,52(%eax)
- # *(uint32 *) (x + 56) = in14
- movl %ecx,56(%eax)
- # in0 = 1634760805
- mov $1634760805,%ecx
- # in5 = 824206446
- mov $824206446,%edx
- # in10 = 2036477238
- mov $2036477238,%ebx
- # in15 = 1797285236
- mov $1797285236,%esi
- # *(uint32 *) (x + 0) = in0
- movl %ecx,0(%eax)
- # *(uint32 *) (x + 20) = in5
- movl %edx,20(%eax)
- # *(uint32 *) (x + 40) = in10
- movl %ebx,40(%eax)
- # *(uint32 *) (x + 60) = in15
- movl %esi,60(%eax)
-._keysetupdone:
- # eax = eax_stack
- movl 64(%esp),%eax
- # ebx = ebx_stack
- movl 68(%esp),%ebx
- # esi = esi_stack
- movl 72(%esp),%esi
- # edi = edi_stack
- movl 76(%esp),%edi
- # ebp = ebp_stack
- movl 80(%esp),%ebp
- # leave
- add %eax,%esp
- ret
-ENDPROC(salsa20_keysetup)
-
-# enter salsa20_ivsetup
-ENTRY(salsa20_ivsetup)
- mov %esp,%eax
- and $31,%eax
- add $256,%eax
- sub %eax,%esp
- # eax_stack = eax
- movl %eax,64(%esp)
- # ebx_stack = ebx
- movl %ebx,68(%esp)
- # esi_stack = esi
- movl %esi,72(%esp)
- # edi_stack = edi
- movl %edi,76(%esp)
- # ebp_stack = ebp
- movl %ebp,80(%esp)
- # iv = arg2
- movl 8(%esp,%eax),%ecx
- # x = arg1
- movl 4(%esp,%eax),%eax
- # in6 = *(uint32 *) (iv + 0)
- movl 0(%ecx),%edx
- # in7 = *(uint32 *) (iv + 4)
- movl 4(%ecx),%ecx
- # in8 = 0
- mov $0,%ebx
- # in9 = 0
- mov $0,%esi
- # *(uint32 *) (x + 24) = in6
- movl %edx,24(%eax)
- # *(uint32 *) (x + 28) = in7
- movl %ecx,28(%eax)
- # *(uint32 *) (x + 32) = in8
- movl %ebx,32(%eax)
- # *(uint32 *) (x + 36) = in9
- movl %esi,36(%eax)
- # eax = eax_stack
- movl 64(%esp),%eax
- # ebx = ebx_stack
- movl 68(%esp),%ebx
- # esi = esi_stack
- movl 72(%esp),%esi
- # edi = edi_stack
- movl 76(%esp),%edi
- # ebp = ebp_stack
- movl 80(%esp),%ebp
- # leave
- add %eax,%esp
- ret
-ENDPROC(salsa20_ivsetup)
diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
index 10db30d58006..03a4918f41ee 100644
--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
@@ -803,117 +803,3 @@ ENTRY(salsa20_encrypt_bytes)
# goto bytesatleast1
jmp ._bytesatleast1
ENDPROC(salsa20_encrypt_bytes)
-
-# enter salsa20_keysetup
-ENTRY(salsa20_keysetup)
- mov %rsp,%r11
- and $31,%r11
- add $256,%r11
- sub %r11,%rsp
- # k = arg2
- mov %rsi,%rsi
- # kbits = arg3
- mov %rdx,%rdx
- # x = arg1
- mov %rdi,%rdi
- # in0 = *(uint64 *) (k + 0)
- movq 0(%rsi),%r8
- # in2 = *(uint64 *) (k + 8)
- movq 8(%rsi),%r9
- # *(uint64 *) (x + 4) = in0
- movq %r8,4(%rdi)
- # *(uint64 *) (x + 12) = in2
- movq %r9,12(%rdi)
- # unsigned<? kbits - 256
- cmp $256,%rdx
- # comment:fp stack unchanged by jump
- # goto kbits128 if unsigned<
- jb ._kbits128
-# kbits256:
-._kbits256:
- # in10 = *(uint64 *) (k + 16)
- movq 16(%rsi),%rdx
- # in12 = *(uint64 *) (k + 24)
- movq 24(%rsi),%rsi
- # *(uint64 *) (x + 44) = in10
- movq %rdx,44(%rdi)
- # *(uint64 *) (x + 52) = in12
- movq %rsi,52(%rdi)
- # in0 = 1634760805
- mov $1634760805,%rsi
- # in4 = 857760878
- mov $857760878,%rdx
- # in10 = 2036477234
- mov $2036477234,%rcx
- # in14 = 1797285236
- mov $1797285236,%r8
- # *(uint32 *) (x + 0) = in0
- movl %esi,0(%rdi)
- # *(uint32 *) (x + 20) = in4
- movl %edx,20(%rdi)
- # *(uint32 *) (x + 40) = in10
- movl %ecx,40(%rdi)
- # *(uint32 *) (x + 60) = in14
- movl %r8d,60(%rdi)
- # comment:fp stack unchanged by jump
- # goto keysetupdone
- jmp ._keysetupdone
-# kbits128:
-._kbits128:
- # in10 = *(uint64 *) (k + 0)
- movq 0(%rsi),%rdx
- # in12 = *(uint64 *) (k + 8)
- movq 8(%rsi),%rsi
- # *(uint64 *) (x + 44) = in10
- movq %rdx,44(%rdi)
- # *(uint64 *) (x + 52) = in12
- movq %rsi,52(%rdi)
- # in0 = 1634760805
- mov $1634760805,%rsi
- # in4 = 824206446
- mov $824206446,%rdx
- # in10 = 2036477238
- mov $2036477238,%rcx
- # in14 = 1797285236
- mov $1797285236,%r8
- # *(uint32 *) (x + 0) = in0
- movl %esi,0(%rdi)
- # *(uint32 *) (x + 20) = in4
- movl %edx,20(%rdi)
- # *(uint32 *) (x + 40) = in10
- movl %ecx,40(%rdi)
- # *(uint32 *) (x + 60) = in14
- movl %r8d,60(%rdi)
-# keysetupdone:
-._keysetupdone:
- # leave
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
- ret
-ENDPROC(salsa20_keysetup)
-
-# enter salsa20_ivsetup
-ENTRY(salsa20_ivsetup)
- mov %rsp,%r11
- and $31,%r11
- add $256,%r11
- sub %r11,%rsp
- # iv = arg2
- mov %rsi,%rsi
- # x = arg1
- mov %rdi,%rdi
- # in6 = *(uint64 *) (iv + 0)
- movq 0(%rsi),%rsi
- # in8 = 0
- mov $0,%r8
- # *(uint64 *) (x + 24) = in6
- movq %rsi,24(%rdi)
- # *(uint64 *) (x + 32) = in8
- movq %r8,32(%rdi)
- # leave
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
- ret
-ENDPROC(salsa20_ivsetup)
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index cb91a64a99e7..b07d7d959806 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -11,6 +11,9 @@
* - x86-64 version, renamed as salsa20-x86_64-asm_64.S
* available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s>
*
+ * Also modified to set up the initial state using the generic C code rather
+ * than in assembly.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
@@ -18,93 +21,65 @@
*
*/
-#include <crypto/algapi.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/salsa20.h>
#include <linux/module.h>
-#include <linux/crypto.h>
-
-#define SALSA20_IV_SIZE 8U
-#define SALSA20_MIN_KEY_SIZE 16U
-#define SALSA20_MAX_KEY_SIZE 32U
-
-struct salsa20_ctx
-{
- u32 input[16];
-};
-asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k,
- u32 keysize, u32 ivsize);
-asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv);
-asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx,
- const u8 *src, u8 *dst, u32 bytes);
+asmlinkage void salsa20_encrypt_bytes(u32 state[16], const u8 *src, u8 *dst,
+ u32 bytes);
-static int setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keysize)
+static int salsa20_asm_crypt(struct skcipher_request *req)
{
- struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
- salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8);
- return 0;
-}
-
-static int encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ u32 state[16];
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 64);
+ err = skcipher_walk_virt(&walk, req, true);
- salsa20_ivsetup(ctx, walk.iv);
+ crypto_salsa20_init(state, ctx, walk.iv);
- while (walk.nbytes >= 64) {
- salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
- walk.dst.virt.addr,
- walk.nbytes - (walk.nbytes % 64));
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
- }
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
- if (walk.nbytes) {
- salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
- walk.dst.virt.addr, walk.nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ salsa20_encrypt_bytes(state, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static struct crypto_alg alg = {
- .cra_name = "salsa20",
- .cra_driver_name = "salsa20-asm",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_type = &crypto_blkcipher_type,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct salsa20_ctx),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .setkey = setkey,
- .encrypt = encrypt,
- .decrypt = encrypt,
- .min_keysize = SALSA20_MIN_KEY_SIZE,
- .max_keysize = SALSA20_MAX_KEY_SIZE,
- .ivsize = SALSA20_IV_SIZE,
- }
- }
+static struct skcipher_alg alg = {
+ .base.cra_name = "salsa20",
+ .base.cra_driver_name = "salsa20-asm",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct salsa20_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = SALSA20_MIN_KEY_SIZE,
+ .max_keysize = SALSA20_MAX_KEY_SIZE,
+ .ivsize = SALSA20_IV_SIZE,
+ .chunksize = SALSA20_BLOCK_SIZE,
+ .setkey = crypto_salsa20_setkey,
+ .encrypt = salsa20_asm_crypt,
+ .decrypt = salsa20_asm_crypt,
};
static int __init init(void)
{
- return crypto_register_alg(&alg);
+ return crypto_register_skcipher(&alg);
}
static void __exit fini(void)
{
- crypto_unregister_alg(&alg);
+ crypto_unregister_skcipher(&alg);
}
module_init(init);
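The deleted salsa20_keysetup/salsa20_ivsetup routines (here and in the i586 file above) only populated the 16-word Salsa20 state, which the generic crypto_salsa20_init now does in C. A sketch of what that setup amounts to, using the constants visible in the removed assembly (illustrative; crypto/salsa20_generic.c is authoritative):

	/* u32/u8 and get_unaligned_le32() as provided by the kernel
	 * headers this glue file already includes. */
	static void salsa20_state_sketch(u32 state[16], const u32 *key,
					 int kbits, const u8 iv[8])
	{
		state[1] = key[0];
		state[2] = key[1];
		state[3] = key[2];
		state[4] = key[3];
		if (kbits == 256) {
			key += 4;
			state[0]  = 1634760805;	/* "expand 32-byte k" */
			state[5]  = 857760878;
			state[10] = 2036477234;
			state[15] = 1797285236;
		} else {
			state[0]  = 1634760805;	/* "expand 16-byte k" */
			state[5]  = 824206446;
			state[10] = 2036477238;
			state[15] = 1797285236;
		}
		state[11] = key[0];
		state[12] = key[1];
		state[13] = key[2];
		state[14] = key[3];
		state[6] = get_unaligned_le32(iv);
		state[7] = get_unaligned_le32(iv + 4);
		state[8] = 0;	/* 64-bit block counter starts at zero */
		state[9] = 0;
	}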
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
index 1c3b7ceb36d2..e7273a606a07 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -55,29 +55,31 @@
#define RAB1bl %bl
#define RAB2bl %cl
+#define CD0 0x0(%rsp)
+#define CD1 0x8(%rsp)
+#define CD2 0x10(%rsp)
+
+# used only before/after all rounds
#define RCD0 %r8
#define RCD1 %r9
#define RCD2 %r10
-#define RCD0d %r8d
-#define RCD1d %r9d
-#define RCD2d %r10d
-
-#define RX0 %rbp
-#define RX1 %r11
-#define RX2 %r12
+# used only during rounds
+#define RX0 %r8
+#define RX1 %r9
+#define RX2 %r10
-#define RX0d %ebp
-#define RX1d %r11d
-#define RX2d %r12d
+#define RX0d %r8d
+#define RX1d %r9d
+#define RX2d %r10d
-#define RY0 %r13
-#define RY1 %r14
-#define RY2 %r15
+#define RY0 %r11
+#define RY1 %r12
+#define RY2 %r13
-#define RY0d %r13d
-#define RY1d %r14d
-#define RY2d %r15d
+#define RY0d %r11d
+#define RY1d %r12d
+#define RY2d %r13d
#define RT0 %rdx
#define RT1 %rsi
@@ -85,6 +87,8 @@
#define RT0d %edx
#define RT1d %esi
+#define RT1bl %sil
+
#define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \
movzbl ab ## bl, tmp2 ## d; \
movzbl ab ## bh, tmp1 ## d; \
@@ -92,6 +96,11 @@
op1##l T0(CTX, tmp2, 4), dst ## d; \
op2##l T1(CTX, tmp1, 4), dst ## d;
+#define swap_ab_with_cd(ab, cd, tmp) \
+ movq cd, tmp; \
+ movq ab, cd; \
+ movq tmp, ab;
+
/*
* Combined G1 & G2 function. Reordered with help of rotates to have moves
 * at beginning.
@@ -110,15 +119,15 @@
/* G1,2 && G2,2 */ \
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \
- xchgq cd ## 0, ab ## 0; \
+ swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \
\
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \
- xchgq cd ## 1, ab ## 1; \
+ swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \
\
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \
- xchgq cd ## 2, ab ## 2;
+ swap_ab_with_cd(ab ## 2, cd ## 2, RT0);
#define enc_round_end(ab, x, y, n) \
addl y ## d, x ## d; \
@@ -168,6 +177,16 @@
decrypt_round3(ba, dc, (n*2)+1); \
decrypt_round3(ba, dc, (n*2));
+#define push_cd() \
+ pushq RCD2; \
+ pushq RCD1; \
+ pushq RCD0;
+
+#define pop_cd() \
+ popq RCD0; \
+ popq RCD1; \
+ popq RCD2;
+
#define inpack3(in, n, xy, m) \
movq 4*(n)(in), xy ## 0; \
xorq w+4*m(CTX), xy ## 0; \
@@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way)
* %rdx: src, RIO
* %rcx: bool, if true: xor output
*/
- pushq %r15;
- pushq %r14;
pushq %r13;
pushq %r12;
- pushq %rbp;
pushq %rbx;
pushq %rcx; /* bool xor */
@@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way)
inpack_enc3();
- encrypt_cycle3(RAB, RCD, 0);
- encrypt_cycle3(RAB, RCD, 1);
- encrypt_cycle3(RAB, RCD, 2);
- encrypt_cycle3(RAB, RCD, 3);
- encrypt_cycle3(RAB, RCD, 4);
- encrypt_cycle3(RAB, RCD, 5);
- encrypt_cycle3(RAB, RCD, 6);
- encrypt_cycle3(RAB, RCD, 7);
+ push_cd();
+ encrypt_cycle3(RAB, CD, 0);
+ encrypt_cycle3(RAB, CD, 1);
+ encrypt_cycle3(RAB, CD, 2);
+ encrypt_cycle3(RAB, CD, 3);
+ encrypt_cycle3(RAB, CD, 4);
+ encrypt_cycle3(RAB, CD, 5);
+ encrypt_cycle3(RAB, CD, 6);
+ encrypt_cycle3(RAB, CD, 7);
+ pop_cd();
popq RIO; /* dst */
- popq %rbp; /* bool xor */
+ popq RT1; /* bool xor */
- testb %bpl, %bpl;
+ testb RT1bl, RT1bl;
jnz .L__enc_xor3;
outunpack_enc3(mov);
popq %rbx;
- popq %rbp;
popq %r12;
popq %r13;
- popq %r14;
- popq %r15;
ret;
.L__enc_xor3:
outunpack_enc3(xor);
popq %rbx;
- popq %rbp;
popq %r12;
popq %r13;
- popq %r14;
- popq %r15;
ret;
ENDPROC(__twofish_enc_blk_3way)
@@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way)
* %rsi: dst
* %rdx: src, RIO
*/
- pushq %r15;
- pushq %r14;
pushq %r13;
pushq %r12;
- pushq %rbp;
pushq %rbx;
pushq %rsi; /* dst */
inpack_dec3();
- decrypt_cycle3(RAB, RCD, 7);
- decrypt_cycle3(RAB, RCD, 6);
- decrypt_cycle3(RAB, RCD, 5);
- decrypt_cycle3(RAB, RCD, 4);
- decrypt_cycle3(RAB, RCD, 3);
- decrypt_cycle3(RAB, RCD, 2);
- decrypt_cycle3(RAB, RCD, 1);
- decrypt_cycle3(RAB, RCD, 0);
+ push_cd();
+ decrypt_cycle3(RAB, CD, 7);
+ decrypt_cycle3(RAB, CD, 6);
+ decrypt_cycle3(RAB, CD, 5);
+ decrypt_cycle3(RAB, CD, 4);
+ decrypt_cycle3(RAB, CD, 3);
+ decrypt_cycle3(RAB, CD, 2);
+ decrypt_cycle3(RAB, CD, 1);
+ decrypt_cycle3(RAB, CD, 0);
+ pop_cd();
popq RIO; /* dst */
outunpack_dec3();
popq %rbx;
- popq %rbp;
popq %r12;
popq %r13;
- popq %r14;
- popq %r15;
ret;
ENDPROC(twofish_dec_blk_3way)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 45a63e00a6af..3f48f695d5e6 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -198,8 +198,11 @@ For 32-bit we have the following conventions - kernel is built with
* PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two
* halves:
*/
-#define PTI_SWITCH_PGTABLES_MASK (1<<PAGE_SHIFT)
-#define PTI_SWITCH_MASK (PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT))
+#define PTI_USER_PGTABLE_BIT PAGE_SHIFT
+#define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT)
+#define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT
+#define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT)
+#define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
.macro SET_NOFLUSH_BIT reg:req
bts $X86_CR3_PCID_NOFLUSH_BIT, \reg
@@ -208,7 +211,7 @@ For 32-bit we have the following conventions - kernel is built with
.macro ADJUST_KERNEL_CR3 reg:req
ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
- andq $(~PTI_SWITCH_MASK), \reg
+ andq $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
@@ -239,15 +242,19 @@ For 32-bit we have the following conventions - kernel is built with
/* Flush needed, clear the bit */
btr \scratch_reg, THIS_CPU_user_pcid_flush_mask
movq \scratch_reg2, \scratch_reg
- jmp .Lwrcr3_\@
+ jmp .Lwrcr3_pcid_\@
.Lnoflush_\@:
movq \scratch_reg2, \scratch_reg
SET_NOFLUSH_BIT \scratch_reg
+.Lwrcr3_pcid_\@:
+ /* Flip the ASID to the user version */
+ orq $(PTI_USER_PCID_MASK), \scratch_reg
+
.Lwrcr3_\@:
- /* Flip the PGD and ASID to the user version */
- orq $(PTI_SWITCH_MASK), \scratch_reg
+ /* Flip the PGD to the user version */
+ orq $(PTI_USER_PGTABLE_MASK), \scratch_reg
mov \scratch_reg, %cr3
.Lend_\@:
.endm
@@ -263,17 +270,12 @@ For 32-bit we have the following conventions - kernel is built with
movq %cr3, \scratch_reg
movq \scratch_reg, \save_reg
/*
- * Is the "switch mask" all zero? That means that both of
- * these are zero:
- *
- * 1. The user/kernel PCID bit, and
- * 2. The user/kernel "bit" that points CR3 to the
- * bottom half of the 8k PGD
- *
- * That indicates a kernel CR3 value, not a user CR3.
+ * Test the user pagetable bit. If set, then the user page tables
+ * are active. If clear, CR3 already has the kernel page table
+ * active.
*/
- testq $(PTI_SWITCH_MASK), \scratch_reg
- jz .Ldone_\@
+ bt $PTI_USER_PGTABLE_BIT, \scratch_reg
+ jnc .Ldone_\@
ADJUST_KERNEL_CR3 \scratch_reg
movq \scratch_reg, %cr3
@@ -290,7 +292,7 @@ For 32-bit we have the following conventions - kernel is built with
* KERNEL pages can always resume with NOFLUSH as we do
* explicit flushes.
*/
- bt $X86_CR3_PTI_SWITCH_BIT, \save_reg
+ bt $PTI_USER_PGTABLE_BIT, \save_reg
jnc .Lnoflush_\@
/*
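With the masks split, the two transitions are independent: SWITCH_TO_USER_CR3 always flips the PGD to the user half, and tags the user ASID only on the PCID-capable path (the new .Lwrcr3_pcid_\@ label). A C model of the bit layout, with bit positions as defined in this patch (illustrative):

	#define USER_PGTABLE_BIT	12	/* PAGE_SHIFT: upper 4k of the 8k PGD */
	#define USER_PCID_BIT		11	/* X86_CR3_PTI_PCID_USER_BIT */

	/* Derive a user CR3 value from the kernel one; clearing the same
	 * bits (plus PCID) is what ADJUST_KERNEL_CR3 does in reverse. */
	static unsigned long sketch_user_cr3(unsigned long kernel_cr3,
					     int have_pcid)
	{
		unsigned long cr3 = kernel_cr3 | (1UL << USER_PGTABLE_BIT);

		if (have_pcid)
			cr3 |= 1UL << USER_PCID_BIT;
		return cr3;
	}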
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index d7d3cc24baf4..1e3883e45687 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -153,6 +153,9 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
if (cached_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
+ if (cached_flags & _TIF_PATCH_PENDING)
+ klp_update_patch_state(current);
+
/* deal with pending signal delivery */
if (cached_flags & _TIF_SIGPENDING)
do_signal(regs);
@@ -165,9 +168,6 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
if (cached_flags & _TIF_USER_RETURN_NOTIFY)
fire_user_return_notifiers();
- if (cached_flags & _TIF_PATCH_PENDING)
- klp_update_patch_state(current);
-
/* Disable IRQs and retry */
local_irq_disable();
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ace8f321a5a1..2a35b1e0fb90 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
.section .entry.text, "ax"
@@ -243,6 +244,18 @@ ENTRY(__switch_to_asm)
movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif
+#ifdef CONFIG_RETPOLINE
+ /*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+ /* Clobbers %ebx */
+ FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
/* restore callee-saved registers */
popl %esi
popl %edi
@@ -290,7 +303,7 @@ ENTRY(ret_from_fork)
/* kernel thread */
1: movl %edi, %eax
- call *%ebx
+ CALL_NOSPEC %ebx
/*
* A kernel thread is allowed to return here after successfully
* calling do_execve(). Exit to userspace to complete the execve()
@@ -919,7 +932,7 @@ common_exception:
movl %ecx, %es
TRACE_IRQS_OFF
movl %esp, %eax # pt_regs pointer
- call *%edi
+ CALL_NOSPEC %edi
jmp ret_from_exception
END(common_exception)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f048e384ff54..a83570495162 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
#include <linux/err.h>
#include "calling.h"
@@ -191,7 +192,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
*/
pushq %rdi
movq $entry_SYSCALL_64_stage2, %rdi
- jmp *%rdi
+ JMP_NOSPEC %rdi
END(entry_SYSCALL_64_trampoline)
.popsection
@@ -270,7 +271,12 @@ entry_SYSCALL_64_fastpath:
* It might end up jumping to the slow path. If it jumps, RAX
* and all argument registers are clobbered.
*/
+#ifdef CONFIG_RETPOLINE
+ movq sys_call_table(, %rax, 8), %rax
+ call __x86_indirect_thunk_rax
+#else
call *sys_call_table(, %rax, 8)
+#endif
.Lentry_SYSCALL_64_after_fastpath_call:
movq %rax, RAX(%rsp)
@@ -442,7 +448,7 @@ ENTRY(stub_ptregs_64)
jmp entry_SYSCALL64_slow_path
1:
- jmp *%rax /* Called from C */
+ JMP_NOSPEC %rax /* Called from C */
END(stub_ptregs_64)
.macro ptregs_stub func
@@ -485,6 +491,18 @@ ENTRY(__switch_to_asm)
movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif
+#ifdef CONFIG_RETPOLINE
+ /*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+ /* Clobbers %rbx */
+ FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
/* restore callee-saved registers */
popq %r15
popq %r14
@@ -521,7 +539,7 @@ ENTRY(ret_from_fork)
1:
/* kernel thread */
movq %r12, %rdi
- call *%rbx
+ CALL_NOSPEC %rbx
/*
* A kernel thread is allowed to return here after successfully
* calling do_execve(). Exit to userspace to complete the execve()
@@ -1247,7 +1265,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
#endif
#ifdef CONFIG_X86_MCE
-idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
+idtentry machine_check do_mce has_error_code=0 paranoid=1
#endif
/*
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 40f17009ec20..98d5358e4041 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -190,8 +190,13 @@ ENTRY(entry_SYSCALL_compat)
/* Interrupts are off on entry. */
swapgs
- /* Stash user ESP and switch to the kernel stack. */
+ /* Stash user ESP */
movl %esp, %r8d
+
+ /* Use %rsp as scratch reg. User ESP is stashed in r8 */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
+ /* Switch to the kernel stack */
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
/* Construct struct pt_regs on stack */
@@ -220,12 +225,6 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
pushq $0 /* pt_regs->r15 = 0 */
/*
- * We just saved %rdi so it is safe to clobber. It is not
- * preserved during the C calls inside TRACE_IRQS_OFF anyway.
- */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-
- /*
* User mode is traced as though IRQs are on, and SYSENTER
* turned them off.
*/
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index a6eee5ac4f58..2aefacf5c5b2 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void)
int ret;
if (!x86_match_cpu(cpu_match))
- return 0;
+ return -ENODEV;
if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
return -ENODEV;
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 141e07b06216..24ffa1e88cf9 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -582,6 +582,24 @@ static __init int bts_init(void)
if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
return -ENODEV;
+ if (boot_cpu_has(X86_FEATURE_PTI)) {
+ /*
+ * BTS hardware writes through a virtual memory map; we must
+ * either use the kernel physical map or the user mapping of
+ * the AUX buffer.
+ *
+ * However, since this driver supports per-CPU and per-task inherit,
+ * we cannot use the user mapping, since it will not be available
+ * if we're not running the owning process.
+ *
+ * With PTI we can't use the kernel map either, because it's not
+ * there when we run userspace.
+ *
+ * For now, disable this driver when using PTI.
+ */
+ return -ENODEV;
+ }
+
bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
PERF_PMU_CAP_EXCLUSIVE;
bts_pmu.task_ctx_nr = perf_sw_context;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8f0aace08b87..18c25ab28557 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -5,6 +5,7 @@
#include <asm/cpu_entry_area.h>
#include <asm/perf_event.h>
+#include <asm/tlbflush.h>
#include <asm/insn.h>
#include "../perf_event.h"
@@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
+ unsigned long start = (unsigned long)cea;
phys_addr_t pa;
size_t msz = 0;
pa = virt_to_phys(addr);
+
+ preempt_disable();
for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
cea_set_pte(cea, pa, prot);
+
+ /*
+ * This is a cross-CPU update of the cpu_entry_area, we must shoot down
+ * all TLB entries for it.
+ */
+ flush_tlb_kernel_range(start, start + size);
+ preempt_enable();
}
static void ds_clear_cea(void *cea, size_t size)
{
+ unsigned long start = (unsigned long)cea;
size_t msz = 0;
+ preempt_disable();
for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
cea_set_pte(cea, 0, PAGE_NONE);
+
+ flush_tlb_kernel_range(start, start + size);
+ preempt_enable();
}
static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
@@ -356,10 +372,9 @@ static int alloc_pebs_buffer(int cpu)
static void release_pebs_buffer(int cpu)
{
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
- struct debug_store *ds = hwev->ds;
void *cea;
- if (!ds || !x86_pmu.pebs)
+ if (!x86_pmu.pebs)
return;
kfree(per_cpu(insn_buffer, cpu));
@@ -368,7 +383,6 @@ static void release_pebs_buffer(int cpu)
/* Clear the fixmap */
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
- ds->pebs_buffer_base = 0;
dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
hwev->ds_pebs_vaddr = NULL;
}
@@ -403,16 +417,14 @@ static int alloc_bts_buffer(int cpu)
static void release_bts_buffer(int cpu)
{
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
- struct debug_store *ds = hwev->ds;
void *cea;
- if (!ds || !x86_pmu.bts)
+ if (!x86_pmu.bts)
return;
/* Clear the fixmap */
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
ds_clear_cea(cea, BTS_BUFFER_SIZE);
- ds->bts_buffer_base = 0;
dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
hwev->ds_bts_vaddr = NULL;
}
@@ -438,16 +450,22 @@ void release_ds_buffers(void)
if (!x86_pmu.bts && !x86_pmu.pebs)
return;
- get_online_cpus();
- for_each_online_cpu(cpu)
+ for_each_possible_cpu(cpu)
+ release_ds_buffer(cpu);
+
+ for_each_possible_cpu(cpu) {
+ /*
+ * Again, ignore errors from offline CPUs: they no longer
+ * observe cpu_hw_events.ds and thus will not program the
+ * DS_AREA when they come up.
+ */
fini_debug_store_on_cpu(cpu);
+ }
for_each_possible_cpu(cpu) {
release_pebs_buffer(cpu);
release_bts_buffer(cpu);
- release_ds_buffer(cpu);
}
- put_online_cpus();
}
void reserve_ds_buffers(void)
@@ -467,8 +485,6 @@ void reserve_ds_buffers(void)
if (!x86_pmu.pebs)
pebs_err = 1;
- get_online_cpus();
-
for_each_possible_cpu(cpu) {
if (alloc_ds_buffer(cpu)) {
bts_err = 1;
@@ -505,11 +521,14 @@ void reserve_ds_buffers(void)
if (x86_pmu.pebs && !pebs_err)
x86_pmu.pebs_active = 1;
- for_each_online_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ /*
+ * Ignore wrmsr_on_cpu() errors for offline CPUs; they
+ * will get this call through intel_pmu_cpu_starting().
+ */
init_debug_store_on_cpu(cpu);
+ }
}
-
- put_online_cpus();
}
/*
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 005908ee9333..a2efb490f743 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -755,14 +755,14 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
- X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hsx_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
- X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 14efaa0e8684..18e2628e2d8f 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -10,7 +10,9 @@ enum perf_msr_id {
PERF_MSR_SMI = 4,
PERF_MSR_PTSC = 5,
PERF_MSR_IRPERF = 6,
-
+ PERF_MSR_THERM = 7,
+ PERF_MSR_THERM_SNAP = 8,
+ PERF_MSR_THERM_UNIT = 9,
PERF_MSR_EVENT_MAX,
};
@@ -29,6 +31,11 @@ static bool test_irperf(int idx)
return boot_cpu_has(X86_FEATURE_IRPERF);
}
+static bool test_therm_status(int idx)
+{
+ return boot_cpu_has(X86_FEATURE_DTHERM);
+}
+
static bool test_intel(int idx)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
@@ -95,22 +102,28 @@ struct perf_msr {
bool (*test)(int idx);
};
-PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00");
-PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01");
-PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
-PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
-PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04");
-PMU_EVENT_ATTR_STRING(ptsc, evattr_ptsc, "event=0x05");
-PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");
+PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00" );
+PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01" );
+PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02" );
+PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03" );
+PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04" );
+PMU_EVENT_ATTR_STRING(ptsc, evattr_ptsc, "event=0x05" );
+PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06" );
+PMU_EVENT_ATTR_STRING(cpu_thermal_margin, evattr_therm, "event=0x07" );
+PMU_EVENT_ATTR_STRING(cpu_thermal_margin.snapshot, evattr_therm_snap, "1" );
+PMU_EVENT_ATTR_STRING(cpu_thermal_margin.unit, evattr_therm_unit, "C" );
static struct perf_msr msr[] = {
- [PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, },
- [PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
- [PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
- [PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, },
- [PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, },
- [PERF_MSR_PTSC] = { MSR_F15H_PTSC, &evattr_ptsc, test_ptsc, },
- [PERF_MSR_IRPERF] = { MSR_F17H_IRPERF, &evattr_irperf, test_irperf, },
+ [PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, },
+ [PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
+ [PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
+ [PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, },
+ [PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, },
+ [PERF_MSR_PTSC] = { MSR_F15H_PTSC, &evattr_ptsc, test_ptsc, },
+ [PERF_MSR_IRPERF] = { MSR_F17H_IRPERF, &evattr_irperf, test_irperf, },
+ [PERF_MSR_THERM] = { MSR_IA32_THERM_STATUS, &evattr_therm, test_therm_status, },
+ [PERF_MSR_THERM_SNAP] = { MSR_IA32_THERM_STATUS, &evattr_therm_snap, test_therm_status, },
+ [PERF_MSR_THERM_UNIT] = { MSR_IA32_THERM_STATUS, &evattr_therm_unit, test_therm_status, },
};
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
@@ -161,9 +174,9 @@ static int msr_event_init(struct perf_event *event)
if (!msr[cfg].attr)
return -EINVAL;
- event->hw.idx = -1;
- event->hw.event_base = msr[cfg].msr;
- event->hw.config = cfg;
+ event->hw.idx = -1;
+ event->hw.event_base = msr[cfg].msr;
+ event->hw.config = cfg;
return 0;
}
@@ -184,7 +197,7 @@ static void msr_event_update(struct perf_event *event)
u64 prev, now;
s64 delta;
- /* Careful, an NMI might modify the previous event value. */
+ /* Careful, an NMI might modify the previous event value: */
again:
prev = local64_read(&event->hw.prev_count);
now = msr_read_counter(event);
@@ -193,17 +206,22 @@ again:
goto again;
delta = now - prev;
- if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
+ if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
delta = sign_extend64(delta, 31);
-
- local64_add(delta, &event->count);
+ local64_add(delta, &event->count);
+ } else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) {
+ /* If valid, extract digital readout, otherwise set to -1: */
+ now = now & (1ULL << 31) ? (now >> 16) & 0x3f : -1;
+ local64_set(&event->count, now);
+ } else {
+ local64_add(delta, &event->count);
+ }
}
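
A note on the IA32_THERM_STATUS branch above: bit 31 is the "reading valid" flag and, with the 0x3f mask used here, bits 21:16 carry the digital readout, i.e. the margin in degrees Celsius below TjMax. The same decode as a standalone sketch (not kernel code):

	/* Sketch of the decode above: returns the thermal margin, or -1. */
	static long long therm_margin(unsigned long long msr_val)
	{
		if (msr_val & (1ULL << 31))		/* reading valid? */
			return (msr_val >> 16) & 0x3f;	/* margin below TjMax, in C */
		return -1;				/* invalid reading */
	}
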
static void msr_event_start(struct perf_event *event, int flags)
{
- u64 now;
+ u64 now = msr_read_counter(event);
- now = msr_read_counter(event);
local64_set(&event->hw.prev_count, now);
}
@@ -250,9 +268,7 @@ static int __init msr_init(void)
for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
u64 val;
- /*
- * Virt sucks arse; you cannot tell if a R/O MSR is present :/
- */
+ /* Virt sucks; you cannot tell if an R/O MSR is present :/ */
if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
msr[i].attr = NULL;
}
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 9cc9e1c1e2db..56c9ebac946f 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -137,7 +137,12 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
}
if (info->mm) {
+ /*
+ * AddressSpace argument must match the CR3 with PCID bits
+ * stripped out.
+ */
flush->address_space = virt_to_phys(info->mm->pgd);
+ flush->address_space &= CR3_ADDR_MASK;
flush->flags = 0;
} else {
flush->address_space = 0;
@@ -219,7 +224,12 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
}
if (info->mm) {
+ /*
+ * AddressSpace argument must match the CR3 with PCID bits
+ * stripped out.
+ */
flush->address_space = virt_to_phys(info->mm->pgd);
+ flush->address_space &= CR3_ADDR_MASK;
flush->flags = 0;
} else {
flush->address_space = 0;
@@ -278,8 +288,6 @@ void hyperv_setup_mmu_ops(void)
if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
return;
- setup_clear_cpu_cap(X86_FEATURE_PCID);
-
if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
pr_info("Using hypercall for remote TLB flush\n");
pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 8d0ec9df1cbe..44f5d79d5105 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -49,7 +49,7 @@ extern int acpi_fix_pin2_polarity;
extern int acpi_disable_cmcff;
extern u8 acpi_sci_flags;
-extern int acpi_sci_override_gsi;
+extern u32 acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
struct device;
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index dbfd0854651f..cf5961ca8677 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
- ".popsection"
+ ".popsection\n"
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR_2(oldinstr, 1, 2) \
@@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
- ".popsection"
+ ".popsection\n"
/*
* Alternative instructions for different CPU types or capabilities.
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a9e57f08bfa6..98722773391d 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -136,6 +136,7 @@ extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
extern void sync_Arb_IDs(void);
+extern void init_bsp_APIC(void);
extern void apic_intr_mode_init(void);
extern void setup_local_APIC(void);
extern void init_apic_mappings(void);
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index ff700d81e91e..4d111616524b 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -11,7 +11,34 @@
#include <asm/pgtable.h>
#include <asm/special_insns.h>
#include <asm/preempt.h>
+#include <asm/asm.h>
#ifndef CONFIG_X86_CMPXCHG64
extern void cmpxchg8b_emu(void);
#endif
+
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_32
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
+#else
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
+INDIRECT_THUNK(8)
+INDIRECT_THUNK(9)
+INDIRECT_THUNK(10)
+INDIRECT_THUNK(11)
+INDIRECT_THUNK(12)
+INDIRECT_THUNK(13)
+INDIRECT_THUNK(14)
+INDIRECT_THUNK(15)
+#endif
+INDIRECT_THUNK(ax)
+INDIRECT_THUNK(bx)
+INDIRECT_THUNK(cx)
+INDIRECT_THUNK(dx)
+INDIRECT_THUNK(si)
+INDIRECT_THUNK(di)
+INDIRECT_THUNK(bp)
+asmlinkage void __fill_rsb(void);
+asmlinkage void __clear_rsb(void);
+
+#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 2cbd75dd2fd3..e1c8dab86670 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -127,88 +127,6 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[128/sizeof(int) - 3];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- int _overrun_incr; /* amount to add to overrun */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGCHLD (x32 version) */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_s64 _utime;
- compat_s64 _stime;
- } _sigchld_x32;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- short int _addr_lsb; /* Valid LSB of the reported address. */
- union {
- /* used when si_code=SEGV_BNDERR */
- struct {
- compat_uptr_t _lower;
- compat_uptr_t _upper;
- } _addr_bnd;
- /* used when si_code=SEGV_PKUERR */
- compat_u32 _pkey;
- };
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- struct {
- unsigned int _call_addr; /* calling insn */
- int _syscall; /* triggering system call number */
- unsigned int _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
struct compat_ipc64_perm {
@@ -331,4 +249,8 @@ static inline bool in_compat_syscall(void)
}
#define in_compat_syscall in_compat_syscall /* override the generic impl */
+struct compat_siginfo;
+int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
+ const siginfo_t *from, bool x32_ABI);
+
#endif /* _ASM_X86_COMPAT_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index ea9a7dde62e5..70eddb3922ff 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -29,6 +29,7 @@ enum cpuid_leafs
CPUID_8000_000A_EDX,
CPUID_7_ECX,
CPUID_8000_0007_EBX,
+ CPUID_7_EDX,
};
#ifdef CONFIG_X86_FEATURE_NAMES
@@ -79,8 +80,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -101,8 +103,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 07cdd1715705..1d9199e1c2ad 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 18 /* N 32-bit words worth of info */
+#define NCAPINTS 19 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
@@ -203,12 +203,15 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
+#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -243,6 +246,7 @@
#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
@@ -268,6 +272,9 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -316,6 +323,13 @@
#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+
/*
* BUG word(s)
*/
@@ -341,6 +355,8 @@
#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
-#define X86_BUG_CPU_INSECURE X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#endif /* _ASM_X86_CPUFEATURES_H */
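
Each X86_FEATURE_* constant encodes word*32 + bit, so moving X86_FEATURE_INTEL_PT to word 9 and the AVX-512 bits to the new word 18 only relocates where the CPUID data is stored; test sites keep working unchanged. A sketch of how such a number indexes the capability bitmap (array name illustrative):

	/* Sketch: how a (word*32 + bit) feature number selects its bit. */
	static int has_feature(const unsigned int *caps, int feature)
	{
		return (caps[feature / 32] >> (feature % 32)) & 1;
	}
	/* e.g. X86_FEATURE_SPEC_CTRL == 18*32 + 26 == 602: word 18, bit 26. */
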
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index b027633e7300..33833d1909af 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -77,6 +77,7 @@
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
#define DISABLED_MASK17 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define DISABLED_MASK18 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/dma-direct.h b/arch/x86/include/asm/dma-direct.h
new file mode 100644
index 000000000000..1295bc622ebe
--- /dev/null
+++ b/arch/x86/include/asm/dma-direct.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_X86_DMA_DIRECT_H
+#define ASM_X86_DMA_DIRECT_H 1
+
+#include <linux/mem_encrypt.h>
+
+#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
+bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+#else
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return __sme_set(paddr);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return __sme_clr(daddr);
+}
+#endif /* CONFIG_X86_DMA_REMAP */
+#endif /* ASM_X86_DMA_DIRECT_H */
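
In this non-remap variant, phys_to_dma() and dma_to_phys() merely apply or strip the SME encryption mask via __sme_set()/__sme_clr(). A hedged usage sketch built from the helpers above (the wrapper itself is illustrative):

	/* Sketch: can a device directly address a given page? */
	static bool page_dma_ok(struct device *dev, struct page *page)
	{
		/* phys_to_dma() applies the SME mask to the physical address. */
		dma_addr_t da = phys_to_dma(dev, page_to_phys(page));

		return dma_capable(dev, da, PAGE_SIZE);	/* fits under dma_mask? */
	}
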
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 0350d99bb8fd..6277c83c0eb1 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -12,7 +12,6 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>
-#include <linux/mem_encrypt.h>
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -31,6 +30,9 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
+int arch_dma_supported(struct device *dev, u64 mask);
+#define arch_dma_supported arch_dma_supported
+
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs
@@ -42,31 +44,6 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
unsigned long attrs);
-#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
-extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
-extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
-#else
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return 0;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return __sme_set(paddr);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return __sme_clr(daddr);
-}
-#endif /* CONFIG_X86_DMA_REMAP */
-
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
gfp_t gfp)
{
diff --git a/arch/x86/include/asm/error-injection.h b/arch/x86/include/asm/error-injection.h
new file mode 100644
index 000000000000..47b7a1296245
--- /dev/null
+++ b/arch/x86/include/asm/error-injection.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ERROR_INJECTION_H
+#define _ASM_ERROR_INJECTION_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm-generic/error-injection.h>
+
+asmlinkage void just_return_func(void);
+void override_function_with_return(struct pt_regs *regs);
+
+#endif /* _ASM_ERROR_INJECTION_H */
diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h
index 4df2754ef380..44bbc39a57b3 100644
--- a/arch/x86/include/asm/fpu/signal.h
+++ b/arch/x86/include/asm/fpu/signal.h
@@ -20,12 +20,6 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
# define ia32_setup_rt_frame __setup_rt_frame
#endif
-#ifdef CONFIG_COMPAT
-int __copy_siginfo_to_user32(compat_siginfo_t __user *to,
- const siginfo_t *from, bool x32_ABI);
-#endif
-
-
extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 96aa6b9884dc..8c5aaba6633f 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -28,6 +28,7 @@ enum x86_hypervisor_type {
X86_HYPER_XEN_PV,
X86_HYPER_XEN_HVM,
X86_HYPER_KVM,
+ X86_HYPER_JAILHOUSE,
};
#ifdef CONFIG_HYPERVISOR_GUEST
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index c8376b40e882..5cdcdbd4d892 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -69,6 +69,11 @@ struct legacy_pic {
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
+static inline bool has_legacy_pic(void)
+{
+ return legacy_pic != &null_legacy_pic;
+}
+
static inline int nr_legacy_irqs(void)
{
return legacy_pic->nr_legacy_irqs;
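
The new has_legacy_pic() helper gives callers a one-line probe for PIC/PIT-less platforms. A plausible guard in a driver init path (sketch; the function is illustrative):

	/* Sketch: bail out of a legacy-timer user when there is no PIC/PIT. */
	static int __init legacy_timer_init(void)
	{
		if (!has_legacy_pic())
			return -ENODEV;
		/* ... program the PIT ... */
		return 0;
	}
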
diff --git a/arch/x86/include/asm/jailhouse_para.h b/arch/x86/include/asm/jailhouse_para.h
new file mode 100644
index 000000000000..875b54376689
--- /dev/null
+++ b/arch/x86/include/asm/jailhouse_para.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Jailhouse paravirt_ops implementation
+ *
+ * Copyright (c) Siemens AG, 2015-2017
+ *
+ * Authors:
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ */
+
+#ifndef _ASM_X86_JAILHOUSE_PARA_H
+#define _ASM_X86_JAILHOUSE_PARA_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_JAILHOUSE_GUEST
+bool jailhouse_paravirt(void);
+#else
+static inline bool jailhouse_paravirt(void)
+{
+ return false;
+}
+#endif
+
+#endif /* _ASM_X86_JAILHOUSE_PARA_H */
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 9f2e3102e0bb..367d99cff426 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -67,6 +67,8 @@ extern const int kretprobe_blacklist_size;
void arch_remove_kprobe(struct kprobe *p);
asmlinkage void kretprobe_trampoline(void);
+extern void arch_kprobe_override_function(struct pt_regs *regs);
+
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
/* copy of the original instruction */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index b1e8d8db921f..96ea4b5ba658 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -376,6 +376,7 @@ struct smca_bank {
extern struct smca_bank smca_banks[MAX_NR_BANKS];
extern const char *smca_get_long_name(enum smca_bank_types t);
+extern bool amd_mce_is_memory_error(struct mce *m);
extern int mce_threshold_create_device(unsigned int cpu);
extern int mce_threshold_remove_device(unsigned int cpu);
@@ -384,6 +385,7 @@ extern int mce_threshold_remove_device(unsigned int cpu);
static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
+static inline bool amd_mce_is_memory_error(struct mce *m) { return false; };
#endif
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index c9459a4c3c68..22c5f3e6f820 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data);
void __init sme_early_init(void);
-void __init sme_encrypt_kernel(void);
+void __init sme_encrypt_kernel(struct boot_params *bp);
void __init sme_enable(struct boot_params *bp);
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
@@ -67,7 +67,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
static inline void __init sme_early_init(void) { }
-static inline void __init sme_encrypt_kernel(void) { }
+static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }
static inline bool sme_active(void) { return false; }
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index a6bec8028480..6fb923a34309 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -128,9 +128,17 @@ enum mp_irq_source_types {
mp_ExtINT = 3
};
-#define MP_IRQDIR_DEFAULT 0
-#define MP_IRQDIR_HIGH 1
-#define MP_IRQDIR_LOW 3
+#define MP_IRQPOL_DEFAULT 0x0
+#define MP_IRQPOL_ACTIVE_HIGH 0x1
+#define MP_IRQPOL_RESERVED 0x2
+#define MP_IRQPOL_ACTIVE_LOW 0x3
+#define MP_IRQPOL_MASK 0x3
+
+#define MP_IRQTRIG_DEFAULT 0x0
+#define MP_IRQTRIG_EDGE 0x4
+#define MP_IRQTRIG_RESERVED 0x8
+#define MP_IRQTRIG_LEVEL 0xc
+#define MP_IRQTRIG_MASK 0xc
#define MP_APIC_ALL 0xFF
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index b623a4288e8b..b52af150cbd8 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -7,6 +7,7 @@
#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/hyperv.h>
+#include <asm/nospec-branch.h>
/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
return U64_MAX;
__asm__ __volatile__("mov %4, %%r8\n"
- "call *%5"
+ CALL_NOSPEC
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input_address)
- : "r" (output_address), "m" (hv_hypercall_pg)
+ : "r" (output_address),
+ THUNK_TARGET(hv_hypercall_pg)
: "cc", "memory", "r8", "r9", "r10", "r11");
#else
u32 input_address_hi = upper_32_bits(input_address);
@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
if (!hv_hypercall_pg)
return U64_MAX;
- __asm__ __volatile__("call *%7"
+ __asm__ __volatile__(CALL_NOSPEC
: "=A" (hv_status),
"+c" (input_address_lo), ASM_CALL_CONSTRAINT
: "A" (control),
"b" (input_address_hi),
"D"(output_address_hi), "S"(output_address_lo),
- "m" (hv_hypercall_pg)
+ THUNK_TARGET(hv_hypercall_pg)
: "cc", "memory");
#endif /* !x86_64 */
return hv_status;
@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
#ifdef CONFIG_X86_64
{
- __asm__ __volatile__("call *%4"
+ __asm__ __volatile__(CALL_NOSPEC
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input1)
- : "m" (hv_hypercall_pg)
+ : THUNK_TARGET(hv_hypercall_pg)
: "cc", "r8", "r9", "r10", "r11");
}
#else
@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
u32 input1_hi = upper_32_bits(input1);
u32 input1_lo = lower_32_bits(input1);
- __asm__ __volatile__ ("call *%5"
+ __asm__ __volatile__ (CALL_NOSPEC
: "=A"(hv_status),
"+c"(input1_lo),
ASM_CALL_CONSTRAINT
: "A" (control),
"b" (input1_hi),
- "m" (hv_hypercall_pg)
+ THUNK_TARGET(hv_hypercall_pg)
: "cc", "edi", "esi");
}
#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 34c4922bbc3f..e520a1e6fc11 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -39,6 +39,13 @@
/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+
+#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+
#define MSR_PPIN_CTL 0x0000004e
#define MSR_PPIN 0x0000004f
@@ -57,6 +64,11 @@
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
#define MSR_MTRRcap 0x000000fe
+
+#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
@@ -355,6 +367,9 @@
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
+#define MSR_F10H_DECFG 0xc0011029
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
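
Of the two new MSRs above, SPEC_CTRL is a read/write control while PRED_CMD is write-only: writing PRED_CMD_IBPB is itself the barrier. A minimal sketch, roughly what a helper built on these definitions would do:

	/* Sketch: issue an Indirect Branch Prediction Barrier (IBPB). */
	static inline void ibpb(void)
	{
		/* PRED_CMD is write-only; the write fires a one-shot barrier. */
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
	}
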
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
new file mode 100644
index 000000000000..d15d471348b8
--- /dev/null
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_X86_NOSPEC_BRANCH_H_
+#define _ASM_X86_NOSPEC_BRANCH_H_
+
+#include <asm/alternative.h>
+#include <asm/alternative-asm.h>
+#include <asm/cpufeatures.h>
+
+#ifdef __ASSEMBLY__
+
+/*
+ * This should be used immediately before a retpoline alternative. It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+.macro ANNOTATE_NOSPEC_ALTERNATIVE
+ .Lannotate_\@:
+ .pushsection .discard.nospec
+ .long .Lannotate_\@ - .
+ .popsection
+.endm
+
+/*
+ * These are the bare retpoline primitives for indirect jmp and call.
+ * Do not use these directly; they only exist to make the ALTERNATIVE
+ * invocation below less ugly.
+ */
+.macro RETPOLINE_JMP reg:req
+ call .Ldo_rop_\@
+.Lspec_trap_\@:
+ pause
+ lfence
+ jmp .Lspec_trap_\@
+.Ldo_rop_\@:
+ mov \reg, (%_ASM_SP)
+ ret
+.endm
+
+/*
+ * This is a wrapper around RETPOLINE_JMP so the called function in reg
+ * returns to the instruction after the macro.
+ */
+.macro RETPOLINE_CALL reg:req
+ jmp .Ldo_call_\@
+.Ldo_retpoline_jmp_\@:
+ RETPOLINE_JMP \reg
+.Ldo_call_\@:
+ call .Ldo_retpoline_jmp_\@
+.endm
+
+/*
+ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
+ * indirect jmp/call which may be susceptible to the Spectre variant 2
+ * attack.
+ */
+.macro JMP_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+ ANNOTATE_NOSPEC_ALTERNATIVE
+ ALTERNATIVE_2 __stringify(jmp *\reg), \
+ __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
+ __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+ jmp *\reg
+#endif
+.endm
+
+.macro CALL_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+ ANNOTATE_NOSPEC_ALTERNATIVE
+ ALTERNATIVE_2 __stringify(call *\reg), \
+ __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
+ __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+ call *\reg
+#endif
+.endm
+
+/* This clobbers the BX register */
+.macro FILL_RETURN_BUFFER nr:req ftr:req
+#ifdef CONFIG_RETPOLINE
+ ALTERNATIVE "", "call __clear_rsb", \ftr
+#endif
+.endm
+
+#else /* __ASSEMBLY__ */
+
+#define ANNOTATE_NOSPEC_ALTERNATIVE \
+ "999:\n\t" \
+ ".pushsection .discard.nospec\n\t" \
+ ".long 999b - .\n\t" \
+ ".popsection\n\t"
+
+#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+
+/*
+ * Since the inline asm uses the %V modifier, which is only in newer GCC,
+ * the 64-bit one depends on RETPOLINE rather than CONFIG_RETPOLINE.
+ */
+# define CALL_NOSPEC \
+ ANNOTATE_NOSPEC_ALTERNATIVE \
+ ALTERNATIVE( \
+ "call *%[thunk_target]\n", \
+ "call __x86_indirect_thunk_%V[thunk_target]\n", \
+ X86_FEATURE_RETPOLINE)
+# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
+#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+/*
+ * For i386 we use the original ret-equivalent retpoline, because
+ * otherwise we'll run out of registers. We don't care about CET
+ * here, anyway.
+ */
+# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
+ " jmp 904f;\n" \
+ " .align 16\n" \
+ "901: call 903f;\n" \
+ "902: pause;\n" \
+ " lfence;\n" \
+ " jmp 902b;\n" \
+ " .align 16\n" \
+ "903: addl $4, %%esp;\n" \
+ " pushl %[thunk_target];\n" \
+ " ret;\n" \
+ " .align 16\n" \
+ "904: call 901b;\n", \
+ X86_FEATURE_RETPOLINE)
+
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#else /* No retpoline for C / inline asm */
+# define CALL_NOSPEC "call *%[thunk_target]\n"
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
+
+/* The Spectre V2 mitigation variants */
+enum spectre_v2_mitigation {
+ SPECTRE_V2_NONE,
+ SPECTRE_V2_RETPOLINE_MINIMAL,
+ SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
+ SPECTRE_V2_RETPOLINE_GENERIC,
+ SPECTRE_V2_RETPOLINE_AMD,
+ SPECTRE_V2_IBRS,
+};
+
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
+/*
+ * On VMEXIT we must ensure that no RSB predictions learned in the guest
+ * can be followed in the host, by overwriting the RSB completely. Both
+ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+ * CPUs with IBRS_ATT *might* it be avoided.
+ */
+static inline void vmexit_fill_RSB(void)
+{
+#ifdef CONFIG_RETPOLINE
+ alternative_input("",
+ "call __fill_rsb",
+ X86_FEATURE_RETPOLINE,
+ ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
+#endif
+}
+
+static inline void indirect_branch_prediction_barrier(void)
+{
+ alternative_input("",
+ "call __ibp_barrier",
+ X86_FEATURE_USE_IBPB,
+ ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
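
From C, CALL_NOSPEC and THUNK_TARGET slot into inline asm wherever a plain call *%reg used to sit; the mshyperv.h hunk above is the in-tree user. A minimal x86-64 sketch for an argument-less function pointer (wrapper name and clobber list are illustrative):

	/* Sketch: retpoline-safe indirect call from C (x86-64 only). */
	static unsigned long call_nospec(unsigned long (*fn)(void))
	{
		unsigned long ret;

		asm volatile (CALL_NOSPEC
			      : "=a" (ret), ASM_CALL_CONSTRAINT
			      : THUNK_TARGET(fn)
			      : "rcx", "rdx", "rsi", "rdi",
				"r8", "r9", "r10", "r11", "cc", "memory");
		return ret;
	}
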
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 7a5d6695abd3..eb66fa9cd0fc 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -38,6 +38,7 @@ do { \
#define PCI_NOASSIGN_ROMS 0x80000
#define PCI_ROOT_NO_CRS 0x100000
#define PCI_NOASSIGN_BARS 0x200000
+#define PCI_BIG_ROOT_WINDOW 0x400000
extern unsigned int pci_probe;
extern unsigned long pirq_table_addr;
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index bc4af5453802..f24df59c40b2 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -158,7 +158,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
-#ifdef CONFIG_SMP
union split_pmd {
struct {
u32 pmd_low;
@@ -166,6 +165,8 @@ union split_pmd {
};
pmd_t pmd;
};
+
+#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
union split_pmd res, *orig = (union split_pmd *)pmdp;
@@ -181,6 +182,40 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+#ifndef pmdp_establish
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ pmd_t old;
+
+ /*
+ * If pmd has present bit cleared we can get away without expensive
+ * cmpxchg64: we can update pmdp half-by-half without racing with
+ * anybody.
+ */
+ if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
+ union split_pmd old, new, *ptr;
+
+ ptr = (union split_pmd *)pmdp;
+
+ new.pmd = pmd;
+
+ /* xchg acts as a barrier before setting the high bits */
+ old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
+ old.pmd_high = ptr->pmd_high;
+ ptr->pmd_high = new.pmd_high;
+ return old.pmd;
+ }
+
+ do {
+ old = *pmdp;
+ } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+
+ return old;
+}
+#endif
+
#ifdef CONFIG_SMP
union split_pud {
struct {
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e42b8943cb1a..63c2552b6b65 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1109,6 +1109,21 @@ static inline int pud_write(pud_t pud)
return pud_flags(pud) & _PAGE_RW;
}
+#ifndef pmdp_establish
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ if (IS_ENABLED(CONFIG_SMP)) {
+ return xchg(pmdp, pmd);
+ } else {
+ pmd_t old = *pmdp;
+ *pmdp = pmd;
+ return old;
+ }
+}
+#endif
+
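
pmdp_establish() lets generic code install a new pmd and get the old value back atomically. The intended caller in this series is the generic pmdp_invalidate(); roughly (a sketch of the mm/pgtable-generic.c shape, shown for context only):

	/* Sketch: invalidate a pmd without losing dirty/accessed bits. */
	pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			      pmd_t *pmdp)
	{
		pmd_t old = pmdp_establish(vma, address, pmdp,
					   pmd_mknotpresent(*pmdp));
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
		return old;
	}
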
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index b97a539bcdee..6b8f73dcbc2c 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -75,7 +75,13 @@ typedef struct { pteval_t pte; } pte_t;
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
-/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
+/*
+ * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
+ *
+ * Be very careful vs. KASLR when changing anything here. The KASLR address
+ * range must not overlap with anything except the KASAN shadow area, which
+ * is correct as KASAN disables KASLR.
+ */
#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
#ifdef CONFIG_X86_5LEVEL
@@ -88,7 +94,7 @@ typedef struct { pteval_t pte; } pte_t;
# define VMALLOC_SIZE_TB _AC(32, UL)
# define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
-# define LDT_PGD_ENTRY _AC(-4, UL)
+# define LDT_PGD_ENTRY _AC(-3, UL)
# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
#endif
@@ -104,13 +110,13 @@ typedef struct { pteval_t pte; } pte_t;
#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
/* The module sections ends with the start of the fixmap */
-#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1)
+#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#define ESPFIX_PGD_ENTRY _AC(-2, UL)
#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
-#define CPU_ENTRY_AREA_PGD _AC(-3, UL)
+#define CPU_ENTRY_AREA_PGD _AC(-4, UL)
#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
#define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 6a60fea90b9d..625a52a5594f 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -40,7 +40,7 @@
#define CR3_NOFLUSH BIT_ULL(63)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
-# define X86_CR3_PTI_SWITCH_BIT 11
+# define X86_CR3_PTI_PCID_USER_BIT 11
#endif
#else
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d3a67fba200a..efbde088a718 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -971,4 +971,7 @@ bool xen_set_default_idle(void);
void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
+
+void __ibp_barrier(void);
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 14131dd06b29..6de1fd3d0097 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -109,6 +109,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->ax;
}
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->ax = rc;
+}
+
/*
* user_mode(regs) determines whether a register set came from user
* mode. On x86_32, this is true if V8086 mode was enabled OR if the
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index d91ba04dd007..fb3a6de7440b 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -106,6 +106,7 @@
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 (NEED_LA57)
#define REQUIRED_MASK17 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define REQUIRED_MASK18 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index bdf9aed40403..1c6a6cb230ff 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -28,8 +28,6 @@ static inline void pci_swiotlb_late_init(void)
}
#endif
-static inline void dma_mark_clean(void *addr, size_t size) {}
-
extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 00223333821a..d25a638a2720 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -62,8 +62,6 @@ struct thread_info {
.flags = 0, \
}
-#define init_stack (init_thread_union.stack)
-
#else /* !__ASSEMBLY__ */
#include <asm/asm-offsets.h>
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4a08dd2ab32a..d33e4a26dc7e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,13 +81,13 @@ static inline u16 kern_pcid(u16 asid)
* Make sure that the dynamic ASID space does not conflict with the
* bit we are using to switch between user and kernel ASIDs.
*/
- BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT));
+ BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
/*
* The ASID being passed in here should have respected the
* MAX_ASID_AVAILABLE and thus never have the switch bit set.
*/
- VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT));
+ VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
/*
* The dynamically-assigned ASIDs that get passed in are small
@@ -112,7 +112,7 @@ static inline u16 user_pcid(u16 asid)
{
u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
- ret |= 1 << X86_CR3_PTI_SWITCH_BIT;
+ ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
return ret;
}
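
A worked example of the renamed bit: kern_pcid() offsets the dynamic ASID by one so hardware PCID 0 is never used (that part is outside this hunk), and user_pcid() sets bit 11 on top:

	/* Sketch: both hardware PCIDs derived from dynamic ASID 0. */
	u16 asid = 0;
	u16 kern = asid + 1;					/* 0x001 */
	u16 user = kern | (1 << X86_CR3_PTI_PCID_USER_BIT);	/* 0x801 */
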
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 31051f35cbb7..3de69330e6c5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -88,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *, long);
#endif
+dotraplinkage void do_mce(struct pt_regs *, long);
static inline int get_si_code(unsigned long condition)
{
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index c1688c2d0a12..1f86e1b0a5cd 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -56,18 +56,27 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
/*
- * WARNING: The entire pt_regs may not be safe to dereference. In some cases,
- * only the iret frame registers are accessible. Use with caution!
+ * If 'partial' returns true, only the iret frame registers are valid.
*/
-static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
+ bool *partial)
{
if (unwind_done(state))
return NULL;
+ if (partial) {
+#ifdef CONFIG_UNWINDER_ORC
+ *partial = !state->full_regs;
+#else
+ *partial = false;
+#endif
+ }
+
return state->regs;
}
#else
-static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
+ bool *partial)
{
return NULL;
}
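
From the consumer side, the new out-parameter is used roughly as below (sketch; 'state' is an initialized struct unwind_state and show_entry_regs() is a hypothetical printer):

	/* Sketch: honouring 'partial' while walking entry frames. */
	bool partial;
	struct pt_regs *regs = unwind_get_entry_regs(&state, &partial);

	if (regs)
		show_entry_regs(regs, partial);	/* only iret regs valid if partial */
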
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 74f4c2ff6427..d8bfa98fca98 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -53,6 +53,10 @@ struct arch_uprobe {
u8 fixups;
u8 ilen;
} defparam;
+ struct {
+ u8 reg_offset; /* to the start of pt_regs */
+ u8 ilen;
+ } push;
};
};
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 7cac79802ad2..7803114aa140 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -48,7 +48,6 @@
#define UV2_NET_ENDPOINT_INTD 0x28
#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
-#define UV_DESC_PSHIFT 49
#define UV_PAYLOADQ_GNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 036e26d63d9a..44cf6d6deb7a 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -241,6 +241,7 @@ static inline int uv_hub_info_check(int version)
#define UV2_HUB_REVISION_BASE 3
#define UV3_HUB_REVISION_BASE 5
#define UV4_HUB_REVISION_BASE 7
+#define UV4A_HUB_REVISION_BASE 8 /* UV4 (fixed) rev 2 */
#ifdef UV1_HUB_IS_SUPPORTED
static inline int is_uv1_hub(void)
@@ -280,6 +281,19 @@ static inline int is_uv3_hub(void)
}
#endif
+/* First test "is UV4A", then "is UV4" */
+#ifdef UV4A_HUB_IS_SUPPORTED
+static inline int is_uv4a_hub(void)
+{
+ return (uv_hub_info->hub_revision >= UV4A_HUB_REVISION_BASE);
+}
+#else
+static inline int is_uv4a_hub(void)
+{
+ return 0;
+}
+#endif
+
#ifdef UV4_HUB_IS_SUPPORTED
static inline int is_uv4_hub(void)
{
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 548d684a7960..ecb9ddef128f 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -39,9 +39,11 @@
* #define UV2Hxxx b
* #define UV3Hxxx c
* #define UV4Hxxx d
+ * #define UV4AHxxx e
* #define UVHxxx (is_uv1_hub() ? UV1Hxxx :
* (is_uv2_hub() ? UV2Hxxx :
* (is_uv3_hub() ? UV3Hxxx :
+ * (is_uv4a_hub() ? UV4AHxxx :
* UV4Hxxx))
*
* If the MMR exists on all hub types > 1 but have different addresses, the
@@ -49,8 +51,10 @@
* #define UV2Hxxx b
* #define UV3Hxxx c
* #define UV4Hxxx d
+ * #define UV4AHxxx e
* #define UVHxxx (is_uv2_hub() ? UV2Hxxx :
* (is_uv3_hub() ? UV3Hxxx :
+ * (is_uv4a_hub() ? UV4AHxxx :
* UV4Hxxx))
*
* union uvh_xxx {
@@ -63,6 +67,7 @@
* } s2;
* struct uv3h_xxx_s { # Full UV3 definition (*)
* } s3;
+ * (NOTE: No struct uv4ah_xxx_s members exist)
* struct uv4h_xxx_s { # Full UV4 definition (*)
* } s4;
* };
@@ -99,6 +104,7 @@
#define UV2_HUB_IS_SUPPORTED 1
#define UV3_HUB_IS_SUPPORTED 1
#define UV4_HUB_IS_SUPPORTED 1
+#define UV4A_HUB_IS_SUPPORTED 1
/* Error function to catch undefined references */
extern unsigned long uv_undefined(char *str);
@@ -2779,35 +2785,47 @@ union uvh_lb_bau_sb_activation_status_1_u {
/*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32)
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
-
+#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x00003ffffffff000UL
-
-
-union uvh_lb_bau_sb_descriptor_base_u {
- unsigned long v;
- struct uvh_lb_bau_sb_descriptor_base_s {
- unsigned long rsvd_0_11:12;
- unsigned long rsvd_12_48:37;
- unsigned long node_id:14; /* RW */
- unsigned long rsvd_63:1;
- } s;
- struct uv4h_lb_bau_sb_descriptor_base_s {
- unsigned long rsvd_0_11:12;
- unsigned long page_address:34; /* RW */
- unsigned long rsvd_46_48:3;
- unsigned long node_id:14; /* RW */
- unsigned long rsvd_63:1;
- } s4;
-};
+#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+
+#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 53
+#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000ffffffffff000UL
+#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0xffe0000000000000UL
+
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT ( \
+ is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
+ is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
+ is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
+ is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
+ /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT)
+
+#define UVH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK ( \
+ is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK : \
+ is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK : \
+ is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK : \
+ is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK : \
+ /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK)
+
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK ( \
+ is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
+ is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
+ is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
+ is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
+ /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK)
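With the per-hub variants in place, field extraction written against the UVH_ wrappers picks up the UV4A layout (node id at bit 53) automatically at runtime. A hedged sketch, assuming the UVH_LB_BAU_SB_DESCRIPTOR_BASE address macro defined elsewhere in this header; the helper itself is illustrative:

	static unsigned long bau_descriptor_node_id(void)
	{
		unsigned long v = uv_read_local_mmr(UVH_LB_BAU_SB_DESCRIPTOR_BASE);

		return (v & UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK) >>
		       UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT;
	}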
/* ========================================================================= */
/* UVH_NODE_ID */
@@ -3031,6 +3049,41 @@ union uvh_node_present_table_u {
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
unsigned long v;
@@ -3042,6 +3095,46 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
+ struct uv1h_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uvxh_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } sx;
+ struct uv2h_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s2;
+ struct uv3h_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s4;
};
/* ========================================================================= */
@@ -3064,6 +3157,41 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long v;
@@ -3075,6 +3203,46 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
+ struct uv1h_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uvxh_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } sx;
+ struct uv2h_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s2;
+ struct uv3h_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s4;
};
/* ========================================================================= */
@@ -3097,6 +3265,41 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long v;
@@ -3108,6 +3311,46 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
+ struct uv1h_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uvxh_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } sx;
+ struct uv2h_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s2;
+ struct uv3h_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s4;
};
/* ========================================================================= */
@@ -3126,6 +3369,21 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
unsigned long v;
@@ -3134,6 +3392,31 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
+ struct uv1h_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s1;
+ struct uvxh_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } sx;
+ struct uv2h_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s2;
+ struct uv3h_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s3;
+ struct uv4h_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s4;
};
/* ========================================================================= */
@@ -3152,6 +3435,21 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
unsigned long v;
@@ -3160,6 +3458,31 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
+ struct uv1h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s1;
+ struct uvxh_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } sx;
+ struct uv2h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s2;
+ struct uv3h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s3;
+ struct uv4h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s4;
};
/* ========================================================================= */
@@ -3178,6 +3501,21 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
unsigned long v;
@@ -3186,6 +3524,31 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
+ struct uv1h_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s1;
+ struct uvxh_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } sx;
+ struct uv2h_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s2;
+ struct uv3h_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s3;
+ struct uv4h_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s4;
};
/* ========================================================================= */
@@ -3384,6 +3747,162 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
};
/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR uv_undefined("UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR")
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR")
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x483000UL
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR)
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 52
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x000ffffffc000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x03f0000000000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)
+
+union uvh_rh_gam_mmioh_overlay_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_mmioh_overlay_config0_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s4;
+ struct uv4ah_rh_gam_mmioh_overlay_config0_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s4a;
+};
+
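Since the UV4A base field is wider (26 bits vs. 20), readers must pick the union view matching the running hub. A minimal sketch using the union and hub tests from this header (the helper is illustrative):

	static unsigned long mmioh0_base_field(void)
	{
		union uvh_rh_gam_mmioh_overlay_config0_mmr_u overlay;

		overlay.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR);

		if (is_uv4a_hub())
			return overlay.s4a.base;	/* 26-bit field */
		if (is_uv4_hub())
			return overlay.s4.base;		/* 20-bit field */
		return overlay.s3.base;			/* UV3, 20-bit field */
	}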
+/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR")
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR")
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x484000UL
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR)
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 52
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x000ffffffc000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x03f0000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
+
+union uvh_rh_gam_mmioh_overlay_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_mmioh_overlay_config1_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s4;
+ struct uv4ah_rh_gam_mmioh_overlay_config1_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s4a;
+};
+
+/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
@@ -3438,6 +3957,112 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
};
/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR")
+#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR")
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x483800UL
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR)
+
+#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH")
+#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH")
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH)
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000000fffUL
+
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK)
+
+union uvh_rh_gam_mmioh_redirect_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+ struct uv4h_rh_gam_mmioh_redirect_config0_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s4;
+ struct uv4ah_rh_gam_mmioh_redirect_config0_mmr_s {
+ unsigned long nasid:12; /* RW */
+ unsigned long rsvd_12_63:52;
+ } s4a;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR")
+#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR")
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x484800UL
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR)
+
+#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH")
+#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH")
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH)
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000000fffUL
+
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK)
+
+union uvh_rh_gam_mmioh_redirect_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+ struct uv4h_rh_gam_mmioh_redirect_config1_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s4;
+ struct uv4ah_rh_gam_mmioh_redirect_config1_mmr_s {
+ unsigned long nasid:12; /* RW */
+ unsigned long rsvd_12_63:52;
+ } s4a;
+};
+
+/* ========================================================================= */
/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
@@ -4138,88 +4763,6 @@ union uv3h_gr0_gam_gr_config_u {
};
/* ========================================================================= */
-/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
-/* ========================================================================= */
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
-
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-union uv3h_rh_gam_mmioh_overlay_config0_mmr_u {
- unsigned long v;
- struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
- unsigned long rsvd_0_25:26;
- unsigned long base:20; /* RW */
- unsigned long m_io:6; /* RW */
- unsigned long n_io:4;
- unsigned long rsvd_56_62:7;
- unsigned long enable:1; /* RW */
- } s3;
-};
-
-/* ========================================================================= */
-/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
-/* ========================================================================= */
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL
-
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-union uv3h_rh_gam_mmioh_overlay_config1_mmr_u {
- unsigned long v;
- struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
- unsigned long rsvd_0_25:26;
- unsigned long base:20; /* RW */
- unsigned long m_io:6; /* RW */
- unsigned long n_io:4;
- unsigned long rsvd_56_62:7;
- unsigned long enable:1; /* RW */
- } s3;
-};
-
-/* ========================================================================= */
-/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
-/* ========================================================================= */
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
-
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
-
-union uv3h_rh_gam_mmioh_redirect_config0_mmr_u {
- unsigned long v;
- struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
- unsigned long nasid:15; /* RW */
- unsigned long rsvd_15_63:49;
- } s3;
-};
-
-/* ========================================================================= */
-/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
-/* ========================================================================= */
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
-
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
-
-union uv3h_rh_gam_mmioh_redirect_config1_mmr_u {
- unsigned long v;
- struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
- unsigned long nasid:15; /* RW */
- unsigned long rsvd_15_63:49;
- } s3;
-};
-
-/* ========================================================================= */
/* UV4H_LB_PROC_INTD_QUEUE_FIRST */
/* ========================================================================= */
#define UV4H_LB_PROC_INTD_QUEUE_FIRST 0xa4100UL
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index aa4747569e23..fc2f082ac635 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -212,6 +212,7 @@ enum x86_legacy_i8042_state {
struct x86_legacy_features {
enum x86_legacy_i8042_state i8042;
int rtc;
+ int warm_reset;
int no_vga;
int reserve_bios_regions;
struct x86_legacy_devices devices;
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 7cb282e9e587..bfd882617613 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -44,6 +44,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/smap.h>
+#include <asm/nospec-branch.h>
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
@@ -217,9 +218,9 @@ privcmd_call(unsigned call,
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
stac();
- asm volatile("call *%[call]"
+ asm volatile(CALL_NOSPEC
: __HYPERCALL_5PARAM
- : [call] "a" (&hypercall_page[call])
+ : [thunk_target] "a" (&hypercall_page[call])
: __HYPERCALL_CLOBBER5);
clac();
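Context for this hunk: with CONFIG_RETPOLINE enabled, CALL_NOSPEC expands to a call through a return trampoline rather than a bare indirect call, and it references its target by the asm operand name thunk_target. A hedged sketch of the same pattern applied to a generic function pointer (func/arg are illustrative; the clobber list is a conservative guess for a System V call):

	static unsigned long call_indirect(unsigned long (*func)(unsigned long),
					   unsigned long arg)
	{
		unsigned long ret;

		asm volatile(CALL_NOSPEC
			     : "=a" (ret)
			     : [thunk_target] "r" (func), "D" (arg)
			     : "memory", "cc", "rcx", "rdx", "rsi",
			       "r8", "r9", "r10", "r11");
		return ret;
	}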
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index 1e901e421f2d..322681622d1e 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -5,3 +5,4 @@ generic-y += bpf_perf_event.h
generated-y += unistd_32.h
generated-y += unistd_64.h
generated-y += unistd_x32.h
+generic-y += poll.h
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index afdd5ae0fcc4..aebf60357758 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -9,6 +9,7 @@
#define SETUP_PCI 3
#define SETUP_EFI 4
#define SETUP_APPLE_PROPERTIES 5
+#define SETUP_JAILHOUSE 6
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
@@ -126,6 +127,27 @@ struct boot_e820_entry {
__u32 type;
} __attribute__((packed));
+/*
+ * Smallest compatible version of jailhouse_setup_data required by this kernel.
+ */
+#define JAILHOUSE_SETUP_REQUIRED_VERSION 1
+
+/*
+ * The boot loader passes platform information to the kernel via this
+ * Jailhouse-specific setup data structure.
+ */
+struct jailhouse_setup_data {
+ __u16 version;
+ __u16 compatible_version;
+ __u16 pm_timer_address;
+ __u16 num_cpus;
+ __u64 pci_mmconfig_base;
+ __u32 tsc_khz;
+ __u32 apic_khz;
+ __u8 standard_ioapic;
+ __u8 cpu_ids[255];
+} __attribute__((packed));
+
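A plausible consumer-side check for the two version fields (hedged sketch; the real validation lives in the new arch/x86/kernel/jailhouse.c and may differ, so the overlap rule below is an assumption):

	static int jailhouse_check_setup_data(const struct jailhouse_setup_data *sd)
	{
		/* Assumed semantics: 'version' is what the hypervisor speaks,
		 * 'compatible_version' the oldest layout it still honors.
		 */
		if (sd->compatible_version > JAILHOUSE_SETUP_REQUIRED_VERSION ||
		    sd->version < JAILHOUSE_SETUP_REQUIRED_VERSION)
			return -EINVAL;
		return 0;
	}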
/* The so-called "zeropage" */
struct boot_params {
struct screen_info screen_info; /* 0x000 */
diff --git a/arch/x86/include/uapi/asm/poll.h b/arch/x86/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/x86/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 81bb565f4497..29786c87e864 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,10 +29,13 @@ KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
-OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_test_nx.o := y
OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y
+ifdef CONFIG_FRAME_POINTER
+OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
+endif
+
# If instrumentation of this dir is enabled, boot hangs during first second.
# Probably could be more selective here, but note that files related to irqs,
# boot, dumpstack/stacktrace, etc are either non-interesting or can lead to
@@ -112,6 +115,8 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
+obj-$(CONFIG_JAILHOUSE_GUEST) += jailhouse.o
+
obj-$(CONFIG_EISA) += eisa.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index f4c463df8b08..ec3a286163c3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -68,8 +68,9 @@ int acpi_ioapic;
int acpi_strict;
int acpi_disable_cmcff;
+/* ACPI SCI override configuration */
u8 acpi_sci_flags __initdata;
-int acpi_sci_override_gsi __initdata;
+u32 acpi_sci_override_gsi __initdata = INVALID_ACPI_IRQ;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
int acpi_fix_pin2_polarity __initdata;
@@ -112,8 +113,6 @@ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
-#define ACPI_INVALID_GSI INT_MIN
-
/*
* This is just a simple wrapper around early_memremap(),
* with sanity checks for phys == 0 and size == 0.
@@ -372,7 +371,7 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
* and acpi_isa_irq_to_gsi() may give wrong result.
*/
if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi)
- isa_irq_to_gsi[gsi] = ACPI_INVALID_GSI;
+ isa_irq_to_gsi[gsi] = INVALID_ACPI_IRQ;
isa_irq_to_gsi[bus_irq] = gsi;
}
@@ -620,24 +619,24 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
}
rc = acpi_get_override_irq(gsi, &trigger, &polarity);
- if (rc == 0) {
- trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
- polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
- irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
- if (irq >= 0) {
- *irqp = irq;
- return 0;
- }
- }
+ if (rc)
+ return rc;
- return -1;
+ trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+ polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+ irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
+ if (irq < 0)
+ return irq;
+
+ *irqp = irq;
+ return 0;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
{
if (isa_irq < nr_legacy_irqs() &&
- isa_irq_to_gsi[isa_irq] != ACPI_INVALID_GSI) {
+ isa_irq_to_gsi[isa_irq] != INVALID_ACPI_IRQ) {
*gsi = isa_irq_to_gsi[isa_irq];
return 0;
}
@@ -676,8 +675,7 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
mutex_lock(&acpi_ioapic_lock);
irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
/* Don't set up the ACPI SCI because it's already set up */
- if (irq >= 0 && enable_update_mptable &&
- acpi_gbl_FADT.sci_interrupt != gsi)
+ if (irq >= 0 && enable_update_mptable && gsi != acpi_gbl_FADT.sci_interrupt)
mp_config_acpi_gsi(dev, gsi, trigger, polarity);
mutex_unlock(&acpi_ioapic_lock);
#endif
@@ -1211,8 +1209,9 @@ static int __init acpi_parse_madt_ioapic_entries(void)
/*
* If BIOS did not supply an INT_SRC_OVR for the SCI
* pretend we got one so we can set the SCI flags.
+ * But ignore setting up SCI on hardware-reduced platforms.
*/
- if (!acpi_sci_override_gsi)
+ if (acpi_sci_override_gsi == INVALID_ACPI_IRQ && !acpi_gbl_reduced_hardware)
acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
acpi_gbl_FADT.sci_interrupt);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 7188aea91549..f1915b744052 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -138,6 +138,8 @@ static int __init acpi_sleep_setup(char *str)
acpi_nvs_nosave_s3();
if (strncmp(str, "old_ordering", 12) == 0)
acpi_old_suspend_ordering();
+ if (strncmp(str, "nobl", 4) == 0)
+ acpi_sleep_no_blacklist();
str = strchr(str, ',');
if (str != NULL)
str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index dbaf14d69ebd..30571fdaaf6f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -298,7 +298,7 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
tgt_rip = next_rip + o_dspl;
n_dspl = tgt_rip - orig_insn;
- DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
+ DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
if (tgt_rip - orig_insn >= 0) {
if (n_dspl - 2 <= 127)
@@ -344,15 +344,18 @@ done:
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
unsigned long flags;
+ int i;
- if (instr[0] != 0x90)
- return;
+ for (i = 0; i < a->padlen; i++) {
+ if (instr[i] != 0x90)
+ return;
+ }
local_irq_save(flags);
add_nops(instr + (a->instrlen - a->padlen), a->padlen);
local_irq_restore(flags);
- DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
+ DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
instr, a->instrlen - a->padlen, a->padlen);
}
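The loop added above tightens the guard: previously only the first padding byte was inspected, so padding that began with 0x90 but contained other bytes could still be rewritten. The predicate it implements, isolated for clarity (illustrative, not part of the patch):

	static bool padding_is_all_nops(const u8 *instr, const struct alt_instr *a)
	{
		int i;

		for (i = 0; i < a->padlen; i++)
			if (instr[i] != 0x90)	/* 0x90 = one-byte NOP */
				return false;
		return true;
	}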
@@ -373,7 +376,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
u8 *instr, *replacement;
u8 insnbuf[MAX_PATCH_LEN];
- DPRINTK("alt table %p -> %p", start, end);
+ DPRINTK("alt table %px, -> %px", start, end);
/*
* The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code.
@@ -397,14 +400,14 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
continue;
}
- DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
+ DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
a->cpuid >> 5,
a->cpuid & 0x1f,
instr, a->instrlen,
replacement, a->replacementlen, a->padlen);
- DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
- DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
+ DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
+ DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
memcpy(insnbuf, replacement, a->replacementlen);
insnbuf_sz = a->replacementlen;
@@ -430,7 +433,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
a->instrlen - a->replacementlen);
insnbuf_sz += a->instrlen - a->replacementlen;
}
- DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
+ DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);
text_poke_early(instr, insnbuf, insnbuf_sz);
}
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index cc0e8bc0ea3f..ecd486cb06ab 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
+#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index f5d92bc3b884..2c4d5ece7456 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -30,6 +30,7 @@
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
+#include <linux/crash_dump.h>
/*
* Using 512M as goal, in case kexec will load kernel_big
@@ -56,6 +57,33 @@ int fallback_aper_force __initdata;
int fix_aperture __initdata = 1;
+#ifdef CONFIG_PROC_VMCORE
+/*
+ * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
+ * use the same range because it will remain configured in the northbridge.
+ * Trying to dump this area via /proc/vmcore may crash the machine, so exclude
+ * it from vmcore.
+ */
+static unsigned long aperture_pfn_start, aperture_page_count;
+
+static int gart_oldmem_pfn_is_ram(unsigned long pfn)
+{
+ return likely((pfn < aperture_pfn_start) ||
+ (pfn >= aperture_pfn_start + aperture_page_count));
+}
+
+static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+{
+ aperture_pfn_start = aper_base >> PAGE_SHIFT;
+ aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
+ WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
+}
+#else
+static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+{
+}
+#endif
+
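For scale, the excluded window is (32 MB << aper_order) bytes starting at aper_base; with 4 KiB pages the count computed above works out as in this worked example:

	/* aper_order = 0:  32 MB >> 12 =  8192 pages
	 * aper_order = 3: 256 MB >> 12 = 65536 pages
	 */
	unsigned long pages = ((32UL * 1024 * 1024) << 3) >> PAGE_SHIFT; /* 65536 */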
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
@@ -435,8 +463,16 @@ int __init gart_iommu_hole_init(void)
out:
if (!fix && !fallback_aper_force) {
- if (last_aper_base)
+ if (last_aper_base) {
+ /*
+ * If this is the kdump kernel, the first kernel
+ * may have allocated the range over its e820 RAM
+ * and fixed up the northbridge
+ */
+ exclude_from_vmcore(last_aper_base, last_aper_order);
+
return 1;
+ }
return 0;
}
@@ -473,6 +509,14 @@ out:
return 0;
}
+ /*
+ * If this is the kdump kernel _and_ the first kernel did not
+ * configure the aperture in the northbridge, this range may
+ * overlap with the first kernel's memory. We can't access the
+ * range through vmcore even though it should be part of the dump.
+ */
+ exclude_from_vmcore(aper_alloc, aper_order);
+
/* Fix up the north bridges */
for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus, dev_base, dev_limit;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 880441f24146..25ddf02598d2 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1286,6 +1286,55 @@ static int __init apic_intr_mode_select(void)
return APIC_SYMMETRIC_IO;
}
+/*
+ * An initial setup of the virtual wire mode.
+ */
+void __init init_bsp_APIC(void)
+{
+ unsigned int value;
+
+ /*
+ * Don't do the setup now if we have an SMP BIOS as the
+ * through-I/O-APIC virtual wire mode might be active.
+ */
+ if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
+ return;
+
+ /*
+ * Do not trust the local APIC being empty at bootup.
+ */
+ clear_local_APIC();
+
+ /*
+ * Enable APIC.
+ */
+ value = apic_read(APIC_SPIV);
+ value &= ~APIC_VECTOR_MASK;
+ value |= APIC_SPIV_APIC_ENABLED;
+
+#ifdef CONFIG_X86_32
+ /* This bit is reserved on P4/Xeon and should be cleared */
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+ (boot_cpu_data.x86 == 15))
+ value &= ~APIC_SPIV_FOCUS_DISABLED;
+ else
+#endif
+ value |= APIC_SPIV_FOCUS_DISABLED;
+ value |= SPURIOUS_APIC_VECTOR;
+ apic_write(APIC_SPIV, value);
+
+ /*
+ * Set up the virtual wire mode.
+ */
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
+ value = APIC_DM_NMI;
+ if (!lapic_is_integrated()) /* 82489DX */
+ value |= APIC_LVT_LEVEL_TRIGGER;
+ if (apic_extnmi == APIC_EXTNMI_NONE)
+ value |= APIC_LVT_MASKED;
+ apic_write(APIC_LVT1, value);
+}
+
/* Init the interrupt delivery mode for the BSP */
void __init apic_intr_mode_init(void)
{
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 25a87028cb3f..e84c9eb4e5b4 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -19,6 +19,7 @@
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
+#include <asm/jailhouse_para.h>
#include <linux/acpi.h>
@@ -84,12 +85,8 @@ flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
static void flat_send_IPI_allbutself(int vector)
{
int cpu = smp_processor_id();
-#ifdef CONFIG_HOTPLUG_CPU
- int hotplug = 1;
-#else
- int hotplug = 0;
-#endif
- if (hotplug || vector == NMI_VECTOR) {
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU) || vector == NMI_VECTOR) {
if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
unsigned long mask = cpumask_bits(cpu_online_mask)[0];
@@ -218,6 +215,15 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0;
}
+static void physflat_init_apic_ldr(void)
+{
+ /*
+ * LDR and DFR are not involved in physflat mode, rather:
+ * "In physical destination mode, the destination processor is
+ * specified by its local APIC ID [...]." (Intel SDM, 10.6.2.1)
+ */
+}
+
static void physflat_send_IPI_allbutself(int vector)
{
default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
@@ -230,7 +236,8 @@ static void physflat_send_IPI_all(int vector)
static int physflat_probe(void)
{
- if (apic == &apic_physflat || num_possible_cpus() > 8)
+ if (apic == &apic_physflat || num_possible_cpus() > 8 ||
+ jailhouse_paravirt())
return 1;
return 0;
@@ -251,8 +258,7 @@ static struct apic apic_physflat __ro_after_init = {
.dest_logical = 0,
.check_apicid_used = NULL,
- /* not needed, but shouldn't hurt: */
- .init_apic_ldr = flat_init_apic_ldr,
+ .init_apic_ldr = physflat_init_apic_ldr,
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8a7963421460..8ad2e410974f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -800,18 +800,18 @@ static int irq_polarity(int idx)
/*
* Determine IRQ line polarity (high active or low active):
*/
- switch (mp_irqs[idx].irqflag & 0x03) {
- case 0:
+ switch (mp_irqs[idx].irqflag & MP_IRQPOL_MASK) {
+ case MP_IRQPOL_DEFAULT:
/* conforms to spec, ie. bus-type dependent polarity */
if (test_bit(bus, mp_bus_not_pci))
return default_ISA_polarity(idx);
else
return default_PCI_polarity(idx);
- case 1:
+ case MP_IRQPOL_ACTIVE_HIGH:
return IOAPIC_POL_HIGH;
- case 2:
+ case MP_IRQPOL_RESERVED:
pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
- case 3:
+ case MP_IRQPOL_ACTIVE_LOW:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_POL_LOW;
}
@@ -845,8 +845,8 @@ static int irq_trigger(int idx)
/*
* Determine IRQ trigger mode (edge or level sensitive):
*/
- switch ((mp_irqs[idx].irqflag >> 2) & 0x03) {
- case 0:
+ switch (mp_irqs[idx].irqflag & MP_IRQTRIG_MASK) {
+ case MP_IRQTRIG_DEFAULT:
/* conforms to spec, ie. bus-type dependent trigger mode */
if (test_bit(bus, mp_bus_not_pci))
trigger = default_ISA_trigger(idx);
@@ -854,11 +854,11 @@ static int irq_trigger(int idx)
trigger = default_PCI_trigger(idx);
/* Take EISA into account */
return eisa_irq_trigger(idx, bus, trigger);
- case 1:
+ case MP_IRQTRIG_EDGE:
return IOAPIC_EDGE;
- case 2:
+ case MP_IRQTRIG_RESERVED:
pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
- case 3:
+ case MP_IRQTRIG_LEVEL:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_LEVEL;
}
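The named macros replacing the magic numbers follow the MP table INTSRC flag layout (bits 0-1 polarity, bits 2-3 trigger). Their values are presumably along these lines, reconstructed here for reference only; they are not part of this hunk, and asm/mpspec_def.h holds the authoritative definitions:

	#define MP_IRQPOL_DEFAULT	0x0
	#define MP_IRQPOL_ACTIVE_HIGH	0x1
	#define MP_IRQPOL_RESERVED	0x2
	#define MP_IRQPOL_ACTIVE_LOW	0x3
	#define MP_IRQPOL_MASK		0x3

	#define MP_IRQTRIG_DEFAULT	0x0
	#define MP_IRQTRIG_EDGE		0x4
	#define MP_IRQTRIG_RESERVED	0x8
	#define MP_IRQTRIG_LEVEL	0xc
	#define MP_IRQTRIG_MASK		0xc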
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f8b03bb8e725..3cc471beb50b 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -542,14 +542,17 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
err = assign_irq_vector_policy(irqd, info);
trace_vector_setup(virq + i, false, err);
- if (err)
+ if (err) {
+ irqd->chip_data = NULL;
+ free_apic_chip_data(apicd);
goto error;
+ }
}
return 0;
error:
- x86_vector_free_irqs(domain, virq, i + 1);
+ x86_vector_free_irqs(domain, virq, i);
return err;
}
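
The fix releases the failing element's chip data by hand and then unwinds only
the i fully constructed predecessors; the old "i + 1" walked into the
half-initialized slot and freed it a second time. The same shape in plain C
(all names illustrative):

#include <stdlib.h>

struct elem {
	void *data;
};

static int assign_resource(struct elem *e)
{
	(void)e;
	return 0;		/* the step that can fail mid-loop */
}

static int setup_one(struct elem *e)
{
	e->data = malloc(16);
	if (!e->data)
		return -1;
	if (assign_resource(e) < 0) {
		free(e->data);	/* undo this slot's partial setup here */
		e->data = NULL;
		return -1;
	}
	return 0;
}

static int setup_many(struct elem *v, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (setup_one(&v[i]) < 0)
			goto unwind;
	return 0;

unwind:
	while (--i >= 0) {	/* roll back i slots, not i + 1 */
		free(v[i].data);
		v[i].data = NULL;
	}
	return -1;
}

int main(void)
{
	struct elem v[4] = { { 0 } };

	return setup_many(v, 4);
}
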
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index e1b8e8bf6b3c..46b675aaf20b 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -137,6 +137,8 @@ static int __init early_get_pnodeid(void)
case UV3_HUB_PART_NUMBER_X:
uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
break;
+
+ /* Update: UV4A has only a modified revision to indicate HUB fixes */
case UV4_HUB_PART_NUMBER:
uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */
@@ -316,6 +318,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
} else if (!strcmp(oem_table_id, "UVH")) {
/* Only UV1 systems: */
uv_system_type = UV_NON_UNIQUE_APIC;
+ x86_platform.legacy.warm_reset = 0;
__this_cpu_write(x2apic_extra_bits, pnodeid << uvh_apicid.s.pnode_shift);
uv_set_apicid_hibit();
uv_apic = 1;
@@ -767,6 +770,7 @@ static __init void map_gru_high(int max_pnode)
return;
}
+ /* Only UV3 has distributed GRU mode */
if (is_uv3_hub() && gru.s3.mode) {
map_gru_distributed(gru.v);
return;
@@ -790,63 +794,61 @@ static __init void map_mmr_high(int max_pnode)
pr_info("UV: MMR disabled\n");
}
-/*
- * This commonality works because both 0 & 1 versions of the MMIOH OVERLAY
- * and REDIRECT MMR regs are exactly the same on UV3.
- */
-struct mmioh_config {
- unsigned long overlay;
- unsigned long redirect;
- char *id;
-};
-
-static __initdata struct mmioh_config mmiohs[] = {
- {
- UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
- UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
- "MMIOH0"
- },
- {
- UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
- UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
- "MMIOH1"
- },
-};
-
-/* UV3 & UV4 have identical MMIOH overlay configs */
-static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
+/* UV3/4 have identical MMIOH overlay configs, UV4A is slightly different */
+static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
{
- union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
+ unsigned long overlay;
unsigned long mmr;
unsigned long base;
+ unsigned long nasid_mask;
+ unsigned long m_overlay;
int i, n, shift, m_io, max_io;
int nasid, lnasid, fi, li;
char *id;
- id = mmiohs[index].id;
- overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
-
- pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n", id, overlay.v, overlay.s3.base, overlay.s3.m_io);
- if (!overlay.s3.enable) {
+ if (index == 0) {
+ id = "MMIOH0";
+ m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR;
+ overlay = uv_read_local_mmr(m_overlay);
+ base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK;
+ mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR;
+ m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
+ >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
+ shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
+ n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
+ nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK;
+ } else {
+ id = "MMIOH1";
+ m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR;
+ overlay = uv_read_local_mmr(m_overlay);
+ base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK;
+ mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR;
+ m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
+ >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
+ shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
+ n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH;
+ nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK;
+ }
+ pr_info("UV: %s overlay 0x%lx base:0x%lx m_io:%d\n", id, overlay, base, m_io);
+ if (!(overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)) {
pr_info("UV: %s disabled\n", id);
return;
}
- shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
- base = (unsigned long)overlay.s3.base;
- m_io = overlay.s3.m_io;
- mmr = mmiohs[index].redirect;
- n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
/* Convert to NASID: */
min_pnode *= 2;
max_pnode *= 2;
max_io = lnasid = fi = li = -1;
for (i = 0; i < n; i++) {
- union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;
+ unsigned long m_redirect = mmr + i * 8;
+ unsigned long redirect = uv_read_local_mmr(m_redirect);
+
+ nasid = redirect & nasid_mask;
+ if (i == 0)
+ pr_info("UV: %s redirect base 0x%lx(@0x%lx) 0x%04x\n",
+ id, redirect, m_redirect, nasid);
- redirect.v = uv_read_local_mmr(mmr + i * 8);
- nasid = redirect.s3.nasid;
/* Invalid NASID: */
if (nasid < min_pnode || max_pnode < nasid)
nasid = -1;
@@ -894,8 +896,8 @@ static __init void map_mmioh_high(int min_pnode, int max_pnode)
if (is_uv3_hub() || is_uv4_hub()) {
/* Map both MMIOH regions: */
- map_mmioh_high_uv3(0, min_pnode, max_pnode);
- map_mmioh_high_uv3(1, min_pnode, max_pnode);
+ map_mmioh_high_uv34(0, min_pnode, max_pnode);
+ map_mmioh_high_uv34(1, min_pnode, max_pnode);
return;
}
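
With the per-revision MMR unions gone, every field read becomes an explicit
mask-and-shift on a raw value, as in the m_io extraction above. The idiom in
isolation (field placement here is illustrative, not the real UV register
layout):

#include <stdio.h>

/* illustrative placement: a 6-bit M_IO field at bits 46-51 */
#define M_IO_SHFT	46
#define M_IO_MASK	(0x3fULL << M_IO_SHFT)

static unsigned long long get_field(unsigned long long reg,
				    unsigned long long mask,
				    unsigned int shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	unsigned long long overlay = 0x2aULL << M_IO_SHFT;

	printf("m_io = %llu\n", get_field(overlay, M_IO_MASK, M_IO_SHFT));
	return 0;
}
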
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index e4b0d92b3ae0..ab1865342002 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1506,7 +1506,7 @@ static ssize_t do_read(struct file *fp, char __user *buf, size_t count, loff_t *
return 0;
}
-static unsigned int do_poll(struct file *fp, poll_table *wait)
+static __poll_t do_poll(struct file *fp, poll_table *wait)
{
struct apm_user *as;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bcb75dc97d44..ea831c858195 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -829,8 +829,32 @@ static void init_amd(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_K8);
if (cpu_has(c, X86_FEATURE_XMM2)) {
- /* MFENCE stops RDTSC speculation */
- set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+ unsigned long long val;
+ int ret;
+
+ /*
+ * A serializing LFENCE has less overhead than MFENCE, so
+ * use it for execution serialization. On families which
+ * don't have that MSR, LFENCE is already serializing.
+ * msr_set_bit() uses the safe accessors, too, even if the MSR
+ * is not present.
+ */
+ msr_set_bit(MSR_F10H_DECFG,
+ MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+ /*
+ * Verify that the MSR write was successful (could be running
+ * under a hypervisor) and only then assume that LFENCE is
+ * serializing.
+ */
+ ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+ if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+ /* A serializing LFENCE stops RDTSC speculation */
+ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+ } else {
+ /* MFENCE stops RDTSC speculation */
+ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+ }
}
/*
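
Note the hunk does not trust its own MSR write: it sets the bit with the
fault-safe helper, reads the register back, and only then advertises
LFENCE_RDTSC, since a hypervisor may silently swallow the write. The
write-then-verify shape, reduced to a user-space sketch over a fake register
(bit position and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define DECFG_LFENCE_SERIALIZE (1ULL << 1)	/* illustrative bit position */

static unsigned long long fake_msr;	/* stands in for MSR_F10H_DECFG */
static bool writes_ignored = true;	/* e.g. a hypervisor swallowing them */

static void msr_set_bit(unsigned long long bit)
{
	if (!writes_ignored)		/* fault-safe write that may not stick */
		fake_msr |= bit;
}

int main(void)
{
	msr_set_bit(DECFG_LFENCE_SERIALIZE);

	/* decide from what the register reports, not from what was written */
	if (fake_msr & DECFG_LFENCE_SERIALIZE)
		puts("LFENCE serializing: use it to stop RDTSC speculation");
	else
		puts("fall back to MFENCE for RDTSC");
	return 0;
}
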
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ba0b2424c9b0..3bfb2b23d79c 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -10,6 +10,11 @@
*/
#include <linux/init.h>
#include <linux/utsname.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+
+#include <asm/nospec-branch.h>
+#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
@@ -19,6 +24,9 @@
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
+#include <asm/intel-family.h>
+
+static void __init spectre_v2_select_mitigation(void);
void __init check_bugs(void)
{
@@ -29,6 +37,9 @@ void __init check_bugs(void)
print_cpu_info(&boot_cpu_data);
}
+ /* Select the proper spectre mitigation before patching alternatives */
+ spectre_v2_select_mitigation();
+
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@@ -60,3 +71,249 @@ void __init check_bugs(void)
set_memory_4k((unsigned long)__va(0), 1);
#endif
}
+
+/* The kernel command line selection */
+enum spectre_v2_mitigation_cmd {
+ SPECTRE_V2_CMD_NONE,
+ SPECTRE_V2_CMD_AUTO,
+ SPECTRE_V2_CMD_FORCE,
+ SPECTRE_V2_CMD_RETPOLINE,
+ SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+ SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+static const char *spectre_v2_strings[] = {
+ [SPECTRE_V2_NONE] = "Vulnerable",
+ [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
+ [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
+ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
+ [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+};
+
+#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V2 : " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
+#ifdef RETPOLINE
+static bool spectre_v2_bad_module;
+
+bool retpoline_module_ok(bool has_retpoline)
+{
+ if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
+ return true;
+
+ pr_err("System may be vunerable to spectre v2\n");
+ spectre_v2_bad_module = true;
+ return false;
+}
+
+static inline const char *spectre_v2_module_string(void)
+{
+ return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
+}
+#else
+static inline const char *spectre_v2_module_string(void) { return ""; }
+#endif
+
+static void __init spec2_print_if_insecure(const char *reason)
+{
+ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ pr_info("%s\n", reason);
+}
+
+static void __init spec2_print_if_secure(const char *reason)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ pr_info("%s\n", reason);
+}
+
+static inline bool retp_compiler(void)
+{
+ return __is_defined(RETPOLINE);
+}
+
+static inline bool match_option(const char *arg, int arglen, const char *opt)
+{
+ int len = strlen(opt);
+
+ return len == arglen && !strncmp(arg, opt, len);
+}
+
+static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+{
+ char arg[20];
+ int ret;
+
+ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+ sizeof(arg));
+ if (ret > 0) {
+ if (match_option(arg, ret, "off")) {
+ goto disable;
+ } else if (match_option(arg, ret, "on")) {
+ spec2_print_if_secure("force enabled on command line.");
+ return SPECTRE_V2_CMD_FORCE;
+ } else if (match_option(arg, ret, "retpoline")) {
+ spec2_print_if_insecure("retpoline selected on command line.");
+ return SPECTRE_V2_CMD_RETPOLINE;
+ } else if (match_option(arg, ret, "retpoline,amd")) {
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+ pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+ return SPECTRE_V2_CMD_AUTO;
+ }
+ spec2_print_if_insecure("AMD retpoline selected on command line.");
+ return SPECTRE_V2_CMD_RETPOLINE_AMD;
+ } else if (match_option(arg, ret, "retpoline,generic")) {
+ spec2_print_if_insecure("generic retpoline selected on command line.");
+ return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
+ } else if (match_option(arg, ret, "auto")) {
+ return SPECTRE_V2_CMD_AUTO;
+ }
+ }
+
+ if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+ return SPECTRE_V2_CMD_AUTO;
+disable:
+ spec2_print_if_insecure("disabled on command line.");
+ return SPECTRE_V2_CMD_NONE;
+}
+
+/* Check for Skylake-like CPUs (for RSB handling) */
+static bool __init is_skylake_era(void)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6) {
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
+ return true;
+ }
+ }
+ return false;
+}
+
+static void __init spectre_v2_select_mitigation(void)
+{
+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+ enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
+
+ /*
+ * If the CPU is not affected and the command line mode is NONE or AUTO
+ * then nothing to do.
+ */
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
+ (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
+ return;
+
+ switch (cmd) {
+ case SPECTRE_V2_CMD_NONE:
+ return;
+
+ case SPECTRE_V2_CMD_FORCE:
+ /* FALLTHRU */
+ case SPECTRE_V2_CMD_AUTO:
+ goto retpoline_auto;
+
+ case SPECTRE_V2_CMD_RETPOLINE_AMD:
+ if (IS_ENABLED(CONFIG_RETPOLINE))
+ goto retpoline_amd;
+ break;
+ case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
+ if (IS_ENABLED(CONFIG_RETPOLINE))
+ goto retpoline_generic;
+ break;
+ case SPECTRE_V2_CMD_RETPOLINE:
+ if (IS_ENABLED(CONFIG_RETPOLINE))
+ goto retpoline_auto;
+ break;
+ }
+ pr_err("kernel not compiled with retpoline; no mitigation available!");
+ return;
+
+retpoline_auto:
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ retpoline_amd:
+ if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+ pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+ goto retpoline_generic;
+ }
+ mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+ SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+ } else {
+ retpoline_generic:
+ mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+ SPECTRE_V2_RETPOLINE_MINIMAL;
+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+ }
+
+ spectre_v2_enabled = mode;
+ pr_info("%s\n", spectre_v2_strings[mode]);
+
+ /*
+ * If neither SMEP nor KPTI is available, there is a risk of
+ * hitting userspace addresses in the RSB after a context switch
+ * from a shallow call stack to a deeper one. To prevent this fill
+ * the entire RSB, even when using IBRS.
+ *
+ * Skylake era CPUs have a separate issue with *underflow* of the
+ * RSB, when they will predict 'ret' targets from the generic BTB.
+ * The proper mitigation for this is IBRS. If IBRS is not supported
+ * or deactivated in favour of retpolines the RSB fill on context
+ * switch is required.
+ */
+ if ((!boot_cpu_has(X86_FEATURE_PTI) &&
+ !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Filling RSB on context switch\n");
+ }
+
+ /* Initialize Indirect Branch Prediction Barrier if supported */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ pr_info("Enabling Indirect Branch Prediction Barrier\n");
+ }
+}
+
+#undef pr_fmt
+
+#ifdef CONFIG_SYSFS
+ssize_t cpu_show_meltdown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+ return sprintf(buf, "Not affected\n");
+ if (boot_cpu_has(X86_FEATURE_PTI))
+ return sprintf(buf, "Mitigation: PTI\n");
+ return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+ return sprintf(buf, "Not affected\n");
+ return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return sprintf(buf, "Not affected\n");
+
+ return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ spectre_v2_module_string());
+}
+#endif
+
+void __ibp_barrier(void)
+{
+ __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
+}
+EXPORT_SYMBOL_GPL(__ibp_barrier);
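
cmdline_find_option() returns a token length rather than a NUL-terminated
string, which is why match_option() insists on an exact length before
strncmp(); strncmp() alone would let "retpoline" match the longer
"retpoline,amd". The helper verbatim, wrapped in a standalone program:

#include <stdio.h>
#include <string.h>

static int match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

int main(void)
{
	const char *arg = "retpoline,amd";	/* token as found, length-bounded */
	int arglen = 13;

	printf("%d\n", match_option(arg, arglen, "retpoline"));	/* 0 */
	printf("%d\n", match_option(arg, arglen, "retpoline,amd"));	/* 1 */
	return 0;
}
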
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 68bc6d9b3132..c578cd29c2d2 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -106,6 +106,10 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
+ if (c->x86_power & (1 << 8)) {
+ set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+ set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+ }
}
static void init_centaur(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c47de4ebf63a..c7c996a692fd 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -47,6 +47,8 @@
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
@@ -769,6 +771,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_7_0_EBX] = ebx;
c->x86_capability[CPUID_7_ECX] = ecx;
+ c->x86_capability[CPUID_7_EDX] = edx;
}
/* Extended state features: level 0x0000000d */
@@ -876,6 +879,41 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}
+static const __initdata struct x86_cpu_id cpu_no_speculation[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_CENTAUR, 5 },
+ { X86_VENDOR_INTEL, 5 },
+ { X86_VENDOR_NSC, 5 },
+ { X86_VENDOR_ANY, 4 },
+ {}
+};
+
+static const __initdata struct x86_cpu_id cpu_no_meltdown[] = {
+ { X86_VENDOR_AMD },
+ {}
+};
+
+static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+{
+ u64 ia32_cap = 0;
+
+ if (x86_match_cpu(cpu_no_meltdown))
+ return false;
+
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+ return false;
+
+ return true;
+}
+
/*
* Do minimum CPU detection early.
* Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -923,8 +961,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ALWAYS);
- /* Assume for now that ALL x86 CPUs are insecure */
- setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
+ if (!x86_match_cpu(cpu_no_speculation)) {
+ if (cpu_vulnerable_to_meltdown(c))
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ }
fpu__init_system(c);
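
The new detection layers three short-circuiting checks: a table of CPUs that
do not speculate at all, a vendor table known immune to Meltdown, and the
ARCH_CAPABILITIES RDCL_NO bit. The same structure with stand-in data (the
vendor/family values and the assumed RDCL_NO bit position are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct cpu_id { int vendor; int family; };

/* stand-in quirk table; the real one lists pre-speculation Atoms etc. */
static const struct cpu_id no_speculation[] = {
	{ .vendor = 2, .family = 4 },
};

static bool match_cpu(const struct cpu_id *tbl, unsigned int n,
		      int vendor, int family)
{
	for (unsigned int i = 0; i < n; i++)
		if (tbl[i].vendor == vendor && tbl[i].family == family)
			return true;
	return false;
}

int main(void)
{
	int vendor = 0, family = 6;
	unsigned long long arch_cap = 0;	/* MSR value, when present */
	bool rdcl_no = arch_cap & (1ULL << 0);	/* assumed RDCL_NO position */

	if (match_cpu(no_speculation, 1, vendor, family))
		puts("no speculation: not affected at all");
	else if (rdcl_no)
		puts("RDCL_NO: Meltdown-free, Spectre bugs still set");
	else
		puts("assume Meltdown plus Spectre v1/v2");
	return 0;
}
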
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index bea8d3e24f50..479ca4728de0 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -31,6 +31,7 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
extern const struct hypervisor_x86 x86_hyper_xen_pv;
extern const struct hypervisor_x86 x86_hyper_xen_hvm;
extern const struct hypervisor_x86 x86_hyper_kvm;
+extern const struct hypervisor_x86 x86_hyper_jailhouse;
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
@@ -45,6 +46,9 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] =
#ifdef CONFIG_KVM_GUEST
&x86_hyper_kvm,
#endif
+#ifdef CONFIG_JAILHOUSE_GUEST
+ &x86_hyper_jailhouse,
+#endif
};
enum x86_hypervisor_type x86_hyper_type;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b1af22073e28..6936d14d4c77 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -102,6 +102,59 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}
+/*
+ * Early microcode releases for the Spectre v2 mitigation were broken.
+ * Information taken from;
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://kb.vmware.com/s/article/52345
+ * - Microcode revisions observed in the wild
+ * - Release note from 20180108 microcode release
+ */
+struct sku_microcode {
+ u8 model;
+ u8 stepping;
+ u32 microcode;
+};
+static const struct sku_microcode spectre_bad_microcodes[] = {
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
+ { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
+ { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 },
+ { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
+ { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
+ { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
+ { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
+ { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
+ { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
+ { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
+ /* Updated in the 20180108 release; blacklist until we know otherwise */
+ { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
+ /* Observed in the wild */
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
+};
+
+static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_mask == spectre_bad_microcodes[i].stepping)
+ return (c->microcode <= spectre_bad_microcodes[i].microcode);
+ }
+ return false;
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -123,6 +176,30 @@ static void early_init_intel(struct cpuinfo_x86 *c)
c->microcode = intel_get_microcode_revision();
/*
+ * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
+ * and they also have a different bit for STIBP support. Also,
+ * a hypervisor might have set the individual AMD bits even on
+ * Intel CPUs, for finer-grained selection of what's available.
+ */
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+ }
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
+ /* Now if any of them are set, check the blacklist and clear the lot */
+ if ((cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
+ cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
+ pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
+ clear_cpu_cap(c, X86_FEATURE_IBRS);
+ clear_cpu_cap(c, X86_FEATURE_IBPB);
+ clear_cpu_cap(c, X86_FEATURE_STIBP);
+ clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
+ clear_cpu_cap(c, X86_FEATURE_INTEL_STIBP);
+ }
+
+ /*
* Atom erratum AAE44/AAF40/AAG38/AAH41:
*
* A race condition between speculative fetches and invalidating
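
bad_spectre_microcode() above is a linear scan keyed on model and stepping,
treating any revision at or below the listed one as broken ("<=", because the
listed revision is itself the faulty one). Reduced to a runnable sketch with
one illustrative table entry:

#include <stdbool.h>
#include <stdio.h>

struct sku_microcode {
	unsigned char model;
	unsigned char stepping;
	unsigned int microcode;		/* last known-bad revision */
};

/* one illustrative entry; the real table is above in the hunk */
static const struct sku_microcode bad[] = {
	{ 0x4e, 0x03, 0xc2 },		/* SKYLAKE_MOBILE, stepping 3 */
};

static bool bad_spectre_microcode(unsigned char model,
				  unsigned char stepping, unsigned int rev)
{
	for (unsigned int i = 0; i < sizeof(bad) / sizeof(bad[0]); i++)
		if (bad[i].model == model && bad[i].stepping == stepping)
			return rev <= bad[i].microcode;
	return false;
}

int main(void)
{
	printf("%d\n", bad_spectre_microcode(0x4e, 0x03, 0xc2));	/* 1 */
	printf("%d\n", bad_spectre_microcode(0x4e, 0x03, 0xc6));	/* 0 */
	return 0;
}
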
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 88dcf8479013..410629f10ad3 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -135,6 +135,40 @@ struct rdt_resource rdt_resources_all[] = {
.format_str = "%d=%0*x",
.fflags = RFTYPE_RES_CACHE,
},
+ [RDT_RESOURCE_L2DATA] =
+ {
+ .rid = RDT_RESOURCE_L2DATA,
+ .name = "L2DATA",
+ .domains = domain_init(RDT_RESOURCE_L2DATA),
+ .msr_base = IA32_L2_CBM_BASE,
+ .msr_update = cat_wrmsr,
+ .cache_level = 2,
+ .cache = {
+ .min_cbm_bits = 1,
+ .cbm_idx_mult = 2,
+ .cbm_idx_offset = 0,
+ },
+ .parse_ctrlval = parse_cbm,
+ .format_str = "%d=%0*x",
+ .fflags = RFTYPE_RES_CACHE,
+ },
+ [RDT_RESOURCE_L2CODE] =
+ {
+ .rid = RDT_RESOURCE_L2CODE,
+ .name = "L2CODE",
+ .domains = domain_init(RDT_RESOURCE_L2CODE),
+ .msr_base = IA32_L2_CBM_BASE,
+ .msr_update = cat_wrmsr,
+ .cache_level = 2,
+ .cache = {
+ .min_cbm_bits = 1,
+ .cbm_idx_mult = 2,
+ .cbm_idx_offset = 1,
+ },
+ .parse_ctrlval = parse_cbm,
+ .format_str = "%d=%0*x",
+ .fflags = RFTYPE_RES_CACHE,
+ },
[RDT_RESOURCE_MBA] =
{
.rid = RDT_RESOURCE_MBA,
@@ -259,15 +293,15 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
r->alloc_enabled = true;
}
-static void rdt_get_cdp_l3_config(int type)
+static void rdt_get_cdp_config(int level, int type)
{
- struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+ struct rdt_resource *r_l = &rdt_resources_all[level];
struct rdt_resource *r = &rdt_resources_all[type];
- r->num_closid = r_l3->num_closid / 2;
- r->cache.cbm_len = r_l3->cache.cbm_len;
- r->default_ctrl = r_l3->default_ctrl;
- r->cache.shareable_bits = r_l3->cache.shareable_bits;
+ r->num_closid = r_l->num_closid / 2;
+ r->cache.cbm_len = r_l->cache.cbm_len;
+ r->default_ctrl = r_l->default_ctrl;
+ r->cache.shareable_bits = r_l->cache.shareable_bits;
r->data_width = (r->cache.cbm_len + 3) / 4;
r->alloc_capable = true;
/*
@@ -277,6 +311,18 @@ static void rdt_get_cdp_l3_config(int type)
r->alloc_enabled = false;
}
+static void rdt_get_cdp_l3_config(void)
+{
+ rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
+ rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
+}
+
+static void rdt_get_cdp_l2_config(void)
+{
+ rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
+ rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
+}
+
static int get_cache_id(int cpu, int level)
{
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
@@ -525,10 +571,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
*/
if (static_branch_unlikely(&rdt_mon_enable_key))
rmdir_mondata_subdir_allrdtgrp(r, d->id);
- kfree(d->ctrl_val);
- kfree(d->rmid_busy_llc);
- kfree(d->mbm_total);
- kfree(d->mbm_local);
list_del(&d->list);
if (is_mbm_enabled())
cancel_delayed_work(&d->mbm_over);
@@ -545,6 +587,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
cancel_delayed_work(&d->cqm_limbo);
}
+ kfree(d->ctrl_val);
+ kfree(d->rmid_busy_llc);
+ kfree(d->mbm_total);
+ kfree(d->mbm_local);
kfree(d);
return;
}
@@ -645,6 +691,7 @@ enum {
RDT_FLAG_L3_CAT,
RDT_FLAG_L3_CDP,
RDT_FLAG_L2_CAT,
+ RDT_FLAG_L2_CDP,
RDT_FLAG_MBA,
};
@@ -667,6 +714,7 @@ static struct rdt_options rdt_options[] __initdata = {
RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
+ RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2),
RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
};
#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
@@ -729,15 +777,15 @@ static __init bool get_rdt_alloc_resources(void)
if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
- if (rdt_cpu_has(X86_FEATURE_CDP_L3)) {
- rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
- rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
- }
+ if (rdt_cpu_has(X86_FEATURE_CDP_L3))
+ rdt_get_cdp_l3_config();
ret = true;
}
if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
/* CPUID 0x10.2 fields are same format as 0x10.1 */
rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+ if (rdt_cpu_has(X86_FEATURE_CDP_L2))
+ rdt_get_cdp_l2_config();
ret = true;
}
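
With CDP enabled each CLOSID consumes a data/code pair of bitmask MSRs, which
is why num_closid is halved and L2DATA/L2CODE carry cbm_idx_mult = 2 with
offsets 0 and 1. A sketch of the presumed index arithmetic (reconstructed; the
kernel computes this in a cbm_idx()-style helper):

#include <stdio.h>

/*
 * MSR slot for a given closid: with CDP the data and code masks
 * interleave within the same IA32_L2_CBM_BASE array.
 */
static unsigned int cbm_idx(unsigned int closid, unsigned int mult,
			    unsigned int offset)
{
	return closid * mult + offset;
}

int main(void)
{
	for (unsigned int closid = 0; closid < 3; closid++)
		printf("closid %u: data MSR +%u, code MSR +%u\n", closid,
		       cbm_idx(closid, 2, 0),	/* L2DATA: offset 0 */
		       cbm_idx(closid, 2, 1));	/* L2CODE: offset 1 */
	return 0;
}
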
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 3397244984f5..3fd7a70ee04a 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -7,12 +7,15 @@
#include <linux/jump_label.h>
#define IA32_L3_QOS_CFG 0xc81
+#define IA32_L2_QOS_CFG 0xc82
#define IA32_L3_CBM_BASE 0xc90
#define IA32_L2_CBM_BASE 0xd10
#define IA32_MBA_THRTL_BASE 0xd50
#define L3_QOS_CDP_ENABLE 0x01ULL
+#define L2_QOS_CDP_ENABLE 0x01ULL
+
/*
* Event IDs are used to program IA32_QM_EVTSEL before reading event
* counter from IA32_QM_CTR
@@ -357,6 +360,8 @@ enum {
RDT_RESOURCE_L3DATA,
RDT_RESOURCE_L3CODE,
RDT_RESOURCE_L2,
+ RDT_RESOURCE_L2DATA,
+ RDT_RESOURCE_L2CODE,
RDT_RESOURCE_MBA,
/* Must be the last */
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 64c5ff97ee0d..bdab7d2f51af 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -990,6 +990,7 @@ out_destroy:
kernfs_remove(kn);
return ret;
}
+
static void l3_qos_cfg_update(void *arg)
{
bool *enable = arg;
@@ -997,8 +998,17 @@ static void l3_qos_cfg_update(void *arg)
wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}
-static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
+static void l2_qos_cfg_update(void *arg)
{
+ bool *enable = arg;
+
+ wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
+}
+
+static int set_cache_qos_cfg(int level, bool enable)
+{
+ void (*update)(void *arg);
+ struct rdt_resource *r_l;
cpumask_var_t cpu_mask;
struct rdt_domain *d;
int cpu;
@@ -1006,16 +1016,24 @@ static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
return -ENOMEM;
- list_for_each_entry(d, &r->domains, list) {
+ if (level == RDT_RESOURCE_L3)
+ update = l3_qos_cfg_update;
+ else if (level == RDT_RESOURCE_L2)
+ update = l2_qos_cfg_update;
+ else
+ return -EINVAL;
+
+ r_l = &rdt_resources_all[level];
+ list_for_each_entry(d, &r_l->domains, list) {
/* Pick one CPU from each domain instance to update MSR */
cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
}
cpu = get_cpu();
/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
- l3_qos_cfg_update(&enable);
+ update(&enable);
/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
- smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
+ smp_call_function_many(cpu_mask, update, &enable, 1);
put_cpu();
free_cpumask_var(cpu_mask);
@@ -1023,52 +1041,99 @@ static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
return 0;
}
-static int cdp_enable(void)
+static int cdp_enable(int level, int data_type, int code_type)
{
- struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
- struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
- struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+ struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
+ struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
+ struct rdt_resource *r_l = &rdt_resources_all[level];
int ret;
- if (!r_l3->alloc_capable || !r_l3data->alloc_capable ||
- !r_l3code->alloc_capable)
+ if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
+ !r_lcode->alloc_capable)
return -EINVAL;
- ret = set_l3_qos_cfg(r_l3, true);
+ ret = set_cache_qos_cfg(level, true);
if (!ret) {
- r_l3->alloc_enabled = false;
- r_l3data->alloc_enabled = true;
- r_l3code->alloc_enabled = true;
+ r_l->alloc_enabled = false;
+ r_ldata->alloc_enabled = true;
+ r_lcode->alloc_enabled = true;
}
return ret;
}
-static void cdp_disable(void)
+static int cdpl3_enable(void)
{
- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+ return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
+ RDT_RESOURCE_L3CODE);
+}
+
+static int cdpl2_enable(void)
+{
+ return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
+ RDT_RESOURCE_L2CODE);
+}
+
+static void cdp_disable(int level, int data_type, int code_type)
+{
+ struct rdt_resource *r = &rdt_resources_all[level];
r->alloc_enabled = r->alloc_capable;
- if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) {
- rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled = false;
- rdt_resources_all[RDT_RESOURCE_L3CODE].alloc_enabled = false;
- set_l3_qos_cfg(r, false);
+ if (rdt_resources_all[data_type].alloc_enabled) {
+ rdt_resources_all[data_type].alloc_enabled = false;
+ rdt_resources_all[code_type].alloc_enabled = false;
+ set_cache_qos_cfg(level, false);
}
}
+static void cdpl3_disable(void)
+{
+ cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
+}
+
+static void cdpl2_disable(void)
+{
+ cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
+}
+
+static void cdp_disable_all(void)
+{
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
+ cdpl3_disable();
+ if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
+ cdpl2_disable();
+}
+
static int parse_rdtgroupfs_options(char *data)
{
char *token, *o = data;
int ret = 0;
while ((token = strsep(&o, ",")) != NULL) {
- if (!*token)
- return -EINVAL;
+ if (!*token) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (!strcmp(token, "cdp"))
- ret = cdp_enable();
+ if (!strcmp(token, "cdp")) {
+ ret = cdpl3_enable();
+ if (ret)
+ goto out;
+ } else if (!strcmp(token, "cdpl2")) {
+ ret = cdpl2_enable();
+ if (ret)
+ goto out;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
}
+ return 0;
+
+out:
+ pr_err("Invalid mount option \"%s\"\n", token);
+
return ret;
}
@@ -1223,7 +1288,7 @@ out_mongrp:
out_info:
kernfs_remove(kn_info);
out_cdp:
- cdp_disable();
+ cdp_disable_all();
out:
rdt_last_cmd_clear();
mutex_unlock(&rdtgroup_mutex);
@@ -1383,7 +1448,7 @@ static void rdt_kill_sb(struct super_block *sb)
/*Put everything back to default values. */
for_each_alloc_enabled_rdt_resource(r)
reset_all_ctrls(r);
- cdp_disable();
+ cdp_disable_all();
rmdir_all_sub();
static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
static_branch_disable_cpuslocked(&rdt_mon_enable_key);
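
parse_rdtgroupfs_options() now routes every failure through a single exit that
still has the offending token in scope for the error message. The strsep()
walk as a standalone program, with the two option names from the hunk:

#include <stdio.h>
#include <string.h>

static int parse_options(char *data)
{
	char *token, *o = data;

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			goto out;
		if (!strcmp(token, "cdp"))
			puts("enable L3 CDP");
		else if (!strcmp(token, "cdpl2"))
			puts("enable L2 CDP");
		else
			goto out;
	}
	return 0;

out:
	fprintf(stderr, "Invalid mount option \"%s\"\n", token);
	return -1;
}

int main(void)
{
	char opts[] = "cdp,cdpl2";	/* strsep() modifies its input */

	return parse_options(opts) ? 1 : 0;
}
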
diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
index 7f85b76f43bc..213e8c2ca702 100644
--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
@@ -243,7 +243,7 @@ out:
return err ? err : buf - ubuf;
}
-static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
+static __poll_t mce_chrdev_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &mce_chrdev_wait, wait);
if (READ_ONCE(mcelog.next))
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 4ca632a06e0b..5bbd06f38ff6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -59,6 +59,7 @@ static struct severity {
#define MCGMASK(x, y) .mcgmask = x, .mcgres = y
#define MASK(x, y) .mask = x, .result = y
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
+#define MCI_UC_AR (MCI_STATUS_UC|MCI_STATUS_AR)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
@@ -101,6 +102,22 @@ static struct severity {
NOSER, BITCLR(MCI_STATUS_UC)
),
+ /*
+ * known AO MCACODs reported via MCE or CMC:
+ *
+ * SRAO could be signaled either via a machine check exception or
+ * CMCI with the corresponding bit S 1 or 0. So we don't need to
+ * check bit S for SRAO.
+ */
+ MCESEV(
+ AO, "Action optional: memory scrubbing error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
+ ),
+ MCESEV(
+ AO, "Action optional: last level cache writeback error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
+ ),
+
/* ignore OVER for UCNA */
MCESEV(
UCNA, "Uncorrected no action required",
@@ -149,15 +166,6 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
),
- /* known AO MCACODs: */
- MCESEV(
- AO, "Action optional: memory scrubbing error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD_SCRUBMSK, MCI_UC_S|MCACOD_SCRUB)
- ),
- MCESEV(
- AO, "Action optional: last level cache writeback error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|MCACOD_L3WB)
- ),
MCESEV(
SOME, "Action optional: unknown MCACOD",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index b1d616d08eee..ba1f9555fbc5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -503,10 +503,8 @@ static int mce_usable_address(struct mce *m)
bool mce_is_memory_error(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD) {
- /* ErrCodeExt[20:16] */
- u8 xec = (m->status >> 16) & 0x1f;
+ return amd_mce_is_memory_error(m);
- return (xec == 0x0 || xec == 0x8);
} else if (m->cpuvendor == X86_VENDOR_INTEL) {
/*
* Intel SDM Volume 3B - 15.9.2 Compound Error Codes
@@ -530,6 +528,17 @@ bool mce_is_memory_error(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
+static bool mce_is_correctable(struct mce *m)
+{
+ if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
+ return false;
+
+ if (m->status & MCI_STATUS_UC)
+ return false;
+
+ return true;
+}
+
static bool cec_add_mce(struct mce *m)
{
if (!m)
@@ -537,7 +546,7 @@ static bool cec_add_mce(struct mce *m)
/* We eat only correctable DRAM errors with usable addresses. */
if (mce_is_memory_error(m) &&
- !(m->status & MCI_STATUS_UC) &&
+ mce_is_correctable(m) &&
mce_usable_address(m))
if (!cec_add_elem(m->addr >> PAGE_SHIFT))
return true;
@@ -582,7 +591,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
pfn = mce->addr >> PAGE_SHIFT;
- memory_failure(pfn, MCE_VECTOR, 0);
+ memory_failure(pfn, 0);
}
return NOTIFY_OK;
@@ -1046,7 +1055,7 @@ static int do_memory_failure(struct mce *m)
pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
if (!(m->mcgstatus & MCG_STATUS_RIPV))
flags |= MF_MUST_KILL;
- ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
+ ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
if (ret)
pr_err("Memory error not recovered");
return ret;
@@ -1325,7 +1334,7 @@ out_ist:
EXPORT_SYMBOL_GPL(do_machine_check);
#ifndef CONFIG_MEMORY_FAILURE
-int memory_failure(unsigned long pfn, int vector, int flags)
+int memory_failure(unsigned long pfn, int flags)
{
/* mce_severity() should not hand us an ACTION_REQUIRED error */
BUG_ON(flags & MF_ACTION_REQUIRED);
@@ -1785,6 +1794,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
void (*machine_check_vector)(struct pt_regs *, long error_code) =
unexpected_machine_check;
+dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
+{
+ machine_check_vector(regs, error_code);
+}
+
/*
* Called for each booted CPU to set up machine checks.
* Must be called with preempt off:
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 486f640b02ef..0f32ad242324 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -110,6 +110,20 @@ const char *smca_get_long_name(enum smca_bank_types t)
}
EXPORT_SYMBOL_GPL(smca_get_long_name);
+static enum smca_bank_types smca_get_bank_type(struct mce *m)
+{
+ struct smca_bank *b;
+
+ if (m->bank >= N_SMCA_BANK_TYPES)
+ return N_SMCA_BANK_TYPES;
+
+ b = &smca_banks[m->bank];
+ if (!b->hwid)
+ return N_SMCA_BANK_TYPES;
+
+ return b->hwid->bank_type;
+}
+
static struct smca_hwid smca_hwid_mcatypes[] = {
/* { bank_type, hwid_mcatype, xec_bitmap } */
@@ -407,7 +421,9 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
(deferred_error_int_vector != amd_deferred_error_interrupt))
deferred_error_int_vector = amd_deferred_error_interrupt;
- low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
+ if (!mce_flags.smca)
+ low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
+
wrmsr(MSR_CU_DEF_ERR, low, high);
}
@@ -738,6 +754,17 @@ out_err:
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);
+bool amd_mce_is_memory_error(struct mce *m)
+{
+ /* ErrCodeExt[20:16] */
+ u8 xec = (m->status >> 16) & 0x1f;
+
+ if (mce_flags.smca)
+ return smca_get_bank_type(m) == SMCA_UMC && xec == 0x0;
+
+ return m->bank == 4 && xec == 0x8;
+}
+
static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
struct mce m;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index c4fa4a85d4cb..319dd65f98a2 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -239,7 +239,7 @@ static int __init save_microcode_in_initrd(void)
break;
case X86_VENDOR_AMD:
if (c->x86 >= 0x10)
- return save_microcode_in_initrd_amd(cpuid_eax(1));
+ ret = save_microcode_in_initrd_amd(cpuid_eax(1));
break;
default:
break;
@@ -560,7 +560,7 @@ static ssize_t pf_show(struct device *dev,
return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}
-static DEVICE_ATTR(reload, 0200, NULL, reload_store);
+static DEVICE_ATTR_WO(reload);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8ccdca6d3f9e..f7c55b0e753a 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;
+/* last level cache size per core */
+static int llc_size_per_core;
+
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
unsigned int s2, unsigned int p2)
{
@@ -910,8 +913,19 @@ static bool is_blacklisted(unsigned int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
- if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
- pr_err_once("late loading on model 79 is disabled.\n");
+ /*
+ * Late loading on model 79 with microcode revision less than 0x0b000021
+ * and LLC size per core bigger than 2.5MB may result in a system hang.
+ * This behavior is documented in item BDF90, #334165 (Intel Xeon
+ * Processor E7-8800/4800 v4 Product Family).
+ */
+ if (c->x86 == 6 &&
+ c->x86_model == INTEL_FAM6_BROADWELL_X &&
+ c->x86_mask == 0x01 &&
+ llc_size_per_core > 2621440 &&
+ c->microcode < 0x0b000021) {
+ pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+ pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
return true;
}
@@ -966,6 +980,15 @@ static struct microcode_ops microcode_intel_ops = {
.apply_microcode = apply_microcode_intel,
};
+static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+{
+ u64 llc_size = c->x86_cache_size * 1024;
+
+ do_div(llc_size, c->x86_max_cores);
+
+ return (int)llc_size;
+}
+
struct microcode_ops * __init init_intel_microcode(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -976,5 +999,7 @@ struct microcode_ops * __init init_intel_microcode(void)
return NULL;
}
+ llc_size_per_core = calc_llc_size_per_core(c);
+
return &microcode_intel_ops;
}
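
The erratum check needs LLC bytes per core: x86_cache_size is kept in KiB,
hence the * 1024, and do_div() is the kernel's in-place u64-by-u32 division
helper. In plain C the computation reduces to:

#include <stdio.h>

static int llc_size_per_core(unsigned int cache_size_kib,
			     unsigned int max_cores)
{
	unsigned long long llc = (unsigned long long)cache_size_kib * 1024;

	return (int)(llc / max_cores);	/* do_div(llc, max_cores) in-kernel */
}

int main(void)
{
	/* 40960 KiB shared across 16 cores lands exactly on the
	 * 2621440-byte (2.5 MiB) threshold used by the BDF90 check */
	printf("%d bytes/core\n", llc_size_per_core(40960, 16));
	return 0;
}
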
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 05459ad3db46..4075d2be5357 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -21,12 +21,10 @@ struct cpuid_bit {
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
- { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
{ X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
{ X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
{ X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
+ { X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 },
{ X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 },
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 5fa110699ed2..afbecff161d1 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -76,12 +76,23 @@ void show_iret_regs(struct pt_regs *regs)
regs->sp, regs->flags);
}
-static void show_regs_safe(struct stack_info *info, struct pt_regs *regs)
+static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
+ bool partial)
{
- if (on_stack(info, regs, sizeof(*regs)))
+ /*
+ * These on_stack() checks aren't strictly necessary: the unwind code
+ * has already validated the 'regs' pointer. The checks are done for
+ * ordering reasons: if the registers are on the next stack, we don't
+ * want to print them out yet. Otherwise they'll be shown as part of
+ * the wrong stack. Later, when show_trace_log_lvl() switches to the
+ * next stack, this function will be called again with the same regs so
+ * they can be printed in the right context.
+ */
+ if (!partial && on_stack(info, regs, sizeof(*regs))) {
__show_regs(regs, 0);
- else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
- IRET_FRAME_SIZE)) {
+
+ } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
+ IRET_FRAME_SIZE)) {
/*
* When an interrupt or exception occurs in entry code, the
* full pt_regs might not have been saved yet. In that case
@@ -98,11 +109,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
struct stack_info stack_info = {0};
unsigned long visit_mask = 0;
int graph_idx = 0;
+ bool partial;
printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack);
stack = stack ? : get_stack_pointer(task, regs);
+ regs = unwind_get_entry_regs(&state, &partial);
/*
* Iterate through the stacks, starting with the current stack pointer.
@@ -120,7 +133,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* - hardirq stack
* - entry stack
*/
- for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+ for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
const char *stack_name;
if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
@@ -140,7 +153,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk("%s <%s>\n", log_lvl, stack_name);
if (regs)
- show_regs_safe(&stack_info, regs);
+ show_regs_if_on_stack(&stack_info, regs, partial);
/*
* Scan the stack, printing any text addresses we find. At the
@@ -164,7 +177,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
/*
* Don't print regs->ip again if it was already printed
- * by show_regs_safe() below.
+ * by show_regs_if_on_stack().
*/
if (regs && stack == &regs->ip)
goto next;
@@ -199,9 +212,9 @@ next:
unwind_next_frame(&state);
/* if the frame has entry regs, print them */
- regs = unwind_get_entry_regs(&state);
+ regs = unwind_get_entry_regs(&state, &partial);
if (regs)
- show_regs_safe(&stack_info, regs);
+ show_regs_if_on_stack(&stack_info, regs, partial);
}
if (stack_name)
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index b6c6468e10bc..4c8440de3355 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -8,6 +8,7 @@
#include <asm/segment.h>
#include <asm/export.h>
#include <asm/ftrace.h>
+#include <asm/nospec-branch.h>
#ifdef CC_USING_FENTRY
# define function_hook __fentry__
@@ -197,7 +198,8 @@ ftrace_stub:
movl 0x4(%ebp), %edx
subl $MCOUNT_INSN_SIZE, %eax
- call *ftrace_trace_function
+ movl ftrace_trace_function, %ecx
+ CALL_NOSPEC %ecx
popl %edx
popl %ecx
@@ -241,5 +243,5 @@ return_to_handler:
movl %eax, %ecx
popl %edx
popl %eax
- jmp *%ecx
+ JMP_NOSPEC %ecx
#endif
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index c832291d948a..91b2cff4b79a 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -7,7 +7,8 @@
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
-
+#include <asm/nospec-branch.h>
+#include <asm/unwind_hints.h>
.code64
.section .entry.text, "ax"
@@ -20,7 +21,6 @@ EXPORT_SYMBOL(__fentry__)
EXPORT_SYMBOL(mcount)
#endif
-/* All cases save the original rbp (8 bytes) */
#ifdef CONFIG_FRAME_POINTER
# ifdef CC_USING_FENTRY
/* Save parent and function stack frames (rip and rbp) */
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(mcount)
# endif
#else
/* No need to save a stack frame */
-# define MCOUNT_FRAME_SIZE 8
+# define MCOUNT_FRAME_SIZE 0
#endif /* CONFIG_FRAME_POINTER */
/* Size of stack used to save mcount regs in save_mcount_regs */
@@ -64,10 +64,10 @@ EXPORT_SYMBOL(mcount)
*/
.macro save_mcount_regs added=0
- /* Always save the original rbp */
+#ifdef CONFIG_FRAME_POINTER
+ /* Save the original rbp */
pushq %rbp
-#ifdef CONFIG_FRAME_POINTER
/*
* Stack traces will stop at the ftrace trampoline if the frame pointer
* is not set up properly. If fentry is used, we need to save a frame
@@ -105,7 +105,11 @@ EXPORT_SYMBOL(mcount)
* Save the original RBP. Even though the mcount ABI does not
* require this, it helps out callers.
*/
+#ifdef CONFIG_FRAME_POINTER
movq MCOUNT_REG_SIZE-8(%rsp), %rdx
+#else
+ movq %rbp, %rdx
+#endif
movq %rdx, RBP(%rsp)
/* Copy the parent address into %rsi (second parameter) */
@@ -148,7 +152,7 @@ EXPORT_SYMBOL(mcount)
ENTRY(function_hook)
retq
-END(function_hook)
+ENDPROC(function_hook)
ENTRY(ftrace_caller)
/* save_mcount_regs fills in first two parameters */
@@ -184,7 +188,7 @@ GLOBAL(ftrace_graph_call)
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
retq
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
ENTRY(ftrace_regs_caller)
/* Save the current flags before any operations that can change them */
@@ -255,7 +259,7 @@ GLOBAL(ftrace_regs_caller_end)
jmp ftrace_epilogue
-END(ftrace_regs_caller)
+ENDPROC(ftrace_regs_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -286,12 +290,12 @@ trace:
* ip and parent ip are used and the list function is called when
* function tracing is enabled.
*/
- call *ftrace_trace_function
-
+ movq ftrace_trace_function, %r8
+ CALL_NOSPEC %r8
restore_mcount_regs
jmp fgraph_trace
-END(function_hook)
+ENDPROC(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -313,9 +317,10 @@ ENTRY(ftrace_graph_caller)
restore_mcount_regs
retq
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
-GLOBAL(return_to_handler)
+ENTRY(return_to_handler)
+ UNWIND_HINT_EMPTY
subq $24, %rsp
/* Save the return values */
@@ -329,5 +334,6 @@ GLOBAL(return_to_handler)
movq 8(%rsp), %rdx
movq (%rsp), %rax
addq $24, %rsp
- jmp *%rdi
+ JMP_NOSPEC %rdi
+END(return_to_handler)
#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 6a5d757b9cfd..7ba5d819ebe3 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
p = fixup_pointer(&phys_base, physaddr);
*p += load_delta - sme_get_me_mask();
- /* Encrypt the kernel (if SME is active) */
- sme_encrypt_kernel();
+ /* Encrypt the kernel and related (if SME is active) */
+ sme_encrypt_kernel(bp);
/*
* Return the SME encryption mask (if SME is active) to be used as a
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index d985cef3984f..56d99be3706a 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -56,7 +56,7 @@ struct idt_data {
* Early traps running on the DEFAULT_STACK because the other interrupt
* stacks work only after cpu_init().
*/
-static const __initdata struct idt_data early_idts[] = {
+static const __initconst struct idt_data early_idts[] = {
INTG(X86_TRAP_DB, debug),
SYSG(X86_TRAP_BP, int3),
#ifdef CONFIG_X86_32
@@ -70,7 +70,7 @@ static const __initdata struct idt_data early_idts[] = {
* the traps which use them are reinitialized with IST after cpu_init() has
* set up TSS.
*/
-static const __initdata struct idt_data def_idts[] = {
+static const __initconst struct idt_data def_idts[] = {
INTG(X86_TRAP_DE, divide_error),
INTG(X86_TRAP_NMI, nmi),
INTG(X86_TRAP_BR, bounds),
@@ -108,7 +108,7 @@ static const __initdata struct idt_data def_idts[] = {
/*
* The APIC and SMP idt entries
*/
-static const __initdata struct idt_data apic_idts[] = {
+static const __initconst struct idt_data apic_idts[] = {
#ifdef CONFIG_SMP
INTG(RESCHEDULE_VECTOR, reschedule_interrupt),
INTG(CALL_FUNCTION_VECTOR, call_function_interrupt),
@@ -150,7 +150,7 @@ static const __initdata struct idt_data apic_idts[] = {
* Early traps running on the DEFAULT_STACK because the other interrupt
* stacks work only after cpu_init().
*/
-static const __initdata struct idt_data early_pf_idts[] = {
+static const __initconst struct idt_data early_pf_idts[] = {
INTG(X86_TRAP_PF, page_fault),
};
@@ -158,7 +158,7 @@ static const __initdata struct idt_data early_pf_idts[] = {
* Override for the debug_idt. Same as the default, but with interrupt
* stack set to DEFAULT_STACK (0). Required for NMI trap handling.
*/
-static const __initdata struct idt_data dbg_idts[] = {
+static const __initconst struct idt_data dbg_idts[] = {
INTG(X86_TRAP_DB, debug),
INTG(X86_TRAP_BP, int3),
};
@@ -180,7 +180,7 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
* The exceptions which use Interrupt stacks. They are setup after
* cpu_init() when the TSS has been initialized.
*/
-static const __initdata struct idt_data ist_idts[] = {
+static const __initconst struct idt_data ist_idts[] = {
ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
SISTG(X86_TRAP_BP, int3, DEBUG_STACK),
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a83b3346a0e1..c1bdbd3d3232 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -20,6 +20,7 @@
#include <linux/mm.h>
#include <asm/apic.h>
+#include <asm/nospec-branch.h>
#ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
static void call_on_stack(void *func, void *stack)
{
asm volatile("xchgl %%ebx,%%esp \n"
- "call *%%edi \n"
+ CALL_NOSPEC
"movl %%ebx,%%esp \n"
: "=b" (stack)
: "0" (stack),
- "D"(func)
+ [thunk_target] "D"(func)
: "memory", "cc", "edx", "ecx", "eax");
}
@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
call_on_stack(print_stack_overflow, isp);
asm volatile("xchgl %%ebx,%%esp \n"
- "call *%%edi \n"
+ CALL_NOSPEC
"movl %%ebx,%%esp \n"
: "=a" (arg1), "=b" (isp)
: "0" (desc), "1" (isp),
- "D" (desc->handle_irq)
+ [thunk_target] "D" (desc->handle_irq)
: "memory", "cc", "ecx");
return 1;
}
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 8da3e909e967..a539410c4ea9 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -61,6 +61,9 @@ void __init init_ISA_irqs(void)
struct irq_chip *chip = legacy_pic->chip;
int i;
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+ init_bsp_APIC();
+#endif
legacy_pic->init(0);
for (i = 0; i < nr_legacy_irqs(); i++)
diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index f73f475d0573..d177940aa090 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -24,7 +24,6 @@
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
-#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/nodemask.h>
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
new file mode 100644
index 000000000000..b68fd895235a
--- /dev/null
+++ b/arch/x86/kernel/jailhouse.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Jailhouse paravirt_ops implementation
+ *
+ * Copyright (c) Siemens AG, 2015-2017
+ *
+ * Authors:
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ */
+
+#include <linux/acpi_pmtmr.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <asm/apic.h>
+#include <asm/cpu.h>
+#include <asm/hypervisor.h>
+#include <asm/i8259.h>
+#include <asm/irqdomain.h>
+#include <asm/pci_x86.h>
+#include <asm/reboot.h>
+#include <asm/setup.h>
+
+static __initdata struct jailhouse_setup_data setup_data;
+static unsigned int precalibrated_tsc_khz;
+
+static uint32_t jailhouse_cpuid_base(void)
+{
+ if (boot_cpu_data.cpuid_level < 0 ||
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return 0;
+
+ return hypervisor_cpuid_base("Jailhouse\0\0\0", 0);
+}
+
+static uint32_t __init jailhouse_detect(void)
+{
+ return jailhouse_cpuid_base();
+}
+
+static void jailhouse_get_wallclock(struct timespec *now)
+{
+ memset(now, 0, sizeof(*now));
+}
+
+static void __init jailhouse_timer_init(void)
+{
+ lapic_timer_frequency = setup_data.apic_khz * (1000 / HZ);
+}
+
+static unsigned long jailhouse_get_tsc(void)
+{
+ return precalibrated_tsc_khz;
+}
+
+static void __init jailhouse_x2apic_init(void)
+{
+#ifdef CONFIG_X86_X2APIC
+ if (!x2apic_enabled())
+ return;
+ /*
+ * We do not have access to IR inside Jailhouse non-root cells. So
+ * we have to run in physical mode.
+ */
+ x2apic_phys = 1;
+ /*
+ * This will trigger the switch to apic_x2apic_phys. Empty OEM IDs
+ * ensure that only this APIC driver picks up the call.
+ */
+ default_acpi_madt_oem_check("", "");
+#endif
+}
+
+static void __init jailhouse_get_smp_config(unsigned int early)
+{
+ struct ioapic_domain_cfg ioapic_cfg = {
+ .type = IOAPIC_DOMAIN_STRICT,
+ .ops = &mp_ioapic_irqdomain_ops,
+ };
+ struct mpc_intsrc mp_irq = {
+ .type = MP_INTSRC,
+ .irqtype = mp_INT,
+ .irqflag = MP_IRQPOL_ACTIVE_HIGH | MP_IRQTRIG_EDGE,
+ };
+ unsigned int cpu;
+
+ jailhouse_x2apic_init();
+
+ register_lapic_address(0xfee00000);
+
+ for (cpu = 0; cpu < setup_data.num_cpus; cpu++) {
+ generic_processor_info(setup_data.cpu_ids[cpu],
+ boot_cpu_apic_version);
+ }
+
+ smp_found_config = 1;
+
+ if (setup_data.standard_ioapic) {
+ mp_register_ioapic(0, 0xfec00000, gsi_top, &ioapic_cfg);
+
+ /* Register 1:1 mapping for legacy UART IRQs 3 and 4 */
+ mp_irq.srcbusirq = mp_irq.dstirq = 3;
+ mp_save_irq(&mp_irq);
+
+ mp_irq.srcbusirq = mp_irq.dstirq = 4;
+ mp_save_irq(&mp_irq);
+ }
+}
+
+static void jailhouse_no_restart(void)
+{
+ pr_notice("Jailhouse: Restart not supported, halting\n");
+ machine_halt();
+}
+
+static int __init jailhouse_pci_arch_init(void)
+{
+ pci_direct_init(1);
+
+ /*
+ * There are no bridges on the virtual PCI root bus under Jailhouse,
+ * thus no other way to discover all devices than a full scan.
+ * Respect any overrides via the command line, though.
+ */
+ if (pcibios_last_bus < 0)
+ pcibios_last_bus = 0xff;
+
+ return 0;
+}
+
+static void __init jailhouse_init_platform(void)
+{
+ u64 pa_data = boot_params.hdr.setup_data;
+ struct setup_data header;
+ void *mapping;
+
+ x86_init.irqs.pre_vector_init = x86_init_noop;
+ x86_init.timers.timer_init = jailhouse_timer_init;
+ x86_init.mpparse.get_smp_config = jailhouse_get_smp_config;
+ x86_init.pci.arch_init = jailhouse_pci_arch_init;
+
+ x86_platform.calibrate_cpu = jailhouse_get_tsc;
+ x86_platform.calibrate_tsc = jailhouse_get_tsc;
+ x86_platform.get_wallclock = jailhouse_get_wallclock;
+ x86_platform.legacy.rtc = 0;
+ x86_platform.legacy.warm_reset = 0;
+ x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;
+
+ legacy_pic = &null_legacy_pic;
+
+ machine_ops.emergency_restart = jailhouse_no_restart;
+
+ while (pa_data) {
+ mapping = early_memremap(pa_data, sizeof(header));
+ memcpy(&header, mapping, sizeof(header));
+ early_memunmap(mapping, sizeof(header));
+
+ if (header.type == SETUP_JAILHOUSE &&
+ header.len >= sizeof(setup_data)) {
+ pa_data += offsetof(struct setup_data, data);
+
+ mapping = early_memremap(pa_data, sizeof(setup_data));
+ memcpy(&setup_data, mapping, sizeof(setup_data));
+ early_memunmap(mapping, sizeof(setup_data));
+
+ break;
+ }
+
+ pa_data = header.next;
+ }
+
+ if (!pa_data)
+ panic("Jailhouse: No valid setup data found");
+
+ if (setup_data.compatible_version > JAILHOUSE_SETUP_REQUIRED_VERSION)
+ panic("Jailhouse: Unsupported setup data structure");
+
+ pmtmr_ioport = setup_data.pm_timer_address;
+ pr_debug("Jailhouse: PM-Timer IO Port: %#x\n", pmtmr_ioport);
+
+ precalibrated_tsc_khz = setup_data.tsc_khz;
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+
+ pci_probe = 0;
+
+ /*
+	 * Prevent the kernel from complaining about missing ACPI tables -
+	 * there are none in a non-root cell.
+ */
+ disable_acpi();
+}
+
+bool jailhouse_paravirt(void)
+{
+ return jailhouse_cpuid_base() != 0;
+}
+
+static bool jailhouse_x2apic_available(void)
+{
+ /*
+ * The x2APIC is only available if the root cell enabled it. Jailhouse
+ * does not support switching between xAPIC and x2APIC.
+ */
+ return x2apic_enabled();
+}
+
+const struct hypervisor_x86 x86_hyper_jailhouse __refconst = {
+ .name = "Jailhouse",
+ .detect = jailhouse_detect,
+ .init.init_platform = jailhouse_init_platform,
+ .init.x2apic_available = jailhouse_x2apic_available,
+};
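
For illustration, a minimal sketch (not part of the patch) of the setup_data
walk that jailhouse_init_platform() performs above: the boot loader hands the
kernel a physical-address-linked list of struct setup_data entries, and the
code follows the ->next chain until it finds the wanted type. The helper name
find_setup_data() is hypothetical.

	static u64 __init find_setup_data(u32 wanted_type)
	{
		u64 pa = boot_params.hdr.setup_data;
		struct setup_data header;
		void *mapping;

		while (pa) {
			/* Map just the header to read type/len/next */
			mapping = early_memremap(pa, sizeof(header));
			memcpy(&header, mapping, sizeof(header));
			early_memunmap(mapping, sizeof(header));

			if (header.type == wanted_type)
				return pa + offsetof(struct setup_data, data);

			pa = header.next;
		}
		return 0;	/* not found */
	}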
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index e941136e24d8..203d398802a3 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -40,6 +40,7 @@
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
+#include <asm/nospec-branch.h>
#include "common.h"
@@ -203,7 +204,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
}
/* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
{
return ((insn->opcode.bytes[0] == 0xff &&
(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -237,6 +238,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
return (start <= target && target <= start + len);
}
+static int insn_is_indirect_jump(struct insn *insn)
+{
+ int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+ /*
+	 * A jump to __x86_indirect_thunk_* is treated as an indirect jump.
+	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
+	 * older gcc may still use indirect jumps, so this check is added on
+	 * top of the indirect-jump check rather than replacing it.
+ */
+ if (!ret)
+ ret = insn_jump_into_range(insn,
+ (unsigned long)__indirect_thunk_start,
+ (unsigned long)__indirect_thunk_end -
+ (unsigned long)__indirect_thunk_start);
+#endif
+ return ret;
+}
+
/* Decode whole function to ensure any instructions don't jump into target */
static int can_optimize(unsigned long paddr)
{
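
A hedged sketch of the check the hunk above adds: with CONFIG_RETPOLINE, the
compiler may emit "jmp __x86_indirect_thunk_rax" instead of "jmp *%rax", so a
direct jump whose target lands inside the thunk section must also be treated
as indirect. The section bounds are the linker symbols added to vmlinux.lds.S
later in this diff; the helper name here is illustrative only.

	extern char __indirect_thunk_start[], __indirect_thunk_end[];

	static bool target_is_indirect_thunk(unsigned long target)
	{
		/* True if the jump target lies inside the thunk section */
		return target >= (unsigned long)__indirect_thunk_start &&
		       target <  (unsigned long)__indirect_thunk_end;
	}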
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 3a4b12809ab5..27d0a1712663 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -281,7 +281,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
int ELCR_fallback = 0;
intsrc.type = MP_INTSRC;
- intsrc.irqflag = 0; /* conforming */
+ intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
intsrc.srcbus = 0;
intsrc.dstapic = mpc_ioapic_id(0);
@@ -324,10 +324,13 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
* copy that information over to the MP table in the
* irqflag field (level sensitive, active high polarity).
*/
- if (ELCR_trigger(i))
- intsrc.irqflag = 13;
- else
- intsrc.irqflag = 0;
+ if (ELCR_trigger(i)) {
+ intsrc.irqflag = MP_IRQTRIG_LEVEL |
+ MP_IRQPOL_ACTIVE_HIGH;
+ } else {
+ intsrc.irqflag = MP_IRQTRIG_DEFAULT |
+ MP_IRQPOL_DEFAULT;
+ }
}
intsrc.srcbusirq = i;
@@ -419,7 +422,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
construct_ioapic_table(mpc_default_type);
lintsrc.type = MP_LINTSRC;
- lintsrc.irqflag = 0; /* conforming */
+ lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
lintsrc.srcbusid = 0;
lintsrc.srcbusirq = 0;
lintsrc.destapic = MP_APIC_ALL;
@@ -664,7 +667,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
if (m->irqtype != mp_INT)
return 0;
- if (m->irqflag != 0x0f)
+ if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
return 0;
/* not legacy */
@@ -673,7 +676,8 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
if (mp_irqs[i].irqtype != mp_INT)
continue;
- if (mp_irqs[i].irqflag != 0x0f)
+ if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
+ MP_IRQPOL_ACTIVE_LOW))
continue;
if (mp_irqs[i].srcbus != m->srcbus)
@@ -784,7 +788,8 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
if (mp_irqs[i].irqtype != mp_INT)
continue;
- if (mp_irqs[i].irqflag != 0x0f)
+ if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
+ MP_IRQPOL_ACTIVE_LOW))
continue;
if (nr_m_spare > 0) {
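
For reference, a hedged decoding of the magic numbers these hunks replace,
assuming the MP_IRQ* definitions from <asm/mpspec_def.h>: the MP table
irqflag field keeps polarity in bits 1:0 and trigger mode in bits 3:2.

	/* 0x00: "conforming" - both fields left at bus default */
	#define EXAMPLE_CONFORMING	(MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT)
	/* 0x0d (the old "13"): level triggered, active high (ELCR case) */
	#define EXAMPLE_ELCR_LEVEL	(MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_HIGH)
	/* 0x0f: level triggered, active low (legacy PCI-style) */
	#define EXAMPLE_LEVEL_LOW	(MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW)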
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 599d7462eccc..df7ab02f959f 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
@@ -87,7 +87,6 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_mask = dma_alloc_coherent_mask(dev, flag);
- flag &= ~__GFP_ZERO;
again:
page = NULL;
/* CMA can be used only in the context which permits sleeping */
@@ -139,7 +138,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
if (!*dev)
*dev = &x86_dma_fallback_dev;
- *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
if (!is_device_dma_capable(*dev))
@@ -217,7 +215,7 @@ static __init int iommu_setup(char *p)
}
early_param("iommu", iommu_setup);
-int x86_dma_supported(struct device *dev, u64 mask)
+int arch_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
@@ -226,12 +224,6 @@ int x86_dma_supported(struct device *dev, u64 mask)
}
#endif
- /* Copied from i386. Doesn't make much sense, because it will
- only work for pci_alloc_coherent.
- The caller just has to use GFP_DMA in this case. */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
/* Tell the device to use SAC when IOMMU force is on. This
allows the driver to use cheaper accesses in some cases.
@@ -251,6 +243,17 @@ int x86_dma_supported(struct device *dev, u64 mask)
return 1;
}
+EXPORT_SYMBOL(arch_dma_supported);
+
+int x86_dma_supported(struct device *dev, u64 mask)
+{
+ /* Copied from i386. Doesn't make much sense, because it will
+ only work for pci_alloc_coherent.
+ The caller just has to use GFP_DMA in this case. */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+ return 1;
+}
static int __init pci_iommu_init(void)
{
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index b0caae27e1b7..618285e475c6 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Fallback functions when the main IOMMU code is not compiled in. This
code is roughly equivalent to i386. */
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 53bd05ea90d8..0ee0f8f34251 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -6,7 +6,7 @@
#include <linux/init.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/mem_encrypt.h>
#include <asm/iommu.h>
@@ -48,7 +48,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
-static const struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops x86_swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
.alloc = x86_swiotlb_alloc_coherent,
.free = x86_swiotlb_free_coherent,
@@ -112,7 +112,7 @@ void __init pci_swiotlb_init(void)
{
if (swiotlb) {
swiotlb_init(0);
- dma_ops = &swiotlb_dma_ops;
+ dma_ops = &x86_swiotlb_dma_ops;
}
}
@@ -120,7 +120,7 @@ void __init pci_swiotlb_late_init(void)
{
/* An IOMMU turned us off. */
if (!swiotlb)
- swiotlb_free();
+ swiotlb_exit();
else {
printk(KERN_INFO "PCI-DMA: "
"Using software bounce buffering for IO (SWIOTLB)\n");
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
index 39a59299bfa0..235fe6008ac8 100644
--- a/arch/x86/kernel/platform-quirks.c
+++ b/arch/x86/kernel/platform-quirks.c
@@ -9,6 +9,7 @@ void __init x86_early_init_platform_quirks(void)
{
x86_platform.legacy.i8042 = X86_LEGACY_I8042_EXPECTED_PRESENT;
x86_platform.legacy.rtc = 1;
+ x86_platform.legacy.warm_reset = 1;
x86_platform.legacy.reserve_bios_regions = 0;
x86_platform.legacy.devices.pnpbios = 1;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index aed9d94bd46f..03408b942adb 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -21,7 +21,6 @@
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
-#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
@@ -47,7 +46,7 @@
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
+__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
.x86_tss = {
/*
* .sp0 is only used when entering ring 0 from a lower
@@ -380,19 +379,24 @@ void stop_this_cpu(void *dummy)
disable_local_APIC();
mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
+ /*
+ * Use wbinvd on processors that support SME. This provides support
+ * for performing a successful kexec when going from SME inactive
+ * to SME active (or vice-versa). The cache must be cleared so that
+ * if there are entries with the same physical address, both with and
+ * without the encryption bit, they don't race each other when flushed
+ * and potentially end up with the wrong entry being committed to
+ * memory.
+ */
+ if (boot_cpu_has(X86_FEATURE_SME))
+ native_wbinvd();
for (;;) {
/*
- * Use wbinvd followed by hlt to stop the processor. This
- * provides support for kexec on a processor that supports
- * SME. With kexec, going from SME inactive to SME active
- * requires clearing cache entries so that addresses without
- * the encryption bit set don't corrupt the same physical
- * address that has the encryption bit set when caches are
- * flushed. To achieve this a wbinvd is performed followed by
- * a hlt. Even if the processor is not in the kexec/SME
- * scenario this only adds a wbinvd to a halting processor.
+ * Use native_halt() so that memory contents don't change
+ * (stack usage and variables) after possibly issuing the
+ * native_wbinvd() above.
*/
- asm volatile("wbinvd; hlt" : : : "memory");
+ native_halt();
}
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 8af2e8d0c0a1..1ae67e982af7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -114,7 +114,6 @@
#include <asm/alternative.h>
#include <asm/prom.h>
#include <asm/microcode.h>
-#include <asm/mmu_context.h>
#include <asm/kaslr.h>
#include <asm/unwind.h>
@@ -364,16 +363,6 @@ static void __init reserve_initrd(void)
!ramdisk_image || !ramdisk_size)
return; /* No initrd provided by bootloader */
- /*
- * If SME is active, this memory will be marked encrypted by the
- * kernel when it is accessed (including relocation). However, the
- * ramdisk image was loaded decrypted by the bootloader, so make
- * sure that it is encrypted before accessing it. For SEV the
- * ramdisk will already be encrypted, so only do this for SME.
- */
- if (sme_active())
- sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
-
initrd_start = 0;
mapped_size = memblock_mem_size(max_pfn_mapped);
@@ -906,9 +895,6 @@ void __init setup_arch(char **cmdline_p)
set_bit(EFI_BOOT, &efi.flags);
set_bit(EFI_64BIT, &efi.flags);
}
-
- if (efi_enabled(EFI_BOOT))
- efi_memblock_x86_reserve_range();
#endif
x86_init.oem.arch_setup();
@@ -962,6 +948,8 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
+ if (efi_enabled(EFI_BOOT))
+ efi_memblock_x86_reserve_range();
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory used by the kernel cannot be hot-removed because Linux
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 8c6da1a643da..ac057f9b0763 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -25,8 +25,8 @@ static inline void signal_compat_build_tests(void)
* limits also have to look at this code. Make sure any
* new fields are handled in copy_siginfo_to_user32()!
*/
- BUILD_BUG_ON(NSIGILL != 8);
- BUILD_BUG_ON(NSIGFPE != 8);
+ BUILD_BUG_ON(NSIGILL != 11);
+ BUILD_BUG_ON(NSIGFPE != 13);
BUILD_BUG_ON(NSIGSEGV != 4);
BUILD_BUG_ON(NSIGBUS != 5);
BUILD_BUG_ON(NSIGTRAP != 4);
@@ -64,7 +64,7 @@ static inline void signal_compat_build_tests(void)
CHECK_SI_SIZE (_kill, 2*sizeof(int));
CHECK_CSI_OFFSET(_timer);
- CHECK_CSI_SIZE (_timer, 5*sizeof(int));
+ CHECK_CSI_SIZE (_timer, 3*sizeof(int));
CHECK_SI_SIZE (_timer, 6*sizeof(int));
CHECK_CSI_OFFSET(_rt);
@@ -75,9 +75,11 @@ static inline void signal_compat_build_tests(void)
CHECK_CSI_SIZE (_sigchld, 5*sizeof(int));
CHECK_SI_SIZE (_sigchld, 8*sizeof(int));
+#ifdef CONFIG_X86_X32_ABI
CHECK_CSI_OFFSET(_sigchld_x32);
CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int));
/* no _sigchld_x32 in the generic siginfo_t */
+#endif
CHECK_CSI_OFFSET(_sigfault);
CHECK_CSI_SIZE (_sigfault, 4*sizeof(int));
@@ -96,6 +98,8 @@ static inline void signal_compat_build_tests(void)
void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
{
+ signal_compat_build_tests();
+
/* Don't leak in-kernel non-uapi flags to user-space */
if (oact)
oact->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
@@ -111,116 +115,3 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
if (in_x32_syscall())
act->sa.sa_flags |= SA_X32_ABI;
}
-
-int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
- bool x32_ABI)
-{
- int err = 0;
-
- signal_compat_build_tests();
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- put_user_try {
- /* If you change siginfo_t structure, please make sure that
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member. */
- put_user_ex(from->si_signo, &to->si_signo);
- put_user_ex(from->si_errno, &to->si_errno);
- put_user_ex(from->si_code, &to->si_code);
-
- if (from->si_code < 0) {
- put_user_ex(from->si_pid, &to->si_pid);
- put_user_ex(from->si_uid, &to->si_uid);
- put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
- } else {
- /*
- * First 32bits of unions are always present:
- * si_pid === si_band === si_tid === si_addr(LS half)
- */
- put_user_ex(from->_sifields._pad[0],
- &to->_sifields._pad[0]);
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_FAULT:
- if (from->si_signo == SIGBUS &&
- (from->si_code == BUS_MCEERR_AR ||
- from->si_code == BUS_MCEERR_AO))
- put_user_ex(from->si_addr_lsb, &to->si_addr_lsb);
-
- if (from->si_signo == SIGSEGV) {
- if (from->si_code == SEGV_BNDERR) {
- compat_uptr_t lower = (unsigned long)from->si_lower;
- compat_uptr_t upper = (unsigned long)from->si_upper;
- put_user_ex(lower, &to->si_lower);
- put_user_ex(upper, &to->si_upper);
- }
- if (from->si_code == SEGV_PKUERR)
- put_user_ex(from->si_pkey, &to->si_pkey);
- }
- break;
- case SIL_SYS:
- put_user_ex(from->si_syscall, &to->si_syscall);
- put_user_ex(from->si_arch, &to->si_arch);
- break;
- case SIL_CHLD:
- if (!x32_ABI) {
- put_user_ex(from->si_utime, &to->si_utime);
- put_user_ex(from->si_stime, &to->si_stime);
- } else {
- put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime);
- put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime);
- }
- put_user_ex(from->si_status, &to->si_status);
- /* FALL THROUGH */
- case SIL_KILL:
- put_user_ex(from->si_uid, &to->si_uid);
- break;
- case SIL_POLL:
- put_user_ex(from->si_fd, &to->si_fd);
- break;
- case SIL_TIMER:
- put_user_ex(from->si_overrun, &to->si_overrun);
- put_user_ex(ptr_to_compat(from->si_ptr),
- &to->si_ptr);
- break;
- case SIL_RT:
- put_user_ex(from->si_uid, &to->si_uid);
- put_user_ex(from->si_int, &to->si_int);
- break;
- }
- }
- } put_user_catch(err);
-
- return err;
-}
-
-/* from syscall's path, where we know the ABI */
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- return __copy_siginfo_to_user32(to, from, in_x32_syscall());
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- int err = 0;
- u32 ptr32;
-
- if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- get_user_try {
- get_user_ex(to->si_signo, &from->si_signo);
- get_user_ex(to->si_errno, &from->si_errno);
- get_user_ex(to->si_code, &from->si_code);
-
- get_user_ex(to->si_pid, &from->si_pid);
- get_user_ex(to->si_uid, &from->si_uid);
- get_user_ex(ptr32, &from->si_ptr);
- to->si_ptr = compat_ptr(ptr32);
- } get_user_catch(err);
-
- return err;
-}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed556d50d7ed..6f27facbaa9b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -75,7 +75,6 @@
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
-#include <asm/realmode.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
@@ -934,7 +933,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
* the targeted processor.
*/
- if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
+ if (x86_platform.legacy.warm_reset) {
pr_debug("Setting warm reset code and vector.\n");
@@ -1006,7 +1005,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
/* mark "stuck" area as not stuck */
*trampoline_status = 0;
- if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
+ if (x86_platform.legacy.warm_reset) {
/*
* Cleanup possible dangling ends...
*/
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 20161ef53537..093f2ea5dd56 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -102,7 +102,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
unwind_next_frame(&state)) {
- regs = unwind_get_entry_regs(&state);
+ regs = unwind_get_entry_regs(&state, NULL);
if (regs) {
/*
* Kernel mode registers on the stack indicate an
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index a4eb27918ceb..a2486f444073 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -138,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
return -1;
set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
pte_unmap(pte);
+
+ /*
+ * PTI poisons low addresses in the kernel page tables in the
+ * name of making them unusable for userspace. To execute
+ * code at such a low address, the poison must be cleared.
+ *
+ * Note: 'pgd' actually gets set in p4d_alloc() _or_
+ * pud_alloc() depending on 4/5-level paging.
+ */
+ pgd->pgd &= ~_PAGE_NX;
+
return 0;
}
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 749d189f8cd4..774ebafa97c4 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -69,9 +69,12 @@ static struct irqaction irq0 = {
static void __init setup_default_timer_irq(void)
{
- if (!nr_legacy_irqs())
- return;
- setup_irq(0, &irq0);
+ /*
+ * Unconditionally register the legacy timer; even without legacy
+ * PIC/PIT we need this for the HPET0 in legacy replacement mode.
+ */
+ if (setup_irq(0, &irq0))
+ pr_info("Failed to register legacy timer interrupt\n");
}
/* Default timer init function */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8ea117f8142e..fb4302738410 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -25,6 +25,7 @@
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/intel-family.h>
+#include <asm/i8259.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -363,6 +364,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
unsigned long tscmin, tscmax;
int pitcnt;
+ if (!has_legacy_pic()) {
+ /*
+		 * Relies on tsc_early_delay_calibrate() having provided a
+		 * semi-usable udelay(); wait for the same 50ms we would have
+		 * spent in the PIT loop below.
+ */
+ udelay(10 * USEC_PER_MSEC);
+ udelay(10 * USEC_PER_MSEC);
+ udelay(10 * USEC_PER_MSEC);
+ udelay(10 * USEC_PER_MSEC);
+ udelay(10 * USEC_PER_MSEC);
+ return ULONG_MAX;
+ }
+
/* Set the Gate high, disable speaker */
outb((inb(0x61) & ~0x02) | 0x01, 0x61);
@@ -487,6 +502,9 @@ static unsigned long quick_pit_calibrate(void)
u64 tsc, delta;
unsigned long d1, d2;
+ if (!has_legacy_pic())
+ return 0;
+
/* Set the Gate high, disable speaker */
outb((inb(0x61) & ~0x02) | 0x01, 0x61);
@@ -602,7 +620,6 @@ unsigned long native_calibrate_tsc(void)
case INTEL_FAM6_KABYLAKE_DESKTOP:
crystal_khz = 24000; /* 24.0 MHz */
break;
- case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_ATOM_DENVERTON:
crystal_khz = 25000; /* 25.0 MHz */
break;
@@ -612,6 +629,8 @@ unsigned long native_calibrate_tsc(void)
}
}
+ if (crystal_khz == 0)
+ return 0;
/*
* TSC frequency determined by CPUID is a "hardware reported"
* frequency and is the most accurate one so far we have. This
@@ -987,8 +1006,6 @@ static void __init detect_art(void)
/* clocksource code */
-static struct clocksource clocksource_tsc;
-
static void tsc_resume(struct clocksource *cs)
{
tsc_verify_tsc_adjust(true);
@@ -1039,12 +1056,31 @@ static void tsc_cs_tick_stable(struct clocksource *cs)
/*
* .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
*/
+static struct clocksource clocksource_tsc_early = {
+ .name = "tsc-early",
+ .rating = 299,
+ .read = read_tsc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ CLOCK_SOURCE_MUST_VERIFY,
+ .archdata = { .vclock_mode = VCLOCK_TSC },
+ .resume = tsc_resume,
+ .mark_unstable = tsc_cs_mark_unstable,
+ .tick_stable = tsc_cs_tick_stable,
+};
+
+/*
+ * Must mark VALID_FOR_HRES early so that when tsc-early is unregistered,
+ * this clocksource immediately takes over. It is only registered once
+ * the TSC has been found good.
+ */
static struct clocksource clocksource_tsc = {
.name = "tsc",
.rating = 300,
.read = read_tsc,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ CLOCK_SOURCE_VALID_FOR_HRES |
CLOCK_SOURCE_MUST_VERIFY,
.archdata = { .vclock_mode = VCLOCK_TSC },
.resume = tsc_resume,
@@ -1168,8 +1204,8 @@ static void tsc_refine_calibration_work(struct work_struct *work)
int cpu;
/* Don't bother refining TSC on unstable systems */
- if (check_tsc_unstable())
- goto out;
+ if (tsc_unstable)
+ return;
/*
* Since the work is started early in boot, we may be
@@ -1221,9 +1257,13 @@ static void tsc_refine_calibration_work(struct work_struct *work)
set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
out:
+ if (tsc_unstable)
+ return;
+
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
+ clocksource_unregister(&clocksource_tsc_early);
}
@@ -1232,13 +1272,11 @@ static int __init init_tsc_clocksource(void)
if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
return 0;
+ if (check_tsc_unstable())
+ return 0;
+
if (tsc_clocksource_reliable)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
- /* lower the rating if we already know its unstable: */
- if (check_tsc_unstable()) {
- clocksource_tsc.rating = 0;
- clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
- }
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
@@ -1251,6 +1289,7 @@ static int __init init_tsc_clocksource(void)
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
+ clocksource_unregister(&clocksource_tsc_early);
return 0;
}
@@ -1315,6 +1354,12 @@ void __init tsc_init(void)
(unsigned long)cpu_khz / 1000,
(unsigned long)cpu_khz % 1000);
+ if (cpu_khz != tsc_khz) {
+ pr_info("Detected %lu.%03lu MHz TSC",
+ (unsigned long)tsc_khz / 1000,
+ (unsigned long)tsc_khz % 1000);
+ }
+
/* Sanitize TSC ADJUST before cyc2ns gets initialized */
tsc_store_and_check_tsc_adjust(true);
@@ -1349,9 +1394,12 @@ void __init tsc_init(void)
check_system_tsc_reliable();
- if (unsynchronized_tsc())
+ if (unsynchronized_tsc()) {
mark_tsc_unstable("TSCs unsynchronized");
+ return;
+ }
+ clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
detect_art();
}
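
A sketch of the resulting clocksource handoff, pieced together from the hunks
above (illustrative, not new code): "tsc-early" is registered at rating 299
once the TSC is deemed synchronized, the final "tsc" clocksource at rating 300
carries CLOCK_SOURCE_VALID_FOR_HRES, and whichever path finishes calibration
unregisters the early one so the switch happens immediately.

	/* tsc_init(), at boot: */
	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);

	/*
	 * Later, from tsc_refine_calibration_work() or
	 * init_tsc_clocksource(), once tsc_khz is trusted:
	 */
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
	clocksource_unregister(&clocksource_tsc_early);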
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index be86a865087a..1f9188f5357c 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -74,8 +74,50 @@ static struct orc_entry *orc_module_find(unsigned long ip)
}
#endif
+#ifdef CONFIG_DYNAMIC_FTRACE
+static struct orc_entry *orc_find(unsigned long ip);
+
+/*
+ * Ftrace dynamic trampolines do not have ORC entries of their own,
+ * but they are copies of the static ftrace entries defined in
+ * ftrace_*.S, which do have ORC entries.
+ *
+ * If the unwinder comes across an ftrace trampoline, find the ftrace
+ * function that was used to create it, and use that function's ORC
+ * entry, as the placement of the return address on the stack will be
+ * identical.
+ */
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+ struct ftrace_ops *ops;
+ unsigned long caller;
+
+ ops = ftrace_ops_trampoline(ip);
+ if (!ops)
+ return NULL;
+
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+ caller = (unsigned long)ftrace_regs_call;
+ else
+ caller = (unsigned long)ftrace_call;
+
+ /* Prevent unlikely recursion */
+ if (ip == caller)
+ return NULL;
+
+ return orc_find(caller);
+}
+#else
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+ return NULL;
+}
+#endif
+
static struct orc_entry *orc_find(unsigned long ip)
{
+ static struct orc_entry *orc;
+
if (!orc_init)
return NULL;
@@ -111,7 +153,11 @@ static struct orc_entry *orc_find(unsigned long ip)
__stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
/* Module lookup: */
- return orc_module_find(ip);
+ orc = orc_module_find(ip);
+ if (orc)
+ return orc;
+
+ return orc_ftrace_find(ip);
}
static void orc_sort_swap(void *_a, void *_b, int size)
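
Summarizing the lookup order that orc_find() ends up with after this hunk
(an explanatory note, not code from the patch):

	/*
	 * 1. Built-in ORC tables (__start_orc_unwind_ip..__stop_orc_unwind_ip)
	 * 2. orc_module_find() for module text
	 * 3. orc_ftrace_find() for dynamic ftrace trampolines, which reuses
	 *    the entry of ftrace_call/ftrace_regs_call because the trampoline
	 *    is a copy of that code and lays out the return address the same.
	 */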
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index a3755d293a48..85c7ef23d99f 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -528,11 +528,11 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
return 0;
}
-static int push_ret_address(struct pt_regs *regs, unsigned long ip)
+static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
unsigned long new_sp = regs->sp - sizeof_long();
- if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
+ if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
return -EFAULT;
regs->sp = new_sp;
@@ -566,7 +566,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
regs->ip += correction;
} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
regs->sp += sizeof_long(); /* Pop incorrect return address */
- if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
+ if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
return -ERESTART;
}
/* popf; tell the caller to not touch TF */
@@ -655,7 +655,7 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
*
* But there is corner case, see the comment in ->post_xol().
*/
- if (push_ret_address(regs, new_ip))
+ if (emulate_push_stack(regs, new_ip))
return false;
} else if (!check_jmp_cond(auprobe, regs)) {
offs = 0;
@@ -665,6 +665,16 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
return true;
}
+static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;
+
+ if (emulate_push_stack(regs, *src_ptr))
+ return false;
+ regs->ip += auprobe->push.ilen;
+ return true;
+}
+
static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
BUG_ON(!branch_is_call(auprobe));
@@ -703,6 +713,10 @@ static const struct uprobe_xol_ops branch_xol_ops = {
.post_xol = branch_post_xol_op,
};
+static const struct uprobe_xol_ops push_xol_ops = {
+ .emulate = push_emulate_op,
+};
+
/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
@@ -750,6 +764,87 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
return 0;
}
+/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
+static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+{
+ u8 opc1 = OPCODE1(insn), reg_offset = 0;
+
+ if (opc1 < 0x50 || opc1 > 0x57)
+ return -ENOSYS;
+
+ if (insn->length > 2)
+ return -ENOSYS;
+ if (insn->length == 2) {
+ /* only support rex_prefix 0x41 (x64 only) */
+#ifdef CONFIG_X86_64
+ if (insn->rex_prefix.nbytes != 1 ||
+ insn->rex_prefix.bytes[0] != 0x41)
+ return -ENOSYS;
+
+ switch (opc1) {
+ case 0x50:
+ reg_offset = offsetof(struct pt_regs, r8);
+ break;
+ case 0x51:
+ reg_offset = offsetof(struct pt_regs, r9);
+ break;
+ case 0x52:
+ reg_offset = offsetof(struct pt_regs, r10);
+ break;
+ case 0x53:
+ reg_offset = offsetof(struct pt_regs, r11);
+ break;
+ case 0x54:
+ reg_offset = offsetof(struct pt_regs, r12);
+ break;
+ case 0x55:
+ reg_offset = offsetof(struct pt_regs, r13);
+ break;
+ case 0x56:
+ reg_offset = offsetof(struct pt_regs, r14);
+ break;
+ case 0x57:
+ reg_offset = offsetof(struct pt_regs, r15);
+ break;
+ }
+#else
+ return -ENOSYS;
+#endif
+ } else {
+ switch (opc1) {
+ case 0x50:
+ reg_offset = offsetof(struct pt_regs, ax);
+ break;
+ case 0x51:
+ reg_offset = offsetof(struct pt_regs, cx);
+ break;
+ case 0x52:
+ reg_offset = offsetof(struct pt_regs, dx);
+ break;
+ case 0x53:
+ reg_offset = offsetof(struct pt_regs, bx);
+ break;
+ case 0x54:
+ reg_offset = offsetof(struct pt_regs, sp);
+ break;
+ case 0x55:
+ reg_offset = offsetof(struct pt_regs, bp);
+ break;
+ case 0x56:
+ reg_offset = offsetof(struct pt_regs, si);
+ break;
+ case 0x57:
+ reg_offset = offsetof(struct pt_regs, di);
+ break;
+ }
+ }
+
+ auprobe->push.reg_offset = reg_offset;
+ auprobe->push.ilen = insn->length;
+ auprobe->ops = &push_xol_ops;
+ return 0;
+}
+
/**
* arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
* @mm: the probed address space.
@@ -771,6 +866,10 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
if (ret != -ENOSYS)
return ret;
+ ret = push_setup_xol_ops(auprobe, &insn);
+ if (ret != -ENOSYS)
+ return ret;
+
/*
* Figure out which fixups default_post_xol_op() will need to perform,
* and annotate defparam->fixups accordingly.
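
A hedged aside on the opcode decoding above: PUSH r16/r32/r64 encodes the
register in the low three bits of the 0x50..0x57 opcode, so the two switch
statements could equally be written as tables. This is an illustrative
alternative, not the patch's code; the array name is hypothetical.

	#ifdef CONFIG_X86_64
	/* REX.B set (two-byte form): opcode low bits select r8..r15 */
	static const u8 push_reg_offset_rex[8] = {
		offsetof(struct pt_regs, r8),  offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15),
	};
	/* usage: reg_offset = push_reg_offset_rex[opc1 - 0x50]; */
	#endif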
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1e413a9326aa..9b138a06c1a4 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -124,6 +124,12 @@ SECTIONS
ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
#endif
+#ifdef CONFIG_RETPOLINE
+ __indirect_thunk_start = .;
+ *(.text.__x86.indirect_thunk)
+ __indirect_thunk_end = .;
+#endif
+
/* End of text section */
_etext = .;
} :text = 0x9090
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b514b2b2845a..290ecf711aec 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -25,6 +25,7 @@
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
+#include <asm/nospec-branch.h>
#include "x86.h"
#include "tss.h"
@@ -1021,8 +1022,8 @@ static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
- asm("push %[flags]; popf; call *%[fastop]"
- : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
+ asm("push %[flags]; popf; " CALL_NOSPEC
+ : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
return rc;
}
@@ -5335,9 +5336,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
if (!(ctxt->d & ByteOp))
fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
- asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
+ asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
- [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
+ [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
: "c"(ctxt->src2.val));
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c4deb1f34faa..2b8eb4da4d08 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3781,7 +3781,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
if (unlikely(!lapic_in_kernel(vcpu) ||
- kvm_event_needs_reinjection(vcpu)))
+ kvm_event_needs_reinjection(vcpu) ||
+ vcpu->arch.exception.pending))
return false;
if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
@@ -5465,30 +5466,34 @@ static void mmu_destroy_caches(void)
int kvm_mmu_module_init(void)
{
+ int ret = -ENOMEM;
+
kvm_mmu_clear_all_pte_masks();
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),
0, SLAB_ACCOUNT, NULL);
if (!pte_list_desc_cache)
- goto nomem;
+ goto out;
mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
sizeof(struct kvm_mmu_page),
0, SLAB_ACCOUNT, NULL);
if (!mmu_page_header_cache)
- goto nomem;
+ goto out;
if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
- goto nomem;
+ goto out;
- register_shrinker(&mmu_shrinker);
+ ret = register_shrinker(&mmu_shrinker);
+ if (ret)
+ goto out;
return 0;
-nomem:
+out:
mmu_destroy_caches();
- return -ENOMEM;
+ return ret;
}
/*
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index eb714f1cdf7e..f40d0da1f1d3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
+#include <asm/nospec-branch.h>
#include <asm/virtext.h>
#include "trace.h"
@@ -361,7 +362,6 @@ static void recalc_intercepts(struct vcpu_svm *svm)
{
struct vmcb_control_area *c, *h;
struct nested_state *g;
- u32 h_intercept_exceptions;
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
@@ -372,14 +372,9 @@ static void recalc_intercepts(struct vcpu_svm *svm)
h = &svm->nested.hsave->control;
g = &svm->nested;
- /* No need to intercept #UD if L1 doesn't intercept it */
- h_intercept_exceptions =
- h->intercept_exceptions & ~(1U << UD_VECTOR);
-
c->intercept_cr = h->intercept_cr | g->intercept_cr;
c->intercept_dr = h->intercept_dr | g->intercept_dr;
- c->intercept_exceptions =
- h_intercept_exceptions | g->intercept_exceptions;
+ c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
c->intercept = h->intercept | g->intercept;
}
@@ -2202,7 +2197,6 @@ static int ud_interception(struct vcpu_svm *svm)
{
int er;
- WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
if (er == EMULATE_USER_EXIT)
return 0;
@@ -4986,6 +4980,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%r14, %c[r14](%[svm]) \n\t"
"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
+ /*
+ * Clear host registers marked as clobbered to prevent
+ * speculative use.
+ */
+ "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
+ "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
+ "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
+ "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
+ "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
+#ifdef CONFIG_X86_64
+ "xor %%r8, %%r8 \n\t"
+ "xor %%r9, %%r9 \n\t"
+ "xor %%r10, %%r10 \n\t"
+ "xor %%r11, %%r11 \n\t"
+ "xor %%r12, %%r12 \n\t"
+ "xor %%r13, %%r13 \n\t"
+ "xor %%r14, %%r14 \n\t"
+ "xor %%r15, %%r15 \n\t"
+#endif
"pop %%" _ASM_BP
:
: [svm]"a"(svm),
@@ -5015,6 +5028,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 023afa0c8887..a8b96dc4cd83 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -50,6 +50,7 @@
#include <asm/apic.h>
#include <asm/irq_remapping.h>
#include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
#include "trace.h"
#include "pmu.h"
@@ -899,8 +900,16 @@ static inline short vmcs_field_to_offset(unsigned long field)
{
BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
- if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
- vmcs_field_to_offset_table[field] == 0)
+ if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
+ return -ENOENT;
+
+ /*
+ * FIXME: Mitigation for CVE-2017-5753. To be replaced with a
+ * generic mechanism.
+ */
+ asm("lfence");
+
+ if (vmcs_field_to_offset_table[field] == 0)
return -ENOENT;
return vmcs_field_to_offset_table[field];
@@ -1887,7 +1896,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
u32 eb;
- eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) |
+ eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
(1u << DB_VECTOR) | (1u << AC_VECTOR);
if ((vcpu->guest_debug &
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -1905,8 +1914,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
*/
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
- else
- eb |= 1u << UD_VECTOR;
vmcs_write32(EXCEPTION_BITMAP, eb);
}
@@ -5917,7 +5924,6 @@ static int handle_exception(struct kvm_vcpu *vcpu)
return 1; /* already handled by vmx_vcpu_run() */
if (is_invalid_opcode(intr_info)) {
- WARN_ON_ONCE(is_guest_mode(vcpu));
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er == EMULATE_USER_EXIT)
return 0;
@@ -9123,14 +9129,14 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
#endif
"pushf\n\t"
__ASM_SIZE(push) " $%c[cs]\n\t"
- "call *%[entry]\n\t"
+ CALL_NOSPEC
:
#ifdef CONFIG_X86_64
[sp]"=&r"(tmp),
#endif
ASM_CALL_CONSTRAINT
:
- [entry]"r"(entry),
+ THUNK_TARGET(entry),
[ss]"i"(__KERNEL_DS),
[cs]"i"(__KERNEL_CS)
);
@@ -9415,6 +9421,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
"pop %0 \n\t"
+ "setbe %c[fail](%0)\n\t"
"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9431,12 +9438,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%r13, %c[r13](%0) \n\t"
"mov %%r14, %c[r14](%0) \n\t"
"mov %%r15, %c[r15](%0) \n\t"
+ "xor %%r8d, %%r8d \n\t"
+ "xor %%r9d, %%r9d \n\t"
+ "xor %%r10d, %%r10d \n\t"
+ "xor %%r11d, %%r11d \n\t"
+ "xor %%r12d, %%r12d \n\t"
+ "xor %%r13d, %%r13d \n\t"
+ "xor %%r14d, %%r14d \n\t"
+ "xor %%r15d, %%r15d \n\t"
#endif
"mov %%cr2, %%" _ASM_AX " \n\t"
"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
+ "xor %%eax, %%eax \n\t"
+ "xor %%ebx, %%ebx \n\t"
+ "xor %%esi, %%esi \n\t"
+ "xor %%edi, %%edi \n\t"
"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
- "setbe %c[fail](%0) \n\t"
".pushsection .rodata \n\t"
".global vmx_return \n\t"
"vmx_return: " _ASM_PTR " 2b \n\t"
@@ -9473,6 +9491,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
if (debugctlmsr)
update_debugctlmsr(debugctlmsr);
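
On the vmcs_field_to_offset() hunk above: the lfence serializes execution so
the bounds check retires before the table load. Later kernels replaced this
FIXME with the generic array_index_nospec() helper, which clamps the index
via a data dependency instead of serializing - roughly (illustrative):

	field = array_index_nospec(field,
				   ARRAY_SIZE(vmcs_field_to_offset_table));
	if (vmcs_field_to_offset_table[field] == 0)
		return -ENOENT;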
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1cec2c62a0b0..c53298dfbf50 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7496,13 +7496,13 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
- if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+ if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
/*
* When EFER.LME and CR0.PG are set, the processor is in
* 64-bit mode (though maybe in a 32-bit code segment).
* CR4.PAE and EFER.LMA must be set.
*/
- if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+ if (!(sregs->cr4 & X86_CR4_PAE)
|| !(sregs->efer & EFER_LMA))
return -EINVAL;
} else {
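
The nature of the bug fixed here, for clarity: X86_CR0_PG_BIT and
X86_CR4_PAE_BIT are bit *numbers*, not masks, so the old tests checked the
wrong bits entirely. Roughly, from <uapi/asm/processor-flags.h>:

	#define X86_CR0_PG_BIT	31
	#define X86_CR0_PG	_BITUL(X86_CR0_PG_BIT)	/* 0x80000000 */
	#define X86_CR4_PAE_BIT	5
	#define X86_CR4_PAE	_BITUL(X86_CR4_PAE_BIT)	/* 0x00000020 */

	/* "cr0 & X86_CR0_PG_BIT" tested bits 0-4 (0x1f), not bit 31 */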
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 7b181b61170e..91e9700cc6dc 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,6 +26,9 @@ lib-y += memcpy_$(BITS).o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+lib-$(CONFIG_RETPOLINE) += retpoline.o
+OBJECT_FILES_NON_STANDARD_retpoline.o :=y
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4d34bb548b41..46e71a74e612 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -29,7 +29,8 @@
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/export.h>
-
+#include <asm/nospec-branch.h>
+
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
@@ -156,7 +157,7 @@ ENTRY(csum_partial)
negl %ebx
lea 45f(%ebx,%ebx,2), %ebx
testl %esi, %esi
- jmp *%ebx
+ JMP_NOSPEC %ebx
# Handle 2-byte-aligned regions
20: addw (%esi), %ax
@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic)
andl $-32,%edx
lea 3f(%ebx,%ebx), %ebx
testl %esi, %esi
- jmp *%ebx
+ JMP_NOSPEC %ebx
1: addl $64,%esi
addl $64,%edi
SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 4846eff7e4c8..f5b7f1b3b6d7 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -162,7 +162,7 @@ void __delay(unsigned long loops)
}
EXPORT_SYMBOL(__delay);
-inline void __const_udelay(unsigned long xloops)
+void __const_udelay(unsigned long xloops)
{
unsigned long lpj = this_cpu_read(cpu_info.loops_per_jiffy) ? : loops_per_jiffy;
int d0;
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
new file mode 100644
index 000000000000..7b881d03d0dd
--- /dev/null
+++ b/arch/x86/lib/error-inject.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/error-injection.h>
+#include <linux/kprobes.h>
+
+asmlinkage void just_return_func(void);
+
+asm(
+ ".type just_return_func, @function\n"
+ "just_return_func:\n"
+ " ret\n"
+ ".size just_return_func, .-just_return_func\n"
+);
+
+void override_function_with_return(struct pt_regs *regs)
+{
+ regs->ip = (unsigned long)&just_return_func;
+}
+NOKPROBE_SYMBOL(override_function_with_return);
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
new file mode 100644
index 000000000000..480edc3a5e03
--- /dev/null
+++ b/arch/x86/lib/retpoline.S
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/stringify.h>
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative-asm.h>
+#include <asm/export.h>
+#include <asm/nospec-branch.h>
+#include <asm/bitsperlong.h>
+
+.macro THUNK reg
+ .section .text.__x86.indirect_thunk
+
+ENTRY(__x86_indirect_thunk_\reg)
+ CFI_STARTPROC
+ JMP_NOSPEC %\reg
+ CFI_ENDPROC
+ENDPROC(__x86_indirect_thunk_\reg)
+.endm
+
+/*
+ * Despite being an assembler file we can't just use .irp here
+ * because __KSYM_DEPS__ only uses the C preprocessor and would
+ * only see one instance of "__x86_indirect_thunk_\reg" rather
+ * than one per register with the correct names. So we do it
+ * the simple and nasty way...
+ */
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
+#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
+
+GENERATE_THUNK(_ASM_AX)
+GENERATE_THUNK(_ASM_BX)
+GENERATE_THUNK(_ASM_CX)
+GENERATE_THUNK(_ASM_DX)
+GENERATE_THUNK(_ASM_SI)
+GENERATE_THUNK(_ASM_DI)
+GENERATE_THUNK(_ASM_BP)
+#ifdef CONFIG_64BIT
+GENERATE_THUNK(r8)
+GENERATE_THUNK(r9)
+GENERATE_THUNK(r10)
+GENERATE_THUNK(r11)
+GENERATE_THUNK(r12)
+GENERATE_THUNK(r13)
+GENERATE_THUNK(r14)
+GENERATE_THUNK(r15)
+#endif
+
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version - two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+.macro STUFF_RSB nr:req sp:req
+ mov $(\nr / 2), %_ASM_BX
+ .align 16
+771:
+ call 772f
+773: /* speculation trap */
+ pause
+ lfence
+ jmp 773b
+ .align 16
+772:
+ call 774f
+775: /* speculation trap */
+ pause
+ lfence
+ jmp 775b
+ .align 16
+774:
+ dec %_ASM_BX
+ jnz 771b
+ add $((BITS_PER_LONG/8) * \nr), \sp
+.endm
+
+#define RSB_FILL_LOOPS 16 /* To avoid underflow */
+
+ENTRY(__fill_rsb)
+ STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
+ ret
+END(__fill_rsb)
+EXPORT_SYMBOL_GPL(__fill_rsb)
+
+#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
+
+ENTRY(__clear_rsb)
+ STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
+ ret
+END(__clear_rsb)
+EXPORT_SYMBOL_GPL(__clear_rsb)
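
A usage sketch for the thunks this file generates (taken from the KVM hunks
earlier in this diff, so grounded in the patch itself): indirect-call sites
switch from "call *%reg" to the CALL_NOSPEC alternative and name the target
with the [thunk_target] constraint:

	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));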
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index f56902c1f04b..2a4849e92831 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -61,10 +61,10 @@ enum address_markers_idx {
KASAN_SHADOW_START_NR,
KASAN_SHADOW_END_NR,
#endif
+ CPU_ENTRY_AREA_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
LDT_NR,
#endif
- CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
ESPFIX_START_NR,
#endif
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 9fe656c42aa5..45f5d6cf65ae 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -21,16 +21,16 @@ ex_fixup_handler(const struct exception_table_entry *x)
return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}
-bool ex_handler_default(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_default(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
regs->ip = ex_fixup_addr(fixup);
return true;
}
EXPORT_SYMBOL(ex_handler_default);
-bool ex_handler_fault(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
regs->ip = ex_fixup_addr(fixup);
regs->ax = trapnr;
@@ -42,8 +42,8 @@ EXPORT_SYMBOL_GPL(ex_handler_fault);
* Handler for UD0 exception following a failed test against the
* result of a refcount inc/dec/add/sub.
*/
-bool ex_handler_refcount(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_refcount(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
/* First unconditionally saturate the refcount. */
*(int *)regs->cx = INT_MIN / 2;
@@ -95,8 +95,8 @@ EXPORT_SYMBOL(ex_handler_refcount);
* of vulnerability by restoring from the initial state (essentially, zeroing
* out all the FPU registers) if we can't restore from the task's FPU state.
*/
-bool ex_handler_fprestore(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
regs->ip = ex_fixup_addr(fixup);
@@ -108,8 +108,8 @@ bool ex_handler_fprestore(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
-bool ex_handler_ext(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_ext(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
/* Special hack for uaccess_err */
current->thread.uaccess_err = 1;
@@ -118,8 +118,8 @@ bool ex_handler_ext(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_ext);
-bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n",
(unsigned int)regs->cx, regs->ip, (void *)regs->ip))
@@ -133,8 +133,8 @@ bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
-bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n",
(unsigned int)regs->cx, (unsigned int)regs->dx,
@@ -147,8 +147,8 @@ bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
-bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
if (static_cpu_has(X86_BUG_NULL_SEG))
asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
@@ -157,7 +157,7 @@ bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_clear_fs);
-bool ex_has_fault_handler(unsigned long ip)
+__visible bool ex_has_fault_handler(unsigned long ip)
{
const struct exception_table_entry *e;
ex_handler_t handler;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 06fe3d51d385..800de815519c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -172,14 +172,15 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
-static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
+static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
+ u32 *pkey)
{
/* This is effectively an #ifdef */
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return;
/* Fault not from Protection Keys: nothing to do */
- if (si_code != SEGV_PKUERR)
+ if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
return;
/*
* force_sig_info_fault() is called from a number of
@@ -218,7 +219,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
lsb = PAGE_SHIFT;
info.si_addr_lsb = lsb;
- fill_sig_info_pkey(si_code, &info, pkey);
+ fill_sig_info_pkey(si_signo, si_code, &info, pkey);
force_sig_info(si_signo, &info, tsk);
}
@@ -438,18 +439,13 @@ static noinline int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd_ref))
return -1;
- if (pgd_none(*pgd)) {
- set_pgd(pgd, *pgd_ref);
- arch_flush_lazy_mmu_mode();
- } else if (CONFIG_PGTABLE_LEVELS > 4) {
- /*
- * With folded p4d, pgd_none() is always false, so the pgd may
- * point to an empty page table entry and pgd_page_vaddr()
- * will return garbage.
- *
- * We will do the correct sanity check on the p4d level.
- */
- BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ if (CONFIG_PGTABLE_LEVELS > 4) {
+ if (pgd_none(*pgd)) {
+ set_pgd(pgd, *pgd_ref);
+ arch_flush_lazy_mmu_mode();
+ } else {
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ }
}
/* With 4-level paging, copying happens on the p4d level. */
@@ -458,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (p4d_none(*p4d_ref))
return -1;
- if (p4d_none(*p4d)) {
+ if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
set_p4d(p4d, *p4d_ref);
arch_flush_lazy_mmu_mode();
} else {
@@ -469,6 +465,7 @@ static noinline int vmalloc_fault(unsigned long address)
* Below here mismatches are bugs because these lower tables
* are shared:
*/
+ BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
pud = pud_offset(p4d, address);
pud_ref = pud_offset(p4d_ref, address);
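
To spell out the paging-mode split the vmalloc_fault() hunk encodes (an
explanatory note, condensed from the hunk above): with CONFIG_PGTABLE_LEVELS
greater than 4 (5-level paging) the pgd entry is real and gets copied from
the reference page tables at the pgd level; with 4 levels the pgd is folded
into the p4d, pgd_none() is never true, and the copy happens one level down
in the p4d_none() branch instead.

	if (CONFIG_PGTABLE_LEVELS > 4) {	/* 5-level: copy at pgd */
		if (pgd_none(*pgd))
			set_pgd(pgd, *pgd_ref);
	}
	/* 4-level: pgd folded, copy at p4d */
	if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4)
		set_p4d(p4d, *p4d_ref);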
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8ca324d07282..82f5252c723a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -868,7 +868,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.next_asid = 1,
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};
-EXPORT_SYMBOL_GPL(cpu_tlbstate);
+EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 47388f0c0e59..af6f2f9c6a26 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -21,10 +21,14 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
-static __init void *early_alloc(size_t size, int nid)
+static __init void *early_alloc(size_t size, int nid, bool panic)
{
- return memblock_virt_alloc_try_nid_nopanic(size, size,
- __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ if (panic)
+ return memblock_virt_alloc_try_nid(size, size,
+ __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ else
+ return memblock_virt_alloc_try_nid_nopanic(size, size,
+ __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
}
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
@@ -38,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
if (boot_cpu_has(X86_FEATURE_PSE) &&
((end - addr) == PMD_SIZE) &&
IS_ALIGNED(addr, PMD_SIZE)) {
- p = early_alloc(PMD_SIZE, nid);
+ p = early_alloc(PMD_SIZE, nid, false);
if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
return;
else if (p)
memblock_free(__pa(p), PMD_SIZE);
}
- p = early_alloc(PAGE_SIZE, nid);
+ p = early_alloc(PAGE_SIZE, nid, true);
pmd_populate_kernel(&init_mm, pmd, p);
}
@@ -57,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
if (!pte_none(*pte))
continue;
- p = early_alloc(PAGE_SIZE, nid);
+ p = early_alloc(PAGE_SIZE, nid, true);
entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
set_pte_at(&init_mm, addr, pte, entry);
} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -75,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
((end - addr) == PUD_SIZE) &&
IS_ALIGNED(addr, PUD_SIZE)) {
- p = early_alloc(PUD_SIZE, nid);
+ p = early_alloc(PUD_SIZE, nid, false);
if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
return;
else if (p)
memblock_free(__pa(p), PUD_SIZE);
}
- p = early_alloc(PAGE_SIZE, nid);
+ p = early_alloc(PAGE_SIZE, nid, true);
pud_populate(&init_mm, pud, p);
}
@@ -101,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
unsigned long next;
if (p4d_none(*p4d)) {
- void *p = early_alloc(PAGE_SIZE, nid);
+ void *p = early_alloc(PAGE_SIZE, nid, true);
p4d_populate(&init_mm, p4d, p);
}
@@ -122,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
unsigned long next;
if (pgd_none(*pgd)) {
- p = early_alloc(PAGE_SIZE, nid);
+ p = early_alloc(PAGE_SIZE, nid, true);
pgd_populate(&init_mm, pgd, p);
}
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 879ef930e2c2..aedebd2ebf1e 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -34,25 +34,14 @@
#define TB_SHIFT 40
/*
- * Virtual address start and end range for randomization. The end changes base
- * on configuration to have the highest amount of space for randomization.
- * It increases the possible random position for each randomized region.
+ * Virtual address start and end range for randomization.
*
- * You need to add an if/def entry if you introduce a new memory region
- * compatible with KASLR. Your entry must be in logical order with memory
- * layout. For example, ESPFIX is before EFI because its virtual address is
- * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
- * ensure that this order is correct and won't be changed.
+ * The end address could depend on more configuration options to maximize
+ * the space available for randomization, but that is too hard to keep
+ * straight and has already caused issues.
*/
static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
-
-#if defined(CONFIG_X86_ESPFIX64)
-static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
-#elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_END;
-#else
-static const unsigned long vaddr_end = __START_KERNEL_map;
-#endif
+static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
/* Default values */
unsigned long page_offset_base = __PAGE_OFFSET_BASE;
@@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
unsigned long remain_entropy;
/*
- * All these BUILD_BUG_ON checks ensures the memory layout is
- * consistent with the vaddr_start/vaddr_end variables.
+ * These BUILD_BUG_ON checks ensure the memory layout is consistent
+ * with the vaddr_start/vaddr_end variables. These checks are very
+ * limited.
*/
BUILD_BUG_ON(vaddr_start >= vaddr_end);
- BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
- vaddr_end >= EFI_VA_END);
- BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
- IS_ENABLED(CONFIG_EFI)) &&
- vaddr_end >= __START_KERNEL_map);
+ BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
if (!kaslr_memory_enabled())
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 391b13402e40..1a53071e2e17 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -15,7 +15,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
@@ -464,37 +464,62 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
}
-static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
- unsigned long end)
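+/*
+ * State shared by the SME pagetable population helpers: the PGD being
+ * built, the next free pgtable area, the mapping flags and the
+ * physical/virtual range being mapped.
+ */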
+struct sme_populate_pgd_data {
+ void *pgtable_area;
+ pgd_t *pgd;
+
+ pmdval_t pmd_flags;
+ pteval_t pte_flags;
+ unsigned long paddr;
+
+ unsigned long vaddr;
+ unsigned long vaddr_end;
+};
+
+static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
unsigned long pgd_start, pgd_end, pgd_size;
pgd_t *pgd_p;
- pgd_start = start & PGDIR_MASK;
- pgd_end = end & PGDIR_MASK;
+ pgd_start = ppd->vaddr & PGDIR_MASK;
+ pgd_end = ppd->vaddr_end & PGDIR_MASK;
- pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
- pgd_size *= sizeof(pgd_t);
+ pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
- pgd_p = pgd_base + pgd_index(start);
+ pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
memset(pgd_p, 0, pgd_size);
}
-#define PGD_FLAGS _KERNPG_TABLE_NOENC
-#define P4D_FLAGS _KERNPG_TABLE_NOENC
-#define PUD_FLAGS _KERNPG_TABLE_NOENC
-#define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+#define PGD_FLAGS _KERNPG_TABLE_NOENC
+#define P4D_FLAGS _KERNPG_TABLE_NOENC
+#define PUD_FLAGS _KERNPG_TABLE_NOENC
+#define PMD_FLAGS _KERNPG_TABLE_NOENC
+
+#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+
+#define PMD_FLAGS_DEC PMD_FLAGS_LARGE
+#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+ (_PAGE_PAT | _PAGE_PWT))
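+/* _PAGE_PAT | _PAGE_PWT selects PAT entry 5, programmed to WP by __enc_copy() */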
+
+#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
+
+#define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
+
+#define PTE_FLAGS_DEC PTE_FLAGS
+#define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+ (_PAGE_PAT | _PAGE_PWT))
+
+#define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC)
-static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
- unsigned long vaddr, pmdval_t pmd_val)
+static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
pgd_t *pgd_p;
p4d_t *p4d_p;
pud_t *pud_p;
pmd_t *pmd_p;
- pgd_p = pgd_base + pgd_index(vaddr);
+ pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
if (native_pgd_val(*pgd_p)) {
if (IS_ENABLED(CONFIG_X86_5LEVEL))
p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
@@ -504,15 +529,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
pgd_t pgd;
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
- p4d_p = pgtable_area;
+ p4d_p = ppd->pgtable_area;
memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
- pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
+ ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
} else {
- pud_p = pgtable_area;
+ pud_p = ppd->pgtable_area;
memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
- pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+ ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
}
@@ -520,58 +545,160 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
}
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
- p4d_p += p4d_index(vaddr);
+ p4d_p += p4d_index(ppd->vaddr);
if (native_p4d_val(*p4d_p)) {
pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
} else {
p4d_t p4d;
- pud_p = pgtable_area;
+ pud_p = ppd->pgtable_area;
memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
- pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+ ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
native_set_p4d(p4d_p, p4d);
}
}
- pud_p += pud_index(vaddr);
+ pud_p += pud_index(ppd->vaddr);
if (native_pud_val(*pud_p)) {
if (native_pud_val(*pud_p) & _PAGE_PSE)
- goto out;
+ return NULL;
pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
} else {
pud_t pud;
- pmd_p = pgtable_area;
+ pmd_p = ppd->pgtable_area;
memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
- pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
+ ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
native_set_pud(pud_p, pud);
}
- pmd_p += pmd_index(vaddr);
+ return pmd_p;
+}
+
+static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+{
+ pmd_t *pmd_p;
+
+ pmd_p = sme_prepare_pgd(ppd);
+ if (!pmd_p)
+ return;
+
+ pmd_p += pmd_index(ppd->vaddr);
if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
- native_set_pmd(pmd_p, native_make_pmd(pmd_val));
+ native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
+}
-out:
- return pgtable_area;
+static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+{
+ pmd_t *pmd_p;
+ pte_t *pte_p;
+
+ pmd_p = sme_prepare_pgd(ppd);
+ if (!pmd_p)
+ return;
+
+ pmd_p += pmd_index(ppd->vaddr);
+ if (native_pmd_val(*pmd_p)) {
+ if (native_pmd_val(*pmd_p) & _PAGE_PSE)
+ return;
+
+ pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
+ } else {
+ pmd_t pmd;
+
+ pte_p = ppd->pgtable_area;
+ memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
+ ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
+
+ pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
+ native_set_pmd(pmd_p, pmd);
+ }
+
+ pte_p += pte_index(ppd->vaddr);
+ if (!native_pte_val(*pte_p))
+ native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
+}
+
+static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+{
+ while (ppd->vaddr < ppd->vaddr_end) {
+ sme_populate_pgd_large(ppd);
+
+ ppd->vaddr += PMD_PAGE_SIZE;
+ ppd->paddr += PMD_PAGE_SIZE;
+ }
+}
+
+static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+{
+ while (ppd->vaddr < ppd->vaddr_end) {
+ sme_populate_pgd(ppd);
+
+ ppd->vaddr += PAGE_SIZE;
+ ppd->paddr += PAGE_SIZE;
+ }
+}
+
+static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+ pmdval_t pmd_flags, pteval_t pte_flags)
+{
+ unsigned long vaddr_end;
+
+ ppd->pmd_flags = pmd_flags;
+ ppd->pte_flags = pte_flags;
+
+ /* Save original end value since we modify the struct value */
+ vaddr_end = ppd->vaddr_end;
+
+ /* If start is not 2MB aligned, create PTE entries */
+ ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
+ __sme_map_range_pte(ppd);
+
+ /* Create PMD entries */
+ ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
+ __sme_map_range_pmd(ppd);
+
+ /* If end is not 2MB aligned, create PTE entries */
+ ppd->vaddr_end = vaddr_end;
+ __sme_map_range_pte(ppd);
+}
+
+static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+{
+ __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
+}
+
+static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+{
+ __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
+}
+
+static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+{
+ __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
static unsigned long __init sme_pgtable_calc(unsigned long len)
{
- unsigned long p4d_size, pud_size, pmd_size;
+ unsigned long p4d_size, pud_size, pmd_size, pte_size;
unsigned long total;
/*
* Perform a relatively simplistic calculation of the pagetable
- * entries that are needed. That mappings will be covered by 2MB
- * PMD entries so we can conservatively calculate the required
+ * entries that are needed. Those mappings will be covered mostly
+ * by 2MB PMD entries so we can conservatively calculate the required
* number of P4D, PUD and PMD structures needed to perform the
- * mappings. Incrementing the count for each covers the case where
- * the addresses cross entries.
+ * mappings. For mappings that are not 2MB aligned, PTE mappings
+ * would be needed for the start and end portion of the address range
+ * that fall outside of the 2MB alignment. This results in, at most,
+ * two extra pages to hold PTE entries for each range that is mapped.
+ * Incrementing the count for each covers the case where the addresses
+ * cross entries.
*/
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
@@ -585,8 +712,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
}
pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
+ pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
- total = p4d_size + pud_size + pmd_size;
+ total = p4d_size + pud_size + pmd_size + pte_size;
/*
* Now calculate the added pagetable structures needed to populate
@@ -610,29 +738,29 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
return total;
}
-void __init sme_encrypt_kernel(void)
+void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
{
unsigned long workarea_start, workarea_end, workarea_len;
unsigned long execute_start, execute_end, execute_len;
unsigned long kernel_start, kernel_end, kernel_len;
+ unsigned long initrd_start, initrd_end, initrd_len;
+ struct sme_populate_pgd_data ppd;
unsigned long pgtable_area_len;
- unsigned long paddr, pmd_flags;
unsigned long decrypted_base;
- void *pgtable_area;
- pgd_t *pgd;
if (!sme_active())
return;
/*
- * Prepare for encrypting the kernel by building new pagetables with
- * the necessary attributes needed to encrypt the kernel in place.
+ * Prepare for encrypting the kernel and initrd by building new
+ * pagetables with the necessary attributes needed to encrypt the
+ * kernel in place.
*
* One range of virtual addresses will map the memory occupied
- * by the kernel as encrypted.
+ * by the kernel and initrd as encrypted.
*
* Another range of virtual addresses will map the memory occupied
- * by the kernel as decrypted and write-protected.
+ * by the kernel and initrd as decrypted and write-protected.
*
* The use of the write-protect attribute will prevent any of the
* memory from being cached.
@@ -643,6 +771,20 @@ void __init sme_encrypt_kernel(void)
kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
kernel_len = kernel_end - kernel_start;
+ initrd_start = 0;
+ initrd_end = 0;
+ initrd_len = 0;
+#ifdef CONFIG_BLK_DEV_INITRD
+ initrd_len = (unsigned long)bp->hdr.ramdisk_size |
+ ((unsigned long)bp->ext_ramdisk_size << 32);
+ if (initrd_len) {
+ initrd_start = (unsigned long)bp->hdr.ramdisk_image |
+ ((unsigned long)bp->ext_ramdisk_image << 32);
+ initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
+ initrd_len = initrd_end - initrd_start;
+ }
+#endif
+
/* Set the encryption workarea to be immediately after the kernel */
workarea_start = kernel_end;
@@ -665,16 +807,21 @@ void __init sme_encrypt_kernel(void)
*/
pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
+ if (initrd_len)
+ pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
/* PUDs and PMDs needed in the current pagetables for the workarea */
pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
/*
* The total workarea includes the executable encryption area and
- * the pagetable area.
+ * the pagetable area. The start of the workarea is already 2MB
+ * aligned, align the end of the workarea on a 2MB boundary so that
+ * we don't try to create/allocate PTE entries from the workarea
+ * before it is mapped.
*/
workarea_len = execute_len + pgtable_area_len;
- workarea_end = workarea_start + workarea_len;
+ workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
/*
* Set the address to the start of where newly created pagetable
@@ -683,45 +830,30 @@ void __init sme_encrypt_kernel(void)
* pagetables and when the new encrypted and decrypted kernel
* mappings are populated.
*/
- pgtable_area = (void *)execute_end;
+ ppd.pgtable_area = (void *)execute_end;
/*
* Make sure the current pagetable structure has entries for
* addressing the workarea.
*/
- pgd = (pgd_t *)native_read_cr3_pa();
- paddr = workarea_start;
- while (paddr < workarea_end) {
- pgtable_area = sme_populate_pgd(pgd, pgtable_area,
- paddr,
- paddr + PMD_FLAGS);
-
- paddr += PMD_PAGE_SIZE;
- }
+ ppd.pgd = (pgd_t *)native_read_cr3_pa();
+ ppd.paddr = workarea_start;
+ ppd.vaddr = workarea_start;
+ ppd.vaddr_end = workarea_end;
+ sme_map_range_decrypted(&ppd);
/* Flush the TLB - no globals so cr3 is enough */
native_write_cr3(__native_read_cr3());
/*
* A new pagetable structure is being built to allow for the kernel
- * to be encrypted. It starts with an empty PGD that will then be
- * populated with new PUDs and PMDs as the encrypted and decrypted
- * kernel mappings are created.
+ * and initrd to be encrypted. It starts with an empty PGD that will
+ * then be populated with new PUDs and PMDs as the encrypted and
+ * decrypted kernel mappings are created.
*/
- pgd = pgtable_area;
- memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
- pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
-
- /* Add encrypted kernel (identity) mappings */
- pmd_flags = PMD_FLAGS | _PAGE_ENC;
- paddr = kernel_start;
- while (paddr < kernel_end) {
- pgtable_area = sme_populate_pgd(pgd, pgtable_area,
- paddr,
- paddr + pmd_flags);
-
- paddr += PMD_PAGE_SIZE;
- }
+ ppd.pgd = ppd.pgtable_area;
+ memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
+ ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
/*
* A different PGD index/entry must be used to get different
@@ -730,47 +862,79 @@ void __init sme_encrypt_kernel(void)
* the base of the mapping.
*/
decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
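+ /*
+ * If the initrd ends in a higher PGD entry than the workarea, base
+ * the decrypted (non-identity) mappings above the initrd instead.
+ */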
+ if (initrd_len) {
+ unsigned long check_base;
+
+ check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
+ decrypted_base = max(decrypted_base, check_base);
+ }
decrypted_base <<= PGDIR_SHIFT;
+ /* Add encrypted kernel (identity) mappings */
+ ppd.paddr = kernel_start;
+ ppd.vaddr = kernel_start;
+ ppd.vaddr_end = kernel_end;
+ sme_map_range_encrypted(&ppd);
+
/* Add decrypted, write-protected kernel (non-identity) mappings */
- pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
- paddr = kernel_start;
- while (paddr < kernel_end) {
- pgtable_area = sme_populate_pgd(pgd, pgtable_area,
- paddr + decrypted_base,
- paddr + pmd_flags);
-
- paddr += PMD_PAGE_SIZE;
+ ppd.paddr = kernel_start;
+ ppd.vaddr = kernel_start + decrypted_base;
+ ppd.vaddr_end = kernel_end + decrypted_base;
+ sme_map_range_decrypted_wp(&ppd);
+
+ if (initrd_len) {
+ /* Add encrypted initrd (identity) mappings */
+ ppd.paddr = initrd_start;
+ ppd.vaddr = initrd_start;
+ ppd.vaddr_end = initrd_end;
+ sme_map_range_encrypted(&ppd);
+ /*
+ * Add decrypted, write-protected initrd (non-identity) mappings
+ */
+ ppd.paddr = initrd_start;
+ ppd.vaddr = initrd_start + decrypted_base;
+ ppd.vaddr_end = initrd_end + decrypted_base;
+ sme_map_range_decrypted_wp(&ppd);
}
/* Add decrypted workarea mappings to both kernel mappings */
- paddr = workarea_start;
- while (paddr < workarea_end) {
- pgtable_area = sme_populate_pgd(pgd, pgtable_area,
- paddr,
- paddr + PMD_FLAGS);
+ ppd.paddr = workarea_start;
+ ppd.vaddr = workarea_start;
+ ppd.vaddr_end = workarea_end;
+ sme_map_range_decrypted(&ppd);
- pgtable_area = sme_populate_pgd(pgd, pgtable_area,
- paddr + decrypted_base,
- paddr + PMD_FLAGS);
-
- paddr += PMD_PAGE_SIZE;
- }
+ ppd.paddr = workarea_start;
+ ppd.vaddr = workarea_start + decrypted_base;
+ ppd.vaddr_end = workarea_end + decrypted_base;
+ sme_map_range_decrypted(&ppd);
/* Perform the encryption */
sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
- kernel_len, workarea_start, (unsigned long)pgd);
+ kernel_len, workarea_start, (unsigned long)ppd.pgd);
+
+ if (initrd_len)
+ sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
+ initrd_len, workarea_start,
+ (unsigned long)ppd.pgd);
/*
* At this point we are running encrypted. Remove the mappings for
* the decrypted areas - all that is needed for this is to remove
* the PGD entry/entries.
*/
- sme_clear_pgd(pgd, kernel_start + decrypted_base,
- kernel_end + decrypted_base);
+ ppd.vaddr = kernel_start + decrypted_base;
+ ppd.vaddr_end = kernel_end + decrypted_base;
+ sme_clear_pgd(&ppd);
+
+ if (initrd_len) {
+ ppd.vaddr = initrd_start + decrypted_base;
+ ppd.vaddr_end = initrd_end + decrypted_base;
+ sme_clear_pgd(&ppd);
+ }
- sme_clear_pgd(pgd, workarea_start + decrypted_base,
- workarea_end + decrypted_base);
+ ppd.vaddr = workarea_start + decrypted_base;
+ ppd.vaddr_end = workarea_end + decrypted_base;
+ sme_clear_pgd(&ppd);
/* Flush the TLB - no globals so cr3 is enough */
native_write_cr3(__native_read_cr3());
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 730e6d541df1..01f682cf77a8 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -22,9 +22,9 @@ ENTRY(sme_encrypt_execute)
/*
* Entry parameters:
- * RDI - virtual address for the encrypted kernel mapping
- * RSI - virtual address for the decrypted kernel mapping
- * RDX - length of kernel
+ * RDI - virtual address for the encrypted mapping
+ * RSI - virtual address for the decrypted mapping
+ * RDX - length to encrypt
* RCX - virtual address of the encryption workarea, including:
* - stack page (PAGE_SIZE)
* - encryption routine page (PAGE_SIZE)
@@ -41,9 +41,9 @@ ENTRY(sme_encrypt_execute)
addq $PAGE_SIZE, %rax /* Workarea encryption routine */
push %r12
- movq %rdi, %r10 /* Encrypted kernel */
- movq %rsi, %r11 /* Decrypted kernel */
- movq %rdx, %r12 /* Kernel length */
+ movq %rdi, %r10 /* Encrypted area */
+ movq %rsi, %r11 /* Decrypted area */
+ movq %rdx, %r12 /* Area length */
/* Copy encryption routine into the workarea */
movq %rax, %rdi /* Workarea encryption routine */
@@ -52,10 +52,10 @@ ENTRY(sme_encrypt_execute)
rep movsb
/* Setup registers for call */
- movq %r10, %rdi /* Encrypted kernel */
- movq %r11, %rsi /* Decrypted kernel */
+ movq %r10, %rdi /* Encrypted area */
+ movq %r11, %rsi /* Decrypted area */
movq %r8, %rdx /* Pagetables used for encryption */
- movq %r12, %rcx /* Kernel length */
+ movq %r12, %rcx /* Area length */
movq %rax, %r8 /* Workarea encryption routine */
addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */
@@ -71,7 +71,7 @@ ENDPROC(sme_encrypt_execute)
ENTRY(__enc_copy)
/*
- * Routine used to encrypt kernel.
+ * Routine used to encrypt memory in place.
* This routine must be run outside of the kernel proper since
* the kernel will be encrypted during the process. So this
* routine is defined here and then copied to an area outside
@@ -79,19 +79,19 @@ ENTRY(__enc_copy)
* during execution.
*
* On entry the registers must be:
- * RDI - virtual address for the encrypted kernel mapping
- * RSI - virtual address for the decrypted kernel mapping
+ * RDI - virtual address for the encrypted mapping
+ * RSI - virtual address for the decrypted mapping
* RDX - address of the pagetables to use for encryption
- * RCX - length of kernel
+ * RCX - length of area
* R8 - intermediate copy buffer
*
* RAX - points to this routine
*
- * The kernel will be encrypted by copying from the non-encrypted
- * kernel space to an intermediate buffer and then copying from the
- * intermediate buffer back to the encrypted kernel space. The physical
- * addresses of the two kernel space mappings are the same which
- * results in the kernel being encrypted "in place".
+ * The area will be encrypted by copying from the non-encrypted
+ * memory space to an intermediate buffer and then copying from the
+ * intermediate buffer back to the encrypted memory space. The physical
+ * addresses of the two mappings are the same which results in the area
+ * being encrypted "in place".
*/
/* Enable the new page tables */
mov %rdx, %cr3
@@ -103,47 +103,55 @@ ENTRY(__enc_copy)
orq $X86_CR4_PGE, %rdx
mov %rdx, %cr4
+ push %r15
+ push %r12
+
+ movq %rcx, %r9 /* Save area length */
+ movq %rdi, %r10 /* Save encrypted area address */
+ movq %rsi, %r11 /* Save decrypted area address */
+
/* Set the PAT register PA5 entry to write-protect */
- push %rcx
movl $MSR_IA32_CR_PAT, %ecx
rdmsr
- push %rdx /* Save original PAT value */
+ mov %rdx, %r15 /* Save original PAT value */
andl $0xffff00ff, %edx /* Clear PA5 */
orl $0x00000500, %edx /* Set PA5 to WP */
wrmsr
- pop %rdx /* RDX contains original PAT value */
- pop %rcx
-
- movq %rcx, %r9 /* Save kernel length */
- movq %rdi, %r10 /* Save encrypted kernel address */
- movq %rsi, %r11 /* Save decrypted kernel address */
wbinvd /* Invalidate any cache entries */
- /* Copy/encrypt 2MB at a time */
+ /* Copy/encrypt up to 2MB at a time */
+ movq $PMD_PAGE_SIZE, %r12
1:
- movq %r11, %rsi /* Source - decrypted kernel */
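+ /* Clamp the copy size to the remaining area length */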
+ cmpq %r12, %r9
+ jnb 2f
+ movq %r9, %r12
+
+2:
+ movq %r11, %rsi /* Source - decrypted area */
movq %r8, %rdi /* Dest - intermediate copy buffer */
- movq $PMD_PAGE_SIZE, %rcx /* 2MB length */
+ movq %r12, %rcx
rep movsb
movq %r8, %rsi /* Source - intermediate copy buffer */
- movq %r10, %rdi /* Dest - encrypted kernel */
- movq $PMD_PAGE_SIZE, %rcx /* 2MB length */
+ movq %r10, %rdi /* Dest - encrypted area */
+ movq %r12, %rcx
rep movsb
- addq $PMD_PAGE_SIZE, %r11
- addq $PMD_PAGE_SIZE, %r10
- subq $PMD_PAGE_SIZE, %r9 /* Kernel length decrement */
+ addq %r12, %r11
+ addq %r12, %r10
+ subq %r12, %r9 /* Area length decrement */
jnz 1b /* Area length not zero? */
/* Restore PAT register */
- push %rdx /* Save original PAT value */
movl $MSR_IA32_CR_PAT, %ecx
rdmsr
- pop %rdx /* Restore original PAT value */
+ mov %r15, %rdx /* Restore original PAT value */
wrmsr
+ pop %r12
+ pop %r15
+
ret
.L__enc_copy_end:
ENDPROC(__enc_copy)
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index bce8aea65606..ce38f165489b 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -56,13 +56,13 @@
static void __init pti_print_if_insecure(const char *reason)
{
- if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
+ if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
pr_info("%s\n", reason);
}
static void __init pti_print_if_secure(const char *reason)
{
- if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
+ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
pr_info("%s\n", reason);
}
@@ -96,7 +96,7 @@ void __init pti_check_boottime_disable(void)
}
autosel:
- if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
+ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
return;
enable:
setup_force_cpu_cap(X86_FEATURE_PTI);
@@ -149,7 +149,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
*
* Returns a pointer to a P4D on success, or NULL on failure.
*/
-static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
+static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
@@ -164,12 +164,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
if (!new_p4d_page)
return NULL;
- if (pgd_none(*pgd)) {
- set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
- new_p4d_page = 0;
- }
- if (new_p4d_page)
- free_page(new_p4d_page);
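+ /*
+ * This walker now runs only at init time, so no other CPU can
+ * race to populate the entry; install it unconditionally.
+ */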
+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
}
BUILD_BUG_ON(pgd_large(*pgd) != 0);
@@ -182,7 +177,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
*
* Returns a pointer to a PMD on success, or NULL on failure.
*/
-static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
@@ -194,12 +189,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
if (!new_pud_page)
return NULL;
- if (p4d_none(*p4d)) {
- set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
- new_pud_page = 0;
- }
- if (new_pud_page)
- free_page(new_pud_page);
+ set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
}
pud = pud_offset(p4d, address);
@@ -213,12 +203,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
if (!new_pmd_page)
return NULL;
- if (pud_none(*pud)) {
- set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
- new_pmd_page = 0;
- }
- if (new_pmd_page)
- free_page(new_pmd_page);
+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
}
return pmd_offset(pud, address);
@@ -251,12 +236,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
if (!new_pte_page)
return NULL;
- if (pmd_none(*pmd)) {
- set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
- new_pte_page = 0;
- }
- if (new_pte_page)
- free_page(new_pte_page);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
}
pte = pte_offset_kernel(pmd, address);
@@ -367,7 +347,8 @@ static void __init pti_setup_espfix64(void)
static void __init pti_clone_entry_text(void)
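+ /*
+ * Clone with _PAGE_GLOBAL so the user-side alias of the entry text
+ * carries the same attributes as the kernel mapping.
+ */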
{
pti_clone_pmds((unsigned long) __entry_text_start,
- (unsigned long) __irqentry_text_end, _PAGE_RW);
+ (unsigned long) __irqentry_text_end,
+ _PAGE_RW | _PAGE_GLOBAL);
}
/*
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a1561957dccb..5bfe61a5e8e3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -151,6 +151,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
local_irq_restore(flags);
}
+static void sync_current_stack_to_mm(struct mm_struct *mm)
+{
+ unsigned long sp = current_stack_pointer;
+ pgd_t *pgd = pgd_offset(mm, sp);
+
+ if (CONFIG_PGTABLE_LEVELS > 4) {
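+ /* With 5-level paging the pgd really is the top-level entry. */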
+ if (unlikely(pgd_none(*pgd))) {
+ pgd_t *pgd_ref = pgd_offset_k(sp);
+
+ set_pgd(pgd, *pgd_ref);
+ }
+ } else {
+ /*
+ * "pgd" is faked. The top level entries are "p4d"s, so sync
+ * the p4d. This compiles to approximately the same code as
+ * the 5-level case.
+ */
+ p4d_t *p4d = p4d_offset(pgd, sp);
+
+ if (unlikely(p4d_none(*p4d))) {
+ pgd_t *pgd_ref = pgd_offset_k(sp);
+ p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
+
+ set_p4d(p4d, *p4d_ref);
+ }
+ }
+}
+
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
@@ -226,11 +254,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* mapped in the new pgd, we'll double-fault. Forcibly
* map it.
*/
- unsigned int index = pgd_index(current_stack_pointer);
- pgd_t *pgd = next->pgd + index;
-
- if (unlikely(pgd_none(*pgd)))
- set_pgd(pgd, init_mm.pgd[index]);
+ sync_current_stack_to_mm(next);
}
/* Stop remote flushes for the previous mm */
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 0554e8aef4d5..4923d92f918d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -15,8 +15,6 @@
#include <asm/set_memory.h>
#include <linux/bpf.h>
-int bpf_jit_enable __read_mostly;
-
/*
* assembly code in arch/x86/net/bpf_jit.S
*/
@@ -154,6 +152,11 @@ static bool is_ereg(u32 reg)
BIT(BPF_REG_AX));
}
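+/* x86 has short-form (no ModRM byte) ALU-with-imm32 opcodes only for eax/rax. */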
+static bool is_axreg(u32 reg)
+{
+ return reg == BPF_REG_0;
+}
+
/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
@@ -447,16 +450,36 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
else if (is_ereg(dst_reg))
EMIT1(add_1mod(0x40, dst_reg));
+ /* b3 holds the 'normal' opcode; the b2 short form is only
+ * valid when dst is eax/rax.
+ */
switch (BPF_OP(insn->code)) {
- case BPF_ADD: b3 = 0xC0; break;
- case BPF_SUB: b3 = 0xE8; break;
- case BPF_AND: b3 = 0xE0; break;
- case BPF_OR: b3 = 0xC8; break;
- case BPF_XOR: b3 = 0xF0; break;
+ case BPF_ADD:
+ b3 = 0xC0;
+ b2 = 0x05;
+ break;
+ case BPF_SUB:
+ b3 = 0xE8;
+ b2 = 0x2D;
+ break;
+ case BPF_AND:
+ b3 = 0xE0;
+ b2 = 0x25;
+ break;
+ case BPF_OR:
+ b3 = 0xC8;
+ b2 = 0x0D;
+ break;
+ case BPF_XOR:
+ b3 = 0xF0;
+ b2 = 0x35;
+ break;
}
if (is_imm8(imm32))
EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
+ else if (is_axreg(dst_reg))
+ EMIT1_off32(b2, imm32);
else
EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
break;
@@ -545,26 +568,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
*/
EMIT2(0x31, 0xd2);
- if (BPF_SRC(insn->code) == BPF_X) {
- /* if (src_reg == 0) return 0 */
-
- /* cmp r11, 0 */
- EMIT4(0x49, 0x83, 0xFB, 0x00);
-
- /* jne .+9 (skip over pop, pop, xor and jmp) */
- EMIT2(X86_JNE, 1 + 1 + 2 + 5);
- EMIT1(0x5A); /* pop rdx */
- EMIT1(0x58); /* pop rax */
- EMIT2(0x31, 0xc0); /* xor eax, eax */
-
- /* jmp cleanup_addr
- * addrs[i] - 11, because there are 11 bytes
- * after this insn: div, mov, pop, pop, mov
- */
- jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
- EMIT1_off32(0xE9, jmp_offset);
- }
-
if (BPF_CLASS(insn->code) == BPF_ALU64)
/* div r11 */
EMIT3(0x49, 0xF7, 0xF3);
@@ -1109,19 +1112,29 @@ common_load:
return proglen;
}
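+/*
+ * JIT state preserved between passes so that a program needing an
+ * extra pass (e.g. to fix up subprogram call addresses) can resume
+ * where the previous pass left off.
+ */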
+struct x64_jit_data {
+ struct bpf_binary_header *header;
+ int *addrs;
+ u8 *image;
+ int proglen;
+ struct jit_context ctx;
+};
+
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *header = NULL;
struct bpf_prog *tmp, *orig_prog = prog;
+ struct x64_jit_data *jit_data;
int proglen, oldproglen = 0;
struct jit_context ctx = {};
bool tmp_blinded = false;
+ bool extra_pass = false;
u8 *image = NULL;
int *addrs;
int pass;
int i;
- if (!bpf_jit_enable)
+ if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
@@ -1135,10 +1148,28 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
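+ /*
+ * Reuse the state saved by an earlier pass if this is the extra
+ * pass for a multi-function program.
+ */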
+ jit_data = prog->aux->jit_data;
+ if (!jit_data) {
+ jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+ if (!jit_data) {
+ prog = orig_prog;
+ goto out;
+ }
+ prog->aux->jit_data = jit_data;
+ }
+ addrs = jit_data->addrs;
+ if (addrs) {
+ ctx = jit_data->ctx;
+ oldproglen = jit_data->proglen;
+ image = jit_data->image;
+ header = jit_data->header;
+ extra_pass = true;
+ goto skip_init_addrs;
+ }
addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
- goto out;
+ goto out_addrs;
}
/* Before first pass, make a rough estimation of addrs[]
@@ -1149,6 +1180,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
addrs[i] = proglen;
}
ctx.cleanup_addr = proglen;
+skip_init_addrs:
/* JITed image shrinks with every pass and the loop iterates
* until the image stops shrinking. Very large bpf programs
@@ -1189,7 +1221,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
if (image) {
bpf_flush_icache(header, image + proglen);
- bpf_jit_binary_lock_ro(header);
+ if (!prog->is_func || extra_pass) {
+ bpf_jit_binary_lock_ro(header);
+ } else {
+ jit_data->addrs = addrs;
+ jit_data->ctx = ctx;
+ jit_data->proglen = proglen;
+ jit_data->image = image;
+ jit_data->header = header;
+ }
prog->bpf_func = (void *)image;
prog->jited = 1;
prog->jited_len = proglen;
@@ -1197,8 +1237,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = orig_prog;
}
+ if (!prog->is_func || extra_pass) {
out_addrs:
- kfree(addrs);
+ kfree(addrs);
+ kfree(jit_data);
+ prog->aux->jit_data = NULL;
+ }
out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ?
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7a5350d08cef..563049c483a1 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -594,6 +594,11 @@ char *__init pcibios_setup(char *str)
} else if (!strcmp(str, "nocrs")) {
pci_probe |= PCI_ROOT_NO_CRS;
return NULL;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ } else if (!strcmp(str, "big_root_window")) {
+ pci_probe |= PCI_BIG_ROOT_WINDOW;
+ return NULL;
+#endif
} else if (!strcmp(str, "earlydump")) {
pci_early_dump_regs = 1;
return NULL;
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e663d6bf1328..54ef19e90705 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -662,10 +662,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
*/
static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
{
- unsigned i;
- u32 base, limit, high;
+ static const char *name = "PCI Bus 0000:00";
struct resource *res, *conflict;
+ u32 base, limit, high;
struct pci_dev *other;
+ unsigned i;
+
+ if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
+ return;
/* Check that we are the only device of that type */
other = pci_get_device(dev->vendor, dev->device, NULL);
@@ -699,22 +703,30 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
if (!res)
return;
- res->name = "PCI Bus 0000:00";
+ /*
+ * Allocate a 256GB window directly below the 0xfd00000000 hardware
+ * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
+ */
+ res->name = name;
res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
- res->start = 0x100000000ull;
+ res->start = 0xbd00000000ull;
res->end = 0xfd00000000ull - 1;
- /* Just grab the free area behind system memory for this */
- while ((conflict = request_resource_conflict(&iomem_resource, res))) {
- if (conflict->end >= res->end) {
- kfree(res);
+ conflict = request_resource_conflict(&iomem_resource, res);
+ if (conflict) {
+ kfree(res);
+ if (conflict->name != name)
return;
- }
- res->start = conflict->end + 1;
- }
- dev_info(&dev->dev, "adding root bus resource %pR\n", res);
+ /* We are resuming from suspend; just reenable the window */
+ res = conflict;
+ } else {
+ dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
+ res);
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ pci_bus_add_resource(dev->bus, res, 0);
+ }
base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
@@ -726,13 +738,16 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
-
- pci_bus_add_resource(dev->bus, res, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
#endif
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 511921045312..43867bc85368 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -300,6 +300,7 @@ int __init intel_mid_pci_init(void)
pci_root_ops = intel_mid_pci_ops;
pci_soc_mode = 1;
/* Continue with standard init */
+ acpi_noirq_set();
return 1;
}
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index 53d600217973..75577c1490c4 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -26,6 +26,7 @@
#include <linux/pci_ids.h>
#include <linux/export.h>
#include <linux/list.h>
+#include <linux/dma-direct.h>
#include <asm/iommu.h>
#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index d87ac96e37ed..c310a8284358 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -25,7 +25,6 @@
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
@@ -135,7 +134,9 @@ pgd_t * __init efi_call_phys_prolog(void)
pud[j] = *pud_offset(p4d_k, vaddr);
}
}
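+ /*
+ * The pgd entries for the 1:1 region may have NX set; clear it
+ * so the firmware code being called can execute.
+ */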
+ pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
}
+
out:
__flush_tlb_all();
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 8a99a2e96537..5b513ccffde4 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
/*
* Update the first page pointer to skip over the CSH header.
*/
- cap_info->pages[0] += csh->headersize;
+ cap_info->phys[0] += csh->headersize;
+
+ /*
+ * cap_info->capsule should point at a virtual mapping of the entire
+ * capsule, starting at the capsule header. Our image has the Quark
+ * security header prepended, so we cannot rely on the default vmap()
+ * mapping created by the generic capsule code.
+ * Given that the Quark firmware does not appear to care about the
+ * virtual mapping, let's just point cap_info->capsule at our copy
+ * of the capsule header.
+ */
+ cap_info->capsule = &cap_info->header;
return 1;
}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index dc036e511f48..5a0483e7bf66 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
return 0;
}
-static const struct bt_sfi_data tng_bt_sfi_data __initdata = {
+static struct bt_sfi_data tng_bt_sfi_data __initdata = {
.setup = tng_bt_sfi_setup,
};
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 86676cec99a1..2c67bae6bb53 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -194,7 +194,7 @@ void __init x86_intel_mid_early_setup(void)
x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
- x86_init.pci.init = intel_mid_pci_init;
+ x86_init.pci.arch_init = intel_mid_pci_init;
x86_init.pci.fixup_irqs = x86_init_noop;
legacy_pic = &null_legacy_pic;
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
index 19b43e3a9f0f..7be1e1fe9ae3 100644
--- a/arch/x86/platform/intel-mid/sfi.c
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -96,8 +96,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
pentry->freq_hz, pentry->irq);
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
- /* triggering mode edge bit 2-3, active high polarity bit 0-1 */
- mp_irq.irqflag = 5;
+ mp_irq.irqflag = MP_IRQTRIG_EDGE | MP_IRQPOL_ACTIVE_HIGH;
mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
@@ -168,7 +167,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
totallen, (u32)pentry->phys_addr, pentry->irq);
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
- mp_irq.irqflag = 0xf; /* level trigger and active low */
+ mp_irq.irqflag = MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW;
mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 8538a6723171..c2e9285d1bf1 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1751,7 +1751,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
uv1 = 1;
/* the 14-bit pnode */
- write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
+ write_mmr_descriptor_base(pnode,
+ (n << UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT | m));
/*
* Initializing all 8 (ITEMS_PER_DESC) descriptors for each
* cpu even though we only use the first one; one descriptor can
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index 972b8e8d939c..09af7ff53044 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -13,28 +13,28 @@ else
posttest_64bit = -n
endif
-distill_awk = $(srctree)/arch/x86/tools/distill.awk
+reformatter = $(srctree)/arch/x86/tools/objdump_reformat.awk
chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
quiet_cmd_posttest = TEST $@
- cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose)
+ cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(reformatter) | $(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
quiet_cmd_sanitytest = TEST $@
cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000
-posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity
+posttest: $(obj)/insn_decoder_test vmlinux $(obj)/insn_sanity
$(call cmd,posttest)
$(call cmd,sanitytest)
-hostprogs-y += test_get_len insn_sanity
+hostprogs-y += insn_decoder_test insn_sanity
# -I needed for generated C source and C source which is in the kernel tree.
-HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
+HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
# Dependencies are also needed.
-$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/insn_decoder_test.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/insn_decoder_test.c
index ecf31e0358c8..a3b4fd954931 100644
--- a/arch/x86/tools/test_get_len.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -9,10 +9,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright (C) IBM Corporation, 2009
*/
@@ -21,6 +17,7 @@
#include <string.h>
#include <assert.h>
#include <unistd.h>
+#include <stdarg.h>
#define unlikely(cond) (cond)
@@ -33,7 +30,7 @@
* particular. See if insn_get_length() and the disassembler agree
* on the length of each instruction in an elf disassembly.
*
- * Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len
+ * Usage: objdump -d a.out | awk -f objdump_reformat.awk | ./insn_decoder_test
*/
const char *prog;
@@ -42,8 +39,8 @@ static int x86_64;
static void usage(void)
{
- fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |"
- " %s [-y|-n] [-v]\n", prog);
+ fprintf(stderr, "Usage: objdump -d a.out | awk -f objdump_reformat.awk"
+ " | %s [-y|-n] [-v]\n", prog);
fprintf(stderr, "\t-y 64bit mode\n");
fprintf(stderr, "\t-n 32bit mode\n");
fprintf(stderr, "\t-v verbose mode\n");
@@ -52,10 +49,21 @@ static void usage(void)
static void malformed_line(const char *line, int line_nr)
{
- fprintf(stderr, "%s: malformed line %d:\n%s", prog, line_nr, line);
+ fprintf(stderr, "%s: error: malformed line %d:\n%s",
+ prog, line_nr, line);
exit(3);
}
+static void pr_warn(const char *fmt, ...)
+{
+ va_list ap;
+
+ fprintf(stderr, "%s: warning: ", prog);
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
static void dump_field(FILE *fp, const char *name, const char *indent,
struct insn_field *field)
{
@@ -153,21 +161,20 @@ int main(int argc, char **argv)
insn_get_length(&insn);
if (insn.length != nb) {
warnings++;
- fprintf(stderr, "Warning: %s found difference at %s\n",
- prog, sym);
- fprintf(stderr, "Warning: %s", line);
- fprintf(stderr, "Warning: objdump says %d bytes, but "
- "insn_get_length() says %d\n", nb,
- insn.length);
+ pr_warn("Found an x86 instruction decoder bug, "
+ "please report this.\n", sym);
+ pr_warn("%s", line);
+ pr_warn("objdump says %d bytes, but insn_get_length() "
+ "says %d\n", nb, insn.length);
if (verbose)
dump_insn(stderr, &insn);
}
}
if (warnings)
- fprintf(stderr, "Warning: decoded and checked %d"
- " instructions with %d warnings\n", insns, warnings);
+ pr_warn("Decoded and checked %d instructions with %d "
+ "failures\n", insns, warnings);
else
- fprintf(stdout, "Success: decoded and checked %d"
- " instructions\n", insns);
+ fprintf(stdout, "%s: success: Decoded and checked %d"
+ " instructions\n", prog, insns);
return 0;
}
diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/objdump_reformat.awk
index e0edeccc1429..f418c91b71f0 100644
--- a/arch/x86/tools/distill.awk
+++ b/arch/x86/tools/objdump_reformat.awk
@@ -1,7 +1,7 @@
#!/bin/awk -f
# SPDX-License-Identifier: GPL-2.0
-# Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len
-# Distills the disassembly as follows:
+# Usage: objdump -d a.out | awk -f objdump_reformat.awk | ./insn_decoder_test
+# Reformats the disassembly as follows:
# - Removes all lines except the disassembled instructions.
# - For instructions that exceed 1 line (7 bytes), crams all the hex bytes
# into a single line.
diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c
index 2cfcfe4f6b2a..dd2ad82eee80 100644
--- a/arch/x86/xen/mmu_hvm.c
+++ b/arch/x86/xen/mmu_hvm.c
@@ -75,6 +75,6 @@ void __init xen_hvm_init_mmu_ops(void)
if (is_pagetable_dying_supported())
pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
- register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
+ WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
#endif
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 4d62c071b166..d85076223a69 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1325,20 +1325,18 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
{
struct {
struct mmuext_op op;
-#ifdef CONFIG_SMP
- DECLARE_BITMAP(mask, num_processors);
-#else
DECLARE_BITMAP(mask, NR_CPUS);
-#endif
} *args;
struct multicall_space mcs;
+ const size_t mc_entry_size = sizeof(args->op) +
+ sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
if (cpumask_empty(cpus))
return; /* nothing to do */
- mcs = xen_mc_entry(sizeof(*args));
+ mcs = xen_mc_entry(mc_entry_size);
args = mcs.args;
args->op.arg2.vcpumask = to_cpumask(args->mask);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 02f3445a2b5f..cd97a62394e7 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -23,8 +23,6 @@ static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static bool xen_pvspin = true;
-#include <asm/qspinlock.h>
-
static void xen_qlock_kick(int cpu)
{
int irq = per_cpu(lock_kicker_irq, cpu);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 75011b80660f..3b34745d0a52 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -72,7 +72,7 @@ u64 xen_clocksource_read(void);
void xen_setup_cpu_clockevents(void);
void xen_save_time_memory_area(void);
void xen_restore_time_memory_area(void);
-void __init xen_init_time_ops(void);
+void __ref xen_init_time_ops(void);
void __init xen_hvm_init_time_ops(void);
irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 8bc52f749f20..c921e8bccdc8 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -15,6 +15,9 @@ config XTENSA
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
+ select GENERIC_STRNCPY_FROM_USER if KASAN
+ select HAVE_ARCH_KASAN if MMU
+ select HAVE_CC_STACKPROTECTOR
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
@@ -79,6 +82,10 @@ config VARIANT_IRQ_SWITCH
config HAVE_XTENSA_GPIO32
def_bool n
+config KASAN_SHADOW_OFFSET
+ hex
+ default 0x6e400000
+
menu "Processor type and features"
choice
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 7ee02fe4a63d..3a934b72a272 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -42,10 +42,11 @@ export PLATFORM
# temporarily until string.h is fixed
KBUILD_CFLAGS += -ffreestanding -D__linux__
-
-KBUILD_CFLAGS += -pipe -mlongcalls
-
+KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals
KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
+KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
+
+KBUILD_AFLAGS += -mlongcalls -mtext-section-literals
ifneq ($(CONFIG_LD_NO_RELAX),)
LDFLAGS := --no-relax
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index bf7fabe6310d..bbf3b4b080cd 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -42,6 +42,7 @@ __start_a0:
.align 4
.section .text, "ax"
+ .literal_position
.begin literal_prefix .text
/* put literals in here! */
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
index d2a7f48564a4..355127faade1 100644
--- a/arch/xtensa/boot/lib/Makefile
+++ b/arch/xtensa/boot/lib/Makefile
@@ -15,6 +15,12 @@ CFLAGS_REMOVE_inftrees.o = -pg
CFLAGS_REMOVE_inffast.o = -pg
endif
+KASAN_SANITIZE := n
+
+CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_inffast.o += -fstack-protector -fstack-protector-strong
quiet_cmd_copy_zlib = COPY $@
cmd_copy_zlib = cat $< > $@
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index 8d16925765cb..2bf964df37ba 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index f2d3094aa1d1..3221b7053fa3 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_USELIB=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 744adeaf2945..985fa8546e4e 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index 78c2529d0459..624f9b3a3878 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 14e3ca353ac8..11fed6c06a7c 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
index 746dcc8b5abc..7f2ae5872151 100644
--- a/arch/xtensa/include/asm/asmmacro.h
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -150,5 +150,45 @@
__endl \ar \as
.endm
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(handler) \
+ .section __ex_table, "a"; \
+ .word 97f, handler; \
+ .previous \
+97:
+
+
+/*
+ * Extract unaligned word that is split between two registers w0 and w1
+ * into r regardless of machine endianness. SAR must be loaded with the
+ * starting bit of the word (see __ssa8).
+ */
+
+ .macro __src_b r, w0, w1
+#ifdef __XTENSA_EB__
+ src \r, \w0, \w1
+#else
+ src \r, \w1, \w0
+#endif
+ .endm
+
+/*
+ * Load the 2 lowest address bits of r into SAR so that __src_b can extract
+ * the unaligned word starting at r from two registers loaded from
+ * consecutive aligned addresses covering r, regardless of machine
+ * endianness.
+ *
+ * r 0 1 2 3
+ * LE SAR 0 8 16 24
+ * BE SAR 32 24 16 8
+ */
+
+ .macro __ssa8 r
+#ifdef __XTENSA_EB__
+ ssa8b \r
+#else
+ ssa8l \r
+#endif
+ .endm
#endif /* _XTENSA_ASMMACRO_H */
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 47e46dcf5d49..5d98a7ad4251 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -11,6 +11,8 @@
#ifndef _XTENSA_CURRENT_H
#define _XTENSA_CURRENT_H
+#include <asm/thread_info.h>
+
#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
@@ -26,8 +28,6 @@ static inline struct task_struct *get_current(void)
#else
-#define CURRENT_SHIFT 13
-
#define GET_CURRENT(reg,sp) \
GET_THREAD_INFO(reg,sp); \
l32i reg, reg, TI_TASK \
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 153bf2370988..44098800dad7 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -23,14 +23,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &xtensa_dma_map_ops;
}
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return (dma_addr_t)paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return (phys_addr_t)daddr;
-}
-
#endif /* _XTENSA_DMA_MAPPING_H */
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 0d30403b6c95..7e25c1b50ac0 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -44,7 +44,7 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
-#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE)
+#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
@@ -63,7 +63,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx)
* table.
*/
BUILD_BUG_ON(FIXADDR_START <
- XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
return __fix_to_virt(idx);
}
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index eaaf1ebcc7a4..5bfbc1c401d4 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
- u32 prev;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__asm__ __volatile__ (
" # futex_atomic_cmpxchg_inatomic\n"
- "1: l32i %1, %3, 0\n"
- " mov %0, %5\n"
- " wsr %1, scompare1\n"
- "2: s32c1i %0, %3, 0\n"
- "3:\n"
+ " wsr %5, scompare1\n"
+ "1: s32c1i %1, %4, 0\n"
+ " s32i %1, %6, 0\n"
+ "2:\n"
" .section .fixup,\"ax\"\n"
" .align 4\n"
- "4: .long 3b\n"
- "5: l32r %1, 4b\n"
- " movi %0, %6\n"
+ "3: .long 2b\n"
+ "4: l32r %1, 3b\n"
+ " movi %0, %7\n"
" jx %1\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .long 1b,5b,2b,5b\n"
+ " .long 1b,4b\n"
" .previous\n"
- : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
- : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
+ : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
+ : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
: "memory");
- *uval = prev;
return ret;
}
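The rewritten sequence is a plain compare-exchange: scompare1 holds the expected value, s32c1i stores newval only if the word at uaddr still matches, and the observed value lands in the register and is stored through uval. A rough C model of the semantics, ignoring the user-memory fault handling that the __ex_table fixup provides (sketch only):

/* Model only: the real code operates on user memory with fault fixups. */
static int futex_cmpxchg_model(u32 *uval, u32 *uaddr, u32 oldval, u32 newval)
{
	u32 prev = cmpxchg(uaddr, oldval, newval); /* wsr scompare1; s32c1i */

	*uval = prev;		/* the unconditional s32i through uval */
	return 0;		/* -EFAULT is delivered by the fixup path */
}

Dropping the explicit l32i/prev bookkeeping is what lets the old four-entry exception table shrink to the single "1b,4b" pair.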
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 6e070db1022e..04e9340eac4b 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -72,7 +72,7 @@ static inline void *kmap(struct page *page)
* page table.
*/
BUILD_BUG_ON(PKMAP_BASE <
- XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return page_address(page);
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
new file mode 100644
index 000000000000..54be80876e57
--- /dev/null
+++ b/arch/xtensa/include/asm/kasan.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
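These constants feed the generic KASAN shadow mapping. A sketch mirroring the generic kasan_mem_to_shadow() helper, assuming KASAN_SHADOW_SCALE_SHIFT is 3 (one shadow byte covers eight bytes of memory):

static inline void *mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

With KASAN_START_VADDR at 0x90000000, the -KASAN_START_VADDR expression sizes the shadow so that it covers everything from that address up to the top of the 32-bit address space.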
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 561f8729bcde..2317c835a4db 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -71,4 +71,11 @@
#endif
+#ifndef CONFIG_KASAN
+#define KERNEL_STACK_SHIFT 13
+#else
+#define KERNEL_STACK_SHIFT 15
+#endif
+#define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT)
+
#endif
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
new file mode 100644
index 000000000000..0ba9973235d9
--- /dev/null
+++ b/arch/xtensa/include/asm/linkage.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index f7e186dfc4e4..de5e6cbbafe4 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -52,6 +52,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache);
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
void init_mmu(void);
+void init_kio(void);
static inline void set_rasid_register (unsigned long val)
{
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 2cebdbbdb633..37251b2ef871 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -3,6 +3,10 @@ static inline void init_mmu(void)
{
}
+static inline void init_kio(void)
+{
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 4ddbfd57a7c8..5d69c11c01b8 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -36,8 +36,6 @@
#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
#endif
-#define PGTABLE_START 0x80000000
-
/*
* Cache aliasing:
*
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 30dd5b2e4ad5..38802259978f 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -12,9 +12,9 @@
#define _XTENSA_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
/*
* We only use two ring levels, user and kernel space.
@@ -170,6 +170,7 @@
#define PAGE_SHARED_EXEC \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index e2d9c5eb10bd..3a5c5918aea3 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -10,6 +10,7 @@
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
+#include <asm/kmem_layout.h>
#include <uapi/asm/ptrace.h>
/*
@@ -38,20 +39,6 @@
* +-----------------------+ --------
*/
-#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
-
-/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
-
-#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
-#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
-#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
-#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
-#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
-#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
-#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
-#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
-#define EXC_TABLE_SIZE 0x400
-
#ifndef __ASSEMBLY__
#include <asm/coprocessor.h>
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index 881a1134a4b4..477594e5817f 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -76,6 +76,7 @@
#define EXCCAUSE_COPROCESSOR5_DISABLED 37
#define EXCCAUSE_COPROCESSOR6_DISABLED 38
#define EXCCAUSE_COPROCESSOR7_DISABLED 39
+#define EXCCAUSE_N 64
/* PS register fields. */
diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h
new file mode 100644
index 000000000000..e368f94fd2af
--- /dev/null
+++ b/arch/xtensa/include/asm/stackprotector.h
@@ -0,0 +1,40 @@
+/*
+ * GCC stack protector support.
+ *
+ * (This is directly adapted from the ARM implementation)
+ *
+ * The stack protector works by putting a predefined pattern at the start
+ * of the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary,
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi-random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
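boot_init_stack_canary() only chooses the value; the checks themselves are emitted by gcc. With -fstack-protector, each protected function behaves roughly like this hand-written sketch (illustrative only; the real prologue and epilogue are compiler-generated):

extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

void protected_function(void)
{
	unsigned long canary = __stack_chk_guard;	/* prologue copy */
	char buf[64];

	/* ... body that might overflow buf and smash the canary ... */

	if (canary != __stack_chk_guard)		/* epilogue check */
		__stack_chk_fail();
}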
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
index 8d5d9dfadb09..89b51a0c752f 100644
--- a/arch/xtensa/include/asm/string.h
+++ b/arch/xtensa/include/asm/string.h
@@ -53,7 +53,7 @@ static inline char *strncpy(char *__dest, const char *__src, size_t __n)
"bne %1, %5, 1b\n"
"2:"
: "=r" (__dest), "=r" (__src), "=&r" (__dummy)
- : "0" (__dest), "1" (__src), "r" (__src+__n)
+ : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n)
: "memory");
return __xdest;
@@ -101,21 +101,40 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
"2:\n\t"
"sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
- : "0" (__cs), "1" (__ct), "r" (__cs+__n));
+ : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n));
return __res;
}
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
/* Don't build bcopy at all ... */
#define __HAVE_ARCH_BCOPY
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use not instrumented version of mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
#endif /* _XTENSA_STRING_H */
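In a translation unit compiled without instrumentation (no __SANITIZE_ADDRESS__), the macros above silently route plain mem* calls to the uninstrumented implementations, so no shadow checks are generated there. A hypothetical illustration (copy_blob is not from the patch):

void copy_blob(void *dst, const void *src, size_t n)
{
	/* under CONFIG_KASAN this expands to __memcpy(dst, src, n) */
	memcpy(dst, src, n);
}

__NO_FORTIFY is needed because the fortified wrappers would reintroduce __builtin_memcpy and defeat the redirection.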
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 7be2400f745a..2bd19ae61e47 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -11,7 +11,9 @@
#ifndef _XTENSA_THREAD_INFO_H
#define _XTENSA_THREAD_INFO_H
-#ifdef __KERNEL__
+#include <asm/kmem_layout.h>
+
+#define CURRENT_SHIFT KERNEL_STACK_SHIFT
#ifndef __ASSEMBLY__
# include <asm/processor.h>
@@ -77,14 +79,11 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
- __asm__("extui %0,a1,0,13\n\t"
+ __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
"xor %0, a1, %0" : "=&r" (ti) : );
return ti;
}
@@ -93,7 +92,7 @@ static inline struct thread_info *current_thread_info(void)
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg,sp) \
- extui reg, sp, 0, 13; \
+ extui reg, sp, 0, CURRENT_SHIFT; \
xor reg, sp, reg
#endif
@@ -130,8 +129,7 @@ static inline struct thread_info *current_thread_info(void)
*/
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
-#define THREAD_SIZE 8192 //(2*PAGE_SIZE)
-#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE KERNEL_STACK_SIZE
+#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
-#endif /* __KERNEL__ */
#endif /* _XTENSA_THREAD_INFO */
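The extui/xor pair in current_thread_info() is equivalent to clearing the low CURRENT_SHIFT bits of the stack pointer: extui extracts those bits, and xor-ing them back out leaves the kernel-stack base, where struct thread_info lives. An equivalent C sketch (ti_from_sp is a hypothetical name):

static inline struct thread_info *ti_from_sp(unsigned long sp)
{
	/* extui %0, sp, 0, CURRENT_SHIFT;  xor %0, sp, %0 */
	return (struct thread_info *)(sp & ~(KERNEL_STACK_SIZE - 1));
}

Tying CURRENT_SHIFT to KERNEL_STACK_SHIFT is what lets the stack grow to 32 KiB under KASAN without breaking this masking trick.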
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 2e69aa4b843f..f5cd7a7e65e0 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -13,12 +13,47 @@
#include <asm/ptrace.h>
/*
+ * Per-CPU exception handling data structure.
+ * EXCSAVE1 points to it.
+ */
+struct exc_table {
+ /* Kernel Stack */
+ void *kstk;
+ /* Double exception save area for a0 */
+ unsigned long double_save;
+ /* Fixup handler */
+ void *fixup;
+ /* For passing a parameter to fixup */
+ void *fixup_param;
+ /* For fast syscall handler */
+ unsigned long syscall_save;
+ /* Fast user exception handlers */
+ void *fast_user_handler[EXCCAUSE_N];
+ /* Fast kernel exception handlers */
+ void *fast_kernel_handler[EXCCAUSE_N];
+ /* Default C-Handlers */
+ void *default_handler[EXCCAUSE_N];
+};
+
+/*
* handler must be either of the following:
* void (*)(struct pt_regs *regs);
* void (*)(struct pt_regs *regs, unsigned long exccause);
*/
extern void * __init trap_set_handler(int cause, void *handler);
extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+void fast_second_level_miss(void);
+
+/* Initialize a minimal exc_table structure sufficient for basic paging */
+static inline void __init early_trap_init(void)
+{
+ static struct exc_table exc_table __initdata = {
+ .fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
+ fast_second_level_miss,
+ };
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table));
+}
+
void secondary_trap_init(void);
static inline void spill_registers(void)
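With typed per-cause arrays, dispatch becomes a plain array index on EXCCAUSE instead of the old EXC_TABLE_* byte-offset arithmetic. A hypothetical helper to show the shape (not part of the patch):

static void *lookup_default_handler(struct exc_table *table,
				    unsigned int cause)
{
	return cause < EXCCAUSE_N ? table->default_handler[cause] : NULL;
}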
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index b8f152b6aaa5..f1158b4c629c 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -44,6 +44,8 @@
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
+#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
+
/*
* These are the main single-value transfer routines. They
* automatically use the right size if we just have the right pointer
@@ -261,7 +263,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
- if ( ! memset(addr, 0, size) )
+ if (!__memset(addr, 0, size))
return size;
return 0;
}
@@ -277,6 +279,8 @@ clear_user(void *addr, unsigned long size)
#define __clear_user __xtensa_clear_user
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
+
extern long __strncpy_user(char *, const char *, long);
static inline long
@@ -286,6 +290,9 @@ strncpy_from_user(char *dst, const char *src, long count)
return __strncpy_user(dst, src, count);
return -EFAULT;
}
+#else
+long strncpy_from_user(char *dst, const char *src, long count);
+#endif
/*
* Return the size of a string (including the ending 0!)
diff --git a/arch/xtensa/include/uapi/asm/poll.h b/arch/xtensa/include/uapi/asm/poll.h
index 4d249040b33d..e3246d41182c 100644
--- a/arch/xtensa/include/uapi/asm/poll.h
+++ b/arch/xtensa/include/uapi/asm/poll.h
@@ -12,9 +12,26 @@
#ifndef _XTENSA_POLL_H
#define _XTENSA_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 0x0100
-#define POLLREMOVE 0x0800
+#define POLLWRBAND (__force __poll_t)0x0100
+#define POLLREMOVE (__force __poll_t)0x0800
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
#include <asm-generic/poll.h>
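The mapping exists because xtensa's historical user ABI defines POLLWRBAND as 0x100 and POLLWRNORM as POLLOUT (0x004), while kernel-internal __poll_t values follow asm-generic (POLLWRNORM 0x100, POLLWRBAND 0x200). A quick sanity check of the bit moves (hypothetical self-test, not in the patch):

static void poll_mangle_selftest(void)
{
	WARN_ON(mangle_poll((__force __poll_t)0x200) != 0x100); /* WRBAND */
	WARN_ON(mangle_poll((__force __poll_t)0x100) != 0x004); /* WRNORM -> POLLOUT */
	WARN_ON((__force __u16)demangle_poll(0x100) != 0x200);  /* and back again */
}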
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index bb8d55775a97..91907590d183 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -17,9 +17,6 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
-AFLAGS_head.o += -mtext-section-literals
-AFLAGS_mxhead.o += -mtext-section-literals
-
# In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB.
# Therefore, the .text and .literal section must be combined in parenthesis
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index 890004af03a9..9301452e521e 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -19,6 +19,7 @@
#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
#include <asm/processor.h>
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
@@ -66,8 +67,6 @@
#define INSN_T 24
#define INSN_OP1 16
-.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
-.macro __ssa8 r; ssa8b \r; .endm
.macro __ssa8r r; ssa8l \r; .endm
.macro __sh r, s; srl \r, \s; .endm
.macro __sl r, s; sll \r, \s; .endm
@@ -81,8 +80,6 @@
#define INSN_T 4
#define INSN_OP1 12
-.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
-.macro __ssa8 r; ssa8l \r; .endm
.macro __ssa8r r; ssa8b \r; .endm
.macro __sh r, s; sll \r, \s; .endm
.macro __sl r, s; srl \r, \s; .endm
@@ -155,7 +152,7 @@
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
-
+ .literal_position
ENTRY(fast_unaligned)
/* Note: We don't expect the address to be aligned on a word
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index bcb5beb81177..022cf918ec20 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -76,6 +76,9 @@ int main(void)
DEFINE(TASK_PID, offsetof (struct task_struct, pid));
DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
+#ifdef CONFIG_CC_STACKPROTECTOR
+ DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
/* offsets in thread_info struct */
@@ -129,5 +132,18 @@ int main(void)
offsetof(struct debug_table, icount_level_save));
#endif
+ /* struct exc_table */
+ DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk));
+ DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
+ DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
+ DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
+ DEFINE(EXC_TABLE_SYSCALL_SAVE,
+ offsetof(struct exc_table, syscall_save));
+ DEFINE(EXC_TABLE_FAST_USER,
+ offsetof(struct exc_table, fast_user_handler));
+ DEFINE(EXC_TABLE_FAST_KERNEL,
+ offsetof(struct exc_table, fast_kernel_handler));
+ DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
+
return 0;
}
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 3a98503ad11a..4f8b52d575a2 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -212,8 +212,7 @@ ENDPROC(coprocessor_restore)
ENTRY(fast_coprocessor_double)
wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0
+ call0 unrecoverable_exception
ENDPROC(fast_coprocessor_double)
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 37a239556889..5caff0744f3c 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -14,6 +14,7 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
@@ -125,6 +126,7 @@
*
* Note: _user_exception might be at an odd address. Don't use call0..call12
*/
+ .literal_position
ENTRY(user_exception)
@@ -475,8 +477,7 @@ common_exception_return:
1:
irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
- movi a4, trace_hardirqs_off
- callx4 a4
+ call4 trace_hardirqs_off
#endif
/* Jump if we are returning from kernel exceptions. */
@@ -503,24 +504,20 @@ common_exception_return:
/* Call do_signal() */
#ifdef CONFIG_TRACE_IRQFLAGS
- movi a4, trace_hardirqs_on
- callx4 a4
+ call4 trace_hardirqs_on
#endif
rsil a2, 0
- movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
mov a6, a1
- callx4 a4
+ call4 do_notify_resume # int do_notify_resume(struct pt_regs*)
j 1b
3: /* Reschedule */
#ifdef CONFIG_TRACE_IRQFLAGS
- movi a4, trace_hardirqs_on
- callx4 a4
+ call4 trace_hardirqs_on
#endif
rsil a2, 0
- movi a4, schedule # void schedule (void)
- callx4 a4
+ call4 schedule # void schedule (void)
j 1b
#ifdef CONFIG_PREEMPT
@@ -531,8 +528,7 @@ common_exception_return:
l32i a4, a2, TI_PRE_COUNT
bnez a4, 4f
- movi a4, preempt_schedule_irq
- callx4 a4
+ call4 preempt_schedule_irq
j 1b
#endif
@@ -545,23 +541,20 @@ common_exception_return:
5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
_bbci.l a4, TIF_DB_DISABLED, 7f
- movi a4, restore_dbreak
- callx4 a4
+ call4 restore_dbreak
7:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
- movi a4, check_tlb_sanity
- callx4 a4
+ call4 check_tlb_sanity
#endif
6:
4:
#ifdef CONFIG_TRACE_IRQFLAGS
extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
bgei a4, LOCKLEVEL, 1f
- movi a4, trace_hardirqs_on
- callx4 a4
+ call4 trace_hardirqs_on
1:
#endif
/* Restore optional registers. */
@@ -777,6 +770,8 @@ ENDPROC(kernel_exception)
* When we get here, a0 is trashed and saved to excsave[debuglevel]
*/
+ .literal_position
+
ENTRY(debug_exception)
rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
@@ -916,6 +911,8 @@ ENDPROC(debug_exception)
unrecoverable_text:
.ascii "Unrecoverable error in exception handler\0"
+ .literal_position
+
ENTRY(unrecoverable_exception)
movi a0, 1
@@ -933,10 +930,8 @@ ENTRY(unrecoverable_exception)
movi a0, 0
addi a1, a1, PT_REGS_OFFSET
- movi a4, panic
movi a6, unrecoverable_text
-
- callx4 a4
+ call4 panic
1: j 1b
@@ -1073,8 +1068,7 @@ ENTRY(fast_syscall_unrecoverable)
xsr a2, depc # restore a2, depc
wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0
+ call0 unrecoverable_exception
ENDPROC(fast_syscall_unrecoverable)
@@ -1101,32 +1095,11 @@ ENDPROC(fast_syscall_unrecoverable)
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*
* Note: we don't have to save a2; a2 holds the return value
- *
- * We use the two macros TRY and CATCH:
- *
- * TRY adds an entry to the __ex_table fixup table for the immediately
- * following instruction.
- *
- * CATCH catches any exception that occurred at one of the preceding TRY
- * statements and continues from there
- *
- * Usage TRY l32i a0, a1, 0
- * <other code>
- * done: rfe
- * CATCH <set return code>
- * j done
*/
-#ifdef CONFIG_FAST_SYSCALL_XTENSA
-
-#define TRY \
- .section __ex_table, "a"; \
- .word 66f, 67f; \
- .text; \
-66:
+ .literal_position
-#define CATCH \
-67:
+#ifdef CONFIG_FAST_SYSCALL_XTENSA
ENTRY(fast_syscall_xtensa)
@@ -1141,9 +1114,9 @@ ENTRY(fast_syscall_xtensa)
.Lswp: /* Atomic compare and swap */
-TRY l32i a0, a3, 0 # read old value
+EX(.Leac) l32i a0, a3, 0 # read old value
bne a0, a4, 1f # same as old value? jump
-TRY s32i a5, a3, 0 # different, modify value
+EX(.Leac) s32i a5, a3, 0 # different, modify value
l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, 1 # and return 1
@@ -1156,12 +1129,12 @@ TRY s32i a5, a3, 0 # different, modify value
.Lnswp: /* Atomic set, add, and exg_add. */
-TRY l32i a7, a3, 0 # orig
+EX(.Leac) l32i a7, a3, 0 # orig
addi a6, a6, -SYS_XTENSA_ATOMIC_SET
add a0, a4, a7 # + arg
moveqz a0, a4, a6 # set
addi a6, a6, SYS_XTENSA_ATOMIC_SET
-TRY s32i a0, a3, 0 # write new value
+EX(.Leac) s32i a0, a3, 0 # write new value
mov a0, a2
mov a2, a7
@@ -1169,7 +1142,6 @@ TRY s32i a0, a3, 0 # write new value
l32i a0, a0, PT_AREG0 # restore a0
rfe
-CATCH
.Leac: l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, -EFAULT
@@ -1411,14 +1383,12 @@ ENTRY(fast_syscall_spill_registers)
rsync
movi a6, SIGSEGV
- movi a4, do_exit
- callx4 a4
+ call4 do_exit
/* shouldn't return, so panic */
wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0 # should not return
+ call0 unrecoverable_exception # should not return
1: j 1b
@@ -1564,8 +1534,8 @@ ENDPROC(fast_syscall_spill_registers)
ENTRY(fast_second_level_miss_double_kernel)
-1: movi a0, unrecoverable_exception
- callx0 a0 # should not return
+1:
+ call0 unrecoverable_exception # should not return
1: j 1b
ENDPROC(fast_second_level_miss_double_kernel)
@@ -1887,6 +1857,7 @@ ENDPROC(fast_store_prohibited)
* void system_call (struct pt_regs* regs, int exccause)
* a2 a3
*/
+ .literal_position
ENTRY(system_call)
@@ -1896,9 +1867,8 @@ ENTRY(system_call)
l32i a3, a2, PT_AREG2
mov a6, a2
- movi a4, do_syscall_trace_enter
s32i a3, a2, PT_SYSCALL
- callx4 a4
+ call4 do_syscall_trace_enter
mov a3, a6
/* syscall = sys_call_table[syscall_nr] */
@@ -1930,9 +1900,8 @@ ENTRY(system_call)
1: /* regs->areg[2] = return_value */
s32i a6, a2, PT_AREG2
- movi a4, do_syscall_trace_leave
mov a6, a2
- callx4 a4
+ call4 do_syscall_trace_leave
retw
ENDPROC(system_call)
@@ -2002,6 +1971,12 @@ ENTRY(_switch_to)
s32i a1, a2, THREAD_SP # save stack pointer
#endif
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ movi a6, __stack_chk_guard
+ l32i a8, a3, TASK_STACK_CANARY
+ s32i a8, a6, 0
+#endif
+
/* Disable ints while we manipulate the stack pointer. */
irq_save a14, a3
@@ -2048,12 +2023,10 @@ ENTRY(ret_from_fork)
/* void schedule_tail (struct task_struct *prev)
* Note: prev is still in a6 (return value from fake call4 frame)
*/
- movi a4, schedule_tail
- callx4 a4
+ call4 schedule_tail
- movi a4, do_syscall_trace_leave
mov a6, a1
- callx4 a4
+ call4 do_syscall_trace_leave
j common_exception_return
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 23ce62e60435..9c4e9433e536 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -264,11 +264,8 @@ ENTRY(_startup)
/* init_arch kick-starts the linux kernel */
- movi a4, init_arch
- callx4 a4
-
- movi a4, start_kernel
- callx4 a4
+ call4 init_arch
+ call4 start_kernel
should_never_return:
j should_never_return
@@ -294,8 +291,7 @@ should_never_return:
movi a6, 0
wsr a6, excsave1
- movi a4, secondary_start_kernel
- callx4 a4
+ call4 secondary_start_kernel
j should_never_return
#endif /* CONFIG_SMP */
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index b715237bae61..902845ddacb7 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -22,8 +22,6 @@
#include <linux/kernel.h>
#include <linux/cache.h>
-#undef DEBUG_RELOCATE
-
static int
decode_calln_opcode (unsigned char *location)
{
@@ -58,10 +56,9 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
unsigned char *location;
uint32_t value;
-#ifdef DEBUG_RELOCATE
- printk("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
-#endif
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rela[i].r_offset;
@@ -87,7 +84,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
value -= ((unsigned long)location & -4) + 4;
if ((value & 3) != 0 ||
((value + (1 << 19)) >> 20) != 0) {
- printk("%s: relocation out of range, "
+ pr_err("%s: relocation out of range, "
"section %d reloc %d "
"sym '%s'\n",
mod->name, relsec, i,
@@ -111,7 +108,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
value -= (((unsigned long)location + 3) & -4);
if ((value & 3) != 0 ||
(signed int)value >> 18 != -1) {
- printk("%s: relocation out of range, "
+ pr_err("%s: relocation out of range, "
"section %d reloc %d "
"sym '%s'\n",
mod->name, relsec, i,
@@ -156,7 +153,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
case R_XTENSA_SLOT12_OP:
case R_XTENSA_SLOT13_OP:
case R_XTENSA_SLOT14_OP:
- printk("%s: unexpected FLIX relocation: %u\n",
+ pr_err("%s: unexpected FLIX relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
@@ -176,13 +173,13 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
case R_XTENSA_SLOT12_ALT:
case R_XTENSA_SLOT13_ALT:
case R_XTENSA_SLOT14_ALT:
- printk("%s: unexpected ALT relocation: %u\n",
+ pr_err("%s: unexpected ALT relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
default:
- printk("%s: unexpected relocation: %u\n",
+ pr_err("%s: unexpected relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 903963ee495d..d981f01c8d89 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -29,14 +29,6 @@
#include <asm/pci-bridge.h>
#include <asm/platform.h>
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
/* PCI Controller */
@@ -101,8 +93,8 @@ pcibios_enable_resources(struct pci_dev *dev, int mask)
for(idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
- printk (KERN_ERR "PCI: Device %s not available because "
- "of resource collisions\n", pci_name(dev));
+ pr_err("PCI: Device %s not available because "
+ "of resource collisions\n", pci_name(dev));
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
@@ -113,7 +105,7 @@ pcibios_enable_resources(struct pci_dev *dev, int mask)
if (dev->resource[PCI_ROM_RESOURCE].start)
cmd |= PCI_COMMAND_MEMORY;
if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n",
+ pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
@@ -144,8 +136,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
res = &pci_ctrl->io_resource;
if (!res->flags) {
if (io_offset)
- printk (KERN_ERR "I/O resource not set for host"
- " bridge %d\n", pci_ctrl->index);
+ pr_err("I/O resource not set for host bridge %d\n",
+ pci_ctrl->index);
res->start = 0;
res->end = IO_SPACE_LIMIT;
res->flags = IORESOURCE_IO;
@@ -159,8 +151,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
if (!res->flags) {
if (i > 0)
continue;
- printk(KERN_ERR "Memory resource not set for "
- "host bridge %d\n", pci_ctrl->index);
+ pr_err("Memory resource not set for host bridge %d\n",
+ pci_ctrl->index);
res->start = 0;
res->end = ~0U;
res->flags = IORESOURCE_MEM;
@@ -176,7 +168,7 @@ static int __init pcibios_init(void)
struct pci_bus *bus;
int next_busno = 0, ret;
- printk("PCI: Probing PCI hardware\n");
+ pr_info("PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
@@ -232,7 +224,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
for (idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
- printk(KERN_ERR "PCI: Device %s not available because "
+ pr_err("PCI: Device %s not available because "
"of resource collisions\n", pci_name(dev));
return -EINVAL;
}
@@ -242,8 +234,8 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
+ pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
+ pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index ff4f0ecb03dd..8dd0593fb2c4 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -58,6 +58,12 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
#if XTENSA_HAVE_COPROCESSORS
void coprocessor_release_all(struct thread_info *ti)
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index e2461968efb2..c0845cb1cbb9 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -278,7 +278,6 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct pt_regs *regs)
{
int i;
- siginfo_t info;
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
if (bp->attr.bp_type & HW_BREAKPOINT_X) {
@@ -293,12 +292,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
i = (i << 1) | 1;
}
- info.si_signo = SIGTRAP;
- info.si_errno = i;
- info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)bkpt->address;
-
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 08175df7a69e..a931af9075f2 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -36,6 +36,7 @@
#endif
#include <asm/bootparam.h>
+#include <asm/kasan.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -156,7 +157,7 @@ static int __init parse_bootparam(const bp_tag_t* tag)
/* Boot parameters must start with a BP_TAG_FIRST tag. */
if (tag->id != BP_TAG_FIRST) {
- printk(KERN_WARNING "Invalid boot parameters!\n");
+ pr_warn("Invalid boot parameters!\n");
return 0;
}
@@ -165,15 +166,14 @@ static int __init parse_bootparam(const bp_tag_t* tag)
/* Parse all tags. */
while (tag != NULL && tag->id != BP_TAG_LAST) {
- for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
+ for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
if (tag->id == t->tag) {
t->parse(tag);
break;
}
}
if (t == &__tagtable_end)
- printk(KERN_WARNING "Ignoring tag "
- "0x%08x\n", tag->id);
+ pr_warn("Ignoring tag 0x%08x\n", tag->id);
tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
}
@@ -208,6 +208,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
/* round down to nearest 256MB boundary */
xtensa_kio_paddr &= 0xf0000000;
+ init_kio();
+
return 1;
}
#else
@@ -246,6 +248,14 @@ void __init early_init_devtree(void *params)
void __init init_arch(bp_tag_t *bp_start)
{
+ /* Initialize MMU. */
+
+ init_mmu();
+
+ /* Initialize initial KASAN shadow map */
+
+ kasan_early_init();
+
/* Parse boot parameters */
if (bp_start)
@@ -263,10 +273,6 @@ void __init init_arch(bp_tag_t *bp_start)
/* Early hook for platforms */
platform_init(bp_start);
-
- /* Initialize MMU. */
-
- init_mmu();
}
/*
@@ -277,13 +283,13 @@ extern char _end[];
extern char _stext[];
extern char _WindowVectors_text_start;
extern char _WindowVectors_text_end;
-extern char _DebugInterruptVector_literal_start;
+extern char _DebugInterruptVector_text_start;
extern char _DebugInterruptVector_text_end;
-extern char _KernelExceptionVector_literal_start;
+extern char _KernelExceptionVector_text_start;
extern char _KernelExceptionVector_text_end;
-extern char _UserExceptionVector_literal_start;
+extern char _UserExceptionVector_text_start;
extern char _UserExceptionVector_text_end;
-extern char _DoubleExceptionVector_literal_start;
+extern char _DoubleExceptionVector_text_start;
extern char _DoubleExceptionVector_text_end;
#if XCHAL_EXCM_LEVEL >= 2
extern char _Level2InterruptVector_text_start;
@@ -317,6 +323,13 @@ static inline int mem_reserve(unsigned long start, unsigned long end)
void __init setup_arch(char **cmdline_p)
{
+ pr_info("config ID: %08x:%08x\n",
+ get_sr(SREG_EPC), get_sr(SREG_EXCSAVE));
+ if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
+ get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
+ pr_info("built for config ID: %08x:%08x\n",
+ XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1);
+
*cmdline_p = command_line;
platform_setup(cmdline_p);
strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
@@ -339,16 +352,16 @@ void __init setup_arch(char **cmdline_p)
mem_reserve(__pa(&_WindowVectors_text_start),
__pa(&_WindowVectors_text_end));
- mem_reserve(__pa(&_DebugInterruptVector_literal_start),
+ mem_reserve(__pa(&_DebugInterruptVector_text_start),
__pa(&_DebugInterruptVector_text_end));
- mem_reserve(__pa(&_KernelExceptionVector_literal_start),
+ mem_reserve(__pa(&_KernelExceptionVector_text_start),
__pa(&_KernelExceptionVector_text_end));
- mem_reserve(__pa(&_UserExceptionVector_literal_start),
+ mem_reserve(__pa(&_UserExceptionVector_text_start),
__pa(&_UserExceptionVector_text_end));
- mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
+ mem_reserve(__pa(&_DoubleExceptionVector_text_start),
__pa(&_DoubleExceptionVector_text_end));
#if XCHAL_EXCM_LEVEL >= 2
@@ -380,7 +393,7 @@ void __init setup_arch(char **cmdline_p)
#endif
parse_early_param();
bootmem_init();
-
+ kasan_init();
unflatten_and_copy_device_tree();
#ifdef CONFIG_SMP
@@ -582,12 +595,14 @@ c_show(struct seq_file *f, void *slot)
"model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
"core ID\t\t: " XCHAL_CORE_ID "\n"
"build ID\t: 0x%x\n"
+ "config ID\t: %08x:%08x\n"
"byte order\t: %s\n"
"cpu MHz\t\t: %lu.%02lu\n"
"bogomips\t: %lu.%02lu\n",
num_online_cpus(),
cpumask_pr_args(cpu_online_mask),
XCHAL_BUILD_UNIQUE_ID,
+ get_sr(SREG_EPC), get_sr(SREG_EXCSAVE),
XCHAL_HAVE_BE ? "big" : "little",
ccount_freq/1000000,
(ccount_freq/10000) % 100,
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index d427e784ab44..f88e7a0b232c 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -28,8 +28,6 @@
#include <asm/coprocessor.h>
#include <asm/unistd.h>
-#define DEBUG_SIG 0
-
extern struct task_struct *coproc_owners[];
struct rt_sigframe
@@ -399,10 +397,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
regs->areg[8] = (unsigned long) &frame->uc;
regs->threadptr = tp;
-#if DEBUG_SIG
- printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
- current->comm, current->pid, sig, frame, regs->pc);
-#endif
+ pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
+ current->comm, current->pid, sig, frame, regs->pc);
return 0;
}
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index bae697a06a98..32c5207f1226 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -33,6 +33,7 @@
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
+#include <linux/ratelimit.h>
#include <asm/stacktrace.h>
#include <asm/ptrace.h>
@@ -158,8 +159,7 @@ COPROCESSOR(7),
* 2. it is a temporary memory buffer for the exception handlers.
*/
-DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]);
-
+DEFINE_PER_CPU(struct exc_table, exc_table);
DEFINE_PER_CPU(struct debug_table, debug_table);
void die(const char*, struct pt_regs*, long);
@@ -178,13 +178,14 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err)
void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{
__die_if_kernel("Caught unhandled exception - should not happen",
- regs, SIGKILL);
+ regs, SIGKILL);
/* If in user mode, send SIGILL signal to current process */
- printk("Caught unhandled exception in '%s' "
- "(pid = %d, pc = %#010lx) - should not happen\n"
- "\tEXCCAUSE is %ld\n",
- current->comm, task_pid_nr(current), regs->pc, exccause);
+ pr_info_ratelimited("Caught unhandled exception in '%s' "
+ "(pid = %d, pc = %#010lx) - should not happen\n"
+ "\tEXCCAUSE is %ld\n",
+ current->comm, task_pid_nr(current), regs->pc,
+ exccause);
force_sig(SIGILL, current);
}
@@ -305,8 +306,8 @@ do_illegal_instruction(struct pt_regs *regs)
/* If in user mode, send SIGILL signal to current process. */
- printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
- current->comm, task_pid_nr(current), regs->pc);
+ pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
+ current->comm, task_pid_nr(current), regs->pc);
force_sig(SIGILL, current);
}
@@ -325,13 +326,14 @@ do_unaligned_user (struct pt_regs *regs)
siginfo_t info;
__die_if_kernel("Unhandled unaligned exception in kernel",
- regs, SIGKILL);
+ regs, SIGKILL);
current->thread.bad_vaddr = regs->excvaddr;
current->thread.error_code = -3;
- printk("Unaligned memory access to %08lx in '%s' "
- "(pid = %d, pc = %#010lx)\n",
- regs->excvaddr, current->comm, task_pid_nr(current), regs->pc);
+ pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
+ "(pid = %d, pc = %#010lx)\n",
+ regs->excvaddr, current->comm,
+ task_pid_nr(current), regs->pc);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
@@ -365,28 +367,28 @@ do_debug(struct pt_regs *regs)
}
-static void set_handler(int idx, void *handler)
-{
- unsigned int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(exc_table, cpu)[idx] = (unsigned long)handler;
-}
+#define set_handler(type, cause, handler) \
+ do { \
+ unsigned int cpu; \
+ \
+ for_each_possible_cpu(cpu) \
+ per_cpu(exc_table, cpu).type[cause] = (handler);\
+ } while (0)
/* Set exception C handler - for temporary use when probing exceptions */
void * __init trap_set_handler(int cause, void *handler)
{
- void *previous = (void *)per_cpu(exc_table, 0)[
- EXC_TABLE_DEFAULT / 4 + cause];
- set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler);
+ void *previous = per_cpu(exc_table, 0).default_handler[cause];
+
+ set_handler(default_handler, cause, handler);
return previous;
}
static void trap_init_excsave(void)
{
- unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
+ unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table);
__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
}
@@ -418,10 +420,10 @@ void __init trap_init(void)
/* Setup default vectors. */
- for(i = 0; i < 64; i++) {
- set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception);
- set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
- set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
+ for (i = 0; i < EXCCAUSE_N; i++) {
+ set_handler(fast_user_handler, i, user_exception);
+ set_handler(fast_kernel_handler, i, kernel_exception);
+ set_handler(default_handler, i, do_unhandled);
}
/* Setup specific handlers. */
@@ -433,11 +435,11 @@ void __init trap_init(void)
void *handler = dispatch_init_table[i].handler;
if (fast == 0)
- set_handler (EXC_TABLE_DEFAULT/4 + cause, handler);
+ set_handler(default_handler, cause, handler);
if (fast && fast & USER)
- set_handler (EXC_TABLE_FAST_USER/4 + cause, handler);
+ set_handler(fast_user_handler, cause, handler);
if (fast && fast & KRNL)
- set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler);
+ set_handler(fast_kernel_handler, cause, handler);
}
/* Initialize EXCSAVE_1 to hold the address of the exception table. */
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 332e9d635fb6..841503d3307c 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -205,9 +205,6 @@ ENDPROC(_KernelExceptionVector)
*/
.section .DoubleExceptionVector.text, "ax"
- .begin literal_prefix .DoubleExceptionVector
- .globl _DoubleExceptionVector_WindowUnderflow
- .globl _DoubleExceptionVector_WindowOverflow
ENTRY(_DoubleExceptionVector)
@@ -217,8 +214,12 @@ ENTRY(_DoubleExceptionVector)
/* Check for kernel double exception (usually fatal). */
rsr a2, ps
- _bbci.l a2, PS_UM_BIT, .Lksp
+ _bbsi.l a2, PS_UM_BIT, 1f
+ j .Lksp
+ .align 4
+ .literal_position
+1:
/* Check if we are currently handling a window exception. */
/* Note: We don't need to indicate that we enter a critical section. */
@@ -304,8 +305,7 @@ _DoubleExceptionVector_WindowUnderflow:
.Lunrecoverable:
rsr a3, excsave1
wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0
+ call0 unrecoverable_exception
.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
@@ -475,11 +475,8 @@ _DoubleExceptionVector_handle_exception:
rotw -3
j 1b
-
ENDPROC(_DoubleExceptionVector)
- .end literal_prefix
-
.text
/*
 * Fixup handler for TLB miss in double exception handler for window overflow.
@@ -508,6 +505,8 @@ ENDPROC(_DoubleExceptionVector)
* a3: exctable, original value in excsave1
*/
+ .literal_position
+
ENTRY(window_overflow_restore_a0_fixup)
rsr a0, ps
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 162c77e53ca8..70b731edc7b8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -45,24 +45,16 @@ jiffies = jiffies_64;
LONG(sym ## _end); \
LONG(LOADADDR(section))
-/* Macro to define a section for a vector.
- *
- * Use of the MIN function catches the types of errors illustrated in
- * the following example:
- *
- * Assume the section .DoubleExceptionVector.literal is completely
- * full. Then a programmer adds code to .DoubleExceptionVector.text
- * that produces another literal. The final literal position will
- * overlay onto the first word of the adjacent code section
- * .DoubleExceptionVector.text. (In practice, the literals will
- * overwrite the code, and the first few instructions will be
- * garbage.)
+/*
+ * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is
+ * defined, the code for every vector is located with the other init data. At
+ * startup, head.S copies the code for every vector to its final position
+ * according to the description recorded in the corresponding RELOCATE_ENTRY.
*/
#ifdef CONFIG_VECTORS_OFFSET
-#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \
- section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \
- LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
+#define SECTION_VECTOR(sym, section, addr, prevsec) \
+ section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
{ \
. = ALIGN(4); \
sym ## _start = ABSOLUTE(.); \
@@ -112,26 +104,19 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 6
SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
#endif
- SECTION_VECTOR (.DebugInterruptVector.literal, DEBUG_VECTOR_VADDR - 4)
SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
- SECTION_VECTOR (.KernelExceptionVector.literal, KERNEL_VECTOR_VADDR - 4)
SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
- SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
- SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
#endif
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ ENTRY_TEXT
TEXT_TEXT
- VMLINUX_SYMBOL(__sched_text_start) = .;
- *(.sched.literal .sched.text)
- VMLINUX_SYMBOL(__sched_text_end) = .;
- VMLINUX_SYMBOL(__cpuidle_text_start) = .;
- *(.cpuidle.literal .cpuidle.text)
- VMLINUX_SYMBOL(__cpuidle_text_end) = .;
- VMLINUX_SYMBOL(__lock_text_start) = .;
- *(.spinlock.literal .spinlock.text)
- VMLINUX_SYMBOL(__lock_text_end) = .;
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
}
_etext = .;
@@ -196,8 +181,6 @@ SECTIONS
.KernelExceptionVector.text);
RELOCATE_ENTRY(_UserExceptionVector_text,
.UserExceptionVector.text);
- RELOCATE_ENTRY(_DoubleExceptionVector_literal,
- .DoubleExceptionVector.literal);
RELOCATE_ENTRY(_DoubleExceptionVector_text,
.DoubleExceptionVector.text);
RELOCATE_ENTRY(_DebugInterruptVector_text,
@@ -230,25 +213,19 @@ SECTIONS
SECTION_VECTOR (_WindowVectors_text,
.WindowVectors.text,
- WINDOW_VECTORS_VADDR, 4,
+ WINDOW_VECTORS_VADDR,
.dummy)
- SECTION_VECTOR (_DebugInterruptVector_literal,
- .DebugInterruptVector.literal,
- DEBUG_VECTOR_VADDR - 4,
- SIZEOF(.WindowVectors.text),
- .WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text,
DEBUG_VECTOR_VADDR,
- 4,
- .DebugInterruptVector.literal)
+ .WindowVectors.text)
#undef LAST
#define LAST .DebugInterruptVector.text
#if XCHAL_EXCM_LEVEL >= 2
SECTION_VECTOR (_Level2InterruptVector_text,
.Level2InterruptVector.text,
INTLEVEL2_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level2InterruptVector.text
#endif
@@ -256,7 +233,7 @@ SECTIONS
SECTION_VECTOR (_Level3InterruptVector_text,
.Level3InterruptVector.text,
INTLEVEL3_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level3InterruptVector.text
#endif
@@ -264,7 +241,7 @@ SECTIONS
SECTION_VECTOR (_Level4InterruptVector_text,
.Level4InterruptVector.text,
INTLEVEL4_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level4InterruptVector.text
#endif
@@ -272,7 +249,7 @@ SECTIONS
SECTION_VECTOR (_Level5InterruptVector_text,
.Level5InterruptVector.text,
INTLEVEL5_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level5InterruptVector.text
#endif
@@ -280,40 +257,23 @@ SECTIONS
SECTION_VECTOR (_Level6InterruptVector_text,
.Level6InterruptVector.text,
INTLEVEL6_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level6InterruptVector.text
#endif
- SECTION_VECTOR (_KernelExceptionVector_literal,
- .KernelExceptionVector.literal,
- KERNEL_VECTOR_VADDR - 4,
- SIZEOF(LAST), LAST)
-#undef LAST
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
KERNEL_VECTOR_VADDR,
- 4,
- .KernelExceptionVector.literal)
- SECTION_VECTOR (_UserExceptionVector_literal,
- .UserExceptionVector.literal,
- USER_VECTOR_VADDR - 4,
- SIZEOF(.KernelExceptionVector.text),
- .KernelExceptionVector.text)
+ LAST)
+#undef LAST
SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text,
USER_VECTOR_VADDR,
- 4,
- .UserExceptionVector.literal)
- SECTION_VECTOR (_DoubleExceptionVector_literal,
- .DoubleExceptionVector.literal,
- DOUBLEEXC_VECTOR_VADDR - 20,
- SIZEOF(.UserExceptionVector.text),
- .UserExceptionVector.text)
+ .KernelExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
- 20,
- .DoubleExceptionVector.literal)
+ .UserExceptionVector.text)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
@@ -323,7 +283,6 @@ SECTIONS
SECTION_VECTOR (_SecondaryResetVector_text,
.SecondaryResetVector.text,
RESET_VECTOR1_VADDR,
- SIZEOF(.DoubleExceptionVector.text),
.DoubleExceptionVector.text)
. = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
@@ -373,5 +332,4 @@ SECTIONS
/* Sections to be discarded */
DISCARDS
- /DISCARD/ : { *(.exit.literal) }
}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 672391003e40..04f19de46700 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -41,7 +41,12 @@
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
EXPORT_SYMBOL(__strncpy_user);
+#endif
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index 4eb573d2720e..528fe0dd9339 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -14,9 +14,10 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <asm/errno.h>
+#include <linux/errno.h>
#include <linux/linkage.h>
#include <variant/core.h>
+#include <asm/asmmacro.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
@@ -175,23 +176,8 @@ ENDPROC(csum_partial)
/*
* Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for each access type.
*/
-#define SRC(y...) \
- 9999: y; \
- .section __ex_table, "a"; \
- .long 9999b, 6001f ; \
- .previous
-
-#define DST(y...) \
- 9999: y; \
- .section __ex_table, "a"; \
- .long 9999b, 6002f ; \
- .previous
-
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
int sum, int *src_err_ptr, int *dst_err_ptr)
@@ -244,28 +230,28 @@ ENTRY(csum_partial_copy_generic)
add a10, a10, a2 /* a10 = end of last 32-byte src chunk */
.Loop5:
#endif
-SRC( l32i a9, a2, 0 )
-SRC( l32i a8, a2, 4 )
-DST( s32i a9, a3, 0 )
-DST( s32i a8, a3, 4 )
+EX(10f) l32i a9, a2, 0
+EX(10f) l32i a8, a2, 4
+EX(11f) s32i a9, a3, 0
+EX(11f) s32i a8, a3, 4
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
-SRC( l32i a9, a2, 8 )
-SRC( l32i a8, a2, 12 )
-DST( s32i a9, a3, 8 )
-DST( s32i a8, a3, 12 )
+EX(10f) l32i a9, a2, 8
+EX(10f) l32i a8, a2, 12
+EX(11f) s32i a9, a3, 8
+EX(11f) s32i a8, a3, 12
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
-SRC( l32i a9, a2, 16 )
-SRC( l32i a8, a2, 20 )
-DST( s32i a9, a3, 16 )
-DST( s32i a8, a3, 20 )
+EX(10f) l32i a9, a2, 16
+EX(10f) l32i a8, a2, 20
+EX(11f) s32i a9, a3, 16
+EX(11f) s32i a8, a3, 20
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
-SRC( l32i a9, a2, 24 )
-SRC( l32i a8, a2, 28 )
-DST( s32i a9, a3, 24 )
-DST( s32i a8, a3, 28 )
+EX(10f) l32i a9, a2, 24
+EX(10f) l32i a8, a2, 28
+EX(11f) s32i a9, a3, 24
+EX(11f) s32i a8, a3, 28
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
addi a2, a2, 32
@@ -284,8 +270,8 @@ DST( s32i a8, a3, 28 )
add a10, a10, a2 /* a10 = end of last 4-byte src chunk */
.Loop6:
#endif
-SRC( l32i a9, a2, 0 )
-DST( s32i a9, a3, 0 )
+EX(10f) l32i a9, a2, 0
+EX(11f) s32i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 4
addi a3, a3, 4
@@ -315,8 +301,8 @@ DST( s32i a9, a3, 0 )
add a10, a10, a2 /* a10 = end of last 2-byte src chunk */
.Loop7:
#endif
-SRC( l16ui a9, a2, 0 )
-DST( s16i a9, a3, 0 )
+EX(10f) l16ui a9, a2, 0
+EX(11f) s16i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 2
addi a3, a3, 2
@@ -326,8 +312,8 @@ DST( s16i a9, a3, 0 )
4:
/* This section processes a possible trailing odd byte. */
_bbci.l a4, 0, 8f /* 1-byte chunk */
-SRC( l8ui a9, a2, 0 )
-DST( s8i a9, a3, 0 )
+EX(10f) l8ui a9, a2, 0
+EX(11f) s8i a9, a3, 0
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* shift byte to bits 8..15 */
#endif
@@ -350,10 +336,10 @@ DST( s8i a9, a3, 0 )
add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */
.Loop8:
#endif
-SRC( l8ui a9, a2, 0 )
-SRC( l8ui a8, a2, 1 )
-DST( s8i a9, a3, 0 )
-DST( s8i a8, a3, 1 )
+EX(10f) l8ui a9, a2, 0
+EX(10f) l8ui a8, a2, 1
+EX(11f) s8i a9, a3, 0
+EX(11f) s8i a8, a3, 1
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* combine into a single 16-bit value */
#else /* for checksum computation */
@@ -381,7 +367,7 @@ ENDPROC(csum_partial_copy_generic)
a12 = original dst for exception handling
*/
-6001:
+10:
_movi a2, -EFAULT
s32i a2, a6, 0 /* src_err_ptr */
@@ -403,7 +389,7 @@ ENDPROC(csum_partial_copy_generic)
2:
retw
-6002:
+11:
movi a2, -EFAULT
s32i a2, a7, 0 /* dst_err_ptr */
movi a2, 0
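The EX(10f)/EX(11f) annotations send source faults and destination faults to the two fixup blocks above (labels 10 and 11), replacing the per-access SRC/DST macros with the common EX macro from asmmacro.h. The checksum arithmetic itself is untouched; ONES_ADD is ones-complement accumulation with end-around carry, roughly this in C (sketch):

static inline u32 ones_add(u32 sum, u32 v)
{
	sum += v;
	if (sum < v)	/* carry out of bit 31 wraps back into bit 0 */
		sum += 1;
	return sum;
}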
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index b1c219acabe7..c0f6981719d6 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -9,23 +9,9 @@
* Copyright (C) 2002 - 2012 Tensilica Inc.
*/
+#include <linux/linkage.h>
#include <variant/core.h>
-
- .macro src_b r, w0, w1
-#ifdef __XTENSA_EB__
- src \r, \w0, \w1
-#else
- src \r, \w1, \w0
-#endif
- .endm
-
- .macro ssa8 r
-#ifdef __XTENSA_EB__
- ssa8b \r
-#else
- ssa8l \r
-#endif
- .endm
+#include <asm/asmmacro.h>
/*
* void *memcpy(void *dst, const void *src, size_t len);
@@ -123,10 +109,8 @@
addi a5, a5, 2
j .Ldstaligned # dst is now aligned, return to main algorithm
- .align 4
- .global memcpy
- .type memcpy,@function
-memcpy:
+ENTRY(__memcpy)
+WEAK(memcpy)
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
@@ -209,7 +193,7 @@ memcpy:
.Lsrcunaligned:
_beqz a4, .Ldone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src
- ssa8 a3 # set shift amount from byte offset
+ __ssa8 a3 # set shift amount from byte offset
/* set to 1 when running on ISS (simulator) with the
lint or ferret client, or 0 to save a few cycles */
@@ -229,16 +213,16 @@ memcpy:
.Loop2:
l32i a7, a3, 4
l32i a8, a3, 8
- src_b a6, a6, a7
+ __src_b a6, a6, a7
s32i a6, a5, 0
l32i a9, a3, 12
- src_b a7, a7, a8
+ __src_b a7, a7, a8
s32i a7, a5, 4
l32i a6, a3, 16
- src_b a8, a8, a9
+ __src_b a8, a8, a9
s32i a8, a5, 8
addi a3, a3, 16
- src_b a9, a9, a6
+ __src_b a9, a9, a6
s32i a9, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
@@ -249,10 +233,10 @@ memcpy:
# copy 8 bytes
l32i a7, a3, 4
l32i a8, a3, 8
- src_b a6, a6, a7
+ __src_b a6, a6, a7
s32i a6, a5, 0
addi a3, a3, 8
- src_b a7, a7, a8
+ __src_b a7, a7, a8
s32i a7, a5, 4
addi a5, a5, 8
mov a6, a8
@@ -261,7 +245,7 @@ memcpy:
# copy 4 bytes
l32i a7, a3, 4
addi a3, a3, 4
- src_b a6, a6, a7
+ __src_b a6, a6, a7
s32i a6, a5, 0
addi a5, a5, 4
mov a6, a7
@@ -288,14 +272,14 @@ memcpy:
s8i a6, a5, 0
retw
+ENDPROC(__memcpy)
/*
* void bcopy(const void *src, void *dest, size_t n);
*/
- .align 4
- .global bcopy
- .type bcopy,@function
-bcopy:
+
+ENTRY(bcopy)
+
entry sp, 16 # minimal stack frame
# a2=src, a3=dst, a4=len
mov a5, a3
@@ -303,6 +287,8 @@ bcopy:
mov a2, a5
j .Lmovecommon # go to common code for memmove+bcopy
+ENDPROC(bcopy)
+
/*
* void *memmove(void *dst, const void *src, size_t len);
*
@@ -391,10 +377,8 @@ bcopy:
j .Lbackdstaligned # dst is now aligned,
# return to main algorithm
- .align 4
- .global memmove
- .type memmove,@function
-memmove:
+ENTRY(__memmove)
+WEAK(memmove)
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
@@ -485,7 +469,7 @@ memmove:
.Lbacksrcunaligned:
_beqz a4, .Lbackdone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src
- ssa8 a3 # set shift amount from byte offset
+ __ssa8 a3 # set shift amount from byte offset
#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with
* the lint or ferret client, or 0
* to save a few cycles */
@@ -506,15 +490,15 @@ memmove:
l32i a7, a3, 12
l32i a8, a3, 8
addi a5, a5, -16
- src_b a6, a7, a6
+ __src_b a6, a7, a6
s32i a6, a5, 12
l32i a9, a3, 4
- src_b a7, a8, a7
+ __src_b a7, a8, a7
s32i a7, a5, 8
l32i a6, a3, 0
- src_b a8, a9, a8
+ __src_b a8, a9, a8
s32i a8, a5, 4
- src_b a9, a6, a9
+ __src_b a9, a6, a9
s32i a9, a5, 0
#if !XCHAL_HAVE_LOOPS
bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start
@@ -526,9 +510,9 @@ memmove:
l32i a7, a3, 4
l32i a8, a3, 0
addi a5, a5, -8
- src_b a6, a7, a6
+ __src_b a6, a7, a6
s32i a6, a5, 4
- src_b a7, a8, a7
+ __src_b a7, a8, a7
s32i a7, a5, 0
mov a6, a8
.Lback12:
@@ -537,7 +521,7 @@ memmove:
addi a3, a3, -4
l32i a7, a3, 0
addi a5, a5, -4
- src_b a6, a7, a6
+ __src_b a6, a7, a6
s32i a6, a5, 0
mov a6, a7
.Lback13:
@@ -566,11 +550,4 @@ memmove:
s8i a6, a5, 0
retw
-
-/*
- * Local Variables:
- * mode:fundamental
- * comment-start: "# "
- * comment-start-skip: "# *"
- * End:
- */
+ENDPROC(__memmove)
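
ENTRY(__memcpy) plus WEAK(memcpy) publishes the copy loop under a strong private name and a weak canonical one, so an instrumented build (KASAN, below) can interpose its own memcpy and still reach the raw implementation via __memcpy. A simplified sketch of the generic linux/linkage.h assembler macros this relies on (the real definitions add ASM_NL and alignment plumbing):

	#define ENTRY(name)   .globl name; ALIGN; name:
	#define WEAK(name)    .weak name; name:
	#define ENDPROC(name) .type name, @function; END(name)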
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
index 10b8c400f175..276747dec300 100644
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S
@@ -11,7 +11,9 @@
* Copyright (C) 2002 Tensilica Inc.
*/
+#include <linux/linkage.h>
#include <variant/core.h>
+#include <asm/asmmacro.h>
/*
* void *memset(void *dst, int c, size_t length)
@@ -28,20 +30,10 @@
* the alignment labels).
*/
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler) \
-9: insn reg1, reg2, offset; \
- .section __ex_table, "a"; \
- .word 9b, handler; \
- .previous
-
-
.text
-.align 4
-.global memset
-.type memset,@function
-memset:
+ENTRY(__memset)
+WEAK(memset)
+
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ c, a4/ length
extui a3, a3, 0, 8 # mask to just 8 bits
@@ -73,10 +65,10 @@ memset:
add a6, a6, a5 # a6 = end of last 16B chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
- EX(s32i, a3, a5, 0, memset_fixup)
- EX(s32i, a3, a5, 4, memset_fixup)
- EX(s32i, a3, a5, 8, memset_fixup)
- EX(s32i, a3, a5, 12, memset_fixup)
+EX(10f) s32i a3, a5, 0
+EX(10f) s32i a3, a5, 4
+EX(10f) s32i a3, a5, 8
+EX(10f) s32i a3, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
blt a5, a6, .Loop1
@@ -84,23 +76,23 @@ memset:
.Loop1done:
bbci.l a4, 3, .L2
# set 8 bytes
- EX(s32i, a3, a5, 0, memset_fixup)
- EX(s32i, a3, a5, 4, memset_fixup)
+EX(10f) s32i a3, a5, 0
+EX(10f) s32i a3, a5, 4
addi a5, a5, 8
.L2:
bbci.l a4, 2, .L3
# set 4 bytes
- EX(s32i, a3, a5, 0, memset_fixup)
+EX(10f) s32i a3, a5, 0
addi a5, a5, 4
.L3:
bbci.l a4, 1, .L4
# set 2 bytes
- EX(s16i, a3, a5, 0, memset_fixup)
+EX(10f) s16i a3, a5, 0
addi a5, a5, 2
.L4:
bbci.l a4, 0, .L5
# set 1 byte
- EX(s8i, a3, a5, 0, memset_fixup)
+EX(10f) s8i a3, a5, 0
.L5:
.Lret1:
retw
@@ -114,7 +106,7 @@ memset:
bbci.l a5, 0, .L20 # branch if dst alignment half-aligned
# dst is only byte aligned
# set 1 byte
- EX(s8i, a3, a5, 0, memset_fixup)
+EX(10f) s8i a3, a5, 0
addi a5, a5, 1
addi a4, a4, -1
# now retest if dst aligned
@@ -122,7 +114,7 @@ memset:
.L20:
# dst half-aligned
# set 2 bytes
- EX(s16i, a3, a5, 0, memset_fixup)
+EX(10f) s16i a3, a5, 0
addi a5, a5, 2
addi a4, a4, -2
j .L0 # dst is now aligned, return to main algorithm
@@ -141,7 +133,7 @@ memset:
add a6, a5, a4 # a6 = ending address
#endif /* !XCHAL_HAVE_LOOPS */
.Lbyteloop:
- EX(s8i, a3, a5, 0, memset_fixup)
+EX(10f) s8i a3, a5, 0
addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS
blt a5, a6, .Lbyteloop
@@ -149,12 +141,13 @@ memset:
.Lbytesetdone:
retw
+ENDPROC(__memset)
.section .fixup, "ax"
.align 4
/* We return zero if a failure occurred. */
-memset_fixup:
+10:
movi a2, 0
retw
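
Making memset weak is what lets the KASAN runtime wrap it; roughly what mm/kasan does at this point, sketched under the assumption of the contemporary check_memory_region() interface:

	/* Sketch: validate the written range against the shadow map,
	 * then delegate to the uninstrumented __memset(). */
	#undef memset
	void *memset(void *addr, int c, size_t len)
	{
		check_memory_region((unsigned long)addr, len, true, _RET_IP_);
		return __memset(addr, c, len);
	}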
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
index 34d05abbd921..a2b558161d6d 100644
--- a/arch/xtensa/lib/pci-auto.c
+++ b/arch/xtensa/lib/pci-auto.c
@@ -49,17 +49,6 @@
*
*/
-
-/* define DEBUG to print some debugging messages. */
-
-#undef DEBUG
-
-#ifdef DEBUG
-# define DBG(x...) printk(x)
-#else
-# define DBG(x...)
-#endif
-
static int pciauto_upper_iospc;
static int pciauto_upper_memspc;
@@ -97,7 +86,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
{
bar_size &= PCI_BASE_ADDRESS_IO_MASK;
upper_limit = &pciauto_upper_iospc;
- DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
+ pr_debug("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
}
else
{
@@ -107,7 +96,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
upper_limit = &pciauto_upper_memspc;
- DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
+ pr_debug("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
}
/* Allocate a base address (bar_size is negative!) */
@@ -125,7 +114,8 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
if (found_mem64)
pci_write_config_dword(dev, (bar+=4), 0x00000000);
- DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit);
+ pr_debug("size=0x%x, address=0x%x\n",
+ ~bar_size + 1, *upper_limit);
}
}
@@ -150,7 +140,7 @@ pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
if (irq == -1)
irq = 0;
- DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
+ pr_debug("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
@@ -289,8 +279,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
int iosave, memsave;
- DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
- PCI_SLOT(pci_devfn));
+ pr_debug("PCI Autoconfig: Found P2P bridge, device %d\n",
+ PCI_SLOT(pci_devfn));
/* Allocate PCI I/O and/or memory space */
pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);
@@ -306,23 +296,6 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
}
-
-#if 0
- /* Skip legacy mode IDE controller */
-
- if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {
-
- unsigned char prg_iface;
- pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);
-
- if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
- DBG("PCI Autoconfig: Skipping legacy mode "
- "IDE controller\n");
- continue;
- }
- }
-#endif
-
/*
* Found a peripheral, enable some standard
* settings
@@ -337,8 +310,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
/* Allocate PCI I/O and/or memory space */
- DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
- current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) );
+ pr_debug("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
+ current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn));
pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
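
The hand-rolled DBG() macro is dropped in favor of pr_debug(), which costs nothing in a normal build: unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG is enabled, it collapses to a type-checking no-op, roughly:

	/* include/linux/printk.h, simplified fallback case */
	#define pr_debug(fmt, ...) \
		no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)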
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 1ad0ecf45368..5fce16b67dca 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -11,16 +11,10 @@
* Copyright (C) 2002 Tensilica Inc.
*/
-#include <variant/core.h>
#include <linux/errno.h>
-
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler) \
-9: insn reg1, reg2, offset; \
- .section __ex_table, "a"; \
- .word 9b, handler; \
- .previous
+#include <linux/linkage.h>
+#include <variant/core.h>
+#include <asm/asmmacro.h>
/*
* char *__strncpy_user(char *dst, const char *src, size_t len)
@@ -54,10 +48,8 @@
# a12/ tmp
.text
-.align 4
-.global __strncpy_user
-.type __strncpy_user,@function
-__strncpy_user:
+ENTRY(__strncpy_user)
+
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
mov a11, a2 # leave dst in return value register
@@ -75,9 +67,9 @@ __strncpy_user:
j .Ldstunaligned
.Lsrc1mod2: # src address is odd
- EX(l8ui, a9, a3, 0, fixup_l) # get byte 0
+EX(11f) l8ui a9, a3, 0 # get byte 0
addi a3, a3, 1 # advance src pointer
- EX(s8i, a9, a11, 0, fixup_s) # store byte 0
+EX(10f) s8i a9, a11, 0 # store byte 0
beqz a9, .Lret # if byte 0 is zero
addi a11, a11, 1 # advance dst pointer
addi a4, a4, -1 # decrement len
@@ -85,16 +77,16 @@ __strncpy_user:
bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned
.Lsrc2mod4: # src address is 2 mod 4
- EX(l8ui, a9, a3, 0, fixup_l) # get byte 0
+EX(11f) l8ui a9, a3, 0 # get byte 0
/* 1-cycle interlock */
- EX(s8i, a9, a11, 0, fixup_s) # store byte 0
+EX(10f) s8i a9, a11, 0 # store byte 0
beqz a9, .Lret # if byte 0 is zero
addi a11, a11, 1 # advance dst pointer
addi a4, a4, -1 # decrement len
beqz a4, .Lret # if len is zero
- EX(l8ui, a9, a3, 1, fixup_l) # get byte 0
+EX(11f) l8ui a9, a3, 1 # get byte 0
addi a3, a3, 2 # advance src pointer
- EX(s8i, a9, a11, 0, fixup_s) # store byte 0
+EX(10f) s8i a9, a11, 0 # store byte 0
beqz a9, .Lret # if byte 0 is zero
addi a11, a11, 1 # advance dst pointer
addi a4, a4, -1 # decrement len
@@ -117,12 +109,12 @@ __strncpy_user:
add a12, a12, a11	# a12 = end of last 4B chunk
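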
#endif
.Loop1:
- EX(l32i, a9, a3, 0, fixup_l) # get word from src
+EX(11f) l32i a9, a3, 0 # get word from src
addi a3, a3, 4 # advance src pointer
bnone a9, a5, .Lz0 # if byte 0 is zero
bnone a9, a6, .Lz1 # if byte 1 is zero
bnone a9, a7, .Lz2 # if byte 2 is zero
- EX(s32i, a9, a11, 0, fixup_s) # store word to dst
+EX(10f) s32i a9, a11, 0 # store word to dst
bnone a9, a8, .Lz3 # if byte 3 is zero
addi a11, a11, 4 # advance dst pointer
#if !XCHAL_HAVE_LOOPS
@@ -132,7 +124,7 @@ __strncpy_user:
.Loop1done:
bbci.l a4, 1, .L100
# copy 2 bytes
- EX(l16ui, a9, a3, 0, fixup_l)
+EX(11f) l16ui a9, a3, 0
addi a3, a3, 2 # advance src pointer
#ifdef __XTENSA_EB__
bnone a9, a7, .Lz0 # if byte 2 is zero
@@ -141,13 +133,13 @@ __strncpy_user:
bnone a9, a5, .Lz0 # if byte 0 is zero
bnone a9, a6, .Lz1 # if byte 1 is zero
#endif
- EX(s16i, a9, a11, 0, fixup_s)
+EX(10f) s16i a9, a11, 0
addi a11, a11, 2 # advance dst pointer
.L100:
bbci.l a4, 0, .Lret
- EX(l8ui, a9, a3, 0, fixup_l)
+EX(11f) l8ui a9, a3, 0
/* slot */
- EX(s8i, a9, a11, 0, fixup_s)
+EX(10f) s8i a9, a11, 0
beqz a9, .Lret # if byte is zero
addi a11, a11, 1-3 # advance dst ptr 1, but also cancel
# the effect of adding 3 in .Lz3 code
@@ -161,14 +153,14 @@ __strncpy_user:
#ifdef __XTENSA_EB__
movi a9, 0
#endif /* __XTENSA_EB__ */
- EX(s8i, a9, a11, 0, fixup_s)
+EX(10f) s8i a9, a11, 0
sub a2, a11, a2 # compute strlen
retw
.Lz1: # byte 1 is zero
#ifdef __XTENSA_EB__
extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
- EX(s16i, a9, a11, 0, fixup_s)
+EX(10f) s16i a9, a11, 0
addi a11, a11, 1 # advance dst pointer
sub a2, a11, a2 # compute strlen
retw
@@ -176,9 +168,9 @@ __strncpy_user:
#ifdef __XTENSA_EB__
extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
- EX(s16i, a9, a11, 0, fixup_s)
+EX(10f) s16i a9, a11, 0
movi a9, 0
- EX(s8i, a9, a11, 2, fixup_s)
+EX(10f) s8i a9, a11, 2
addi a11, a11, 2 # advance dst pointer
sub a2, a11, a2 # compute strlen
retw
@@ -196,9 +188,9 @@ __strncpy_user:
add a12, a11, a4 # a12 = ending address
#endif /* XCHAL_HAVE_LOOPS */
.Lnextbyte:
- EX(l8ui, a9, a3, 0, fixup_l)
+EX(11f) l8ui a9, a3, 0
addi a3, a3, 1
- EX(s8i, a9, a11, 0, fixup_s)
+EX(10f) s8i a9, a11, 0
beqz a9, .Lunalignedend
addi a11, a11, 1
#if !XCHAL_HAVE_LOOPS
@@ -209,6 +201,7 @@ __strncpy_user:
sub a2, a11, a2 # compute strlen
retw
+ENDPROC(__strncpy_user)
.section .fixup, "ax"
.align 4
@@ -218,8 +211,7 @@ __strncpy_user:
* implementation in memset(). Thus, we differentiate between
* load/store fixups. */
-fixup_s:
-fixup_l:
+10:
+11:
movi a2, -EFAULT
retw
-
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index 4c03b1e581e9..0b956ce7f386 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -11,15 +11,9 @@
* Copyright (C) 2002 Tensilica Inc.
*/
+#include <linux/linkage.h>
#include <variant/core.h>
-
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler) \
-9: insn reg1, reg2, offset; \
- .section __ex_table, "a"; \
- .word 9b, handler; \
- .previous
+#include <asm/asmmacro.h>
/*
* size_t __strnlen_user(const char *s, size_t len)
@@ -49,10 +43,8 @@
# a10/ tmp
.text
-.align 4
-.global __strnlen_user
-.type __strnlen_user,@function
-__strnlen_user:
+ENTRY(__strnlen_user)
+
entry sp, 16 # minimal stack frame
# a2/ s, a3/ len
addi a4, a2, -4 # because we overincrement at the end;
@@ -77,7 +69,7 @@ __strnlen_user:
add a10, a10, a4 # a10 = end of last 4B chunk
#endif /* XCHAL_HAVE_LOOPS */
.Loop:
- EX(l32i, a9, a4, 4, lenfixup) # get next word of string
+EX(10f) l32i a9, a4, 4 # get next word of string
addi a4, a4, 4 # advance string pointer
bnone a9, a5, .Lz0 # if byte 0 is zero
bnone a9, a6, .Lz1 # if byte 1 is zero
@@ -88,7 +80,7 @@ __strnlen_user:
#endif
.Ldone:
- EX(l32i, a9, a4, 4, lenfixup) # load 4 bytes for remaining checks
+EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks
bbci.l a3, 1, .L100
# check two more bytes (bytes 0, 1 of word)
@@ -125,14 +117,14 @@ __strnlen_user:
retw
.L1mod2: # address is odd
- EX(l8ui, a9, a4, 4, lenfixup) # get byte 0
+EX(10f) l8ui a9, a4, 4 # get byte 0
addi a4, a4, 1 # advance string pointer
beqz a9, .Lz3 # if byte 0 is zero
bbci.l a4, 1, .Laligned # if string pointer is now word-aligned
.L2mod4: # address is 2 mod 4
addi a4, a4, 2 # advance ptr for aligned access
- EX(l32i, a9, a4, 0, lenfixup) # get word with first two bytes of string
+EX(10f) l32i a9, a4, 0 # get word with first two bytes of string
bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero
bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero
# byte 3 is zero
@@ -140,8 +132,10 @@ __strnlen_user:
sub a2, a4, a2 # subtract to get length
retw
+ENDPROC(__strnlen_user)
+
.section .fixup, "ax"
.align 4
-lenfixup:
+10:
movi a2, 0
retw
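
The renumbered fixup (10:) keeps the contract that a faulting __strnlen_user returns 0; a caller-side sketch of how that is consumed:

	long len = strnlen_user(ubuf, PATH_MAX);
	if (len == 0)		/* could not read the string at all */
		return -EFAULT;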
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index d9cd766bde3e..64ab1971324f 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -53,30 +53,13 @@
* a11/ original length
*/
+#include <linux/linkage.h>
#include <variant/core.h>
-
-#ifdef __XTENSA_EB__
-#define ALIGN(R, W0, W1) src R, W0, W1
-#define SSA8(R) ssa8b R
-#else
-#define ALIGN(R, W0, W1) src R, W1, W0
-#define SSA8(R) ssa8l R
-#endif
-
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler) \
-9: insn reg1, reg2, offset; \
- .section __ex_table, "a"; \
- .word 9b, handler; \
- .previous
-
+#include <asm/asmmacro.h>
.text
- .align 4
- .global __xtensa_copy_user
- .type __xtensa_copy_user,@function
-__xtensa_copy_user:
+ENTRY(__xtensa_copy_user)
+
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
mov a5, a2 # copy dst so that a2 is return value
@@ -89,7 +72,7 @@ __xtensa_copy_user:
# per iteration
movi a8, 3 # if source is also aligned,
bnone a3, a8, .Laligned # then use word copy
- SSA8( a3) # set shift amount from byte offset
+ __ssa8 a3 # set shift amount from byte offset
bnez a4, .Lsrcunaligned
movi a2, 0 # return success for len==0
retw
@@ -102,9 +85,9 @@ __xtensa_copy_user:
bltui a4, 7, .Lbytecopy # do short copies byte by byte
# copy 1 byte
- EX(l8ui, a6, a3, 0, fixup)
+EX(10f) l8ui a6, a3, 0
addi a3, a3, 1
- EX(s8i, a6, a5, 0, fixup)
+EX(10f) s8i a6, a5, 0
addi a5, a5, 1
addi a4, a4, -1
bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
@@ -112,11 +95,11 @@ __xtensa_copy_user:
.Ldst2mod4: # dst 16-bit aligned
# copy 2 bytes
bltui a4, 6, .Lbytecopy # do short copies byte by byte
- EX(l8ui, a6, a3, 0, fixup)
- EX(l8ui, a7, a3, 1, fixup)
+EX(10f) l8ui a6, a3, 0
+EX(10f) l8ui a7, a3, 1
addi a3, a3, 2
- EX(s8i, a6, a5, 0, fixup)
- EX(s8i, a7, a5, 1, fixup)
+EX(10f) s8i a6, a5, 0
+EX(10f) s8i a7, a5, 1
addi a5, a5, 2
addi a4, a4, -2
j .Ldstaligned # dst is now aligned, return to main algorithm
@@ -135,9 +118,9 @@ __xtensa_copy_user:
add a7, a3, a4 # a7 = end address for source
#endif /* !XCHAL_HAVE_LOOPS */
.Lnextbyte:
- EX(l8ui, a6, a3, 0, fixup)
+EX(10f) l8ui a6, a3, 0
addi a3, a3, 1
- EX(s8i, a6, a5, 0, fixup)
+EX(10f) s8i a6, a5, 0
addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS
blt a3, a7, .Lnextbyte
@@ -161,15 +144,15 @@ __xtensa_copy_user:
add a8, a8, a3 # a8 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
- EX(l32i, a6, a3, 0, fixup)
- EX(l32i, a7, a3, 4, fixup)
- EX(s32i, a6, a5, 0, fixup)
- EX(l32i, a6, a3, 8, fixup)
- EX(s32i, a7, a5, 4, fixup)
- EX(l32i, a7, a3, 12, fixup)
- EX(s32i, a6, a5, 8, fixup)
+EX(10f) l32i a6, a3, 0
+EX(10f) l32i a7, a3, 4
+EX(10f) s32i a6, a5, 0
+EX(10f) l32i a6, a3, 8
+EX(10f) s32i a7, a5, 4
+EX(10f) l32i a7, a3, 12
+EX(10f) s32i a6, a5, 8
addi a3, a3, 16
- EX(s32i, a7, a5, 12, fixup)
+EX(10f) s32i a7, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
blt a3, a8, .Loop1
@@ -177,31 +160,31 @@ __xtensa_copy_user:
.Loop1done:
bbci.l a4, 3, .L2
# copy 8 bytes
- EX(l32i, a6, a3, 0, fixup)
- EX(l32i, a7, a3, 4, fixup)
+EX(10f) l32i a6, a3, 0
+EX(10f) l32i a7, a3, 4
addi a3, a3, 8
- EX(s32i, a6, a5, 0, fixup)
- EX(s32i, a7, a5, 4, fixup)
+EX(10f) s32i a6, a5, 0
+EX(10f) s32i a7, a5, 4
addi a5, a5, 8
.L2:
bbci.l a4, 2, .L3
# copy 4 bytes
- EX(l32i, a6, a3, 0, fixup)
+EX(10f) l32i a6, a3, 0
addi a3, a3, 4
- EX(s32i, a6, a5, 0, fixup)
+EX(10f) s32i a6, a5, 0
addi a5, a5, 4
.L3:
bbci.l a4, 1, .L4
# copy 2 bytes
- EX(l16ui, a6, a3, 0, fixup)
+EX(10f) l16ui a6, a3, 0
addi a3, a3, 2
- EX(s16i, a6, a5, 0, fixup)
+EX(10f) s16i a6, a5, 0
addi a5, a5, 2
.L4:
bbci.l a4, 0, .L5
# copy 1 byte
- EX(l8ui, a6, a3, 0, fixup)
- EX(s8i, a6, a5, 0, fixup)
+EX(10f) l8ui a6, a3, 0
+EX(10f) s8i a6, a5, 0
.L5:
movi a2, 0 # return success for len bytes copied
retw
@@ -217,7 +200,7 @@ __xtensa_copy_user:
# copy 16 bytes per iteration for word-aligned dst and unaligned src
and a10, a3, a8 # save unalignment offset for below
sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
- EX(l32i, a6, a3, 0, fixup) # load first word
+EX(10f) l32i a6, a3, 0 # load first word
#if XCHAL_HAVE_LOOPS
loopnez a7, .Loop2done
#else /* !XCHAL_HAVE_LOOPS */
@@ -226,19 +209,19 @@ __xtensa_copy_user:
add a12, a12, a3 # a12 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2:
- EX(l32i, a7, a3, 4, fixup)
- EX(l32i, a8, a3, 8, fixup)
- ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, fixup)
- EX(l32i, a9, a3, 12, fixup)
- ALIGN( a7, a7, a8)
- EX(s32i, a7, a5, 4, fixup)
- EX(l32i, a6, a3, 16, fixup)
- ALIGN( a8, a8, a9)
- EX(s32i, a8, a5, 8, fixup)
+EX(10f) l32i a7, a3, 4
+EX(10f) l32i a8, a3, 8
+ __src_b a6, a6, a7
+EX(10f) s32i a6, a5, 0
+EX(10f) l32i a9, a3, 12
+ __src_b a7, a7, a8
+EX(10f) s32i a7, a5, 4
+EX(10f) l32i a6, a3, 16
+ __src_b a8, a8, a9
+EX(10f) s32i a8, a5, 8
addi a3, a3, 16
- ALIGN( a9, a9, a6)
- EX(s32i, a9, a5, 12, fixup)
+ __src_b a9, a9, a6
+EX(10f) s32i a9, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
blt a3, a12, .Loop2
@@ -246,43 +229,44 @@ __xtensa_copy_user:
.Loop2done:
bbci.l a4, 3, .L12
# copy 8 bytes
- EX(l32i, a7, a3, 4, fixup)
- EX(l32i, a8, a3, 8, fixup)
- ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, fixup)
+EX(10f) l32i a7, a3, 4
+EX(10f) l32i a8, a3, 8
+ __src_b a6, a6, a7
+EX(10f) s32i a6, a5, 0
addi a3, a3, 8
- ALIGN( a7, a7, a8)
- EX(s32i, a7, a5, 4, fixup)
+ __src_b a7, a7, a8
+EX(10f) s32i a7, a5, 4
addi a5, a5, 8
mov a6, a8
.L12:
bbci.l a4, 2, .L13
# copy 4 bytes
- EX(l32i, a7, a3, 4, fixup)
+EX(10f) l32i a7, a3, 4
addi a3, a3, 4
- ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, fixup)
+ __src_b a6, a6, a7
+EX(10f) s32i a6, a5, 0
addi a5, a5, 4
mov a6, a7
.L13:
add a3, a3, a10 # readjust a3 with correct misalignment
bbci.l a4, 1, .L14
# copy 2 bytes
- EX(l8ui, a6, a3, 0, fixup)
- EX(l8ui, a7, a3, 1, fixup)
+EX(10f) l8ui a6, a3, 0
+EX(10f) l8ui a7, a3, 1
addi a3, a3, 2
- EX(s8i, a6, a5, 0, fixup)
- EX(s8i, a7, a5, 1, fixup)
+EX(10f) s8i a6, a5, 0
+EX(10f) s8i a7, a5, 1
addi a5, a5, 2
.L14:
bbci.l a4, 0, .L15
# copy 1 byte
- EX(l8ui, a6, a3, 0, fixup)
- EX(s8i, a6, a5, 0, fixup)
+EX(10f) l8ui a6, a3, 0
+EX(10f) s8i a6, a5, 0
.L15:
movi a2, 0 # return success for len bytes copied
retw
+ENDPROC(__xtensa_copy_user)
.section .fixup, "ax"
.align 4
@@ -294,7 +278,7 @@ __xtensa_copy_user:
*/
-fixup:
+10:
sub a2, a5, a2 /* a2 <-- bytes copied */
sub a2, a11, a2 /* a2 <-- bytes not copied */
retw
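
The fixup at 10: computes a5 - a2 (current minus original dst, i.e. bytes already copied) and subtracts that from the saved length in a11, so the routine returns the number of bytes not copied, which is the usual raw_copy_to/from_user contract. Caller-side sketch:

	/* nonzero return = bytes left uncopied; usually folded to -EFAULT */
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;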
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index 0b3d296a016a..734888a00dc8 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -5,3 +5,8 @@
obj-y := init.o misc.o
obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+KASAN_SANITIZE_fault.o := n
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_mmu.o := n
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 3c75c4e597da..57dc231a0709 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -33,9 +33,6 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-//#define printd(x...) printk(x)
-#define printd(x...) do { } while(0)
-
/*
* Note:
* The kernel provides one architecture bit PG_arch_1 in the page flags that
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index a14df5aa98c8..8b9b6f44bb06 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -25,8 +25,6 @@
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
-#undef DEBUG_PAGE_FAULT
-
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -68,10 +66,10 @@ void do_page_fault(struct pt_regs *regs)
exccause == EXCCAUSE_ITLB_MISS ||
exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
-#ifdef DEBUG_PAGE_FAULT
- printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
- address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
-#endif
+ pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
+ current->comm, current->pid,
+ address, exccause, regs->pc,
+ is_write ? "w" : "", is_exec ? "x" : "");
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
@@ -247,10 +245,8 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Are we prepared to handle this kernel fault? */
if ((entry = search_exception_tables(regs->pc)) != NULL) {
-#ifdef DEBUG_PAGE_FAULT
- printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
- current->comm, regs->pc, entry->fixup);
-#endif
+ pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
+ current->comm, regs->pc, entry->fixup);
current->thread.bad_uaddr = address;
regs->pc = entry->fixup;
return;
@@ -259,9 +255,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
- "address %08lx\n pc = %08lx, ra = %08lx\n",
- address, regs->pc, regs->areg[0]);
+ pr_alert("Unable to handle kernel paging request at virtual "
+ "address %08lx\n pc = %08lx, ra = %08lx\n",
+ address, regs->pc, regs->areg[0]);
die("Oops", regs, sig);
do_exit(sig);
}
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 720fe4e8b497..d776ec0d7b22 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -100,29 +100,51 @@ void __init mem_init(void)
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+ " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
+#ifdef CONFIG_MMU
+ " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif
-#ifdef CONFIG_MMU
- " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n"
+ " .text : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .data : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .init : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .bss : 0x%08lx - 0x%08lx (%5lu kB)\n",
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
+ KASAN_SHADOW_SIZE >> 20,
#endif
- " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n",
+#ifdef CONFIG_MMU
+ VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10,
#endif
-#ifdef CONFIG_MMU
- VMALLOC_START, VMALLOC_END,
- (VMALLOC_END - VMALLOC_START) >> 20,
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else
min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif
- ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
+ ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
+ (unsigned long)_text, (unsigned long)_etext,
+ (unsigned long)(_etext - _text) >> 10,
+ (unsigned long)__start_rodata, (unsigned long)_sdata,
+ (unsigned long)(_sdata - __start_rodata) >> 10,
+ (unsigned long)_sdata, (unsigned long)_edata,
+ (unsigned long)(_edata - _sdata) >> 10,
+ (unsigned long)__init_begin, (unsigned long)__init_end,
+ (unsigned long)(__init_end - __init_begin) >> 10,
+ (unsigned long)__bss_start, (unsigned long)__bss_stop,
+ (unsigned long)(__bss_stop - __bss_start) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
new file mode 100644
index 000000000000..6b532b6bd785
--- /dev/null
+++ b/arch/xtensa/mm/kasan_init.c
@@ -0,0 +1,95 @@
+/*
+ * Xtensa KASAN shadow map initialization
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/initialize_mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void __init kasan_early_init(void)
+{
+ unsigned long vaddr = KASAN_SHADOW_START;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_zero_pte + i,
+ mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
+
+ for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
+ BUG_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
+ }
+ early_trap_init();
+}
+
+static void __init populate(void *start, void *end)
+{
+ unsigned long n_pages = (end - start) / PAGE_SIZE;
+ unsigned long n_pmds = n_pages / PTRS_PER_PTE;
+ unsigned long i, j;
+ unsigned long vaddr = (unsigned long)start;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+ pr_debug("%s: %p - %p\n", __func__, start, end);
+
+ for (i = j = 0; i < n_pmds; ++i) {
+ int k;
+
+ for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+ phys_addr_t phys =
+ memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+ MEMBLOCK_ALLOC_ANYWHERE);
+
+ set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+ }
+ }
+
+ for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
+ set_pmd(pmd + i, __pmd((unsigned long)pte));
+
+ local_flush_tlb_all();
+ memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+ int i;
+
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
+ (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
+ BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
+
+ /*
+ * Replace shadow map pages that cover addresses from VMALLOC area
+ * start to the end of KSEG with clean writable pages.
+ */
+ populate(kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
+
+ /* Write protect kasan_zero_page and zero-initialize it again. */
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_zero_pte + i,
+ mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
+
+ local_flush_tlb_all();
+ memset(kasan_zero_page, 0, PAGE_SIZE);
+
+ /* At this point kasan is fully initialized. Enable error messages. */
+ current->kasan_depth = 0;
+ pr_info("KernelAddressSanitizer initialized\n");
+}
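
Both the BUILD_BUG_ON consistency check and the populate() ranges rest on the generic 1:8 shadow mapping, where one shadow byte describes eight bytes of memory (KASAN_SHADOW_SCALE_SHIFT == 3). The generic helper, simplified, for reference:

	/* include/linux/kasan.h, simplified */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}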
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 358d748d9083..9d1ecfc53670 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -56,7 +56,6 @@ static void __init fixedrange_init(void)
void __init paging_init(void)
{
- memset(swapper_pg_dir, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
fixedrange_init();
pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
@@ -82,6 +81,23 @@ void init_mmu(void)
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
#endif
+ init_kio();
+ local_flush_tlb_all();
+
+ /* Set rasid register to a known value. */
+
+ set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
+
+ /* Set PTEVADDR special register to the start of the page
+	 * table, which is in kernel mappable space (i.e. not
+ * statically mapped). This register's value is undefined on
+ * reset.
+ */
+ set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
+}
+
+void init_kio(void)
+{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
/*
* Update the IO area mapping in case xtensa_kio_paddr has changed
@@ -95,17 +111,4 @@ void init_mmu(void)
write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
XCHAL_KIO_BYPASS_VADDR + 6);
#endif
-
- local_flush_tlb_all();
-
- /* Set rasid register to a known value. */
-
- set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
-
- /* Set PTEVADDR special register to the start of the page
- * table, which is in kernel mappable space (ie. not
- * statically mapped). This register's value is undefined on
- * reset.
- */
- set_ptevaddr_register(PGTABLE_START);
}
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 35c822286bbe..59153d0aa890 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -95,10 +95,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma,
if (mm->context.asid[cpu] == NO_CONTEXT)
return;
-#if 0
- printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
- (unsigned long)mm->context.asid[cpu], start, end);
-#endif
+ pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
+ (unsigned long)mm->context.asid[cpu], start, end);
local_irq_save(flags);
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 464c2684c4f1..92f567f9a21e 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -185,7 +185,7 @@ int __init rs_init(void)
serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
- printk ("%s %s\n", serial_name, serial_version);
+ pr_info("%s %s\n", serial_name, serial_version);
/* Initialize the tty_driver structure */
@@ -214,7 +214,7 @@ static __exit void rs_exit(void)
int error;
if ((error = tty_unregister_driver(serial_driver)))
- printk("ISS_SERIAL: failed to unregister serial driver (%d)\n",
+ pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n",
error);
put_tty_driver(serial_driver);
tty_port_destroy(&serial_port);
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 6363b18e5b8c..d027dddc41ca 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
@@ -606,8 +608,6 @@ struct iss_net_init {
* those fields. They will be later initialized in iss_net_init.
*/
-#define ERR KERN_ERR "iss_net_setup: "
-
static int __init iss_net_setup(char *str)
{
struct iss_net_private *device = NULL;
@@ -619,14 +619,14 @@ static int __init iss_net_setup(char *str)
end = strchr(str, '=');
if (!end) {
- printk(ERR "Expected '=' after device number\n");
+ pr_err("Expected '=' after device number\n");
return 1;
}
*end = 0;
rc = kstrtouint(str, 0, &n);
*end = '=';
if (rc < 0) {
- printk(ERR "Failed to parse '%s'\n", str);
+ pr_err("Failed to parse '%s'\n", str);
return 1;
}
str = end;
@@ -642,13 +642,13 @@ static int __init iss_net_setup(char *str)
spin_unlock(&devices_lock);
if (device && device->index == n) {
- printk(ERR "Device %u already configured\n", n);
+ pr_err("Device %u already configured\n", n);
return 1;
}
new = alloc_bootmem(sizeof(*new));
if (new == NULL) {
- printk(ERR "Alloc_bootmem failed\n");
+ pr_err("Alloc_bootmem failed\n");
return 1;
}
@@ -660,8 +660,6 @@ static int __init iss_net_setup(char *str)
return 1;
}
-#undef ERR
-
__setup("eth", iss_net_setup);
/*
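
The pr_fmt() definition added at the top of the file makes every pr_err()/pr_debug() in it self-identifying, replacing the hand-rolled ERR prefix. Expansion sketch for one of the converted call sites:

	/* pr_err(fmt, ...) is printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__),
	 * so inside iss_net_setup(): */
	pr_err("Expected '=' after device number\n");
	/* logs roughly: "iss_net_setup: Expected '=' after device number" */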
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index da1525ec4c87..d819dc77fe65 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -775,10 +775,11 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
unsigned long flags;
int i;
+ spin_lock_irqsave(&bfqd->lock, flags);
+
if (!entity) /* root group */
- return;
+ goto put_async_queues;
- spin_lock_irqsave(&bfqd->lock, flags);
/*
* Empty all service_trees belonging to this group before
* deactivating the group itself.
@@ -809,6 +810,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
}
__bfq_deactivate_entity(entity, false);
+
+put_async_queues:
bfq_put_async_queues(bfqd, bfqg);
spin_unlock_irqrestore(&bfqd->lock, flags);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index bcb6d21baf12..47e6ec7427c4 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -166,6 +166,20 @@ static const int bfq_async_charge_factor = 10;
/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;
+/*
+ * Time limit for merging (see comments in bfq_setup_cooperator). Set
+ * to the slowest value that, in our tests, proved to be effective in
+ * removing false positives, while not causing true positives to miss
+ * queue merging.
+ *
+ * As can be deduced from the low time limit below, queue merging, if
+ * successful, happens at the very beginning of the I/O of the involved
+ * cooperating processes, as a consequence of the arrival of the very
+ * first requests from each cooperator. After that, there is very
+ * little chance to find cooperators.
+ */
+static const unsigned long bfq_merge_time_limit = HZ/10;
+
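
For scale: a jiffy lasts 1/HZ seconds, so HZ/10 jiffies is 100 ms independently of HZ (up to integer truncation at very low HZ); a queue therefore stops being a merge candidate roughly 100 ms after its first I/O.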
static struct kmem_cache *bfq_pool;
/* Below this threshold (in ns), we consider thinktime immediate. */
@@ -178,7 +192,7 @@ static struct kmem_cache *bfq_pool;
#define BFQQ_SEEK_THR (sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
-#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
+#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES 32
@@ -195,15 +209,17 @@ static struct kmem_cache *bfq_pool;
* interactive applications automatically, using the following formula:
* duration = (R / r) * T, where r is the peak rate of the device, and
* R and T are two reference parameters.
- * In particular, R is the peak rate of the reference device (see below),
- * and T is a reference time: given the systems that are likely to be
- * installed on the reference device according to its speed class, T is
- * about the maximum time needed, under BFQ and while reading two files in
- * parallel, to load typical large applications on these systems.
- * In practice, the slower/faster the device at hand is, the more/less it
- * takes to load applications with respect to the reference device.
- * Accordingly, the longer/shorter BFQ grants weight raising to interactive
- * applications.
+ * In particular, R is the peak rate of the reference device (see
+ * below), and T is a reference time: given the systems that are
+ * likely to be installed on the reference device according to its
+ * speed class, T is about the maximum time needed, under BFQ and
+ * while reading two files in parallel, to load typical large
+ * applications on these systems (see the comments on
+ * max_service_from_wr below, for more details on how T is obtained).
+ * In practice, the slower/faster the device at hand is, the more/less
+ * it takes to load applications with respect to the reference device.
+ * Accordingly, the longer/shorter BFQ grants weight raising to
+ * interactive applications.
*
* BFQ uses four different reference pairs (R, T), depending on:
* . whether the device is rotational or non-rotational;
@@ -240,6 +256,60 @@ static int T_slow[2];
static int T_fast[2];
static int device_speed_thresh[2];
+/*
+ * BFQ uses the above-detailed, time-based weight-raising mechanism to
+ * privilege interactive tasks. This mechanism is vulnerable to the
+ * following false positives: I/O-bound applications that will go on
+ * doing I/O for much longer than the duration of weight
+ * raising. These applications have basically no benefit from being
+ * weight-raised at the beginning of their I/O. On the opposite end,
+ * while being weight-raised, these applications
+ * a) unjustly steal throughput from applications that may actually need
+ * low latency;
+ * b) make BFQ uselessly perform device idling; device idling results
+ * in loss of device throughput with most flash-based storage, and may
+ * increase latencies when used purposelessly.
+ *
+ * BFQ tries to reduce these problems, by adopting the following
+ * countermeasure. To introduce this countermeasure, we need first to
+ * finish explaining how the duration of weight-raising for
+ * interactive tasks is computed.
+ *
+ * For a bfq_queue deemed as interactive, the duration of weight
+ * raising is dynamically adjusted, as a function of the estimated
+ * peak rate of the device, so as to be equal to the time needed to
+ * execute the 'largest' interactive task we benchmarked so far. By
+ * largest task, we mean the task for which each involved process has
+ * to do more I/O than for any of the other tasks we benchmarked. This
+ * reference interactive task is the start-up of LibreOffice Writer,
+ * and in this task each process/bfq_queue needs to have at most ~110K
+ * sectors transferred.
+ *
+ * This last piece of information enables BFQ to reduce the actual
+ * duration of weight-raising for at least one class of I/O-bound
+ * applications: those doing sequential or quasi-sequential I/O. An
+ * example is file copy. In fact, once started, the main I/O-bound
+ * processes of these applications usually consume the above 110K
+ * sectors in much less time than the processes of an application that
+ * is starting, because these I/O-bound processes will greedily devote
+ * almost all their CPU cycles only to their target,
+ * throughput-friendly I/O operations. This is even more true if BFQ
+ * happens to be underestimating the device peak rate, and thus
+ * overestimating the duration of weight raising. But, according to
+ * our measurements, once they have transferred 110K sectors, these
+ * processes have no right to be weight-raised any longer.
+ *
+ * Based on the last consideration, BFQ ends weight-raising for a
+ * bfq_queue if the latter happens to have received an amount of
+ * service at least equal to the following constant. The constant is
+ * set to slightly more than 110K, to have a minimum safety margin.
+ *
+ * This early ending of weight-raising reduces the amount of time
+ * during which interactive false positives cause the two problems
+ * described at the beginning of these comments.
+ */
+static const unsigned long max_service_from_wr = 120000;
+
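
Assuming the customary 512-byte sector, 120000 sectors of service is about 61 MB (~59 MiB), a small safety margin over the ~110K sectors (~56 MB) measured for the reference LibreOffice Writer start-up described above.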
#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
@@ -403,6 +473,82 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
}
}
+/*
+ * See the comments on bfq_limit_depth for the purpose of
+ * the depths set in the function.
+ */
+static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+{
+ bfqd->sb_shift = bt->sb.shift;
+
+ /*
+ * In-word depths if no bfq_queue is being weight-raised:
+ * leaving 25% of tags only for sync reads.
+ *
+ * In next formulas, right-shift the value
+ * (1U<<bfqd->sb_shift), instead of computing directly
+ * (1U<<(bfqd->sb_shift - something)), to be robust against
+ * any possible value of bfqd->sb_shift, without having to
+ * limit 'something'.
+ */
+ /* no more than 50% of tags for async I/O */
+ bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
+ /*
+ * no more than 75% of tags for sync writes (25% extra tags
+ * w.r.t. async I/O, to prevent async I/O from starving sync
+ * writes)
+ */
+ bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
+
+ /*
+ * In-word depths in case some bfq_queue is being weight-
+ * raised: leaving ~63% of tags for sync reads. This is the
+ * highest percentage for which, in our tests, application
+ * start-up times didn't suffer from any regression due to tag
+ * shortage.
+ */
+ /* no more than ~18% of tags for async I/O */
+ bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
+ /* no more than ~37% of tags for sync writes (~20% extra tags) */
+ bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
+}
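
Worked example of the formulas above, assuming sb_shift = 6 (64 tags per sbitmap word):

	word_depths[0][0] = (1U<<6)>>1     = 32   /* 50%  */
	word_depths[0][1] = ((1U<<6)*3)>>2 = 48   /* 75%  */
	word_depths[1][0] = ((1U<<6)*3)>>4 = 12   /* ~19% */
	word_depths[1][1] = ((1U<<6)*6)>>4 = 24   /* ~38% */

matching the percentages quoted in the comments.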
+
+/*
+ * Async I/O can easily starve sync I/O (both sync reads and sync
+ * writes), by consuming all tags. Similarly, storms of sync writes,
+ * such as those that sync(2) may trigger, can starve sync reads.
+ * Limit depths of async I/O and sync writes so as to counter both
+ * problems.
+ */
+static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+{
+ struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+ struct bfq_data *bfqd = data->q->elevator->elevator_data;
+ struct sbitmap_queue *bt;
+
+ if (op_is_sync(op) && !op_is_write(op))
+ return;
+
+ if (data->flags & BLK_MQ_REQ_RESERVED) {
+ if (unlikely(!tags->nr_reserved_tags)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+ bt = &tags->breserved_tags;
+ } else
+ bt = &tags->bitmap_tags;
+
+ if (unlikely(bfqd->sb_shift != bt->sb.shift))
+ bfq_update_depths(bfqd, bt);
+
+ data->shallow_depth =
+ bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
+
+ bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
+ __func__, bfqd->wr_busy_queues, op_is_sync(op),
+ data->shallow_depth);
+}
+
static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
sector_t sector, struct rb_node **ret_parent,
@@ -444,6 +590,13 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
return bfqq;
}
+static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
+{
+ return bfqq->service_from_backlogged > 0 &&
+ time_is_before_jiffies(bfqq->first_IO_time +
+ bfq_merge_time_limit);
+}
+
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct rb_node **p, *parent;
@@ -454,6 +607,14 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqq->pos_root = NULL;
}
+ /*
+ * bfqq cannot be merged any longer (see comments in
+ * bfq_setup_cooperator): no point in adding bfqq into the
+ * position tree.
+ */
+ if (bfq_too_late_for_merging(bfqq))
+ return;
+
if (bfq_class_idle(bfqq))
return;
if (!bfqq->next_rq)
@@ -1247,6 +1408,7 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
if (old_wr_coeff == 1 && wr_or_deserves_wr) {
/* start a weight-raising period */
if (interactive) {
+ bfqq->service_from_wr = 0;
bfqq->wr_coeff = bfqd->bfq_wr_coeff;
bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
} else {
@@ -1627,6 +1789,8 @@ static void bfq_remove_request(struct request_queue *q,
rb_erase(&bfqq->pos_node, bfqq->pos_root);
bfqq->pos_root = NULL;
}
+ } else {
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
if (rq->cmd_flags & REQ_META)
@@ -1933,6 +2097,9 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
struct bfq_queue *new_bfqq)
{
+ if (bfq_too_late_for_merging(new_bfqq))
+ return false;
+
if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
(bfqq->ioprio_class != new_bfqq->ioprio_class))
return false;
@@ -1957,20 +2124,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
}
/*
- * If this function returns true, then bfqq cannot be merged. The idea
- * is that true cooperation happens very early after processes start
- * to do I/O. Usually, late cooperations are just accidental false
- * positives. In case bfqq is weight-raised, such false positives
- * would evidently degrade latency guarantees for bfqq.
- */
-static bool wr_from_too_long(struct bfq_queue *bfqq)
-{
- return bfqq->wr_coeff > 1 &&
- time_is_before_jiffies(bfqq->last_wr_start_finish +
- msecs_to_jiffies(100));
-}
-
-/*
* Attempt to schedule a merge of bfqq with the currently in-service
* queue or with a close queue among the scheduled queues. Return
* NULL if no merge was scheduled, a pointer to the shared bfq_queue
@@ -1983,11 +2136,6 @@ static bool wr_from_too_long(struct bfq_queue *bfqq)
* to maintain. Besides, in such a critical condition as an out of memory,
* the benefits of queue merging may be little relevant, or even negligible.
*
- * Weight-raised queues can be merged only if their weight-raising
- * period has just started. In fact cooperating processes are usually
- * started together. Thus, with this filter we avoid false positives
- * that would jeopardize low-latency guarantees.
- *
* WARNING: queue merging may impair fairness among non-weight raised
* queues, for at least two reasons: 1) the original weight of a
* merged queue may change during the merged state, 2) even being the
@@ -2001,12 +2149,24 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
{
struct bfq_queue *in_service_bfqq, *new_bfqq;
+ /*
+ * Prevent bfqq from being merged if it has been created too
+ * long ago. The idea is that true cooperating processes, and
+ * thus their associated bfq_queues, are supposed to be
+ * created shortly after each other. This is the case, e.g.,
+	 * for KVM/QEMU and dump I/O threads. Based on this
+ * assumption, the following filtering greatly reduces the
+ * probability that two non-cooperating processes, which just
+ * happen to do close I/O for some short time interval, have
+ * their queues merged by mistake.
+ */
+ if (bfq_too_late_for_merging(bfqq))
+ return NULL;
+
if (bfqq->new_bfqq)
return bfqq->new_bfqq;
- if (!io_struct ||
- wr_from_too_long(bfqq) ||
- unlikely(bfqq == &bfqd->oom_bfqq))
+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
return NULL;
/* If there is only one backlogged queue, don't search. */
@@ -2015,12 +2175,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
in_service_bfqq = bfqd->in_service_queue;
- if (!in_service_bfqq || in_service_bfqq == bfqq
- || wr_from_too_long(in_service_bfqq) ||
- unlikely(in_service_bfqq == &bfqd->oom_bfqq))
- goto check_scheduled;
-
- if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
+ if (in_service_bfqq && in_service_bfqq != bfqq &&
+ likely(in_service_bfqq != &bfqd->oom_bfqq) &&
+ bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
bfqq->entity.parent == in_service_bfqq->entity.parent &&
bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
@@ -2032,12 +2189,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* queues. The only thing we need is that the bio/request is not
* NULL, as we need it to establish whether a cooperator exists.
*/
-check_scheduled:
new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
bfq_io_struct_pos(io_struct, request));
- if (new_bfqq && !wr_from_too_long(new_bfqq) &&
- likely(new_bfqq != &bfqd->oom_bfqq) &&
+ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
bfq_may_be_close_cooperator(bfqq, new_bfqq))
return bfq_setup_merge(bfqq, new_bfqq);
@@ -2062,7 +2217,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
if (unlikely(bfq_bfqq_just_created(bfqq) &&
- !bfq_bfqq_in_large_burst(bfqq))) {
+ !bfq_bfqq_in_large_burst(bfqq) &&
+ bfqq->bfqd->low_latency)) {
/*
* bfqq being merged right after being created: bfqq
* would have deserved interactive weight raising, but
@@ -2917,45 +3073,87 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* whereas soft_rt_next_start is set to infinity for applications that do
* not.
*
- * Unfortunately, even a greedy application may happen to behave in an
- * isochronous way if the CPU load is high. In fact, the application may
- * stop issuing requests while the CPUs are busy serving other processes,
- * then restart, then stop again for a while, and so on. In addition, if
- * the disk achieves a low enough throughput with the request pattern
- * issued by the application (e.g., because the request pattern is random
- * and/or the device is slow), then the application may meet the above
- * bandwidth requirement too. To prevent such a greedy application to be
- * deemed as soft real-time, a further rule is used in the computation of
- * soft_rt_next_start: soft_rt_next_start must be higher than the current
- * time plus the maximum time for which the arrival of a request is waited
- * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
- * This filters out greedy applications, as the latter issue instead their
- * next request as soon as possible after the last one has been completed
- * (in contrast, when a batch of requests is completed, a soft real-time
- * application spends some time processing data).
+ * Unfortunately, even a greedy (i.e., I/O-bound) application may
+ * happen to meet, occasionally or systematically, both the above
+ * bandwidth and isochrony requirements. This may happen at least in
+ * the following circumstances. First, if the CPU load is high. The
+ * application may stop issuing requests while the CPUs are busy
+ * serving other processes, then restart, then stop again for a while,
+ * and so on. The other circumstances are related to the storage
+ * device: the storage device is highly loaded or reaches a low-enough
+ * throughput with the I/O of the application (e.g., because the I/O
+ * is random and/or the device is slow). In all these cases, the
+ * I/O of the application may be simply slowed down enough to meet
+ * the bandwidth and isochrony requirements. To reduce the probability
+ * that greedy applications are deemed as soft real-time in these
+ * corner cases, a further rule is used in the computation of
+ * soft_rt_next_start: the return value of this function is forced to
+ * be higher than the maximum between the following two quantities.
+ *
+ * (a) Current time plus: (1) the maximum time for which the arrival
+ * of a request is waited for when a sync queue becomes idle,
+ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
+ * postpone for a moment the reason for adding a few extra
+ * jiffies; we get back to it after next item (b). Lower-bounding
+ * the return value of this function with the current time plus
+ * bfqd->bfq_slice_idle tends to filter out greedy applications,
+ * because the latter issue their next request as soon as possible
+ * after the last one has been completed. In contrast, a soft
+ * real-time application spends some time processing data, after a
+ * batch of its requests has been completed.
*
- * Unfortunately, the last filter may easily generate false positives if
- * only bfqd->bfq_slice_idle is used as a reference time interval and one
- * or both the following cases occur:
- * 1) HZ is so low that the duration of a jiffy is comparable to or higher
- * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
- * HZ=100.
+ * (b) Current value of bfqq->soft_rt_next_start. As pointed out
+ * above, greedy applications may happen to meet both the
+ * bandwidth and isochrony requirements under heavy CPU or
+ * storage-device load. In more detail, in these scenarios, these
+ * applications happen, only for limited time periods, to do I/O
+ * slowly enough to meet all the requirements described so far,
+ * including the filtering in above item (a). These slow-speed
+ * time intervals are usually interspersed between other time
+ * intervals during which these applications do I/O at a very high
+ * speed. Fortunately, exactly because of the high speed of the
+ * I/O in the high-speed intervals, the values returned by this
+ * function happen to be so high, near the end of any such
+ * high-speed interval, to be likely to fall *after* the end of
+ * the low-speed time interval that follows. These high values are
+ * stored in bfqq->soft_rt_next_start after each invocation of
+ * this function. As a consequence, if the last value of
+ * bfqq->soft_rt_next_start is constantly used to lower-bound the
+ * next value that this function may return, then, from the very
+ * beginning of a low-speed interval, bfqq->soft_rt_next_start is
+ * likely to be constantly kept so high that any I/O request
+ * issued during the low-speed interval is considered as arriving
+ *    too soon for the application to be deemed as soft
+ * real-time. Then, in the high-speed interval that follows, the
+ * application will not be deemed as soft real-time, just because
+ * it will do I/O at a high speed. And so on.
+ *
+ * Getting back to the filtering in item (a), in the following two
+ * cases this filtering might be easily passed by a greedy
+ * application, if the reference quantity was just
+ * bfqd->bfq_slice_idle:
+ * 1) HZ is so low that the duration of a jiffy is comparable to or
+ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
+ * devices with HZ=100. The time granularity may be so coarse
+ * that the approximation, in jiffies, of bfqd->bfq_slice_idle
+ * is rather lower than the exact value.
* 2) jiffies, instead of increasing at a constant rate, may stop increasing
* for a while, then suddenly 'jump' by several units to recover the lost
* increments. This seems to happen, e.g., inside virtual machines.
- * To address this issue, we do not use as a reference time interval just
- * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
- * particular we add the minimum number of jiffies for which the filter
- * seems to be quite precise also in embedded systems and KVM/QEMU virtual
- * machines.
+ * To address this issue, in the filtering in (a) we do not use as a
+ * reference time interval just bfqd->bfq_slice_idle, but
+ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
+ * minimum number of jiffies for which the filter seems to be quite
+ * precise also in embedded systems and KVM/QEMU virtual machines.
*/
static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
- return max(bfqq->last_idle_bklogged +
- HZ * bfqq->service_from_backlogged /
- bfqd->bfq_wr_max_softrt_rate,
- jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ return max3(bfqq->soft_rt_next_start,
+ bfqq->last_idle_bklogged +
+ HZ * bfqq->service_from_backlogged /
+ bfqd->bfq_wr_max_softrt_rate,
+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}
/**
@@ -3000,17 +3198,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
/*
- * Increase service_from_backlogged before next statement,
- * because the possible next invocation of
- * bfq_bfqq_charge_time would likely inflate
- * entity->service. In contrast, service_from_backlogged must
- * contain real service, to enable the soft real-time
- * heuristic to correctly compute the bandwidth consumed by
- * bfqq.
- */
- bfqq->service_from_backlogged += entity->service;
-
- /*
* As above explained, charge slow (typically seeky) and
* timed-out queues with the time and not the service
* received, to favor sequential workloads.
@@ -3535,6 +3722,12 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqq->entity.prio_changed = 1;
}
}
+ if (bfqq->wr_coeff > 1 &&
+ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
+ bfqq->service_from_wr > max_service_from_wr) {
+ /* see comments on max_service_from_wr */
+ bfq_bfqq_end_wr(bfqq);
+ }
}
/*
* To improve latency (for this or other queues), immediately
@@ -3630,8 +3823,8 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
}
/*
- * We exploit the put_rq_private hook to decrement
- * rq_in_driver, but put_rq_private will not be
+ * We exploit the bfq_finish_request hook to decrement
+ * rq_in_driver, but bfq_finish_request will not be
* invoked on this request. So, to avoid unbalance,
* just start this request, without incrementing
* rq_in_driver. As a negative consequence,
@@ -3640,14 +3833,14 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
* bfq_schedule_dispatch to be invoked uselessly.
*
* As for implementing an exact solution, the
- * put_request hook, if defined, is probably invoked
- * also on this request. So, by exploiting this hook,
- * we could 1) increment rq_in_driver here, and 2)
- * decrement it in put_request. Such a solution would
- * let the value of the counter be always accurate,
- * but it would entail using an extra interface
- * function. This cost seems higher than the benefit,
- * being the frequency of non-elevator-private
+ * bfq_finish_request hook, if defined, is probably
+ * invoked also on this request. So, by exploiting
+ * this hook, we could 1) increment rq_in_driver here,
+ * and 2) decrement it in bfq_finish_request. Such a
+ * solution would let the value of the counter be
+ * always accurate, but it would entail using an extra
+ * interface function. This cost seems higher than the
+ * benefit, being the frequency of non-elevator-private
* requests very low.
*/
goto start_rq;
@@ -3689,35 +3882,16 @@ exit:
return rq;
}
-static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
-{
- struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
- struct request *rq;
#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
- struct bfq_queue *in_serv_queue, *bfqq;
- bool waiting_rq, idle_timer_disabled;
-#endif
-
- spin_lock_irq(&bfqd->lock);
-
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
- in_serv_queue = bfqd->in_service_queue;
- waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
-
- rq = __bfq_dispatch_request(hctx);
-
- idle_timer_disabled =
- waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
-
-#else
- rq = __bfq_dispatch_request(hctx);
-#endif
- spin_unlock_irq(&bfqd->lock);
+static void bfq_update_dispatch_stats(struct request_queue *q,
+ struct request *rq,
+ struct bfq_queue *in_serv_queue,
+ bool idle_timer_disabled)
+{
+ struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
- bfqq = rq ? RQ_BFQQ(rq) : NULL;
if (!idle_timer_disabled && !bfqq)
- return rq;
+ return;
/*
* rq and bfqq are guaranteed to exist until this function
@@ -3732,7 +3906,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
- spin_lock_irq(hctx->queue->queue_lock);
+ spin_lock_irq(q->queue_lock);
if (idle_timer_disabled)
/*
* Since the idle timer has been disabled,
@@ -3751,9 +3925,37 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
bfqg_stats_set_start_empty_time(bfqg);
bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
}
- spin_unlock_irq(hctx->queue->queue_lock);
+ spin_unlock_irq(q->queue_lock);
+}
+#else
+static inline void bfq_update_dispatch_stats(struct request_queue *q,
+ struct request *rq,
+ struct bfq_queue *in_serv_queue,
+ bool idle_timer_disabled) {}
#endif
+static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+{
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+ struct request *rq;
+ struct bfq_queue *in_serv_queue;
+ bool waiting_rq, idle_timer_disabled;
+
+ spin_lock_irq(&bfqd->lock);
+
+ in_serv_queue = bfqd->in_service_queue;
+ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+
+ rq = __bfq_dispatch_request(hctx);
+
+ idle_timer_disabled =
+ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+
+ spin_unlock_irq(&bfqd->lock);
+
+ bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
+ idle_timer_disabled);
+
return rq;
}
@@ -4002,10 +4204,15 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqq->split_time = bfq_smallest_from_now();
/*
- * Set to the value for which bfqq will not be deemed as
- * soft rt when it becomes backlogged.
+ * To not forget the possibly high bandwidth consumed by a
+ * process/queue in the recent past,
+ * bfq_bfqq_softrt_next_start() returns a value at least equal
+ * to the current value of bfqq->soft_rt_next_start (see
+ * comments on bfq_bfqq_softrt_next_start). Set
+ * soft_rt_next_start to now, to mean that bfqq has consumed
+ * no bandwidth so far.
*/
- bfqq->soft_rt_next_start = bfq_greatest_from_now();
+ bfqq->soft_rt_next_start = jiffies;
/* first request is almost certainly seeky */
bfqq->seek_history = 1;
@@ -4276,16 +4483,46 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
return idle_timer_disabled;
}
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+static void bfq_update_insert_stats(struct request_queue *q,
+ struct bfq_queue *bfqq,
+ bool idle_timer_disabled,
+ unsigned int cmd_flags)
+{
+ if (!bfqq)
+ return;
+
+ /*
+ * bfqq still exists, because it can disappear only after
+ * either it is merged with another queue, or the process it
+ * is associated with exits. But both actions must be taken by
+ * the same process currently executing this flow of
+ * instructions.
+ *
+ * In addition, the following queue lock guarantees that
+ * bfqq_group(bfqq) exists as well.
+ */
+ spin_lock_irq(q->queue_lock);
+ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
+ if (idle_timer_disabled)
+ bfqg_stats_update_idle_time(bfqq_group(bfqq));
+ spin_unlock_irq(q->queue_lock);
+}
+#else
+static inline void bfq_update_insert_stats(struct request_queue *q,
+ struct bfq_queue *bfqq,
+ bool idle_timer_disabled,
+ unsigned int cmd_flags) {}
+#endif
+
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head)
{
struct request_queue *q = hctx->queue;
struct bfq_data *bfqd = q->elevator->elevator_data;
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
struct bfq_queue *bfqq = RQ_BFQQ(rq);
bool idle_timer_disabled = false;
unsigned int cmd_flags;
-#endif
spin_lock_irq(&bfqd->lock);
if (blk_mq_sched_try_insert_merge(q, rq)) {
@@ -4304,7 +4541,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
else
list_add_tail(&rq->queuelist, &bfqd->dispatch);
} else {
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
idle_timer_disabled = __bfq_insert_request(bfqd, rq);
/*
* Update bfqq, because, if a queue merge has occurred
@@ -4312,9 +4548,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
* redirected into a new queue.
*/
bfqq = RQ_BFQQ(rq);
-#else
- __bfq_insert_request(bfqd, rq);
-#endif
if (rq_mergeable(rq)) {
elv_rqhash_add(q, rq);
@@ -4323,35 +4556,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
}
}
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
/*
* Cache cmd_flags before releasing scheduler lock, because rq
* may disappear afterwards (for example, because of a request
* merge).
*/
cmd_flags = rq->cmd_flags;
-#endif
+
spin_unlock_irq(&bfqd->lock);
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
- if (!bfqq)
- return;
- /*
- * bfqq still exists, because it can disappear only after
- * either it is merged with another queue, or the process it
- * is associated with exits. But both actions must be taken by
- * the same process currently executing this flow of
- * instruction.
- *
- * In addition, the following queue lock guarantees that
- * bfqq_group(bfqq) exists as well.
- */
- spin_lock_irq(q->queue_lock);
- bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
- if (idle_timer_disabled)
- bfqg_stats_update_idle_time(bfqq_group(bfqq));
- spin_unlock_irq(q->queue_lock);
-#endif
+ bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
+ cmd_flags);
}
static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
@@ -4482,7 +4697,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
bfq_schedule_dispatch(bfqd);
}
-static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+static void bfq_finish_request_body(struct bfq_queue *bfqq)
{
bfqq->allocated--;
@@ -4512,7 +4727,7 @@ static void bfq_finish_request(struct request *rq)
spin_lock_irqsave(&bfqd->lock, flags);
bfq_completed_request(bfqq, bfqd);
- bfq_put_rq_priv_body(bfqq);
+ bfq_finish_request_body(bfqq);
spin_unlock_irqrestore(&bfqd->lock, flags);
} else {
@@ -4533,7 +4748,7 @@ static void bfq_finish_request(struct request *rq)
bfqg_stats_update_io_remove(bfqq_group(bfqq),
rq->cmd_flags);
}
- bfq_put_rq_priv_body(bfqq);
+ bfq_finish_request_body(bfqq);
}
rq->elv.priv[0] = NULL;
@@ -4818,6 +5033,9 @@ static void bfq_exit_queue(struct elevator_queue *e)
hrtimer_cancel(&bfqd->idle_slice_timer);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ /* release oom-queue reference to root group */
+ bfqg_and_blkg_put(bfqd->root_group);
+
blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
#else
spin_lock_irq(&bfqd->lock);
@@ -5206,6 +5424,7 @@ static struct elv_fs_entry bfq_attrs[] = {
static struct elevator_type iosched_bfq_mq = {
.ops.mq = {
+ .limit_depth = bfq_limit_depth,
.prepare_request = bfq_prepare_request,
.finish_request = bfq_finish_request,
.exit_icq = bfq_exit_icq,
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 91c4390903a1..350c39ae2896 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -337,6 +337,11 @@ struct bfq_queue {
* last transition from idle to backlogged.
*/
unsigned long service_from_backlogged;
+ /*
+ * Cumulative service received from the @bfq_queue since its
+ * last transition to weight-raised state.
+ */
+ unsigned long service_from_wr;
/*
* Value of wr start time when switching to soft rt
@@ -344,6 +349,8 @@ struct bfq_queue {
unsigned long wr_start_at_switch_to_srt;
unsigned long split_time; /* time of last split */
+
+ unsigned long first_IO_time; /* time of first I/O for this queue */
};
/**
@@ -627,6 +634,18 @@ struct bfq_data {
struct bfq_io_cq *bio_bic;
/* bfqq associated with the task issuing current bio for merging */
struct bfq_queue *bio_bfqq;
+
+ /*
+ * Cached sbitmap shift, used to compute depth limits in
+ * bfq_update_depths.
+ */
+ unsigned int sb_shift;
+
+ /*
+ * Depth limits used in bfq_limit_depth (see comments on the
+ * function)
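+ *
+ * A sketch of the assumed indexing, for illustration only (the
+ * first index would distinguish whether weight-raised queues are
+ * busy, the second whether the op is sync; see bfq_limit_depth()
+ * for the authoritative logic):
+ *
+ *	data->shallow_depth =
+ *		bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];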
+ */
+ unsigned int word_depths[2][2];
};
enum bfqq_state_flags {
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index e495d3f9b4b0..4498c43245e2 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -835,6 +835,13 @@ void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
struct bfq_entity *entity = &bfqq->entity;
struct bfq_service_tree *st;
+ if (!bfqq->service_from_backlogged)
+ bfqq->first_IO_time = jiffies;
+
+ if (bfqq->wr_coeff > 1)
+ bfqq->service_from_wr += served;
+
+ bfqq->service_from_backlogged += served;
for_each_entity(entity) {
st = bfq_entity_service_tree(entity);
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 23b42e8aa03e..9cfdd6c83b5b 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -374,7 +374,6 @@ static void bio_integrity_verify_fn(struct work_struct *work)
/**
* __bio_integrity_endio - Integrity I/O completion function
* @bio: Protected bio
- * @error: Pointer to errno
*
* Description: Completion for integrity I/O
*
diff --git a/block/bio.c b/block/bio.c
index 9ef6cf3addb3..e1708db48258 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -971,34 +971,6 @@ void bio_advance(struct bio *bio, unsigned bytes)
EXPORT_SYMBOL(bio_advance);
/**
- * bio_alloc_pages - allocates a single page for each bvec in a bio
- * @bio: bio to allocate pages for
- * @gfp_mask: flags for allocation
- *
- * Allocates pages up to @bio->bi_vcnt.
- *
- * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
- * freed.
- */
-int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
-{
- int i;
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, bio, i) {
- bv->bv_page = alloc_page(gfp_mask);
- if (!bv->bv_page) {
- while (--bv >= bio->bi_io_vec)
- __free_page(bv->bv_page);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(bio_alloc_pages);
-
-/**
* bio_copy_data - copy contents of data buffers from one chain of bios to
* another
* @src: source bio list
@@ -1838,7 +1810,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
bio_advance(bio, split->bi_iter.bi_size);
if (bio_flagged(bio, BIO_TRACE_COMPLETION))
- bio_set_flag(bio, BIO_TRACE_COMPLETION);
+ bio_set_flag(split, BIO_TRACE_COMPLETION);
return split;
}
diff --git a/block/blk-core.c b/block/blk-core.c
index b8881750a3ac..a2005a485335 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -126,6 +126,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->start_time = jiffies;
set_start_time_ns(rq);
rq->part = NULL;
+ seqcount_init(&rq->gstate_seq);
+ u64_stats_init(&rq->aborted_gstate_sync);
}
EXPORT_SYMBOL(blk_rq_init);
@@ -562,6 +564,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
}
}
+void blk_drain_queue(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ __blk_drain_queue(q, true);
+ spin_unlock_irq(q->queue_lock);
+}
+
/**
* blk_queue_bypass_start - enter queue bypass mode
* @q: queue of interest
@@ -689,11 +698,18 @@ void blk_cleanup_queue(struct request_queue *q)
*/
blk_freeze_queue(q);
spin_lock_irq(lock);
- if (!q->mq_ops)
- __blk_drain_queue(q, true);
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
+ /*
+ * Make sure all in-progress dispatching is completed, because
+ * blk_freeze_queue() can only wait for requests to complete, while
+ * dispatching may still be in progress, since we dispatch requests
+ * from more than one context.
+ */
+ if (q->mq_ops)
+ blk_mq_quiesce_queue(q);
+
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
@@ -1641,6 +1657,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
lockdep_assert_held(q->queue_lock);
+ blk_req_zone_write_unlock(req);
blk_pm_put_request(req);
elv_completed_request(q, req);
@@ -2050,6 +2067,21 @@ static inline bool should_fail_request(struct hd_struct *part,
#endif /* CONFIG_FAIL_MAKE_REQUEST */
+static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+{
+ if (part->policy && op_is_write(bio_op(bio))) {
+ char b[BDEVNAME_SIZE];
+
+ printk(KERN_ERR
+ "generic_make_request: Trying to write "
+ "to read-only block-device %s (partno %d)\n",
+ bio_devname(bio, b), part->partno);
+ return true;
+ }
+
+ return false;
+}
+
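+/*
+ * As used below, bio_check_ro() is called both for the whole device
+ * (part0, from generic_make_request_checks()) and for partitions,
+ * from blk_partition_remap().
+ */
+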
/*
* Remap block n of partition p to block n+start(p) of the disk.
*/
@@ -2058,27 +2090,28 @@ static inline int blk_partition_remap(struct bio *bio)
struct hd_struct *p;
int ret = 0;
+ rcu_read_lock();
+ p = __disk_get_part(bio->bi_disk, bio->bi_partno);
+ if (unlikely(!p || should_fail_request(p, bio->bi_iter.bi_size) ||
+ bio_check_ro(bio, p))) {
+ ret = -EIO;
+ goto out;
+ }
+
/*
* Zone reset does not include bi_size so bio_sectors() is always 0.
* Include a test for the reset op code and perform the remap if needed.
*/
- if (!bio->bi_partno ||
- (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
- return 0;
+ if (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET)
+ goto out;
- rcu_read_lock();
- p = __disk_get_part(bio->bi_disk, bio->bi_partno);
- if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
- bio->bi_iter.bi_sector += p->start_sect;
- bio->bi_partno = 0;
- trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
- bio->bi_iter.bi_sector - p->start_sect);
- } else {
- printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
- ret = -EIO;
- }
- rcu_read_unlock();
+ bio->bi_iter.bi_sector += p->start_sect;
+ bio->bi_partno = 0;
+ trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
+ bio->bi_iter.bi_sector - p->start_sect);
+out:
+ rcu_read_unlock();
return ret;
}
@@ -2137,15 +2170,19 @@ generic_make_request_checks(struct bio *bio)
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue is not a request based queue.
*/
-
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
goto not_supported;
if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
goto end_io;
- if (blk_partition_remap(bio))
- goto end_io;
+ if (!bio->bi_partno) {
+ if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
+ goto end_io;
+ } else {
+ if (blk_partition_remap(bio))
+ goto end_io;
+ }
if (bio_check_eod(bio, nr_sectors))
goto end_io;
@@ -2488,8 +2525,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
* bypass a potential scheduler on the bottom device for
* insert.
*/
- blk_mq_request_bypass_insert(rq, true);
- return BLK_STS_OK;
+ return blk_mq_request_issue_directly(rq);
}
spin_lock_irqsave(q->queue_lock, flags);
@@ -2841,7 +2877,7 @@ void blk_start_request(struct request *req)
wbt_issue(req->q->rq_wb, &req->issue_stat);
}
- BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+ BUG_ON(blk_rq_is_complete(req));
blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);
@@ -3410,20 +3446,6 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
-int kblockd_schedule_delayed_work(struct delayed_work *dwork,
- unsigned long delay)
-{
- return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
-int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
- unsigned long delay)
-{
- return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
-
/**
* blk_start_plug - initialize blk_plug and track it inside the task_struct
* @plug: The &struct blk_plug that needs to be initialized
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 5c0f3dc446dc..f7b292f12449 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -61,7 +61,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be reused after dying flag is set
*/
if (q->mq_ops) {
- blk_mq_sched_insert_request(rq, at_head, true, false, false);
+ blk_mq_sched_insert_request(rq, at_head, true, false);
return;
}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2bc544ce3d2e..a676084d4740 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -37,6 +37,9 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (!q)
return -ENXIO;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secure_erase(q))
return -EOPNOTSUPP;
@@ -156,6 +159,9 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
if (!q)
return -ENXIO;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
@@ -233,6 +239,9 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
if (!q)
return -ENXIO;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
@@ -287,6 +296,9 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
if (!q)
return -ENXIO;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
while (nr_sects != 0) {
bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
gfp_mask);
diff --git a/block/blk-map.c b/block/blk-map.c
index d3a94719f03f..db9373bd31ac 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -119,7 +119,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL;
struct iov_iter i;
- int ret;
+ int ret = -EINVAL;
if (!iter_is_iovec(iter))
goto fail;
@@ -148,7 +148,7 @@ unmap_rq:
__blk_rq_unmap_user(bio);
fail:
rq->bio = NULL;
- return -EINVAL;
+ return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index f5dedd57dff6..8452fc7164cc 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -128,9 +128,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
nsegs++;
sectors = max_sectors;
}
- if (sectors)
- goto split;
- /* Make this single bvec as the 1st segment */
+ goto split;
}
if (bvprvp && blk_queue_cluster(q)) {
@@ -146,22 +144,21 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bvprvp = &bvprv;
sectors += bv.bv_len >> 9;
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
continue;
}
new_segment:
if (nsegs == queue_max_segments(q))
goto split;
+ if (nsegs == 1 && seg_size > front_seg_size)
+ front_seg_size = seg_size;
+
nsegs++;
bvprv = bv;
bvprvp = &bvprv;
seg_size = bv.bv_len;
sectors += bv.bv_len >> 9;
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
}
do_split = false;
@@ -174,6 +171,8 @@ split:
bio = new;
}
+ if (nsegs == 1 && seg_size > front_seg_size)
+ front_seg_size = seg_size;
bio->bi_seg_front_size = front_seg_size;
if (seg_size > bio->bi_seg_back_size)
bio->bi_seg_back_size = seg_size;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index b56a4f35720d..21cbc1f071c6 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -289,17 +289,12 @@ static const char *const rqf_name[] = {
RQF_NAME(HASHED),
RQF_NAME(STATS),
RQF_NAME(SPECIAL_PAYLOAD),
+ RQF_NAME(ZONE_WRITE_LOCKED),
+ RQF_NAME(MQ_TIMEOUT_EXPIRED),
+ RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME
-#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
-static const char *const rqaf_name[] = {
- RQAF_NAME(COMPLETE),
- RQAF_NAME(STARTED),
- RQAF_NAME(POLL_SLEPT),
-};
-#undef RQAF_NAME
-
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
@@ -316,8 +311,7 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
seq_puts(m, ", .rq_flags=");
blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
ARRAY_SIZE(rqf_name));
- seq_puts(m, ", .atomic_flags=");
- blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
+ seq_printf(m, ", complete=%d", blk_rq_is_complete(rq));
seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
rq->internal_tag);
if (mq_ops->show_rq)
@@ -409,7 +403,7 @@ static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
const struct show_busy_params *params = data;
if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
- test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ blk_mq_rq_state(rq) != MQ_RQ_IDLE)
__blk_mq_debugfs_rq_show(params->m,
list_entry_rq(&rq->queuelist));
}
@@ -703,7 +697,11 @@ static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
const struct blk_mq_debugfs_attr *attr = m->private;
void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
- if (!attr->write)
+ /*
+ * Attributes that only implement .seq_ops are read-only and 'attr' is
+ * the same as 'data' in this case.
+ */
+ if (attr == data || !attr->write)
return -EPERM;
return attr->write(data, buf, count, ppos);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c117bd8fd1f6..55c0a745b427 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -172,7 +172,6 @@ static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
WRITE_ONCE(hctx->dispatch_from, ctx);
}
-/* return true if hw queue need to be run again */
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
@@ -428,7 +427,7 @@ done:
}
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
- bool run_queue, bool async, bool can_block)
+ bool run_queue, bool async)
{
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index ba1d1418a96d..1e9c9018ace1 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -18,7 +18,7 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
- bool run_queue, bool async, bool can_block);
+ bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct request_queue *q,
struct blk_mq_ctx *ctx,
struct list_head *list, bool run_queue_async);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 79969c3c234f..a54b4b070f1c 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -248,7 +248,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
return ret;
}
-static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
+void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
@@ -265,13 +265,6 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
q->mq_sysfs_init_done = false;
}
-void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
-{
- mutex_lock(&q->sysfs_lock);
- __blk_mq_unregister_dev(dev, q);
- mutex_unlock(&q->sysfs_lock);
-}
-
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c81b40ecd3f1..336dde07b230 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -134,12 +134,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
ws = bt_wait_ptr(bt, data->hctx);
drop_ctx = data->ctx == NULL;
do {
- prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
-
- tag = __blk_mq_get_tag(data, bt);
- if (tag != -1)
- break;
-
/*
* We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for
@@ -155,6 +149,13 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
if (tag != -1)
break;
+ prepare_to_wait_exclusive(&ws->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ tag = __blk_mq_get_tag(data, bt);
+ if (tag != -1)
+ break;
+
if (data->ctx)
blk_mq_put_ctx(data->ctx);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 11097477eeab..01f271d40825 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -95,8 +95,7 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
{
struct mq_inflight *mi = priv;
- if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
- !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
+ if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
/*
* index[0] counts the specific partition that was asked
* for. index[1] counts the ones that are active on the
@@ -161,6 +160,8 @@ void blk_freeze_queue(struct request_queue *q)
* exported to drivers as the only user for unfreeze is blk_mq.
*/
blk_freeze_queue_start(q);
+ if (!q->mq_ops)
+ blk_drain_queue(q);
blk_mq_freeze_queue_wait(q);
}
@@ -220,7 +221,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->flags & BLK_MQ_F_BLOCKING)
- synchronize_srcu(hctx->queue_rq_srcu);
+ synchronize_srcu(hctx->srcu);
else
rcu = true;
}
@@ -270,15 +271,14 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
{
struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
struct request *rq = tags->static_rqs[tag];
-
- rq->rq_flags = 0;
+ req_flags_t rq_flags = 0;
if (data->flags & BLK_MQ_REQ_INTERNAL) {
rq->tag = -1;
rq->internal_tag = tag;
} else {
if (blk_mq_tag_busy(data->hctx)) {
- rq->rq_flags = RQF_MQ_INFLIGHT;
+ rq_flags = RQF_MQ_INFLIGHT;
atomic_inc(&data->hctx->nr_active);
}
rq->tag = tag;
@@ -286,27 +286,22 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
data->hctx->tags->rqs[rq->tag] = rq;
}
- INIT_LIST_HEAD(&rq->queuelist);
/* csd/requeue_work/fifo_time is initialized before use */
rq->q = data->q;
rq->mq_ctx = data->ctx;
+ rq->rq_flags = rq_flags;
+ rq->cpu = -1;
rq->cmd_flags = op;
if (data->flags & BLK_MQ_REQ_PREEMPT)
rq->rq_flags |= RQF_PREEMPT;
if (blk_queue_io_stat(data->q))
rq->rq_flags |= RQF_IO_STAT;
- /* do not touch atomic flags, it needs atomic ops against the timer */
- rq->cpu = -1;
+ INIT_LIST_HEAD(&rq->queuelist);
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->rq_disk = NULL;
rq->part = NULL;
rq->start_time = jiffies;
-#ifdef CONFIG_BLK_CGROUP
- rq->rl = NULL;
- set_start_time_ns(rq);
- rq->io_start_time_ns = 0;
-#endif
rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
rq->nr_integrity_segments = 0;
@@ -314,6 +309,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->special = NULL;
/* tag was already set */
rq->extra_len = 0;
+ rq->__deadline = 0;
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
@@ -322,6 +318,12 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->end_io_data = NULL;
rq->next_rq = NULL;
+#ifdef CONFIG_BLK_CGROUP
+ rq->rl = NULL;
+ set_start_time_ns(rq);
+ rq->io_start_time_ns = 0;
+#endif
+
data->ctx->rq_dispatched[op_is_sync(op)]++;
return rq;
}
@@ -441,7 +443,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
blk_queue_exit(q);
return ERR_PTR(-EXDEV);
}
- cpu = cpumask_first(alloc_data.hctx->cpumask);
+ cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
@@ -483,8 +485,7 @@ void blk_mq_free_request(struct request *rq)
if (blk_rq_rl(rq))
blk_put_rl(blk_rq_rl(rq));
- clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
+ blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
if (rq->tag != -1)
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
@@ -530,6 +531,9 @@ static void __blk_mq_complete_request(struct request *rq)
bool shared = false;
int cpu;
+ WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT);
+ blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);
+
if (rq->internal_tag != -1)
blk_mq_sched_completed_request(rq);
if (rq->rq_flags & RQF_STATS) {
@@ -557,6 +561,56 @@ static void __blk_mq_complete_request(struct request *rq)
put_cpu();
}
+static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
+ __releases(hctx->srcu)
+{
+ if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+ rcu_read_unlock();
+ else
+ srcu_read_unlock(hctx->srcu, srcu_idx);
+}
+
+static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
+ __acquires(hctx->srcu)
+{
+ if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+ /* shut up gcc false positive */
+ *srcu_idx = 0;
+ rcu_read_lock();
+ } else
+ *srcu_idx = srcu_read_lock(hctx->srcu);
+}
+
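+/*
+ * A minimal usage sketch of the pair above, mirroring the dispatch
+ * path further below: callers bracket work that must synchronize
+ * against quiesce, and the pair degrades to plain RCU unless the
+ * hctx is BLK_MQ_F_BLOCKING:
+ *
+ *	int srcu_idx;
+ *
+ *	hctx_lock(hctx, &srcu_idx);
+ *	blk_mq_sched_dispatch_requests(hctx);
+ *	hctx_unlock(hctx, srcu_idx);
+ */
+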
+static void blk_mq_rq_update_aborted_gstate(struct request *rq, u64 gstate)
+{
+ unsigned long flags;
+
+ /*
+ * blk_mq_rq_aborted_gstate() is used from the completion path and
+ * can thus be called from irq context. A u64_stats fetch that
+ * interrupts an update on the same CPU would spin forever, so
+ * disable irqs while updating.
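+ *
+ * (Background, as far as the u64_stats API goes: on 64-bit kernels
+ * the sync object compiles away entirely, while on 32-bit SMP it
+ * wraps the two-word access to ->aborted_gstate in a seqcount-style
+ * retry loop that yields a consistent snapshot.)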
+ */
+ local_irq_save(flags);
+ u64_stats_update_begin(&rq->aborted_gstate_sync);
+ rq->aborted_gstate = gstate;
+ u64_stats_update_end(&rq->aborted_gstate_sync);
+ local_irq_restore(flags);
+}
+
+static u64 blk_mq_rq_aborted_gstate(struct request *rq)
+{
+ unsigned int start;
+ u64 aborted_gstate;
+
+ do {
+ start = u64_stats_fetch_begin(&rq->aborted_gstate_sync);
+ aborted_gstate = rq->aborted_gstate;
+ } while (u64_stats_fetch_retry(&rq->aborted_gstate_sync, start));
+
+ return aborted_gstate;
+}
+
/**
* blk_mq_complete_request - end I/O on a request
* @rq: the request being processed
@@ -568,17 +622,33 @@ static void __blk_mq_complete_request(struct request *rq)
void blk_mq_complete_request(struct request *rq)
{
struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+ int srcu_idx;
if (unlikely(blk_should_fake_timeout(q)))
return;
- if (!blk_mark_rq_complete(rq))
+
+ /*
+ * If @rq->aborted_gstate equals the current instance, timeout is
+ * claiming @rq and we lost. This is synchronized through
+ * hctx_lock(). See blk_mq_timeout_work() for details.
+ *
+ * Completion path never blocks and we can directly use RCU here
+ * instead of hctx_lock() which can be either RCU or SRCU.
+ * However, that would complicate paths which want to synchronize
+ * against us. Let's stay in sync with the issue path so that
+ * hctx_lock() covers both issue and completion paths.
+ */
+ hctx_lock(hctx, &srcu_idx);
+ if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
__blk_mq_complete_request(rq);
+ hctx_unlock(hctx, srcu_idx);
}
EXPORT_SYMBOL(blk_mq_complete_request);
int blk_mq_request_started(struct request *rq)
{
- return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+ return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);
@@ -596,34 +666,27 @@ void blk_mq_start_request(struct request *rq)
wbt_issue(q->rq_wb, &rq->issue_stat);
}
- blk_add_timer(rq);
-
- WARN_ON_ONCE(test_bit(REQ_ATOM_STARTED, &rq->atomic_flags));
+ WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
/*
- * Mark us as started and clear complete. Complete might have been
- * set if requeue raced with timeout, which then marked it as
- * complete. So be sure to clear complete again when we start
- * the request, otherwise we'll ignore the completion event.
+ * Mark @rq in-flight which also advances the generation number,
+ * and register for timeout. Protect with a seqcount to allow the
+ * timeout path to read both @rq->gstate and @rq->deadline
+ * coherently.
*
- * Ensure that ->deadline is visible before we set STARTED, such that
- * blk_mq_check_expired() is guaranteed to observe our ->deadline when
- * it observes STARTED.
+ * This is the only place where a request is marked in-flight. If
+ * the timeout path reads an in-flight @rq->gstate, the
+ * @rq->deadline it reads together under @rq->gstate_seq is
+ * guaranteed to be the matching one.
*/
- smp_wmb();
- set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
- /*
- * Coherence order guarantees these consecutive stores to a
- * single variable propagate in the specified order. Thus the
- * clear_bit() is ordered _after_ the set bit. See
- * blk_mq_check_expired().
- *
- * (the bits must be part of the same byte for this to be
- * true).
- */
- clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
- }
+ preempt_disable();
+ write_seqcount_begin(&rq->gstate_seq);
+
+ blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
+ blk_add_timer(rq);
+
+ write_seqcount_end(&rq->gstate_seq);
+ preempt_enable();
if (q->dma_drain_size && blk_rq_bytes(rq)) {
/*
@@ -637,13 +700,9 @@ void blk_mq_start_request(struct request *rq)
EXPORT_SYMBOL(blk_mq_start_request);
/*
- * When we reach here because queue is busy, REQ_ATOM_COMPLETE
- * flag isn't set yet, so there may be race with timeout handler,
- * but given rq->deadline is just set in .queue_rq() under
- * this situation, the race won't be possible in reality because
- * rq->timeout should be set as big enough to cover the window
- * between blk_mq_start_request() called from .queue_rq() and
- * clearing REQ_ATOM_STARTED here.
+ * When we reach here because queue is busy, it's safe to change the state
+ * to IDLE without checking @rq->aborted_gstate because we should still be
+ * holding the RCU read lock and thus protected against timeout.
*/
static void __blk_mq_requeue_request(struct request *rq)
{
@@ -655,7 +714,8 @@ static void __blk_mq_requeue_request(struct request *rq)
wbt_requeue(q->rq_wb, &rq->issue_stat);
blk_mq_sched_requeue_request(rq);
- if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+ if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
+ blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
if (q->dma_drain_size && blk_rq_bytes(rq))
rq->nr_phys_segments--;
}
@@ -687,13 +747,13 @@ static void blk_mq_requeue_work(struct work_struct *work)
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
- blk_mq_sched_insert_request(rq, true, false, false, true);
+ blk_mq_sched_insert_request(rq, true, false, false);
}
while (!list_empty(&rq_list)) {
rq = list_entry(rq_list.next, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_mq_sched_insert_request(rq, false, false, false, true);
+ blk_mq_sched_insert_request(rq, false, false, false);
}
blk_mq_run_hw_queues(q, false);
@@ -727,7 +787,7 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
- kblockd_schedule_delayed_work(&q->requeue_work, 0);
+ kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
@@ -753,24 +813,15 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);
struct blk_mq_timeout_data {
unsigned long next;
unsigned int next_set;
+ unsigned int nr_expired;
};
-void blk_mq_rq_timed_out(struct request *req, bool reserved)
+static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
const struct blk_mq_ops *ops = req->q->mq_ops;
enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
- /*
- * We know that complete is set at this point. If STARTED isn't set
- * anymore, then the request isn't active and the "timeout" should
- * just be ignored. This can happen due to the bitflag ordering.
- * Timeout first checks if STARTED is set, and if it is, assumes
- * the request is active. But if we race with completion, then
- * both flags will get cleared. So check here again, and ignore
- * a timeout event with a request that isn't active.
- */
- if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
- return;
+ req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
if (ops->timeout)
ret = ops->timeout(req, reserved);
@@ -780,8 +831,13 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
__blk_mq_complete_request(req);
break;
case BLK_EH_RESET_TIMER:
+ /*
+ * As nothing prevents from completion happening while
+ * ->aborted_gstate is set, this may lead to ignored
+ * completions and further spurious timeouts.
+ */
+ blk_mq_rq_update_aborted_gstate(req, 0);
blk_add_timer(req);
- blk_clear_rq_complete(req);
break;
case BLK_EH_NOT_HANDLED:
break;
@@ -795,50 +851,51 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{
struct blk_mq_timeout_data *data = priv;
- unsigned long deadline;
+ unsigned long gstate, deadline;
+ int start;
- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
- return;
+ might_sleep();
- /*
- * Ensures that if we see STARTED we must also see our
- * up-to-date deadline, see blk_mq_start_request().
- */
- smp_rmb();
+ if (rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED)
+ return;
- deadline = READ_ONCE(rq->deadline);
+ /* read coherent snapshots of @rq->gstate and @rq->deadline */
+ while (true) {
+ start = read_seqcount_begin(&rq->gstate_seq);
+ gstate = READ_ONCE(rq->gstate);
+ deadline = blk_rq_deadline(rq);
+ if (!read_seqcount_retry(&rq->gstate_seq, start))
+ break;
+ cond_resched();
+ }
- /*
- * The rq being checked may have been freed and reallocated
- * out already here, we avoid this race by checking rq->deadline
- * and REQ_ATOM_COMPLETE flag together:
- *
- * - if rq->deadline is observed as new value because of
- * reusing, the rq won't be timed out because of timing.
- * - if rq->deadline is observed as previous value,
- * REQ_ATOM_COMPLETE flag won't be cleared in reuse path
- * because we put a barrier between setting rq->deadline
- * and clearing the flag in blk_mq_start_request(), so
- * this rq won't be timed out too.
- */
- if (time_after_eq(jiffies, deadline)) {
- if (!blk_mark_rq_complete(rq)) {
- /*
- * Again coherence order ensures that consecutive reads
- * from the same variable must be in that order. This
- * ensures that if we see COMPLETE clear, we must then
- * see STARTED set and we'll ignore this timeout.
- *
- * (There's also the MB implied by the test_and_clear())
- */
- blk_mq_rq_timed_out(rq, reserved);
- }
+ /* if in-flight && overdue, mark for abortion */
+ if ((gstate & MQ_RQ_STATE_MASK) == MQ_RQ_IN_FLIGHT &&
+ time_after_eq(jiffies, deadline)) {
+ blk_mq_rq_update_aborted_gstate(rq, gstate);
+ data->nr_expired++;
+ hctx->nr_expired++;
} else if (!data->next_set || time_after(data->next, deadline)) {
data->next = deadline;
data->next_set = 1;
}
}
+static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, void *priv, bool reserved)
+{
+ /*
+ * We marked @rq->aborted_gstate and waited for RCU. If there were
+ * completions that we lost to, they would have finished and
+ * updated @rq->gstate by now; otherwise, the completion path is
+ * now guaranteed to see @rq->aborted_gstate and yield. If
+ * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
+ */
+ if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
+ READ_ONCE(rq->gstate) == rq->aborted_gstate)
+ blk_mq_rq_timed_out(rq, reserved);
+}
+
static void blk_mq_timeout_work(struct work_struct *work)
{
struct request_queue *q =
@@ -846,7 +903,9 @@ static void blk_mq_timeout_work(struct work_struct *work)
struct blk_mq_timeout_data data = {
.next = 0,
.next_set = 0,
+ .nr_expired = 0,
};
+ struct blk_mq_hw_ctx *hctx;
int i;
/* A deadlock might occur if a request is stuck requiring a
@@ -865,14 +924,46 @@ static void blk_mq_timeout_work(struct work_struct *work)
if (!percpu_ref_tryget(&q->q_usage_counter))
return;
+ /* scan for the expired ones and set their ->aborted_gstate */
blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
+ if (data.nr_expired) {
+ bool has_rcu = false;
+
+ /*
+ * Wait till everyone sees ->aborted_gstate. The
+ * sequential waits for SRCUs aren't ideal. If this ever
+ * becomes a problem, we can add per-hw_ctx rcu_head and
+ * wait in parallel.
+ */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!hctx->nr_expired)
+ continue;
+
+ if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+ has_rcu = true;
+ else
+ synchronize_srcu(hctx->srcu);
+
+ hctx->nr_expired = 0;
+ }
+ if (has_rcu)
+ synchronize_rcu();
+
+ /* terminate the ones we won */
+ blk_mq_queue_tag_busy_iter(q, blk_mq_terminate_expired, NULL);
+ }
+
if (data.next_set) {
data.next = blk_rq_timeout(round_jiffies_up(data.next));
mod_timer(&q->timeout, data.next);
} else {
- struct blk_mq_hw_ctx *hctx;
-
+ /*
+ * Request timeouts are handled as a forward rolling timer. If
+ * we end up here it means that no requests are pending and
+ * also that no request has been pending for a while. Mark
+ * each hctx as idle.
+ */
queue_for_each_hw_ctx(q, hctx, i) {
/* the hctx may be unmapped, so check it here */
if (blk_mq_hw_queue_mapped(hctx))
@@ -1008,66 +1099,67 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
/*
* Mark us waiting for a tag. For shared tags, this involves hooking us into
- * the tag wakeups. For non-shared tags, we can simply mark us nedeing a
- * restart. For both caes, take care to check the condition again after
+ * the tag wakeups. For non-shared tags, we can simply mark us needing a
+ * restart. For both cases, take care to check the condition again after
* marking us as waiting.
*/
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
struct request *rq)
{
struct blk_mq_hw_ctx *this_hctx = *hctx;
- bool shared_tags = (this_hctx->flags & BLK_MQ_F_TAG_SHARED) != 0;
struct sbq_wait_state *ws;
wait_queue_entry_t *wait;
bool ret;
- if (!shared_tags) {
+ if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
- } else {
- wait = &this_hctx->dispatch_wait;
- if (!list_empty_careful(&wait->entry))
- return false;
- spin_lock(&this_hctx->lock);
- if (!list_empty(&wait->entry)) {
- spin_unlock(&this_hctx->lock);
- return false;
- }
+ /*
+ * It's possible that a tag was freed in the window between the
+ * allocation failure and adding the hardware queue to the wait
+ * queue.
+ *
+ * Don't clear RESTART here, someone else could have set it.
+ * At most this will cost an extra queue run.
+ */
+ return blk_mq_get_driver_tag(rq, hctx, false);
+ }
- ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
- add_wait_queue(&ws->wait, wait);
+ wait = &this_hctx->dispatch_wait;
+ if (!list_empty_careful(&wait->entry))
+ return false;
+
+ spin_lock(&this_hctx->lock);
+ if (!list_empty(&wait->entry)) {
+ spin_unlock(&this_hctx->lock);
+ return false;
}
+ ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
+ add_wait_queue(&ws->wait, wait);
+
/*
* It's possible that a tag was freed in the window between the
* allocation failure and adding the hardware queue to the wait
* queue.
*/
ret = blk_mq_get_driver_tag(rq, hctx, false);
-
- if (!shared_tags) {
- /*
- * Don't clear RESTART here, someone else could have set it.
- * At most this will cost an extra queue run.
- */
- return ret;
- } else {
- if (!ret) {
- spin_unlock(&this_hctx->lock);
- return false;
- }
-
- /*
- * We got a tag, remove ourselves from the wait queue to ensure
- * someone else gets the wakeup.
- */
- spin_lock_irq(&ws->wait.lock);
- list_del_init(&wait->entry);
- spin_unlock_irq(&ws->wait.lock);
+ if (!ret) {
spin_unlock(&this_hctx->lock);
- return true;
+ return false;
}
+
+ /*
+ * We got a tag, remove ourselves from the wait queue to ensure
+ * someone else gets the wakeup.
+ */
+ spin_lock_irq(&ws->wait.lock);
+ list_del_init(&wait->entry);
+ spin_unlock_irq(&ws->wait.lock);
+ spin_unlock(&this_hctx->lock);
+
+ return true;
}
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
@@ -1204,9 +1296,27 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
/*
* We should be running this queue from one of the CPUs that
* are mapped to it.
+ *
+ * There are at least two related races now between setting
+ * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
+ * __blk_mq_run_hw_queue():
+ *
+ * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
+ * but later it becomes online, then this warning is harmless
+ * at all
+ *
+ * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
+ * but later it becomes offline, then the warning can't be
+ * triggered, and we depend on blk-mq timeout handler to
+ * handle dispatched requests to this hctx
*/
- WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
- cpu_online(hctx->next_cpu));
+ if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+ cpu_online(hctx->next_cpu)) {
+ printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
+ raw_smp_processor_id(),
+ cpumask_empty(hctx->cpumask) ? "inactive": "active");
+ dump_stack();
+ }
/*
* We can't run the queue inline with ints disabled. Ensure that
@@ -1214,17 +1324,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
*/
WARN_ON_ONCE(in_interrupt());
- if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
- rcu_read_lock();
- blk_mq_sched_dispatch_requests(hctx);
- rcu_read_unlock();
- } else {
- might_sleep();
+ might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
- blk_mq_sched_dispatch_requests(hctx);
- srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
- }
+ hctx_lock(hctx, &srcu_idx);
+ blk_mq_sched_dispatch_requests(hctx);
+ hctx_unlock(hctx, srcu_idx);
}
/*
@@ -1235,20 +1339,47 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
*/
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
+ bool tried = false;
+
if (hctx->queue->nr_hw_queues == 1)
return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
int next_cpu;
-
- next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
+select_cpu:
+ next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
+ cpu_online_mask);
if (next_cpu >= nr_cpu_ids)
- next_cpu = cpumask_first(hctx->cpumask);
+ next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
- hctx->next_cpu = next_cpu;
+ /*
+ * No online CPU was found, so we still have to set hctx->next_cpu
+ * to a valid CPU, to avoid breaking the workqueue machinery.
+ */
+ if (next_cpu >= nr_cpu_ids)
+ hctx->next_cpu = cpumask_first(hctx->cpumask);
+ else
+ hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
+ /*
+ * Do an unbound schedule if we can't find an online CPU for this
+ * hctx, which should only happen on the CPU-dead handling path.
+ */
+ if (!cpu_online(hctx->next_cpu)) {
+ if (!tried) {
+ tried = true;
+ goto select_cpu;
+ }
+
+ /*
+ * Make sure to re-select the CPU next time, once CPUs
+ * in hctx->cpumask have come online again.
+ */
+ hctx->next_cpu_batch = 1;
+ return WORK_CPU_UNBOUND;
+ }
return hctx->next_cpu;
}
@@ -1272,9 +1403,8 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
put_cpu();
}
- kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
- &hctx->run_work,
- msecs_to_jiffies(msecs));
+ kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
+ msecs_to_jiffies(msecs));
}
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
@@ -1285,7 +1415,23 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (blk_mq_hctx_has_pending(hctx)) {
+ int srcu_idx;
+ bool need_run;
+
+ /*
+ * When the queue is quiesced, we may be switching io schedulers,
+ * updating nr_hw_queues, or doing other things, and the queue can't
+ * be run any more; even __blk_mq_hctx_has_pending() can't be called
+ * safely.
+ *
+ * The queue will be rerun by blk_mq_unquiesce_queue() if it is
+ * quiesced.
+ */
+ hctx_lock(hctx, &srcu_idx);
+ need_run = !blk_queue_quiesced(hctx->queue) &&
+ blk_mq_hctx_has_pending(hctx);
+ hctx_unlock(hctx, srcu_idx);
+
+ if (need_run) {
__blk_mq_delay_run_hw_queue(hctx, async, 0);
return true;
}
@@ -1593,9 +1739,9 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}
-static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
- struct request *rq,
- blk_qc_t *cookie, bool may_sleep)
+static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq,
+ blk_qc_t *cookie)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
@@ -1604,15 +1750,52 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
};
blk_qc_t new_cookie;
blk_status_t ret;
+
+ new_cookie = request_to_qc_t(hctx, rq);
+
+ /*
+ * For OK queue, we are done. For error, caller may kill it.
+ * Any other error (busy), just add it to our list as we
+ * previously would have done.
+ */
+ ret = q->mq_ops->queue_rq(hctx, &bd);
+ switch (ret) {
+ case BLK_STS_OK:
+ *cookie = new_cookie;
+ break;
+ case BLK_STS_RESOURCE:
+ __blk_mq_requeue_request(rq);
+ break;
+ default:
+ *cookie = BLK_QC_T_NONE;
+ break;
+ }
+
+ return ret;
+}
+
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq,
+ blk_qc_t *cookie,
+ bool bypass_insert)
+{
+ struct request_queue *q = rq->q;
bool run_queue = true;
- /* RCU or SRCU read lock is needed before checking quiesced flag */
+ /*
+ * RCU or SRCU read lock is needed before checking quiesced flag.
+ *
+ * When the queue is stopped or quiesced, ignore 'bypass_insert' from
+ * blk_mq_request_issue_directly(), and return BLK_STS_OK to the
+ * caller, so that the caller does not try to dispatch again.
+ */
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
run_queue = false;
+ bypass_insert = false;
goto insert;
}
- if (q->elevator)
+ if (q->elevator && !bypass_insert)
goto insert;
if (!blk_mq_get_driver_tag(rq, NULL, false))
@@ -1623,47 +1806,47 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
goto insert;
}
- new_cookie = request_to_qc_t(hctx, rq);
-
- /*
- * For OK queue, we are done. For error, kill it. Any other
- * error (busy), just add it to our list as we previously
- * would have done
- */
- ret = q->mq_ops->queue_rq(hctx, &bd);
- switch (ret) {
- case BLK_STS_OK:
- *cookie = new_cookie;
- return;
- case BLK_STS_RESOURCE:
- __blk_mq_requeue_request(rq);
- goto insert;
- default:
- *cookie = BLK_QC_T_NONE;
- blk_mq_end_request(rq, ret);
- return;
- }
-
+ return __blk_mq_issue_directly(hctx, rq, cookie);
insert:
- blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
+ if (bypass_insert)
+ return BLK_STS_RESOURCE;
+
+ blk_mq_sched_insert_request(rq, false, run_queue, false);
+ return BLK_STS_OK;
}
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, blk_qc_t *cookie)
{
- if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
- rcu_read_lock();
- __blk_mq_try_issue_directly(hctx, rq, cookie, false);
- rcu_read_unlock();
- } else {
- unsigned int srcu_idx;
+ blk_status_t ret;
+ int srcu_idx;
- might_sleep();
+ might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
- __blk_mq_try_issue_directly(hctx, rq, cookie, true);
- srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
- }
+ hctx_lock(hctx, &srcu_idx);
+
+ ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
+ if (ret == BLK_STS_RESOURCE)
+ blk_mq_sched_insert_request(rq, false, true, false);
+ else if (ret != BLK_STS_OK)
+ blk_mq_end_request(rq, ret);
+
+ hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq)
+{
+ blk_status_t ret;
+ int srcu_idx;
+ blk_qc_t unused_cookie;
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+
+ hctx_lock(hctx, &srcu_idx);
+ ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
+ hctx_unlock(hctx, srcu_idx);
+
+ return ret;
}
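/*
 * blk_mq_request_issue_directly() is the bypass-insert variant used
 * by blk_insert_cloned_request(): on BLK_STS_RESOURCE the request is
 * handed back to the caller rather than inserted into the scheduler.
 */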
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
@@ -1774,7 +1957,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
} else if (q->elevator) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- blk_mq_sched_insert_request(rq, false, true, true, true);
+ blk_mq_sched_insert_request(rq, false, true, true);
} else {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
@@ -1867,6 +2050,22 @@ static size_t order_to_size(unsigned int order)
return (size_t)PAGE_SIZE << order;
}
+static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, int node)
+{
+ int ret;
+
+ if (set->ops->init_request) {
+ ret = set->ops->init_request(set, rq, hctx_idx, node);
+ if (ret)
+ return ret;
+ }
+
+ seqcount_init(&rq->gstate_seq);
+ u64_stats_init(&rq->aborted_gstate_sync);
+ return 0;
+}
+
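+/*
+ * Note that gstate_seq and aborted_gstate_sync are initialized here,
+ * at static-request allocation time, rather than per dispatch in
+ * blk_mq_rq_ctx_init(), since their contents must stay coherent
+ * across request reuse; the legacy path gets the same initialization
+ * in blk_rq_init().
+ */
+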
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx, unsigned int depth)
{
@@ -1928,12 +2127,9 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
struct request *rq = p;
tags->static_rqs[i] = rq;
- if (set->ops->init_request) {
- if (set->ops->init_request(set, rq, hctx_idx,
- node)) {
- tags->static_rqs[i] = NULL;
- goto fail;
- }
+ if (blk_mq_init_request(set, rq, hctx_idx, node)) {
+ tags->static_rqs[i] = NULL;
+ goto fail;
}
p += rq_size;
@@ -1992,7 +2188,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
{
blk_mq_debugfs_unregister_hctx(hctx);
- blk_mq_tag_idle(hctx);
+ if (blk_mq_hw_queue_mapped(hctx))
+ blk_mq_tag_idle(hctx);
if (set->ops->exit_request)
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
@@ -2003,7 +2200,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
set->ops->exit_hctx(hctx, hctx_idx);
if (hctx->flags & BLK_MQ_F_BLOCKING)
- cleanup_srcu_struct(hctx->queue_rq_srcu);
+ cleanup_srcu_struct(hctx->srcu);
blk_mq_remove_cpuhp(hctx);
blk_free_flush_queue(hctx->fq);
@@ -2072,13 +2269,11 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (!hctx->fq)
goto sched_exit_hctx;
- if (set->ops->init_request &&
- set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
- node))
+ if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
goto free_fq;
if (hctx->flags & BLK_MQ_F_BLOCKING)
- init_srcu_struct(hctx->queue_rq_srcu);
+ init_srcu_struct(hctx->srcu);
blk_mq_debugfs_register_hctx(q, hctx);
@@ -2114,16 +2309,11 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
- /* If the cpu isn't present, the cpu is mapped to first hctx */
- if (!cpu_present(i))
- continue;
-
- hctx = blk_mq_map_queue(q, i);
-
/*
* Set local node, IFF we have more than one hw queue. If
* not, we remain on the home node of the device
*/
+ hctx = blk_mq_map_queue(q, i);
if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
hctx->numa_node = local_memory_node(cpu_to_node(i));
}
@@ -2180,7 +2370,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
*
* If the cpu isn't present, the cpu is mapped to first hctx.
*/
- for_each_present_cpu(i) {
+ for_each_possible_cpu(i) {
hctx_idx = q->mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
@@ -2234,7 +2424,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
/*
* Initialize batch roundrobin counts
*/
- hctx->next_cpu = cpumask_first(hctx->cpumask);
+ hctx->next_cpu = cpumask_first_and(hctx->cpumask,
+ cpu_online_mask);
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
}
@@ -2367,7 +2558,7 @@ static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
+ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
__alignof__(struct blk_mq_hw_ctx)) !=
sizeof(struct blk_mq_hw_ctx));
@@ -2384,6 +2575,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
blk_mq_sysfs_unregister(q);
+
+ /* protect against switching io scheduler */
+ mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int node;
@@ -2428,6 +2622,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
}
}
q->nr_hw_queues = i;
+ mutex_unlock(&q->sysfs_lock);
blk_mq_sysfs_register(q);
}
@@ -2599,9 +2794,27 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
- if (set->ops->map_queues)
+ if (set->ops->map_queues) {
+ int cpu;
+ /*
+ * A transport's .map_queues implementation usually builds the
+ * mapping in the following way:
+ *
+ * for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ * mask = get_cpu_mask(queue)
+ * for_each_cpu(cpu, mask)
+ * set->mq_map[cpu] = queue;
+ * }
+ *
+ * When we need to remap, the table has to be cleared first to
+ * kill stale mappings, since after the remap a CPU may not be
+ * mapped to any hw queue.
+ */
+ for_each_possible_cpu(cpu)
+ set->mq_map[cpu] = 0;
+
return set->ops->map_queues(set);
- else
+ } else
return blk_mq_map_queues(set);
}
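
To make the pre-clearing concrete, a hedged sketch of a transport-style .map_queues along the lines of the pseudocode in the comment above; mytransport_queue_mask() is an assumed helper, not a real API. After a remap, any CPU absent from every mask keeps the 0 the caller just wrote instead of a stale queue index:

	static int mytransport_map_queues(struct blk_mq_tag_set *set)
	{
		unsigned int queue;
		int cpu;

		for (queue = 0; queue < set->nr_hw_queues; queue++) {
			const struct cpumask *mask = mytransport_queue_mask(queue);

			for_each_cpu(cpu, mask)
				set->mq_map[cpu] = queue;
		}
		/* CPUs not covered by any mask stay mapped to hw queue 0. */
		return 0;
	}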
@@ -2710,6 +2923,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return -EINVAL;
blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
ret = 0;
queue_for_each_hw_ctx(q, hctx, i) {
@@ -2733,6 +2947,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
if (!ret)
q->nr_requests = nr;
+ blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
@@ -2848,7 +3063,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
unsigned int nsecs;
ktime_t kt;
- if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
+ if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
return false;
/*
@@ -2868,7 +3083,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
if (!nsecs)
return false;
- set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
+ rq->rq_flags |= RQF_MQ_POLL_SLEPT;
/*
* This will be replaced with the stats tracking code, using
@@ -2882,7 +3097,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
hrtimer_init_sleeper(&hs, current);
do {
- if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+ if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
hrtimer_start_expires(&hs.timer, mode);
@@ -2968,12 +3183,6 @@ static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
static int __init blk_mq_init(void)
{
- /*
- * See comment in block/blk.h rq_atomic_flags enum
- */
- BUILD_BUG_ON((REQ_ATOM_STARTED / BITS_PER_BYTE) !=
- (REQ_ATOM_COMPLETE / BITS_PER_BYTE));
-
cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
blk_mq_hctx_notify_dead);
return 0;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 6c7c3ff5bf62..88c558f71819 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -27,6 +27,20 @@ struct blk_mq_ctx {
struct kobject kobj;
} ____cacheline_aligned_in_smp;
+/*
+ * Bits for request->gstate. The lower two bits carry MQ_RQ_* state value
+ * and the upper bits the generation number.
+ */
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+
+ MQ_RQ_STATE_BITS = 2,
+ MQ_RQ_STATE_MASK = (1 << MQ_RQ_STATE_BITS) - 1,
+ MQ_RQ_GEN_INC = 1 << MQ_RQ_STATE_BITS,
+};
+
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
@@ -60,6 +74,9 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
+/* Used by blk_insert_cloned_request() to issue a request directly */
+blk_status_t blk_mq_request_issue_directly(struct request *rq);
+
/*
* CPU -> queue mappings
*/
@@ -81,10 +98,41 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
-extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
-
void blk_mq_release(struct request_queue *q);
+/**
+ * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
+ * @rq: target request.
+ */
+static inline int blk_mq_rq_state(struct request *rq)
+{
+ return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
+}
+
+/**
+ * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
+ * @rq: target request.
+ * @state: new state to set.
+ *
+ * Set @rq's state to @state. The caller is responsible for ensuring that
+ * there are no other updaters. A request can transition into IN_FLIGHT
+ * only from IDLE and doing so increments the generation number.
+ */
+static inline void blk_mq_rq_update_state(struct request *rq,
+ enum mq_rq_state state)
+{
+ u64 old_val = READ_ONCE(rq->gstate);
+ u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;
+
+ if (state == MQ_RQ_IN_FLIGHT) {
+ WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
+ new_val += MQ_RQ_GEN_INC;
+ }
+
+ /* avoid exposing interim values */
+ WRITE_ONCE(rq->gstate, new_val);
+}
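
A hedged usage sketch, not taken from this patch: because an IDLE -> IN_FLIGHT transition bumps the generation in the upper bits, comparing full gstate snapshots detects request recycling even when the two low state bits happen to match again:

	u64 snap = READ_ONCE(rq->gstate);		/* e.g. from a timeout scan */

	blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);	/* issue: generation bumped */
	blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);	/* complete: generation kept */

	if (READ_ONCE(rq->gstate) != snap)
		pr_debug("request completed or was recycled meanwhile\n");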
+
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 870484eaed1f..cbea895a5547 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -853,6 +853,10 @@ struct kobj_type blk_queue_ktype = {
.release = blk_release_queue,
};
+/**
+ * blk_register_queue - register a block layer queue with sysfs
+ * @disk: Disk of which the request queue should be registered with sysfs.
+ */
int blk_register_queue(struct gendisk *disk)
{
int ret;
@@ -909,11 +913,12 @@ int blk_register_queue(struct gendisk *disk)
if (q->request_fn || (q->mq_ops && q->elevator)) {
ret = elv_register_queue(q);
if (ret) {
+ mutex_unlock(&q->sysfs_lock);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(dev);
kobject_put(&dev->kobj);
- goto unlock;
+ return ret;
}
}
ret = 0;
@@ -921,7 +926,15 @@ unlock:
mutex_unlock(&q->sysfs_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(blk_register_queue);
+/**
+ * blk_unregister_queue - counterpart of blk_register_queue()
+ * @disk: Disk of which the request queue should be unregistered from sysfs.
+ *
+ * Note: the caller is responsible for guaranteeing that this function is called
+ * after blk_register_queue() has finished.
+ */
void blk_unregister_queue(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
@@ -929,21 +942,39 @@ void blk_unregister_queue(struct gendisk *disk)
if (WARN_ON(!q))
return;
- mutex_lock(&q->sysfs_lock);
- queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q);
- mutex_unlock(&q->sysfs_lock);
+ /* Return early if disk->queue was never registered. */
+ if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ return;
- wbt_exit(q);
+ /*
+ * Since sysfs_remove_dir() prevents adding new directory entries
+ * before removal of existing entries starts, protect against
+ * concurrent elv_iosched_store() calls.
+ */
+ mutex_lock(&q->sysfs_lock);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
+ spin_unlock_irq(q->queue_lock);
+ /*
+ * Remove the sysfs attributes before unregistering the queue data
+ * structures that can be modified through sysfs.
+ */
if (q->mq_ops)
blk_mq_unregister_dev(disk_to_dev(disk), q);
-
- if (q->request_fn || (q->mq_ops && q->elevator))
- elv_unregister_queue(q);
+ mutex_unlock(&q->sysfs_lock);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
+
+ wbt_exit(q);
+
+ mutex_lock(&q->sysfs_lock);
+ if (q->request_fn || (q->mq_ops && q->elevator))
+ elv_unregister_queue(q);
+ mutex_unlock(&q->sysfs_lock);
+
kobject_put(&disk_to_dev(disk)->kobj);
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index d19f416d6101..c5a131673733 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -216,9 +216,9 @@ struct throtl_data
unsigned int scale;
- struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
- struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
- struct latency_bucket __percpu *latency_buckets;
+ struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
+ struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
+ struct latency_bucket __percpu *latency_buckets[2];
unsigned long last_calculate_time;
unsigned long filtered_latency;
@@ -1511,10 +1511,20 @@ static struct cftype throtl_legacy_files[] = {
.seq_show = blkg_print_stat_bytes,
},
{
+ .name = "throttle.io_service_bytes_recursive",
+ .private = (unsigned long)&blkcg_policy_throtl,
+ .seq_show = blkg_print_stat_bytes_recursive,
+ },
+ {
.name = "throttle.io_serviced",
.private = (unsigned long)&blkcg_policy_throtl,
.seq_show = blkg_print_stat_ios,
},
+ {
+ .name = "throttle.io_serviced_recursive",
+ .private = (unsigned long)&blkcg_policy_throtl,
+ .seq_show = blkg_print_stat_ios_recursive,
+ },
{ } /* terminate */
};
@@ -2040,10 +2050,10 @@ static void blk_throtl_update_idletime(struct throtl_grp *tg)
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
- struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
- int i, cpu;
- unsigned long last_latency = 0;
- unsigned long latency;
+ struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
+ int i, cpu, rw;
+ unsigned long last_latency[2] = { 0 };
+ unsigned long latency[2];
if (!blk_queue_nonrot(td->queue))
return;
@@ -2052,56 +2062,67 @@ static void throtl_update_latency_buckets(struct throtl_data *td)
td->last_calculate_time = jiffies;
memset(avg_latency, 0, sizeof(avg_latency));
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
- struct latency_bucket *tmp = &td->tmp_buckets[i];
-
- for_each_possible_cpu(cpu) {
- struct latency_bucket *bucket;
-
- /* this isn't race free, but ok in practice */
- bucket = per_cpu_ptr(td->latency_buckets, cpu);
- tmp->total_latency += bucket[i].total_latency;
- tmp->samples += bucket[i].samples;
- bucket[i].total_latency = 0;
- bucket[i].samples = 0;
- }
+ for (rw = READ; rw <= WRITE; rw++) {
+ for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
+ struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
+
+ for_each_possible_cpu(cpu) {
+ struct latency_bucket *bucket;
+
+ /* this isn't race free, but ok in practice */
+ bucket = per_cpu_ptr(td->latency_buckets[rw],
+ cpu);
+ tmp->total_latency += bucket[i].total_latency;
+ tmp->samples += bucket[i].samples;
+ bucket[i].total_latency = 0;
+ bucket[i].samples = 0;
+ }
- if (tmp->samples >= 32) {
- int samples = tmp->samples;
+ if (tmp->samples >= 32) {
+ int samples = tmp->samples;
- latency = tmp->total_latency;
+ latency[rw] = tmp->total_latency;
- tmp->total_latency = 0;
- tmp->samples = 0;
- latency /= samples;
- if (latency == 0)
- continue;
- avg_latency[i].latency = latency;
+ tmp->total_latency = 0;
+ tmp->samples = 0;
+ latency[rw] /= samples;
+ if (latency[rw] == 0)
+ continue;
+ avg_latency[rw][i].latency = latency[rw];
+ }
}
}
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
- if (!avg_latency[i].latency) {
- if (td->avg_buckets[i].latency < last_latency)
- td->avg_buckets[i].latency = last_latency;
- continue;
- }
+ for (rw = READ; rw <= WRITE; rw++) {
+ for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
+ if (!avg_latency[rw][i].latency) {
+ if (td->avg_buckets[rw][i].latency < last_latency[rw])
+ td->avg_buckets[rw][i].latency =
+ last_latency[rw];
+ continue;
+ }
- if (!td->avg_buckets[i].valid)
- latency = avg_latency[i].latency;
- else
- latency = (td->avg_buckets[i].latency * 7 +
- avg_latency[i].latency) >> 3;
+ if (!td->avg_buckets[rw][i].valid)
+ latency[rw] = avg_latency[rw][i].latency;
+ else
+ latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
+ avg_latency[rw][i].latency) >> 3;
- td->avg_buckets[i].latency = max(latency, last_latency);
- td->avg_buckets[i].valid = true;
- last_latency = td->avg_buckets[i].latency;
+ td->avg_buckets[rw][i].latency = max(latency[rw],
+ last_latency[rw]);
+ td->avg_buckets[rw][i].valid = true;
+ last_latency[rw] = td->avg_buckets[rw][i].latency;
+ }
}
for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
throtl_log(&td->service_queue,
- "Latency bucket %d: latency=%ld, valid=%d", i,
- td->avg_buckets[i].latency, td->avg_buckets[i].valid);
+ "Latency bucket %d: read latency=%ld, read valid=%d, "
+ "write latency=%ld, write valid=%d", i,
+ td->avg_buckets[READ][i].latency,
+ td->avg_buckets[READ][i].valid,
+ td->avg_buckets[WRITE][i].latency,
+ td->avg_buckets[WRITE][i].valid);
}
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
@@ -2242,16 +2263,17 @@ static void throtl_track_latency(struct throtl_data *td, sector_t size,
struct latency_bucket *latency;
int index;
- if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
+ if (!td || td->limit_index != LIMIT_LOW ||
+ !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
!blk_queue_nonrot(td->queue))
return;
index = request_bucket_index(size);
- latency = get_cpu_ptr(td->latency_buckets);
+ latency = get_cpu_ptr(td->latency_buckets[op]);
latency[index].total_latency += time;
latency[index].samples++;
- put_cpu_ptr(td->latency_buckets);
+ put_cpu_ptr(td->latency_buckets[op]);
}
void blk_throtl_stat_add(struct request *rq, u64 time_ns)
@@ -2270,6 +2292,7 @@ void blk_throtl_bio_endio(struct bio *bio)
unsigned long finish_time;
unsigned long start_time;
unsigned long lat;
+ int rw = bio_data_dir(bio);
tg = bio->bi_cg_private;
if (!tg)
@@ -2298,7 +2321,7 @@ void blk_throtl_bio_endio(struct bio *bio)
bucket = request_bucket_index(
blk_stat_size(&bio->bi_issue_stat));
- threshold = tg->td->avg_buckets[bucket].latency +
+ threshold = tg->td->avg_buckets[rw][bucket].latency +
tg->latency_target;
if (lat > threshold)
tg->bad_bio_cnt++;
@@ -2391,9 +2414,16 @@ int blk_throtl_init(struct request_queue *q)
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
if (!td)
return -ENOMEM;
- td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
+ td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
LATENCY_BUCKET_SIZE, __alignof__(u64));
- if (!td->latency_buckets) {
+ if (!td->latency_buckets[READ]) {
+ kfree(td);
+ return -ENOMEM;
+ }
+ td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
+ LATENCY_BUCKET_SIZE, __alignof__(u64));
+ if (!td->latency_buckets[WRITE]) {
+ free_percpu(td->latency_buckets[READ]);
kfree(td);
return -ENOMEM;
}
@@ -2412,7 +2442,8 @@ int blk_throtl_init(struct request_queue *q)
/* activate policy */
ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
if (ret) {
- free_percpu(td->latency_buckets);
+ free_percpu(td->latency_buckets[READ]);
+ free_percpu(td->latency_buckets[WRITE]);
kfree(td);
}
return ret;
@@ -2423,7 +2454,8 @@ void blk_throtl_exit(struct request_queue *q)
BUG_ON(!q->td);
throtl_shutdown_wq(q);
blkcg_deactivate_policy(q, &blkcg_policy_throtl);
- free_percpu(q->td->latency_buckets);
+ free_percpu(q->td->latency_buckets[READ]);
+ free_percpu(q->td->latency_buckets[WRITE]);
kfree(q->td);
}
@@ -2441,15 +2473,17 @@ void blk_throtl_register_queue(struct request_queue *q)
} else {
td->throtl_slice = DFL_THROTL_SLICE_HD;
td->filtered_latency = LATENCY_FILTERED_HD;
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
- td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+ for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
+ td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
+ td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
+ }
}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
/* if no low limit, use previous default */
td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif
- td->track_bio_latency = !q->mq_ops && !q->request_fn;
+ td->track_bio_latency = !queue_is_rq_based(q);
if (!td->track_bio_latency)
blk_stat_enable_accounting(q);
}
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 764ecf9aeb30..a05e3676d24a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -112,7 +112,9 @@ static void blk_rq_timed_out(struct request *req)
static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
unsigned int *next_set)
{
- if (time_after_eq(jiffies, rq->deadline)) {
+ const unsigned long deadline = blk_rq_deadline(rq);
+
+ if (time_after_eq(jiffies, deadline)) {
list_del_init(&rq->timeout_list);
/*
@@ -120,8 +122,8 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
*/
if (!blk_mark_rq_complete(rq))
blk_rq_timed_out(rq);
- } else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
- *next_timeout = rq->deadline;
+ } else if (!*next_set || time_after(*next_timeout, deadline)) {
+ *next_timeout = deadline;
*next_set = 1;
}
}
@@ -156,12 +158,17 @@ void blk_timeout_work(struct work_struct *work)
*/
void blk_abort_request(struct request *req)
{
- if (blk_mark_rq_complete(req))
- return;
-
if (req->q->mq_ops) {
- blk_mq_rq_timed_out(req, false);
+ /*
+ * All we need to ensure is that the timeout scan takes place
+ * immediately and that it sees the new timeout value.
+ * No need for fancy synchronization.
+ */
+ blk_rq_set_deadline(req, jiffies);
+ mod_timer(&req->q->timeout, 0);
} else {
+ if (blk_mark_rq_complete(req))
+ return;
blk_delete_timer(req);
blk_rq_timed_out(req);
}
@@ -208,7 +215,8 @@ void blk_add_timer(struct request *req)
if (!req->timeout)
req->timeout = q->rq_timeout;
- WRITE_ONCE(req->deadline, jiffies + req->timeout);
+ blk_rq_set_deadline(req, jiffies + req->timeout);
+ req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;
/*
* Only the non-mq case needs to add the request to a protected list.
@@ -222,7 +230,7 @@ void blk_add_timer(struct request *req)
* than an existing one, modify the timer. Round up to next nearest
* second.
*/
- expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
+ expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires)) {
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index ff57fb51b338..acb7252c7e81 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -22,6 +22,48 @@ static inline sector_t blk_zone_start(struct request_queue *q,
}
/*
+ * Return true if a request is a write request that needs zone write locking.
+ */
+bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ if (!rq->q->seq_zones_wlock)
+ return false;
+
+ if (blk_rq_is_passthrough(rq))
+ return false;
+
+ switch (req_op(rq)) {
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_WRITE_SAME:
+ case REQ_OP_WRITE:
+ return blk_rq_zone_is_seq(rq);
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
+
+void __blk_req_zone_write_lock(struct request *rq)
+{
+ if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
+ rq->q->seq_zones_wlock)))
+ return;
+
+ WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
+ rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
+}
+EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);
+
+void __blk_req_zone_write_unlock(struct request *rq)
+{
+ rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
+ if (rq->q->seq_zones_wlock)
+ WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
+ rq->q->seq_zones_wlock));
+}
+EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
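
The expected call pattern, sketched with the blk_req_zone_write_lock()/blk_req_zone_write_unlock() wrappers that the deadline scheduler changes below rely on (locking around the scheduler's own state is elided here):

	/* dispatch path of a zone-aware scheduler */
	blk_req_zone_write_lock(rq);	/* no-op unless the write needs it */
	/* hand rq to the driver here */

	/* completion (or requeue) path */
	blk_req_zone_write_unlock(rq);	/* lets the next write to the zone go */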
+
+/*
* Check that a zone report belongs to the partition.
* If yes, fix its start sector and write pointer, copy it in the
* zone information array and return true. Return false otherwise.
diff --git a/block/blk.h b/block/blk.h
index 3f1446937aec..46db5dc83dcb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -120,33 +120,23 @@ void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);
/*
- * Internal atomic flags for request handling
- */
-enum rq_atomic_flags {
- /*
- * Keep these two bits first - not because we depend on the
- * value of them, but we do depend on them being in the same
- * byte of storage to ensure ordering on writes. Keeping them
- * first will achieve that nicely.
- */
- REQ_ATOM_COMPLETE = 0,
- REQ_ATOM_STARTED,
-
- REQ_ATOM_POLL_SLEPT,
-};
-
-/*
* EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds
+ * sure that only one of them succeeds. Steal the bottom bit of the
+ * __deadline field for this.
*/
static inline int blk_mark_rq_complete(struct request *rq)
{
- return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+ return test_and_set_bit(0, &rq->__deadline);
}
static inline void blk_clear_rq_complete(struct request *rq)
{
- clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+ clear_bit(0, &rq->__deadline);
+}
+
+static inline bool blk_rq_is_complete(struct request *rq)
+{
+ return test_bit(0, &rq->__deadline);
}
/*
@@ -172,6 +162,9 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}
+int elv_register_queue(struct request_queue *q);
+void elv_unregister_queue(struct request_queue *q);
+
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
#ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -246,6 +239,21 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
}
/*
+ * Steal a bit from this field for legacy IO path atomic IO marking. Note that
+ * setting the deadline clears the bottom bit, potentially clearing the
+ * completed bit. The user has to be OK with this (current ones are fine).
+ */
+static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
+{
+ rq->__deadline = time & ~0x1UL;
+}
+
+static inline unsigned long blk_rq_deadline(struct request *rq)
+{
+ return rq->__deadline & ~0x1UL;
+}
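
A small sketch of the invariant these helpers keep, assuming a 30-second timeout purely for illustration: storing a deadline forces bit 0 to zero, completion marking is test_and_set_bit() on that same bit, and readers always mask it back out:

	blk_rq_set_deadline(rq, jiffies + 30 * HZ);	/* bit 0 cleared */

	if (!blk_mark_rq_complete(rq)) {
		/* we won the race against the timeout handler */
	}

	/* bit 0 never leaks into the value readers see */
	unsigned long deadline = blk_rq_deadline(rq);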
+
+/*
* Internal io_context interface
*/
void get_io_context(struct io_context *ioc);
@@ -330,4 +338,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
}
#endif /* CONFIG_BOUNCE */
+extern void blk_drain_queue(struct request_queue *q);
+
#endif /* BLK_INTERNAL_H */
diff --git a/block/bounce.c b/block/bounce.c
index 1d05c422c932..6a3e68292273 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -113,45 +113,50 @@ int init_emergency_isa_pool(void)
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
unsigned char *vfrom;
- struct bio_vec tovec, *fromvec = from->bi_io_vec;
+ struct bio_vec tovec, fromvec;
struct bvec_iter iter;
+ /*
+ * The bio of @from is created by bounce, so we can iterate
+ * its bvec from start to end, but @from->bi_iter can't be
+ * trusted because it might be changed by splitting.
+ */
+ struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;
bio_for_each_segment(tovec, to, iter) {
- if (tovec.bv_page != fromvec->bv_page) {
+ fromvec = bio_iter_iovec(from, from_iter);
+ if (tovec.bv_page != fromvec.bv_page) {
/*
* fromvec->bv_offset and fromvec->bv_len might have
* been modified by the block layer, so use the original
* copy, bounce_copy_vec already uses tovec->bv_len
*/
- vfrom = page_address(fromvec->bv_page) +
+ vfrom = page_address(fromvec.bv_page) +
tovec.bv_offset;
bounce_copy_vec(&tovec, vfrom);
flush_dcache_page(tovec.bv_page);
}
-
- fromvec++;
+ bio_advance_iter(from, &from_iter, tovec.bv_len);
}
}
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
- struct bio_vec *bvec, *org_vec;
+ struct bio_vec *bvec, orig_vec;
int i;
- int start = bio_orig->bi_iter.bi_idx;
+ struct bvec_iter orig_iter = bio_orig->bi_iter;
/*
* free up bounce indirect pages used
*/
bio_for_each_segment_all(bvec, bio, i) {
- org_vec = bio_orig->bi_io_vec + i + start;
-
- if (bvec->bv_page == org_vec->bv_page)
- continue;
-
- dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
- mempool_free(bvec->bv_page, pool);
+ orig_vec = bio_iter_iovec(bio_orig, orig_iter);
+ if (bvec->bv_page != orig_vec.bv_page) {
+ dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
+ mempool_free(bvec->bv_page, pool);
+ }
+ bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
}
bio_orig->bi_status = bio->bi_status;
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 15d25ccd51a5..1474153f73e3 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -30,7 +30,7 @@
/**
* bsg_teardown_job - routine to teardown a bsg job
- * @job: bsg_job that is to be torn down
+ * @kref: kref inside bsg_job that is to be torn down
*/
static void bsg_teardown_job(struct kref *kref)
{
@@ -251,6 +251,7 @@ static void bsg_exit_rq(struct request_queue *q, struct request *req)
* @name: device to give bsg device
* @job_fn: bsg job handler
* @dd_job_size: size of LLD data needed for each job
+ * @release: @dev release function
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
bsg_job_fn *job_fn, int dd_job_size,
diff --git a/block/bsg.c b/block/bsg.c
index 452f94f1c5d4..2e2c1e222209 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -32,6 +32,9 @@
#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
#define BSG_VERSION "0.4"
+#define bsg_dbg(bd, fmt, ...) \
+ pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)
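
Unlike the old dprintk(), which printed at KERN_ERR and required rebuilding with BSG_DEBUG defined, pr_debug() compiles away unless DEBUG or CONFIG_DYNAMIC_DEBUG is set, and with dynamic debug each call site can be toggled at runtime. A call site such as

	bsg_dbg(bd, "queueing rq %p\n", rq);

expands, via string concatenation, to roughly:

	pr_debug("%s: queueing rq %p\n", (bd)->name, rq);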
+
struct bsg_device {
struct request_queue *queue;
spinlock_t lock;
@@ -55,14 +58,6 @@ enum {
#define BSG_DEFAULT_CMDS 64
#define BSG_MAX_DEVS 32768
-#undef BSG_DEBUG
-
-#ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
-#else
-#define dprintk(fmt, args...)
-#endif
-
static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);
@@ -123,7 +118,7 @@ static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
bc->bd = bd;
INIT_LIST_HEAD(&bc->list);
- dprintk("%s: returning free cmd %p\n", bd->name, bc);
+ bsg_dbg(bd, "returning free cmd %p\n", bc);
return bc;
out:
spin_unlock_irq(&bd->lock);
@@ -222,7 +217,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t mode)
if (!bcd->class_dev)
return ERR_PTR(-ENXIO);
- dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
+ bsg_dbg(bd, "map hdr %llx/%u %llx/%u\n",
+ (unsigned long long) hdr->dout_xferp,
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
hdr->din_xfer_len);
@@ -299,8 +295,8 @@ static void bsg_rq_end_io(struct request *rq, blk_status_t status)
struct bsg_device *bd = bc->bd;
unsigned long flags;
- dprintk("%s: finished rq %p bc %p, bio %p\n",
- bd->name, rq, bc, bc->bio);
+ bsg_dbg(bd, "finished rq %p bc %p, bio %p\n",
+ rq, bc, bc->bio);
bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
@@ -333,7 +329,7 @@ static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
list_add_tail(&bc->list, &bd->busy_list);
spin_unlock_irq(&bd->lock);
- dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
+ bsg_dbg(bd, "queueing rq %p, bc %p\n", rq, bc);
rq->end_io_data = bc;
blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
@@ -379,7 +375,7 @@ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
}
} while (1);
- dprintk("%s: returning done %p\n", bd->name, bc);
+ bsg_dbg(bd, "returning done %p\n", bc);
return bc;
}
@@ -390,7 +386,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
struct scsi_request *req = scsi_req(rq);
int ret = 0;
- dprintk("rq %p bio %p 0x%x\n", rq, bio, req->result);
+ pr_debug("rq %p bio %p 0x%x\n", rq, bio, req->result);
/*
* fill in all the output members
*/
@@ -469,7 +465,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
struct bsg_command *bc;
int ret, tret;
- dprintk("%s: entered\n", bd->name);
+ bsg_dbg(bd, "entered\n");
/*
* wait for all commands to complete
@@ -572,7 +568,7 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
int ret;
ssize_t bytes_read;
- dprintk("%s: read %zd bytes\n", bd->name, count);
+ bsg_dbg(bd, "read %zd bytes\n", count);
bsg_set_block(bd, file);
@@ -646,7 +642,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
ssize_t bytes_written;
int ret;
- dprintk("%s: write %zd bytes\n", bd->name, count);
+ bsg_dbg(bd, "write %zd bytes\n", count);
if (unlikely(uaccess_kernel()))
return -EINVAL;
@@ -664,7 +660,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
if (!bytes_written || err_block_err(ret))
bytes_written = ret;
- dprintk("%s: returning %zd\n", bd->name, bytes_written);
+ bsg_dbg(bd, "returning %zd\n", bytes_written);
return bytes_written;
}
@@ -717,7 +713,7 @@ static int bsg_put_device(struct bsg_device *bd)
hlist_del(&bd->dev_list);
mutex_unlock(&bsg_mutex);
- dprintk("%s: tearing down\n", bd->name);
+ bsg_dbg(bd, "tearing down\n");
/*
* close can always block
@@ -744,9 +740,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
struct file *file)
{
struct bsg_device *bd;
-#ifdef BSG_DEBUG
unsigned char buf[32];
-#endif
if (!blk_queue_scsi_passthrough(rq)) {
WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
@@ -771,7 +765,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
- dprintk("bound to <%s>, max queue %d\n",
+ bsg_dbg(bd, "bound to <%s>, max queue %d\n",
format_dev_t(buf, inode->i_rdev), bd->max_queue);
mutex_unlock(&bsg_mutex);
@@ -845,10 +839,10 @@ static int bsg_release(struct inode *inode, struct file *file)
return bsg_put_device(bd);
}
-static unsigned int bsg_poll(struct file *file, poll_table *wait)
+static __poll_t bsg_poll(struct file *file, poll_table *wait)
{
struct bsg_device *bd = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &bd->wq_done, wait);
poll_wait(file, &bd->wq_free, wait);
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b83f77460d28..9de9f156e203 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -50,8 +50,6 @@ struct deadline_data {
int front_merges;
};
-static void deadline_move_request(struct deadline_data *, struct request *);
-
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
@@ -100,6 +98,12 @@ deadline_add_request(struct request_queue *q, struct request *rq)
struct deadline_data *dd = q->elevator->elevator_data;
const int data_dir = rq_data_dir(rq);
+ /*
+ * This may be a requeue of a write request that has locked its
+ * target zone. If it is the case, this releases the zone lock.
+ */
+ blk_req_zone_write_unlock(rq);
+
deadline_add_rq_rb(dd, rq);
/*
@@ -190,6 +194,12 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
struct request_queue *q = rq->q;
+ /*
+ * For a zoned block device, write requests must write lock their
+ * target zone.
+ */
+ blk_req_zone_write_lock(rq);
+
deadline_remove_request(q, rq);
elv_dispatch_add_tail(q, rq);
}
@@ -231,6 +241,69 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
}
/*
+ * For the specified data direction, return the next request to dispatch using
+ * arrival ordered lists.
+ */
+static struct request *
+deadline_fifo_request(struct deadline_data *dd, int data_dir)
+{
+ struct request *rq;
+
+ if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
+ return NULL;
+
+ if (list_empty(&dd->fifo_list[data_dir]))
+ return NULL;
+
+ rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ return rq;
+ }
+
+ return NULL;
+}
+
+/*
+ * For the specified data direction, return the next request to dispatch using
+ * sector position sorted lists.
+ */
+static struct request *
+deadline_next_request(struct deadline_data *dd, int data_dir)
+{
+ struct request *rq;
+
+ if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
+ return NULL;
+
+ rq = dd->next_rq[data_dir];
+ if (!rq)
+ return NULL;
+
+ if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ while (rq) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ return rq;
+ rq = deadline_latter_request(rq);
+ }
+
+ return NULL;
+}
+
+/*
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
*/
@@ -239,16 +312,15 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
struct deadline_data *dd = q->elevator->elevator_data;
const int reads = !list_empty(&dd->fifo_list[READ]);
const int writes = !list_empty(&dd->fifo_list[WRITE]);
- struct request *rq;
+ struct request *rq, *next_rq;
int data_dir;
/*
* batches are currently reads XOR writes
*/
- if (dd->next_rq[WRITE])
- rq = dd->next_rq[WRITE];
- else
- rq = dd->next_rq[READ];
+ rq = deadline_next_request(dd, WRITE);
+ if (!rq)
+ rq = deadline_next_request(dd, READ);
if (rq && dd->batching < dd->fifo_batch)
/* we have a next request and are still entitled to batch */
@@ -262,7 +334,8 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
if (reads) {
BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
- if (writes && (dd->starved++ >= dd->writes_starved))
+ if (deadline_fifo_request(dd, WRITE) &&
+ (dd->starved++ >= dd->writes_starved))
goto dispatch_writes;
data_dir = READ;
@@ -291,21 +364,29 @@ dispatch_find_request:
/*
* we are not running a batch, find best request for selected data_dir
*/
- if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
+ next_rq = deadline_next_request(dd, data_dir);
+ if (deadline_check_fifo(dd, data_dir) || !next_rq) {
/*
* A deadline has expired, the last request was in the other
* direction, or we have run out of higher-sectored requests.
* Start again from the request with the earliest expiry time.
*/
- rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ rq = deadline_fifo_request(dd, data_dir);
} else {
/*
* The last req was the same dir and we have a next request in
* sort order. No expired requests so continue on from here.
*/
- rq = dd->next_rq[data_dir];
+ rq = next_rq;
}
+ /*
+ * For a zoned block device, if we only have writes queued and none of
+ * them can be dispatched, rq will be NULL.
+ */
+ if (!rq)
+ return 0;
+
dd->batching = 0;
dispatch_request:
@@ -318,6 +399,16 @@ dispatch_request:
return 1;
}
+/*
+ * For zoned block devices, write unlock the target zone of completed
+ * write requests.
+ */
+static void
+deadline_completed_request(struct request_queue *q, struct request *rq)
+{
+ blk_req_zone_write_unlock(rq);
+}
+
static void deadline_exit_queue(struct elevator_queue *e)
{
struct deadline_data *dd = e->elevator_data;
@@ -439,6 +530,7 @@ static struct elevator_type iosched_deadline = {
.elevator_merged_fn = deadline_merged_request,
.elevator_merge_req_fn = deadline_merged_requests,
.elevator_dispatch_fn = deadline_dispatch_requests,
+ .elevator_completed_req_fn = deadline_completed_request,
.elevator_add_req_fn = deadline_add_request,
.elevator_former_req_fn = elv_rb_former_request,
.elevator_latter_req_fn = elv_rb_latter_request,
diff --git a/block/elevator.c b/block/elevator.c
index 7bda083d5968..e87e9b43aba0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -869,6 +869,8 @@ int elv_register_queue(struct request_queue *q)
struct elevator_queue *e = q->elevator;
int error;
+ lockdep_assert_held(&q->sysfs_lock);
+
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
if (!error) {
struct elv_fs_entry *attr = e->type->elevator_attrs;
@@ -886,10 +888,11 @@ int elv_register_queue(struct request_queue *q)
}
return error;
}
-EXPORT_SYMBOL(elv_register_queue);
void elv_unregister_queue(struct request_queue *q)
{
+ lockdep_assert_held(&q->sysfs_lock);
+
if (q) {
struct elevator_queue *e = q->elevator;
@@ -900,7 +903,6 @@ void elv_unregister_queue(struct request_queue *q)
wbt_enable_default(q);
}
}
-EXPORT_SYMBOL(elv_unregister_queue);
int elv_register(struct elevator_type *e)
{
@@ -967,7 +969,10 @@ static int elevator_switch_mq(struct request_queue *q,
{
int ret;
+ lockdep_assert_held(&q->sysfs_lock);
+
blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
if (q->elevator) {
if (q->elevator->registered)
@@ -994,6 +999,7 @@ static int elevator_switch_mq(struct request_queue *q,
blk_add_trace_msg(q, "elv switch: none");
out:
+ blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
}
@@ -1010,6 +1016,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
bool old_registered = false;
int err;
+ lockdep_assert_held(&q->sysfs_lock);
+
if (q->mq_ops)
return elevator_switch_mq(q, new_e);
diff --git a/block/genhd.c b/block/genhd.c
index 96a66f671720..88a53c188cb7 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -629,16 +629,18 @@ exit:
}
/**
- * device_add_disk - add partitioning information to kernel list
+ * __device_add_disk - add disk information to kernel list
* @parent: parent device for the disk
* @disk: per-device partitioning information
+ * @register_queue: register the queue if set to true
*
* This function registers the partitioning information in @disk
* with the kernel.
*
* FIXME: error handling
*/
-void device_add_disk(struct device *parent, struct gendisk *disk)
+static void __device_add_disk(struct device *parent, struct gendisk *disk,
+ bool register_queue)
{
dev_t devt;
int retval;
@@ -682,7 +684,8 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
exact_match, exact_lock, disk);
}
register_disk(parent, disk);
- blk_register_queue(disk);
+ if (register_queue)
+ blk_register_queue(disk);
/*
* Take an extra ref on queue which will be put on disk_release()
@@ -693,8 +696,19 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
disk_add_events(disk);
blk_integrity_add(disk);
}
+
+void device_add_disk(struct device *parent, struct gendisk *disk)
+{
+ __device_add_disk(parent, disk, true);
+}
EXPORT_SYMBOL(device_add_disk);
+void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
+{
+ __device_add_disk(parent, disk, false);
+}
+EXPORT_SYMBOL(device_add_disk_no_queue_reg);
+
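
A hedged sketch of the two-step registration this enables; mydrv is hypothetical. A driver can make the disk visible first and attach the queue to sysfs only once it is fully configured:

	device_add_disk_no_queue_reg(parent, mydrv->disk);

	/* later, after the request queue is fully set up */
	blk_register_queue(mydrv->disk);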
void del_gendisk(struct gendisk *disk)
{
struct disk_part_iter piter;
@@ -725,7 +739,8 @@ void del_gendisk(struct gendisk *disk)
* Unregister bdi before releasing device numbers (as they can
* get reused and we'd get clashes in sysfs).
*/
- bdi_unregister(disk->queue->backing_dev_info);
+ if (!(disk->flags & GENHD_FL_HIDDEN))
+ bdi_unregister(disk->queue->backing_dev_info);
blk_unregister_queue(disk);
} else {
WARN_ON(1);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 0179e484ec98..c56f211c8440 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -59,6 +59,7 @@ struct deadline_data {
int front_merges;
spinlock_t lock;
+ spinlock_t zone_lock;
struct list_head dispatch;
};
@@ -192,13 +193,83 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
}
/*
+ * For the specified data direction, return the next request to
+ * dispatch using arrival ordered lists.
+ */
+static struct request *
+deadline_fifo_request(struct deadline_data *dd, int data_dir)
+{
+ struct request *rq;
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
+ return NULL;
+
+ if (list_empty(&dd->fifo_list[data_dir]))
+ return NULL;
+
+ rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ goto out;
+ }
+ rq = NULL;
+out:
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+ return rq;
+}
+
+/*
+ * For the specified data direction, return the next request to
+ * dispatch using sector position sorted lists.
+ */
+static struct request *
+deadline_next_request(struct deadline_data *dd, int data_dir)
+{
+ struct request *rq;
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
+ return NULL;
+
+ rq = dd->next_rq[data_dir];
+ if (!rq)
+ return NULL;
+
+ if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ while (rq) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ break;
+ rq = deadline_latter_request(rq);
+ }
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+ return rq;
+}
+
+/*
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
*/
-static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
- struct deadline_data *dd = hctx->queue->elevator->elevator_data;
- struct request *rq;
+ struct request *rq, *next_rq;
bool reads, writes;
int data_dir;
@@ -214,10 +285,9 @@ static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
/*
* batches are currently reads XOR writes
*/
- if (dd->next_rq[WRITE])
- rq = dd->next_rq[WRITE];
- else
- rq = dd->next_rq[READ];
+ rq = deadline_next_request(dd, WRITE);
+ if (!rq)
+ rq = deadline_next_request(dd, READ);
if (rq && dd->batching < dd->fifo_batch)
/* we have a next request and are still entitled to batch */
@@ -231,7 +301,8 @@ static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
if (reads) {
BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
- if (writes && (dd->starved++ >= dd->writes_starved))
+ if (deadline_fifo_request(dd, WRITE) &&
+ (dd->starved++ >= dd->writes_starved))
goto dispatch_writes;
data_dir = READ;
@@ -260,21 +331,29 @@ dispatch_find_request:
/*
* we are not running a batch, find best request for selected data_dir
*/
- if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
+ next_rq = deadline_next_request(dd, data_dir);
+ if (deadline_check_fifo(dd, data_dir) || !next_rq) {
/*
* A deadline has expired, the last request was in the other
* direction, or we have run out of higher-sectored requests.
* Start again from the request with the earliest expiry time.
*/
- rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ rq = deadline_fifo_request(dd, data_dir);
} else {
/*
* The last req was the same dir and we have a next request in
* sort order. No expired requests so continue on from here.
*/
- rq = dd->next_rq[data_dir];
+ rq = next_rq;
}
+ /*
+ * For a zoned block device, if we only have writes queued and none of
+ * them can be dispatched, rq will be NULL.
+ */
+ if (!rq)
+ return NULL;
+
dd->batching = 0;
dispatch_request:
@@ -284,17 +363,27 @@ dispatch_request:
dd->batching++;
deadline_move_request(dd, rq);
done:
+ /*
+ * If the request needs its target zone locked, do it.
+ */
+ blk_req_zone_write_lock(rq);
rq->rq_flags |= RQF_STARTED;
return rq;
}
+/*
+ * One confusing aspect here is that we get called for a specific
+ * hardware queue, but we may return a request that is for a
+ * different hardware queue. This is because mq-deadline has shared
+ * state for all hardware queues, in terms of sorting, FIFOs, etc.
+ */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
struct deadline_data *dd = hctx->queue->elevator->elevator_data;
struct request *rq;
spin_lock(&dd->lock);
- rq = __dd_dispatch_request(hctx);
+ rq = __dd_dispatch_request(dd);
spin_unlock(&dd->lock);
return rq;
@@ -339,6 +428,7 @@ static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
spin_lock_init(&dd->lock);
+ spin_lock_init(&dd->zone_lock);
INIT_LIST_HEAD(&dd->dispatch);
q->elevator = eq;
@@ -395,6 +485,12 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
struct deadline_data *dd = q->elevator->elevator_data;
const int data_dir = rq_data_dir(rq);
+ /*
+ * This may be a requeue of a write request that has locked its
+ * target zone. If it is the case, this releases the zone lock.
+ */
+ blk_req_zone_write_unlock(rq);
+
if (blk_mq_sched_try_insert_merge(q, rq))
return;
@@ -439,6 +535,26 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
spin_unlock(&dd->lock);
}
+/*
+ * For zoned block devices, write unlock the target zone of
+ * completed write requests. Do this while holding the zone lock
+ * spinlock so that the zone is never unlocked while deadline_fifo_request()
+ * or deadline_next_request() are executing.
+ */
+static void dd_completed_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ if (blk_queue_is_zoned(q)) {
+ struct deadline_data *dd = q->elevator->elevator_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ blk_req_zone_write_unlock(rq);
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+ }
+}
+
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
struct deadline_data *dd = hctx->queue->elevator->elevator_data;
@@ -640,6 +756,7 @@ static struct elevator_type mq_deadline = {
.ops.mq = {
.insert_requests = dd_insert_requests,
.dispatch_request = dd_dispatch_request,
+ .completed_request = dd_completed_request,
.next_request = elv_rb_latter_request,
.former_request = elv_rb_former_request,
.bio_merge = dd_bio_merge,
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 0af3a3db6fb0..82c44f7df911 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -301,7 +301,9 @@ static void parse_bsd(struct parsed_partitions *state,
continue;
bsd_start = le32_to_cpu(p->p_offset);
bsd_size = le32_to_cpu(p->p_size);
- if (memcmp(flavour, "bsd\0", 4) == 0)
+ /* FreeBSD uses relative offsets if the C partition's offset is zero */
+ if (memcmp(flavour, "bsd\0", 4) == 0 &&
+ le32_to_cpu(l->d_partitions[2].p_offset) == 0)
bsd_start += offset;
if (offset == bsd_start && size == bsd_size)
/* full parent partition, we have it already */
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index edcfff974527..60b471f8621b 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -384,9 +384,10 @@ out_put_request:
/**
* sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
- * @file: file this ioctl operates on (optional)
* @q: request queue to send scsi commands down
* @disk: gendisk to operate on (option)
+ * @mode: mode used to open the file through which the ioctl has been
+ * submitted
* @sic: userspace structure describing the command to perform
*
* Send down the scsi command described by @sic to the device below
@@ -415,10 +416,10 @@ out_put_request:
* Positive numbers returned are the compacted SCSI error codes (4
* bytes in one int) where the lowest byte is the SCSI status.
*/
-#define OMAX_SB_LEN 16 /* For backward compatibility */
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
struct scsi_ioctl_command __user *sic)
{
+ enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
struct request *rq;
struct scsi_request *req;
int err;
@@ -692,38 +693,9 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
if (bd && bd == bd->bd_contains)
return 0;
- /* Actually none of these is particularly useful on a partition,
- * but they are safe.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- case SCSI_IOCTL_GET_PCI:
- case SCSI_IOCTL_PROBE_HOST:
- case SG_GET_VERSION_NUM:
- case SG_SET_TIMEOUT:
- case SG_GET_TIMEOUT:
- case SG_GET_RESERVED_SIZE:
- case SG_SET_RESERVED_SIZE:
- case SG_EMULATED_HOST:
- return 0;
- case CDROM_GET_CAPABILITY:
- /* Keep this until we remove the printk below. udev sends it
- * and we do not want to spam dmesg about it. CD-ROMs do
- * not have partitions, so we get here only for disks.
- */
- return -ENOIOCTLCMD;
- default:
- break;
- }
-
if (capable(CAP_SYS_RAWIO))
return 0;
- /* In particular, rule out all resets and host-specific ioctls. */
- printk_ratelimited(KERN_WARNING
- "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
-
return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f7911963bb79..b75264b09a46 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -106,6 +106,7 @@ config CRYPTO_KPP
config CRYPTO_ACOMP2
tristate
select CRYPTO_ALGAPI2
+ select SGL_ALLOC
config CRYPTO_ACOMP
tristate
@@ -130,7 +131,7 @@ config CRYPTO_DH
config CRYPTO_ECDH
tristate "ECDH algorithm"
- select CRYTPO_KPP
+ select CRYPTO_KPP
select CRYPTO_RNG_DEFAULT
help
Generic implementation of the ECDH algorithm
@@ -1339,6 +1340,7 @@ config CRYPTO_SALSA20_586
tristate "Salsa20 stream cipher algorithm (i586)"
depends on (X86 || UML_X86) && !64BIT
select CRYPTO_BLKCIPHER
+ select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
@@ -1352,6 +1354,7 @@ config CRYPTO_SALSA20_X86_64
tristate "Salsa20 stream cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_BLKCIPHER
+ select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
diff --git a/crypto/Makefile b/crypto/Makefile
index d674884b2d51..cdbc03b35510 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
+CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
index 1441f07d0a19..09776bb1360e 100644
--- a/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -18,9 +18,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -28,7 +26,6 @@
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <crypto/ablk_helper.h>
diff --git a/crypto/aead.c b/crypto/aead.c
index f794b30a9407..60b3bbe973e7 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -54,11 +54,18 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen)
{
unsigned long alignmask = crypto_aead_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return setkey_unaligned(tfm, key, keylen);
+ err = setkey_unaligned(tfm, key, keylen);
+ else
+ err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
+ crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_aead_setkey);
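
The user-visible effect: a freshly allocated transform carries CRYPTO_TFM_NEED_KEY (set in the init_tfm hunk below), and only a successful setkey clears it. A minimal sketch, with placeholder key material:

	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	u8 key[16] = { 0 };	/* placeholder, not a real key */

	if (!IS_ERR(tfm) && crypto_aead_setkey(tfm, key, sizeof(key)) == 0) {
		/* NEED_KEY is now clear; requests may proceed */
	}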
@@ -93,6 +100,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
struct crypto_aead *aead = __crypto_aead_cast(tfm);
struct aead_alg *alg = crypto_aead_alg(aead);
+ crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY);
+
aead->authsize = alg->maxauthsize;
if (alg->exit)
@@ -295,7 +304,7 @@ int aead_init_geniv(struct crypto_aead *aead)
if (err)
goto out;
- ctx->sknull = crypto_get_default_null_skcipher2();
+ ctx->sknull = crypto_get_default_null_skcipher();
err = PTR_ERR(ctx->sknull);
if (IS_ERR(ctx->sknull))
goto out;
@@ -315,7 +324,7 @@ out:
return err;
drop_null:
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -325,7 +334,7 @@ void aead_exit_geniv(struct crypto_aead *tfm)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 444a387df219..0f8d8d5523c3 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- const u32 forbidden = CRYPTO_ALG_INTERNAL;
+ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg *sa = (void *)uaddr;
@@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
void *private;
int err;
+ /* If the caller uses a flag that is not allowed, return an error. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
if (sock->state == SS_CONNECTED)
return -EINVAL;
@@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(type))
return PTR_ERR(type);
- private = type->bind(sa->salg_name,
- sa->salg_feat & ~forbidden,
- sa->salg_mask & ~forbidden);
+ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
if (IS_ERR(private)) {
module_put(type->owner);
return PTR_ERR(private);
@@ -664,7 +666,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
unsigned int i;
list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
- ctx->rcvused -= rsgl->sg_num_bytes;
+ atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
af_alg_free_sg(&rsgl->sgl);
list_del(&rsgl->list);
if (rsgl != &areq->first_rsgl)
@@ -1062,13 +1064,13 @@ EXPORT_SYMBOL_GPL(af_alg_async_cb);
/**
* af_alg_poll - poll system call handler
*/
-unsigned int af_alg_poll(struct file *file, struct socket *sock,
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
@@ -1163,7 +1165,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
areq->last_rsgl = rsgl;
len += err;
- ctx->rcvused += err;
+ atomic_add(err, &ctx->rcvused);
rsgl->sg_num_bytes = err;
iov_iter_advance(&msg->msg_iter, err);
}
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3a35d67de7d9..266fc1d64f61 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -193,11 +193,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return ahash_setkey_unaligned(tfm, key, keylen);
+ err = ahash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = tfm->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return tfm->setkey(tfm, key, keylen);
+ crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
@@ -368,7 +375,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -450,7 +462,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
- hash->has_setkey = false;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
@@ -465,7 +476,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
if (alg->setkey) {
hash->setkey = alg->setkey;
- hash->has_setkey = true;
+ if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
}
if (alg->export)
hash->export = alg->export;
@@ -649,5 +661,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+{
+ struct crypto_alg *alg = &halg->base;
+
+ if (alg->cra_type != &crypto_ahash_type)
+ return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
+
+ return __crypto_ahash_alg(alg)->setkey != NULL;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 60d7366ed343..395b082d03a9 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -62,7 +62,7 @@ static int crypto_check_alg(struct crypto_alg *alg)
if (alg->cra_priority < 0)
return -EINVAL;
- atomic_set(&alg->cra_refcnt, 1);
+ refcount_set(&alg->cra_refcnt, 1);
return crypto_set_driver_name(alg);
}
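
refcount_t keeps the familiar API but traps misuse instead of silently wrapping: increments saturate and WARN on overflow or on incrementing from zero, which plain atomic_t does not guarantee. A sketch (the release function is hypothetical):

	refcount_set(&alg->cra_refcnt, 1);	/* as above */
	refcount_inc(&alg->cra_refcnt);		/* WARNs rather than overflowing */
	if (refcount_dec_and_test(&alg->cra_refcnt))
		mymod_free_alg(alg);		/* hypothetical release */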
@@ -123,7 +123,6 @@ static void crypto_remove_instance(struct crypto_instance *inst,
if (!tmpl || !crypto_tmpl_get(tmpl))
return;
- crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
list_move(&inst->alg.cra_list, list);
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
@@ -167,6 +166,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
spawn->alg = NULL;
spawns = &inst->alg.cra_users;
+
+ /*
+ * We may encounter an unregistered instance here, since
+ * an instance's spawns are set up prior to the instance
+ * being registered. An unregistered instance will have
+ * NULL ->cra_users.next, since ->cra_users isn't
+ * properly initialized until registration. But an
+ * unregistered instance cannot have any users, so treat
+ * it the same as ->cra_users being empty.
+ */
+ if (spawns->next == NULL)
+ break;
}
} while ((spawns = crypto_more_spawns(alg, &stack, &top,
&secondary_spawns)));
@@ -224,7 +235,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
if (!larval->adult)
goto free_larval;
- atomic_set(&larval->alg.cra_refcnt, 1);
+ refcount_set(&larval->alg.cra_refcnt, 1);
memcpy(larval->alg.cra_driver_name, alg->cra_driver_name,
CRYPTO_MAX_ALG_NAME);
larval->alg.cra_priority = alg->cra_priority;
@@ -380,7 +391,6 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
alg->cra_flags |= CRYPTO_ALG_DEAD;
- crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
crypto_remove_spawns(alg, list, NULL);
@@ -399,7 +409,7 @@ int crypto_unregister_alg(struct crypto_alg *alg)
if (ret)
return ret;
- BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
+ BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
if (alg->cra_destroy)
alg->cra_destroy(alg);
@@ -458,7 +468,6 @@ int crypto_register_template(struct crypto_template *tmpl)
}
list_add(&tmpl->list, &crypto_template_list);
- crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl);
err = 0;
out:
up_write(&crypto_alg_sem);
@@ -485,12 +494,10 @@ void crypto_unregister_template(struct crypto_template *tmpl)
BUG_ON(err);
}
- crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl);
-
up_write(&crypto_alg_sem);
hlist_for_each_entry_safe(inst, n, list, list) {
- BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
+ BUG_ON(refcount_read(&inst->alg.cra_refcnt) != 1);
crypto_free_instance(inst);
}
crypto_remove_final(&users);
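
The cra_refcnt switch from atomic_t to refcount_t is mechanical but buys the hardened semantics: refcount_t saturates instead of wrapping and warns on increment-from-zero, so a refcounting bug degrades into a leak plus a WARN rather than a potential use-after-free. The API maps one-to-one onto the calls in these hunks:

#include <linux/refcount.h>

refcount_t ref;

refcount_set(&ref, 1);			/* initial reference */
refcount_inc(&ref);			/* saturates and WARNs if it was 0 */
if (refcount_dec_and_test(&ref))	/* true only for the final put */
	release_object();		/* hypothetical destructor */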
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index ddcc45f77edd..4b07edd5a9ff 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -42,7 +42,6 @@
struct aead_tfm {
struct crypto_aead *aead;
- bool has_key;
struct crypto_skcipher *null_tfm;
};
@@ -398,7 +397,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -491,7 +490,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
return ERR_CAST(aead);
}
- null_tfm = crypto_get_default_null_skcipher2();
+ null_tfm = crypto_get_default_null_skcipher();
if (IS_ERR(null_tfm)) {
crypto_free_aead(aead);
kfree(tfm);
@@ -509,7 +508,7 @@ static void aead_release(void *private)
struct aead_tfm *tfm = private;
crypto_free_aead(tfm->aead);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
kfree(tfm);
}
@@ -523,12 +522,8 @@ static int aead_setauthsize(void *private, unsigned int authsize)
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
struct aead_tfm *tfm = private;
- int err;
-
- err = crypto_aead_setkey(tfm->aead, key, keylen);
- tfm->has_key = !err;
- return err;
+ return crypto_aead_setkey(tfm->aead, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -571,7 +566,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
- ctx->rcvused = 0;
+ atomic_set(&ctx->rcvused, 0);
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
@@ -589,7 +584,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
{
struct aead_tfm *tfm = private;
- if (!tfm->has_key)
+ if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 76d2e716c792..6c9b1927a520 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -34,11 +34,6 @@ struct hash_ctx {
struct ahash_request req;
};
-struct algif_hash_tfm {
- struct crypto_ahash *hash;
- bool has_key;
-};
-
static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
@@ -307,7 +302,7 @@ static int hash_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct algif_hash_tfm *tfm;
+ struct crypto_ahash *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -321,7 +316,7 @@ static int hash_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -412,41 +407,17 @@ static struct proto_ops algif_hash_ops_nokey = {
static void *hash_bind(const char *name, u32 type, u32 mask)
{
- struct algif_hash_tfm *tfm;
- struct crypto_ahash *hash;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- hash = crypto_alloc_ahash(name, type, mask);
- if (IS_ERR(hash)) {
- kfree(tfm);
- return ERR_CAST(hash);
- }
-
- tfm->hash = hash;
-
- return tfm;
+ return crypto_alloc_ahash(name, type, mask);
}
static void hash_release(void *private)
{
- struct algif_hash_tfm *tfm = private;
-
- crypto_free_ahash(tfm->hash);
- kfree(tfm);
+ crypto_free_ahash(private);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct algif_hash_tfm *tfm = private;
- int err;
-
- err = crypto_ahash_setkey(tfm->hash, key, keylen);
- tfm->has_key = !err;
-
- return err;
+ return crypto_ahash_setkey(private, key, keylen);
}
static void hash_sock_destruct(struct sock *sk)
@@ -461,11 +432,10 @@ static void hash_sock_destruct(struct sock *sk)
static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
- struct hash_ctx *ctx;
+ struct crypto_ahash *tfm = private;
struct alg_sock *ask = alg_sk(sk);
- struct algif_hash_tfm *tfm = private;
- struct crypto_ahash *hash = tfm->hash;
- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
+ struct hash_ctx *ctx;
+ unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -478,7 +448,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
ask->private = ctx;
- ahash_request_set_tfm(&ctx->req, hash);
+ ahash_request_set_tfm(&ctx->req, tfm);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
@@ -489,9 +459,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
static int hash_accept_parent(void *private, struct sock *sk)
{
- struct algif_hash_tfm *tfm = private;
+ struct crypto_ahash *tfm = private;
- if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return hash_accept_parent_nokey(private, sk);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index baef9bfccdda..c4e885df4564 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -38,11 +38,6 @@
#include <linux/net.h>
#include <net/sock.h>
-struct skcipher_tfm {
- struct crypto_skcipher *skcipher;
- bool has_key;
-};
-
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
@@ -50,8 +45,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
@@ -65,8 +59,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
unsigned int bs = crypto_skcipher_blocksize(tfm);
struct af_alg_async_req *areq;
int err = 0;
@@ -193,7 +186,6 @@ out:
return ret;
}
-
static struct proto_ops algif_skcipher_ops = {
.family = PF_ALG,
@@ -221,7 +213,7 @@ static int skcipher_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct skcipher_tfm *tfm;
+ struct crypto_skcipher *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -235,7 +227,7 @@ static int skcipher_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -314,41 +306,17 @@ static struct proto_ops algif_skcipher_ops_nokey = {
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
- struct skcipher_tfm *tfm;
- struct crypto_skcipher *skcipher;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- skcipher = crypto_alloc_skcipher(name, type, mask);
- if (IS_ERR(skcipher)) {
- kfree(tfm);
- return ERR_CAST(skcipher);
- }
-
- tfm->skcipher = skcipher;
-
- return tfm;
+ return crypto_alloc_skcipher(name, type, mask);
}
static void skcipher_release(void *private)
{
- struct skcipher_tfm *tfm = private;
-
- crypto_free_skcipher(tfm->skcipher);
- kfree(tfm);
+ crypto_free_skcipher(private);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct skcipher_tfm *tfm = private;
- int err;
-
- err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
- tfm->has_key = !err;
-
- return err;
+ return crypto_skcipher_setkey(private, key, keylen);
}
static void skcipher_sock_destruct(struct sock *sk)
@@ -357,8 +325,7 @@ static void skcipher_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
@@ -370,27 +337,26 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- struct skcipher_tfm *tfm = private;
- struct crypto_skcipher *skcipher = tfm->skcipher;
+ struct crypto_skcipher *tfm = private;
unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
+ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
- memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
+ memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
- ctx->rcvused = 0;
+ atomic_set(&ctx->rcvused, 0);
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
@@ -405,9 +371,9 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
static int skcipher_accept_parent(void *private, struct sock *sk)
{
- struct skcipher_tfm *tfm = private;
+ struct crypto_skcipher *tfm = private;
- if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return skcipher_accept_parent_nokey(private, sk);
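
With the key state now carried in the tfm flags, the algif wrapper structs existed only to hold has_key, so bind/release/setkey can pass the bare tfm around as the private pointer. The accept-time gate then looks the same in all three algif front ends; a sketch for the skcipher case (the function name and the trimmed body are illustrative):

static int accept_parent_sketch(void *private, struct sock *sk)
{
	struct crypto_skcipher *tfm = private;	/* no wrapper struct */

	/* no request sockets before setkey() has succeeded */
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return 0;	/* the real code goes on to allocate the ctx */
}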
diff --git a/crypto/api.c b/crypto/api.c
index 2a2479d168aa..70a894e52ff3 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
if (IS_ERR(larval))
return ERR_CAST(larval);
- atomic_set(&larval->alg.cra_refcnt, 2);
+ refcount_set(&larval->alg.cra_refcnt, 2);
down_write(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, type, mask);
@@ -205,7 +205,8 @@ struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alg_lookup);
-struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
+static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
+ u32 mask)
{
struct crypto_alg *alg;
@@ -231,7 +232,6 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
return crypto_larval_add(name, type, mask);
}
-EXPORT_SYMBOL_GPL(crypto_larval_lookup);
int crypto_probing_notify(unsigned long val, void *v)
{
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 875470b0e026..d3d6d72fe649 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -329,7 +329,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -363,7 +363,7 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 0cf5fefdb859..15f91ddd7f0e 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -352,7 +352,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -389,7 +389,7 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 6c43a0a17a55..01c0d4aa2563 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -18,7 +18,6 @@
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
-#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index a02286bf319e..32ddd4836ff5 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index df5c72629383..66169c178314 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -16,8 +16,7 @@
* any later version.
*
* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 058c8d755d03..c8e5ec69790e 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -13,8 +13,7 @@
* any later version.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index 4a45fa4890c0..e451c3cb6a56 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -9,44 +9,38 @@
* (at your option) any later version.
*/
+#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/chacha20.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
-static inline u32 le32_to_cpuvp(const void *p)
-{
- return le32_to_cpup(p);
-}
-
static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes)
{
- u8 stream[CHACHA20_BLOCK_SIZE];
+ u32 stream[CHACHA20_BLOCK_WORDS];
if (dst != src)
memcpy(dst, src, bytes);
while (bytes >= CHACHA20_BLOCK_SIZE) {
chacha20_block(state, stream);
- crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
+ crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE);
bytes -= CHACHA20_BLOCK_SIZE;
dst += CHACHA20_BLOCK_SIZE;
}
if (bytes) {
chacha20_block(state, stream);
- crypto_xor(dst, stream, bytes);
+ crypto_xor(dst, (const u8 *)stream, bytes);
}
}
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
{
- static const char constant[16] = "expand 32-byte k";
-
- state[0] = le32_to_cpuvp(constant + 0);
- state[1] = le32_to_cpuvp(constant + 4);
- state[2] = le32_to_cpuvp(constant + 8);
- state[3] = le32_to_cpuvp(constant + 12);
+ state[0] = 0x61707865; /* "expa" */
+ state[1] = 0x3320646e; /* "nd 3" */
+ state[2] = 0x79622d32; /* "2-by" */
+ state[3] = 0x6b206574; /* "te k" */
state[4] = ctx->key[0];
state[5] = ctx->key[1];
state[6] = ctx->key[2];
@@ -55,10 +49,10 @@ void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
state[9] = ctx->key[5];
state[10] = ctx->key[6];
state[11] = ctx->key[7];
- state[12] = le32_to_cpuvp(iv + 0);
- state[13] = le32_to_cpuvp(iv + 4);
- state[14] = le32_to_cpuvp(iv + 8);
- state[15] = le32_to_cpuvp(iv + 12);
+ state[12] = get_unaligned_le32(iv + 0);
+ state[13] = get_unaligned_le32(iv + 4);
+ state[14] = get_unaligned_le32(iv + 8);
+ state[15] = get_unaligned_le32(iv + 12);
}
EXPORT_SYMBOL_GPL(crypto_chacha20_init);
@@ -72,7 +66,7 @@ int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32));
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
return 0;
}
@@ -111,7 +105,6 @@ static struct skcipher_alg alg = {
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_alignmask = sizeof(u32) - 1,
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA20_KEY_SIZE,
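
Two things fall out of the chacha20_generic.c hunks: the "expand 32-byte k" constant is written directly as the four little-endian words it decodes to, and every key/IV load goes through get_unaligned_le32(), which is what allows .cra_alignmask to be dropped. The constants are easy to verify:

#include <asm/unaligned.h>

/* "expa" read as a little-endian 32-bit word: */
static const u8 expa[4] = { 'e', 'x', 'p', 'a' };

u32 w = get_unaligned_le32(expa);	/* 0x61707865 on any architecture */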
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index db1bc3147bc4..600afa99941f 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
algt->mask));
if (IS_ERR(poly))
return PTR_ERR(poly);
+ poly_hash = __crypto_hash_alg_common(poly);
+
+ err = -EINVAL;
+ if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
+ goto out_put_poly;
err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
ctx = aead_instance_ctx(inst);
ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- poly_hash = __crypto_hash_alg_common(poly);
err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
aead_crypto_instance(inst));
if (err)
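
Hoisting the digest-size check in front of the allocation rejects templates built on a hash other than Poly1305 before any instance state exists; the tag handling downstream assumes a 16-byte digest, so letting a mismatched hash through was unsafe. Seen from a hypothetical caller:

struct crypto_aead *aead;

aead = crypto_alloc_aead("rfc7539(chacha20,sha256)", 0, 0);
/* instantiation now fails instead of yielding a broken instance */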
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index aa2a25fc7482..718cbce8d169 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -133,6 +133,7 @@ static struct shash_alg alg = {
.cra_name = "crc32",
.cra_driver_name = "crc32-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 4c0a0e271876..372320399622 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -146,6 +146,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct chksum_ctx),
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index bd43cf5be14c..addca7bae33f 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -32,7 +32,9 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#define CRYPTD_MAX_CPU_QLEN 1000
+static unsigned int cryptd_max_cpu_qlen = 1000;
+module_param(cryptd_max_cpu_qlen, uint, 0);
+MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
struct cryptd_cpu_queue {
struct crypto_queue queue;
@@ -116,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
}
+ pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
}
@@ -893,10 +896,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- type = CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.halg.base.cra_flags = type;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_OPTIONAL_KEY));
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.statesize = salg->statesize;
@@ -911,7 +913,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = cryptd_hash_finup_enqueue;
inst->alg.export = cryptd_hash_export;
inst->alg.import = cryptd_hash_import;
- inst->alg.setkey = cryptd_hash_setkey;
+ if (crypto_shash_alg_has_setkey(salg))
+ inst->alg.setkey = cryptd_hash_setkey;
inst->alg.digest = cryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
@@ -1372,7 +1375,7 @@ static int __init cryptd_init(void)
{
int err;
- err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
+ err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
if (err)
return err;
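
Turning CRYPTD_MAX_CPU_QLEN into the cryptd_max_cpu_qlen module parameter makes the per-CPU queue depth a load-time knob rather than a compile-time constant, e.g. "modprobe cryptd cryptd_max_cpu_qlen=2000", or "cryptd.cryptd_max_cpu_qlen=2000" on the kernel command line when cryptd is built in (2000 is only an example value). The pr_info() added to cryptd_init_queue() reports the depth actually in effect.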
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 0dbe2be7f783..5c291eedaa70 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -169,7 +169,7 @@ static int crypto_report_one(struct crypto_alg *alg,
ualg->cru_type = 0;
ualg->cru_mask = 0;
ualg->cru_flags = alg->cra_flags;
- ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
+ ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
goto nla_put_failure;
@@ -387,7 +387,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
goto drop_alg;
err = -EBUSY;
- if (atomic_read(&alg->cra_refcnt) > 2)
+ if (refcount_read(&alg->cra_refcnt) > 2)
goto drop_alg;
err = crypto_unregister_instance((struct crypto_instance *)alg);
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 633a9bcdc574..18f32f2a5e1c 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -964,7 +964,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
* DRBG with a security strength of 256.
*/
if (crypto_get_default_rng())
- err = -EFAULT;
+ return -EFAULT;
err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
crypto_put_default_rng();
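
The ecc.c change fixes a clobbered error code: the old code noted -EFAULT but fell through, so crypto_rng_get_bytes() ran against a default RNG that was never successfully acquired and its return value overwrote err. The broken shape, condensed:

if (crypto_get_default_rng())
	err = -EFAULT;		/* noted, but not returned... */
err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
				/* ...clobbered here, with the default RNG
				 * possibly never set up */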
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index e3d889b122e0..45819e6015bf 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -118,8 +118,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct aead_instance *inst;
- struct crypto_aead_spawn *spawn;
- struct aead_alg *alg;
int err;
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
@@ -127,9 +125,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
if (IS_ERR(inst))
return PTR_ERR(inst);
- spawn = aead_instance_ctx(inst);
- alg = crypto_spawn_aead_alg(spawn);
-
err = -EINVAL;
if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
goto free_inst;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 8589681fb9f6..0ad879e1f9b2 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1101,7 +1101,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_aead;
@@ -1129,7 +1129,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index 24e601954c7a..a4b1c026aaee 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -160,8 +160,6 @@ void gf128mul_x8_ble(le128 *r, const le128 *x)
{
u64 a = le64_to_cpu(x->a);
u64 b = le64_to_cpu(x->b);
-
- /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
u64 _tt = gf128mul_table_be[a >> 56];
r->a = cpu_to_le64((a << 8) | (b >> 56));
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 12ad3e3a84e3..1bffb3f712dd 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -56,9 +56,6 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
- if (!ctx->gf128)
- return -ENOKEY;
-
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -111,9 +108,6 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
- if (!ctx->gf128)
- return -ENOKEY;
-
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
diff --git a/crypto/internal.h b/crypto/internal.h
index f07320423191..5ac27fba10e8 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -30,9 +30,6 @@
enum {
CRYPTO_MSG_ALG_REQUEST,
CRYPTO_MSG_ALG_REGISTER,
- CRYPTO_MSG_ALG_UNREGISTER,
- CRYPTO_MSG_TMPL_REGISTER,
- CRYPTO_MSG_TMPL_UNREGISTER,
};
struct crypto_instance;
@@ -78,7 +75,6 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm);
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
-struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
@@ -106,13 +102,13 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
- atomic_inc(&alg->cra_refcnt);
+ refcount_inc(&alg->cra_refcnt);
return alg;
}
static inline void crypto_alg_put(struct crypto_alg *alg)
{
- if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
+ if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
alg->cra_destroy(alg);
}
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index 744e35134c45..ec5c6a087c90 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -188,7 +188,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
}
/* Perform authentication check */
- if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6))
+ if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
ret = -EBADMSG;
memzero_explicit(&block, sizeof(struct crypto_kw_block));
@@ -221,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
* Place the predefined IV into block A -- for encrypt, the caller
	 * does not need to provide an IV, but must fetch the final IV.
*/
- block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6);
+ block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
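
The 0xa6a6a6a6a6a6a6a6 value is the default initial value A defined by RFC 3394. An unsuffixed integer constant takes the first standard type wide enough to hold it, which differs between 32- and 64-bit targets, so the explicit ULL suffix pins it to a 64-bit type and avoids constant-overflow warnings on 32-bit builds. A small illustration (the macro name is made up):

#define KW_DEFAULT_IV	0xa6a6a6a6a6a6a6a6ULL	/* RFC 3394 default IV */

__be64 A = cpu_to_be64(KW_DEFAULT_IV);		/* no truncation warning */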
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index eca04d3729b3..fe5129d6ff4e 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -26,7 +26,6 @@
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/slab.h>
-#include <linux/hardirq.h>
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9
@@ -517,10 +516,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- type = CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.halg.base.cra_flags = type;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_OPTIONAL_KEY));
inst->alg.halg.digestsize = halg->digestsize;
inst->alg.halg.statesize = halg->statesize;
@@ -535,7 +533,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = mcryptd_hash_finup_enqueue;
inst->alg.export = mcryptd_hash_export;
inst->alg.import = mcryptd_hash_import;
- inst->alg.setkey = mcryptd_hash_setkey;
+ if (crypto_hash_alg_has_setkey(halg))
+ inst->alg.setkey = mcryptd_hash_setkey;
inst->alg.digest = mcryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index ee9cfb99fe25..f8ec3d4ba4a8 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
crypto_free_aead(ctx->child);
}
+static void pcrypt_free(struct aead_instance *inst)
+{
+ struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
+
+ crypto_drop_aead(&ctx->spawn);
+ kfree(inst);
+}
+
static int pcrypt_init_instance(struct crypto_instance *inst,
struct crypto_alg *alg)
{
@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.encrypt = pcrypt_aead_encrypt;
inst->alg.decrypt = pcrypt_aead_decrypt;
+ inst->free = pcrypt_free;
+
err = aead_register_instance(tmpl, inst);
if (err)
goto out_drop_aead;
@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
return -EINVAL;
}
-static void pcrypt_free(struct crypto_instance *inst)
-{
- struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
-
- crypto_drop_aead(&ctx->spawn);
- kfree(inst);
-}
-
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
unsigned long val, void *data)
{
@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
static struct crypto_template pcrypt_tmpl = {
.name = "pcrypt",
.create = pcrypt_create,
- .free = pcrypt_free,
.module = THIS_MODULE,
};
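
pcrypt registers new-style AEAD instances, which are torn down through the instance's own ->free callback; the template's .free is only consulted for old-style crypto_instance objects, so the previous hookup meant pcrypt_free() was never called and the spawn and instance leaked. The fix is to wire the destructor up before registration:

inst->free = pcrypt_free;	/* the AEAD core invokes this on teardown */

err = aead_register_instance(tmpl, inst);
if (err)
	goto out_drop_aead;	/* on failure the instance is still ours */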
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b1c2d57dc734..b7a3a0613a30 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc)
}
EXPORT_SYMBOL_GPL(crypto_poly1305_init);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- /* Poly1305 requires a unique key for each tag, which implies that
- * we can't set it on the tfm that gets accessed by multiple users
- * simultaneously. Instead we expect the key as the first 32 bytes in
- * the update() call. */
- return -ENOTSUPP;
-}
-EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
-
static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
@@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
dctx->s[3] = get_unaligned_le32(key + 12);
}
+/*
+ * Poly1305 requires a unique key for each tag, which implies that we can't set
+ * it on the tfm that gets accessed by multiple users simultaneously. Instead we
+ * expect the key as the first 32 bytes in the update() call.
+ */
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen)
{
@@ -210,7 +204,6 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_update);
int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- __le32 *mac = (__le32 *)dst;
u32 h0, h1, h2, h3, h4;
u32 g0, g1, g2, g3, g4;
u32 mask;
@@ -267,10 +260,10 @@ int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
h3 = (h3 >> 18) | (h4 << 8);
/* mac = (h + s) % (2^128) */
- f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f);
- f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f);
- f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f);
- f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f);
+ f = (f >> 32) + h0 + dctx->s[0]; put_unaligned_le32(f, dst + 0);
+ f = (f >> 32) + h1 + dctx->s[1]; put_unaligned_le32(f, dst + 4);
+ f = (f >> 32) + h2 + dctx->s[2]; put_unaligned_le32(f, dst + 8);
+ f = (f >> 32) + h3 + dctx->s[3]; put_unaligned_le32(f, dst + 12);
return 0;
}
@@ -281,14 +274,12 @@ static struct shash_alg poly1305_alg = {
.init = crypto_poly1305_init,
.update = crypto_poly1305_update,
.final = crypto_poly1305_final,
- .setkey = crypto_poly1305_setkey,
.descsize = sizeof(struct poly1305_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_alignmask = sizeof(u32) - 1,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
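
crypto_poly1305_final() used to cast dst to __le32 *, which is only safe in combination with the .cra_alignmask the API enforced by bouncing misaligned buffers. Emitting the tag with put_unaligned_le32() removes the alignment requirement entirely, so the alignmask (and the copying it caused) can go:

#include <asm/unaligned.h>

/* store a 32-bit little-endian word at a possibly unaligned address */
put_unaligned_le32(f, dst + 4);		/* was: mac[1] = cpu_to_le32(f); */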
diff --git a/crypto/proc.c b/crypto/proc.c
index 2cc10c96d753..822fcef6d91c 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -46,7 +46,7 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "driver : %s\n", alg->cra_driver_name);
seq_printf(m, "module : %s\n", module_name(alg->cra_module));
seq_printf(m, "priority : %d\n", alg->cra_priority);
- seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt));
+ seq_printf(m, "refcnt : %u\n", refcount_read(&alg->cra_refcnt));
seq_printf(m, "selftest : %s\n",
(alg->cra_flags & CRYPTO_ALG_TESTED) ?
"passed" : "unknown");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index d7da0eea5622..5074006a56c3 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -19,49 +19,19 @@
*
*/
-#include <linux/init.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/salsa20.h>
#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/crypto.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <crypto/algapi.h>
-#include <asm/byteorder.h>
-#define SALSA20_IV_SIZE 8U
-#define SALSA20_MIN_KEY_SIZE 16U
-#define SALSA20_MAX_KEY_SIZE 32U
-
-/*
- * Start of code taken from D. J. Bernstein's reference implementation.
- * With some modifications and optimizations made to suit our needs.
- */
-
-/*
-salsa20-ref.c version 20051118
-D. J. Bernstein
-Public domain.
-*/
-
-#define U32TO8_LITTLE(p, v) \
- { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \
- (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; }
-#define U8TO32_LITTLE(p) \
- (((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \
- ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) )
-
-struct salsa20_ctx
-{
- u32 input[16];
-};
-
-static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
+static void salsa20_block(u32 *state, __le32 *stream)
{
u32 x[16];
int i;
- memcpy(x, input, sizeof(x));
- for (i = 20; i > 0; i -= 2) {
+ memcpy(x, state, sizeof(x));
+
+ for (i = 0; i < 20; i += 2) {
x[ 4] ^= rol32((x[ 0] + x[12]), 7);
x[ 8] ^= rol32((x[ 4] + x[ 0]), 9);
x[12] ^= rol32((x[ 8] + x[ 4]), 13);
@@ -95,145 +65,137 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
x[14] ^= rol32((x[13] + x[12]), 13);
x[15] ^= rol32((x[14] + x[13]), 18);
}
- for (i = 0; i < 16; ++i)
- x[i] += input[i];
- for (i = 0; i < 16; ++i)
- U32TO8_LITTLE(output + 4 * i,x[i]);
-}
-static const char sigma[16] = "expand 32-byte k";
-static const char tau[16] = "expand 16-byte k";
+ for (i = 0; i < 16; i++)
+ stream[i] = cpu_to_le32(x[i] + state[i]);
+
+ if (++state[8] == 0)
+ state[9]++;
+}
-static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
+static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes)
{
- const char *constants;
+ __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
- ctx->input[1] = U8TO32_LITTLE(k + 0);
- ctx->input[2] = U8TO32_LITTLE(k + 4);
- ctx->input[3] = U8TO32_LITTLE(k + 8);
- ctx->input[4] = U8TO32_LITTLE(k + 12);
- if (kbytes == 32) { /* recommended */
- k += 16;
- constants = sigma;
- } else { /* kbytes == 16 */
- constants = tau;
+ if (dst != src)
+ memcpy(dst, src, bytes);
+
+ while (bytes >= SALSA20_BLOCK_SIZE) {
+ salsa20_block(state, stream);
+ crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE);
+ bytes -= SALSA20_BLOCK_SIZE;
+ dst += SALSA20_BLOCK_SIZE;
+ }
+ if (bytes) {
+ salsa20_block(state, stream);
+ crypto_xor(dst, (const u8 *)stream, bytes);
}
- ctx->input[11] = U8TO32_LITTLE(k + 0);
- ctx->input[12] = U8TO32_LITTLE(k + 4);
- ctx->input[13] = U8TO32_LITTLE(k + 8);
- ctx->input[14] = U8TO32_LITTLE(k + 12);
- ctx->input[0] = U8TO32_LITTLE(constants + 0);
- ctx->input[5] = U8TO32_LITTLE(constants + 4);
- ctx->input[10] = U8TO32_LITTLE(constants + 8);
- ctx->input[15] = U8TO32_LITTLE(constants + 12);
}
-static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
+void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
+ const u8 *iv)
{
- ctx->input[6] = U8TO32_LITTLE(iv + 0);
- ctx->input[7] = U8TO32_LITTLE(iv + 4);
- ctx->input[8] = 0;
- ctx->input[9] = 0;
+ memcpy(state, ctx->initial_state, sizeof(ctx->initial_state));
+ state[6] = get_unaligned_le32(iv + 0);
+ state[7] = get_unaligned_le32(iv + 4);
}
+EXPORT_SYMBOL_GPL(crypto_salsa20_init);
-static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
- const u8 *src, unsigned int bytes)
+int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
{
- u8 buf[64];
-
- if (dst != src)
- memcpy(dst, src, bytes);
-
- while (bytes) {
- salsa20_wordtobyte(buf, ctx->input);
-
- ctx->input[8]++;
- if (!ctx->input[8])
- ctx->input[9]++;
+ static const char sigma[16] = "expand 32-byte k";
+ static const char tau[16] = "expand 16-byte k";
+ struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const char *constants;
- if (bytes <= 64) {
- crypto_xor(dst, buf, bytes);
- return;
- }
+ if (keysize != SALSA20_MIN_KEY_SIZE &&
+ keysize != SALSA20_MAX_KEY_SIZE)
+ return -EINVAL;
- crypto_xor(dst, buf, 64);
- bytes -= 64;
- dst += 64;
+ ctx->initial_state[1] = get_unaligned_le32(key + 0);
+ ctx->initial_state[2] = get_unaligned_le32(key + 4);
+ ctx->initial_state[3] = get_unaligned_le32(key + 8);
+ ctx->initial_state[4] = get_unaligned_le32(key + 12);
+ if (keysize == 32) { /* recommended */
+ key += 16;
+ constants = sigma;
+ } else { /* keysize == 16 */
+ constants = tau;
}
-}
-
-/*
- * End of code taken from D. J. Bernstein's reference implementation.
- */
+ ctx->initial_state[11] = get_unaligned_le32(key + 0);
+ ctx->initial_state[12] = get_unaligned_le32(key + 4);
+ ctx->initial_state[13] = get_unaligned_le32(key + 8);
+ ctx->initial_state[14] = get_unaligned_le32(key + 12);
+ ctx->initial_state[0] = get_unaligned_le32(constants + 0);
+ ctx->initial_state[5] = get_unaligned_le32(constants + 4);
+ ctx->initial_state[10] = get_unaligned_le32(constants + 8);
+ ctx->initial_state[15] = get_unaligned_le32(constants + 12);
+
+ /* space for the nonce; it will be overridden for each request */
+ ctx->initial_state[6] = 0;
+ ctx->initial_state[7] = 0;
+
+ /* initial block number */
+ ctx->initial_state[8] = 0;
+ ctx->initial_state[9] = 0;
-static int setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keysize)
-{
- struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
- salsa20_keysetup(ctx, key, keysize);
return 0;
}
+EXPORT_SYMBOL_GPL(crypto_salsa20_setkey);
-static int encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int salsa20_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ u32 state[16];
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 64);
+ err = skcipher_walk_virt(&walk, req, true);
- salsa20_ivsetup(ctx, walk.iv);
+ crypto_salsa20_init(state, ctx, walk.iv);
- while (walk.nbytes >= 64) {
- salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
- walk.src.virt.addr,
- walk.nbytes - (walk.nbytes % 64));
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
- }
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
- if (walk.nbytes) {
- salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
- walk.src.virt.addr, walk.nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ salsa20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static struct crypto_alg alg = {
- .cra_name = "salsa20",
- .cra_driver_name = "salsa20-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_type = &crypto_blkcipher_type,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct salsa20_ctx),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .setkey = setkey,
- .encrypt = encrypt,
- .decrypt = encrypt,
- .min_keysize = SALSA20_MIN_KEY_SIZE,
- .max_keysize = SALSA20_MAX_KEY_SIZE,
- .ivsize = SALSA20_IV_SIZE,
- }
- }
+static struct skcipher_alg alg = {
+ .base.cra_name = "salsa20",
+ .base.cra_driver_name = "salsa20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct salsa20_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = SALSA20_MIN_KEY_SIZE,
+ .max_keysize = SALSA20_MAX_KEY_SIZE,
+ .ivsize = SALSA20_IV_SIZE,
+ .chunksize = SALSA20_BLOCK_SIZE,
+ .setkey = crypto_salsa20_setkey,
+ .encrypt = salsa20_crypt,
+ .decrypt = salsa20_crypt,
};
static int __init salsa20_generic_mod_init(void)
{
- return crypto_register_alg(&alg);
+ return crypto_register_skcipher(&alg);
}
static void __exit salsa20_generic_mod_fini(void)
{
- crypto_unregister_alg(&alg);
+ crypto_unregister_skcipher(&alg);
}
module_init(salsa20_generic_mod_init);
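
The salsa20 rewrite moves the cipher from the legacy blkcipher interface to skcipher: setkey() precomputes the key- and constant-dependent words once (initial_state), each request copies them into a stack state and fills in the nonce, and .chunksize advertises the 64-byte keystream block so the walk stride lines up. The loop follows the standard skcipher_walk pattern; a sketch of just that loop, assuming ctx and req are in scope:

struct skcipher_walk walk;
u32 state[16];
int err;

err = skcipher_walk_virt(&walk, req, true);
crypto_salsa20_init(state, ctx, walk.iv);

while (walk.nbytes > 0) {
	unsigned int nbytes = walk.nbytes;

	/* every step but the last must consume whole stride units */
	if (nbytes < walk.total)
		nbytes = round_down(nbytes, walk.stride);

	salsa20_docrypt(state, walk.dst.virt.addr,
			walk.src.virt.addr, nbytes);
	err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}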
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 2075e2c4e7df..968bbcf65c94 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -140,53 +140,6 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
return ret;
}
-static void crypto_scomp_sg_free(struct scatterlist *sgl)
-{
- int i, n;
- struct page *page;
-
- if (!sgl)
- return;
-
- n = sg_nents(sgl);
- for_each_sg(sgl, sgl, n, i) {
- page = sg_page(sgl);
- if (page)
- __free_page(page);
- }
-
- kfree(sgl);
-}
-
-static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
-{
- struct scatterlist *sgl;
- struct page *page;
- int i, n;
-
- n = ((size - 1) >> PAGE_SHIFT) + 1;
-
- sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
- if (!sgl)
- return NULL;
-
- sg_init_table(sgl, n);
-
- for (i = 0; i < n; i++) {
- page = alloc_page(gfp);
- if (!page)
- goto err;
- sg_set_page(sgl + i, page, PAGE_SIZE, 0);
- }
-
- return sgl;
-
-err:
- sg_mark_end(sgl + i);
- crypto_scomp_sg_free(sgl);
- return NULL;
-}
-
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
@@ -220,7 +173,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
scratch_dst, &req->dlen, *ctx);
if (!ret) {
if (!req->dst) {
- req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
+ req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
if (!req->dst)
goto out;
}
@@ -274,7 +227,7 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress;
- crt->dst_free = crypto_scomp_sg_free;
+ crt->dst_free = sgl_free;
crt->reqsize = sizeof(void *);
return 0;
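
The open-coded scatterlist allocator in scompress.c is replaced by the generic helpers from lib/scatterlist.c: sgl_alloc() builds a table of order-0 pages covering the requested length and sgl_free() releases both the pages and the table. Usage as in the hunk, where the NULL third argument says the caller does not need the entry count back:

#include <linux/scatterlist.h>

struct scatterlist *sgl;

sgl = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
if (!sgl)
	return -ENOMEM;
/* ... use sgl ... */
sgl_free(sgl);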
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 570b7d1aa0ca..39dbf2f7e5f5 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -144,8 +144,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct aead_instance *inst;
- struct crypto_aead_spawn *spawn;
- struct aead_alg *alg;
int err;
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
@@ -153,9 +151,6 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(inst))
return PTR_ERR(inst);
- spawn = aead_instance_ctx(inst);
- alg = crypto_spawn_aead_alg(spawn);
-
err = -EINVAL;
if (inst->alg.ivsize != sizeof(u64))
goto free_inst;
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7e8ed96236ce..a965b9d80559 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -5,6 +5,7 @@
* http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
*
* SHA-3 code by Jeff Garzik <jeff@garzik.org>
+ * Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -17,12 +18,10 @@
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sha3.h>
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#define KECCAK_ROUNDS 24
-#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
-
static const u64 keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
@@ -34,100 +33,133 @@ static const u64 keccakf_rndc[24] = {
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
-static const int keccakf_rotc[24] = {
- 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
- 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
-};
-
-static const int keccakf_piln[24] = {
- 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
- 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
-};
-
/* update the state with given number of rounds */
-static void keccakf(u64 st[25])
+static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25])
{
- int i, j, round;
- u64 t, bc[5];
+ u64 t[5], tt, bc[5];
+ int round;
for (round = 0; round < KECCAK_ROUNDS; round++) {
/* Theta */
- for (i = 0; i < 5; i++)
- bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15]
- ^ st[i + 20];
-
- for (i = 0; i < 5; i++) {
- t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
- for (j = 0; j < 25; j += 5)
- st[j + i] ^= t;
- }
+ bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
+ bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
+ bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
+ bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
+ bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
+
+ t[0] = bc[4] ^ rol64(bc[1], 1);
+ t[1] = bc[0] ^ rol64(bc[2], 1);
+ t[2] = bc[1] ^ rol64(bc[3], 1);
+ t[3] = bc[2] ^ rol64(bc[4], 1);
+ t[4] = bc[3] ^ rol64(bc[0], 1);
+
+ st[0] ^= t[0];
/* Rho Pi */
- t = st[1];
- for (i = 0; i < 24; i++) {
- j = keccakf_piln[i];
- bc[0] = st[j];
- st[j] = ROTL64(t, keccakf_rotc[i]);
- t = bc[0];
- }
+ tt = st[1];
+ st[ 1] = rol64(st[ 6] ^ t[1], 44);
+ st[ 6] = rol64(st[ 9] ^ t[4], 20);
+ st[ 9] = rol64(st[22] ^ t[2], 61);
+ st[22] = rol64(st[14] ^ t[4], 39);
+ st[14] = rol64(st[20] ^ t[0], 18);
+ st[20] = rol64(st[ 2] ^ t[2], 62);
+ st[ 2] = rol64(st[12] ^ t[2], 43);
+ st[12] = rol64(st[13] ^ t[3], 25);
+ st[13] = rol64(st[19] ^ t[4], 8);
+ st[19] = rol64(st[23] ^ t[3], 56);
+ st[23] = rol64(st[15] ^ t[0], 41);
+ st[15] = rol64(st[ 4] ^ t[4], 27);
+ st[ 4] = rol64(st[24] ^ t[4], 14);
+ st[24] = rol64(st[21] ^ t[1], 2);
+ st[21] = rol64(st[ 8] ^ t[3], 55);
+ st[ 8] = rol64(st[16] ^ t[1], 45);
+ st[16] = rol64(st[ 5] ^ t[0], 36);
+ st[ 5] = rol64(st[ 3] ^ t[3], 28);
+ st[ 3] = rol64(st[18] ^ t[3], 21);
+ st[18] = rol64(st[17] ^ t[2], 15);
+ st[17] = rol64(st[11] ^ t[1], 10);
+ st[11] = rol64(st[ 7] ^ t[2], 6);
+ st[ 7] = rol64(st[10] ^ t[0], 3);
+ st[10] = rol64( tt ^ t[1], 1);
/* Chi */
- for (j = 0; j < 25; j += 5) {
- for (i = 0; i < 5; i++)
- bc[i] = st[j + i];
- for (i = 0; i < 5; i++)
- st[j + i] ^= (~bc[(i + 1) % 5]) &
- bc[(i + 2) % 5];
- }
+ bc[ 0] = ~st[ 1] & st[ 2];
+ bc[ 1] = ~st[ 2] & st[ 3];
+ bc[ 2] = ~st[ 3] & st[ 4];
+ bc[ 3] = ~st[ 4] & st[ 0];
+ bc[ 4] = ~st[ 0] & st[ 1];
+ st[ 0] ^= bc[ 0];
+ st[ 1] ^= bc[ 1];
+ st[ 2] ^= bc[ 2];
+ st[ 3] ^= bc[ 3];
+ st[ 4] ^= bc[ 4];
+
+ bc[ 0] = ~st[ 6] & st[ 7];
+ bc[ 1] = ~st[ 7] & st[ 8];
+ bc[ 2] = ~st[ 8] & st[ 9];
+ bc[ 3] = ~st[ 9] & st[ 5];
+ bc[ 4] = ~st[ 5] & st[ 6];
+ st[ 5] ^= bc[ 0];
+ st[ 6] ^= bc[ 1];
+ st[ 7] ^= bc[ 2];
+ st[ 8] ^= bc[ 3];
+ st[ 9] ^= bc[ 4];
+
+ bc[ 0] = ~st[11] & st[12];
+ bc[ 1] = ~st[12] & st[13];
+ bc[ 2] = ~st[13] & st[14];
+ bc[ 3] = ~st[14] & st[10];
+ bc[ 4] = ~st[10] & st[11];
+ st[10] ^= bc[ 0];
+ st[11] ^= bc[ 1];
+ st[12] ^= bc[ 2];
+ st[13] ^= bc[ 3];
+ st[14] ^= bc[ 4];
+
+ bc[ 0] = ~st[16] & st[17];
+ bc[ 1] = ~st[17] & st[18];
+ bc[ 2] = ~st[18] & st[19];
+ bc[ 3] = ~st[19] & st[15];
+ bc[ 4] = ~st[15] & st[16];
+ st[15] ^= bc[ 0];
+ st[16] ^= bc[ 1];
+ st[17] ^= bc[ 2];
+ st[18] ^= bc[ 3];
+ st[19] ^= bc[ 4];
+
+ bc[ 0] = ~st[21] & st[22];
+ bc[ 1] = ~st[22] & st[23];
+ bc[ 2] = ~st[23] & st[24];
+ bc[ 3] = ~st[24] & st[20];
+ bc[ 4] = ~st[20] & st[21];
+ st[20] ^= bc[ 0];
+ st[21] ^= bc[ 1];
+ st[22] ^= bc[ 2];
+ st[23] ^= bc[ 3];
+ st[24] ^= bc[ 4];
/* Iota */
st[0] ^= keccakf_rndc[round];
}
}
-static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz)
-{
- memset(sctx, 0, sizeof(*sctx));
- sctx->md_len = digest_sz;
- sctx->rsiz = 200 - 2 * digest_sz;
- sctx->rsizw = sctx->rsiz / 8;
-}
-
-static int sha3_224_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
-
- sha3_init(sctx, SHA3_224_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_256_init(struct shash_desc *desc)
+int crypto_sha3_init(struct shash_desc *desc)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- sha3_init(sctx, SHA3_256_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_384_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
-
- sha3_init(sctx, SHA3_384_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_512_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
+ sctx->rsiz = 200 - 2 * digest_size;
+ sctx->rsizw = sctx->rsiz / 8;
+ sctx->partial = 0;
- sha3_init(sctx, SHA3_512_DIGEST_SIZE);
+ memset(sctx->st, 0, sizeof(sctx->st));
return 0;
}
+EXPORT_SYMBOL(crypto_sha3_init);
-static int sha3_update(struct shash_desc *desc, const u8 *data,
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
@@ -149,7 +181,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int i;
for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) src)[i];
+ sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
keccakf(sctx->st);
done += sctx->rsiz;
@@ -163,125 +195,89 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
return 0;
}
+EXPORT_SYMBOL(crypto_sha3_update);
-static int sha3_final(struct shash_desc *desc, u8 *out)
+int crypto_sha3_final(struct shash_desc *desc, u8 *out)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
unsigned int i, inlen = sctx->partial;
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ __le64 *digest = (__le64 *)out;
sctx->buf[inlen++] = 0x06;
memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
sctx->buf[sctx->rsiz - 1] |= 0x80;
for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) sctx->buf)[i];
+ sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
keccakf(sctx->st);
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] = cpu_to_le64(sctx->st[i]);
+ for (i = 0; i < digest_size / 8; i++)
+ put_unaligned_le64(sctx->st[i], digest++);
- memcpy(out, sctx->st, sctx->md_len);
+ if (digest_size & 4)
+ put_unaligned_le32(sctx->st[i], (__le32 *)digest);
memset(sctx, 0, sizeof(*sctx));
return 0;
}
-
-static struct shash_alg sha3_224 = {
- .digestsize = SHA3_224_DIGEST_SIZE,
- .init = sha3_224_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-224",
- .cra_driver_name = "sha3-224-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_256 = {
- .digestsize = SHA3_256_DIGEST_SIZE,
- .init = sha3_256_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-256",
- .cra_driver_name = "sha3-256-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_384 = {
- .digestsize = SHA3_384_DIGEST_SIZE,
- .init = sha3_384_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-384",
- .cra_driver_name = "sha3-384-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_384_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_512 = {
- .digestsize = SHA3_512_DIGEST_SIZE,
- .init = sha3_512_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-512",
- .cra_driver_name = "sha3-512-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
+EXPORT_SYMBOL(crypto_sha3_final);
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-224",
+ .base.cra_driver_name = "sha3-224-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-256",
+ .base.cra_driver_name = "sha3-256-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "sha3-384-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-512",
+ .base.cra_driver_name = "sha3-512-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
static int __init sha3_generic_mod_init(void)
{
- int ret;
-
- ret = crypto_register_shash(&sha3_224);
- if (ret < 0)
- goto err_out;
- ret = crypto_register_shash(&sha3_256);
- if (ret < 0)
- goto err_out_224;
- ret = crypto_register_shash(&sha3_384);
- if (ret < 0)
- goto err_out_256;
- ret = crypto_register_shash(&sha3_512);
- if (ret < 0)
- goto err_out_384;
-
- return 0;
-
-err_out_384:
- crypto_unregister_shash(&sha3_384);
-err_out_256:
- crypto_unregister_shash(&sha3_256);
-err_out_224:
- crypto_unregister_shash(&sha3_224);
-err_out:
- return ret;
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
static void __exit sha3_generic_mod_fini(void)
{
- crypto_unregister_shash(&sha3_224);
- crypto_unregister_shash(&sha3_256);
- crypto_unregister_shash(&sha3_384);
- crypto_unregister_shash(&sha3_512);
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
module_init(sha3_generic_mod_init);
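
Collapsing the four per-size init functions into crypto_sha3_init() works because everything follows from the digest size: the Keccak state is fixed at 200 bytes (25 64-bit lanes), and the sponge rate is whatever remains after reserving twice the digest size as capacity. Worked out for one variant:

/* SHA3-256: digest_size = 32 bytes */
unsigned int rsiz  = 200 - 2 * 32;	/* 136-byte rate (1088 bits) */
unsigned int rsizw = rsiz / 8;		/* 17 lanes absorbed per block */

The switch to get_unaligned_le64() in update/final likewise drops the implicit assumption that the input buffer is 8-byte aligned.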
diff --git a/crypto/shash.c b/crypto/shash.c
index e849d3ee2e27..5d732c6bb4b2 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return shash_setkey_unaligned(tfm, key, keylen);
+ err = shash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = shash->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return shash->setkey(tfm, key, keylen);
+ crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
@@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_digest_unaligned(desc, data, len, out);
@@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->digest = shash_async_digest;
crt->setkey = shash_async_setkey;
- crt->has_setkey = alg->setkey != shash_no_setkey;
+ crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);
if (alg->export)
crt->export = shash_async_export;
@@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+
+ hash->descsize = alg->descsize;
+
+ if (crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
- hash->descsize = crypto_shash_alg(hash)->descsize;
return 0;
}
diff --git a/crypto/simd.c b/crypto/simd.c
index 88203370a62f..208226d7f908 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 11af5fd6a443..0fe2a2923ad0 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -598,8 +598,11 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
- return err;
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
@@ -674,6 +677,9 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
skcipher->keysize = calg->cra_blkcipher.max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
return 0;
}
@@ -692,8 +698,11 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
crypto_skcipher_set_flags(tfm,
crypto_ablkcipher_get_flags(ablkcipher) &
CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
- return err;
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
@@ -767,6 +776,9 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
sizeof(struct ablkcipher_request);
skcipher->keysize = calg->cra_ablkcipher.max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
return 0;
}
@@ -796,6 +808,7 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
{
struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+ int err;
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -803,9 +816,15 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
if ((unsigned long)key & alignmask)
- return skcipher_setkey_unaligned(tfm, key, keylen);
+ err = skcipher_setkey_unaligned(tfm, key, keylen);
+ else
+ err = cipher->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return cipher->setkey(tfm, key, keylen);
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
@@ -834,6 +853,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 9267cbdb14d2..14213a096fd2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -67,6 +67,7 @@ static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode;
+static u32 num_mb = 8;
static char *tvmem[TVMEMSIZE];
static char *check[] = {
@@ -79,6 +80,66 @@ static char *check[] = {
NULL
};
+static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
+static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
+
+#define XBUFSIZE 8
+#define MAX_IVLEN 32
+
+static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+{
+ int i;
+
+ for (i = 0; i < XBUFSIZE; i++) {
+ buf[i] = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf[i])
+ goto err_free_buf;
+ }
+
+ return 0;
+
+err_free_buf:
+ while (i-- > 0)
+ free_page((unsigned long)buf[i]);
+
+ return -ENOMEM;
+}
+
+static void testmgr_free_buf(char *buf[XBUFSIZE])
+{
+ int i;
+
+ for (i = 0; i < XBUFSIZE; i++)
+ free_page((unsigned long)buf[i]);
+}
+
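+/*
+ * Map an AEAD buffer into a scatterlist: entry 0 carries the
+ * associated data, the payload then fills whole pages, and a final
+ * entry holds any partial-page remainder. Buffers larger than
+ * XBUFSIZE pages are clamped to what the xbuf array can hold.
+ */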
+static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
+ unsigned int buflen, const void *assoc,
+ unsigned int aad_size)
+{
+ int np = (buflen + PAGE_SIZE - 1) / PAGE_SIZE;
+ int k, rem;
+
+ if (np > XBUFSIZE) {
+ rem = PAGE_SIZE;
+ np = XBUFSIZE;
+ } else {
+ rem = buflen % PAGE_SIZE;
+ }
+
+ sg_init_table(sg, np + 1);
+
+ sg_set_buf(&sg[0], assoc, aad_size);
+
+ if (rem)
+ np--;
+ for (k = 0; k < np; k++)
+ sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
+
+ if (rem)
+ sg_set_buf(&sg[k + 1], xbuf[k], rem);
+}
+
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
@@ -86,6 +147,298 @@ static inline int do_one_aead_op(struct aead_request *req, int ret)
return crypto_wait_req(ret, wait);
}
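+/*
+ * Per-request state for the multibuffer AEAD speed test: source and
+ * destination scatterlists, the request itself, a completion wait
+ * object, and the backing pages for payload, output and AAD.
+ */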
+struct test_mb_aead_data {
+ struct scatterlist sg[XBUFSIZE];
+ struct scatterlist sgout[XBUFSIZE];
+ struct aead_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+ char *xoutbuf[XBUFSIZE];
+ char *axbuf[XBUFSIZE];
+};
+
+static int do_mult_aead_op(struct test_mb_aead_data *data, int enc,
+ u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++) {
+ if (enc == ENCRYPT)
+ rc[i] = crypto_aead_encrypt(data[i].req);
+ else
+ rc[i] = crypto_aead_decrypt(data[i].req);
+ }
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
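+/*
+ * Wall-clock measurement: fire num_mb-wide batches until 'secs'
+ * seconds have elapsed, then report the operation and byte totals.
+ */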
+static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
+ int blen, int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_aead_op(data, enc, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
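+/*
+ * Cycle measurement: four warm-up batches, then eight timed ones;
+ * the reported figure is the average cycle count per operation.
+ */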
+static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
+ int blen, u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_aead_op(data, enc, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_aead_op(data, enc, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
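+/*
+ * Multibuffer AEAD speed test: keep num_mb requests for 'algo' in
+ * flight at once, for every key length in 'keysize' and every buffer
+ * size in aead_sizes, measuring either seconds or CPU cycles.
+ */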
+static void test_mb_aead_speed(const char *algo, int enc, int secs,
+ struct aead_speed_template *template,
+ unsigned int tcount, u8 authsize,
+ unsigned int aad_size, u8 *keysize, u32 num_mb)
+{
+ struct test_mb_aead_data *data;
+ struct crypto_aead *tfm;
+ unsigned int i, j, iv_len;
+ const char *key;
+ const char *e;
+ void *assoc;
+ u32 *b_size;
+ char *iv;
+ int ret;
+
+ if (aad_size >= PAGE_SIZE) {
+ pr_err("associate data length (%u) too big\n", aad_size);
+ return;
+ }
+
+ iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
+ if (!iv)
+ return;
+
+ if (enc == ENCRYPT)
+ e = "encryption";
+ else
+ e = "decryption";
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out_free_iv;
+
+ tfm = crypto_alloc_aead(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto out_free_data;
+ }
+
+ ret = crypto_aead_setauthsize(tfm, authsize);
+ if (ret) {
+ pr_err("alg: aead: Failed to setauthsize for %s: %d\n",
+ algo, ret);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xbuf);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].axbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].axbuf);
+ goto out_free_xbuf;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xoutbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xoutbuf);
+ goto out_free_axbuf;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: skcipher: Failed to allocate request for %s\n",
+ algo);
+ while (i--)
+ aead_request_free(data[i].req);
+ goto out_free_xoutbuf;
+ }
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ crypto_init_wait(&data[i].wait);
+ aead_request_set_callback(data[i].req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &data[i].wait);
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ get_driver_name(crypto_aead, tfm), e);
+
+ i = 0;
+ do {
+ b_size = aead_sizes;
+ do {
+ if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for buffer (%lu)\n",
+ authsize + *b_size,
+ XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+ /* Set up tfm global state, i.e. the key */
+
+ memset(tvmem[0], 0xff, PAGE_SIZE);
+ key = tvmem[0];
+ for (j = 0; j < tcount; j++) {
+ if (template[j].klen == *keysize) {
+ key = template[j].key;
+ break;
+ }
+ }
+
+ crypto_aead_clear_flags(tfm, ~0);
+
+ ret = crypto_aead_setkey(tfm, key, *keysize);
+ if (ret) {
+ pr_err("setkey() failed flags=%x\n",
+ crypto_aead_get_flags(tfm));
+ goto out;
+ }
+
+ iv_len = crypto_aead_ivsize(tfm);
+ if (iv_len)
+ memset(iv, 0xff, iv_len);
+
+ /* Now set up the per-request state, i.e. the buffers */
+
+ for (j = 0; j < num_mb; ++j) {
+ struct test_mb_aead_data *cur = &data[j];
+
+ assoc = cur->axbuf[0];
+ memset(assoc, 0xff, aad_size);
+
+ sg_init_aead(cur->sg, cur->xbuf,
+ *b_size + (enc ? 0 : authsize),
+ assoc, aad_size);
+
+ sg_init_aead(cur->sgout, cur->xoutbuf,
+ *b_size + (enc ? authsize : 0),
+ assoc, aad_size);
+
+ aead_request_set_ad(cur->req, aad_size);
+
+ if (!enc) {
+
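+ /*
+ * For decryption we need a valid auth tag, so
+ * we run the encryption path once with the
+ * buffers reversed (input <-> output) to
+ * calculate it.
+ */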
+ aead_request_set_crypt(cur->req,
+ cur->sgout,
+ cur->sg,
+ *b_size, iv);
+ ret = crypto_aead_encrypt(cur->req);
+ ret = do_one_aead_op(cur->req, ret);
+
+ if (ret) {
+ pr_err("calculating auth failed failed (%d)\n",
+ ret);
+ break;
+ }
+ }
+
+ aead_request_set_crypt(cur->req, cur->sg,
+ cur->sgout, *b_size +
+ (enc ? 0 : authsize),
+ iv);
+
+ }
+
+ if (secs)
+ ret = test_mb_aead_jiffies(data, enc, *b_size,
+ secs, num_mb);
+ else
+ ret = test_mb_aead_cycles(data, enc, *b_size,
+ num_mb);
+
+ if (ret) {
+ pr_err("%s() failed return code=%d\n", e, ret);
+ break;
+ }
+ b_size++;
+ i++;
+ } while (*b_size);
+ keysize++;
+ } while (*keysize);
+
+out:
+ for (i = 0; i < num_mb; ++i)
+ aead_request_free(data[i].req);
+out_free_xoutbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xoutbuf);
+out_free_axbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].axbuf);
+out_free_xbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xbuf);
+out_free_tfm:
+ crypto_free_aead(tfm);
+out_free_data:
+ kfree(data);
+out_free_iv:
+ kfree(iv);
+}
+
static int test_aead_jiffies(struct aead_request *req, int enc,
int blen, int secs)
{
@@ -151,60 +504,6 @@ out:
return ret;
}
-static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
-static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
-
-#define XBUFSIZE 8
-#define MAX_IVLEN 32
-
-static int testmgr_alloc_buf(char *buf[XBUFSIZE])
-{
- int i;
-
- for (i = 0; i < XBUFSIZE; i++) {
- buf[i] = (void *)__get_free_page(GFP_KERNEL);
- if (!buf[i])
- goto err_free_buf;
- }
-
- return 0;
-
-err_free_buf:
- while (i-- > 0)
- free_page((unsigned long)buf[i]);
-
- return -ENOMEM;
-}
-
-static void testmgr_free_buf(char *buf[XBUFSIZE])
-{
- int i;
-
- for (i = 0; i < XBUFSIZE; i++)
- free_page((unsigned long)buf[i]);
-}
-
-static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
- unsigned int buflen)
-{
- int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
- int k, rem;
-
- if (np > XBUFSIZE) {
- rem = PAGE_SIZE;
- np = XBUFSIZE;
- } else {
- rem = buflen % PAGE_SIZE;
- }
-
- sg_init_table(sg, np + 1);
- np--;
- for (k = 0; k < np; k++)
- sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
-
- sg_set_buf(&sg[k + 1], xbuf[k], rem);
-}
-
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
struct aead_speed_template *template,
unsigned int tcount, u8 authsize,
@@ -316,19 +615,37 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
goto out;
}
- sg_init_aead(sg, xbuf,
- *b_size + (enc ? 0 : authsize));
+ sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize),
+ assoc, aad_size);
sg_init_aead(sgout, xoutbuf,
- *b_size + (enc ? authsize : 0));
+ *b_size + (enc ? authsize : 0), assoc,
+ aad_size);
- sg_set_buf(&sg[0], assoc, aad_size);
- sg_set_buf(&sgout[0], assoc, aad_size);
+ aead_request_set_ad(req, aad_size);
+
+ if (!enc) {
+
+ /*
+ * For decryption we need a valid auth tag, so
+ * we run the encryption path once with the
+ * buffers reversed (input <-> output) to
+ * calculate it.
+ */
+ aead_request_set_crypt(req, sgout, sg,
+ *b_size, iv);
+ ret = do_one_aead_op(req,
+ crypto_aead_encrypt(req));
+
+ if (ret) {
+ pr_err("calculating auth failed failed (%d)\n",
+ ret);
+ break;
+ }
+ }
aead_request_set_crypt(req, sg, sgout,
*b_size + (enc ? 0 : authsize),
iv);
- aead_request_set_ad(req, aad_size);
if (secs)
ret = test_aead_jiffies(req, enc, *b_size,
@@ -381,24 +698,98 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
}
struct test_mb_ahash_data {
- struct scatterlist sg[TVMEMSIZE];
+ struct scatterlist sg[XBUFSIZE];
char result[64];
struct ahash_request *req;
struct crypto_wait wait;
char *xbuf[XBUFSIZE];
};
-static void test_mb_ahash_speed(const char *algo, unsigned int sec,
- struct hash_speed *speed)
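+/* Issue num_mb hash digests concurrently and wait for all of them. */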
+static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++)
+ rc[i] = crypto_ahash_digest(data[i].req);
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
+ int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_ahash_op(data, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
+ u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_ahash_op(data, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_ahash_op(data, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
+static void test_mb_ahash_speed(const char *algo, unsigned int secs,
+ struct hash_speed *speed, u32 num_mb)
{
struct test_mb_ahash_data *data;
struct crypto_ahash *tfm;
- unsigned long start, end;
- unsigned long cycles;
unsigned int i, j, k;
int ret;
- data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
if (!data)
return;
@@ -409,7 +800,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
goto free_data;
}
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < num_mb; ++i) {
if (testmgr_alloc_buf(data[i].xbuf))
goto out;
@@ -424,7 +815,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
ahash_request_set_callback(data[i].req, 0, crypto_req_done,
&data[i].wait);
- test_hash_sg_init(data[i].sg);
+
+ sg_init_table(data[i].sg, XBUFSIZE);
+ for (j = 0; j < XBUFSIZE; j++) {
+ sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
+ memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
+ }
}
pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
@@ -435,16 +831,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
if (speed[i].blen != speed[i].plen)
continue;
- if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+ if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
- speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+ speed[i].blen, XBUFSIZE * PAGE_SIZE);
goto out;
}
if (speed[i].klen)
crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
- for (k = 0; k < 8; k++)
+ for (k = 0; k < num_mb; k++)
ahash_request_set_crypt(data[k].req, data[k].sg,
data[k].result, speed[i].blen);
@@ -453,34 +849,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
i, speed[i].blen, speed[i].plen,
speed[i].blen / speed[i].plen);
- start = get_cycles();
-
- for (k = 0; k < 8; k++) {
- ret = crypto_ahash_digest(data[k].req);
- if (ret == -EINPROGRESS) {
- ret = 0;
- continue;
- }
-
- if (ret)
- break;
-
- crypto_req_done(&data[k].req->base, 0);
- }
-
- for (j = 0; j < k; j++) {
- struct crypto_wait *wait = &data[j].wait;
- int wait_ret;
-
- wait_ret = crypto_wait_req(-EINPROGRESS, wait);
- if (wait_ret)
- ret = wait_ret;
- }
+ if (secs)
+ ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
+ num_mb);
+ else
+ ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
- end = get_cycles();
- cycles = end - start;
- pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
- cycles, cycles / (8 * speed[i].blen));
if (ret) {
pr_err("At least one hashing failed ret=%d\n", ret);
@@ -489,10 +863,10 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
}
out:
- for (k = 0; k < 8; ++k)
+ for (k = 0; k < num_mb; ++k)
ahash_request_free(data[k].req);
- for (k = 0; k < 8; ++k)
+ for (k = 0; k < num_mb; ++k)
testmgr_free_buf(data[k].xbuf);
crypto_free_ahash(tfm);
@@ -736,6 +1110,254 @@ static void test_hash_speed(const char *algo, unsigned int secs,
return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}
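+/*
+ * Per-request state for the multibuffer skcipher speed test: a single
+ * scatterlist (requests run in place), the request, a completion wait
+ * object, and the backing pages.
+ */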
+struct test_mb_skcipher_data {
+ struct scatterlist sg[XBUFSIZE];
+ struct skcipher_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+};
+
+static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc,
+ u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++) {
+ if (enc == ENCRYPT)
+ rc[i] = crypto_skcipher_encrypt(data[i].req);
+ else
+ rc[i] = crypto_skcipher_decrypt(data[i].req);
+ }
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
+ int blen, int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
+ int blen, u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
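+/*
+ * Like test_mb_aead_speed(), but for skciphers: num_mb in-place
+ * requests per batch, over the block_sizes table.
+ */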
+static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
+ struct cipher_speed_template *template,
+ unsigned int tcount, u8 *keysize, u32 num_mb)
+{
+ struct test_mb_skcipher_data *data;
+ struct crypto_skcipher *tfm;
+ unsigned int i, j, iv_len;
+ const char *key;
+ const char *e;
+ u32 *b_size;
+ char iv[128];
+ int ret;
+
+ if (enc == ENCRYPT)
+ e = "encryption";
+ else
+ e = "decryption";
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ tfm = crypto_alloc_skcipher(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto out_free_data;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xbuf);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: skcipher: Failed to allocate request for %s\n",
+ algo);
+ while (i--)
+ skcipher_request_free(data[i].req);
+ goto out_free_xbuf;
+ }
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ skcipher_request_set_callback(data[i].req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &data[i].wait);
+ crypto_init_wait(&data[i].wait);
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ get_driver_name(crypto_skcipher, tfm), e);
+
+ i = 0;
+ do {
+ b_size = block_sizes;
+ do {
+ if (*b_size > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for buffer (%lu)\n",
+ *b_size, XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+ /* Set up tfm global state, i.e. the key */
+
+ memset(tvmem[0], 0xff, PAGE_SIZE);
+ key = tvmem[0];
+ for (j = 0; j < tcount; j++) {
+ if (template[j].klen == *keysize) {
+ key = template[j].key;
+ break;
+ }
+ }
+
+ crypto_skcipher_clear_flags(tfm, ~0);
+
+ ret = crypto_skcipher_setkey(tfm, key, *keysize);
+ if (ret) {
+ pr_err("setkey() failed flags=%x\n",
+ crypto_skcipher_get_flags(tfm));
+ goto out;
+ }
+
+ iv_len = crypto_skcipher_ivsize(tfm);
+ if (iv_len)
+ memset(&iv, 0xff, iv_len);
+
+ /* Now set up the per-request state, i.e. the buffers */
+
+ for (j = 0; j < num_mb; ++j) {
+ struct test_mb_skcipher_data *cur = &data[j];
+ unsigned int k = *b_size;
+ unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
+ unsigned int p = 0;
+
+ sg_init_table(cur->sg, pages);
+
+ while (k > PAGE_SIZE) {
+ sg_set_buf(cur->sg + p, cur->xbuf[p],
+ PAGE_SIZE);
+ memset(cur->xbuf[p], 0xff, PAGE_SIZE);
+ p++;
+ k -= PAGE_SIZE;
+ }
+
+ sg_set_buf(cur->sg + p, cur->xbuf[p], k);
+ memset(cur->xbuf[p], 0xff, k);
+
+ skcipher_request_set_crypt(cur->req, cur->sg,
+ cur->sg, *b_size,
+ iv);
+ }
+
+ if (secs)
+ ret = test_mb_acipher_jiffies(data, enc,
+ *b_size, secs,
+ num_mb);
+ else
+ ret = test_mb_acipher_cycles(data, enc,
+ *b_size, num_mb);
+
+ if (ret) {
+ pr_err("%s() failed flags=%x\n", e,
+ crypto_skcipher_get_flags(tfm));
+ break;
+ }
+ b_size++;
+ i++;
+ } while (*b_size);
+ keysize++;
+ } while (*keysize);
+
+out:
+ for (i = 0; i < num_mb; ++i)
+ skcipher_request_free(data[i].req);
+out_free_xbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xbuf);
+out_free_tfm:
+ crypto_free_skcipher(tfm);
+out_free_data:
+ kfree(data);
+}
+
static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
@@ -1557,16 +2179,24 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
+ test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec,
+ NULL, 0, 16, 16, aead_speed_template_20);
+ test_aead_speed("gcm(aes)", DECRYPT, sec,
+ NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212:
test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
+ test_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec,
+ NULL, 0, 16, 16, aead_speed_template_19);
break;
case 213:
test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_36);
+ test_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, sec,
+ NULL, 0, 16, 8, aead_speed_template_36);
break;
case 214:
@@ -1574,6 +2204,33 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_32);
break;
+ case 215:
+ test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL,
+ 0, 16, 16, aead_speed_template_20, num_mb);
+ test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16_24_32, num_mb);
+ test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL,
+ 0, 16, 16, aead_speed_template_20, num_mb);
+ test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16_24_32, num_mb);
+ break;
+
+ case 216:
+ test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0,
+ 16, 16, aead_speed_template_19, num_mb);
+ test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0,
+ 16, 16, aead_speed_template_19, num_mb);
+ break;
+
+ case 217:
+ test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT,
+ sec, NULL, 0, 16, 8, aead_speed_template_36,
+ num_mb);
+ test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT,
+ sec, NULL, 0, 16, 8, aead_speed_template_36,
+ num_mb);
+ break;
+
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -1778,19 +2435,23 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
if (mode > 400 && mode < 500) break;
/* fall through */
case 422:
- test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 423:
- test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 424:
- test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 425:
- test_mb_ahash_speed("sm3", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 499:
@@ -2008,6 +2669,218 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_8_32);
break;
+ case 600:
+ test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL,
+ 0, speed_template_20_28_36, num_mb);
+ test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL,
+ 0, speed_template_20_28_36, num_mb);
+ break;
+
+ case 601:
+ test_mb_skcipher_speed("ecb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ecb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cbc(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ break;
+
+ case 602:
+ test_mb_skcipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ break;
+
+ case 603:
+ test_mb_skcipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 604:
+ test_mb_skcipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48_64, num_mb);
+ test_mb_skcipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48_64, num_mb);
+ break;
+
+ case 605:
+ test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ break;
+
+ case 606:
+ test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ break;
+
+ case 607:
+ test_mb_skcipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 608:
+ test_mb_skcipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 609:
+ test_mb_skcipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ break;
+
case 1000:
test_available();
break;
@@ -2069,6 +2942,8 @@ module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
"(defaults to zero which uses CPU cycles instead)");
+module_param(num_mb, uint, 0000);
+MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Quick & dirty crypto testing module");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 29d7020b8826..d5e23a142a04 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -177,6 +177,18 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
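+/*
+ * Check that every byte of 'result' still holds the guard value 'c',
+ * i.e. the operation under test did not touch req->result.
+ */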
+static int ahash_guard_result(char *result, char c, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (result[i] != c)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ahash_partial_update(struct ahash_request **preq,
struct crypto_ahash *tfm, const struct hash_testvec *template,
void *hash_buff, int k, int temp, struct scatterlist *sg,
@@ -185,7 +197,8 @@ static int ahash_partial_update(struct ahash_request **preq,
char *state;
struct ahash_request *req;
int statesize, ret = -EINVAL;
- const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
+ static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 };
+ int digestsize = crypto_ahash_digestsize(tfm);
req = *preq;
statesize = crypto_ahash_statesize(
@@ -196,12 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
goto out_nostate;
}
memcpy(state + statesize, guard, sizeof(guard));
+ memset(result, 1, digestsize);
ret = crypto_ahash_export(req, state);
WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
if (ret) {
pr_err("alg: hash: Failed to export() for %s\n", algo);
goto out;
}
+ ret = ahash_guard_result(result, 1, digestsize);
+ if (ret) {
+ pr_err("alg: hash: Failed, export used req->result for %s\n",
+ algo);
+ goto out;
+ }
ahash_request_free(req);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -221,6 +241,12 @@ static int ahash_partial_update(struct ahash_request **preq,
pr_err("alg: hash: Failed to import() for %s\n", algo);
goto out;
}
+ ret = ahash_guard_result(result, 1, digestsize);
+ if (ret) {
+ pr_err("alg: hash: Failed, import used req->result for %s\n",
+ algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_update(req), wait);
if (ret)
goto out;
@@ -316,18 +342,31 @@ static int __test_hash(struct crypto_ahash *tfm,
goto out;
}
} else {
+ memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: update failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d "
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index a714b6293959..6044f6906bd6 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1052,6 +1052,142 @@ static const struct hash_testvec sha3_224_tv_template[] = {
"\xc9\xfd\x55\x74\x49\x44\x79\xba"
"\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea"
"\xd0\xfc\xce\x33",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x7d\x0f\x2f\xb7\x65\x3b\xa7\x26"
+ "\xc3\x88\x20\x71\x15\x06\xe8\x2d"
+ "\xa3\x92\x44\xab\x3e\xe7\xff\x86"
+ "\xb6\x79\x10\x72",
},
};
@@ -1077,6 +1213,142 @@ static const struct hash_testvec sha3_256_tv_template[] = {
"\x49\x10\x03\x76\xa8\x23\x5e\x2c"
"\x82\xe1\xb9\x99\x8a\x99\x9e\x21"
"\xdb\x32\xdd\x97\x49\x6d\x33\x76",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\xde\x41\x04\xbd\xda\xda\xd9\x71"
+ "\xf7\xfa\x80\xf5\xea\x11\x03\xb1"
+ "\x3b\x6a\xbc\x5f\xb9\x66\x26\xf7"
+ "\x8a\x97\xbb\xf2\x07\x08\x38\x30",
},
};
@@ -1109,6 +1381,144 @@ static const struct hash_testvec sha3_384_tv_template[] = {
"\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a"
"\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1"
"\x9e\xef\x51\xac\xd0\x65\x7c\x22",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x1b\x19\x4d\x8f\xd5\x36\x87\x71"
+ "\xcf\xca\x30\x85\x9b\xc1\x25\xc7"
+ "\x00\xcb\x73\x8a\x8e\xd4\xfe\x2b"
+ "\x1a\xa2\xdc\x2e\x41\xfd\x52\x51"
+ "\xd2\x21\xae\x2d\xc7\xae\x8c\x40"
+ "\xb9\xe6\x56\x48\x03\xcd\x88\x6b",
},
};
@@ -1147,6 +1557,146 @@ static const struct hash_testvec sha3_512_tv_template[] = {
"\xba\x1b\x0d\x8d\xc7\x8c\x08\x63"
"\x46\xb5\x33\xb4\x9c\x03\x0d\x99"
"\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x59\xda\x30\xe3\x90\xe4\x3d\xde"
+ "\xf0\xc6\x42\x17\xd7\xb2\x26\x47"
+ "\x90\x28\xa6\x84\xe8\x49\x7a\x86"
+ "\xd6\xb8\x9e\xf8\x07\x59\x21\x03"
+ "\xad\xd2\xed\x48\xa3\xb9\xa5\xf0"
+ "\xb3\xae\x02\x2b\xb8\xaf\xc3\x3b"
+ "\xd6\xb0\x8f\xcb\x76\x8b\xa7\x41"
+ "\x32\xc2\x8e\x50\x91\x86\x90\xfb",
},
};
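Note on the vectors above: .psize = 1023 exercises the final partial block of the SHA-3 transforms, while entries carrying .np = 2, .tap = { 28, 28 } make testmgr feed the plaintext to the hash in two 28-byte update() calls instead of one shot, so chunked updates are verified against the same digest. A minimal sketch of what such a split-input check amounts to, using the kernel shash API (the helper name and the hard-coded algorithm string are assumptions of this sketch, not testmgr code):

    #include <crypto/hash.h>

    /* Hash 'data' in 'np' chunks of sizes taps[0..np-1]; digest lands in 'out'. */
    static int split_digest_sketch(const u8 *data, const unsigned int *taps,
                                   int np, u8 *out)
    {
            struct crypto_shash *tfm;
            int i, err;

            tfm = crypto_alloc_shash("sha3-512", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            do {
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    err = crypto_shash_init(desc);
                    for (i = 0; !err && i < np; i++) {
                            /* One update() per .tap entry, mirroring .np/.tap */
                            err = crypto_shash_update(desc, data, taps[i]);
                            data += taps[i];
                    }
                    if (!err)
                            err = crypto_shash_final(desc, out);
            } while (0);

            crypto_free_shash(tfm);
            return err;
    }

However the input is split, the result must equal the one-shot .digest value.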
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
index 5f62c4f9f6e0..f3a0dd25f871 100644
--- a/crypto/twofish_common.c
+++ b/crypto/twofish_common.c
@@ -24,9 +24,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index ebf7a3efb572..07e62433fbfb 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -23,9 +23,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index df90b332554c..25c75af50d3f 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author:
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 02b6fd0864c8..879dc0604cba 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -213,6 +213,8 @@ source "drivers/mux/Kconfig"
source "drivers/opp/Kconfig"
+source "drivers/visorbus/Kconfig"
+
source "drivers/siox/Kconfig"
source "drivers/slimbus/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index e09663776d8b..7a0438744053 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -186,4 +186,5 @@ obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
+obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 46505396869e..d650c5b6ec90 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -361,22 +361,6 @@ config ACPI_PCI_SLOT
i.e., segment/bus/device/function tuples, with physical slots in
the system. If you are unsure, say N.
-config X86_PM_TIMER
- bool "Power Management Timer Support" if EXPERT
- depends on X86
- default y
- help
- The Power Management Timer is available on all ACPI-capable,
- in most cases even if ACPI is unusable or blacklisted.
-
- This timing source is not affected by power management features
- like aggressive processor idling, throttling, frequency and/or
- voltage scaling, unlike the commonly used Time Stamp Counter
- (TSC) timing source.
-
- You should nearly always say Y here because many modern
- systems require this timer.
-
config ACPI_CONTAINER
bool "Container and Module Devices"
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
@@ -564,3 +548,19 @@ config TPS68470_PMIC_OPREGION
using this, are probed.
endif # ACPI
+
+config X86_PM_TIMER
+ bool "Power Management Timer Support" if EXPERT
+ depends on X86 && (ACPI || JAILHOUSE_GUEST)
+ default y
+ help
+	  The Power Management Timer is available on all ACPI-capable
+	  systems, in most cases even if ACPI is unusable or blacklisted.
+
+ This timing source is not affected by power management features
+ like aggressive processor idling, throttling, frequency and/or
+ voltage scaling, unlike the commonly used Time Stamp Counter
+ (TSC) timing source.
+
+ You should nearly always say Y here because many modern
+ systems require this timer.
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 3ec05aa1a903..2ff5c8c04e3b 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -718,9 +718,9 @@ again:
return size > 0 ? size : ret;
}
-static unsigned int acpi_aml_poll(struct file *file, poll_table *wait)
+static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
{
- int masks = 0;
+ __poll_t masks = 0;
poll_wait(file, &acpi_aml_io.wait, wait);
if (acpi_aml_user_readable())
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 7f2b02cc8ea1..2bcffec8dbf0 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -427,6 +427,142 @@ out:
return 0;
}
+struct lpss_device_links {
+ const char *supplier_hid;
+ const char *supplier_uid;
+ const char *consumer_hid;
+ const char *consumer_uid;
+ u32 flags;
+};
+
+/*
+ * The _DEP method is used to identify dependencies, but instead of creating
+ * device links for every handle in _DEP, only links in the following list are
+ * created. That is necessary because, in the general case, _DEP can refer to
+ * devices that might not have drivers, or that are on different buses, or where
+ * the supplier is not enumerated until after the consumer is probed.
+ */
+static const struct lpss_device_links lpss_device_links[] = {
+ {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+};
+
+static bool hid_uid_match(const char *hid1, const char *uid1,
+ const char *hid2, const char *uid2)
+{
+ return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static bool acpi_lpss_is_supplier(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->supplier_hid, link->supplier_uid);
+}
+
+static bool acpi_lpss_is_consumer(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->consumer_hid, link->consumer_uid);
+}
+
+struct hid_uid {
+ const char *hid;
+ const char *uid;
+};
+
+static int match_hid_uid(struct device *dev, void *data)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct hid_uid *id = data;
+
+ if (!adev)
+ return 0;
+
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ id->hid, id->uid);
+}
+
+static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
+{
+ struct hid_uid data = {
+ .hid = hid,
+ .uid = uid,
+ };
+
+ return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
+}
+
+static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
+{
+ struct acpi_handle_list dep_devices;
+ acpi_status status;
+ int i;
+
+ if (!acpi_has_method(adev->handle, "_DEP"))
+ return false;
+
+ status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
+ &dep_devices);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
+ return false;
+ }
+
+ for (i = 0; i < dep_devices.count; i++) {
+ if (dep_devices.handles[i] == handle)
+ return true;
+ }
+
+ return false;
+}
+
+static void acpi_lpss_link_consumer(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ device_link_add(dev2, dev1, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_link_supplier(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ device_link_add(dev1, dev2, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_create_device_links(struct acpi_device *adev,
+ struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
+ const struct lpss_device_links *link = &lpss_device_links[i];
+
+ if (acpi_lpss_is_supplier(adev, link))
+ acpi_lpss_link_consumer(&pdev->dev, link);
+
+ if (acpi_lpss_is_consumer(adev, link))
+ acpi_lpss_link_supplier(&pdev->dev, link);
+ }
+}
+
static int acpi_lpss_create_device(struct acpi_device *adev,
const struct acpi_device_id *id)
{
@@ -465,6 +601,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
+ /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
+ adev->pnp.type.platform_id = 0;
/* Skip the device, but continue the namespace scan. */
ret = 0;
goto err_out;
@@ -500,6 +638,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) {
+ acpi_lpss_create_device_links(adev, pdev);
return 1;
}
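The lpss_device_links[] whitelist above pairs one supplier HID/UID with one consumer HID/UID, and acpi_lpss_create_device_links() only installs a link when the other end exists on the platform bus and the consumer's _DEP list actually names the supplier's handle, so a stale table entry cannot tie unrelated devices together. What an established DL_FLAG_PM_RUNTIME link means in practice, sketched with hypothetical device pointers:

    struct device_link *link;

    /* consumer_dev/supplier_dev stand in for the two matched platform devices */
    link = device_link_add(consumer_dev, supplier_dev, DL_FLAG_PM_RUNTIME);
    if (!link)
            dev_warn(consumer_dev, "could not link to supplier\n");

    /*
     * With the link in place, the PM core runtime-resumes the supplier
     * before the consumer and will not runtime-suspend the supplier
     * while the consumer remains active.
     */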
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 0972ec0e2eb8..f53ccc680238 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events,
static bool device_id_scheme = false;
module_param(device_id_scheme, bool, 0444);
-static bool only_lcd = false;
-module_param(only_lcd, bool, 0444);
+static int only_lcd = -1;
+module_param(only_lcd, int, 0444);
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
@@ -2136,6 +2136,16 @@ int acpi_video_register(void)
goto leave;
}
+ /*
+	 * We're seeing a lot of bogus backlight interfaces on newer machines
+	 * without an LCD, such as desktops, servers and HDMI sticks. Checking
+	 * the lcd flag fixes this, so enable the check on any machine which is
+	 * win8 ready (where we also prefer the native backlight driver, so
+	 * normally the acpi_video code should not register there anyway).
+ */
+ if (only_lcd == -1)
+ only_lcd = acpi_osi_is_win8();
+
dmi_check_system(video_dmi_table);
ret = acpi_bus_register_driver(&acpi_video_bus);
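Widening only_lcd from bool to int is what enables the auto-detection above: -1 now encodes "not set by the user", letting acpi_video_register() choose a default at runtime while an explicit only_lcd=0 or only_lcd=1 on the kernel command line still wins. The tri-state pattern in isolation (names are illustrative, not from this driver):

    static int feature = -1;        /* -1 = auto, 0 = forced off, 1 = forced on */
    module_param(feature, int, 0444);

    static void apply_default(void)
    {
            if (feature == -1)                       /* no explicit user choice */
                    feature = platform_wants_feature();   /* hypothetical probe */
    }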
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 7a1a68b5ac5c..2243c8164b34 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -80,6 +80,9 @@
prefix, ACPICA_COPYRIGHT, \
prefix
+#define ACPI_COMMON_BUILD_TIME \
+ "Build date/time: %s %s\n", __DATE__, __TIME__
+
/* Macros for usage messages */
#define ACPI_USAGE_HEADER(usage) \
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 71743e5252f5..54b8d9df9423 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -223,6 +223,10 @@ void
acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types);
+
+void
acpi_db_create_execution_threads(char *num_threads_arg,
char *num_loops_arg, char *method_name_arg);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 95eed442703f..45ef3f5dc9ad 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -46,7 +46,7 @@
/*****************************************************************************
*
- * Globals related to the ACPI tables
+ * Globals related to the incoming ACPI tables
*
****************************************************************************/
@@ -87,7 +87,7 @@ ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
/*****************************************************************************
*
- * Mutual exclusion within ACPICA subsystem
+ * Mutual exclusion within the ACPICA subsystem
*
****************************************************************************/
@@ -167,7 +167,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
-/* Misc */
+/* Miscellaneous */
ACPI_GLOBAL(u32, acpi_gbl_original_mode);
ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
@@ -191,10 +191,9 @@ extern const char acpi_gbl_lower_hex_digits[];
extern const char acpi_gbl_upper_hex_digits[];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
/* Lists for tracking memory allocations (debug only) */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
@@ -203,7 +202,7 @@ ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
/*****************************************************************************
*
- * Namespace globals
+ * ACPI Namespace
*
****************************************************************************/
@@ -234,15 +233,20 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
/*****************************************************************************
*
- * Interpreter globals
+ * Interpreter/Parser globals
*
****************************************************************************/
-ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
-
/* Control method single step flag */
ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
+ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
+
+/* ASL/ASL+ converter */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_capture_comments, FALSE);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
/*****************************************************************************
*
@@ -252,7 +256,6 @@ ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
extern struct acpi_bit_register_info
acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
@@ -263,7 +266,6 @@ ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
****************************************************************************/
#if (!ACPI_REDUCED_HARDWARE)
-
ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
ACPI_GLOBAL(struct acpi_gpe_block_info *,
@@ -272,10 +274,8 @@ ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
ACPI_GLOBAL(struct acpi_fixed_event_handler,
acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
-
extern struct acpi_fixed_event_info
acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
-
#endif /* !ACPI_REDUCED_HARDWARE */
/*****************************************************************************
@@ -291,14 +291,14 @@ ACPI_GLOBAL(u32, acpi_gpe_count);
ACPI_GLOBAL(u32, acpi_sci_count);
ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
-/* Support for dynamic control method tracing mechanism */
+/* Dynamic control method tracing mechanism */
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
/*****************************************************************************
*
- * Debugger and Disassembler globals
+ * Debugger and Disassembler
*
****************************************************************************/
@@ -326,7 +326,6 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
#endif
#ifdef ACPI_DEBUGGER
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);
@@ -340,7 +339,6 @@ ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
ACPI_GLOBAL(u8, acpi_gbl_db_terminate_loop);
ACPI_GLOBAL(u8, acpi_gbl_db_threads_terminated);
-
ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
@@ -350,32 +348,33 @@ ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
-/*
- * Statistic globals
- */
+/* Statistics globals */
+
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
ACPI_GLOBAL(u32, acpi_gbl_num_objects);
-
#endif /* ACPI_DEBUGGER */
#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
-
ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_disasm_flag, FALSE);
-
#endif
-/*
- * Meant for the -ca option.
- */
+/*****************************************************************************
+ *
+ * ACPICA application-specific globals
+ *
+ ****************************************************************************/
+
+/* ASL-to-ASL+ conversion utility (implemented within the iASL compiler) */
+
+#ifdef ACPI_ASL_COMPILER
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL);
@@ -386,23 +385,18 @@ ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL);
-ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
@@ -410,30 +404,18 @@ ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
ACPI_INIT_GLOBAL(struct acpi_comment_addr_node,
*acpi_gbl_comment_addr_list_head, NULL);
-
-ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache);
-ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL);
-
ACPI_GLOBAL(char, acpi_gbl_table_sig[4]);
-
-/*****************************************************************************
- *
- * Application globals
- *
- ****************************************************************************/
+#endif
#ifdef ACPI_APPLICATION
-
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
@@ -442,16 +424,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock); /* For print buffer */
ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
-
#endif /* ACPI_APPLICATION */
-/*****************************************************************************
- *
- * Info/help support
- *
- ****************************************************************************/
-
-extern const struct ah_predefined_name asl_predefined_info[];
-extern const struct ah_device_id asl_device_ids[];
-
#endif /* __ACGLOBAL_H__ */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0d45b8bb1678..a56675f0661e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -622,7 +622,7 @@ struct acpi_control_state {
union acpi_parse_object *predicate_op;
u8 *aml_predicate_start; /* Start of if/while predicate */
u8 *package_end; /* End of if/while block */
- u32 loop_count; /* While() loop counter */
+ u64 loop_timeout; /* While() loop timeout */
};
/*
@@ -1218,16 +1218,17 @@ struct acpi_db_method_info {
acpi_object_type *types;
/*
- * Arguments to be passed to method for the command
- * Threads -
- * the Number of threads, ID of current thread and
- * Index of current thread inside all them created.
+	 * Arguments to be passed to the method for the Threads and
+	 * Background commands. Note: ACPI specifies a maximum of 7
+	 * arguments (0 - 6).
+	 *
+	 * For the Threads command these are the number of threads, the
+	 * ID of the current thread, and the index of the current thread
+	 * among all threads created.
*/
char init_args;
#ifdef ACPI_DEBUGGER
- acpi_object_type arg_types[4];
+ acpi_object_type arg_types[ACPI_METHOD_NUM_ARGS];
#endif
- char *arguments[4];
+ char *arguments[ACPI_METHOD_NUM_ARGS];
char num_threads_str[11];
char id_of_thread_str[11];
char index_of_thread_str[11];
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c7f0c96cc00f..128a3d71b598 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -455,7 +455,7 @@
* the plist contains a set of parens to allow variable-length lists.
* These macros are used for both the debug and non-debug versions of the code.
*/
-#define ACPI_ERROR_NAMESPACE(s, e) acpi_ut_namespace_error (AE_INFO, s, e);
+#define ACPI_ERROR_NAMESPACE(s, p, e) acpi_ut_prefixed_namespace_error (AE_INFO, s, p, e);
#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e);
#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist
#define ACPI_INFO_PREDEFINED(plist) acpi_ut_predefined_info plist
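As the comment above this hunk notes, the "plist" convention lets a single macro parameter carry a variable-length argument list: the caller supplies the whole list wrapped in its own parentheses, and the macro body pastes it after the function name. A small illustration (the invocation values are made up):

    #define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist

    /* Called with doubled parentheses... */
    ACPI_WARN_PREDEFINED((AE_INFO, pathname, node_flags,
                          "Return type mismatch for %s", name));

    /*
     * ...which expands to an ordinary variadic call:
     * acpi_ut_predefined_warning (AE_INFO, pathname, node_flags,
     *                             "Return type mismatch for %s", name);
     */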
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 54a0c51b3e37..2fb1bb78d85c 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -289,6 +289,9 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
u8 no_trailing);
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path);
+
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
acpi_status
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 83b75e9db7ef..b6b29d717824 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -118,9 +118,6 @@ extern const char *acpi_gbl_ptyp_decode[];
#ifndef ACPI_MSG_ERROR
#define ACPI_MSG_ERROR "ACPI Error: "
#endif
-#ifndef ACPI_MSG_EXCEPTION
-#define ACPI_MSG_EXCEPTION "ACPI Exception: "
-#endif
#ifndef ACPI_MSG_WARNING
#define ACPI_MSG_WARNING "ACPI Warning: "
#endif
@@ -129,10 +126,10 @@ extern const char *acpi_gbl_ptyp_decode[];
#endif
#ifndef ACPI_MSG_BIOS_ERROR
-#define ACPI_MSG_BIOS_ERROR "ACPI BIOS Error (bug): "
+#define ACPI_MSG_BIOS_ERROR "Firmware Error (ACPI): "
#endif
#ifndef ACPI_MSG_BIOS_WARNING
-#define ACPI_MSG_BIOS_WARNING "ACPI BIOS Warning (bug): "
+#define ACPI_MSG_BIOS_WARNING "Firmware Warning (ACPI): "
#endif
/*
@@ -233,10 +230,10 @@ u64 acpi_ut_implicit_strtoul64(char *string);
*/
acpi_status acpi_ut_init_globals(void);
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-
const char *acpi_ut_get_mutex_name(u32 mutex_id);
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type);
#endif
@@ -641,9 +638,11 @@ void ut_convert_backslashes(char *pathname);
void acpi_ut_repair_name(char *name);
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size);
+
u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
u8
@@ -737,9 +736,11 @@ acpi_ut_predefined_bios_error(const char *module_name,
u8 node_flags, const char *format, ...);
void
-acpi_ut_namespace_error(const char *module_name,
- u32 line_number,
- const char *internal_name, acpi_status lookup_status);
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_name,
+ acpi_status lookup_status);
void
acpi_ut_method_error(const char *module_name,
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index 3b30319752f0..ed088fceb18d 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -67,6 +67,8 @@ static acpi_status
acpi_db_execution_walk(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context);
+
/*******************************************************************************
*
* FUNCTION: acpi_db_delete_objects
@@ -229,7 +231,7 @@ static acpi_status acpi_db_execute_setup(struct acpi_db_method_info *info)
ACPI_FUNCTION_NAME(db_execute_setup);
- /* Catenate the current scope to the supplied name */
+ /* Concatenate the current scope to the supplied name */
info->pathname[0] = 0;
if ((info->name[0] != '\\') && (info->name[0] != '/')) {
@@ -611,6 +613,112 @@ static void ACPI_SYSTEM_XFACE acpi_db_method_thread(void *context)
/*******************************************************************************
*
+ * FUNCTION: acpi_db_single_execution_thread
+ *
+ * PARAMETERS: context - Method info struct
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create one thread and execute a method
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context)
+{
+ struct acpi_db_method_info *info = context;
+ acpi_status status;
+ struct acpi_buffer return_obj;
+
+ acpi_os_printf("\n");
+
+ status = acpi_db_execute_method(info, &return_obj);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s During evaluation of %s\n",
+ acpi_format_exception(status), info->pathname);
+ return;
+ }
+
+ /* Display a return object, if any */
+
+ if (return_obj.length) {
+ acpi_os_printf("Evaluation of %s returned object %p, "
+ "external buffer length %X\n",
+ acpi_gbl_db_method_info.pathname,
+ return_obj.pointer, (u32)return_obj.length);
+
+ acpi_db_dump_external_object(return_obj.pointer, 1);
+ }
+
+ acpi_os_printf("\nBackground thread completed\n%c ",
+ ACPI_DEBUGGER_COMMAND_PROMPT);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_db_create_execution_thread
+ *
+ * PARAMETERS: method_name_arg - Control method to execute
+ * arguments - Array of arguments to the method
+ * types - Corresponding array of object types
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create a single thread to evaluate a namespace object. Handles
+ *              arguments passed on the command line for control methods.
+ *
+ ******************************************************************************/
+
+void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types)
+{
+ acpi_status status;
+ u32 i;
+
+ memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info));
+ acpi_gbl_db_method_info.name = method_name_arg;
+ acpi_gbl_db_method_info.init_args = 1;
+ acpi_gbl_db_method_info.args = acpi_gbl_db_method_info.arguments;
+ acpi_gbl_db_method_info.types = acpi_gbl_db_method_info.arg_types;
+
+ /* Setup method arguments, up to 7 (0-6) */
+
+ for (i = 0; (i < ACPI_METHOD_NUM_ARGS) && *arguments; i++) {
+ acpi_gbl_db_method_info.arguments[i] = *arguments;
+ arguments++;
+
+ acpi_gbl_db_method_info.arg_types[i] = *types;
+ types++;
+ }
+
+ status = acpi_db_execute_setup(&acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+	/* Get the NS node; this also verifies that the pathname exists */
+
+ status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname,
+ &acpi_gbl_db_method_info.method);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s Could not get handle for %s\n",
+ acpi_format_exception(status),
+ acpi_gbl_db_method_info.pathname);
+ return;
+ }
+
+ status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD,
+ acpi_db_single_execution_thread,
+ &acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ acpi_os_printf("\nBackground thread started\n");
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_create_execution_threads
*
* PARAMETERS: num_threads_arg - Number of threads to create
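acpi_db_create_execution_thread() above implements the new Background command: it copies up to seven arguments into the global method-info block, resolves the target handle, then queues acpi_db_single_execution_thread() on an OSL worker thread so the method runs while the debugger prompt stays responsive. The hand-off reduced to a sketch (only the ACPICA calls shown are real; the framing around them is illustrative):

    static void ACPI_SYSTEM_XFACE worker(void *context)
    {
            struct acpi_db_method_info *info = context;
            struct acpi_buffer ret = { ACPI_ALLOCATE_BUFFER, NULL };

            /* Runs on the OSL thread; the prompt thread is not blocked */
            (void)acpi_db_execute_method(info, &ret);
    }

    /* Dispatcher side: queue the work and return to the prompt immediately */
    status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD, worker,
                             &acpi_gbl_db_method_info);
    if (ACPI_SUCCESS(status))
            acpi_os_printf("\nBackground thread started\n");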
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index 4d81ea291d93..cf9607945704 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -99,8 +99,8 @@ void acpi_db_open_debug_file(char *name)
}
acpi_os_printf("Debug output file %s opened\n", name);
- strncpy(acpi_gbl_db_debug_filename, name,
- sizeof(acpi_gbl_db_debug_filename));
+ acpi_ut_safe_strncpy(acpi_gbl_db_debug_filename, name,
+ sizeof(acpi_gbl_db_debug_filename));
acpi_gbl_db_output_to_file = TRUE;
}
#endif
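The strncpy() replaced above left acpi_gbl_db_debug_filename unterminated whenever name filled the destination exactly, since strncpy() only NUL-terminates when the source is shorter than the limit. acpi_ut_safe_strncpy() truncates and always terminates; the guarantee it provides, sketched (the idea, not the ACPICA source verbatim):

    void safe_strncpy_sketch(char *dest, const char *source, size_t dest_size)
    {
            /* Copy at most dest_size - 1 bytes... */
            strncpy(dest, source, dest_size - 1);

            /* ...and force termination even when the source was truncated */
            dest[dest_size - 1] = '\0';
    }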
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 2626d79db064..954ca3b981a7 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -136,6 +136,7 @@ enum acpi_ex_debugger_commands {
CMD_UNLOAD,
CMD_TERMINATE,
+ CMD_BACKGROUND,
CMD_THREADS,
CMD_TEST,
@@ -212,6 +213,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"UNLOAD", 1},
{"TERMINATE", 0},
+ {"BACKGROUND", 1},
{"THREADS", 3},
{"TEST", 1},
@@ -222,9 +224,56 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
/*
* Help for all debugger commands. First argument is the number of lines
* of help to output for the command.
+ *
+ * Note: Some commands are not supported by the kernel-level version of
+ * the debugger.
*/
static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
- {0, "\nGeneral-Purpose Commands:", "\n"},
+ {0, "\nNamespace Access:", "\n"},
+ {1, " Businfo", "Display system bus info\n"},
+ {1, " Disassemble <Method>", "Disassemble a control method\n"},
+ {1, " Find <AcpiName> (? is wildcard)",
+ "Find ACPI name(s) with wildcards\n"},
+ {1, " Integrity", "Validate namespace integrity\n"},
+ {1, " Methods", "Display list of loaded control methods\n"},
+ {1, " Namespace [Object] [Depth]",
+ "Display loaded namespace tree/subtree\n"},
+ {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
+ {1, " Objects [ObjectType]",
+ "Display summary of all objects or just given type\n"},
+ {1, " Owner <OwnerId> [Depth]",
+ "Display loaded namespace by object owner\n"},
+ {1, " Paths", "Display full pathnames of namespace objects\n"},
+ {1, " Predefined", "Check all predefined names\n"},
+ {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
+ {1, " References <Addr>", "Find all references to object at addr\n"},
+ {1, " Resources [DeviceName]",
+ "Display Device resources (no arg = all devices)\n"},
+ {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
+ {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
+ {1, " Type <Object>", "Display object type\n"},
+
+ {0, "\nControl Method Execution:", "\n"},
+ {1, " Evaluate <Namepath> [Arguments]",
+ "Evaluate object or control method\n"},
+ {1, " Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Background <Namepath> [Arguments]",
+ "Evaluate object/method in a separate thread\n"},
+ {1, " Thread <Threads><Loops><NamePath>",
+ "Spawn threads to execute method(s)\n"},
+#endif
+ {1, " Debug <Namepath> [Arguments]", "Single-Step a control method\n"},
+ {7, " [Arguments] formats:", "Control method argument formats\n"},
+ {1, " Hex Integer", "Integer\n"},
+ {1, " \"Ascii String\"", "String\n"},
+ {1, " (Hex Byte List)", "Buffer\n"},
+ {1, " (01 42 7A BF)", "Buffer example (4 bytes)\n"},
+ {1, " [Package Element List]", "Package\n"},
+ {1, " [0x01 0x1234 \"string\"]",
+ "Package example (3 elements)\n"},
+
+ {0, "\nMiscellaneous:", "\n"},
{1, " Allocations", "Display list of current memory allocations\n"},
{2, " Dump <Address>|<Namepath>", "\n"},
{0, " [Byte|Word|Dword|Qword]",
@@ -248,46 +297,30 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Stack", "Display CPU stack usage\n"},
{1, " Tables", "Info about current ACPI table(s)\n"},
{1, " Tables", "Display info about loaded ACPI tables\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Terminate", "Delete namespace and all internal objects\n"},
+#endif
{1, " ! <CommandNumber>", "Execute command from history buffer\n"},
{1, " !!", "Execute last command again\n"},
- {0, "\nNamespace Access Commands:", "\n"},
- {1, " Businfo", "Display system bus info\n"},
- {1, " Disassemble <Method>", "Disassemble a control method\n"},
- {1, " Find <AcpiName> (? is wildcard)",
- "Find ACPI name(s) with wildcards\n"},
- {1, " Integrity", "Validate namespace integrity\n"},
- {1, " Methods", "Display list of loaded control methods\n"},
- {1, " Namespace [Object] [Depth]",
- "Display loaded namespace tree/subtree\n"},
- {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
- {1, " Objects [ObjectType]",
- "Display summary of all objects or just given type\n"},
- {1, " Owner <OwnerId> [Depth]",
- "Display loaded namespace by object owner\n"},
- {1, " Paths", "Display full pathnames of namespace objects\n"},
- {1, " Predefined", "Check all predefined names\n"},
- {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
- {1, " References <Addr>", "Find all references to object at addr\n"},
- {1, " Resources [DeviceName]",
- "Display Device resources (no arg = all devices)\n"},
- {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
- {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
- {1, " Type <Object>", "Display object type\n"},
+ {0, "\nMethod and Namespace Debugging:", "\n"},
+ {5, " Trace <State> [<Namepath>] [Once]",
+ "Trace control method execution\n"},
+ {1, " Enable", "Enable all messages\n"},
+ {1, " Disable", "Disable tracing\n"},
+ {1, " Method", "Enable method execution messages\n"},
+ {1, " Opcode", "Enable opcode execution messages\n"},
+ {3, " Test <TestName>", "Invoke a debug test\n"},
+ {1, " Objects", "Read/write/compare all namespace data objects\n"},
+ {1, " Predefined",
+ "Validate all ACPI predefined names (_STA, etc.)\n"},
+ {1, " Execute predefined",
+ "Execute all predefined (public) methods\n"},
- {0, "\nControl Method Execution Commands:", "\n"},
+ {0, "\nControl Method Single-Step Execution:", "\n"},
{1, " Arguments (or Args)", "Display method arguments\n"},
{1, " Breakpoint <AmlOffset>", "Set an AML execution breakpoint\n"},
{1, " Call", "Run to next control method invocation\n"},
- {1, " Debug <Namepath> [Arguments]", "Single Step a control method\n"},
- {6, " Evaluate", "Synonym for Execute\n"},
- {5, " Execute <Namepath> [Arguments]", "Execute control method\n"},
- {1, " Hex Integer", "Integer method argument\n"},
- {1, " \"Ascii String\"", "String method argument\n"},
- {1, " (Hex Byte List)", "Buffer method argument\n"},
- {1, " [Package Element List]", "Package method argument\n"},
- {5, " Execute predefined",
- "Execute all predefined (public) methods\n"},
{1, " Go", "Allow method to run to completion\n"},
{1, " Information", "Display info about the current method\n"},
{1, " Into", "Step into (not over) a method call\n"},
@@ -296,41 +329,24 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Results", "Display method result stack\n"},
{1, " Set <A|L> <#> <Value>", "Set method data (Arguments/Locals)\n"},
{1, " Stop", "Terminate control method\n"},
- {5, " Trace <State> [<Namepath>] [Once]",
- "Trace control method execution\n"},
- {1, " Enable", "Enable all messages\n"},
- {1, " Disable", "Disable tracing\n"},
- {1, " Method", "Enable method execution messages\n"},
- {1, " Opcode", "Enable opcode execution messages\n"},
{1, " Tree", "Display control method calling tree\n"},
{1, " <Enter>", "Single step next AML opcode (over calls)\n"},
#ifdef ACPI_APPLICATION
- {0, "\nHardware Simulation Commands:", "\n"},
- {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
- {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
- {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
- {1, " Gpes", "Display info on all GPE devices\n"},
- {1, " Sci", "Generate an SCI\n"},
- {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
-
- {0, "\nFile I/O Commands:", "\n"},
+ {0, "\nFile Operations:", "\n"},
{1, " Close", "Close debug output file\n"},
{1, " Load <Input Filename>", "Load ACPI table from a file\n"},
{1, " Open <Output Filename>", "Open a file for debug output\n"},
{1, " Unload <Namepath>",
"Unload an ACPI table via namespace object\n"},
- {0, "\nUser Space Commands:", "\n"},
- {1, " Terminate", "Delete namespace and all internal objects\n"},
- {1, " Thread <Threads><Loops><NamePath>",
- "Spawn threads to execute method(s)\n"},
-
- {0, "\nDebug Test Commands:", "\n"},
- {3, " Test <TestName>", "Invoke a debug test\n"},
- {1, " Objects", "Read/write/compare all namespace data objects\n"},
- {1, " Predefined",
- "Execute all ACPI predefined names (_STA, etc.)\n"},
+ {0, "\nHardware Simulation:", "\n"},
+ {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
+ {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
+ {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
+ {1, " Gpes", "Display info on all GPE devices\n"},
+ {1, " Sci", "Generate an SCI\n"},
+ {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
#endif
{0, NULL, NULL}
};
@@ -442,11 +458,15 @@ static void acpi_db_display_help(char *command)
/* No argument to help, display help for all commands */
+ acpi_os_printf("\nSummary of AML Debugger Commands\n\n");
+
while (next->invocation) {
acpi_os_printf("%-38s%s", next->invocation,
next->description);
next++;
}
+ acpi_os_printf("\n");
+
} else {
/* Display help for all commands that match the substring */
@@ -1087,6 +1107,13 @@ acpi_db_command_dispatch(char *input_buffer,
/* acpi_initialize (NULL); */
break;
+ case CMD_BACKGROUND:
+
+ acpi_db_create_execution_thread(acpi_gbl_db_args[1],
+ &acpi_gbl_db_args[2],
+ &acpi_gbl_db_arg_types[2]);
+ break;
+
case CMD_THREADS:
acpi_db_create_execution_threads(acpi_gbl_db_args[1],
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index f470e81b0499..4b6ebc2a2851 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -118,6 +118,8 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
control_state->control.package_end =
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
+ control_state->control.loop_timeout = acpi_os_get_timer() +
+ (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
@@ -206,15 +208,15 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
/* Predicate was true, the body of the loop was just executed */
/*
- * This loop counter mechanism allows the interpreter to escape
- * possibly infinite loops. This can occur in poorly written AML
- * when the hardware does not respond within a while loop and the
- * loop does not implement a timeout.
+ * This infinite loop detection mechanism allows the interpreter
+ * to escape possibly infinite loops. This can occur in poorly
+ * written AML when the hardware does not respond within a while
+ * loop and the loop does not implement a timeout.
*/
- control_state->control.loop_count++;
- if (control_state->control.loop_count >
- acpi_gbl_max_loop_iterations) {
- status = AE_AML_INFINITE_LOOP;
+ if (ACPI_TIME_AFTER(acpi_os_get_timer(),
+ control_state->control.
+ loop_timeout)) {
+ status = AE_AML_LOOP_TIMEOUT;
break;
}
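The hunk above trades the old iteration counter for a wall-clock deadline: acpi_os_get_timer() returns time in 100 ns ticks, so the deadline recorded when a While() begins is "now" plus acpi_gbl_max_loop_iterations seconds (the ACPI_100NSEC_PER_SEC scaling shows the global is reinterpreted from a count to a timeout), and every true predicate re-checks it. In outline (the predicate/body helpers are placeholders):

    u64 deadline = acpi_os_get_timer() +
                   ((u64)acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);

    while (predicate_is_true()) {                 /* hypothetical helpers */
            execute_loop_body();

            /* ACPI_TIME_AFTER is wrap-safe, like the kernel's time_after() */
            if (ACPI_TIME_AFTER(acpi_os_get_timer(), deadline))
                    return AE_AML_LOOP_TIMEOUT;
    }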
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 7bcf5f5ea029..0cab34a593d5 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -209,7 +209,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
}
@@ -383,7 +384,9 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
walk_state,
&info->connection_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(child->common.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ child->common.
value.name,
status);
return_ACPI_STATUS(status);
@@ -402,7 +405,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
ACPI_NS_DONT_OPEN_SCOPE,
walk_state, &info->field_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
return_ACPI_STATUS(status);
} else {
@@ -498,7 +502,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -618,7 +623,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
if (status != AE_ALREADY_EXISTS) {
return_ACPI_STATUS(status);
@@ -681,7 +687,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -695,7 +702,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -765,7 +773,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -778,7 +787,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.data_register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 82448551781b..b21fe084ffc8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -112,7 +112,9 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
acpi_namespace_node,
&(op->common.node)));
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(op->common.value.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ op->common.value.
string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 6d487edfe2de..5a602b75084e 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -297,8 +297,10 @@ acpi_ds_init_package_element(u8 object_type,
{
union acpi_operand_object **element_ptr;
+ ACPI_FUNCTION_TRACE(ds_init_package_element);
+
if (!source_object) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*
@@ -329,7 +331,7 @@ acpi_ds_init_package_element(u8 object_type,
source_object->package.flags |= AOPOBJ_DATA_VALID;
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -352,6 +354,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
union acpi_generic_state scope_info;
union acpi_operand_object *element = *element_ptr;
struct acpi_namespace_node *resolved_node;
+ struct acpi_namespace_node *original_node;
char *external_path = NULL;
acpi_object_type type;
@@ -441,6 +444,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
* will remain as named references. This behavior is not described
* in the ACPI spec, but it appears to be an oversight.
*/
+ original_node = resolved_node;
status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
if (ACPI_FAILURE(status)) {
return_VOID;
@@ -468,26 +472,27 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
*/
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_THERMAL:
-
- /* TBD: This may not be necesssary */
-
- acpi_ut_add_reference(resolved_node->object);
+ case ACPI_TYPE_METHOD:
break;
case ACPI_TYPE_MUTEX:
- case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
+ /* acpi_ex_resolve_node_to_value gave these an extra reference */
+
+ acpi_ut_remove_reference(original_node->object);
break;
default:
/*
* For all other types - the node was resolved to an actual
- * operand object with a value, return the object
+ * operand object with a value, return the object. Remove
+ * a reference on the existing object.
*/
+ acpi_ut_remove_reference(element);
*element_ptr = (union acpi_operand_object *)resolved_node;
break;
}
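The reference-count changes above plug a leak and a double-count: acpi_ex_resolve_node_to_value() hands back its result with an extra reference, so node types that remain references drop that extra count through the saved original_node, and when an element is replaced by the resolved object the reference held on the old element must be released before the pointer is overwritten. The invariant in sketch form (resolve_to_value() is a stand-in name, not an ACPICA function):

    old      = *element_ptr;              /* package holds one reference */
    resolved = resolve_to_value(node);    /* arrives with one extra reference */

    acpi_ut_remove_reference(old);        /* release the displaced object */
    *element_ptr = resolved;              /* package now owns that reference */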
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 0dabd9b95684..4c5faf629a83 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -583,7 +583,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(name_string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ name_string, status);
}
}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index eaa859a89702..5771e4e4a99a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -207,7 +207,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path,
+ status);
return_ACPI_STATUS(status);
}
@@ -375,7 +376,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ path, status);
return_ACPI_STATUS(status);
}
}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index aad83ef5a4ec..b3d0aaec8203 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -184,11 +184,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
if (status == AE_NOT_FOUND) {
status = AE_OK;
} else {
- ACPI_ERROR_NAMESPACE(buffer_ptr,
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ buffer_ptr,
status);
}
#else
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
#endif
return_ACPI_STATUS(status);
}
@@ -343,7 +346,8 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
return_ACPI_STATUS(status);
}
@@ -719,7 +723,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/
op->common.node = new_node;
} else {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
}
break;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 28b447ff92df..bb58419f0d61 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -298,6 +298,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region.
space_id)));
+
+ /*
+ * Special case for an EC timeout. These are seen so frequently
+ * that an additional error message is helpful
+ */
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_EC) &&
+ (status == AE_TIME)) {
+ ACPI_ERROR((AE_INFO,
+ "Timeout from EC hardware or EC device driver"));
+ }
}
if (!(handler_desc->address_space.handler_flags &
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 83398dc4b7c2..b2ff61bdb9a8 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -617,10 +617,11 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
u32 length;
u32 index;
- ACPI_FUNCTION_NAME(ex_dump_operand)
+ ACPI_FUNCTION_NAME(ex_dump_operand);
- /* Check if debug output enabled */
- if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
return;
}
@@ -904,7 +905,7 @@ void
acpi_ex_dump_operands(union acpi_operand_object **operands,
const char *opcode_name, u32 num_operands)
{
- ACPI_FUNCTION_NAME(ex_dump_operands);
+ ACPI_FUNCTION_TRACE(ex_dump_operands);
if (!opcode_name) {
opcode_name = "UNKNOWN";
@@ -928,7 +929,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"**** End operand dump for [%s]\n", opcode_name));
- return;
+ return_VOID;
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index a2f4e25d45b1..5b4282902a83 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -150,10 +150,10 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer)
*
******************************************************************************/
acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 *time_elapsed)
{
acpi_status status;
- u32 delta_ticks;
+ u64 delta_ticks;
u64 quotient;
ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
@@ -168,30 +168,29 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
return_ACPI_STATUS(AE_SUPPORT);
}
+ if (start_ticks == end_ticks) {
+ *time_elapsed = 0;
+ return_ACPI_STATUS(AE_OK);
+ }
+
/*
* Compute Tick Delta:
* Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
*/
- if (start_ticks < end_ticks) {
- delta_ticks = end_ticks - start_ticks;
- } else if (start_ticks > end_ticks) {
+ delta_ticks = end_ticks;
+ if (start_ticks > end_ticks) {
if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
/* 24-bit Timer */
- delta_ticks =
- (((0x00FFFFFF - start_ticks) +
- end_ticks) & 0x00FFFFFF);
+ delta_ticks |= (u64)1 << 24;
} else {
/* 32-bit Timer */
- delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
+ delta_ticks |= (u64)1 << 32;
}
- } else { /* start_ticks == end_ticks */
-
- *time_elapsed = 0;
- return_ACPI_STATUS(AE_OK);
}
+ delta_ticks -= start_ticks;
/*
* Compute Duration (Requires a 64-bit multiply and divide):
@@ -199,10 +198,10 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
* time_elapsed (microseconds) =
* (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
*/
- status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC,
+ status = acpi_ut_short_divide(delta_ticks * ACPI_USEC_PER_SEC,
ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
- *time_elapsed = (u32) quotient;
+ *time_elapsed = (u32)quotient;
return_ACPI_STATUS(status);
}
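The rewritten delta computation above folds the two rollover branches into one: widening end_ticks to u64 and OR-ing in bit 24 (24-bit timer) or bit 32 (32-bit timer) lends the subtraction one full counter period, which is arithmetically identical to the old masked expressions but leaves a single subtract for all three cases. A worked 24-bit example with invented values:

    u32 start_ticks = 0x00FFFFF0;            /* near the top of the counter */
    u32 end_ticks   = 0x00000010;            /* sampled just after the wrap */

    u64 delta_ticks = end_ticks;
    if (start_ticks > end_ticks)
            delta_ticks |= (u64)1 << 24;     /* borrow one full 24-bit period */
    delta_ticks -= start_ticks;              /* 0x01000010 - 0x00FFFFF0 = 0x20 */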
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 3094cec4eab4..d1679035d5f3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -128,14 +128,14 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
acpi_io_address last_address;
const struct acpi_port_info *port_info;
- ACPI_FUNCTION_NAME(hw_validate_io_request);
+ ACPI_FUNCTION_TRACE(hw_validate_io_request);
/* Supported widths are 8/16/32 */
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
ACPI_ERROR((AE_INFO,
"Bad BitWidth parameter: %8.8X", bit_width));
- return (AE_BAD_PARAMETER);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
port_info = acpi_protected_ports;
@@ -153,13 +153,13 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
ACPI_ERROR((AE_INFO,
"Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
ACPI_FORMAT_UINT64(address), byte_width));
- return (AE_LIMIT);
+ return_ACPI_STATUS(AE_LIMIT);
}
/* Exit if requested address is not within the protected port table */
if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/* Check request against the list of protected I/O ports */
@@ -180,8 +180,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address),
byte_width, port_info->name,
port_info->start,
@@ -198,7 +198,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
}
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/******************************************************************************
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index f2733f51ca8d..33e652a12fca 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -644,17 +644,18 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node->object;
}
}
-#ifdef ACPI_ASL_COMPILER
- if (!acpi_gbl_disasm_flag &&
- (this_node->flags & ANOBJ_IS_EXTERNAL)) {
- this_node->flags |= IMPLICIT_EXTERNAL;
- }
-#endif
}
/* Special handling for the last segment (num_segments == 0) */
else {
+#ifdef ACPI_ASL_COMPILER
+ if (!acpi_gbl_disasm_flag
+ && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+ this_node->flags &= ~IMPLICIT_EXTERNAL;
+ }
+#endif
+
/*
* Sanity typecheck of the target object:
*
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 539d775bbc92..d55dcc82f434 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -495,7 +495,8 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
/* Check if we are resolving a named reference within a package */
- ACPI_ERROR_NAMESPACE(original_object->string.pointer, status);
+ ACPI_ERROR_NAMESPACE(&scope_info,
+ original_object->string.pointer, status);
goto error_exit;
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index a410760a0308..22c92d1a24d8 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -49,6 +49,9 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
+/* Local Prototypes */
+static void acpi_ns_normalize_pathname(char *original_path);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_external_pathname
@@ -63,6 +66,7 @@ ACPI_MODULE_NAME("nsnames")
* for error and debug statements.
*
******************************************************************************/
+
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
char *name_buffer;
@@ -352,3 +356,148 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
return_PTR(name_buffer);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_build_prefixed_pathname
+ *
+ * PARAMETERS: prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Construct a fully qualified pathname from a concatenation of:
+ * 1) Path associated with the prefix_scope namespace node
+ * 2) External path representation of the Internal path
+ *
+ ******************************************************************************/
+
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path)
+{
+ acpi_status status;
+ char *full_path = NULL;
+ char *external_path = NULL;
+ char *prefix_path = NULL;
+ u32 prefix_path_length = 0;
+
+ /* If there is a prefix, get the pathname to it */
+
+ if (prefix_scope && prefix_scope->scope.node) {
+ prefix_path =
+ acpi_ns_get_normalized_pathname(prefix_scope->scope.node,
+ TRUE);
+ if (prefix_path) {
+ prefix_path_length = strlen(prefix_path);
+ }
+ }
+
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX, internal_path,
+ NULL, &external_path);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Merge the prefix path and the path. 2 is for one dot and trailing null */
+
+ full_path =
+ ACPI_ALLOCATE_ZEROED(prefix_path_length + strlen(external_path) +
+ 2);
+ if (!full_path) {
+ goto cleanup;
+ }
+
+ /* Don't merge if the External path is already fully qualified */
+
+ if (prefix_path && (*external_path != '\\') && (*external_path != '^')) {
+ strcat(full_path, prefix_path);
+ if (prefix_path[1]) {
+ strcat(full_path, ".");
+ }
+ }
+
+ acpi_ns_normalize_pathname(external_path);
+ strcat(full_path, external_path);
+
+cleanup:
+ if (prefix_path) {
+ ACPI_FREE(prefix_path);
+ }
+ if (external_path) {
+ ACPI_FREE(external_path);
+ }
+
+ return (full_path);
+}
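A hypothetical call, assuming prefix_scope resolves to the \_SB_.PCI0 scope node (the variable names are illustrative):

	char *full_path;

	full_path = acpi_ns_build_prefixed_pathname(prefix_scope, "DEV0");
	if (full_path) {
		/* full_path is "\_SB.PCI0.DEV0": normalized and dot-joined */
		ACPI_FREE(full_path);
	}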
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_normalize_pathname
+ *
+ * PARAMETERS: original_path - Path to be normalized, in External format
+ *
+ * RETURN: The original path is processed in-place
+ *
+ * DESCRIPTION: Remove trailing underscores from each element of a path.
+ *
+ * For example: \A___.B___.C___ becomes \A.B.C
+ *
+ ******************************************************************************/
+
+static void acpi_ns_normalize_pathname(char *original_path)
+{
+ char *input_path = original_path;
+ char *new_path_buffer;
+ char *new_path;
+ u32 i;
+
+ /* Allocate a temp buffer in which to construct the new path */
+
+ new_path_buffer = ACPI_ALLOCATE_ZEROED(strlen(input_path) + 1);
+ new_path = new_path_buffer;
+ if (!new_path_buffer) {
+ return;
+ }
+
+ /* Special characters may appear at the beginning of the path */
+
+ if (*input_path == '\\') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ while (*input_path == '^') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ /* Remainder of the path */
+
+ while (*input_path) {
+
+ /* Do one nameseg at a time */
+
+ for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
+ if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
+ *new_path = *input_path;
+ new_path++;
+ }
+
+ input_path++;
+ }
+
+ /* Dot means that there are more namesegs to come */
+
+ if (*input_path == '.') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+ }
+
+ *new_path = 0;
+ strcpy(original_path, new_path_buffer);
+ ACPI_FREE(new_path_buffer);
+}
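A short in-place usage sketch (note the doubled backslash required in C source):

	char path[] = "\\_SB_.PCI0.GFX0";

	acpi_ns_normalize_pathname(path);	/* path is now "\_SB.PCI0.GFX0" */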
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5de8957f5ef0..e91dbee9235f 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -417,6 +417,7 @@ acpi_ns_search_and_enter(u32 target_name,
if (flags & ACPI_NS_EXTERNAL ||
(walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
+ new_node->flags |= IMPLICIT_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 783f4c838aee..9b51f65823b2 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -61,10 +61,10 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
*
* PARAMETERS: handle - Object handle (optional)
* pathname - Object pathname (optional)
- * external_params - List of parameters to pass to method,
+ * external_params - List of parameters to pass to a method,
* terminated by NULL. May be NULL
* if no parameters are being passed.
- * return_buffer - Where to put method's return value (if
+ * return_buffer - Where to put the object's return value (if
* any). If NULL, no value is returned.
* return_type - Expected type of return object
*
@@ -100,13 +100,14 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
+ /* Get a handle here, in order to build an error message if needed */
+
+ target_handle = handle;
if (pathname) {
status = acpi_get_handle(handle, pathname, &target_handle);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- } else {
- target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index eb9dfaca555f..171e2faa7c50 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -361,7 +361,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
/* Final exception check (may have been changed from code above) */
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path, status);
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 0bef6df71bba..c0b179883ff2 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -372,16 +372,10 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
* external declaration opcode. Setting walk_state->Aml to
* walk_state->parser_state.Aml + 2 increments the
* walk_state->Aml past the object type and the paramcount of the
- * external opcode. For the error message, only print the AML
- * offset. We could attempt to print the name but this may cause
- * a segmentation fault when printing the namepath because the
- * AML may be incorrect.
+ * external opcode.
*/
- acpi_os_printf
- ("// Invalid external declaration at AML offset 0x%x.\n",
- walk_state->aml -
- walk_state->parser_state.aml_start);
walk_state->aml = walk_state->parser_state.aml + 2;
+ walk_state->parser_state.aml = walk_state->aml;
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
}
#endif
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 02642760cb93..cd59dfe6a47d 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -94,9 +94,11 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
op->common.aml_opcode = opcode;
- ACPI_DISASM_ONLY_MEMBERS(strncpy(op->common.aml_op_name,
- (acpi_ps_get_opcode_info(opcode))->
- name, sizeof(op->common.aml_op_name)));
+ ACPI_DISASM_ONLY_MEMBERS(acpi_ut_safe_strncpy(op->common.aml_op_name,
+ (acpi_ps_get_opcode_info
+ (opcode))->name,
+ sizeof(op->common.
+ aml_op_name)));
}
/*******************************************************************************
@@ -158,10 +160,10 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
if (opcode == AML_SCOPE_OP) {
acpi_gbl_current_scope = op;
}
- }
- if (gbl_capture_comments) {
- ASL_CV_TRANSFER_COMMENTS(op);
+ if (acpi_gbl_capture_comments) {
+ ASL_CV_TRANSFER_COMMENTS(op);
+ }
}
return (op);
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 615a885e2ca3..cff7154b7fee 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -163,6 +163,9 @@ acpi_debug_print(u32 requested_debug_level,
{
acpi_thread_id thread_id;
va_list args;
+#ifdef ACPI_APPLICATION
+ int fill_count;
+#endif
/* Check if debug output enabled */
@@ -202,10 +205,21 @@ acpi_debug_print(u32 requested_debug_level,
acpi_os_printf("[%u] ", (u32)thread_id);
}
- acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level);
-#endif
+ fill_count = 48 - acpi_gbl_nesting_level -
+ strlen(acpi_ut_trim_function_name(function_name));
+ if (fill_count < 0) {
+ fill_count = 0;
+ }
+
+ acpi_os_printf("[%02ld] %*s",
+ acpi_gbl_nesting_level, acpi_gbl_nesting_level + 1, " ");
+ acpi_os_printf("%s%*s: ",
+ acpi_ut_trim_function_name(function_name), fill_count,
+ " ");
+#else
acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
+#endif
va_start(args, format);
acpi_os_vprintf(format, args);
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 02cd2c2d961a..55debbad487d 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -395,11 +395,6 @@ const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
return (acpi_gbl_ref_class_names[object->reference.class]);
}
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*
- * Strings and procedures used for debug only
- */
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_mutex_name
@@ -433,6 +428,12 @@ const char *acpi_ut_get_mutex_name(u32 mutex_id)
return (acpi_gbl_mutex_names[mutex_id]);
}
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+/*
+ * Strings and procedures used for debug only
+ */
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_notify_name
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index e3368186e1c1..42388dcb5ccc 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,6 +182,78 @@ acpi_ut_predefined_bios_error(const char *module_name,
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_prefixed_namespace_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ * lookup_status - Exception code from NS lookup
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print an error message with the full pathname constructed this way:
+ *
+ * prefix_scope_node_full_path.externalized_internal_path
+ *
+ * NOTE: 10/2017: Treat the major ns_lookup errors as firmware errors
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_path,
+ acpi_status lookup_status)
+{
+ char *full_path;
+ const char *message;
+
+ /*
+ * Main cases:
+ * 1) Object creation, object must not already exist
+ * 2) Object lookup, object must exist
+ */
+ switch (lookup_status) {
+ case AE_ALREADY_EXISTS:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure creating";
+ break;
+
+ case AE_NOT_FOUND:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure looking up";
+ break;
+
+ default:
+
+ acpi_os_printf(ACPI_MSG_ERROR);
+ message = "Failure looking up";
+ break;
+ }
+
+ /* Concatenate the prefix path and the internal path */
+
+ full_path =
+ acpi_ns_build_prefixed_pathname(prefix_scope, internal_path);
+
+ acpi_os_printf("%s [%s], %s", message,
+ full_path ? full_path : "Could not get pathname",
+ acpi_format_exception(lookup_status));
+
+ if (full_path) {
+ ACPI_FREE(full_path);
+ }
+
+ ACPI_MSG_SUFFIX;
+}
+
+#ifdef __OBSOLETE_FUNCTION
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_namespace_error
*
* PARAMETERS: module_name - Caller's module name (for error output)
@@ -240,6 +312,7 @@ acpi_ut_namespace_error(const char *module_name,
ACPI_MSG_SUFFIX;
ACPI_MSG_REDIRECT_END;
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 23e766d1691d..45eeb0dcf283 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -206,7 +206,6 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_next_owner_id_offset = 0;
acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
acpi_gbl_osi_mutex = NULL;
- acpi_gbl_max_loop_iterations = ACPI_MAX_LOOP_COUNT;
/* Hardware oriented */
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 5f9c680076c4..2055a858e5f5 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -134,7 +134,7 @@ acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.hi = operand_ovl.part.lo;
- operand_ovl.part.lo ^= operand_ovl.part.lo;
+ operand_ovl.part.lo = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
@@ -171,7 +171,7 @@ acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.lo = operand_ovl.part.hi;
- operand_ovl.part.hi ^= operand_ovl.part.hi;
+ operand_ovl.part.hi = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 586354788018..524ba931d5e8 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -286,8 +286,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %u could not acquire Mutex [0x%X]",
- (u32)this_thread_id, mutex_id));
+ "Thread %u could not acquire Mutex [%s] (0x%X)",
+ (u32)this_thread_id,
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
}
return (status);
@@ -322,8 +323,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
- "Mutex [0x%X] is not acquired, cannot release",
- mutex_id));
+ "Mutex [%s] (0x%X) is not acquired, cannot release",
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
return (AE_NOT_ACQUIRED);
}
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 792664982ea3..33a0970646df 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -140,7 +140,7 @@ int acpi_ut_stricmp(char *string1, char *string2)
return (c1 - c2);
}
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
/*******************************************************************************
*
* FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
@@ -199,4 +199,13 @@ acpi_ut_safe_strncat(char *dest,
strncat(dest, source, max_transfer_length);
return (FALSE);
}
+
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
+{
+ /* Always terminate destination string */
+
+ strncpy(dest, source, dest_size);
+ dest[dest_size - 1] = 0;
+}
+
#endif
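A two-line illustration of the termination guarantee (a sketch):

	char buf[5];

	/* Plain strncpy() would fill all five bytes with no terminator;
	 * the safe variant truncates and terminates: buf becomes "Scop". */
	acpi_ut_safe_strncpy(buf, (char *)"ScopeOp", sizeof(buf));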
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 3175b133c0e4..f6b8dd24b006 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -101,6 +101,8 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
{"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */
+ {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */
+ {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 965fb5cec94f..97f48d71f9e6 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -52,10 +52,9 @@ static acpi_status
acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product);
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product);
-static acpi_status
-acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum);
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum);
/*******************************************************************************
*
@@ -357,7 +356,7 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
* FUNCTION: acpi_ut_strtoul_multiply64
*
* PARAMETERS: multiplicand - Current accumulated converted integer
- * multiplier - Base/Radix
+ * base - Base/Radix
* out_product - Where the product is returned
*
* RETURN: Status and 64-bit product
@@ -369,33 +368,40 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
******************************************************************************/
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product)
{
- u64 val;
+ u64 product;
+ u64 quotient;
/* Exit if either operand is zero */
*out_product = 0;
- if (!multiplicand || !multiplier) {
+ if (!multiplicand || !base) {
return (AE_OK);
}
- /* Check for 64-bit overflow before the actual multiplication */
-
- acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL);
- if (multiplicand > val) {
+ /*
+ * Check for 64-bit overflow before the actual multiplication.
+ *
+	 * Notes: 64-bit division is often not supported on 32-bit platforms
+	 * (it requires a library function), so ACPICA has a local
+	 * 64-bit divide function. Also, the base is currently only used
+	 * as the radix (8/10/16), so the 64/32 divide will always work.
+ */
+ acpi_ut_short_divide(ACPI_UINT64_MAX, base, &quotient, NULL);
+ if (multiplicand > quotient) {
return (AE_NUMERIC_OVERFLOW);
}
- val = multiplicand * multiplier;
+ product = multiplicand * base;
/* Check for 32-bit overflow if necessary */
- if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) {
+ if ((acpi_gbl_integer_bit_width == 32) && (product > ACPI_UINT32_MAX)) {
return (AE_NUMERIC_OVERFLOW);
}
- *out_product = val;
+ *out_product = product;
return (AE_OK);
}
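The same guard written against standard C types, independent of ACPICA's local divide helper (a sketch):

#include <stdint.h>

/* Returns nonzero if acc * base would overflow 64 bits,
 * otherwise stores the product in *out. */
static int mul_base_checked(uint64_t acc, uint32_t base, uint64_t *out)
{
	if (base != 0 && acc > UINT64_MAX / base)
		return 1;
	*out = acc * base;
	return 0;
}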
@@ -404,7 +410,7 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
* FUNCTION: acpi_ut_strtoul_add64
*
* PARAMETERS: addend1 - Current accumulated converted integer
- * addend2 - New hex value/char
+ * digit - New hex value/char
* out_sum - Where sum is returned (Accumulator)
*
* RETURN: Status and 64-bit sum
@@ -415,17 +421,17 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
*
******************************************************************************/
-static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum)
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum)
{
u64 sum;
/* Check for 64-bit overflow before the actual addition */
- if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) {
+ if ((addend1 > 0) && (digit > (ACPI_UINT64_MAX - addend1))) {
return (AE_NUMERIC_OVERFLOW);
}
- sum = addend1 + addend2;
+ sum = addend1 + digit;
/* Check for 32-bit overflow if necessary */
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 3c8de88ecbd5..633b4e2c669f 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -402,8 +402,8 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
allocation->component = component;
allocation->line = line;
- strncpy(allocation->module, module, ACPI_MAX_MODULE_NAME);
- allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
+ acpi_ut_safe_strncpy(allocation->module, (char *)module,
+ ACPI_MAX_MODULE_NAME);
if (!element) {
@@ -717,7 +717,7 @@ exit:
if (!num_outstanding) {
ACPI_INFO(("No outstanding allocations"));
} else {
- ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
+ ACPI_ERROR((AE_INFO, "%u (0x%X) Outstanding cache allocations",
num_outstanding, num_outstanding));
}
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 950a1e500bfa..9da4f8ef2e77 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -96,8 +96,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
*
* RETURN: None
*
- * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
- * and decoded acpi_status.
+ * DESCRIPTION: Print an "ACPI Error" message with module/line/version
+ * info as well as decoded acpi_status.
*
******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
@@ -111,10 +111,10 @@ acpi_exception(const char *module_name,
/* For AE_OK, just print the message */
if (ACPI_SUCCESS(status)) {
- acpi_os_printf(ACPI_MSG_EXCEPTION);
+ acpi_os_printf(ACPI_MSG_ERROR);
} else {
- acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+ acpi_os_printf(ACPI_MSG_ERROR "%s, ",
acpi_format_exception(status));
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 6402f7fad3bb..1efefe919555 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -410,7 +410,52 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
flags = 0;
if (flags != -1)
- memory_failure_queue(pfn, 0, flags);
+ memory_failure_queue(pfn, flags);
+#endif
+}
+
+/*
+ * PCIe AER errors need to be sent to the AER driver for reporting and
+ * recovery. The GHES severities map to the following AER severities and
+ * require the following handling:
+ *
+ * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
+ * These need to be reported by the AER driver but no recovery is
+ * necessary.
+ * GHES_SEV_RECOVERABLE -> AER_NONFATAL
+ * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
+ * These both need to be reported and recovered from by the AER driver.
+ * GHES_SEV_PANIC does not make it to this handling since the kernel must
+ * panic.
+ */
+static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
+
+ if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
+
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+ aer_severity = cper_severity_to_aer(gdata->error_severity);
+
+ /*
+ * If firmware reset the component to contain
+ * the error, we must reinitialize it before
+ * use, so treat it as a fatal AER error.
+ */
+ if (gdata->flags & CPER_SEC_RESET)
+ aer_severity = AER_FATAL;
+
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity,
+ (struct aer_capability_regs *)
+ pcie_err->aer_info);
+ }
#endif
}
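The mapping described in the comment above, condensed into a sketch; the real code derives the base severity with cper_severity_to_aer() before applying the reset override:

static int ghes_aer_severity(int ghes_sev, u32 sec_flags)
{
	if (ghes_sev == GHES_SEV_CORRECTABLE)
		return AER_CORRECTABLE;	/* report only, no recovery */
	if (sec_flags & CPER_SEC_RESET)
		return AER_FATAL;	/* firmware reset the device */
	return AER_NONFATAL;		/* report and recover */
}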
@@ -441,38 +486,9 @@ static void ghes_do_proc(struct ghes *ghes,
arch_apei_report_mem_error(sev, mem_err);
ghes_handle_memory_failure(gdata, sev);
}
-#ifdef CONFIG_ACPI_APEI_PCIEAER
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
- struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
-
- if (sev == GHES_SEV_RECOVERABLE &&
- sec_sev == GHES_SEV_RECOVERABLE &&
- pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
- pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
- unsigned int devfn;
- int aer_severity;
-
- devfn = PCI_DEVFN(pcie_err->device_id.device,
- pcie_err->device_id.function);
- aer_severity = cper_severity_to_aer(gdata->error_severity);
-
- /*
- * If firmware reset the component to contain
- * the error, we must reinitialize it before
- * use, so treat it as a fatal AER error.
- */
- if (gdata->flags & CPER_SEC_RESET)
- aer_severity = AER_FATAL;
-
- aer_recover_queue(pcie_err->device_id.segment,
- pcie_err->device_id.bus,
- devfn, aer_severity,
- (struct aer_capability_regs *)
- pcie_err->aer_info);
- }
-
+ ghes_handle_aer(gdata);
}
-#endif
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
@@ -870,7 +886,6 @@ static void ghes_print_queued_estatus(void)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -882,8 +897,6 @@ static void ghes_print_queued_estatus(void)
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
llnode);
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = cper_estatus_len(estatus);
- node_len = GHES_ESTATUS_NODE_LEN(len);
generic = estatus_node->generic;
ghes_print_estatus(NULL, generic, estatus);
llnode = llnode->next;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 13e7b56e33ae..19bc440820e6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -70,6 +70,7 @@ static async_cookie_t async_cookie;
static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
+static int battery_full_discharging;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -214,9 +215,12 @@ static int acpi_battery_get_property(struct power_supply *psy,
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+ if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
+ if (battery_full_discharging && battery->rate_now == 0)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1166,6 +1170,12 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
return 0;
}
+static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
+{
+ battery_full_discharging = 1;
+ return 0;
+}
+
static const struct dmi_system_id bat_dmi_table[] __initconst = {
{
.callback = battery_bix_broken_package_quirk,
@@ -1183,6 +1193,22 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
},
},
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS GL502VSK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
+ },
+ },
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS UX305LA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
+ },
+ },
{},
};
@@ -1237,13 +1263,11 @@ static int acpi_battery_add(struct acpi_device *device)
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
-#endif
if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
-#endif
goto fail;
}
+#endif
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 4d0979e02a28..f87ed3be779a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -785,6 +785,24 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
}
EXPORT_SYMBOL_GPL(acpi_match_device);
+void *acpi_get_match_data(const struct device *dev)
+{
+ const struct acpi_device_id *match;
+
+ if (!dev->driver)
+ return NULL;
+
+ if (!dev->driver->acpi_match_table)
+ return NULL;
+
+ match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!match)
+ return NULL;
+
+ return (void *)match->driver_data;
+}
+EXPORT_SYMBOL_GPL(acpi_get_match_data);
+
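A hypothetical consumer of the new helper; the driver and info-structure names are invented for illustration:

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_info *info;

	/* driver_data from the matching acpi_device_id entry */
	info = acpi_get_match_data(&pdev->dev);
	if (!info)
		return -ENODEV;

	/* ... configure the device from info ... */
	return 0;
}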
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index bf8e4d371fa7..e1eee7a60fad 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -30,6 +30,7 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <acpi/button.h>
#define PREFIX "ACPI: "
@@ -76,6 +77,22 @@ static const struct acpi_device_id button_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
+/*
+ * Some devices that don't even have a lid have a broken _LID method
+ * (e.g. pointing to a floating gpio pin), causing spurious LID events.
+ */
+static const struct dmi_system_id lid_blacklist[] = {
+ {
+ /* GP-electronic T701 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+ DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+ },
+ },
+ {}
+};
+
static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
@@ -210,6 +227,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
}
/* Send the platform triggered reliable event */
if (do_update) {
+ acpi_handle_debug(device->handle, "ACPI LID %s\n",
+ state ? "open" : "closed");
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
button->last_state = !!state;
@@ -473,6 +492,9 @@ static int acpi_button_add(struct acpi_device *device)
char *name, *class;
int error;
+	if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklist))
+ return -ENODEV;
+
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
if (!button)
return -ENOMEM;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index a4c8ad98560d..c4d0a1c912f0 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -990,7 +990,7 @@ void acpi_subsys_complete(struct device *dev)
* the sleep state it is going out of and it has never been resumed till
* now, resume it in case the firmware powered it up.
*/
- if (dev->power.direct_complete && pm_resume_via_firmware())
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1039,10 +1039,28 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
*/
int acpi_subsys_suspend_noirq(struct device *dev)
{
- if (dev_pm_smart_suspend_and_suspended(dev))
+ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
+
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * acpi_subsys_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
- return pm_generic_suspend_noirq(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
@@ -1052,6 +1070,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
*/
int acpi_subsys_resume_noirq(struct device *dev)
{
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index a041689e5701..545e91420cde 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -357,7 +357,7 @@ static ssize_t real_power_state_show(struct device *dev,
return sprintf(buf, "%s\n", acpi_power_state_string(state));
}
-static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
+static DEVICE_ATTR_RO(real_power_state);
static ssize_t power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -367,7 +367,7 @@ static ssize_t power_state_show(struct device *dev,
return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
}
-static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
+static DEVICE_ATTR_RO(power_state);
static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
@@ -462,7 +462,7 @@ static ssize_t description_show(struct device *dev,
return result;
}
-static DEVICE_ATTR(description, 0444, description_show, NULL);
+static DEVICE_ATTR_RO(description);
static ssize_t
acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0252c9b9af3d..d9f38c645e4a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1516,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
}
acpi_handle_info(ec->handle,
- "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
return ret;
}
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c7dd7af789e..dd70d6c2bca0 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
return -ENOMEM;
}
- if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
+ if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
goto error;
if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
&first_ec->global_lock))
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index 46f060356a22..f13ba2c07667 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -49,6 +49,11 @@
#define MODULE_NAME "acpi-ged"
+struct acpi_ged_device {
+ struct device *dev;
+ struct list_head event_list;
+};
+
struct acpi_ged_event {
struct list_head node;
struct device *dev;
@@ -76,7 +81,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
unsigned int irq;
unsigned int gsi;
unsigned int irqflags = IRQF_ONESHOT;
- struct device *dev = context;
+ struct acpi_ged_device *geddev = context;
+ struct device *dev = geddev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
acpi_handle evt_handle;
struct resource r;
@@ -102,8 +108,6 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
return AE_ERROR;
}
- dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
-
event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
if (!event)
return AE_ERROR;
@@ -116,29 +120,58 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
if (r.flags & IORESOURCE_IRQ_SHAREABLE)
irqflags |= IRQF_SHARED;
- if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
- irqflags, "ACPI:Ged", event)) {
+ if (request_threaded_irq(irq, NULL, acpi_ged_irq_handler,
+ irqflags, "ACPI:Ged", event)) {
dev_err(dev, "failed to setup event handler for irq %u\n", irq);
return AE_ERROR;
}
+ dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+ list_add_tail(&event->node, &geddev->event_list);
return AE_OK;
}
static int ged_probe(struct platform_device *pdev)
{
+ struct acpi_ged_device *geddev;
acpi_status acpi_ret;
+ geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
+ if (!geddev)
+ return -ENOMEM;
+
+ geddev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&geddev->event_list);
acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
- acpi_ged_request_interrupt, &pdev->dev);
+ acpi_ged_request_interrupt, geddev);
if (ACPI_FAILURE(acpi_ret)) {
dev_err(&pdev->dev, "unable to parse the _CRS record\n");
return -EINVAL;
}
+ platform_set_drvdata(pdev, geddev);
return 0;
}
+static void ged_shutdown(struct platform_device *pdev)
+{
+ struct acpi_ged_device *geddev = platform_get_drvdata(pdev);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ free_irq(event->irq, event);
+ list_del(&event->node);
+ dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
+ event->gsi, event->irq);
+ }
+}
+
+static int ged_remove(struct platform_device *pdev)
+{
+ ged_shutdown(pdev);
+ return 0;
+}
+
static const struct acpi_device_id ged_acpi_ids[] = {
{"ACPI0013"},
{},
@@ -146,6 +179,8 @@ static const struct acpi_device_id ged_acpi_ids[] = {
static struct platform_driver ged_driver = {
.probe = ged_probe,
+ .remove = ged_remove,
+ .shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7f43423de43c..1d0a501bc7f0 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -159,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {}
-------------------------------------------------------------------------- */
struct acpi_ec {
acpi_handle handle;
- unsigned long gpe;
+ u32 gpe;
unsigned long command_addr;
unsigned long data_addr;
bool global_lock;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 917f1cc0fda4..8ccaae3550d2 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -460,8 +460,7 @@ int __init acpi_numa_init(void)
srat_proc, ARRAY_SIZE(srat_proc), 0);
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
+ acpi_parse_memory_affinity, 0);
}
/* SLIT: System Locality Information Table */
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index bc3d914dfc3e..85ad679390e3 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -612,7 +612,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_USING;
- printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
+ pr_info("%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
}
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index 90011aad4d20..886ac8b93cd0 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -400,7 +400,7 @@ static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
&intel_bxtwc_pmic_opregion_data);
}
-static struct platform_device_id bxt_wc_opregion_id_table[] = {
+static const struct platform_device_id bxt_wc_opregion_id_table[] = {
{ .name = "bxt_wcove_region" },
{},
};
@@ -412,9 +412,4 @@ static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
},
.id_table = bxt_wc_opregion_id_table,
};
-
-static int __init intel_bxtwc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_bxtwc_pmic_opregion_driver);
-}
-device_initcall(intel_bxtwc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_bxtwc_pmic_opregion_driver);
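For reference, builtin_platform_driver() generates roughly the boilerplate being deleted in these hunks:

/* Approximate expansion of builtin_platform_driver(drv): */
static int __init drv_init(void)
{
	return platform_driver_register(&drv);
}
device_initcall(drv_init);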
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 109c1e9c9c7a..f6d73a243d80 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -131,7 +131,4 @@ static struct platform_driver chtdc_ti_pmic_opregion_driver = {
},
.id_table = chtdc_ti_pmic_opregion_id_table,
};
-module_platform_driver(chtdc_ti_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(chtdc_ti_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index 85636d7a9d39..9912422c8185 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -260,11 +260,10 @@ static int intel_cht_wc_pmic_opregion_probe(struct platform_device *pdev)
&intel_cht_wc_pmic_opregion_data);
}
-static struct platform_device_id cht_wc_opregion_id_table[] = {
+static const struct platform_device_id cht_wc_opregion_id_table[] = {
{ .name = "cht_wcove_region" },
{},
};
-MODULE_DEVICE_TABLE(platform, cht_wc_opregion_id_table);
static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
.probe = intel_cht_wc_pmic_opregion_probe,
@@ -273,8 +272,4 @@ static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
},
.id_table = cht_wc_opregion_id_table,
};
-module_platform_driver(intel_cht_wc_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Intel CHT Whiskey Cove PMIC operation region driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(intel_cht_wc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index d7f1761ab1bc..7ffa74048107 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -201,9 +201,4 @@ static struct platform_driver intel_crc_pmic_opregion_driver = {
.name = "crystal_cove_pmic",
},
};
-
-static int __init intel_crc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_crc_pmic_opregion_driver);
-}
-device_initcall(intel_crc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_crc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6c99d3f81095..316e55174aa9 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -278,9 +278,4 @@ static struct platform_driver intel_xpower_pmic_opregion_driver = {
.name = "axp288_pmic_acpi",
},
};
-
-static int __init intel_xpower_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_xpower_pmic_opregion_driver);
-}
-device_initcall(intel_xpower_pmic_opregion_driver_init);
+builtin_platform_driver(intel_xpower_pmic_opregion_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d50a7b6ccddd..5f0071c7e2e1 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -207,6 +207,7 @@ static void tsc_check_state(int state)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e26ea209b63e..466d1503aba0 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1271,9 +1271,17 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
+static void *
+acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ const struct device *dev)
+{
+ return acpi_get_match_data(dev);
+}
+
#define DECLARE_ACPI_FWNODE_OPS(ops) \
const struct fwnode_operations ops = { \
.device_is_available = acpi_fwnode_device_is_available, \
+ .device_get_match_data = acpi_fwnode_device_get_match_data, \
.property_present = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 8082871b409a..46cde0912762 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -367,10 +367,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{},
};
+static bool ignore_blacklist;
+
+void __init acpi_sleep_no_blacklist(void)
+{
+ ignore_blacklist = true;
+}
+
static void __init acpi_sleep_dmi_check(void)
{
int year;
+ if (ignore_blacklist)
+ return;
+
if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
acpi_nvs_nosave_s3();
@@ -697,7 +707,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
-#define ACPI_S2IDLE_FUNC_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
+#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
+#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
@@ -900,7 +911,8 @@ static int lps0_device_attach(struct acpi_device *adev,
if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
char bitmask = *(char *)out_obj->buffer.pointer;
- if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
+ if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
+ (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 06a150bb35bf..4fc59c3bc673 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -816,14 +816,8 @@ end:
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
*/
-
-/*
- * Currently, the GPE flooding prevention only supports to mask the GPEs
- * numbered from 00 to 7f.
- */
-#define ACPI_MASKABLE_GPE_MAX 0x80
-
-static u64 __initdata acpi_masked_gpes;
+#define ACPI_MASKABLE_GPE_MAX 0x100
+static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
@@ -831,7 +825,7 @@ static int __init acpi_gpe_set_masked_gpes(char *val)
if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
return -EINVAL;
- acpi_masked_gpes |= ((u64)1<<gpe);
+ set_bit(gpe, acpi_masked_gpes_map);
return 1;
}
@@ -843,15 +837,11 @@ void __init acpi_gpe_apply_masked_gpes(void)
acpi_status status;
u8 gpe;
- for (gpe = 0;
- gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
- gpe++) {
- if (acpi_masked_gpes & ((u64)1<<gpe)) {
- status = acpi_get_gpe_device(gpe, &handle);
- if (ACPI_SUCCESS(status)) {
- pr_info("Masking GPE 0x%x.\n", gpe);
- (void)acpi_mask_gpe(handle, gpe, TRUE);
- }
+ for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
+ status = acpi_get_gpe_device(gpe, &handle);
+ if (ACPI_SUCCESS(status)) {
+ pr_info("Masking GPE 0x%x.\n", gpe);
+ (void)acpi_mask_gpe(handle, gpe, TRUE);
}
}
}
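With the map sized to cover GPE numbers 0x00-0xFF (0x100 bits), each occurrence of the boot parameter wired to this handler masks one GPE; per the kernel parameters documentation, for example:

	acpi_mask_gpe=0x6E acpi_mask_gpe=0xEF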
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 9d49a1acebe3..78db97687f26 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -737,16 +737,17 @@ bool acpi_dev_found(const char *hid)
}
EXPORT_SYMBOL(acpi_dev_found);
-struct acpi_dev_present_info {
+struct acpi_dev_match_info {
+ const char *dev_name;
struct acpi_device_id hid[2];
const char *uid;
s64 hrv;
};
-static int acpi_dev_present_cb(struct device *dev, void *data)
+static int acpi_dev_match_cb(struct device *dev, void *data)
{
struct acpi_device *adev = to_acpi_device(dev);
- struct acpi_dev_present_info *match = data;
+ struct acpi_dev_match_info *match = data;
unsigned long long hrv;
acpi_status status;
@@ -757,6 +758,8 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
strcmp(adev->pnp.unique_id, match->uid)))
return 0;
+ match->dev_name = acpi_dev_name(adev);
+
if (match->hrv == -1)
return 1;
@@ -789,20 +792,44 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
*/
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
{
- struct acpi_dev_present_info match = {};
+ struct acpi_dev_match_info match = {};
struct device *dev;
strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
- dev = bus_find_device(&acpi_bus_type, NULL, &match,
- acpi_dev_present_cb);
-
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
return !!dev;
}
EXPORT_SYMBOL(acpi_dev_present);
+/**
+ * acpi_dev_get_first_match_name - Return name of first match of ACPI device
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * Return the device name if a matching device was present
+ * at the moment of invocation, or NULL otherwise.
+ *
+ * See additional information in acpi_dev_present() as well.
+ */
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+ struct acpi_dev_match_info match = {};
+ struct device *dev;
+
+ strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+ match.uid = uid;
+ match.hrv = hrv;
+
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+ return dev ? match.dev_name : NULL;
+}
+EXPORT_SYMBOL(acpi_dev_get_first_match_name);
+
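A usage sketch for the new helper (the HID shown is illustrative):

	const char *name;

	name = acpi_dev_get_first_match_name("INT33FD", NULL, -1);
	if (name)
		pr_info("found matching device: %s\n", name);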
/*
* acpi_backlight= handling, this is done here rather then in video_detect.c
* because __setup cannot be used in modules.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 145c062c176d..d21040c5d343 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4383,7 +4383,7 @@ static int binder_thread_release(struct binder_proc *proc,
return active_transactions;
}
-static unsigned int binder_poll(struct file *filp,
+static __poll_t binder_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct binder_proc *proc = filp->private_data;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cb5339166563..a7120d621154 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -92,6 +92,25 @@ config SATA_AHCI
If unsure, say N.
+config SATA_MOBILE_LPM_POLICY
+ int "Default SATA Link Power Management policy for mobile chipsets"
+ range 0 4
+ default 0
+ depends on SATA_AHCI
+ help
+	  Select the default SATA Link Power Management (LPM) policy to use
+ for mobile / laptop variants of chipsets / "South Bridges".
+
+ The value set has the following meanings:
+ 0 => Keep firmware settings
+ 1 => Maximum performance
+ 2 => Medium power
+ 3 => Medium power with Device Initiated PM enabled
+ 4 => Minimum power
+
+	  Note that "Minimum power" is known to cause issues, including disk
+	  corruption, with some disks and should not be used.
+
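For example, a laptop-oriented kernel configuration might pick medium power with DIPM as the default:

	CONFIG_SATA_AHCI=y
	CONFIG_SATA_MOBILE_LPM_POLICY=3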
config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
help
@@ -925,15 +944,6 @@ endif # ATA_BMDMA
comment "PIO-only SFF controllers"
-config PATA_AT32
- tristate "Atmel AVR32 PATA support (Experimental)"
- depends on AVR32 && PLATFORM_AT32AP
- help
- This option enables support for the IDE devices on the
- Atmel AT32AP platform.
-
- If unsure, say N.
-
config PATA_CMD640_PCI
tristate "CMD640 PCI PATA support (Experimental)"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8daec3e657f8..f1f5a3fbc777 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -96,7 +96,6 @@ obj-$(CONFIG_PATA_VIA) += pata_via.o
obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
# SFF PIO only
-obj-$(CONFIG_PATA_AT32) += pata_at32.o
obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
obj-$(CONFIG_PATA_FALCON) += pata_falcon.o
obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5443cb71d7ba..355a95a83a34 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -65,6 +65,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_mobile,
board_ahci_nomsi,
board_ahci_noncq,
board_ahci_nosntf,
@@ -140,6 +141,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_mobile] = {
+ AHCI_HFLAGS (AHCI_HFLAG_IS_MOBILE),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nomsi] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
@@ -252,13 +260,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
@@ -268,9 +276,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_mobile }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
@@ -293,9 +301,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_mobile }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
@@ -304,28 +312,28 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_mobile }, /* Panther M RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_mobile }, /* Lynx M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_mobile }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_mobile }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_mobile }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
@@ -353,26 +361,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
- { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_mobile }, /* Wildcat LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_mobile }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_mobile }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_mobile }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_mobile }, /* Sunrise LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_mobile }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_mobile }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci_mobile }, /* Sunrise M AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
@@ -386,6 +394,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -593,6 +606,9 @@ static int marvell_enable = 1;
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
+static int mobile_lpm_policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+module_param(mobile_lpm_policy, int, 0644);
+MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
@@ -1728,6 +1744,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
+ if ((hpriv->flags & AHCI_HFLAG_IS_MOBILE) &&
+ mobile_lpm_policy >= ATA_LPM_UNKNOWN &&
+ mobile_lpm_policy <= ATA_LPM_MIN_POWER)
+ ap->target_lpm_policy = mobile_lpm_policy;
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
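For reference, the new knob can be set at load time (for example,
ahci.mobile_lpm_policy=3 on the kernel command line) and only takes
effect on ports whose board entry carries AHCI_HFLAG_IS_MOBILE. A
minimal sketch of the gating logic, assuming the ATA_LPM_* policy enum
from <linux/libata.h> (the helper name is hypothetical):

    /* Hypothetical helper mirroring the hunk above: use the module
     * parameter as the default LPM policy, but only on mobile boards
     * and only when the value is a valid ata_lpm_policy.
     */
    static void ahci_apply_default_lpm(struct ata_port *ap,
                                       struct ahci_host_priv *hpriv)
    {
            if ((hpriv->flags & AHCI_HFLAG_IS_MOBILE) &&
                mobile_lpm_policy >= ATA_LPM_UNKNOWN &&
                mobile_lpm_policy <= ATA_LPM_MIN_POWER)
                    ap->target_lpm_policy = mobile_lpm_policy;
    }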
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 749fd94441b0..a9d996e17d75 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -251,6 +251,9 @@ enum {
AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
only registers */
+ AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
+ SATA_MOBILE_LPM_POLICY
+ as default lpm_policy */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 5936d1679bf3..ea430819c80b 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -70,6 +70,13 @@
(DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) | \
(MMIO_ENDIAN << MMIO_ENDIAN_SHIFT))
+#define BUS_CTRL_ENDIAN_NSP_CONF \
+ (0x02 << DMADATA_ENDIAN_SHIFT | 0x02 << DMADESC_ENDIAN_SHIFT)
+
+#define BUS_CTRL_ENDIAN_CONF_MASK \
+ (0x3 << MMIO_ENDIAN_SHIFT | 0x3 << DMADESC_ENDIAN_SHIFT | \
+ 0x3 << DMADATA_ENDIAN_SHIFT | 0x3 << PIODATA_ENDIAN_SHIFT)
+
enum brcm_ahci_version {
BRCM_SATA_BCM7425 = 1,
BRCM_SATA_BCM7445,
@@ -89,14 +96,6 @@ struct brcm_ahci_priv {
enum brcm_ahci_version version;
};
-static const struct ata_port_info ahci_brcm_port_info = {
- .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
- .link_flags = ATA_LFLAG_NO_DB_DELAY,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_ops,
-};
-
static inline u32 brcm_sata_readreg(void __iomem *addr)
{
/*
@@ -250,20 +249,105 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
static void brcm_sata_init(struct brcm_ahci_priv *priv)
{
void __iomem *ctrl = priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL;
+ u32 data;
/* Configure endianness */
- if (priv->version == BRCM_SATA_NSP) {
- u32 data = brcm_sata_readreg(ctrl);
-
- data &= ~((0x03 << DMADATA_ENDIAN_SHIFT) |
- (0x03 << DMADESC_ENDIAN_SHIFT));
- data |= (0x02 << DMADATA_ENDIAN_SHIFT) |
- (0x02 << DMADESC_ENDIAN_SHIFT);
- brcm_sata_writereg(data, ctrl);
- } else
- brcm_sata_writereg(BUS_CTRL_ENDIAN_CONF, ctrl);
+ data = brcm_sata_readreg(ctrl);
+ data &= ~BUS_CTRL_ENDIAN_CONF_MASK;
+ if (priv->version == BRCM_SATA_NSP)
+ data |= BUS_CTRL_ENDIAN_NSP_CONF;
+ else
+ data |= BUS_CTRL_ENDIAN_CONF;
+ brcm_sata_writereg(data, ctrl);
+}
+
+static unsigned int brcm_ahci_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct ata_host *host = ap->host;
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct brcm_ahci_priv *priv = hpriv->plat_data;
+ void __iomem *mmio = hpriv->mmio;
+ unsigned int err_mask;
+ unsigned long flags;
+ int i, rc;
+ u32 ctl;
+
+ /* Try to read the device ID and, if this fails, proceed with the
+ * recovery sequence below
+ */
+ err_mask = ata_do_dev_read_id(dev, tf, id);
+ if (likely(!err_mask))
+ return err_mask;
+
+ /* Disable host interrupts */
+ spin_lock_irqsave(&host->lock, flags);
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Perform the SATA PHY reset sequence */
+ brcm_sata_phy_disable(priv, ap->port_no);
+
+ /* Bring the PHY back on */
+ brcm_sata_phy_enable(priv, ap->port_no);
+
+ /* Re-initialize and calibrate the PHY */
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_init(hpriv->phys[i]);
+ if (rc)
+ goto disable_phys;
+
+ rc = phy_calibrate(hpriv->phys[i]);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+ }
+
+ /* Re-enable host interrupts */
+ spin_lock_irqsave(&host->lock, flags);
+ ctl = readl(mmio + HOST_CTL);
+ ctl |= HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ata_do_dev_read_id(dev, tf, id);
+
+disable_phys:
+ while (--i >= 0) {
+ phy_power_off(hpriv->phys[i]);
+ phy_exit(hpriv->phys[i]);
+ }
+
+ return AC_ERR_OTHER;
+}
+
+static void brcm_ahci_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ ahci_platform_disable_resources(hpriv);
}
+static struct ata_port_operations ahci_brcm_platform_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = brcm_ahci_host_stop,
+ .read_id = brcm_ahci_read_id,
+};
+
+static const struct ata_port_info ahci_brcm_port_info = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
+ .link_flags = ATA_LFLAG_NO_DB_DELAY,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_brcm_platform_ops,
+};
+
#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
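brcm_ahci_read_id() above follows a retry-once shape: attempt the
IDENTIFY, and on failure mask host interrupts, cycle the SATA PHY,
re-initialize and calibrate it, then retry the read a single time. A
hedged sketch of that shape, with recover_link() standing in for the
PHY sequence (illustrative name only):

    static unsigned int read_with_recovery(struct ata_device *dev,
                                           struct ata_taskfile *tf, u16 *id)
    {
            unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);

            if (likely(!err_mask))
                    return 0;
            if (recover_link(dev))          /* hypothetical recovery step */
                    return AC_ERR_OTHER;
            return ata_do_dev_read_id(dev, tf, id); /* retry exactly once */
    }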
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b702c20fbc2b..7ecb1322a514 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -458,7 +458,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8_2port_sata_byt] = &ich8_2port_map_db,
};
-static struct pci_bits piix_enable_bits[] = {
+static const struct pci_bits piix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8193b38a1cae..3c09122bf038 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4449,6 +4449,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
{ "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+ { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
/* Devices we expect to fail diagnostics */
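The new entry reuses the shell-style glob matching of the CX1 line
above it; in libata-core.c an ata_blacklist_entry pairs a model glob,
an optional firmware revision, and horkage flags. The shape, shown as
a standalone declaration for illustration only:

    static const struct ata_blacklist_entry liteon_ep1 = {
            .model_num  = "LITEON EP1-*",            /* glob on model string */
            .model_rev  = NULL,                      /* any firmware revision */
            .horkage    = ATA_HORKAGE_MAX_SEC_1024,  /* cap I/O at 1024 sectors */
    };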
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
deleted file mode 100644
index 9aeb7a6dd4d4..000000000000
--- a/drivers/ata/pata_at32.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * AVR32 SMC/CFC PATA Driver
- *
- * Copyright (C) 2007 Atmel Norway
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- */
-
-#define DEBUG
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <scsi/scsi_host.h>
-#include <linux/ata.h>
-#include <linux/libata.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <mach/board.h>
-#include <mach/smc.h>
-
-#define DRV_NAME "pata_at32"
-#define DRV_VERSION "0.0.3"
-
-/*
- * CompactFlash controller memory layout relative to the base address:
- *
- * Attribute memory: 0000 0000 -> 003f ffff
- * Common memory: 0040 0000 -> 007f ffff
- * I/O memory: 0080 0000 -> 00bf ffff
- * True IDE Mode: 00c0 0000 -> 00df ffff
- * Alt IDE Mode: 00e0 0000 -> 00ff ffff
- *
- * Only True IDE and Alt True IDE mode are needed for this driver.
- *
- * True IDE mode => CS0 = 0, CS1 = 1 (cmd, error, stat, etc)
- * Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat)
- */
-#define CF_IDE_OFFSET 0x00c00000
-#define CF_ALT_IDE_OFFSET 0x00e00000
-#define CF_RES_SIZE 2048
-
-/*
- * Define DEBUG_BUS if you are doing debugging of your own EBI -> PATA
- * adaptor with a logic analyzer or similar.
- */
-#undef DEBUG_BUS
-
-/*
- * ATA PIO modes
- *
- * Name | Mb/s | Min cycle time | Mask
- * --------+-------+----------------+--------
- * Mode 0 | 3.3 | 600 ns | 0x01
- * Mode 1 | 5.2 | 383 ns | 0x03
- * Mode 2 | 8.3 | 240 ns | 0x07
- * Mode 3 | 11.1 | 180 ns | 0x0f
- * Mode 4 | 16.7 | 120 ns | 0x1f
- *
- * Alter PIO_MASK below according to table to set maximal PIO mode.
- */
-enum {
- PIO_MASK = ATA_PIO4,
-};
-
-/*
- * Struct containing private information about device.
- */
-struct at32_ide_info {
- unsigned int irq;
- struct resource res_ide;
- struct resource res_alt;
- void __iomem *ide_addr;
- void __iomem *alt_addr;
- unsigned int cs;
- struct smc_config smc;
-};
-
-/*
- * Setup SMC for the given ATA timing.
- */
-static int pata_at32_setup_timing(struct device *dev,
- struct at32_ide_info *info,
- const struct ata_timing *ata)
-{
- struct smc_config *smc = &info->smc;
- struct smc_timing timing;
-
- int active;
- int recover;
-
- memset(&timing, 0, sizeof(struct smc_timing));
-
- /* Total cycle time */
- timing.read_cycle = ata->cyc8b;
-
- /* DIOR <= CFIOR timings */
- timing.nrd_setup = ata->setup;
- timing.nrd_pulse = ata->act8b;
- timing.nrd_recover = ata->rec8b;
-
- /* Convert nanosecond timing to clock cycles */
- smc_set_timing(smc, &timing);
-
- /* Add one extra cycle setup due to signal ring */
- smc->nrd_setup = smc->nrd_setup + 1;
-
- active = smc->nrd_setup + smc->nrd_pulse;
- recover = smc->read_cycle - active;
-
- /* Need at least two cycles recovery */
- if (recover < 2)
- smc->read_cycle = active + 2;
-
- /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
- smc->ncs_read_setup = 1;
- smc->ncs_read_pulse = smc->read_cycle - 2;
-
- /* Write timings same as read timings */
- smc->write_cycle = smc->read_cycle;
- smc->nwe_setup = smc->nrd_setup;
- smc->nwe_pulse = smc->nrd_pulse;
- smc->ncs_write_setup = smc->ncs_read_setup;
- smc->ncs_write_pulse = smc->ncs_read_pulse;
-
- /* Do some debugging output of ATA and SMC timings */
- dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n",
- ata->cyc8b, ata->setup, ata->act8b, ata->rec8b);
-
- dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n",
- smc->read_cycle, smc->nrd_setup, smc->nrd_pulse,
- smc->ncs_read_setup, smc->ncs_read_pulse);
-
- /* Finally, configure the SMC */
- return smc_set_configuration(info->cs, smc);
-}
-
-/*
- * Procedures for libATA.
- */
-static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing timing;
- struct at32_ide_info *info = ap->host->private_data;
-
- int ret;
-
- /* Compute ATA timing */
- ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
- if (ret) {
- dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret);
- return;
- }
-
- /* Setup SMC to ATA timing */
- ret = pata_at32_setup_timing(ap->dev, info, &timing);
- if (ret) {
- dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret);
- return;
- }
-}
-
-static struct scsi_host_template at32_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations at32_port_ops = {
- .inherits = &ata_sff_port_ops,
- .cable_detect = ata_cable_40wire,
- .set_piomode = pata_at32_set_piomode,
-};
-
-static int __init pata_at32_init_one(struct device *dev,
- struct at32_ide_info *info)
-{
- struct ata_host *host;
- struct ata_port *ap;
-
- host = ata_host_alloc(dev, 1);
- if (!host)
- return -ENOMEM;
-
- ap = host->ports[0];
-
- /* Setup ATA bindings */
- ap->ops = &at32_port_ops;
- ap->pio_mask = PIO_MASK;
- ap->flags |= ATA_FLAG_SLAVE_POSS;
-
- /*
- * Since all 8-bit taskfile transfers has to go on the lower
- * byte of the data bus and there is a bug in the SMC that
- * makes it impossible to alter the bus width during runtime,
- * we need to hardwire the address signals as follows:
- *
- * A_IDE(2:0) <= A_EBI(3:1)
- *
- * This makes all addresses on the EBI even, thus all data
- * will be on the lower byte of the data bus. All addresses
- * used by libATA need to be altered according to this.
- */
- ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1);
- ap->ioaddr.ctl_addr = info->alt_addr + (0x06 << 1);
-
- ap->ioaddr.data_addr = info->ide_addr + (ATA_REG_DATA << 1);
- ap->ioaddr.error_addr = info->ide_addr + (ATA_REG_ERR << 1);
- ap->ioaddr.feature_addr = info->ide_addr + (ATA_REG_FEATURE << 1);
- ap->ioaddr.nsect_addr = info->ide_addr + (ATA_REG_NSECT << 1);
- ap->ioaddr.lbal_addr = info->ide_addr + (ATA_REG_LBAL << 1);
- ap->ioaddr.lbam_addr = info->ide_addr + (ATA_REG_LBAM << 1);
- ap->ioaddr.lbah_addr = info->ide_addr + (ATA_REG_LBAH << 1);
- ap->ioaddr.device_addr = info->ide_addr + (ATA_REG_DEVICE << 1);
- ap->ioaddr.status_addr = info->ide_addr + (ATA_REG_STATUS << 1);
- ap->ioaddr.command_addr = info->ide_addr + (ATA_REG_CMD << 1);
-
- /* Set info as private data of ATA host */
- host->private_data = info;
-
- /* Register ATA device and return */
- return ata_host_activate(host, info->irq, ata_sff_interrupt,
- IRQF_SHARED | IRQF_TRIGGER_RISING,
- &at32_sht);
-}
-
-/*
- * This function may come in handy for people analyzing their own
- * EBI -> PATA adaptors.
- */
-#ifdef DEBUG_BUS
-
-static void __init pata_at32_debug_bus(struct device *dev,
- struct at32_ide_info *info)
-{
- const int d1 = 0xff;
- const int d2 = 0x00;
-
- int i;
-
- /* Write 8-bit values (registers) */
- iowrite8(d1, info->alt_addr + (0x06 << 1));
- iowrite8(d2, info->alt_addr + (0x06 << 1));
-
- for (i = 0; i < 8; i++) {
- iowrite8(d1, info->ide_addr + (i << 1));
- iowrite8(d2, info->ide_addr + (i << 1));
- }
-
- /* Write 16 bit values (data) */
- iowrite16(d1, info->ide_addr);
- iowrite16(d1 << 8, info->ide_addr);
-
- iowrite16(d1, info->ide_addr);
- iowrite16(d1 << 8, info->ide_addr);
-}
-
-#endif
-
-static int __init pata_at32_probe(struct platform_device *pdev)
-{
- const struct ata_timing initial_timing =
- {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
-
- struct device *dev = &pdev->dev;
- struct at32_ide_info *info;
- struct ide_platform_data *board = dev_get_platdata(&pdev->dev);
- struct resource *res;
-
- int irq;
- int ret;
-
- if (!board)
- return -ENXIO;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- /* Retrive IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- /* Setup struct containing private information */
- info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->irq = irq;
- info->cs = board->cs;
-
- /* Request memory resources */
- info->res_ide.start = res->start + CF_IDE_OFFSET;
- info->res_ide.end = info->res_ide.start + CF_RES_SIZE - 1;
- info->res_ide.name = "ide";
- info->res_ide.flags = IORESOURCE_MEM;
-
- ret = request_resource(res, &info->res_ide);
- if (ret)
- goto err_req_res_ide;
-
- info->res_alt.start = res->start + CF_ALT_IDE_OFFSET;
- info->res_alt.end = info->res_alt.start + CF_RES_SIZE - 1;
- info->res_alt.name = "alt";
- info->res_alt.flags = IORESOURCE_MEM;
-
- ret = request_resource(res, &info->res_alt);
- if (ret)
- goto err_req_res_alt;
-
- /* Setup non-timing elements of SMC */
- info->smc.bus_width = 2; /* 16 bit data bus */
- info->smc.nrd_controlled = 1; /* Sample data on rising edge of NRD */
- info->smc.nwe_controlled = 0; /* Drive data on falling edge of NCS */
- info->smc.nwait_mode = 3; /* NWAIT is in READY mode */
- info->smc.byte_write = 0; /* Byte select access type */
- info->smc.tdf_mode = 0; /* TDF optimization disabled */
- info->smc.tdf_cycles = 0; /* No TDF wait cycles */
-
- /* Setup SMC to ATA timing */
- ret = pata_at32_setup_timing(dev, info, &initial_timing);
- if (ret)
- goto err_setup_timing;
-
- /* Map ATA address space */
- ret = -ENOMEM;
- info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16);
- info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16);
- if (!info->ide_addr || !info->alt_addr)
- goto err_ioremap;
-
-#ifdef DEBUG_BUS
- pata_at32_debug_bus(dev, info);
-#endif
-
- /* Setup and register ATA device */
- ret = pata_at32_init_one(dev, info);
- if (ret)
- goto err_ata_device;
-
- return 0;
-
- err_ata_device:
- err_ioremap:
- err_setup_timing:
- release_resource(&info->res_alt);
- err_req_res_alt:
- release_resource(&info->res_ide);
- err_req_res_ide:
- kfree(info);
-
- return ret;
-}
-
-static int __exit pata_at32_remove(struct platform_device *pdev)
-{
- struct ata_host *host = platform_get_drvdata(pdev);
- struct at32_ide_info *info;
-
- if (!host)
- return 0;
-
- info = host->private_data;
- ata_host_detach(host);
-
- if (!info)
- return 0;
-
- release_resource(&info->res_ide);
- release_resource(&info->res_alt);
-
- kfree(info);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:at32_ide");
-
-static struct platform_driver pata_at32_driver = {
- .remove = __exit_p(pata_at32_remove),
- .driver = {
- .name = "at32_ide",
- },
-};
-
-module_platform_driver_probe(pata_at32_driver, pata_at32_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver");
-MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 49d705c9f0f7..4d49fd3c927b 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,6 +278,10 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, &info };
+ /* SB600 doesn't have the secondary port wired */
+ if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
+ ppi[1] = &ata_dummy_port_info;
+
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 7a21edf89e72..8468b300193b 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -683,7 +683,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
ioread16_rep(ap->ioaddr.data_addr, buf, len/2);
return (u8 *)buf;
}
- mdelay(1);
+ usleep_range(500, 1000);
}
kfree(buf);
printk(KERN_ERR "it821x_firmware_command: timeout\n");
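This change, together with the pata_pdc2027x and sata_mv hunks below,
trades busy-waits for sleeping waits, following the usual timer
guidance: msleep() for longer millisecond-scale delays, usleep_range()
for microsecond-scale ones, and mdelay() only where sleeping is
forbidden. A minimal sketch of the polling shape, valid only in a
context that may sleep (predicate and retry budget illustrative):

    int i;

    for (i = 0; i < 1000; i++) {
            if (status_ready(ap))           /* hypothetical predicate */
                    return 0;
            usleep_range(500, 1000);        /* was mdelay(1): busy-waited */
    }
    return -ETIMEDOUT;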
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 6db2e34bd52f..1a18e675ba9f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -580,7 +580,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
ioread16(mmio_base + PDC_PLL_CTL); /* flush */
/* Wait the PLL circuit to be stable */
- mdelay(30);
+ msleep(30);
#ifdef PDC_DEBUG
/*
@@ -620,7 +620,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
start_time = ktime_get();
/* Let the counter run for 100 ms. */
- mdelay(100);
+ msleep(100);
/* Read the counter values again */
end_count = pdc_read_counter(host);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index cc208b72b199..42d4589b43d4 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3596,7 +3596,7 @@ static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
hpriv->ops->phy_errata(hpriv, mmio, port_no);
if (IS_GEN_I(hpriv))
- mdelay(1);
+ usleep_range(500, 1000);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index ce47eb17901d..6470e3c4c990 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -473,7 +473,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
ENI_PRV_POS(skb) = eni_vcc->descr+size+1;
skb_queue_tail(&eni_dev->rx_queue,skb);
eni_vcc->last = skb;
-rx_enqueued++;
+ rx_enqueued++;
}
eni_vcc->descr = here;
eni_out(dma_wr,MID_DMA_WR_RX);
@@ -715,7 +715,7 @@ static void get_service(struct atm_dev *dev)
else eni_dev->slow = vcc;
eni_dev->last_slow = vcc;
}
-putting++;
+ putting++;
ENI_VCC(vcc)->servicing++;
}
}
@@ -744,7 +744,7 @@ static void dequeue_rx(struct atm_dev *dev)
}
EVENT("dequeued (size=%ld,pos=0x%lx)\n",ENI_PRV_SIZE(skb),
ENI_PRV_POS(skb));
-rx_dequeued++;
+ rx_dequeued++;
vcc = ATM_SKB(skb)->vcc;
eni_vcc = ENI_VCC(vcc);
first = 0;
@@ -1174,7 +1174,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
DPRINTK("dma_wr set to %d, tx_pos is now %ld\n",dma_wr,tx->tx_pos);
eni_out(dma_wr,MID_DMA_WR_TX);
skb_queue_tail(&eni_dev->tx_queue,skb);
-queued++;
+ queued++;
return enq_ok;
}
@@ -1195,7 +1195,7 @@ static void poll_tx(struct atm_dev *dev)
if (res == enq_ok) continue;
DPRINTK("re-queuing TX PDU\n");
skb_queue_head(&tx->backlog,skb);
-requeued++;
+ requeued++;
if (res == enq_jam) return;
break;
}
@@ -1232,7 +1232,7 @@ static void dequeue_tx(struct atm_dev *dev)
else dev_kfree_skb_irq(skb);
atomic_inc(&vcc->stats->tx);
wake_up(&eni_dev->tx_wait);
-dma_complete++;
+ dma_complete++;
}
}
@@ -1555,7 +1555,7 @@ static void eni_tasklet(unsigned long data)
}
if (events & MID_TX_COMPLETE) {
EVENT("INT: TX COMPLETE\n",0,0);
-tx_complete++;
+ tx_complete++;
wake_up(&eni_dev->tx_wait);
/* poll_rx ? */
}
@@ -2069,14 +2069,14 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
}
*(u32 *) skb->data = htonl(*(u32 *) skb->data);
}
-submitted++;
+ submitted++;
ATM_SKB(skb)->vcc = vcc;
tasklet_disable(&ENI_DEV(vcc->dev)->task);
res = do_tx(skb);
tasklet_enable(&ENI_DEV(vcc->dev)->task);
if (res == enq_ok) return 0;
skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb);
-backlogged++;
+ backlogged++;
tasklet_schedule(&ENI_DEV(vcc->dev)->task);
return 0;
}
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index bdc87907d6a1..784ff0003c1b 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -86,10 +86,9 @@ config FW_LOADER
require userspace firmware loading support, but a module built
out-of-tree does.
-config FIRMWARE_IN_KERNEL
- bool "Include in-kernel firmware blobs in kernel binary"
+config EXTRA_FIRMWARE
+ string "External firmware blobs to build into the kernel binary"
depends on FW_LOADER
- default y
help
Various drivers in the kernel source tree may require firmware,
which is generally available in your distribution's linux-firmware
@@ -99,23 +98,6 @@ config FIRMWARE_IN_KERNEL
/lib/firmware/ on your system, so they can be loaded by userspace
helpers on request.
- Enabling this option will build each required firmware blob
- specified by EXTRA_FIRMWARE into the kernel directly, where
- request_firmware() will find them without having to call out to
- userspace. This may be useful if your root file system requires a
- device that uses such firmware and you do not wish to use an
- initrd.
-
- This single option controls the inclusion of firmware for
- every driver that uses request_firmware(), which avoids a
- proliferation of 'Include firmware for xxx device' options.
-
- Say 'N' and let firmware be loaded from userspace.
-
-config EXTRA_FIRMWARE
- string "External firmware blobs to build into the kernel binary"
- depends on FW_LOADER
- help
This option allows firmware to be built into the kernel for the case
where the user either cannot or doesn't want to provide it from
userspace at runtime (for example, when the firmware in question is
@@ -126,11 +108,11 @@ config EXTRA_FIRMWARE
firmware files -- the same names that appear in MODULE_FIRMWARE()
and request_firmware() in the source. These files should exist under
the directory specified by the EXTRA_FIRMWARE_DIR option, which is
- by default the firmware subdirectory of the kernel source tree.
+ /lib/firmware by default.
For example, you might set CONFIG_EXTRA_FIRMWARE="usb8388.bin", copy
- the usb8388.bin file into the firmware directory, and build the kernel.
- Then any request_firmware("usb8388.bin") will be satisfied internally
+ the usb8388.bin file into /lib/firmware, and build the kernel. Then
+ any request_firmware("usb8388.bin") will be satisfied internally
without needing to call out to userspace.
WARNING: If you include additional firmware files into your binary
@@ -236,6 +218,9 @@ config GENERIC_CPU_DEVICES
config GENERIC_CPU_AUTOPROBE
bool
+config GENERIC_CPU_VULNERABILITIES
+ bool
+
config SOC_BUS
bool
select GLOB
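With FIRMWARE_IN_KERNEL gone, building a blob into the image is driven
entirely by the relocated EXTRA_FIRMWARE option above. A hedged
configuration sketch, assuming the blob lives in /lib/firmware:

    CONFIG_FW_LOADER=y
    CONFIG_EXTRA_FIRMWARE="usb8388.bin"
    CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"

A driver's request_firmware(&fw, "usb8388.bin", dev) is then satisfied
from the kernel image without calling out to userspace.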
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 4de87b0b53c8..52ec5174bcb1 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -1,15 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Arch specific cpu topology information
*
* Copyright (C) 2016, ARM Ltd.
* Written by: Juri Lelli, ARM Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/acpi.h>
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 95e3ef82f3b7..20736aaa0e69 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* attribute_container.c - implementation of a simple container for classes
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
*
- * This file is licensed under GPLv2
- *
* The basic idea here is to enable a device to be attached to an
 * arbitrary number of classes without having to allocate storage for them.
* Instead, the contained classes select the devices they need to attach
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 22a64fd3309b..ef6183306b40 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bus.c - bus driver management
*
@@ -5,9 +6,6 @@
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2007 Novell Inc.
- *
- * This file is released under the GPLv2
- *
*/
#include <linux/async.h>
@@ -309,7 +307,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
klist_iter_init_node(&bus->p->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
- while ((dev = next_device(&i)) && !error)
+ while (!error && (dev = next_device(&i)))
error = fn(dev, data);
klist_iter_exit(&i);
return error;
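The reordering here (repeated in core.c and driver.c below) leans on
&&'s short-circuit evaluation: once fn() has returned an error,
next_device() is never called again, so the klist iterator is not
advanced past the failing device before the loop exits. In shape:

    /* old: fetched one more device even after an error was recorded */
    while ((dev = next_device(&i)) && !error)
            error = fn(dev, data);

    /* new: the error is tested first, so a failed callback stops the
     * walk before the cursor moves again
     */
    while (!error && (dev = next_device(&i)))
            error = fn(dev, data);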
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 07532d83be0b..edf726267282 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cacheinfo support - processor cache information via sysfs
*
* Based on arch/x86/kernel/cpu/intel_cacheinfo.c
* Author: Sudeep Holla <sudeep.holla@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 52eb8e644acd..54def4e02f00 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* class.c - basic device class management
*
@@ -5,9 +6,6 @@
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2003-2004 Greg Kroah-Hartman
* Copyright (c) 2003-2004 IBM Corp.
- *
- * This file is released under the GPLv2
- *
*/
#include <linux/device.h>
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 89b032f2ffd2..8946dfee4768 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Componentized device handling.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This is work in progress. We gather up the component devices into a list,
* and bind them when instructed. At the moment, we're specific to the DRM
 * subsystem, and only handle one master device, but this doesn't have to be
@@ -17,6 +14,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/debugfs.h>
struct component;
@@ -41,6 +39,7 @@ struct master {
const struct component_master_ops *ops;
struct device *dev;
struct component_match *match;
+ struct dentry *dentry;
};
struct component {
@@ -56,6 +55,80 @@ static DEFINE_MUTEX(component_mutex);
static LIST_HEAD(component_list);
static LIST_HEAD(masters);
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *component_debugfs_dir;
+
+static int component_devices_show(struct seq_file *s, void *data)
+{
+ struct master *m = s->private;
+ struct component_match *match = m->match;
+ size_t i;
+
+ mutex_lock(&component_mutex);
+ seq_printf(s, "%-40s %20s\n", "master name", "status");
+ seq_puts(s, "-------------------------------------------------------------\n");
+ seq_printf(s, "%-40s %20s\n\n",
+ dev_name(m->dev), m->bound ? "bound" : "not bound");
+
+ seq_printf(s, "%-40s %20s\n", "device name", "status");
+ seq_puts(s, "-------------------------------------------------------------\n");
+ for (i = 0; i < match->num; i++) {
+ struct device *d = (struct device *)match->compare[i].data;
+
+ seq_printf(s, "%-40s %20s\n", dev_name(d),
+ match->compare[i].component ?
+ "registered" : "not registered");
+ }
+ mutex_unlock(&component_mutex);
+
+ return 0;
+}
+
+static int component_devices_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, component_devices_show, inode->i_private);
+}
+
+static const struct file_operations component_devices_fops = {
+ .open = component_devices_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init component_debug_init(void)
+{
+ component_debugfs_dir = debugfs_create_dir("device_component", NULL);
+
+ return 0;
+}
+
+core_initcall(component_debug_init);
+
+static void component_master_debugfs_add(struct master *m)
+{
+ m->dentry = debugfs_create_file(dev_name(m->dev), 0444,
+ component_debugfs_dir,
+ m, &component_devices_fops);
+}
+
+static void component_master_debugfs_del(struct master *m)
+{
+ debugfs_remove(m->dentry);
+ m->dentry = NULL;
+}
+
+#else
+
+static void component_master_debugfs_add(struct master *m)
+{ }
+
+static void component_master_debugfs_del(struct master *m)
+{ }
+
+#endif
+
static struct master *__master_find(struct device *dev,
const struct component_master_ops *ops)
{
@@ -290,6 +363,7 @@ static void free_master(struct master *master)
struct component_match *match = master->match;
int i;
+ component_master_debugfs_del(master);
list_del(&master->node);
if (match) {
@@ -323,6 +397,7 @@ int component_master_add_with_match(struct device *dev,
master->ops = ops;
master->match = match;
+ component_master_debugfs_add(master);
/* Add to the list of available masters. */
mutex_lock(&component_mutex);
list_add(&master->node, &masters);
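Assuming debugfs is mounted at /sys/kernel/debug, each registered
master now gets a read-only file under device_component/ named after
its device; reading it emits the seq_printf() layout above. Sample
output with hypothetical device names, per the "%-40s %20s" format:

    master name                                            status
    -------------------------------------------------------------
    1c30000.hdmi                                        not bound

    device name                                            status
    -------------------------------------------------------------
    1c40000.encoder                                    registered
    1c50000.connector                              not registered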
diff --git a/drivers/base/container.c b/drivers/base/container.c
index ecbfbe2e908f..1ba42d2d3532 100644
--- a/drivers/base/container.c
+++ b/drivers/base/container.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* System bus type for containers.
*
* Copyright (C) 2013, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/container.h>
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 110230d86527..61515ef91184 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/core.c - core driver model code (device registration, etc)
*
@@ -5,9 +6,6 @@
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2006 Novell, Inc.
- *
- * This file is released under the GPLv2
- *
*/
#include <linux/device.h>
@@ -2116,7 +2114,7 @@ int device_for_each_child(struct device *parent, void *data,
return 0;
klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)) && !error)
+ while (!error && (child = next_device(&i)))
error = fn(child, data);
klist_iter_exit(&i);
return error;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 58a9b608d821..d21a2d913107 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CPU subsystem support
*/
@@ -511,10 +512,58 @@ static void __init cpu_dev_register_generic(void)
#endif
}
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t __weak cpu_show_meltdown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+
+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
+ NULL
+};
+
+static const struct attribute_group cpu_root_vulnerabilities_group = {
+ .name = "vulnerabilities",
+ .attrs = cpu_root_vulnerabilities_attrs,
+};
+
+static void __init cpu_register_vulnerabilities(void)
+{
+ if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_vulnerabilities_group))
+ pr_err("Unable to register CPU vulnerabilities\n");
+}
+
+#else
+static inline void cpu_register_vulnerabilities(void) { }
+#endif
+
void __init cpu_dev_init(void)
{
if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
panic("Failed to register CPU subsystem");
cpu_dev_register_generic();
+ cpu_register_vulnerabilities();
}
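The __weak stubs report "Not affected" until an architecture overrides
them, after which the group is visible as
/sys/devices/system/cpu/vulnerabilities/{meltdown,spectre_v1,spectre_v2}.
A sketch of an arch-side override, with a hypothetical predicate
standing in for the real per-arch bug test:

    /* Overrides the __weak default above at link time. */
    ssize_t cpu_show_meltdown(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
            if (!cpu_has_meltdown_bug())    /* hypothetical predicate */
                    return sprintf(buf, "Not affected\n");
            return sprintf(buf, "Mitigation: PTI\n");
    }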
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 2c964f56dafe..de6fd092bf2f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/dd.c - The core device/driver interactions.
*
@@ -13,8 +14,6 @@
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2007-2009 Novell Inc.
- *
- * This file is released under the GPLv2
*/
#include <linux/device.h>
@@ -289,6 +288,18 @@ static void driver_bound(struct device *dev)
kobject_uevent(&dev->kobj, KOBJ_BIND);
}
+static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ device_lock(dev);
+ if (dev->driver->coredump)
+ dev->driver->coredump(dev);
+ device_unlock(dev);
+
+ return count;
+}
+static DEVICE_ATTR_WO(coredump);
+
static int driver_sysfs_add(struct device *dev)
{
int ret;
@@ -298,14 +309,26 @@ static int driver_sysfs_add(struct device *dev)
BUS_NOTIFY_BIND_DRIVER, dev);
ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
+ kobject_name(&dev->kobj));
+ if (ret)
+ goto fail;
+
+ ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
+ "driver");
+ if (ret)
+ goto rm_dev;
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump ||
+ !device_create_file(dev, &dev_attr_coredump))
+ return 0;
+
+ sysfs_remove_link(&dev->kobj, "driver");
+
+rm_dev:
+ sysfs_remove_link(&dev->driver->p->kobj,
kobject_name(&dev->kobj));
- if (ret == 0) {
- ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
- "driver");
- if (ret)
- sysfs_remove_link(&dev->driver->p->kobj,
- kobject_name(&dev->kobj));
- }
+
+fail:
return ret;
}
@@ -314,6 +337,8 @@ static void driver_sysfs_remove(struct device *dev)
struct device_driver *drv = dev->driver;
if (drv) {
+ if (drv->coredump)
+ device_remove_file(dev, &dev_attr_coredump);
sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
sysfs_remove_link(&dev->kobj, "driver");
}
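A driver opts in by filling the new coredump op in its struct
device_driver: with CONFIG_DEV_COREDUMP enabled the core then creates
the write-only "coredump" attribute, and a write to it invokes the
callback under the device lock. A hedged sketch (driver name and body
illustrative):

    static void foo_coredump(struct device *dev)
    {
            /* snapshot device state, e.g. hand a buffer to dev_coredumpv() */
    }

    static struct platform_driver foo_driver = {
            .driver = {
                    .name     = "foo",
                    .coredump = foo_coredump,
            },
            /* .probe / .remove elided */
    };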
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 7be310f7db73..f1a3353f3494 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -1,23 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is provided under the GPLv2 license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 71d577025285..95b67281cd2a 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/devres.c - device resource management
*
* Copyright (c) 2006 SUSE Linux Products GmbH
* Copyright (c) 2006 Tejun Heo <teheo@suse.de>
- *
- * This file is released under the GPLv2.
*/
#include <linux/device.h>
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index ea9726e71468..d987dcd1bd56 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Contiguous Memory Allocator for DMA mapping framework
* Copyright (c) 2010-2011 by Samsung Electronics.
* Written by:
* Marek Szyprowski <m.szyprowski@samsung.com>
* Michal Nazarewicz <mina86@mina86.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License or (at your optional) any later version of the license.
*/
#define pr_fmt(fmt) "cma: " fmt
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index e584eddef0a7..3b118353ea17 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/dma-mapping.c - arch-independent dma-mapping routines
*
* Copyright (c) 2006 SUSE Linux Products GmbH
* Copyright (c) 2006 Tejun Heo <teheo@suse.de>
- *
- * This file is released under the GPLv2.
*/
#include <linux/acpi.h>
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 4eabfe28d2b3..ba912558a510 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* driver.c - centralized device driver management
*
@@ -5,9 +6,6 @@
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2007 Novell Inc.
- *
- * This file is released under the GPLv2
- *
*/
#include <linux/device.h>
@@ -52,7 +50,7 @@ int driver_for_each_device(struct device_driver *drv, struct device *start,
klist_iter_init_node(&drv->p->klist_devices, &i,
start ? &start->p->knode_driver : NULL);
- while ((dev = next_device(&i)) && !error)
+ while (!error && (dev = next_device(&i)))
error = fn(dev, data);
klist_iter_exit(&i);
return error;
diff --git a/drivers/base/firmware.c b/drivers/base/firmware.c
index 113815556809..8dff940e0db9 100644
--- a/drivers/base/firmware.c
+++ b/drivers/base/firmware.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* firmware.c - firmware subsystem hoohaw.
*
@@ -5,8 +6,6 @@
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2007 Novell Inc.
- *
- * This file is released under the GPLv2
*/
#include <linux/kobject.h>
#include <linux/module.h>
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4b57cf5bc81d..7dd36ace6152 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* firmware_class.c - Multi purpose firmware loading support
*
@@ -41,6 +42,96 @@ MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
+enum fw_status {
+ FW_STATUS_UNKNOWN,
+ FW_STATUS_LOADING,
+ FW_STATUS_DONE,
+ FW_STATUS_ABORTED,
+};
+
+/*
+ * Concurrent request_firmware() calls for the same firmware need to be
+ * serialized. struct fw_state is a simple state machine which holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+ struct completion completion;
+ enum fw_status status;
+};
+
+/* firmware behavior options */
+#define FW_OPT_UEVENT (1U << 0)
+#define FW_OPT_NOWAIT (1U << 1)
+#define FW_OPT_USERHELPER (1U << 2)
+#define FW_OPT_NO_WARN (1U << 3)
+#define FW_OPT_NOCACHE (1U << 4)
+#define FW_OPT_NOFALLBACK (1U << 5)
+
+struct firmware_cache {
+ /* fw_priv instances will be added to the list below */
+ spinlock_t lock;
+ struct list_head head;
+ int state;
+
+#ifdef CONFIG_PM_SLEEP
+ /*
+ * Names of firmware images which have been cached successfully
+ * will be added to the list below so that the device uncache
+ * helper can trace which firmware images have been cached
+ * before.
+ */
+ spinlock_t name_lock;
+ struct list_head fw_names;
+
+ struct delayed_work work;
+
+ struct notifier_block pm_notify;
+#endif
+};
+
+struct fw_priv {
+ struct kref ref;
+ struct list_head list;
+ struct firmware_cache *fwc;
+ struct fw_state fw_st;
+ void *data;
+ size_t size;
+ size_t allocated_size;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ bool is_paged_buf;
+ bool need_uevent;
+ struct page **pages;
+ int nr_pages;
+ int page_array_size;
+ struct list_head pending_list;
+#endif
+ const char *fw_name;
+};
+
+struct fw_cache_entry {
+ struct list_head list;
+ const char *name;
+};
+
+struct fw_name_devm {
+ unsigned long magic;
+ const char *name;
+};
+
+static inline struct fw_priv *to_fw_priv(struct kref *ref)
+{
+ return container_of(ref, struct fw_priv, ref);
+}
+
+#define FW_LOADER_NO_CACHE 0
+#define FW_LOADER_START_CACHE 1
+
+/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
+ * guarding against corner cases, a global lock should be OK */
+static DEFINE_MUTEX(fw_lock);
+
+static struct firmware_cache fw_cache;
+
/* Builtin firmware support */
#ifdef CONFIG_FW_LOADER
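Taken together, the relocated declarations describe the request
lifecycle: a fw_priv is looked up or allocated per firmware name,
driven from LOADING to DONE or ABORTED, and waiters sleep on the
embedded completion. A hedged sketch of that flow using the helpers
this patch defines (error handling trimmed):

    struct fw_priv *fw_priv;

    /* returns 1 when joining an in-flight request for the same name */
    if (alloc_lookup_fw_priv("img.bin", &fw_cache, &fw_priv, NULL, 0) < 0)
            return -ENOMEM;

    fw_state_start(fw_priv);        /* FW_STATUS_LOADING */
    /* ... loader fills fw_priv->data and fw_priv->size ... */
    fw_state_done(fw_priv);         /* wakes every fw_state_wait() sleeper */

    if (fw_state_is_aborted(fw_priv))
            free_fw_priv(fw_priv);  /* drops the kref; frees on last put */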
@@ -48,6 +139,14 @@ MODULE_LICENSE("GPL");
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
+static void fw_copy_to_prealloc_buf(struct firmware *fw,
+ void *buf, size_t size)
+{
+ if (!buf || size < fw->size)
+ return;
+ memcpy(buf, fw->data, fw->size);
+}
+
static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
void *buf, size_t size)
{
@@ -57,9 +156,8 @@ static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
if (strcmp(name, b_fw->name) == 0) {
fw->size = b_fw->size;
fw->data = b_fw->data;
+ fw_copy_to_prealloc_buf(fw, buf, size);
- if (buf && fw->size <= size)
- memcpy(buf, fw->data, fw->size);
return true;
}
}
@@ -93,13 +191,6 @@ static inline bool fw_is_builtin_firmware(const struct firmware *fw)
}
#endif
-enum fw_status {
- FW_STATUS_UNKNOWN,
- FW_STATUS_LOADING,
- FW_STATUS_DONE,
- FW_STATUS_ABORTED,
-};
-
static int loading_timeout = 60; /* In seconds */
static inline long firmware_loading_timeout(void)
@@ -107,29 +198,17 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
-/*
- * Concurrent request_firmware() for the same firmware need to be
- * serialized. struct fw_state is simple state machine which hold the
- * state of the firmware loading.
- */
-struct fw_state {
- struct completion completion;
- enum fw_status status;
-};
-
-static void fw_state_init(struct fw_state *fw_st)
+static void fw_state_init(struct fw_priv *fw_priv)
{
+ struct fw_state *fw_st = &fw_priv->fw_st;
+
init_completion(&fw_st->completion);
fw_st->status = FW_STATUS_UNKNOWN;
}
-static inline bool __fw_state_is_done(enum fw_status status)
-{
- return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
-}
-
-static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
+static int __fw_state_wait_common(struct fw_priv *fw_priv, long timeout)
{
+ struct fw_state *fw_st = &fw_priv->fw_st;
long ret;
ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
@@ -141,226 +220,172 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
return ret < 0 ? ret : 0;
}
-static void __fw_state_set(struct fw_state *fw_st,
+static void __fw_state_set(struct fw_priv *fw_priv,
enum fw_status status)
{
+ struct fw_state *fw_st = &fw_priv->fw_st;
+
WRITE_ONCE(fw_st->status, status);
if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
complete_all(&fw_st->completion);
}
-#define fw_state_start(fw_st) \
- __fw_state_set(fw_st, FW_STATUS_LOADING)
-#define fw_state_done(fw_st) \
- __fw_state_set(fw_st, FW_STATUS_DONE)
-#define fw_state_aborted(fw_st) \
- __fw_state_set(fw_st, FW_STATUS_ABORTED)
-#define fw_state_wait(fw_st) \
- __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
-
-static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
+static inline void fw_state_start(struct fw_priv *fw_priv)
{
- return fw_st->status == status;
+ __fw_state_set(fw_priv, FW_STATUS_LOADING);
}
-#define fw_state_is_aborted(fw_st) \
- __fw_state_check(fw_st, FW_STATUS_ABORTED)
-
-#ifdef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_aborted(fw_st) \
- __fw_state_set(fw_st, FW_STATUS_ABORTED)
-#define fw_state_is_done(fw_st) \
- __fw_state_check(fw_st, FW_STATUS_DONE)
-#define fw_state_is_loading(fw_st) \
- __fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_wait_timeout(fw_st, timeout) \
- __fw_state_wait_common(fw_st, timeout)
-
-#endif /* CONFIG_FW_LOADER_USER_HELPER */
+static inline void fw_state_done(struct fw_priv *fw_priv)
+{
+ __fw_state_set(fw_priv, FW_STATUS_DONE);
+}
-/* firmware behavior options */
-#define FW_OPT_UEVENT (1U << 0)
-#define FW_OPT_NOWAIT (1U << 1)
-#ifdef CONFIG_FW_LOADER_USER_HELPER
-#define FW_OPT_USERHELPER (1U << 2)
-#else
-#define FW_OPT_USERHELPER 0
-#endif
-#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
-#define FW_OPT_FALLBACK FW_OPT_USERHELPER
-#else
-#define FW_OPT_FALLBACK 0
-#endif
-#define FW_OPT_NO_WARN (1U << 3)
-#define FW_OPT_NOCACHE (1U << 4)
+static inline void fw_state_aborted(struct fw_priv *fw_priv)
+{
+ __fw_state_set(fw_priv, FW_STATUS_ABORTED);
+}
-struct firmware_cache {
- /* firmware_buf instance will be added into the below list */
- spinlock_t lock;
- struct list_head head;
- int state;
+static inline int fw_state_wait(struct fw_priv *fw_priv)
+{
+ return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
+}
-#ifdef CONFIG_PM_SLEEP
- /*
- * Names of firmware images which have been cached successfully
- * will be added into the below list so that device uncache
- * helper can trace which firmware images have been cached
- * before.
- */
- spinlock_t name_lock;
- struct list_head fw_names;
+static bool __fw_state_check(struct fw_priv *fw_priv,
+ enum fw_status status)
+{
+ struct fw_state *fw_st = &fw_priv->fw_st;
- struct delayed_work work;
+ return fw_st->status == status;
+}
- struct notifier_block pm_notify;
-#endif
-};
+static inline bool fw_state_is_aborted(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_ABORTED);
+}
-struct firmware_buf {
- struct kref ref;
- struct list_head list;
- struct firmware_cache *fwc;
- struct fw_state fw_st;
- void *data;
- size_t size;
- size_t allocated_size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
- bool is_paged_buf;
- bool need_uevent;
- struct page **pages;
- int nr_pages;
- int page_array_size;
- struct list_head pending_list;
-#endif
- const char *fw_id;
-};
-struct fw_cache_entry {
- struct list_head list;
- const char *name;
-};
+static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_DONE);
+}
-struct fw_name_devm {
- unsigned long magic;
- const char *name;
-};
+static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_LOADING);
+}
-#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
+static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
+{
+ return __fw_state_wait_common(fw_priv, timeout);
+}
-#define FW_LOADER_NO_CACHE 0
-#define FW_LOADER_START_CACHE 1
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
static int fw_cache_piggyback_on_request(const char *name);
-/* fw_lock could be moved to 'struct firmware_priv' but since it is just
- * guarding for corner cases a global lock should be OK */
-static DEFINE_MUTEX(fw_lock);
-
-static struct firmware_cache fw_cache;
-
-static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
- struct firmware_cache *fwc,
- void *dbuf, size_t size)
+static struct fw_priv *__allocate_fw_priv(const char *fw_name,
+ struct firmware_cache *fwc,
+ void *dbuf, size_t size)
{
- struct firmware_buf *buf;
+ struct fw_priv *fw_priv;
- buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
- if (!buf)
+ fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
+ if (!fw_priv)
return NULL;
- buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
- if (!buf->fw_id) {
- kfree(buf);
+ fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
+ if (!fw_priv->fw_name) {
+ kfree(fw_priv);
return NULL;
}
- kref_init(&buf->ref);
- buf->fwc = fwc;
- buf->data = dbuf;
- buf->allocated_size = size;
- fw_state_init(&buf->fw_st);
+ kref_init(&fw_priv->ref);
+ fw_priv->fwc = fwc;
+ fw_priv->data = dbuf;
+ fw_priv->allocated_size = size;
+ fw_state_init(fw_priv);
#ifdef CONFIG_FW_LOADER_USER_HELPER
- INIT_LIST_HEAD(&buf->pending_list);
+ INIT_LIST_HEAD(&fw_priv->pending_list);
#endif
- pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
+ pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);
- return buf;
+ return fw_priv;
}
-static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
+static struct fw_priv *__lookup_fw_priv(const char *fw_name)
{
- struct firmware_buf *tmp;
+ struct fw_priv *tmp;
struct firmware_cache *fwc = &fw_cache;
list_for_each_entry(tmp, &fwc->head, list)
- if (!strcmp(tmp->fw_id, fw_name))
+ if (!strcmp(tmp->fw_name, fw_name))
return tmp;
return NULL;
}
/* Returns 1 for batching firmware requests with the same name */
-static int fw_lookup_and_allocate_buf(const char *fw_name,
- struct firmware_cache *fwc,
- struct firmware_buf **buf, void *dbuf,
- size_t size)
+static int alloc_lookup_fw_priv(const char *fw_name,
+ struct firmware_cache *fwc,
+ struct fw_priv **fw_priv, void *dbuf,
+ size_t size)
{
- struct firmware_buf *tmp;
+ struct fw_priv *tmp;
spin_lock(&fwc->lock);
- tmp = __fw_lookup_buf(fw_name);
+ tmp = __lookup_fw_priv(fw_name);
if (tmp) {
kref_get(&tmp->ref);
spin_unlock(&fwc->lock);
- *buf = tmp;
- pr_debug("batched request - sharing the same struct firmware_buf and lookup for multiple requests\n");
+ *fw_priv = tmp;
+ pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
return 1;
}
- tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
+ tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
if (tmp)
list_add(&tmp->list, &fwc->head);
spin_unlock(&fwc->lock);
- *buf = tmp;
+ *fw_priv = tmp;
return tmp ? 0 : -ENOMEM;
}
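
alloc_lookup_fw_priv() above is a textbook lookup-or-allocate under a single lock: holding fwc->lock across both the search and the list insertion guarantees that two concurrent requests for the same image can never both allocate, which is what makes request batching safe. A minimal user-space sketch of the same pattern, with a pthread mutex and plain malloc standing in for the kernel's spinlock and kref (all names hypothetical; the 1/0/-1 return convention mirrors the kernel's 1/0/-ENOMEM):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	struct entry *next;
	int refcount;
	char *name;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *cache_head;

/* Returns 1 when an existing entry is shared (ref bumped),
 * 0 on fresh allocation, -1 on allocation failure. */
static int lookup_or_alloc(const char *name, struct entry **out)
{
	struct entry *e;

	pthread_mutex_lock(&cache_lock);
	for (e = cache_head; e; e = e->next) {
		if (!strcmp(e->name, name)) {
			e->refcount++;		/* kref_get() analogue */
			pthread_mutex_unlock(&cache_lock);
			*out = e;
			return 1;
		}
	}
	e = calloc(1, sizeof(*e));
	if (e && !(e->name = strdup(name))) {
		free(e);
		e = NULL;
	}
	if (e) {
		e->refcount = 1;		/* kref_init() analogue */
		e->next = cache_head;
		cache_head = e;
	}
	pthread_mutex_unlock(&cache_lock);
	*out = e;
	return e ? 0 : -1;
}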
-static void __fw_free_buf(struct kref *ref)
+static void __free_fw_priv(struct kref *ref)
__releases(&fwc->lock)
{
- struct firmware_buf *buf = to_fwbuf(ref);
- struct firmware_cache *fwc = buf->fwc;
+ struct fw_priv *fw_priv = to_fw_priv(ref);
+ struct firmware_cache *fwc = fw_priv->fwc;
- pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
- __func__, buf->fw_id, buf, buf->data,
- (unsigned int)buf->size);
+ pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+ __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
+ (unsigned int)fw_priv->size);
- list_del(&buf->list);
+ list_del(&fw_priv->list);
spin_unlock(&fwc->lock);
#ifdef CONFIG_FW_LOADER_USER_HELPER
- if (buf->is_paged_buf) {
+ if (fw_priv->is_paged_buf) {
int i;
- vunmap(buf->data);
- for (i = 0; i < buf->nr_pages; i++)
- __free_page(buf->pages[i]);
- vfree(buf->pages);
+ vunmap(fw_priv->data);
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ vfree(fw_priv->pages);
} else
#endif
- if (!buf->allocated_size)
- vfree(buf->data);
- kfree_const(buf->fw_id);
- kfree(buf);
+ if (!fw_priv->allocated_size)
+ vfree(fw_priv->data);
+ kfree_const(fw_priv->fw_name);
+ kfree(fw_priv);
}
-static void fw_free_buf(struct firmware_buf *buf)
+static void free_fw_priv(struct fw_priv *fw_priv)
{
- struct firmware_cache *fwc = buf->fwc;
+ struct firmware_cache *fwc = fw_priv->fwc;
spin_lock(&fwc->lock);
- if (!kref_put(&buf->ref, __fw_free_buf))
+ if (!kref_put(&fw_priv->ref, __free_fw_priv))
spin_unlock(&fwc->lock);
}
@@ -383,7 +408,7 @@ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
static int
-fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
+fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
{
loff_t size;
int i, len;
@@ -393,9 +418,9 @@ fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
size_t msize = INT_MAX;
/* Already populated data member means we're loading into a buffer */
- if (buf->data) {
+ if (fw_priv->data) {
id = READING_FIRMWARE_PREALLOC_BUFFER;
- msize = buf->allocated_size;
+ msize = fw_priv->allocated_size;
}
path = __getname();
@@ -408,15 +433,15 @@ fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
continue;
len = snprintf(path, PATH_MAX, "%s/%s",
- fw_path[i], buf->fw_id);
+ fw_path[i], fw_priv->fw_name);
if (len >= PATH_MAX) {
rc = -ENAMETOOLONG;
break;
}
- buf->size = 0;
- rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
- id);
+ fw_priv->size = 0;
+ rc = kernel_read_file_from_path(path, &fw_priv->data, &size,
+ msize, id);
if (rc) {
if (rc == -ENOENT)
dev_dbg(device, "loading %s failed with error %d\n",
@@ -426,9 +451,9 @@ fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
path, rc);
continue;
}
- dev_dbg(device, "direct-loading %s\n", buf->fw_id);
- buf->size = size;
- fw_state_done(&buf->fw_st);
+ dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name);
+ fw_priv->size = size;
+ fw_state_done(fw_priv);
break;
}
__putname(path);
@@ -444,22 +469,22 @@ static void firmware_free_data(const struct firmware *fw)
vfree(fw->data);
return;
}
- fw_free_buf(fw->priv);
+ free_fw_priv(fw->priv);
}
/* copy the pages buffer info from fw_priv into the firmware struct */
-static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
+static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
- fw->priv = buf;
+ fw->priv = fw_priv;
#ifdef CONFIG_FW_LOADER_USER_HELPER
- fw->pages = buf->pages;
+ fw->pages = fw_priv->pages;
#endif
- fw->size = buf->size;
- fw->data = buf->data;
+ fw->size = fw_priv->size;
+ fw->data = fw_priv->data;
- pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
- __func__, buf->fw_id, buf, buf->data,
- (unsigned int)buf->size);
+ pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+ __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
+ (unsigned int)fw_priv->size);
}
#ifdef CONFIG_PM_SLEEP
@@ -523,13 +548,13 @@ static int fw_add_devm_name(struct device *dev, const char *name)
}
#endif
-static int assign_firmware_buf(struct firmware *fw, struct device *device,
- unsigned int opt_flags)
+static int assign_fw(struct firmware *fw, struct device *device,
+ unsigned int opt_flags)
{
- struct firmware_buf *buf = fw->priv;
+ struct fw_priv *fw_priv = fw->priv;
mutex_lock(&fw_lock);
- if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
+ if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
mutex_unlock(&fw_lock);
return -ENOENT;
}
@@ -544,20 +569,20 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
/* don't cache firmware handled without uevent */
if (device && (opt_flags & FW_OPT_UEVENT) &&
!(opt_flags & FW_OPT_NOCACHE))
- fw_add_devm_name(device, buf->fw_id);
+ fw_add_devm_name(device, fw_priv->fw_name);
/*
* After caching firmware image is started, let it piggyback
* on request firmware.
*/
if (!(opt_flags & FW_OPT_NOCACHE) &&
- buf->fwc->state == FW_LOADER_START_CACHE) {
- if (fw_cache_piggyback_on_request(buf->fw_id))
- kref_get(&buf->ref);
+ fw_priv->fwc->state == FW_LOADER_START_CACHE) {
+ if (fw_cache_piggyback_on_request(fw_priv->fw_name))
+ kref_get(&fw_priv->ref);
}
/* pass the pages buffer to driver at the last minute */
- fw_set_page_data(buf, fw);
+ fw_set_page_data(fw_priv, fw);
mutex_unlock(&fw_lock);
return 0;
}
@@ -566,49 +591,50 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
* user-mode helper code
*/
#ifdef CONFIG_FW_LOADER_USER_HELPER
-struct firmware_priv {
+struct fw_sysfs {
bool nowait;
struct device dev;
- struct firmware_buf *buf;
+ struct fw_priv *fw_priv;
struct firmware *fw;
};
-static struct firmware_priv *to_firmware_priv(struct device *dev)
+static struct fw_sysfs *to_fw_sysfs(struct device *dev)
{
- return container_of(dev, struct firmware_priv, dev);
+ return container_of(dev, struct fw_sysfs, dev);
}
-static void __fw_load_abort(struct firmware_buf *buf)
+static void __fw_load_abort(struct fw_priv *fw_priv)
{
/*
* There is a small window in which the user can write to 'loading'
* between the load completing and the disappearance of 'loading'.
*/
- if (fw_state_is_done(&buf->fw_st))
+ if (fw_sysfs_done(fw_priv))
return;
- list_del_init(&buf->pending_list);
- fw_state_aborted(&buf->fw_st);
+ list_del_init(&fw_priv->pending_list);
+ fw_state_aborted(fw_priv);
}
-static void fw_load_abort(struct firmware_priv *fw_priv)
+static void fw_load_abort(struct fw_sysfs *fw_sysfs)
{
- struct firmware_buf *buf = fw_priv->buf;
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
- __fw_load_abort(buf);
+ __fw_load_abort(fw_priv);
}
static LIST_HEAD(pending_fw_head);
static void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
- struct firmware_buf *buf;
- struct firmware_buf *next;
+ struct fw_priv *fw_priv;
+ struct fw_priv *next;
mutex_lock(&fw_lock);
- list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
- if (!buf->need_uevent || !only_kill_custom)
- __fw_load_abort(buf);
+ list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
+ pending_list) {
+ if (!fw_priv->need_uevent || !only_kill_custom)
+ __fw_load_abort(fw_priv);
}
mutex_unlock(&fw_lock);
}
@@ -651,18 +677,18 @@ ATTRIBUTE_GROUPS(firmware_class);
static void fw_dev_release(struct device *dev)
{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- kfree(fw_priv);
+ kfree(fw_sysfs);
}
-static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
+static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
- if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
+ if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
return -ENOMEM;
- if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
+ if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
return -ENOMEM;
return 0;
@@ -670,12 +696,12 @@ static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
int err = 0;
mutex_lock(&fw_lock);
- if (fw_priv->buf)
- err = do_firmware_uevent(fw_priv, env);
+ if (fw_sysfs->fw_priv)
+ err = do_firmware_uevent(fw_sysfs, env);
mutex_unlock(&fw_lock);
return err;
}
@@ -687,15 +713,25 @@ static struct class firmware_class = {
.dev_release = fw_dev_release,
};
+static inline int register_sysfs_loader(void)
+{
+ return class_register(&firmware_class);
+}
+
+static inline void unregister_sysfs_loader(void)
+{
+ class_unregister(&firmware_class);
+}
+
static ssize_t firmware_loading_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
int loading = 0;
mutex_lock(&fw_lock);
- if (fw_priv->buf)
- loading = fw_state_is_loading(&fw_priv->buf->fw_st);
+ if (fw_sysfs->fw_priv)
+ loading = fw_sysfs_loading(fw_sysfs->fw_priv);
mutex_unlock(&fw_lock);
return sprintf(buf, "%d\n", loading);
@@ -707,14 +743,15 @@ static ssize_t firmware_loading_show(struct device *dev,
#endif
/* a paged buffer should be mapped/unmapped only once */
-static int fw_map_pages_buf(struct firmware_buf *buf)
+static int map_fw_priv_pages(struct fw_priv *fw_priv)
{
- if (!buf->is_paged_buf)
+ if (!fw_priv->is_paged_buf)
return 0;
- vunmap(buf->data);
- buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
- if (!buf->data)
+ vunmap(fw_priv->data);
+ fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
+ PAGE_KERNEL_RO);
+ if (!fw_priv->data)
return -ENOMEM;
return 0;
}
@@ -736,32 +773,32 @@ static ssize_t firmware_loading_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
- struct firmware_buf *fw_buf;
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
ssize_t written = count;
int loading = simple_strtol(buf, NULL, 10);
int i;
mutex_lock(&fw_lock);
- fw_buf = fw_priv->buf;
- if (fw_state_is_aborted(&fw_buf->fw_st))
+ fw_priv = fw_sysfs->fw_priv;
+ if (fw_state_is_aborted(fw_priv))
goto out;
switch (loading) {
case 1:
/* discarding any previous partial load */
- if (!fw_state_is_done(&fw_buf->fw_st)) {
- for (i = 0; i < fw_buf->nr_pages; i++)
- __free_page(fw_buf->pages[i]);
- vfree(fw_buf->pages);
- fw_buf->pages = NULL;
- fw_buf->page_array_size = 0;
- fw_buf->nr_pages = 0;
- fw_state_start(&fw_buf->fw_st);
+ if (!fw_sysfs_done(fw_priv)) {
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ vfree(fw_priv->pages);
+ fw_priv->pages = NULL;
+ fw_priv->page_array_size = 0;
+ fw_priv->nr_pages = 0;
+ fw_state_start(fw_priv);
}
break;
case 0:
- if (fw_state_is_loading(&fw_buf->fw_st)) {
+ if (fw_sysfs_loading(fw_priv)) {
int rc;
/*
@@ -770,25 +807,25 @@ static ssize_t firmware_loading_store(struct device *dev,
* see the mapped 'buf->data' once the loading
* is completed.
* */
- rc = fw_map_pages_buf(fw_buf);
+ rc = map_fw_priv_pages(fw_priv);
if (rc)
dev_err(dev, "%s: map pages failed\n",
__func__);
else
rc = security_kernel_post_read_file(NULL,
- fw_buf->data, fw_buf->size,
+ fw_priv->data, fw_priv->size,
READING_FIRMWARE);
/*
* Same logic as fw_load_abort, only the DONE bit
* is ignored and we set ABORT only on failure.
*/
- list_del_init(&fw_buf->pending_list);
+ list_del_init(&fw_priv->pending_list);
if (rc) {
- fw_state_aborted(&fw_buf->fw_st);
+ fw_state_aborted(fw_priv);
written = rc;
} else {
- fw_state_done(&fw_buf->fw_st);
+ fw_state_done(fw_priv);
}
break;
}
@@ -797,7 +834,7 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
/* fallthrough */
case -1:
- fw_load_abort(fw_priv);
+ fw_load_abort(fw_sysfs);
break;
}
out:
@@ -807,16 +844,16 @@ out:
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
-static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
+static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
loff_t offset, size_t count, bool read)
{
if (read)
- memcpy(buffer, buf->data + offset, count);
+ memcpy(buffer, fw_priv->data + offset, count);
else
- memcpy(buf->data + offset, buffer, count);
+ memcpy(fw_priv->data + offset, buffer, count);
}
-static void firmware_rw(struct firmware_buf *buf, char *buffer,
+static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
loff_t offset, size_t count, bool read)
{
while (count) {
@@ -825,14 +862,14 @@ static void firmware_rw(struct firmware_buf *buf, char *buffer,
int page_ofs = offset & (PAGE_SIZE-1);
int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
- page_data = kmap(buf->pages[page_nr]);
+ page_data = kmap(fw_priv->pages[page_nr]);
if (read)
memcpy(buffer, page_data + page_ofs, page_cnt);
else
memcpy(page_data + page_ofs, buffer, page_cnt);
- kunmap(buf->pages[page_nr]);
+ kunmap(fw_priv->pages[page_nr]);
buffer += page_cnt;
offset += page_cnt;
count -= page_cnt;
@@ -844,69 +881,69 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
- struct firmware_buf *buf;
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
ssize_t ret_count;
mutex_lock(&fw_lock);
- buf = fw_priv->buf;
- if (!buf || fw_state_is_done(&buf->fw_st)) {
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_sysfs_done(fw_priv)) {
ret_count = -ENODEV;
goto out;
}
- if (offset > buf->size) {
+ if (offset > fw_priv->size) {
ret_count = 0;
goto out;
}
- if (count > buf->size - offset)
- count = buf->size - offset;
+ if (count > fw_priv->size - offset)
+ count = fw_priv->size - offset;
ret_count = count;
- if (buf->data)
- firmware_rw_buf(buf, buffer, offset, count, true);
+ if (fw_priv->data)
+ firmware_rw_data(fw_priv, buffer, offset, count, true);
else
- firmware_rw(buf, buffer, offset, count, true);
+ firmware_rw(fw_priv, buffer, offset, count, true);
out:
mutex_unlock(&fw_lock);
return ret_count;
}
-static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
+static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
- struct firmware_buf *buf = fw_priv->buf;
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
/* If the array of pages is too small, grow it... */
- if (buf->page_array_size < pages_needed) {
+ if (fw_priv->page_array_size < pages_needed) {
int new_array_size = max(pages_needed,
- buf->page_array_size * 2);
+ fw_priv->page_array_size * 2);
struct page **new_pages;
new_pages = vmalloc(new_array_size * sizeof(void *));
if (!new_pages) {
- fw_load_abort(fw_priv);
+ fw_load_abort(fw_sysfs);
return -ENOMEM;
}
- memcpy(new_pages, buf->pages,
- buf->page_array_size * sizeof(void *));
- memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
- (new_array_size - buf->page_array_size));
- vfree(buf->pages);
- buf->pages = new_pages;
- buf->page_array_size = new_array_size;
+ memcpy(new_pages, fw_priv->pages,
+ fw_priv->page_array_size * sizeof(void *));
+ memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
+ (new_array_size - fw_priv->page_array_size));
+ vfree(fw_priv->pages);
+ fw_priv->pages = new_pages;
+ fw_priv->page_array_size = new_array_size;
}
- while (buf->nr_pages < pages_needed) {
- buf->pages[buf->nr_pages] =
+ while (fw_priv->nr_pages < pages_needed) {
+ fw_priv->pages[fw_priv->nr_pages] =
alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (!buf->pages[buf->nr_pages]) {
- fw_load_abort(fw_priv);
+ if (!fw_priv->pages[fw_priv->nr_pages]) {
+ fw_load_abort(fw_sysfs);
return -ENOMEM;
}
- buf->nr_pages++;
+ fw_priv->nr_pages++;
}
return 0;
}
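
fw_realloc_pages() above grows the page-pointer array geometrically (to at least double the old capacity), so a userspace helper streaming the image in many small writes pays amortized O(1) per byte. The same growth pattern as a self-contained user-space sketch (names hypothetical; realloc-style growth stands in for vmalloc/alloc_page):

#include <stdlib.h>
#include <string.h>

struct pagevec {
	void **pages;
	size_t nr;	/* pages actually allocated */
	size_t cap;	/* slots in the pointer array */
};

static int pagevec_reserve(struct pagevec *pv, size_t needed)
{
	if (pv->cap < needed) {
		size_t ncap = pv->cap * 2 > needed ? pv->cap * 2 : needed;
		void **np = calloc(ncap, sizeof(*np));	/* zero-filled tail */

		if (!np)
			return -1;
		memcpy(np, pv->pages, pv->nr * sizeof(*np));
		free(pv->pages);
		pv->pages = np;
		pv->cap = ncap;
	}
	return 0;
}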
@@ -928,37 +965,37 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
- struct firmware_priv *fw_priv = to_firmware_priv(dev);
- struct firmware_buf *buf;
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
ssize_t retval;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
mutex_lock(&fw_lock);
- buf = fw_priv->buf;
- if (!buf || fw_state_is_done(&buf->fw_st)) {
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_sysfs_done(fw_priv)) {
retval = -ENODEV;
goto out;
}
- if (buf->data) {
- if (offset + count > buf->allocated_size) {
+ if (fw_priv->data) {
+ if (offset + count > fw_priv->allocated_size) {
retval = -ENOMEM;
goto out;
}
- firmware_rw_buf(buf, buffer, offset, count, false);
+ firmware_rw_data(fw_priv, buffer, offset, count, false);
retval = count;
} else {
- retval = fw_realloc_buffer(fw_priv, offset + count);
+ retval = fw_realloc_pages(fw_sysfs, offset + count);
if (retval)
goto out;
retval = count;
- firmware_rw(buf, buffer, offset, count, false);
+ firmware_rw(fw_priv, buffer, offset, count, false);
}
- buf->size = max_t(size_t, offset + count, buf->size);
+ fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
mutex_unlock(&fw_lock);
return retval;
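
Together, the 'loading' and 'data' attributes implement the sysfs fallback protocol: userspace writes 1 to loading, streams the image into data, then writes 0 to finish (writing -1 instead aborts the load). A hedged user-space sketch; the directory argument is an illustrative example, since the real path depends on the device that requested the firmware:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* dir: e.g. "/sys/class/firmware/<name>" (hypothetical path) */
static int feed_firmware(const char *dir, const void *img, size_t len)
{
	char path[256];
	int lfd, dfd, ret = -1;

	snprintf(path, sizeof(path), "%s/loading", dir);
	lfd = open(path, O_WRONLY);
	if (lfd < 0)
		return -1;
	snprintf(path, sizeof(path), "%s/data", dir);
	dfd = open(path, O_WRONLY);
	if (dfd < 0)
		goto out;

	/* error handling of the writes elided for brevity */
	write(lfd, "1", 1);	/* start; discards any partial load */
	write(dfd, img, len);	/* stream the image */
	write(lfd, "0", 1);	/* done; triggers vmap + security check */
	ret = 0;

	close(dfd);
out:
	close(lfd);
	return ret;
}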
@@ -991,22 +1028,22 @@ static const struct attribute_group *fw_dev_attr_groups[] = {
NULL
};
-static struct firmware_priv *
+static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
struct device *device, unsigned int opt_flags)
{
- struct firmware_priv *fw_priv;
+ struct fw_sysfs *fw_sysfs;
struct device *f_dev;
- fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
- if (!fw_priv) {
- fw_priv = ERR_PTR(-ENOMEM);
+ fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
+ if (!fw_sysfs) {
+ fw_sysfs = ERR_PTR(-ENOMEM);
goto exit;
}
- fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
- fw_priv->fw = firmware;
- f_dev = &fw_priv->dev;
+ fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+ fw_sysfs->fw = firmware;
+ f_dev = &fw_sysfs->dev;
device_initialize(f_dev);
dev_set_name(f_dev, "%s", fw_name);
@@ -1014,20 +1051,20 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
f_dev->class = &firmware_class;
f_dev->groups = fw_dev_attr_groups;
exit:
- return fw_priv;
+ return fw_sysfs;
}
/* load a firmware via user helper */
-static int _request_firmware_load(struct firmware_priv *fw_priv,
+static int _request_firmware_load(struct fw_sysfs *fw_sysfs,
unsigned int opt_flags, long timeout)
{
int retval = 0;
- struct device *f_dev = &fw_priv->dev;
- struct firmware_buf *buf = fw_priv->buf;
+ struct device *f_dev = &fw_sysfs->dev;
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
/* fall back on userspace loading */
- if (!buf->data)
- buf->is_paged_buf = true;
+ if (!fw_priv->data)
+ fw_priv->is_paged_buf = true;
dev_set_uevent_suppress(f_dev, true);
@@ -1038,31 +1075,31 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
}
mutex_lock(&fw_lock);
- list_add(&buf->pending_list, &pending_fw_head);
+ list_add(&fw_priv->pending_list, &pending_fw_head);
mutex_unlock(&fw_lock);
if (opt_flags & FW_OPT_UEVENT) {
- buf->need_uevent = true;
+ fw_priv->need_uevent = true;
dev_set_uevent_suppress(f_dev, false);
- dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
- kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
+ dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
+ kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
} else {
timeout = MAX_JIFFY_OFFSET;
}
- retval = fw_state_wait_timeout(&buf->fw_st, timeout);
+ retval = fw_sysfs_wait_timeout(fw_priv, timeout);
if (retval < 0) {
mutex_lock(&fw_lock);
- fw_load_abort(fw_priv);
+ fw_load_abort(fw_sysfs);
mutex_unlock(&fw_lock);
}
- if (fw_state_is_aborted(&buf->fw_st)) {
+ if (fw_state_is_aborted(fw_priv)) {
if (retval == -ERESTARTSYS)
retval = -EINTR;
else
retval = -EAGAIN;
- } else if (buf->is_paged_buf && !buf->data)
+ } else if (fw_priv->is_paged_buf && !fw_priv->data)
retval = -ENOMEM;
device_del(f_dev);
@@ -1075,7 +1112,7 @@ static int fw_load_from_user_helper(struct firmware *firmware,
const char *name, struct device *device,
unsigned int opt_flags)
{
- struct firmware_priv *fw_priv;
+ struct fw_sysfs *fw_sysfs;
long timeout;
int ret;
@@ -1096,17 +1133,17 @@ static int fw_load_from_user_helper(struct firmware *firmware,
}
}
- fw_priv = fw_create_instance(firmware, name, device, opt_flags);
- if (IS_ERR(fw_priv)) {
- ret = PTR_ERR(fw_priv);
+ fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
+ if (IS_ERR(fw_sysfs)) {
+ ret = PTR_ERR(fw_sysfs);
goto out_unlock;
}
- fw_priv->buf = firmware->priv;
- ret = _request_firmware_load(fw_priv, opt_flags, timeout);
+ fw_sysfs->fw_priv = firmware->priv;
+ ret = _request_firmware_load(fw_sysfs, opt_flags, timeout);
if (!ret)
- ret = assign_firmware_buf(firmware, device, opt_flags);
+ ret = assign_fw(firmware, device, opt_flags);
out_unlock:
usermodehelper_read_unlock();
@@ -1114,16 +1151,60 @@ out_unlock:
return ret;
}
+#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
+static bool fw_force_sysfs_fallback(unsigned int opt_flags)
+{
+ return true;
+}
+#else
+static bool fw_force_sysfs_fallback(unsigned int opt_flags)
+{
+ if (!(opt_flags & FW_OPT_USERHELPER))
+ return false;
+ return true;
+}
+#endif
+
+static bool fw_run_sysfs_fallback(unsigned int opt_flags)
+{
+ if ((opt_flags & FW_OPT_NOFALLBACK))
+ return false;
+
+ return fw_force_sysfs_fallback(opt_flags);
+}
+
+static int fw_sysfs_fallback(struct firmware *fw, const char *name,
+ struct device *device,
+ unsigned int opt_flags,
+ int ret)
+{
+ if (!fw_run_sysfs_fallback(opt_flags))
+ return ret;
+
+ dev_warn(device, "Falling back to user helper\n");
+ return fw_load_from_user_helper(fw, name, device, opt_flags);
+}
#else /* CONFIG_FW_LOADER_USER_HELPER */
-static inline int
-fw_load_from_user_helper(struct firmware *firmware, const char *name,
- struct device *device, unsigned int opt_flags)
+static int fw_sysfs_fallback(struct firmware *fw, const char *name,
+ struct device *device,
+ unsigned int opt_flags,
+ int ret)
{
- return -ENOENT;
+ /* Keep carrying over the same error */
+ return ret;
}
static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
+static inline int register_sysfs_loader(void)
+{
+ return 0;
+}
+
+static inline void unregister_sysfs_loader(void)
+{
+}
+
#endif /* CONFIG_FW_LOADER_USER_HELPER */
/* prepare firmware and firmware_buf structs;
@@ -1135,7 +1216,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
struct device *device, void *dbuf, size_t size)
{
struct firmware *firmware;
- struct firmware_buf *buf;
+ struct fw_priv *fw_priv;
int ret;
*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
@@ -1150,18 +1231,18 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return 0; /* assigned */
}
- ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
+ ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size);
/*
- * bind with 'buf' now to avoid warning in failure path
+ * bind with 'priv' now to avoid warning in failure path
* of requesting firmware.
*/
- firmware->priv = buf;
+ firmware->priv = fw_priv;
if (ret > 0) {
- ret = fw_state_wait(&buf->fw_st);
+ ret = fw_state_wait(fw_priv);
if (!ret) {
- fw_set_page_data(buf, firmware);
+ fw_set_page_data(fw_priv, firmware);
return 0; /* assigned */
}
}
@@ -1177,20 +1258,20 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
* released until the last user calls release_firmware().
*
* Failed batched requests are possible as well; in such cases we just share
- * the struct firmware_buf and won't release it until all requests are woken
+ * the struct fw_priv and won't release it until all requests are woken
* and have gone through this same path.
*/
static void fw_abort_batch_reqs(struct firmware *fw)
{
- struct firmware_buf *buf;
+ struct fw_priv *fw_priv;
/* Loaded directly? */
if (!fw || !fw->priv)
return;
- buf = fw->priv;
- if (!fw_state_is_aborted(&buf->fw_st))
- fw_state_aborted(&buf->fw_st);
+ fw_priv = fw->priv;
+ if (!fw_state_is_aborted(fw_priv))
+ fw_state_aborted(fw_priv);
}
/* called from request_firmware() and request_firmware_work_func() */
@@ -1220,13 +1301,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
dev_warn(device,
"Direct firmware load for %s failed with error %d\n",
name, ret);
- if (opt_flags & FW_OPT_USERHELPER) {
- dev_warn(device, "Falling back to user helper\n");
- ret = fw_load_from_user_helper(fw, name, device,
- opt_flags);
- }
+ ret = fw_sysfs_fallback(fw, name, device, opt_flags, ret);
} else
- ret = assign_firmware_buf(fw, device, opt_flags);
+ ret = assign_fw(fw, device, opt_flags);
out:
if (ret < 0) {
@@ -1268,7 +1345,7 @@ request_firmware(const struct firmware **firmware_p, const char *name,
/* Need to pin this module until return */
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, NULL, 0,
- FW_OPT_UEVENT | FW_OPT_FALLBACK);
+ FW_OPT_UEVENT);
module_put(THIS_MODULE);
return ret;
}
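
The consumer-facing contract is untouched by the rename; only the fallback default moves (request_firmware() no longer passes FW_OPT_FALLBACK, so the sysfs helper runs only when CONFIG_FW_LOADER_USER_HELPER_FALLBACK forces it). Typical driver-side usage, as a sketch; the image name and example_push_to_hw() are hypothetical:

static int example_load(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "example/image.bin", dev);
	if (ret)
		return ret;

	/* fw->data/fw->size stay valid until release_firmware() */
	ret = example_push_to_hw(dev, fw->data, fw->size);

	release_firmware(fw);
	return ret;
}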
@@ -1292,7 +1369,8 @@ int request_firmware_direct(const struct firmware **firmware_p,
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, NULL, 0,
- FW_OPT_UEVENT | FW_OPT_NO_WARN);
+ FW_OPT_UEVENT | FW_OPT_NO_WARN |
+ FW_OPT_NOFALLBACK);
module_put(THIS_MODULE);
return ret;
}
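
With FW_OPT_NOFALLBACK added here, request_firmware_direct() can never end up waiting on the sysfs loader, which, combined with FW_OPT_NO_WARN, makes it the right call for opportunistic, optional firmware. A short sketch (example_apply_patch() is hypothetical):

	const struct firmware *fw;

	/* optional firmware: quiet on absence, no fallback */
	if (!request_firmware_direct(&fw, "example/patch.bin", dev)) {
		example_apply_patch(dev, fw->data, fw->size);
		release_firmware(fw);
	}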
@@ -1321,8 +1399,7 @@ request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, buf, size,
- FW_OPT_UEVENT | FW_OPT_FALLBACK |
- FW_OPT_NOCACHE);
+ FW_OPT_UEVENT | FW_OPT_NOCACHE);
module_put(THIS_MODULE);
return ret;
}
@@ -1414,7 +1491,7 @@ request_firmware_nowait(
fw_work->device = device;
fw_work->context = context;
fw_work->cont = cont;
- fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
+ fw_work->opt_flags = FW_OPT_NOWAIT |
(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
if (!try_module_get(module)) {
@@ -1463,13 +1540,13 @@ static int cache_firmware(const char *fw_name)
return ret;
}
-static struct firmware_buf *fw_lookup_buf(const char *fw_name)
+static struct fw_priv *lookup_fw_priv(const char *fw_name)
{
- struct firmware_buf *tmp;
+ struct fw_priv *tmp;
struct firmware_cache *fwc = &fw_cache;
spin_lock(&fwc->lock);
- tmp = __fw_lookup_buf(fw_name);
+ tmp = __lookup_fw_priv(fw_name);
spin_unlock(&fwc->lock);
return tmp;
@@ -1488,7 +1565,7 @@ static struct firmware_buf *fw_lookup_buf(const char *fw_name)
*/
static int uncache_firmware(const char *fw_name)
{
- struct firmware_buf *buf;
+ struct fw_priv *fw_priv;
struct firmware fw;
pr_debug("%s: %s\n", __func__, fw_name);
@@ -1496,9 +1573,9 @@ static int uncache_firmware(const char *fw_name)
if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
return 0;
- buf = fw_lookup_buf(fw_name);
- if (buf) {
- fw_free_buf(buf);
+ fw_priv = lookup_fw_priv(fw_name);
+ if (fw_priv) {
+ free_fw_priv(fw_priv);
return 0;
}
@@ -1767,20 +1844,11 @@ static int fw_suspend(void)
static struct syscore_ops fw_syscore_ops = {
.suspend = fw_suspend,
};
-#else
-static int fw_cache_piggyback_on_request(const char *name)
-{
- return 0;
-}
-#endif
-static void __init fw_cache_init(void)
+static int __init register_fw_pm_ops(void)
{
- spin_lock_init(&fw_cache.lock);
- INIT_LIST_HEAD(&fw_cache.head);
- fw_cache.state = FW_LOADER_NO_CACHE;
+ int ret;
-#ifdef CONFIG_PM_SLEEP
spin_lock_init(&fw_cache.name_lock);
INIT_LIST_HEAD(&fw_cache.fw_names);
@@ -1788,10 +1856,39 @@ static void __init fw_cache_init(void)
device_uncache_fw_images_work);
fw_cache.pm_notify.notifier_call = fw_pm_notify;
- register_pm_notifier(&fw_cache.pm_notify);
+ ret = register_pm_notifier(&fw_cache.pm_notify);
+ if (ret)
+ return ret;
register_syscore_ops(&fw_syscore_ops);
+
+ return ret;
+}
+
+static inline void unregister_fw_pm_ops(void)
+{
+ unregister_syscore_ops(&fw_syscore_ops);
+ unregister_pm_notifier(&fw_cache.pm_notify);
+}
+#else
+static int fw_cache_piggyback_on_request(const char *name)
+{
+ return 0;
+}
+static inline int register_fw_pm_ops(void)
+{
+ return 0;
+}
+static inline void unregister_fw_pm_ops(void)
+{
+}
#endif
+
+static void __init fw_cache_init(void)
+{
+ spin_lock_init(&fw_cache.lock);
+ INIT_LIST_HEAD(&fw_cache.head);
+ fw_cache.state = FW_LOADER_NO_CACHE;
}
static int fw_shutdown_notify(struct notifier_block *unused1,
@@ -1812,25 +1909,31 @@ static struct notifier_block fw_shutdown_nb = {
static int __init firmware_class_init(void)
{
+ int ret;
+
+ /* No need to undo these on exit */
fw_cache_init();
- register_reboot_notifier(&fw_shutdown_nb);
-#ifdef CONFIG_FW_LOADER_USER_HELPER
- return class_register(&firmware_class);
-#else
- return 0;
-#endif
+
+ ret = register_fw_pm_ops();
+ if (ret)
+ return ret;
+
+ ret = register_reboot_notifier(&fw_shutdown_nb);
+ if (ret)
+ goto out;
+
+ return register_sysfs_loader();
+
+out:
+ unregister_fw_pm_ops();
+ return ret;
}
static void __exit firmware_class_exit(void)
{
-#ifdef CONFIG_PM_SLEEP
- unregister_syscore_ops(&fw_syscore_ops);
- unregister_pm_notifier(&fw_cache.pm_notify);
-#endif
+ unregister_fw_pm_ops();
unregister_reboot_notifier(&fw_shutdown_nb);
-#ifdef CONFIG_FW_LOADER_USER_HELPER
- class_unregister(&firmware_class);
-#endif
+ unregister_sysfs_loader();
}
fs_initcall(firmware_class_init);
diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
index 4f8b741f4615..1ce59b4b53ce 100644
--- a/drivers/base/hypervisor.c
+++ b/drivers/base/hypervisor.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* hypervisor.c - /sys/hypervisor subsystem.
*
* Copyright (C) IBM Corp. 2006
* Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2007 Novell Inc.
- *
- * This file is released under the GPLv2
*/
#include <linux/kobject.h>
diff --git a/drivers/base/init.c b/drivers/base/init.c
index 48c0e220acc0..dd85b05a6a16 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
- *
- * This file is released under the GPLv2
*/
#include <linux/device.h>
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 372d10af2600..2772f5d1948a 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ISA bus.
*/
diff --git a/drivers/base/map.c b/drivers/base/map.c
index c1d38234d725..5650ab2b247a 100644
--- a/drivers/base/map.c
+++ b/drivers/base/map.c
@@ -1,8 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/base/map.c
*
* (C) Copyright Al Viro 2002,2003
- * Released under GPL v2.
*
* NOTE: data structure needs to be changed. It works, but for large dev_t
* it will be too slow. It is isolated, though, so these changes will be
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 1d60b58a8c19..fe4b24f05f6a 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -569,7 +569,7 @@ store_hard_offline_page(struct device *dev,
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- ret = memory_failure(pfn, 0, 0);
+ ret = memory_failure(pfn, 0);
return ret ? ret : count;
}
diff --git a/drivers/base/module.c b/drivers/base/module.c
index 2a215780eda2..46ad4d636731 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* module.c - module sysfs fun for drivers
- *
- * This file is released under the GPLv2
- *
*/
#include <linux/device.h>
#include <linux/module.h>
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
index eb929dd6ef1e..c22864458511 100644
--- a/drivers/base/pinctrl.c
+++ b/drivers/base/pinctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver core interface to the pinctrl subsystem.
*
@@ -6,8 +7,6 @@
* Based on bits of regulator core, gpio core and clk core
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#include <linux/device.h>
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index e5473525e7b2..8e22073aeeed 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MSI framework for platform devices
*
* Copyright (C) 2015 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c203fb90c1a0..f1bf7b38d91c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* platform.c - platform 'pseudo' bus for legacy devices
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
*
- * This file is released under the GPLv2
- *
* Please see Documentation/driver-model/platform.txt for more
* information.
*/
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0c80bea05bcb..528b24149bc7 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1032,15 +1032,12 @@ static int genpd_prepare(struct device *dev)
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
struct generic_pm_domain *genpd;
- int ret;
+ int ret = 0;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
-
if (poweroff)
ret = pm_generic_poweroff_noirq(dev);
else
@@ -1048,10 +1045,19 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_suspend(dev);
- if (ret)
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+ return 0;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ if (poweroff)
+ pm_generic_restore_noirq(dev);
+ else
+ pm_generic_resume_noirq(dev);
return ret;
+ }
}
genpd_lock(genpd);
@@ -1085,7 +1091,7 @@ static int genpd_suspend_noirq(struct device *dev)
static int genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
- int ret = 0;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -1094,21 +1100,21 @@ static int genpd_resume_noirq(struct device *dev)
return -EINVAL;
if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
+ return pm_generic_resume_noirq(dev);
genpd_lock(genpd);
genpd_sync_power_on(genpd, true, 0);
genpd->suspended_count--;
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_resume(dev);
-
- ret = pm_generic_resume_noirq(dev);
- if (ret)
- return ret;
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return pm_generic_resume_noirq(dev);
}
/**
@@ -1135,8 +1141,9 @@ static int genpd_freeze_noirq(struct device *dev)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_suspend(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev))
+ ret = genpd_stop_dev(genpd, dev);
return ret;
}
@@ -1159,8 +1166,9 @@ static int genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -1217,8 +1225,9 @@ static int genpd_restore_noirq(struct device *dev)
genpd_sync_power_on(genpd, true, 0);
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -2199,20 +2208,8 @@ int genpd_dev_pm_attach(struct device *dev)
ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", 0, &pd_args);
- if (ret < 0) {
- if (ret != -ENOENT)
- return ret;
-
- /*
- * Try legacy Samsung-specific bindings
- * (for backwards compatibility of DT ABI)
- */
- pd_args.args_count = 0;
- pd_args.np = of_parse_phandle(dev->of_node,
- "samsung,power-domain", 0);
- if (!pd_args.np)
- return -ENOENT;
- }
+ if (ret < 0)
+ return ret;
mutex_lock(&gpd_list_lock);
pd = genpd_get_from_provider(&pd_args);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 08744b572af6..02a497e7c785 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -18,7 +18,6 @@
*/
#include <linux/device.h>
-#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
@@ -541,6 +540,73 @@ void dev_pm_skip_next_resume_phases(struct device *dev)
}
/**
+ * suspend_event - Return a "suspend" message for given "resume" one.
+ * @resume_msg: PM message representing a system-wide resume transition.
+ */
+static pm_message_t suspend_event(pm_message_t resume_msg)
+{
+ switch (resume_msg.event) {
+ case PM_EVENT_RESUME:
+ return PMSG_SUSPEND;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RESTORE:
+ return PMSG_FREEZE;
+ case PM_EVENT_RECOVER:
+ return PMSG_HIBERNATE;
+ }
+ return PMSG_ON;
+}
+
+/**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+ return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
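
dev_pm_may_skip_resume() can only ever return true for devices whose drivers opted in via DPM_FLAG_LEAVE_SUSPENDED, since __device_suspend_noirq() below sets must_resume unconditionally otherwise. The opt-in is typically done once at probe time; a sketch, assuming a driver that also uses DPM_FLAG_SMART_SUSPEND:

static int example_probe(struct device *dev)
{
	/* Let the core reuse runtime-PM state on suspend and leave the
	 * device suspended over system resume when nothing needs it. */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_LEAVE_SUSPENDED);
	return 0;
}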
+
+static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -551,8 +617,9 @@ void dev_pm_skip_next_resume_phases(struct device *dev)
*/
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool skip_resume;
int error = 0;
TRACE_DEVICE(dev);
@@ -566,29 +633,61 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
+ skip_resume = dev_pm_may_skip_resume(dev);
+
+ callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
+
+ if (skip_resume)
+ goto Skip;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ pm_message_t suspend_msg = suspend_event(state);
+
+ /*
+ * If "freeze" callbacks have been skipped during a transition
+ * related to hibernation, the subsequent "thaw" callbacks must
+ * be skipped too or bad things may happen. Otherwise, resume
+ * callbacks are going to be run for the device, so its runtime
+ * PM status must be changed to reflect the new state after the
+ * transition under way.
+ */
+ if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
+ !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
+ if (state.event == PM_EVENT_THAW) {
+ skip_resume = true;
+ goto Skip;
+ } else {
+ pm_runtime_set_active(dev);
+ }
+ }
}
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
dev->power.is_noirq_suspended = false;
- Out:
+ if (skip_resume) {
+ /*
+ * The device is going to be left in suspend, but it might not
+ * have been in runtime suspend before the system suspended, so
+ * its runtime PM status needs to be updated to avoid confusing
+ * the runtime PM framework when runtime PM is enabled for the
+ * device again.
+ */
+ pm_runtime_set_suspended(dev);
+ dev_pm_skip_next_resume_phases(dev);
+ }
+
+Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
@@ -681,6 +780,35 @@ void dpm_resume_noirq(pm_message_t state)
dpm_noirq_end();
}
+static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -691,8 +819,8 @@ void dpm_resume_noirq(pm_message_t state)
*/
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -706,19 +834,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_resume_early_cb(dev, state, &info);
if (!callback && dev->driver && dev->driver->pm) {
info = "early driver ";
@@ -1089,6 +1205,77 @@ static pm_message_t resume_event(pm_message_t sleep_state)
return PMSG_ON;
}
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent)
+ dev->parent->power.must_resume = true;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ link->supplier->power.must_resume = true;
+
+ device_links_read_unlock(idx);
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static bool device_must_resume(struct device *dev, pm_message_t state,
+ bool no_subsys_suspend_noirq)
+{
+ pm_message_t resume_msg = resume_event(state);
+
+ /*
+ * If all of the device driver's "noirq", "late" and "early" callbacks
+ * are invoked directly by the core, the decision to allow the device to
+ * stay in suspend can be based on its current runtime PM status and its
+ * wakeup settings.
+ */
+ if (no_subsys_suspend_noirq &&
+ !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
+ !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
+ !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
+ return !pm_runtime_status_suspended(dev) &&
+ (resume_msg.event != PM_EVENT_RESUME ||
+ (device_can_wakeup(dev) && !device_may_wakeup(dev)));
+
+ /*
+ * The only safe strategy here is to require that if the device may not
+ * be left in suspend, resume callbacks must be invoked for it.
+ */
+ return !dev->power.may_skip_resume;
+}
+
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1100,8 +1287,9 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool no_subsys_cb = false;
int error = 0;
TRACE_DEVICE(dev);
@@ -1120,30 +1308,40 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
+
+ if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_noirq_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+
+Skip:
+ dev->power.is_noirq_suspended = true;
+
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+ dev->power.must_resume = dev->power.must_resume ||
+ atomic_read(&dev->power.usage_count) > 1 ||
+ device_must_resume(dev, state, no_subsys_cb);
+ } else {
+ dev->power.must_resume = true;
+ }
+
+ if (dev->power.must_resume)
+ dpm_superior_set_must_resume(dev);
Complete:
complete_all(&dev->power.completion);
@@ -1249,6 +1447,50 @@ int dpm_suspend_noirq(pm_message_t state)
return ret;
}
+static void dpm_propagate_wakeup_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent)
+ return;
+
+ spin_lock_irq(&parent->power.lock);
+
+ if (dev->power.wakeup_path && !parent->power.ignore_children)
+ parent->power.wakeup_path = true;
+
+ spin_unlock_irq(&parent->power.lock);
+}
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1259,8 +1501,8 @@ int dpm_suspend_noirq(pm_message_t state)
*/
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -1281,30 +1523,29 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev_pm_smart_suspend_and_suspended(dev) &&
+ !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "late driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_late_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+ dpm_propagate_wakeup_to_parent(dev);
+
+Skip:
+ dev->power.is_late_suspended = true;
Complete:
TRACE_SUSPEND(error);
@@ -1435,11 +1676,17 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
-static void dpm_clear_suppliers_direct_complete(struct device *dev)
+static void dpm_clear_superiors_direct_complete(struct device *dev)
{
struct device_link *link;
int idx;
+ if (dev->parent) {
+ spin_lock_irq(&dev->parent->power.lock);
+ dev->parent->power.direct_complete = false;
+ spin_unlock_irq(&dev->parent->power.lock);
+ }
+
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
@@ -1500,6 +1747,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.direct_complete = false;
}
+ dev->power.may_skip_resume = false;
+ dev->power.must_resume = false;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1543,20 +1793,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
End:
if (!error) {
- struct device *parent = dev->parent;
-
dev->power.is_suspended = true;
- if (parent) {
- spin_lock_irq(&parent->power.lock);
-
- dev->parent->power.direct_complete = false;
- if (dev->power.wakeup_path
- && !dev->parent->power.ignore_children)
- dev->parent->power.wakeup_path = true;
+ if (device_may_wakeup(dev))
+ dev->power.wakeup_path = true;
- spin_unlock_irq(&parent->power.lock);
- }
- dpm_clear_suppliers_direct_complete(dev);
+ dpm_propagate_wakeup_to_parent(dev);
+ dpm_clear_superiors_direct_complete(dev);
}
device_unlock(dev);
@@ -1665,8 +1907,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
- WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- !pm_runtime_enabled(dev));
+ WARN_ON(!pm_runtime_enabled(dev) &&
+ dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED));
/*
* If a device's parent goes into runtime suspend at the wrong time,
@@ -1678,7 +1921,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- dev->power.wakeup_path = device_may_wakeup(dev);
+ dev->power.wakeup_path = false;
if (dev->power.no_pm_callbacks) {
ret = 1; /* Let device go direct_complete */
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 7beee75399d4..21244c53e377 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -41,20 +41,15 @@ extern void dev_pm_disable_wake_irq_check(struct device *dev);
#ifdef CONFIG_PM_SLEEP
-extern int device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq);
+extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
extern void device_wakeup_detach_irq(struct device *dev);
extern void device_wakeup_arm_wake_irqs(void);
extern void device_wakeup_disarm_wake_irqs(void);
#else
-static inline int
-device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq)
-{
- return 0;
-}
+static inline void device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq) {}
static inline void device_wakeup_detach_irq(struct device *dev)
{
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 6e89b51ea3d9..8bef3cb2424d 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1613,22 +1613,34 @@ void pm_runtime_drop_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
+static bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
* Disable runtime PM so we can safely check the device's runtime PM status and
- * if it is active, invoke it's .runtime_suspend callback to bring it into
- * suspend state. Keep runtime PM disabled to preserve the state unless we
- * encounter errors.
+ * if it is active, invoke its ->runtime_suspend callback to suspend it and
+ * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's
+ * usage and children counters don't indicate that the device was in use before
+ * the system-wide transition under way, decrement its parent's children counter
+ * (if there is a parent). Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
*
* Typically this function may be invoked from a system suspend callback to make
- * sure the device is put into low power state.
+ * sure the device is put into low power state, and it should only be used during
+ * system-wide PM transitions to sleep states. It assumes that the analogous
+ * pm_runtime_force_resume() will be used to resume the device.
*/
int pm_runtime_force_suspend(struct device *dev)
{
int (*callback)(struct device *);
- int ret = 0;
+ int ret;
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev))
@@ -1636,27 +1648,23 @@ int pm_runtime_force_suspend(struct device *dev)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
- if (!callback) {
- ret = -ENOSYS;
- goto err;
- }
-
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret)
goto err;
/*
- * Increase the runtime PM usage count for the device's parent, in case
- * when we find the device being used when system suspend was invoked.
- * This informs pm_runtime_force_resume() to resume the parent
- * immediately, which is needed to be able to resume its children,
- * when not deferring the resume to be managed via runtime PM.
+ * If the device can stay in suspend after the system-wide transition
+ * to the working state that will follow, drop the children counter of
+ * its parent, but set its status to RPM_SUSPENDED anyway in case this
+ * function will be called again for it in the meantime.
*/
- if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
- pm_runtime_get_noresume(dev->parent);
+ if (pm_runtime_need_not_resume(dev))
+ pm_runtime_set_suspended(dev);
+ else
+ __update_runtime_status(dev, RPM_SUSPENDED);
- pm_runtime_set_suspended(dev);
return 0;
+
err:
pm_runtime_enable(dev);
return ret;
@@ -1669,13 +1677,9 @@ EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
*
* Prior to invoking this function, we expect the user to have brought the device
* into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power, if it is expected to be
- * used on system resume. To distinguish that, we check whether the runtime PM
- * usage count is greater than 1 (the PM core increases the usage count in the
- * system PM prepare phase), as that indicates a real user (such as a subsystem,
- * driver, userspace, etc.) is using it. If that is the case, the device is
- * expected to be used on system resume as well, so then we resume it. In the
- * other case, we defer the resume to be managed via runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. In the other case, we defer the resume to be managed
+ * via runtime PM.
*
* Typically this function may be invoked from a system resume callback.
*/
@@ -1684,32 +1688,18 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
-
- if (!callback) {
- ret = -ENOSYS;
- goto out;
- }
-
- if (!pm_runtime_status_suspended(dev))
+ if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
goto out;
/*
- * Decrease the parent's runtime PM usage count, if we increased it
- * during system suspend in pm_runtime_force_suspend().
- */
- if (atomic_read(&dev->power.usage_count) > 1) {
- if (dev->parent)
- pm_runtime_put_noidle(dev->parent);
- } else {
- goto out;
- }
+ * The value of the parent's children counter is correct already, so
+ * just update the status of the device.
+ */
+ __update_runtime_status(dev, RPM_ACTIVE);
- ret = pm_runtime_set_active(dev);
- if (ret)
- goto out;
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
goto out;
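The rewritten kernel-doc assumes pm_runtime_force_suspend() and pm_runtime_force_resume() are used as a pair across a system-wide transition. The usual way to honor that is to install them directly as the system sleep callbacks; a sketch (the foo_runtime_* callbacks are hypothetical):

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static const struct dev_pm_ops foo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					pm_runtime_force_resume)
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	};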
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index e153e28b1857..0f651efc58a1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -108,16 +108,10 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr,
static ssize_t control_store(struct device * dev, struct device_attribute *attr,
const char * buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
device_lock(dev);
- if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+ if (sysfs_streq(buf, ctrl_auto))
pm_runtime_allow(dev);
- else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+ else if (sysfs_streq(buf, ctrl_on))
pm_runtime_forbid(dev);
else
n = -EINVAL;
@@ -125,9 +119,9 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
return n;
}
-static DEVICE_ATTR(control, 0644, control_show, control_store);
+static DEVICE_ATTR_RW(control);
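sysfs_streq() is what makes the hand-rolled memchr()/strncmp() newline handling above unnecessary: it compares like strcmp() but also accepts a single trailing newline, as written by echo(1). A minimal illustration:

	#include <linux/string.h>

	static int foo_parse(const char *buf)
	{
		/* Matches both "auto" and "auto\n". */
		if (sysfs_streq(buf, "auto"))
			return 0;
		return -EINVAL;
	}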
-static ssize_t rtpm_active_time_show(struct device *dev,
+static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -138,9 +132,9 @@ static ssize_t rtpm_active_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_active_time);
-static ssize_t rtpm_suspended_time_show(struct device *dev,
+static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -152,9 +146,9 @@ static ssize_t rtpm_suspended_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_suspended_time);
-static ssize_t rtpm_status_show(struct device *dev,
+static ssize_t runtime_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *p;
@@ -184,7 +178,7 @@ static ssize_t rtpm_status_show(struct device *dev,
return sprintf(buf, p);
}
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR_RO(runtime_status);
static ssize_t autosuspend_delay_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -211,26 +205,25 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
- autosuspend_delay_ms_store);
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
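The handler renames throughout this file (rtpm_active_time_show to runtime_active_time_show, and so on) are required because the DEVICE_ATTR_RW()/DEVICE_ATTR_RO() helpers derive the callback names from the attribute name. Roughly:

	/* DEVICE_ATTR_RW(foo) expands to approximately: */
	static DEVICE_ATTR(foo, 0644, foo_show, foo_store);
	/* and DEVICE_ATTR_RO(foo) to approximately: */
	static DEVICE_ATTR(foo, 0444, foo_show, NULL);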
-static ssize_t pm_qos_resume_latency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
return sprintf(buf, "n/a\n");
- else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_resume_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -245,7 +238,7 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
if (value == 0)
value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
- } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+ } else if (sysfs_streq(buf, "n/a")) {
value = 0;
} else {
return -EINVAL;
@@ -256,26 +249,25 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
-static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
if (value < 0)
return sprintf(buf, "auto\n");
- else if (value == PM_QOS_LATENCY_ANY)
+ if (value == PM_QOS_LATENCY_ANY)
return sprintf(buf, "any\n");
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -285,9 +277,9 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
if (value < 0)
return -EINVAL;
} else {
- if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+ if (sysfs_streq(buf, "auto"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
- else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ else if (sysfs_streq(buf, "any"))
value = PM_QOS_LATENCY_ANY;
else
return -EINVAL;
@@ -296,8 +288,7 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
- pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
@@ -323,49 +314,39 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_no_power_off, 0644,
- pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";
-static ssize_t
-wake_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%s\n", device_can_wakeup(dev)
? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
}
-static ssize_t
-wake_store(struct device * dev, struct device_attribute *attr,
- const char * buf, size_t n)
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
if (!device_can_wakeup(dev))
return -EINVAL;
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1
- && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
+ if (sysfs_streq(buf, _enabled))
device_set_wakeup_enable(dev, 1);
- else if (len == sizeof _disabled - 1
- && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_set_wakeup_enable(dev, 0);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+static DEVICE_ATTR_RW(wakeup);
static ssize_t wakeup_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -379,10 +360,11 @@ static ssize_t wakeup_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_count);
static ssize_t wakeup_active_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -396,11 +378,11 @@ static ssize_t wakeup_active_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active_count);
static ssize_t wakeup_abort_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -414,7 +396,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_abort_count);
static ssize_t wakeup_expire_count_show(struct device *dev,
struct device_attribute *attr,
@@ -432,10 +414,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_expire_count);
static ssize_t wakeup_active_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned int active = 0;
bool enabled = false;
@@ -449,10 +431,11 @@ static ssize_t wakeup_active_show(struct device *dev,
return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active);
-static ssize_t wakeup_total_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -466,10 +449,10 @@ static ssize_t wakeup_total_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
-static ssize_t wakeup_max_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -483,10 +466,11 @@ static ssize_t wakeup_max_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
-static ssize_t wakeup_last_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -500,12 +484,12 @@ static ssize_t wakeup_last_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
-static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -519,40 +503,39 @@ static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
- wakeup_prevent_sleep_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_ADVANCED_DEBUG
-static ssize_t rtpm_usagecount_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_usage_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
+static DEVICE_ATTR_RO(runtime_usage);
-static ssize_t rtpm_children_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_active_kids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", dev->power.ignore_children ?
0 : atomic_read(&dev->power.child_count));
}
+static DEVICE_ATTR_RO(runtime_active_kids);
-static ssize_t rtpm_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+ if (dev->power.disable_depth && (dev->power.runtime_auto == false))
return sprintf(buf, "disabled & forbidden\n");
- else if (dev->power.disable_depth)
+ if (dev->power.disable_depth)
return sprintf(buf, "disabled\n");
- else if (dev->power.runtime_auto == false)
+ if (dev->power.runtime_auto == false)
return sprintf(buf, "forbidden\n");
return sprintf(buf, "enabled\n");
}
-
-static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
-static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+static DEVICE_ATTR_RO(runtime_enabled);
#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
@@ -566,23 +549,16 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
+ if (sysfs_streq(buf, _enabled))
device_enable_async_suspend(dev);
- else if (len == sizeof _disabled - 1 &&
- strncmp(buf, _disabled, len) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_disable_async_suspend(dev);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(async, 0644, async_show, async_store);
+static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index ae0429827f31..a8ac86e4d79e 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -33,7 +33,6 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
struct wake_irq *wirq)
{
unsigned long flags;
- int err;
if (!dev || !wirq)
return -EINVAL;
@@ -45,12 +44,11 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
return -EEXIST;
}
- err = device_wakeup_attach_irq(dev, wirq);
- if (!err)
- dev->power.wakeirq = wirq;
+ dev->power.wakeirq = wirq;
+ device_wakeup_attach_irq(dev, wirq);
spin_unlock_irqrestore(&dev->power.lock, flags);
- return err;
+ return 0;
}
/**
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 38559f04db2c..ea01621ed769 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -19,6 +19,11 @@
#include "power.h"
+#ifndef CONFIG_SUSPEND
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+#endif
+
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
@@ -268,6 +273,9 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
+ if (pm_suspend_target_state != PM_SUSPEND_ON)
+ dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
+
ws = wakeup_source_register(dev_name(dev));
if (!ws)
return -ENOMEM;
@@ -291,22 +299,19 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
*
* Call under the device's power.lock lock.
*/
-int device_wakeup_attach_irq(struct device *dev,
+void device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
ws = dev->power.wakeup;
- if (!ws) {
- dev_err(dev, "forgot to call call device_init_wakeup?\n");
- return -EINVAL;
- }
+ if (!ws)
+ return;
if (ws->wakeirq)
- return -EEXIST;
+ dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
ws->wakeirq = wakeirq;
- return 0;
}
/**
@@ -448,9 +453,7 @@ int device_init_wakeup(struct device *dev, bool enable)
device_set_wakeup_capable(dev, true);
ret = device_wakeup_enable(dev);
} else {
- if (dev->power.can_wakeup)
- device_wakeup_disable(dev);
-
+ device_wakeup_disable(dev);
device_set_wakeup_capable(dev, false);
}
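The removed can_wakeup check was redundant because device_wakeup_disable() performs the same check itself. For reference, a sketch of the usual call sites for this helper (driver names hypothetical):

	static int foo_probe(struct platform_device *pdev)
	{
		/* Mark the device wakeup-capable and register a wakeup source. */
		device_init_wakeup(&pdev->dev, true);
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		/* Tear down the wakeup source and clear the capable flag. */
		device_init_wakeup(&pdev->dev, false);
		return 0;
	}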
@@ -464,9 +467,6 @@ EXPORT_SYMBOL_GPL(device_init_wakeup);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
-
return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 851b1b6596a4..302236281d83 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* property.c - Unified device property interface.
*
* Copyright (C) 2014, Intel Corporation
* Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
@@ -16,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
+#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
@@ -698,6 +696,23 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
+static void property_entry_free_data(const struct property_entry *p)
+{
+ size_t i, nval;
+
+ if (p->is_array) {
+ if (p->is_string && p->pointer.str) {
+ nval = p->length / sizeof(const char *);
+ for (i = 0; i < nval; i++)
+ kfree(p->pointer.str[i]);
+ }
+ kfree(p->pointer.raw_data);
+ } else if (p->is_string) {
+ kfree(p->value.str);
+ }
+ kfree(p->name);
+}
+
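For context, the entries this helper tears down are normally declared with the PROPERTY_ENTRY_*() macros and deep-copied when a property set is registered; a small sketch of such a table (names and values hypothetical):

	#include <linux/property.h>

	static const struct property_entry foo_props[] = {
		PROPERTY_ENTRY_U32("clock-frequency", 400000),
		PROPERTY_ENTRY_STRING("label", "foo"),
		{ }	/* sentinel */
	};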
static int property_copy_string_array(struct property_entry *dst,
const struct property_entry *src)
{
@@ -728,34 +743,24 @@ static int property_entry_copy_data(struct property_entry *dst,
{
int error;
- dst->name = kstrdup(src->name, GFP_KERNEL);
- if (!dst->name)
- return -ENOMEM;
-
if (src->is_array) {
- if (!src->length) {
- error = -ENODATA;
- goto out_free_name;
- }
+ if (!src->length)
+ return -ENODATA;
if (src->is_string) {
error = property_copy_string_array(dst, src);
if (error)
- goto out_free_name;
+ return error;
} else {
dst->pointer.raw_data = kmemdup(src->pointer.raw_data,
src->length, GFP_KERNEL);
- if (!dst->pointer.raw_data) {
- error = -ENOMEM;
- goto out_free_name;
- }
+ if (!dst->pointer.raw_data)
+ return -ENOMEM;
}
} else if (src->is_string) {
dst->value.str = kstrdup(src->value.str, GFP_KERNEL);
- if (!dst->value.str && src->value.str) {
- error = -ENOMEM;
- goto out_free_name;
- }
+ if (!dst->value.str && src->value.str)
+ return -ENOMEM;
} else {
dst->value.raw_data = src->value.raw_data;
}
@@ -764,28 +769,15 @@ static int property_entry_copy_data(struct property_entry *dst,
dst->is_array = src->is_array;
dst->is_string = src->is_string;
+ dst->name = kstrdup(src->name, GFP_KERNEL);
+ if (!dst->name)
+ goto out_free_data;
+
return 0;
-out_free_name:
- kfree(dst->name);
- return error;
-}
-
-static void property_entry_free_data(const struct property_entry *p)
-{
- size_t i, nval;
-
- if (p->is_array) {
- if (p->is_string && p->pointer.str) {
- nval = p->length / sizeof(const char *);
- for (i = 0; i < nval; i++)
- kfree(p->pointer.str[i]);
- }
- kfree(p->pointer.raw_data);
- } else if (p->is_string) {
- kfree(p->value.str);
- }
- kfree(p->name);
+out_free_data:
+ property_entry_free_data(dst);
+ return -ENOMEM;
}
/**
@@ -997,6 +989,32 @@ fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
/**
+ * fwnode_get_next_available_child_node - Return the next
+ * available child node handle for a node
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the node's child nodes or a %NULL handle.
+ */
+struct fwnode_handle *
+fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ struct fwnode_handle *next_child = child;
+
+ if (!fwnode)
+ return NULL;
+
+ do {
+ next_child = fwnode_get_next_child_node(fwnode, next_child);
+
+ if (!next_child || fwnode_device_is_available(next_child))
+ break;
+ } while (next_child);
+
+ return next_child;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
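Callers are expected to feed the returned handle back in as the cursor until NULL is returned; a typical (hypothetical) loop that counts only the available children:

	struct fwnode_handle *child = NULL;
	unsigned int n = 0;

	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		n++;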
+
+/**
* device_get_next_child_node - Return the next child node handle for a device
* @dev: Device to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
@@ -1126,21 +1144,21 @@ enum dev_dma_attr device_get_dma_attr(struct device *dev)
EXPORT_SYMBOL_GPL(device_get_dma_attr);
/**
- * device_get_phy_mode - Get phy mode for given device
- * @dev: Pointer to the given device
+ * fwnode_get_phy_mode - Get phy mode for given firmware node
+ * @fwnode: Pointer to the given node
*
* The function gets the phy interface string from the 'phy-mode' or
* 'phy-connection-type' property, and returns its index in the phy_modes
* table, or an errno in the error case.
*/
-int device_get_phy_mode(struct device *dev)
+int fwnode_get_phy_mode(struct fwnode_handle *fwnode)
{
const char *pm;
int err, i;
- err = device_property_read_string(dev, "phy-mode", &pm);
+ err = fwnode_property_read_string(fwnode, "phy-mode", &pm);
if (err < 0)
- err = device_property_read_string(dev,
+ err = fwnode_property_read_string(fwnode,
"phy-connection-type", &pm);
if (err < 0)
return err;
@@ -1151,13 +1169,27 @@ int device_get_phy_mode(struct device *dev)
return -ENODEV;
}
+EXPORT_SYMBOL_GPL(fwnode_get_phy_mode);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev: Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property, and returns its index in the phy_modes
+ * table, or an errno in the error case.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+ return fwnode_get_phy_mode(dev_fwnode(dev));
+}
EXPORT_SYMBOL_GPL(device_get_phy_mode);
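Either entry point returns an index into the phy_modes table on success, so callers typically compare the result against the PHY_INTERFACE_MODE_* constants; a short sketch:

	int mode = device_get_phy_mode(dev);

	if (mode < 0)
		return mode;	/* property missing or unrecognized string */
	/* e.g. branch on mode == PHY_INTERFACE_MODE_RGMII */
	bool is_rgmii = (mode == PHY_INTERFACE_MODE_RGMII);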
-static void *device_get_mac_addr(struct device *dev,
+static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode,
const char *name, char *addr,
int alen)
{
- int ret = device_property_read_u8_array(dev, name, addr, alen);
+ int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen);
if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
return addr;
@@ -1165,8 +1197,8 @@ static void *device_get_mac_addr(struct device *dev,
}
/**
- * device_get_mac_address - Get the MAC for a given device
- * @dev: Pointer to the device
+ * fwnode_get_mac_address - Get the MAC from the firmware node
+ * @fwnode: Pointer to the firmware node
* @addr: Address of buffer to store the MAC in
* @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
*
@@ -1187,23 +1219,60 @@ static void *device_get_mac_addr(struct device *dev,
* In this case, the real MAC is in 'local-mac-address', and 'mac-address'
* exists but is all zeros.
*/
-void *device_get_mac_address(struct device *dev, char *addr, int alen)
+void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen)
{
char *res;
- res = device_get_mac_addr(dev, "mac-address", addr, alen);
+ res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen);
if (res)
return res;
- res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+ res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen);
if (res)
return res;
- return device_get_mac_addr(dev, "address", addr, alen);
+ return fwnode_get_mac_addr(fwnode, "address", addr, alen);
+}
+EXPORT_SYMBOL(fwnode_get_mac_address);
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev: Pointer to the device
+ * @addr: Address of buffer to store the MAC in
+ * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+ return fwnode_get_mac_address(dev_fwnode(dev), addr, alen);
}
EXPORT_SYMBOL(device_get_mac_address);
/**
+ * fwnode_irq_get - Get IRQ directly from a fwnode
+ * @fwnode: Pointer to the firmware node
+ * @index: Zero-based index of the IRQ
+ *
+ * Returns the Linux IRQ number on success. Other values are determined
+ * according to the acpi_irq_get()/of_irq_get() operation.
+ */
+int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index)
+{
+ struct device_node *of_node = to_of_node(fwnode);
+ struct resource res;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_OF) && of_node)
+ return of_irq_get(of_node, index);
+
+ ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
+ if (ret)
+ return ret;
+
+ return res.start;
+}
+EXPORT_SYMBOL(fwnode_irq_get);
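A plausible caller, where foo_irq_handler and the surrounding driver context are hypothetical:

	int irq = fwnode_irq_get(fwnode, 0);

	if (irq < 0)
		return irq;
	return devm_request_irq(dev, irq, foo_irq_handler, 0, "foo", priv);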
+
+/**
* device_graph_get_next_endpoint - Get next endpoint firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
@@ -1340,3 +1409,10 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint);
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
+
+void *device_get_match_data(struct device *dev)
+{
+ return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data,
+ dev);
+}
+EXPORT_SYMBOL_GPL(device_get_match_data);
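device_get_match_data() dispatches to the fwnode's device_get_match_data operation, so on OF platforms it can hand back the .data pointer of the matched of_device_id entry; a sketch of the intended usage (struct foo_info and the table are hypothetical):

	static const struct foo_info foo_chip_info = {
		/* chip-specific constants */
	};

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo", .data = &foo_chip_info },
		{ }
	};

	static int foo_probe(struct platform_device *pdev)
	{
		const struct foo_info *info = device_get_match_data(&pdev->dev);

		if (!info)
			return -ENODEV;
		return 0;
	}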
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index cc162b48c6d7..aff34c0c2a3e 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,7 +6,6 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select IRQ_DOMAIN if REGMAP_IRQ
- select REGMAP_HWSPINLOCK if HWSPINLOCK=y
bool
config REGCACHE_COMPRESSED
@@ -43,5 +42,6 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
-config REGMAP_HWSPINLOCK
- bool
+config REGMAP_SOUNDWIRE
+ tristate
+ depends on SOUNDWIRE_BUS
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 63dec9222892..5ed0023fabda 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
+obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 8641183cac2f..53785e0e297a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -77,6 +77,7 @@ struct regmap {
int async_ret;
#ifdef CONFIG_DEBUG_FS
+ bool debugfs_disable;
struct dentry *debugfs;
const char *debugfs_name;
@@ -215,10 +216,17 @@ struct regmap_field {
extern void regmap_debugfs_initcall(void);
extern void regmap_debugfs_init(struct regmap *map, const char *name);
extern void regmap_debugfs_exit(struct regmap *map);
+
+static inline void regmap_debugfs_disable(struct regmap *map)
+{
+ map->debugfs_disable = true;
+}
+
#else
static inline void regmap_debugfs_initcall(void) { }
static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
static inline void regmap_debugfs_exit(struct regmap *map) { }
+static inline void regmap_debugfs_disable(struct regmap *map) { }
#endif
/* regcache core declarations */
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 4d2e50bfc726..bc6cd88b8cc6 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -37,9 +37,12 @@ static int regcache_flat_init(struct regmap *map)
cache = map->cache;
- for (i = 0; i < map->num_reg_defaults; i++)
- cache[regcache_flat_get_index(map, map->reg_defaults[i].reg)] =
- map->reg_defaults[i].def;
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ unsigned int reg = map->reg_defaults[i].reg;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ cache[index] = map->reg_defaults[i].def;
+ }
return 0;
}
@@ -56,8 +59,9 @@ static int regcache_flat_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- *value = cache[regcache_flat_get_index(map, reg)];
+ *value = cache[index];
return 0;
}
@@ -66,8 +70,9 @@ static int regcache_flat_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- cache[regcache_flat_get_index(map, reg)] = value;
+ cache[index] = value;
return 0;
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 36ce3511c733..f3266334063e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -529,6 +529,18 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct regmap_range_node *range_node;
const char *devname = "dummy";
+ /*
+ * Userspace can initiate reads from the hardware over debugfs.
+ * Normally internal regmap structures and buffers are protected with
+ * a mutex or a spinlock, but if the regmap owner decided to disable
+ * all locking mechanisms, this is no longer the case. For safety:
+ * don't create the debugfs entries if locking is disabled.
+ */
+ if (map->debugfs_disable) {
+ dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
+ return;
+ }
+
/* If we don't have the debugfs root yet, postpone init */
if (!regmap_debugfs_root) {
struct regmap_debugfs_node *node;
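A map ends up on this regmap_debugfs_disable() path when its owner opts out of locking entirely; a hedged sketch of such a configuration:

	static const struct regmap_config foo_cfg = {
		.reg_bits = 8,
		.val_bits = 8,
		/* The caller provides its own serialization, so regmap skips
		 * its lock and, per the check above, its debugfs entries. */
		.disable_locking = true,
	};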
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
new file mode 100644
index 000000000000..50a66382d87d
--- /dev/null
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include "internal.h"
+
+static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ return sdw_write(slave, reg, val);
+}
+
+static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int read;
+
+ read = sdw_read(slave, reg);
+ if (read < 0)
+ return read;
+
+ *val = read;
+ return 0;
+}
+
+static struct regmap_bus regmap_sdw = {
+ .reg_read = regmap_sdw_read,
+ .reg_write = regmap_sdw_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static int regmap_sdw_config_check(const struct regmap_config *config)
+{
+ /* All register values are 8 bits wide as per the MIPI SoundWire 1.0 spec */
+ if (config->val_bits != 8)
+ return -ENOTSUPP;
+
+ /* Register addresses are 32 bits wide */
+ if (config->reg_bits != 32)
+ return -ENOTSUPP;
+
+ if (config->pad_bits != 0)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sdw);
+
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
+
+MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_LICENSE("GPL v2");
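A SoundWire slave driver would then create its map through the devm_regmap_init_sdw() wrapper around __devm_regmap_init_sdw(); a sketch with hypothetical names, using the geometry regmap_sdw_config_check() enforces:

	static const struct regmap_config foo_sdw_regmap = {
		.reg_bits = 32,	/* SoundWire register addresses */
		.val_bits = 8,	/* SoundWire register values */
	};

	static int foo_sdw_probe(struct sdw_slave *slave,
				 const struct sdw_device_id *id)
	{
		struct regmap *regmap;

		regmap = devm_regmap_init_sdw(slave, &foo_sdw_regmap);
		return PTR_ERR_OR_ZERO(regmap);
	}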
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 8d516a9bfc01..ee302ccdfbc8 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -414,7 +414,6 @@ static unsigned int regmap_parse_64_native(const void *buf)
}
#endif
-#ifdef REGMAP_HWSPINLOCK
static void regmap_lock_hwlock(void *__map)
{
struct regmap *map = __map;
@@ -457,7 +456,11 @@ static void regmap_unlock_hwlock_irqrestore(void *__map)
hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
-#endif
+
+static void regmap_lock_unlock_none(void *__map)
+{
+
+}
static void regmap_lock_mutex(void *__map)
{
@@ -669,16 +672,26 @@ struct regmap *__regmap_init(struct device *dev,
goto err;
}
- if (config->lock && config->unlock) {
+ if (config->name) {
+ map->name = kstrdup_const(config->name, GFP_KERNEL);
+ if (!map->name) {
+ ret = -ENOMEM;
+ goto err_map;
+ }
+ }
+
+ if (config->disable_locking) {
+ map->lock = map->unlock = regmap_lock_unlock_none;
+ regmap_debugfs_disable(map);
+ } else if (config->lock && config->unlock) {
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
- } else if (config->hwlock_id) {
-#ifdef REGMAP_HWSPINLOCK
+ } else if (config->use_hwlock) {
map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
if (!map->hwlock) {
ret = -ENXIO;
- goto err_map;
+ goto err_name;
}
switch (config->hwlock_mode) {
@@ -697,10 +710,6 @@ struct regmap *__regmap_init(struct device *dev,
}
map->lock_arg = map;
-#else
- ret = -EINVAL;
- goto err_map;
-#endif
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
@@ -762,14 +771,15 @@ struct regmap *__regmap_init(struct device *dev,
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
- map->name = config->name;
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
- if (config->read_flag_mask || config->write_flag_mask) {
+ if (config->read_flag_mask ||
+ config->write_flag_mask ||
+ config->zero_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
map->write_flag_mask = config->write_flag_mask;
} else if (bus) {
@@ -1116,8 +1126,10 @@ err_range:
regmap_range_exit(map);
kfree(map->work_buf);
err_hwlock:
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+err_name:
+ kfree_const(map->name);
err_map:
kfree(map);
err:
@@ -1305,8 +1317,9 @@ void regmap_exit(struct regmap *map)
kfree(async->work_buf);
kfree(async);
}
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+ kfree_const(map->name);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -2423,13 +2436,15 @@ static int _regmap_bus_read(void *context, unsigned int reg,
{
int ret;
struct regmap *map = context;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
if (!map->format.parse_val)
return -EINVAL;
- ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
if (ret == 0)
- *val = map->format.parse_val(map->work_buf);
+ *val = map->format.parse_val(work_val);
return ret;
}
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 909dedae4c4e..4e80f48ad5d6 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -1,8 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) ST-Ericsson SA 2011
*
* Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/sysfs.h>
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 8d98a329f6ea..6e076f359dcc 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* syscore.c - Execution of system core operations.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
*/
#include <linux/syscore_ops.h>
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index a3355d66bc12..e7f145d662f0 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index d936fcf9f1fb..5fd9f167ecc1 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* driver/base/topology.c - Populate sysfs with cpu topology information
*
@@ -6,22 +7,6 @@
* Copyright (C) 2006, Intel Corp.
*
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/mm.h>
#include <linux/cpu.h>
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index f6c453c3816e..5ed86ded6e6b 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* transport_class.c - implementation of generic transport classes
* using attribute_containers
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
*
- * This file is licensed under GPLv2
- *
* The basic idea here is to allow any "device controller" (which
* would most often be a Host Bus Adapter to use the services of one
* or more tranport classes for performing transport specific
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 02d78f6cecbb..ba8acca036df 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -55,7 +55,7 @@ config BCMA_DRIVER_PCI
config BCMA_DRIVER_PCI_HOSTMODE
bool "Driver for PCI core working in hostmode"
- depends on MIPS && BCMA_DRIVER_PCI
+ depends on MIPS && BCMA_DRIVER_PCI && PCI_DRIVERS_LEGACY
help
PCI core hostmode operation (external PCI bus).
diff --git a/drivers/bcma/driver_pcie2.c b/drivers/bcma/driver_pcie2.c
index b1a6e327cb23..cf889fc62ac7 100644
--- a/drivers/bcma/driver_pcie2.c
+++ b/drivers/bcma/driver_pcie2.c
@@ -83,7 +83,8 @@ static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
bcma_core_pcie2_set_ltr_vals(pcie2);
/* TODO:
- si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0); */
+ *si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0);
+ */
/* enable the LTR */
devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 442e777bdfb2..728075214959 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6619,43 +6619,27 @@ static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
#ifdef DAC960_GAM_MINOR
-/*
- * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
-*/
-
-static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
- unsigned long Argument)
+static long DAC960_gam_get_controller_info(DAC960_ControllerInfo_T __user *UserSpaceControllerInfo)
{
- long ErrorCode = 0;
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-
- mutex_lock(&DAC960_mutex);
- switch (Request)
- {
- case DAC960_IOCTL_GET_CONTROLLER_COUNT:
- ErrorCode = DAC960_ControllerCount;
- break;
- case DAC960_IOCTL_GET_CONTROLLER_INFO:
- {
- DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
- (DAC960_ControllerInfo_T __user *) Argument;
DAC960_ControllerInfo_T ControllerInfo;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceControllerInfo == NULL)
ErrorCode = -EINVAL;
else ErrorCode = get_user(ControllerNumber,
&UserSpaceControllerInfo->ControllerNumber);
if (ErrorCode != 0)
- break;
+ goto out;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1) {
- break;
+ goto out;
}
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
ControllerInfo.ControllerNumber = ControllerNumber;
ControllerInfo.FirmwareType = Controller->FirmwareType;
@@ -6670,12 +6654,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
- break;
- }
- case DAC960_IOCTL_V1_EXECUTE_COMMAND:
- {
- DAC960_V1_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V1_UserCommand_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v1_execute_command(DAC960_V1_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V1_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6688,39 +6672,41 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
int ControllerNumber, DataTransferLength;
unsigned char *DataTransferBuffer = NULL;
dma_addr_t DataTransferBufferDMA;
+ long ErrorCode;
+
if (UserSpaceUserCommand == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V1_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ControllerNumber = UserCommand.ControllerNumber;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
ErrorCode = -EINVAL;
if (Controller->FirmwareType != DAC960_V1_Controller)
- break;
+ goto out;
CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
DataTransferLength = UserCommand.DataTransferLength;
if (CommandOpcode & 0x80)
- break;
+ goto out;
if (CommandOpcode == DAC960_V1_DCDB)
{
if (copy_from_user(&DCDB, UserCommand.DCDB,
sizeof(DAC960_V1_DCDB_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
if (DCDB.Channel >= DAC960_V1_MaxChannels)
- break;
+ goto out;
if (!((DataTransferLength == 0 &&
DCDB.Direction
== DAC960_V1_DCDB_NoDataTransfer) ||
@@ -6730,15 +6716,15 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
(DataTransferLength < 0 &&
DCDB.Direction
== DAC960_V1_DCDB_DataTransferSystemToDevice)))
- break;
+ goto out;
if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
!= abs(DataTransferLength))
- break;
+ goto out;
DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
if (DCDB_IOBUF == NULL) {
ErrorCode = -ENOMEM;
- break;
+ goto out;
}
}
ErrorCode = -ENOMEM;
@@ -6748,19 +6734,19 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
}
if (CommandOpcode == DAC960_V1_DCDB)
@@ -6837,12 +6823,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (DCDB_IOBUF != NULL)
pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
DCDB_IOBUF, DCDB_IOBUFDMA);
- break;
- }
- case DAC960_IOCTL_V2_EXECUTE_COMMAND:
- {
- DAC960_V2_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V2_UserCommand_T __user *) Argument;
+ out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_execute_command(DAC960_V2_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V2_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6855,26 +6841,26 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
dma_addr_t DataTransferBufferDMA;
unsigned char *RequestSenseBuffer = NULL;
dma_addr_t RequestSenseBufferDMA;
+ long ErrorCode = -EINVAL;
- ErrorCode = -EINVAL;
if (UserSpaceUserCommand == NULL)
- break;
+ goto out;
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V2_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = UserCommand.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller){
ErrorCode = -EINVAL;
- break;
+ goto out;
}
DataTransferLength = UserCommand.DataTransferLength;
ErrorCode = -ENOMEM;
@@ -6884,14 +6870,14 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
@@ -7001,42 +6987,44 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (RequestSenseBuffer != NULL)
pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
RequestSenseBuffer, RequestSenseBufferDMA);
- break;
- }
- case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
- {
- DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus =
- (DAC960_V2_GetHealthStatus_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_get_health_status(DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus)
+{
DAC960_V2_GetHealthStatus_T GetHealthStatus;
DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceGetHealthStatus == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
sizeof(DAC960_V2_GetHealthStatus_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = GetHealthStatus.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&HealthStatusBuffer,
GetHealthStatus.HealthStatusBuffer,
sizeof(DAC960_V2_HealthStatusBuffer_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
!(Controller->V2.HealthStatusBuffer->StatusChangeCounter
@@ -7046,7 +7034,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DAC960_MonitoringTimerInterval);
if (ErrorCode == -ERESTARTSYS) {
ErrorCode = -EINTR;
- break;
+ goto out;
}
if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
Controller->V2.HealthStatusBuffer,
@@ -7054,7 +7042,39 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -EFAULT;
else
ErrorCode = 0;
- }
+
+out:
+ return ErrorCode;
+}
+
+/*
+ * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
+ */
+
+static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
+ unsigned long Argument)
+{
+ long ErrorCode = 0;
+ void __user *argp = (void __user *)Argument;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+
+ mutex_lock(&DAC960_mutex);
+ switch (Request)
+ {
+ case DAC960_IOCTL_GET_CONTROLLER_COUNT:
+ ErrorCode = DAC960_ControllerCount;
+ break;
+ case DAC960_IOCTL_GET_CONTROLLER_INFO:
+ ErrorCode = DAC960_gam_get_controller_info(argp);
+ break;
+ case DAC960_IOCTL_V1_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v1_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v2_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
+ ErrorCode = DAC960_gam_v2_get_health_status(argp);
break;
default:
ErrorCode = -ENOTTY;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 40579d0cb3d1..ad9b687a236a 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -20,6 +20,10 @@ config BLK_DEV_NULL_BLK
tristate "Null test block driver"
select CONFIGFS_FS
+config BLK_DEV_NULL_BLK_FAULT_INJECTION
+ bool "Support fault injection for Null test block driver"
+ depends on BLK_DEV_NULL_BLK && FAULT_INJECTION
+
config BLK_DEV_FD
tristate "Normal floppy disk support"
depends on ARCH_MAY_HAVE_PC_FDC
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 9220f8e833d0..c0ebda1283cc 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -112,8 +112,7 @@ enum frame_flags {
struct frame {
struct list_head head;
u32 tag;
- struct timeval sent; /* high-res time packet was sent */
- u32 sent_jiffs; /* low-res jiffies-based sent time */
+ ktime_t sent; /* high-res time packet was sent */
ulong waited;
ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 812fed069708..540bb60cd071 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -398,8 +398,7 @@ aoecmd_ata_rw(struct aoedev *d)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -489,8 +488,7 @@ resend(struct aoedev *d, struct frame *f)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -499,33 +497,17 @@ resend(struct aoedev *d, struct frame *f)
static int
tsince_hr(struct frame *f)
{
- struct timeval now;
- int n;
+ u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));
- do_gettimeofday(&now);
- n = now.tv_usec - f->sent.tv_usec;
- n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;
+ /* delta is normally under 4.2 seconds, avoid 64-bit division */
+ if (likely(delta <= UINT_MAX))
+ return (u32)delta / NSEC_PER_USEC;
- if (n < 0)
- n = -n;
+ /* avoid overflow after 71 minutes */
+ if (delta > ((u64)INT_MAX * NSEC_PER_USEC))
+ return INT_MAX;
- /* For relatively long periods, use jiffies to avoid
- * discrepancies caused by updates to the system time.
- *
- * On system with HZ of 1000, 32-bits is over 49 days
- * worth of jiffies, or over 71 minutes worth of usecs.
- *
- * Jiffies overflow is handled by subtraction of unsigned ints:
- * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
- * $3 = 4
- * (gdb)
- */
- if (n > USEC_PER_SEC / 4) {
- n = ((u32) jiffies) - f->sent_jiffs;
- n *= USEC_PER_SEC / HZ;
- }
-
- return n;
+ return div_u64(delta, NSEC_PER_USEC);
}
static int
@@ -589,7 +571,6 @@ reassign_frame(struct frame *f)
nf->waited = 0;
nf->waited_total = f->waited_total;
nf->sent = f->sent;
- nf->sent_jiffs = f->sent_jiffs;
f->skb = skb;
return nf;
@@ -633,8 +614,7 @@ probe(struct aoetgt *t)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -1432,10 +1412,8 @@ aoecmd_ata_id(struct aoedev *d)
d->timer.function = rexmit_timer;
skb = skb_clone(skb, GFP_ATOMIC);
- if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
- }
+ if (skb)
+ f->sent = ktime_get();
return skb;
}
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index bd97908c766f..9f4e6f502b84 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -953,7 +953,7 @@ static void drbd_bm_endio(struct bio *bio)
struct drbd_bm_aio_ctx *ctx = bio->bi_private;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
- unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
+ unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 4b4697a1f963..0a0394aa1b9c 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1847,7 +1847,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov = {.iov_base = buf, .iov_len = size};
- struct msghdr msg;
+ struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
int rv, sent = 0;
if (!sock)
@@ -1855,12 +1855,6 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
/* THINK if (signal_pending) return ... ? */
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
if (sock == connection->data.socket) {
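
The drbd_send() change leans on a C guarantee worth spelling out: a designated
initializer zero-fills every member that is not named, so the five removed
assignments became redundant once msg_flags moved into the initializer. A
self-contained illustration (plain userspace C, nothing drbd-specific):

    #include <assert.h>
    #include <stddef.h>
    #include <sys/socket.h>

    int main(void)
    {
            /* Only msg_flags is named; C99/C11 guarantee the remaining
             * members are initialized to zero, exactly as the removed
             * explicit assignments did by hand. */
            struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };

            assert(msg.msg_name == NULL);
            assert(msg.msg_namelen == 0);
            assert(msg.msg_control == NULL);
            assert(msg.msg_controllen == 0);
            return 0;
    }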
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cb2fa63f6bc0..c72dee0ef083 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -516,7 +516,8 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
struct msghdr msg = {
.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
};
- return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
+ return sock_recvmsg(sock, &msg, msg.msg_flags);
}
static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bc8e61506968..d5fe720cf149 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1581,9 +1581,8 @@ out:
return err;
}
-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
{
- struct loop_device *lo = disk->private_data;
int err;
if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
mutex_unlock(&lo->lo_ctl_mutex);
}
+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+ mutex_lock(&loop_index_mutex);
+ __lo_release(disk->private_data);
+ mutex_unlock(&loop_index_mutex);
+}
+
static const struct block_device_operations lo_fops = {
.owner = THIS_MODULE,
.open = lo_open,
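
The loop fix is a textbook lock-wrapper refactoring: the teardown logic moves
into an unlocked __lo_release() and the exported entry point only takes
loop_index_mutex around it, so callers that already hold the mutex can use the
helper directly. The shape of the pattern, reduced to a generic pthreads
sketch (all names hypothetical):

    #include <pthread.h>

    static pthread_mutex_t index_mutex = PTHREAD_MUTEX_INITIALIZER;

    struct device { int refcnt; };

    /* Caller must hold index_mutex. */
    static void __dev_release(struct device *dev)
    {
            if (--dev->refcnt)
                    return;
            /* final teardown, safe against concurrent index lookups */
    }

    static void dev_release(struct device *dev)
    {
            pthread_mutex_lock(&index_mutex);
            __dev_release(dev);
            pthread_mutex_unlock(&index_mutex);
    }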
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ad0477ae820f..6655893a3a7a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -12,9 +12,9 @@
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
-#include <linux/lightnvm.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
+#include <linux/fault-inject.h>
#define SECTOR_SHIFT 9
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
@@ -27,6 +27,10 @@
#define TICKS_PER_SEC 50ULL
#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(null_timeout_attr);
+#endif
+
static inline u64 mb_per_tick(int mbps)
{
return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
@@ -107,7 +111,6 @@ struct nullb_device {
unsigned int hw_queue_depth; /* queue depth */
unsigned int index; /* index of the disk, only valid with a disk */
unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
- bool use_lightnvm; /* register as a LightNVM device */
bool blocking; /* blocking blk-mq device */
bool use_per_node_hctx; /* use per-node allocation for hardware context */
bool power; /* power on/off the device */
@@ -121,7 +124,6 @@ struct nullb {
unsigned int index;
struct request_queue *q;
struct gendisk *disk;
- struct nvm_dev *ndev;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
unsigned int queue_depth;
@@ -139,7 +141,6 @@ static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
-static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;
enum {
@@ -166,6 +167,11 @@ static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static char g_timeout_str[80];
+module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO);
+#endif
+
static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
@@ -208,10 +214,6 @@ static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
-static bool g_use_lightnvm;
-module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
-MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
-
static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
@@ -345,7 +347,6 @@ NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
-NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
@@ -455,7 +456,6 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_use_lightnvm,
&nullb_device_attr_blocking,
&nullb_device_attr_use_per_node_hctx,
&nullb_device_attr_power,
@@ -573,7 +573,6 @@ static struct nullb_device *null_alloc_dev(void)
dev->blocksize = g_bs;
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
- dev->use_lightnvm = g_use_lightnvm;
dev->blocking = g_blocking;
dev->use_per_node_hctx = g_use_per_node_hctx;
return dev;
@@ -1352,6 +1351,12 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
struct nullb *nullb = q->queuedata;
@@ -1369,6 +1374,16 @@ static int null_rq_prep_fn(struct request_queue *q, struct request *req)
return BLKPREP_DEFER;
}
+static bool should_timeout_request(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (g_timeout_str[0])
+ return should_fail(&null_timeout_attr, 1);
+#endif
+
+ return false;
+}
+
static void null_request_fn(struct request_queue *q)
{
struct request *rq;
@@ -1376,12 +1391,20 @@ static void null_request_fn(struct request_queue *q)
while ((rq = blk_fetch_request(q)) != NULL) {
struct nullb_cmd *cmd = rq->special;
- spin_unlock_irq(q->queue_lock);
- null_handle_cmd(cmd);
- spin_lock_irq(q->queue_lock);
+ if (!should_timeout_request(rq)) {
+ spin_unlock_irq(q->queue_lock);
+ null_handle_cmd(cmd);
+ spin_lock_irq(q->queue_lock);
+ }
}
}
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1399,12 +1422,16 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
- return null_handle_cmd(cmd);
+ if (!should_timeout_request(bd->rq))
+ return null_handle_cmd(cmd);
+
+ return BLK_STS_OK;
}
static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_softirq_done_fn,
+ .timeout = null_timeout_rq,
};
static void cleanup_queue(struct nullb_queue *nq)
@@ -1423,170 +1450,6 @@ static void cleanup_queues(struct nullb *nullb)
kfree(nullb->queues);
}
-#ifdef CONFIG_NVM
-
-static void null_lnvm_end_io(struct request *rq, blk_status_t status)
-{
- struct nvm_rq *rqd = rq->end_io_data;
-
- /* XXX: lighnvm core seems to expect NVM_RSP_* values here.. */
- rqd->error = status ? -EIO : 0;
- nvm_end_io(rqd);
-
- blk_put_request(rq);
-}
-
-static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- struct request_queue *q = dev->q;
- struct request *rq;
- struct bio *bio = rqd->bio;
-
- rq = blk_mq_alloc_request(q,
- op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return -ENOMEM;
-
- blk_init_request_from_bio(rq, bio);
-
- rq->end_io_data = rqd;
-
- blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
-
- return 0;
-}
-
-static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
-{
- struct nullb *nullb = dev->q->queuedata;
- sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
- sector_t blksize;
- struct nvm_id_group *grp;
-
- id->ver_id = 0x1;
- id->vmnt = 0;
- id->cap = 0x2;
- id->dom = 0x1;
-
- id->ppaf.blk_offset = 0;
- id->ppaf.blk_len = 16;
- id->ppaf.pg_offset = 16;
- id->ppaf.pg_len = 16;
- id->ppaf.sect_offset = 32;
- id->ppaf.sect_len = 8;
- id->ppaf.pln_offset = 40;
- id->ppaf.pln_len = 8;
- id->ppaf.lun_offset = 48;
- id->ppaf.lun_len = 8;
- id->ppaf.ch_offset = 56;
- id->ppaf.ch_len = 8;
-
- sector_div(size, nullb->dev->blocksize); /* convert size to pages */
- size >>= 8; /* concert size to pgs pr blk */
- grp = &id->grp;
- grp->mtype = 0;
- grp->fmtype = 0;
- grp->num_ch = 1;
- grp->num_pg = 256;
- blksize = size;
- size >>= 16;
- grp->num_lun = size + 1;
- sector_div(blksize, grp->num_lun);
- grp->num_blk = blksize;
- grp->num_pln = 1;
-
- grp->fpg_sz = nullb->dev->blocksize;
- grp->csecs = nullb->dev->blocksize;
- grp->trdt = 25000;
- grp->trdm = 25000;
- grp->tprt = 500000;
- grp->tprm = 500000;
- grp->tbet = 1500000;
- grp->tbem = 1500000;
- grp->mpos = 0x010101; /* single plane rwe */
- grp->cpar = nullb->dev->hw_queue_depth;
-
- return 0;
-}
-
-static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
-{
- mempool_t *virtmem_pool;
-
- virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
- if (!virtmem_pool) {
- pr_err("null_blk: Unable to create virtual memory pool\n");
- return NULL;
- }
-
- return virtmem_pool;
-}
-
-static void null_lnvm_destroy_dma_pool(void *pool)
-{
- mempool_destroy(pool);
-}
-
-static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
- gfp_t mem_flags, dma_addr_t *dma_handler)
-{
- return mempool_alloc(pool, mem_flags);
-}
-
-static void null_lnvm_dev_dma_free(void *pool, void *entry,
- dma_addr_t dma_handler)
-{
- mempool_free(entry, pool);
-}
-
-static struct nvm_dev_ops null_lnvm_dev_ops = {
- .identity = null_lnvm_id,
- .submit_io = null_lnvm_submit_io,
-
- .create_dma_pool = null_lnvm_create_dma_pool,
- .destroy_dma_pool = null_lnvm_destroy_dma_pool,
- .dev_dma_alloc = null_lnvm_dev_dma_alloc,
- .dev_dma_free = null_lnvm_dev_dma_free,
-
- /* Simulate nvme protocol restriction */
- .max_phys_sect = 64,
-};
-
-static int null_nvm_register(struct nullb *nullb)
-{
- struct nvm_dev *dev;
- int rv;
-
- dev = nvm_alloc_dev(0);
- if (!dev)
- return -ENOMEM;
-
- dev->q = nullb->q;
- memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
- dev->ops = &null_lnvm_dev_ops;
-
- rv = nvm_register(dev);
- if (rv) {
- kfree(dev);
- return rv;
- }
- nullb->ndev = dev;
- return 0;
-}
-
-static void null_nvm_unregister(struct nullb *nullb)
-{
- nvm_unregister(nullb->ndev);
-}
-#else
-static int null_nvm_register(struct nullb *nullb)
-{
- pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
- return -EINVAL;
-}
-static void null_nvm_unregister(struct nullb *nullb) {}
-#endif /* CONFIG_NVM */
-
static void null_del_dev(struct nullb *nullb)
{
struct nullb_device *dev = nullb->dev;
@@ -1595,10 +1458,7 @@ static void null_del_dev(struct nullb *nullb)
list_del_init(&nullb->list);
- if (dev->use_lightnvm)
- null_nvm_unregister(nullb);
- else
- del_gendisk(nullb->disk);
+ del_gendisk(nullb->disk);
if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
hrtimer_cancel(&nullb->bw_timer);
@@ -1610,8 +1470,7 @@ static void null_del_dev(struct nullb *nullb)
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- if (!dev->use_lightnvm)
- put_disk(nullb->disk);
+ put_disk(nullb->disk);
cleanup_queues(nullb);
if (null_cache_active(nullb))
null_free_device_storage(nullb->dev, true);
@@ -1775,11 +1634,6 @@ static void null_validate_conf(struct nullb_device *dev)
{
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
- if (dev->use_lightnvm && dev->blocksize != 4096)
- dev->blocksize = 4096;
-
- if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
- dev->queue_mode = NULL_Q_MQ;
if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
@@ -1805,6 +1659,20 @@ static void null_validate_conf(struct nullb_device *dev)
dev->mbps = 0;
}
+static bool null_setup_fault(void)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (!g_timeout_str[0])
+ return true;
+
+ if (!setup_fault_attr(&null_timeout_attr, g_timeout_str))
+ return false;
+
+ null_timeout_attr.verbose = 0;
+#endif
+ return true;
+}
+
static int null_add_dev(struct nullb_device *dev)
{
struct nullb *nullb;
@@ -1838,6 +1706,10 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_cleanup_queues;
+ if (!null_setup_fault())
+ goto out_cleanup_queues;
+
+ nullb->tag_set->timeout = 5 * HZ;
nullb->q = blk_mq_init_queue(nullb->tag_set);
if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
@@ -1861,8 +1733,14 @@ static int null_add_dev(struct nullb_device *dev)
rv = -ENOMEM;
goto out_cleanup_queues;
}
+
+ if (!null_setup_fault())
+ goto out_cleanup_blk_queue;
+
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
+ nullb->q->rq_timeout = 5 * HZ;
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1895,11 +1773,7 @@ static int null_add_dev(struct nullb_device *dev)
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (dev->use_lightnvm)
- rv = null_nvm_register(nullb);
- else
- rv = null_gendisk_register(nullb);
-
+ rv = null_gendisk_register(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1938,18 +1812,6 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
- if (g_use_lightnvm && g_bs != 4096) {
- pr_warn("null_blk: LightNVM only supports 4k block size\n");
- pr_warn("null_blk: defaults block size to 4k\n");
- g_bs = 4096;
- }
-
- if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
- pr_warn("null_blk: LightNVM only supported for blk-mq\n");
- pr_warn("null_blk: defaults queue mode to blk-mq\n");
- g_queue_mode = NULL_Q_MQ;
- }
-
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
@@ -1982,16 +1844,6 @@ static int __init null_init(void)
goto err_conf;
}
- if (g_use_lightnvm) {
- ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
- 0, 0, NULL);
- if (!ppa_cache) {
- pr_err("null_blk: unable to create ppa cache\n");
- ret = -ENOMEM;
- goto err_ppa;
- }
- }
-
for (i = 0; i < nr_devices; i++) {
dev = null_alloc_dev();
if (!dev) {
@@ -2015,8 +1867,6 @@ err_dev:
null_del_dev(nullb);
null_free_dev(dev);
}
- kmem_cache_destroy(ppa_cache);
-err_ppa:
unregister_blkdev(null_major, "nullb");
err_conf:
configfs_unregister_subsystem(&nullb_subsys);
@@ -2047,8 +1897,6 @@ static void __exit null_exit(void)
if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
-
- kmem_cache_destroy(ppa_cache);
}
module_init(null_init);
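
The null_blk additions hang off the kernel's generic fault-injection
framework: setup_fault_attr() parses a module-parameter string of the form
"<interval>,<probability>,<space>,<times>" and should_fail() then decides per
call whether to inject a failure. A toy userspace reduction of that gating
logic (illustrative only; the real fault_attr has more fields and
ratelimiting):

    #include <stdbool.h>
    #include <stdlib.h>

    struct toy_fault_attr {
            unsigned interval;      /* consider at most every Nth call */
            unsigned probability;   /* percent chance once considered */
            long times;             /* failures left, -1 = unlimited */
            unsigned long count;
    };

    static bool toy_should_fail(struct toy_fault_attr *attr)
    {
            if (attr->times == 0)
                    return false;
            if (attr->interval > 1 && ++attr->count % attr->interval)
                    return false;
            if ((unsigned)(rand() % 100) >= attr->probability)
                    return false;
            if (attr->times > 0)
                    attr->times--;
            return true;
    }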
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 67974796c350..531a0915066b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
+ ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
+ if (ret)
+ return ret;
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
- bdput(bdev);
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}
- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
- if (ret)
- return ret;
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
@@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
- goto out_new_dev;
+ goto out_mem2;
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
@@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
mutex_unlock(&ctl_mutex);
return 0;
-out_new_dev:
- blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
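
Both pktcdvd hunks enforce the same discipline: acquire resources in one
order and unwind them in exactly the reverse order, one goto label per
acquired resource. The first hunk takes the blkdev reference before the queue
check so the error path can release it; the second jumps to the label that
matches what has actually been acquired. The canonical shape (generic C with
stub resources; all names hypothetical):

    static int acquire_a(void) { return 0; }
    static int acquire_b(void) { return 0; }
    static int do_work(void)   { return -1; }  /* later failure point */
    static void release_a(void) { }
    static void release_b(void) { }

    static int setup(void)
    {
            int err;

            err = acquire_a();
            if (err)
                    return err;

            err = acquire_b();
            if (err)
                    goto out_release_a;

            err = do_work();
            if (err)
                    goto out_release_b;

            return 0;

    out_release_b:
            release_b();
    out_release_a:
            release_a();
            return err;
    }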
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 38fc5f397fde..cc93522a6d41 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
mutex_unlock(&rbd_dev->watch_mutex);
}
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+ struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+ strcpy(rbd_dev->lock_cookie, cookie);
+ rbd_set_owner_cid(rbd_dev, &cid);
+ queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
/*
* lock_rwsem must be held for write
*/
static int rbd_lock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct rbd_client_id cid = rbd_get_cid(rbd_dev);
char cookie[32];
int ret;
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
return ret;
rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
- strcpy(rbd_dev->lock_cookie, cookie);
- rbd_set_owner_cid(rbd_dev, &cid);
- queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+ __rbd_lock(rbd_dev, cookie);
return 0;
}
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->lock_dwork, 0);
} else {
- strcpy(rbd_dev->lock_cookie, cookie);
+ __rbd_lock(rbd_dev, cookie);
}
}
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
segment_size = rbd_obj_bytes(&rbd_dev->header);
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
q->limits.max_sectors = queue_max_hw_sectors(q);
- blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+ blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, segment_size);
blk_queue_io_min(q, segment_size);
blk_queue_io_opt(q, segment_size);
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
deleted file mode 100644
index e5565fbaeb30..000000000000
--- a/drivers/block/smart1,2.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- * If you want to make changes, improve or add functionality to this
- * driver, you'll probably need the Compaq Array Controller Interface
- * Specificiation (Document number ECG086/1198)
- */
-
-/*
- * This file contains the controller communication implementation for
- * Compaq SMART-1 and SMART-2 controllers. To the best of my knowledge,
- * this should support:
- *
- * PCI:
- * SMART-2/P, SMART-2DH, SMART-2SL, SMART-221, SMART-3100ES, SMART-3200
- * Integerated SMART Array Controller, SMART-4200, SMART-4250ES
- *
- * EISA:
- * SMART-2/E, SMART, IAES, IDA-2, IDA
- */
-
-/*
- * Memory mapped FIFO interface (SMART 42xx cards)
- */
-static void smart4_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + S42XX_REQUEST_PORT_OFFSET);
-}
-
-/*
- * This card is the opposite of the other cards.
- * 0 turns interrupts on...
- * 0x08 turns them off...
- */
-static void smart4_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val)
- { /* Turn interrupts on */
- writel(0, h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- } else /* Turn them off */
- {
- writel( S42XX_INTR_OFF,
- h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- }
-}
-
-/*
- * For older cards FIFO Full = 0.
- * On this card 0 means there is room, anything else FIFO Full.
- *
- */
-static unsigned long smart4_fifo_full(ctlr_info_t *h)
-{
-
- return (!readl(h->vaddr + S42XX_REQUEST_PORT_OFFSET));
-}
-
-/* This type of controller returns -1 if the fifo is empty,
- * Not 0 like the others.
- * And we need to let it know we read a value out
- */
-static unsigned long smart4_completed(ctlr_info_t *h)
-{
- long register_value
- = readl(h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- /* Fifo is empty */
- if( register_value == 0xffffffff)
- return 0;
-
- /* Need to let it know we got the reply */
- /* We do this by writing a 0 to the port we just read from */
- writel(0, h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- return ((unsigned long) register_value);
-}
-
- /*
- * This hardware returns interrupt pending at a different place and
- * it does not tell us if the fifo is empty, we will have check
- * that by getting a 0 back from the command_completed call.
- */
-static unsigned long smart4_intr_pending(ctlr_info_t *h)
-{
- unsigned long register_value =
- readl(h->vaddr + S42XX_INTR_STATUS);
-
- if( register_value & S42XX_INTR_PENDING)
- return FIFO_NOT_EMPTY;
- return 0 ;
-}
-
-static struct access_method smart4_access = {
- smart4_submit_command,
- smart4_intr_mask,
- smart4_fifo_full,
- smart4_intr_pending,
- smart4_completed,
-};
-
-/*
- * Memory mapped FIFO interface (PCI SMART2 and SMART 3xxx cards)
- */
-static void smart2_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + COMMAND_FIFO);
-}
-
-static void smart2_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- writel(val, h->vaddr + INTR_MASK);
-}
-
-static unsigned long smart2_fifo_full(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_FIFO);
-}
-
-static unsigned long smart2_completed(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2_intr_pending(ctlr_info_t *h)
-{
- return readl(h->vaddr + INTR_PENDING);
-}
-
-static struct access_method smart2_access = {
- smart2_submit_command,
- smart2_intr_mask,
- smart2_fifo_full,
- smart2_intr_pending,
- smart2_completed,
-};
-
-/*
- * IO access for SMART-2/E cards
- */
-static void smart2e_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- outl(c->busaddr, h->io_mem_addr + COMMAND_FIFO);
-}
-
-static void smart2e_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- outl(val, h->io_mem_addr + INTR_MASK);
-}
-
-static unsigned long smart2e_fifo_full(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_FIFO);
-}
-
-static unsigned long smart2e_completed(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2e_intr_pending(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + INTR_PENDING);
-}
-
-static struct access_method smart2e_access = {
- smart2e_submit_command,
- smart2e_intr_mask,
- smart2e_fifo_full,
- smart2e_intr_pending,
- smart2e_completed,
-};
-
-/*
- * IO access for older SMART-1 type cards
- */
-#define SMART1_SYSTEM_MASK 0xC8E
-#define SMART1_SYSTEM_DOORBELL 0xC8F
-#define SMART1_LOCAL_MASK 0xC8C
-#define SMART1_LOCAL_DOORBELL 0xC8D
-#define SMART1_INTR_MASK 0xC89
-#define SMART1_LISTADDR 0xC90
-#define SMART1_LISTLEN 0xC94
-#define SMART1_TAG 0xC97
-#define SMART1_COMPLETE_ADDR 0xC98
-#define SMART1_LISTSTATUS 0xC9E
-
-#define CHANNEL_BUSY 0x01
-#define CHANNEL_CLEAR 0x02
-
-static void smart1_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- /*
- * This __u16 is actually a bunch of control flags on SMART
- * and below. We want them all to be zero.
- */
- c->hdr.size = 0;
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- outl(c->busaddr, h->io_mem_addr + SMART1_LISTADDR);
- outw(c->size, h->io_mem_addr + SMART1_LISTLEN);
-
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-}
-
-static void smart1_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val == 1) {
- outb(0xFD, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
- outb(0x01, h->io_mem_addr + SMART1_INTR_MASK);
- outb(0x01, h->io_mem_addr + SMART1_SYSTEM_MASK);
- } else {
- outb(0, h->io_mem_addr + 0xC8E);
- }
-}
-
-static unsigned long smart1_fifo_full(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_CLEAR;
- return chan;
-}
-
-static unsigned long smart1_completed(ctlr_info_t *h)
-{
- unsigned char status;
- unsigned long cmd;
-
- if (inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY) {
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- cmd = inl(h->io_mem_addr + SMART1_COMPLETE_ADDR);
- status = inb(h->io_mem_addr + SMART1_LISTSTATUS);
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-
- /*
- * this is x86 (actually compaq x86) only, so it's ok
- */
- if (cmd) ((cmdlist_t*)bus_to_virt(cmd))->req.hdr.rcode = status;
- } else {
- cmd = 0;
- }
- return cmd;
-}
-
-static unsigned long smart1_intr_pending(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY;
- return chan;
-}
-
-static struct access_method smart1_access = {
- smart1_submit_command,
- smart1_intr_mask,
- smart1_fifo_full,
- smart1_intr_pending,
- smart1_completed,
-};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d70eba30003a..0afa6c8c3857 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -430,7 +430,7 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
static void zram_page_end_io(struct bio *bio)
{
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
page_endio(page, op_is_write(bio_op(bio)),
blk_status_to_errno(bio->bi_status));
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 60e1c7d6986d..07e55cd8f8c8 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -31,6 +31,16 @@ config BT_HCIBTUSB
Say Y here to compile support for Bluetooth USB devices into the
kernel or say M to compile it as module (btusb).
+config BT_HCIBTUSB_AUTOSUSPEND
+ bool "Enable USB autosuspend for Bluetooth USB devices by default"
+ depends on BT_HCIBTUSB
+ help
+ Say Y here to enable USB autosuspend for Bluetooth USB devices by
+ default.
+
+ This can be overridden by passing btusb.enable_autosuspend=[y|n]
+ on the kernel command line.
+
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
@@ -67,6 +77,7 @@ config BT_HCIBTSDIO
config BT_HCIUART
tristate "HCI UART driver"
depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
+ depends on NVMEM || !NVMEM
depends on TTY
help
Bluetooth HCI UART driver.
@@ -97,6 +108,7 @@ config BT_HCIUART_NOKIA
tristate "UART Nokia H4+ protocol support"
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
+ depends on GPIOLIB
depends on PM
select BT_HCIUART_H4
select BT_BCM
@@ -158,6 +170,7 @@ config BT_HCIUART_3WIRE
config BT_HCIUART_INTEL
bool "Intel protocol support"
depends on BT_HCIUART
+ depends on GPIOLIB
select BT_HCIUART_H4
select BT_INTEL
help
@@ -171,6 +184,7 @@ config BT_HCIUART_BCM
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on (!ACPI || SERIAL_DEV_CTRL_TTYPORT)
+ depends on GPIOLIB
select BT_HCIUART_H4
select BT_BCM
help
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d513ef4743dc..82437a69f99c 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -302,9 +302,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
}
/* Wait until the command reaches the baseband */
- prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/10);
- finish_wait(&wq, &wait);
+ mdelay(100);
/* Set baud on baseband */
info->ctrl_reg &= ~0x03;
@@ -316,9 +314,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
outb(info->ctrl_reg, iobase + REG_CONTROL);
/* Wait before the next HCI packet can be send */
- prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- finish_wait(&wq, &wait);
+ mdelay(1000);
}
if (len == skb->len) {
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 7971bfbd4321..801ea4ca65e4 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -259,7 +259,7 @@ static int bpa10x_flush(struct hci_dev *hdev)
static int bpa10x_setup(struct hci_dev *hdev)
{
- const u8 req[] = { 0x07 };
+ static const u8 req[] = { 0x07 };
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
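
Tagging the request buffer static const moves it out of the function's stack
frame and into read-only data, so it is no longer rebuilt on every call. The
difference in miniature (self-contained C; xmit() is a stand-in for the real
send path):

    typedef unsigned char u8;

    static int xmit(const u8 *buf, unsigned len) { (void)buf; return len; }

    static int send_req_stack(void)
    {
            const u8 req[] = { 0x07 };        /* copied to the stack per call */
            return xmit(req, sizeof(req));
    }

    static int send_req_rodata(void)
    {
            static const u8 req[] = { 0x07 }; /* one copy in .rodata */
            return xmit(req, sizeof(req));
    }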
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index afa4cb3b16e3..6659f113042c 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -323,6 +323,7 @@ static const struct {
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
+ { 0x2122, "BCM4343A0" }, /* 001.001.034 */
{ 0x2209, "BCM43430A1" }, /* 001.002.009 */
{ 0x6119, "BCM4345C0" }, /* 003.001.025 */
{ 0x230f, "BCM4356A2" }, /* 001.003.015 */
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index d9e6b41658e5..cfe6ad4cc621 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -44,8 +44,8 @@ struct bcm_set_sleep_mode {
__u8 tristate_control;
__u8 usb_auto_sleep;
__u8 usb_resume_timeout;
- __u8 pulsed_host_wake;
__u8 break_to_host;
+ __u8 pulsed_host_wake;
} __packed;
struct bcm_set_pcm_int_params {
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 4459555c9d88..5270d5513201 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -75,7 +76,7 @@ EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
int btintel_enter_mfg(struct hci_dev *hdev)
{
- const u8 param[] = { 0x01, 0x00 };
+ static const u8 param[] = { 0x01, 0x00 };
struct sk_buff *skb;
skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
@@ -569,6 +570,160 @@ struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
}
EXPORT_SYMBOL_GPL(btintel_regmap_init);
+int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
+{
+ struct intel_reset params = { 0x00, 0x01, 0x00, 0x01, 0x00000000 };
+ struct sk_buff *skb;
+
+ params.boot_param = cpu_to_le32(boot_param);
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Failed to send Intel Reset command");
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_send_intel_reset);
+
+int btintel_read_boot_params(struct hci_dev *hdev,
+ struct intel_boot_params *params)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*params)) {
+ bt_dev_err(hdev, "Intel boot parameters size mismatch");
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ memcpy(params, skb->data, sizeof(*params));
+
+ kfree_skb(skb);
+
+ if (params->status) {
+ bt_dev_err(hdev, "Intel boot parameters command failed (%02x)",
+ params->status);
+ return -bt_to_errno(params->status);
+ }
+
+ bt_dev_info(hdev, "Device revision is %u",
+ le16_to_cpu(params->dev_revid));
+
+ bt_dev_info(hdev, "Secure boot is %s",
+ params->secure_boot ? "enabled" : "disabled");
+
+ bt_dev_info(hdev, "OTP lock is %s",
+ params->otp_lock ? "enabled" : "disabled");
+
+ bt_dev_info(hdev, "API lock is %s",
+ params->api_lock ? "enabled" : "disabled");
+
+ bt_dev_info(hdev, "Debug lock is %s",
+ params->debug_lock ? "enabled" : "disabled");
+
+ bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
+ params->min_fw_build_nn, params->min_fw_build_cw,
+ 2000 + params->min_fw_build_yy);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_read_boot_params);
+
+int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
+ u32 *boot_param)
+{
+ int err;
+ const u8 *fw_ptr;
+ u32 frag_len;
+
+ /* Start the firmware download transaction with the Init fragment
+ * represented by the 128 bytes of CSS header.
+ */
+ err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of public key information from the firmware
+ * as the PKey fragment.
+ */
+ err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of signature information from the firmware
+ * as the Sign fragment.
+ */
+ err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware signature (%d)", err);
+ goto done;
+ }
+
+ fw_ptr = fw->data + 644;
+ frag_len = 0;
+
+ while (fw_ptr - fw->data < fw->size) {
+ struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+ /* Each SKU has a different reset parameter to use in the
+ * HCI_Intel_Reset command and it is embedded in the firmware
+ * data. So, instead of using a static value per SKU, check
+ * the firmware data and save it for later use.
+ */
+ if (le16_to_cpu(cmd->opcode) == 0xfc0e) {
+ /* The boot parameter is the first 32-bit value
+ * and the remaining 3 octets are reserved.
+ */
+ *boot_param = get_unaligned_le32(fw_ptr + sizeof(*cmd));
+
+ bt_dev_dbg(hdev, "boot_param=0x%x", *boot_param);
+ }
+
+ frag_len += sizeof(*cmd) + cmd->plen;
+
+ /* The parameter length of the secure send command requires
+ * a 4 byte alignment. The firmware file conveniently contains
+ * Intel_NOP commands that align the fragments as needed.
+ *
+ * Send each set of commands with 4 byte alignment from the
+ * firmware data buffer as a single Data fragment.
+ */
+ if (!(frag_len % 4)) {
+ err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+ if (err < 0) {
+ bt_dev_err(hdev,
+ "Failed to send firmware data (%d)",
+ err);
+ goto done;
+ }
+
+ fw_ptr += frag_len;
+ frag_len = 0;
+ }
+ }
+
+done:
+ return err;
+}
+EXPORT_SYMBOL_GPL(btintel_download_firmware);
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
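
The fragment walk that btusb.c used to open-code and that
btintel_download_firmware() now owns is easier to follow in isolation:
commands are 3-byte headers (little-endian opcode, parameter length) followed
by parameters, runs of whole commands are flushed whenever the accumulated
length hits 4-byte alignment, and opcode 0xfc0e carries the boot parameter. A
hedged userspace re-creation of just the parsing loop (walk() and flush() are
stand-ins; the real code sends each run with btintel_secure_send()):

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t get_le32(const uint8_t *p)
    {
            return p[0] | (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    static int walk(const uint8_t *fw, size_t len, uint32_t *boot_param,
                    int (*flush)(const uint8_t *frag, size_t n))
    {
            const uint8_t *p = fw;
            size_t frag = 0;

            while ((size_t)(p - fw) < len) {
                    const uint8_t *hdr = p + frag;
                    uint16_t opcode = hdr[0] | (uint16_t)hdr[1] << 8;
                    uint8_t plen = hdr[2];

                    /* 0xfc0e is Intel_Reset; its first parameter dword is
                     * the SKU-specific boot address saved for later use. */
                    if (opcode == 0xfc0e)
                            *boot_param = get_le32(hdr + 3);

                    frag += 3 + plen;

                    /* Embedded Intel_NOPs pad runs to 4-byte alignment;
                     * flush each aligned run as one Data fragment. */
                    if (!(frag % 4)) {
                            if (flush(p, frag) < 0)
                                    return -1;
                            p += frag;
                            frag = 0;
                    }
            }
            return 0;
    }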
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 1e8955aaafed..41c642cc523f 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -69,6 +69,14 @@ struct intel_secure_send_result {
__u8 status;
} __packed;
+struct intel_reset {
+ __u8 reset_type;
+ __u8 patch_enable;
+ __u8 ddc_reload;
+ __u8 boot_option;
+ __le32 boot_param;
+} __packed;
+
#if IS_ENABLED(CONFIG_BT_INTEL)
int btintel_check_bdaddr(struct hci_dev *hdev);
@@ -89,7 +97,11 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver);
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
u16 opcode_write);
-
+int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param);
+int btintel_read_boot_params(struct hci_dev *hdev,
+ struct intel_boot_params *params);
+int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
+ u32 *boot_param);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -165,4 +177,23 @@ static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
{
return ERR_PTR(-EINVAL);
}
+
+static inline int btintel_send_intel_reset(struct hci_dev *hdev,
+ u32 reset_param)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btintel_read_boot_params(struct hci_dev *hdev,
+ struct intel_boot_params *params)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btintel_download_firmware(struct hci_dev *dev,
+ const struct firmware *fw,
+ u32 *boot_param)
+{
+ return -EOPNOTSUPP;
+}
#endif
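
The new declarations follow the header's existing idiom for optional code:
real prototypes when CONFIG_BT_INTEL is enabled, static inline stubs returning
-EOPNOTSUPP otherwise, so callers never need their own #ifdefs. Schematically
(kernel-style C with made-up names, not the real header):

    #if IS_ENABLED(CONFIG_FOO)
    int foo_do_thing(struct foo_dev *dev);
    #else
    static inline int foo_do_thing(struct foo_dev *dev)
    {
            return -EOPNOTSUPP;     /* feature compiled out */
    }
    #endif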
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 663bed63b871..2c9a5fc9137d 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -88,7 +88,8 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
- kfree_skb(skb);
+ if (!ret)
+ kfree_skb(skb);
return ret;
}
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index c8e945d19ffe..20142bc77554 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -31,6 +31,7 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
@@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func,
tuple = tuple->next;
}
+ /* BCM43341 devices soldered onto the PCB (non-removable) use a
+ * UART connection for Bluetooth; ignore the BT SDIO interface.
+ */
+ if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
+ func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
+ !mmc_card_is_removable(func->card->host))
+ return -ENODEV;
+
data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f7120c9eb9bd..2a55380ad730 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/usb/quirks.h>
#include <linux/firmware.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -40,6 +41,7 @@
static bool disable_scofix;
static bool force_scofix;
+static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool reset = true;
@@ -263,6 +265,7 @@ static const struct usb_device_id blacklist_table[] = {
/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
@@ -270,6 +273,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
@@ -387,9 +391,8 @@ static const struct usb_device_id blacklist_table[] = {
#define BTUSB_FIRMWARE_LOADED 7
#define BTUSB_FIRMWARE_FAILED 8
#define BTUSB_BOOTING 9
-#define BTUSB_RESET_RESUME 10
-#define BTUSB_DIAG_RUNNING 11
-#define BTUSB_OOB_WAKE_ENABLED 12
+#define BTUSB_DIAG_RUNNING 10
+#define BTUSB_OOB_WAKE_ENABLED 11
struct btusb_data {
struct hci_dev *hdev;
@@ -2006,15 +2009,11 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
static int btusb_setup_intel_new(struct hci_dev *hdev)
{
- static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
- 0x00, 0x08, 0x04, 0x00 };
struct btusb_data *data = hci_get_drvdata(hdev);
- struct sk_buff *skb;
struct intel_version ver;
- struct intel_boot_params *params;
+ struct intel_boot_params params;
const struct firmware *fw;
- const u8 *fw_ptr;
- u32 frag_len;
+ u32 boot_param;
char fwname[64];
ktime_t calltime, delta, rettime;
unsigned long long duration;
@@ -2022,6 +2021,12 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ /* Set the default boot parameter to 0x0; it is updated to the
+ * SKU-specific boot parameter when the Intel_Write_Boot_Params
+ * command is parsed while downloading the firmware.
+ */
+ boot_param = 0x00000000;
+
calltime = ktime_get();
/* Read the Intel version information to determine if the device
@@ -2092,55 +2097,24 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Read the secure boot parameters to identify the operating
* details of the bootloader.
*/
- skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
- hdev->name, PTR_ERR(skb));
- return PTR_ERR(skb);
- }
-
- if (skb->len != sizeof(*params)) {
- BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
- kfree_skb(skb);
- return -EILSEQ;
- }
-
- params = (struct intel_boot_params *)skb->data;
-
- bt_dev_info(hdev, "Device revision is %u",
- le16_to_cpu(params->dev_revid));
-
- bt_dev_info(hdev, "Secure boot is %s",
- params->secure_boot ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "OTP lock is %s",
- params->otp_lock ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "API lock is %s",
- params->api_lock ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "Debug lock is %s",
- params->debug_lock ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
- params->min_fw_build_nn, params->min_fw_build_cw,
- 2000 + params->min_fw_build_yy);
+ err = btintel_read_boot_params(hdev, &params);
+ if (err)
+ return err;
/* It is required that every single firmware fragment is acknowledged
* with a command complete event. If the boot parameters indicate
* that this bootloader does not send them, then abort the setup.
*/
- if (params->limited_cce != 0x00) {
+ if (params.limited_cce != 0x00) {
BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
- hdev->name, params->limited_cce);
- kfree_skb(skb);
+ hdev->name, params.limited_cce);
return -EINVAL;
}
/* If the OTP has no valid Bluetooth device address, then there will
* also be no valid address for the operational firmware.
*/
- if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+ if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
@@ -2171,7 +2145,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ le16_to_cpu(params.dev_revid));
break;
case 0x11: /* JfP */
case 0x12: /* ThP */
@@ -2189,7 +2163,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (err < 0) {
BT_ERR("%s: Failed to load Intel firmware file (%d)",
hdev->name, err);
- kfree_skb(skb);
return err;
}
@@ -2203,7 +2176,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ le16_to_cpu(params.dev_revid));
break;
case 0x11: /* JfP */
case 0x12: /* ThP */
@@ -2217,8 +2190,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return -EINVAL;
}
- kfree_skb(skb);
-
if (fw->size < 644) {
BT_ERR("%s: Invalid size of firmware file (%zu)",
hdev->name, fw->size);
@@ -2228,64 +2199,10 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
set_bit(BTUSB_DOWNLOADING, &data->flags);
- /* Start the firmware download transaction with the Init fragment
- * represented by the 128 bytes of CSS header.
- */
- err = btintel_secure_send(hdev, 0x00, 128, fw->data);
- if (err < 0) {
- BT_ERR("%s: Failed to send firmware header (%d)",
- hdev->name, err);
- goto done;
- }
-
- /* Send the 256 bytes of public key information from the firmware
- * as the PKey fragment.
- */
- err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
- if (err < 0) {
- BT_ERR("%s: Failed to send firmware public key (%d)",
- hdev->name, err);
- goto done;
- }
-
- /* Send the 256 bytes of signature information from the firmware
- * as the Sign fragment.
- */
- err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
- if (err < 0) {
- BT_ERR("%s: Failed to send firmware signature (%d)",
- hdev->name, err);
+ /* Start the firmware download and retrieve the boot parameter */
+ err = btintel_download_firmware(hdev, fw, &boot_param);
+ if (err < 0)
goto done;
- }
-
- fw_ptr = fw->data + 644;
- frag_len = 0;
-
- while (fw_ptr - fw->data < fw->size) {
- struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
-
- frag_len += sizeof(*cmd) + cmd->plen;
-
- /* The parameter length of the secure send command requires
- * a 4 byte alignment. It happens so that the firmware file
- * contains proper Intel_NOP commands to align the fragments
- * as needed.
- *
- * Send set of commands with 4 byte alignment from the
- * firmware data buffer as a single Data fragement.
- */
- if (!(frag_len % 4)) {
- err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
- if (err < 0) {
- BT_ERR("%s: Failed to send firmware data (%d)",
- hdev->name, err);
- goto done;
- }
-
- fw_ptr += frag_len;
- frag_len = 0;
- }
- }
set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
@@ -2338,12 +2255,9 @@ done:
set_bit(BTUSB_BOOTING, &data->flags);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- kfree_skb(skb);
+ err = btintel_send_intel_reset(hdev, boot_param);
+ if (err)
+ return err;
/* The bootloader will not indicate when the device is ready. This
* is done by the operational firmware sending bootup notification.
@@ -3120,9 +3034,9 @@ static int btusb_probe(struct usb_interface *intf,
/* QCA Rome devices lose their updated firmware over suspend,
* but the USB hub doesn't notice any status change.
* Explicitly request a device reset on resume.
*/
- set_bit(BTUSB_RESET_RESUME, &data->flags);
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -3133,7 +3047,7 @@ static int btusb_probe(struct usb_interface *intf,
* but the USB hub doesn't notice any status change.
* Explicitly request a device reset on resume.
*/
- set_bit(BTUSB_RESET_RESUME, &data->flags);
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
#endif
@@ -3213,6 +3127,9 @@ static int btusb_probe(struct usb_interface *intf,
}
#endif
+ if (enable_autosuspend)
+ usb_enable_autosuspend(data->udev);
+
err = hci_register_dev(hdev);
if (err < 0)
goto out_free_dev;
@@ -3299,14 +3216,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
enable_irq(data->oob_wake_irq);
}
- /* Optionally request a device reset on resume, but only when
- * wakeups are disabled. If wakeups are enabled we assume the
- * device will stay powered up throughout suspend.
- */
- if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
- !device_may_wakeup(&data->udev->dev))
- data->udev->reset_resume = 1;
-
return 0;
}
@@ -3425,6 +3334,9 @@ MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
module_param(force_scofix, bool, 0644);
MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
+module_param(enable_autosuspend, bool, 0644);
+MODULE_PARM_DESC(enable_autosuspend, "Enable USB autosuspend by default");
+
module_param(reset, bool, 0644);
MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 707c2d1b84c7..0438a64b8185 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -29,6 +29,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
@@ -52,7 +53,37 @@
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
-/* device driver resources */
+/**
+ * struct bcm_device - device driver resources
+ * @serdev_hu: HCI UART controller struct
+ * @list: bcm_device_list node
+ * @dev: physical UART slave
+ * @name: device name logged by bt_dev_*() functions
+ * @device_wakeup: BT_WAKE pin,
+ * assert = Bluetooth device must wake up or remain awake,
+ * deassert = Bluetooth device may sleep when sleep criteria are met
+ * @shutdown: BT_REG_ON pin,
+ * power up or power down Bluetooth device internal regulators
+ * @set_device_wakeup: callback to toggle BT_WAKE pin
+ * either by accessing @device_wakeup or by calling @btlp
+ * @set_shutdown: callback to toggle BT_REG_ON pin
+ * either by accessing @shutdown or by calling @btpu/@btpd
+ * @btlp: Apple ACPI method to toggle BT_WAKE pin ("Bluetooth Low Power")
+ * @btpu: Apple ACPI method to drive BT_REG_ON pin high ("Bluetooth Power Up")
+ * @btpd: Apple ACPI method to drive BT_REG_ON pin low ("Bluetooth Power Down")
+ * @clk: clock used by Bluetooth device
+ * @clk_enabled: whether @clk is prepared and enabled
+ * @init_speed: default baudrate of Bluetooth device;
+ * the host UART is initially set to this baudrate so that
+ * it can configure the Bluetooth device for @oper_speed
+ * @oper_speed: preferred baudrate of Bluetooth device;
+ * set to 0 if @init_speed is already the preferred baudrate
+ * @irq: interrupt triggered by HOST_WAKE_BT pin
+ * @irq_active_low: whether @irq is active low
+ * @hu: pointer to HCI UART controller struct,
+ * used to disable flow control during runtime suspend and system sleep
+ * @is_suspended: whether flow control is currently disabled
+ */
struct bcm_device {
/* Must be the first member, hci_serdev.c expects this. */
struct hci_uart serdev_hu;
@@ -63,6 +94,11 @@ struct bcm_device {
const char *name;
struct gpio_desc *device_wakeup;
struct gpio_desc *shutdown;
+ int (*set_device_wakeup)(struct bcm_device *, bool);
+ int (*set_shutdown)(struct bcm_device *, bool);
+#ifdef CONFIG_ACPI
+ acpi_handle btlp, btpu, btpd;
+#endif
struct clk *clk;
bool clk_enabled;
@@ -74,7 +110,7 @@ struct bcm_device {
#ifdef CONFIG_PM
struct hci_uart *hu;
- bool is_suspended; /* suspend/resume flag */
+ bool is_suspended;
#endif
};
@@ -170,11 +206,21 @@ static bool bcm_device_exists(struct bcm_device *device)
static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
{
- if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
- clk_prepare_enable(dev->clk);
+ int err;
- gpiod_set_value(dev->shutdown, powered);
- gpiod_set_value(dev->device_wakeup, powered);
+ if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled) {
+ err = clk_prepare_enable(dev->clk);
+ if (err)
+ return err;
+ }
+
+ err = dev->set_shutdown(dev, powered);
+ if (err)
+ goto err_clk_disable;
+
+ err = dev->set_device_wakeup(dev, powered);
+ if (err)
+ goto err_revert_shutdown;
if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
clk_disable_unprepare(dev->clk);
@@ -182,6 +228,13 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
dev->clk_enabled = powered;
return 0;
+
+err_revert_shutdown:
+ dev->set_shutdown(dev, !powered);
+err_clk_disable:
+ if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+ clk_disable_unprepare(dev->clk);
+ return err;
}
#ifdef CONFIG_PM
@@ -191,9 +244,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
bt_dev_dbg(bdev, "Host wake IRQ");
- pm_runtime_get(bdev->dev);
- pm_runtime_mark_last_busy(bdev->dev);
- pm_runtime_put_autosuspend(bdev->dev);
+ pm_request_resume(bdev->dev);
return IRQ_HANDLED;
}
@@ -218,8 +269,10 @@ static int bcm_request_irq(struct bcm_data *bcm)
bdev->irq_active_low ? IRQF_TRIGGER_FALLING :
IRQF_TRIGGER_RISING,
"host_wake", bdev);
- if (err)
+ if (err) {
+ bdev->irq = err;
goto unlock;
+ }
device_init_wakeup(bdev->dev, true);
@@ -247,8 +300,8 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
/* Irrelevant USB flags */
.usb_auto_sleep = 0,
.usb_resume_timeout = 0,
+ .break_to_host = 0,
.pulsed_host_wake = 0,
- .break_to_host = 0
};
static int bcm_setup_sleep(struct hci_uart *hu)
@@ -304,6 +357,7 @@ static int bcm_open(struct hci_uart *hu)
{
struct bcm_data *bcm;
struct list_head *p;
+ int err;
bt_dev_dbg(hu->hdev, "hu %p", hu);
@@ -318,7 +372,10 @@ static int bcm_open(struct hci_uart *hu)
mutex_lock(&bcm_device_lock);
if (hu->serdev) {
- serdev_device_open(hu->serdev);
+ err = serdev_device_open(hu->serdev);
+ if (err)
+ goto err_free;
+
bcm->dev = serdev_device_get_drvdata(hu->serdev);
goto out;
}
@@ -346,17 +403,33 @@ out:
if (bcm->dev) {
hu->init_speed = bcm->dev->init_speed;
hu->oper_speed = bcm->dev->oper_speed;
- bcm_gpio_set_power(bcm->dev, true);
+ err = bcm_gpio_set_power(bcm->dev, true);
+ if (err)
+ goto err_unset_hu;
}
mutex_unlock(&bcm_device_lock);
return 0;
+
+err_unset_hu:
+ if (hu->serdev)
+ serdev_device_close(hu->serdev);
+#ifdef CONFIG_PM
+ else
+ bcm->dev->hu = NULL;
+#endif
+err_free:
+ mutex_unlock(&bcm_device_lock);
+ hu->priv = NULL;
+ kfree(bcm);
+ return err;
}
static int bcm_close(struct hci_uart *hu)
{
struct bcm_data *bcm = hu->priv;
struct bcm_device *bdev = NULL;
+ int err;
bt_dev_dbg(hu->hdev, "hu %p", hu);
@@ -374,16 +447,17 @@ static int bcm_close(struct hci_uart *hu)
}
if (bdev) {
- bcm_gpio_set_power(bdev, false);
-#ifdef CONFIG_PM
- pm_runtime_disable(bdev->dev);
- pm_runtime_set_suspended(bdev->dev);
-
- if (device_can_wakeup(bdev->dev)) {
+ if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
devm_free_irq(bdev->dev, bdev->irq, bdev);
device_init_wakeup(bdev->dev, false);
+ pm_runtime_disable(bdev->dev);
}
-#endif
+
+ err = bcm_gpio_set_power(bdev, false);
+ if (err)
+ bt_dev_err(hu->hdev, "Failed to power down");
+ else
+ pm_runtime_set_suspended(bdev->dev);
}
mutex_unlock(&bcm_device_lock);
@@ -512,11 +586,8 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
} else if (!bcm->rx_skb) {
/* Delay auto-suspend when receiving completed packet */
mutex_lock(&bcm_device_lock);
- if (bcm->dev && bcm_device_exists(bcm->dev)) {
- pm_runtime_get(bcm->dev->dev);
- pm_runtime_mark_last_busy(bcm->dev->dev);
- pm_runtime_put_autosuspend(bcm->dev->dev);
- }
+ if (bcm->dev && bcm_device_exists(bcm->dev))
+ pm_request_resume(bcm->dev->dev);
mutex_unlock(&bcm_device_lock);
}
@@ -566,6 +637,7 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
static int bcm_suspend_device(struct device *dev)
{
struct bcm_device *bdev = dev_get_drvdata(dev);
+ int err;
bt_dev_dbg(bdev, "");
@@ -577,27 +649,37 @@ static int bcm_suspend_device(struct device *dev)
}
/* Suspend the device */
- if (bdev->device_wakeup) {
- gpiod_set_value(bdev->device_wakeup, false);
- bt_dev_dbg(bdev, "suspend, delaying 15 ms");
- mdelay(15);
+ err = bdev->set_device_wakeup(bdev, false);
+ if (err) {
+ if (bdev->is_suspended && bdev->hu) {
+ bdev->is_suspended = false;
+ hci_uart_set_flow_control(bdev->hu, false);
+ }
+ return -EBUSY;
}
+ bt_dev_dbg(bdev, "suspend, delaying 15 ms");
+ msleep(15);
+
return 0;
}
static int bcm_resume_device(struct device *dev)
{
struct bcm_device *bdev = dev_get_drvdata(dev);
+ int err;
bt_dev_dbg(bdev, "");
- if (bdev->device_wakeup) {
- gpiod_set_value(bdev->device_wakeup, true);
- bt_dev_dbg(bdev, "resume, delaying 15 ms");
- mdelay(15);
+ err = bdev->set_device_wakeup(bdev, true);
+ if (err) {
+ dev_err(dev, "Failed to power up\n");
+ return err;
}
+ bt_dev_dbg(bdev, "resume, delaying 15 ms");
+ msleep(15);
+
/* When this executes, the device has woken up already */
if (bdev->is_suspended && bdev->hu) {
bdev->is_suspended = false;
@@ -632,7 +714,7 @@ static int bcm_suspend(struct device *dev)
if (pm_runtime_active(dev))
bcm_suspend_device(dev);
- if (device_may_wakeup(dev)) {
+ if (device_may_wakeup(dev) && bdev->irq > 0) {
error = enable_irq_wake(bdev->irq);
if (!error)
bt_dev_dbg(bdev, "BCM irq: enabled");
@@ -648,6 +730,7 @@ unlock:
static int bcm_resume(struct device *dev)
{
struct bcm_device *bdev = dev_get_drvdata(dev);
+ int err = 0;
bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
@@ -662,19 +745,21 @@ static int bcm_resume(struct device *dev)
if (!bdev->hu)
goto unlock;
- if (device_may_wakeup(dev)) {
+ if (device_may_wakeup(dev) && bdev->irq > 0) {
disable_irq_wake(bdev->irq);
bt_dev_dbg(bdev, "BCM irq: disabled");
}
- bcm_resume_device(dev);
+ err = bcm_resume_device(dev);
unlock:
mutex_unlock(&bcm_device_lock);
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ if (!err) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
return 0;
}
@@ -771,25 +856,84 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
return 0;
}
+
+static int bcm_apple_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+ if (ACPI_FAILURE(acpi_execute_simple_method(dev->btlp, NULL, !awake)))
+ return -EIO;
+
+ return 0;
+}
+
+static int bcm_apple_set_shutdown(struct bcm_device *dev, bool powered)
+{
+ if (ACPI_FAILURE(acpi_evaluate_object(powered ? dev->btpu : dev->btpd,
+ NULL, NULL, NULL)))
+ return -EIO;
+
+ return 0;
+}
+
+static int bcm_apple_get_resources(struct bcm_device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev->dev);
+ const union acpi_object *obj;
+
+ if (!adev ||
+ ACPI_FAILURE(acpi_get_handle(adev->handle, "BTLP", &dev->btlp)) ||
+ ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPU", &dev->btpu)) ||
+ ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPD", &dev->btpd)))
+ return -ENODEV;
+
+ if (!acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, &obj) &&
+ obj->buffer.length == 8)
+ dev->init_speed = *(u64 *)obj->buffer.pointer;
+
+ dev->set_device_wakeup = bcm_apple_set_device_wakeup;
+ dev->set_shutdown = bcm_apple_set_shutdown;
+
+ return 0;
+}
+#else
+static inline int bcm_apple_get_resources(struct bcm_device *dev)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_ACPI */
+static int bcm_gpio_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+ gpiod_set_value(dev->device_wakeup, awake);
+ return 0;
+}
+
+static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered)
+{
+ gpiod_set_value(dev->shutdown, powered);
+ return 0;
+}
+
static int bcm_get_resources(struct bcm_device *dev)
{
dev->name = dev_name(dev->dev);
+ if (x86_apple_machine && !bcm_apple_get_resources(dev))
+ return 0;
+
dev->clk = devm_clk_get(dev->dev, NULL);
- dev->device_wakeup = devm_gpiod_get_optional(dev->dev,
- "device-wakeup",
- GPIOD_OUT_LOW);
+ dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
+ GPIOD_OUT_LOW);
if (IS_ERR(dev->device_wakeup))
return PTR_ERR(dev->device_wakeup);
- dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
- GPIOD_OUT_LOW);
+ dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
if (IS_ERR(dev->shutdown))
return PTR_ERR(dev->shutdown);
+ dev->set_device_wakeup = bcm_gpio_set_device_wakeup;
+ dev->set_shutdown = bcm_gpio_set_shutdown;
+
/* IRQ can be declared in ACPI table as Interrupt or GpioInt */
if (dev->irq <= 0) {
struct gpio_desc *gpio;
@@ -802,7 +946,7 @@ static int bcm_get_resources(struct bcm_device *dev)
dev->irq = gpiod_to_irq(gpio);
}
- dev_info(dev->dev, "BCM irq: %d\n", dev->irq);
+ dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq);
return 0;
}
@@ -892,7 +1036,9 @@ static int bcm_probe(struct platform_device *pdev)
list_add_tail(&dev->list, &bcm_device_list);
mutex_unlock(&bcm_device_lock);
- bcm_gpio_set_power(dev, false);
+ ret = bcm_gpio_set_power(dev, false);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to power down\n");
return 0;
}
@@ -939,6 +1085,7 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E65", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E67", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E72", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E7E", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
@@ -993,7 +1140,9 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
if (err)
return err;
- bcm_gpio_set_power(bcmdev, false);
+ err = bcm_gpio_set_power(bcmdev, false);
+ if (err)
+ dev_err(&serdev->dev, "Failed to power down\n");
return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
}
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index aad07e40ea4f..7c166e3b308b 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -540,18 +540,15 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
static int intel_setup(struct hci_uart *hu)
{
- static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
- 0x00, 0x08, 0x04, 0x00 };
struct intel_data *intel = hu->priv;
struct hci_dev *hdev = hu->hdev;
struct sk_buff *skb;
struct intel_version ver;
- struct intel_boot_params *params;
+ struct intel_boot_params params;
struct list_head *p;
const struct firmware *fw;
- const u8 *fw_ptr;
char fwname[64];
- u32 frag_len;
+ u32 boot_param;
ktime_t calltime, delta, rettime;
unsigned long long duration;
unsigned int init_speed, oper_speed;
@@ -563,6 +560,12 @@ static int intel_setup(struct hci_uart *hu)
hu->hdev->set_diag = btintel_set_diag;
hu->hdev->set_bdaddr = btintel_set_bdaddr;
+ /* Set the default boot parameter to 0x0; it is updated to the
+ * SKU-specific boot parameter returned by the Intel_Write_Boot_Params
+ * command while the firmware is downloaded.
+ */
+ boot_param = 0x00000000;
+
calltime = ktime_get();
if (hu->init_speed)
@@ -656,85 +659,95 @@ static int intel_setup(struct hci_uart *hu)
/* Read the secure boot parameters to identify the operating
* details of the bootloader.
*/
- skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb)) {
- bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
- PTR_ERR(skb));
- return PTR_ERR(skb);
- }
-
- if (skb->len != sizeof(*params)) {
- bt_dev_err(hdev, "Intel boot parameters size mismatch");
- kfree_skb(skb);
- return -EILSEQ;
- }
-
- params = (struct intel_boot_params *)skb->data;
- if (params->status) {
- bt_dev_err(hdev, "Intel boot parameters command failure (%02x)",
- params->status);
- err = -bt_to_errno(params->status);
- kfree_skb(skb);
+ err = btintel_read_boot_params(hdev, &params);
+ if (err)
return err;
- }
-
- bt_dev_info(hdev, "Device revision is %u",
- le16_to_cpu(params->dev_revid));
-
- bt_dev_info(hdev, "Secure boot is %s",
- params->secure_boot ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
- params->min_fw_build_nn, params->min_fw_build_cw,
- 2000 + params->min_fw_build_yy);
/* It is required that every single firmware fragment is acknowledged
* with a command complete event. If the boot parameters indicate
* that this bootloader does not send them, then abort the setup.
*/
- if (params->limited_cce != 0x00) {
+ if (params.limited_cce != 0x00) {
bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
- params->limited_cce);
- kfree_skb(skb);
+ params.limited_cce);
return -EINVAL;
}
/* If the OTP has no valid Bluetooth device address, then there will
* also be no valid address for the operational firmware.
*/
- if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+ if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
/* With this Intel bootloader only the hardware variant and device
- * revision information are used to select the right firmware.
+ * revision information are used to select the right firmware for SfP
+ * and WsP.
*
* The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
*
* Currently the supported hardware variants are:
* 11 (0x0b) for iBT 3.0 (LnP/SfP)
+ * 12 (0x0c) for iBT 3.5 (WsP)
+ *
+ * For ThP/JfP and future SKUs, the FW name varies based on HW
+ * variant, HW revision and FW revision, as these depend on the
+ * CNVi and RF combination.
+ *
+ * 18 (0x12) for iBT3.5 (ThP/JfP)
+ *
+ * The firmware file name for these will be
+ * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
+ *
*/
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ switch (ver.hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(params.dev_revid));
+ break;
+ case 0x12: /* ThP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+ le16_to_cpu(ver.fw_revision));
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+ ver.hw_variant);
+ return -EINVAL;
+ }
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
bt_dev_err(hdev, "Failed to load Intel firmware file (%d)",
err);
- kfree_skb(skb);
return err;
}
bt_dev_info(hdev, "Found device firmware: %s", fwname);
/* Save the DDC file name for later */
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
-
- kfree_skb(skb);
+ switch (ver.hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(params.dev_revid));
+ break;
+ case 0x12: /* ThP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+ le16_to_cpu(ver.fw_revision));
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+ ver.hw_variant);
+ return -EINVAL;
+ }
if (fw->size < 644) {
bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
@@ -745,70 +758,10 @@ static int intel_setup(struct hci_uart *hu)
set_bit(STATE_DOWNLOADING, &intel->flags);
- /* Start the firmware download transaction with the Init fragment
- * represented by the 128 bytes of CSS header.
- */
- err = btintel_secure_send(hdev, 0x00, 128, fw->data);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
- goto done;
- }
-
- /* Send the 256 bytes of public key information from the firmware
- * as the PKey fragment.
- */
- err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send firmware public key (%d)",
- err);
- goto done;
- }
-
- /* Send the 256 bytes of signature information from the firmware
- * as the Sign fragment.
- */
- err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send firmware signature (%d)",
- err);
+ /* Start the firmware download and retrieve the boot parameter */
+ err = btintel_download_firmware(hdev, fw, &boot_param);
+ if (err < 0)
goto done;
- }
-
- fw_ptr = fw->data + 644;
- frag_len = 0;
-
- while (fw_ptr - fw->data < fw->size) {
- struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
-
- frag_len += sizeof(*cmd) + cmd->plen;
-
- bt_dev_dbg(hdev, "Patching %td/%zu", (fw_ptr - fw->data),
- fw->size);
-
- /* The parameter length of the secure send command requires
- * a 4 byte alignment. It happens so that the firmware file
- * contains proper Intel_NOP commands to align the fragments
- * as needed.
- *
- * Send set of commands with 4 byte alignment from the
- * firmware data buffer as a single Data fragement.
- */
- if (frag_len % 4)
- continue;
-
- /* Send each command from the firmware data buffer as
- * a single Data fragment.
- */
- err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send firmware data (%d)",
- err);
- goto done;
- }
-
- fw_ptr += frag_len;
- frag_len = 0;
- }
set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
@@ -869,12 +822,9 @@ done:
set_bit(STATE_BOOTING, &intel->flags);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
- HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- kfree_skb(skb);
+ err = btintel_send_intel_reset(hdev, boot_param);
+ if (err)
+ return err;
/* The bootloader will not indicate when the device is ready. This
* is done by the operational firmware sending bootup notification.
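
The two switch statements above encode the firmware naming scheme; a condensed
sketch of that rule (hypothetical helper, not part of the driver):

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/types.h>

	/* SfP/WsP (0x0b/0x0c) name files by variant and device revision;
	 * ThP (0x12) names them by variant, hw revision and fw revision.
	 */
	static int demo_intel_fw_name(char *buf, size_t len, u8 variant,
				      u16 dev_revid, u8 hw_rev, u8 fw_rev)
	{
		switch (variant) {
		case 0x0b:
		case 0x0c:
			return snprintf(buf, len, "intel/ibt-%u-%u.sfi",
					variant, dev_revid);
		case 0x12:
			return snprintf(buf, len, "intel/ibt-%u-%u-%u.sfi",
					variant, hw_rev, fw_rev);
		default:
			return -EINVAL;
		}
	}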
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index c823914b3a80..b6a71705b7d6 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -794,7 +794,7 @@ static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file,
return 0;
}
-static unsigned int hci_uart_tty_poll(struct tty_struct *tty,
+static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
struct file *filp, poll_table *wait)
{
return 0;
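
This hunk is one instance of a tree-wide conversion: poll handlers return
__poll_t, a sparse "bitwise" type, so the checker can prove the mask is built
only from EPOLL* constants. A toy handler showing the annotated shape (the
readiness test is an assumed stand-in, not from this file):

	#include <linux/poll.h>

	static __poll_t demo_poll(struct file *file, poll_table *wait)
	{
		__poll_t mask = 0;

		/* a real driver would call poll_wait() on its waitqueue */
		if (file->private_data)	/* stand-in for "data ready" */
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}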
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index e2c078d61730..1b4417a623a4 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -53,9 +53,14 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <linux/gpio/consumer.h>
+#include <linux/nvmem-consumer.h>
#include "hci_uart.h"
+/* Vendor-specific HCI commands */
+#define HCI_VS_WRITE_BD_ADDR 0xfc06
+#define HCI_VS_UPDATE_UART_HCI_BAUDRATE 0xff36
+
/* HCILL commands */
#define HCILL_GO_TO_SLEEP_IND 0x30
#define HCILL_GO_TO_SLEEP_ACK 0x31
@@ -86,6 +91,7 @@ struct ll_device {
struct serdev_device *serdev;
struct gpio_desc *enable_gpio;
struct clk *ext_clk;
+ bdaddr_t bdaddr;
};
struct ll_struct {
@@ -620,7 +626,7 @@ static int download_firmware(struct ll_device *lldev)
case ACTION_SEND_COMMAND: /* action send */
bt_dev_dbg(lldev->hu.hdev, "S");
cmd = (struct hci_command *)action_ptr;
- if (cmd->opcode == 0xff36) {
+ if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) {
/* ignore remote change
* baud rate HCI VS command
*/
@@ -628,11 +634,11 @@ static int download_firmware(struct ll_device *lldev)
break;
}
if (cmd->prefix != 1)
- bt_dev_dbg(lldev->hu.hdev, "command type %d\n", cmd->prefix);
+ bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix);
skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- bt_dev_err(lldev->hu.hdev, "send command failed\n");
+ bt_dev_err(lldev->hu.hdev, "send command failed");
err = PTR_ERR(skb);
goto out_rel_fw;
}
@@ -659,6 +665,24 @@ out_rel_fw:
return err;
}
+static int ll_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ bdaddr_t bdaddr_swapped;
+ struct sk_buff *skb;
+
+ /* HCI_VS_WRITE_BD_ADDR (at least on a CC2560A chip) expects the BD
+ * address to be MSB first, but bdaddr_t has the convention of being
+ * LSB first.
+ */
+ baswap(&bdaddr_swapped, bdaddr);
+ skb = __hci_cmd_sync(hdev, HCI_VS_WRITE_BD_ADDR, sizeof(bdaddr_t),
+ &bdaddr_swapped, HCI_INIT_TIMEOUT);
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
+
+ return PTR_ERR_OR_ZERO(skb);
+}
+
static int ll_setup(struct hci_uart *hu)
{
int err, retry = 3;
@@ -671,14 +695,20 @@ static int ll_setup(struct hci_uart *hu)
lldev = serdev_device_get_drvdata(serdev);
+ hu->hdev->set_bdaddr = ll_set_bdaddr;
+
serdev_device_set_flow_control(serdev, true);
do {
- /* Configure BT_EN to HIGH state */
+ /* Reset the Bluetooth device */
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
msleep(5);
gpiod_set_value_cansleep(lldev->enable_gpio, 1);
- msleep(100);
+ err = serdev_device_wait_for_cts(serdev, true, 200);
+ if (err) {
+ bt_dev_err(hu->hdev, "Failed to get CTS");
+ return err;
+ }
err = download_firmware(lldev);
if (!err)
@@ -691,6 +721,18 @@ static int ll_setup(struct hci_uart *hu)
if (err)
return err;
+ /* Set BD address if one was specified at probe */
+ if (!bacmp(&lldev->bdaddr, BDADDR_NONE)) {
+ /* This means that there was an error getting the BD address
+ * during probe, so mark the device as having a bad address.
+ */
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ } else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
+ err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
+ if (err)
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ }
+
/* Operational speed if any */
if (hu->oper_speed)
speed = hu->oper_speed;
@@ -700,7 +742,12 @@ static int ll_setup(struct hci_uart *hu)
speed = 0;
if (speed) {
- struct sk_buff *skb = __hci_cmd_sync(hu->hdev, 0xff36, sizeof(speed), &speed, HCI_INIT_TIMEOUT);
+ __le32 speed_le = cpu_to_le32(speed);
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+ sizeof(speed_le), &speed_le,
+ HCI_INIT_TIMEOUT);
if (!IS_ERR(skb)) {
kfree_skb(skb);
serdev_device_set_baudrate(serdev, speed);
@@ -716,6 +763,7 @@ static int hci_ti_probe(struct serdev_device *serdev)
{
struct hci_uart *hu;
struct ll_device *lldev;
+ struct nvmem_cell *bdaddr_cell;
u32 max_speed = 3000000;
lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL);
@@ -737,6 +785,52 @@ static int hci_ti_probe(struct serdev_device *serdev)
of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed);
hci_uart_set_speeds(hu, 115200, max_speed);
+ /* optional BD address from nvram */
+ bdaddr_cell = nvmem_cell_get(&serdev->dev, "bd-address");
+ if (IS_ERR(bdaddr_cell)) {
+ int err = PTR_ERR(bdaddr_cell);
+
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ /* ENOENT means there is no matching nvmem cell and ENOSYS
+ * means that nvmem is not enabled in the kernel configuration.
+ */
+ if (err != -ENOENT && err != -ENOSYS) {
+ /* If there was some other error, give userspace a
+ * chance to fix the problem instead of failing to load
+ * the driver. Using BDADDR_NONE as a flag that is
+ * tested later in the setup function.
+ */
+ dev_warn(&serdev->dev,
+ "Failed to get \"bd-address\" nvmem cell (%d)\n",
+ err);
+ bacpy(&lldev->bdaddr, BDADDR_NONE);
+ }
+ } else {
+ bdaddr_t *bdaddr;
+ size_t len;
+
+ bdaddr = nvmem_cell_read(bdaddr_cell, &len);
+ nvmem_cell_put(bdaddr_cell);
+ if (IS_ERR(bdaddr)) {
+ dev_err(&serdev->dev, "Failed to read nvmem bd-address\n");
+ return PTR_ERR(bdaddr);
+ }
+ if (len != sizeof(bdaddr_t)) {
+ dev_err(&serdev->dev, "Invalid nvmem bd-address length\n");
+ kfree(bdaddr);
+ return -EINVAL;
+ }
+
+ /* As per the device tree bindings, the value from nvmem is
+ * expected to be MSB first, but in the kernel it is expected
+ * that bdaddr_t is LSB first.
+ */
+ baswap(&lldev->bdaddr, bdaddr);
+ kfree(bdaddr);
+ }
+
return hci_uart_register_device(hu, &llp);
}
@@ -748,6 +842,7 @@ static void hci_ti_remove(struct serdev_device *serdev)
}
static const struct of_device_id hci_ti_of_match[] = {
+ { .compatible = "ti,cc2560" },
{ .compatible = "ti,wl1271-st" },
{ .compatible = "ti,wl1273-st" },
{ .compatible = "ti,wl1281-st" },
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index bbd7db7384e6..05ec530b8a3a 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -932,6 +932,9 @@ static int qca_setup(struct hci_uart *hu)
if (!ret) {
set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
+ } else if (ret == -ENOENT) {
+ /* No patch/nvm-config found, run with original fw/config */
+ ret = 0;
}
/* Setup bdaddr */
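
The qca_setup() change treats a missing patch/nvm file as non-fatal; a sketch
of the general pattern (hypothetical helper name):

	#include <linux/errno.h>

	static int demo_handle_fw_result(int ret)
	{
		if (ret == -ENOENT)
			return 0;	/* no patch file: run on ROM firmware */
		return ret;		/* any other error still aborts setup */
	}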
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 71664b22ec9d..e0e6461b9200 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -303,6 +303,7 @@ int hci_uart_register_device(struct hci_uart *hu,
hci_set_drvdata(hdev, hu);
INIT_WORK(&hu->write_work, hci_uart_write_work);
+ percpu_init_rwsem(&hu->proto_lock);
/* Only when vendor specific setup callback is provided, consider
* the manufacturer information valid. This avoids filling in the
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index e6f6dbc04131..0521748a1972 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -299,7 +299,7 @@ static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from)
return vhci_get_user(data, from);
}
-static unsigned int vhci_poll(struct file *file, poll_table *wait)
+static __poll_t vhci_poll(struct file *file, poll_table *wait)
{
struct vhci_data *data = file->private_data;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index dc7b3c7b7d42..57e011d36a79 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -120,7 +120,7 @@ config QCOM_EBI2
SRAM, ethernet adapters, FPGAs and LCD displays.
config SIMPLE_PM_BUS
- bool "Simple Power-Managed Bus Driver"
+ tristate "Simple Power-Managed Bus Driver"
depends on OF && PM
help
Driver for transparent busses that don't need a real driver, but
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 328ca93781cf..1b76d9585902 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
.remove = sunxi_rsb_device_remove,
+ .uevent = of_device_uevent_modalias,
};
static void sunxi_rsb_dev_release(struct device *dev)
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 1dfb9f8de171..a2a1c1478cd0 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -236,7 +236,7 @@ static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t
return ret;
}
-static unsigned int apm_poll(struct file *fp, poll_table * wait)
+static __poll_t apm_poll(struct file *fp, poll_table * wait)
{
struct apm_user *as = fp->private_data;
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 0d7b577e0ff0..2f92cc46698b 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -406,7 +406,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
* Do I need this function at all???
*/
#if 0
-static unsigned int dsp56k_poll(struct file *file, poll_table *wait)
+static __poll_t dsp56k_poll(struct file *file, poll_table *wait)
{
int dev = iminor(file_inode(file)) & 0x0f;
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 839ee61d352a..2697c22e3be2 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -91,7 +91,7 @@ static ssize_t dtlk_read(struct file *, char __user *,
size_t nbytes, loff_t * ppos);
static ssize_t dtlk_write(struct file *, const char __user *,
size_t nbytes, loff_t * ppos);
-static unsigned int dtlk_poll(struct file *, poll_table *);
+static __poll_t dtlk_poll(struct file *, poll_table *);
static int dtlk_open(struct inode *, struct file *);
static int dtlk_release(struct inode *, struct file *);
static long dtlk_ioctl(struct file *file,
@@ -228,9 +228,9 @@ static ssize_t dtlk_write(struct file *file, const char __user *buf,
return -EAGAIN;
}
-static unsigned int dtlk_poll(struct file *file, poll_table * wait)
+static __poll_t dtlk_poll(struct file *file, poll_table * wait)
{
- int mask = 0;
+ __poll_t mask = 0;
unsigned long expires;
TRACE_TEXT(" dtlk_poll");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index b941e6d59fd6..dbed4953f86c 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -342,7 +342,7 @@ out:
return retval;
}
-static unsigned int hpet_poll(struct file *file, poll_table * wait)
+static __poll_t hpet_poll(struct file *file, poll_table * wait)
{
unsigned long v;
struct hpet_dev *devp;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index f6e3e5abc117..4d0f571c15f9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -73,26 +73,14 @@ config HW_RANDOM_ATMEL
If unsure, say Y.
-config HW_RANDOM_BCM63XX
- tristate "Broadcom BCM63xx Random Number Generator support"
- depends on BCM63XX || BMIPS_GENERIC
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM63xx SoCs.
-
- To compile this driver as a module, choose M here: the
- module will be called bcm63xx-rng
-
- If unusure, say Y.
-
config HW_RANDOM_BCM2835
- tristate "Broadcom BCM2835 Random Number Generator support"
- depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X
+ tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
+ depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
+ ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM2835 SoCs.
+ Generator hardware found on the Broadcom BCM2835 and BCM63xx SoCs.
To compile this driver as a module, choose M here: the
module will be called bcm2835-rng
@@ -306,19 +294,6 @@ config HW_RANDOM_POWERNV
If unsure, say Y.
-config HW_RANDOM_TPM
- tristate "TPM HW Random Number Generator support"
- depends on TCG_TPM
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator in the Trusted Platform Module
-
- To compile this driver as a module, choose M here: the
- module will be called tpm-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_HISI
tristate "Hisilicon Random Number Generator support"
depends on HW_RANDOM && ARCH_HISI
@@ -449,6 +424,18 @@ config HW_RANDOM_S390
If unsure, say Y.
+config HW_RANDOM_EXYNOS
+ tristate "Samsung Exynos True Random Number Generator support"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ default HW_RANDOM
+ ---help---
+ This driver provides support for the True Random Number
+ Generator available in Exynos SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called exynos-trng.
+
+ If unsure, say Y.
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index f3728d008fff..b780370bd4eb 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -9,11 +9,11 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
-obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o
obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
@@ -27,7 +27,6 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
-obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 574211a49549..7a84cec30c3a 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -15,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <linux/clk.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
@@ -29,116 +30,180 @@
#define RNG_INT_OFF 0x1
-static void __init nsp_rng_init(void __iomem *base)
+struct bcm2835_rng_priv {
+ struct hwrng rng;
+ void __iomem *base;
+ bool mask_interrupts;
+ struct clk *clk;
+};
+
+static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
{
- u32 val;
+ return container_of(rng, struct bcm2835_rng_priv, rng);
+}
+
+static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset)
+{
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(priv->base + offset);
+ else
+ return readl(priv->base + offset);
+}
- /* mask the interrupt */
- val = readl(base + RNG_INT_MASK);
- val |= RNG_INT_OFF;
- writel(val, base + RNG_INT_MASK);
+static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val,
+ u32 offset)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, priv->base + offset);
+ else
+ writel(val, priv->base + offset);
}
static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
- void __iomem *rng_base = (void __iomem *)rng->priv;
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
u32 max_words = max / sizeof(u32);
u32 num_words, count;
- while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
+ while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
if (!wait)
return 0;
cpu_relax();
}
- num_words = readl(rng_base + RNG_STATUS) >> 24;
+ num_words = rng_readl(priv, RNG_STATUS) >> 24;
if (num_words > max_words)
num_words = max_words;
for (count = 0; count < num_words; count++)
- ((u32 *)buf)[count] = readl(rng_base + RNG_DATA);
+ ((u32 *)buf)[count] = rng_readl(priv, RNG_DATA);
return num_words * sizeof(u32);
}
-static struct hwrng bcm2835_rng_ops = {
- .name = "bcm2835",
- .read = bcm2835_rng_read,
+static int bcm2835_rng_init(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+ int ret = 0;
+ u32 val;
+
+ if (!IS_ERR(priv->clk)) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->mask_interrupts) {
+ /* mask the interrupt */
+ val = rng_readl(priv, RNG_INT_MASK);
+ val |= RNG_INT_OFF;
+ rng_writel(priv, val, RNG_INT_MASK);
+ }
+
+ /* set warm-up count & enable */
+ rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS);
+ rng_writel(priv, RNG_RBGEN, RNG_CTRL);
+
+ return ret;
+}
+
+static void bcm2835_rng_cleanup(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+
+ /* disable rng hardware */
+ rng_writel(priv, 0, RNG_CTRL);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+}
+
+struct bcm2835_rng_of_data {
+ bool mask_interrupts;
+};
+
+static const struct bcm2835_rng_of_data nsp_rng_of_data = {
+ .mask_interrupts = true,
};
static const struct of_device_id bcm2835_rng_of_match[] = {
{ .compatible = "brcm,bcm2835-rng"},
- { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init},
- { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init},
+ { .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm6368-rng"},
{},
};
static int bcm2835_rng_probe(struct platform_device *pdev)
{
+ const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- void (*rng_setup)(void __iomem *base);
const struct of_device_id *rng_id;
- void __iomem *rng_base;
+ struct bcm2835_rng_priv *priv;
+ struct resource *r;
int err;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
/* map peripheral */
- rng_base = of_iomap(np, 0);
- if (!rng_base) {
- dev_err(dev, "failed to remap rng regs");
- return -ENODEV;
- }
- bcm2835_rng_ops.priv = (unsigned long)rng_base;
+ priv->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /* Clock is optional on most platforms */
+ priv->clk = devm_clk_get(dev, NULL);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = bcm2835_rng_init;
+ priv->rng.read = bcm2835_rng_read;
+ priv->rng.cleanup = bcm2835_rng_cleanup;
rng_id = of_match_node(bcm2835_rng_of_match, np);
- if (!rng_id) {
- iounmap(rng_base);
+ if (!rng_id)
return -EINVAL;
- }
- /* Check for rng init function, execute it */
- rng_setup = rng_id->data;
- if (rng_setup)
- rng_setup(rng_base);
- /* set warm-up count & enable */
- __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
- __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
+ /* Pick up model-specific quirks from the OF match data */
+ of_data = rng_id->data;
+ if (of_data)
+ priv->mask_interrupts = of_data->mask_interrupts;
/* register driver */
- err = hwrng_register(&bcm2835_rng_ops);
- if (err) {
+ err = devm_hwrng_register(dev, &priv->rng);
+ if (err)
dev_err(dev, "hwrng registration failed\n");
- iounmap(rng_base);
- } else
+ else
dev_info(dev, "hwrng registered\n");
return err;
}
-static int bcm2835_rng_remove(struct platform_device *pdev)
-{
- void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv;
-
- /* disable rng hardware */
- __raw_writel(0, rng_base + RNG_CTRL);
-
- /* unregister driver */
- hwrng_unregister(&bcm2835_rng_ops);
- iounmap(rng_base);
-
- return 0;
-}
-
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
+static struct platform_device_id bcm2835_rng_devtype[] = {
+ { .name = "bcm2835-rng" },
+ { .name = "bcm63xx-rng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype);
+
static struct platform_driver bcm2835_rng_driver = {
.driver = {
.name = "bcm2835-rng",
.of_match_table = bcm2835_rng_of_match,
},
.probe = bcm2835_rng_probe,
- .remove = bcm2835_rng_remove,
+ .id_table = bcm2835_rng_devtype,
};
module_platform_driver(bcm2835_rng_driver);
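
The bcm2835-rng rewrite replaces the global hwrng-with-priv-pointer with an
embedded struct plus container_of(), which is what makes multiple independent
instances possible. The core of that idiom, restated as a minimal sketch:

	#include <linux/hw_random.h>
	#include <linux/kernel.h>

	struct demo_rng_priv {
		struct hwrng rng;	/* embedded, not pointed to */
		void __iomem *base;
	};

	/* Recover the private data from the hwrng the core hands back */
	static inline struct demo_rng_priv *to_demo_priv(struct hwrng *rng)
	{
		return container_of(rng, struct demo_rng_priv, rng);
	}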
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
deleted file mode 100644
index 5132c9cde50d..000000000000
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Broadcom BCM63xx Random Number Generator support
- *
- * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org>
- * Copyright (C) 2009, Broadcom Corporation
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/of.h>
-
-#define RNG_CTRL 0x00
-#define RNG_EN (1 << 0)
-
-#define RNG_STAT 0x04
-#define RNG_AVAIL_MASK (0xff000000)
-
-#define RNG_DATA 0x08
-#define RNG_THRES 0x0c
-#define RNG_MASK 0x10
-
-struct bcm63xx_rng_priv {
- struct hwrng rng;
- struct clk *clk;
- void __iomem *regs;
-};
-
-#define to_rng_priv(rng) container_of(rng, struct bcm63xx_rng_priv, rng)
-
-static int bcm63xx_rng_init(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
- int error;
-
- error = clk_prepare_enable(priv->clk);
- if (error)
- return error;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val |= RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- return 0;
-}
-
-static void bcm63xx_rng_cleanup(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val &= ~RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- clk_disable_unprepare(priv->clk);
-}
-
-static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- return __raw_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
-}
-
-static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- *data = __raw_readl(priv->regs + RNG_DATA);
-
- return 4;
-}
-
-static int bcm63xx_rng_probe(struct platform_device *pdev)
-{
- struct resource *r;
- int ret;
- struct bcm63xx_rng_priv *priv;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no iomem resource\n");
- return -ENXIO;
- }
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->rng.name = pdev->name;
- priv->rng.init = bcm63xx_rng_init;
- priv->rng.cleanup = bcm63xx_rng_cleanup;
- priv->rng.data_present = bcm63xx_rng_data_present;
- priv->rng.data_read = bcm63xx_rng_data_read;
-
- priv->clk = devm_clk_get(&pdev->dev, "ipsec");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "no clock for device: %d\n", ret);
- return ret;
- }
-
- if (!devm_request_mem_region(&pdev->dev, r->start,
- resource_size(r), pdev->name)) {
- dev_err(&pdev->dev, "request mem failed");
- return -EBUSY;
- }
-
- priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
- resource_size(r));
- if (!priv->regs) {
- dev_err(&pdev->dev, "ioremap failed");
- return -ENOMEM;
- }
-
- ret = devm_hwrng_register(&pdev->dev, &priv->rng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register rng device: %d\n",
- ret);
- return ret;
- }
-
- dev_info(&pdev->dev, "registered RNG driver\n");
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id bcm63xx_rng_of_match[] = {
- { .compatible = "brcm,bcm6368-rng", },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match);
-#endif
-
-static struct platform_driver bcm63xx_rng_driver = {
- .probe = bcm63xx_rng_probe,
- .driver = {
- .name = "bcm63xx-rng",
- .of_match_table = of_match_ptr(bcm63xx_rng_of_match),
- },
-};
-
-module_platform_driver(bcm63xx_rng_driver);
-
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 657b8770b6b9..91bb98c42a1c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -306,6 +306,10 @@ static int enable_best_rng(void)
ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
if (!ret)
cur_rng_set_by_user = 0;
+ } else {
+ drop_current_rng();
+ cur_rng_set_by_user = 0;
+ ret = 0;
}
return ret;
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
new file mode 100644
index 000000000000..1947aed7c044
--- /dev/null
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RNG driver for Exynos TRNGs
+ *
+ * Author: Łukasz Stelmach <l.stelmach@samsung.com>
+ *
+ * Copyright 2017 (c) Samsung Electronics Software, Inc.
+ *
+ * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by
+ * Krzysztof Kozłowski <krzk@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define EXYNOS_TRNG_CLKDIV (0x0)
+
+#define EXYNOS_TRNG_CTRL (0x20)
+#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
+
+#define EXYNOS_TRNG_POST_CTRL (0x30)
+#define EXYNOS_TRNG_ONLINE_CTRL (0x40)
+#define EXYNOS_TRNG_ONLINE_STAT (0x44)
+#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48)
+#define EXYNOS_TRNG_FIFO_CTRL (0x50)
+#define EXYNOS_TRNG_FIFO_0 (0x80)
+#define EXYNOS_TRNG_FIFO_1 (0x84)
+#define EXYNOS_TRNG_FIFO_2 (0x88)
+#define EXYNOS_TRNG_FIFO_3 (0x8c)
+#define EXYNOS_TRNG_FIFO_4 (0x90)
+#define EXYNOS_TRNG_FIFO_5 (0x94)
+#define EXYNOS_TRNG_FIFO_6 (0x98)
+#define EXYNOS_TRNG_FIFO_7 (0x9c)
+#define EXYNOS_TRNG_FIFO_LEN (8)
+#define EXYNOS_TRNG_CLOCK_RATE (500000)
+
+
+struct exynos_trng_dev {
+ struct device *dev;
+ void __iomem *mem;
+ struct clk *clk;
+ struct hwrng rng;
+};
+
+static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct exynos_trng_dev *trng;
+ int val;
+
+ max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4));
+
+ trng = (struct exynos_trng_dev *)rng->priv;
+
+ writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL);
+ val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val,
+ val == 0, 200, 1000000);
+ if (val < 0)
+ return val;
+
+ memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max);
+
+ return max;
+}
+
+static int exynos_trng_init(struct hwrng *rng)
+{
+ struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
+ unsigned long sss_rate;
+ u32 val;
+
+ sss_rate = clk_get_rate(trng->clk);
+
+ /*
+ * For most TRNG circuits a clock frequency below 500 kHz
+ * is safe.
+ */
+ val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2);
+ if (val > 0x7fff) {
+ dev_err(trng->dev, "clock divider too large: %d", val);
+ return -ERANGE;
+ }
+ val = val << 1;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV);
+
+ /* Enable the generator. */
+ val = EXYNOS_TRNG_CTRL_RNGEN;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL);
+
+ /*
+ * Disable post-processing. /dev/hwrng is supposed to deliver
+ * unprocessed data.
+ */
+ writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL);
+
+ return 0;
+}
+
+static int exynos_trng_probe(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return ret;
+
+ trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
+ GFP_KERNEL);
+ if (!trng->rng.name)
+ return ret;
+
+ trng->rng.init = exynos_trng_init;
+ trng->rng.read = exynos_trng_do_read;
+ trng->rng.priv = (unsigned long) trng;
+
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trng->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(trng->mem))
+ return PTR_ERR(trng->mem);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not get runtime PM.\n");
+ goto err_pm_get;
+ }
+
+ trng->clk = devm_clk_get(&pdev->dev, "secss");
+ if (IS_ERR(trng->clk)) {
+ ret = PTR_ERR(trng->clk);
+ dev_err(&pdev->dev, "Could not get clock.\n");
+ goto err_clock;
+ }
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable the clk.\n");
+ goto err_clock;
+ }
+
+ ret = hwrng_register(&trng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register hwrng device.\n");
+ goto err_register;
+ }
+
+ dev_info(&pdev->dev, "Exynos True Random Number Generator.\n");
+
+ return 0;
+
+err_register:
+ clk_disable_unprepare(trng->clk);
+
+err_clock:
+ pm_runtime_put_sync(&pdev->dev);
+
+err_pm_get:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int exynos_trng_remove(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&trng->rng);
+ clk_disable_unprepare(trng->clk);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not get runtime PM.\n");
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
+ exynos_trng_resume);
+
+static const struct of_device_id exynos_trng_dt_match[] = {
+ {
+ .compatible = "samsung,exynos5250-trng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
+
+static struct platform_driver exynos_trng_driver = {
+ .driver = {
+ .name = "exynos-trng",
+ .pm = &exynos_trng_pm_ops,
+ .of_match_table = exynos_trng_dt_match,
+ },
+ .probe = exynos_trng_probe,
+ .remove = exynos_trng_remove,
+};
+
+module_platform_driver(exynos_trng_driver);
+MODULE_AUTHOR("Åukasz Stelmach");
+MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips");
+MODULE_LICENSE("GPL v2");
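
The exynos-trng read path hinges on readl_poll_timeout(); a hedged sketch of
that handshake using the register offsets defined earlier (0x50 is FIFO_CTRL,
0x80 is FIFO_0):

	#include <linux/io.h>
	#include <linux/iopoll.h>

	static int demo_trng_read(void __iomem *mem, void *data, size_t bytes)
	{
		u32 val;
		int ret;

		/* request bytes * 8 random bits */
		writel_relaxed(bytes * 8, mem + 0x50);
		/* poll until the engine clears FIFO_CTRL, 1 s timeout */
		ret = readl_poll_timeout(mem + 0x50, val, val == 0,
					 200, 1000000);
		if (ret)
			return ret;
		memcpy_fromio(data, mem + 0x80, bytes);
		return bytes;
	}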
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 88db42d30760..eca87249bcff 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -282,8 +282,7 @@ static int __exit imx_rngc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int imx_rngc_suspend(struct device *dev)
+static int __maybe_unused imx_rngc_suspend(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -292,7 +291,7 @@ static int imx_rngc_suspend(struct device *dev)
return 0;
}
-static int imx_rngc_resume(struct device *dev)
+static int __maybe_unused imx_rngc_resume(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -301,11 +300,7 @@ static int imx_rngc_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops imx_rngc_pm_ops = {
- .suspend = imx_rngc_suspend,
- .resume = imx_rngc_resume,
-};
-#endif
+SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb", .data = NULL, },
@@ -316,9 +311,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = "imx_rngc",
-#ifdef CONFIG_PM
.pm = &imx_rngc_pm_ops,
-#endif
.of_match_table = imx_rngc_dt_ids,
},
.remove = __exit_p(imx_rngc_remove),
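
The imx-rngc hunks show the standard cleanup for PM callbacks: drop the
#ifdef CONFIG_PM blocks, mark the callbacks __maybe_unused, and let
SIMPLE_DEV_PM_OPS() provide the ops table (it compiles to an empty one when PM
is disabled). The minimal shape of that pattern:

	#include <linux/device.h>
	#include <linux/pm.h>

	static int __maybe_unused demo_suspend(struct device *dev)
	{
		return 0;	/* e.g. clk_disable_unprepare(...) */
	}

	static int __maybe_unused demo_resume(struct device *dev)
	{
		return 0;	/* e.g. clk_prepare_enable(...) */
	}

	static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);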
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 8da7bcf54105..7f99cd52b40e 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -135,6 +135,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
#endif
priv->rng.read = mtk_rng_read;
priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/char/hw_random/tpm-rng.c b/drivers/char/hw_random/tpm-rng.c
deleted file mode 100644
index d6d448266f07..000000000000
--- a/drivers/char/hw_random/tpm-rng.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2012 Kent Yoder IBM Corporation
- *
- * HWRNG interfaces to pull RNG data from a TPM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/hw_random.h>
-#include <linux/tpm.h>
-
-#define MODULE_NAME "tpm-rng"
-
-static int tpm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
- return tpm_get_random(TPM_ANY_NUM, data, max);
-}
-
-static struct hwrng tpm_rng = {
- .name = MODULE_NAME,
- .read = tpm_rng_read,
-};
-
-static int __init rng_init(void)
-{
- return hwrng_register(&tpm_rng);
-}
-module_init(rng_init);
-
-static void __exit rng_exit(void)
-{
- hwrng_unregister(&tpm_rng);
-}
-module_exit(rng_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Kent Yoder <key@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("RNG driver for TPM devices");
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 6edfaa72b98b..7992c870b0a2 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -338,10 +338,10 @@ static int bt_bmc_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+static __poll_t bt_bmc_poll(struct file *file, poll_table *wait)
{
struct bt_bmc *bt_bmc = file_bt_bmc(file);
- unsigned int mask = 0;
+ __poll_t mask = 0;
u8 ctrl;
poll_wait(file, &bt_bmc->queue, wait);
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 2ffca4232686..a011a7739f5e 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -78,10 +78,10 @@ static void file_receive_handler(struct ipmi_recv_msg *msg,
spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}
-static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
struct ipmi_file_private *priv = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(file, &priv->wait, wait);
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index ab78b3be7e33..c5112b17d7ea 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -106,7 +106,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
pr_err("ipmi:dmi: Error allocation IPMI platform device\n");
return;
}
- pdev->driver_override = override;
+ pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+ override);
+ if (!pdev->driver_override)
+ goto err;
if (type == IPMI_DMI_TYPE_SSIF) {
set_prop_entry(p[pidx++], "i2c-addr", u16, base_addr);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index f45732a2cb3e..e0b0d7e2d976 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -84,7 +84,7 @@ static int panic_op_write_handler(const char *val,
char valcp[16];
char *s;
- strncpy(valcp, val, 16);
+ strncpy(valcp, val, 15);
valcp[15] = '\0';
s = strstrip(valcp);
@@ -2588,7 +2588,7 @@ static ssize_t device_id_show(struct device *dev,
return snprintf(buf, 10, "%u\n", id.device_id);
}
-static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
+static DEVICE_ATTR_RO(device_id);
static ssize_t provides_device_sdrs_show(struct device *dev,
struct device_attribute *attr,
@@ -2604,8 +2604,7 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
}
-static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
- NULL);
+static DEVICE_ATTR_RO(provides_device_sdrs);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2620,7 +2619,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
}
-static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
+static DEVICE_ATTR_RO(revision);
static ssize_t firmware_revision_show(struct device *dev,
struct device_attribute *attr,
@@ -2637,7 +2636,7 @@ static ssize_t firmware_revision_show(struct device *dev,
return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
id.firmware_revision_2);
}
-static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
+static DEVICE_ATTR_RO(firmware_revision);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
@@ -2655,7 +2654,7 @@ static ssize_t ipmi_version_show(struct device *dev,
ipmi_version_major(&id),
ipmi_version_minor(&id));
}
-static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
+static DEVICE_ATTR_RO(ipmi_version);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
@@ -2688,7 +2687,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
}
-static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
+static DEVICE_ATTR_RO(manufacturer_id);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2704,7 +2703,7 @@ static ssize_t product_id_show(struct device *dev,
return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
}
-static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
+static DEVICE_ATTR_RO(product_id);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
@@ -2742,7 +2741,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 38, "%pUl\n", guid.b);
}
-static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
+static DEVICE_ATTR_RO(guid);
static struct attribute *bmc_dev_attrs[] = {
&dev_attr_device_id.attr,
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 07fddbefefe4..bcf493d8e238 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -250,8 +250,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
ipmi->irq = opal_event_request(prop);
}
- if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
- "opal-ipmi", ipmi)) {
+ rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+ "opal-ipmi", ipmi);
+ if (rc) {
dev_warn(dev, "Unable to request irq\n");
goto err_dispose;
}
@@ -264,7 +265,6 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
goto err_unregister;
}
- /* todo: query actual ipmi_device_id */
rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
if (rc) {
dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 71fad747c0c7..6768cb2dd740 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1938,8 +1938,10 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
- if (smi_info->thread != NULL)
+ if (smi_info->thread != NULL) {
kthread_stop(smi_info->thread);
+ smi_info->thread = NULL;
+ }
smi_info->timer_can_start = false;
if (smi_info->timer_running)
@@ -2045,6 +2047,7 @@ static int try_smi_init(struct smi_info *new_smi)
int rv = 0;
int i;
char *init_name = NULL;
+ bool platform_device_registered = false;
pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
@@ -2173,6 +2176,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv);
goto out_err;
}
+ platform_device_registered = true;
}
dev_set_drvdata(new_smi->io.dev, new_smi);
@@ -2279,10 +2283,11 @@ out_err:
}
if (new_smi->pdev) {
- platform_device_unregister(new_smi->pdev);
+ if (platform_device_registered)
+ platform_device_unregister(new_smi->pdev);
+ else
+ platform_device_put(new_smi->pdev);
new_smi->pdev = NULL;
- } else if (new_smi->pdev) {
- platform_device_put(new_smi->pdev);
}
kfree(init_name);
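
The error-path fix above removes a dead else-branch (both arms tested the same
pointer) and makes the cleanup depend on whether registration succeeded. The
underlying rule, as a sketch:

	#include <linux/platform_device.h>

	static void demo_pdev_cleanup(struct platform_device *pdev,
				      bool registered)
	{
		if (!pdev)
			return;
		if (registered)
			platform_device_unregister(pdev);
		else
			platform_device_put(pdev);	/* drop the ref only */
	}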
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 9573f1116450..f4214870d726 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
#endif
#ifdef CONFIG_OF
module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(tryopenfirmware, "Setting this to zero will disable the"
" default scan of the interfaces identified via OpenFirmware");
#endif
#ifdef CONFIG_DMI
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 3cfaec728604..f929e72bdac8 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -2071,8 +2071,7 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
mutex_lock(&ssif_infos_mutex);
- if (addr_info->client)
- i2c_unregister_device(addr_info->client);
+ i2c_unregister_device(addr_info->client);
list_del(&addr_info->link);
kfree(addr_info);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 76b270678b50..34bc1f3ca414 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -232,7 +232,7 @@ static int set_param_str(const char *val, const struct kernel_param *kp)
char valcp[16];
char *s;
- strncpy(valcp, val, 16);
+ strncpy(valcp, val, 15);
valcp[15] = '\0';
s = strstrip(valcp);
@@ -298,7 +298,7 @@ module_param(pretimeout, timeout, 0644);
MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
module_param(panic_wdt_timeout, timeout, 0644);
-MODULE_PARM_DESC(timeout, "Timeout value on kernel panic in seconds.");
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
module_param_cb(action, &param_ops_str, action_op, 0644);
MODULE_PARM_DESC(action, "Timeout action. One of: "
@@ -887,9 +887,9 @@ static int ipmi_open(struct inode *ino, struct file *filep)
}
}
-static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &read_q, wait);
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 9a1aaf538758..819fe37a3683 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -415,10 +415,10 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
return count;
}
-static unsigned int cm4040_poll(struct file *filp, poll_table *wait)
+static __poll_t cm4040_poll(struct file *filp, poll_table *wait)
{
struct reader_dev *dev = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &dev->poll_wait, wait);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d256110ba672..7a56d1a13ec3 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -769,10 +769,10 @@ static int pp_release(struct inode *inode, struct file *file)
}
/* No kernel lock held - fine */
-static unsigned int pp_poll(struct file *file, poll_table *wait)
+static __poll_t pp_poll(struct file *file, poll_table *wait)
{
struct pp_struct *pp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &pp->irq_wait, wait);
if (atomic_read(&pp->irqc))
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ec42c8bb9b0d..80f2c326db47 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -431,9 +431,9 @@ static int crng_init = 0;
static int crng_init_cnt = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE]);
+ __u32 out[CHACHA20_BLOCK_WORDS]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -817,7 +817,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u8 block[CHACHA20_BLOCK_SIZE];
+ __u32 block[CHACHA20_BLOCK_WORDS];
__u32 key[8];
} buf;
@@ -851,7 +851,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE])
+ __u32 out[CHACHA20_BLOCK_WORDS])
{
unsigned long v, flags;
@@ -867,7 +867,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
+static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
{
struct crng_state *crng = NULL;
@@ -885,7 +885,7 @@ static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -897,14 +897,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
+ s = &tmp[used / sizeof(__u32)];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
struct crng_state *crng = NULL;
@@ -920,7 +920,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1507,7 +1507,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -1784,10 +1784,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return ret;
}
-static unsigned int
+static __poll_t
random_poll(struct file *file, poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
@@ -2114,7 +2114,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
+ extract_crng((__u32 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2144,7 +2144,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
+ extract_crng(batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
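The random.c hunks above change the ChaCha20 output buffers from byte arrays to word arrays so they are naturally 4-byte aligned, which removes the (__u32 *) casts and turns byte offsets into word indices (used / sizeof(__u32)). A small user-space check of that index conversion, assuming the usual 64-byte ChaCha20 block:

#include <assert.h>
#include <stdint.h>

#define CHACHA20_BLOCK_SIZE     64
#define CHACHA20_BLOCK_WORDS    (CHACHA20_BLOCK_SIZE / sizeof(uint32_t))

int main(void)
{
        uint32_t tmp[CHACHA20_BLOCK_WORDS] = { 0 };
        int used = 32;                  /* bytes already consumed */

        /* old form: s = (uint32_t *)&((uint8_t *)tmp)[used]; */
        uint32_t *s = &tmp[used / sizeof(uint32_t)];

        /* both expressions name the same word for word-aligned offsets */
        assert(s == (uint32_t *)((uint8_t *)tmp + used));
        return 0;
}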
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 5542a438bbd0..c6a317120a55 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -147,7 +147,7 @@ static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void rtc_get_rtc_time(struct rtc_time *rtc_tm);
#ifdef RTC_IRQ
-static unsigned int rtc_poll(struct file *file, poll_table *wait);
+static __poll_t rtc_poll(struct file *file, poll_table *wait);
#endif
static void get_rtc_alm_time(struct rtc_time *alm_tm);
@@ -790,7 +790,7 @@ no_irq:
}
#ifdef RTC_IRQ
-static unsigned int rtc_poll(struct file *file, poll_table *wait)
+static __poll_t rtc_poll(struct file *file, poll_table *wait)
{
unsigned long l;
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 6aa32679fd58..7f49fa0f41d7 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -321,10 +321,10 @@ scdrv_write(struct file *file, const char __user *buf,
return status;
}
-static unsigned int
+static __poll_t
scdrv_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int status = 0;
struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
unsigned long flags;
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index d3a979e25724..fc041c462aa4 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -940,7 +940,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
return ret;
}
-static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_device.fifo_proc_list, wait);
if (kfifo_len(&sonypi_device.fifo))
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index a30352202f1f..18c81cbe4704 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -26,6 +26,17 @@ menuconfig TCG_TPM
if TCG_TPM
+config HW_RANDOM_TPM
+ bool "TPM HW Random Number Generator support"
+ depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m)
+ default y
+ ---help---
+ This setting exposes the TPM's Random Number Generator as a hwrng
+ device. This allows the kernel to collect randomness from the TPM at
+ boot, and provides the TPM randomness in /dev/hwrng.

+
+ If unsure, say Y.
+
config TCG_TIS_CORE
tristate
---help---
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 34b4bcf46f43..acd758381c58 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -6,8 +6,9 @@ obj-$(CONFIG_TCG_TPM) += tpm.o
tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
tpm-dev-common.o tpmrm-dev.o tpm1_eventlog.o tpm2_eventlog.o \
tpm2-space.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
-tpm-$(CONFIG_OF) += tpm_of.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_eventlog_acpi.o
+tpm-$(CONFIG_EFI) += tpm_eventlog_efi.o
+tpm-$(CONFIG_OF) += tpm_eventlog_of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 0eca20c5a80c..0a62c19937b6 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -26,8 +26,9 @@
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/major.h>
+#include <linux/tpm_eventlog.h>
+#include <linux/hw_random.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
DEFINE_IDR(dev_nums_idr);
static DEFINE_MUTEX(idr_lock);
@@ -80,21 +81,26 @@ void tpm_put_ops(struct tpm_chip *chip)
EXPORT_SYMBOL_GPL(tpm_put_ops);
/**
- * tpm_chip_find_get() - return tpm_chip for a given chip number
- * @chip_num: id to find
+ * tpm_chip_find_get() - find and reserve a TPM chip
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
*
- * The return'd chip has been tpm_try_get_ops'd and must be released via
- * tpm_put_ops
+ * Finds a TPM chip and reserves its class device and operations. The chip must
+ * be released with tpm_put_ops() after use.
+ *
+ * Return:
+ * A reserved &struct tpm_chip instance.
+ * %NULL if a chip is not found.
+ * %NULL if the chip is not available.
*/
-struct tpm_chip *tpm_chip_find_get(int chip_num)
+struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip)
{
- struct tpm_chip *chip, *res = NULL;
+ struct tpm_chip *res = NULL;
+ int chip_num = 0;
int chip_prev;
mutex_lock(&idr_lock);
- if (chip_num == TPM_ANY_NUM) {
- chip_num = 0;
+ if (!chip) {
do {
chip_prev = chip_num;
chip = idr_get_next(&dev_nums_idr, &chip_num);
@@ -104,8 +110,7 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
}
} while (chip_prev != chip_num);
} else {
- chip = idr_find(&dev_nums_idr, chip_num);
- if (chip && !tpm_try_get_ops(chip))
+ if (!tpm_try_get_ops(chip))
res = chip;
}
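With this rewrite the exported tpm_* helpers take a struct tpm_chip pointer, where NULL selects the first available chip, and each helper reserves and releases the chip itself. A hedged sketch of the resulting calling convention (the caller is hypothetical):

/* hypothetical in-kernel caller; illustrative only */
static int foo_seed_from_tpm(u8 *buf, size_t len)
{
        /*
         * NULL selects the default (first available) chip; the helper
         * reserves it internally via tpm_chip_find_get() and releases
         * it with tpm_put_ops() before returning.
         */
        return tpm_get_random(NULL, buf, len);
}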
@@ -387,6 +392,26 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
return 0;
}
+
+static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+
+ return tpm_get_random(chip, data, max);
+}
+
+static int tpm_add_hwrng(struct tpm_chip *chip)
+{
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ return 0;
+
+ snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+ "tpm-rng-%d", chip->dev_num);
+ chip->hwrng.name = chip->hwrng_name;
+ chip->hwrng.read = tpm_hwrng_read;
+ return hwrng_register(&chip->hwrng);
+}
+
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
@@ -419,11 +444,13 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
+ rc = tpm_add_hwrng(chip);
+ if (rc)
+ goto out_ppi;
+
rc = tpm_add_char_device(chip);
- if (rc) {
- tpm_bios_log_teardown(chip);
- return rc;
- }
+ if (rc)
+ goto out_hwrng;
rc = tpm_add_legacy_sysfs(chip);
if (rc) {
@@ -432,6 +459,14 @@ int tpm_chip_register(struct tpm_chip *chip)
}
return 0;
+
+out_hwrng:
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ hwrng_unregister(&chip->hwrng);
+out_ppi:
+ tpm_bios_log_teardown(chip);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_chip_register);
@@ -451,6 +486,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
cdev_device_del(&chip->cdevs, &chip->devs);
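The reworked registration path above is the usual goto-based unwind: each acquired resource gets a label, failures jump to the label of the last successful step, and teardown runs in reverse order (mirrored by tpm_chip_unregister()). A runnable skeleton of the idiom with placeholder resources:

#include <stdio.h>

/* placeholder acquire/release steps; illustrative only */
static int add_log(void)     { puts("log added");     return 0; }
static void del_log(void)    { puts("log removed"); }
static int add_hwrng(void)   { puts("hwrng added");   return 0; }
static void del_hwrng(void)  { puts("hwrng removed"); }
static int add_chardev(void) { puts("chardev added"); return 0; }

static int foo_register(void)
{
        int rc;

        rc = add_log();
        if (rc)
                return rc;

        rc = add_hwrng();
        if (rc)
                goto out_log;

        rc = add_chardev();
        if (rc)
                goto out_hwrng;

        return 0;

out_hwrng:
        del_hwrng();            /* undo in reverse order of acquisition */
out_log:
        del_log();
        return rc;
}

int main(void)
{
        return foo_register();
}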
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1d6729be4cd6..76df4fbcf089 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -30,9 +30,9 @@
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/pm_runtime.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
#define TPM_MAX_ORDINAL 243
#define TSC_MAX_ORDINAL 12
@@ -328,7 +328,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
-static bool tpm_validate_command(struct tpm_chip *chip,
+static int tpm_validate_command(struct tpm_chip *chip,
struct tpm_space *space,
const u8 *cmd,
size_t len)
@@ -340,10 +340,10 @@ static bool tpm_validate_command(struct tpm_chip *chip,
unsigned int nr_handles;
if (len < TPM_HEADER_SIZE)
- return false;
+ return -EINVAL;
if (!space)
- return true;
+ return 0;
if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) {
cc = be32_to_cpu(header->ordinal);
@@ -352,7 +352,7 @@ static bool tpm_validate_command(struct tpm_chip *chip,
if (i < 0) {
dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
cc);
- return false;
+ return -EOPNOTSUPP;
}
attrs = chip->cc_attrs_tbl[i];
@@ -362,11 +362,11 @@ static bool tpm_validate_command(struct tpm_chip *chip,
goto err_len;
}
- return true;
+ return 0;
err_len:
dev_dbg(&chip->dev,
"%s: insufficient command length %zu", __func__, len);
- return false;
+ return -EINVAL;
}
/**
@@ -391,8 +391,20 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
unsigned long stop;
bool need_locality;
- if (!tpm_validate_command(chip, space, buf, bufsiz))
- return -EINVAL;
+ rc = tpm_validate_command(chip, space, buf, bufsiz);
+ if (rc == -EINVAL)
+ return rc;
+ /*
+ * If the command is not implemented by the TPM, synthesize a
+ * response with a TPM2_RC_COMMAND_CODE return for user-space.
+ */
+ if (rc == -EOPNOTSUPP) {
+ header->length = cpu_to_be32(sizeof(*header));
+ header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+ TSS2_RESMGR_TPM_RC_LAYER);
+ return bufsiz;
+ }
if (bufsiz > TPM_BUFSIZE)
bufsiz = TPM_BUFSIZE;
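The synthesized response above tags the return code with the software layer that produced it: the resource-manager layer number is shifted into the upper half of the 32-bit code and OR-ed with the plain TPM rc. With the constants added to tpm.h further down (layer 11, shift 16), TPM2_RC_COMMAND_CODE (0x0143) becomes 0x000b0143; a quick user-space check of that arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TSS2_RC_LAYER_SHIFT      16
#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
#define TPM2_RC_COMMAND_CODE     0x0143

int main(void)
{
        uint32_t rc = TPM2_RC_COMMAND_CODE | TSS2_RESMGR_TPM_RC_LAYER;

        printf("layered rc: 0x%08x\n", (unsigned)rc);   /* 0x000b0143 */
        assert(rc == 0x000b0143);
        return 0;
}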
@@ -413,6 +425,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (chip->dev.parent)
pm_runtime_get_sync(chip->dev.parent);
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
/* Store the decision as chip->locality will be changed. */
need_locality = chip->locality == -1;
@@ -489,6 +504,9 @@ out:
chip->locality = -1;
}
out_no_locality:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
if (chip->dev.parent)
pm_runtime_put_sync(chip->dev.parent);
@@ -809,19 +827,20 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
}
/**
- * tpm_is_tpm2 - is the chip a TPM2 chip?
- * @chip_num: tpm idx # or ANY
+ * tpm_is_tpm2 - do we have a TPM2 chip?
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
*
- * Returns < 0 on error, and 1 or 0 on success depending whether the chip
- * is a TPM2 chip.
+ * Return:
+ * 1 if we have a TPM2 chip.
+ * 0 if we don't have a TPM2 chip.
+ * A negative number for system errors (errno).
*/
-int tpm_is_tpm2(u32 chip_num)
+int tpm_is_tpm2(struct tpm_chip *chip)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
@@ -833,23 +852,19 @@ int tpm_is_tpm2(u32 chip_num)
EXPORT_SYMBOL_GPL(tpm_is_tpm2);
/**
- * tpm_pcr_read - read a pcr value
- * @chip_num: tpm idx # or ANY
- * @pcr_idx: pcr idx to retrieve
- * @res_buf: TPM_PCR value
- * size of res_buf is 20 bytes (or NULL if you don't care)
+ * tpm_pcr_read - read a PCR value from SHA1 bank
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be retrieved
+ * @res_buf: the value of the PCR
*
- * The TPM driver should be built-in, but for whatever reason it
- * isn't, protect against the chip disappearing, by incrementing
- * the module usage count.
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
+int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
@@ -889,25 +904,26 @@ static int tpm1_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash,
}
/**
- * tpm_pcr_extend - extend pcr value with hash
- * @chip_num: tpm idx # or AN&
- * @pcr_idx: pcr idx to extend
- * @hash: hash value used to extend pcr value
+ * tpm_pcr_extend - extend a PCR value in SHA1 bank.
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be retrieved
+ * @hash: the hash value used to extend the PCR value
*
- * The TPM driver should be built-in, but for whatever reason it
- * isn't, protect against the chip disappearing, by incrementing
- * the module usage count.
+ * Note: with TPM 2.0, this also extends those banks with a known digest size
+ * to the crypto subsystem in order to prevent malicious use of those PCR
+ * banks. In the future we should dynamically determine digest sizes.
+ *
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
+int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
{
int rc;
- struct tpm_chip *chip;
struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)];
u32 count = 0;
int i;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
@@ -1019,82 +1035,29 @@ out:
return rc;
}
-int tpm_send(u32 chip_num, void *cmd, size_t buflen)
+/**
+ * tpm_send - send a TPM command
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @cmd: a TPM command buffer
+ * @buflen: the length of the TPM command buffer
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
rc = tpm_transmit_cmd(chip, NULL, cmd, buflen, 0, 0,
- "attempting tpm_cmd");
+ "attempting to a send a command");
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
-static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
- bool check_cancel, bool *canceled)
-{
- u8 status = chip->ops->status(chip);
-
- *canceled = false;
- if ((status & mask) == mask)
- return true;
- if (check_cancel && chip->ops->req_canceled(chip, status)) {
- *canceled = true;
- return true;
- }
- return false;
-}
-
-int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue, bool check_cancel)
-{
- unsigned long stop;
- long rc;
- u8 status;
- bool canceled = false;
-
- /* check current status */
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
-
- stop = jiffies + timeout;
-
- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -ETIME;
- rc = wait_event_interruptible_timeout(*queue,
- wait_for_tpm_stat_cond(chip, mask, check_cancel,
- &canceled),
- timeout);
- if (rc > 0) {
- if (canceled)
- return -ECANCELED;
- return 0;
- }
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- do {
- tpm_msleep(TPM_TIMEOUT);
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
- } while (time_before(jiffies, stop));
- }
- return -ETIME;
-}
-EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
-
#define TPM_ORD_SAVESTATE 152
#define SAVESTATE_RESULT_SIZE 10
@@ -1187,16 +1150,15 @@ static const struct tpm_input_header tpm_getrandom_header = {
};
/**
- * tpm_get_random() - Get random bytes from the tpm's RNG
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @out: destination buffer for the random bytes
- * @max: the max number of bytes to write to @out
+ * tpm_get_random() - get random bytes from the TPM's RNG
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @out: destination buffer for the random bytes
+ * @max: the max number of bytes to write to @out
*
- * Returns < 0 on error and the number of bytes read on success
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
{
- struct tpm_chip *chip;
struct tpm_cmd_t tpm_cmd;
u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA), rlength;
int err, total = 0, retries = 5;
@@ -1205,8 +1167,8 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
@@ -1248,22 +1210,23 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
EXPORT_SYMBOL_GPL(tpm_get_random);
/**
- * tpm_seal_trusted() - seal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
+ * tpm_seal_trusted() - seal a trusted key payload
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
*
- * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
- * are supported.
+ * Note: only TPM 2.0 chips are supported. The TPM 1.x implementation is
+ * located in the keyring subsystem.
+ *
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ chip = tpm_chip_find_get(chip);
+ if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
rc = tpm2_seal_trusted(chip, payload, options);
@@ -1275,21 +1238,23 @@ EXPORT_SYMBOL_GPL(tpm_seal_trusted);
/**
* tpm_unseal_trusted() - unseal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
+ *
+ * Note: only TPM 2.0 chips are supported. The TPM 1.x implementation is
+ * located in the keyring subsystem.
*
- * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
- * are supported.
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+int tpm_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ chip = tpm_chip_find_get(chip);
+ if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
rc = tpm2_unseal_trusted(chip, payload, options);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 528cffbd49d3..f895fba4e20d 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/hw_random.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
@@ -34,6 +35,7 @@
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/highmem.h>
+#include <linux/tpm_eventlog.h>
#include <crypto/hash_info.h>
#ifdef CONFIG_X86
@@ -93,12 +95,17 @@ enum tpm2_structures {
TPM2_ST_SESSIONS = 0x8002,
};
+/* Indicates from what layer of the software stack the error comes from */
+#define TSS2_RC_LAYER_SHIFT 16
+#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
+
enum tpm2_return_codes {
TPM2_RC_SUCCESS = 0x0000,
TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
TPM2_RC_HANDLE = 0x008B,
TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
TPM2_RC_DISABLED = 0x0120,
+ TPM2_RC_COMMAND_CODE = 0x0143,
TPM2_RC_TESTING = 0x090A, /* RC_WARN */
TPM2_RC_REFERENCE_H0 = 0x0910,
};
@@ -210,6 +217,9 @@ struct tpm_chip {
int dev_num; /* /dev/tpm# */
unsigned long is_open; /* only one allowed */
+ char hwrng_name[64];
+ struct hwrng hwrng;
+
struct mutex tpm_mutex; /* tpm is processing */
unsigned long timeout_a; /* jiffies */
@@ -385,10 +395,6 @@ struct tpm_cmd_t {
tpm_cmd_params params;
} __packed;
-struct tpm2_digest {
- u16 alg_id;
- u8 digest[SHA512_DIGEST_SIZE];
-} __packed;
/* A string buffer type for constructing TPM commands. This is based on the
* ideas of string buffer code in security/keys/trusted.h but is heap based
@@ -512,16 +518,14 @@ int tpm_do_selftest(struct tpm_chip *chip);
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm_pm_suspend(struct device *dev);
int tpm_pm_resume(struct device *dev);
-int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue, bool check_cancel);
static inline void tpm_msleep(unsigned int delay_msec)
{
- usleep_range(delay_msec * 1000,
- (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US);
+ usleep_range((delay_msec * 1000) - TPM_TIMEOUT_RANGE_US,
+ delay_msec * 1000);
};
-struct tpm_chip *tpm_chip_find_get(int chip_num);
+struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
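The tpm_msleep() change flips the usleep_range() slack to the early side: the requested delay becomes the upper bound instead of the lower one, shaving up to TPM_TIMEOUT_RANGE_US off each sleep. A user-space sketch of the new bound computation (the 300 us range value is assumed from tpm.h):

#include <stdio.h>

#define TPM_TIMEOUT_RANGE_US 300        /* value assumed from tpm.h */

int main(void)
{
        unsigned int delay_msec = 5;
        unsigned long min_us = delay_msec * 1000 - TPM_TIMEOUT_RANGE_US;
        unsigned long max_us = delay_msec * 1000;

        /* old bounds: [5000, 5300] us; new bounds: [4700, 5000] us */
        printf("usleep_range(%lu, %lu)\n", min_us, max_us);
        return 0;
}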
@@ -575,4 +579,34 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
u8 *cmd);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
u32 cc, u8 *buf, size_t *bufsiz);
+
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_EFI)
+int tpm_read_log_efi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_efi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
#endif
diff --git a/drivers/char/tpm/tpm1_eventlog.c b/drivers/char/tpm/tpm1_eventlog.c
index 9a8605e500b5..add798bd69d0 100644
--- a/drivers/char/tpm/tpm1_eventlog.c
+++ b/drivers/char/tpm/tpm1_eventlog.c
@@ -21,13 +21,14 @@
*/
#include <linux/seq_file.h>
+#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
static const char* tcpa_event_type_strings[] = {
@@ -371,6 +372,10 @@ static int tpm_read_log(struct tpm_chip *chip)
if (rc != -ENODEV)
return rc;
+ rc = tpm_read_log_efi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
return tpm_read_log_of(chip);
}
@@ -388,11 +393,13 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
unsigned int cnt;
+ int log_version;
int rc = 0;
rc = tpm_read_log(chip);
- if (rc)
+ if (rc < 0)
return rc;
+ log_version = rc;
cnt = 0;
chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
@@ -404,7 +411,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
cnt++;
chip->bin_log_seqops.chip = chip;
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
chip->bin_log_seqops.seqops =
&tpm2_binary_b_measurements_seqops;
else
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index f40d20671a78..c17e75348a99 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -849,28 +849,26 @@ static const struct tpm_input_header tpm2_selftest_header = {
static int tpm2_do_selftest(struct tpm_chip *chip)
{
int rc;
- unsigned int delay_msec = 20;
+ unsigned int delay_msec = 10;
long duration;
struct tpm2_cmd cmd;
duration = jiffies_to_msecs(
tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST));
- while (duration > 0) {
+ while (1) {
cmd.header.in = tpm2_selftest_header;
cmd.params.selftest_in.full_test = 0;
rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE,
0, 0, "continue selftest");
- if (rc != TPM2_RC_TESTING)
+ if (rc != TPM2_RC_TESTING || delay_msec >= duration)
break;
- tpm_msleep(delay_msec);
- duration -= delay_msec;
-
- /* wait longer the next round */
+ /* wait longer than before */
delay_msec *= 2;
+ tpm_msleep(delay_msec);
}
return rc;
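The self-test loop now retries with exponential backoff: the delay doubles before each sleep, and the loop gives up once the delay reaches the ordinal duration. A self-contained sketch of the loop shape (the 10 ms start value matches the hunk; the TPM is simulated):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for a TPM still running its self-test; illustrative only */
static bool still_testing(int attempt)
{
        return attempt < 3;
}

int main(void)
{
        unsigned int delay_msec = 10;
        long duration = 2000;   /* cap, analogous to the ordinal duration */
        int attempt = 0;

        while (1) {
                if (!still_testing(attempt++) || delay_msec >= duration)
                        break;
                /* wait longer than before */
                delay_msec *= 2;
                printf("sleeping %u ms\n", delay_msec);
        }
        return 0;
}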
diff --git a/drivers/char/tpm/tpm2_eventlog.c b/drivers/char/tpm/tpm2_eventlog.c
index 34a8afa69138..1ce4411292ba 100644
--- a/drivers/char/tpm/tpm2_eventlog.c
+++ b/drivers/char/tpm/tpm2_eventlog.c
@@ -21,9 +21,9 @@
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
/*
* calc_tpm2_event_size() - calculate the event size, where event
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_eventlog_acpi.c
index 169edf3ce86d..66f19e93c216 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_eventlog_acpi.c
@@ -25,9 +25,9 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
struct acpi_tcpa {
struct acpi_table_header hdr;
@@ -102,7 +102,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
- return 0;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
err:
kfree(log->bios_event_log);
diff --git a/drivers/char/tpm/tpm_eventlog_efi.c b/drivers/char/tpm/tpm_eventlog_efi.c
new file mode 100644
index 000000000000..e3f9ffd341d2
--- /dev/null
+++ b/drivers/char/tpm/tpm_eventlog_efi.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 Google
+ *
+ * Authors:
+ * Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "tpm.h"
+
+/* read binary bios log from EFI configuration table */
+int tpm_read_log_efi(struct tpm_chip *chip)
+{
+
+ struct linux_efi_tpm_eventlog *log_tbl;
+ struct tpm_bios_log *log;
+ u32 log_size;
+ u8 tpm_log_version;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return -ENODEV;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ log = &chip->log;
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table !\n");
+ return -ENOMEM;
+ }
+
+ log_size = log_tbl->size;
+ memunmap(log_tbl);
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
+ MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table payload!\n");
+ return -ENOMEM;
+ }
+
+ /* allocate EventLog space */
+ log->bios_event_log = kmalloc(log_size, GFP_KERNEL);
+ if (!log->bios_event_log)
+ goto err_memunmap;
+ memcpy(log->bios_event_log, log_tbl->log, log_size);
+ log->bios_event_log_end = log->bios_event_log + log_size;
+
+ tpm_log_version = log_tbl->version;
+ memunmap(log_tbl);
+ return tpm_log_version;
+
+err_memunmap:
+ memunmap(log_tbl);
+ return -ENOMEM;
+}
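tpm_read_log_efi() maps the table twice on purpose: first just the fixed-size header to learn the variable log size, then header plus payload once the size is known. The same two-stage pattern in a runnable user-space analogue, with an in-memory blob standing in for the memremap()ed table:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* length-prefixed blob, analogous to struct linux_efi_tpm_eventlog */
struct log_header {
        uint32_t size;          /* number of payload bytes that follow */
};

int main(void)
{
        uint8_t backing[sizeof(struct log_header) + 4];
        struct log_header hdr = { .size = 4 };
        uint32_t log_size;
        uint8_t *copy;

        memcpy(backing, &hdr, sizeof(hdr));
        memcpy(backing + sizeof(hdr), "TPM!", 4);

        /* stage 1: read only the fixed-size header to learn the size */
        memcpy(&log_size, backing, sizeof(log_size));

        /* stage 2: with the size known, copy the payload out */
        copy = malloc(log_size);
        if (!copy)
                return 1;
        memcpy(copy, backing + sizeof(hdr), log_size);

        fwrite(copy, 1, log_size, stdout);
        putchar('\n');
        free(copy);
        return 0;
}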
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_eventlog_of.c
index aadb7f464076..96fd5646f866 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_eventlog_of.c
@@ -17,9 +17,9 @@
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
int tpm_read_log_of(struct tpm_chip *chip)
{
@@ -76,5 +76,7 @@ int tpm_read_log_of(struct tpm_chip *chip)
memcpy(log->bios_event_log, __va(base), size);
- return 0;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
}
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 79d6bbb58e39..c1dd39eaaeeb 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -665,9 +665,9 @@ out_err:
}
static const struct i2c_device_id tpm_tis_i2c_table[] = {
- {"tpm_i2c_infineon", 0},
- {"slb9635tt", 0},
- {"slb9645tt", 1},
+ {"tpm_i2c_infineon"},
+ {"slb9635tt"},
+ {"slb9645tt"},
{},
};
@@ -675,24 +675,9 @@ MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
#ifdef CONFIG_OF
static const struct of_device_id tpm_tis_i2c_of_match[] = {
- {
- .name = "tpm_i2c_infineon",
- .type = "tpm",
- .compatible = "infineon,tpm_i2c_infineon",
- .data = (void *)0
- },
- {
- .name = "slb9635tt",
- .type = "tpm",
- .compatible = "infineon,slb9635tt",
- .data = (void *)0
- },
- {
- .name = "slb9645tt",
- .type = "tpm",
- .compatible = "infineon,slb9645tt",
- .data = (void *)1
- },
+ {.compatible = "infineon,tpm_i2c_infineon"},
+ {.compatible = "infineon,slb9635tt"},
+ {.compatible = "infineon,slb9645tt"},
{},
};
MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index e2d1055fb814..f08949a5f678 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -133,93 +133,14 @@ static int check_acpi_tpm2(struct device *dev)
}
#endif
-#ifdef CONFIG_X86
-#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
-#define ILB_REMAP_SIZE 0x100
-#define LPC_CNTRL_REG_OFFSET 0x84
-#define LPC_CLKRUN_EN (1 << 2)
-
-static void __iomem *ilb_base_addr;
-
-static inline bool is_bsw(void)
-{
- return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
-}
-
-/**
- * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running
- */
-static void tpm_platform_begin_xfer(void)
-{
- u32 clkrun_val;
-
- if (!is_bsw())
- return;
-
- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /* Disable LPC CLKRUN# */
- clkrun_val &= ~LPC_CLKRUN_EN;
- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
-
-}
-
-/**
- * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off
- */
-static void tpm_platform_end_xfer(void)
-{
- u32 clkrun_val;
-
- if (!is_bsw())
- return;
-
- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /* Enable LPC CLKRUN# */
- clkrun_val |= LPC_CLKRUN_EN;
- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
-
-}
-#else
-static inline bool is_bsw(void)
-{
- return false;
-}
-
-static void tpm_platform_begin_xfer(void)
-{
-}
-
-static void tpm_platform_end_xfer(void)
-{
-}
-#endif
-
static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
while (len--)
*result++ = ioread8(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -228,13 +149,9 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
while (len--)
iowrite8(*value++, phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -242,12 +159,8 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
*result = ioread16(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -255,12 +168,8 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
*result = ioread32(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -268,12 +177,8 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
iowrite32(value, phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -461,11 +366,6 @@ static int __init init_tis(void)
if (rc)
goto err_force;
-#ifdef CONFIG_X86
- if (is_bsw())
- ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
- ILB_REMAP_SIZE);
-#endif
rc = platform_driver_register(&tis_drv);
if (rc)
goto err_platform;
@@ -484,10 +384,6 @@ err_pnp:
err_platform:
if (force_pdev)
platform_device_unregister(force_pdev);
-#ifdef CONFIG_X86
- if (is_bsw())
- iounmap(ilb_base_addr);
-#endif
err_force:
return rc;
}
@@ -497,10 +393,6 @@ static void __exit cleanup_tis(void)
pnp_unregister_driver(&tis_pnp_driver);
platform_driver_unregister(&tis_drv);
-#ifdef CONFIG_X86
- if (is_bsw())
- iounmap(ilb_base_addr);
-#endif
if (force_pdev)
platform_device_unregister(force_pdev);
}
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index fdde971bc810..183a5f54d875 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -31,6 +31,74 @@
#include "tpm.h"
#include "tpm_tis_core.h"
+/* This is a polling delay to check for status and burstcount.
+ * As per ddwg input, the expectation is that the status check and
+ * burstcount check should return within a few usecs.
+ */
+#define TPM_POLL_SLEEP 1 /* msec */
+
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_POLL_SLEEP);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
/* Before we attempt to access the TPM we must see that the valid bit is set.
* The specification says that this bit is 0 at reset and remains 0 until the
* 'TPM has gone through its self test and initialization and has established
@@ -164,7 +232,7 @@ static int get_burstcount(struct tpm_chip *chip)
burstcnt = (value >> 8) & 0xFFFF;
if (burstcnt)
return burstcnt;
- tpm_msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_POLL_SLEEP);
} while (time_before(jiffies, stop));
return -EBUSY;
}
@@ -421,19 +489,28 @@ static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
int i, rc;
u32 did_vid;
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
if (rc < 0)
- return rc;
+ goto out;
for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
if (vendor_timeout_overrides[i].did_vid != did_vid)
continue;
memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
sizeof(vendor_timeout_overrides[i].timeout_us));
- return true;
+ rc = true;
+ goto out;
}
- return false;
+ rc = false;
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return rc;
}
/*
@@ -653,14 +730,73 @@ void tpm_tis_remove(struct tpm_chip *chip)
u32 interrupt;
int rc;
+ tpm_tis_clkrun_enable(chip, true);
+
rc = tpm_tis_read32(priv, reg, &interrupt);
if (rc < 0)
interrupt = 0;
tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+
+ tpm_tis_clkrun_enable(chip, false);
+
+ if (priv->ilb_base_addr)
+ iounmap(priv->ilb_base_addr);
}
EXPORT_SYMBOL_GPL(tpm_tis_remove);
+/**
+ * tpm_tis_clkrun_enable() - Keep the CLKRUN protocol disabled for the entire
+ * duration of a single TPM command
+ * @chip: TPM chip to use
+ * @value: 1 - Disable CLKRUN protocol, so that clocks are free running
+ * 0 - Enable CLKRUN protocol
+ * Call this function directly in tpm_tis_remove() in the error or driver
+ * removal path, since chip->ops is set to NULL in tpm_chip_unregister().
+ */
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
+{
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ u32 clkrun_val;
+
+ if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
+ !data->ilb_base_addr)
+ return;
+
+ if (value) {
+ data->clkrun_enabled++;
+ if (data->clkrun_enabled > 1)
+ return;
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Disable LPC CLKRUN# */
+ clkrun_val &= ~LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ } else {
+ data->clkrun_enabled--;
+ if (data->clkrun_enabled)
+ return;
+
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Enable LPC CLKRUN# */
+ clkrun_val |= LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ }
+}
+
static const struct tpm_class_ops tpm_tis = {
.flags = TPM_OPS_AUTO_STARTUP,
.status = tpm_tis_status,
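tpm_tis_clkrun_enable() keeps a counter so that nested disable/enable pairs only touch the LPC control register on the outermost transition. The same refcount pattern in a runnable sketch:

#include <stdio.h>

static unsigned int clkrun_disabled;    /* like data->clkrun_enabled */

static void hw_set_clkrun(int on)
{
        printf("CLKRUN# %s\n", on ? "enabled" : "disabled");
}

static void clkrun_enable(int value)
{
        if (value) {
                if (++clkrun_disabled > 1)
                        return;         /* outer caller already disabled it */
                hw_set_clkrun(0);
        } else {
                if (--clkrun_disabled)
                        return;         /* an outer caller still needs it off */
                hw_set_clkrun(1);
        }
}

int main(void)
{
        clkrun_enable(1);       /* outermost: touches the hardware */
        clkrun_enable(1);       /* nested: counter only */
        clkrun_enable(0);       /* nested: counter only */
        clkrun_enable(0);       /* outermost: touches the hardware */
        return 0;
}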
@@ -673,13 +809,17 @@ static const struct tpm_class_ops tpm_tis = {
.req_canceled = tpm_tis_req_canceled,
.request_locality = request_locality,
.relinquish_locality = release_locality,
+ .clk_enable = tpm_tis_clkrun_enable,
};
int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
const struct tpm_tis_phy_ops *phy_ops,
acpi_handle acpi_dev_handle)
{
- u32 vendor, intfcaps, intmask;
+ u32 vendor;
+ u32 intfcaps;
+ u32 intmask;
+ u32 clkrun_val;
u8 rid;
int rc, probe;
struct tpm_chip *chip;
@@ -700,6 +840,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->phy_ops = phy_ops;
dev_set_drvdata(&chip->dev, priv);
+ if (is_bsw()) {
+ priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+ ILB_REMAP_SIZE);
+ if (!priv->ilb_base_addr)
+ return -ENOMEM;
+
+ clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
+ /* Check if CLKRUN# is already not enabled in the LPC bus */
+ if (!(clkrun_val & LPC_CLKRUN_EN)) {
+ iounmap(priv->ilb_base_addr);
+ priv->ilb_base_addr = NULL;
+ }
+ }
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
if (wait_startup(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
@@ -790,9 +947,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
}
}
- return tpm_chip_register(chip);
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto out_err;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return 0;
out_err:
+ if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+ chip->ops->clk_enable(chip, false);
+
tpm_tis_remove(chip);
+
return rc;
}
EXPORT_SYMBOL_GPL(tpm_tis_core_init);
@@ -804,22 +972,31 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
u32 intmask;
int rc;
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
/* reenable interrupts that device may have lost or
* BIOS/firmware may have disabled
*/
rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
if (rc < 0)
- return;
+ goto out;
rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
- return;
+ goto out;
intmask |= TPM_INTF_CMD_READY_INT
| TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
| TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return;
}
int tpm_tis_resume(struct device *dev)
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 6bbac319ff3b..d5c6a2e952b3 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -79,6 +79,11 @@ enum tis_defaults {
#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
#define TPM_RID(l) (0x0F04 | ((l) << 12))
+#define LPC_CNTRL_OFFSET 0x84
+#define LPC_CLKRUN_EN (1 << 2)
+#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
+#define ILB_REMAP_SIZE 0x100
+
enum tpm_tis_flags {
TPM_TIS_ITPM_WORKAROUND = BIT(0),
};
@@ -89,6 +94,8 @@ struct tpm_tis_data {
int irq;
bool irq_tested;
unsigned int flags;
+ void __iomem *ilb_base_addr;
+ u16 clkrun_enabled;
wait_queue_head_t int_queue;
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
@@ -144,6 +151,15 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
return data->phy_ops->write32(data, addr, value);
}
+static inline bool is_bsw(void)
+{
+#ifdef CONFIG_X86
+ return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
+#else
+ return false;
+#endif
+}
+
void tpm_tis_remove(struct tpm_chip *chip);
int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
const struct tpm_tis_phy_ops *phy_ops,
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 1d877cc9af97..674218b50b13 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -173,10 +173,10 @@ static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
*
* Return: Poll flags
*/
-static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
+static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
{
struct proxy_dev *proxy_dev = filp->private_data;
- unsigned ret;
+ __poll_t ret;
poll_wait(filp, &proxy_dev->wq, wait);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 656e8af95d52..911475d36800 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/freezer.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
@@ -39,6 +40,66 @@ enum status_bits {
VTPM_STATUS_CANCELED = 0x8,
};
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_TIMEOUT);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
static u8 vtpm_status(struct tpm_chip *chip)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d1aed2513bd9..813a2e46824d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -982,10 +982,10 @@ error_out:
return ret;
}
-static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
+static __poll_t port_fops_poll(struct file *filp, poll_table *wait)
{
struct port *port;
- unsigned int ret;
+ __poll_t ret;
port = filp->private_data;
poll_wait(filp, &port->waitqueue, wait);
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index b6c9cdead7f3..88e1cf475d3f 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1736,10 +1736,10 @@ end:
return pos;
}
-static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
+static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
{
struct xilly_channel *channel = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(filp, &channel->endpoint->ep_wait, wait);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c729a88007d0..b3b4ed9b6874 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -269,6 +269,7 @@ config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
select CLKSRC_MMIO
+ select TIMER_OF
config CLKSRC_MPS2
bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
@@ -441,6 +442,13 @@ config MTK_TIMER
help
Support for Mediatek timer driver.
+config SPRD_TIMER
+ bool "Spreadtrum timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select TIMER_OF
+ help
+ Enables support for the Spreadtrum timer driver.
+
config SYS_SUPPORTS_SH_MTU2
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 72711f1491e3..d6dec4489d66 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += owl-timer.o
+obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/owl-timer.c
index c68630565079..ea00a5e8f95d 100644
--- a/drivers/clocksource/owl-timer.c
+++ b/drivers/clocksource/owl-timer.c
@@ -168,5 +168,6 @@ static int __init owl_timer_init(struct device_node *node)
return 0;
}
-CLOCKSOURCE_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
-CLOCKSOURCE_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 9de47d4d2d9e..43f4d5c4d6fa 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -384,7 +384,7 @@ static int __init tcb_clksrc_init(void)
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000,
- ((divided_rate + 500000) % 1000000) / 1000);
+ ((divided_rate % 1000000) + 500) / 1000);
if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
/* use appropriate function to read 32 bit counter */
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index a31990408153..06ed88a2a8a0 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -24,7 +24,13 @@
#include "timer-of.h"
-static __init void timer_irq_exit(struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_exit - Release the interrupt
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Free the irq resource
+ */
+static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
{
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -34,8 +40,24 @@ static __init void timer_irq_exit(struct of_timer_irq *of_irq)
free_irq(of_irq->irq, clkevt);
}
-static __init int timer_irq_init(struct device_node *np,
- struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_init - Request the interrupt
+ * @np: a device tree node pointer
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Get the interrupt number from its definition in the DT and request it.
+ * The interrupt is looked up with the following fallback order:
+ *
+ * - Get interrupt number by name
+ * - Get interrupt number by index
+ *
+ * When the interrupt is per CPU, 'request_percpu_irq()' is called,
+ * otherwise 'request_irq()' is used.
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_irq_init(struct device_node *np,
+ struct of_timer_irq *of_irq)
{
int ret;
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -72,15 +94,30 @@ static __init int timer_irq_init(struct device_node *np,
return 0;
}
-static __init void timer_clk_exit(struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_exit - Release the clock resources
+ * @of_clk: a of_timer_clk structure pointer
+ *
+ * Disable the clock and release the reference held on it
+ */
+static __init void timer_of_clk_exit(struct of_timer_clk *of_clk)
{
of_clk->rate = 0;
clk_disable_unprepare(of_clk->clk);
clk_put(of_clk->clk);
}
-static __init int timer_clk_init(struct device_node *np,
- struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_init - Initialize the clock resources
+ * @np: a device tree node pointer
+ * @of_clk: a of_timer_clk structure pointer
+ *
+ * Get the clock by name or by index, enable it and get the rate
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_clk_init(struct device_node *np,
+ struct of_timer_clk *of_clk)
{
int ret;
@@ -116,19 +153,19 @@ out_clk_put:
goto out;
}
-static __init void timer_base_exit(struct of_timer_base *of_base)
+static __init void timer_of_base_exit(struct of_timer_base *of_base)
{
iounmap(of_base->base);
}
-static __init int timer_base_init(struct device_node *np,
- struct of_timer_base *of_base)
+static __init int timer_of_base_init(struct device_node *np,
+ struct of_timer_base *of_base)
{
- const char *name = of_base->name ? of_base->name : np->full_name;
-
- of_base->base = of_io_request_and_map(np, of_base->index, name);
+ of_base->base = of_base->name ?
+ of_io_request_and_map(np, of_base->index, of_base->name) :
+ of_iomap(np, of_base->index);
- if (IS_ERR(of_base->base)) {
- pr_err("Failed to iomap (%s)\n", name);
- return PTR_ERR(of_base->base);
+ if (IS_ERR_OR_NULL(of_base->base)) {
+ pr_err("Failed to iomap (%s)\n",
+ of_base->name ?: np->full_name);
+ return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM;
}
@@ -141,21 +178,21 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
int flags = 0;
if (to->flags & TIMER_OF_BASE) {
- ret = timer_base_init(np, &to->of_base);
+ ret = timer_of_base_init(np, &to->of_base);
if (ret)
goto out_fail;
flags |= TIMER_OF_BASE;
}
if (to->flags & TIMER_OF_CLOCK) {
- ret = timer_clk_init(np, &to->of_clk);
+ ret = timer_of_clk_init(np, &to->of_clk);
if (ret)
goto out_fail;
flags |= TIMER_OF_CLOCK;
}
if (to->flags & TIMER_OF_IRQ) {
- ret = timer_irq_init(np, &to->of_irq);
+ ret = timer_of_irq_init(np, &to->of_irq);
if (ret)
goto out_fail;
flags |= TIMER_OF_IRQ;
@@ -163,17 +200,20 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
if (!to->clkevt.name)
to->clkevt.name = np->name;
+
+ to->np = np;
+
return ret;
out_fail:
if (flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
return ret;
}
@@ -187,11 +227,11 @@ out_fail:
void __init timer_of_cleanup(struct timer_of *to)
{
if (to->flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (to->flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (to->flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
}
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index 3f708f1be43d..a5478f3e8589 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -33,6 +33,7 @@ struct of_timer_clk {
struct timer_of {
unsigned int flags;
+ struct device_node *np;
struct clock_event_device clkevt;
struct of_timer_base of_base;
struct of_timer_irq of_irq;
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
new file mode 100644
index 000000000000..ef9ebeafb3ed
--- /dev/null
+++ b/drivers/clocksource/timer-sprd.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "timer-of.h"
+
+#define TIMER_NAME "sprd_timer"
+
+#define TIMER_LOAD_LO 0x0
+#define TIMER_LOAD_HI 0x4
+#define TIMER_VALUE_LO 0x8
+#define TIMER_VALUE_HI 0xc
+
+#define TIMER_CTL 0x10
+#define TIMER_CTL_PERIOD_MODE BIT(0)
+#define TIMER_CTL_ENABLE BIT(1)
+#define TIMER_CTL_64BIT_WIDTH BIT(16)
+
+#define TIMER_INT 0x14
+#define TIMER_INT_EN BIT(0)
+#define TIMER_INT_RAW_STS BIT(1)
+#define TIMER_INT_MASK_STS BIT(2)
+#define TIMER_INT_CLR BIT(3)
+
+#define TIMER_VALUE_SHDW_LO 0x18
+#define TIMER_VALUE_SHDW_HI 0x1c
+
+#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
+
+static void sprd_timer_enable(void __iomem *base, u32 flag)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val |= TIMER_CTL_ENABLE;
+ if (flag & TIMER_CTL_64BIT_WIDTH)
+ val |= TIMER_CTL_64BIT_WIDTH;
+ else
+ val &= ~TIMER_CTL_64BIT_WIDTH;
+
+ if (flag & TIMER_CTL_PERIOD_MODE)
+ val |= TIMER_CTL_PERIOD_MODE;
+ else
+ val &= ~TIMER_CTL_PERIOD_MODE;
+
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_disable(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val &= ~TIMER_CTL_ENABLE;
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_update_counter(void __iomem *base, unsigned long cycles)
+{
+ writel_relaxed(cycles & TIMER_VALUE_LO_MASK, base + TIMER_LOAD_LO);
+ writel_relaxed(0, base + TIMER_LOAD_HI);
+}
+
+static void sprd_timer_enable_interrupt(void __iomem *base)
+{
+ writel_relaxed(TIMER_INT_EN, base + TIMER_INT);
+}
+
+static void sprd_timer_clear_interrupt(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_INT);
+
+ val |= TIMER_INT_CLR;
+ writel_relaxed(val, base + TIMER_INT);
+}
+
+static int sprd_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), cycles);
+ sprd_timer_enable(timer_of_base(to), 0);
+
+ return 0;
+}
+
+static int sprd_timer_set_periodic(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), timer_of_period(to));
+ sprd_timer_enable(timer_of_base(to), TIMER_CTL_PERIOD_MODE);
+
+ return 0;
+}
+
+static int sprd_timer_shutdown(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ return 0;
+}
+
+static irqreturn_t sprd_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ce = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_clear_interrupt(timer_of_base(to));
+
+ if (clockevent_state_oneshot(ce))
+ sprd_timer_disable(timer_of_base(to));
+
+ ce->event_handler(ce);
+ return IRQ_HANDLED;
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = TIMER_NAME,
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = sprd_timer_shutdown,
+ .set_state_periodic = sprd_timer_set_periodic,
+ .set_next_event = sprd_timer_set_next_event,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .handler = sprd_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static int __init sprd_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &to);
+ if (ret)
+ return ret;
+
+ sprd_timer_enable_interrupt(timer_of_base(&to));
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ 1, UINT_MAX);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
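
A usage note on the registration above: sprd_timer_update_counter() only ever programs the low 32 bits of the 64-bit counter (TIMER_VALUE_LO_MASK, with zero written to TIMER_LOAD_HI), which is why clockevents_config_and_register() is given a maximum delta of UINT_MAX. The interrupt handler stops the hardware in one-shot state because only periodic mode (TIMER_CTL_PERIOD_MODE) auto-reloads the counter.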
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index 8f2423789ba9..e5cdc3af684c 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
@@ -16,175 +17,318 @@
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/reset.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#include "timer-of.h"
#define TIM_CR1 0x00
#define TIM_DIER 0x0c
#define TIM_SR 0x10
#define TIM_EGR 0x14
+#define TIM_CNT 0x24
#define TIM_PSC 0x28
#define TIM_ARR 0x2c
+#define TIM_CCR1 0x34
#define TIM_CR1_CEN BIT(0)
+#define TIM_CR1_UDIS BIT(1)
#define TIM_CR1_OPM BIT(3)
#define TIM_CR1_ARPE BIT(7)
#define TIM_DIER_UIE BIT(0)
+#define TIM_DIER_CC1IE BIT(1)
#define TIM_SR_UIF BIT(0)
#define TIM_EGR_UG BIT(0)
-struct stm32_clock_event_ddata {
- struct clock_event_device evtdev;
- unsigned periodic_top;
- void __iomem *base;
+#define TIM_PSC_MAX USHRT_MAX
+#define TIM_PSC_CLKRATE 10000
+
+struct stm32_timer_private {
+ int bits;
};
-static int stm32_clock_event_shutdown(struct clock_event_device *evtdev)
+/**
+ * stm32_timer_of_bits_set - set accessor helper
+ * @to: a timer_of structure pointer
+ * @bits: the number of bits (16 or 32)
+ *
+ * Accessor helper to set the number of bits in the timer-of private
+ * structure.
+ *
+ */
+static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ struct stm32_timer_private *pd = to->private_data;
- writel_relaxed(0, base + TIM_CR1);
- return 0;
+ pd->bits = bits;
+}
+
+/**
+ * stm32_timer_of_bits_get - get accessor helper
+ * @to: a timer_of structure pointer
+ *
+ * Accessor helper to get the number of bits in the timer-of private
+ * structure.
+ *
+ * Returns an integer corresponding to the number of bits.
+ */
+static int stm32_timer_of_bits_get(struct timer_of *to)
+{
+ struct stm32_timer_private *pd = to->private_data;
+
+ return pd->bits;
+}
+
+static void __iomem *stm32_timer_cnt __read_mostly;
+
+static u64 notrace stm32_read_sched_clock(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
+}
+
+static struct delay_timer stm32_timer_delay;
+
+static unsigned long stm32_read_delay(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
}
-static int stm32_clock_event_set_periodic(struct clock_event_device *evtdev)
+static void stm32_clock_event_disable(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ writel_relaxed(0, timer_of_base(to) + TIM_DIER);
+}
+
+/**
+ * stm32_timer_start - Start the counter without event
+ * @to: a timer_of structure pointer
+ *
+ * Start the timer so that the counter is reset and starts
+ * incrementing, but disable the update interrupt event on counter
+ * overflow. By default, the counter counts up.
+ */
+static void stm32_timer_start(struct timer_of *to)
+{
+ writel_relaxed(TIM_CR1_UDIS | TIM_CR1_CEN, timer_of_base(to) + TIM_CR1);
+}
+
+static int stm32_clock_event_shutdown(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_clock_event_disable(to);
- writel_relaxed(data->periodic_top, base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
return 0;
}
static int stm32_clock_event_set_next_event(unsigned long evt,
- struct clock_event_device *evtdev)
+ struct clock_event_device *clkevt)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
+ struct timer_of *to = to_timer_of(clkevt);
+ unsigned long now, next;
+
+ next = readl_relaxed(timer_of_base(to) + TIM_CNT) + evt;
+ writel_relaxed(next, timer_of_base(to) + TIM_CCR1);
+ now = readl_relaxed(timer_of_base(to) + TIM_CNT);
+
+ if ((next - now) > evt)
+ return -ETIME;
- writel_relaxed(evt, data->base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_OPM | TIM_CR1_CEN,
- data->base + TIM_CR1);
+ writel_relaxed(TIM_DIER_CC1IE, timer_of_base(to) + TIM_DIER);
+
+ return 0;
+}
+
+static int stm32_clock_event_set_periodic(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
+
+ return stm32_clock_event_set_next_event(timer_of_period(to), clkevt);
+}
+
+static int stm32_clock_event_set_oneshot(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
return 0;
}
static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
{
- struct stm32_clock_event_ddata *data = dev_id;
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- writel_relaxed(0, data->base + TIM_SR);
+ if (clockevent_state_periodic(clkevt))
+ stm32_clock_event_set_periodic(clkevt);
+ else
+ stm32_clock_event_shutdown(clkevt);
- data->evtdev.event_handler(&data->evtdev);
+ clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
-static struct stm32_clock_event_ddata clock_event_ddata = {
- .evtdev = {
- .name = "stm32 clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_state_shutdown = stm32_clock_event_shutdown,
- .set_state_periodic = stm32_clock_event_set_periodic,
- .set_state_oneshot = stm32_clock_event_shutdown,
- .tick_resume = stm32_clock_event_shutdown,
- .set_next_event = stm32_clock_event_set_next_event,
- .rating = 200,
- },
-};
+/**
+ * stm32_timer_set_width - Sort out the timer width (32/16)
+ * @to: a pointer to a timer-of structure
+ *
+ * Write the 32-bit max value to TIM_ARR and read back the result. If
+ * the timer is 32 bits wide, the result will be UINT_MAX, otherwise
+ * it will be truncated to USHRT_MAX by the 16-bit register.
+ *
+ */
+static void __init stm32_timer_set_width(struct timer_of *to)
+{
+ u32 width;
+
+ writel_relaxed(UINT_MAX, timer_of_base(to) + TIM_ARR);
+
+ width = readl_relaxed(timer_of_base(to) + TIM_ARR);
+
+ stm32_timer_of_bits_set(to, width == UINT_MAX ? 32 : 16);
+}
-static int __init stm32_clockevent_init(struct device_node *np)
+/**
+ * stm32_timer_set_prescaler - Compute and set the prescaler register
+ * @to: a pointer to a timer-of structure
+ *
+ * Depending on the timer width, compute the prescaler to always
+ * target a 10 kHz timer rate (TIM_PSC_CLKRATE) for the 16-bit
+ * timers. 32-bit timers are considered precise and long enough
+ * not to need the prescaler.
+ */
+static void __init stm32_timer_set_prescaler(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data = &clock_event_ddata;
- struct clk *clk;
- struct reset_control *rstc;
- unsigned long rate, max_delta;
- int irq, ret, bits, prescaler = 1;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- pr_err("failed to get clock for clockevent (%d)\n", ret);
- goto err_clk_get;
+ int prescaler = 1;
+
+ if (stm32_timer_of_bits_get(to) != 32) {
+ prescaler = DIV_ROUND_CLOSEST(timer_of_rate(to),
+ TIM_PSC_CLKRATE);
+ /*
+ * The prescaler register is a u16, so the value
+ * can't be greater than TIM_PSC_MAX; cap it in
+ * that case.
+ */
+ prescaler = prescaler < TIM_PSC_MAX ? prescaler : TIM_PSC_MAX;
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("failed to enable timer clock for clockevent (%d)\n",
- ret);
- goto err_clk_enable;
- }
+ writel_relaxed(prescaler - 1, timer_of_base(to) + TIM_PSC);
+ writel_relaxed(TIM_EGR_UG, timer_of_base(to) + TIM_EGR);
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- rate = clk_get_rate(clk);
+ /* Adjust rate and period given the prescaler value */
+ to->of_clk.rate = DIV_ROUND_CLOSEST(to->of_clk.rate, prescaler);
+ to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
+}
- rstc = of_reset_control_get(np, NULL);
- if (!IS_ERR(rstc)) {
- reset_control_assert(rstc);
- reset_control_deassert(rstc);
+static int __init stm32_clocksource_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
+ const char *name = to->np->full_name;
+
+ /*
+ * This driver allows registering several timers and relies on
+ * the generic time framework to select the right one.
+ * However, nothing provides the same selection for the
+ * sched_clock. We are not interested in a sched_clock for the
+ * 16-bit timers, only for the 32-bit ones, so if no 32-bit
+ * timer is registered yet, we select this 32-bit timer as the
+ * sched_clock.
+ */
+ if (bits == 32 && !stm32_timer_cnt) {
+
+ /*
+ * Start immediately the counter as we will be using
+ * it right after.
+ */
+ stm32_timer_start(to);
+
+ stm32_timer_cnt = timer_of_base(to) + TIM_CNT;
+ sched_clock_register(stm32_read_sched_clock, bits, timer_of_rate(to));
+ pr_info("%s: STM32 sched_clock registered\n", name);
+
+ stm32_timer_delay.read_current_timer = stm32_read_delay;
+ stm32_timer_delay.freq = timer_of_rate(to);
+ register_current_timer_delay(&stm32_timer_delay);
+ pr_info("%s: STM32 delay timer registered\n", name);
}
- data->base = of_iomap(np, 0);
- if (!data->base) {
- ret = -ENXIO;
- pr_err("failed to map registers for clockevent\n");
- goto err_iomap;
- }
+ return clocksource_mmio_init(timer_of_base(to) + TIM_CNT, name,
+ timer_of_rate(to), bits == 32 ? 250 : 100,
+ bits, clocksource_mmio_readl_up);
+}
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -EINVAL;
- pr_err("%pOF: failed to get irq.\n", np);
- goto err_get_irq;
- }
+static void __init stm32_clockevent_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
- /* Detect whether the timer is 16 or 32 bits */
- writel_relaxed(~0U, data->base + TIM_ARR);
- max_delta = readl_relaxed(data->base + TIM_ARR);
- if (max_delta == ~0U) {
- prescaler = 1;
- bits = 32;
- } else {
- prescaler = 1024;
- bits = 16;
- }
- writel_relaxed(0, data->base + TIM_ARR);
+ to->clkevt.name = to->np->full_name;
+ to->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ to->clkevt.set_state_shutdown = stm32_clock_event_shutdown;
+ to->clkevt.set_state_periodic = stm32_clock_event_set_periodic;
+ to->clkevt.set_state_oneshot = stm32_clock_event_set_oneshot;
+ to->clkevt.tick_resume = stm32_clock_event_shutdown;
+ to->clkevt.set_next_event = stm32_clock_event_set_next_event;
+ to->clkevt.rating = bits == 32 ? 250 : 100;
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to), 0x1,
+ (1 << bits) - 1);
+
+ pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
+ to->np, bits);
+}
+
+static int __init stm32_timer_init(struct device_node *node)
+{
+ struct reset_control *rstc;
+ struct timer_of *to;
+ int ret;
+
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
- writel_relaxed(prescaler - 1, data->base + TIM_PSC);
- writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR);
- writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
- writel_relaxed(0, data->base + TIM_SR);
+ to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
+ to->of_irq.handler = stm32_clock_event_handler;
- data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ);
+ ret = timer_of_init(node, to);
+ if (ret)
+ goto err;
- clockevents_config_and_register(&data->evtdev,
- DIV_ROUND_CLOSEST(rate, prescaler),
- 0x1, max_delta);
+ to->private_data = kzalloc(sizeof(struct stm32_timer_private),
+ GFP_KERNEL);
+ if (!to->private_data)
+ goto deinit;
- ret = request_irq(irq, stm32_clock_event_handler, IRQF_TIMER,
- "stm32 clockevent", data);
- if (ret) {
- pr_err("%pOF: failed to request irq.\n", np);
- goto err_get_irq;
+ rstc = of_reset_control_get(node, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
}
- pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
- np, bits);
+ stm32_timer_set_width(to);
- return ret;
+ stm32_timer_set_prescaler(to);
+
+ ret = stm32_clocksource_init(to);
+ if (ret)
+ goto deinit;
+
+ stm32_clockevent_init(to);
+ return 0;
-err_get_irq:
- iounmap(data->base);
-err_iomap:
- clk_disable_unprepare(clk);
-err_clk_enable:
- clk_put(clk);
-err_clk_get:
+deinit:
+ timer_of_cleanup(to);
+err:
+ kfree(to);
return ret;
}
-TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
+TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_timer_init);
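
The comparator-based set_next_event() above is a common pattern for free-running upcounters, and is worth distilling. In the sketch below, read_cnt() and write_cmp() are hypothetical register accessors standing in for the TIM_CNT/TIM_CCR1 accesses; <linux/errno.h> provides -ETIME:

    static int program_event(u32 (*read_cnt)(void),
                             void (*write_cmp)(u32),
                             unsigned long delta)
    {
            u32 next = read_cnt() + delta;  /* wraps naturally */
            u32 now;

            write_cmp(next);
            now = read_cnt();

            /*
             * If the counter had already moved past 'next' before the
             * compare register was armed, the unsigned difference
             * next - now exceeds the requested delta: return -ETIME so
             * the clockevents core retries with a larger delta.
             */
            if ((u32)(next - now) > delta)
                    return -ETIME;

            return 0;
    }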
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index bdce4488ded1..3a88e33b0cfe 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,29 @@
# ARM CPU Frequency scaling drivers
#
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ARM_ARMADA_37XX_CPUFREQ
+ tristate "Armada 37xx CPUFreq support"
+ depends on ARCH_MVEBU
+ help
+ This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
+ The Armada 37xx PMU supports 4 frequency and VDD levels.
+
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
@@ -12,6 +35,30 @@ config ARM_BIG_LITTLE_CPUFREQ
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+config ARM_DT_BL_CPUFREQ
+ tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && OF
+ help
+	  This enables probing via DT for the Generic CPUfreq driver for
+	  ARM big.LITTLE platforms. The frequency tables are taken from DT.
+
+config ARM_SCPI_CPUFREQ
+ tristate "SCPI based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+ help
+ This adds the CPUfreq driver support for ARM big.LITTLE platforms
+ using SCPI protocol for CPU power management.
+
+ This driver uses SCPI Message Protocol driver to interact with the
+ firmware providing the CPU DVFS functionality.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ help
+	  This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
+
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on ARCH_BRCMSTB || COMPILE_TEST
@@ -33,20 +80,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
If in doubt, say N.
-config ARM_DT_BL_CPUFREQ
- tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && OF
- help
- This enables probing via DT for Generic CPUfreq driver for ARM
- big.LITTLE platform. This gets frequency tables from DT.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
- tristate "Versatile Express SPC based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
- help
- This add the CPUfreq driver support for Versatile Express
- big.LITTLE platforms using SPC for power management.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
@@ -205,16 +238,6 @@ config ARM_SA1100_CPUFREQ
config ARM_SA1110_CPUFREQ
bool
-config ARM_SCPI_CPUFREQ
- tristate "SCPI based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
- help
- This adds the CPUfreq driver support for ARM big.LITTLE platforms
- using SCPI protocol for CPU power management.
-
- This driver uses SCPI Message Protocol driver to interact with the
- firmware providing the CPU DVFS functionality.
-
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
@@ -275,20 +298,3 @@ config ARM_PXA2xx_CPUFREQ
This adds the CPUFreq driver support for Intel PXA2xx SOCs.
If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- default n
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 812f9e0d01a3..e07715ce8844 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,23 +52,26 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
+obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
@@ -81,8 +84,6 @@ obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
-obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
-obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
##################################################################################
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 65ec5f01aa8d..c56b57dcfda5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -526,34 +526,13 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
static void bL_cpufreq_ready(struct cpufreq_policy *policy)
{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
int cur_cluster = cpu_to_cluster(policy->cpu);
- struct device_node *np;
/* Do not register a cpu_cooling device if we are in IKS mode */
if (cur_cluster >= MAX_CLUSTERS)
return;
- np = of_node_get(cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(cdev[cur_cluster])) {
- dev_err(cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev[cur_cluster]));
- cdev[cur_cluster] = NULL;
- }
- }
- of_node_put(np);
+ cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver bL_cpufreq_driver = {
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
new file mode 100644
index 000000000000..c6ebc88a7d8d
--- /dev/null
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPU frequency scaling support for Armada 37xx platform.
+ *
+ * Copyright (C) 2017 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Power management in North Bridge register set */
+#define ARMADA_37XX_NB_L0L1 0x18
+#define ARMADA_37XX_NB_L2L3 0x1C
+#define ARMADA_37XX_NB_TBG_DIV_OFF 13
+#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7
+#define ARMADA_37XX_NB_CLK_SEL_OFF 11
+#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1
+#define ARMADA_37XX_NB_CLK_SEL_TBG 0x1
+#define ARMADA_37XX_NB_TBG_SEL_OFF 9
+#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3
+#define ARMADA_37XX_NB_VDD_SEL_OFF 6
+#define ARMADA_37XX_NB_VDD_SEL_MASK 0x3
+#define ARMADA_37XX_NB_CONFIG_SHIFT 16
+#define ARMADA_37XX_NB_DYN_MOD 0x24
+#define ARMADA_37XX_NB_CLK_SEL_EN BIT(26)
+#define ARMADA_37XX_NB_TBG_EN BIT(28)
+#define ARMADA_37XX_NB_DIV_EN BIT(29)
+#define ARMADA_37XX_NB_VDD_EN BIT(30)
+#define ARMADA_37XX_NB_DFS_EN BIT(31)
+#define ARMADA_37XX_NB_CPU_LOAD 0x30
+#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3
+#define ARMADA_37XX_DVFS_LOAD_0 0
+#define ARMADA_37XX_DVFS_LOAD_1 1
+#define ARMADA_37XX_DVFS_LOAD_2 2
+#define ARMADA_37XX_DVFS_LOAD_3 3
+
+/*
+ * On Armada 37xx, the power management unit manages 4 levels of CPU
+ * load; each level can be associated with a CPU clock source, a CPU
+ * divider, a VDD level, etc.
+ */
+#define LOAD_LEVEL_NR 4
+
+struct armada_37xx_dvfs {
+ u32 cpu_freq_max;
+ u8 divider[LOAD_LEVEL_NR];
+};
+
+static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+ {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
+ {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
+ {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
+};
+
+static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(armada_37xx_dvfs); i++) {
+ if (freq == armada_37xx_dvfs[i].cpu_freq_max)
+ return &armada_37xx_dvfs[i];
+ }
+
+ pr_err("Unsupported CPU frequency %d MHz\n", freq/1000000);
+ return NULL;
+}
+
+/*
+ * Set up the four load levels managed by the hardware. Once the four
+ * levels are configured, DVFS can be enabled.
+ */
+static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ struct clk *clk, u8 *divider)
+{
+ int load_lvl;
+ struct clk *parent;
+
+ for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+ unsigned int reg, mask, val, offset = 0;
+
+ if (load_lvl <= ARMADA_37XX_DVFS_LOAD_1)
+ reg = ARMADA_37XX_NB_L0L1;
+ else
+ reg = ARMADA_37XX_NB_L2L3;
+
+ if (load_lvl == ARMADA_37XX_DVFS_LOAD_0 ||
+ load_lvl == ARMADA_37XX_DVFS_LOAD_2)
+ offset += ARMADA_37XX_NB_CONFIG_SHIFT;
+
+ /* Set the CPU clock source; TBG is used for all load levels */
+ val = ARMADA_37XX_NB_CLK_SEL_TBG << ARMADA_37XX_NB_CLK_SEL_OFF;
+ mask = (ARMADA_37XX_NB_CLK_SEL_MASK
+ << ARMADA_37XX_NB_CLK_SEL_OFF);
+
+ /*
+ * Set the CPU divider based on the pre-computed array in
+ * order to have balanced steps.
+ */
+ val |= divider[load_lvl] << ARMADA_37XX_NB_TBG_DIV_OFF;
+ mask |= (ARMADA_37XX_NB_TBG_DIV_MASK
+ << ARMADA_37XX_NB_TBG_DIV_OFF);
+
+ /* Set VDD divider which is actually the load level. */
+ val |= load_lvl << ARMADA_37XX_NB_VDD_SEL_OFF;
+ mask |= (ARMADA_37XX_NB_VDD_SEL_MASK
+ << ARMADA_37XX_NB_VDD_SEL_OFF);
+
+ val <<= offset;
+ mask <<= offset;
+
+ regmap_update_bits(base, reg, mask, val);
+ }
+
+ /*
+ * Set the CPU clock source: for all the levels we keep the
+ * clock source that is already configured. For this one we
+ * need to go through the clock framework.
+ */
+ parent = clk_get_parent(clk);
+ clk_set_parent(clk, parent);
+}
+
+static void __init armada37xx_cpufreq_disable_dvfs(struct regmap *base)
+{
+ unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
+ mask = ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, 0);
+}
+
+static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
+{
+ unsigned int val, reg = ARMADA_37XX_NB_CPU_LOAD,
+ mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+ /* Start with the highest load (0) */
+ val = ARMADA_37XX_DVFS_LOAD_0;
+ regmap_update_bits(base, reg, mask, val);
+
+ /* Now enable DVFS for the CPUs */
+ reg = ARMADA_37XX_NB_DYN_MOD;
+ mask = ARMADA_37XX_NB_CLK_SEL_EN | ARMADA_37XX_NB_TBG_EN |
+ ARMADA_37XX_NB_DIV_EN | ARMADA_37XX_NB_VDD_EN |
+ ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, mask);
+}
+
+static int __init armada37xx_cpufreq_driver_init(void)
+{
+ struct armada_37xx_dvfs *dvfs;
+ struct platform_device *pdev;
+ unsigned int cur_frequency;
+ struct regmap *nb_pm_base;
+ struct device *cpu_dev;
+ int load_lvl, ret;
+ struct clk *clk;
+
+ nb_pm_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
+
+ if (IS_ERR(nb_pm_base))
+ return -ENODEV;
+
+ /* Before doing any configuration of the DVFS, first disable it */
+ armada37xx_cpufreq_disable_dvfs(nb_pm_base);
+
+ /*
+ * On CPU 0 register the operating points supported (which are
+ * the nominal CPU frequency and full integer divisions of
+ * it).
+ */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("Cannot get CPU0 device\n");
+ return -ENODEV;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+ }
+
+ /* Get nominal (current) CPU frequency */
+ cur_frequency = clk_get_rate(clk);
+ if (!cur_frequency) {
+ dev_err(cpu_dev, "Failed to get clock rate for CPU\n");
+ return -EINVAL;
+ }
+
+ dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
+ if (!dvfs)
+ return -EINVAL;
+
+ armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
+
+ for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+ load_lvl++) {
+ unsigned long freq = cur_frequency / dvfs->divider[load_lvl];
+
+ ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ if (ret) {
+ /* clean up the already-added OPPs before leaving */
+ while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+ freq = cur_frequency / dvfs->divider[load_lvl];
+ dev_pm_opp_remove(cpu_dev, freq);
+ }
+ return ret;
+ }
+ }
+
+ /* Now that everything is setup, enable the DVFS at hardware level */
+ armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+
+ pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+/* late_initcall, to guarantee the driver is loaded after the A37xx clock driver */
+late_initcall(armada37xx_cpufreq_driver_init);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
+MODULE_LICENSE("GPL");
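
A worked example of the divider table above: on a 1.2 GHz part the matching entry carries the dividers {1, 2, 4, 6}, so the registration loop computes cur_frequency / divider[load_lvl] and adds OPPs at 1200, 600, 300 and 200 MHz; if any dev_pm_opp_add() fails, the inner while loop removes exactly the OPPs that were already registered before returning the error.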
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index ecc56e26f8f6..3b585e4bfac5 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -108,6 +108,14 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "marvell,armadaxp", },
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
+ { .compatible = "mediatek,mt817x", },
+ { .compatible = "mediatek,mt8173", },
+ { .compatible = "mediatek,mt8176", },
+
{ .compatible = "nvidia,tegra124", },
{ .compatible = "st,stih407", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 545946ad0752..de3d104c25d7 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -319,33 +319,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
static void cpufreq_ready(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- struct device_node *np = of_node_get(priv->cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- /*
- * For now, just loading the cooling device;
- * thermal DT code takes care of matching them.
- */
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- priv->cdev = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(priv->cdev)) {
- dev_err(priv->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(priv->cdev));
-
- priv->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ priv->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver dt_cpufreq_driver = {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 41d148af7748..421f318c0e66 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -601,19 +601,18 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
/**
* cpufreq_parse_governor - parse a governor string
*/
-static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
- struct cpufreq_governor **governor)
+static int cpufreq_parse_governor(char *str_governor,
+ struct cpufreq_policy *policy)
{
- int err = -EINVAL;
-
if (cpufreq_driver->setpolicy) {
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_PERFORMANCE;
- err = 0;
- } else if (!strncasecmp(str_governor, "powersave",
- CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_POWERSAVE;
- err = 0;
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ return 0;
+ }
+
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ return 0;
}
} else {
struct cpufreq_governor *t;
@@ -621,26 +620,31 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
mutex_lock(&cpufreq_governor_mutex);
t = find_governor(str_governor);
-
- if (t == NULL) {
+ if (!t) {
int ret;
mutex_unlock(&cpufreq_governor_mutex);
+
ret = request_module("cpufreq_%s", str_governor);
- mutex_lock(&cpufreq_governor_mutex);
+ if (ret)
+ return -EINVAL;
- if (ret == 0)
- t = find_governor(str_governor);
- }
+ mutex_lock(&cpufreq_governor_mutex);
- if (t != NULL) {
- *governor = t;
- err = 0;
+ t = find_governor(str_governor);
}
+ if (t && !try_module_get(t->owner))
+ t = NULL;
mutex_unlock(&cpufreq_governor_mutex);
+
+ if (t) {
+ policy->governor = t;
+ return 0;
+ }
}
- return err;
+
+ return -EINVAL;
}
/**
@@ -760,11 +764,14 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
if (ret != 1)
return -EINVAL;
- if (cpufreq_parse_governor(str_governor, &new_policy.policy,
- &new_policy.governor))
+ if (cpufreq_parse_governor(str_governor, &new_policy))
return -EINVAL;
ret = cpufreq_set_policy(policy, &new_policy);
+
+ if (new_policy.governor)
+ module_put(new_policy.governor->owner);
+
return ret ? ret : count;
}
@@ -1044,8 +1051,7 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (policy->last_policy)
new_policy.policy = policy->last_policy;
else
- cpufreq_parse_governor(gov->name, &new_policy.policy,
- NULL);
+ cpufreq_parse_governor(gov->name, &new_policy);
}
/* set default policy */
return cpufreq_set_policy(policy, &new_policy);
@@ -2160,7 +2166,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
mutex_unlock(&cpufreq_governor_mutex);
- return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
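
The reference handling introduced above follows the usual pin-before-unlock idiom. A generic sketch, in which the registry, lookup() and the owner field are illustrative stand-ins rather than cpufreq API:

    static DEFINE_MUTEX(registry_lock);

    static struct my_item *my_item_get(const char *name)
    {
            struct my_item *item;

            mutex_lock(&registry_lock);
            item = lookup(name);                    /* hypothetical */
            /*
             * Pin the owning module while still holding the lock; if
             * that module is mid-unload, try_module_get() fails and
             * the pointer must not escape.
             */
            if (item && !try_module_get(item->owner))
                    item = NULL;
            mutex_unlock(&registry_lock);

            return item;    /* the caller drops it with module_put() */
    }

store_scaling_governor() applies the same rule: cpufreq_parse_governor() returns with the governor module pinned, and the reference is dropped once cpufreq_set_policy() is done with new_policy.governor.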
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1e55b5790853..1572129844a5 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -27,7 +27,7 @@ struct cpufreq_stats {
unsigned int *trans_table;
};
-static int cpufreq_stats_update(struct cpufreq_stats *stats)
+static void cpufreq_stats_update(struct cpufreq_stats *stats)
{
unsigned long long cur_time = get_jiffies_64();
@@ -35,7 +35,6 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
stats->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
- return 0;
}
static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index d9b2c2de49c4..741f22e5cee3 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -25,15 +25,29 @@ static struct regulator *arm_reg;
static struct regulator *pu_reg;
static struct regulator *soc_reg;
-static struct clk *arm_clk;
-static struct clk *pll1_sys_clk;
-static struct clk *pll1_sw_clk;
-static struct clk *step_clk;
-static struct clk *pll2_pfd2_396m_clk;
-
-/* clk used by i.MX6UL */
-static struct clk *pll2_bus_clk;
-static struct clk *secondary_sel_clk;
+enum IMX6_CPUFREQ_CLKS {
+ ARM,
+ PLL1_SYS,
+ STEP,
+ PLL1_SW,
+ PLL2_PFD2_396M,
+ /* MX6UL requires two more clks */
+ PLL2_BUS,
+ SECONDARY_SEL,
+};
+#define IMX6Q_CPUFREQ_CLK_NUM 5
+#define IMX6UL_CPUFREQ_CLK_NUM 7
+
+static int num_clks;
+static struct clk_bulk_data clks[] = {
+ { .id = "arm" },
+ { .id = "pll1_sys" },
+ { .id = "step" },
+ { .id = "pll1_sw" },
+ { .id = "pll2_pfd2_396m" },
+ { .id = "pll2_bus" },
+ { .id = "secondary_sel" },
+};
static struct device *cpu_dev;
static bool free_opp;
@@ -53,7 +67,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = freq_table[index].frequency;
freq_hz = new_freq * 1000;
- old_freq = clk_get_rate(arm_clk) / 1000;
+ old_freq = clk_get_rate(clks[ARM].clk) / 1000;
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
@@ -112,29 +126,35 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
* voltage of 528MHz, so lower the CPU frequency to one
* half before changing CPU frequency.
*/
- clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
- clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+ clk_set_rate(clks[ARM].clk, (old_freq >> 1) * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk))
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_BUS].clk);
else
- clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
- clk_set_parent(step_clk, secondary_sel_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[STEP].clk, clks[SECONDARY_SEL].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_BUS].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ }
} else {
- clk_set_parent(step_clk, pll2_pfd2_396m_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
- clk_set_rate(pll1_sys_clk, new_freq * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ clk_set_parent(clks[STEP].clk, clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
} else {
/* pll1_sys needs to be enabled for divider rate change to work. */
pll1_sys_temp_enabled = true;
- clk_prepare_enable(pll1_sys_clk);
+ clk_prepare_enable(clks[PLL1_SYS].clk);
}
}
/* Ensure the arm clock divider is what we expect */
- ret = clk_set_rate(arm_clk, new_freq * 1000);
+ ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
@@ -143,7 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* PLL1 is only needed until after ARM-PODF is set. */
if (pll1_sys_temp_enabled)
- clk_disable_unprepare(pll1_sys_clk);
+ clk_disable_unprepare(clks[PLL1_SYS].clk);
/* scaling down? scale voltage after frequency */
if (new_freq < old_freq) {
@@ -174,7 +194,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- policy->clk = arm_clk;
+ policy->clk = clks[ARM].clk;
ret = cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = policy->max;
@@ -244,6 +264,43 @@ put_node:
of_node_put(np);
}
+#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
+
+static void imx6ul_opp_check_speed_grading(struct device *dev)
+{
+ struct device_node *np;
+ void __iomem *base;
+ u32 val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
+ if (!np)
+ return;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ goto put_node;
+ }
+
+ /*
+ * Speed GRADING[1:0] defines the max speed of ARM:
+ * 2b'00: Reserved;
+ * 2b'01: 528000000Hz;
+ * 2b'10: 696000000Hz;
+ * 2b'11: Reserved;
+ * We need to set the max speed of the ARM core according to the fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+ if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+ if (dev_pm_opp_disable(dev, 696000000))
+ dev_warn(dev, "failed to disable 696MHz OPP\n");
+ iounmap(base);
+put_node:
+ of_node_put(np);
+}
+
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -266,28 +323,15 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
return -ENOENT;
}
- arm_clk = clk_get(cpu_dev, "arm");
- pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
- pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
- step_clk = clk_get(cpu_dev, "step");
- pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
- if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
- IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
- dev_err(cpu_dev, "failed to get clocks\n");
- ret = -ENOENT;
- goto put_clk;
- }
-
if (of_machine_is_compatible("fsl,imx6ul") ||
- of_machine_is_compatible("fsl,imx6ull")) {
- pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
- secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
- if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
- dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
- ret = -ENOENT;
- goto put_clk;
- }
- }
+ of_machine_is_compatible("fsl,imx6ull"))
+ num_clks = IMX6UL_CPUFREQ_CLK_NUM;
+ else
+ num_clks = IMX6Q_CPUFREQ_CLK_NUM;
+
+ ret = clk_bulk_get(cpu_dev, num_clks, clks);
+ if (ret)
+ goto put_node;
arm_reg = regulator_get(cpu_dev, "arm");
pu_reg = regulator_get_optional(cpu_dev, "pu");
@@ -311,7 +355,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
- imx6q_opp_check_speed_grading(cpu_dev);
+ if (of_machine_is_compatible("fsl,imx6ul"))
+ imx6ul_opp_check_speed_grading(cpu_dev);
+ else
+ imx6q_opp_check_speed_grading(cpu_dev);
/* Because we have added the OPPs here, we must free them */
free_opp = true;
@@ -424,22 +471,11 @@ put_reg:
regulator_put(pu_reg);
if (!IS_ERR(soc_reg))
regulator_put(soc_reg);
-put_clk:
- if (!IS_ERR(arm_clk))
- clk_put(arm_clk);
- if (!IS_ERR(pll1_sys_clk))
- clk_put(pll1_sys_clk);
- if (!IS_ERR(pll1_sw_clk))
- clk_put(pll1_sw_clk);
- if (!IS_ERR(step_clk))
- clk_put(step_clk);
- if (!IS_ERR(pll2_pfd2_396m_clk))
- clk_put(pll2_pfd2_396m_clk);
- if (!IS_ERR(pll2_bus_clk))
- clk_put(pll2_bus_clk);
- if (!IS_ERR(secondary_sel_clk))
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
+put_node:
of_node_put(np);
+
return ret;
}
@@ -453,13 +489,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
if (!IS_ERR(pu_reg))
regulator_put(pu_reg);
regulator_put(soc_reg);
- clk_put(arm_clk);
- clk_put(pll1_sys_clk);
- clk_put(pll1_sw_clk);
- clk_put(step_clk);
- clk_put(pll2_pfd2_396m_clk);
- clk_put(pll2_bus_clk);
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
return 0;
}
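
The clk_bulk conversion above replaces seven clk_get()/clk_put() pairs with a single table-driven call. A minimal sketch of the pattern, with hypothetical clock names rather than the i.MX6 ones:

    #include <linux/clk.h>

    static struct clk_bulk_data my_clks[] = {
            { .id = "core" },
            { .id = "bus" },
    };

    static int my_probe(struct device *dev)
    {
            int ret;

            /* one call acquires every clock, or fails as a whole */
            ret = clk_bulk_get(dev, ARRAY_SIZE(my_clks), my_clks);
            if (ret)
                    return ret;

            /* individual clocks stay reachable as my_clks[i].clk */
            clk_set_rate(my_clks[0].clk, 792000000);

            clk_bulk_put(ARRAY_SIZE(my_clks), my_clks);
            return 0;
    }

Note how the driver exploits the num_clks argument: the clks[] table is ordered with the two i.MX6UL-only clocks last, so passing IMX6Q_CPUFREQ_CLK_NUM requests just the first five entries on i.MX6Q.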
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 93a0e88bef76..7edf7a0e5a96 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1595,15 +1595,6 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};
-static const struct pstate_funcs bxt_funcs = {
- .get_max = core_get_max_pstate,
- .get_max_physical = core_get_max_pstate_physical,
- .get_min = core_get_min_pstate,
- .get_turbo = core_get_turbo_pstate,
- .get_scaling = core_get_scaling,
- .get_val = core_get_val,
-};
-
#define ICPU(model, policy) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
(unsigned long)&policy }
@@ -1627,8 +1618,9 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
- ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
+ ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c46a12df40dd..5faa37c5b091 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -894,7 +894,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
longhaul_setup_voltagescaling();
- policy->cpuinfo.transition_latency = 200000; /* nsec */
+ policy->transition_delay_us = 200000; /* usec */
return cpufreq_table_validate_and_show(policy, longhaul_table);
}
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index e0d5090b303d..8c04dddd3c28 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -310,28 +310,8 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
- struct device_node *np = of_node_get(info->cpu_dev->of_node);
- u32 capacitance = 0;
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
-
- info->cdev = of_cpufreq_power_cooling_register(np,
- policy, capacitance, NULL);
-
- if (IS_ERR(info->cdev)) {
- dev_err(info->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(info->cdev));
-
- info->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ info->cdev = of_cpufreq_cooling_register(policy);
}
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
@@ -574,6 +554,7 @@ static struct platform_driver mtk_cpufreq_platdrv = {
/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
{ .compatible = "mediatek,mt7622", },
{ .compatible = "mediatek,mt7623", },
{ .compatible = "mediatek,mt817x", },
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index ed915ee85dd9..31513bd42705 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -76,12 +76,6 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
return PTR_ERR(clk);
}
- /*
- * In case of a failure of dev_pm_opp_add(), we don't
- * bother with cleaning up the registered OPP (there's
- * no function to do so), and simply cancel the
- * registration of the cpufreq device.
- */
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
if (ret) {
clk_put(clk);
@@ -91,7 +85,8 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
if (ret) {
clk_put(clk);
- return ret;
+ dev_err(cpu_dev, "Failed to register OPPs\n");
+ goto opp_register_failed;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
@@ -99,9 +94,16 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ clk_put(clk);
}
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
+
+opp_register_failed:
+ /* As registration has failed, remove all the OPPs for all CPUs */
+ dev_pm_opp_cpumask_remove_table(cpu_possible_mask);
+
+ return ret;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index b6d7c4c98d0a..29cdec198657 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -29,6 +29,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/hashtable.h>
#include <trace/events/power.h>
#include <asm/cputhreads.h>
@@ -38,14 +39,13 @@
#include <asm/opal.h>
#include <linux/timer.h>
-#define POWERNV_MAX_PSTATES 256
+#define POWERNV_MAX_PSTATES_ORDER 8
+#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
-#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
-#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF)
-#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF)
#define MAX_RAMP_DOWN_TIME 5120
/*
@@ -94,6 +94,27 @@ struct global_pstate_info {
};
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
+/**
+ * struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap
+ * indexed by a function of pstate id.
+ *
+ * @pstate_id: pstate id for this entry.
+ *
+ * @cpufreq_table_idx: Index into the powernv_freqs
+ * cpufreq_frequency_table for frequency
+ * corresponding to pstate_id.
+ *
+ * @hentry: hlist_node that hooks this entry into the pstate_revmap
+ * hashtable
+ */
+struct pstate_idx_revmap_data {
+ u8 pstate_id;
+ unsigned int cpufreq_table_idx;
+ struct hlist_node hentry;
+};
+
static bool rebooting, throttled, occ_reset;
static const char * const throttle_reason[] = {
@@ -148,39 +169,56 @@ static struct powernv_pstate_info {
bool wof_enabled;
} powernv_pstate_info;
-/* Use following macros for conversions between pstate_id and index */
-static inline int idx_to_pstate(unsigned int i)
+static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
+{
+ return ((pmsr_val >> shift) & 0xFF);
+}
+
+#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
+#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
+#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT)
+
+/* Use following functions for conversions between pstate_id and index */
+
+/**
+ * idx_to_pstate : Returns the pstate id corresponding to the
+ * frequency in the cpufreq frequency table
+ * powernv_freqs indexed by @i.
+ *
+ * If @i is out of bounds, this will return the pstate
+ * corresponding to the nominal frequency.
+ */
+static inline u8 idx_to_pstate(unsigned int i)
{
if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
- pr_warn_once("index %u is out of bound\n", i);
+ pr_warn_once("idx_to_pstate: index %u is out of bound\n", i);
return powernv_freqs[powernv_pstate_info.nominal].driver_data;
}
return powernv_freqs[i].driver_data;
}
-static inline unsigned int pstate_to_idx(int pstate)
+/**
+ * pstate_to_idx : Returns the index in the cpufreq frequency table
+ * powernv_freqs for the frequency whose corresponding
+ * pstate id is @pstate.
+ *
+ * If no frequency corresponding to @pstate is found,
+ * this will return the index of the nominal
+ * frequency.
+ */
+static unsigned int pstate_to_idx(u8 pstate)
{
- int min = powernv_freqs[powernv_pstate_info.min].driver_data;
- int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+ unsigned int key = pstate % POWERNV_MAX_PSTATES;
+ struct pstate_idx_revmap_data *revmap_data;
- if (min > 0) {
- if (unlikely((pstate < max) || (pstate > min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
- } else {
- if (unlikely((pstate > max) || (pstate < min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
+ hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
+ if (revmap_data->pstate_id == pstate)
+ return revmap_data->cpufreq_table_idx;
}
- /*
- * abs() is deliberately used so that is works with
- * both monotonically increasing and decreasing
- * pstate values
- */
- return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
+
+ pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
+ return powernv_pstate_info.nominal;
}
static inline void reset_gpstates(struct cpufreq_policy *policy)
@@ -247,7 +285,7 @@ static int init_powernv_pstates(void)
powernv_pstate_info.wof_enabled = true;
next:
- pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+ pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
@@ -278,19 +316,30 @@ next:
powernv_pstate_info.nr_pstates = nr_pstates;
pr_debug("NR PStates %d\n", nr_pstates);
+
for (i = 0; i < nr_pstates; i++) {
u32 id = be32_to_cpu(pstate_ids[i]);
u32 freq = be32_to_cpu(pstate_freqs[i]);
+ struct pstate_idx_revmap_data *revmap_data;
+ unsigned int key;
pr_debug("PState id %d freq %d MHz\n", id, freq);
powernv_freqs[i].frequency = freq * 1000; /* kHz */
- powernv_freqs[i].driver_data = id;
+ powernv_freqs[i].driver_data = id & 0xFF;
+
+ revmap_data = (struct pstate_idx_revmap_data *)
+ kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+
+ revmap_data->pstate_id = id & 0xFF;
+ revmap_data->cpufreq_table_idx = i;
+ key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES;
+ hash_add(pstate_revmap, &revmap_data->hentry, key);
if (id == pstate_max)
powernv_pstate_info.max = i;
- else if (id == pstate_nominal)
+ if (id == pstate_nominal)
powernv_pstate_info.nominal = i;
- else if (id == pstate_min)
+ if (id == pstate_min)
powernv_pstate_info.min = i;
if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
@@ -307,14 +356,13 @@ next:
}
/* Returns the CPU frequency corresponding to the pstate_id. */
-static unsigned int pstate_id_to_freq(int pstate_id)
+static unsigned int pstate_id_to_freq(u8 pstate_id)
{
int i;
i = pstate_to_idx(pstate_id);
if (i >= powernv_pstate_info.nr_pstates || i < 0) {
- pr_warn("PState id %d outside of PState table, "
- "reporting nominal id %d instead\n",
+ pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
i = powernv_pstate_info.nominal;
}
@@ -420,8 +468,8 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val)
*/
struct powernv_smp_call_data {
unsigned int freq;
- int pstate_id;
- int gpstate_id;
+ u8 pstate_id;
+ u8 gpstate_id;
};
/*
@@ -438,22 +486,15 @@ struct powernv_smp_call_data {
static void powernv_read_cpu_freq(void *arg)
{
unsigned long pmspr_val;
- s8 local_pstate_id;
struct powernv_smp_call_data *freq_data = arg;
pmspr_val = get_pmspr(SPRN_PMSR);
-
- /*
- * The local pstate id corresponds bits 48..55 in the PMSR.
- * Note: Watch out for the sign!
- */
- local_pstate_id = (pmspr_val >> 48) & 0xFF;
- freq_data->pstate_id = local_pstate_id;
+ freq_data->pstate_id = extract_local_pstate(pmspr_val);
freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
- pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
- raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
- freq_data->freq);
+ pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
+ raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+ freq_data->freq);
}
/*
@@ -515,21 +556,21 @@ static void powernv_cpufreq_throttle_check(void *data)
struct chip *chip;
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
- int pmsr_pmax;
+ u8 pmsr_pmax;
unsigned int pmsr_pmax_idx;
pmsr = get_pmspr(SPRN_PMSR);
chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
- pmsr_pmax = (s8)PMSR_MAX(pmsr);
+ pmsr_pmax = extract_max_pstate(pmsr);
pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
if (pmsr_pmax_idx != powernv_pstate_info.max) {
if (chip->throttled)
goto next;
chip->throttled = true;
if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
- pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
+ pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
cpu, chip->id, pmsr_pmax,
idx_to_pstate(powernv_pstate_info.nominal));
chip->throttle_sub_turbo++;
@@ -645,8 +686,8 @@ void gpstate_timer_handler(struct timer_list *t)
* value. Hence, read from PMCR to get correct data.
*/
val = get_pmspr(SPRN_PMCR);
- freq_data.gpstate_id = (s8)GET_GPSTATE(val);
- freq_data.pstate_id = (s8)GET_LPSTATE(val);
+ freq_data.gpstate_id = extract_global_pstate(val);
+ freq_data.pstate_id = extract_local_pstate(val);
if (freq_data.gpstate_id == freq_data.pstate_id) {
reset_gpstates(policy);
spin_unlock(&gpstates->gpstate_lock);
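
The pstate_revmap above is the generic kernel hashtable used as a reverse map, replacing the removed abs()-based arithmetic that assumed contiguous pstate ids. A self-contained sketch of the same idiom, with illustrative names:

    #include <linux/hashtable.h>
    #include <linux/slab.h>

    static DEFINE_HASHTABLE(revmap, 8);     /* 2^8 buckets */

    struct revmap_entry {
            u8 key;
            unsigned int idx;
            struct hlist_node hentry;
    };

    static int revmap_add(u8 key, unsigned int idx)
    {
            struct revmap_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

            if (!e)
                    return -ENOMEM;
            e->key = key;
            e->idx = idx;
            hash_add(revmap, &e->hentry, key);
            return 0;
    }

    static unsigned int revmap_lookup(u8 key, unsigned int fallback)
    {
            struct revmap_entry *e;

            /* walk only the bucket 'key' hashes to; recheck on collision */
            hash_for_each_possible(revmap, e, hentry, key) {
                    if (e->key == key)
                            return e->idx;
            }
            return fallback;
    }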
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 4ada55b8856e..0562761a3dec 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -275,20 +275,8 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
{
struct cpu_data *cpud = policy->driver_data;
- struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
- if (of_find_property(np, "#cooling-cells", NULL)) {
- cpud->cdev = of_cpufreq_cooling_register(np, policy);
-
- if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
- pr_err("cpu%d is not running as cooling device: %ld\n",
- policy->cpu, PTR_ERR(cpud->cdev));
-
- cpud->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ cpud->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver qoriq_cpufreq_driver = {
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 05d299052c5c..247fcbfa4cb5 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -18,27 +18,89 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/export.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
#include <linux/types.h>
-#include "arm_big_little.h"
+struct scpi_data {
+ struct clk *clk;
+ struct device *cpu_dev;
+ struct thermal_cooling_device *cdev;
+};
static struct scpi_ops *scpi_ops;
-static int scpi_get_transition_latency(struct device *cpu_dev)
+static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = clk_get_rate(priv->clk);
+
+ return rate / 1000;
+}
+
+static int
+scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct scpi_data *priv = policy->driver_data;
+ u64 rate = policy->freq_table[index].frequency * 1000;
+ int ret;
+
+ ret = clk_set_rate(priv->clk, rate);
+ if (!ret && (clk_get_rate(priv->clk) != rate))
+ ret = -EIO;
+
+ return ret;
+}
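The read-back after clk_set_rate() above guards against clock trees that silently round or clamp the requested rate. The same idiom in isolation (sketch):

/* Sketch: treat a rounded/clamped rate as an I/O error. */
ret = clk_set_rate(clk, target_hz);
if (!ret && clk_get_rate(clk) != target_hz)
	ret = -EIO;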
+
+static int
+scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return scpi_ops->get_transition_latency(cpu_dev);
+ int cpu, domain, tdomain;
+ struct device *tcpu_dev;
+
+ domain = scpi_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev)
+ continue;
+
+ tdomain = scpi_ops->device_domain_id(tcpu_dev);
+ if (tdomain == domain)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ return 0;
}
-static int scpi_init_opp_table(const struct cpumask *cpumask)
+static int scpi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+ unsigned int latency;
+ struct device *cpu_dev;
+ struct scpi_data *priv;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
ret = scpi_ops->add_opps_to_device(cpu_dev);
if (ret) {
@@ -46,32 +108,133 @@ static int scpi_init_opp_table(const struct cpumask *cpumask)
return ret;
}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
- if (ret)
+ ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
+ __func__, cpu_dev->id);
+ goto out_free_cpufreq_table;
+ }
+
+ policy->driver_data = priv;
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
+ ret);
+ goto out_put_clk;
+ }
+
+ /* SCPI allows DVFS requests for any domain from any CPU */
+ policy->dvfs_possible_from_any_cpu = true;
+
+ latency = scpi_ops->get_transition_latency(cpu_dev);
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+
+ policy->fast_switch_possible = false;
+ return 0;
+
+out_put_clk:
+ clk_put(priv->clk);
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ dev_pm_opp_cpumask_remove_table(policy->cpus);
+
return ret;
}
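The error path above unwinds in strict reverse order of acquisition, one label per resource, which is the usual kernel goto idiom. Its skeleton, with hypothetical helpers:

/* Sketch: each label releases exactly what was acquired before it. */
static int init_skeleton(void)
{
	int ret;

	ret = acquire_a();		/* hypothetical */
	if (ret)
		return ret;

	ret = acquire_b();		/* hypothetical */
	if (ret)
		goto out_release_a;

	return 0;

out_release_a:
	release_a();
	return ret;
}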
-static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
- .name = "scpi",
- .get_transition_latency = scpi_get_transition_latency,
- .init_opp_table = scpi_init_opp_table,
- .free_opp_table = dev_pm_opp_cpumask_remove_table,
+static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+
+ cpufreq_cooling_unregister(priv->cdev);
+ clk_put(priv->clk);
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ kfree(priv);
+ dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+
+ return 0;
+}
+
+static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+ struct thermal_cooling_device *cdev;
+
+ cdev = of_cpufreq_cooling_register(policy);
+ if (!IS_ERR(cdev))
+ priv->cdev = cdev;
+}
+
+static struct cpufreq_driver scpi_cpufreq_driver = {
+ .name = "scpi-cpufreq",
+ .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .attr = cpufreq_generic_attr,
+ .get = scpi_cpufreq_get_rate,
+ .init = scpi_cpufreq_init,
+ .exit = scpi_cpufreq_exit,
+ .ready = scpi_cpufreq_ready,
+ .target_index = scpi_cpufreq_set_target,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
{
+ int ret;
+
scpi_ops = get_scpi_ops();
if (!scpi_ops)
return -EIO;
- return bL_cpufreq_register(&scpi_cpufreq_ops);
+ ret = cpufreq_register_driver(&scpi_cpufreq_driver);
+ if (ret)
+ dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n",
+ __func__, ret);
+ return ret;
}
static int scpi_cpufreq_remove(struct platform_device *pdev)
{
- bL_cpufreq_unregister(&scpi_cpufreq_ops);
+ cpufreq_unregister_driver(&scpi_cpufreq_driver);
scpi_ops = NULL;
return 0;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 923317f03b4b..a099b7bf74cd 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -50,6 +51,7 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_mask;
unsigned long efuse_shift;
unsigned long rev_offset;
+ bool multi_regulator;
};
struct ti_cpufreq_data {
@@ -57,6 +59,7 @@ struct ti_cpufreq_data {
struct device_node *opp_node;
struct regmap *syscon;
const struct ti_cpufreq_soc_data *soc_data;
+ struct opp_table *opp_table;
};
static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -95,6 +98,7 @@ static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_offset = 0x07fc,
.efuse_mask = 0x1fff,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data am4x_soc_data = {
@@ -103,6 +107,7 @@ static struct ti_cpufreq_soc_data am4x_soc_data = {
.efuse_offset = 0x0610,
.efuse_mask = 0x3f,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data dra7_soc_data = {
@@ -111,6 +116,7 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
.efuse_mask = 0xf80000,
.efuse_shift = 19,
.rev_offset = 0x204,
+ .multi_regulator = true,
};
/**
@@ -195,12 +201,14 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{},
};
-static int ti_cpufreq_init(void)
+static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
struct device_node *np;
const struct of_device_id *match;
+ struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
+ const char * const reg_names[] = {"vdd", "vbb"};
int ret;
np = of_find_node_by_path("/");
@@ -247,16 +255,29 @@ static int ti_cpufreq_init(void)
if (ret)
goto fail_put_node;
- ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
- version, VERSION_COUNT));
- if (ret) {
+ ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
+ version, VERSION_COUNT);
+ if (IS_ERR(ti_opp_table)) {
dev_err(opp_data->cpu_dev,
"Failed to set supported hardware\n");
+ ret = PTR_ERR(ti_opp_table);
goto fail_put_node;
}
- of_node_put(opp_data->opp_node);
+ opp_data->opp_table = ti_opp_table;
+
+ if (opp_data->soc_data->multi_regulator) {
+ ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
+ reg_names,
+ ARRAY_SIZE(reg_names));
+ if (IS_ERR(ti_opp_table)) {
+ dev_pm_opp_put_supported_hw(opp_data->opp_table);
+ ret = PTR_ERR(ti_opp_table);
+ goto fail_put_node;
+ }
+ }
+ of_node_put(opp_data->opp_node);
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
@@ -269,4 +290,22 @@ free_opp_data:
return ret;
}
-device_initcall(ti_cpufreq_init);
+
+static int ti_cpufreq_init(void)
+{
+ platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+ return 0;
+}
+module_init(ti_cpufreq_init);
+
+static struct platform_driver ti_cpufreq_driver = {
+ .probe = ti_cpufreq_probe,
+ .driver = {
+ .name = "ti-cpufreq",
+ },
+};
+module_platform_driver(ti_cpufreq_driver);
+
+MODULE_DESCRIPTION("TI CPUFreq/OPP hw-supported driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
+MODULE_LICENSE("GPL v2");
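Converting the bare initcall into a self-registered platform device bound by a platform driver gives ti_cpufreq_probe() a struct device, so it can participate in deferred probing and ordinary driver-core error handling. The shape of the pattern, with hypothetical names:

/* Sketch: one module registers both the device and its driver. */
static struct platform_driver example_driver = {
	.probe	= example_probe,	/* may return -EPROBE_DEFER */
	.driver	= { .name = "example" },
};

static int __init example_init(void)
{
	platform_device_register_simple("example", -1, NULL, 0);
	return platform_driver_register(&example_driver);
}
module_init(example_init);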
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 4e78263e34a4..5d359aff3cc5 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -36,14 +36,15 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
/**
* cpuidle_switch_governor - changes the governor
* @gov: the new target governor
- *
- * NOTE: "gov" can be NULL to specify disabled
* Must be called with cpuidle_lock acquired.
*/
int cpuidle_switch_governor(struct cpuidle_governor *gov)
{
struct cpuidle_device *dev;
+ if (!gov)
+ return -EINVAL;
+
if (gov == cpuidle_curr_governor)
return 0;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 47ec920d5b71..4b741b83e23f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -723,7 +723,6 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- select CRYPTO_SHA384
select CRYPTO_SHA512
help
Enables the driver for the on-chip crypto accelerator
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index eeaf27859d80..ea83d0bff0e9 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -256,10 +256,6 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
return true;
- /* CCM - fix CBC MAC mismatch in special case */
- if (is_ccm && decrypt && !req->assoclen)
- return true;
-
return false;
}
@@ -330,7 +326,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
SA_CIPHER_ALG_AES,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index c44954e274bc..76f459ad2821 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -128,7 +128,14 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
- writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ if (dev->is_revb) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else {
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ }
}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
@@ -275,14 +282,12 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
- memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
-
return 0;
}
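dma_zalloc_coherent() folds the zeroing into the allocation, which is what lets the explicit memset() above go away. The equivalence, spelled out (sketch):

/* dma_zalloc_coherent(dev, size, &pa, gfp) behaves like: */
void *p = dma_alloc_coherent(dev, size, &pa, gfp);
if (p)
	memset(p, 0, size);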
@@ -570,15 +575,14 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
- struct aead_request *aead_req;
- struct crypto4xx_ctx *ctx;
+ struct aead_request *aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
struct scatterlist *dst = pd_uinfo->dest_va;
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
int err = 0;
- aead_req = container_of(pd_uinfo->async_req, struct aead_request,
- base);
- ctx = crypto_tfm_ctx(aead_req->base.tfm);
-
if (pd_uinfo->using_sd) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
@@ -590,38 +594,39 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
/* append icv at the end */
- size_t cp_len = crypto_aead_authsize(
- crypto_aead_reqtfm(aead_req));
- u32 icv[cp_len];
-
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
cp_len);
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
cp_len, 1);
+ } else {
+ /* check icv at the end */
+ scatterwalk_map_and_copy(icv, aead_req->src,
+ aead_req->assoclen + aead_req->cryptlen -
+ cp_len, cp_len, 0);
+
+ crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+
+ if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+ err = -EBADMSG;
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd->pd_ctl.bf.status & 0xff) {
- if (pd->pd_ctl.bf.status & 0x1) {
- /* authentication error */
- err = -EBADMSG;
- } else {
- if (!__ratelimit(&dev->aead_ratelimit)) {
- if (pd->pd_ctl.bf.status & 2)
- pr_err("pad fail error\n");
- if (pd->pd_ctl.bf.status & 4)
- pr_err("seqnum fail\n");
- if (pd->pd_ctl.bf.status & 8)
- pr_err("error _notify\n");
- pr_err("aead return err status = 0x%02x\n",
- pd->pd_ctl.bf.status & 0xff);
- pr_err("pd pad_ctl = 0x%08x\n",
- pd->pd_ctl.bf.pd_pad_ctl);
- }
- err = -EINVAL;
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
}
+ err = -EINVAL;
}
if (pd_uinfo->state & PD_ENTRY_BUSY)
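The decrypt path now verifies the ICV in software with crypto_memneq(), whose runtime does not depend on where the first mismatching byte occurs, so the comparison leaks no timing information; the stack buffer above stays bounded because the authsize cannot exceed the algorithm's maxauthsize. The pattern in isolation (sketch):

#include <crypto/algapi.h>	/* crypto_memneq() */

/* Sketch: constant-time ICV verification. */
static int verify_icv(const u8 *calc, const u8 *recv, unsigned int len)
{
	return crypto_memneq(calc, recv, len) ? -EBADMSG : 0;
}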
@@ -1070,21 +1075,29 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
/**
* Top Half of isr.
*/
-static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
+ u32 clr_val)
{
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- if (!core_dev->dev->ce_base)
- return 0;
-
- writel(PPC4XX_INTERRUPT_CLR,
- core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
tasklet_schedule(&core_dev->tasklet);
return IRQ_HANDLED;
}
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
+ PPC4XX_TMO_ERR_INT);
+}
+
/**
* Supported Crypto Algorithms
*/
@@ -1266,6 +1279,8 @@ static int crypto4xx_probe(struct platform_device *ofdev)
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ u32 pvr;
+ bool is_revb = true;
rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
@@ -1282,6 +1297,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ is_revb = false;
} else if (of_find_compatible_node(NULL, NULL,
"amcc,ppc460sx-crypto")) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
@@ -1304,7 +1320,22 @@ static int crypto4xx_probe(struct platform_device *ofdev)
if (!core_dev->dev)
goto err_alloc_dev;
+ /*
+ * Older versions of the 460EX/GT have a hardware bug and hence
+ * do not support H/W-based security interrupt coalescing.
+ */
+ pvr = mfspr(SPRN_PVR);
+ if (is_revb && ((pvr >> 4) == 0x130218A)) {
+ u32 min = PVR_MIN(pvr);
+
+ if (min < 4) {
+ dev_info(dev, "RevA detected - disable interrupt coalescing\n");
+ is_revb = false;
+ }
+ }
+
core_dev->dev->core_dev = core_dev;
+ core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
@@ -1325,13 +1356,6 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- /* Register for Crypto isr, Crypto Engine IRQ */
- core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
- if (rc)
- goto err_request_irq;
-
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
@@ -1339,6 +1363,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
goto err_iomap;
}
+ /* Register for Crypto isr, Crypto Engine IRQ */
+ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ rc = request_irq(core_dev->irq, is_revb ?
+ crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler, 0,
+ KBUILD_MODNAME, dev);
+ if (rc)
+ goto err_request_irq;
+
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1352,11 +1385,11 @@ static int crypto4xx_probe(struct platform_device *ofdev)
return 0;
err_start_dev:
- iounmap(core_dev->dev->ce_base);
-err_iomap:
free_irq(core_dev->irq, dev);
err_request_irq:
irq_dispose_mapping(core_dev->irq);
+ iounmap(core_dev->dev->ce_base);
+err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
@@ -1397,7 +1430,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
.driver = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 8ac3bd37203b..23b726da6534 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -28,8 +28,6 @@
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
-#define MODULE_NAME "crypto4xx"
-
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -82,7 +80,6 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
- char *name;
void __iomem *ce_base;
void __iomem *trng_base;
@@ -109,6 +106,7 @@ struct crypto4xx_device {
struct list_head alg_list; /* List of algorithms supported
by this device */
struct ratelimit_state aead_ratelimit;
+ bool is_revb;
};
struct crypto4xx_core_device {
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 0a22ec5d1a96..472331787e04 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -121,13 +121,15 @@
#define PPC4XX_PD_SIZE 6
#define PPC4XX_CTX_DONE_INT 0x2000
#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
#define PPC4XX_BYTE_ORDER 0x22222
#define PPC4XX_INTERRUPT_CLR 0x3ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
#define PPC4XX_TRNG_EN 0x00020000
-#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_DESCR_CNT 7
#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
/**
* all following defines are ad hoc
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 677ca17fd223..5e63742b0d22 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -92,7 +92,7 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
if (!rng)
goto err_out;
- rng->name = MODULE_NAME;
+ rng->name = KBUILD_MODNAME;
rng->data_present = ppc4xx_trng_data_present;
rng->data_read = ppc4xx_trng_data_read;
rng->priv = (unsigned long) dev;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 456278440863..0fb8bbf41a8d 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
@@ -1934,7 +1935,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
// The HW omits the initial increment of the counter field.
- crypto_inc(req_ctx->hw_ctx.J0+12, 4);
+ memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
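For a 96-bit IV, NIST SP 800-38D defines the GCM pre-counter block as J0 = IV || 0^31 || 1, so the counter word can be stored directly instead of incrementing a zeroed field. A sketch:

/* J0 for GCM with a 96-bit IV: IV || 0x00000001 (big-endian). */
u8 j0[16];
memcpy(j0, iv, 12);			/* GCM_AES_IV_SIZE == 12 */
memcpy(j0 + 12, "\x00\x00\x00\x01", 4);	/* counter starts at 1 */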
@@ -2956,7 +2957,7 @@ static struct aead_alg aead_algos[] = {
.setkey = artpec6_crypto_aead_set_key,
.encrypt = artpec6_crypto_aead_encrypt,
.decrypt = artpec6_crypto_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
@@ -3041,9 +3042,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
variant = (enum artpec6_crypto_variant)match->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index ce70b44d0fb6..2b75f95bbe1b 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -42,7 +42,6 @@
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
-#include <crypto/aes.h>
#include <crypto/sha3.h>
#include "util.h"
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index a118b9bed669..bfbf8bf77f03 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -494,7 +494,8 @@ static struct ahash_alg algs = {
.cra_driver_name = DRIVER_NAME,
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
.cra_alignmask = 3,
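CRYPTO_ALG_OPTIONAL_KEY marks a hash whose setkey() exists but is not mandatory (for CRC-style ahashes the key only seeds the initial state), so the API permits unkeyed use instead of failing with -ENOKEY. Unkeyed use that the flag allows, sketched with a hypothetical allocation:

/* Sketch: with OPTIONAL_KEY, digesting without a prior setkey() is valid. */
struct crypto_ahash *tfm = crypto_alloc_ahash("crc32", 0, 0);
if (!IS_ERR(tfm)) {
	/* ... set up an ahash_request and call crypto_ahash_digest()
	 * without calling crypto_ahash_setkey() first ... */
}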
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index baa8dd52472d..2188235be02d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -108,6 +108,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
- false);
+ false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_enc:
/*
@@ -266,9 +271,9 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
- nonce, ctx1_iv_off, false);
+ nonce, ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -300,9 +305,9 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, false);
+ ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_givenc:
return 0;
@@ -346,7 +351,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -363,7 +368,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -405,7 +410,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -422,7 +427,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -465,7 +470,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -482,7 +487,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -503,6 +508,7 @@ static int aead_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -517,6 +523,27 @@ static int aead_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -527,12 +554,14 @@ static int aead_setkey(struct crypto_aead *aead,
/* append encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
return aead_set_sh_desc(aead);
badkey:
@@ -552,7 +581,7 @@ static int gcm_setkey(struct crypto_aead *aead,
#endif
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
ctx->cdata.keylen = keylen;
return gcm_set_sh_desc(aead);
@@ -580,7 +609,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4106_set_sh_desc(aead);
}
@@ -606,7 +635,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4543_set_sh_desc(aead);
}
@@ -625,7 +654,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -648,9 +676,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
@@ -658,21 +685,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -691,23 +718,21 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -979,9 +1004,6 @@ static void init_aead_job(struct aead_request *req,
append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize,
out_options);
-
- /* REG3 = assoclen */
- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
static void init_gcm_job(struct aead_request *req,
@@ -996,6 +1018,7 @@ static void init_gcm_job(struct aead_request *req,
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
/* BUG This should not be specific to generic GCM. */
last = 0;
@@ -1022,6 +1045,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -1045,6 +1069,15 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt);
+ /*
+ * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
+ * having DPOVRD as destination.
+ */
+ if (ctrlpriv->era < 3)
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ else
+ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
@@ -3228,9 +3261,11 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
@@ -3238,10 +3273,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx,
sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -3269,7 +3310,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3279,14 +3320,15 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
offsetof(struct caam_ctx, sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
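The new ctx->dir exists because, with DKP, the SEC derives the split key inside the shared descriptor and writes it back over the plain key, so on Era >= 6 the key and descriptor buffers must be mapped for device writes as well as reads. The selection rule in isolation (sketch):

/* Sketch: DKP overwrites the key in place, so the device needs
 * write access to the buffer, not just read access. */
ctx->dir = (era >= 6 && uses_dkp) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;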
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 530c14ee32de..ceb93fbb76e6 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *desc, u32 type)
* cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
* (non-protocol) with no (null) encryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
- KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* assoclen + cryptlen = seqinlen */
@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
* cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
* (non-protocol) with no (null) decryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* Class 2 operation */
@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
static void init_sh_desc_key_aead(u32 * const desc,
struct alginfo * const cdata,
struct alginfo * const adata,
- const bool is_rfc3686, u32 *nonce)
+ const bool is_rfc3686, u32 *nonce, int era)
{
u32 *key_jump_cmd;
unsigned int enckeylen = cdata->keylen;
@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * const desc,
if (is_rfc3686)
enckeylen -= CTR_RFC3686_NONCE_SIZE;
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, enckeylen,
@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * const desc,
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
+ int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi)
+ const u32 ctx1_iv_off, const bool is_qi, int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
- else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
+ CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
+ CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi)
+ const bool is_qi, int era)
{
u32 geniv, moveiv;
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
if (is_qi) {
u32 *wait_load_cmd;
@@ -528,8 +561,13 @@ copy_iv:
OP_ALG_ENCRYPT);
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -1075,7 +1113,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1140,7 +1178,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1209,7 +1247,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index e412ec8f7005..5f9445ae2114 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -43,28 +43,28 @@
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi);
+ const u32 ctx1_iv_off, const bool is_qi, int era);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index f9f08fce4356..4aecc9435f69 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -53,6 +53,7 @@ struct caam_ctx {
u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_enc:
/* aead_decrypt shared descriptor */
@@ -149,7 +151,8 @@ skip_enc:
cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, alg->caam.geniv,
- is_rfc3686, nonce, ctx1_iv_off, true);
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ ctrlpriv->era);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -176,7 +179,7 @@ skip_enc:
cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_givenc:
return 0;
@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
/* append encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
@@ -272,7 +298,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
int ret = 0;
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -295,9 +320,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
@@ -356,10 +380,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts ablkcipher encrypt, decrypt shared descriptors */
@@ -668,7 +690,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -905,7 +927,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -1058,7 +1080,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, GIVENCRYPT, 0, 0);
@@ -2123,7 +2145,8 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
struct caam_drv_private *priv;
@@ -2137,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ ctx->dir);
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
@@ -2149,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->qidev = priv->qidev;
spin_lock_init(&ctx->lock);
@@ -2167,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2177,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -2186,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 616720a04e7a..0beb28196e20 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,6 +107,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
* read and write to seqout
*/
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx)
+ struct caam_hash_ctx *ctx, bool import_ctx,
+ int era)
{
u32 op = ctx->adata.algtype;
u32 *skip_key_load;
@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6)
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
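+		/* Era >= 6: have the CAAM derive the split key via DKP */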
+ append_proto_dkp(desc, &ctx->adata);
set_jump_tgt_here(desc, skip_key_load);
@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ ctx->adata.key_virt = ctx->key;
+
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
u8 *hashed_key = NULL;
@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
- CAAM_MAX_HASH_KEY_SIZE);
- if (ret)
- goto bad_free_key;
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.key_inline = true;
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad, 1);
-#endif
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ memcpy(ctx->key, key, keylen);
+ } else {
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
+ keylen, CAAM_MAX_HASH_KEY_SIZE);
+ if (ret)
+ goto bad_free_key;
+ }
kfree(hashed_key);
return ahash_set_sh_desc(ahash);
@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 027e121c6f70..75d280cb2dc0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -611,6 +611,8 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
+ ctrlpriv->era = caam_get_era();
+
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
@@ -742,7 +744,7 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+ ctrlpriv->era);
dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present,
caam_dpaa2 ? "yes" : "no");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 8142de7ba050..f76ff160a02c 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -444,6 +444,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
@@ -1093,6 +1105,22 @@
/* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
/* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004
@@ -1452,6 +1480,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index ba1ca0806f0a..d4256fa4a1d6 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -109,7 +109,7 @@ static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 * const desc, void *data, int len)
+static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 * const desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -271,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -312,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -452,7 +452,7 @@ struct alginfo {
unsigned int keylen_pad;
union {
dma_addr_t key_dma;
- void *key_virt;
+ const void *key_virt;
};
bool key_inline;
};
@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1;
}
+/**
+ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * keylen should be the length of initial key, while keylen_pad
+ *         keylen should be the length of the initial key, while
+ *         keylen_pad is the length of the derived (split) key.
+ * SHA256, SHA384, SHA512}.
+ */
+static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
+{
+ u32 protid;
+
+ /*
+ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
+ * to OP_PCLID_DKP_{MD5, SHA*}
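+	 * (e.g. OP_ALG_ALGSEL_SHA256 = 0x43 -> submask 0x03; OR'ing in 0x20
+	 * gives 0x23 == OP_PCLID_DKP_SHA256, both fields using the same shift)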
+ */
+ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
+ (0x20 << OP_ALG_ALGSEL_SHIFT);
+
+ if (adata->key_inline) {
+ int words;
+
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+ adata->keylen);
+ append_data(desc, adata->key_virt, adata->keylen);
+
+ /* Reserve space in descriptor buffer for the derived key */
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
+ if (words)
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
+ } else {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
+ adata->keylen);
+ append_ptr(desc, adata->key_dma);
+ }
+}
+
#endif /* DESC_CONSTR_H */
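
A minimal usage sketch of the new helper (illustrative only, not part of the
patch: the HMAC-SHA256 selection, the bare HDR_SHARE_SERIAL header and the
key/keylen parameters are assumptions, mirroring the caamhash changes in this
series):

	#include "desc_constr.h"
	#include "key_gen.h"	/* split_key_len() */

	/* Sketch: build a shared descriptor that derives the split key
	 * in-descriptor; assumes an Era >= 6 device (DKP support).
	 */
	static void sketch_dkp_shdesc(u32 *desc, const u8 *key,
				      unsigned int keylen)
	{
		struct alginfo adata = {
			.algtype    = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA256,
			.keylen     = keylen,	/* length of the initial key */
			.keylen_pad = split_key_len(OP_ALG_ALGSEL_SHA256),
			.key_virt   = key,	/* key placed inline in the descriptor */
			.key_inline = true,
		};

		init_sh_desc(desc, HDR_SHARE_SERIAL);
		append_proto_dkp(desc, &adata);	/* key -> split key, done by CAAM */
	}
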
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 91f1107276e5..7696a774a362 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -84,6 +84,7 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
+ int era; /* CAAM Era (internal HW revision) */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 8c79c3a153dc..312b5f042f31 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -11,36 +11,6 @@
#include "desc_constr.h"
#include "key_gen.h"
-/**
- * split_key_len - Compute MDHA split key length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key length
- */
-static inline u32 split_key_len(u32 hash)
-{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
- u32 idx;
-
- idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
-
- return (u32)(mdpadlen[idx] * 2);
-}
-
-/**
- * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key pad length
- */
-static inline u32 split_key_pad_len(u32 hash)
-{
- return ALIGN(split_key_len(hash), 16);
-}
-
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index 5db055c25bd2..818f78f6fc1a 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -6,6 +6,36 @@
*
*/
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
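+ *
+ * Example: for SHA-1 the MDHA pad size is 20 bytes (mdpadlen[1] below),
+ * so the split key is 2 * 20 = 40 bytes, and split_key_pad_len() rounds
+ * this up to ALIGN(40, 16) = 48 bytes.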
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
struct split_key_result {
struct completion completion;
int err;
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 169e66231bcf..b0ba4331944b 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -459,7 +459,8 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto request_cleanup;
}
result = (union cpt_res_s *)info->completion_addr;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4addc238a6ef..deaefd532aaa 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -6,7 +6,6 @@
#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"
-#include "nitrox_req.h"
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff02b713c6f6..ca1f0d780b61 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -21,7 +21,6 @@
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
-#include <linux/delay.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 3e104f5aa0c2..5ae9f8706f17 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_AUTHENC
+ select CRYPTO_GF128MUL
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
@@ -18,3 +19,13 @@ config CRYPTO_DEV_CHELSIO
To compile this driver as a module, choose M here: the module
will be called chcr.
+
+config CHELSIO_IPSEC_INLINE
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
+ depends on CRYPTO_DEV_CHELSIO
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+	  Enable support for inline IPSec Tx crypto offload.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index bebdf06687ad..eaecaf1ebcf3 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -2,3 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
+chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 4eed7171e2ae..34a02d690548 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -73,6 +73,29 @@
#define IV AES_BLOCK_SIZE
+static unsigned int sgl_ent_len[] = {
+ 0, 0, 16, 24, 40, 48, 64, 72, 88,
+ 96, 112, 120, 136, 144, 160, 168, 184,
+ 192, 208, 216, 232, 240, 256, 264, 280,
+ 288, 304, 312, 328, 336, 352, 360, 376
+};
+
+static unsigned int dsgl_ent_len[] = {
+ 0, 32, 32, 48, 48, 64, 64, 80, 80,
+ 112, 112, 128, 128, 144, 144, 160, 160,
+ 192, 192, 208, 208, 224, 224, 240, 240,
+ 272, 272, 288, 288, 304, 304, 320, 320
+};
+
+static u32 round_constant[11] = {
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000, 0x6C000000
+};
+
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+ unsigned char *input, int err);
+
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->aeadctx;
@@ -108,18 +131,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
return (skb->len <= SGE_MAX_WR_LEN);
}
-/*
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- * Calculates the number of flits needed for a scatter/gather list that
- * can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
-{
- n--;
- return (3 * n) / 2 + (n & 1) + 2;
-}
-
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
unsigned int entlen,
unsigned int skip)
@@ -160,7 +171,6 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
if (input == NULL)
goto out;
- reqctx = ahash_request_ctx(req);
digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
if (reqctx->is_sg_map)
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
@@ -183,30 +193,17 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
}
out:
req->base.complete(&req->base, err);
+}
- }
-
-static inline void chcr_handle_aead_resp(struct aead_request *req,
- unsigned char *input,
- int err)
+static inline int get_aead_subtype(struct crypto_aead *aead)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
-
-
- chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
- if (reqctx->b0_dma)
- dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
- reqctx->b0_len, DMA_BIDIRECTIONAL);
- if (reqctx->verify == VERIFY_SW) {
- chcr_verify_tag(req, input, &err);
- reqctx->verify = VERIFY_HW;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
- req->base.complete(&req->base, err);
-}
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
u8 temp[SHA512_DIGEST_SIZE];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -231,6 +228,25 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
*err = 0;
}
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+ if (reqctx->b0_dma)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
+ reqctx->b0_len, DMA_BIDIRECTIONAL);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+ }
+ req->base.complete(&req->base, err);
+}
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
@@ -558,7 +574,8 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
skip = 0;
}
}
- if (walk->nents == 0) {
+ WARN(!sg, "SG should not be null here\n");
+ if (sg && (walk->nents == 0)) {
small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
walk->sgl->len0 = cpu_to_be32(sgmin);
@@ -595,14 +612,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
}
}
-static inline int get_aead_subtype(struct crypto_aead *aead)
-{
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.aead);
- return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
-}
-
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -675,7 +684,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
if (srclen <= dstlen)
break;
less = min_t(unsigned int, sg_dma_len(dst) - offset -
- dstskip, CHCR_DST_SG_SIZE);
+ dstskip, CHCR_DST_SG_SIZE);
dstlen += less;
offset += less;
if (offset == sg_dma_len(dst)) {
@@ -686,7 +695,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
dstskip = 0;
}
src = sg_next(src);
- srcskip = 0;
+ srcskip = 0;
}
return min(srclen, dstlen);
}
@@ -1008,7 +1017,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
return bytes;
}
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
+static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+ u32 isfinal)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
@@ -1035,7 +1045,8 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
for (i = 0; i < (round % 8); i++)
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
- crypto_cipher_decrypt_one(cipher, iv, iv);
+ if (!isfinal)
+ crypto_cipher_decrypt_one(cipher, iv, iv);
out:
return ret;
}
@@ -1056,7 +1067,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
AES_BLOCK_SIZE) + 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 0);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1087,7 +1098,7 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
ctr_add_iv(iv, req->info, (reqctx->processed /
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1101,7 +1112,6 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
}
-
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err)
{
@@ -1135,10 +1145,10 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
			/* CTR mode counter overflow */
bytes = req->nbytes - reqctx->processed;
@@ -1239,15 +1249,15 @@ static int process_cipher(struct ablkcipher_request *req,
MIN_CIPHER_SG,
SPACE_LEFT(ablkctx->enckey_len),
0, 0);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
bytes = req->nbytes;
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
+ CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->info, bytes);
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
@@ -2014,11 +2024,8 @@ static int chcr_aead_common_init(struct aead_request *req,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int error = -EINVAL;
- unsigned int dst_size;
unsigned int authsize = crypto_aead_authsize(tfm);
- dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
/* validate key size */
if (aeadctx->enckey_len == 0)
goto err;
@@ -2083,7 +2090,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
- unsigned int dst_size = 0, temp;
+ unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
unsigned int kctx_len = 0, dnents;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
@@ -2097,24 +2104,19 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
return NULL;
reqctx->b0_dma = 0;
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
assoclen = 0;
}
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
- dnents += MIN_AUTH_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+ req->assoclen);
+ dnents += MIN_AUTH_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
@@ -2162,16 +2164,23 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
temp & 0xF,
null ? 0 : assoclen + IV + 1,
temp, temp);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+ else
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
- CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ temp,
actx->auth_mode, aeadctx->hmac_ctrl,
IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
- if (op_type == CHCR_ENCRYPT_OP)
+ if (op_type == CHCR_ENCRYPT_OP ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
memcpy(chcr_req->key_ctx.key, aeadctx->key,
aeadctx->enckey_len);
else
@@ -2181,7 +2190,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
4), actx->h_iopad, kctx_len -
(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
- memcpy(reqctx->iv, req->iv, IV);
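+	/*
+	 * RFC3686 counter block layout: 4-byte nonce (saved at setkey time)
+	 * || 8-byte per-request IV || 4-byte block counter starting at 1.
+	 */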
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+ *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ } else {
+ memcpy(reqctx->iv, req->iv, IV);
+ }
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
@@ -2202,9 +2220,9 @@ err:
return ERR_PTR(error);
}
-static int chcr_aead_dma_map(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+int chcr_aead_dma_map(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
int error;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2246,9 +2264,9 @@ err:
return -ENOMEM;
}
-static void chcr_aead_dma_unmap(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+void chcr_aead_dma_unmap(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2273,10 +2291,10 @@ static void chcr_aead_dma_unmap(struct device *dev,
}
}
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type)
+void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2308,11 +2326,11 @@ static inline void chcr_add_aead_src_ent(struct aead_request *req,
}
}
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid)
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2330,9 +2348,9 @@ static inline void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam)
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2355,10 +2373,10 @@ static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
}
}
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid)
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct dsgl_walk dsgl_walk;
@@ -2373,9 +2391,9 @@ static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param)
+void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param)
{
struct ulptx_walk ulp_walk;
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
@@ -2395,16 +2413,13 @@ static inline void chcr_add_hash_src_ent(struct ahash_request *req,
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
&reqctx->dma_addr);
ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
- 0);
-// reqctx->srcsg = ulp_walk.last_sg;
-// reqctx->src_ofst = ulp_walk.last_sg_len;
- ulptx_walk_end(&ulp_walk);
+ 0);
+ ulptx_walk_end(&ulp_walk);
}
}
-
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req)
+int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
int error = 0;
@@ -2414,13 +2429,13 @@ static inline int chcr_hash_dma_map(struct device *dev,
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
if (!error)
- return error;
+ return -ENOMEM;
req_ctx->is_sg_map = 1;
return 0;
}
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req)
+void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -2433,9 +2448,8 @@ static inline void chcr_hash_dma_unmap(struct device *dev,
}
-
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req)
+int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req)
{
int error;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2469,8 +2483,9 @@ err:
dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
return -ENOMEM;
}
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req)
+
+void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2666,8 +2681,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
sub_type = get_aead_subtype(tfm);
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
assoclen -= 8;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
@@ -2677,15 +2690,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error)
goto err;
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
- + (op_type ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_CCM_SG; // For IV and B0
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen
+ + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
@@ -2780,19 +2789,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
assoclen = req->assoclen - 8;
reqctx->b0_dma = 0;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
error = chcr_aead_common_init(req, op_type);
- if (error)
- return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst,
- req->cryptlen + (op_type ? -authsize : authsize),
+ if (error)
+ return ERR_PTR(error);
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize),
CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_GCM_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
@@ -2829,10 +2833,10 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
assoclen + IV + 1, 0);
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
temp, temp);
- chcr_req->sec_cpl.seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
@@ -3212,7 +3216,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher keys */
struct crypto_authenc_keys keys;
- unsigned int bs;
+ unsigned int bs, subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
int err = 0, i, key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3241,6 +3245,15 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
pr_err("chcr : Unsupported digest size\n");
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
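+		/* The last 4 bytes of the cipher key are the RFC3686 nonce */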
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3258,9 +3271,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
*/
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
@@ -3333,6 +3349,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
struct crypto_authenc_keys keys;
int err;
	/* it contains both the auth and cipher keys */
+ unsigned int subtype;
int key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3350,6 +3367,15 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3357,13 +3383,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key\n");
+ pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
goto out;
}
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
key_ctx_len = sizeof(struct _key_ctx)
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
@@ -3375,6 +3404,40 @@ out:
aeadctx->enckey_len = 0;
return -EINVAL;
}
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx;
+ struct sk_buff *skb;
+
+ if (!a_ctx(tfm)->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ u_ctx = ULD_CTX(a_ctx(tfm));
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ a_ctx(tfm)->tx_qidx)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
+ op_type);
+
+ if (IS_ERR(skb) || !skb)
+ return PTR_ERR(skb);
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
static int chcr_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -3383,8 +3446,10 @@ static int chcr_aead_encrypt(struct aead_request *req)
reqctx->verify = VERIFY_HW;
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3413,8 +3478,10 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3427,38 +3494,6 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
}
-static int chcr_aead_op(struct aead_request *req,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx;
- struct sk_buff *skb;
-
- if (!a_ctx(tfm)->dev) {
- pr_err("chcr : %s : No crypto device.\n", __func__);
- return -ENXIO;
- }
- u_ctx = ULD_CTX(a_ctx(tfm));
- if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- a_ctx(tfm)->tx_qidx)) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
- }
-
- /* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
- op_type);
-
- if (IS_ERR(skb) || !skb)
- return PTR_ERR(skb);
-
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
- chcr_send_wr(skb);
- return -EINPROGRESS;
-}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -3742,7 +3777,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3763,7 +3798,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3785,7 +3820,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3805,7 +3840,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3826,7 +3861,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3847,7 +3882,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3867,6 +3902,133 @@ static struct chcr_alg_template driver_algs[] = {
.setauthsize = chcr_authenc_null_setauthsize,
}
},
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-digest_null-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
+
};
/*
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 96c9335ee728..d1673a5d4bf5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -226,15 +226,6 @@
#define SPACE_LEFT(len) \
((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
-unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
- 96, 112, 120, 136, 144, 160, 168, 184,
- 192, 208, 216, 232, 240, 256, 264, 280,
- 288, 304, 312, 328, 336, 352, 360, 376};
-unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
- 112, 112, 128, 128, 144, 144, 160, 160,
- 192, 192, 208, 208, 224, 224, 240, 240,
- 272, 272, 288, 288, 304, 304, 320, 320};
-
struct algo_param {
unsigned int auth_mode;
unsigned int mk_size;
@@ -404,10 +395,4 @@ static inline u32 aes_ks_subword(const u32 w)
return *(u32 *)(&bytes[0]);
}
-static u32 round_constant[11] = {
- 0x01000000, 0x02000000, 0x04000000, 0x08000000,
- 0x10000000, 0x20000000, 0x40000000, 0x80000000,
- 0x1B000000, 0x36000000, 0x6C000000
-};
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index f5a2624081dc..04f277cade7c 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -48,6 +48,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ .tx_handler = chcr_uld_tx_handler,
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};
struct uld_ctx *assign_chcr_device(void)
@@ -164,6 +167,10 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
goto out;
}
u_ctx->lldi = *lld;
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(lld);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
return u_ctx;
}
@@ -187,6 +194,13 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
+{
+ return chcr_ipsec_xmit(skb, dev);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 94e7412f6164..3c29ee09b8b5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -39,6 +39,7 @@
#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
+#include "t4_msg.h"
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
@@ -89,12 +90,49 @@ struct uld_ctx {
struct chcr_dev *dev;
};
+struct chcr_ipsec_req {
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+ struct fw_ulptx_wr wreq;
+ struct chcr_ipsec_req req;
+};
+
+struct ipsec_sa_entry {
+ int hmac_ctrl;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+ __be32 key_ctx_hdr;
+ char salt[MAX_SALT];
+ char key[2 * AES_MAX_KEY_SIZE];
+};
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
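+ * For example, n = 5 entries: n-- gives 4, so (3 * 4) / 2 + (4 & 1) + 2
+ * = 8 flits.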
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 94a87e3ad9bc..7daf0a17a7d2 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -134,14 +134,16 @@
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_SHA 0x04000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_NULL 0x07000000
#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000
#define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000
#define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_SHA 0x0c000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_NULL 0x0d000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
@@ -210,8 +212,6 @@ struct dsgl_walk {
struct phys_sge_pairs *to;
};
-
-
struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE];
};
@@ -227,21 +227,18 @@ struct __aead_ctx {
struct chcr_authenc_ctx authenc[0];
};
-
-
struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
+ u8 nonce[4];
u16 hmac_ctrl;
u16 mayverify;
struct __aead_ctx ctx[0];
};
-
-
struct hmac_ctx {
struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
@@ -307,44 +304,29 @@ typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
int size,
unsigned short op_type);
-static int chcr_aead_op(struct aead_request *req_base,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn);
-static inline int get_aead_subtype(struct crypto_aead *aead);
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
- unsigned char *input, int err);
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
-static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
- unsigned short op_type);
-static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
- *req, unsigned short op_type);
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid);
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type);
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam);
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req);
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req);
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid);
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen, unsigned short op_type,
+ unsigned short qid);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
+ unsigned int assoclen, unsigned short op_type);
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam);
+int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid);
int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param);
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req);
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req);
+void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param);
+int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
+void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
new file mode 100644
index 000000000000..db1e241104ed
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -0,0 +1,654 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ * Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/esp.h>
+#include <net/xfrm.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+#define GCM_ESP_IV_SIZE 8
+
+static int chcr_xfrm_add_state(struct xfrm_state *x);
+static void chcr_xfrm_del_state(struct xfrm_state *x);
+static void chcr_xfrm_free_state(struct xfrm_state *x);
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static const struct xfrmdev_ops chcr_xfrmdev_ops = {
+ .xdo_dev_state_add = chcr_xfrm_add_state,
+ .xdo_dev_state_delete = chcr_xfrm_del_state,
+ .xdo_dev_state_free = chcr_xfrm_free_state,
+ .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+};
+
+/* Add xfrm offload operations to the Chelsio interface */
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+{
+ struct net_device *netdev = NULL;
+ int i;
+
+ for (i = 0; i < lld->nports; i++) {
+ netdev = lld->ports[i];
+ if (!netdev)
+ continue;
+ netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ rtnl_lock();
+ netdev_change_features(netdev);
+ rtnl_unlock();
+ }
+}
+
+static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ int hmac_ctrl;
+ int authsize = x->aead->alg_icv_len / 8;
+
+ sa_entry->authsize = authsize;
+
+ switch (authsize) {
+ case ICV_8:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ break;
+ case ICV_12:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ break;
+ case ICV_16:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return hmac_ctrl;
+}
+
+static inline int chcr_ipsec_setkey(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct crypto_cipher *cipher;
+ int keylen = (x->aead->alg_key_len + 7) / 8;
+ unsigned char *key = x->aead->alg_key;
+ int ck_size, key_ctx_size = 0;
+ unsigned char ghash_h[AEAD_H_SIZE];
+ int ret = 0;
+
+ if (keylen > 3) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(sa_entry->salt, key + keylen, 4);
+ }
+
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("GCM: Invalid key length %d\n", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(sa_entry->key, key, keylen);
+ sa_entry->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+
+ sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+
+	/* Calculate H = CIPH(K, 0 repeated 16 times).
+	 * It goes into the key context.
+	 */
+ cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+ if (IS_ERR(cipher)) {
+ sa_entry->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = crypto_cipher_setkey(cipher, key, keylen);
+ if (ret) {
+ sa_entry->enckey_len = 0;
+ goto out1;
+ }
+ memset(ghash_h, 0, AEAD_H_SIZE);
+ crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+ memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
+ 16), ghash_h, AEAD_H_SIZE);
+ sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+out1:
+ crypto_free_cipher(cipher);
+out:
+ return ret;
+}
+
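Illustration, not part of the patch: the sizing arithmetic in chcr_ipsec_setkey() above can be checked in isolation. For rfc4106(gcm(aes)) the xfrm key blob is the AES key followed by a 4-byte salt, and the key context carries the key rounded up to 16-byte units plus the 16-byte GHASH subkey H. A minimal user-space C sketch, with AEAD_H_SIZE and the rounding rule taken from the diff and the key-context header left out:

#include <stdio.h>

#define AEAD_H_SIZE 16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* alg_key_len for rfc4106(gcm(aes)): 128 key bits + 32 salt bits */
	int alg_key_len = 128 + 32;
	int keylen = (alg_key_len + 7) / 8;	/* 20 bytes of key material */

	keylen -= 4;				/* last 4 bytes are the salt */
	/* key payload rounded to 16 bytes, then the GHASH subkey H */
	int key_ctx_size = DIV_ROUND_UP(keylen, 16) * 16 + AEAD_H_SIZE;

	printf("cipher key %d bytes, key context payload %d bytes\n",
	       keylen, key_ctx_size);		/* prints 16 and 32 */
	return 0;
}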
+/*
+ * chcr_xfrm_add_state
+ * Returns 0 on success, a negative error if sending the message to the
+ * FPGA failed, or a positive error if the FPGA returned a bad response.
+ */
+static int chcr_xfrm_add_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+ int res = 0;
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ pr_debug("CHCR: Cannot offload ESN xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128 &&
+ x->aead->alg_icv_len != 96) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+ return -EINVAL;
+ }
+ if ((x->aead->alg_key_len != 128 + 32) &&
+ (x->aead->alg_key_len != 256 + 32)) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!sa_entry) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ chcr_ipsec_setkey(x, sa_entry);
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ try_module_get(THIS_MODULE);
+out:
+ return res;
+}
+
+static void chcr_xfrm_del_state(struct xfrm_state *x)
+{
+	/* Nothing to do; the SA entry is freed in chcr_xfrm_free_state() */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static void chcr_xfrm_free_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kfree(sa_entry);
+ module_put(THIS_MODULE);
+}
+
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+
+ return true;
+}
+
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+{
+ int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
+}
+
+static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+ unsigned int kctx_len)
+{
+ unsigned int flits;
+ int hdrlen = is_eth_imm(skb, kctx_len);
+
+ /* If the skb is small enough, we can pump it out as a work request
+	 * with only immediate data. In that case we only need the TX Packet
+	 * header plus the skb data in the Work Request.
+ */
+
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+
+	/* Otherwise, we're going to have to construct a scatter/gather list
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits += (sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
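Illustration, not part of the patch: a flit is one 8-byte unit of Tx descriptor space, so an immediate packet of skb->len bytes plus hdrlen bytes of WR/CPL headers needs DIV_ROUND_UP(len + hdrlen, 8) flits, as computed above. A self-contained sketch with MAX_IMM_TX_PKT_LEN taken from the diff and a made-up packet size:

#include <stdio.h>
#include <stdint.h>

#define MAX_IMM_TX_PKT_LEN 256
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int imm_flits(unsigned int skb_len, unsigned int hdrlen)
{
	if (skb_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return DIV_ROUND_UP(skb_len + hdrlen, sizeof(uint64_t));
	return 0;	/* too big: must use a scatter/gather list instead */
}

int main(void)
{
	printf("%u flits for a 100-byte packet with 60 bytes of headers\n",
	       imm_flits(100, 60));		/* prints 20 */
	return 0;
}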
+inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ struct cpl_tx_pkt_core *cpl;
+ u64 cntrl = 0;
+ u32 ctrl0, qidx;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cpl = (struct cpl_tx_pkt_core *)pos;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+ if (skb_vlan_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(ctrl0);
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ pos += sizeof(struct cpl_tx_pkt_core);
+ return pos;
+}
+
+inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ unsigned int len, qidx;
+ struct _key_ctx *key_ctx;
+ int left, eoq, key_len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
+ key_len = sa_entry->kctx_len;
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ left = eoq;
+ if (!eoq) {
+ pos = q->q.desc;
+ left = 64 * q->q.size;
+ }
+
+ /* Copy the Key context header */
+ key_ctx = (struct _key_ctx *)pos;
+ key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
+ memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
+ pos += sizeof(struct _key_ctx);
+ left -= sizeof(struct _key_ctx);
+
+ if (likely(len <= left)) {
+ memcpy(key_ctx->key, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ if (key_len <= left) {
+ memcpy(pos, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ memcpy(pos, sa_entry->key, left);
+ memcpy(q->q.desc, sa_entry->key + left,
+ key_len - left);
+ pos = (u8 *)q->q.desc + (key_len - left);
+ }
+ }
+ /* Copy CPL TX PKT XT */
+ pos = copy_cpltx_pktxt(skb, dev, pos);
+
+ return pos;
+}
+
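Illustration, not part of the patch: the split copy in copy_key_cpltx_pktxt() above handles a key context that straddles the end of the descriptor ring by writing the head up to the queue end and wrapping the tail to the ring base. A standalone sketch with arbitrary stand-in buffer sizes:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char ring[32], key[12] = "0123456789A";
	int left = 8;			/* bytes before the queue end */
	char *pos = ring + sizeof(ring) - left;
	int key_len = sizeof(key);

	if (key_len <= left) {
		memcpy(pos, key, key_len);
	} else {
		memcpy(pos, key, left);			  /* fill to the end */
		memcpy(ring, key + left, key_len - left); /* wrap to base */
	}
	printf("wrapped %d of %d key bytes to the ring base\n",
	       key_len - left, key_len);
	return 0;
}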
+inline void *chcr_crypto_wreq(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ int credits,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ unsigned int immdatalen = 0;
+ unsigned int ivsize = GCM_ESP_IV_SIZE;
+ struct chcr_ipsec_wr *wr;
+ unsigned int flits;
+ u32 wr_mid;
+ int qidx = skb_get_queue_mapping(skb);
+ struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ unsigned int kctx_len = sa_entry->kctx_len;
+ int qid = q->q.cntxt_id;
+
+ atomic_inc(&adap->chcr_stats.ipsec_cnt);
+
+ flits = calc_tx_sec_flits(skb, kctx_len);
+
+ if (is_eth_imm(skb, kctx_len))
+ immdatalen = skb->len;
+
+ /* WR Header */
+ wr = (struct chcr_ipsec_wr *)pos;
+ wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+ wr_mid |= FW_ULPTX_WR_DATA_F;
+ wr->wreq.flowid_len16 = htonl(wr_mid);
+
+ /* ULPTX */
+ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
+ wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
+
+ /* Sub-command */
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(wr->req.key_ctx) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core) +
+ immdatalen);
+
+ /* CPL_SEC_PDU */
+ wr->req.sec_cpl.op_ivinsrtofst = htonl(
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(2) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1)));
+
+ wr->req.sec_cpl.pldlen = htonl(skb->len);
+
+ wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ (skb_transport_offset(skb) + 1),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr)),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1), 0);
+
+ wr->req.sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1,
+ sa_entry->authsize,
+ sa_entry->authsize);
+ wr->req.sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH,
+ sa_entry->hmac_ctrl,
+ ivsize >> 1);
+ wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 0, 0);
+
+ pos += sizeof(struct fw_ulptx_wr) +
+ sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata) +
+ sizeof(struct cpl_tx_sec_pdu);
+
+ pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+ return pos;
+}
+
+/**
+ * flits_to_desc - returns the number of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/*
+ * chcr_ipsec_xmit is called from the ULD Tx handler
+ */
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct ipsec_sa_entry *sa_entry;
+ u64 *pos, *end, *before, *sgl;
+ int qidx, left, credits;
+ unsigned int flits = 0, ndesc, kctx_len;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ struct port_info *pi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
+
+ if (!x->xso.offload_handle)
+ return NETDEV_TX_BUSY;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kctx_len = sa_entry->kctx_len;
+
+ if (skb->sp->len != 1) {
+out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
+ dev->name, qidx, credits, ndesc, txq_avail(&q->q),
+ flits);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (is_eth_imm(skb, kctx_len))
+ immediate = true;
+
+ if (!immediate &&
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ before = (u64 *)pos;
+ end = (u64 *)pos + flits;
+ /* Setup IPSec CPL */
+ pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
+ credits, sa_entry);
+ if (before > (u64 *)pos) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
+ if (pos == (u64 *)q->q.stat) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ pos = (void *)q->q.desc;
+ }
+
+ sgl = (void *)pos;
+ if (immediate) {
+ cxgb4_inline_tx_skb(skb, &q->q, sgl);
+ dev_consume_skb_any(skb);
+ } else {
+ int last_desc;
+
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
+ 0, addr);
+ skb_orphan(skb);
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ }
+ txq_advance(&q->q, ndesc);
+
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
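Illustration, not part of the patch: the credit check in chcr_ipsec_xmit() keeps one ring slot unused (txq_avail() is size - 1 - in_use) and only admits the WR when the available space covers its DIV_ROUND_UP(flits, 8) descriptors; the real code additionally stops the queue when credits fall below ETHTXQ_STOP_THRES. A simplified sketch with invented numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int size = 1024, in_use = 1000, flits = 90;
	unsigned int ndesc = DIV_ROUND_UP(flits, 8);  /* 8 flits/descriptor */
	int credits = (int)(size - 1 - in_use) - (int)ndesc;

	if (credits < 0)
		printf("ring full: stop the queue, return NETDEV_TX_BUSY\n");
	else
		printf("%u descriptors consumed, %d credits left\n",
		       ndesc, credits);		/* prints 12 and 11 */
	return 0;
}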
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 451620b475a0..86f5f459762e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* exynos-rng.c - Random Number Generator driver for the Exynos
*
@@ -6,15 +7,6 @@
* Loosely based on old driver from drivers/char/hw_random/exynos-rng.c:
* Copyright (C) 2012 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -22,12 +14,18 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/internal/rng.h>
#define EXYNOS_RNG_CONTROL 0x0
#define EXYNOS_RNG_STATUS 0x10
+
+#define EXYNOS_RNG_SEED_CONF 0x14
+#define EXYNOS_RNG_GEN_PRNG BIT(1)
+
#define EXYNOS_RNG_SEED_BASE 0x140
#define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
#define EXYNOS_RNG_OUT_BASE 0x160
@@ -43,13 +41,21 @@
#define EXYNOS_RNG_SEED_REGS 5
#define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4)
+enum exynos_prng_type {
+ EXYNOS_PRNG_UNKNOWN = 0,
+ EXYNOS_PRNG_EXYNOS4,
+ EXYNOS_PRNG_EXYNOS5,
+};
+
/*
- * Driver re-seeds itself with generated random numbers to increase
- * the randomness.
+ * Driver re-seeds itself with generated random numbers to hinder
+ * backtracking of the original seed.
*
* Time for next re-seed in ms.
*/
-#define EXYNOS_RNG_RESEED_TIME 100
+#define EXYNOS_RNG_RESEED_TIME 1000
+#define EXYNOS_RNG_RESEED_BYTES 65536
+
/*
* In polling mode, do not wait infinitely for the engine to finish the work.
*/
@@ -63,13 +69,17 @@ struct exynos_rng_ctx {
/* Device associated memory */
struct exynos_rng_dev {
struct device *dev;
+ enum exynos_prng_type type;
void __iomem *mem;
struct clk *clk;
+ struct mutex lock;
/* Generated numbers stored for seeding during resume */
u8 seed_save[EXYNOS_RNG_SEED_SIZE];
unsigned int seed_save_len;
/* Time of last seeding in jiffies */
unsigned long last_seeding;
+ /* Bytes generated since last seeding */
+ unsigned long bytes_seeding;
};
static struct exynos_rng_dev *exynos_rng_dev;
@@ -114,39 +124,12 @@ static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
}
rng->last_seeding = jiffies;
+ rng->bytes_seeding = 0;
return 0;
}
/*
- * Read from output registers and put the data under 'dst' array,
- * up to dlen bytes.
- *
- * Returns number of bytes actually stored in 'dst' (dlen
- * or EXYNOS_RNG_SEED_SIZE).
- */
-static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
- u8 *dst, unsigned int dlen)
-{
- unsigned int cnt = 0;
- int i, j;
- u32 val;
-
- for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
- val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
-
- for (i = 0; i < 4; i++) {
- dst[cnt] = val & 0xff;
- val >>= 8;
- if (++cnt >= dlen)
- return cnt;
- }
- }
-
- return cnt;
-}
-
-/*
* Start the engine and poll for finish. Then read from output registers
* filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
* random data (EXYNOS_RNG_SEED_SIZE).
@@ -160,8 +143,13 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
{
int retry = EXYNOS_RNG_WAIT_RETRIES;
- exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
- EXYNOS_RNG_CONTROL);
+ if (rng->type == EXYNOS_PRNG_EXYNOS4) {
+ exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
+ EXYNOS_RNG_CONTROL);
+ } else if (rng->type == EXYNOS_PRNG_EXYNOS5) {
+ exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG,
+ EXYNOS_RNG_SEED_CONF);
+ }
while (!(exynos_rng_readl(rng,
EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
@@ -173,7 +161,9 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
/* Clear status bit */
exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
EXYNOS_RNG_STATUS);
- *read = exynos_rng_copy_random(rng, dst, dlen);
+ *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
+ memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
+ rng->bytes_seeding += *read;
return 0;
}
@@ -187,13 +177,18 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng)
unsigned int read = 0;
u8 seed[EXYNOS_RNG_SEED_SIZE];
- if (time_before(now, next_seeding))
+ if (time_before(now, next_seeding) &&
+ rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES)
return;
if (exynos_rng_get_random(rng, seed, sizeof(seed), &read))
return;
exynos_rng_set_seed(rng, seed, read);
+
+	/* Briefly drop the lock so other users can make progress. */
+ mutex_unlock(&rng->lock);
+ mutex_lock(&rng->lock);
}
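Illustration, not part of the patch: after this change the driver reseeds on either budget, elapsed time since the last seeding or bytes generated since it, whichever is exhausted first. A sketch of the predicate with the thresholds from the diff and a plain millisecond counter standing in for jiffies:

#include <stdbool.h>
#include <stdio.h>

#define RESEED_TIME_MS 1000
#define RESEED_BYTES 65536

static bool need_reseed(unsigned long now_ms, unsigned long last_ms,
			unsigned long bytes_since_seed)
{
	return (now_ms - last_ms >= RESEED_TIME_MS) ||
	       (bytes_since_seed >= RESEED_BYTES);
}

int main(void)
{
	printf("%d\n", need_reseed(1500, 400, 100));	/* 1: time expired */
	printf("%d\n", need_reseed(500, 400, 70000));	/* 1: byte budget */
	printf("%d\n", need_reseed(500, 400, 100));	/* 0: neither */
	return 0;
}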
static int exynos_rng_generate(struct crypto_rng *tfm,
@@ -209,6 +204,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
do {
ret = exynos_rng_get_random(rng, dst, dlen, &read);
if (ret)
@@ -219,6 +215,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
exynos_rng_reseed(rng);
} while (dlen > 0);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -236,7 +233,9 @@ static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
ret = exynos_rng_set_seed(ctx->rng, seed, slen);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -259,7 +258,7 @@ static struct rng_alg exynos_rng_alg = {
.base = {
.cra_name = "stdrng",
.cra_driver_name = "exynos_rng",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_ctxsize = sizeof(struct exynos_rng_ctx),
.cra_module = THIS_MODULE,
.cra_init = exynos_rng_kcapi_init,
@@ -279,6 +278,10 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
+ rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+
+ mutex_init(&rng->lock);
+
rng->dev = &pdev->dev;
rng->clk = devm_clk_get(&pdev->dev, "secss");
if (IS_ERR(rng->clk)) {
@@ -329,9 +332,14 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
/* Get new random numbers and store them for seeding on resume. */
exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
&(rng->seed_save_len));
+
+ mutex_unlock(&rng->lock);
+
dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
rng->seed_save_len);
@@ -354,8 +362,12 @@ static int __maybe_unused exynos_rng_resume(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
+ mutex_unlock(&rng->lock);
+
clk_disable_unprepare(rng->clk);
return ret;
@@ -367,6 +379,10 @@ static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
static const struct of_device_id exynos_rng_dt_match[] = {
{
.compatible = "samsung,exynos4-rng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS4,
+ }, {
+ .compatible = "samsung,exynos5250-prng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS5,
},
{ },
};
@@ -386,4 +402,4 @@ module_platform_driver(exynos_rng_driver);
MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver");
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index e09d4055b19e..a5a36fe7bf2c 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2579,6 +2579,7 @@ err_out_unmap_bars:
for (i = 0; i < 3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
+ kfree(dev);
err_out_free_regions:
pci_release_regions(pdev);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..225e74a7f724 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -108,10 +108,10 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
- priv->base + ctrl);
+ EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -119,12 +119,12 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Release engine from reset */
- val = readl(priv->base + ctrl);
+ val = readl(EIP197_PE(priv) + ctrl);
val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
- writel(val, priv->base + ctrl);
+ writel(val, EIP197_PE(priv) + ctrl);
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
@@ -145,14 +145,14 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
}
/* Clear the scratchpad memory */
- val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
+ memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
@@ -173,7 +173,7 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, cd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -182,26 +182,25 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
priv->config.cd_size,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.cd_offset),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
- writel(val,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(5, 0),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
}
return 0;
@@ -212,7 +211,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, rd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -221,33 +220,33 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
priv->config.rd_size,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
writel(val,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(7, 0),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
/* enable ring interrupt */
- val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
val |= EIP197_RDR_IRQ(i);
- writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
}
return 0;
@@ -259,39 +258,40 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
int i, ret;
/* Determine endianness and configure byte swap */
- version = readl(priv->base + EIP197_HIA_VERSION);
- val = readl(priv->base + EIP197_HIA_MST_CTRL);
+ version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
+ val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
val |= EIP197_MST_CTRL_BYTE_SWAP;
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
- writel(val, priv->base + EIP197_HIA_MST_CTRL);
-
+ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
- priv->base + EIP197_MST_CTRL);
+ EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
/* Interrupts reset */
/* Disable all global interrupts */
- writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);
+ writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
/* Clear any pending interrupt */
- writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
/* Data Fetch Engine configuration */
/* Reset all DFE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* DMA transfer size to use */
val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
@@ -299,29 +299,32 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, priv->base + EIP197_HIA_DFE_CFG);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
/* Leave the DFE threads reset state */
- writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- priv->base + EIP197_PE_IN_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- priv->base + EIP197_PE_IN_TBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* Data Store Engine configuration */
/* Reset all DSE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Wait for all DSE threads to complete */
- while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
GENMASK(15, 12)) != GENMASK(15, 12))
;
@@ -330,15 +333,19 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, priv->base + EIP197_HIA_DSE_CFG);
+	/* FIXME: instability issues can occur for EIP97, but disabling it
+	 * impacts performance.
+	 */
+ if (priv->version == EIP197)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
/* Leave the DSE threads reset state */
- writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- priv->base + EIP197_PE_OUT_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
/* Processing Engine configuration */
@@ -348,73 +355,75 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
val |= EIP197_ALG_SHA2;
- writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Clear interrupts for this ring */
writel(GENMASK(31, 0),
- priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+ EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Result Descriptor Ring prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Enable command descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Enable result descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Clear any HIA interrupt */
- writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- eip197_trc_cache_init(priv);
+ if (priv->version == EIP197) {
+ eip197_trc_cache_init(priv);
- ret = eip197_load_firmwares(priv);
- if (ret)
- return ret;
+ ret = eip197_load_firmwares(priv);
+ if (ret)
+ return ret;
+ }
safexcel_hw_setup_cdesc_rings(priv);
safexcel_hw_setup_rdesc_rings(priv);
@@ -422,6 +431,23 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
return 0;
}
+/* Called with ring's lock taken */
+static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+ int ring, int reqs)
+{
+ int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+
+ if (!coal)
+ return 0;
+
+ /* Configure when we want an interrupt */
+ writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+ EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+
+ return coal;
+}
+
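Illustration, not part of the patch: safexcel_try_push_requests() above clamps the batch handed to the result-ring threshold register to EIP197_MAX_BATCH_SZ, and whatever does not fit stays accounted in requests_left until the IRQ thread pushes the next batch. A simplified sketch (the register write is stubbed out, and the left-over bookkeeping the driver splits between dequeue and completion is folded into one helper):

#include <stdio.h>

#define MAX_BATCH_SZ 64

static int try_push(int *requests_left)
{
	int coal = *requests_left < MAX_BATCH_SZ ? *requests_left
						 : MAX_BATCH_SZ;
	if (!coal)
		return 0;
	/* here the driver programs the RDR threshold register with coal */
	*requests_left -= coal;
	return coal;
}

int main(void)
{
	int left = 100;

	printf("pushed %d, %d left\n", try_push(&left), left); /* 64, 36 */
	printf("pushed %d, %d left\n", try_push(&left), left); /* 36, 0 */
	return 0;
}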
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
@@ -429,34 +455,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
- priv->ring[ring].need_dequeue = false;
+	/* If a request wasn't properly dequeued because of a lack of resources,
+	 * process it first.
+	 */
+ req = priv->ring[ring].req;
+ backlog = priv->ring[ring].backlog;
+ if (req)
+ goto handle_req;
- do {
+ while (true) {
spin_lock_bh(&priv->ring[ring].queue_lock);
backlog = crypto_get_backlog(&priv->ring[ring].queue);
req = crypto_dequeue_request(&priv->ring[ring].queue);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!req)
+ if (!req) {
+ priv->ring[ring].req = NULL;
+ priv->ring[ring].backlog = NULL;
goto finalize;
+ }
+handle_req:
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request) {
- spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, req);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
-
- priv->ring[ring].need_dequeue = true;
- goto finalize;
- }
+ if (!request)
+ goto request_failed;
ctx = crypto_tfm_ctx(req->tfm);
ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
- req->complete(req, ret);
- priv->ring[ring].need_dequeue = true;
- goto finalize;
+ goto request_failed;
}
if (backlog)
@@ -468,30 +496,39 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
cdesc += commands;
rdesc += results;
- } while (nreq++ < EIP197_MAX_BATCH_SZ);
+ nreq++;
+ }
+
+request_failed:
+ /* Not enough resources to handle all the requests. Bail out and save
+ * the request and the backlog for the next dequeue call (per-ring).
+ */
+ priv->ring[ring].req = req;
+ priv->ring[ring].backlog = backlog;
finalize:
- if (nreq == EIP197_MAX_BATCH_SZ)
- priv->ring[ring].need_dequeue = true;
- else if (!nreq)
+ if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].lock);
+ spin_lock_bh(&priv->ring[ring].egress_lock);
- /* Configure when we want an interrupt */
- writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
- EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+ if (!priv->ring[ring].busy) {
+ nreq -= safexcel_try_push_requests(priv, ring, nreq);
+ if (nreq)
+ priv->ring[ring].busy = true;
+ }
+
+ priv->ring[ring].requests_left += nreq;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
writel((cdesc * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
-
- spin_unlock_bh(&priv->ring[ring].lock);
+ EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -540,7 +577,6 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request)
@@ -587,14 +623,17 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
{
struct safexcel_request *sreq;
struct safexcel_context *ctx;
- int ret, i, nreq, ndesc = 0;
+ int ret, i, nreq, ndesc, tot_descs, done;
bool should_complete;
- nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
- nreq >>= 24;
- nreq &= GENMASK(6, 0);
+handle_results:
+ tot_descs = 0;
+
+ nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+ nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
if (!nreq)
- return;
+ goto requests_left;
for (i = 0; i < nreq; i++) {
spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -607,14 +646,11 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
ndesc = ctx->handle_result(priv, ring, sreq->req,
&should_complete, &ret);
if (ndesc < 0) {
+ kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
- return;
+ goto acknowledge;
}
- writel(EIP197_xDR_PROC_xD_PKT(1) |
- EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
-
if (should_complete) {
local_bh_disable();
sreq->req->complete(sreq->req, ret);
@@ -622,19 +658,41 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
}
kfree(sreq);
+ tot_descs += ndesc;
}
+
+acknowledge:
+ if (i) {
+ writel(EIP197_xDR_PROC_xD_PKT(i) |
+ EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ }
+
+	/* If the number of requests overflowed the counter, try to process
+	 * more requests.
+	 */
+ if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+ goto handle_results;
+
+requests_left:
+ spin_lock_bh(&priv->ring[ring].egress_lock);
+
+ done = safexcel_try_push_requests(priv, ring,
+ priv->ring[ring].requests_left);
+
+ priv->ring[ring].requests_left -= done;
+ if (!done && !priv->ring[ring].requests_left)
+ priv->ring[ring].busy = false;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
}
-static void safexcel_handle_result_work(struct work_struct *work)
+static void safexcel_dequeue_work(struct work_struct *work)
{
struct safexcel_work_data *data =
container_of(work, struct safexcel_work_data, work);
- struct safexcel_crypto_priv *priv = data->priv;
-
- safexcel_handle_result_descriptor(priv, data->ring);
- if (priv->ring[data->ring].need_dequeue)
- safexcel_dequeue(data->priv, data->ring);
+ safexcel_dequeue(data->priv, data->ring);
}
struct safexcel_ring_irq_data {
@@ -646,16 +704,16 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
struct safexcel_ring_irq_data *irq_data = data;
struct safexcel_crypto_priv *priv = irq_data->priv;
- int ring = irq_data->ring;
+ int ring = irq_data->ring, rc = IRQ_NONE;
u32 status, stat;
- status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+ status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
if (!status)
- return IRQ_NONE;
+ return rc;
/* RDR interrupts */
if (status & EIP197_RDR_IRQ(ring)) {
- stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
if (unlikely(stat & EIP197_xDR_ERR)) {
/*
@@ -665,22 +723,37 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
*/
dev_err(priv->dev, "RDR: fatal error.");
} else if (likely(stat & EIP197_xDR_THRESH)) {
- queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+ rc = IRQ_WAKE_THREAD;
}
/* ACK the interrupts */
writel(stat & 0xff,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
}
/* ACK the interrupts */
- writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));
+ writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+ return rc;
+}
+
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+ struct safexcel_ring_irq_data *irq_data = data;
+ struct safexcel_crypto_priv *priv = irq_data->priv;
+ int ring = irq_data->ring;
+
+ safexcel_handle_result_descriptor(priv, ring);
+
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
irq_handler_t handler,
+ irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
int ret, irq = platform_get_irq_byname(pdev, name);
@@ -690,8 +763,9 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
return irq;
}
- ret = devm_request_irq(&pdev->dev, irq, handler, 0,
- dev_name(&pdev->dev), ring_irq_priv);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
+ threaded_handler, IRQF_ONESHOT,
+ dev_name(&pdev->dev), ring_irq_priv);
if (ret) {
dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
return ret;
@@ -754,11 +828,11 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
u32 val, mask;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
@@ -768,6 +842,35 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+ struct safexcel_register_offsets *offsets = &priv->offsets;
+
+ if (priv->version == EIP197) {
+ offsets->hia_aic = EIP197_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP197_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP197_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP197_PE_BASE;
+ } else {
+ offsets->hia_aic = EIP97_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP97_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP97_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP97_PE_BASE;
+ }
+}
+
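Illustration, not part of the patch: the indirection added here resolves each functional block's base once per probed variant, so a register access becomes ioremapped base + block offset + register offset instead of a hard-coded EIP197 address. A sketch using the two PE bases from the header diff:

#include <stdint.h>
#include <stdio.h>

enum eip_version { EIP97, EIP197 };

struct offsets { uint32_t pe; };

static uint32_t pe_reg(enum eip_version v, uint32_t reg)
{
	struct offsets o = { .pe = (v == EIP197) ? 0xa0000 : 0x10000 };

	return o.pe + reg;	/* added to the ioremapped base at runtime */
}

int main(void)
{
	/* EIP197_PE_IN_DBUF_THRES is 0x0000 within the PE block */
	printf("EIP197 PE_IN_DBUF_THRES at 0x%x\n",
	       (unsigned)pe_reg(EIP197, 0x0));	/* 0xa0000 */
	printf("EIP97  PE_IN_DBUF_THRES at 0x%x\n",
	       (unsigned)pe_reg(EIP97, 0x0));	/* 0x10000 */
	return 0;
}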
static int safexcel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -780,6 +883,9 @@ static int safexcel_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = dev;
+ priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+ safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
@@ -838,6 +944,7 @@ static int safexcel_probe(struct platform_device *pdev)
snprintf(irq_name, 6, "ring%d", i);
irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+ safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
ret = irq;
@@ -846,7 +953,7 @@ static int safexcel_probe(struct platform_device *pdev)
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
- INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+ INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
@@ -855,6 +962,9 @@ static int safexcel_probe(struct platform_device *pdev)
goto err_clk;
}
+ priv->ring[i].requests_left = 0;
+ priv->ring[i].busy = false;
+
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
@@ -902,7 +1012,14 @@ static int safexcel_remove(struct platform_device *pdev)
}
static const struct of_device_id safexcel_of_match_table[] = {
- { .compatible = "inside-secure,safexcel-eip197" },
+ {
+ .compatible = "inside-secure,safexcel-eip97",
+ .data = (void *)EIP97,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197",
+ .data = (void *)EIP197,
+ },
{},
};
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 304c5838c11a..4e219c21608b 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -19,64 +19,103 @@
#define EIP197_HIA_VERSION_BE 0x35ca
/* Static configuration */
-#define EIP197_DEFAULT_RING_SIZE 64
+#define EIP197_DEFAULT_RING_SIZE 400
#define EIP197_MAX_TOKENS 5
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_COUNT 1
-#define EIP197_MAX_BATCH_SZ EIP197_DEFAULT_RING_SIZE
+#define EIP197_MAX_BATCH_SZ 64
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv) ((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv) ((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv) ((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv) ((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv) ((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE 0x90000
+#define EIP197_HIA_AIC_G_BASE 0x90000
+#define EIP197_HIA_AIC_R_BASE 0x90800
+#define EIP197_HIA_AIC_xDR_BASE 0x80000
+#define EIP197_HIA_DFE_BASE 0x8c000
+#define EIP197_HIA_DFE_THR_BASE 0x8c040
+#define EIP197_HIA_DSE_BASE 0x8d000
+#define EIP197_HIA_DSE_THR_BASE 0x8d040
+#define EIP197_HIA_GEN_CFG_BASE 0xf0000
+#define EIP197_PE_BASE 0xa0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE 0x0
+#define EIP97_HIA_AIC_G_BASE 0x0
+#define EIP97_HIA_AIC_R_BASE 0x0
+#define EIP97_HIA_AIC_xDR_BASE 0x0
+#define EIP97_HIA_DFE_BASE 0xf000
+#define EIP97_HIA_DFE_THR_BASE 0xf200
+#define EIP97_HIA_DSE_BASE 0xf400
+#define EIP97_HIA_DSE_THR_BASE 0xf600
+#define EIP97_HIA_GEN_CFG_BASE 0x10000
+#define EIP97_PE_BASE 0x10000
+
/* CDR/RDR register offsets */
-#define EIP197_HIA_xDR_OFF(r) (0x80000 + (r) * 0x1000)
-#define EIP197_HIA_CDR(r) (EIP197_HIA_xDR_OFF(r))
-#define EIP197_HIA_RDR(r) (EIP197_HIA_xDR_OFF(r) + 0x800)
-#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0
-#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x4
-#define EIP197_HIA_xDR_RING_SIZE 0x18
-#define EIP197_HIA_xDR_DESC_SIZE 0x1c
-#define EIP197_HIA_xDR_CFG 0x20
-#define EIP197_HIA_xDR_DMA_CFG 0x24
-#define EIP197_HIA_xDR_THRESH 0x28
-#define EIP197_HIA_xDR_PREP_COUNT 0x2c
-#define EIP197_HIA_xDR_PROC_COUNT 0x30
-#define EIP197_HIA_xDR_PREP_PNTR 0x34
-#define EIP197_HIA_xDR_PROC_PNTR 0x38
-#define EIP197_HIA_xDR_STAT 0x3c
+#define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x0004
+#define EIP197_HIA_xDR_RING_SIZE 0x0018
+#define EIP197_HIA_xDR_DESC_SIZE 0x001c
+#define EIP197_HIA_xDR_CFG 0x0020
+#define EIP197_HIA_xDR_DMA_CFG 0x0024
+#define EIP197_HIA_xDR_THRESH 0x0028
+#define EIP197_HIA_xDR_PREP_COUNT 0x002c
+#define EIP197_HIA_xDR_PROC_COUNT 0x0030
+#define EIP197_HIA_xDR_PREP_PNTR 0x0034
+#define EIP197_HIA_xDR_PROC_PNTR 0x0038
+#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x8c000
-#define EIP197_HIA_DFE_THR_CTRL 0x8c040
-#define EIP197_HIA_DFE_THR_STAT 0x8c044
-#define EIP197_HIA_DSE_CFG 0x8d000
-#define EIP197_HIA_DSE_THR_CTRL 0x8d040
-#define EIP197_HIA_DSE_THR_STAT 0x8d044
-#define EIP197_HIA_RA_PE_CTRL 0x90010
-#define EIP197_HIA_RA_PE_STAT 0x90014
+#define EIP197_HIA_DFE_CFG 0x0000
+#define EIP197_HIA_DFE_THR_CTRL 0x0000
+#define EIP197_HIA_DFE_THR_STAT 0x0004
+#define EIP197_HIA_DSE_CFG 0x0000
+#define EIP197_HIA_DSE_THR_CTRL 0x0000
+#define EIP197_HIA_DSE_THR_STAT 0x0004
+#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
-#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0x9e808 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ACK(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0x9e814 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_G_ENABLE_CTRL 0x9f808
-#define EIP197_HIA_AIC_G_ENABLED_STAT 0x9f810
-#define EIP197_HIA_AIC_G_ACK 0x9f810
-#define EIP197_HIA_MST_CTRL 0x9fff4
-#define EIP197_HIA_OPTIONS 0x9fff8
-#define EIP197_HIA_VERSION 0x9fffc
-#define EIP197_PE_IN_DBUF_THRES 0xa0000
-#define EIP197_PE_IN_TBUF_THRES 0xa0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0xa0800
-#define EIP197_PE_ICE_PUE_CTRL 0xa0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0xa0d04
-#define EIP197_PE_ICE_FPP_CTRL 0xa0d80
-#define EIP197_PE_ICE_RAM_CTRL 0xa0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0xa1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0xa1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0xa100c
-#define EIP197_PE_OUT_DBUF_THRES 0xa1c00
-#define EIP197_PE_OUT_TBUF_THRES 0xa1d00
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
+#define EIP197_HIA_AIC_G_ACK 0xf810
+#define EIP197_HIA_MST_CTRL 0xfff4
+#define EIP197_HIA_OPTIONS 0xfff8
+#define EIP197_HIA_VERSION 0xfffc
+#define EIP197_PE_IN_DBUF_THRES 0x0000
+#define EIP197_PE_IN_TBUF_THRES 0x0100
+#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
+#define EIP197_PE_ICE_PUE_CTRL 0x0c80
+#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
+#define EIP197_PE_ICE_FPP_CTRL 0x0d80
+#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
+#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
+#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
+#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
+#define EIP197_PE_OUT_DBUF_THRES 0x1c00
+#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_MST_CTRL 0xfff4
+
+/* EIP197-specific registers, no indirection */
#define EIP197_CLASSIFICATION_RAMS 0xe0000
#define EIP197_TRC_CTRL 0xf0800
#define EIP197_TRC_LASTRES 0xf0804
@@ -90,7 +129,6 @@
#define EIP197_TRC_ECCDATASTAT 0xf083c
#define EIP197_TRC_ECCDATA 0xf0840
#define EIP197_CS_RAM_CTRL 0xf7ff0
-#define EIP197_MST_CTRL 0xffff4
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
@@ -117,6 +155,8 @@
#define EIP197_xDR_PREP_CLR_COUNT BIT(31)
/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET 24
+#define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
#define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
#define EIP197_xDR_PROC_CLR_COUNT BIT(31)
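The new OFFSET/MASK pair lets the result-handling path decode the packet field of EIP197_HIA_xDR_PROC_COUNT from a raw register read, instead of only composing values with the shift-in macros. A minimal sketch of such a decode, assuming the register was read into stat (variable names illustrative, not part of the patch):

	/* Sketch: extract the processed-packet count from PROC_COUNT. */
	u32 stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	int nr_pkts = (stat >> EIP197_xDR_PROC_xD_PKT_OFFSET) &
		      EIP197_xDR_PROC_xD_PKT_MASK;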
@@ -463,12 +503,33 @@ struct safexcel_work_data {
int ring;
};
+enum safexcel_eip_version {
+ EIP97,
+ EIP197,
+};
+
+struct safexcel_register_offsets {
+ u32 hia_aic;
+ u32 hia_aic_g;
+ u32 hia_aic_r;
+ u32 hia_aic_xdr;
+ u32 hia_dfe;
+ u32 hia_dfe_thr;
+ u32 hia_dse;
+ u32 hia_dse_thr;
+ u32 hia_gen_cfg;
+ u32 pe;
+};
+
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
struct clk *clk;
struct safexcel_config config;
+ enum safexcel_eip_version version;
+ struct safexcel_register_offsets offsets;
+
/* context DMA pool */
struct dma_pool *context_pool;
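With every HIA/PE access funneled through struct safexcel_register_offsets, supporting a new engine revision reduces to filling the table once at probe time from the *_BASE constants above. A probe-time sketch, assuming a helper along these lines (the function name is hypothetical):

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->version == EIP197) {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
	} else {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
	}
}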
@@ -489,7 +550,20 @@ struct safexcel_crypto_priv {
/* queue */
struct crypto_queue queue;
spinlock_t queue_lock;
- bool need_dequeue;
+
+ /* Number of requests in the engine that need the threshold
+ * interrupt to be set up.
+ */
+ int requests_left;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for the current requests when bailing out of the dequeueing
+ * function because not enough resources are available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
} ring[EIP197_MAX_RINGS];
};
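requests_left and busy replace the old need_dequeue flag: the dequeue path counts what it hands to the engine, and the completion path decrements until the ring can be declared idle. A rough sketch of the accounting, assumed to run under the appropriate ring lock (only the struct members above are real; nreq and handled are illustrative):

	/* Dequeue side: nreq requests were just pushed to the engine. */
	priv->ring[ring].requests_left += nreq;
	priv->ring[ring].busy = true;

	/* Completion side: handled results came back from the engine. */
	priv->ring[ring].requests_left -= handled;
	if (!priv->ring[ring].requests_left)
		priv->ring[ring].busy = false;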
@@ -539,7 +613,6 @@ void safexcel_free_context(struct safexcel_crypto_priv *priv,
struct crypto_async_request *req,
int result_sz);
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request);
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..63a8768ed2ae 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "safexcel.h"
@@ -26,13 +27,17 @@ struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
- enum safexcel_cipher_direction direction;
u32 mode;
__le32 key[8];
unsigned int key_len;
};
+struct safexcel_cipher_req {
+ enum safexcel_cipher_direction direction;
+ bool needs_inv;
+};
+
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
struct crypto_async_request *async,
struct safexcel_command_desc *cdesc,
@@ -64,6 +69,7 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -73,10 +79,12 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
}
- for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ for (i = 0; i < len / sizeof(u32); i++) {
+ if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
@@ -90,12 +98,15 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
}
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+ struct crypto_async_request *async,
struct safexcel_command_desc *cdesc)
{
struct safexcel_crypto_priv *priv = ctx->priv;
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
int ctrl_size;
- if (ctx->direction == SAFEXCEL_ENCRYPT)
+ if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
else
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
@@ -126,9 +137,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
return 0;
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
@@ -238,7 +249,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
n_cdesc++;
if (n_cdesc == 1) {
- safexcel_context_control(ctx, cdesc);
+ safexcel_context_control(ctx, async, cdesc);
safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
}
@@ -265,7 +276,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
spin_unlock_bh(&priv->ring[ring].egress_lock);
request->req = &req->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = n_rdesc;
@@ -341,8 +351,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_aes_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -351,14 +359,34 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
return ndesc;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ int err;
+
+ if (sreq->needs_inv) {
+ sreq->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -368,9 +396,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
-
- ret = safexcel_invalidate_cache(async, &ctx->base, priv,
+ ret = safexcel_invalidate_cache(async, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -381,32 +407,54 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ int ret;
+
+ BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+
+ if (sreq->needs_inv)
+ ret = safexcel_cipher_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_aes_send(async, ring, request,
+ commands, results);
+ return ret;
+}
+
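safexcel_send() is now the only ->send entry point; routing on the per-request needs_inv flag replaces the old trick of swapping ctx->base.send between safexcel_aes_send and safexcel_cipher_send_inv, which could race when several requests share one tfm. The BUG_ON encodes that EIP97 has no context record cache, so an invalidation request can never be queued on it. The dequeue loop keeps calling through the single pointer, e.g. (sketch):

	/* Sketch: caller side stays one indirect call per request. */
	ret = ctx->base.send(req, ring, request, &commands, &results);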
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct skcipher_request req;
+ SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct skcipher_request));
+ memset(req, 0, sizeof(struct skcipher_request));
/* create invalidation request */
init_completion(&result.completion);
- skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ safexcel_inv_complete, &result);
- skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_cipher_send_inv;
+ sreq->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -424,19 +472,21 @@ static int safexcel_aes(struct skcipher_request *req,
enum safexcel_cipher_direction dir, u32 mode)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
- ctx->direction = dir;
+ sreq->needs_inv = false;
+ sreq->direction = dir;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_cipher_send_inv;
+ if (priv->version == EIP197 && ctx->base.needs_inv) {
+ sreq->needs_inv = true;
+ ctx->base.needs_inv = false;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
- ctx->base.send = safexcel_aes_send;
-
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
EIP197_GFP_FLAGS(req->base),
&ctx->base.ctxr_dma);
@@ -450,8 +500,8 @@ static int safexcel_aes(struct skcipher_request *req,
ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -476,6 +526,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
alg.skcipher.base);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_send;
+ ctx->base.handle_result = safexcel_handle_result;
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct safexcel_cipher_req));
return 0;
}
@@ -494,9 +549,14 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
- ret = safexcel_cipher_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_cipher_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_ecb_aes = {
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 74feb6227101..122a2a58e98f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -14,7 +14,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-
#include "safexcel.h"
struct safexcel_ahash_ctx {
@@ -32,9 +31,12 @@ struct safexcel_ahash_req {
bool last_req;
bool finish;
bool hmac;
+ bool needs_inv;
+
+ int nents;
u8 state_sz; /* expected state size, only set once */
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
u64 len;
u64 processed;
@@ -119,15 +121,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
}
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct safexcel_result_desc *rdesc;
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
- int cache_len, result_sz = sreq->state_sz;
+ int cache_len;
*ret = 0;
@@ -148,11 +150,13 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->finish)
- result_sz = crypto_ahash_digestsize(ahash);
- memcpy(sreq->state, areq->result, result_sz);
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
- dma_unmap_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+ if (sreq->nents) {
+ dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ sreq->nents = 0;
+ }
safexcel_free_context(priv, async, sreq->state_sz);
@@ -165,9 +169,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
return 1;
}
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ struct safexcel_request *request,
+ int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -177,7 +181,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
queued = len = req->len - req->processed;
if (queued < crypto_ahash_blocksize(ahash))
@@ -185,17 +189,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
else
cache_len = queued - areq->nbytes;
- /*
- * If this is not the last request and the queued data does not fit
- * into full blocks, cache it for the next send() call.
- */
- extra = queued & (crypto_ahash_blocksize(ahash) - 1);
- if (!req->last_req && extra) {
- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
- req->cache_next, extra, areq->nbytes - extra);
-
- queued -= extra;
- len -= extra;
+ if (!req->last_req) {
+ /* If this is not the last request and the queued data does not
+ * fit into full blocks, cache it for the next send() call.
+ */
+ extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+ if (!extra)
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
+ extra = queued - crypto_ahash_blocksize(ahash);
+
+ if (extra) {
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req->cache_next, extra,
+ areq->nbytes - extra);
+
+ queued -= extra;
+ len -= extra;
+
+ if (!queued) {
+ *commands = 0;
+ *results = 0;
+ return 0;
+ }
+ }
}
spin_lock_bh(&priv->ring[ring].egress_lock);
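The extra branch for a block-aligned tail exists because the engine must be left with data for the final() pass: an update() that ends exactly on a block boundary now keeps its last block in the cache rather than sending everything. A worked example, assuming a 64-byte block size (a sketch, not driver code):

	/* queued = 192, not the last request:
	 *   extra = 192 & 63  -> 0, no sub-block remainder
	 *   extra = 192 - 64  -> 64, hold back the last full block
	 *   queued = 128      -> two blocks go to the engine now
	 */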
@@ -233,15 +251,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
}
/* Now handle the current ahash request buffer(s) */
- nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
- DMA_TO_DEVICE);
- if (!nents) {
+ req->nents = dma_map_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
+ if (!req->nents) {
ret = -ENOMEM;
goto cdesc_rollback;
}
- for_each_sg(areq->src, sg, nents, i) {
+ for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
@@ -273,7 +291,7 @@ send_command:
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
- ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+ ctx->base.result_dma = dma_map_single(priv->dev, req->state,
req->state_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
ret = -EINVAL;
@@ -292,7 +310,6 @@ send_command:
req->processed += len;
request->req = &areq->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = 1;
@@ -374,8 +391,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_ahash_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -384,14 +399,36 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
return 1;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int err;
+
+ BUG_ON(priv->version == EIP97 && req->needs_inv);
+
+ if (req->needs_inv) {
+ req->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -400,8 +437,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
- ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
+ ret = safexcel_invalidate_cache(async, ctx->priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -412,32 +448,50 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_ahash_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int ret;
+
+ if (req->needs_inv)
+ ret = safexcel_ahash_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_ahash_send_req(async, ring, request,
+ commands, results);
+ return ret;
+}
+
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct ahash_request req;
+ AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+ struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct ahash_request));
+ memset(req, 0, sizeof(struct ahash_request));
/* create invalidation request */
init_completion(&result.completion);
- ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
safexcel_inv_complete, &result);
- ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_ahash_send_inv;
+ rctx->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -450,13 +504,23 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
return 0;
}
+/* safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. when there is at least one block of data in the pipe.
+ */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
int queued, cache_len;
+ /* cache_len: everything accepted by the driver but not sent yet,
+ * tot sz handled by update() - last req sz - tot sz handled by send()
+ */
cache_len = req->len - areq->nbytes - req->processed;
+ /* queued: everything accepted by the driver which will be handled by
+ * the next send() calls.
+ * tot sz handled by update() - tot sz handled by send()
+ */
queued = req->len - req->processed;
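Spelling out the bookkeeping: req->len is everything ever passed to update(), req->processed is what send() already pushed to the engine, and areq->nbytes is the current request. A numeric sketch of the two derived values:

	/* update() calls of 10, 20, then 30 bytes; 16 bytes already sent:
	 *   req->len = 60, req->processed = 16, areq->nbytes = 30
	 *   cache_len = 60 - 30 - 16 = 14  (accepted earlier, not yet sent)
	 *   queued    = 60 - 16      = 44  (handled by the coming send()s)
	 */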
/*
@@ -470,7 +534,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
return areq->nbytes;
}
- /* We could'nt cache all the data */
+ /* We couldn't cache all the data */
return -E2BIG;
}
@@ -481,14 +545,23 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
- ctx->base.send = safexcel_ahash_send;
-
- if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
- ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+ req->needs_inv = false;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_ahash_send_inv;
+ if (priv->version == EIP197 &&
+ !ctx->base.needs_inv && req->processed &&
+ ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ /* We're still setting needs_inv here, even though it is
+ * cleared right away, because the needs_inv flag can be
+ * set in other functions and we want to keep the same
+ * logic.
+ */
+ ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
+ if (ctx->base.needs_inv) {
+ ctx->base.needs_inv = false;
+ req->needs_inv = true;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -504,8 +577,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -588,7 +661,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->processed = req->processed;
memcpy(export->state, req->state, req->state_sz);
- memset(export->cache, 0, crypto_ahash_blocksize(ahash));
memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
return 0;
@@ -622,6 +694,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
struct safexcel_alg_template, alg.ahash);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_ahash_send;
+ ctx->base.handle_result = safexcel_handle_result;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct safexcel_ahash_req));
@@ -668,9 +742,14 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- ret = safexcel_ahash_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_ahash_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_sha1 = {
@@ -809,7 +888,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
req->last_req = true;
ret = crypto_ahash_update(areq);
- if (ret && ret != -EINPROGRESS)
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
return ret;
wait_for_completion_interruptible(&result.completion);
@@ -874,6 +953,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret, i;
@@ -881,11 +961,13 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
- if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
- ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr) {
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+ ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 8705b28eb02c..717a26607bdb 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,12 +260,11 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
- crypt_virt = dma_alloc_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_ATOMIC);
+ crypt_virt = dma_zalloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
- memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
return 0;
}
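dma_zalloc_coherent() is simply dma_alloc_coherent() plus a memset() of the returned buffer, so the explicit clear can go. Equivalence sketch (names illustrative):

	/* Before: allocate, then zero by hand. */
	buf = dma_alloc_coherent(dev, size, &phys, GFP_ATOMIC);
	if (buf)
		memset(buf, 0, size);

	/* After: one call, the buffer arrives zeroed. */
	buf = dma_zalloc_coherent(dev, size, &phys, GFP_ATOMIC);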
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 293832488cc9..aca2373fa1de 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -15,6 +15,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -24,6 +25,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -409,8 +411,11 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
- engine->sram_dma = phys_to_dma(cesa->dev,
- (phys_addr_t)res->start);
+ engine->sram_dma = dma_map_resource(cesa->dev, res->start,
+ cesa->sram_size,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(cesa->dev, engine->sram_dma))
+ return -ENOMEM;
return 0;
}
@@ -420,11 +425,12 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- if (!engine->pool)
- return;
-
- gen_pool_free(engine->pool, (unsigned long)engine->sram,
- cesa->sram_size);
+ if (engine->pool)
+ gen_pool_free(engine->pool, (unsigned long)engine->sram,
+ cesa->sram_size);
+ else
+ dma_unmap_resource(cesa->dev, engine->sram_dma,
+ cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
static int mv_cesa_probe(struct platform_device *pdev)
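Unlike the phys_to_dma() shortcut, dma_map_resource() goes through the DMA API, so an IOMMU (if present) gets a proper mapping for the SRAM's MMIO range, and dma_unmap_resource() in mv_cesa_put_sram() is its required counterpart. The general pairing follows the streaming-DMA pattern (sketch; dev and size stand in for the driver's values):

	dma_addr_t dma;

	dma = dma_map_resource(dev, res->start, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hardware addresses the region through 'dma' ... */

	dma_unmap_resource(dev, dma, size, DMA_BIDIRECTIONAL, 0);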
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 48de52cf2ecc..662e709812cc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
CWQ_ENTRY_SIZE, 0, NULL);
if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
return -ENOMEM;
}
return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
{
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+ queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}
static long spu_queue_register_workfn(void *arg)
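Clearing the slots after kmem_cache_destroy() is what makes the error and exit paths safe: kmem_cache_destroy(NULL) is a no-op, and a later queue_cache_init() will see NULL and allocate afresh instead of reusing a stale pointer to a freed cache. The idiom in isolation (sketch):

	kmem_cache_destroy(cache);
	cache = NULL;	/* further destroys are no-ops; re-init reallocates */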
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index f2246a5abcf6..1e87637c412d 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -743,8 +743,8 @@ static int nx842_open_percpu_txwins(void)
}
if (!per_cpu(cpu_txwin, i)) {
- /* shoudn't happen, Each chip will have NX engine */
- pr_err("NX engine is not availavle for CPU %d\n", i);
+ /* shouldn't happen, each chip will have an NX engine */
+ pr_err("NX engine is not available for CPU %d\n", i);
return -EINVAL;
}
}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 5a6dc53b2b9d..4ef52c9d72fc 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1618,7 +1618,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table);
static int spacc_probe(struct platform_device *pdev)
{
- int i, err, ret = -EINVAL;
+ int i, err, ret;
struct resource *mem, *irq;
struct device_node *np = pdev->dev.of_node;
struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
@@ -1679,22 +1679,18 @@ static int spacc_probe(struct platform_device *pdev)
engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n");
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
return PTR_ERR(engine->clk);
}
if (clk_prepare_enable(engine->clk)) {
dev_info(&pdev->dev, "unable to prepare/enable clk\n");
- clk_put(engine->clk);
- return -EIO;
+ ret = -EIO;
+ goto err_clk_put;
}
- err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (err) {
- clk_disable_unprepare(engine->clk);
- clk_put(engine->clk);
- return err;
- }
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
/*
@@ -1725,6 +1721,7 @@ static int spacc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, engine);
+ ret = -EINVAL;
INIT_LIST_HEAD(&engine->registered_algs);
for (i = 0; i < engine->num_algs; ++i) {
engine->algs[i].engine = engine;
@@ -1759,6 +1756,16 @@ static int spacc_probe(struct platform_device *pdev)
engine->aeads[i].alg.base.cra_name);
}
+ if (!ret)
+ return 0;
+
+ del_timer_sync(&engine->packet_timeout);
+ device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+err_clk_disable:
+ clk_disable_unprepare(engine->clk);
+err_clk_put:
+ clk_put(engine->clk);
+
return ret;
}
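spacc_probe() now follows the standard unwind idiom: each acquired resource gets a label, and an error jumps to the label releasing everything obtained so far, in reverse order, so no path duplicates cleanup calls. Skeleton of the pattern (a sketch with illustrative names, not the driver's exact code):

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, "ref");
	if (IS_ERR(clk))
		return PTR_ERR(clk);		/* nothing to unwind yet */

	ret = clk_prepare_enable(clk);
	if (ret)
		goto err_clk_put;

	ret = device_create_file(&pdev->dev, &dev_attr_example);
	if (ret)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	clk_disable_unprepare(clk);
err_clk_put:
	clk_put(clk);
	return ret;
}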
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 8c4fd255a601..ff149e176f64 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -117,19 +117,19 @@ void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
#define CSR_RETRY_TIMES 500
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
- unsigned char ae, unsigned int csr,
- unsigned int *value)
+ unsigned char ae, unsigned int csr)
{
unsigned int iterations = CSR_RETRY_TIMES;
+ int value;
do {
- *value = GET_AE_CSR(handle, ae, csr);
+ value = GET_AE_CSR(handle, ae, csr);
if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
- return 0;
+ return value;
} while (iterations--);
pr_err("QAT: Read CSR timeout\n");
- return -EFAULT;
+ return 0;
}
static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
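Returning the CSR value directly turns every read-then-check pair into a plain assignment; the trade-off is that a timed-out read now yields 0 instead of -EFAULT, so callers can no longer distinguish a failure from a register that reads as zero. Call-site shape before and after (sketch):

	/* Before: value via out parameter, status via return code. */
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);

	/* After: value via return; a read timeout comes back as 0. */
	csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);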
@@ -154,9 +154,9 @@ static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+ *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
@@ -169,13 +169,13 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
int times = MAX_RETRY_TIMES;
int elapsed_cycles = 0;
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
while ((int)cycles > elapsed_cycles && times--) {
if (chk_inactive)
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
elapsed_cycles = cur_cnt - base_cnt;
@@ -207,7 +207,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
}
/* Sets the acceleration engine context mode to either four or eight */
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
@@ -221,7 +221,7 @@ int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
new_csr = (mode) ?
@@ -240,7 +240,7 @@ int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
switch (lm_type) {
case ICP_LMEM0:
@@ -328,7 +328,7 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
@@ -340,16 +340,18 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
-static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- unsigned int ae_csr, unsigned int *csr_val)
+ unsigned int ae_csr)
{
- unsigned int cur_ctx;
+ unsigned int cur_ctx, csr_val;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+ return csr_val;
}
static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
@@ -358,7 +360,7 @@ static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -374,7 +376,7 @@ static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -392,13 +394,11 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
int times = MAX_RETRY_TIMES;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
do {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
} while (times-- && (cur_cnt == base_cnt));
@@ -416,8 +416,8 @@ int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
{
unsigned int enable = 0, active = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
+ enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
(active & (1 << ACS_ABO_BITPOS)))
return 1;
@@ -540,7 +540,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK &
(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
@@ -583,7 +583,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int ustore_addr;
unsigned int i;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -604,7 +604,7 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK;
ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
ctx |= (ctx_mask << CE_ENABLE_BITPOS);
@@ -636,10 +636,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
int ret = 0;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr_val &= IGNORE_W1C_MASK;
csr_val |= CE_NN_MODE;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
@@ -648,7 +648,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
qat_hal_wr_indr_csr(handle, ae, ctx_mask,
@@ -760,7 +760,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
unsigned int csr_val = 0;
- qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
csr_val |= 0x1;
qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
}
@@ -826,16 +826,16 @@ static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int i, uwrd_lo, uwrd_hi;
unsigned int ustore_addr, misc_control;
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+ misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
misc_control & 0xfffffffb);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
for (i = 0; i < words_num; i++) {
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
uaddr++;
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+ uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+ uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
uword[i] = uwrd_hi;
uword[i] = (uword[i] << 0x20) | uwrd_lo;
}
@@ -849,7 +849,7 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
{
unsigned int i, ustore_addr;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -890,26 +890,27 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
}
/* save current context */
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
- &ind_lm_addr_byte0);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
- &ind_lm_addr_byte1);
+ ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+ ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+ ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_0_BYTE_INDEX);
+ ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_1_BYTE_INDEX);
if (inst_num <= MAX_EXEC_INST)
qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+ savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
- qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
- qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
- &ind_cnt_sig);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
- qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+ savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ FUTURE_COUNT_SIGNAL_INDIRECT);
+ ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_SIG_EVENTS_INDIRECT);
+ act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
/* execute micro codes */
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
@@ -927,8 +928,8 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
if (endpc) {
unsigned int ctx_status;
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
- &ctx_status);
+ ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_STS_INDIRECT);
*endpc = ctx_status & handle->hal_handle->upc_mask;
}
/* restore to saved context */
@@ -938,7 +939,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
handle->hal_handle->upc_mask & savpc);
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
@@ -986,16 +987,16 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
break;
}
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
if (ctx != (savctx & ACS_ACNO))
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
ctx & ACS_ACNO);
qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr = UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
insts = qat_hal_set_uword_ecc(insts);
@@ -1011,7 +1012,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
* the instruction should have been executed
* prior to clearing the ECS in putUwords
*/
- qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+ *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
if (ctx != (savctx & ACS_ACNO))
@@ -1188,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned short mask;
unsigned short dr_offset = 0x10;
- status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1238,7 +1239,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
const unsigned short gprnum = 0, dly = num_inst * 0x5;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1282,7 +1283,7 @@ static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
unsigned int ctx_enables;
int stat = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
@@ -1299,7 +1300,7 @@ static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
{
unsigned int ctx_enables;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (ctx_enables & CE_INUSE_CONTEXTS) {
/* 4-ctx mode */
*relreg = absreg_num & 0x1F;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 142c6020cec7..188f44b7eb27 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1,17 +1,13 @@
-/*
- * Cryptographic API.
- *
- * Support for Samsung S5PV210 and Exynos HW acceleration.
- *
- * Copyright (C) 2011 NetUP Inc. All rights reserved.
- * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Hash part based on omap-sham.c driver.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Cryptographic API.
+//
+// Support for Samsung S5PV210 and Exynos HW acceleration.
+//
+// Copyright (C) 2011 NetUP Inc. All rights reserved.
+// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
+//
+// Hash part based on omap-sham.c driver.
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -1461,7 +1457,7 @@ static void s5p_hash_tasklet_cb(unsigned long data)
&dd->hash_flags)) {
/* hash or semi-hash ready */
clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
- goto finish;
+ goto finish;
}
}
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 602332e02729..63aa78c0b12b 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,4 +1,4 @@
-config CRC_DEV_STM32
+config CRYPTO_DEV_STM32_CRC
tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
@@ -6,7 +6,7 @@ config CRC_DEV_STM32
This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
-config HASH_DEV_STM32
+config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32
depends on HAS_DMA
@@ -18,3 +18,12 @@ config HASH_DEV_STM32
help
This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
+
+config CRYPTO_DEV_STM32_CRYP
+ tristate "Support for STM32 cryp accelerators"
+ depends on ARCH_STM32
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 73cd56cad0cc..53d1bb94b221 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o
-obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
new file mode 100644
index 000000000000..4a06a7a665ee
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+
+#define DRIVER_NAME "stm32-cryp"
+
+/* Bit [0] encrypt / decrypt */
+#define FLG_ENCRYPT BIT(0)
+/* Bit [8..1] algo & operation mode */
+#define FLG_AES BIT(1)
+#define FLG_DES BIT(2)
+#define FLG_TDES BIT(3)
+#define FLG_ECB BIT(4)
+#define FLG_CBC BIT(5)
+#define FLG_CTR BIT(6)
+/* Mode mask = bits [15..0] */
+#define FLG_MODE_MASK GENMASK(15, 0)
+
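A request's mode word is built by OR-ing one algorithm bit with one chaining-mode bit, plus FLG_ENCRYPT for the direction, which is what lets the is_*() helpers further down test each property independently. For example, AES-CBC encryption would be encoded as (sketch):

	unsigned long mode = FLG_AES | FLG_CBC | FLG_ENCRYPT;

	/* Keep only the mode bits when merging into the device flags. */
	cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | (mode & FLG_MODE_MASK);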
+/* Registers */
+#define CRYP_CR 0x00000000
+#define CRYP_SR 0x00000004
+#define CRYP_DIN 0x00000008
+#define CRYP_DOUT 0x0000000C
+#define CRYP_DMACR 0x00000010
+#define CRYP_IMSCR 0x00000014
+#define CRYP_RISR 0x00000018
+#define CRYP_MISR 0x0000001C
+#define CRYP_K0LR 0x00000020
+#define CRYP_K0RR 0x00000024
+#define CRYP_K1LR 0x00000028
+#define CRYP_K1RR 0x0000002C
+#define CRYP_K2LR 0x00000030
+#define CRYP_K2RR 0x00000034
+#define CRYP_K3LR 0x00000038
+#define CRYP_K3RR 0x0000003C
+#define CRYP_IV0LR 0x00000040
+#define CRYP_IV0RR 0x00000044
+#define CRYP_IV1LR 0x00000048
+#define CRYP_IV1RR 0x0000004C
+
+/* Registers values */
+#define CR_DEC_NOT_ENC 0x00000004
+#define CR_TDES_ECB 0x00000000
+#define CR_TDES_CBC 0x00000008
+#define CR_DES_ECB 0x00000010
+#define CR_DES_CBC 0x00000018
+#define CR_AES_ECB 0x00000020
+#define CR_AES_CBC 0x00000028
+#define CR_AES_CTR 0x00000030
+#define CR_AES_KP 0x00000038
+#define CR_AES_UNKNOWN 0xFFFFFFFF
+#define CR_ALGO_MASK 0x00080038
+#define CR_DATA32 0x00000000
+#define CR_DATA16 0x00000040
+#define CR_DATA8 0x00000080
+#define CR_DATA1 0x000000C0
+#define CR_KEY128 0x00000000
+#define CR_KEY192 0x00000100
+#define CR_KEY256 0x00000200
+#define CR_FFLUSH 0x00004000
+#define CR_CRYPEN 0x00008000
+
+#define SR_BUSY 0x00000010
+#define SR_OFNE 0x00000004
+
+#define IMSCR_IN BIT(0)
+#define IMSCR_OUT BIT(1)
+
+#define MISR_IN BIT(0)
+#define MISR_OUT BIT(1)
+
+/* Misc */
+#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
+#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+
+struct stm32_cryp_ctx {
+ struct stm32_cryp *cryp;
+ int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ unsigned long flags;
+};
+
+struct stm32_cryp_reqctx {
+ unsigned long mode;
+};
+
+struct stm32_cryp {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long flags;
+ u32 irq_status;
+ struct stm32_cryp_ctx *ctx;
+
+ struct crypto_engine *engine;
+
+ struct mutex lock; /* protects req */
+ struct ablkcipher_request *req;
+
+ size_t hw_blocksize;
+
+ size_t total_in;
+ size_t total_in_save;
+ size_t total_out;
+ size_t total_out_save;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
+ struct scatterlist *out_sg_save;
+
+ struct scatterlist in_sgl;
+ struct scatterlist out_sgl;
+ bool sgs_copied;
+
+ int in_sg_len;
+ int out_sg_len;
+
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
+
+ u32 last_ctr[4];
+};
+
+struct stm32_cryp_list {
+ struct list_head dev_list;
+ spinlock_t lock; /* protect dev_list */
+};
+
+static struct stm32_cryp_list cryp_list = {
+ .dev_list = LIST_HEAD_INIT(cryp_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(cryp_list.lock),
+};
+
+static inline bool is_aes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_AES;
+}
+
+static inline bool is_des(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_DES;
+}
+
+static inline bool is_tdes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_TDES;
+}
+
+static inline bool is_ecb(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ECB;
+}
+
+static inline bool is_cbc(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CBC;
+}
+
+static inline bool is_ctr(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CTR;
+}
+
+static inline bool is_encrypt(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ENCRYPT;
+}
+
+static inline bool is_decrypt(struct stm32_cryp *cryp)
+{
+ return !is_encrypt(cryp);
+}
+
+static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst)
+{
+ return readl_relaxed(cryp->regs + ofst);
+}
+
+static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val)
+{
+ writel_relaxed(val, cryp->regs + ofst);
+}
+
+static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ !(status & SR_BUSY), 10, 100000);
+}
+
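readl_relaxed_poll_timeout() from <linux/iopoll.h> re-reads CRYP_SR into status until the SR_BUSY bit clears, sleeping roughly 10 µs between polls and returning -ETIMEDOUT after 100 ms. A typical call site could look like this (sketch; cfg is illustrative):

	/* Flush the FIFOs, then wait for the engine to go idle. */
	stm32_cryp_write(cryp, CRYP_CR, cfg | CR_FFLUSH);
	if (stm32_cryp_wait_busy(cryp)) {
		dev_err(cryp->dev, "engine stuck busy\n");
		return -ETIMEDOUT;
	}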
+static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
+{
+ struct stm32_cryp *tmp, *cryp = NULL;
+
+ spin_lock_bh(&cryp_list.lock);
+ if (!ctx->cryp) {
+ list_for_each_entry(tmp, &cryp_list.dev_list, list) {
+ cryp = tmp;
+ break;
+ }
+ ctx->cryp = cryp;
+ } else {
+ cryp = ctx->cryp;
+ }
+
+ spin_unlock_bh(&cryp_list.lock);
+
+ return cryp;
+}
+
+static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total,
+ size_t align)
+{
+ int len = 0;
+
+ if (!total)
+ return 0;
+
+ if (!IS_ALIGNED(total, align))
+ return -EINVAL;
+
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(sg->length, align))
+ return -EINVAL;
+
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+
+ if (len != total)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
+{
+ int ret;
+
+ ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in,
+ cryp->hw_blocksize);
+ if (ret)
+ return ret;
+
+ ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
+ cryp->hw_blocksize);
+
+ return ret;
+}
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
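+/*
+ * Fallback for requests that fail the alignment check: linearize the
+ * data into freshly allocated, block-aligned bounce buffers and swap
+ * in single-entry scatterlists. The output is copied back to the
+ * original scatterlist in stm32_cryp_finish_req().
+ */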
+static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
+{
+ void *buf_in, *buf_out;
+ int pages, total_in, total_out;
+
+ if (!stm32_cryp_check_io_aligned(cryp)) {
+ cryp->sgs_copied = 0;
+ return 0;
+ }
+
+ total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
+ pages = total_in ? get_order(total_in) : 1;
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
+ pages = total_out ? get_order(total_out) : 1;
+ buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ if (!buf_in || !buf_out) {
+ dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
+ cryp->sgs_copied = 0;
+ /* free_pages() ignores a NULL (zero) address */
+ free_pages((unsigned long)buf_in, total_in ? get_order(total_in) : 1);
+ free_pages((unsigned long)buf_out, total_out ? get_order(total_out) : 1);
+ return -EFAULT;
+ }
+
+ sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
+
+ sg_init_one(&cryp->in_sgl, buf_in, total_in);
+ cryp->in_sg = &cryp->in_sgl;
+ cryp->in_sg_len = 1;
+
+ sg_init_one(&cryp->out_sgl, buf_out, total_out);
+ cryp->out_sg_save = cryp->out_sg;
+ cryp->out_sg = &cryp->out_sgl;
+ cryp->out_sg_len = 1;
+
+ cryp->sgs_copied = 1;
+
+ return 0;
+}
+
+static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
+{
+ if (!iv)
+ return;
+
+ stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++));
+
+ if (is_aes(cryp)) {
+ stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++));
+ }
+}
+
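+/*
+ * AES/TDES keys are loaded starting at CRYP_K3RR, walking down one
+ * 32-bit key register per word with the last key word written first;
+ * plain DES uses only the K1 register pair.
+ */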
+static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
+{
+ unsigned int i;
+ int r_id;
+
+ if (is_des(c)) {
+ stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0]));
+ stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1]));
+ } else {
+ r_id = CRYP_K3RR;
+ for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
+ stm32_cryp_write(c, r_id,
+ cpu_to_be32(c->ctx->key[i - 1]));
+ }
+}
+
+static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
+{
+ if (is_aes(cryp) && is_ecb(cryp))
+ return CR_AES_ECB;
+
+ if (is_aes(cryp) && is_cbc(cryp))
+ return CR_AES_CBC;
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ return CR_AES_CTR;
+
+ if (is_des(cryp) && is_ecb(cryp))
+ return CR_DES_ECB;
+
+ if (is_des(cryp) && is_cbc(cryp))
+ return CR_DES_CBC;
+
+ if (is_tdes(cryp) && is_ecb(cryp))
+ return CR_TDES_ECB;
+
+ if (is_tdes(cryp) && is_cbc(cryp))
+ return CR_TDES_CBC;
+
+ dev_err(cryp->dev, "Unknown mode\n");
+ return CR_AES_UNKNOWN;
+}
+
+static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
+{
+ int ret;
+ u32 cfg, hw_mode;
+
+ /* Disable interrupt */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+ /* Set key */
+ stm32_cryp_hw_write_key(cryp);
+
+ /* Set configuration */
+ cfg = CR_DATA8 | CR_FFLUSH;
+
+ switch (cryp->ctx->keylen) {
+ case AES_KEYSIZE_128:
+ cfg |= CR_KEY128;
+ break;
+
+ case AES_KEYSIZE_192:
+ cfg |= CR_KEY192;
+ break;
+
+ default:
+ case AES_KEYSIZE_256:
+ cfg |= CR_KEY256;
+ break;
+ }
+
+ hw_mode = stm32_cryp_get_hw_mode(cryp);
+ if (hw_mode == CR_AES_UNKNOWN)
+ return -EINVAL;
+
+ /* AES ECB/CBC decrypt: run key preparation first */
+ if (is_decrypt(cryp) &&
+ ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN);
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_busy(cryp);
+ if (ret) {
+ dev_err(cryp->dev, "Timeout (key preparation)\n");
+ return ret;
+ }
+ }
+
+ cfg |= hw_mode;
+
+ if (is_decrypt(cryp))
+ cfg |= CR_DEC_NOT_ENC;
+
+ /* Apply config and flush (valid when CRYPEN = 0) */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ switch (hw_mode) {
+ case CR_DES_CBC:
+ case CR_TDES_CBC:
+ case CR_AES_CBC:
+ case CR_AES_CTR:
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Enable now */
+ cfg |= CR_CRYPEN;
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ return 0;
+}
+
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
+{
+ int err = 0;
+
+ if (cryp->sgs_copied) {
+ void *buf_in, *buf_out;
+ int pages, len;
+
+ buf_in = sg_virt(&cryp->in_sgl);
+ buf_out = sg_virt(&cryp->out_sgl);
+
+ sg_copy_buf(buf_out, cryp->out_sg_save, 0,
+ cryp->total_out_save, 1);
+
+ len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_in, pages);
+
+ len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_out, pages);
+ }
+
+ crypto_finalize_cipher_request(cryp->engine, cryp->req, err);
+ cryp->req = NULL;
+
+ memset(cryp->ctx->key, 0, cryp->ctx->keylen);
+
+ mutex_unlock(&cryp->lock);
+}
+
+static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
+{
+ /* Enable interrupt and let the IRQ handler do everything */
+ stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT);
+
+ return 0;
+}
+
+static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+
+ return 0;
+}
+
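+/*
+ * Every transformation is queued on the crypto engine; the engine
+ * later invokes the prepare and one-request callbacks registered in
+ * probe once the hardware is available.
+ */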
+static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+ if (!cryp)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return crypto_transfer_cipher_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != (3 * DES_KEY_SIZE))
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
+}
+
+static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
+}
+
+static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
+}
+
+static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
+}
+
+static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
+}
+
+static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
+}
+
+static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
+}
+
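+/*
+ * Called by the engine before each request: cryp->lock is taken here
+ * and only released in stm32_cryp_finish_req() (or on the error path
+ * below), serializing hardware access for the whole request.
+ */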
+static int stm32_cryp_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx;
+ struct stm32_cryp *cryp;
+ struct stm32_cryp_reqctx *rctx;
+ int ret;
+
+ if (!req)
+ return -EINVAL;
+
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ mutex_lock(&cryp->lock);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->mode &= FLG_MODE_MASK;
+
+ ctx->cryp = cryp;
+
+ cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode;
+ cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
+ cryp->ctx = ctx;
+
+ cryp->req = req;
+ cryp->total_in = req->nbytes;
+ cryp->total_out = cryp->total_in;
+
+ cryp->total_in_save = cryp->total_in;
+ cryp->total_out_save = cryp->total_out;
+
+ cryp->in_sg = req->src;
+ cryp->out_sg = req->dst;
+ cryp->out_sg_save = cryp->out_sg;
+
+ cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
+ if (cryp->in_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get in_sg_len\n");
+ ret = cryp->in_sg_len;
+ goto out;
+ }
+
+ cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
+ if (cryp->out_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get out_sg_len\n");
+ ret = cryp->out_sg_len;
+ goto out;
+ }
+
+ ret = stm32_cryp_copy_sgs(cryp);
+ if (ret)
+ goto out;
+
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+
+ ret = stm32_cryp_hw_init(cryp);
+out:
+ if (ret)
+ mutex_unlock(&cryp->lock);
+
+ return ret;
+}
+
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ return stm32_cryp_prepare_req(engine, req);
+}
+
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ return stm32_cryp_cpu_start(cryp);
+}
+
+static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->out_walk, n);
+
+ if (unlikely(cryp->out_sg->length == _walked_out)) {
+ cryp->out_sg = sg_next(cryp->out_sg);
+ if (cryp->out_sg) {
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+ return (sg_virt(cryp->out_sg) + _walked_out);
+ }
+ }
+
+ return (u32 *)((u8 *)dst + n);
+}
+
+static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->in_walk, n);
+
+ if (unlikely(cryp->in_sg->length == _walked_in)) {
+ cryp->in_sg = sg_next(cryp->in_sg);
+ if (cryp->in_sg) {
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ return (sg_virt(cryp->in_sg) + _walked_in);
+ }
+ }
+
+ return (u32 *)((u8 *)src + n);
+}
+
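+/*
+ * The hardware CTR counter does not carry beyond its low 32-bit word:
+ * when IV3 is about to wrap, propagate the carry in software and
+ * reprogram the full IV, with CRYPEN cleared around the update.
+ */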
+static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
+{
+ u32 cr;
+
+ if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) {
+ cryp->last_ctr[3] = 0;
+ cryp->last_ctr[2]++;
+ if (!cryp->last_ctr[2]) {
+ cryp->last_ctr[1]++;
+ if (!cryp->last_ctr[1])
+ cryp->last_ctr[0]++;
+ }
+
+ cr = stm32_cryp_read(cryp, CRYP_CR);
+ stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
+
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr);
+
+ stm32_cryp_write(cryp, CRYP_CR, cr);
+ }
+
+ cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR);
+ cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR);
+ cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR);
+ cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR);
+}
+
+static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 d32, *dst;
+ u8 *d8;
+
+ dst = sg_virt(cryp->out_sg) + _walked_out;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_out >= sizeof(u32))) {
+ /* Read a full u32 */
+ *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
+ cryp->total_out -= sizeof(u32);
+ } else if (!cryp->total_out) {
+ /* Empty fifo out (data from input padding) */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ } else {
+ /* Read less than a u32 */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ d8 = (u8 *)&d32;
+
+ for (j = 0; j < cryp->total_out; j++) {
+ *((u8 *)dst) = *(d8++);
+ dst = stm32_cryp_next_out(cryp, dst, 1);
+ }
+ cryp->total_out = 0;
+ }
+ }
+
+ return !cryp->total_out || !cryp->total_in;
+}
+
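+/*
+ * Push one hardware block into the input FIFO, zero-padding the last
+ * words when fewer than hw_blocksize payload bytes remain.
+ */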
+static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 *src;
+ u8 d8[4];
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_in >= sizeof(u32))) {
+ /* Write a full u32 */
+ stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+ src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+ cryp->total_in -= sizeof(u32);
+ } else if (!cryp->total_in) {
+ /* Write padding data */
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ } else {
+ /* Write less than a u32 */
+ memset(d8, 0, sizeof(u32));
+ for (j = 0; j < cryp->total_in; j++) {
+ d8[j] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ }
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ cryp->total_in = 0;
+ }
+ }
+}
+
+static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
+{
+ if (unlikely(!cryp->total_in)) {
+ dev_warn(cryp->dev, "No more data to process\n");
+ return;
+ }
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ stm32_cryp_check_ctr_counter(cryp);
+
+ stm32_cryp_irq_write_block(cryp);
+}
+
+static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ if (cryp->irq_status & MISR_OUT) {
+ /* Output FIFO IRQ: read data */
+ if (unlikely(stm32_cryp_irq_read_data(cryp))) {
+ /* All bytes processed, finish */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp);
+ return IRQ_HANDLED;
+ }
+ }
+
+ if (cryp->irq_status & MISR_IN) {
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ }
+
+ return IRQ_HANDLED;
+}
+
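+/*
+ * The hard IRQ handler only latches the masked interrupt status; all
+ * FIFO draining and refilling is done in the threaded handler above.
+ */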
+static irqreturn_t stm32_cryp_irq(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR);
+
+ return IRQ_WAKE_THREAD;
+}
+
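+/*
+ * Priority 200 ranks these ciphers above the generic software
+ * implementations (typically priority 100), so the hardware path is
+ * preferred by default.
+ */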
+static struct crypto_alg crypto_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "stm32-ecb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "stm32-cbc-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "stm32-ctr-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "stm32-ecb-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "stm32-cbc-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "stm32-ecb-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "stm32-cbc-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ }
+},
+};
+
+static const struct of_device_id stm32_dt_ids[] = {
+ { .compatible = "st,stm32f756-cryp", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static int stm32_cryp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_cryp *cryp;
+ struct resource *res;
+ struct reset_control *rst;
+ int irq, ret;
+
+ cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL);
+ if (!cryp)
+ return -ENOMEM;
+
+ cryp->dev = dev;
+
+ mutex_init(&cryp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cryp->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cryp->regs))
+ return PTR_ERR(cryp->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot get IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq,
+ stm32_cryp_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), cryp);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+
+ cryp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cryp->clk)) {
+ dev_err(dev, "Could not get clock\n");
+ return PTR_ERR(cryp->clk);
+ }
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ rst = devm_reset_control_get(dev, NULL);
+ if (!IS_ERR(rst)) {
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ platform_set_drvdata(pdev, cryp);
+
+ spin_lock(&cryp_list.lock);
+ list_add(&cryp->list, &cryp_list.dev_list);
+ spin_unlock(&cryp_list.lock);
+
+ /* Initialize crypto engine */
+ cryp->engine = crypto_engine_alloc_init(dev, 1);
+ if (!cryp->engine) {
+ dev_err(dev, "Could not init crypto engine\n");
+ ret = -ENOMEM;
+ goto err_engine1;
+ }
+
+ cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req;
+ cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req;
+
+ ret = crypto_engine_start(cryp->engine);
+ if (ret) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto err_engine2;
+ }
+
+ ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ if (ret) {
+ dev_err(dev, "Could not register algs\n");
+ goto err_algs;
+ }
+
+ dev_info(dev, "Initialized\n");
+
+ return 0;
+
+err_algs:
+err_engine2:
+ crypto_engine_exit(cryp->engine);
+err_engine1:
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return ret;
+}
+
+static int stm32_cryp_remove(struct platform_device *pdev)
+{
+ struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+
+ if (!cryp)
+ return -ENODEV;
+
+ crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+
+ crypto_engine_exit(cryp->engine);
+
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+static struct platform_driver stm32_cryp_driver = {
+ .probe = stm32_cryp_probe,
+ .remove = stm32_cryp_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = stm32_dt_ids,
+ },
+};
+
+module_platform_driver(stm32_cryp_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 090582baecfe..8f09b8430893 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -208,6 +208,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
@@ -229,6 +230,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32c",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 78fb496ecb4e..fe2af6aa88fc 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -737,7 +737,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
devfreq = devfreq_add_device(dev, profile, governor_name, data);
if (IS_ERR(devfreq)) {
devres_free(ptr);
- return ERR_PTR(-ENOMEM);
+ return devfreq;
}
*ptr = devfreq;
@@ -996,7 +996,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
if (df->governor == governor) {
ret = 0;
goto out;
- } else if (df->governor->immutable || governor->immutable) {
+ } else if ((df->governor && df->governor->immutable) ||
+ governor->immutable) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index bc1cb284111c..12b62d0aac27 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -157,13 +157,13 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
spin_unlock_irqrestore(&dcb->poll->lock, flags);
}
-static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
+static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
struct dma_fence *fence_excl;
- unsigned long events;
+ __poll_t events;
unsigned shared_count, seq;
dmabuf = file->private_data;
@@ -195,7 +195,7 @@ retry:
if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
- unsigned long pevents = POLLIN;
+ __poll_t pevents = POLLIN;
if (shared_count == 0)
pevents |= POLLOUT;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 03830634e141..8e8c4a12a0bc 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -312,7 +312,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int sync_file_poll(struct file *file, poll_table *wait)
+static __poll_t sync_file_poll(struct file *file, poll_table *wait)
{
struct sync_file *sync_file = file->private_data;
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b52b0d55247e..97483df1f82e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2182,7 +2182,7 @@ static int pl08x_terminate_all(struct dma_chan *chan)
}
/* Dequeue jobs and free LLIs */
if (plchan->at) {
- pl08x_desc_free(&plchan->at->vd);
+ vchan_terminate_vdesc(&plchan->at->vd);
plchan->at = NULL;
}
/* Dequeue jobs not yet fired as well */
@@ -2193,6 +2193,13 @@ static int pl08x_terminate_all(struct dma_chan *chan)
return 0;
}
+static void pl08x_synchronize(struct dma_chan *chan)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+ vchan_synchronize(&plchan->vc);
+}
+
static int pl08x_pause(struct dma_chan *chan)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -2773,6 +2780,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->memcpy.device_pause = pl08x_pause;
pl08x->memcpy.device_resume = pl08x_resume;
pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+ pl08x->memcpy.device_synchronize = pl08x_synchronize;
pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
@@ -2802,6 +2810,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_pause = pl08x_pause;
pl08x->slave.device_resume = pl08x_resume;
pl08x->slave.device_terminate_all = pl08x_terminate_all;
+ pl08x->slave.device_synchronize = pl08x_synchronize;
pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.directions =
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6204cc32d09c..847f84a41a69 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -812,7 +812,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
- bcm2835_dma_desc_free(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c->chan_base);
@@ -836,6 +836,13 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void bcm2835_dma_synchronize(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
int irq, unsigned int irq_flags)
{
@@ -942,6 +949,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
od->ddev.device_config = bcm2835_dma_slave_config;
od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+ od->ddev.device_synchronize = bcm2835_dma_synchronize;
od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index f7e965f63274..d9bee65a18a4 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -934,7 +934,7 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
ARRAY_SIZE(am335x_usb_queues_tx));
- if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx)))
+ if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
return false;
cchan->q_num = queues[cchan->port_num].submit;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7373b7a555ec..85820a2d69d4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -511,7 +511,7 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
/* Clear the DMA status and stop the transfer. */
jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
if (jzchan->desc) {
- jz4780_dma_desc_free(&jzchan->desc->vdesc);
+ vchan_terminate_vdesc(&jzchan->desc->vdesc);
jzchan->desc = NULL;
}
@@ -523,6 +523,13 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void jz4780_dma_synchronize(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+ vchan_synchronize(&jzchan->vchan);
+}
+
static int jz4780_dma_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
@@ -813,6 +820,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
dd->device_config = jz4780_dma_config;
dd->device_terminate_all = jz4780_dma_terminate_all;
+ dd->device_synchronize = jz4780_dma_synchronize;
dd->device_tx_status = jz4780_dma_tx_status;
dd->device_issue_pending = jz4780_dma_issue_pending;
dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ec5f9d2bc820..80cc2be6483c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -355,7 +355,7 @@ static void dmatest_callback(void *arg)
{
struct dmatest_done *done = arg;
struct dmatest_thread *thread =
- container_of(arg, struct dmatest_thread, done_wait);
+ container_of(done, struct dmatest_thread, test_done);
if (!thread->done) {
done->done = true;
wake_up_all(done->wait);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 9364a3ed345a..948df1ab5f1a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -860,11 +860,8 @@ static int edma_terminate_all(struct dma_chan *chan)
/* Move the cyclic channel back to default queue */
if (!echan->tc && echan->edesc->cyclic)
edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
- /*
- * free the running request descriptor
- * since it is not in any of the vdesc lists
- */
- edma_desc_free(&echan->edesc->vdesc);
+
+ vchan_terminate_vdesc(&echan->edesc->vdesc);
echan->edesc = NULL;
}
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 0391f930aecc..25cec9c243e1 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -694,7 +694,6 @@ static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
static int mdc_terminate_all(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
- struct mdc_tx_desc *mdesc;
unsigned long flags;
LIST_HEAD(head);
@@ -703,21 +702,28 @@ static int mdc_terminate_all(struct dma_chan *chan)
mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
MDC_CONTROL_AND_STATUS);
- mdesc = mchan->desc;
- mchan->desc = NULL;
+ if (mchan->desc) {
+ vchan_terminate_vdesc(&mchan->desc->vd);
+ mchan->desc = NULL;
+ }
vchan_get_all_descriptors(&mchan->vc, &head);
mdc_get_new_events(mchan);
spin_unlock_irqrestore(&mchan->vc.lock, flags);
- if (mdesc)
- mdc_desc_free(&mdesc->vd);
vchan_dma_desc_free_list(&mchan->vc, &head);
return 0;
}
+static void mdc_synchronize(struct dma_chan *chan)
+{
+ struct mdc_chan *mchan = to_mdc_chan(chan);
+
+ vchan_synchronize(&mchan->vc);
+}
+
static int mdc_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
@@ -952,6 +958,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->dma_dev.device_tx_status = mdc_tx_status;
mdma->dma_dev.device_issue_pending = mdc_issue_pending;
mdma->dma_dev.device_terminate_all = mdc_terminate_all;
+ mdma->dma_dev.device_synchronize = mdc_synchronize;
mdma->dma_dev.device_config = mdc_slave_config;
mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 2184881afe76..e7db24c67030 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1939,4 +1939,10 @@ module_platform_driver(sdma_driver);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
+#if IS_ENABLED(CONFIG_SOC_IMX6Q)
+MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
+#endif
+#if IS_ENABLED(CONFIG_SOC_IMX7D)
+MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
+#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 58d4ccd33672..8b5b23a8ace9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -597,7 +597,6 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
dump_desc_dbg(ioat_chan, desc);
@@ -715,7 +714,6 @@ static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
for (i = 1; i < active; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 01d2a750a621..26b67455208f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -719,7 +719,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
c->phy = NULL;
p->vchan = NULL;
if (p->ds_run) {
- k3_dma_free_desc(&p->ds_run->vd);
+ vchan_terminate_vdesc(&p->ds_run->vd);
p->ds_run = NULL;
}
p->ds_done = NULL;
@@ -730,6 +730,13 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void k3_dma_synchronize(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int k3_dma_transfer_pause(struct dma_chan *chan)
{
struct k3_dma_chan *c = to_k3_chan(chan);
@@ -868,6 +875,7 @@ static int k3_dma_probe(struct platform_device *op)
d->slave.device_pause = k3_dma_transfer_pause;
d->slave.device_resume = k3_dma_transfer_resume;
d->slave.device_terminate_all = k3_dma_terminate_all;
+ d->slave.device_synchronize = k3_dma_synchronize;
d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
/* init virtual channel */
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 5ba5714d0b7c..94d7bd7d2880 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -480,9 +480,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch)
to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
mic_dma_intr_handler, mic_dma_thread_fn,
"mic dma_channel", ch, ch->ch_num);
- if (IS_ERR(ch->cookie))
- return PTR_ERR(ch->cookie);
- return 0;
+ return PTR_ERR_OR_ZERO(ch->cookie);
}
static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index f6dd849159d8..d21c19822feb 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1311,7 +1311,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
- omap_dma_desc_free(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
/* Avoid stopping the dma twice */
if (!c->paused)
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e3669850aef4..963cc5228d05 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -50,6 +50,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -104,6 +105,10 @@ static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
+enum hidma_cap {
+ HIDMA_MSI_CAP = 1,
+ HIDMA_IDENTITY_CAP,
+};
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
@@ -736,25 +741,12 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
#endif
}
-static bool hidma_msi_capable(struct device *dev)
+static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- const char *of_compat;
- int ret = -EINVAL;
-
- if (!adev || acpi_disabled) {
- ret = device_property_read_string(dev, "compatible",
- &of_compat);
- if (ret)
- return false;
+ enum hidma_cap cap;
- ret = strcmp(of_compat, "qcom,hidma-1.1");
- } else {
-#ifdef CONFIG_ACPI
- ret = strcmp(acpi_device_hid(adev), "QCOM8062");
-#endif
- }
- return ret == 0;
+ cap = (enum hidma_cap) device_get_match_data(dev);
+ return cap ? ((cap & test_cap) > 0) : 0;
}
static int hidma_probe(struct platform_device *pdev)
@@ -834,8 +826,7 @@ static int hidma_probe(struct platform_device *pdev)
* Determine the MSI capability of the platform. Old HW doesn't
* support MSI.
*/
- msi = hidma_msi_capable(&pdev->dev);
-
+ msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
@@ -848,7 +839,10 @@ static int hidma_probe(struct platform_device *pdev)
if (!dmadev->nr_descriptors)
dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
- dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+ if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
+ dmadev->chidx = readl(dmadev->dev_trca + 0x40);
+ else
+ dmadev->chidx = readl(dmadev->dev_trca + 0x28);
/* Set DMA mask to 64 bits. */
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -953,7 +947,8 @@ static int hidma_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
{"QCOM8061"},
- {"QCOM8062"},
+ {"QCOM8062", HIDMA_MSI_CAP},
+ {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
@@ -961,7 +956,9 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
- {.compatible = "qcom,hidma-1.1",},
+ {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
+ {.compatible = "qcom,hidma-1.2",
+ .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 4999e266b2de..7c6e2ff212a2 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -393,6 +393,8 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
*/
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
+ unsigned long irqflags;
+
if (cause & HIDMA_ERR_INT_MASK) {
dev_err(lldev->dev, "error 0x%x, disabling...\n",
cause);
@@ -410,6 +412,10 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
return;
}
+ spin_lock_irqsave(&lldev->lock, irqflags);
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ spin_unlock_irqrestore(&lldev->lock, irqflags);
+
/*
* Fine tuned for this HW...
*
@@ -421,9 +427,6 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
* Try to consume as many EVREs as possible.
*/
hidma_handle_tre_completion(lldev);
-
- /* We consumed TREs or there are pending TREs or EVREs. */
- writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
}
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 7335e2eb9b72..000c7019ca7d 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
@@ -356,67 +357,37 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
struct platform_device *pdev_parent = of_find_device_by_node(np);
struct platform_device_info pdevinfo;
- struct of_phandle_args out_irq;
struct device_node *child;
- struct resource *res = NULL;
- const __be32 *cell;
- int ret = 0, size, i, num;
- u64 addr, addr_size;
+ struct resource *res;
+ int ret = 0;
+
+ /* allocate a resource array */
+ res = kcalloc(3, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
for_each_available_child_of_node(np, child) {
- struct resource *res_iter;
struct platform_device *new_pdev;
- cell = of_get_property(child, "reg", &size);
- if (!cell) {
- ret = -EINVAL;
+ ret = of_address_to_resource(child, 0, &res[0]);
+ if (ret)
goto out;
- }
-
- size /= sizeof(*cell);
- num = size /
- (of_n_addr_cells(child) + of_n_size_cells(child)) + 1;
- /* allocate a resource array */
- res = kcalloc(num, sizeof(*res), GFP_KERNEL);
- if (!res) {
- ret = -ENOMEM;
+ ret = of_address_to_resource(child, 1, &res[1]);
+ if (ret)
goto out;
- }
-
- /* read each reg value */
- i = 0;
- res_iter = res;
- while (i < size) {
- addr = of_read_number(&cell[i],
- of_n_addr_cells(child));
- i += of_n_addr_cells(child);
-
- addr_size = of_read_number(&cell[i],
- of_n_size_cells(child));
- i += of_n_size_cells(child);
-
- res_iter->start = addr;
- res_iter->end = res_iter->start + addr_size - 1;
- res_iter->flags = IORESOURCE_MEM;
- res_iter++;
- }
- ret = of_irq_parse_one(child, 0, &out_irq);
- if (ret)
+ ret = of_irq_to_resource(child, 0, &res[2]);
+ if (ret <= 0)
goto out;
- res_iter->start = irq_create_of_mapping(&out_irq);
- res_iter->name = "hidma event irq";
- res_iter->flags = IORESOURCE_IRQ;
-
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.fwnode = &child->fwnode;
pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
pdevinfo.name = child->name;
pdevinfo.id = object_counter++;
pdevinfo.res = res;
- pdevinfo.num_res = num;
+ pdevinfo.num_res = 3;
pdevinfo.data = NULL;
pdevinfo.size_data = 0;
pdevinfo.dma_mask = DMA_BIT_MASK(64);
@@ -434,8 +405,6 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
*/
of_msi_configure(&new_pdev->dev, child);
of_node_put(child);
- kfree(res);
- res = NULL;
}
out:
kfree(res);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index f04c4702d98b..cd92d696bcf9 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -732,7 +732,7 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
/* Dequeue current job */
if (s3cchan->at) {
- s3c24xx_dma_desc_free(&s3cchan->at->vd);
+ vchan_terminate_vdesc(&s3cchan->at->vd);
s3cchan->at = NULL;
}
@@ -744,6 +744,13 @@ unlock:
return ret;
}
+static void s3c24xx_dma_synchronize(struct dma_chan *chan)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+
+ vchan_synchronize(&s3cchan->vc);
+}
+
static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
/* Ensure all queued descriptors are freed */
@@ -1282,6 +1289,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
/* Initialize slave engine for SoC internal dedicated peripherals */
dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1296,6 +1304,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
s3cdma->slave.filter.map = pdata->slave_map;
s3cdma->slave.filter.mapcnt = pdata->slavecnt;
s3cdma->slave.filter.fn = s3c24xx_dma_filter;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b2c7db3e480..e3ff162c03fc 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -741,6 +742,41 @@ static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
/* -----------------------------------------------------------------------------
* Stop and reset
*/
+static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
+{
+ u32 chcr;
+ unsigned int i;
+
+ /*
+ * Ensure that the DE bit actually reads back as zero after it
+ * has been cleared.
+ */
+ for (i = 0; i < 1024; i++) {
+ chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+ if (!(chcr & RCAR_DMACHCR_DE))
+ return;
+ udelay(1);
+ }
+
+ dev_err(chan->chan.device->dev, "CHCR DE check error\n");
+}
+
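+/*
+ * Clear DE so that the channel flushes any data still buffered in the
+ * DMAC and its transfer counter settles, then restore the previous
+ * CHCR value. Called before reading the residue of a DMA_DEV_TO_MEM
+ * transfer.
+ */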
+static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan)
+{
+ u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+ if (!(chcr & RCAR_DMACHCR_DE))
+ return;
+
+ /* set DE=0 and flush remaining data */
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
+
+ /* make sure all remaining data was flushed */
+ rcar_dmac_chcr_de_barrier(chan);
+
+ /* restore DE */
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+}
static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
@@ -749,6 +785,7 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+ rcar_dmac_chcr_de_barrier(chan);
}
static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
@@ -1309,8 +1346,11 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
residue += chunk->size;
}
+ if (desc->direction == DMA_DEV_TO_MEM)
+ rcar_dmac_sync_tcr(chan);
+
/* Add the residue for the current chunk. */
- residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
+ residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
return residue;
}
@@ -1481,6 +1521,8 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
if (chcr & RCAR_DMACHCR_TE)
mask |= RCAR_DMACHCR_DE;
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
+ if (mask & RCAR_DMACHCR_DE)
+ rcar_dmac_chcr_de_barrier(chan);
if (chcr & RCAR_DMACHCR_DSE)
ret |= rcar_dmac_isr_desc_stage_end(chan);
@@ -1615,22 +1657,6 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
-#ifdef CONFIG_PM_SLEEP
-static int rcar_dmac_sleep_suspend(struct device *dev)
-{
- /*
- * TODO: Wait for the current transfer to complete and stop the device.
- */
- return 0;
-}
-
-static int rcar_dmac_sleep_resume(struct device *dev)
-{
- /* TODO: Resume transfers, if any. */
- return 0;
-}
-#endif
-
#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
@@ -1646,7 +1672,13 @@ static int rcar_dmac_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops rcar_dmac_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
+ /*
+ * TODO for system sleep/resume:
+ * - Wait for the current transfer to complete and stop the device,
+ * - Resume transfers, if any.
+ */
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
NULL)
};
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index b652071a2096..b106e8a60af6 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -710,7 +710,7 @@ static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
return 0;
}
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index d5db0f6e1ff8..4dbb30cf94ac 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -253,9 +253,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
iomem = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index b9d75a54c896..9a558e30c461 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -353,7 +353,8 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
}
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
- if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
+ sconfig->device_fc) {
if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
return -EINVAL;
tdc->slave_id = sconfig->slave_id;
@@ -970,8 +971,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
- csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ csr |= TEGRA_APBDMA_CSR_ONCE;
+
+ if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
@@ -1110,10 +1116,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_FLOW;
+ if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
- csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 7df910e7c348..9272b173c746 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -54,7 +54,15 @@ struct ti_am335x_xbar_map {
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
- writeb_relaxed(val, iomem + event);
+ /*
+ * The TPCC_EVT_MUX_60_63 register layout differs from the rest:
+ * event 63 maps to its lowest byte and event 60 to its highest
+ * (event 63 -> offset 60, ..., event 60 -> offset 63), so handle
+ * these four events separately.
+ */
+ if (event >= 60 && event <= 63)
+ writeb_relaxed(val, iomem + (63 - event % 4));
+ else
+ writeb_relaxed(val, iomem + event);
}
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 896bafb7a532..395c698edb4d 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -422,7 +422,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan)
break;
else {
dev_err(chan2dev(chan),
- "Couldnt allocate any descriptors\n");
+ "Couldn't allocate any descriptors\n");
return -ENOMEM;
}
}
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 545e97279083..88ad8ed2a8d6 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -107,10 +107,7 @@ static void vchan_complete(unsigned long arg)
dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
- if (dmaengine_desc_test_reuse(&vd->tx))
- list_add(&vd->node, &vc->desc_allocated);
- else
- vc->desc_free(vd);
+ vchan_vdesc_fini(vd);
dmaengine_desc_callback_invoke(&cb, NULL);
}
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 3f776a46a29c..b09b75ab0751 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -35,6 +35,7 @@ struct virt_dma_chan {
struct list_head desc_completed;
struct virt_dma_desc *cyclic;
+ struct virt_dma_desc *vd_terminated;
};
static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -104,6 +105,20 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
}
/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ if (dmaengine_desc_test_reuse(&vd->tx))
+ list_add(&vd->node, &vc->desc_allocated);
+ else
+ vc->desc_free(vd);
+}
+
+/**
* vchan_cyclic_callback - report the completion of a period
* @vd: virtual descriptor
*/
@@ -116,6 +131,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
}
/**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
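+ *
+ * The descriptor is not freed here: it is parked in vd_terminated and
+ * released by the next terminate or by vchan_synchronize().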
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ /* free up stuck descriptor */
+ if (vc->vd_terminated)
+ vchan_vdesc_fini(vc->vd_terminated);
+
+ vc->vd_terminated = vd;
+ if (vc->cyclic == vd)
+ vc->cyclic = NULL;
+}
+
+/**
* vchan_next_desc - peek at the next descriptor to be processed
* @vc: virtual channel to obtain descriptor from
*
@@ -168,10 +202,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
* Makes sure that all scheduled or active callbacks have finished running. For
* proper operation the caller has to ensure that no new callbacks are scheduled
* after the invocation of this function started.
+ * Also frees the descriptor parked by vchan_terminate_vdesc(), preventing a
+ * memory leak.
*/
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
+ unsigned long flags;
+
tasklet_kill(&vc->task);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ if (vc->vd_terminated) {
+ vchan_vdesc_fini(vc->vd_terminated);
+ vc->vd_terminated = NULL;
+ }
+ spin_unlock_irqrestore(&vc->lock, flags);
}
#endif
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5eef13380ca8..27b523530c4a 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -99,7 +99,9 @@
#define XILINX_DMA_REG_FRMPTR_STS 0x0024
#define XILINX_DMA_REG_PARK_PTR 0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
+#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
+#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION 0x002c
/* Register Direct Mode Registers */
@@ -163,6 +165,7 @@
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
#define XILINX_DMA_COALESCE_MAX 255
+#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5
/* Multi-Channel DMA Descriptor offsets*/
@@ -211,8 +214,8 @@ struct xilinx_vdma_desc_hw {
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
* @buf_addr_msb: MSB of Buffer address @0x0C
- * @pad1: Reserved @0x10
- * @pad2: Reserved @0x14
+ * @mcdma_control: Control field for mcdma @0x10
+ * @vsize_stride: Vsize and Stride field for mcdma @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
@@ -232,11 +235,11 @@ struct xilinx_axidma_desc_hw {
/**
* struct xilinx_cdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
- * @next_descmsb: Next Descriptor Pointer MSB @0x04
+ * @next_desc_msb: Next Descriptor Pointer MSB @0x04
* @src_addr: Source address @0x08
- * @src_addrmsb: Source address MSB @0x0C
+ * @src_addr_msb: Source address MSB @0x0C
* @dest_addr: Destination address @0x10
- * @dest_addrmsb: Destination address MSB @0x14
+ * @dest_addr_msb: Destination address MSB @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
*/
@@ -310,6 +313,7 @@ struct xilinx_dma_tx_descriptor {
* @pending_list: Descriptors waiting
* @active_list: Descriptors ready to submit
* @done_list: Complete descriptors
+ * @free_seg_list: Free descriptors
* @common: DMA common channel
* @desc_pool: Descriptors pool
* @dev: The dma device
@@ -321,6 +325,7 @@ struct xilinx_dma_tx_descriptor {
* @cyclic: Check for cyclic transfers.
* @genlock: Support genlock mode
* @err: Channel has errors
+ * @idle: Check for channel idle
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@@ -329,9 +334,12 @@ struct xilinx_dma_tx_descriptor {
* @desc_submitcount: Descriptor h/w submitted count
* @residue: Residue for AXI DMA
* @seg_v: Statically allocated segments base
+ * @seg_p: Physical allocated segments base
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
+ * @cyclic_seg_p: Physical allocated segments base for cyclic dma
* @start_transfer: Differentiate between DMA IPs' transfer start
* @stop_transfer: Differentiate between DMA IPs' transfer quiesce
+ * @tdest: TDEST value for mcdma
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -341,6 +349,7 @@ struct xilinx_dma_chan {
struct list_head pending_list;
struct list_head active_list;
struct list_head done_list;
+ struct list_head free_seg_list;
struct dma_chan common;
struct dma_pool *desc_pool;
struct device *dev;
@@ -352,6 +361,7 @@ struct xilinx_dma_chan {
bool cyclic;
bool genlock;
bool err;
+ bool idle;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@@ -360,18 +370,20 @@ struct xilinx_dma_chan {
u32 desc_submitcount;
u32 residue;
struct xilinx_axidma_tx_segment *seg_v;
+ dma_addr_t seg_p;
struct xilinx_axidma_tx_segment *cyclic_seg_v;
+ dma_addr_t cyclic_seg_p;
void (*start_transfer)(struct xilinx_dma_chan *chan);
int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
};
/**
- * enum xdma_ip_type: DMA IP type.
+ * enum xdma_ip_type - DMA IP type.
*
- * XDMA_TYPE_AXIDMA: Axi dma ip.
- * XDMA_TYPE_CDMA: Axi cdma ip.
- * XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIDMA: Axi dma ip.
+ * @XDMA_TYPE_CDMA: Axi cdma ip.
+ * @XDMA_TYPE_VDMA: Axi vdma ip.
*
*/
enum xdma_ip_type {
@@ -580,18 +592,32 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
- struct xilinx_axidma_tx_segment *segment;
- dma_addr_t phys;
-
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
+ struct xilinx_axidma_tx_segment *segment = NULL;
+ unsigned long flags;
- segment->phys = phys;
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->free_seg_list)) {
+ segment = list_first_entry(&chan->free_seg_list,
+ struct xilinx_axidma_tx_segment,
+ node);
+ list_del(&segment->node);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
return segment;
}
+static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
+{
+ u32 next_desc = hw->next_desc;
+ u32 next_desc_msb = hw->next_desc_msb;
+
+ memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
+
+ hw->next_desc = next_desc;
+ hw->next_desc_msb = next_desc_msb;
+}
+
/**
* xilinx_dma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
@@ -600,7 +626,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
struct xilinx_axidma_tx_segment *segment)
{
- dma_pool_free(chan->desc_pool, segment, segment->phys);
+ xilinx_dma_clean_hw_desc(&segment->hw);
+
+ list_add_tail(&segment->node, &chan->free_seg_list);
}
/**
@@ -725,16 +753,31 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
dev_dbg(chan->dev, "Free all channel resources.\n");
xilinx_dma_free_descriptors(chan);
+
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
- xilinx_dma_free_tx_segment(chan, chan->seg_v);
+ spin_lock_irqsave(&chan->lock, flags);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Free memory that is allocated for BDs */
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS, chan->seg_v,
+ chan->seg_p);
+
+ /* Free memory that is allocated for cyclic DMA mode */
+ dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
+ chan->cyclic_seg_v, chan->cyclic_seg_p);
+ }
+
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
}
- dma_pool_destroy(chan->desc_pool);
- chan->desc_pool = NULL;
}
/**
@@ -817,6 +860,7 @@ static void xilinx_dma_do_tasklet(unsigned long data)
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ int i;
/* Has this channel already been allocated? */
if (chan->desc_pool)
@@ -827,11 +871,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
* for meeting Xilinx VDMA specification requirements.
*/
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_axidma_tx_segment),
- __alignof__(struct xilinx_axidma_tx_segment),
- 0);
+ /* Allocate the buffer descriptors. */
+ chan->seg_v = dma_zalloc_coherent(chan->dev,
+ sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
+ if (!chan->seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptors\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+ chan->seg_v[i].hw.next_desc =
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].hw.next_desc_msb =
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].phys = chan->seg_p +
+ sizeof(*chan->seg_v) * i;
+ list_add_tail(&chan->seg_v[i].node,
+ &chan->free_seg_list);
+ }
} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
chan->dev,
@@ -846,7 +909,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
0);
}
- if (!chan->desc_pool) {
+ if (!chan->desc_pool &&
+ (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
dev_err(chan->dev,
"unable to allocate channel %d descriptor pool\n",
chan->id);
@@ -855,22 +919,20 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/*
- * For AXI DMA case after submitting a pending_list, keep
- * an extra segment allocated so that the "next descriptor"
- * pointer on the tail descriptor always points to a
- * valid descriptor, even when paused after reaching taildesc.
- * This way, it is possible to issue additional
- * transfers without halting and restarting the channel.
- */
- chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
-
- /*
* For cyclic DMA mode we need to program the tail Descriptor
* register with a value which is not a part of the BD chain
* so allocating a desc segment during channel allocation for
* programming tail descriptor.
*/
- chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+ chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
+ sizeof(*chan->cyclic_seg_v),
+ &chan->cyclic_seg_p, GFP_KERNEL);
+ if (!chan->cyclic_seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate desc segment for cyclic DMA\n");
+ return -ENOMEM;
+ }
+ chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
}
dma_cookie_init(dchan);
@@ -936,34 +998,10 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
}
/**
- * xilinx_dma_is_running - Check if DMA channel is running
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if running, '0' if not.
- */
-static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
-{
- return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_HALTED) &&
- (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
- XILINX_DMA_DMACR_RUNSTOP);
-}
-
-/**
- * xilinx_dma_is_idle - Check if DMA channel is idle
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if idle, '0' if not.
- */
-static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
-{
- return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_IDLE;
-}
-
-/**
* xilinx_dma_stop_transfer - Halt DMA channel
* @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
*/
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
@@ -980,6 +1018,8 @@ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
/**
* xilinx_cdma_stop_transfer - Wait for the current transfer to complete
* @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
*/
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
@@ -1022,13 +1062,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
struct xilinx_dma_tx_descriptor *desc, *tail_desc;
- u32 reg;
+ u32 reg, j;
struct xilinx_vdma_tx_segment *tail_segment;
/* This function was invoked with lock held */
if (chan->err)
return;
+ if (!chan->idle)
+ return;
+
if (list_empty(&chan->pending_list))
return;
@@ -1040,13 +1083,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_vdma_tx_segment, node);
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
- return;
- }
-
/*
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
@@ -1063,10 +1099,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
else
reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
- /* Configure channel to allow number frame buffers */
- dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
- chan->desc_pendingcount);
-
/*
* With SG, start with circular mode, so that BDs can be fetched.
* In direct register mode, if not parking, enable circular mode
@@ -1079,17 +1111,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- if (config->park && (config->park_frm >= 0) &&
- (config->park_frm < chan->num_frms)) {
- if (chan->direction == DMA_MEM_TO_DEV)
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
- else
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
+ j = chan->desc_submitcount;
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+ } else {
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
}
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
/* Start the hardware */
xilinx_dma_start(chan);
@@ -1101,6 +1132,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->has_sg) {
dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
@@ -1130,19 +1163,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
last->hw.stride);
vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
- }
- if (!chan->has_sg) {
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
chan->desc_submitcount++;
chan->desc_pendingcount--;
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->active_list);
if (chan->desc_submitcount == chan->num_frms)
chan->desc_submitcount = 0;
- } else {
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
}
+
+ chan->idle = false;
}
/**
@@ -1158,6 +1188,9 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->err)
return;
+ if (!chan->idle)
+ return;
+
if (list_empty(&chan->pending_list))
return;
@@ -1176,6 +1209,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
}
if (chan->has_sg) {
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
@@ -1203,6 +1242,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
+ chan->idle = false;
}
/**
@@ -1212,7 +1252,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+ struct xilinx_axidma_tx_segment *tail_segment;
u32 reg;
if (chan->err)
@@ -1221,12 +1261,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (list_empty(&chan->pending_list))
return;
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
+ if (!chan->idle)
return;
- }
head_desc = list_first_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
@@ -1235,21 +1271,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_axidma_tx_segment, node);
- if (chan->has_sg && !chan->xdev->mcdma) {
- old_head = list_first_entry(&head_desc->segments,
- struct xilinx_axidma_tx_segment, node);
- new_head = chan->seg_v;
- /* Copy Buffer Descriptor fields. */
- new_head->hw = old_head->hw;
-
- /* Swap and save new reserve */
- list_replace_init(&old_head->node, &new_head->node);
- chan->seg_v = old_head;
-
- tail_segment->hw.next_desc = chan->seg_v->phys;
- head_desc->async_tx.phys = new_head->phys;
- }
-
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
@@ -1324,6 +1345,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
+ chan->idle = false;
}
/**
@@ -1388,6 +1410,8 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
}
chan->err = false;
+ chan->idle = true;
+ chan->desc_submitcount = 0;
return err;
}
@@ -1469,6 +1493,7 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
spin_lock(&chan->lock);
xilinx_dma_complete_descriptor(chan);
+ chan->idle = true;
chan->start_transfer(chan);
spin_unlock(&chan->lock);
}
@@ -1591,7 +1616,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_vdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_vdma_tx_segment *segment;
struct xilinx_vdma_desc_hw *hw;
if (!is_slave_direction(xt->dir))
@@ -1645,8 +1670,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
- prev = segment;
-
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
@@ -1733,7 +1756,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+ struct xilinx_axidma_tx_segment *segment = NULL;
u32 *app_w = (u32 *)context;
struct scatterlist *sg;
size_t copy;
@@ -1784,10 +1807,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
XILINX_DMA_NUM_APP_WORDS);
}
- if (prev)
- prev->hw.next_desc = segment->phys;
-
- prev = segment;
sg_used += copy;
/*
@@ -1801,7 +1820,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
segment = list_first_entry(&desc->segments,
struct xilinx_axidma_tx_segment, node);
desc->async_tx.phys = segment->phys;
- prev->hw.next_desc = segment->phys;
/* For the last DMA_MEM_TO_DEV transfer, set EOP */
if (chan->direction == DMA_MEM_TO_DEV) {
@@ -1821,11 +1839,14 @@ error:
/**
* xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: length of individual cyclic buffer
* @direction: DMA direction
* @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
@@ -2009,7 +2030,9 @@ error:
/**
* xilinx_dma_terminate_all - Halt the channel and free descriptors
- * @chan: Driver specific DMA Channel pointer
+ * @dchan: Driver specific DMA Channel pointer
+ *
+ * Return: '0' always.
*/
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
@@ -2029,6 +2052,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
/* Remove and free all of the descriptors in the lists */
xilinx_dma_free_descriptors(chan);
+ chan->idle = true;
if (chan->cyclic) {
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
@@ -2037,6 +2061,10 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
chan->cyclic = false;
}
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
return 0;
}
@@ -2323,6 +2351,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
*
* @xdev: Driver specific device structure
* @node: Device node
+ * @chan_id: DMA Channel id
*
* Return: '0' on success and failure value on error
*/
@@ -2344,11 +2373,18 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->has_sg = xdev->has_sg;
chan->desc_pendingcount = 0x0;
chan->ext_addr = xdev->ext_addr;
+ /*
+ * This flag ensures that descriptors are not submitted while
+ * the DMA engine is in progress. It avoids polling a bit in
+ * the status register to learn the DMA state in the driver
+ * hot path.
+ */
+ chan->idle = true;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
INIT_LIST_HEAD(&chan->done_list);
INIT_LIST_HEAD(&chan->active_list);
+ INIT_LIST_HEAD(&chan->free_seg_list);
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
@@ -2379,6 +2415,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+ chan->config.park = 1;
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
@@ -2395,6 +2432,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+ chan->config.park = 1;
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
@@ -2459,7 +2497,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
* Return: 0 always.
*/
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
- struct device_node *node) {
+ struct device_node *node)
+{
int ret, i, nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
@@ -2654,7 +2693,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
goto error;
}
- dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+ dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+ else
+ dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
return 0;
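The heart of the AXI DMA allocation rework above is a pre-linked ring of buffer descriptors: descriptor i points at descriptor (i + 1) % XILINX_DMA_NUM_DESCS, so the hardware's next-descriptor pointer stays valid even at the tail and segments can be recycled through free_seg_list without touching the links. The loop in xilinx_dma_alloc_chan_resources() reduced to its essentials (bd_ring_link() is an illustrative name, not driver code):

static void bd_ring_link(struct xilinx_axidma_tx_segment *seg_v,
			 dma_addr_t seg_p, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		/* the modulo wraps the last BD back onto the first */
		dma_addr_t next = seg_p + sizeof(*seg_v) * ((i + 1) % nr);

		seg_v[i].hw.next_desc = lower_32_bits(next);
		seg_v[i].hw.next_desc_msb = upper_32_bits(next);
		seg_v[i].phys = seg_p + sizeof(*seg_v) * i;
	}
}

This is also why xilinx_dma_clean_hw_desc() preserves next_desc/next_desc_msb when a segment is returned to the free list: the ring wiring is set up once and never rebuilt.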
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 1ee1241ca797..f14645817ed8 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pm_runtime.h>
#include "../dmaengine.h"
@@ -47,6 +48,7 @@
#define ZYNQMP_DMA_SRC_START_MSB 0x15C
#define ZYNQMP_DMA_DST_START_LSB 0x160
#define ZYNQMP_DMA_DST_START_MSB 0x164
+#define ZYNQMP_DMA_TOTAL_BYTE 0x188
#define ZYNQMP_DMA_RATE_CTRL 0x18C
#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190
#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194
@@ -138,6 +140,8 @@
#define ZYNQMP_DMA_BUS_WIDTH_64 64
#define ZYNQMP_DMA_BUS_WIDTH_128 128
+#define ZDMA_PM_TIMEOUT 100
+
#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size)
#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \
@@ -211,8 +215,6 @@ struct zynqmp_dma_desc_sw {
* @bus_width: Bus width
* @src_burst_len: Source burst length
* @dst_burst_len: Dest burst length
- * @clk_main: Pointer to main clock
- * @clk_apb: Pointer to apb clock
*/
struct zynqmp_dma_chan {
struct zynqmp_dma_device *zdev;
@@ -237,8 +239,6 @@ struct zynqmp_dma_chan {
u32 bus_width;
u32 src_burst_len;
u32 dst_burst_len;
- struct clk *clk_main;
- struct clk *clk_apb;
};
/**
@@ -246,11 +246,15 @@ struct zynqmp_dma_chan {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
*/
struct zynqmp_dma_device {
struct device *dev;
struct dma_device common;
struct zynqmp_dma_chan *chan;
+ struct clk *clk_main;
+ struct clk *clk_apb;
};
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
@@ -461,7 +465,11 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
struct zynqmp_dma_desc_sw *desc;
- int i;
+ int i, ret;
+
+ ret = pm_runtime_get_sync(chan->dev);
+ if (ret < 0)
+ return ret;
chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
GFP_KERNEL);
@@ -506,6 +514,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
{
writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
chan->idle = false;
writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
}
@@ -517,12 +526,12 @@ static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
*/
static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
{
- u32 val;
-
+ if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
- val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+ readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
- val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+ readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
}
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
@@ -545,6 +554,8 @@ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
* zynqmp_dma_device_config - Zynqmp dma device configuration
* @dchan: DMA channel
* @config: DMA device config
+ *
+ * Return: 0 always
*/
static int zynqmp_dma_device_config(struct dma_chan *dchan,
struct dma_slave_config *config)
@@ -640,7 +651,7 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
/**
* zynqmp_dma_free_descriptors - Free channel descriptors
- * @dchan: DMA channel pointer
+ * @chan: ZynqMP DMA channel pointer
*/
static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
{
@@ -664,6 +675,8 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
chan->desc_pool_v, chan->desc_pool_p);
kfree(chan->sw_desc_pool);
+ pm_runtime_mark_last_busy(chan->dev);
+ pm_runtime_put_autosuspend(chan->dev);
}
/**
@@ -715,7 +728,7 @@ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
if (status & ZYNQMP_DMA_INT_OVRFL) {
zynqmp_dma_handle_ovfl_int(chan, status);
- dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
+ dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
ret = IRQ_HANDLED;
}
@@ -838,11 +851,10 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
if (!chan)
return;
- devm_free_irq(chan->zdev->dev, chan->irq, chan);
+ if (chan->irq)
+ devm_free_irq(chan->zdev->dev, chan->irq, chan);
tasklet_kill(&chan->tasklet);
list_del(&chan->common.device_node);
- clk_disable_unprepare(chan->clk_apb);
- clk_disable_unprepare(chan->clk_main);
}
/**
@@ -907,30 +919,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
"zynqmp-dma", chan);
if (err)
return err;
- chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
- if (IS_ERR(chan->clk_main)) {
- dev_err(&pdev->dev, "main clock not found.\n");
- return PTR_ERR(chan->clk_main);
- }
-
- chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
- if (IS_ERR(chan->clk_apb)) {
- dev_err(&pdev->dev, "apb clock not found.\n");
- return PTR_ERR(chan->clk_apb);
- }
-
- err = clk_prepare_enable(chan->clk_main);
- if (err) {
- dev_err(&pdev->dev, "Unable to enable main clock.\n");
- return err;
- }
-
- err = clk_prepare_enable(chan->clk_apb);
- if (err) {
- clk_disable_unprepare(chan->clk_main);
- dev_err(&pdev->dev, "Unable to enable apb clock.\n");
- return err;
- }
chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
chan->idle = true;
@@ -953,6 +941,87 @@ static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
}
/**
+ * zynqmp_dma_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_resume - Resume from suspend
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_resume(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(zdev->clk_main);
+ clk_disable_unprepare(zdev->clk_apb);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_resume - Runtime resume method for the driver
+ * @dev: Address of the device structure
+ *
+ * Bring the device out of low power mode by enabling its clocks.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(zdev->clk_main);
+ if (err) {
+ dev_err(dev, "Unable to enable main clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(zdev->clk_apb);
+ if (err) {
+ dev_err(dev, "Unable to enable apb clock.\n");
+ clk_disable_unprepare(zdev->clk_main);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
+ SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
+ zynqmp_dma_runtime_resume, NULL)
+};
+
+/**
* zynqmp_dma_probe - Driver probe function
* @pdev: Pointer to the platform_device structure
*
@@ -984,12 +1053,33 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p->device_config = zynqmp_dma_device_config;
p->dev = &pdev->dev;
+ zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+ if (IS_ERR(zdev->clk_main)) {
+ dev_err(&pdev->dev, "main clock not found.\n");
+ return PTR_ERR(zdev->clk_main);
+ }
+
+ zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+ if (IS_ERR(zdev->clk_apb)) {
+ dev_err(&pdev->dev, "apb clock not found.\n");
+ return PTR_ERR(zdev->clk_apb);
+ }
+
platform_set_drvdata(pdev, zdev);
+ pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(zdev->dev);
+ pm_runtime_enable(zdev->dev);
+ pm_runtime_get_sync(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev)) {
+ ret = zynqmp_dma_runtime_resume(zdev->dev);
+ if (ret)
+ return ret;
+ }
ret = zynqmp_dma_chan_probe(zdev, pdev);
if (ret) {
dev_err(&pdev->dev, "Probing channel failed\n");
- goto free_chan_resources;
+ goto err_disable_pm;
}
p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
@@ -1005,12 +1095,19 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
goto free_chan_resources;
}
+ pm_runtime_mark_last_busy(zdev->dev);
+ pm_runtime_put_sync_autosuspend(zdev->dev);
+
dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
return 0;
free_chan_resources:
zynqmp_dma_chan_remove(zdev->chan);
+err_disable_pm:
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
return ret;
}
@@ -1028,6 +1125,9 @@ static int zynqmp_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&zdev->common);
zynqmp_dma_chan_remove(zdev->chan);
+ pm_runtime_disable(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
return 0;
}
@@ -1042,6 +1142,7 @@ static struct platform_driver zynqmp_dma_driver = {
.driver = {
.name = "xilinx-zynqmp-dma",
.of_match_table = zynqmp_dma_of_match,
+ .pm = &zynqmp_dma_dev_pm_ops,
},
.probe = zynqmp_dma_probe,
.remove = zynqmp_dma_remove,
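The clock handling above moves from per-channel probe code into runtime-PM callbacks: probe holds a runtime reference while setting the device up, zynqmp_dma_alloc_chan_resources() takes one with pm_runtime_get_sync(), and zynqmp_dma_free_chan_resources() drops it with autosuspend. A compressed sketch of the probe side of that pattern, assuming a hypothetical bar_dma driver (bar_probe() and the ZDMA-style 100 ms delay are illustrative):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int bar_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev); /* clocks on via runtime_resume */
	if (ret < 0)
		goto err_pm;

	/* ... map registers, request the IRQ, register the dma_device ... */

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_sync_autosuspend(&pdev->dev); /* idle until first use */
	return 0;

err_pm:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}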
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 96afb2aeed18..3c4017007647 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -457,4 +457,11 @@ config EDAC_XGENE
Support for error detection and correction on the
APM X-Gene family of SOCs.
+config EDAC_TI
+ tristate "Texas Instruments DDR3 ECC Controller"
+ depends on ARCH_KEYSTONE || SOC_DRA7XX
+ help
+ Support for error detection and correction on the
+ DDR3 memory controllers of TI Keystone and DRA7xx SoCs.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 0fd9ffa63299..b54912eb39af 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
+obj-$(CONFIG_EDAC_TI) += ti_edac.o
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index ec5d695bbb72..3c68bb525d5d 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
- goto err2;
+ goto err;
}
edac_dbg(3, "init mci\n");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 9c1ffe3e912b..aeb222ca3ed1 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
if (!pvt->inject)
int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
else {
+ int_reg.u64 = 0;
if (pvt->error_type == 1)
int_reg.s.sec_err = 1;
if (pvt->error_type == 2)
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
new file mode 100644
index 000000000000..6ac26d1b929f
--- /dev/null
+++ b/drivers/edac/ti_edac.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Texas Instruments DDR3 ECC error correction and detection driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+
+#include "edac_module.h"
+
+/* EMIF controller registers */
+#define EMIF_SDRAM_CONFIG 0x008
+#define EMIF_IRQ_STATUS 0x0ac
+#define EMIF_IRQ_ENABLE_SET 0x0b4
+#define EMIF_ECC_CTRL 0x110
+#define EMIF_1B_ECC_ERR_CNT 0x130
+#define EMIF_1B_ECC_ERR_THRSH 0x134
+#define EMIF_1B_ECC_ERR_ADDR_LOG 0x13c
+#define EMIF_2B_ECC_ERR_ADDR_LOG 0x140
+
+/* Bit definitions for EMIF_SDRAM_CONFIG */
+#define SDRAM_TYPE_SHIFT 29
+#define SDRAM_TYPE_MASK GENMASK(31, 29)
+#define SDRAM_TYPE_DDR3 (3 << SDRAM_TYPE_SHIFT)
+#define SDRAM_TYPE_DDR2 (2 << SDRAM_TYPE_SHIFT)
+#define SDRAM_NARROW_MODE_MASK GENMASK(15, 14)
+#define SDRAM_K2_NARROW_MODE_SHIFT 12
+#define SDRAM_K2_NARROW_MODE_MASK GENMASK(13, 12)
+#define SDRAM_ROWSIZE_SHIFT 7
+#define SDRAM_ROWSIZE_MASK GENMASK(9, 7)
+#define SDRAM_IBANK_SHIFT 4
+#define SDRAM_IBANK_MASK GENMASK(6, 4)
+#define SDRAM_K2_IBANK_SHIFT 5
+#define SDRAM_K2_IBANK_MASK GENMASK(6, 5)
+#define SDRAM_K2_EBANK_SHIFT 3
+#define SDRAM_K2_EBANK_MASK BIT(SDRAM_K2_EBANK_SHIFT)
+#define SDRAM_PAGESIZE_SHIFT 0
+#define SDRAM_PAGESIZE_MASK GENMASK(2, 0)
+#define SDRAM_K2_PAGESIZE_SHIFT 0
+#define SDRAM_K2_PAGESIZE_MASK GENMASK(1, 0)
+
+#define EMIF_1B_ECC_ERR_THRSH_SHIFT 24
+
+/* IRQ bit definitions */
+#define EMIF_1B_ECC_ERR BIT(5)
+#define EMIF_2B_ECC_ERR BIT(4)
+#define EMIF_WR_ECC_ERR BIT(3)
+#define EMIF_SYS_ERR BIT(0)
+/* Bit 31 enables ECC and 28 enables RMW */
+#define ECC_ENABLED (BIT(31) | BIT(28))
+
+#define EDAC_MOD_NAME "ti-emif-edac"
+
+enum {
+ EMIF_TYPE_DRA7,
+ EMIF_TYPE_K2
+};
+
+struct ti_edac {
+ void __iomem *reg;
+};
+
+static u32 ti_edac_readl(struct ti_edac *edac, u16 offset)
+{
+ return readl_relaxed(edac->reg + offset);
+}
+
+static void ti_edac_writel(struct ti_edac *edac, u32 val, u16 offset)
+{
+ writel_relaxed(val, edac->reg + offset);
+}
+
+static irqreturn_t ti_edac_isr(int irq, void *data)
+{
+ struct mem_ctl_info *mci = data;
+ struct ti_edac *edac = mci->pvt_info;
+ u32 irq_status;
+ u32 err_addr;
+ int err_count;
+
+ irq_status = ti_edac_readl(edac, EMIF_IRQ_STATUS);
+
+ if (irq_status & EMIF_1B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_1B_ECC_ERR_ADDR_LOG);
+ err_count = ti_edac_readl(edac, EMIF_1B_ECC_ERR_CNT);
+ ti_edac_writel(edac, err_count, EMIF_1B_ECC_ERR_CNT);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "1B");
+ }
+
+ if (irq_status & EMIF_2B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_2B_ECC_ERR_ADDR_LOG);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "2B");
+ }
+
+ if (irq_status & EMIF_WR_ECC_ERR)
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ 0, 0, -1, 0, 0, 0,
+ mci->ctl_name, "WR");
+
+ ti_edac_writel(edac, irq_status, EMIF_IRQ_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void ti_edac_setup_dimm(struct mem_ctl_info *mci, u32 type)
+{
+ struct dimm_info *dimm;
+ struct ti_edac *edac = mci->pvt_info;
+ int bits;
+ u32 val;
+ u32 memsize;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 0, 0, 0);
+
+ val = ti_edac_readl(edac, EMIF_SDRAM_CONFIG);
+
+ if (type == EMIF_TYPE_DRA7) {
+ bits = ((val & SDRAM_PAGESIZE_MASK) >> SDRAM_PAGESIZE_SHIFT) + 8;
+ bits += ((val & SDRAM_ROWSIZE_MASK) >> SDRAM_ROWSIZE_SHIFT) + 9;
+ bits += (val & SDRAM_IBANK_MASK) >> SDRAM_IBANK_SHIFT;
+
+ if (val & SDRAM_NARROW_MODE_MASK) {
+ bits++;
+ dimm->dtype = DEV_X16;
+ } else {
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ }
+ } else {
+ bits = 16;
+ bits += ((val & SDRAM_K2_PAGESIZE_MASK) >>
+ SDRAM_K2_PAGESIZE_SHIFT) + 8;
+ bits += (val & SDRAM_K2_IBANK_MASK) >> SDRAM_K2_IBANK_SHIFT;
+ bits += (val & SDRAM_K2_EBANK_MASK) >> SDRAM_K2_EBANK_SHIFT;
+
+ val = (val & SDRAM_K2_NARROW_MODE_MASK) >>
+ SDRAM_K2_NARROW_MODE_SHIFT;
+ switch (val) {
+ case 0:
+ bits += 3;
+ dimm->dtype = DEV_X64;
+ break;
+ case 1:
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ break;
+ case 2:
+ bits++;
+ dimm->dtype = DEV_X16;
+ break;
+ }
+ }
+
+ memsize = 1 << bits;
+
+ dimm->nr_pages = memsize >> PAGE_SHIFT;
+ dimm->grain = 4;
+ if ((val & SDRAM_TYPE_MASK) == SDRAM_TYPE_DDR2)
+ dimm->mtype = MEM_DDR2;
+ else
+ dimm->mtype = MEM_DDR3;
+
+ val = ti_edac_readl(edac, EMIF_ECC_CTRL);
+ if (val & ECC_ENABLED)
+ dimm->edac_mode = EDAC_SECDED;
+ else
+ dimm->edac_mode = EDAC_NONE;
+}
+
+static const struct of_device_id ti_edac_of_match[] = {
+ { .compatible = "ti,emif-keystone", .data = (void *)EMIF_TYPE_K2 },
+ { .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
+ {},
+};
+
+static int _emif_get_id(struct device_node *node)
+{
+ struct device_node *np;
+ const __be32 *addrp;
+ u32 addr, my_addr;
+ int my_id = 0;
+
+ addrp = of_get_address(node, 0, NULL, NULL);
+ my_addr = (u32)of_translate_address(node, addrp);
+
+ for_each_matching_node(np, ti_edac_of_match) {
+ if (np == node)
+ continue;
+
+ addrp = of_get_address(np, 0, NULL, NULL);
+ addr = (u32)of_translate_address(np, addrp);
+
+ edac_printk(KERN_INFO, EDAC_MOD_NAME,
+ "addr=%x, my_addr=%x\n",
+ addr, my_addr);
+
+ if (addr < my_addr)
+ my_id++;
+ }
+
+ return my_id;
+}
+
+static int ti_edac_probe(struct platform_device *pdev)
+{
+ int error_irq = 0, ret = -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *reg;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ const struct of_device_id *id;
+ struct ti_edac *edac;
+ int emif_id;
+
+ id = of_match_device(ti_edac_of_match, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg)) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF controller regs not defined\n");
+ return PTR_ERR(reg);
+ }
+
+ layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+ layers[0].size = 1;
+
+ /* Allocate ID number for our EMIF controller */
+ emif_id = _emif_get_id(pdev->dev.of_node);
+ if (emif_id < 0)
+ return -EINVAL;
+
+ mci = edac_mc_alloc(emif_id, 1, layers, sizeof(*edac));
+ if (!mci)
+ return -ENOMEM;
+
+ mci->pdev = &pdev->dev;
+ edac = mci->pvt_info;
+ edac->reg = reg;
+ platform_set_drvdata(pdev, mci);
+
+ mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED | EDAC_FLAG_NONE;
+ mci->mod_name = EDAC_MOD_NAME;
+ mci->ctl_name = id->compatible;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ /* Setup memory layout */
+ ti_edac_setup_dimm(mci, (u32)(id->data));
+
+ /* add EMIF ECC error handler */
+ error_irq = platform_get_irq(pdev, 0);
+ if (error_irq <= 0) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF irq number not defined.\n");
+ goto err;
+ }
+
+ ret = devm_request_irq(dev, error_irq, ti_edac_isr, 0,
+ "emif-edac-irq", mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "request_irq fail for EMIF EDAC irq\n");
+ goto err;
+ }
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "Failed to register mci: %d.\n", ret);
+ goto err;
+ }
+
+ /* Generate an interrupt with each 1b error */
+ ti_edac_writel(edac, 1 << EMIF_1B_ECC_ERR_THRSH_SHIFT,
+ EMIF_1B_ECC_ERR_THRSH);
+
+ /* Enable interrupts */
+ ti_edac_writel(edac,
+ EMIF_1B_ECC_ERR | EMIF_2B_ECC_ERR | EMIF_WR_ECC_ERR,
+ EMIF_IRQ_ENABLE_SET);
+
+ return 0;
+
+err:
+ edac_mc_free(mci);
+ return ret;
+}
+
+static int ti_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static struct platform_driver ti_edac_driver = {
+ .probe = ti_edac_probe,
+ .remove = ti_edac_remove,
+ .driver = {
+ .name = EDAC_MOD_NAME,
+ .of_match_table = ti_edac_of_match,
+ },
+};
+
+module_platform_driver(ti_edac_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("EDAC Driver for Texas Instruments DDR3 MC");
+MODULE_LICENSE("GPL v2");
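ti_edac_setup_dimm() derives the DIMM size purely by summing address bits out of the EMIF_SDRAM_CONFIG geometry fields. A worked example with illustrative field values (not read from any real board): on DRA7, a pagesize field of 2 gives 2 + 8 = 10 column bits, a rowsize field of 6 gives 6 + 9 = 15 row bits, an ibank field of 3 adds 3 bank bits, and full-width (non-narrow, 32-bit) SDRAM adds 2 more:

	bits     = 10 + 15 + 3 + 2 = 30
	memsize  = 1 << 30                   /* 1 GiB */
	nr_pages = memsize >> PAGE_SHIFT     /* 262144 with 4 KiB pages */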
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index a301fcf46e88..523391bb3fbe 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1784,10 +1784,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
+static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
{
struct client *client = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &client->wait, pt);
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 180f0a96528c..fee2e9e7ea20 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -328,11 +328,11 @@ nosy_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int
+static __poll_t
nosy_poll(struct file *file, poll_table *pt)
{
struct client *client = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &client->buffer.wait, pt);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index fa87a055905e..e77f77caa0f3 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -48,6 +48,14 @@ config ARM_SCPI_POWER_DOMAIN
This enables support for the SCPI power domains which can be
enabled or disabled via the SCP firmware
+config ARM_SDE_INTERFACE
+ bool "ARM Software Delegated Exception Interface (SDEI)"
+ depends on ARM64
+ help
+ The Software Delegated Exception Interface (SDEI) is an ARM
+ standard for registering callbacks from the platform firmware
+ into the OS. This is typically used to implement RAS notifications.
+
config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk"
depends on X86
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index feaa890197f3..b248238ddc6a 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_ARM_PSCI_FW) += psci.o
obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
+obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
new file mode 100644
index 000000000000..1ea71640fdc2
--- /dev/null
+++ b/drivers/firmware/arm_sdei.c
@@ -0,0 +1,1092 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+/*
+ * The call to use to reach the firmware.
+ */
+static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res);
+
+/* entry point from firmware to arch asm code */
+static unsigned long sdei_entry_point;
+
+struct sdei_event {
+ /* These three are protected by the sdei_list_lock */
+ struct list_head list;
+ bool reregister;
+ bool reenable;
+
+ u32 event_num;
+ u8 type;
+ u8 priority;
+
+ /* This pointer is handed to firmware as the event argument. */
+ union {
+ /* Shared events */
+ struct sdei_registered_event *registered;
+
+ /* CPU private events */
+ struct sdei_registered_event __percpu *private_registered;
+ };
+};
+
+/* Take the mutex for any API call or modification; take it before sdei_list_lock. */
+static DEFINE_MUTEX(sdei_events_lock);
+
+/* and then hold this when modifying the list */
+static DEFINE_SPINLOCK(sdei_list_lock);
+static LIST_HEAD(sdei_list);
+
+/* Private events are registered/enabled via IPI passing one of these */
+struct sdei_crosscall_args {
+ struct sdei_event *event;
+ atomic_t errors;
+ int first_error;
+};
+
+#define CROSSCALL_INIT(arg, event) (arg.event = event, \
+ arg.first_error = 0, \
+ atomic_set(&arg.errors, 0))
+
+static inline int sdei_do_cross_call(void *fn, struct sdei_event *event)
+{
+ struct sdei_crosscall_args arg;
+
+ CROSSCALL_INIT(arg, event);
+ on_each_cpu(fn, &arg, true);
+
+ return arg.first_error;
+}
+
+static inline void
+sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
+{
+ if (err && (atomic_inc_return(&arg->errors) == 1))
+ arg->first_error = err;
+}
+
+static int sdei_to_linux_errno(unsigned long sdei_err)
+{
+ switch (sdei_err) {
+ case SDEI_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case SDEI_INVALID_PARAMETERS:
+ return -EINVAL;
+ case SDEI_DENIED:
+ return -EPERM;
+ case SDEI_PENDING:
+ return -EINPROGRESS;
+ case SDEI_OUT_OF_RESOURCE:
+ return -ENOMEM;
+ }
+
+ /* Not an error value ... */
+ return sdei_err;
+}
+
+/*
+ * If x0 is any of these values, then the call failed, use sdei_to_linux_errno()
+ * to translate.
+ */
+static int sdei_is_err(struct arm_smccc_res *res)
+{
+ switch (res->a0) {
+ case SDEI_NOT_SUPPORTED:
+ case SDEI_INVALID_PARAMETERS:
+ case SDEI_DENIED:
+ case SDEI_PENDING:
+ case SDEI_OUT_OF_RESOURCE:
+ return true;
+ }
+
+ return false;
+}
+
+static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ u64 *result)
+{
+ int err = 0;
+ struct arm_smccc_res res;
+
+ if (sdei_firmware_call) {
+ sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
+ &res);
+ if (sdei_is_err(&res))
+ err = sdei_to_linux_errno(res.a0);
+ } else {
+ /*
+ * !sdei_firmware_call means we failed to probe or called
+ * sdei_mark_interface_broken(). -EIO is not an error returned
+ * by sdei_to_linux_errno() and is used to suppress messages
+ * from this driver.
+ */
+ err = -EIO;
+ res.a0 = SDEI_NOT_SUPPORTED;
+ }
+
+ if (result)
+ *result = res.a0;
+
+ return err;
+}
+
+static struct sdei_event *sdei_event_find(u32 event_num)
+{
+ struct sdei_event *e, *found = NULL;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(e, &sdei_list, list) {
+ if (e->event_num == event_num) {
+ found = e;
+ break;
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return found;
+}
+
+int sdei_api_event_context(u32 query, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
+ result);
+}
+NOKPROBE_SYMBOL(sdei_api_event_context);
+
+static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
+ 0, 0, result);
+}
+
+static struct sdei_event *sdei_event_create(u32 event_num,
+ sdei_event_callback *cb,
+ void *cb_arg)
+{
+ int err;
+ u64 result;
+ struct sdei_event *event;
+ struct sdei_registered_event *reg;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&event->list);
+ event->event_num = event_num;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+ &result);
+ if (err) {
+ kfree(event);
+ return ERR_PTR(err);
+ }
+ event->priority = result;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
+ &result);
+ if (err) {
+ kfree(event);
+ return ERR_PTR(err);
+ }
+ event->type = result;
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED) {
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ kfree(event);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ reg->event_num = event_num;
+ reg->priority = event->priority;
+
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ event->registered = reg;
+ } else {
+ int cpu;
+ struct sdei_registered_event __percpu *regs;
+
+ regs = alloc_percpu(struct sdei_registered_event);
+ if (!regs) {
+ kfree(event);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_possible_cpu(cpu) {
+ reg = per_cpu_ptr(regs, cpu);
+
+ reg->event_num = event->event_num;
+ reg->priority = event->priority;
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ }
+
+ event->private_registered = regs;
+ }
+
+ if (sdei_event_find(event_num)) {
+ kfree(event->registered);
+ kfree(event);
+ event = ERR_PTR(-EBUSY);
+ } else {
+ spin_lock(&sdei_list_lock);
+ list_add(&event->list, &sdei_list);
+ spin_unlock(&sdei_list_lock);
+ }
+
+ return event;
+}
+
+static void sdei_event_destroy(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_del(&event->list);
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ kfree(event->registered);
+ else
+ free_percpu(event->private_registered);
+
+ kfree(event);
+}
+
+static int sdei_api_get_version(u64 *version)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
+}
+
+int sdei_mask_local_cpu(void)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to mask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_mask_cpu(void *ignored)
+{
+ sdei_mask_local_cpu();
+}
+
+int sdei_unmask_local_cpu(void)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to unmask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_unmask_cpu(void *ignored)
+{
+ sdei_unmask_local_cpu();
+}
+
+static void _ipi_private_reset(void *ignored)
+{
+ int err;
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
+ NULL);
+ if (err && err != -EIO)
+ pr_warn_once("failed to reset CPU[%u]: %d\n",
+ smp_processor_id(), err);
+}
+
+static int sdei_api_shared_reset(void)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
+ NULL);
+}
+
+static void sdei_mark_interface_broken(void)
+{
+ pr_err("disabling SDEI firmware interface\n");
+ on_each_cpu(&_ipi_mask_cpu, NULL, true);
+ sdei_firmware_call = NULL;
+}
+
+static int sdei_platform_reset(void)
+{
+ int err;
+
+ on_each_cpu(&_ipi_private_reset, NULL, true);
+ err = sdei_api_shared_reset();
+ if (err) {
+ pr_err("Failed to reset platform: %d\n", err);
+ sdei_mark_interface_broken();
+ }
+
+ return err;
+}
+
+static int sdei_api_event_enable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
+ 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_enable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = sdei_api_event_enable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_enable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = true;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_enable);
+
+static int sdei_api_event_disable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
+ 0, 0, NULL);
+}
+
+static void _ipi_event_disable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_disable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_disable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_disable(event->event_num);
+ else
+ err = sdei_do_cross_call(_ipi_event_disable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_disable);
+
+static int sdei_api_event_unregister(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
+ 0, 0, 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_unregister(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = sdei_api_event_unregister(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+static int _sdei_event_unregister(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ return sdei_api_event_unregister(event->event_num);
+
+ return sdei_do_cross_call(_local_event_unregister, event);
+}
+
+int sdei_event_unregister(u32 event_num)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ do {
+ if (!event) {
+ pr_warn("Event %u not registered\n", event_num);
+ err = -ENOENT;
+ break;
+ }
+
+ err = _sdei_event_unregister(event);
+ if (err)
+ break;
+
+ sdei_event_destroy(event);
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_unregister);
+
+/*
+ * Unregister events, but don't destroy them as they are re-registered by
+ * sdei_reregister_shared().
+ */
+static int sdei_unregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ err = _sdei_event_unregister(event);
+ if (err)
+ break;
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
+ void *arg, u64 flags, u64 affinity)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
+ (unsigned long)entry_point, (unsigned long)arg,
+ flags, affinity, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_register(void *data)
+{
+ int err;
+ struct sdei_registered_event *reg;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON(preemptible());
+
+ reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
+ err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
+ reg, 0, 0);
+
+ sdei_cross_call_return(arg, err);
+}
+
+static int _sdei_event_register(struct sdei_event *event)
+{
+ int err;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = true;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ return sdei_api_event_register(event->event_num,
+ sdei_entry_point,
+ event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+
+
+ err = sdei_do_cross_call(_local_event_register, event);
+ if (err) {
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ sdei_do_cross_call(_local_event_unregister, event);
+ }
+
+ return err;
+}
+
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ do {
+ if (sdei_event_find(event_num)) {
+ pr_warn("Event %u already registered\n", event_num);
+ err = -EBUSY;
+ break;
+ }
+
+ event = sdei_event_create(event_num, cb, arg);
+ if (IS_ERR(event)) {
+ err = PTR_ERR(event);
+ pr_warn("Failed to create event %u: %d\n", event_num,
+ err);
+ break;
+ }
+
+ err = _sdei_event_register(event);
+ if (err) {
+ sdei_event_destroy(event);
+ pr_warn("Failed to register event %u: %d\n", event_num,
+ err);
+ }
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_register);
+
+static int sdei_reregister_event(struct sdei_event *event)
+{
+ int err;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ err = _sdei_event_register(event);
+ if (err) {
+ pr_err("Failed to re-register event %u\n", event->event_num);
+ sdei_event_destroy(event);
+ return err;
+ }
+
+ if (event->reenable) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+ }
+
+ if (err)
+ pr_err("Failed to re-enable event %u\n", event->event_num);
+
+ return err;
+}
+
+static int sdei_reregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ err = sdei_reregister_event(event);
+ if (err)
+ break;
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_cpuhp_down(unsigned int cpu)
+{
+ struct sdei_event *event;
+ struct sdei_crosscall_args arg;
+
+ /* unregister private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ CROSSCALL_INIT(arg, event);
+ /* call the cross-call function locally... */
+ _local_event_unregister(&arg);
+ if (arg.first_error)
+ pr_err("Failed to unregister event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+ spin_unlock(&sdei_list_lock);
+
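+ /* Mask this CPU so firmware stops delivering events to it while offline */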
+ return sdei_mask_local_cpu();
+}
+
+static int sdei_cpuhp_up(unsigned int cpu)
+{
+ struct sdei_event *event;
+ struct sdei_crosscall_args arg;
+
+ /* re-register/enable private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ CROSSCALL_INIT(arg, event);
+ /* call the cross-call function locally... */
+ _local_event_register(&arg);
+ if (arg.first_error)
+ pr_err("Failed to re-register event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+
+ if (event->reenable) {
+ CROSSCALL_INIT(arg, event);
+ _local_event_enable(&arg);
+ if (arg.first_error)
+ pr_err("Failed to re-enable event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_unmask_local_cpu();
+}
+
+/* When entering idle, mask/unmask events for this CPU */
+static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ int rv;
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ rv = sdei_mask_local_cpu();
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ rv = sdei_unmask_local_cpu();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ if (rv)
+ return notifier_from_errno(rv);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_pm_nb = {
+ .notifier_call = sdei_pm_notifier,
+};
+
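+/*
+ * For suspend/resume the events stay registered with firmware; masking
+ * and unmasking each CPU is sufficient.
+ */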
+static int sdei_device_suspend(struct device *dev)
+{
+ on_each_cpu(_ipi_mask_cpu, NULL, true);
+
+ return 0;
+}
+
+static int sdei_device_resume(struct device *dev)
+{
+ on_each_cpu(_ipi_unmask_cpu, NULL, true);
+
+ return 0;
+}
+
+/*
+ * We need all events to be reregistered when we resume from hibernate.
+ *
+ * The sequence is freeze->thaw, reboot, freeze->restore. We unregister
+ * events during freeze, then re-register and re-enable them during thaw
+ * and restore.
+ */
+static int sdei_device_freeze(struct device *dev)
+{
+ int err;
+
+ /* unregister private events */
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+ err = sdei_unregister_shared();
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int sdei_device_thaw(struct device *dev)
+{
+ int err;
+
+ /* re-register shared events */
+ err = sdei_reregister_shared();
+ if (err) {
+ pr_warn("Failed to re-register shared events...\n");
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err)
+ pr_warn("Failed to re-register CPU hotplug notifier...\n");
+
+ return err;
+}
+
+static int sdei_device_restore(struct device *dev)
+{
+ int err;
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ return sdei_device_thaw(dev);
+}
+
+static const struct dev_pm_ops sdei_pm_ops = {
+ .suspend = sdei_device_suspend,
+ .resume = sdei_device_resume,
+ .freeze = sdei_device_freeze,
+ .thaw = sdei_device_thaw,
+ .restore = sdei_device_restore,
+};
+
+/*
+ * Mask all CPUs and unregister all events on panic, reboot or kexec.
+ */
+static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ /*
+ * We are going to reset the interface. After this there is no point
+ * doing work when we take CPUs offline.
+ */
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+ sdei_platform_reset();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_reboot_nb = {
+ .notifier_call = sdei_reboot_notifier,
+};
+
+static void sdei_smccc_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
+static void sdei_smccc_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
+static int sdei_get_conduit(struct platform_device *pdev)
+{
+ const char *method;
+ struct device_node *np = pdev->dev.of_node;
+
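+ /* Pick the SMC or HVC conduit from the DT "method" property, or from ACPI */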
+ sdei_firmware_call = NULL;
+ if (np) {
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return CONDUIT_INVALID;
+ }
+
+ if (!strcmp("hvc", method)) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return CONDUIT_HVC;
+ } else if (!strcmp("smc", method)) {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return CONDUIT_SMC;
+ }
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ } else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
+ if (acpi_psci_use_hvc()) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return CONDUIT_HVC;
+ } else {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return CONDUIT_SMC;
+ }
+ }
+
+ return CONDUIT_INVALID;
+}
+
+static int sdei_probe(struct platform_device *pdev)
+{
+ int err;
+ u64 ver = 0;
+ int conduit;
+
+ conduit = sdei_get_conduit(pdev);
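+ /* No conduit means SDEI is absent rather than broken; succeed quietly */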
+ if (!sdei_firmware_call)
+ return 0;
+
+ err = sdei_api_get_version(&ver);
+ if (err == -EOPNOTSUPP)
+ pr_err("advertised but not implemented in platform firmware\n");
+ if (err) {
+ pr_err("Failed to get SDEI version: %d\n", err);
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
+ (int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
+ (int)SDEI_VERSION_VENDOR(ver));
+
+ if (SDEI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Conflicting SDEI version detected.\n");
+ sdei_mark_interface_broken();
+ return -EINVAL;
+ }
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ sdei_entry_point = sdei_arch_get_entry_point(conduit);
+ if (!sdei_entry_point) {
+ /* Not supported due to hardware or boot configuration */
+ sdei_mark_interface_broken();
+ return 0;
+ }
+
+ err = cpu_pm_register_notifier(&sdei_pm_nb);
+ if (err) {
+ pr_warn("Failed to register CPU PM notifier...\n");
+ goto error;
+ }
+
+ err = register_reboot_notifier(&sdei_reboot_nb);
+ if (err) {
+ pr_warn("Failed to register reboot notifier...\n");
+ goto remove_cpupm;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err) {
+ pr_warn("Failed to register CPU hotplug notifier...\n");
+ goto remove_reboot;
+ }
+
+ return 0;
+
+remove_reboot:
+ unregister_reboot_notifier(&sdei_reboot_nb);
+
+remove_cpupm:
+ cpu_pm_unregister_notifier(&sdei_pm_nb);
+
+error:
+ sdei_mark_interface_broken();
+ return err;
+}
+
+static const struct of_device_id sdei_of_match[] = {
+ { .compatible = "arm,sdei-1.0" },
+ {}
+};
+
+static struct platform_driver sdei_driver = {
+ .driver = {
+ .name = "sdei",
+ .pm = &sdei_pm_ops,
+ .of_match_table = sdei_of_match,
+ },
+ .probe = sdei_probe,
+};
+
+static bool __init sdei_present_dt(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np, *fw_np;
+
+ fw_np = of_find_node_by_name(NULL, "firmware");
+ if (!fw_np)
+ return false;
+
+ np = of_find_matching_node(fw_np, sdei_of_match);
+ of_node_put(fw_np);
+ if (!np)
+ return false;
+
+ pdev = of_platform_device_create(np, sdei_driver.driver.name, NULL);
+ of_node_put(np);
+ if (!pdev)
+ return false;
+
+ return true;
+}
+
+static bool __init sdei_present_acpi(void)
+{
+ acpi_status status;
+ struct platform_device *pdev;
+ struct acpi_table_header *sdei_table_header;
+
+ if (acpi_disabled)
+ return false;
+
+ status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
+ }
+ if (ACPI_FAILURE(status))
+ return false;
+
+ pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
+ 0);
+ if (IS_ERR(pdev))
+ return false;
+
+ return true;
+}
+
+static int __init sdei_init(void)
+{
+ if (sdei_present_dt() || sdei_present_acpi())
+ platform_driver_register(&sdei_driver);
+
+ return 0;
+}
+
+/*
+ * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
+ * its events. ACPI is initialised from a subsys_initcall(), while GHES is
+ * initialised by device_initcall(). We want to be called in the middle.
+ */
+subsys_initcall_sync(sdei_init);
+
+int sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
+{
+ int err;
+ mm_segment_t orig_addr_limit;
+ u32 event_num = arg->event_num;
+
+ orig_addr_limit = get_fs();
+ set_fs(USER_DS);
+
+ err = arg->callback(event_num, regs, arg->callback_arg);
+ if (err)
+ pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
+ event_num, smp_processor_id(), err);
+
+ set_fs(orig_addr_limit);
+
+ return err;
+}
+NOKPROBE_SYMBOL(sdei_event_handler);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 2b4c39fdfa91..6047ed4e8a3d 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -159,13 +159,21 @@ config RESET_ATTACK_MITIGATION
using the TCG Platform Reset Attack Mitigation specification. This
protects against an attacker forcibly rebooting the system while it
still contains secrets in RAM, booting another OS and extracting the
- secrets.
+ secrets. This should only be enabled when userland is configured to
+ clear the MemoryOverwriteRequest flag on clean shutdown after secrets
+ have been evicted, since otherwise it will trigger even on clean
+ reboots.
endmenu
config UEFI_CPER
bool
+config UEFI_CPER_ARM
+ bool
+ depends on UEFI_CPER && (ARM || ARM64)
+ default y
+
config EFI_DEV_PATH_PARSER
bool
depends on ACPI
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 269501dfba53..cb805374f4bc 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -11,7 +11,7 @@
KASAN_SANITIZE_runtime-wrappers.o := n
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
-obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o
+obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o
obj-$(CONFIG_EFI) += capsule.o memmap.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
@@ -30,3 +30,4 @@ arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index ec8ac5c4dd84..e456f4602df1 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -20,10 +20,6 @@
#define NO_FURTHER_WRITE_ACTION -1
-#ifndef phys_to_page
-#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT)
-#endif
-
/**
* efi_free_all_buff_pages - free all previous allocated buffer pages
* @cap_info: pointer to current instance of capsule_info structure
@@ -35,7 +31,7 @@
static void efi_free_all_buff_pages(struct capsule_info *cap_info)
{
while (cap_info->index > 0)
- __free_page(phys_to_page(cap_info->pages[--cap_info->index]));
+ __free_page(cap_info->pages[--cap_info->index]);
cap_info->index = NO_FURTHER_WRITE_ACTION;
}
@@ -49,7 +45,7 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
if (pages_needed == 0) {
- pr_err("invalid capsule size");
+ pr_err("invalid capsule size\n");
return -EINVAL;
}
@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
cap_info->pages = temp_page;
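+ /* Keep a parallel array of physical addresses for efi_capsule_update() */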
+ temp_page = krealloc(cap_info->phys,
+ pages_needed * sizeof(phys_addr_t),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page)
+ return -ENOMEM;
+
+ cap_info->phys = temp_page;
+
return 0;
}
@@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
**/
static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
{
+ bool do_vunmap = false;
int ret;
- ret = efi_capsule_update(&cap_info->header, cap_info->pages);
+ /*
+ * cap_info->capsule may have been assigned already by a quirk
+ * handler, so only overwrite it if it is NULL
+ */
+ if (!cap_info->capsule) {
+ cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+ VM_MAP, PAGE_KERNEL);
+ if (!cap_info->capsule)
+ return -ENOMEM;
+ do_vunmap = true;
+ }
+
+ ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+ if (do_vunmap)
+ vunmap(cap_info->capsule);
if (ret) {
pr_err("capsule update failed\n");
return ret;
@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
goto failed;
}
- cap_info->pages[cap_info->index++] = page_to_phys(page);
+ cap_info->pages[cap_info->index] = page;
+ cap_info->phys[cap_info->index] = page_to_phys(page);
cap_info->page_bytes_remain = PAGE_SIZE;
+ cap_info->index++;
} else {
- page = phys_to_page(cap_info->pages[cap_info->index - 1]);
+ page = cap_info->pages[cap_info->index - 1];
}
kbuff = kmap(page);
@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
struct capsule_info *cap_info = file->private_data;
kfree(cap_info->pages);
+ kfree(cap_info->phys);
kfree(file->private_data);
file->private_data = NULL;
return 0;
@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
+ cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
+ if (!cap_info->phys) {
+ kfree(cap_info->pages);
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
file->private_data = cap_info;
return 0;
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
new file mode 100644
index 000000000000..698e5c8e0c8d
--- /dev/null
+++ b/drivers/firmware/efi/cper-arm.c
@@ -0,0 +1,356 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+#define INDENT_SP " "
+
+static const char * const arm_reg_ctx_strs[] = {
+ "AArch32 general purpose registers",
+ "AArch32 EL1 context registers",
+ "AArch32 EL2 context registers",
+ "AArch32 secure context registers",
+ "AArch64 general purpose registers",
+ "AArch64 EL1 context registers",
+ "AArch64 EL2 context registers",
+ "AArch64 EL3 context registers",
+ "Misc. system register structure",
+};
+
+static const char * const arm_err_trans_type_strs[] = {
+ "Instruction",
+ "Data Access",
+ "Generic",
+};
+
+static const char * const arm_bus_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+};
+
+static const char * const arm_cache_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Eviction",
+ "Snooping (processor initiated a cache snoop that resulted in an error)",
+ "Snooped (processor raised a cache error caused by another processor or device snooping its cache)",
+ "Management",
+};
+
+static const char * const arm_tlb_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Local management operation (processor initiated a TLB management operation that resulted in an error)",
+ "External management operation (processor raised a TLB error caused by another processor or device broadcasting TLB operations)",
+};
+
+static const char * const arm_bus_err_part_type_strs[] = {
+ "Local processor originated request",
+ "Local processor responded to request",
+ "Local processor observed",
+ "Generic",
+};
+
+static const char * const arm_bus_err_addr_space_strs[] = {
+ "External Memory Access",
+ "Internal Memory Access",
+ "Unknown",
+ "Device Memory Access",
+};
+
+static void cper_print_arm_err_info(const char *pfx, u32 type,
+ u64 error_info)
+{
+ u8 trans_type, op_type, level, participation_type, address_space;
+ u16 mem_attributes;
+ bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
+ bool time_out, access_mode;
+
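+ /* error_info packs several sub-fields; each validation bit below gates one decode */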
+ /* If the type is unknown, bail. */
+ if (type > CPER_ARM_MAX_TYPE)
+ return;
+
+ /*
+ * Vendor type errors have error information values that are vendor
+ * specific.
+ */
+ if (type == CPER_ARM_VENDOR_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
+ trans_type = ((error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT)
+ & CPER_ARM_ERR_TRANSACTION_MASK);
+ if (trans_type < ARRAY_SIZE(arm_err_trans_type_strs)) {
+ printk("%stransaction type: %s\n", pfx,
+ arm_err_trans_type_strs[trans_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
+ op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
+ & CPER_ARM_ERR_OPERATION_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_cache_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_TLB_ERROR:
+ if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_tlb_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_BUS_ERROR:
+ if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_bus_err_op_strs[op_type]);
+ }
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
+ level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
+ & CPER_ARM_ERR_LEVEL_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ printk("%scache level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_TLB_ERROR:
+ printk("%sTLB level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_BUS_ERROR:
+ printk("%saffinity level at which the bus error occurred: %d\n",
+ pfx, level);
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
+ proc_context_corrupt = ((error_info >> CPER_ARM_ERR_PC_CORRUPT_SHIFT)
+ & CPER_ARM_ERR_PC_CORRUPT_MASK);
+ if (proc_context_corrupt)
+ printk("%sprocessor context corrupted\n", pfx);
+ else
+ printk("%sprocessor context not corrupted\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_CORRECTED) {
+ corrected = ((error_info >> CPER_ARM_ERR_CORRECTED_SHIFT)
+ & CPER_ARM_ERR_CORRECTED_MASK);
+ if (corrected)
+ printk("%sthe error has been corrected\n", pfx);
+ else
+ printk("%sthe error has not been corrected\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PRECISE_PC) {
+ precise_pc = ((error_info >> CPER_ARM_ERR_PRECISE_PC_SHIFT)
+ & CPER_ARM_ERR_PRECISE_PC_MASK);
+ if (precise_pc)
+ printk("%sPC is precise\n", pfx);
+ else
+ printk("%sPC is imprecise\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_RESTARTABLE_PC) {
+ restartable_pc = ((error_info >> CPER_ARM_ERR_RESTARTABLE_PC_SHIFT)
+ & CPER_ARM_ERR_RESTARTABLE_PC_MASK);
+ if (restartable_pc)
+ printk("%sProgram execution can be restarted reliably at the PC associated with the error.\n", pfx);
+ }
+
+ /* The rest of the fields are specific to bus errors */
+ if (type != CPER_ARM_BUS_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_PARTICIPATION_TYPE) {
+ participation_type = ((error_info >> CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT)
+ & CPER_ARM_ERR_PARTICIPATION_TYPE_MASK);
+ if (participation_type < ARRAY_SIZE(arm_bus_err_part_type_strs)) {
+ printk("%sparticipation type: %s\n", pfx,
+ arm_bus_err_part_type_strs[participation_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_TIME_OUT) {
+ time_out = ((error_info >> CPER_ARM_ERR_TIME_OUT_SHIFT)
+ & CPER_ARM_ERR_TIME_OUT_MASK);
+ if (time_out)
+ printk("%srequest timed out\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ADDRESS_SPACE) {
+ address_space = ((error_info >> CPER_ARM_ERR_ADDRESS_SPACE_SHIFT)
+ & CPER_ARM_ERR_ADDRESS_SPACE_MASK);
+ if (address_space < ARRAY_SIZE(arm_bus_err_addr_space_strs)) {
+ printk("%saddress space: %s\n", pfx,
+ arm_bus_err_addr_space_strs[address_space]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_MEM_ATTRIBUTES) {
+ mem_attributes = ((error_info >> CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT)
+ & CPER_ARM_ERR_MEM_ATTRIBUTES_MASK);
+ printk("%smemory access attributes:0x%x\n", pfx, mem_attributes);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ACCESS_MODE) {
+ access_mode = ((error_info >> CPER_ARM_ERR_ACCESS_MODE_SHIFT)
+ & CPER_ARM_ERR_ACCESS_MODE_MASK);
+ if (access_mode)
+ printk("%saccess mode: normal\n", pfx);
+ else
+ printk("%saccess mode: secure\n", pfx);
+ }
+}
+
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc)
+{
+ int i, len, max_ctx_type;
+ struct cper_arm_err_info *err_info;
+ struct cper_arm_ctx_info *ctx_info;
+ char newpfx[64], infopfx[64];
+
+ printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
+
+ len = proc->section_length - (sizeof(*proc) +
+ proc->err_info_num * (sizeof(*err_info)));
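+ /* len is the space left for context info and vendor data after the error info array */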
+ if (len < 0) {
+ printk("%ssection length: %d\n", pfx, proc->section_length);
+ printk("%ssection length is too small\n", pfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
+ return;
+ }
+
+ if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
+ printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
+ pfx, proc->mpidr);
+
+ if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
+ printk("%serror affinity level: %d\n", pfx,
+ proc->affinity_level);
+
+ if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
+ printk("%srunning state: 0x%x\n", pfx, proc->running_state);
+ printk("%sPower State Coordination Interface state: %d\n",
+ pfx, proc->psci_state);
+ }
+
+ snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
+
+ err_info = (struct cper_arm_err_info *)(proc + 1);
+ for (i = 0; i < proc->err_info_num; i++) {
+ printk("%sError info structure %d:\n", pfx, i);
+
+ printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
+
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
+ printk("%sfirst error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
+ printk("%slast error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
+ printk("%spropagated error captured\n",
+ newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
+ printk("%soverflow occurred, error info is incomplete\n",
+ newpfx);
+ }
+
+ printk("%serror_type: %d, %s\n", newpfx, err_info->type,
+ err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+ cper_proc_error_type_strs[err_info->type] : "unknown");
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
+ printk("%serror_info: 0x%016llx\n", newpfx,
+ err_info->error_info);
+ snprintf(infopfx, sizeof(infopfx), "%s%s", newpfx, INDENT_SP);
+ cper_print_arm_err_info(infopfx, err_info->type,
+ err_info->error_info);
+ }
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
+ printk("%svirtual fault address: 0x%016llx\n",
+ newpfx, err_info->virt_fault_addr);
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
+ printk("%sphysical fault address: 0x%016llx\n",
+ newpfx, err_info->physical_fault_addr);
+ err_info += 1;
+ }
+
+ ctx_info = (struct cper_arm_ctx_info *)err_info;
+ max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
+ for (i = 0; i < proc->context_info_num; i++) {
+ int size = sizeof(*ctx_info) + ctx_info->size;
+
+ printk("%sContext info structure %d:\n", pfx, i);
+ if (len < size) {
+ printk("%ssection length is too small\n", newpfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ return;
+ }
+ if (ctx_info->type > max_ctx_type) {
+ printk("%sInvalid context type: %d (max: %d)\n",
+ newpfx, ctx_info->type, max_ctx_type);
+ return;
+ }
+ printk("%sregister context type: %s\n", newpfx,
+ arm_reg_ctx_strs[ctx_info->type]);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (ctx_info + 1), ctx_info->size, 0);
+ len -= size;
+ ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
+ }
+
+ if (len > 0) {
+ printk("%sVendor specific error info has %u bytes:\n", pfx,
+ len);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
+ len, true);
+ }
+}
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index d2fcafcea07e..c165933ebf38 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -122,7 +122,7 @@ static const char * const proc_isa_strs[] = {
"ARM A64",
};
-static const char * const proc_error_type_strs[] = {
+const char * const cper_proc_error_type_strs[] = {
"cache error",
"TLB error",
"bus error",
@@ -157,8 +157,8 @@ static void cper_print_proc_generic(const char *pfx,
if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
cper_print_bits(pfx, proc->proc_error_type,
- proc_error_type_strs,
- ARRAY_SIZE(proc_error_type_strs));
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
}
if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
printk("%s""operation: %d, %s\n", pfx, proc->operation,
@@ -188,122 +188,6 @@ static void cper_print_proc_generic(const char *pfx,
printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
-static const char * const arm_reg_ctx_strs[] = {
- "AArch32 general purpose registers",
- "AArch32 EL1 context registers",
- "AArch32 EL2 context registers",
- "AArch32 secure context registers",
- "AArch64 general purpose registers",
- "AArch64 EL1 context registers",
- "AArch64 EL2 context registers",
- "AArch64 EL3 context registers",
- "Misc. system register structure",
-};
-
-static void cper_print_proc_arm(const char *pfx,
- const struct cper_sec_proc_arm *proc)
-{
- int i, len, max_ctx_type;
- struct cper_arm_err_info *err_info;
- struct cper_arm_ctx_info *ctx_info;
- char newpfx[64];
-
- printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
-
- len = proc->section_length - (sizeof(*proc) +
- proc->err_info_num * (sizeof(*err_info)));
- if (len < 0) {
- printk("%ssection length: %d\n", pfx, proc->section_length);
- printk("%ssection length is too small\n", pfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
- return;
- }
-
- if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
- printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
- pfx, proc->mpidr);
-
- if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
- printk("%serror affinity level: %d\n", pfx,
- proc->affinity_level);
-
- if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
- printk("%srunning state: 0x%x\n", pfx, proc->running_state);
- printk("%sPower State Coordination Interface state: %d\n",
- pfx, proc->psci_state);
- }
-
- snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
-
- err_info = (struct cper_arm_err_info *)(proc + 1);
- for (i = 0; i < proc->err_info_num; i++) {
- printk("%sError info structure %d:\n", pfx, i);
-
- printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
-
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
- if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
- printk("%sfirst error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
- printk("%slast error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
- printk("%spropagated error captured\n",
- newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
- printk("%soverflow occurred, error info is incomplete\n",
- newpfx);
- }
-
- printk("%serror_type: %d, %s\n", newpfx, err_info->type,
- err_info->type < ARRAY_SIZE(proc_error_type_strs) ?
- proc_error_type_strs[err_info->type] : "unknown");
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO)
- printk("%serror_info: 0x%016llx\n", newpfx,
- err_info->error_info);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
- printk("%svirtual fault address: 0x%016llx\n",
- newpfx, err_info->virt_fault_addr);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
- printk("%sphysical fault address: 0x%016llx\n",
- newpfx, err_info->physical_fault_addr);
- err_info += 1;
- }
-
- ctx_info = (struct cper_arm_ctx_info *)err_info;
- max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
- for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
-
- printk("%sContext info structure %d:\n", pfx, i);
- if (len < size) {
- printk("%ssection length is too small\n", newpfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- return;
- }
- if (ctx_info->type > max_ctx_type) {
- printk("%sInvalid context type: %d (max: %d)\n",
- newpfx, ctx_info->type, max_ctx_type);
- return;
- }
- printk("%sregister context type: %s\n", newpfx,
- arm_reg_ctx_strs[ctx_info->type]);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
- (ctx_info + 1), ctx_info->size, 0);
- len -= size;
- ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
- }
-
- if (len > 0) {
- printk("%sVendor specific error info has %u bytes:\n", pfx,
- len);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
- len, true);
- }
-}
-#endif
-
static const char * const mem_err_type_strs[] = {
"unknown",
"no error",
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 557a47829d03..cd42f66a7c85 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -52,6 +52,7 @@ struct efi __read_mostly efi = {
.properties_table = EFI_INVALID_TABLE_ADDR,
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
.rng_seed = EFI_INVALID_TABLE_ADDR,
+ .tpm_log = EFI_INVALID_TABLE_ADDR
};
EXPORT_SYMBOL(efi);
@@ -464,6 +465,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
+ {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
{NULL_GUID, NULL, NULL},
};
@@ -552,6 +554,8 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
if (efi_enabled(EFI_MEMMAP))
efi_memattr_init();
+ efi_tpm_eventlog_init();
+
/* Parse the EFI Properties table if it exists */
if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
efi_properties_table_t *tbl;
@@ -608,7 +612,7 @@ static int __init efi_load_efivars(void)
return 0;
pdev = platform_device_register_simple("efivars", 0, NULL, 0);
- return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
+ return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(efi_load_efivars);
#endif
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index adaa4a964f0c..7b3ba40f0745 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -30,8 +30,7 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
-lib-y := efi-stub-helper.o gop.o secureboot.o
-lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
+lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 6224cdbc9669..da661bf8cb96 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -4,15 +4,18 @@
* Copyright (C) 2016 CoreOS, Inc
* Copyright (C) 2017 Google, Inc.
* Matthew Garrett <mjg59@google.com>
+ * Thiebaud Weksteen <tweek@google.com>
*
* This file is part of the Linux kernel, and is made available under the
* terms of the GNU General Public License version 2.
*/
#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
#include <asm/efi.h>
#include "efistub.h"
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
static const efi_char16_t efi_MemoryOverWriteRequest_name[] = {
'M', 'e', 'm', 'o', 'r', 'y', 'O', 'v', 'e', 'r', 'w', 'r', 'i', 't',
'e', 'R', 'e', 'q', 'u', 'e', 's', 't', 'C', 'o', 'n', 't', 'r', 'o',
@@ -56,3 +59,81 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val);
}
+
+#endif
+
+void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
+ efi_status_t status;
+ efi_physical_addr_t log_location, log_last_entry;
+ struct linux_efi_tpm_eventlog *log_tbl;
+ unsigned long first_entry_addr, last_entry_addr;
+ size_t log_size, last_entry_size;
+ efi_bool_t truncated;
+ void *tcg2_protocol;
+
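+ /* Without the firmware's TCG2 protocol there is no event log to retrieve */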
+ status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
+ &tcg2_protocol);
+ if (status != EFI_SUCCESS)
+ return;
+
+ status = efi_call_proto(efi_tcg2_protocol, get_event_log, tcg2_protocol,
+ EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2,
+ &log_location, &log_last_entry, &truncated);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (!log_location)
+ return;
+ first_entry_addr = (unsigned long) log_location;
+
+ /*
+ * We populate the EFI table even if the logs are empty.
+ */
+ if (!log_last_entry) {
+ log_size = 0;
+ } else {
+ last_entry_addr = (unsigned long) log_last_entry;
+ /*
+ * get_event_log only returns the address of the last entry.
+ * We need to calculate its size to deduce the full size of
+ * the logs.
+ */
+ last_entry_size = sizeof(struct tcpa_event) +
+ ((struct tcpa_event *) last_entry_addr)->event_size;
+ log_size = log_last_entry - log_location + last_entry_size;
+ }
+
+ /* Allocate space for the logs and copy them. */
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*log_tbl) + log_size,
+ (void **) &log_tbl);
+
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg,
+ "Unable to allocate memory for event log\n");
+ return;
+ }
+
+ memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
+ log_tbl->size = log_size;
+ log_tbl->version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
+
+ status = efi_call_early(install_configuration_table,
+ &linux_eventlog_guid, log_tbl);
+ if (status != EFI_SUCCESS)
+ goto err_free;
+ return;
+
+err_free:
+ efi_call_early(free_pool, log_tbl);
+}
+
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+{
+ /* Only try to retrieve the logs in 1.2 format. */
+ efi_retrieve_tpm2_eventlog_1_2(sys_table_arg);
+}
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
new file mode 100644
index 000000000000..0cbeb3d46b18
--- /dev/null
+++ b/drivers/firmware/efi/tpm.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ * Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+/*
+ * Reserve the memory associated with the TPM Event Log configuration table.
+ */
+int __init efi_tpm_eventlog_init(void)
+{
+ struct linux_efi_tpm_eventlog *log_tbl;
+ unsigned int tbl_size;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
+ if (!log_tbl) {
+ pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
+ efi.tpm_log);
+ efi.tpm_log = EFI_INVALID_TABLE_ADDR;
+ return -ENOMEM;
+ }
+
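+ /* Reserve the table header plus the log payload so it survives to runtime */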
+ tbl_size = sizeof(*log_tbl) + log_tbl->size;
+ memblock_reserve(efi.tpm_log, tbl_size);
+ early_memunmap(log_tbl, sizeof(*log_tbl));
+ return 0;
+}
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index d687ca3d5049..8b25d31e8401 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -496,6 +496,8 @@ static void __init psci_init_migrate(void)
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_ops.get_version = psci_get_version;
+
psci_function_id[PSCI_FN_CPU_SUSPEND] =
PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
psci_ops.cpu_suspend = psci_cpu_suspend;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index f3f4f810e5df..bb1c068bff19 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,8 +77,8 @@ static int psci_ops_check(void)
return 0;
}
-static int find_clusters(const struct cpumask *cpus,
- const struct cpumask **clusters)
+static int find_cpu_groups(const struct cpumask *cpus,
+ const struct cpumask **cpu_groups)
{
unsigned int nb = 0;
cpumask_var_t tmp;
@@ -88,11 +88,11 @@ static int find_clusters(const struct cpumask *cpus,
cpumask_copy(tmp, cpus);
while (!cpumask_empty(tmp)) {
- const struct cpumask *cluster =
+ const struct cpumask *cpu_group =
topology_core_cpumask(cpumask_any(tmp));
- clusters[nb++] = cluster;
- cpumask_andnot(tmp, tmp, cluster);
+ cpu_groups[nb++] = cpu_group;
+ cpumask_andnot(tmp, tmp, cpu_group);
}
free_cpumask_var(tmp);
@@ -170,24 +170,24 @@ static int hotplug_tests(void)
{
int err;
cpumask_var_t offlined_cpus;
- int i, nb_cluster;
- const struct cpumask **clusters;
+ int i, nb_cpu_group;
+ const struct cpumask **cpu_groups;
char *page_buf;
err = -ENOMEM;
if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
return err;
- /* We may have up to nb_available_cpus clusters. */
- clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
- GFP_KERNEL);
- if (!clusters)
+ /* We may have up to nb_available_cpus cpu_groups. */
+ cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
+ GFP_KERNEL);
+ if (!cpu_groups)
goto out_free_cpus;
page_buf = (char *)__get_free_page(GFP_KERNEL);
if (!page_buf)
- goto out_free_clusters;
+ goto out_free_cpu_groups;
err = 0;
- nb_cluster = find_clusters(cpu_online_mask, clusters);
+ nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
/*
* Of course the last CPU cannot be powered down and cpu_down() should
@@ -197,24 +197,22 @@ static int hotplug_tests(void)
err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
/*
- * Take down CPUs by cluster this time. When the last CPU is turned
- * off, the cluster itself should shut down.
+ * Take down CPUs by cpu group this time. When the last CPU is turned
+ * off, the cpu group itself should shut down.
*/
- for (i = 0; i < nb_cluster; ++i) {
- int cluster_id =
- topology_physical_package_id(cpumask_any(clusters[i]));
+ for (i = 0; i < nb_cpu_group; ++i) {
ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
- clusters[i]);
+ cpu_groups[i]);
/* Remove trailing newline. */
page_buf[len - 1] = '\0';
- pr_info("Trying to turn off and on again cluster %d "
- "(CPUs %s)\n", cluster_id, page_buf);
- err += down_and_up_cpus(clusters[i], offlined_cpus);
+ pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
+ i, page_buf);
+ err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
}
free_page((unsigned long)page_buf);
-out_free_clusters:
- kfree(clusters);
+out_free_cpu_groups:
+ kfree(cpu_groups);
out_free_cpus:
free_cpumask_var(offlined_cpus);
return err;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d6a8e851ad13..8dbb2280538d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -122,12 +122,6 @@ config GPIO_ATH79
Select this option to enable GPIO driver for
Atheros AR71XX/AR724X/AR913X SoC devices.
-config GPIO_AXP209
- tristate "X-Powers AXP209 PMIC GPIO Support"
- depends on MFD_AXP20X
- help
- Say yes to enable GPIO support for the AXP209 PMIC
-
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -704,6 +698,22 @@ config GPIO_TS5500
blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
LCD port.
+config GPIO_WINBOND
+ tristate "Winbond Super I/O GPIO support"
+ depends on ISA_BUS_API
+ help
+ This option enables support for GPIOs found on Winbond Super I/O
+ chips.
+ Currently, only W83627UHG (also known as Nuvoton NCT6627UD) is
+ supported.
+
+ You will need to provide a module parameter "gpios", or a
+ boot-time parameter "gpio_winbond.gpios" with a bitmask of GPIO
+ ports to enable (bit 0 is GPIO1, bit 1 is GPIO2, etc.).
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-winbond.
+
config GPIO_WS16C48
tristate "WinSystems WS16C48 GPIO support"
depends on ISA_BUS_API
@@ -1234,6 +1244,16 @@ config GPIO_PCI_IDIO_16
low). Input filter control is not supported by this driver, and the
input filters are deactivated by this driver.
+config GPIO_PCIE_IDIO_24
+ tristate "ACCES PCIe-IDIO-24 GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ Enables GPIO support for the ACCES PCIe-IDIO-24 family (PCIe-IDIO-24,
+ PCIe-IDI-24, PCIe-IDO-24, PCIe-IDIO-12). An interrupt is generated
+ when any of the inputs change state (low to high or high to low).
+ Input filter control is not supported by this driver, and the input
+ filters are deactivated by this driver.
+
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
select MFD_CORE
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 4bc24febb889..cccb0d40846c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
-obj-$(CONFIG_GPIO_AXP209) += gpio-axp209.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
@@ -96,6 +95,7 @@ obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
+obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o
obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
@@ -140,6 +140,7 @@ obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
+obj-$(CONFIG_GPIO_WINBOND) += gpio-winbond.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index afbff155a0ba..e82cc763633c 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -125,6 +125,48 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
EXPORT_SYMBOL(devm_gpiod_get_index);
/**
+ * devm_gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @dev: device for lifecycle management
+ * @node: handle of the OF node
+ * @propname: name of the DT property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ struct gpio_desc **dr;
+ struct gpio_desc *desc;
+
+ dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ desc = gpiod_get_from_of_node(node, propname, index, dflags, label);
+ if (IS_ERR(desc)) {
+ devres_free(dr);
+ return desc;
+ }
+
+ *dr = desc;
+ devres_add(dev, dr);
+
+ return desc;
+}
+EXPORT_SYMBOL(devm_gpiod_get_from_of_node);
+
+/**
* devm_fwnode_get_index_gpiod_from_child - get a GPIO descriptor from a
* device's child node
* @dev: GPIO consumer
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 15a1f4b348c4..fb7b620763a2 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -9,12 +9,11 @@
* published by the Free Software Foundation.
*/
-#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index abf199609546..21452622d954 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -12,8 +12,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mfd/adp5520.h>
-
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
struct adp5520_gpio {
struct device *master;
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index e717f8dc3966..3530ccd17e04 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 8e76d390e653..8c3ff6e2366f 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -18,7 +18,8 @@
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/of_gpio.h> /* For of_mm_gpio_chip */
#include <linux/platform_device.h>
#define ALTERA_GPIO_MAX_NGPIO 32
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 30ad7d7c1678..fdcebe59510d 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -28,7 +28,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index d4e6ba0301bc..ba51ea15f379 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 6b3ca6601af2..77e485557498 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -60,6 +60,7 @@ struct aspeed_gpio_bank {
uint16_t val_regs;
uint16_t irq_regs;
uint16_t debounce_regs;
+ uint16_t tolerance_regs;
const char names[4][3];
};
@@ -70,48 +71,56 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.val_regs = 0x0000,
.irq_regs = 0x0008,
.debounce_regs = 0x0040,
+ .tolerance_regs = 0x001c,
.names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
.irq_regs = 0x0028,
.debounce_regs = 0x0048,
+ .tolerance_regs = 0x003c,
.names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
.irq_regs = 0x0098,
.debounce_regs = 0x00b0,
+ .tolerance_regs = 0x00ac,
.names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
.irq_regs = 0x00e8,
.debounce_regs = 0x0100,
+ .tolerance_regs = 0x00fc,
.names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
.irq_regs = 0x0118,
.debounce_regs = 0x0130,
+ .tolerance_regs = 0x012c,
.names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
.irq_regs = 0x0148,
.debounce_regs = 0x0160,
+ .tolerance_regs = 0x015c,
.names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
.irq_regs = 0x0178,
.debounce_regs = 0x0190,
+ .tolerance_regs = 0x018c,
.names = { "Y", "Z", "AA", "AB" },
},
{
- .val_regs = 0x01E8,
- .irq_regs = 0x01A8,
+ .val_regs = 0x01e8,
+ .irq_regs = 0x01a8,
.debounce_regs = 0x01c0,
+ .tolerance_regs = 0x01bc,
.names = { "AC", "", "", "" },
},
};
@@ -140,7 +149,7 @@ static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
{
unsigned int bank = GPIO_BANK(offset);
- WARN_ON(bank > ARRAY_SIZE(aspeed_gpio_banks));
+ WARN_ON(bank >= ARRAY_SIZE(aspeed_gpio_banks));
return &aspeed_gpio_banks[bank];
}
@@ -534,6 +543,30 @@ static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
return 0;
}
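+/* Reset tolerance, when enabled, keeps a pin's state across a controller reset */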
+static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
+ unsigned int offset, bool enable)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ const struct aspeed_gpio_bank *bank;
+ unsigned long flags;
+ u32 val;
+
+ bank = to_bank(offset);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ val = readl(gpio->base + bank->tolerance_regs);
+
+ if (enable)
+ val |= GPIO_BIT(offset);
+ else
+ val &= ~GPIO_BIT(offset);
+
+ writel(val, gpio->base + bank->tolerance_regs);
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
if (!have_gpio(gpiochip_get_data(chip), offset))
@@ -771,6 +804,8 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */
return -ENOTSUPP;
+ else if (param == PIN_CONFIG_PERSIST_STATE)
+ return aspeed_gpio_reset_tolerance(chip, offset, arg);
return -ENOTSUPP;
}
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 5fad89dfab7e..3ae7c1876bf4 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -324,3 +324,6 @@ static struct platform_driver ath79_gpio_driver = {
};
module_platform_driver(ath79_gpio_driver);
+
+MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-axp209.c b/drivers/gpio/gpio-axp209.c
deleted file mode 100644
index 4a346b7b4172..000000000000
--- a/drivers/gpio/gpio-axp209.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * AXP20x GPIO driver
- *
- * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/axp20x.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-
-#define AXP20X_GPIO_FUNCTIONS 0x7
-#define AXP20X_GPIO_FUNCTION_OUT_LOW 0
-#define AXP20X_GPIO_FUNCTION_OUT_HIGH 1
-#define AXP20X_GPIO_FUNCTION_INPUT 2
-
-struct axp20x_gpio {
- struct gpio_chip chip;
- struct regmap *regmap;
-};
-
-static int axp20x_gpio_get_reg(unsigned offset)
-{
- switch (offset) {
- case 0:
- return AXP20X_GPIO0_CTRL;
- case 1:
- return AXP20X_GPIO1_CTRL;
- case 2:
- return AXP20X_GPIO2_CTRL;
- }
-
- return -EINVAL;
-}
-
-static int axp20x_gpio_input(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- int reg;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- return regmap_update_bits(gpio->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- AXP20X_GPIO_FUNCTION_INPUT);
-}
-
-static int axp20x_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- unsigned int val;
- int ret;
-
- ret = regmap_read(gpio->regmap, AXP20X_GPIO20_SS, &val);
- if (ret)
- return ret;
-
- return !!(val & BIT(offset + 4));
-}
-
-static int axp20x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- unsigned int val;
- int reg, ret;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- ret = regmap_read(gpio->regmap, reg, &val);
- if (ret)
- return ret;
-
- /*
- * This shouldn't really happen if the pin is in use already,
- * or if it's not in use yet, it doesn't matter since we're
- * going to change the value soon anyway. Default to output.
- */
- if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
- return 0;
-
- /*
- * The GPIO directions are the three lowest values.
- * 2 is input, 0 and 1 are output
- */
- return val & 2;
-}
-
-static int axp20x_gpio_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- int reg;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- return regmap_update_bits(gpio->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- value ? AXP20X_GPIO_FUNCTION_OUT_HIGH
- : AXP20X_GPIO_FUNCTION_OUT_LOW);
-}
-
-static void axp20x_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- axp20x_gpio_output(chip, offset, value);
-}
-
-static int axp20x_gpio_probe(struct platform_device *pdev)
-{
- struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
- struct axp20x_gpio *gpio;
- int ret;
-
- if (!of_device_is_available(pdev->dev.of_node))
- return -ENODEV;
-
- if (!axp20x) {
- dev_err(&pdev->dev, "Parent drvdata not set\n");
- return -EINVAL;
- }
-
- gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (!gpio)
- return -ENOMEM;
-
- gpio->chip.base = -1;
- gpio->chip.can_sleep = true;
- gpio->chip.parent = &pdev->dev;
- gpio->chip.label = dev_name(&pdev->dev);
- gpio->chip.owner = THIS_MODULE;
- gpio->chip.get = axp20x_gpio_get;
- gpio->chip.get_direction = axp20x_gpio_get_direction;
- gpio->chip.set = axp20x_gpio_set;
- gpio->chip.direction_input = axp20x_gpio_input;
- gpio->chip.direction_output = axp20x_gpio_output;
- gpio->chip.ngpio = 3;
-
- gpio->regmap = axp20x->regmap;
-
- ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register GPIO chip\n");
- return ret;
- }
-
- dev_info(&pdev->dev, "AXP209 GPIO driver loaded\n");
-
- return 0;
-}
-
-static const struct of_device_id axp20x_gpio_match[] = {
- { .compatible = "x-powers,axp209-gpio" },
- { }
-};
-MODULE_DEVICE_TABLE(of, axp20x_gpio_match);
-
-static struct platform_driver axp20x_gpio_driver = {
- .probe = axp20x_gpio_probe,
- .driver = {
- .name = "axp20x-gpio",
- .of_match_table = axp20x_gpio_match,
- },
-};
-
-module_platform_driver(axp20x_gpio_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_DESCRIPTION("AXP20x PMIC GPIO driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 76861a00bb92..eb8369b21e90 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -17,7 +17,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/init.h>
@@ -127,7 +127,7 @@ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
u32 val;
val = readl(reg_base + GPIO_CONTROL(gpio)) & GPIO_GPCTR0_IOTR_MASK;
- return val ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
+ return !!val;
}
static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
@@ -144,7 +144,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
/* this function only applies to output pin */
- if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
goto out;
reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
@@ -170,7 +170,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
reg_base = kona_gpio->reg_base;
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
reg_offset = GPIO_IN_STATUS(bank_id);
else
reg_offset = GPIO_OUT_STATUS(bank_id);
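The three Kona hunks above drop the legacy GPIOF_DIR_IN/GPIOF_DIR_OUT constants in favour of the raw values gpiolib expects a get_direction() callback to return: 0 for output, 1 for input. A minimal sketch of a callback under that convention; the register name and layout here are hypothetical, not the Kona ones:

	/* Hypothetical driver-private data, for illustration only. */
	struct example_gpio {
		void __iomem *reg_base;
	};

	#define EXAMPLE_DIR_REG	0x04	/* assumed: bit set = line is an input */

	static int example_get_direction(struct gpio_chip *chip, unsigned int gpio)
	{
		struct example_gpio *eg = gpiochip_get_data(chip);

		/* 1 = input, 0 = output: the raw values gpiolib expects */
		return !!(readl(eg->reg_base + EXAMPLE_DIR_REG) & BIT(gpio));
	}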
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index bb4f8cf18bd9..16c7f9f49416 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -19,7 +19,6 @@
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
-#include <linux/bitops.h>
enum gio_reg_index {
GIO_REG_ODEN = 0,
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index acefb25e8eca..b8ec75cbd4b5 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -46,7 +46,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
/* Steal the hardware definitions from the bttv driver. */
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index b6f0f729656c..58531d8b8c6e 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -18,7 +18,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/regmap.h>
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 90278b19aa0e..8814c8f47e57 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -12,7 +12,7 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/cs5535.h>
#include <asm/msr.h>
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index dd8977cf3e85..b6d3e997eb26 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -15,7 +15,7 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/syscalls.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 82053b52cba0..2f1b5d23b10c 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -13,7 +13,7 @@
*/
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/da9055/core.h>
#include <linux/mfd/da9055/reg.h>
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index e4b3d7db68c9..0b951ca78ec4 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -9,7 +9,7 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 7b3394fdc624..b7a3a2db699b 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -176,8 +176,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
return PTR_ERR(g->base);
irq = platform_get_irq(pdev, 0);
- if (!irq)
- return -EINVAL;
+ if (irq <= 0)
+ return irq ? irq : -EINVAL;
ret = bgpio_init(&g->gc, dev, 4,
g->base + GPIO_DATA_IN,
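The ftgpio010 fix above matches the platform_get_irq() contract on these kernels: a negative errno on failure, plus a possible (invalid) return of 0. A hedged sketch of the resulting probe-time idiom:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;		/* propagate the errno from the core */
	if (!irq)
		return -EINVAL;		/* 0 is not a valid Linux IRQ number */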
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 98c7ff2a76e7..8d62db447ec1 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void)
return platform_driver_register(&iop3xx_gpio_driver);
}
arch_initcall(iop3xx_gpio_init);
+
+MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index d43d0a2cc4c5..efb46edff81f 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -414,6 +414,6 @@ static void __exit it87_gpio_exit(void)
module_init(it87_gpio_init);
module_exit(it87_gpio_exit);
-MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
+MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
MODULE_DESCRIPTION("GPIO interface for IT87xx Super I/O chips");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index c04fae1ba32a..9d8bcc69f245 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -709,8 +709,7 @@ static int max732x_probe(struct i2c_client *client,
return 0;
out_failed:
- if (chip->client_dummy)
- i2c_unregister_device(chip->client_dummy);
+ i2c_unregister_device(chip->client_dummy);
return ret;
}
@@ -734,8 +733,7 @@ static int max732x_remove(struct i2c_client *client)
gpiochip_remove(&chip->gpio_chip);
/* unregister any dummy i2c_client */
- if (chip->client_dummy)
- i2c_unregister_device(chip->client_dummy);
+ i2c_unregister_device(chip->client_dummy);
return 0;
}
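Both max732x hunks rely on i2c_unregister_device() being a no-op for a NULL client, so the error and remove paths can call it unconditionally. A sketch of the teardown this permits, reusing the max732x_chip fields from the hunks above:

	static int example_remove(struct i2c_client *client)
	{
		struct max732x_chip *chip = i2c_get_clientdata(client);

		gpiochip_remove(&chip->gpio_chip);
		/* Safe even when no dummy client was ever created. */
		i2c_unregister_device(chip->client_dummy);

		return 0;
	}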
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index dd67a31ac337..c38624ea0251 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -380,9 +381,16 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
}
}
+static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+{
+ const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
+ return dev_name ? dev_name : "pinctrl-merrifield";
+}
+
static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mrfld_gpio_pinrange *range;
+ const char *pinctrl_dev_name;
struct mrfld_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
@@ -439,10 +447,11 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
}
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
range = &mrfld_gpio_ranges[i];
retval = gpiochip_add_pin_range(&priv->chip,
- "pinctrl-merrifield",
+ pinctrl_dev_name,
range->gpio_base,
range->pin_base,
range->npins);
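The Merrifield change resolves the pinctrl device name at probe time instead of hard-coding it: on ACPI systems the name of the first device matching the pinctrl _HID wins, with the legacy string as fallback. A generalized sketch of the pattern, with a made-up HID and fallback name:

	#include <linux/acpi.h>

	/* "XXXX0000" and the fallback below are placeholders, not real IDs. */
	static const char *example_resolve_dev_name(void)
	{
		const char *name = acpi_dev_get_first_match_name("XXXX0000", NULL, -1);

		return name ? name : "example-fallback-device";
	}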
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index f9042bcc27a4..7b14d6280e44 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -152,14 +152,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
{
unsigned long get_mask = 0;
unsigned long set_mask = 0;
- int bit = 0;
- while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) {
- if (gc->bgpio_dir & BIT(bit))
- set_mask |= BIT(bit);
- else
- get_mask |= BIT(bit);
- }
+ /* Make sure we first clear any bits that are zero when we read the register */
+ *bits &= ~*mask;
+
+ /* Exploit the fact that we know which directions are set */
+ set_mask = *mask & gc->bgpio_dir;
+ get_mask = *mask & ~gc->bgpio_dir;
if (set_mask)
*bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -176,13 +175,13 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
/*
* This only works if the bits in the GPIO register are in native endianness.
- * It is dirt simple and fast in this case. (Also the most common case.)
*/
static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
-
- *bits = gc->read_reg(gc->reg_dat) & *mask;
+ /* Make sure we first clear any bits that are zero when we read the register */
+ *bits &= ~*mask;
+ *bits |= gc->read_reg(gc->reg_dat) & *mask;
return 0;
}
@@ -196,9 +195,12 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
unsigned long val;
int bit;
+ /* Make sure we first clear any bits that are zero when we read the register */
+ *bits &= ~*mask;
+
/* Create a mirrored mask */
- bit = 0;
- while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio)
+ bit = -1;
+ while ((bit = find_next_bit(mask, gc->ngpio, bit + 1)) < gc->ngpio)
readmask |= bgpio_line2mask(gc, bit);
/* Read the register */
@@ -208,8 +210,8 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
* Mirror the result into the "bits" result, this will give line 0
* in bit 0 ... line 31 in bit 31 for a 32bit register.
*/
- bit = 0;
- while ((bit = find_next_bit(&val, gc->ngpio, bit)) != gc->ngpio)
+ bit = -1;
+ while ((bit = find_next_bit(&val, gc->ngpio, bit + 1)) < gc->ngpio)
*bits |= bgpio_line2mask(gc, bit);
return 0;
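All three gpio-mmio hunks enforce the same contract: the bits buffer passed to a get_multiple() callback may hold stale data, so the callback must clear every bit covered by mask before OR-ing in freshly read values. A minimal sketch with a hypothetical driver-private struct:

	struct example_gpio {
		void __iomem *dat_reg;	/* assumed data-in register */
	};

	static int example_get_multiple(struct gpio_chip *gc, unsigned long *mask,
					unsigned long *bits)
	{
		struct example_gpio *eg = gpiochip_get_data(gc);

		*bits &= ~*mask;			/* clear stale bits under the mask */
		*bits |= readl(eg->dat_reg) & *mask;	/* OR in the fresh line state */

		return 0;
	}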
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 9532d86a82f7..3a545ad17817 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -27,13 +27,15 @@
#include "gpiolib.h"
#define GPIO_MOCKUP_NAME "gpio-mockup"
-#define GPIO_MOCKUP_MAX_GC 10
+#define GPIO_MOCKUP_MAX_GC 10
/*
* We're storing two values per chip: the GPIO base and the number
* of GPIO lines.
*/
#define GPIO_MOCKUP_MAX_RANGES (GPIO_MOCKUP_MAX_GC * 2)
+#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
+
enum {
GPIO_MOCKUP_DIR_OUT = 0,
GPIO_MOCKUP_DIR_IN = 1,
@@ -46,7 +48,7 @@ enum {
*/
struct gpio_mockup_line_status {
int dir;
- bool value;
+ int value;
};
struct gpio_mockup_chip {
@@ -62,17 +64,33 @@ struct gpio_mockup_dbgfs_private {
int offset;
};
+struct gpio_mockup_platform_data {
+ int base;
+ int ngpio;
+ int index;
+ bool named_lines;
+};
+
static int gpio_mockup_ranges[GPIO_MOCKUP_MAX_RANGES];
-static int gpio_mockup_params_nr;
-module_param_array(gpio_mockup_ranges, int, &gpio_mockup_params_nr, 0400);
+static int gpio_mockup_num_ranges;
+module_param_array(gpio_mockup_ranges, int, &gpio_mockup_num_ranges, 0400);
static bool gpio_mockup_named_lines;
module_param_named(gpio_mockup_named_lines,
gpio_mockup_named_lines, bool, 0400);
-static const char gpio_mockup_name_start = 'A';
static struct dentry *gpio_mockup_dbg_dir;
+static int gpio_mockup_range_base(unsigned int index)
+{
+ return gpio_mockup_ranges[index * 2];
+}
+
+static int gpio_mockup_range_ngpio(unsigned int index)
+{
+ return gpio_mockup_ranges[index * 2 + 1];
+}
+
static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -80,16 +98,26 @@ static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset)
return chip->lines[offset].value;
}
-static void gpio_mockup_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static void gpio_mockup_set(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
chip->lines[offset].value = !!value;
}
-static int gpio_mockup_dirout(struct gpio_chip *gc, unsigned int offset,
- int value)
+static void gpio_mockup_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ unsigned int bit;
+
+ for_each_set_bit(bit, mask, gc->ngpio)
+ gpio_mockup_set(gc, bit, test_bit(bit, bits));
+}
+
+static int gpio_mockup_dirout(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -115,29 +143,6 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
return chip->lines[offset].dir;
}
-static int gpio_mockup_name_lines(struct device *dev,
- struct gpio_mockup_chip *chip)
-{
- struct gpio_chip *gc = &chip->gc;
- char **names;
- int i;
-
- names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
- if (!names)
- return -ENOMEM;
-
- for (i = 0; i < gc->ngpio; i++) {
- names[i] = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%d", gc->label, i);
- if (!names[i])
- return -ENOMEM;
- }
-
- gc->names = (const char *const *)names;
-
- return 0;
-}
-
static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -188,15 +193,21 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
struct gpio_mockup_chip *chip)
{
struct gpio_mockup_dbgfs_private *priv;
- struct dentry *evfile;
+ struct dentry *evfile, *link;
struct gpio_chip *gc;
+ const char *devname;
char *name;
int i;
gc = &chip->gc;
+ devname = dev_name(&gc->gpiodev->dev);
- chip->dbg_dir = debugfs_create_dir(gc->label, gpio_mockup_dbg_dir);
- if (!chip->dbg_dir)
+ chip->dbg_dir = debugfs_create_dir(devname, gpio_mockup_dbg_dir);
+ if (IS_ERR_OR_NULL(chip->dbg_dir))
+ goto err;
+
+ link = debugfs_create_symlink(gc->label, gpio_mockup_dbg_dir, devname);
+ if (IS_ERR_OR_NULL(link))
goto err;
for (i = 0; i < gc->ngpio; i++) {
@@ -214,23 +225,63 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
evfile = debugfs_create_file(name, 0200, chip->dbg_dir, priv,
&gpio_mockup_event_ops);
- if (!evfile)
+ if (IS_ERR_OR_NULL(evfile))
goto err;
}
return;
err:
- dev_err(dev, "error creating debugfs directory\n");
+ dev_err(dev, "error creating debugfs event files\n");
}
-static int gpio_mockup_add(struct device *dev,
- struct gpio_mockup_chip *chip,
- const char *name, int base, int ngpio)
+static int gpio_mockup_name_lines(struct device *dev,
+ struct gpio_mockup_chip *chip)
{
struct gpio_chip *gc = &chip->gc;
- int ret;
+ char **names;
+ int i;
+
+ names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < gc->ngpio; i++) {
+ names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", gc->label, i);
+ if (!names[i])
+ return -ENOMEM;
+ }
+
+ gc->names = (const char *const *)names;
+
+ return 0;
+}
+
+static int gpio_mockup_probe(struct platform_device *pdev)
+{
+ struct gpio_mockup_platform_data *pdata;
+ struct gpio_mockup_chip *chip;
+ struct gpio_chip *gc;
+ int rv, base, ngpio;
+ struct device *dev;
+ char *name;
+
+ dev = &pdev->dev;
+ pdata = dev_get_platdata(dev);
+ base = pdata->base;
+ ngpio = pdata->ngpio;
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s-%c",
+ pdev->name, pdata->index);
+ if (!name)
+ return -ENOMEM;
+
+ gc = &chip->gc;
gc->base = base;
gc->ngpio = ngpio;
gc->label = name;
@@ -238,6 +289,7 @@ static int gpio_mockup_add(struct device *dev,
gc->parent = dev;
gc->get = gpio_mockup_get;
gc->set = gpio_mockup_set;
+ gc->set_multiple = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
@@ -248,19 +300,19 @@ static int gpio_mockup_add(struct device *dev,
if (!chip->lines)
return -ENOMEM;
- if (gpio_mockup_named_lines) {
- ret = gpio_mockup_name_lines(dev, chip);
- if (ret)
- return ret;
+ if (pdata->named_lines) {
+ rv = gpio_mockup_name_lines(dev, chip);
+ if (rv)
+ return rv;
}
- ret = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
- if (ret)
- return ret;
+ rv = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
+ if (rv < 0)
+ return rv;
- ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
- if (ret)
- return ret;
+ rv = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ if (rv)
+ return rv;
if (gpio_mockup_dbg_dir)
gpio_mockup_debugfs_setup(dev, chip);
@@ -268,58 +320,6 @@ static int gpio_mockup_add(struct device *dev,
return 0;
}
-static int gpio_mockup_probe(struct platform_device *pdev)
-{
- int ret, i, base, ngpio, num_chips;
- struct device *dev = &pdev->dev;
- struct gpio_mockup_chip *chips;
- char *chip_name;
-
- if (gpio_mockup_params_nr < 2 || (gpio_mockup_params_nr % 2))
- return -EINVAL;
-
- /* Each chip is described by two values. */
- num_chips = gpio_mockup_params_nr / 2;
-
- chips = devm_kcalloc(dev, num_chips, sizeof(*chips), GFP_KERNEL);
- if (!chips)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, chips);
-
- for (i = 0; i < num_chips; i++) {
- base = gpio_mockup_ranges[i * 2];
-
- if (base == -1)
- ngpio = gpio_mockup_ranges[i * 2 + 1];
- else
- ngpio = gpio_mockup_ranges[i * 2 + 1] - base;
-
- if (ngpio >= 0) {
- chip_name = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%c", GPIO_MOCKUP_NAME,
- gpio_mockup_name_start + i);
- if (!chip_name)
- return -ENOMEM;
-
- ret = gpio_mockup_add(dev, &chips[i],
- chip_name, base, ngpio);
- } else {
- ret = -EINVAL;
- }
-
- if (ret) {
- dev_err(dev,
- "adding gpiochip failed: %d (base: %d, ngpio: %d)\n",
- ret, base, base < 0 ? ngpio : base + ngpio);
-
- return ret;
- }
- }
-
- return 0;
-}
-
static struct platform_driver gpio_mockup_driver = {
.driver = {
.name = GPIO_MOCKUP_NAME,
@@ -327,44 +327,88 @@ static struct platform_driver gpio_mockup_driver = {
.probe = gpio_mockup_probe,
};
-static struct platform_device *pdev;
-static int __init mock_device_init(void)
+static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC];
+
+static void gpio_mockup_unregister_pdevs(void)
{
- int err;
+ struct platform_device *pdev;
+ int i;
- gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup-event", NULL);
- if (!gpio_mockup_dbg_dir)
- pr_err("%s: error creating debugfs directory\n",
- GPIO_MOCKUP_NAME);
+ for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) {
+ pdev = gpio_mockup_pdevs[i];
- pdev = platform_device_alloc(GPIO_MOCKUP_NAME, -1);
- if (!pdev)
- return -ENOMEM;
+ if (pdev)
+ platform_device_unregister(pdev);
+ }
+}
- err = platform_device_add(pdev);
- if (err) {
- platform_device_put(pdev);
- return err;
+static int __init gpio_mockup_init(void)
+{
+ int i, num_chips, err = 0, index = 'A';
+ struct gpio_mockup_platform_data pdata;
+ struct platform_device *pdev;
+
+ if ((gpio_mockup_num_ranges < 2) ||
+ (gpio_mockup_num_ranges % 2) ||
+ (gpio_mockup_num_ranges > GPIO_MOCKUP_MAX_RANGES))
+ return -EINVAL;
+
+ /* Each chip is described by two values. */
+ num_chips = gpio_mockup_num_ranges / 2;
+
+ /*
+ * The second value in the <base GPIO - number of GPIOs> pair must
+ * always be greater than 0.
+ */
+ for (i = 0; i < num_chips; i++) {
+ if (gpio_mockup_range_ngpio(i) < 0)
+ return -EINVAL;
}
+ gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup-event", NULL);
+ if (IS_ERR_OR_NULL(gpio_mockup_dbg_dir))
+ gpio_mockup_err("error creating debugfs directory\n");
+
err = platform_driver_register(&gpio_mockup_driver);
if (err) {
- platform_device_unregister(pdev);
+ gpio_mockup_err("error registering platform driver\n");
return err;
}
+ for (i = 0; i < num_chips; i++) {
+ pdata.index = index++;
+ pdata.base = gpio_mockup_range_base(i);
+ pdata.ngpio = pdata.base < 0
+ ? gpio_mockup_range_ngpio(i)
+ : gpio_mockup_range_ngpio(i) - pdata.base;
+ pdata.named_lines = gpio_mockup_named_lines;
+
+ pdev = platform_device_register_resndata(NULL,
+ GPIO_MOCKUP_NAME,
+ i, NULL, 0, &pdata,
+ sizeof(pdata));
+ if (IS_ERR(pdev)) {
+ gpio_mockup_err("error registering device");
+ platform_driver_unregister(&gpio_mockup_driver);
+ gpio_mockup_unregister_pdevs();
+ return PTR_ERR(pdev);
+ }
+
+ gpio_mockup_pdevs[i] = pdev;
+ }
+
return 0;
}
-static void __exit mock_device_exit(void)
+static void __exit gpio_mockup_exit(void)
{
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
- platform_device_unregister(pdev);
+ gpio_mockup_unregister_pdevs();
}
-module_init(mock_device_init);
-module_exit(mock_device_exit);
+module_init(gpio_mockup_init);
+module_exit(gpio_mockup_exit);
MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
MODULE_AUTHOR("Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index e136d666f1e5..ab5035b96886 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1058,7 +1058,9 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
+ struct gpio_irq_chip *irq;
static int gpio;
+ const char *label;
int irq_base = 0;
int ret;
@@ -1080,21 +1082,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
bank->chip.parent = &omap_mpuio_device.dev;
bank->chip.base = OMAP_MPUIO(0);
} else {
- bank->chip.label = "gpio";
+ label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
+ gpio, gpio + bank->width - 1);
+ if (!label)
+ return -ENOMEM;
+ bank->chip.label = label;
bank->chip.base = gpio;
}
bank->chip.ngpio = bank->width;
- ret = gpiochip_add_data(&bank->chip, bank);
- if (ret) {
- dev_err(bank->chip.parent,
- "Could not register gpio chip %d\n", ret);
- return ret;
- }
-
- if (!bank->is_mpuio)
- gpio += bank->width;
-
#ifdef CONFIG_ARCH_OMAP1
/*
* REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
@@ -1115,25 +1111,30 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
irqc->irq_set_wake = NULL;
}
- ret = gpiochip_irqchip_add(&bank->chip, irqc,
- irq_base, handle_bad_irq,
- IRQ_TYPE_NONE);
+ irq = &bank->chip.irq;
+ irq->chip = irqc;
+ irq->handler = handle_bad_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->num_parents = 1;
+ irq->parents = &bank->irq;
+ irq->first = irq_base;
+ ret = gpiochip_add_data(&bank->chip, bank);
if (ret) {
dev_err(bank->chip.parent,
- "Couldn't add irqchip to gpiochip %d\n", ret);
- gpiochip_remove(&bank->chip);
- return -ENODEV;
+ "Could not register gpio chip %d\n", ret);
+ return ret;
}
- gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);
-
ret = devm_request_irq(bank->chip.parent, bank->irq,
omap_gpio_irq_handler,
0, dev_name(bank->chip.parent), bank);
if (ret)
gpiochip_remove(&bank->chip);
+ if (!bank->is_mpuio)
+ gpio += bank->width;
+
return ret;
}
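The OMAP conversion above replaces the old two-step sequence (gpiochip_add_data() followed by gpiochip_irqchip_add() and gpiochip_set_chained_irqchip()) with filling in struct gpio_irq_chip up front, so gpiochip_add_data() registers the irqchip as part of chip registration. A condensed sketch of the new ordering, assuming the same bank fields as above:

	struct gpio_irq_chip *girq = &bank->chip.irq;

	girq->chip = irqc;
	girq->handler = handle_bad_irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->num_parents = 1;
	girq->parents = &bank->irq;	/* chained parent interrupt */

	ret = gpiochip_add_data(&bank->chip, bank);	/* wires up the irqchip too */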
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
new file mode 100644
index 000000000000..f666e2e69074
--- /dev/null
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GPIO driver for the ACCES PCIe-IDIO-24 family
+ * Copyright (C) 2018 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the following ACCES devices: PCIe-IDIO-24,
+ * PCIe-IDI-24, PCIe-IDO-24, and PCIe-IDIO-12.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * struct idio_24_gpio_reg - GPIO device registers structure
+ * @out0_7: Read: FET Outputs 0-7
+ * Write: FET Outputs 0-7
+ * @out8_15: Read: FET Outputs 8-15
+ * Write: FET Outputs 8-15
+ * @out16_23: Read: FET Outputs 16-23
+ * Write: FET Outputs 16-23
+ * @ttl_out0_7: Read: TTL/CMOS Outputs 0-7
+ * Write: TTL/CMOS Outputs 0-7
+ * @in0_7: Read: Isolated Inputs 0-7
+ * Write: Reserved
+ * @in8_15: Read: Isolated Inputs 8-15
+ * Write: Reserved
+ * @in16_23: Read: Isolated Inputs 16-23
+ * Write: Reserved
+ * @ttl_in0_7: Read: TTL/CMOS Inputs 0-7
+ * Write: Reserved
+ * @cos0_7: Read: COS Status Inputs 0-7
+ * Write: COS Clear Inputs 0-7
+ * @cos8_15: Read: COS Status Inputs 8-15
+ * Write: COS Clear Inputs 8-15
+ * @cos16_23: Read: COS Status Inputs 16-23
+ * Write: COS Clear Inputs 16-23
+ * @cos_ttl0_7: Read: COS Status TTL/CMOS 0-7
+ * Write: COS Clear TTL/CMOS 0-7
+ * @ctl: Read: Control Register
+ * Write: Control Register
+ * @reserved: Read: Reserved
+ * Write: Reserved
+ * @cos_enable: Read: COS Enable
+ * Write: COS Enable
+ * @soft_reset: Read: IRQ Output Pin Status
+ * Write: Software Board Reset
+ */
+struct idio_24_gpio_reg {
+ u8 out0_7;
+ u8 out8_15;
+ u8 out16_23;
+ u8 ttl_out0_7;
+ u8 in0_7;
+ u8 in8_15;
+ u8 in16_23;
+ u8 ttl_in0_7;
+ u8 cos0_7;
+ u8 cos8_15;
+ u8 cos16_23;
+ u8 cos_ttl0_7;
+ u8 ctl;
+ u8 reserved;
+ u8 cos_enable;
+ u8 soft_reset;
+};
+
+/**
+ * struct idio_24_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @reg: I/O address offset for the GPIO device registers
+ * @irq_mask: I/O bits affected by interrupts
+ */
+struct idio_24_gpio {
+ struct gpio_chip chip;
+ raw_spinlock_t lock;
+ struct idio_24_gpio_reg __iomem *reg;
+ unsigned long irq_mask;
+};
+
+static int idio_24_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* FET Outputs */
+ if (offset < 24)
+ return 0;
+
+ /* Isolated Inputs */
+ if (offset < 48)
+ return 1;
+
+ /* TTL/CMOS I/O */
+ /* OUT MODE = 1 when TTL/CMOS Output Mode is set */
+ return !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask);
+}
+
+static int idio_24_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned int ctl_state;
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* TTL/CMOS I/O */
+ if (offset > 47) {
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ /* Clear TTL/CMOS Output Mode */
+ ctl_state = ioread8(&idio24gpio->reg->ctl) & ~out_mode_mask;
+ iowrite8(ctl_state, &idio24gpio->reg->ctl);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ }
+
+ return 0;
+}
+
+static int idio_24_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned int ctl_state;
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* TTL/CMOS I/O */
+ if (offset > 47) {
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ /* Set TTL/CMOS Output Mode */
+ ctl_state = ioread8(&idio24gpio->reg->ctl) | out_mode_mask;
+ iowrite8(ctl_state, &idio24gpio->reg->ctl);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ }
+
+ chip->set(chip, offset, value);
+ return 0;
+}
+
+static int idio_24_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long offset_mask = BIT(offset % 8);
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* FET Outputs */
+ if (offset < 8)
+ return !!(ioread8(&idio24gpio->reg->out0_7) & offset_mask);
+
+ if (offset < 16)
+ return !!(ioread8(&idio24gpio->reg->out8_15) & offset_mask);
+
+ if (offset < 24)
+ return !!(ioread8(&idio24gpio->reg->out16_23) & offset_mask);
+
+ /* Isolated Inputs */
+ if (offset < 32)
+ return !!(ioread8(&idio24gpio->reg->in0_7) & offset_mask);
+
+ if (offset < 40)
+ return !!(ioread8(&idio24gpio->reg->in8_15) & offset_mask);
+
+ if (offset < 48)
+ return !!(ioread8(&idio24gpio->reg->in16_23) & offset_mask);
+
+ /* TTL/CMOS Outputs */
+ if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
+ return !!(ioread8(&idio24gpio->reg->ttl_out0_7) & offset_mask);
+
+ /* TTL/CMOS Inputs */
+ return !!(ioread8(&idio24gpio->reg->ttl_in0_7) & offset_mask);
+}
+
+static void idio_24_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long out_mode_mask = BIT(1);
+ void __iomem *base;
+ const unsigned int mask = BIT(offset % 8);
+ unsigned long flags;
+ unsigned int out_state;
+
+ /* Isolated Inputs */
+ if (offset > 23 && offset < 48)
+ return;
+
+ /* TTL/CMOS Inputs */
+ if (offset > 47 && !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask))
+ return;
+
+ /* TTL/CMOS Outputs */
+ if (offset > 47)
+ base = &idio24gpio->reg->ttl_out0_7;
+ /* FET Outputs */
+ else if (offset > 15)
+ base = &idio24gpio->reg->out16_23;
+ else if (offset > 7)
+ base = &idio24gpio->reg->out8_15;
+ else
+ base = &idio24gpio->reg->out0_7;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ if (value)
+ out_state = ioread8(base) | mask;
+ else
+ out_state = ioread8(base) & ~mask;
+
+ iowrite8(out_state, base);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static void idio_24_irq_ack(struct irq_data *data)
+{
+}
+
+static void idio_24_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+ unsigned char new_irq_mask;
+ const unsigned long bank_offset = bit_offset / 8 * 8;
+ unsigned char cos_enable_state;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ idio24gpio->irq_mask &= BIT(bit_offset);
+ new_irq_mask = idio24gpio->irq_mask >> bank_offset;
+
+ if (!new_irq_mask) {
+ cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+
+ /* Disable Rising Edge detection */
+ cos_enable_state &= ~BIT(bank_offset);
+ /* Disable Falling Edge detection */
+ cos_enable_state &= ~BIT(bank_offset + 4);
+
+ iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
+ }
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static void idio_24_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned char prev_irq_mask;
+ const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+ const unsigned long bank_offset = bit_offset / 8 * 8;
+ unsigned char cos_enable_state;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ prev_irq_mask = idio24gpio->irq_mask >> bank_offset;
+ idio24gpio->irq_mask |= BIT(bit_offset);
+
+ if (!prev_irq_mask) {
+ cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+
+ /* Enable Rising Edge detection */
+ cos_enable_state |= BIT(bank_offset);
+ /* Enable Falling Edge detection */
+ cos_enable_state |= BIT(bank_offset + 4);
+
+ iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
+ }
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static int idio_24_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ /* The only valid irq types are none and both-edges */
+ if (flow_type != IRQ_TYPE_NONE &&
+ (flow_type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip idio_24_irqchip = {
+ .name = "pcie-idio-24",
+ .irq_ack = idio_24_irq_ack,
+ .irq_mask = idio_24_irq_mask,
+ .irq_unmask = idio_24_irq_unmask,
+ .irq_set_type = idio_24_irq_set_type
+};
+
+static irqreturn_t idio_24_irq_handler(int irq, void *dev_id)
+{
+ struct idio_24_gpio *const idio24gpio = dev_id;
+ unsigned long irq_status;
+ struct gpio_chip *const chip = &idio24gpio->chip;
+ unsigned long irq_mask;
+ int gpio;
+
+ raw_spin_lock(&idio24gpio->lock);
+
+ /* Read Change-Of-State status */
+ irq_status = ioread32(&idio24gpio->reg->cos0_7);
+
+ raw_spin_unlock(&idio24gpio->lock);
+
+ /* Make sure our device generated the IRQ */
+ if (!irq_status)
+ return IRQ_NONE;
+
+ /* Handle only unmasked IRQ */
+ irq_mask = idio24gpio->irq_mask & irq_status;
+
+ for_each_set_bit(gpio, &irq_mask, chip->ngpio - 24)
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
+ gpio + 24));
+
+ raw_spin_lock(&idio24gpio->lock);
+
+ /* Clear Change-Of-State status */
+ iowrite32(irq_status, &idio24gpio->reg->cos0_7);
+
+ raw_spin_unlock(&idio24gpio->lock);
+
+ return IRQ_HANDLED;
+}
+
+#define IDIO_24_NGPIO 56
+static const char *idio_24_names[IDIO_24_NGPIO] = {
+ "OUT0", "OUT1", "OUT2", "OUT3", "OUT4", "OUT5", "OUT6", "OUT7",
+ "OUT8", "OUT9", "OUT10", "OUT11", "OUT12", "OUT13", "OUT14", "OUT15",
+ "OUT16", "OUT17", "OUT18", "OUT19", "OUT20", "OUT21", "OUT22", "OUT23",
+ "IIN0", "IIN1", "IIN2", "IIN3", "IIN4", "IIN5", "IIN6", "IIN7",
+ "IIN8", "IIN9", "IIN10", "IIN11", "IIN12", "IIN13", "IIN14", "IIN15",
+ "IIN16", "IIN17", "IIN18", "IIN19", "IIN20", "IIN21", "IIN22", "IIN23",
+ "TTL0", "TTL1", "TTL2", "TTL3", "TTL4", "TTL5", "TTL6", "TTL7"
+};
+
+static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *const dev = &pdev->dev;
+ struct idio_24_gpio *idio24gpio;
+ int err;
+ const size_t pci_bar_index = 2;
+ const char *const name = pci_name(pdev);
+
+ idio24gpio = devm_kzalloc(dev, sizeof(*idio24gpio), GFP_KERNEL);
+ if (!idio24gpio)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device (%d)\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
+ if (err) {
+ dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
+ return err;
+ }
+
+ idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
+
+ idio24gpio->chip.label = name;
+ idio24gpio->chip.parent = dev;
+ idio24gpio->chip.owner = THIS_MODULE;
+ idio24gpio->chip.base = -1;
+ idio24gpio->chip.ngpio = IDIO_24_NGPIO;
+ idio24gpio->chip.names = idio_24_names;
+ idio24gpio->chip.get_direction = idio_24_gpio_get_direction;
+ idio24gpio->chip.direction_input = idio_24_gpio_direction_input;
+ idio24gpio->chip.direction_output = idio_24_gpio_direction_output;
+ idio24gpio->chip.get = idio_24_gpio_get;
+ idio24gpio->chip.set = idio_24_gpio_set;
+
+ raw_spin_lock_init(&idio24gpio->lock);
+
+ /* Software board reset */
+ iowrite8(0, &idio24gpio->reg->soft_reset);
+
+ err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ err = gpiochip_irqchip_add(&idio24gpio->chip, &idio_24_irqchip, 0,
+ handle_edge_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(dev, "Could not add irqchip (%d)\n", err);
+ return err;
+ }
+
+ err = devm_request_irq(dev, pdev->irq, idio_24_irq_handler, IRQF_SHARED,
+ name, idio24gpio);
+ if (err) {
+ dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id idio_24_pci_dev_id[] = {
+ { PCI_DEVICE(0x494F, 0x0FD0) }, { PCI_DEVICE(0x494F, 0x0BD0) },
+ { PCI_DEVICE(0x494F, 0x07D0) }, { PCI_DEVICE(0x494F, 0x0FC0) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, idio_24_pci_dev_id);
+
+static struct pci_driver idio_24_driver = {
+ .name = "pcie-idio-24",
+ .id_table = idio_24_pci_dev_id,
+ .probe = idio_24_probe
+};
+
+module_pci_driver(idio_24_driver);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES PCIe-IDIO-24 GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e6e5cca624a7..f8d7d1cd8488 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
};
int i, j;
+ /*
+ * STMPE1600: to be able to get IRQ from pins,
+ * a read must be done on GPMR register, or a write in
+ * GPSR or GPCR registers
+ */
+ if (stmpe->partnum == STMPE1600) {
+ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ }
+
for (i = 0; i < CACHE_NR_REGS; i++) {
/* STMPE801 and STMPE1600 don't have RE and FE registers */
if ((stmpe->partnum == STMPE801 ||
@@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
- struct stmpe *stmpe = stmpe_gpio->stmpe;
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = BIT(offset % 8);
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
-
- /*
- * STMPE1600 workaround: to be able to get IRQ from pins,
- * a read must be done on GPMR register, or a write in
- * GPSR or GPCR registers
- */
- if (stmpe->partnum == STMPE1600)
- stmpe_reg_read(stmpe,
- stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
}
static void stmpe_dbg_show_one(struct seq_file *s,
@@ -273,15 +273,21 @@ static void stmpe_dbg_show_one(struct seq_file *s,
u8 fall_reg;
u8 irqen_reg;
- char *edge_det_values[] = {"edge-inactive",
- "edge-asserted",
- "not-supported"};
- char *rise_values[] = {"no-rising-edge-detection",
- "rising-edge-detection",
- "not-supported"};
- char *fall_values[] = {"no-falling-edge-detection",
- "falling-edge-detection",
- "not-supported"};
+ static const char * const edge_det_values[] = {
+ "edge-inactive",
+ "edge-asserted",
+ "not-supported"
+ };
+ static const char * const rise_values[] = {
+ "no-rising-edge-detection",
+ "rising-edge-detection",
+ "not-supported"
+ };
+ static const char * const fall_values[] = {
+ "no-falling-edge-detection",
+ "falling-edge-detection",
+ "not-supported"
+ };
#define NOT_SUPPORTED_IDX 2
u8 edge_det = NOT_SUPPORTED_IDX;
u8 rise = NOT_SUPPORTED_IDX;
@@ -344,7 +350,7 @@ static void stmpe_dbg_show(struct seq_file *s, struct gpio_chip *gc)
for (i = 0; i < gc->ngpio; i++, gpio++) {
stmpe_dbg_show_one(s, gc, i, gpio);
- seq_printf(s, "\n");
+ seq_putc(s, '\n');
}
}
@@ -426,12 +432,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
struct device_node *np = pdev->dev.of_node;
struct stmpe_gpio *stmpe_gpio;
- int ret;
- int irq = 0;
-
- irq = platform_get_irq(pdev, 0);
+ int ret, irq;
- stmpe_gpio = kzalloc(sizeof(struct stmpe_gpio), GFP_KERNEL);
+ stmpe_gpio = kzalloc(sizeof(*stmpe_gpio), GFP_KERNEL);
if (!stmpe_gpio)
return -ENOMEM;
@@ -453,6 +456,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
if (stmpe_gpio->norequest_mask)
stmpe_gpio->chip.irq.need_valid_mask = true;
+ irq = platform_get_irq(pdev, 0);
if (irq < 0)
dev_info(&pdev->dev,
"device configured in no-irq mode: "
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index b5adb79a631a..d16e9d4a129b 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -553,8 +553,10 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
0, 0, of_node_to_fwnode(dev->of_node),
&thunderx_gpio_irqd_ops, txgpio);
- if (!txgpio->irqd)
+ if (!txgpio->irqd) {
+ err = -ENOMEM;
goto out;
+ }
/* Push on irq_data and the domain for each line. */
for (i = 0; i < ngpio; i++) {
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
new file mode 100644
index 000000000000..7f8f5b02e31d
--- /dev/null
+++ b/drivers/gpio/gpio-winbond.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * GPIO interface for Winbond Super I/O chips
+ * Currently, only W83627UHG (Nuvoton NCT6627UD) is supported.
+ *
+ * Author: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/gpio/driver.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+
+#define WB_GPIO_DRIVER_NAME KBUILD_MODNAME
+
+#define WB_SIO_BASE 0x2e
+#define WB_SIO_BASE_HIGH 0x4e
+
+#define WB_SIO_EXT_ENTER_KEY 0x87
+#define WB_SIO_EXT_EXIT_KEY 0xaa
+
+/* global chip registers */
+
+#define WB_SIO_REG_LOGICAL 0x07
+
+#define WB_SIO_REG_CHIP_MSB 0x20
+#define WB_SIO_REG_CHIP_LSB 0x21
+
+#define WB_SIO_CHIP_ID_W83627UHG 0xa230
+#define WB_SIO_CHIP_ID_W83627UHG_MASK GENMASK(15, 4)
+
+#define WB_SIO_REG_DPD 0x22
+#define WB_SIO_REG_DPD_UARTA 4
+#define WB_SIO_REG_DPD_UARTB 5
+
+#define WB_SIO_REG_IDPD 0x23
+#define WB_SIO_REG_IDPD_UARTC 4
+#define WB_SIO_REG_IDPD_UARTD 5
+#define WB_SIO_REG_IDPD_UARTE 6
+#define WB_SIO_REG_IDPD_UARTF 7
+
+#define WB_SIO_REG_GLOBAL_OPT 0x24
+#define WB_SIO_REG_GO_ENFDC 1
+
+#define WB_SIO_REG_OVTGPIO3456 0x29
+#define WB_SIO_REG_OG3456_G3PP 3
+#define WB_SIO_REG_OG3456_G4PP 4
+#define WB_SIO_REG_OG3456_G5PP 5
+#define WB_SIO_REG_OG3456_G6PP 7
+
+#define WB_SIO_REG_I2C_PS 0x2a
+#define WB_SIO_REG_I2CPS_I2CFS 1
+
+#define WB_SIO_REG_GPIO1_MF 0x2c
+#define WB_SIO_REG_G1MF_G1PP 6
+#define WB_SIO_REG_G1MF_G2PP 7
+#define WB_SIO_REG_G1MF_FS_MASK GENMASK(1, 0)
+#define WB_SIO_REG_G1MF_FS_IR_OFF 0
+#define WB_SIO_REG_G1MF_FS_IR 1
+#define WB_SIO_REG_G1MF_FS_GPIO1 2
+#define WB_SIO_REG_G1MF_FS_UARTB 3
+
+/* not an actual device number, just a value meaning 'no device' */
+#define WB_SIO_DEV_NONE 0xff
+
+/* registers with offsets >= 0x30 are specific for a particular device */
+
+/* UART B logical device */
+#define WB_SIO_DEV_UARTB 0x03
+#define WB_SIO_UARTB_REG_ENABLE 0x30
+#define WB_SIO_UARTB_ENABLE_ON 0
+
+/* UART C logical device */
+#define WB_SIO_DEV_UARTC 0x06
+#define WB_SIO_UARTC_REG_ENABLE 0x30
+#define WB_SIO_UARTC_ENABLE_ON 0
+
+/* GPIO3, GPIO4 logical device */
+#define WB_SIO_DEV_GPIO34 0x07
+#define WB_SIO_GPIO34_REG_ENABLE 0x30
+#define WB_SIO_GPIO34_ENABLE_3 0
+#define WB_SIO_GPIO34_ENABLE_4 1
+#define WB_SIO_GPIO34_REG_IO3 0xe0
+#define WB_SIO_GPIO34_REG_DATA3 0xe1
+#define WB_SIO_GPIO34_REG_INV3 0xe2
+#define WB_SIO_GPIO34_REG_IO4 0xe4
+#define WB_SIO_GPIO34_REG_DATA4 0xe5
+#define WB_SIO_GPIO34_REG_INV4 0xe6
+
+/* WDTO, PLED, GPIO5, GPIO6 logical device */
+#define WB_SIO_DEV_WDGPIO56 0x08
+#define WB_SIO_WDGPIO56_REG_ENABLE 0x30
+#define WB_SIO_WDGPIO56_ENABLE_5 1
+#define WB_SIO_WDGPIO56_ENABLE_6 2
+#define WB_SIO_WDGPIO56_REG_IO5 0xe0
+#define WB_SIO_WDGPIO56_REG_DATA5 0xe1
+#define WB_SIO_WDGPIO56_REG_INV5 0xe2
+#define WB_SIO_WDGPIO56_REG_IO6 0xe4
+#define WB_SIO_WDGPIO56_REG_DATA6 0xe5
+#define WB_SIO_WDGPIO56_REG_INV6 0xe6
+
+/* GPIO1, GPIO2, SUSLED logical device */
+#define WB_SIO_DEV_GPIO12 0x09
+#define WB_SIO_GPIO12_REG_ENABLE 0x30
+#define WB_SIO_GPIO12_ENABLE_1 0
+#define WB_SIO_GPIO12_ENABLE_2 1
+#define WB_SIO_GPIO12_REG_IO1 0xe0
+#define WB_SIO_GPIO12_REG_DATA1 0xe1
+#define WB_SIO_GPIO12_REG_INV1 0xe2
+#define WB_SIO_GPIO12_REG_IO2 0xe4
+#define WB_SIO_GPIO12_REG_DATA2 0xe5
+#define WB_SIO_GPIO12_REG_INV2 0xe6
+
+/* UART D logical device */
+#define WB_SIO_DEV_UARTD 0x0d
+#define WB_SIO_UARTD_REG_ENABLE 0x30
+#define WB_SIO_UARTD_ENABLE_ON 0
+
+/* UART E logical device */
+#define WB_SIO_DEV_UARTE 0x0e
+#define WB_SIO_UARTE_REG_ENABLE 0x30
+#define WB_SIO_UARTE_ENABLE_ON 0
+
+/*
+ * For a description of what a particular field of this struct means,
+ * see the description of the relevant module parameter at the bottom
+ * of this file.
+ */
+struct winbond_gpio_params {
+ unsigned long base;
+ unsigned long gpios;
+ unsigned long ppgpios;
+ unsigned long odgpios;
+ bool pledgpio;
+ bool beepgpio;
+ bool i2cgpio;
+};
+
+static struct winbond_gpio_params params;
+
+static int winbond_sio_enter(unsigned long base)
+{
+ if (!request_muxed_region(base, 2, WB_GPIO_DRIVER_NAME))
+ return -EBUSY;
+
+ /*
+ * the datasheet says two successive writes of the "key" value are
+ * needed for the chip to enter the "Extended Function Mode"
+ */
+ outb(WB_SIO_EXT_ENTER_KEY, base);
+ outb(WB_SIO_EXT_ENTER_KEY, base);
+
+ return 0;
+}
+
+static void winbond_sio_select_logical(unsigned long base, u8 dev)
+{
+ outb(WB_SIO_REG_LOGICAL, base);
+ outb(dev, base + 1);
+}
+
+static void winbond_sio_leave(unsigned long base)
+{
+ outb(WB_SIO_EXT_EXIT_KEY, base);
+
+ release_region(base, 2);
+}
+
+static void winbond_sio_reg_write(unsigned long base, u8 reg, u8 data)
+{
+ outb(reg, base);
+ outb(data, base + 1);
+}
+
+static u8 winbond_sio_reg_read(unsigned long base, u8 reg)
+{
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static void winbond_sio_reg_bset(unsigned long base, u8 reg, u8 bit)
+{
+ u8 val;
+
+ val = winbond_sio_reg_read(base, reg);
+ val |= BIT(bit);
+ winbond_sio_reg_write(base, reg, val);
+}
+
+static void winbond_sio_reg_bclear(unsigned long base, u8 reg, u8 bit)
+{
+ u8 val;
+
+ val = winbond_sio_reg_read(base, reg);
+ val &= ~BIT(bit);
+ winbond_sio_reg_write(base, reg, val);
+}
+
+static bool winbond_sio_reg_btest(unsigned long base, u8 reg, u8 bit)
+{
+ return winbond_sio_reg_read(base, reg) & BIT(bit);
+}
+
+/**
+ * struct winbond_gpio_port_conflict - possibly conflicting device information
+ * @name: device name (NULL means no conflicting device defined)
+ * @dev: Super I/O logical device number where the testreg register
+ * is located (or WB_SIO_DEV_NONE - don't select any
+ * logical device)
+ * @testreg: register number where the testbit bit is located
+ * @testbit: index of a bit to check whether an actual conflict exists
+ * @warnonly: if set then a conflict isn't fatal (just warn about it),
+ * otherwise disable the particular GPIO port if a conflict
+ * is detected
+ */
+struct winbond_gpio_port_conflict {
+ const char *name;
+ u8 dev;
+ u8 testreg;
+ u8 testbit;
+ bool warnonly;
+};
+
+/**
+ * struct winbond_gpio_info - information about a particular GPIO port (device)
+ * @dev: Super I/O logical device number of the registers
+ * specified below
+ * @enablereg: port enable bit register number
+ * @enablebit: index of a port enable bit
+ * @outputreg: output driver mode bit register number
+ * @outputppbit: index of a push-pull output driver mode bit
+ * @ioreg: data direction register number
+ * @invreg: pin data inversion register number
+ * @datareg: pin data register number
+ * @conflict: description of a device that possibly conflicts with
+ * this port
+ */
+struct winbond_gpio_info {
+ u8 dev;
+ u8 enablereg;
+ u8 enablebit;
+ u8 outputreg;
+ u8 outputppbit;
+ u8 ioreg;
+ u8 invreg;
+ u8 datareg;
+ struct winbond_gpio_port_conflict conflict;
+};
+
+static const struct winbond_gpio_info winbond_gpio_infos[6] = {
+ { /* 0 */
+ .dev = WB_SIO_DEV_GPIO12,
+ .enablereg = WB_SIO_GPIO12_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO12_ENABLE_1,
+ .outputreg = WB_SIO_REG_GPIO1_MF,
+ .outputppbit = WB_SIO_REG_G1MF_G1PP,
+ .ioreg = WB_SIO_GPIO12_REG_IO1,
+ .invreg = WB_SIO_GPIO12_REG_INV1,
+ .datareg = WB_SIO_GPIO12_REG_DATA1,
+ .conflict = {
+ .name = "UARTB",
+ .dev = WB_SIO_DEV_UARTB,
+ .testreg = WB_SIO_UARTB_REG_ENABLE,
+ .testbit = WB_SIO_UARTB_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 1 */
+ .dev = WB_SIO_DEV_GPIO12,
+ .enablereg = WB_SIO_GPIO12_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO12_ENABLE_2,
+ .outputreg = WB_SIO_REG_GPIO1_MF,
+ .outputppbit = WB_SIO_REG_G1MF_G2PP,
+ .ioreg = WB_SIO_GPIO12_REG_IO2,
+ .invreg = WB_SIO_GPIO12_REG_INV2,
+ .datareg = WB_SIO_GPIO12_REG_DATA2
+ /* special conflict handling so doesn't use conflict data */
+ },
+ { /* 2 */
+ .dev = WB_SIO_DEV_GPIO34,
+ .enablereg = WB_SIO_GPIO34_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO34_ENABLE_3,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G3PP,
+ .ioreg = WB_SIO_GPIO34_REG_IO3,
+ .invreg = WB_SIO_GPIO34_REG_INV3,
+ .datareg = WB_SIO_GPIO34_REG_DATA3,
+ .conflict = {
+ .name = "UARTC",
+ .dev = WB_SIO_DEV_UARTC,
+ .testreg = WB_SIO_UARTC_REG_ENABLE,
+ .testbit = WB_SIO_UARTC_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 3 */
+ .dev = WB_SIO_DEV_GPIO34,
+ .enablereg = WB_SIO_GPIO34_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO34_ENABLE_4,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G4PP,
+ .ioreg = WB_SIO_GPIO34_REG_IO4,
+ .invreg = WB_SIO_GPIO34_REG_INV4,
+ .datareg = WB_SIO_GPIO34_REG_DATA4,
+ .conflict = {
+ .name = "UARTD",
+ .dev = WB_SIO_DEV_UARTD,
+ .testreg = WB_SIO_UARTD_REG_ENABLE,
+ .testbit = WB_SIO_UARTD_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 4 */
+ .dev = WB_SIO_DEV_WDGPIO56,
+ .enablereg = WB_SIO_WDGPIO56_REG_ENABLE,
+ .enablebit = WB_SIO_WDGPIO56_ENABLE_5,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G5PP,
+ .ioreg = WB_SIO_WDGPIO56_REG_IO5,
+ .invreg = WB_SIO_WDGPIO56_REG_INV5,
+ .datareg = WB_SIO_WDGPIO56_REG_DATA5,
+ .conflict = {
+ .name = "UARTE",
+ .dev = WB_SIO_DEV_UARTE,
+ .testreg = WB_SIO_UARTE_REG_ENABLE,
+ .testbit = WB_SIO_UARTE_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 5 */
+ .dev = WB_SIO_DEV_WDGPIO56,
+ .enablereg = WB_SIO_WDGPIO56_REG_ENABLE,
+ .enablebit = WB_SIO_WDGPIO56_ENABLE_6,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G6PP,
+ .ioreg = WB_SIO_WDGPIO56_REG_IO6,
+ .invreg = WB_SIO_WDGPIO56_REG_INV6,
+ .datareg = WB_SIO_WDGPIO56_REG_DATA6,
+ .conflict = {
+ .name = "FDC",
+ .dev = WB_SIO_DEV_NONE,
+ .testreg = WB_SIO_REG_GLOBAL_OPT,
+ .testbit = WB_SIO_REG_GO_ENFDC,
+ .warnonly = false
+ }
+ }
+};
+
+/* returns whether changing a pin is allowed */
+static bool winbond_gpio_get_info(unsigned int *gpio_num,
+ const struct winbond_gpio_info **info)
+{
+ bool allow_changing = true;
+ unsigned long i;
+
+ for_each_set_bit(i, &params.gpios, BITS_PER_LONG) {
+ if (*gpio_num < 8)
+ break;
+
+ *gpio_num -= 8;
+ }
+
+ *info = &winbond_gpio_infos[i];
+
+ /*
+ * GPIO2 (the second port) shares some pins with basic PC
+ * functionality, which is very likely controlled by the firmware.
+ * Don't allow changing these pins by default.
+ */
+ if (i == 1) {
+ if (*gpio_num == 0 && !params.pledgpio)
+ allow_changing = false;
+ else if (*gpio_num == 1 && !params.beepgpio)
+ allow_changing = false;
+ else if ((*gpio_num == 5 || *gpio_num == 6) && !params.i2cgpio)
+ allow_changing = false;
+ }
+
+ return allow_changing;
+}
+
+static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ bool val;
+ int ret;
+
+ winbond_gpio_get_info(&offset, &info);
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ val = winbond_sio_reg_btest(*base, info->datareg, offset);
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ winbond_sio_leave(*base);
+
+ return val;
+}
+
+static int winbond_gpio_direction_in(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ int ret;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return -EACCES;
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ winbond_sio_reg_bset(*base, info->ioreg, offset);
+
+ winbond_sio_leave(*base);
+
+ return 0;
+}
+
+static int winbond_gpio_direction_out(struct gpio_chip *gc,
+ unsigned int offset,
+ int val)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ int ret;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return -EACCES;
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ winbond_sio_reg_bclear(*base, info->ioreg, offset);
+
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ if (val)
+ winbond_sio_reg_bset(*base, info->datareg, offset);
+ else
+ winbond_sio_reg_bclear(*base, info->datareg, offset);
+
+ winbond_sio_leave(*base);
+
+ return 0;
+}
+
+static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int val)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return;
+
+ if (winbond_sio_enter(*base) != 0)
+ return;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ if (val)
+ winbond_sio_reg_bset(*base, info->datareg, offset);
+ else
+ winbond_sio_reg_bclear(*base, info->datareg, offset);
+
+ winbond_sio_leave(*base);
+}
+
+static struct gpio_chip winbond_gpio_chip = {
+ .base = -1,
+ .label = WB_GPIO_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .can_sleep = true,
+ .get = winbond_gpio_get,
+ .direction_input = winbond_gpio_direction_in,
+ .set = winbond_gpio_set,
+ .direction_output = winbond_gpio_direction_out,
+};
+
+static void winbond_gpio_configure_port0_pins(unsigned long base)
+{
+ unsigned int val;
+
+ val = winbond_sio_reg_read(base, WB_SIO_REG_GPIO1_MF);
+ if ((val & WB_SIO_REG_G1MF_FS_MASK) == WB_SIO_REG_G1MF_FS_GPIO1)
+ return;
+
+ pr_warn("GPIO1 pins were connected to something else (%.2x), fixing\n",
+ val);
+
+ val &= ~WB_SIO_REG_G1MF_FS_MASK;
+ val |= WB_SIO_REG_G1MF_FS_GPIO1;
+
+ winbond_sio_reg_write(base, WB_SIO_REG_GPIO1_MF, val);
+}
+
+static void winbond_gpio_configure_port1_check_i2c(unsigned long base)
+{
+ params.i2cgpio = !winbond_sio_reg_btest(base, WB_SIO_REG_I2C_PS,
+ WB_SIO_REG_I2CPS_I2CFS);
+ if (!params.i2cgpio)
+ pr_warn("disabling GPIO2.5 and GPIO2.6 as I2C is enabled\n");
+}
+
+static bool winbond_gpio_configure_port(unsigned long base, unsigned int idx)
+{
+ const struct winbond_gpio_info *info = &winbond_gpio_infos[idx];
+ const struct winbond_gpio_port_conflict *conflict = &info->conflict;
+
+ /* is there a possible conflicting device defined? */
+ if (conflict->name != NULL) {
+ if (conflict->dev != WB_SIO_DEV_NONE)
+ winbond_sio_select_logical(base, conflict->dev);
+
+ if (winbond_sio_reg_btest(base, conflict->testreg,
+ conflict->testbit)) {
+ if (conflict->warnonly)
+ pr_warn("enabled GPIO%u share pins with active %s\n",
+ idx + 1, conflict->name);
+ else {
+ pr_warn("disabling GPIO%u as %s is enabled\n",
+ idx + 1, conflict->name);
+ return false;
+ }
+ }
+ }
+
+ /* GPIO1 and GPIO2 need some (additional) special handling */
+ if (idx == 0)
+ winbond_gpio_configure_port0_pins(base);
+ else if (idx == 1)
+ winbond_gpio_configure_port1_check_i2c(base);
+
+ winbond_sio_select_logical(base, info->dev);
+
+ winbond_sio_reg_bset(base, info->enablereg, info->enablebit);
+
+ if (params.ppgpios & BIT(idx))
+ winbond_sio_reg_bset(base, info->outputreg,
+ info->outputppbit);
+ else if (params.odgpios & BIT(idx))
+ winbond_sio_reg_bclear(base, info->outputreg,
+ info->outputppbit);
+ else
+ pr_notice("GPIO%u pins are %s\n", idx + 1,
+ winbond_sio_reg_btest(base, info->outputreg,
+ info->outputppbit) ?
+ "push-pull" :
+ "open drain");
+
+ return true;
+}
+
+static int winbond_gpio_configure(unsigned long base)
+{
+ unsigned long i;
+
+ for_each_set_bit(i, &params.gpios, BITS_PER_LONG)
+ if (!winbond_gpio_configure_port(base, i))
+ __clear_bit(i, &params.gpios);
+
+ if (!params.gpios) {
+ pr_err("please use 'gpios' module parameter to select some active GPIO ports to enable\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int winbond_gpio_check_chip(unsigned long base)
+{
+ int ret;
+ unsigned int chip;
+
+ ret = winbond_sio_enter(base);
+ if (ret)
+ return ret;
+
+ chip = winbond_sio_reg_read(base, WB_SIO_REG_CHIP_MSB) << 8;
+ chip |= winbond_sio_reg_read(base, WB_SIO_REG_CHIP_LSB);
+
+ pr_notice("chip ID at %lx is %.4x\n", base, chip);
+
+ if ((chip & WB_SIO_CHIP_ID_W83627UHG_MASK) !=
+ WB_SIO_CHIP_ID_W83627UHG) {
+ pr_err("not an our chip\n");
+ ret = -ENODEV;
+ }
+
+ winbond_sio_leave(base);
+
+ return ret;
+}
+
+static int winbond_gpio_imatch(struct device *dev, unsigned int id)
+{
+ unsigned long gpios_rem;
+ int ret;
+
+ gpios_rem = params.gpios & ~GENMASK(ARRAY_SIZE(winbond_gpio_infos) - 1,
+ 0);
+ if (gpios_rem) {
+ pr_warn("unknown ports (%lx) enabled in GPIO ports bitmask\n",
+ gpios_rem);
+ params.gpios &= ~gpios_rem;
+ }
+
+ if (params.ppgpios & params.odgpios) {
+ pr_err("some GPIO ports are set both to push-pull and open drain mode at the same time\n");
+ return 0;
+ }
+
+ if (params.base != 0)
+ return winbond_gpio_check_chip(params.base) == 0;
+
+ /*
+ * if the 'base' module parameter is unset, probe the two default
+ * chip I/O port bases
+ */
+ params.base = WB_SIO_BASE;
+ ret = winbond_gpio_check_chip(params.base);
+ if (ret == 0)
+ return 1;
+ if (ret != -ENODEV && ret != -EBUSY)
+ return 0;
+
+ params.base = WB_SIO_BASE_HIGH;
+ return winbond_gpio_check_chip(params.base) == 0;
+}
+
+static int winbond_gpio_iprobe(struct device *dev, unsigned int id)
+{
+ int ret;
+
+ if (params.base == 0)
+ return -EINVAL;
+
+ ret = winbond_sio_enter(params.base);
+ if (ret)
+ return ret;
+
+ ret = winbond_gpio_configure(params.base);
+
+ winbond_sio_leave(params.base);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Add 8 GPIOs for every GPIO port that was enabled in the 'gpios'
+ * module parameter and that wasn't disabled earlier in
+ * winbond_gpio_configure() & co. (due to, for example, a pin conflict).
+ */
+ winbond_gpio_chip.ngpio = hweight_long(params.gpios) * 8;
+
+ /*
+ * GPIO6 port has only 5 pins, so if it is enabled we have to adjust
+ * the total count appropriately
+ */
+ if (params.gpios & BIT(5))
+ winbond_gpio_chip.ngpio -= (8 - 5);
+
+ winbond_gpio_chip.parent = dev;
+
+ return devm_gpiochip_add_data(dev, &winbond_gpio_chip, &params.base);
+}
+
+static struct isa_driver winbond_gpio_idriver = {
+ .driver = {
+ .name = WB_GPIO_DRIVER_NAME,
+ },
+ .match = winbond_gpio_imatch,
+ .probe = winbond_gpio_iprobe,
+};
+
+module_isa_driver(winbond_gpio_idriver, 1);
+
+module_param_named(base, params.base, ulong, 0444);
+MODULE_PARM_DESC(base,
+ "I/O port base (when unset - probe chip default ones)");
+
+/* This parameter sets which GPIO devices (ports) we enable */
+module_param_named(gpios, params.gpios, ulong, 0444);
+MODULE_PARM_DESC(gpios,
+ "bitmask of GPIO ports to enable (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+/*
+ * The two parameters below set how the GPIO port output drivers are
+ * configured. This can't be a single bitmask since we need three values
+ * per port: push-pull, open-drain and keep as-is (this is the default).
+ */
+module_param_named(ppgpios, params.ppgpios, ulong, 0444);
+MODULE_PARM_DESC(ppgpios,
+ "bitmask of GPIO ports to set to push-pull mode (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+module_param_named(odgpios, params.odgpios, ulong, 0444);
+MODULE_PARM_DESC(odgpios,
+ "bitmask of GPIO ports to set to open drain mode (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+/*
+ * GPIO2.0 and GPIO2.1 control basic PC functionality that we
+ * don't allow tinkering with by default (it is very likely that the
+ * firmware owns these pins).
+ * The two parameters below allow overriding this prohibition.
+ */
+module_param_named(pledgpio, params.pledgpio, bool, 0644);
+MODULE_PARM_DESC(pledgpio,
+ "enable changing value of GPIO2.0 bit (Power LED), default no.");
+
+module_param_named(beepgpio, params.beepgpio, bool, 0644);
+MODULE_PARM_DESC(beepgpio,
+ "enable changing value of GPIO2.1 bit (BEEP), default no.");
+
+MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
+MODULE_DESCRIPTION("GPIO interface for Winbond Super I/O chips");
+MODULE_LICENSE("GPL");
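For clarity, a hedged standalone sketch (not part of the driver; the helper name is hypothetical) of how the 'gpios' bitmask and the five-pin GPIO6 port combine into the final line count, mirroring the math in winbond_gpio_iprobe():

/* Illustrative only: mirrors the ngpio computation in winbond_gpio_iprobe() */
#include <stdio.h>

static unsigned int winbond_ngpio(unsigned long gpios)
{
	/* 8 lines per enabled port... */
	unsigned int ngpio = __builtin_popcountl(gpios) * 8;

	/* ...except GPIO6 (bit 5), which only has 5 pins */
	if (gpios & (1UL << 5))
		ngpio -= 8 - 5;

	return ngpio;
}

int main(void)
{
	printf("%u\n", winbond_ngpio(0x21)); /* GPIO1 + GPIO6 -> prints 13 */
	return 0;
}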
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d6f3d9ee1350..0ecffd172a80 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -414,7 +414,8 @@ EXPORT_SYMBOL_GPL(devm_acpi_dev_remove_driver_gpios);
static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
const char *name, int index,
- struct acpi_reference_args *args)
+ struct acpi_reference_args *args,
+ unsigned int *quirks)
{
const struct acpi_gpio_mapping *gm;
@@ -430,6 +431,8 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
args->args[1] = par->line_index;
args->args[2] = par->active_low;
args->nargs = 3;
+
+ *quirks = gm->quirks;
return true;
}
@@ -461,8 +464,8 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
}
}
-int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+static int
+__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
{
int ret = 0;
@@ -489,12 +492,31 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
return ret;
}
+int
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
+{
+ struct device *dev = &info->adev->dev;
+ enum gpiod_flags old = *flags;
+ int ret;
+
+ ret = __acpi_gpio_update_gpiod_flags(&old, info->flags);
+ if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) {
+ if (ret)
+ dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n");
+ } else {
+ if (ret)
+ dev_dbg(dev, "Override GPIO initialization flags\n");
+ *flags = old;
+ }
+
+ return ret;
+}
+
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
int pin_index;
bool active_low;
- struct acpi_device *adev;
struct gpio_desc *desc;
int n;
};
@@ -531,8 +553,8 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->info.triggering = agpio->triggering;
} else {
lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio);
+ lookup->info.polarity = lookup->active_low;
}
-
}
return 1;
@@ -541,12 +563,13 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
struct acpi_gpio_info *info)
{
+ struct acpi_device *adev = lookup->info.adev;
struct list_head res_list;
int ret;
INIT_LIST_HEAD(&res_list);
- ret = acpi_dev_get_resources(lookup->adev, &res_list,
+ ret = acpi_dev_get_resources(adev, &res_list,
acpi_populate_gpio_lookup,
lookup);
if (ret < 0)
@@ -557,11 +580,8 @@ static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
if (!lookup->desc)
return -ENOENT;
- if (info) {
+ if (info)
*info = lookup->info;
- if (lookup->active_low)
- info->polarity = lookup->active_low;
- }
return 0;
}
@@ -570,6 +590,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
struct acpi_gpio_lookup *lookup)
{
struct acpi_reference_args args;
+ unsigned int quirks = 0;
int ret;
memset(&args, 0, sizeof(args));
@@ -581,14 +602,14 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
if (!adev)
return ret;
- if (!acpi_get_driver_gpio_data(adev, propname, index, &args))
+ if (!acpi_get_driver_gpio_data(adev, propname, index, &args,
+ &quirks))
return ret;
}
/*
* The property was found and resolved, so we need to look up the GPIO
* based on the returned args.
*/
- lookup->adev = args.adev;
if (args.nargs != 3)
return -EPROTO;
@@ -596,6 +617,8 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
lookup->pin_index = args.args[1];
lookup->active_low = !!args.args[2];
+ lookup->info.adev = args.adev;
+ lookup->info.quirks = quirks;
return 0;
}
@@ -643,11 +666,11 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
return ERR_PTR(ret);
dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %d %u\n",
- dev_name(&lookup.adev->dev), lookup.index,
+ dev_name(&lookup.info.adev->dev), lookup.index,
lookup.pin_index, lookup.active_low);
} else {
dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
- lookup.adev = adev;
+ lookup.info.adev = adev;
}
ret = acpi_gpio_resource_lookup(&lookup, info);
@@ -664,7 +687,6 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
struct acpi_gpio_info info;
struct gpio_desc *desc;
char propname[32];
- int err;
int i;
/* Try first from _DSD */
@@ -703,10 +725,7 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
if (info.polarity == GPIO_ACTIVE_LOW)
*lookupflags |= GPIO_ACTIVE_LOW;
- err = acpi_gpio_update_gpiod_flags(dflags, info.flags);
- if (err)
- dev_dbg(dev, "Override GPIO initialization flags\n");
-
+ acpi_gpio_update_gpiod_flags(dflags, &info);
return desc;
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 72a0695d2ac3..564bb7a31da4 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -56,6 +56,42 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
return gpiochip_get_desc(chip, ret);
}
+static void of_gpio_flags_quirks(struct device_node *np,
+ enum of_gpio_flags *flags)
+{
+ /*
+ * Some GPIO fixed regulator quirks.
+ * Note that active low is the default.
+ */
+ if (IS_ENABLED(CONFIG_REGULATOR) &&
+ (of_device_is_compatible(np, "reg-fixed-voltage") ||
+ of_device_is_compatible(np, "regulator-gpio"))) {
+ /*
+ * The regulator GPIO handles are specified such that the
+ * presence or absence of "enable-active-high" solely controls
+ * the polarity of the GPIO line. Any phandle flags must
+ * be actively ignored.
+ */
+ if (*flags & OF_GPIO_ACTIVE_LOW) {
+ pr_warn("%s GPIO handle specifies active low - ignored\n",
+ of_node_full_name(np));
+ *flags &= ~OF_GPIO_ACTIVE_LOW;
+ }
+ if (!of_property_read_bool(np, "enable-active-high"))
+ *flags |= OF_GPIO_ACTIVE_LOW;
+ }
+ /*
+ * Legacy open drain handling for fixed voltage regulators.
+ */
+ if (IS_ENABLED(CONFIG_REGULATOR) &&
+ of_device_is_compatible(np, "reg-fixed-voltage") &&
+ of_property_read_bool(np, "gpio-open-drain")) {
+ *flags |= (OF_GPIO_SINGLE_ENDED | OF_GPIO_OPEN_DRAIN);
+ pr_info("%s uses legacy open drain flag - update the DTS if you can\n",
+ of_node_full_name(np));
+ }
+}
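To make the fixed-regulator polarity rules concrete, a minimal standalone sketch (assumption: plain booleans stand in for the OF property lookups) of the truth table this quirk implements:

/* Illustrative only: phandle polarity flags are ignored for these nodes */
#include <stdbool.h>
#include <stdio.h>

static bool effective_active_low(bool phandle_active_low,
				 bool has_enable_active_high)
{
	(void)phandle_active_low;	/* actively ignored, as above */
	/* active low is the default unless enable-active-high is present */
	return !has_enable_active_high;
}

int main(void)
{
	printf("%d %d\n",
	       effective_active_low(true, false),	/* 1: active low */
	       effective_active_low(false, true));	/* 0: active high */
	return 0;
}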
+
/**
* of_get_named_gpiod_flags() - Get a GPIO descriptor and flags for GPIO API
* @np: device node to get GPIO from
@@ -93,6 +129,9 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
if (IS_ERR(desc))
goto out;
+ if (flags)
+ of_gpio_flags_quirks(np, flags);
+
pr_debug("%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\n",
__func__, propname, np, index,
PTR_ERR_OR_ZERO(desc));
@@ -117,6 +156,71 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
+/*
+ * The SPI GPIO bindings happened before we managed to establish that GPIO
+ * properties should be named "foo-gpios" so we have this special kludge for
+ * them.
+ */
+static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ char prop_name[32]; /* 32 is max size of property name */
+ struct device_node *np = dev->of_node;
+ struct gpio_desc *desc;
+
+ /*
+ * Hopefully the compiler stubs the rest of the function if this
+ * is false.
+ */
+ if (!IS_ENABLED(CONFIG_SPI_MASTER))
+ return ERR_PTR(-ENOENT);
+
+ /* Allow this specifically for "spi-gpio" devices */
+ if (!of_device_is_compatible(np, "spi-gpio") || !con_id)
+ return ERR_PTR(-ENOENT);
+
+ /* Will be "gpio-sck", "gpio-mosi" or "gpio-miso" */
+ snprintf(prop_name, sizeof(prop_name), "%s-%s", "gpio", con_id);
+
+ desc = of_get_named_gpiod_flags(np, prop_name, 0, of_flags);
+ return desc;
+}
+
+/*
+ * Some regulator bindings happened before we managed to establish that GPIO
+ * properties should be named "foo-gpios", so we have this special kludge for
+ * them.
+ */
+static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ /* These are the connection IDs we accept as legacy GPIO phandles */
+ const char *whitelist[] = {
+ "wlf,ldoena", /* Arizona */
+ "wlf,ldo1ena", /* WM8994 */
+ "wlf,ldo2ena", /* WM8994 */
+ };
+ struct device_node *np = dev->of_node;
+ struct gpio_desc *desc;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_REGULATOR))
+ return ERR_PTR(-ENOENT);
+
+ if (!con_id)
+ return ERR_PTR(-ENOENT);
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++)
+ if (!strcmp(con_id, whitelist[i]))
+ break;
+
+ if (i == ARRAY_SIZE(whitelist))
+ return ERR_PTR(-ENOENT);
+
+ desc = of_get_named_gpiod_flags(np, con_id, 0, of_flags);
+ return desc;
+}
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
enum gpio_lookup_flags *flags)
@@ -126,6 +230,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
struct gpio_desc *desc;
unsigned int i;
+ /* Try GPIO property "foo-gpios" and "foo-gpio" */
for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
if (con_id)
snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
@@ -140,6 +245,14 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
break;
}
+ /* Special handling for SPI GPIOs if used */
+ if (IS_ERR(desc))
+ desc = of_find_spi_gpio(dev, con_id, &of_flags);
+
+ /* Special handling for regulator GPIOs if used */
+ if (IS_ERR(desc))
+ desc = of_find_regulator_gpio(dev, con_id, &of_flags);
+
if (IS_ERR(desc))
return desc;
@@ -153,8 +266,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_OPEN_SOURCE;
}
- if (of_flags & OF_GPIO_SLEEP_MAY_LOSE_VALUE)
- *flags |= GPIO_SLEEP_MAY_LOSE_VALUE;
+ if (of_flags & OF_GPIO_TRANSITORY)
+ *flags |= GPIO_TRANSITORY;
return desc;
}
@@ -214,6 +327,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
if (xlate_flags & OF_GPIO_ACTIVE_LOW)
*lflags |= GPIO_ACTIVE_LOW;
+ if (xlate_flags & OF_GPIO_TRANSITORY)
+ *lflags |= GPIO_TRANSITORY;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 3f454eaf2101..3dbaf489a8a5 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
+#include <linux/ctype.h>
#include "gpiolib.h"
@@ -106,8 +107,14 @@ static ssize_t value_show(struct device *dev,
mutex_lock(&data->mutex);
- status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc));
+ status = gpiod_get_value_cansleep(desc);
+ if (status < 0)
+ goto err;
+ buf[0] = '0' + status;
+ buf[1] = '\n';
+ status = 2;
+err:
mutex_unlock(&data->mutex);
return status;
@@ -118,7 +125,7 @@ static ssize_t value_store(struct device *dev,
{
struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
- ssize_t status;
+ ssize_t status = 0;
mutex_lock(&data->mutex);
@@ -127,7 +134,11 @@ static ssize_t value_store(struct device *dev,
} else {
long value;
- status = kstrtol(buf, 0, &value);
+ if (size <= 2 && isdigit(buf[0]) &&
+ (size == 1 || buf[1] == '\n'))
+ value = buf[0] - '0';
+ else
+ status = kstrtol(buf, 0, &value);
if (status == 0) {
gpiod_set_value_cansleep(desc, value);
status = size;
@@ -138,7 +149,7 @@ static ssize_t value_store(struct device *dev,
return status;
}
-static DEVICE_ATTR_RW(value);
+static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
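For reference, a hedged userspace sketch exercising the single-digit fast path above (assumptions: the line has already been exported as gpio42 and configured as an output):

/* Illustrative only: a two-byte "1\n" write takes the isdigit() fast path */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/gpio/gpio42/value", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}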
@@ -474,11 +485,15 @@ static ssize_t export_store(struct class *class,
status = -ENODEV;
goto done;
}
- status = gpiod_export(desc, true);
- if (status < 0)
- gpiod_free(desc);
- else
- set_bit(FLAG_SYSFS, &desc->flags);
+
+ status = gpiod_set_transitory(desc, false);
+ if (!status) {
+ status = gpiod_export(desc, true);
+ if (status < 0)
+ gpiod_free(desc);
+ else
+ set_bit(FLAG_SYSFS, &desc->flags);
+ }
done:
if (status)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 44332b793718..36ca5064486e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(desc_to_gpio);
*/
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
- if (!desc || !desc->gdev || !desc->gdev->chip)
+ if (!desc || !desc->gdev)
return NULL;
return desc->gdev->chip;
}
@@ -196,7 +196,7 @@ static int gpiochip_find_base(int ngpio)
* gpiod_get_direction - return the current direction of a GPIO
* @desc: GPIO to get the direction of
*
- * Return GPIOF_DIR_IN or GPIOF_DIR_OUT, or an error code in case of error.
+ * Returns 0 for output, 1 for input, or an error code in case of error.
*
* This function may sleep if gpiod_cansleep() is true.
*/
@@ -460,6 +460,15 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
return -EINVAL;
+ /*
+ * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
+ * the hardware actually supports enabling both at the same time the
+ * electrical result would be disastrous.
+ */
+ if ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
+ return -EINVAL;
+
/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
if (!(lflags & GPIOHANDLE_REQUEST_OUTPUT) &&
((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
@@ -506,6 +515,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ ret = gpiod_set_transitory(desc, false);
+ if (ret < 0)
+ goto out_free_descs;
+
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -588,6 +601,9 @@ out_free_lh:
* @events: KFIFO for the GPIO events
* @read_lock: mutex lock to protect reads from colliding with adding
* new events to the FIFO
+ * @timestamp: cache for the timestamp, carried from the hardirq
+ * handler to the IRQ thread so that it stays as close as possible
+ * to the actual event
*/
struct lineevent_state {
struct gpio_device *gdev;
@@ -598,17 +614,18 @@ struct lineevent_state {
wait_queue_head_t wait;
DECLARE_KFIFO(events, struct gpioevent_data, 16);
struct mutex read_lock;
+ u64 timestamp;
};
#define GPIOEVENT_REQUEST_VALID_FLAGS \
(GPIOEVENT_REQUEST_RISING_EDGE | \
GPIOEVENT_REQUEST_FALLING_EDGE)
-static unsigned int lineevent_poll(struct file *filep,
+static __poll_t lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct lineevent_state *le = filep->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
poll_wait(filep, &le->wait, wait);
@@ -732,7 +749,10 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
struct gpioevent_data ge;
int ret, level;
- ge.timestamp = ktime_get_real_ns();
+ /* Do not leak kernel stack to userspace */
+ memset(&ge, 0, sizeof(ge));
+
+ ge.timestamp = le->timestamp;
level = gpiod_get_value_cansleep(le->desc);
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
@@ -760,6 +780,19 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
return IRQ_HANDLED;
}
+static irqreturn_t lineevent_irq_handler(int irq, void *p)
+{
+ struct lineevent_state *le = p;
+
+ /*
+ * Just store the timestamp in hardirq context so we get it as
+ * close in time as possible to the actual event.
+ */
+ le->timestamp = ktime_get_real_ns();
+
+ return IRQ_WAKE_THREAD;
+}
+
static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
struct gpioevent_request eventreq;
@@ -852,7 +885,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
/* Request a thread to read the events */
ret = request_threaded_irq(le->irq,
- NULL,
+ lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
le->label,
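For context, a hedged sketch of the consumer side (assumptions: the v1 character-device uAPI from <linux/gpio.h>, a chip at /dev/gpiochip0 and line 4); the timestamp read back here is the one cached in hardirq context by lineevent_irq_handler():

/* Illustrative only: request a rising-edge event and print timestamps */
#include <fcntl.h>
#include <linux/gpio.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct gpioevent_request req;
	struct gpioevent_data event;
	int fd = open("/dev/gpiochip0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.lineoffset = 4;			/* assumed line */
	req.handleflags = GPIOHANDLE_REQUEST_INPUT;
	req.eventflags = GPIOEVENT_REQUEST_RISING_EDGE;
	strcpy(req.consumer_label, "example");

	if (ioctl(fd, GPIO_GET_LINEEVENT_IOCTL, &req) < 0)
		return 1;

	/* each read blocks until an edge; the timestamp is in ns */
	while (read(req.fd, &event, sizeof(event)) == sizeof(event))
		printf("event %u at %llu ns\n", event.id,
		       (unsigned long long)event.timestamp);
	return 0;
}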
@@ -1050,7 +1083,7 @@ static void gpiodevice_release(struct device *dev)
list_del(&gdev->list);
ida_simple_remove(&gpio_ida, gdev->id);
- kfree(gdev->label);
+ kfree_const(gdev->label);
kfree(gdev->descs);
kfree(gdev);
}
@@ -1159,10 +1192,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_descs;
}
- if (chip->label)
- gdev->label = kstrdup(chip->label, GFP_KERNEL);
- else
- gdev->label = kstrdup("unknown", GFP_KERNEL);
+ gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
status = -ENOMEM;
goto err_free_descs;
@@ -1209,31 +1239,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct gpio_desc *desc = &gdev->descs[i];
desc->gdev = gdev;
- /*
- * REVISIT: most hardware initializes GPIOs as inputs
- * (often with pullups enabled) so power usage is
- * minimized. Linux code should set the gpio direction
- * first thing; but until it does, and in case
- * chip->get_direction is not set, we may expose the
- * wrong direction in sysfs.
- */
-
- if (chip->get_direction) {
- /*
- * If we have .get_direction, set up the initial
- * direction flag from the hardware.
- */
- int dir = chip->get_direction(chip, i);
- if (!dir)
- set_bit(FLAG_IS_OUT, &desc->flags);
- } else if (!chip->direction_input) {
- /*
- * If the chip lacks the .direction_input callback
- * we logically assume all lines are outputs.
- */
- set_bit(FLAG_IS_OUT, &desc->flags);
- }
+ /* REVISIT: most hardware initializes GPIOs as inputs (often
+ * with pullups enabled) so power usage is minimized. Linux
+ * code should set the gpio direction first thing; but until
+ * it does, and in case chip->get_direction is not set, we may
+ * expose the wrong direction in sysfs.
+ */
+ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
#ifdef CONFIG_PINCTRL
@@ -1283,7 +1296,7 @@ err_remove_from_list:
list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
err_free_label:
- kfree(gdev->label);
+ kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
err_free_gdev:
@@ -1383,7 +1396,7 @@ static int devm_gpio_chip_match(struct device *dev, void *res, void *data)
}
/**
- * devm_gpiochip_add_data() - Resource manager piochip_add_data()
+ * devm_gpiochip_add_data() - Resource manager gpiochip_add_data()
* @dev: the device pointer on which irq_chip belongs to.
* @chip: the chip to register, with chip->base initialized
* @data: driver-private data associated with this chip
@@ -1510,14 +1523,15 @@ static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
gpiochip->irq.valid_mask = NULL;
}
-static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
- unsigned int offset)
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+ unsigned int offset)
{
/* No mask means all valid */
if (likely(!gpiochip->irq.valid_mask))
return true;
return test_bit(offset, gpiochip->irq.valid_mask);
}
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
/**
* gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
@@ -2174,40 +2188,37 @@ done:
* macro to avoid endless duplication. If the desc is NULL it is an
* optional GPIO and calls should just bail out.
*/
+static int validate_desc(const struct gpio_desc *desc, const char *func)
+{
+ if (!desc)
+ return 0;
+ if (IS_ERR(desc)) {
+ pr_warn("%s: invalid GPIO (errorpointer)\n", func);
+ return PTR_ERR(desc);
+ }
+ if (!desc->gdev) {
+ pr_warn("%s: invalid GPIO (no device)\n", func);
+ return -EINVAL;
+ }
+ if (!desc->gdev->chip) {
+ dev_warn(&desc->gdev->dev,
+ "%s: backing chip is gone\n", func);
+ return 0;
+ }
+ return 1;
+}
+
#define VALIDATE_DESC(desc) do { \
- if (!desc) \
- return 0; \
- if (IS_ERR(desc)) { \
- pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
- return PTR_ERR(desc); \
- } \
- if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO (no device)\n", __func__); \
- return -EINVAL; \
- } \
- if ( !desc->gdev->chip ) { \
- dev_warn(&desc->gdev->dev, \
- "%s: backing chip is gone\n", __func__); \
- return 0; \
- } } while (0)
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return __valid; \
+ } while (0)
#define VALIDATE_DESC_VOID(desc) do { \
- if (!desc) \
- return; \
- if (IS_ERR(desc)) { \
- pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
- return; \
- } \
- if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO (no device)\n", __func__); \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
return; \
- } \
- if (!desc->gdev->chip) { \
- dev_warn(&desc->gdev->dev, \
- "%s: backing chip is gone\n", __func__); \
- return; \
- } } while (0)
-
+ } while (0)
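The refactor boils down to a single tri-state helper shared by both macros; a minimal sketch of the pattern with hypothetical names (negative propagates an error, zero bails out silently, one proceeds):

/* Illustrative only: the tri-state validate-and-bail pattern */
static int validate_thing(const void *thing)
{
	if (!thing)
		return 0;	/* optional and absent: silently succeed */
	/* ... checks returning negative error codes go here ... */
	return 1;		/* valid: the caller may proceed */
}

#define VALIDATE_THING(thing) do {			\
		int __valid = validate_thing(thing);	\
		if (__valid <= 0)			\
			return __valid;			\
	} while (0)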
int gpiod_request(struct gpio_desc *desc, const char *label)
{
@@ -2456,7 +2467,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- struct gpio_chip *gc = desc->gdev->chip;
+ struct gpio_chip *gc;
int ret;
VALIDATE_DESC(desc);
@@ -2473,6 +2484,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
return -EIO;
}
+ gc = desc->gdev->chip;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
/* First see if we can enable open drain in hardware */
ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
@@ -2530,6 +2542,50 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
/**
+ * gpiod_set_transitory - Lose or retain GPIO state on suspend or reset
+ * @desc: descriptor of the GPIO for which to configure persistence
+ * @transitory: True to lose state on suspend or reset, false for persistence
+ *
+ * Returns:
+ * 0 on success, otherwise a negative error code.
+ */
+int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+{
+ struct gpio_chip *chip;
+ unsigned long packed;
+ int gpio;
+ int rc;
+
+ VALIDATE_DESC(desc);
+ /*
+ * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
+ * persistence state.
+ */
+ if (transitory)
+ set_bit(FLAG_TRANSITORY, &desc->flags);
+ else
+ clear_bit(FLAG_TRANSITORY, &desc->flags);
+
+ /* If the driver supports it, set the persistence state now */
+ chip = desc->gdev->chip;
+ if (!chip->set_config)
+ return 0;
+
+ packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
+ !transitory);
+ gpio = gpio_chip_hwgpio(desc);
+ rc = chip->set_config(chip, gpio, packed);
+ if (rc == -ENOTSUPP) {
+ dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
+ gpio);
+ return 0;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gpiod_set_transitory);
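A hedged consumer-side sketch of the new call (hypothetical driver; assumes a "reset" GPIO whose state may legitimately be lost across suspend or reset):

/* Illustrative only: mark a requested GPIO as transitory from probe() */
#include <linux/gpio/consumer.h>

static int example_probe(struct device *dev)
{
	struct gpio_desc *reset;
	int ret;

	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* losing this line's state over suspend/reset is acceptable here */
	ret = gpiod_set_transitory(reset, true);
	if (ret)
		return ret;

	return 0;
}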
+
+/**
* gpiod_is_active_low - test whether a GPIO is active-low or not
* @desc: the gpio descriptor to test
*
@@ -2893,6 +2949,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
/**
+ * gpiod_set_value_nocheck() - set a GPIO line value without checking
+ * @desc: the descriptor to set the value on
+ * @value: value to set
+ *
+ * This sets the value of a GPIO line backing a descriptor, applying
+ * different semantic quirks like active low and open drain/source
+ * handling.
+ */
+static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+{
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ gpio_set_open_drain_value_commit(desc, value);
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ gpio_set_open_source_value_commit(desc, value);
+ else
+ gpiod_set_raw_value_commit(desc, value);
+}
+
+/**
* gpiod_set_value() - assign a gpio's value
* @desc: gpio whose value will be assigned
* @value: value to assign
@@ -2906,16 +2983,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
void gpiod_set_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
- /* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- gpio_set_open_drain_value_commit(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- gpio_set_open_source_value_commit(desc, value);
- else
- gpiod_set_raw_value_commit(desc, value);
+ gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value);
@@ -3116,8 +3185,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return false;
- return !test_bit(FLAG_SLEEP_MAY_LOSE_VALUE,
- &chip->gpiodev->descs[offset].flags);
+ return !test_bit(FLAG_TRANSITORY, &chip->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -3243,9 +3311,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep_if(extra_checks);
VALIDATE_DESC_VOID(desc);
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
- gpiod_set_raw_value_commit(desc, value);
+ gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
@@ -3554,8 +3620,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if (lflags & GPIO_SLEEP_MAY_LOSE_VALUE)
- set_bit(FLAG_SLEEP_MAY_LOSE_VALUE, &desc->flags);
+
+ status = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
+ if (status < 0)
+ return status;
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
@@ -3595,6 +3663,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
struct gpio_desc *desc = NULL;
int status;
enum gpio_lookup_flags lookupflags = 0;
+ /* Maybe we have a device name, maybe not */
+ const char *devname = dev ? dev_name(dev) : "?";
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
@@ -3623,7 +3693,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
- status = gpiod_request(desc, con_id);
+ /*
+ * If a connection label was passed, use that; else attempt to use
+ * the device name as the label
+ */
+ status = gpiod_request(desc, con_id ? con_id : devname);
if (status < 0)
return ERR_PTR(status);
@@ -3639,17 +3713,88 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
EXPORT_SYMBOL_GPL(gpiod_get_index);
/**
+ * gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @node: handle of the OF node
+ * @propname: name of the DT property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * the provided @dflags. If the node does not have the requested GPIO
+ * property, NULL is returned.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ struct gpio_desc *desc;
+ unsigned long lflags = 0;
+ enum of_gpio_flags flags;
+ bool active_low = false;
+ bool single_ended = false;
+ bool open_drain = false;
+ bool transitory = false;
+ int ret;
+
+ desc = of_get_named_gpiod_flags(node, propname,
+ index, &flags);
+
+ if (!desc || IS_ERR(desc)) {
+ /* If it is not there, just return NULL */
+ if (PTR_ERR(desc) == -ENOENT)
+ return NULL;
+ return desc;
+ }
+
+ active_low = flags & OF_GPIO_ACTIVE_LOW;
+ single_ended = flags & OF_GPIO_SINGLE_ENDED;
+ open_drain = flags & OF_GPIO_OPEN_DRAIN;
+ transitory = flags & OF_GPIO_TRANSITORY;
+
+ ret = gpiod_request(desc, label);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (active_low)
+ lflags |= GPIO_ACTIVE_LOW;
+
+ if (single_ended) {
+ if (open_drain)
+ lflags |= GPIO_OPEN_DRAIN;
+ else
+ lflags |= GPIO_OPEN_SOURCE;
+ }
+
+ if (transitory)
+ lflags |= GPIO_TRANSITORY;
+
+ ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+ if (ret < 0) {
+ gpiod_put(desc);
+ return ERR_PTR(ret);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL(gpiod_get_from_of_node);
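A hedged caller sketch for the helper just added (hypothetical property and label names):

/* Illustrative only: fetch and configure a GPIO straight from an OF node */
#include <linux/gpio/consumer.h>
#include <linux/of.h>

static struct gpio_desc *example_get_reset(struct device_node *np)
{
	/* NULL means the property is simply absent; ERR_PTR() is an error */
	return gpiod_get_from_of_node(np, "reset-gpios", 0,
				      GPIOD_OUT_HIGH, "example-reset");
}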
+
+/**
* fwnode_get_named_gpiod - obtain a GPIO from firmware node
* @fwnode: handle of the firmware node
* @propname: name of the firmware property representing the GPIO
- * @index: index of the GPIO to obtain in the consumer
+ * @index: index of the GPIO to obtain for the consumer
* @dflags: GPIO initialization flags
* @label: label to attach to the requested GPIO
*
* This function can be used for drivers that get their configuration
- * from firmware.
+ * from opaque firmware.
*
- * Function properly finds the corresponding GPIO using whatever is the
+ * The function properly finds the corresponding GPIO using whatever is the
* underlying firmware interface and then makes sure that the GPIO
* descriptor is requested before it is returned to the caller.
*
@@ -3666,53 +3811,35 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
{
struct gpio_desc *desc = ERR_PTR(-ENODEV);
unsigned long lflags = 0;
- bool active_low = false;
- bool single_ended = false;
- bool open_drain = false;
int ret;
if (!fwnode)
return ERR_PTR(-EINVAL);
if (is_of_node(fwnode)) {
- enum of_gpio_flags flags;
-
- desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname,
- index, &flags);
- if (!IS_ERR(desc)) {
- active_low = flags & OF_GPIO_ACTIVE_LOW;
- single_ended = flags & OF_GPIO_SINGLE_ENDED;
- open_drain = flags & OF_GPIO_OPEN_DRAIN;
- }
+ desc = gpiod_get_from_of_node(to_of_node(fwnode),
+ propname, index,
+ dflags,
+ label);
+ return desc;
} else if (is_acpi_node(fwnode)) {
struct acpi_gpio_info info;
desc = acpi_node_get_gpiod(fwnode, propname, index, &info);
- if (!IS_ERR(desc)) {
- active_low = info.polarity == GPIO_ACTIVE_LOW;
- ret = acpi_gpio_update_gpiod_flags(&dflags, info.flags);
- if (ret)
- pr_debug("Override GPIO initialization flags\n");
- }
- }
+ if (IS_ERR(desc))
+ return desc;
- if (IS_ERR(desc))
- return desc;
+ acpi_gpio_update_gpiod_flags(&dflags, &info);
+
+ if (info.polarity == GPIO_ACTIVE_LOW)
+ lflags |= GPIO_ACTIVE_LOW;
+ }
+ /* Currently only ACPI takes this path */
ret = gpiod_request(desc, label);
if (ret)
return ERR_PTR(ret);
- if (active_low)
- lflags |= GPIO_ACTIVE_LOW;
-
- if (single_ended) {
- if (open_drain)
- lflags |= GPIO_OPEN_DRAIN;
- else
- lflags |= GPIO_OPEN_SOURCE;
- }
-
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 6c44d1652139..b17ec6795c81 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -58,7 +58,7 @@ struct gpio_device {
struct gpio_desc *descs;
int base;
u16 ngpio;
- char *label;
+ const char *label;
void *data;
struct list_head list;
@@ -75,16 +75,20 @@ struct gpio_device {
/**
* struct acpi_gpio_info - ACPI GPIO specific information
+ * @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
+ * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
struct acpi_gpio_info {
+ struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
int polarity;
int triggering;
+ unsigned int quirks;
};
/* gpio suffixes used for ACPI and device tree lookup */
@@ -124,7 +128,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
- enum gpiod_flags update);
+ struct acpi_gpio_info *info);
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
@@ -149,7 +153,7 @@ static inline void
acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
static inline int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
{
return 0;
}
@@ -189,6 +193,12 @@ void gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_desc **desc_array,
int *value_array);
+/* This is just passed between gpiolib and devres */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
+
extern struct spinlock gpio_lock;
extern struct list_head gpio_devices;
@@ -205,7 +215,7 @@ struct gpio_desc {
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_SLEEP_MAY_LOSE_VALUE 12 /* GPIO may lose value in sleep */
+#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
/* Connection label */
const char *label;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index a9782b1aba47..34daf895f848 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment(
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
- const struct default_adjustment *default_adjust);
+ enum dc_color_space colorspace);
void dpp1_cm_set_gamut_remap(
struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 40627c244bf5..ed1216b53465 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap(
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
- const struct default_adjustment *default_adjust)
+ enum dc_color_space colorspace)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
uint32_t ocsc_mode = 0;
- if (default_adjust != NULL) {
- switch (default_adjust->out_color_space) {
+ switch (colorspace) {
case COLOR_SPACE_SRGB:
case COLOR_SPACE_2020_RGB_FULLRANGE:
ocsc_mode = 0;
@@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default(
case COLOR_SPACE_UNKNOWN:
default:
break;
- }
}
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 961ad5c3b454..05dc01e54531 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
tbl_entry.color_space = color_space;
//tbl_entry.regval = matrix;
pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ } else {
+ pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 83a68460edcd..9420dfb94d39 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -64,7 +64,7 @@ struct dpp_funcs {
void (*opp_set_csc_default)(
struct dpp *dpp,
- const struct default_adjustment *default_adjust);
+ enum dc_color_space colorspace);
void (*opp_set_csc_adjustment)(
struct dpp *dpp,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2e065facdce7..a0f4d2a2a481 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -168,16 +168,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
int x, int y)
{
+ const struct drm_format_info *format = fb->format;
+ unsigned int num_planes = format->num_planes;
u32 addr = drm_fb_obj(fb)->dev_addr;
- int num_planes = fb->format->num_planes;
int i;
if (num_planes > 3)
num_planes = 3;
- for (i = 0; i < num_planes; i++)
+ addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+ x * format->cpp[0];
+
+ y /= format->vsub;
+ x /= format->hsub;
+
+ for (i = 1; i < num_planes; i++)
addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * fb->format->cpp[i];
+ x * format->cpp[i];
for (; i < 3; i++)
addrs[i] = 0;
}
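A hedged worked example of the address math above (assumptions: a two-plane YUV420-style framebuffer with hsub = vsub = 2 and one byte per sample); only plane 0 uses the unscaled coordinates:

/* Illustrative only: plane addresses for x=5, y=7 on a 2-plane YUV420 fb */
#include <stdio.h>

int main(void)
{
	unsigned int x = 5, y = 7;
	unsigned int pitches[2] = { 1024, 512 };	/* assumed */
	unsigned int offsets[2] = { 0, 1024 * 768 };	/* assumed */
	unsigned int cpp[2] = { 1, 1 }, hsub = 2, vsub = 2;

	/* luma plane uses the raw coordinates */
	unsigned int addr0 = offsets[0] + y * pitches[0] + x * cpp[0];

	/* chroma planes are subsampled, so scale the coordinates once */
	y /= vsub;
	x /= hsub;
	unsigned int addr1 = offsets[1] + y * pitches[1] + x * cpp[1];

	printf("%u %u\n", addr0, addr1);	/* 7173 787970 */
	return 0;
}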
@@ -744,15 +751,14 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
if (plane->fb)
drm_framebuffer_put(plane->fb);
- /* Power down the Y/U/V FIFOs */
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
-
/* Power down most RAMs and FIFOs if this is the primary plane */
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
+ sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN64x66;
dma_ctrl0_mask = CFG_GRA_ENA;
} else {
+ /* Power down the Y/U/V FIFOs */
+ sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
dma_ctrl0_mask = CFG_DMA_ENA;
}
@@ -1225,17 +1231,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
dcrtc);
- if (ret < 0) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret < 0)
+ goto err_crtc;
if (dcrtc->variant->init) {
ret = dcrtc->variant->init(dcrtc, dev);
- if (ret) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret)
+ goto err_crtc;
}
/* Ensure AXI pipeline is enabled */
@@ -1246,13 +1248,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->crtc.port = port;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
- return -ENOMEM;
+ if (!primary) {
+ ret = -ENOMEM;
+ goto err_crtc;
+ }
ret = armada_drm_plane_init(primary);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1263,7 +1267,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1282,6 +1286,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
err_crtc_init:
primary->base.funcs->destroy(&primary->base);
+err_crtc:
+ kfree(dcrtc);
+
return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index bab11f483575..bfd3514fbe9b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -42,6 +42,8 @@ struct armada_plane_work {
};
struct armada_plane_state {
+ u16 src_x;
+ u16 src_y;
u32 src_hw;
u32 dst_hw;
u32 dst_yx;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index b411b608821a..aba947696178 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -99,6 +99,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
{
struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ const struct drm_format_info *format;
struct drm_rect src = {
.x1 = src_x,
.y1 = src_y,
@@ -117,7 +118,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
};
uint32_t val, ctrl0;
unsigned idx = 0;
- bool visible;
+ bool visible, fb_changed;
int ret;
trace_armada_ovl_plane_update(plane, crtc, fb,
@@ -138,6 +139,18 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (!visible)
ctrl0 &= ~CFG_DMA_ENA;
+ /*
+ * Shifting a YUV packed format image by one pixel causes the U/V
+ * planes to swap. Compensate for it by also toggling the UV swap.
+ */
+ format = fb->format;
+ if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1))
+ ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+ fb_changed = plane->fb != fb ||
+ dplane->base.state.src_x != src.x1 >> 16 ||
+ dplane->base.state.src_y != src.y1 >> 16;
+
if (!dcrtc->plane) {
dcrtc->plane = plane;
armada_ovl_update_attr(&dplane->prop, dcrtc);
@@ -145,7 +158,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
- if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
+ if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) {
val = (drm_rect_height(&src) & 0xffff0000) |
drm_rect_width(&src) >> 16;
dplane->base.state.src_hw = val;
@@ -169,9 +182,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
armada_drm_plane_work_cancel(dcrtc, &dplane->base);
- if (plane->fb != fb) {
- u32 addrs[3], pixel_format;
- int num_planes, hsub;
+ if (fb_changed) {
+ u32 addrs[3];
/*
* Take a reference on the new framebuffer - we want to
@@ -182,23 +194,11 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (plane->fb)
armada_ovl_retire_fb(dplane, plane->fb);
- src_y = src.y1 >> 16;
- src_x = src.x1 >> 16;
+ dplane->base.state.src_y = src_y = src.y1 >> 16;
+ dplane->base.state.src_x = src_x = src.x1 >> 16;
armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
- pixel_format = fb->format->format;
- hsub = drm_format_horz_chroma_subsampling(pixel_format);
- num_planes = fb->format->num_planes;
-
- /*
- * Annoyingly, shifting a YUYV-format image by one pixel
- * causes the U/V planes to toggle. Toggle the UV swap.
- * (Unfortunately, this causes momentary colour flickering.)
- */
- if (src_x & (hsub - 1) && num_planes == 1)
- ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
-
armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b3c6e997ccdb..9a17bd3639d1 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -559,10 +559,10 @@ EXPORT_SYMBOL(drm_read);
*
* Mask of POLL flags indicating the current status of the file.
*/
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file_priv->event_wait, wait);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 85d4c57870fb..49af94627c8a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
}
static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, int rings)
+ unsigned int opcode, unsigned long rings)
{
struct cmd_info *info = NULL;
unsigned int ring;
- for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+ for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
info = find_cmd_entry(gvt, opcode, ring);
if (info)
break;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8e331142badb..64d67ff9bf08 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
return ret;
} else {
if (!test_bit(index, spt->post_shadow_bitmap)) {
+ int type = spt->shadow_page.type;
+
ppgtt_get_shadow_entry(spt, &se, index);
ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
if (ret)
return ret;
+ ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &se, index);
}
-
ppgtt_set_post_shadow(spt, index);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54b5d4c582b6..e143004e66d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2368,6 +2368,9 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset_wq;
+
/* Display functions */
struct drm_i915_display_funcs display;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 18de6569d04a..5cfba89ed586 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -467,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
struct drm_i915_gem_request *rq;
struct intel_engine_cs *engine;
- if (!dma_fence_is_i915(fence))
+ if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
return;
rq = to_request(fence);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 59ee808f8fd9..d453756ca128 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2331,12 +2331,12 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
*
* Returns: any poll events that are ready without sleeping
*/
-static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
+static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
struct i915_perf_stream *stream,
struct file *file,
poll_table *wait)
{
- unsigned int events = 0;
+ __poll_t events = 0;
stream->ops->poll_wait(stream, file, wait);
@@ -2365,11 +2365,11 @@ static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
*
* Returns: any poll events that are ready without sleeping
*/
-static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
+static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
struct drm_i915_private *dev_priv = stream->dev_priv;
- int ret;
+ __poll_t ret;
mutex_lock(&dev_priv->perf.lock);
ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3866c49bc390..7923dfd9963c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6977,6 +6977,7 @@ enum {
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
#define MASK_WAKEMEM (1<<13)
#define SKL_DFSM _MMIO(0x51000)
@@ -7026,6 +7027,8 @@ enum {
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
+#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -8522,6 +8525,7 @@ enum skl_power_gate {
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 791759f632e1..fb46ce3bd5f2 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -434,13 +434,13 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
return ret ?: count;
}
-static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
-static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
-static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+static DEVICE_ATTR_RO(gt_act_freq_mhz);
+static DEVICE_ATTR_RO(gt_cur_freq_mhz);
+static DEVICE_ATTR_RW(gt_boost_freq_mhz);
+static DEVICE_ATTR_RW(gt_max_freq_mhz);
+static DEVICE_ATTR_RW(gt_min_freq_mhz);
-static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
+static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index b2a6d62b71c0..60cf4e58389a 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -860,16 +860,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
- int min_cdclk = skl_calc_cdclk(0, vco);
u32 val;
WARN_ON(vco != 8100000 && vco != 8640000);
- /* select the minimum CDCLK before enabling DPLL 0 */
- val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
- I915_WRITE(CDCLK_CTL, val);
- POSTING_READ(CDCLK_CTL);
-
/*
* We always enable DPLL0 with the lowest link rate possible, but still
* taking into account the VCO required to operate the eDP panel at the
@@ -923,7 +917,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
- u32 freq_select, pcu_ack;
+ u32 freq_select, pcu_ack, cdclk_ctl;
int ret;
WARN_ON((cdclk == 24000) != (vco == 0));
@@ -940,7 +934,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- /* set CDCLK_CTL */
+ /* Choose frequency for this cdclk */
switch (cdclk) {
case 450000:
case 432000:
@@ -968,10 +962,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
+ cdclk_ctl = I915_READ(CDCLK_CTL);
+
+ if (dev_priv->cdclk.hw.vco != vco) {
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ }
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ POSTING_READ(CDCLK_CTL);
+
if (dev_priv->cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
- I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 30cf273d57aa..50f8443641b8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1211,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
pipe_name(pipe));
}
-static void assert_cursor(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- bool cur_state;
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
- cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
- else
- cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
-
- I915_STATE_WARN(cur_state != state,
- "cursor on pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), onoff(state), onoff(cur_state));
-}
-#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
-#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
-
void assert_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
@@ -1255,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv,
pipe_name(pipe), onoff(state), onoff(cur_state));
}
-static void assert_plane(struct drm_i915_private *dev_priv,
- enum plane plane, bool state)
+static void assert_plane(struct intel_plane *plane, bool state)
{
- u32 val;
- bool cur_state;
+ bool cur_state = plane->get_hw_state(plane);
- val = I915_READ(DSPCNTR(plane));
- cur_state = !!(val & DISPLAY_PLANE_ENABLE);
I915_STATE_WARN(cur_state != state,
- "plane %c assertion failure (expected %s, current %s)\n",
- plane_name(plane), onoff(state), onoff(cur_state));
+ "%s assertion failure (expected %s, current %s)\n",
+ plane->base.name, onoff(state), onoff(cur_state));
}
-#define assert_plane_enabled(d, p) assert_plane(d, p, true)
-#define assert_plane_disabled(d, p) assert_plane(d, p, false)
-
-static void assert_planes_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- int i;
-
- /* Primary planes are fixed to pipes on gen4+ */
- if (INTEL_GEN(dev_priv) >= 4) {
- u32 val = I915_READ(DSPCNTR(pipe));
- I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
- "plane %c assertion failure, should be disabled but not\n",
- plane_name(pipe));
- return;
- }
-
- /* Need to check both planes against the pipe */
- for_each_pipe(dev_priv, i) {
- u32 val = I915_READ(DSPCNTR(i));
- enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
- DISPPLANE_SEL_PIPE_SHIFT;
- I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
- "plane %c assertion failure, should be off on pipe %c but is still active\n",
- plane_name(i), pipe_name(pipe));
- }
-}
+#define assert_plane_enabled(p) assert_plane(p, true)
+#define assert_plane_disabled(p) assert_plane(p, false)
-static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void assert_planes_disabled(struct intel_crtc *crtc)
{
- int sprite;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- if (INTEL_GEN(dev_priv) >= 9) {
- for_each_sprite(dev_priv, pipe, sprite) {
- u32 val = I915_READ(PLANE_CTL(pipe, sprite));
- I915_STATE_WARN(val & PLANE_CTL_ENABLE,
- "plane %d assertion failure, should be off on pipe %c but is still active\n",
- sprite, pipe_name(pipe));
- }
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- for_each_sprite(dev_priv, pipe, sprite) {
- u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
- I915_STATE_WARN(val & SP_ENABLE,
- "sprite %c assertion failure, should be off on pipe %c but is still active\n",
- sprite_name(pipe, sprite), pipe_name(pipe));
- }
- } else if (INTEL_GEN(dev_priv) >= 7) {
- u32 val = I915_READ(SPRCTL(pipe));
- I915_STATE_WARN(val & SPRITE_ENABLE,
- "sprite %c assertion failure, should be off on pipe %c but is still active\n",
- plane_name(pipe), pipe_name(pipe));
- } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
- u32 val = I915_READ(DVSCNTR(pipe));
- I915_STATE_WARN(val & DVS_ENABLE,
- "sprite %c assertion failure, should be off on pipe %c but is still active\n",
- plane_name(pipe), pipe_name(pipe));
- }
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ assert_plane_disabled(plane);
}
static void assert_vblank_disabled(struct drm_crtc *crtc)
@@ -1918,9 +1849,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
- assert_planes_disabled(dev_priv, pipe);
- assert_cursor_disabled(dev_priv, pipe);
- assert_sprites_disabled(dev_priv, pipe);
+ assert_planes_disabled(crtc);
/*
* A pipe without a PLL won't actually be able to drive bits from
@@ -1989,9 +1918,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
* Make sure planes won't keep trying to pump pixels to us,
* or we might hang the display.
*/
- assert_planes_disabled(dev_priv, pipe);
- assert_cursor_disabled(dev_priv, pipe);
- assert_sprites_disabled(dev_priv, pipe);
+ assert_planes_disabled(crtc);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
@@ -2820,6 +2747,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->active_planes);
}
+static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ intel_set_plane_visible(crtc_state, plane_state, false);
+
+ if (plane->id == PLANE_PRIMARY)
+ intel_pre_disable_primary_noatomic(&crtc->base);
+
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, crtc);
+}
+
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -2877,12 +2821,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
- intel_set_plane_visible(to_intel_crtc_state(crtc_state),
- to_intel_plane_state(plane_state),
- false);
- intel_pre_disable_primary_noatomic(&intel_crtc->base);
- trace_intel_disable_plane(primary, intel_crtc);
- intel_plane->disable_plane(intel_plane, intel_crtc);
+ intel_plane_disable_noatomic(intel_crtc, intel_plane);
return;
@@ -3385,6 +3324,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
+{
+
+ struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane plane = primary->plane;
+ enum pipe pipe = primary->pipe;
+ bool ret;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-4 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
{
@@ -4866,7 +4830,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
* a vblank wait.
*/
- assert_plane_enabled(dev_priv, crtc->plane);
+ assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
@@ -4899,7 +4864,8 @@ void hsw_disable_ips(struct intel_crtc *crtc)
if (!crtc->config->ips_enabled)
return;
- assert_plane_enabled(dev_priv, crtc->plane);
+ assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
@@ -5899,6 +5865,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
+ struct intel_plane *plane;
u64 domains;
struct drm_atomic_state *state;
struct intel_crtc_state *crtc_state;
@@ -5907,11 +5874,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
if (!intel_crtc->active)
return;
- if (crtc->primary->state->visible) {
- intel_pre_disable_primary_noatomic(crtc);
+ for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
- intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
- crtc->primary->state->visible = false;
+ if (plane_state->base.visible)
+ intel_plane_disable_noatomic(intel_crtc, plane);
}
state = drm_atomic_state_alloc(crtc->dev);
@@ -9477,6 +9445,23 @@ static void i845_disable_cursor(struct intel_plane *plane,
i845_update_cursor(plane, NULL, NULL);
}
+static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -9670,6 +9655,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
i9xx_update_cursor(plane, NULL, NULL);
}
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-3 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
@@ -12544,11 +12551,15 @@ static int intel_atomic_commit(struct drm_device *dev,
INIT_WORK(&state->commit_work, intel_atomic_commit_work);
i915_sw_fence_commit(&intel_state->commit_ready);
- if (nonblock)
+ if (nonblock && intel_state->modeset) {
+ queue_work(dev_priv->modeset_wq, &state->commit_work);
+ } else if (nonblock) {
queue_work(system_unbound_wq, &state->commit_work);
- else
+ } else {
+ if (intel_state->modeset)
+ flush_workqueue(dev_priv->modeset_wq);
intel_atomic_commit_tail(state);
-
+ }
return 0;
}
@@ -13201,6 +13212,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
+ primary->get_hw_state = skl_plane_get_hw_state;
} else if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13211,6 +13223,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
+ primary->get_hw_state = skl_plane_get_hw_state;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13218,6 +13231,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
+ primary->get_hw_state = i9xx_plane_get_hw_state;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13225,6 +13239,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
+ primary->get_hw_state = i9xx_plane_get_hw_state;
}
if (INTEL_GEN(dev_priv) >= 9)
@@ -13314,10 +13329,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
cursor->update_plane = i845_update_cursor;
cursor->disable_plane = i845_disable_cursor;
+ cursor->get_hw_state = i845_cursor_get_hw_state;
cursor->check_plane = i845_check_cursor;
} else {
cursor->update_plane = i9xx_update_cursor;
cursor->disable_plane = i9xx_disable_cursor;
+ cursor->get_hw_state = i9xx_cursor_get_hw_state;
cursor->check_plane = i9xx_check_cursor;
}
@@ -14462,6 +14479,8 @@ int intel_modeset_init(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
+ dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
@@ -14665,8 +14684,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
pipe_name(pipe));
- assert_plane_disabled(dev_priv, PLANE_A);
- assert_plane_disabled(dev_priv, PLANE_B);
+ WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
+ WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
+ WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));
@@ -14677,22 +14699,36 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(DPLL(pipe));
}
-static bool
-intel_check_plane_mapping(struct intel_crtc *crtc)
+static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
+ struct intel_plane *primary)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 val;
+ enum plane plane = primary->plane;
+ u32 val = I915_READ(DSPCNTR(plane));
- if (INTEL_INFO(dev_priv)->num_pipes == 1)
- return true;
+ return (val & DISPLAY_PLANE_ENABLE) == 0 ||
+ (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+}
- val = I915_READ(DSPCNTR(!crtc->plane));
+static void
+intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
- if ((val & DISPLAY_PLANE_ENABLE) &&
- (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
- return false;
+ if (INTEL_GEN(dev_priv) >= 4)
+ return;
- return true;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+
+ if (intel_plane_mapping_ok(crtc, plane))
+ continue;
+
+ DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
+ plane->base.name);
+ intel_plane_disable_noatomic(crtc, plane);
+ }
}
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
@@ -14748,33 +14784,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
- continue;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc);
+ if (plane_state->base.visible &&
+ plane->base.type != DRM_PLANE_TYPE_PRIMARY)
+ intel_plane_disable_noatomic(crtc, plane);
}
}
- /* We need to sanitize the plane -> pipe mapping first because this will
- * disable the crtc (and hence change the state) if it is wrong. Note
- * that gen4+ has a fixed plane -> pipe mapping. */
- if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
- bool plane;
-
- DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
- crtc->base.base.id, crtc->base.name);
-
- /* Pipe has the wrong plane attached and the plane is active.
- * Temporarily change the plane mapping and disable everything
- * ... */
- plane = crtc->plane;
- crtc->base.primary->state->visible = true;
- crtc->plane = !plane;
- intel_crtc_disable_noatomic(&crtc->base, ctx);
- crtc->plane = plane;
- }
-
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
if (crtc->active && !intel_crtc_has_encoders(crtc))
@@ -14879,24 +14897,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
-static bool primary_get_hw_state(struct intel_plane *plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
- return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
-}
-
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
- struct intel_plane *primary = to_intel_plane(crtc->base.primary);
- bool visible;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
- visible = crtc->active && primary_get_hw_state(primary);
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ bool visible = plane->get_hw_state(plane);
- intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
- to_intel_plane_state(primary->base.state),
- visible);
+ intel_set_plane_visible(crtc_state, plane_state, visible);
+ }
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15094,6 +15109,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(dev_priv);
+ intel_sanitize_plane_mapping(dev_priv);
+
for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
}
@@ -15270,6 +15287,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_gt_powersave(dev_priv);
intel_teardown_gmbus(dev_priv);
+
+ destroy_workqueue(dev_priv->modeset_wq);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 6c7f8bca574e..5d77f75a9f9c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -862,6 +862,7 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
+ bool (*get_hw_state)(struct intel_plane *plane);
int (*check_plane)(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
@@ -1924,6 +1925,7 @@ void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
+bool skl_plane_get_hw_state(struct intel_plane *plane);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab5bf4e2e28e..6074e04dc99f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
+ /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+ ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ if (ret)
+ return ret;
+
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d36e25607435..e71a8cd50498 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+ if (i915_gem_request_completed(request))
+ return;
+
if (prio <= READ_ONCE(request->priotree.priority))
return;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 6e3b430fccdc..55ea5eb3b7df 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
- i915_reg_t psr_ctl;
+ i915_reg_t psr_status;
u32 psr_status_mask;
if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
0);
if (dev_priv->psr.psr2_support) {
- psr_ctl = EDP_PSR2_CTL;
+ psr_status = EDP_PSR2_STATUS_CTL;
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) &
+ I915_WRITE(EDP_PSR2_CTL,
+ I915_READ(EDP_PSR2_CTL) &
~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
} else {
- psr_ctl = EDP_PSR_STATUS_CTL;
+ psr_status = EDP_PSR_STATUS_CTL;
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+ I915_WRITE(EDP_PSR_CTL,
+ I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
}
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv,
- psr_ctl, psr_status_mask, 0,
+ psr_status, psr_status_mask, 0,
2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8af286c63d3b..7e115f3927f6 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling DC5\n");
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("Disabling DC6\n");
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4fcf80ca91dd..4a8a5d918a83 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -329,6 +329,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+bool
+skl_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static void
chv_update_csc(struct intel_plane *plane, uint32_t format)
{
@@ -506,6 +526,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool
+vlv_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -646,6 +686,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool
+ivb_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -777,6 +836,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool
+g4x_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static int
intel_check_sprite_plane(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
@@ -1232,6 +1310,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
+ intel_plane->get_hw_state = skl_plane_get_hw_state;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1242,6 +1321,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
+ intel_plane->get_hw_state = skl_plane_get_hw_state;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1252,6 +1332,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->get_hw_state = vlv_plane_get_hw_state;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -1267,6 +1348,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->get_hw_state = ivb_plane_get_hw_state;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1277,6 +1359,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = g4x_update_plane;
intel_plane->disable_plane = g4x_disable_plane;
+ intel_plane->get_hw_state = g4x_plane_get_hw_state;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN6(dev_priv)) {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0760b93e9d1f..baab93398e54 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -121,6 +121,7 @@ int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 435ff8662cfa..ef687414969e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1447,11 +1447,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
args.nv50.ro = 0;
args.nv50.kind = mem->kind;
args.nv50.comp = mem->comp;
+ argc = sizeof(args.nv50);
break;
case NVIF_CLASS_MEM_GF100:
args.gf100.version = 0;
args.gf100.ro = 0;
args.gf100.kind = mem->kind;
+ argc = sizeof(args.gf100);
break;
default:
WARN_ON(1);
@@ -1459,7 +1461,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
}
ret = nvif_object_map_handle(&mem->mem.object,
- &argc, argc,
+ &args, argc,
&handle, &length);
if (ret != 1)
return ret ? ret : -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 00eeaaffeae5..08e77cd55e6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = g84_mmu_new,
+ .mmu = mcp77_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = g84_mmu_new,
+ .mmu = mcp77_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index a2978a37b4f3..700fc754f28a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -174,6 +174,7 @@ gf119_sor = {
.links = gf119_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gf119_sor_dp_pattern,
+ .drive = gf119_sor_dp_drive,
.vcpi = gf119_sor_dp_vcpi,
.audio = gf119_sor_dp_audio,
.audio_sym = gf119_sor_dp_audio_sym,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 9646adec57cb..243f0a5c8a62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -73,7 +73,8 @@ static int
nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
- bar->func->bar1.fini(bar);
+ if (bar->func->bar1.fini)
+ bar->func->bar1.fini(bar);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index b10077d38839..35878fb538f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -26,7 +26,6 @@ gk20a_bar_func = {
.dtor = gf100_bar_dtor,
.oneinit = gf100_bar_oneinit,
.bar1.init = gf100_bar_bar1_init,
- .bar1.fini = gf100_bar_bar1_fini,
.bar1.wait = gf100_bar_bar1_wait,
.bar1.vmm = gf100_bar_bar1_vmm,
.flush = g84_bar_flush,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 352a65f9371c..67ee983bb026 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -4,6 +4,7 @@ nvkm-y += nvkm/subdev/mmu/nv41.o
nvkm-y += nvkm/subdev/mmu/nv44.o
nvkm-y += nvkm/subdev/mmu/nv50.o
nvkm-y += nvkm/subdev/mmu/g84.o
+nvkm-y += nvkm/subdev/mmu/mcp77.o
nvkm-y += nvkm/subdev/mmu/gf100.o
nvkm-y += nvkm/subdev/mmu/gk104.o
nvkm-y += nvkm/subdev/mmu/gk20a.o
@@ -22,6 +23,7 @@ nvkm-y += nvkm/subdev/mmu/vmmnv04.o
nvkm-y += nvkm/subdev/mmu/vmmnv41.o
nvkm-y += nvkm/subdev/mmu/vmmnv44.o
nvkm-y += nvkm/subdev/mmu/vmmnv50.o
+nvkm-y += nvkm/subdev/mmu/vmmmcp77.o
nvkm-y += nvkm/subdev/mmu/vmmgf100.o
nvkm-y += nvkm/subdev/mmu/vmmgk104.o
nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
new file mode 100644
index 000000000000..0527b50730d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+mcp77_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 },
+ .kind = nv50_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 6d8f61ea467a..da06e64d8a7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -95,6 +95,9 @@ struct nvkm_vmm_desc {
const struct nvkm_vmm_desc_func *func;
};
+extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
+extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];
+
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
@@ -169,6 +172,11 @@ int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void nv50_vmm_flush(struct nvkm_vmm *, int);
+
int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
@@ -200,6 +208,8 @@ int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
+int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
new file mode 100644
index 000000000000..e63d984cbfd4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+mcp77_vmm = {
+ .join = nv50_vmm_join,
+ .part = nv50_vmm_part,
+ .valid = nv50_vmm_valid,
+ .flush = nv50_vmm_flush,
+ .page_block = 1 << 29,
+ .page = {
+ { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx },
+ { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 863a2edd9861..64f75d906202 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -32,7 +32,7 @@ static inline void
nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
- u64 next = addr | map->type, data;
+ u64 next = addr + map->type, data;
u32 pten;
int log2blk;
@@ -69,7 +69,7 @@ nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
nvkm_kmap(pt->memory);
while (ptes--) {
- const u64 data = *map->dma++ | map->type;
+ const u64 data = *map->dma++ + map->type;
VMM_WO064(pt, vmm, ptei++ * 8, data);
map->type += map->ctag;
}
@@ -163,21 +163,21 @@ nv50_vmm_pgd = {
.pde = nv50_vmm_pgd_pde,
};
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
nv50_vmm_desc_12[] = {
{ PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
{}
};
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
nv50_vmm_desc_16[] = {
{ PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
{}
};
-static void
+void
nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
{
struct nvkm_subdev *subdev = &vmm->mmu->subdev;
@@ -223,7 +223,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
mutex_unlock(&subdev->mutex);
}
-static int
+int
nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
@@ -321,7 +321,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return 0;
}
-static void
+void
nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
struct nvkm_vmm_join *join;
@@ -335,7 +335,7 @@ nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
}
}
-static int
+int
nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index deb96de54b00..ee2431a7804e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
struct nvkm_pci *pci = arg;
struct nvkm_device *device = pci->subdev.device;
bool handled = false;
+
+ if (pci->irq < 0)
+ return IRQ_HANDLED;
+
nvkm_mc_intr_unarm(device);
if (pci->msi)
pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
- if (pci->irq >= 0) {
- free_irq(pci->irq, pci);
- pci->irq = -1;
- }
-
if (pci->agp.bridge)
nvkm_agp_fini(pci);
@@ -108,8 +107,20 @@ static int
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
- if (pci_is_pcie(pci->pdev))
- return nvkm_pcie_oneinit(pci);
+ struct pci_dev *pdev = pci->pdev;
+ int ret;
+
+ if (pci_is_pcie(pci->pdev)) {
+ ret = nvkm_pcie_oneinit(pci);
+ if (ret)
+ return ret;
+ }
+
+ ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+ if (ret)
+ return ret;
+
+ pci->irq = pdev->irq;
return 0;
}
@@ -117,7 +128,6 @@ static int
nvkm_pci_init(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
- struct pci_dev *pdev = pci->pdev;
int ret;
if (pci->agp.bridge) {
@@ -131,28 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
if (pci->func->init)
pci->func->init(pci);
- ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
- if (ret)
- return ret;
-
- pci->irq = pdev->irq;
-
/* Ensure MSI interrupts are armed, for the case where there are
* already interrupts pending (for whatever reason) at load time.
*/
if (pci->msi)
pci->func->msi_rearm(pci);
- return ret;
+ return 0;
}
static void *
nvkm_pci_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
+
nvkm_agp_dtor(pci);
+
+ if (pci->irq >= 0) {
+ /* free_irq() will call the handler, we use pci->irq == -1
+ * to signal that it's been torn down and should be a noop.
+ */
+ int irq = pci->irq;
+ pci->irq = -1;
+ free_irq(irq, pci);
+ }
+
if (pci->msi)
pci_disable_msi(pci->pdev);
+
return nvkm_pci(subdev);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index e626eddf24d5..23db74ae1826 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -78,6 +78,8 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
/* then read the message */
msg.len = cnt & 0xf;
+ if (msg.len > CEC_MAX_MSG_SIZE - 2)
+ msg.len = CEC_MAX_MSG_SIZE - 2;
msg.msg[0] = hdmi_read_reg(core->base,
HDMI_CEC_RX_CMD_HEADER);
msg.msg[1] = hdmi_read_reg(core->base,
@@ -104,26 +106,6 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
}
}
-static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
-{
- if (stat1 & 2) {
- u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
-
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_NACK |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, (dbg3 >> 4) & 7, 0, 0);
- } else if (stat1 & 1) {
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_ARB_LOST |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, 0, 0, 0);
- } else if (stat1 == 0) {
- cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
- 0, 0, 0, 0);
- }
-}
-
void hdmi4_cec_irq(struct hdmi_core_data *core)
{
u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
@@ -132,27 +114,21 @@ void hdmi4_cec_irq(struct hdmi_core_data *core)
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
- if (stat0 & 0x40)
+ if (stat0 & 0x20) {
+ cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
- else if (stat0 & 0x24)
- hdmi_cec_transmit_fifo_empty(core, stat1);
- if (stat1 & 2) {
+ } else if (stat1 & 0x02) {
u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
cec_transmit_done(core->adap,
CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES,
0, (dbg3 >> 4) & 7, 0, 0);
- } else if (stat1 & 1) {
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_ARB_LOST |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, 0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
}
if (stat0 & 0x02)
hdmi_cec_received_msg(core);
- if (stat1 & 0x3)
- REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
}
static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
@@ -231,18 +207,14 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
/*
* Enable CEC interrupts:
* Transmit Buffer Full/Empty Change event
- * Transmitter FIFO Empty event
* Receiver FIFO Not Empty event
*/
- hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22);
/*
* Enable CEC interrupts:
- * RX FIFO Overrun Error event
- * Short Pulse Detected event
* Frame Retransmit Count Exceeded event
- * Start Bit Irregularity event
*/
- hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02);
/* cec calibration enable (self clearing) */
hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 8fdc56c1c953..b9bfa806d346 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -982,25 +982,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
- x = kmalloc(xbuf_size, GFP_KERNEL);
- if (x == NULL)
- return -ENOMEM;
- y = kmalloc(ybuf_size, GFP_KERNEL);
- if (y == NULL) {
- kfree(x);
- return -ENOMEM;
- }
- if (copy_from_user(x, depth->x, xbuf_size)) {
- kfree(x);
- kfree(y);
- return -EFAULT;
- }
- if (copy_from_user(y, depth->y, xbuf_size)) {
+ x = memdup_user(depth->x, xbuf_size);
+ if (IS_ERR(x))
+ return PTR_ERR(x);
+ y = memdup_user(depth->y, ybuf_size);
+ if (IS_ERR(y)) {
kfree(x);
- kfree(y);
- return -EFAULT;
+ return PTR_ERR(y);
}
-
buffer_size = depth->n * sizeof(u32);
buffer = memdup_user(depth->buffer, buffer_size);
if (IS_ERR(buffer)) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index dc332ea56f6c..3ecffa52c814 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -102,10 +102,13 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
goto out;
}
- if (abs(rate - rounded / i) <
- abs(rate - best_parent / best_div)) {
+ if (!best_parent ||
+ abs(rate - rounded / i / j) <
+ abs(rate - best_parent / best_half /
+ best_div)) {
best_parent = rounded;
- best_div = i;
+ best_half = i;
+ best_div = j;
}
}
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b0a1dedac802..476079f1255f 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
name, err);
goto remove;
}
+ } else {
+ /* fall back to the module clock on SOR0 (eDP/LVDS only) */
+ sor->clk_out = sor->clk;
}
sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b5ba6441489f..5d252fb27a82 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1007,6 +1007,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ return -ENOMEM;
ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 638540943c61..c94cce96544c 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
struct vc4_exec_info *exec[2];
struct vc4_bo *bo;
unsigned long irqflags;
- unsigned int i, j, unref_list_count, prev_idx;
+ unsigned int i, j, k, unref_list_count;
kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
return;
}
- prev_idx = 0;
+ k = 0;
for (i = 0; i < 2; i++) {
if (!exec[i])
continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
WARN_ON(!refcount_read(&bo->usecnt));
refcount_inc(&bo->usecnt);
drm_gem_object_get(&exec[i]->bo[j]->base);
- kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+ kernel_state->bo[k++] = &exec[i]->bo[j]->base;
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
* because they are naturally unpurgeable.
*/
drm_gem_object_get(&bo->base.base);
- kernel_state->bo[j + prev_idx] = &bo->base.base;
- j++;
+ kernel_state->bo[k++] = &bo->base.base;
}
- prev_idx = j + 1;
}
+ WARN_ON_ONCE(k != state->bo_count);
+
if (exec[0])
state->start_bin = exec[0]->ct0ca;
if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ V3D_WRITE(V3D_L2CACTL,
+ V3D_L2CACTL_L2CCLR);
+
+ V3D_WRITE(V3D_SLCACTL,
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
+
/* Sets the registers for the next job to actually be executed in
* the hardware.
*
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
if (!exec)
return;
+ /* A previous RCL may have written to one of our textures, and
+ * our full cache flush at bin time may have occurred before
+ * that RCL completed. Flush the texture cache now, but not
+ * the instructions or uniforms (since we don't write those
+ * from an RCL).
+ */
+ vc4_flush_texture_caches(dev);
+
submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 26eddbb62893..3dd62d75f531 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -209,9 +209,6 @@ vc4_irq_postinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- /* Undo the effects of a previous vc4_irq_uninstall. */
- enable_irq(dev->irq);
-
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 622cd43840b8..493f392b3a0a 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
return ret;
vc4_v3d_init_hw(vc4->dev);
+
+ /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
+ enable_irq(vc4->dev->irq);
vc4_irq_postinstall(vc4->dev);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 7e5f30e234b1..d08753e8fd94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -713,7 +713,7 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern unsigned int vmw_fops_poll(struct file *filp,
+extern __poll_t vmw_fops_poll(struct file *filp,
struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 21c62a34e558..87e8af5776a3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
}
view_type = vmw_view_cmd_to_type(header->id);
+ if (view_type == vmw_view_max)
+ return -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 01be355525e4..67f844678ac8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -412,7 +412,7 @@ out_clips:
* Wrapper around the drm_poll function that makes sure the device is
* processing the fifo if drm_poll decides to wait.
*/
-unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0545740b3724..fcd58145d0da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
/* Mapping is managed by prepare_fb/cleanup_fb */
- memset(&vps->guest_map, 0, sizeof(vps->guest_map));
memset(&vps->host_map, 0, sizeof(vps->host_map));
vps->cpp = 0;
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
/* Should have been freed by cleanup_fb */
- if (vps->guest_map.virtual) {
- DRM_ERROR("Guest mapping not freed\n");
- ttm_bo_kunmap(&vps->guest_map);
- }
-
if (vps->host_map.virtual) {
DRM_ERROR("Host mapping not freed\n");
ttm_bo_kunmap(&vps->host_map);
@@ -1869,7 +1863,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
*/
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- return -ENOSYS;
+ return -EINVAL;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff9c8389ff21..cd9da2dd79af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -175,7 +175,7 @@ struct vmw_plane_state {
int pinned;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map, guest_map;
+ struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b8a09807c5de..3824595fece1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+ .atomic_destroy_state = vmw_du_connector_destroy_state,
.atomic_set_property = vmw_du_connector_atomic_set_property,
.atomic_get_property = vmw_du_connector_atomic_get_property,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bc5f6026573d..63a4cd794b73 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+ .atomic_destroy_state = vmw_du_connector_destroy_state,
.atomic_set_property = vmw_du_connector_atomic_set_property,
.atomic_get_property = vmw_du_connector_atomic_get_property,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 90b5437fd787..b68d74888ab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit {
bool defined;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map, guest_map;
+ struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
s32 src_pitch, dst_pitch;
u8 *src, *dst;
bool not_used;
-
+ struct ttm_bo_kmap_obj guest_map;
+ int ret;
if (!dirty->num_hits)
return;
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
if (width == 0 || height == 0)
return;
+ ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
+ &guest_map);
+ if (ret) {
+ DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
+ ret);
+ goto out_cleanup;
+ }
/* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
dst_pitch = ddirty->pitch;
- dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
+ dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
+ ttm_bo_kunmap(&guest_map);
out_cleanup:
ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->guest_map.virtual)
- ttm_bo_kunmap(&vps->guest_map);
-
if (vps->host_map.virtual)
ttm_bo_kunmap(&vps->host_map);
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
*/
if (vps->content_fb_type == SEPARATE_DMA &&
!(dev_priv->capabilities & SVGA_CAP_3D)) {
-
- struct vmw_framebuffer_dmabuf *new_vfbd;
-
- new_vfbd = vmw_framebuffer_to_vfbd(new_fb);
-
- ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false,
- NULL);
- if (ret)
- goto out_srf_unpin;
-
- ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0,
- new_vfbd->buffer->base.num_pages,
- &vps->guest_map);
-
- ttm_bo_unreserve(&new_vfbd->buffer->base);
-
- if (ret) {
- DRM_ERROR("Failed to map content buffer to CPU\n");
- goto out_srf_unpin;
- }
-
ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
vps->surf->res.backup->base.num_pages,
&vps->host_map);
if (ret) {
DRM_ERROR("Failed to map display buffer to CPU\n");
- ttm_bo_kunmap(&vps->guest_map);
goto out_srf_unpin;
}
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu->display_srf = vps->surf;
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
- memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map));
memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
if (!stdu->defined)
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index d35d6d271f3f..dfd8d0048980 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1266,7 +1266,7 @@ done:
return ret_val;
}
-static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
+static __poll_t vga_arb_fpoll(struct file *file, poll_table *wait)
{
pr_debug("%s\n", __func__);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 779c5ae47f36..19c499f5623d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -280,6 +280,7 @@ config HID_ELECOM
---help---
Support for ELECOM devices:
- BM084 Bluetooth Mouse
+ - EX-G Trackball (Wired and wireless)
- DEFT Trackball (Wired and wireless)
- HUGE Trackball (Wired and wireless)
@@ -396,6 +397,17 @@ config HID_ITE
---help---
Support for ITE devices not fully compliant with HID standard.
+config HID_JABRA
+ tristate "Jabra USB HID Driver"
+ depends on HID
+ ---help---
+ Support for Jabra USB HID devices.
+
+ Prevents mapping of vendor defined HID usages to input events. Without
+ this driver, HID reports from Jabra devices may incorrectly be seen as
+ mouse button events.
+ Say M here if you may ever plug in a Jabra USB device.
+
config HID_TWINHAN
tristate "Twinhan IR remote control"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 235bd2a7b333..eb13b9e92d85 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for the HID driver
#
-hid-y := hid-core.o hid-input.o
+hid-y := hid-core.o hid-input.o hid-quirks.o
hid-$(CONFIG_DEBUG_FS) += hid-debug.o
obj-$(CONFIG_HID) += hid.o
@@ -52,6 +52,7 @@ obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_ICADE) += hid-icade.o
obj-$(CONFIG_HID_ITE) += hid-ite.o
+obj-$(CONFIG_HID_JABRA) += hid-jabra.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 1bb7b63b3150..88b9703318e4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -26,6 +26,7 @@
* any later version.
*/
+#include <linux/dmi.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/input/mt.h>
@@ -119,6 +120,24 @@ static const struct asus_touchpad_info asus_t100ta_tp = {
.max_contacts = 5,
};
+static const struct asus_touchpad_info asus_t100ha_tp = {
+ .max_x = 2640,
+ .max_y = 1320,
+ .res_x = 30, /* units/mm */
+ .res_y = 29, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
+static const struct asus_touchpad_info asus_t200ta_tp = {
+ .max_x = 3120,
+ .max_y = 1716,
+ .res_x = 30, /* units/mm */
+ .res_y = 28, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
static const struct asus_touchpad_info asus_t100chi_tp = {
.max_x = 2640,
.max_y = 1320,
@@ -606,7 +625,17 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
drvdata->quirks = QUIRK_SKIP_INPUT_MAPPING;
- drvdata->tp = &asus_t100ta_tp;
+ /*
+ * The T100HA uses the same USB-ids as the T100TAF and
+ * the T200TA uses the same USB-ids as the T100TA, while
+ * both have different max x/y values from the T100TA[F].
+ */
+ if (dmi_match(DMI_PRODUCT_NAME, "T100HAN"))
+ drvdata->tp = &asus_t100ha_tp;
+ else if (dmi_match(DMI_PRODUCT_NAME, "T200TA"))
+ drvdata->tp = &asus_t200ta_tp;
+ else
+ drvdata->tp = &asus_t100ta_tp;
}
}
@@ -686,9 +715,10 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
- /* For the T100TA keyboard dock */
+ /* For the T100TA/T200TA keyboard dock */
if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
- *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+ (*rsize == 76 || *rsize == 101) &&
+ rdesc[73] == 0x81 && rdesc[74] == 0x01) {
hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
}
@@ -751,7 +781,10 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+ USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
+ QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0c3f608131cf..c2560aae5542 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -830,31 +830,6 @@ static int hid_scan_report(struct hid_device *hid)
break;
}
- /* fall back to generic driver in case specific driver doesn't exist */
- switch (hid->group) {
- case HID_GROUP_MULTITOUCH_WIN_8:
- /* fall-through */
- case HID_GROUP_MULTITOUCH:
- if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_SENSOR_HUB:
- if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_RMI:
- if (!IS_ENABLED(CONFIG_HID_RMI))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_WACOM:
- if (!IS_ENABLED(CONFIG_HID_WACOM))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_LOGITECH_DJ_DEVICE:
- if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
- hid->group = HID_GROUP_GENERIC;
- break;
- }
vfree(parser);
return 0;
}
@@ -1597,8 +1572,8 @@ unlock:
}
EXPORT_SYMBOL_GPL(hid_input_report);
-static bool hid_match_one_id(struct hid_device *hdev,
- const struct hid_device_id *id)
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id)
{
return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
@@ -1606,7 +1581,7 @@ static bool hid_match_one_id(struct hid_device *hdev,
(id->product == HID_ANY_ID || id->product == hdev->product);
}
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id)
{
for (; id->bus; id++)
@@ -1862,541 +1837,6 @@ void hid_hw_close(struct hid_device *hdev)
}
EXPORT_SYMBOL_GPL(hid_hw_close);
-/*
- * A list of devices for which there is a specialized driver on HID bus.
- *
- * Please note that for multitouch devices (driven by hid-multitouch driver),
- * there is a proper autodetection and autoloading in place (based on presence
- * of HID_DG_CONTACTID), so those devices don't need to be added to this list,
- * as we are doing the right thing in hid_scan_usage().
- *
- * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
- * physical is found inside a usage page of type sensor, hid-sensor-hub will be
- * used as a driver. See hid_scan_report().
- */
-static const struct hid_device_id hid_have_special_driver[] = {
-#if IS_ENABLED(CONFIG_HID_A4TECH)
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ACRUX)
- { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ALPS)
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
-#endif
-#if IS_ENABLED(CONFIG_HID_APPLE)
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
-#endif
-#if IS_ENABLED(CONFIG_HID_APPLEIR)
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ASUS)
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_AUREAL)
- { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
-#endif
-#if IS_ENABLED(CONFIG_HID_BELKIN)
- { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_BETOP_FF)
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CHERRY)
- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CHICONY)
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CMEDIA)
- { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CORSAIR)
- { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CP2112)
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CYPRESS)
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ELECOM)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ELO)
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
-#endif
-#if IS_ENABLED(CONFIG_HID_EMS_FF)
- { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
-#endif
-#if IS_ENABLED(CONFIG_HID_EZKEY)
- { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GEMBIRD)
- { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GFRM)
- { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
- { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GREENASIA)
- { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GT683R)
- { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GYRATION)
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
-#endif
-#if IS_ENABLED(CONFIG_HID_HOLTEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ICADE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ITE)
- { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KENSINGTON)
- { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KYE)
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LCPOWER)
- { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LED)
- { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LENOVO)
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MAYFLASH)
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MICROSOFT)
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MONTEREY)
- { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
-#endif
-#if IS_ENABLED(CONFIG_HID_WIIMOTE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_NTI)
- { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
-#endif
-#if IS_ENABLED(CONFIG_HID_NTRIG)
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ORTEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PENMOUNT)
- { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PETALYNX)
- { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PICOLCD)
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PRIMAX)
- { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
- { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
-#endif
-#if IS_ENABLED(CONFIG_HID_RETRODE)
- { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY, USB_DEVICE_ID_RETRODE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_RMI)
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ROCCAT)
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SAITEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SAMSUNG)
- { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SONY)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
- { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_STEELSERIES)
- { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SUNPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
-#endif
-#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TIVO)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TOPSEED)
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TWINHAN)
- { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_UCLOGIC)
- { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
-#endif
-#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
- { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
-#endif
-#if IS_ENABLED(CONFIG_HID_WALTOP)
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_PID_0038) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
-#endif
-#if IS_ENABLED(CONFIG_HID_XINMO)
- { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ZYDACRON)
- { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
-#endif
- { }
-};
-
struct hid_dynid {
struct list_head list;
struct hid_device_id id;
@@ -2463,8 +1903,8 @@ static void hid_free_dynids(struct hid_driver *hdrv)
spin_unlock(&hdrv->dyn_lock);
}
-static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
- struct hid_driver *hdrv)
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv)
{
struct hid_dynid *dynid;
@@ -2479,6 +1919,7 @@ static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
return hid_match_id(hdev, hdrv->id_table);
}
+EXPORT_SYMBOL_GPL(hid_match_device);
static int hid_bus_match(struct device *dev, struct device_driver *drv)
{
@@ -2508,6 +1949,23 @@ static int hid_device_probe(struct device *dev)
goto unlock;
}
+ if (hdrv->match) {
+ if (!hdrv->match(hdev, hid_ignore_special_drivers)) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ } else {
+ /*
+ * hid-generic implements .match(), so if
+ * hid_ignore_special_drivers is set, we can safely
+ * return.
+ */
+ if (hid_ignore_special_drivers) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ }
+
hdev->driver = hdrv;
if (hdrv->probe) {
ret = hdrv->probe(hdev, id);
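[Editor's note: the probe path above now consults an optional ->match() callback before binding, and the in-code comment says hid-generic is expected to implement it. A hedged sketch of what a catch-all driver's match can look like under that contract — the function name is made up; HID_QUIRK_HAVE_SPECIAL_DRIVER is the flag this series introduces:]

static bool generic_match(struct hid_device *hdev,
			  bool ignore_special_driver)
{
	/* with hid_ignore_special_drivers set, bind everything */
	if (ignore_special_driver)
		return true;

	/* otherwise leave devices flagged for a specialized driver alone */
	return !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER);
}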
@@ -2604,7 +2062,7 @@ static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static struct bus_type hid_bus_type = {
+struct bus_type hid_bus_type = {
.name = "hid",
.dev_groups = hid_dev_groups,
.drv_groups = hid_drv_groups,
@@ -2613,315 +2071,7 @@ static struct bus_type hid_bus_type = {
.remove = hid_device_remove,
.uevent = hid_uevent,
};
-
-/* a list of devices that shouldn't be handled by HID core at all */
-static const struct hid_device_id hid_ignore_list[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
- { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
- { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_410) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_510) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454_V2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
-#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
-#endif
- { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
- { }
-};
-
-/**
- * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
- *
- * There are composite devices for which we want to ignore only a certain
- * interface. This is a list of devices for which only the mouse interface will
- * be ignored. This allows a dedicated driver to take care of the interface.
- */
-static const struct hid_device_id hid_mouse_ignore_list[] = {
- /* appletouch driver */
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
- { }
-};
-
-bool hid_ignore(struct hid_device *hdev)
-{
- if (hdev->quirks & HID_QUIRK_NO_IGNORE)
- return false;
- if (hdev->quirks & HID_QUIRK_IGNORE)
- return true;
-
- switch (hdev->vendor) {
- case USB_VENDOR_ID_CODEMERCS:
- /* ignore all Code Mercenaries IOWarrior devices */
- if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
- hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
- return true;
- break;
- case USB_VENDOR_ID_LOGITECH:
- if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
- hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
- return true;
- /*
- * The Keene FM transmitter USB device has the same USB ID as
- * the Logitech AudioHub Speaker, but it should ignore the hid.
- * Check if the name is that of the Keene device.
- * For reference: the name of the AudioHub is
- * "HOLTEK AudioHub Speaker".
- */
- if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB &&
- !strcmp(hdev->name, "HOLTEK B-LINK USB Audio "))
- return true;
- break;
- case USB_VENDOR_ID_SOUNDGRAPH:
- if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
- hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
- return true;
- break;
- case USB_VENDOR_ID_HANWANG:
- if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
- hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
- return true;
- break;
- case USB_VENDOR_ID_JESS:
- if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
- hdev->type == HID_TYPE_USBNONE)
- return true;
- break;
- case USB_VENDOR_ID_VELLEMAN:
- /* These are not HID devices. They are handled by comedi. */
- if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
- hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
- (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
- hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
- return true;
- break;
- case USB_VENDOR_ID_ATMEL_V_USB:
- /* Masterkit MA901 usb radio based on Atmel tiny85 chip and
- * it has the same USB ID as many Atmel V-USB devices. This
- * usb radio is handled by radio-ma901.c driver so we want
- * ignore the hid. Check the name, bus, product and ignore
- * if we have MA901 usb radio.
- */
- if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
- hdev->bus == BUS_USB &&
- strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
- return true;
- break;
- }
-
- if (hdev->type == HID_TYPE_USBMOUSE &&
- hid_match_id(hdev, hid_mouse_ignore_list))
- return true;
-
- return !!hid_match_id(hdev, hid_ignore_list);
-}
-EXPORT_SYMBOL_GPL(hid_ignore);
+EXPORT_SYMBOL(hid_bus_type);
int hid_add_device(struct hid_device *hdev)
{
@@ -2931,6 +2081,8 @@ int hid_add_device(struct hid_device *hdev)
if (WARN_ON(hdev->status & HID_STAT_ADDED))
return -EBUSY;
+ hdev->quirks = hid_lookup_quirk(hdev);
+
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
if (hid_ignore(hdev))
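hid_lookup_quirk() resolves the device's static quirk bits from the hid_quirks[] table added in hid-quirks.c later in this patch. A minimal sketch of such a wildcard lookup, assuming the HID_ANY_ID/HID_BUS_ANY conventions from hid.h (example_lookup_quirk() is a hypothetical name, not the patch's implementation, which also consults dynamically added quirks):

	static unsigned long example_lookup_quirk(const struct hid_device *hdev,
						  const struct hid_device_id *table)
	{
		const struct hid_device_id *id;

		for (id = table; id->bus; id++) {
			if (id->bus != HID_BUS_ANY && id->bus != hdev->bus)
				continue;
			if (id->vendor != HID_ANY_ID && id->vendor != hdev->vendor)
				continue;
			if (id->product != HID_ANY_ID && id->product != hdev->product)
				continue;
			return id->driver_data;	/* the HID_QUIRK_* bits */
		}
		return 0;
	}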
@@ -2960,7 +2112,7 @@ int hid_add_device(struct hid_device *hdev)
if (hid_ignore_special_drivers) {
hdev->group = HID_GROUP_GENERIC;
} else if (!hdev->group &&
- !hid_match_id(hdev, hid_have_special_driver)) {
+ !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
@@ -3044,6 +2196,29 @@ void hid_destroy_device(struct hid_device *hdev)
}
EXPORT_SYMBOL_GPL(hid_destroy_device);
+
+static int __bus_add_driver(struct device_driver *drv, void *data)
+{
+ struct hid_driver *added_hdrv = data;
+ struct hid_driver *hdrv = to_hid_driver(drv);
+
+ if (hdrv->bus_add_driver)
+ hdrv->bus_add_driver(added_hdrv);
+
+ return 0;
+}
+
+static int __bus_removed_driver(struct device_driver *drv, void *data)
+{
+ struct hid_driver *removed_hdrv = data;
+ struct hid_driver *hdrv = to_hid_driver(drv);
+
+ if (hdrv->bus_removed_driver)
+ hdrv->bus_removed_driver(removed_hdrv);
+
+ return 0;
+}
+
int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
const char *mod_name)
{
@@ -3055,6 +2230,8 @@ int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
INIT_LIST_HEAD(&hdrv->dyn_list);
spin_lock_init(&hdrv->dyn_lock);
+ bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_add_driver);
+
return driver_register(&hdrv->driver);
}
EXPORT_SYMBOL_GPL(__hid_register_driver);
@@ -3063,6 +2240,8 @@ void hid_unregister_driver(struct hid_driver *hdrv)
{
driver_unregister(&hdrv->driver);
hid_free_dynids(hdrv);
+
+ bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
}
EXPORT_SYMBOL_GPL(hid_unregister_driver);
@@ -3117,6 +2296,7 @@ static void __exit hid_exit(void)
hid_debug_exit();
hidraw_exit();
bus_unregister(&hid_bus_type);
+ hid_quirks_exit(HID_BUS_ANY);
}
module_init(hid_init);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 5271db593478..c783fd5ef809 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1179,7 +1179,7 @@ out:
return ret;
}
-static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
+static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
{
struct hid_debug_list *list = file->private_data;
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 54aeea57d209..1a1ecc491c02 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -1,9 +1,15 @@
/*
- * HID driver for ELECOM devices.
+ * HID driver for ELECOM devices:
+ * - BM084 Bluetooth Mouse
+ * - EX-G Trackball (Wired and wireless)
+ * - DEFT Trackball (Wired and wireless)
+ * - HUGE Trackball (Wired and wireless)
+ *
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
* Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
+ * Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
*/
/*
@@ -19,6 +25,34 @@
#include "hid-ids.h"
+/*
+ * Certain ELECOM mice misreport their button count, meaning that they only
+ * work correctly with the ELECOM mouse assistant software, which is
+ * unavailable for Linux. Four extra INPUT reports and a FEATURE report are
+ * described by the report descriptor, but it does not appear that these
+ * enable software to control what the extra buttons map to. The only simple
+ * and straightforward solution seems to involve fixing up the report
+ * descriptor.
+ *
+ * Report descriptor format:
+ * Positions 13, 15, 21 and 31 store the button bit count, the per-button
+ * report size (one bit), the button usage maximum and the padding bit count
+ * respectively.
+ */
+#define MOUSE_BUTTONS_MAX 8
+static void mouse_button_fixup(struct hid_device *hdev,
+ __u8 *rdesc, unsigned int rsize,
+ int nbuttons)
+{
+ if (rsize < 32 || rdesc[12] != 0x95 ||
+ rdesc[14] != 0x75 || rdesc[15] != 0x01 ||
+ rdesc[20] != 0x29 || rdesc[30] != 0x75)
+ return;
+ hid_info(hdev, "Fixing up Elecom mouse button count\n");
+ nbuttons = clamp(nbuttons, 0, MOUSE_BUTTONS_MAX);
+ rdesc[13] = nbuttons;
+ rdesc[21] = nbuttons;
+ rdesc[31] = MOUSE_BUTTONS_MAX - nbuttons;
+}
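For reference, the byte offsets checked and rewritten above line up with standard HID short-item encoding (0x95 = Report Count, 0x75 = Report Size, 0x29 = Usage Maximum). An annotated view of the descriptor fragment being patched, reconstructed from the checks in mouse_button_fixup() (illustrative, not taken from a real device dump):

	/*
	 * offset  bytes       item
	 * 12-13   0x95 nn     Report Count (nn)    <- button bit count, rewritten
	 * 14-15   0x75 0x01   Report Size (1)      <- one bit per button, checked
	 * 20-21   0x29 nn     Usage Maximum (nn)   <- rewritten to nbuttons
	 * 30-31   0x75 nn     Report Size (nn)     <- padding bits, rewritten to
	 *                                             MOUSE_BUTTONS_MAX - nbuttons
	 */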
+
static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -31,45 +65,15 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[47] = 0x00;
}
break;
+ case USB_DEVICE_ID_ELECOM_EX_G_WIRED:
+ case USB_DEVICE_ID_ELECOM_EX_G_WIRELESS:
+ mouse_button_fixup(hdev, rdesc, *rsize, 6);
+ break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
- /* The DEFT/HUGE trackball has eight buttons, but its descriptor
- * only reports five, disabling the three Fn buttons on the top
- * of the mouse.
- *
- * Apply the following diff to the descriptor:
- *
- * Collection (Physical), Collection (Physical),
- * Report ID (1), Report ID (1),
- * Report Count (5), -> Report Count (8),
- * Report Size (1), Report Size (1),
- * Usage Page (Button), Usage Page (Button),
- * Usage Minimum (01h), Usage Minimum (01h),
- * Usage Maximum (05h), -> Usage Maximum (08h),
- * Logical Minimum (0), Logical Minimum (0),
- * Logical Maximum (1), Logical Maximum (1),
- * Input (Variable), Input (Variable),
- * Report Count (1), -> Report Count (0),
- * Report Size (3), Report Size (3),
- * Input (Constant), Input (Constant),
- * Report Size (16), Report Size (16),
- * Report Count (2), Report Count (2),
- * Usage Page (Desktop), Usage Page (Desktop),
- * Usage (X), Usage (X),
- * Usage (Y), Usage (Y),
- * Logical Minimum (-32768), Logical Minimum (-32768),
- * Logical Maximum (32767), Logical Maximum (32767),
- * Input (Variable, Relative), Input (Variable, Relative),
- * End Collection, End Collection,
- */
- if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
- hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
- rdesc[13] = 8; /* Button/Variable Report Count */
- rdesc[21] = 8; /* Button/Variable Usage Maximum */
- rdesc[29] = 0; /* Button/Constant Report Count */
- }
+ mouse_button_fixup(hdev, rdesc, *rsize, 8);
break;
}
return rdesc;
@@ -77,6 +81,8 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 0cd4f7216239..5eea6fe0d7bd 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev,
{
struct input_dev *input = hidinput->input;
+ /*
+ * ELO devices have one Button usage in the GenDesk field, which makes
+ * hid-input map it to BTN_LEFT; that confuses userspace, which then
+ * considers the device to be a mouse/touchpad instead of a touchscreen.
+ */
+ clear_bit(BTN_LEFT, input->keybit);
set_bit(BTN_TOUCH, input->keybit);
set_bit(ABS_PRESSURE, input->absbit);
input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0);
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index e288a4a06fe8..3c0a1bf433d7 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -24,8 +24,71 @@
#include <linux/hid.h>
+static struct hid_driver hid_generic;
+
+static int __unmap_hid_generic(struct device *dev, void *data)
+{
+ struct hid_driver *hdrv = data;
+ struct hid_device *hdev = to_hid_device(dev);
+
+ /* only unbind matching devices already bound to hid-generic */
+ if (hdev->driver != &hid_generic ||
+ hid_match_device(hdev, hdrv) == NULL)
+ return 0;
+
+ if (dev->parent) /* Needed for USB */
+ device_lock(dev->parent);
+ device_release_driver(dev);
+ if (dev->parent)
+ device_unlock(dev->parent);
+
+ return 0;
+}
+
+static void hid_generic_add_driver(struct hid_driver *hdrv)
+{
+ bus_for_each_dev(&hid_bus_type, NULL, hdrv, __unmap_hid_generic);
+}
+
+static void hid_generic_removed_driver(struct hid_driver *hdrv)
+{
+ int ret;
+
+ ret = driver_attach(&hid_generic.driver);
+}
+
+static int __check_hid_generic(struct device_driver *drv, void *data)
+{
+ struct hid_driver *hdrv = to_hid_driver(drv);
+ struct hid_device *hdev = data;
+
+ if (hdrv == &hid_generic)
+ return 0;
+
+ return hid_match_device(hdev, hdrv) != NULL;
+}
+
+static bool hid_generic_match(struct hid_device *hdev,
+ bool ignore_special_driver)
+{
+ if (ignore_special_driver)
+ return true;
+
+ if (hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)
+ return false;
+
+ /*
+ * If any other driver wants the device, leave the device to this other
+ * driver.
+ */
+ if (bus_for_each_drv(&hid_bus_type, NULL, hdev, __check_hid_generic))
+ return false;
+
+ return true;
+}
+
static const struct hid_device_id hid_table[] = {
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_GENERIC, HID_ANY_ID, HID_ANY_ID) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, HID_ANY_ID, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, hid_table);
@@ -33,6 +96,9 @@ MODULE_DEVICE_TABLE(hid, hid_table);
static struct hid_driver hid_generic = {
.name = "hid-generic",
.id_table = hid_table,
+ .match = hid_generic_match,
+ .bus_add_driver = hid_generic_add_driver,
+ .bus_removed_driver = hid_generic_removed_driver,
};
module_hid_driver(hid_generic);
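Together with the bus_add_driver/bus_removed_driver hooks in hid-core.c above, this gives hid-generic a hand-off protocol with specialized drivers. An illustrative sequence, with hid-foo standing in for any specialized HID driver (not a driver in this patch):

	/*
	 * 1. Device X probes with no specialized driver loaded;
	 *    hid_generic_match() finds no other matching driver, so
	 *    hid-generic binds X.
	 * 2. modprobe hid-foo: __hid_register_driver() walks the bus and calls
	 *    each driver's ->bus_add_driver(); hid_generic_add_driver() then
	 *    releases X from hid-generic via device_release_driver(), letting
	 *    the driver core re-probe X against hid-foo once it registers.
	 * 3. rmmod hid-foo: hid_unregister_driver() invokes
	 *    ->bus_removed_driver(); hid_generic_removed_driver() calls
	 *    driver_attach() so the orphaned X falls back to hid-generic.
	 */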
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5da3d6256d25..43ddcdfbd0da 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -178,7 +178,8 @@
#define USB_VENDOR_ID_ASUSTEK 0x0b05
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
-#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0
+#define USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD 0x17e0
+#define USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD 0x1807
#define USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD 0x8502
#define USB_DEVICE_ID_ASUSTEK_T304_KEYBOARD 0x184a
#define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585
@@ -370,6 +371,8 @@
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
+#define USB_DEVICE_ID_ELECOM_EX_G_WIRED 0x00fb
+#define USB_DEVICE_ID_ELECOM_EX_G_WIRELESS 0x00fc
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
@@ -535,6 +538,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -1156,6 +1160,7 @@
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
diff --git a/drivers/hid/hid-jabra.c b/drivers/hid/hid-jabra.c
new file mode 100644
index 000000000000..1f52daf14426
--- /dev/null
+++ b/drivers/hid/hid-jabra.c
@@ -0,0 +1,58 @@
+/*
+ * Jabra USB HID Driver
+ *
+ * Copyright (c) 2017 Niels Skou Olsen <nolsen@jabra.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+#define HID_UP_VENDOR_DEFINED_MIN 0xff000000
+#define HID_UP_VENDOR_DEFINED_MAX 0xffff0000
+
+static int jabra_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int is_vendor_defined =
+ ((usage->hid & HID_USAGE_PAGE) >= HID_UP_VENDOR_DEFINED_MIN &&
+ (usage->hid & HID_USAGE_PAGE) <= HID_UP_VENDOR_DEFINED_MAX);
+
+ dbg_hid("hid=0x%08x appl=0x%08x coll_idx=0x%02x usage_idx=0x%02x: %s\n",
+ usage->hid,
+ field->application,
+ usage->collection_index,
+ usage->usage_index,
+ is_vendor_defined ? "ignored" : "defaulted");
+
+ /* Ignore vendor-defined usages; map standard usages by default */
+ return is_vendor_defined ? -1 : 0;
+}
+
+static const struct hid_device_id jabra_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, HID_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, jabra_devices);
+
+static struct hid_driver jabra_driver = {
+ .name = "jabra",
+ .id_table = jabra_devices,
+ .input_mapping = jabra_input_mapping,
+};
+module_hid_driver(jabra_driver);
+
+MODULE_AUTHOR("Niels Skou Olsen <nolsen@jabra.com>");
+MODULE_DESCRIPTION("Jabra USB HID Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 65ea23be9677..3b4739bde05d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -85,11 +85,12 @@ MODULE_LICENSE("GPL");
#define MT_IO_FLAGS_PENDING_SLOTS 2
struct mt_slot {
- __s32 x, y, cx, cy, p, w, h;
+ __s32 x, y, cx, cy, p, w, h, a;
__s32 contactid; /* the device ContactID assigned to this slot */
bool touch_state; /* is the touch valid? */
bool inrange_state; /* is the finger in proximity of the sensor? */
bool confidence_state; /* is the touch made by a finger? */
+ bool has_azimuth; /* the contact reports azimuth */
};
struct mt_class {
@@ -119,6 +120,10 @@ struct mt_device {
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
int cc_index; /* contact count field index in the report */
int cc_value_index; /* contact count value index in the field */
+ int scantime_index; /* scantime field index in the report */
+ int scantime_val_index; /* scantime value index in the field */
+ int prev_scantime; /* scantime reported in the previous packet */
+ int left_button_state; /* left button state */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
unsigned long initial_quirks; /* initial quirks state */
@@ -582,8 +587,15 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (!(cls->quirks & MT_QUIRK_NO_AREA)) {
set_abs(hi->input, ABS_MT_TOUCH_MINOR, field,
cls->sn_height);
- input_set_abs_params(hi->input,
- ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ /*
+ * Only set ABS_MT_ORIENTATION if it is not
+ * already set by the HID_DG_AZIMUTH usage.
+ */
+ if (!test_bit(ABS_MT_ORIENTATION,
+ hi->input->absbit))
+ input_set_abs_params(hi->input,
+ ABS_MT_ORIENTATION, 0, 1, 0, 0);
}
mt_store_field(usage, td, hi);
return 1;
@@ -599,6 +611,12 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
EV_MSC, MSC_TIMESTAMP);
input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
mt_store_field(usage, td, hi);
+ /* Ignore if indexes are out of bounds. */
+ if (field->index >= field->report->maxfield ||
+ usage->usage_index >= field->report_count)
+ return 1;
+ td->scantime_index = field->index;
+ td->scantime_val_index = usage->usage_index;
return 1;
case HID_DG_CONTACTCOUNT:
/* Ignore if indexes are out of bounds. */
@@ -608,6 +626,21 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->cc_index = field->index;
td->cc_value_index = usage->usage_index;
return 1;
+ case HID_DG_AZIMUTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_ORIENTATION);
+ /*
+ * Azimuth has the range of [0, MAX) representing a full
+ * revolution. Set ABS_MT_ORIENTATION to a quarter of
+ * MAX, according to the definition of ABS_MT_ORIENTATION.
+ */
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ -field->logical_maximum / 4,
+ field->logical_maximum / 4,
+ cls->sn_move ?
+ field->logical_maximum / cls->sn_move : 0, 0);
+ mt_store_field(usage, td, hi);
+ return 1;
case HID_DG_CONTACTMAX:
/* we don't set td->last_slot_field as contactcount and
* contact max are global to the report */
@@ -700,6 +733,10 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
int wide = (s->w > s->h);
int major = max(s->w, s->h);
int minor = min(s->w, s->h);
+ int orientation = wide;
+
+ if (s->has_azimuth)
+ orientation = s->a;
/*
* divided by two to match visual scale of touch
@@ -716,7 +753,8 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
input_event(input, EV_ABS, ABS_MT_TOOL_Y, s->cy);
input_event(input, EV_ABS, ABS_MT_DISTANCE,
!s->touch_state);
- input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION,
+ orientation);
input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
@@ -734,10 +772,16 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
*/
static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
{
+ __s32 cls = td->mtclass.name;
+
+ if (cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL)
+ input_event(input, EV_KEY, BTN_LEFT, td->left_button_state);
+
input_mt_sync_frame(input);
input_event(input, EV_MSC, MSC_TIMESTAMP, td->timestamp);
input_sync(input);
td->num_received = 0;
+ td->left_button_state = 0;
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
else
@@ -778,9 +822,11 @@ static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
}
static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
+ struct hid_usage *usage, __s32 value,
+ bool first_packet)
{
struct mt_device *td = hid_get_drvdata(hid);
+ __s32 cls = td->mtclass.name;
__s32 quirks = td->mtclass.quirks;
struct input_dev *input = field->hidinput->input;
@@ -832,11 +878,49 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
break;
case HID_DG_CONTACTCOUNT:
break;
+ case HID_DG_AZIMUTH:
+ /*
+ * Azimuth is counter-clockwise and ranges from [0, MAX)
+ * (a full revolution). Convert it to clockwise, ranging
+ * over [-MAX/2, MAX/2].
+ *
+ * Note that ABS_MT_ORIENTATION requires us to report
+ * the limit of [-MAX/4, MAX/4], but the value can go
+ * out of range to [-MAX/2, MAX/2] to report an
+ * upside-down ellipse.
+ */
+ if (value > field->logical_maximum / 2)
+ value -= field->logical_maximum;
+ td->curdata.a = -value;
+ td->curdata.has_azimuth = true;
+ break;
case HID_DG_TOUCH:
/* do nothing */
break;
default:
+ /*
+ * For Win8 PTP touchpads we should only look at
+ * non-finger/touch events in the first_packet of
+ * a (possible) multi-packet frame.
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ !first_packet)
+ return;
+
+ /*
+ * For Win8 PTP touchpads we map both the clickpad click
+ * and any "external" left buttons to BTN_LEFT. If a
+ * device claims to have both, we need to report 1 for
+ * BTN_LEFT if either is pressed, so we OR all values
+ * together and report the result in mt_sync_frame().
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ usage->type == EV_KEY && usage->code == BTN_LEFT) {
+ td->left_button_state |= value;
+ return;
+ }
+
if (usage->type)
input_event(input, usage->type, usage->code,
value);
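A worked example of the azimuth conversion above, assuming a device whose azimuth field has logical_maximum == 360 (the exact maximum is device-specific):

	/*
	 * value = 90  (90 degrees counter-clockwise):
	 *         90 <= 180, so curdata.a = -90
	 * value = 270 (270 degrees counter-clockwise):
	 *         270 > 180, so value -= 360 -> -90, and curdata.a = 90
	 *
	 * ABS_MT_ORIENTATION was advertised as [-90, 90] (a quarter of MAX on
	 * each side), but values up to +/-180 may still be emitted for an
	 * upside-down ellipse.
	 */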
@@ -855,9 +939,11 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hid);
+ __s32 cls = td->mtclass.name;
struct hid_field *field;
+ bool first_packet;
unsigned count;
- int r, n;
+ int r, n, scantime = 0;
/* sticky fingers release in progress, abort */
if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
@@ -867,13 +953,31 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
* Includes multi-packet support where subsequent
* packets are sent with zero contactcount.
*/
+ if (td->scantime_index >= 0) {
+ field = report->field[td->scantime_index];
+ scantime = field->value[td->scantime_val_index];
+ }
if (td->cc_index >= 0) {
struct hid_field *field = report->field[td->cc_index];
int value = field->value[td->cc_value_index];
- if (value)
+
+ /*
+ * For Win8 PTPs the first packet (td->num_received == 0) may
+ * have a contactcount of 0 if there is only a button event.
+ * We double-check that this is not a continuation packet
+ * of a possible multi-packet frame by checking that the
+ * timestamp has changed.
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ td->num_received == 0 && td->prev_scantime != scantime)
+ td->num_expected = value;
+ /* A non-zero contact count always indicates a first packet */
+ else if (value)
td->num_expected = value;
}
+ td->prev_scantime = scantime;
+ first_packet = td->num_received == 0;
for (r = 0; r < report->maxfield; r++) {
field = report->field[r];
count = field->report_count;
@@ -883,7 +987,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
for (n = 0; n < count; n++)
mt_process_mt_event(hid, field, &field->usage[n],
- field->value[n]);
+ field->value[n], first_packet);
}
if (td->num_received >= td->num_expected)
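To make the continuation detection concrete, here is an illustrative packet sequence for a Win8 PTP that splits a 7-contact frame across two packets (contacts-per-packet limits vary by device):

	/*
	 * packet 1: scantime = 100, contactcount = 7, contacts 1-5
	 *           num_received == 0 and scantime changed -> num_expected = 7
	 * packet 2: scantime = 100, contactcount = 0, contacts 6-7
	 *           scantime unchanged -> continuation packet, num_expected
	 *           keeps its value of 7
	 *
	 * Once num_received reaches num_expected, mt_sync_frame() reports the
	 * OR'd BTN_LEFT state, emits the frame and resets both the counter
	 * and left_button_state.
	 */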
@@ -1329,6 +1433,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->maxcontact_report_id = -1;
td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
td->cc_index = -1;
+ td->scantime_index = -1;
td->mt_report_id = -1;
hid_set_drvdata(hdev, td);
@@ -1649,14 +1754,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_TURBOX,
USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
- /* Panasonic panels */
- { .driver_data = MT_CLS_PANASONIC,
- MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
- USB_DEVICE_ID_PANABOARD_UBT780) },
- { .driver_data = MT_CLS_PANASONIC,
- MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
- USB_DEVICE_ID_PANABOARD_UBT880) },
-
/* Novatek Panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
@@ -1667,6 +1764,14 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_NTRIG, 0x1b05) },
+ /* Panasonic panels */
+ { .driver_data = MT_CLS_PANASONIC,
+ MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
+ USB_DEVICE_ID_PANABOARD_UBT780) },
+ { .driver_data = MT_CLS_PANASONIC,
+ MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
+ USB_DEVICE_ID_PANABOARD_UBT880) },
+
/* PixArt optical touch screen */
{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
new file mode 100644
index 000000000000..5f6035a5ce36
--- /dev/null
+++ b/drivers/hid/hid-quirks.c
@@ -0,0 +1,1276 @@
+/*
+ * HID quirks support for Linux
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2006-2007 Jiri Kosina
+ * Copyright (c) 2007 Paul Walmsley
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include "hid-ids.h"
+
+/*
+ * Alphabetically sorted by vendor then product.
+ */
+
+static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS1758), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN), HID_QUIRK_MULTI_INPUT},
+ { HID_USB_DEVICE(USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK), HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL), HID_QUIRK_HIDINPUT_FORCE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+
+ { 0 }
+};
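Each entry packs its match keys via HID_USB_DEVICE() and carries the quirk mask in driver_data. Expanded by hand, the ATEN 2-port KVM entry above is roughly (a sketch based on the hid.h macros; .group is left 0, i.e. HID_GROUP_ANY):

	static const struct hid_device_id example_entry = {
		.bus         = BUS_USB,
		.vendor      = USB_VENDOR_ID_ATEN,
		.product     = USB_DEVICE_ID_ATEN_2PORTKVM,
		.driver_data = HID_QUIRK_NOGET,
	};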
+
+/*
+ * A list of devices for which there is a specialized driver on HID bus.
+ *
+ * Please note that for multitouch devices (driven by hid-multitouch driver),
+ * there is a proper autodetection and autoloading in place (based on presence
+ * of HID_DG_CONTACTID), so those devices don't need to be added to this list,
+ * as we are doing the right thing in hid_scan_usage().
+ *
+ * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
+ * physical is found inside a usage page of type sensor, hid-sensor-hub will be
+ * used as a driver. See hid_scan_report().
+ */
+static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_HID_A4TECH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACRUX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ALPS)
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLEIR)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ASUS)
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_AUREAL)
+ { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BELKIN)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BETOP_FF)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHERRY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHICONY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CMEDIA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CORSAIR)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CP2112)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CYPRESS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELECOM)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EMS_FF)
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EZKEY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GEMBIRD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GFRM)
+ { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
+ { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GREENASIA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GT683R)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GYRATION)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+#endif
+#if IS_ENABLED(CONFIG_HID_HOLTEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ITE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ICADE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_JABRA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KENSINGTON)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KYE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LCPOWER)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LENOVO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MICROSOFT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MONTEREY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WIIMOTE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTI)
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTRIG)
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ORTEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PENMOUNT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PETALYNX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PICOLCD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRIMAX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RETRODE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY, USB_DEVICE_ID_RETRODE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RMI)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ROCCAT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SAITEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SAMSUNG)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SONY)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_STEELSERIES)
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SUNPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
+#endif
+#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TIVO)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TOPSEED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TWINHAN)
+ { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UCLOGIC)
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WALTOP)
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_PID_0038) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
+#endif
+#if IS_ENABLED(CONFIG_HID_XINMO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZYDACRON)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+#endif
+ { }
+};
+
+/* A list of devices that should not be handled by HID core at all */
+static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
+ { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454_V2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
+#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
+#endif
+ { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ { }
+};
+
+/**
+ * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
+ *
+ * There are composite devices for which we want to ignore only a certain
+ * interface. This is a list of devices for which only the mouse interface will
+ * be ignored. This allows a dedicated driver to take care of the interface.
+ */
+static const struct hid_device_id hid_mouse_ignore_list[] = {
+ /* appletouch driver */
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { }
+};
+
+bool hid_ignore(struct hid_device *hdev)
+{
+ if (hdev->quirks & HID_QUIRK_NO_IGNORE)
+ return false;
+ if (hdev->quirks & HID_QUIRK_IGNORE)
+ return true;
+
+ switch (hdev->vendor) {
+ case USB_VENDOR_ID_CODEMERCS:
+ /* ignore all Code Mercenaries IOWarrior devices */
+ if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
+ hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_LOGITECH:
+ if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
+ hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
+ return true;
+ /*
+ * The Keene FM transmitter USB device has the same USB ID as
+ * the Logitech AudioHub Speaker, but its HID interface should
+ * be ignored. Check whether the name is that of the Keene
+ * device; for reference, the name of the AudioHub is
+ * "HOLTEK AudioHub Speaker".
+ */
+ if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB &&
+ !strcmp(hdev->name, "HOLTEK B-LINK USB Audio "))
+ return true;
+ break;
+ case USB_VENDOR_ID_SOUNDGRAPH:
+ if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
+ hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_HANWANG:
+ if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
+ hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_JESS:
+ if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
+ hdev->type == HID_TYPE_USBNONE)
+ return true;
+ break;
+ case USB_VENDOR_ID_VELLEMAN:
+ /* These are not HID devices. They are handled by comedi. */
+ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
+ (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
+ return true;
+ break;
+ case USB_VENDOR_ID_ATMEL_V_USB:
+ /*
+ * The Masterkit MA901 USB radio is based on the Atmel tiny85
+ * chip and has the same USB ID as many Atmel V-USB devices.
+ * The radio is handled by the radio-ma901.c driver, so we want
+ * to ignore its HID interface. Check the name, bus and product,
+ * and ignore the device if it is the MA901 radio.
+ */
+ if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
+ hdev->bus == BUS_USB &&
+ strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
+ return true;
+ break;
+ case USB_VENDOR_ID_ELAN:
+ /*
+ * Many Elan devices have a product id of 0x0401 and are handled
+ * by the elan_i2c input driver, but the ACPI HID ELAN0800 device
+ * is not (and cannot be) handled by that driver. Therefore,
+ * ignore all 0x0401 devices except for ELAN0800.
+ */
+ if (hdev->product == 0x0401 &&
+ strncmp(hdev->name, "ELAN0800", 8) != 0)
+ return true;
+ break;
+ }
+
+ if (hdev->type == HID_TYPE_USBMOUSE &&
+ hid_match_id(hdev, hid_mouse_ignore_list))
+ return true;
+
+ return !!hid_match_id(hdev, hid_ignore_list);
+}
+EXPORT_SYMBOL_GPL(hid_ignore);
+
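For context, hid_ignore() is the gate HID core applies before binding a
device. A minimal sketch of a caller, assuming a registration path along
the lines of hid_add_device() (which is not part of this hunk):

    int example_add_device(struct hid_device *hdev)
    {
            /* Leave ignored devices to their dedicated non-HID drivers. */
            if (hid_ignore(hdev))
                    return -ENODEV;

            /* ... continue with normal HID registration ... */
            return 0;
    }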
+/* Dynamic HID quirks list - specified at runtime */
+struct quirks_list_struct {
+ struct hid_device_id hid_bl_item;
+ struct list_head node;
+};
+
+static LIST_HEAD(dquirks_list);
+static DEFINE_MUTEX(dquirks_lock);
+
+/* Runtime ("dynamic") quirks manipulation functions */
+
+/**
+ * hid_exists_dquirk: find any dynamic quirks for a HID device
+ * @hdev: the HID device to match
+ *
+ * Description:
+ * Scans dquirks_list for a matching dynamic quirk and returns
+ * the pointer to the relevant struct hid_device_id if found.
+ * Must be called with the dquirks_lock mutex held.
+ *
+ * Returns: NULL if no quirk found, struct hid_device_id * if found.
+ */
+static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
+{
+ struct quirks_list_struct *q;
+ struct hid_device_id *bl_entry = NULL;
+
+ list_for_each_entry(q, &dquirks_list, node) {
+ if (hid_match_one_id(hdev, &q->hid_bl_item)) {
+ bl_entry = &q->hid_bl_item;
+ break;
+ }
+ }
+
+ if (bl_entry != NULL)
+ dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ bl_entry->driver_data, bl_entry->vendor,
+ bl_entry->product);
+
+ return bl_entry;
+}
+
+
+/**
+ * hid_modify_dquirk: add/replace a HID quirk
+ * @id: the HID device to match
+ * @quirks: the unsigned long quirks value to add/replace
+ *
+ * Description:
+ * If a dynamic quirk already exists in memory for this device, replace
+ * its quirks value with the one provided. Otherwise, add the quirk
+ * to the dynamic quirks list.
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ */
+static int hid_modify_dquirk(const struct hid_device_id *id,
+ const unsigned long quirks)
+{
+ struct hid_device *hdev;
+ struct quirks_list_struct *q_new, *q;
+ int list_edited = 0;
+ int ret = 0;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
+ if (!q_new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hdev->bus = q_new->hid_bl_item.bus = id->bus;
+ hdev->group = q_new->hid_bl_item.group = id->group;
+ hdev->vendor = q_new->hid_bl_item.vendor = id->vendor;
+ hdev->product = q_new->hid_bl_item.product = id->product;
+ q_new->hid_bl_item.driver_data = quirks;
+
+ mutex_lock(&dquirks_lock);
+
+ list_for_each_entry(q, &dquirks_list, node) {
+ if (hid_match_one_id(hdev, &q->hid_bl_item)) {
+ list_replace(&q->node, &q_new->node);
+ kfree(q);
+ list_edited = 1;
+ break;
+ }
+ }
+
+ if (!list_edited)
+ list_add_tail(&q_new->node, &dquirks_list);
+
+ mutex_unlock(&dquirks_lock);
+
+ out:
+ kfree(hdev);
+ return ret;
+}
+
+/**
+ * hid_remove_all_dquirks: remove all runtime HID quirks from memory
+ * @bus: bus to match against. Use HID_BUS_ANY if all need to be removed.
+ *
+ * Description:
+ * Free all memory associated with dynamic quirks - called before
+ * module unload.
+ *
+ */
+static void hid_remove_all_dquirks(__u16 bus)
+{
+ struct quirks_list_struct *q, *temp;
+
+ mutex_lock(&dquirks_lock);
+ list_for_each_entry_safe(q, temp, &dquirks_list, node) {
+ if (bus == HID_BUS_ANY || bus == q->hid_bl_item.bus) {
+ list_del(&q->node);
+ kfree(q);
+ }
+ }
+ mutex_unlock(&dquirks_lock);
+}
+
+/**
+ * hid_quirks_init: apply HID quirks specified at module load time
+ * @quirks_param: array of "0xVENDOR:0xPRODUCT:0xQUIRKS" strings
+ * @bus: the bus the quirked devices sit on (e.g. BUS_USB)
+ * @count: number of entries in @quirks_param
+ *
+ * Returns: 0 (entries that fail to parse are warned about and skipped).
+ */
+int hid_quirks_init(char **quirks_param, __u16 bus, int count)
+{
+ struct hid_device_id id = { 0 };
+ int n = 0, m;
+ unsigned short int vendor, product;
+ u32 quirks;
+
+ id.bus = bus;
+
+ for (; n < count && quirks_param[n]; n++) {
+
+ m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
+ &vendor, &product, &quirks);
+
+ id.vendor = (__u16)vendor;
+ id.product = (__u16)product;
+
+ if (m != 3 ||
+ hid_modify_dquirk(&id, quirks) != 0) {
+ pr_warn("Could not parse HID quirk module param %s\n",
+ quirks_param[n]);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hid_quirks_init);
+
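Each quirks_param entry is expected in the "0xVENDOR:0xPRODUCT:0xQUIRKS"
form matched by the sscanf() above. A sketch of how a transport driver can
feed this from a module parameter, mirroring what usbhid does in hid_init()
later in this patch (the parameter wiring and init function here are
illustrative):

    static char *quirks_param[MAX_USBHID_BOOT_QUIRKS];
    module_param_array_named(quirks, quirks_param, charp, NULL, 0444);
    MODULE_PARM_DESC(quirks,
                     "Add/modify quirks as quirks=0xVENDOR:0xPRODUCT:0xQUIRKS");

    static int __init example_init(void)
    {
            /* Turn each string into a dynamic quirk entry. */
            return hid_quirks_init(quirks_param, BUS_USB,
                                   MAX_USBHID_BOOT_QUIRKS);
    }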
+/**
+ * hid_quirks_exit: release memory associated with dynamic_quirks
+ * @bus: a bus to match against
+ *
+ * Description:
+ * Release all memory associated with dynamic quirks for a given bus.
+ * Called upon module unload.
+ * Use HID_BUS_ANY to remove all dynamic quirks.
+ *
+ * Returns: nothing
+ */
+void hid_quirks_exit(__u16 bus)
+{
+ hid_remove_all_dquirks(bus);
+}
+EXPORT_SYMBOL_GPL(hid_quirks_exit);
+
+/**
+ * hid_gets_squirk: return any static quirks for a HID device
+ * @hdev: the HID device to match
+ *
+ * Description:
+ * Given a HID device, look it up in the static ignore, special-driver
+ * and quirks tables and combine the quirks found there.
+ *
+ * Returns: an unsigned long quirks value.
+ */
+static unsigned long hid_gets_squirk(const struct hid_device *hdev)
+{
+ const struct hid_device_id *bl_entry;
+ unsigned long quirks = 0;
+
+ if (hid_match_id(hdev, hid_ignore_list))
+ quirks |= HID_QUIRK_IGNORE;
+
+ if (hid_match_id(hdev, hid_have_special_driver))
+ quirks |= HID_QUIRK_HAVE_SPECIAL_DRIVER;
+
+ bl_entry = hid_match_id(hdev, hid_quirks);
+ if (bl_entry != NULL)
+ quirks |= bl_entry->driver_data;
+
+ if (quirks)
+ dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ quirks, hdev->vendor, hdev->product);
+ return quirks;
+}
+
+/**
+ * hid_lookup_quirk: return any quirks associated with a HID device
+ * @hdev: the HID device to look for
+ *
+ * Description:
+ * Given a HID device, return any quirks associated with that device.
+ *
+ * Returns: an unsigned long quirks value.
+ */
+unsigned long hid_lookup_quirk(const struct hid_device *hdev)
+{
+ unsigned long quirks = 0;
+ const struct hid_device_id *quirk_entry = NULL;
+
+ /* NCR devices must not be queried for reports */
+ if (hdev->bus == BUS_USB &&
+ hdev->vendor == USB_VENDOR_ID_NCR &&
+ hdev->product >= USB_DEVICE_ID_NCR_FIRST &&
+ hdev->product <= USB_DEVICE_ID_NCR_LAST)
+ return HID_QUIRK_NO_INIT_REPORTS;
+
+ /* These devices must be ignored if version (bcdDevice) is too old */
+ if (hdev->bus == BUS_USB && hdev->vendor == USB_VENDOR_ID_JABRA) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_JABRA_SPEAK_410:
+ if (hdev->version < 0x0111)
+ return HID_QUIRK_IGNORE;
+ break;
+ case USB_DEVICE_ID_JABRA_SPEAK_510:
+ if (hdev->version < 0x0214)
+ return HID_QUIRK_IGNORE;
+ break;
+ }
+ }
+
+ mutex_lock(&dquirks_lock);
+ quirk_entry = hid_exists_dquirk(hdev);
+ if (quirk_entry)
+ quirks = quirk_entry->driver_data;
+ else
+ quirks = hid_gets_squirk(hdev);
+ mutex_unlock(&dquirks_lock);
+
+ return quirks;
+}
+EXPORT_SYMBOL_GPL(hid_lookup_quirk);
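Note the precedence: a matching dynamic quirk completely replaces the
static quirks value rather than being OR-ed into it. A typical caller,
matching the usbhid_parse() hunk later in this patch:

    unsigned long quirks = hid_lookup_quirk(hid);

    if (quirks & HID_QUIRK_IGNORE)
            return -ENODEV; /* leave the device to another driver */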
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 0f43c4292685..c6c05df3e8d2 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -731,6 +731,7 @@ static const struct hid_device_id rmi_id[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14),
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 43617fb28b87..317c9c2c0a7c 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
uint new_profile_index)
{
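+ /* Ignore malformed reports that select an out-of-range profile. */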
+ if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
+ return;
kovaplus->actual_profile = new_profile_index;
kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index fb77dec720a4..b7e86aba6f33 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -137,7 +137,7 @@ exit_unlock:
return retval;
}
-static unsigned int roccat_poll(struct file *file, poll_table *wait)
+static __poll_t roccat_poll(struct file *file, poll_table *wait)
{
struct roccat_reader *reader = file->private_data;
poll_wait(file, &reader->device->wait, wait);
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 0bcf041368c7..21ed6c55c40a 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -702,11 +702,11 @@ static int hid_sensor_custom_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
}
-static unsigned int hid_sensor_custom_poll(struct file *file,
+static __poll_t hid_sensor_custom_poll(struct file *file,
struct poll_table_struct *wait)
{
struct hid_sensor_custom *sensor_inst;
- unsigned int mask = 0;
+ __poll_t mask = 0;
sensor_inst = container_of(file->private_data,
struct hid_sensor_custom, custom_dev);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b9dc3ac4d4aa..ccdc5f2d01b1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -473,6 +473,7 @@ struct motion_output_report_02 {
#define DS4_FEATURE_REPORT_0x02_SIZE 37
#define DS4_FEATURE_REPORT_0x05_SIZE 41
#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0xA3_SIZE 49
#define DS4_INPUT_REPORT_0x11_SIZE 78
#define DS4_OUTPUT_REPORT_0x05_SIZE 32
#define DS4_OUTPUT_REPORT_0x11_SIZE 78
@@ -544,6 +545,8 @@ struct sony_sc {
struct power_supply *battery;
struct power_supply_desc battery_desc;
int device_id;
+ unsigned fw_version;
+ unsigned hw_version;
u8 *output_report_dmabuf;
#ifdef CONFIG_SONY_FF
@@ -627,6 +630,29 @@ static ssize_t ds4_store_poll_interval(struct device *dev,
static DEVICE_ATTR(bt_poll_interval, 0644, ds4_show_poll_interval,
ds4_store_poll_interval);
+static ssize_t sony_show_firmware_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->fw_version);
+}
+
+static DEVICE_ATTR(firmware_version, 0444, sony_show_firmware_version, NULL);
+
+static ssize_t sony_show_hardware_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->hw_version);
+}
+
+static DEVICE_ATTR(hardware_version, 0444, sony_show_hardware_version, NULL);
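(The two read-only attributes above expose the values cached by
dualshock4_get_version_info() below; reading firmware_version or
hardware_version from the device's sysfs directory returns the
"0x%04x"-formatted word.)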
static u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
@@ -1646,6 +1672,31 @@ static void dualshock4_calibration_work(struct work_struct *work)
spin_unlock_irqrestore(&sc->lock, flags);
}
+static int dualshock4_get_version_info(struct sony_sc *sc)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(DS4_FEATURE_REPORT_0xA3_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(sc->hdev, 0xA3, buf,
+ DS4_FEATURE_REPORT_0xA3_SIZE,
+ HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ kfree(buf);
+ return ret;
+ }
+
+ sc->hw_version = get_unaligned_le16(&buf[35]);
+ sc->fw_version = get_unaligned_le16(&buf[41]);
+
+ kfree(buf);
+ return 0;
+}
+
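(In the 0xA3 feature report read above, the hardware and firmware versions
sit at byte offsets 35 and 41 as little-endian 16-bit words; the remaining
bytes of the 49-byte report are not interpreted here.)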
static void sixaxis_set_leds_from_id(struct sony_sc *sc)
{
static const u8 sixaxis_leds[10][4] = {
@@ -2399,10 +2450,7 @@ static int sony_check_add(struct sony_sc *sc)
memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
- "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- sc->mac_address[5], sc->mac_address[4],
- sc->mac_address[3], sc->mac_address[2],
- sc->mac_address[1], sc->mac_address[0]);
+ "%pMR", sc->mac_address);
} else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
(sc->quirks & NAVIGATION_CONTROLLER_USB)) {
buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2432,10 +2480,7 @@ static int sony_check_add(struct sony_sc *sc)
sc->mac_address[5-n] = buf[4+n];
snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
- "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- sc->mac_address[5], sc->mac_address[4],
- sc->mac_address[3], sc->mac_address[2],
- sc->mac_address[1], sc->mac_address[0]);
+ "%pMR", sc->mac_address);
} else {
return 0;
}
@@ -2619,6 +2664,28 @@ static int sony_input_configured(struct hid_device *hdev,
goto err_stop;
}
+ ret = dualshock4_get_version_info(sc);
+ if (ret < 0) {
+ hid_err(sc->hdev, "Failed to get version data from Dualshock 4\n");
+ goto err_stop;
+ }
+
+ ret = device_create_file(&sc->hdev->dev, &dev_attr_firmware_version);
+ if (ret) {
+ /* Zero the versions so the error path skips sysfs files never created. */
+ sc->fw_version = 0;
+ sc->hw_version = 0;
+ hid_err(sc->hdev, "can't create sysfs firmware_version attribute err: %d\n", ret);
+ goto err_stop;
+ }
+
+ ret = device_create_file(&sc->hdev->dev, &dev_attr_hardware_version);
+ if (ret) {
+ sc->hw_version = 0;
+ hid_err(sc->hdev, "can't create sysfs hardware_version attribute err: %d\n", ret);
+ goto err_stop;
+ }
+
/*
* The Dualshock 4 touchpad supports 2 touches and has a
* resolution of 1920x942 (44.86 dots/mm).
@@ -2695,6 +2762,10 @@ err_stop:
*/
if (sc->ds4_bt_poll_interval)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
+ if (sc->fw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
+ if (sc->hw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(sc);
if (sc->quirks & SONY_BATTERY_SUPPORT)
@@ -2796,6 +2867,12 @@ static void sony_remove(struct hid_device *hdev)
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
+ if (sc->fw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
+
+ if (sc->hw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
+
sony_cancel_work_sync(sc);
kfree(sc->output_report_dmabuf);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 5fbe0f81ab2e..be210219f982 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -249,7 +249,7 @@ out:
return ret;
}
-static unsigned int hidraw_poll(struct file *file, poll_table *wait)
+static __poll_t hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index e054ee43c1e2..7230243b94d3 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -934,11 +934,6 @@ static int i2c_hid_of_probe(struct i2c_client *client,
}
pdata->hid_descriptor_address = val;
- ret = of_property_read_u32(dev->of_node, "post-power-on-delay-ms",
- &val);
- if (!ret)
- pdata->post_power_delay_ms = val;
-
return 0;
}
@@ -955,6 +950,16 @@ static inline int i2c_hid_of_probe(struct i2c_client *client,
}
#endif
+static void i2c_hid_fwnode_probe(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ u32 val;
+
+ if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
+ &val))
+ pdata->post_power_delay_ms = val;
+}
+
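With the lookup moved to device_property_read_u32(), the same property is
honoured whether it comes from ACPI _DSD or from device tree. An
illustrative device-tree node (values and node name are examples only):

    touchscreen@2c {
            compatible = "hid-over-i2c";
            reg = <0x2c>;
            hid-descr-addr = <0x0020>;
            post-power-on-delay-ms = <100>;
    };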
static int i2c_hid_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
@@ -998,6 +1003,9 @@ static int i2c_hid_probe(struct i2c_client *client,
ihid->pdata = *platform_data;
}
+ /* Parse platform-agnostic properties common to ACPI and device tree */
+ i2c_hid_fwnode_probe(client, &ihid->pdata);
+
ihid->pdata.supply = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(ihid->pdata.supply)) {
ret = PTR_ERR(ihid->pdata.supply);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 2aac097c3f70..97869b7410eb 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -28,6 +28,7 @@
#define SPT_Ax_DEVICE_ID 0x9D35
#define CNL_Ax_DEVICE_ID 0x9DFC
#define GLK_Ax_DEVICE_ID 0x31A2
+#define CNL_H_DEVICE_ID 0xA37C
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 20d824f74f99..582e449be9fe 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -37,6 +37,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 6f819f144cb4..fc43850a155e 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -753,7 +753,7 @@ unlock:
return ret ? ret : count;
}
-static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
+static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
struct uhid_device *uhid = file->private_data;
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index 0ff227d0c033..b6349e30bd93 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -3,7 +3,7 @@
# Makefile for the USB input drivers
#
-usbhid-y := hid-core.o hid-quirks.o
+usbhid-y := hid-core.o
usbhid-$(CONFIG_USB_HIDDEV) += hiddev.o
usbhid-$(CONFIG_HID_PID) += hid-pidff.o
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 640dfb937c69..77c50cdfff97 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -978,8 +978,7 @@ static int usbhid_parse(struct hid_device *hid)
int num_descriptors;
size_t offset = offsetof(struct hid_descriptor, desc);
- quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
+ quirks = hid_lookup_quirk(hid);
if (quirks & HID_QUIRK_IGNORE)
return -ENODEV;
@@ -1328,8 +1327,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->bus = BUS_USB;
hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
hid->product = le16_to_cpu(dev->descriptor.idProduct);
+ hid->version = le16_to_cpu(dev->descriptor.bcdDevice);
hid->name[0] = 0;
- hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
@@ -1641,7 +1640,7 @@ static int __init hid_init(void)
{
int retval = -ENOMEM;
- retval = usbhid_quirks_init(quirks_param);
+ retval = hid_quirks_init(quirks_param, BUS_USB, MAX_USBHID_BOOT_QUIRKS);
if (retval)
goto usbhid_quirks_init_fail;
retval = usb_register(&hid_driver);
@@ -1651,7 +1650,7 @@ static int __init hid_init(void)
return 0;
usb_register_fail:
- usbhid_quirks_exit();
+ hid_quirks_exit(BUS_USB);
usbhid_quirks_init_fail:
return retval;
}
@@ -1659,7 +1658,7 @@ usbhid_quirks_init_fail:
static void __exit hid_exit(void)
{
usb_deregister(&hid_driver);
- usbhid_quirks_exit();
+ hid_quirks_exit(BUS_USB);
}
module_init(hid_init);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
deleted file mode 100644
index 331f7f34ec14..000000000000
--- a/drivers/hid/usbhid/hid-quirks.c
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * USB HID quirks support for Linux
- *
- * Copyright (c) 1999 Andreas Gal
- * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- * Copyright (c) 2006-2007 Jiri Kosina
- * Copyright (c) 2007 Paul Walmsley
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/hid.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-
-#include "../hid-ids.h"
-
-/*
- * Alphabetically sorted blacklist by quirk type.
- */
-
-static const struct hid_blacklist {
- __u16 idVendor;
- __u16 idProduct;
- __u32 quirks;
-} hid_blacklist[] = {
- { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN, HID_QUIRK_MULTI_INPUT},
- { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
-
- { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
-
- { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
- { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS1758, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
-
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK, HID_QUIRK_MULTI_INPUT },
-
- { 0, 0 }
-};
-
-/* Dynamic HID quirks list - specified at runtime */
-struct quirks_list_struct {
- struct hid_blacklist hid_bl_item;
- struct list_head node;
-};
-
-static LIST_HEAD(dquirks_list);
-static DECLARE_RWSEM(dquirks_rwsem);
-
-/* Runtime ("dynamic") quirks manipulation functions */
-
-/**
- * usbhid_exists_dquirk: find any dynamic quirks for a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Scans dquirks_list for a matching dynamic quirk and returns
- * the pointer to the relevant struct hid_blacklist if found.
- * Must be called with a read lock held on dquirks_rwsem.
- *
- * Returns: NULL if no quirk found, struct hid_blacklist * if found.
- */
-static struct hid_blacklist *usbhid_exists_dquirk(const u16 idVendor,
- const u16 idProduct)
-{
- struct quirks_list_struct *q;
- struct hid_blacklist *bl_entry = NULL;
-
- list_for_each_entry(q, &dquirks_list, node) {
- if (q->hid_bl_item.idVendor == idVendor &&
- q->hid_bl_item.idProduct == idProduct) {
- bl_entry = &q->hid_bl_item;
- break;
- }
- }
-
- if (bl_entry != NULL)
- dbg_hid("Found dynamic quirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
- bl_entry->idProduct);
-
- return bl_entry;
-}
-
-
-/**
- * usbhid_modify_dquirk: add/replace a HID quirk
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- * @quirks: the u32 quirks value to add/replace
- *
- * Description:
- * If a dynamic quirk exists in memory for this (idVendor,
- * idProduct) pair, replace its quirks value with what was
- * provided. Otherwise, add the quirk to the dynamic quirks list.
- *
- * Returns: 0 OK, -error on failure.
- */
-static int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct,
- const u32 quirks)
-{
- struct quirks_list_struct *q_new, *q;
- int list_edited = 0;
-
- if (!idVendor) {
- dbg_hid("Cannot add a quirk with idVendor = 0\n");
- return -EINVAL;
- }
-
- q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
- if (!q_new)
- return -ENOMEM;
-
- q_new->hid_bl_item.idVendor = idVendor;
- q_new->hid_bl_item.idProduct = idProduct;
- q_new->hid_bl_item.quirks = quirks;
-
- down_write(&dquirks_rwsem);
-
- list_for_each_entry(q, &dquirks_list, node) {
-
- if (q->hid_bl_item.idVendor == idVendor &&
- q->hid_bl_item.idProduct == idProduct) {
-
- list_replace(&q->node, &q_new->node);
- kfree(q);
- list_edited = 1;
- break;
-
- }
-
- }
-
- if (!list_edited)
- list_add_tail(&q_new->node, &dquirks_list);
-
- up_write(&dquirks_rwsem);
-
- return 0;
-}
-
-/**
- * usbhid_remove_all_dquirks: remove all runtime HID quirks from memory
- *
- * Description:
- * Free all memory associated with dynamic quirks - called before
- * module unload.
- *
- */
-static void usbhid_remove_all_dquirks(void)
-{
- struct quirks_list_struct *q, *temp;
-
- down_write(&dquirks_rwsem);
- list_for_each_entry_safe(q, temp, &dquirks_list, node) {
- list_del(&q->node);
- kfree(q);
- }
- up_write(&dquirks_rwsem);
-
-}
-
-/**
- * usbhid_quirks_init: apply USB HID quirks specified at module load time
- * @quirks_param: array of "0xVID:0xPID:0xQUIRKS" quirk strings
- */
-int usbhid_quirks_init(char **quirks_param)
-{
- u16 idVendor, idProduct;
- u32 quirks;
- int n = 0, m;
-
- for (; n < MAX_USBHID_BOOT_QUIRKS && quirks_param[n]; n++) {
-
- m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
- &idVendor, &idProduct, &quirks);
-
- if (m != 3 ||
- usbhid_modify_dquirk(idVendor, idProduct, quirks) != 0) {
- pr_warn("Could not parse HID quirk module param %s\n",
- quirks_param[n]);
- }
- }
-
- return 0;
-}
-
-/**
- * usbhid_quirks_exit: release memory associated with dynamic quirks
- *
- * Description:
- * Release all memory associated with dynamic quirks. Called upon
- * module unload.
- *
- * Returns: nothing
- */
-void usbhid_quirks_exit(void)
-{
- usbhid_remove_all_dquirks();
-}
-
-/**
- * usbhid_exists_squirk: return any static quirks for a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Given a USB vendor ID and product ID, return a pointer to
- * the hid_blacklist entry associated with that device.
- *
- * Returns: pointer if quirk found, or NULL if no quirks found.
- */
-static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
- const u16 idProduct)
-{
- const struct hid_blacklist *bl_entry = NULL;
- int n = 0;
-
- for (; hid_blacklist[n].idVendor; n++)
- if (hid_blacklist[n].idVendor == idVendor &&
- (hid_blacklist[n].idProduct == (__u16) HID_ANY_ID ||
- hid_blacklist[n].idProduct == idProduct))
- bl_entry = &hid_blacklist[n];
-
- if (bl_entry != NULL)
- dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
- bl_entry->idProduct);
- return bl_entry;
-}
-
-/**
- * usbhid_lookup_quirk: return any quirks associated with a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Given a USB vendor ID and product ID, return any quirks associated
- * with that device.
- *
- * Returns: a u32 quirks value.
- */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct)
-{
- u32 quirks = 0;
- const struct hid_blacklist *bl_entry = NULL;
-
- /* NCR devices must not be queried for reports */
- if (idVendor == USB_VENDOR_ID_NCR &&
- idProduct >= USB_DEVICE_ID_NCR_FIRST &&
- idProduct <= USB_DEVICE_ID_NCR_LAST)
- return HID_QUIRK_NO_INIT_REPORTS;
-
- down_read(&dquirks_rwsem);
- bl_entry = usbhid_exists_dquirk(idVendor, idProduct);
- if (!bl_entry)
- bl_entry = usbhid_exists_squirk(idVendor, idProduct);
- if (bl_entry)
- quirks = bl_entry->quirks;
- up_read(&dquirks_rwsem);
-
- return quirks;
-}
-
-EXPORT_SYMBOL_GPL(usbhid_lookup_quirk);
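/*
 * Illustrative sketch, not part of this patch: the quirk strings consumed
 * by usbhid_quirks_init() above follow the "0xVID:0xPID:0xQUIRKS" layout
 * enforced by its sscanf() format. A minimal standalone C parse of the
 * same layout; the sample string below is hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned short idVendor, idProduct;
	unsigned int quirks;
	const char *param = "0x0458:0x0185:0x0004";	/* hypothetical VID:PID:quirks */

	if (sscanf(param, "0x%hx:0x%hx:0x%x",
		   &idVendor, &idProduct, &quirks) == 3)
		printf("vendor %04hx product %04hx quirks %08x\n",
		       idVendor, idProduct, quirks);
	return 0;
}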
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 7d749b19c27c..0ff3e7e70c8d 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -422,7 +422,7 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun
* "poll" file op
* No kernel lock - fine
*/
-static unsigned int hiddev_poll(struct file *file, poll_table *wait)
+static __poll_t hiddev_poll(struct file *file, poll_table *wait)
{
struct hiddev_list *list = file->private_data;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index ee71ad9b6cc1..409543160af7 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -56,6 +56,107 @@ static int wacom_set_report(struct hid_device *hdev, u8 type, u8 *buf,
return retval;
}
+static void wacom_wac_queue_insert(struct hid_device *hdev,
+ struct kfifo_rec_ptr_2 *fifo,
+ u8 *raw_data, int size)
+{
+ bool warned = false;
+
+ while (kfifo_avail(fifo) < size) {
+ if (!warned)
+ hid_warn(hdev, "%s: kfifo has filled, starting to drop events\n", __func__);
+ warned = true;
+
+ kfifo_skip(fifo);
+ }
+
+ kfifo_in(fifo, raw_data, size);
+}
+
+static void wacom_wac_queue_flush(struct hid_device *hdev,
+ struct kfifo_rec_ptr_2 *fifo)
+{
+ while (!kfifo_is_empty(fifo)) {
+ u8 buf[WACOM_PKGLEN_MAX];
+ int size;
+ int err;
+
+ size = kfifo_out(fifo, buf, sizeof(buf));
+ err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false);
+ if (err) {
+ hid_warn(hdev, "%s: unable to flush event due to error %d\n",
+ __func__, err);
+ }
+ }
+}
+
+static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ bool flush = false;
+ bool insert = false;
+ int i, j;
+
+ if (wacom_wac->serial[0] || !(features->quirks & WACOM_QUIRK_TOOLSERIAL))
+ return 0;
+
+ /* Queue events which have invalid tool type or serial number */
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->maxusage; j++) {
+ struct hid_field *field = report->field[i];
+ struct hid_usage *usage = &field->usage[j];
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ unsigned int offset;
+ unsigned int size;
+ unsigned int value;
+
+ if (equivalent_usage != HID_DG_INRANGE &&
+ equivalent_usage != HID_DG_TOOLSERIALNUMBER &&
+ equivalent_usage != WACOM_HID_WD_SERIALHI &&
+ equivalent_usage != WACOM_HID_WD_TOOLTYPE)
+ continue;
+
+ offset = field->report_offset;
+ size = field->report_size;
+ value = hid_field_extract(hdev, raw_data+1, offset + j * size, size);
+
+ /* If we go out of range, we need to flush the queue ASAP */
+ if (equivalent_usage == HID_DG_INRANGE)
+ value = !value;
+
+ if (value) {
+ flush = true;
+ switch (equivalent_usage) {
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_wac->serial[0] = value;
+ break;
+
+ case WACOM_HID_WD_SERIALHI:
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ break;
+
+ case WACOM_HID_WD_TOOLTYPE:
+ wacom_wac->id[0] = value;
+ break;
+ }
+ } else {
+ insert = true;
+ }
+ }
+ }
+
+ if (flush)
+ wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ else if (insert)
+ wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
+
+ return insert && !flush;
+}
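/*
 * Illustrative sketch, not part of this patch: the pen_fifo logic above
 * queues raw pen reports until a valid serial number arrives, dropping
 * the oldest queued report whenever the fifo is full, then replaying the
 * backlog once the serial is known. A minimal userspace model of that
 * drop-oldest policy; all names and sizes here are invented for the demo.
 */
#include <stdio.h>
#include <string.h>

#define SLOTS 4

static char ring[SLOTS][16];
static int head, count;

static void queue_insert(const char *rec)
{
	if (count == SLOTS) {			/* full: drop the oldest record */
		head = (head + 1) % SLOTS;
		count--;
	}
	strcpy(ring[(head + count) % SLOTS], rec);
	count++;
}

static void queue_flush(void)
{
	while (count) {				/* replay in arrival order */
		printf("replay %s\n", ring[head]);
		head = (head + 1) % SLOTS;
		count--;
	}
}

int main(void)
{
	queue_insert("report1");
	queue_insert("report2");
	queue_flush();				/* serial became known */
	return 0;
}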
+
static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
@@ -64,6 +165,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
if (size > WACOM_PKGLEN_MAX)
return 1;
+ if (wacom_wac_pen_serial_enforce(hdev, report, raw_data, size))
+ return -1;
+
memcpy(wacom->wacom_wac.data, raw_data, size);
wacom_wac_irq(&wacom->wacom_wac, size);
@@ -2347,23 +2451,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
int i;
unsigned long flags;
- spin_lock_irqsave(&remote->remote_lock, flags);
- remote->remotes[index].registered = false;
- spin_unlock_irqrestore(&remote->remote_lock, flags);
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ if (remote->remotes[i].serial == serial) {
- if (remote->remotes[index].battery.battery)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[index].battery.bat_desc);
+ spin_lock_irqsave(&remote->remote_lock, flags);
+ remote->remotes[i].registered = false;
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
- if (remote->remotes[index].group.name)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[index]);
+ if (remote->remotes[i].battery.battery)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[i].battery.bat_desc);
+
+ if (remote->remotes[i].group.name)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[i]);
- for (i = 0; i < WACOM_MAX_REMOTES; i++) {
- if (remote->remotes[i].serial == serial) {
remote->remotes[i].serial = 0;
remote->remotes[i].group.name = NULL;
- remote->remotes[i].registered = false;
remote->remotes[i].battery.battery = NULL;
wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
}
@@ -2580,6 +2684,10 @@ static int wacom_probe(struct hid_device *hdev,
goto fail;
}
+ error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ if (error)
+ goto fail;
+
wacom_wac->hid_data.inputmode = -1;
wacom_wac->mode_report = -1;
@@ -2643,6 +2751,8 @@ static void wacom_remove(struct hid_device *hdev)
if (wacom->wacom_wac.features.type != REMOTE)
wacom_release_resources(wacom);
+ kfifo_free(&wacom_wac->pen_fifo);
+
hid_set_drvdata(hdev, NULL);
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 16af6886e828..90c38a0523e9 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
struct wacom_features *features = &wacom_wac->features;
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
int i;
- bool is_touch_on = value;
bool do_report = false;
/*
@@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
break;
case WACOM_HID_WD_MUTE_DEVICE:
- if (wacom_wac->shared->touch_input && value) {
- wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on;
- is_touch_on = wacom_wac->shared->is_touch_on;
- }
-
- /* fall through*/
case WACOM_HID_WD_TOUCHONOFF:
if (wacom_wac->shared->touch_input) {
+ bool *is_touch_on = &wacom_wac->shared->is_touch_on;
+
+ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
+ *is_touch_on = !(*is_touch_on);
+ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
+ *is_touch_on = value;
+
input_report_switch(wacom_wac->shared->touch_input,
- SW_MUTE_DEVICE, !is_touch_on);
+ SW_MUTE_DEVICE, !(*is_touch_on));
input_sync(wacom_wac->shared->touch_input);
}
break;
@@ -2085,7 +2085,29 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS2, 0);
break;
case HID_DG_TOOLSERIALNUMBER:
+ features->quirks |= WACOM_QUIRK_TOOLSERIAL;
wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
+
+ /* Adjust AES usages to match modern convention */
+ if (usage->hid == WACOM_HID_WT_SERIALNUMBER && field->report_size == 16) {
+ if (field->index + 2 < field->report->maxfield) {
+ struct hid_field *a = field->report->field[field->index + 1];
+ struct hid_field *b = field->report->field[field->index + 2];
+
+ if (a->maxusage > 0 && a->usage[0].hid == HID_DG_TOOLSERIALNUMBER && a->report_size == 32 &&
+ b->maxusage > 0 && b->usage[0].hid == 0xFF000000 && b->report_size == 8) {
+ features->quirks |= WACOM_QUIRK_AESPEN;
+ usage->hid = WACOM_HID_WD_TOOLTYPE;
+ field->logical_minimum = S16_MIN;
+ field->logical_maximum = S16_MAX;
+ a->logical_minimum = S32_MIN;
+ a->logical_maximum = S32_MAX;
+ b->usage[0].hid = WACOM_HID_WD_SERIALHI;
+ b->logical_minimum = 0;
+ b->logical_maximum = U8_MAX;
+ }
+ }
+ }
break;
case WACOM_HID_WD_SENSE:
features->quirks |= WACOM_QUIRK_SENSE;
@@ -2093,15 +2115,18 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
break;
case WACOM_HID_WD_SERIALHI:
wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
- set_bit(EV_KEY, input->evbit);
- input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
- input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
- input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
- input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
- input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
- if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
- input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
- input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+
+ if (!(features->quirks & WACOM_QUIRK_AESPEN)) {
+ set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+ if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ }
}
break;
case WACOM_HID_WD_FINGERWHEEL:
@@ -4390,6 +4415,12 @@ static const struct wacom_features wacom_features_0x360 =
static const struct wacom_features wacom_features_0x361 =
{ "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
+static const struct wacom_features wacom_features_0x37A =
+ { "Wacom One by Wacom S", 15200, 9500, 2047, 63,
+ BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x37B =
+ { "Wacom One by Wacom M", 21600, 13500, 2047, 63,
+ BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4558,6 +4589,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x343) },
{ BT_DEVICE_WACOM(0x360) },
{ BT_DEVICE_WACOM(0x361) },
+ { USB_DEVICE_WACOM(0x37A) },
+ { USB_DEVICE_WACOM(0x37B) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 64d8f014602e..15d9c14fbdf7 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/hid.h>
+#include <linux/kfifo.h>
/* maximum packet length for USB/BT devices */
#define WACOM_PKGLEN_MAX 361
@@ -86,7 +87,9 @@
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
#define WACOM_QUIRK_SENSE 0x0002
+#define WACOM_QUIRK_AESPEN 0x0004
#define WACOM_QUIRK_BATTERY 0x0008
+#define WACOM_QUIRK_TOOLSERIAL 0x0010
/* device types */
#define WACOM_DEVICETYPE_NONE 0x0000
@@ -107,6 +110,7 @@
#define WACOM_HID_WD_PEN (WACOM_HID_UP_WACOMDIGITIZER | 0x02)
#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALNUMBER (WACOM_HID_UP_WACOMDIGITIZER | 0x5b)
#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
#define WACOM_HID_WD_TOOLTYPE (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
#define WACOM_HID_WD_DISTANCE (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
@@ -150,6 +154,7 @@
#define WACOM_HID_WT_TOUCHSCREEN (WACOM_HID_UP_WACOMTOUCH | 0x04)
#define WACOM_HID_WT_TOUCHPAD (WACOM_HID_UP_WACOMTOUCH | 0x05)
#define WACOM_HID_WT_CONTACTMAX (WACOM_HID_UP_WACOMTOUCH | 0x55)
+#define WACOM_HID_WT_SERIALNUMBER (WACOM_HID_UP_WACOMTOUCH | 0x5b)
#define WACOM_HID_WT_X (WACOM_HID_UP_WACOMTOUCH | 0x130)
#define WACOM_HID_WT_Y (WACOM_HID_UP_WACOMTOUCH | 0x131)
@@ -336,6 +341,7 @@ struct wacom_wac {
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
+ struct kfifo_rec_ptr_2 pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 727f968ac1cb..8fbbacb0fe21 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -451,11 +451,11 @@ static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
cs_release_cmd(msg);
if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
- struct timespec tspec;
+ struct timespec64 tspec;
struct cs_timestamp *tstamp =
&hi->mmap_cfg->tstamp_rx_ctrl;
- ktime_get_ts(&tspec);
+ ktime_get_ts64(&tspec);
tstamp->tv_sec = (__u32) tspec.tv_sec;
tstamp->tv_nsec = (__u32) tspec.tv_nsec;
@@ -1124,10 +1124,10 @@ static int cs_char_fasync(int fd, struct file *file, int on)
return 0;
}
-static unsigned int cs_char_poll(struct file *file, poll_table *wait)
+static __poll_t cs_char_poll(struct file *file, poll_table *wait)
{
struct cs_char *csdata = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &cs_char_data.wait, wait);
spin_lock_bh(&csdata->lock);
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 4402a71e23f7..047959e74bb1 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -104,7 +104,7 @@ static ssize_t hvt_op_write(struct file *file, const char __user *buf,
return ret ? ret : count;
}
-static unsigned int hvt_op_poll(struct file *file, poll_table *wait)
+static __poll_t hvt_op_poll(struct file *file, poll_table *wait)
{
struct hvutil_transport *hvt;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 12eb8caa4263..50e071444a5c 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -140,6 +140,29 @@ static u32 hv_copyto_ringbuffer(
return start_write_offset;
}
+/*
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get the number of bytes available to read from, and to write to,
+ * the specified ring buffer.
+ */
+static void
+hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
+ u32 *read, u32 *write)
+{
+ u32 read_loc, write_loc, dsize;
+
+ /* Capture the read/write indices before they change */
+ read_loc = READ_ONCE(rbi->ring_buffer->read_index);
+ write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+ dsize = rbi->ring_datasize;
+
+ *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+ read_loc - write_loc;
+ *read = dsize - *write;
+}
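/*
 * Illustrative worked example, values invented: with ring_datasize = 4096,
 * read_index = 100 and write_index = 300, write_loc >= read_loc, so bytes
 * available to write = 4096 - (300 - 100) = 3896 and bytes available to
 * read = 4096 - 3896 = 200, i.e. exactly the 200 bytes sitting between
 * the two indices.
 */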
+
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7ad017690e3a..ef23553ff5cb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -26,11 +26,9 @@ if HWMON
config HWMON_VID
tristate
- default n
config HWMON_DEBUG_CHIP
bool "Hardware Monitoring Chip debugging messages"
- default n
help
Say Y here if you want the I2C chip drivers to produce a bunch of
debug messages to the system log. Select this if you are having
@@ -42,7 +40,6 @@ comment "Native drivers"
config SENSORS_AB8500
tristate "AB8500 thermal monitoring"
depends on AB8500_GPADC && AB8500_BM
- default n
help
If you say yes here you get support for the thermal sensor part
of the AB8500 chip. The driver includes thermal management for
@@ -302,7 +299,6 @@ config SENSORS_APPLESMC
select NEW_LEDS
select LEDS_CLASS
select INPUT_POLLDEV
- default n
help
This driver provides support for the Apple System Management
Controller, which provides an accelerometer (Apple Sudden Motion
@@ -678,7 +674,6 @@ config SENSORS_JC42
config SENSORS_POWR1220
tristate "Lattice POWR1220 Power Monitoring"
depends on I2C
- default n
help
If you say yes here you get access to the hardware monitoring
functions of the Lattice POWR1220 isp Power Supply Monitoring,
@@ -702,7 +697,6 @@ config SENSORS_LTC2945
tristate "Linear Technology LTC2945"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC2945
I2C System Monitor.
@@ -727,7 +721,6 @@ config SENSORS_LTC2990
config SENSORS_LTC4151
tristate "Linear Technology LTC4151"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4151
High Voltage I2C Current and Voltage Monitor interface.
@@ -738,7 +731,6 @@ config SENSORS_LTC4151
config SENSORS_LTC4215
tristate "Linear Technology LTC4215"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4215
Hot Swap Controller I2C interface.
@@ -750,7 +742,6 @@ config SENSORS_LTC4222
tristate "Linear Technology LTC4222"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4222
Dual Hot Swap Controller I2C interface.
@@ -761,7 +752,6 @@ config SENSORS_LTC4222
config SENSORS_LTC4245
tristate "Linear Technology LTC4245"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4245
Multiple Supply Hot Swap Controller I2C interface.
@@ -773,7 +763,6 @@ config SENSORS_LTC4260
tristate "Linear Technology LTC4260"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4260
Positive Voltage Hot Swap Controller I2C interface.
@@ -784,7 +773,6 @@ config SENSORS_LTC4260
config SENSORS_LTC4261
tristate "Linear Technology LTC4261"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4261
Negative Voltage Hot Swap Controller I2C interface.
@@ -1276,7 +1264,6 @@ config SENSORS_NSA320
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
- default n
help
If you say yes here you get support for Philips PCF8591 4-channel
ADC, 1-channel DAC chips.
@@ -1459,7 +1446,6 @@ config SENSORS_SMSC47B397
config SENSORS_SCH56XX_COMMON
tristate
- default n
config SENSORS_SCH5627
tristate "SMSC SCH5627"
@@ -1505,7 +1491,6 @@ config SENSORS_STTS751
config SENSORS_SMM665
tristate "Summit Microelectronics SMM665"
depends on I2C
- default n
help
If you say yes here you get support for the hardware monitoring
features of the Summit Microelectronics SMM665/SMM665B Six-Channel
@@ -1725,6 +1710,16 @@ config SENSORS_VT8231
This driver can also be built as a module. If so, the module
will be called vt8231.
+config SENSORS_W83773G
+ tristate "Nuvoton W83773G"
+ depends on I2C
+ help
+ If you say yes here you get support for the Nuvoton W83773G hardware
+ monitoring chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called w83773g.
+
config SENSORS_W83781D
tristate "Winbond W83781D, W83782D, W83783S, Asus AS99127F"
depends on I2C
@@ -1782,7 +1777,6 @@ config SENSORS_W83795
config SENSORS_W83795_FANCTRL
bool "Include automatic fan control support (DANGEROUS)"
depends on SENSORS_W83795
- default n
help
If you say yes here, support for automatic fan speed control
will be included in the driver.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 0fe489fab663..f814b4ace138 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
# asb100, then w83781d go first, as they can override other drivers' addresses.
obj-$(CONFIG_SENSORS_ASB100) += asb100.o
obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o
+obj-$(CONFIG_SENSORS_W83773G) += w83773g.o
obj-$(CONFIG_SENSORS_W83792D) += w83792d.o
obj-$(CONFIG_SENSORS_W83793) += w83793.o
obj-$(CONFIG_SENSORS_W83795) += w83795.o
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 63a95e23ca81..693a3d53cab5 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
@@ -181,6 +182,7 @@ struct aspeed_cooling_device {
struct aspeed_pwm_tacho_data {
struct regmap *regmap;
+ struct reset_control *rst;
unsigned long clk_freq;
bool pwm_present[8];
bool fan_tach_present[16];
@@ -905,6 +907,13 @@ static int aspeed_create_fan(struct device *dev,
return 0;
}
+static void aspeed_pwm_tacho_remove(void *data)
+{
+ struct aspeed_pwm_tacho_data *priv = data;
+
+ reset_control_assert(priv->rst);
+}
+
static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -931,6 +940,19 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
&aspeed_pwm_tacho_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(priv->rst);
+ }
+ reset_control_deassert(priv->rst);
+
+ ret = devm_add_action_or_reset(dev, aspeed_pwm_tacho_remove, priv);
+ if (ret)
+ return ret;
+
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE, 0);
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE_EXT, 0);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c13a4fd86b3c..4bdbf77f7197 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -246,7 +246,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
int err;
u32 eax, edx;
int i;
- struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ u16 devfn = PCI_DEVFN(0, 0);
+ struct pci_dev *host_bridge = pci_get_domain_bus_and_slot(0, 0, devfn);
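/*
 * Illustrative note, not part of this patch: pci_get_bus_and_slot()
 * always assumed PCI domain 0; pci_get_domain_bus_and_slot() takes the
 * domain explicitly, so passing 0 here preserves the old behaviour while
 * moving off the deprecated helper.
 */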
/*
* Explicit tjmax table entries override heuristics.
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c7c9e95e58a8..bf3bb7e1adab 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -76,6 +76,7 @@ static uint i8k_fan_mult = I8K_FAN_MULT;
static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
static bool disallow_fan_type_call;
+static bool disallow_fan_support;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -242,6 +243,9 @@ static int i8k_get_fan_status(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
@@ -253,6 +257,9 @@ static int i8k_get_fan_speed(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -264,7 +271,7 @@ static int _i8k_get_fan_type(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
- if (disallow_fan_type_call)
+ if (disallow_fan_support || disallow_fan_type_call)
return -EINVAL;
regs.ebx = fan & 0xff;
@@ -289,6 +296,9 @@ static int i8k_get_fan_nominal_speed(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_NOM_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = (fan & 0xff) | (speed << 8);
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -300,6 +310,9 @@ static int i8k_set_fan(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed);
regs.ebx = (fan & 0xff) | (speed << 8);
@@ -772,6 +785,8 @@ static struct attribute *i8k_attrs[] = {
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
+ if (disallow_fan_support && index >= 8)
+ return 0;
if (disallow_fan_type_call &&
(index == 9 || index == 12 || index == 15))
return 0;
@@ -1039,6 +1054,30 @@ static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst
};
/*
+ * On some machines, all fan-related SMM functions implemented by the Dell
+ * BIOS firmware freeze the kernel for about 500ms. Until Dell fixes these
+ * problems, fan support for the affected blacklisted Dell machines stays
+ * disabled.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=195751
+ */
+static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
+ {
+ .ident = "Dell Inspiron 7720",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
+ },
+ },
+ {
+ .ident = "Dell Vostro 3360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
+ },
+ },
+ { }
+};
+
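/*
 * Illustrative note, not part of this patch: DMI_MATCH() succeeds when the
 * firmware string merely contains the given text, while DMI_EXACT_MATCH()
 * requires an exact product-name match, so the table above will not catch
 * a hypothetical "Inspiron 7720 SE" variant.
 */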
+/*
* Probe for the presence of a supported laptop.
*/
static int __init i8k_probe(void)
@@ -1060,8 +1099,17 @@ static int __init i8k_probe(void)
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
- if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
- disallow_fan_type_call = true;
+ if (dmi_check_system(i8k_blacklist_fan_support_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan support\n");
+ if (!force)
+ disallow_fan_support = true;
+ }
+
+ if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan type call\n");
+ if (!force)
+ disallow_fan_type_call = true;
+ }
strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
sizeof(bios_version));
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 7b73d2002d3e..0ae1ee1dbf76 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -37,7 +37,7 @@
/**
* struct hih6130 - HIH-6130 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: pointer to I2C client device
* @lock: mutex to protect measurement values
* @valid: only false before first measurement is taken
* @last_update: time of last update (jiffies)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index af5123042990..32083e452cde 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -678,7 +678,7 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
* @dev: the parent device
* @name: hwmon name attribute
* @drvdata: driver data to attach to created device
- * @info: pointer to hwmon chip information
+ * @chip: pointer to hwmon chip information
* @extra_groups: pointer to list of additional non-standard attribute groups
*
* hwmon_device_unregister() must be called when the device is no
@@ -785,11 +785,11 @@ EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
/**
* devm_hwmon_device_register_with_info - register w/ hwmon
- * @dev: the parent device
- * @name: hwmon name attribute
- * @drvdata: driver data to attach to created device
- * @info: Pointer to hwmon chip information
- * @groups - pointer to list of driver specific attribute groups
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @chip: pointer to hwmon chip information
+ * @groups: pointer to list of driver specific attribute groups
*
* Returns the pointer to the new device. The new device is automatically
* unregistered with the parent device.
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index f6a76679c650..5e5b32a1ec4b 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -23,7 +23,8 @@
* @channels: filled with array of channels from iio
* @num_channels: number of channels in channels (saves counting twice)
* @hwmon_dev: associated hwmon device
- * @attr_group: the group of attributes
+ * @attr_group: the group of attributes
+ * @groups: null terminated array of attribute groups
* @attrs: null terminated array of attribute pointers.
*/
struct iio_hwmon_state {
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 62e38fa8cda2..e9e6aeabbf84 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -95,18 +95,20 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
- int calibration_factor;
+ int calibration_value;
int registers;
int shunt_div;
int bus_voltage_shift;
int bus_voltage_lsb; /* uV */
- int power_lsb; /* uW */
+ int power_lsb_factor;
};
struct ina2xx_data {
const struct ina2xx_config *config;
long rshunt;
+ long current_lsb_uA;
+ long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
@@ -116,21 +118,21 @@ struct ina2xx_data {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
+ .calibration_value = 4096,
.registers = INA219_REGISTERS,
.shunt_div = 100,
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
- .power_lsb = 20000,
+ .power_lsb_factor = 20,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
+ .calibration_value = 2048,
.registers = INA226_REGISTERS,
.shunt_div = 400,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
- .power_lsb = 25000,
+ .power_lsb_factor = 25,
},
};
@@ -169,12 +171,16 @@ static u16 ina226_interval_to_reg(int interval)
return INA226_SHIFT_AVG(avg_bits);
}
+/*
+ * The calibration register is set to the best value, which eliminates
+ * truncation errors when calculating the current register in hardware.
+ * According to the datasheet (eq. 3) the best values are 2048 for the
+ * ina226 and 4096 for the ina219; they are hardcoded as calibration_value.
+ */
static int ina2xx_calibrate(struct ina2xx_data *data)
{
- u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- data->rshunt);
-
- return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
+ return regmap_write(data->regmap, INA2XX_CALIBRATION,
+ data->config->calibration_value);
}
/*
@@ -187,10 +193,6 @@ static int ina2xx_init(struct ina2xx_data *data)
if (ret < 0)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet).
- */
return ina2xx_calibrate(data);
}
@@ -268,15 +270,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_POWER:
- val = regval * data->config->power_lsb;
+ val = regval * data->power_lsb_uW;
break;
case INA2XX_CURRENT:
- /* signed register, LSB=1mA (selected), in mA */
- val = (s16)regval;
+ /* signed register, result in mA */
+ val = regval * data->current_lsb_uA;
+ val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_CALIBRATION:
- val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- regval);
+ val = regval;
break;
default:
/* programmer goofed */
@@ -304,9 +306,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
-static ssize_t ina2xx_set_shunt(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+/*
+ * In order to keep the calibration register value fixed, the product
+ * of current_lsb and shunt_resistor must also stay fixed, equal to
+ * shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 so that the
+ * scale is preserved.
+ */
+static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+{
+ unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
+ data->config->shunt_div);
+ if (val <= 0 || val > dividend)
+ return -EINVAL;
+
+ mutex_lock(&data->config_lock);
+ data->rshunt = val;
+ data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
+ data->power_lsb_uW = data->config->power_lsb_factor *
+ data->current_lsb_uA;
+ mutex_unlock(&data->config_lock);
+
+ return 0;
+}
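/*
 * Illustrative worked example, values invented: for an ina226
 * (shunt_div = 400, power_lsb_factor = 25) with a 10 mOhm shunt
 * (val = 10000 uOhm), dividend = 10^9 / 400 = 2500000, so
 * current_lsb_uA = 2500000 / 10000 = 250 and
 * power_lsb_uW = 25 * 250 = 6250, consistent with the fixed
 * calibration value of 2048 written by ina2xx_calibrate().
 */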
+
+static ssize_t ina2xx_store_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int status;
@@ -316,18 +341,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
if (status < 0)
return status;
- if (val == 0 ||
- /* Values greater than the calibration factor make no sense. */
- val > data->config->calibration_factor)
- return -EINVAL;
-
- mutex_lock(&data->config_lock);
- data->rshunt = val;
- status = ina2xx_calibrate(data);
- mutex_unlock(&data->config_lock);
+ status = ina2xx_set_shunt(data, val);
if (status < 0)
return status;
-
return count;
}
@@ -387,7 +403,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_set_shunt,
+ ina2xx_show_value, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
@@ -438,6 +454,7 @@ static int ina2xx_probe(struct i2c_client *client,
/* set the device type */
data->config = &ina2xx_config[chip];
+ mutex_init(&data->config_lock);
if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -448,10 +465,7 @@ static int ina2xx_probe(struct i2c_client *client,
val = INA2XX_RSHUNT_DEFAULT;
}
- if (val <= 0 || val > data->config->calibration_factor)
- return -ENODEV;
-
- data->rshunt = val;
+ ina2xx_set_shunt(data, val);
ina2xx_regmap_config.max_register = data->config->registers;
@@ -467,8 +481,6 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENODEV;
}
- mutex_init(&data->config_lock);
-
data->groups[group++] = &ina2xx_group;
if (id->driver_data == ina226)
data->groups[group++] = &ina226_group;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 0721e175664a..06b4e1c78bd8 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -86,6 +86,7 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1950", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1920", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1910", 10000 },
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 005ffb5ffa92..49f4b33a5685 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -100,7 +100,7 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_chip_update_interval:
*val = data->sample_time;
- break;;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 08479006c7f9..6e4298e99222 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -39,6 +39,7 @@ config SENSORS_ADM1275
config SENSORS_IBM_CFFPS
tristate "IBM Common Form Factor Power Supply"
+ depends on LEDS_CLASS
help
If you say yes here you get hardware monitoring support for the IBM
Common Form Factor power supply.
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index cb56da6834e5..93d9a9ea112b 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -8,12 +8,29 @@
*/
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/fs.h>
#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pmbus.h>
#include "pmbus.h"
+#define CFFPS_FRU_CMD 0x9A
+#define CFFPS_PN_CMD 0x9B
+#define CFFPS_SN_CMD 0x9E
+#define CFFPS_CCIN_CMD 0xBD
+#define CFFPS_FW_CMD_START 0xFA
+#define CFFPS_FW_NUM_BYTES 4
+#define CFFPS_SYS_CONFIG_CMD 0xDA
+
+#define CFFPS_INPUT_HISTORY_CMD 0xD6
+#define CFFPS_INPUT_HISTORY_SIZE 100
+
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
#define CFFPS_MFR_THERMAL_FAULT BIT(1)
@@ -24,6 +41,153 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+#define CFFPS_LED_BLINK BIT(0)
+#define CFFPS_LED_ON BIT(1)
+#define CFFPS_LED_OFF BIT(2)
+#define CFFPS_BLINK_RATE_MS 250
+
+enum {
+ CFFPS_DEBUGFS_INPUT_HISTORY = 0,
+ CFFPS_DEBUGFS_FRU,
+ CFFPS_DEBUGFS_PN,
+ CFFPS_DEBUGFS_SN,
+ CFFPS_DEBUGFS_CCIN,
+ CFFPS_DEBUGFS_FW,
+ CFFPS_DEBUGFS_NUM_ENTRIES
+};
+
+struct ibm_cffps_input_history {
+ struct mutex update_lock;
+ unsigned long last_update;
+
+ u8 byte_count;
+ u8 data[CFFPS_INPUT_HISTORY_SIZE];
+};
+
+struct ibm_cffps {
+ struct i2c_client *client;
+
+ struct ibm_cffps_input_history input_history;
+
+ int debugfs_entries[CFFPS_DEBUGFS_NUM_ENTRIES];
+
+ char led_name[32];
+ u8 led_state;
+ struct led_classdev led;
+};
+
+#define to_psu(x, y) container_of((x), struct ibm_cffps, debugfs_entries[(y)])
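/*
 * Illustrative sketch, not part of this patch: to_psu() recovers the
 * enclosing struct ibm_cffps from a pointer to one slot of its
 * debugfs_entries[] array; each slot stores its own index, so the slot's
 * value tells container_of() which array element the pointer refers to.
 * A minimal userspace version of the same pointer arithmetic:
 */
#include <stddef.h>
#include <stdio.h>

struct demo_psu {
	char name[8];
	int entries[3];
};

int main(void)
{
	struct demo_psu psu = { "psu0", { 0, 1, 2 } };
	int *p = &psu.entries[2];	/* what file->private_data would hold */
	int *base = p - *p;		/* slot stores its index: back to entries[0] */
	struct demo_psu *owner =
		(struct demo_psu *)((char *)base - offsetof(struct demo_psu, entries));

	printf("%s\n", owner->name);	/* prints psu0 */
	return 0;
}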
+
+static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ int rc;
+ u8 msgbuf0[1] = { CFFPS_INPUT_HISTORY_CMD };
+ u8 msgbuf1[CFFPS_INPUT_HISTORY_SIZE + 1] = { 0 };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags,
+ .len = 1,
+ .buf = msgbuf0,
+ }, {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags | I2C_M_RD,
+ .len = CFFPS_INPUT_HISTORY_SIZE + 1,
+ .buf = msgbuf1,
+ },
+ };
+
+ if (!*ppos) {
+ mutex_lock(&psu->input_history.update_lock);
+ if (time_after(jiffies, psu->input_history.last_update + HZ)) {
+ /*
+ * Use a raw i2c transfer, since we need more bytes
+ * than Linux I2C supports through smbus xfr (only 32).
+ */
+ rc = i2c_transfer(psu->client->adapter, msg, 2);
+ if (rc < 0) {
+ mutex_unlock(&psu->input_history.update_lock);
+ return rc;
+ }
+
+ psu->input_history.byte_count = msgbuf1[0];
+ memcpy(psu->input_history.data, &msgbuf1[1],
+ CFFPS_INPUT_HISTORY_SIZE);
+ psu->input_history.last_update = jiffies;
+ }
+
+ mutex_unlock(&psu->input_history.update_lock);
+ }
+
+ return simple_read_from_buffer(buf, count, ppos,
+ psu->input_history.data,
+ psu->input_history.byte_count);
+}
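/*
 * Illustrative note, not part of this patch: i2c_smbus_read_block_data()
 * is capped at I2C_SMBUS_BLOCK_MAX (32) bytes per transfer, so the
 * 100-byte-plus-count input history record has to be fetched with the raw
 * write-then-read i2c_transfer() pair built above.
 */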
+
+static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 cmd;
+ int i, rc;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct ibm_cffps *psu = to_psu(idxp, idx);
+ char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+
+ switch (idx) {
+ case CFFPS_DEBUGFS_INPUT_HISTORY:
+ return ibm_cffps_read_input_history(psu, buf, count, ppos);
+ case CFFPS_DEBUGFS_FRU:
+ cmd = CFFPS_FRU_CMD;
+ break;
+ case CFFPS_DEBUGFS_PN:
+ cmd = CFFPS_PN_CMD;
+ break;
+ case CFFPS_DEBUGFS_SN:
+ cmd = CFFPS_SN_CMD;
+ break;
+ case CFFPS_DEBUGFS_CCIN:
+ rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, 5, "%04X", rc);
+ goto done;
+ case CFFPS_DEBUGFS_FW:
+ for (i = 0; i < CFFPS_FW_NUM_BYTES; ++i) {
+ rc = i2c_smbus_read_byte_data(psu->client,
+ CFFPS_FW_CMD_START + i);
+ if (rc < 0)
+ return rc;
+
+ snprintf(&data[i * 2], 3, "%02X", rc);
+ }
+
+ rc = i * 2;
+ goto done;
+ default:
+ return -EINVAL;
+ }
+
+ rc = i2c_smbus_read_block_data(psu->client, cmd, data);
+ if (rc < 0)
+ return rc;
+
+done:
+ data[rc] = '\n';
+ rc += 2;
+
+ return simple_read_from_buffer(buf, count, ppos, data, rc);
+}
+
+static const struct file_operations ibm_cffps_fops = {
+ .llseek = noop_llseek,
+ .read = ibm_cffps_debugfs_op,
+ .open = simple_open,
+};
+
static int ibm_cffps_read_byte_data(struct i2c_client *client, int page,
int reg)
{
@@ -105,6 +269,69 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
return rc;
}
+static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ if (brightness == LED_OFF) {
+ psu->led_state = CFFPS_LED_OFF;
+ } else {
+ brightness = LED_FULL;
+ if (psu->led_state != CFFPS_LED_BLINK)
+ psu->led_state = CFFPS_LED_ON;
+ }
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ psu->led_state);
+ if (rc < 0)
+ return;
+
+ led_cdev->brightness = brightness;
+}
+
+static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ psu->led_state = CFFPS_LED_BLINK;
+
+ if (led_cdev->brightness == LED_OFF)
+ return 0;
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ CFFPS_LED_BLINK);
+ if (rc < 0)
+ return rc;
+
+ *delay_on = CFFPS_BLINK_RATE_MS;
+ *delay_off = CFFPS_BLINK_RATE_MS;
+
+ return 0;
+}
+
+static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
+{
+ int rc;
+ struct i2c_client *client = psu->client;
+ struct device *dev = &client->dev;
+
+ snprintf(psu->led_name, sizeof(psu->led_name), "%s-%02x", client->name,
+ client->addr);
+ psu->led.name = psu->led_name;
+ psu->led.max_brightness = LED_FULL;
+ psu->led.brightness_set = ibm_cffps_led_brightness_set;
+ psu->led.blink_set = ibm_cffps_led_blink_set;
+
+ rc = devm_led_classdev_register(dev, &psu->led);
+ if (rc)
+ dev_warn(dev, "failed to register led class: %d\n", rc);
+}
+
static struct pmbus_driver_info ibm_cffps_info = {
.pages = 1,
.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
@@ -116,10 +343,69 @@ static struct pmbus_driver_info ibm_cffps_info = {
.read_word_data = ibm_cffps_read_word_data,
};
+static struct pmbus_platform_data ibm_cffps_pdata = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- return pmbus_do_probe(client, id, &ibm_cffps_info);
+ int i, rc;
+ struct dentry *debugfs;
+ struct dentry *ibm_cffps_dir;
+ struct ibm_cffps *psu;
+
+ client->dev.platform_data = &ibm_cffps_pdata;
+ rc = pmbus_do_probe(client, id, &ibm_cffps_info);
+ if (rc)
+ return rc;
+
+ /*
+ * Don't fail the probe if there isn't enough memory for leds and
+ * debugfs.
+ */
+ psu = devm_kzalloc(&client->dev, sizeof(*psu), GFP_KERNEL);
+ if (!psu)
+ return 0;
+
+ psu->client = client;
+ mutex_init(&psu->input_history.update_lock);
+ psu->input_history.last_update = jiffies - HZ;
+
+ ibm_cffps_create_led_class(psu);
+
+ /* Don't fail the probe if we can't create debugfs */
+ debugfs = pmbus_get_debugfs_dir(client);
+ if (!debugfs)
+ return 0;
+
+ ibm_cffps_dir = debugfs_create_dir(client->name, debugfs);
+ if (!ibm_cffps_dir)
+ return 0;
+
+ for (i = 0; i < CFFPS_DEBUGFS_NUM_ENTRIES; ++i)
+ psu->debugfs_entries[i] = i;
+
+ debugfs_create_file("input_history", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_INPUT_HISTORY],
+ &ibm_cffps_fops);
+ debugfs_create_file("fru", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FRU],
+ &ibm_cffps_fops);
+ debugfs_create_file("part_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_PN],
+ &ibm_cffps_fops);
+ debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_SN],
+ &ibm_cffps_fops);
+ debugfs_create_file("ccin", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
+ &ibm_cffps_fops);
+ debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FW],
+ &ibm_cffps_fops);
+
+ return 0;
}
static const struct i2c_device_id ibm_cffps_id[] = {
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index 8b906b44484b..977315b0fd90 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -25,168 +25,19 @@
#define IR35221_MFR_IOUT_VALLEY 0xcb
#define IR35221_MFR_TEMP_VALLEY 0xcc
-static long ir35221_reg2data(int data, enum pmbus_sensor_classes class)
-{
- s16 exponent;
- s32 mantissa;
- long val;
-
- /* We only modify LINEAR11 formats */
- exponent = ((s16)data) >> 11;
- mantissa = ((s16)((data & 0x7ff) << 5)) >> 5;
-
- val = mantissa * 1000L;
-
- /* scale result to micro-units for power sensors */
- if (class == PSC_POWER)
- val = val * 1000L;
-
- if (exponent >= 0)
- val <<= exponent;
- else
- val >>= -exponent;
-
- return val;
-}
-
-#define MAX_MANTISSA (1023 * 1000)
-#define MIN_MANTISSA (511 * 1000)
-
-static u16 ir35221_data2reg(long val, enum pmbus_sensor_classes class)
-{
- s16 exponent = 0, mantissa;
- bool negative = false;
-
- if (val == 0)
- return 0;
-
- if (val < 0) {
- negative = true;
- val = -val;
- }
-
- /* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
- val = DIV_ROUND_CLOSEST(val, 1000L);
-
- /* Reduce large mantissa until it fits into 10 bit */
- while (val >= MAX_MANTISSA && exponent < 15) {
- exponent++;
- val >>= 1;
- }
- /* Increase small mantissa to improve precision */
- while (val < MIN_MANTISSA && exponent > -15) {
- exponent--;
- val <<= 1;
- }
-
- /* Convert mantissa from milli-units to units */
- mantissa = DIV_ROUND_CLOSEST(val, 1000);
-
- /* Ensure that resulting number is within range */
- if (mantissa > 0x3ff)
- mantissa = 0x3ff;
-
- /* restore sign */
- if (negative)
- mantissa = -mantissa;
-
- /* Convert to 5 bit exponent, 11 bit mantissa */
- return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
-}
-
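
The two helpers deleted above implement PMBus LINEAR11 coding (5-bit signed exponent, 11-bit signed mantissa). A worked round trip through the same arithmetic, using kernel s16/s32 types, for the word 0xca80:

	/* decode: exponent = 0b11001 = -7, mantissa = 0x280 = 640 */
	s16 exponent = ((s16)0xca80) >> 11;			/* -7 */
	s32 mantissa = ((s16)((0xca80 & 0x7ff) << 5)) >> 5;	/* 640 */
	long val = (mantissa * 1000L) >> 7;	/* 5000 milli-units, i.e. 5.0 */

	/* encode 5000 milli-units: shifting left 7 times reaches 640000
	 * (>= MIN_MANTISSA), so exponent = -7 and mantissa = 640, giving
	 * (640 & 0x7ff) | ((-7 << 11) & 0xf800) = 0xca80 again */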
-static u16 ir35221_scale_result(s16 data, int shift,
- enum pmbus_sensor_classes class)
-{
- long val;
-
- val = ir35221_reg2data(data, class);
-
- if (shift < 0)
- val >>= -shift;
- else
- val <<= shift;
-
- return ir35221_data2reg(val, class);
-}
-
static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, 1, PSC_CURRENT_OUT);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- ret = ir35221_scale_result(ret, -4, PSC_VOLTAGE_IN);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_VIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_VIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
- break;
- case PMBUS_READ_IIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IIN);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -4, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -5, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_POUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_POUT);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_PIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_PIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_IOUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IOUT);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_OUT);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_OUT);
- break;
case PMBUS_VIRT_READ_VIN_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VIN_PEAK);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_IOUT_PEAK);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_TEMP_PEAK);
@@ -194,9 +45,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_VIN_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_VIN_VALLEY);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page,
@@ -205,12 +53,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_IOUT_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_IOUT_VALLEY);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MIN:
ret = pmbus_read_word_data(client, page,
@@ -224,36 +66,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int ir35221_write_word_data(struct i2c_client *client, int page, int reg,
- u16 word)
-{
- int ret;
- u16 val;
-
- switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, -1, PSC_CURRENT_OUT);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- val = ir35221_scale_result(word, 4, PSC_VOLTAGE_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, 1, PSC_CURRENT_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- default:
- ret = -ENODATA;
- break;
- }
-
- return ret;
-}
-
static int ir35221_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -292,7 +104,6 @@ static int ir35221_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
- info->write_word_data = ir35221_write_word_data;
info->read_word_data = ir35221_read_word_data;
info->pages = 2;
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 10d17fb8f283..53db78753a0d 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -1,5 +1,5 @@
/*
- * Hardware monitoring driver for LM25056 / LM25063 / LM25066 / LM5064 / LM5066
+ * Hardware monitoring driver for LM25056 / LM25066 / LM5064 / LM5066
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013 Guenter Roeck
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
+enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
@@ -53,11 +53,6 @@ enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
#define LM25056_MFR_STS_VAUX_OV_WARN BIT(1)
#define LM25056_MFR_STS_VAUX_UV_WARN BIT(0)
-/* LM25063 only */
-
-#define LM25063_READ_VOUT_MAX 0xe5
-#define LM25063_READ_VOUT_MIN 0xe6
-
struct __coeff {
short m, b, R;
};
@@ -122,36 +117,6 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
.m = 16,
},
},
- [lm25063] = {
- [PSC_VOLTAGE_IN] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_VOLTAGE_OUT] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_CURRENT_IN] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_CURRENT_IN_L] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_POWER] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_POWER_L] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_TEMPERATURE] = {
- .m = 15596,
- .R = -3,
- },
- },
[lm5064] = {
[PSC_VOLTAGE_IN] = {
.m = 4611,
@@ -272,10 +237,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
/* VIN: 6.14 mV VAUX: 293 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
break;
- case lm25063:
- /* VIN: 6.25 mV VAUX: 200.0 uV LSB */
- ret = DIV_ROUND_CLOSEST(ret * 20, 625);
- break;
case lm25066:
/* VIN: 4.54 mV VAUX: 283.2 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
@@ -330,24 +291,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int lm25063_read_word_data(struct i2c_client *client, int page, int reg)
-{
- int ret;
-
- switch (reg) {
- case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MAX);
- break;
- case PMBUS_VIRT_READ_VOUT_MIN:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MIN);
- break;
- default:
- ret = lm25066_read_word_data(client, page, reg);
- break;
- }
- return ret;
-}
-
static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
@@ -502,11 +445,6 @@ static int lm25066_probe(struct i2c_client *client,
info->read_word_data = lm25056_read_word_data;
info->read_byte_data = lm25056_read_byte_data;
data->rlimit = 0x0fff;
- } else if (data->id == lm25063) {
- info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
- | PMBUS_HAVE_POUT;
- info->read_word_data = lm25063_read_word_data;
- data->rlimit = 0xffff;
} else {
info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
info->read_word_data = lm25066_read_word_data;
@@ -543,7 +481,6 @@ static int lm25066_probe(struct i2c_client *client,
static const struct i2c_device_id lm25066_id[] = {
{"lm25056", lm25056},
- {"lm25063", lm25063},
{"lm25066", lm25066},
{"lm5064", lm5064},
{"lm5066", lm5066},
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 9313849d5160..c9dc8799b5e1 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -16,12 +16,231 @@
enum max31785_regs {
MFR_REVISION = 0x9b,
+ MFR_FAN_CONFIG = 0xf1,
};
+#define MAX31785 0x3030
+#define MAX31785A 0x3040
+
+#define MFR_FAN_CONFIG_DUAL_TACH BIT(12)
+
#define MAX31785_NR_PAGES 23
+#define MAX31785_NR_FAN_PAGES 6
+
+static int max31785_read_byte_data(struct i2c_client *client, int page,
+ int reg)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ return -ENOTSUPP;
+ case PMBUS_FAN_CONFIG_12:
+ return pmbus_read_byte_data(client, page - MAX31785_NR_PAGES,
+ reg);
+ }
+
+ return -ENODATA;
+}
+
+static int max31785_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ return -ENOTSUPP;
+}
+
+static int max31785_read_long_data(struct i2c_client *client, int page,
+ int reg, u32 *data)
+{
+ unsigned char cmdbuf[1];
+ unsigned char rspbuf[4];
+ int rc;
+
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(cmdbuf),
+ .buf = cmdbuf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(rspbuf),
+ .buf = rspbuf,
+ },
+ };
+
+ cmdbuf[0] = reg;
+
+ rc = pmbus_set_page(client, page);
+ if (rc < 0)
+ return rc;
+
+ rc = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (rc < 0)
+ return rc;
+
+ *data = (rspbuf[0] << (0 * 8)) | (rspbuf[1] << (1 * 8)) |
+ (rspbuf[2] << (2 * 8)) | (rspbuf[3] << (3 * 8));
+
+ return rc;
+}
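
The byte-by-byte assembly above is simply a little-endian 32-bit load; an equivalent one-liner (a sketch, assuming <asm/unaligned.h> is included) would be:

	*data = get_unaligned_le32(rspbuf);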
+
+static int max31785_get_pwm(struct i2c_client *client, int page)
+{
+ int rv;
+
+ rv = pmbus_get_fan_rate_device(client, page, 0, percent);
+ if (rv < 0)
+ return rv;
+ else if (rv >= 0x8000)
+ return 0;
+ else if (rv >= 0x2711)
+ return 0x2710;
+
+ return rv;
+}
+
+static int max31785_get_pwm_mode(struct i2c_client *client, int page)
+{
+ int config;
+ int command;
+
+ config = pmbus_read_byte_data(client, page, PMBUS_FAN_CONFIG_12);
+ if (config < 0)
+ return config;
+
+ command = pmbus_read_word_data(client, page, PMBUS_FAN_COMMAND_1);
+ if (command < 0)
+ return command;
+
+ if (config & PB_FAN_1_RPM)
+ return (command >= 0x8000) ? 3 : 2;
+
+ if (command >= 0x8000)
+ return 3;
+ else if (command >= 0x2711)
+ return 0;
+
+ return 1;
+}
+
+static int max31785_read_word_data(struct i2c_client *client, int page,
+ int reg)
+{
+ u32 val;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_READ_FAN_SPEED_1:
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ rv = max31785_read_long_data(client, page - MAX31785_NR_PAGES,
+ reg, &val);
+ if (rv < 0)
+ return rv;
+
+ rv = (val >> 16) & 0xffff;
+ break;
+ case PMBUS_FAN_COMMAND_1:
+ /*
+ * PMBUS_FAN_COMMAND_x is probed to judge whether or not to
+ * expose fan control registers.
+ *
+ * Don't expose fan_target attribute for virtual pages.
+ */
+ rv = (page >= MAX31785_NR_PAGES) ? -ENOTSUPP : -ENODATA;
+ break;
+ case PMBUS_VIRT_PWM_1:
+ rv = max31785_get_pwm(client, page);
+ break;
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ rv = max31785_get_pwm_mode(client, page);
+ break;
+ default:
+ rv = -ENODATA;
+ break;
+ }
+
+ return rv;
+}
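
To make the dual-tach path concrete: for a virtual page (page >= MAX31785_NR_PAGES) the 32-bit READ_FAN_SPEED_1 response is assumed to carry the second rotor in its upper word, so a raw long read of 0x0bb80dac would report 0x0bb8 (3000 RPM) on the virtual page, while the ordinary word read on the corresponding physical page returns 0x0dac (3500 RPM).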
+
+static inline u32 max31785_scale_pwm(u32 sensor_val)
+{
+ /*
+ * The datasheet describes the accepted value range for manual PWM as
+ * [0, 0x2710], while the hwmon pwmX sysfs interface accepts values in
+ * [0, 255]. The MAX31785 uses DIRECT mode to scale the FAN_COMMAND
+ * registers and in PWM mode the coefficients are m=1, b=0, R=2. The
+ * important observation here is that 0x2710 == 10000 == 100 * 100.
+ *
+ * R=2 (== 10^2 == 100) accounts for scaling the value provided at the
+ * sysfs interface into the required hardware resolution, but it does
+ * not yet yield a value that we can write to the device (this initial
+	 * scaling is handled by pmbus_data2reg()). Multiplying by 100 below
+	 * translates the value into the percentage units required by PMBus,
+	 * and dividing by 255 maps the hwmon pwmX range [0, 255] back onto
+	 * [0, 100], yielding a FAN_COMMAND value at the hardware's resolution.
+ */
+ return (sensor_val * 100) / 255;
+}
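
A worked example of the write path, tracing the arithmetic only:

	/* pwm1 = 255 (sysfs) -> pmbus_data2reg(): (1 * 255 + 0) * 10^2 = 25500 */
	/* max31785_scale_pwm(25500) = (25500 * 100) / 255 = 10000 = 0x2710, 100% */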
+
+static int max31785_pwm_enable(struct i2c_client *client, int page,
+ u16 word)
+{
+ int config = 0;
+ int rate;
+
+ switch (word) {
+ case 0:
+ rate = 0x7fff;
+ break;
+ case 1:
+ rate = pmbus_get_fan_rate_cached(client, page, 0, percent);
+ if (rate < 0)
+ return rate;
+ rate = max31785_scale_pwm(rate);
+ break;
+ case 2:
+ config = PB_FAN_1_RPM;
+ rate = pmbus_get_fan_rate_cached(client, page, 0, rpm);
+ if (rate < 0)
+ return rate;
+ break;
+ case 3:
+ rate = 0xffff;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pmbus_update_fan(client, page, 0, config, PB_FAN_1_RPM, rate);
+}
+
+static int max31785_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ switch (reg) {
+ case PMBUS_VIRT_PWM_1:
+ return pmbus_update_fan(client, page, 0, 0, PB_FAN_1_RPM,
+ max31785_scale_pwm(word));
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ return max31785_pwm_enable(client, page, word);
+ default:
+ break;
+ }
+
+ return -ENODATA;
+}
#define MAX31785_FAN_FUNCS \
- (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12)
+ (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_PWM12)
#define MAX31785_TEMP_FUNCS \
(PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
@@ -29,14 +248,26 @@ enum max31785_regs {
#define MAX31785_VOUT_FUNCS \
(PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
+#define MAX31785_NUM_FAN_PAGES 6
+
static const struct pmbus_driver_info max31785_info = {
.pages = MAX31785_NR_PAGES,
+ .write_word_data = max31785_write_word_data,
+ .read_byte_data = max31785_read_byte_data,
+ .read_word_data = max31785_read_word_data,
+ .write_byte = max31785_write_byte,
+
/* RPM */
.format[PSC_FAN] = direct,
.m[PSC_FAN] = 1,
.b[PSC_FAN] = 0,
.R[PSC_FAN] = 0,
+ /* PWM */
+ .format[PSC_PWM] = direct,
+ .m[PSC_PWM] = 1,
+ .b[PSC_PWM] = 0,
+ .R[PSC_PWM] = 2,
.func[0] = MAX31785_FAN_FUNCS,
.func[1] = MAX31785_FAN_FUNCS,
.func[2] = MAX31785_FAN_FUNCS,
@@ -72,13 +303,46 @@ static const struct pmbus_driver_info max31785_info = {
.func[22] = MAX31785_VOUT_FUNCS,
};
+static int max31785_configure_dual_tach(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX31785_NR_FAN_PAGES; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_FAN_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MFR_FAN_CONFIG_DUAL_TACH) {
+ int virtual = MAX31785_NR_PAGES + i;
+
+ info->pages = virtual + 1;
+ info->func[virtual] |= PMBUS_HAVE_FAN12;
+ info->func[virtual] |= PMBUS_PAGE_VIRTUAL;
+ }
+ }
+
+ return 0;
+}
+
static int max31785_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct pmbus_driver_info *info;
+ bool dual_tach = false;
s64 ret;
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -89,6 +353,25 @@ static int max31785_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ ret = i2c_smbus_read_word_data(client, MFR_REVISION);
+ if (ret < 0)
+ return ret;
+
+ if (ret == MAX31785A) {
+ dual_tach = true;
+ } else if (ret == MAX31785) {
+ if (!strcmp("max31785a", id->name))
+ dev_warn(dev, "Expected max3175a, found max31785: cannot provide secondary tachometer readings\n");
+ } else {
+ return -ENODEV;
+ }
+
+ if (dual_tach) {
+ ret = max31785_configure_dual_tach(client, info);
+ if (ret < 0)
+ return ret;
+ }
+
return pmbus_do_probe(client, id, info);
}
@@ -100,9 +383,18 @@ static const struct i2c_device_id max31785_id[] = {
MODULE_DEVICE_TABLE(i2c, max31785_id);
+static const struct of_device_id max31785_of_match[] = {
+ { .compatible = "maxim,max31785" },
+ { .compatible = "maxim,max31785a" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max31785_of_match);
+
static struct i2c_driver max31785_driver = {
.driver = {
.name = "max31785",
+ .of_match_table = max31785_of_match,
},
.probe = max31785_probe,
.remove = pmbus_do_remove,
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index fa613bd209e3..1d24397d36ec 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -190,6 +190,33 @@ enum pmbus_regs {
PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
PMBUS_VIRT_STATUS_VMON,
+
+ /*
+ * RPM and PWM Fan control
+ *
+ * Drivers wanting to expose PWM control must define the behaviour of
+ * PMBUS_VIRT_PWM_[1-4] and PMBUS_VIRT_PWM_ENABLE_[1-4] in the
+ * {read,write}_word_data callback.
+ *
+ * pmbus core provides a default implementation for
+ * PMBUS_VIRT_FAN_TARGET_[1-4].
+ *
+ * TARGET, PWM and PWM_ENABLE members must be defined sequentially;
+ * pmbus core uses the difference between the provided register and
+ * its _1 counterpart to calculate the FAN/PWM ID.
+ */
+ PMBUS_VIRT_FAN_TARGET_1,
+ PMBUS_VIRT_FAN_TARGET_2,
+ PMBUS_VIRT_FAN_TARGET_3,
+ PMBUS_VIRT_FAN_TARGET_4,
+ PMBUS_VIRT_PWM_1,
+ PMBUS_VIRT_PWM_2,
+ PMBUS_VIRT_PWM_3,
+ PMBUS_VIRT_PWM_4,
+ PMBUS_VIRT_PWM_ENABLE_1,
+ PMBUS_VIRT_PWM_ENABLE_2,
+ PMBUS_VIRT_PWM_ENABLE_3,
+ PMBUS_VIRT_PWM_ENABLE_4,
};
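
Because the TARGET/PWM/PWM_ENABLE members are laid out sequentially, a driver can derive the fan index from the register offset. A minimal sketch (foo_get_pwm is a hypothetical helper):

static int foo_read_word_data(struct i2c_client *client, int page, int reg)
{
	switch (reg) {
	case PMBUS_VIRT_PWM_1 ... PMBUS_VIRT_PWM_4:
		/* the offset from the _1 member selects the fan/PWM id */
		return foo_get_pwm(client, page, reg - PMBUS_VIRT_PWM_1);
	default:
		return -ENODATA;	/* let the pmbus core handle it */
	}
}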
/*
@@ -223,6 +250,8 @@ enum pmbus_regs {
#define PB_FAN_1_RPM BIT(6)
#define PB_FAN_1_INSTALLED BIT(7)
+enum pmbus_fan_mode { percent = 0, rpm };
+
/*
* STATUS_BYTE, STATUS_WORD (lower)
*/
@@ -313,6 +342,7 @@ enum pmbus_sensor_classes {
PSC_POWER,
PSC_TEMPERATURE,
PSC_FAN,
+ PSC_PWM,
PSC_NUM_CLASSES /* Number of power sensor classes */
};
@@ -339,6 +369,10 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_FAN34 BIT(17)
#define PMBUS_HAVE_VMON BIT(18)
#define PMBUS_HAVE_STATUS_VMON BIT(19)
+#define PMBUS_HAVE_PWM12 BIT(20)
+#define PMBUS_HAVE_PWM34 BIT(21)
+
+#define PMBUS_PAGE_VIRTUAL BIT(31)
enum pmbus_data_format { linear = 0, direct, vid };
enum vrm_version { vr11 = 0, vr12, vr13 };
@@ -421,5 +455,12 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
int pmbus_do_remove(struct i2c_client *client);
const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
*client);
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client);
#endif /* PMBUS_H */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a139940cd991..f7c47d7994e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -65,6 +65,7 @@ struct pmbus_sensor {
u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
+ bool convert; /* Whether or not to apply linear/vid/direct */
int data; /* Sensor data.
Negative if there was a read error */
};
@@ -129,6 +130,27 @@ struct pmbus_debugfs_entry {
u8 reg;
};
+static const int pmbus_fan_rpm_mask[] = {
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+};
+
+static const int pmbus_fan_config_registers[] = {
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_34,
+ PMBUS_FAN_CONFIG_34
+};
+
+static const int pmbus_fan_command_registers[] = {
+ PMBUS_FAN_COMMAND_1,
+ PMBUS_FAN_COMMAND_2,
+ PMBUS_FAN_COMMAND_3,
+ PMBUS_FAN_COMMAND_4,
+};
+
void pmbus_clear_cache(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -140,18 +162,27 @@ EXPORT_SYMBOL_GPL(pmbus_clear_cache);
int pmbus_set_page(struct i2c_client *client, int page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
- int rv = 0;
- int newpage;
+ int rv;
- if (page >= 0 && page != data->currpage) {
+ if (page < 0 || page == data->currpage)
+ return 0;
+
+ if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL)) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
- if (newpage != page)
- rv = -EIO;
- else
- data->currpage = page;
+ if (rv < 0)
+ return rv;
+
+ rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ if (rv < 0)
+ return rv;
+
+ if (rv != page)
+ return -EIO;
}
- return rv;
+
+ data->currpage = page;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(pmbus_set_page);
@@ -198,6 +229,28 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
}
EXPORT_SYMBOL_GPL(pmbus_write_word_data);
+
+static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int bit;
+ int id;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ bit = pmbus_fan_rpm_mask[id];
+ rv = pmbus_update_fan(client, page, id, bit, bit, word);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_write_word_data() is similar to pmbus_write_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -214,11 +267,38 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_write_virt_reg(client, page, reg, word);
+
return pmbus_write_word_data(client, page, reg, word);
}
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command)
+{
+ int from;
+ int rv;
+ u8 to;
+
+ from = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (from < 0)
+ return from;
+
+ to = (from & ~mask) | (config & mask);
+ if (to != from) {
+ rv = pmbus_write_byte_data(client, page,
+ pmbus_fan_config_registers[id], to);
+ if (rv < 0)
+ return rv;
+ }
+
+ return _pmbus_write_word_data(client, page,
+ pmbus_fan_command_registers[id], command);
+}
+EXPORT_SYMBOL_GPL(pmbus_update_fan);
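
A typical caller, sketched with illustrative values: switching fan 0 on a page to closed-loop RPM control at 3000 RPM sets and masks the RPM bit in one read-modify-write:

	rc = pmbus_update_fan(client, page, 0, PB_FAN_1_RPM, PB_FAN_1_RPM, 3000);

Passing config = 0 with the same mask would clear the bit and select PWM (percent) mode instead.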
+
int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
@@ -231,6 +311,24 @@ int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
}
EXPORT_SYMBOL_GPL(pmbus_read_word_data);
+static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ int id;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ rv = pmbus_get_fan_rate_device(client, page, id, rpm);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -246,8 +344,10 @@ static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_read_virt_reg(client, page, reg);
+
return pmbus_read_word_data(client, page, reg);
}
@@ -312,6 +412,68 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
return pmbus_read_byte_data(client, page, reg);
}
+static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page,
+ int reg)
+{
+ struct pmbus_sensor *sensor;
+
+ for (sensor = data->sensors; sensor; sensor = sensor->next) {
+ if (sensor->page == page && sensor->reg == reg)
+ return sensor;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode,
+ bool from_cache)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ bool want_rpm, have_rpm;
+ struct pmbus_sensor *s;
+ int config;
+ int reg;
+
+ want_rpm = (mode == rpm);
+
+ if (from_cache) {
+ reg = want_rpm ? PMBUS_VIRT_FAN_TARGET_1 : PMBUS_VIRT_PWM_1;
+ s = pmbus_find_sensor(data, page, reg + id);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ return s->data;
+ }
+
+ config = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (config < 0)
+ return config;
+
+ have_rpm = !!(config & pmbus_fan_rpm_mask[id]);
+ if (want_rpm == have_rpm)
+ return pmbus_read_word_data(client, page,
+ pmbus_fan_command_registers[id]);
+
+ /* Can't sensibly map between RPM and PWM, just return zero */
+ return 0;
+}
+
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, false);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_device);
+
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, true);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_cached);
+
static void pmbus_clear_fault_page(struct i2c_client *client, int page)
{
_pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
@@ -513,7 +675,7 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
/* X = 1/m * (Y * 10^-R - b) */
R = -R;
/* scale result to milli-units for everything but fans */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R += 3;
b *= 1000;
}
@@ -568,6 +730,9 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
{
long val;
+ if (!sensor->convert)
+ return sensor->data;
+
switch (data->info->format[sensor->class]) {
case direct:
val = pmbus_reg2data_direct(data, sensor);
@@ -672,7 +837,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
/* Calculate Y = (m * X + b) * 10^R */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -703,6 +868,9 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
{
u16 regval;
+ if (!sensor->convert)
+ return val;
+
switch (data->info->format[sensor->class]) {
case direct:
regval = pmbus_data2reg_direct(data, sensor, val);
@@ -915,7 +1083,8 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
const char *name, const char *type,
int seq, int page, int reg,
enum pmbus_sensor_classes class,
- bool update, bool readonly)
+ bool update, bool readonly,
+ bool convert)
{
struct pmbus_sensor *sensor;
struct device_attribute *a;
@@ -925,12 +1094,18 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
return NULL;
a = &sensor->attribute;
- snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
- name, seq, type);
+ if (type)
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
+ name, seq, type);
+ else
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d",
+ name, seq);
+
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
sensor->update = update;
+ sensor->convert = convert;
pmbus_dev_attr_init(a, sensor->name,
readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
pmbus_show_sensor, pmbus_set_sensor);
@@ -1029,7 +1204,7 @@ static int pmbus_add_limit_attrs(struct i2c_client *client,
curr = pmbus_add_sensor(data, name, l->attr, index,
page, l->reg, attr->class,
attr->update || l->update,
- false);
+ false, true);
if (!curr)
return -ENOMEM;
if (l->sbit && (info->func[page] & attr->sfunc)) {
@@ -1068,7 +1243,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
return ret;
}
base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
- attr->class, true, true);
+ attr->class, true, true, true);
if (!base)
return -ENOMEM;
if (attr->sfunc) {
@@ -1592,13 +1767,6 @@ static const int pmbus_fan_registers[] = {
PMBUS_READ_FAN_SPEED_4
};
-static const int pmbus_fan_config_registers[] = {
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_34,
- PMBUS_FAN_CONFIG_34
-};
-
static const int pmbus_fan_status_registers[] = {
PMBUS_STATUS_FAN_12,
PMBUS_STATUS_FAN_12,
@@ -1621,6 +1789,42 @@ static const u32 pmbus_fan_status_flags[] = {
};
/* Fans */
+
+/* Precondition: FAN_CONFIG_x_y and FAN_COMMAND_x must exist for the fan ID */
+static int pmbus_add_fan_ctrl(struct i2c_client *client,
+ struct pmbus_data *data, int index, int page, int id,
+ u8 config)
+{
+ struct pmbus_sensor *sensor;
+
+ sensor = pmbus_add_sensor(data, "fan", "target", index, page,
+ PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ if (!((data->info->func[page] & PMBUS_HAVE_PWM12) ||
+ (data->info->func[page] & PMBUS_HAVE_PWM34)))
+ return 0;
+
+ sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
+ PMBUS_VIRT_PWM_1 + id, PSC_PWM,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
+ PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
+ true, false, false);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int pmbus_add_fan_attributes(struct i2c_client *client,
struct pmbus_data *data)
{
@@ -1655,9 +1859,18 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
if (pmbus_add_sensor(data, "fan", "input", index,
page, pmbus_fan_registers[f],
- PSC_FAN, true, true) == NULL)
+ PSC_FAN, true, true, true) == NULL)
return -ENOMEM;
+ /* Fan control */
+ if (pmbus_check_word_register(client, page,
+ pmbus_fan_command_registers[f])) {
+ ret = pmbus_add_fan_ctrl(client, data, index,
+ page, f, regval);
+ if (ret < 0)
+ return ret;
+ }
+
/*
* Each fan status register covers multiple fans,
* so we have to do some magic.
@@ -2168,6 +2381,14 @@ int pmbus_do_remove(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ return data->debugfs;
+}
+EXPORT_SYMBOL_GPL(pmbus_get_debugfs_dir);
+
static int __init pmbus_core_init(void)
{
pmbus_debugfs_dir = debugfs_create_dir("pmbus", NULL);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 25d28343ba93..2be77752cd56 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -179,6 +179,7 @@ struct sht15_data {
* sht15_crc8() - compute crc8
* @data: sht15 specific data.
* @value: sht15 retrieved data.
+ * @len: Length of retrieved data
*
* This implements section 2 of the CRC datasheet.
*/
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 06706d288355..190e7b39ce32 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -41,7 +41,7 @@
/**
* struct sht21 - SHT21 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: I2C client device
* @lock: mutex to protect measurement values
* @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 6ea99cd6ae79..370b57dafab7 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -732,6 +732,13 @@ static int sht3x_probe(struct i2c_client *client,
mutex_init(&data->i2c_lock);
mutex_init(&data->data_lock);
+ /*
+	 * An attempt to read the limits register too early
+	 * causes a NACK response from the chip.
+	 * An empirically determined delay of 500 us solves the issue.
+ */
+ usleep_range(500, 600);
+
ret = limits_update(data);
if (ret)
return ret;
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
new file mode 100644
index 000000000000..e858093ac806
--- /dev/null
+++ b/drivers/hwmon/w83773g.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Driver for the Nuvoton W83773G SMBus temperature sensor IC.
+ * Supported models: W83773G
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/* W83773 has 3 channels */
+#define W83773_CHANNELS 3
+
+/* The W83773 registers */
+#define W83773_CONVERSION_RATE_REG_READ 0x04
+#define W83773_CONVERSION_RATE_REG_WRITE 0x0A
+#define W83773_MANUFACTURER_ID_REG 0xFE
+#define W83773_LOCAL_TEMP 0x00
+
+static const u8 W83773_STATUS[2] = { 0x02, 0x17 };
+
+static const u8 W83773_TEMP_LSB[2] = { 0x10, 0x25 };
+static const u8 W83773_TEMP_MSB[2] = { 0x01, 0x24 };
+
+static const u8 W83773_OFFSET_LSB[2] = { 0x12, 0x16 };
+static const u8 W83773_OFFSET_MSB[2] = { 0x11, 0x15 };
+
+/* I2C device IDs supported by this driver */
+static const struct i2c_device_id w83773_id[] = {
+ { "w83773g" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, w83773_id);
+
+static const struct of_device_id w83773_of_match[] = {
+ {
+ .compatible = "nuvoton,w83773g"
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, w83773_of_match);
+
+static inline long temp_of_local(s8 reg)
+{
+ return reg * 1000;
+}
+
+static inline long temp_of_remote(s8 hb, u8 lb)
+{
+ return (hb << 3 | lb >> 5) * 125;
+}
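
The remote channels report an 11-bit value at 0.125 degC per LSB; for example hb = 0x19 and lb = 0xe0 give (25 << 3 | 7) * 125 = 25875, i.e. 25.875 degC in millidegrees.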
+
+static int get_local_temp(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_LOCAL_TEMP, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_local(regval);
+ return 0;
+}
+
+static int get_remote_temp(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int get_fault(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_STATUS[index], &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = (regval & 0x04) >> 2;
+ return 0;
+}
+
+static int get_offset(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int set_offset(struct regmap *regmap, int index, long val)
+{
+ int ret;
+ u8 high_byte;
+ u8 low_byte;
+
+ val = clamp_val(val, -127825, 127825);
+ /* offset value equals to (high_byte << 3 | low_byte >> 5) * 125 */
+ val /= 125;
+ high_byte = val >> 3;
+ low_byte = (val & 0x07) << 5;
+
+ ret = regmap_write(regmap, W83773_OFFSET_MSB[index], high_byte);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(regmap, W83773_OFFSET_LSB[index], low_byte);
+}
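
Worked example: val = 5125 gives 5125 / 125 = 41, so high_byte = 41 >> 3 = 5 and low_byte = (41 & 7) << 5 = 0x20; decoding with temp_of_remote(5, 0x20) yields (40 | 1) * 125 = 5125 again.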
+
+static int get_update_interval(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_CONVERSION_RATE_REG_READ, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = 16000 >> regval;
+ return 0;
+}
+
+static int set_update_interval(struct regmap *regmap, long val)
+{
+ int rate;
+
+ /*
+ * For valid rates, interval can be calculated as
+ * interval = (1 << (8 - rate)) * 62.5;
+ * Rounded rate is therefore
+ * rate = 8 - __fls(interval * 8 / (62.5 * 7));
+ * Use clamp_val() to avoid overflows, and to ensure valid input
+ * for __fls.
+ */
+ val = clamp_val(val, 62, 16000) * 10;
+ rate = 8 - __fls((val * 8 / (625 * 7)));
+ return regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, rate);
+}
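
Worked example: val = 2000 ms clamps to 2000 and scales to 20000; 20000 * 8 / 4375 = 36, __fls(36) = 5, so rate = 3. The programmed interval is (1 << (8 - 3)) * 62.5 = 2000 ms, and get_update_interval() reads it back as 16000 >> 3 = 2000.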
+
+static int w83773_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval)
+ return get_update_interval(regmap, val);
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (channel == 0)
+ return get_local_temp(regmap, val);
+ return get_remote_temp(regmap, channel - 1, val);
+ case hwmon_temp_fault:
+ return get_fault(regmap, channel - 1, val);
+ case hwmon_temp_offset:
+ return get_offset(regmap, channel - 1, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int w83773_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+ return set_update_interval(regmap, val);
+
+ if (type == hwmon_temp && attr == hwmon_temp_offset)
+ return set_offset(regmap, channel - 1, val);
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t w83773_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_offset:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const u32 w83773_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_chip = {
+ .type = hwmon_chip,
+ .config = w83773_chip_config,
+};
+
+static const u32 w83773_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_temp = {
+ .type = hwmon_temp,
+ .config = w83773_temp_config,
+};
+
+static const struct hwmon_channel_info *w83773_info[] = {
+ &w83773_chip,
+ &w83773_temp,
+ NULL
+};
+
+static const struct hwmon_ops w83773_ops = {
+ .is_visible = w83773_is_visible,
+ .read = w83773_read,
+ .write = w83773_write,
+};
+
+static const struct hwmon_chip_info w83773_chip_info = {
+ .ops = &w83773_ops,
+ .info = w83773_info,
+};
+
+static const struct regmap_config w83773_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int w83773_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &w83773_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Set the conversion rate to 2 Hz */
+ ret = regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, 0x05);
+ if (ret < 0) {
+ dev_err(&client->dev, "error writing config rate register\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, regmap);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name,
+ regmap,
+ &w83773_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct i2c_driver w83773_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "w83773g",
+ .of_match_table = of_match_ptr(w83773_of_match),
+ },
+ .probe = w83773_probe,
+ .id_table = w83773_id,
+};
+
+module_i2c_driver(w83773_driver);
+
+MODULE_AUTHOR("Lei YU <mine260309@gmail.com>");
+MODULE_DESCRIPTION("W83773G temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index a18794128bf8..7c375443ede6 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -104,26 +104,17 @@ static int of_coresight_alloc_memory(struct device *dev,
int of_coresight_get_cpu(const struct device_node *node)
{
int cpu;
- bool found;
- struct device_node *dn, *np;
+ struct device_node *dn;
dn = of_parse_phandle(node, "cpu", 0);
-
/* Affinity defaults to CPU0 */
if (!dn)
return 0;
-
- for_each_possible_cpu(cpu) {
- np = of_cpu_device_node_get(cpu);
- found = (dn == np);
- of_node_put(np);
- if (found)
- break;
- }
+ cpu = of_cpu_node_to_id(dn);
of_node_put(dn);
/* Affinity to CPU0 if no cpu nodes are found */
- return found ? cpu : 0;
+ return (cpu < 0) ? 0 : cpu;
}
EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 21bf619a86c5..9fee4c054d3d 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -280,8 +280,6 @@ struct dw_i2c_dev {
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_disabled;
- bool suspended;
- bool skip_resume;
void (*disable)(struct dw_i2c_dev *dev);
void (*disable_int)(struct dw_i2c_dev *dev);
int (*init)(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 58add69a441c..153b947702c5 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -42,6 +42,7 @@
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include "i2c-designware-core.h"
@@ -372,6 +373,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
/* The code below assumes runtime PM to be disabled. */
WARN_ON(pm_runtime_enabled(&pdev->dev));
@@ -435,12 +441,24 @@ MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_prepare(struct device *dev)
{
- return pm_runtime_suspended(dev);
+ /*
+ * If the ACPI companion device object is present for this device, it
+ * may be accessed during suspend and resume of other devices via I2C
+ * operation regions, so tell the PM core and middle layers to avoid
+ * skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(dev);
}
static void dw_i2c_plat_complete(struct device *dev)
{
- if (dev->power.direct_complete)
+ /*
+ * The device can only be in runtime suspend at this point if it has not
+ * been resumed throughout the ending system suspend/resume cycle, so if
+ * the platform firmware might have meddled with it, ask the runtime PM
+ * framework to resume it.
+ */
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
#else
@@ -453,16 +471,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (i_dev->suspended) {
- i_dev->skip_resume = true;
- return 0;
- }
-
i_dev->disable(i_dev);
i2c_dw_plat_prepare_clk(i_dev, false);
- i_dev->suspended = true;
-
return 0;
}
@@ -470,19 +481,9 @@ static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (!i_dev->suspended)
- return 0;
-
- if (i_dev->skip_resume) {
- i_dev->skip_resume = false;
- return 0;
- }
-
i2c_dw_plat_prepare_clk(i_dev, true);
i_dev->init(i_dev);
- i_dev->suspended = false;
-
return 0;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 706164b4c5be..f7829a74140c 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -821,8 +821,12 @@ void i2c_unregister_device(struct i2c_client *client)
{
if (!client)
return;
- if (client->dev.of_node)
+
+ if (client->dev.of_node) {
of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+ of_node_put(client->dev.of_node);
+ }
+
if (ACPI_COMPANION(&client->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
device_unregister(&client->dev);
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 4bb9927afd01..a1082c04ac5c 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -397,16 +397,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
the underlying bus driver */
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&adapter->dev, "Invalid block %s size %d\n",
+ read_write == I2C_SMBUS_READ ? "read" : "write",
+ data->block[0]);
+ return -EINVAL;
+ }
+
if (read_write == I2C_SMBUS_READ) {
msg[1].len = data->block[0];
} else {
msg[0].len = data->block[0] + 1;
- if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
- dev_err(&adapter->dev,
- "Invalid block write size %d\n",
- data->block[0]);
- return -EINVAL;
- }
for (i = 1; i <= data->block[0]; i++)
msgbuf0[i] = data->block[i];
}
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 4efe4c6e956c..abe0822dd429 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -232,7 +232,6 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
unsigned int offset;
u8 *buf;
- cursg = cmd->cursg;
if (cursg == NULL)
cursg = cmd->cursg = sg;
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index f85014fbaa12..8ffc308d5fd0 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -81,9 +81,21 @@ static const struct i2c_device_id bmc150_accel_id[] = {
MODULE_DEVICE_TABLE(i2c, bmc150_accel_id);
+static const struct of_device_id bmc150_accel_of_match[] = {
+ { .compatible = "bosch,bmc150_accel" },
+ { .compatible = "bosch,bmi055_accel" },
+ { .compatible = "bosch,bma255" },
+ { .compatible = "bosch,bma250e" },
+ { .compatible = "bosch,bma222e" },
+ { .compatible = "bosch,bma280" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bmc150_accel_of_match);
+
static struct i2c_driver bmc150_accel_driver = {
.driver = {
.name = "bmc150_accel_i2c",
+ .of_match_table = bmc150_accel_of_match,
.acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match),
.pm = &bmc150_accel_pm_ops,
},
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
index 6c214783241c..d4b555203427 100644
--- a/drivers/iio/accel/da280.c
+++ b/drivers/iio/accel/da280.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/acpi.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/byteorder/generic.h>
@@ -25,7 +26,7 @@
#define DA280_MODE_ENABLE 0x1e
#define DA280_MODE_DISABLE 0x9e
-enum { da226, da280 };
+enum da280_chipset { da226, da280 };
/*
* a value of + or -4096 corresponds to + or - 1G
@@ -91,12 +92,24 @@ static const struct iio_info da280_info = {
.read_raw = da280_read_raw,
};
+static enum da280_chipset da280_match_acpi_device(struct device *dev)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return -EINVAL;
+
+ return (enum da280_chipset) id->driver_data;
+}
+
static int da280_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret;
struct iio_dev *indio_dev;
struct da280_data *data;
+ enum da280_chipset chip;
ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
if (ret != DA280_CHIP_ID)
@@ -114,7 +127,14 @@ static int da280_probe(struct i2c_client *client,
indio_dev->info = &da280_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = da280_channels;
- if (id->driver_data == da226) {
+
+ if (ACPI_HANDLE(&client->dev)) {
+ chip = da280_match_acpi_device(&client->dev);
+ } else {
+ chip = id->driver_data;
+ }
+
+ if (chip == da226) {
indio_dev->name = "da226";
indio_dev->num_channels = 2;
} else {
@@ -158,6 +178,12 @@ static int da280_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
+static const struct acpi_device_id da280_acpi_match[] = {
+ {"MIRAACC", da280},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, da280_acpi_match);
+
static const struct i2c_device_id da280_i2c_id[] = {
{ "da226", da226 },
{ "da280", da280 },
@@ -168,6 +194,7 @@ MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
static struct i2c_driver da280_driver = {
.driver = {
.name = "da280",
+ .acpi_match_table = ACPI_PTR(da280_acpi_match),
.pm = &da280_pm_ops,
},
.probe = da280_probe,
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index 98fbb628d5bd..38411e1c155b 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -63,3 +63,6 @@ static struct i2c_driver kxsd9_i2c_driver = {
.id_table = kxsd9_i2c_id,
};
module_i2c_driver(kxsd9_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("KXSD9 accelerometer I2C interface");
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index bfd4bc806fc2..7a2da7f9d4dc 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mma8452.c - Support for following Freescale / NXP 3-axis accelerometers:
*
@@ -13,9 +14,6 @@
* Copyright 2015 Martin Kepplinger <martink@posteo.de>
* Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
*
- * This file is subject to the terms and conditions of version 2 of
- * the GNU General Public License. See the file COPYING in the main
- * directory of this archive for more details.
*
* TODO: orientation events
*/
@@ -135,7 +133,7 @@ struct mma8452_event_regs {
u8 ev_count;
};
-static const struct mma8452_event_regs ev_regs_accel_falling = {
+static const struct mma8452_event_regs ff_mt_ev_regs = {
.ev_cfg = MMA8452_FF_MT_CFG,
.ev_cfg_ele = MMA8452_FF_MT_CFG_ELE,
.ev_cfg_chan_shift = MMA8452_FF_MT_CHAN_SHIFT,
@@ -145,7 +143,7 @@ static const struct mma8452_event_regs ev_regs_accel_falling = {
.ev_count = MMA8452_FF_MT_COUNT
};
-static const struct mma8452_event_regs ev_regs_accel_rising = {
+static const struct mma8452_event_regs trans_ev_regs = {
.ev_cfg = MMA8452_TRANSIENT_CFG,
.ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
.ev_cfg_chan_shift = MMA8452_TRANSIENT_CHAN_SHIFT,
@@ -284,7 +282,7 @@ static const int mma8452_samp_freq[8][2] = {
};
/* Datasheet table: step time "Relationship with the ODR" (sample frequency) */
-static const unsigned int mma8452_transient_time_step_us[4][8] = {
+static const unsigned int mma8452_time_step_us[4][8] = {
{ 1250, 2500, 5000, 10000, 20000, 20000, 20000, 20000 }, /* normal */
{ 1250, 2500, 5000, 10000, 20000, 80000, 80000, 80000 }, /* l p l n */
{ 1250, 2500, 2500, 2500, 2500, 2500, 2500, 2500 }, /* high res*/
@@ -777,12 +775,12 @@ static int mma8452_get_event_regs(struct mma8452_data *data,
& MMA8452_INT_TRANS) &&
(data->chip_info->enabled_events
& MMA8452_INT_TRANS))
- *ev_reg = &ev_regs_accel_rising;
+ *ev_reg = &trans_ev_regs;
else
- *ev_reg = &ev_regs_accel_falling;
+ *ev_reg = &ff_mt_ev_regs;
return 0;
case IIO_EV_DIR_FALLING:
- *ev_reg = &ev_regs_accel_falling;
+ *ev_reg = &ff_mt_ev_regs;
return 0;
default:
return -EINVAL;
@@ -826,7 +824,7 @@ static int mma8452_read_event_value(struct iio_dev *indio_dev,
if (power_mode < 0)
return power_mode;
- us = ret * mma8452_transient_time_step_us[power_mode][
+ us = ret * mma8452_time_step_us[power_mode][
mma8452_get_odr_index(data)];
*val = us / USEC_PER_SEC;
*val2 = us % USEC_PER_SEC;
@@ -883,7 +881,7 @@ static int mma8452_write_event_value(struct iio_dev *indio_dev,
return ret;
steps = (val * USEC_PER_SEC + val2) /
- mma8452_transient_time_step_us[ret][
+ mma8452_time_step_us[ret][
mma8452_get_odr_index(data)];
if (steps < 0 || steps > 0xff)
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 460aa58e0159..6fe995cf16a6 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -920,8 +920,6 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata =
- (struct st_sensors_platform_data *)adata->dev->platform_data;
int irq = adata->get_irq_data_ready(indio_dev);
int err;
@@ -948,9 +946,6 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
&adata->sensor_settings->fs.fs_avl[0];
adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
- if (!pdata)
- pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
-
err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
if (err < 0)
goto st_accel_power_off;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ef86296b8b0d..72bc2b71765a 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
tristate "Atmel AT91 SAMA5D2 ADC"
depends on ARCH_AT91 || COMPILE_TEST
depends on HAS_IOMEM
+ depends on HAS_DMA
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Atmel SAMA5D2 ADC which is
@@ -318,6 +319,8 @@ config HI8435
config HX711
tristate "AVIA HX711 ADC for weight cells"
depends on GPIOLIB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for AVIA HX711 ADC which is used
for weight cells
@@ -629,6 +632,18 @@ config SPEAR_ADC
To compile this driver as a module, choose M here: the
module will be called spear_adc.
+config SD_ADC_MODULATOR
+ tristate "Generic sigma delta modulator"
+ depends on OF
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+	  Select this option to enable the generic sigma delta modulator
+	  driver, which supports generic sigma delta modulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called sd_adc_modulator.
+
config STM32_ADC_CORE
tristate "STMicroelectronics STM32 adc core"
depends on ARCH_STM32 || COMPILE_TEST
@@ -656,6 +671,31 @@ config STM32_ADC
This driver can also be built as a module. If so, the module
will be called stm32-adc.
+config STM32_DFSDM_CORE
+ tristate "STMicroelectronics STM32 DFSDM core"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Select this option to enable the driver for STMicroelectronics
+ STM32 digital filter for sigma delta converter.
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-core.
+
+config STM32_DFSDM_ADC
+ tristate "STMicroelectronics STM32 dfsdm adc"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select STM32_DFSDM_CORE
+ select REGMAP_MMIO
+ select IIO_BUFFER_HW_CONSUMER
+ help
+	  Select this option to support ADC sigma delta modulators connected
+	  to the STMicroelectronics STM32 digital filter for sigma delta
+	  converter.
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-adc.
+
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on PC104 && X86 && ISA_BUS_API
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 9572c1090f35..28a9423997f3 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -64,6 +64,8 @@ obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
+obj-$(CONFIG_STM32_DFSDM_CORE) += stm32-dfsdm-core.o
+obj-$(CONFIG_STM32_DFSDM_ADC) += stm32-dfsdm-adc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC084S021) += ti-adc084s021.o
@@ -82,3 +84,4 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 8a958d5f1905..327a49ba1991 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -53,11 +54,12 @@ struct aspeed_adc_model_data {
};
struct aspeed_adc_data {
- struct device *dev;
- void __iomem *base;
- spinlock_t clk_lock;
- struct clk_hw *clk_prescaler;
- struct clk_hw *clk_scaler;
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t clk_lock;
+ struct clk_hw *clk_prescaler;
+ struct clk_hw *clk_scaler;
+ struct reset_control *rst;
};
#define ASPEED_CHAN(_idx, _data_reg_addr) { \
@@ -217,6 +219,15 @@ static int aspeed_adc_probe(struct platform_device *pdev)
goto scaler_error;
}
+ data->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(data->rst)) {
+ dev_err(&pdev->dev,
+ "invalid or missing reset controller device tree entry");
+ ret = PTR_ERR(data->rst);
+ goto reset_error;
+ }
+ reset_control_deassert(data->rst);
+
model_data = of_device_get_match_data(&pdev->dev);
if (model_data->wait_init_sequence) {
@@ -263,9 +274,10 @@ iio_register_error:
writel(ASPEED_OPERATION_MODE_POWER_DOWN,
data->base + ASPEED_REG_ENGINE_CONTROL);
clk_disable_unprepare(data->clk_scaler->clk);
+reset_error:
+ reset_control_assert(data->rst);
clk_enable_error:
clk_hw_unregister_divider(data->clk_scaler);
-
scaler_error:
clk_hw_unregister_divider(data->clk_prescaler);
return ret;
@@ -280,6 +292,7 @@ static int aspeed_adc_remove(struct platform_device *pdev)
writel(ASPEED_OPERATION_MODE_POWER_DOWN,
data->base + ASPEED_REG_ENGINE_CONTROL);
clk_disable_unprepare(data->clk_scaler->clk);
+ reset_control_assert(data->rst);
clk_hw_unregister_divider(data->clk_scaler);
clk_hw_unregister_divider(data->clk_prescaler);
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 755a493c2a2c..4eff8351ce29 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -16,6 +16,8 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -100,6 +102,8 @@
#define AT91_SAMA5D2_LCDR 0x20
/* Interrupt Enable Register */
#define AT91_SAMA5D2_IER 0x24
+/* Interrupt Enable Register - general overrun error */
+#define AT91_SAMA5D2_IER_GOVRE BIT(25)
/* Interrupt Disable Register */
#define AT91_SAMA5D2_IDR 0x28
/* Interrupt Mask Register */
@@ -167,13 +171,19 @@
/*
* Maximum number of bytes to hold conversion from all channels
- * plus the timestamp
+ * without the timestamp.
*/
-#define AT91_BUFFER_MAX_BYTES ((AT91_SAMA5D2_SINGLE_CHAN_CNT + \
- AT91_SAMA5D2_DIFF_CHAN_CNT) * 2 + 8)
+#define AT91_BUFFER_MAX_CONVERSION_BYTES ((AT91_SAMA5D2_SINGLE_CHAN_CNT + \
+ AT91_SAMA5D2_DIFF_CHAN_CNT) * 2)
+
+/* This total must also include the timestamp */
+#define AT91_BUFFER_MAX_BYTES (AT91_BUFFER_MAX_CONVERSION_BYTES + 8)
#define AT91_BUFFER_MAX_HWORDS (AT91_BUFFER_MAX_BYTES / 2)
+#define AT91_HWFIFO_MAX_SIZE_STR "128"
+#define AT91_HWFIFO_MAX_SIZE 128
+
#define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \
{ \
.type = IIO_VOLTAGE, \
@@ -228,6 +238,28 @@ struct at91_adc_trigger {
bool hw_trig;
};
+/**
+ * at91_adc_dma - at91-sama5d2 dma information struct
+ * @dma_chan: the dma channel acquired
+ * @rx_buf: dma coherent allocated area
+ * @rx_dma_buf: dma handler for the buffer
+ * @phys_addr: physical address of the ADC base register
+ * @buf_idx: index inside the dma buffer where reading was last done
+ * @rx_buf_sz: size of buffer used by DMA operation
+ * @watermark: number of conversions to copy before DMA triggers irq
+ * @dma_ts: hold the start timestamp of dma operation
+ */
+struct at91_adc_dma {
+ struct dma_chan *dma_chan;
+ u8 *rx_buf;
+ dma_addr_t rx_dma_buf;
+ phys_addr_t phys_addr;
+ int buf_idx;
+ int rx_buf_sz;
+ int watermark;
+ s64 dma_ts;
+};
+
struct at91_adc_state {
void __iomem *base;
int irq;
@@ -242,6 +274,7 @@ struct at91_adc_state {
u32 conversion_value;
struct at91_adc_soc_info soc_info;
wait_queue_head_t wq_data_available;
+ struct at91_adc_dma dma_st;
u16 buffer[AT91_BUFFER_MAX_HWORDS];
/*
* lock to prevent concurrent 'single conversion' requests through
@@ -322,11 +355,17 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
if (state) {
at91_adc_writel(st, AT91_SAMA5D2_CHER,
BIT(chan->channel));
- at91_adc_writel(st, AT91_SAMA5D2_IER,
- BIT(chan->channel));
+ /* enable irq only if not using DMA */
+ if (!st->dma_st.dma_chan) {
+ at91_adc_writel(st, AT91_SAMA5D2_IER,
+ BIT(chan->channel));
+ }
} else {
- at91_adc_writel(st, AT91_SAMA5D2_IDR,
- BIT(chan->channel));
+ /* disable irq only if not using DMA */
+ if (!st->dma_st.dma_chan) {
+ at91_adc_writel(st, AT91_SAMA5D2_IDR,
+ BIT(chan->channel));
+ }
at91_adc_writel(st, AT91_SAMA5D2_CHDR,
BIT(chan->channel));
}
@@ -340,6 +379,10 @@ static int at91_adc_reenable_trigger(struct iio_trigger *trig)
struct iio_dev *indio = iio_trigger_get_drvdata(trig);
struct at91_adc_state *st = iio_priv(indio);
+ /* if we are using DMA, we must not reenable irq after each trigger */
+ if (st->dma_st.dma_chan)
+ return 0;
+
enable_irq(st->irq);
/* Needed to ACK the DRDY interrupt */
@@ -350,6 +393,153 @@ static int at91_adc_reenable_trigger(struct iio_trigger *trig)
static const struct iio_trigger_ops at91_adc_trigger_ops = {
.set_trigger_state = &at91_adc_configure_trigger,
.try_reenable = &at91_adc_reenable_trigger,
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static int at91_adc_dma_size_done(struct at91_adc_state *st)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+ int i, size;
+
+ status = dmaengine_tx_status(st->dma_st.dma_chan,
+ st->dma_st.dma_chan->cookie,
+ &state);
+ if (status != DMA_IN_PROGRESS)
+ return 0;
+
+ /* Transferred length is size in bytes from end of buffer */
+ i = st->dma_st.rx_buf_sz - state.residue;
+
+ /* Return available bytes */
+ if (i >= st->dma_st.buf_idx)
+ size = i - st->dma_st.buf_idx;
+ else
+ size = st->dma_st.rx_buf_sz + i - st->dma_st.buf_idx;
+ return size;
+}
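The residue-to-size conversion above is circular-buffer arithmetic: bytes available to the reader are the writer's offset minus the reader's, modulo the buffer size. A minimal standalone sketch of the same math (plain C; the sizes are made up for illustration, this is not driver code):

    #include <stdio.h>

    /*
     * Bytes available to the reader of a circular DMA buffer:
     * 'head' is how far DMA has written (buf_sz - residue in the driver),
     * 'tail' is the reader index (st->dma_st.buf_idx).
     */
    static int avail_bytes(int head, int tail, int buf_sz)
    {
        return (head - tail + buf_sz) % buf_sz;
    }

    int main(void)
    {
        /* 64-byte buffer, DMA wrapped to offset 8, reader at 56 */
        printf("%d\n", avail_bytes(8, 56, 64));    /* prints 16 */
        return 0;
    }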
+
+static void at91_dma_buffer_done(void *data)
+{
+ struct iio_dev *indio_dev = data;
+
+ iio_trigger_poll_chained(indio_dev->trig);
+}
+
+static int at91_adc_dma_start(struct iio_dev *indio_dev)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int ret;
+ u8 bit;
+
+ if (!st->dma_st.dma_chan)
+ return 0;
+
+ /* we start a new DMA, so set buffer index to start */
+ st->dma_st.buf_idx = 0;
+
+ /*
+ * compute buffer size w.r.t. watermark and enabled channels.
+ * scan_bytes is aligned, so compute the exact size the DMA needs
+ */
+ st->dma_st.rx_buf_sz = 0;
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->num_channels) {
+ struct iio_chan_spec const *chan = indio_dev->channels + bit;
+
+ st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8;
+ }
+ st->dma_st.rx_buf_sz *= st->dma_st.watermark;
+
+ /* Prepare a DMA cyclic transaction */
+ desc = dmaengine_prep_dma_cyclic(st->dma_st.dma_chan,
+ st->dma_st.rx_dma_buf,
+ st->dma_st.rx_buf_sz,
+ st->dma_st.rx_buf_sz / 2,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+
+ if (!desc) {
+ dev_err(&indio_dev->dev, "cannot prepare DMA cyclic\n");
+ return -EBUSY;
+ }
+
+ desc->callback = at91_dma_buffer_done;
+ desc->callback_param = indio_dev;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(&indio_dev->dev, "cannot submit DMA cyclic\n");
+ dmaengine_terminate_async(st->dma_st.dma_chan);
+ return ret;
+ }
+
+ /* enable general overrun error signaling */
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_GOVRE);
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(st->dma_st.dma_chan);
+
+ /* consider current time as DMA start time for timestamps */
+ st->dma_st.dma_ts = iio_get_time_ns(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "DMA cyclic started\n");
+
+ return 0;
+}
+
+static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int ret;
+
+ ret = at91_adc_dma_start(indio_dev);
+ if (ret) {
+ dev_err(&indio_dev->dev, "buffer postenable failed\n");
+ return ret;
+ }
+
+ return iio_triggered_buffer_postenable(indio_dev);
+}
+
+static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
+ u8 bit;
+
+ ret = iio_triggered_buffer_predisable(indio_dev);
+ if (ret < 0)
+ dev_err(&indio_dev->dev, "buffer predisable failed\n");
+
+ if (!st->dma_st.dma_chan)
+ return ret;
+
+ /* if we are using DMA we must clear registers and end DMA */
+ dmaengine_terminate_sync(st->dma_st.dma_chan);
+
+ /*
+ * For each enabled channel we must read the last converted value
+ * to clear EOC status and not get a possible interrupt later.
+ * This value is being read by DMA from LCDR anyway
+ */
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->num_channels) {
+ struct iio_chan_spec const *chan = indio_dev->channels + bit;
+
+ if (st->dma_st.dma_chan)
+ at91_adc_readl(st, chan->address);
+ }
+
+ /* read overflow register to clear possible overflow status */
+ at91_adc_readl(st, AT91_SAMA5D2_OVER);
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops at91_buffer_setup_ops = {
+ .postenable = &at91_adc_buffer_postenable,
+ .predisable = &at91_adc_buffer_predisable,
};
static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
@@ -388,24 +578,77 @@ static int at91_adc_trigger_init(struct iio_dev *indio)
return 0;
}
-static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
+static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
+ struct iio_poll_func *pf)
{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio = pf->indio_dev;
- struct at91_adc_state *st = iio_priv(indio);
+ struct at91_adc_state *st = iio_priv(indio_dev);
int i = 0;
u8 bit;
- for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
- struct iio_chan_spec const *chan = indio->channels + bit;
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->num_channels) {
+ struct iio_chan_spec const *chan = indio_dev->channels + bit;
st->buffer[i] = at91_adc_readl(st, chan->address);
i++;
}
+ iio_push_to_buffers_with_timestamp(indio_dev, st->buffer,
+ pf->timestamp);
+}
- iio_push_to_buffers_with_timestamp(indio, st->buffer, pf->timestamp);
+static void at91_adc_trigger_handler_dma(struct iio_dev *indio_dev)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ int transferred_len = at91_adc_dma_size_done(st);
+ s64 ns = iio_get_time_ns(indio_dev);
+ s64 interval;
+ int sample_index = 0, sample_count, sample_size;
+
+ u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
+ /* if we reached this point, we cannot sample faster */
+ if (status & AT91_SAMA5D2_IER_GOVRE)
+ pr_info_ratelimited("%s: conversion overrun detected\n",
+ indio_dev->name);
- iio_trigger_notify_done(indio->trig);
+ sample_size = div_s64(st->dma_st.rx_buf_sz, st->dma_st.watermark);
+
+ sample_count = div_s64(transferred_len, sample_size);
+
+ /*
+ * interval between samples is total time since last transfer handling
+ * divided by the number of samples (total size divided by sample size)
+ */
+ interval = div_s64((ns - st->dma_st.dma_ts), sample_count);
+
+ while (transferred_len >= sample_size) {
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ (st->dma_st.rx_buf + st->dma_st.buf_idx),
+ (st->dma_st.dma_ts + interval * sample_index));
+ /* adjust remaining length */
+ transferred_len -= sample_size;
+ /* adjust buffer index */
+ st->dma_st.buf_idx += sample_size;
+ /* in case of reaching end of buffer, reset index */
+ if (st->dma_st.buf_idx >= st->dma_st.rx_buf_sz)
+ st->dma_st.buf_idx = 0;
+ sample_index++;
+ }
+ /* adjust saved time for next transfer handling */
+ st->dma_st.dma_ts = iio_get_time_ns(indio_dev);
+}
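Because DMA raises one callback per half-buffer, the handler above spreads timestamps evenly between the previous and the current callback. The arithmetic in isolation (a sketch with assumed numbers, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t dma_ts = 1000000;    /* ns at previous callback (assumed) */
        int64_t now = 1000800;       /* ns at this callback (assumed) */
        int sample_count = 4;
        int64_t interval = (now - dma_ts) / sample_count;    /* 200 ns */
        int i;

        for (i = 0; i < sample_count; i++)
            printf("sample %d ts=%lld ns\n", i,
                   (long long)(dma_ts + interval * i));
        return 0;
    }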
+
+static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ if (st->dma_st.dma_chan)
+ at91_adc_trigger_handler_dma(indio_dev);
+ else
+ at91_adc_trigger_handler_nodma(indio_dev, pf);
+
+ iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
@@ -414,7 +657,7 @@ static int at91_adc_buffer_init(struct iio_dev *indio)
{
return devm_iio_triggered_buffer_setup(&indio->dev, indio,
&iio_pollfunc_store_time,
- &at91_adc_trigger_handler, NULL);
+ &at91_adc_trigger_handler, &at91_buffer_setup_ops);
}
static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -485,10 +728,13 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
if (!(status & imr))
return IRQ_NONE;
- if (iio_buffer_enabled(indio)) {
+ if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
disable_irq_nosync(irq);
iio_trigger_poll(indio->trig);
- } else {
+ } else if (iio_buffer_enabled(indio) && st->dma_st.dma_chan) {
+ disable_irq_nosync(irq);
+ WARN(true, "Unexpected irq occurred\n");
+ } else if (!iio_buffer_enabled(indio)) {
st->conversion_value = at91_adc_readl(st, st->chan->address);
st->conversion_done = true;
wake_up_interruptible(&st->wq_data_available);
@@ -510,7 +756,6 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
-
mutex_lock(&st->lock);
st->chan = chan;
@@ -541,6 +786,9 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+ /* Needed to ACK the DRDY interrupt */
+ at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+
mutex_unlock(&st->lock);
iio_device_release_direct_mode(indio_dev);
@@ -580,9 +828,123 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
return 0;
}
+static void at91_adc_dma_init(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ struct dma_slave_config config = {0};
+ /*
+ * We make the buffer double the size of the fifo,
+ * such that DMA uses one half of the buffer (full fifo size)
+ * and the software uses the other half to read/write.
+ */
+ unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
+ AT91_BUFFER_MAX_CONVERSION_BYTES * 2,
+ PAGE_SIZE);
+
+ if (st->dma_st.dma_chan)
+ return;
+
+ st->dma_st.dma_chan = dma_request_slave_channel(&pdev->dev, "rx");
+
+ if (!st->dma_st.dma_chan) {
+ dev_info(&pdev->dev, "can't get DMA channel\n");
+ goto dma_exit;
+ }
+
+ st->dma_st.rx_buf = dma_alloc_coherent(st->dma_st.dma_chan->device->dev,
+ pages * PAGE_SIZE,
+ &st->dma_st.rx_dma_buf,
+ GFP_KERNEL);
+ if (!st->dma_st.rx_buf) {
+ dev_info(&pdev->dev, "can't allocate coherent DMA area\n");
+ goto dma_chan_disable;
+ }
+
+ /* Configure DMA channel to read data register */
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr = (phys_addr_t)(st->dma_st.phys_addr
+ + AT91_SAMA5D2_LCDR);
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ config.src_maxburst = 1;
+ config.dst_maxburst = 1;
+
+ if (dmaengine_slave_config(st->dma_st.dma_chan, &config)) {
+ dev_info(&pdev->dev, "can't configure DMA slave\n");
+ goto dma_free_area;
+ }
+
+ dev_info(&pdev->dev, "using %s for rx DMA transfers\n",
+ dma_chan_name(st->dma_st.dma_chan));
+
+ return;
+
+dma_free_area:
+ dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
+ st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
+dma_chan_disable:
+ dma_release_channel(st->dma_st.dma_chan);
+ st->dma_st.dma_chan = NULL;
+dma_exit:
+ dev_info(&pdev->dev, "continuing without DMA support\n");
+}
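The coherent area holds two FIFO-sized halves (DMA fills one while software drains the other), rounded up to whole pages. A worked sketch of the sizing math; the per-sample conversion byte count here is an assumption, not the driver's real value:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

    int main(void)
    {
        long hwfifo_max = 128;    /* AT91_HWFIFO_MAX_SIZE */
        long conv_bytes = 24;     /* assumed per-sample conversion bytes */
        long page_size = 4096;
        long pages = DIV_ROUND_UP(hwfifo_max * conv_bytes * 2, page_size);

        printf("pages=%ld (%ld bytes)\n", pages, pages * page_size);
        return 0;
    }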
+
+static void at91_adc_dma_disable(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
+ AT91_BUFFER_MAX_CONVERSION_BYTES * 2,
+ PAGE_SIZE);
+
+ /* if we are not using DMA, just return */
+ if (!st->dma_st.dma_chan)
+ return;
+
+ /* wait for all transactions to be terminated first */
+ dmaengine_terminate_sync(st->dma_st.dma_chan);
+
+ dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
+ st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
+ dma_release_channel(st->dma_st.dma_chan);
+ st->dma_st.dma_chan = NULL;
+
+ dev_info(&pdev->dev, "continuing without DMA support\n");
+}
+
+static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ if (val > AT91_HWFIFO_MAX_SIZE)
+ return -EINVAL;
+
+ if (!st->selected_trig->hw_trig) {
+ dev_dbg(&indio_dev->dev, "we need hw trigger for DMA\n");
+ return 0;
+ }
+
+ dev_dbg(&indio_dev->dev, "new watermark is %u\n", val);
+ st->dma_st.watermark = val;
+
+ /*
+ * The logic here is: with a watermark of 1, each conversion raises
+ * its own IRQ, so we don't need DMA. If the watermark is higher,
+ * we use DMA to transfer the conversions in bulk.
+ */
+
+ if (val == 1)
+ at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
+ else if (val > 1)
+ at91_adc_dma_init(to_platform_device(&indio_dev->dev));
+
+ return 0;
+}
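From userspace, this DMA on/off decision is driven by the standard IIO buffer watermark attribute; a hedged example writer (the device index and path are assumptions about a particular system):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* hypothetical node; substitute the right iio:deviceN */
        int fd = open("/sys/bus/iio/devices/iio:device0/buffer/watermark",
                      O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* values > 1 let the driver switch to DMA; 1 keeps per-sample IRQs */
        if (write(fd, "16", 2) != 2)
            perror("write");
        close(fd);
        return 0;
    }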
+
static const struct iio_info at91_adc_info = {
.read_raw = &at91_adc_read_raw,
.write_raw = &at91_adc_write_raw,
+ .hwfifo_set_watermark = &at91_adc_set_watermark,
};
static void at91_adc_hw_init(struct at91_adc_state *st)
@@ -599,6 +961,42 @@ static void at91_adc_hw_init(struct at91_adc_state *st)
at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
}
+static ssize_t at91_adc_get_fifo_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev =
+ platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!st->dma_st.dma_chan);
+}
+
+static ssize_t at91_adc_get_watermark(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev =
+ platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark);
+}
+
+static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
+ at91_adc_get_fifo_state, NULL, 0);
+static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
+ at91_adc_get_watermark, NULL, 0);
+
+static IIO_CONST_ATTR(hwfifo_watermark_min, "2");
+static IIO_CONST_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR);
+
+static const struct attribute *at91_adc_fifo_attributes[] = {
+ &iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
+ &iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
+ &iio_dev_attr_hwfifo_watermark.dev_attr.attr,
+ &iio_dev_attr_hwfifo_enabled.dev_attr.attr,
+ NULL,
+};
+
static int at91_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
@@ -674,6 +1072,9 @@ static int at91_adc_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
+ /* if we plan to use DMA, we need the physical address of the regs */
+ st->dma_st.phys_addr = res->start;
+
st->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(st->base))
return PTR_ERR(st->base);
@@ -737,11 +1138,22 @@ static int at91_adc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "couldn't setup the triggers.\n");
goto per_clk_disable_unprepare;
}
+ /*
+ * Initially the iio buffer has a length of 2 and
+ * a watermark of 1
+ */
+ st->dma_st.watermark = 1;
+
+ iio_buffer_set_attrs(indio_dev->buffer,
+ at91_adc_fifo_attributes);
}
+ if (dma_coerce_mask_and_coherent(&indio_dev->dev, DMA_BIT_MASK(32)))
+ dev_info(&pdev->dev, "cannot set DMA mask to 32-bit\n");
+
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto per_clk_disable_unprepare;
+ goto dma_disable;
if (st->selected_trig->hw_trig)
dev_info(&pdev->dev, "setting up trigger as %s\n",
@@ -752,6 +1164,8 @@ static int at91_adc_probe(struct platform_device *pdev)
return 0;
+dma_disable:
+ at91_adc_dma_disable(pdev);
per_clk_disable_unprepare:
clk_disable_unprepare(st->per_clk);
vref_disable:
@@ -768,6 +1182,8 @@ static int at91_adc_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
+ at91_adc_dma_disable(pdev);
+
clk_disable_unprepare(st->per_clk);
regulator_disable(st->vref);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 3836d4222a3e..71a5ee652b79 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -1177,9 +1177,9 @@ static int at91_adc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
st->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(st->reg_base)) {
+ if (IS_ERR(st->reg_base))
return PTR_ERR(st->reg_base);
- }
+
/*
* Disable all IRQs before setting up the handler
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 60c9e853dd81..031d568b4972 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -92,22 +92,14 @@ static const struct iio_chan_spec axp288_adc_channels[] = {
},
};
-#define AXP288_ADC_MAP(_adc_channel_label, _consumer_dev_name, \
- _consumer_channel) \
- { \
- .adc_channel_label = _adc_channel_label, \
- .consumer_dev_name = _consumer_dev_name, \
- .consumer_channel = _consumer_channel, \
- }
-
/* for consumer drivers */
static struct iio_map axp288_adc_default_maps[] = {
- AXP288_ADC_MAP("TS_PIN", "axp288-batt", "axp288-batt-temp"),
- AXP288_ADC_MAP("PMIC_TEMP", "axp288-pmic", "axp288-pmic-temp"),
- AXP288_ADC_MAP("GPADC", "axp288-gpadc", "axp288-system-temp"),
- AXP288_ADC_MAP("BATT_CHG_I", "axp288-chrg", "axp288-chrg-curr"),
- AXP288_ADC_MAP("BATT_DISCHRG_I", "axp288-chrg", "axp288-chrg-d-curr"),
- AXP288_ADC_MAP("BATT_V", "axp288-batt", "axp288-batt-volt"),
+ IIO_MAP("TS_PIN", "axp288-batt", "axp288-batt-temp"),
+ IIO_MAP("PMIC_TEMP", "axp288-pmic", "axp288-pmic-temp"),
+ IIO_MAP("GPADC", "axp288-gpadc", "axp288-system-temp"),
+ IIO_MAP("BATT_CHG_I", "axp288-chrg", "axp288-chrg-curr"),
+ IIO_MAP("BATT_DISCHRG_I", "axp288-chrg", "axp288-chrg-d-curr"),
+ IIO_MAP("BATT_V", "axp288-batt", "axp288-batt-volt"),
{},
};
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index d10b9f13d557..9430b54121e0 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -24,6 +24,9 @@
#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
@@ -89,6 +92,11 @@ struct hx711_data {
int gain_set; /* gain set on device */
int gain_chan_a; /* gain for channel A */
struct mutex lock;
+ /*
+ * triggered buffer
+ * 2x32-bit channel + 64-bit timestamp
+ */
+ u32 buffer[4];
};
static int hx711_cycle(struct hx711_data *hx711_data)
@@ -145,15 +153,16 @@ static int hx711_wait_for_ready(struct hx711_data *hx711_data)
int i, val;
/*
- * a maximum reset cycle time of 56 ms was measured.
- * we round it up to 100 ms
+ * In some rare cases the reset takes quite a long time,
+ * especially when the channel is changed.
+ * Allow up to one second for it.
*/
for (i = 0; i < 100; i++) {
val = gpiod_get_value(hx711_data->gpiod_dout);
if (!val)
break;
- /* sleep at least 1 ms */
- msleep(1);
+ /* sleep at least 10 ms */
+ msleep(10);
}
if (val)
return -EIO;
@@ -195,9 +204,7 @@ static int hx711_reset(struct hx711_data *hx711_data)
* after a dummy read we need to wait for readiness
* so as not to mix gain pulses with the clock
*/
- ret = hx711_wait_for_ready(hx711_data);
- if (ret)
- return ret;
+ val = hx711_wait_for_ready(hx711_data);
}
return val;
@@ -236,34 +243,40 @@ static int hx711_set_gain_for_channel(struct hx711_data *hx711_data, int chan)
return 0;
}
+static int hx711_reset_read(struct hx711_data *hx711_data, int chan)
+{
+ int ret;
+ int val;
+
+ /*
+ * hx711_reset() must be called from here
+ * because it could be calling hx711_read() by itself
+ */
+ if (hx711_reset(hx711_data)) {
+ dev_err(hx711_data->dev, "reset failed!");
+ return -EIO;
+ }
+
+ ret = hx711_set_gain_for_channel(hx711_data, chan);
+ if (ret < 0)
+ return ret;
+
+ val = hx711_read(hx711_data);
+
+ return val;
+}
+
static int hx711_read_raw(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
int *val, int *val2, long mask)
{
struct hx711_data *hx711_data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&hx711_data->lock);
- /*
- * hx711_reset() must be called from here
- * because it could be calling hx711_read() by itself
- */
- if (hx711_reset(hx711_data)) {
- mutex_unlock(&hx711_data->lock);
- dev_err(hx711_data->dev, "reset failed!");
- return -EIO;
- }
-
- ret = hx711_set_gain_for_channel(hx711_data, chan->channel);
- if (ret < 0) {
- mutex_unlock(&hx711_data->lock);
- return ret;
- }
-
- *val = hx711_read(hx711_data);
+ *val = hx711_reset_read(hx711_data, chan->channel);
mutex_unlock(&hx711_data->lock);
@@ -339,6 +352,36 @@ static int hx711_write_raw_get_fmt(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_NANO;
}
+static irqreturn_t hx711_trigger(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hx711_data *hx711_data = iio_priv(indio_dev);
+ int i, j = 0;
+
+ mutex_lock(&hx711_data->lock);
+
+ memset(hx711_data->buffer, 0, sizeof(hx711_data->buffer));
+
+ for (i = 0; i < indio_dev->masklength; i++) {
+ if (!test_bit(i, indio_dev->active_scan_mask))
+ continue;
+
+ hx711_data->buffer[j] = hx711_reset_read(hx711_data,
+ indio_dev->channels[i].channel);
+ j++;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, hx711_data->buffer,
+ pf->timestamp);
+
+ mutex_unlock(&hx711_data->lock);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
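With both voltage channels enabled, each scan produced by the handler above is two 32-bit slots (24 valid bits each) plus an 8-byte-aligned 64-bit timestamp, matching buffer[4]. A sketch of a matching userspace record; the byte source is a placeholder, real data would come from the /dev/iio:deviceN character device:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct hx711_scan {
        uint32_t chan[2];    /* 24 valid bits per 32-bit slot */
        int64_t timestamp;   /* ns, 8-byte aligned */
    };

    int main(void)
    {
        unsigned char raw[sizeof(struct hx711_scan)] = { 0 };    /* placeholder */
        struct hx711_scan scan;

        memcpy(&scan, raw, sizeof(scan));
        printf("ch0=%u ch1=%u ts=%lld\n",
               scan.chan[0], scan.chan[1], (long long)scan.timestamp);
        return 0;
    }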
static ssize_t hx711_scale_available_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -387,6 +430,13 @@ static const struct iio_chan_spec hx711_chan_spec[] = {
.indexed = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_VOLTAGE,
@@ -394,7 +444,15 @@ static const struct iio_chan_spec hx711_chan_spec[] = {
.indexed = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
static int hx711_probe(struct platform_device *pdev)
@@ -459,10 +517,9 @@ static int hx711_probe(struct platform_device *pdev)
* 1 LSB = (AVDD * 100) / GAIN / 1678 [10^-9 mV]
*/
ret = regulator_get_voltage(hx711_data->reg_avdd);
- if (ret < 0) {
- regulator_disable(hx711_data->reg_avdd);
- return ret;
- }
+ if (ret < 0)
+ goto error_regulator;
+
/* we need 10^-9 mV */
ret *= 100;
@@ -482,12 +539,27 @@ static int hx711_probe(struct platform_device *pdev)
indio_dev->channels = hx711_chan_spec;
indio_dev->num_channels = ARRAY_SIZE(hx711_chan_spec);
+ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+ hx711_trigger, NULL);
+ if (ret < 0) {
+ dev_err(dev, "setup of iio triggered buffer failed\n");
+ goto error_regulator;
+ }
+
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "Couldn't register the device\n");
- regulator_disable(hx711_data->reg_avdd);
+ goto error_buffer;
}
+ return 0;
+
+error_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+error_regulator:
+ regulator_disable(hx711_data->reg_avdd);
+
return ret;
}
@@ -501,6 +573,8 @@ static int hx711_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
regulator_disable(hx711_data->reg_avdd);
return 0;
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 84a43871f7dc..0635a79864bf 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -44,13 +44,14 @@
#define INA226_MASK_ENABLE 0x06
#define INA226_CVRF BIT(3)
-#define INA219_CNVR BIT(1)
#define INA2XX_MAX_REGISTERS 8
/* settings - depend on use case */
-#define INA219_CONFIG_DEFAULT 0x399F /* PGA=8 */
+#define INA219_CONFIG_DEFAULT 0x399F /* PGA=1/8, BRNG=32V */
#define INA219_DEFAULT_IT 532
+#define INA219_DEFAULT_BRNG 1 /* 32V */
+#define INA219_DEFAULT_PGA 125 /* 1000/8 */
#define INA226_CONFIG_DEFAULT 0x4327
#define INA226_DEFAULT_AVG 4
#define INA226_DEFAULT_IT 1110
@@ -63,6 +64,14 @@
*/
#define INA2XX_MODE_MASK GENMASK(3, 0)
+/* Gain for VShunt: 1/8 (default), 1/4, 1/2, 1 */
+#define INA219_PGA_MASK GENMASK(12, 11)
+#define INA219_SHIFT_PGA(val) ((val) << 11)
+
+/* VBus range: 32V (default), 16V */
+#define INA219_BRNG_MASK BIT(13)
+#define INA219_SHIFT_BRNG(val) ((val) << 13)
+
/* Averaging for VBus/VShunt/Power */
#define INA226_AVG_MASK GENMASK(11, 9)
#define INA226_SHIFT_AVG(val) ((val) << 9)
@@ -79,6 +88,11 @@
#define INA226_ITS_MASK GENMASK(5, 3)
#define INA226_SHIFT_ITS(val) ((val) << 3)
+/* INA219 Bus voltage register, low bits are flags */
+#define INA219_OVF BIT(0)
+#define INA219_CNVR BIT(1)
+#define INA219_BUS_VOLTAGE_SHIFT 3
+
/* Cosmetic macro giving the sampling period for a full P=UxI cycle */
#define SAMPLING_PERIOD(c) ((c->int_time_vbus + c->int_time_vshunt) \
* c->avg)
@@ -110,11 +124,12 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
- int calibration_factor;
- int shunt_div;
- int bus_voltage_shift;
+ int calibration_value;
+ int shunt_voltage_lsb; /* nV */
+ int bus_voltage_shift; /* position of lsb */
int bus_voltage_lsb; /* uV */
- int power_lsb; /* uW */
+ /* fixed relation between current and power lsb, uW/uA */
+ int power_lsb_factor;
enum ina2xx_ids chip_id;
};
@@ -127,26 +142,28 @@ struct ina2xx_chip_info {
int avg;
int int_time_vbus; /* Bus voltage integration time uS */
int int_time_vshunt; /* Shunt voltage integration time uS */
+ int range_vbus; /* Bus voltage maximum in V */
+ int pga_gain_vshunt; /* Shunt voltage PGA gain */
bool allow_async_readout;
};
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
- .shunt_div = 100,
- .bus_voltage_shift = 3,
+ .calibration_value = 4096,
+ .shunt_voltage_lsb = 10000,
+ .bus_voltage_shift = INA219_BUS_VOLTAGE_SHIFT,
.bus_voltage_lsb = 4000,
- .power_lsb = 20000,
+ .power_lsb_factor = 20,
.chip_id = ina219,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
- .shunt_div = 400,
+ .calibration_value = 2048,
+ .shunt_voltage_lsb = 2500,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
- .power_lsb = 25000,
+ .power_lsb_factor = 25,
.chip_id = ina226,
},
};
@@ -170,6 +187,9 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev,
else
*val = regval;
+ if (chan->address == INA2XX_BUS_VOLTAGE)
+ *val >>= chip->config->bus_voltage_shift;
+
return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
@@ -197,26 +217,48 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->address) {
case INA2XX_SHUNT_VOLTAGE:
- /* processed (mV) = raw/shunt_div */
- *val2 = chip->config->shunt_div;
- *val = 1;
+ /* processed (mV) = raw * lsb(nV) / 1000000 */
+ *val = chip->config->shunt_voltage_lsb;
+ *val2 = 1000000;
return IIO_VAL_FRACTIONAL;
case INA2XX_BUS_VOLTAGE:
- /* processed (mV) = raw*lsb (uV) / (1000 << shift) */
+ /* processed (mV) = raw * lsb (uV) / 1000 */
*val = chip->config->bus_voltage_lsb;
- *val2 = 1000 << chip->config->bus_voltage_shift;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+
+ case INA2XX_CURRENT:
+ /*
+ * processed (mA) = raw * current_lsb (mA)
+ * current_lsb (mA) = shunt_voltage_lsb (nV) /
+ * shunt_resistor (uOhm)
+ */
+ *val = chip->config->shunt_voltage_lsb;
+ *val2 = chip->shunt_resistor_uohm;
return IIO_VAL_FRACTIONAL;
case INA2XX_POWER:
- /* processed (mW) = raw*lsb (uW) / 1000 */
- *val = chip->config->power_lsb;
+ /*
+ * processed (mW) = raw * power_lsb (mW)
+ * power_lsb (mW) = power_lsb_factor (mW/mA) *
+ * current_lsb (mA)
+ */
+ *val = chip->config->power_lsb_factor *
+ chip->config->shunt_voltage_lsb;
+ *val2 = chip->shunt_resistor_uohm;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ switch (chan->address) {
+ case INA2XX_SHUNT_VOLTAGE:
+ *val = chip->pga_gain_vshunt;
*val2 = 1000;
return IIO_VAL_FRACTIONAL;
- case INA2XX_CURRENT:
- /* processed (mA) = raw (mA) */
- *val = 1;
+ case INA2XX_BUS_VOLTAGE:
+ *val = chip->range_vbus == 32 ? 1 : 2;
return IIO_VAL_INT;
}
}
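With the rework, processed values depend only on the two LSB constants and the user-supplied shunt; a standalone sketch of the unit math for the INA219 (the shunt value and raw readings are assumptions):

    #include <stdio.h>

    int main(void)
    {
        long shunt_voltage_lsb_nV = 10000;    /* INA219: 10 uV */
        long power_lsb_factor = 20;           /* INA219: uW per uA */
        long shunt_uohm = 100000;             /* assumed 100 mOhm shunt */
        long raw_current = 1234, raw_power = 1234;

        /* current (mA) = raw * shunt_voltage_lsb (nV) / shunt (uOhm) */
        double current_mA =
            (double)raw_current * shunt_voltage_lsb_nV / shunt_uohm;
        /* power (mW) = raw * factor * shunt_voltage_lsb (nV) / shunt (uOhm) */
        double power_mW = (double)raw_power * power_lsb_factor *
                          shunt_voltage_lsb_nV / shunt_uohm;

        printf("I=%.3f mA P=%.3f mW\n", current_mA, power_mW);
        return 0;
    }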
@@ -353,6 +395,74 @@ static int ina219_set_int_time_vshunt(struct ina2xx_chip_info *chip,
return 0;
}
+static const int ina219_vbus_range_tab[] = { 1, 2 };
+static int ina219_set_vbus_range_denom(struct ina2xx_chip_info *chip,
+ unsigned int range,
+ unsigned int *config)
+{
+ if (range == 1)
+ chip->range_vbus = 32;
+ else if (range == 2)
+ chip->range_vbus = 16;
+ else
+ return -EINVAL;
+
+ *config &= ~INA219_BRNG_MASK;
+ *config |= INA219_SHIFT_BRNG(range == 1 ? 1 : 0) & INA219_BRNG_MASK;
+
+ return 0;
+}
+
+static const int ina219_vshunt_gain_tab[] = { 125, 250, 500, 1000 };
+static const int ina219_vshunt_gain_frac[] = {
+ 125, 1000, 250, 1000, 500, 1000, 1000, 1000 };
+
+static int ina219_set_vshunt_pga_gain(struct ina2xx_chip_info *chip,
+ unsigned int gain,
+ unsigned int *config)
+{
+ int bits;
+
+ if (gain < 125 || gain > 1000)
+ return -EINVAL;
+
+ bits = find_closest(gain, ina219_vshunt_gain_tab,
+ ARRAY_SIZE(ina219_vshunt_gain_tab));
+
+ chip->pga_gain_vshunt = ina219_vshunt_gain_tab[bits];
+ bits = 3 - bits;
+
+ *config &= ~INA219_PGA_MASK;
+ *config |= INA219_SHIFT_PGA(bits) & INA219_PGA_MASK;
+
+ return 0;
+}
+
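The gain table runs from 125 (gain 1/8) to 1000 (gain 1) while the PG register field counts the other way (0 = gain 1, 3 = gain 1/8), hence bits = 3 - index. A sketch of the selection, with the kernel's find_closest() replaced by a simplified stand-in:

    #include <stdio.h>
    #include <stdlib.h>

    static const int gain_tab[] = { 125, 250, 500, 1000 };    /* gain * 1000 */

    /* simplified stand-in for the kernel's find_closest() */
    static int find_closest_idx(int x, const int *tab, int n)
    {
        int i, best = 0;

        for (i = 1; i < n; i++)
            if (abs(tab[i] - x) < abs(tab[best] - x))
                best = i;
        return best;
    }

    int main(void)
    {
        int idx = find_closest_idx(300, gain_tab, 4);    /* picks 250 */
        int pg_bits = 3 - idx;    /* 0 = gain 1 ... 3 = gain 1/8 */

        printf("gain=%d/1000 PG=%d\n", gain_tab[idx], pg_bits);
        return 0;
    }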
+static int ina2xx_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ switch (chan->address) {
+ case INA2XX_SHUNT_VOLTAGE:
+ *type = IIO_VAL_FRACTIONAL;
+ *length = ARRAY_SIZE(ina219_vshunt_gain_frac);
+ *vals = ina219_vshunt_gain_frac;
+ return IIO_AVAIL_LIST;
+
+ case INA2XX_BUS_VOLTAGE:
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(ina219_vbus_range_tab);
+ *vals = ina219_vbus_range_tab;
+ return IIO_AVAIL_LIST;
+ }
+ }
+
+ return -EINVAL;
+}
+
static int ina2xx_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -395,6 +505,14 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev,
}
break;
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ if (chan->address == INA2XX_SHUNT_VOLTAGE)
+ ret = ina219_set_vshunt_pga_gain(chip, val * 1000 +
+ val2 / 1000, &tmp);
+ else
+ ret = ina219_set_vbus_range_denom(chip, val, &tmp);
+ break;
+
default:
ret = -EINVAL;
}
@@ -434,25 +552,21 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
}
/*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet). We hardcode a Current_LSB
- * of 1.0 x10-3. The only remaining parameter is RShunt.
- * There is no need to expose the CALIBRATION register
- * to the user for now. But we need to reset this register
- * if the user updates RShunt after driver init, e.g upon
- * reading an EEPROM/Probe-type value.
+ * The calibration register is set to the best value, which eliminates
+ * truncation errors when the hardware calculates the current register.
+ * According to the datasheet (INA226: eq. 3, INA219: eq. 4) the best values
+ * are 2048 for ina226 and 4096 for ina219. They are hardcoded as
+ * calibration_value.
*/
static int ina2xx_set_calibration(struct ina2xx_chip_info *chip)
{
- u16 regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
- chip->shunt_resistor_uohm);
-
- return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
+ return regmap_write(chip->regmap, INA2XX_CALIBRATION,
+ chip->config->calibration_value);
}
static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
{
- if (val <= 0 || val > chip->config->calibration_factor)
+ if (val == 0 || val > INT_MAX)
return -EINVAL;
chip->shunt_resistor_uohm = val;
@@ -485,11 +599,6 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
if (ret)
return ret;
- /* Update the Calibration register */
- ret = ina2xx_set_calibration(chip);
- if (ret)
- return ret;
-
return len;
}
@@ -532,19 +641,23 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
* Sampling Freq is a consequence of the integration times of
* the Voltage channels.
*/
-#define INA219_CHAN_VOLTAGE(_index, _address) { \
+#define INA219_CHAN_VOLTAGE(_index, _address, _shift) { \
.type = IIO_VOLTAGE, \
.address = (_address), \
.indexed = 1, \
.channel = (_index), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_INT_TIME), \
+ BIT(IIO_CHAN_INFO_INT_TIME) | \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+ .info_mask_separate_available = \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = (_index), \
.scan_type = { \
.sign = 'u', \
- .realbits = 16, \
+ .shift = _shift, \
+ .realbits = 16 - _shift, \
.storagebits = 16, \
.endianness = IIO_LE, \
} \
@@ -579,23 +692,18 @@ static const struct iio_chan_spec ina226_channels[] = {
};
static const struct iio_chan_spec ina219_channels[] = {
- INA219_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE),
- INA219_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE),
+ INA219_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE, 0),
+ INA219_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE, INA219_BUS_VOLTAGE_SHIFT),
INA219_CHAN(IIO_POWER, 2, INA2XX_POWER),
INA219_CHAN(IIO_CURRENT, 3, INA2XX_CURRENT),
IIO_CHAN_SOFT_TIMESTAMP(4),
};
-static int ina2xx_work_buffer(struct iio_dev *indio_dev)
+static int ina2xx_conversion_ready(struct iio_dev *indio_dev)
{
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
- unsigned short data[8];
- int bit, ret, i = 0;
- s64 time_a, time_b;
+ int ret;
unsigned int alert;
- int cnvr_need_clear = 0;
-
- time_a = iio_get_time_ns(indio_dev);
/*
* Because the timer thread and the chip conversion clock
@@ -608,23 +716,31 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
* For now, we do an extra read of the MASK_ENABLE register (INA226)
* resp. the BUS_VOLTAGE register (INA219).
*/
- if (!chip->allow_async_readout)
- do {
- if (chip->config->chip_id == ina226) {
- ret = regmap_read(chip->regmap,
- INA226_MASK_ENABLE, &alert);
- alert &= INA226_CVRF;
- } else {
- ret = regmap_read(chip->regmap,
- INA2XX_BUS_VOLTAGE, &alert);
- alert &= INA219_CNVR;
- cnvr_need_clear = alert;
- }
+ if (chip->config->chip_id == ina226) {
+ ret = regmap_read(chip->regmap,
+ INA226_MASK_ENABLE, &alert);
+ alert &= INA226_CVRF;
+ } else {
+ ret = regmap_read(chip->regmap,
+ INA2XX_BUS_VOLTAGE, &alert);
+ alert &= INA219_CNVR;
+ }
- if (ret < 0)
- return ret;
+ if (ret < 0)
+ return ret;
+
+ return !!alert;
+}
+
+static int ina2xx_work_buffer(struct iio_dev *indio_dev)
+{
+ struct ina2xx_chip_info *chip = iio_priv(indio_dev);
+ /* data buffer needs space for channel data and timestamp */
+ unsigned short data[4 + sizeof(s64)/sizeof(short)];
+ int bit, ret, i = 0;
+ s64 time;
- } while (!alert);
+ time = iio_get_time_ns(indio_dev);
/*
* Single register reads: bulk_read will not work with ina226/219
@@ -640,26 +756,11 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
return ret;
data[i++] = val;
-
- if (INA2XX_SHUNT_VOLTAGE + bit == INA2XX_POWER)
- cnvr_need_clear = 0;
- }
-
- /* Dummy read on INA219 power register to clear CNVR flag */
- if (cnvr_need_clear && chip->config->chip_id == ina219) {
- unsigned int val;
-
- ret = regmap_read(chip->regmap, INA2XX_POWER, &val);
- if (ret < 0)
- return ret;
}
- time_b = iio_get_time_ns(indio_dev);
+ iio_push_to_buffers_with_timestamp(indio_dev, data, time);
- iio_push_to_buffers_with_timestamp(indio_dev,
- (unsigned int *)data, time_a);
-
- return (unsigned long)(time_b - time_a) / 1000;
+ return 0;
};
static int ina2xx_capture_thread(void *data)
@@ -667,7 +768,9 @@ static int ina2xx_capture_thread(void *data)
struct iio_dev *indio_dev = data;
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
int sampling_us = SAMPLING_PERIOD(chip);
- int buffer_us;
+ int ret;
+ struct timespec64 next, now, delta;
+ s64 delay_us;
/*
* Poll a bit faster than the chip internal Fs, in case
@@ -676,13 +779,43 @@ static int ina2xx_capture_thread(void *data)
if (!chip->allow_async_readout)
sampling_us -= 200;
+ ktime_get_ts64(&next);
+
do {
- buffer_us = ina2xx_work_buffer(indio_dev);
- if (buffer_us < 0)
- return buffer_us;
+ while (!chip->allow_async_readout) {
+ ret = ina2xx_conversion_ready(indio_dev);
+ if (ret < 0)
+ return ret;
- if (sampling_us > buffer_us)
- udelay(sampling_us - buffer_us);
+ /*
+ * If the conversion was not yet finished,
+ * reset the reference timestamp.
+ */
+ if (ret == 0)
+ ktime_get_ts64(&next);
+ else
+ break;
+ }
+
+ ret = ina2xx_work_buffer(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ktime_get_ts64(&now);
+
+ /*
+ * Advance the timestamp for the next poll by one sampling
+ * interval, and sleep for the remainder (next - now)
+ * In case "next" has already passed, the interval is added
+ * multiple times, i.e. samples are dropped.
+ */
+ do {
+ timespec64_add_ns(&next, 1000 * sampling_us);
+ delta = timespec64_sub(next, now);
+ delay_us = div_s64(timespec64_to_ns(&delta), 1000);
+ } while (delay_us <= 0);
+
+ usleep_range(delay_us, (delay_us * 3) >> 1);
} while (!kthread_should_stop());
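The rewritten loop paces itself against an absolute reference: 'next' advances by whole sampling intervals, and late iterations simply skip intervals instead of drifting. A userspace analogue of the same pattern using clock_gettime (a sketch, not the kthread code):

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        long sampling_us = 2000;
        struct timespec next, now;
        int i;

        clock_gettime(CLOCK_MONOTONIC, &next);
        for (i = 0; i < 5; i++) {
            long delay_us;

            /* ... one "conversion" would happen here ... */
            clock_gettime(CLOCK_MONOTONIC, &now);
            do {    /* advance; drop intervals if we fell behind */
                next.tv_nsec += sampling_us * 1000;
                if (next.tv_nsec >= 1000000000L) {
                    next.tv_nsec -= 1000000000L;
                    next.tv_sec++;
                }
                delay_us = (next.tv_sec - now.tv_sec) * 1000000L +
                           (next.tv_nsec - now.tv_nsec) / 1000;
            } while (delay_us <= 0);
            usleep(delay_us);
        }
        return 0;
    }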
@@ -746,7 +879,6 @@ static IIO_CONST_ATTR_NAMED(ina226_integration_time_available,
integration_time_available,
"0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244");
-
static IIO_DEVICE_ATTR(in_allow_async_readout, S_IRUGO | S_IWUSR,
ina2xx_allow_async_readout_show,
ina2xx_allow_async_readout_store, 0);
@@ -780,6 +912,7 @@ static const struct attribute_group ina226_attribute_group = {
static const struct iio_info ina219_info = {
.attrs = &ina219_attribute_group,
.read_raw = ina2xx_read_raw,
+ .read_avail = ina2xx_read_avail,
.write_raw = ina2xx_write_raw,
.debugfs_reg_access = ina2xx_debug_reg,
};
@@ -860,6 +993,8 @@ static int ina2xx_probe(struct i2c_client *client,
chip->avg = 1;
ina219_set_int_time_vbus(chip, INA219_DEFAULT_IT, &val);
ina219_set_int_time_vshunt(chip, INA219_DEFAULT_IT, &val);
+ ina219_set_vbus_range_denom(chip, INA219_DEFAULT_BRNG, &val);
+ ina219_set_vshunt_pga_gain(chip, INA219_DEFAULT_PGA, &val);
}
ret = ina2xx_init(chip, val);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 36047147ce7c..29fa7736d80c 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -96,8 +96,8 @@
#define MESON_SAR_ADC_FIFO_RD_SAMPLE_VALUE_MASK GENMASK(11, 0)
#define MESON_SAR_ADC_AUX_SW 0x1c
- #define MESON_SAR_ADC_AUX_SW_MUX_SEL_CHAN_MASK(_chan) \
- (GENMASK(10, 8) << (((_chan) - 2) * 2))
+ #define MESON_SAR_ADC_AUX_SW_MUX_SEL_CHAN_SHIFT(_chan) \
+ (8 + (((_chan) - 2) * 3))
#define MESON_SAR_ADC_AUX_SW_VREF_P_MUX BIT(6)
#define MESON_SAR_ADC_AUX_SW_VREF_N_MUX BIT(5)
#define MESON_SAR_ADC_AUX_SW_MODE_SEL BIT(4)
@@ -221,6 +221,7 @@ enum meson_sar_adc_chan7_mux_sel {
struct meson_sar_adc_data {
bool has_bl30_integration;
+ unsigned long clock_rate;
u32 bandgap_reg;
unsigned int resolution;
const char *name;
@@ -233,7 +234,6 @@ struct meson_sar_adc_priv {
const struct meson_sar_adc_data *data;
struct clk *clkin;
struct clk *core_clk;
- struct clk *sana_clk;
struct clk *adc_sel_clk;
struct clk *adc_clk;
struct clk_gate clk_gate;
@@ -622,7 +622,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
static int meson_sar_adc_init(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- int regval, ret;
+ int regval, i, ret;
/*
* make sure we start at CH7 input since the other muxes are only used
@@ -677,6 +677,32 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
FIELD_PREP(MESON_SAR_ADC_DELAY_INPUT_DLY_SEL_MASK,
1));
+ /*
+ * set up the input channel muxes in MESON_SAR_ADC_CHAN_10_SW
+ * (0 = SAR_ADC_CH0, 1 = SAR_ADC_CH1)
+ */
+ regval = FIELD_PREP(MESON_SAR_ADC_CHAN_10_SW_CHAN0_MUX_SEL_MASK, 0);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN0_MUX_SEL_MASK,
+ regval);
+ regval = FIELD_PREP(MESON_SAR_ADC_CHAN_10_SW_CHAN1_MUX_SEL_MASK, 1);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN1_MUX_SEL_MASK,
+ regval);
+
+ /*
+ * set up the input channel muxes in MESON_SAR_ADC_AUX_SW
+ * (2 = SAR_ADC_CH2, 3 = SAR_ADC_CH3, ...) and enable
+ * MESON_SAR_ADC_AUX_SW_YP_DRIVE_SW and
+ * MESON_SAR_ADC_AUX_SW_XP_DRIVE_SW like the vendor driver.
+ */
+ regval = 0;
+ for (i = 2; i <= 7; i++)
+ regval |= i << MESON_SAR_ADC_AUX_SW_MUX_SEL_CHAN_SHIFT(i);
+ regval |= MESON_SAR_ADC_AUX_SW_YP_DRIVE_SW;
+ regval |= MESON_SAR_ADC_AUX_SW_XP_DRIVE_SW;
+ regmap_write(priv->regmap, MESON_SAR_ADC_AUX_SW, regval);
+
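Each AUX mux select is 3 bits wide, so the old GENMASK(10, 8) << ((chan - 2) * 2) stepped the fields by only 2 bits and overlapped its neighbours; the new shift of 8 + (chan - 2) * 3 lays them out contiguously. A quick sketch of the resulting layout:

    #include <stdio.h>

    #define AUX_SW_MUX_SEL_CHAN_SHIFT(chan)    (8 + (((chan) - 2) * 3))

    int main(void)
    {
        unsigned int regval = 0;
        int i;

        for (i = 2; i <= 7; i++) {
            printf("chan %d -> bits [%d:%d]\n", i,
                   AUX_SW_MUX_SEL_CHAN_SHIFT(i) + 2,
                   AUX_SW_MUX_SEL_CHAN_SHIFT(i));
            regval |= i << AUX_SW_MUX_SEL_CHAN_SHIFT(i);
        }
        printf("regval=0x%08x\n", regval);
        return 0;
    }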
ret = clk_set_parent(priv->adc_sel_clk, priv->clkin);
if (ret) {
dev_err(indio_dev->dev.parent,
@@ -684,7 +710,7 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
return ret;
}
- ret = clk_set_rate(priv->adc_clk, 1200000);
+ ret = clk_set_rate(priv->adc_clk, priv->data->clock_rate);
if (ret) {
dev_err(indio_dev->dev.parent,
"failed to set adc clock rate\n");
@@ -731,12 +757,6 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
goto err_core_clk;
}
- ret = clk_prepare_enable(priv->sana_clk);
- if (ret) {
- dev_err(indio_dev->dev.parent, "failed to enable sana clk\n");
- goto err_sana_clk;
- }
-
regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
@@ -763,8 +783,6 @@ err_adc_clk:
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
MESON_SAR_ADC_REG3_ADC_EN, 0);
meson_sar_adc_set_bandgap(indio_dev, false);
- clk_disable_unprepare(priv->sana_clk);
-err_sana_clk:
clk_disable_unprepare(priv->core_clk);
err_core_clk:
regulator_disable(priv->vref);
@@ -790,7 +808,6 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
meson_sar_adc_set_bandgap(indio_dev, false);
- clk_disable_unprepare(priv->sana_clk);
clk_disable_unprepare(priv->core_clk);
regulator_disable(priv->vref);
@@ -866,6 +883,7 @@ static const struct iio_info meson_sar_adc_iio_info = {
static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
.has_bl30_integration = false,
+ .clock_rate = 1150000,
.bandgap_reg = MESON_SAR_ADC_DELTA_10,
.regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
@@ -874,6 +892,7 @@ static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
.has_bl30_integration = false,
+ .clock_rate = 1150000,
.bandgap_reg = MESON_SAR_ADC_DELTA_10,
.regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
@@ -882,6 +901,7 @@ static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
.has_bl30_integration = true,
+ .clock_rate = 1200000,
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 10,
@@ -890,6 +910,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
.has_bl30_integration = true,
+ .clock_rate = 1200000,
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
@@ -898,6 +919,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
.has_bl30_integration = true,
+ .clock_rate = 1200000,
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
@@ -993,16 +1015,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
return PTR_ERR(priv->core_clk);
}
- priv->sana_clk = devm_clk_get(&pdev->dev, "sana");
- if (IS_ERR(priv->sana_clk)) {
- if (PTR_ERR(priv->sana_clk) == -ENOENT) {
- priv->sana_clk = NULL;
- } else {
- dev_err(&pdev->dev, "failed to get sana clk\n");
- return PTR_ERR(priv->sana_clk);
- }
- }
-
priv->adc_clk = devm_clk_get(&pdev->dev, "adc_clk");
if (IS_ERR(priv->adc_clk)) {
if (PTR_ERR(priv->adc_clk) == -ENOENT) {
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index 47d24ae5462f..fe3d7826783c 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -5,6 +5,7 @@
#include <linux/math64.h>
#include <linux/log2.h>
#include <linux/err.h>
+#include <linux/module.h>
#include "qcom-vadc-common.h"
@@ -229,3 +230,6 @@ int qcom_vadc_decimation_from_dt(u32 value)
return __ffs64(value / VADC_DECIMATION_MIN);
}
EXPORT_SYMBOL(qcom_vadc_decimation_from_dt);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm ADC common functionality");
diff --git a/drivers/iio/adc/sd_adc_modulator.c b/drivers/iio/adc/sd_adc_modulator.c
new file mode 100644
index 000000000000..560d8c7d9d86
--- /dev/null
+++ b/drivers/iio/adc/sd_adc_modulator.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic sigma delta modulator driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+static const struct iio_info iio_sd_mod_iio_info;
+
+static const struct iio_chan_spec iio_sd_mod_ch = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 1,
+ .shift = 0,
+ },
+};
+
+static int iio_sd_mod_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *iio;
+
+ iio = devm_iio_device_alloc(dev, 0);
+ if (!iio)
+ return -ENOMEM;
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = dev->of_node;
+ iio->name = dev_name(dev);
+ iio->info = &iio_sd_mod_iio_info;
+ iio->modes = INDIO_BUFFER_HARDWARE;
+
+ iio->num_channels = 1;
+ iio->channels = &iio_sd_mod_ch;
+
+ platform_set_drvdata(pdev, iio);
+
+ return devm_iio_device_register(&pdev->dev, iio);
+}
+
+static const struct of_device_id sd_adc_of_match[] = {
+ { .compatible = "sd-modulator" },
+ { .compatible = "ads1201" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sd_adc_of_match);
+
+static struct platform_driver iio_sd_mod_adc = {
+ .driver = {
+ .name = "iio_sd_adc_mod",
+ .of_match_table = of_match_ptr(sd_adc_of_match),
+ },
+ .probe = iio_sd_mod_probe,
+};
+
+module_platform_driver(iio_sd_mod_adc);
+
+MODULE_DESCRIPTION("Basic sigma delta modulator");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
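For reference, a minimal, hypothetical device tree node this driver would bind against; the #io-channel-cells property follows the sigma-delta-modulator binding, and the label and placement are illustrative only:

    sd_adc: sd-modulator {
        compatible = "sd-modulator";
        #io-channel-cells = <0>;
    };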
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 6aefef99f935..40be7d9fadbf 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is part of STM32 ADC driver
*
@@ -6,19 +7,6 @@
*
* Inspired from: fsl-imx25-tsadc
*
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 250ee958a669..8af507b3f32d 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is part of STM32 ADC driver
*
* Copyright (C) 2016, STMicroelectronics - All Rights Reserved
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
*
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __STM32_ADC_H
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index c9d96f935dba..7f5def465340 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is part of STM32 ADC driver
*
* Copyright (C) 2016, STMicroelectronics - All Rights Reserved
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
- *
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
@@ -92,6 +79,7 @@
#define STM32H7_ADC_SQR3 0x38
#define STM32H7_ADC_SQR4 0x3C
#define STM32H7_ADC_DR 0x40
+#define STM32H7_ADC_DIFSEL 0xC0
#define STM32H7_ADC_CALFACT 0xC4
#define STM32H7_ADC_CALFACT2 0xC8
@@ -153,6 +141,8 @@ enum stm32h7_adc_dmngt {
/* BOOST bit must be set on STM32H7 when ADC clock is above 20MHz */
#define STM32H7_BOOST_CLKRATE 20000000UL
+#define STM32_ADC_CH_MAX 20 /* max number of channels */
+#define STM32_ADC_CH_SZ 10 /* max channel name size */
#define STM32_ADC_MAX_SQ 16 /* SQ1..SQ16 */
#define STM32_ADC_MAX_SMP 7 /* SMPx range is [0..7] */
#define STM32_ADC_TIMEOUT_US 100000
@@ -297,9 +287,11 @@ struct stm32_adc_cfg {
* @rx_buf: dma rx buffer cpu address
* @rx_dma_buf: dma rx buffer bus address
* @rx_buf_sz: dma rx buffer size
+ * @difsel: bitmask to set single-ended/differential channel
* @pcsel: bitmask to preselect channels on some devices
* @smpr_val: sampling time settings (e.g. smpr1 / smpr2)
* @cal: optional calibration data on some devices
+ * @chan_name: channel name array
*/
struct stm32_adc {
struct stm32_adc_common *common;
@@ -318,72 +310,37 @@ struct stm32_adc {
u8 *rx_buf;
dma_addr_t rx_dma_buf;
unsigned int rx_buf_sz;
+ u32 difsel;
u32 pcsel;
u32 smpr_val[2];
struct stm32_adc_calib cal;
+ char chan_name[STM32_ADC_CH_MAX][STM32_ADC_CH_SZ];
};
-/**
- * struct stm32_adc_chan_spec - specification of stm32 adc channel
- * @type: IIO channel type
- * @channel: channel number (single ended)
- * @name: channel name (single ended)
- */
-struct stm32_adc_chan_spec {
- enum iio_chan_type type;
- int channel;
- const char *name;
+struct stm32_adc_diff_channel {
+ u32 vinp;
+ u32 vinn;
};
/**
* struct stm32_adc_info - stm32 ADC, per instance config data
- * @channels: Reference to stm32 channels spec
* @max_channels: Number of channels
* @resolutions: available resolutions
* @num_res: number of available resolutions
*/
struct stm32_adc_info {
- const struct stm32_adc_chan_spec *channels;
int max_channels;
const unsigned int *resolutions;
const unsigned int num_res;
};
-/*
- * Input definitions common for all instances:
- * stm32f4 can have up to 16 channels
- * stm32h7 can have up to 20 channels
- */
-static const struct stm32_adc_chan_spec stm32_adc_channels[] = {
- { IIO_VOLTAGE, 0, "in0" },
- { IIO_VOLTAGE, 1, "in1" },
- { IIO_VOLTAGE, 2, "in2" },
- { IIO_VOLTAGE, 3, "in3" },
- { IIO_VOLTAGE, 4, "in4" },
- { IIO_VOLTAGE, 5, "in5" },
- { IIO_VOLTAGE, 6, "in6" },
- { IIO_VOLTAGE, 7, "in7" },
- { IIO_VOLTAGE, 8, "in8" },
- { IIO_VOLTAGE, 9, "in9" },
- { IIO_VOLTAGE, 10, "in10" },
- { IIO_VOLTAGE, 11, "in11" },
- { IIO_VOLTAGE, 12, "in12" },
- { IIO_VOLTAGE, 13, "in13" },
- { IIO_VOLTAGE, 14, "in14" },
- { IIO_VOLTAGE, 15, "in15" },
- { IIO_VOLTAGE, 16, "in16" },
- { IIO_VOLTAGE, 17, "in17" },
- { IIO_VOLTAGE, 18, "in18" },
- { IIO_VOLTAGE, 19, "in19" },
-};
-
static const unsigned int stm32f4_adc_resolutions[] = {
/* sorted values so the index matches RES[1:0] in STM32F4_ADC_CR1 */
12, 10, 8, 6,
};
+/* stm32f4 can have up to 16 channels */
static const struct stm32_adc_info stm32f4_adc_info = {
- .channels = stm32_adc_channels,
.max_channels = 16,
.resolutions = stm32f4_adc_resolutions,
.num_res = ARRAY_SIZE(stm32f4_adc_resolutions),
@@ -394,9 +351,9 @@ static const unsigned int stm32h7_adc_resolutions[] = {
16, 14, 12, 10, 8,
};
+/* stm32h7 can have up to 20 channels */
static const struct stm32_adc_info stm32h7_adc_info = {
- .channels = stm32_adc_channels,
- .max_channels = 20,
+ .max_channels = STM32_ADC_CH_MAX,
.resolutions = stm32h7_adc_resolutions,
.num_res = ARRAY_SIZE(stm32h7_adc_resolutions),
};
@@ -983,15 +940,19 @@ pwr_dwn:
* stm32h7_adc_prepare() - Leave power down mode to enable ADC.
* @adc: stm32 adc instance
* Leave power down mode.
+ * Configure channels as single ended or differential before enabling ADC.
* Enable ADC.
* Restore calibration data.
- * Pre-select channels that may be used in PCSEL (required by input MUX / IO).
+ * Pre-select channels that may be used in PCSEL (required by input MUX / IO):
+ * - Only one input is selected for single ended (e.g. 'vinp')
+ * - Two inputs are selected for differential channels (e.g. 'vinp' & 'vinn')
*/
static int stm32h7_adc_prepare(struct stm32_adc *adc)
{
int ret;
stm32h7_adc_exit_pwr_down(adc);
+ stm32_adc_writel(adc, STM32H7_ADC_DIFSEL, adc->difsel);
ret = stm32h7_adc_enable(adc);
if (ret)
@@ -1263,10 +1224,23 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
return ret;
case IIO_CHAN_INFO_SCALE:
- *val = adc->common->vref_mv;
- *val2 = chan->scan_type.realbits;
+ if (chan->differential) {
+ *val = adc->common->vref_mv * 2;
+ *val2 = chan->scan_type.realbits;
+ } else {
+ *val = adc->common->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ }
return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ if (chan->differential)
+ /* ADC_full_scale / 2 */
+ *val = -((1 << chan->scan_type.realbits) / 2);
+ else
+ *val = 0;
+ return IIO_VAL_INT;
+
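+	/*
+	 * Scale/offset example (illustrative, assuming vref_mv = 3300 and
+	 * 12-bit resolution):
+	 * - single ended: scale = 3300 / 2^12 ~= 0.806 mV/LSB, offset = 0
+	 * - differential: scale = 6600 / 2^12 ~= 1.611 mV/LSB, offset = -2048
+	 * so a differential raw value of 4095 reads back as
+	 * (4095 - 2048) * 1.611 ~= 3298 mV, close to +vref.
+	 */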
default:
return -EINVAL;
}
@@ -1315,6 +1289,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
{
struct stm32_adc *adc = iio_priv(indio_dev);
unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2;
+ unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE;
/*
* dma cyclic transfers are used, buffer is split into two periods.
@@ -1323,7 +1298,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
* - one buffer (period) driver can push with iio_trigger_poll().
*/
watermark = min(watermark, val * (unsigned)(sizeof(u16)));
- adc->rx_buf_sz = watermark * 2;
+ adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv);
return 0;
}
@@ -1628,29 +1603,40 @@ static void stm32_adc_smpr_init(struct stm32_adc *adc, int channel, u32 smp_ns)
}
static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan,
- const struct stm32_adc_chan_spec *channel,
- int scan_index, u32 smp)
+ struct iio_chan_spec *chan, u32 vinp,
+ u32 vinn, int scan_index, bool differential)
{
struct stm32_adc *adc = iio_priv(indio_dev);
-
- chan->type = channel->type;
- chan->channel = channel->channel;
- chan->datasheet_name = channel->name;
+ char *name = adc->chan_name[vinp];
+
+ chan->type = IIO_VOLTAGE;
+ chan->channel = vinp;
+ if (differential) {
+ chan->differential = 1;
+ chan->channel2 = vinn;
+ snprintf(name, STM32_ADC_CH_SZ, "in%d-in%d", vinp, vinn);
+ } else {
+ snprintf(name, STM32_ADC_CH_SZ, "in%d", vinp);
+ }
+ chan->datasheet_name = name;
chan->scan_index = scan_index;
chan->indexed = 1;
chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
- chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET);
chan->scan_type.sign = 'u';
chan->scan_type.realbits = adc->cfg->adc_info->resolutions[adc->res];
chan->scan_type.storagebits = 16;
chan->ext_info = stm32_adc_ext_info;
- /* Prepare sampling time settings */
- stm32_adc_smpr_init(adc, chan->channel, smp);
-
/* pre-build selected channels mask */
adc->pcsel |= BIT(chan->channel);
+ if (differential) {
+ /* pre-build diff channels mask */
+ adc->difsel |= BIT(chan->channel);
+ /* Also add negative input to pre-selected channels */
+ adc->pcsel |= BIT(chan->channel2);
+ }
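+	/*
+	 * Example (illustrative): a differential channel in1-in5 sets DIFSEL
+	 * bit 1 and PCSEL bits 1 and 5; a single-ended channel in2 only sets
+	 * PCSEL bit 2.
+	 */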
}
static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
@@ -1658,17 +1644,40 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
struct device_node *node = indio_dev->dev.of_node;
struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
+ struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
struct property *prop;
const __be32 *cur;
struct iio_chan_spec *channels;
- int scan_index = 0, num_channels, ret;
+ int scan_index = 0, num_channels = 0, num_diff = 0, ret, i;
u32 val, smp = 0;
- num_channels = of_property_count_u32_elems(node, "st,adc-channels");
- if (num_channels < 0 ||
- num_channels > adc_info->max_channels) {
+ ret = of_property_count_u32_elems(node, "st,adc-channels");
+ if (ret > adc_info->max_channels) {
dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
- return num_channels < 0 ? num_channels : -EINVAL;
+ return -EINVAL;
+ } else if (ret > 0) {
+ num_channels += ret;
+ }
+
+ ret = of_property_count_elems_of_size(node, "st,adc-diff-channels",
+ sizeof(*diff));
+ if (ret > adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
+ return -EINVAL;
+ } else if (ret > 0) {
+ int size = ret * sizeof(*diff) / sizeof(u32);
+
+ num_diff = ret;
+ num_channels += ret;
+ ret = of_property_read_u32_array(node, "st,adc-diff-channels",
+ (u32 *)diff, size);
+ if (ret)
+ return ret;
+ }
+
+ if (!num_channels) {
+ dev_err(&indio_dev->dev, "No channels configured\n");
+ return -ENODATA;
}
/* Optional sample time is provided either for each, or all channels */
@@ -1689,6 +1698,33 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
return -EINVAL;
}
+ /* Channel can't be configured both as single-ended & diff */
+ for (i = 0; i < num_diff; i++) {
+ if (val == diff[i].vinp) {
+ dev_err(&indio_dev->dev,
+ "channel %d miss-configured\n", val);
+ return -EINVAL;
+ }
+ }
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
+ 0, scan_index, false);
+ scan_index++;
+ }
+
+ for (i = 0; i < num_diff; i++) {
+ if (diff[i].vinp >= adc_info->max_channels ||
+ diff[i].vinn >= adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
+ diff[i].vinp, diff[i].vinn);
+ return -EINVAL;
+ }
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+ diff[i].vinp, diff[i].vinn, scan_index,
+ true);
+ scan_index++;
+ }
+
+ for (i = 0; i < scan_index; i++) {
/*
* Using of_property_read_u32_index(), smp value will only be
* modified if valid u32 value can be decoded. This allows to
@@ -1696,12 +1732,9 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
* value per channel.
*/
of_property_read_u32_index(node, "st,min-sample-time-nsecs",
- scan_index, &smp);
-
- stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
- &adc_info->channels[val],
- scan_index, smp);
- scan_index++;
+ i, &smp);
+ /* Prepare sampling time settings */
+ stm32_adc_smpr_init(adc, channels[i].channel, smp);
}
indio_dev->num_channels = scan_index;
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
new file mode 100644
index 000000000000..daa026d6a94f
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -0,0 +1,1205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the ADC part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+#define DFSDM_DMA_BUFFER_SIZE (4 * PAGE_SIZE)
+
+/* Conversion timeout */
+#define DFSDM_TIMEOUT_US 100000
+#define DFSDM_TIMEOUT (msecs_to_jiffies(DFSDM_TIMEOUT_US / 1000))
+
+/* Oversampling attribute default */
+#define DFSDM_DEFAULT_OVERSAMPLING 100
+
+/* Oversampling max values */
+#define DFSDM_MAX_INT_OVERSAMPLING 256
+#define DFSDM_MAX_FL_OVERSAMPLING 1024
+
+/* Max sample resolutions */
+#define DFSDM_MAX_RES BIT(31)
+#define DFSDM_DATA_RES BIT(23)
+
+enum sd_converter_type {
+ DFSDM_AUDIO,
+ DFSDM_IIO,
+};
+
+struct stm32_dfsdm_dev_data {
+ int type;
+ int (*init)(struct iio_dev *indio_dev);
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+struct stm32_dfsdm_adc {
+ struct stm32_dfsdm *dfsdm;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ unsigned int fl_id;
+ unsigned int ch_id;
+
+ /* ADC specific */
+ unsigned int oversamp;
+ struct iio_hw_consumer *hwc;
+ struct completion completion;
+ u32 *buffer;
+
+ /* Audio specific */
+ unsigned int spi_freq; /* SPI bus clock frequency */
+ unsigned int sample_freq; /* Sample frequency after filter decimation */
+ int (*cb)(const void *data, size_t size, void *cb_priv);
+ void *cb_priv;
+
+ /* DMA */
+ u8 *rx_buf;
+ unsigned int bufi; /* Buffer current position */
+ unsigned int buf_sz; /* Buffer size */
+ struct dma_chan *dma_chan;
+ dma_addr_t dma_buf;
+};
+
+struct stm32_dfsdm_str2field {
+ const char *name;
+ unsigned int val;
+};
+
+/* DFSDM channel serial interface type */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_type[] = {
+ { "SPI_R", 0 }, /* SPI with data on rising edge */
+ { "SPI_F", 1 }, /* SPI with data on falling edge */
+ { "MANCH_R", 2 }, /* Manchester codec, rising edge = logic 0 */
+ { "MANCH_F", 3 }, /* Manchester codec, falling edge = logic 1 */
+ {},
+};
+
+/* DFSDM channel clock source */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_src[] = {
+ /* External SPI clock (CLKIN x) */
+ { "CLKIN", DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL },
+ /* Internal SPI clock (CLKOUT) */
+ { "CLKOUT", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL },
+ /* Internal SPI clock divided by 2 (falling edge) */
+ { "CLKOUT_F", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING },
+ /* Internal SPI clock divided by 2 (rising edge) */
+ { "CLKOUT_R", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING },
+ {},
+};
+
+static int stm32_dfsdm_str2val(const char *str,
+ const struct stm32_dfsdm_str2field *list)
+{
+ const struct stm32_dfsdm_str2field *p = list;
+
+ for (p = list; p && p->name; p++)
+ if (!strcmp(p->name, str))
+ return p->val;
+
+ return -EINVAL;
+}
+
+static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+ unsigned int fast, unsigned int oversamp)
+{
+ unsigned int i, d, fosr, iosr;
+ u64 res;
+ s64 delta;
+ unsigned int m = 1; /* multiplication factor */
+ unsigned int p = fl->ford; /* filter order (ford) */
+
+ pr_debug("%s: Requested oversampling: %d\n", __func__, oversamp);
+ /*
+ * This function tries to compute filter oversampling and integrator
+ * oversampling, based on the oversampling ratio requested by the user.
+ *
+ * Decimation d depends on the filter order and the oversampling ratios.
+ * ford: filter order
+ * fosr: filter over sampling ratio
+ * iosr: integrator over sampling ratio
+ */
+ if (fl->ford == DFSDM_FASTSINC_ORDER) {
+ m = 2;
+ p = 2;
+ }
+
+ /*
+ * Look for filter and integrator oversampling ratios that allow the
+ * 24-bit data output resolution to be reached.
+ * Leave as soon as the exact resolution is reached.
+ * Otherwise the highest resolution below 32 bits is kept.
+ */
+ for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
+ for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
+ if (fast)
+ d = fosr * iosr;
+ else if (fl->ford == DFSDM_FASTSINC_ORDER)
+ d = fosr * (iosr + 3) + 2;
+ else
+ d = fosr * (iosr - 1 + p) + p;
+
+ if (d > oversamp)
+ break;
+ else if (d != oversamp)
+ continue;
+ /*
+ * Check resolution (limited to signed 32 bits)
+ * res <= 2^31
+ * Sincx filters:
+ * res = m * fosr^p x iosr (with m=1, p=ford)
+ * FastSinc filter
+ * res = m * fosr^p x iosr (with m=2, p=2)
+ */
+ res = fosr;
+ for (i = p - 1; i > 0; i--) {
+ res = res * (u64)fosr;
+ if (res > DFSDM_MAX_RES)
+ break;
+ }
+ if (res > DFSDM_MAX_RES)
+ continue;
+ res = res * (u64)m * (u64)iosr;
+ if (res > DFSDM_MAX_RES)
+ continue;
+
+ delta = res - DFSDM_DATA_RES;
+
+ if (res >= fl->res) {
+ fl->res = res;
+ fl->fosr = fosr;
+ fl->iosr = iosr;
+ fl->fast = fast;
+ pr_debug("%s: fosr = %d, iosr = %d\n",
+ __func__, fl->fosr, fl->iosr);
+ }
+
+ if (!delta)
+ return 0;
+ }
+ }
+
+ if (!fl->fosr)
+ return -EINVAL;
+
+ return 0;
+}
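+
+/*
+ * Worked example (illustrative): Sinc3 filter (p = 3, m = 1), fast mode,
+ * requested oversampling d = 100. With fast mode d = fosr * iosr, so the
+ * candidates include (fosr = 50, iosr = 2) with res = 50^3 * 2 = 250000 and
+ * (fosr = 100, iosr = 1) with res = 100^3 = 1000000. The loop keeps the
+ * highest resolution below 2^31, i.e. fosr = 100, iosr = 1 here.
+ */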
+
+static int stm32_dfsdm_start_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK,
+ DFSDM_CHCFGR1_CHEN(1));
+}
+
+static void stm32_dfsdm_stop_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK, DFSDM_CHCFGR1_CHEN(0));
+}
+
+static int stm32_dfsdm_chan_configure(struct stm32_dfsdm *dfsdm,
+ struct stm32_dfsdm_channel *ch)
+{
+ unsigned int id = ch->id;
+ struct regmap *regmap = dfsdm->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SITP_MASK,
+ DFSDM_CHCFGR1_SITP(ch->type));
+ if (ret < 0)
+ return ret;
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SPICKSEL_MASK,
+ DFSDM_CHCFGR1_SPICKSEL(ch->src));
+ if (ret < 0)
+ return ret;
+ return regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_CHINSEL_MASK,
+ DFSDM_CHCFGR1_CHINSEL(ch->alt_si));
+}
+
+static int stm32_dfsdm_start_filter(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id)
+{
+ int ret;
+
+ /* Enable filter */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(1));
+ if (ret < 0)
+ return ret;
+
+ /* Start conversion */
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSWSTART_MASK,
+ DFSDM_CR1_RSWSTART(1));
+}
+
+static void stm32_dfsdm_stop_filter(struct stm32_dfsdm *dfsdm, unsigned int fl_id)
+{
+ /* Disable conversion */
+ regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(0));
+}
+
+static int stm32_dfsdm_filter_configure(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id, unsigned int ch_id)
+{
+ struct regmap *regmap = dfsdm->regmap;
+ struct stm32_dfsdm_filter *fl = &dfsdm->fl_list[fl_id];
+ int ret;
+
+ /* Average integrator oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_IOSR_MASK,
+ DFSDM_FCR_IOSR(fl->iosr - 1));
+ if (ret)
+ return ret;
+
+ /* Filter order and Oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FOSR_MASK,
+ DFSDM_FCR_FOSR(fl->fosr - 1));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FORD_MASK,
+ DFSDM_FCR_FORD(fl->ford));
+ if (ret)
+ return ret;
+
+ /* No scan mode supported for the moment */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(fl_id), DFSDM_CR1_RCH_MASK,
+ DFSDM_CR1_RCH(ch_id));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSYNC_MASK,
+ DFSDM_CR1_RSYNC(fl->sync_mode));
+}
+
+static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
+ struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_channel *df_ch;
+ const char *of_str;
+ int chan_idx = ch->scan_index;
+ int ret, val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-channels", chan_idx,
+ &ch->channel);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'st,adc-channels' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+ if (ch->channel >= dfsdm->num_chs) {
+ dev_err(&indio_dev->dev,
+ " Error bad channel number %d (max = %d)\n",
+ ch->channel, dfsdm->num_chs);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-names", chan_idx,
+ &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'st,adc-channel-names' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+
+ df_ch = &dfsdm->ch_list[ch->channel];
+ df_ch->id = ch->channel;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-types", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->type = val;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-clk-src", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->src = val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-alt-channel", chan_idx,
+ &df_ch->alt_si);
+ if (ret < 0)
+ df_ch->alt_si = 0;
+
+ return 0;
+}
+
+static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", adc->spi_freq);
+}
+
+static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int sample_freq = adc->sample_freq;
+ unsigned int spi_freq;
+ int ret;
+
+ dev_err(&indio_dev->dev, "enter %s\n", __func__);
+ /* If DFSDM is master on SPI, SPI freq can not be updated */
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ return -EPERM;
+
+ ret = kstrtouint(buf, 0, &spi_freq);
+ if (ret)
+ return ret;
+
+ if (!spi_freq)
+ return -EINVAL;
+
+ if (sample_freq) {
+ if (spi_freq % sample_freq)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / sample_freq));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / sample_freq));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "No filter parameters that match!\n");
+ return ret;
+ }
+ }
+ adc->spi_freq = spi_freq;
+
+ return len;
+}
+
+static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+ int ret;
+ unsigned int dma_en = 0, cont_en = 0;
+
+ ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
+ adc->ch_id);
+ if (ret < 0)
+ goto stop_channels;
+
+ if (dma) {
+ /* Enable DMA transfer */
+ dma_en = DFSDM_CR1_RDMAEN(1);
+ /* Enable continuous conversion triggered by SPI clock */
+ cont_en = DFSDM_CR1_RCONT(1);
+ }
+ /* Apply the DMA transfer setting */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, dma_en);
+ if (ret < 0)
+ goto stop_channels;
+
+ /* Apply the continuous conversion setting */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, cont_en);
+ if (ret < 0)
+ goto stop_channels;
+
+ ret = stm32_dfsdm_start_filter(adc->dfsdm, adc->fl_id);
+ if (ret < 0)
+ goto stop_channels;
+
+ return 0;
+
+stop_channels:
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+ stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+
+ return ret;
+}
+
+static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+
+ stm32_dfsdm_stop_filter(adc->dfsdm, adc->fl_id);
+
+ /* Clean conversion options */
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+
+ stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+}
+
+static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
+ unsigned int val)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ unsigned int watermark = DFSDM_DMA_BUFFER_SIZE / 2;
+
+ /*
+ * DMA cyclic transfers are used, buffer is split into two periods.
+ * There should be:
+ * - always one buffer (period) DMA is working on
+ * - one buffer (period) the driver pushes to the ASoC side.
+ */
+ watermark = min(watermark, val * (unsigned int)(sizeof(u32)));
+ adc->buf_sz = watermark * 2;
+
+ return 0;
+}
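+
+/*
+ * Watermark example (illustrative): with PAGE_SIZE = 4096 the DMA buffer is
+ * 16 KiB, so the initial watermark is 8192 bytes. A requested watermark of
+ * 128 samples gives min(8192, 128 * 4) = 512 bytes, hence buf_sz = 1024
+ * bytes, i.e. two DMA periods of 512 bytes each.
+ */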
+
+static unsigned int stm32_dfsdm_adc_dma_residue(struct stm32_dfsdm_adc *adc)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(adc->dma_chan,
+ adc->dma_chan->cookie,
+ &state);
+ if (status == DMA_IN_PROGRESS) {
+ /* Residue is size in bytes from end of buffer */
+ unsigned int i = adc->buf_sz - state.residue;
+ unsigned int size;
+
+ /* Return available bytes */
+ if (i >= adc->bufi)
+ size = i - adc->bufi;
+ else
+ size = adc->buf_sz + i - adc->bufi;
+
+ return size;
+ }
+
+ return 0;
+}
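+
+/*
+ * Residue example (illustrative): with buf_sz = 1024 and bufi = 300, a DMA
+ * residue of 600 bytes means the write pointer is at i = 1024 - 600 = 424,
+ * so 124 bytes are available. If i had wrapped below bufi (e.g. i = 100),
+ * the available size would be 1024 + 100 - 300 = 824 bytes.
+ */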
+
+static void stm32_dfsdm_audio_dma_buffer_done(void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int available = stm32_dfsdm_adc_dma_residue(adc);
+ size_t old_pos;
+
+ /*
+ * FIXME: The IIO kernel interface does not support cyclic DMA buffers
+ * and only offers an interface to push data sample by sample.
+ * For this reason the IIO buffer interface is not used here; it is
+ * bypassed using a private callback registered by ASoC.
+ * This should be a temporary solution, pending cyclic DMA engine
+ * support in IIO.
+ */
+
+ dev_dbg(&indio_dev->dev, "%s: pos = %d, available = %d\n", __func__,
+ adc->bufi, available);
+ old_pos = adc->bufi;
+
+ while (available >= indio_dev->scan_bytes) {
+ u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
+
+ /* Mask 8 LSB that contains the channel ID */
+ *buffer = (*buffer & 0xFFFFFF00) << 8;
+ available -= indio_dev->scan_bytes;
+ adc->bufi += indio_dev->scan_bytes;
+ if (adc->bufi >= adc->buf_sz) {
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos],
+ adc->buf_sz - old_pos, adc->cb_priv);
+ adc->bufi = 0;
+ old_pos = 0;
+ }
+ }
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos], adc->bufi - old_pos,
+ adc->cb_priv);
+}
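+
+/*
+ * Wrap-around example (illustrative): with buf_sz = 1024, scan_bytes = 4,
+ * bufi = 1020 and 12 bytes available, the first sample completes the buffer
+ * so cb() flushes rx_buf[old_pos..1023], then bufi restarts at 0 and the
+ * final cb() pushes the remaining 8 bytes from the buffer start.
+ */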
+
+static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ dev_dbg(&indio_dev->dev, "%s size=%d watermark=%d\n", __func__,
+ adc->buf_sz, adc->buf_sz / 2);
+
+ /* Prepare a DMA cyclic transaction */
+ desc = dmaengine_prep_dma_cyclic(adc->dma_chan,
+ adc->dma_buf,
+ adc->buf_sz, adc->buf_sz / 2,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = stm32_dfsdm_audio_dma_buffer_done;
+ desc->callback_param = indio_dev;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_terminate_all(adc->dma_chan);
+ return ret;
+ }
+
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(adc->dma_chan);
+
+ return 0;
+}
+
+static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ /* Reset adc buffer index */
+ adc->bufi = 0;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_start_conv(adc, true);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start conversion\n");
+ goto stop_dfsdm;
+ }
+
+ if (adc->dma_chan) {
+ ret = stm32_dfsdm_adc_dma_start(indio_dev);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start DMA\n");
+ goto err_stop_conv;
+ }
+ }
+
+ return 0;
+
+err_stop_conv:
+ stm32_dfsdm_stop_conv(adc);
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan)
+ dmaengine_terminate_all(adc->dma_chan);
+
+ stm32_dfsdm_stop_conv(adc);
+
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops stm32_dfsdm_buffer_setup_ops = {
+ .postenable = &stm32_dfsdm_postenable,
+ .predisable = &stm32_dfsdm_predisable,
+};
+
+/**
+ * stm32_dfsdm_get_buff_cb() - register a callback that will be called when
+ * a DMA transfer period completes.
+ *
+ * @iio_dev: Handle to IIO device.
+ * @cb: Pointer to callback function:
+ * - data: pointer to data buffer
+ * - size: size in bytes of the data buffer
+ * - private: pointer to consumer private structure.
+ * @private: Pointer to consumer private structure.
+ */
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = cb;
+ adc->cb_priv = private;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_get_buff_cb);
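+
+/*
+ * Example consumer (an illustrative sketch; my_dai_cb(), my_dai_push_pcm()
+ * and struct my_dai are hypothetical placeholders, not part of this API):
+ *
+ *	static int my_dai_cb(const void *data, size_t size, void *private)
+ *	{
+ *		struct my_dai *dai = private;
+ *
+ *		return my_dai_push_pcm(dai, data, size);
+ *	}
+ *
+ *	ret = stm32_dfsdm_get_buff_cb(iio_dev, my_dai_cb, dai);
+ */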
+
+/**
+ * stm32_dfsdm_release_buff_cb - unregister buffer callback
+ *
+ * @iio_dev: Handle to IIO device.
+ */
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = NULL;
+ adc->cb_priv = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_release_buff_cb);
+
+static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *res)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ long timeout;
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ adc->buffer = res;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(1));
+ if (ret < 0)
+ goto stop_dfsdm;
+
+ ret = stm32_dfsdm_start_conv(adc, false);
+ if (ret < 0) {
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+ goto stop_dfsdm;
+ }
+
+ timeout = wait_for_completion_interruptible_timeout(&adc->completion,
+ DFSDM_TIMEOUT);
+
+ /* Mask the regular end-of-conversion IRQ */
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+
+ if (timeout == 0)
+ ret = -ETIMEDOUT;
+ else if (timeout < 0)
+ ret = timeout;
+ else
+ ret = IIO_VAL_INT;
+
+ stm32_dfsdm_stop_conv(adc);
+
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int spi_freq = adc->spi_freq;
+ int ret = -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = stm32_dfsdm_set_osrs(fl, 0, val);
+ if (!ret)
+ adc->oversamp = val;
+
+ return ret;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val)
+ return -EINVAL;
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ spi_freq = adc->dfsdm->spi_master_freq;
+
+ if (spi_freq % val)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / val));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / val));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Not able to find parameter that match!\n");
+ return ret;
+ }
+ adc->sample_freq = val;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: IIO enable failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
+ iio_hw_consumer_disable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: Conversion failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = adc->oversamp;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adc->sample_freq;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info stm32_dfsdm_info_audio = {
+ .hwfifo_set_watermark = stm32_dfsdm_set_watermark,
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static const struct iio_info stm32_dfsdm_info_adc = {
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static irqreturn_t stm32_dfsdm_irq(int irq, void *arg)
+{
+ struct stm32_dfsdm_adc *adc = arg;
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ unsigned int status, int_en;
+
+ regmap_read(regmap, DFSDM_ISR(adc->fl_id), &status);
+ regmap_read(regmap, DFSDM_CR2(adc->fl_id), &int_en);
+
+ if (status & DFSDM_ISR_REOCF_MASK) {
+ /* Reading the data register also clears the IRQ status */
+ regmap_read(regmap, DFSDM_RDATAR(adc->fl_id), adc->buffer);
+ complete(&adc->completion);
+ }
+
+ if (status & DFSDM_ISR_ROVRF_MASK) {
+ if (int_en & DFSDM_CR2_ROVRIE_MASK)
+ dev_warn(&indio_dev->dev, "Overrun detected\n");
+ regmap_update_bits(regmap, DFSDM_ICR(adc->fl_id),
+ DFSDM_ICR_CLRROVRF_MASK,
+ DFSDM_ICR_CLRROVRF_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Define external info for SPI Frequency and audio sampling rate that can be
+ * configured by ASoC driver through consumer.h API
+ */
+static const struct iio_chan_spec_ext_info dfsdm_adc_audio_ext_info[] = {
+ /* spi_clk_freq : clock freq on SPI/manchester bus used by channel */
+ {
+ .name = "spi_clk_freq",
+ .shared = IIO_SHARED_BY_TYPE,
+ .read = dfsdm_adc_audio_get_spiclk,
+ .write = dfsdm_adc_audio_set_spiclk,
+ },
+ {},
+};
+
+static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan) {
+ dma_free_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+ dma_release_channel(adc->dma_chan);
+ }
+}
+
+static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_slave_config config = {
+ .src_addr = (dma_addr_t)adc->dfsdm->phys_base +
+ DFSDM_RDATAR(adc->fl_id),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ int ret;
+
+ adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ &adc->dma_buf, GFP_KERNEL);
+ if (!adc->rx_buf) {
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ ret = dmaengine_slave_config(adc->dma_chan, &config);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dma_free_coherent(adc->dma_chan->device->dev, DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+err_release:
+ dma_release_channel(adc->dma_chan);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
+ if (ret < 0)
+ return ret;
+
+ ch->type = IIO_VOLTAGE;
+ ch->indexed = 1;
+
+ /*
+ * IIO_CHAN_INFO_RAW: used to compute regular conversion
+ * IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling
+ */
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+
+ if (adc->dev_data->type == DFSDM_AUDIO) {
+ ch->scan_type.sign = 's';
+ ch->ext_info = dfsdm_adc_audio_ext_info;
+ } else {
+ ch->scan_type.sign = 'u';
+ }
+ ch->scan_type.realbits = 24;
+ ch->scan_type.storagebits = 32;
+ adc->ch_id = ch->channel;
+
+ return stm32_dfsdm_chan_configure(adc->dfsdm,
+ &adc->dfsdm->ch_list[ch->channel]);
+}
+
+static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_channel *d_ch;
+ int ret;
+
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+ indio_dev->setup_ops = &stm32_dfsdm_buffer_setup_ops;
+
+ ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ ch->scan_index = 0;
+
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+ d_ch = &adc->dfsdm->ch_list[adc->ch_id];
+ if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ adc->spi_freq = adc->dfsdm->spi_master_freq;
+
+ indio_dev->num_channels = 1;
+ indio_dev->channels = ch;
+
+ return stm32_dfsdm_dma_request(indio_dev);
+}
+
+static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int num_ch;
+ int ret, chan_idx;
+
+ adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
+ ret = stm32_dfsdm_set_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
+ adc->oversamp);
+ if (ret < 0)
+ return ret;
+
+ num_ch = of_property_count_u32_elems(indio_dev->dev.of_node,
+ "st,adc-channels");
+ if (num_ch < 0 || num_ch > adc->dfsdm->num_chs) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
+ return num_ch < 0 ? num_ch : -EINVAL;
+ }
+
+ /* Bind to SD modulator IIO device */
+ adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
+ if (IS_ERR(adc->hwc))
+ return -EPROBE_DEFER;
+
+ ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch),
+ GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
+ ch->scan_index = chan_idx;
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ }
+
+ indio_dev->num_channels = num_ch;
+ indio_dev->channels = ch;
+
+ init_completion(&adc->completion);
+
+ return 0;
+}
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_adc_data = {
+ .type = DFSDM_IIO,
+ .init = stm32_dfsdm_adc_init,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_audio_data = {
+ .type = DFSDM_AUDIO,
+ .init = stm32_dfsdm_audio_init,
+};
+
+static const struct of_device_id stm32_dfsdm_adc_match[] = {
+ {
+ .compatible = "st,stm32-dfsdm-adc",
+ .data = &stm32h7_dfsdm_adc_data,
+ },
+ {
+ .compatible = "st,stm32-dfsdm-dmic",
+ .data = &stm32h7_dfsdm_audio_data,
+ },
+ {}
+};
+
+static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_dfsdm_adc *adc;
+ struct device_node *np = dev->of_node;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct iio_dev *iio;
+ char *name;
+ int ret, irq, val;
+
+
+ dev_data = of_device_get_match_data(dev);
+ iio = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!iio) {
+ dev_err(dev, "%s: Failed to allocate IIO\n", __func__);
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(iio);
+ adc->dfsdm = dev_get_drvdata(dev->parent);
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = np;
+ iio->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+
+ platform_set_drvdata(pdev, adc);
+
+ ret = of_property_read_u32(dev->of_node, "reg", &adc->fl_id);
+ if (ret != 0) {
+ dev_err(dev, "Missing reg property\n");
+ return -EINVAL;
+ }
+
+ name = devm_kzalloc(dev, sizeof("dfsdm-adc0"), GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ if (dev_data->type == DFSDM_AUDIO) {
+ iio->info = &stm32_dfsdm_info_audio;
+ snprintf(name, sizeof("dfsdm-pdm0"), "dfsdm-pdm%d", adc->fl_id);
+ } else {
+ iio->info = &stm32_dfsdm_info_adc;
+ snprintf(name, sizeof("dfsdm-adc0"), "dfsdm-adc%d", adc->fl_id);
+ }
+ iio->name = name;
+
+ /*
+ * For now, IRQs generated by channels are not handled, so the IRQ
+ * associated with filter instance 0 is dedicated to filter 0.
+ */
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
+ 0, pdev->name, adc);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "st,filter-order", &val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set filter order\n");
+ return ret;
+ }
+
+ adc->dfsdm->fl_list[adc->fl_id].ford = val;
+
+ ret = of_property_read_u32(dev->of_node, "st,filter0-sync", &val);
+ if (!ret)
+ adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
+
+ adc->dev_data = dev_data;
+ ret = dev_data->init(iio);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(iio);
+ if (ret < 0)
+ goto err_cleanup;
+
+ dev_err(dev, "of_platform_populate\n");
+ if (dev_data->type == DFSDM_AUDIO) {
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find an audio DAI\n");
+ goto err_unregister;
+ }
+ }
+
+ return 0;
+
+err_unregister:
+ iio_device_unregister(iio);
+err_cleanup:
+ stm32_dfsdm_dma_release(iio);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_dfsdm_adc *adc = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+ if (adc->dev_data->type == DFSDM_AUDIO)
+ of_platform_depopulate(&pdev->dev);
+ iio_device_unregister(indio_dev);
+ stm32_dfsdm_dma_release(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver stm32_dfsdm_adc_driver = {
+ .driver = {
+ .name = "stm32-dfsdm-adc",
+ .of_match_table = stm32_dfsdm_adc_match,
+ },
+ .probe = stm32_dfsdm_adc_probe,
+ .remove = stm32_dfsdm_adc_remove,
+};
+module_platform_driver(stm32_dfsdm_adc_driver);
+
+MODULE_DESCRIPTION("STM32 sigma delta ADC");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
new file mode 100644
index 000000000000..6290332cfd3f
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the core part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+struct stm32_dfsdm_dev_data {
+ unsigned int num_filters;
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+#define STM32H7_DFSDM_NUM_FILTERS 4
+#define STM32H7_DFSDM_NUM_CHANNELS 8
+
+static bool stm32_dfsdm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg < DFSDM_FILTER_BASE_ADR)
+ return false;
+
+ /*
+ * The register offset is masked to avoid listing the registers of
+ * every filter instance.
+ */
+ switch (reg & DFSDM_FILTER_REG_MASK) {
+ case DFSDM_CR1(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_ISR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_JDATAR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_RDATAR(0) & DFSDM_FILTER_REG_MASK:
+ return true;
+ }
+
+ return false;
+}
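+
+/*
+ * Masking example (illustrative): filters are 0x80 apart, so e.g.
+ * DFSDM_ISR(2) = 0x100 + 2 * 0x80 + 0x08 = 0x208, and
+ * 0x208 & DFSDM_FILTER_REG_MASK (0x7F) = 0x08, which matches
+ * DFSDM_ISR(0) & DFSDM_FILTER_REG_MASK. One case entry thus covers the
+ * ISR of every filter instance.
+ */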
+
+static const struct regmap_config stm32h7_dfsdm_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x2B8,
+ .volatile_reg = stm32_dfsdm_volatile_reg,
+ .fast_io = true,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_data = {
+ .num_filters = STM32H7_DFSDM_NUM_FILTERS,
+ .num_channels = STM32H7_DFSDM_NUM_CHANNELS,
+ .regmap_cfg = &stm32h7_dfsdm_regmap_cfg,
+};
+
+struct dfsdm_priv {
+ struct platform_device *pdev; /* platform device */
+
+ struct stm32_dfsdm dfsdm; /* common data exported for all instances */
+
+ unsigned int spi_clk_out_div; /* SPI clkout divider value */
+ atomic_t n_active_ch; /* number of current active channels */
+
+ struct clk *clk; /* DFSDM clock */
+ struct clk *aclk; /* audio clock */
+};
+
+/**
+ * stm32_dfsdm_start_dfsdm - start the global DFSDM interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Enable the interface when the first user starts it (n_active_ch refcount).
+ */
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ struct device *dev = &priv->pdev->dev;
+ unsigned int clk_div = priv->spi_clk_out_div;
+ int ret;
+
+ if (atomic_inc_return(&priv->n_active_ch) == 1) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start clock\n");
+ goto error_ret;
+ }
+ if (priv->aclk) {
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start audio clock\n");
+ goto disable_clk;
+ }
+ }
+
+ /* Output the SPI CLKOUT (if clk_div == 0 the clock is OFF) */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(clk_div));
+ if (ret < 0)
+ goto disable_aclk;
+
+ /* Global enable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(1));
+ if (ret < 0)
+ goto disable_aclk;
+ }
+
+ dev_dbg(dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+
+disable_aclk:
+ clk_disable_unprepare(priv->aclk);
+disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+error_ret:
+ atomic_dec(&priv->n_active_ch);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_start_dfsdm);
+
+/**
+ * stm32_dfsdm_stop_dfsdm - stop the global DFSDM interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Disable the interface when the last user stops it (n_active_ch refcount).
+ */
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ int ret;
+
+ if (atomic_dec_and_test(&priv->n_active_ch)) {
+ /* Global disable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(0));
+ if (ret < 0)
+ return ret;
+
+ /* Stop SPI CLKOUT */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(0));
+ if (ret < 0)
+ return ret;
+
+ clk_disable_unprepare(priv->clk);
+ if (priv->aclk)
+ clk_disable_unprepare(priv->aclk);
+ }
+ dev_dbg(&priv->pdev->dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_stop_dfsdm);
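+
+/*
+ * Usage sketch (illustrative): calls must be balanced, as the interface is
+ * reference counted through n_active_ch. The first user enables the clocks
+ * and the interface, the last one gates them again:
+ *
+ *	ret = stm32_dfsdm_start_dfsdm(dfsdm);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	stm32_dfsdm_stop_dfsdm(dfsdm);
+ */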
+
+static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+ struct dfsdm_priv *priv)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+ unsigned long clk_freq;
+ unsigned int spi_freq, rem;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return -ENODEV;
+ }
+ priv->dfsdm.phys_base = res->start;
+ priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+
+ /*
+ * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
+ * "dfsdm" or "audio" clocks can be used as source clock for
+ * the SPI clock out signal and internal processing, depending
+ * on use case.
+ */
+ priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
+ return -EINVAL;
+ }
+
+ priv->aclk = devm_clk_get(&pdev->dev, "audio");
+ if (IS_ERR(priv->aclk))
+ priv->aclk = NULL;
+
+ if (priv->aclk)
+ clk_freq = clk_get_rate(priv->aclk);
+ else
+ clk_freq = clk_get_rate(priv->clk);
+
+ /* SPI clock out frequency */
+ ret = of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &spi_freq);
+ if (ret < 0) {
+ /* No SPI master mode */
+ return 0;
+ }
+
+ priv->spi_clk_out_div = div_u64_rem(clk_freq, spi_freq, &rem) - 1;
+ priv->dfsdm.spi_master_freq = spi_freq;
+
+ if (rem) {
+ dev_warn(&pdev->dev, "SPI clock not accurate\n");
+ dev_warn(&pdev->dev, "%ld = %d * %d + %d\n",
+ clk_freq, spi_freq, priv->spi_clk_out_div + 1, rem);
+ }
+
+ return 0;
+}
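+
+/*
+ * Divider example (illustrative): with a 49.152 MHz audio clock and
+ * spi-max-frequency = 2048000, div_u64_rem() returns 24 with no remainder,
+ * so spi_clk_out_div = 23 and CKOUTDIV outputs 49152000 / 24 = 2.048 MHz.
+ */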
+
+static const struct of_device_id stm32_dfsdm_of_match[] = {
+ {
+ .compatible = "st,stm32h7-dfsdm",
+ .data = &stm32h7_dfsdm_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
+
+static int stm32_dfsdm_probe(struct platform_device *pdev)
+{
+ struct dfsdm_priv *priv;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct stm32_dfsdm *dfsdm;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+
+ dev_data = of_device_get_match_data(&pdev->dev);
+
+ dfsdm = &priv->dfsdm;
+ dfsdm->fl_list = devm_kcalloc(&pdev->dev, dev_data->num_filters,
+ sizeof(*dfsdm->fl_list), GFP_KERNEL);
+ if (!dfsdm->fl_list)
+ return -ENOMEM;
+
+ dfsdm->num_fls = dev_data->num_filters;
+ dfsdm->ch_list = devm_kcalloc(&pdev->dev, dev_data->num_channels,
+ sizeof(*dfsdm->ch_list),
+ GFP_KERNEL);
+ if (!dfsdm->ch_list)
+ return -ENOMEM;
+ dfsdm->num_chs = dev_data->num_channels;
+
+ ret = stm32_dfsdm_parse_of(pdev, priv);
+ if (ret < 0)
+ return ret;
+
+ dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
+ dfsdm->base,
+ dev_data->regmap_cfg);
+ if (IS_ERR(dfsdm->regmap)) {
+ ret = PTR_ERR(dfsdm->regmap);
+ dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, dfsdm);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static struct platform_driver stm32_dfsdm_driver = {
+ .probe = stm32_dfsdm_probe,
+ .driver = {
+ .name = "stm32-dfsdm",
+ .of_match_table = stm32_dfsdm_of_match,
+ },
+};
+
+module_platform_driver(stm32_dfsdm_driver);
+
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 dfsdm driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
new file mode 100644
index 000000000000..8708394b0725
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is part of STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef MDF_STM32_DFSDM__H
+#define MDF_STM32_DFSDM__H
+
+#include <linux/bitfield.h>
+
+/*
+ * STM32 DFSDM - global register map
+ * ________________________________________________________
+ * | Offset | Registers block |
+ * --------------------------------------------------------
+ * | 0x000 | CHANNEL 0 + COMMON CHANNEL FIELDS |
+ * --------------------------------------------------------
+ * | 0x020 | CHANNEL 1 |
+ * --------------------------------------------------------
+ * | ... | ..... |
+ * --------------------------------------------------------
+ * | 0x0E0 | CHANNEL 7 |
+ * --------------------------------------------------------
+ * | 0x100 | FILTER 0 + COMMON FILTER FIELDs |
+ * --------------------------------------------------------
+ * | 0x200 | FILTER 1 |
+ * --------------------------------------------------------
+ * | 0x300 | FILTER 2 |
+ * --------------------------------------------------------
+ * | 0x400 | FILTER 3 |
+ * --------------------------------------------------------
+ */
+
+/*
+ * Channels register definitions
+ */
+#define DFSDM_CHCFGR1(y) ((y) * 0x20 + 0x00)
+#define DFSDM_CHCFGR2(y) ((y) * 0x20 + 0x04)
+#define DFSDM_AWSCDR(y) ((y) * 0x20 + 0x08)
+#define DFSDM_CHWDATR(y) ((y) * 0x20 + 0x0C)
+#define DFSDM_CHDATINR(y) ((y) * 0x20 + 0x10)
+
+/* CHCFGR1: Channel configuration register 1 */
+#define DFSDM_CHCFGR1_SITP_MASK GENMASK(1, 0)
+#define DFSDM_CHCFGR1_SITP(v) FIELD_PREP(DFSDM_CHCFGR1_SITP_MASK, v)
+#define DFSDM_CHCFGR1_SPICKSEL_MASK GENMASK(3, 2)
+#define DFSDM_CHCFGR1_SPICKSEL(v) FIELD_PREP(DFSDM_CHCFGR1_SPICKSEL_MASK, v)
+#define DFSDM_CHCFGR1_SCDEN_MASK BIT(5)
+#define DFSDM_CHCFGR1_SCDEN(v) FIELD_PREP(DFSDM_CHCFGR1_SCDEN_MASK, v)
+#define DFSDM_CHCFGR1_CKABEN_MASK BIT(6)
+#define DFSDM_CHCFGR1_CKABEN(v) FIELD_PREP(DFSDM_CHCFGR1_CKABEN_MASK, v)
+#define DFSDM_CHCFGR1_CHEN_MASK BIT(7)
+#define DFSDM_CHCFGR1_CHEN(v) FIELD_PREP(DFSDM_CHCFGR1_CHEN_MASK, v)
+#define DFSDM_CHCFGR1_CHINSEL_MASK BIT(8)
+#define DFSDM_CHCFGR1_CHINSEL(v) FIELD_PREP(DFSDM_CHCFGR1_CHINSEL_MASK, v)
+#define DFSDM_CHCFGR1_DATMPX_MASK GENMASK(13, 12)
+#define DFSDM_CHCFGR1_DATMPX(v) FIELD_PREP(DFSDM_CHCFGR1_DATMPX_MASK, v)
+#define DFSDM_CHCFGR1_DATPACK_MASK GENMASK(15, 14)
+#define DFSDM_CHCFGR1_DATPACK(v) FIELD_PREP(DFSDM_CHCFGR1_DATPACK_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTDIV_MASK GENMASK(23, 16)
+#define DFSDM_CHCFGR1_CKOUTDIV(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTDIV_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTSRC_MASK BIT(30)
+#define DFSDM_CHCFGR1_CKOUTSRC(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTSRC_MASK, v)
+#define DFSDM_CHCFGR1_DFSDMEN_MASK BIT(31)
+#define DFSDM_CHCFGR1_DFSDMEN(v) FIELD_PREP(DFSDM_CHCFGR1_DFSDMEN_MASK, v)
+
+/* CHCFGR2: Channel configuration register 2 */
+#define DFSDM_CHCFGR2_DTRBS_MASK GENMASK(7, 3)
+#define DFSDM_CHCFGR2_DTRBS(v) FIELD_PREP(DFSDM_CHCFGR2_DTRBS_MASK, v)
+#define DFSDM_CHCFGR2_OFFSET_MASK GENMASK(31, 8)
+#define DFSDM_CHCFGR2_OFFSET(v) FIELD_PREP(DFSDM_CHCFGR2_OFFSET_MASK, v)
+
+/* AWSCDR: Channel analog watchdog and short circuit detector */
+#define DFSDM_AWSCDR_SCDT_MASK GENMASK(7, 0)
+#define DFSDM_AWSCDR_SCDT(v) FIELD_PREP(DFSDM_AWSCDR_SCDT_MASK, v)
+#define DFSDM_AWSCDR_BKSCD_MASK GENMASK(15, 12)
+#define DFSDM_AWSCDR_BKSCD(v) FIELD_PREP(DFSDM_AWSCDR_BKSCD_MASK, v)
+#define DFSDM_AWSCDR_AWFOSR_MASK GENMASK(20, 16)
+#define DFSDM_AWSCDR_AWFOSR(v) FIELD_PREP(DFSDM_AWSCDR_AWFOSR_MASK, v)
+#define DFSDM_AWSCDR_AWFORD_MASK GENMASK(23, 22)
+#define DFSDM_AWSCDR_AWFORD(v) FIELD_PREP(DFSDM_AWSCDR_AWFORD_MASK, v)
+
+/*
+ * Filters register definitions
+ */
+#define DFSDM_FILTER_BASE_ADR 0x100
+#define DFSDM_FILTER_REG_MASK 0x7F
+#define DFSDM_FILTER_X_BASE_ADR(x) ((x) * 0x80 + DFSDM_FILTER_BASE_ADR)
+
+#define DFSDM_CR1(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x00)
+#define DFSDM_CR2(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x04)
+#define DFSDM_ISR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x08)
+#define DFSDM_ICR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x0C)
+#define DFSDM_JCHGR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x10)
+#define DFSDM_FCR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x14)
+#define DFSDM_JDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x18)
+#define DFSDM_RDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x1C)
+#define DFSDM_AWHTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x20)
+#define DFSDM_AWLTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x24)
+#define DFSDM_AWSR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x28)
+#define DFSDM_AWCFR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x2C)
+#define DFSDM_EXMAX(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x30)
+#define DFSDM_EXMIN(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x34)
+#define DFSDM_CNVTIMR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x38)
+
+/* CR1 Control register 1 */
+#define DFSDM_CR1_DFEN_MASK BIT(0)
+#define DFSDM_CR1_DFEN(v) FIELD_PREP(DFSDM_CR1_DFEN_MASK, v)
+#define DFSDM_CR1_JSWSTART_MASK BIT(1)
+#define DFSDM_CR1_JSWSTART(v) FIELD_PREP(DFSDM_CR1_JSWSTART_MASK, v)
+#define DFSDM_CR1_JSYNC_MASK BIT(3)
+#define DFSDM_CR1_JSYNC(v) FIELD_PREP(DFSDM_CR1_JSYNC_MASK, v)
+#define DFSDM_CR1_JSCAN_MASK BIT(4)
+#define DFSDM_CR1_JSCAN(v) FIELD_PREP(DFSDM_CR1_JSCAN_MASK, v)
+#define DFSDM_CR1_JDMAEN_MASK BIT(5)
+#define DFSDM_CR1_JDMAEN(v) FIELD_PREP(DFSDM_CR1_JDMAEN_MASK, v)
+#define DFSDM_CR1_JEXTSEL_MASK GENMASK(12, 8)
+#define DFSDM_CR1_JEXTSEL(v) FIELD_PREP(DFSDM_CR1_JEXTSEL_MASK, v)
+#define DFSDM_CR1_JEXTEN_MASK GENMASK(14, 13)
+#define DFSDM_CR1_JEXTEN(v) FIELD_PREP(DFSDM_CR1_JEXTEN_MASK, v)
+#define DFSDM_CR1_RSWSTART_MASK BIT(17)
+#define DFSDM_CR1_RSWSTART(v) FIELD_PREP(DFSDM_CR1_RSWSTART_MASK, v)
+#define DFSDM_CR1_RCONT_MASK BIT(18)
+#define DFSDM_CR1_RCONT(v) FIELD_PREP(DFSDM_CR1_RCONT_MASK, v)
+#define DFSDM_CR1_RSYNC_MASK BIT(19)
+#define DFSDM_CR1_RSYNC(v) FIELD_PREP(DFSDM_CR1_RSYNC_MASK, v)
+#define DFSDM_CR1_RDMAEN_MASK BIT(21)
+#define DFSDM_CR1_RDMAEN(v) FIELD_PREP(DFSDM_CR1_RDMAEN_MASK, v)
+#define DFSDM_CR1_RCH_MASK GENMASK(26, 24)
+#define DFSDM_CR1_RCH(v) FIELD_PREP(DFSDM_CR1_RCH_MASK, v)
+#define DFSDM_CR1_FAST_MASK BIT(29)
+#define DFSDM_CR1_FAST(v) FIELD_PREP(DFSDM_CR1_FAST_MASK, v)
+#define DFSDM_CR1_AWFSEL_MASK BIT(30)
+#define DFSDM_CR1_AWFSEL(v) FIELD_PREP(DFSDM_CR1_AWFSEL_MASK, v)
+
+/* CR2: Control register 2 */
+#define DFSDM_CR2_IE_MASK GENMASK(6, 0)
+#define DFSDM_CR2_IE(v) FIELD_PREP(DFSDM_CR2_IE_MASK, v)
+#define DFSDM_CR2_JEOCIE_MASK BIT(0)
+#define DFSDM_CR2_JEOCIE(v) FIELD_PREP(DFSDM_CR2_JEOCIE_MASK, v)
+#define DFSDM_CR2_REOCIE_MASK BIT(1)
+#define DFSDM_CR2_REOCIE(v) FIELD_PREP(DFSDM_CR2_REOCIE_MASK, v)
+#define DFSDM_CR2_JOVRIE_MASK BIT(2)
+#define DFSDM_CR2_JOVRIE(v) FIELD_PREP(DFSDM_CR2_JOVRIE_MASK, v)
+#define DFSDM_CR2_ROVRIE_MASK BIT(3)
+#define DFSDM_CR2_ROVRIE(v) FIELD_PREP(DFSDM_CR2_ROVRIE_MASK, v)
+#define DFSDM_CR2_AWDIE_MASK BIT(4)
+#define DFSDM_CR2_AWDIE(v) FIELD_PREP(DFSDM_CR2_AWDIE_MASK, v)
+#define DFSDM_CR2_SCDIE_MASK BIT(5)
+#define DFSDM_CR2_SCDIE(v) FIELD_PREP(DFSDM_CR2_SCDIE_MASK, v)
+#define DFSDM_CR2_CKABIE_MASK BIT(6)
+#define DFSDM_CR2_CKABIE(v) FIELD_PREP(DFSDM_CR2_CKABIE_MASK, v)
+#define DFSDM_CR2_EXCH_MASK GENMASK(15, 8)
+#define DFSDM_CR2_EXCH(v) FIELD_PREP(DFSDM_CR2_EXCH_MASK, v)
+#define DFSDM_CR2_AWDCH_MASK GENMASK(23, 16)
+#define DFSDM_CR2_AWDCH(v) FIELD_PREP(DFSDM_CR2_AWDCH_MASK, v)
+
+/* ISR: Interrupt status register */
+#define DFSDM_ISR_JEOCF_MASK BIT(0)
+#define DFSDM_ISR_JEOCF(v) FIELD_PREP(DFSDM_ISR_JEOCF_MASK, v)
+#define DFSDM_ISR_REOCF_MASK BIT(1)
+#define DFSDM_ISR_REOCF(v) FIELD_PREP(DFSDM_ISR_REOCF_MASK, v)
+#define DFSDM_ISR_JOVRF_MASK BIT(2)
+#define DFSDM_ISR_JOVRF(v) FIELD_PREP(DFSDM_ISR_JOVRF_MASK, v)
+#define DFSDM_ISR_ROVRF_MASK BIT(3)
+#define DFSDM_ISR_ROVRF(v) FIELD_PREP(DFSDM_ISR_ROVRF_MASK, v)
+#define DFSDM_ISR_AWDF_MASK BIT(4)
+#define DFSDM_ISR_AWDF(v) FIELD_PREP(DFSDM_ISR_AWDF_MASK, v)
+#define DFSDM_ISR_JCIP_MASK BIT(13)
+#define DFSDM_ISR_JCIP(v) FIELD_PREP(DFSDM_ISR_JCIP_MASK, v)
+#define DFSDM_ISR_RCIP_MASK BIT(14)
+#define DFSDM_ISR_RCIP(v) FIELD_PREP(DFSDM_ISR_RCIP_MASK, v)
+#define DFSDM_ISR_CKABF_MASK GENMASK(23, 16)
+#define DFSDM_ISR_CKABF(v) FIELD_PREP(DFSDM_ISR_CKABF_MASK, v)
+#define DFSDM_ISR_SCDF_MASK GENMASK(31, 24)
+#define DFSDM_ISR_SCDF(v) FIELD_PREP(DFSDM_ISR_SCDF_MASK, v)
+
+/* ICR: Interrupt flag clear register */
+#define DFSDM_ICR_CLRJOVRF_MASK BIT(2)
+#define DFSDM_ICR_CLRJOVRF(v) FIELD_PREP(DFSDM_ICR_CLRJOVRF_MASK, v)
+#define DFSDM_ICR_CLRROVRF_MASK BIT(3)
+#define DFSDM_ICR_CLRROVRF(v) FIELD_PREP(DFSDM_ICR_CLRROVRF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_MASK GENMASK(23, 16)
+#define DFSDM_ICR_CLRCKABF(v) FIELD_PREP(DFSDM_ICR_CLRCKABF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_CH_MASK(y) BIT(16 + (y))
+#define DFSDM_ICR_CLRCKABF_CH(v, y) \
+ (((v) << (16 + (y))) & DFSDM_ICR_CLRCKABF_CH_MASK(y))
+#define DFSDM_ICR_CLRSCDF_MASK GENMASK(31, 24)
+#define DFSDM_ICR_CLRSCDF(v) FIELD_PREP(DFSDM_ICR_CLRSCDF_MASK, v)
+#define DFSDM_ICR_CLRSCDF_CH_MASK(y) BIT(24 + (y))
+#define DFSDM_ICR_CLRSCDF_CH(v, y) \
+ (((v) << (24 + (y))) & DFSDM_ICR_CLRSCDF_MASK(y))
+
+/* FCR: Filter control register */
+#define DFSDM_FCR_IOSR_MASK GENMASK(7, 0)
+#define DFSDM_FCR_IOSR(v) FIELD_PREP(DFSDM_FCR_IOSR_MASK, v)
+#define DFSDM_FCR_FOSR_MASK GENMASK(25, 16)
+#define DFSDM_FCR_FOSR(v) FIELD_PREP(DFSDM_FCR_FOSR_MASK, v)
+#define DFSDM_FCR_FORD_MASK GENMASK(31, 29)
+#define DFSDM_FCR_FORD(v) FIELD_PREP(DFSDM_FCR_FORD_MASK, v)
+
+/* RDATAR: Filter data register for regular channel */
+#define DFSDM_DATAR_CH_MASK GENMASK(2, 0)
+#define DFSDM_DATAR_DATA_OFFSET 8
+#define DFSDM_DATAR_DATA_MASK GENMASK(31, DFSDM_DATAR_DATA_OFFSET)
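+/*
+ * A signed sample can be extracted from RDATAR with an arithmetic shift
+ * (illustrative):
+ *	s32 sample = (s32)(rdatar & DFSDM_DATAR_DATA_MASK)
+ *			>> DFSDM_DATAR_DATA_OFFSET;
+ */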
+
+/* AWLTR: Filter analog watchdog low threshold register */
+#define DFSDM_AWLTR_BKAWL_MASK GENMASK(3, 0)
+#define DFSDM_AWLTR_BKAWL(v) FIELD_PREP(DFSDM_AWLTR_BKAWL_MASK, v)
+#define DFSDM_AWLTR_AWLT_MASK GENMASK(31, 8)
+#define DFSDM_AWLTR_AWLT(v) FIELD_PREP(DFSDM_AWLTR_AWLT_MASK, v)
+
+/* AWHTR: Filter analog watchdog high threshold register */
+#define DFSDM_AWHTR_BKAWH_MASK GENMASK(3, 0)
+#define DFSDM_AWHTR_BKAWH(v) FIELD_PREP(DFSDM_AWHTR_BKAWH_MASK, v)
+#define DFSDM_AWHTR_AWHT_MASK GENMASK(31, 8)
+#define DFSDM_AWHTR_AWHT(v) FIELD_PREP(DFSDM_AWHTR_AWHT_MASK, v)
+
+/* AWSR: Filter watchdog status register */
+#define DFSDM_AWSR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWSR_AWLTF(v) FIELD_PREP(DFSDM_AWSR_AWLTF_MASK, v)
+#define DFSDM_AWSR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWSR_AWHTF(v) FIELD_PREP(DFSDM_AWSR_AWHTF_MASK, v)
+
+/* AWCFR: Filter watchdog flag clear register */
+#define DFSDM_AWCFR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWCFR_AWLTF(v) FIELD_PREP(DFSDM_AWCFR_AWLTF_MASK, v)
+#define DFSDM_AWCFR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWCFR_AWHTF(v) FIELD_PREP(DFSDM_AWCFR_AWHTF_MASK, v)
+
+/* DFSDM filter order */
+enum stm32_dfsdm_sinc_order {
+ DFSDM_FASTSINC_ORDER, /* FastSinc filter type */
+ DFSDM_SINC1_ORDER, /* Sinc 1 filter type */
+ DFSDM_SINC2_ORDER, /* Sinc 2 filter type */
+ DFSDM_SINC3_ORDER, /* Sinc 3 filter type */
+ DFSDM_SINC4_ORDER, /* Sinc 4 filter type (N.A. for watchdog) */
+ DFSDM_SINC5_ORDER, /* Sinc 5 filter type (N.A. for watchdog) */
+ DFSDM_NB_SINC_ORDER,
+};
+
+/**
+ * struct stm32_dfsdm_filter - structure relative to stm32 DFSDM filter
+ * @iosr: integrator oversampling
+ * @fosr: filter oversampling
+ * @ford: filter order
+ * @res: output sample resolution
+ * @sync_mode: filter synchronized with filter 0
+ * @fast: filter fast mode
+ */
+struct stm32_dfsdm_filter {
+ unsigned int iosr;
+ unsigned int fosr;
+ enum stm32_dfsdm_sinc_order ford;
+ u64 res;
+ unsigned int sync_mode;
+ unsigned int fast;
+};
+
+/**
+ * struct stm32_dfsdm_channel - structure relative to stm32 DFSDM channel
+ * @id: id of the channel
+ * @type: interface type linked to stm32_dfsdm_chan_type
+ * @src: interface type linked to stm32_dfsdm_chan_src
+ * @alt_si: alternative serial input interface
+ */
+struct stm32_dfsdm_channel {
+ unsigned int id;
+ unsigned int type;
+ unsigned int src;
+ unsigned int alt_si;
+};
+
+/**
+ * struct stm32_dfsdm - stm32 DFSDM driver common data (for all instances)
+ * @base: control registers base cpu addr
+ * @phys_base: DFSDM IP register physical address
+ * @regmap: regmap for register read/write
+ * @fl_list: filter resources list
+ * @num_fls: number of filter resources available
+ * @ch_list: channel resources list
+ * @num_chs: number of channel resources available
+ * @spi_master_freq: SPI clock out frequency
+ */
+struct stm32_dfsdm {
+ void __iomem *base;
+ phys_addr_t phys_base;
+ struct regmap *regmap;
+ struct stm32_dfsdm_filter *fl_list;
+ unsigned int num_fls;
+ struct stm32_dfsdm_channel *ch_list;
+ unsigned int num_chs;
+ unsigned int spi_master_freq;
+};
+
+/* DFSDM channel serial spi clock source */
+enum stm32_dfsdm_spi_clk_src {
+ DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING
+};
+
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm);
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm);
+
+#endif
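
The FCR helpers above follow the GENMASK()/FIELD_PREP() pattern used throughout this header: each field macro shifts a value into its mask, and the OR of several masks can drive a single regmap_update_bits() call. A minimal sketch, assuming a caller-supplied register offset and the usual "ratio - 1" encoding of the oversampling fields (both are assumptions for illustration, not taken from this patch):

	/* Sketch only: program a filter's FCR fields in one update. */
	static int dfsdm_cfg_filter_sketch(struct regmap *regmap,
					   unsigned int fcr_offset,
					   unsigned int iosr, unsigned int fosr,
					   enum stm32_dfsdm_sinc_order ford)
	{
		/* Assumed encoding: oversampling fields hold ratio - 1. */
		u32 val = DFSDM_FCR_IOSR(iosr - 1) |
			  DFSDM_FCR_FOSR(fosr - 1) |
			  DFSDM_FCR_FORD(ford);

		return regmap_update_bits(regmap, fcr_offset,
					  DFSDM_FCR_IOSR_MASK |
					  DFSDM_FCR_FOSR_MASK |
					  DFSDM_FCR_FORD_MASK, val);
	}
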
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index b3e573cc6f5f..80df5a377d30 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -523,7 +523,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
}
am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
- if (found == false)
+ if (!found)
ret = -EBUSY;
err_unlock:
diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig
index 4ffd3db7817f..338774cba19b 100644
--- a/drivers/iio/buffer/Kconfig
+++ b/drivers/iio/buffer/Kconfig
@@ -29,6 +29,16 @@ config IIO_BUFFER_DMAENGINE
Should be selected by drivers that want to use this functionality.
+config IIO_BUFFER_HW_CONSUMER
+ tristate "Industrial I/O HW buffering"
+ help
+ Provides a way to bond an IIO device to another device to which it
+ has a direct hardware connection. In this case, the buffers for data
+ transfers are handled by the hardware itself.
+
+ Should be selected by drivers that want to use the generic hardware
+ consumer interface.
+
config IIO_KFIFO_BUF
tristate "Industrial I/O buffering based on kfifo"
help
diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile
index 95f9f41c58b7..1403eb2f9409 100644
--- a/drivers/iio/buffer/Makefile
+++ b/drivers/iio/buffer/Makefile
@@ -7,5 +7,6 @@
obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o
obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o
obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o
+obj-$(CONFIG_IIO_BUFFER_HW_CONSUMER) += industrialio-hw-consumer.o
obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4847534700e7..ea63c838eeae 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -104,6 +104,17 @@ error_free_cb_buff:
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buff,
+ size_t watermark)
+{
+ if (!watermark)
+ return -EINVAL;
+ cb_buff->buffer.watermark = watermark;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_set_buffer_watermark);
+
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
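
The watermark setter above only caches the value in the callback buffer, so it has to run after iio_channel_get_all_cb() and before iio_channel_start_all_cb(). A hedged sketch of the expected ordering; demo_cb, demo_cb_setup and the NULL private pointer are placeholders, not part of this patch:

	/* Sketch only: consumer-side use of the new watermark setter. */
	static int demo_cb(const void *data, void *private)
	{
		return 0;	/* consume one scan element */
	}

	static int demo_cb_setup(struct device *dev)
	{
		struct iio_cb_buffer *cb;
		int ret;

		cb = iio_channel_get_all_cb(dev, demo_cb, NULL);
		if (IS_ERR(cb))
			return PTR_ERR(cb);

		ret = iio_channel_cb_set_buffer_watermark(cb, 16);
		if (ret)
			return ret;	/* zero is rejected with -EINVAL */

		return iio_channel_start_all_cb(cb);
	}
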
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
new file mode 100644
index 000000000000..95165697d8ae
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/buffer_impl.h>
+
+/**
+ * struct iio_hw_consumer - IIO hw consumer block
+ * @buffers: hardware buffers list head.
+ * @channels: IIO provider channels.
+ */
+struct iio_hw_consumer {
+ struct list_head buffers;
+ struct iio_channel *channels;
+};
+
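+/**
+ * struct hw_consumer_buffer - buffer bound to one IIO provider device
+ * @head: list node in the parent iio_hw_consumer buffers list.
+ * @indio_dev: IIO provider device this buffer is attached to.
+ * @buffer: IIO buffer instance registered with the core.
+ * @scan_mask: storage for @buffer's scan mask, sized at allocation.
+ */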
+struct hw_consumer_buffer {
+ struct list_head head;
+ struct iio_dev *indio_dev;
+ struct iio_buffer buffer;
+ long scan_mask[];
+};
+
+static struct hw_consumer_buffer *iio_buffer_to_hw_consumer_buffer(
+ struct iio_buffer *buffer)
+{
+ return container_of(buffer, struct hw_consumer_buffer, buffer);
+}
+
+static void iio_hw_buf_release(struct iio_buffer *buffer)
+{
+ struct hw_consumer_buffer *hw_buf =
+ iio_buffer_to_hw_consumer_buffer(buffer);
+ kfree(hw_buf);
+}
+
+static const struct iio_buffer_access_funcs iio_hw_buf_access = {
+ .release = &iio_hw_buf_release,
+ .modes = INDIO_BUFFER_HARDWARE,
+};
+
+static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
+ struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
+{
+ size_t mask_size = BITS_TO_LONGS(indio_dev->masklength) * sizeof(long);
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ if (buf->indio_dev == indio_dev)
+ return buf;
+ }
+
+ buf = kzalloc(sizeof(*buf) + mask_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->buffer.access = &iio_hw_buf_access;
+ buf->indio_dev = indio_dev;
+ buf->buffer.scan_mask = buf->scan_mask;
+
+ iio_buffer_init(&buf->buffer);
+ list_add_tail(&buf->head, &hwc->buffers);
+
+ return buf;
+}
+
+/**
+ * iio_hw_consumer_alloc() - Allocate IIO hardware consumer
+ * @dev: Pointer to consumer device.
+ *
+ * Returns a valid iio_hw_consumer on success or an ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev)
+{
+ struct hw_consumer_buffer *buf;
+ struct iio_hw_consumer *hwc;
+ struct iio_channel *chan;
+ int ret;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+ if (!hwc)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&hwc->buffers);
+
+ hwc->channels = iio_channel_get_all(dev);
+ if (IS_ERR(hwc->channels)) {
+ ret = PTR_ERR(hwc->channels);
+ goto err_free_hwc;
+ }
+
+ chan = &hwc->channels[0];
+ while (chan->indio_dev) {
+ buf = iio_hw_consumer_get_buffer(hwc, chan->indio_dev);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_put_buffers;
+ }
+ set_bit(chan->channel->scan_index, buf->buffer.scan_mask);
+ chan++;
+ }
+
+ return hwc;
+
+err_put_buffers:
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ iio_channel_release_all(hwc->channels);
+err_free_hwc:
+ kfree(hwc);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_alloc);
+
+/**
+ * iio_hw_consumer_free() - Free IIO hardware consumer
+ * @hwc: hw consumer to free.
+ */
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf, *n;
+
+ iio_channel_release_all(hwc->channels);
+ list_for_each_entry_safe(buf, n, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ kfree(hwc);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_free);
+
+static void devm_iio_hw_consumer_release(struct device *dev, void *res)
+{
+ iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
+}
+
+static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
+{
+ struct iio_hw_consumer **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
+ * @dev: Pointer to consumer device.
+ *
+ * Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function
+ * is automatically freed on driver detach.
+ *
+ * If an iio_hw_consumer allocated with this function needs to be freed
+ * separately, devm_iio_hw_consumer_free() must be used.
+ *
+ * Returns a pointer to the allocated iio_hw_consumer on success,
+ * NULL or an ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
+{
+ struct iio_hw_consumer **ptr, *iio_hwc;
+
+ ptr = devres_alloc(devm_iio_hw_consumer_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ iio_hwc = iio_hw_consumer_alloc(dev);
+ if (IS_ERR(iio_hwc)) {
+ devres_free(ptr);
+ } else {
+ *ptr = iio_hwc;
+ devres_add(dev, ptr);
+ }
+
+ return iio_hwc;
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
+
+/**
+ * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
+ * @dev: Pointer to consumer device.
+ * @hwc: iio_hw_consumer to free.
+ *
+ * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
+ */
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_hw_consumer_release,
+ devm_iio_hw_consumer_match, hwc);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
+
+/**
+ * iio_hw_consumer_enable() - Enable IIO hardware consumer
+ * @hwc: iio_hw_consumer to enable.
+ *
+ * Returns 0 on success.
+ */
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+ int ret;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ ret = iio_update_buffers(buf->indio_dev, &buf->buffer, NULL);
+ if (ret)
+ goto err_disable_buffers;
+ }
+
+ return 0;
+
+err_disable_buffers:
+ list_for_each_entry_continue_reverse(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_enable);
+
+/**
+ * iio_hw_consumer_disable() - Disable IIO hardware consumer
+ * @hwc: iio_hw_consumer to disable.
+ */
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_disable);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Hardware consumer buffer the IIO framework");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 97bce8345c6a..fbe2431f5b81 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -96,7 +96,6 @@ static const struct iio_chan_spec ccs811_channels[] = {
.channel2 = IIO_MOD_CO2,
.modified = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 0,
.scan_type = {
@@ -255,24 +254,18 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
switch (chan->channel2) {
case IIO_MOD_CO2:
*val = 0;
- *val2 = 12834;
+ *val2 = 100;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_MOD_VOC:
*val = 0;
- *val2 = 84246;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val2 = 100;
+ return IIO_VAL_INT_PLUS_NANO;
default:
return -EINVAL;
}
default:
return -EINVAL;
}
- case IIO_CHAN_INFO_OFFSET:
- if (!(chan->type == IIO_CONCENTRATION &&
- chan->channel2 == IIO_MOD_CO2))
- return -EINVAL;
- *val = -400;
- return IIO_VAL_INT;
default:
return -EINVAL;
}
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index ed8063f2da99..7d30c59da3e2 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -191,7 +191,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
- struct cros_ec_device *ec_device;
struct iio_dev *indio_dev;
struct cros_ec_sensors_state *state;
struct iio_chan_spec *channel;
@@ -201,7 +200,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "No CROS EC device found.\n");
return -EINVAL;
}
- ec_device = ec_dev->ec_dev;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
if (!indio_dev)
diff --git a/drivers/iio/common/ssp_sensors/ssp.h b/drivers/iio/common/ssp_sensors/ssp.h
index b910e91d7c0d..82a01addd919 100644
--- a/drivers/iio/common/ssp_sensors/ssp.h
+++ b/drivers/iio/common/ssp_sensors/ssp.h
@@ -188,7 +188,7 @@ struct ssp_sensorhub_info {
*/
struct ssp_data {
struct spi_device *spi;
- struct ssp_sensorhub_info *sensorhub_info;
+ const struct ssp_sensorhub_info *sensorhub_info;
struct timer_list wdt_timer;
struct work_struct work_wdt;
struct delayed_work work_refresh;
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 2ba2ff5e59c4..af3aa38f67cd 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -486,7 +486,7 @@ static struct ssp_data *ssp_parse_dt(struct device *dev)
if (!match)
goto err_mcu_reset_gpio;
- data->sensorhub_info = (struct ssp_sensorhub_info *)match->data;
+ data->sensorhub_info = match->data;
dev_set_drvdata(dev, data);
diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c
index 704284a475ae..2ab106bb3e03 100644
--- a/drivers/iio/common/ssp_sensors/ssp_spi.c
+++ b/drivers/iio/common/ssp_sensors/ssp_spi.c
@@ -277,12 +277,9 @@ static int ssp_handle_big_data(struct ssp_data *data, char *dataframe, int *idx)
static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
{
int idx, sd;
- struct timespec ts;
struct ssp_sensor_data *spd;
struct iio_dev **indio_devs = data->sensor_devs;
- getnstimeofday(&ts);
-
for (idx = 0; idx < len;) {
switch (dataframe[idx++]) {
case SSP_MSG2AP_INST_BYPASS_DATA:
@@ -329,7 +326,7 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
}
if (data->time_syncing)
- data->timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+ data->timestamp = ktime_get_real_ns();
return 0;
}
diff --git a/drivers/iio/counter/stm32-lptimer-cnt.c b/drivers/iio/counter/stm32-lptimer-cnt.c
index 81ae5f74216d..42fb8ba67090 100644
--- a/drivers/iio/counter/stm32-lptimer-cnt.c
+++ b/drivers/iio/counter/stm32-lptimer-cnt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer Encoder and Counter driver
*
@@ -7,7 +8,6 @@
*
* Inspired by 104-quad-8 and stm32-timer-trigger drivers.
*
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/bitfield.h>
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index afa856d10c26..8b5aad4c32d9 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -476,7 +476,7 @@ static int mcp4725_probe(struct i2c_client *client,
goto err_disable_vref_reg;
}
pd = (inbuf[0] >> 1) & 0x3;
- data->powerdown = pd > 0 ? true : false;
+ data->powerdown = pd > 0;
data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
if (data->id == MCP4726)
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index 55026fe1c610..d0fb3124de07 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is part of STM32 DAC driver
*
* Copyright (C) 2017, STMicroelectronics - All Rights Reserved
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
*
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/iio/dac/stm32-dac-core.h b/drivers/iio/dac/stm32-dac-core.h
index daf09931857c..d3b415fb9575 100644
--- a/drivers/iio/dac/stm32-dac-core.h
+++ b/drivers/iio/dac/stm32-dac-core.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is part of STM32 DAC driver
*
* Copyright (C) 2017, STMicroelectronics - All Rights Reserved
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
- *
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __STM32_DAC_CORE_H
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index 9ffab02bf9f9..cce26a3a6627 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is part of STM32 DAC driver
*
* Copyright (C) 2017, STMicroelectronics - All Rights Reserved
* Authors: Amelie Delaunay <amelie.delaunay@st.com>
* Fabrice Gasnier <fabrice.gasnier@st.com>
- *
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bitfield.h>
diff --git a/drivers/iio/dummy/iio_dummy_evgen.c b/drivers/iio/dummy/iio_dummy_evgen.c
index fe8884543da0..efd0005f59b4 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.c
+++ b/drivers/iio/dummy/iio_dummy_evgen.c
@@ -56,7 +56,7 @@ static int iio_dummy_evgen_create(void)
return -ENOMEM;
ret = irq_sim_init(&iio_evgen->irq_sim, IIO_EVENTGEN_NO);
- if (ret) {
+ if (ret < 0) {
kfree(iio_evgen);
return ret;
}
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index 90ec4bed62b7..605eee23780c 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -124,7 +124,7 @@ static int adis16136_show_product_id(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16136_product_id_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16136_product_id_fops,
adis16136_show_product_id, NULL, "%llu\n");
static int adis16136_show_flash_count(void *arg, u64 *val)
@@ -142,18 +142,21 @@ static int adis16136_show_flash_count(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16136_flash_count_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16136_flash_count_fops,
adis16136_show_flash_count, NULL, "%lld\n");
static int adis16136_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
- debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
- adis16136, &adis16136_serial_fops);
- debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
+ debugfs_create_file_unsafe("serial_number", 0400,
+ indio_dev->debugfs_dentry, adis16136,
+ &adis16136_serial_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ indio_dev->debugfs_dentry,
adis16136, &adis16136_product_id_fops);
- debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+ debugfs_create_file_unsafe("flash_count", 0400,
+ indio_dev->debugfs_dentry,
adis16136, &adis16136_flash_count_fops);
return 0;
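
DEFINE_DEBUGFS_ATTRIBUTE() attributes take their protection through debugfs_file_get()/debugfs_file_put() in the open path, so they pair with debugfs_create_file_unsafe(), which skips the proxy file operations that debugfs_create_file() would otherwise wrap around them. The same two-step conversion recurs in every hunk of this kind; a generic sketch (the names and the parent dentry are placeholders):

	/* Sketch only: the pattern these debugfs conversions apply. */
	static int demo_show(void *arg, u64 *val)
	{
		*val = 42;	/* placeholder value */
		return 0;
	}
	DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_show, NULL, "%llu\n");

	/* at init time, with "parent" a previously created dentry: */
	debugfs_create_file_unsafe("demo", 0400, parent, NULL, &demo_fops);
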
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 15046172e437..63ca31628a93 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -27,7 +27,6 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
-#include <linux/delay.h>
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 147a8c14235f..15ccadc74891 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -3,6 +3,9 @@
*
* Copyright (C) 2017 Matt Ranostay <matt@ranostay.consulting>
*
+ * Support for MAX30105 optical particle sensor
+ * Copyright (C) 2017 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -13,6 +16,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
+ * 7-bit I2C chip address: 0x57
* TODO: proximity power saving feature
*/
@@ -32,6 +36,18 @@
#define MAX30102_REGMAP_NAME "max30102_regmap"
#define MAX30102_DRV_NAME "max30102"
+#define MAX30102_PART_NUMBER 0x15
+
+enum max30102_chip_id {
+ max30102,
+ max30105,
+};
+
+enum max30102_led_idx {
+ MAX30102_LED_RED,
+ MAX30102_LED_IR,
+ MAX30105_LED_GREEN,
+};
#define MAX30102_REG_INT_STATUS 0x00
#define MAX30102_REG_INT_STATUS_PWR_RDY BIT(0)
@@ -52,7 +68,7 @@
#define MAX30102_REG_FIFO_OVR_CTR 0x05
#define MAX30102_REG_FIFO_RD_PTR 0x06
#define MAX30102_REG_FIFO_DATA 0x07
-#define MAX30102_REG_FIFO_DATA_ENTRY_LEN 6
+#define MAX30102_REG_FIFO_DATA_BYTES 3
#define MAX30102_REG_FIFO_CONFIG 0x08
#define MAX30102_REG_FIFO_CONFIG_AVG_4SAMPLES BIT(1)
@@ -60,11 +76,18 @@
#define MAX30102_REG_FIFO_CONFIG_AFULL BIT(0)
#define MAX30102_REG_MODE_CONFIG 0x09
-#define MAX30102_REG_MODE_CONFIG_MODE_SPO2_EN BIT(0)
-#define MAX30102_REG_MODE_CONFIG_MODE_HR_EN BIT(1)
-#define MAX30102_REG_MODE_CONFIG_MODE_MASK 0x03
+#define MAX30102_REG_MODE_CONFIG_MODE_NONE 0x00
+#define MAX30102_REG_MODE_CONFIG_MODE_HR 0x02 /* red LED */
+#define MAX30102_REG_MODE_CONFIG_MODE_HR_SPO2 0x03 /* red + IR LED */
+#define MAX30102_REG_MODE_CONFIG_MODE_MULTI 0x07 /* multi-LED mode */
+#define MAX30102_REG_MODE_CONFIG_MODE_MASK GENMASK(2, 0)
#define MAX30102_REG_MODE_CONFIG_PWR BIT(7)
+#define MAX30102_REG_MODE_CONTROL_SLOT21 0x11 /* multi-LED control */
+#define MAX30102_REG_MODE_CONTROL_SLOT43 0x12
+#define MAX30102_REG_MODE_CONTROL_SLOT_MASK (GENMASK(6, 4) | GENMASK(2, 0))
+#define MAX30102_REG_MODE_CONTROL_SLOT_SHIFT 4
+
#define MAX30102_REG_SPO2_CONFIG 0x0a
#define MAX30102_REG_SPO2_CONFIG_PULSE_411_US 0x03
#define MAX30102_REG_SPO2_CONFIG_SR_400HZ 0x03
@@ -75,6 +98,7 @@
#define MAX30102_REG_RED_LED_CONFIG 0x0c
#define MAX30102_REG_IR_LED_CONFIG 0x0d
+#define MAX30105_REG_GREEN_LED_CONFIG 0x0e
#define MAX30102_REG_TEMP_CONFIG 0x21
#define MAX30102_REG_TEMP_CONFIG_TEMP_EN BIT(0)
@@ -82,14 +106,18 @@
#define MAX30102_REG_TEMP_INTEGER 0x1f
#define MAX30102_REG_TEMP_FRACTION 0x20
+#define MAX30102_REG_REV_ID 0xfe
+#define MAX30102_REG_PART_ID 0xff
+
struct max30102_data {
struct i2c_client *client;
struct iio_dev *indio_dev;
struct mutex lock;
struct regmap *regmap;
+ enum max30102_chip_id chip_id;
- u8 buffer[8];
- __be32 processed_buffer[2]; /* 2 x 18-bit (padded to 32-bits) */
+ u8 buffer[12];
+ __be32 processed_buffer[3]; /* 3 x 18-bit (padded to 32-bits) */
};
static const struct regmap_config max30102_regmap_config = {
@@ -99,37 +127,47 @@ static const struct regmap_config max30102_regmap_config = {
.val_bits = 8,
};
-static const unsigned long max30102_scan_masks[] = {0x3, 0};
+static const unsigned long max30102_scan_masks[] = {
+ BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR),
+ 0
+};
+
+static const unsigned long max30105_scan_masks[] = {
+ BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR),
+ BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR) |
+ BIT(MAX30105_LED_GREEN),
+ 0
+};
+
+#define MAX30102_INTENSITY_CHANNEL(_si, _mod) { \
+ .type = IIO_INTENSITY, \
+ .channel2 = _mod, \
+ .modified = 1, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .shift = 8, \
+ .realbits = 18, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
static const struct iio_chan_spec max30102_channels[] = {
+ MAX30102_INTENSITY_CHANNEL(MAX30102_LED_RED, IIO_MOD_LIGHT_RED),
+ MAX30102_INTENSITY_CHANNEL(MAX30102_LED_IR, IIO_MOD_LIGHT_IR),
{
- .type = IIO_INTENSITY,
- .channel2 = IIO_MOD_LIGHT_RED,
- .modified = 1,
-
- .scan_index = 0,
- .scan_type = {
- .sign = 'u',
- .shift = 8,
- .realbits = 18,
- .storagebits = 32,
- .endianness = IIO_BE,
- },
- },
- {
- .type = IIO_INTENSITY,
- .channel2 = IIO_MOD_LIGHT_IR,
- .modified = 1,
-
- .scan_index = 1,
- .scan_type = {
- .sign = 'u',
- .shift = 8,
- .realbits = 18,
- .storagebits = 32,
- .endianness = IIO_BE,
- },
+ .type = IIO_TEMP,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = -1,
},
+};
+
+static const struct iio_chan_spec max30105_channels[] = {
+ MAX30102_INTENSITY_CHANNEL(MAX30102_LED_RED, IIO_MOD_LIGHT_RED),
+ MAX30102_INTENSITY_CHANNEL(MAX30102_LED_IR, IIO_MOD_LIGHT_IR),
+ MAX30102_INTENSITY_CHANNEL(MAX30105_LED_GREEN, IIO_MOD_LIGHT_GREEN),
{
.type = IIO_TEMP,
.info_mask_separate =
@@ -138,25 +176,69 @@ static const struct iio_chan_spec max30102_channels[] = {
},
};
-static int max30102_set_powermode(struct max30102_data *data, bool state)
+static int max30102_set_power(struct max30102_data *data, bool en)
{
return regmap_update_bits(data->regmap, MAX30102_REG_MODE_CONFIG,
MAX30102_REG_MODE_CONFIG_PWR,
- state ? 0 : MAX30102_REG_MODE_CONFIG_PWR);
+ en ? 0 : MAX30102_REG_MODE_CONFIG_PWR);
}
+static int max30102_set_powermode(struct max30102_data *data, u8 mode, bool en)
+{
+ u8 reg = mode;
+
+ if (!en)
+ reg |= MAX30102_REG_MODE_CONFIG_PWR;
+
+ return regmap_update_bits(data->regmap, MAX30102_REG_MODE_CONFIG,
+ MAX30102_REG_MODE_CONFIG_PWR |
+ MAX30102_REG_MODE_CONFIG_MODE_MASK, reg);
+}
+
+#define MAX30102_MODE_CONTROL_LED_SLOTS(slot2, slot1) \
+ (((slot2) << MAX30102_REG_MODE_CONTROL_SLOT_SHIFT) | (slot1))
+
static int max30102_buffer_postenable(struct iio_dev *indio_dev)
{
struct max30102_data *data = iio_priv(indio_dev);
+ int ret;
+ u8 reg;
- return max30102_set_powermode(data, true);
+ switch (*indio_dev->active_scan_mask) {
+ case BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR):
+ reg = MAX30102_REG_MODE_CONFIG_MODE_HR_SPO2;
+ break;
+ case BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR) |
+ BIT(MAX30105_LED_GREEN):
+ ret = regmap_update_bits(data->regmap,
+ MAX30102_REG_MODE_CONTROL_SLOT21,
+ MAX30102_REG_MODE_CONTROL_SLOT_MASK,
+ MAX30102_MODE_CONTROL_LED_SLOTS(2, 1));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap,
+ MAX30102_REG_MODE_CONTROL_SLOT43,
+ MAX30102_REG_MODE_CONTROL_SLOT_MASK,
+ MAX30102_MODE_CONTROL_LED_SLOTS(0, 3));
+ if (ret)
+ return ret;
+
+ reg = MAX30102_REG_MODE_CONFIG_MODE_MULTI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return max30102_set_powermode(data, reg, true);
}
static int max30102_buffer_predisable(struct iio_dev *indio_dev)
{
struct max30102_data *data = iio_priv(indio_dev);
- return max30102_set_powermode(data, false);
+ return max30102_set_powermode(data, MAX30102_REG_MODE_CONFIG_MODE_NONE,
+ false);
}
static const struct iio_buffer_setup_ops max30102_buffer_setup_ops = {
@@ -180,32 +262,51 @@ static inline int max30102_fifo_count(struct max30102_data *data)
return 0;
}
-static int max30102_read_measurement(struct max30102_data *data)
+#define MAX30102_COPY_DATA(i) \
+ memcpy(&data->processed_buffer[(i)], \
+ &buffer[(i) * MAX30102_REG_FIFO_DATA_BYTES], \
+ MAX30102_REG_FIFO_DATA_BYTES)
+
+static int max30102_read_measurement(struct max30102_data *data,
+ unsigned int measurements)
{
int ret;
u8 *buffer = (u8 *) &data->buffer;
ret = i2c_smbus_read_i2c_block_data(data->client,
MAX30102_REG_FIFO_DATA,
- MAX30102_REG_FIFO_DATA_ENTRY_LEN,
+ measurements *
+ MAX30102_REG_FIFO_DATA_BYTES,
buffer);
- memcpy(&data->processed_buffer[0], &buffer[0], 3);
- memcpy(&data->processed_buffer[1], &buffer[3], 3);
+ switch (measurements) {
+ case 3:
+ MAX30102_COPY_DATA(2);
+ case 2: /* fall-through */
+ MAX30102_COPY_DATA(1);
+ case 1: /* fall-through */
+ MAX30102_COPY_DATA(0);
+ break;
+ default:
+ return -EINVAL;
+ }
- return (ret == MAX30102_REG_FIFO_DATA_ENTRY_LEN) ? 0 : -EINVAL;
+ return (ret == measurements * MAX30102_REG_FIFO_DATA_BYTES) ?
+ 0 : -EINVAL;
}
static irqreturn_t max30102_interrupt_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct max30102_data *data = iio_priv(indio_dev);
+ unsigned int measurements = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
int ret, cnt = 0;
mutex_lock(&data->lock);
while (cnt || (cnt = max30102_fifo_count(data)) > 0) {
- ret = max30102_read_measurement(data);
+ ret = max30102_read_measurement(data, measurements);
if (ret)
break;
@@ -251,6 +352,29 @@ static int max30102_led_init(struct max30102_data *data)
if (ret)
return ret;
+ if (data->chip_id == max30105) {
+ ret = of_property_read_u32(np,
+ "maxim,green-led-current-microamp", &val);
+ if (ret) {
+ dev_info(dev, "no green-led-current-microamp set\n");
+
+ /* Default to 7 mA green LED */
+ val = 7000;
+ }
+
+ ret = max30102_get_current_idx(val, &reg);
+ if (ret) {
+ dev_err(dev, "invalid green LED current setting %d\n",
+ val);
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, MAX30105_REG_GREEN_LED_CONFIG,
+ reg);
+ if (ret)
+ return ret;
+ }
+
ret = of_property_read_u32(np, "maxim,ir-led-current-microamp", &val);
if (ret) {
dev_info(dev, "no ir-led-current-microamp set\n");
@@ -261,7 +385,7 @@ static int max30102_led_init(struct max30102_data *data)
ret = max30102_get_current_idx(val, &reg);
if (ret) {
- dev_err(dev, "invalid IR LED current setting %d", val);
+ dev_err(dev, "invalid IR LED current setting %d\n", val);
return ret;
}
@@ -277,7 +401,7 @@ static int max30102_chip_init(struct max30102_data *data)
if (ret)
return ret;
- /* enable 18-bit HR + SPO2 readings at 400Hz */
+ /* configure 18-bit HR + SpO2 readings at 400Hz */
ret = regmap_write(data->regmap, MAX30102_REG_SPO2_CONFIG,
(MAX30102_REG_SPO2_CONFIG_ADC_4096_STEPS
<< MAX30102_REG_SPO2_CONFIG_ADC_MASK_SHIFT) |
@@ -287,14 +411,6 @@ static int max30102_chip_init(struct max30102_data *data)
if (ret)
return ret;
- /* enable SPO2 mode */
- ret = regmap_update_bits(data->regmap, MAX30102_REG_MODE_CONFIG,
- MAX30102_REG_MODE_CONFIG_MODE_MASK,
- MAX30102_REG_MODE_CONFIG_MODE_HR_EN |
- MAX30102_REG_MODE_CONFIG_MODE_SPO2_EN);
- if (ret)
- return ret;
-
/* average 4 samples + generate FIFO interrupt */
ret = regmap_write(data->regmap, MAX30102_REG_FIFO_CONFIG,
(MAX30102_REG_FIFO_CONFIG_AVG_4SAMPLES
@@ -329,20 +445,31 @@ static int max30102_read_temp(struct max30102_data *data, int *val)
return 0;
}
-static int max30102_get_temp(struct max30102_data *data, int *val)
+static int max30102_get_temp(struct max30102_data *data, int *val, bool en)
{
int ret;
+ if (en) {
+ ret = max30102_set_power(data, true);
+ if (ret)
+ return ret;
+ }
+
/* start acquisition */
ret = regmap_update_bits(data->regmap, MAX30102_REG_TEMP_CONFIG,
MAX30102_REG_TEMP_CONFIG_TEMP_EN,
MAX30102_REG_TEMP_CONFIG_TEMP_EN);
if (ret)
- return ret;
+ goto out;
msleep(35);
+ ret = max30102_read_temp(data, val);
- return max30102_read_temp(data, val);
+out:
+ if (en)
+ max30102_set_power(data, false);
+
+ return ret;
}
static int max30102_read_raw(struct iio_dev *indio_dev,
@@ -355,20 +482,19 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
/*
- * Temperature reading can only be acquired while engine
- * is running
+ * Temperature reading can only be acquired when not in
+ * shutdown; leave shutdown briefly when buffer not running
*/
mutex_lock(&indio_dev->mlock);
-
if (!iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else {
- ret = max30102_get_temp(data, val);
- if (!ret)
- ret = IIO_VAL_INT;
- }
-
+ ret = max30102_get_temp(data, val, true);
+ else
+ ret = max30102_get_temp(data, val, false);
mutex_unlock(&indio_dev->mlock);
+ if (ret)
+ return ret;
+
+ ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
*val = 1000; /* 62.5 */
@@ -391,6 +517,7 @@ static int max30102_probe(struct i2c_client *client,
struct iio_buffer *buffer;
struct iio_dev *indio_dev;
int ret;
+ unsigned int reg;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -403,10 +530,7 @@ static int max30102_probe(struct i2c_client *client,
iio_device_attach_buffer(indio_dev, buffer);
indio_dev->name = MAX30102_DRV_NAME;
- indio_dev->channels = max30102_channels;
indio_dev->info = &max30102_info;
- indio_dev->num_channels = ARRAY_SIZE(max30102_channels);
- indio_dev->available_scan_masks = max30102_scan_masks;
indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
indio_dev->setup_ops = &max30102_buffer_setup_ops;
indio_dev->dev.parent = &client->dev;
@@ -414,16 +538,50 @@ static int max30102_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
data->indio_dev = indio_dev;
data->client = client;
+ data->chip_id = id->driver_data;
mutex_init(&data->lock);
i2c_set_clientdata(client, indio_dev);
+ switch (data->chip_id) {
+ case max30105:
+ indio_dev->channels = max30105_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max30105_channels);
+ indio_dev->available_scan_masks = max30105_scan_masks;
+ break;
+ case max30102:
+ indio_dev->channels = max30102_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max30102_channels);
+ indio_dev->available_scan_masks = max30102_scan_masks;
+ break;
+ default:
+ return -ENODEV;
+ }
+
data->regmap = devm_regmap_init_i2c(client, &max30102_regmap_config);
if (IS_ERR(data->regmap)) {
- dev_err(&client->dev, "regmap initialization failed.\n");
+ dev_err(&client->dev, "regmap initialization failed\n");
return PTR_ERR(data->regmap);
}
- max30102_set_powermode(data, false);
+
+ /* check part ID */
+ ret = regmap_read(data->regmap, MAX30102_REG_PART_ID, &reg);
+ if (ret)
+ return ret;
+ if (reg != MAX30102_PART_NUMBER)
+ return -ENODEV;
+
+ /* show revision ID */
+ ret = regmap_read(data->regmap, MAX30102_REG_REV_ID, &reg);
+ if (ret)
+ return ret;
+ dev_dbg(&client->dev, "max3010x revision %02x\n", reg);
+
+ /* clear mode setting, chip shutdown */
+ ret = max30102_set_powermode(data, MAX30102_REG_MODE_CONFIG_MODE_NONE,
+ false);
+ if (ret)
+ return ret;
ret = max30102_chip_init(data);
if (ret)
@@ -452,19 +610,21 @@ static int max30102_remove(struct i2c_client *client)
struct max30102_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- max30102_set_powermode(data, false);
+ max30102_set_power(data, false);
return 0;
}
static const struct i2c_device_id max30102_id[] = {
- { "max30102", 0 },
+ { "max30102", max30102 },
+ { "max30105", max30105 },
{}
};
MODULE_DEVICE_TABLE(i2c, max30102_id);
static const struct of_device_id max30102_dt_ids[] = {
{ .compatible = "maxim,max30102" },
+ { .compatible = "maxim,max30105" },
{ }
};
MODULE_DEVICE_TABLE(of, max30102_dt_ids);
@@ -481,5 +641,5 @@ static struct i2c_driver max30102_driver = {
module_i2c_driver(max30102_driver);
MODULE_AUTHOR("Matt Ranostay <matt@ranostay.consulting>");
-MODULE_DESCRIPTION("MAX30102 heart rate and pulse oximeter sensor");
+MODULE_DESCRIPTION("MAX30102 heart rate/pulse oximeter and MAX30105 particle sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
index 51d021966222..c581af8c0f5d 100644
--- a/drivers/iio/humidity/hts221.h
+++ b/drivers/iio/humidity/hts221.h
@@ -61,7 +61,8 @@ struct hts221_hw {
extern const struct dev_pm_ops hts221_pm_ops;
int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, u8 val);
-int hts221_probe(struct iio_dev *iio_dev);
+int hts221_probe(struct device *dev, int irq, const char *name,
+ const struct hts221_transfer_function *tf_ops);
int hts221_set_enable(struct hts221_hw *hw, bool enable);
int hts221_allocate_buffers(struct hts221_hw *hw);
int hts221_allocate_trigger(struct hts221_hw *hw);
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index daef177219b6..d3f7904766bd 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -581,12 +581,26 @@ static const struct iio_info hts221_info = {
static const unsigned long hts221_scan_masks[] = {0x3, 0x0};
-int hts221_probe(struct iio_dev *iio_dev)
+int hts221_probe(struct device *dev, int irq, const char *name,
+ const struct hts221_transfer_function *tf_ops)
{
- struct hts221_hw *hw = iio_priv(iio_dev);
+ struct iio_dev *iio_dev;
+ struct hts221_hw *hw;
int err;
u8 data;
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*hw));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, (void *)iio_dev);
+
+ hw = iio_priv(iio_dev);
+ hw->name = name;
+ hw->dev = dev;
+ hw->irq = irq;
+ hw->tf = tf_ops;
+
mutex_init(&hw->lock);
err = hts221_check_whoami(hw);
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index f38e4b7e0160..2c97350a0f76 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -66,22 +66,8 @@ static const struct hts221_transfer_function hts221_transfer_fn = {
static int hts221_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct hts221_hw *hw;
- struct iio_dev *iio_dev;
-
- iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*hw));
- if (!iio_dev)
- return -ENOMEM;
-
- i2c_set_clientdata(client, iio_dev);
-
- hw = iio_priv(iio_dev);
- hw->name = client->name;
- hw->dev = &client->dev;
- hw->irq = client->irq;
- hw->tf = &hts221_transfer_fn;
-
- return hts221_probe(iio_dev);
+ return hts221_probe(&client->dev, client->irq,
+ client->name, &hts221_transfer_fn);
}
static const struct acpi_device_id hts221_acpi_match[] = {
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
index 57cbc256771b..55b29b53b9d1 100644
--- a/drivers/iio/humidity/hts221_spi.c
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -80,22 +80,8 @@ static const struct hts221_transfer_function hts221_transfer_fn = {
static int hts221_spi_probe(struct spi_device *spi)
{
- struct hts221_hw *hw;
- struct iio_dev *iio_dev;
-
- iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*hw));
- if (!iio_dev)
- return -ENOMEM;
-
- spi_set_drvdata(spi, iio_dev);
-
- hw = iio_priv(iio_dev);
- hw->name = spi->modalias;
- hw->dev = &spi->dev;
- hw->irq = spi->irq;
- hw->tf = &hts221_transfer_fn;
-
- return hts221_probe(iio_dev);
+ return hts221_probe(&spi->dev, spi->irq,
+ spi->modalias, &hts221_transfer_fn);
}
static const struct of_device_id hts221_spi_of_match[] = {
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 4c45488e3a7f..c775fedbcaf6 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -43,7 +43,7 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
-unsigned int iio_buffer_poll(struct file *filp,
+__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 7a33d6bd60e0..a27fe208f3ae 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -194,7 +194,7 @@ static int adis16480_show_serial_number(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16480_serial_number_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16480_serial_number_fops,
adis16480_show_serial_number, NULL, "0x%.4llx\n");
static int adis16480_show_product_id(void *arg, u64 *val)
@@ -212,7 +212,7 @@ static int adis16480_show_product_id(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16480_product_id_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16480_product_id_fops,
adis16480_show_product_id, NULL, "%llu\n");
static int adis16480_show_flash_count(void *arg, u64 *val)
@@ -230,24 +230,28 @@ static int adis16480_show_flash_count(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16480_flash_count_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16480_flash_count_fops,
adis16480_show_flash_count, NULL, "%lld\n");
static int adis16480_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16480 *adis16480 = iio_priv(indio_dev);
- debugfs_create_file("firmware_revision", 0400,
+ debugfs_create_file_unsafe("firmware_revision", 0400,
indio_dev->debugfs_dentry, adis16480,
&adis16480_firmware_revision_fops);
- debugfs_create_file("firmware_date", 0400, indio_dev->debugfs_dentry,
- adis16480, &adis16480_firmware_date_fops);
- debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
- adis16480, &adis16480_serial_number_fops);
- debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
- adis16480, &adis16480_product_id_fops);
- debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
- adis16480, &adis16480_flash_count_fops);
+ debugfs_create_file_unsafe("firmware_date", 0400,
+ indio_dev->debugfs_dentry, adis16480,
+ &adis16480_firmware_date_fops);
+ debugfs_create_file_unsafe("serial_number", 0400,
+ indio_dev->debugfs_dentry, adis16480,
+ &adis16480_serial_number_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ indio_dev->debugfs_dentry, adis16480,
+ &adis16480_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ indio_dev->debugfs_dentry, adis16480,
+ &adis16480_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index dd6fc6d21f9d..d78a10403bac 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -196,8 +196,7 @@ void inv_mpu_acpi_delete_mux_client(struct i2c_client *client)
{
struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev));
- if (st->mux_client)
- i2c_unregister_device(st->mux_client);
+ i2c_unregister_device(st->mux_client);
}
#else
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index e57337159b57..14f2eb6e9fb7 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -16,7 +16,9 @@ config IIO_ST_LSM6DSX
config IIO_ST_LSM6DSX_I2C
tristate
depends on IIO_ST_LSM6DSX
+ select REGMAP_I2C
config IIO_ST_LSM6DSX_SPI
tristate
depends on IIO_ST_LSM6DSX
+ select REGMAP_SPI
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 4fdb7fcc3ea8..8fdd723afa05 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -27,23 +27,12 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DSX_MAX_ID,
};
+#define ST_LSM6DSX_BUFF_SIZE 256
#define ST_LSM6DSX_CHAN_SIZE 2
#define ST_LSM6DSX_SAMPLE_SIZE 6
-
-#if defined(CONFIG_SPI_MASTER)
-#define ST_LSM6DSX_RX_MAX_LENGTH 256
-#define ST_LSM6DSX_TX_MAX_LENGTH 8
-
-struct st_lsm6dsx_transfer_buffer {
- u8 rx_buf[ST_LSM6DSX_RX_MAX_LENGTH];
- u8 tx_buf[ST_LSM6DSX_TX_MAX_LENGTH] ____cacheline_aligned;
-};
-#endif /* CONFIG_SPI_MASTER */
-
-struct st_lsm6dsx_transfer_function {
- int (*read)(struct device *dev, u8 addr, int len, u8 *data);
- int (*write)(struct device *dev, u8 addr, int len, u8 *data);
-};
+#define ST_LSM6DSX_MAX_WORD_LEN ((32 / ST_LSM6DSX_SAMPLE_SIZE) * \
+ ST_LSM6DSX_SAMPLE_SIZE)
+#define ST_LSM6DSX_SHIFT_VAL(val, mask) (((val) << __ffs(mask)) & (mask))
struct st_lsm6dsx_reg {
u8 addr;
@@ -127,47 +116,43 @@ struct st_lsm6dsx_sensor {
/**
* struct st_lsm6dsx_hw - ST IMU MEMS hw instance
* @dev: Pointer to instance of struct device (I2C or SPI).
+ * @regmap: Register map of the device.
* @irq: Device interrupt line (I2C or SPI).
- * @lock: Mutex to protect read and write operations.
* @fifo_lock: Mutex to prevent concurrent access to the hw FIFO.
+ * @conf_lock: Mutex to prevent concurrent FIFO configuration update.
* @fifo_mode: FIFO operating mode supported by the device.
* @enable_mask: Enabled sensor bitmask.
* @sip: Total number of samples (acc/gyro) in a given pattern.
+ * @buff: Device read buffer.
* @iio_devs: Pointers to acc/gyro iio_dev instances.
* @settings: Pointer to the specific sensor settings in use.
- * @tf: Transfer function structure used by I/O operations.
- * @tb: Transfer buffers used by SPI I/O operations.
*/
struct st_lsm6dsx_hw {
struct device *dev;
+ struct regmap *regmap;
int irq;
- struct mutex lock;
struct mutex fifo_lock;
+ struct mutex conf_lock;
enum st_lsm6dsx_fifo_mode fifo_mode;
u8 enable_mask;
u8 sip;
+ u8 *buff;
+
struct iio_dev *iio_devs[ST_LSM6DSX_ID_MAX];
const struct st_lsm6dsx_settings *settings;
-
- const struct st_lsm6dsx_transfer_function *tf;
-#if defined(CONFIG_SPI_MASTER)
- struct st_lsm6dsx_transfer_buffer tb;
-#endif /* CONFIG_SPI_MASTER */
};
extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
- const struct st_lsm6dsx_transfer_function *tf_ops);
+ struct regmap *regmap);
int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor);
int st_lsm6dsx_sensor_disable(struct st_lsm6dsx_sensor *sensor);
int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw);
-int st_lsm6dsx_write_with_mask(struct st_lsm6dsx_hw *hw, u8 addr, u8 mask,
- u8 val);
int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor,
u16 watermark);
int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw);
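
ST_LSM6DSX_SHIFT_VAL() is what lets the removed st_lsm6dsx_write_with_mask() helper collapse into plain regmap_update_bits() calls: it aligns a field value with its mask as a pure expression. A quick sketch of the equivalence, with made-up register and mask values:

	/* Sketch only: demo register/mask, not from the real register map. */
	#define DEMO_REG_CTRL1_ADDR	0x10
	#define DEMO_CTRL1_ODR_MASK	GENMASK(7, 4)

	static int demo_set_odr_val(struct regmap *regmap, u8 val)
	{
		/* val = 0x4, mask = 0xf0 -> (0x4 << 4) & 0xf0 = 0x40 */
		return regmap_update_bits(regmap, DEMO_REG_CTRL1_ADDR,
					  DEMO_CTRL1_ODR_MASK,
					  ST_LSM6DSX_SHIFT_VAL(val,
							DEMO_CTRL1_ODR_MASK));
	}
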
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 755c472e8a05..1d6aa9b1a4cf 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -30,6 +30,8 @@
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
#include <linux/platform_data/st_sensors_pdata.h>
@@ -120,8 +122,10 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
dec_reg = &hw->settings->decimator[sensor->id];
if (dec_reg->addr) {
- err = st_lsm6dsx_write_with_mask(hw, dec_reg->addr,
- dec_reg->mask, data);
+ int val = ST_LSM6DSX_SHIFT_VAL(data, dec_reg->mask);
+
+ err = regmap_update_bits(hw->regmap, dec_reg->addr,
+ dec_reg->mask, val);
if (err < 0)
return err;
}
@@ -137,8 +141,10 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
{
int err;
- err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
- ST_LSM6DSX_FIFO_MODE_MASK, fifo_mode);
+ err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+ ST_LSM6DSX_FIFO_MODE_MASK,
+ FIELD_PREP(ST_LSM6DSX_FIFO_MODE_MASK,
+ fifo_mode));
if (err < 0)
return err;
@@ -154,8 +160,9 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
u8 data;
data = hw->enable_mask ? ST_LSM6DSX_MAX_FIFO_ODR_VAL : 0;
- return st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
- ST_LSM6DSX_FIFO_ODR_MASK, data);
+ return regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+ ST_LSM6DSX_FIFO_ODR_MASK,
+ FIELD_PREP(ST_LSM6DSX_FIFO_ODR_MASK, data));
}
int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
@@ -163,9 +170,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
struct st_lsm6dsx_hw *hw = sensor->hw;
struct st_lsm6dsx_sensor *cur_sensor;
+ int i, err, data;
__le16 wdata;
- int i, err;
- u8 data;
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
cur_sensor = iio_priv(hw->iio_devs[i]);
@@ -187,24 +193,42 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
fifo_watermark = (fifo_watermark / sip) * sip;
fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
- mutex_lock(&hw->lock);
-
- err = hw->tf->read(hw->dev, hw->settings->fifo_ops.fifo_th.addr + 1,
- sizeof(data), &data);
+ err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
+ &data);
if (err < 0)
- goto out;
+ return err;
fifo_th_mask = hw->settings->fifo_ops.fifo_th.mask;
fifo_watermark = ((data << 8) & ~fifo_th_mask) |
(fifo_watermark & fifo_th_mask);
wdata = cpu_to_le16(fifo_watermark);
- err = hw->tf->write(hw->dev, hw->settings->fifo_ops.fifo_th.addr,
- sizeof(wdata), (u8 *)&wdata);
-out:
- mutex_unlock(&hw->lock);
+ return regmap_bulk_write(hw->regmap,
+ hw->settings->fifo_ops.fifo_th.addr,
+ &wdata, sizeof(wdata));
+}
- return err < 0 ? err : 0;
+/*
+ * Limit each bulk read to ST_LSM6DSX_MAX_WORD_LEN bytes in order to
+ * avoid a kmalloc for each bus access
+ */
+static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 *data,
+ unsigned int data_len)
+{
+ unsigned int word_len, read_len = 0;
+ int err;
+
+ while (read_len < data_len) {
+ word_len = min_t(unsigned int, data_len - read_len,
+ ST_LSM6DSX_MAX_WORD_LEN);
+ err = regmap_bulk_read(hw->regmap,
+ ST_LSM6DSX_REG_FIFO_OUTL_ADDR,
+ data + read_len, word_len);
+ if (err < 0)
+ return err;
+ read_len += word_len;
+ }
+ return 0;
}
/**
@@ -223,11 +247,11 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
s64 acc_ts, acc_delta_ts, gyro_ts, gyro_delta_ts;
u8 iio_buff[ALIGN(ST_LSM6DSX_SAMPLE_SIZE, sizeof(s64)) + sizeof(s64)];
- u8 buff[pattern_len];
__le16 fifo_status;
- err = hw->tf->read(hw->dev, hw->settings->fifo_ops.fifo_diff.addr,
- sizeof(fifo_status), (u8 *)&fifo_status);
+ err = regmap_bulk_read(hw->regmap,
+ hw->settings->fifo_ops.fifo_diff.addr,
+ &fifo_status, sizeof(fifo_status));
if (err < 0)
return err;
@@ -255,8 +279,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
samples);
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
- err = hw->tf->read(hw->dev, ST_LSM6DSX_REG_FIFO_OUTL_ADDR,
- sizeof(buff), buff);
+ err = st_lsm6dsx_read_block(hw, hw->buff, pattern_len);
if (err < 0)
return err;
@@ -281,7 +304,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
while (acc_sip > 0 || gyro_sip > 0) {
if (gyro_sip-- > 0) {
- memcpy(iio_buff, &buff[offset],
+ memcpy(iio_buff, &hw->buff[offset],
ST_LSM6DSX_SAMPLE_SIZE);
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_GYRO],
@@ -291,7 +314,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
}
if (acc_sip-- > 0) {
- memcpy(iio_buff, &buff[offset],
+ memcpy(iio_buff, &hw->buff[offset],
ST_LSM6DSX_SAMPLE_SIZE);
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_ACC],
@@ -325,38 +348,40 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable)
struct st_lsm6dsx_hw *hw = sensor->hw;
int err;
+ mutex_lock(&hw->conf_lock);
+
if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS) {
err = st_lsm6dsx_flush_fifo(hw);
if (err < 0)
- return err;
+ goto out;
}
if (enable) {
err = st_lsm6dsx_sensor_enable(sensor);
if (err < 0)
- return err;
+ goto out;
} else {
err = st_lsm6dsx_sensor_disable(sensor);
if (err < 0)
- return err;
+ goto out;
}
err = st_lsm6dsx_set_fifo_odr(sensor, enable);
if (err < 0)
- return err;
+ goto out;
err = st_lsm6dsx_update_decimators(hw);
if (err < 0)
- return err;
+ goto out;
err = st_lsm6dsx_update_watermark(sensor, sensor->watermark);
if (err < 0)
- return err;
+ goto out;
if (hw->enable_mask) {
err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
if (err < 0)
- return err;
+ goto out;
/*
* store enable buffer timestamp as reference to compute
@@ -365,7 +390,10 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable)
sensor->ts = iio_get_time_ns(iio_dev);
}
- return 0;
+out:
+ mutex_unlock(&hw->conf_lock);
+
+ return err;
}
static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
@@ -444,17 +472,20 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
return -EINVAL;
}
- err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_HLACTIVE_ADDR,
- ST_LSM6DSX_REG_HLACTIVE_MASK,
- irq_active_low);
+ err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_HLACTIVE_ADDR,
+ ST_LSM6DSX_REG_HLACTIVE_MASK,
+ FIELD_PREP(ST_LSM6DSX_REG_HLACTIVE_MASK,
+ irq_active_low));
if (err < 0)
return err;
pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
if ((np && of_property_read_bool(np, "drive-open-drain")) ||
(pdata && pdata->open_drain)) {
- err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_PP_OD_ADDR,
- ST_LSM6DSX_REG_PP_OD_MASK, 1);
+ err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_PP_OD_ADDR,
+ ST_LSM6DSX_REG_PP_OD_MASK,
+ FIELD_PREP(ST_LSM6DSX_REG_PP_OD_MASK,
+ 1));
if (err < 0)
return err;
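
The chunk bound used by st_lsm6dsx_read_block() works out to (32 / 6) * 6 = 30 bytes: the largest multiple of the 6-byte sample size not exceeding 32, so every bulk read stays sample-aligned. A worked sketch of the accounting (the 120-byte pattern length is an example value):

	/*
	 * Sketch only: a 120-byte pattern is fetched as 30 + 30 + 30 + 30,
	 * i.e. DIV_ROUND_UP(120, 30) = 4 regmap_bulk_read() calls.
	 */
	static unsigned int demo_num_reads(unsigned int pattern_len)
	{
		return DIV_ROUND_UP(pattern_len, ST_LSM6DSX_MAX_WORD_LEN);
	}
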
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 239c735242be..c2fa3239b9c6 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -37,6 +37,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
#include <linux/platform_data/st_sensors_pdata.h>
@@ -277,36 +279,9 @@ static const struct iio_chan_spec st_lsm6dsx_gyro_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3),
};
-int st_lsm6dsx_write_with_mask(struct st_lsm6dsx_hw *hw, u8 addr, u8 mask,
- u8 val)
-{
- u8 data;
- int err;
-
- mutex_lock(&hw->lock);
-
- err = hw->tf->read(hw->dev, addr, sizeof(data), &data);
- if (err < 0) {
- dev_err(hw->dev, "failed to read %02x register\n", addr);
- goto out;
- }
-
- data = (data & ~mask) | ((val << __ffs(mask)) & mask);
-
- err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
- if (err < 0)
- dev_err(hw->dev, "failed to write %02x register\n", addr);
-
-out:
- mutex_unlock(&hw->lock);
-
- return err;
-}
-
static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id)
{
- int err, i, j;
- u8 data;
+ int err, i, j, data;
for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
@@ -322,8 +297,7 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id)
return -ENODEV;
}
- err = hw->tf->read(hw->dev, ST_LSM6DSX_REG_WHOAMI_ADDR, sizeof(data),
- &data);
+ err = regmap_read(hw->regmap, ST_LSM6DSX_REG_WHOAMI_ADDR, &data);
if (err < 0) {
dev_err(hw->dev, "failed to read whoami register\n");
return err;
@@ -342,22 +316,22 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id)
static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
u32 gain)
{
- enum st_lsm6dsx_sensor_id id = sensor->id;
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ const struct st_lsm6dsx_reg *reg;
int i, err;
u8 val;
for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++)
- if (st_lsm6dsx_fs_table[id].fs_avl[i].gain == gain)
+ if (st_lsm6dsx_fs_table[sensor->id].fs_avl[i].gain == gain)
break;
if (i == ST_LSM6DSX_FS_LIST_SIZE)
return -EINVAL;
- val = st_lsm6dsx_fs_table[id].fs_avl[i].val;
- err = st_lsm6dsx_write_with_mask(sensor->hw,
- st_lsm6dsx_fs_table[id].reg.addr,
- st_lsm6dsx_fs_table[id].reg.mask,
- val);
+ val = st_lsm6dsx_fs_table[sensor->id].fs_avl[i].val;
+ reg = &st_lsm6dsx_fs_table[sensor->id].reg;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(val, reg->mask));
if (err < 0)
return err;
@@ -385,7 +359,8 @@ static int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr,
static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
{
- enum st_lsm6dsx_sensor_id id = sensor->id;
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ const struct st_lsm6dsx_reg *reg;
int err;
u8 val;
@@ -393,10 +368,9 @@ static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
if (err < 0)
return err;
- return st_lsm6dsx_write_with_mask(sensor->hw,
- st_lsm6dsx_odr_table[id].reg.addr,
- st_lsm6dsx_odr_table[id].reg.mask,
- val);
+ reg = &st_lsm6dsx_odr_table[sensor->id].reg;
+ return regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(val, reg->mask));
}
int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor)
@@ -414,16 +388,17 @@ int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor)
int st_lsm6dsx_sensor_disable(struct st_lsm6dsx_sensor *sensor)
{
- enum st_lsm6dsx_sensor_id id = sensor->id;
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ const struct st_lsm6dsx_reg *reg;
int err;
- err = st_lsm6dsx_write_with_mask(sensor->hw,
- st_lsm6dsx_odr_table[id].reg.addr,
- st_lsm6dsx_odr_table[id].reg.mask, 0);
+ reg = &st_lsm6dsx_odr_table[sensor->id].reg;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(0, reg->mask));
if (err < 0)
return err;
- sensor->hw->enable_mask &= ~BIT(id);
+ sensor->hw->enable_mask &= ~BIT(sensor->id);
return 0;
}
@@ -431,6 +406,7 @@ int st_lsm6dsx_sensor_disable(struct st_lsm6dsx_sensor *sensor)
static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
u8 addr, int *val)
{
+ struct st_lsm6dsx_hw *hw = sensor->hw;
int err, delay;
__le16 data;
@@ -441,14 +417,13 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
delay = 1000000 / sensor->odr;
usleep_range(delay, 2 * delay);
- err = sensor->hw->tf->read(sensor->hw->dev, addr, sizeof(data),
- (u8 *)&data);
+ err = regmap_bulk_read(hw->regmap, addr, &data, sizeof(data));
if (err < 0)
return err;
st_lsm6dsx_sensor_disable(sensor);
- *val = (s16)data;
+ *val = (s16)le16_to_cpu(data);
return IIO_VAL_INT;
}
@@ -528,7 +503,12 @@ static int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val)
if (val < 1 || val > hw->settings->max_fifo_size)
return -EINVAL;
+ mutex_lock(&hw->conf_lock);
+
err = st_lsm6dsx_update_watermark(sensor, val);
+
+ mutex_unlock(&hw->conf_lock);
+
if (err < 0)
return err;
@@ -652,20 +632,20 @@ static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
{
- u8 data, drdy_int_reg;
+ u8 drdy_int_reg;
int err;
- data = ST_LSM6DSX_REG_RESET_MASK;
- err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_RESET_ADDR, sizeof(data),
- &data);
+ err = regmap_write(hw->regmap, ST_LSM6DSX_REG_RESET_ADDR,
+ ST_LSM6DSX_REG_RESET_MASK);
if (err < 0)
return err;
msleep(200);
/* enable Block Data Update */
- err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_BDU_ADDR,
- ST_LSM6DSX_REG_BDU_MASK, 1);
+ err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_BDU_ADDR,
+ ST_LSM6DSX_REG_BDU_MASK,
+ FIELD_PREP(ST_LSM6DSX_REG_BDU_MASK, 1));
if (err < 0)
return err;
@@ -674,8 +654,10 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
if (err < 0)
return err;
- return st_lsm6dsx_write_with_mask(hw, drdy_int_reg,
- ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK, 1);
+ return regmap_update_bits(hw->regmap, drdy_int_reg,
+ ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
+ FIELD_PREP(ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
+ 1));
}
static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
@@ -726,7 +708,7 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
}
int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
- const struct st_lsm6dsx_transfer_function *tf_ops)
+ struct regmap *regmap)
{
struct st_lsm6dsx_hw *hw;
int i, err;
@@ -737,12 +719,16 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
dev_set_drvdata(dev, (void *)hw);
- mutex_init(&hw->lock);
mutex_init(&hw->fifo_lock);
+ mutex_init(&hw->conf_lock);
+
+ hw->buff = devm_kzalloc(dev, ST_LSM6DSX_BUFF_SIZE, GFP_KERNEL);
+ if (!hw->buff)
+ return -ENOMEM;
hw->dev = dev;
hw->irq = irq;
- hw->tf = tf_ops;
+ hw->regmap = regmap;
err = st_lsm6dsx_check_whoami(hw, hw_id);
if (err < 0)
@@ -778,6 +764,7 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
{
struct st_lsm6dsx_hw *hw = dev_get_drvdata(dev);
struct st_lsm6dsx_sensor *sensor;
+ const struct st_lsm6dsx_reg *reg;
int i, err = 0;
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
@@ -785,9 +772,9 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
if (!(hw->enable_mask & BIT(sensor->id)))
continue;
- err = st_lsm6dsx_write_with_mask(hw,
- st_lsm6dsx_odr_table[sensor->id].reg.addr,
- st_lsm6dsx_odr_table[sensor->id].reg.mask, 0);
+ reg = &st_lsm6dsx_odr_table[sensor->id].reg;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(0, reg->mask));
if (err < 0)
return err;
}
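The hunks above replace the driver's private st_lsm6dsx_write_with_mask() helper with regmap_update_bits() plus ST_LSM6DSX_SHIFT_VAL(). A minimal sketch of the pattern, assuming ST_LSM6DSX_SHIFT_VAL() shifts the value up to the mask's lowest set bit (the real definition lives in st_lsm6dsx.h):

#include <linux/bitops.h>
#include <linux/regmap.h>

/* Assumed equivalent of ST_LSM6DSX_SHIFT_VAL(): align val with mask. */
#define EXAMPLE_SHIFT_VAL(val, mask)	(((val) << __ffs(mask)) & (mask))

static int example_write_with_mask(struct regmap *map, unsigned int addr,
				   unsigned int mask, unsigned int val)
{
	/* regmap performs the read-modify-write under its own lock. */
	return regmap_update_bits(map, addr, mask,
				  EXAMPLE_SHIFT_VAL(val, mask));
}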
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 305fec712ab0..41525dd2aab7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -14,55 +14,30 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/regmap.h>
#include "st_lsm6dsx.h"
-static int st_lsm6dsx_i2c_read(struct device *dev, u8 addr, int len, u8 *data)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct i2c_msg msg[2];
-
- msg[0].addr = client->addr;
- msg[0].flags = client->flags;
- msg[0].len = 1;
- msg[0].buf = &addr;
-
- msg[1].addr = client->addr;
- msg[1].flags = client->flags | I2C_M_RD;
- msg[1].len = len;
- msg[1].buf = data;
-
- return i2c_transfer(client->adapter, msg, 2);
-}
-
-static int st_lsm6dsx_i2c_write(struct device *dev, u8 addr, int len, u8 *data)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct i2c_msg msg;
- u8 send[len + 1];
-
- send[0] = addr;
- memcpy(&send[1], data, len * sizeof(u8));
-
- msg.addr = client->addr;
- msg.flags = client->flags;
- msg.len = len + 1;
- msg.buf = send;
-
- return i2c_transfer(client->adapter, &msg, 1);
-}
-
-static const struct st_lsm6dsx_transfer_function st_lsm6dsx_transfer_fn = {
- .read = st_lsm6dsx_i2c_read,
- .write = st_lsm6dsx_i2c_write,
+static const struct regmap_config st_lsm6dsx_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
};
static int st_lsm6dsx_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ int hw_id = id->driver_data;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &st_lsm6dsx_i2c_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
return st_lsm6dsx_probe(&client->dev, client->irq,
- (int)id->driver_data, id->name,
- &st_lsm6dsx_transfer_fn);
+ hw_id, id->name, regmap);
}
static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 95472f153ad2..2c8135834479 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -14,72 +14,30 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/regmap.h>
#include "st_lsm6dsx.h"
-#define SENSORS_SPI_READ BIT(7)
-
-static int st_lsm6dsx_spi_read(struct device *dev, u8 addr, int len,
- u8 *data)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct st_lsm6dsx_hw *hw = spi_get_drvdata(spi);
- int err;
-
- struct spi_transfer xfers[] = {
- {
- .tx_buf = hw->tb.tx_buf,
- .bits_per_word = 8,
- .len = 1,
- },
- {
- .rx_buf = hw->tb.rx_buf,
- .bits_per_word = 8,
- .len = len,
- }
- };
-
- hw->tb.tx_buf[0] = addr | SENSORS_SPI_READ;
-
- err = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
- if (err < 0)
- return err;
-
- memcpy(data, hw->tb.rx_buf, len * sizeof(u8));
-
- return len;
-}
-
-static int st_lsm6dsx_spi_write(struct device *dev, u8 addr, int len,
- u8 *data)
-{
- struct st_lsm6dsx_hw *hw;
- struct spi_device *spi;
-
- if (len >= ST_LSM6DSX_TX_MAX_LENGTH)
- return -ENOMEM;
-
- spi = to_spi_device(dev);
- hw = spi_get_drvdata(spi);
-
- hw->tb.tx_buf[0] = addr;
- memcpy(&hw->tb.tx_buf[1], data, len);
-
- return spi_write(spi, hw->tb.tx_buf, len + 1);
-}
-
-static const struct st_lsm6dsx_transfer_function st_lsm6dsx_transfer_fn = {
- .read = st_lsm6dsx_spi_read,
- .write = st_lsm6dsx_spi_write,
+static const struct regmap_config st_lsm6dsx_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
};
static int st_lsm6dsx_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
+ int hw_id = id->driver_data;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &st_lsm6dsx_spi_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
return st_lsm6dsx_probe(&spi->dev, spi->irq,
- (int)id->driver_data, id->name,
- &st_lsm6dsx_transfer_fn);
+ hw_id, id->name, regmap);
}
static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
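Note that neither regmap_config above sets a read flag: regmap's SPI bus implementation defaults read_flag_mask to BIT(7), which is exactly what the removed SENSORS_SPI_READ used to OR into the register address by hand. Spelled out explicitly, the equivalent configuration would look like this (a sketch, not the driver's actual config):

#include <linux/bitops.h>
#include <linux/regmap.h>

static const struct regmap_config example_spi_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.read_flag_mask = BIT(7),	/* addr | 0x80 on every read */
};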
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d2b465140a6b..6184c100a94a 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -169,7 +169,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
* Return: (POLLIN | POLLRDNORM) if data is available for reading
* or 0 for other cases
*/
-unsigned int iio_buffer_poll(struct file *filp,
+__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filp->private_data;
@@ -1198,6 +1198,18 @@ out:
return ret ? ret : len;
}
+static ssize_t iio_dma_show_data_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ size_t bytes;
+
+ bytes = iio_buffer_data_available(indio_dev->buffer);
+
+ return sprintf(buf, "%zu\n", bytes);
+}
+
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
@@ -1208,11 +1220,14 @@ static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
S_IRUGO, iio_buffer_show_watermark, NULL);
+static DEVICE_ATTR(data_available, S_IRUGO,
+ iio_dma_show_data_available, NULL);
static struct attribute *iio_buffer_attrs[] = {
&dev_attr_length.attr,
&dev_attr_enable.attr,
&dev_attr_watermark.attr,
+ &dev_attr_data_available.attr,
};
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
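The new read-only buffer attribute simply reports the count returned by iio_buffer_data_available(). A small userspace sketch that polls it (the device path is a placeholder):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/iio/devices/iio:device0/buffer/data_available";
	FILE *f = fopen(path, "r");
	unsigned long avail;

	if (!f)
		return 1;
	if (fscanf(f, "%lu", &avail) == 1)
		printf("data_available: %lu\n", avail);
	fclose(f);
	return 0;
}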
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 2f0998ebeed2..19bdf3d2962a 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -588,6 +588,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
return snprintf(buf, len, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
+ /* fall through */
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 90fac8ec63c9..0bcf073e46db 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -95,12 +95,12 @@ EXPORT_SYMBOL(iio_push_event);
* Return: (POLLIN | POLLRDNORM) if data is available for reading
* or a negative error code on failure
*/
-static unsigned int iio_event_poll(struct file *filep,
+static __poll_t iio_event_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filep->private_data;
struct iio_event_interface *ev_int = indio_dev->event_interface;
- unsigned int events = 0;
+ __poll_t events = 0;
if (!indio_dev->info)
return events;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 069defcc6d9b..ec98790e2a28 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -664,9 +664,8 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
-static int iio_read_channel_attribute(struct iio_channel *chan,
- int *val, int *val2,
- enum iio_chan_info_enum attribute)
+int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -682,6 +681,7 @@ err_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
@@ -850,7 +850,8 @@ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
chan->channel, val, val2, info);
}
-int iio_write_channel_raw(struct iio_channel *chan, int val)
+int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -860,12 +861,18 @@ int iio_write_channel_raw(struct iio_channel *chan, int val)
goto err_unlock;
}
- ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_RAW);
+ ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
+
+int iio_write_channel_raw(struct iio_channel *chan, int val)
+{
+ return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
+}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
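With iio_read_channel_attribute() exported and iio_write_channel_attribute() added, in-kernel consumers can touch arbitrary channel attributes rather than only raw values. A hedged sketch of a consumer round-tripping a channel's scale (the helper name and placeholder logic are illustrative):

#include <linux/iio/consumer.h>
#include <linux/iio/types.h>

static int example_roundtrip_scale(struct iio_channel *chan)
{
	int val, val2, err;

	err = iio_read_channel_attribute(chan, &val, &val2,
					 IIO_CHAN_INFO_SCALE);
	if (err < 0)
		return err;

	/* Write the same scale back; real code would adjust it. */
	return iio_write_channel_attribute(chan, val, val2,
					   IIO_CHAN_INFO_SCALE);
}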
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 2356ed9285df..93fd421b10d7 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -334,6 +334,30 @@ config STK3310
Choosing M will build the driver as a module. If so, the module
will be called stk3310.
+config ST_UVIS25
+ tristate "STMicroelectronics UVIS25 sensor driver"
+ depends on (I2C || SPI)
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select ST_UVIS25_I2C if (I2C)
+ select ST_UVIS25_SPI if (SPI_MASTER)
+ help
+ Say yes here to build support for the STMicroelectronics
+ UVIS25 UV sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called st_uvis25.
+
+config ST_UVIS25_I2C
+ tristate
+ depends on ST_UVIS25
+ select REGMAP_I2C
+
+config ST_UVIS25_SPI
+ tristate
+ depends on ST_UVIS25
+ select REGMAP_SPI
+
config TCS3414
tristate "TAOS TCS3414 digital color sensor"
depends on I2C
@@ -425,4 +449,14 @@ config VL6180
To compile this driver as a module, choose M here: the
module will be called vl6180.
+config ZOPT2201
+ tristate "ZOPT2201 ALS and UV B sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the IDT
+ ZOPT2201 ambient light and UV B sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zopt2201.
+
endmenu
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index c263469b7ce9..f714067a7816 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -33,6 +33,9 @@ obj-$(CONFIG_RPR0521) += rpr0521.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_SI1145) += si1145.o
obj-$(CONFIG_STK3310) += stk3310.o
+obj-$(CONFIG_ST_UVIS25) += st_uvis25_core.o
+obj-$(CONFIG_ST_UVIS25_I2C) += st_uvis25_i2c.o
+obj-$(CONFIG_ST_UVIS25_SPI) += st_uvis25_spi.o
obj-$(CONFIG_TCS3414) += tcs3414.o
obj-$(CONFIG_TCS3472) += tcs3472.o
obj-$(CONFIG_TSL2583) += tsl2583.o
@@ -41,3 +44,4 @@ obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
obj-$(CONFIG_VEML6070) += veml6070.o
obj-$(CONFIG_VL6180) += vl6180.o
+obj-$(CONFIG_ZOPT2201) += zopt2201.o
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index b2a46b390d5c..acfad4aeb27a 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -181,7 +181,6 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
- struct cros_ec_device *ec_device;
struct iio_dev *indio_dev;
struct cros_ec_light_prox_state *state;
struct iio_chan_spec *channel;
@@ -191,7 +190,6 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
dev_warn(dev, "No CROS EC device found.\n");
return -EINVAL;
}
- ec_device = ec_dev->ec_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
if (!indio_dev)
diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h
new file mode 100644
index 000000000000..5e970ab480cd
--- /dev/null
+++ b/drivers/iio/light/st_uvis25.h
@@ -0,0 +1,37 @@
+/*
+ * STMicroelectronics uvis25 sensor driver
+ *
+ * Copyright 2017 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_UVIS25_H
+#define ST_UVIS25_H
+
+#define ST_UVIS25_DEV_NAME "uvis25"
+
+#include <linux/iio/iio.h>
+
+/**
+ * struct st_uvis25_hw - ST UVIS25 sensor instance
+ * @regmap: Register map of the device.
+ * @trig: The trigger in use by the driver.
+ * @enabled: Status of the sensor (false->off, true->on).
+ * @irq: Device interrupt line (I2C or SPI).
+ */
+struct st_uvis25_hw {
+ struct regmap *regmap;
+
+ struct iio_trigger *trig;
+ bool enabled;
+ int irq;
+};
+
+extern const struct dev_pm_ops st_uvis25_pm_ops;
+
+int st_uvis25_probe(struct device *dev, int irq, struct regmap *regmap);
+
+#endif /* ST_UVIS25_H */
diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
new file mode 100644
index 000000000000..302635836e6b
--- /dev/null
+++ b/drivers/iio/light/st_uvis25_core.c
@@ -0,0 +1,359 @@
+/*
+ * STMicroelectronics uvis25 sensor driver
+ *
+ * Copyright 2017 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/buffer.h>
+#include <linux/regmap.h>
+
+#include "st_uvis25.h"
+
+#define ST_UVIS25_REG_WHOAMI_ADDR 0x0f
+#define ST_UVIS25_REG_WHOAMI_VAL 0xca
+#define ST_UVIS25_REG_CTRL1_ADDR 0x20
+#define ST_UVIS25_REG_ODR_MASK BIT(0)
+#define ST_UVIS25_REG_BDU_MASK BIT(1)
+#define ST_UVIS25_REG_CTRL2_ADDR 0x21
+#define ST_UVIS25_REG_BOOT_MASK BIT(7)
+#define ST_UVIS25_REG_CTRL3_ADDR 0x22
+#define ST_UVIS25_REG_HL_MASK BIT(7)
+#define ST_UVIS25_REG_STATUS_ADDR 0x27
+#define ST_UVIS25_REG_UV_DA_MASK BIT(0)
+#define ST_UVIS25_REG_OUT_ADDR 0x28
+
+static const struct iio_chan_spec st_uvis25_channels[] = {
+ {
+ .type = IIO_UVINDEX,
+ .address = ST_UVIS25_REG_OUT_ADDR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 8,
+ .storagebits = 8,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static int st_uvis25_check_whoami(struct st_uvis25_hw *hw)
+{
+ int err, data;
+
+ err = regmap_read(hw->regmap, ST_UVIS25_REG_WHOAMI_ADDR, &data);
+ if (err < 0) {
+ dev_err(regmap_get_device(hw->regmap),
+ "failed to read whoami register\n");
+ return err;
+ }
+
+ if (data != ST_UVIS25_REG_WHOAMI_VAL) {
+ dev_err(regmap_get_device(hw->regmap),
+ "wrong whoami {%02x vs %02x}\n",
+ data, ST_UVIS25_REG_WHOAMI_VAL);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int st_uvis25_set_enable(struct st_uvis25_hw *hw, bool enable)
+{
+ int err;
+
+ err = regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
+ ST_UVIS25_REG_ODR_MASK, enable);
+ if (err < 0)
+ return err;
+
+ hw->enabled = enable;
+
+ return 0;
+}
+
+static int st_uvis25_read_oneshot(struct st_uvis25_hw *hw, u8 addr, int *val)
+{
+ int err;
+
+ err = st_uvis25_set_enable(hw, true);
+ if (err < 0)
+ return err;
+
+ msleep(1500);
+
+ /*
+ * In order to avoid possible race conditions with interrupt
+ * generation, disable the sensor first and then poll the output
+ * register. That sequence guarantees the interrupt will be reset
+ * when the irq line is unmasked.
+ */
+ err = st_uvis25_set_enable(hw, false);
+ if (err < 0)
+ return err;
+
+ err = regmap_read(hw->regmap, addr, val);
+
+ return err < 0 ? err : IIO_VAL_INT;
+}
+
+static int st_uvis25_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *ch,
+ int *val, int *val2, long mask)
+{
+ int ret;
+
+ ret = iio_device_claim_direct_mode(iio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED: {
+ struct st_uvis25_hw *hw = iio_priv(iio_dev);
+
+ /*
+ * Mask the irq line during the oneshot read: the sensor does not
+ * expose a way to disable the data-ready line in the register
+ * map, and it is enabled by default. If the line were left
+ * unmasked during read_raw() it would be asserted and never
+ * reset, since the trigger is disabled.
+ */
+ if (hw->irq > 0)
+ disable_irq(hw->irq);
+ ret = st_uvis25_read_oneshot(hw, ch->address, val);
+ if (hw->irq > 0)
+ enable_irq(hw->irq);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(iio_dev);
+
+ return ret;
+}
+
+static irqreturn_t st_uvis25_trigger_handler_thread(int irq, void *private)
+{
+ struct st_uvis25_hw *hw = private;
+ int err, status;
+
+ err = regmap_read(hw->regmap, ST_UVIS25_REG_STATUS_ADDR, &status);
+ if (err < 0)
+ return IRQ_HANDLED;
+
+ if (!(status & ST_UVIS25_REG_UV_DA_MASK))
+ return IRQ_NONE;
+
+ iio_trigger_poll_chained(hw->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int st_uvis25_allocate_trigger(struct iio_dev *iio_dev)
+{
+ struct st_uvis25_hw *hw = iio_priv(iio_dev);
+ struct device *dev = regmap_get_device(hw->regmap);
+ bool irq_active_low = false;
+ unsigned long irq_type;
+ int err;
+
+ irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ break;
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_FALLING:
+ irq_active_low = true;
+ break;
+ default:
+ dev_info(dev, "mode %lx unsupported\n", irq_type);
+ return -EINVAL;
+ }
+
+ err = regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL3_ADDR,
+ ST_UVIS25_REG_HL_MASK, irq_active_low);
+ if (err < 0)
+ return err;
+
+ err = devm_request_threaded_irq(dev, hw->irq, NULL,
+ st_uvis25_trigger_handler_thread,
+ irq_type | IRQF_ONESHOT,
+ iio_dev->name, hw);
+ if (err) {
+ dev_err(dev, "failed to request trigger irq %d\n",
+ hw->irq);
+ return err;
+ }
+
+ hw->trig = devm_iio_trigger_alloc(dev, "%s-trigger",
+ iio_dev->name);
+ if (!hw->trig)
+ return -ENOMEM;
+
+ iio_trigger_set_drvdata(hw->trig, iio_dev);
+ hw->trig->dev.parent = dev;
+
+ return devm_iio_trigger_register(dev, hw->trig);
+}
+
+static int st_uvis25_buffer_preenable(struct iio_dev *iio_dev)
+{
+ return st_uvis25_set_enable(iio_priv(iio_dev), true);
+}
+
+static int st_uvis25_buffer_postdisable(struct iio_dev *iio_dev)
+{
+ return st_uvis25_set_enable(iio_priv(iio_dev), false);
+}
+
+static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = {
+ .preenable = st_uvis25_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = st_uvis25_buffer_postdisable,
+};
+
+static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p)
+{
+ u8 buffer[ALIGN(sizeof(u8), sizeof(s64)) + sizeof(s64)];
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio_dev = pf->indio_dev;
+ struct st_uvis25_hw *hw = iio_priv(iio_dev);
+ int err;
+
+ err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, (int *)buffer);
+ if (err < 0)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+ iio_get_time_ns(iio_dev));
+
+out:
+ iio_trigger_notify_done(hw->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int st_uvis25_allocate_buffer(struct iio_dev *iio_dev)
+{
+ struct st_uvis25_hw *hw = iio_priv(iio_dev);
+
+ return devm_iio_triggered_buffer_setup(regmap_get_device(hw->regmap),
+ iio_dev, NULL,
+ st_uvis25_buffer_handler_thread,
+ &st_uvis25_buffer_ops);
+}
+
+static const struct iio_info st_uvis25_info = {
+ .read_raw = st_uvis25_read_raw,
+};
+
+static int st_uvis25_init_sensor(struct st_uvis25_hw *hw)
+{
+ int err;
+
+ err = regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL2_ADDR,
+ ST_UVIS25_REG_BOOT_MASK, 1);
+ if (err < 0)
+ return err;
+
+ msleep(2000);
+
+ return regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
+ ST_UVIS25_REG_BDU_MASK, 1);
+}
+
+int st_uvis25_probe(struct device *dev, int irq, struct regmap *regmap)
+{
+ struct st_uvis25_hw *hw;
+ struct iio_dev *iio_dev;
+ int err;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*hw));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, (void *)iio_dev);
+
+ hw = iio_priv(iio_dev);
+ hw->irq = irq;
+ hw->regmap = regmap;
+
+ err = st_uvis25_check_whoami(hw);
+ if (err < 0)
+ return err;
+
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->dev.parent = dev;
+ iio_dev->channels = st_uvis25_channels;
+ iio_dev->num_channels = ARRAY_SIZE(st_uvis25_channels);
+ iio_dev->name = ST_UVIS25_DEV_NAME;
+ iio_dev->info = &st_uvis25_info;
+
+ err = st_uvis25_init_sensor(hw);
+ if (err < 0)
+ return err;
+
+ if (hw->irq > 0) {
+ err = st_uvis25_allocate_buffer(iio_dev);
+ if (err < 0)
+ return err;
+
+ err = st_uvis25_allocate_trigger(iio_dev);
+ if (err)
+ return err;
+ }
+
+ return devm_iio_device_register(dev, iio_dev);
+}
+EXPORT_SYMBOL(st_uvis25_probe);
+
+static int __maybe_unused st_uvis25_suspend(struct device *dev)
+{
+ struct iio_dev *iio_dev = dev_get_drvdata(dev);
+ struct st_uvis25_hw *hw = iio_priv(iio_dev);
+
+ return regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
+ ST_UVIS25_REG_ODR_MASK, 0);
+}
+
+static int __maybe_unused st_uvis25_resume(struct device *dev)
+{
+ struct iio_dev *iio_dev = dev_get_drvdata(dev);
+ struct st_uvis25_hw *hw = iio_priv(iio_dev);
+
+ if (hw->enabled)
+ return regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
+ ST_UVIS25_REG_ODR_MASK, 1);
+
+ return 0;
+}
+
+const struct dev_pm_ops st_uvis25_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(st_uvis25_suspend, st_uvis25_resume)
+};
+EXPORT_SYMBOL(st_uvis25_pm_ops);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("STMicroelectronics uvis25 sensor driver");
+MODULE_LICENSE("GPL v2");
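In st_uvis25_buffer_handler_thread() the on-stack buffer is sized ALIGN(sizeof(u8), sizeof(s64)) + sizeof(s64) so that the timestamp appended by iio_push_to_buffers_with_timestamp() lands on an 8-byte boundary. Written out as a struct for clarity (names are illustrative), the 16-byte layout is:

#include <linux/types.h>

struct example_uvis25_scan {
	u8 uv;		/* sample read from ST_UVIS25_REG_OUT_ADDR */
	u8 pad[7];	/* keeps the timestamp 8-byte aligned */
	s64 ts;		/* filled in by the IIO core */
};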
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
new file mode 100644
index 000000000000..afd6eb01a202
--- /dev/null
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -0,0 +1,69 @@
+/*
+ * STMicroelectronics uvis25 i2c driver
+ *
+ * Copyright 2017 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+#include "st_uvis25.h"
+
+#define UVIS25_I2C_AUTO_INCREMENT BIT(7)
+
+static const struct regmap_config st_uvis25_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .write_flag_mask = UVIS25_I2C_AUTO_INCREMENT,
+ .read_flag_mask = UVIS25_I2C_AUTO_INCREMENT,
+};
+
+static int st_uvis25_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &st_uvis25_i2c_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return st_uvis25_probe(&client->dev, client->irq, regmap);
+}
+
+static const struct of_device_id st_uvis25_i2c_of_match[] = {
+ { .compatible = "st,uvis25", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_uvis25_i2c_of_match);
+
+static const struct i2c_device_id st_uvis25_i2c_id_table[] = {
+ { ST_UVIS25_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_uvis25_i2c_id_table);
+
+static struct i2c_driver st_uvis25_driver = {
+ .driver = {
+ .name = "st_uvis25_i2c",
+ .pm = &st_uvis25_pm_ops,
+ .of_match_table = of_match_ptr(st_uvis25_i2c_of_match),
+ },
+ .probe = st_uvis25_i2c_probe,
+ .id_table = st_uvis25_i2c_id_table,
+};
+module_i2c_driver(st_uvis25_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("STMicroelectronics uvis25 i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/st_uvis25_spi.c b/drivers/iio/light/st_uvis25_spi.c
new file mode 100644
index 000000000000..cdfee5e84d5e
--- /dev/null
+++ b/drivers/iio/light/st_uvis25_spi.c
@@ -0,0 +1,68 @@
+/*
+ * STMicroelectronics uvis25 spi driver
+ *
+ * Copyright 2017 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+#include "st_uvis25.h"
+
+#define UVIS25_SENSORS_SPI_READ BIT(7)
+#define UVIS25_SPI_AUTO_INCREMENT BIT(6)
+
+static const struct regmap_config st_uvis25_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = UVIS25_SENSORS_SPI_READ | UVIS25_SPI_AUTO_INCREMENT,
+ .write_flag_mask = UVIS25_SPI_AUTO_INCREMENT,
+};
+
+static int st_uvis25_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &st_uvis25_spi_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return st_uvis25_probe(&spi->dev, spi->irq, regmap);
+}
+
+static const struct of_device_id st_uvis25_spi_of_match[] = {
+ { .compatible = "st,uvis25", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_uvis25_spi_of_match);
+
+static const struct spi_device_id st_uvis25_spi_id_table[] = {
+ { ST_UVIS25_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_uvis25_spi_id_table);
+
+static struct spi_driver st_uvis25_driver = {
+ .driver = {
+ .name = "st_uvis25_spi",
+ .pm = &st_uvis25_pm_ops,
+ .of_match_table = of_match_ptr(st_uvis25_spi_of_match),
+ },
+ .probe = st_uvis25_spi_probe,
+ .id_table = st_uvis25_spi_id_table,
+};
+module_spi_driver(st_uvis25_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("STMicroelectronics uvis25 spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/zopt2201.c b/drivers/iio/light/zopt2201.c
new file mode 100644
index 000000000000..041ac9effdb0
--- /dev/null
+++ b/drivers/iio/light/zopt2201.c
@@ -0,0 +1,568 @@
+/*
+ * zopt2201.c - Support for IDT ZOPT2201 ambient light and UV B sensor
+ *
+ * Copyright 2017 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: https://www.idt.com/document/dst/zopt2201-datasheet
+ * 7-bit I2C slave addresses 0x53 (default) or 0x52 (programmed)
+ *
+ * TODO: interrupt support, ALS/UVB raw mode
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define ZOPT2201_DRV_NAME "zopt2201"
+
+/* Registers */
+#define ZOPT2201_MAIN_CTRL 0x00
+#define ZOPT2201_LS_MEAS_RATE 0x04
+#define ZOPT2201_LS_GAIN 0x05
+#define ZOPT2201_PART_ID 0x06
+#define ZOPT2201_MAIN_STATUS 0x07
+#define ZOPT2201_ALS_DATA 0x0d /* LSB first, 13 to 20 bits */
+#define ZOPT2201_UVB_DATA 0x10 /* LSB first, 13 to 20 bits */
+#define ZOPT2201_UV_COMP_DATA 0x13 /* LSB first, 13 to 20 bits */
+#define ZOPT2201_COMP_DATA 0x16 /* LSB first, 13 to 20 bits */
+#define ZOPT2201_INT_CFG 0x19
+#define ZOPT2201_INT_PST 0x1a
+
+#define ZOPT2201_MAIN_CTRL_LS_MODE BIT(3) /* 0 .. ALS, 1 .. UV B */
+#define ZOPT2201_MAIN_CTRL_LS_EN BIT(1)
+
+/* Values for ZOPT2201_LS_MEAS_RATE resolution / bit width */
+#define ZOPT2201_MEAS_RES_20BIT 0 /* takes 400 ms */
+#define ZOPT2201_MEAS_RES_19BIT 1 /* takes 200 ms */
+#define ZOPT2201_MEAS_RES_18BIT 2 /* takes 100 ms, default */
+#define ZOPT2201_MEAS_RES_17BIT 3 /* takes 50 ms */
+#define ZOPT2201_MEAS_RES_16BIT 4 /* takes 25 ms */
+#define ZOPT2201_MEAS_RES_13BIT 5 /* takes 3.125 ms */
+#define ZOPT2201_MEAS_RES_SHIFT 4
+
+/* Values for ZOPT2201_LS_MEAS_RATE measurement rate */
+#define ZOPT2201_MEAS_FREQ_25MS 0
+#define ZOPT2201_MEAS_FREQ_50MS 1
+#define ZOPT2201_MEAS_FREQ_100MS 2 /* default */
+#define ZOPT2201_MEAS_FREQ_200MS 3
+#define ZOPT2201_MEAS_FREQ_500MS 4
+#define ZOPT2201_MEAS_FREQ_1000MS 5
+#define ZOPT2201_MEAS_FREQ_2000MS 6
+
+/* Values for ZOPT2201_LS_GAIN */
+#define ZOPT2201_LS_GAIN_1 0
+#define ZOPT2201_LS_GAIN_3 1
+#define ZOPT2201_LS_GAIN_6 2
+#define ZOPT2201_LS_GAIN_9 3
+#define ZOPT2201_LS_GAIN_18 4
+
+/* Values for ZOPT2201_MAIN_STATUS */
+#define ZOPT2201_MAIN_STATUS_POWERON BIT(5)
+#define ZOPT2201_MAIN_STATUS_INT BIT(4)
+#define ZOPT2201_MAIN_STATUS_DRDY BIT(3)
+
+#define ZOPT2201_PART_NUMBER 0xb2
+
+struct zopt2201_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 gain;
+ u8 res;
+ u8 rate;
+};
+
+static const struct {
+ unsigned int gain; /* gain factor */
+ unsigned int scale; /* micro lux per count */
+} zopt2201_gain_als[] = {
+ { 1, 19200000 },
+ { 3, 6400000 },
+ { 6, 3200000 },
+ { 9, 2133333 },
+ { 18, 1066666 },
+};
+
+static const struct {
+ unsigned int gain; /* gain factor */
+ unsigned int scale; /* micro W/m2 per count */
+} zopt2201_gain_uvb[] = {
+ { 1, 460800 },
+ { 3, 153600 },
+ { 6, 76800 },
+ { 9, 51200 },
+ { 18, 25600 },
+};
+
+static const struct {
+ unsigned int bits; /* sensor resolution in bits */
+ unsigned long us; /* measurement time in micro seconds */
+} zopt2201_resolution[] = {
+ { 20, 400000 },
+ { 19, 200000 },
+ { 18, 100000 },
+ { 17, 50000 },
+ { 16, 25000 },
+ { 13, 3125 },
+};
+
+static const struct {
+ unsigned int scale, uscale; /* scale factor as integer + micro */
+ u8 gain; /* gain register value */
+ u8 res; /* resolution register value */
+} zopt2201_scale_als[] = {
+ { 19, 200000, 0, 5 },
+ { 6, 400000, 1, 5 },
+ { 3, 200000, 2, 5 },
+ { 2, 400000, 0, 4 },
+ { 2, 133333, 3, 5 },
+ { 1, 200000, 0, 3 },
+ { 1, 66666, 4, 5 },
+ { 0, 800000, 1, 4 },
+ { 0, 600000, 0, 2 },
+ { 0, 400000, 2, 4 },
+ { 0, 300000, 0, 1 },
+ { 0, 266666, 3, 4 },
+ { 0, 200000, 2, 3 },
+ { 0, 150000, 0, 0 },
+ { 0, 133333, 4, 4 },
+ { 0, 100000, 2, 2 },
+ { 0, 66666, 4, 3 },
+ { 0, 50000, 2, 1 },
+ { 0, 33333, 4, 2 },
+ { 0, 25000, 2, 0 },
+ { 0, 16666, 4, 1 },
+ { 0, 8333, 4, 0 },
+};
+
+static const struct {
+ unsigned int scale, uscale; /* scale factor as integer + micro */
+ u8 gain; /* gain register value */
+ u8 res; /* resolution register value */
+} zopt2201_scale_uvb[] = {
+ { 0, 460800, 0, 5 },
+ { 0, 153600, 1, 5 },
+ { 0, 76800, 2, 5 },
+ { 0, 57600, 0, 4 },
+ { 0, 51200, 3, 5 },
+ { 0, 28800, 0, 3 },
+ { 0, 25600, 4, 5 },
+ { 0, 19200, 1, 4 },
+ { 0, 14400, 0, 2 },
+ { 0, 9600, 2, 4 },
+ { 0, 7200, 0, 1 },
+ { 0, 6400, 3, 4 },
+ { 0, 4800, 2, 3 },
+ { 0, 3600, 0, 0 },
+ { 0, 3200, 4, 4 },
+ { 0, 2400, 2, 2 },
+ { 0, 1600, 4, 3 },
+ { 0, 1200, 2, 1 },
+ { 0, 800, 4, 2 },
+ { 0, 600, 2, 0 },
+ { 0, 400, 4, 1 },
+ { 0, 200, 4, 0 },
+};
+
+static int zopt2201_enable_mode(struct zopt2201_data *data, bool uvb_mode)
+{
+ u8 out = ZOPT2201_MAIN_CTRL_LS_EN;
+
+ if (uvb_mode)
+ out |= ZOPT2201_MAIN_CTRL_LS_MODE;
+
+ return i2c_smbus_write_byte_data(data->client, ZOPT2201_MAIN_CTRL, out);
+}
+
+static int zopt2201_read(struct zopt2201_data *data, u8 reg)
+{
+ struct i2c_client *client = data->client;
+ int tries = 10;
+ u8 buf[3];
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = zopt2201_enable_mode(data, reg == ZOPT2201_UVB_DATA);
+ if (ret < 0)
+ goto fail;
+
+ while (tries--) {
+ unsigned long t = zopt2201_resolution[data->res].us;
+
+ if (t <= 20000)
+ usleep_range(t, t + 1000);
+ else
+ msleep(t / 1000);
+ ret = i2c_smbus_read_byte_data(client, ZOPT2201_MAIN_STATUS);
+ if (ret < 0)
+ goto fail;
+ if (ret & ZOPT2201_MAIN_STATUS_DRDY)
+ break;
+ }
+
+ if (tries < 0) {
+ ret = -ETIMEDOUT;
+ goto fail;
+ }
+
+ ret = i2c_smbus_read_i2c_block_data(client, reg, sizeof(buf), buf);
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_smbus_write_byte_data(client, ZOPT2201_MAIN_CTRL, 0x00);
+ if (ret < 0)
+ goto fail;
+ mutex_unlock(&data->lock);
+
+ return (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+fail:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static const struct iio_chan_spec zopt2201_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .address = ZOPT2201_ALS_DATA,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_UV,
+ .address = ZOPT2201_UVB_DATA,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+ {
+ .type = IIO_UVINDEX,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static int zopt2201_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct zopt2201_data *data = iio_priv(indio_dev);
+ u64 tmp;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = zopt2201_read(data, chan->address);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = zopt2201_read(data, ZOPT2201_UVB_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret * 18 *
+ (1 << (20 - zopt2201_resolution[data->res].bits)) /
+ zopt2201_gain_uvb[data->gain].gain;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->address) {
+ case ZOPT2201_ALS_DATA:
+ *val = zopt2201_gain_als[data->gain].scale;
+ break;
+ case ZOPT2201_UVB_DATA:
+ *val = zopt2201_gain_uvb[data->gain].scale;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val2 = 1000000;
+ *val2 *= (1 << (zopt2201_resolution[data->res].bits - 13));
+ tmp = div_s64(*val * 1000000ULL, *val2);
+ *val = div_s64_rem(tmp, 1000000, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ *val2 = zopt2201_resolution[data->res].us;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int zopt2201_set_resolution(struct zopt2201_data *data, u8 res)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, ZOPT2201_LS_MEAS_RATE,
+ (res << ZOPT2201_MEAS_RES_SHIFT) |
+ data->rate);
+ if (ret < 0)
+ return ret;
+
+ data->res = res;
+
+ return 0;
+}
+
+static int zopt2201_write_resolution(struct zopt2201_data *data,
+ int val, int val2)
+{
+ int i, ret;
+
+ if (val != 0)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(zopt2201_resolution); i++)
+ if (val2 == zopt2201_resolution[i].us) {
+ mutex_lock(&data->lock);
+ ret = zopt2201_set_resolution(data, i);
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int zopt2201_set_gain(struct zopt2201_data *data, u8 gain)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, ZOPT2201_LS_GAIN, gain);
+ if (ret < 0)
+ return ret;
+
+ data->gain = gain;
+
+ return 0;
+}
+
+static int zopt2201_write_scale_als_by_idx(struct zopt2201_data *data, int idx)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = zopt2201_set_resolution(data, zopt2201_scale_als[idx].res);
+ if (ret < 0)
+ goto unlock;
+
+ ret = zopt2201_set_gain(data, zopt2201_scale_als[idx].gain);
+
+unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int zopt2201_write_scale_als(struct zopt2201_data *data,
+ int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(zopt2201_scale_als); i++)
+ if (val == zopt2201_scale_als[i].scale &&
+ val2 == zopt2201_scale_als[i].uscale)
+ return zopt2201_write_scale_als_by_idx(data, i);
+
+ return -EINVAL;
+}
+
+static int zopt2201_write_scale_uvb_by_idx(struct zopt2201_data *data, int idx)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = zopt2201_set_resolution(data, zopt2201_scale_uvb[idx].res);
+ if (ret < 0)
+ goto unlock;
+
+ ret = zopt2201_set_gain(data, zopt2201_scale_uvb[idx].gain);
+
+unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int zopt2201_write_scale_uvb(struct zopt2201_data *data,
+ int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(zopt2201_scale_uvb); i++)
+ if (val == zopt2201_scale_uvb[i].scale &&
+ val2 == zopt2201_scale_uvb[i].uscale)
+ return zopt2201_write_scale_uvb_by_idx(data, i);
+
+ return -EINVAL;
+}
+
+static int zopt2201_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct zopt2201_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return zopt2201_write_resolution(data, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->address) {
+ case ZOPT2201_ALS_DATA:
+ return zopt2201_write_scale_als(data, val, val2);
+ case ZOPT2201_UVB_DATA:
+ return zopt2201_write_scale_uvb(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t zopt2201_show_int_time_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(zopt2201_resolution); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06lu ",
+ zopt2201_resolution[i].us);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEV_ATTR_INT_TIME_AVAIL(zopt2201_show_int_time_available);
+
+static ssize_t zopt2201_show_als_scale_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(zopt2201_scale_als); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ",
+ zopt2201_scale_als[i].scale,
+ zopt2201_scale_als[i].uscale);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t zopt2201_show_uvb_scale_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(zopt2201_scale_uvb); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ",
+ zopt2201_scale_uvb[i].scale,
+ zopt2201_scale_uvb[i].uscale);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(in_illuminance_scale_available, 0444,
+ zopt2201_show_als_scale_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_intensity_uv_scale_available, 0444,
+ zopt2201_show_uvb_scale_avail, NULL, 0);
+
+static struct attribute *zopt2201_attributes[] = {
+ &iio_dev_attr_integration_time_available.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_scale_available.dev_attr.attr,
+ &iio_dev_attr_in_intensity_uv_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group zopt2201_attribute_group = {
+ .attrs = zopt2201_attributes,
+};
+
+static const struct iio_info zopt2201_info = {
+ .read_raw = zopt2201_read_raw,
+ .write_raw = zopt2201_write_raw,
+ .attrs = &zopt2201_attribute_group,
+};
+
+static int zopt2201_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct zopt2201_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+ return -EOPNOTSUPP;
+
+ ret = i2c_smbus_read_byte_data(client, ZOPT2201_PART_ID);
+ if (ret < 0)
+ return ret;
+ if (ret != ZOPT2201_PART_NUMBER)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &zopt2201_info;
+ indio_dev->channels = zopt2201_channels;
+ indio_dev->num_channels = ARRAY_SIZE(zopt2201_channels);
+ indio_dev->name = ZOPT2201_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ data->rate = ZOPT2201_MEAS_FREQ_100MS;
+ ret = zopt2201_set_resolution(data, ZOPT2201_MEAS_RES_18BIT);
+ if (ret < 0)
+ return ret;
+
+ ret = zopt2201_set_gain(data, ZOPT2201_LS_GAIN_3);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id zopt2201_id[] = {
+ { "zopt2201", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, zopt2201_id);
+
+static struct i2c_driver zopt2201_driver = {
+ .driver = {
+ .name = ZOPT2201_DRV_NAME,
+ },
+ .probe = zopt2201_probe,
+ .id_table = zopt2201_id,
+};
+
+module_i2c_driver(zopt2201_driver);
+
+MODULE_AUTHOR("Peter Meerwald-Stadler <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("IDT ZOPT2201 ambient light and UV B sensor driver");
+MODULE_LICENSE("GPL");
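A worked example of the scale math in zopt2201_read_raw(): the reported scale is the per-gain figure divided by 2^(bits - 13). Under the probe defaults (gain 3, i.e. 6400000 micro-lux per count, at 18-bit resolution) that gives 6.4 / 2^5 = 0.2 lx per count, matching the { 0, 200000, ... } entries in zopt2201_scale_als.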
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index c09329069d0a..42a827a66512 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -788,6 +788,7 @@ static const struct acpi_device_id ak_acpi_match[] = {
{"AK8975", AK8975},
{"AK8963", AK8963},
{"INVN6500", AK8963},
+ {"AK009911", AK09911},
{"AK09911", AK09911},
{"AK09912", AK09912},
{ },
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index fd1da26a62e4..5ec3e41b65f2 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -55,6 +55,28 @@ struct bmp180_calib {
s16 MD;
};
+/* See datasheet Section 4.2.2. */
+struct bmp280_calib {
+ u16 T1;
+ s16 T2;
+ s16 T3;
+ u16 P1;
+ s16 P2;
+ s16 P3;
+ s16 P4;
+ s16 P5;
+ s16 P6;
+ s16 P7;
+ s16 P8;
+ s16 P9;
+ u8 H1;
+ s16 H2;
+ u8 H3;
+ s16 H4;
+ s16 H5;
+ s8 H6;
+};
+
struct bmp280_data {
struct device *dev;
struct mutex lock;
@@ -62,7 +84,10 @@ struct bmp280_data {
struct completion done;
bool use_eoc;
const struct bmp280_chip_info *chip_info;
- struct bmp180_calib calib;
+ union {
+ struct bmp180_calib bmp180;
+ struct bmp280_calib bmp280;
+ } calib;
struct regulator *vddd;
struct regulator *vdda;
unsigned int start_up_time; /* in microseconds */
@@ -120,67 +145,121 @@ static const struct iio_chan_spec bmp280_channels[] = {
},
};
-/*
- * Returns humidity in percent, resolution is 0.01 percent. Output value of
- * "47445" represents 47445/1024 = 46.333 %RH.
- *
- * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
- */
-
-static u32 bmp280_compensate_humidity(struct bmp280_data *data,
- s32 adc_humidity)
+static int bmp280_read_calib(struct bmp280_data *data,
+ struct bmp280_calib *calib,
+ unsigned int chip)
{
+ int ret;
+ unsigned int tmp;
struct device *dev = data->dev;
- unsigned int H1, H3, tmp;
- int H2, H4, H5, H6, ret, var;
+ __le16 t_buf[BMP280_COMP_TEMP_REG_COUNT / 2];
+ __le16 p_buf[BMP280_COMP_PRESS_REG_COUNT / 2];
+
+ /* Read temperature calibration values. */
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
+ t_buf, BMP280_COMP_TEMP_REG_COUNT);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "failed to read temperature calibration parameters\n");
+ return ret;
+ }
+
+ calib->T1 = le16_to_cpu(t_buf[T1]);
+ calib->T2 = le16_to_cpu(t_buf[T2]);
+ calib->T3 = le16_to_cpu(t_buf[T3]);
- ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &H1);
+ /* Read pressure calibration values. */
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
+ p_buf, BMP280_COMP_PRESS_REG_COUNT);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "failed to read pressure calibration parameters\n");
+ return ret;
+ }
+
+ calib->P1 = le16_to_cpu(p_buf[P1]);
+ calib->P2 = le16_to_cpu(p_buf[P2]);
+ calib->P3 = le16_to_cpu(p_buf[P3]);
+ calib->P4 = le16_to_cpu(p_buf[P4]);
+ calib->P5 = le16_to_cpu(p_buf[P5]);
+ calib->P6 = le16_to_cpu(p_buf[P6]);
+ calib->P7 = le16_to_cpu(p_buf[P7]);
+ calib->P8 = le16_to_cpu(p_buf[P8]);
+ calib->P9 = le16_to_cpu(p_buf[P9]);
+
+ /*
+ * Read humidity calibration values.
+ * Due to some odd register addressing we cannot just
+ * do a big bulk read. Instead, we have to read each Hx
+ * value separately and sometimes do some bit shifting...
+ * Humidity data is only available on BME280.
+ */
+ if (chip != BME280_CHIP_ID)
+ return 0;
+
+ ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &tmp);
if (ret < 0) {
dev_err(dev, "failed to read H1 comp value\n");
return ret;
}
+ calib->H1 = tmp;
ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2);
if (ret < 0) {
dev_err(dev, "failed to read H2 comp value\n");
return ret;
}
- H2 = sign_extend32(le16_to_cpu(tmp), 15);
+ calib->H2 = sign_extend32(le16_to_cpu(tmp), 15);
- ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &H3);
+ ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
if (ret < 0) {
dev_err(dev, "failed to read H3 comp value\n");
return ret;
}
+ calib->H3 = tmp;
ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2);
if (ret < 0) {
dev_err(dev, "failed to read H4 comp value\n");
return ret;
}
- H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
- (be16_to_cpu(tmp) & 0xf), 11);
+ calib->H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
+ (be16_to_cpu(tmp) & 0xf), 11);
ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2);
if (ret < 0) {
dev_err(dev, "failed to read H5 comp value\n");
return ret;
}
- H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
+ calib->H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
if (ret < 0) {
dev_err(dev, "failed to read H6 comp value\n");
return ret;
}
- H6 = sign_extend32(tmp, 7);
+ calib->H6 = sign_extend32(tmp, 7);
+
+ return 0;
+}
+
+/*
+ * Returns humidity in percent, resolution is 0.01 percent. Output value of
+ * "47445" represents 47445/1024 = 46.333 %RH.
+ *
+ * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
+ */
+static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+ s32 adc_humidity)
+{
+ s32 var;
+ struct bmp280_calib *calib = &data->calib.bmp280;
var = ((s32)data->t_fine) - (s32)76800;
- var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var))
- + (s32)16384) >> 15) * (((((((var * H6) >> 10)
- * (((var * (s32)H3) >> 11) + (s32)32768)) >> 10)
- + (s32)2097152) * H2 + 8192) >> 14);
- var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4;
+ var = ((((adc_humidity << 14) - (calib->H4 << 20) - (calib->H5 * var))
+ + (s32)16384) >> 15) * (((((((var * calib->H6) >> 10)
+ * (((var * (s32)calib->H3) >> 11) + (s32)32768)) >> 10)
+ + (s32)2097152) * calib->H2 + 8192) >> 14);
+ var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)calib->H1) >> 4;
return var >> 12;
};
@@ -195,31 +274,14 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
static s32 bmp280_compensate_temp(struct bmp280_data *data,
s32 adc_temp)
{
- int ret;
s32 var1, var2;
- __le16 buf[BMP280_COMP_TEMP_REG_COUNT / 2];
+ struct bmp280_calib *calib = &data->calib.bmp280;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
- buf, BMP280_COMP_TEMP_REG_COUNT);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to read temperature calibration parameters\n");
- return ret;
- }
-
- /*
- * The double casts are necessary because le16_to_cpu returns an
- * unsigned 16-bit value. Casting that value directly to a
- * signed 32-bit will not do proper sign extension.
- *
- * Conversely, T1 and P1 are unsigned values, so they can be
- * cast straight to the larger type.
- */
- var1 = (((adc_temp >> 3) - ((s32)le16_to_cpu(buf[T1]) << 1)) *
- ((s32)(s16)le16_to_cpu(buf[T2]))) >> 11;
- var2 = (((((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1]))) *
- ((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1])))) >> 12) *
- ((s32)(s16)le16_to_cpu(buf[T3]))) >> 14;
+ var1 = (((adc_temp >> 3) - ((s32)calib->T1 << 1)) *
+ ((s32)calib->T2)) >> 11;
+ var2 = (((((adc_temp >> 4) - ((s32)calib->T1)) *
+ ((adc_temp >> 4) - ((s32)calib->T1))) >> 12) *
+ ((s32)calib->T3)) >> 14;
data->t_fine = var1 + var2;
return (data->t_fine * 5 + 128) >> 8;
@@ -235,34 +297,25 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
static u32 bmp280_compensate_press(struct bmp280_data *data,
s32 adc_press)
{
- int ret;
s64 var1, var2, p;
- __le16 buf[BMP280_COMP_PRESS_REG_COUNT / 2];
-
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
- buf, BMP280_COMP_PRESS_REG_COUNT);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to read pressure calibration parameters\n");
- return ret;
- }
+ struct bmp280_calib *calib = &data->calib.bmp280;
var1 = ((s64)data->t_fine) - 128000;
- var2 = var1 * var1 * (s64)(s16)le16_to_cpu(buf[P6]);
- var2 += (var1 * (s64)(s16)le16_to_cpu(buf[P5])) << 17;
- var2 += ((s64)(s16)le16_to_cpu(buf[P4])) << 35;
- var1 = ((var1 * var1 * (s64)(s16)le16_to_cpu(buf[P3])) >> 8) +
- ((var1 * (s64)(s16)le16_to_cpu(buf[P2])) << 12);
- var1 = ((((s64)1) << 47) + var1) * ((s64)le16_to_cpu(buf[P1])) >> 33;
+ var2 = var1 * var1 * (s64)calib->P6;
+ var2 += (var1 * (s64)calib->P5) << 17;
+ var2 += ((s64)calib->P4) << 35;
+ var1 = ((var1 * var1 * (s64)calib->P3) >> 8) +
+ ((var1 * (s64)calib->P2) << 12);
+ var1 = ((((s64)1) << 47) + var1) * ((s64)calib->P1) >> 33;
if (var1 == 0)
return 0;
p = ((((s64)1048576 - adc_press) << 31) - var2) * 3125;
p = div64_s64(p, var1);
- var1 = (((s64)(s16)le16_to_cpu(buf[P9])) * (p >> 13) * (p >> 13)) >> 25;
- var2 = (((s64)(s16)le16_to_cpu(buf[P8])) * p) >> 19;
- p = ((p + var1 + var2) >> 8) + (((s64)(s16)le16_to_cpu(buf[P7])) << 4);
+ var1 = (((s64)calib->P9) * (p >> 13) * (p >> 13)) >> 25;
+ var2 = ((s64)(calib->P8) * p) >> 19;
+ p = ((p + var1 + var2) >> 8) + (((s64)calib->P7) << 4);
return (u32)p;
}
@@ -752,7 +805,7 @@ static int bmp180_read_calib(struct bmp280_data *data,
static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
{
s32 x1, x2;
- struct bmp180_calib *calib = &data->calib;
+ struct bmp180_calib *calib = &data->calib.bmp180;
x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15;
x2 = (calib->MC << 11) / (x1 + calib->MD);
@@ -814,7 +867,7 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
s32 b3, b6;
u32 b4, b7;
s32 oss = data->oversampling_press;
- struct bmp180_calib *calib = &data->calib;
+ struct bmp180_calib *calib = &data->calib.bmp180;
b6 = data->t_fine - 4000;
x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11;
@@ -1028,11 +1081,19 @@ int bmp280_common_probe(struct device *dev,
dev_set_drvdata(dev, indio_dev);
/*
- * The BMP085 and BMP180 has calibration in an E2PROM, read it out
- * at probe time. It will not change.
+ * Some chips have calibration parameters "programmed into the devices'
+ * non-volatile memory during production". Let's read them out at probe
+ * time once. They will not change.
*/
if (chip_id == BMP180_CHIP_ID) {
- ret = bmp180_read_calib(data, &data->calib);
+ ret = bmp180_read_calib(data, &data->calib.bmp180);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "failed to read calibration coefficients\n");
+ goto out_disable_vdda;
+ }
+ } else if (chip_id == BMP280_CHIP_ID || chip_id == BME280_CHIP_ID) {
+ ret = bmp280_read_calib(data, &data->calib.bmp280, chip_id);
if (ret < 0) {
dev_err(data->dev,
"failed to read calibration coefficients\n");
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index df23dbcc030a..b8a2c2c8cac5 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -1031,6 +1031,7 @@ static const struct dev_pm_ops sx9500_pm_ops = {
static const struct acpi_device_id sx9500_acpi_match[] = {
{"SSX9500", 0},
+ {"SASX9500", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, sx9500_acpi_match);
diff --git a/drivers/iio/trigger/stm32-lptimer-trigger.c b/drivers/iio/trigger/stm32-lptimer-trigger.c
index de361d879929..98cdc7e47f3d 100644
--- a/drivers/iio/trigger/stm32-lptimer-trigger.c
+++ b/drivers/iio/trigger/stm32-lptimer-trigger.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer Trigger driver
*
@@ -5,8 +6,6 @@
*
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
*
- * License terms: GNU General Public License (GPL), version 2
- *
* Inspired by Benjamin Gaignard's stm32-timer-trigger driver
*/
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index b542dc484969..ccf1ce653b25 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2016
*
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
*
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/iio/iio.h>
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index cbf186522016..5cd700421695 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -4,6 +4,7 @@ menuconfig INFINIBAND
depends on NET
depends on INET
depends on m || IPV6 != m
+ depends on !ALPHA
select IRQ_POLL
---help---
Core support for InfiniBand (IB). Make sure to also select
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 504b926552c6..f69833db0a32 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -12,7 +12,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- security.o nldev.o
+ security.o nldev.o restrack.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f4e8185bccd3..a5b4cf030c11 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -243,8 +243,7 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
EXPORT_SYMBOL(rdma_copy_addr);
int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr,
- u16 *vlan_id)
+ struct rdma_dev_addr *dev_addr)
{
struct net_device *dev;
@@ -266,9 +265,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
return -EADDRNOTAVAIL;
rdma_copy_addr(dev_addr, dev, NULL);
- dev_addr->bound_dev_if = dev->ifindex;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
dev_put(dev);
break;
#if IS_ENABLED(CONFIG_IPV6)
@@ -279,9 +275,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
rdma_copy_addr(dev_addr, dev, NULL);
- dev_addr->bound_dev_if = dev->ifindex;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
break;
}
}
@@ -481,7 +474,7 @@ static int addr_resolve_neigh(struct dst_entry *dst,
if (dst->dev->flags & IFF_LOOPBACK) {
int ret;
- ret = rdma_translate_ip(dst_in, addr, NULL);
+ ret = rdma_translate_ip(dst_in, addr);
if (!ret)
memcpy(addr->dst_dev_addr, addr->src_dev_addr,
MAX_ADDR_LEN);
@@ -558,7 +551,7 @@ static int addr_resolve(struct sockaddr *src_in,
}
if (ndev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip(dst_in, addr, NULL);
+ ret = rdma_translate_ip(dst_in, addr);
/*
* Put the loopback device and get the translated
* device instead.
@@ -744,7 +737,6 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
EXPORT_SYMBOL(rdma_addr_cancel);
struct resolve_cb_context {
- struct rdma_dev_addr *addr;
struct completion comp;
int status;
};
@@ -752,39 +744,31 @@ struct resolve_cb_context {
static void resolve_cb(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context)
{
- if (!status)
- memcpy(((struct resolve_cb_context *)context)->addr,
- addr, sizeof(struct rdma_dev_addr));
((struct resolve_cb_context *)context)->status = status;
complete(&((struct resolve_cb_context *)context)->comp);
}
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, u16 *vlan_id, int *if_index,
+ u8 *dmac, const struct net_device *ndev,
int *hoplimit)
{
- int ret = 0;
struct rdma_dev_addr dev_addr;
struct resolve_cb_context ctx;
- struct net_device *dev;
-
union {
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
-
+ int ret;
rdma_gid2ip(&sgid_addr._sockaddr, sgid);
rdma_gid2ip(&dgid_addr._sockaddr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
- if (if_index)
- dev_addr.bound_dev_if = *if_index;
+ dev_addr.bound_dev_if = ndev->ifindex;
dev_addr.net = &init_net;
- ctx.addr = &dev_addr;
init_completion(&ctx.comp);
ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
&dev_addr, 1000, resolve_cb, &ctx);
@@ -798,42 +782,9 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
return ret;
memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
- dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
- if (!dev)
- return -ENODEV;
- if (if_index)
- *if_index = dev_addr.bound_dev_if;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
- if (hoplimit)
- *hoplimit = dev_addr.hoplimit;
- dev_put(dev);
- return ret;
-}
-EXPORT_SYMBOL(rdma_addr_find_l2_eth_by_grh);
-
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
-{
- int ret = 0;
- struct rdma_dev_addr dev_addr;
- union {
- struct sockaddr _sockaddr;
- struct sockaddr_in _sockaddr_in;
- struct sockaddr_in6 _sockaddr_in6;
- } gid_addr;
-
- rdma_gid2ip(&gid_addr._sockaddr, sgid);
-
- memset(&dev_addr, 0, sizeof(dev_addr));
- dev_addr.net = &init_net;
- ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
- if (ret)
- return ret;
-
- memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
- return ret;
+ *hoplimit = dev_addr.hoplimit;
+ return 0;
}
-EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
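rdma_translate_ip() has dropped its vlan_id out-parameter; a caller that still needs the VLAN now resolves it from the bound interface itself. A minimal sketch of the new calling convention, assuming rdma_copy_addr() records bound_dev_if as the reworked hunk above implies; example_get_vlan() is illustrative, not part of the patch:

#include <rdma/ib_addr.h>

static int example_get_vlan(const struct sockaddr *addr, u16 *vlan_id)
{
	struct rdma_dev_addr dev_addr = { .net = &init_net };
	struct net_device *ndev;
	int ret;

	ret = rdma_translate_ip(addr, &dev_addr);	/* new two-argument form */
	if (ret)
		return ret;

	ndev = dev_get_by_index(dev_addr.net, dev_addr.bound_dev_if);
	if (!ndev)
		return -ENODEV;
	*vlan_id = rdma_vlan_dev_vlan_id(ndev);	/* 0xffff if not a VLAN dev */
	dev_put(ndev);
	return 0;
}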
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 77515638c55c..e9a409d7f4e2 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -573,27 +573,24 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
struct ib_gid_attr attr;
if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
- goto next;
+ continue;
if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
- goto next;
+ continue;
memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));
- if (filter(gid, &attr, context))
+ if (filter(gid, &attr, context)) {
found = true;
-
-next:
- if (found)
+ if (index)
+ *index = i;
break;
+ }
}
read_unlock_irqrestore(&table->rwlock, flags);
if (!found)
return -ENOENT;
-
- if (index)
- *index = i;
return 0;
}
@@ -824,12 +821,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
if (err)
return err;
- err = roce_rescan_device(ib_dev);
-
- if (err) {
- gid_table_cleanup_one(ib_dev);
- gid_table_release_one(ib_dev);
- }
+ rdma_roce_rescan_device(ib_dev);
return err;
}
@@ -883,7 +875,6 @@ int ib_find_gid_by_filter(struct ib_device *device,
port_num, filter,
context, index);
}
-EXPORT_SYMBOL(ib_find_gid_by_filter);
int ib_get_cached_pkey(struct ib_device *device,
u8 port_num,
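The rewritten lookup in ib_cache_gid_find_by_filter() is a general cleanup: skip non-matches with continue and record the hit inside the loop, rather than funneling every iteration through a shared goto label. The control flow, distilled (struct entry and the filter callback are illustrative; locking elided):

#include <linux/errno.h>
#include <linux/types.h>

struct entry {
	bool invalid;
	/* ... payload ... */
};

static int find_first_match(const struct entry *vec, int len,
			    bool (*filter)(const struct entry *e),
			    int *index)
{
	int i;

	for (i = 0; i < len; i++) {
		if (vec[i].invalid)
			continue;		/* was: goto next */
		if (!filter(&vec[i]))
			continue;
		if (index)		/* report the hit inside the loop */
			*index = i;
		return 0;
	}
	return -ENOENT;
}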
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f6b159d79977..e6749157fd86 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -452,13 +452,14 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
cm_id_priv->private_data_len = private_data_len;
}
-static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
- struct ib_grh *grh, struct cm_av *av)
+static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
+ struct ib_grh *grh, struct cm_av *av)
{
av->port = port;
av->pkey_index = wc->pkey_index;
- ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
- grh, &av->ah_attr);
+ return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
+ port->port_num, wc,
+ grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
@@ -494,8 +495,11 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
return ret;
av->port = port;
- ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
- &av->ah_attr);
+ ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
+ &av->ah_attr);
+ if (ret)
+ return ret;
+
av->timeout = path->packet_life_time + 1;
spin_lock_irqsave(&cm.lock, flags);
@@ -1560,6 +1564,35 @@ static u16 cm_get_bth_pkey(struct cm_work *work)
return pkey;
}
+/**
+ * cm_opa_to_ib_sgid - Convert an OPA SGID to an IB SGID
+ * @work: Work completion
+ * @path: Path record
+ *
+ * ULPs (such as IPoIB) do not understand OPA GIDs and will
+ * reject them as the local_gid will not match the sgid. Therefore,
+ * change the pathrec's SGID to an IB SGID.
+ */
+static void cm_opa_to_ib_sgid(struct cm_work *work,
+ struct sa_path_rec *path)
+{
+ struct ib_device *dev = work->port->cm_dev->ib_device;
+ u8 port_num = work->port->port_num;
+
+ if (rdma_cap_opa_ah(dev, port_num) &&
+ (ib_is_opa_gid(&path->sgid))) {
+ union ib_gid sgid;
+
+ if (ib_get_cached_gid(dev, port_num, 0, &sgid, NULL)) {
+ dev_warn(&dev->dev,
+ "Error updating sgid in CM request\n");
+ return;
+ }
+
+ path->sgid = sgid;
+ }
+}
+
static void cm_format_req_event(struct cm_work *work,
struct cm_id_private *cm_id_priv,
struct ib_cm_id *listen_id)
@@ -1573,10 +1606,13 @@ static void cm_format_req_event(struct cm_work *work,
param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
- if (cm_req_has_alt_path(req_msg))
+ cm_opa_to_ib_sgid(work, param->primary_path);
+ if (cm_req_has_alt_path(req_msg)) {
param->alternate_path = &work->path[1];
- else
+ cm_opa_to_ib_sgid(work, param->alternate_path);
+ } else {
param->alternate_path = NULL;
+ }
param->remote_ca_guid = req_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
@@ -1826,9 +1862,11 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
cm_id_priv->id.remote_id = req_msg->local_comm_id;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto destroy;
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
@@ -1841,9 +1879,10 @@ static int cm_req_handler(struct cm_work *work)
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
+ pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
+ be32_to_cpu(cm_id->local_id));
ret = -EINVAL;
- kfree(cm_id_priv->timewait_info);
- goto destroy;
+ goto free_timeinfo;
}
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -1861,56 +1900,50 @@ static int cm_req_handler(struct cm_work *work)
work->port->port_num,
grh->sgid_index,
&gid, &gid_attr);
- if (!ret) {
- if (gid_attr.ndev) {
- work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
- } else {
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &req_msg->primary_local_gid);
- }
- if (cm_req_has_alt_path(req_msg))
- work->path[1].rec_type = work->path[0].rec_type;
- cm_format_paths_from_req(req_msg, &work->path[0],
- &work->path[1]);
- if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
- sa_path_set_dmac(&work->path[0],
- cm_id_priv->av.ah_attr.roce.dmac);
- work->path[0].hop_limit = grh->hop_limit;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
- cm_id_priv);
+ if (ret) {
+ ib_send_cm_rej(cm_id, IB_CM_REJ_UNSUPPORTED, NULL, 0, NULL, 0);
+ goto rejected;
+ }
+
+ if (gid_attr.ndev) {
+ work->path[0].rec_type =
+ sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
+ sa_path_set_ifindex(&work->path[0],
+ gid_attr.ndev->ifindex);
+ sa_path_set_ndev(&work->path[0],
+ dev_net(gid_attr.ndev));
+ dev_put(gid_attr.ndev);
+ } else {
+ cm_path_set_rec_type(work->port->cm_dev->ib_device,
+ work->port->port_num,
+ &work->path[0],
+ &req_msg->primary_local_gid);
}
+ if (cm_req_has_alt_path(req_msg))
+ work->path[1].rec_type = work->path[0].rec_type;
+ cm_format_paths_from_req(req_msg, &work->path[0],
+ &work->path[1]);
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ sa_path_set_dmac(&work->path[0],
+ cm_id_priv->av.ah_attr.roce.dmac);
+ work->path[0].hop_limit = grh->hop_limit;
+ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ cm_id_priv);
if (ret) {
- int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
- work->port->port_num, 0,
- &work->path[0].sgid,
- &gid_attr);
- if (!err && gid_attr.ndev) {
- work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
- } else {
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &req_msg->primary_local_gid);
- }
- if (cm_req_has_alt_path(req_msg))
- work->path[1].rec_type = work->path[0].rec_type;
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
- &work->path[0].sgid, sizeof work->path[0].sgid,
- NULL, 0);
+ int err;
+
+ err = ib_get_cached_gid(work->port->cm_dev->ib_device,
+ work->port->port_num, 0,
+ &work->path[0].sgid,
+ NULL);
+ if (err)
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ NULL, 0, NULL, 0);
+ else
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ &work->path[0].sgid,
+ sizeof(work->path[0].sgid),
+ NULL, 0);
goto rejected;
}
if (cm_req_has_alt_path(req_msg)) {
@@ -1919,7 +1952,7 @@ static int cm_req_handler(struct cm_work *work)
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
- sizeof work->path[0].sgid, NULL, 0);
+ sizeof(work->path[0].sgid), NULL, 0);
goto rejected;
}
}
@@ -1945,6 +1978,8 @@ static int cm_req_handler(struct cm_work *work)
rejected:
atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
+free_timeinfo:
+ kfree(cm_id_priv->timewait_info);
destroy:
ib_destroy_cm_id(cm_id);
return ret;
@@ -1997,6 +2032,8 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REQ_RCVD &&
cm_id->state != IB_CM_MRA_REQ_SENT) {
+ pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2063,6 +2100,8 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REP_RCVD &&
cm_id->state != IB_CM_MRA_REP_SENT) {
+ pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
goto error;
}
@@ -2170,6 +2209,8 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
if (!cm_id_priv) {
cm_dup_rep_handler(work);
+ pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
+ be32_to_cpu(rep_msg->remote_comm_id));
return -EINVAL;
}
@@ -2183,6 +2224,10 @@ static int cm_rep_handler(struct cm_work *work)
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
+ __func__, cm_id_priv->id.state,
+ be32_to_cpu(rep_msg->local_comm_id),
+ be32_to_cpu(rep_msg->remote_comm_id));
goto error;
}
@@ -2196,6 +2241,8 @@ static int cm_rep_handler(struct cm_work *work)
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ pr_debug("%s: Failed to insert remote id %d\n", __func__,
+ be32_to_cpu(rep_msg->remote_comm_id));
goto error;
}
/* Check for a stale connection. */
@@ -2213,6 +2260,10 @@ static int cm_rep_handler(struct cm_work *work)
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
+ pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
+ __func__, be32_to_cpu(rep_msg->local_comm_id),
+ be32_to_cpu(rep_msg->remote_comm_id));
+
if (cur_cm_id_priv) {
cm_id = &cur_cm_id_priv->id;
ib_send_cm_dreq(cm_id, NULL, 0);
@@ -2359,6 +2410,8 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED) {
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2428,6 +2481,8 @@ int ib_send_cm_drep(struct ib_cm_id *cm_id,
if (cm_id->state != IB_CM_DREQ_RCVD) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
+ pr_debug("%s: local_id %d, cm_id->state(%d) != IB_CM_DREQ_RCVD\n",
+ __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
return -EINVAL;
}
@@ -2493,6 +2548,9 @@ static int cm_dreq_handler(struct cm_work *work)
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
+ pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
+ __func__, be32_to_cpu(dreq_msg->local_comm_id),
+ be32_to_cpu(dreq_msg->remote_comm_id));
return -EINVAL;
}
@@ -2535,6 +2593,9 @@ static int cm_dreq_handler(struct cm_work *work)
counter[CM_DREQ_COUNTER]);
goto unlock;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
goto unlock;
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
@@ -2638,6 +2699,8 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id,
cm_enter_timewait(cm_id_priv);
break;
default:
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2748,6 +2811,9 @@ static int cm_rej_handler(struct cm_work *work)
/* fall through */
default:
spin_unlock_irq(&cm_id_priv->lock);
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
goto out;
}
@@ -2811,6 +2877,9 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
}
/* fall through */
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
goto error1;
}
@@ -2912,6 +2981,9 @@ static int cm_mra_handler(struct cm_work *work)
counter[CM_MRA_COUNTER]);
/* fall through */
default:
+ pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
goto out;
}
@@ -3085,6 +3157,12 @@ static int cm_lap_handler(struct cm_work *work)
if (!cm_id_priv)
return -EINVAL;
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto deref;
+
param = &work->cm_event.param.lap_rcvd;
memset(&work->path[0], 0, sizeof(work->path[1]));
cm_path_set_rec_type(work->port->cm_dev->ib_device,
@@ -3131,9 +3209,6 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
cm_id_priv);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
@@ -3386,6 +3461,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
+ int ret;
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
@@ -3398,9 +3474,12 @@ static int cm_sidr_req_handler(struct cm_work *work)
wc = work->mad_recv_wc->wc;
cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
cm_id_priv->av.dgid.global.interface_id = 0;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto out;
+
cm_id_priv->id.remote_id = sidr_req_msg->request_id;
cm_id_priv->tid = sidr_req_msg->hdr.tid;
atomic_inc(&cm_id_priv->work_count);
@@ -3692,6 +3771,7 @@ static void cm_work_handler(struct work_struct *_work)
ret = cm_timewait_handler(work);
break;
default:
+ pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
ret = -EINVAL;
break;
}
@@ -3727,6 +3807,8 @@ static int cm_establish(struct ib_cm_id *cm_id)
ret = -EISCONN;
break;
default:
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
break;
}
@@ -3924,6 +4006,9 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
@@ -3971,6 +4056,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
@@ -4030,6 +4118,9 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
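cm_init_av_for_response() now returns the status of ib_init_ah_attr_from_wc(), so every caller in cm.c must unwind when AV initialization fails. A sketch of the shape the handlers above now share (the labels are illustrative):

static int example_handler(struct cm_work *work,
			   struct cm_id_private *cm_id_priv)
{
	int ret;

	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto destroy;		/* AV init can now fail */

	/* ... continue building the connection state ... */
	return 0;

destroy:
	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}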
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6294a7001d33..e66963ca58bd 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -601,7 +601,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
int ret;
if (addr->sa_family != AF_IB) {
- ret = rdma_translate_ip(addr, dev_addr, NULL);
+ ret = rdma_translate_ip(addr, dev_addr);
} else {
cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
ret = 0;
@@ -612,11 +612,14 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
static inline int cma_validate_port(struct ib_device *device, u8 port,
enum ib_gid_type gid_type,
- union ib_gid *gid, int dev_type,
- int bound_if_index)
+ union ib_gid *gid,
+ struct rdma_id_private *id_priv)
{
- int ret = -ENODEV;
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ int bound_if_index = dev_addr->bound_dev_if;
+ int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
+ int ret = -ENODEV;
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
return ret;
@@ -624,11 +627,13 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
return ret;
- if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
- ndev = dev_get_by_index(&init_net, bound_if_index);
- else
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+ ndev = dev_get_by_index(dev_addr->net, bound_if_index);
+ if (!ndev)
+ return ret;
+ } else {
gid_type = IB_GID_TYPE_IB;
-
+ }
ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
ndev, NULL);
@@ -669,8 +674,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
rdma_protocol_ib(cma_dev->device, port) ?
IB_GID_TYPE_IB :
listen_id_priv->gid_type, gidp,
- dev_addr->dev_type,
- dev_addr->bound_dev_if);
+ id_priv);
if (!ret) {
id_priv->id.port_num = port;
goto out;
@@ -691,8 +695,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
rdma_protocol_ib(cma_dev->device, port) ?
IB_GID_TYPE_IB :
cma_dev->default_gid_type[port - 1],
- gidp, dev_addr->dev_type,
- dev_addr->bound_dev_if);
+ gidp, id_priv);
if (!ret) {
id_priv->id.port_num = port;
goto out;
@@ -2036,6 +2039,33 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_get_service_id);
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid)
+{
+ struct rdma_addr *addr = &cm_id->route.addr;
+
+ if (!cm_id->device) {
+ if (sgid)
+ memset(sgid, 0, sizeof(*sgid));
+ if (dgid)
+ memset(dgid, 0, sizeof(*dgid));
+ return;
+ }
+
+ if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
+ if (sgid)
+ rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
+ if (dgid)
+ rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
+ } else {
+ if (sgid)
+ rdma_addr_get_sgid(&addr->dev_addr, sgid);
+ if (dgid)
+ rdma_addr_get_dgid(&addr->dev_addr, dgid);
+ }
+}
+EXPORT_SYMBOL(rdma_read_gids);
+
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
@@ -2132,7 +2162,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = RDMA_CM_CONNECT;
- ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -2414,6 +2444,26 @@ out:
kfree(work);
}
+static void cma_init_resolve_route_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ROUTE_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+}
+
+static void cma_init_resolve_addr_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDR_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+}
+
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2424,11 +2474,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
@@ -2449,10 +2495,63 @@ err1:
return ret;
}
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct sa_path_rec *path_rec, int num_paths)
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+ unsigned long supported_gids,
+ enum ib_gid_type default_gid)
+{
+ if ((network_type == RDMA_NETWORK_IPV4 ||
+ network_type == RDMA_NETWORK_IPV6) &&
+ test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+ return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+ return default_gid;
+}
+
+/*
+ * cma_iboe_set_path_rec_l2_fields() is a helper that sets the
+ * path record type based on the GID type.
+ * It also sets up the path record's other L2 fields, including the
+ * destination MAC address and the netdev ifindex.
+ * It returns the netdev of the bound interface for this path record entry.
+ */
+static struct net_device *
+cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
+{
+ struct rdma_route *route = &id_priv->id.route;
+ enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
+ struct rdma_addr *addr = &route->addr;
+ unsigned long supported_gids;
+ struct net_device *ndev;
+
+ if (!addr->dev_addr.bound_dev_if)
+ return NULL;
+
+ ndev = dev_get_by_index(addr->dev_addr.net,
+ addr->dev_addr.bound_dev_if);
+ if (!ndev)
+ return NULL;
+
+ supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+ id_priv->id.port_num);
+ gid_type = cma_route_gid_type(addr->dev_addr.network,
+ supported_gids,
+ id_priv->gid_type);
+ /* Use the hint from IP Stack to select GID Type */
+ if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
+ gid_type = ib_network_to_gid_type(addr->dev_addr.network);
+ route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
+
+ sa_path_set_ndev(route->path_rec, addr->dev_addr.net);
+ sa_path_set_ifindex(route->path_rec, ndev->ifindex);
+ sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
+ return ndev;
+}
+
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec)
{
struct rdma_id_private *id_priv;
+ struct net_device *ndev;
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
@@ -2460,20 +2559,33 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
RDMA_CM_ROUTE_RESOLVED))
return -EINVAL;
- id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+ id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
GFP_KERNEL);
if (!id->route.path_rec) {
ret = -ENOMEM;
goto err;
}
- id->route.num_paths = num_paths;
+ if (rdma_protocol_roce(id->device, id->port_num)) {
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+ dev_put(ndev);
+ }
+
+ id->route.num_paths = 1;
return 0;
+
+err_free:
+ kfree(id->route.path_rec);
+ id->route.path_rec = NULL;
err:
cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
return ret;
}
-EXPORT_SYMBOL(rdma_set_ib_paths);
+EXPORT_SYMBOL(rdma_set_ib_path);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
@@ -2483,11 +2595,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
}
@@ -2510,26 +2618,14 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
-static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
- unsigned long supported_gids,
- enum ib_gid_type default_gid)
-{
- if ((network_type == RDMA_NETWORK_IPV4 ||
- network_type == RDMA_NETWORK_IPV6) &&
- test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
- return IB_GID_TYPE_ROCE_UDP_ENCAP;
-
- return default_gid;
-}
-
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
struct rdma_addr *addr = &route->addr;
struct cma_work *work;
int ret;
- struct net_device *ndev = NULL;
- enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ struct net_device *ndev;
+
u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
@@ -2539,9 +2635,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
-
route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
ret = -ENOMEM;
@@ -2550,42 +2643,17 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->num_paths = 1;
- if (addr->dev_addr.bound_dev_if) {
- unsigned long supported_gids;
-
- ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
- if (!ndev) {
- ret = -ENODEV;
- goto err2;
- }
-
- supported_gids = roce_gid_type_mask_support(id_priv->id.device,
- id_priv->id.port_num);
- gid_type = cma_route_gid_type(addr->dev_addr.network,
- supported_gids,
- id_priv->gid_type);
- route->path_rec->rec_type =
- sa_conv_gid_to_pathrec_type(gid_type);
- sa_path_set_ndev(route->path_rec, &init_net);
- sa_path_set_ifindex(route->path_rec, ndev->ifindex);
- }
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
if (!ndev) {
ret = -ENODEV;
goto err2;
}
- sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
-
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&route->path_rec->sgid);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
&route->path_rec->dgid);
- /* Use the hint from IP Stack to select GID Type */
- if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
- gid_type = ib_network_to_gid_type(addr->dev_addr.network);
- route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
-
if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
/* TODO: get the hoplimit from the inet/inet6 device */
route->path_rec->hop_limit = addr->dev_addr.hoplimit;
@@ -2607,11 +2675,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
- work->event.status = 0;
-
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
@@ -2791,11 +2855,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
err:
@@ -2821,11 +2881,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
err:
@@ -3404,9 +3460,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
event.status = ret;
break;
}
- ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
- id_priv->id.route.path_rec,
- &event.param.ud.ah_attr);
+ ib_init_ah_attr_from_path(id_priv->id.device,
+ id_priv->id.port_num,
+ id_priv->id.route.path_rec,
+ &event.param.ud.ah_attr);
event.param.ud.qp_num = rep->qpn;
event.param.ud.qkey = rep->qkey;
event.event = RDMA_CM_EVENT_ESTABLISHED;
@@ -3873,7 +3930,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
struct rdma_dev_addr *dev_addr =
&id_priv->id.route.addr.dev_addr;
struct net_device *ndev =
- dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
enum ib_gid_type gid_type =
id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
@@ -4010,8 +4067,10 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
} else if (addr->sa_family == AF_INET6) {
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else {
- mgid->raw[0] = (gid_type == IB_GID_TYPE_IB) ? 0xff : 0;
- mgid->raw[1] = (gid_type == IB_GID_TYPE_IB) ? 0x0e : 0;
+ mgid->raw[0] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
+ mgid->raw[1] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
mgid->raw[2] = 0;
mgid->raw[3] = 0;
mgid->raw[4] = 0;
@@ -4061,7 +4120,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!ndev) {
err = -ENODEV;
goto out2;
@@ -4179,7 +4238,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
struct net_device *ndev = NULL;
if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net,
+ ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
if (ndev) {
cma_igmp_send(ndev,
@@ -4235,7 +4294,7 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
if (event != NETDEV_BONDING_FAILOVER)
return NOTIFY_DONE;
- if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
+ if (!netif_is_bond_master(ndev))
return NOTIFY_DONE;
mutex_lock(&lock);
@@ -4432,7 +4491,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
goto out;
if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
+ rdma_addr_size(cma_dst_addr(id_priv)),
cma_dst_addr(id_priv),
RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
goto out;
@@ -4444,6 +4503,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
id_stats->qp_type = id->qp_type;
i_id++;
+ nlmsg_end(skb, nlh);
}
cb->args[1] = 0;
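rdma_read_gids() gives ULPs a single call that is correct for both RoCE, where the GIDs are derived from the IP addresses, and IB/OPA, where they come from the resolved device address. A short usage sketch, assuming an established id:

#include <rdma/rdma_cm.h>

static void example_log_gids(struct rdma_cm_id *id)
{
	union ib_gid sgid, dgid;

	/* Both GIDs are zero-filled if id->device is not set yet. */
	rdma_read_gids(id, &sgid, &dgid);
	pr_info("sgid %pI6 dgid %pI6\n", sgid.raw, dgid.raw);
}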
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 31dfee0c8295..eee38b40be99 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -295,7 +295,7 @@ static struct config_group *make_cma_dev(struct config_group *group,
goto fail;
}
- strncpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
+ strlcpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
config_group_init_type_name(&cma_dev_group->ports_group, "ports",
&cma_ports_group_type);
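The strncpy() -> strlcpy() switch matters because strncpy() does not NUL-terminate the destination when the source fills the buffer, while strlcpy() always terminates and returns the full source length, making truncation detectable. A sketch (example_copy_name() is illustrative):

#include <linux/printk.h>
#include <linux/string.h>

static void example_copy_name(char *dst, size_t len, const char *src)
{
	/* strncpy(dst, src, len) would leave dst unterminated
	 * whenever strlen(src) >= len. */
	if (strlcpy(dst, src, len) >= len)
		pr_warn("name '%s' truncated\n", src);
}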
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a1d687a664f8..c4560d84dfae 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -40,8 +40,12 @@
#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
+#include <rdma/restrack.h>
#include "mad_priv.h"
+/* Total number of ports combined across all struct ib_devices */
+#define RDMA_MAX_PORTS 1024
+
struct pkey_index_qp_list {
struct list_head pkey_index_list;
u16 pkey_index;
@@ -137,7 +141,6 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);
-int roce_rescan_device(struct ib_device *ib_dev);
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
int ib_cache_setup_one(struct ib_device *device);
@@ -191,13 +194,6 @@ void ib_sa_cleanup(void);
int rdma_nl_init(void);
void rdma_nl_exit(void);
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
@@ -213,11 +209,6 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
u64 *sn_pfx);
#ifdef CONFIG_SECURITY_INFINIBAND
-int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec);
-
void ib_security_destroy_port_pkey_list(struct ib_device *device);
void ib_security_cache_change(struct ib_device *device,
@@ -240,14 +231,6 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
#else
-static inline int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec)
-{
- return 0;
-}
-
static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
}
@@ -314,8 +297,35 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
}
#endif
-struct ib_device *__ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(u32 ifindex);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
+
+static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
+ struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ib_qp *qp;
+
+ qp = dev->create_qp(pd, attr, udata);
+ if (IS_ERR(qp))
+ return qp;
+
+ qp->device = dev;
+ qp->pd = pd;
+ /*
+ * We don't track XRC QPs for now, because they don't have a PD
+ * and, more importantly, they are created internally by the driver;
+ * see mlx5 create_dev_resources() as an example.
+ */
+ if (attr->qp_type < IB_QPT_XRC_INI) {
+ qp->res.type = RDMA_RESTRACK_QP;
+ rdma_restrack_add(&qp->res);
+ } else
+ qp->res.valid = false;
+
+ return qp;
+}
#endif /* _CORE_PRIV_H */
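_ib_create_qp() registers every non-XRC QP with the new restrack machinery, so the destroy path has to mirror it by removing the entry before the driver frees the QP. A sketch of the matching teardown, assuming rdma_restrack_del() is the removal counterpart (it appears in the cq.c hunk below):

static int example_destroy_qp(struct ib_qp *qp)
{
	struct ib_device *dev = qp->device;

	/* Drop the restrack entry first so dumpers never observe a
	 * freed QP through the hash. */
	if (qp->res.valid)
		rdma_restrack_del(&qp->res);

	return dev->destroy_qp(qp);
}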
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index f2ae75fa3128..bc79ca8215d7 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -25,9 +25,10 @@
#define IB_POLL_FLAGS \
(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
-static int __ib_process_cq(struct ib_cq *cq, int budget)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
{
int i, n, completed = 0;
+ struct ib_wc *wcs = poll_wc ? : cq->wc;
/*
* budget might be (-1) if the caller does not
@@ -35,9 +36,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
* minimum here.
*/
while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
- budget - completed), cq->wc)) > 0) {
+ budget - completed), wcs)) > 0) {
for (i = 0; i < n; i++) {
- struct ib_wc *wc = &cq->wc[i];
+ struct ib_wc *wc = &wcs[i];
if (wc->wr_cqe)
wc->wr_cqe->done(cq, wc);
@@ -60,18 +61,20 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
* @cq: CQ to process
* @budget: number of CQEs to poll for
*
- * This function is used to process all outstanding CQ entries on a
- * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
- * context and does not ask for completion interrupts from the HCA.
+ * This function is used to process all outstanding CQ entries.
+ * It does not offload CQ processing to a different context and does
+ * not ask for completion interrupts from the HCA.
+ * Using direct processing on a CQ with a non-IB_POLL_DIRECT type may
+ * trigger concurrent processing.
*
* Note: do not pass -1 as %budget unless it is guaranteed that the number
* of completions that will be processed is small.
*/
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
- WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);
+ struct ib_wc wcs[IB_POLL_BATCH];
- return __ib_process_cq(cq, budget);
+ return __ib_process_cq(cq, budget, wcs);
}
EXPORT_SYMBOL(ib_process_cq_direct);
@@ -85,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
int completed;
- completed = __ib_process_cq(cq, budget);
+ completed = __ib_process_cq(cq, budget, NULL);
if (completed < budget) {
irq_poll_complete(&cq->iop);
if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -105,7 +108,7 @@ static void ib_cq_poll_work(struct work_struct *work)
struct ib_cq *cq = container_of(work, struct ib_cq, work);
int completed;
- completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE);
+ completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
queue_work(ib_comp_wq, &cq->work);
@@ -117,20 +120,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
}
/**
- * ib_alloc_cq - allocate a completion queue
+ * __ib_alloc_cq - allocate a completion queue
* @dev: device to allocate the CQ for
* @private: driver private data, accessible from cq->cq_context
* @nr_cqe: number of CQEs to allocate
* @comp_vector: HCA completion vectors for this CQ
* @poll_ctx: context to poll the CQ from.
+ * @caller: module owner name.
*
* This is the proper interface to allocate a CQ for in-kernel users. A
* CQ allocated with this interface will automatically be polled from the
* specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
* to use this CQ abstraction.
*/
-struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx)
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx, const char *caller)
{
struct ib_cq_init_attr cq_attr = {
.cqe = nr_cqe,
@@ -154,6 +159,10 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
if (!cq->wc)
goto out_destroy_cq;
+ cq->res.type = RDMA_RESTRACK_CQ;
+ cq->res.kern_name = caller;
+ rdma_restrack_add(&cq->res);
+
switch (cq->poll_ctx) {
case IB_POLL_DIRECT:
cq->comp_handler = ib_cq_completion_direct;
@@ -178,11 +187,12 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
out_free_wc:
kfree(cq->wc);
+ rdma_restrack_del(&cq->res);
out_destroy_cq:
cq->device->destroy_cq(cq);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_alloc_cq);
+EXPORT_SYMBOL(__ib_alloc_cq);
/**
* ib_free_cq - free a completion queue
@@ -209,6 +219,7 @@ void ib_free_cq(struct ib_cq *cq)
}
kfree(cq->wc);
+ rdma_restrack_del(&cq->res);
ret = cq->device->destroy_cq(cq);
WARN_ON_ONCE(ret);
}
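Because ib_process_cq_direct() now polls into an on-stack ib_wc array instead of cq->wc, it no longer races with the IRQ or workqueue poller over shared state, and the WARN_ON for non-IB_POLL_DIRECT CQs could be dropped; serializing against concurrent processing is the caller's job, per the updated comment. A minimal drain-loop sketch:

static void example_drain_cq(struct ib_cq *cq)
{
	/* Bounded budget; -1 would poll until the CQ is empty. */
	while (ib_process_cq_direct(cq, 16) > 0)
		cpu_relax();
}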
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 30914f3baa5f..e8010e73a1cf 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
return 0;
}
-struct ib_device *__ib_device_get_by_index(u32 index)
+static struct ib_device *__ib_device_get_by_index(u32 index)
{
struct ib_device *device;
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
return NULL;
}
+/*
+ * The caller is responsible for releasing the reference by calling put_device()
+ */
+struct ib_device *ib_device_get_by_index(u32 index)
+{
+ struct ib_device *device;
+
+ down_read(&lists_rwsem);
+ device = __ib_device_get_by_index(index);
+ if (device)
+ get_device(&device->dev);
+
+ up_read(&lists_rwsem);
+ return device;
+}
+
static struct ib_device *__ib_device_get_by_name(const char *name)
{
struct ib_device *device;
@@ -247,6 +263,8 @@ struct ib_device *ib_alloc_device(size_t size)
if (!device)
return NULL;
+ rdma_restrack_init(&device->res);
+
device->dev.class = &ib_class;
device_initialize(&device->dev);
@@ -272,7 +290,7 @@ void ib_dealloc_device(struct ib_device *device)
{
WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
device->reg_state != IB_DEV_UNINITIALIZED);
- kobject_put(&device->dev.kobj);
+ put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
@@ -446,7 +464,6 @@ int ib_register_device(struct ib_device *device,
struct ib_udata uhw = {.outlen = 0, .inlen = 0};
struct device *parent = device->dev.parent;
- WARN_ON_ONCE(!parent);
WARN_ON_ONCE(device->dma_device);
if (device->dev.dma_ops) {
/*
@@ -455,16 +472,25 @@ int ib_register_device(struct ib_device *device,
* into device->dev.
*/
device->dma_device = &device->dev;
- if (!device->dev.dma_mask)
- device->dev.dma_mask = parent->dma_mask;
- if (!device->dev.coherent_dma_mask)
- device->dev.coherent_dma_mask =
- parent->coherent_dma_mask;
+ if (!device->dev.dma_mask) {
+ if (parent)
+ device->dev.dma_mask = parent->dma_mask;
+ else
+ WARN_ON_ONCE(true);
+ }
+ if (!device->dev.coherent_dma_mask) {
+ if (parent)
+ device->dev.coherent_dma_mask =
+ parent->coherent_dma_mask;
+ else
+ WARN_ON_ONCE(true);
+ }
} else {
/*
* The caller did not provide custom DMA operations. Use the
* DMA mapping operations of the parent device.
*/
+ WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
@@ -572,6 +598,8 @@ void ib_unregister_device(struct ib_device *device)
}
up_read(&lists_rwsem);
+ rdma_restrack_clean(&device->res);
+
ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
@@ -1017,32 +1045,22 @@ EXPORT_SYMBOL(ib_modify_port);
/**
* ib_find_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
+ * a specified GID value occurs. It searches only the IB link layer.
* @device: The device to query.
* @gid: The GID value to search for.
- * @gid_type: Type of GID.
* @ndev: The ndev related to the GID to search for.
* @port_num: The port number of the device where the GID value was found.
* @index: The index into the GID table where the GID was found. This
* parameter may be NULL.
*/
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- enum ib_gid_type gid_type, struct net_device *ndev,
- u8 *port_num, u16 *index)
+ struct net_device *ndev, u8 *port_num, u16 *index)
{
union ib_gid tmp_gid;
int ret, port, i;
for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
- if (rdma_cap_roce_gid_table(device, port)) {
- if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
- ndev, index)) {
- *port_num = port;
- return 0;
- }
- }
-
- if (gid_type != IB_GID_TYPE_IB)
+ if (rdma_cap_roce_gid_table(device, port))
continue;
for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
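ib_device_get_by_index() takes a reference under lists_rwsem, so every caller must pair it with put_device(); the nldev handlers below all follow this shape:

static int example_use_device(u32 index)
{
	struct ib_device *device;

	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	/* ... the device cannot be freed while we hold the ref ... */

	put_device(&device->dev);	/* drop the reference we took */
	return 0;
}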
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 84d2615b5d4b..a0a9ed719031 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -388,13 +388,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
EXPORT_SYMBOL(ib_flush_fmr_pool);
/**
- * ib_fmr_pool_map_phys -
- * @pool:FMR pool to allocate FMR from
- * @page_list:List of pages to map
- * @list_len:Number of pages in @page_list
- * @io_virtual_address:I/O virtual address for new FMR
- *
- * Map an FMR from an FMR pool.
+ * ib_fmr_pool_map_phys - Map an FMR from an FMR pool.
+ * @pool_handle: FMR pool to allocate FMR from
+ * @page_list: List of pages to map
+ * @list_len: Number of pages in @page_list
+ * @io_virtual_address: I/O virtual address for new FMR
*/
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
u64 *page_list,
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 3c4faadb8cdd..81528f64061a 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -654,6 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ ret = -EINVAL;
for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index cb91245e9163..c50596f7f98a 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -49,7 +49,6 @@
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
-#include "core_priv.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 1fb72c356e36..3ccaae18ad75 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -41,8 +41,6 @@
#include <linux/module.h>
#include "core_priv.h"
-#include "core_priv.h"
-
static DEFINE_MUTEX(rdma_nl_mutex);
static struct sock *nls;
static struct {
@@ -83,15 +81,13 @@ static bool is_nl_valid(unsigned int type, unsigned int op)
if (!is_nl_msg_valid(type, op))
return false;
- cb_table = rdma_nl_types[type].cb_table;
-#ifdef CONFIG_MODULES
- if (!cb_table) {
+ if (!rdma_nl_types[type].cb_table) {
mutex_unlock(&rdma_nl_mutex);
request_module("rdma-netlink-subsys-%d", type);
mutex_lock(&rdma_nl_mutex);
- cb_table = rdma_nl_types[type].cb_table;
}
-#endif
+
+ cb_table = rdma_nl_types[type].cb_table;
if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
return false;
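The removed #ifdef CONFIG_MODULES guard was unnecessary: on non-modular kernels request_module() compiles to a harmless stub, so the demand-load can be written unconditionally. The pattern, reduced to its essentials (the return type follows the surrounding file):

static const struct rdma_nl_cbs *get_cb_table(unsigned int type)
{
	if (!rdma_nl_types[type].cb_table) {
		/* request_module() may sleep; drop the lock around it. */
		mutex_unlock(&rdma_nl_mutex);
		request_module("rdma-netlink-subsys-%d", type);
		mutex_lock(&rdma_nl_mutex);
	}
	return rdma_nl_types[type].cb_table;	/* may still be NULL */
}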
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 9a05245a1acf..fa8655e3b3ed 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -31,6 +31,8 @@
*/
#include <linux/module.h>
+#include <linux/pid.h>
+#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>
@@ -52,16 +54,42 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
+ .len = 16 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
+ .len = TASK_COMM_LEN },
};
-static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
- char fw[IB_FW_VERSION_NAME_MAX];
-
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
return -EMSGSIZE;
if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
return -EMSGSIZE;
+
+ return 0;
+}
+
+static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+{
+ char fw[IB_FW_VERSION_NAME_MAX];
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
return -EMSGSIZE;
@@ -92,10 +120,9 @@ static int fill_port_info(struct sk_buff *msg,
struct ib_port_attr attr;
int ret;
- if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
- return -EMSGSIZE;
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
+ if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
+
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
return -EMSGSIZE;
@@ -126,6 +153,137 @@ static int fill_port_info(struct sk_buff *msg,
return 0;
}
+static int fill_res_info_entry(struct sk_buff *msg,
+ const char *name, u64 curr)
+{
+ struct nlattr *entry_attr;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
+ goto err;
+ if (nla_put_u64_64bit(msg,
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 0))
+ goto err;
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
+{
+ static const char * const names[RDMA_RESTRACK_MAX] = {
+ [RDMA_RESTRACK_PD] = "pd",
+ [RDMA_RESTRACK_CQ] = "cq",
+ [RDMA_RESTRACK_QP] = "qp",
+ };
+
+ struct rdma_restrack_root *res = &device->res;
+ struct nlattr *table_attr;
+ int ret, i, curr;
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
+ if (!names[i])
+ continue;
+ curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
+ ret = fill_res_info_entry(msg, names[i], curr);
+ if (ret)
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return ret;
+}
+
+static int fill_res_qp_entry(struct sk_buff *msg,
+ struct ib_qp *qp, uint32_t port)
+{
+ struct rdma_restrack_entry *res = &qp->res;
+ struct ib_qp_init_attr qp_init_attr;
+ struct nlattr *entry_attr;
+ struct ib_qp_attr qp_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
+ if (ret)
+ return ret;
+
+ if (port && port != qp_attr.port_num)
+ return 0;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
+ if (!entry_attr)
+ goto out;
+
+ /* In create_qp() port is not set yet */
+ if (qp_attr.port_num &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
+ goto err;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
+ goto err;
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
+ qp_attr.dest_qp_num))
+ goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
+ qp_attr.rq_psn))
+ goto err;
+ }
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
+ goto err;
+
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
+ qp_attr.path_mig_state))
+ goto err;
+ }
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
+ goto err;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
+ goto err;
+
+ /*
+ * The existence of a task means that this is a user QP; the netlink
+ * user is invited to read /proc/PID/comm to get the name of the
+ * task, and res->kern_name should be NULL.
+ */
+ if (rdma_is_kernel_res(res)) {
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, res->kern_name))
+ goto err;
+ } else {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, task_pid_vnr(res->task)))
+ goto err;
+ }
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+out:
+ return -EMSGSIZE;
+}
+
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -142,27 +300,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(index);
+ device = ib_device_get_by_index(index);
if (!device)
return -EINVAL;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
0, 0);
err = fill_dev_info(msg, device);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
+ if (err)
+ goto err_free;
nlmsg_end(msg, nlh);
+ put_device(&device->dev);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return err;
}
static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +385,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(index);
+ device = ib_device_get_by_index(index);
if (!device)
return -EINVAL;
port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
- if (!rdma_is_port_valid(device, port))
- return -EINVAL;
+ if (!rdma_is_port_valid(device, port)) {
+ err = -EINVAL;
+ goto err;
+ }
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
0, 0);
err = fill_port_info(msg, device, port);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
+ if (err)
+ goto err_free;
nlmsg_end(msg, nlh);
+ put_device(&device->dev);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return err;
}
static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +439,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
return -EINVAL;
ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(ifindex);
+ device = ib_device_get_by_index(ifindex);
if (!device)
return -EINVAL;
@@ -299,10 +473,219 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
nlmsg_end(skb, nlh);
}
-out: cb->args[0] = idx;
+out:
+ put_device(&device->dev);
+ cb->args[0] = idx;
return skb->len;
}
+static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ int ret;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ goto err;
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, 0);
+
+ ret = fill_res_info(msg, device);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(msg, nlh);
+ put_device(&device->dev);
+ return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return ret;
+}
+
+static int _nldev_res_get_dumpit(struct ib_device *device,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ unsigned int idx)
+{
+ int start = cb->args[0];
+ struct nlmsghdr *nlh;
+
+ if (idx < start)
+ return 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_res_info(skb, device)) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ nlmsg_end(skb, nlh);
+
+ idx++;
+
+out:
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nldev_res_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
+}
+
+static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct rdma_restrack_entry *res;
+ int err, ret = 0, idx = 0;
+ struct nlattr *table_attr;
+ struct ib_device *device;
+ int start = cb->args[0];
+ struct ib_qp *qp = NULL;
+ struct nlmsghdr *nlh;
+ u32 index, port = 0;
+
+ err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
+ /*
+ * Right now a device index is required to return QP information.
+ * This code could be extended to return all devices in one shot:
+ * if RDMA_NLDEV_ATTR_DEV_INDEX is absent, iterate over all devices.
+ *
+ * That is not needed for now.
+ */
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ /*
+ * If no PORT_INDEX is supplied, we will return all QPs from that device
+ */
+ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err_index;
+ }
+ }
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_QP_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_nldev_handle(skb, device)) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ table_attr = nla_nest_start(skb, RDMA_NLDEV_ATTR_RES_QP);
+ if (!table_attr) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ down_read(&device->res.rwsem);
+ hash_for_each_possible(device->res.hash, res, node, RDMA_RESTRACK_QP) {
+ if (idx < start)
+ goto next;
+
+ if ((rdma_is_kernel_res(res) &&
+ task_active_pid_ns(current) != &init_pid_ns) ||
+ (!rdma_is_kernel_res(res) &&
+ task_active_pid_ns(current) != task_active_pid_ns(res->task)))
+ /*
+ * 1. Kernel QPs should be visible in the init namespace only
+ * 2. Present only QPs visible in the current namespace
+ */
+ goto next;
+
+ if (!rdma_restrack_get(res))
+ /*
+ * The resource is under release now, but since we are
+ * not releasing the lock yet, it will actually be freed
+ * on our next pass, once we have the ->next pointer.
+ */
+ goto next;
+
+ qp = container_of(res, struct ib_qp, res);
+
+ up_read(&device->res.rwsem);
+ ret = fill_res_qp_entry(skb, qp, port);
+ down_read(&device->res.rwsem);
+ /*
+ * Put the resource back; it won't actually be released
+ * until &device->res.rwsem can be taken for write.
+ */
+ rdma_restrack_put(res);
+
+ if (ret == -EMSGSIZE)
+ /*
+ * This could be optimized by using list_prepare_entry
+ * and list_for_each_entry_continue to resume the walk.
+ */
+ break;
+ if (ret)
+ goto res_err;
+next: idx++;
+ }
+ up_read(&device->res.rwsem);
+
+ nla_nest_end(skb, table_attr);
+ nlmsg_end(skb, nlh);
+ cb->args[0] = idx;
+
+ /*
+ * No more QPs to fill: cancel the message and
+ * return 0 to mark the end of the dump.
+ */
+ if (!qp)
+ goto err;
+
+ put_device(&device->dev);
+ return skb->len;
+
+res_err:
+ nla_nest_cancel(skb, table_attr);
+ up_read(&device->res.rwsem);
+
+err:
+ nlmsg_cancel(skb, nlh);
+
+err_index:
+ put_device(&device->dev);
+ return ret;
+}
+
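
The dump handler above follows the standard netlink dump contract: the core calls .dump repeatedly, the handler resumes from the cursor it stashed in cb->args[0], and the dump ends once the handler stops adding data. A minimal sketch of that pattern, with the hypothetical my_obj_count()/my_fill()/MY_CMD standing in for the restrack-specific pieces:

static int my_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	int start = cb->args[0];	/* cursor saved between invocations */
	struct nlmsghdr *nlh;
	int idx;

	for (idx = 0; idx < my_obj_count(); idx++) {
		if (idx < start)
			continue;	/* already emitted on a prior pass */

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, MY_CMD, 0, NLM_F_MULTI);
		if (!nlh)
			break;		/* skb full, resume here next time */
		if (my_fill(skb, idx)) {
			nlmsg_cancel(skb, nlh);
			break;
		}
		nlmsg_end(skb, nlh);
	}

	cb->args[0] = idx;		/* save the cursor */
	return skb->len;		/* 0 means the dump is complete */
}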
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
@@ -312,6 +695,23 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_port_get_doit,
.dump = nldev_port_get_dumpit,
},
+ [RDMA_NLDEV_CMD_RES_GET] = {
+ .doit = nldev_res_get_doit,
+ .dump = nldev_res_get_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_QP_GET] = {
+ .dump = nldev_res_get_qp_dumpit,
+ /*
+ * .doit is not implemented yet for two reasons:
+ * 1. It is not needed yet.
+ * 2. A .doit handler needs a resource identifier. That is easy
+ * for QPs (device index + port index + LQPN), but not for the
+ * rest of the resources (PD and CQ). Since it is better to
+ * provide a uniform interface for all resources, wait until
+ * the other resources are implemented too.
+ */
+ },
};
void __init nldev_init(void)
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
new file mode 100644
index 000000000000..857637bf46da
--- /dev/null
+++ b/drivers/infiniband/core/restrack.c
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
+#include <linux/mutex.h>
+#include <linux/sched/task.h>
+#include <linux/uaccess.h>
+#include <linux/pid_namespace.h>
+
+void rdma_restrack_init(struct rdma_restrack_root *res)
+{
+ init_rwsem(&res->rwsem);
+}
+
+void rdma_restrack_clean(struct rdma_restrack_root *res)
+{
+ WARN_ON_ONCE(!hash_empty(res->hash));
+}
+
+int rdma_restrack_count(struct rdma_restrack_root *res,
+ enum rdma_restrack_type type,
+ struct pid_namespace *ns)
+{
+ struct rdma_restrack_entry *e;
+ u32 cnt = 0;
+
+ down_read(&res->rwsem);
+ hash_for_each_possible(res->hash, e, node, type) {
+ if (ns == &init_pid_ns ||
+ (!rdma_is_kernel_res(e) &&
+ ns == task_active_pid_ns(e->task)))
+ cnt++;
+ }
+ up_read(&res->rwsem);
+ return cnt;
+}
+EXPORT_SYMBOL(rdma_restrack_count);
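
Callers can use this to report per-namespace resource usage; a hypothetical caller counting the QPs visible from the current task's namespace (task_active_pid_ns() comes from <linux/pid.h>):

	int qps = rdma_restrack_count(&device->res, RDMA_RESTRACK_QP,
				      task_active_pid_ns(current));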
+
+static void set_kern_name(struct rdma_restrack_entry *res)
+{
+ enum rdma_restrack_type type = res->type;
+ struct ib_qp *qp;
+
+ if (type != RDMA_RESTRACK_QP)
+ /* PD and CQ types already have this name embedded in them */
+ return;
+
+ qp = container_of(res, struct ib_qp, res);
+ if (!qp->pd) {
+ WARN_ONCE(true, "XRC QPs are not supported\n");
+ /* Survive, despite the programmer's error */
+ res->kern_name = " ";
+ return;
+ }
+
+ res->kern_name = qp->pd->res.kern_name;
+}
+
+static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
+{
+ enum rdma_restrack_type type = res->type;
+ struct ib_device *dev;
+ struct ib_xrcd *xrcd;
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+
+ switch (type) {
+ case RDMA_RESTRACK_PD:
+ pd = container_of(res, struct ib_pd, res);
+ dev = pd->device;
+ break;
+ case RDMA_RESTRACK_CQ:
+ cq = container_of(res, struct ib_cq, res);
+ dev = cq->device;
+ break;
+ case RDMA_RESTRACK_QP:
+ qp = container_of(res, struct ib_qp, res);
+ dev = qp->device;
+ break;
+ case RDMA_RESTRACK_XRCD:
+ xrcd = container_of(res, struct ib_xrcd, res);
+ dev = xrcd->device;
+ break;
+ default:
+ WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
+ return NULL;
+ }
+
+ return dev;
+}
+
+void rdma_restrack_add(struct rdma_restrack_entry *res)
+{
+ struct ib_device *dev = res_to_dev(res);
+
+ if (!dev)
+ return;
+
+ if (!uaccess_kernel()) {
+ get_task_struct(current);
+ res->task = current;
+ res->kern_name = NULL;
+ } else {
+ set_kern_name(res);
+ res->task = NULL;
+ }
+
+ kref_init(&res->kref);
+ init_completion(&res->comp);
+ res->valid = true;
+
+ down_write(&dev->res.rwsem);
+ hash_add(dev->res.hash, &res->node, res->type);
+ up_write(&dev->res.rwsem);
+}
+EXPORT_SYMBOL(rdma_restrack_add);
+
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res)
+{
+ return kref_get_unless_zero(&res->kref);
+}
+EXPORT_SYMBOL(rdma_restrack_get);
+
+static void restrack_release(struct kref *kref)
+{
+ struct rdma_restrack_entry *res;
+
+ res = container_of(kref, struct rdma_restrack_entry, kref);
+ complete(&res->comp);
+}
+
+int rdma_restrack_put(struct rdma_restrack_entry *res)
+{
+ return kref_put(&res->kref, restrack_release);
+}
+EXPORT_SYMBOL(rdma_restrack_put);
+
+void rdma_restrack_del(struct rdma_restrack_entry *res)
+{
+ struct ib_device *dev;
+
+ if (!res->valid)
+ return;
+
+ dev = res_to_dev(res);
+ if (!dev)
+ return;
+
+ rdma_restrack_put(res);
+
+ wait_for_completion(&res->comp);
+
+ down_write(&dev->res.rwsem);
+ hash_del(&res->node);
+ res->valid = false;
+ if (res->task)
+ put_task_struct(res->task);
+ up_write(&dev->res.rwsem);
+}
+EXPORT_SYMBOL(rdma_restrack_del);
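
rdma_restrack_del() above is an instance of a common kernel teardown idiom: drop the initial kref, then block on a completion that the last rdma_restrack_put() fires. Reduced to its essentials (a sketch of the idiom, not the restrack API itself):

#include <linux/kref.h>
#include <linux/completion.h>

struct tracked {
	struct kref kref;
	struct completion comp;
};

static void tracked_init(struct tracked *t)
{
	kref_init(&t->kref);		/* holds the initial reference */
	init_completion(&t->comp);
}

static void tracked_release(struct kref *kref)
{
	struct tracked *t = container_of(kref, struct tracked, kref);

	complete(&t->comp);		/* last reference gone, wake the deleter */
}

static void tracked_del(struct tracked *t)
{
	kref_put(&t->kref, tracked_release);	/* drop the initial reference */
	wait_for_completion(&t->comp);		/* wait out transient readers */
	/* now safe to unlink and free the entry */
}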
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 90e3889b7fbe..5a52ec77940a 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -410,15 +410,18 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
rtnl_unlock();
}
-/* This function will rescan all of the network devices in the system
- * and add their gids, as needed, to the relevant RoCE devices. */
-int roce_rescan_device(struct ib_device *ib_dev)
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their GIDs, as needed, to the relevant RoCE devices.
+ *
+ * @ib_dev: the RDMA device
+ */
+void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
enum_all_gids_of_dev_cb, NULL);
-
- return 0;
}
+EXPORT_SYMBOL(rdma_roce_rescan_device);
static void callback_for_addr_gid_device_scan(struct ib_device *device,
u8 port,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index ab5e1024fea9..8cf15d4a8ac4 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1227,9 +1227,9 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr)
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr)
{
int ret;
u16 gid_index;
@@ -1341,10 +1341,11 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
return 0;
}
-EXPORT_SYMBOL(ib_init_ah_from_path);
+EXPORT_SYMBOL(ib_init_ah_attr_from_path);
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
+ struct rdma_ah_attr ah_attr;
unsigned long flags;
spin_lock_irqsave(&query->port->ah_lock, flags);
@@ -1356,6 +1357,15 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
query->sm_ah = query->port->sm_ah;
spin_unlock_irqrestore(&query->port->ah_lock, flags);
+ /*
+ * Always check that sm_ah has a valid DLID assigned
+ * before querying for class port info.
+ */
+ if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
+ !rdma_is_valid_unicast_lid(&ah_attr)) {
+ kref_put(&query->sm_ah->ref, free_sm_ah);
+ return -EAGAIN;
+ }
query->mad_buf = ib_create_send_mad(query->port->agent, 1,
query->sm_ah->pkey_index,
0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 59b2f96d986a..b61dda6b04fc 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -653,12 +653,11 @@ int ib_security_modify_qp(struct ib_qp *qp,
}
return ret;
}
-EXPORT_SYMBOL(ib_security_modify_qp);
-int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec)
+static int ib_security_pkey_access(struct ib_device *dev,
+ u8 port_num,
+ u16 pkey_index,
+ void *sec)
{
u64 subnet_prefix;
u16 pkey;
@@ -678,7 +677,6 @@ int ib_security_pkey_access(struct ib_device *dev,
return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
-EXPORT_SYMBOL(ib_security_pkey_access);
static int ib_mad_agent_security_change(struct notifier_block *nb,
unsigned long event,
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index e30d86fa1855..8ae1308eecc7 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1276,7 +1276,6 @@ int ib_device_register_sysfs(struct ib_device *device,
int ret;
int i;
- WARN_ON_ONCE(!device->dev.parent);
ret = dev_set_name(class_dev, "%s", device->name);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f2a7f62c2834..8ae636bb09e5 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -53,6 +53,8 @@
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
+#include "core_priv.h"
+
MODULE_AUTHOR("Libor Michalek");
MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
MODULE_LICENSE("Dual BSD/GPL");
@@ -104,10 +106,13 @@ struct ib_ucm_event {
enum {
IB_UCM_MAJOR = 231,
IB_UCM_BASE_MINOR = 224,
- IB_UCM_MAX_DEVICES = 32
+ IB_UCM_MAX_DEVICES = RDMA_MAX_PORTS,
+ IB_UCM_NUM_FIXED_MINOR = 32,
+ IB_UCM_NUM_DYNAMIC_MINOR = IB_UCM_MAX_DEVICES - IB_UCM_NUM_FIXED_MINOR,
};
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
+static dev_t dynamic_ucm_dev;
static void ib_ucm_add_one(struct ib_device *device);
static void ib_ucm_remove_one(struct ib_device *device, void *client_data);
@@ -1130,11 +1135,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
return result;
}
-static unsigned int ib_ucm_poll(struct file *filp,
+static __poll_t ib_ucm_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct ib_ucm_file *file = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file->poll_wait, wait);
@@ -1199,7 +1204,6 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0;
}
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static void ib_ucm_release_dev(struct device *dev)
{
struct ib_ucm_device *ucm_dev;
@@ -1210,10 +1214,7 @@ static void ib_ucm_release_dev(struct device *dev)
static void ib_ucm_free_dev(struct ib_ucm_device *ucm_dev)
{
- if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
- clear_bit(ucm_dev->devnum, dev_map);
- else
- clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
+ clear_bit(ucm_dev->devnum, dev_map);
}
static const struct file_operations ucm_fops = {
@@ -1235,27 +1236,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
-static dev_t overflow_maj;
-static int find_overflow_devnum(void)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
- "infiniband_cm");
- if (ret) {
- pr_err("ucm: couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
- if (ret >= IB_UCM_MAX_DEVICES)
- return -1;
-
- return ret;
-}
-
static void ib_ucm_add_one(struct ib_device *device)
{
int devnum;
@@ -1274,19 +1254,14 @@ static void ib_ucm_add_one(struct ib_device *device)
ucm_dev->dev.release = ib_ucm_release_dev;
devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
- if (devnum >= IB_UCM_MAX_DEVICES) {
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- ucm_dev->devnum = devnum;
- base = devnum + IB_UCM_BASE_DEV;
- set_bit(devnum, dev_map);
- }
+ if (devnum >= IB_UCM_MAX_DEVICES)
+ goto err;
+ ucm_dev->devnum = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UCM_NUM_FIXED_MINOR)
+ base = dynamic_ucm_dev + devnum - IB_UCM_NUM_FIXED_MINOR;
+ else
+ base = IB_UCM_BASE_DEV + devnum;
cdev_init(&ucm_dev->cdev, &ucm_fops);
ucm_dev->cdev.owner = THIS_MODULE;
@@ -1334,13 +1309,20 @@ static int __init ib_ucm_init(void)
{
int ret;
- ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
+ ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR,
"infiniband_cm");
if (ret) {
pr_err("ucm: couldn't register device number\n");
goto error1;
}
+ ret = alloc_chrdev_region(&dynamic_ucm_dev, 0, IB_UCM_NUM_DYNAMIC_MINOR,
+ "infiniband_cm");
+ if (ret) {
+ pr_err("ucm: couldn't register dynamic device number\n");
+ goto err_alloc;
+ }
+
ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
if (ret) {
pr_err("ucm: couldn't create abi_version attribute\n");
@@ -1357,7 +1339,9 @@ static int __init ib_ucm_init(void)
error3:
class_remove_file(&cm_class, &class_attr_abi_version.attr);
error2:
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+ unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
+err_alloc:
+ unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
error1:
return ret;
}
@@ -1366,9 +1350,8 @@ static void __exit ib_ucm_cleanup(void)
{
ib_unregister_client(&ucm_client);
class_remove_file(&cm_class, &class_attr_abi_version.attr);
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
+ unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
+ unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
idr_destroy(&ctx_id_table);
}
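
The same fixed-plus-dynamic minor scheme recurs in user_mad.c and uverbs_main.c below: the 32 historical minors stay pinned with register_chrdev_region() for userspace that hardcodes them, while every device past the old limit gets a minor out of a dynamically allocated region. A condensed sketch of the dev_t mapping, using hypothetical FOO_* names:

#define FOO_MAJOR		231	/* hypothetical fixed major */
#define FOO_BASE_DEV		MKDEV(FOO_MAJOR, 0)
#define FOO_NUM_FIXED_MINOR	32

static dev_t dynamic_foo_dev;		/* from alloc_chrdev_region() */

static dev_t foo_devt(int devnum)
{
	if (devnum >= FOO_NUM_FIXED_MINOR)
		return dynamic_foo_dev + devnum - FOO_NUM_FIXED_MINOR;
	return FOO_BASE_DEV + devnum;	/* legacy fixed major/minor */
}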
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index eb85b546e223..6ba4231f2b07 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,13 +904,14 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
IB_PATH_BIDIRECTIONAL;
- if (rec->rec_type == SA_PATH_REC_TYPE_IB) {
- ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
- } else {
+ if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
struct sa_path_rec ib;
sa_convert_path_opa_to_ib(&ib, rec);
ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
+
+ } else {
+ ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
}
}
@@ -943,8 +944,8 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
+ NULL);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.src_addr);
}
@@ -956,8 +957,8 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, NULL,
+ (union ib_gid *)&addr->sib_addr);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.dst_addr);
}
@@ -1231,9 +1232,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
struct sa_path_rec opa;
sa_convert_path_ib_to_opa(&opa, &sa_path);
- ret = rdma_set_ib_paths(ctx->cm_id, &opa, 1);
+ ret = rdma_set_ib_path(ctx->cm_id, &opa);
} else {
- ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
+ ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
}
if (ret)
return ret;
@@ -1630,10 +1631,10 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
return ret;
}
-static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ucma_file *file = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file->poll_wait, wait);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 130606c3b07c..9a4e899d94b3 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -352,7 +352,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
return -EINVAL;
}
- ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
offset + ib_umem_offset(umem));
if (ret < 0)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 4b64dd02e090..78c77962422e 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -55,16 +55,21 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
+#include "core_priv.h"
+
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
- IB_UMAD_MAX_PORTS = 64,
+ IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS,
IB_UMAD_MAX_AGENTS = 32,
IB_UMAD_MAJOR = 231,
- IB_UMAD_MINOR_BASE = 0
+ IB_UMAD_MINOR_BASE = 0,
+ IB_UMAD_NUM_FIXED_MINOR = 64,
+ IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR,
+ IB_ISSM_MINOR_BASE = IB_UMAD_NUM_FIXED_MINOR,
};
/*
@@ -127,9 +132,12 @@ struct ib_umad_packet {
static struct class *umad_class;
-static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
+ IB_UMAD_NUM_FIXED_MINOR;
+static dev_t dynamic_umad_dev;
+static dev_t dynamic_issm_dev;
-static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
@@ -233,8 +241,7 @@ static void recv_handler(struct ib_mad_agent *agent,
* On OPA devices it is okay to lose the upper 16 bits of LID as this
* information is obtained elsewhere. Mask off the upper 16 bits.
*/
- if (agent->device->port_immutable[agent->port_num].core_cap_flags &
- RDMA_CORE_PORT_INTEL_OPA)
+ if (rdma_cap_opa_mad(agent->device, agent->port_num))
packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
mad_recv_wc->wc->slid);
else
@@ -246,10 +253,14 @@ static void recv_handler(struct ib_mad_agent *agent,
if (packet->mad.hdr.grh_present) {
struct rdma_ah_attr ah_attr;
const struct ib_global_route *grh;
+ int ret;
- ib_init_ah_from_wc(agent->device, agent->port_num,
- mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
- &ah_attr);
+ ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num,
+ mad_recv_wc->wc,
+ mad_recv_wc->recv_buf.grh,
+ &ah_attr);
+ if (ret)
+ goto err2;
grh = rdma_ah_read_grh(&ah_attr);
packet->mad.hdr.gid_index = grh->sgid_index;
@@ -500,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
}
memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.type = rdma_ah_find_type(file->port->ib_dev,
+ ah_attr.type = rdma_ah_find_type(agent->device,
file->port->port_num);
rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
@@ -617,12 +628,12 @@ err:
return ret;
}
-static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ib_umad_file *file = filp->private_data;
/* we will always be able to post a MAD send */
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
poll_wait(filp, &file->recv_wait, wait);
@@ -1139,54 +1150,26 @@ static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_MAD_ABI_VERSION));
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
-static int find_overflow_devnum(struct ib_device *device)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
- "infiniband_mad");
- if (ret) {
- dev_err(&device->dev,
- "couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
- if (ret >= IB_UMAD_MAX_PORTS)
- return -1;
-
- return ret;
-}
-
static int ib_umad_init_port(struct ib_device *device, int port_num,
struct ib_umad_device *umad_dev,
struct ib_umad_port *port)
{
int devnum;
- dev_t base;
+ dev_t base_umad;
+ dev_t base_issm;
- spin_lock(&port_lock);
devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (devnum >= IB_UMAD_MAX_PORTS) {
- spin_unlock(&port_lock);
- devnum = find_overflow_devnum(device);
- if (devnum < 0)
- return -1;
-
- spin_lock(&port_lock);
- port->dev_num = devnum + IB_UMAD_MAX_PORTS;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
+ if (devnum >= IB_UMAD_MAX_PORTS)
+ return -1;
+ port->dev_num = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
+ base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
+ base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
} else {
- port->dev_num = devnum;
- base = devnum + base_dev;
- set_bit(devnum, dev_map);
+ base_umad = devnum + base_umad_dev;
+ base_issm = devnum + base_issm_dev;
}
- spin_unlock(&port_lock);
port->ib_dev = device;
port->port_num = port_num;
@@ -1198,7 +1181,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
port->cdev.owner = THIS_MODULE;
cdev_set_parent(&port->cdev, &umad_dev->kobj);
kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
- if (cdev_add(&port->cdev, base, 1))
+ if (cdev_add(&port->cdev, base_umad, 1))
goto err_cdev;
port->dev = device_create(umad_class, device->dev.parent,
@@ -1212,12 +1195,11 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
if (device_create_file(port->dev, &dev_attr_port))
goto err_dev;
- base += IB_UMAD_MAX_PORTS;
cdev_init(&port->sm_cdev, &umad_sm_fops);
port->sm_cdev.owner = THIS_MODULE;
cdev_set_parent(&port->sm_cdev, &umad_dev->kobj);
kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
- if (cdev_add(&port->sm_cdev, base, 1))
+ if (cdev_add(&port->sm_cdev, base_issm, 1))
goto err_sm_cdev;
port->sm_dev = device_create(umad_class, device->dev.parent,
@@ -1244,10 +1226,7 @@ err_dev:
err_cdev:
cdev_del(&port->cdev);
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ clear_bit(devnum, dev_map);
return -1;
}
@@ -1281,11 +1260,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
}
mutex_unlock(&port->file_mutex);
-
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(port->dev_num, dev_map);
- else
- clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
+ clear_bit(port->dev_num, dev_map);
}
static void ib_umad_add_one(struct ib_device *device)
@@ -1361,13 +1336,23 @@ static int __init ib_umad_init(void)
{
int ret;
- ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
+ ret = register_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2,
"infiniband_mad");
if (ret) {
pr_err("couldn't register device number\n");
goto out;
}
+ ret = alloc_chrdev_region(&dynamic_umad_dev, 0,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2,
+ "infiniband_mad");
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
+ }
+ dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR;
+
umad_class = class_create(THIS_MODULE, "infiniband_mad");
if (IS_ERR(umad_class)) {
ret = PTR_ERR(umad_class);
@@ -1395,7 +1380,12 @@ out_class:
class_destroy(umad_class);
out_chrdev:
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
+
+out_alloc:
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
out:
return ret;
@@ -1405,9 +1395,10 @@ static void __exit ib_umad_cleanup(void)
{
ib_unregister_client(&umad_client);
class_destroy(umad_class);
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
}
module_init(ib_umad_init);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 840b24096690..256934d1f64f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -340,6 +340,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
uobj->object = pd;
memset(&resp, 0, sizeof resp);
resp.pd_handle = uobj->id;
+ pd->res.type = RDMA_RESTRACK_PD;
+ rdma_restrack_add(&pd->res);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
@@ -1033,6 +1035,8 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
goto err_cb;
uobj_alloc_commit(&obj->uobject);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
return obj;
@@ -1145,10 +1149,7 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
min(ucore->inlen, sizeof(cmd)),
ib_uverbs_ex_create_cq_cb, NULL);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- return 0;
+ return PTR_ERR_OR_ZERO(obj);
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
@@ -1199,7 +1200,7 @@ static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
tmp.opcode = wc->opcode;
tmp.vendor_err = wc->vendor_err;
tmp.byte_len = wc->byte_len;
- tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
+ tmp.ex.imm_data = wc->ex.imm_data;
tmp.qp_num = wc->qp->qp_num;
tmp.src_qp = wc->src_qp;
tmp.wc_flags = wc->wc_flags;
@@ -1517,7 +1518,7 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd->qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr);
else
- qp = device->create_qp(pd, &attr, uhw);
+ qp = _ib_create_qp(device, pd, &attr, uhw);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
@@ -1530,7 +1531,6 @@ static int create_qp(struct ib_uverbs_file *file,
goto err_cb;
qp->real_qp = qp;
- qp->device = device;
qp->pd = pd;
qp->send_cq = attr.send_cq;
qp->recv_cq = attr.recv_cq;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 71ff2644e053..d96dc1d17be1 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -243,16 +243,13 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
size_t ctx_size;
uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
- if (hdr->reserved)
- return -EINVAL;
-
object_spec = uverbs_get_object(ib_dev, hdr->object_id);
if (!object_spec)
- return -EOPNOTSUPP;
+ return -EPROTONOSUPPORT;
method_spec = uverbs_get_method(object_spec, hdr->method_id);
if (!method_spec)
- return -EOPNOTSUPP;
+ return -EPROTONOSUPPORT;
if ((method_spec->flags & UVERBS_ACTION_FLAG_CREATE_ROOT) ^ !file->ucontext)
return -EINVAL;
@@ -305,6 +302,16 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
file, method_spec, ctx->uverbs_attr_bundle);
+
+ /*
+ * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework
+ * cannot invoke the method because the request is not supported.
+ * No other cases should return this code.
+ */
+ if (unlikely(err == -EPROTONOSUPPORT)) {
+ WARN_ON_ONCE(err == -EPROTONOSUPPORT);
+ err = -EINVAL;
+ }
out:
if (ctx != (void *)data)
kfree(ctx);
@@ -341,7 +348,7 @@ long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
if (hdr.reserved) {
- err = -EOPNOTSUPP;
+ err = -EPROTONOSUPPORT;
goto out;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 381fd9c096ae..5b811bf574d6 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -62,14 +62,16 @@ MODULE_LICENSE("Dual BSD/GPL");
enum {
IB_UVERBS_MAJOR = 231,
IB_UVERBS_BASE_MINOR = 192,
- IB_UVERBS_MAX_DEVICES = 32
+ IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
+ IB_UVERBS_NUM_FIXED_MINOR = 32,
+ IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};
#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
+static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
-static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
@@ -339,11 +341,11 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
sizeof(struct ib_uverbs_comp_event_desc));
}
-static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
struct file *filp,
struct poll_table_struct *wait)
{
- unsigned int pollflags = 0;
+ __poll_t pollflags = 0;
poll_wait(filp, &ev_queue->poll_wait, wait);
@@ -355,13 +357,13 @@ static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
return pollflags;
}
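
The unsigned int to __poll_t conversions here and in the files above are part of a tree-wide annotation effort: poll handlers return an event bitmask that sparse can now type-check. A minimal handler in the new style, assuming a hypothetical foo_dev with the usual waitqueue plumbing:

#include <linux/poll.h>

struct foo_dev {				/* hypothetical */
	wait_queue_head_t waitq;
	struct list_head ready_list;
};

static __poll_t foo_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct foo_dev *fdev = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &fdev->waitq, wait);
	if (!list_empty(&fdev->ready_list))
		mask |= POLLIN | POLLRDNORM;	/* data ready to read */
	return mask;
}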
-static unsigned int ib_uverbs_async_event_poll(struct file *filp,
+static __poll_t ib_uverbs_async_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
return ib_uverbs_event_poll(filp->private_data, filp, wait);
}
-static unsigned int ib_uverbs_comp_event_poll(struct file *filp,
+static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct ib_uverbs_completion_event_file *comp_ev_file =
@@ -1005,34 +1007,6 @@ static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
-
-/*
- * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
- * requesting a new major number and doubling the number of max devices we
- * support. It's stupid, but simple.
- */
-static int find_overflow_devnum(void)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
- "infiniband_verbs");
- if (ret) {
- pr_err("user_verbs: couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
- if (ret >= IB_UVERBS_MAX_DEVICES)
- return -1;
-
- return ret;
-}
-
static void ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
@@ -1062,24 +1036,15 @@ static void ib_uverbs_add_one(struct ib_device *device)
INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
- spin_lock(&map_lock);
devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (devnum >= IB_UVERBS_MAX_DEVICES) {
- spin_unlock(&map_lock);
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- spin_lock(&map_lock);
- uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- uverbs_dev->devnum = devnum;
- base = devnum + IB_UVERBS_BASE_DEV;
- set_bit(devnum, dev_map);
- }
- spin_unlock(&map_lock);
+ if (devnum >= IB_UVERBS_MAX_DEVICES)
+ goto err;
+ uverbs_dev->devnum = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
+ base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
+ else
+ base = IB_UVERBS_BASE_DEV + devnum;
rcu_assign_pointer(uverbs_dev->ib_dev, device);
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
@@ -1124,10 +1089,7 @@ err_class:
err_cdev:
cdev_del(&uverbs_dev->cdev);
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ clear_bit(devnum, dev_map);
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
@@ -1219,11 +1181,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
dev_set_drvdata(uverbs_dev->dev, NULL);
device_destroy(uverbs_class, uverbs_dev->cdev.dev);
cdev_del(&uverbs_dev->cdev);
-
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(uverbs_dev->devnum, dev_map);
- else
- clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
+ clear_bit(uverbs_dev->devnum, dev_map);
if (device->disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
@@ -1265,13 +1223,22 @@ static int __init ib_uverbs_init(void)
{
int ret;
- ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
+ ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR,
"infiniband_verbs");
if (ret) {
pr_err("user_verbs: couldn't register device number\n");
goto out;
}
+ ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
+ IB_UVERBS_NUM_DYNAMIC_MINOR,
+ "infiniband_verbs");
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
+ }
+
uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
if (IS_ERR(uverbs_class)) {
ret = PTR_ERR(uverbs_class);
@@ -1299,7 +1266,12 @@ out_class:
class_destroy(uverbs_class);
out_chrdev:
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
+
+out_alloc:
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
out:
return ret;
@@ -1309,9 +1281,10 @@ static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
class_destroy(uverbs_class);
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
}
module_init(ib_uverbs_init);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index c3ee5d9b336d..b571176babbe 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -35,6 +35,7 @@
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
+#include <rdma/restrack.h>
#include "rdma_core.h"
#include "uverbs.h"
@@ -319,6 +320,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
obj->uobject.object = cq;
obj->uobject.user_handle = user_handle;
atomic_set(&cq->usecnt, 0);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
if (ret)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e36d27ed4daa..16ebc6372c31 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -124,16 +124,24 @@ EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
switch (rate) {
- case IB_RATE_2_5_GBPS: return 1;
- case IB_RATE_5_GBPS: return 2;
- case IB_RATE_10_GBPS: return 4;
- case IB_RATE_20_GBPS: return 8;
- case IB_RATE_30_GBPS: return 12;
- case IB_RATE_40_GBPS: return 16;
- case IB_RATE_60_GBPS: return 24;
- case IB_RATE_80_GBPS: return 32;
- case IB_RATE_120_GBPS: return 48;
- default: return -1;
+ case IB_RATE_2_5_GBPS: return 1;
+ case IB_RATE_5_GBPS: return 2;
+ case IB_RATE_10_GBPS: return 4;
+ case IB_RATE_20_GBPS: return 8;
+ case IB_RATE_30_GBPS: return 12;
+ case IB_RATE_40_GBPS: return 16;
+ case IB_RATE_60_GBPS: return 24;
+ case IB_RATE_80_GBPS: return 32;
+ case IB_RATE_120_GBPS: return 48;
+ case IB_RATE_14_GBPS: return 6;
+ case IB_RATE_56_GBPS: return 22;
+ case IB_RATE_112_GBPS: return 45;
+ case IB_RATE_168_GBPS: return 67;
+ case IB_RATE_25_GBPS: return 10;
+ case IB_RATE_100_GBPS: return 40;
+ case IB_RATE_200_GBPS: return 80;
+ case IB_RATE_300_GBPS: return 120;
+ default: return -1;
}
}
EXPORT_SYMBOL(ib_rate_to_mult);
@@ -141,16 +149,24 @@ EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
switch (mult) {
- case 1: return IB_RATE_2_5_GBPS;
- case 2: return IB_RATE_5_GBPS;
- case 4: return IB_RATE_10_GBPS;
- case 8: return IB_RATE_20_GBPS;
- case 12: return IB_RATE_30_GBPS;
- case 16: return IB_RATE_40_GBPS;
- case 24: return IB_RATE_60_GBPS;
- case 32: return IB_RATE_80_GBPS;
- case 48: return IB_RATE_120_GBPS;
- default: return IB_RATE_PORT_CURRENT;
+ case 1: return IB_RATE_2_5_GBPS;
+ case 2: return IB_RATE_5_GBPS;
+ case 4: return IB_RATE_10_GBPS;
+ case 8: return IB_RATE_20_GBPS;
+ case 12: return IB_RATE_30_GBPS;
+ case 16: return IB_RATE_40_GBPS;
+ case 24: return IB_RATE_60_GBPS;
+ case 32: return IB_RATE_80_GBPS;
+ case 48: return IB_RATE_120_GBPS;
+ case 6: return IB_RATE_14_GBPS;
+ case 22: return IB_RATE_56_GBPS;
+ case 45: return IB_RATE_112_GBPS;
+ case 67: return IB_RATE_168_GBPS;
+ case 10: return IB_RATE_25_GBPS;
+ case 40: return IB_RATE_100_GBPS;
+ case 80: return IB_RATE_200_GBPS;
+ case 120: return IB_RATE_300_GBPS;
+ default: return IB_RATE_PORT_CURRENT;
}
}
EXPORT_SYMBOL(mult_to_ib_rate);
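
With the extended-speed entries added, the two tables are meant to stay exact inverses for every supported rate. A hypothetical sanity check makes the invariant explicit (all defined ib_rate values lie between IB_RATE_2_5_GBPS and IB_RATE_300_GBPS at this point):

static void check_rate_tables(void)
{
	enum ib_rate rate;

	for (rate = IB_RATE_2_5_GBPS; rate <= IB_RATE_300_GBPS; rate++) {
		int mult = ib_rate_to_mult(rate);

		if (mult > 0)			/* skip unsupported encodings */
			WARN_ON(mult_to_ib_rate(mult) != rate);
	}
}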
@@ -247,6 +263,10 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
}
+ pd->res.type = RDMA_RESTRACK_PD;
+ pd->res.kern_name = caller;
+ rdma_restrack_add(&pd->res);
+
if (mr_access_flags) {
struct ib_mr *mr;
@@ -296,6 +316,7 @@ void ib_dealloc_pd(struct ib_pd *pd)
requires the caller to guarantee we can't race here. */
WARN_ON(atomic_read(&pd->usecnt));
+ rdma_restrack_del(&pd->res);
/* Making delalloc_pd a void return is a WIP, no driver should return
an error here. */
ret = pd->device->dealloc_pd(pd);
@@ -421,8 +442,7 @@ static bool find_gid_index(const union ib_gid *gid,
const struct ib_gid_attr *gid_attr,
void *context)
{
- struct find_gid_index_context *ctx =
- (struct find_gid_index_context *)context;
+ struct find_gid_index_context *ctx = context;
if (ctx->gid_type != gid_attr->gid_type)
return false;
@@ -481,8 +501,53 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
+/* Resolve the destination MAC address and hop limit for a unicast
+ * destination GID entry, considering the source GID entry as well.
+ * ah_attr must have a valid port_num and sgid_index.
+ */
+static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
+{
+ struct ib_gid_attr sgid_attr;
+ struct ib_global_route *grh;
+ int hop_limit = 0xff;
+ union ib_gid sgid;
+ int ret;
+
+ grh = rdma_ah_retrieve_grh(ah_attr);
+
+ ret = ib_query_gid(device,
+ rdma_ah_get_port_num(ah_attr),
+ grh->sgid_index,
+ &sgid, &sgid_attr);
+ if (ret || !sgid_attr.ndev) {
+ if (!ret)
+ ret = -ENXIO;
+ return ret;
+ }
+
+ /* If destination is link local and source GID is RoCEv1,
+ * IP stack is not used.
+ */
+ if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
+ sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
+ rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
+ ah_attr->roce.dmac);
+ goto done;
+ }
+
+ ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
+ ah_attr->roce.dmac,
+ sgid_attr.ndev, &hop_limit);
+done:
+ dev_put(sgid_attr.ndev);
+
+ grh->hop_limit = hop_limit;
+ return ret;
+}
+
/*
- * This function creates ah from the incoming packet.
+ * This function initializes address handle attributes from the incoming packet.
* Incoming packet has dgid of the receiver node on which this code is
* getting executed and, sgid contains the GID of the sender.
*
@@ -490,13 +555,10 @@ EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
* as sgid and, sgid is used as dgid because sgid contains destinations
* GID whom to respond to.
*
- * This is why when calling rdma_addr_find_l2_eth_by_grh() function, the
- * position of arguments dgid and sgid do not match the order of the
- * parameters.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct rdma_ah_attr *ah_attr)
+int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr)
{
u32 flow_class;
u16 gid_index;
@@ -523,57 +585,33 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
if (ret)
return ret;
+ rdma_ah_set_sl(ah_attr, wc->sl);
+ rdma_ah_set_port_num(ah_attr, port_num);
+
if (rdma_protocol_roce(device, port_num)) {
- int if_index = 0;
u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
wc->vlan_id : 0xffff;
- struct net_device *idev;
- struct net_device *resolved_dev;
if (!(wc->wc_flags & IB_WC_GRH))
return -EPROTOTYPE;
- if (!device->get_netdev)
- return -EOPNOTSUPP;
-
- idev = device->get_netdev(device, port_num);
- if (!idev)
- return -ENODEV;
-
- ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
- ah_attr->roce.dmac,
- wc->wc_flags & IB_WC_WITH_VLAN ?
- NULL : &vlan_id,
- &if_index, &hoplimit);
- if (ret) {
- dev_put(idev);
- return ret;
- }
-
- resolved_dev = dev_get_by_index(&init_net, if_index);
- rcu_read_lock();
- if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
- resolved_dev))
- ret = -EHOSTUNREACH;
- rcu_read_unlock();
- dev_put(idev);
- dev_put(resolved_dev);
+ ret = get_sgid_index_from_eth(device, port_num,
+ vlan_id, &dgid,
+ gid_type, &gid_index);
if (ret)
return ret;
- ret = get_sgid_index_from_eth(device, port_num, vlan_id,
- &dgid, gid_type, &gid_index);
- if (ret)
- return ret;
- }
-
- rdma_ah_set_dlid(ah_attr, wc->slid);
- rdma_ah_set_sl(ah_attr, wc->sl);
- rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- rdma_ah_set_port_num(ah_attr, port_num);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_ah_set_grh(ah_attr, &sgid,
+ flow_class & 0xFFFFF,
+ (u8)gid_index, hoplimit,
+ (flow_class >> 20) & 0xFF);
+ return ib_resolve_unicast_gid_dmac(device, ah_attr);
+ } else {
+ rdma_ah_set_dlid(ah_attr, wc->slid);
+ rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- if (wc->wc_flags & IB_WC_GRH) {
- if (!rdma_cap_eth_ah(device, port_num)) {
+ if (wc->wc_flags & IB_WC_GRH) {
if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
ret = ib_find_cached_gid_by_port(device, &dgid,
IB_GID_TYPE_IB,
@@ -584,18 +622,17 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
} else {
gid_index = 0;
}
- }
-
- flow_class = be32_to_cpu(grh->version_tclass_flow);
- rdma_ah_set_grh(ah_attr, &sgid,
- flow_class & 0xFFFFF,
- (u8)gid_index, hoplimit,
- (flow_class >> 20) & 0xFF);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_ah_set_grh(ah_attr, &sgid,
+ flow_class & 0xFFFFF,
+ (u8)gid_index, hoplimit,
+ (flow_class >> 20) & 0xFF);
+ }
+ return 0;
}
- return 0;
}
-EXPORT_SYMBOL(ib_init_ah_from_wc);
+EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
const struct ib_grh *grh, u8 port_num)
@@ -603,7 +640,7 @@ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
struct rdma_ah_attr ah_attr;
int ret;
- ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
+ ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
if (ret)
return ERR_PTR(ret);
@@ -850,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs)
rdma_rw_init_qp(device, qp_init_attr);
- qp = device->create_qp(pd, qp_init_attr, NULL);
+ qp = _ib_create_qp(device, pd, qp_init_attr, NULL);
if (IS_ERR(qp))
return qp;
@@ -860,7 +897,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
return ERR_PTR(ret);
}
- qp->device = device;
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type;
@@ -890,7 +926,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
atomic_inc(&qp_init_attr->srq->usecnt);
}
- qp->pd = pd;
qp->send_cq = qp_init_attr->send_cq;
qp->xrcd = NULL;
@@ -1269,16 +1304,8 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
return -EINVAL;
- if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)
- return 0;
-
grh = rdma_ah_retrieve_grh(ah_attr);
- if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
- rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
- ah_attr->roce.dmac);
- return 0;
- }
if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
__be32 addr = 0;
@@ -1290,40 +1317,52 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
(char *)ah_attr->roce.dmac);
}
} else {
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
- int ifindex;
- int hop_limit;
-
- ret = ib_query_gid(device,
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index,
- &sgid, &sgid_attr);
-
- if (ret || !sgid_attr.ndev) {
- if (!ret)
- ret = -ENXIO;
- goto out;
- }
-
- ifindex = sgid_attr.ndev->ifindex;
+ ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
+ }
+ return ret;
+}
- ret =
- rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
- ah_attr->roce.dmac,
- NULL, &ifindex, &hop_limit);
+/*
+ * IB core internal helper to perform QP attribute modification.
+ */
+static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ int ret;
- dev_put(sgid_attr.ndev);
+ if (rdma_ib_or_roce(qp->device, port)) {
+ if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
+ pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
+ __func__, qp->device->name);
+ attr->rq_psn &= 0xffffff;
+ }
- grh->hop_limit = hop_limit;
+ if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
+ pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
+ __func__, qp->device->name);
+ attr->sq_psn &= 0xffffff;
+ }
}
-out:
+
+ ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+ if (!ret && (attr_mask & IB_QP_PORT))
+ qp->port = attr->port_num;
+
return ret;
}
+static bool is_qp_type_connected(const struct ib_qp *qp)
+{
+ return (qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_RC ||
+ qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT);
+}
+
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
- * @qp: The QP to modify.
+ * @ib_qp: The QP to modify.
* @attr: On input, specifies the QP attributes to modify. On output,
* the current values of selected QP attributes are returned.
* @attr_mask: A bit-mask used to specify which attributes of the QP
@@ -1332,21 +1371,20 @@ out:
* are being modified.
* It returns 0 on success and returns appropriate error code on error.
*/
-int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
+int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+ struct ib_qp *qp = ib_qp->real_qp;
int ret;
- if (attr_mask & IB_QP_AV) {
+ if (attr_mask & IB_QP_AV &&
+ attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
if (ret)
return ret;
}
- ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
- if (!ret && (attr_mask & IB_QP_PORT))
- qp->port = attr->port_num;
-
- return ret;
+ return _ib_modify_qp(qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
@@ -1409,7 +1447,7 @@ int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
- return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
+ return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
@@ -1503,6 +1541,7 @@ int ib_destroy_qp(struct ib_qp *qp)
if (!qp->uobject)
rdma_rw_cleanup_mrs(qp);
+ rdma_restrack_del(&qp->res);
ret = qp->device->destroy_qp(qp);
if (!ret) {
if (pd)
@@ -1545,6 +1584,8 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
cq->event_handler = event_handler;
cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
}
return cq;
@@ -1563,6 +1604,7 @@ int ib_destroy_cq(struct ib_cq *cq)
if (atomic_read(&cq->usecnt))
return -EBUSY;
+ rdma_restrack_del(&cq->res);
return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);
@@ -1747,7 +1789,7 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
}
EXPORT_SYMBOL(ib_detach_mcast);
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
+struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
{
struct ib_xrcd *xrcd;
@@ -1765,7 +1807,7 @@ struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
return xrcd;
}
-EXPORT_SYMBOL(ib_alloc_xrcd);
+EXPORT_SYMBOL(__ib_alloc_xrcd);
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
@@ -1790,11 +1832,11 @@ EXPORT_SYMBOL(ib_dealloc_xrcd);
* ib_create_wq - Creates a WQ associated with the specified protection
* domain.
* @pd: The protection domain associated with the WQ.
- * @wq_init_attr: A list of initial attributes required to create the
+ * @wq_attr: A list of initial attributes required to create the
* WQ. If WQ creation succeeds, then the attributes are updated to
* the actual capabilities of the created WQ.
*
- * wq_init_attr->max_wr and wq_init_attr->max_sge determine
+ * wq_attr->max_wr and wq_attr->max_sge determine
* the requested size of the WQ, and set to the actual values allocated
* on return.
* If ib_create_wq() succeeds, then max_wr and max_sge will always be
@@ -2156,16 +2198,16 @@ static void __ib_drain_sq(struct ib_qp *qp)
struct ib_send_wr swr = {}, *bad_swr;
int ret;
- swr.wr_cqe = &sdrain.cqe;
- sdrain.cqe.done = ib_drain_qp_done;
- init_completion(&sdrain.done);
-
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return;
}
+ swr.wr_cqe = &sdrain.cqe;
+ sdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
ret = ib_post_send(qp, &swr, &bad_swr);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
@@ -2190,16 +2232,16 @@ static void __ib_drain_rq(struct ib_qp *qp)
struct ib_recv_wr rwr = {}, *bad_rwr;
int ret;
- rwr.wr_cqe = &rdrain.cqe;
- rdrain.cqe.done = ib_drain_qp_done;
- init_completion(&rdrain.done);
-
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
}
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
ret = ib_post_recv(qp, &rwr, &bad_rwr);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ecbac91b2e14..ca32057e886f 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -43,20 +43,41 @@
#define ROCE_DRV_MODULE_VERSION "1.0.0"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
-
-#define BNXT_RE_PAGE_SIZE_4K BIT(12)
-#define BNXT_RE_PAGE_SIZE_8K BIT(13)
-#define BNXT_RE_PAGE_SIZE_64K BIT(16)
-#define BNXT_RE_PAGE_SIZE_2M BIT(21)
-#define BNXT_RE_PAGE_SIZE_8M BIT(23)
-#define BNXT_RE_PAGE_SIZE_1G BIT(30)
-
-#define BNXT_RE_MAX_MR_SIZE BIT(30)
+#define BNXT_RE_PAGE_SHIFT_4K (12)
+#define BNXT_RE_PAGE_SHIFT_8K (13)
+#define BNXT_RE_PAGE_SHIFT_64K (16)
+#define BNXT_RE_PAGE_SHIFT_2M (21)
+#define BNXT_RE_PAGE_SHIFT_8M (23)
+#define BNXT_RE_PAGE_SHIFT_1G (30)
+
+#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K)
+#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K)
+#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K)
+#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M)
+#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
+#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
+
+#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39)
+#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
+/* Number of MRs to reserve for PF, leaving remainder for VFs */
+#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
+#define BNXT_RE_MAX_GID_PER_VF 128
+
+/*
+ * Percentage of resources of each type reserved for PF.
+ * Remaining resources are divided equally among VFs.
+ * [0, 100]
+ */
+#define BNXT_RE_PCT_RSVD_FOR_PF 50
#define BNXT_RE_UD_QP_HW_STALL 0x400000
@@ -100,6 +121,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_TASK_IN_PROG 6
+#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
struct bnxt_en_dev *en_dev;
@@ -145,6 +167,9 @@ struct bnxt_re_dev {
struct bnxt_re_ah *sqp_ah;
struct bnxt_re_sqp_entries sqp_tbl[1024];
atomic_t nq_alloc_cnt;
+ u32 is_virtfn;
+ u32 num_vfs;
+ struct bnxt_qplib_roce_stats stats;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 7b28219eba46..77416bc61e6e 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -58,16 +58,55 @@
#include "hw_counters.h"
static const char * const bnxt_re_stat_name[] = {
- [BNXT_RE_ACTIVE_QP] = "active_qps",
- [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
- [BNXT_RE_ACTIVE_CQ] = "active_cqs",
- [BNXT_RE_ACTIVE_MR] = "active_mrs",
- [BNXT_RE_ACTIVE_MW] = "active_mws",
- [BNXT_RE_RX_PKTS] = "rx_pkts",
- [BNXT_RE_RX_BYTES] = "rx_bytes",
- [BNXT_RE_TX_PKTS] = "tx_pkts",
- [BNXT_RE_TX_BYTES] = "tx_bytes",
- [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors"
+ [BNXT_RE_ACTIVE_QP] = "active_qps",
+ [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
+ [BNXT_RE_ACTIVE_CQ] = "active_cqs",
+ [BNXT_RE_ACTIVE_MR] = "active_mrs",
+ [BNXT_RE_ACTIVE_MW] = "active_mws",
+ [BNXT_RE_RX_PKTS] = "rx_pkts",
+ [BNXT_RE_RX_BYTES] = "rx_bytes",
+ [BNXT_RE_TX_PKTS] = "tx_pkts",
+ [BNXT_RE_TX_BYTES] = "tx_bytes",
+ [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors",
+ [BNXT_RE_TO_RETRANSMITS] = "to_retransmits",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
+ [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd",
+ [BNXT_RE_MISSING_RESP] = "missing_resp",
+ [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err",
+ [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err",
+ [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err",
+ [BNXT_RE_LOCAL_PROTECTION_ERR] = "local_protection_err",
+ [BNXT_RE_MEM_MGMT_OP_ERR] = "mem_mgmt_op_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR] = "remote_invalid_req_err",
+ [BNXT_RE_REMOTE_ACCESS_ERR] = "remote_access_err",
+ [BNXT_RE_REMOTE_OP_ERR] = "remote_op_err",
+ [BNXT_RE_DUP_REQ] = "dup_req",
+ [BNXT_RE_RES_EXCEED_MAX] = "res_exceed_max",
+ [BNXT_RE_RES_LENGTH_MISMATCH] = "res_length_mismatch",
+ [BNXT_RE_RES_EXCEEDS_WQE] = "res_exceeds_wqe",
+ [BNXT_RE_RES_OPCODE_ERR] = "res_opcode_err",
+ [BNXT_RE_RES_RX_INVALID_RKEY] = "res_rx_invalid_rkey",
+ [BNXT_RE_RES_RX_DOMAIN_ERR] = "res_rx_domain_err",
+ [BNXT_RE_RES_RX_NO_PERM] = "res_rx_no_perm",
+ [BNXT_RE_RES_RX_RANGE_ERR] = "res_rx_range_err",
+ [BNXT_RE_RES_TX_INVALID_RKEY] = "res_tx_invalid_rkey",
+ [BNXT_RE_RES_TX_DOMAIN_ERR] = "res_tx_domain_err",
+ [BNXT_RE_RES_TX_NO_PERM] = "res_tx_no_perm",
+ [BNXT_RE_RES_TX_RANGE_ERR] = "res_tx_range_err",
+ [BNXT_RE_RES_IRRQ_OFLOW] = "res_irrq_oflow",
+ [BNXT_RE_RES_UNSUP_OPCODE] = "res_unsup_opcode",
+ [BNXT_RE_RES_UNALIGNED_ATOMIC] = "res_unaligned_atomic",
+ [BNXT_RE_RES_REM_INV_ERR] = "res_rem_inv_err",
+ [BNXT_RE_RES_MEM_ERROR] = "res_mem_err",
+ [BNXT_RE_RES_SRQ_ERR] = "res_srq_err",
+ [BNXT_RE_RES_CMP_ERR] = "res_cmp_err",
+ [BNXT_RE_RES_INVALID_DUP_RKEY] = "res_invalid_dup_rkey",
+ [BNXT_RE_RES_WQE_FORMAT_ERR] = "res_wqe_format_err",
+ [BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err",
+ [BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err",
+ [BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err",
+ [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err"
};
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
@@ -76,6 +115,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
+ int rc = 0;
if (!port || !stats)
return -EINVAL;
@@ -97,6 +137,91 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_TX_BYTES] =
le64_to_cpu(bnxt_re_stats->tx_ucast_bytes);
}
+ if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
+ rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, &rdev->stats);
+ if (rc)
+ clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+ &rdev->flags);
+ stats->value[BNXT_RE_TO_RETRANSMITS] =
+ rdev->stats.to_retransmits;
+ stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
+ rdev->stats.seq_err_naks_rcvd;
+ stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
+ rdev->stats.max_retry_exceeded;
+ stats->value[BNXT_RE_RNR_NAKS_RCVD] =
+ rdev->stats.rnr_naks_rcvd;
+ stats->value[BNXT_RE_MISSING_RESP] =
+ rdev->stats.missing_resp;
+ stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
+ rdev->stats.unrecoverable_err;
+ stats->value[BNXT_RE_BAD_RESP_ERR] =
+ rdev->stats.bad_resp_err;
+ stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
+ rdev->stats.local_qp_op_err;
+ stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
+ rdev->stats.local_protection_err;
+ stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
+ rdev->stats.mem_mgmt_op_err;
+ stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
+ rdev->stats.remote_invalid_req_err;
+ stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
+ rdev->stats.remote_access_err;
+ stats->value[BNXT_RE_REMOTE_OP_ERR] =
+ rdev->stats.remote_op_err;
+ stats->value[BNXT_RE_DUP_REQ] =
+ rdev->stats.dup_req;
+ stats->value[BNXT_RE_RES_EXCEED_MAX] =
+ rdev->stats.res_exceed_max;
+ stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
+ rdev->stats.res_length_mismatch;
+ stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
+ rdev->stats.res_exceeds_wqe;
+ stats->value[BNXT_RE_RES_OPCODE_ERR] =
+ rdev->stats.res_opcode_err;
+ stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
+ rdev->stats.res_rx_invalid_rkey;
+ stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
+ rdev->stats.res_rx_domain_err;
+ stats->value[BNXT_RE_RES_RX_NO_PERM] =
+ rdev->stats.res_rx_no_perm;
+ stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
+ rdev->stats.res_rx_range_err;
+ stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
+ rdev->stats.res_tx_invalid_rkey;
+ stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
+ rdev->stats.res_tx_domain_err;
+ stats->value[BNXT_RE_RES_TX_NO_PERM] =
+ rdev->stats.res_tx_no_perm;
+ stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
+ rdev->stats.res_tx_range_err;
+ stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
+ rdev->stats.res_irrq_oflow;
+ stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
+ rdev->stats.res_unsup_opcode;
+ stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
+ rdev->stats.res_unaligned_atomic;
+ stats->value[BNXT_RE_RES_REM_INV_ERR] =
+ rdev->stats.res_rem_inv_err;
+ stats->value[BNXT_RE_RES_MEM_ERROR] =
+ rdev->stats.res_mem_error;
+ stats->value[BNXT_RE_RES_SRQ_ERR] =
+ rdev->stats.res_srq_err;
+ stats->value[BNXT_RE_RES_CMP_ERR] =
+ rdev->stats.res_cmp_err;
+ stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
+ rdev->stats.res_invalid_dup_rkey;
+ stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
+ rdev->stats.res_wqe_format_err;
+ stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
+ rdev->stats.res_cq_load_err;
+ stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
+ rdev->stats.res_srq_load_err;
+ stats->value[BNXT_RE_RES_TX_PCI_ERR] =
+ rdev->stats.res_tx_pci_err;
+ stats->value[BNXT_RE_RES_RX_PCI_ERR] =
+ rdev->stats.res_rx_pci_err;
+ }
+
return ARRAY_SIZE(bnxt_re_stat_name);
}
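
Counters returned from a get_hw_stats hook are exported by the IB core under
/sys/class/infiniband/<device>/ports/<port>/hw_counters/, one file per name in
bnxt_re_stat_name. A minimal user-space sketch of reading one of the new
counters; the device name bnxt_re0 and port 1 are assumptions for illustration:

/* Read a per-port RoCE counter exposed through bnxt_re_ib_get_hw_stats().
 * The sysfs layout is the generic IB hw_counters one; the device name and
 * port number below are illustrative, not fixed by the driver.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long val;
	FILE *f = fopen("/sys/class/infiniband/bnxt_re0/ports/1"
			"/hw_counters/to_retransmits", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &val) == 1)
		printf("to_retransmits: %llu\n", val);
	fclose(f);
	return 0;
}
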
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index be0dc0093b58..a01a922717d5 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -51,6 +51,45 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_TO_RETRANSMITS,
+ BNXT_RE_SEQ_ERR_NAKS_RCVD,
+ BNXT_RE_MAX_RETRY_EXCEEDED,
+ BNXT_RE_RNR_NAKS_RCVD,
+ BNXT_RE_MISSING_RESP,
+ BNXT_RE_UNRECOVERABLE_ERR,
+ BNXT_RE_BAD_RESP_ERR,
+ BNXT_RE_LOCAL_QP_OP_ERR,
+ BNXT_RE_LOCAL_PROTECTION_ERR,
+ BNXT_RE_MEM_MGMT_OP_ERR,
+ BNXT_RE_REMOTE_INVALID_REQ_ERR,
+ BNXT_RE_REMOTE_ACCESS_ERR,
+ BNXT_RE_REMOTE_OP_ERR,
+ BNXT_RE_DUP_REQ,
+ BNXT_RE_RES_EXCEED_MAX,
+ BNXT_RE_RES_LENGTH_MISMATCH,
+ BNXT_RE_RES_EXCEEDS_WQE,
+ BNXT_RE_RES_OPCODE_ERR,
+ BNXT_RE_RES_RX_INVALID_RKEY,
+ BNXT_RE_RES_RX_DOMAIN_ERR,
+ BNXT_RE_RES_RX_NO_PERM,
+ BNXT_RE_RES_RX_RANGE_ERR,
+ BNXT_RE_RES_TX_INVALID_RKEY,
+ BNXT_RE_RES_TX_DOMAIN_ERR,
+ BNXT_RE_RES_TX_NO_PERM,
+ BNXT_RE_RES_TX_RANGE_ERR,
+ BNXT_RE_RES_IRRQ_OFLOW,
+ BNXT_RE_RES_UNSUP_OPCODE,
+ BNXT_RE_RES_UNALIGNED_ATOMIC,
+ BNXT_RE_RES_REM_INV_ERR,
+ BNXT_RE_RES_MEM_ERROR,
+ BNXT_RE_RES_SRQ_ERR,
+ BNXT_RE_RES_CMP_ERR,
+ BNXT_RE_RES_INVALID_DUP_RKEY,
+ BNXT_RE_RES_WQE_FORMAT_ERR,
+ BNXT_RE_RES_CQ_LOAD_ERR,
+ BNXT_RE_RES_SRQ_LOAD_ERR,
+ BNXT_RE_RES_TX_PCI_ERR,
+ BNXT_RE_RES_RX_PCI_ERR,
BNXT_RE_NUM_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2032db7db766..9b8fa77b8831 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -141,12 +141,13 @@ int bnxt_re_query_device(struct ib_device *ibdev,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
memset(ib_attr, 0, sizeof(*ib_attr));
-
- ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
+ memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
+ min(sizeof(dev_attr->fw_ver),
+ sizeof(ib_attr->fw_ver)));
bnxt_qplib_get_guid(rdev->netdev->dev_addr,
(u8 *)&ib_attr->sys_image_guid);
ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
- ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -247,8 +248,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
IB_PORT_VENDOR_CLASS_SUP |
IB_PORT_IP_BASED_GIDS;
- /* Max MSG size set to 2G for now */
- port_attr->max_msg_sz = 0x80000000;
+ port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
port_attr->bad_pkey_cntr = 0;
port_attr->qkey_viol_cntr = 0;
port_attr->pkey_tbl_len = dev_attr->max_pkey;
@@ -281,6 +281,15 @@ int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
+ rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
+ rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
+}
+
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey)
{
@@ -532,7 +541,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
pbl_tbl = dma_addr;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
- BNXT_RE_FENCE_PBL_SIZE, false);
+ BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
goto fail;
@@ -1018,6 +1027,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_qp *qp;
struct bnxt_re_cq *cq;
+ struct bnxt_re_srq *srq;
int rc, entries;
if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
@@ -1073,9 +1083,15 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
}
if (qp_init_attr->srq) {
- dev_err(rdev_to_dev(rdev), "SRQ not supported");
- rc = -ENOTSUPP;
- goto fail;
+ srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
+ ib_srq);
+ if (!srq) {
+ dev_err(rdev_to_dev(rdev), "SRQ not found");
+ rc = -EINVAL;
+ goto fail;
+ }
+ qp->qplib_qp.srq = &srq->qplib_srq;
+ qp->qplib_qp.rq.max_wqe = 0;
} else {
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty
@@ -1280,6 +1296,237 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}
}
+/* Shared Receive Queues */
+int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ struct bnxt_qplib_nq *nq = NULL;
+ int rc;
+
+ if (qplib_srq->cq)
+ nq = qplib_srq->cq->nq;
+ rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
+ return rc;
+ }
+
+ if (srq->umem && !IS_ERR(srq->umem))
+ ib_umem_release(srq->umem);
+ kfree(srq);
+ atomic_dec(&rdev->srq_count);
+ if (nq)
+ nq->budget--;
+ return 0;
+}
+
+static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
+ struct bnxt_re_pd *pd,
+ struct bnxt_re_srq *srq,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq_req ureq;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ struct ib_umem *umem;
+ int bytes = 0;
+ struct ib_ucontext *context = pd->ib_pd.uobject->context;
+ struct bnxt_re_ucontext *cntx = container_of(context,
+ struct bnxt_re_ucontext,
+ ib_uctx);
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+ return -EFAULT;
+
+ bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get(context, ureq.srqva, bytes,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem))
+ return PTR_ERR(umem);
+
+ srq->umem = umem;
+ qplib_srq->nmap = umem->nmap;
+ qplib_srq->sglist = umem->sg_head.sgl;
+ qplib_srq->srq_handle = ureq.srq_handle;
+ qplib_srq->dpi = &cntx->dpi;
+
+ return 0;
+}
+
+struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_srq *srq;
+ struct bnxt_qplib_nq *nq = NULL;
+ int rc, entries;
+
+ if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
+ dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
+ srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ srq->rdev = rdev;
+ srq->qplib_srq.pd = &pd->qplib_pd;
+ srq->qplib_srq.dpi = &rdev->dpi_privileged;
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty
+ */
+ entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
+ if (entries > dev_attr->max_srq_wqes + 1)
+ entries = dev_attr->max_srq_wqes + 1;
+
+ srq->qplib_srq.max_wqe = entries;
+ srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
+ srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
+ srq->srq_limit = srq_init_attr->attr.srq_limit;
+ srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
+ nq = &rdev->nq[0];
+
+ if (udata) {
+ rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
+ if (rc)
+ goto fail;
+ }
+
+ rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
+ goto fail;
+ }
+
+ if (udata) {
+ struct bnxt_re_srq_resp resp;
+
+ resp.srqid = srq->qplib_srq.id;
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
+ bnxt_qplib_destroy_srq(&rdev->qplib_res,
+ &srq->qplib_srq);
+ goto exit;
+ }
+ }
+ if (nq)
+ nq->budget++;
+ atomic_inc(&rdev->srq_count);
+
+ return &srq->ib_srq;
+
+fail:
+ if (udata && srq->umem && !IS_ERR(srq->umem)) {
+ ib_umem_release(srq->umem);
+ srq->umem = NULL;
+ }
+
+ kfree(srq);
+exit:
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+ /* SRQ resize is not supported */
+ break;
+ case IB_SRQ_LIMIT:
+ /* Change the SRQ threshold */
+ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+ return -EINVAL;
+
+ srq->qplib_srq.threshold = srq_attr->srq_limit;
+ rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
+ return rc;
+ }
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to build and send a response back to udata */
+ break;
+ default:
+ dev_err(rdev_to_dev(rdev),
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_srq tsrq;
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ /* Get live SRQ attr */
+ tsrq.qplib_srq.id = srq->qplib_srq.id;
+ rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
+ return rc;
+ }
+ srq_attr->max_wr = srq->qplib_srq.max_wqe;
+ srq_attr->max_sge = srq->qplib_srq.max_sge;
+ srq_attr->srq_limit = tsrq.qplib_srq.threshold;
+
+ return 0;
+}
+
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_qplib_swqe wqe;
+ unsigned long flags;
+ int rc = 0, payload_sz = 0;
+
+ spin_lock_irqsave(&srq->lock, flags);
+ while (wr) {
+ /* Transcribe each ib_recv_wr to qplib_swqe */
+ wqe.num_sge = wr->num_sge;
+ payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
+ wr->num_sge);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
+ if (rc) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return rc;
+}
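
For context, these new verbs are reached from user space through the standard
libibverbs SRQ API; a minimal sketch, assuming a PD and registered buffers
already exist (sizes and names are illustrative):

/* User-space view of the SRQ verbs added above. Resource setup (device
 * open, PD creation, MR registration) is elided.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static struct ibv_srq *make_srq(struct ibv_pd *pd)
{
	struct ibv_srq_init_attr attr = {
		.attr = { .max_wr = 1024, .max_sge = 1, .srq_limit = 64 },
	};

	return ibv_create_srq(pd, &attr); /* reaches bnxt_re_create_srq() */
}

static int repost(struct ibv_srq *srq, struct ibv_sge *sge, uint64_t wr_id)
{
	struct ibv_recv_wr wr = { .wr_id = wr_id, .sg_list = sge, .num_sge = 1 };
	struct ibv_recv_wr *bad;

	return ibv_post_srq_recv(srq, &wr, &bad); /* bnxt_re_post_srq_recv() */
}
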
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
@@ -2295,10 +2542,14 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
{
- struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- struct bnxt_re_dev *rdev = cq->rdev;
int rc;
- struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
+ struct bnxt_re_cq *cq;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_re_dev *rdev;
+
+ cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ rdev = cq->rdev;
+ nq = cq->qplib_cq.nq;
rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -2308,12 +2559,11 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
if (!IS_ERR_OR_NULL(cq->umem))
ib_umem_release(cq->umem);
- if (cq) {
- kfree(cq->cql);
- kfree(cq);
- }
atomic_dec(&rdev->cq_count);
nq->budget--;
+ kfree(cq->cql);
+ kfree(cq);
+
return 0;
}
@@ -3078,7 +3328,8 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->qplib_mr.hwq.level = PBL_LVL_MAX;
mr->qplib_mr.total_size = -1; /* Infinite length */
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
+ PAGE_SIZE);
if (rc)
goto fail_mr;
@@ -3104,10 +3355,8 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
int rc;
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
+ if (rc)
dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
- return rc;
- }
if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
@@ -3170,7 +3419,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc)
- goto fail;
+ goto bail;
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->ib_mr.lkey;
@@ -3192,9 +3441,10 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
return &mr->ib_mr;
fail_mr:
- bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-fail:
kfree(mr->pages);
+fail:
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+bail:
kfree(mr);
return ERR_PTR(rc);
}
@@ -3248,6 +3498,46 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
return rc;
}
+static int bnxt_re_page_size_ok(int page_shift)
+{
+ switch (page_shift) {
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
+ int page_shift)
+{
+ u64 *pbl_tbl = pbl_tbl_orig;
+ u64 paddr;
+ u64 page_mask = (1ULL << page_shift) - 1;
+ int i, pages;
+ struct scatterlist *sg;
+ int entry;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (i = 0; i < pages; i++) {
+ paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
+ if (pbl_tbl == pbl_tbl_orig)
+ *pbl_tbl++ = paddr & ~page_mask;
+ else if ((paddr & page_mask) == 0)
+ *pbl_tbl++ = paddr;
+ }
+ }
+ return pbl_tbl - pbl_tbl_orig;
+}
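
The helper above emits one PBL entry per hardware page: the first DMA address
is masked down, and later addresses are recorded only when page_shift-aligned.
For intuition, with 4K system pages and a 2M hardware page each entry covers
512 DMA pages; a standalone sketch of that arithmetic (numbers illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int sys_shift = 12;             /* 4K system pages */
	unsigned int hw_shift = 21;              /* BNXT_RE_PAGE_SHIFT_2M */
	unsigned long umem_pages = 1UL << 18;    /* 1G region in 4K pages */
	unsigned long per_entry = 1UL << (hw_shift - sys_shift); /* 512 */

	printf("PBL entries: %lu\n", umem_pages / per_entry); /* 512 */
	return 0;
}
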
+
/* uverbs */
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
@@ -3257,10 +3547,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
struct ib_umem *umem;
- u64 *pbl_tbl, *pbl_tbl_orig;
- int i, umem_pgs, pages, rc;
- struct scatterlist *sg;
- int entry;
+ u64 *pbl_tbl = NULL;
+ int umem_pgs, page_shift, rc;
if (length > BNXT_RE_MAX_MR_SIZE) {
dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
@@ -3277,64 +3565,68 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
+ goto free_mr;
+ }
+ /* The fixed portion of the rkey is the same as the lkey */
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
umem = ib_umem_get(ib_pd->uobject->context, start, length,
mr_access_flags, 0);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
- goto free_mr;
+ goto free_mrw;
}
mr->ib_umem = umem;
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
- goto release_umem;
- }
- /* The fixed portion of the rkey is the same as the lkey */
- mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
mr->qplib_mr.va = virt_addr;
umem_pgs = ib_umem_page_count(umem);
if (!umem_pgs) {
dev_err(rdev_to_dev(rdev), "umem is invalid!");
rc = -EINVAL;
- goto free_mrw;
+ goto free_umem;
}
mr->qplib_mr.total_size = length;
pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
if (!pbl_tbl) {
- rc = -EINVAL;
- goto free_mrw;
+ rc = -ENOMEM;
+ goto free_umem;
}
- pbl_tbl_orig = pbl_tbl;
- if (umem->hugetlb) {
- dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
+ page_shift = umem->page_shift;
+
+ if (!bnxt_re_page_size_ok(page_shift)) {
+ dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
rc = -EFAULT;
goto fail;
}
- if (umem->page_shift != PAGE_SHIFT) {
- dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
- rc = -EFAULT;
+ if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+ dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
+ length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
+ rc = -EINVAL;
goto fail;
}
- /* Map umem buf ptrs to the PBL */
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- pages = sg_dma_len(sg) >> umem->page_shift;
- for (i = 0; i < pages; i++, pbl_tbl++)
- *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
+ if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
+ page_shift = BNXT_RE_PAGE_SHIFT_2M;
+ dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
+ 1 << page_shift);
}
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
- umem_pgs, false);
+
+ /* Map umem buf ptrs to the PBL */
+ umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
+ umem_pgs, false, 1 << page_shift);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register user MR");
goto fail;
}
- kfree(pbl_tbl_orig);
+ kfree(pbl_tbl);
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->qplib_mr.lkey;
@@ -3342,11 +3634,11 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
return &mr->ib_mr;
fail:
- kfree(pbl_tbl_orig);
+ kfree(pbl_tbl);
+free_umem:
+ ib_umem_release(umem);
free_mrw:
bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-release_umem:
- ib_umem_release(umem);
free_mr:
kfree(mr);
return ERR_PTR(rc);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 1df11ed272ea..423ebe012f95 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -68,6 +68,15 @@ struct bnxt_re_ah {
struct bnxt_qplib_ah qplib_ah;
};
+struct bnxt_re_srq {
+ struct bnxt_re_dev *rdev;
+ u32 srq_limit;
+ struct ib_srq ib_srq;
+ struct bnxt_qplib_srq qplib_srq;
+ struct ib_umem *umem;
+ spinlock_t lock; /* protect srq */
+};
+
struct bnxt_re_qp {
struct list_head list;
struct bnxt_re_dev *rdev;
@@ -143,6 +152,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
@@ -164,6 +174,16 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah);
+struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+int bnxt_re_destroy_srq(struct ib_srq *srq);
+int bnxt_re_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index aafc19aa5de1..508d00a5a106 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -80,6 +80,79 @@ static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+/* SR-IOV helper functions */
+
+static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
+{
+ struct bnxt *bp;
+
+ bp = netdev_priv(rdev->en_dev->net);
+ if (BNXT_VF(bp))
+ rdev->is_virtfn = 1;
+}
+
+/* Set the maximum number of each resource that the driver actually wants
+ * to allocate. This may be up to the maximum number the firmware has
+ * reserved for the function. The driver may choose to allocate fewer
+ * resources than the firmware maximum.
+ */
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
+ u32 i;
+ u32 vf_pct;
+ u32 num_vfs;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+
+ rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ dev_attr->max_qp);
+
+ rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ /* Use max_mr from fw since max_mrw does not get set */
+ rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
+ dev_attr->max_mr);
+ rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ dev_attr->max_srq);
+ rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
+ dev_attr->max_cq);
+
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_count[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+
+ if (rdev->num_vfs) {
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ num_vfs = 100 * rdev->num_vfs;
+ vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
+ vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
+ vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF
+ * and divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, ISER, NVME applications, etc. If the firmware
+ * severely restricts the number of MRs, then let PF have
+ * half and divide the rest among VFs, as for the other
+ * resource types.
+ */
+ if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
+ vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
+ else
+ vf_mrws = (rdev->qplib_ctx.mrw_count -
+ BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
+ vf_gids = BNXT_RE_MAX_GID_PER_VF;
+ }
+ rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
+ rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
+ rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
+ rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
+ rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
+}
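
A worked example of the split above, assuming firmware grants the full 64K QP
contexts and 7 VFs are enabled (both numbers illustrative): with
BNXT_RE_PCT_RSVD_FOR_PF at 50, each VF gets (65536 * 50) / (100 * 7) QPs and
the PF keeps roughly half.

#include <stdio.h>

int main(void)
{
	unsigned int qpc_count = 64 * 1024;  /* BNXT_RE_MAX_QPC_COUNT */
	unsigned int pct_rsvd_pf = 50;       /* BNXT_RE_PCT_RSVD_FOR_PF */
	unsigned int num_vfs = 7;            /* illustrative */
	unsigned int vf_pct = 100 - pct_rsvd_pf;
	unsigned int vf_qps = (qpc_count * vf_pct) / (100 * num_vfs);

	printf("QPs per VF: %u\n", vf_qps);  /* 4681 */
	return 0;
}
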
+
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
@@ -91,6 +164,15 @@ static void bnxt_re_start(void *p)
static void bnxt_re_sriov_config(void *p, int num_vfs)
{
+ struct bnxt_re_dev *rdev = p;
+
+ if (!rdev)
+ return;
+
+ rdev->num_vfs = num_vfs;
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
}
static void bnxt_re_shutdown(void *p)
@@ -417,7 +499,7 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
return ERR_PTR(-EINVAL);
if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
- dev_dbg(&pdev->dev,
+ dev_info(&pdev->dev,
"%s: probe error: RoCE is not supported on this device",
ROCE_DRV_MODULE_NAME);
return ERR_PTR(-ENODEV);
@@ -490,6 +572,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->query_port = bnxt_re_query_port;
ibdev->get_port_immutable = bnxt_re_get_port_immutable;
+ ibdev->get_dev_fw_str = bnxt_re_query_fw_str;
ibdev->query_pkey = bnxt_re_query_pkey;
ibdev->query_gid = bnxt_re_query_gid;
ibdev->get_netdev = bnxt_re_get_netdev;
@@ -505,6 +588,12 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->query_ah = bnxt_re_query_ah;
ibdev->destroy_ah = bnxt_re_destroy_ah;
+ ibdev->create_srq = bnxt_re_create_srq;
+ ibdev->modify_srq = bnxt_re_modify_srq;
+ ibdev->query_srq = bnxt_re_query_srq;
+ ibdev->destroy_srq = bnxt_re_destroy_srq;
+ ibdev->post_srq_recv = bnxt_re_post_srq_recv;
+
ibdev->create_qp = bnxt_re_create_qp;
ibdev->modify_qp = bnxt_re_modify_qp;
ibdev->query_qp = bnxt_re_query_qp;
@@ -541,14 +630,6 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->dev_attr.fw_ver);
-}
-
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -558,12 +639,10 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
-static DEVICE_ATTR(fw_rev, 0444, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);
static struct device_attribute *bnxt_re_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_rev,
&dev_attr_hca_type
};
@@ -616,10 +695,10 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
return rdev;
}
-static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
- struct creq_func_event *aeqe)
+static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
+ *unaffi_async)
{
- switch (aeqe->event) {
+ switch (unaffi_async->event) {
case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
@@ -648,6 +727,93 @@ static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
return 0;
}
+static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
+ struct bnxt_re_qp *qp)
+{
+ struct ib_event event;
+
+ memset(&event, 0, sizeof(event));
+ if (qp->qplib_qp.srq) {
+ event.device = &qp->rdev->ibdev;
+ event.element.qp = &qp->ib_qp;
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ }
+
+ if (event.device && qp->ib_qp.event_handler)
+ qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+
+ return 0;
+}
+
+static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
+ void *obj)
+{
+ int rc = 0;
+ u8 event;
+
+ if (!obj)
+ return rc; /* QP was already dead, still return success */
+
+ event = affi_async->event;
+ if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
+ struct bnxt_qplib_qp *lib_qp = obj;
+ struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
+ qplib_qp);
+ rc = bnxt_re_handle_qp_async_event(affi_async, qp);
+ }
+ return rc;
+}
+
+static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
+ void *aeqe, void *obj)
+{
+ struct creq_qp_event *affi_async;
+ struct creq_func_event *unaffi_async;
+ u8 type;
+ int rc;
+
+ type = ((struct creq_base *)aeqe)->type;
+ if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
+ unaffi_async = aeqe;
+ rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
+ } else {
+ affi_async = aeqe;
+ rc = bnxt_re_handle_affi_async_event(affi_async, obj);
+ }
+
+ return rc;
+}
+
+static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *handle, u8 event)
+{
+ struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
+ qplib_srq);
+ struct ib_event ib_event;
+ int rc = 0;
+
+ if (!srq) {
+ dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
+ ROCE_DRV_MODULE_NAME);
+ rc = -EINVAL;
+ goto done;
+ }
+ ib_event.device = &srq->rdev->ibdev;
+ ib_event.element.srq = &srq->ib_srq;
+ if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
+ ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ else
+ ib_event.event = IB_EVENT_SRQ_ERR;
+
+ if (srq->ib_srq.event_handler) {
+ /* Lock event_handler? */
+ (*srq->ib_srq.event_handler)(&ib_event,
+ srq->ib_srq.srq_context);
+ }
+done:
+ return rc;
+}
+
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *handle)
{
@@ -690,7 +856,8 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
i - 1, rdev->msix_entries[i].vector,
rdev->msix_entries[i].db_offset,
- &bnxt_re_cqn_handler, NULL);
+ &bnxt_re_cqn_handler,
+ &bnxt_re_srqn_handler);
if (rc) {
dev_err(rdev_to_dev(rdev),
@@ -734,7 +901,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+ rdev->is_virtfn);
if (rc)
goto fail;
@@ -1035,19 +1203,6 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
}
}
-static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
-{
- u32 i;
-
- rdev->qplib_ctx.qpc_count = BNXT_RE_MAX_QPC_COUNT;
- rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT;
- rdev->qplib_ctx.srqc_count = BNXT_RE_MAX_SRQC_COUNT;
- rdev->qplib_ctx.cq_count = BNXT_RE_MAX_CQ_COUNT;
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
-}
-
/* worker thread for polling periodic events. Now used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
@@ -1070,6 +1225,9 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ /* Check whether VF or PF */
+ bnxt_re_get_sriov_func_type(rdev);
+
rc = bnxt_re_request_msix(rdev);
if (rc) {
pr_err("Failed to get MSI-X vectors: %#x\n", rc);
@@ -1101,16 +1259,18 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
(rdev->en_dev->pdev, &rdev->rcfw,
rdev->msix_entries[BNXT_RE_AEQ_IDX].vector,
rdev->msix_entries[BNXT_RE_AEQ_IDX].db_offset,
- 0, &bnxt_re_aeq_handler);
+ rdev->is_virtfn, &bnxt_re_aeq_handler);
if (rc) {
pr_err("Failed to enable RCFW channel: %#x\n", rc);
goto free_ring;
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+ rdev->is_virtfn);
if (rc)
goto disable_rcfw;
- bnxt_re_set_resource_limits(rdev);
+ if (!rdev->is_virtfn)
+ bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0);
if (rc) {
@@ -1125,7 +1285,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto free_ctx;
}
- rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx, 0);
+ rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
+ rdev->is_virtfn);
if (rc) {
pr_err("Failed to initialize RCFW: %#x\n", rc);
goto free_sctx;
@@ -1144,13 +1305,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto fail;
}
- rc = bnxt_re_setup_qos(rdev);
- if (rc)
- pr_info("RoCE priority not yet configured\n");
+ if (!rdev->is_virtfn) {
+ rc = bnxt_re_setup_qos(rdev);
+ if (rc)
+ pr_info("RoCE priority not yet configured\n");
- INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
- set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
+ set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
+ schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ }
/* Register ib dev */
rc = bnxt_re_register_ib(rdev);
@@ -1176,6 +1339,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
+ set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
@@ -1400,7 +1564,7 @@ err_netdev:
static void __exit bnxt_re_mod_exit(void)
{
- struct bnxt_re_dev *rdev;
+ struct bnxt_re_dev *rdev, *next;
LIST_HEAD(to_be_deleted);
mutex_lock(&bnxt_re_dev_lock);
@@ -1408,8 +1572,11 @@ static void __exit bnxt_re_mod_exit(void)
if (!list_empty(&bnxt_re_dev_list))
list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
mutex_unlock(&bnxt_re_dev_lock);
-
- list_for_each_entry(rdev, &to_be_deleted, list) {
+ /*
+ * Cleanup the devices in reverse order so that the VF device
+ * cleanup is done before PF cleanup
+ */
+ list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
dev_info(rdev_to_dev(rdev), "Unregistering Device");
bnxt_re_dev_stop(rdev);
bnxt_re_ib_unreg(rdev, true);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 61764f7aa79b..8b5f11ac0e42 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -52,6 +52,7 @@
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
@@ -278,6 +279,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct nq_base *nqe, **nq_ptr;
struct bnxt_qplib_cq *cq;
int num_cqne_processed = 0;
+ int num_srqne_processed = 0;
u32 sw_cons, raw_cons;
u16 type;
int budget = nq->budget;
@@ -320,6 +322,26 @@ static void bnxt_qplib_service_nq(unsigned long data)
spin_unlock_bh(&cq->compl_lock);
break;
}
+ case NQ_BASE_TYPE_SRQ_EVENT:
+ {
+ struct nq_srq_event *nqsrqe =
+ (struct nq_srq_event *)nqe;
+
+ q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
+ << 32;
+ bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
+ DBR_DBR_TYPE_SRQ_ARMENA);
+ if (!nq->srqn_handler(nq,
+ (struct bnxt_qplib_srq *)q_handle,
+ nqsrqe->event))
+ num_srqne_processed++;
+ else
+ dev_warn(&nq->pdev->dev,
+ "QPLIB: SRQ event 0x%x not handled",
+ nqsrqe->event);
+ break;
+ }
case NQ_BASE_TYPE_DBQ_EVENT:
break;
default:
@@ -384,17 +406,19 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- void *, u8 event))
+ struct bnxt_qplib_srq *,
+ u8 event))
{
resource_size_t nq_base;
int rc = -1;
nq->pdev = pdev;
nq->vector = msix_vector;
+ if (cqn_handler)
+ nq->cqn_handler = cqn_handler;
- nq->cqn_handler = cqn_handler;
-
- nq->srqn_handler = srqn_handler;
+ if (srqn_handler)
+ nq->srqn_handler = srqn_handler;
tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
@@ -410,7 +434,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
if (rc) {
dev_err(&nq->pdev->dev,
"Failed to request IRQ for NQ: %#x", rc);
- bnxt_qplib_disable_nq(nq);
goto fail;
}
@@ -469,6 +492,238 @@ int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
return 0;
}
+/* SRQ */
+static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct dbr_dbr db_msg = { 0 };
+ void __iomem *db;
+ u32 sw_prod = 0;
+
+ /* Ring DB */
+ sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
+ HWQ_CMP(srq_hwq->prod, srq_hwq);
+ db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
+ DBR_DBR_INDEX_MASK);
+ db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
+ DBR_DBR_XID_MASK) | arm_type);
+ db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
+ srq->dbr_base : srq->dpi->dbr;
+ wmb(); /* barrier before db ring */
+ __iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
+}
+
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_destroy_srq req;
+ struct creq_destroy_srq_resp resp;
+ u16 cmd_flags = 0;
+ int rc;
+
+ RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
+
+ /* Configure the request */
+ req.srq_cid = cpu_to_le32(srq->id);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
+
+ bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ kfree(srq->swq);
+ return 0;
+}
+
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_create_srq req;
+ struct creq_create_srq_resp resp;
+ struct bnxt_qplib_pbl *pbl;
+ u16 cmd_flags = 0;
+ int rc, idx;
+
+ srq->hwq.max_elements = srq->max_wqe;
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
+ srq->nmap, &srq->hwq.max_elements,
+ BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
+ PAGE_SIZE, HWQ_TYPE_QUEUE);
+ if (rc)
+ goto exit;
+
+ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq)
+ goto fail;
+
+ RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
+
+ /* Configure the request */
+ req.dpi = cpu_to_le32(srq->dpi->dpi);
+ req.srq_handle = cpu_to_le64((u64)srq);
+
+ req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
+ pbl = &srq->hwq.pbl[PBL_LVL_0];
+ req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
+ CMDQ_CREATE_SRQ_LVL_MASK) <<
+ CMDQ_CREATE_SRQ_LVL_SFT) |
+ (pbl->pg_size == ROCE_PG_SIZE_4K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
+ pbl->pg_size == ROCE_PG_SIZE_8K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
+ pbl->pg_size == ROCE_PG_SIZE_64K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
+ pbl->pg_size == ROCE_PG_SIZE_2M ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
+ pbl->pg_size == ROCE_PG_SIZE_8M ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
+ pbl->pg_size == ROCE_PG_SIZE_1G ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
+ req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ req.pd_id = cpu_to_le32(srq->pd->id);
+ req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ goto fail;
+
+ spin_lock_init(&srq->lock);
+ srq->start_idx = 0;
+ srq->last_idx = srq->hwq.max_elements - 1;
+ for (idx = 0; idx < srq->hwq.max_elements; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+
+ srq->id = le32_to_cpu(resp.xid);
+ srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
+ if (srq->threshold)
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
+ srq->arm_req = false;
+
+ return 0;
+fail:
+ bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ kfree(srq->swq);
+exit:
+ return rc;
+}
+
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ u32 sw_prod, sw_cons, count = 0;
+
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+
+ count = sw_prod > sw_cons ? sw_prod - sw_cons :
+ srq_hwq->max_elements - sw_cons + sw_prod;
+ if (count > srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ } else {
+ /* Deferred arming */
+ srq->arm_req = true;
+ }
+
+ return 0;
+}
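
The occupancy test above is the usual ring distance between producer and
consumer indices; a standalone check of the wrap-around case (values
illustrative):

#include <stdio.h>

static unsigned int ring_count(unsigned int prod, unsigned int cons,
			       unsigned int max)
{
	/* Same expression as in bnxt_qplib_modify_srq() */
	return prod > cons ? prod - cons : max - cons + prod;
}

int main(void)
{
	printf("%u\n", ring_count(10, 4, 16)); /* 6 */
	printf("%u\n", ring_count(2, 14, 16)); /* 4, wrapped */
	return 0;
}
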
+
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_query_srq req;
+ struct creq_query_srq_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_srq_resp_sb *sb;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
+ req.srq_cid = cpu_to_le32(srq->id);
+
+ /* Configure the request */
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf)
+ return -ENOMEM;
+ sb = sbuf->sb;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ srq->threshold = le16_to_cpu(sb->srq_limit);
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+
+ return rc;
+}
+
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct rq_wqe *srqe, **srqe_ptr;
+ struct sq_sge *hw_sge;
+ u32 sw_prod, sw_cons, count = 0;
+ int i, rc = 0, next;
+
+ spin_lock(&srq_hwq->lock);
+ if (srq->start_idx == srq->last_idx) {
+ dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
+ srq->id);
+ rc = -EINVAL;
+ spin_unlock(&srq_hwq->lock);
+ goto done;
+ }
+ next = srq->start_idx;
+ srq->start_idx = srq->swq[next].next_idx;
+ spin_unlock(&srq_hwq->lock);
+
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
+ srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
+ memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ /* Calculate wqe_size16 and data_len */
+ for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
+ i < wqe->num_sge; i++, hw_sge++) {
+ hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
+ hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
+ hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
+ }
+ srqe->wqe_type = wqe->type;
+ srqe->flags = wqe->flags;
+ srqe->wqe_size = wqe->num_sge +
+ ((offsetof(typeof(*srqe), data) + 15) >> 4);
+ srqe->wr_id[0] = cpu_to_le32((u32)next);
+ srq->swq[next].wr_id = wqe->wr_id;
+
+ srq_hwq->prod++;
+
+ spin_lock(&srq_hwq->lock);
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ /* srq_hwq->cons is retained for this occupancy count;
+ * the lock is actually only required to read it.
+ */
+ sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+ count = sw_prod > sw_cons ? sw_prod - sw_cons :
+ srq_hwq->max_elements - sw_cons + sw_prod;
+ spin_unlock(&srq_hwq->lock);
+ /* Ring DB */
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
+ if (srq->arm_req && count > srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ }
+done:
+ return rc;
+}
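
The swq bookkeeping here is an index-threaded free list: bnxt_qplib_create_srq()
links every entry through next_idx, this function pops a slot from start_idx,
and bnxt_qplib_release_srqe() further down recycles the completed tag at
last_idx. A detached sketch of the same pattern, independent of the driver
structures:

#include <stdio.h>

#define NENT 4

static int next_idx[NENT], start_idx, last_idx;

static void init_list(void)
{
	int i;

	start_idx = 0;
	last_idx = NENT - 1;
	for (i = 0; i < NENT; i++)
		next_idx[i] = i + 1;
	next_idx[last_idx] = -1;
}

static int pop(void)                      /* post: take a free slot */
{
	int n = start_idx;

	if (n == last_idx)                /* "full" guard, as in post_srq_recv */
		return -1;
	start_idx = next_idx[n];
	return n;
}

static void push(int tag)                 /* completion: recycle the slot */
{
	next_idx[last_idx] = tag;
	last_idx = tag;
	next_idx[tag] = -1;
}

int main(void)
{
	init_list();
	push(pop());                      /* take slot 0, recycle it */
	printf("next free: %d\n", pop()); /* 1 */
	return 0;
}
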
+
/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
@@ -737,6 +992,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
pbl->pg_size == ROCE_PG_SIZE_1G ?
CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
+ } else {
+ /* SRQ */
+ if (qp->srq) {
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
+ req.srq_cid = cpu_to_le32(qp->srq->id);
+ }
}
if (qp->rcq)
@@ -2068,6 +2329,16 @@ done:
return rc;
}
+static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
+{
+ spin_lock(&srq->hwq.lock);
+ srq->swq[srq->last_idx].next_idx = (int)tag;
+ srq->last_idx = (int)tag;
+ srq->swq[srq->last_idx].next_idx = -1;
+ srq->hwq.cons++; /* Support for SRQE counter */
+ spin_unlock(&srq->hwq.lock);
+}
+
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
struct cq_res_rc *hwcqe,
struct bnxt_qplib_cqe **pcqe,
@@ -2075,6 +2346,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2102,27 +2374,46 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
-
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
@@ -2136,6 +2427,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2166,27 +2458,48 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
hwcqe->src_qp_high_srq_or_rq_wr_id) &
CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx %#x exceeded RQ max %#x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
return rc;
@@ -2218,6 +2531,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2256,26 +2570,49 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
- dev_err(&cq->hwq.pdev->dev, "QPLIB: ix 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
-
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: SRQ used but not defined??");
+ return -EINVAL;
+ }
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: ix 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index c582d4ec8173..211b27a8f9e2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -39,6 +39,27 @@
#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__
+struct bnxt_qplib_srq {
+ struct bnxt_qplib_pd *pd;
+ struct bnxt_qplib_dpi *dpi;
+ void __iomem *dbr_base;
+ u64 srq_handle;
+ u32 id;
+ u32 max_wqe;
+ u32 max_sge;
+ u32 threshold;
+ bool arm_req;
+ struct bnxt_qplib_cq *cq;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_swq *swq;
+ struct scatterlist *sglist;
+ int start_idx;
+ int last_idx;
+ u32 nmap;
+ u16 eventq_hw_ring_id;
+ spinlock_t lock; /* protect SRQE link list */
+};
+
struct bnxt_qplib_sge {
u64 addr;
u32 lkey;
@@ -79,6 +100,7 @@ static inline u32 get_psne_idx(u32 val)
struct bnxt_qplib_swq {
u64 wr_id;
+ int next_idx;
u8 type;
u8 flags;
u32 start_psn;
@@ -404,29 +426,27 @@ struct bnxt_qplib_cq {
writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
struct bnxt_qplib_nq {
- struct pci_dev *pdev;
-
- int vector;
- cpumask_t mask;
- int budget;
- bool requested;
- struct tasklet_struct worker;
- struct bnxt_qplib_hwq hwq;
-
- u16 bar_reg;
- u16 bar_reg_off;
- u16 ring_id;
- void __iomem *bar_reg_iomem;
-
- int (*cqn_handler)
- (struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *cq);
- int (*srqn_handler)
- (struct bnxt_qplib_nq *nq,
- void *srq,
- u8 event);
- struct workqueue_struct *cqn_wq;
- char name[32];
+ struct pci_dev *pdev;
+
+ int vector;
+ cpumask_t mask;
+ int budget;
+ bool requested;
+ struct tasklet_struct worker;
+ struct bnxt_qplib_hwq hwq;
+
+ u16 bar_reg;
+ u16 bar_reg_off;
+ u16 ring_id;
+ void __iomem *bar_reg_iomem;
+
+ int (*cqn_handler)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *cq);
+ int (*srqn_handler)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *srq,
+ u8 event);
+ struct workqueue_struct *cqn_wq;
+ char name[32];
};
struct bnxt_qplib_nq_work {
@@ -441,8 +461,18 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *cq),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- void *srq,
+ struct bnxt_qplib_srq *srq,
u8 event));
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
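A hypothetical caller of the new SRQ entry points, sketched to show the intended lifecycle (the wqe setup and error handling here are illustrative, not taken from the driver):

	struct bnxt_qplib_srq srq = {};
	struct bnxt_qplib_swqe wqe = {};
	int rc;

	rc = bnxt_qplib_create_srq(res, &srq);		/* allocate HW ring, assigns srq.id */
	if (rc)
		return rc;
	rc = bnxt_qplib_post_srq_recv(&srq, &wqe);	/* queue one receive buffer */
	if (rc)
		pr_err("post_srq_recv failed: %d\n", rc);
	/* bnxt_qplib_modify_srq() re-arms srq.threshold for the limit event */
	bnxt_qplib_destroy_srq(res, &srq);		/* tear down when done */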
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index bb5574adf195..8329ec6a7946 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -93,7 +93,8 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
opcode = req->opcode;
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
- opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
+ opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
+ opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW not initialized, reject opcode 0x%x",
opcode);
@@ -615,7 +616,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- struct creq_func_event *))
+ void *, void *))
{
resource_size_t res_base;
struct cmdq_init init;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2946a7cfae82..6bee6e3636ea 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -167,7 +167,7 @@ struct bnxt_qplib_rcfw {
#define FIRMWARE_TIMED_OUT 3
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- struct creq_func_event *);
+ void *, void *);
u32 seq_num;
/* Bar region info */
@@ -199,9 +199,8 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
- int (*aeq_handler)
- (struct bnxt_qplib_rcfw *,
- struct creq_func_event *));
+ int (*aeq_handler)(struct bnxt_qplib_rcfw *,
+ void *aeqe, void *obj));
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 4e101704e801..ad37d54affcc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -104,13 +104,12 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
if (!sghead) {
for (i = 0; i < pages; i++) {
- pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
- pbl->pg_size,
- &pbl->pg_map_arr[i],
- GFP_KERNEL);
+ pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
if (!pbl->pg_arr[i])
goto fail;
- memset(pbl->pg_arr[i], 0, pbl->pg_size);
pbl->pg_count++;
}
} else {
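The hunk above leans on dma_zalloc_coherent() being exactly dma_alloc_coherent() plus a memset(); a minimal sketch of the equivalence, not from the patch:

	/* open-coded form the patch removes */
	pg = dma_alloc_coherent(&pdev->dev, pbl->pg_size, &map, GFP_KERNEL);
	if (pg)
		memset(pg, 0, pbl->pg_size);

	/* single call that returns already-zeroed coherent memory */
	pg = dma_zalloc_coherent(&pdev->dev, pbl->pg_size, &map, GFP_KERNEL);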
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9543ce51a28a..c015c1861351 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -64,8 +64,28 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
+static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
+ char *fw_ver)
+{
+ struct cmdq_query_version req;
+ struct creq_query_version_resp resp;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return;
+ fw_ver[0] = resp.fw_maj;
+ fw_ver[1] = resp.fw_minor;
+ fw_ver[2] = resp.fw_bld;
+ fw_ver[3] = resp.fw_rsvd;
+}
+
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr)
+ struct bnxt_qplib_dev_attr *attr, bool vf)
{
struct cmdq_query_func req;
struct creq_query_func_resp resp;
@@ -95,7 +115,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
- attr->max_qp += 1;
+ if (!vf)
+ attr->max_qp += 1;
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -133,7 +154,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
attr->max_sgid = le32_to_cpu(sb->max_gid);
- strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver));
+ bnxt_qplib_query_version(rcfw, attr->fw_ver);
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
@@ -150,6 +171,38 @@ bail:
return rc;
}
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct cmdq_set_func_resources req;
+ struct creq_set_func_resources_resp resp;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);
+
+ req.number_of_qp = cpu_to_le32(ctx->qpc_count);
+ req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
+ req.number_of_srq = cpu_to_le32(ctx->srqc_count);
+ req.number_of_cq = cpu_to_le32(ctx->cq_count);
+
+ req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
+ req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
+ req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
+ req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
+ req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp,
+ NULL, 0);
+ if (rc) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: Failed to set function resources");
+ }
+ return rc;
+}
+
/* SGID */
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
@@ -604,7 +657,7 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block)
+ u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_register_mr req;
@@ -615,6 +668,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
u32 pg_size;
if (num_pbls) {
+ /* Allocate memory for the non-leaf pages to store buf ptrs.
+ * Non-leaf pages always use the system PAGE_SIZE
+ */
pg_ptrs = roundup_pow_of_two(num_pbls);
pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
if (!pages)
@@ -632,6 +688,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
mr->hwq.max_elements = pages;
+ /* Use system PAGE_SIZE */
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0,
&mr->hwq.max_elements,
PAGE_SIZE, 0, PAGE_SIZE,
@@ -652,18 +709,22 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
/* Configure the request */
if (mr->hwq.level == PBL_LVL_MAX) {
+ /* No PBL provided, just use system PAGE_SIZE */
level = 0;
req.pbl = 0;
pg_size = PAGE_SIZE;
} else {
level = mr->hwq.level + 1;
req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
- pg_size = mr->hwq.pbl[PBL_LVL_0].pg_size;
}
+ pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
((ilog2(pg_size) <<
CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
+ req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
req.access = (mr->flags & 0xFFFF);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
@@ -729,3 +790,73 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
0);
return 0;
}
+
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats)
+{
+ struct cmdq_query_roce_stats req;
+ struct creq_query_roce_stats_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_roce_stats_resp_sb *sb;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);
+
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: SP: QUERY_ROCE_STATS alloc side buffer failed");
+ return -ENOMEM;
+ }
+
+ sb = sbuf->sb;
+ req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ if (rc)
+ goto bail;
+ /* Extract the statistics from the side buffer */
+ stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
+ stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
+ stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
+ stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
+ stats->missing_resp = le64_to_cpu(sb->missing_resp);
+ stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
+ stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
+ stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
+ stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
+ stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
+ stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
+ stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
+ stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
+ stats->dup_req = le64_to_cpu(sb->dup_req);
+ stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
+ stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
+ stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
+ stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
+ stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
+ stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
+ stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
+ stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
+ stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
+ stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
+ stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
+ stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
+ stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
+ stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
+ stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
+ stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
+ stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
+ stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
+ stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
+ stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
+ stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
+ stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
+ stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
+ stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
+ stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
+}
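The long copy-out above is mechanical: the side buffer is written by the device in little-endian, so every 64-bit counter passes through le64_to_cpu() on its way into host-order storage. The pattern, reduced to one field:

	__le64 wire = sb->to_retransmits;	/* as the device wrote it */
	u64 host = le64_to_cpu(wire);		/* swapped only on big-endian CPUs */

	stats->to_retransmits = host;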
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 11322582f5e4..9d3e8b994945 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -45,7 +45,8 @@
#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040
struct bnxt_qplib_dev_attr {
- char fw_ver[32];
+#define FW_VER_ARR_LEN 4
+ u8 fw_ver[FW_VER_ARR_LEN];
u16 max_sgid;
u16 max_mrw;
u32 max_qp;
@@ -127,6 +128,85 @@ struct bnxt_qplib_frpl {
#define BNXT_QPLIB_ACCESS_ZERO_BASED BIT(5)
#define BNXT_QPLIB_ACCESS_ON_DEMAND BIT(6)
+struct bnxt_qplib_roce_stats {
+ u64 to_retransmits;
+ u64 seq_err_naks_rcvd;
+ /* seq_err_naks_rcvd is 64 b */
+ u64 max_retry_exceeded;
+ /* max_retry_exceeded is 64 b */
+ u64 rnr_naks_rcvd;
+ /* rnr_naks_rcvd is 64 b */
+ u64 missing_resp;
+ u64 unrecoverable_err;
+ /* unrecoverable_err is 64 b */
+ u64 bad_resp_err;
+ /* bad_resp_err is 64 b */
+ u64 local_qp_op_err;
+ /* local_qp_op_err is 64 b */
+ u64 local_protection_err;
+ /* local_protection_err is 64 b */
+ u64 mem_mgmt_op_err;
+ /* mem_mgmt_op_err is 64 b */
+ u64 remote_invalid_req_err;
+ /* remote_invalid_req_err is 64 b */
+ u64 remote_access_err;
+ /* remote_access_err is 64 b */
+ u64 remote_op_err;
+ /* remote_op_err is 64 b */
+ u64 dup_req;
+ /* dup_req is 64 b */
+ u64 res_exceed_max;
+ /* res_exceed_max is 64 b */
+ u64 res_length_mismatch;
+ /* res_length_mismatch is 64 b */
+ u64 res_exceeds_wqe;
+ /* res_exceeds_wqe is 64 b */
+ u64 res_opcode_err;
+ /* res_opcode_err is 64 b */
+ u64 res_rx_invalid_rkey;
+ /* res_rx_invalid_rkey is 64 b */
+ u64 res_rx_domain_err;
+ /* res_rx_domain_err is 64 b */
+ u64 res_rx_no_perm;
+ /* res_rx_no_perm is 64 b */
+ u64 res_rx_range_err;
+ /* res_rx_range_err is 64 b */
+ u64 res_tx_invalid_rkey;
+ /* res_tx_invalid_rkey is 64 b */
+ u64 res_tx_domain_err;
+ /* res_tx_domain_err is 64 b */
+ u64 res_tx_no_perm;
+ /* res_tx_no_perm is 64 b */
+ u64 res_tx_range_err;
+ /* res_tx_range_err is 64 b */
+ u64 res_irrq_oflow;
+ /* res_irrq_oflow is 64 b */
+ u64 res_unsup_opcode;
+ /* res_unsup_opcode is 64 b */
+ u64 res_unaligned_atomic;
+ /* res_unaligned_atomic is 64 b */
+ u64 res_rem_inv_err;
+ /* res_rem_inv_err is 64 b */
+ u64 res_mem_error;
+ /* res_mem_error is 64 b */
+ u64 res_srq_err;
+ /* res_srq_err is 64 b */
+ u64 res_cmp_err;
+ /* res_cmp_err is 64 b */
+ u64 res_invalid_dup_rkey;
+ /* res_invalid_dup_rkey is 64 b */
+ u64 res_wqe_format_err;
+ /* res_wqe_format_err is 64 b */
+ u64 res_cq_load_err;
+ /* res_cq_load_err is 64 b */
+ u64 res_srq_load_err;
+ /* res_srq_load_err is 64 b */
+ u64 res_tx_pci_err;
+ /* res_tx_pci_err is 64 b */
+ u64 res_rx_pci_err;
+ /* res_rx_pci_err is 64 b */
+};
+
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
@@ -147,7 +227,10 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
bool update);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr);
+ struct bnxt_qplib_dev_attr *attr, bool vf);
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
@@ -155,7 +238,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block);
+ u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mr, int max);
@@ -164,4 +247,6 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl);
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats);
#endif /* __BNXT_QPLIB_SP_H__*/
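With fw_ver now four raw bytes instead of a preformatted string, a consumer is expected to render it itself; a hypothetical sketch (buffer name and size are illustrative):

	char ver[16];

	snprintf(ver, sizeof(ver), "%d.%d.%d.%d",
		 attr->fw_ver[0], attr->fw_ver[1],
		 attr->fw_ver[2], attr->fw_ver[3]);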
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index c3cba6063a03..2d7ea096a247 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -954,6 +954,7 @@ struct cmdq_base {
#define CMDQ_BASE_OPCODE_QUERY_VERSION 0x8bUL
#define CMDQ_BASE_OPCODE_MODIFY_CC 0x8cUL
#define CMDQ_BASE_OPCODE_QUERY_CC 0x8dUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL
u8 cmd_size;
__le16 flags;
__le16 cookie;
@@ -1383,8 +1384,20 @@ struct cmdq_register_mr {
#define CMDQ_REGISTER_MR_LVL_LVL_0 0x0UL
#define CMDQ_REGISTER_MR_LVL_LVL_1 0x1UL
#define CMDQ_REGISTER_MR_LVL_LVL_2 0x2UL
+ #define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK 0x7cUL
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4K (0xcUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_8K (0xdUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_64K (0x10UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_256K (0x12UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1M (0x14UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M (0x15UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4M (0x16UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G (0x1eUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_LAST \
+ CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED1 0x80UL
u8 access;
#define CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE 0x1UL
#define CMDQ_REGISTER_MR_ACCESS_REMOTE_READ 0x2UL
@@ -1392,7 +1405,21 @@ struct cmdq_register_mr {
#define CMDQ_REGISTER_MR_ACCESS_REMOTE_ATOMIC 0x8UL
#define CMDQ_REGISTER_MR_ACCESS_MW_BIND 0x10UL
#define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED 0x20UL
- __le16 unused_1;
+ __le16 log2_pbl_pg_size;
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK 0x1fUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K 0xcUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K 0xdUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K 0x10UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K 0x12UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M 0x14UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M 0x15UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M 0x16UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G 0x1eUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_LAST \
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED11_MASK 0xffe0UL
+ #define CMDQ_REGISTER_MR_UNUSED11_SFT 5
__le32 key;
__le64 pbl;
__le64 va;
@@ -1799,6 +1826,16 @@ struct cmdq_set_func_resources {
u8 resp_size;
u8 reserved8;
__le64 resp_addr;
+ __le32 number_of_qp;
+ __le32 number_of_mrw;
+ __le32 number_of_srq;
+ __le32 number_of_cq;
+ __le32 max_qp_per_vf;
+ __le32 max_mrw_per_vf;
+ __le32 max_srq_per_vf;
+ __le32 max_cq_per_vf;
+ __le32 max_gid_per_vf;
+ __le32 stat_ctx_id;
};
/* Read hardware resource context command (24 bytes) */
@@ -2013,6 +2050,20 @@ struct creq_modify_qp_resp {
__le16 reserved48[3];
};
+/* cmdq_query_roce_stats (size:128b/16B) */
+struct cmdq_query_roce_stats {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_LAST \
+ CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
/* Query QP command response (16 bytes) */
struct creq_query_qp_resp {
u8 type;
@@ -2783,6 +2834,80 @@ struct creq_query_cc_resp_sb {
__le64 reserved64_1;
};
+/* creq_query_roce_stats_resp (size:128b/16B) */
+struct creq_query_roce_stats_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_resp_sb (size:2624b/328B) */
+struct creq_query_roce_stats_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le32 num_counters;
+ __le32 rsvd1;
+ __le64 to_retransmits;
+ __le64 seq_err_naks_rcvd;
+ __le64 max_retry_exceeded;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 unrecoverable_err;
+ __le64 bad_resp_err;
+ __le64 local_qp_op_err;
+ __le64 local_protection_err;
+ __le64 mem_mgmt_op_err;
+ __le64 remote_invalid_req_err;
+ __le64 remote_access_err;
+ __le64 remote_op_err;
+ __le64 dup_req;
+ __le64 res_exceed_max;
+ __le64 res_length_mismatch;
+ __le64 res_exceeds_wqe;
+ __le64 res_opcode_err;
+ __le64 res_rx_invalid_rkey;
+ __le64 res_rx_domain_err;
+ __le64 res_rx_no_perm;
+ __le64 res_rx_range_err;
+ __le64 res_tx_invalid_rkey;
+ __le64 res_tx_domain_err;
+ __le64 res_tx_no_perm;
+ __le64 res_tx_range_err;
+ __le64 res_irrq_oflow;
+ __le64 res_unsup_opcode;
+ __le64 res_unaligned_atomic;
+ __le64 res_rem_inv_err;
+ __le64 res_mem_error;
+ __le64 res_srq_err;
+ __le64 res_cmp_err;
+ __le64 res_invalid_dup_rkey;
+ __le64 res_wqe_format_err;
+ __le64 res_cq_load_err;
+ __le64 res_srq_load_err;
+ __le64 res_tx_pci_err;
+ __le64 res_rx_pci_err;
+};
+
/* QP error notification event (16 bytes) */
struct creq_qp_error_notification {
u8 type;
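The PG_* encodings above are simply log2 of the page size shifted into the field: ilog2(4K) = 12 = 0xc, ilog2(2M) = 21 = 0x15, ilog2(1G) = 30 = 0x1e. A sketch of building the new __le16 field, mirroring what bnxt_qplib_reg_mr() does:

	u16 val;

	val = (ilog2(PAGE_SIZE) << CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
	      CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK;
	req.log2_pbl_pg_size = cpu_to_le16(val);	/* 0xc for 4K pages */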
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 21db3b48a617..4cf17c650c36 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -257,8 +257,8 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
- pr_warn("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
ep->emss);
}
@@ -2733,9 +2733,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
if (cxgb_is_neg_adv(req->status)) {
- pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status, neg_adv_str(req->status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
@@ -3567,8 +3566,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND:
case ABORTING:
case DEAD:
- pr_info("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
+ pr_debug("ignoring disconnect ep %p state %u\n",
+ ep, ep->com.state);
break;
default:
WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
@@ -4097,9 +4096,15 @@ static void process_work(struct work_struct *work)
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
opcode = rpl->ot.opcode;
- ret = work_handlers[opcode](dev, skb);
- if (!ret)
+ if (opcode >= ARRAY_SIZE(work_handlers) ||
+ !work_handlers[opcode]) {
+ pr_err("No handler for opcode 0x%x.\n", opcode);
kfree_skb(skb);
+ } else {
+ ret = work_handlers[opcode](dev, skb);
+ if (!ret)
+ kfree_skb(skb);
+ }
process_timedout_eps();
}
}
@@ -4201,8 +4206,8 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (cxgb_is_neg_adv(req->status)) {
- pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status,
neg_adv_str(req->status));
goto out;
}
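The process_work() change above is the usual guard for a sparse dispatch table: validate the index and the slot before the indirect call, and drop the packet otherwise. Its essential shape:

	if (opcode >= ARRAY_SIZE(work_handlers) || !work_handlers[opcode]) {
		pr_err("No handler for opcode 0x%x.\n", opcode);
		kfree_skb(skb);			/* never dereference an empty slot */
	} else if (!work_handlers[opcode](dev, skb)) {
		kfree_skb(skb);			/* handler did not keep the skb */
	}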
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index af77d128d242..7a9d0de89d6a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(c4iw_wr_log_size_order,
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-struct workqueue_struct *reg_workq;
+static struct workqueue_struct *reg_workq;
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
@@ -108,19 +108,19 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
(wq->rdev->wr_log_size - 1);
le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
- getnstimeofday(&le.poll_host_ts);
+ le.poll_host_time = ktime_get();
le.valid = 1;
le.cqe_sge_ts = CQE_TS(cqe);
if (SQ_TYPE(cqe)) {
le.qid = wq->sq.qid;
le.opcode = CQE_OPCODE(cqe);
- le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+ le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
le.wr_id = CQE_WRID_SQ_IDX(cqe);
} else {
le.qid = wq->rq.qid;
le.opcode = FW_RI_RECEIVE;
- le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+ le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
le.wr_id = CQE_WRID_MSN(cqe);
}
@@ -130,9 +130,9 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
static int wr_log_show(struct seq_file *seq, void *v)
{
struct c4iw_dev *dev = seq->private;
- struct timespec prev_ts = {0, 0};
+ ktime_t prev_time;
struct wr_log_entry *lep;
- int prev_ts_set = 0;
+ int prev_time_set = 0;
int idx, end;
#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
@@ -145,33 +145,29 @@ static int wr_log_show(struct seq_file *seq, void *v)
lep = &dev->rdev.wr_log[idx];
while (idx != end) {
if (lep->valid) {
- if (!prev_ts_set) {
- prev_ts_set = 1;
- prev_ts = lep->poll_host_ts;
+ if (!prev_time_set) {
+ prev_time_set = 1;
+ prev_time = lep->poll_host_time;
}
- seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
- "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+ seq_printf(seq, "%04u: nsec %llu qid %u opcode "
+ "%u %s 0x%x host_wr_delta nsec %llu "
"post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
"poll_sge_ts 0x%llx post_poll_delta_ns %llu "
"cqe_poll_delta_ns %llu\n",
idx,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ prev_time)),
lep->qid, lep->opcode,
lep->opcode == FW_RI_RECEIVE ?
"msn" : "wrid",
lep->wr_id,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ lep->post_host_time)),
lep->post_sge_ts, lep->cqe_sge_ts,
lep->poll_sge_ts,
ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
- prev_ts = lep->poll_host_ts;
+ prev_time = lep->poll_host_time;
}
idx++;
if (idx > (dev->rdev.wr_log_size - 1))
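The conversion trades struct timespec plus timespec_sub() for the scalar ktime_t, where a delta is a single 64-bit subtraction. A minimal sketch of the new idiom, not from the patch:

	ktime_t start, now;

	start = ktime_get();			/* monotonic clock */
	/* ... work being timed ... */
	now = ktime_get();
	pr_info("delta %lld ns\n", ktime_to_ns(ktime_sub(now, start)));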
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index a252d5c40ae3..3e9d8b277ab9 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -236,7 +236,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
} else {
- pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
+ pr_debug("unknown cqid 0x%x\n", qid);
spin_unlock_irqrestore(&dev->lock, flag);
}
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 65dd3726ca02..cc929002c05e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -153,8 +153,8 @@ struct c4iw_hw_queue {
};
struct wr_log_entry {
- struct timespec post_host_ts;
- struct timespec poll_host_ts;
+ ktime_t post_host_time;
+ ktime_t poll_host_time;
u64 post_sge_ts;
u64 cqe_sge_ts;
u64 poll_sge_ts;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d5c92fc520d6..de77b6027d69 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1042,7 +1042,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (c4iw_wr_log) {
swsqe->sge_ts = cxgb4_read_sge_timestamp(
qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(&swsqe->host_ts);
+ swsqe->host_time = ktime_get();
}
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -1117,8 +1117,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
cxgb4_read_sge_timestamp(
qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(
- &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
+ qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
+ ktime_get();
}
wqe->recv.opcode = FW_RI_RECV_WR;
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 79e8ee12c391..8369c7c8de83 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -277,7 +277,7 @@ struct t4_swsqe {
int signaled;
u16 idx;
int flushed;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
};
@@ -318,7 +318,7 @@ struct t4_sq {
struct t4_swrqe {
u64 wr_id;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
};
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 4f057e8ffe50..6660f920f42e 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6518,11 +6518,12 @@ static void _dc_start(struct hfi1_devdata *dd)
if (!dd->dc_shutdown)
return;
- /*
- * Take the 8051 out of reset, wait until 8051 is ready, and set host
- * version bit.
- */
- release_and_wait_ready_8051_firmware(dd);
+ /* Take the 8051 out of reset */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+ /* Wait until 8051 is ready */
+ if (wait_fm_ready(dd, TIMEOUT_8051_START))
+ dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
+ __func__);
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
write_csr(dd, DCC_CFG_RESET, 0x10);
@@ -8564,23 +8565,27 @@ int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
}
/*
- * If the 8051 is in reset mode (dd->dc_shutdown == 1), this function
- * will still continue executing.
- *
* Returns:
* < 0 = Linux error, not able to get access
* > 0 = 8051 command RETURN_CODE
*/
-static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
- u64 *out_data)
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data)
{
u64 reg, completed;
int return_code;
unsigned long timeout;
- lockdep_assert_held(&dd->dc8051_lock);
hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
+ mutex_lock(&dd->dc8051_lock);
+
+ /* We can't send any commands to the 8051 if it's in reset */
+ if (dd->dc_shutdown) {
+ return_code = -ENODEV;
+ goto fail;
+ }
+
/*
* If an 8051 host command timed out previously, then the 8051 is
* stuck.
@@ -8681,29 +8686,6 @@ static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
fail:
- return return_code;
-}
-
-/*
- * Returns:
- * < 0 = Linux error, not able to get access
- * > 0 = 8051 command RETURN_CODE
- */
-static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
- u64 *out_data)
-{
- int return_code;
-
- mutex_lock(&dd->dc8051_lock);
- /* We can't send any commands to the 8051 if it's in reset */
- if (dd->dc_shutdown) {
- return_code = -ENODEV;
- goto fail;
- }
-
- return_code = _do_8051_command(dd, type, in_data, out_data);
-
-fail:
mutex_unlock(&dd->dc8051_lock);
return return_code;
}
@@ -8713,17 +8695,16 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
-static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
+int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
{
u64 data;
int ret;
- lockdep_assert_held(&dd->dc8051_lock);
data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
| (u64)config_data << LOAD_DATA_DATA_SHIFT;
- ret = _do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
+ ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd,
"load 8051 config: field id %d, lane %d, err %d\n",
@@ -8732,18 +8713,6 @@ static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
return ret;
}
-int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
-{
- int return_code;
-
- mutex_lock(&dd->dc8051_lock);
- return_code = _load_8051_config(dd, field_id, lane_id, config_data);
- mutex_unlock(&dd->dc8051_lock);
-
- return return_code;
-}
-
/*
* Read the 8051 firmware "registers". Use the RAM directly. Always
* set the result, even on error.
@@ -8859,14 +8828,13 @@ int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
u32 frame;
u32 mask;
- lockdep_assert_held(&dd->dc8051_lock);
mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
/* Clear, then set field */
frame &= ~mask;
frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
- return _load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
- frame);
+ return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
+ frame);
}
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
@@ -9270,6 +9238,14 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ goto set_local_link_attributes_fail;
+ }
+
/*
* DC supports continuous updates.
*/
@@ -14944,9 +14920,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
num_vls > HFI1_MAX_VLS_SUPPORTED) {
- hfi1_early_err(&pdev->dev,
- "Invalid num_vls %u, using %u VLs\n",
- num_vls, HFI1_MAX_VLS_SUPPORTED);
+ dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
+ num_vls, HFI1_MAX_VLS_SUPPORTED);
num_vls = HFI1_MAX_VLS_SUPPORTED;
}
ppd->vls_supported = num_vls;
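After this refactor the mutex and the shutdown check sit inside do_8051_command() itself rather than in a _do_8051_command() inner helper, so every caller is serialized the same way. The consolidated shape, in outline:

	static int do_8051_command(struct hfi1_devdata *dd, u32 type,
				   u64 in_data, u64 *out_data)
	{
		int return_code;

		mutex_lock(&dd->dc8051_lock);
		if (dd->dc_shutdown) {		/* 8051 held in reset: refuse */
			return_code = -ENODEV;
			goto fail;
		}
		/* ... issue the command and poll for completion ... */
	fail:
		mutex_unlock(&dd->dc8051_lock);
		return return_code;
	}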
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 133e313feca4..21fca8ec5076 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -508,6 +508,7 @@
#define DOWN_REMOTE_REASON_SHIFT 16
#define DOWN_REMOTE_REASON_MASK 0xff
+#define HOST_INTERFACE_VERSION 1
#define HOST_INTERFACE_VERSION_SHIFT 16
#define HOST_INTERFACE_VERSION_MASK 0xff
@@ -713,7 +714,6 @@ void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
u8 *ver_patch);
int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
void read_guid(struct hfi1_devdata *dd);
-int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd);
int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
u8 neigh_reason, u8 rem_reason);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 4f65ac671044..067b29f35f21 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -159,22 +159,6 @@ static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
-const char *get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
- return iname;
-}
-
-const char *get_card_name(struct rvt_dev_info *rdi)
-{
- struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
- struct hfi1_devdata *dd = container_of(ibdev,
- struct hfi1_devdata, verbs_dev);
- return get_unit_name(dd->unit);
-}
-
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7750a9c38b06..bd6f03cc5ee0 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -74,7 +74,7 @@
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
static u64 kvirt_to_phys(void *addr);
@@ -102,8 +102,8 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
struct hfi1_user_info *uinfo,
struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
-static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
-static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
unsigned long arg);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
@@ -607,10 +607,10 @@ static int vma_fault(struct vm_fault *vmf)
return 0;
}
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
struct hfi1_ctxtdata *uctxt;
- unsigned pollflag;
+ __poll_t pollflag;
uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
if (!uctxt)
@@ -763,11 +763,11 @@ static int complete_subctxt(struct hfi1_filedata *fd)
}
if (ret) {
- hfi1_rcd_put(fd->uctxt);
- fd->uctxt = NULL;
spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
+ hfi1_rcd_put(fd->uctxt);
+ fd->uctxt = NULL;
}
return ret;
@@ -1425,13 +1425,13 @@ static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
return ret;
}
-static unsigned int poll_urgent(struct file *fp,
+static __poll_t poll_urgent(struct file *fp,
struct poll_table_struct *pt)
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &uctxt->wait, pt);
@@ -1448,13 +1448,13 @@ static unsigned int poll_urgent(struct file *fp,
return pollflag;
}
-static unsigned int poll_next(struct file *fp,
+static __poll_t poll_next(struct file *fp,
struct poll_table_struct *pt)
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &uctxt->wait, pt);
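__poll_t is the sparse-annotated type for poll results, built from EPOLL* bits rather than the old unsigned int POLL* values. A minimal conforming handler, with my_waitqueue and data_ready as placeholders:

	static __poll_t example_poll(struct file *fp, struct poll_table_struct *pt)
	{
		__poll_t mask = 0;

		poll_wait(fp, &my_waitqueue, pt);	/* register for wakeups */
		if (data_ready)
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}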
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 98868df78a7e..2b57ba70ddd6 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -68,7 +68,6 @@
#define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
#define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
-#define HOST_INTERFACE_VERSION 1
MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
@@ -976,46 +975,6 @@ int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
}
/*
- * Clear all reset bits, releasing the 8051.
- * Wait for firmware to be ready to accept host requests.
- * Then, set host version bit.
- *
- * This function executes even if the 8051 is in reset mode when
- * dd->dc_shutdown == 1.
- *
- * Expects dd->dc8051_lock to be held.
- */
-int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
-{
- int ret;
-
- lockdep_assert_held(&dd->dc8051_lock);
- /* clear all reset bits, releasing the 8051 */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
-
- /*
- * Wait for firmware to be ready to accept host
- * requests.
- */
- ret = wait_fm_ready(dd, TIMEOUT_8051_START);
- if (ret) {
- dd_dev_err(dd, "8051 start timeout, current FW state 0x%x\n",
- get_firmware_state(dd));
- return ret;
- }
-
- ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to set host interface version, return 0x%x\n",
- ret);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
* Load the 8051 firmware.
*/
static int load_8051_firmware(struct hfi1_devdata *dd,
@@ -1080,22 +1039,31 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
if (ret)
return ret;
+ /* clear all reset bits, releasing the 8051 */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+
/*
- * Clear all reset bits, releasing the 8051.
* DC reset step 5. Wait for firmware to be ready to accept host
* requests.
- * Then, set host version bit.
*/
- mutex_lock(&dd->dc8051_lock);
- ret = release_and_wait_ready_8051_firmware(dd);
- mutex_unlock(&dd->dc8051_lock);
- if (ret)
- return ret;
+ ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+ if (ret) { /* timed out */
+ dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
+ get_firmware_state(dd));
+ return -ETIMEDOUT;
+ }
read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
(int)ver_major, (int)ver_minor, (int)ver_patch);
dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ return -EIO;
+ }
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 8ce9118d4a7f..b42c22292597 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1623,7 +1623,7 @@ static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
* the 'error info' for this failure.
*/
static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
- u16 slid)
+ u32 slid)
{
struct hfi1_devdata *dd = ppd->dd;
@@ -1971,8 +1971,6 @@ int get_platform_config_field(struct hfi1_devdata *dd,
table_type, int table_index, int field_index,
u32 *data, u32 len);
-const char *get_unit_name(int unit);
-const char *get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
/*
@@ -2122,39 +2120,42 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
#define dd_dev_emerg(dd, fmt, ...) \
dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_err_ratelimited(dd, fmt, ...) \
dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_warn_ratelimited(dd, fmt, ...) \
dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_info(dd, fmt, ...) \
dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_info_ratelimited(dd, fmt, ...) \
dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_dbg(dd, fmt, ...) \
dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define hfi1_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
- get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
/*
* this is used for formatting hw error messages...
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 8e3b3e7d829a..9b128268fb28 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1272,6 +1272,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
"Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
+ rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+
/*
* Initialize all locks for the device. This needs to be as early as
* possible so locks are usable.
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index cf8dba34fe30..34547a48a445 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -4348,11 +4348,7 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
*/
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
return 0;
- /*
- * On OPA devices it is okay to lose the upper 16 bits of LID as this
- * information is obtained elsewhere. Mask off the upper 16 bits.
- */
- ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
+ ingress_pkey_table_fail(ppd, pkey, in_wc->slid);
return 1;
}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index e7b3ce123da6..70aceefe14d5 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -77,6 +77,7 @@ static void do_remove(struct mmu_rb_handler *handler,
static void handle_remove(struct work_struct *work);
static const struct mmu_notifier_ops mn_opts = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.invalidate_range_start = mmu_notifier_range_start,
};
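MMU_INVALIDATE_DOES_NOT_BLOCK declares that this notifier's invalidation callbacks never sleep, which lets the core invoke them from non-blocking contexts such as the OOM reaper. A sketch of the contract the flag implies (callback names and body are illustrative):

	static void my_range_start(struct mmu_notifier *mn, struct mm_struct *mm,
				   unsigned long start, unsigned long end)
	{
		/* spinlocks only here -- sleeping would break the flag's promise */
	}

	static const struct mmu_notifier_ops my_ops = {
		.flags			= MMU_INVALIDATE_DOES_NOT_BLOCK,
		.invalidate_range_start	= my_range_start,
	};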
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 4b01ccd895b4..5507910e8b8a 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -556,6 +556,8 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
struct sdma_engine *sde;
struct send_context *send_context;
struct rvt_ack_entry *e = NULL;
+ struct rvt_srq *srq = qp->ibqp.srq ?
+ ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;
sde = qp_to_sdma_engine(qp, priv->s_sc);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
@@ -563,7 +565,7 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
if (qp->s_ack_queue)
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
seq_printf(s,
- "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x\n",
+ "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
iter->n,
qp_idle(qp) ? "I" : "B",
qp->ibqp.qp_num,
@@ -610,7 +612,11 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
/* ack queue information */
e ? e->opcode : 0,
e ? e->psn : 0,
- e ? e->lpsn : 0);
+ e ? e->lpsn : 0,
+ qp->r_min_rnr_timer,
+ srq ? "SRQ" : "RQ",
+ srq ? srq->rq.size : qp->r_rq.size
+ );
}
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index af5f7936f7e5..14cc212a21c7 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -302,7 +302,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -346,7 +345,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
@@ -843,11 +841,11 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
/* Convert dwords to flits */
len = (*hwords + *nwords) >> 1;
- hfi1_make_16b_hdr(hdr,
- ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr),
+ hfi1_make_16b_hdr(hdr, ppd->lid |
+ (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
+ ((1 << ppd->lmc) - 1)),
opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
- 16B),
- len, pkey, becn, 0, l4, sc5);
+ 16B), len, pkey, becn, 0, l4, sc5);
bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
bth0 |= extra_bytes << 20;
@@ -900,7 +898,6 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
}
/* Ensure s_rdma_ack_cnt changes are committed */
- smp_read_barrier_depends();
if (qp->s_rdma_ack_cnt) {
hfi1_queue_rc_ack(qp, is_fecn);
return;
@@ -1562,7 +1559,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 2c7fc6e331ea..13b994738f41 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -362,7 +362,6 @@ static void ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 31c8f89b5fc8..61c130dbed10 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque)
static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
- smp_read_barrier_depends(); /* see sdma_update_tail() */
return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 991bbee04821..132b63e787d1 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index beb5091eccca..deb184574395 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -486,7 +486,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -500,7 +499,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
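These deletions all follow from READ_ONCE() having absorbed smp_read_barrier_depends(), so the dependency ordering the explicit barrier provided is now implied by the load itself. Before and after:

	/* before: barrier spelled out by hand */
	smp_read_barrier_depends();		/* see post_one_send() */
	if (qp->s_last == READ_ONCE(qp->s_head))
		goto bail;

	/* after: READ_ONCE() alone carries the dependency ordering */
	if (qp->s_last == READ_ONCE(qp->s_head))
		goto bail;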
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index a38785e224cc..b8776a362a91 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1486,7 +1486,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
- mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
+ mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
/*
* sm_lid of 0xFFFF needs special handling so that it can
@@ -1844,7 +1844,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
struct hfi1_ibport *ibp = &ppd->ibport_data;
unsigned i;
int ret;
- size_t lcpysz = IB_DEVICE_NAME_MAX;
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
@@ -1872,8 +1871,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
*/
if (!ib_hfi1_sys_image_guid)
ib_hfi1_sys_image_guid = ibdev->node_guid;
- lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
- strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
ibdev->owner = THIS_MODULE;
ibdev->phys_port_cnt = dd->num_pports;
ibdev->dev.parent = &dd->pcidev->dev;
@@ -1893,7 +1890,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
* Fill in rvt info object.
*/
dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
- dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index ff426a625e13..97bf2cd1cacb 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
-hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
+hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 1085cb249bc1..9ebe839d8b24 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -103,6 +103,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
context->out_param = out_param;
complete(&context->done);
}
+EXPORT_SYMBOL_GPL(hns_roce_cmd_event);
/* this should be called with "use_events" */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index b1c94223c28b..9549ae51a0dd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -88,6 +88,16 @@ enum {
HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+
+ /* EQC commands */
+ HNS_ROCE_CMD_CREATE_AEQC = 0x80,
+ HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
+ HNS_ROCE_CMD_QUERY_AEQC = 0x82,
+ HNS_ROCE_CMD_DESTROY_AEQC = 0x83,
+ HNS_ROCE_CMD_CREATE_CEQC = 0x90,
+ HNS_ROCE_CMD_MODIFY_CEQC = 0x91,
+ HNS_ROCE_CMD_QUERY_CEQC = 0x92,
+ HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
};
enum {
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 7ecb7a4147a8..dd67fafd0c40 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -376,6 +376,12 @@
#define ROCEE_RX_CMQ_TAIL_REG 0x07024
#define ROCEE_RX_CMQ_HEAD_REG 0x07028
+#define ROCEE_VF_MB_CFG0_REG 0x40
+#define ROCEE_VF_MB_STATUS_REG 0x58
+
+#define ROCEE_VF_EQ_DB_CFG0_REG 0x238
+#define ROCEE_VF_EQ_DB_CFG1_REG 0x23C
+
#define ROCEE_VF_SMAC_CFG0_REG 0x12000
#define ROCEE_VF_SMAC_CFG1_REG 0x12004
@@ -385,4 +391,9 @@
#define ROCEE_VF_SGID_CFG3_REG 0x1000c
#define ROCEE_VF_SGID_CFG4_REG 0x10010
+#define ROCEE_VF_ABN_INT_CFG_REG 0x13000
+#define ROCEE_VF_ABN_INT_ST_REG 0x13004
+#define ROCEE_VF_ABN_INT_EN_REG 0x13008
+#define ROCEE_VF_EVENT_INT_EN_REG 0x1300c
+
#endif /* _HNS_ROCE_COMMON_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 2111b57a3489..bccc9b54c9ce 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -196,15 +196,14 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
- if (hr_dev->eq_table.eq) {
- /* Waiting interrupt process procedure carried out */
- synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
-
- /* wait for all interrupt processed */
- if (atomic_dec_and_test(&hr_cq->refcount))
- complete(&hr_cq->free);
- wait_for_completion(&hr_cq->free);
- }
+
+ /* Wait for the interrupt handling procedure to finish */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+ /* wait until all interrupts are processed */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
@@ -460,6 +459,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
++cq->arm_sn;
cq->comp(cq);
}
+EXPORT_SYMBOL_GPL(hns_roce_cq_completion);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
@@ -482,6 +482,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}
+EXPORT_SYMBOL_GPL(hns_roce_cq_event);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index b154ce40cded..42c3b5a2d441 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -62,12 +62,16 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
-#define HNS_ROCE_MAX_IRQ_NUM 34
+#define HNS_ROCE_MAX_IRQ_NUM 128
-#define HNS_ROCE_COMP_VEC_NUM 32
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
-#define HNS_ROCE_AEQE_VEC_NUM 1
-#define HNS_ROCE_AEQE_OF_VEC_NUM 1
+#define HNS_ROCE_CEQ 0
+#define HNS_ROCE_AEQ 1
+
+#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
+#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT 28
@@ -130,6 +134,7 @@ enum hns_roce_event {
HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
HNS_ROCE_EVENT_TYPE_MB = 0x13,
HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW = 0x14,
+ HNS_ROCE_EVENT_TYPE_FLR = 0x15,
};
/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */
@@ -173,6 +178,7 @@ enum {
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
+ HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2)
};
enum hns_roce_mtt_type {
@@ -441,6 +447,21 @@ struct hns_roce_cmd_mailbox {
struct hns_roce_dev;
+struct hns_roce_rinl_sge {
+ void *addr;
+ u32 len;
+};
+
+struct hns_roce_rinl_wqe {
+ struct hns_roce_rinl_sge *sg_list;
+ u32 sge_cnt;
+};
+
+struct hns_roce_rinl_buf {
+ struct hns_roce_rinl_wqe *wqe_list;
+ u32 wqe_cnt;
+};
+
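
Editorial aside: the three structures above form a per-QP cache of receive SGEs, one hns_roce_rinl_wqe per RQ WQE, each recording the posted sg_list addresses so that inline payload delivered via the CQE path can later be copied back into the user buffers. A minimal allocation sketch (editorial, error unwinding elided, assuming <linux/slab.h> and the driver headers):

        static int alloc_rq_inline_buf_sketch(struct hns_roce_qp *hr_qp,
                                              u32 wqe_cnt, u32 max_sge)
        {
                struct hns_roce_rinl_buf *buf = &hr_qp->rq_inl_buf;
                u32 i;

                buf->wqe_cnt = wqe_cnt;
                buf->wqe_list = kcalloc(wqe_cnt, sizeof(*buf->wqe_list),
                                        GFP_KERNEL);
                if (!buf->wqe_list)
                        return -ENOMEM;

                for (i = 0; i < wqe_cnt; i++) {
                        buf->wqe_list[i].sge_cnt = max_sge;
                        buf->wqe_list[i].sg_list = kcalloc(max_sge,
                                        sizeof(*buf->wqe_list[i].sg_list),
                                        GFP_KERNEL);
                        if (!buf->wqe_list[i].sg_list)
                                return -ENOMEM; /* unwind omitted for brevity */
                }

                return 0;
        }
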
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_buf hr_buf;
@@ -462,7 +483,9 @@ struct hns_roce_qp {
u8 resp_depth;
u8 state;
u32 access_flags;
+ u32 atomic_rd_en;
u32 pkey_index;
+ u32 qkey;
void (*event)(struct hns_roce_qp *,
enum hns_roce_event);
unsigned long qpn;
@@ -472,6 +495,8 @@ struct hns_roce_qp {
struct hns_roce_sge sge;
u32 next_sge;
+
+ struct hns_roce_rinl_buf rq_inl_buf;
};
struct hns_roce_sqp {
@@ -485,6 +510,45 @@ struct hns_roce_ib_iboe {
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
+enum {
+ HNS_ROCE_EQ_STAT_INVALID = 0,
+ HNS_ROCE_EQ_STAT_VALID = 2,
+};
+
+struct hns_roce_ceqe {
+ u32 comp;
+};
+
+struct hns_roce_aeqe {
+ u32 asyn;
+ union {
+ struct {
+ u32 qp;
+ u32 rsv0;
+ u32 rsv1;
+ } qp_event;
+
+ struct {
+ u32 cq;
+ u32 rsv0;
+ u32 rsv1;
+ } cq_event;
+
+ struct {
+ u32 ceqe;
+ u32 rsv0;
+ u32 rsv1;
+ } ce_event;
+
+ struct {
+ __le64 out_param;
+ __le16 token;
+ u8 status;
+ u8 rsv0;
+ } __packed cmd;
+ } event;
+};
+
struct hns_roce_eq {
struct hns_roce_dev *hr_dev;
void __iomem *doorbell;
@@ -498,11 +562,31 @@ struct hns_roce_eq {
int log_page_size;
int cons_index;
struct hns_roce_buf_list *buf_list;
+ int over_ignore;
+ int coalesce;
+ int arm_st;
+ u64 eqe_ba;
+ int eqe_ba_pg_sz;
+ int eqe_buf_pg_sz;
+ int hop_num;
+ u64 *bt_l0; /* Base address table for L0 */
+ u64 **bt_l1; /* Base address table for L1 */
+ u64 **buf;
+ dma_addr_t l0_dma;
+ dma_addr_t *l1_dma;
+ dma_addr_t *buf_dma;
+ u32 l0_last_num; /* L0 last chunk num */
+ u32 l1_last_num; /* L1 last chunk num */
+ int eq_max_cnt;
+ int eq_period;
+ int shift;
+ dma_addr_t cur_eqe_ba;
+ dma_addr_t nxt_eqe_ba;
};
struct hns_roce_eq_table {
struct hns_roce_eq *eq;
- void __iomem **eqc_base;
+ void __iomem **eqc_base; /* only for hw v1 */
};
struct hns_roce_caps {
@@ -528,7 +612,7 @@ struct hns_roce_caps {
u32 min_wqes;
int reserved_cqs;
int num_aeq_vectors; /* 1 */
- int num_comp_vectors; /* 32 ceq */
+ int num_comp_vectors;
int num_other_vectors;
int num_mtpts;
u32 num_mtt_segs;
@@ -550,7 +634,7 @@ struct hns_roce_caps {
u32 pbl_buf_pg_sz;
u32 pbl_hop_num;
int aeqe_depth;
- int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
+ int ceqe_depth;
enum ib_mtu max_mtu;
u32 qpc_bt_num;
u32 srqc_bt_num;
@@ -574,6 +658,9 @@ struct hns_roce_caps {
u32 cqe_ba_pg_sz;
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
+ u32 eqe_ba_pg_sz;
+ u32 eqe_buf_pg_sz;
+ u32 eqe_hop_num;
u32 chunk_sz; /* chunk size in non multihop mode*/
u64 flags;
};
@@ -623,6 +710,8 @@ struct hns_roce_hw {
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
int (*destroy_cq)(struct ib_cq *ibcq);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+ int (*init_eq)(struct hns_roce_dev *hr_dev);
+ void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
};
struct hns_roce_dev {
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
deleted file mode 100644
index d184431e2bf5..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ /dev/null
@@ -1,759 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include "hns_roce_common.h"
-#include "hns_roce_device.h"
-#include "hns_roce_eq.h"
-
-static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
-{
- roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
- (req_not << eq->log_entries), eq->doorbell);
- /* Memory barrier */
- mb();
-}
-
-static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_AEQ_ENTRY_SIZE;
-
- return (struct hns_roce_aeqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
-{
- struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);
-
- return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
- !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
-}
-
-static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SL_ERROR:
- dev_warn(dev, "QP %d, SL error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_PORT_ERROR:
- dev_warn(dev, "QP %d, port error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Access Violation Work Queue Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- int phy_port;
- int qpn;
-
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
- phy_port = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
- if (qpn <= 1)
- qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
- "QP %d, phy_port %d.\n", qpn, phy_port);
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqn;
-
- cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
-static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
- dev_warn(dev, "SDB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
- dev_warn(dev, "SDB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
- dev_warn(dev, "ODB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
- dev_warn(dev, "ODB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- default:
- break;
- }
-}
-
-static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_aeqe *aeqe;
- int aeqes_found = 0;
- int event_type;
-
- while ((aeqe = next_aeqe_sw(eq))) {
- dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- /* Memory barrier */
- rmb();
-
- event_type = roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "PATH MIG not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "COMMUNICATION established\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "SQ DRAINED not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "PATH MIG failed\n");
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
- case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
- dev_warn(dev, "SRQ not support!\n");
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
- dev_warn(dev, "port change.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_MB:
- hns_roce_cmd_event(hr_dev,
- le16_to_cpu(aeqe->event.cmd.token),
- aeqe->event.cmd.status,
- le64_to_cpu(aeqe->event.cmd.out_param
- ));
- break;
- case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- hns_roce_db_overflow_handle(hr_dev, aeqe);
- break;
- case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
- dev_warn(dev, "CEQ 0x%lx overflow.\n",
- roce_get_field(aeqe->event.ce_event.ceqe,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
- break;
- default:
- dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
- event_type, eq->eqn, eq->cons_index);
- break;
- }
-
- eq->cons_index++;
- aeqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
- dev_warn(dev, "cons_index overflow, set back to zero\n"
- );
- eq->cons_index = 0;
- }
- }
-
- eq_set_cons_index(eq, 0);
-
- return aeqes_found;
-}
-
-static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_CEQ_ENTRY_SIZE;
-
- return (struct hns_roce_ceqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);
-
- return (!!(roce_get_bit(ceqe->ceqe.comp,
- HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
- (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
-}
-
-static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe;
- int ceqes_found = 0;
- u32 cqn;
-
- while ((ceqe = next_ceqe_sw(eq))) {
- /* Memory barrier */
- rmb();
- cqn = roce_get_field(ceqe->ceqe.comp,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
- hns_roce_cq_completion(hr_dev, cqn);
-
- ++eq->cons_index;
- ceqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
- dev_warn(&eq->hr_dev->pdev->dev,
- "cons_index overflow, set back to zero\n");
- eq->cons_index = 0;
- }
- }
-
- eq_set_cons_index(eq, 0);
-
- return ceqes_found;
-}
-
-static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct device *dev = &eq->hr_dev->pdev->dev;
- int eqovf_found = 0;
- u32 caepaemask_val;
- u32 cealmovf_val;
- u32 caepaest_val;
- u32 aeshift_val;
- u32 ceshift_val;
- u32 cemask_val;
- int i = 0;
-
- /**
- * AEQ overflow ECC mult bit err CEQ overflow alarm
- * must clear interrupt, mask irq, clear irq, cancel mask operation
- */
- aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
-
- if (roce_get_bit(aeshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "AEQ overflow!\n");
-
- /* Set mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
- roce_set_bit(caepaest_val,
- ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
- roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
-
- /* Clear mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
- }
-
- /* CEQ almost overflow */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
- i * CEQ_REG_OFFSET);
-
- if (roce_get_bit(ceshift_val,
- ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
- eqovf_found++;
-
- /* Set mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- cealmovf_val = roce_read(hr_dev,
- ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cealmovf_val,
- ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
- 1);
- roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET, cealmovf_val);
-
- /* Clear mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
- }
- }
-
- /* ECC multi-bit error alarm */
- dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
-
- dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
-
- return eqovf_found;
-}
-
-static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- int eqes_found = 0;
-
- if (likely(eq->type_flag == HNS_ROCE_CEQ))
- /* CEQ irq routine, CEQ is pulse irq, not clear */
- eqes_found = hns_roce_ceq_int(hr_dev, eq);
- else if (likely(eq->type_flag == HNS_ROCE_AEQ))
- /* AEQ irq routine, AEQ is pulse irq, not clear */
- eqes_found = hns_roce_aeq_int(hr_dev, eq);
- else
- /* AEQ queue overflow irq */
- eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);
-
- return eqes_found;
-}
-
-static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
-{
- int int_work = 0;
- struct hns_roce_eq *eq = eq_ptr;
- struct hns_roce_dev *hr_dev = eq->hr_dev;
-
- int_work = hns_roce_eq_int(hr_dev, eq);
-
- return IRQ_RETVAL(int_work);
-}
-
-static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
- int enable_flag)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
- u32 val;
-
- val = readl(eqc);
-
- if (enable_flag)
- roce_set_field(val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_VALID);
- else
- roce_set_field(val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- writel(val, eqc);
-}
-
-static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t tmp_dma_addr;
- u32 eqconsindx_val = 0;
- u32 eqcuridx_val = 0;
- u32 eqshift_val = 0;
- int num_bas = 0;
- int ret;
- int i;
-
- num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
- dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
- (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
- num_bas);
- return -EINVAL;
- }
-
- eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
- if (!eq->buf_list)
- return -ENOMEM;
-
- for (i = 0; i < num_bas; ++i) {
- eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
- &tmp_dma_addr,
- GFP_KERNEL);
- if (!eq->buf_list[i].buf) {
- ret = -ENOMEM;
- goto err_out_free_pages;
- }
-
- eq->buf_list[i].map = tmp_dma_addr;
- memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
- }
- eq->cons_index = 0;
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
- eq->log_entries);
- writel(eqshift_val, eqc);
-
- /* Configure eq extended address 12~44bit */
- writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
-
- /*
- * Configure eq extended address 45~49 bit.
- * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
- * using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
- */
- roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
- eq->buf_list[0].map >> 44);
- roce_set_field(eqcuridx_val,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
- writel(eqcuridx_val, eqc + 8);
-
- /* Configure eq consumer index */
- roce_set_field(eqconsindx_val,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
- writel(eqconsindx_val, eqc + 0xc);
-
- return 0;
-
-err_out_free_pages:
- for (i = i - 1; i >= 0; i--)
- dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
- eq->buf_list[i].map);
-
- kfree(eq->buf_list);
- return ret;
-}
-
-static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- int i = 0;
- int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if (!eq->buf_list)
- return;
-
- for (i = 0; i < npages; ++i)
- dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
- eq->buf_list[i].buf, eq->buf_list[i].map);
-
- kfree(eq->buf_list);
-}
-
-static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
-{
- int i = 0;
- u32 aemask_val;
- int masken = 0;
-
- /* AEQ INT */
- aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- masken);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
-
- /* CEQ INT */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- /* IRQ mask */
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, masken);
- }
-}
-
-static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
-{
- /* Configure ce int interval */
- roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_INTERVAL);
-
- /* Configure ce int burst num */
- roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
-}
-
-int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_eq *eq = NULL;
- int eq_num = 0;
- int ret = 0;
- int i = 0;
- int j = 0;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
- if (!eq_table->eq)
- return -ENOMEM;
-
- eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
- GFP_KERNEL);
- if (!eq_table->eqc_base) {
- ret = -ENOMEM;
- goto err_eqc_base_alloc_fail;
- }
-
- for (i = 0; i < eq_num; i++) {
- eq = &eq_table->eq[i];
- eq->hr_dev = hr_dev;
- eq->eqn = i;
- eq->irq = hr_dev->irq[i];
- eq->log_page_size = PAGE_SHIFT;
-
- if (i < hr_dev->caps.num_comp_vectors) {
- /* CEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_SHIFT_0_REG +
- HNS_ROCE_CEQC_REG_OFFSET * i;
- eq->type_flag = HNS_ROCE_CEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
- HNS_ROCE_CEQC_REG_OFFSET * i;
- eq->entries = hr_dev->caps.ceqe_depth[i];
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = sizeof(struct hns_roce_ceqe);
- } else {
- /* AEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
- eq->type_flag = HNS_ROCE_AEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_AEQE_CONS_IDX_REG;
- eq->entries = hr_dev->caps.aeqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = sizeof(struct hns_roce_aeqe);
- }
- }
-
- /* Disable irq */
- hns_roce_int_mask_en(hr_dev);
-
- /* Configure CE irq interval and burst num */
- hns_roce_ce_int_default_cfg(hr_dev);
-
- for (i = 0; i < eq_num; i++) {
- ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
- if (ret) {
- dev_err(dev, "eq create failed\n");
- goto err_create_eq_fail;
- }
- }
-
- for (j = 0; j < eq_num; j++) {
- ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
- 0, hr_dev->irq_names[j], eq_table->eq + j);
- if (ret) {
- dev_err(dev, "request irq error!\n");
- goto err_request_irq_fail;
- }
- }
-
- for (i = 0; i < eq_num; i++)
- hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);
-
- return 0;
-
-err_request_irq_fail:
- for (j = j - 1; j >= 0; j--)
- free_irq(eq_table->eq[j].irq, eq_table->eq + j);
-
-err_create_eq_fail:
- for (i = i - 1; i >= 0; i--)
- hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
-
- kfree(eq_table->eqc_base);
-
-err_eqc_base_alloc_fail:
- kfree(eq_table->eq);
-
- return ret;
-}
-
-void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
-{
- int i;
- int eq_num;
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- for (i = 0; i < eq_num; i++) {
- /* Disable EQ */
- hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);
-
- free_irq(eq_table->eq[i].irq, eq_table->eq + i);
-
- hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
- }
-
- kfree(eq_table->eqc_base);
- kfree(eq_table->eq);
-}
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
deleted file mode 100644
index c6d212d12e03..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_eq.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_EQ_H
-#define _HNS_ROCE_EQ_H
-
-#define HNS_ROCE_CEQ 1
-#define HNS_ROCE_AEQ 2
-
-#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
-#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
-#define HNS_ROCE_CEQC_REG_OFFSET 0x18
-
-#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
-#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
-
-#define HNS_ROCE_INT_MASK_DISABLE 0
-#define HNS_ROCE_INT_MASK_ENABLE 1
-
-#define EQ_ENABLE 1
-#define EQ_DISABLE 0
-#define CONS_INDEX_MASK 0xffff
-
-#define CEQ_REG_OFFSET 0x18
-
-enum {
- HNS_ROCE_EQ_STAT_INVALID = 0,
- HNS_ROCE_EQ_STAT_VALID = 2,
-};
-
-struct hns_roce_aeqe {
- u32 asyn;
- union {
- struct {
- u32 qp;
- u32 rsv0;
- u32 rsv1;
- } qp_event;
-
- struct {
- u32 cq;
- u32 rsv0;
- u32 rsv1;
- } cq_event;
-
- struct {
- u32 port;
- u32 rsv0;
- u32 rsv1;
- } port_event;
-
- struct {
- u32 ceqe;
- u32 rsv0;
- u32 rsv1;
- } ce_event;
-
- struct {
- __le64 out_param;
- __le16 token;
- u8 status;
- u8 rsv0;
- } __packed cmd;
- } event;
-};
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M \
- (((1UL << 8) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M \
- (((1UL << 7) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)
-
-#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
- (((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \
- (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
-
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
- (((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
-
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M \
- (((1UL << 5) - 1) << HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)
-
-struct hns_roce_ceqe {
- union {
- int comp;
- } ceqe;
-};
-
-#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
-
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M \
- (((1UL << 16) - 1) << HNS_ROCE_CEQE_CEQE_COMP_CQN_S)
-
-#endif /* _HNS_ROCE_EQ_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index af27168faf0f..21ca9fa7c9d1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -33,6 +33,7 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
@@ -774,7 +775,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
goto create_lp_qp_failed;
}
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
IB_QPS_INIT, IB_QPS_RTR);
if (ret) {
dev_err(dev, "modify qp failed(%d)!\n", ret);
@@ -1492,9 +1493,9 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
caps->num_uars = HNS_ROCE_V1_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
- caps->num_aeq_vectors = HNS_ROCE_AEQE_VEC_NUM;
- caps->num_comp_vectors = HNS_ROCE_COMP_VEC_NUM;
- caps->num_other_vectors = HNS_ROCE_AEQE_OF_VEC_NUM;
+ caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
@@ -1529,10 +1530,8 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->num_ports + 1;
}
- for (i = 0; i < caps->num_comp_vectors; i++)
- caps->ceqe_depth[i] = HNS_ROCE_V1_NUM_COMP_EQE;
-
- caps->aeqe_depth = HNS_ROCE_V1_NUM_ASYNC_EQE;
+ caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
+ caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
ROCEE_ACK_DELAY_REG));
caps->max_mtu = IB_MTU_2048;
@@ -2312,15 +2311,16 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->immediate_data);
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immediate_data));
break;
case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
if (roce_get_bit(cqe->cqe_byte_4,
CQE_BYTE_4_IMM_INDICATOR_S)) {
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(
- cqe->immediate_data);
+ wc->ex.imm_data = cpu_to_be32(
+ le32_to_cpu(cqe->immediate_data));
} else {
wc->opcode = IB_WC_RECV;
wc->wc_flags = 0;
@@ -3960,6 +3960,732 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
return ret;
}
+static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
+{
+ roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
+ (req_not << eq->log_entries), eq->doorbell);
+}
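
Editorial aside: the doorbell value packs the 16-bit consumer index into the low bits and places the request-notify flag one bit above the ring size (bit log_entries). Worked example, assuming a 4096-entry EQ (log_entries = 12), cons_index = 0x0234 and req_not = 1: (0x0234 & 0xffff) | (1 << 12) = 0x1234. Note cons_index may already carry the phase bit above the ring size, which is why the low 16 bits are masked first. A sketch of the value computed above (not part of the patch):

        static u32 eq_db_value_sketch(u32 cons_index, int req_not,
                                      int log_entries)
        {
                return (cons_index & HNS_ROCE_V1_CONS_IDX_M) |
                       ((u32)req_not << log_entries);
        }
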
+
+static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_warn(dev, "QP %d, WQE shift error\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SL_ERROR:
+ dev_warn(dev, "QP %d, SL error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_PORT_ERROR:
+ dev_warn(dev, "QP %d, port error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Access Violation Work Queue Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int phy_port;
+ int qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ phy_port = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+ if (qpn <= 1)
+ qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqn;
+
+ cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_cq_event(hr_dev, cqn, event_type);
+}
+
+static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
+ dev_warn(dev, "SDB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
+ dev_warn(dev, "SDB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
+ dev_warn(dev, "ODB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
+ dev_warn(dev, "ODB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
+static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
+
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
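
Editorial aside: the return expression above implements the classic owner (phase) bit scheme. cons_index deliberately counts through twice the ring, 0 to 2 * entries - 1 (hence the "2 * depth - 1" wrap check in the interrupt handlers below), so bit log2(entries) acts as a software phase flag, while hardware toggles each entry's owner bit on every pass over the ring; an entry is fresh exactly when the two disagree. An editorial sketch of the same test, assuming entries is a power of two and <linux/types.h>:

        static bool eqe_is_fresh_sketch(u32 owner_bit, u32 cons_index,
                                        u32 entries)
        {
                u32 sw_phase = !!(cons_index & entries); /* 0 on even passes */

                return (owner_bit & 1) != sw_phase;
        }
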
+
+static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqes_found = 0;
+ int event_type;
+
+ while ((aeqe = next_aeqe_sw_v1(eq))) {
+
+ /* Make sure we read the AEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "PATH MIG not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "COMMUNICATION established\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "SQ DRAINED not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "PATH MIG failed\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ dev_warn(dev, "SRQ not support!\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
+ dev_warn(dev, "port change.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param));
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ 0x%lx overflow.\n",
+ roce_get_field(aeqe->event.ce_event.ceqe,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
+ break;
+ default:
+ dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ eq->cons_index++;
+ aeqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v1(eq, 0);
+
+ return aeqes_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
+
+ return (!!(roce_get_bit(ceqe->comp,
+ HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+ int ceqes_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw_v1(eq))) {
+
+ /* Make sure we read CEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ cqn = roce_get_field(ceqe->comp,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
+ dev_warn(&eq->hr_dev->pdev->dev,
+ "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v1(eq, 0);
+
+ return ceqes_found;
+}
+
+static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ int int_work = 0;
+
+ if (eq->type_flag == HNS_ROCE_CEQ)
+ /* CEQ irq routine; the CEQ is a pulsed irq, nothing to clear */
+ int_work = hns_roce_v1_ceq_int(hr_dev, eq);
+ else
+ /* AEQ irq routine; the AEQ is a pulsed irq, nothing to clear */
+ int_work = hns_roce_v1_aeq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
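
Editorial note: IRQ_RETVAL() (from <linux/irqreturn.h>) maps the count of processed EQEs onto the irqreturn_t contract, roughly:

        /* #define IRQ_RETVAL(x)  ((x) ? IRQ_HANDLED : IRQ_NONE) */

so a handler that found no fresh EQEs reports IRQ_NONE and lets the IRQ core detect spurious interrupts on the vector.
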
+
+static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ struct device *dev = &hr_dev->pdev->dev;
+ int int_work = 0;
+ u32 caepaemask_val;
+ u32 cealmovf_val;
+ u32 caepaest_val;
+ u32 aeshift_val;
+ u32 ceshift_val;
+ u32 cemask_val;
+ int i;
+
+ /*
+ * Abnormal interrupt: AEQ overflow, ECC multi-bit error and CEQ
+ * overflow must be handled by hand: mask the irq, clear the
+ * interrupt state (write 1 to clear), then undo the mask
+ */
+ aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
+
+ /* AEQE overflow */
+ if (roce_get_bit(aeshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "AEQ overflow!\n");
+
+ /* Set mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
+ roce_set_bit(caepaest_val,
+ ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
+
+ /* Clear mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+ }
+
+ /* CEQ almost overflow */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ i * CEQ_REG_OFFSET);
+
+ if (roce_get_bit(ceshift_val,
+ ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
+ int_work++;
+
+ /* Set mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ cealmovf_val = roce_read(hr_dev,
+ ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cealmovf_val,
+ ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
+ 1);
+ roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET, cealmovf_val);
+
+ /* Clear mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+ }
+ }
+
+ /* ECC multi-bit error alarm */
+ dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
+
+ dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
+
+ return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
+{
+ u32 aemask_val;
+ int masken = 0;
+ int i;
+
+ /* AEQ INT */
+ aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ masken);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
+
+ /* CEQ INT */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ /* IRQ mask */
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, masken);
+ }
+}
+
+static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+ int i;
+
+ if (!eq->buf_list)
+ return;
+
+ for (i = 0; i < npages; ++i)
+ dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
+ eq->buf_list[i].buf, eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+}
+
+static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
+ int enable_flag)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
+ u32 val;
+
+ val = readl(eqc);
+
+ if (enable_flag)
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_VALID);
+ else
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ writel(val, eqc);
+}
+
+static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t tmp_dma_addr;
+ u32 eqconsindx_val = 0;
+ u32 eqcuridx_val = 0;
+ u32 eqshift_val = 0;
+ int num_bas;
+ int ret;
+ int i;
+
+ num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+
+ if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
+ dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
+ (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
+ num_bas);
+ return -EINVAL;
+ }
+
+ eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
+ if (!eq->buf_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bas; ++i) {
+ eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
+ &tmp_dma_addr,
+ GFP_KERNEL);
+ if (!eq->buf_list[i].buf) {
+ ret = -ENOMEM;
+ goto err_out_free_pages;
+ }
+
+ eq->buf_list[i].map = tmp_dma_addr;
+ memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
+ }
+ eq->cons_index = 0;
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
+ eq->log_entries);
+ writel(eqshift_val, eqc);
+
+ /* Configure eq extended address 12~44bit */
+ writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
+
+ /*
+ * Configure eq extended address bits 45~49.
+ * 44 = 32 + 12: the address handed to the hardware is shifted right
+ * by 12 because 4K pages are used, and by a further 32 when
+ * calculating the high 32-bit value written to the hardware.
+ */
+ roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
+ eq->buf_list[0].map >> 44);
+ roce_set_field(eqcuridx_val,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
+ writel(eqcuridx_val, eqc + 8);
+
+ /* Configure eq consumer index */
+ roce_set_field(eqconsindx_val,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
+ writel(eqconsindx_val, eqc + 0xc);
+
+ return 0;
+
+err_out_free_pages:
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
+ eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+ return ret;
+}
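
Editorial aside: the two register writes above split a DMA address of up to 49 bits: bits 12..43 go into the word at eqc + 4 and bits 44..48 into the BT_H field. Worked example for a hypothetical 4K-aligned address map = 0x123456789a000: (u32)(map >> 12) = 0x3456789a and map >> 44 = 0x12. A sketch of the split (not part of the patch, assuming <linux/types.h>):

        static void eq_addr_split_sketch(dma_addr_t map, u32 *lo, u32 *bt_h)
        {
                *lo = (u32)(map >> 12);   /* bits 12..43, written to eqc + 4 */
                *bt_h = (u32)(map >> 44); /* bits 44..48, the BT_H field */
        }
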
+
+static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_eq *eq;
+ int irq_num;
+ int eq_num;
+ int ret;
+ int i, j;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
+ GFP_KERNEL);
+ if (!eq_table->eqc_base) {
+ ret = -ENOMEM;
+ goto err_eqc_base_alloc_fail;
+ }
+
+ for (i = 0; i < eq_num; i++) {
+ eq = &eq_table->eq[i];
+ eq->hr_dev = hr_dev;
+ eq->eqn = i;
+ eq->irq = hr_dev->irq[i];
+ eq->log_page_size = PAGE_SHIFT;
+
+ if (i < hr_dev->caps.num_comp_vectors) {
+ /* CEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ CEQ_REG_OFFSET * i;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
+ CEQ_REG_OFFSET * i;
+ eq->entries = hr_dev->caps.ceqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ } else {
+ /* AEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_AEQE_CONS_IDX_REG;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ }
+ }
+
+ /* Disable irq */
+ hns_roce_v1_int_mask_enable(hr_dev);
+
+ /* Configure ce int interval */
+ roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_INTERVAL);
+
+ /* Configure ce int burst num */
+ roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
+
+ for (i = 0; i < eq_num; i++) {
+ ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
+ if (ret) {
+ dev_err(dev, "eq create failed\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ for (j = 0; j < irq_num; j++) {
+ if (j < eq_num)
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v1_msix_interrupt_eq, 0,
+ hr_dev->irq_names[j],
+ &eq_table->eq[j]);
+ else
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v1_msix_interrupt_abn, 0,
+ hr_dev->irq_names[j], hr_dev);
+
+ if (ret) {
+ dev_err(dev, "request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ for (i = 0; i < eq_num; i++)
+ hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
+
+ return 0;
+
+err_request_irq_fail:
+ for (j -= 1; j >= 0; j--)
+ free_irq(hr_dev->irq[j], &eq_table->eq[j]);
+
+err_create_eq_fail:
+ for (i -= 1; i >= 0; i--)
+ hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
+
+ kfree(eq_table->eqc_base);
+
+err_eqc_base_alloc_fail:
+ kfree(eq_table->eq);
+
+ return ret;
+}
+
+static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+ for (i = 0; i < eq_num; i++) {
+ /* Disable EQ */
+ hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
+
+ free_irq(hr_dev->irq[i], &eq_table->eq[i]);
+
+ hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+ for (i = eq_num; i < irq_num; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ kfree(eq_table->eqc_base);
+ kfree(eq_table->eq);
+}
+
static const struct hns_roce_hw hns_roce_hw_v1 = {
.reset = hns_roce_v1_reset,
.hw_profile = hns_roce_v1_profile,
@@ -3983,6 +4709,8 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.poll_cq = hns_roce_v1_poll_cq,
.dereg_mr = hns_roce_v1_dereg_mr,
.destroy_cq = hns_roce_v1_destroy_cq,
+ .init_eq = hns_roce_v1_init_eq_table,
+ .cleanup_eq = hns_roce_v1_cleanup_eq_table,
};
static const struct of_device_id hns_roce_of_match[] = {
@@ -4060,10 +4788,6 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* get the mapped register base address */
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "memory resource not found!\n");
- return -EINVAL;
- }
hr_dev->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base);
@@ -4132,14 +4856,14 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* read the interrupt names from the DT or ACPI */
ret = device_property_read_string_array(dev, "interrupt-names",
hr_dev->irq_names,
- HNS_ROCE_MAX_IRQ_NUM);
+ HNS_ROCE_V1_MAX_IRQ_NUM);
if (ret < 0) {
dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
return ret;
}
/* fetch the interrupt numbers */
- for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
+ for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
if (hr_dev->irq[i] <= 0) {
dev_err(dev, "platform get of irq[=%d] failed!\n", i);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 21a07ef0afc9..b44ddd239060 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -60,8 +60,13 @@
#define HNS_ROCE_V1_GID_NUM 16
#define HNS_ROCE_V1_RESV_QP 8
-#define HNS_ROCE_V1_NUM_COMP_EQE 0x8000
-#define HNS_ROCE_V1_NUM_ASYNC_EQE 0x400
+#define HNS_ROCE_V1_MAX_IRQ_NUM 34
+#define HNS_ROCE_V1_COMP_VEC_NUM 32
+#define HNS_ROCE_V1_AEQE_VEC_NUM 1
+#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1
+
+#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000
+#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400
#define HNS_ROCE_V1_QPC_ENTRY_SIZE 256
#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
@@ -159,6 +164,41 @@
#define SDB_INV_CNT_OFFSET 8
#define SDB_ST_CMP_VAL 8
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
+
+#define HNS_ROCE_INT_MASK_DISABLE 0
+#define HNS_ROCE_INT_MASK_ENABLE 1
+
+#define CEQ_REG_OFFSET 0x18
+
+#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
+
+#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0)
+
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24)
+
+#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0)
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25)
+
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0)
+
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
+
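
Editorial aside: these _S/_M pairs replace the open-coded shift masks of the deleted hns_roce_eq.h and are consumed by the driver's roce_get_field()/roce_set_field() helpers from hns_roce_common.h, which follow the usual mask-and-shift pattern. As an illustration (paraphrasing those helpers, not quoting them), extracting the CQN from a CEQE comp word:

        static u32 ceqe_cqn_sketch(u32 comp)
        {
                return (comp & HNS_ROCE_CEQE_CEQE_COMP_CQN_M) >>
                       HNS_ROCE_CEQE_CEQE_COMP_CQN_S;
        }
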
struct hns_roce_cq_context {
u32 cqc_byte_4;
u32 cq_bt_l;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8e18445714a9..256fe110107a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <net/addrconf.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
@@ -51,32 +52,106 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
+static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ void *wqe, unsigned int *sge_ind,
+ struct ib_send_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ int i;
+
+ if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+ if (rc_sq_wqe->msg_len > hr_dev->caps.max_sq_inline) {
+ *bad_wr = wr;
+ dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
+ rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(wqe, ((void *)wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ wqe += wr->sg_list[i].length;
+ }
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
+ 1);
+ } else {
+ if (wr->num_sge <= 2) {
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+ }
+ } else {
+ roce_set_field(rc_sq_wqe->byte_20,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < 2; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+ }
+
+ dseg = get_send_extend_sge(qp,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < wr->num_sge - 2; i++) {
+ if (likely(wr->sg_list[i + 2].length)) {
+ set_data_seg_v2(dseg,
+ wr->sg_list + 2 + i);
+ dseg++;
+ (*sge_ind)++;
+ }
+ }
+ }
+
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
+ }
+
+ return 0;
+}
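
A note on the helper just added: set_rwqe_data_seg() takes one of two paths.
With IB_SEND_INLINE the payload bytes are copied straight into the WQE;
otherwise up to two data segments stay in the base WQE and any further SGEs
spill into the extended SGE ring, indexed by *sge_ind masked against the
power-of-two sge_cnt. A minimal userspace sketch of that split (the constant
and function below are illustrative assumptions, not the driver's API):

    #include <stdio.h>

    #define WQE_BASE_SGES 2        /* segments that fit in the base WQE */

    /* Model: how many SGEs go where, and how the extended-ring index
     * wraps (sge_cnt must be a power of two, as in the diff). */
    static void place_sges(int num_sge, unsigned int *sge_ind,
                           unsigned int sge_cnt)
    {
            int ext = num_sge > WQE_BASE_SGES ? num_sge - WQE_BASE_SGES : 0;

            printf("base=%d ext=%d ext_start=%u\n",
                   num_sge - ext, ext, *sge_ind & (sge_cnt - 1));
            *sge_ind += ext;       /* advance past the consumed ring slots */
    }

    int main(void)
    {
            unsigned int sge_ind = 6, sge_cnt = 8;

            place_sges(1, &sge_ind, sge_cnt);
            place_sges(5, &sge_ind, sge_cnt);  /* wraps past the ring end */
            return 0;
    }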
+
static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+ struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
unsigned int sge_ind = 0;
- unsigned int wqe_sz = 0;
unsigned int owner_bit;
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
+ bool loopback;
int ret = 0;
+ u8 *smac;
int nreq;
int i;
- if (unlikely(ibqp->qp_type != IB_QPT_RC)) {
+ if (unlikely(ibqp->qp_type != IB_QPT_RC &&
+ ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_UD)) {
dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
*bad_wr = NULL;
return -EOPNOTSUPP;
}
- if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
+ if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
+ qp->state == IB_QPS_RTR)) {
dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
*bad_wr = wr;
return -EINVAL;
@@ -106,161 +181,255 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wr->wr_id;
owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
- rc_sq_wqe = wqe;
- memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
- for (i = 0; i < wr->num_sge; i++)
- rc_sq_wqe->msg_len += wr->sg_list[i].length;
- rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+ /* Build the WQE differently according to the QP type */
+ if (ibqp->qp_type == IB_QPT_GSI) {
+ ud_sq_wqe = wqe;
+ memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
+
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
+ V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
+ V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
+ V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
+ V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
+ ah->av.mac[4]);
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
+ ah->av.mac[5]);
+
+ /* MAC loopback */
+ smac = (u8 *)hr_dev->dev_addr[qp->port];
+ loopback = ether_addr_equal_unaligned(ah->av.mac,
+ smac) ? 1 : 0;
+
+ roce_set_bit(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
+
+ roce_set_field(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
- (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+ for (i = 0; i < wr->num_sge; i++)
+ ud_sq_wqe->msg_len += wr->sg_list[i].length;
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
- (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+ ud_sq_wqe->immtdata = send_ieth(wr);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
- (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+ /* Set sig attr */
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
- owner_bit);
+ /* Set se attr */
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_READ);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+
+ roce_set_field(ud_sq_wqe->byte_16,
+ V2_UD_SEND_WQE_BYTE_16_PD_M,
+ V2_UD_SEND_WQE_BYTE_16_PD_S,
+ to_hr_pd(ibqp->pd)->pdn);
+
+ roce_set_field(ud_sq_wqe->byte_16,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
+ wr->num_sge);
+
+ roce_set_field(ud_sq_wqe->byte_20,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ sge_ind & (qp->sge.sge_cnt - 1));
+
+ roce_set_field(ud_sq_wqe->byte_24,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
+ ud_sq_wqe->qkey =
+ cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
+ qp->qkey : ud_wr(wr)->remote_qkey);
+ roce_set_field(ud_sq_wqe->byte_32,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_M,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_S,
+ ud_wr(wr)->remote_qpn);
+
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_S,
+ ah->av.vlan);
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
+ ah->av.hop_limit);
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
+ 0);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_SL_M,
+ V2_UD_SEND_WQE_BYTE_40_SL_S,
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_SL_SHIFT);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_M,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_S,
+ qp->port);
+
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
+ hns_get_gid_index(hr_dev, qp->phy_port,
+ ah->av.gid_index));
+
+ memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
+ GID_LEN_V2);
+
+ dseg = get_send_extend_sge(qp,
+ sge_ind & (qp->sge.sge_cnt - 1));
+ for (i = 0; i < wr->num_sge; i++) {
+ set_data_seg_v2(dseg + i, wr->sg_list + i);
+ sge_ind++;
+ }
+
+ ind++;
+ } else if (ibqp->qp_type == IB_QPT_RC) {
+ rc_sq_wqe = wqe;
+ memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+ for (i = 0; i < wr->num_sge; i++)
+ rc_sq_wqe->msg_len += wr->sg_list[i].length;
+
+ rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_FENCE_S,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_READ);
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_SEND:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
- break;
- case IB_WR_SEND_WITH_INV:
- roce_set_field(rc_sq_wqe->byte_4,
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_SEND:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
+ break;
+ case IB_WR_SEND_WITH_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
- break;
- case IB_WR_SEND_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
- break;
- case IB_WR_LOCAL_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_LOCAL_INV);
- break;
- case IB_WR_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
- break;
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
- break;
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+ break;
+ case IB_WR_LOCAL_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+ break;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
- break;
- case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
+ break;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
- break;
- default:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_MASK);
- break;
- }
-
- wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
- dseg = wqe;
- if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
- if (rc_sq_wqe->msg_len >
- hr_dev->caps.max_sq_inline) {
- ret = -EINVAL;
- *bad_wr = wr;
- dev_err(dev, "inline len(1-%d)=%d, illegal",
- rc_sq_wqe->msg_len,
- hr_dev->caps.max_sq_inline);
- goto out;
+ break;
+ default:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_MASK);
+ break;
}
- for (i = 0; i < wr->num_sge; i++) {
- memcpy(wqe, ((void *)wr->sg_list[i].addr),
- wr->sg_list[i].length);
- wqe += wr->sg_list[i].length;
- wqe_sz += wr->sg_list[i].length;
- }
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ dseg = wqe;
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+ ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
+ &sge_ind, bad_wr);
+ if (ret)
+ goto out;
+ ind++;
} else {
- if (wr->num_sge <= 2) {
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
- } else {
- roce_set_field(rc_sq_wqe->byte_20,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- sge_ind & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < 2; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
-
- dseg = get_send_extend_sge(qp,
- sge_ind & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < wr->num_sge - 2; i++) {
- set_data_seg_v2(dseg + i,
- wr->sg_list + 2 + i);
- sge_ind++;
- }
- }
-
- roce_set_field(rc_sq_wqe->byte_16,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
- wr->num_sge);
- wqe_sz += wr->num_sge *
- sizeof(struct hns_roce_v2_wqe_data_seg);
+ dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+ return -EOPNOTSUPP;
}
- ind++;
}
out:
@@ -299,6 +468,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db rq_db;
unsigned long flags;
@@ -347,6 +517,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
dseg[i].addr = 0;
}
+ /* rq support inline data */
+ sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
+ sge_list[i].len = wr->sg_list[i].length;
+ }
+
hr_qp->rq.wrid[ind] = wr->wr_id;
ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
@@ -908,9 +1086,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
- caps->num_aeq_vectors = 1;
- caps->num_comp_vectors = 63;
- caps->num_other_vectors = 0;
+ caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
@@ -955,12 +1133,18 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->cqe_ba_pg_sz = 0;
caps->cqe_buf_pg_sz = 0;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
+ caps->eqe_ba_pg_sz = 0;
+ caps->eqe_buf_pg_sz = 0;
+ caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
- HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
+ HNS_ROCE_CAP_FLAG_RQ_INLINE;
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
+ caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
@@ -1382,6 +1566,8 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
+ V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
@@ -1422,6 +1608,15 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
+
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_M,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_S,
+ HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_PERIOD_M,
+ V2_CQC_BYTE_56_CQ_PERIOD_S,
+ HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
@@ -1457,6 +1652,40 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
return 0;
}
+static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
+ struct hns_roce_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct hns_roce_rinl_sge *sge_list;
+ u32 wr_num, wr_cnt, sge_num;
+ u32 sge_cnt, data_len, size;
+ void *wqe_buf;
+
+ wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
+ wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
+
+ sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
+ sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
+ wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
+ data_len = wc->byte_len;
+
+ for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
+ size = min(sge_list[sge_cnt].len, data_len);
+ memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
+
+ data_len -= size;
+ wqe_buf += size;
+ }
+
+ if (data_len) {
+ wc->status = IB_WC_LOC_LEN_ERR;
+ return -EAGAIN;
+ }
+
+ return 0;
+}
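
hns_roce_handle_recv_inl_wqe() above drains the inlined payload out of the
receive WQE buffer into the SGEs the consumer posted, and reports
IB_WC_LOC_LEN_ERR when those SGEs cannot hold the whole message. A hedged
userspace model of the copy loop (the struct and helper are assumptions for
illustration only):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct sge { void *addr; uint32_t len; };   /* stand-in for the rinl SGE */

    /* Copy len bytes from src across the SGE list; -1 mirrors the
     * IB_WC_LOC_LEN_ERR case when data is left over. */
    static int copy_inline(struct sge *sges, int nsge,
                           const void *src, uint32_t len)
    {
            const uint8_t *p = src;

            for (int i = 0; i < nsge && len; i++) {
                    uint32_t size = sges[i].len < len ? sges[i].len : len;

                    memcpy(sges[i].addr, p, size);
                    p += size;
                    len -= size;
            }
            return len ? -1 : 0;
    }

    int main(void)
    {
            char a[4], b[4];
            struct sge sges[] = { { a, sizeof(a) }, { b, sizeof(b) } };

            printf("fit: %d\n", copy_inline(sges, 2, "0123456", 7));
            printf("overflow: %d\n", copy_inline(sges, 2, "0123456789", 10));
            return 0;
    }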
+
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
@@ -1469,6 +1698,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
u32 opcode;
u32 status;
int qpn;
+ int ret;
/* Find cqe according to consumer index */
cqe = next_cqe_sw_v2(hr_cq);
@@ -1636,7 +1866,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ wc->ex.imm_data = cqe->immtdata;
break;
case HNS_ROCE_V2_OPCODE_SEND:
wc->opcode = IB_WC_RECV;
@@ -1645,18 +1875,29 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ wc->ex.imm_data = cqe->immtdata;
break;
case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = cqe->rkey_immtdata;
+ wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
break;
default:
wc->status = IB_WC_GENERAL_ERR;
break;
}
+ if ((wc->qp->qp_type == IB_QPT_RC ||
+ wc->qp->qp_type == IB_QPT_UC) &&
+ (opcode == HNS_ROCE_V2_OPCODE_SEND ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
+ (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
+ ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
+ if (ret)
+ return -EAGAIN;
+ }
+
/* Update tail pointer, record wr_id */
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
@@ -1670,6 +1911,21 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);
+ wc->port_num = roce_get_field(cqe->byte_32,
+ V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
+ wc->pkey_index = 0;
+ memcpy(wc->smac, cqe->smac, 4);
+ wc->smac[4] = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_SMAC_4_M,
+ V2_CQE_BYTE_28_SMAC_4_S);
+ wc->smac[5] = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_SMAC_5_M,
+ V2_CQE_BYTE_28_SMAC_5_S);
+ wc->vlan_id = 0xffff;
+ wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
+ wc->network_hdr_type = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_PORT_TYPE_M,
+ V2_CQE_BYTE_28_PORT_TYPE_S);
}
return 0;
@@ -1859,8 +2115,39 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
return ret;
}
+static void set_access_flags(struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
+ const struct ib_qp_attr *attr, int attr_mask)
+{
+ u8 dest_rd_atomic;
+ u32 access_flags;
+
+ dest_rd_atomic = !!(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
+ attr->max_dest_rd_atomic : hr_qp->resp_depth;
+
+ access_flags = !!(attr_mask & IB_QP_ACCESS_FLAGS) ?
+ attr->qp_access_flags : hr_qp->atomic_rd_en;
+
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+}
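
set_access_flags() picks each attribute from either the modify-QP request or
the cached QP state, depending on attr_mask, and then strips everything but
remote write once the destination RD-atomic depth is zero (a responder with
no read/atomic resources cannot honour those operations). A small sketch of
the masking rule (the flag values are illustrative, not the rdma-core
encodings):

    #include <stdio.h>

    #define ACCESS_REMOTE_WRITE  (1u << 0)   /* illustrative bit values */
    #define ACCESS_REMOTE_READ   (1u << 1)
    #define ACCESS_REMOTE_ATOMIC (1u << 2)

    /* Effective access when the destination RD-atomic depth is zero. */
    static unsigned int effective_access(unsigned int flags, int dest_rd_atomic)
    {
            if (!dest_rd_atomic)
                    flags &= ACCESS_REMOTE_WRITE;
            return flags;
    }

    int main(void)
    {
            unsigned int f = ACCESS_REMOTE_READ | ACCESS_REMOTE_WRITE |
                             ACCESS_REMOTE_ATOMIC;

            printf("depth=4 -> 0x%x\n", effective_access(f, 4));
            printf("depth=0 -> 0x%x\n", effective_access(f, 0));
            return 0;
    }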
+
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
+ int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -1877,9 +2164,18 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ if (ibqp->qp_type == IB_QPT_GSI)
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sge.sge_cnt));
+ else
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
@@ -1944,18 +2240,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = attr->qkey;
+ qpc_mask->qkey_xrcd = 0;
+ hr_qp->qkey = attr->qkey;
+ }
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -2176,9 +2467,17 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ if (ibqp->qp_type == IB_QPT_GSI)
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sge.sge_cnt));
+ else
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
@@ -2239,7 +2538,7 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_80_RX_CQN_S, 0);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
@@ -2255,10 +2554,10 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
}
- if (attr_mask & IB_QP_PKEY_INDEX)
- context->qkey_xrcd = attr->pkey_index;
- else
- context->qkey_xrcd = hr_qp->pkey_index;
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = attr->qkey;
+ qpc_mask->qkey_xrcd = 0;
+ }
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
@@ -2354,7 +2653,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S,
- hr_qp->sq.max_gs > 2 ? hr_dev->caps.mtt_hop_num : 0);
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
+ hr_dev->caps.mtt_hop_num : 0);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
@@ -2463,11 +2763,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
}
- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S,
- ilog2((unsigned int)attr->max_dest_rd_atomic));
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S, 0);
+ if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+ attr->max_dest_rd_atomic) {
+ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S,
+ fls(attr->max_dest_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S, 0);
+ }
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
@@ -2511,8 +2814,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
+ else
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
@@ -2557,12 +2865,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S,
- ilog2((unsigned int)attr->max_rd_atomic));
- roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S, 0);
-
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
@@ -2625,13 +2927,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- context->sq_cur_sge_blk_addr = hr_qp->sq.max_gs > 2 ?
+ context->sq_cur_sge_blk_addr =
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
((u32)(mtts[hr_qp->sge.offset / page_size]
>> PAGE_ADDR_SHIFT)) : 0;
roce_set_field(context->byte_184_irrl_idx,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
- hr_qp->sq.max_gs > 2 ?
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
(mtts[hr_qp->sge.offset / page_size] >>
(32 + PAGE_ADDR_SHIFT)) : 0);
qpc_mask->sq_cur_sge_blk_addr = 0;
@@ -2766,6 +3069,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S,
+ fls(attr->max_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_208_irrl,
+ V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S, 0);
+ }
return 0;
}
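
The new IB_QP_MAX_QP_RD_ATOMIC branch above (and its RR_MAX counterpart in
the RTR path) encodes the depth as fls(max_rd_atomic - 1), i.e. the ceiling
of log2, so a request for n outstanding reads rounds up to the next power of
two that the hardware field can express. A quick userspace check, with
fls_u32() as a local stand-in for the kernel's fls():

    #include <stdio.h>

    /* Position of the highest set bit; fls_u32(0) == 0, like kernel fls(). */
    static int fls_u32(unsigned int x)
    {
            int r = 0;

            while (x) {
                    r++;
                    x >>= 1;
            }
            return r;
    }

    int main(void)
    {
            /* fls(n - 1) == ceil(log2(n)) for n >= 1 */
            for (unsigned int n = 1; n <= 8; n++)
                    printf("max_rd_atomic=%u -> field=%d\n", n, fls_u32(n - 1));
            return 0;
    }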
@@ -2794,7 +3105,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
*/
memset(qpc_mask, 0xff, sizeof(*qpc_mask));
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
+ qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -2829,6 +3141,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
goto out;
}
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
/* Every status migrate must change state */
roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, new_state);
@@ -2845,6 +3160,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->state = new_state;
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ hr_qp->atomic_rd_en = attr->qp_access_flags;
+
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT) {
@@ -3098,6 +3416,11 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+ }
+
return 0;
}
@@ -3162,6 +3485,1146 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
return ret;
}
+static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+{
+ u32 doorbell[2];
+
+ doorbell[0] = 0;
+ doorbell[1] = 0;
+
+ if (eq->type_flag == HNS_ROCE_AEQ) {
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+ HNS_ROCE_V2_EQ_DB_CMD_S,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_AEQ :
+ HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
+ } else {
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
+ HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
+
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+ HNS_ROCE_V2_EQ_DB_CMD_S,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_CEQ :
+ HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
+ }
+
+ roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
+ HNS_ROCE_V2_EQ_DB_PARA_S,
+ (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+
+ hns_roce_write64_k(doorbell, eq->doorbell);
+}
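
set_eq_cons_index_v2() assembles a two-word doorbell: word 0 carries the EQ
tag and an arm-state-dependent command, word 1 the consumer index, each
written with the mask/shift pattern that roce_set_field() implements. A
generic sketch of that pattern (the field layout below is made up for
illustration and is not the V2 register map):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The read-modify-write behind roce_set_field(). */
    static void set_field(uint32_t *w, uint32_t mask, int shift, uint32_t val)
    {
            *w = (*w & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
            uint32_t db[2] = { 0, 0 };

            set_field(&db[0], 0x000000ff, 0, 3);       /* eq tag */
            set_field(&db[0], 0x00030000, 16, 1);      /* armed command */
            set_field(&db[1], 0x0000ffff, 0, 0x42);    /* consumer index */

            printf("db: %08" PRIx32 " %08" PRIx32 "\n", db[0], db[1]);
            return 0;
    }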
+
+static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ u32 qpn)
+{
+ struct device *dev = hr_dev->dev;
+ int sub_type;
+
+ dev_warn(dev, "Local work queue catastrophic error.\n");
+ sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ switch (sub_type) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
+ break;
+ }
+}
+
+static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, u32 qpn)
+{
+ struct device *dev = hr_dev->dev;
+ int sub_type;
+
+ dev_warn(dev, "Local access violation work queue error.\n");
+ sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ switch (sub_type) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
+ break;
+ }
+}
+
+static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = hr_dev->dev;
+ u32 qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "Communication established.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "Send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid request local work queue error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = hr_dev->dev;
+ u32 cqn;
+
+ cqn = roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_cq_event(hr_dev, cqn, event_type);
+}
+
+static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
+ off % buf_chk_sz);
+}
+
+static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
+ off % buf_chk_sz);
+ else
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe;
+
+ if (!eq->hop_num)
+ aeqe = get_aeqe_v2(eq, eq->cons_index);
+ else
+ aeqe = mhop_get_aeqe(eq, eq->cons_index);
+
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
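
next_aeqe_sw_v2() (and its CEQ twin below) is the classic phase-bit
handshake: the consumer index runs from 0 to 2 * entries - 1, so the
(cons_index & entries) bit flips once per lap around the ring, and an entry
is consumable only while its owner bit disagrees with the current lap
parity. A standalone model of the test, assuming a power-of-two ring as the
driver does:

    #include <stdio.h>

    /* Ready when the entry's owner bit differs from the lap parity. */
    static int eqe_is_ready(int owner_bit, unsigned int cons_index,
                            unsigned int entries)
    {
            return (!!owner_bit) ^ (!!(cons_index & entries));
    }

    int main(void)
    {
            unsigned int entries = 4;

            for (unsigned int ci = 0; ci < 2 * entries; ci++)
                    printf("ci=%u lap=%d ready(owner=1)=%d\n",
                           ci, !!(ci & entries),
                           eqe_is_ready(1, ci, entries));
            return 0;
    }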
+
+static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqe_found = 0;
+ int event_type;
+
+ while ((aeqe = next_aeqe_sw_v2(eq))) {
+
+ /* Make sure we read AEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
+ HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "Path migrated succeeded.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "Path migration failed.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ dev_warn(dev, "SRQ not support.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ dev_warn(dev, "DB overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param));
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_FLR:
+ dev_warn(dev, "Function level reset.\n");
+ break;
+ default:
+ dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ ++eq->cons_index;
+ aeqe_found = 1;
+
+ if (eq->cons_index > (2 * eq->entries - 1)) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v2(eq);
+ return aeqe_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
+ off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
+ off % buf_chk_sz);
+ else
+ return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
+ buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+
+ if (!eq->hop_num)
+ ceqe = get_ceqe_v2(eq, eq->cons_index);
+ else
+ ceqe = mhop_get_ceqe(eq, eq->cons_index);
+
+ return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_ceqe *ceqe;
+ int ceqe_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw_v2(eq))) {
+
+ /* Make sure we read CEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ cqn = roce_get_field(ceqe->comp,
+ HNS_ROCE_V2_CEQE_COMP_CQN_M,
+ HNS_ROCE_V2_CEQE_COMP_CQN_S);
+
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqe_found = 1;
+
+ if (eq->cons_index > (2 * eq->entries - 1)) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v2(eq);
+
+ return ceqe_found;
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ int int_work = 0;
+
+ if (eq->type_flag == HNS_ROCE_CEQ)
+ /* Completion event interrupt */
+ int_work = hns_roce_v2_ceq_int(hr_dev, eq);
+ else
+ /* Asynchronous event interrupt */
+ int_work = hns_roce_v2_aeq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ struct device *dev = hr_dev->dev;
+ int int_work = 0;
+ u32 int_st;
+ u32 int_en;
+
+ /* Abnormal interrupt */
+ int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
+ int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
+
+ if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
+ dev_err(dev, "AEQ overflow!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
+ dev_err(dev, "BUS ERR!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
+ dev_err(dev, "OTHER ERR!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else
+ dev_err(dev, "There is no abnormal irq found!\n");
+
+ return IRQ_RETVAL(int_work);
+}
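
Each branch of the abnormal-interrupt handler acknowledges the latched
condition by writing the same bit back to ROCEE_VF_ABN_INT_ST_REG
(write-1-to-clear) and then re-arms the enable bit. A compact model of the
write-1-to-clear convention (bit positions invented for illustration):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define INT_ST_AEQ_OVERFLOW (UINT32_C(1) << 0)   /* illustrative bits */
    #define INT_ST_BUS_ERR      (UINT32_C(1) << 1)

    /* Writing a 1 to a latched status bit clears it; other bits keep. */
    static uint32_t ack_status(uint32_t *int_st, uint32_t bit)
    {
            uint32_t pending = *int_st & bit;

            *int_st &= ~pending;
            return pending;
    }

    int main(void)
    {
            uint32_t st = INT_ST_AEQ_OVERFLOW | INT_ST_BUS_ERR;

            if (ack_status(&st, INT_ST_AEQ_OVERFLOW))
                    printf("AEQ overflow handled, st now 0x%" PRIx32 "\n", st);
            return 0;
    }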
+
+static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
+ int eq_num, int enable_flag)
+{
+ int i;
+
+ if (enable_flag == EQ_ENABLE) {
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET,
+ HNS_ROCE_V2_VF_EVENT_INT_EN_M);
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+ HNS_ROCE_V2_VF_ABN_INT_EN_M);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+ HNS_ROCE_V2_VF_ABN_INT_CFG_M);
+ } else {
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET,
+ HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+ HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+ HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
+ }
+}
+
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (eqn < hr_dev->caps.num_comp_vectors)
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+ 0, HNS_ROCE_CMD_DESTROY_CEQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ else
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+ 0, HNS_ROCE_CMD_DESTROY_AEQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret)
+ dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+}
+
+static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ u64 idx;
+ u64 size;
+ u32 buf_chk_sz;
+ u32 bt_chk_sz;
+ u32 mhop_num;
+ int eqe_alloc;
+ int ba_num;
+ int i = 0;
+ int j = 0;
+
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+ bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+ ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
+ buf_chk_sz;
+
+ /* hop_num = 0 */
+ if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ dma_free_coherent(dev, (unsigned int)(eq->entries *
+ eq->eqe_size), eq->bt_l0, eq->l0_dma);
+ return;
+ }
+
+ /* hop_num = 1 or hop_num = 2 */
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ if (mhop_num == 1) {
+ for (i = 0; i < eq->l0_last_num; i++) {
+ if (i == eq->l0_last_num - 1) {
+ eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
+ size = (eq->entries - eqe_alloc) * eq->eqe_size;
+ dma_free_coherent(dev, size, eq->buf[i],
+ eq->buf_dma[i]);
+ break;
+ }
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
+ eq->buf_dma[i]);
+ }
+ } else if (mhop_num == 2) {
+ for (i = 0; i < eq->l0_last_num; i++) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * (bt_chk_sz / 8) + j;
+ if ((i == eq->l0_last_num - 1)
+ && j == eq->l1_last_num - 1) {
+ eqe_alloc = (buf_chk_sz / eq->eqe_size)
+ * idx;
+ size = (eq->entries - eqe_alloc)
+ * eq->eqe_size;
+ dma_free_coherent(dev, size,
+ eq->buf[idx],
+ eq->buf_dma[idx]);
+ break;
+ }
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ }
+ kfree(eq->buf_dma);
+ kfree(eq->buf);
+ kfree(eq->l1_dma);
+ kfree(eq->bt_l1);
+ eq->buf_dma = NULL;
+ eq->buf = NULL;
+ eq->l1_dma = NULL;
+ eq->bt_l1 = NULL;
+}
+
+static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ u32 buf_chk_sz;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ if (hr_dev->caps.eqe_hop_num) {
+ hns_roce_mhop_free_eq(hr_dev, eq);
+ return;
+ }
+
+ if (eq->buf_list)
+ dma_free_coherent(hr_dev->dev, buf_chk_sz,
+ eq->buf_list->buf, eq->buf_list->map);
+}
+
+static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq,
+ void *mb_buf)
+{
+ struct hns_roce_eq_context *eqc;
+
+ eqc = mb_buf;
+ memset(eqc, 0, sizeof(struct hns_roce_eq_context));
+
+ /* init eqc */
+ eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
+ eq->hop_num = hr_dev->caps.eqe_hop_num;
+ eq->cons_index = 0;
+ eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
+ eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
+ eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
+ eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
+ eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
+ eq->shift = ilog2((unsigned int)eq->entries);
+
+ if (!eq->hop_num)
+ eq->eqe_ba = eq->buf_list->map;
+ else
+ eq->eqe_ba = eq->l0_dma;
+
+ /* set eqc state */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQ_ST_M,
+ HNS_ROCE_EQC_EQ_ST_S,
+ HNS_ROCE_V2_EQ_STATE_VALID);
+
+ /* set eqe hop num */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_HOP_NUM_M,
+ HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
+
+ /* set eqc over_ignore */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_OVER_IGNORE_M,
+ HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
+
+ /* set eqc coalesce */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_COALESCE_M,
+ HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
+
+ /* set eqc arm_state */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_ARM_ST_M,
+ HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
+
+ /* set eqn */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQN_M,
+ HNS_ROCE_EQC_EQN_S, eq->eqn);
+
+ /* set eqe_cnt */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQE_CNT_M,
+ HNS_ROCE_EQC_EQE_CNT_S,
+ HNS_ROCE_EQ_INIT_EQE_CNT);
+
+ /* set eqe_ba_pg_sz */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_BA_PG_SZ_M,
+ HNS_ROCE_EQC_BA_PG_SZ_S, eq->eqe_ba_pg_sz);
+
+ /* set eqe_buf_pg_sz */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_BUF_PG_SZ_M,
+ HNS_ROCE_EQC_BUF_PG_SZ_S, eq->eqe_buf_pg_sz);
+
+ /* set eq_producer_idx */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_PROD_INDX_M,
+ HNS_ROCE_EQC_PROD_INDX_S,
+ HNS_ROCE_EQ_INIT_PROD_IDX);
+
+ /* set eq_max_cnt */
+ roce_set_field(eqc->byte_12,
+ HNS_ROCE_EQC_MAX_CNT_M,
+ HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
+
+ /* set eq_period */
+ roce_set_field(eqc->byte_12,
+ HNS_ROCE_EQC_PERIOD_M,
+ HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
+
+ /* set eqe_report_timer */
+ roce_set_field(eqc->eqe_report_timer,
+ HNS_ROCE_EQC_REPORT_TIMER_M,
+ HNS_ROCE_EQC_REPORT_TIMER_S,
+ HNS_ROCE_EQ_INIT_REPORT_TIMER);
+
+ /* set eqe_ba [34:3] */
+ roce_set_field(eqc->eqe_ba0,
+ HNS_ROCE_EQC_EQE_BA_L_M,
+ HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
+
+ /* set eqe_ba [63:35] */
+ roce_set_field(eqc->eqe_ba1,
+ HNS_ROCE_EQC_EQE_BA_H_M,
+ HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
+
+ /* set eq shift */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_SHIFT_M,
+ HNS_ROCE_EQC_SHIFT_S, eq->shift);
+
+ /* set eq MSI_IDX */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_MSI_INDX_M,
+ HNS_ROCE_EQC_MSI_INDX_S,
+ HNS_ROCE_EQ_INIT_MSI_IDX);
+
+ /* set cur_eqe_ba [27:12] */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_CUR_EQE_BA_L_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
+
+ /* set cur_eqe_ba [59:28] */
+ roce_set_field(eqc->byte_32,
+ HNS_ROCE_EQC_CUR_EQE_BA_M_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
+
+ /* set cur_eqe_ba [63:60] */
+ roce_set_field(eqc->byte_36,
+ HNS_ROCE_EQC_CUR_EQE_BA_H_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
+
+ /* set eq consumer idx */
+ roce_set_field(eqc->byte_36,
+ HNS_ROCE_EQC_CONS_INDX_M,
+ HNS_ROCE_EQC_CONS_INDX_S,
+ HNS_ROCE_EQ_INIT_CONS_IDX);
+
+ /* set nxt_eqe_ba [43:12] */
+ roce_set_field(eqc->nxt_eqe_ba0,
+ HNS_ROCE_EQC_NXT_EQE_BA_L_M,
+ HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
+
+ /* set nxt_eqe_ba [63:44] */
+ roce_set_field(eqc->nxt_eqe_ba1,
+ HNS_ROCE_EQC_NXT_EQE_BA_H_M,
+ HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
+}
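
hns_roce_config_eqc() spreads the 64-bit EQE base address across two context
fields: eqe_ba0 takes bits [34:3] (the address is at least 8-byte aligned)
and eqe_ba1 the bits from 35 upward, which is exactly what the two shifts in
the code express. A quick userspace reassembly check (field widths inferred
from the shifts in this diff):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ba = 0x123456789000ULL;   /* 8-byte-aligned DMA address */

            uint32_t lo = (uint32_t)((ba >> 3)  & 0xffffffffULL);  /* [34:3] */
            uint32_t hi = (uint32_t)((ba >> 35) & 0x1fffffffULL);  /* [63:35] */

            uint64_t back = ((uint64_t)hi << 35) | ((uint64_t)lo << 3);

            printf("ba=0x%" PRIx64 " reassembled=0x%" PRIx64 "\n", ba, back);
            return 0;
    }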
+
+static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ int eq_alloc_done = 0;
+ int eq_buf_cnt = 0;
+ int eqe_alloc;
+ u32 buf_chk_sz;
+ u32 bt_chk_sz;
+ u32 mhop_num;
+ u64 size;
+ u64 idx;
+ int ba_num;
+ int bt_num;
+ int record_i;
+ int record_j;
+ int i = 0;
+ int j = 0;
+
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+ bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+
+ ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
+ / buf_chk_sz;
+ bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
+
+ /* hop_num = 0 */
+ if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ if (eq->entries > buf_chk_sz / eq->eqe_size) {
+ dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
+ eq->entries);
+ return -EINVAL;
+ }
+ eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
+ &(eq->l0_dma), GFP_KERNEL);
+ if (!eq->bt_l0)
+ return -ENOMEM;
+
+ eq->cur_eqe_ba = eq->l0_dma;
+ eq->nxt_eqe_ba = 0;
+
+ memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
+
+ return 0;
+ }
+
+ eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
+ if (!eq->buf_dma)
+ return -ENOMEM;
+ eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
+ if (!eq->buf)
+ goto err_kcalloc_buf;
+
+ if (mhop_num == 2) {
+ eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
+ if (!eq->l1_dma)
+ goto err_kcalloc_l1_dma;
+
+ eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
+ if (!eq->bt_l1)
+ goto err_kcalloc_bt_l1;
+ }
+
+ /* alloc L0 BT */
+ eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
+ if (!eq->bt_l0)
+ goto err_dma_alloc_l0;
+
+ if (mhop_num == 1) {
+ if (ba_num > (bt_chk_sz / 8))
+ dev_err(dev, "ba_num %d is too large for 1 hop\n",
+ ba_num);
+
+ /* alloc buf */
+ for (i = 0; i < bt_chk_sz / 8; i++) {
+ if (eq_buf_cnt + 1 < ba_num) {
+ size = buf_chk_sz;
+ } else {
+ eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
+ size = (eq->entries - eqe_alloc) * eq->eqe_size;
+ }
+ eq->buf[i] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[i]),
+ GFP_KERNEL);
+ if (!eq->buf[i])
+ goto err_dma_alloc_buf;
+
+ memset(eq->buf[i], 0, size);
+ *(eq->bt_l0 + i) = eq->buf_dma[i];
+
+ eq_buf_cnt++;
+ if (eq_buf_cnt >= ba_num)
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+ eq->nxt_eqe_ba = eq->buf_dma[1];
+
+ } else if (mhop_num == 2) {
+ /* alloc L1 BT and buf */
+ for (i = 0; i < bt_chk_sz / 8; i++) {
+ eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
+ &(eq->l1_dma[i]),
+ GFP_KERNEL);
+ if (!eq->bt_l1[i])
+ goto err_dma_alloc_l1;
+ *(eq->bt_l0 + i) = eq->l1_dma[i];
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * bt_chk_sz / 8 + j;
+ if (eq_buf_cnt + 1 < ba_num) {
+ size = buf_chk_sz;
+ } else {
+ eqe_alloc = (buf_chk_sz / eq->eqe_size)
+ * idx;
+ size = (eq->entries - eqe_alloc)
+ * eq->eqe_size;
+ }
+ eq->buf[idx] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[idx]),
+ GFP_KERNEL);
+ if (!eq->buf[idx])
+ goto err_dma_alloc_buf;
+
+ memset(eq->buf[idx], 0, size);
+ *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
+
+ eq_buf_cnt++;
+ if (eq_buf_cnt >= ba_num) {
+ eq_alloc_done = 1;
+ break;
+ }
+ }
+
+ if (eq_alloc_done)
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+ eq->nxt_eqe_ba = eq->buf_dma[1];
+ }
+
+ eq->l0_last_num = i + 1;
+ if (mhop_num == 2)
+ eq->l1_last_num = j + 1;
+
+ return 0;
+
+err_dma_alloc_l1:
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ eq->bt_l0 = NULL;
+ eq->l0_dma = 0;
+ for (i -= 1; i >= 0; i--) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * bt_chk_sz / 8 + j;
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ goto err_dma_alloc_l0;
+
+err_dma_alloc_buf:
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ eq->bt_l0 = NULL;
+ eq->l0_dma = 0;
+
+ if (mhop_num == 1)
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
+ eq->buf_dma[i]);
+ else if (mhop_num == 2) {
+ record_i = i;
+ record_j = j;
+ for (; i >= 0; i--) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ if (i == record_i && j >= record_j)
+ break;
+
+ idx = i * bt_chk_sz / 8 + j;
+ dma_free_coherent(dev, buf_chk_sz,
+ eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ }
+
+err_dma_alloc_l0:
+ kfree(eq->bt_l1);
+ eq->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(eq->l1_dma);
+ eq->l1_dma = NULL;
+
+err_kcalloc_l1_dma:
+ kfree(eq->buf);
+ eq->buf = NULL;
+
+err_kcalloc_buf:
+ kfree(eq->buf_dma);
+ eq->buf_dma = NULL;
+
+ return -ENOMEM;
+}
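
hns_roce_mhop_alloc_eq() sizes everything with two ceiling divisions: ba_num
buffer pages cover the page-aligned EQE array, and each base-address-table
page holds bt_chk_sz / 8 64-bit pointers, which yields bt_num L1 tables in
the two-hop case. The same arithmetic with example sizes (values chosen for
illustration; the driver also PAGE_ALIGNs the byte count first):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int entries = 0x8000, eqe_size = 64;     /* example EQ */
            unsigned int buf_chk_sz = 1u << 12, bt_chk_sz = 1u << 12;

            unsigned int ba_num = DIV_ROUND_UP(entries * eqe_size, buf_chk_sz);
            unsigned int bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / 8);

            printf("ba_num=%u bt_num=%u\n", ba_num, bt_num);
            return 0;
    }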
+
+static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq,
+ unsigned int eq_cmd)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ u32 buf_chk_sz = 0;
+ int ret;
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (!hr_dev->caps.eqe_hop_num) {
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+
+ eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
+ GFP_KERNEL);
+ if (!eq->buf_list) {
+ ret = -ENOMEM;
+ goto free_cmd_mbox;
+ }
+
+ eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
+ &(eq->buf_list->map),
+ GFP_KERNEL);
+ if (!eq->buf_list->buf) {
+ ret = -ENOMEM;
+ goto err_alloc_buf;
+ }
+
+ memset(eq->buf_list->buf, 0, buf_chk_sz);
+ } else {
+ ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
+ if (ret) {
+ ret = -ENOMEM;
+ goto free_cmd_mbox;
+ }
+ }
+
+ hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
+ eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ dev_err(dev, "[mailbox cmd] creat eqc failed.\n");
+ goto err_cmd_mbox;
+ }
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_cmd_mbox:
+ if (!hr_dev->caps.eqe_hop_num)
+ dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
+ eq->buf_list->map);
+ else {
+ hns_roce_mhop_free_eq(hr_dev, eq);
+ goto free_cmd_mbox;
+ }
+
+err_alloc_buf:
+ kfree(eq->buf_list);
+
+free_cmd_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_eq *eq;
+ unsigned int eq_cmd;
+ int irq_num;
+ int eq_num;
+ int other_num;
+ int comp_num;
+ int aeq_num;
+ int i, j, k;
+ int ret;
+
+ other_num = hr_dev->caps.num_other_vectors;
+ comp_num = hr_dev->caps.num_comp_vectors;
+ aeq_num = hr_dev->caps.num_aeq_vectors;
+
+ eq_num = comp_num + aeq_num;
+ irq_num = eq_num + other_num;
+
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ for (i = 0; i < irq_num; i++) {
+ hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
+ GFP_KERNEL);
+ if (!hr_dev->irq_names[i]) {
+ ret = -ENOMEM;
+ goto err_failed_kzalloc;
+ }
+ }
+
+ /* create eq */
+ for (j = 0; j < eq_num; j++) {
+ eq = &eq_table->eq[j];
+ eq->hr_dev = hr_dev;
+ eq->eqn = j;
+ if (j < comp_num) {
+ /* CEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->entries = hr_dev->caps.ceqe_depth;
+ eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ eq->irq = hr_dev->irq[j + other_num + aeq_num];
+ eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
+ } else {
+ /* AEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ eq->irq = hr_dev->irq[j - comp_num + other_num];
+ eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
+ }
+
+ ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
+ if (ret) {
+ dev_err(dev, "eq create failed.\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ /* enable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
+
+ /* irq layout: abnormal + AEQ + CEQ */
+ for (k = 0; k < irq_num; k++)
+ if (k < other_num)
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
+ else if (k < (other_num + aeq_num))
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
+ k - other_num);
+ else
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
+ k - other_num - aeq_num);
+
+ for (k = 0; k < irq_num; k++) {
+ if (k < other_num)
+ ret = request_irq(hr_dev->irq[k],
+ hns_roce_v2_msix_interrupt_abn,
+ 0, hr_dev->irq_names[k], hr_dev);
+
+ else if (k < (other_num + comp_num))
+ ret = request_irq(eq_table->eq[k - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[k + aeq_num],
+ &eq_table->eq[k - other_num]);
+ else
+ ret = request_irq(eq_table->eq[k - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[k - comp_num],
+ &eq_table->eq[k - other_num]);
+ if (ret) {
+ dev_err(dev, "Request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ return 0;
+
+err_request_irq_fail:
+ for (k -= 1; k >= 0; k--)
+ if (k < other_num)
+ free_irq(hr_dev->irq[k], hr_dev);
+ else
+ free_irq(eq_table->eq[k - other_num].irq,
+ &eq_table->eq[k - other_num]);
+
+err_create_eq_fail:
+ for (j -= 1; j >= 0; j--)
+ hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
+
+err_failed_kzalloc:
+ for (i -= 1; i >= 0; i--)
+ kfree(hr_dev->irq_names[i]);
+ kfree(eq_table->eq);
+
+ return ret;
+}
+
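One subtlety above: eq_table orders CEQs before AEQs, while irq_names is ordered abnormal, AEQ, then CEQ — hence the k + aeq_num and k - comp_num name indices in the request_irq loop. A standalone sketch of the naming scheme, with illustrative vector counts:

#include <stdio.h>

int main(void)
{
	const int other_num = 1, aeq_num = 1, comp_num = 2;
	const int irq_num = other_num + aeq_num + comp_num;
	char name[32];
	int k;

	for (k = 0; k < irq_num; k++) {
		if (k < other_num)
			snprintf(name, sizeof(name), "hns-abn-%d", k);
		else if (k < other_num + aeq_num)
			snprintf(name, sizeof(name), "hns-aeq-%d",
				 k - other_num);
		else
			snprintf(name, sizeof(name), "hns-ceq-%d",
				 k - other_num - aeq_num);
		printf("irq %d -> %s\n", k, name);
	}
	return 0;
}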
+static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+ /* Disable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
+
+ for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ for (i = 0; i < eq_num; i++) {
+ hns_roce_v2_destroy_eqc(hr_dev, i);
+
+ free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
+
+ hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+
+ for (i = 0; i < irq_num; i++)
+ kfree(hr_dev->irq_names[i]);
+
+ kfree(eq_table->eq);
+}
+
static const struct hns_roce_hw hns_roce_hw_v2 = {
.cmq_init = hns_roce_v2_cmq_init,
.cmq_exit = hns_roce_v2_cmq_exit,
@@ -3183,6 +4646,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.post_recv = hns_roce_v2_post_recv,
.req_notify_cq = hns_roce_v2_req_notify_cq,
.poll_cq = hns_roce_v2_poll_cq,
+ .init_eq = hns_roce_v2_init_eq_table,
+ .cleanup_eq = hns_roce_v2_cleanup_eq_table,
};
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
@@ -3197,6 +4662,7 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
const struct pci_device_id *id;
+ int i;
id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
if (!id) {
@@ -3214,8 +4680,15 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
hr_dev->iboe.phy_port[0] = 0;
+ addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
+ hr_dev->iboe.netdevs[0]->dev_addr);
+
+ for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
+ hr_dev->irq[i] = pci_irq_vector(handle->pdev,
+ i + handle->rinfo.base_vector);
+
/* cmd issue mode: 0 is poll, 1 is event */
- hr_dev->cmd_mod = 0;
+ hr_dev->cmd_mod = 1;
hr_dev->loop_idc = 0;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 04b7a51b8efb..960df095392a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -53,6 +53,10 @@
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
+#define HNS_ROCE_V2_MAX_IRQ_NUM 65
+#define HNS_ROCE_V2_COMP_VEC_NUM 63
+#define HNS_ROCE_V2_AEQE_VEC_NUM 1
+#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
@@ -78,6 +82,8 @@
#define HNS_ROCE_MTT_HOP_NUM 1
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_PBL_HOP_NUM 2
+#define HNS_ROCE_EQE_HOP_NUM 2
+
#define HNS_ROCE_V2_GID_INDEX_NUM 256
#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
@@ -105,6 +111,12 @@
(step_idx == 1 && hop_num == 1) || \
(step_idx == 2 && hop_num == 2))
+enum {
+ NO_ARMED = 0x0,
+ REG_NXT_CEQE = 0x2,
+ REG_NXT_SE_CEQE = 0x3
+};
+
#define V2_CQ_DB_REQ_NOT_SOL 0
#define V2_CQ_DB_REQ_NOT 1
@@ -229,6 +241,9 @@ struct hns_roce_v2_cq_context {
u32 cqe_report_timer;
u32 byte_64_se_cqe_idx;
};
+#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
+
#define V2_CQC_BYTE_4_CQ_ST_S 0
#define V2_CQC_BYTE_4_CQ_ST_M GENMASK(1, 0)
@@ -747,11 +762,14 @@ struct hns_roce_v2_qp_context {
struct hns_roce_v2_cqe {
u32 byte_4;
- u32 rkey_immtdata;
+ union {
+ __le32 rkey;
+ __be32 immtdata;
+ };
u32 byte_12;
u32 byte_16;
u32 byte_cnt;
- u32 smac;
+ u8 smac[4];
u32 byte_28;
u32 byte_32;
};
@@ -901,6 +919,90 @@ struct hns_roce_v2_cq_db {
#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+struct hns_roce_v2_ud_send_wqe {
+ u32 byte_4;
+ u32 msg_len;
+ u32 immtdata;
+ u32 byte_16;
+ u32 byte_20;
+ u32 byte_24;
+ u32 qkey;
+ u32 byte_32;
+ u32 byte_36;
+ u32 byte_40;
+ u32 dmac;
+ u32 byte_48;
+ u8 dgid[GID_LEN_V2];
+};
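A quick standalone size check for this layout — twelve 32-bit words plus the GID gives a 64-byte descriptor (GID_LEN_V2 assumed to be 16):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GID_LEN_V2 16	/* assumed; matches a 128-bit GID */

struct ud_send_wqe {
	uint32_t words[12];		/* byte_4 .. byte_48 */
	uint8_t dgid[GID_LEN_V2];
};

int main(void)
{
	static_assert(sizeof(struct ud_send_wqe) == 64, "UD WQE must be 64B");
	printf("sizeof = %zu\n", sizeof(struct ud_send_wqe));
	return 0;
}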
+#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0
+#define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define V2_UD_SEND_WQE_BYTE_4_OWNER_S 7
+
+#define V2_UD_SEND_WQE_BYTE_4_CQE_S 8
+
+#define V2_UD_SEND_WQE_BYTE_4_SE_S 11
+
+#define V2_UD_SEND_WQE_BYTE_16_PD_S 0
+#define V2_UD_SEND_WQE_BYTE_16_PD_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S 24
+#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
+#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_S 16
+#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_M GENMASK(31, 16)
+
+#define V2_UD_SEND_WQE_BYTE_32_DQPN_S 0
+#define V2_UD_SEND_WQE_BYTE_32_DQPN_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_36_VLAN_S 0
+#define V2_UD_SEND_WQE_BYTE_36_VLAN_M GENMASK(15, 0)
+
+#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S 16
+#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_BYTE_36_TCLASS_S 24
+#define V2_UD_SEND_WQE_BYTE_36_TCLASS_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S 0
+#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M GENMASK(19, 0)
+
+#define V2_UD_SEND_WQE_BYTE_40_SL_S 20
+#define V2_UD_SEND_WQE_BYTE_40_SL_M GENMASK(23, 20)
+
+#define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
+#define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
+
+#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
+
+#define V2_UD_SEND_WQE_DMAC_0_S 0
+#define V2_UD_SEND_WQE_DMAC_0_M GENMASK(7, 0)
+
+#define V2_UD_SEND_WQE_DMAC_1_S 8
+#define V2_UD_SEND_WQE_DMAC_1_M GENMASK(15, 8)
+
+#define V2_UD_SEND_WQE_DMAC_2_S 16
+#define V2_UD_SEND_WQE_DMAC_2_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_DMAC_3_S 24
+#define V2_UD_SEND_WQE_DMAC_3_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_S 0
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_M GENMASK(7, 0)
+
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_S 8
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_M GENMASK(15, 8)
+
+#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S 16
+#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_S 24
+#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_M GENMASK(31, 24)
+
struct hns_roce_v2_rc_send_wqe {
u32 byte_4;
u32 msg_len;
@@ -1129,9 +1231,6 @@ struct hns_roce_cmq_desc {
u32 data[6];
};
-#define ROCEE_VF_MB_CFG0_REG 0x40
-#define ROCEE_VF_MB_STATUS_REG 0x58
-
#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
@@ -1174,4 +1273,178 @@ struct hns_roce_v2_priv {
struct hns_roce_v2_cmq cmq;
};
+struct hns_roce_eq_context {
+ u32 byte_4;
+ u32 byte_8;
+ u32 byte_12;
+ u32 eqe_report_timer;
+ u32 eqe_ba0;
+ u32 eqe_ba1;
+ u32 byte_28;
+ u32 byte_32;
+ u32 byte_36;
+ u32 nxt_eqe_ba0;
+ u32 nxt_eqe_ba1;
+ u32 rsv[5];
+};
+
+#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x0
+
+#define HNS_ROCE_V2_EQ_STATE_INVALID 0
+#define HNS_ROCE_V2_EQ_STATE_VALID 1
+#define HNS_ROCE_V2_EQ_STATE_OVERFLOW 2
+#define HNS_ROCE_V2_EQ_STATE_FAILURE 3
+
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_0 0
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_1 1
+
+#define HNS_ROCE_V2_EQ_COALESCE_0 0
+#define HNS_ROCE_V2_EQ_COALESCE_1 1
+
+#define HNS_ROCE_V2_EQ_FIRED 0
+#define HNS_ROCE_V2_EQ_ARMED 1
+#define HNS_ROCE_V2_EQ_ALWAYS_ARMED 3
+
+#define HNS_ROCE_EQ_INIT_EQE_CNT 0
+#define HNS_ROCE_EQ_INIT_PROD_IDX 0
+#define HNS_ROCE_EQ_INIT_REPORT_TIMER 0
+#define HNS_ROCE_EQ_INIT_MSI_IDX 0
+#define HNS_ROCE_EQ_INIT_CONS_IDX 0
+#define HNS_ROCE_EQ_INIT_NXT_EQE_BA 0
+
+#define HNS_ROCE_V2_CEQ_CEQE_OWNER_S 31
+#define HNS_ROCE_V2_AEQ_AEQE_OWNER_S 31
+
+#define HNS_ROCE_V2_COMP_EQE_NUM 0x1000
+#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
+
+#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
+#define HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S 1
+#define HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S 2
+
+#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
+#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
+#define HNS_ROCE_EQ_DB_CMD_CEQ 0x2
+#define HNS_ROCE_EQ_DB_CMD_CEQ_ARMED 0x3
+
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
+
+#define EQ_REG_OFFSET 0x4
+
+#define HNS_ROCE_INT_NAME_LEN 32
+#define HNS_ROCE_V2_EQN_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_CONS_IDX_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_VF_ABN_INT_EN_S 0
+#define HNS_ROCE_V2_VF_ABN_INT_EN_M GENMASK(0, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_ST_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_CFG_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_EVENT_INT_EN_M GENMASK(0, 0)
+
+/* WORD0 */
+#define HNS_ROCE_EQC_EQ_ST_S 0
+#define HNS_ROCE_EQC_EQ_ST_M GENMASK(1, 0)
+
+#define HNS_ROCE_EQC_HOP_NUM_S 2
+#define HNS_ROCE_EQC_HOP_NUM_M GENMASK(3, 2)
+
+#define HNS_ROCE_EQC_OVER_IGNORE_S 4
+#define HNS_ROCE_EQC_OVER_IGNORE_M GENMASK(4, 4)
+
+#define HNS_ROCE_EQC_COALESCE_S 5
+#define HNS_ROCE_EQC_COALESCE_M GENMASK(5, 5)
+
+#define HNS_ROCE_EQC_ARM_ST_S 6
+#define HNS_ROCE_EQC_ARM_ST_M GENMASK(7, 6)
+
+#define HNS_ROCE_EQC_EQN_S 8
+#define HNS_ROCE_EQC_EQN_M GENMASK(15, 8)
+
+#define HNS_ROCE_EQC_EQE_CNT_S 16
+#define HNS_ROCE_EQC_EQE_CNT_M GENMASK(31, 16)
+
+/* WORD1 */
+#define HNS_ROCE_EQC_BA_PG_SZ_S 0
+#define HNS_ROCE_EQC_BA_PG_SZ_M GENMASK(3, 0)
+
+#define HNS_ROCE_EQC_BUF_PG_SZ_S 4
+#define HNS_ROCE_EQC_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define HNS_ROCE_EQC_PROD_INDX_S 8
+#define HNS_ROCE_EQC_PROD_INDX_M GENMASK(31, 8)
+
+/* WORD2 */
+#define HNS_ROCE_EQC_MAX_CNT_S 0
+#define HNS_ROCE_EQC_MAX_CNT_M GENMASK(15, 0)
+
+#define HNS_ROCE_EQC_PERIOD_S 16
+#define HNS_ROCE_EQC_PERIOD_M GENMASK(31, 16)
+
+/* WORD3 */
+#define HNS_ROCE_EQC_REPORT_TIMER_S 0
+#define HNS_ROCE_EQC_REPORT_TIMER_M GENMASK(31, 0)
+
+/* WORD4 */
+#define HNS_ROCE_EQC_EQE_BA_L_S 0
+#define HNS_ROCE_EQC_EQE_BA_L_M GENMASK(31, 0)
+
+/* WORD5 */
+#define HNS_ROCE_EQC_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_EQE_BA_H_M GENMASK(28, 0)
+
+/* WORD6 */
+#define HNS_ROCE_EQC_SHIFT_S 0
+#define HNS_ROCE_EQC_SHIFT_M GENMASK(7, 0)
+
+#define HNS_ROCE_EQC_MSI_INDX_S 8
+#define HNS_ROCE_EQC_MSI_INDX_M GENMASK(15, 8)
+
+#define HNS_ROCE_EQC_CUR_EQE_BA_L_S 16
+#define HNS_ROCE_EQC_CUR_EQE_BA_L_M GENMASK(31, 16)
+
+/* WORD7 */
+#define HNS_ROCE_EQC_CUR_EQE_BA_M_S 0
+#define HNS_ROCE_EQC_CUR_EQE_BA_M_M GENMASK(31, 0)
+
+/* WORD8 */
+#define HNS_ROCE_EQC_CUR_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_CUR_EQE_BA_H_M GENMASK(3, 0)
+
+#define HNS_ROCE_EQC_CONS_INDX_S 8
+#define HNS_ROCE_EQC_CONS_INDX_M GENMASK(31, 8)
+
+/* WORD9 */
+#define HNS_ROCE_EQC_NXT_EQE_BA_L_S 0
+#define HNS_ROCE_EQC_NXT_EQE_BA_L_M GENMASK(31, 0)
+
+/* WORD10 */
+#define HNS_ROCE_EQC_NXT_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_NXT_EQE_BA_H_M GENMASK(19, 0)
+
+#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0
+#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_AEQE_EVENT_TYPE_S 0
+#define HNS_ROCE_V2_AEQE_EVENT_TYPE_M GENMASK(7, 0)
+
+#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8
+#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8)
+
+#define HNS_ROCE_V2_EQ_DB_CMD_S 16
+#define HNS_ROCE_V2_EQ_DB_CMD_M GENMASK(17, 16)
+
+#define HNS_ROCE_V2_EQ_DB_TAG_S 0
+#define HNS_ROCE_V2_EQ_DB_TAG_M GENMASK(7, 0)
+
+#define HNS_ROCE_V2_EQ_DB_PARA_S 0
+#define HNS_ROCE_V2_EQ_DB_PARA_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
+#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+
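A hedged sketch of how an EQ doorbell is packed from the shift/mask pairs above, using plain C arithmetic (the driver itself goes through its roce_set_field() helper; the EQN and consumer-index values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define EQ_DB_TAG_M		0xffu		/* bits 7:0, the EQN */
#define EQ_DB_CMD_S		16		/* bits 17:16 */
#define EQ_DB_PARA_M		0xffffffu	/* bits 23:0, consumer index */
#define EQ_DB_CMD_CEQ_ARMED	0x3u

int main(void)
{
	uint32_t db[2];
	uint32_t eqn = 5, cons_idx = 100;

	db[0] = (eqn & EQ_DB_TAG_M) | (EQ_DB_CMD_CEQ_ARMED << EQ_DB_CMD_S);
	db[1] = cons_idx & EQ_DB_PARA_M;

	printf("db[0]=%#x db[1]=%#x\n", db[0], db[1]);
	return 0;
}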
#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index cf02ac2d3596..aa0c242ddc50 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -748,12 +748,10 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
goto error_failed_cmd_init;
}
- if (hr_dev->cmd_mod) {
- ret = hns_roce_init_eq_table(hr_dev);
- if (ret) {
- dev_err(dev, "eq init failed!\n");
- goto error_failed_eq_table;
- }
+ ret = hr_dev->hw->init_eq(hr_dev);
+ if (ret) {
+ dev_err(dev, "eq init failed!\n");
+ goto error_failed_eq_table;
}
if (hr_dev->cmd_mod) {
@@ -805,8 +803,7 @@ error_failed_init_hem:
hns_roce_cmd_use_polling(hr_dev);
error_failed_use_event:
- if (hr_dev->cmd_mod)
- hns_roce_cleanup_eq_table(hr_dev);
+ hr_dev->hw->cleanup_eq(hr_dev);
error_failed_eq_table:
hns_roce_cmd_cleanup(hr_dev);
@@ -837,8 +834,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
if (hr_dev->cmd_mod)
hns_roce_cmd_use_polling(hr_dev);
- if (hr_dev->cmd_mod)
- hns_roce_cleanup_eq_table(hr_dev);
+ hr_dev->hw->cleanup_eq(hr_dev);
hns_roce_cmd_cleanup(hr_dev);
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 49586ec8126a..4414cea9ef56 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -65,6 +65,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_event);
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
enum hns_roce_event type)
@@ -454,6 +455,13 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4;
}
+ /* UD/GSI SQ WQEs keep their SGEs in the extended SGE space */
+ if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ hr_qp->sq.max_gs);
+ hr_qp->sge.sge_shift = 4;
+ }
+
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
@@ -493,6 +501,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
int ret = 0;
u32 page_shift;
u32 npages;
+ int i;
mutex_init(&hr_qp->mutex);
spin_lock_init(&hr_qp->sq.lock);
@@ -500,6 +509,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->state = IB_QPS_RESET;
+ hr_qp->ibqp.qp_type = init_attr->qp_type;
+
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
else
@@ -512,18 +523,48 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ /* allocate recv inline buf */
+ hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
+ sizeof(struct hns_roce_rinl_wqe),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
+
+ /* First, allocate one contiguous buffer for every WQE's SGE list */
+ hr_qp->rq_inl_buf.wqe_list[0].sg_list =
+ kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
+ init_attr->cap.max_recv_sge *
+ sizeof(struct hns_roce_rinl_sge),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
+ ret = -ENOMEM;
+ goto err_wqe_list;
+ }
+
+ for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
+ /* Then point each remaining WQE at its slice of that buffer */
+ hr_qp->rq_inl_buf.wqe_list[i].sg_list =
+ &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
+ init_attr->cap.max_recv_sge];
+ }
+
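The inline-receive buffers above use a one-allocation/many-slices pattern: a single array sized for every WQE's SGE list, with each WQE pointed at its own slice. A standalone sketch, with calloc standing in for kcalloc:

#include <stdio.h>
#include <stdlib.h>

struct sge { unsigned long addr; unsigned int len; };
struct wqe { struct sge *sg_list; };

int main(void)
{
	const int wqe_cnt = 4, max_sge = 2;
	struct wqe *wqes = calloc(wqe_cnt, sizeof(*wqes));
	struct sge *pool = calloc((size_t)wqe_cnt * max_sge, sizeof(*pool));
	int i;

	if (!wqes || !pool)
		return 1;
	/* one backing allocation; each WQE gets a max_sge-sized slice */
	for (i = 0; i < wqe_cnt; i++)
		wqes[i].sg_list = &pool[i * max_sge];

	printf("wqe[2].sg_list == &pool[%d]\n", 2 * max_sge);
	free(pool);
	free(wqes);
	return 0;
}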
if (ib_pd->uobject) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "ib_copy_from_udata error for create qp\n");
ret = -EFAULT;
- goto err_out;
+ goto err_rq_sge_list;
}
ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
&ucmd);
if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
@@ -532,7 +573,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -566,13 +607,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
/* Set SQ size */
@@ -580,7 +621,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
- goto err_out;
+ goto err_rq_sge_list;
}
/* QP doorbell register address */
@@ -596,7 +637,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
&hr_qp->hr_buf, page_shift)) {
dev_err(dev, "hns_roce_buf_alloc error!\n");
ret = -ENOMEM;
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -678,6 +719,14 @@ err_buf:
else
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+err_rq_sge_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+
+err_wqe_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+
err_out:
return ret;
}
@@ -724,8 +773,13 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp = &hr_sqp->hr_qp;
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
+
+ /* hw v1 (max_sq_sg <= 2) derives the GSI QPN from the phy port; hw v2 uses QPN 1 */
+ if (hr_dev->caps.max_sq_sg <= 2)
+ hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
+ else
+ hr_qp->ibqp.qp_num = 1;
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
hr_qp->ibqp.qp_num, hr_qp);
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
index f6d20ba88c03..2962979c06e9 100644
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -5,4 +5,3 @@ config INFINIBAND_I40IW
select GENERIC_ALLOCATOR
---help---
Intel(R) Ethernet X722 iWARP Driver
- INET && I40IW && INFINIBAND && I40E
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 4ae9131b6350..bcddd7061fc0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -587,5 +587,8 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
int i40iw_net_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
+int i40iw_netdevice_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 77870f9e1736..abf4cd897849 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -92,14 +92,9 @@ void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
{
u8 encoded_ird_size;
- u8 pof2_cm_ird = 1;
-
- /* round-off to next powerof2 */
- while (pof2_cm_ird < cm_ird)
- pof2_cm_ird *= 2;
/* ird_size field is encoded in qp_ctx */
- switch (pof2_cm_ird) {
+ switch (cm_ird ? roundup_pow_of_two(cm_ird) : 0) {
case I40IW_HW_IRD_SETTING_64:
encoded_ird_size = 3;
break;
@@ -125,13 +120,16 @@ static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
* @conn_ird: connection IRD
* @conn_ord: connection ORD
*/
-static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
+static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,
+ u32 conn_ord)
{
if (conn_ird > I40IW_MAX_IRD_SIZE)
conn_ird = I40IW_MAX_IRD_SIZE;
if (conn_ord > I40IW_MAX_ORD_SIZE)
conn_ord = I40IW_MAX_ORD_SIZE;
+ else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
+ conn_ord = 1;
cm_node->ird_size = conn_ird;
cm_node->ord_size = conn_ord;
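A standalone sketch of the clamp semantics above, assuming the I40IW_MAX_IRD_SIZE/I40IW_MAX_ORD_SIZE limits of 64 and 127 from this series:

#include <stdbool.h>
#include <stdio.h>

static void record_ird_ord(unsigned int *ird, unsigned int *ord,
			   bool rdma0_is_read_zero)
{
	if (*ird > 64)
		*ird = 64;
	if (*ord > 127)
		*ord = 127;
	else if (!*ord && rdma0_is_read_zero)
		*ord = 1;	/* need one slot for the zero-length RDMA read */
}

int main(void)
{
	unsigned int ird = 200, ord = 0;

	record_ird_ord(&ird, &ord, true);
	printf("ird=%u ord=%u\n", ird, ord);
	return 0;
}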
@@ -2878,15 +2876,13 @@ static struct i40iw_cm_listener *i40iw_make_listen_node(
* i40iw_create_cm_node - make a connection node with params
* @cm_core: cm's core
* @iwdev: iwarp device structure
- * @private_data_len: len to provate data for mpa request
- * @private_data: pointer to private data for connection
+ * @conn_param: upper layer connection parameters
* @cm_info: quad info for connection
*/
static struct i40iw_cm_node *i40iw_create_cm_node(
struct i40iw_cm_core *cm_core,
struct i40iw_device *iwdev,
- u16 private_data_len,
- void *private_data,
+ struct iw_cm_conn_param *conn_param,
struct i40iw_cm_info *cm_info)
{
struct i40iw_cm_node *cm_node;
@@ -2894,6 +2890,9 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
struct i40iw_cm_node *loopback_remotenode;
struct i40iw_cm_info loopback_cm_info;
+ u16 private_data_len = conn_param->private_data_len;
+ const void *private_data = conn_param->private_data;
+
/* create a CM connection node */
cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
if (!cm_node)
@@ -2902,6 +2901,8 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
cm_node->tcp_cntxt.client = 1;
cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+
if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
loopback_remotelistener = i40iw_find_listener(
cm_core,
@@ -2935,6 +2936,10 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
private_data_len);
loopback_remotenode->pdata.size = private_data_len;
+ if (loopback_remotenode->ord_size > cm_node->ird_size)
+ loopback_remotenode->ord_size =
+ cm_node->ird_size;
+
cm_node->state = I40IW_CM_STATE_OFFLOADED;
cm_node->tcp_cntxt.rcv_nxt =
loopback_remotenode->tcp_cntxt.loc_seq_num;
@@ -3691,7 +3696,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
status =
i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
if (status)
@@ -3815,9 +3820,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
__func__, cm_id->tos, cm_info.user_pri);
cm_id->add_ref(cm_id);
cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
- conn_param->private_data_len,
- (void *)conn_param->private_data,
- &cm_info);
+ conn_param, &cm_info);
if (IS_ERR(cm_node)) {
ret = PTR_ERR(cm_node);
@@ -3849,11 +3852,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
cm_node->apbvt_set = true;
- i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
- if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
- !cm_node->ord_size)
- cm_node->ord_size = 1;
-
iwqp->cm_node = cm_node;
cm_node->iwqp = iwqp;
iwqp->cm_id = cm_id;
@@ -4058,7 +4056,7 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
0);
if (status)
@@ -4242,10 +4240,16 @@ set_qhash:
}
/**
- * i40iw_cm_disconnect_all - disconnect all connected qp's
+ * i40iw_cm_teardown_connections - teardown QPs
* @iwdev: device pointer
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @nfo: connection info used to match vlan and address family
+ * @disconnect_all: flag indicating disconnect all QPs
+ * teardown QPs where source or destination addr matches ip addr
*/
-void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
+ struct i40iw_cm_info *nfo,
+ bool disconnect_all)
{
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
struct list_head *list_core_temp;
@@ -4259,8 +4263,13 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
spin_lock_irqsave(&cm_core->ht_lock, flags);
list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
cm_node = container_of(list_node, struct i40iw_cm_node, list);
- atomic_inc(&cm_node->ref_count);
- list_add(&cm_node->connected_entry, &connected_list);
+ if (disconnect_all ||
+ (nfo->vlan_id == cm_node->vlan_id &&
+ (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
+ !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->connected_entry, &connected_list);
+ }
}
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
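A standalone sketch of the new match predicate — 4 bytes compared for IPv4, 16 for IPv6, against either endpoint of the node (addresses are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool node_matches(const unsigned int *loc, const unsigned int *rem,
			 const unsigned int *ip, bool ipv4)
{
	size_t len = ipv4 ? 4 : 16;	/* one u32 word vs four */

	return !memcmp(loc, ip, len) || !memcmp(rem, ip, len);
}

int main(void)
{
	unsigned int loc[4] = { 0x0a000001 };	/* 10.0.0.1 */
	unsigned int rem[4] = { 0x0a000002 };	/* 10.0.0.2 */
	unsigned int ip[4]  = { 0x0a000002 };

	printf("match: %d\n", node_matches(loc, rem, ip, true));
	return 0;
}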
@@ -4294,6 +4303,9 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
enum i40iw_quad_hash_manage_type op =
ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+ nfo.vlan_id = vlan_id;
+ nfo.ipv4 = ipv4;
+
/* Disable or enable qhash for listeners */
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
@@ -4303,8 +4315,6 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
memcpy(nfo.loc_addr, listen_node->loc_addr,
sizeof(nfo.loc_addr));
nfo.loc_port = listen_node->loc_port;
- nfo.ipv4 = listen_node->ipv4;
- nfo.vlan_id = listen_node->vlan_id;
nfo.user_pri = listen_node->user_pri;
if (!list_empty(&listen_node->child_listen_list)) {
i40iw_qhash_ctrl(iwdev,
@@ -4326,7 +4336,7 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
}
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- /* disconnect any connected qp's on ifdown */
+ /* teardown connected qp's on ifdown */
if (!ifup)
- i40iw_cm_disconnect_all(iwdev);
+ i40iw_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 0d5840d2c4fc..cf60c451e071 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -276,8 +276,6 @@ struct i40iw_cm_tcp_context {
u32 mss;
u8 snd_wscale;
u8 rcv_wscale;
-
- struct timeval sent_ts;
};
enum i40iw_cm_listener_state {
@@ -337,7 +335,7 @@ struct i40iw_cm_node {
u16 mpav2_ird_ord;
struct iw_cm_id *cm_id;
struct list_head list;
- int accelerated;
+ bool accelerated;
struct i40iw_cm_listener *listener;
int apbvt_set;
int accept_pend;
@@ -455,5 +453,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
-void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
+void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
+ struct i40iw_cm_info *nfo,
+ bool disconnect_all);
#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index da9821a10e0d..c74fd3309b93 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1893,8 +1893,6 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
u32 count)
{
- if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
- return I40IW_ERR_INVALID_SIZE;
if (dev->is_pf)
i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
@@ -3872,7 +3870,6 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
struct i40iw_virt_mem virt_mem;
u32 i, mem_size;
u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
- u32 powerof2;
u64 sd_needed;
u32 loop_count = 0;
@@ -3928,8 +3925,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
+ roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
+ roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
@@ -3945,16 +3944,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
if ((loop_count > 1000) ||
((!(loop_count % 10)) &&
(qpwanted > qpwantedoriginal * 2 / 3))) {
- if (qpwanted > FPM_MULTIPLIER) {
- qpwanted -= FPM_MULTIPLIER;
- powerof2 = 1;
- while (powerof2 < qpwanted)
- powerof2 *= 2;
- powerof2 /= 2;
- qpwanted = powerof2;
- } else {
- qpwanted /= 2;
- }
+ if (qpwanted > FPM_MULTIPLIER)
+ qpwanted = roundup_pow_of_two(qpwanted -
+ FPM_MULTIPLIER);
+ qpwanted >>= 1;
}
if (mrwanted > FPM_MULTIPLIER * 10)
mrwanted -= FPM_MULTIPLIER * 10;
@@ -3962,8 +3955,6 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
pblewanted -= FPM_MULTIPLIER * 1000;
} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
- sd_needed = i40iw_est_sd(dev, hmc_info);
-
i40iw_debug(dev, I40IW_DEBUG_HMC,
"loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
loop_count, sd_needed,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 029083cb81d5..4b65e4140bd7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -97,6 +97,7 @@
#define RDMA_OPCODE_MASK 0x0f
#define RDMA_READ_REQ_OPCODE 1
#define Q2_BAD_FRAME_OFFSET 72
+#define Q2_FPSN_OFFSET 64
#define CQE_MAJOR_DRV 0x8000
#define I40IW_TERM_SENT 0x01
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index e96bdafbcbb3..61540e14e4b9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -385,6 +385,8 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
}
break;
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ break;
case I40IW_AE_PRIV_OPERATION_DENIED:
case I40IW_AE_STAG_ZERO_INVALID:
case I40IW_AE_IB_RREQ_AND_Q1_FULL:
@@ -403,7 +405,6 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
case I40IW_AE_LLP_SYN_RECEIVED:
case I40IW_AE_LLP_TOO_MANY_RETRIES:
- case I40IW_AE_LLP_DOUBT_REACHABILITY:
case I40IW_AE_LCE_QP_CATASTROPHIC:
case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
case I40IW_AE_LCE_CQ_CATASTROPHIC:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e824296713e2..b08862978de8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -99,6 +99,10 @@ static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event
};
+static struct notifier_block i40iw_netdevice_notifier = {
+ .notifier_call = i40iw_netdevice_event
+};
+
/**
* i40iw_find_i40e_handler - find a handler given a client info
* @ldev: pointer to a client info
@@ -483,6 +487,7 @@ static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
info.rsrc_type = iw_hmc_obj_types[i];
info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+ info.add_sd_cnt = 0;
status = i40iw_create_hmc_obj_type(dev, &info);
if (status) {
i40iw_pr_err("create obj type %d status = %d\n",
@@ -607,7 +612,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
/* init the waitq of the cqp_requests and add them to the list */
- for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
+ for (i = 0; i < sqsize; i++) {
init_waitqueue_head(&cqp->cqp_requests[i].waitq);
list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
}
@@ -1285,7 +1290,7 @@ static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
__LINE__, statuscpu2);
if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
break; /* SUCCESS */
- mdelay(1000);
+ msleep(1000);
retrycount++;
} while (retrycount < 14);
i40iw_wr32(hw, 0xb4040, 0x4C104C5);
@@ -1393,6 +1398,7 @@ static void i40iw_register_notifiers(void)
register_inetaddr_notifier(&i40iw_inetaddr_notifier);
register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
register_netevent_notifier(&i40iw_net_notifier);
+ register_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
@@ -1404,6 +1410,7 @@ static void i40iw_unregister_notifiers(void)
unregister_netevent_notifier(&i40iw_net_notifier);
unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
@@ -1793,7 +1800,7 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
if (reset)
iwdev->reset = true;
- i40iw_cm_disconnect_all(iwdev);
+ i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
destroy_workqueue(iwdev->virtchnl_wq);
i40iw_deinit_device(iwdev);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 796a815b53fd..4c21197830b3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -48,7 +48,6 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
*rsrc, bool initial);
-static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
/**
* i40iw_puda_get_listbuf - get buffer from puda list
* @list: list to use for buffers (ILQ or IEQ)
@@ -1378,7 +1377,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
u32 rcv_wnd = hw_host_ctx[23];
/* first partial seq # in q2 */
- u32 fps = qp->q2_buf[16];
+ u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
struct list_head *rxlist = &pfpdu->rxlist;
struct list_head *plist;
@@ -1483,7 +1482,7 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
* @ieq: ieq resource
* @qp: all pending fpdu buffers
*/
-static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
+void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
{
struct i40iw_puda_buf *buf;
struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index 660aa3edae56..53a7d58c84b5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -184,4 +184,5 @@ enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
+void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 3ec5389a81a1..8afa5a67a86b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -894,20 +894,6 @@ exit:
}
/**
- * i40iw_qp_roundup - return round up QP WQ depth
- * @wqdepth: WQ depth in quantas to round up
- */
-static int i40iw_qp_round_up(u32 wqdepth)
-{
- int scount = 1;
-
- for (wqdepth--; scount <= 16; scount *= 2)
- wqdepth |= wqdepth >> scount;
-
- return ++wqdepth;
-}
-
-/**
* i40iw_get_wqe_shift - get shift count for maximum wqe size
* @sge: Maximum Scatter Gather Elements wqe
* @inline_data: Maximum inline data size
@@ -934,7 +920,7 @@ void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
*/
enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
{
- *sqdepth = i40iw_qp_round_up((sq_size << shift) + I40IW_SQ_RSVD);
+ *sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD);
if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
*sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
@@ -953,7 +939,7 @@ enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
*/
enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
{
- *rqdepth = i40iw_qp_round_up((rq_size << shift) + I40IW_RQ_RSVD);
+ *rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD);
if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
*rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
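The deleted i40iw_qp_round_up() and roundup_pow_of_two() agree on every depth in range; a userspace check, where pow2_round_up() reimplements the kernel macro for illustration via the GCC/Clang __builtin_clz builtin:

#include <stdio.h>

static unsigned int smear_round_up(unsigned int wqdepth)
{
	/* the deleted i40iw_qp_round_up() bit-smearing logic */
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;
	return ++wqdepth;
}

static unsigned int pow2_round_up(unsigned int v)
{
	/* roundup_pow_of_two() equivalent for 32-bit v >= 1 */
	return v <= 1 ? 1 : 1u << (32 - __builtin_clz(v - 1));
}

int main(void)
{
	unsigned int v;

	for (v = 1; v <= 4096; v++)
		if (smear_round_up(v) != pow2_round_up(v)) {
			printf("mismatch at %u\n", v);
			return 1;
		}
	printf("identical for 1..4096\n");
	return 0;
}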
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index e73efc59a0ab..b125925641e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -59,7 +59,6 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_CEQ_ENTRIES = 131071,
I40IW_MIN_CQ_SIZE = 1,
I40IW_MAX_CQ_SIZE = 1048575,
- I40IW_MAX_AEQ_ALLOCATE_COUNT = 255,
I40IW_DB_ID_ZERO = 0,
I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
I40IW_MAX_SGE_RD = 1,
@@ -72,7 +71,7 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
- I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_IRD_SIZE = 64,
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
I40IW_Q2_BUFFER_SIZE = (248 + 100),
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 8845dba7c438..ddc1056b0b4e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -137,7 +137,7 @@ inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
}
/**
- * i40iw_inetaddr_event - system notifier for netdev events
+ * i40iw_inetaddr_event - system notifier for ipv4 addr events
* @notfier: not used
* @event: event for notifier
* @ptr: if address
@@ -200,7 +200,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
}
/**
- * i40iw_inet6addr_event - system notifier for ipv6 netdev events
+ * i40iw_inet6addr_event - system notifier for ipv6 addr events
* @notfier: not used
* @event: event for notifier
* @ptr: if address
@@ -252,7 +252,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
}
/**
- * i40iw_net_event - system notifier for net events
+ * i40iw_net_event - system notifier for netevents
* @notfier: not used
* @event: event for notifier
* @ptr: neighbor
@@ -297,6 +297,50 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
}
/**
+ * i40iw_netdevice_event - system notifier for netdev events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: netdev
+ */
+int i40iw_netdevice_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *event_netdev;
+ struct net_device *netdev;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+
+ hdl = i40iw_find_netdev(event_netdev);
+ if (!hdl)
+ return NOTIFY_DONE;
+
+ iwdev = &hdl->device;
+ if (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing)
+ return NOTIFY_DONE;
+
+ netdev = iwdev->ldev->netdev;
+ if (netdev != event_netdev)
+ return NOTIFY_DONE;
+
+ iwdev->iw_status = 1;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ iwdev->iw_status = 0;
+ /* Fall through */
+ case NETDEV_UP:
+ i40iw_port_ibevent(iwdev);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
* i40iw_get_cqp_request - get cqp struct
* @cqp: device cqp ptr
* @wait: cqp to be used in wait mode
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 3c6f3ce88f89..70024e8e2692 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -412,6 +412,7 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
{
struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+ i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
if (qp_num)
i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
@@ -1637,6 +1638,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
err_code = -EOVERFLOW;
goto err;
}
+ stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
iwmr->stag = stag;
iwmr->ibmr.rkey = stag;
iwmr->ibmr.lkey = stag;
@@ -2242,14 +2244,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
} else {
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
}
@@ -2271,7 +2271,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op_type = I40IW_OP_TYPE_RDMA_READ;
info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index bf4f14a1b4fc..9a566ee3ceff 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -170,7 +170,7 @@ err_buf:
return err;
}
-#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
+#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
@@ -246,7 +246,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
cq->db.dma, &cq->mcq, vector, 0,
- !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
if (err)
goto err_dbmap;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8c8a16791a3f..8d2ee9322f2e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -589,6 +589,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
if (props->rss_caps.supported_qpts) {
resp.rss_caps.rx_hash_function =
MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
+
resp.rss_caps.rx_hash_fields_mask =
MLX4_IB_RX_HASH_SRC_IPV4 |
MLX4_IB_RX_HASH_DST_IPV4 |
@@ -598,6 +599,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
MLX4_IB_RX_HASH_DST_PORT_TCP |
MLX4_IB_RX_HASH_SRC_PORT_UDP |
MLX4_IB_RX_HASH_DST_PORT_UDP;
+
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ resp.rss_caps.rx_hash_fields_mask |=
+ MLX4_IB_RX_HASH_INNER;
}
}
@@ -2995,9 +3001,8 @@ err_steer_free_bitmap:
kfree(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
err_counter:
for (i = 0; i < ibdev->num_ports; ++i)
mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
@@ -3102,11 +3107,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->iboe.nb.notifier_call = NULL;
}
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
- }
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
+ kfree(ibdev->ib_uc_qpns_bitmap);
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 313bfb9ccb71..4975f3e6596e 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
goto err_free_mr;
mr->max_pages = max_num_sg;
-
err = mlx4_mr_enable(dev->dev, &mr->mmr);
if (err)
goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
return &mr->ibmr;
err_free_pl:
+ mr->ibmr.device = pd->device;
mlx4_free_priv_pages(mr);
err_free_mr:
(void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index caf490ab24c8..f045491f2c14 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -734,10 +734,24 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
+ if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ /*
+ * Hash according to inner headers if they exist, otherwise
+ * according to outer headers.
+ */
+ rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
+ } else {
+ pr_debug("RSS Hash for inner headers isn't supported\n");
+ return (-EOPNOTSUPP);
+ }
+ }
+
return 0;
}
-static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_pd *ibpd,
+static int create_qp_rss(struct mlx4_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx4_ib_create_qp_rss *ucmd,
struct mlx4_ib_qp *qp)
@@ -860,7 +874,7 @@ static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
- err = create_qp_rss(to_mdev(pd->device), pd, init_attr, &ucmd, qp);
+ err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
@@ -1836,6 +1850,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
mlx4_ib_gid_index_to_real_index(dev, port,
grh->sgid_index);
+ if (real_sgid_index < 0)
+ return real_sgid_index;
if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
pr_err("sgid_index (%u) too large. max is %d\n",
real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 2d32b519bb61..985fa2637390 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -247,21 +247,30 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
}
}
-static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
+static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+ int offset, u32 *var)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
void *out;
void *field;
int err;
enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+ if (!out) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
node = mlx5_ib_param_to_node(offset);
- err = mlx5_cmd_query_cong_params(dev->mdev, node, out, outlen);
+ err = mlx5_cmd_query_cong_params(mdev, node, out, outlen);
if (err)
goto free;
@@ -270,21 +279,32 @@ static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
free:
kvfree(out);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
return err;
}
-static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
+static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+ int offset, u32 var)
{
int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
void *in;
void *field;
enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
u32 attr_mask = 0;
int err;
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
+
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ if (!in) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
MLX5_SET(modify_cong_params_in, in, opcode,
MLX5_CMD_OP_MODIFY_CONG_PARAMS);
@@ -299,8 +319,10 @@ static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
attr_mask);
- err = mlx5_cmd_modify_cong_params(dev->mdev, in, inlen);
+ err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
kvfree(in);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
return err;
}
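Both accessors above follow the same acquire/allocate/put shape, with the put taken on every path after a successful get. A generic userspace sketch of that unwind discipline (names here are illustrative, not driver API):

#include <stdio.h>
#include <stdlib.h>

struct res { int id; };

static struct res *get_res(void)
{
	static struct res r = { 1 };
	return &r;			/* may return NULL in the real code */
}

static void put_res(struct res *r)
{
	(void)r;			/* drop the reference */
}

static int do_work(void)
{
	struct res *r = get_res();
	char *buf;
	int err = 0;

	if (!r)
		return -19;		/* -ENODEV: nothing to put yet */

	buf = calloc(1, 64);
	if (!buf) {
		err = -12;		/* -ENOMEM */
		goto alloc_err;
	}

	/* ... issue the command against r using buf ... */

	free(buf);
alloc_err:
	put_res(r);			/* put on every path after the get */
	return err;
}

int main(void)
{
	printf("err = %d\n", do_work());
	return 0;
}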
@@ -324,7 +346,7 @@ static ssize_t set_param(struct file *filp, const char __user *buf,
if (kstrtou32(lbuf, 0, &var))
return -EINVAL;
- ret = mlx5_ib_set_cc_params(param->dev, offset, var);
+ ret = mlx5_ib_set_cc_params(param->dev, param->port_num, offset, var);
return ret ? ret : count;
}
@@ -340,7 +362,7 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
if (*pos)
return 0;
- ret = mlx5_ib_get_cc_params(param->dev, offset, &var);
+ ret = mlx5_ib_get_cc_params(param->dev, param->port_num, offset, &var);
if (ret)
return ret;
@@ -362,44 +384,51 @@ static const struct file_operations dbg_cc_fops = {
.read = get_param,
};
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev)
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
{
if (!mlx5_debugfs_root ||
- !dev->dbg_cc_params ||
- !dev->dbg_cc_params->root)
+ !dev->port[port_num].dbg_cc_params ||
+ !dev->port[port_num].dbg_cc_params->root)
return;
- debugfs_remove_recursive(dev->dbg_cc_params->root);
- kfree(dev->dbg_cc_params);
- dev->dbg_cc_params = NULL;
+ debugfs_remove_recursive(dev->port[port_num].dbg_cc_params->root);
+ kfree(dev->port[port_num].dbg_cc_params);
+ dev->port[port_num].dbg_cc_params = NULL;
}
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
{
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_core_dev *mdev;
int i;
if (!mlx5_debugfs_root)
goto out;
- if (!MLX5_CAP_GEN(dev->mdev, cc_query_allowed) ||
- !MLX5_CAP_GEN(dev->mdev, cc_modify_allowed))
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
goto out;
+ if (!MLX5_CAP_GEN(mdev, cc_query_allowed) ||
+ !MLX5_CAP_GEN(mdev, cc_modify_allowed))
+ goto put_mdev;
+
dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL);
if (!dbg_cc_params)
- goto out;
+ goto err;
- dev->dbg_cc_params = dbg_cc_params;
+ dev->port[port_num].dbg_cc_params = dbg_cc_params;
dbg_cc_params->root = debugfs_create_dir("cc_params",
- dev->mdev->priv.dbg_root);
+ mdev->priv.dbg_root);
if (!dbg_cc_params->root)
goto err;
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
+ dbg_cc_params->params[i].port_num = port_num;
dbg_cc_params->params[i].dentry =
debugfs_create_file(mlx5_ib_dbg_cc_name[i],
0600, dbg_cc_params->root,
@@ -408,11 +437,17 @@ int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
if (!dbg_cc_params->params[i].dentry)
goto err;
}
-out: return 0;
+
+put_mdev:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+out:
+ return 0;
err:
mlx5_ib_warn(dev, "cong debugfs failure\n");
- mlx5_ib_cleanup_cong_debugfs(dev);
+ mlx5_ib_cleanup_cong_debugfs(dev, port_num);
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+
/*
* We don't want to fail driver if debugfs failed to initialize,
* so we are not forwarding error to the user.
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 18705cbcdc8c..5b974fb97611 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1010,7 +1010,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
- if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+ if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 1003b0133a49..32a9e9228b13 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -197,10 +197,9 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
-static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
+static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
- struct mlx5_ib_dev *dev = to_mdev(ibdev);
int err;
void *out_cnt;
@@ -222,7 +221,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
- err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
+ err = mlx5_core_query_vport_counter(mdev, 0, 0,
port_num, out_cnt, sz);
if (!err)
pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
@@ -235,7 +234,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
- err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
+ err = mlx5_core_query_ib_ppcnt(mdev, port_num,
out_cnt, sz);
if (!err)
pma_cnt_assign(pma_cnt, out_cnt);
@@ -255,9 +254,11 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
u16 *out_mad_pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
+ struct mlx5_core_dev *mdev;
+ u8 mdev_port_num;
+ int ret;
if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)))
@@ -265,14 +266,20 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
memset(out_mad->data, 0, sizeof(out_mad->data));
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return IB_MAD_RESULT_FAILURE;
+
if (MLX5_CAP_GEN(mdev, vport_counters) &&
in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
- return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
+ ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
} else {
- return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
in_mad, out_mad);
}
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return ret;
}
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
@@ -519,7 +526,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
int ext_active_speed;
int err = -ENOMEM;
- if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
+ if (port < 1 || port > dev->num_ports) {
mlx5_ib_warn(dev, "invalid port number %d\n", port);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8ac50de2b242..4236c8086820 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -50,16 +50,14 @@
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
-#include <linux/mlx5/fs.h>
-#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#include "cmd.h"
-#include <linux/mlx5/vport.h>
#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"
@@ -72,10 +70,36 @@ static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
DRIVER_VERSION "\n";
+struct mlx5_ib_event_work {
+ struct work_struct work;
+ struct mlx5_core_dev *dev;
+ void *context;
+ enum mlx5_dev_event event;
+ unsigned long param;
+};
+
enum {
MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};
+static struct workqueue_struct *mlx5_ib_event_wq;
+static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
+static LIST_HEAD(mlx5_ib_dev_list);
+/*
+ * This mutex should be held when accessing either of the above lists
+ */
+static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
+
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
+{
+ struct mlx5_ib_dev *dev;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ dev = mpi->ibdev;
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return dev;
+}
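
mlx5_ib_get_ibdev_from_mpi() takes the global multiport mutex only so that the read of mpi->ibdev is consistent with concurrent bind/unbind, which update the pointer under the same lock. A minimal userspace model of that getter, using pthreads and illustrative stand-in types rather than the driver's real ones:

#include <pthread.h>
#include <stddef.h>

struct ibdev;                        /* opaque stand-in for mlx5_ib_dev */
struct mpi { struct ibdev *ibdev; }; /* stand-in for mlx5_ib_multiport_info */

static pthread_mutex_t multiport_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct ibdev *get_ibdev_from_mpi(struct mpi *mpi)
{
	struct ibdev *dev;

	/* The lock makes this read atomic with respect to bind/unbind,
	 * which write mpi->ibdev under the same mutex. */
	pthread_mutex_lock(&multiport_mutex);
	dev = mpi->ibdev;
	pthread_mutex_unlock(&multiport_mutex);
	return dev;
}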
+
static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
@@ -115,24 +139,32 @@ static int get_port_state(struct ib_device *ibdev,
static int mlx5_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
- roce.nb);
+ u8 port_num = roce->native_port_num;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = roce->dev;
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
- write_lock(&ibdev->roce.netdev_lock);
- if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
- ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
- NULL : ndev;
- write_unlock(&ibdev->roce.netdev_lock);
+ write_lock(&roce->netdev_lock);
+
+ if (ndev->dev.parent == &mdev->pdev->dev)
+ roce->netdev = (event == NETDEV_UNREGISTER) ?
+ NULL : ndev;
+ write_unlock(&roce->netdev_lock);
break;
case NETDEV_CHANGE:
case NETDEV_UP:
case NETDEV_DOWN: {
- struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
struct net_device *upper = NULL;
if (lag_ndev) {
@@ -140,27 +172,28 @@ static int mlx5_netdev_event(struct notifier_block *this,
dev_put(lag_ndev);
}
- if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
+ if ((upper == ndev || (!upper && ndev == roce->netdev))
&& ibdev->ib_active) {
struct ib_event ibev = { };
enum ib_port_state port_state;
- if (get_port_state(&ibdev->ib_dev, 1, &port_state))
- return NOTIFY_DONE;
+ if (get_port_state(&ibdev->ib_dev, port_num,
+ &port_state))
+ goto done;
- if (ibdev->roce.last_port_state == port_state)
- return NOTIFY_DONE;
+ if (roce->last_port_state == port_state)
+ goto done;
- ibdev->roce.last_port_state = port_state;
+ roce->last_port_state = port_state;
ibev.device = &ibdev->ib_dev;
if (port_state == IB_PORT_DOWN)
ibev.event = IB_EVENT_PORT_ERR;
else if (port_state == IB_PORT_ACTIVE)
ibev.event = IB_EVENT_PORT_ACTIVE;
else
- return NOTIFY_DONE;
+ goto done;
- ibev.element.port_num = 1;
+ ibev.element.port_num = port_num;
ib_dispatch_event(&ibev);
}
break;
@@ -169,7 +202,8 @@ static int mlx5_netdev_event(struct notifier_block *this,
default:
break;
}
-
+done:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
return NOTIFY_DONE;
}
@@ -178,22 +212,88 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
{
struct mlx5_ib_dev *ibdev = to_mdev(device);
struct net_device *ndev;
+ struct mlx5_core_dev *mdev;
- ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NULL;
+
+ ndev = mlx5_lag_get_roce_netdev(mdev);
if (ndev)
- return ndev;
+ goto out;
/* Ensure ndev does not disappear before we invoke dev_hold()
*/
- read_lock(&ibdev->roce.netdev_lock);
- ndev = ibdev->roce.netdev;
+ read_lock(&ibdev->roce[port_num - 1].netdev_lock);
+ ndev = ibdev->roce[port_num - 1].netdev;
if (ndev)
dev_hold(ndev);
- read_unlock(&ibdev->roce.netdev_lock);
+ read_unlock(&ibdev->roce[port_num - 1].netdev_lock);
+out:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
return ndev;
}
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
+ u8 ib_port_num,
+ u8 *native_port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ ib_port_num);
+ struct mlx5_core_dev *mdev = NULL;
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (native_port_num)
+ *native_port_num = 1;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return ibdev->mdev;
+
+ port = &ibdev->port[ib_port_num - 1];
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[ib_port_num - 1].mp.mpi;
+ if (mpi && !mpi->unaffiliate) {
+ mdev = mpi->mdev;
+ /* If it's the master there is no need to refcount; it will
+ * exist as long as the ib_dev exists.
+ */
+ if (!mpi->is_master)
+ mpi->mdev_refcnt++;
+ }
+ spin_unlock(&port->mp.mpi_lock);
+
+ return mdev;
+}
+
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ port_num);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ port = &ibdev->port[port_num - 1];
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[port_num - 1].mp.mpi;
+ if (mpi->is_master)
+ goto out;
+
+ mpi->mdev_refcnt--;
+ if (mpi->unaffiliate)
+ complete(&mpi->unref_comp);
+out:
+ spin_unlock(&port->mp.mpi_lock);
+}
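
The get/put pair above is the core API this patch threads through the driver: every per-port operation resolves the IB port number to the mlx5_core_dev that currently backs it, then drops the reference. A condensed caller sketch modeled on the real callers later in this patch (mlx5_query_port_roce(), set_port_caps_atomic()); do_hw_query() is a hypothetical placeholder, not a driver function:

/* do_hw_query() stands in for any firmware query against the port. */
static int do_hw_query(struct mlx5_core_dev *mdev, u8 native_port);

static int example_port_query(struct mlx5_ib_dev *dev, u8 port_num)
{
	struct mlx5_core_dev *mdev;
	u8 mdev_port_num;
	int err;

	/* Resolve the IB port to the core device that backs it today;
	 * on a slave port this bumps mpi->mdev_refcnt. */
	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev)
		return -ENODEV;          /* port not affiliated yet */

	err = do_hw_query(mdev, mdev_port_num);

	/* Drop the reference; if an unaffiliate is in flight, this is
	 * what lets mlx5_ib_unbind_slave_port() make progress. */
	mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}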
+
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
{
@@ -256,19 +356,33 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_dev *mdev;
struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
+ bool put_mdev = true;
u16 qkey_viol_cntr;
u32 eth_prot_oper;
+ u8 mdev_port_num;
int err;
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev) {
+ /* This means the port isn't affiliated yet. Get the
+ * info for the master port instead.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ port_num = 1;
+ }
+
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
*/
- err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
+ err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
+ mdev_port_num);
if (err)
- return err;
+ goto out;
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
@@ -284,12 +398,16 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
props->state = IB_PORT_DOWN;
props->phys_state = 3;
- mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
+ mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
props->qkey_viol_cntr = qkey_viol_cntr;
+ /* If this is a stub query for an unaffiliated port, stop here */
+ if (!put_mdev)
+ goto out;
+
ndev = mlx5_ib_get_netdev(device, port_num);
if (!ndev)
- return 0;
+ goto out;
if (mlx5_lag_is_active(dev->mdev)) {
rcu_read_lock();
@@ -312,7 +430,10 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
dev_put(ndev);
props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
- return 0;
+out:
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return err;
}
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
@@ -354,7 +475,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
roce_l3_type, gid->raw, mac, vlan,
- vlan_id);
+ vlan_id, port_num);
}
static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
@@ -438,11 +559,11 @@ static int mlx5_get_vport_access_method(struct ib_device *ibdev)
}
static void get_atomic_caps(struct mlx5_ib_dev *dev,
+ u8 atomic_size_qp,
struct ib_device_attr *props)
{
u8 tmp;
u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
- u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
u8 atomic_req_8B_endianness_mode =
MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
@@ -459,6 +580,29 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev,
}
}
+static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
+static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
+bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
+{
+ struct ib_device_attr props = {};
+
+ get_atomic_caps_dc(dev, &props);
+ return props.atomic_cap == IB_ATOMIC_HCA;
+}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
@@ -587,6 +731,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+ bool raw_support = !mlx5_core_mp_enabled(mdev);
struct mlx5_ib_query_device_resp resp = {};
size_t resp_len;
u64 max_tso;
@@ -650,7 +795,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
- if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
if (MLX5_CAP_ETH(mdev, csum_cap)) {
/* Legacy bit to support old userspace libraries */
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
@@ -682,7 +827,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_RX_HASH_SRC_PORT_TCP |
MLX5_RX_HASH_DST_PORT_TCP |
MLX5_RX_HASH_SRC_PORT_UDP |
- MLX5_RX_HASH_DST_PORT_UDP;
+ MLX5_RX_HASH_DST_PORT_UDP |
+ MLX5_RX_HASH_INNER;
resp.response_length += sizeof(resp.rss_caps);
}
} else {
@@ -698,7 +844,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
- MLX5_CAP_GEN(dev->mdev, general_notification_event))
+ MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
+ raw_support)
props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
@@ -706,7 +853,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
- MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
+ MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
+ raw_support) {
/* Legacy bit to support old userspace libraries */
props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
@@ -746,7 +894,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len =
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
- get_atomic_caps(dev, props);
+ get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
@@ -770,7 +918,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
if (mlx5_ib_port_link_layer(ibdev, 1) ==
- IB_LINK_LAYER_ETHERNET) {
+ IB_LINK_LAYER_ETHERNET && raw_support) {
props->rss_caps.max_rwq_indirection_tables =
1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
props->rss_caps.max_rwq_indirection_table_size =
@@ -807,7 +955,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cqe_comp_caps);
}
- if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
+ raw_support) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
resp.packet_pacing_caps.qp_rate_limit_max =
@@ -866,7 +1015,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
+ raw_support) {
resp.response_length += sizeof(resp.striding_rq_caps);
if (MLX5_CAP_GEN(mdev, striding_rq)) {
resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
@@ -1097,7 +1247,22 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
}
if (!ret && props) {
- count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
+ if (!mdev) {
+ /* If the port isn't affiliated yet, query the master.
+ * The master and slave will have the same values.
+ */
+ mdev = dev->mdev;
+ port = 1;
+ put_mdev = false;
+ }
+ count = mlx5_core_reserved_gids_count(mdev);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
props->gid_tbl_len -= count;
}
return ret;
@@ -1122,20 +1287,43 @@ static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
+ u16 index, u16 *pkey)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+ u8 mdev_port_num;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
+ if (!mdev) {
+ /* The port isn't affiliated yet; get the PKey from the master
+ * port. For RoCE the PKey tables will be the same.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
+ err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
+ index, pkey);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
+ return err;
+}
+
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
case MLX5_VPORT_ACCESS_METHOD_HCA:
case MLX5_VPORT_ACCESS_METHOD_NIC:
- return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
- pkey);
+ return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
default:
return -EINVAL;
}
@@ -1174,23 +1362,32 @@ static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
u32 value)
{
struct mlx5_hca_vport_context ctx = {};
+ struct mlx5_core_dev *mdev;
+ u8 mdev_port_num;
int err;
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port_num, 0, &ctx);
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return -ENODEV;
+
+ err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
if (err)
- return err;
+ goto out;
if (~ctx.cap_mask1_perm & mask) {
mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
mask, ctx.cap_mask1_perm);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
ctx.cap_mask1 = value;
ctx.cap_mask1_perm = mask;
- err = mlx5_core_modify_hca_vport_context(dev->mdev, 0,
- port_num, 0, &ctx);
+ err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
+ 0, &ctx);
+
+out:
+ mlx5_ib_put_native_port_mdev(dev, port_num);
return err;
}
@@ -1241,9 +1438,18 @@ static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}
+static u16 calc_dynamic_bfregs(int uars_per_sys_page)
+{
+ /* A large system page with non-4K UAR support may limit the dynamic size */
+ if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
+ return MLX5_MIN_DYN_BFREGS;
+
+ return MLX5_MAX_DYN_BFREGS;
+}
+
static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
struct mlx5_ib_alloc_ucontext_req_v2 *req,
- u32 *num_sys_pages)
+ struct mlx5_bfreg_info *bfregi)
{
int uars_per_sys_page;
int bfregs_per_sys_page;
@@ -1260,16 +1466,21 @@ static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+ /* This holds the static allocation requested by the user */
req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
- *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
-
if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
return -EINVAL;
- mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
+ bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+ bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
+ bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
+ bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
+
+ mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
lib_uar_4k ? "yes" : "no", ref_bfregs,
- req->total_num_bfregs, *num_sys_pages);
+ req->total_num_bfregs, bfregi->total_num_bfregs,
+ bfregi->num_sys_pages);
return 0;
}
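
The bfreg accounting in calc_total_bfregs() is easier to follow with concrete numbers. A standalone sketch of the same arithmetic; the two constants are illustrative stand-ins for MLX5_NON_FP_BFREGS_PER_UAR and MLX5_MAX_DYN_BFREGS, whose real values live in the driver headers:

#include <stdio.h>

#define NON_FP_BFREGS_PER_UAR 4    /* illustrative stand-in */
#define MAX_DYN_BFREGS        1024 /* illustrative stand-in */
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int uars_per_sys_page = 1;  /* e.g. 4K pages, no uar_4k */
	int bfregs_per_sys_page = uars_per_sys_page * NON_FP_BFREGS_PER_UAR;
	int req_bfregs = ALIGN_UP(10, bfregs_per_sys_page); /* user asked for 10 -> 12 */
	int num_static_sys_pages = req_bfregs / bfregs_per_sys_page;
	int num_dyn_bfregs = ALIGN_UP(MAX_DYN_BFREGS, bfregs_per_sys_page);
	int total = req_bfregs + num_dyn_bfregs;

	printf("static pages=%d dyn bfregs=%d total bfregs=%d sys pages=%d\n",
	       num_static_sys_pages, num_dyn_bfregs, total,
	       total / bfregs_per_sys_page);
	return 0;
}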
@@ -1281,13 +1492,17 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte
int i;
bfregi = &context->bfregi;
- for (i = 0; i < bfregi->num_sys_pages; i++) {
+ for (i = 0; i < bfregi->num_static_sys_pages; i++) {
err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
if (err)
goto error;
mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
}
+
+ for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
+ bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
+
return 0;
error:
@@ -1306,12 +1521,16 @@ static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *con
bfregi = &context->bfregi;
for (i = 0; i < bfregi->num_sys_pages; i++) {
- err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
- if (err) {
- mlx5_ib_warn(dev, "failed to free uar %d\n", i);
- return err;
+ if (i < bfregi->num_static_sys_pages ||
+ bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
+ err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
+ return err;
+ }
}
}
+
return 0;
}
@@ -1324,7 +1543,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
return err;
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
- !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return err;
mutex_lock(&dev->lb_mutex);
@@ -1342,7 +1562,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
- !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return;
mutex_lock(&dev->lb_mutex);
@@ -1360,6 +1581,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_alloc_ucontext_req_v2 req = {};
struct mlx5_ib_alloc_ucontext_resp resp = {};
+ struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_ucontext *context;
struct mlx5_bfreg_info *bfregi;
int ver;
@@ -1420,13 +1642,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
bfregi = &context->bfregi;
/* updates req->total_num_bfregs */
- err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
+ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
goto out_ctx;
mutex_init(&bfregi->lock);
bfregi->lib_uar_4k = lib_uar_4k;
- bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
+ bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
@@ -1468,7 +1690,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
mutex_init(&context->db_page_mutex);
resp.tot_bfregs = req.total_num_bfregs;
- resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
+ resp.num_ports = dev->num_ports;
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
@@ -1487,6 +1709,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.eth_min_inline);
}
+ if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
+ if (mdev->clock_info)
+ resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
+ resp.response_length += sizeof(resp.clock_info_versions);
+ }
+
/*
* We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's
@@ -1500,8 +1728,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.hca_core_clock_offset =
offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
}
- resp.response_length += sizeof(resp.hca_core_clock_offset) +
- sizeof(resp.reserved2);
+ resp.response_length += sizeof(resp.hca_core_clock_offset);
}
if (field_avail(typeof(resp), log_uar_size, udata->outlen))
@@ -1510,6 +1737,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
resp.response_length += sizeof(resp.num_uars_per_page);
+ if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
+ resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
+ resp.response_length += sizeof(resp.num_dyn_bfregs);
+ }
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto out_td;
@@ -1564,15 +1796,13 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi,
- int idx)
+ int uar_idx)
{
int fw_uars_per_page;
fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
- return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
- bfregi->sys_pages[idx] / fw_uars_per_page;
+ return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
static int get_command(unsigned long offset)
@@ -1590,6 +1820,12 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
+/* The index resides in an extra byte, allowing values larger than 255 */
+static int get_extended_index(unsigned long offset)
+{
+ return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
+}
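
The extended index can be exercised in isolation. This round-trip sketch assumes the command occupies bits 8-15 of the page offset (MLX5_IB_MMAP_CMD_MASK is 0xff per mlx5_ib.h, and MLX5_IB_MMAP_CMD_SHIFT is assumed to be 8) and that get_arg() returns the low byte; the ALLOC_WC command value used is hypothetical:

#include <assert.h>
#include <stdio.h>

#define CMD_SHIFT 8    /* assumed: MLX5_IB_MMAP_CMD_SHIFT */
#define CMD_MASK  0xff /* MLX5_IB_MMAP_CMD_MASK */

static unsigned long encode(int cmd, int idx)
{
	/* index low byte in bits 0-7, command in bits 8-15,
	 * index high byte in bits 16-23 */
	return (idx & 0xff) |
	       ((unsigned long)(cmd & CMD_MASK) << CMD_SHIFT) |
	       (((unsigned long)(idx >> 8) & 0xff) << 16);
}

static int get_command(unsigned long offset)
{
	return (offset >> CMD_SHIFT) & CMD_MASK;
}

static int get_extended_index(unsigned long offset)
{
	/* mirrors the kernel helper above */
	return (offset & 0xff) | (((offset >> 16) & 0xff) << 8);
}

int main(void)
{
	unsigned long off = encode(6 /* hypothetical ALLOC_WC value */, 300);

	assert(get_command(off) == 6);
	assert(get_extended_index(off) == 300);
	printf("round-trip ok: idx=%d\n", get_extended_index(off));
	return 0;
}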
+
static void mlx5_ib_vma_open(struct vm_area_struct *area)
{
/* vma_open is called when a new VMA is created on top of our VMA. This
@@ -1731,6 +1967,38 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
}
}
+static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
+{
+ phys_addr_t pfn;
+ int err;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
+ return -EOPNOTSUPP;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (!dev->mdev->clock_info_page)
+ return -EOPNOTSUPP;
+
+ pfn = page_to_pfn(dev->mdev->clock_info_page);
+ err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+ vma->vm_page_prot);
+ if (err)
+ return err;
+
+ mlx5_ib_dbg(dev, "mapped clock info at 0x%lx, PA 0x%llx\n",
+ vma->vm_start,
+ (unsigned long long)pfn << PAGE_SHIFT);
+
+ return mlx5_ib_set_vma_data(vma, context);
+}
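
From userspace the clock info page surfaces as a read-only mapping; asking for PROT_WRITE fails with -EPERM per the VM_WRITE check above. A hedged consumer sketch, assuming the caller already holds the uverbs command fd and an offset encoded as in the previous sketch (both assumptions for illustration; a real provider library owns this encoding):

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* cmd_fd is the uverbs command fd; offset encodes the clock-info
 * command and index. Both are assumptions for illustration only. */
static const void *map_clock_info(int cmd_fd, off_t offset)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *p;

	/* PROT_READ only: the kernel side rejects VM_WRITE with -EPERM */
	p = mmap(NULL, page_size, PROT_READ, MAP_SHARED, cmd_fd, offset);
	return p == MAP_FAILED ? NULL : p;
}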
+
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
@@ -1740,21 +2008,29 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
unsigned long idx;
phys_addr_t pfn, pa;
pgprot_t prot;
- int uars_per_page;
+ u32 bfreg_dyn_idx = 0;
+ u32 uar_index;
+ int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
+ int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
+ bfregi->num_static_sys_pages;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
- uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
- idx = get_index(vma->vm_pgoff);
- if (idx % uars_per_page ||
- idx * uars_per_page >= bfregi->num_sys_pages) {
- mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
+ if (dyn_uar)
+ idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
+ else
+ idx = get_index(vma->vm_pgoff);
+
+ if (idx >= max_valid_idx) {
+ mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
+ idx, max_valid_idx);
return -EINVAL;
}
switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
if (!pat_enabled())
@@ -1774,7 +2050,40 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
return -EINVAL;
}
- pfn = uar_index2pfn(dev, bfregi, idx);
+ if (dyn_uar) {
+ int uars_per_page;
+
+ uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+ bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
+ if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
+ mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
+ bfreg_dyn_idx, bfregi->total_num_bfregs);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bfregi->lock);
+ /* Fail if the UAR is already allocated; the first bfreg index
+ * of each page holds its count.
+ */
+ if (bfregi->count[bfreg_dyn_idx]) {
+ mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
+ mutex_unlock(&bfregi->lock);
+ return -EINVAL;
+ }
+
+ bfregi->count[bfreg_dyn_idx]++;
+ mutex_unlock(&bfregi->lock);
+
+ err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ if (err) {
+ mlx5_ib_warn(dev, "UAR alloc failed\n");
+ goto free_bfreg;
+ }
+ } else {
+ uar_index = bfregi->sys_pages[idx];
+ }
+
+ pfn = uar_index2pfn(dev, uar_index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
vma->vm_page_prot = prot;
@@ -1783,14 +2092,32 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
if (err) {
mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
- return -EAGAIN;
+ err = -EAGAIN;
+ goto err;
}
pa = pfn << PAGE_SHIFT;
mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
vma->vm_start, &pa);
- return mlx5_ib_set_vma_data(vma, context);
+ err = mlx5_ib_set_vma_data(vma, context);
+ if (err)
+ goto err;
+
+ if (dyn_uar)
+ bfregi->sys_pages[idx] = uar_index;
+ return 0;
+
+err:
+ if (!dyn_uar)
+ return err;
+
+ mlx5_cmd_free_uar(dev->mdev, uar_index);
+
+free_bfreg:
+ mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
+
+ return err;
}
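
The error unwind at the end of uar_mmap() releases resources in reverse order of acquisition: the UAR is freed only on the dynamic path, then control falls through to drop the bfreg count taken earlier. A standalone illustration of the same goto-unwind idiom, with malloc() standing in for the two acquisitions:

#include <stdlib.h>

static int setup_two(void)
{
	void *bfreg_slot, *uar;   /* stand-ins for the count and the UAR */
	int err = -1;

	bfreg_slot = malloc(64);  /* like taking bfregi->count[...] */
	if (!bfreg_slot)
		return err;

	uar = malloc(64);         /* like mlx5_cmd_alloc_uar() */
	if (!uar)
		goto free_bfreg;

	/* ... map and publish; a later failure would free the UAR first
	 * and then fall through to free_bfreg ... */
	free(uar);
	free(bfreg_slot);
	return 0;

free_bfreg:
	free(bfreg_slot);         /* unwind only what was acquired */
	return err;
}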
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -1805,6 +2132,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
return uar_mmap(dev, command, vma, context);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
@@ -1833,6 +2161,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
vma->vm_start,
(unsigned long long)pfn << PAGE_SHIFT);
break;
+ case MLX5_IB_MMAP_CLOCK_INFO:
+ return mlx5_ib_mmap_clock_info_page(dev, vma, context);
default:
return -EINVAL;
@@ -2661,7 +2991,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
return ERR_PTR(-ENOMEM);
if (domain != IB_FLOW_DOMAIN_USER ||
- flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
+ flow_attr->port > dev->num_ports ||
(flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
return ERR_PTR(-EINVAL);
@@ -2926,15 +3256,24 @@ static void delay_drop_handler(struct work_struct *work)
mutex_unlock(&delay_drop->lock);
}
-static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param)
+static void mlx5_ib_handle_event(struct work_struct *_work)
{
- struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
+ struct mlx5_ib_event_work *work =
+ container_of(_work, struct mlx5_ib_event_work, work);
+ struct mlx5_ib_dev *ibdev;
struct ib_event ibev;
bool fatal = false;
u8 port = 0;
- switch (event) {
+ if (mlx5_core_is_mp_slave(work->dev)) {
+ ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
+ if (!ibdev)
+ goto out;
+ } else {
+ ibdev = work->context;
+ }
+
+ switch (work->event) {
case MLX5_DEV_EVENT_SYS_ERROR:
ibev.event = IB_EVENT_DEVICE_FATAL;
mlx5_ib_handle_internal_error(ibdev);
@@ -2944,39 +3283,39 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED:
- port = (u8)param;
+ port = (u8)work->param;
/* In RoCE, port up/down events are handled in
* mlx5_netdev_event().
*/
if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET)
- return;
+ goto out;
- ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+ ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
break;
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_PKEY_CHANGE:
ibev.event = IB_EVENT_PKEY_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
break;
case MLX5_DEV_EVENT_GUID_CHANGE:
ibev.event = IB_EVENT_GID_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_CLIENT_REREG:
ibev.event = IB_EVENT_CLIENT_REREGISTER;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -2998,9 +3337,26 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (fatal)
ibdev->ib_active = false;
-
out:
- return;
+ kfree(work);
+}
+
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5_ib_event_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, mlx5_ib_handle_event);
+ work->dev = dev;
+ work->param = param;
+ work->context = context;
+ work->event = event;
+
+ queue_work(mlx5_ib_event_wq, &work->work);
}
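
mlx5_ib_event() can be called from atomic context, hence the GFP_ATOMIC allocation and the hand-off to an ordered workqueue; mlx5_ib_handle_event() does the real work and frees the item. A userspace model of the same hand-off built on pthreads, with stand-in types and a FIFO list mimicking the ordering guarantee of alloc_ordered_workqueue():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event_work {
	struct event_work *next;
	int event;              /* stand-in for enum mlx5_dev_event */
	unsigned long param;
};

static struct event_work *head, *tail;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Producer side: cheap and non-blocking, like mlx5_ib_event(). */
static void post_event(int event, unsigned long param)
{
	struct event_work *w = malloc(sizeof(*w));

	if (!w)
		return;         /* the driver likewise drops the event */
	w->next = NULL;
	w->event = event;
	w->param = param;

	pthread_mutex_lock(&lock);
	if (tail)
		tail->next = w;
	else
		head = w;
	tail = w;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

/* Consumer side: processes events in order and frees each item,
 * like mlx5_ib_handle_event() on the ordered workqueue. */
static void *event_worker(void *arg)
{
	(void)arg;
	for (;;) {
		struct event_work *w;

		pthread_mutex_lock(&lock);
		while (!head)
			pthread_cond_wait(&cond, &lock);
		w = head;
		head = w->next;
		if (!head)
			tail = NULL;
		pthread_mutex_unlock(&lock);

		printf("handled event %d, param %lu\n", w->event, w->param);
		free(w);
	}
	return NULL;
}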
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
@@ -3009,7 +3365,7 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
+ for (port = 1; port <= dev->num_ports; port++) {
dev->mdev->port_caps[port - 1].has_smi = false;
if (MLX5_CAP_GEN(dev->mdev, port_type) ==
MLX5_CAP_PORT_TYPE_IB) {
@@ -3036,16 +3392,15 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
int port;
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
+ for (port = 1; port <= dev->num_ports; port++)
mlx5_query_ext_port_caps(dev, port);
}
-static int get_port_caps(struct mlx5_ib_dev *dev)
+static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
int err = -ENOMEM;
- int port;
struct ib_udata uhw = {.inlen = 0, .outlen = 0};
pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
@@ -3066,22 +3421,21 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
goto out;
}
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
- memset(pprops, 0, sizeof(*pprops));
- err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
- if (err) {
- mlx5_ib_warn(dev, "query_port %d failed %d\n",
- port, err);
- break;
- }
- dev->mdev->port_caps[port - 1].pkey_table_len =
- dprops->max_pkeys;
- dev->mdev->port_caps[port - 1].gid_table_len =
- pprops->gid_tbl_len;
- mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
- dprops->max_pkeys, pprops->gid_tbl_len);
+ memset(pprops, 0, sizeof(*pprops));
+ err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
+ if (err) {
+ mlx5_ib_warn(dev, "query_port %d failed %d\n",
+ port, err);
+ goto out;
}
+ dev->mdev->port_caps[port - 1].pkey_table_len =
+ dprops->max_pkeys;
+ dev->mdev->port_caps[port - 1].gid_table_len =
+ pprops->gid_tbl_len;
+ mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
+ port, dprops->max_pkeys, pprops->gid_tbl_len);
+
out:
kfree(pprops);
kfree(dprops);
@@ -3371,12 +3725,14 @@ static u32 get_core_cap_flags(struct ib_device *ibdev)
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
+ bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
u32 ret = 0;
if (ll == IB_LINK_LAYER_INFINIBAND)
return RDMA_CORE_PORT_IBA_IB;
- ret = RDMA_CORE_PORT_RAW_PACKET;
+ if (raw_support)
+ ret = RDMA_CORE_PORT_RAW_PACKET;
if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
return ret;
@@ -3466,33 +3822,33 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- dev->roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->roce.nb);
+ dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->roce[port_num].nb);
if (err) {
- dev->roce.nb.notifier_call = NULL;
+ dev->roce[port_num].nb.notifier_call = NULL;
return err;
}
return 0;
}
-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
- if (dev->roce.nb.notifier_call) {
- unregister_netdevice_notifier(&dev->roce.nb);
- dev->roce.nb.notifier_call = NULL;
+ if (dev->roce[port_num].nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->roce[port_num].nb);
+ dev->roce[port_num].nb.notifier_call = NULL;
}
}
-static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- err = mlx5_add_netdev_notifier(dev);
+ err = mlx5_add_netdev_notifier(dev, port_num);
if (err)
return err;
@@ -3513,7 +3869,7 @@ err_disable_roce:
mlx5_nic_vport_disable_roce(dev->mdev);
err_unregister_netdevice_notifier:
- mlx5_remove_netdev_notifier(dev);
+ mlx5_remove_netdev_notifier(dev, port_num);
return err;
}
@@ -3575,11 +3931,12 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
- unsigned int i;
+ int i;
for (i = 0; i < dev->num_ports; i++) {
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
+ if (dev->port[i].cnts.set_id)
+ mlx5_core_dealloc_q_counter(dev->mdev,
+ dev->port[i].cnts.set_id);
kfree(dev->port[i].cnts.names);
kfree(dev->port[i].cnts.offsets);
}
@@ -3621,6 +3978,7 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
err_names:
kfree(cnts->names);
+ cnts->names = NULL;
return -ENOMEM;
}
@@ -3667,37 +4025,33 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
+ int err = 0;
int i;
- int ret;
for (i = 0; i < dev->num_ports; i++) {
- struct mlx5_ib_port *port = &dev->port[i];
+ err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
+ if (err)
+ goto err_alloc;
+
+ mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
+ dev->port[i].cnts.offsets);
- ret = mlx5_core_alloc_q_counter(dev->mdev,
- &port->cnts.set_id);
- if (ret) {
+ err = mlx5_core_alloc_q_counter(dev->mdev,
+ &dev->port[i].cnts.set_id);
+ if (err) {
mlx5_ib_warn(dev,
"couldn't allocate queue counter for port %d, err %d\n",
- i + 1, ret);
- goto dealloc_counters;
+ i + 1, err);
+ goto err_alloc;
}
-
- ret = __mlx5_ib_alloc_counters(dev, &port->cnts);
- if (ret)
- goto dealloc_counters;
-
- mlx5_ib_fill_counters(dev, port->cnts.names,
- port->cnts.offsets);
+ dev->port[i].cnts.set_id_valid = true;
}
return 0;
-dealloc_counters:
- while (--i >= 0)
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
-
- return ret;
+err_alloc:
+ mlx5_ib_dealloc_counters(dev);
+ return err;
}
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
@@ -3716,7 +4070,7 @@ static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
-static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
+static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
struct mlx5_ib_port *port,
struct rdma_hw_stats *stats)
{
@@ -3729,7 +4083,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
if (!out)
return -ENOMEM;
- ret = mlx5_core_query_q_counter(dev->mdev,
+ ret = mlx5_core_query_q_counter(mdev,
port->cnts.set_id, 0,
out, outlen);
if (ret)
@@ -3751,28 +4105,43 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_port *port = &dev->port[port_num - 1];
+ struct mlx5_core_dev *mdev;
int ret, num_counters;
+ u8 mdev_port_num;
if (!stats)
return -EINVAL;
- ret = mlx5_ib_query_q_counters(dev, port, stats);
+ num_counters = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+
+ /* q_counters are per IB device; query the master mdev */
+ ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
if (ret)
return ret;
- num_counters = port->cnts.num_q_counters;
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
+ &mdev_port_num);
+ if (!mdev) {
+ /* If the port is not affiliated yet it is in the down state
+ * and has no counters, so they would read as zero; there is
+ * no need to query the HCA.
+ */
+ goto done;
+ }
ret = mlx5_lag_query_cong_counters(dev->mdev,
stats->value +
port->cnts.num_q_counters,
port->cnts.num_cong_counters,
port->cnts.offsets +
port->cnts.num_q_counters);
+
+ mlx5_ib_put_native_port_mdev(dev, port_num);
if (ret)
return ret;
- num_counters += port->cnts.num_cong_counters;
}
+done:
return num_counters;
}
@@ -3934,36 +4303,250 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
return mlx5_get_vector_affinity(dev->mdev, comp_vector);
}
-static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+/* The mlx5_ib_multiport_mutex should be held when calling this function */
+static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
{
- struct mlx5_ib_dev *dev;
- enum rdma_link_layer ll;
- int port_type_cap;
- const char *name;
+ u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ struct mlx5_ib_port *port = &ibdev->port[port_num];
+ int comps;
int err;
int i;
- port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+ mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
- printk_once(KERN_INFO "%s", mlx5_version);
+ spin_lock(&port->mp.mpi_lock);
+ if (!mpi->ibdev) {
+ spin_unlock(&port->mp.mpi_lock);
+ return;
+ }
+ mpi->ibdev = NULL;
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
- if (!dev)
- return NULL;
+ spin_unlock(&port->mp.mpi_lock);
+ mlx5_remove_netdev_notifier(ibdev, port_num);
+ spin_lock(&port->mp.mpi_lock);
- dev->mdev = mdev;
+ comps = mpi->mdev_refcnt;
+ if (comps) {
+ mpi->unaffiliate = true;
+ init_completion(&mpi->unref_comp);
+ spin_unlock(&port->mp.mpi_lock);
+
+ for (i = 0; i < comps; i++)
+ wait_for_completion(&mpi->unref_comp);
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi->unaffiliate = false;
+ }
+
+ port->mp.mpi = NULL;
+
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+
+ spin_unlock(&port->mp.mpi_lock);
+
+ err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
+
+ mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
+ /* On error, log it but still clean up the pointers and add
+ * the mpi back to the unaffiliated list.
+ */
+ if (err)
+ mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
+ port_num + 1);
+
+ ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
+}
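
The drain in mlx5_ib_unbind_slave_port() snapshots mdev_refcnt, marks the mpi as unaffiliating so new gets are refused, and waits for one completion per outstanding reference, each posted by mlx5_ib_put_native_port_mdev(). A userspace model of the same drain using a condition variable; the struct and its fields are illustrative stand-ins:

#include <pthread.h>

struct mpi_model {                       /* stand-in for mlx5_ib_multiport_info */
	pthread_mutex_t lock;
	pthread_cond_t drained;
	int refcnt;                      /* like mpi->mdev_refcnt */
	int unaffiliate;                 /* like mpi->unaffiliate */
};

/* What mlx5_ib_put_native_port_mdev() does on a slave port. */
static void put_ref(struct mpi_model *m)
{
	pthread_mutex_lock(&m->lock);
	m->refcnt--;
	if (m->unaffiliate && m->refcnt == 0)
		pthread_cond_signal(&m->drained);
	pthread_mutex_unlock(&m->lock);
}

/* The drain loop from mlx5_ib_unbind_slave_port(), condensed. */
static void drain_refs(struct mpi_model *m)
{
	pthread_mutex_lock(&m->lock);
	m->unaffiliate = 1;              /* gets now fail, refcnt only drops */
	while (m->refcnt)
		pthread_cond_wait(&m->drained, &m->lock);
	m->unaffiliate = 0;
	pthread_mutex_unlock(&m->lock);
}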
+
+/* The mlx5_ib_multiport_mutex should be held when calling this function */
+static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
+{
+ u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ int err;
+
+ spin_lock(&ibdev->port[port_num].mp.mpi_lock);
+ if (ibdev->port[port_num].mp.mpi) {
+ mlx5_ib_warn(ibdev, "port %d already affiliated.\n",
+ port_num + 1);
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+ return false;
+ }
+
+ ibdev->port[port_num].mp.mpi = mpi;
+ mpi->ibdev = ibdev;
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+
+ err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
+ if (err)
+ goto unbind;
+
+ err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
+ if (err)
+ goto unbind;
+
+ err = mlx5_add_netdev_notifier(ibdev, port_num);
+ if (err) {
+ mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
+ port_num + 1);
+ goto unbind;
+ }
+
+ err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
+ if (err)
+ goto unbind;
+
+ return true;
+
+unbind:
+ mlx5_ib_unbind_slave_port(ibdev, mpi);
+ return false;
+}
+
+static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
+{
+ int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ struct mlx5_ib_multiport_info *mpi;
+ int err;
+ int i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return 0;
+
+ err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
+ &dev->sys_image_guid);
+ if (err)
+ return err;
- dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ bool bound = false;
+
+ /* build a stub multiport info struct for the native port. */
+ if (i == port_num) {
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi) {
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ mlx5_nic_vport_disable_roce(dev->mdev);
+ return -ENOMEM;
+ }
+
+ mpi->is_master = true;
+ mpi->mdev = dev->mdev;
+ mpi->sys_image_guid = dev->sys_image_guid;
+ dev->port[i].mp.mpi = mpi;
+ mpi->ibdev = dev;
+ mpi = NULL;
+ continue;
+ }
+
+ list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
+ list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+ (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+ }
+
+ if (bound) {
+ dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
+ mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
+ list_del(&mpi->list);
+ break;
+ }
+ }
+ if (!bound) {
+ get_port_caps(dev, i + 1);
+ mlx5_ib_dbg(dev, "no free port found for port %d\n",
+ i + 1);
+ }
+ }
+
+ list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return err;
+}
+
+static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
+{
+ int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ int i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ if (dev->port[i].mp.mpi) {
+ /* Destroy the native port stub */
+ if (i == port_num) {
+ kfree(dev->port[i].mp.mpi);
+ dev->port[i].mp.mpi = NULL;
+ } else {
+ mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
+ mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
+ }
+ }
+ }
+
+ mlx5_ib_dbg(dev, "removing from devlist\n");
+ list_del(&dev->ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+
+ mlx5_nic_vport_disable_roce(dev->mdev);
+}
+
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_multiport_master(dev);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ cleanup_srcu_struct(&dev->mr_srcu);
+#endif
+ kfree(dev->port);
+}
+
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ const char *name;
+ int err;
+ int i;
+
+ dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
GFP_KERNEL);
if (!dev->port)
- goto err_dealloc;
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ spin_lock_init(&dev->port[i].mp.mpi_lock);
+ rwlock_init(&dev->roce[i].netdev_lock);
+ }
- rwlock_init(&dev->roce.netdev_lock);
- err = get_port_caps(dev);
+ err = mlx5_ib_init_multiport_master(dev);
if (err)
goto err_free_port;
+ if (!mlx5_core_mp_enabled(mdev)) {
+ int i;
+
+ for (i = 1; i <= dev->num_ports; i++) {
+ err = get_port_caps(dev, i);
+ if (err)
+ break;
+ }
+ } else {
+ err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
+ }
+ if (err)
+ goto err_mp;
+
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
@@ -3976,12 +4559,37 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
- dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
- dev->ib_dev.phys_port_cnt = dev->num_ports;
+ dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors =
dev->mdev->priv.eq_table.num_comp_vectors;
dev->ib_dev.dev.parent = &mdev->pdev->dev;
+ mutex_init(&dev->flow_db.lock);
+ mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ err = init_srcu_struct(&dev->mr_srcu);
+ if (err)
+ goto err_free_port;
+#endif
+
+ return 0;
+err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
+
+err_free_port:
+ kfree(dev->port);
+
+ return err;
+}
+
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err;
+
dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
dev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -4020,8 +4628,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
- if (ll == IB_LINK_LAYER_ETHERNET)
- dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
dev->ib_dev.query_gid = mlx5_ib_query_gid;
dev->ib_dev.add_gid = mlx5_ib_add_gid;
dev->ib_dev.del_gid = mlx5_ib_del_gid;
@@ -4078,8 +4684,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
- mlx5_ib_internal_fill_odp_caps(dev);
-
dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
if (MLX5_CAP_GEN(mdev, imaicl)) {
@@ -4090,11 +4694,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
}
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
- dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
- }
-
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -4109,8 +4708,39 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
- if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
- IB_LINK_LAYER_ETHERNET) {
+ err = init_node_data(dev);
+ if (err)
+ return err;
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
+ MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ mutex_init(&dev->lb_mutex);
+
+ return 0;
+}
+
+static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u8 port_num;
+ int err;
+ int i;
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ for (i = 0; i < dev->num_ports; i++) {
+ dev->roce[i].dev = dev;
+ dev->roce[i].native_port_num = i + 1;
+ dev->roce[i].last_port_state = IB_PORT_DOWN;
+ }
+
+ dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
dev->ib_dev.create_wq = mlx5_ib_create_wq;
dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
@@ -4122,142 +4752,329 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+ err = mlx5_enable_eth(dev, port_num);
+ if (err)
+ return err;
}
- err = init_node_data(dev);
- if (err)
- goto err_free_port;
- mutex_init(&dev->flow_db.lock);
- mutex_init(&dev->cap_mask_mutex);
- INIT_LIST_HEAD(&dev->qp_list);
- spin_lock_init(&dev->reset_flow_resource_lock);
+ return 0;
+}
+
+static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u8 port_num;
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_enable_eth(dev);
- if (err)
- goto err_free_port;
- dev->roce.last_port_state = IB_PORT_DOWN;
+ mlx5_disable_eth(dev);
+ mlx5_remove_netdev_notifier(dev, port_num);
}
+}
- err = create_dev_resources(&dev->devr);
- if (err)
- goto err_disable_eth;
+static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_dev_resources(&dev->devr);
+}
- err = mlx5_ib_odp_init_one(dev);
- if (err)
- goto err_rsrc;
+static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_dev_resources(&dev->devr);
+}
+static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_internal_fill_odp_caps(dev);
+
+ return mlx5_ib_odp_init_one(dev);
+}
+
+static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
+{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- err = mlx5_ib_alloc_counters(dev);
- if (err)
- goto err_odp;
+ dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
+ dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
+
+ return mlx5_ib_alloc_counters(dev);
}
- err = mlx5_ib_init_cong_debugfs(dev);
- if (err)
- goto err_cnt;
+ return 0;
+}
+
+static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ mlx5_ib_dealloc_counters(dev);
+}
+static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ return mlx5_ib_init_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
+{
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
if (!dev->mdev->priv.uar)
- goto err_cong;
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+}
+
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+{
+ int err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
if (err)
- goto err_uar_page;
+ return err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- goto err_bfreg;
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
- err = ib_register_device(&dev->ib_dev, NULL);
- if (err)
- goto err_fp_bfreg;
+ return err;
+}
- err = create_umr_res(dev);
- if (err)
- goto err_dev;
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+}
+
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+{
+ return ib_register_device(&dev->ib_dev, NULL);
+}
+
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+{
+ ib_unregister_device(&dev->ib_dev);
+}
+static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_umr_res(dev);
+}
+
+static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_umrc_res(dev);
+}
+
+static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
+{
init_delay_drop(dev);
+ return 0;
+}
+
+static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
+{
+ cancel_delay_drop(dev);
+}
+
+static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
+{
+ int err;
+ int i;
+
for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
err = device_create_file(&dev->ib_dev.dev,
mlx5_class_attributes[i]);
if (err)
- goto err_delay_drop;
+ return err;
}
- if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- MLX5_CAP_GEN(mdev, disable_local_lb))
- mutex_init(&dev->lb_mutex);
+ return 0;
+}
+
+static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage)
+{
+ /* Number of stages to clean up */
+ while (stage) {
+ stage--;
+ if (profile->stage[stage].cleanup)
+ profile->stage[stage].cleanup(dev);
+ }
+
+ ib_dealloc_device((struct ib_device *)dev);
+}
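
__mlx5_ib_remove() doubles as the error unwind for a partially initialized device and, with stage == MLX5_IB_STAGE_MAX, as the full teardown. A standalone demo of the same reverse-order walk over an init/cleanup table, including the NULL-cleanup case that several pf_profile stages use:

#include <stdio.h>

struct stage {
	int (*init)(void);
	void (*cleanup)(void);   /* may be NULL, as in pf_profile */
};

static int ok(void) { return 0; }
static int fail(void) { return -1; }
static void undo(void) { puts("cleanup"); }

static const struct stage stages[] = {
	{ ok, undo }, { ok, NULL }, { fail, undo },
};

int main(void)
{
	int i, n = sizeof(stages) / sizeof(stages[0]);

	for (i = 0; i < n; i++)
		if (stages[i].init && stages[i].init())
			break;

	/* i stages completed; unwind them in reverse, skipping NULLs */
	while (i) {
		i--;
		if (stages[i].cleanup)
			stages[i].cleanup();
	}
	return 0;
}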
+
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
+
+static void *__mlx5_ib_add(struct mlx5_core_dev *mdev,
+ const struct mlx5_ib_profile *profile)
+{
+ struct mlx5_ib_dev *dev;
+ int err;
+ int i;
+
+ printk_once(KERN_INFO "%s", mlx5_version);
+ dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev)
+ return NULL;
+
+ dev->mdev = mdev;
+ dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+ MLX5_CAP_GEN(mdev, num_vhca_ports));
+
+ for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
+ if (profile->stage[i].init) {
+ err = profile->stage[i].init(dev);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ dev->profile = profile;
dev->ib_active = true;
return dev;
-err_delay_drop:
- cancel_delay_drop(dev);
- destroy_umrc_res(dev);
+err_out:
+ __mlx5_ib_remove(dev, profile, i);
-err_dev:
- ib_unregister_device(&dev->ib_dev);
+ return NULL;
+}
-err_fp_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+static const struct mlx5_ib_profile pf_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_stage_roce_init,
+ mlx5_ib_stage_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_stage_dev_res_init,
+ mlx5_ib_stage_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_ODP,
+ mlx5_ib_stage_odp_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_stage_counters_init,
+ mlx5_ib_stage_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ mlx5_ib_stage_uar_init,
+ mlx5_ib_stage_uar_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
+ mlx5_ib_stage_umr_res_init,
+ mlx5_ib_stage_umr_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
+ mlx5_ib_stage_class_attr_init,
+ NULL),
+};
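
pf_profile turns device bring-up into data: each STAGE_CREATE entry pairs an init with an optional cleanup, indexed by a stage enum. The macro and struct definitions live in mlx5_ib.h, whose hunk is not shown at this point in the patch, so the following is an assumed shape rather than the verbatim header:

/* assumed shape -- the real definitions are in mlx5_ib.h */
struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	[_stage] = { .init = _init, .cleanup = _cleanup }

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};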
-err_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
+{
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
+ bool bound = false;
+ int err;
-err_uar_page:
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi)
+ return NULL;
-err_cong:
- mlx5_ib_cleanup_cong_debugfs(dev);
-err_cnt:
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
+ mpi->mdev = mdev;
-err_odp:
- mlx5_ib_odp_remove_one(dev);
+ err = mlx5_query_nic_vport_system_image_guid(mdev,
+ &mpi->sys_image_guid);
+ if (err) {
+ kfree(mpi);
+ return NULL;
+ }
-err_rsrc:
- destroy_dev_resources(&dev->devr);
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid)
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
-err_disable_eth:
- if (ll == IB_LINK_LAYER_ETHERNET) {
- mlx5_disable_eth(dev);
- mlx5_remove_netdev_notifier(dev);
+ if (bound) {
+ rdma_roce_rescan_device(&dev->ib_dev);
+ break;
+ }
}
-err_free_port:
- kfree(dev->port);
+ if (!bound) {
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+ dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
+ } else {
+ mlx5_ib_dbg(dev, "bound port %u\n", port_num + 1);
+ }
+ mutex_unlock(&mlx5_ib_multiport_mutex);
-err_dealloc:
- ib_dealloc_device((struct ib_device *)dev);
+ return mpi;
+}
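
In short, a multiport slave port only becomes usable once an already-registered IB device with the same sys_image_guid claims it; until then it waits on the unaffiliated list. A toy standalone model of that bind-or-park decision (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct ibdev { unsigned long long guid; const char *name; };
struct slave { unsigned long long guid; const struct ibdev *bound; };

/* Bind to the first device sharing the GUID; otherwise the caller
 * parks the slave on an "unaffiliated" list for a later retry. */
static bool try_bind(struct slave *s, const struct ibdev *devs, int n)
{
	for (int i = 0; i < n; i++) {
		if (devs[i].guid == s->guid) {
			s->bound = &devs[i];
			return true;
		}
	}
	return false;
}

int main(void)
{
	const struct ibdev devs[] = { { 0x1111, "ibdev0" }, { 0x2222, "ibdev1" } };
	struct slave s = { .guid = 0x2222, .bound = NULL };

	if (try_bind(&s, devs, 2))
		printf("bound to %s\n", s.bound->name);
	else
		puts("unaffiliated");
	return 0;
}
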
- return NULL;
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+{
+ enum rdma_link_layer ll;
+ int port_type_cap;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
+ u8 port_num = mlx5_core_native_port_num(mdev) - 1;
+
+ return mlx5_ib_add_slave_port(mdev, port_num);
+ }
+
+ return __mlx5_ib_add(mdev, &pf_profile);
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
- struct mlx5_ib_dev *dev = context;
- enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
- cancel_delay_drop(dev);
- mlx5_remove_netdev_notifier(dev);
- ib_unregister_device(&dev->ib_dev);
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
- mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
- mlx5_ib_cleanup_cong_debugfs(dev);
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
- destroy_umrc_res(dev);
- mlx5_ib_odp_remove_one(dev);
- destroy_dev_resources(&dev->devr);
- if (ll == IB_LINK_LAYER_ETHERNET)
- mlx5_disable_eth(dev);
- kfree(dev->port);
- ib_dealloc_device(&dev->ib_dev);
+ if (mlx5_core_is_mp_slave(mdev)) {
+ mpi = context;
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ if (mpi->ibdev)
+ mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
+ list_del(&mpi->list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return;
+ }
+
+ dev = context;
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
static struct mlx5_interface mlx5_ib_interface = {
@@ -4274,6 +5091,10 @@ static int __init mlx5_ib_init(void)
{
int err;
+ mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
+ if (!mlx5_ib_event_wq)
+ return -ENOMEM;
+
mlx5_ib_odp_init();
err = mlx5_register_interface(&mlx5_ib_interface);
@@ -4284,6 +5105,7 @@ static int __init mlx5_ib_init(void)
static void __exit mlx5_ib_cleanup(void)
{
mlx5_unregister_interface(&mlx5_ib_interface);
+ destroy_workqueue(mlx5_ib_event_wq);
}
module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 2c5f3533bbc9..139385129973 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -70,15 +70,6 @@ enum {
MLX5_IB_MMAP_CMD_MASK = 0xff,
};
-enum mlx5_ib_mmap_cmd {
- MLX5_IB_MMAP_REGULAR_PAGE = 0,
- MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
- MLX5_IB_MMAP_WC_PAGE = 2,
- MLX5_IB_MMAP_NC_PAGE = 3,
- /* 5 is chosen in order to be compatible with old versions of libmlx5 */
- MLX5_IB_MMAP_CORE_CLOCK = 5,
-};
-
enum {
MLX5_RES_SCAT_DATA32_CQE = 0x1,
MLX5_RES_SCAT_DATA64_CQE = 0x2,
@@ -112,6 +103,11 @@ enum {
MLX5_TM_MAX_SGE = 1,
};
+enum {
+ MLX5_IB_INVALID_UAR_INDEX = BIT(31),
+ MLX5_IB_INVALID_BFREG = BIT(31),
+};
+
struct mlx5_ib_vma_private_data {
struct list_head list;
struct vm_area_struct *vma;
@@ -200,6 +196,8 @@ struct mlx5_ib_flow_db {
* creates the actual hardware QP.
*/
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
+#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
+#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
#define MLX5_IB_UMR_OCTOWORD 16
@@ -360,12 +358,18 @@ struct mlx5_bf {
struct mlx5_sq_bfreg *bfreg;
};
+struct mlx5_ib_dct {
+ struct mlx5_core_dct mdct;
+ u32 *in;
+};
+
struct mlx5_ib_qp {
struct ib_qp ibqp;
union {
struct mlx5_ib_qp_trans trans_qp;
struct mlx5_ib_raw_packet_qp raw_packet_qp;
struct mlx5_ib_rss_qp rss_qp;
+ struct mlx5_ib_dct dct;
};
struct mlx5_buf buf;
@@ -404,6 +408,8 @@ struct mlx5_ib_qp {
u32 rate_limit;
u32 underlay_qpn;
bool tunnel_offload_en;
+ /* storage for qp sub type when core qp type is IB_QPT_DRIVER */
+ enum ib_qp_type qp_sub_type;
};
struct mlx5_ib_cq_buf {
@@ -636,10 +642,21 @@ struct mlx5_ib_counters {
u32 num_q_counters;
u32 num_cong_counters;
u16 set_id;
+ bool set_id_valid;
+};
+
+struct mlx5_ib_multiport_info;
+
+struct mlx5_ib_multiport {
+ struct mlx5_ib_multiport_info *mpi;
+ /* To be held when accessing the multiport info */
+ spinlock_t mpi_lock;
};
struct mlx5_ib_port {
struct mlx5_ib_counters cnts;
+ struct mlx5_ib_multiport mp;
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
};
struct mlx5_roce {
@@ -651,12 +668,15 @@ struct mlx5_roce {
struct notifier_block nb;
atomic_t next_port;
enum ib_port_state last_port_state;
+ struct mlx5_ib_dev *dev;
+ u8 native_port_num;
};
struct mlx5_ib_dbg_param {
int offset;
struct mlx5_ib_dev *dev;
struct dentry *dentry;
+ u8 port_num;
};
enum mlx5_ib_dbg_cc_types {
@@ -709,10 +729,50 @@ struct mlx5_ib_delay_drop {
struct mlx5_ib_dbg_delay_drop *dbg;
};
+enum mlx5_ib_stages {
+ MLX5_IB_STAGE_INIT,
+ MLX5_IB_STAGE_CAPS,
+ MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_DEVICE_RESOURCES,
+ MLX5_IB_STAGE_ODP,
+ MLX5_IB_STAGE_COUNTERS,
+ MLX5_IB_STAGE_CONG_DEBUGFS,
+ MLX5_IB_STAGE_UAR,
+ MLX5_IB_STAGE_BFREG,
+ MLX5_IB_STAGE_IB_REG,
+ MLX5_IB_STAGE_UMR_RESOURCES,
+ MLX5_IB_STAGE_DELAY_DROP,
+ MLX5_IB_STAGE_CLASS_ATTR,
+ MLX5_IB_STAGE_MAX,
+};
+
+struct mlx5_ib_stage {
+ int (*init)(struct mlx5_ib_dev *dev);
+ void (*cleanup)(struct mlx5_ib_dev *dev);
+};
+
+#define STAGE_CREATE(_stage, _init, _cleanup) \
+ .stage[_stage] = {.init = _init, .cleanup = _cleanup}
+
+struct mlx5_ib_profile {
+ struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
+};
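
STAGE_CREATE() is just a designated-initializer wrapper, so a profile can populate any subset of stage[] and leave the rest zeroed; the add/remove loops skip NULL init/cleanup pointers. A hypothetical trimmed profile (for illustration only, not part of the patch) would look like:

static const struct mlx5_ib_profile example_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),	/* caps stage has no state to tear down */
};
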
+
+struct mlx5_ib_multiport_info {
+ struct list_head list;
+ struct mlx5_ib_dev *ibdev;
+ struct mlx5_core_dev *mdev;
+ struct completion unref_comp;
+ u64 sys_image_guid;
+ u32 mdev_refcnt;
+ bool is_master;
+ bool unaffiliate;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
- struct mlx5_roce roce;
+ struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
/* serialize update of capability mask
*/
@@ -746,12 +806,14 @@ struct mlx5_ib_dev {
struct mlx5_sq_bfreg bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
- struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ const struct mlx5_ib_profile *profile;
/* protect the user_td */
struct mutex lb_mutex;
u32 user_td;
u8 umr_fence;
+ struct list_head ib_dev_list;
+ u64 sys_image_guid;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -956,13 +1018,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
@@ -977,7 +1040,6 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
-static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
@@ -1001,8 +1063,8 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
int index, enum ib_gid_type *gid_type);
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev);
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev);
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
@@ -1021,6 +1083,15 @@ void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
+ int bfregn);
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
+ u8 ib_port_num,
+ u8 *native_port_num);
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
+ u8 port_num);
+
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -1052,8 +1123,8 @@ static inline u32 check_cq_create_flags(u32 flags)
* It returns non-zero value for unsupported CQ
* create flags, otherwise it returns zero.
*/
- return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
- IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
@@ -1113,10 +1184,10 @@ static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_suppor
MLX5_UARS_IN_PAGE : 1;
}
-static inline int get_num_uars(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi)
+static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
- return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
+ return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index d109fe8290a7..556e015678de 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1206,6 +1206,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err;
bool use_umr = true;
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ return ERR_PTR(-EINVAL);
+
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e2197bdda89c..f1a87a690a4c 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1207,10 +1207,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret;
- ret = init_srcu_struct(&dev->mr_srcu);
- if (ret)
- return ret;
-
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
if (ret) {
@@ -1222,11 +1218,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
return 0;
}
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *dev)
-{
- cleanup_srcu_struct(&dev->mr_srcu);
-}
-
int mlx5_ib_odp_init(void)
{
mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 31ad28853efa..39d24bf694a8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -493,7 +493,7 @@ enum {
static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
- return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
+ return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}
static int num_med_bfreg(struct mlx5_ib_dev *dev,
@@ -581,7 +581,7 @@ static int alloc_bfreg(struct mlx5_ib_dev *dev,
return bfregn;
}
-static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
mutex_lock(&bfregi->lock);
bfregi->count[bfregn]--;
@@ -613,6 +613,7 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
+ case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI;
case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
case IB_QPT_RAW_PACKET:
case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
@@ -627,7 +628,8 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi, int bfregn)
+ struct mlx5_bfreg_info *bfregi, int bfregn,
+ bool dyn_bfreg)
{
int bfregs_per_sys_page;
int index_of_sys_page;
@@ -637,8 +639,16 @@ static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
MLX5_NON_FP_BFREGS_PER_UAR;
index_of_sys_page = bfregn / bfregs_per_sys_page;
- offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
+ if (dyn_bfreg) {
+ index_of_sys_page += bfregi->num_static_sys_pages;
+ if (bfregn > bfregi->num_dyn_bfregs ||
+ bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
+ mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
+ return -EINVAL;
+ }
+ }
+ offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
return bfregi->sys_pages[index_of_sys_page] + offset;
}
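
The dyn_bfreg branch encodes the layout assumption that dynamically allocated UAR system pages are appended after the num_static_sys_pages static ones, with unallocated slots holding MLX5_IB_INVALID_UAR_INDEX. A simplified standalone recomputation of the index math (constants chosen for illustration; the real code also divides the in-page offset by MLX5_NON_FP_BFREGS_PER_UAR):

#include <stdio.h>

#define BFREGS_PER_SYS_PAGE	4	/* illustrative, not the HW value */
#define NUM_STATIC_SYS_PAGES	2

static int to_uar_index(const int *sys_pages, int bfregn, int dyn)
{
	int page   = bfregn / BFREGS_PER_SYS_PAGE;
	int offset = bfregn % BFREGS_PER_SYS_PAGE;

	if (dyn)			/* dynamic pages follow the static ones */
		page += NUM_STATIC_SYS_PAGES;
	return sys_pages[page] + offset;
}

int main(void)
{
	int sys_pages[] = { 10, 11, 20, 21 };	/* UAR index per system page */

	printf("static bfreg 5  -> uar %d\n", to_uar_index(sys_pages, 5, 0));
	printf("dynamic bfreg 1 -> uar %d\n", to_uar_index(sys_pages, 1, 1));
	return 0;
}
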
@@ -764,7 +774,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
int page_shift = 0;
- int uar_index;
+ int uar_index = 0;
int npages;
u32 offset = 0;
int bfregn;
@@ -780,12 +790,20 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
context = to_mucontext(pd->uobject->context);
- /*
- * TBD: should come from the verbs when we have the API
- */
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi,
+ ucmd.bfreg_index, true);
+ if (uar_index < 0)
+ return uar_index;
+
+ bfregn = MLX5_IB_INVALID_BFREG;
+ } else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
+ /*
+ * TBD: should come from the verbs when we have the API
+ */
/* In CROSS_CHANNEL CQ and QP must use the same UAR */
bfregn = MLX5_CROSS_CHANNEL_BFREG;
+ }
else {
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
if (bfregn < 0) {
@@ -804,8 +822,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
}
- uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
+ false);
qp->rq.offset = 0;
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -845,7 +865,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, page_offset, offset);
MLX5_SET(qpc, qpc, uar_page, uar_index);
- resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ else
+ resp->bfreg_index = MLX5_IB_INVALID_BFREG;
qp->bfregn = bfregn;
err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
@@ -874,7 +897,8 @@ err_umem:
ib_umem_release(ubuffer->umem);
err_bfreg:
- free_bfreg(dev, &context->bfregi, bfregn);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
return err;
}
@@ -887,7 +911,13 @@ static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem);
- free_bfreg(dev, &context->bfregi, qp->bfregn);
+
+ /*
+ * Free only the BFREGs which are handled by the kernel.
+ * BFREGs of UARs allocated dynamically are handled by userspace.
+ */
+ if (qp->bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
@@ -1015,6 +1045,7 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
+ (attr->qp_type == MLX5_IB_QPT_DCI) ||
(attr->qp_type == IB_QPT_XRC_INI))
return MLX5_SRQ_RQ;
else if (!qp->has_rq)
@@ -2086,20 +2117,108 @@ static const char *ib_qp_type_str(enum ib_qp_type type)
return "IB_QPT_RAW_PACKET";
case MLX5_IB_QPT_REG_UMR:
return "MLX5_IB_QPT_REG_UMR";
+ case IB_QPT_DRIVER:
+ return "IB_QPT_DRIVER";
case IB_QPT_MAX:
default:
return "Invalid QP type";
}
}
+static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct mlx5_ib_create_qp *ucmd)
+{
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_qp *qp;
+ int err = 0;
+ u32 uidx = MLX5_IB_DEFAULT_UIDX;
+ void *dctc;
+
+ if (!attr->srq || !attr->recv_cq)
+ return ERR_PTR(-EINVAL);
+
+ dev = to_mdev(pd->device);
+
+ err = get_qp_user_index(to_mucontext(pd->uobject->context),
+ ucmd, sizeof(*ucmd), &uidx);
+ if (err)
+ return ERR_PTR(err);
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
+ if (!qp->dct.in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ qp->qp_sub_type = MLX5_IB_QPT_DCT;
+ MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
+ MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
+ MLX5_SET(dctc, dctc, user_index, uidx);
+
+ qp->state = IB_QPS_RESET;
+
+ return &qp->ibqp;
+err_free:
+ kfree(qp);
+ return ERR_PTR(err);
+}
+
+static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_create_qp *ucmd,
+ struct ib_udata *udata)
+{
+ enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
+ int err;
+
+ if (!udata)
+ return -EINVAL;
+
+ if (udata->inlen < sizeof(*ucmd)) {
+ mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
+ return -EINVAL;
+ }
+ err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
+ if (err)
+ return err;
+
+ if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
+ init_attr->qp_type = MLX5_IB_QPT_DCI;
+ } else {
+ if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
+ init_attr->qp_type = MLX5_IB_QPT_DCT;
+ } else {
+ mlx5_ib_dbg(dev, "Invalid QP flags\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!MLX5_CAP_GEN(dev->mdev, dct)) {
+ mlx5_ib_dbg(dev, "DC transport is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
+ struct ib_qp_init_attr *verbs_init_attr,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev;
struct mlx5_ib_qp *qp;
u16 xrcdn = 0;
int err;
+ struct ib_qp_init_attr mlx_init_attr;
+ struct ib_qp_init_attr *init_attr = verbs_init_attr;
if (pd) {
dev = to_mdev(pd->device);
@@ -2124,6 +2243,26 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
+ if (init_attr->qp_type == IB_QPT_DRIVER) {
+ struct mlx5_ib_create_qp ucmd;
+
+ init_attr = &mlx_init_attr;
+ memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
+ err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
+ if (err)
+ return ERR_PTR(err);
+
+ if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
+ if (init_attr->cap.max_recv_wr ||
+ init_attr->cap.max_recv_sge) {
+ mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ return mlx5_ib_create_dct(pd, init_attr, &ucmd);
+ }
+ }
+
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
case IB_QPT_XRC_INI:
@@ -2145,6 +2284,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
case IB_QPT_SMI:
case MLX5_IB_QPT_HW_GSI:
case MLX5_IB_QPT_REG_UMR:
+ case MLX5_IB_QPT_DCI:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
@@ -2185,9 +2325,31 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
+ if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
+ qp->qp_sub_type = init_attr->qp_type;
+
return &qp->ibqp;
}
+static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
+
+ if (mqp->state == IB_QPS_RTR) {
+ int err;
+
+ err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
+ return err;
+ }
+ }
+
+ kfree(mqp->dct.in);
+ kfree(mqp);
+ return 0;
+}
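
The state check above is the key to the DCT life cycle: the firmware object is created lazily, so there is nothing to destroy unless the QP reached RTR. Summarized as comments (derived from the create/modify paths in this patch):

/*
 * Deferred-creation life cycle:
 *
 *   create_qp(IB_QPT_DRIVER + MLX5_QP_FLAG_TYPE_DCT)
 *       -> allocate qp->dct.in, state = RESET, no firmware object yet
 *   modify_qp(RESET -> INIT)
 *       -> cache access flags / pkey_index / port in qp->dct.in
 *   modify_qp(INIT -> RTR)
 *       -> mlx5_core_create_dct(): the firmware object finally exists
 *          and its dctn is returned to userspace through udata
 *   destroy_qp()
 *       -> mlx5_core_destroy_dct() only if state reached RTR
 */
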
+
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
@@ -2196,6 +2358,9 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
if (unlikely(qp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_destroy_qp(qp);
+ if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_destroy_dct(mqp);
+
destroy_qp_common(dev, mqp);
kfree(mqp);
@@ -2763,7 +2928,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (!context)
return -ENOMEM;
- err = to_mlx5_st(ibqp->qp_type);
+ err = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
+ qp->qp_sub_type : ibqp->qp_type);
if (err < 0) {
mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
goto out;
@@ -2796,8 +2962,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_INI) ||
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (mlx5_lag_is_active(dev->mdev)) {
+ u8 p = mlx5_core_native_port_num(dev->mdev);
tx_affinity = (unsigned int)atomic_add_return(1,
- &dev->roce.next_port) %
+ &dev->roce[p].next_port) %
MLX5_MAX_PORTS + 1;
context->flags |= cpu_to_be32(tx_affinity << 24);
}
@@ -2922,7 +3089,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
- mlx5_st = to_mlx5_st(ibqp->qp_type);
+ mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
+ qp->qp_sub_type : ibqp->qp_type);
if (mlx5_st < 0)
goto out;
@@ -2994,6 +3162,139 @@ out:
return err;
}
+static inline bool is_valid_mask(int mask, int req, int opt)
+{
+ if ((mask & req) != req)
+ return false;
+
+ if (mask & ~(req | opt))
+ return false;
+
+ return true;
+}
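
is_valid_mask() encodes the usual attr_mask contract: every required bit must be present, and nothing outside required|optional may be set. A standalone illustration:

#include <stdbool.h>
#include <stdio.h>

static bool is_valid_mask(int mask, int req, int opt)
{
	if ((mask & req) != req)
		return false;		/* a required attribute is missing */
	if (mask & ~(req | opt))
		return false;		/* something outside req|opt was set */
	return true;
}

int main(void)
{
	enum { A = 1, B = 2, C = 4 };

	printf("%d\n", is_valid_mask(A | B, A, B));	/* 1: required + optional */
	printf("%d\n", is_valid_mask(B, A, B));		/* 0: required bit missing */
	printf("%d\n", is_valid_mask(A | C, A, B));	/* 0: stray attribute */
	return 0;
}
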
+
+/* check valid transition for driver QP types
+ * for now the only QP type that this function supports is DCI
+ */
+static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
+ enum ib_qp_attr_mask attr_mask)
+{
+ int req = IB_QP_STATE;
+ int opt = 0;
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ req |= IB_QP_PATH_MTU;
+ opt = IB_QP_PKEY_INDEX;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+ req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
+ return is_valid_mask(attr_mask, req, opt);
+ }
+ return false;
+}
+
+/* mlx5_ib_modify_dct: modify a DCT QP
+ * valid transitions are:
+ * RESET to INIT: must set access_flags, pkey_index and port
+ * INIT to RTR : must set min_rnr_timer, tclass, flow_label,
+ * mtu, gid_index and hop_limit
+ * Other transitions and attributes are illegal
+ */
+static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ enum ib_qp_state cur_state, new_state;
+ int err = 0;
+ int required = IB_QP_STATE;
+ void *dctc;
+
+ if (!(attr_mask & IB_QP_STATE))
+ return -EINVAL;
+
+ cur_state = qp->state;
+ new_state = attr->qp_state;
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+
+ if (attr->port_num == 0 ||
+ attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
+ return -EINVAL;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ MLX5_SET(dctc, dctc, rre, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ MLX5_SET(dctc, dctc, rwe, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
+ if (!mlx5_ib_dc_atomic_is_supported(dev))
+ return -EOPNOTSUPP;
+ MLX5_SET(dctc, dctc, rae, 1);
+ MLX5_SET(dctc, dctc, atomic_mode, MLX5_ATOMIC_MODE_DCT_CX);
+ }
+ MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
+ MLX5_SET(dctc, dctc, port, attr->port_num);
+ MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id);
+
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ struct mlx5_ib_modify_qp_resp resp = {};
+ u32 min_resp_len = offsetof(typeof(resp), dctn) +
+ sizeof(resp.dctn);
+
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+ resp.response_length = min_resp_len;
+
+ required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+ MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
+ MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
+ MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
+ MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
+ MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
+ MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+
+ err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
+ MLX5_ST_SZ_BYTES(create_dct_in));
+ if (err)
+ return err;
+ resp.dctn = qp->dct.mdct.mqp.qpn;
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err) {
+ mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
+ return err;
+ }
+ } else {
+ mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
+ return -EINVAL;
+ }
+ if (err)
+ qp->state = IB_QPS_ERR;
+ else
+ qp->state = new_state;
+ return err;
+}
+
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -3011,8 +3312,14 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
- qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
- IB_QPT_GSI : ibqp->qp_type;
+ if (ibqp->qp_type == IB_QPT_DRIVER)
+ qp_type = qp->qp_sub_type;
+ else
+ qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
+ IB_QPT_GSI : ibqp->qp_type;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);
mutex_lock(&qp->mutex);
@@ -3031,15 +3338,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
- !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+ qp_type != MLX5_IB_QPT_DCI &&
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
cur_state, new_state, ibqp->qp_type, attr_mask);
goto out;
+ } else if (qp_type == MLX5_IB_QPT_DCI &&
+ !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, qp_type, attr_mask);
+ goto out;
}
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) {
+ attr->port_num > dev->num_ports)) {
mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
attr->port_num, dev->num_ports);
goto out;
@@ -4358,16 +4671,14 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
struct rdma_ah_attr *ah_attr,
struct mlx5_qp_path *path)
{
- struct mlx5_core_dev *dev = ibdev->mdev;
memset(ah_attr, 0, sizeof(*ah_attr));
- ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
- rdma_ah_set_port_num(ah_attr, path->port);
- if (rdma_ah_get_port_num(ah_attr) == 0 ||
- rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
+ if (!path->port || path->port > ibdev->num_ports)
return;
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+
rdma_ah_set_port_num(ah_attr, path->port);
rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
@@ -4578,6 +4889,71 @@ out:
return err;
}
+static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
+ struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_core_dct *dct = &mqp->dct.mdct;
+ u32 *out;
+ u32 access_flags = 0;
+ int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
+ void *dctc;
+ int err;
+ int supported_mask = IB_QP_STATE |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PORT |
+ IB_QP_MIN_RNR_TIMER |
+ IB_QP_AV |
+ IB_QP_PATH_MTU |
+ IB_QP_PKEY_INDEX;
+
+ if (qp_attr_mask & ~supported_mask)
+ return -EINVAL;
+ if (mqp->state != IB_QPS_RTR)
+ return -EINVAL;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
+ if (err)
+ goto out;
+
+ dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
+
+ if (qp_attr_mask & IB_QP_STATE)
+ qp_attr->qp_state = IB_QPS_RTR;
+
+ if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (MLX5_GET(dctc, dctc, rre))
+ access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(dctc, dctc, rwe))
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(dctc, dctc, rae))
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ qp_attr->qp_access_flags = access_flags;
+ }
+
+ if (qp_attr_mask & IB_QP_PORT)
+ qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
+ if (qp_attr_mask & IB_QP_AV) {
+ qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
+ qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
+ qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
+ qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
+ }
+ if (qp_attr_mask & IB_QP_PATH_MTU)
+ qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+ qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
+out:
+ kfree(out);
+ return err;
+}
+
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
@@ -4597,6 +4973,10 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
memset(qp_attr, 0, sizeof(*qp_attr));
+ if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
+ return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
+ qp_attr_mask, qp_init_attr);
+
mutex_lock(&qp->mutex);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
@@ -4686,13 +5066,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
int err;
err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
- if (err) {
+ if (err)
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
- return err;
- }
kfree(xrcd);
-
return 0;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index c6fe89d79248..2fe503e86c1d 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
+ ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages);
if (ret < 0)
goto out;
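
The practical difference between the two calls, as a fragment illustrating the API contract of this kernel era (not the surrounding mthca locking): get_user_pages() must run with mmap_sem held by the caller, while get_user_pages_fast() handles the locking itself, attempting a lockless page-table walk first.

/* Old pattern: the caller owns the mmap_sem critical section. */
down_read(&current->mm->mmap_sem);
ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
up_read(&current->mm->mmap_sem);

/* New pattern: no caller-side locking required. */
ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages);
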
@@ -623,13 +623,12 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
page = dev->db_tab->page + end;
alloc:
- page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
- &page->mapping, GFP_KERNEL);
+ page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
+ &page->mapping, GFP_KERNEL);
if (!page->db_rec) {
ret = -ENOMEM;
goto out;
}
- memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
ret = mthca_MAP_ICM_page(dev, page->mapping,
mthca_uarc_virt(dev, &dev->driver_uar, i));
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
deleted file mode 100644
index 5fe56e810739..000000000000
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MTHCA_USER_H
-#define MTHCA_USER_H
-
-#include <linux/types.h>
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define MTHCA_UVERBS_ABI_VERSION 1
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mthca_alloc_ucontext_resp {
- __u32 qp_tab_size;
- __u32 uarc_size;
-};
-
-struct mthca_alloc_pd_resp {
- __u32 pdn;
- __u32 reserved;
-};
-
-struct mthca_reg_mr {
-/*
- * Mark the memory region with a DMA attribute that causes
- * in-flight DMA to be flushed when the region is written to:
- */
-#define MTHCA_MR_DMASYNC 0x1
- __u32 mr_attrs;
- __u32 reserved;
-};
-
-struct mthca_create_cq {
- __u32 lkey;
- __u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
- __u32 arm_db_index;
- __u32 set_db_index;
-};
-
-struct mthca_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct mthca_resize_cq {
- __u32 lkey;
- __u32 reserved;
-};
-
-struct mthca_create_srq {
- __u32 lkey;
- __u32 db_index;
- __u64 db_page;
-};
-
-struct mthca_create_srq_resp {
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mthca_create_qp {
- __u32 lkey;
- __u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
- __u32 sq_db_index;
- __u32 rq_db_index;
-};
-
-#endif /* MTHCA_USER_H */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index c56ca2a74df5..6cdfbf8c5674 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1365,7 +1365,7 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core,
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
struct nes_cm_node *cm_node)
{
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
if (cm_node->accept_pend) {
BUG_ON(!cm_node->listener);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index d827d03e3941..b9cc02b4e8d5 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -279,7 +279,6 @@ struct nes_cm_tcp_context {
u8 rcv_wscale;
struct nes_cm_tsa_context tsa_cntxt;
- struct timeval sent_ts;
};
@@ -341,7 +340,7 @@ struct nes_cm_node {
u16 mpa_frame_size;
struct iw_cm_id *cm_id;
struct list_head list;
- int accelerated;
+ bool accelerated;
struct nes_cm_listener *listener;
enum nes_cm_conn_type conn_type;
struct nes_vnic *nesvnic;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0ba695a88b62..9904918589a4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,11 +380,10 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
q->len = len;
q->entry_size = entry_size;
q->size = len * entry_size;
- q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
- &q->dma, GFP_KERNEL);
+ q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
+ &q->dma, GFP_KERNEL);
if (!q->va)
return -ENOMEM;
- memset(q->va, 0, q->size);
return 0;
}
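
This hunk, like several others in the patch, folds an alloc+memset pair into dma_zalloc_coherent(), which returns zeroed memory in one call. The transformation as a fragment (dev, size, and handle stand for the caller's locals):

void *buf;

/* Before: allocate, then zero by hand. */
buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
if (!buf)
	return -ENOMEM;
memset(buf, 0, size);

/* After: one call with identical semantics, no memset to forget. */
buf = dma_zalloc_coherent(dev, size, &handle, GFP_KERNEL);
if (!buf)
	return -ENOMEM;
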
@@ -1819,12 +1818,11 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
return -ENOMEM;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+ cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
if (!cq->va) {
status = -ENOMEM;
goto mem_err;
}
- memset(cq->va, 0, cq->len);
page_size = cq->len / hw_pages;
cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
@@ -2212,10 +2210,9 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
qp->sq.max_cnt = max_wqe_allocated;
len = (hw_pages * hw_page_size);
- qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->sq.va)
return -EINVAL;
- memset(qp->sq.va, 0, len);
qp->sq.len = len;
qp->sq.pa = pa;
qp->sq.entry_size = dev->attr.wqe_size;
@@ -2263,10 +2260,9 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
qp->rq.max_cnt = max_rqe_allocated;
len = (hw_pages * hw_page_size);
- qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
return -ENOMEM;
- memset(qp->rq.va, 0, len);
qp->rq.pa = pa;
qp->rq.len = len;
qp->rq.entry_size = dev->attr.rqe_size;
@@ -2320,11 +2316,10 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
if (dev->attr.ird == 0)
return 0;
- qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
- &pa, GFP_KERNEL);
+ qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
if (!qp->ird_q_va)
return -ENOMEM;
- memset(qp->ird_q_va, 0, ird_q_len);
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
pa, ird_page_size);
for (; i < ird_q_len / dev->attr.rqe_size; i++) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index e528d7acb7f6..24d20a4aa262 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,15 +73,13 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
- mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
if (!mem->va) {
pr_err("%s: stats mbox allocation failed\n", __func__);
return false;
}
- memset(mem->va, 0, mem->size);
-
/* Alloc debugfs mem */
mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
if (!mem->debugfs_mem)
@@ -834,7 +832,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
dev->reset_stats.type = OCRDMA_RESET_STATS;
dev->reset_stats.dev = dev;
- if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
+ if (!debugfs_create_file("reset_stats", 0200, dev->dir,
&dev->reset_stats, &ocrdma_dbg_ops))
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7866fd8051f6..8009bdad4e5b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -461,7 +461,7 @@ retry:
static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
struct ocrdma_pd *pd)
{
- return (uctx->cntxt_pd == pd ? true : false);
+ return (uctx->cntxt_pd == pd);
}
static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
@@ -550,13 +550,12 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
- ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
- &ctx->ah_tbl.pa, GFP_KERNEL);
+ ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
+ &ctx->ah_tbl.pa, GFP_KERNEL);
if (!ctx->ah_tbl.va) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
- memset(ctx->ah_tbl.va, 0, map_len);
ctx->ah_tbl.len = map_len;
memset(&resp, 0, sizeof(resp));
@@ -885,13 +884,12 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
return -ENOMEM;
for (i = 0; i < mr->num_pbls; i++) {
- va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+ va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
if (!va) {
ocrdma_free_mr_pbl_tbl(dev, mr);
status = -ENOMEM;
break;
}
- memset(va, 0, dma_len);
mr->pbl_table[i].va = va;
mr->pbl_table[i].pa = pa;
}
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 50812b33291b..a9c3378bca38 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int rc;
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index b7587f10e7de..78b49002fbd2 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -164,6 +164,13 @@ struct rdma_srq_sge {
__le32 l_key;
};
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+ __le16 icid; /* internal CID */
+ u8 agg_flags; /* aggregative flags */
+ u8 reserved;
+};
+
/* Rdma doorbell data for SQ and RQ */
struct rdma_pwm_val16_data {
__le16 icid;
@@ -180,12 +187,16 @@ struct rdma_pwm_val32_data {
__le16 icid;
u8 agg_flags;
u8 params;
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5
__le32 value;
};
@@ -478,23 +489,25 @@ struct rdma_sq_fmr_wqe {
__le16 dif_app_tag_mask;
__le16 dif_runt_crc_value;
__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
- __le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
+ __le32 reserved5;
};
/* First element (16 bytes) of fmr wqe */
@@ -558,23 +571,25 @@ struct rdma_sq_fmr_wqe_3rd {
__le16 dif_app_tag_mask;
__le16 dif_runt_crc_value;
__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
- __le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 8
+ __le32 reserved5;
};
struct rdma_sq_local_inv_wqe {
@@ -606,20 +621,22 @@ struct rdma_sq_rdma_wqe {
__le32 xrc_srq;
u8 req_type;
u8 flags;
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
-#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
u8 wqe_size;
u8 prev_wqe_size;
struct regpair remote_va;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b26aa88dab48..53f00dbf313f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -604,12 +604,11 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
return ERR_PTR(-ENOMEM);
for (i = 0; i < pbl_info->num_pbls; i++) {
- va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
- &pa, flags);
+ va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
+ &pa, flags);
if (!va)
goto err;
- memset(va, 0, pbl_info->pbl_size);
pbl_table[i].va = va;
pbl_table[i].pa = pa;
}
@@ -3040,7 +3039,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swqe->wqe_size = 2;
swqe2 = qed_chain_produce(&qp->sq.pbl);
- swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+ swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
wr, bad_wr);
swqe->length = cpu_to_le32(length);
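
The endianness fix here rests on the verbs convention that immediate data travels as big-endian (__be32) in ib_send_wr and ib_wc, while the qedr hardware descriptors are little-endian; the old code copied the value as if it were CPU-endian. The round trip as a fragment (variable names illustrative):

__be32 imm  = wr->ex.imm_data;			/* verbs: network byte order */
__le32 hw   = cpu_to_le32(be32_to_cpu(imm));	/* post into the SQ WQE */
__be32 back = cpu_to_be32(le32_to_cpu(hw));	/* report via the CQE's ib_wc */
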
@@ -3471,9 +3470,9 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
break;
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
if (qp->state != QED_ROCE_QP_STATE_ERR)
- DP_ERR(dev,
- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
- cq->icid, qp->icid);
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
IB_WC_WR_FLUSH_ERR, 1);
break;
@@ -3591,7 +3590,7 @@ static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
wc->byte_len = le32_to_cpu(resp->length);
if (resp->flags & QEDR_RESP_IMM) {
- wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
wc->wc_flags |= IB_WC_WITH_IMM;
if (resp->flags & QEDR_RESP_RDMA)
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 092ed8103842..0235f76bbc72 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1428,8 +1428,6 @@ u64 qib_sps_ints(void);
*/
dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
size_t, int);
-const char *qib_get_unit_name(int unit);
-const char *qib_get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
/*
@@ -1488,15 +1486,15 @@ extern struct mutex qib_mutex;
#define qib_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
##__VA_ARGS__)
#define qib_devinfo(pcidev, fmt, ...) \
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 33d3335385e8..3117cc5f2a9a 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -81,22 +81,6 @@ MODULE_DESCRIPTION("Intel IB driver");
struct qlogic_ib_stats qib_stats;
-const char *qib_get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), "infinipath%u", unit);
- return iname;
-}
-
-const char *qib_get_card_name(struct rvt_dev_info *rdi)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
- return qib_get_unit_name(dd->unit);
-}
-
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 33a2e74c8495..5838b3bf34b9 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -163,8 +163,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
if (bguid[6] == 0xff) {
if (bguid[5] == 0xff) {
qib_dev_err(dd,
- "Can't set %s GUID from base, wraps to OUI!\n",
- qib_get_unit_name(t));
+ "Can't set GUID from base, wraps to OUI!\n");
dd->base_guid = 0;
goto bail;
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 2d6a191afec0..f7593b5e2b76 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -58,7 +58,7 @@ static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
-static unsigned int qib_poll(struct file *, struct poll_table_struct *);
+static __poll_t qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);
/*
@@ -568,20 +568,16 @@ done:
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
struct qib_pportdata *ppd = rcd->ppd;
- int i, any = 0, pidx = -1;
+ int i, pidx = -1;
+ bool any = false;
u16 lkey = key & 0x7FFF;
- int ret;
- if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
+ if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF))
/* nothing to do; this key always valid */
- ret = 0;
- goto bail;
- }
+ return 0;
- if (!lkey) {
- ret = -EINVAL;
- goto bail;
- }
+ if (!lkey)
+ return -EINVAL;
/*
* Set the full membership bit, because it has to be
@@ -594,18 +590,14 @@ static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
if (!rcd->pkeys[i] && pidx == -1)
pidx = i;
- if (rcd->pkeys[i] == key) {
- ret = -EEXIST;
- goto bail;
- }
+ if (rcd->pkeys[i] == key)
+ return -EEXIST;
}
- if (pidx == -1) {
- ret = -EBUSY;
- goto bail;
- }
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (pidx == -1)
+ return -EBUSY;
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i]) {
- any++;
+ any = true;
continue;
}
if (ppd->pkeys[i] == key) {
@@ -613,44 +605,34 @@ static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
if (atomic_inc_return(pkrefs) > 1) {
rcd->pkeys[pidx] = key;
- ret = 0;
- goto bail;
- } else {
- /*
- * lost race, decrement count, catch below
- */
- atomic_dec(pkrefs);
- any++;
+ return 0;
}
+ /*
+ * lost race, decrement count, catch below
+ */
+ atomic_dec(pkrefs);
+ any = true;
}
- if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey)
/*
* It makes no sense to have both the limited and
* full membership PKEY set at the same time since
* the unlimited one will disable the limited one.
*/
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
+ return -EEXIST;
}
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!any)
+ return -EBUSY;
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i] &&
atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
rcd->pkeys[pidx] = key;
ppd->pkeys[i] = key;
(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
- ret = 0;
- goto bail;
+ return 0;
}
}
- ret = -EBUSY;
-
-bail:
- return ret;
+ return -EBUSY;
}
/**
@@ -1092,12 +1074,12 @@ bail:
return ret;
}
-static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &rcd->wait, pt);
@@ -1114,12 +1096,12 @@ static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
return pollflag;
}
-static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &rcd->wait, pt);
@@ -1135,10 +1117,10 @@ static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
return pollflag;
}
-static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
{
struct qib_ctxtdata *rcd;
- unsigned pollflag;
+ __poll_t pollflag;
rcd = ctxt_fp(fp);
if (!rcd)
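
The qib_file_ops.c changes above do two things: the poll entry points are re-typed from unsigned int to __poll_t, the sparse-checked bitmask type that the tree-wide poll annotation work introduced, and qib_set_part_key() drops its goto-bail bookkeeping in favor of early returns plus a bool for the "any free slot" flag. A minimal sketch of the __poll_t convention, using a hypothetical device (not part of this patch):

    /* Sketch only: hypothetical driver showing the __poll_t convention. */
    static __poll_t demo_poll(struct file *fp, struct poll_table_struct *pt)
    {
            struct demo_ctx *ctx = fp->private_data;    /* hypothetical */
            __poll_t mask = 0;

            poll_wait(fp, &ctx->wait, pt);              /* register for wakeups */
            if (demo_data_ready(ctx))                   /* hypothetical helper */
                    mask |= EPOLLIN | EPOLLRDNORM;      /* readable */
            return mask;                                /* EPOLL* bitmask */
    }
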
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 85dfbba427f6..3990f386aa32 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1119,6 +1119,8 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
"Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
+ rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);
+
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {
ret = -ENOMEM;
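
The new rvt_set_ibdev_name() call hands the "qib%d" device name to rdmavt at allocation time; together with the qib_verbs.c hunk further down, which drops the strlcpy() of ibdev->name and the get_card_name callback, device naming is centralized in rdmavt. A plausible shape for the helper, inferred from this call site (sketch; the real rdmavt implementation may differ):

    /* Sketch, inferred from the "%s%d", "qib", dd->unit call above. */
    void rvt_set_ibdev_name(struct rvt_dev_info *rdi, const char *fmt,
                            const char *name, int unit)
    {
            snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
    }
    /* rvt_set_ibdev_name(&rdi, "%s%d", "qib", 0) then yields "qib0". */
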
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
deleted file mode 100644
index 8fdf79f8d4e4..000000000000
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "qib.h"
-
-/**
- * qib_alloc_lkey - allocate an lkey
- * @mr: memory region that this lkey protects
- * @dma_region: 0->normal key, 1->restricted DMA key
- *
- * Returns 0 if successful, otherwise returns -errno.
- *
- * Increments mr reference count as required.
- *
- * Sets the lkey field mr for non-dma regions.
- *
- */
-
-int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
-{
- unsigned long flags;
- u32 r;
- u32 n;
- int ret = 0;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct rvt_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
-
- /* special case for dma_mr lkey == 0 */
- if (dma_region) {
- struct rvt_mregion *tmr;
-
- tmr = rcu_access_pointer(dev->dma_mr);
- if (!tmr) {
- qib_get_mr(mr);
- rcu_assign_pointer(dev->dma_mr, mr);
- mr->lkey_published = 1;
- }
- goto success;
- }
-
- /* Find the next available LKEY */
- r = rkt->next;
- n = r;
- for (;;) {
- if (rkt->table[r] == NULL)
- break;
- r = (r + 1) & (rkt->max - 1);
- if (r == n)
- goto bail;
- }
- rkt->next = (r + 1) & (rkt->max - 1);
- /*
- * Make sure lkey is never zero which is reserved to indicate an
- * unrestricted LKEY.
- */
- rkt->gen++;
- /*
- * bits are capped in qib_verbs.c to insure enough bits
- * for generation number
- */
- mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
- ((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
- << 8);
- if (mr->lkey == 0) {
- mr->lkey |= 1 << 8;
- rkt->gen++;
- }
- qib_get_mr(mr);
- rcu_assign_pointer(rkt->table[r], mr);
- mr->lkey_published = 1;
-success:
- spin_unlock_irqrestore(&rkt->lock, flags);
-out:
- return ret;
-bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = -ENOMEM;
- goto out;
-}
-
-/**
- * qib_free_lkey - free an lkey
- * @mr: mr to free from tables
- */
-void qib_free_lkey(struct rvt_mregion *mr)
-{
- unsigned long flags;
- u32 lkey = mr->lkey;
- u32 r;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct rvt_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
- if (!mr->lkey_published)
- goto out;
- if (lkey == 0)
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- else {
- r = lkey >> (32 - ib_rvt_lkey_table_size);
- RCU_INIT_POINTER(rkt->table[r], NULL);
- }
- qib_put_mr(mr);
- mr->lkey_published = 0;
-out:
- spin_unlock_irqrestore(&rkt->lock, flags);
-}
-
-/**
- * qib_rkey_ok - check the IB virtual address, length, and RKEY
- * @qp: qp for validation
- * @sge: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Return 1 if successful, otherwise 0.
- *
- * increments the reference count upon success
- */
-int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc)
-{
- struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct rvt_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /* We use RKEY == zero for kernel virtual addresses */
- rcu_read_lock();
- if (rkey == 0) {
- struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
- struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- sge->mr = mr;
- sge->vaddr = (void *) vaddr;
- sge->length = len;
- sge->sge_length = len;
- sge->m = 0;
- sge->n = 0;
- goto ok;
- }
-
- mr = rcu_dereference(
- rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
- goto bail;
-
- off = vaddr - mr->iova;
- if (unlikely(vaddr < mr->iova || off + len > mr->length ||
- (mr->access_flags & acc) == 0))
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- page sizes are uniform power of 2 so no loop is necessary
- entries_spanned_by_off is the number of times the loop below
- would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off / RVT_SEGSZ;
- n = entries_spanned_by_off % RVT_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= RVT_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- sge->mr = mr;
- sge->vaddr = mr->map[m]->segs[n].vaddr + off;
- sge->length = mr->map[m]->segs[n].length - off;
- sge->sge_length = len;
- sge->m = m;
- sge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
-
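
The deleted allocator packed three fields into each 32-bit lkey: the table index in the top ib_rvt_lkey_table_size bits, a wrap-around generation counter starting at bit 8 (so a reused slot hands out a different key), and zeroes in the low byte, with lkey == 0 reserved for the unrestricted DMA key. A worked example of the deleted packing, assuming ib_rvt_lkey_table_size == 16:

    /* index r = 5, generation = 3, table size bits = 16:
     *   lkey = (5 << (32 - 16)) | ((((1 << (24 - 16)) - 1) & 3) << 8)
     *        = 0x00050000 | 0x00000300
     *        = 0x00050300
     * If the result were 0, the "mr->lkey |= 1 << 8" fixup would apply,
     * since lkey 0 is reserved for the unrestricted key. */
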
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8f5754fb8579..cfddff45413f 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head))
goto bail;
/*
@@ -434,13 +432,13 @@ no_flow_control:
qp->s_state = OP(COMPARE_SWAP);
put_ib_ateth_swap(wqe->atomic_wr.swap,
&ohdr->u.atomic_eth);
- put_ib_ateth_swap(wqe->atomic_wr.compare_add,
- &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
} else {
qp->s_state = OP(FETCH_ADD);
put_ib_ateth_swap(wqe->atomic_wr.compare_add,
&ohdr->u.atomic_eth);
- put_ib_ateth_swap(0, &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
}
put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
&ohdr->u.atomic_eth);
@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
goto ack_done;
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
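
The smp_read_barrier_depends() deletions here (and in qib_ruc.c, qib_uc.c and qib_ud.c below) follow the kernel-wide cleanup that folded the Alpha-only dependency barrier into READ_ONCE(), so a bare READ_ONCE() of the producer index is now enough before using it. The resulting consumer pattern, sketched:

    /* Sketch: READ_ONCE() now implies the old dependency barrier. */
    u32 head = READ_ONCE(qp->s_head);           /* producer-updated index */
    if (qp->s_last == head)
            goto bail;                          /* send queue empty */
    wqe = rvt_get_swqe_ptr(qp, qp->s_last);     /* safe to use the slot */
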
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 9a37e844d4c8..4662cc7bde92 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index bddcc37ace44..70c58b88192c 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 15962ed193ce..386c3c4da0c7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index c55000501582..fabee760407e 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1571,7 +1571,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
if (!ib_qib_sys_image_guid)
ib_qib_sys_image_guid = ppd->guid;
- strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
ibdev->owner = THIS_MODULE;
ibdev->node_guid = ppd->guid;
ibdev->phys_port_cnt = dd->num_pports;
@@ -1586,7 +1585,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
* Fill in rvt info object.
*/
dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
- dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 685ef2293cb8..4210ca14014d 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -45,7 +45,6 @@
#include "usnic_ib_verbs.h"
#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
-#include "usnic_ib_sysfs.h"
static ssize_t usnic_ib_show_board(struct device *device,
struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index aa2456a4f9bd..a688a5669168 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -47,7 +47,6 @@
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
-#include "usnic_ib_verbs.h"
#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 4f7bd3b6a315..44cb1cfba417 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -93,7 +93,7 @@ struct pvrdma_cq {
struct pvrdma_page_dir pdir;
u32 cq_handle;
bool is_kernel;
- atomic_t refcnt;
+ refcount_t refcnt;
struct completion free;
};
@@ -196,7 +196,7 @@ struct pvrdma_qp {
u8 state;
bool is_kernel;
struct mutex mutex; /* QP state mutex. */
- atomic_t refcnt;
+ refcount_t refcnt;
struct completion free;
};
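
Converting the pvrdma refcnt fields from atomic_t to refcount_t buys saturation semantics plus overflow and use-after-free warnings at essentially no cost; the API maps almost one-to-one, as the later hunks show. The mapping, sketched:

    #include <linux/refcount.h>

    refcount_set(&cq->refcnt, 1);               /* was atomic_set(..., 1) */
    refcount_inc(&cq->refcnt);                  /* was atomic_inc(); WARNs on 0 */
    if (refcount_dec_and_test(&cq->refcnt))     /* was atomic_dec_and_test() */
            complete(&cq->free);
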
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index e529622cefad..faa9478c14a6 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -132,8 +132,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
}
cq->ibcq.cqe = entries;
+ cq->is_kernel = !context;
- if (context) {
+ if (!cq->is_kernel) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
ret = -EFAULT;
goto err_cq;
@@ -148,8 +149,6 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
npages = ib_umem_page_count(cq->umem);
} else {
- cq->is_kernel = true;
-
/* One extra page for shared ring state */
npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
PAGE_SIZE - 1) / PAGE_SIZE;
@@ -178,7 +177,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
else
pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
- atomic_set(&cq->refcnt, 1);
+ refcount_set(&cq->refcnt, 1);
init_completion(&cq->free);
spin_lock_init(&cq->cq_lock);
@@ -202,7 +201,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
- if (context) {
+ if (!cq->is_kernel) {
cq->uar = &(to_vucontext(context)->uar);
/* Copy udata back. */
@@ -219,7 +218,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
err_page_dir:
pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
- if (context)
+ if (!cq->is_kernel)
ib_umem_release(cq->umem);
err_cq:
atomic_dec(&dev->num_cqs);
@@ -230,7 +229,7 @@ err_cq:
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
- if (atomic_dec_and_test(&cq->refcnt))
+ if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index e92681878c93..d650a9fcde24 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -243,13 +243,13 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
mutex_init(&dev->port_mutex);
spin_lock_init(&dev->desc_lock);
- dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
+ dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
GFP_KERNEL);
if (!dev->cq_tbl)
return ret;
spin_lock_init(&dev->cq_tbl_lock);
- dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
+ dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
GFP_KERNEL);
if (!dev->qp_tbl)
goto err_cq_free;
@@ -333,7 +333,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
if (qp)
- atomic_inc(&qp->refcnt);
+ refcount_inc(&qp->refcnt);
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
if (qp && qp->ibqp.event_handler) {
@@ -346,7 +346,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
ibqp->event_handler(&e, ibqp->qp_context);
}
if (qp) {
- if (atomic_dec_and_test(&qp->refcnt))
+ if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
}
}
@@ -359,7 +359,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
if (cq)
- atomic_inc(&cq->refcnt);
+ refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (cq && cq->ibcq.event_handler) {
@@ -372,7 +372,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
ibcq->event_handler(&e, ibcq->cq_context);
}
if (cq) {
- if (atomic_dec_and_test(&cq->refcnt))
+ if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
}
}
@@ -531,13 +531,13 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
if (cq)
- atomic_inc(&cq->refcnt);
+ refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (cq && cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
if (cq) {
- if (atomic_dec_and_test(&cq->refcnt))
+ if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
}
pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
@@ -882,8 +882,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "device version %d, driver version %d\n",
dev->dsr_version, PVRDMA_VERSION);
- dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
- &dev->dsrbase, GFP_KERNEL);
+ dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
if (!dev->dsr) {
dev_err(&pdev->dev, "failed to allocate shared region\n");
ret = -ENOMEM;
@@ -891,7 +891,6 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
}
/* Setup the shared region */
- memset(dev->dsr, 0, sizeof(*dev->dsr));
dev->dsr->driver_version = PVRDMA_VERSION;
dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
PVRDMA_GOS_BITS_32 :
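
The dma_zalloc_coherent() switch folds the allocation and the follow-up memset() into one call, which is why the explicit "memset(dev->dsr, 0, ...)" disappears in the next hunk. Equivalent shapes, for reference:

    /* Before: allocate coherent memory, then clear it by hand. */
    dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dsr), &dma_handle, GFP_KERNEL);
    if (dsr)
            memset(dsr, 0, sizeof(*dsr));

    /* After: one call returns zeroed coherent memory. */
    dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dsr), &dma_handle, GFP_KERNEL);
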
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index 8519f3212e52..fa96fa4fb829 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -119,10 +119,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
- int nchunks;
int ret;
- int entry;
- struct scatterlist *sg;
if (length == 0 || length > dev->dsr->caps.max_mr_size) {
dev_warn(&dev->pdev->dev, "invalid mem region length\n");
@@ -137,13 +134,9 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(umem);
}
- nchunks = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
- nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
-
- if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
- nchunks);
+ umem->npages);
ret = -EINVAL;
goto err_umem;
}
@@ -158,7 +151,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mmr.size = length;
mr->umem = umem;
- ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
if (ret) {
dev_warn(&dev->pdev->dev,
"could not allocate page directory\n");
@@ -175,7 +168,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
cmd->length = length;
cmd->pd_handle = to_vpd(pd)->pd_handle;
cmd->access_flags = access_flags;
- cmd->nchunks = nchunks;
+ cmd->nchunks = umem->npages;
cmd->pdir_dma = mr->pdir.dir_dma;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 4059308e1454..7bf518bdbf21 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -245,12 +245,13 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
mutex_init(&qp->mutex);
- atomic_set(&qp->refcnt, 1);
+ refcount_set(&qp->refcnt, 1);
init_completion(&qp->free);
qp->state = IB_QPS_RESET;
+ qp->is_kernel = !(pd->uobject && udata);
- if (pd->uobject && udata) {
+ if (!qp->is_kernel) {
dev_dbg(&dev->pdev->dev,
"create queuepair from user space\n");
@@ -291,8 +292,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
} else {
- qp->is_kernel = true;
-
ret = pvrdma_set_sq_size(to_vdev(pd->device),
&init_attr->cap, qp);
if (ret)
@@ -394,7 +393,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
err_pdir:
pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
- if (pd->uobject && udata) {
+ if (!qp->is_kernel) {
if (qp->rumem)
ib_umem_release(qp->rumem);
if (qp->sumem)
@@ -428,7 +427,7 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
- if (atomic_dec_and_test(&qp->refcnt))
+ if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
wait_for_completion(&qp->free);
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 97d71e49c092..fb52b669bfce 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -56,7 +56,7 @@
* rvt_cq_enter - add a new entry to the completion queue
* @cq: completion queue
* @entry: work completion entry to add
- * @sig: true if @entry is solicited
+ * @solicited: true if @entry is solicited
*
* This may be called with qp->s_lock held.
*/
@@ -101,8 +101,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
wc->uqueue[head].opcode = entry->opcode;
wc->uqueue[head].vendor_err = entry->vendor_err;
wc->uqueue[head].byte_len = entry->byte_len;
- wc->uqueue[head].ex.imm_data =
- (__u32 __force)entry->ex.imm_data;
+ wc->uqueue[head].ex.imm_data = entry->ex.imm_data;
wc->uqueue[head].qp_num = entry->qp->qp_num;
wc->uqueue[head].src_qp = entry->src_qp;
wc->uqueue[head].wc_flags = entry->wc_flags;
@@ -198,7 +197,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
return ERR_PTR(-EINVAL);
/* Allocate the completion queue structure. */
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
if (!cq)
return ERR_PTR(-ENOMEM);
@@ -214,7 +213,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
else
sz += sizeof(struct ib_wc) * (entries + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc) {
ret = ERR_PTR(-ENOMEM);
goto bail_cq;
@@ -369,7 +370,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
else
sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc)
return -ENOMEM;
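
These rvt_create_cq()/rvt_resize_cq() hunks make the allocations NUMA-aware: control structures and kernel-only completion rings come from the device's home node via kzalloc_node()/vzalloc_node(), while rings destined for user space still need vmalloc_user(), whose pages may safely be mmap'ed. The selection, sketched:

    /* Sketch: allocator choice depends on whether user space maps the ring. */
    if (udata)
            wc = vmalloc_user(sz);                     /* mmap-able, zeroed */
    else
            wc = vzalloc_node(sz, rdi->dparms.node);   /* node-local, zeroed */
    if (!wc)
            return ERR_PTR(-ENOMEM);
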
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index b3a38c5e4cad..dd11c6fcd060 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -272,7 +272,7 @@ bail:
/**
* rvt_attach_mcast - attach a qp to a multicast group
* @ibqp: Infiniband qp
- * @igd: multicast guid
+ * @gid: multicast guid
* @lid: multicast lid
*
* Return: 0 on success
@@ -335,7 +335,7 @@ bail_mcast:
/**
* rvt_detach_mcast - remove a qp from a multicast group
* @ibqp: Infiniband qp
- * @igd: multicast guid
+ * @gid: multicast guid
* @lid: multicast lid
*
* Return: 0 on success
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 42713511b53b..1b2e5362a3ff 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -768,7 +768,7 @@ bail:
/**
* rvt_map_phys_fmr - set up a fast memory region
- * @ibmfr: the fast memory region to set up
+ * @ibfmr: the fast memory region to set up
* @page_list: the list of pages to associate with the fast memory region
* @list_len: the number of pages to associate with the fast memory region
* @iova: the virtual address of the start of the fast memory region
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 9177df60742a..c82e6bb3d77c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -269,7 +269,7 @@ no_qp_table:
/**
* free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
+ * @rdi: rvt device info structure
*
* There should not be any QPs still in use.
* Free memory for table.
@@ -335,9 +335,9 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
/**
* alloc_qpn - Allocate the next available qpn or zero/one for QP type
* IB_QPT_SMI/IB_QPT_GSI
- *@rdi: rvt device info structure
- *@qpt: queue pair number table pointer
- *@port_num: IB port number, 1 based, comes from core
+ * @rdi: rvt device info structure
+ * @qpt: queue pair number table pointer
+ * @port_num: IB port number, 1 based, comes from core
*
* Return: The queue pair number
*/
@@ -1650,9 +1650,9 @@ static inline int rvt_qp_valid_operation(
/**
* rvt_qp_is_avail - determine queue capacity
- * @qp - the qp
- * @rdi - the rdmavt device
- * @reserved_op - is reserved operation
+ * @qp: the qp
+ * @rdi: the rdmavt device
+ * @reserved_op: is reserved operation
*
* This assumes the s_hlock is held but the s_last
* qp variable is uncontrolled.
@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail(
/* non-reserved operations */
if (likely(qp->s_avail))
return 0;
- smp_read_barrier_depends(); /* see rc.c */
slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
@@ -2075,6 +2074,7 @@ void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_WAIT_RNR;
to = rvt_aeth_to_usec(aeth);
+ trace_rvt_rnrnak_add(qp, to);
hrtimer_start(&qp->s_rnr_timer,
ns_to_ktime(1000 * to), HRTIMER_MODE_REL);
}
@@ -2104,17 +2104,14 @@ EXPORT_SYMBOL(rvt_stop_rc_timers);
* stop an rnr timer and return if the timer
* had been pending.
*/
-static int rvt_stop_rnr_timer(struct rvt_qp *qp)
+static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
- int rval = 0;
-
lockdep_assert_held(&qp->s_lock);
/* Remove QP from rnr timer */
if (qp->s_flags & RVT_S_WAIT_RNR) {
qp->s_flags &= ~RVT_S_WAIT_RNR;
- rval = hrtimer_try_to_cancel(&qp->s_rnr_timer);
+ trace_rvt_rnrnak_stop(qp, 0);
}
- return rval;
}
/**
@@ -2167,6 +2164,7 @@ enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
spin_lock_irqsave(&qp->s_lock, flags);
rvt_stop_rnr_timer(qp);
+ trace_rvt_rnrnak_timeout(qp, 0);
rdi->driver_f.schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
return HRTIMER_NORESTART;
@@ -2175,8 +2173,8 @@ EXPORT_SYMBOL(rvt_rc_rnr_retry);
/**
* rvt_qp_iter_init - initial for QP iteration
- * @rdi - rvt devinfo
- * @v - u64 value
+ * @rdi: rvt devinfo
+ * @v: u64 value
*
* This returns an iterator suitable for iterating QPs
* in the system.
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index f7c48e9023de..3707952b4364 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -90,7 +90,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
return ERR_PTR(-EINVAL);
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
if (!srq)
return ERR_PTR(-ENOMEM);
@@ -101,7 +101,10 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
srq->rq.max_sge = srq_init_attr->attr.max_sge;
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
sizeof(struct rvt_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz);
+ srq->rq.wq = udata ?
+ vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
+ vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
+ dev->dparms.node);
if (!srq->rq.wq) {
ret = ERR_PTR(-ENOMEM);
goto bail_srq;
@@ -129,16 +132,12 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
ret = ERR_PTR(err);
goto bail_ip;
}
- } else {
- srq->ip = NULL;
}
/*
* ib_create_srq() will initialize srq->ibsrq.
*/
spin_lock_init(&srq->rq.lock);
- srq->rq.wq->head = 0;
- srq->rq.wq->tail = 0;
srq->limit = srq_init_attr->attr.srq_limit;
spin_lock(&dev->n_srqs_lock);
@@ -200,7 +199,10 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
sz = sizeof(struct rvt_rwqe) +
srq->rq.max_sge * sizeof(struct ib_sge);
size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz);
+ wq = udata ?
+ vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
+ vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
+ dev->dparms.node);
if (!wq)
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
index bb4b1e710f22..36ddbd291ee0 100644
--- a/drivers/infiniband/sw/rdmavt/trace.h
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -45,8 +45,8 @@
*
*/
-#define RDI_DEV_ENTRY(rdi) __string(dev, rdi->driver_f.get_card_name(rdi))
-#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rdi->driver_f.get_card_name(rdi))
+#define RDI_DEV_ENTRY(rdi) __string(dev, rvt_get_ibdev_name(rdi))
+#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rvt_get_ibdev_name(rdi))
#include "trace_rvt.h"
#include "trace_qp.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
index 4c77a3119bda..efc9d814b032 100644
--- a/drivers/infiniband/sw/rdmavt/trace_qp.h
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -85,6 +85,48 @@ DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
TP_PROTO(struct rvt_qp *qp, u32 bucket),
TP_ARGS(qp, bucket));
+DECLARE_EVENT_CLASS(
+ rvt_rnrnak_template,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(void *, hrtimer)
+ __field(u32, s_flags)
+ __field(u32, to)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->hrtimer = &qp->s_rnr_timer;
+ __entry->s_flags = qp->s_flags;
+ __entry->to = to;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x hrtimer 0x%p s_flags 0x%x timeout %u us",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->hrtimer,
+ __entry->s_flags,
+ __entry->to
+ )
+);
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_add,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_timeout,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_stop,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
#endif /* __RVT_TRACE_QP_H */
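
The new RNR NAK trace events use the usual DECLARE_EVENT_CLASS/DEFINE_EVENT split: the class defines the record layout, field assignment, and format string once, and each DEFINE_EVENT stamps out a named event sharing them. From C code each event is then a plain call, as the qp.c hunks above show:

    trace_rvt_rnrnak_add(qp, to);      /* when the RNR timer is armed     */
    trace_rvt_rnrnak_stop(qp, 0);      /* when RVT_S_WAIT_RNR is cleared  */
    trace_rvt_rnrnak_timeout(qp, 0);   /* from the hrtimer expiry handler */
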
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 64bdd442078a..a4553b2b3696 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -224,7 +224,8 @@ static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
* rvt_query_pkey - Return a pkey from the table at a given index
* @ibdev: Verbs IB dev
* @port_num: Port number, 1 based from ib core
- * @intex: Index into pkey table
+ * @index: Index into pkey table
+ * @pkey: returned pkey from the port pkey table
*
* Return: 0 on failure pkey otherwise
*/
@@ -255,7 +256,7 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
* rvt_query_gid - Return a gid from the table
* @ibdev: Verbs IB dev
* @port_num: Port number, 1 based from ib core
- * @index: = Index in table
+ * @guid_index: Index in table
* @gid: Gid to return
*
* Return: 0 on success
@@ -297,8 +298,8 @@ static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
/**
* rvt_alloc_ucontext - Allocate a user context
- * @ibdev: Vers IB dev
- * @data: User data allocated
+ * @ibdev: Verbs IB dev
+ * @udata: User data allocated
*/
static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
@@ -413,7 +414,6 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
* required for rdmavt to function.
*/
if ((!rdi->driver_f.port_callback) ||
- (!rdi->driver_f.get_card_name) ||
(!rdi->driver_f.get_pci_dev))
return -EINVAL;
break;
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
index f363505312be..8823b2e7aac6 100644
--- a/drivers/infiniband/sw/rdmavt/vt.h
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -63,19 +63,19 @@
#define rvt_pr_info(rdi, fmt, ...) \
__rvt_pr_info(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
#define rvt_pr_warn(rdi, fmt, ...) \
__rvt_pr_warn(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
#define rvt_pr_err(rdi, fmt, ...) \
__rvt_pr_err(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 320bffc980d8..bad4a576d7cf 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,8 +1,8 @@
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
- depends on NET_UDP_TUNNEL
- depends on CRYPTO_CRC32
+ select NET_UDP_TUNNEL
+ select CRYPTO_CRC32
select DMA_VIRT_OPS
---help---
This driver implements the InfiniBand RDMA transport over
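
Switching NET_UDP_TUNNEL and CRYPTO_CRC32 from "depends on" to "select" means enabling RDMA_RXE now pulls those helper symbols in automatically, rather than hiding the rxe option until the user enables them by hand; "select" is the conventional choice for library-style symbols with no prompt of their own. Schematically:

    config EXAMPLE_DRIVER                  # sketch, not a real symbol
            tristate "Example driver"
            depends on INET                # user-visible prerequisite
            select NET_UDP_TUNNEL          # helper code: enabled with us
            select CRYPTO_CRC32
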
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 8c3d30b3092d..b7debb6f2eac 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,12 +77,6 @@ void rxe_release(struct kref *kref)
ib_dealloc_device(&rxe->ib_dev);
}
-void rxe_dev_put(struct rxe_dev *rxe)
-{
- kref_put(&rxe->ref_cnt, rxe_release);
-}
-EXPORT_SYMBOL_GPL(rxe_dev_put);
-
/* initialize rxe device parameters */
static int rxe_init_device_param(struct rxe_dev *rxe)
{
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 6447d736d5a4..7d232611303f 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -57,6 +57,7 @@
#include "rxe_hdr.h"
#include "rxe_param.h"
#include "rxe_verbs.h"
+#include "rxe_loc.h"
#define RXE_UVERBS_ABI_VERSION (1)
@@ -95,7 +96,10 @@ void rxe_remove_all(void);
int rxe_rcv(struct sk_buff *skb);
-void rxe_dev_put(struct rxe_dev *rxe);
+static inline void rxe_dev_put(struct rxe_dev *rxe)
+{
+ kref_put(&rxe->ref_cnt, rxe_release);
+}
struct rxe_dev *net_to_rxe(struct net_device *ndev);
struct rxe_dev *get_rxe_by_name(const char *name);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index d7472a442a2c..96c3a6c5c4b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
void rxe_release(struct kref *kref);
-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 59dee10bebcb..159246b03867 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -82,7 +82,7 @@ struct rxe_dev *get_rxe_by_name(const char *name)
}
-struct rxe_recv_sockets recv_sockets;
+static struct rxe_recv_sockets recv_sockets;
struct device *rxe_dma_device(struct rxe_dev *rxe)
{
@@ -452,31 +452,26 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
- struct sk_buff *nskb;
struct rxe_av *av;
int err;
av = rxe_get_av(pkt);
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- return -ENOMEM;
-
- nskb->destructor = rxe_skb_tx_dtor;
- nskb->sk = pkt->qp->sk->sk;
+ skb->destructor = rxe_skb_tx_dtor;
+ skb->sk = pkt->qp->sk->sk;
rxe_add_ref(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (av->network_type == RDMA_NETWORK_IPV4) {
- err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else if (av->network_type == RDMA_NETWORK_IPV6) {
- err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else {
pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
atomic_dec(&pkt->qp->skb_out);
rxe_drop_ref(pkt->qp);
- kfree_skb(nskb);
+ kfree_skb(skb);
return -EINVAL;
}
@@ -485,7 +480,6 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
return -EAGAIN;
}
- kfree_skb(skb);
return 0;
}
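
The rxe_send() rework stops cloning the outgoing skb: the caller's skb is consumed directly, its destructor is pointed at rxe_skb_tx_dtor so the qp->skb_out accounting still runs when the packet is freed, and the trailing kfree_skb(skb) goes away because ip_local_out()/ip6_local_out() take ownership. The ownership rule, in sketch form:

    /* Sketch of the new ownership model:
     * - handed to the IP stack: the stack frees skb, we must not;
     * - unknown L3 protocol: we still own skb and free it ourselves. */
    skb->destructor = rxe_skb_tx_dtor;      /* accounting runs on free */
    skb->sk = pkt->qp->sk->sk;
    err = ip_local_out(net, skb->sk, skb);  /* consumes skb either way */
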
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 1c06b3bfe1b6..728d8c71b36a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -43,7 +43,6 @@ struct rxe_recv_sockets {
struct socket *sk6;
};
-extern struct rxe_recv_sockets recv_sockets;
extern struct notifier_block rxe_net_notifier;
void rxe_release_udp_tunnel(struct socket *sk);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 4469592b839d..137d6c0c49d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
}
/* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+static void rxe_qp_do_cleanup(struct work_struct *work)
{
- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+ struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
rxe_drop_all_mcast_groups(qp);
@@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg)
kernel_sock_shutdown(qp->sk, SHUT_RDWR);
sock_release(qp->sk);
}
+
+/* called when the last reference to the qp is dropped */
+void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+{
+ struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+
+ execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
+}
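
The last qp reference can be dropped from atomic context, but the teardown calls kernel_sock_shutdown() and sock_release(), which may sleep; execute_in_process_context() runs the worker inline when the caller is already in process context and otherwise defers it through the embedded struct execute_work added to rxe_qp below. The pattern, sketched with a hypothetical object:

    #include <linux/workqueue.h>

    /* Sketch: deferring sleepable teardown from a possibly-atomic release. */
    static void demo_do_cleanup(struct work_struct *work)
    {
            struct demo_obj *obj = container_of(work, struct demo_obj,
                                                cleanup_work.work);
            sock_release(obj->sk);          /* may sleep: safe here */
    }

    static void demo_release(struct demo_obj *obj)
    {
            /* inline if in process context, else queued to a workqueue */
            execute_in_process_context(demo_do_cleanup, &obj->cleanup_work);
    }
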
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index fb8c83e055e1..4c3f899241d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -336,7 +336,6 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
union ib_gid dgid;
union ib_gid *pdgid;
- u16 index;
if (skb->protocol == htons(ETH_P_IP)) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
@@ -348,7 +347,7 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
IB_GID_TYPE_ROCE_UDP_ENCAP,
- 1, rxe->ndev, &index);
+ 1, rxe->ndev, NULL);
}
/* rxe_rcv is called from the interface driver */
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 26a7f923045b..7bdaf71b8221 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -594,15 +594,8 @@ int rxe_requester(void *arg)
rxe_add_ref(qp);
next_wqe:
- if (unlikely(!qp->valid)) {
- rxe_drain_req_pkts(qp, true);
+ if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
goto exit;
- }
-
- if (unlikely(qp->req.state == QP_STATE_ERROR)) {
- rxe_drain_req_pkts(qp, true);
- goto exit;
- }
if (unlikely(qp->req.state == QP_STATE_RESET)) {
qp->req.wqe_index = consumer_index(qp->sq.queue);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 4240866a5331..d37bb9b97569 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -863,8 +863,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
if (pkt->mask & RXE_IMMDT_MASK) {
uwc->wc_flags |= IB_WC_WITH_IMM;
- uwc->ex.imm_data =
- (__u32 __force)immdt_imm(pkt);
+ uwc->ex.imm_data = immdt_imm(pkt);
}
if (pkt->mask & RXE_IETH_MASK) {
@@ -1210,7 +1209,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
}
}
-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
struct sk_buff *skb;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index d03002b9d84d..7210a784abb4 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -814,6 +814,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
(queue_count(qp->sq.queue) > 1);
rxe_run_task(&qp->req.task, must_sched);
+ if (unlikely(qp->req.state == QP_STATE_ERROR))
+ rxe_run_task(&qp->comp.task, 1);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 0c2dbe45c729..1019f5e7dbdd 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -35,6 +35,7 @@
#define RXE_VERBS_H
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
@@ -281,6 +282,8 @@ struct rxe_qp {
struct timer_list rnr_nak_timer;
spinlock_t state_lock; /* guard requester and completer */
+
+ struct execute_work cleanup_work;
};
enum rxe_mem_state {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 2c13123bfd69..962fbcb57dc7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -766,12 +766,14 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
skb_orphan(skb);
skb_dst_drop(skb);
- if (netif_queue_stopped(dev))
- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
- IB_CQ_REPORT_MISSED_EVENTS)) {
+ if (netif_queue_stopped(dev)) {
+ rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS);
+ if (unlikely(rc < 0))
ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
+ else if (rc)
napi_schedule(&priv->send_napi);
- }
+ }
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
@@ -876,7 +878,7 @@ int ipoib_cm_dev_open(struct net_device *dev)
priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
if (IS_ERR(priv->cm.id)) {
- printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
+ pr_warn("%s: failed to create CM ID\n", priv->ca->name);
ret = PTR_ERR(priv->cm.id);
goto err_cm;
}
@@ -884,8 +886,8 @@ int ipoib_cm_dev_open(struct net_device *dev)
ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
0);
if (ret) {
- printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
- IPOIB_CM_IETF_ID | priv->qp->qp_num);
+ pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
+ IPOIB_CM_IETF_ID | priv->qp->qp_num);
goto err_listen;
}
@@ -1456,8 +1458,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int e = skb_queue_empty(&priv->cm.skb_queue);
- if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
@@ -1563,7 +1564,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
if (PTR_ERR(priv->cm.srq) != -ENOSYS)
- printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
+ pr_warn("%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
return;
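
With IB_CQ_REPORT_MISSED_EVENTS, ib_req_notify_cq() has a three-way result: negative on error, positive when completions were already pending while notification was disarmed, and zero when the CQ was armed cleanly. The CM send path above now distinguishes all three instead of treating any nonzero return as a failure:

    rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
    if (unlikely(rc < 0))
            pr_warn("request notify on send CQ failed\n");  /* real error */
    else if (rc)
            napi_schedule(&priv->send_napi);   /* missed completions: poll */
    /* rc == 0: notification armed, nothing pending */
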
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index e6151a29c412..10384ea50bed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -644,7 +644,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
if (netif_queue_stopped(dev))
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
- IB_CQ_REPORT_MISSED_EVENTS))
+ IB_CQ_REPORT_MISSED_EVENTS) < 0)
ipoib_warn(priv, "request notify on send CQ failed\n");
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
@@ -1085,8 +1085,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
netif_addr_unlock_bh(priv->dev);
- err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
- priv->dev, &port, &index);
+ err = ib_find_gid(priv->ca, &search_gid, priv->dev, &port, &index);
netif_addr_lock_bh(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 12b7f911f0e5..5930c7d9a8fb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -768,13 +768,30 @@ static void path_rec_completion(int status,
if (!status) {
struct rdma_ah_attr av;
- if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
+ if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
+ pathrec, &av))
ah = ipoib_create_ah(dev, priv->pd, &av);
}
spin_lock_irqsave(&priv->lock, flags);
if (!IS_ERR_OR_NULL(ah)) {
+ /*
+ * pathrec.dgid is used as the database key from the LLADDR,
+ * it must remain unchanged even if the SA returns a different
+ * GID to use in the AH.
+ */
+ if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid))) {
+ ipoib_dbg(
+ priv,
+ "%s got PathRec for gid %pI6 while asked for %pI6\n",
+ dev->name, pathrec->dgid.raw,
+ path->pathrec.dgid.raw);
+ memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
+ }
+
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -840,6 +857,23 @@ static void path_rec_completion(int status,
}
}
+static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
+ void *gid)
+{
+ path->dev = priv->dev;
+
+ if (rdma_cap_opa_ah(priv->ca, priv->port))
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
+
+ memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
+ path->pathrec.sgid = priv->local_gid;
+ path->pathrec.pkey = cpu_to_be16(priv->pkey);
+ path->pathrec.numb_path = 1;
+ path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+}
+
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -852,21 +886,11 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
if (!path)
return NULL;
- path->dev = dev;
-
skb_queue_head_init(&path->queue);
INIT_LIST_HEAD(&path->neigh_list);
- if (rdma_cap_opa_ah(priv->ca, priv->port))
- path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
- else
- path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
- memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
- path->pathrec.sgid = priv->local_gid;
- path->pathrec.pkey = cpu_to_be16(priv->pkey);
- path->pathrec.numb_path = 1;
- path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ init_path_rec(priv, path, gid);
return path;
}
@@ -902,8 +926,8 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
- struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +941,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
- return;
+ return NULL;
+ }
+
+ /* To avoid a race condition, make sure that the
+ * neigh is added only once.
+ */
+ if (unlikely(!list_empty(&neigh->list))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return neigh;
}
path = __path_find(dev, daddr + 4);
@@ -956,7 +988,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
path->ah->last_send = rn->send(dev, skb, path->ah->ah,
IPOIB_QPN(daddr));
ipoib_neigh_put(neigh);
- return;
+ return NULL;
}
} else {
neigh->ah = NULL;
@@ -973,7 +1005,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
- return;
+ return NULL;
err_path:
ipoib_neigh_free(neigh);
@@ -983,6 +1015,8 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
+
+ return NULL;
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -995,6 +1029,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
+ /* no broadcast means that all paths are, or will become, invalid */
+ if (!priv->broadcast)
+ goto drop_and_unlock;
+
path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
@@ -1004,6 +1042,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
new_path = 1;
}
if (path) {
+ if (!new_path)
+ /* make sure there are no changes in the existing path record */
+ init_path_rec(priv, path, phdr->hwaddr + 4);
+
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
push_pseudo_header(skb, phdr->hwaddr);
__skb_queue_tail(&path->queue, skb);
@@ -1020,8 +1062,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
} else
__path_add(dev, path);
} else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
+ goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1041,11 +1082,16 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
push_pseudo_header(skb, phdr->hwaddr);
__skb_queue_tail(&path->queue, skb);
} else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
+ goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+
+drop_and_unlock:
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1091,8 +1137,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_TIPC):
neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, phdr->hwaddr, dev);
- return NETDEV_TX_OK;
+ neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+ if (likely(!neigh))
+ return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
@@ -1663,8 +1710,8 @@ static int ipoib_dev_init_default(struct net_device *dev)
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
if (!priv->tx_ring) {
- printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
- priv->ca->name, ipoib_sendq_size);
+ pr_warn("%s: failed to allocate TX ring (%d entries)\n",
+ priv->ca->name, ipoib_sendq_size);
goto out_rx_ring_cleanup;
}
@@ -2196,16 +2243,17 @@ static struct net_device *ipoib_add_port(const char *format,
int result = -ENOMEM;
priv = ipoib_intf_alloc(hca, port, format);
- if (!priv)
+ if (!priv) {
+ pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
goto alloc_mem_failed;
+ }
SET_NETDEV_DEV(priv->dev, hca->dev.parent);
priv->dev->dev_id = port - 1;
result = ib_query_port(hca, port, &attr);
if (result) {
- printk(KERN_WARNING "%s: ib_query_port %d failed\n",
- hca->name, port);
+ pr_warn("%s: ib_query_port %d failed\n", hca->name, port);
goto device_init_failed;
}
@@ -2220,8 +2268,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
- printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2238,8 +2286,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
if (result) {
- printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: ib_query_gid port %d failed (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2249,8 +2297,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ipoib_dev_init(priv->dev, hca, port);
if (result) {
- printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: failed to initialize port %d (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2260,8 +2308,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = register_netdev(priv->dev);
if (result) {
- printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
- hca->name, port, result);
+ pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+ hca->name, port, result);
goto register_failed;
}
@@ -2326,8 +2374,7 @@ static void ipoib_add_one(struct ib_device *device)
}
if (!count) {
- pr_err("Failed to init port, removing it\n");
- ipoib_remove_one(device, dev_list);
+ kfree(dev_list);
return;
}
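
unicast_arp_send() now funnels its failure paths through a single drop_and_unlock label, so the tx_dropped accounting, the skb free, and the unlock cannot drift apart as branches are added; this is the standard single-exit idiom for error handling under a lock. Skeleton:

    spin_lock_irqsave(&priv->lock, flags);
    if (!priv->broadcast)                   /* no paths can be valid yet */
            goto drop_and_unlock;
    /* ... queue or transmit the skb ... */
    spin_unlock_irqrestore(&priv->lock, flags);
    return;

    drop_and_unlock:
            ++dev->stats.tx_dropped;
            dev_kfree_skb_any(skb);
            spin_unlock_irqrestore(&priv->lock, flags);
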
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 93e149efc1f5..9b3f47ae2016 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
neigh = ipoib_neigh_alloc(daddr, dev);
- if (neigh) {
+ /* Make sure that the neigh is added only
+ * once to the mcast list.
+ */
+ if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index a1ed25422b72..984a88096f39 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -178,7 +178,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->recv_cq)) {
- printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
+ pr_warn("%s: failed to create receive CQ\n", ca->name);
goto out_cm_dev_cleanup;
}
@@ -187,7 +187,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->send_cq)) {
- printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
+ pr_warn("%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq;
}
@@ -208,7 +208,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
- printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
+ pr_warn("%s: failed to create QP\n", ca->name);
goto out_free_send_cq;
}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2a07692007bd..df49c4eb67f7 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -142,8 +142,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
}
- iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
- "VA:%#llX + unsol:%d\n",
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
task->itt, mem_reg->rkey,
(unsigned long long)mem_reg->sge.addr, unsol_sz);
}
@@ -436,7 +435,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_tx_desc *tx_desc = NULL;
+ struct iser_tx_desc *tx_desc;
struct iser_mem_reg *mem_reg;
unsigned long buf_offset;
unsigned long data_seg_len;
@@ -452,10 +451,8 @@ int iser_send_data_out(struct iscsi_conn *conn,
__func__,(int)itt,(int)data_seg_len,(int)buf_offset);
tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
- if (tx_desc == NULL) {
- iser_err("Failed to alloc desc for post dataout\n");
+ if (!tx_desc)
return -ENOMEM;
- }
tx_desc->type = ISCSI_TX_DATAOUT;
tx_desc->cqe.done = iser_dataout_comp;
@@ -475,8 +472,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
- iser_err("Offset:%ld & DSL:%ld in Data-Out "
- "inconsistent with total len:%ld, itt:%d\n",
+ iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
buf_offset, data_seg_len,
iser_task->data[ISER_DIR_OUT].data_len, itt);
err = -EINVAL;
@@ -614,8 +610,8 @@ iser_check_remote_inv(struct iser_conn *iser_conn,
iser_conn, rkey);
if (unlikely(!iser_conn->snd_w_inv)) {
- iser_err("conn %p: unexpected remote invalidation, "
- "terminating connection\n", iser_conn);
+ iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
+ iser_conn);
return -EPROTO;
}
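Editor's note: deleting the iser_err() on the allocation failure path above follows the usual kernel rule: unless __GFP_NOWARN is passed, a failing allocator already logs a diagnostic and a stack trace, so per-site messages only add noise. Kernel-style sketch of the preferred shape (illustrative names, not iser's; assumes desc_cache was created with kmem_cache_create() elsewhere):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct tx_desc { int type; };

    static struct kmem_cache *desc_cache;

    static int alloc_dataout_desc(struct tx_desc **out)
    {
            struct tx_desc *d = kmem_cache_zalloc(desc_cache, GFP_ATOMIC);

            if (!d)         /* no message: the allocator already warned */
                    return -ENOMEM;
            *out = d;
            return 0;
    }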
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 720dfb3a1ac2..fff40b097947 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ ib_drain_qp(isert_conn->qp);
list_del_init(&isert_conn->node);
isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
@@ -2123,6 +2124,9 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
u32 rkey, offset;
int ret;
+ if (cmd->ctx_init_done)
+ goto rdma_ctx_post;
+
if (dir == DMA_FROM_DEVICE) {
addr = cmd->write_va;
rkey = cmd->write_stag;
@@ -2150,11 +2154,15 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
se_cmd->t_data_sg, se_cmd->t_data_nents,
offset, addr, rkey, dir);
}
+
if (ret < 0) {
isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
return ret;
}
+ cmd->ctx_init_done = true;
+
+rdma_ctx_post:
ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
if (ret < 0)
isert_err("Cmd: %p failed to post RDMA res\n", cmd);
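Editor's note: the ctx_init_done flag added above makes isert_rdma_rw_ctx_post() idempotent with respect to initialization: a retried command re-posts the already-prepared rdma_rw context instead of initializing it a second time and leaking the first. The control flow, reduced to a runnable skeleton (illustrative names; assumes the same init-once, post-many contract):

    #include <stdbool.h>

    struct cmd_ctx {
            bool init_done;
    };

    static int ctx_init(struct cmd_ctx *c) { (void)c; return 0; } /* one-time setup */
    static int ctx_post(struct cmd_ctx *c) { (void)c; return 0; } /* repostable */

    static int post_or_repost(struct cmd_ctx *c)
    {
            int ret;

            if (!c->init_done) {
                    ret = ctx_init(c);
                    if (ret < 0)
                            return ret;
                    c->init_done = true;    /* never re-init on a repost */
            }
            return ctx_post(c);
    }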
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index d6fd248320ae..3b296bac4f60 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -126,6 +126,7 @@ struct isert_cmd {
struct rdma_rw_ctx rw;
struct work_struct comp_work;
struct scatterlist sg;
+ bool ctx_init_done;
};
static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 4b615c1451e7..15711dcc6f58 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -710,7 +710,7 @@ vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num)
/**
* opa_vnic_vema_send_trap -- This function sends a trap to the EM
- * @cport: pointer to vnic control port
+ * @adapter: pointer to vnic adapter
* @data: pointer to trap data filled by calling function
* @lid: issuers lid (encap_slid from vesw_port_info)
*
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 972d4b3c5223..b48843833d69 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -41,6 +41,7 @@
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
+#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <linux/atomic.h>
@@ -144,7 +145,9 @@ static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
-static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;
@@ -265,8 +268,8 @@ static void srp_qp_event(struct ib_event *event, void *context)
ib_event_msg(event->event), event->event);
}
-static int srp_init_qp(struct srp_target_port *target,
- struct ib_qp *qp)
+static int srp_init_ib_qp(struct srp_target_port *target,
+ struct ib_qp *qp)
{
struct ib_qp_attr *attr;
int ret;
@@ -277,7 +280,7 @@ static int srp_init_qp(struct srp_target_port *target,
ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
target->srp_host->port,
- be16_to_cpu(target->pkey),
+ be16_to_cpu(target->ib_cm.pkey),
&attr->pkey_index);
if (ret)
goto out;
@@ -298,32 +301,110 @@ out:
return ret;
}
-static int srp_new_cm_id(struct srp_rdma_ch *ch)
+static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct ib_cm_id *new_cm_id;
new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
- srp_cm_handler, ch);
+ srp_ib_cm_handler, ch);
if (IS_ERR(new_cm_id))
return PTR_ERR(new_cm_id);
- if (ch->cm_id)
- ib_destroy_cm_id(ch->cm_id);
- ch->cm_id = new_cm_id;
+ if (ch->ib_cm.cm_id)
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = new_cm_id;
if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
target->srp_host->port))
- ch->path.rec_type = SA_PATH_REC_TYPE_OPA;
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
else
- ch->path.rec_type = SA_PATH_REC_TYPE_IB;
- ch->path.sgid = target->sgid;
- ch->path.dgid = target->orig_dgid;
- ch->path.pkey = target->pkey;
- ch->path.service_id = target->service_id;
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
+ ch->ib_cm.path.sgid = target->sgid;
+ ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
+ ch->ib_cm.path.pkey = target->ib_cm.pkey;
+ ch->ib_cm.path.service_id = target->ib_cm.service_id;
return 0;
}
+static const char *inet_ntop(const void *sa, char *dst, unsigned int size)
+{
+ switch (((struct sockaddr *)sa)->sa_family) {
+ case AF_INET:
+ snprintf(dst, size, "%pI4",
+ &((struct sockaddr_in *)sa)->sin_addr);
+ break;
+ case AF_INET6:
+ snprintf(dst, size, "%pI6",
+ &((struct sockaddr_in6 *)sa)->sin6_addr);
+ break;
+ default:
+ snprintf(dst, size, "???");
+ break;
+ }
+ return dst;
+}
+
+static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ struct rdma_cm_id *new_cm_id;
+ char src_addr[64], dst_addr[64];
+ int ret;
+
+ new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(new_cm_id)) {
+ ret = PTR_ERR(new_cm_id);
+ new_cm_id = NULL;
+ goto out;
+ }
+
+ init_completion(&ch->done);
+ ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
+ (struct sockaddr *)&target->rdma_cm.src : NULL,
+ (struct sockaddr *)&target->rdma_cm.dst,
+ SRP_PATH_REC_TIMEOUT_MS);
+ if (ret) {
+ pr_err("No route available from %s to %s (%d)\n",
+ target->rdma_cm.src_specified ?
+ inet_ntop(&target->rdma_cm.src, src_addr,
+ sizeof(src_addr)) : "(any)",
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)),
+ ret);
+ goto out;
+ }
+ ret = wait_for_completion_interruptible(&ch->done);
+ if (ret < 0)
+ goto out;
+
+ ret = ch->status;
+ if (ret) {
+ pr_err("Resolving address %s failed (%d)\n",
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)),
+ ret);
+ goto out;
+ }
+
+ swap(ch->rdma_cm.cm_id, new_cm_id);
+
+out:
+ if (new_cm_id)
+ rdma_destroy_id(new_cm_id);
+
+ return ret;
+}
+
+static int srp_new_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
+ srp_new_ib_cm_id(ch);
+}
+
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
struct srp_device *dev = target->srp_host->srp_dev;
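Editor's note: srp_new_rdma_cm_id() above uses a single-exit ownership pattern: the new id is built in a local variable and only swap()ed into ch->rdma_cm.cm_id once address resolution fully succeeds, so the one rdma_destroy_id() at out: frees either the half-built new id (failure) or the displaced old one (success). The same shape as a small runnable C program (generic resource type; swap() spelled out with GCC typeof, as the kernel macro does):

    #include <stdio.h>
    #include <stdlib.h>

    #define swap(a, b) do { typeof(a) _t = (a); (a) = (b); (b) = _t; } while (0)

    struct res { int id; };

    static struct res *res_create(int id, int fail)
    {
            struct res *r = fail ? NULL : malloc(sizeof(*r));

            if (r)
                    r->id = id;
            return r;
    }

    static int replace(struct res **slot, int id, int fail)
    {
            struct res *fresh = res_create(id, fail);
            int ret = fresh ? 0 : -1;

            if (!ret)
                    swap(*slot, fresh);     /* old resource now in 'fresh' */
            free(fresh);    /* frees old on success, NULL on failure */
            return ret;
    }

    int main(void)
    {
            struct res *cur = NULL;
            int ret;

            ret = replace(&cur, 1, 0);
            printf("%d %d\n", ret, cur->id);        /* 0 1 */
            ret = replace(&cur, 2, 1);
            printf("%d %d\n", ret, cur->id);        /* -1 1: old kept */
            free(cur);
            return 0;
    }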
@@ -521,16 +602,25 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
- qp = ib_create_qp(dev->pd, init_attr);
- if (IS_ERR(qp)) {
- ret = PTR_ERR(qp);
+ if (target->using_rdma_cm) {
+ ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
+ qp = ch->rdma_cm.cm_id->qp;
+ } else {
+ qp = ib_create_qp(dev->pd, init_attr);
+ if (!IS_ERR(qp)) {
+ ret = srp_init_ib_qp(target, qp);
+ if (ret)
+ ib_destroy_qp(qp);
+ } else {
+ ret = PTR_ERR(qp);
+ }
+ }
+ if (ret) {
+ pr_err("QP creation failed for dev %s: %d\n",
+ dev_name(&dev->dev->dev), ret);
goto err_send_cq;
}
- ret = srp_init_qp(target, qp);
- if (ret)
- goto err_qp;
-
if (dev->use_fast_reg) {
fr_pool = srp_alloc_fr_pool(target);
if (IS_ERR(fr_pool)) {
@@ -574,7 +664,10 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
return 0;
err_qp:
- ib_destroy_qp(qp);
+ if (target->using_rdma_cm)
+ rdma_destroy_qp(ch->rdma_cm.cm_id);
+ else
+ ib_destroy_qp(qp);
err_send_cq:
ib_free_cq(send_cq);
@@ -600,9 +693,16 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (!ch->target)
return;
- if (ch->cm_id) {
- ib_destroy_cm_id(ch->cm_id);
- ch->cm_id = NULL;
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id) {
+ rdma_destroy_id(ch->rdma_cm.cm_id);
+ ch->rdma_cm.cm_id = NULL;
+ }
+ } else {
+ if (ch->ib_cm.cm_id) {
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = NULL;
+ }
}
/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
@@ -658,16 +758,16 @@ static void srp_path_rec_completion(int status,
shost_printk(KERN_ERR, target->scsi_host,
PFX "Got failed path rec status %d\n", status);
else
- ch->path = *pathrec;
+ ch->ib_cm.path = *pathrec;
complete(&ch->done);
}
-static int srp_lookup_path(struct srp_rdma_ch *ch)
+static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
int ret = -ENODEV;
- ch->path.numb_path = 1;
+ ch->ib_cm.path.numb_path = 1;
init_completion(&ch->done);
@@ -678,10 +778,10 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
if (!scsi_host_get(target->scsi_host))
goto out;
- ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
+ ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
- &ch->path,
+ &ch->ib_cm.path,
IB_SA_PATH_REC_SERVICE_ID |
IB_SA_PATH_REC_DGID |
IB_SA_PATH_REC_SGID |
@@ -690,8 +790,8 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
SRP_PATH_REC_TIMEOUT_MS,
GFP_KERNEL,
srp_path_rec_completion,
- ch, &ch->path_query);
- ret = ch->path_query_id;
+ ch, &ch->ib_cm.path_query);
+ ret = ch->ib_cm.path_query_id;
if (ret < 0)
goto put;
@@ -702,7 +802,10 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
ret = ch->status;
if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host,
- PFX "Path record query failed\n");
+ PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
+ ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id));
put:
scsi_host_put(target->scsi_host);
@@ -711,6 +814,34 @@ out:
return ret;
}
+static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ int ret;
+
+ init_completion(&ch->done);
+
+ ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
+ if (ret)
+ return ret;
+
+ wait_for_completion_interruptible(&ch->done);
+
+ if (ch->status != 0)
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Path resolution failed\n");
+
+ return ch->status;
+}
+
+static int srp_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
+ srp_ib_lookup_path(ch);
+}
+
static u8 srp_get_subnet_timeout(struct srp_host *host)
{
struct ib_port_attr attr;
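Editor's note: srp_new_cm_id() and srp_lookup_path() above now reduce to one-line dispatchers on target->using_rdma_cm. Because the two cm_id flavors share storage in a union (see the ib_srp.h hunk near the end of this diff), that flag is the only record of which member is live, so every access must funnel through such a dispatcher. Minimal sketch of the shape (illustrative types):

    #include <stdbool.h>

    struct channel {
            bool using_rdma_cm;             /* discriminates the union */
            union {
                    struct { int ib_id; }   ib_cm;
                    struct { int rdma_id; } rdma_cm;
            };
    };

    static int connect_ib(struct channel *ch)   { return ch->ib_cm.ib_id; }
    static int connect_rdma(struct channel *ch) { return ch->rdma_cm.rdma_id; }

    /* Every caller goes through the dispatcher; nothing else may touch
     * the union, since only the flag records which member is live. */
    static int channel_connect(struct channel *ch)
    {
            return ch->using_rdma_cm ? connect_rdma(ch) : connect_ib(ch);
    }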
@@ -732,48 +863,76 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
struct srp_target_port *target = ch->target;
struct {
- struct ib_cm_req_param param;
- struct srp_login_req priv;
+ struct rdma_conn_param rdma_param;
+ struct srp_login_req_rdma rdma_req;
+ struct ib_cm_req_param ib_param;
+ struct srp_login_req ib_req;
} *req = NULL;
+ char *ipi, *tpi;
int status;
- u8 subnet_timeout;
-
- subnet_timeout = srp_get_subnet_timeout(target->srp_host);
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->param.primary_path = &ch->path;
- req->param.alternate_path = NULL;
- req->param.service_id = target->service_id;
- req->param.qp_num = ch->qp->qp_num;
- req->param.qp_type = ch->qp->qp_type;
- req->param.private_data = &req->priv;
- req->param.private_data_len = sizeof req->priv;
- req->param.flow_control = 1;
-
- get_random_bytes(&req->param.starting_psn, 4);
- req->param.starting_psn &= 0xffffff;
+ req->ib_param.flow_control = 1;
+ req->ib_param.retry_count = target->tl_retry_count;
/*
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
*/
- req->param.responder_resources = 4;
- req->param.remote_cm_response_timeout = subnet_timeout + 2;
- req->param.local_cm_response_timeout = subnet_timeout + 2;
- req->param.retry_count = target->tl_retry_count;
- req->param.rnr_retry_count = 7;
- req->param.max_cm_retries = 15;
-
- req->priv.opcode = SRP_LOGIN_REQ;
- req->priv.tag = 0;
- req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
- req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ req->ib_param.responder_resources = 4;
+ req->ib_param.rnr_retry_count = 7;
+ req->ib_param.max_cm_retries = 15;
+
+ req->ib_req.opcode = SRP_LOGIN_REQ;
+ req->ib_req.tag = 0;
+ req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len);
+ req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
- req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
- SRP_MULTICHAN_SINGLE);
+ req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
+ SRP_MULTICHAN_SINGLE);
+
+ if (target->using_rdma_cm) {
+ req->rdma_param.flow_control = req->ib_param.flow_control;
+ req->rdma_param.responder_resources =
+ req->ib_param.responder_resources;
+ req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
+ req->rdma_param.retry_count = req->ib_param.retry_count;
+ req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
+ req->rdma_param.private_data = &req->rdma_req;
+ req->rdma_param.private_data_len = sizeof(req->rdma_req);
+
+ req->rdma_req.opcode = req->ib_req.opcode;
+ req->rdma_req.tag = req->ib_req.tag;
+ req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
+ req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
+ req->rdma_req.req_flags = req->ib_req.req_flags;
+
+ ipi = req->rdma_req.initiator_port_id;
+ tpi = req->rdma_req.target_port_id;
+ } else {
+ u8 subnet_timeout;
+
+ subnet_timeout = srp_get_subnet_timeout(target->srp_host);
+
+ req->ib_param.primary_path = &ch->ib_cm.path;
+ req->ib_param.alternate_path = NULL;
+ req->ib_param.service_id = target->ib_cm.service_id;
+ get_random_bytes(&req->ib_param.starting_psn, 4);
+ req->ib_param.starting_psn &= 0xffffff;
+ req->ib_param.qp_num = ch->qp->qp_num;
+ req->ib_param.qp_type = ch->qp->qp_type;
+ req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.private_data = &req->ib_req;
+ req->ib_param.private_data_len = sizeof(req->ib_req);
+
+ ipi = req->ib_req.initiator_port_id;
+ tpi = req->ib_req.target_port_id;
+ }
+
/*
* In the published SRP specification (draft rev. 16a), the
* port identifier format is 8 bytes of ID extension followed
@@ -784,19 +943,15 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
* recognized by the I/O Class they report.
*/
if (target->io_class == SRP_REV10_IB_IO_CLASS) {
- memcpy(req->priv.initiator_port_id,
- &target->sgid.global.interface_id, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->initiator_ext, 8);
- memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
- memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
+ memcpy(ipi, &target->sgid.global.interface_id, 8);
+ memcpy(ipi + 8, &target->initiator_ext, 8);
+ memcpy(tpi, &target->ioc_guid, 8);
+ memcpy(tpi + 8, &target->id_ext, 8);
} else {
- memcpy(req->priv.initiator_port_id,
- &target->initiator_ext, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->sgid.global.interface_id, 8);
- memcpy(req->priv.target_port_id, &target->id_ext, 8);
- memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
+ memcpy(ipi, &target->initiator_ext, 8);
+ memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
+ memcpy(tpi, &target->id_ext, 8);
+ memcpy(tpi + 8, &target->ioc_guid, 8);
}
/*
@@ -809,12 +964,14 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
PFX "Topspin/Cisco initiator port ID workaround "
"activated for target GUID %016llx\n",
be64_to_cpu(target->ioc_guid));
- memset(req->priv.initiator_port_id, 0, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->srp_host->srp_dev->dev->node_guid, 8);
+ memset(ipi, 0, 8);
+ memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
}
- status = ib_send_cm_req(ch->cm_id, &req->param);
+ if (target->using_rdma_cm)
+ status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
+ else
+ status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
kfree(req);
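Editor's note: srp_send_req() now fills one transport-neutral login core and copies it into whichever private-data blob the active CM transmits: rdma_conn_param for the RDMA CM, ib_cm_req_param for the IB CM. A hedged sketch of carrying a login request as connection private data (struct layouts are illustrative, not the SRP wire format; the u8 length field mirrors rdma_conn_param and is why the blob must stay small):

    #include <stdint.h>
    #include <string.h>

    struct login_req {                      /* illustrative, not SRP's layout */
            uint8_t  opcode;
            uint64_t tag;
            uint32_t req_it_iu_len;
    } __attribute__((packed));

    struct conn_param {                     /* stand-in for rdma_conn_param */
            const void *private_data;
            uint8_t     private_data_len;   /* u8: private data is tiny */
    };

    static void prepare_connect(struct conn_param *p, struct login_req *req)
    {
            memset(req, 0, sizeof(*req));
            req->opcode = 0x00;             /* SRP_LOGIN_REQ in the real driver */
            req->req_it_iu_len = 8192;

            /* The CM ships this blob inside the REQ; the target parses it. */
            p->private_data = req;
            p->private_data_len = sizeof(*req);
    }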
@@ -841,14 +998,23 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
static void srp_disconnect_target(struct srp_target_port *target)
{
struct srp_rdma_ch *ch;
- int i;
+ int i, ret;
/* XXX should send SRP_I_LOGOUT request */
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
ch->connected = false;
- if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
+ ret = 0;
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id)
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ } else {
+ if (ch->ib_cm.cm_id)
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
+ NULL, 0);
+ }
+ if (ret < 0) {
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Sending CM DREQ failed\n");
}
@@ -962,6 +1128,7 @@ static void srp_remove_target(struct srp_target_port *target)
scsi_remove_host(target->scsi_host);
srp_stop_rport_timers(target->rport);
srp_disconnect_target(target);
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
srp_free_ch_ib(target, ch);
@@ -2349,7 +2516,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
struct srp_target_port *target = ch->target;
struct ib_qp_attr *qp_attr = NULL;
int attr_mask = 0;
- int ret;
+ int ret = 0;
int i;
if (lrsp->opcode == SRP_LOGIN_RSP) {
@@ -2379,40 +2546,42 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error;
}
- ret = -ENOMEM;
- qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
- if (!qp_attr)
- goto error;
-
- qp_attr->qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
- if (ret)
- goto error_free;
-
- ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
- if (ret)
- goto error_free;
-
for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = ch->rx_ring[i];
ret = srp_post_recv(ch, iu);
if (ret)
- goto error_free;
+ goto error;
}
- qp_attr->qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
- if (ret)
- goto error_free;
+ if (!target->using_rdma_cm) {
+ ret = -ENOMEM;
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
+ if (!qp_attr)
+ goto error;
- target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+ qp_attr->qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
- ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
- if (ret)
- goto error_free;
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
- ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ qp_attr->qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
+
+ target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
+
+ ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ }
error_free:
kfree(qp_attr);
@@ -2421,41 +2590,43 @@ error:
ch->status = ret;
}
-static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event,
- struct srp_rdma_ch *ch)
+static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *event,
+ struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct Scsi_Host *shost = target->scsi_host;
struct ib_class_port_info *cpi;
int opcode;
+ u16 dlid;
switch (event->param.rej_rcvd.reason) {
case IB_CM_REJ_PORT_CM_REDIRECT:
cpi = event->param.rej_rcvd.ari;
- sa_path_set_dlid(&ch->path, ntohs(cpi->redirect_lid));
- ch->path.pkey = cpi->redirect_pkey;
+ dlid = be16_to_cpu(cpi->redirect_lid);
+ sa_path_set_dlid(&ch->ib_cm.path, dlid);
+ ch->ib_cm.path.pkey = cpi->redirect_pkey;
cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
- memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
+ memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
- ch->status = sa_path_get_dlid(&ch->path) ?
- SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
+ ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
break;
case IB_CM_REJ_PORT_REDIRECT:
if (srp_target_is_topspin(target)) {
+ union ib_gid *dgid = &ch->ib_cm.path.dgid;
+
/*
* Topspin/Cisco SRP gateways incorrectly send
* reject reason code 25 when they mean 24
* (port redirect).
*/
- memcpy(ch->path.dgid.raw,
- event->param.rej_rcvd.ari, 16);
+ memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
shost_printk(KERN_DEBUG, shost,
PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
- be64_to_cpu(ch->path.dgid.global.subnet_prefix),
- be64_to_cpu(ch->path.dgid.global.interface_id));
+ be64_to_cpu(dgid->global.subnet_prefix),
+ be64_to_cpu(dgid->global.interface_id));
ch->status = SRP_PORT_REDIRECT;
} else {
@@ -2484,7 +2655,8 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
shost_printk(KERN_WARNING, shost, PFX
"SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
target->sgid.raw,
- target->orig_dgid.raw, reason);
+ target->ib_cm.orig_dgid.raw,
+ reason);
} else
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
@@ -2504,7 +2676,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
}
}
-static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
struct srp_rdma_ch *ch = cm_id->context;
struct srp_target_port *target = ch->target;
@@ -2527,7 +2699,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
comp = 1;
- srp_cm_rej_handler(cm_id, event, ch);
+ srp_ib_cm_rej_handler(cm_id, event, ch);
break;
case IB_CM_DREQ_RECEIVED:
@@ -2565,6 +2737,135 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
+static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
+ struct rdma_cm_event *event)
+{
+ struct srp_target_port *target = ch->target;
+ struct Scsi_Host *shost = target->scsi_host;
+ int opcode;
+
+ switch (event->status) {
+ case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ opcode = *(u8 *) event->param.conn.private_data;
+ if (opcode == SRP_LOGIN_REJ) {
+ struct srp_login_rej *rej =
+ (struct srp_login_rej *)
+ event->param.conn.private_data;
+ u32 reason = be32_to_cpu(rej->reason);
+
+ if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
+ else
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
+ } else {
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
+ opcode);
+ }
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_STALE_CONN:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: stale connection\n");
+ ch->status = SRP_STALE_CONN;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
+ event->status);
+ ch->status = -ECONNRESET;
+ break;
+ }
+}
+
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct srp_rdma_ch *ch = cm_id->context;
+ struct srp_target_port *target = ch->target;
+ int comp = 0;
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ ch->status = -ENXIO;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ ch->status = -EHOSTUNREACH;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM REQ failed\n");
+ comp = 1;
+ ch->status = -ECONNRESET;
+ break;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ comp = 1;
+ srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
+ break;
+
+ case RDMA_CM_EVENT_REJECTED:
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
+ comp = 1;
+
+ srp_rdma_cm_rej_handler(ch, event);
+ break;
+
+ case RDMA_CM_EVENT_DISCONNECTED:
+ if (ch->connected) {
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "received DREQ\n");
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ comp = 1;
+ ch->status = 0;
+ queue_work(system_long_wq, &target->tl_err_work);
+ }
+ break;
+
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "connection closed\n");
+
+ comp = 1;
+ ch->status = 0;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Unhandled CM event %d\n", event->event);
+ break;
+ }
+
+ if (comp)
+ complete(&ch->done);
+
+ return 0;
+}
+
/**
* srp_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
@@ -2717,6 +3018,16 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
+static int srp_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct srp_target_port *target = host_to_target(shost);
+
+ if (target->target_can_queue)
+ starget->can_queue = target->target_can_queue;
+ return 0;
+}
+
static int srp_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
@@ -2766,7 +3077,10 @@ static ssize_t show_service_id(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "0x%016llx\n",
+ be64_to_cpu(target->ib_cm.service_id));
}
static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
@@ -2774,7 +3088,9 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}
static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
@@ -2791,7 +3107,9 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
struct srp_target_port *target = host_to_target(class_to_shost(dev));
struct srp_rdma_ch *ch = &target->ch[0];
- return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}
static ssize_t show_orig_dgid(struct device *dev,
@@ -2799,7 +3117,9 @@ static ssize_t show_orig_dgid(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}
static ssize_t show_req_lim(struct device *dev,
@@ -2921,6 +3241,7 @@ static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
+ .target_alloc = srp_target_alloc,
.slave_alloc = srp_slave_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
@@ -3044,6 +3365,9 @@ static bool srp_conn_unique(struct srp_host *host,
if (t != target &&
target->id_ext == t->id_ext &&
target->ioc_guid == t->ioc_guid &&
+ (!target->using_rdma_cm ||
+ memcmp(&target->rdma_cm.dst, &t->rdma_cm.dst,
+ sizeof(target->rdma_cm.dst)) == 0) &&
target->initiator_ext == t->initiator_ext) {
ret = false;
break;
@@ -3060,6 +3384,9 @@ out:
*
* id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
* pkey=<P_Key>,service_id=<service ID>
+ * or
+ * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
+ * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
*
* to the add_target sysfs attribute.
*/
@@ -3080,11 +3407,20 @@ enum {
SRP_OPT_COMP_VECTOR = 1 << 12,
SRP_OPT_TL_RETRY_COUNT = 1 << 13,
SRP_OPT_QUEUE_SIZE = 1 << 14,
- SRP_OPT_ALL = (SRP_OPT_ID_EXT |
- SRP_OPT_IOC_GUID |
- SRP_OPT_DGID |
- SRP_OPT_PKEY |
- SRP_OPT_SERVICE_ID),
+ SRP_OPT_IP_SRC = 1 << 15,
+ SRP_OPT_IP_DEST = 1 << 16,
+ SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
+};
+
+static unsigned int srp_opt_mandatory[] = {
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_DGID |
+ SRP_OPT_PKEY |
+ SRP_OPT_SERVICE_ID,
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_IP_DEST,
};
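Editor's note: each srp_opt_mandatory[] entry above is a complete alternative: the parser ORs one bit into opt_mask per recognized token, and the validation loop later in this file's diff accepts the request if any entry is fully contained in opt_mask. Runnable sketch of that mask check (illustrative option bits):

    #include <stdio.h>

    enum {
            OPT_ID_EXT  = 1 << 0,
            OPT_GUID    = 1 << 1,
            OPT_DGID    = 1 << 2,
            OPT_IP_DEST = 1 << 3,
    };

    /* Two accepted combinations: IB-CM style or RDMA-CM style. */
    static const unsigned int mandatory[] = {
            OPT_ID_EXT | OPT_GUID | OPT_DGID,
            OPT_ID_EXT | OPT_GUID | OPT_IP_DEST,
    };

    static int options_valid(unsigned int opt_mask)
    {
            for (unsigned int i = 0;
                 i < sizeof(mandatory) / sizeof(mandatory[0]); i++)
                    if ((opt_mask & mandatory[i]) == mandatory[i])
                            return 1;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", options_valid(OPT_ID_EXT | OPT_GUID | OPT_IP_DEST)); /* 1 */
            printf("%d\n", options_valid(OPT_ID_EXT | OPT_DGID));               /* 0 */
            return 0;
    }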
static const match_table_t srp_opt_tokens = {
@@ -3095,6 +3431,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_SERVICE_ID, "service_id=%s" },
{ SRP_OPT_MAX_SECT, "max_sect=%d" },
{ SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
+ { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
{ SRP_OPT_IO_CLASS, "io_class=%x" },
{ SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
{ SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
@@ -3103,15 +3440,33 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
{ SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
+ { SRP_OPT_IP_SRC, "src=%s" },
+ { SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_ERR, NULL }
};
-static int srp_parse_options(const char *buf, struct srp_target_port *target)
+static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
+ const char *addr_port_str)
+{
+ char *addr = kstrdup(addr_port_str, GFP_KERNEL);
+ char *port_str = addr;
+ int ret;
+
+ if (!addr)
+ return -ENOMEM;
+ strsep(&port_str, ":");
+ ret = inet_pton_with_scope(net, AF_UNSPEC, addr, port_str, sa);
+ kfree(addr);
+ return ret;
+}
+
+static int srp_parse_options(struct net *net, const char *buf,
+ struct srp_target_port *target)
{
char *options, *sep_opt;
char *p;
- char dgid[3];
substring_t args[MAX_OPT_ARGS];
+ unsigned long long ull;
int opt_mask = 0;
int token;
int ret = -EINVAL;
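Editor's note: srp_parse_in() above leans on strsep() semantics: port_str starts out aliasing the copied buffer, strsep() overwrites the first ':' with a NUL and advances port_str past it (or sets it to NULL when no colon exists), leaving clean address and port strings for inet_pton_with_scope(). Splitting at the first ':' suits the IPv4 addr:port form documented earlier in this diff. A userspace model of the split:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int split_addr_port(const char *in, char **addr, char **port)
    {
            char *buf = strdup(in);

            if (!buf)
                    return -1;
            *addr = buf;
            *port = buf;
            strsep(port, ":");      /* NUL-terminates addr, advances *port */
            return 0;               /* *port == NULL if there was no ':' */
    }

    int main(void)
    {
            char *a, *p;

            if (split_addr_port("192.168.1.5:3260", &a, &p) == 0) {
                    printf("addr=%s port=%s\n", a, p ? p : "(none)");
                    free(a);
            }
            return 0;
    }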
@@ -3136,7 +3491,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid id_ext parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->id_ext = cpu_to_be64(ull);
kfree(p);
break;
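Editor's note: the kstrtoull() conversions here and below are more than churn: simple_strtoull() silently stops at the first non-digit and cannot report overflow, while kstrtoull() rejects both, which is why every call site now checks ret and warns with the offending string. Roughly equivalent strict parsing in userspace via strtoull() plus endptr and errno:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a full hex string or fail -- roughly what kstrtoull() enforces. */
    static int parse_hex_strict(const char *s, unsigned long long *out)
    {
            char *end;

            errno = 0;
            *out = strtoull(s, &end, 16);
            if (errno == ERANGE)
                    return -ERANGE;
            if (end == s || *end != '\0')   /* empty or trailing junk */
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            unsigned long long v;

            printf("%d\n", parse_hex_strict("deadbeef", &v));  /* 0 */
            printf("%d\n", parse_hex_strict("dead beef", &v)); /* negative: junk */
            return 0;
    }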
@@ -3146,7 +3507,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid ioc_guid parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ioc_guid = cpu_to_be64(ull);
kfree(p);
break;
@@ -3162,16 +3529,10 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
goto out;
}
- for (i = 0; i < 16; ++i) {
- strlcpy(dgid, p + i * 2, sizeof(dgid));
- if (sscanf(dgid, "%hhx",
- &target->orig_dgid.raw[i]) < 1) {
- ret = -EINVAL;
- kfree(p);
- goto out;
- }
- }
+ ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
kfree(p);
+ if (ret < 0)
+ goto out;
break;
case SRP_OPT_PKEY:
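Editor's note: hex2bin() above collapses the old per-byte sscanf loop: it decodes 2*count hex characters into count bytes and fails on any non-hex input. A small userspace stand-in with the same contract (the kernel version lives in lib/hexdump.c; the caller must supply at least 2*count characters):

    #include <stdio.h>

    static int hexval(char c)
    {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            if (c >= 'A' && c <= 'F') return c - 'A' + 10;
            return -1;
    }

    /* Decode 2*count hex chars from src into dst; <0 on bad input. */
    static int hex2bin(unsigned char *dst, const char *src, int count)
    {
            while (count--) {
                    int hi = hexval(*src++), lo = hexval(*src++);

                    if (hi < 0 || lo < 0)
                            return -1;
                    *dst++ = (hi << 4) | lo;
            }
            return 0;
    }

    int main(void)
    {
            unsigned char gid[16];

            if (hex2bin(gid, "fe800000000000000002c90300a0a7b1", 16) == 0)
                    printf("first byte %#x\n", gid[0]);     /* 0xfe */
            return 0;
    }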
@@ -3179,7 +3540,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
pr_warn("bad P_Key parameter '%s'\n", p);
goto out;
}
- target->pkey = cpu_to_be16(token);
+ target->ib_cm.pkey = cpu_to_be16(token);
break;
case SRP_OPT_SERVICE_ID:
@@ -3188,7 +3549,45 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad service_id parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ib_cm.service_id = cpu_to_be64(ull);
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_SRC:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
+ if (ret < 0) {
+ pr_warn("bad source parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->rdma_cm.src_specified = true;
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_DEST:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
+ if (ret < 0) {
+ pr_warn("bad dest parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->using_rdma_cm = true;
kfree(p);
break;
@@ -3221,6 +3620,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->scsi_host->cmd_per_lun = token;
break;
+ case SRP_OPT_TARGET_CAN_QUEUE:
+ if (match_int(args, &token) || token < 1) {
+ pr_warn("bad max target_can_queue parameter '%s'\n",
+ p);
+ goto out;
+ }
+ target->target_can_queue = token;
+ break;
+
case SRP_OPT_IO_CLASS:
if (match_hex(args, &token)) {
pr_warn("bad IO class parameter '%s'\n", p);
@@ -3242,7 +3650,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad initiator_ext value '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->initiator_ext = cpu_to_be64(ull);
kfree(p);
break;
@@ -3297,14 +3711,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
}
}
- if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
- ret = 0;
- else
- for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
- if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
- !(srp_opt_tokens[i].token & opt_mask))
- pr_warn("target creation request is missing parameter '%s'\n",
- srp_opt_tokens[i].pattern);
+ for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
+ if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
+ ret = 0;
+ break;
+ }
+ }
+ if (ret)
+ pr_warn("target creation request is missing one or more parameters\n");
if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
&& (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
@@ -3345,6 +3759,7 @@ static ssize_t srp_create_target(struct device *dev,
target = host_to_target(target_host);
+ target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
@@ -3366,18 +3781,29 @@ static ssize_t srp_create_target(struct device *dev,
if (ret < 0)
goto put;
- ret = srp_parse_options(buf, target);
+ ret = srp_parse_options(target->net, buf, target);
if (ret)
goto out;
target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
if (!srp_conn_unique(target->srp_host, target)) {
- shost_printk(KERN_INFO, target->scsi_host,
- PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
- be64_to_cpu(target->id_ext),
- be64_to_cpu(target->ioc_guid),
- be64_to_cpu(target->initiator_ext));
+ if (target->using_rdma_cm) {
+ char dst_addr[64];
+
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%s\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)));
+ } else {
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be64_to_cpu(target->initiator_ext));
+ }
ret = -EEXIST;
goto out;
}
@@ -3478,11 +3904,18 @@ static ssize_t srp_create_target(struct device *dev,
ret = srp_connect_ch(ch, multich);
if (ret) {
+ char dst[64];
+
+ if (target->using_rdma_cm)
+ inet_ntop(&target->rdma_cm.dst, dst,
+ sizeof(dst));
+ else
+ snprintf(dst, sizeof(dst), "%pI6",
+ target->ib_cm.orig_dgid.raw);
shost_printk(KERN_ERR, target->scsi_host,
- PFX "Connection %d/%d to %pI6 failed\n",
+ PFX "Connection %d/%d to %s failed\n",
ch_start + cpu_idx,
- target->ch_count,
- ch->target->orig_dgid.raw);
+ target->ch_count, dst);
if (node_idx == 0 && cpu_idx == 0) {
goto free_ch;
} else {
@@ -3507,13 +3940,25 @@ connected:
goto err_disconnect;
if (target->state != SRP_TARGET_REMOVED) {
- shost_printk(KERN_DEBUG, target->scsi_host, PFX
- "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
- be64_to_cpu(target->id_ext),
- be64_to_cpu(target->ioc_guid),
- be16_to_cpu(target->pkey),
- be64_to_cpu(target->service_id),
- target->sgid.raw, target->orig_dgid.raw);
+ if (target->using_rdma_cm) {
+ char dst[64];
+
+ inet_ntop(&target->rdma_cm.dst, dst, sizeof(dst));
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %s\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ target->sgid.raw, dst);
+ } else {
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id),
+ target->sgid.raw,
+ target->ib_cm.orig_dgid.raw);
+ }
}
ret = count;
@@ -3523,8 +3968,16 @@ out:
put:
scsi_host_put(target->scsi_host);
- if (ret < 0)
+ if (ret < 0) {
+ /*
+ * If a call to srp_remove_target() has not been scheduled,
+ * drop the network namespace reference that was obtained
+ * earlier in this function.
+ */
+ if (target->state != SRP_TARGET_REMOVED)
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
scsi_host_put(target->scsi_host);
+ }
return ret;
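Editor's note: the namespace reference rule in this file is: grab once in srp_create_target() (the kobj_ns_grab_current() earlier in this diff), release exactly once, either in srp_remove_target() on normal teardown or in the error path above when no removal was scheduled. Generic shape of that ownership rule (illustrative types, counting instead of real refcounts):

    #include <stdbool.h>

    struct ns { int refs; };

    static void ns_get(struct ns *n) { n->refs++; }
    static void ns_put(struct ns *n) { n->refs--; }

    struct target {
            struct ns *net;
            bool removal_scheduled;
    };

    static int target_create(struct target *t, struct ns *n, bool fail)
    {
            ns_get(n);              /* target owns one reference from here */
            t->net = n;
            if (fail) {
                    /* teardown will not run, so drop the reference here */
                    if (!t->removal_scheduled)
                            ns_put(t->net);
                    return -1;
            }
            return 0;
    }

    /* Normal release point, mirroring srp_remove_target(). */
    static void target_remove(struct target *t)
    {
            ns_put(t->net);
    }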
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index a814f5ef16f9..a2706086b9c7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -45,6 +45,7 @@
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
+#include <rdma/rdma_cm.h>
enum {
SRP_PATH_REC_TIMEOUT_MS = 1000,
@@ -153,11 +154,18 @@ struct srp_rdma_ch {
struct completion done;
int status;
- struct sa_path_rec path;
- struct ib_sa_query *path_query;
- int path_query_id;
+ union {
+ struct ib_cm {
+ struct sa_path_rec path;
+ struct ib_sa_query *path_query;
+ int path_query_id;
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ struct rdma_cm {
+ struct rdma_cm_id *cm_id;
+ } rdma_cm;
+ };
- struct ib_cm_id *cm_id;
struct srp_iu **tx_ring;
struct srp_iu **rx_ring;
struct srp_request *req_ring;
@@ -182,6 +190,7 @@ struct srp_target_port {
/* read only in the hot path */
u32 global_rkey;
struct srp_rdma_ch *ch;
+ struct net *net;
u32 ch_count;
u32 lkey;
enum srp_target_state state;
@@ -194,7 +203,6 @@ struct srp_target_port {
union ib_gid sgid;
__be64 id_ext;
__be64 ioc_guid;
- __be64 service_id;
__be64 initiator_ext;
u16 io_class;
struct srp_host *srp_host;
@@ -203,6 +211,7 @@ struct srp_target_port {
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
+ unsigned int target_can_queue;
int mr_pool_size;
int mr_per_cmd;
int queue_size;
@@ -210,8 +219,28 @@ struct srp_target_port {
int comp_vector;
int tl_retry_count;
- union ib_gid orig_dgid;
- __be16 pkey;
+ bool using_rdma_cm;
+
+ union {
+ struct {
+ __be64 service_id;
+ union ib_gid orig_dgid;
+ __be16 pkey;
+ } ib_cm;
+ struct {
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr_storage ss;
+ } src;
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr_storage ss;
+ } dst;
+ bool src_specified;
+ } rdma_cm;
+ };
u32 rq_tmo_jiffies;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8a1bd354b1cc..0373b7c40902 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -41,6 +41,7 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
@@ -120,7 +121,9 @@ static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
}
/**
- * srpt_event_handler() - Asynchronous IB event callback function.
+ * srpt_event_handler - asynchronous IB event callback function
+ * @handler: IB event handler registered by ib_register_event_handler().
+ * @event: Description of the event that occurred.
*
* Callback function called by the InfiniBand core when an asynchronous IB
* event occurs. This callback may occur in interrupt context. See also
@@ -132,6 +135,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
{
struct srpt_device *sdev;
struct srpt_port *sport;
+ u8 port_num;
sdev = ib_get_client_data(event->device, &srpt_client);
if (!sdev || sdev->device != event->device)
@@ -142,10 +146,15 @@ static void srpt_event_handler(struct ib_event_handler *handler,
switch (event->event) {
case IB_EVENT_PORT_ERR:
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
sport->lid = 0;
sport->sm_lid = 0;
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
}
break;
case IB_EVENT_PORT_ACTIVE:
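Editor's note: both bounds-check fixes in this hunk share a subtlety: event->element.port_num is 1-based, so the old <= phys_port_cnt test let a port_num of 0 underflow the array index. Converting to a 0-based u8 first makes one unsigned comparison catch both ends of the range. Runnable model:

    #include <stdio.h>

    #define NPORTS 2
    static int ports[NPORTS];

    /* event_port is 1-based, as in struct ib_event; 0 must be rejected. */
    static int *get_port(unsigned char event_port)
    {
            unsigned char idx = event_port - 1;     /* 0 wraps to 255 */

            if (idx < NPORTS)
                    return &ports[idx];
            fprintf(stderr, "port %d out of range 1..%d\n", event_port, NPORTS);
            return NULL;
    }

    int main(void)
    {
            printf("%d %d %d\n", !!get_port(1), !!get_port(0), !!get_port(3));
            /* 1 0 0 */
            return 0;
    }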
@@ -155,25 +164,31 @@ static void srpt_event_handler(struct ib_event_handler *handler,
case IB_EVENT_CLIENT_REREGISTER:
case IB_EVENT_GID_CHANGE:
/* Refresh port data asynchronously. */
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
if (!sport->lid && !sport->sm_lid)
schedule_work(&sport->work);
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
}
break;
default:
- pr_err("received unrecognized IB event %d\n",
- event->event);
+ pr_err("received unrecognized IB event %d\n", event->event);
break;
}
}
/**
- * srpt_srq_event() - SRQ event callback function.
+ * srpt_srq_event - SRQ event callback function
+ * @event: Description of the event that occurred.
+ * @ctx: Context pointer specified at SRQ creation time.
*/
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
- pr_info("SRQ event %d\n", event->event);
+ pr_debug("SRQ event %d\n", event->event);
}
static const char *get_ch_state_name(enum rdma_ch_state s)
@@ -194,16 +209,18 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
}
/**
- * srpt_qp_event() - QP event callback function.
+ * srpt_qp_event - QP event callback function
+ * @event: Description of the event that occurred.
+ * @ch: SRPT RDMA channel.
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
- pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
- event->event, ch->cm_id, ch->sess_name, ch->state);
+ pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
+ event->event, ch, ch->sess_name, ch->state);
switch (event->event) {
case IB_EVENT_COMM_EST:
- ib_cm_notify(ch->cm_id, event->event);
+ ib_cm_notify(ch->ib_cm.cm_id, event->event);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
pr_debug("%s-%d, state %s: received Last WQE event.\n",
@@ -217,8 +234,8 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
}
/**
- * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
- *
+ * srpt_set_ioc - initialize an IOUnitInfo structure
+ * @c_list: controller list.
* @slot: one-based slot number.
* @value: four-bit value.
*
@@ -241,7 +258,8 @@ static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
}
/**
- * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
*
* See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
* Specification.
@@ -260,7 +278,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
}
/**
- * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ * srpt_get_iou - write IOUnitInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
*
* See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
* Specification. See also section B.7, table B.6 in the SRP r16a document.
@@ -284,7 +303,10 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
}
/**
- * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
+ * srpt_get_ioc - write IOControllerProfile to a management datagram
+ * @sport: HCA port through which the MAD has been received.
+ * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
*
* See also section 16.3.3.4 IOControllerProfile in the InfiniBand
* Architecture Specification. See also section B.7, table B.7 in the SRP
@@ -314,7 +336,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
if (sdev->use_srq)
send_queue_depth = sdev->srq_size;
else
- send_queue_depth = min(SRPT_RQ_SIZE,
+ send_queue_depth = min(MAX_SRPT_RQ_SIZE,
sdev->device->attrs.max_qp_wr);
memset(iocp, 0, sizeof(*iocp));
@@ -342,7 +364,12 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
}
/**
- * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ * srpt_get_svc_entries - write ServiceEntries to a management datagram
+ * @ioc_guid: I/O controller GUID to use in reply.
+ * @slot: I/O controller number.
+ * @hi: End of the range of service entries to be specified in the reply.
+ * @lo: Start of the range of service entries to be specified in the reply.
+ * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
*
* See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
* Specification. See also section B.7, table B.8 in the SRP r16a document.
@@ -379,8 +406,8 @@ static void srpt_get_svc_entries(u64 ioc_guid,
}
/**
- * srpt_mgmt_method_get() - Process a received management datagram.
- * @sp: source port through which the MAD has been received.
+ * srpt_mgmt_method_get - process a received management datagram
+ * @sp: HCA port through which the MAD has been received.
* @rq_mad: received MAD.
* @rsp_mad: response MAD.
*/
@@ -419,7 +446,9 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
}
/**
- * srpt_mad_send_handler() - Post MAD-send callback function.
+ * srpt_mad_send_handler - MAD send completion callback
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @mad_wc: Work completion reporting that the MAD has been sent.
*/
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_wc)
@@ -429,7 +458,10 @@ static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
}
/**
- * srpt_mad_recv_handler() - MAD reception callback function.
+ * srpt_mad_recv_handler - MAD reception callback function
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @send_buf: Not used.
+ * @mad_wc: Work completion reporting that a MAD has been received.
*/
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf,
@@ -493,8 +525,18 @@ err:
ib_free_recv_mad(mad_wc);
}
+static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
+{
+ const __be16 *g = (const __be16 *)guid;
+
+ return snprintf(buf, size, "%04x:%04x:%04x:%04x",
+ be16_to_cpu(g[0]), be16_to_cpu(g[1]),
+ be16_to_cpu(g[2]), be16_to_cpu(g[3]));
+}
+
/**
- * srpt_refresh_port() - Configure a HCA port.
+ * srpt_refresh_port - configure a HCA port
+ * @sport: SRPT HCA port.
*
* Enable InfiniBand management datagram processing, update the cached sm_lid,
* lid and gid values, and register a callback function for processing MADs
@@ -507,7 +549,6 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_mad_reg_req reg_req;
struct ib_port_modify port_modify;
struct ib_port_attr port_attr;
- __be16 *guid;
int ret;
memset(&port_modify, 0, sizeof(port_modify));
@@ -531,11 +572,8 @@ static int srpt_refresh_port(struct srpt_port *sport)
goto err_query_port;
sport->port_guid_wwn.priv = sport;
- guid = (__be16 *)&sport->gid.global.interface_id;
- snprintf(sport->port_guid, sizeof(sport->port_guid),
- "%04x:%04x:%04x:%04x",
- be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
- be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
+ srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+ &sport->gid.global.interface_id);
sport->port_gid_wwn.priv = sport;
snprintf(sport->port_gid, sizeof(sport->port_gid),
"0x%016llx%016llx",
@@ -577,7 +615,8 @@ err_mod_port:
}
/**
- * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ * srpt_unregister_mad_agent - unregister MAD callback functions
+ * @sdev: SRPT HCA pointer.
*
* Note: It is safe to call this function more than once for the same device.
*/
@@ -602,7 +641,11 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
}
/**
- * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ * srpt_alloc_ioctx - allocate an SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx_size: I/O context size.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
int ioctx_size, int dma_size,
@@ -633,7 +676,11 @@ err:
}
/**
- * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ * srpt_free_ioctx - free an SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx: I/O context pointer.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
int dma_size, enum dma_data_direction dir)
@@ -647,7 +694,7 @@ static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
}
/**
- * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
* @sdev: Device to allocate the I/O context ring for.
* @ring_size: Number of elements in the I/O context ring.
* @ioctx_size: I/O context size.
@@ -685,7 +732,12 @@ out:
}
/**
- * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
+ * @ioctx_ring: I/O context ring to be freed.
+ * @sdev: SRPT HCA pointer.
+ * @ring_size: Number of ring elements.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
struct srpt_device *sdev, int ring_size,
@@ -702,23 +754,9 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
}
/**
- * srpt_get_cmd_state() - Get the state of a SCSI command.
- */
-static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
-{
- enum srpt_command_state state;
- unsigned long flags;
-
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
- state = ioctx->state;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
- return state;
-}
-
-/**
- * srpt_set_cmd_state() - Set the state of a SCSI command.
+ * srpt_set_cmd_state - set the state of a SCSI command
+ * @ioctx: Send I/O context.
+ * @new: New I/O context state.
*
* Does not modify the state of aborted commands. Returns the previous command
* state.
@@ -727,21 +765,19 @@ static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state new)
{
enum srpt_command_state previous;
- unsigned long flags;
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
previous = ioctx->state;
if (previous != SRPT_STATE_DONE)
ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
return previous;
}
/**
- * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ * srpt_test_and_set_cmd_state - test and set the state of a command
+ * @ioctx: Send I/O context.
+ * @old: Current I/O context state.
+ * @new: New I/O context state.
*
* Returns true if and only if the previous command state was equal to 'old'.
*/
@@ -750,22 +786,23 @@ static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state new)
{
enum srpt_command_state previous;
- unsigned long flags;
WARN_ON(!ioctx);
WARN_ON(old == SRPT_STATE_DONE);
WARN_ON(new == SRPT_STATE_NEW);
- spin_lock_irqsave(&ioctx->spinlock, flags);
previous = ioctx->state;
if (previous == old)
ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
return previous == old;
}
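Editor's note: dropping ioctx->spinlock in these two helpers appears to assume all state transitions for a given ioctx are now serialized on one context, so the read-modify-write needs no locking. If concurrent updaters were still possible, the same test-and-set would have to be a compare-and-swap; a hedged sketch of that stricter variant in C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum cmd_state { ST_NEW, ST_NEED_DATA, ST_DONE };

    /* Atomic variant of srpt_test_and_set_cmd_state(): only needed if
     * more than one context can race on the same command's state. */
    static bool test_and_set_state(_Atomic enum cmd_state *state,
                                   enum cmd_state old, enum cmd_state new)
    {
            return atomic_compare_exchange_strong(state, &old, new);
    }

    int main(void)
    {
            _Atomic enum cmd_state st = ST_NEW;

            printf("%d\n", test_and_set_state(&st, ST_NEW, ST_NEED_DATA)); /* 1 */
            printf("%d\n", test_and_set_state(&st, ST_NEW, ST_DONE));      /* 0 */
            return 0;
    }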
/**
- * srpt_post_recv() - Post an IB receive request.
+ * srpt_post_recv - post an IB receive request
+ * @sdev: SRPT HCA pointer.
+ * @ch: SRPT RDMA channel.
+ * @ioctx: Receive I/O context pointer.
*/
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
@@ -791,7 +828,8 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
}
/**
- * srpt_zerolength_write() - Perform a zero-length RDMA write.
+ * srpt_zerolength_write - perform a zero-length RDMA write
+ * @ch: SRPT RDMA channel.
*
* A quote from the InfiniBand specification: C9-88: For an HCA responder
* using Reliable Connection service, for each zero-length RDMA READ or WRITE
@@ -802,6 +840,9 @@ static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
struct ib_send_wr wr, *bad_wr;
+ pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+ ch->qp->qp_num);
+
memset(&wr, 0, sizeof(wr));
wr.opcode = IB_WR_RDMA_WRITE;
wr.wr_cqe = &ch->zw_cqe;
@@ -813,13 +854,17 @@ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srpt_rdma_ch *ch = cq->cq_context;
+ pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
+ wc->status);
+
if (wc->status == IB_WC_SUCCESS) {
srpt_process_wait_list(ch);
} else {
if (srpt_set_ch_state(ch, CH_DISCONNECTED))
schedule_work(&ch->release_work);
else
- WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
+ pr_debug("%s-%d: already disconnected.\n",
+ ch->sess_name, ch->qp->qp_num);
}
}
@@ -928,11 +973,13 @@ static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
}
/**
- * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * srpt_get_desc_tbl - parse the data descriptors of an SRP_CMD request
* @ioctx: Pointer to the I/O context associated with the request.
* @srp_cmd: Pointer to the SRP_CMD request data.
* @dir: Pointer to the variable to which the transfer direction will be
* written.
+ * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
+ * @sg_cnt: [out] length of @sg.
* @data_len: Pointer to the variable to which the total data length of all
* descriptors in the SRP_CMD request will be written.
*
@@ -998,7 +1045,9 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
}
/**
- * srpt_init_ch_qp() - Initialize queue pair attributes.
+ * srpt_init_ch_qp - initialize queue pair attributes
+ * @ch: SRPT RDMA channel.
+ * @qp: Queue pair pointer.
*
* Initializes the attributes of queue pair 'qp' by allowing local write,
* remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
@@ -1013,10 +1062,14 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
return -ENOMEM;
attr->qp_state = IB_QPS_INIT;
- attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE;
+ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
attr->port_num = ch->sport->port;
- attr->pkey_index = 0;
+
+ ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
+ ch->pkey, &attr->pkey_index);
+ if (ret < 0)
+ pr_err("Translating pkey %#x failed (%d) - using index 0\n",
+ ch->pkey, ret);
ret = ib_modify_qp(qp, attr,
IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
@@ -1027,7 +1080,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
}
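
The pkey change above replaces the hard-coded index 0 with a lookup of the P_Key taken from the connection request. A minimal sketch of the pattern, with ibdev and port_num as placeholder names:

    /* ib_find_cached_pkey() maps a 16-bit P_Key value to its index in
     * the port's P_Key table; a negative return means the value is not
     * in the table, and the driver falls back to index 0 rather than
     * failing the login. */
    u16 pkey_index = 0;
    int ret = ib_find_cached_pkey(ibdev, port_num, pkey, &pkey_index);

    if (ret < 0)
            pr_err("Translating pkey %#x failed (%d) - using index 0\n",
                   pkey, ret);
    attr->pkey_index = pkey_index;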
/**
- * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
@@ -1044,7 +1097,7 @@ static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
int ret;
qp_attr.qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
@@ -1057,7 +1110,7 @@ out:
}
/**
- * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
@@ -1074,7 +1127,7 @@ static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
int ret;
qp_attr.qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
@@ -1087,7 +1140,8 @@ out:
}
/**
- * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ * srpt_ch_qp_err - set the channel queue pair state to 'error'
+ * @ch: SRPT RDMA channel.
*/
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
@@ -1098,7 +1152,8 @@ static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
}
/**
- * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
+ * @ch: SRPT RDMA channel.
*/
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
@@ -1120,11 +1175,9 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
return ioctx;
BUG_ON(ioctx->ch != ch);
- spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
ioctx->n_rdma = 0;
ioctx->n_rw_ctx = 0;
- init_completion(&ioctx->tx_done);
ioctx->queue_status_only = false;
/*
* transport_init_se_cmd() does not initialize all fields, so do it
@@ -1137,14 +1190,12 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
}
/**
- * srpt_abort_cmd() - Abort a SCSI command.
+ * srpt_abort_cmd - abort a SCSI command
* @ioctx: I/O context associated with the SCSI command.
- * @context: Preferred execution context.
*/
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
enum srpt_command_state state;
- unsigned long flags;
BUG_ON(!ioctx);
@@ -1153,7 +1204,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* the ib_srpt driver, change the state to the next state.
*/
- spin_lock_irqsave(&ioctx->spinlock, flags);
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEED_DATA:
@@ -1168,7 +1218,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
__func__, state);
break;
}
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
ioctx->state, ioctx->cmd.tag);
@@ -1207,6 +1256,10 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
}
/**
+ * srpt_rdma_read_done - RDMA read completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
* XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
* the data that has been transferred via IB RDMA had to be postponed until the
* check_stop_free() callback. None of this is necessary anymore and needs to
@@ -1234,11 +1287,11 @@ static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
target_execute_cmd(&ioctx->cmd);
else
pr_err("%s[%d]: wrong state = %d\n", __func__,
- __LINE__, srpt_get_cmd_state(ioctx));
+ __LINE__, ioctx->state);
}
/**
- * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * srpt_build_cmd_rsp - build a SRP_RSP response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context associated with the SRP_CMD request. The response will
* be built in the buffer ioctx->buf points at and hence this function will
@@ -1298,7 +1351,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
}
/**
- * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * srpt_build_tskmgmt_rsp - build a task management response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context in which the SRP_RSP response will be built.
* @rsp_code: RSP_CODE that will be stored in the response.
@@ -1346,7 +1399,10 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
}
/**
- * srpt_handle_cmd() - Process SRP_CMD.
+ * srpt_handle_cmd - process a SRP_CMD information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
*/
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *recv_ioctx,
@@ -1428,7 +1484,10 @@ static int srp_tmr_to_tcm(int fn)
}
/**
- * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
*
* The request is forwarded to the target core for processing unless setting
* it up fails, in which case a response with a failure status is sent back.
*
@@ -1450,9 +1509,9 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srp_tsk = recv_ioctx->ioctx.buf;
cmd = &send_ioctx->cmd;
- pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
- " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
- srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
+ pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
+ srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
+ ch->sess);
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
@@ -1471,41 +1530,42 @@ fail:
}
/**
- * srpt_handle_new_iu() - Process a newly received information unit.
+ * srpt_handle_new_iu - process a newly received information unit
* @ch: RDMA channel through which the information unit has been received.
- * @ioctx: SRPT I/O context associated with the information unit.
+ * @recv_ioctx: Receive I/O context associated with the information unit.
*/
-static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
- struct srpt_recv_ioctx *recv_ioctx,
- struct srpt_send_ioctx *send_ioctx)
+static bool
+srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
+ struct srpt_send_ioctx *send_ioctx = NULL;
struct srp_cmd *srp_cmd;
+ bool res = false;
+ u8 opcode;
BUG_ON(!ch);
BUG_ON(!recv_ioctx);
+ if (unlikely(ch->state == CH_CONNECTING))
+ goto push;
+
ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
recv_ioctx->ioctx.dma, srp_max_req_size,
DMA_FROM_DEVICE);
- if (unlikely(ch->state == CH_CONNECTING))
- goto out_wait;
-
- if (unlikely(ch->state != CH_LIVE))
- return;
-
srp_cmd = recv_ioctx->ioctx.buf;
- if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
- if (!send_ioctx) {
- if (!list_empty(&ch->cmd_wait_list))
- goto out_wait;
- send_ioctx = srpt_get_send_ioctx(ch);
- }
+ opcode = srp_cmd->opcode;
+ if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
+ send_ioctx = srpt_get_send_ioctx(ch);
if (unlikely(!send_ioctx))
- goto out_wait;
+ goto push;
}
- switch (srp_cmd->opcode) {
+ if (!list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(!ch->processing_wait_list);
+ list_del_init(&recv_ioctx->wait_list);
+ }
+
+ switch (opcode) {
case SRP_CMD:
srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
break;
@@ -1525,16 +1585,22 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
pr_err("Received SRP_RSP\n");
break;
default:
- pr_err("received IU with unknown opcode 0x%x\n",
- srp_cmd->opcode);
+ pr_err("received IU with unknown opcode 0x%x\n", opcode);
break;
}
srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
- return;
+ res = true;
+
+out:
+ return res;
-out_wait:
- list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+push:
+ if (list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(ch->processing_wait_list);
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+ }
+ goto out;
}
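
The reworked function relies on a small list idiom worth spelling out: a node initialized with INIT_LIST_HEAD() and removed with list_del_init() can act as its own "am I queued?" flag, because list_empty() on the node itself is true exactly when the node is unlinked. A generic sketch of the pattern (hypothetical names):

    #include <linux/list.h>

    struct item {
            struct list_head node;  /* INIT_LIST_HEAD() at creation */
    };

    static void park(struct item *it, struct list_head *q)
    {
            if (list_empty(&it->node))      /* not queued yet */
                    list_add_tail(&it->node, q);
    }

    static void unpark(struct item *it)
    {
            if (!list_empty(&it->node))     /* currently queued */
                    list_del_init(&it->node);       /* re-inits the node */
    }

This is why the receive ring hunks below add INIT_LIST_HEAD(&...->wait_list) for every receive I/O context.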
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1549,10 +1615,10 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
req_lim = atomic_dec_return(&ch->req_lim);
if (unlikely(req_lim < 0))
pr_err("req_lim = %d < 0\n", req_lim);
- srpt_handle_new_iu(ch, ioctx, NULL);
+ srpt_handle_new_iu(ch, ioctx);
} else {
- pr_info("receiving failed for ioctx %p with status %d\n",
- ioctx, wc->status);
+ pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
+ ioctx, wc->status);
}
}
@@ -1563,22 +1629,28 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
*/
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
- struct srpt_send_ioctx *ioctx;
+ struct srpt_recv_ioctx *recv_ioctx, *tmp;
+
+ WARN_ON_ONCE(ch->state == CH_CONNECTING);
- while (!list_empty(&ch->cmd_wait_list) &&
- ch->state >= CH_LIVE &&
- (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
- struct srpt_recv_ioctx *recv_ioctx;
+ if (list_empty(&ch->cmd_wait_list))
+ return;
- recv_ioctx = list_first_entry(&ch->cmd_wait_list,
- struct srpt_recv_ioctx,
- wait_list);
- list_del(&recv_ioctx->wait_list);
- srpt_handle_new_iu(ch, recv_ioctx, ioctx);
+ WARN_ON_ONCE(ch->processing_wait_list);
+ ch->processing_wait_list = true;
+ list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
+ wait_list) {
+ if (!srpt_handle_new_iu(ch, recv_ioctx))
+ break;
}
+ ch->processing_wait_list = false;
}
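
Two details above deserve a note. The walk uses list_for_each_entry_safe() because srpt_handle_new_iu() unlinks the current entry via list_del_init(); the _safe variant caches the next pointer before the body runs. The processing_wait_list flag closes the remaining hole: the handler may push the entry straight back onto the same list, and the flag tells the push path that the entry is still linked. A reduced sketch of safe traversal with deletion:

    #include <linux/list.h>

    struct foo {
            struct list_head link;
    };

    static void drain(struct list_head *head)
    {
            struct foo *f, *tmp;

            list_for_each_entry_safe(f, tmp, head, link) {
                    list_del_init(&f->link);        /* safe: tmp saved */
                    /* process f ... */
            }
    }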
/**
+ * srpt_send_done - send completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
* Note: Although this has not yet been observed during tests, at least in
* theory it is possible that the srpt_get_send_ioctx() call invoked by
* srpt_handle_new_iu() fails. This is possible because the req_lim_delta
@@ -1620,7 +1692,8 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
}
/**
- * srpt_create_ch_ib() - Create receive and send completion queues.
+ * srpt_create_ch_ib - create receive and send completion queues
+ * @ch: SRPT RDMA channel.
*/
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
@@ -1628,7 +1701,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct srpt_port *sport = ch->sport;
struct srpt_device *sdev = sport->sdev;
const struct ib_device_attr *attrs = &sdev->device->attrs;
- u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+ int sq_size = sport->port_attrib.srp_sq_size;
int i, ret;
WARN_ON(ch->rq_size < 1);
@@ -1639,12 +1712,12 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
goto out;
retry:
- ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
+ ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n",
- ch->rq_size + srp_sq_size, ret);
+ ch->rq_size + sq_size, ret);
goto out;
}
@@ -1662,8 +1735,8 @@ retry:
* both, as RDMA contexts will also post completions for the
* RDMA READ case.
*/
- qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
- qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
+ qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
+ qp_init->cap.max_rdma_ctxs = sq_size / 2;
qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
if (sdev->use_srq) {
@@ -1677,8 +1750,8 @@ retry:
if (IS_ERR(ch->qp)) {
ret = PTR_ERR(ch->qp);
if (ret == -ENOMEM) {
- srp_sq_size /= 2;
- if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
+ sq_size /= 2;
+ if (sq_size >= MIN_SRPT_SQ_SIZE) {
ib_destroy_cq(ch->cq);
goto retry;
}
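
Two things happen in this hunk: srp_sq_size becomes a local int named sq_size, and the allocation retries with half the size on -ENOMEM until MIN_SRPT_SQ_SIZE is reached. The type change is what lets the earlier "attrs->max_qp_wr + 0U" workaround go away: the kernel's min() rejects mixed signedness at compile time, so the old u32 value had to coerce the int max_qp_wr to unsigned. A sketch:

    #include <linux/kernel.h>

    u32 a = 8;
    int b = 16;

    /* min(a, b) would trip min()'s compile-time type check */
    int r1 = min(a, b + 0U);        /* old workaround: force unsigned */
    int r2 = min((int)a, b);        /* equivalent once both are int */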
@@ -1689,9 +1762,9 @@ retry:
atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
- pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
__func__, ch->cq->cqe, qp_init->cap.max_send_sge,
- qp_init->cap.max_send_wr, ch->cm_id);
+ qp_init->cap.max_send_wr, ch);
ret = srpt_init_ch_qp(ch, ch->qp);
if (ret)
@@ -1719,7 +1792,8 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
}
/**
- * srpt_close_ch() - Close an RDMA channel.
+ * srpt_close_ch - close a RDMA channel
+ * @ch: SRPT RDMA channel.
*
* Make sure all resources associated with the channel will be deallocated at
* an appropriate time.
@@ -1744,8 +1818,6 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
pr_err("%s-%d: changing queue pair into error state failed: %d\n",
ch->sess_name, ch->qp->qp_num, ret);
- pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
- ch->qp->qp_num);
ret = srpt_zerolength_write(ch);
if (ret < 0) {
pr_err("%s-%d: queuing zero-length write failed: %d\n",
@@ -1777,9 +1849,9 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
return -ENOTCONN;
- ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0)
- ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
+ ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0 && srpt_close_ch(ch))
ret = 0;
@@ -1787,83 +1859,135 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-/*
- * Send DREQ and wait for DREP. Return true if and only if this function
- * changed the state of @ch.
- */
-static bool srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
- __must_hold(&sdev->mutex)
+static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
{
- DECLARE_COMPLETION_ONSTACK(release_done);
- struct srpt_device *sdev = ch->sport->sdev;
- bool wait;
+ struct srpt_nexus *nexus;
+ struct srpt_rdma_ch *ch2;
+ bool res = true;
+
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch2, &nexus->ch_list, list) {
+ if (ch2 == ch) {
+ res = false;
+ goto done;
+ }
+ }
+ }
+done:
+ rcu_read_unlock();
- lockdep_assert_held(&sdev->mutex);
+ return res;
+}
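
This lookup runs under rcu_read_lock() instead of sport->mutex, pairing with list_del_rcu() in srpt_release_channel_work() and kfree_rcu() in srpt_free_ch() below: removed entries stay valid for readers until an RCU grace period elapses. A generic sketch of the reader side (the driver uses the plain iterator here; list_for_each_entry_rcu() is the stricter idiom):

    #include <linux/rculist.h>

    struct chan {
            struct list_head list;
    };

    static bool chan_on_list(struct list_head *head, struct chan *target)
    {
            struct chan *c;
            bool found = false;

            rcu_read_lock();
            list_for_each_entry_rcu(c, head, list) {
                    if (c == target) {
                            found = true;
                            break;
                    }
            }
            rcu_read_unlock();

            return found;
    }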
+
+/* Send DREQ and wait for DREP. */
+static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
+{
+ struct srpt_port *sport = ch->sport;
pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
ch->state);
- WARN_ON(ch->release_done);
- ch->release_done = &release_done;
- wait = !list_empty(&ch->list);
+ mutex_lock(&sport->mutex);
srpt_disconnect_ch(ch);
- mutex_unlock(&sdev->mutex);
+ mutex_unlock(&sport->mutex);
- if (!wait)
- goto out;
-
- while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
+ while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
+ 5 * HZ) == 0)
pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
ch->sess_name, ch->qp->qp_num, ch->state);
-out:
- mutex_lock(&sdev->mutex);
- return wait;
}
-static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
- __must_hold(&sdev->mutex)
+static void __srpt_close_all_ch(struct srpt_port *sport)
{
- struct srpt_device *sdev = sport->sdev;
+ struct srpt_nexus *nexus;
struct srpt_rdma_ch *ch;
- lockdep_assert_held(&sdev->mutex);
+ lockdep_assert_held(&sport->mutex);
- if (sport->enabled == enabled)
- return;
- sport->enabled = enabled;
- if (sport->enabled)
- return;
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch) >= 0)
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
+ sport->sdev->device->name, sport->port);
+ srpt_close_ch(ch);
+ }
+ }
+}
-again:
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->sport == sport) {
- pr_info("%s: closing channel %s-%d\n",
- sdev->device->name, ch->sess_name,
- ch->qp->qp_num);
- if (srpt_disconnect_ch_sync(ch))
- goto again;
+/*
+ * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
+ * it does not yet exist.
+ */
+static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
+ const u8 i_port_id[16],
+ const u8 t_port_id[16])
+{
+ struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;
+
+ for (;;) {
+ mutex_lock(&sport->mutex);
+ list_for_each_entry(n, &sport->nexus_list, entry) {
+ if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
+ memcmp(n->t_port_id, t_port_id, 16) == 0) {
+ nexus = n;
+ break;
+ }
}
+ if (!nexus && tmp_nexus) {
+ list_add_tail_rcu(&tmp_nexus->entry,
+ &sport->nexus_list);
+ swap(nexus, tmp_nexus);
+ }
+ mutex_unlock(&sport->mutex);
+
+ if (nexus)
+ break;
+ tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
+ if (!tmp_nexus) {
+ nexus = ERR_PTR(-ENOMEM);
+ break;
+ }
+ INIT_LIST_HEAD(&tmp_nexus->ch_list);
+ memcpy(tmp_nexus->i_port_id, i_port_id, 16);
+ memcpy(tmp_nexus->t_port_id, t_port_id, 16);
}
+ kfree(tmp_nexus);
+
+ return nexus;
+}
+
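The loop above is an optimistic lookup-or-create: search under the mutex, and if the key is absent, drop the lock, allocate, and re-check, since another thread may have inserted the same nexus meanwhile. The swap() publishes the spare allocation, and the trailing kfree() (a no-op on NULL) disposes of it when the retry found an existing entry. A generic, self-contained form under assumed names:

    #include <linux/err.h>
    #include <linux/mutex.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct nex {
            struct list_head entry;
            u64 key;
    };

    static struct nex *get_or_create(struct list_head *head,
                                     struct mutex *lock, u64 key)
    {
            struct nex *obj = NULL, *spare = NULL, *n;

            for (;;) {
                    mutex_lock(lock);
                    list_for_each_entry(n, head, entry) {
                            if (n->key == key) {
                                    obj = n;
                                    break;
                            }
                    }
                    if (!obj && spare) {
                            list_add_tail_rcu(&spare->entry, head);
                            swap(obj, spare);       /* spare becomes NULL */
                    }
                    mutex_unlock(lock);

                    if (obj)
                            break;
                    spare = kzalloc(sizeof(*spare), GFP_KERNEL);
                    if (!spare)
                            return ERR_PTR(-ENOMEM);
                    spare->key = key;
            }
            kfree(spare);   /* no-op when the spare was published */

            return obj;
    }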
+static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
+ __must_hold(&sport->mutex)
+{
+ lockdep_assert_held(&sport->mutex);
+
+ if (sport->enabled == enabled)
+ return;
+ sport->enabled = enabled;
+ if (!enabled)
+ __srpt_close_all_ch(sport);
}
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
- kfree(ch);
+ kfree_rcu(ch, rcu);
}
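
kfree_rcu() takes the object and the name of its struct rcu_head member (added to struct srpt_rdma_ch in the header hunk below) and defers the actual kfree() until after an RCU grace period, so readers like srpt_ch_closed() above never dereference freed memory. Minimal shape:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct obj {
            struct rcu_head rcu;    /* storage kfree_rcu() uses */
            int id;
    };

    static void obj_release(struct obj *o)
    {
            kfree_rcu(o, rcu);      /* instead of kfree(o) */
    }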
static void srpt_release_channel_work(struct work_struct *w)
{
struct srpt_rdma_ch *ch;
struct srpt_device *sdev;
+ struct srpt_port *sport;
struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
- pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
- ch->qp->qp_num, ch->release_done);
+ pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
sdev = ch->sport->sdev;
BUG_ON(!sdev);
@@ -1878,169 +2002,141 @@ static void srpt_release_channel_work(struct work_struct *w)
transport_deregister_session(se_sess);
ch->sess = NULL;
- ib_destroy_cm_id(ch->cm_id);
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
srpt_destroy_ch_ib(ch);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
+ ch->max_rsp_size, DMA_TO_DEVICE);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
sdev, ch->rq_size,
srp_max_req_size, DMA_FROM_DEVICE);
- mutex_lock(&sdev->mutex);
- list_del_init(&ch->list);
- if (ch->release_done)
- complete(ch->release_done);
- mutex_unlock(&sdev->mutex);
+ sport = ch->sport;
+ mutex_lock(&sport->mutex);
+ list_del_rcu(&ch->list);
+ mutex_unlock(&sport->mutex);
- wake_up(&sdev->ch_releaseQ);
+ wake_up(&sport->ch_releaseQ);
kref_put(&ch->kref, srpt_free_ch);
}
/**
- * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
+ * @cm_id: IB/CM connection identifier.
+ * @port_num: Port through which the IB/CM REQ message was received.
+ * @pkey: P_Key of the incoming connection.
+ * @req: SRP login request.
+ * @src_addr: GID of the port that submitted the login request.
*
* Ownership of the cm_id is transferred to the target session if this
* function returns zero. Otherwise the caller remains the owner of cm_id.
*/
static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
- struct ib_cm_req_event_param *param,
- void *private_data)
+ u8 port_num, __be16 pkey,
+ const struct srp_login_req *req,
+ const char *src_addr)
{
struct srpt_device *sdev = cm_id->context;
- struct srpt_port *sport = &sdev->port[param->port - 1];
- struct srp_login_req *req;
- struct srp_login_rsp *rsp;
- struct srp_login_rej *rej;
- struct ib_cm_rep_param *rep_param;
- struct srpt_rdma_ch *ch, *tmp_ch;
- __be16 *guid;
+ struct srpt_port *sport = &sdev->port[port_num - 1];
+ struct srpt_nexus *nexus;
+ struct srp_login_rsp *rsp = NULL;
+ struct srp_login_rej *rej = NULL;
+ struct ib_cm_rep_param *rep_param = NULL;
+ struct srpt_rdma_ch *ch;
+ char i_port_id[36];
u32 it_iu_len;
- int i, ret = 0;
+ int i, ret;
WARN_ON_ONCE(irqs_disabled());
- if (WARN_ON(!sdev || !private_data))
+ if (WARN_ON(!sdev || !req))
return -EINVAL;
- req = (struct srp_login_req *)private_data;
-
it_iu_len = be32_to_cpu(req->req_it_iu_len);
- pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
- " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
- " (guid=0x%llx:0x%llx)\n",
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
- it_iu_len,
- param->port,
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+ pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
+ req->initiator_port_id, req->target_port_id, it_iu_len,
+ port_num, &sport->gid, be16_to_cpu(pkey));
+ nexus = srpt_get_nexus(sport, req->initiator_port_id,
+ req->target_port_id);
+ if (IS_ERR(nexus)) {
+ ret = PTR_ERR(nexus);
+ goto out;
+ }
+
+ ret = -ENOMEM;
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
rej = kzalloc(sizeof(*rej), GFP_KERNEL);
rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
-
- if (!rsp || !rej || !rep_param) {
- ret = -ENOMEM;
+ if (!rsp || !rej || !rep_param)
goto out;
- }
+ ret = -EINVAL;
if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
- ret = -EINVAL;
- pr_err("rejected SRP_LOGIN_REQ because its"
- " length (%d bytes) is out of range (%d .. %d)\n",
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
it_iu_len, 64, srp_max_req_size);
goto reject;
}
if (!sport->enabled) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- ret = -EINVAL;
- pr_err("rejected SRP_LOGIN_REQ because the target port"
- " has not yet been enabled\n");
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
+ sport->sdev->device->name, port_num);
goto reject;
}
- if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
-
- mutex_lock(&sdev->mutex);
-
- list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
- if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
- && !memcmp(ch->t_port_id, req->target_port_id, 16)
- && param->port == ch->sport->port
- && param->listen_id == ch->sport->sdev->cm_id
- && ch->cm_id) {
- if (srpt_disconnect_ch(ch) < 0)
- continue;
- pr_info("Relogin - closed existing channel %s\n",
- ch->sess_name);
- rsp->rsp_flags =
- SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
- }
- }
-
- mutex_unlock(&sdev->mutex);
-
- } else
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
-
if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
|| *(__be64 *)(req->target_port_id + 8) !=
cpu_to_be64(srpt_service_guid)) {
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
- ret = -ENOMEM;
- pr_err("rejected SRP_LOGIN_REQ because it"
- " has an invalid target port identifier.\n");
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
goto reject;
}
+ ret = -ENOMEM;
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (!ch) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
- ret = -ENOMEM;
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
goto reject;
}
kref_init(&ch->kref);
+ ch->pkey = be16_to_cpu(pkey);
+ ch->nexus = nexus;
ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
- memcpy(ch->i_port_id, req->initiator_port_id, 16);
- memcpy(ch->t_port_id, req->target_port_id, 16);
- ch->sport = &sdev->port[param->port - 1];
- ch->cm_id = cm_id;
+ ch->sport = sport;
+ ch->ib_cm.cm_id = cm_id;
cm_id->context = ch;
/*
* ch->rq_size should be at least as large as the initiator queue
* depth to avoid that the initiator driver has to report QUEUE_FULL
* to the SCSI mid-layer.
*/
- ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
+ ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
spin_lock_init(&ch->spinlock);
ch->state = CH_CONNECTING;
INIT_LIST_HEAD(&ch->cmd_wait_list);
- ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+ ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
ch->ioctx_ring = (struct srpt_send_ioctx **)
srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
sizeof(*ch->ioctx_ring[0]),
- ch->rsp_size, DMA_TO_DEVICE);
- if (!ch->ioctx_ring)
+ ch->max_rsp_size, DMA_TO_DEVICE);
+ if (!ch->ioctx_ring) {
+ pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_ch;
+ }
INIT_LIST_HEAD(&ch->free_list);
for (i = 0; i < ch->rq_size; i++) {
@@ -2059,59 +2155,88 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_ring;
}
+ for (i = 0; i < ch->rq_size; i++)
+ INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
}
ret = srpt_create_ch_ib(ch);
if (ret) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because creating"
- " a new RDMA channel failed.\n");
- goto free_recv_ring;
- }
-
- ret = srpt_ch_qp_rtr(ch, ch->qp);
- if (ret) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because enabling"
- " RTR failed (error code = %d)\n", ret);
- goto destroy_ib;
+ pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
+ goto free_recv_ring;
}
- guid = (__be16 *)&param->primary_path->sgid.global.interface_id;
- snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
- be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
- be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
- snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
- be64_to_cpu(*(__be64 *)ch->i_port_id),
- be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
+ strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
+ snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
+ be64_to_cpu(*(__be64 *)nexus->i_port_id),
+ be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
pr_debug("registering session %s\n", ch->sess_name);
if (sport->port_guid_tpg.se_tpg_wwn)
ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
TARGET_PROT_NORMAL,
- ch->ini_guid, ch, NULL);
+ ch->sess_name, ch, NULL);
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
- TARGET_PROT_NORMAL, ch->sess_name, ch,
+ TARGET_PROT_NORMAL, i_port_id, ch,
NULL);
/* Retry without leading "0x" */
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL,
- ch->sess_name + 2, ch, NULL);
+ i_port_id + 2, ch, NULL);
if (IS_ERR_OR_NULL(ch->sess)) {
- pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n",
- ch->sess_name);
- rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+ ret = PTR_ERR(ch->sess);
+ pr_info("Rejected login for initiator %s: ret = %d.\n",
+ ch->sess_name, ret);
+ rej->reason = cpu_to_be32(ret == -ENOMEM ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ goto reject;
+ }
+
+ mutex_lock(&sport->mutex);
+
+ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+ struct srpt_rdma_ch *ch2;
+
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
+
+ list_for_each_entry(ch2, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch2) < 0)
+ continue;
+ pr_info("Relogin - closed existing channel %s\n",
+ ch2->sess_name);
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+ }
+ } else {
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+ }
+
+ list_add_tail_rcu(&ch->list, &nexus->ch_list);
+
+ if (!sport->enabled) {
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
+ sdev->device->name, port_num);
+ mutex_unlock(&sport->mutex);
+ goto reject;
+ }
+
+ mutex_unlock(&sport->mutex);
+
+ ret = srpt_ch_qp_rtr(ch, ch->qp);
+ if (ret) {
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
+ ret);
goto destroy_ib;
}
- pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
- ch->sess_name, ch->cm_id);
+ pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
+ ch->sess_name, ch);
/* create srp_login_response */
rsp->opcode = SRP_LOGIN_RSP;
@@ -2119,8 +2244,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rsp->max_it_iu_len = req->req_it_iu_len;
rsp->max_ti_iu_len = req->req_it_iu_len;
ch->max_ti_iu_len = it_iu_len;
- rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
atomic_set(&ch->req_lim, ch->rq_size);
atomic_set(&ch->req_lim_delta, 0);
@@ -2136,25 +2261,31 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rep_param->responder_resources = 4;
rep_param->initiator_depth = 4;
- ret = ib_send_cm_rep(cm_id, rep_param);
- if (ret) {
- pr_err("sending SRP_LOGIN_REQ response failed"
- " (error code = %d)\n", ret);
- goto release_channel;
- }
+ /*
+ * Hold the sport mutex while accepting a connection to avoid that
+ * srpt_disconnect_ch() is invoked concurrently with this code.
+ */
+ mutex_lock(&sport->mutex);
+ if (sport->enabled && ch->state == CH_CONNECTING)
+ ret = ib_send_cm_rep(cm_id, rep_param);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&sport->mutex);
- mutex_lock(&sdev->mutex);
- list_add_tail(&ch->list, &sdev->rch_list);
- mutex_unlock(&sdev->mutex);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ goto reject;
+ default:
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
+ ret);
+ goto reject;
+ }
goto out;
-release_channel:
- srpt_disconnect_ch(ch);
- transport_deregister_session_configfs(ch->sess);
- transport_deregister_session(ch->sess);
- ch->sess = NULL;
-
destroy_ib:
srpt_destroy_ch_ib(ch);
@@ -2166,15 +2297,20 @@ free_recv_ring:
free_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
+ ch->max_rsp_size, DMA_TO_DEVICE);
free_ch:
+ cm_id->context = NULL;
kfree(ch);
+ ch = NULL;
+
+ WARN_ON_ONCE(ret == 0);
reject:
+ pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
rej->opcode = SRP_LOGIN_REJ;
rej->tag = req->tag;
- rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
(void *)rej, sizeof(*rej));
@@ -2187,6 +2323,19 @@ out:
return ret;
}
+static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
+ struct ib_cm_req_event_param *param,
+ void *private_data)
+{
+ char sguid[40];
+
+ srpt_format_guid(sguid, sizeof(sguid),
+ &param->primary_path->dgid.global.interface_id);
+
+ return srpt_cm_req_recv(cm_id, param->port, param->primary_path->pkey,
+ private_data, sguid);
+}
+
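srpt_format_guid() is introduced by this patch outside the hunks shown here. A plausible reconstruction, matching the %04x:%04x:%04x:%04x format the removed inline code in srpt_cm_req_recv() used to build (hedged: the name exists in this patch, but the body below is inferred, not quoted):

    /* Format a 64-bit GUID as four colon-separated 16-bit groups. */
    static void srpt_format_guid(char *buf, unsigned int size,
                                 const __be64 *guid)
    {
            const __be16 *g = (const __be16 *)guid;

            snprintf(buf, size, "%04x:%04x:%04x:%04x",
                     be16_to_cpu(g[0]), be16_to_cpu(g[1]),
                     be16_to_cpu(g[2]), be16_to_cpu(g[3]));
    }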
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
enum ib_cm_rej_reason reason,
const u8 *private_data,
@@ -2207,7 +2356,8 @@ static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
}
/**
- * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
+ * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
+ * @ch: SRPT RDMA channel.
*
* An IB_CM_RTU_RECEIVED message indicates that the connection is established
* and that the recipient may begin transmitting (RTU = ready to use).
@@ -2216,21 +2366,34 @@ static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
int ret;
- if (srpt_set_ch_state(ch, CH_LIVE)) {
- ret = srpt_ch_qp_rts(ch, ch->qp);
+ ret = srpt_ch_qp_rts(ch, ch->qp);
+ if (ret < 0) {
+ pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
+ ch->qp->qp_num);
+ srpt_close_ch(ch);
+ return;
+ }
- if (ret == 0) {
- /* Trigger wait list processing. */
- ret = srpt_zerolength_write(ch);
- WARN_ONCE(ret < 0, "%d\n", ret);
- } else {
- srpt_close_ch(ch);
- }
+ /*
+ * Note: calling srpt_close_ch() if the transition to the LIVE state
+ * fails is not necessary, since a failed transition means that
+ * srpt_close_ch() has already been invoked from another thread.
+ */
+ if (!srpt_set_ch_state(ch, CH_LIVE)) {
+ pr_err("%s-%d: channel transition to LIVE state failed\n",
+ ch->sess_name, ch->qp->qp_num);
+ return;
}
+
+ /* Trigger wait list processing. */
+ ret = srpt_zerolength_write(ch);
+ WARN_ONCE(ret < 0, "%d\n", ret);
}
/**
- * srpt_cm_handler() - IB connection manager callback function.
+ * srpt_cm_handler - IB connection manager callback function
+ * @cm_id: IB/CM connection identifier.
+ * @event: IB/CM event.
*
* A non-zero return value will cause the caller to destroy the CM ID.
*
@@ -2247,8 +2410,8 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
ret = 0;
switch (event->event) {
case IB_CM_REQ_RECEIVED:
- ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
- event->private_data);
+ ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
+ event->private_data);
break;
case IB_CM_REJ_RECEIVED:
srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
@@ -2295,11 +2458,11 @@ static int srpt_write_pending_status(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
+ return ioctx->state == SRPT_STATE_NEED_DATA;
}
/*
- * srpt_write_pending() - Start data transfer from initiator to target (write).
+ * srpt_write_pending - Start data transfer from initiator to target (write).
*/
static int srpt_write_pending(struct se_cmd *se_cmd)
{
@@ -2356,7 +2519,8 @@ static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
}
/**
- * srpt_queue_response() - Transmits the response to a SCSI command.
+ * srpt_queue_response - transmit the response to a SCSI command
+ * @cmd: SCSI target command.
*
* Callback function called by the TCM core. Must not block since it can be
* invoked in the context of the IB completion handler.
@@ -2370,13 +2534,11 @@ static void srpt_queue_response(struct se_cmd *cmd)
struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
struct ib_sge sge;
enum srpt_command_state state;
- unsigned long flags;
int resp_len, ret, i;
u8 srp_tm_status;
BUG_ON(!ch);
- spin_lock_irqsave(&ioctx->spinlock, flags);
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEW:
@@ -2391,7 +2553,6 @@ static void srpt_queue_response(struct se_cmd *cmd)
ch, ioctx->ioctx.index, ioctx->state);
break;
}
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
return;
@@ -2495,26 +2656,56 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
+static bool srpt_ch_list_empty(struct srpt_port *sport)
+{
+ struct srpt_nexus *nexus;
+ bool res = true;
+
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry)
+ if (!list_empty(&nexus->ch_list))
+ res = false;
+ rcu_read_unlock();
+
+ return res;
+}
+
/**
- * srpt_release_sdev() - Free the channel resources associated with a target.
+ * srpt_release_sport - disable login and wait for associated channels
+ * @sport: SRPT HCA port.
*/
-static int srpt_release_sdev(struct srpt_device *sdev)
+static int srpt_release_sport(struct srpt_port *sport)
{
- int i, res;
+ struct srpt_nexus *nexus, *next_n;
+ struct srpt_rdma_ch *ch;
WARN_ON_ONCE(irqs_disabled());
- BUG_ON(!sdev);
-
- mutex_lock(&sdev->mutex);
- for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
- srpt_set_enabled(&sdev->port[i], false);
- mutex_unlock(&sdev->mutex);
+ mutex_lock(&sport->mutex);
+ srpt_set_enabled(sport, false);
+ mutex_unlock(&sport->mutex);
+
+ while (wait_event_timeout(sport->ch_releaseQ,
+ srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
+ pr_info("%s_%d: waiting for session unregistration ...\n",
+ sport->sdev->device->name, sport->port);
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ pr_info("%s-%d: state %s\n",
+ ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
+ }
+ }
+ rcu_read_unlock();
+ }
- res = wait_event_interruptible(sdev->ch_releaseQ,
- list_empty_careful(&sdev->rch_list));
- if (res)
- pr_err("%s: interrupted.\n", __func__);
+ mutex_lock(&sport->mutex);
+ list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
+ list_del(&nexus->entry);
+ kfree_rcu(nexus, rcu);
+ }
+ mutex_unlock(&sport->mutex);
return 0;
}
@@ -2601,8 +2792,10 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
sdev->use_srq = true;
sdev->srq = srq;
- for (i = 0; i < sdev->srq_size; ++i)
+ for (i = 0; i < sdev->srq_size; ++i) {
+ INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
+ }
return 0;
}
@@ -2624,7 +2817,8 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
}
/**
- * srpt_add_one() - Infiniband device addition callback function.
+ * srpt_add_one - InfiniBand device addition callback function
+ * @device: Describes a HCA.
*/
static void srpt_add_one(struct ib_device *device)
{
@@ -2639,9 +2833,7 @@ static void srpt_add_one(struct ib_device *device)
goto err;
sdev->device = device;
- INIT_LIST_HEAD(&sdev->rch_list);
- init_waitqueue_head(&sdev->ch_releaseQ);
- mutex_init(&sdev->mutex);
+ mutex_init(&sdev->sdev_mutex);
sdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(sdev->pd))
@@ -2682,6 +2874,9 @@ static void srpt_add_one(struct ib_device *device)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
+ INIT_LIST_HEAD(&sport->nexus_list);
+ init_waitqueue_head(&sport->ch_releaseQ);
+ mutex_init(&sport->mutex);
sport->sdev = sdev;
sport->port = i;
sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
@@ -2722,7 +2917,9 @@ err:
}
/**
- * srpt_remove_one() - InfiniBand device removal callback function.
+ * srpt_remove_one - InfiniBand device removal callback function
+ * @device: Describes a HCA.
+ * @client_data: The value passed as the third argument to ib_set_client_data().
*/
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
@@ -2752,7 +2949,9 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
spin_lock(&srpt_dev_lock);
list_del(&sdev->list);
spin_unlock(&srpt_dev_lock);
- srpt_release_sdev(sdev);
+
+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
+ srpt_release_sport(&sdev->port[i]);
srpt_free_srq(sdev);
@@ -2828,7 +3027,8 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
}
/**
- * srpt_close_session() - Forcibly close a session.
+ * srpt_close_session - forcibly close a session
+ * @se_sess: SCSI target session.
*
* Callback function invoked by the TCM core to clean up sessions associated
* with a node ACL when the user invokes
@@ -2837,15 +3037,13 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
static void srpt_close_session(struct se_session *se_sess)
{
struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
- struct srpt_device *sdev = ch->sport->sdev;
- mutex_lock(&sdev->mutex);
srpt_disconnect_ch_sync(ch);
- mutex_unlock(&sdev->mutex);
}
/**
- * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
+ * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
+ * @se_sess: SCSI target session.
*
* A quote from RFC 4455 (SCSI-MIB) about this MIB object:
* This object represents an arbitrary integer used to uniquely identify a
@@ -2867,7 +3065,7 @@ static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx);
+ return ioctx->state;
}
static int srpt_parse_guid(u64 *guid, const char *name)
@@ -2884,7 +3082,7 @@ out:
}
/**
- * srpt_parse_i_port_id() - Parse an initiator port ID.
+ * srpt_parse_i_port_id - parse an initiator port ID
* @name: ASCII representation of a 128-bit initiator port ID.
* @i_port_id: Binary 128-bit port ID.
*/
@@ -3065,18 +3263,24 @@ static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
if (val != !!val)
return -EINVAL;
- ret = mutex_lock_interruptible(&sdev->mutex);
+ ret = mutex_lock_interruptible(&sdev->sdev_mutex);
if (ret < 0)
return ret;
+ ret = mutex_lock_interruptible(&sport->mutex);
+ if (ret < 0)
+ goto unlock_sdev;
enabled = sport->enabled;
/* Log out all initiator systems before changing 'use_srq'. */
srpt_set_enabled(sport, false);
sport->port_attrib.use_srq = val;
srpt_use_srq(sdev, sport->port_attrib.use_srq);
srpt_set_enabled(sport, enabled);
- mutex_unlock(&sdev->mutex);
+ ret = count;
+ mutex_unlock(&sport->mutex);
+unlock_sdev:
+ mutex_unlock(&sdev->sdev_mutex);
- return count;
+ return ret;
}
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
@@ -3105,7 +3309,6 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
{
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- struct srpt_device *sdev = sport->sdev;
unsigned long tmp;
int ret;
@@ -3120,9 +3323,9 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
return -EINVAL;
}
- mutex_lock(&sdev->mutex);
+ mutex_lock(&sport->mutex);
srpt_set_enabled(sport, tmp);
- mutex_unlock(&sdev->mutex);
+ mutex_unlock(&sport->mutex);
return count;
}
@@ -3135,8 +3338,10 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
};
/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @wwn: Corresponds to $driver/$port.
+ * @group: Not used.
+ * @name: $tpg.
*/
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
struct config_group *group,
@@ -3158,8 +3363,8 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
}
/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @tpg: Target portal group to deregister.
*/
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
@@ -3170,8 +3375,10 @@ static void srpt_drop_tpg(struct se_portal_group *tpg)
}
/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port
+ * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
+ * @tf: Not used.
+ * @group: Not used.
+ * @name: $port.
*/
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
@@ -3181,8 +3388,8 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
}
/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port
+ * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
+ * @wwn: $port.
*/
static void srpt_drop_tport(struct se_wwn *wwn)
{
@@ -3240,7 +3447,7 @@ static const struct target_core_fabric_ops srpt_template = {
};
/**
- * srpt_init_module() - Kernel module initialization.
+ * srpt_init_module - kernel module initialization
*
* Note: Since ib_register_client() registers callback functions, and since at
* least one of these callback functions (srpt_add_one()) calls target core
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 673387d365a3..4d9199fd00dc 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -54,6 +54,8 @@
*/
#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
+struct srpt_nexus;
+
enum {
/*
* SRP IOControllerProfile attributes for SRP target ports that have
@@ -114,7 +116,7 @@ enum {
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
- SRPT_RQ_SIZE = 128,
+ MAX_SRPT_RQ_SIZE = 128,
MIN_SRPT_SRQ_SIZE = 4,
DEFAULT_SRPT_SRQ_SIZE = 4095,
MAX_SRPT_SRQ_SIZE = 65535,
@@ -134,7 +136,7 @@ enum {
};
/**
- * enum srpt_command_state - SCSI command state managed by SRPT.
+ * enum srpt_command_state - SCSI command state managed by SRPT
* @SRPT_STATE_NEW: New command arrived and is being processed.
* @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
* for data arrival.
@@ -158,7 +160,8 @@ enum srpt_command_state {
};
/**
- * struct srpt_ioctx - Shared SRPT I/O context information.
+ * struct srpt_ioctx - shared SRPT I/O context information
+ * @cqe: Completion queue element.
* @buf: Pointer to the buffer.
* @dma: DMA address of the buffer.
* @index: Index of the I/O context in its ioctx_ring array.
@@ -171,7 +174,7 @@ struct srpt_ioctx {
};
/**
- * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * struct srpt_recv_ioctx - SRPT receive I/O context
* @ioctx: See above.
* @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
*/
@@ -187,13 +190,20 @@ struct srpt_rw_ctx {
};
/**
- * struct srpt_send_ioctx - SRPT send I/O context.
+ * struct srpt_send_ioctx - SRPT send I/O context
* @ioctx: See above.
* @ch: Channel pointer.
- * @spinlock: Protects 'state'.
+ * @s_rw_ctx: @rw_ctxs points here if only a single rw_ctx is needed.
+ * @rw_ctxs: RDMA read/write contexts.
+ * @rdma_cqe: RDMA completion queue element.
+ * @free_list: Node in srpt_rdma_ch.free_list.
* @state: I/O context state.
* @cmd: Target core command data structure.
* @sense_data: SCSI sense data.
+ * @n_rdma: Number of work requests needed to transfer this ioctx.
+ * @n_rw_ctx: Size of rw_ctxs array.
+ * @queue_status_only: Send a SCSI status back to the initiator but no data.
*/
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
@@ -204,10 +214,8 @@ struct srpt_send_ioctx {
struct ib_cqe rdma_cqe;
struct list_head free_list;
- spinlock_t spinlock;
enum srpt_command_state state;
struct se_cmd cmd;
- struct completion tx_done;
u8 n_rdma;
u8 n_rw_ctx;
bool queue_status_only;
@@ -215,7 +223,7 @@ struct srpt_send_ioctx {
};
/**
- * enum rdma_ch_state - SRP channel state.
+ * enum rdma_ch_state - SRP channel state
* @CH_CONNECTING: QP is in RTR state; waiting for RTU.
* @CH_LIVE: QP is in RTS state.
* @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has
@@ -233,17 +241,19 @@ enum rdma_ch_state {
};
/**
- * struct srpt_rdma_ch - RDMA channel.
- * @cm_id: IB CM ID associated with the channel.
+ * struct srpt_rdma_ch - RDMA channel
+ * @nexus: I_T nexus this channel is associated with.
* @qp: IB queue pair used for communicating over this channel.
+ * @cm_id: IB CM ID associated with the channel.
* @cq: IB completion queue for this channel.
+ * @zw_cqe: Zero-length write CQE.
+ * @rcu: RCU head.
+ * @kref: kref for this channel.
* @rq_size: IB receive queue size.
- * @rsp_size IB response message size in bytes.
+ * @max_rsp_size: Maximum size of an RSP response message in bytes.
* @sq_wr_avail: number of work requests available in the send queue.
* @sport: pointer to the information of the HCA port used by this
* channel.
- * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
- * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
* @max_ti_iu_len: maximum target-to-initiator information unit length.
* @req_lim: request limit: maximum number of requests that may be sent
* by the initiator without having received a response.
@@ -251,30 +261,34 @@ enum rdma_ch_state {
* @spinlock: Protects free_list and state.
* @free_list: Head of list with free send I/O contexts.
* @state: channel state. See also enum rdma_ch_state.
+ * @processing_wait_list: Whether or not cmd_wait_list is being processed.
* @ioctx_ring: Send ring.
* @ioctx_recv_ring: Receive I/O context ring.
- * @list: Node for insertion in the srpt_device.rch_list list.
+ * @list: Node in srpt_nexus.ch_list.
* @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
* list contains struct srpt_ioctx elements and is protected
* against concurrent modification by the cm_id spinlock.
+ * @pkey: P_Key of the IB partition for this SRP channel.
* @sess: Session information associated with this SRP channel.
* @sess_name: Session name.
- * @ini_guid: Initiator port GUID.
* @release_work: Allows scheduling of srpt_release_channel().
- * @release_done: Enables waiting for srpt_release_channel() completion.
*/
struct srpt_rdma_ch {
- struct ib_cm_id *cm_id;
+ struct srpt_nexus *nexus;
struct ib_qp *qp;
+ union {
+ struct {
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ };
struct ib_cq *cq;
struct ib_cqe zw_cqe;
+ struct rcu_head rcu;
struct kref kref;
int rq_size;
- u32 rsp_size;
+ u32 max_rsp_size;
atomic_t sq_wr_avail;
struct srpt_port *sport;
- u8 i_port_id[16];
- u8 t_port_id[16];
int max_ti_iu_len;
atomic_t req_lim;
atomic_t req_lim_delta;
@@ -285,15 +299,31 @@ struct srpt_rdma_ch {
struct srpt_recv_ioctx **ioctx_recv_ring;
struct list_head list;
struct list_head cmd_wait_list;
+ uint16_t pkey;
+ bool processing_wait_list;
struct se_session *sess;
- u8 sess_name[36];
- u8 ini_guid[24];
+ u8 sess_name[24];
struct work_struct release_work;
- struct completion *release_done;
};
/**
- * struct srpt_port_attib - Attributes for SRPT port
+ * struct srpt_nexus - I_T nexus
+ * @rcu: RCU head for this data structure.
+ * @entry: srpt_port.nexus_list list node.
+ * @ch_list: struct srpt_rdma_ch list. Protected by srpt_port.mutex.
+ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
+ */
+struct srpt_nexus {
+ struct rcu_head rcu;
+ struct list_head entry;
+ struct list_head ch_list;
+ u8 i_port_id[16];
+ u8 t_port_id[16];
+};
+
+/**
+ * struct srpt_port_attrib - attributes for SRPT port
* @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
* @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
* @srp_sq_size: Send queue size used for new connections.
@@ -307,7 +337,7 @@ struct srpt_port_attrib {
};
/**
- * struct srpt_port - Information associated by SRPT with a single IB port.
+ * struct srpt_port - information associated by SRPT with a single IB port
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
@@ -323,7 +353,10 @@ struct srpt_port_attrib {
* @port_guid_wwn: WWN associated with target port GUID.
* @port_gid_tpg: TPG associated with target port GID.
* @port_gid_wwn: WWN associated with target port GID.
- * @port_acl_list: Head of the list with all node ACLs for this port.
+ * @port_attrib: Port attributes that can be accessed through configfs.
+ * @ch_releaseQ: Enables waiting for removal from nexus_list.
+ * @mutex: Protects nexus_list.
+ * @nexus_list: Nexus list. See also srpt_nexus.entry.
*/
struct srpt_port {
struct srpt_device *sdev;
@@ -341,21 +374,22 @@ struct srpt_port {
struct se_portal_group port_gid_tpg;
struct se_wwn port_gid_wwn;
struct srpt_port_attrib port_attrib;
+ wait_queue_head_t ch_releaseQ;
+ struct mutex mutex;
+ struct list_head nexus_list;
};
/**
- * struct srpt_device - Information associated by SRPT with a single HCA.
+ * struct srpt_device - information associated by SRPT with a single HCA
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
* @lkey: L_Key (local key) with write access to all local memory.
* @srq: Per-HCA SRQ (shared receive queue).
* @cm_id: Connection identifier.
* @srq_size: SRQ size.
+ * @sdev_mutex: Serializes use_srq changes.
* @use_srq: Whether or not to use SRQ.
* @ioctx_ring: Receive I/O context ring for the per-HCA SRQ.
- * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
- * @ch_releaseQ: Enables waiting for removal from rch_list.
- * @mutex: Protects rch_list.
* @port: Information about the ports owned by this HCA.
* @event_handler: Per-HCA asynchronous IB event handler.
* @list: Node in srpt_dev_list.
@@ -367,11 +401,9 @@ struct srpt_device {
struct ib_srq *srq;
struct ib_cm_id *cm_id;
int srq_size;
+ struct mutex sdev_mutex;
bool use_srq;
struct srpt_recv_ioctx **ioctx_ring;
- struct list_head rch_list;
- wait_queue_head_t ch_releaseQ;
- struct mutex mutex;
struct srpt_port port[2];
struct ib_event_handler event_handler;
struct list_head list;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 925571475005..0193dd4f0452 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -635,11 +635,11 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
}
/* No kernel lock - fine */
-static unsigned int evdev_poll(struct file *file, poll_table *wait)
+static __poll_t evdev_poll(struct file *file, poll_table *wait)
{
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &evdev->wait, wait);
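
This hunk is part of the tree-wide conversion of poll handlers from unsigned int to __poll_t, a sparse-checked bitwise type that catches mixing poll bits with plain integers. The converted handler shape, with wq and data_ready() as placeholder names:

    #include <linux/poll.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(wq);     /* placeholder wait queue */
    static bool data_ready(void);           /* placeholder predicate */

    static __poll_t example_poll(struct file *file, poll_table *wait)
    {
            __poll_t mask = 0;

            poll_wait(file, &wq, wait);
            if (data_ready())
                    mask |= EPOLLIN | EPOLLRDNORM;

            return mask;
    }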
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e30642db50d5..0d0b2ab1bb6b 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1048,7 +1048,7 @@ static inline void input_wakeup_procfs_readers(void)
wake_up(&input_devices_poll_wait);
}
-static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
+static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &input_devices_poll_wait, wait);
if (file->f_version != input_devices_state) {
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 7b29a8944039..fe3255572886 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -436,7 +436,7 @@ static ssize_t joydev_read(struct file *file, char __user *buf,
}
/* No kernel lock - fine */
-static unsigned int joydev_poll(struct file *file, poll_table *wait)
+static __poll_t joydev_poll(struct file *file, poll_table *wait)
{
struct joydev_client *client = file->private_data;
struct joydev *joydev = client->joydev;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3d8ff09eba57..c868a878c84f 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -163,7 +163,7 @@ static unsigned int get_time_pit(void)
#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
-#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
+#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "get_cycles"
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d86e59515b9c..d88d3e0f59fb 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -229,6 +229,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -476,6 +477,22 @@ static const u8 xboxone_hori_init[] = {
};
/*
+ * This is the first of two packets required for some of the PDP pads
+ * (including 0x0e6f:0x02ab) to start sending input reports.
+ */
+static const u8 xboxone_pdp_init1[] = {
+ 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+};
+
+/*
+ * This is the second of two packets required for some of the PDP pads
+ * (including 0x0e6f:0x02ab) to start sending input reports.
+ */
+static const u8 xboxone_pdp_init2[] = {
+ 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+};
+
+/*
* A specific rumble packet is required for some PowerA pads to start
* sending input reports. One of those pads is (0x24c6:0x543a).
*/
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
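
The same VID/PID appears twice in the table on purpose: the init table is walked in order and every matching entry is sent, so listing the pad twice queues both packets back to back. For reference, the assumed shape of the table-entry macro (defined elsewhere in xpad.c, not shown in these hunks):

    #define XBOXONE_INIT_PKT(_vid, _pid, _data)             \
            {                                               \
                    .idVendor       = (_vid),               \
                    .idProduct      = (_pid),               \
                    .data           = (_data),              \
                    .len            = ARRAY_SIZE(_data),    \
            }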
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 1c8c56efc995..9c3f7ec3bd3d 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -408,7 +408,7 @@ static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
return retval;
}
-static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait)
+static __poll_t hp_sdc_rtc_poll(struct file *file, poll_table *wait)
{
unsigned long l;
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ae473123583b..3d51175c4d72 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
return union_desc;
dev_err(&intf->dev,
- "Union descriptor to short (%d vs %zd\n)",
+ "Union descriptor too short (%d vs %zd)\n",
union_desc->bLength, sizeof(*union_desc));
return NULL;
}
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 6c51d404874b..c37aea9ac272 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
- struct device_node *node)
+ struct device_node *parent)
{
+ struct device_node *node;
+
if (pdata && pdata->coexist)
return true;
- node = of_find_node_by_name(node, "codec");
+ node = of_get_child_by_name(parent, "codec");
if (node) {
of_node_put(node);
return true;
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 5690eb7ff954..15e0d352c4cc 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
int vddvibr_uV = 0;
int error;
- of_node_get(twl6040_core_dev->of_node);
- twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
+ twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
"vibra");
if (!twl6040_core_node) {
dev_err(&pdev->dev, "parent of node is missing?\n");
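Both vibra hunks above replace of_find_node_by_name() — which searches the whole tree from the given node and drops a reference on it — with of_get_child_by_name(), which only inspects direct children and returns a new reference on the match. A sketch of the balanced lookup, reusing the "codec" child name from the twl4030 patch:

#include <linux/of.h>
#include <linux/types.h>

static bool example_has_codec_child(struct device_node *parent)
{
	struct device_node *child;

	child = of_get_child_by_name(parent, "codec");
	if (!child)
		return false;

	of_node_put(child);	/* drop the reference the lookup took */
	return true;
}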
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 39ddd9a73feb..91df0df15e68 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -694,7 +694,7 @@ static ssize_t uinput_read(struct file *file, char __user *buffer,
return retval;
}
-static unsigned int uinput_poll(struct file *file, poll_table *wait)
+static __poll_t uinput_poll(struct file *file, poll_table *wait)
{
struct uinput_device *udev = file->private_data;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6bf56bb5f8d9..d91f3b1c5375 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev,
0, width, 0, 0);
input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
0, height, 0, 0);
- input_set_abs_params(mtouch, ABS_MT_PRESSURE,
- 0, 255, 0, 0);
ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
if (ret) {
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 579b899add26..dbe57da8c1a1 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_MULTI:
if (priv->flags & ALPS_BUTTONPAD) {
if (IS_SS4PLUS_DEV(priv->dev_id)) {
- f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
- f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
} else {
f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX_BL;
}
+ no_data_y = SS4_MFPACKET_NO_AY_BL;
f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
- no_data_x = SS4_MFPACKET_NO_AX_BL;
- no_data_y = SS4_MFPACKET_NO_AY_BL;
} else {
if (IS_SS4PLUS_DEV(priv->dev_id)) {
- f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
- f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ no_data_x = SS4_PLUS_MFPACKET_NO_AX;
} else {
- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX;
}
+ no_data_y = SS4_MFPACKET_NO_AY;
+
f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
- no_data_x = SS4_MFPACKET_NO_AX;
- no_data_y = SS4_MFPACKET_NO_AY;
}
f->first_mp = 0;
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index c80a7c76cb76..79b6d69d1486 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -141,10 +141,12 @@ enum SS4_PACKET_ID {
#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F)
-#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
-#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
-#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */
-#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
+#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */
+#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */
+#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */
+#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */
/*
* enum V7_PACKET_ID - defines the packet type for V7
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b84cd978fce2..a4aaa748e987 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 5:
etd->hw_version = 3;
break;
- case 6 ... 14:
+ case 6 ... 15:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ee5466a374bf..cd9f61cb3fc6 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0046", /* X250 */
"LEN004a", /* W541 */
"LEN200f", /* T450s */
+ "LEN2018", /* T460p */
NULL
};
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 0871010f18d5..bbd29220dbe9 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -19,6 +19,13 @@
#include "psmouse.h"
#include "trackpoint.h"
+static const char * const trackpoint_variants[] = {
+ [TP_VARIANT_IBM] = "IBM",
+ [TP_VARIANT_ALPS] = "ALPS",
+ [TP_VARIANT_ELAN] = "Elan",
+ [TP_VARIANT_NXP] = "NXP",
+};
+
/*
* Power-on Reset: Resets all trackpoint parameters, including RAM values,
* to defaults.
@@ -26,7 +33,7 @@
*/
static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
{
- unsigned char results[2];
+ u8 results[2];
int tries = 0;
/* Issue POR command, and repeat up to once if 0xFC00 received */
@@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
/* Check for success response -- 0xAA00 */
if (results[0] != 0xAA || results[1] != 0x00)
- return -1;
+ return -ENODEV;
return 0;
}
@@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
/*
* Device IO: read, write and toggle bit
*/
-static int trackpoint_read(struct ps2dev *ps2dev,
- unsigned char loc, unsigned char *results)
+static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results)
{
if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) {
@@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev,
return 0;
}
-static int trackpoint_write(struct ps2dev *ps2dev,
- unsigned char loc, unsigned char val)
+static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val)
{
if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) ||
@@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev,
return 0;
}
-static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
- unsigned char loc, unsigned char mask)
+static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask)
{
/* Bad things will happen if the loc param isn't in this range */
if (loc < 0x20 || loc >= 0x2F)
@@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
return 0;
}
-static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
- unsigned char mask, unsigned char value)
+static int trackpoint_update_bit(struct ps2dev *ps2dev,
+ u8 loc, u8 mask, u8 value)
{
int retval = 0;
- unsigned char data;
+ u8 data;
trackpoint_read(ps2dev, loc, &data);
if (((data & mask) == mask) != !!value)
@@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
*/
struct trackpoint_attr_data {
size_t field_offset;
- unsigned char command;
- unsigned char mask;
- unsigned char inverted;
- unsigned char power_on_default;
+ u8 command;
+ u8 mask;
+ bool inverted;
+ u8 power_on_default;
};
-static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
+static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse,
+ void *data, char *buf)
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
- unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset);
+ u8 value = *(u8 *)((void *)tp + attr->field_offset);
if (attr->inverted)
value = !value;
@@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
- unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned char value;
+ u8 *field = (void *)tp + attr->field_offset;
+ u8 value;
int err;
err = kstrtou8(buf, 10, &value);
@@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
- unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned int value;
+ bool *field = (void *)tp + attr->field_offset;
+ bool value;
int err;
- err = kstrtouint(buf, 10, &value);
+ err = kstrtobool(buf, &value);
if (err)
return err;
- if (value > 1)
- return -EINVAL;
-
if (attr->inverted)
value = !value;
@@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
&trackpoint_attr_##_name, \
trackpoint_show_int_attr, trackpoint_set_bit_attr)
-#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name) \
-do { \
- struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
- \
- trackpoint_update_bit(&_psmouse->ps2dev, \
- _attr->command, _attr->mask, _tp->_name); \
-} while (0)
-
-#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
-do { \
- if (!_power_on || \
- _tp->_name != trackpoint_attr_##_name.power_on_default) { \
- if (!trackpoint_attr_##_name.mask) \
- trackpoint_write(&_psmouse->ps2dev, \
- trackpoint_attr_##_name.command, \
- _tp->_name); \
- else \
- TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name); \
- } \
-} while (0)
-
-#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
- (_tp->_name = trackpoint_attr_##_name.power_on_default)
-
TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS);
TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED);
TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA);
@@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
-TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
+TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false,
TP_DEF_PTSON);
-TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0,
+TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false,
TP_DEF_SKIPBACK);
-TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1,
+TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true,
TP_DEF_EXT_DEV);
+static bool trackpoint_is_attr_available(struct psmouse *psmouse,
+ struct attribute *attr)
+{
+ struct trackpoint_data *tp = psmouse->private;
+
+ return tp->variant_id == TP_VARIANT_IBM ||
+ attr == &psmouse_attr_sensitivity.dattr.attr ||
+ attr == &psmouse_attr_press_to_select.dattr.attr;
+}
+
+static umode_t trackpoint_is_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct serio *serio = to_serio_port(dev);
+ struct psmouse *psmouse = serio_get_drvdata(serio);
+
+ return trackpoint_is_attr_available(psmouse, attr) ? attr->mode : 0;
+}
+
static struct attribute *trackpoint_attrs[] = {
&psmouse_attr_sensitivity.dattr.attr,
&psmouse_attr_speed.dattr.attr,
@@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = {
};
static struct attribute_group trackpoint_attr_group = {
- .attrs = trackpoint_attrs,
+ .is_visible = trackpoint_is_attr_visible,
+ .attrs = trackpoint_attrs,
};
-static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id)
-{
- unsigned char param[2] = { 0 };
+#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
+do { \
+ struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
+ \
+ if ((!_power_on || _tp->_name != _attr->power_on_default) && \
+ trackpoint_is_attr_available(_psmouse, \
+ &psmouse_attr_##_name.dattr.attr)) { \
+ if (!_attr->mask) \
+ trackpoint_write(&_psmouse->ps2dev, \
+ _attr->command, _tp->_name); \
+ else \
+ trackpoint_update_bit(&_psmouse->ps2dev, \
+ _attr->command, _attr->mask, \
+ _tp->_name); \
+ } \
+} while (0)
- if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
- return -1;
+#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
+do { \
+ _tp->_name = trackpoint_attr_##_name.power_on_default; \
+} while (0)
- /* add new TP ID. */
- if (!(param[0] & TP_MAGIC_IDENT))
- return -1;
+static int trackpoint_start_protocol(struct psmouse *psmouse,
+ u8 *variant_id, u8 *firmware_id)
+{
+ u8 param[2] = { 0 };
+ int error;
- if (firmware_id)
- *firmware_id = param[1];
+ error = ps2_command(&psmouse->ps2dev,
+ param, MAKE_PS2_CMD(0, 2, TP_READ_ID));
+ if (error)
+ return error;
+
+ switch (param[0]) {
+ case TP_VARIANT_IBM:
+ case TP_VARIANT_ALPS:
+ case TP_VARIANT_ELAN:
+ case TP_VARIANT_NXP:
+ if (variant_id)
+ *variant_id = param[0];
+ if (firmware_id)
+ *firmware_id = param[1];
+ return 0;
+ }
- return 0;
+ return -ENODEV;
}
/*
@@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
{
struct trackpoint_data *tp = psmouse->private;
- if (!in_power_on_state) {
+ if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) {
/*
* Disable features that may make device unusable
* with this driver.
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
static void trackpoint_disconnect(struct psmouse *psmouse)
{
- sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group);
+ device_remove_group(&psmouse->ps2dev.serio->dev,
+ &trackpoint_attr_group);
kfree(psmouse->private);
psmouse->private = NULL;
@@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse)
static int trackpoint_reconnect(struct psmouse *psmouse)
{
- int reset_fail;
+ struct trackpoint_data *tp = psmouse->private;
+ int error;
+ bool was_reset;
- if (trackpoint_start_protocol(psmouse, NULL))
- return -1;
+ error = trackpoint_start_protocol(psmouse, NULL, NULL);
+ if (error)
+ return error;
- reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev);
- if (trackpoint_sync(psmouse, !reset_fail))
- return -1;
+ was_reset = tp->variant_id == TP_VARIANT_IBM &&
+ trackpoint_power_on_reset(&psmouse->ps2dev) == 0;
+
+ error = trackpoint_sync(psmouse, was_reset);
+ if (error)
+ return error;
return 0;
}
@@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse)
int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
- unsigned char firmware_id;
- unsigned char button_info;
+ struct trackpoint_data *tp;
+ u8 variant_id;
+ u8 firmware_id;
+ u8 button_info;
int error;
- if (trackpoint_start_protocol(psmouse, &firmware_id))
- return -1;
+ error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id);
+ if (error)
+ return error;
if (!set_properties)
return 0;
- if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
- button_info = 0x33;
- }
-
- psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
- if (!psmouse->private)
+ tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+ if (!tp)
return -ENOMEM;
- psmouse->vendor = "IBM";
+ trackpoint_defaults(tp);
+ tp->variant_id = variant_id;
+ tp->firmware_id = firmware_id;
+
+ psmouse->private = tp;
+
+ psmouse->vendor = trackpoint_variants[variant_id];
psmouse->name = "TrackPoint";
psmouse->reconnect = trackpoint_reconnect;
psmouse->disconnect = trackpoint_disconnect;
+ if (variant_id != TP_VARIANT_IBM) {
+ /* Newer variants do not support extended button query. */
+ button_info = 0x33;
+ } else {
+ error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info);
+ if (error) {
+ psmouse_warn(psmouse,
+ "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
+ } else if (!button_info) {
+ psmouse_warn(psmouse,
+ "got 0 in extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
+ }
+ }
+
if ((button_info & 0x0f) >= 3)
- __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
+ input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE);
__set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
__set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
- trackpoint_defaults(psmouse->private);
-
- error = trackpoint_power_on_reset(ps2dev);
-
- /* Write defaults to TP only if reset fails. */
- if (error)
+ if (variant_id != TP_VARIANT_IBM ||
+ trackpoint_power_on_reset(ps2dev) != 0) {
+ /*
+ * Write defaults to TP if we did not reset the trackpoint.
+ */
trackpoint_sync(psmouse, false);
+ }
- error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
+ error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group);
if (error) {
psmouse_err(psmouse,
"failed to create sysfs attributes, error: %d\n",
@@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
}
psmouse_info(psmouse,
- "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
- firmware_id,
+ "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+ psmouse->vendor, firmware_id,
(button_info & 0xf0) >> 4, button_info & 0x0f);
return 0;
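Rather than registering different attribute sets per variant, the trackpoint patch above keeps one group and gates entries through an .is_visible callback. A generic sketch of that pattern — example_priv, full_featured, and the empty example_attrs array are illustrative, not the driver's names:

#include <linux/device.h>
#include <linux/sysfs.h>

struct example_priv {
	bool full_featured;	/* hypothetical capability flag */
};

static struct attribute *example_attrs[] = {
	/* &dev_attr_foo.attr, ... */
	NULL,
};

static umode_t example_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct example_priv *priv = dev_get_drvdata(dev);

	/* Return 0 to hide an attribute, or its declared mode to show it. */
	return priv->full_featured ? attr->mode : 0;
}

static const struct attribute_group example_group = {
	.is_visible = example_attr_visible,
	.attrs = example_attrs,
};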
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 88055755f82e..10a039148234 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,10 +21,16 @@
#define TP_COMMAND 0xE2 /* Commands start with this */
#define TP_READ_ID 0xE1 /* Sent for device identification */
-#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
- /* by the firmware ID */
- /* Firmware ID includes 0x1, 0x2, 0x3 */
+/*
+ * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
+ * 0x01 was the original IBM trackpoint, others implement very limited
+ * subset of trackpoint features.
+ */
+#define TP_VARIANT_IBM 0x01
+#define TP_VARIANT_ALPS 0x02
+#define TP_VARIANT_ELAN 0x03
+#define TP_VARIANT_NXP 0x04
/*
* Commands
@@ -136,18 +142,20 @@
#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
-struct trackpoint_data
-{
- unsigned char sensitivity, speed, inertia, reach;
- unsigned char draghys, mindrag;
- unsigned char thresh, upthresh;
- unsigned char ztime, jenks;
- unsigned char drift_time;
+struct trackpoint_data {
+ u8 variant_id;
+ u8 firmware_id;
+
+ u8 sensitivity, speed, inertia, reach;
+ u8 draghys, mindrag;
+ u8 thresh, upthresh;
+ u8 ztime, jenks;
+ u8 drift_time;
/* toggles */
- unsigned char press_to_select;
- unsigned char skipback;
- unsigned char ext_dev;
+ bool press_to_select;
+ bool skipback;
+ bool ext_dev;
};
#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 2d7f691ec71c..731d84ae5101 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -757,11 +757,11 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
}
/* No kernel lock - fine */
-static unsigned int mousedev_poll(struct file *file, poll_table *wait)
+static __poll_t mousedev_poll(struct file *file, poll_table *wait)
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &mousedev->wait, wait);
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4f2bb5947a4e..141ea228aac6 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
"Failed to process interrupt request: %d\n", ret);
- if (count)
+ if (count) {
kfree(attn_data.data);
+ attn_data.data = NULL;
+ }
if (!kfifo_is_empty(&drvdata->attn_fifo))
return rmi_irq_fn(irq, dev_id);
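The rmi_driver hunk above clears attn_data.data immediately after kfree() so no stale pointer survives into a later pass over the attention FIFO. The same defensive pattern as a standalone sketch, with a hypothetical attn struct:

#include <stdlib.h>

struct attn {
	void *data;
};

/* Free the buffer and clear the pointer so any later (or re-entrant)
 * path sees NULL instead of a dangling pointer.
 */
static void attn_consume(struct attn *a)
{
	free(a->data);
	a->data = NULL;
}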
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index ae966e333a2f..8a07ae147df6 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -570,14 +570,19 @@ static int rmi_f01_probe(struct rmi_function *fn)
dev_set_drvdata(&fn->dev, f01);
- error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group);
+ error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
if (error)
- dev_warn(&fn->dev,
- "Failed to create attribute group: %d\n", error);
+ dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
return 0;
}
+static void rmi_f01_remove(struct rmi_function *fn)
+{
+ /* Note that the bus device is used, not the F01 device */
+ sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
+}
+
static int rmi_f01_config(struct rmi_function *fn)
{
struct f01_data *f01 = dev_get_drvdata(&fn->dev);
@@ -717,6 +722,7 @@ struct rmi_function_handler rmi_f01_handler = {
},
.func = 0x01,
.probe = rmi_f01_probe,
+ .remove = rmi_f01_remove,
.config = rmi_f01_config,
.attention = rmi_f01_attention,
.suspend = rmi_f01_suspend,
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 516f9fe77a17..fccf55a380b2 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -239,11 +239,11 @@ out:
return retval;
}
-static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
+static __poll_t serio_raw_poll(struct file *file, poll_table *wait)
{
struct serio_raw_client *client = file->private_data;
struct serio_raw *serio_raw = client->serio_raw;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &serio_raw->wait, wait);
diff --git a/drivers/input/serio/userio.c b/drivers/input/serio/userio.c
index df1fd41860ac..a63de06b08bc 100644
--- a/drivers/input/serio/userio.c
+++ b/drivers/input/serio/userio.c
@@ -248,7 +248,7 @@ out:
return error ?: count;
}
-static unsigned int userio_char_poll(struct file *file, poll_table *wait)
+static __poll_t userio_char_poll(struct file *file, poll_table *wait)
{
struct userio_device *userio = file->private_data;
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 7ed828a51f4c..3486d9403805 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
int data, n, ret;
if (!np)
return -ENODEV;
- np = of_find_node_by_name(np, "touch");
+ np = of_get_child_by_name(np, "touch");
if (!np) {
dev_err(&pdev->dev, "Can't find touch node\n");
return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
if (data) {
ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
if (ret < 0)
- return -EINVAL;
+ goto err_put_node;
}
/* set tsi prebias time */
if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
if (ret < 0)
- return -EINVAL;
+ goto err_put_node;
}
/* set prebias & prechg time of pen detect */
data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
if (data) {
ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
if (ret < 0)
- return -EINVAL;
+ goto err_put_node;
}
of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
+
+ of_node_put(np);
+
return 0;
+
+err_put_node:
+ of_node_put(np);
+
+ return -EINVAL;
}
#else
#define pm860x_touch_dt_init(x, y, z) (-1)
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index e102d7764bc2..819213e88f32 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/async.h>
#include <linux/i2c.h>
@@ -999,7 +1000,7 @@ static ssize_t show_iap_mode(struct device *dev,
"Normal" : "Recovery");
}
-static DEVICE_ATTR(calibrate, S_IWUSR, NULL, calibrate_store);
+static DEVICE_ATTR_WO(calibrate);
static DEVICE_ATTR(iap_mode, S_IRUGO, show_iap_mode, NULL);
static DEVICE_ATTR(update_fw, S_IWUSR, NULL, write_update_fw);
@@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client,
}
/*
- * Systems using device tree should set up interrupt via DTS,
- * the rest will use the default falling edge interrupts.
+ * Platform code (ACPI, DTS) should normally set up the interrupt
+ * for us, but in case it did not, let's fall back to using the falling
+ * edge to be compatible with older Chromebooks.
*/
- irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+ irqflags = irq_get_trigger_type(client->irq);
+ if (!irqflags)
+ irqflags = IRQF_TRIGGER_FALLING;
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, elants_i2c_irq,
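Two notes on the elants hunks above. DEVICE_ATTR_WO(calibrate) is shorthand for a write-only device attribute whose store callback must be named calibrate_store(); and the probe now asks the IRQ core for whatever trigger type platform code already configured, falling back to a falling edge only when none is set. A sketch of the DEVICE_ATTR_WO naming convention (the body is a placeholder):

#include <linux/device.h>

/* DEVICE_ATTR_WO(foo) requires a store callback named foo_store(). */
static ssize_t calibrate_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	/* trigger the calibration here */
	return count;
}
static DEVICE_ATTR_WO(calibrate);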
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index fc080a7c2e1f..f1cd4dd9a4a3 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -10,8 +10,7 @@
#include <linux/of.h>
#include <linux/firmware.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index 8d7f9c8f2771..9642f103b726 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -13,6 +13,7 @@
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
+#include <linux/module.h>
static bool touchscreen_get_prop_u32(struct device *dev,
const char *property,
@@ -185,3 +186,6 @@ void touchscreen_report_pos(struct input_dev *input,
input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y);
}
EXPORT_SYMBOL(touchscreen_report_pos);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device-tree helper functions for touchscreen devices");
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index 26b1cb8a88ec..675efa93d444 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Samsung S6SY761 Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// Samsung S6SY761 Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
#include <asm/unaligned.h>
#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index c12d01899939..2a123e20a42e 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * STMicroelectronics FTS Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// STMicroelectronics FTS Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 7d94e1d39e5e..df72493a0f13 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -427,6 +427,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
}
static const struct mmu_notifier_ops iommu_mn = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.release = mn_release,
.clear_flush_young = mn_clear_flush_young,
.invalidate_range = mn_invalidate_range,
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index f122071688fd..744592d330ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
domain->geometry.aperture_end = (1UL << ias) - 1;
domain->geometry.force_aperture = true;
- smmu_domain->pgtbl_ops = pgtbl_ops;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
- if (ret < 0)
+ if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
+ return ret;
+ }
- return ret;
+ smmu_domain->pgtbl_ops = pgtbl_ops;
+ return 0;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
- int i;
+ int i, j;
struct arm_smmu_master_data *master = fwspec->iommu_priv;
struct arm_smmu_device *smmu = master->smmu;
@@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
u32 sid = fwspec->ids[i];
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+ /* Bridged PCI devices may end up with duplicated IDs */
+ for (j = 0; j < i; j++)
+ if (fwspec->ids[j] == sid)
+ break;
+ if (j < i)
+ continue;
+
arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
}
}
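The arm-smmu-v3 hunk above skips stream IDs that already appeared earlier in the fwspec list, since bridged PCI devices can alias onto the same SID, so the stream table entry is only installed once per SID. The same quadratic duplicate check as a standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True if ids[i] already occurred at an index below i. */
static bool seen_before(const uint32_t *ids, size_t i)
{
	for (size_t j = 0; j < i; j++)
		if (ids[j] == ids[i])
			return true;
	return false;
}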
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4a2de34895ec..a1373cf34326 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4808,7 +4808,7 @@ int __init intel_iommu_init(void)
up_write(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
-#ifdef CONFIG_SWIOTLB
+#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
swiotlb = 0;
#endif
dma_ops = &intel_dma_ops;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index ed1cf7c5a43b..0a826eb7fe48 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -276,6 +276,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
}
static const struct mmu_notifier_ops intel_mmuops = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.release = intel_mm_release,
.change_pte = intel_change_pte,
.invalidate_range = intel_invalidate_range,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c70476b34a53..d913aec85109 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -343,4 +343,12 @@ config MESON_IRQ_GPIO
help
Support Meson SoC Family GPIO Interrupt Multiplexer
+config GOLDFISH_PIC
+ bool "Goldfish programmable interrupt controller"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ select IRQ_DOMAIN
+ help
+ Say yes here to enable the Goldfish interrupt controller driver
+ used on Goldfish-based virtual platforms.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d2df34a54d38..d27e3e3619e0 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -84,3 +84,4 @@ obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
+obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 667b9e14b032..dfe4a460340b 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -98,13 +98,35 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = {
.irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
};
-static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip)
-{
- int irq = irq_create_mapping(intc.domain, hwirq);
+static int bcm2836_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip;
+
+ switch (hw) {
+ case LOCAL_IRQ_CNTPSIRQ:
+ case LOCAL_IRQ_CNTPNSIRQ:
+ case LOCAL_IRQ_CNTHPIRQ:
+ case LOCAL_IRQ_CNTVIRQ:
+ chip = &bcm2836_arm_irqchip_timer;
+ break;
+ case LOCAL_IRQ_GPU_FAST:
+ chip = &bcm2836_arm_irqchip_gpu;
+ break;
+ case LOCAL_IRQ_PMU_FAST:
+ chip = &bcm2836_arm_irqchip_pmu;
+ break;
+ default:
+ pr_warn_once("Unexpected hw irq: %lu\n", hw);
+ return -EINVAL;
+ }
irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ return 0;
}
static void
@@ -165,7 +187,8 @@ static int bcm2836_cpu_dying(unsigned int cpu)
#endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
- .xlate = irq_domain_xlate_onecell
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = bcm2836_map,
};
static void
@@ -218,19 +241,6 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
if (!intc.domain)
panic("%pOF: unable to create IRQ domain\n", node);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPNSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTHPIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTVIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_GPU_FAST,
- &bcm2836_arm_irqchip_gpu);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_PMU_FAST,
- &bcm2836_arm_irqchip_pmu);
-
bcm2836_arm_irqchip_smp_init();
set_handle_irq(bcm2836_arm_irqchip_handle_irq);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index b56c3e23f0af..a57c0fbbd34a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1070,31 +1070,6 @@ static int __init gic_validate_dist_version(void __iomem *dist_base)
return 0;
}
-static int get_cpu_number(struct device_node *dn)
-{
- const __be32 *cell;
- u64 hwid;
- int cpu;
-
- cell = of_get_property(dn, "reg", NULL);
- if (!cell)
- return -1;
-
- hwid = of_read_number(cell, of_n_addr_cells(dn));
-
- /*
- * Non affinity bits must be set to 0 in the DT
- */
- if (hwid & ~MPIDR_HWID_BITMASK)
- return -1;
-
- for_each_possible_cpu(cpu)
- if (cpu_logical_map(cpu) == hwid)
- return cpu;
-
- return -1;
-}
-
/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
@@ -1145,8 +1120,8 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
if (WARN_ON(!cpu_node))
continue;
- cpu = get_cpu_number(cpu_node);
- if (WARN_ON(cpu == -1))
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (WARN_ON(cpu < 0))
continue;
pr_cont("%pOF[%d] ", cpu_node, cpu);
@@ -1331,6 +1306,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
void __iomem *redist_base;
+ /* A GICC entry without ACPI_MADT_ENABLED set is not usable, so skip it */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
return -ENOMEM;
@@ -1380,6 +1359,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
return 0;
+ /*
+ * It is perfectly valid for firmware to pass a disabled GICC entry; the
+ * driver should not treat it as an error and should skip the entry
+ * instead of failing the probe.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
return -ENODEV;
}
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
new file mode 100644
index 000000000000..2a92f03c73e4
--- /dev/null
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -0,0 +1,139 @@
+/*
+ * Driver for MIPS Goldfish Programmable Interrupt Controller.
+ *
+ * Author: Miodrag Dinic <miodrag.dinic@mips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define GFPIC_NR_IRQS 32
+
+/* 8..39 Cascaded Goldfish PIC interrupts */
+#define GFPIC_IRQ_BASE 8
+
+#define GFPIC_REG_IRQ_PENDING 0x04
+#define GFPIC_REG_IRQ_DISABLE_ALL 0x08
+#define GFPIC_REG_IRQ_DISABLE 0x0c
+#define GFPIC_REG_IRQ_ENABLE 0x10
+
+struct goldfish_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+};
+
+static void goldfish_pic_cascade(struct irq_desc *desc)
+{
+ struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq, virq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
+ while (pending) {
+ hwirq = __fls(pending);
+ virq = irq_linear_revmap(gfpic->irq_domain, hwirq);
+ generic_handle_irq(virq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static const struct irq_domain_ops goldfish_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init goldfish_pic_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct goldfish_pic_data *gfpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
+ if (!gfpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ gfpic->base = of_iomap(of_node, 0);
+ if (!gfpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ /* Mask interrupts. */
+ writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);
+
+ gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
+ handle_level_irq);
+ if (!gc) {
+ pr_err("Failed to allocate chip structures!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ ct = gc->chip_types;
+ ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
+ ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+
+ gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
+ GFPIC_IRQ_BASE, 0,
+ &goldfish_irq_domain_ops,
+ NULL);
+ if (!gfpic->irq_domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_destroy_generic_chip;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq,
+ goldfish_pic_cascade, gfpic);
+
+ pr_info("Successfully registered.\n");
+ return 0;
+
+out_destroy_generic_chip:
+ irq_destroy_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS),
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+out_iounmap:
+ iounmap(gfpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(gfpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(google_gf_pic, "google,goldfish-pic", goldfish_pic_of_init);
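goldfish_pic_cascade() in the new driver above drains the pending register highest-bit-first via __fls() and hands each hardware IRQ to its mapped virtual IRQ. A standalone sketch of that dispatch loop, with printf() standing in for generic_handle_irq():

#include <stdint.h>
#include <stdio.h>

/* __fls() analogue: index of the highest set bit; pending must be nonzero. */
static unsigned int highest_bit(uint32_t pending)
{
	return 31u - (unsigned int)__builtin_clz(pending);
}

static void dispatch_pending(uint32_t pending)
{
	while (pending) {
		unsigned int hwirq = highest_bit(pending);

		printf("dispatch hwirq %u\n", hwirq);
		pending &= ~(UINT32_C(1) << hwirq);
	}
}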
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
index cf6d0c455518..e66ef4373b1e 100644
--- a/drivers/irqchip/irq-ompic.c
+++ b/drivers/irqchip/irq-ompic.c
@@ -171,9 +171,9 @@ static int __init ompic_of_init(struct device_node *node,
/* Setup the device */
ompic_base = ioremap(res.start, resource_size(&res));
- if (IS_ERR(ompic_base)) {
+ if (!ompic_base) {
pr_err("ompic: unable to map registers");
- return PTR_ERR(ompic_base);
+ return -ENOMEM;
}
irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index dde8f46bc254..e268811dc544 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -724,11 +724,11 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
return count;
}
-static unsigned int
+static __poll_t
capi_poll(struct file *file, poll_table *wait)
{
struct capidev *cdev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!cdev->ap.applid)
return POLLERR;
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 1c5dc345e7c5..34b7704042a4 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -119,10 +119,10 @@ isdn_divert_write(struct file *file, const char __user *buf, size_t count, loff_
/***************************************/
/* select routines for various kernels */
/***************************************/
-static unsigned int
+static __poll_t
isdn_divert_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &(rd_queue), wait);
/* mask = POLLOUT | POLLWRNORM; */
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index 72e58bf07577..70f16102a001 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -98,9 +98,9 @@ void diva_os_get_time(dword *sec, dword *usec)
/*
* device node operations
*/
-static unsigned int maint_poll(struct file *file, poll_table *wait)
+static __poll_t maint_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &msgwaitq, wait);
mask = POLLOUT | POLLWRNORM;
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 0033d74a7291..da5cc5ab7e2d 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -74,7 +74,7 @@ static ssize_t um_idi_read(struct file *file, char __user *buf, size_t count,
loff_t *offset);
static ssize_t um_idi_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset);
-static unsigned int um_idi_poll(struct file *file, poll_table *wait);
+static __poll_t um_idi_poll(struct file *file, poll_table *wait);
static int um_idi_open(struct inode *inode, struct file *file);
static int um_idi_release(struct inode *inode, struct file *file);
static int remove_entity(void *entity);
@@ -365,7 +365,7 @@ um_idi_write(struct file *file, const char __user *buf, size_t count,
return (ret);
}
-static unsigned int um_idi_poll(struct file *file, poll_table *wait)
+static __poll_t um_idi_poll(struct file *file, poll_table *wait)
{
diva_um_idi_os_context_t *p_os;
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index b2023e08dcd2..fbc788e6f0db 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -650,7 +650,7 @@ static ssize_t divas_read(struct file *file, char __user *buf,
return (ret);
}
-static unsigned int divas_poll(struct file *file, poll_table *wait)
+static __poll_t divas_poll(struct file *file, poll_table *wait)
{
if (!file->private_data) {
return (POLLERR);
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index b57efd6ad916..3478f6f099eb 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -99,7 +99,7 @@ divas_write(struct file *file, const char __user *buf, size_t count, loff_t *off
return (-ENODEV);
}
-static unsigned int divas_poll(struct file *file, poll_table *wait)
+static __poll_t divas_poll(struct file *file, poll_table *wait)
{
return (POLLERR);
}
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index aaca0b3d662e..6abea6915f49 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -281,10 +281,10 @@ hysdn_log_close(struct inode *ino, struct file *filep)
/*************************************************/
/* select/poll routine to be able using select() */
/*************************************************/
-static unsigned int
+static __poll_t
hysdn_log_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
hysdn_card *card = PDE_DATA(file_inode(file));
struct procdata *pd = card->proclog;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 8b03d618185e..0521c32949d4 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1227,10 +1227,10 @@ out:
return retval;
}
-static unsigned int
+static __poll_t
isdn_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned int minor = iminor(file_inode(file));
int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index e07aefb9151d..57884319b4b1 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -685,10 +685,10 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
-unsigned int
+__poll_t
isdn_ppp_poll(struct file *file, poll_table *wait)
{
- u_int mask;
+ __poll_t mask;
struct ippp_buf_queue *bf, *bl;
u_long flags;
struct ippp_struct *is;
diff --git a/drivers/isdn/i4l/isdn_ppp.h b/drivers/isdn/i4l/isdn_ppp.h
index 4e9b8935a4eb..34b8a2ce84f3 100644
--- a/drivers/isdn/i4l/isdn_ppp.h
+++ b/drivers/isdn/i4l/isdn_ppp.h
@@ -23,7 +23,7 @@ extern int isdn_ppp_autodial_filter(struct sk_buff *, isdn_net_local *);
extern int isdn_ppp_xmit(struct sk_buff *, struct net_device *);
extern void isdn_ppp_receive(isdn_net_dev *, isdn_net_local *, struct sk_buff *);
extern int isdn_ppp_dev_ioctl(struct net_device *, struct ifreq *, int);
-extern unsigned int isdn_ppp_poll(struct file *, struct poll_table_struct *);
+extern __poll_t isdn_ppp_poll(struct file *, struct poll_table_struct *);
extern int isdn_ppp_ioctl(int, struct file *, unsigned int, unsigned long);
extern void isdn_ppp_release(int, struct file *);
extern int isdn_ppp_dial_slave(char *);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index e3654782a3e2..21d50e4cc5e1 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -645,8 +645,10 @@ l1oip_socket_thread(void *data)
{
struct l1oip *hc = (struct l1oip *)data;
int ret = 0;
- struct msghdr msg;
struct sockaddr_in sin_rx;
+ struct kvec iov;
+ struct msghdr msg = {.msg_name = &sin_rx,
+ .msg_namelen = sizeof(sin_rx)};
unsigned char *recvbuf;
size_t recvbuf_size = 1500;
int recvlen;
@@ -661,6 +663,9 @@ l1oip_socket_thread(void *data)
goto fail;
}
+ iov.iov_base = recvbuf;
+ iov.iov_len = recvbuf_size;
+
/* make daemon */
allow_signal(SIGTERM);
@@ -697,12 +702,6 @@ l1oip_socket_thread(void *data)
goto fail;
}
- /* build receive message */
- msg.msg_name = &sin_rx;
- msg.msg_namelen = sizeof(sin_rx);
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
-
/* build send message */
hc->sendmsg.msg_name = &hc->sin_remote;
hc->sendmsg.msg_namelen = sizeof(hc->sin_remote);
@@ -719,12 +718,9 @@ l1oip_socket_thread(void *data)
printk(KERN_DEBUG "%s: socket created and open\n",
__func__);
while (!signal_pending(current)) {
- struct kvec iov = {
- .iov_base = recvbuf,
- .iov_len = recvbuf_size,
- };
- recvlen = kernel_recvmsg(socket, &msg, &iov, 1,
- recvbuf_size, 0);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1,
+ recvbuf_size);
+ recvlen = sock_recvmsg(socket, &msg, 0);
if (recvlen > 0) {
l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
} else {
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index c50a34340f67..f4272d4e0a26 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -141,11 +141,11 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
return ret;
}
-static unsigned int
+static __poll_t
mISDN_poll(struct file *filep, poll_table *wait)
{
struct mISDNtimerdev *dev = filep->private_data;
- unsigned int mask = POLLERR;
+ __poll_t mask = POLLERR;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 318a28fd58fe..3e763d2a0cb3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -137,6 +137,13 @@ config LEDS_LM3642
converter plus 1.5A constant current driver for a high-current
white LED.
+config LEDS_LM3692X
+ tristate "LED support for LM3692x Chips"
+ depends on LEDS_CLASS && I2C && OF
+ select REGMAP_I2C
+ help
+ This option enables support for the TI LM3692x family
+ of white LED string drivers used for backlighting.
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
@@ -347,7 +354,7 @@ config LEDS_LP8788
config LEDS_LP8860
tristate "LED support for the TI LP8860 4 channel LED driver"
- depends on LEDS_CLASS && I2C
+ depends on LEDS_CLASS && I2C && OF
select REGMAP_I2C
help
If you say yes here you get support for the TI LP8860 4 channel
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index a2a6b5a4f86d..987884a5b9a5 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o
+obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index f3654fd2eaf3..ede4fa0ac2cc 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -186,8 +186,9 @@ void led_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- led_stop_software_blink(led_cdev);
+ del_timer_sync(&led_cdev->blink_timer);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index 9a257f969300..f883616d9e60 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -360,7 +360,8 @@ static int as3645a_set_flash_brightness(struct led_classdev_flash *fled,
{
struct as3645a *flash = fled_to_as3645a(fled);
- flash->flash_current = as3645a_current_to_reg(flash, true, brightness_ua);
+ flash->flash_current = as3645a_current_to_reg(flash, true,
+ brightness_ua);
return as3645a_set_current(flash);
}
@@ -455,8 +456,8 @@ static int as3645a_detect(struct as3645a *flash)
/* Verify the chip model and version. */
if (model != 0x01 || rfu != 0x00) {
- dev_err(dev, "AS3645A not detected "
- "(model %d rfu %d)\n", model, rfu);
+ dev_err(dev, "AS3645A not detected (model %d rfu %d)\n",
+ model, rfu);
return -ENODEV;
}
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index d03ed6b4176b..851c1920b63c 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -549,8 +549,12 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
/* make sure the blinkM is balanced (read/writes) */
while (count > 0) {
ret = blinkm_write(client, BLM_GET_ADDR, NULL);
+ if (ret)
+ return ret;
usleep_range(5000, 10000);
ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
+ if (ret)
+ return ret;
usleep_range(5000, 10000);
if (tmpargs[0] == 0x09)
count = 0;
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
new file mode 100644
index 000000000000..437173d1712c
--- /dev/null
+++ b/drivers/leds/leds-lm3692x.c
@@ -0,0 +1,393 @@
+/*
+ * TI lm3692x LED Driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Data sheet is located at
+ * http://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
+
+#define LM3692X_REV 0x0
+#define LM3692X_RESET 0x1
+#define LM3692X_EN 0x10
+#define LM3692X_BRT_CTRL 0x11
+#define LM3692X_PWM_CTRL 0x12
+#define LM3692X_BOOST_CTRL 0x13
+#define LM3692X_AUTO_FREQ_HI 0x15
+#define LM3692X_AUTO_FREQ_LO 0x16
+#define LM3692X_BL_ADJ_THRESH 0x17
+#define LM3692X_BRT_LSB 0x18
+#define LM3692X_BRT_MSB 0x19
+#define LM3692X_FAULT_CTRL 0x1e
+#define LM3692X_FAULT_FLAGS 0x1f
+
+#define LM3692X_SW_RESET BIT(0)
+#define LM3692X_DEVICE_EN BIT(0)
+#define LM3692X_LED1_EN BIT(1)
+#define LM3692X_LED2_EN BIT(2)
+
+/* Brightness Control Bits */
+#define LM3692X_BL_ADJ_POL BIT(0)
+#define LM3692X_RAMP_RATE_125us 0x00
+#define LM3692X_RAMP_RATE_250us BIT(1)
+#define LM3692X_RAMP_RATE_500us BIT(2)
+#define LM3692X_RAMP_RATE_1ms (BIT(1) | BIT(2))
+#define LM3692X_RAMP_RATE_2ms BIT(3)
+#define LM3692X_RAMP_RATE_4ms (BIT(3) | BIT(1))
+#define LM3692X_RAMP_RATE_8ms (BIT(2) | BIT(3))
+#define LM3692X_RAMP_RATE_16ms (BIT(1) | BIT(2) | BIT(3))
+#define LM3692X_RAMP_EN BIT(4)
+#define LM3692X_BRHT_MODE_REG 0x00
+#define LM3692X_BRHT_MODE_PWM BIT(5)
+#define LM3692X_BRHT_MODE_MULTI_RAMP BIT(6)
+#define LM3692X_BRHT_MODE_RAMP_MULTI (BIT(5) | BIT(6))
+#define LM3692X_MAP_MODE_EXP BIT(7)
+
+/* PWM Register Bits */
+#define LM3692X_PWM_FILTER_100 BIT(0)
+#define LM3692X_PWM_FILTER_150 BIT(1)
+#define LM3692X_PWM_FILTER_200 (BIT(0) | BIT(1))
+#define LM3692X_PWM_HYSTER_1LSB BIT(2)
+#define LM3692X_PWM_HYSTER_2LSB BIT(3)
+#define LM3692X_PWM_HYSTER_3LSB (BIT(3) | BIT(2))
+#define LM3692X_PWM_HYSTER_4LSB BIT(4)
+#define LM3692X_PWM_HYSTER_5LSB (BIT(4) | BIT(2))
+#define LM3692X_PWM_HYSTER_6LSB (BIT(4) | BIT(3))
+#define LM3692X_PWM_POLARITY BIT(5)
+#define LM3692X_PWM_SAMP_4MHZ BIT(6)
+#define LM3692X_PWM_SAMP_24MHZ BIT(7)
+
+/* Boost Control Bits */
+#define LM3692X_OCP_PROT_1A BIT(0)
+#define LM3692X_OCP_PROT_1_25A BIT(1)
+#define LM3692X_OCP_PROT_1_5A (BIT(0) | BIT(1))
+#define LM3692X_OVP_21V BIT(2)
+#define LM3692X_OVP_25V BIT(3)
+#define LM3692X_OVP_29V (BIT(2) | BIT(3))
+#define LM3692X_MIN_IND_22UH BIT(4)
+#define LM3692X_BOOST_SW_1MHZ BIT(5)
+#define LM3692X_BOOST_SW_NO_SHIFT BIT(6)
+
+/* Fault Control Bits */
+#define LM3692X_FAULT_CTRL_OVP BIT(0)
+#define LM3692X_FAULT_CTRL_OCP BIT(1)
+#define LM3692X_FAULT_CTRL_TSD BIT(2)
+#define LM3692X_FAULT_CTRL_OPEN BIT(3)
+
+/* Fault Flag Bits */
+#define LM3692X_FAULT_FLAG_OVP BIT(0)
+#define LM3692X_FAULT_FLAG_OCP BIT(1)
+#define LM3692X_FAULT_FLAG_TSD BIT(2)
+#define LM3692X_FAULT_FLAG_SHRT BIT(3)
+#define LM3692X_FAULT_FLAG_OPEN BIT(4)
+
+/**
+ * struct lm3692x_led -
+ * @lock - Lock for reading/writing the device
+ * @client - Pointer to the I2C client
+ * @led_dev - LED class device pointer
+ * @regmap - Device's register map
+ * @enable_gpio - VDDIO/EN gpio to enable communication interface
+ * @regulator - LED supply regulator pointer
+ * @label - LED label
+ */
+struct lm3692x_led {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct led_classdev led_dev;
+ struct regmap *regmap;
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ char label[LED_MAX_NAME_SIZE];
+};
+
+static const struct reg_default lm3692x_reg_defs[] = {
+ {LM3692X_EN, 0xf},
+ {LM3692X_BRT_CTRL, 0x61},
+ {LM3692X_PWM_CTRL, 0x73},
+ {LM3692X_BOOST_CTRL, 0x6f},
+ {LM3692X_AUTO_FREQ_HI, 0x0},
+ {LM3692X_AUTO_FREQ_LO, 0x0},
+ {LM3692X_BL_ADJ_THRESH, 0x0},
+ {LM3692X_BRT_LSB, 0x7},
+ {LM3692X_BRT_MSB, 0xff},
+ {LM3692X_FAULT_CTRL, 0x7},
+};
+
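+/* 8-bit register map, cached (rbtree) and seeded from the power-on defaults */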
+static const struct regmap_config lm3692x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LM3692X_FAULT_FLAGS,
+ .reg_defaults = lm3692x_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lm3692x_reg_defs),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int lm3692x_fault_check(struct lm3692x_led *led)
+{
+ int ret;
+ unsigned int read_buf;
+
+ ret = regmap_read(led->regmap, LM3692X_FAULT_FLAGS, &read_buf);
+ if (ret)
+ return ret;
+
+ if (read_buf)
+ dev_err(&led->client->dev, "Detected a fault 0x%X\n", read_buf);
+
+	/* The first read may clear the fault. Check again to see if the fault
+	 * still exists and return that value.
+ */
+ regmap_read(led->regmap, LM3692X_FAULT_FLAGS, &read_buf);
+ if (read_buf)
+ dev_err(&led->client->dev, "Second read of fault flags 0x%X\n",
+ read_buf);
+
+ return read_buf;
+}
+
+static int lm3692x_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lm3692x_led *led =
+ container_of(led_cdev, struct lm3692x_led, led_dev);
+ int ret;
+ int led_brightness_lsb = (brt_val >> 5);
+
+ mutex_lock(&led->lock);
+
+ ret = lm3692x_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write MSB\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write LSB\n");
+ goto out;
+ }
+out:
+ mutex_unlock(&led->lock);
+ return ret;
+}
+
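+/*
+ * Power the part up (regulator and enable GPIO), clear any latched
+ * faults, then program the control registers while the device enable
+ * bit is still 0; on any failure the part is powered back down.
+ */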
+static int lm3692x_init(struct lm3692x_led *led)
+{
+ int ret;
+
+ if (led->regulator) {
+ ret = regulator_enable(led->regulator);
+ if (ret) {
+ dev_err(&led->client->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
+ }
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 1);
+
+ ret = lm3692x_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_CTRL, 0x00);
+ if (ret)
+ goto out;
+
+ /*
+	 * For glitch-free operation, the following registers should
+	 * only be written while the device enable bit is 0,
+	 * per Section 7.5.14 of the data sheet.
+ */
+ ret = regmap_write(led->regmap, LM3692X_PWM_CTRL,
+ LM3692X_PWM_FILTER_100 | LM3692X_PWM_SAMP_24MHZ);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL,
+ LM3692X_BRHT_MODE_RAMP_MULTI |
+ LM3692X_BL_ADJ_POL |
+ LM3692X_RAMP_RATE_250us);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_AUTO_FREQ_HI, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_AUTO_FREQ_LO, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BL_ADJ_THRESH, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_CTRL,
+ LM3692X_BL_ADJ_POL | LM3692X_PWM_HYSTER_4LSB);
+ if (ret)
+ goto out;
+
+ return ret;
+out:
+	dev_err(&led->client->dev, "Failed writing initialization values\n");
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ return ret;
+}
+
+static int lm3692x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lm3692x_led *led;
+ struct device_node *np = client->dev.of_node;
+ struct device_node *child_node;
+ const char *name;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, child_node) {
+ led->led_dev.default_trigger = of_get_property(child_node,
+ "linux,default-trigger",
+ NULL);
+
+ ret = of_property_read_string(child_node, "label", &name);
+ if (!ret)
+ snprintf(led->label, sizeof(led->label),
+ "%s:%s", id->name, name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s::backlight_cluster", id->name);
+	}
+
+ led->enable_gpio = devm_gpiod_get_optional(&client->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(led->enable_gpio)) {
+ ret = PTR_ERR(led->enable_gpio);
+ dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
+ return ret;
+ }
+
+ led->regulator = devm_regulator_get(&client->dev, "vled");
+ if (IS_ERR(led->regulator))
+ led->regulator = NULL;
+
+ led->client = client;
+ led->led_dev.name = led->label;
+ led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
+
+ mutex_init(&led->lock);
+
+ i2c_set_clientdata(client, led);
+
+ led->regmap = devm_regmap_init_i2c(client, &lm3692x_regmap_config);
+ if (IS_ERR(led->regmap)) {
+ ret = PTR_ERR(led->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = lm3692x_init(led);
+ if (ret)
+ return ret;
+
+ ret = devm_led_classdev_register(&client->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&client->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lm3692x_remove(struct i2c_client *client)
+{
+ struct lm3692x_led *led = i2c_get_clientdata(client);
+ int ret;
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ mutex_destroy(&led->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3692x_id[] = {
+ { "lm36922", 0 },
+ { "lm36923", 1 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm3692x_id);
+
+static const struct of_device_id of_lm3692x_leds_match[] = {
+ { .compatible = "ti,lm36922", },
+ { .compatible = "ti,lm36923", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lm3692x_leds_match);
+
+static struct i2c_driver lm3692x_driver = {
+ .driver = {
+ .name = "lm3692x",
+ .of_match_table = of_lm3692x_leds_match,
+ },
+ .probe = lm3692x_probe,
+ .remove = lm3692x_remove,
+ .id_table = lm3692x_id,
+};
+module_i2c_driver(lm3692x_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LM3692X LED driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 3e70775a2d54..39c72a908f3b 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -22,6 +22,7 @@
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
#define LP8860_DISP_CL1_BRT_MSB 0x00
#define LP8860_DISP_CL1_BRT_LSB 0x01
@@ -86,8 +87,6 @@
#define LP8860_CLEAR_FAULTS 0x01
-#define LP8860_DISP_LED_NAME "display_cluster"
-
/**
* struct lp8860_led -
* @lock - Lock for reading/writing the device
@@ -98,7 +97,7 @@
* @enable_gpio - VDDIO/EN gpio to enable communication interface
* @regulator - LED supply regulator pointer
* @label - LED label
-**/
+ */
struct lp8860_led {
struct mutex lock;
struct i2c_client *client;
@@ -107,7 +106,7 @@ struct lp8860_led {
struct regmap *eeprom_regmap;
struct gpio_desc *enable_gpio;
struct regulator *regulator;
- const char *label;
+ char label[LED_MAX_NAME_SIZE];
};
struct lp8860_eeprom_reg {
@@ -247,6 +246,15 @@ static int lp8860_init(struct lp8860_led *led)
unsigned int read_buf;
int ret, i, reg_count;
+ if (led->regulator) {
+ ret = regulator_enable(led->regulator);
+ if (ret) {
+ dev_err(&led->client->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
+ }
+
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 1);
@@ -282,12 +290,25 @@ static int lp8860_init(struct lp8860_led *led)
ret = regmap_write(led->regmap,
LP8860_EEPROM_CNTRL,
LP8860_PROGRAM_EEPROM);
- if (ret)
+ if (ret) {
dev_err(&led->client->dev, "Failed programming EEPROM\n");
+ goto out;
+ }
+
+ return ret;
+
out:
if (ret)
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
return ret;
}
@@ -365,19 +386,25 @@ static int lp8860_probe(struct i2c_client *client,
int ret;
struct lp8860_led *led;
struct device_node *np = client->dev.of_node;
+ struct device_node *child_node;
+ const char *name;
led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
- led->label = LP8860_DISP_LED_NAME;
-
- if (client->dev.of_node) {
- ret = of_property_read_string(np, "label", &led->label);
- if (ret) {
- dev_err(&client->dev, "Missing label in dt\n");
- return -EINVAL;
- }
+ for_each_available_child_of_node(np, child_node) {
+ led->led_dev.default_trigger = of_get_property(child_node,
+ "linux,default-trigger",
+ NULL);
+
+ ret = of_property_read_string(child_node, "label", &name);
+ if (!ret)
+ snprintf(led->label, sizeof(led->label), "%s:%s",
+ id->name, name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s::display_cluster", id->name);
}
led->enable_gpio = devm_gpiod_get_optional(&client->dev,
@@ -394,7 +421,6 @@ static int lp8860_probe(struct i2c_client *client,
led->client = client;
led->led_dev.name = led->label;
- led->led_dev.max_brightness = LED_FULL;
led->led_dev.brightness_set_blocking = lp8860_brightness_set;
mutex_init(&led->lock);
@@ -421,7 +447,7 @@ static int lp8860_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = led_classdev_register(&client->dev, &led->led_dev);
+ ret = devm_led_classdev_register(&client->dev, &led->led_dev);
if (ret) {
dev_err(&client->dev, "led register err: %d\n", ret);
return ret;
@@ -435,8 +461,6 @@ static int lp8860_remove(struct i2c_client *client)
struct lp8860_led *led = i2c_get_clientdata(client);
int ret;
- led_classdev_unregister(&led->led_dev);
-
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
@@ -447,6 +471,8 @@ static int lp8860_remove(struct i2c_client *client)
"Failed to disable regulator\n");
}
+ mutex_destroy(&led->lock);
+
return 0;
}
@@ -456,18 +482,16 @@ static const struct i2c_device_id lp8860_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp8860_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp8860_leds_match[] = {
{ .compatible = "ti,lp8860", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp8860_leds_match);
-#endif
static struct i2c_driver lp8860_driver = {
.driver = {
.name = "lp8860",
- .of_match_table = of_match_ptr(of_lp8860_leds_match),
+ .of_match_table = of_lp8860_leds_match,
},
.probe = lp8860_probe,
.remove = lp8860_remove,
@@ -477,4 +501,4 @@ module_i2c_driver(lp8860_driver);
MODULE_DESCRIPTION("Texas Instruments LP8860 LED driver");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index a52674327857..8988ba3b2d65 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev)
if (!led)
return -ENOMEM;
- led->ledtype = (u32)of_device_get_match_data(&pdev->dev);
+ led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);
map = dev_get_regmap(pdev->dev.parent, NULL);
if (!map) {
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 8d456dc6c5bf..df80c89ebe7f 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
-#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index bb090216b4dc..a2559b4fdfff 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -81,7 +81,7 @@ config LEDS_TRIGGER_ACTIVITY
tristate "LED activity Trigger"
depends on LEDS_TRIGGERS
help
- This allows LEDs to be controlled by a immediate CPU usage.
+ This allows LEDs to be controlled by an immediate CPU usage.
The flash frequency and duty cycle vary from faint flashes to
intense brightness depending on the instant CPU load.
If unsure, say N.
@@ -135,4 +135,11 @@ config LEDS_TRIGGER_PANIC
a different trigger.
If unsure, say Y.
+config LEDS_TRIGGER_NETDEV
+ tristate "LED Netdev Trigger"
+ depends on NET && LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by network device activity.
+ If unsure, say Y.
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 4a8b6cff7761..f3cfe1950538 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
+obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
new file mode 100644
index 000000000000..6df4781a6308
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2017 Ben Whitten <ben.whitten@gmail.com>
+// Copyright 2007 Oliver Jowett <oliver@opencloud.com>
+//
+// LED Kernel Netdev Trigger
+//
+// Toggles the LED to reflect the link and traffic state of a named net device
+//
+// Derived from ledtrig-timer.c which is:
+// Copyright 2005-2006 Openedhand Ltd.
+// Author: Richard Purdie <rpurdie@openedhand.com>
+
+#include <linux/atomic.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include "../leds.h"
+
+/*
+ * Configurable sysfs attributes:
+ *
+ * device_name - network device name to monitor
+ * interval - duration of LED blink, in milliseconds
+ * link - LED's normal state reflects whether the link is up
+ * (has carrier) or not
+ * tx - LED blinks on transmitted data
+ * rx - LED blinks on received data
+ */
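+
+/*
+ * Userspace sketch (hypothetical LED and interface names): once the
+ * "netdev" trigger is attached to a LED, the attributes above appear
+ * under its sysfs directory, e.g.:
+ *
+ *	echo netdev > /sys/class/leds/led0/trigger
+ *	echo eth0 > /sys/class/leds/led0/device_name
+ *	echo 1 > /sys/class/leds/led0/link
+ *	echo 1 > /sys/class/leds/led0/rx
+ *	echo 50 > /sys/class/leds/led0/interval
+ */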
+
+struct led_netdev_data {
+ spinlock_t lock;
+
+ struct delayed_work work;
+ struct notifier_block notifier;
+
+ struct led_classdev *led_cdev;
+ struct net_device *net_dev;
+
+ char device_name[IFNAMSIZ];
+ atomic_t interval;
+ unsigned int last_activity;
+
+ unsigned long mode;
+#define NETDEV_LED_LINK 0
+#define NETDEV_LED_TX 1
+#define NETDEV_LED_RX 2
+#define NETDEV_LED_MODE_LINKUP 3
+};
+
+enum netdev_led_attr {
+ NETDEV_ATTR_LINK,
+ NETDEV_ATTR_TX,
+ NETDEV_ATTR_RX
+};
+
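+/*
+ * Restore the LED to its steady state: off while the device has no
+ * carrier; with carrier present, on at the remembered blink brightness
+ * when link monitoring is enabled, otherwise off. While the link is
+ * up, stats polling is (re)scheduled if rx/tx activity is monitored.
+ */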
+static void set_baseline_state(struct led_netdev_data *trigger_data)
+{
+ int current_brightness;
+ struct led_classdev *led_cdev = trigger_data->led_cdev;
+
+ current_brightness = led_cdev->brightness;
+ if (current_brightness)
+ led_cdev->blink_brightness = current_brightness;
+ if (!led_cdev->blink_brightness)
+ led_cdev->blink_brightness = led_cdev->max_brightness;
+
+ if (!test_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode))
+ led_set_brightness(led_cdev, LED_OFF);
+ else {
+ if (test_bit(NETDEV_LED_LINK, &trigger_data->mode))
+ led_set_brightness(led_cdev,
+ led_cdev->blink_brightness);
+ else
+ led_set_brightness(led_cdev, LED_OFF);
+
+		/* If we are looking for RX/TX, start periodically
+ * checking stats
+ */
+ if (test_bit(NETDEV_LED_TX, &trigger_data->mode) ||
+ test_bit(NETDEV_LED_RX, &trigger_data->mode))
+ schedule_delayed_work(&trigger_data->work, 0);
+ }
+}
+
+static ssize_t device_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ ssize_t len;
+
+ spin_lock_bh(&trigger_data->lock);
+ len = sprintf(buf, "%s\n", trigger_data->device_name);
+ spin_unlock_bh(&trigger_data->lock);
+
+ return len;
+}
+
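+/*
+ * Writing a name re-binds the trigger: the reference to any previous
+ * device is dropped, the named device (if it exists) is looked up and
+ * held, and the LED baseline state is recomputed from the device's
+ * current carrier state.
+ */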
+static ssize_t device_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (size >= IFNAMSIZ)
+ return -EINVAL;
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ spin_lock_bh(&trigger_data->lock);
+
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+
+	memcpy(trigger_data->device_name, buf, size);
+	trigger_data->device_name[size] = 0;
+	if (size > 0 && trigger_data->device_name[size - 1] == '\n')
+		trigger_data->device_name[size - 1] = 0;
+
+ if (trigger_data->device_name[0] != 0)
+ trigger_data->net_dev =
+ dev_get_by_name(&init_net, trigger_data->device_name);
+
+ clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ if (trigger_data->net_dev != NULL)
+ if (netif_carrier_ok(trigger_data->net_dev))
+ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+
+ trigger_data->last_activity = 0;
+
+ set_baseline_state(trigger_data);
+ spin_unlock_bh(&trigger_data->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(device_name);
+
+static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
+ enum netdev_led_attr attr)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ int bit;
+
+ switch (attr) {
+ case NETDEV_ATTR_LINK:
+ bit = NETDEV_LED_LINK;
+ break;
+ case NETDEV_ATTR_TX:
+ bit = NETDEV_LED_TX;
+ break;
+ case NETDEV_ATTR_RX:
+ bit = NETDEV_LED_RX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%u\n", test_bit(bit, &trigger_data->mode));
+}
+
+static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
+ size_t size, enum netdev_led_attr attr)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ unsigned long state;
+ int ret;
+ int bit;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ switch (attr) {
+ case NETDEV_ATTR_LINK:
+ bit = NETDEV_LED_LINK;
+ break;
+ case NETDEV_ATTR_TX:
+ bit = NETDEV_LED_TX;
+ break;
+ case NETDEV_ATTR_RX:
+ bit = NETDEV_LED_RX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ if (state)
+ set_bit(bit, &trigger_data->mode);
+ else
+ clear_bit(bit, &trigger_data->mode);
+
+ set_baseline_state(trigger_data);
+
+ return size;
+}
+
+static ssize_t link_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_LINK);
+}
+
+static ssize_t link_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_LINK);
+}
+
+static DEVICE_ATTR_RW(link);
+
+static ssize_t tx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_TX);
+}
+
+static ssize_t tx_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_TX);
+}
+
+static DEVICE_ATTR_RW(tx);
+
+static ssize_t rx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_RX);
+}
+
+static ssize_t rx_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_RX);
+}
+
+static DEVICE_ATTR_RW(rx);
+
+static ssize_t interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%u\n",
+ jiffies_to_msecs(atomic_read(&trigger_data->interval)));
+}
+
+static ssize_t interval_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ /* impose some basic bounds on the timer interval */
+ if (value >= 5 && value <= 10000) {
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+ set_baseline_state(trigger_data); /* resets timer */
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(interval);
+
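+/*
+ * Track the monitored device across its lifetime: hold a reference on
+ * NETDEV_REGISTER, drop it on unregister/rename, and mirror carrier
+ * changes into the LINKUP mode bit before recomputing the baseline.
+ */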
+static int netdev_trig_notify(struct notifier_block *nb,
+ unsigned long evt, void *dv)
+{
+ struct net_device *dev =
+ netdev_notifier_info_to_dev((struct netdev_notifier_info *)dv);
+	struct led_netdev_data *trigger_data =
+		container_of(nb, struct led_netdev_data, notifier);
+
+ if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
+ && evt != NETDEV_CHANGENAME)
+ return NOTIFY_DONE;
+
+ if (strcmp(dev->name, trigger_data->device_name))
+ return NOTIFY_DONE;
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ spin_lock_bh(&trigger_data->lock);
+
+ clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ switch (evt) {
+ case NETDEV_REGISTER:
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
+ dev_hold(dev);
+ trigger_data->net_dev = dev;
+ break;
+ case NETDEV_CHANGENAME:
+ case NETDEV_UNREGISTER:
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev))
+ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ break;
+ }
+
+ set_baseline_state(trigger_data);
+
+ spin_unlock_bh(&trigger_data->lock);
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * Here is the real work: poll the device statistics and fire a oneshot
+ * blink whenever the monitored rx/tx packet counters have moved since
+ * the last pass.
+ */
+static void netdev_trig_work(struct work_struct *work)
+{
+	struct led_netdev_data *trigger_data =
+		container_of(work, struct led_netdev_data, work.work);
+ struct rtnl_link_stats64 *dev_stats;
+ unsigned int new_activity;
+ struct rtnl_link_stats64 temp;
+ unsigned long interval;
+ int invert;
+
+	/* If we don't have a device, ensure we are off */
+ if (!trigger_data->net_dev) {
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ return;
+ }
+
+	/* If we are not looking for RX/TX, then return */
+ if (!test_bit(NETDEV_LED_TX, &trigger_data->mode) &&
+ !test_bit(NETDEV_LED_RX, &trigger_data->mode))
+ return;
+
+ dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
+ new_activity =
+ (test_bit(NETDEV_LED_TX, &trigger_data->mode) ?
+ dev_stats->tx_packets : 0) +
+ (test_bit(NETDEV_LED_RX, &trigger_data->mode) ?
+ dev_stats->rx_packets : 0);
+
+ if (trigger_data->last_activity != new_activity) {
+ led_stop_software_blink(trigger_data->led_cdev);
+
+ invert = test_bit(NETDEV_LED_LINK, &trigger_data->mode);
+ interval = jiffies_to_msecs(
+ atomic_read(&trigger_data->interval));
+ /* base state is ON (link present) */
+ led_blink_set_oneshot(trigger_data->led_cdev,
+ &interval,
+ &interval,
+ invert);
+ trigger_data->last_activity = new_activity;
+ }
+
+ schedule_delayed_work(&trigger_data->work,
+			      (atomic_read(&trigger_data->interval) * 2));
+}
+
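+/*
+ * Trigger attach: allocate the per-LED state, expose the sysfs
+ * attributes and register the netdevice notifier, unwinding in
+ * reverse order if any step fails.
+ */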
+static void netdev_trig_activate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data;
+ int rc;
+
+ trigger_data = kzalloc(sizeof(struct led_netdev_data), GFP_KERNEL);
+ if (!trigger_data)
+ return;
+
+ spin_lock_init(&trigger_data->lock);
+
+ trigger_data->notifier.notifier_call = netdev_trig_notify;
+ trigger_data->notifier.priority = 10;
+
+ INIT_DELAYED_WORK(&trigger_data->work, netdev_trig_work);
+
+ trigger_data->led_cdev = led_cdev;
+ trigger_data->net_dev = NULL;
+ trigger_data->device_name[0] = 0;
+
+ trigger_data->mode = 0;
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(50));
+ trigger_data->last_activity = 0;
+
+ led_cdev->trigger_data = trigger_data;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
+ if (rc)
+ goto err_out;
+ rc = device_create_file(led_cdev->dev, &dev_attr_link);
+ if (rc)
+ goto err_out_device_name;
+ rc = device_create_file(led_cdev->dev, &dev_attr_rx);
+ if (rc)
+ goto err_out_link;
+ rc = device_create_file(led_cdev->dev, &dev_attr_tx);
+ if (rc)
+ goto err_out_rx;
+ rc = device_create_file(led_cdev->dev, &dev_attr_interval);
+ if (rc)
+ goto err_out_tx;
+ rc = register_netdevice_notifier(&trigger_data->notifier);
+ if (rc)
+ goto err_out_interval;
+ return;
+
+err_out_interval:
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+err_out_tx:
+ device_remove_file(led_cdev->dev, &dev_attr_tx);
+err_out_rx:
+ device_remove_file(led_cdev->dev, &dev_attr_rx);
+err_out_link:
+ device_remove_file(led_cdev->dev, &dev_attr_link);
+err_out_device_name:
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+err_out:
+ led_cdev->trigger_data = NULL;
+ kfree(trigger_data);
+}
+
+static void netdev_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (trigger_data) {
+ unregister_netdevice_notifier(&trigger_data->notifier);
+
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+ device_remove_file(led_cdev->dev, &dev_attr_link);
+ device_remove_file(led_cdev->dev, &dev_attr_rx);
+ device_remove_file(led_cdev->dev, &dev_attr_tx);
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
+
+ kfree(trigger_data);
+ }
+}
+
+static struct led_trigger netdev_led_trigger = {
+ .name = "netdev",
+ .activate = netdev_trig_activate,
+ .deactivate = netdev_trig_deactivate,
+};
+
+static int __init netdev_trig_init(void)
+{
+ return led_trigger_register(&netdev_led_trigger);
+}
+
+static void __exit netdev_trig_exit(void)
+{
+ led_trigger_unregister(&netdev_led_trigger);
+}
+
+module_init(netdev_trig_init);
+module_exit(netdev_trig_exit);
+
+MODULE_AUTHOR("Ben Whitten <ben.whitten@gmail.com>");
+MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
+MODULE_DESCRIPTION("Netdev LED trigger");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index 7acce64b692a..9d1769073562 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -1,22 +1,15 @@
-/*
- * LED Kernel Transient Trigger
- *
- * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
- *
- * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
- * ledtrig-heartbeat.c
- * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
- * Neil Brown <neilb@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-/*
- * Transient trigger allows one shot timer activation. Please refer to
- * Documentation/leds/ledtrig-transient.txt for details
-*/
+// SPDX-License-Identifier: GPL-2.0
+//
+// LED Kernel Transient Trigger
+//
+// Transient trigger allows one shot timer activation. Please refer to
+// Documentation/leds/ledtrig-transient.txt for details
+// Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+//
+// Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+// ledtrig-heartbeat.c
+// Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+// Neil Brown <neilb@suse.de>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -238,4 +231,4 @@ module_exit(transient_trig_exit);
MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
MODULE_DESCRIPTION("Transient LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
index 5e9e8a1fdefb..5beacab05ed7 100644
--- a/drivers/leds/uleds.c
+++ b/drivers/leds/uleds.c
@@ -176,7 +176,7 @@ static ssize_t uleds_read(struct file *file, char __user *buffer, size_t count,
return retval;
}
-static unsigned int uleds_poll(struct file *file, poll_table *wait)
+static __poll_t uleds_poll(struct file *file, poll_table *wait)
{
struct uleds_device *udev = file->private_data;
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 2a953efec4e1..10c08982185a 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -27,13 +27,6 @@ config NVM_DEBUG
It is required to create/remove targets without IOCTLs.
-config NVM_RRPC
- tristate "Round-robin Hybrid Open-Channel SSD target"
- ---help---
- Allows an open-channel SSD to be exposed as a block device to the
- host. The target is implemented using a linear mapping table and
- cost-based garbage collection. It is optimized for 4K IO sizes.
-
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
---help---
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 2c3fd9d2c08c..97d9d7c71550 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -4,7 +4,6 @@
#
obj-$(CONFIG_NVM) := core.o
-obj-$(CONFIG_NVM_RRPC) += rrpc.o
obj-$(CONFIG_NVM_PBLK) += pblk.o
pblk-y := pblk-init.o pblk-core.o pblk-rb.o \
pblk-write.o pblk-cache.o pblk-read.o \
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 83249b43dd06..dcc9e621e651 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -45,12 +45,6 @@ struct nvm_dev_map {
int nr_chnls;
};
-struct nvm_area {
- struct list_head list;
- sector_t begin;
- sector_t end; /* end is excluded */
-};
-
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
struct nvm_target *tgt;
@@ -62,6 +56,30 @@ static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
return NULL;
}
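+
+/*
+ * A target name must be unique across all devices; walk every
+ * registered device's target list under nvm_lock so the check is
+ * atomic with respect to concurrent target creation.
+ */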
+static bool nvm_target_exists(const char *name)
+{
+ struct nvm_dev *dev;
+ struct nvm_target *tgt;
+ bool ret = false;
+
+ down_write(&nvm_lock);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ mutex_lock(&dev->mlock);
+ list_for_each_entry(tgt, &dev->targets, list) {
+ if (!strcmp(name, tgt->disk->disk_name)) {
+ ret = true;
+ mutex_unlock(&dev->mlock);
+ goto out;
+ }
+ }
+ mutex_unlock(&dev->mlock);
+ }
+
+out:
+ up_write(&nvm_lock);
+ return ret;
+}
+
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
int i;
@@ -104,7 +122,7 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
if (clear) {
for (j = 0; j < ch_map->nr_luns; j++) {
int lun = j + lun_offs[j];
- int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+ int lunid = (ch * dev->geo.nr_luns) + lun;
WARN_ON(!test_and_clear_bit(lunid,
dev->lun_map));
@@ -122,7 +140,8 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
}
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
- int lun_begin, int lun_end)
+ u16 lun_begin, u16 lun_end,
+ u16 op)
{
struct nvm_tgt_dev *tgt_dev = NULL;
struct nvm_dev_map *dev_rmap = dev->rmap;
@@ -130,10 +149,10 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
struct ppa_addr *luns;
int nr_luns = lun_end - lun_begin + 1;
int luns_left = nr_luns;
- int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
- int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
- int bch = lun_begin / dev->geo.luns_per_chnl;
- int blun = lun_begin % dev->geo.luns_per_chnl;
+ int nr_chnls = nr_luns / dev->geo.nr_luns;
+ int nr_chnls_mod = nr_luns % dev->geo.nr_luns;
+ int bch = lun_begin / dev->geo.nr_luns;
+ int blun = lun_begin % dev->geo.nr_luns;
int lunid = 0;
int lun_balanced = 1;
int prev_nr_luns;
@@ -154,15 +173,15 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
if (!luns)
goto err_luns;
- prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ prev_nr_luns = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
for (i = 0; i < nr_chnls; i++) {
struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
int *lun_roffs = ch_rmap->lun_offs;
struct nvm_ch_map *ch_map = &dev_map->chnls[i];
int *lun_offs;
- int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ int luns_in_chnl = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
if (lun_balanced && prev_nr_luns != luns_in_chnl)
lun_balanced = 0;
@@ -199,8 +218,9 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
/* Target device only owns a portion of the physical device */
tgt_dev->geo.nr_chnls = nr_chnls;
- tgt_dev->geo.nr_luns = nr_luns;
- tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.all_luns = nr_luns;
+ tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.op = op;
tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
tgt_dev->q = dev->q;
tgt_dev->map = dev_map;
@@ -226,27 +246,79 @@ static const struct block_device_operations nvm_fops = {
.owner = THIS_MODULE,
};
-static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
+static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
- struct nvm_tgt_type *tmp, *tt = NULL;
+ struct nvm_tgt_type *tt;
- if (lock)
- down_write(&nvm_tgtt_lock);
+ list_for_each_entry(tt, &nvm_tgt_types, list)
+ if (!strcmp(name, tt->name))
+ return tt;
- list_for_each_entry(tmp, &nvm_tgt_types, list)
- if (!strcmp(name, tmp->name)) {
- tt = tmp;
- break;
- }
+ return NULL;
+}
+
+static struct nvm_tgt_type *nvm_find_target_type(const char *name)
+{
+ struct nvm_tgt_type *tt;
+
+ down_write(&nvm_tgtt_lock);
+ tt = __nvm_find_target_type(name);
+ up_write(&nvm_tgtt_lock);
- if (lock)
- up_write(&nvm_tgtt_lock);
return tt;
}
+static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
+ int lun_end)
+{
+ if (lun_begin > lun_end || lun_end >= geo->all_luns) {
+		pr_err("nvm: lun out of bounds (%u:%u > %u)\n",
+ lun_begin, lun_end, geo->all_luns - 1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __nvm_config_simple(struct nvm_dev *dev,
+ struct nvm_ioctl_create_simple *s)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (s->lun_begin == -1 && s->lun_end == -1) {
+ s->lun_begin = 0;
+ s->lun_end = geo->all_luns - 1;
+ }
+
+ return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
+}
+
+static int __nvm_config_extended(struct nvm_dev *dev,
+ struct nvm_ioctl_create_extended *e)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
+ e->lun_begin = 0;
+ e->lun_end = dev->geo.all_luns - 1;
+ }
+
+ /* op not set falls into target's default */
+ if (e->op == 0xFFFF)
+ e->op = NVM_TARGET_DEFAULT_OP;
+
+ if (e->op < NVM_TARGET_MIN_OP ||
+ e->op > NVM_TARGET_MAX_OP) {
+ pr_err("nvm: invalid over provisioning value\n");
+ return -EINVAL;
+ }
+
+ return nvm_config_check_luns(geo, e->lun_begin, e->lun_end);
+}
+
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
- struct nvm_ioctl_create_simple *s = &create->conf.s;
+ struct nvm_ioctl_create_extended e;
struct request_queue *tqueue;
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
@@ -255,22 +327,41 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
void *targetdata;
int ret;
- tt = nvm_find_target_type(create->tgttype, 1);
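+	/* Normalize either ioctl config layout into one extended config */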
+ switch (create->conf.type) {
+ case NVM_CONFIG_TYPE_SIMPLE:
+ ret = __nvm_config_simple(dev, &create->conf.s);
+ if (ret)
+ return ret;
+
+ e.lun_begin = create->conf.s.lun_begin;
+ e.lun_end = create->conf.s.lun_end;
+ e.op = NVM_TARGET_DEFAULT_OP;
+ break;
+ case NVM_CONFIG_TYPE_EXTENDED:
+ ret = __nvm_config_extended(dev, &create->conf.e);
+ if (ret)
+ return ret;
+
+ e = create->conf.e;
+ break;
+ default:
+ pr_err("nvm: config type not valid\n");
+ return -EINVAL;
+ }
+
+ tt = nvm_find_target_type(create->tgttype);
if (!tt) {
pr_err("nvm: target type %s not found\n", create->tgttype);
return -EINVAL;
}
- mutex_lock(&dev->mlock);
- t = nvm_find_target(dev, create->tgtname);
- if (t) {
- pr_err("nvm: target name already exists.\n");
- mutex_unlock(&dev->mlock);
+ if (nvm_target_exists(create->tgtname)) {
+ pr_err("nvm: target name already exists (%s)\n",
+ create->tgtname);
return -EINVAL;
}
- mutex_unlock(&dev->mlock);
- ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
+ ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
if (ret)
return ret;
@@ -280,7 +371,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_reserve;
}
- tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+ tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
if (!tgt_dev) {
pr_err("nvm: could not create target device\n");
ret = -ENOMEM;
@@ -350,7 +441,7 @@ err_dev:
err_t:
kfree(t);
err_reserve:
- nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
+ nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
return ret;
}
@@ -420,7 +511,7 @@ static int nvm_register_map(struct nvm_dev *dev)
for (i = 0; i < dev->geo.nr_chnls; i++) {
struct nvm_ch_map *ch_rmap;
int *lun_roffs;
- int luns_in_chnl = dev->geo.luns_per_chnl;
+ int luns_in_chnl = dev->geo.nr_luns;
ch_rmap = &rmap->chnls[i];
@@ -524,41 +615,12 @@ static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
-void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
- int len)
-{
- struct nvm_geo *geo = &dev->geo;
- struct nvm_dev_map *dev_rmap = dev->rmap;
- u64 i;
-
- for (i = 0; i < len; i++) {
- struct nvm_ch_map *ch_rmap;
- int *lun_roffs;
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- u64 diff;
-
- if (!pba)
- continue;
-
- gaddr = linear_to_generic_addr(geo, pba);
- ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
- lun_roffs = ch_rmap->lun_offs;
-
- diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
- (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
-
- entries[i] -= cpu_to_le64(diff);
- }
-}
-EXPORT_SYMBOL(nvm_part_to_tgt);
-
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
down_write(&nvm_tgtt_lock);
- if (nvm_find_target_type(tt->name, 0))
+ if (__nvm_find_target_type(tt->name))
ret = -EEXIST;
else
list_add(&tt->list, &nvm_tgt_types);
@@ -726,112 +788,6 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_submit_io_sync);
-int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
- int nr_ppas)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct nvm_rq rqd;
- int ret;
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- rqd.opcode = NVM_OP_ERASE;
- rqd.flags = geo->plane_mode >> 1;
-
- ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
- if (ret)
- return ret;
-
- ret = nvm_submit_io_sync(tgt_dev, &rqd);
- if (ret) {
- pr_err("rrpr: erase I/O submission failed: %d\n", ret);
- goto free_ppa_list;
- }
-
-free_ppa_list:
- nvm_free_rqd_ppalist(tgt_dev, &rqd);
-
- return ret;
-}
-EXPORT_SYMBOL(nvm_erase_sync);
-
-int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvm_dev *dev = tgt_dev->parent;
-
- if (!dev->ops->get_l2p_tbl)
- return 0;
-
- return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
-}
-EXPORT_SYMBOL(nvm_get_l2p_tbl);
-
-int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_geo *geo = &dev->geo;
- struct nvm_area *area, *prev, *next;
- sector_t begin = 0;
- sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
-
- if (len > max_sectors)
- return -EINVAL;
-
- area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
- if (!area)
- return -ENOMEM;
-
- prev = NULL;
-
- spin_lock(&dev->lock);
- list_for_each_entry(next, &dev->area_list, list) {
- if (begin + len > next->begin) {
- begin = next->end;
- prev = next;
- continue;
- }
- break;
- }
-
- if ((begin + len) > max_sectors) {
- spin_unlock(&dev->lock);
- kfree(area);
- return -EINVAL;
- }
-
- area->begin = *lba = begin;
- area->end = begin + len;
-
- if (prev) /* insert into sorted order */
- list_add(&area->list, &prev->list);
- else
- list_add(&area->list, &dev->area_list);
- spin_unlock(&dev->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(nvm_get_area);
-
-void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_area *area;
-
- spin_lock(&dev->lock);
- list_for_each_entry(area, &dev->area_list, list) {
- if (area->begin != begin)
- continue;
-
- list_del(&area->list);
- spin_unlock(&dev->lock);
- kfree(area);
- return;
- }
- spin_unlock(&dev->lock);
-}
-EXPORT_SYMBOL(nvm_put_area);
-
void nvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_dev *tgt_dev = rqd->dev;
@@ -858,10 +814,10 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
struct nvm_geo *geo = &dev->geo;
int blk, offset, pl, blktype;
- if (nr_blks != geo->blks_per_lun * geo->plane_mode)
+ if (nr_blks != geo->nr_chks * geo->plane_mode)
return -EINVAL;
- for (blk = 0; blk < geo->blks_per_lun; blk++) {
+ for (blk = 0; blk < geo->nr_chks; blk++) {
offset = blk * geo->plane_mode;
blktype = blks[offset];
@@ -877,7 +833,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
blks[blk] = blktype;
}
- return geo->blks_per_lun;
+ return geo->nr_chks;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
@@ -892,53 +848,6 @@ int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
-static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- struct nvm_geo *geo = &dev->geo;
- int i;
-
- dev->lps_per_blk = geo->pgs_per_blk;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* Just a linear array */
- for (i = 0; i < dev->lps_per_blk; i++)
- dev->lptbl[i] = i;
-
- return 0;
-}
-
-static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- int i, p;
- struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
-
- if (!mlc->num_pairs)
- return 0;
-
- dev->lps_per_blk = mlc->num_pairs;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* The lower page table encoding consists of a list of bytes, where each
- * has a lower and an upper half. The first half byte maintains the
- * increment value and every value after is an offset added to the
- * previous incrementation value
- */
- dev->lptbl[0] = mlc->pairs[0] & 0xF;
- for (i = 1; i < dev->lps_per_blk; i++) {
- p = mlc->pairs[i >> 1];
- if (i & 0x1) /* upper */
- dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
- else /* lower */
- dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
- }
-
- return 0;
-}
-
static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
@@ -946,66 +855,44 @@ static int nvm_core_init(struct nvm_dev *dev)
struct nvm_geo *geo = &dev->geo;
int ret;
+ memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+
+ if (grp->mtype != 0) {
+ pr_err("nvm: memory type not supported\n");
+ return -EINVAL;
+ }
+
/* Whole device values */
geo->nr_chnls = grp->num_ch;
- geo->luns_per_chnl = grp->num_lun;
-
- /* Generic device values */
- geo->pgs_per_blk = grp->num_pg;
- geo->blks_per_lun = grp->num_blk;
- geo->nr_planes = grp->num_pln;
- geo->fpg_size = grp->fpg_sz;
- geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+ geo->nr_luns = grp->num_lun;
+
+ /* Generic device geometry values */
+ geo->ws_min = grp->ws_min;
+ geo->ws_opt = grp->ws_opt;
+ geo->ws_seq = grp->ws_seq;
+ geo->ws_per_chk = grp->ws_per_chk;
+ geo->nr_chks = grp->num_chk;
geo->sec_size = grp->csecs;
geo->oob_size = grp->sos;
- geo->sec_per_pg = grp->fpg_sz / grp->csecs;
geo->mccap = grp->mccap;
- memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-
- geo->plane_mode = NVM_PLANE_SINGLE;
geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
- if (grp->mpos & 0x020202)
- geo->plane_mode = NVM_PLANE_DOUBLE;
- if (grp->mpos & 0x040404)
- geo->plane_mode = NVM_PLANE_QUAD;
+ geo->sec_per_chk = grp->clba;
+ geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
+ geo->all_luns = geo->nr_luns * geo->nr_chnls;
- if (grp->mtype != 0) {
- pr_err("nvm: memory type not supported\n");
- return -EINVAL;
- }
-
- /* calculated values */
+ /* 1.2 spec device geometry values */
+ geo->plane_mode = 1 << geo->ws_seq;
+ geo->nr_planes = geo->ws_opt / geo->ws_min;
+ geo->sec_per_pg = geo->ws_min;
geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
- geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
- geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
- geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
- dev->total_secs = geo->nr_luns * geo->sec_per_lun;
- dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
+ dev->total_secs = geo->all_luns * geo->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
- switch (grp->fmtype) {
- case NVM_ID_FMTYPE_SLC:
- if (nvm_init_slc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- case NVM_ID_FMTYPE_MLC:
- if (nvm_init_mlc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- default:
- pr_err("nvm: flash type not supported\n");
- ret = -EINVAL;
- goto err_fmtype;
- }
-
INIT_LIST_HEAD(&dev->area_list);
INIT_LIST_HEAD(&dev->targets);
mutex_init(&dev->mlock);
@@ -1031,7 +918,6 @@ static void nvm_free(struct nvm_dev *dev)
dev->ops->destroy_dma_pool(dev->dma_pool);
nvm_unregister_map(dev);
- kfree(dev->lptbl);
kfree(dev->lun_map);
kfree(dev);
}
@@ -1062,8 +948,8 @@ static int nvm_init(struct nvm_dev *dev)
pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
dev->name, geo->sec_per_pg, geo->nr_planes,
- geo->pgs_per_blk, geo->blks_per_lun,
- geo->nr_luns, geo->nr_chnls);
+ geo->ws_per_chk, geo->nr_chks,
+ geo->all_luns, geo->nr_chnls);
return 0;
err:
pr_err("nvm: failed to initialize nvm\n");
@@ -1135,7 +1021,6 @@ EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
- struct nvm_ioctl_create_simple *s;
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
@@ -1146,23 +1031,6 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
- pr_err("nvm: config type not valid\n");
- return -EINVAL;
- }
- s = &create->conf.s;
-
- if (s->lun_begin == -1 && s->lun_end == -1) {
- s->lun_begin = 0;
- s->lun_end = dev->geo.nr_luns - 1;
- }
-
- if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
- pr_err("nvm: lun out of bound (%u:%u > %u)\n",
- s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
- return -EINVAL;
- }
-
return nvm_create_tgt(dev, create);
}
@@ -1262,6 +1130,12 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
return -EFAULT;
+ if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
+ create.conf.e.rsv != 0) {
+ pr_err("nvm: reserved config field in use\n");
+ return -EINVAL;
+ }
+
create.dev[DISK_NAME_LEN - 1] = '\0';
create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
create.tgtname[DISK_NAME_LEN - 1] = '\0';
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 0d227ef7d1b9..000fcad38136 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -19,12 +19,16 @@
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
+ struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
sector_t lba = pblk_get_lba(bio);
+ unsigned long start_time = jiffies;
unsigned int bpos, pos;
int nr_entries = pblk_get_secs(bio);
int i, ret;
+ generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);
+
/* Update the write buffer head (mem) with the entries that we can
* write. The write in itself cannot fail, so there is no need to
* rollback from here on.
@@ -67,6 +71,7 @@ retry:
pblk_rl_inserted(&pblk->rl, nr_entries);
out:
+ generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
return ret;
}
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 76516ee84e9a..0487b9340c1d 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -32,8 +32,8 @@ static void pblk_line_mark_bb(struct work_struct *work)
struct pblk_line *line;
int pos;
- line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
- pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
+ line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ pos = pblk_ppa_to_pos(&dev->geo, *ppa);
pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
line->id, pos);
@@ -48,7 +48,7 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int pos = pblk_dev_ppa_to_pos(geo, *ppa);
+ int pos = pblk_ppa_to_pos(geo, *ppa);
pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
atomic_long_inc(&pblk->erase_failed);
@@ -66,7 +66,7 @@ static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
+ line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
atomic_dec(&line->left_seblks);
if (rqd->error) {
@@ -144,7 +144,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
BUG_ON(pblk_ppa_empty(ppa));
#endif
- line_id = pblk_tgt_ppa_to_line(ppa);
+ line_id = pblk_ppa_to_line(ppa);
line = &pblk->lines[line_id];
paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
@@ -650,7 +650,7 @@ next_rq:
} else {
for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
- int pos = pblk_dev_ppa_to_pos(geo, ppa);
+ int pos = pblk_ppa_to_pos(geo, ppa);
int read_type = PBLK_READ_RANDOM;
if (pblk_io_aligned(pblk, rq_ppas))
@@ -668,7 +668,7 @@ next_rq:
}
ppa = addr_to_gen_ppa(pblk, paddr, id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
@@ -742,7 +742,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
cmd_op = NVM_OP_PWRITE;
flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == PBLK_READ) {
+ } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD;
flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -802,7 +802,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
if (rqd.error) {
if (dir == PBLK_WRITE)
pblk_log_write_err(pblk, &rqd);
- else
+ else if (dir == PBLK_READ)
pblk_log_read_err(pblk, &rqd);
}
@@ -816,7 +816,7 @@ int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
u64 bpaddr = pblk_line_smeta_start(pblk, line);
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
+ return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
@@ -854,8 +854,8 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not sync erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
rqd.error = ret;
goto out;
@@ -979,7 +979,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
/* Start metadata */
smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
- smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
+ smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
/* Fill metadata among lines */
if (cur) {
@@ -1032,7 +1032,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
lm->sec_per_line);
bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
lm->sec_per_line);
- line->sec_in_line -= geo->sec_per_blk;
+ line->sec_in_line -= geo->sec_per_chk;
if (bit >= lm->emeta_bb)
nr_bb++;
}
@@ -1145,7 +1145,7 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
}
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
if (!pblk_line_init_bb(pblk, line, 0)) {
list_add(&line->list, &l_mg->free_list);
@@ -1233,7 +1233,7 @@ retry:
l_mg->data_line = retry_line;
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, retry_line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, false);
if (pblk_line_erase(pblk, retry_line))
goto retry;
@@ -1252,7 +1252,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *line;
- int is_next = 0;
spin_lock(&l_mg->free_lock);
line = pblk_line_get(pblk);
@@ -1280,7 +1279,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
@@ -1290,10 +1288,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
return NULL;
}
- pblk_rl_free_lines_dec(&pblk->rl, line);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
retry_setup:
if (!pblk_line_init_metadata(pblk, line, NULL)) {
line = pblk_line_retry(pblk, line);
@@ -1311,6 +1305,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
+
return line;
}
@@ -1395,7 +1391,6 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *cur, *new = NULL;
unsigned int left_seblks;
- int is_next = 0;
cur = l_mg->data_line;
new = l_mg->data_next;
@@ -1444,6 +1439,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, new, true);
+
/* Allocate next line for preparation */
spin_lock(&l_mg->free_lock);
l_mg->data_next = pblk_line_get(pblk);
@@ -1457,13 +1454,9 @@ retry_setup:
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
out:
return new;
}
@@ -1561,8 +1554,8 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not async erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
}
return err;
@@ -1746,7 +1739,7 @@ void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_lun *rlun;
- int nr_luns = geo->nr_luns;
+ int nr_luns = geo->all_luns;
int bit = -1;
while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
@@ -1884,7 +1877,7 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
/* If the L2P entry maps to a line, the reference is valid */
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
- int line_id = pblk_dev_ppa_to_line(ppa);
+ int line_id = pblk_ppa_to_line(ppa);
struct pblk_line *line = &pblk->lines[line_id];
kref_get(&line->ref);
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 9c8e114c8a54..3d899383666e 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -169,7 +169,14 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
* the line untouched. TODO: Implement a recovery routine that scans and
* moves all sectors on the line.
*/
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+
+ ret = pblk_recov_check_emeta(pblk, emeta_buf);
+ if (ret) {
+ pr_err("pblk: inconsistent emeta (line %d)\n", line->id);
+ goto fail_free_emeta;
+ }
+
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list) {
pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
goto fail_free_emeta;
@@ -519,22 +526,12 @@ void pblk_gc_should_start(struct pblk *pblk)
}
}
-/*
- * If flush_wq == 1 then no lock should be held by the caller since
- * flush_workqueue can sleep
- */
-static void pblk_gc_stop(struct pblk *pblk, int flush_wq)
-{
- pblk->gc.gc_active = 0;
- pr_debug("pblk: gc stop\n");
-}
-
void pblk_gc_should_stop(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
if (gc->gc_active && !gc->gc_forced)
- pblk_gc_stop(pblk, 0);
+ gc->gc_active = 0;
}
void pblk_gc_should_kick(struct pblk *pblk)
@@ -660,7 +657,7 @@ void pblk_gc_exit(struct pblk *pblk)
gc->gc_enabled = 0;
del_timer_sync(&gc->gc_timer);
- pblk_gc_stop(pblk, 1);
+ gc->gc_active = 0;
if (gc->gc_ts)
kthread_stop(gc->gc_ts);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 695826a06b5d..93d671ca518e 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -169,8 +169,8 @@ static int pblk_set_ppaf(struct pblk *pblk)
}
ppaf.ch_len = power_len;
- power_len = get_count_order(geo->luns_per_chnl);
- if (1 << power_len != geo->luns_per_chnl) {
+ power_len = get_count_order(geo->nr_luns);
+ if (1 << power_len != geo->nr_luns) {
pr_err("pblk: supports only power-of-two LUN config.\n");
return -EINVAL;
}
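
get_count_order(n) returns ceil(log2(n)), so the rewritten check passes only when
1 << order == n, i.e. when the per-channel LUN count is an exact power of two. A
standalone sketch with hypothetical LUN counts:

#include <stdio.h>

/* Userspace model of get_count_order(): smallest order with
 * (1 << order) >= n, i.e. ceil(log2(n)).
 */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int n;

	for (n = 4; n <= 12; n += 4)	/* 4, 8 accepted; 12 rejected */
		printf("nr_luns=%u -> %s\n", n,
		       (1U << count_order(n)) == n ? "ok" : "-EINVAL");
	return 0;
}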
@@ -254,7 +254,7 @@ static int pblk_core_init(struct pblk *pblk)
struct nvm_geo *geo = &dev->geo;
pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
- geo->nr_planes * geo->nr_luns;
+ geo->nr_planes * geo->all_luns;
if (pblk_init_global_caches(pblk))
return -ENOMEM;
@@ -270,21 +270,22 @@ static int pblk_core_init(struct pblk *pblk)
if (!pblk->gen_ws_pool)
goto free_page_bio_pool;
- pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
+ pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
+ pblk_rec_cache);
if (!pblk->rec_pool)
goto free_gen_ws_pool;
- pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->r_rq_pool)
goto free_rec_pool;
- pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->e_rq_pool)
goto free_r_rq_pool;
- pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_w_rq_cache);
if (!pblk->w_rq_pool)
goto free_e_rq_pool;
@@ -354,6 +355,8 @@ static void pblk_core_free(struct pblk *pblk)
mempool_destroy(pblk->e_rq_pool);
mempool_destroy(pblk->w_rq_pool);
+ pblk_rwb_free(pblk);
+
pblk_free_global_caches(pblk);
}
@@ -409,7 +412,7 @@ static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
u8 *blks;
int nr_blks, ret;
- nr_blks = geo->blks_per_lun * geo->plane_mode;
+ nr_blks = geo->nr_chks * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
return -ENOMEM;
@@ -482,20 +485,21 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
int i, ret;
/* TODO: Implement unbalanced LUN support */
- if (geo->luns_per_chnl < 0) {
+ if (geo->nr_luns < 0) {
pr_err("pblk: unbalanced LUN config.\n");
return -EINVAL;
}
- pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
+ pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
+ GFP_KERNEL);
if (!pblk->luns)
return -ENOMEM;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
/* Stripe across channels */
int ch = i % geo->nr_chnls;
int lun_raw = i / geo->nr_chnls;
- int lunid = lun_raw + ch * geo->luns_per_chnl;
+ int lunid = lun_raw + ch * geo->nr_luns;
rlun = &pblk->luns[i];
rlun->bppa = luns[lunid];
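
The stripe math above interleaves consecutive pblk LUNs across channels, so sequential
writes spread over all channels before revisiting one. A standalone sketch of the
mapping with hypothetical geometry (2 channels, 4 LUNs per channel):

#include <stdio.h>

int main(void)
{
	int nr_chnls = 2, nr_luns = 4;	/* per channel, hypothetical */
	int all_luns = nr_chnls * nr_luns, i;

	for (i = 0; i < all_luns; i++) {
		int ch = i % nr_chnls;
		int lun_raw = i / nr_chnls;
		int lunid = lun_raw + ch * nr_luns;

		/* pblk LUNs 0,1,2,... land on ch0/l0, ch1/l0, ch0/l1, ... */
		printf("pblk lun %d -> luns[%d] (ch %d)\n", i, lunid, ch);
	}
	return 0;
}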
@@ -577,22 +581,37 @@ static unsigned int calc_emeta_len(struct pblk *pblk)
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
struct nvm_geo *geo = &dev->geo;
sector_t provisioned;
+ int sec_meta, blk_meta;
- pblk->over_pct = 20;
+ if (geo->op == NVM_TARGET_DEFAULT_OP)
+ pblk->op = PBLK_DEFAULT_OP;
+ else
+ pblk->op = geo->op;
provisioned = nr_free_blks;
- provisioned *= (100 - pblk->over_pct);
+ provisioned *= (100 - pblk->op);
sector_div(provisioned, 100);
+ pblk->op_blks = nr_free_blks - provisioned;
+
/* Internally pblk manages all free blocks, but all calculations based
* on user capacity consider only provisioned blocks
*/
pblk->rl.total_blocks = nr_free_blks;
- pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
- pblk->capacity = provisioned * geo->sec_per_blk;
+ pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;
+
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
+
+ pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;
+
atomic_set(&pblk->rl.free_blocks, nr_free_blks);
+ atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
static int pblk_lines_alloc_metadata(struct pblk *pblk)
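
With over-provisioning now explicit, user capacity follows directly from the free-block
count, the OP percentage, and the blocks reserved for line metadata. A standalone
arithmetic sketch; every number here is hypothetical:

#include <stdio.h>

/* Worked example of the provisioning math above: 1000 free blocks,
 * the default 11% OP, 64 blocks' worth of smeta/emeta, 4096 sectors
 * per chunk.
 */
int main(void)
{
	long nr_free_blks = 1000, op = 11;
	long blk_meta = 64, sec_per_chk = 4096;
	long provisioned = nr_free_blks * (100 - op) / 100;	/* 890 */
	long op_blks = nr_free_blks - provisioned;		/* 110 */

	printf("op_blks=%ld capacity=%ld sectors\n",
	       op_blks, (provisioned - blk_meta) * sec_per_chk);
	return 0;
}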
@@ -683,7 +702,7 @@ static int pblk_lines_init(struct pblk *pblk)
int i, ret;
pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
- max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
+ max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
max_write_ppas : nvm_max_phys_sects(dev);
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
@@ -693,26 +712,26 @@ static int pblk_lines_init(struct pblk *pblk)
return -EINVAL;
}
- div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
+ div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
if (mod) {
pr_err("pblk: bad configuration of sectors/pages\n");
return -EINVAL;
}
- l_mg->nr_lines = geo->blks_per_lun;
+ l_mg->nr_lines = geo->nr_chks;
l_mg->log_line = l_mg->data_line = NULL;
l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
l_mg->nr_free_lines = 0;
bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
- lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
- lm->blk_per_line = geo->nr_luns;
- lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
+ lm->blk_per_line = geo->all_luns;
+ lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
- lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->mid_thrs = lm->sec_per_line / 2;
lm->high_thrs = lm->sec_per_line / 4;
- lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
+ lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
/* Calculate necessary pages for smeta. See comment over struct
* line_smeta definition
@@ -742,12 +761,12 @@ add_emeta_page:
goto add_emeta_page;
}
- lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;
+ lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
lm->min_blk_line = 1;
- if (geo->nr_luns > 1)
+ if (geo->all_luns > 1)
lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
- lm->emeta_sec[0], geo->sec_per_blk);
+ lm->emeta_sec[0], geo->sec_per_chk);
if (lm->min_blk_line > lm->blk_per_line) {
pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
@@ -772,7 +791,7 @@ add_emeta_page:
goto fail_free_bb_template;
}
- bb_distance = (geo->nr_luns) * geo->sec_per_pl;
+ bb_distance = (geo->all_luns) * geo->sec_per_pl;
for (i = 0; i < lm->sec_per_line; i += bb_distance)
bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);
@@ -844,7 +863,7 @@ add_emeta_page:
pblk_set_provision(pblk, nr_free_blks);
/* Cleanup per-LUN bad block lists - managed within lines on run-time */
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return 0;
@@ -858,7 +877,7 @@ fail_free_bb_template:
fail_free_meta:
pblk_line_meta_free(pblk);
fail:
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return ret;
@@ -866,15 +885,19 @@ fail:
static int pblk_writer_init(struct pblk *pblk)
{
- timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
- mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
-
pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
if (IS_ERR(pblk->writer_ts)) {
- pr_err("pblk: could not allocate writer kthread\n");
- return PTR_ERR(pblk->writer_ts);
+ int err = PTR_ERR(pblk->writer_ts);
+
+ if (err != -EINTR)
+ pr_err("pblk: could not allocate writer kthread (%d)\n",
+ err);
+ return err;
}
+ timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
+ mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
+
return 0;
}
@@ -910,7 +933,6 @@ static void pblk_tear_down(struct pblk *pblk)
pblk_pipeline_stop(pblk);
pblk_writer_stop(pblk);
pblk_rb_sync_l2p(&pblk->rwb);
- pblk_rwb_free(pblk);
pblk_rl_free(&pblk->rl);
pr_debug("pblk: consistent tear down\n");
@@ -1025,7 +1047,8 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
ret = pblk_writer_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize write thread\n");
+ if (ret != -EINTR)
+ pr_err("pblk: could not initialize write thread\n");
goto fail_free_lines;
}
@@ -1041,13 +1064,14 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
blk_queue_write_cache(tqueue, true, false);
- tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
+ tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
tqueue->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
- pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
- geo->nr_luns, pblk->l_mg.nr_lines,
+ pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
+ tdisk->disk_name,
+ geo->all_luns, pblk->l_mg.nr_lines,
(unsigned long long)pblk->rl.nr_secs,
pblk->rwb.nr_entries);
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 6f3ecde2140f..7445e6430c52 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -146,7 +146,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
return;
/* Erase blocks that are bad in this line but might not be in next */
- if (unlikely(ppa_empty(*erase_ppa)) &&
+ if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
int bit = -1;
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index b8f78e401482..ec8fc314646b 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -54,7 +54,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
rb->seg_size = (1 << power_seg_sz);
rb->nr_entries = (1 << power_size);
rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
- rb->sync_point = EMPTY_ENTRY;
+ rb->flush_point = EMPTY_ENTRY;
spin_lock_init(&rb->w_lock);
spin_lock_init(&rb->s_lock);
@@ -112,7 +112,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
up_write(&pblk_rb_lock);
#ifdef CONFIG_NVM_DEBUG
- atomic_set(&rb->inflight_sync_point, 0);
+ atomic_set(&rb->inflight_flush_point, 0);
#endif
/*
@@ -226,7 +226,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
entry->cacheline);
- line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
kref_put(&line->ref, pblk_line_put);
clean_wctx(w_ctx);
rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
@@ -349,35 +349,35 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
smp_store_release(&entry->w_ctx.flags, flags);
}
-static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
+static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
unsigned int pos)
{
struct pblk_rb_entry *entry;
- unsigned int subm, sync_point;
+ unsigned int sync, flush_point;
- subm = READ_ONCE(rb->subm);
+ sync = READ_ONCE(rb->sync);
+
+ if (pos == sync)
+ return 0;
#ifdef CONFIG_NVM_DEBUG
- atomic_inc(&rb->inflight_sync_point);
+ atomic_inc(&rb->inflight_flush_point);
#endif
- if (pos == subm)
- return 0;
+ flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
+ entry = &rb->entries[flush_point];
- sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
- entry = &rb->entries[sync_point];
+ pblk_rb_sync_init(rb, NULL);
- /* Protect syncs */
- smp_store_release(&rb->sync_point, sync_point);
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, flush_point);
- if (!bio)
- return 0;
+ if (bio)
+ bio_list_add(&entry->w_ctx.bios, bio);
- spin_lock_irq(&rb->s_lock);
- bio_list_add(&entry->w_ctx.bios, bio);
- spin_unlock_irq(&rb->s_lock);
+ pblk_rb_sync_end(rb, NULL);
- return 1;
+ return bio ? 1 : 0;
}
static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
@@ -416,7 +416,7 @@ void pblk_rb_flush(struct pblk_rb *rb)
struct pblk *pblk = container_of(rb, struct pblk, rwb);
unsigned int mem = READ_ONCE(rb->mem);
- if (pblk_rb_sync_point_set(rb, NULL, mem))
+ if (pblk_rb_flush_point_set(rb, NULL, mem))
return;
pblk_write_should_kick(pblk);
@@ -440,7 +440,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->nr_flush);
#endif
- if (pblk_rb_sync_point_set(&pblk->rwb, bio, mem))
+ if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
*io_ret = NVM_IO_OK;
}
@@ -606,21 +606,6 @@ try:
return NVM_IO_ERR;
}
- if (flags & PBLK_FLUSH_ENTRY) {
- unsigned int sync_point;
-
- sync_point = READ_ONCE(rb->sync_point);
- if (sync_point == pos) {
- /* Protect syncs */
- smp_store_release(&rb->sync_point, EMPTY_ENTRY);
- }
-
- flags &= ~PBLK_FLUSH_ENTRY;
-#ifdef CONFIG_NVM_DEBUG
- atomic_dec(&rb->inflight_sync_point);
-#endif
- }
-
flags &= ~PBLK_WRITTEN_DATA;
flags |= PBLK_SUBMITTED_ENTRY;
@@ -730,15 +715,24 @@ void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
- unsigned int sync;
- unsigned int i;
-
+ unsigned int sync, flush_point;
lockdep_assert_held(&rb->s_lock);
sync = READ_ONCE(rb->sync);
+ flush_point = READ_ONCE(rb->flush_point);
- for (i = 0; i < nr_entries; i++)
- sync = (sync + 1) & (rb->nr_entries - 1);
+ if (flush_point != EMPTY_ENTRY) {
+ unsigned int secs_to_flush;
+
+ secs_to_flush = pblk_rb_ring_count(flush_point, sync,
+ rb->nr_entries);
+ if (secs_to_flush < nr_entries) {
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, EMPTY_ENTRY);
+ }
+ }
+
+ sync = (sync + nr_entries) & (rb->nr_entries - 1);
/* Protect from counts */
smp_store_release(&rb->sync, sync);
@@ -746,22 +740,27 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
return sync;
}
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb)
+/* Calculate how many sectors to submit up to the current flush point. */
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
- unsigned int subm, sync_point;
- unsigned int count;
+ unsigned int subm, sync, flush_point;
+ unsigned int submitted, to_flush;
- /* Protect syncs */
- sync_point = smp_load_acquire(&rb->sync_point);
- if (sync_point == EMPTY_ENTRY)
+ /* Protect flush points */
+ flush_point = smp_load_acquire(&rb->flush_point);
+ if (flush_point == EMPTY_ENTRY)
return 0;
+ /* Protect syncs */
+ sync = smp_load_acquire(&rb->sync);
+
subm = READ_ONCE(rb->subm);
+ submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);
/* The flush point itself counts as a sector to flush */
- count = pblk_rb_ring_count(sync_point, subm, rb->nr_entries) + 1;
+ to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;
- return count;
+ return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
/*
@@ -801,7 +800,7 @@ int pblk_rb_tear_down_check(struct pblk_rb *rb)
if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
(rb->sync == rb->l2p_update) &&
- (rb->sync_point == EMPTY_ENTRY)) {
+ (rb->flush_point == EMPTY_ENTRY)) {
goto out;
}
@@ -848,7 +847,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
queued_entries++;
spin_unlock_irq(&rb->s_lock);
- if (rb->sync_point != EMPTY_ENTRY)
+ if (rb->flush_point != EMPTY_ENTRY)
offset = scnprintf(buf, PAGE_SIZE,
"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
rb->nr_entries,
@@ -857,14 +856,14 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
- rb->sync_point,
+ rb->flush_point,
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
else
offset = scnprintf(buf, PAGE_SIZE,
@@ -875,13 +874,13 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
return offset;
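
The flush point is now measured against sync (persisted entries) rather than subm
(submitted entries), so pblk_rb_flush_point_count() reports how many sectors still have
to be submitted before the flush point is covered. A userspace model of the ring
arithmetic; ring size and positions are hypothetical:

#include <stdio.h>

/* pblk_rb_ring_count(head, tail, size) on a power-of-two ring is
 * (head - tail) & (size - 1).
 */
static unsigned int ring_count(unsigned int head, unsigned int tail,
			       unsigned int size)
{
	return (head - tail) & (size - 1);
}

int main(void)
{
	unsigned int size = 8, sync = 6, subm = 7, flush_point = 1;
	unsigned int submitted = ring_count(subm, sync, size);		  /* 1 */
	unsigned int to_flush = ring_count(flush_point, sync, size) + 1; /* 4 */

	printf("sectors still to submit for the flush: %u\n",
	       submitted < to_flush ? to_flush - submitted : 0);	  /* 3 */
	return 0;
}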
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index ca79d8fb3e60..2f761283f43e 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -141,7 +141,7 @@ static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
struct ppa_addr ppa = ppa_list[i];
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
kref_put(&line->ref, pblk_line_put_wq);
}
}
@@ -158,8 +158,12 @@ static void pblk_end_user_read(struct bio *bio)
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
bool put_line)
{
+ struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = rqd->bio;
+ unsigned long start_time = r_ctx->start_time;
+
+ generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
@@ -193,9 +197,9 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
__pblk_end_io_read(pblk, rqd, true);
}
-static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap)
+static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
+ unsigned int bio_init_idx,
+ unsigned long *read_bitmap)
{
struct bio *new_bio, *bio = rqd->bio;
struct pblk_sec_meta *meta_list = rqd->meta_list;
@@ -270,7 +274,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
- int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
+ int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
struct pblk_line *line = &pblk->lines[line_id];
kref_put(&line->ref, pblk_line_put);
@@ -306,6 +310,8 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
return NVM_IO_OK;
err:
+ pr_err("pblk: failed to perform partial read\n");
+
/* Free allocated pages in new bio */
pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
__pblk_end_io_read(pblk, rqd, false);
@@ -357,6 +363,7 @@ retry:
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
struct pblk_g_ctx *r_ctx;
@@ -372,6 +379,8 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
return NVM_IO_ERR;
}
+ generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);
+
bitmap_zero(&read_bitmap, nr_secs);
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
@@ -383,6 +392,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
+ r_ctx->start_time = jiffies;
r_ctx->lba = blba;
/* Save the index for this bio's start. This is needed in case
@@ -422,7 +432,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
if (!int_bio) {
pr_err("pblk: could not clone read bio\n");
- return NVM_IO_ERR;
+ goto fail_end_io;
}
rqd->bio = int_bio;
@@ -433,7 +443,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
pr_err("pblk: read IO submission failed\n");
if (int_bio)
bio_put(int_bio);
- return ret;
+ goto fail_end_io;
}
return NVM_IO_OK;
@@ -442,17 +452,14 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
/* The read bio request could be partially filled by the write buffer,
* but there are some holes that need to be read from the drive.
*/
- ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
- if (ret) {
- pr_err("pblk: failed to perform partial read\n");
- return ret;
- }
-
- return NVM_IO_OK;
+ return pblk_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
fail_rqd_free:
pblk_free_rqd(pblk, rqd, PBLK_READ);
return ret;
+fail_end_io:
+ __pblk_end_io_read(pblk, rqd, false);
+ return ret;
}
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
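
The accounting added above has to balance: every bio that passes
generic_start_io_acct() in pblk_submit_read() must reach generic_end_io_acct() in
__pblk_end_io_read() exactly once, which is why the clone and submission failures now
jump to fail_end_io instead of returning directly. The pairing, condensed into a
sketch rather than the full patch:

/* Open accounting when the read enters the target ... */
r_ctx->start_time = jiffies;
generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);

/* ... and close it on every completion path, including errors, or
 * /proc/diskstats would report the read as in flight forever.
 */
generic_end_io_acct(dev->q, READ, &pblk->disk->part0, r_ctx->start_time);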
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index eadb3eb5d4dc..1d5e961bf5e0 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -111,18 +111,18 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
return 0;
}
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
u32 crc;
crc = pblk_calc_emeta_crc(pblk, emeta_buf);
if (le32_to_cpu(emeta_buf->crc) != crc)
- return NULL;
+ return 1;
if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
- return NULL;
+ return 1;
- return emeta_to_lbas(pblk, emeta_buf);
+ return 0;
}
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
@@ -137,7 +137,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
u64 nr_valid_lbas, nr_lbas = 0;
u64 i;
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list)
return 1;
@@ -149,7 +149,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
struct ppa_addr ppa;
int pos;
- ppa = addr_to_pblk_ppa(pblk, i, line->id);
+ ppa = addr_to_gen_ppa(pblk, i, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
/* Do not update bad blocks */
@@ -188,7 +188,7 @@ static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
- nr_bb * geo->sec_per_blk;
+ nr_bb * geo->sec_per_chk;
}
struct pblk_recov_alloc {
@@ -263,12 +263,12 @@ next_read_rq:
int pos;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
r_ptr_int += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
@@ -288,7 +288,7 @@ next_read_rq:
/* At this point, the read should not fail. If it does, it is a problem
* we cannot recover from here. Need FTL log.
*/
- if (rqd->error) {
+ if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
return -EINTR;
}
@@ -411,12 +411,12 @@ next_pad_rq:
int pos;
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
}
@@ -541,12 +541,12 @@ next_rq:
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
@@ -672,12 +672,12 @@ next_rq:
paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
paddr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
@@ -817,7 +817,7 @@ static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
while (emeta_secs) {
emeta_start--;
- ppa = addr_to_pblk_ppa(pblk, emeta_start, line->id);
+ ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
if (!test_bit(pos, line->blk_bitmap))
emeta_secs--;
@@ -938,6 +938,11 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
goto next;
}
+ if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
+ pblk_recov_l2p_from_oob(pblk, line);
+ goto next;
+ }
+
if (pblk_recov_l2p_from_emeta(pblk, line))
pblk_recov_l2p_from_oob(pblk, line);
@@ -984,10 +989,8 @@ next:
}
spin_unlock(&l_mg->free_lock);
- if (is_next) {
+ if (is_next)
pblk_line_erase(pblk, l_mg->data_next);
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
- }
out:
if (found_lines != recovered_lines)
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index dacc71922260..0d457b162f23 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -89,17 +89,15 @@ unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
return atomic_read(&rl->free_blocks);
}
-/*
- * We check for (i) the number of free blocks in the current LUN and (ii) the
- * total number of free blocks in the pblk instance. This is to even out the
- * number of free blocks on each LUN when GC kicks in.
- *
- * Only the total number of free blocks is used to configure the rate limiter.
- */
-void pblk_rl_update_rates(struct pblk_rl *rl)
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
+{
+ return atomic_read(&rl->free_user_blocks);
+}
+
+static void __pblk_rl_update_rates(struct pblk_rl *rl,
+ unsigned long free_blocks)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
- unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
int max = rl->rb_budget;
if (free_blocks >= rl->high) {
@@ -132,20 +130,37 @@ void pblk_rl_update_rates(struct pblk_rl *rl)
pblk_gc_should_stop(pblk);
}
+void pblk_rl_update_rates(struct pblk_rl *rl)
+{
+ __pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
+}
+
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_add(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+ free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_sub(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+
+ if (used)
+ free_blocks = atomic_sub_return(blk_in_line,
+ &rl->free_user_blocks);
+ else
+ free_blocks = atomic_read(&rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
int pblk_rl_high_thrs(struct pblk_rl *rl)
@@ -174,16 +189,21 @@ void pblk_rl_free(struct pblk_rl *rl)
void pblk_rl_init(struct pblk_rl *rl, int budget)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
+ int sec_meta, blk_meta;
+
unsigned int rb_windows;
- rl->high = rl->total_blocks / PBLK_USER_HIGH_THRS;
- rl->high_pw = get_count_order(rl->high);
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
- rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
- if (rl->low < min_blocks)
- rl->low = min_blocks;
+ rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
+ rl->high_pw = get_count_order(rl->high);
rl->rsv_blocks = min_blocks;
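
free_blocks now counts every free block including the over-provisioning pool, while
free_user_blocks tracks only the user-visible ones; the rate limiter is driven solely
by the latter. A userspace model of the dec path, with hypothetical numbers:

#include <stdbool.h>
#include <stdio.h>

static int free_blocks = 1000, free_user_blocks = 890;	/* hypothetical */

/* Model of pblk_rl_free_lines_dec(): lines consumed for user data
 * shrink both pools; lines consumed for other purposes leave the
 * user pool untouched.
 */
static void lines_dec(int blk_in_line, bool used)
{
	free_blocks -= blk_in_line;
	if (used)
		free_user_blocks -= blk_in_line;
}

int main(void)
{
	lines_dec(64, true);	/* new user data line */
	lines_dec(64, false);	/* line not charged to user data */
	printf("free=%d user_free=%d\n", free_blocks, free_user_blocks);
	return 0;
}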
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index cd49e8875d4e..620bab853579 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -28,7 +28,7 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
ssize_t sz = 0;
int i;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
int active = 1;
rlun = &pblk->luns[i];
@@ -49,11 +49,12 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
{
- int free_blocks, total_blocks;
+ int free_blocks, free_user_blocks, total_blocks;
int rb_user_max, rb_user_cnt;
int rb_gc_max, rb_gc_cnt, rb_budget, rb_state;
- free_blocks = atomic_read(&pblk->rl.free_blocks);
+ free_blocks = pblk_rl_nr_free_blks(&pblk->rl);
+ free_user_blocks = pblk_rl_nr_user_free_blks(&pblk->rl);
rb_user_max = pblk->rl.rb_user_max;
rb_user_cnt = atomic_read(&pblk->rl.rb_user_cnt);
rb_gc_max = pblk->rl.rb_gc_max;
@@ -64,16 +65,16 @@ static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
total_blocks = pblk->rl.total_blocks;
return snprintf(page, PAGE_SIZE,
- "u:%u/%u,gc:%u/%u(%u/%u)(stop:<%u,full:>%u,free:%d/%d)-%d\n",
+ "u:%u/%u,gc:%u/%u(%u)(stop:<%u,full:>%u,free:%d/%d/%d)-%d\n",
rb_user_cnt,
rb_user_max,
rb_gc_cnt,
rb_gc_max,
rb_state,
rb_budget,
- pblk->rl.low,
pblk->rl.high,
free_blocks,
+ free_user_blocks,
total_blocks,
READ_ONCE(pblk->rl.rb_user_active));
}
@@ -238,7 +239,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
sz = snprintf(page, PAGE_SIZE - sz,
"line: nluns:%d, nblks:%d, nsecs:%d\n",
- geo->nr_luns, lm->blk_per_line, lm->sec_per_line);
+ geo->all_luns, lm->blk_per_line, lm->sec_per_line);
sz += snprintf(page + sz, PAGE_SIZE - sz,
"lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
@@ -287,7 +288,7 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
"blk_line:%d, sec_line:%d, sec_blk:%d\n",
lm->blk_per_line,
lm->sec_per_line,
- geo->sec_per_blk);
+ geo->sec_per_chk);
return sz;
}
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 6c1cafafef53..aae86ed60b98 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -21,13 +21,28 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx)
{
struct bio *original_bio;
+ struct pblk_rb *rwb = &pblk->rwb;
unsigned long ret;
int i;
for (i = 0; i < c_ctx->nr_valid; i++) {
struct pblk_w_ctx *w_ctx;
+ int pos = c_ctx->sentry + i;
+ int flags;
+
+ w_ctx = pblk_rb_w_ctx(rwb, pos);
+ flags = READ_ONCE(w_ctx->flags);
+
+ if (flags & PBLK_FLUSH_ENTRY) {
+ flags &= ~PBLK_FLUSH_ENTRY;
+ /* Release flags on context. Protect from writes */
+ smp_store_release(&w_ctx->flags, flags);
+
+#ifdef CONFIG_NVM_DEBUG
+ atomic_dec(&rwb->inflight_flush_point);
+#endif
+ }
- w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
while ((original_bio = bio_list_pop(&w_ctx->bios)))
bio_endio(original_bio);
}
@@ -439,7 +454,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
struct pblk_line *meta_line;
int err;
- ppa_set_empty(&erase_ppa);
+ pblk_ppa_set_empty(&erase_ppa);
/* Assign lbas to ppas and populate request structure */
err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
@@ -457,7 +472,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_ERR;
}
- if (!ppa_empty(erase_ppa)) {
+ if (!pblk_ppa_empty(erase_ppa)) {
/* Submit erase for next data line */
if (pblk_blk_erase_async(pblk, erase_ppa)) {
struct pblk_line *e_line = pblk_line_get_erase(pblk);
@@ -508,7 +523,7 @@ static int pblk_submit_write(struct pblk *pblk)
if (!secs_avail)
return 1;
- secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
+ secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
return 1;
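
Clearing PBLK_FLUSH_ENTRY moved from the buffer read-out path into pblk_end_w_bio(),
so a flush bio is now completed only after the data it covers has actually been
persisted. The flags handshake relies on release/acquire pairing; a standalone C11
analogue of the completion side (flag value hypothetical):

#include <stdatomic.h>
#include <stdio.h>

#define FLUSH_ENTRY 0x1

/* The completion side clears the flag with a release store, so any
 * reader that acquires the cleared value also observes the bios
 * having been ended beforehand.
 */
static _Atomic int flags = FLUSH_ENTRY;

int main(void)
{
	int f = atomic_load_explicit(&flags, memory_order_relaxed);

	if (f & FLUSH_ENTRY)
		atomic_store_explicit(&flags, f & ~FLUSH_ENTRY,
				      memory_order_release);
	printf("flags=%d\n", atomic_load(&flags));
	return 0;
}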
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 59a64d461a5d..8c357fb6538e 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -51,17 +51,16 @@
#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
-#define pblk_for_each_lun(pblk, rlun, i) \
- for ((i) = 0, rlun = &(pblk)->luns[0]; \
- (i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
-
/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)
+#define PBLK_DEFAULT_OP (11)
+
enum {
PBLK_READ = READ,
PBLK_WRITE = WRITE,/* Write from write buffer */
PBLK_WRITE_INT, /* Internal write - no write buffer */
+ PBLK_READ_RECOV, /* Recovery read - errors allowed */
PBLK_ERASE,
};
@@ -114,6 +113,7 @@ struct pblk_c_ctx {
/* read context */
struct pblk_g_ctx {
void *private;
+ unsigned long start_time;
u64 lba;
};
@@ -170,7 +170,7 @@ struct pblk_rb {
* the last submitted entry that has
* been successfully persisted to media
*/
- unsigned int sync_point; /* Sync point - last entry that must be
+ unsigned int flush_point; /* Flush point - last entry that must be
* flushed to the media. Used with
* REQ_FLUSH and REQ_FUA
*/
@@ -193,7 +193,7 @@ struct pblk_rb {
spinlock_t s_lock; /* Sync lock */
#ifdef CONFIG_NVM_DEBUG
- atomic_t inflight_sync_point; /* Not served REQ_FLUSH | REQ_FUA */
+ atomic_t inflight_flush_point; /* Not served REQ_FLUSH | REQ_FUA */
#endif
};
@@ -256,9 +256,6 @@ struct pblk_rl {
unsigned int high; /* Upper threshold for rate limiter (free run -
* user I/O rate limiter
*/
- unsigned int low; /* Lower threshold for rate limiter (user I/O
- * rate limiter - stall)
- */
unsigned int high_pw; /* High rounded up as a power of 2 */
#define PBLK_USER_HIGH_THRS 8 /* Begin write limit at 12% available blks */
@@ -292,7 +289,9 @@ struct pblk_rl {
unsigned long long nr_secs;
unsigned long total_blocks;
- atomic_t free_blocks;
+
+ atomic_t free_blocks; /* Total number of free blocks (+ OP) */
+ atomic_t free_user_blocks; /* Number of user free blocks (no OP) */
};
#define PBLK_LINE_EMPTY (~0U)
@@ -583,7 +582,9 @@ struct pblk {
*/
sector_t capacity; /* Device capacity when bad blocks are subtracted */
- int over_pct; /* Percentage of device used for over-provisioning */
+
+ int op; /* Percentage of device used for over-provisioning */
+ int op_blks; /* Number of blocks used for over-provisioning */
/* pblk provisioning values. Used by rate limiter */
struct pblk_rl rl;
@@ -691,7 +692,7 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
@@ -812,7 +813,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
struct pblk_rec_ctx *recovery, u64 *comp_bits,
unsigned int comp);
@@ -843,6 +844,7 @@ void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
@@ -851,7 +853,8 @@ void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);
/*
@@ -907,28 +910,47 @@ static inline int pblk_pad_distance(struct pblk *pblk)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
+ return NVM_MEM_PAGE_WRITE * geo->all_luns * geo->sec_per_pl;
}
-static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_line(struct ppa_addr p)
{
return p.g.blk;
}
-static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
- return p.g.blk;
+ return p.g.lun * geo->nr_chnls + p.g.ch;
}
-static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
+ u64 line_id)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ struct ppa_addr ppa;
+
+ ppa.ppa = 0;
+ ppa.g.blk = line_id;
+ ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
+ ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
+ ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
+ ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
+ ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
+
+ return ppa;
}
-/* A block within a line corresponds to the lun */
-static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
+ struct ppa_addr p)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ u64 paddr;
+
+ paddr = (u64)p.g.pg << pblk->ppaf.pg_offset;
+ paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
+ paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
+ paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
+ paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+
+ return paddr;
}
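
addr_to_gen_ppa() and pblk_dev_ppa_to_line_addr() are inverses over the non-block
fields; the mask and offset values come from pblk->ppaf, filled in at init time. Worth
stating as an invariant (a sketch, not code from the patch):

/* Invariant: a line-local address carries every field except the
 * block, which addr_to_gen_ppa() fills with line_id and
 * pblk_dev_ppa_to_line_addr() discards, so for any valid paddr:
 *
 *   pblk_dev_ppa_to_line_addr(pblk,
 *		addr_to_gen_ppa(pblk, paddr, line_id)) == paddr
 */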
static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
@@ -960,24 +982,6 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
return ppa64;
}
-static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
- sector_t lba)
-{
- struct ppa_addr ppa;
-
- if (pblk->ppaf_bitsize < 32) {
- u32 *map = (u32 *)pblk->trans_map;
-
- ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
- } else {
- struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
-
- ppa = map[lba];
- }
-
- return ppa;
-}
-
static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
u32 ppa32 = 0;
@@ -999,33 +1003,36 @@ static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
return ppa32;
}
-static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
- struct ppa_addr ppa)
+static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
+ sector_t lba)
{
+ struct ppa_addr ppa;
+
if (pblk->ppaf_bitsize < 32) {
u32 *map = (u32 *)pblk->trans_map;
- map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
} else {
- u64 *map = (u64 *)pblk->trans_map;
+ struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
- map[lba] = ppa.ppa;
+ ppa = map[lba];
}
+
+ return ppa;
}
-static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
- struct ppa_addr p)
+static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
+ struct ppa_addr ppa)
{
- u64 paddr;
+ if (pblk->ppaf_bitsize < 32) {
+ u32 *map = (u32 *)pblk->trans_map;
- paddr = 0;
- paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
- paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
- paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
- paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
- paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+ map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ } else {
+ u64 *map = (u64 *)pblk->trans_map;
- return paddr;
+ map[lba] = ppa.ppa;
+ }
}
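
The reordering above places the ppa32/ppa64 converters ahead of their users in
pblk_trans_map_get()/set(). The point of the 32-bit path is map footprint: when the
address format fits in under 32 bits, trans_map stores 4-byte entries instead of
8-byte ones. A standalone back-of-the-envelope with a hypothetical capacity:

#include <stdio.h>

int main(void)
{
	unsigned long long cap = 1ULL << 40;	/* 1 TiB, hypothetical */
	unsigned long long secs = cap / 4096;	/* 4 KiB sectors */

	/* ppaf_bitsize < 32 halves the in-memory L2P table */
	printf("trans_map: %llu MiB (u32) vs %llu MiB (ppa_addr)\n",
	       secs * 4 >> 20, secs * 8 >> 20);
	return 0;
}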
static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
@@ -1040,10 +1047,7 @@ static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
- if (lppa.ppa == rppa.ppa)
- return true;
-
- return false;
+ return (lppa.ppa == rppa.ppa);
}
static inline int pblk_addr_in_cache(struct ppa_addr ppa)
@@ -1066,32 +1070,6 @@ static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
return p;
}
-static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- ppa.g.blk = line_id;
- ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
- ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
- ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
- ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
- ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
-
- return ppa;
-}
-
-static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa = addr_to_gen_ppa(pblk, paddr, line_id);
-
- return ppa;
-}
-
static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
struct line_header *header)
{
@@ -1212,10 +1190,10 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
if (!ppa->c.is_cached &&
ppa->g.ch < geo->nr_chnls &&
- ppa->g.lun < geo->luns_per_chnl &&
+ ppa->g.lun < geo->nr_luns &&
ppa->g.pl < geo->nr_planes &&
- ppa->g.blk < geo->blks_per_lun &&
- ppa->g.pg < geo->pgs_per_blk &&
+ ppa->g.blk < geo->nr_chks &&
+ ppa->g.pg < geo->ws_per_chk &&
ppa->g.sec < geo->sec_per_pg)
continue;
@@ -1245,7 +1223,7 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
for (i = 0; i < rqd->nr_ppas; i++) {
ppa = ppa_list[i];
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_OPEN) {
@@ -1288,11 +1266,6 @@ static inline unsigned int pblk_get_secs(struct bio *bio)
return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}
-static inline sector_t pblk_get_sector(sector_t lba)
-{
- return lba * NR_PHY_IN_LOG;
-}
-
static inline void pblk_setup_uuid(struct pblk *pblk)
{
uuid_le uuid;
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
deleted file mode 100644
index 0993c14be860..000000000000
--- a/drivers/lightnvm/rrpc.c
+++ /dev/null
@@ -1,1625 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#include "rrpc.h"
-
-static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
-static DECLARE_RWSEM(rrpc_lock);
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags);
-
-#define rrpc_for_each_lun(rrpc, rlun, i) \
- for ((i) = 0, rlun = &(rrpc)->luns[0]; \
- (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
-
-static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk = a->rblk;
- unsigned int pg_offset;
-
- lockdep_assert_held(&rrpc->rev_lock);
-
- if (a->addr == ADDR_EMPTY || !rblk)
- return;
-
- spin_lock(&rblk->lock);
-
- div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
- WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
- rblk->nr_invalid_pages++;
-
- spin_unlock(&rblk->lock);
-
- rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
-}
-
-static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
- unsigned int len)
-{
- sector_t i;
-
- spin_lock(&rrpc->rev_lock);
- for (i = slba; i < slba + len; i++) {
- struct rrpc_addr *gp = &rrpc->trans_map[i];
-
- rrpc_page_invalidate(rrpc, gp);
- gp->rblk = NULL;
- }
- spin_unlock(&rrpc->rev_lock);
-}
-
-static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
- sector_t laddr, unsigned int pages)
-{
- struct nvm_rq *rqd;
- struct rrpc_inflight_rq *inf;
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
- if (!rqd)
- return ERR_PTR(-ENOMEM);
-
- inf = rrpc_get_inflight_rq(rqd);
- if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
- mempool_free(rqd, rrpc->rq_pool);
- return NULL;
- }
-
- return rqd;
-}
-
-static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
-
- rrpc_unlock_laddr(rrpc, inf);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
-{
- sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
- sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
- struct nvm_rq *rqd;
-
- while (1) {
- rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
- if (rqd)
- break;
-
- schedule();
- }
-
- if (IS_ERR(rqd)) {
- pr_err("rrpc: unable to acquire inflight IO\n");
- bio_io_error(bio);
- return;
- }
-
- rrpc_invalidate_range(rrpc, slba, len);
- rrpc_inflight_laddr_release(rrpc, rqd);
-}
-
-static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- return (rblk->next_page == dev->geo.sec_per_blk);
-}
-
-/* Calculate relative addr for the given block, considering instantiated LUNs */
-static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return rlun->id * dev->geo.sec_per_blk;
-}
-
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
- struct rrpc_addr *gp)
-{
- struct rrpc_block *rblk = gp->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- u64 addr = gp->addr;
- struct ppa_addr paddr;
-
- paddr.ppa = addr;
- paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
- paddr.g.ch = rlun->bppa.g.ch;
- paddr.g.lun = rlun->bppa.g.lun;
- paddr.g.blk = rblk->id;
-
- return paddr;
-}
-
-/* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
- struct rrpc_block **cur_rblk)
-{
- struct rrpc *rrpc = rlun->rrpc;
-
- if (*cur_rblk) {
- spin_lock(&(*cur_rblk)->lock);
- WARN_ON(!block_is_full(rrpc, *cur_rblk));
- spin_unlock(&(*cur_rblk)->lock);
- }
- *cur_rblk = new_rblk;
-}
-
-static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
- struct rrpc_lun *rlun)
-{
- struct rrpc_block *rblk = NULL;
-
- if (list_empty(&rlun->free_list))
- goto out;
-
- rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
-
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
-
-out:
- return rblk;
-}
-
-static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
- unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk;
- int is_gc = flags & NVM_IOTYPE_GC;
-
- spin_lock(&rlun->lock);
- if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
- pr_err("nvm: rrpc: cannot give block to non GC request\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
-
- rblk = __rrpc_get_blk(rrpc, rlun);
- if (!rblk) {
- pr_err("nvm: rrpc: cannot get new block\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
- spin_unlock(&rlun->lock);
-
- bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
- rblk->next_page = 0;
- rblk->nr_invalid_pages = 0;
- atomic_set(&rblk->data_cmnt_size, 0);
-
- return rblk;
-}
-
-static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- if (rblk->state & NVM_BLK_ST_TGT) {
- list_move_tail(&rblk->list, &rlun->free_list);
- rlun->nr_free_blocks++;
- rblk->state = NVM_BLK_ST_FREE;
- } else if (rblk->state & NVM_BLK_ST_BAD) {
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- } else {
- WARN_ON_ONCE(1);
- pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id, rblk->state);
- list_move_tail(&rblk->list, &rlun->bb_list);
- }
- spin_unlock(&rlun->lock);
-}
-
-static void rrpc_put_blks(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- if (rlun->cur)
- rrpc_put_blk(rrpc, rlun->cur);
- if (rlun->gc_cur)
- rrpc_put_blk(rrpc, rlun->gc_cur);
- }
-}
-
-static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
-{
- int next = atomic_inc_return(&rrpc->next_lun);
-
- return &rrpc->luns[next % rrpc->nr_luns];
-}
-
-static void rrpc_gc_kick(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- unsigned int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- queue_work(rrpc->krqd_wq, &rlun->ws_gc);
- }
-}
-
-/*
- * timed GC every interval.
- */
-static void rrpc_gc_timer(struct timer_list *t)
-{
- struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
-
- rrpc_gc_kick(rrpc);
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-}
-
-static void rrpc_end_sync_bio(struct bio *bio)
-{
- struct completion *waiting = bio->bi_private;
-
- if (bio->bi_status)
- pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
-
- complete(waiting);
-}
-
-/*
- * rrpc_move_valid_pages -- migrate live data off the block
- * @rrpc: the 'rrpc' structure
- * @block: the block from which to migrate live pages
- *
- * Description:
- * GC algorithms may call this function to migrate remaining live
- * pages off the block prior to erasing it. This function blocks
- * further execution until the operation is complete.
- */
-static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct request_queue *q = dev->q;
- struct rrpc_rev_addr *rev;
- struct nvm_rq *rqd;
- struct bio *bio;
- struct page *page;
- int slot;
- int nr_sec_per_blk = dev->geo.sec_per_blk;
- u64 phys_addr;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
- return 0;
-
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- pr_err("nvm: could not alloc bio to gc\n");
- return -ENOMEM;
- }
-
- page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
-
- while ((slot = find_first_zero_bit(rblk->invalid_pages,
- nr_sec_per_blk)) < nr_sec_per_blk) {
-
- /* Lock laddr */
- phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
-
-try:
- spin_lock(&rrpc->rev_lock);
- /* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr];
- /* already updated by previous regular write */
- if (rev->addr == ADDR_EMPTY) {
- spin_unlock(&rrpc->rev_lock);
- continue;
- }
-
- rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
- if (IS_ERR_OR_NULL(rqd)) {
- spin_unlock(&rrpc->rev_lock);
- schedule();
- goto try;
- }
-
- spin_unlock(&rrpc->rev_lock);
-
- /* Perform read to do GC */
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc read failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
- if (bio->bi_status) {
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
-
- bio_reset(bio);
- reinit_completion(&wait);
-
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- /* turn the command around and write the data back to a new
- * address
- */
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc write failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
-
- rrpc_inflight_laddr_release(rrpc, rqd);
- if (bio->bi_status)
- goto finished;
-
- bio_reset(bio);
- }
-
-finished:
- mempool_free(page, rrpc->page_pool);
- bio_put(bio);
-
- if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
- pr_err("nvm: failed to garbage collect block\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static void rrpc_block_gc(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- struct ppa_addr ppa;
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- if (rrpc_move_valid_pages(rrpc, rblk))
- goto put_back;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
- ppa.g.blk = rblk->id;
-
- if (nvm_erase_sync(rrpc->dev, &ppa, 1))
- goto put_back;
-
- rrpc_put_blk(rrpc, rblk);
-
- return;
-
-put_back:
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-}
-
-/* the block with the highest number of invalid pages will be at the
- * beginning of the list
- */
-static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
- struct rrpc_block *rb)
-{
- if (ra->nr_invalid_pages == rb->nr_invalid_pages)
- return ra;
-
- return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
-}
-
-/* linearly find the block with highest number of invalid pages
- * requires lun->lock
- */
-static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
-{
- struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblk, *max;
-
- BUG_ON(list_empty(prio_list));
-
- max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblk, prio_list, prio)
- max = rblk_max_invalid(max, rblk);
-
- return max;
-}
-
-static void rrpc_lun_gc(struct work_struct *work)
-{
- struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
- struct rrpc *rrpc = rlun->rrpc;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block_gc *gcb;
- unsigned int nr_blocks_need;
-
- nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
-
- if (nr_blocks_need < rrpc->nr_luns)
- nr_blocks_need = rrpc->nr_luns;
-
- spin_lock(&rlun->lock);
- while (nr_blocks_need > rlun->nr_free_blocks &&
- !list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblk = block_prio_find_max(rlun);
-
- if (!rblk->nr_invalid_pages)
- break;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
-
- list_del_init(&rblk->prio);
-
- WARN_ON(!block_is_full(rrpc, rblk));
-
- pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
- INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
-
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-
- nr_blocks_need--;
- }
- spin_unlock(&rlun->lock);
-
- /* TODO: Hint that request queue can be started again */
-}
-
-static void rrpc_gc_queue(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-}
-
-static const struct block_device_operations rrpc_fops = {
- .owner = THIS_MODULE,
-};
-
-static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
-{
- unsigned int i;
- struct rrpc_lun *rlun, *max_free;
-
- if (!is_gc)
- return get_next_lun(rrpc);
-
- /* during GC, we don't care about RR, instead we want to make
- * sure that we maintain evenness between the block luns.
- */
- max_free = &rrpc->luns[0];
- /* prevent GC-ing lun from devouring pages of a lun with
- * little free blocks. We don't take the lock as we only need an
- * estimate.
- */
- rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->nr_free_blocks > max_free->nr_free_blocks)
- max_free = rlun;
- }
-
- return max_free;
-}
-
-static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
- struct rrpc_block *rblk, u64 paddr)
-{
- struct rrpc_addr *gp;
- struct rrpc_rev_addr *rev;
-
- BUG_ON(laddr >= rrpc->nr_sects);
-
- gp = &rrpc->trans_map[laddr];
- spin_lock(&rrpc->rev_lock);
- if (gp->rblk)
- rrpc_page_invalidate(rrpc, gp);
-
- gp->addr = paddr;
- gp->rblk = rblk;
-
- rev = &rrpc->rev_trans_map[gp->addr];
- rev->addr = laddr;
- spin_unlock(&rrpc->rev_lock);
-
- return gp;
-}
-
-static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- u64 addr = ADDR_EMPTY;
-
- spin_lock(&rblk->lock);
- if (block_is_full(rrpc, rblk))
- goto out;
-
- addr = rblk->next_page;
-
- rblk->next_page++;
-out:
- spin_unlock(&rblk->lock);
- return addr;
-}
-
-/* Map a logical address to a physical page. The mapping implements a
- * round-robin approach and allocates a page from the next available lun.
- *
- * Returns a ppa_addr holding the physical address. Returns ADDR_EMPTY if no
- * blocks in the next rlun are available.
- */
-static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
- int is_gc)
-{
- struct nvm_tgt_dev *tgt_dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk, **cur_rblk;
- struct rrpc_addr *p;
- struct ppa_addr ppa;
- u64 paddr;
- int gc_force = 0;
-
- ppa.ppa = ADDR_EMPTY;
- rlun = rrpc_get_lun_rr(rrpc, is_gc);
-
- if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
- return ppa;
-
- /*
- * page allocation steps:
- * 1. Try to allocate a new page from the current rblk
- * 2a. If that succeeds, proceed to map it in and return
- * 2b. If it fails, first try to allocate a new block from the media
- * manager, and then retry step 1. Retry until the normal block pool
- * is exhausted.
- * 3. If exhausted, and the garbage collector is requesting the block,
- * go to the reserved block and retry step 1.
- * If this fails as well, or the request does not come from GC,
- * report that no block could be retrieved and let the caller handle
- * further processing.
- */
-
- spin_lock(&rlun->lock);
- cur_rblk = &rlun->cur;
- rblk = rlun->cur;
-retry:
- paddr = rrpc_alloc_addr(rrpc, rblk);
-
- if (paddr != ADDR_EMPTY)
- goto done;
-
- if (!list_empty(&rlun->wblk_list)) {
-new_blk:
- rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
- prio);
- rrpc_set_lun_cur(rlun, rblk, cur_rblk);
- list_del(&rblk->prio);
- goto retry;
- }
- spin_unlock(&rlun->lock);
-
- rblk = rrpc_get_blk(rrpc, rlun, gc_force);
- if (rblk) {
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->wblk_list);
- /*
- * another thread might already have added a new block;
- * therefore, make sure that one is used instead of the
- * one just added.
- */
- goto new_blk;
- }
-
- if (unlikely(is_gc) && !gc_force) {
- /* retry from emergency gc block */
- cur_rblk = &rlun->gc_cur;
- rblk = rlun->gc_cur;
- gc_force = 1;
- spin_lock(&rlun->lock);
- goto retry;
- }
-
- pr_err("rrpc: failed to allocate new block\n");
- return ppa;
-done:
- spin_unlock(&rlun->lock);
- p = rrpc_update_map(rrpc, laddr, rblk, paddr);
- if (!p)
- return ppa;
-
- /* return global address */
- return rrpc_ppa_to_gaddr(tgt_dev, p);
-}
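
/*
 * Minimal illustrative sketch of the allocation order described in the
 * comment inside rrpc_map_page(): current block first, then a fresh block,
 * and, for GC writes only, the reserved emergency block. All types and
 * helpers below are hypothetical stand-ins, not driver API.
 */
#include <stdint.h>

#define SKETCH_ADDR_EMPTY ((uint64_t)-1)

struct sketch_blk { uint64_t next_page, nr_pages; };

static uint64_t sketch_alloc_from(struct sketch_blk *b)
{
	/* step 1: next free page in the block, or empty when full */
	return b->next_page < b->nr_pages ? b->next_page++ : SKETCH_ADDR_EMPTY;
}

static uint64_t sketch_map_page(struct sketch_blk *cur,
				struct sketch_blk *gc_cur,
				struct sketch_blk *(*get_new_blk)(void),
				int is_gc)
{
	uint64_t paddr = sketch_alloc_from(cur);
	struct sketch_blk *fresh;

	if (paddr != SKETCH_ADDR_EMPTY)
		return paddr;				/* step 2a */

	fresh = get_new_blk();				/* step 2b */
	if (fresh)
		return sketch_alloc_from(fresh);

	if (is_gc)					/* step 3 */
		return sketch_alloc_from(gc_cur);

	return SKETCH_ADDR_EMPTY;			/* caller handles */
}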
-
-static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_block_gc *gcb;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb) {
- pr_err("rrpc: unable to queue block for gc.");
- return;
- }
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
-
- INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-}
-
-static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
-{
- struct rrpc_lun *rlun = NULL;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
- rrpc->luns[i].bppa.g.lun == p.g.lun) {
- rlun = &rrpc->luns[i];
- break;
- }
- }
-
- return rlun;
-}
-
-static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
-
- rlun = rrpc_ppa_to_lun(rrpc, ppa);
- rblk = &rlun->blocks[ppa.g.blk];
- rblk->state = NVM_BLK_ST_BAD;
-
- nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
-}
-
-static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- void *comp_bits = &rqd->ppa_status;
- struct ppa_addr ppa, prev_ppa;
- int nr_ppas = rqd->nr_ppas;
- int bit;
-
- if (rqd->nr_ppas == 1)
- __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
-
- ppa_set_empty(&prev_ppa);
- bit = -1;
- while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
- ppa = rqd->ppa_list[bit];
- if (ppa_cmp_blk(ppa, prev_ppa))
- continue;
-
- __rrpc_mark_bad_block(rrpc, ppa);
- }
-}
-
-static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
- sector_t laddr, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *p;
- struct rrpc_block *rblk;
- int cmnt_size, i;
-
- for (i = 0; i < npages; i++) {
- p = &rrpc->trans_map[laddr + i];
- rblk = p->rblk;
-
- cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == dev->geo.sec_per_blk))
- rrpc_run_gc(rrpc, rblk);
- }
-}
-
-static void rrpc_end_io(struct nvm_rq *rqd)
-{
- struct rrpc *rrpc = rqd->private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- uint8_t npages = rqd->nr_ppas;
- sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
-
- if (bio_data_dir(rqd->bio) == WRITE) {
- if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- rrpc_mark_bad_block(rrpc, rqd);
-
- rrpc_end_io_write(rrpc, rrqd, laddr, npages);
- }
-
- bio_put(rqd->bio);
-
- if (rrqd->flags & NVM_IOTYPE_GC)
- return;
-
- rrpc_unlock_rq(rrpc, rqd);
-
- if (npages > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *gp;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- BUG_ON(!(laddr + i < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr + i];
-
- if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- return NVM_IO_DONE;
- }
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
- unsigned long flags)
-{
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
- struct rrpc_addr *gp;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- BUG_ON(!(laddr < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr];
-
- if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- return NVM_IO_DONE;
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct ppa_addr p;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_list[i] = p;
- }
-
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct ppa_addr p;
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- p = rrpc_map_page(rrpc, laddr, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_addr = p;
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd->dma_ppa_list);
- if (!rqd->ppa_list) {
- pr_err("rrpc: not able to allocate ppa list\n");
- return NVM_IO_ERR;
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
- npages);
-
- return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_rq(rrpc, bio, rqd, flags);
-
- return rrpc_read_rq(rrpc, bio, rqd, flags);
-}
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
- uint8_t nr_pages = rrpc_get_pages(bio);
- int bio_size = bio_sectors(bio) << 9;
- int err;
-
- if (bio_size < dev->geo.sec_size)
- return NVM_IO_ERR;
- else if (bio_size > dev->geo.max_rq_size)
- return NVM_IO_ERR;
-
- err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
- if (err)
- return err;
-
- bio_get(bio);
- rqd->bio = bio;
- rqd->private = rrpc;
- rqd->nr_ppas = nr_pages;
- rqd->end_io = rrpc_end_io;
- rrq->flags = flags;
-
- err = nvm_submit_io(dev, rqd);
- if (err) {
- pr_err("rrpc: I/O submission failed: %d\n", err);
- bio_put(bio);
- if (!(flags & NVM_IOTYPE_GC)) {
- rrpc_unlock_rq(rrpc, rqd);
- if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- }
- return NVM_IO_ERR;
- }
-
- return NVM_IO_OK;
-}
-
-static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
-{
- struct rrpc *rrpc = q->queuedata;
- struct nvm_rq *rqd;
- int err;
-
- blk_queue_split(q, &bio);
-
- if (bio_op(bio) == REQ_OP_DISCARD) {
- rrpc_discard(rrpc, bio);
- return BLK_QC_T_NONE;
- }
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
- memset(rqd, 0, sizeof(struct nvm_rq));
-
- err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
- switch (err) {
- case NVM_IO_OK:
- return BLK_QC_T_NONE;
- case NVM_IO_ERR:
- bio_io_error(bio);
- break;
- case NVM_IO_DONE:
- bio_endio(bio);
- break;
- case NVM_IO_REQUEUE:
- spin_lock(&rrpc->bio_lock);
- bio_list_add(&rrpc->requeue_bios, bio);
- spin_unlock(&rrpc->bio_lock);
- queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
- break;
- }
-
- mempool_free(rqd, rrpc->rq_pool);
- return BLK_QC_T_NONE;
-}
-
-static void rrpc_requeue(struct work_struct *work)
-{
- struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
- struct bio_list bios;
- struct bio *bio;
-
- bio_list_init(&bios);
-
- spin_lock(&rrpc->bio_lock);
- bio_list_merge(&bios, &rrpc->requeue_bios);
- bio_list_init(&rrpc->requeue_bios);
- spin_unlock(&rrpc->bio_lock);
-
- while ((bio = bio_list_pop(&bios)))
- rrpc_make_rq(rrpc->disk->queue, bio);
-}
-
-static void rrpc_gc_free(struct rrpc *rrpc)
-{
- if (rrpc->krqd_wq)
- destroy_workqueue(rrpc->krqd_wq);
-
- if (rrpc->kgc_wq)
- destroy_workqueue(rrpc->kgc_wq);
-}
-
-static int rrpc_gc_init(struct rrpc *rrpc)
-{
- rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
- rrpc->nr_luns);
- if (!rrpc->krqd_wq)
- return -ENOMEM;
-
- rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
- if (!rrpc->kgc_wq)
- return -ENOMEM;
-
- timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
-
- return 0;
-}
-
-static void rrpc_map_free(struct rrpc *rrpc)
-{
- vfree(rrpc->rev_trans_map);
- vfree(rrpc->trans_map);
-}
-
-static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
-{
- struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *addr = rrpc->trans_map + slba;
- struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- u64 i;
-
- for (i = 0; i < nlb; i++) {
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- unsigned int mod;
-
- /* LNVM treats address spaces as silos; LBA and PBA are
- * equally large and zero-indexed.
- */
- if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("nvm: L2P data entry is out of bounds!\n");
- pr_err("nvm: Maybe loaded an old target L2P\n");
- return -EINVAL;
- }
-
- /* Address zero is special: the first page on a disk is
- * protected, as it often holds internal device boot
- * information.
- */
- if (!pba)
- continue;
-
- div_u64_rem(pba, rrpc->nr_sects, &mod);
-
- gaddr = rrpc_recov_addr(dev, pba);
- rlun = rrpc_ppa_to_lun(rrpc, gaddr);
- if (!rlun) {
- pr_err("rrpc: l2p corruption on lba %llu\n",
- slba + i);
- return -EINVAL;
- }
-
- rblk = &rlun->blocks[gaddr.g.blk];
- if (!rblk->state) {
- /* at this point, we don't know anything about the
- * block. It's up to the FTL on top to re-establish the
- * block state. The block is assumed to be open.
- */
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
- }
-
- addr[i].addr = pba;
- addr[i].rblk = rblk;
- raddr[mod].addr = slba + i;
- }
-
- return 0;
-}
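
/*
 * Worked example for the reverse-map indexing above (made-up numbers):
 * with rrpc->nr_sects = 1000, a physical address pba = 2345 gives
 * div_u64_rem(2345, 1000, &mod) -> mod = 345, so raddr[345] records the
 * logical address slba + i for that page.
 */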
-
-static int rrpc_map_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t i;
- int ret;
-
- rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
- if (!rrpc->trans_map)
- return -ENOMEM;
-
- rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_sects);
- if (!rrpc->rev_trans_map)
- return -ENOMEM;
-
- for (i = 0; i < rrpc->nr_sects; i++) {
- struct rrpc_addr *p = &rrpc->trans_map[i];
- struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
-
- p->addr = ADDR_EMPTY;
- r->addr = ADDR_EMPTY;
- }
-
- /* Bring up the mapping table from device */
- ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not read L2P table.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Minimum pages needed within a lun */
-#define PAGE_POOL_SIZE 16
-#define ADDR_POOL_SIZE 64
-
-static int rrpc_core_init(struct rrpc *rrpc)
-{
- down_write(&rrpc_lock);
- if (!rrpc_gcb_cache) {
- rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
- sizeof(struct rrpc_block_gc), 0, 0, NULL);
- if (!rrpc_gcb_cache) {
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
-
- rrpc_rq_cache = kmem_cache_create("rrpc_rq",
- sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
- 0, 0, NULL);
- if (!rrpc_rq_cache) {
- kmem_cache_destroy(rrpc_gcb_cache);
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
- }
- up_write(&rrpc_lock);
-
- rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
- if (!rrpc->page_pool)
- return -ENOMEM;
-
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
- rrpc_gcb_cache);
- if (!rrpc->gcb_pool)
- return -ENOMEM;
-
- rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
- if (!rrpc->rq_pool)
- return -ENOMEM;
-
- spin_lock_init(&rrpc->inflights.lock);
- INIT_LIST_HEAD(&rrpc->inflights.reqs);
-
- return 0;
-}
-
-static void rrpc_core_free(struct rrpc *rrpc)
-{
- mempool_destroy(rrpc->page_pool);
- mempool_destroy(rrpc->gcb_pool);
- mempool_destroy(rrpc->rq_pool);
-}
-
-static void rrpc_luns_free(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- vfree(rlun->blocks);
- }
-
- kfree(rrpc->luns);
-}
-
-static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
-{
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_block *rblk;
- struct ppa_addr ppa;
- u8 *blks;
- int nr_blks;
- int i;
- int ret;
-
- if (!dev->parent->ops->get_bb_tbl)
- return 0;
-
- nr_blks = geo->blks_per_lun * geo->plane_mode;
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
-
- ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
- if (ret) {
- pr_err("rrpc: could not get BB table\n");
- goto out;
- }
-
- nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
- if (nr_blks < 0) {
- ret = nr_blks;
- goto out;
- }
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == NVM_BLK_T_FREE)
- continue;
-
- rblk = &rlun->blocks[i];
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- rlun->nr_free_blocks--;
- }
-
-out:
- kfree(blks);
- return ret;
-}
-
-static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
-{
- rlun->bppa.ppa = 0;
- rlun->bppa.g.ch = ppa.g.ch;
- rlun->bppa.g.lun = ppa.g.lun;
-}
-
-static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun;
- int i, j, ret = -EINVAL;
-
- if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
- pr_err("rrpc: number of pages per block too high.");
- return -EINVAL;
- }
-
- spin_lock_init(&rrpc->rev_lock);
-
- rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
- GFP_KERNEL);
- if (!rrpc->luns)
- return -ENOMEM;
-
- /* 1:1 mapping */
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- rlun->id = i;
- rrpc_set_lun_ppa(rlun, luns[i]);
- rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- geo->blks_per_lun);
- if (!rlun->blocks) {
- ret = -ENOMEM;
- goto err;
- }
-
- INIT_LIST_HEAD(&rlun->free_list);
- INIT_LIST_HEAD(&rlun->used_list);
- INIT_LIST_HEAD(&rlun->bb_list);
-
- for (j = 0; j < geo->blks_per_lun; j++) {
- struct rrpc_block *rblk = &rlun->blocks[j];
-
- rblk->id = j;
- rblk->rlun = rlun;
- rblk->state = NVM_BLK_T_FREE;
- INIT_LIST_HEAD(&rblk->prio);
- INIT_LIST_HEAD(&rblk->list);
- spin_lock_init(&rblk->lock);
-
- list_add_tail(&rblk->list, &rlun->free_list);
- }
-
- rlun->rrpc = rrpc;
- rlun->nr_free_blocks = geo->blks_per_lun;
- rlun->reserved_blocks = 2; /* for GC only */
-
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->wblk_list);
-
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
-
- if (rrpc_bb_discovery(dev, rlun))
- goto err;
-
- }
-
- return 0;
-err:
- return ret;
-}
-
-/* returns 0 on success and stores the beginning address in *begin */
-static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t size = rrpc->nr_sects * dev->geo.sec_size;
- int ret;
-
- size >>= 9;
-
- ret = nvm_get_area(dev, begin, size);
- if (!ret)
- *begin >>= (ilog2(dev->geo.sec_size) - 9);
-
- return ret;
-}
-
-static void rrpc_area_free(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
-
- nvm_put_area(dev, begin);
-}
-
-static void rrpc_free(struct rrpc *rrpc)
-{
- rrpc_gc_free(rrpc);
- rrpc_map_free(rrpc);
- rrpc_core_free(rrpc);
- rrpc_luns_free(rrpc);
- rrpc_area_free(rrpc);
-
- kfree(rrpc);
-}
-
-static void rrpc_exit(void *private)
-{
- struct rrpc *rrpc = private;
-
- del_timer(&rrpc->gc_timer);
-
- flush_workqueue(rrpc->krqd_wq);
- flush_workqueue(rrpc->kgc_wq);
-
- rrpc_free(rrpc);
-}
-
-static sector_t rrpc_capacity(void *private)
-{
- struct rrpc *rrpc = private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t reserved, provisioned;
-
- /* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
- provisioned = rrpc->nr_sects - reserved;
-
- if (reserved > rrpc->nr_sects) {
- pr_err("rrpc: not enough space available to expose storage.\n");
- return 0;
- }
-
- sector_div(provisioned, 10);
- return provisioned * 9 * NR_PHY_IN_LOG;
-}
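
/*
 * Worked example for the capacity math above (made-up geometry): with
 * 4 luns, 4096 sectors per block and nr_sects = 1,048,576,
 *
 *   reserved    = 4 * 4096 * 4            =    65,536
 *   provisioned = 1,048,576 - 65,536      =   983,040
 *   exposed     = (983,040 / 10) * 9 * 8  = 7,077,888
 *
 * i.e. 90% of the provisioned capacity, converted from 4 KB pages to
 * 512-byte sectors via NR_PHY_IN_LOG = 4096 / 512 = 8 (see rrpc.h below).
 */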
-
-/*
- * Looks up the logical address in the reverse translation map and checks
- * whether it is still valid by comparing the forward mapping against the
- * physical address; stale pages are marked invalid on the block.
- */
-static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- int offset;
- struct rrpc_addr *laddr;
- u64 bpaddr, paddr, pladdr;
-
- bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
- paddr = bpaddr + offset;
-
- pladdr = rrpc->rev_trans_map[paddr].addr;
- if (pladdr == ADDR_EMPTY)
- continue;
-
- laddr = &rrpc->trans_map[pladdr];
-
- if (paddr == laddr->addr) {
- laddr->rblk = rblk;
- } else {
- set_bit(offset, rblk->invalid_pages);
- rblk->nr_invalid_pages++;
- }
- }
-}
-
-static int rrpc_blocks_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int lun_iter, blk_iter;
-
- for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
- rlun = &rrpc->luns[lun_iter];
-
- for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
- blk_iter++) {
- rblk = &rlun->blocks[blk_iter];
- rrpc_block_map_update(rrpc, rblk);
- }
- }
-
- return 0;
-}
-
-static int rrpc_luns_configure(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- rblk = rrpc_get_blk(rrpc, rlun, 0);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
-
- /* Emergency gc block */
- rblk = rrpc_get_blk(rrpc, rlun, 1);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
- }
-
- return 0;
-err:
- rrpc_put_blks(rrpc);
- return -EINVAL;
-}
-
-static struct nvm_tgt_type tt_rrpc;
-
-static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
- int flags)
-{
- struct request_queue *bqueue = dev->q;
- struct request_queue *tqueue = tdisk->queue;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc *rrpc;
- sector_t soffset;
- int ret;
-
- if (!(dev->identity.dom & NVM_RSP_L2P)) {
- pr_err("nvm: rrpc: device does not support l2p (%x)\n",
- dev->identity.dom);
- return ERR_PTR(-EINVAL);
- }
-
- rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
- if (!rrpc)
- return ERR_PTR(-ENOMEM);
-
- rrpc->dev = dev;
- rrpc->disk = tdisk;
-
- bio_list_init(&rrpc->requeue_bios);
- spin_lock_init(&rrpc->bio_lock);
- INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
-
- rrpc->nr_luns = geo->nr_luns;
- rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
-
- /* simple round-robin strategy */
- atomic_set(&rrpc->next_lun, -1);
-
- ret = rrpc_area_init(rrpc, &soffset);
- if (ret < 0) {
- pr_err("nvm: rrpc: could not initialize area\n");
- return ERR_PTR(ret);
- }
- rrpc->soffset = soffset;
-
- ret = rrpc_luns_init(rrpc, dev->luns);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize luns\n");
- goto err;
- }
-
- ret = rrpc_core_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize core\n");
- goto err;
- }
-
- ret = rrpc_map_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize maps\n");
- goto err;
- }
-
- ret = rrpc_blocks_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize state for blocks\n");
- goto err;
- }
-
- ret = rrpc_luns_configure(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
- goto err;
- }
-
- ret = rrpc_gc_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize gc\n");
- goto err;
- }
-
- /* inherit the size from the underlying device */
- blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
- blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
-
- pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
- rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
-
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-
- return rrpc;
-err:
- rrpc_free(rrpc);
- return ERR_PTR(ret);
-}
-
-/* round robin, page-based FTL, and cost-based GC */
-static struct nvm_tgt_type tt_rrpc = {
- .name = "rrpc",
- .version = {1, 0, 0},
-
- .make_rq = rrpc_make_rq,
- .capacity = rrpc_capacity,
-
- .init = rrpc_init,
- .exit = rrpc_exit,
-};
-
-static int __init rrpc_module_init(void)
-{
- return nvm_register_tgt_type(&tt_rrpc);
-}
-
-static void rrpc_module_exit(void)
-{
- nvm_unregister_tgt_type(&tt_rrpc);
-}
-
-module_init(rrpc_module_init);
-module_exit(rrpc_module_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
deleted file mode 100644
index fdb6ff902903..000000000000
--- a/drivers/lightnvm/rrpc.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#ifndef RRPC_H_
-#define RRPC_H_
-
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/vmalloc.h>
-
-#include <linux/lightnvm.h>
-
-/* Run GC only if fewer than 1/X blocks are free */
-#define GC_LIMIT_INVERSE 10
-#define GC_TIME_SECS 100
-
-#define RRPC_SECTOR (512)
-#define RRPC_EXPOSED_PAGE_SIZE (4096)
-
-#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
-
-struct rrpc_inflight {
- struct list_head reqs;
- spinlock_t lock;
-};
-
-struct rrpc_inflight_rq {
- struct list_head list;
- sector_t l_start;
- sector_t l_end;
-};
-
-struct rrpc_rq {
- struct rrpc_inflight_rq inflight_rq;
- unsigned long flags;
-};
-
-struct rrpc_block {
- int id; /* id inside of LUN */
- struct rrpc_lun *rlun;
-
- struct list_head prio; /* LUN GC list */
- struct list_head list; /* LUN free, used, bb list */
-
-#define MAX_INVALID_PAGES_STORAGE 8
- /* Bitmap for invalid page entries */
- unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
- /* points to the next writable page within a block */
- unsigned int next_page;
- /* number of pages that are invalid, wrt host page size */
- unsigned int nr_invalid_pages;
-
- int state;
-
- spinlock_t lock;
- atomic_t data_cmnt_size; /* data pages committed to stable storage */
-};
-
-struct rrpc_lun {
- struct rrpc *rrpc;
-
- int id;
- struct ppa_addr bppa;
-
- struct rrpc_block *cur, *gc_cur;
- struct rrpc_block *blocks; /* Reference to block allocation */
-
- struct list_head prio_list; /* Blocks that may be GC'ed */
- struct list_head wblk_list; /* Queued blocks to be written to */
-
- /* lun block lists */
- struct list_head used_list; /* In-use blocks */
- struct list_head free_list; /* Not used blocks i.e. released
- * and ready for use
- */
- struct list_head bb_list; /* Bad blocks. Mutually exclusive with
- * free_list and used_list
- */
- unsigned int nr_free_blocks; /* Number of unused blocks */
-
- struct work_struct ws_gc;
-
- int reserved_blocks;
-
- spinlock_t lock;
-};
-
-struct rrpc {
- struct nvm_tgt_dev *dev;
- struct gendisk *disk;
-
- sector_t soffset; /* logical sector offset */
-
- int nr_luns;
- struct rrpc_lun *luns;
-
- /* calculated values */
- unsigned long long nr_sects;
-
- /* Write strategy variables. Move these into a separate structure for
- * each strategy.
- */
- atomic_t next_lun; /* Whenever a page is written, this is updated
- * to point to the next write lun
- */
-
- spinlock_t bio_lock;
- struct bio_list requeue_bios;
- struct work_struct ws_requeue;
-
- /* Simple translation map of logical addresses to physical addresses.
- * The logical addresses are known by the host system, while the physical
- * addresses are used when writing to the disk block device.
- */
- struct rrpc_addr *trans_map;
- /* also store a reverse map for garbage collection */
- struct rrpc_rev_addr *rev_trans_map;
- spinlock_t rev_lock;
-
- struct rrpc_inflight inflights;
-
- mempool_t *addr_pool;
- mempool_t *page_pool;
- mempool_t *gcb_pool;
- mempool_t *rq_pool;
-
- struct timer_list gc_timer;
- struct workqueue_struct *krqd_wq;
- struct workqueue_struct *kgc_wq;
-};
-
-struct rrpc_block_gc {
- struct rrpc *rrpc;
- struct rrpc_block *rblk;
- struct work_struct ws_gc;
-};
-
-/* Logical to physical mapping */
-struct rrpc_addr {
- u64 addr;
- struct rrpc_block *rblk;
-};
-
-/* Physical to logical mapping */
-struct rrpc_rev_addr {
- u64 addr;
-};
-
-static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
- struct ppa_addr r)
-{
- struct ppa_addr l;
- int secs, pgs;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- return l;
-}
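
/*
 * Worked example for the decomposition above (made-up geometry): with
 * sec_per_pg = 4 and pgs_per_blk = 512, a linear address r.ppa = 10275
 * gives l.g.sec = 10275 % 4 = 3, then 10275 / 4 = 2568 and
 * l.g.pg = 2568 % 512 = 8. Only the sector and page fields are recovered
 * here; block, lun and channel are not computed by this helper.
 */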
-
-static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
-{
- return linear_to_generic_addr(&dev->geo, pba);
-}
-
-static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
-}
-
-static inline sector_t rrpc_get_laddr(struct bio *bio)
-{
- return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
-}
-
-static inline unsigned int rrpc_get_pages(struct bio *bio)
-{
- return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
-}
-
-static inline sector_t rrpc_get_sector(sector_t laddr)
-{
- return laddr * NR_PHY_IN_LOG;
-}
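
/*
 * Quick illustration of the conversions above: NR_PHY_IN_LOG is
 * 4096 / 512 = 8, so a bio starting at 512-byte sector 4096 with a
 * 64 KB payload gives rrpc_get_laddr() = 4096 / 8 = 512 and
 * rrpc_get_pages() = 65536 / 4096 = 16; rrpc_get_sector(512) maps back
 * to sector 4096.
 */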
-
-static inline int request_intersects(struct rrpc_inflight_rq *r,
- sector_t laddr_start, sector_t laddr_end)
-{
- return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
-}
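
/*
 * The check above is the standard closed-interval overlap test: ranges
 * [a_start, a_end] and [b_start, b_end] intersect iff
 * a_end >= b_start && a_start <= b_end. For example, an in-flight request
 * covering laddrs [10, 19] blocks a new request for [15, 24]
 * (24 >= 10 and 15 <= 19), while a request for [20, 24] is admitted
 * (20 > 19).
 */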
-
-static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages, struct rrpc_inflight_rq *r)
-{
- sector_t laddr_end = laddr + pages - 1;
- struct rrpc_inflight_rq *rtmp;
-
- WARN_ON(irqs_disabled());
-
- spin_lock_irq(&rrpc->inflights.lock);
- list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
- if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
- /* existing, overlapping request, come back later */
- spin_unlock_irq(&rrpc->inflights.lock);
- return 1;
- }
- }
-
- r->l_start = laddr;
- r->l_end = laddr_end;
-
- list_add_tail(&r->list, &rrpc->inflights.reqs);
- spin_unlock_irq(&rrpc->inflights.lock);
- return 0;
-}
-
-static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages,
- struct rrpc_inflight_rq *r)
-{
- BUG_ON((laddr + pages) > rrpc->nr_sects);
-
- return __rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
-{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-
- return &rrqd->inflight_rq;
-}
-
-static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd)
-{
- sector_t laddr = rrpc_get_laddr(bio);
- unsigned int pages = rrpc_get_pages(bio);
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
-
- return rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
- struct rrpc_inflight_rq *r)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rrpc->inflights.lock, flags);
- list_del_init(&r->list);
- spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
-}
-
-static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- uint8_t pages = rqd->nr_ppas;
-
- BUG_ON((r->l_start + pages) > rrpc->nr_sects);
-
- rrpc_unlock_laddr(rrpc, r);
-}
-
-#endif /* RRPC_H_ */
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 899ec1f4c833..346e6f5f77be 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1245,10 +1245,10 @@ static ssize_t smu_read(struct file *file, char __user *buf,
return -EBADFD;
}
-static unsigned int smu_fpoll(struct file *file, poll_table *wait)
+static __poll_t smu_fpoll(struct file *file, poll_table *wait)
{
struct smu_private *pp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (pp == 0)
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c4c2b3b85ebc..e8b29fc532e1 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2157,11 +2157,11 @@ pmu_write(struct file *file, const char __user *buf,
return 0;
}
-static unsigned int
+static __poll_t
pmu_fpoll(struct file *filp, poll_table *wait)
{
struct pmu_private *pp = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (pp == 0)
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 93f3d4d61fa7..f84730d63b1f 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -235,7 +235,7 @@ kfree_err:
return ret;
}
-static unsigned int
+static __poll_t
mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
{
struct mbox_test_device *tdev = filp->private_data;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 83b9362be09c..2c8ac3688815 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -269,6 +269,13 @@ config DM_BIO_PRISON
source "drivers/md/persistent-data/Kconfig"
+config DM_UNSTRIPED
+ tristate "Unstriped target"
+ depends on BLK_DEV_DM
+ ---help---
+ Unstripes I/O so it is issued solely on a single drive in a HW
+ RAID0 or dm-striped target.
+
config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f701bb211783..63255f3ebd97 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
+obj-$(CONFIG_DM_UNSTRIPED) += dm-unstripe.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a0cc1bc6d884..6cc6c0f9c3a9 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -525,15 +525,21 @@ struct open_bucket {
/*
* We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate flash
+ * only volume write streams from cached devices, secondly we look for a bucket
+ * where the last write to it was sequential with the current write, and
+ * failing that we look for a bucket that was last used by the same task.
*
 * The idea is that if you've got multiple tasks pulling data into the cache at the
* same time, you'll get better cache utilization if you try to segregate their
* data and preserve locality.
*
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash-only volume are not reclaimable; if
+ * they are mixed with dirty sectors of a cached device, such buckets will
+ * be marked as dirty and won't be reclaimed, even though the dirty data of
+ * the cached device has been written back to the backing device.
+ *
+ * And say you've started Firefox at the same time you're copying a
* bunch of files. Firefox will likely end up being fairly hot and stay in the
* cache awhile, but the data you copied might not be; if you wrote all that
* data to the same buckets it'd get invalidated at the same time.
@@ -550,7 +556,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket *ret, *ret_task = NULL;
list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
goto found;
else if (ret->last_write_point == write_point)
ret_task = ret;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 843877e017e1..5e2d4e80198e 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -320,14 +320,15 @@ struct cached_dev {
*/
atomic_t has_dirty;
- struct bch_ratelimit writeback_rate;
- struct delayed_work writeback_rate_update;
-
/*
- * Internal to the writeback code, so read_dirty() can keep track of
- * where it's at.
+ * Set to zero by things that touch the backing volume, except
+ * writeback. Incremented by writeback. Used to determine when to
+ * accelerate idle writeback.
*/
- sector_t last_read;
+ atomic_t backing_idle;
+
+ struct bch_ratelimit writeback_rate;
+ struct delayed_work writeback_rate_update;
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
@@ -336,6 +337,14 @@ struct cached_dev {
struct keybuf writeback_keys;
+ /*
+ * Order the write-half of writeback operations strongly in dispatch
+ * order. (Maintain LBA order; don't allow reads completing out of
+ * order to re-order the writes...)
+ */
+ struct closure_waitlist writeback_ordering_wait;
+ atomic_t writeback_sequence_next;
+
/* For tracking sequential IO */
#define RECENT_IO_BITS 7
#define RECENT_IO (1 << RECENT_IO_BITS)
@@ -488,6 +497,7 @@ struct cache_set {
int caches_loaded;
struct bcache_device **devices;
+ unsigned devices_max_used;
struct list_head cached_devs;
uint64_t cached_dev_sectors;
struct closure caching;
@@ -852,7 +862,7 @@ static inline void wake_up_allocators(struct cache_set *c)
/* Forward declarations */
-void bch_count_io_errors(struct cache *, blk_status_t, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
blk_status_t, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81e8dc3dbe5e..bf3a48aa9a9a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -419,7 +419,7 @@ static void do_btree_node_write(struct btree *b)
SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
bset_sector_offset(&b->keys, i));
- if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
+ if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
int j;
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -432,6 +432,7 @@ static void do_btree_node_write(struct btree *b)
continue_at(cl, btree_node_write_done, NULL);
} else {
+ /* No problem for multipage bvecs since the bio has just been allocated */
b->bio->bi_vcnt = 0;
bch_bio_map(b->bio, i);
@@ -1678,7 +1679,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
/* don't reclaim buckets to which writeback keys point */
rcu_read_lock();
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
struct cached_dev *dc;
struct keybuf_key *w, *n;
@@ -1803,10 +1804,7 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c)
{
c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
- if (IS_ERR(c->gc_thread))
- return PTR_ERR(c->gc_thread);
-
- return 0;
+ return PTR_ERR_OR_ZERO(c->gc_thread);
}
/* Initial partial gc */
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 1841d0359bac..7f12920c14f7 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/sched/debug.h>
#include "closure.h"
@@ -18,10 +19,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
BUG_ON(flags & CLOSURE_GUARD_MASK);
BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
- /* Must deliver precisely one wakeup */
- if (r == 1 && (flags & CLOSURE_SLEEPING))
- wake_up_process(cl->task);
-
if (!r) {
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
atomic_set(&cl->remaining,
@@ -100,28 +97,34 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
}
EXPORT_SYMBOL(closure_wait);
-/**
- * closure_sync - sleep until a closure has nothing left to wait on
- *
- * Sleeps until the refcount hits 1 - the thread that's running the closure owns
- * the last refcount.
- */
-void closure_sync(struct closure *cl)
+struct closure_syncer {
+ struct task_struct *task;
+ int done;
+};
+
+static void closure_sync_fn(struct closure *cl)
{
- while (1) {
- __closure_start_sleep(cl);
- closure_set_ret_ip(cl);
+ cl->s->done = 1;
+ wake_up_process(cl->s->task);
+}
- if ((atomic_read(&cl->remaining) &
- CLOSURE_REMAINING_MASK) == 1)
- break;
+void __sched __closure_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
schedule();
}
- __closure_end_sleep(cl);
+ __set_current_state(TASK_RUNNING);
}
-EXPORT_SYMBOL(closure_sync);
+EXPORT_SYMBOL(__closure_sync);
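
/*
 * Illustrative user-space analogue (not kernel code) of the handshake
 * introduced above: the waiter parks an on-stack {task, done} pair in the
 * closure, closure_sync_fn() runs when the refcount drains, sets done and
 * wakes the waiter. The same pattern sketched with pthreads:
 */
#include <pthread.h>

struct syncer {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void syncer_complete(struct syncer *s)	/* ~ closure_sync_fn() */
{
	pthread_mutex_lock(&s->lock);
	s->done = 1;
	pthread_cond_signal(&s->cond);
	pthread_mutex_unlock(&s->lock);
}

static void syncer_wait(struct syncer *s)	/* ~ __closure_sync() loop */
{
	pthread_mutex_lock(&s->lock);
	while (!s->done)
		pthread_cond_wait(&s->cond, &s->lock);
	pthread_mutex_unlock(&s->lock);
}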
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -168,12 +171,10 @@ static int debug_seq_show(struct seq_file *f, void *data)
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
- seq_printf(f, "%s%s%s%s\n",
+ seq_printf(f, "%s%s\n",
test_bit(WORK_STRUCT_PENDING_BIT,
work_data_bits(&cl->work)) ? "Q" : "",
- r & CLOSURE_RUNNING ? "R" : "",
- r & CLOSURE_STACK ? "S" : "",
- r & CLOSURE_SLEEPING ? "Sl" : "");
+ r & CLOSURE_RUNNING ? "R" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index ccfbea6f9f6b..3b9dfc9962ad 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -103,6 +103,7 @@
*/
struct closure;
+struct closure_syncer;
typedef void (closure_fn) (struct closure *);
struct closure_waitlist {
@@ -115,10 +116,6 @@ enum closure_state {
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
*
- * CLOSURE_SLEEPING: Must be set before a thread uses a closure to sleep
- * - indicates that cl->task is valid and closure_put() may wake it up.
- * Only set or cleared by the thread that owns the closure.
- *
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -128,22 +125,16 @@ enum closure_state {
* continue_at() and closure_return() clear it for you, if you're doing
* something unusual you can use closure_set_dead() which also helps
* annotate where references are being transferred.
- *
- * CLOSURE_STACK: Sanity check - remaining should never hit 0 on a
- * closure with this flag set
*/
- CLOSURE_BITS_START = (1 << 23),
- CLOSURE_DESTRUCTOR = (1 << 23),
- CLOSURE_WAITING = (1 << 25),
- CLOSURE_SLEEPING = (1 << 27),
- CLOSURE_RUNNING = (1 << 29),
- CLOSURE_STACK = (1 << 31),
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
};
#define CLOSURE_GUARD_MASK \
- ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
- CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
@@ -152,7 +143,7 @@ struct closure {
union {
struct {
struct workqueue_struct *wq;
- struct task_struct *task;
+ struct closure_syncer *s;
struct llist_node list;
closure_fn *fn;
};
@@ -178,7 +169,19 @@ void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
-void closure_sync(struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+ if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+ __closure_sync(cl);
+}
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -215,24 +218,6 @@ static inline void closure_set_waiting(struct closure *cl, unsigned long f)
#endif
}
-static inline void __closure_end_sleep(struct closure *cl)
-{
- __set_current_state(TASK_RUNNING);
-
- if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
- atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
-}
-
-static inline void __closure_start_sleep(struct closure *cl)
-{
- closure_set_ip(cl);
- cl->task = current;
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
- atomic_add(CLOSURE_SLEEPING, &cl->remaining);
-}
-
static inline void closure_set_stopped(struct closure *cl)
{
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
@@ -241,7 +226,6 @@ static inline void closure_set_stopped(struct closure *cl)
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)
{
- BUG_ON(object_is_on_stack(cl));
closure_set_ip(cl);
cl->fn = fn;
cl->wq = wq;
@@ -300,7 +284,7 @@ static inline void closure_init(struct closure *cl, struct closure *parent)
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
}
/**
@@ -322,6 +306,8 @@ static inline void closure_wake_up(struct closure_waitlist *list)
* This is because after calling continue_at() you no longer have a ref on @cl,
* and whatever @cl owns may be freed out from under you - a running closure fn
* has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
*/
#define continue_at(_cl, _fn, _wq) \
do { \
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index c7a02c4900da..af89408befe8 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -116,7 +116,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
return;
check->bi_opf = REQ_OP_READ;
- if (bio_alloc_pages(check, GFP_NOIO))
+ if (bch_bio_alloc_pages(check, GFP_NOIO))
goto out_put;
submit_bio_wait(check);
@@ -251,8 +251,7 @@ void bch_debug_exit(void)
int __init bch_debug_init(struct kobject *kobj)
{
- int ret = 0;
-
debug = debugfs_create_dir("bcache", NULL);
- return ret;
+
+ return IS_ERR_OR_NULL(debug);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index fac97ec2d0e2..a783c5a41ff1 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -51,7 +51,10 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */
-void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
+void bch_count_io_errors(struct cache *ca,
+ blk_status_t error,
+ int is_read,
+ const char *m)
{
/*
* The halflife of an error is:
@@ -94,8 +97,9 @@ void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s, recovering",
- bdevname(ca->bdev, buf), m);
+ pr_err("%s: IO error on %s%s",
+ bdevname(ca->bdev, buf), m,
+ is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
"%s: too many IO errors %s",
@@ -108,6 +112,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct cache *ca = PTR_CACHE(c, &b->key, 0);
+ int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
unsigned threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
@@ -129,7 +134,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
atomic_inc(&c->congested);
}
- bch_count_io_errors(ca, error, m);
+ bch_count_io_errors(ca, error, is_read, m);
}
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index d50c1c97da68..a24c3a95b2c0 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -162,7 +162,7 @@ static void read_moving(struct cache_set *c)
bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_end_io = read_moving_endio;
- if (bio_alloc_pages(bio, GFP_KERNEL))
+ if (bch_bio_alloc_pages(bio, GFP_KERNEL))
goto err;
trace_bcache_gc_copy(&w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 643c3021624f..1a46b41dac70 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -576,6 +576,7 @@ static void cache_lookup(struct closure *cl)
{
struct search *s = container_of(cl, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
+ struct cached_dev *dc;
int ret;
bch_btree_op_init(&s->op, -1);
@@ -588,6 +589,27 @@ static void cache_lookup(struct closure *cl)
return;
}
+ /*
+ * We might hit an error when searching the btree. If that happens, ret
+ * will be negative, and in this scenario we should not recover data from
+ * the backing device (when the cache device is dirty), because we don't
+ * know whether the bkeys the read request covered are all clean.
+ *
+ * And after that has happened, s->iop.status still holds its initial
+ * value from before we submitted s->bio.bio.
+ */
+ if (ret < 0) {
+ BUG_ON(ret == -EINTR);
+ if (s->d && s->d->c &&
+ !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
+ dc = container_of(s->d, struct cached_dev, disk);
+ if (dc && atomic_read(&dc->has_dirty))
+ s->recoverable = false;
+ }
+ if (!s->iop.status)
+ s->iop.status = BLK_STS_IOERR;
+ }
+
closure_return(cl);
}
@@ -611,8 +633,8 @@ static void request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- struct request_queue *q = s->orig_bio->bi_disk->queue;
- generic_end_io_acct(q, bio_data_dir(s->orig_bio),
+ generic_end_io_acct(s->d->disk->queue,
+ bio_data_dir(s->orig_bio),
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
@@ -841,7 +863,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
cache_bio->bi_private = &s->cl;
bch_bio_map(cache_bio, NULL);
- if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
+ if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
if (reada)
@@ -974,6 +996,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio);
+ atomic_set(&dc->backing_idle, 0);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
bio_set_dev(bio, dc->bdev);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b4d28928dec5..133b81225ea9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -211,7 +211,7 @@ static void write_bdev_super_endio(struct bio *bio)
static void __write_super(struct cache_sb *sb, struct bio *bio)
{
- struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
+ struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned i;
bio->bi_iter.bi_sector = SB_SECTOR;
@@ -274,7 +274,9 @@ static void write_super_endio(struct bio *bio)
{
struct cache *ca = bio->bi_private;
- bch_count_io_errors(ca, bio->bi_status, "writing superblock");
+ /* is_read = 0 */
+ bch_count_io_errors(ca, bio->bi_status, 0,
+ "writing superblock");
closure_put(&ca->set->sb_write);
}
@@ -721,6 +723,9 @@ static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
d->c = c;
c->devices[id] = d;
+ if (id >= c->devices_max_used)
+ c->devices_max_used = id + 1;
+
closure_get(&c->caching);
}
@@ -906,6 +911,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
@@ -1166,7 +1177,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
dc->bdev->bd_holder = dc;
bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
- dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (cached_dev_init(dc, sb->block_size << 9))
@@ -1261,7 +1272,7 @@ static int flash_devs_run(struct cache_set *c)
struct uuid_entry *u;
for (u = c->uuids;
- u < c->uuids + c->nr_uuids && !ret;
+ u < c->uuids + c->devices_max_used && !ret;
u++)
if (UUID_FLASH_ONLY(u))
ret = flash_dev_run(c, u);
@@ -1427,7 +1438,7 @@ static void __cache_set_unregister(struct closure *cl)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++)
+ for (i = 0; i < c->devices_max_used; i++)
if (c->devices[i]) {
if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
@@ -1490,7 +1501,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->bucket_bits = ilog2(sb->bucket_size);
c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
-
+ c->devices_max_used = 0;
c->btree_pages = bucket_pages(c);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
@@ -1810,7 +1821,7 @@ void bch_cache_release(struct kobject *kobj)
free_fifo(&ca->free[i]);
if (ca->sb_bio.bi_inline_vecs[0].bv_page)
- put_page(ca->sb_bio.bi_io_vec[0].bv_page);
+ put_page(bio_first_page_all(&ca->sb_bio));
if (!IS_ERR_OR_NULL(ca->bdev))
blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -1864,7 +1875,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->bdev->bd_holder = ca;
bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
- ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index e548b8b51322..a23cd6a14b74 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -249,6 +249,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
: 0;
}
+/*
+ * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly;
+ * the preferred way is bio_add_page. But in this case bch_bio_map()
+ * assumes that the bvec table is empty, so it is safe to access
+ * .bi_vcnt & .bi_io_vec this way even after multipage bvecs are
+ * supported.
+ */
void bch_bio_map(struct bio *bio, void *base)
{
size_t size = bio->bi_iter.bi_size;
@@ -276,6 +283,33 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
}
}
+/**
+ * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
+ * freed.
+ */
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+ int i;
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ bv->bv_page = alloc_page(gfp_mask);
+ if (!bv->bv_page) {
+ while (--bv >= bio->bi_io_vec)
+ __free_page(bv->bv_page);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
/*
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
* use permitted, subject to terms of PostgreSQL license; see.)
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ed5e8a412eb8..4df4c5c1cab2 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -558,6 +558,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
}
void bch_bio_map(struct bio *bio, void *base);
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
static inline sector_t bdev_sectors(struct block_device *bdev)
{
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 56a37884ca8b..51306a19ab03 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -18,17 +18,39 @@
#include <trace/events/bcache.h>
/* Rate limiting */
-
-static void __update_writeback_rate(struct cached_dev *dc)
+static uint64_t __calc_target_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
+
+ /*
+ * This is the size of the cache, minus the amount used for
+ * flash-only devices
+ */
uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
bcache_flash_devs_sectors_dirty(c);
+
+ /*
+ * Unfortunately there is no control of global dirty data. If the
+ * user states that they want 10% dirty data in the cache, and has,
+ * e.g., 5 backing volumes of equal size, we try to ensure each
+ * backing volume uses about 2% of the cache for dirty data.
+ */
+ uint32_t bdev_share =
+ div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+ c->cached_dev_sectors);
+
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
- int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
- c->cached_dev_sectors);
+ /* Ensure each backing dev gets at least one dirty share */
+ if (bdev_share < 1)
+ bdev_share = 1;
+
+ return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
+}
+
+static void __update_writeback_rate(struct cached_dev *dc)
+{
/*
* PI controller:
* Figures out the amount that should be written per second.
@@ -49,6 +71,7 @@ static void __update_writeback_rate(struct cached_dev *dc)
* This acts as a slow, long-term average that is not subject to
* variations in usage like the p term.
*/
+ int64_t target = __calc_target_rate(dc);
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t error = dirty - target;
int64_t proportional_scaled =
@@ -116,6 +139,7 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
struct dirty_io {
struct closure cl;
struct cached_dev *dc;
+ uint16_t sequence;
struct bio bio;
};
@@ -194,6 +218,27 @@ static void write_dirty(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
+ struct cached_dev *dc = io->dc;
+
+ uint16_t next_sequence;
+
+ if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
+ /* Not our turn to write; wait for a write to complete */
+ closure_wait(&dc->writeback_ordering_wait, cl);
+
+ if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
+ /*
+			 * Edge case: our sequence became current while we
+			 * were being added to the wait list, so wake the
+			 * list ourselves to avoid a missed wakeup.
+ */
+ closure_wake_up(&dc->writeback_ordering_wait);
+ }
+
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+ return;
+ }
+
+ next_sequence = io->sequence + 1;
/*
* IO errors are signalled using the dirty bit on the key.
@@ -211,6 +256,9 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
}
+ atomic_set(&dc->writeback_sequence_next, next_sequence);
+ closure_wake_up(&dc->writeback_ordering_wait);
+
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
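
The sequence/ticket dance in write_dirty() above keeps writebacks hitting the backing device in issue order. A compact userspace model of just the ticket logic, ignoring the closure machinery (names are ours; real bcache waits on writeback_ordering_wait instead of returning false):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int sequence_next;

	/*
	 * A completion may only proceed when the shared counter matches its
	 * ticket; it then publishes ticket + 1 for the next writer in line.
	 */
	static bool try_finish(int ticket)
	{
		if (atomic_load(&sequence_next) != ticket)
			return false;	/* bcache re-queues itself and waits here */
		/* ... submit the in-order write ... */
		atomic_store(&sequence_next, ticket + 1);
		return true;		/* bcache also wakes the ordering waitqueue */
	}

	int main(void)
	{
		int a = try_finish(1);	/* 0: not our turn yet */
		int b = try_finish(0);	/* 1: advances the sequence to 1 */
		int c = try_finish(1);	/* 1: now it is ticket 1's turn */

		printf("%d %d %d\n", a, b, c);
		return 0;
	}
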
@@ -219,8 +267,10 @@ static void read_dirty_endio(struct bio *bio)
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
+ /* is_read = 1 */
bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
- bio->bi_status, "reading dirty data from cache");
+ bio->bi_status, 1,
+ "reading dirty data from cache");
dirty_endio(bio);
}
@@ -237,10 +287,15 @@ static void read_dirty_submit(struct closure *cl)
static void read_dirty(struct cached_dev *dc)
{
unsigned delay = 0;
- struct keybuf_key *w;
+ struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
+ size_t size;
+ int nk, i;
struct dirty_io *io;
struct closure cl;
+ uint16_t sequence = 0;
+ BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
+ atomic_set(&dc->writeback_sequence_next, sequence);
closure_init_stack(&cl);
/*
@@ -248,45 +303,109 @@ static void read_dirty(struct cached_dev *dc)
* mempools.
*/
- while (!kthread_should_stop()) {
-
- w = bch_keybuf_next(&dc->writeback_keys);
- if (!w)
- break;
-
- BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
-
- if (KEY_START(&w->key) != dc->last_read ||
- jiffies_to_msecs(delay) > 50)
- while (!kthread_should_stop() && delay)
- delay = schedule_timeout_interruptible(delay);
-
- dc->last_read = KEY_OFFSET(&w->key);
-
- io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
- * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
- GFP_KERNEL);
- if (!io)
- goto err;
-
- w->private = io;
- io->dc = dc;
-
- dirty_init(w);
- bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
- io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
- bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
- io->bio.bi_end_io = read_dirty_endio;
-
- if (bio_alloc_pages(&io->bio, GFP_KERNEL))
- goto err_free;
+ next = bch_keybuf_next(&dc->writeback_keys);
+
+ while (!kthread_should_stop() && next) {
+ size = 0;
+ nk = 0;
+
+ do {
+ BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
+
+ /*
+ * Don't combine too many operations, even if they
+ * are all small.
+ */
+ if (nk >= MAX_WRITEBACKS_IN_PASS)
+ break;
+
+ /*
+ * If the current operation is very large, don't
+ * further combine operations.
+ */
+ if (size >= MAX_WRITESIZE_IN_PASS)
+ break;
+
+ /*
+ * Operations are only eligible to be combined
+ * if they are contiguous.
+ *
+ * TODO: add a heuristic willing to fire a
+ * certain amount of non-contiguous IO per pass,
+ * so that we can benefit from backing device
+ * command queueing.
+ */
+ if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
+ &START_KEY(&next->key)))
+ break;
+
+ size += KEY_SIZE(&next->key);
+ keys[nk++] = next;
+ } while ((next = bch_keybuf_next(&dc->writeback_keys)));
+
+		/* Now we have gathered a set of 1..MAX_WRITEBACKS_IN_PASS keys. */
+ for (i = 0; i < nk; i++) {
+ w = keys[i];
+
+ io = kzalloc(sizeof(struct dirty_io) +
+ sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ w->private = io;
+ io->dc = dc;
+ io->sequence = sequence++;
+
+ dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
+ bio_set_dev(&io->bio,
+ PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
+ io->bio.bi_end_io = read_dirty_endio;
+
+ if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
+ goto err_free;
+
+ trace_bcache_writeback(&w->key);
+
+ down(&dc->in_flight);
+
+			/*
+			 * We've acquired a semaphore slot limiting the
+			 * number of simultaneous writebacks; from here
+			 * everything happens asynchronously.
+			 */
+ closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ }
- trace_bcache_writeback(&w->key);
+ delay = writeback_delay(dc, size);
- down(&dc->in_flight);
- closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+		/*
+		 * If the control system would wait for at least half a
+		 * second, and there have been no requests hitting the
+		 * backing disk for a while: use an alternate mode where
+		 * we have at most one contiguous set of writebacks in
+		 * flight at a time. If someone wants to do IO it will
+		 * be quick, as it will only have to contend with one
+		 * operation in flight, and we'll be round-tripping data
+		 * to the backing disk as quickly as it can accept it.
+		 */
+ if (delay >= HZ / 2) {
+			/*
+			 * 3 means at least 1.5 seconds, up to 7.5 if we
+			 * have slowed way down.
+			 */
+ if (atomic_inc_return(&dc->backing_idle) >= 3) {
+ /* Wait for current I/Os to finish */
+ closure_sync(&cl);
+ /* And immediately launch a new set. */
+ delay = 0;
+ }
+ }
- delay = writeback_delay(dc, KEY_SIZE(&w->key));
+ while (!kthread_should_stop() && delay) {
+ schedule_timeout_interruptible(delay);
+ delay = writeback_delay(dc, 0);
+ }
}
if (0) {
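
The gathering loop in read_dirty() above combines contiguous dirty keys until one of three limits trips: the count cap, the size cap, or a gap between keys. A runnable userspace model of that heuristic (the struct and constants are ours, sized like the bcache macros):

	#include <stdio.h>

	struct extent { unsigned long long start, size; };

	#define MAX_IN_PASS	5		/* like MAX_WRITEBACKS_IN_PASS */
	#define MAX_SIZE_PASS	5000ULL		/* like MAX_WRITESIZE_IN_PASS */

	/*
	 * Gather extents from ext[from..n) while they stay contiguous, the
	 * count stays under MAX_IN_PASS and the size under MAX_SIZE_PASS.
	 */
	static int gather(const struct extent *ext, int from, int n)
	{
		unsigned long long size = 0;
		int nk = 0;

		while (from + nk < n && nk < MAX_IN_PASS && size < MAX_SIZE_PASS) {
			const struct extent *e = &ext[from + nk];

			if (nk && ext[from + nk - 1].start +
				  ext[from + nk - 1].size != e->start)
				break;	/* not contiguous with previous key */
			size += e->size;
			nk++;
		}
		return nk;
	}

	int main(void)
	{
		struct extent ext[] = { {0, 8}, {8, 8}, {16, 8}, {100, 8} };

		printf("%d\n", gather(ext, 0, 4));	/* prints 3: the gap stops it */
		return 0;
	}
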
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index a9e3ffb4b03c..66f1c527fa24 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -5,6 +5,16 @@
#define CUTOFF_WRITEBACK 40
#define CUTOFF_WRITEBACK_SYNC 70
+#define MAX_WRITEBACKS_IN_PASS 5
+#define MAX_WRITESIZE_IN_PASS 5000	/* in 512-byte sectors */
+
+/*
+ * 14 (i.e. shares in 16384ths) is chosen so that each backing device
+ * gets a reasonable fraction of the overall share, and the arithmetic
+ * does not overflow until individual backing devices reach a petabyte.
+ */
+#define WRITEBACK_SHARE_SHIFT 14
+
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
uint64_t i, ret = 0;
@@ -21,7 +31,7 @@ static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
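
WRITEBACK_SHARE_SHIFT turns the per-device share into 14-bit fixed point, so __calc_target_rate() can scale the global dirty target without losing small shares to integer division. A self-contained sketch of the same arithmetic (the helper name and the numbers in main() are ours):

	#include <stdio.h>
	#include <stdint.h>

	#define SHARE_SHIFT 14	/* mirrors WRITEBACK_SHARE_SHIFT */

	/*
	 * Per-device dirty target: the global dirty target scaled by this
	 * device's share of all cached sectors, expressed in 1/16384ths.
	 */
	static uint64_t calc_target(uint64_t dirty_target,
				    uint64_t dev_sectors, uint64_t total_sectors)
	{
		uint64_t share = (dev_sectors << SHARE_SHIFT) / total_sectors;

		if (share < 1)
			share = 1;	/* every backing device gets some share */
		return (dirty_target * share) >> SHARE_SHIFT;
	}

	int main(void)
	{
		uint64_t cache_dirty_target = 20971520;	/* 10 GiB in sectors */

		/* device backs 1 TiB out of 5 TiB cached -> ~2 GiB target */
		printf("%llu\n", (unsigned long long)
		       calc_target(cache_dirty_target,
				   2147483648ULL, 10737418240ULL));
		return 0;
	}
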
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c546b567f3b5..414c9af54ded 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -662,7 +662,7 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
- if (rw != WRITE) {
+ if (rw != REQ_OP_WRITE) {
n_sectors = 1 << b->c->sectors_per_block_bits;
offset = 0;
} else {
@@ -740,7 +740,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
b->write_end = b->dirty_end;
if (!write_list)
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
else
list_add_tail(&b->write_list, write_list);
}
@@ -753,7 +753,7 @@ static void __flush_write_list(struct list_head *write_list)
struct dm_buffer *b =
list_entry(write_list->next, struct dm_buffer, write_list);
list_del(&b->write_list);
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
cond_resched();
}
blk_finish_plug(&plug);
@@ -1123,7 +1123,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
return NULL;
if (need_submit)
- submit_io(b, READ, read_endio);
+ submit_io(b, REQ_OP_READ, read_endio);
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -1193,7 +1193,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
dm_bufio_unlock(c);
if (need_submit)
- submit_io(b, READ, read_endio);
+ submit_io(b, REQ_OP_READ, read_endio);
dm_bufio_release(b);
cond_resched();
@@ -1454,7 +1454,7 @@ retry:
old_block = b->block;
__unlink_buffer(b);
__link_buffer(b, new_block, b->list_mode);
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
wait_on_bit_io(&b->state, B_WRITING,
TASK_UNINTERRUPTIBLE);
__unlink_buffer(b);
@@ -1716,7 +1716,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!DM_BUFIO_CACHE_NAME(c)) {
r = -ENOMEM;
mutex_unlock(&dm_bufio_clients_lock);
- goto bad_cache;
+ goto bad;
}
}
@@ -1727,7 +1727,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!DM_BUFIO_CACHE(c)) {
r = -ENOMEM;
mutex_unlock(&dm_bufio_clients_lock);
- goto bad_cache;
+ goto bad;
}
}
}
@@ -1738,27 +1738,28 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!b) {
r = -ENOMEM;
- goto bad_buffer;
+ goto bad;
}
__free_buffer_wake(b);
}
+ c->shrinker.count_objects = dm_bufio_shrink_count;
+ c->shrinker.scan_objects = dm_bufio_shrink_scan;
+ c->shrinker.seeks = 1;
+ c->shrinker.batch = 0;
+ r = register_shrinker(&c->shrinker);
+ if (r)
+ goto bad;
+
mutex_lock(&dm_bufio_clients_lock);
dm_bufio_client_count++;
list_add(&c->client_list, &dm_bufio_all_clients);
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- c->shrinker.count_objects = dm_bufio_shrink_count;
- c->shrinker.scan_objects = dm_bufio_shrink_scan;
- c->shrinker.seeks = 1;
- c->shrinker.batch = 0;
- register_shrinker(&c->shrinker);
-
return c;
-bad_buffer:
-bad_cache:
+bad:
while (!list_empty(&c->reserved_buffers)) {
struct dm_buffer *b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
@@ -1767,6 +1768,7 @@ bad_cache:
}
dm_io_client_destroy(c->dm_io);
bad_dm_io:
+ mutex_destroy(&c->lock);
kfree(c);
bad_client:
return ERR_PTR(r);
@@ -1811,6 +1813,7 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
BUG_ON(c->n_buffers[i]);
dm_io_client_destroy(c->dm_io);
+ mutex_destroy(&c->lock);
kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
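
The dm-bufio hunks above do two related things: register_shrinker() is now checked for failure and called only once the client is otherwise complete, and the old bad_buffer/bad_cache labels collapse into a single bad: label. A schematic, self-contained sketch of that single-label unwind (the struct and helpers are ours, not the dm-bufio API):

	#include <stdlib.h>

	struct client { void *buffers; void *shrinker; };

	/*
	 * Once several setup steps past the first can fail, they can all
	 * share one cleanup label that releases everything acquired so far.
	 */
	static struct client *client_create(void)
	{
		struct client *c = calloc(1, sizeof(*c));

		if (!c)
			return NULL;
		c->buffers = malloc(64);
		if (!c->buffers)
			goto bad_client;
		c->shrinker = malloc(64);	/* stand-in for register_shrinker() */
		if (!c->shrinker)
			goto bad;		/* the shared 'bad:' of the patch */
		return c;
	bad:
		free(c->buffers);
	bad_client:
		free(c);
		return NULL;
	}

	int main(void)
	{
		struct client *c = client_create();

		if (!c)
			return 1;
		free(c->shrinker);
		free(c->buffers);
		free(c);
		return 0;
	}
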
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 6a14f945783c..3222e21cbbf8 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -91,8 +91,7 @@ struct mapped_device {
/*
* io objects are allocated from here.
*/
- mempool_t *io_pool;
-
+ struct bio_set *io_bs;
struct bio_set *bs;
/*
@@ -130,8 +129,6 @@ struct mapped_device {
struct srcu_struct io_barrier;
};
-void dm_init_md_queue(struct mapped_device *md);
-void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9fc12f556534..8168f737590e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1446,7 +1446,6 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio_for_each_segment_all(bv, clone, i) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, cc->page_pool);
- bv->bv_page = NULL;
}
}
@@ -1954,10 +1953,15 @@ static int crypt_setkey(struct crypt_config *cc)
/* Ignore extra keys (which are used for IV etc) */
subkey_size = crypt_subkey_size(cc);
- if (crypt_integrity_hmac(cc))
+ if (crypt_integrity_hmac(cc)) {
+ if (subkey_size < cc->key_mac_size)
+ return -EINVAL;
+
crypt_copy_authenckey(cc->authenc_key, cc->key,
subkey_size - cc->key_mac_size,
cc->key_mac_size);
+ }
+
for (i = 0; i < cc->tfms_count; i++) {
if (crypt_integrity_hmac(cc))
r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2057,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
ret = crypt_setkey(cc);
- /* wipe the kernel key payload copy in each case */
- memset(cc->key, 0, cc->key_size * sizeof(u8));
-
if (!ret) {
set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
kzfree(cc->key_string);
@@ -2192,6 +2193,8 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher_auth);
kzfree(cc->authenc_key);
+ mutex_destroy(&cc->bio_alloc_lock);
+
/* Must zero key material before freeing */
kzfree(cc);
}
@@ -2523,6 +2526,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
}
}
+ /* wipe the kernel key payload copy */
+ if (cc->key_string)
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
+
return ret;
}
@@ -2697,8 +2704,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- cc->bs = bioset_create(MIN_IOS, 0, (BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER));
+ cc->bs = bioset_create(MIN_IOS, 0, BIOSET_NEED_BVECS);
if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset";
goto bad;
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->tag_pool_max_sectors * cc->on_disk_tag_size);
if (!cc->tag_pool) {
ti->error = "Cannot allocate integrity tags mempool";
+ ret = -ENOMEM;
goto bad;
}
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
return ret;
if (cc->iv_gen_ops && cc->iv_gen_ops->init)
ret = cc->iv_gen_ops->init(cc);
+ /* wipe the kernel key payload copy */
+ if (cc->key_string)
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
return ret;
}
if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 18, 0},
+ .version = {1, 18, 1},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 288386bfbfb5..1783d80c9cad 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -229,6 +229,8 @@ static void delay_dtr(struct dm_target *ti)
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
+ mutex_destroy(&dc->timer_lock);
+
kfree(dc);
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b82cb1ab1eaa..1b907b15f5c3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -70,6 +70,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
arg_name = dm_shift_arg(as);
argc--;
+ if (!arg_name) {
+ ti->error = "Insufficient feature arguments";
+ return -EINVAL;
+ }
+
/*
* drop_writes
*/
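
The dm-flakey fix above guards against dm_shift_arg() returning NULL when the feature count promises more arguments than were actually supplied. A small userspace model of the same guard (shift_arg() here is our stand-in, not the dm API):

	#include <stdio.h>

	/* Return the next argument, or NULL when the set is exhausted. */
	static const char *shift_arg(const char **argv, int argc, int *idx)
	{
		return *idx < argc ? argv[(*idx)++] : NULL;
	}

	int main(int argc, char **argv)
	{
		int idx = 1;
		const char *arg = shift_arg((const char **)argv, argc, &idx);

		if (!arg) {	/* the check the patch adds before any strcmp */
			fprintf(stderr, "Insufficient feature arguments\n");
			return 1;
		}
		printf("feature: %s\n", arg);
		return 0;
	}
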
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 05c7bfd0c9d9..46d7c8749222 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
int r = 0;
unsigned i;
__u64 journal_pages, journal_desc_size, journal_tree_size;
- unsigned char *crypt_data = NULL;
+ unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+ struct skcipher_request *req = NULL;
ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
if (blocksize == 1) {
struct scatterlist *sg;
- SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
- unsigned char iv[ivsize];
- skcipher_request_set_tfm(req, ic->journal_crypt);
+
+ req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+ if (!req) {
+ *error = "Could not allocate crypt request";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+ if (!crypt_iv) {
+ *error = "Could not allocate iv";
+ r = -ENOMEM;
+ goto bad;
+ }
ic->journal_xor = dm_integrity_alloc_page_list(ic);
if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
sg_set_buf(&sg[i], va, PAGE_SIZE);
}
sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
- memset(iv, 0x00, ivsize);
+ memset(crypt_iv, 0x00, ivsize);
- skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+ skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
crypto_free_skcipher(ic->journal_crypt);
ic->journal_crypt = NULL;
} else {
- SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
- unsigned char iv[ivsize];
unsigned crypt_len = roundup(ivsize, blocksize);
+ req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+ if (!req) {
+ *error = "Could not allocate crypt request";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+ if (!crypt_iv) {
+ *error = "Could not allocate iv";
+ r = -ENOMEM;
+ goto bad;
+ }
+
crypt_data = kmalloc(crypt_len, GFP_KERNEL);
if (!crypt_data) {
*error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}
- skcipher_request_set_tfm(req, ic->journal_crypt);
-
ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
if (!ic->journal_scatterlist) {
*error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
struct skcipher_request *section_req;
__u32 section_le = cpu_to_le32(i);
- memset(iv, 0x00, ivsize);
+ memset(crypt_iv, 0x00, ivsize);
memset(crypt_data, 0x00, crypt_len);
memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
sg_init_one(&sg, crypt_data, crypt_len);
- skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+ skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
}
bad:
kfree(crypt_data);
+ kfree(crypt_iv);
+ skcipher_request_free(req);
+
return r;
}
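
The dm-integrity change above replaces SKCIPHER_REQUEST_ON_STACK and a variable-length iv[ivsize] stack array with heap allocations that are released on the shared bad: path. A userspace sketch of the IV half of that pattern (malloc() standing in for kmalloc()):

	#include <stdlib.h>
	#include <string.h>

	/*
	 * A runtime-sized IV must not live on the stack as a VLA; take it
	 * from the heap and zero it, as the patch does with kmalloc().
	 */
	static unsigned char *alloc_iv(size_t ivsize)
	{
		unsigned char *iv = malloc(ivsize);

		if (iv)
			memset(iv, 0x00, ivsize);
		return iv;
	}

	int main(void)
	{
		unsigned char *iv = alloc_iv(16);

		if (!iv)
			return 1;	/* dm-integrity reports -ENOMEM here */
		free(iv);		/* freed on the common 'bad:' path above */
		return 0;
	}
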
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index b4357ed4d541..a8d914d5abbe 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -58,8 +58,7 @@ struct dm_io_client *dm_io_client_create(void)
if (!client->pool)
goto bad;
- client->bios = bioset_create(min_ios, 0, (BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER));
+ client->bios = bioset_create(min_ios, 0, BIOSET_NEED_BVECS);
if (!client->bios)
goto bad;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index e52676fa9832..3f6791afd3e4 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1929,10 +1929,10 @@ static int dm_release(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned dm_poll(struct file *filp, poll_table *wait)
+static __poll_t dm_poll(struct file *filp, poll_table *wait)
{
struct dm_file *priv = filp->private_data;
- unsigned mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &dm_global_eventq, wait);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index eb45cc3df31d..e6e7c686646d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -477,8 +477,10 @@ static int run_complete_job(struct kcopyd_job *job)
* If this is the master job, the sub jobs have already
* completed so we can free everything.
*/
- if (job->master_job == job)
+ if (job->master_job == job) {
+ mutex_destroy(&job->lock);
mempool_free(job, kc->job_pool);
+ }
fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
@@ -750,6 +752,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
* followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
+ mutex_init(&job->lock);
/*
* set up for the read.
@@ -811,7 +814,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
else {
- mutex_init(&job->lock);
job->progress = 0;
split_job(job);
}
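
Moving mutex_init() next to the mempool_alloc() in dm_kcopyd_copy() means every job's lock is initialised, so the new mutex_destroy() in run_complete_job() always has a matching init. A tiny pthread model of that pairing (struct job here is ours):

	#include <pthread.h>
	#include <stdlib.h>

	struct job { pthread_mutex_t lock; };

	/* Initialise the lock whenever a job is handed out ... */
	static struct job *job_alloc(void)
	{
		struct job *job = malloc(sizeof(*job));

		if (job)
			pthread_mutex_init(&job->lock, NULL);
		return job;
	}

	/* ... and destroy it exactly once when the job is freed. */
	static void job_free(struct job *job)
	{
		pthread_mutex_destroy(&job->lock);
		free(job);
	}

	int main(void)
	{
		struct job *job = job_alloc();

		if (!job)
			return 1;
		job_free(job);
		return 0;
	}
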
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 189badbeddaf..3362d866793b 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -594,7 +594,7 @@ static int log_mark(struct log_writes_c *lc, char *data)
return -ENOMEM;
}
- block->data = kstrndup(data, maxsize, GFP_KERNEL);
+ block->data = kstrndup(data, maxsize - 1, GFP_KERNEL);
if (!block->data) {
DMERR("Error copying mark data");
kfree(block);
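
The dm-log-writes fix above accounts for kstrndup(s, n) copying up to n characters plus a terminating NUL, i.e. n + 1 bytes; bounding by maxsize - 1 keeps the duplicate within the maxsize bytes that were budgeted. The same point with POSIX strndup() (bounded_dup() is our illustrative wrapper):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/*
	 * strndup(s, n) may return n characters plus a NUL, n + 1 bytes in
	 * total; pass maxsize - 1 to fit a buffer of maxsize bytes.
	 */
	static char *bounded_dup(const char *s, size_t maxsize)
	{
		return strndup(s, maxsize - 1);
	}

	int main(void)
	{
		char *s = bounded_dup("mark-data-that-is-long", 8);

		if (!s)
			return 1;
		printf("%s (%zu bytes incl. NUL)\n", s, strlen(s) + 1);
		free(s);
		return 0;
	}
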
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index f7810cc869ac..7d3e572072f5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -64,36 +64,30 @@ struct priority_group {
/* Multipath context */
struct multipath {
- struct list_head list;
- struct dm_target *ti;
-
- const char *hw_handler_name;
- char *hw_handler_params;
+ unsigned long flags; /* Multipath state flags */
spinlock_t lock;
-
- unsigned nr_priority_groups;
- struct list_head priority_groups;
-
- wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+ enum dm_queue_mode queue_mode;
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
- unsigned long flags; /* Multipath state flags */
+ atomic_t nr_valid_paths; /* Total number of usable paths */
+ unsigned nr_priority_groups;
+ struct list_head priority_groups;
+ const char *hw_handler_name;
+ char *hw_handler_params;
+ wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
-
- atomic_t nr_valid_paths; /* Total number of usable paths */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
- enum dm_queue_mode queue_mode;
-
struct mutex work_mutex;
struct work_struct trigger_event;
+ struct dm_target *ti;
struct work_struct process_queued_bios;
struct bio_list queued_bios;
@@ -135,10 +129,10 @@ static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
- if (pgpath) {
- pgpath->is_active = true;
- INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
- }
+ if (!pgpath)
+ return NULL;
+
+ pgpath->is_active = true;
return pgpath;
}
@@ -193,13 +187,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
- set_bit(MPATHF_QUEUE_IO, &m->flags);
atomic_set(&m->nr_valid_paths, 0);
- atomic_set(&m->pg_init_in_progress, 0);
- atomic_set(&m->pg_init_count, 0);
- m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->trigger_event, trigger_event);
- init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
m->queue_mode = DM_TYPE_NONE;
@@ -221,13 +210,26 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else
m->queue_mode = DM_TYPE_REQUEST_BASED;
- } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+
+ } else if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
- /*
- * bio-based doesn't support any direct scsi_dh management;
- * it just discovers if a scsi_dh is attached.
- */
- set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+
+ if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ /*
+ * bio-based doesn't support any direct scsi_dh management;
+ * it just discovers if a scsi_dh is attached.
+ */
+ set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+ }
+ }
+
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+ set_bit(MPATHF_QUEUE_IO, &m->flags);
+ atomic_set(&m->pg_init_in_progress, 0);
+ atomic_set(&m->pg_init_count, 0);
+ m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+ init_waitqueue_head(&m->pg_init_wait);
}
dm_table_set_type(ti->table, m->queue_mode);
@@ -246,6 +248,7 @@ static void free_multipath(struct multipath *m)
kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
+ mutex_destroy(&m->work_mutex);
kfree(m);
}
@@ -264,29 +267,23 @@ static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
return dm_per_bio_data(bio, multipath_per_bio_data_size());
}
-static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
+static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
- struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
void *bio_details = mpio + 1;
-
return bio_details;
}
-static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
- struct dm_bio_details **bio_details_p)
+static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
- struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
+ struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
- memset(mpio, 0, sizeof(*mpio));
- memset(bio_details, 0, sizeof(*bio_details));
- dm_bio_record(bio_details, bio);
+ mpio->nr_bytes = bio->bi_iter.bi_size;
+ mpio->pgpath = NULL;
+ *mpio_p = mpio;
- if (mpio_p)
- *mpio_p = mpio;
- if (bio_details_p)
- *bio_details_p = bio_details;
+ dm_bio_record(bio_details, bio);
}
/*-----------------------------------------------
@@ -340,6 +337,9 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
m->current_pg = pg;
+ if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ return;
+
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
@@ -385,7 +385,8 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
unsigned bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
- clear_bit(MPATHF_QUEUE_IO, &m->flags);
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
goto failed;
}
@@ -516,12 +517,10 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
return DM_MAPIO_KILL;
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
- if (pg_init_all_paths(m))
- return DM_MAPIO_DELAY_REQUEUE;
- return DM_MAPIO_REQUEUE;
+ pg_init_all_paths(m);
+ return DM_MAPIO_DELAY_REQUEUE;
}
- memset(mpio, 0, sizeof(*mpio));
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
@@ -530,12 +529,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
- bool queue_dying = blk_queue_dying(q);
- if (queue_dying) {
+ if (blk_queue_dying(q)) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
+ return DM_MAPIO_DELAY_REQUEUE;
}
- return DM_MAPIO_DELAY_REQUEUE;
+
+ /*
+ * blk-mq's SCHED_RESTART can cover this requeue, so we
+ * needn't deal with it by DELAY_REQUEUE. More importantly,
+ * we have to return DM_MAPIO_REQUEUE so that blk-mq can
+ * get the queue busy feedback (via BLK_STS_RESOURCE),
+ * otherwise I/O merging can suffer.
+ */
+ if (q->mq_ops)
+ return DM_MAPIO_REQUEUE;
+ else
+ return DM_MAPIO_DELAY_REQUEUE;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
@@ -557,9 +567,9 @@ static void multipath_release_clone(struct request *clone)
/*
* Map cloned bios (bio-based multipath)
*/
-static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
+
+static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
- size_t nr_bytes = bio->bi_iter.bi_size;
struct pgpath *pgpath;
unsigned long flags;
bool queue_io;
@@ -568,7 +578,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
pgpath = READ_ONCE(m->current_pgpath);
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if (!pgpath || !queue_io)
- pgpath = choose_pgpath(m, nr_bytes);
+ pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
if ((pgpath && queue_io) ||
(!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
@@ -576,14 +586,62 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, bio);
spin_unlock_irqrestore(&m->lock, flags);
+
/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg_init_all_paths(m);
else if (!queue_io)
queue_work(kmultipathd, &m->process_queued_bios);
- return DM_MAPIO_SUBMITTED;
+
+ return ERR_PTR(-EAGAIN);
+ }
+
+ return pgpath;
+}
+
+static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+{
+ struct pgpath *pgpath;
+ unsigned long flags;
+
+ /* Do we need to select a new pgpath? */
+ /*
+ * FIXME: currently only switching path if no path (due to failure, etc)
+ * - which negates the point of using a path selector
+ */
+ pgpath = READ_ONCE(m->current_pgpath);
+ if (!pgpath)
+ pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
+
+ if (!pgpath) {
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ /* Queue for the daemon to resubmit */
+ spin_lock_irqsave(&m->lock, flags);
+ bio_list_add(&m->queued_bios, bio);
+ spin_unlock_irqrestore(&m->lock, flags);
+ queue_work(kmultipathd, &m->process_queued_bios);
+
+ return ERR_PTR(-EAGAIN);
+ }
+ return NULL;
}
+ return pgpath;
+}
+
+static int __multipath_map_bio(struct multipath *m, struct bio *bio,
+ struct dm_mpath_io *mpio)
+{
+ struct pgpath *pgpath;
+
+ if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ pgpath = __map_bio_nvme(m, bio);
+ else
+ pgpath = __map_bio(m, bio);
+
+ if (IS_ERR(pgpath))
+ return DM_MAPIO_SUBMITTED;
+
if (!pgpath) {
if (must_push_back_bio(m))
return DM_MAPIO_REQUEUE;
@@ -592,7 +650,6 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
}
mpio->pgpath = pgpath;
- mpio->nr_bytes = nr_bytes;
bio->bi_status = 0;
bio_set_dev(bio, pgpath->path.dev->bdev);
@@ -601,7 +658,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
&pgpath->path,
- nr_bytes);
+ mpio->nr_bytes);
return DM_MAPIO_REMAPPED;
}
@@ -610,8 +667,7 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = NULL;
- multipath_init_per_bio_data(bio, &mpio, NULL);
-
+ multipath_init_per_bio_data(bio, &mpio);
return __multipath_map_bio(m, bio, mpio);
}
@@ -619,7 +675,8 @@ static void process_queued_io_list(struct multipath *m)
{
if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
- else if (m->queue_mode == DM_TYPE_BIO_BASED)
+ else if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
}
@@ -649,7 +706,9 @@ static void process_queued_bios(struct work_struct *work)
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
- r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
+ struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+ dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
+ r = __multipath_map_bio(m, bio, mpio);
switch (r) {
case DM_MAPIO_KILL:
bio->bi_status = BLK_STS_IOERR;
@@ -752,34 +811,11 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
return 0;
}
-static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
- struct dm_target *ti)
+static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error)
{
- int r;
- struct pgpath *p;
- struct multipath *m = ti->private;
- struct request_queue *q = NULL;
+ struct request_queue *q = bdev_get_queue(bdev);
const char *attached_handler_name;
-
- /* we need at least a path arg */
- if (as->argc < 1) {
- ti->error = "no device given";
- return ERR_PTR(-EINVAL);
- }
-
- p = alloc_pgpath();
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
- &p->path.dev);
- if (r) {
- ti->error = "error getting device";
- goto bad;
- }
-
- if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
- q = bdev_get_queue(p->path.dev->bdev);
+ int r;
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
@@ -811,26 +847,59 @@ retain:
char b[BDEVNAME_SIZE];
printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
- bdevname(p->path.dev->bdev, b));
+ bdevname(bdev, b));
goto retain;
}
if (r < 0) {
- ti->error = "error attaching hardware handler";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ *error = "error attaching hardware handler";
+ return r;
}
if (m->hw_handler_params) {
r = scsi_dh_set_params(q, m->hw_handler_params);
if (r < 0) {
- ti->error = "unable to set hardware "
- "handler parameters";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ *error = "unable to set hardware handler parameters";
+ return r;
}
}
}
+ return 0;
+}
+
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
+ struct dm_target *ti)
+{
+ int r;
+ struct pgpath *p;
+ struct multipath *m = ti->private;
+
+ /* we need at least a path arg */
+ if (as->argc < 1) {
+ ti->error = "no device given";
+ return ERR_PTR(-EINVAL);
+ }
+
+ p = alloc_pgpath();
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
+ &p->path.dev);
+ if (r) {
+ ti->error = "error getting device";
+ goto bad;
+ }
+
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+ INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
+ r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
+ if (r) {
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
+ }
+
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);
@@ -838,7 +907,6 @@ retain:
}
return p;
-
bad:
free_pgpath(p);
return ERR_PTR(r);
@@ -933,7 +1001,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
if (!hw_argc)
return 0;
- if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
dm_consume_args(as, hw_argc);
DMERR("bio-based multipath doesn't allow hardware handler args");
return 0;
@@ -1022,6 +1091,8 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
if (!strcasecmp(queue_mode_name, "bio"))
m->queue_mode = DM_TYPE_BIO_BASED;
+ else if (!strcasecmp(queue_mode_name, "nvme"))
+ m->queue_mode = DM_TYPE_NVME_BIO_BASED;
else if (!strcasecmp(queue_mode_name, "rq"))
m->queue_mode = DM_TYPE_REQUEST_BASED;
else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1122,7 +1193,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
- if (m->queue_mode == DM_TYPE_BIO_BASED)
+ if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
else
ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1151,16 +1222,19 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
- set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ if (m->hw_handler_name) {
+ set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
+
+ flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
+
+ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
+ }
- flush_workqueue(kmpath_handlerd);
- multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_work(&m->trigger_event);
-
- clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
}
static void multipath_dtr(struct dm_target *ti)
@@ -1475,21 +1549,6 @@ static void activate_path_work(struct work_struct *work)
activate_or_offline_path(pgpath);
}
-static int noretry_error(blk_status_t error)
-{
- switch (error) {
- case BLK_STS_NOTSUPP:
- case BLK_STS_NOSPC:
- case BLK_STS_TARGET:
- case BLK_STS_NEXUS:
- case BLK_STS_MEDIUM:
- return 1;
- }
-
- /* Anything else could be a path failure, so should be retried */
- return 0;
-}
-
static int multipath_end_io(struct dm_target *ti, struct request *clone,
blk_status_t error, union map_info *map_context)
{
@@ -1508,10 +1567,13 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
* request into dm core, which will remake a clone request and
* clone bios for it and resubmit it later.
*/
- if (error && !noretry_error(error)) {
+ if (error && blk_path_error(error)) {
struct multipath *m = ti->private;
- r = DM_ENDIO_REQUEUE;
+ if (error == BLK_STS_RESOURCE)
+ r = DM_ENDIO_DELAY_REQUEUE;
+ else
+ r = DM_ENDIO_REQUEUE;
if (pgpath)
fail_path(pgpath);
@@ -1536,7 +1598,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
}
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
- blk_status_t *error)
+ blk_status_t *error)
{
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
@@ -1544,7 +1606,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
unsigned long flags;
int r = DM_ENDIO_DONE;
- if (!*error || noretry_error(*error))
+ if (!*error || !blk_path_error(*error))
goto done;
if (pgpath)
@@ -1561,9 +1623,6 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
goto done;
}
- /* Queue for the daemon to resubmit */
- dm_bio_restore(get_bio_details_from_bio(clone), clone);
-
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, clone);
spin_unlock_irqrestore(&m->lock, flags);
@@ -1671,6 +1730,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
+ case DM_TYPE_NVME_BIO_BASED:
+ DMEMIT("queue_mode nvme ");
+ break;
case DM_TYPE_MQ_REQUEST_BASED:
DMEMIT("queue_mode mq ");
break;
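
The dm-mpath hunks above drop the local noretry_error() in favour of blk_path_error(), which reports whether an error might be path-related and therefore worth retrying on another path. A userspace model of that classification (the enum values are ours, mirroring the BLK_STS_* cases the old helper listed):

	#include <stdbool.h>
	#include <stdio.h>

	enum blk_status { STS_OK, STS_NOTSUPP, STS_NOSPC, STS_TARGET,
			  STS_NEXUS, STS_MEDIUM, STS_IOERR, STS_RESOURCE };

	/*
	 * Target-side errors are final; anything else may be a path failure
	 * and is worth retrying. Callers still guard with error != 0, as
	 * the patch does with "error && blk_path_error(error)".
	 */
	static bool path_error(enum blk_status error)
	{
		switch (error) {
		case STS_NOTSUPP:
		case STS_NOSPC:
		case STS_TARGET:
		case STS_NEXUS:
		case STS_MEDIUM:
			return false;	/* don't retry on another path */
		default:
			return true;
		}
	}

	int main(void)
	{
		/* STS_MEDIUM is final; STS_IOERR may be a path failure */
		printf("%d %d\n", path_error(STS_MEDIUM), path_error(STS_IOERR));
		return 0;
	}
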
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 23f178641794..969c4f1a3633 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -195,9 +195,6 @@ static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
if (list_empty(&s->valid_paths))
goto out;
- /* Change preferred (first in list) path to evenly balance. */
- list_move_tail(s->valid_paths.next, &s->valid_paths);
-
list_for_each_entry(pi, &s->valid_paths, list) {
if (!best ||
(atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
@@ -210,6 +207,9 @@ static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
if (!best)
goto out;
+ /* Move most recently used to least preferred to evenly balance. */
+ list_move_tail(&best->list, &s->valid_paths);
+
ret = best->path;
out:
spin_unlock_irqrestore(&s->lock, flags);
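
The dm-queue-length change above stops rotating the head of the list before scanning and instead moves the path that was actually selected to the tail, so paths with equal queue lengths still alternate. A toy array-based model of that selection (the real code uses a kernel list_head):

	#include <stdio.h>

	#define NPATHS 3

	/*
	 * Pick the path with the shortest queue, then move *that* path to
	 * the back of the search order so ties rotate fairly.
	 */
	static int pick_and_rotate(const int qlen[NPATHS], int order[NPATHS])
	{
		int best = 0, chosen, i;

		for (i = 1; i < NPATHS; i++)
			if (qlen[order[i]] < qlen[order[best]])
				best = i;

		chosen = order[best];
		for (i = best; i < NPATHS - 1; i++)
			order[i] = order[i + 1];
		order[NPATHS - 1] = chosen;
		return chosen;
	}

	int main(void)
	{
		int qlen[NPATHS] = { 0, 0, 0 };	/* all idle: ties should rotate */
		int order[NPATHS] = { 0, 1, 2 };
		int a = pick_and_rotate(qlen, order);
		int b = pick_and_rotate(qlen, order);
		int c = pick_and_rotate(qlen, order);

		printf("%d %d %d\n", a, b, c);	/* 0 1 2 with the fix applied */
		return 0;
	}
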
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6319d846e0ad..7ef469e902c6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -29,6 +29,9 @@
*/
#define MIN_RAID456_JOURNAL_SPACE (4*2048)
+/* Global list of all raid sets */
+static LIST_HEAD(raid_sets);
+
static bool devices_handle_discard_safely = false;
/*
@@ -105,8 +108,6 @@ struct raid_dev {
#define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE (1 << __CTR_FLAG_JOURNAL_MODE)
-#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET)
-
/*
* Definitions of various constructor flags to
* be used in checks of valid / invalid flags
@@ -209,6 +210,8 @@ struct raid_dev {
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
#define RT_FLAG_RS_SUSPENDED 5
+#define RT_FLAG_RS_IN_SYNC 6
+#define RT_FLAG_RS_RESYNCING 7
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -224,8 +227,8 @@ struct rs_layout {
struct raid_set {
struct dm_target *ti;
+ struct list_head list;
- uint32_t bitmap_loaded;
uint32_t stripe_cache_entries;
unsigned long ctr_flags;
unsigned long runtime_flags;
@@ -270,6 +273,19 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
mddev->new_chunk_sectors = l->new_chunk_sectors;
}
+/* Find any raid_set in active slot for @rs on global list */
+static struct raid_set *rs_find_active(struct raid_set *rs)
+{
+ struct raid_set *r;
+ struct mapped_device *md = dm_table_get_md(rs->ti->table);
+
+ list_for_each_entry(r, &raid_sets, list)
+ if (r != rs && dm_table_get_md(r->ti->table) == md)
+ return r;
+
+ return NULL;
+}
+
/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT 0
#define ALGORITHM_RAID10_NEAR 1
@@ -572,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
}
/* Return md raid10 algorithm for @name */
-static int raid10_name_to_format(const char *name)
+static const int raid10_name_to_format(const char *name)
{
if (!strcasecmp(name, "near"))
return ALGORITHM_RAID10_NEAR;
@@ -675,15 +691,11 @@ static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
return NULL;
}
-/*
- * Conditionally change bdev capacity of @rs
- * in case of a disk add/remove reshape
- */
-static void rs_set_capacity(struct raid_set *rs)
+/* Adjust rdev sectors */
+static void rs_set_rdev_sectors(struct raid_set *rs)
{
struct mddev *mddev = &rs->md;
struct md_rdev *rdev;
- struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
/*
* raid10 sets rdev->sector to the device size, which
@@ -692,8 +704,16 @@ static void rs_set_capacity(struct raid_set *rs)
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags))
rdev->sectors = mddev->dev_sectors;
+}
- set_capacity(gendisk, mddev->array_sectors);
+/*
+ * Change bdev capacity of @rs in case of a disk add/remove reshape
+ */
+static void rs_set_capacity(struct raid_set *rs)
+{
+ struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
+
+ set_capacity(gendisk, rs->md.array_sectors);
revalidate_disk(gendisk);
}
@@ -744,6 +764,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
mddev_init(&rs->md);
+ INIT_LIST_HEAD(&rs->list);
rs->raid_disks = raid_devs;
rs->delta_disks = 0;
@@ -761,6 +782,9 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
+ /* Add @rs to global list. */
+ list_add(&rs->list, &raid_sets);
+
/*
* Remaining items to be initialized by further RAID params:
* rs->md.persistent
@@ -773,6 +797,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
return rs;
}
+/* Free all @rs allocations and remove it from global list. */
static void raid_set_free(struct raid_set *rs)
{
int i;
@@ -790,6 +815,8 @@ static void raid_set_free(struct raid_set *rs)
dm_put_device(rs->ti, rs->dev[i].data_dev);
}
+ list_del(&rs->list);
+
kfree(rs);
}
@@ -1002,7 +1029,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
!rs->dev[i].rdev.sb_page)
rebuild_cnt++;
- switch (rs->raid_type->level) {
+ switch (rs->md.level) {
case 0:
break;
case 1:
@@ -1017,6 +1044,11 @@ static int validate_raid_redundancy(struct raid_set *rs)
break;
case 10:
copies = raid10_md_layout_to_copies(rs->md.new_layout);
+ if (copies < 2) {
+ DMERR("Bogus raid10 data copies < 2!");
+ return -EINVAL;
+ }
+
if (rebuild_cnt < copies)
break;
@@ -1576,6 +1608,24 @@ static sector_t __rdev_sectors(struct raid_set *rs)
return 0;
}
+/* Check that calculated dev_sectors fits all component devices. */
+static int _check_data_dev_sectors(struct raid_set *rs)
+{
+ sector_t ds = ~0;
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
+ ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
+ if (ds < rs->md.dev_sectors) {
+ rs->ti->error = "Component device(s) too small";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
@@ -1625,7 +1675,7 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
mddev->array_sectors = array_sectors;
mddev->dev_sectors = dev_sectors;
- return 0;
+ return _check_data_dev_sectors(rs);
bad:
rs->ti->error = "Target length not divisible by number of data devices";
return -EINVAL;
@@ -1674,8 +1724,11 @@ static void do_table_event(struct work_struct *ws)
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
smp_rmb(); /* Make sure we access most actual mddev properties */
- if (!rs_is_reshaping(rs))
+ if (!rs_is_reshaping(rs)) {
+ if (rs_is_raid10(rs))
+ rs_set_rdev_sectors(rs);
rs_set_capacity(rs);
+ }
dm_table_event(rs->ti->table);
}
@@ -1860,7 +1913,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
if (rs_takeover_requested(rs))
return false;
- if (!mddev->level)
+ if (rs_is_raid0(rs))
return false;
change = mddev->new_layout != mddev->layout ||
@@ -1868,7 +1921,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
rs->delta_disks;
/* Historical case to support raid1 reshape without delta disks */
- if (mddev->level == 1) {
+ if (rs_is_raid1(rs)) {
if (rs->delta_disks)
return !!rs->delta_disks;
@@ -1876,7 +1929,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
mddev->raid_disks != rs->raid_disks;
}
- if (mddev->level == 10)
+ if (rs_is_raid10(rs))
return change &&
!__is_raid10_far(mddev->new_layout) &&
rs->delta_disks >= 0;
@@ -2340,7 +2393,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
DMERR("new device%s provided without 'rebuild'",
new_devs > 1 ? "s" : "");
return -EINVAL;
- } else if (rs_is_recovering(rs)) {
+ } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
(unsigned long long) mddev->recovery_cp);
return -EINVAL;
@@ -2640,12 +2693,19 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
* Make sure we got a minimum amount of free sectors per device
*/
if (rs->data_offset &&
- to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
+ to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
rs->ti->error = data_offset ? "No space for forward reshape" :
"No space for backward reshape";
return -ENOSPC;
}
out:
+ /*
+ * Raise recovery_cp in case data_offset != 0 to
+ * avoid false recovery positives in the constructor.
+ */
+ if (rs->md.recovery_cp < rs->md.dev_sectors)
+ rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
+
/* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
rdev_for_each(rdev, &rs->md) {
if (!test_bit(Journal, &rdev->flags)) {
@@ -2682,14 +2742,14 @@ static int rs_setup_takeover(struct raid_set *rs)
sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
if (rt_is_raid10(rs->raid_type)) {
- if (mddev->level == 0) {
+ if (rs_is_raid0(rs)) {
 			/* Userspace reordered disks -> adjust raid_disk indexes */
__reorder_raid_disk_indexes(rs);
/* raid0 -> raid10_far layout */
mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
rs->raid10_copies);
- } else if (mddev->level == 1)
+ } else if (rs_is_raid1(rs))
/* raid1 -> raid10_near layout */
mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
rs->raid_disks);
@@ -2777,6 +2837,23 @@ static int rs_prepare_reshape(struct raid_set *rs)
return 0;
}
+/* Get reshape sectors from data_offsets or raid set */
+static sector_t _get_reshape_sectors(struct raid_set *rs)
+{
+ struct md_rdev *rdev;
+ sector_t reshape_sectors = 0;
+
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags)) {
+ reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
+ rdev->data_offset - rdev->new_data_offset :
+ rdev->new_data_offset - rdev->data_offset;
+ break;
+ }
+
+ return max(reshape_sectors, (sector_t) rs->data_offset);
+}
+
/*
*
* - change raid layout
@@ -2788,6 +2865,7 @@ static int rs_setup_reshape(struct raid_set *rs)
{
int r = 0;
unsigned int cur_raid_devs, d;
+ sector_t reshape_sectors = _get_reshape_sectors(rs);
struct mddev *mddev = &rs->md;
struct md_rdev *rdev;
@@ -2804,13 +2882,13 @@ static int rs_setup_reshape(struct raid_set *rs)
/*
* Adjust array size:
*
- * - in case of adding disks, array size has
+ * - in case of adding disk(s), array size has
* to grow after the disk adding reshape,
 	 * which'll happen in the event handler;
* reshape will happen forward, so space has to
* be available at the beginning of each disk
*
- * - in case of removing disks, array size
+ * - in case of removing disk(s), array size
* has to shrink before starting the reshape,
* which'll happen here;
* reshape will happen backward, so space has to
@@ -2841,7 +2919,7 @@ static int rs_setup_reshape(struct raid_set *rs)
rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
}
- mddev->reshape_backwards = 0; /* adding disks -> forward reshape */
+ mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */
/* Remove disk(s) */
} else if (rs->delta_disks < 0) {
@@ -2874,6 +2952,15 @@ static int rs_setup_reshape(struct raid_set *rs)
mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
}
+ /*
+ * Adjust device size for forward reshape
+ * because md_finish_reshape() reduces it.
+ */
+ if (!mddev->reshape_backwards)
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags))
+ rdev->sectors += reshape_sectors;
+
return r;
}
@@ -2890,7 +2977,7 @@ static void configure_discard_support(struct raid_set *rs)
/*
* XXX: RAID level 4,5,6 require zeroing for safety.
*/
- raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
+ raid456 = rs_is_raid456(rs);
for (i = 0; i < rs->raid_disks; i++) {
struct request_queue *q;
@@ -2915,7 +3002,7 @@ static void configure_discard_support(struct raid_set *rs)
* RAID1 and RAID10 personalities require bio splitting,
* RAID0/4/5/6 don't and process large discard bios properly.
*/
- ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
+ ti->split_discard_bios = !!(rs_is_raid1(rs) || rs_is_raid10(rs));
ti->num_discard_bios = 1;
}
@@ -2935,10 +3022,10 @@ static void configure_discard_support(struct raid_set *rs)
static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
- bool resize;
+ bool resize = false;
struct raid_type *rt;
unsigned int num_raid_params, num_raid_devs;
- sector_t calculated_dev_sectors, rdev_sectors;
+ sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors;
struct raid_set *rs = NULL;
const char *arg;
struct rs_layout rs_layout;
@@ -3021,7 +3108,10 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- resize = calculated_dev_sectors != rdev_sectors;
+
+ reshape_sectors = _get_reshape_sectors(rs);
+ if (calculated_dev_sectors != rdev_sectors)
+ resize = calculated_dev_sectors != (reshape_sectors ? rdev_sectors - reshape_sectors : rdev_sectors);
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
@@ -3105,19 +3195,22 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- /*
- * We can only prepare for a reshape here, because the
- * raid set needs to run to provide the repective reshape
- * check functions via its MD personality instance.
- *
- * So do the reshape check after md_run() succeeded.
- */
- r = rs_prepare_reshape(rs);
- if (r)
- return r;
+ /* Out-of-place space has to be available to allow for a reshape unless raid1! */
+ if (reshape_sectors || rs_is_raid1(rs)) {
+ /*
+ * We can only prepare for a reshape here, because the
+ * raid set needs to run to provide the repective reshape
+		 * raid set needs to run to provide the respective reshape
+ *
+ * So do the reshape check after md_run() succeeded.
+ */
+ r = rs_prepare_reshape(rs);
+ if (r)
+ return r;
- /* Reshaping ain't recovery, so disable recovery */
- rs_setup_recovery(rs, MaxSector);
+ /* Reshaping ain't recovery, so disable recovery */
+ rs_setup_recovery(rs, MaxSector);
+ }
rs_set_cur(rs);
} else {
/* May not set recovery when a device rebuild is requested */
@@ -3144,13 +3237,20 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_lock_nointr(&rs->md);
r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
-
if (r) {
ti->error = "Failed to run raid array";
mddev_unlock(&rs->md);
goto bad;
}
+ r = md_start(&rs->md);
+
+ if (r) {
+ ti->error = "Failed to start raid array";
+ mddev_unlock(&rs->md);
+ goto bad_md_start;
+ }
+
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
@@ -3198,6 +3298,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_unlock(&rs->md);
return 0;
+bad_md_start:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
@@ -3239,25 +3340,27 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
}
/* Return string describing the current sync action of @mddev */
-static const char *decipher_sync_action(struct mddev *mddev)
+static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
- if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_FROZEN, &recovery))
return "frozen";
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+	/* The MD sync thread can be done with I/O but still be running */
+ if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
+ (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
+ (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return "reshape";
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
- if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+ if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
return "resync";
- else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ else if (test_bit(MD_RECOVERY_CHECK, &recovery))
return "check";
return "repair";
}
- if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
return "recover";
}
@@ -3274,7 +3377,7 @@ static const char *decipher_sync_action(struct mddev *mddev)
* 'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
* '-' = Non-existing device (i.e. uspace passed '- -' into the ctr)
*/
-static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev, bool array_in_sync)
+static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
{
if (!rdev->bdev)
return "-";
@@ -3282,85 +3385,108 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
return "D";
else if (test_bit(Journal, &rdev->flags))
return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
- else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
+ else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
+ (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
+ !test_bit(In_sync, &rdev->flags)))
return "a";
else
return "A";
}
-/* Helper to return resync/reshape progress for @rs and @array_in_sync */
-static sector_t rs_get_progress(struct raid_set *rs,
- sector_t resync_max_sectors, bool *array_in_sync)
+/* Helper to return resync/reshape progress and set the in-sync/resyncing runtime flags for @rs */
+static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
+ sector_t resync_max_sectors)
{
- sector_t r, curr_resync_completed;
+ sector_t r;
struct mddev *mddev = &rs->md;
- curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
- *array_in_sync = false;
+ clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+ clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
if (rs_is_raid0(rs)) {
r = resync_max_sectors;
- *array_in_sync = true;
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
- r = mddev->reshape_position;
-
- /* Reshape is relative to the array size */
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
- r != MaxSector) {
- if (r == MaxSector) {
- *array_in_sync = true;
- r = resync_max_sectors;
- } else {
- /* Got to reverse on backward reshape */
- if (mddev->reshape_backwards)
- r = mddev->array_sectors - r;
-
- /* Devide by # of data stripes */
- sector_div(r, mddev_data_stripes(rs));
- }
-
- /* Sync is relative to the component device size */
- } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- r = curr_resync_completed;
+ if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+ test_bit(MD_RECOVERY_RUNNING, &recovery))
+ r = mddev->curr_resync_completed;
else
r = mddev->recovery_cp;
- if ((r == MaxSector) ||
- (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
- (mddev->curr_resync_completed == resync_max_sectors))) {
+ if (r >= resync_max_sectors &&
+ (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
+ (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
+ !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
+ !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
/*
* Sync complete.
*/
- *array_in_sync = true;
- r = resync_max_sectors;
- } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /* In case we have finished recovering, the array is in sync. */
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
+ /*
+ * In case we are recovering, the array is not in sync
+ * and health chars should show the recovering legs.
+ */
+ ;
+
+ } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ /*
+ * If "resync" is occurring, the raid set
+ * is or may be out of sync hence the health
+ * characters shall be 'a'.
+ */
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ /*
+ * If "reshape" is occurring, the raid set
+ * is or may be out of sync hence the health
+ * characters shall be 'a'.
+ */
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
/*
* If "check" or "repair" is occurring, the raid set has
* undergone an initial sync and the health characters
* should not be 'a' anymore.
*/
- *array_in_sync = true;
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+
} else {
struct md_rdev *rdev;
/*
+ * We are idle and recovery is needed, prevent 'A' chars race
+ * caused by components still set to in-sync by constructor.
+ */
+ if (test_bit(MD_RECOVERY_NEEDED, &recovery))
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ /*
* The raid set may be doing an initial sync, or it may
* be rebuilding individual components. If all the
* devices are In_sync, then it is the raid set that is
* being initialized.
*/
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags))
- *array_in_sync = true;
-#if 0
- r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
-#endif
+ !test_bit(In_sync, &rdev->flags)) {
+ clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+ break;
+ }
}
}
- return r;
+ return min(r, resync_max_sectors);
}
/* Helper to return @dev name or "-" if !@dev */
@@ -3376,7 +3502,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
struct mddev *mddev = &rs->md;
struct r5conf *conf = mddev->private;
int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
- bool array_in_sync;
+ unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
unsigned int sz = 0;
unsigned int rebuild_disks;
@@ -3396,17 +3522,18 @@ static void raid_status(struct dm_target *ti, status_type_t type,
/* Access most recent mddev properties for status output */
smp_rmb();
+ recovery = rs->md.recovery;
/* Get sensible max sectors even if raid set not yet started */
resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
mddev->resync_max_sectors : mddev->dev_sectors;
- progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
+ progress = rs_get_progress(rs, recovery, resync_max_sectors);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
atomic64_read(&mddev->resync_mismatches) : 0;
- sync_action = decipher_sync_action(&rs->md);
+ sync_action = decipher_sync_action(&rs->md, recovery);
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
for (i = 0; i < rs->raid_disks; i++)
- DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev, array_in_sync));
+ DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
/*
* In-sync/Reshape ratio:
@@ -3457,7 +3584,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
* v1.10.0+:
*/
DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
- __raid_dev_status(rs, &rs->journal_dev.rdev, 0) : "-");
+ __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
break;
case STATUSTYPE_TABLE:
@@ -3613,24 +3740,19 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
}
-static void raid_presuspend(struct dm_target *ti)
-{
- struct raid_set *rs = ti->private;
-
- md_stop_writes(&rs->md);
-}
-
static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ /* Writes have to be stopped before suspending to avoid deadlocks. */
+ if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
+ md_stop_writes(&rs->md);
+
mddev_lock_nointr(&rs->md);
mddev_suspend(&rs->md);
mddev_unlock(&rs->md);
}
-
- rs->md.ro = 1;
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
@@ -3807,10 +3929,33 @@ static int raid_preresume(struct dm_target *ti)
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
- /* This is a resume after a suspend of the set -> it's already started */
+ /* This is a resume after a suspend of the set -> it's already started. */
if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
return 0;
+ if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+ struct raid_set *rs_active = rs_find_active(rs);
+
+ if (rs_active) {
+ /*
+ * In case no rebuilds have been requested
+ * and an active table slot exists, copy
+ * current resynchonization completed and
+ * reshape position pointers across from
+ * suspended raid set in the active slot.
+ *
+ * This resumes the new mapping at current
+ * offsets to continue recover/reshape without
+ * necessarily redoing a raid set partially or
+ * causing data corruption in case of a reshape.
+ */
+ if (rs_active->md.curr_resync_completed != MaxSector)
+ mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
+ if (rs_active->md.reshape_position != MaxSector)
+ mddev->reshape_position = rs_active->md.reshape_position;
+ }
+ }
+
/*
* The superblocks need to be updated on disk if the
* array is new or new devices got added (thus zeroed
@@ -3842,11 +3987,10 @@ static int raid_preresume(struct dm_target *ti)
mddev->resync_min = mddev->recovery_cp;
}
- rs_set_capacity(rs);
-
/* Check for any reshape request unless new raid set */
- if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+ if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
/* Initiate a reshape. */
+ rs_set_rdev_sectors(rs);
mddev_lock_nointr(mddev);
r = rs_start_reshape(rs);
mddev_unlock(mddev);
@@ -3872,21 +4016,15 @@ static void raid_resume(struct dm_target *ti)
attempt_restore_of_faulty_devices(rs);
}
- mddev->ro = 0;
- mddev->in_sync = 0;
-
- /*
- * Keep the RAID set frozen if reshape/rebuild flags are set.
- * The RAID set is unfrozen once the next table load/resume,
- * which clears the reshape/rebuild flags, occurs.
- * This ensures that the constructor for the inactive table
- * retrieves an up-to-date reshape_position.
- */
- if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-
if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ /* Only reduce raid set size before running a disk removing reshape. */
+ if (mddev->delta_disks < 0)
+ rs_set_capacity(rs);
+
mddev_lock_nointr(mddev);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ mddev->ro = 0;
+ mddev->in_sync = 0;
mddev_resume(mddev);
mddev_unlock(mddev);
}
@@ -3894,7 +4032,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 13, 0},
+ .version = {1, 13, 2},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
@@ -3903,7 +4041,6 @@ static struct target_type raid_target = {
.message = raid_message,
.iterate_devices = raid_iterate_devices,
.io_hints = raid_io_hints,
- .presuspend = raid_presuspend,
.postsuspend = raid_postsuspend,
.preresume = raid_preresume,
.resume = raid_resume,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9d32f25489c2..aeaaaef43eff 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -315,6 +315,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
/* The target wants to requeue the I/O */
dm_requeue_original_request(tio, false);
break;
+ case DM_ENDIO_DELAY_REQUEUE:
+ /* The target wants to requeue the I/O after a delay */
+ dm_requeue_original_request(tio, true);
+ break;
default:
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -395,7 +399,7 @@ static void end_clone_request(struct request *clone, blk_status_t error)
dm_complete_request(tio->orig, error);
}
-static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
+static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
blk_status_t r;
@@ -404,9 +408,10 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
- if (r)
+ if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
/* must complete clone in terms of original request */
dm_complete_request(rq, r);
+ return r;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
@@ -476,8 +481,10 @@ static int map_request(struct dm_rq_target_io *tio)
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request *clone = NULL;
+ blk_status_t ret;
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
+check_again:
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
@@ -492,7 +499,17 @@ static int map_request(struct dm_rq_target_io *tio)
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
- dm_dispatch_clone_request(clone, rq);
+ ret = dm_dispatch_clone_request(clone, rq);
+ if (ret == BLK_STS_RESOURCE) {
+ blk_rq_unprep_clone(clone);
+ tio->ti->type->release_clone_rq(clone);
+ tio->clone = NULL;
+ if (!rq->q->mq_ops)
+ r = DM_MAPIO_DELAY_REQUEUE;
+ else
+ r = DM_MAPIO_REQUEUE;
+ goto check_again;
+ }
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
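
The BLK_STS_RESOURCE branch tears the clone down completely (unprep, release, tio->clone = NULL) before re-entering the switch, since a retry builds a fresh clone. A sketch of the resulting flow; the rationale in parentheses is a reading of the code, not taken from a commit message:

/*
 * dispatch -> BLK_STS_RESOURCE
 *   -> blk_rq_unprep_clone() + release_clone_rq(), tio->clone = NULL
 *   -> r = DM_MAPIO_DELAY_REQUEUE  (old .request_fn path: a delayed
 *                                   retry avoids busy-looping)
 *      r = DM_MAPIO_REQUEUE        (blk-mq, which has its own backoff)
 *   -> goto check_again, landing in the matching requeue case above.
 */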
@@ -700,7 +717,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
/* disable dm_old_request_fn's merge heuristic by default */
md->seq_rq_merge_deadline_usecs = 0;
- dm_init_normal_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
/* Initialize the request-based DM worker thread */
@@ -713,8 +729,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
return error;
}
- elv_register_queue(md->queue);
-
return 0;
}
@@ -810,17 +824,9 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
err = PTR_ERR(q);
goto out_tag_set;
}
- dm_init_md_queue(md);
-
- /* backfill 'mq' sysfs registration normally done in blk_register_queue */
- err = blk_mq_register_dev(disk_to_dev(md->disk), q);
- if (err)
- goto out_cleanup_queue;
return 0;
-out_cleanup_queue:
- blk_cleanup_queue(q);
out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index 7b8642045c55..f006a9005593 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -282,9 +282,6 @@ static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
if (list_empty(&s->valid_paths))
goto out;
- /* Change preferred (first in list) path to evenly balance. */
- list_move_tail(s->valid_paths.next, &s->valid_paths);
-
list_for_each_entry(pi, &s->valid_paths, list)
if (!best || (st_compare_load(pi, best, nr_bytes) < 0))
best = pi;
@@ -292,6 +289,9 @@ static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
if (!best)
goto out;
+ /* Move most recently used to least preferred to evenly balance. */
+ list_move_tail(&best->list, &s->valid_paths);
+
ret = best->path;
out:
spin_unlock_irqrestore(&s->lock, flags);
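
After the fix the rotation follows the path that actually won: load ties are broken by list position, and the winner is demoted to the tail so equally loaded paths take turns. A runnable toy model, assuming three equally loaded paths reduced to letters:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char order[] = "ABC";

	for (int i = 0; i < 6; i++) {
		char best = order[0];          /* equal load: first in list wins */
		putchar(best);
		memmove(order, order + 1, 2);  /* move the winner to the tail */
		order[2] = best;
	}
	putchar('\n');                         /* prints ABCABC */
	return 0;
}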
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a0613bd8ed00..216035be5661 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -47,7 +47,7 @@ struct dm_exception_table {
};
struct dm_snapshot {
- struct rw_semaphore lock;
+ struct mutex lock;
struct dm_dev *origin;
struct dm_dev *cow;
@@ -439,9 +439,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- down_read(&s->lock);
+ mutex_lock(&s->lock);
active = s->active;
- up_read(&s->lock);
+ mutex_unlock(&s->lock);
if (active) {
if (snap_src)
@@ -909,7 +909,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -924,7 +924,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
if (b)
flush_bios(b);
@@ -983,9 +983,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
goto shut;
}
@@ -1026,10 +1026,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@@ -1071,10 +1071,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1173,7 +1173,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
INIT_LIST_HEAD(&s->out_of_order_list);
- init_rwsem(&s->lock);
+ mutex_init(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@@ -1338,9 +1338,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- down_write(&snap_dest->lock);
+ mutex_lock(&snap_dest->lock);
snap_dest->valid = 0;
- up_write(&snap_dest->lock);
+ mutex_unlock(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1371,6 +1371,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ mutex_destroy(&s->lock);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
@@ -1458,7 +1460,7 @@ static void pending_complete(void *context, int success)
if (!success) {
/* Read/write error - snapshot is unusable */
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -EIO);
error = 1;
goto out;
@@ -1466,14 +1468,14 @@ static void pending_complete(void *context, int success)
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
error = 1;
goto out;
}
*e = pe->e;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid) {
free_completed_exception(e);
error = 1;
@@ -1498,7 +1500,7 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Submit any pending write bios */
if (error) {
@@ -1694,7 +1696,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
/* FIXME: should only take write lock if we need
* to copy an exception */
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1717,9 +1719,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
pe = alloc_pending_exception(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe);
@@ -1754,7 +1756,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_full_bio(pe, bio);
goto out;
}
@@ -1764,7 +1766,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_copy(pe);
goto out;
}
@@ -1774,7 +1776,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
out:
return r;
}
@@ -1810,7 +1812,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1841,12 +1843,12 @@ redirect_to_origin:
bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
return do_origin(s->origin, bio);
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
return r;
}
@@ -1878,7 +1880,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_read(&snap_src->lock);
+ mutex_lock(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1888,7 +1890,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
- up_read(&snap_src->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1934,11 +1936,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_write(&snap_src->lock);
- down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ mutex_lock(&snap_src->lock);
+ mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- up_write(&snap_dest->lock);
- up_write(&snap_src->lock);
+ mutex_unlock(&snap_dest->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1953,9 +1955,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->active = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -1995,7 +1997,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2020,7 +2022,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
break;
@@ -2086,7 +2088,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
/* Only deal with valid and active snapshots */
if (!snap->valid || !snap->active)
@@ -2113,9 +2115,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
pe = alloc_pending_exception(snap);
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid) {
free_pending_exception(pe);
@@ -2158,7 +2160,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 29bc51084c82..56059fb56e2d 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -228,6 +228,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
dm_stat_free(&s->rcu_head);
}
free_percpu(stats->last);
+ mutex_destroy(&stats->mutex);
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index aaffd0c0ee9a..5fe7ec356c33 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -866,7 +866,8 @@ EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_BIO_BASED ||
- table_type == DM_TYPE_DAX_BIO_BASED);
+ table_type == DM_TYPE_DAX_BIO_BASED ||
+ table_type == DM_TYPE_NVME_BIO_BASED);
}
static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -909,13 +910,33 @@ static bool dm_table_supports_dax(struct dm_table *t)
return true;
}
+static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
+
+struct verify_rq_based_data {
+ unsigned sq_count;
+ unsigned mq_count;
+};
+
+static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct verify_rq_based_data *v = data;
+
+ if (q->mq_ops)
+ v->mq_count++;
+ else
+ v->sq_count++;
+
+ return queue_is_rq_based(q);
+}
+
static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- unsigned sq_count = 0, mq_count = 0;
+ struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
struct dm_target *tgt;
- struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -923,6 +944,14 @@ static int dm_table_determine_type(struct dm_table *t)
/* target already set the table's type */
if (t->type == DM_TYPE_BIO_BASED)
return 0;
+ else if (t->type == DM_TYPE_NVME_BIO_BASED) {
+ if (!dm_table_does_not_support_partial_completion(t)) {
+ DMERR("nvme bio-based is only possible with devices"
+ " that don't support partial completion");
+ return -EINVAL;
+ }
+ /* Fallthru, also verify all devices are blk-mq */
+ }
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
goto verify_rq_based;
}
@@ -937,8 +966,8 @@ static int dm_table_determine_type(struct dm_table *t)
bio_based = 1;
if (bio_based && request_based) {
- DMWARN("Inconsistent table: different target types"
- " can't be mixed up");
+ DMERR("Inconsistent table: different target types"
+ " can't be mixed up");
return -EINVAL;
}
}
@@ -959,8 +988,18 @@ static int dm_table_determine_type(struct dm_table *t)
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
if (dm_table_supports_dax(t) ||
- (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
+ (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
+ } else {
+ /* Check if upgrading to NVMe bio-based is valid or required */
+ tgt = dm_table_get_immutable_target(t);
+ if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
+ t->type = DM_TYPE_NVME_BIO_BASED;
+ goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
+ } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
+ t->type = DM_TYPE_NVME_BIO_BASED;
+ }
+ }
return 0;
}
@@ -980,7 +1019,8 @@ verify_rq_based:
* (e.g. request completion process for partial completion.)
*/
if (t->num_targets > 1) {
- DMWARN("Request-based dm doesn't support multiple targets yet");
+ DMERR("%s DM doesn't support multiple targets",
+ t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
return -EINVAL;
}
@@ -997,28 +1037,29 @@ verify_rq_based:
return 0;
}
- /* Non-request-stackable devices can't be used for request-based dm */
- list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
-
- if (!queue_is_rq_based(q)) {
- DMERR("table load rejected: including"
- " non-request-stackable devices");
- return -EINVAL;
- }
+ tgt = dm_table_get_immutable_target(t);
+ if (!tgt) {
+ DMERR("table load rejected: immutable target is required");
+ return -EINVAL;
+ } else if (tgt->max_io_len) {
+ DMERR("table load rejected: immutable target that splits IO is not supported");
+ return -EINVAL;
+ }
- if (q->mq_ops)
- mq_count++;
- else
- sq_count++;
+ /* Non-request-stackable devices can't be used for request-based dm */
+ if (!tgt->type->iterate_devices ||
+ !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
+ DMERR("table load rejected: including non-request-stackable devices");
+ return -EINVAL;
}
- if (sq_count && mq_count) {
+ if (v.sq_count && v.mq_count) {
DMERR("table load rejected: not all devices are blk-mq request-stackable");
return -EINVAL;
}
- t->all_blk_mq = mq_count > 0;
+ t->all_blk_mq = v.mq_count > 0;
- if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
+ if (!t->all_blk_mq &&
+ (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {
DMERR("table load rejected: all devices are not blk-mq request-stackable");
return -EINVAL;
}
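
The sq/mq counting that used to walk the device list directly now happens in device_is_rq_based() via iterate_devices, but the acceptance rules are unchanged apart from the new NVMe case. Summarized as a reading of the checks above:

/*
 *   sq_count  mq_count  outcome
 *      >0        0      accepted, t->all_blk_mq = false
 *       0       >0      accepted, t->all_blk_mq = true
 *      >0       >0      rejected: mixed sq/mq stacking
 *
 * DM_TYPE_MQ_REQUEST_BASED and DM_TYPE_NVME_BIO_BASED are additionally
 * rejected unless all_blk_mq ends up true.
 */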
@@ -1079,7 +1120,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
{
enum dm_queue_mode type = dm_table_get_type(t);
unsigned per_io_data_size = 0;
- struct dm_target *tgt;
+ unsigned min_pool_size = 0;
+ struct dm_target *ti;
unsigned i;
if (unlikely(type == DM_TYPE_NONE)) {
@@ -1089,11 +1131,13 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
if (__table_type_bio_based(type))
for (i = 0; i < t->num_targets; i++) {
- tgt = t->targets + i;
- per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
+ ti = t->targets + i;
+ per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
+ min_pool_size = max(min_pool_size, ti->num_flush_bios);
}
- t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
+ t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
+ per_io_data_size, min_pool_size);
if (!t->mempools)
return -ENOMEM;
@@ -1705,6 +1749,20 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return true;
}
+static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ char b[BDEVNAME_SIZE];
+
+ /* For now, NVMe devices are the only devices of this class */
+ return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
+}
+
+static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
+{
+ return dm_table_all_devices_attribute(t, device_no_partial_completion);
+}
+
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1820,6 +1878,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_dax(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
if (dm_table_supports_dax_write_cache(t))
dax_write_cache(t->md->dax_dev, true);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index d31d18d9727c..36ef284ad086 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -80,10 +80,14 @@
#define SECTOR_TO_BLOCK_SHIFT 3
/*
+ * For btree insert:
* 3 for btree insert +
* 2 for btree lookup used within space map
+ * For btree remove:
+ * 2 for shadow spine +
+ * 4 for rebalance 3 child node
*/
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128
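
The constant has to cover the worse of the two paths, and btree removal is now the larger one; spelling out the arithmetic from the comment above:

/*
 *   insert path: 3 (btree insert) + 2 (space-map lookup)        = 5
 *   remove path: 2 (shadow spine) + 4 (rebalance 3 child nodes) = 6
 *
 * max(5, 6) = 6, hence THIN_MAX_CONCURRENT_LOCKS goes from 5 to 6.
 */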
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index f91d771fff4b..629c555890c1 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -492,6 +492,11 @@ static void pool_table_init(void)
INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}
+static void pool_table_exit(void)
+{
+ mutex_destroy(&dm_thin_pool_table.mutex);
+}
+
static void __pool_table_insert(struct pool *pool)
{
BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
@@ -1717,7 +1722,7 @@ static void __remap_and_issue_shared_cell(void *context,
bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
- struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
inc_all_io_entry(info->tc->pool, bio);
@@ -4387,6 +4392,8 @@ static void dm_thin_exit(void)
dm_unregister_target(&pool_target);
kmem_cache_destroy(_new_mapping_cache);
+
+ pool_table_exit();
}
module_init(dm_thin_init);
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
new file mode 100644
index 000000000000..65f838fa2e99
--- /dev/null
+++ b/drivers/md/dm-unstripe.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2017 Intel Corporation.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/device-mapper.h>
+
+struct unstripe_c {
+ struct dm_dev *dev;
+ sector_t physical_start;
+
+ uint32_t stripes;
+
+ uint32_t unstripe;
+ sector_t unstripe_width;
+ sector_t unstripe_offset;
+
+ uint32_t chunk_size;
+ u8 chunk_shift;
+};
+
+#define DM_MSG_PREFIX "unstriped"
+
+static void cleanup_unstripe(struct unstripe_c *uc, struct dm_target *ti)
+{
+ if (uc->dev)
+ dm_put_device(ti, uc->dev);
+ kfree(uc);
+}
+
+/*
+ * Construct an unstriped mapping.
+ * <number of stripes> <chunk size> <stripe #> <dev_path> <offset>
+ */
+static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct unstripe_c *uc;
+ sector_t tmp_len;
+ unsigned long long start;
+ char dummy;
+
+ if (argc != 5) {
+ ti->error = "Invalid number of arguments";
+ return -EINVAL;
+ }
+
+ uc = kzalloc(sizeof(*uc), GFP_KERNEL);
+ if (!uc) {
+ ti->error = "Memory allocation for unstriped context failed";
+ return -ENOMEM;
+ }
+
+ if (kstrtouint(argv[0], 10, &uc->stripes) || !uc->stripes) {
+ ti->error = "Invalid stripe count";
+ goto err;
+ }
+
+ if (kstrtouint(argv[1], 10, &uc->chunk_size) || !uc->chunk_size) {
+ ti->error = "Invalid chunk_size";
+ goto err;
+ }
+
+ /* FIXME: must support non power of 2 chunk_size, dm-stripe.c does */
+ if (!is_power_of_2(uc->chunk_size)) {
+ ti->error = "Non power of 2 chunk_size is not supported yet";
+ goto err;
+ }
+
+ if (kstrtouint(argv[2], 10, &uc->unstripe)) {
+ ti->error = "Invalid stripe number";
+ goto err;
+ }
+
+ if (uc->unstripe > uc->stripes && uc->stripes > 1) {
+ ti->error = "Please provide stripe between [0, # of stripes]";
+ goto err;
+ }
+
+ if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &uc->dev)) {
+ ti->error = "Couldn't get striped device";
+ goto err;
+ }
+
+ if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
+ ti->error = "Invalid striped device offset";
+ goto err;
+ }
+ uc->physical_start = start;
+
+ uc->unstripe_offset = uc->unstripe * uc->chunk_size;
+ uc->unstripe_width = (uc->stripes - 1) * uc->chunk_size;
+ uc->chunk_shift = fls(uc->chunk_size) - 1;
+
+ tmp_len = ti->len;
+ if (sector_div(tmp_len, uc->chunk_size)) {
+ ti->error = "Target length not divisible by chunk size";
+ goto err;
+ }
+
+ if (dm_set_target_max_io_len(ti, uc->chunk_size)) {
+ ti->error = "Failed to set max io len";
+ goto err;
+ }
+
+ ti->private = uc;
+ return 0;
+err:
+ cleanup_unstripe(uc, ti);
+ return -EINVAL;
+}
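
Tying the constructor arguments to a concrete invocation: the hypothetical table line below (the device path and <len> are placeholders) exposes stripe #0 of a 4-member stripe set with 256-sector chunks; <len> must be divisible by the chunk size or the ctr rejects it:

  dmsetup create unstriped0 --table '0 <len> unstriped 4 256 0 /dev/mapper/striped 0'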
+
+static void unstripe_dtr(struct dm_target *ti)
+{
+ struct unstripe_c *uc = ti->private;
+
+ cleanup_unstripe(uc, ti);
+}
+
+static sector_t map_to_core(struct dm_target *ti, struct bio *bio)
+{
+ struct unstripe_c *uc = ti->private;
+ sector_t sector = bio->bi_iter.bi_sector;
+
+ /* Shift us up to the right "row" on the stripe */
+ sector += uc->unstripe_width * (sector >> uc->chunk_shift);
+
+ /* Account for what stripe we're operating on */
+ sector += uc->unstripe_offset;
+
+ return sector;
+}
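
The remap is pure arithmetic: core = logical + unstripe_width * (logical >> chunk_shift) + unstripe_offset. A runnable user-space check, assuming an illustrative geometry of 4 stripes with 8-sector chunks, extracting stripe #1:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t stripes = 4, chunk = 8, my_stripe = 1; /* illustrative */
	const uint64_t width  = (stripes - 1) * chunk;  /* 24 */
	const uint64_t offset = my_stripe * chunk;      /* 8  */
	const unsigned shift  = 3;                      /* log2(chunk) */

	for (uint64_t s = 0; s < 3 * chunk; s += chunk) {
		uint64_t core = s + width * (s >> shift) + offset;
		printf("logical %2llu -> core %3llu\n",
		       (unsigned long long)s, (unsigned long long)core);
	}
	/* Prints 0->8, 8->40, 16->72: consecutive chunks of stripe #1,
	 * one full stripe width (32 sectors) apart on the striped device. */
	return 0;
}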
+
+static int unstripe_map(struct dm_target *ti, struct bio *bio)
+{
+ struct unstripe_c *uc = ti->private;
+
+ bio_set_dev(bio, uc->dev->bdev);
+ bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start;
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static void unstripe_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct unstripe_c *uc = ti->private;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%d %llu %d %s %llu",
+ uc->stripes, (unsigned long long)uc->chunk_size, uc->unstripe,
+ uc->dev->name, (unsigned long long)uc->physical_start);
+ break;
+ }
+}
+
+static int unstripe_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct unstripe_c *uc = ti->private;
+
+ return fn(ti, uc->dev, uc->physical_start, ti->len, data);
+}
+
+static void unstripe_io_hints(struct dm_target *ti,
+ struct queue_limits *limits)
+{
+ struct unstripe_c *uc = ti->private;
+
+ limits->chunk_sectors = uc->chunk_size;
+}
+
+static struct target_type unstripe_target = {
+ .name = "unstriped",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = unstripe_ctr,
+ .dtr = unstripe_dtr,
+ .map = unstripe_map,
+ .status = unstripe_status,
+ .iterate_devices = unstripe_iterate_devices,
+ .io_hints = unstripe_io_hints,
+};
+
+static int __init dm_unstripe_init(void)
+{
+ int r;
+
+ r = dm_register_target(&unstripe_target);
+ if (r < 0)
+ DMERR("target registration failed");
+
+ return r;
+}
+
+static void __exit dm_unstripe_exit(void)
+{
+ dm_unregister_target(&unstripe_target);
+}
+
+module_init(dm_unstripe_init);
+module_exit(dm_unstripe_exit);
+
+MODULE_DESCRIPTION(DM_NAME " unstriped target");
+MODULE_AUTHOR("Scott Bauer <scott.bauer@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 70485de37b66..969954915566 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -2333,6 +2333,9 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
/* Free the zone descriptors */
dmz_drop_zones(zmd);
+
+ mutex_destroy(&zmd->mblk_flush_lock);
+ mutex_destroy(&zmd->map_lock);
}
/*
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 6d7bda6f8190..caff02caf083 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -827,6 +827,7 @@ err_fwq:
err_cwq:
destroy_workqueue(dmz->chunk_wq);
err_bio:
+ mutex_destroy(&dmz->chunk_lock);
bioset_free(dmz->bio_set);
err_meta:
dmz_dtr_metadata(dmz->metadata);
@@ -861,6 +862,8 @@ static void dmz_dtr(struct dm_target *ti)
dmz_put_zoned_device(ti);
+ mutex_destroy(&dmz->chunk_lock);
+
kfree(dmz);
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index de17b7193299..d6de00f367ef 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -60,18 +60,73 @@ void dm_issue_global_event(void)
}
/*
- * One of these is allocated per bio.
+ * One of these is allocated (on-stack) per original bio.
*/
+struct clone_info {
+ struct dm_table *map;
+ struct bio *bio;
+ struct dm_io *io;
+ sector_t sector;
+ unsigned sector_count;
+};
+
+/*
+ * One of these is allocated per clone bio.
+ */
+#define DM_TIO_MAGIC 7282014
+struct dm_target_io {
+ unsigned magic;
+ struct dm_io *io;
+ struct dm_target *ti;
+ unsigned target_bio_nr;
+ unsigned *len_ptr;
+ bool inside_dm_io;
+ struct bio clone;
+};
+
+/*
+ * One of these is allocated per original bio.
+ * It contains the first clone used for that original.
+ */
+#define DM_IO_MAGIC 5191977
struct dm_io {
+ unsigned magic;
struct mapped_device *md;
blk_status_t status;
atomic_t io_count;
- struct bio *bio;
+ struct bio *orig_bio;
unsigned long start_time;
spinlock_t endio_lock;
struct dm_stats_aux stats_aux;
+ /* last member of dm_target_io is 'struct bio' */
+ struct dm_target_io tio;
};
+void *dm_per_bio_data(struct bio *bio, size_t data_size)
+{
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ if (!tio->inside_dm_io)
+ return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
+ return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
+}
+EXPORT_SYMBOL_GPL(dm_per_bio_data);
+
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
+{
+ struct dm_io *io = (struct dm_io *)((char *)data + data_size);
+ if (io->magic == DM_IO_MAGIC)
+ return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
+ BUG_ON(io->magic != DM_TIO_MAGIC);
+ return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
+}
+EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
+
+unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
+{
+ return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
+}
+EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
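
The pointer walks above are easier to see on a toy model of the embedded case (inside_dm_io == true): per-bio data is carved out in front of struct dm_io, whose tail is the embedded clone bio. A user-space round trip, assuming stand-in struct members rather than the kernel layout:

#include <stdio.h>
#include <stddef.h>

/* Stand-in structs: only the nesting mirrors dm.c, not the real fields. */
struct bio { int placeholder; };
struct dm_target_io { unsigned magic; int inside_dm_io; struct bio clone; };
struct dm_io { unsigned magic; struct dm_target_io tio; };

int main(void)
{
	_Alignas(struct dm_io) char buf[256]; /* [ per-bio data | dm_io...clone ] */
	const size_t data_size = 64;
	struct dm_io *io = (struct dm_io *)(buf + data_size);
	struct bio *bio = &io->tio.clone;

	/* The walk dm_per_bio_data() takes when inside_dm_io is true. */
	void *data = (char *)bio - offsetof(struct dm_target_io, clone)
				 - offsetof(struct dm_io, tio) - data_size;

	printf("recovered per-bio data start: %s\n",
	       data == (void *)buf ? "yes" : "no");
	return 0;
}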
+
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -93,8 +148,8 @@ static int dm_numa_node = DM_NUMA_NODE;
* For mempools pre-allocation at the table loading time.
*/
struct dm_md_mempools {
- mempool_t *io_pool;
struct bio_set *bs;
+ struct bio_set *io_bs;
};
struct table_device {
@@ -103,7 +158,6 @@ struct table_device {
struct dm_dev dm_dev;
};
-static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;
@@ -170,14 +224,9 @@ static int __init local_init(void)
{
int r = -ENOMEM;
- /* allocate a slab for the dm_ios */
- _io_cache = KMEM_CACHE(dm_io, 0);
- if (!_io_cache)
- return r;
-
_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
if (!_rq_tio_cache)
- goto out_free_io_cache;
+ return r;
_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
__alignof__(struct request), 0, NULL);
@@ -212,8 +261,6 @@ out_free_rq_cache:
kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
-out_free_io_cache:
- kmem_cache_destroy(_io_cache);
return r;
}
@@ -225,7 +272,6 @@ static void local_exit(void)
kmem_cache_destroy(_rq_cache);
kmem_cache_destroy(_rq_tio_cache);
- kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
dm_uevent_exit();
@@ -486,18 +532,69 @@ out:
return r;
}
-static struct dm_io *alloc_io(struct mapped_device *md)
+static void start_io_acct(struct dm_io *io);
+
+static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
- return mempool_alloc(md->io_pool, GFP_NOIO);
+ struct dm_io *io;
+ struct dm_target_io *tio;
+ struct bio *clone;
+
+ clone = bio_alloc_bioset(GFP_NOIO, 0, md->io_bs);
+ if (!clone)
+ return NULL;
+
+ tio = container_of(clone, struct dm_target_io, clone);
+ tio->inside_dm_io = true;
+ tio->io = NULL;
+
+ io = container_of(tio, struct dm_io, tio);
+ io->magic = DM_IO_MAGIC;
+ io->status = 0;
+ atomic_set(&io->io_count, 1);
+ io->orig_bio = bio;
+ io->md = md;
+ spin_lock_init(&io->endio_lock);
+
+ start_io_acct(io);
+
+ return io;
}
static void free_io(struct mapped_device *md, struct dm_io *io)
{
- mempool_free(io, md->io_pool);
+ bio_put(&io->tio.clone);
+}
+
+static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
+ unsigned target_bio_nr, gfp_t gfp_mask)
+{
+ struct dm_target_io *tio;
+
+ if (!ci->io->tio.io) {
+ /* the dm_target_io embedded in ci->io is available */
+ tio = &ci->io->tio;
+ } else {
+ struct bio *clone = bio_alloc_bioset(gfp_mask, 0, ci->io->md->bs);
+ if (!clone)
+ return NULL;
+
+ tio = container_of(clone, struct dm_target_io, clone);
+ tio->inside_dm_io = false;
+ }
+
+ tio->magic = DM_TIO_MAGIC;
+ tio->io = ci->io;
+ tio->ti = ti;
+ tio->target_bio_nr = target_bio_nr;
+
+ return tio;
}
static void free_tio(struct dm_target_io *tio)
{
+ if (tio->inside_dm_io)
+ return;
bio_put(&tio->clone);
}
@@ -510,17 +607,15 @@ int md_in_flight(struct mapped_device *md)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
- int cpu;
+ struct bio *bio = io->orig_bio;
int rw = bio_data_dir(bio);
io->start_time = jiffies;
- cpu = part_stat_lock();
- part_round_stats(md->queue, cpu, &dm_disk(md)->part0);
- part_stat_unlock();
+ generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0);
+
atomic_set(&dm_disk(md)->part0.in_flight[rw],
- atomic_inc_return(&md->pending[rw]));
+ atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
@@ -531,7 +626,7 @@ static void start_io_acct(struct dm_io *io)
static void end_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
+ struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
int pending;
int rw = bio_data_dir(bio);
@@ -752,15 +847,6 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
return 0;
}
-/*-----------------------------------------------------------------
- * CRUD START:
- * A more elegant soln is in the works that uses the queue
- * merge fn, unfortunately there are a couple of changes to
- * the block layer that I want to make for this. So in the
- * interests of getting something for people to use I give
- * you this clearly demarcated crap.
- *---------------------------------------------------------------*/
-
static int __noflush_suspending(struct mapped_device *md)
{
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
@@ -780,8 +866,7 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
spin_lock_irqsave(&io->endio_lock, flags);
- if (!(io->status == BLK_STS_DM_REQUEUE &&
- __noflush_suspending(md)))
+ if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
io->status = error;
spin_unlock_irqrestore(&io->endio_lock, flags);
}
@@ -793,7 +878,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md))
- bio_list_add_head(&md->deferred, io->bio);
+ /* NOTE early return due to BLK_STS_DM_REQUEUE below */
+ bio_list_add_head(&md->deferred, io->orig_bio);
else
/* noflush suspend was interrupted. */
io->status = BLK_STS_IOERR;
@@ -801,7 +887,7 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
}
io_error = io->status;
- bio = io->bio;
+ bio = io->orig_bio;
end_io_acct(io);
free_io(md, io);
@@ -847,7 +933,7 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (unlikely(error == BLK_STS_TARGET)) {
+ if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
@@ -920,7 +1006,15 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}
- ti->max_io_len = (uint32_t) len;
+ /*
+ * BIO based queue uses its own splitting. When multipage bvecs
+ * is switched on, size of the incoming bio may be too big to
+ * be handled in some targets, such as crypt.
+ *
+ * When these targets are ready for the big bio, we can remove
+ * the limit.
+ */
+ ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
return 0;
}
@@ -997,7 +1091,7 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_PREFLUSH.
+ * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1047,7 +1141,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct bio *report_bio = tio->io->bio;
+ struct bio *report_bio = tio->io->orig_bio;
struct blk_zone_report_hdr *hdr = NULL;
struct blk_zone *zone;
unsigned int nr_rep = 0;
@@ -1114,67 +1208,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
-/*
- * Flush current->bio_list when the target map method blocks.
- * This fixes deadlocks in snapshot and possibly in other targets.
- */
-struct dm_offload {
- struct blk_plug plug;
- struct blk_plug_cb cb;
-};
-
-static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
-{
- struct dm_offload *o = container_of(cb, struct dm_offload, cb);
- struct bio_list list;
- struct bio *bio;
- int i;
-
- INIT_LIST_HEAD(&o->cb.list);
-
- if (unlikely(!current->bio_list))
- return;
-
- for (i = 0; i < 2; i++) {
- list = current->bio_list[i];
- bio_list_init(&current->bio_list[i]);
-
- while ((bio = bio_list_pop(&list))) {
- struct bio_set *bs = bio->bi_pool;
- if (unlikely(!bs) || bs == fs_bio_set ||
- !bs->rescue_workqueue) {
- bio_list_add(&current->bio_list[i], bio);
- continue;
- }
-
- spin_lock(&bs->rescue_lock);
- bio_list_add(&bs->rescue_list, bio);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- spin_unlock(&bs->rescue_lock);
- }
- }
-}
-
-static void dm_offload_start(struct dm_offload *o)
-{
- blk_start_plug(&o->plug);
- o->cb.callback = flush_current_bio_list;
- list_add(&o->cb.list, &current->plug->cb_list);
-}
-
-static void dm_offload_end(struct dm_offload *o)
-{
- list_del(&o->cb.list);
- blk_finish_plug(&o->plug);
-}
-
-static void __map_bio(struct dm_target_io *tio)
+static blk_qc_t __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
- struct dm_offload o;
struct bio *clone = &tio->clone;
+ struct dm_io *io = tio->io;
+ struct mapped_device *md = io->md;
struct dm_target *ti = tio->ti;
+ blk_qc_t ret = BLK_QC_T_NONE;
clone->bi_end_io = clone_endio;
@@ -1183,44 +1225,37 @@ static void __map_bio(struct dm_target_io *tio)
* anything, the target has assumed ownership of
* this io.
*/
- atomic_inc(&tio->io->io_count);
+ atomic_inc(&io->io_count);
sector = clone->bi_iter.bi_sector;
- dm_offload_start(&o);
r = ti->type->map(ti, clone);
- dm_offload_end(&o);
-
switch (r) {
case DM_MAPIO_SUBMITTED:
break;
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
- bio_dev(tio->io->bio), sector);
- generic_make_request(clone);
+ bio_dev(io->orig_bio), sector);
+ if (md->type == DM_TYPE_NVME_BIO_BASED)
+ ret = direct_make_request(clone);
+ else
+ ret = generic_make_request(clone);
break;
case DM_MAPIO_KILL:
- dec_pending(tio->io, BLK_STS_IOERR);
free_tio(tio);
+ dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
- dec_pending(tio->io, BLK_STS_DM_REQUEUE);
free_tio(tio);
+ dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
-}
-struct clone_info {
- struct mapped_device *md;
- struct dm_table *map;
- struct bio *bio;
- struct dm_io *io;
- sector_t sector;
- unsigned sector_count;
-};
+ return ret;
+}
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
@@ -1264,28 +1299,49 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return 0;
}
-static struct dm_target_io *alloc_tio(struct clone_info *ci,
- struct dm_target *ti,
- unsigned target_bio_nr)
+static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+ struct dm_target *ti, unsigned num_bios)
{
struct dm_target_io *tio;
- struct bio *clone;
+ int try;
- clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
- tio = container_of(clone, struct dm_target_io, clone);
+ if (!num_bios)
+ return;
- tio->io = ci->io;
- tio->ti = ti;
- tio->target_bio_nr = target_bio_nr;
+ if (num_bios == 1) {
+ tio = alloc_tio(ci, ti, 0, GFP_NOIO);
+ bio_list_add(blist, &tio->clone);
+ return;
+ }
- return tio;
+ for (try = 0; try < 2; try++) {
+ int bio_nr;
+ struct bio *bio;
+
+ if (try)
+ mutex_lock(&ci->io->md->table_devices_lock);
+ for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
+ tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
+ if (!tio)
+ break;
+
+ bio_list_add(blist, &tio->clone);
+ }
+ if (try)
+ mutex_unlock(&ci->io->md->table_devices_lock);
+ if (bio_nr == num_bios)
+ return;
+
+ while ((bio = bio_list_pop(blist))) {
+ tio = container_of(bio, struct dm_target_io, clone);
+ free_tio(tio);
+ }
+ }
}
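
The allocation is two-pass: first GFP_NOWAIT, which may fail but never sleeps, then a GFP_NOIO retry taken under md->table_devices_lock so the sleeping pass is serialized. Reduced to a user-space sketch with an illustrative failure-injecting allocator:

#include <stdio.h>
#include <stdlib.h>

/* Fake allocator: the non-sleeping pass fails one time in four. */
static void *try_alloc(int may_sleep)
{
	if (!may_sleep && rand() % 4 == 0)
		return NULL;
	return malloc(32);
}

int main(void)
{
	void *batch[8];
	const int n = 8;
	int got = 0;

	for (int pass = 0; pass < 2 && got != n; pass++) {
		for (got = 0; got < n; got++) {
			batch[got] = try_alloc(pass); /* pass 1 may "sleep" */
			if (!batch[got])
				break;
		}
		if (got != n)
			while (got > 0) /* roll the partial batch back */
				free(batch[--got]);
	}
	printf("allocated %d of %d\n", got, n);

	while (got > 0)
		free(batch[--got]);
	return 0;
}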
-static void __clone_and_map_simple_bio(struct clone_info *ci,
- struct dm_target *ti,
- unsigned target_bio_nr, unsigned *len)
+static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
+ struct dm_target_io *tio, unsigned *len)
{
- struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
struct bio *clone = &tio->clone;
tio->len_ptr = len;
@@ -1294,16 +1350,22 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
if (len)
bio_setup_sector(clone, ci->sector, *len);
- __map_bio(tio);
+ return __map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios, unsigned *len)
{
- unsigned target_bio_nr;
+ struct bio_list blist = BIO_EMPTY_LIST;
+ struct bio *bio;
+ struct dm_target_io *tio;
+
+ alloc_multiple_bios(&blist, ci, ti, num_bios);
- for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
- __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
+ while ((bio = bio_list_pop(&blist))) {
+ tio = container_of(bio, struct dm_target_io, clone);
+ (void) __clone_and_map_simple_bio(ci, tio, len);
+ }
}
static int __send_empty_flush(struct clone_info *ci)
@@ -1319,32 +1381,22 @@ static int __send_empty_flush(struct clone_info *ci)
}
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, unsigned *len)
+ sector_t sector, unsigned *len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
- unsigned target_bio_nr;
- unsigned num_target_bios = 1;
- int r = 0;
+ int r;
- /*
- * Does the target want to receive duplicate copies of the bio?
- */
- if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
- num_target_bios = ti->num_write_bios(ti, bio);
-
- for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
- tio = alloc_tio(ci, ti, target_bio_nr);
- tio->len_ptr = len;
- r = clone_bio(tio, bio, sector, *len);
- if (r < 0) {
- free_tio(tio);
- break;
- }
- __map_bio(tio);
+ tio = alloc_tio(ci, ti, 0, GFP_NOIO);
+ tio->len_ptr = len;
+ r = clone_bio(tio, bio, sector, *len);
+ if (r < 0) {
+ free_tio(tio);
+ return r;
}
+ (void) __map_bio(tio);
- return r;
+ return 0;
}
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
@@ -1371,56 +1423,50 @@ static bool is_split_required_for_discard(struct dm_target *ti)
return ti->split_discard_bios;
}
-static int __send_changing_extent_only(struct clone_info *ci,
+static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
get_num_bios_fn get_num_bios,
is_split_required_fn is_split_required)
{
- struct dm_target *ti;
unsigned len;
unsigned num_bios;
- do {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- /*
- * Even though the device advertised support for this type of
- * request, that does not mean every target supports it, and
- * reconfiguration might also have changed that since the
- * check was performed.
- */
- num_bios = get_num_bios ? get_num_bios(ti) : 0;
- if (!num_bios)
- return -EOPNOTSUPP;
+ /*
+ * Even though the device advertised support for this type of
+ * request, that does not mean every target supports it, and
+ * reconfiguration might also have changed that since the
+ * check was performed.
+ */
+ num_bios = get_num_bios ? get_num_bios(ti) : 0;
+ if (!num_bios)
+ return -EOPNOTSUPP;
- if (is_split_required && !is_split_required(ti))
- len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
- else
- len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
+ if (is_split_required && !is_split_required(ti))
+ len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+ else
+ len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
- __send_duplicate_bios(ci, ti, num_bios, &len);
+ __send_duplicate_bios(ci, ti, num_bios, &len);
- ci->sector += len;
- } while (ci->sector_count -= len);
+ ci->sector += len;
+ ci->sector_count -= len;
return 0;
}
-static int __send_discard(struct clone_info *ci)
+static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_discard_bios,
+ return __send_changing_extent_only(ci, ti, get_num_discard_bios,
is_split_required_for_discard);
}
-static int __send_write_same(struct clone_info *ci)
+static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
+ return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL);
}
-static int __send_write_zeroes(struct clone_info *ci)
+static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL);
+ return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL);
}
/*
@@ -1433,17 +1479,17 @@ static int __split_and_process_non_flush(struct clone_info *ci)
unsigned len;
int r;
- if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
- return __send_discard(ci);
- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- return __send_write_same(ci);
- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
- return __send_write_zeroes(ci);
-
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+ return __send_discard(ci, ti);
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
+ return __send_write_same(ci, ti);
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
+ return __send_write_zeroes(ci, ti);
+
if (bio_op(bio) == REQ_OP_ZONE_REPORT)
len = ci->sector_count;
else
@@ -1460,34 +1506,33 @@ static int __split_and_process_non_flush(struct clone_info *ci)
return 0;
}
+static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+{
+ ci->map = map;
+ ci->io = alloc_io(md, bio);
+ ci->sector = bio->bi_iter.bi_sector;
+}
+
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
-static void __split_and_process_bio(struct mapped_device *md,
- struct dm_table *map, struct bio *bio)
+static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
+ blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
if (unlikely(!map)) {
bio_io_error(bio);
- return;
+ return ret;
}
- ci.map = map;
- ci.md = md;
- ci.io = alloc_io(md);
- ci.io->status = 0;
- atomic_set(&ci.io->io_count, 1);
- ci.io->bio = bio;
- ci.io->md = md;
- spin_lock_init(&ci.io->endio_lock);
- ci.sector = bio->bi_iter.bi_sector;
-
- start_io_acct(ci.io);
+ init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- ci.bio = &ci.md->flush_bio;
+ ci.bio = &ci.io->md->flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
@@ -1498,32 +1543,95 @@ static void __split_and_process_bio(struct mapped_device *md,
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
- while (ci.sector_count && !error)
+ while (ci.sector_count && !error) {
error = __split_and_process_non_flush(&ci);
+ if (current->bio_list && ci.sector_count && !error) {
+ /*
+ * Remainder must be passed to generic_make_request()
+ * so that it gets handled *after* bios already submitted
+ * have been completely processed.
+ * We take a clone of the original to store in
+ * ci.io->orig_bio, to be used by end_io_acct() and by
+ * dec_pending() for completion handling.
+ * As this path is not used for REQ_OP_ZONE_REPORT,
+ * the usage of io->orig_bio in dm_remap_zone_report()
+ * won't be affected by this reassignment.
+ */
+ struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
+ md->queue->bio_split);
+ ci.io->orig_bio = b;
+ bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
+ bio_chain(b, bio);
+ ret = generic_make_request(bio);
+ break;
+ }
+ }
}
/* drop the extra reference count */
dec_pending(ci.io, errno_to_blk_status(error));
+ return ret;
}
-/*-----------------------------------------------------------------
- * CRUD END
- *---------------------------------------------------------------*/
/*
- * The request function that just remaps the bio built up by
- * dm_merge_bvec.
+ * Optimized variant of __split_and_process_bio that leverages the
+ * fact that targets using it do _not_ need to split bios.
*/
-static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t __process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+{
+ struct clone_info ci;
+ blk_qc_t ret = BLK_QC_T_NONE;
+ int error = 0;
+
+ if (unlikely(!map)) {
+ bio_io_error(bio);
+ return ret;
+ }
+
+ init_clone_info(&ci, md, map, bio);
+
+ if (bio->bi_opf & REQ_PREFLUSH) {
+ ci.bio = &ci.io->md->flush_bio;
+ ci.sector_count = 0;
+ error = __send_empty_flush(&ci);
+ /* dec_pending submits any data associated with flush */
+ } else {
+ struct dm_target *ti = md->immutable_target;
+ struct dm_target_io *tio;
+
+ /*
+ * Defend against IO still getting in during teardown
+ * - as was seen for a time with nvme-fcloop
+ */
+ if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) {
+ error = -EIO;
+ goto out;
+ }
+
+ tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
+ ci.bio = bio;
+ ci.sector_count = bio_sectors(bio);
+ ret = __clone_and_map_simple_bio(&ci, tio, NULL);
+ }
+out:
+ /* drop the extra reference count */
+ dec_pending(ci.io, errno_to_blk_status(error));
+ return ret;
+}
+
+typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *);
+
+static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
+ process_bio_fn process_bio)
{
- int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
+ blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
- generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0);
-
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
dm_put_live_table(md, srcu_idx);
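The remainder path in __split_and_process_bio() above follows a standard block-layer idiom: clone the bio for accounting before trimming it, advance the original past the sectors already mapped, chain the clone so it completes only after the resubmitted tail does, then hand the tail back to generic_make_request(). The same steps in isolation (a sketch; the function name is hypothetical, and the caller keeps *acct for end-of-io accounting):

	static blk_qc_t resubmit_tail(struct bio *bio, unsigned done_sectors,
				      struct bio_set *bs, struct bio **acct)
	{
		*acct = bio_clone_bioset(bio, GFP_NOIO, bs);
		bio_advance(bio, done_sectors << 9);	/* bio is now the tail */
		bio_chain(*acct, bio);			/* clone completes after tail */
		return generic_make_request(bio);
	}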
@@ -1532,12 +1640,27 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
queue_io(md, bio);
else
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return ret;
}
- __split_and_process_bio(md, map, bio);
+ ret = process_bio(md, map, bio);
+
dm_put_live_table(md, srcu_idx);
- return BLK_QC_T_NONE;
+ return ret;
+}
+
+/*
+ * The request function that remaps the bio to one target and
+ * splits off any remainder.
+ */
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+{
+ return __dm_make_request(q, bio, __split_and_process_bio);
+}
+
+static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio)
+{
+ return __dm_make_request(q, bio, __process_bio);
}
static int dm_any_congested(void *congested_data, int bdi_bits)
@@ -1618,20 +1741,9 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
-void dm_init_md_queue(struct mapped_device *md)
-{
- /*
- * Initialize data that will only be used by a non-blk-mq DM queue
- * - must do so here (in alloc_dev callchain) before queue is used
- */
- md->queue->queuedata = md;
- md->queue->backing_dev_info->congested_data = md;
-}
-
-void dm_init_normal_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
{
md->use_blk_mq = false;
- dm_init_md_queue(md);
/*
* Initialize aspects of queue that aren't relevant for blk-mq
@@ -1645,9 +1757,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
destroy_workqueue(md->wq);
if (md->kworker_task)
kthread_stop(md->kworker_task);
- mempool_destroy(md->io_pool);
if (md->bs)
bioset_free(md->bs);
+ if (md->io_bs)
+ bioset_free(md->io_bs);
if (md->dax_dev) {
kill_dax(md->dax_dev);
@@ -1673,6 +1786,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
md->bdev = NULL;
}
+ mutex_destroy(&md->suspend_lock);
+ mutex_destroy(&md->type_lock);
+ mutex_destroy(&md->table_devices_lock);
+
dm_mq_cleanup_mapped_device(md);
}
@@ -1726,10 +1843,10 @@ static struct mapped_device *alloc_dev(int minor)
md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
if (!md->queue)
goto bad;
+ md->queue->queuedata = md;
+ md->queue->backing_dev_info->congested_data = md;
- dm_init_md_queue(md);
-
- md->disk = alloc_disk_node(1, numa_node_id);
+ md->disk = alloc_disk_node(1, md->numa_node_id);
if (!md->disk)
goto bad;
@@ -1753,7 +1870,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad;
md->dax_dev = dax_dev;
- add_disk(md->disk);
+ add_disk_no_queue_reg(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
@@ -1812,17 +1929,22 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->bs) {
- /* The md already has necessary mempools. */
- if (dm_table_bio_based(t)) {
- /*
- * Reload bioset because front_pad may have changed
- * because a different table was loaded.
- */
+ if (dm_table_bio_based(t)) {
+ /*
+ * The md may already have mempools that need changing.
+ * If so, reload the bioset, because front_pad may have changed
+ * when a different table was loaded.
+ */
+ if (md->bs) {
bioset_free(md->bs);
- md->bs = p->bs;
- p->bs = NULL;
+ md->bs = NULL;
}
+ if (md->io_bs) {
+ bioset_free(md->io_bs);
+ md->io_bs = NULL;
+ }
+
+ } else if (md->bs) {
/*
* There's no need to reload with request-based dm
* because the size of front_pad doesn't change.
@@ -1834,13 +1956,12 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
goto out;
}
- BUG_ON(!p || md->io_pool || md->bs);
+ BUG_ON(!p || md->bs || md->io_bs);
- md->io_pool = p->io_pool;
- p->io_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
-
+ md->io_bs = p->io_bs;
+ p->io_bs = NULL;
out:
/* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t);
@@ -1886,6 +2007,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
{
struct dm_table *old_map;
struct request_queue *q = md->queue;
+ bool request_based = dm_table_request_based(t);
sector_t size;
lockdep_assert_held(&md->suspend_lock);
@@ -1909,12 +2031,15 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* This must be done before setting the queue restrictions,
* because request-based dm may be run just after the setting.
*/
- if (dm_table_request_based(t)) {
+ if (request_based)
dm_stop_queue(q);
+
+ if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
/*
- * Leverage the fact that request-based DM targets are
- * immutable singletons and establish md->immutable_target
- * - used to optimize both dm_request_fn and dm_mq_queue_rq
+ * Leverage the fact that request-based DM targets and
+ * NVMe bio-based targets are immutable singletons
+ * - used to optimize dm_request_fn, dm_mq_queue_rq and
+ *   __process_bio.
*/
md->immutable_target = dm_table_get_immutable_target(t);
}
@@ -1954,13 +2079,18 @@ static struct dm_table *__unbind(struct mapped_device *md)
*/
int dm_create(int minor, struct mapped_device **result)
{
+ int r;
struct mapped_device *md;
md = alloc_dev(minor);
if (!md)
return -ENXIO;
- dm_sysfs_init(md);
+ r = dm_sysfs_init(md);
+ if (r) {
+ free_dev(md);
+ return r;
+ }
*result = md;
return 0;
@@ -2013,10 +2143,12 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
+ struct queue_limits limits;
enum dm_queue_mode type = dm_get_md_type(md);
switch (type) {
case DM_TYPE_REQUEST_BASED:
+ dm_init_normal_md_queue(md);
r = dm_old_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based mapped device");
@@ -2034,21 +2166,24 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
case DM_TYPE_DAX_BIO_BASED:
dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
- /*
- * DM handles splitting bios as needed. Free the bio_split bioset
- * since it won't be used (saves 1 process per bio-based DM device).
- */
- bioset_free(md->queue->bio_split);
- md->queue->bio_split = NULL;
-
- if (type == DM_TYPE_DAX_BIO_BASED)
- queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
+ break;
+ case DM_TYPE_NVME_BIO_BASED:
+ dm_init_normal_md_queue(md);
+ blk_queue_make_request(md->queue, dm_make_request_nvme);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
break;
}
+ r = dm_calculate_queue_limits(t, &limits);
+ if (r) {
+ DMERR("Cannot calculate initial queue limits");
+ return r;
+ }
+ dm_table_set_restrictions(t, md->queue, &limits);
+ blk_register_queue(md->disk);
+
return 0;
}
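Note the ordering this hunk establishes: alloc_dev() switched to add_disk_no_queue_reg() earlier in the patch, so the queue is registered only here, after the table-derived limits are in place. Condensed into one hypothetical helper (error handling elided):

	static void demo_setup_bio_based_queue(struct mapped_device *md,
					       struct dm_table *t)
	{
		struct queue_limits limits;

		dm_init_normal_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request);
		dm_calculate_queue_limits(t, &limits);
		dm_table_set_restrictions(t, md->queue, &limits);
		blk_register_queue(md->disk);	/* deferred by add_disk_no_queue_reg() */
	}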
@@ -2113,7 +2248,6 @@ EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
- struct request_queue *q = dm_get_md_queue(md);
struct dm_table *map;
int srcu_idx;
@@ -2124,7 +2258,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- blk_set_queue_dying(q);
+ blk_set_queue_dying(md->queue);
if (dm_request_based(md) && md->kworker_task)
kthread_flush_worker(&md->kworker);
@@ -2735,11 +2869,12 @@ int dm_noflush_suspending(struct dm_target *ti)
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_io_data_size)
+ unsigned integrity, unsigned per_io_data_size,
+ unsigned min_pool_size)
{
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
unsigned int pool_size = 0;
- unsigned int front_pad;
+ unsigned int front_pad, io_front_pad;
if (!pools)
return NULL;
@@ -2747,16 +2882,19 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- pool_size = dm_get_reserved_bio_based_ios();
+ case DM_TYPE_NVME_BIO_BASED:
+ pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
-
- pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
- if (!pools->io_pool)
+ io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
+ pools->io_bs = bioset_create(pool_size, io_front_pad, 0);
+ if (!pools->io_bs)
+ goto out;
+ if (integrity && bioset_integrity_create(pools->io_bs, pool_size))
goto out;
break;
case DM_TYPE_REQUEST_BASED:
case DM_TYPE_MQ_REQUEST_BASED:
- pool_size = dm_get_reserved_rq_based_ios();
+ pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */
break;
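The offsetof() arithmetic above encodes the nesting dm_io -> dm_target_io -> clone bio: io_front_pad reserves everything that precedes the embedded clone, so a completed clone maps back to its dm_io with two container_of() steps. A hedged sketch of that recovery (struct layouts assumed from the pad computation, not shown in this hunk):

	static struct dm_io *dm_io_from_clone(struct bio *clone)
	{
		struct dm_target_io *tio =
			container_of(clone, struct dm_target_io, clone);

		return container_of(tio, struct dm_io, tio);
	}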
@@ -2764,7 +2902,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
BUG();
}
- pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER);
+ pools->bs = bioset_create(pool_size, front_pad, 0);
if (!pools->bs)
goto out;
@@ -2784,10 +2922,10 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (!pools)
return;
- mempool_destroy(pools->io_pool);
-
if (pools->bs)
bioset_free(pools->bs);
+ if (pools->io_bs)
+ bioset_free(pools->io_bs);
kfree(pools);
}
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 36399bb875dd..114a81b27c37 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -49,7 +49,6 @@ struct dm_md_mempools;
/*-----------------------------------------------------------------
* Internal table functions.
*---------------------------------------------------------------*/
-void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
@@ -206,7 +205,8 @@ void dm_kcopyd_exit(void);
* Mempool operations
*/
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_bio_data_size);
+ unsigned integrity, unsigned per_bio_data_size,
+ unsigned min_pool_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
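With min_pool_size in the prototype, table code can size pools to the deepest per-target requirement instead of the global default. A hypothetical call site (the real caller, dm_table_alloc_md_mempools(), is not part of this patch excerpt):

	pools = dm_alloc_md_mempools(md, DM_TYPE_BIO_BASED,
				     0 /* integrity */,
				     per_io_data_size,
				     256 /* min_pool_size, illustrative */);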
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e4dee0ec2de..0081ace39a64 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -711,7 +711,7 @@ static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
return NULL;
}
-static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
+struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
@@ -721,6 +721,7 @@ static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
return NULL;
}
+EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
static struct md_personality *find_pers(int level, char *clevel)
{
@@ -5560,11 +5561,6 @@ int md_run(struct mddev *mddev)
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
- /*
- * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
- * up mddev->thread. It is important to initialize critical
- * resources for mddev->thread BEFORE calling pers->run().
- */
err = pers->run(mddev);
if (err)
pr_warn("md: pers->run() failed ...\n");
@@ -5678,6 +5674,9 @@ static int do_md_run(struct mddev *mddev)
if (mddev_is_clustered(mddev))
md_allow_write(mddev);
+ /* run start-up tasks that require md_thread */
+ md_start(mddev);
+
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
@@ -5689,6 +5688,21 @@ out:
return err;
}
+int md_start(struct mddev *mddev)
+{
+ int ret = 0;
+
+ if (mddev->pers->start) {
+ set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ ret = mddev->pers->start(mddev);
+ clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+ md_wakeup_thread(mddev->sync_thread);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(md_start);
+
static int restart_array(struct mddev *mddev)
{
struct gendisk *disk = mddev->gendisk;
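md_start() brackets pers->start() with MD_RECOVERY_WAIT so that md_do_sync() (taught to honor the flag below) stays parked while the personality replays its log via md_thread. Seen from a personality author's side, the contract looks roughly like this (hypothetical names; raid5 wires up the real ->start() at the end of this patch):

	/* called from do_md_run() after ->run(), with md_thread live;
	 * resync/recovery is held off until this returns */
	static int demo_start(struct mddev *mddev)
	{
		struct demo_conf *conf = mddev->private;	/* hypothetical */

		return demo_replay_log(conf);			/* hypothetical */
	}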
@@ -6997,7 +7011,7 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
return -ENODEV;
rcu_read_lock();
- rdev = find_rdev_rcu(mddev, dev);
+ rdev = md_find_rdev_rcu(mddev, dev);
if (!rdev)
err = -ENODEV;
else {
@@ -7871,10 +7885,10 @@ static int md_seq_open(struct inode *inode, struct file *file)
}
static int md_unloading;
-static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
- int mask;
+ __poll_t mask;
if (md_unloading)
return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
@@ -8169,7 +8183,8 @@ void md_do_sync(struct md_thread *thread)
int ret;
/* just in case thread restarts... */
- if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
return;
if (mddev->ro) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7d6bcf0eba0c..58cd20a5e85e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -485,6 +485,7 @@ enum recovery_flags {
MD_RECOVERY_RESHAPE, /* A reshape is happening */
MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
+ MD_RECOVERY_WAIT, /* waiting for pers->start() to finish */
};
static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -523,7 +524,13 @@ struct md_personality
struct list_head list;
struct module *owner;
bool (*make_request)(struct mddev *mddev, struct bio *bio);
+ /*
+ * Start-up work that does NOT require md_thread; tasks that
+ * require md_thread should go into start().
+ */
int (*run)(struct mddev *mddev);
+ /* start-up work that requires md threads */
+ int (*start)(struct mddev *mddev);
void (*free)(struct mddev *mddev, void *priv);
void (*status)(struct seq_file *seq, struct mddev *mddev);
/* error_handler must set ->faulty and clear ->in_sync
@@ -687,6 +694,7 @@ extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
+extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
@@ -702,6 +710,7 @@ extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
+struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index f21ce6a3d4cf..58b319757b1e 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
pn->keys[1] = rn->keys[0];
memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
- /*
- * rejig the spine. This is ugly, since it knows too
- * much about the spine
- */
- if (s->nodes[0] != new_parent) {
- unlock_block(s->info, s->nodes[0]);
- s->nodes[0] = new_parent;
- }
- if (key < le64_to_cpu(rn->keys[0])) {
- unlock_block(s->info, right);
- s->nodes[1] = left;
- } else {
- unlock_block(s->info, left);
- s->nodes[1] = right;
- }
- s->count = 2;
-
+ unlock_block(s->info, left);
+ unlock_block(s->info, right);
return 0;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6df398e3a008..b2eae332e1a2 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -815,6 +815,17 @@ static void flush_pending_writes(struct r1conf *conf)
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * As this is called in a wait_event() loop (see freeze_array),
+ * current->state might be TASK_UNINTERRUPTIBLE which will
+ * cause a warning when we prepare to wait again. As it is
+ * rare that this path is taken, it is perfectly safe to force
+ * us to go around the wait_event() loop again, so the warning
+ * is a false-positive. Silence the warning by resetting
+ * thread state
+ */
+ __set_current_state(TASK_RUNNING);
blk_start_plug(&plug);
flush_bio_list(conf, bio);
blk_finish_plug(&plug);
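For context: the warning being silenced comes from the wait_event_lock_irq_cmd() shape used by freeze_array(), where the cmd expression runs after prepare_to_wait() has set TASK_UNINTERRUPTIBLE. A hedged sketch of that shape (the condition shown is illustrative, not the exact raid1 predicate):

	/* flush_pending_writes() runs with current->state == TASK_UNINTERRUPTIBLE */
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == 0,	/* illustrative */
				conf->resync_lock,
				flush_pending_writes(conf));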
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c131835cf008..99c9207899a7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -900,6 +900,18 @@ static void flush_pending_writes(struct r10conf *conf)
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * As this is called in a wait_event() loop (see freeze_array),
+ * current->state might be TASK_UNINTERRUPTIBLE which will
+ * cause a warning when we prepare to wait again. As it is
+ * rare that this path is taken, it is perfectly safe to force
+ * us to go around the wait_event() loop again, so the warning
+ * is a false-positive. Silence the warning by resetting
+ * thread state
+ */
+ __set_current_state(TASK_RUNNING);
+
blk_start_plug(&plug);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 39f31f07ffe9..3c65f52b68f5 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1111,9 +1111,6 @@ void r5l_write_stripe_run(struct r5l_log *log)
int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
- if (!log)
- return -ENODEV;
-
if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
/*
* in write through (journal only)
@@ -1592,8 +1589,6 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
void r5l_quiesce(struct r5l_log *log, int quiesce)
{
struct mddev *mddev;
- if (!log)
- return;
if (quiesce) {
/* make sure r5l_write_super_and_discard_space exits */
@@ -2448,7 +2443,6 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
raid5_release_stripe(sh);
}
- md_wakeup_thread(conf->mddev->thread);
/* reuse conf->wait_for_quiescent in recovery */
wait_event(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0);
@@ -2491,10 +2485,10 @@ static int r5l_recovery_log(struct r5l_log *log)
ctx->seq += 10000;
if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
- pr_debug("md/raid:%s: starting from clean shutdown\n",
+ pr_info("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
else
- pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
+ pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
mdname(mddev), ctx->data_only_stripes,
ctx->data_parity_stripes);
@@ -3036,6 +3030,23 @@ ioerr:
return ret;
}
+int r5l_start(struct r5l_log *log)
+{
+ int ret;
+
+ if (!log)
+ return 0;
+
+ ret = r5l_load_log(log);
+ if (ret) {
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+
+ r5l_exit_log(conf);
+ }
+ return ret;
+}
+
void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
@@ -3138,13 +3149,9 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
rcu_assign_pointer(conf->log, log);
- if (r5l_load_log(log))
- goto error;
-
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
-error:
rcu_assign_pointer(conf->log, NULL);
md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 284578b0a349..0c76bcedfc1c 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -32,6 +32,7 @@ extern struct md_sysfs_entry r5c_journal_mode;
extern void r5c_update_on_rdev_error(struct mddev *mddev,
struct md_rdev *rdev);
extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
+extern int r5l_start(struct r5l_log *log);
extern struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
@@ -42,6 +43,7 @@ extern int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
extern void ppl_write_stripe_run(struct r5conf *conf);
extern void ppl_stripe_write_finished(struct stripe_head *sh);
extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
+extern void ppl_quiesce(struct r5conf *conf, int quiesce);
static inline bool raid5_has_ppl(struct r5conf *conf)
{
@@ -87,6 +89,34 @@ static inline void log_write_stripe_run(struct r5conf *conf)
ppl_write_stripe_run(conf);
}
+static inline void log_flush_stripe_to_raid(struct r5conf *conf)
+{
+ if (conf->log)
+ r5l_flush_stripe_to_raid(conf->log);
+ else if (raid5_has_ppl(conf))
+ ppl_write_stripe_run(conf);
+}
+
+static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
+{
+ int ret = -ENODEV;
+
+ if (conf->log)
+ ret = r5l_handle_flush_request(conf->log, bio);
+ else if (raid5_has_ppl(conf))
+ ret = 0;
+
+ return ret;
+}
+
+static inline void log_quiesce(struct r5conf *conf, int quiesce)
+{
+ if (conf->log)
+ r5l_quiesce(conf->log, quiesce);
+ else if (raid5_has_ppl(conf))
+ ppl_quiesce(conf, quiesce);
+}
+
static inline void log_exit(struct r5conf *conf)
{
if (conf->log)
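These inline wrappers centralize the journal-vs-PPL dispatch, which is why the internal !log checks are dropped from r5l_handle_flush_request() and r5l_quiesce() above. Any future hook would take the same shape; a hypothetical example (r5l_wake_reclaim() is real, and PPL simply has no reclaim thread):

	static inline void log_wake_reclaim(struct r5conf *conf, sector_t space)
	{
		if (conf->log)
			r5l_wake_reclaim(conf->log, space);
		/* no ppl_ branch: PPL has no reclaim to wake */
	}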
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 628c0bf7b9fd..2764c2290062 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -85,6 +85,9 @@
* (for a single member disk). New io_units are added to the end of the list
* and the first io_unit is submitted, if it is not submitted already.
* The current io_unit accepting new stripes is always at the end of the list.
+ *
+ * If a write-back cache is enabled on any of the disks in the array, its
+ * data must be flushed before the next io_unit is submitted.
*/
#define PPL_SPACE_SIZE (128 * 1024)
@@ -104,6 +107,7 @@ struct ppl_conf {
struct kmem_cache *io_kc;
mempool_t *io_pool;
struct bio_set *bs;
+ struct bio_set *flush_bs;
/* used only for recovery */
int recovered_entries;
@@ -128,6 +132,8 @@ struct ppl_log {
sector_t next_io_sector;
unsigned int entry_space;
bool use_multippl;
+ bool wb_cache_on;
+ unsigned long disk_flush_bitmap;
};
#define PPL_IO_INLINE_BVECS 32
@@ -145,6 +151,7 @@ struct ppl_io_unit {
struct list_head stripe_list; /* stripes added to the io_unit */
atomic_t pending_stripes; /* how many stripes not written to raid */
+ atomic_t pending_flushes; /* how many disk flushes are in progress */
bool submitted; /* true if write to log started */
@@ -249,6 +256,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
INIT_LIST_HEAD(&io->log_sibling);
INIT_LIST_HEAD(&io->stripe_list);
atomic_set(&io->pending_stripes, 0);
+ atomic_set(&io->pending_flushes, 0);
bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
pplhdr = page_address(io->header_page);
@@ -475,7 +483,18 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
if (log->use_multippl)
log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
+ WARN_ON(log->disk_flush_bitmap != 0);
+
list_for_each_entry(sh, &io->stripe_list, log_list) {
+ for (i = 0; i < sh->disks; i++) {
+ struct r5dev *dev = &sh->dev[i];
+
+ if ((ppl_conf->child_logs[i].wb_cache_on) &&
+ (test_bit(R5_Wantwrite, &dev->flags))) {
+ set_bit(i, &log->disk_flush_bitmap);
+ }
+ }
+
/* entries for full stripe writes have no partial parity */
if (test_bit(STRIPE_FULL_WRITE, &sh->state))
continue;
@@ -540,6 +559,7 @@ static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
struct ppl_log *log = io->log;
struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
unsigned long flags;
pr_debug("%s: seq: %llu\n", __func__, io->seq);
@@ -565,6 +585,112 @@ static void ppl_io_unit_finished(struct ppl_io_unit *io)
spin_unlock(&ppl_conf->no_mem_stripes_lock);
local_irq_restore(flags);
+
+ wake_up(&conf->wait_for_quiescent);
+}
+
+static void ppl_flush_endio(struct bio *bio)
+{
+ struct ppl_io_unit *io = bio->bi_private;
+ struct ppl_log *log = io->log;
+ struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
+ char b[BDEVNAME_SIZE];
+
+ pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
+
+ if (bio->bi_status) {
+ struct md_rdev *rdev;
+
+ rcu_read_lock();
+ rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
+ if (rdev)
+ md_error(rdev->mddev, rdev);
+ rcu_read_unlock();
+ }
+
+ bio_put(bio);
+
+ if (atomic_dec_and_test(&io->pending_flushes)) {
+ ppl_io_unit_finished(io);
+ md_wakeup_thread(conf->mddev->thread);
+ }
+}
+
+static void ppl_do_flush(struct ppl_io_unit *io)
+{
+ struct ppl_log *log = io->log;
+ struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
+ int raid_disks = conf->raid_disks;
+ int flushed_disks = 0;
+ int i;
+
+ atomic_set(&io->pending_flushes, raid_disks);
+
+ for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
+ struct md_rdev *rdev;
+ struct block_device *bdev = NULL;
+
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ bdev = rdev->bdev;
+ rcu_read_unlock();
+
+ if (bdev) {
+ struct bio *bio;
+ char b[BDEVNAME_SIZE];
+
+ bio = bio_alloc_bioset(GFP_NOIO, 0, ppl_conf->flush_bs);
+ bio_set_dev(bio, bdev);
+ bio->bi_private = io;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ bio->bi_end_io = ppl_flush_endio;
+
+ pr_debug("%s: dev: %s\n", __func__,
+ bio_devname(bio, b));
+
+ submit_bio(bio);
+ flushed_disks++;
+ }
+ }
+
+ log->disk_flush_bitmap = 0;
+
+ for (i = flushed_disks; i < raid_disks; i++) {
+ if (atomic_dec_and_test(&io->pending_flushes))
+ ppl_io_unit_finished(io);
+ }
+}
+
+static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
+ struct ppl_log *log)
+{
+ struct ppl_io_unit *io;
+
+ io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
+ log_sibling);
+
+ return !io || !io->submitted;
+}
+
+void ppl_quiesce(struct r5conf *conf, int quiesce)
+{
+ struct ppl_conf *ppl_conf = conf->log_private;
+ int i;
+
+ if (quiesce) {
+ for (i = 0; i < ppl_conf->count; i++) {
+ struct ppl_log *log = &ppl_conf->child_logs[i];
+
+ spin_lock_irq(&log->io_list_lock);
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ ppl_no_io_unit_submitted(conf, log),
+ log->io_list_lock);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
}
void ppl_stripe_write_finished(struct stripe_head *sh)
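ppl_do_flush() seeds pending_flushes with raid_disks rather than the number of flush bios it actually submits, then retires the surplus slots; seeding up front means the last endio cannot race the submit loop into a premature ppl_io_unit_finished(). The accounting pattern in isolation (helper names hypothetical):

	static void submit_counted_flushes(struct ppl_io_unit *io, int slots)
	{
		int submitted = 0, i;

		atomic_set(&io->pending_flushes, slots);
		for (i = 0; i < slots; i++)
			if (slot_needs_flush(io, i)) {		/* hypothetical */
				submit_slot_flush(io, i);	/* hypothetical */
				submitted++;
			}
		/* retire the slots that never got a bio */
		for (i = submitted; i < slots; i++)
			if (atomic_dec_and_test(&io->pending_flushes))
				ppl_io_unit_finished(io);
	}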
@@ -574,8 +700,12 @@ void ppl_stripe_write_finished(struct stripe_head *sh)
io = sh->ppl_io;
sh->ppl_io = NULL;
- if (io && atomic_dec_and_test(&io->pending_stripes))
- ppl_io_unit_finished(io);
+ if (io && atomic_dec_and_test(&io->pending_stripes)) {
+ if (io->log->disk_flush_bitmap)
+ ppl_do_flush(io);
+ else
+ ppl_io_unit_finished(io);
+ }
}
static void ppl_xor(int size, struct page *page1, struct page *page2)
@@ -1108,6 +1238,8 @@ static void __ppl_exit_log(struct ppl_conf *ppl_conf)
if (ppl_conf->bs)
bioset_free(ppl_conf->bs);
+ if (ppl_conf->flush_bs)
+ bioset_free(ppl_conf->flush_bs);
mempool_destroy(ppl_conf->io_pool);
kmem_cache_destroy(ppl_conf->io_kc);
@@ -1173,6 +1305,8 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
+ struct request_queue *q;
+
if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
PPL_HEADER_SIZE) * 2) {
log->use_multippl = true;
@@ -1185,6 +1319,10 @@ static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
PPL_HEADER_SIZE;
}
log->next_io_sector = rdev->ppl.sector;
+
+ q = bdev_get_queue(rdev->bdev);
+ if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ log->wb_cache_on = true;
}
int ppl_init_log(struct r5conf *conf)
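QUEUE_FLAG_WC is set by blk_queue_write_cache() when a device advertises a volatile write cache, so the per-child test above replaces the old array-wide warning with targeted flushes. The check, isolated as a predicate (a sketch; the code above caches the result in wb_cache_on instead of re-testing):

	static bool rdev_has_wb_cache(struct md_rdev *rdev)
	{
		struct request_queue *q = bdev_get_queue(rdev->bdev);

		return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
	}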
@@ -1192,8 +1330,8 @@ int ppl_init_log(struct r5conf *conf)
struct ppl_conf *ppl_conf;
struct mddev *mddev = conf->mddev;
int ret = 0;
+ int max_disks;
int i;
- bool need_cache_flush = false;
pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
mdname(conf->mddev));
@@ -1219,6 +1357,14 @@ int ppl_init_log(struct r5conf *conf)
return -EINVAL;
}
+ max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) *
+ BITS_PER_BYTE;
+ if (conf->raid_disks > max_disks) {
+ pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
+ mdname(mddev), max_disks);
+ return -EINVAL;
+ }
+
ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
if (!ppl_conf)
return -ENOMEM;
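The max_disks bound is just the bit capacity of disk_flush_bitmap; with the unsigned long declared earlier in this patch, that works out to:

	/* sizeof(unsigned long) * BITS_PER_BYTE: 64 disks on 64-bit, 32 on 32-bit */
	int max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) * BITS_PER_BYTE;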
@@ -1244,6 +1390,12 @@ int ppl_init_log(struct r5conf *conf)
goto err;
}
+ ppl_conf->flush_bs = bioset_create(conf->raid_disks, 0, 0);
+ if (!ppl_conf->flush_bs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
ppl_conf->count = conf->raid_disks;
ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
GFP_KERNEL);
@@ -1275,23 +1427,14 @@ int ppl_init_log(struct r5conf *conf)
log->rdev = rdev;
if (rdev) {
- struct request_queue *q;
-
ret = ppl_validate_rdev(rdev);
if (ret)
goto err;
- q = bdev_get_queue(rdev->bdev);
- if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- need_cache_flush = true;
ppl_init_child_log(log, rdev);
}
}
- if (need_cache_flush)
- pr_warn("md/raid:%s: Volatile write-back cache should be disabled on all member drives when using PPL!\n",
- mdname(mddev));
-
/* load and possibly recover the logs from the member disks */
ret = ppl_load(ppl_conf);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 98ce4272ace9..50d01144b805 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5563,7 +5563,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
bool do_flush = false;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
- int ret = r5l_handle_flush_request(conf->log, bi);
+ int ret = log_handle_flush_request(conf, bi);
if (ret == 0)
return true;
@@ -6168,7 +6168,7 @@ static int handle_active_stripes(struct r5conf *conf, int group,
break;
if (i == NR_STRIPE_HASH_LOCKS) {
spin_unlock_irq(&conf->device_lock);
- r5l_flush_stripe_to_raid(conf->log);
+ log_flush_stripe_to_raid(conf);
spin_lock_irq(&conf->device_lock);
return batch_size;
}
@@ -8060,7 +8060,7 @@ static void raid5_quiesce(struct mddev *mddev, int quiesce)
wake_up(&conf->wait_for_overlap);
unlock_all_device_hash_locks_irq(conf);
}
- r5l_quiesce(conf->log, quiesce);
+ log_quiesce(conf, quiesce);
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
@@ -8364,6 +8364,13 @@ static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
return err;
}
+static int raid5_start(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+
+ return r5l_start(conf->log);
+}
+
static struct md_personality raid6_personality =
{
.name = "raid6",
@@ -8371,6 +8378,7 @@ static struct md_personality raid6_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
@@ -8395,6 +8403,7 @@ static struct md_personality raid5_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
@@ -8420,6 +8429,7 @@ static struct md_personality raid4_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 3dba3aa34a43..9d6c496f756c 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -43,13 +43,13 @@ static inline struct cec_devnode *cec_devnode_data(struct file *filp)
/* CEC file operations */
-static unsigned int cec_poll(struct file *filp,
+static __poll_t cec_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct cec_devnode *devnode = cec_devnode_data(filp);
struct cec_fh *fh = filp->private_data;
struct cec_adapter *adap = fh->adap;
- unsigned int res = 0;
+ __poll_t res = 0;
if (!devnode->registered)
return POLLERR | POLLHUP;
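Every media conversion from here on is the same mechanical change: __poll_t is a sparse-annotated bitwise type, so declaring poll handlers and their mask locals as __poll_t lets static checking flag poll masks mixed with plain integers. The converted shape, reduced to a toy driver (all names hypothetical):

	static __poll_t demo_poll(struct file *file, poll_table *wait)
	{
		struct demo_dev *dev = file->private_data;	/* hypothetical */
		__poll_t mask = 0;

		poll_wait(file, &dev->waitq, wait);
		if (dev->data_ready)
			mask |= POLLIN | POLLRDNORM;
		return mask;
	}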
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 8c87d6837c49..8ee3eebef4db 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -320,13 +320,13 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
return res;
}
-static unsigned int __fops_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t __fops_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct videobuf_buffer *buf = NULL;
struct videobuf_queue *q;
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
DEB_EE("file:%p, poll:%p\n", file, wait);
@@ -359,10 +359,10 @@ static unsigned int __fops_poll(struct file *file, struct poll_table_struct *wait)
return res;
}
-static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t fops_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
- unsigned int res;
+ __poll_t res;
mutex_lock(vdev->lock);
res = __fops_poll(file, wait);
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index 1a8677ade391..0c0878bcf251 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -374,7 +374,7 @@ exit:
return rc;
}
-static unsigned int smsdvb_stats_poll(struct file *file, poll_table *wait)
+static __poll_t smsdvb_stats_poll(struct file *file, poll_table *wait)
{
struct smsdvb_debugfs *debug_data = file->private_data;
int rc;
@@ -384,12 +384,9 @@ static unsigned int smsdvb_stats_poll(struct file *file, poll_table *wait)
poll_wait(file, &debug_data->stats_queue, wait);
rc = smsdvb_stats_wait_read(debug_data);
- if (rc > 0)
- rc = POLLIN | POLLRDNORM;
-
kref_put(&debug_data->refcount, smsdvb_debugfs_data_release);
- return rc;
+ return rc > 0 ? POLLIN | POLLRDNORM : 0;
}
static ssize_t smsdvb_stats_read(struct file *file, char __user *user_buf,
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 3ddd44e1ee77..3fe0eb740a6d 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1066,10 +1066,10 @@ static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
}
-static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
return POLLERR;
@@ -1160,11 +1160,11 @@ static long dvb_dvr_ioctl(struct file *file,
return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
}
-static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk("%s\n", __func__);
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index d48b61eb01f4..3f6c8bd8f869 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -1782,11 +1782,11 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
*
* return: Standard poll mask.
*/
-static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_ca_private *ca = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int slot;
int result = 0;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2afaa8226342..48e16fd003a7 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -2470,7 +2470,7 @@ static int dvb_frontend_handle_ioctl(struct file *file,
}
-static unsigned int dvb_frontend_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t dvb_frontend_poll(struct file *file, struct poll_table_struct *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index edbb30fdd9d9..fb8a1d2ffd24 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -207,7 +207,7 @@ static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
return err;
}
-static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
+static __poll_t fdtv_ca_io_poll(struct file *file, poll_table *wait)
{
return POLLIN;
}
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index 423248f577b6..3049b1f505e5 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -99,7 +99,7 @@ static ssize_t media_write(struct file *filp, const char __user *buf,
return devnode->fops->write(filp, buf, sz, off);
}
-static unsigned int media_poll(struct file *filp,
+static __poll_t media_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct media_devnode *devnode = media_devnode_data(filp);
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index b366a7e1d976..c988669e22ff 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2955,13 +2955,13 @@ static ssize_t bttv_read(struct file *file, char __user *data,
return retval;
}
-static unsigned int bttv_poll(struct file *file, poll_table *wait)
+static __poll_t bttv_poll(struct file *file, poll_table *wait)
{
struct bttv_fh *fh = file->private_data;
struct bttv_buffer *buf;
enum v4l2_field field;
- unsigned int rc = 0;
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t rc = 0;
+ __poll_t req_events = poll_requested_events(wait);
if (v4l2_event_pending(&fh->fh))
rc = POLLPRI;
@@ -3329,13 +3329,13 @@ static ssize_t radio_read(struct file *file, char __user *data,
return cmd.result;
}
-static unsigned int radio_poll(struct file *file, poll_table *wait)
+static __poll_t radio_poll(struct file *file, poll_table *wait)
{
struct bttv_fh *fh = file->private_data;
struct bttv *btv = fh->btv;
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct saa6588_command cmd;
- unsigned int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res = POLLPRI;
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 4f9c2395941b..2b0abd5bbf64 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -602,14 +602,14 @@ ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
return cx18_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
}
-unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
+__poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx18_open_id *id = file2id(filp);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags);
- unsigned res = 0;
+ __poll_t res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags) &&
@@ -629,7 +629,7 @@ unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(id->type == CX18_ENC_STREAM_TYPE_YUV)) {
- int videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
+ __poll_t videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
if (v4l2_event_pending(&id->fh))
res |= POLLPRI;
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 37ef34e866cb..5b44d30efd8f 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -23,7 +23,7 @@ ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
ssize_t cx18_v4l2_write(struct file *filp, const char __user *buf, size_t count,
loff_t *pos);
int cx18_v4l2_close(struct file *filp);
-unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
+__poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
int cx18_start_capture(struct cx18_open_id *id);
void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
void cx18_mute(struct cx18 *cx);
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index f4bd4908acdd..09a25d6c2cd1 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -732,13 +732,13 @@ static ssize_t ts_read(struct file *file, __user char *buf,
return (count && (left == count)) ? -EAGAIN : (count - left);
}
-static unsigned int ts_poll(struct file *file, poll_table *wait)
+static __poll_t ts_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct ddb_output *output = dvbdev->priv;
struct ddb_input *input = output->port->input[0];
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &input->dma->wq, wait);
poll_wait(file, &output->dma->wq, wait);
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index c9bd018e53de..4aa773507201 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -730,12 +730,12 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
return res;
}
-unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
+__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
- int res = 0;
+ __poll_t res = 0;
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Decoder poll\n");
@@ -764,14 +764,14 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
return res;
}
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
+__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
- unsigned res = 0;
+ __poll_t res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) &&
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.h b/drivers/media/pci/ivtv/ivtv-fileops.h
index 5e08800772ca..e0029b2b8d09 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.h
+++ b/drivers/media/pci/ivtv/ivtv-fileops.h
@@ -28,8 +28,8 @@ ssize_t ivtv_v4l2_read(struct file *filp, char __user *buf, size_t count,
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *buf, size_t count,
loff_t * pos);
int ivtv_v4l2_close(struct file *filp);
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait);
-unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait);
+__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait);
+__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait);
int ivtv_start_capture(struct ivtv_open_id *id);
void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end);
int ivtv_start_decoding(struct ivtv_open_id *id, int speed);
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 23999a8cef37..f74b08635082 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1423,9 +1423,9 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
}
-static unsigned int meye_poll(struct file *file, poll_table *wait)
+static __poll_t meye_poll(struct file *file, poll_table *wait)
{
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
mutex_lock(&meye.lock);
poll_wait(file, &meye.proc_list, wait);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 82d2a24644e4..0ceaa3473cf2 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1227,11 +1227,11 @@ static ssize_t radio_read(struct file *file, char __user *data,
return cmd.result;
}
-static unsigned int radio_poll(struct file *file, poll_table *wait)
+static __poll_t radio_poll(struct file *file, poll_table *wait)
{
struct saa7134_dev *dev = video_drvdata(file);
struct saa6588_command cmd;
- unsigned int rc = v4l2_ctrl_poll(file, wait);
+ __poll_t rc = v4l2_ctrl_poll(file, wait);
cmd.instance = file;
cmd.event_list = wait;
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index f21c245a54f7..e7b31a5b14fd 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -909,13 +909,13 @@ err:
return ret;
}
-static unsigned int fops_poll(struct file *file, poll_table *wait)
+static __poll_t fops_poll(struct file *file, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct saa7164_encoder_fh *fh =
(struct saa7164_encoder_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- unsigned int mask = v4l2_ctrl_poll(file, wait);
+ __poll_t mask = v4l2_ctrl_poll(file, wait);
port->last_poll_msecs_diff = port->last_poll_msecs;
port->last_poll_msecs = jiffies_to_msecs(jiffies);
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index 9255d7d23947..6f97c8f2e00d 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -614,11 +614,11 @@ err:
return ret;
}
-static unsigned int fops_poll(struct file *file, poll_table *wait)
+static __poll_t fops_poll(struct file *file, poll_table *wait)
{
struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- unsigned int mask = 0;
+ __poll_t mask = 0;
port->last_poll_msecs_diff = port->last_poll_msecs;
port->last_poll_msecs = jiffies_to_msecs(jiffies);
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index 2aa4ba675194..4d10e2f979d2 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -937,11 +937,11 @@ static int dvb_video_get_event (struct av7110 *av7110, struct video_event *event
* DVB device file operations
******************************************************************************/
-static unsigned int dvb_video_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_video_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
@@ -989,11 +989,11 @@ static ssize_t dvb_video_write(struct file *file, const char __user *buf,
return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
}
-static unsigned int dvb_audio_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_audio_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c
index 1fe49171d823..96ca227cf51b 100644
--- a/drivers/media/pci/ttpci/av7110_ca.c
+++ b/drivers/media/pci/ttpci/av7110_ca.c
@@ -223,13 +223,13 @@ static int dvb_ca_open(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int dvb_ca_poll (struct file *file, poll_table *wait)
+static __poll_t dvb_ca_poll (struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
struct dvb_ringbuffer *rbuf = &av7110->ci_rbuffer;
struct dvb_ringbuffer *wbuf = &av7110->ci_wbuffer;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(8, "av7110:%p\n",av7110);
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index d07840072337..b6a6c4f171d0 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2501,13 +2501,13 @@ static int zoran_s_jpegcomp(struct file *file, void *__fh,
return res;
}
-static unsigned int
+static __poll_t
zoran_poll (struct file *file,
poll_table *wait)
{
struct zoran_fh *fh = file->private_data;
struct zoran *zr = fh->zr;
- int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
int frame;
unsigned long flags;
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 7b3f6f8e3dc8..cf65b39807fe 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -730,7 +730,7 @@ static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
/*
* vpfe_poll: It is used for select/poll system call
*/
-static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+static __poll_t vpfe_poll(struct file *file, poll_table *wait)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 2a2994ef15d5..b2dc524112f7 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -707,12 +707,12 @@ static int gsc_m2m_release(struct file *file)
return 0;
}
-static unsigned int gsc_m2m_poll(struct file *file,
+static __poll_t gsc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
struct gsc_dev *gsc = ctx->gsc_dev;
- unsigned int ret;
+ __poll_t ret;
if (mutex_lock_interruptible(&gsc->lock))
return -ERESTARTSYS;
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index dba21215dc84..de285a269390 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1263,13 +1263,13 @@ static ssize_t viu_read(struct file *file, char __user *data, size_t count,
return 0;
}
-static unsigned int viu_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t viu_poll(struct file *file, struct poll_table_struct *wait)
{
struct viu_fh *fh = file->private_data;
struct videobuf_queue *q = &fh->vb_vidq;
struct viu_dev *dev = fh->dev;
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index c8a12493f395..945ef1e2ccc7 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -950,11 +950,11 @@ static int deinterlace_release(struct file *file)
return 0;
}
-static unsigned int deinterlace_poll(struct file *file,
+static __poll_t deinterlace_poll(struct file *file,
struct poll_table_struct *wait)
{
struct deinterlace_ctx *ctx = file->private_data;
- int ret;
+ __poll_t ret;
deinterlace_lock(ctx);
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 4a2b1afa19c4..5a8eff60e95f 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -838,12 +838,12 @@ static int emmaprp_release(struct file *file)
return 0;
}
-static unsigned int emmaprp_poll(struct file *file,
+static __poll_t emmaprp_poll(struct file *file,
struct poll_table_struct *wait)
{
struct emmaprp_dev *pcdev = video_drvdata(file);
struct emmaprp_ctx *ctx = file->private_data;
- unsigned int res;
+ __poll_t res;
mutex_lock(&pcdev->dev_mutex);
res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 6f1b0c799e58..abb14ee20538 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -839,7 +839,7 @@ static void omap_vout_buffer_release(struct videobuf_queue *q,
/*
* File operations
*/
-static unsigned int omap_vout_poll(struct file *file,
+static __poll_t omap_vout_poll(struct file *file,
struct poll_table_struct *wait)
{
struct omap_vout_device *vout = file->private_data;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 218e6d7ae93a..a751c89a3ea8 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -1383,11 +1383,11 @@ static int isp_video_release(struct file *file)
return 0;
}
-static unsigned int isp_video_poll(struct file *file, poll_table *wait)
+static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
struct isp_video *video = video_drvdata(file);
- int ret;
+ __poll_t ret;
mutex_lock(&video->queue_lock);
ret = vb2_poll(&vfh->queue, file, wait);
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 25c7a7d42292..437395a61065 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -590,12 +590,12 @@ static int s3c_camif_close(struct file *file)
return ret;
}
-static unsigned int s3c_camif_poll(struct file *file,
+static __poll_t s3c_camif_poll(struct file *file,
struct poll_table_struct *wait)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
- int ret;
+ __poll_t ret;
mutex_lock(&camif->lock);
if (vp->owner && vp->owner != file->private_data)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index bc68dbbcaec1..fe94bd6b705e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -988,14 +988,14 @@ static int s5p_mfc_release(struct file *file)
}
/* Poll */
-static unsigned int s5p_mfc_poll(struct file *file,
+static __poll_t s5p_mfc_poll(struct file *file,
struct poll_table_struct *wait)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
struct s5p_mfc_dev *dev = ctx->dev;
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
unsigned long flags;
mutex_lock(&dev->mfc_mutex);
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index dedc1b024f6f..976ea0bb5b6c 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -1016,7 +1016,7 @@ static int sh_veu_release(struct file *file)
return 0;
}
-static unsigned int sh_veu_poll(struct file *file,
+static __poll_t sh_veu_poll(struct file *file,
struct poll_table_struct *wait)
{
struct sh_veu_file *veu_file = file->private_data;
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 36762ec954e7..9b069783e3ed 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1553,7 +1553,7 @@ static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd,
return ret;
}
-static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
+static __poll_t sh_mobile_ceu_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 916ff68b73d4..d964c072832c 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -805,11 +805,11 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
return err;
}
-static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
+static __poll_t soc_camera_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- unsigned res = POLLERR;
+ __poll_t res = POLLERR;
if (icd->streamer != file)
return POLLERR;
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 805d4a8fc17e..f77be9302120 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -764,7 +764,7 @@ out_unlock:
}
-static unsigned int viacam_poll(struct file *filp, struct poll_table_struct *pt)
+static __poll_t viacam_poll(struct file *filp, struct poll_table_struct *pt)
{
struct via_camera *cam = video_drvdata(filp);
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 5f316a5e38db..c80239592088 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -416,7 +416,7 @@ static ssize_t vivid_radio_write(struct file *file, const char __user *buf,
return vivid_radio_tx_write(file, buf, size, offset);
}
-static unsigned int vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c
index 47c36c26096b..71f3ebb7aecf 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.c
+++ b/drivers/media/platform/vivid/vivid-radio-rx.c
@@ -141,7 +141,7 @@ retry:
return i;
}
-unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
{
return POLLIN | POLLRDNORM | v4l2_ctrl_poll(file, wait);
}
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.h b/drivers/media/platform/vivid/vivid-radio-rx.h
index 1077d8f061eb..2b33edb60942 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.h
+++ b/drivers/media/platform/vivid/vivid-radio-rx.h
@@ -21,7 +21,7 @@
#define _VIVID_RADIO_RX_H_
ssize_t vivid_radio_rx_read(struct file *, char __user *, size_t, loff_t *);
-unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a);
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c
index 0e8025b7b4dd..f0917f4e7d8c 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.c
+++ b/drivers/media/platform/vivid/vivid-radio-tx.c
@@ -105,7 +105,7 @@ retry:
return i;
}
-unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
{
return POLLOUT | POLLWRNORM | v4l2_ctrl_poll(file, wait);
}
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.h b/drivers/media/platform/vivid/vivid-radio-tx.h
index 7f8ff7547119..3c3343d70cbc 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.h
+++ b/drivers/media/platform/vivid/vivid-radio-tx.h
@@ -21,7 +21,7 @@
#define _VIVID_RADIO_TX_H_
ssize_t vivid_radio_tx_write(struct file *, const char __user *, size_t, loff_t *);
-unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a);
int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a);
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 7575e5370a49..dba611003a00 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -481,11 +481,11 @@ static int cadet_release(struct file *file)
return 0;
}
-static unsigned int cadet_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t cadet_poll(struct file *file, struct poll_table_struct *wait)
{
struct cadet *dev = video_drvdata(file);
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
poll_wait(file, &dev->read_queue, wait);
if (dev->rdsstat == 0 && (req_events & (POLLIN | POLLRDNORM))) {
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 540ac887a63c..49293dd707b9 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -1153,12 +1153,12 @@ static ssize_t si476x_radio_fops_read(struct file *file, char __user *buf,
return rval;
}
-static unsigned int si476x_radio_fops_poll(struct file *file,
+static __poll_t si476x_radio_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si476x_radio *radio = video_drvdata(file);
- unsigned long req_events = poll_requested_events(pts);
- unsigned int err = v4l2_ctrl_poll(file, pts);
+ __poll_t req_events = poll_requested_events(pts);
+ __poll_t err = v4l2_ctrl_poll(file, pts);
if (req_events & (POLLIN | POLLRDNORM)) {
if (atomic_read(&radio->core->is_alive))
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 3cbdc085c65d..f92b0f9241a9 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1089,7 +1089,7 @@ out:
return r;
}
-static unsigned int wl1273_fm_fops_poll(struct file *file,
+static __poll_t wl1273_fm_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index c89a7d5b8c55..68fe9e5c7a70 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -507,12 +507,12 @@ done:
/*
* si470x_fops_poll - poll RDS data
*/
-static unsigned int si470x_fops_poll(struct file *file,
+static __poll_t si470x_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si470x_device *radio = video_drvdata(file);
- unsigned long req_events = poll_requested_events(pts);
- int retval = v4l2_ctrl_poll(file, pts);
+ __poll_t req_events = poll_requested_events(pts);
+ __poll_t retval = v4l2_ctrl_poll(file, pts);
if (req_events & (POLLIN | POLLRDNORM)) {
/* switch on rds reception */
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index fc5a7abc83d2..fd603c1b96bb 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -102,7 +102,7 @@ static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf,
return sizeof(rds);
}
-static u32 fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
{
int ret;
struct fmdev *fmdev;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index e16d1138ca48..aab53641838b 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -272,10 +272,10 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(lirc_dev_fop_close);
-unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
+__poll_t lirc_dev_fop_poll(struct file *file, poll_table *wait)
{
struct lirc_dev *d = file->private_data;
- unsigned int ret;
+ __poll_t ret;
if (!d->attached)
return POLLHUP | POLLERR;
diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
index 81f72c0b561f..ab238ac8bfc0 100644
--- a/drivers/media/usb/cpia2/cpia2.h
+++ b/drivers/media/usb/cpia2/cpia2.h
@@ -444,7 +444,7 @@ int cpia2_allocate_buffers(struct camera_data *cam);
void cpia2_free_buffers(struct camera_data *cam);
long cpia2_read(struct camera_data *cam,
char __user *buf, unsigned long count, int noblock);
-unsigned int cpia2_poll(struct camera_data *cam,
+__poll_t cpia2_poll(struct camera_data *cam,
struct file *filp, poll_table *wait);
int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
void cpia2_set_property_flip(struct camera_data *cam, int prop_val);
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
index 0efba0da0a45..e7524920c618 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/media/usb/cpia2/cpia2_core.c
@@ -2370,10 +2370,10 @@ long cpia2_read(struct camera_data *cam,
* cpia2_poll
*
*****************************************************************************/
-unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
+__poll_t cpia2_poll(struct camera_data *cam, struct file *filp,
poll_table *wait)
{
- unsigned int status = v4l2_ctrl_poll(filp, wait);
+ __poll_t status = v4l2_ctrl_poll(filp, wait);
if ((poll_requested_events(wait) & (POLLIN | POLLRDNORM)) &&
!cam->streaming) {
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 3dedd83f0b19..74c97565ccc6 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -169,10 +169,10 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
* cpia2_v4l_poll
*
*****************************************************************************/
-static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
{
struct camera_data *cam = video_drvdata(filp);
- unsigned int res;
+ __poll_t res;
mutex_lock(&cam->v4l2_lock);
res = cpia2_poll(cam, filp, wait);
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index d538fa407742..103e3299b77f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1812,13 +1812,13 @@ static ssize_t mpeg_read(struct file *file, char __user *data,
file->f_flags & O_NONBLOCK);
}
-static unsigned int mpeg_poll(struct file *file,
+static __poll_t mpeg_poll(struct file *file,
struct poll_table_struct *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx231xx_fh *fh = file->private_data;
struct cx231xx *dev = fh->dev;
- unsigned int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res |= POLLPRI;
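Many of the handlers above also retype req_events. poll_requested_events() reports which events the caller actually asked for, letting the handler skip expensive setup (starting streaming, allocating buffers) for events nobody wants. A hedged sketch of that guard, with the capture-side work left abstract:

	static __poll_t sketch_poll(struct file *file, poll_table *wait)
	{
		__poll_t req_events = poll_requested_events(wait);
		__poll_t res = v4l2_ctrl_poll(file, wait);	/* control events */

		if (req_events & (POLLIN | POLLRDNORM)) {
			/* only now touch the capture queue; a caller polling
			 * solely for POLLPRI never pays this cost */
		}
		return res;
	}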
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 226059fc672b..d7b2e694bbb9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -2006,12 +2006,12 @@ cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
* cx231xx_v4l2_poll()
* will allocate buffers when called for the first time
*/
-static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
+static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
- unsigned res = 0;
+ __poll_t res = 0;
int rc;
rc = check_dev(dev);
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 961343873fd0..b72d02e225fd 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1862,11 +1862,11 @@ out:
return ret;
}
-static unsigned int dev_poll(struct file *file, poll_table *wait)
+static __poll_t dev_poll(struct file *file, poll_table *wait)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- unsigned long req_events = poll_requested_events(wait);
- int ret = 0;
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t ret = 0;
PDEBUG(D_FRAM, "poll");
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 7fb036d6a86e..d0d638c2e900 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -521,12 +521,12 @@ err:
return ret;
}
-static unsigned int hdpvr_poll(struct file *filp, poll_table *wait)
+static __poll_t hdpvr_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct hdpvr_buffer *buf = NULL;
struct hdpvr_device *dev = video_drvdata(filp);
- unsigned int mask = v4l2_ctrl_poll(filp, wait);
+ __poll_t mask = v4l2_ctrl_poll(filp, wait);
if (!(req_events & (POLLIN | POLLRDNORM)))
return mask;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 4320bda9352d..11cdfe356801 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1190,9 +1190,9 @@ static ssize_t pvr2_v4l2_read(struct file *file,
}
-static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait)
+static __poll_t pvr2_v4l2_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct pvr2_v4l2_fh *fh = file->private_data;
int ret;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index c0bba773db25..cba0916d33e5 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -721,10 +721,10 @@ static ssize_t v4l_stk_read(struct file *fp, char __user *buf,
return ret;
}
-static unsigned int v4l_stk_poll(struct file *fp, poll_table *wait)
+static __poll_t v4l_stk_poll(struct file *fp, poll_table *wait)
{
struct stk_camera *dev = video_drvdata(fp);
- unsigned res = v4l2_ctrl_poll(fp, wait);
+ __poll_t res = v4l2_ctrl_poll(fp, wait);
poll_wait(fp, &dev->wait_frame, wait);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 9fa25de6b5a9..317bf5a3933e 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1423,13 +1423,13 @@ tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
return 0;
}
-static unsigned int
+static __poll_t
__tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct tm6000_fh *fh = file->private_data;
struct tm6000_buffer *buf;
- int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res = POLLPRI;
@@ -1457,11 +1457,11 @@ __tm6000_poll(struct file *file, struct poll_table_struct *wait)
return res;
}
-static unsigned int tm6000_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
struct tm6000_fh *fh = file->private_data;
struct tm6000_core *dev = fh->dev;
- unsigned int res;
+ __poll_t res;
mutex_lock(&dev->lock);
res = __tm6000_poll(file, wait);
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index c8d78b2f3de4..692c463f14f7 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -340,10 +340,10 @@ unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
}
#endif
-unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
+__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
- unsigned int ret;
+ __poll_t ret;
mutex_lock(&queue->mutex);
ret = vb2_poll(&queue->queue, file, wait);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 3e7e283a44a8..381f614b2f4c 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1284,36 +1284,30 @@ struct uvc_xu_control_mapping32 {
static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
const struct uvc_xu_control_mapping32 __user *up)
{
- compat_caddr_t p;
+ struct uvc_xu_control_mapping32 *p = (void *)kp;
+ compat_caddr_t info;
+ u32 count;
- if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
- __copy_from_user(kp, up, offsetof(typeof(*up), menu_info)) ||
- __get_user(kp->menu_count, &up->menu_count))
+ if (copy_from_user(p, up, sizeof(*p)))
return -EFAULT;
- memset(kp->reserved, 0, sizeof(kp->reserved));
-
- if (kp->menu_count == 0) {
- kp->menu_info = NULL;
- return 0;
- }
-
- if (__get_user(p, &up->menu_info))
- return -EFAULT;
- kp->menu_info = compat_ptr(p);
+ count = p->menu_count;
+ info = p->menu_info;
+ memset(kp->reserved, 0, sizeof(kp->reserved));
+ kp->menu_info = count ? compat_ptr(info) : NULL;
+ kp->menu_count = count;
return 0;
}
static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
struct uvc_xu_control_mapping32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
- __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
- __put_user(kp->menu_count, &up->menu_count))
+ if (copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
+ put_user(kp->menu_count, &up->menu_count))
return -EFAULT;
- if (__clear_user(up->reserved, sizeof(up->reserved)))
+ if (clear_user(up->reserved, sizeof(up->reserved)))
return -EFAULT;
return 0;
@@ -1330,31 +1324,26 @@ struct uvc_xu_control_query32 {
static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
const struct uvc_xu_control_query32 __user *up)
{
- compat_caddr_t p;
+ struct uvc_xu_control_query32 v;
- if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
- __copy_from_user(kp, up, offsetof(typeof(*up), data)))
+ if (copy_from_user(&v, up, sizeof(v)))
return -EFAULT;
- if (kp->size == 0) {
- kp->data = NULL;
- return 0;
- }
-
- if (__get_user(p, &up->data))
- return -EFAULT;
- kp->data = compat_ptr(p);
-
+ *kp = (struct uvc_xu_control_query){
+ .unit = v.unit,
+ .selector = v.selector,
+ .query = v.query,
+ .size = v.size,
+ .data = v.size ? compat_ptr(v.data) : NULL
+ };
return 0;
}
static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
struct uvc_xu_control_query32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
- __copy_to_user(up, kp, offsetof(typeof(*up), data)))
+ if (copy_to_user(up, kp, offsetof(typeof(*up), data)))
return -EFAULT;
-
return 0;
}
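The compat handlers above drop their access_ok()/__copy_from_user() pairs: the unprefixed copy_from_user()/copy_to_user()/put_user()/clear_user() helpers perform the access check themselves, so the open-coded check was duplication of a kind that has historically hidden bugs when the two halves drift apart. The getters also read the whole 32-bit image first and pull the interesting fields into locals before the native struct is written, which is what makes the (void *)kp scratch reuse in uvc_v4l2_get_xu_mapping() safe. A self-contained sketch of the pattern, with hypothetical struct and function names:

	#include <linux/compat.h>
	#include <linux/uaccess.h>

	struct foo { void *data; u32 size; };			/* assumed */
	struct foo32 { compat_caddr_t data; u32 size; };	/* assumed */

	static int get_foo(struct foo *kp, const struct foo32 __user *up)
	{
		struct foo32 v;

		if (copy_from_user(&v, up, sizeof(v)))	/* checks access itself */
			return -EFAULT;
		kp->size = v.size;
		kp->data = v.size ? compat_ptr(v.data) : NULL;
		return 0;
	}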
@@ -1423,7 +1412,7 @@ static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return uvc_queue_mmap(&stream->queue, vma);
}
-static unsigned int uvc_v4l2_poll(struct file *file, poll_table *wait)
+static __poll_t uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct uvc_fh *handle = file->private_data;
struct uvc_streaming *stream = handle->stream;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 05398784d1c8..9b44a7cd5ec1 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -678,7 +678,7 @@ extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
extern int uvc_queue_mmap(struct uvc_video_queue *queue,
struct vm_area_struct *vma);
-extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
+extern __poll_t uvc_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
#ifndef CONFIG_MMU
extern unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 1d888661fd03..8b7c19943d46 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1287,12 +1287,12 @@ static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-static unsigned int zr364xx_poll(struct file *file,
+static __poll_t zr364xx_poll(struct file *file,
struct poll_table_struct *wait)
{
struct zr364xx_camera *cam = video_drvdata(file);
struct videobuf_queue *q = &cam->vb_vidq;
- unsigned res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
_DBG("%s\n", __func__);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index cbb2ef43945f..b07657149434 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -3457,7 +3457,7 @@ int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
}
EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
-unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
struct v4l2_fh *fh = file->private_data;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index c647ba648805..8ad8c1627768 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -331,10 +331,10 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
return ret;
}
-static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
+static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
- unsigned int res = POLLERR | POLLHUP;
+ __poll_t res = POLLERR | POLLHUP;
if (!vdev->fops->poll)
return DEFAULT_POLLMASK;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index bc580fbe18fa..186156f8952a 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -500,14 +500,14 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
-unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
unsigned long flags;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
@@ -794,11 +794,11 @@ int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
-unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
struct v4l2_fh *fh = file->private_data;
struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
- unsigned int ret;
+ __poll_t ret;
if (m2m_ctx->q_lock)
mutex_lock(m2m_ctx->q_lock);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 43fefa73e0a3..28966fa8c610 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -469,7 +469,7 @@ static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
}
#endif
-static unsigned int subdev_poll(struct file *file, poll_table *wait)
+static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index e87fb13b22dc..9a89d3ae170f 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -1118,13 +1118,13 @@ done:
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
-unsigned int videobuf_poll_stream(struct file *file,
+__poll_t videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct videobuf_buffer *buf = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
videobuf_queue_lock(q);
if (q->streaming) {
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index a8589d96ef72..0d9f772d6d03 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -2018,10 +2018,10 @@ void vb2_core_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
-unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct vb2_buffer *vb = NULL;
unsigned long flags;
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 4075314a6989..a49f7eb98c2e 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -671,11 +671,11 @@ void vb2_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = 0;
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = 0;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
struct v4l2_fh *fh = file->private_data;
@@ -904,12 +904,12 @@ exit:
}
EXPORT_SYMBOL_GPL(vb2_fop_read);
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct vb2_queue *q = vdev->queue;
struct mutex *lock = q->lock ? q->lock : vdev->lock;
- unsigned res;
+ __poll_t res;
void *fileio;
/*
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index a385a35c7de9..90a66b3f7ae1 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -32,7 +32,6 @@
#include <linux/pm_runtime.h>
#include <linux/platform_data/mtd-nand-omap2.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/mach-types.h>
@@ -1138,6 +1137,112 @@ struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
}
EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
+static void gpmc_omap_onenand_calc_sync_timings(struct gpmc_timings *t,
+ struct gpmc_settings *s,
+ int freq, int latency)
+{
+ struct gpmc_device_timings dev_t;
+ const int t_cer = 15;
+ const int t_avdp = 12;
+ const int t_cez = 20; /* max of t_cez, t_oez */
+ const int t_wpl = 40;
+ const int t_wph = 30;
+ int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
+
+ switch (freq) {
+ case 104:
+ min_gpmc_clk_period = 9600; /* 104 MHz */
+ t_ces = 3;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 3;
+ t_aavdh = 6;
+ t_rdyo = 6;
+ break;
+ case 83:
+ min_gpmc_clk_period = 12000; /* 83 MHz */
+ t_ces = 5;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 9;
+ break;
+ case 66:
+ min_gpmc_clk_period = 15000; /* 66 MHz */
+ t_ces = 6;
+ t_avds = 5;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 11;
+ break;
+ default:
+ min_gpmc_clk_period = 18500; /* 54 MHz */
+ t_ces = 7;
+ t_avds = 7;
+ t_avdh = 7;
+ t_ach = 9;
+ t_aavdh = 7;
+ t_rdyo = 15;
+ break;
+ }
+
+ /* Set synchronous read timings */
+ memset(&dev_t, 0, sizeof(dev_t));
+
+ if (!s->sync_write) {
+ dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
+ dev_t.t_wpl = t_wpl * 1000;
+ dev_t.t_wph = t_wph * 1000;
+ dev_t.t_aavdh = t_aavdh * 1000;
+ }
+ dev_t.ce_xdelay = true;
+ dev_t.avd_xdelay = true;
+ dev_t.oe_xdelay = true;
+ dev_t.we_xdelay = true;
+ dev_t.clk = min_gpmc_clk_period;
+ dev_t.t_bacc = dev_t.clk;
+ dev_t.t_ces = t_ces * 1000;
+ dev_t.t_avds = t_avds * 1000;
+ dev_t.t_avdh = t_avdh * 1000;
+ dev_t.t_ach = t_ach * 1000;
+ dev_t.cyc_iaa = (latency + 1);
+ dev_t.t_cez_r = t_cez * 1000;
+ dev_t.t_cez_w = dev_t.t_cez_r;
+ dev_t.cyc_aavdh_oe = 1;
+ dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
+
+ gpmc_calc_timings(t, s, &dev_t);
+}
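The per-frequency blocks above are OneNAND datasheet cycle times in nanoseconds, scaled by 1000 into the picosecond units gpmc_calc_timings() works in. The clock periods follow from the frequencies directly: 1 / 104 MHz ≈ 9615 ps (tabled as 9600), 1 / 83 MHz ≈ 12048 ps (12000), 1 / 66 MHz ≈ 15151 ps (15000), and the 54 MHz fallback 1 / 54 MHz ≈ 18518 ps (18500).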
+
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ int ret;
+ struct gpmc_timings gpmc_t;
+ struct gpmc_settings gpmc_s;
+
+ gpmc_read_settings_dt(dev->of_node, &gpmc_s);
+
+ info->sync_read = gpmc_s.sync_read;
+ info->sync_write = gpmc_s.sync_write;
+ info->burst_len = gpmc_s.burst_len;
+
+ if (!gpmc_s.sync_read && !gpmc_s.sync_write)
+ return 0;
+
+ gpmc_omap_onenand_calc_sync_timings(&gpmc_t, &gpmc_s, freq, latency);
+
+ ret = gpmc_cs_program_settings(cs, &gpmc_s);
+ if (ret < 0)
+ return ret;
+
+ return gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
+}
+EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
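A hypothetical call site, e.g. an OneNAND driver's probe path; the cs, freq and latency values here are illustrative, and the gpmc_onenand_info fields are the ones filled in above:

	struct gpmc_onenand_info info;
	int ret;

	/* DT decides sync vs. async; the helper reports what it programmed */
	ret = gpmc_omap_onenand_set_timings(dev, cs, 104 /* MHz */,
					    4 /* latency cycles */, &info);
	if (ret)
		return ret;
	if (info.sync_read)
		;	/* put the OneNAND itself into synchronous burst mode */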
+
int gpmc_get_client_irq(unsigned irq_config)
{
if (!gpmc_irq_domain) {
@@ -1916,41 +2021,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_bool(np, "gpmc,time-para-granularity");
}
-#if IS_ENABLED(CONFIG_MTD_ONENAND)
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- u32 val;
- struct omap_onenand_platform_data *gpmc_onenand_data;
-
- if (of_property_read_u32(child, "reg", &val) < 0) {
- dev_err(&pdev->dev, "%pOF has no 'reg' property\n",
- child);
- return -ENODEV;
- }
-
- gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
- GFP_KERNEL);
- if (!gpmc_onenand_data)
- return -ENOMEM;
-
- gpmc_onenand_data->cs = val;
- gpmc_onenand_data->of_node = child;
- gpmc_onenand_data->dma_channel = -1;
-
- if (!of_property_read_u32(child, "dma-channel", &val))
- gpmc_onenand_data->dma_channel = val;
-
- return gpmc_onenand_init(gpmc_onenand_data);
-}
-#else
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- return 0;
-}
-#endif
-
/**
* gpmc_probe_generic_child - configures the gpmc for a child device
* @pdev: pointer to gpmc platform device
@@ -2053,6 +2123,16 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
}
}
+ if (of_node_cmp(child->name, "onenand") == 0) {
+ /* Warn about older DT blobs with no compatible property */
+ if (!of_property_read_bool(child, "compatible")) {
+ dev_warn(&pdev->dev,
+ "Incompatible OneNAND node: missing compatible");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
if (of_device_is_compatible(child, "ti,omap2-nand")) {
/* NAND specific setup */
val = 8;
@@ -2077,8 +2157,9 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
} else {
ret = of_property_read_u32(child, "bank-width",
&gpmc_s.device_width);
- if (ret < 0) {
- dev_err(&pdev->dev, "%pOF has no 'bank-width' property\n",
+ if (ret < 0 && !gpmc_s.device_width) {
+ dev_err(&pdev->dev,
+ "%pOF has no 'gpmc,device-width' property\n",
child);
goto err;
}
@@ -2188,11 +2269,7 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
if (!child->name)
continue;
- if (of_node_cmp(child->name, "onenand") == 0)
- ret = gpmc_probe_onenand_child(pdev, child);
- else
- ret = gpmc_probe_generic_child(pdev, child);
-
+ ret = gpmc_probe_generic_child(pdev, child);
if (ret) {
dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
child->name, ret);
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 7310e32b5991..aa2b0786bbe9 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -45,7 +45,7 @@ config MEMSTICK_R592
config MEMSTICK_REALTEK_PCI
tristate "Realtek PCI-E Memstick Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support Memstick card interface
of Realtek PCI-E card reader
@@ -55,7 +55,7 @@ config MEMSTICK_REALTEK_PCI
config MEMSTICK_REALTEK_USB
tristate "Realtek USB Memstick Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support Memstick card interface
of Realtek RTS5129/39 series USB card reader
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 818fa94354ae..a44b4578ba4d 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -24,7 +24,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/memstick.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_ms {
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 2e3cf012ef48..4f64563df7de 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -25,7 +25,7 @@
#include <linux/workqueue.h>
#include <linux/memstick.h>
#include <linux/kthread.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/sched.h>
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 7a93400eea2a..51eb1b027963 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -958,7 +958,7 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
- u16 req_idx; /* Request index */
+ u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
@@ -994,7 +994,7 @@ mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
- u16 req_idx; /* Request index */
+ u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
@@ -1128,11 +1128,12 @@ mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
static void
mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
- SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
- pChain->NextChainOffset = next;
- pChain->Address = cpu_to_le32(dma_addr);
+ SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
+ pChain->NextChainOffset = next;
+ pChain->Address = cpu_to_le32(dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1147,18 +1148,18 @@ mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
static void
mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
- SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
- u32 tmp = dma_addr & 0xFFFFFFFF;
+ SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
+ u32 tmp = dma_addr & 0xFFFFFFFF;
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
- MPI_SGE_FLAGS_64_BIT_ADDRESSING);
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING);
- pChain->NextChainOffset = next;
+ pChain->NextChainOffset = next;
- pChain->Address.Low = cpu_to_le32(tmp);
- tmp = (u32)(upper_32_bits(dma_addr));
- pChain->Address.High = cpu_to_le32(tmp);
+ pChain->Address.Low = cpu_to_le32(tmp);
+ tmp = (u32)(upper_32_bits(dma_addr));
+ pChain->Address.High = cpu_to_le32(tmp);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1360,7 +1361,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
-return 0;
+ return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2152,7 +2153,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
device_state);
/* put ioc into READY_STATE */
- if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
+ if (SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
printk(MYIOC_s_ERR_FMT
"pci-suspend: IOC msg unit reset failed!\n", ioc->name);
}
@@ -6348,7 +6349,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
u8 page_type = 0, extend_page;
unsigned long timeleft;
unsigned long flags;
- int in_isr;
+ int in_isr;
u8 issue_hard_reset = 0;
u8 retry_count = 0;
@@ -7697,7 +7698,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
break;
}
if (ds)
- strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
+ strlcpy(evStr, ds, EVENT_DESCR_STR_SZ);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
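The strncpy() -> strlcpy() switch matters when ds is longer than EVENT_DESCR_STR_SZ: strncpy() fills the buffer without a terminating NUL on truncation, whereas strlcpy() always terminates. In miniature, with an 8-byte buffer:

	char buf[8];

	strncpy(buf, "EVENT_DESCRIPTION", sizeof(buf)); /* no NUL: unsafe to print */
	strlcpy(buf, "EVENT_DESCRIPTION", sizeof(buf)); /* buf = "EVENT_D", terminated */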
@@ -8092,15 +8093,15 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
static void
mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
{
-union loginfo_type {
- u32 loginfo;
- struct {
- u32 subcode:16;
- u32 code:8;
- u32 originator:4;
- u32 bus_type:4;
- }dw;
-};
+ union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ } dw;
+ };
union loginfo_type sas_loginfo;
char *originator_desc = NULL;
char *code_desc = NULL;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7b3b41368931..8d12017b9893 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2481,24 +2481,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
else
karg.host_no = -1;
- /* Reformat the fw_version into a string
- */
- karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
- ((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
- karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
- karg.fw_version[2] = '.';
- karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
- ((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
- karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
- karg.fw_version[5] = '.';
- karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
- ((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
- karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
- karg.fw_version[8] = '.';
- karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
- ((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
- karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
- karg.fw_version[11] = '\0';
+ /* Reformat the fw_version into a string */
+ snprintf(karg.fw_version, sizeof(karg.fw_version),
+ "%.2hhu.%.2hhu.%.2hhu.%.2hhu",
+ ioc->facts.FWVersion.Struct.Major,
+ ioc->facts.FWVersion.Struct.Minor,
+ ioc->facts.FWVersion.Struct.Unit,
+ ioc->facts.FWVersion.Struct.Dev);
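The dozen hand-packed characters collapse into one snprintf(). "%.2hhu" takes an unsigned char and prints it with a minimum of two digits, so the result is always a NUL-terminated "MM.mm.uu.dd" string and the >= 10 special-casing disappears. A standalone userspace illustration with made-up version numbers:

	#include <stdio.h>

	int main(void)
	{
		char fw_version[12];
		unsigned char major = 3, minor = 9, unit = 2, dev = 7;

		snprintf(fw_version, sizeof(fw_version),
			 "%.2hhu.%.2hhu.%.2hhu.%.2hhu", major, minor, unit, dev);
		puts(fw_version);	/* prints "03.09.02.07" */
		return 0;
	}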
/* Issue a config request to get the device serial number
*/
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 345f6035599e..439ee9c5f535 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1165,7 +1165,6 @@ mptsas_schedule_target_reset(void *iocp)
* issue target reset to next device in the queue
*/
- head = &hd->target_reset_list;
if (list_empty(head))
return;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1d20a800e967..b860eb5aa194 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -222,6 +222,16 @@ config MFD_CROS_EC_SPI
response time cannot be guaranteed, we support ignoring
'pre-amble' bytes before the response actually starts.
+config MFD_CROS_EC_CHARDEV
+ tristate "Chrome OS Embedded Controller userspace device interface"
+ depends on MFD_CROS_EC
+ select CROS_EC_CTL
+ ---help---
+ This driver adds support for talking to the ChromeOS EC from userspace.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called cros_ec_dev.
+
config MFD_ASIC3
bool "Compaq ASIC3"
depends on GPIOLIB && ARM
@@ -877,7 +887,7 @@ config UCB1400_CORE
config MFD_PM8XXX
tristate "Qualcomm PM8xxx PMIC chips driver"
- depends on (ARM || HEXAGON)
+ depends on (ARM || HEXAGON || COMPILE_TEST)
select IRQ_DOMAIN
select MFD_CORE
select REGMAP
@@ -929,17 +939,6 @@ config MFD_RDC321X
southbridge which provides access to GPIOs and Watchdog using the
southbridge PCI device configuration space.
-config MFD_RTSX_PCI
- tristate "Realtek PCI-E card reader"
- depends on PCI
- select MFD_CORE
- help
- This supports for Realtek PCI-Express card reader including rts5209,
- rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, etc.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick, Memory Stick Pro, Secure Digital and
- MultiMediaCard.
-
config MFD_RT5033
tristate "Richtek RT5033 Power Management IC"
depends on I2C
@@ -953,16 +952,6 @@ config MFD_RT5033
sub-devices like charger, fuel gauge, flash LED, current source,
LDO and Buck.
-config MFD_RTSX_USB
- tristate "Realtek USB card reader"
- depends on USB
- select MFD_CORE
- help
- Select this option to get support for Realtek USB 2.0 card readers
- including RTS5129, RTS5139, RTS5179 and RTS5170.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick Pro, Secure Digital and MultiMediaCard.
-
config MFD_RC5T583
bool "Ricoh RC5T583 Power Management system device"
depends on I2C=y
@@ -1859,5 +1848,13 @@ config MFD_VEXPRESS_SYSREG
System Registers are the platform configuration block
on the ARM Ltd. Versatile Express board.
+config RAVE_SP_CORE
+ tristate "RAVE SP MCU core driver"
+ depends on SERIAL_DEV_BUS
+ select CRC_CCITT
+ help
+ Select this to get support for the Supervisory Processor
+ device found on several devices in the RAVE line of hardware.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index d9474ade32e6..d9d2cf0d32ef 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -17,12 +17,9 @@ cros_ec_core-$(CONFIG_ACPI) += cros_ec_acpi_gpe.o
obj-$(CONFIG_MFD_CROS_EC) += cros_ec_core.o
obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
+obj-$(CONFIG_MFD_CROS_EC_CHARDEV) += cros_ec_dev.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
-obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
-obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o
-
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
@@ -230,3 +227,5 @@ obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o
obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
+obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
+
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index c1c815241e02..1afa27de7191 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1258,6 +1258,19 @@ static struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
},
};
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
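The macro manufactures the _open() wrapper and the file_operations from a conventionally named _show() routine, which is what lets each of the repeated open/fops blocks below collapse into a single DEFINE_SHOW_ATTRIBUTE() line. Usage in miniature, with a hypothetical name:

	static int foo_show(struct seq_file *s, void *unused)
	{
		seq_puts(s, "hello\n");
		return 0;
	}

	DEFINE_SHOW_ATTRIBUTE(foo);	/* emits foo_open() and foo_fops */

	/* foo_fops can then be handed straight to debugfs_create_file() */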
static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
@@ -1318,7 +1331,7 @@ static int ab8500_registers_print(struct device *dev, u32 bank,
return 0;
}
-static int ab8500_print_bank_registers(struct seq_file *s, void *p)
+static int ab8500_bank_registers_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
u32 bank = debug_bank;
@@ -1330,18 +1343,7 @@ static int ab8500_print_bank_registers(struct seq_file *s, void *p)
return ab8500_registers_print(dev, bank, s);
}
-static int ab8500_registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_bank_registers, inode->i_private);
-}
-
-static const struct file_operations ab8500_registers_fops = {
- .open = ab8500_registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_bank_registers);
static int ab8500_print_all_banks(struct seq_file *s, void *p)
{
@@ -1528,7 +1530,7 @@ void ab8500_debug_register_interrupt(int line)
num_interrupts[line]++;
}
-static int ab8500_interrupts_print(struct seq_file *s, void *p)
+static int ab8500_interrupts_show(struct seq_file *s, void *p)
{
int line;
@@ -1557,10 +1559,7 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_interrupts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_interrupts_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_interrupts);
/*
* - HWREG DB8500 formated routines
@@ -1603,7 +1602,7 @@ static int ab8500_hwreg_open(struct inode *inode, struct file *file)
#define AB8500_LAST_SIM_REG 0x8B
#define AB8505_LAST_SIM_REG 0x8C
-static int ab8500_print_modem_registers(struct seq_file *s, void *p)
+static int ab8500_modem_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
struct ab8500 *ab8500;
@@ -1620,18 +1619,15 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
err = abx500_get_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, &orig_value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
/* Config 1 will allow APE side to read SIM registers */
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG,
AB8500_SUPPLY_CONTROL_CONFIG_1);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
seq_printf(s, " bank 0x%02X:\n", bank);
@@ -1641,36 +1637,30 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
for (reg = AB8500_FIRST_SIM_REG; reg <= last_sim_reg; reg++) {
err = abx500_get_register_interruptible(dev,
bank, reg, &value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n", bank, reg, value);
}
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, orig_value);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
+
return 0;
-}
-static int ab8500_modem_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_modem_registers,
- inode->i_private);
+report_read_failure:
+ dev_err(dev, "ab->read fail %d\n", err);
+ return err;
+
+report_write_failure:
+ dev_err(dev, "ab->write fail %d\n", err);
+ return err;
}
-static const struct file_operations ab8500_modem_fops = {
- .open = ab8500_modem_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_modem);
-static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -1687,21 +1677,9 @@ static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bat_ctrl_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
- .open = ab8500_gpadc_bat_ctrl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bat_ctrl);
-static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
{
int btemp_ball_raw;
int btemp_ball_convert;
@@ -1718,22 +1696,9 @@ static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_btemp_ball_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_btemp_ball);
-static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
- .open = ab8500_gpadc_btemp_ball_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
{
int main_charger_v_raw;
int main_charger_v_convert;
@@ -1750,22 +1715,9 @@ static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
- .open = ab8500_gpadc_main_charger_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_v);
-static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
{
int acc_detect1_raw;
int acc_detect1_convert;
@@ -1782,22 +1734,9 @@ static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect1_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
- .open = ab8500_gpadc_acc_detect1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect1);
-static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
{
int acc_detect2_raw;
int acc_detect2_convert;
@@ -1814,22 +1753,9 @@ static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect2_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
- .open = ab8500_gpadc_acc_detect2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect2);
-static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
{
int aux1_raw;
int aux1_convert;
@@ -1846,20 +1772,9 @@ static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux1);
-static const struct file_operations ab8500_gpadc_aux1_fops = {
- .open = ab8500_gpadc_aux1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
{
int aux2_raw;
int aux2_convert;
@@ -1876,20 +1791,9 @@ static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_aux2_fops = {
- .open = ab8500_gpadc_aux2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux2);
-static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
{
int main_bat_v_raw;
int main_bat_v_convert;
@@ -1906,22 +1810,9 @@ static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_bat_v_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_bat_v);
-static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
- .open = ab8500_gpadc_main_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
{
int vbus_v_raw;
int vbus_v_convert;
@@ -1938,20 +1829,9 @@ static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_vbus_v);
-static const struct file_operations ab8500_gpadc_vbus_v_fops = {
- .open = ab8500_gpadc_vbus_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
{
int main_charger_c_raw;
int main_charger_c_convert;
@@ -1968,22 +1848,9 @@ static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
- .open = ab8500_gpadc_main_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_c);
-static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
{
int usb_charger_c_raw;
int usb_charger_c_convert;
@@ -2000,22 +1867,9 @@ static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
- .open = ab8500_gpadc_usb_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_charger_c);
-static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
{
int bk_bat_v_raw;
int bk_bat_v_convert;
@@ -2032,21 +1886,9 @@ static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bk_bat_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
- .open = ab8500_gpadc_bk_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bk_bat_v);
-static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
{
int die_temp_raw;
int die_temp_convert;
@@ -2063,21 +1905,9 @@ static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_die_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_die_temp);
-static const struct file_operations ab8500_gpadc_die_temp_fops = {
- .open = ab8500_gpadc_die_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
{
int usb_id_raw;
int usb_id_convert;
@@ -2094,20 +1924,9 @@ static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_id_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_id_fops = {
- .open = ab8500_gpadc_usb_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_id);
-static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
{
int xtal_temp_raw;
int xtal_temp_convert;
@@ -2124,21 +1943,9 @@ static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_xtal_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_xtal_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_xtal_temp);
-static const struct file_operations ab8540_gpadc_xtal_temp_fops = {
- .open = ab8540_gpadc_xtal_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2156,22 +1963,9 @@ static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas);
-static const struct file_operations ab8540_gpadc_vbat_true_meas_fops = {
- .open = ab8540_gpadc_vbat_true_meas_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -2197,22 +1991,9 @@ static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_ctrl_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_ctrl_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_ctrl_and_ibat_fops = {
- .open = ab8540_gpadc_bat_ctrl_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_ctrl_and_ibat);
-static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_meas_raw;
int vbat_meas_convert;
@@ -2237,23 +2018,9 @@ static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_meas_and_ibat_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_meas_and_ibat);
-static const struct file_operations ab8540_gpadc_vbat_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
- void *p)
+static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2279,23 +2046,9 @@ static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations
-ab8540_gpadc_vbat_true_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_true_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas_and_ibat);
-static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
{
int bat_temp_raw;
int bat_temp_convert;
@@ -2320,22 +2073,9 @@ static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_temp_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_temp_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_temp_and_ibat_fops = {
- .open = ab8540_gpadc_bat_temp_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_temp_and_ibat);
-static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
{
struct ab8500_gpadc *gpadc;
u16 vmain_l, vmain_h, btemp_l, btemp_h;
@@ -2359,18 +2099,7 @@ static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_otp_cal_print, inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_otp_calib_fops = {
- .open = ab8540_gpadc_otp_cal_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_otp_calib);
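
For context on the conversions above: DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates exactly the open helper and file_operations boilerplate these hunks delete. Roughly, a single invocation expands to the following (with "foo" as a placeholder name):

	static int foo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, foo_show, inode->i_private);
	}

	static const struct file_operations foo_fops = {
		.owner	 = THIS_MODULE,
		.open	 = foo_open,
		.read	 = seq_read,
		.llseek	 = seq_lseek,
		.release = single_release,
	};

This is why each conversion only needs to rename the _print callback to _show: the macro derives foo_open and foo_fops from that suffix by token pasting.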
static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
{
@@ -2903,14 +2632,6 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
-static const struct file_operations ab8500_interrupts_fops = {
- .open = ab8500_interrupts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
static const struct file_operations ab8500_subscribe_fops = {
.open = ab8500_subscribe_unsubscribe_open,
.write = ab8500_subscribe_write,
@@ -2997,7 +2718,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
goto err;
file = debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
- &plf->dev, &ab8500_registers_fops);
+ &plf->dev, &ab8500_bank_registers_fops);
if (!file)
goto err;
diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c
index 064bde9cff5a..f684a93a3340 100644
--- a/drivers/mfd/atmel-flexcom.c
+++ b/drivers/mfd/atmel-flexcom.c
@@ -39,34 +39,43 @@
#define FLEX_MR_OPMODE(opmode) (((opmode) << FLEX_MR_OPMODE_OFFSET) & \
FLEX_MR_OPMODE_MASK)
+struct atmel_flexcom {
+ void __iomem *base;
+ u32 opmode;
+ struct clk *clk;
+};
static int atmel_flexcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct clk *clk;
struct resource *res;
- void __iomem *base;
- u32 opmode;
+ struct atmel_flexcom *ddata;
int err;
- err = of_property_read_u32(np, "atmel,flexcom-mode", &opmode);
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ err = of_property_read_u32(np, "atmel,flexcom-mode", &ddata->opmode);
if (err)
return err;
- if (opmode < ATMEL_FLEXCOM_MODE_USART ||
- opmode > ATMEL_FLEXCOM_MODE_TWI)
+ if (ddata->opmode < ATMEL_FLEXCOM_MODE_USART ||
+ ddata->opmode > ATMEL_FLEXCOM_MODE_TWI)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ ddata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk))
+ return PTR_ERR(ddata->clk);
- err = clk_prepare_enable(clk);
+ err = clk_prepare_enable(ddata->clk);
if (err)
return err;
@@ -76,9 +85,9 @@ static int atmel_flexcom_probe(struct platform_device *pdev)
* inaccessible and are read as zero. Also the external I/O lines of the
* Flexcom are muxed to reach the selected device.
*/
- writel(FLEX_MR_OPMODE(opmode), base + FLEX_MR);
+ writel(FLEX_MR_OPMODE(ddata->opmode), ddata->base + FLEX_MR);
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(ddata->clk);
return devm_of_platform_populate(&pdev->dev);
}
@@ -89,10 +98,34 @@ static const struct of_device_id atmel_flexcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, atmel_flexcom_of_match);
+#ifdef CONFIG_PM_SLEEP
+static int atmel_flexcom_resume(struct device *dev)
+{
+ struct atmel_flexcom *ddata = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ err = clk_prepare_enable(ddata->clk);
+ if (err)
+ return err;
+
+ val = FLEX_MR_OPMODE(ddata->opmode);
+ writel(val, ddata->base + FLEX_MR);
+
+ clk_disable_unprepare(ddata->clk);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL,
+ atmel_flexcom_resume);
+
static struct platform_driver atmel_flexcom_driver = {
.probe = atmel_flexcom_probe,
.driver = {
.name = "atmel_flexcom",
+ .pm = &atmel_flexcom_pm_ops,
.of_match_table = atmel_flexcom_of_match,
},
};
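
The #ifdef CONFIG_PM_SLEEP guard above matters because SIMPLE_DEV_PM_OPS() only wires up its suspend/resume pointers when sleep support is enabled; an unguarded handler would otherwise trigger a defined-but-unused warning. A minimal sketch of the same pattern in a hypothetical driver (the foo_* names and FOO_MODE register are placeholders, not part of this patch):

	#ifdef CONFIG_PM_SLEEP
	static int foo_resume(struct device *dev)
	{
		struct foo *priv = dev_get_drvdata(dev);

		/* re-program registers lost while the power domain was off */
		writel(priv->saved_mode, priv->base + FOO_MODE);
		return 0;
	}
	#endif

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);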
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 2468b431bb22..e94c72c2faa2 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -129,6 +129,7 @@ static const struct regmap_range axp288_volatile_ranges[] = {
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT),
+ regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
@@ -878,6 +879,9 @@ static struct mfd_cell axp813_cells[] = {
.resources = axp803_pek_resources,
}, {
.name = "axp20x-regulator",
+ }, {
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
}
};
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index b0ca5a4c841e..d61024141e2b 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -40,13 +40,13 @@ static struct cros_ec_platform pd_p = {
};
static const struct mfd_cell ec_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &ec_p,
.pdata_size = sizeof(ec_p),
};
static const struct mfd_cell ec_pd_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &pd_p,
.pdata_size = sizeof(pd_p),
};
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index cf6c4f0846b8..e4fafdd96e5e 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -25,9 +25,10 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include "cros_ec_debugfs.h"
#include "cros_ec_dev.h"
+#define DRV_NAME "cros-ec-dev"
+
/* Device variables */
#define CROS_MAX_DEV 128
static int ec_major;
@@ -461,7 +462,7 @@ static int ec_device_remove(struct platform_device *pdev)
}
static const struct platform_device_id cros_ec_id[] = {
- { "cros-ec-ctl", 0 },
+ { DRV_NAME, 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, cros_ec_id);
@@ -493,7 +494,7 @@ static const struct dev_pm_ops cros_ec_dev_pm_ops = {
static struct platform_driver cros_ec_dev_driver = {
.driver = {
- .name = "cros-ec-ctl",
+ .name = DRV_NAME,
.pm = &cros_ec_dev_pm_ops,
},
.probe = ec_device_probe,
@@ -544,6 +545,7 @@ static void __exit cros_ec_dev_exit(void)
module_init(cros_ec_dev_init);
module_exit(cros_ec_dev_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
MODULE_VERSION("1.0");
diff --git a/drivers/platform/chrome/cros_ec_dev.h b/drivers/mfd/cros_ec_dev.h
index 45e9453608c5..45e9453608c5 100644
--- a/drivers/platform/chrome/cros_ec_dev.h
+++ b/drivers/mfd/cros_ec_dev.h
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 59c82cdcf48d..1b52b8557034 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -72,8 +72,7 @@
* struct cros_ec_spi - information about a SPI-connected EC
*
* @spi: SPI device we are connected to
- * @last_transfer_ns: time that we last finished a transfer, or 0 if there
- * if no record
+ * @last_transfer_ns: time that we last finished a transfer.
* @start_of_msg_delay: used to set the delay_usecs on the spi_transfer that
* is sent when we want to turn on CS at the start of a transaction.
* @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
@@ -379,18 +378,15 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
u8 sum;
u8 rx_byte;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
@@ -509,18 +505,15 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
u8 rx_byte;
int sum;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
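
Dropping the last_transfer_ns guard in both transfer paths is safe because the field starts out zero: on the very first transfer, ktime_get_ns() - 0 yields the current monotonic time in nanoseconds, which is far larger than EC_SPI_RECOVERY_TIME_NS, so the ndelay() branch is simply skipped. A sketch of the resulting recovery-wait logic (ec_spi_recovery_wait is a hypothetical helper, not part of the patch):

	static void ec_spi_recovery_wait(struct cros_ec_spi *ec_spi)
	{
		/* ns elapsed since the previous transfer (or since boot) */
		unsigned long delay = ktime_get_ns() - ec_spi->last_transfer_ns;

		if (delay < EC_SPI_RECOVERY_TIME_NS)
			ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
	}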
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 0e0ab9bb1530..9e545eb6e8b4 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -450,6 +450,8 @@ int intel_lpss_probe(struct device *dev,
if (ret)
goto err_remove_ltr;
+ dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
return 0;
err_remove_ltr:
@@ -478,7 +480,9 @@ EXPORT_SYMBOL_GPL(intel_lpss_remove);
static int resume_lpss_device(struct device *dev, void *data)
{
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
+
return 0;
}
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 36adf9e8153e..274306d98ac1 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -16,7 +16,6 @@
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 55d824b3a808..390b27cb2c2e 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -458,7 +458,7 @@ static int kempld_probe(struct platform_device *pdev)
return -EINVAL;
pld->io_base = devm_ioport_map(dev, ioport->start,
- ioport->end - ioport->start);
+ resource_size(ioport));
if (!pld->io_base)
return -ENOMEM;
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index cf1120abbf52..53dc1a43472c 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -1143,11 +1143,6 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
res->end = res->start + SPIBASE_APL_SZ - 1;
pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_bus_write_config_dword(bus, spi, BCR, bcr);
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- }
info->writeable = !!(bcr & BCR_WPD);
}
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index dc5caeaaa6a1..da9612dbb222 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -15,7 +15,6 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 3922a93f9f92..663a2398b6b1 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -430,6 +430,7 @@ static void palmas_power_off(void)
{
unsigned int addr;
int ret, slave;
+ u8 powerhold_mask;
struct device_node *np = palmas_dev->dev->of_node;
if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
@@ -437,8 +438,15 @@ static void palmas_power_off(void)
PALMAS_PRIMARY_SECONDARY_PAD2);
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
+ if (of_device_is_compatible(np, "ti,tps65917"))
+ powerhold_mask =
+ TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK;
+ else
+ powerhold_mask =
+ PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK;
+
ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
- PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
+ powerhold_mask, 0);
if (ret)
dev_err(palmas_dev->dev,
"Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 6155d123a84e..f952dff6765f 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -149,7 +149,7 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
*pdev = platform_device_alloc(name, -1);
if (!*pdev) {
- dev_err(pcf->dev, "Falied to allocate %s\n", name);
+ dev_err(pcf->dev, "Failed to allocate %s\n", name);
return;
}
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
new file mode 100644
index 000000000000..5c858e784a89
--- /dev/null
+++ b/drivers/mfd/rave-sp.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Multifunction core driver for the Zodiac Inflight Innovations RAVE
+ * Supervisory Processor (SP) MCU that is connected via a dedicated UART
+ * port
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#include <linux/atomic.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <asm/unaligned.h>
+
+/*
+ * The UART protocol uses the following entities:
+ * - message to MCU => ACK response
+ * - event from MCU => event ACK
+ *
+ * Frame structure:
+ * <STX> <DATA> <CHECKSUM> <ETX>
+ * Where:
+ * - STX - start of transmission character
+ * - ETX - end of transmission character
+ * - DATA - payload
+ * - CHECKSUM - checksum calculated on <DATA>
+ *
+ * If <DATA> or <CHECKSUM> contain one of the control characters, it is
+ * escaped using the <DLE> control code. The added <DLE> does not
+ * participate in checksum calculation.
+ */
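
To make the framing and escaping concrete, here is a worked example as a sketch, assuming the legacy 8-bit two's-complement checksum defined further down in this file:

	/*
	 * DATA = { 0x10, 0x20 }: the checksum is 1 + ~(0x10 + 0x20) = 0xd0,
	 * and since 0x10 is the DLE control code, that data byte itself
	 * must be escaped on the wire:
	 */
	static const u8 example_frame[] = {
		0x02,		/* STX */
		0x10, 0x10,	/* DLE + escaped data byte 0x10 */
		0x20,		/* data byte 0x20 */
		0xd0,		/* checksum over the unescaped DATA */
		0x03,		/* ETX */
	};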
+#define RAVE_SP_STX 0x02
+#define RAVE_SP_ETX 0x03
+#define RAVE_SP_DLE 0x10
+
+#define RAVE_SP_MAX_DATA_SIZE 64
+#define RAVE_SP_CHECKSUM_SIZE 2 /* Worst case scenario on RDU2 */
+/*
+ * We don't store STX, ETX or the DLE escape bytes, so Rx needs room
+ * only for DATA + CSUM
+ */
+#define RAVE_SP_RX_BUFFER_SIZE \
+ (RAVE_SP_MAX_DATA_SIZE + RAVE_SP_CHECKSUM_SIZE)
+
+#define RAVE_SP_STX_ETX_SIZE 2
+/*
+ * For Tx we have to have space for everything: STX, ETX and the
+ * potentially DLE-stuffed DATA + CSUM
+ */
+#define RAVE_SP_TX_BUFFER_SIZE \
+ (RAVE_SP_STX_ETX_SIZE + 2 * RAVE_SP_RX_BUFFER_SIZE)
+
+#define RAVE_SP_BOOT_SOURCE_GET 0
+#define RAVE_SP_BOOT_SOURCE_SET 1
+
+#define RAVE_SP_RDU2_BOARD_TYPE_RMB 0
+#define RAVE_SP_RDU2_BOARD_TYPE_DEB 1
+
+#define RAVE_SP_BOOT_SOURCE_SD 0
+#define RAVE_SP_BOOT_SOURCE_EMMC 1
+#define RAVE_SP_BOOT_SOURCE_NOR 2
+
+/**
+ * enum rave_sp_deframer_state - Possible state for de-framer
+ *
+ * @RAVE_SP_EXPECT_SOF: Scanning input for start-of-frame marker
+ * @RAVE_SP_EXPECT_DATA: Got start of frame marker, collecting frame
+ * @RAVE_SP_EXPECT_ESCAPED_DATA: Got escape character, collecting escaped byte
+ */
+enum rave_sp_deframer_state {
+ RAVE_SP_EXPECT_SOF,
+ RAVE_SP_EXPECT_DATA,
+ RAVE_SP_EXPECT_ESCAPED_DATA,
+};
+
+/**
+ * struct rave_sp_deframer - Device protocol deframer
+ *
+ * @state: Current state of the deframer
+ * @data: Buffer used to collect deframed data
+ * @length: Number of bytes de-framed so far
+ */
+struct rave_sp_deframer {
+ enum rave_sp_deframer_state state;
+ unsigned char data[RAVE_SP_RX_BUFFER_SIZE];
+ size_t length;
+};
+
+/**
+ * struct rave_sp_reply - Reply as per RAVE device protocol
+ *
+ * @length: Expected reply length
+ * @data: Buffer to store reply payload in
+ * @code: Expected reply code
+ * @ackid: Expected reply ACK ID
+ * @received: Successful reply reception completion
+ */
+struct rave_sp_reply {
+ size_t length;
+ void *data;
+ u8 code;
+ u8 ackid;
+ struct completion received;
+};
+
+/**
+ * struct rave_sp_checksum - Variant specific checksum implementation details
+ *
+ * @length: Calculated checksum length
+ * @subroutine: Utilized checksum algorithm implementation
+ */
+struct rave_sp_checksum {
+ size_t length;
+ void (*subroutine)(const u8 *, size_t, u8 *);
+};
+
+/**
+ * struct rave_sp_variant_cmds - Variant specific command routines
+ *
+ * @translate: Generic to variant specific command mapping routine
+ *
+ */
+struct rave_sp_variant_cmds {
+ int (*translate)(enum rave_sp_command);
+};
+
+/**
+ * struct rave_sp_variant - RAVE supervisory processor core variant
+ *
+ * @checksum: Variant specific checksum implementation
+ * @cmd: Variant specific command pointer table
+ *
+ */
+struct rave_sp_variant {
+ const struct rave_sp_checksum *checksum;
+ struct rave_sp_variant_cmds cmd;
+};
+
+/**
+ * struct rave_sp - RAVE supervisory processor core
+ *
+ * @serdev: Pointer to underlying serdev
+ * @deframer: Stored state of the protocol deframer
+ * @ackid: ACK ID used in last reply sent to the device
+ * @bus_lock: Lock to serialize access to the device
+ * @reply_lock: Lock protecting @reply
+ * @reply: Pointer to memory to store reply payload
+ *
+ * @variant: Device variant specific information
+ * @event_notifier_list: Input event notification chain
+ *
+ */
+struct rave_sp {
+ struct serdev_device *serdev;
+ struct rave_sp_deframer deframer;
+ atomic_t ackid;
+ struct mutex bus_lock;
+ struct mutex reply_lock;
+ struct rave_sp_reply *reply;
+
+ const struct rave_sp_variant *variant;
+ struct blocking_notifier_head event_notifier_list;
+};
+
+static bool rave_sp_id_is_event(u8 code)
+{
+ return (code & 0xF0) == RAVE_SP_EVNT_BASE;
+}
+
+static void rave_sp_unregister_event_notifier(struct device *dev, void *res)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block *nb = *(struct notifier_block **)res;
+ struct blocking_notifier_head *bnh = &sp->event_notifier_list;
+
+ WARN_ON(blocking_notifier_chain_unregister(bnh, nb));
+}
+
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block **rcnb;
+ int ret;
+
+ rcnb = devres_alloc(rave_sp_unregister_event_notifier,
+ sizeof(*rcnb), GFP_KERNEL);
+ if (!rcnb)
+ return -ENOMEM;
+
+ ret = blocking_notifier_chain_register(&sp->event_notifier_list, nb);
+ if (!ret) {
+ *rcnb = nb;
+ devres_add(dev, rcnb);
+ } else {
+ devres_free(rcnb);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_rave_sp_register_event_notifier);
+
+static void csum_8b2c(const u8 *buf, size_t size, u8 *crc)
+{
+ *crc = *buf++;
+ size--;
+
+ while (size--)
+ *crc += *buf++;
+
+ *crc = 1 + ~(*crc);
+}
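
The 8b2c checksum is the two's complement of the byte-wise payload sum, so appending it makes the 8-bit sum of payload plus checksum come out to zero. A hypothetical receiver-side check built on that property (a sketch, not part of this driver):

	static bool csum_8b2c_ok(const u8 *buf, size_t size, u8 crc)
	{
		u8 sum = crc;

		/* payload bytes plus a valid checksum must sum to 0 mod 256 */
		while (size--)
			sum += *buf++;

		return sum == 0;
	}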
+
+static void csum_ccitt(const u8 *buf, size_t size, u8 *crc)
+{
+ const u16 calculated = crc_ccitt_false(0xffff, buf, size);
+
+ /*
+ * While the rest of the wire protocol is little-endian,
+ * CCITT-16 CRC in RDU2 device is sent out in big-endian order.
+ */
+ put_unaligned_be16(calculated, crc);
+}
+
+static void *stuff(unsigned char *dest, const unsigned char *src, size_t n)
+{
+ while (n--) {
+ const unsigned char byte = *src++;
+
+ switch (byte) {
+ case RAVE_SP_STX:
+ case RAVE_SP_ETX:
+ case RAVE_SP_DLE:
+ *dest++ = RAVE_SP_DLE;
+ /* FALLTHROUGH */
+ default:
+ *dest++ = byte;
+ }
+ }
+
+ return dest;
+}
+
+static int rave_sp_write(struct rave_sp *sp, const u8 *data, u8 data_size)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ unsigned char frame[RAVE_SP_TX_BUFFER_SIZE];
+ unsigned char crc[RAVE_SP_CHECKSUM_SIZE];
+ unsigned char *dest = frame;
+ size_t length;
+
+ if (WARN_ON(checksum_length > sizeof(crc)))
+ return -ENOMEM;
+
+ if (WARN_ON(data_size > sizeof(frame)))
+ return -ENOMEM;
+
+ sp->variant->checksum->subroutine(data, data_size, crc);
+
+ *dest++ = RAVE_SP_STX;
+ dest = stuff(dest, data, data_size);
+ dest = stuff(dest, crc, checksum_length);
+ *dest++ = RAVE_SP_ETX;
+
+ length = dest - frame;
+
+ print_hex_dump(KERN_DEBUG, "rave-sp tx: ", DUMP_PREFIX_NONE,
+ 16, 1, frame, length, false);
+
+ return serdev_device_write(sp->serdev, frame, length, HZ);
+}
+
+static u8 rave_sp_reply_code(u8 command)
+{
+ /*
+ * There isn't a single rule that describes the command code ->
+ * ACK code transformation, but, going through various
+ * versions of the ICDs, there appear to be three distinct groups
+ * that can each be described by a simple transformation.
+ */
+ switch (command) {
+ case 0xA0 ... 0xBE:
+ /*
+ * Commands implemented by firmware found in RDU1 and
+ * older devices all seem to obey the following rule
+ */
+ return command + 0x20;
+ case 0xE0 ... 0xEF:
+ /*
+ * Events emitted by all versions of the firmware use the
+ * least significant bit to get an ACK code
+ */
+ return command | 0x01;
+ default:
+ /*
+ * Commands implemented by firmware found in RDU2 are
+ * similar to "old" commands, but they use slightly
+ * different offset
+ */
+ return command + 0x40;
+ }
+}
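
A few spot checks of the mapping, derived purely from the case ranges above:

	/*
	 * rave_sp_reply_code(0xa1) == 0xc1   RDU1-era command, +0x20
	 * rave_sp_reply_code(0xe2) == 0xe3   event, |0x01
	 * rave_sp_reply_code(0x20) == 0x60   RDU2-era command, +0x40
	 */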
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size)
+{
+ struct rave_sp_reply reply = {
+ .data = reply_data,
+ .length = reply_data_size,
+ .received = COMPLETION_INITIALIZER_ONSTACK(reply.received),
+ };
+ unsigned char *data = __data;
+ int command, ret = 0;
+ u8 ackid;
+
+ command = sp->variant->cmd.translate(data[0]);
+ if (command < 0)
+ return command;
+
+ ackid = atomic_inc_return(&sp->ackid);
+ reply.ackid = ackid;
+ reply.code = rave_sp_reply_code((u8)command);
+
+ mutex_lock(&sp->bus_lock);
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = &reply;
+ mutex_unlock(&sp->reply_lock);
+
+ data[0] = command;
+ data[1] = ackid;
+
+ rave_sp_write(sp, data, data_size);
+
+ if (!wait_for_completion_timeout(&reply.received, HZ)) {
+ dev_err(&sp->serdev->dev, "Command timeout\n");
+ ret = -ETIMEDOUT;
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = NULL;
+ mutex_unlock(&sp->reply_lock);
+ }
+
+ mutex_unlock(&sp->bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rave_sp_exec);
+
+static void rave_sp_receive_event(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ u8 cmd[] = {
+ [0] = rave_sp_reply_code(data[0]),
+ [1] = data[1],
+ };
+
+ rave_sp_write(sp, cmd, sizeof(cmd));
+
+ blocking_notifier_call_chain(&sp->event_notifier_list,
+ rave_sp_action_pack(data[0], data[2]),
+ NULL);
+}
+
+static void rave_sp_receive_reply(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ struct device *dev = &sp->serdev->dev;
+ struct rave_sp_reply *reply;
+ const size_t payload_length = length - 2;
+
+ mutex_lock(&sp->reply_lock);
+ reply = sp->reply;
+
+ if (reply) {
+ if (reply->code == data[0] && reply->ackid == data[1] &&
+ payload_length >= reply->length) {
+ /*
+ * We are relying on memcpy(dst, src, 0) to be a no-op
+ * when handling commands that have a no-payload reply
+ */
+ memcpy(reply->data, &data[2], reply->length);
+ complete(&reply->received);
+ sp->reply = NULL;
+ } else {
+ dev_err(dev, "Ignoring incorrect reply\n");
+ dev_dbg(dev, "Code: expected = 0x%08x received = 0x%08x\n",
+ reply->code, data[0]);
+ dev_dbg(dev, "ACK ID: expected = 0x%08x received = 0x%08x\n",
+ reply->ackid, data[1]);
+ dev_dbg(dev, "Length: expected = %zu received = %zu\n",
+ reply->length, payload_length);
+ }
+ }
+
+ mutex_unlock(&sp->reply_lock);
+}
+
+static void rave_sp_receive_frame(struct rave_sp *sp,
+ const unsigned char *data,
+ size_t length)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ const size_t payload_length = length - checksum_length;
+ const u8 *crc_reported = &data[payload_length];
+ struct device *dev = &sp->serdev->dev;
+ u8 crc_calculated[checksum_length];
+
+ print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
+ 16, 1, data, length, false);
+
+ if (unlikely(length <= checksum_length)) {
+ dev_warn(dev, "Dropping short frame\n");
+ return;
+ }
+
+ sp->variant->checksum->subroutine(data, payload_length,
+ crc_calculated);
+
+ if (memcmp(crc_calculated, crc_reported, checksum_length)) {
+ dev_warn(dev, "Dropping bad frame\n");
+ return;
+ }
+
+ if (rave_sp_id_is_event(data[0]))
+ rave_sp_receive_event(sp, data, length);
+ else
+ rave_sp_receive_reply(sp, data, length);
+}
+
+static int rave_sp_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t size)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp = dev_get_drvdata(dev);
+ struct rave_sp_deframer *deframer = &sp->deframer;
+ const unsigned char *src = buf;
+ const unsigned char *end = buf + size;
+
+ while (src < end) {
+ const unsigned char byte = *src++;
+
+ switch (deframer->state) {
+ case RAVE_SP_EXPECT_SOF:
+ if (byte == RAVE_SP_STX)
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+
+ case RAVE_SP_EXPECT_DATA:
+ /*
+ * Treat special byte values first
+ */
+ switch (byte) {
+ case RAVE_SP_ETX:
+ rave_sp_receive_frame(sp,
+ deframer->data,
+ deframer->length);
+ /*
+ * Once we have extracted a complete
+ * frame out of the stream, we are
+ * done: bail out and reset the
+ * deframer to its initial state,
+ * regardless of whether we've
+ * consumed all of the stream or not.
+ */
+ goto reset_framer;
+ case RAVE_SP_STX:
+ dev_warn(dev, "Bad frame: STX before ETX\n");
+ /*
+ * If we encounter a second "start of
+ * frame" marker before seeing the
+ * corresponding "end of frame", we
+ * reset the framer and ignore both
+ * the frame started by the first SOF
+ * and the frame started by the
+ * current SOF.
+ *
+ * NOTE: This means that only a frame
+ * started by a third SOF, sent after
+ * this one, will have a chance to get
+ * through.
+ */
+ goto reset_framer;
+ case RAVE_SP_DLE:
+ deframer->state = RAVE_SP_EXPECT_ESCAPED_DATA;
+ /*
+ * If we encounter an escape sequence we
+ * need to skip it and collect the
+ * byte that follows. We do it by
+ * forcing the next iteration of the
+ * encompassing while loop.
+ */
+ continue;
+ }
+ /*
+ * For the rest of the bytes, which are not
+ * special in any way, we do the same thing
+ * that we do with escaped data - collect
+ * them in the deframer buffer
+ */
+
+ /* FALLTHROUGH */
+
+ case RAVE_SP_EXPECT_ESCAPED_DATA:
+ deframer->data[deframer->length++] = byte;
+
+ if (deframer->length == sizeof(deframer->data)) {
+ dev_warn(dev, "Bad frame: Too long\n");
+ /*
+ * If the amount of data we've
+ * accumulated for the current frame
+ * exceeds the capacity of the
+ * deframer's buffer, there's nothing
+ * else we can do but discard that
+ * data and start assembling a new
+ * frame again
+ */
+ goto reset_framer;
+ }
+
+ /*
+ * We've collected the (possibly escaped)
+ * byte, so now we can go back to regular
+ * data collection
+ */
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+ }
+ }
+
+ /*
+ * The only way to get out of the above loop and end up here
+ * is through consuming all of the supplied data, so here we
+ * report that we processed it all.
+ */
+ return size;
+
+reset_framer:
+ /*
+ * NOTE: A number of codepaths that will drop us here will do
+ * so before consuming all 'size' bytes of the data passed by
+ * serdev layer. We rely on the fact that serdev layer will
+ * re-execute this handler with the remainder of the Rx bytes
+ * once we report actual number of bytes that we processed.
+ */
+ deframer->state = RAVE_SP_EXPECT_SOF;
+ deframer->length = 0;
+
+ return src - buf;
+}
+
+static int rave_sp_rdu1_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_STATUS &&
+ command <= RAVE_SP_CMD_CONTROL_EVENTS)
+ return command;
+
+ return -EINVAL;
+}
+
+static int rave_sp_rdu2_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_GET_FIRMWARE_VERSION &&
+ command <= RAVE_SP_CMD_GET_GPIO_STATE)
+ return command;
+
+ if (command == RAVE_SP_CMD_REQ_COPPER_REV) {
+ /*
+ * As per RDU2 ICD 3.4.47 CMD_GET_COPPER_REV code is
+ * different from that for RDU1 and it is set to 0x28.
+ */
+ return 0x28;
+ }
+
+ return rave_sp_rdu1_cmd_translate(command);
+}
+
+static int rave_sp_default_cmd_translate(enum rave_sp_command command)
+{
+ /*
+ * All of the following command codes were taken from "Table :
+ * Communications Protocol Message Types" in section 3.3
+ * "MESSAGE TYPES" of Rave PIC24 ICD.
+ */
+ switch (command) {
+ case RAVE_SP_CMD_GET_FIRMWARE_VERSION:
+ return 0x11;
+ case RAVE_SP_CMD_GET_BOOTLOADER_VERSION:
+ return 0x12;
+ case RAVE_SP_CMD_BOOT_SOURCE:
+ return 0x14;
+ case RAVE_SP_CMD_SW_WDT:
+ return 0x1C;
+ case RAVE_SP_CMD_RESET:
+ return 0x1E;
+ case RAVE_SP_CMD_RESET_REASON:
+ return 0x1F;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct rave_sp_checksum rave_sp_checksum_8b2c = {
+ .length = 1,
+ .subroutine = csum_8b2c,
+};
+
+static const struct rave_sp_checksum rave_sp_checksum_ccitt = {
+ .length = 2,
+ .subroutine = csum_ccitt,
+};
+
+static const struct rave_sp_variant rave_sp_legacy = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_default_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu1 = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_rdu1_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu2 = {
+ .checksum = &rave_sp_checksum_ccitt,
+ .cmd = {
+ .translate = rave_sp_rdu2_cmd_translate,
+ },
+};
+
+static const struct of_device_id rave_sp_dt_ids[] = {
+ { .compatible = "zii,rave-sp-niu", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-mezz", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-esb", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-rdu1", .data = &rave_sp_rdu1 },
+ { .compatible = "zii,rave-sp-rdu2", .data = &rave_sp_rdu2 },
+ { /* sentinel */ }
+};
+
+static const struct serdev_device_ops rave_sp_serdev_device_ops = {
+ .receive_buf = rave_sp_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int rave_sp_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp;
+ u32 baud;
+ int ret;
+
+ if (of_property_read_u32(dev->of_node, "current-speed", &baud)) {
+ dev_err(dev,
+ "'current-speed' is not specified in device node\n");
+ return -EINVAL;
+ }
+
+ sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+
+ sp->serdev = serdev;
+ dev_set_drvdata(dev, sp);
+
+ sp->variant = of_device_get_match_data(dev);
+ if (!sp->variant)
+ return -ENODEV;
+
+ mutex_init(&sp->bus_lock);
+ mutex_init(&sp->reply_lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&sp->event_notifier_list);
+
+ serdev_device_set_client_ops(serdev, &rave_sp_serdev_device_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, baud);
+
+ return devm_of_platform_populate(dev);
+}
+
+MODULE_DEVICE_TABLE(of, rave_sp_dt_ids);
+
+static struct serdev_device_driver rave_sp_drv = {
+ .probe = rave_sp_probe,
+ .driver = {
+ .name = "rave-sp",
+ .of_match_table = rave_sp_dt_ids,
+ },
+};
+module_serdev_device_driver(rave_sp_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP core driver");
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index 075330a25f61..a00f99f36559 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer parent driver.
- *
* Copyright (C) STMicroelectronics 2017
- *
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
* Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-lptimer.h>
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index a6675a449409..1d347e5dfa79 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-timers.h>
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index b93fe4c4957a..7eaa40bc703f 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -13,6 +13,7 @@
*/
#include <linux/err.h>
+#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -87,6 +88,24 @@ static struct syscon *of_syscon_register(struct device_node *np)
if (ret)
reg_io_width = 4;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
+ syscon_config.use_hwlock = true;
+ syscon_config.hwlock_id = ret;
+ syscon_config.hwlock_mode = HWLOCK_IRQSTATE;
+ } else if (ret < 0) {
+ switch (ret) {
+ case -ENOENT:
+ /* Ignore missing hwlock, it's optional. */
+ break;
+ default:
+ pr_err("Failed to retrieve valid hwlock: %d\n", ret);
+ /* fall-through */
+ case -EPROBE_DEFER:
+ goto err_regmap;
+ }
+ }
+
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
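
The double condition "ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)" exists because, with CONFIG_HWSPINLOCK=n, the hwspinlock API is stubbed out and of_hwspin_lock_get_id() always returns 0, so a zero hwlock id is only meaningful when the subsystem is actually built in. The stub looks roughly like this (see <linux/hwspinlock.h>):

	static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
	{
		return 0;
	}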
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 0f3fab47fe48..3cd958a31f36 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -124,7 +124,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
struct ti_tscadc_dev *tscadc;
struct resource *res;
struct clk *clk;
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node;
struct mfd_cell *cell;
struct property *prop;
const __be32 *cur;
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
index 83af78c1b0eb..ebf54cc28f7a 100644
--- a/drivers/mfd/tmio_core.c
+++ b/drivers/mfd/tmio_core.c
@@ -9,6 +9,26 @@
#include <linux/export.h>
#include <linux/mfd/tmio.h>
+#define CNF_CMD 0x04
+#define CNF_CTL_BASE 0x10
+#define CNF_INT_PIN 0x3d
+#define CNF_STOP_CLK_CTL 0x40
+#define CNF_GCLK_CTL 0x41
+#define CNF_SD_CLK_MODE 0x42
+#define CNF_PIN_STATUS 0x44
+#define CNF_PWR_CTL_1 0x48
+#define CNF_PWR_CTL_2 0x49
+#define CNF_PWR_CTL_3 0x4a
+#define CNF_CARD_DETECT_MODE 0x4c
+#define CNF_SD_SLOT 0x50
+#define CNF_EXT_GCLK_CTL_1 0xf0
+#define CNF_EXT_GCLK_CTL_2 0xf1
+#define CNF_EXT_GCLK_CTL_3 0xf9
+#define CNF_SD_LED_EN_1 0xfa
+#define CNF_SD_LED_EN_2 0xfe
+
+#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
{
/* Enable the MMC/SD Control registers */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b20df970cbca..6722073e339b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -496,6 +496,10 @@ config PCI_ENDPOINT_TEST
Enable this configuration option to enable the host side test driver
for PCI Endpoint.
+config MISC_RTSX
+ tristate
+ default MISC_RTSX_PCI || MISC_RTSX_USB
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -508,4 +512,5 @@ source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/cardreader/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5ca5f64df478..8d8cc096063b 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
+obj-$(CONFIG_MISC_RTSX) += cardreader/
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
diff --git a/drivers/misc/cardreader/Kconfig b/drivers/misc/cardreader/Kconfig
new file mode 100644
index 000000000000..69e815e32a8c
--- /dev/null
+++ b/drivers/misc/cardreader/Kconfig
@@ -0,0 +1,20 @@
+config MISC_RTSX_PCI
+ tristate "Realtek PCI-E card reader"
+ depends on PCI
+ select MFD_CORE
+ help
+ This adds support for Realtek PCI-Express card readers including rts5209,
+ rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411 and rts5260.
+ Realtek card readers support access to many types of memory cards,
+ such as Memory Stick, Memory Stick Pro, Secure Digital and
+ MultiMediaCard.
+
+config MISC_RTSX_USB
+ tristate "Realtek USB card reader"
+ depends on USB
+ select MFD_CORE
+ help
+ Select this option to get support for Realtek USB 2.0 card readers
+ including RTS5129, RTS5139, RTS5179 and RTS5170.
+ Realtek card readers support access to many types of memory cards,
+ such as Memory Stick Pro, Secure Digital and MultiMediaCard.
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
new file mode 100644
index 000000000000..9fabfcc6fa7a
--- /dev/null
+++ b/drivers/misc/cardreader/Makefile
@@ -0,0 +1,4 @@
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
+
+obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
+obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/mfd/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index b3ae6592014a..434fd070d3e3 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5209.c b/drivers/misc/cardreader/rts5209.c
index b95beecf767f..ce68c48d8ec9 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5227.c b/drivers/misc/cardreader/rts5227.c
index ff296a4bf3d2..024dcba8d6c8 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 9ed9dc84eac8..9119261337cc 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 7fcf37ba922c..dbe013abdb83 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
@@ -738,4 +738,3 @@ void rts525a_init_params(struct rtsx_pcr *pcr)
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
}
-
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
new file mode 100644
index 000000000000..07cb93abf685
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.c
@@ -0,0 +1,748 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2016-2017 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Steven FENG <steven_feng@realsil.com.cn>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5260.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5260_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[6][3] = {
+ {0x94, 0x94, 0x94},
+ {0x11, 0x11, 0x18},
+ {0x55, 0x55, 0x5C},
+ {0x94, 0x94, 0x94},
+ {0x94, 0x94, 0x94},
+ {0xFF, 0xFF, 0xFF},
+ };
+ u8 driving_1v8[6][3] = {
+ {0x9A, 0x89, 0x89},
+ {0xC4, 0xC4, 0xC4},
+ {0x3C, 0x3C, 0x3C},
+ {0x9B, 0x99, 0x99},
+ {0x9A, 0x89, 0x89},
+ {0xFE, 0xFE, 0xFE},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
+}
+
+static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5260_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_ON);
+}
+
+static int rts5260_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_OFF);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
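
The pull-control tables above follow the rtsx convention of zero-terminated (register, value) pairs packed into a u32 via RTSX_REG_PAIR(addr, val), i.e. ((u32)(addr) << 16) | (u8)(val). The core walks such a table roughly like this sketch (a hypothetical loop; the real walker lives in rtsx_pcr.c):

	static void apply_reg_pairs(struct rtsx_pcr *pcr, const u32 *tbl)
	{
		while (*tbl) {
			u16 reg = (u16)(*tbl >> 16);	/* upper half: register */
			u8 val = (u8)*tbl;		/* lower byte: value */

			rtsx_pci_write_register(pcr, reg, 0xFF, val);
			tbl++;
		}
	}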
+
+static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWERON);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ msleep(20);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ sd_set_sample_push_timing_sd30(pcr);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
+
+ return err;
+}
+
+static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_17);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rts5260_fill_driving(pcr, voltage);
+ return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+}
+
+static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rts5260_stop_cmd(pcr);
+ rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
+
+ if (option->ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+}
+
+static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5260_card_before_power_off(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWEROFF);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ return err;
+}
+
+static void rts5260_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_THD_MASK,
+ option->sd_400mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_THD_MASK,
+ RTS5260_DVIO_OCP_THD_350);
+
+ rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
+ RTS5260_DV331812_OCP_THD_MASK,
+ RTS5260_DV331812_OCP_THD_210);
+
+ mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN, 0);
+ }
+}
+
+static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN);
+}
+
+static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0);
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+}
+
+int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+int rts5260_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_DV3318_OCPSTAT, val);
+}
+
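+/* Clear latched OCP status: assert the clear bits, wait 10us, then deassert them */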
+void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+ val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR);
+ udelay(10);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR, 0);
+}
+
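+/*
+ * Over-current event handler: latch the OCP status, power off the affected
+ * card, and keep its output disabled while an over-current condition is
+ * still flagged.
+ */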
+void rts5260_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+ rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_sd_power_off_card3v3(pcr);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_ms_power_off_card3v3(pcr);
+
+ if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) {
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER)))
+ rtsx_pci_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ pcr->ocp_stat2 = 0;
+ }
+
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ }
+}
+
+int rts5260_init_hw(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ rtsx_pci_init_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ /* Reset L1SUB Config */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CLK_FORCE_CTL,
+ CLK_PM_EN, CLK_PM_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWD_SUSPEND_EN, 0xFF, 0xFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ PWR_GATE_EN, PWR_GATE_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, REG_VREF,
+ PWD_SUSPND_EN, PWD_SUSPND_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RBCTL,
+ U_AUTO_DMA_EN_MASK, U_AUTO_DMA_DISABLE);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG,
+ OBFF_EN_MASK, OBFF_DISABLE);
+
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
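+/*
+ * Select L1 substate power-saving parameters (L1 vs L1.1 vs L1.2) from the
+ * ASPM/PM flags and program the L1.0 retention and analog PHY defaults.
+ */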
+static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
+{
+ int lss_l1_1, lss_l1_2;
+
+ lss_l1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_1_EN);
+ lss_l1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_2_EN);
+
+ if (lss_l1_2) {
+ pcr_dbg(pcr, "Set parameters for L1.2.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_2_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_2_PD_FE_EN);
+ } else if (lss_l1_1) {
+ pcr_dbg(pcr, "Set parameters for L1.1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_1_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_1_PD_FE_EN);
+ } else {
+ pcr_dbg(pcr, "Set parameters for L1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_0_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_0_PD_FE_EN);
+ }
+
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_DPHY_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_MAC_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD30_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD40_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_SYS_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ /* Option cut APHY */
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_0,
+ 0xFF, CFG_PCIE_APHY_OFF_0_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_1,
+ 0xFF, CFG_PCIE_APHY_OFF_1_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_2,
+ 0xFF, CFG_PCIE_APHY_OFF_2_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_3,
+ 0xFF, CFG_PCIE_APHY_OFF_3_DEFAULT);
+ /* CDR DEC */
+ rtsx_pci_write_register(pcr, PWC_CDR, 0xFF, PWC_CDR_DEFAULT);
+ /* PWMPFM */
+ rtsx_pci_write_register(pcr, CFG_LP_FPWM_VALUE,
+ 0xFF, CFG_LP_FPWM_VALUE_DEFAULT);
+ /* No Power Saving WA */
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_MISC_RET_VALUE,
+ 0xFF, CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT);
+}
+
+static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 lval;
+
+ rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+ rts5260_pwr_saving_setting(pcr);
+
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ /* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+ rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
+
+ rts5260_init_from_cfg(pcr);
+
+ /* force no MDIO */
+ rtsx_pci_write_register(pcr, RTS5260_AUTOLOAD_CFG4,
+ 0xFF, RTS5260_MIMO_DISABLE);
+ /* Modify SDVCC Tune Default Parameters */
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rts5260_init_hw(pcr);
+
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin is forced to
+ * drive low so that the clock is requested at all times.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ return 0;
+}
+
+void rts5260_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ if (!enable)
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+ int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5260_pcr_ops = {
+ .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+ .turn_on_led = rts5260_turn_on_led,
+ .turn_off_led = rts5260_turn_off_led,
+ .extra_init_hw = rts5260_extra_init_hw,
+ .enable_auto_blink = rtsx_base_enable_auto_blink,
+ .disable_auto_blink = rtsx_base_disable_auto_blink,
+ .card_power_on = rts5260_card_power_on,
+ .card_power_off = rts5260_card_power_off,
+ .switch_output_voltage = rts5260_switch_output_voltage,
+ .force_power_down = rtsx_base_force_power_down,
+ .stop_cmd = rts5260_stop_cmd,
+ .set_aspm = rts5260_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5260_enable_ocp,
+ .disable_ocp = rts5260_disable_ocp,
+ .init_ocp = rts5260_init_ocp,
+ .process_ocp = rts5260_process_ocp,
+ .get_ocpstat = rts5260_get_ocpstat,
+ .clear_ocpstat = rts5260_clear_ocpstat,
+};
+
+void rts5260_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5260_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5260_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5260_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rts5260_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5260_ms_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+
+ pcr->ops = &rts5260_pcr_ops;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ option->ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
+
+ option->ocp_en = 1;
+ if (option->ocp_en)
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U;
+ option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
+ option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
+}
diff --git a/drivers/misc/cardreader/rts5260.h b/drivers/misc/cardreader/rts5260.h
new file mode 100644
index 000000000000..53a1411c8868
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.h
@@ -0,0 +1,45 @@
+#ifndef __RTS5260_H__
+#define __RTS5260_H__
+
+#define RTS5260_DVCC_CTRL 0xFF73
+#define RTS5260_DVCC_OCP_EN (0x01 << 7)
+#define RTS5260_DVCC_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVCC_POWERON (0x01 << 3)
+#define RTS5260_DVCC_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DVIO_CTRL 0xFF75
+#define RTS5260_DVIO_OCP_EN (0x01 << 7)
+#define RTS5260_DVIO_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVIO_POWERON (0x01 << 3)
+#define RTS5260_DVIO_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DV331812_CFG 0xFF71
+#define RTS5260_DV331812_OCP_EN (0x01 << 7)
+#define RTS5260_DV331812_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DV331812_POWERON (0x01 << 3)
+#define RTS5260_DV331812_SEL (0x01 << 2)
+#define RTS5260_DV331812_VDD1 (0x01 << 2)
+#define RTS5260_DV331812_VDD2 (0x00 << 2)
+
+#define RTS5260_DV331812_OCP_THD_120 (0x00 << 4)
+#define RTS5260_DV331812_OCP_THD_140 (0x01 << 4)
+#define RTS5260_DV331812_OCP_THD_160 (0x02 << 4)
+#define RTS5260_DV331812_OCP_THD_180 (0x03 << 4)
+#define RTS5260_DV331812_OCP_THD_210 (0x04 << 4)
+#define RTS5260_DV331812_OCP_THD_240 (0x05 << 4)
+#define RTS5260_DV331812_OCP_THD_270 (0x06 << 4)
+#define RTS5260_DV331812_OCP_THD_300 (0x07 << 4)
+
+#define RTS5260_DVIO_OCP_THD_250 (0x00 << 4)
+#define RTS5260_DVIO_OCP_THD_300 (0x01 << 4)
+#define RTS5260_DVIO_OCP_THD_350 (0x02 << 4)
+#define RTS5260_DVIO_OCP_THD_400 (0x03 << 4)
+#define RTS5260_DVIO_OCP_THD_450 (0x04 << 4)
+#define RTS5260_DVIO_OCP_THD_500 (0x05 << 4)
+#define RTS5260_DVIO_OCP_THD_550 (0x06 << 4)
+#define RTS5260_DVIO_OCP_THD_600 (0x07 << 4)
+
+#define RTS5260_DVCC_OCP_THD_550 (0x00 << 4)
+#define RTS5260_DVCC_OCP_THD_970 (0x05 << 4)
+
+#endif /* __RTS5260_H__ */
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 590fb9aad77d..fd09b0960097 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -29,7 +29,7 @@
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>
@@ -62,6 +62,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -334,6 +335,9 @@ EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
+ if (pcr->ops->stop_cmd)
+ return pcr->ops->stop_cmd(pcr);
+
rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
@@ -826,7 +830,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return err;
/* Wait SSC clock stable */
- udelay(10);
+ udelay(SSC_CLOCK_STABLE_WAIT);
err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
if (err < 0)
return err;
@@ -963,6 +967,20 @@ static void rtsx_pci_card_detect(struct work_struct *work)
pcr->slots[RTSX_MS_CARD].p_dev);
}
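+/* Dispatch over-current handling to the chip-specific hook, if one is provided */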
+void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->process_ocp)
+ pcr->ops->process_ocp(pcr);
+}
+
+int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
+{
+ if (pcr->option.ocp_en)
+ rtsx_pci_process_ocp(pcr);
+
+ return 0;
+}
+
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
struct rtsx_pcr *pcr = dev_id;
@@ -987,6 +1005,9 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
int_reg &= (pcr->bier | 0x7FFFFF);
+ if (int_reg & SD_OC_INT)
+ rtsx_pci_process_ocp_interrupt(pcr);
+
if (int_reg & SD_INT) {
if (int_reg & SD_EXIST) {
pcr->card_inserted |= SD_EXIST;
@@ -1119,6 +1140,102 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
}
#endif
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->enable_ocp)
+ pcr->ops->enable_ocp(pcr);
+ else
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+}
+
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->disable_ocp)
+ pcr->ops->disable_ocp(pcr);
+ else
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+}
+
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->init_ocp) {
+ pcr->ops->init_ocp(pcr);
+ } else {
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ if (option->ocp_en) {
+ u8 val = option->sd_400mA_ocp_thd;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+ rtsx_pci_write_register(pcr, REG_OCPPARA1,
+ SD_OCP_TIME_MASK, SD_OCP_TIME_800);
+ rtsx_pci_write_register(pcr, REG_OCPPARA2,
+ SD_OCP_THD_MASK, val);
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH,
+ SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ /* OC power down */
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+ }
+ }
+}
+
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ if (pcr->ops->get_ocpstat)
+ return pcr->ops->get_ocpstat(pcr, val);
+ else
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->clear_ocpstat) {
+ pcr->ops->clear_ocpstat(pcr);
+ } else {
+ u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ }
+}
+
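+/*
+ * Power off a 3.3V SD card: gate the card clocks, disable the SD output and
+ * cut power, then wait 50ms before releasing the pad pull control.
+ */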
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+
+ rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+
+ msleep(50);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
+
+ return 0;
+}
+
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
+
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
+
+ return 0;
+}
+
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
int err;
@@ -1189,6 +1306,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_5250:
case PID_524A:
case PID_525A:
+ case PID_5260:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1265,6 +1383,9 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5286:
rtl8402_init_params(pcr);
break;
+ case 0x5260:
+ rts5260_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1543,6 +1664,9 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
rtsx_pci_power_off(pcr, HOST_ENTER_S1);
pci_disable_device(pcidev);
+ free_irq(pcr->irq, (void *)pcr);
+ if (pcr->msi_en)
+ pci_disable_msi(pcr->pci);
}
#else /* CONFIG_PM */
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index ec784e04fe20..6ea1655db0bb 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -22,7 +22,7 @@
#ifndef __RTSX_PCR_H
#define __RTSX_PCR_H
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#define MIN_DIV_N_PCR 80
#define MAX_DIV_N_PCR 208
@@ -44,6 +44,8 @@
#define ASPM_MASK_NEG 0xFC
#define MASK_8_BIT_DEF 0xFF
+#define SSC_CLOCK_STABLE_WAIT 130
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
@@ -57,6 +59,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr);
void rts524a_init_params(struct rtsx_pcr *pcr);
void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
+void rts5260_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
@@ -99,5 +102,12 @@ do { \
int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr);
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
#endif
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 59d61b04c197..b97903ff1a72 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -23,7 +23,7 @@
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
static int polling_pipe = 1;
module_param(polling_pipe, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 7c11bad5cded..753b1a698fc4 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -427,7 +427,7 @@ int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
-unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
+__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
return afu_poll(file, poll);
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index e46a4062904a..a798c2ccd67d 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -1081,7 +1081,7 @@ int afu_open(struct inode *inode, struct file *file);
int afu_release(struct inode *inode, struct file *file);
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int afu_mmap(struct file *file, struct vm_area_struct *vm);
-unsigned int afu_poll(struct file *file, struct poll_table_struct *poll);
+__poll_t afu_poll(struct file *file, struct poll_table_struct *poll);
ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
extern const struct file_operations afu_fops;
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 76c0b0ca9388..90341ccda9bd 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -354,10 +354,10 @@ static inline bool ctx_event_pending(struct cxl_context *ctx)
return false;
}
-unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
+__poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
{
struct cxl_context *ctx = file->private_data;
- int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 512a4897dbf6..7fd0bdc1436a 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -54,7 +54,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
return false;
}
- set_dma_ops(&dev->dev, &dma_direct_ops);
+ set_dma_ops(&dev->dev, &dma_nommu_ops);
set_dma_offset(&dev->dev, PAGE_OFFSET);
return _cxl_pci_associate_default_context(dev, afu);
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index cfdf0524b12e..35693c0a78e2 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -511,7 +511,7 @@ static ssize_t ilo_write(struct file *fp, const char __user *buf,
return err ? -EFAULT : len;
}
-static unsigned int ilo_poll(struct file *fp, poll_table *wait)
+static __poll_t ilo_poll(struct file *fp, poll_table *wait)
{
struct ccb_data *data = fp->private_data;
struct ccb *driver_ccb = &data->driver_ccb;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 8d53609861d8..e49888eab87d 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -651,7 +651,7 @@ out:
return retval;
}
-static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+static __poll_t lis3lv02d_misc_poll(struct file *file, poll_table *wait)
{
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index e825f013e54e..505b710291e6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -542,12 +542,12 @@ static long mei_compat_ioctl(struct file *file,
*
* Return: poll mask
*/
-static unsigned int mei_poll(struct file *file, poll_table *wait)
+static __poll_t mei_poll(struct file *file, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
bool notify_en;
if (WARN_ON(!cl || !cl->dev))
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index ddc9e4b08b5c..8a3e48ec37dd 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -1311,10 +1311,10 @@ static inline void _scif_poll_wait(struct file *f, wait_queue_head_t *wq,
spin_lock(&ep->lock);
}
-unsigned int
+__poll_t
__scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev_dbg(scif_info.mdev.this_device,
"SCIFAPI pollfd: ep %p %s\n", ep, scif_ep_states[ep->state]);
@@ -1389,7 +1389,8 @@ scif_poll(struct scif_pollepd *ufds, unsigned int nfds, long timeout_msecs)
{
struct poll_wqueues table;
poll_table *pt;
- int i, mask, count = 0, timed_out = timeout_msecs == 0;
+ int i, count = 0, timed_out = timeout_msecs == 0;
+ __poll_t mask;
u64 timeout = timeout_msecs < 0 ? MAX_SCHEDULE_TIMEOUT
: msecs_to_jiffies(timeout_msecs);
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
index 1771d7a9b8d0..f39b663da287 100644
--- a/drivers/misc/mic/scif/scif_epd.h
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -203,7 +203,7 @@ void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
int __scif_flush(scif_epd_t epd);
int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd);
-unsigned int __scif_pollfd(struct file *f, poll_table *wait,
+__poll_t __scif_pollfd(struct file *f, poll_table *wait,
struct scif_endpt *ep);
int __scif_pin_pages(void *addr, size_t len, int *out_prot,
int map_flags, scif_pinned_pages_t *pages);
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
index f7e826142a72..5c2a57ae4f85 100644
--- a/drivers/misc/mic/scif/scif_fd.c
+++ b/drivers/misc/mic/scif/scif_fd.c
@@ -41,7 +41,7 @@ static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
return scif_mmap(vma, priv);
}
-static unsigned int scif_fdpoll(struct file *f, poll_table *wait)
+static __poll_t scif_fdpoll(struct file *f, poll_table *wait)
{
struct scif_endpt *priv = f->private_data;
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index 27db64ec9efe..01d1f2ba7bb8 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -1015,10 +1015,10 @@ __unlock_ret:
* in the card->host (TX) path, when userspace is unblocked by poll it
* must drain all available descriptors or it can stall.
*/
-static unsigned int vop_poll(struct file *f, poll_table *wait)
+static __poll_t vop_poll(struct file *f, poll_table *wait)
{
struct vop_vdev *vdev = f->private_data;
- int mask = 0;
+ __poll_t mask = 0;
mutex_lock(&vdev->vdev_mutex);
if (vop_vdev_inited(vdev)) {
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 30754927fd80..8fa68cf308e0 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -256,10 +256,10 @@ static int phantom_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int phantom_poll(struct file *file, poll_table *wait)
+static __poll_t phantom_poll(struct file *file, poll_table *wait)
{
struct phantom_device *dev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter));
poll_wait(file, &dev->wait, wait);
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 9918eda0e05f..a3454eb56fbf 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -258,6 +258,7 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops gru_mmuops = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.invalidate_range_start = gru_invalidate_range_start,
.invalidate_range_end = gru_invalidate_range_end,
.release = gru_release,
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 8a16a26e9658..6640e7651533 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -166,11 +166,11 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
* This is used to wake up the VMX when a VMCI call arrives, or
* to wake up select() or poll() at the next clock tick.
*/
-static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
struct vmci_host_dev *vmci_host_dev = filp->private_data;
struct vmci_ctx *context = vmci_host_dev->context;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
/* Check for VMCI calls to this VM context. */
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8af5c2672f71..0339538c182d 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -129,23 +129,6 @@
* *_MEM state, and vice versa.
*/
-/*
- * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
- * types are passed around to enqueue and dequeue routines. Note that
- * often the functions passed are simply wrappers around memcpy
- * itself.
- *
- * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
- * there's an unused last parameter for the hosted side. In
- * ESX, that parameter holds a buffer type.
- */
-typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
- u64 queue_offset, const void *src,
- size_t src_offset, size_t size);
-typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
- const struct vmci_queue *queue,
- u64 queue_offset, size_t size);
-
/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
struct mutex __mutex; /* Protects the queue. */
@@ -351,11 +334,10 @@ static void *qp_alloc_queue(u64 size, u32 flags)
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
-static int __qp_memcpy_to_queue(struct vmci_queue *queue,
- u64 queue_offset,
- const void *src,
- size_t size,
- bool is_iovec)
+static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
+ u64 queue_offset,
+ struct iov_iter *from,
+ size_t size)
{
struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
size_t bytes_copied = 0;
@@ -380,23 +362,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
else
to_copy = size - bytes_copied;
- if (is_iovec) {
- struct msghdr *msg = (struct msghdr *)src;
- int err;
-
- /* The iovec will track bytes_copied internally. */
- err = memcpy_from_msg((u8 *)va + page_offset,
- msg, to_copy);
- if (err != 0) {
- if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
- return VMCI_ERROR_INVALID_ARGS;
- }
- } else {
- memcpy((u8 *)va + page_offset,
- (u8 *)src + bytes_copied, to_copy);
+ if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
+ from)) {
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
}
-
bytes_copied += to_copy;
if (kernel_if->host)
kunmap(kernel_if->u.h.page[page_index]);
@@ -411,11 +382,9 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
-static int __qp_memcpy_from_queue(void *dest,
- const struct vmci_queue *queue,
- u64 queue_offset,
- size_t size,
- bool is_iovec)
+static int qp_memcpy_from_queue_iter(struct iov_iter *to,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size)
{
struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
size_t bytes_copied = 0;
@@ -427,6 +396,7 @@ static int __qp_memcpy_from_queue(void *dest,
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
void *va;
size_t to_copy;
+ int err;
if (kernel_if->host)
va = kmap(kernel_if->u.h.page[page_index]);
@@ -440,23 +410,12 @@ static int __qp_memcpy_from_queue(void *dest,
else
to_copy = size - bytes_copied;
- if (is_iovec) {
- struct msghdr *msg = dest;
- int err;
-
- /* The iovec will track bytes_copied internally. */
- err = memcpy_to_msg(msg, (u8 *)va + page_offset,
- to_copy);
- if (err != 0) {
- if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
- return VMCI_ERROR_INVALID_ARGS;
- }
- } else {
- memcpy((u8 *)dest + bytes_copied,
- (u8 *)va + page_offset, to_copy);
+ err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
+ if (err != to_copy) {
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
}
-
bytes_copied += to_copy;
if (kernel_if->host)
kunmap(kernel_if->u.h.page[page_index]);
@@ -569,54 +528,6 @@ static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
return VMCI_SUCCESS;
}
-static int qp_memcpy_to_queue(struct vmci_queue *queue,
- u64 queue_offset,
- const void *src, size_t src_offset, size_t size)
-{
- return __qp_memcpy_to_queue(queue, queue_offset,
- (u8 *)src + src_offset, size, false);
-}
-
-static int qp_memcpy_from_queue(void *dest,
- size_t dest_offset,
- const struct vmci_queue *queue,
- u64 queue_offset, size_t size)
-{
- return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
- queue, queue_offset, size, false);
-}
-
-/*
- * Copies from a given iovec from a VMCI Queue.
- */
-static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
- u64 queue_offset,
- const void *msg,
- size_t src_offset, size_t size)
-{
-
- /*
- * We ignore src_offset because src is really a struct iovec * and will
- * maintain offset internally.
- */
- return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
-}
-
-/*
- * Copies to a given iovec from a VMCI Queue.
- */
-static int qp_memcpy_from_queue_iov(void *dest,
- size_t dest_offset,
- const struct vmci_queue *queue,
- u64 queue_offset, size_t size)
-{
- /*
- * We ignore dest_offset because dest is really a struct iovec * and
- * will maintain offset internally.
- */
- return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
-}
-
/*
* Allocates kernel VA space of specified size plus space for the queue
* and kernel interface. This is different from the guest queue allocator,
@@ -2629,12 +2540,11 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
struct vmci_queue *consume_q,
const u64 produce_q_size,
- const void *buf,
- size_t buf_size,
- vmci_memcpy_to_queue_func memcpy_to_queue)
+ struct iov_iter *from)
{
s64 free_space;
u64 tail;
+ size_t buf_size = iov_iter_count(from);
size_t written;
ssize_t result;
@@ -2654,15 +2564,15 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
written = (size_t) (free_space > buf_size ? buf_size : free_space);
tail = vmci_q_header_producer_tail(produce_q->q_header);
if (likely(tail + written < produce_q_size)) {
- result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+ result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
} else {
/* Tail pointer wraps around. */
const size_t tmp = (size_t) (produce_q_size - tail);
- result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+ result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
if (result >= VMCI_SUCCESS)
- result = memcpy_to_queue(produce_q, 0, buf, tmp,
+ result = qp_memcpy_to_queue_iter(produce_q, 0, from,
written - tmp);
}
@@ -2690,11 +2600,10 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
struct vmci_queue *consume_q,
const u64 consume_q_size,
- void *buf,
- size_t buf_size,
- vmci_memcpy_from_queue_func memcpy_from_queue,
+ struct iov_iter *to,
bool update_consumer)
{
+ size_t buf_size = iov_iter_count(to);
s64 buf_ready;
u64 head;
size_t read;
@@ -2716,15 +2625,15 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
head = vmci_q_header_consumer_head(produce_q->q_header);
if (likely(head + read < consume_q_size)) {
- result = memcpy_from_queue(buf, 0, consume_q, head, read);
+ result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
} else {
/* Head pointer wraps around. */
const size_t tmp = (size_t) (consume_q_size - head);
- result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+ result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
if (result >= VMCI_SUCCESS)
- result = memcpy_from_queue(buf, tmp, consume_q, 0,
+ result = qp_memcpy_from_queue_iter(to, consume_q, 0,
read - tmp);
}
@@ -3118,18 +3027,21 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
int buf_type)
{
ssize_t result;
+ struct iov_iter from;
+ struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
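+ /* Wrap the caller's buffer in a kvec-backed iov_iter for the copy helpers */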
+ iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
+
qp_lock(qpair);
do {
result = qp_enqueue_locked(qpair->produce_q,
qpair->consume_q,
qpair->produce_q_size,
- buf, buf_size,
- qp_memcpy_to_queue);
+ &from);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3159,18 +3071,21 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
int buf_type)
{
ssize_t result;
+ struct iov_iter to;
+ struct kvec v = {.iov_base = buf, .iov_len = buf_size};
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
+ iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+
qp_lock(qpair);
do {
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- buf, buf_size,
- qp_memcpy_from_queue, true);
+ &to, true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3200,19 +3115,22 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
size_t buf_size,
int buf_type)
{
+ struct iov_iter to;
+ struct kvec v = {.iov_base = buf, .iov_len = buf_size};
ssize_t result;
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
+ iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+
qp_lock(qpair);
do {
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- buf, buf_size,
- qp_memcpy_from_queue, false);
+ &to, false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3253,8 +3171,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
result = qp_enqueue_locked(qpair->produce_q,
qpair->consume_q,
qpair->produce_q_size,
- msg, iov_size,
- qp_memcpy_to_queue_iov);
+ &msg->msg_iter);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3295,9 +3212,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- msg, iov_size,
- qp_memcpy_from_queue_iov,
- true);
+ &msg->msg_iter, true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3339,9 +3254,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- msg, iov_size,
- qp_memcpy_from_queue_iov,
- false);
+ &msg->msg_iter, false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ccfa98af1dd3..20135a5de748 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -63,7 +63,13 @@ MODULE_ALIAS("mmc:block");
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
-#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+/*
+ * Set a 10 second timeout for polling write request busy state. Note that mmc
+ * core sets a 3 second timeout for SD cards, and SDHCI has long had a 10
+ * second software timer to timeout the whole request, so 10 seconds should be
+ * ample.
+ */
+#define MMC_BLK_TIMEOUT_MS (10 * 1000)
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
@@ -112,6 +118,7 @@ struct mmc_blk_data {
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_CQE_RECOVERY BIT(4)
/*
* Only set in main mmc_blk_data associated
@@ -189,7 +196,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
md->usage--;
if (md->usage == 0) {
int devidx = mmc_get_devidx(md->disk);
- blk_cleanup_queue(md->queue.queue);
+ blk_put_queue(md->queue.queue);
ida_simple_remove(&mmc_blk_ida, devidx);
put_disk(md->disk);
kfree(md);
@@ -921,14 +928,54 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
return 0;
}
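+/* Best-effort card clock in kHz, used to convert a timeout in clock cycles into time */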
+static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
+{
+ if (host->actual_clock)
+ return host->actual_clock / 1000;
+
+ /* Clock may be subject to a divisor, fudge it by a factor of 2. */
+ if (host->ios.clock)
+ return host->ios.clock / 2000;
+
+ /* How can there be no clock? */
+ WARN_ON_ONCE(1);
+ return 100; /* 100 kHz is minimum possible value */
+}
+
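+/* Convert the data timeout (nanoseconds plus clock cycles) into milliseconds */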
+static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
+ struct mmc_data *data)
+{
+ unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
+ unsigned int khz;
+
+ if (data->timeout_clks) {
+ khz = mmc_blk_clock_khz(host);
+ ms += DIV_ROUND_UP(data->timeout_clks, khz);
+ }
+
+ return ms;
+}
+
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- bool hw_busy_detect, struct request *req, bool *gen_err)
+ struct request *req, u32 *resp_errs)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
int err = 0;
u32 status;
do {
+ bool done = time_after(jiffies, timeout);
+
err = __mmc_send_status(card, &status, 5);
if (err) {
pr_err("%s: error %d requesting status\n",
@@ -936,25 +983,18 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
return err;
}
- if (status & R1_ERROR) {
- pr_err("%s: %s: error sending status cmd, status %#x\n",
- req->rq_disk->disk_name, __func__, status);
- *gen_err = true;
- }
-
- /* We may rely on the host hw to handle busy detection.*/
- if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
- hw_busy_detect)
- break;
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
/*
* Timeout if the device never becomes ready for data and never
* leaves the program state.
*/
- if (time_after(jiffies, timeout)) {
- pr_err("%s: Card stuck in programming state! %s %s\n",
+ if (done) {
+ pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__);
+ req->rq_disk->disk_name, __func__, status);
return -ETIMEDOUT;
}
@@ -963,229 +1003,11 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
* so make sure to check both the busy
* indication and the card state.
*/
- } while (!(status & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ } while (!mmc_blk_in_tran_state(status));
return err;
}
-static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, bool *gen_err, u32 *stop_status)
-{
- struct mmc_host *host = card->host;
- struct mmc_command cmd = {};
- int err;
- bool use_r1b_resp = rq_data_dir(req) == WRITE;
-
- /*
- * Normally we use R1B responses for WRITE, but in cases where the host
- * has specified a max_busy_timeout we need to validate it. A failure
- * means we need to prevent the host from doing hw busy detection, which
- * is done by converting to a R1 response instead.
- */
- if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
- use_r1b_resp = false;
-
- cmd.opcode = MMC_STOP_TRANSMISSION;
- if (use_r1b_resp) {
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = timeout_ms;
- } else {
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- }
-
- err = mmc_wait_for_cmd(host, &cmd, 5);
- if (err)
- return err;
-
- *stop_status = cmd.resp[0];
-
- /* No need to check card status in case of READ. */
- if (rq_data_dir(req) == READ)
- return 0;
-
- if (!mmc_host_is_spi(host) &&
- (*stop_status & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop command, resp %#x\n",
- req->rq_disk->disk_name, __func__, *stop_status);
- *gen_err = true;
- }
-
- return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
-}
-
-#define ERR_NOMEDIUM 3
-#define ERR_RETRY 2
-#define ERR_ABORT 1
-#define ERR_CONTINUE 0
-
-static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
- bool status_valid, u32 status)
-{
- switch (error) {
- case -EILSEQ:
- /* response crc error, retry the r/w cmd */
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "response CRC error",
- name, status);
- return ERR_RETRY;
-
- case -ETIMEDOUT:
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "timed out", name, status);
-
- /* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid) {
- pr_err("%s: status not valid, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /*
- * If it was a r/w cmd crc error, or illegal command
- * (eg, issued in wrong state) then retry - we should
- * have corrected the state problem above.
- */
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
- pr_err("%s: command error, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /* Otherwise abort the command */
- return ERR_ABORT;
-
- default:
- /* We don't understand the error code the driver gave us */
- pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
- req->rq_disk->disk_name, error, status);
- return ERR_ABORT;
- }
-}
-
-/*
- * Initial r/w and stop cmd error recovery.
- * We don't know whether the card received the r/w cmd or not, so try to
- * restore things back to a sane state. Essentially, we do this as follows:
- * - Obtain card status. If the first attempt to obtain card status fails,
- * the status word will reflect the failed status cmd, not the failed
- * r/w cmd. If we fail to obtain card status, it suggests we can no
- * longer communicate with the card.
- * - Check the card state. If the card received the cmd but there was a
- * transient problem with the response, it might still be in a data transfer
- * mode. Try to send it a stop command. If this fails, we can't recover.
- * - If the r/w cmd failed due to a response CRC error, it was probably
- * transient, so retry the cmd.
- * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
- * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
- * illegal cmd, retry.
- * Otherwise we don't understand what happened, so abort.
- */
-static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
-{
- bool prev_cmd_status_valid = true;
- u32 status, stop_status = 0;
- int err, retry;
-
- if (mmc_card_removed(card))
- return ERR_NOMEDIUM;
-
- /*
- * Try to get card status which indicates both the card state
- * and why there was no response. If the first attempt fails,
- * we can't be sure the returned status is for the r/w command.
- */
- for (retry = 2; retry >= 0; retry--) {
- err = __mmc_send_status(card, &status, 0);
- if (!err)
- break;
-
- /* Re-tune if needed */
- mmc_retune_recheck(card->host);
-
- prev_cmd_status_valid = false;
- pr_err("%s: error %d sending status command, %sing\n",
- req->rq_disk->disk_name, err, retry ? "retry" : "abort");
- }
-
- /* We couldn't get a response from the card. Give up. */
- if (err) {
- /* Check if the card is removed */
- if (mmc_detect_card_removed(card->host))
- return ERR_NOMEDIUM;
- return ERR_ABORT;
- }
-
- /* Flag ECC errors */
- if ((status & R1_CARD_ECC_FAILED) ||
- (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
- (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
- *ecc_err = true;
-
- /* Flag General errors */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
- if ((status & R1_ERROR) ||
- (brq->stop.resp[0] & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0], status);
- *gen_err = true;
- }
-
- /*
- * Check the current card state. If it is in some data transfer
- * mode, tell it to stop (and hopefully transition back to TRAN.)
- */
- if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
- R1_CURRENT_STATE(status) == R1_STATE_RCV) {
- err = send_stop(card,
- DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
- req, gen_err, &stop_status);
- if (err) {
- pr_err("%s: error %d sending stop command\n",
- req->rq_disk->disk_name, err);
- /*
- * If the stop cmd also timed out, the card is probably
- * not present, so abort. Other errors are bad news too.
- */
- return ERR_ABORT;
- }
-
- if (stop_status & R1_CARD_ECC_FAILED)
- *ecc_err = true;
- }
-
- /* Check for set block count errors */
- if (brq->sbc.error)
- return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
- prev_cmd_status_valid, status);
-
- /* Check for r/w command errors */
- if (brq->cmd.error)
- return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
- prev_cmd_status_valid, status);
-
- /* Data errors */
- if (!brq->stop.error)
- return ERR_CONTINUE;
-
- /* Now for stop errors. These aren't fatal to the transfer. */
- pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->stop.error,
- brq->cmd.resp[0], status);
-
- /*
- * Subsitute in our own stop status as this will give the error
- * state which happened during the execution of the r/w command.
- */
- if (stop_status) {
- brq->stop.resp[0] = stop_status;
- brq->stop.error = 0;
- }
- return ERR_CONTINUE;
-}
-
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
@@ -1281,7 +1103,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
mq_rq->drv_op_result = ret;
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -1324,7 +1146,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
else
mmc_blk_reset_success(md, type);
fail:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
@@ -1394,7 +1216,7 @@ out_retry:
if (!err)
mmc_blk_reset_success(md, type);
out:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
@@ -1404,7 +1226,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
int ret = 0;
ret = mmc_flush_cache(card);
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/*
@@ -1430,15 +1252,18 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
}
}
-#define CMD_ERRORS \
- (R1_OUT_OF_RANGE | /* Command argument out of range */ \
- R1_ADDRESS_ERROR | /* Misaligned address */ \
+#define CMD_ERRORS_EXCL_OOR \
+ (R1_ADDRESS_ERROR | /* Misaligned address */ \
R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
R1_WP_VIOLATION | /* Tried to write to protected block */ \
R1_CARD_ECC_FAILED | /* Card ECC failed */ \
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
+#define CMD_ERRORS \
+ (CMD_ERRORS_EXCL_OOR | \
+ R1_OUT_OF_RANGE) /* Command argument out of range */
+
static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
u32 val;
@@ -1481,116 +1306,6 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
}
-static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
- areq);
- struct mmc_blk_request *brq = &mq_mrq->brq;
- struct request *req = mmc_queue_req_to_req(mq_mrq);
- int need_retune = card->host->need_retune;
- bool ecc_err = false;
- bool gen_err = false;
-
- /*
- * sbc.error indicates a problem with the set block count
- * command. No data will have been transferred.
- *
- * cmd.error indicates a problem with the r/w command. No
- * data will have been transferred.
- *
- * stop.error indicates a problem with the stop command. Data
- * may have been transferred, or may still be transferring.
- */
-
- mmc_blk_eval_resp_error(brq);
-
- if (brq->sbc.error || brq->cmd.error ||
- brq->stop.error || brq->data.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
- case ERR_RETRY:
- return MMC_BLK_RETRY;
- case ERR_ABORT:
- return MMC_BLK_ABORT;
- case ERR_NOMEDIUM:
- return MMC_BLK_NOMEDIUM;
- case ERR_CONTINUE:
- break;
- }
- }
-
- /*
- * Check for errors relating to the execution of the
- * initial command - such as address errors. No data
- * has been transferred.
- */
- if (brq->cmd.resp[0] & CMD_ERRORS) {
- pr_err("%s: r/w command failed, status = %#x\n",
- req->rq_disk->disk_name, brq->cmd.resp[0]);
- return MMC_BLK_ABORT;
- }
-
- /*
- * Everything else is either success, or a data error of some
- * kind. If it was a write, we may have transitioned to
- * program mode, which we have to wait for it to complete.
- */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- int err;
-
- /* Check stop command response */
- if (brq->stop.resp[0] & R1_ERROR) {
- pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0]);
- gen_err = true;
- }
-
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
- &gen_err);
- if (err)
- return MMC_BLK_CMD_ERR;
- }
-
- /* if general error occurs, retry the write operation. */
- if (gen_err) {
- pr_warn("%s: retrying write for general error\n",
- req->rq_disk->disk_name);
- return MMC_BLK_RETRY;
- }
-
- /* Some errors (ECC) are flagged on the next commmand, so check stop, too */
- if (brq->data.error || brq->stop.error) {
- if (need_retune && !brq->retune_retry_done) {
- pr_debug("%s: retrying because a re-tune was needed\n",
- req->rq_disk->disk_name);
- brq->retune_retry_done = 1;
- return MMC_BLK_RETRY;
- }
- pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req),
- brq->cmd.resp[0], brq->stop.resp[0]);
-
- if (rq_data_dir(req) == READ) {
- if (ecc_err)
- return MMC_BLK_ECC_ERR;
- return MMC_BLK_DATA_ERR;
- } else {
- return MMC_BLK_CMD_ERR;
- }
- }
-
- if (!brq->data.bytes_xfered)
- return MMC_BLK_RETRY;
-
- if (blk_rq_bytes(req) != brq->data.bytes_xfered)
- return MMC_BLK_PARTIAL;
-
- return MMC_BLK_SUCCESS;
-}
-
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
int disable_multi, bool *do_rel_wr_p,
bool *do_data_tag_p)
@@ -1706,8 +1421,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.sg_len = i;
}
- mqrq->areq.mrq = &brq->mrq;
-
if (do_rel_wr_p)
*do_rel_wr_p = do_rel_wr;
@@ -1715,6 +1428,138 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
*do_data_tag_p = do_data_tag;
}
+#define MMC_CQE_RETRIES 2
+
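+/*
+ * Complete a CQE request: requeue it on error (up to MMC_CQE_RETRIES times),
+ * otherwise end it, then update the in-flight count and release the card once
+ * no requests remain.
+ */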
+static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct request_queue *q = req->q;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+ bool put_card;
+ int err;
+
+ mmc_cqe_post_req(host, mrq);
+
+ if (mrq->cmd && mrq->cmd->error)
+ err = mrq->cmd->error;
+ else if (mrq->data && mrq->data->error)
+ err = mrq->data->error;
+ else
+ err = 0;
+
+ if (err) {
+ if (mqrq->retries++ < MMC_CQE_RETRIES)
+ blk_mq_requeue_request(req, true);
+ else
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mrq->data) {
+ if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else {
+ blk_mq_end_request(req, BLK_STS_OK);
+ }
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ mmc_cqe_check_busy(mq);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!mq->cqe_busy)
+ blk_mq_run_hw_queues(q, true);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
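+/* Recover the command queue engine after an error and record whether the reset succeeded */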
+void mmc_blk_cqe_recovery(struct mmc_queue *mq)
+{
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
+
+ err = mmc_cqe_recovery(host);
+ if (err)
+ mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+ else
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+ pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+}
+
+static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+
+ /*
+ * Block layer timeouts race with completions, which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+}
+
+static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_blk_cqe_req_done;
+ mrq->recovery_notifier = mmc_cqe_recovery_notifier;
+
+ return mmc_cqe_start_req(host, mrq);
+}
+
+static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
+ struct request *req)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+
+ memset(brq, 0, sizeof(*brq));
+
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.tag = req->tag;
+
+ return &brq->mrq;
+}
+
+static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
+
+ mrq->cmd->opcode = MMC_SWITCH;
+ mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_FLUSH_CACHE << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
+
+ return mmc_blk_cqe_start_req(mq->card->host, mrq);
+}
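
A note on the SWITCH command built above: the argument packs four fields
into one 32-bit word, following the eMMC CMD6 argument layout. A minimal
sketch (the helper name is illustrative only, not part of the patch):

	/* CMD6 argument: bits [25:24] access mode, [23:16] EXT_CSD byte
	 * index, [15:8] value to write, [2:0] command set.
	 */
	static u32 example_switch_arg(u8 access, u8 index, u8 value, u8 set)
	{
		return (access << 24) | (index << 16) | (value << 8) | set;
	}

	/* The flush above corresponds to:
	 * example_switch_arg(MMC_SWITCH_MODE_WRITE_BYTE, EXT_CSD_FLUSH_CACHE,
	 *		      1, EXT_CSD_CMD_SET_NORMAL);
	 */
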
+
+static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
+
+ return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+}
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1779,318 +1624,637 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
+}
+
+#define MMC_MAX_RETRIES 5
+#define MMC_DATA_RETRIES 2
+#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1)
- mqrq->areq.err_check = mmc_blk_err_check;
+static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
+{
+ struct mmc_command cmd = {
+ .opcode = MMC_STOP_TRANSMISSION,
+ .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
+ /* Some hosts wait for busy anyway, so provide a busy timeout */
+ .busy_timeout = timeout,
+ };
+
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
}
-static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
- struct mmc_blk_request *brq, struct request *req,
- bool old_req_pending)
+static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
- bool req_pending;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
+ int err;
- /*
- * If this is an SD card and we're writing, we can first
- * mark the known good sectors as ok.
- *
- * If the card is not SD, we can still ok written sectors
- * as reported by the controller (which might be less than
- * the real number of written sectors, but never more).
- */
- if (mmc_card_sd(card)) {
- u32 blocks;
+ mmc_retune_hold_now(card->host);
+
+ mmc_blk_send_stop(card, timeout);
+
+ err = card_busy_detect(card, timeout, req, NULL);
+
+ mmc_retune_release(card->host);
+
+ return err;
+}
+
+#define MMC_READ_SINGLE_RETRIES 2
+
+/* Single sector read during recovery */
+static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ blk_status_t error = BLK_STS_OK;
+ int retries = 0;
+
+ do {
+ u32 status;
int err;
- err = mmc_sd_num_wr_blocks(card, &blocks);
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+
+ mmc_wait_for_req(host, mrq);
+
+ err = mmc_send_status(card, &status);
if (err)
- req_pending = old_req_pending;
+ goto error_exit;
+
+ if (!mmc_host_is_spi(host) &&
+ !mmc_blk_in_tran_state(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
+
+ if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
+ continue;
+
+ retries = 0;
+
+ if (mrq->cmd->error ||
+ mrq->data->error ||
+ (!mmc_host_is_spi(host) &&
+ (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
+ error = BLK_STS_IOERR;
else
- req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
- } else {
- req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
- }
- return req_pending;
+ error = BLK_STS_OK;
+
+ } while (blk_update_request(req, error, 512));
+
+ return;
+
+error_exit:
+ mrq->data->bytes_xfered = 0;
+ blk_update_request(req, BLK_STS_IOERR, 512);
+ /* Let it try the remaining request again */
+ if (mqrq->retries > MMC_MAX_RETRIES - 1)
+ mqrq->retries = MMC_MAX_RETRIES - 1;
}
-static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
- struct request *req,
- struct mmc_queue_req *mqrq)
+static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
- if (mmc_card_removed(card))
- req->rq_flags |= RQF_QUIET;
- while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
- mq->qcnt--;
+ return !!brq->mrq.sbc;
}
-/**
- * mmc_blk_rw_try_restart() - tries to restart the current async request
- * @mq: the queue with the card and host to restart
- * @req: a new request that want to be started after the current one
+static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
+{
+ return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
+}
+
+/*
+ * Check for errors the host controller driver might not have seen, such as
+ * response mode errors or invalid card state.
+ */
+static bool mmc_blk_status_error(struct request *req, u32 status)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_queue *mq = req->q->queuedata;
+ u32 stop_err_bits;
+
+ if (mmc_host_is_spi(mq->card->host))
+ return false;
+
+ stop_err_bits = mmc_blk_stop_err_bits(brq);
+
+ return brq->cmd.resp[0] & CMD_ERRORS ||
+ brq->stop.resp[0] & stop_err_bits ||
+ status & stop_err_bits ||
+ (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+}
+
+static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
+{
+ return !brq->sbc.error && !brq->cmd.error &&
+ !(brq->cmd.resp[0] & CMD_ERRORS);
+}
+
+/*
+ * Requests are completed by mmc_blk_mq_complete_rq() which applies a
+ * simple policy:
+ * 1. A request that has transferred at least some data is considered
+ * successful and will be requeued if there is remaining data to
+ * transfer.
+ * 2. Otherwise the number of retries is incremented and the request
+ * will be requeued if there are remaining retries.
+ * 3. Otherwise the request will be errored out.
+ * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
+ * mqrq->retries. So there are only 4 possible actions here:
+ * 1. do not accept the bytes_xfered value i.e. set it to zero
+ * 2. change mqrq->retries to determine the number of retries
+ * 3. try to reset the card
+ * 4. read one sector at a time
*/
-static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
- struct mmc_queue_req *mqrq)
+static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{
- if (!req)
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = mq->card;
+ u32 status;
+ u32 blocks;
+ int err;
+
+ /*
+ * Some errors the host driver might not have seen. Set the number of
+ * bytes transferred to zero in that case.
+ */
+ err = __mmc_send_status(card, &status, 0);
+ if (err || mmc_blk_status_error(req, status))
+ brq->data.bytes_xfered = 0;
+
+ mmc_retune_release(card->host);
+
+ /*
+ * Try again to get the status. This also provides an opportunity for
+ * re-tuning.
+ */
+ if (err)
+ err = __mmc_send_status(card, &status, 0);
+
+ /*
+ * Nothing more to do after the number of bytes transferred has been
+ * updated and there is no card.
+ */
+ if (err && mmc_detect_card_removed(card->host))
return;
+ /* Try to get back to "tran" state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ (err || !mmc_blk_in_tran_state(status)))
+ err = mmc_blk_fix_state(mq->card, req);
+
/*
- * If the card was removed, just cancel everything and return.
+ * Special case for SD cards where the card might record the number of
+ * blocks written.
*/
- if (mmc_card_removed(mq->card)) {
- req->rq_flags |= RQF_QUIET;
- blk_end_request_all(req, BLK_STS_IOERR);
- mq->qcnt--; /* FIXME: just set to 0? */
+ if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
+ rq_data_dir(req) == WRITE) {
+ if (mmc_sd_num_wr_blocks(card, &blocks))
+ brq->data.bytes_xfered = 0;
+ else
+ brq->data.bytes_xfered = blocks << 9;
+ }
+
+ /* Reset if the card is in a bad state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ err && mmc_blk_reset(md, card->host, type)) {
+ pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+ mqrq->retries = MMC_NO_RETRIES;
+ return;
+ }
+
+ /*
+ * If anything was done, just return; if there is anything remaining
+ * on the request, it will get requeued.
+ */
+ if (brq->data.bytes_xfered)
+ return;
+
+ /* Reset before last retry */
+ if (mqrq->retries + 1 == MMC_MAX_RETRIES)
+ mmc_blk_reset(md, card->host, type);
+
+ /* Command errors fail fast, so use all MMC_MAX_RETRIES */
+ if (brq->sbc.error || brq->cmd.error)
+ return;
+
+ /* Reduce the remaining retries for data errors */
+ if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
+ mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
+ return;
+ }
+
+ /* FIXME: Missing single sector read for large sector size */
+ if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
+ brq->data.blocks > 1) {
+ /* Read one sector at a time */
+ mmc_blk_read_single(mq, req);
return;
}
- /* Else proceed and try to restart the current async request */
- mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
- mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
}
-static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
+static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{
- struct mmc_blk_data *md = mq->blkdata;
- struct mmc_card *card = md->queue.card;
- struct mmc_blk_request *brq;
- int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
- enum mmc_blk_status status;
- struct mmc_queue_req *mqrq_cur = NULL;
- struct mmc_queue_req *mq_rq;
- struct request *old_req;
- struct mmc_async_req *new_areq;
- struct mmc_async_req *old_areq;
- bool req_pending = true;
+ mmc_blk_eval_resp_error(brq);
+
+ return brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
+}
+
+static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ u32 status = 0;
+ int err;
+
+ if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ return 0;
+
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+
+ /*
+ * Do not assume data transferred correctly if there are any error bits
+ * set.
+ */
+ if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
+ mqrq->brq.data.bytes_xfered = 0;
+ err = err ? err : -EIO;
+ }
+
+ /* Copy the exception bit so it will be seen later on */
+ if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
+ mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
+
+ return err;
+}
- if (new_req) {
- mqrq_cur = req_to_mmc_queue_req(new_req);
- mq->qcnt++;
+static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
+ struct request *req)
+{
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+
+ mmc_blk_reset_success(mq->blkdata, type);
+}
+
+static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
+
+ if (nr_bytes) {
+ if (blk_update_request(req, BLK_STS_OK, nr_bytes))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else if (!blk_rq_bytes(req)) {
+ __blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
+ blk_mq_requeue_request(req, true);
+ } else {
+ if (mmc_card_removed(mq->card))
+ req->rq_flags |= RQF_QUIET;
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ }
+}
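
A short walk-through of how bytes_xfered and mqrq->retries drive the
function above, with illustrative numbers only:

	/* blk_rq_bytes(req) = 24576 (48 sectors), bytes_xfered = 8192:
	 *   blk_update_request(req, BLK_STS_OK, 8192) returns true,
	 *   so the remaining 32 sectors are requeued; retries unchanged.
	 *
	 * bytes_xfered = 0, retries = 4 (< MMC_MAX_RETRIES):
	 *   request requeued, retries becomes 5.
	 *
	 * bytes_xfered = 0, retries = 5:
	 *   blk_mq_end_request(req, BLK_STS_IOERR).
	 */
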
+
+static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
+ (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
+ mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
+}
+
+static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ if (mmc_blk_urgent_bkops_needed(mq, mqrq))
+ mmc_start_bkops(mq->card, true);
+}
+
+void mmc_blk_mq_complete(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ mmc_blk_mq_complete_rq(mq, req);
+}
+
+static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_card_busy(mq->card, req)) {
+ mmc_blk_mq_rw_recovery(mq, req);
+ } else {
+ mmc_blk_rw_reset_success(mq, req);
+ mmc_retune_release(host);
+ }
+
+ mmc_blk_urgent_bkops(mq, mqrq);
+}
+
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long flags;
+ bool put_card;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_host *host = mq->card->host;
+
+ mmc_post_req(host, mrq, 0);
+
+ /*
+ * Block layer timeouts race with completions, which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_mq_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+
+ mmc_blk_mq_dec_in_flight(mq, req);
+}
+
+void mmc_blk_mq_recovery(struct mmc_queue *mq)
+{
+ struct request *req = mq->recovery_req;
+ struct mmc_host *host = mq->card->host;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mq->recovery_req = NULL;
+ mq->rw_wait = false;
+
+ if (mmc_blk_rq_error(&mqrq->brq)) {
+ mmc_retune_hold_now(host);
+ mmc_blk_mq_rw_recovery(mq, req);
}
- if (!mq->qcnt)
+ mmc_blk_urgent_bkops(mq, mqrq);
+
+ mmc_blk_mq_post_req(mq, req);
+}
+
+static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
+ struct request **prev_req)
+{
+ if (mmc_host_done_complete(mq->card->host))
return;
- do {
- if (new_req) {
- /*
- * When 4KB native sector is enabled, only reads and writes
- * that are a multiple of 8 blocks are allowed
- */
- if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
- pr_err("%s: Transfer size is not 4KB sector size aligned\n",
- new_req->rq_disk->disk_name);
- mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
- return;
- }
+ mutex_lock(&mq->complete_lock);
- mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
- new_areq = &mqrq_cur->areq;
- } else
- new_areq = NULL;
+ if (!mq->complete_req)
+ goto out_unlock;
- old_areq = mmc_start_areq(card->host, new_areq, &status);
- if (!old_areq) {
- /*
- * We have just put the first request into the pipeline
- * and there is nothing more to do until it is
- * complete.
- */
- return;
- }
+ mmc_blk_mq_poll_completion(mq, mq->complete_req);
+
+ if (prev_req)
+ *prev_req = mq->complete_req;
+ else
+ mmc_blk_mq_post_req(mq, mq->complete_req);
+
+ mq->complete_req = NULL;
+
+out_unlock:
+ mutex_unlock(&mq->complete_lock);
+}
+
+void mmc_blk_mq_complete_work(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ complete_work);
+
+ mmc_blk_mq_complete_prev_req(mq, NULL);
+}
+
+static void mmc_blk_mq_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+
+ if (!mmc_host_done_complete(host)) {
+ bool waiting;
/*
- * An asynchronous request has been completed and we proceed
- * to handle the result of it.
+ * We cannot complete the request in this context, so record
+ * that there is a request to complete, and that a following
+ * request does not need to wait (although it does need to
+ * complete complete_req first).
*/
- mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
- brq = &mq_rq->brq;
- old_req = mmc_queue_req_to_req(mq_rq);
- type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-
- switch (status) {
- case MMC_BLK_SUCCESS:
- case MMC_BLK_PARTIAL:
- /*
- * A block was successfully transferred.
- */
- mmc_blk_reset_success(md, type);
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->complete_req = req;
+ mq->rw_wait = false;
+ waiting = mq->waiting;
+ spin_unlock_irqrestore(q->queue_lock, flags);
- req_pending = blk_end_request(old_req, BLK_STS_OK,
- brq->data.bytes_xfered);
- /*
- * If the blk_end_request function returns non-zero even
- * though all data has been transferred and no errors
- * were returned by the host controller, it's a bug.
- */
- if (status == MMC_BLK_SUCCESS && req_pending) {
- pr_err("%s BUG rq_tot %d d_xfer %d\n",
- __func__, blk_rq_bytes(old_req),
- brq->data.bytes_xfered);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- return;
- }
- break;
- case MMC_BLK_CMD_ERR:
- req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
- if (mmc_blk_reset(md, card->host, type)) {
- if (req_pending)
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- else
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_RETRY:
- retune_retry_done = brq->retune_retry_done;
- if (retry++ < 5)
- break;
- /* Fall through */
- case MMC_BLK_ABORT:
- if (!mmc_blk_reset(md, card->host, type))
- break;
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- case MMC_BLK_DATA_ERR: {
- int err;
-
- err = mmc_blk_reset(md, card->host, type);
- if (!err)
- break;
- if (err == -ENODEV) {
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- /* Fall through */
- }
- case MMC_BLK_ECC_ERR:
- if (brq->data.blocks > 1) {
- /* Redo read one sector at a time */
- pr_warn("%s: retrying using single block read\n",
- old_req->rq_disk->disk_name);
- disable_multi = 1;
- break;
- }
- /*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
- */
- req_pending = blk_end_request(old_req, BLK_STS_IOERR,
- brq->data.blksz);
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_NOMEDIUM:
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- default:
- pr_err("%s: Unhandled return value (%d)",
- old_req->rq_disk->disk_name, status);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
+ /*
+ * If 'waiting' then the waiting task will complete this
+ * request, otherwise queue a work to do it. Note that
+ * complete_work may still race with the dispatch of a following
+ * request.
+ */
+ if (waiting)
+ wake_up(&mq->wait);
+ else
+ kblockd_schedule_work(&mq->complete_work);
- if (req_pending) {
- /*
- * In case of an incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card,
- disable_multi, mq);
- mmc_start_areq(card->host,
- &mq_rq->areq, NULL);
- mq_rq->brq.retune_retry_done = retune_retry_done;
- }
- } while (req_pending);
+ return;
+ }
+
+ /* Take the recovery path for errors or urgent background operations */
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->recovery_needed = true;
+ mq->recovery_req = req;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ wake_up(&mq->wait);
+ schedule_work(&mq->recovery_work);
+ return;
+ }
+
+ mmc_blk_rw_reset_success(mq, req);
- mq->qcnt--;
+ mq->rw_wait = false;
+ wake_up(&mq->wait);
+
+ mmc_blk_mq_post_req(mq, req);
}
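
The done callback above may run from interrupt context, which is why it
branches on mmc_host_done_complete(). A summary of the two paths
(descriptive sketch only):

	/* !MMC_CAP_DONE_COMPLETE (typical host, done called from irq):
	 *   publish complete_req under the lock, then either wake the
	 *   task sleeping in mmc_blk_rw_wait() or punt completion to
	 *   the kblockd work, since blk_mq completion must not run here.
	 *
	 * MMC_CAP_DONE_COMPLETE (host calls done from process context):
	 *   recover or complete inline, then release rw_wait so the
	 *   next request can be issued immediately.
	 */
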
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
+{
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+ bool done;
+
+ /*
+ * Wait while there is another request in progress, but not if recovery
+ * is needed. Also indicate whether there is a request waiting to start.
+ */
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (mq->recovery_needed) {
+ *err = -EBUSY;
+ done = true;
+ } else {
+ done = !mq->rw_wait;
+ }
+ mq->waiting = !done;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return done;
+}
+
+static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
+{
+ int err = 0;
+
+ wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
+
+ /* Always complete the previous request if there is one */
+ mmc_blk_mq_complete_prev_req(mq, prev_req);
+
+ return err;
+}
+
+static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+ struct request *prev_req = NULL;
+ int err = 0;
+
+ mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+
+ mqrq->brq.mrq.done = mmc_blk_mq_req_done;
+
+ mmc_pre_req(host, &mqrq->brq.mrq);
+
+ err = mmc_blk_rw_wait(mq, &prev_req);
+ if (err)
+ goto out_post_req;
+
+ mq->rw_wait = true;
+
+ err = mmc_start_request(host, &mqrq->brq.mrq);
+
+ if (prev_req)
+ mmc_blk_mq_post_req(mq, prev_req);
+
+ if (err)
+ mq->rw_wait = false;
+
+ /* Release re-tuning here where there is no synchronization required */
+ if (err || mmc_host_done_complete(host))
+ mmc_retune_release(host);
+
+out_post_req:
+ if (err)
+ mmc_post_req(host, &mqrq->brq.mrq, err);
+
+ return err;
+}
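
Taken together, this gives a two-stage pipeline: the next request is
prepared while the previous one is still on the bus. A rough timeline
(descriptive sketch, not code from the patch):

	/* issue(reqN):
	 *   mmc_pre_req(reqN)            - DMA mapping overlaps with
	 *   mmc_blk_rw_wait()              reqN-1 still in flight
	 *   mmc_start_request(reqN)
	 *   mmc_blk_mq_post_req(reqN-1)  - unmap/complete the old one
	 *
	 * At most one request is on the host at a time, but its successor
	 * is already prepared, which is the same overlap the removed
	 * mmc_start_areq() machinery provided, with far less state.
	 */
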
+
+static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
+{
+ if (mq->use_cqe)
+ return host->cqe_ops->cqe_wait_for_idle(host);
+
+ return mmc_blk_rw_wait(mq, NULL);
+}
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{
- int ret;
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
-
- if (req && !mq->qcnt)
- /* claim host only for the first request */
- mmc_get_card(card, NULL);
+ struct mmc_host *host = card->host;
+ int ret;
ret = mmc_blk_part_switch(card, md->part_type);
- if (ret) {
- if (req) {
- blk_end_request_all(req, BLK_STS_IOERR);
- }
- goto out;
- }
+ if (ret)
+ return MMC_REQ_FAILED_TO_START;
- if (req) {
+ switch (mmc_issue_type(mq, req)) {
+ case MMC_ISSUE_SYNC:
+ ret = mmc_blk_wait_for_idle(mq, host);
+ if (ret)
+ return MMC_REQ_BUSY;
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
- /*
- * Complete ongoing async transfer before issuing
- * ioctl()s
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_drv_op(mq, req);
break;
case REQ_OP_DISCARD:
- /*
- * Complete ongoing async transfer before issuing
- * discard.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_discard_rq(mq, req);
break;
case REQ_OP_SECURE_ERASE:
- /*
- * Complete ongoing async transfer before issuing
- * secure erase.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_secdiscard_rq(mq, req);
break;
case REQ_OP_FLUSH:
- /*
- * Complete ongoing async transfer before issuing
- * flush.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_flush(mq, req);
break;
default:
- /* Normal request, just issue it */
- mmc_blk_issue_rw_rq(mq, req);
- card->host->context_info.is_waiting_last_req = false;
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
+ }
+ return MMC_REQ_FINISHED;
+ case MMC_ISSUE_DCMD:
+ case MMC_ISSUE_ASYNC:
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
+ ret = mmc_blk_cqe_issue_flush(mq, req);
break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ if (mq->use_cqe)
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ else
+ ret = mmc_blk_mq_issue_rw_rq(mq, req);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
}
- } else {
- /* No request, flushing the pipeline with NULL */
- mmc_blk_issue_rw_rq(mq, NULL);
- card->host->context_info.is_waiting_last_req = false;
+ if (!ret)
+ return MMC_REQ_STARTED;
+ return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
+ default:
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
}
-
-out:
- if (!mq->qcnt)
- mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2156,6 +2320,18 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->queue.blkdata = md;
+ /*
+ * Keep an extra reference to the queue so that we can shutdown the
+ * queue (i.e. call blk_cleanup_queue()) while there are still
+ * references to the 'md'. The corresponding blk_put_queue() is in
+ * mmc_blk_put().
+ */
+ if (!blk_get_queue(md->queue.queue)) {
+ mmc_cleanup_queue(&md->queue);
+ ret = -ENODEV;
+ goto err_putdisk;
+ }
+
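
The matching release lives in mmc_blk_put(), per the comment above. A
simplified sketch of the pairing (locking is omitted and the exact field
names are assumed; illustrative only):

	static void example_mmc_blk_put(struct mmc_blk_data *md)
	{
		if (!--md->usage) {
			/* pairs with blk_get_queue() taken at alloc time */
			blk_put_queue(md->queue.queue);
			put_disk(md->disk);
			kfree(md);
		}
	}
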
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx * perdev_minors;
md->disk->fops = &mmc_bdops;
@@ -2471,10 +2647,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
- spin_lock_irq(md->queue.queue->queue_lock);
- queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
- spin_unlock_irq(md->queue.queue->queue_lock);
- blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
@@ -2623,6 +2795,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
if (n != EXT_CSD_STR_LEN) {
err = -EINVAL;
+ kfree(ext_csd);
goto out_free;
}
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 5946636101ef..31153f656f41 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -5,6 +5,16 @@
struct mmc_queue;
struct request;
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_cqe_recovery(struct mmc_queue *mq);
+
+enum mmc_issued;
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_mq_complete(struct request *req);
+void mmc_blk_mq_recovery(struct mmc_queue *mq);
+
+struct work_struct;
+
+void mmc_blk_mq_complete_work(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 7586ff2ad1f1..fc92c6c1c9a4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -351,8 +351,6 @@ int mmc_add_card(struct mmc_card *card)
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
- mmc_init_context_info(card->host);
-
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
device_enable_async_suspend(&card->dev);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 1f0f44f4dd5f..c0ba6d8823b7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -341,6 +341,8 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
+ init_completion(&mrq->cmd_completion);
+
mmc_retune_hold(host);
if (mmc_card_removed(host->card))
@@ -361,20 +363,6 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_start_request);
-/*
- * mmc_wait_data_done() - done callback for data request
- * @mrq: done data request
- *
- * Wakes up mmc context, passed as a callback to host controller driver
- */
-static void mmc_wait_data_done(struct mmc_request *mrq)
-{
- struct mmc_context_info *context_info = &mrq->host->context_info;
-
- context_info->is_done_rcv = true;
- wake_up_interruptible(&context_info->wait);
-}
-
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
@@ -392,37 +380,6 @@ static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
wait_for_completion(&ongoing_mrq->cmd_completion);
}
-/*
- *__mmc_start_data_req() - starts data request
- * @host: MMC host to start the request
- * @mrq: data request to start
- *
- * Sets the done callback to be called when request is completed by the card.
- * Starts data mmc request execution
- * If an ongoing transfer is already in progress, wait for the command line
- * to become available before sending another command.
- */
-static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- int err;
-
- mmc_wait_ongoing_tfr_cmd(host);
-
- mrq->done = mmc_wait_data_done;
- mrq->host = host;
-
- init_completion(&mrq->cmd_completion);
-
- err = mmc_start_request(host, mrq);
- if (err) {
- mrq->cmd->error = err;
- mmc_complete_cmd(mrq);
- mmc_wait_data_done(mrq);
- }
-
- return err;
-}
-
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -432,8 +389,6 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
- init_completion(&mrq->cmd_completion);
-
err = mmc_start_request(host, mrq);
if (err) {
mrq->cmd->error = err;
@@ -650,164 +605,11 @@ EXPORT_SYMBOL(mmc_cqe_recovery);
*/
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
- if (host->areq)
- return host->context_info.is_done_rcv;
- else
- return completion_done(&mrq->completion);
+ return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
/**
- * mmc_pre_req - Prepare for a new request
- * @host: MMC host to prepare command
- * @mrq: MMC request to prepare for
- *
- * mmc_pre_req() is called prior to mmc_start_req() to let
- * host prepare for the new request. Preparation of a request may be
- * performed while another request is running on the host.
- */
-static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- if (host->ops->pre_req)
- host->ops->pre_req(host, mrq);
-}
-
-/**
- * mmc_post_req - Post process a completed request
- * @host: MMC host to post process command
- * @mrq: MMC request to post process for
- * @err: Error, if non zero, clean up any resources made in pre_req
- *
- * Let the host post process a completed request. Post processing of
- * a request may be performed while another request is running.
- */
-static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
- int err)
-{
- if (host->ops->post_req)
- host->ops->post_req(host, mrq, err);
-}
-
-/**
- * mmc_finalize_areq() - finalize an asynchronous request
- * @host: MMC host to finalize any ongoing request on
- *
- * Returns the status of the ongoing asynchronous request, but
- * MMC_BLK_SUCCESS if no request was going on.
- */
-static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
-{
- struct mmc_context_info *context_info = &host->context_info;
- enum mmc_blk_status status;
-
- if (!host->areq)
- return MMC_BLK_SUCCESS;
-
- while (1) {
- wait_event_interruptible(context_info->wait,
- (context_info->is_done_rcv ||
- context_info->is_new_req));
-
- if (context_info->is_done_rcv) {
- struct mmc_command *cmd;
-
- context_info->is_done_rcv = false;
- cmd = host->areq->mrq->cmd;
-
- if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card)) {
- status = host->areq->err_check(host->card,
- host->areq);
- break; /* return status */
- } else {
- mmc_retune_recheck(host);
- pr_info("%s: req failed (CMD%u): %d, retrying...\n",
- mmc_hostname(host),
- cmd->opcode, cmd->error);
- cmd->retries--;
- cmd->error = 0;
- __mmc_start_request(host, host->areq->mrq);
- continue; /* wait for done/new event again */
- }
- }
-
- return MMC_BLK_NEW_REQUEST;
- }
-
- mmc_retune_release(host);
-
- /*
- * Check BKOPS urgency for each R1 response
- */
- if (host->card && mmc_card_mmc(host->card) &&
- ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
- (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
- (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
- mmc_start_bkops(host->card, true);
- }
-
- return status;
-}
-
-/**
- * mmc_start_areq - start an asynchronous request
- * @host: MMC host to start command
- * @areq: asynchronous request to start
- * @ret_stat: out parameter for status
- *
- * Start a new MMC custom command request for a host.
- * If there is on ongoing async request wait for completion
- * of that request and start the new one and return.
- * Does not wait for the new request to complete.
- *
- * Returns the completed request, NULL in case of none completed.
- * Wait for an ongoing request (previously started) to complete and
- * return the completed request. If there is no ongoing request, NULL
- * is returned without waiting. NULL is not an error condition.
- */
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat)
-{
- enum mmc_blk_status status;
- int start_err = 0;
- struct mmc_async_req *previous = host->areq;
-
- /* Prepare a new request */
- if (areq)
- mmc_pre_req(host, areq->mrq);
-
- /* Finalize previous request */
- status = mmc_finalize_areq(host);
- if (ret_stat)
- *ret_stat = status;
-
- /* The previous request is still going on... */
- if (status == MMC_BLK_NEW_REQUEST)
- return NULL;
-
- /* Fine so far, start the new request! */
- if (status == MMC_BLK_SUCCESS && areq)
- start_err = __mmc_start_data_req(host, areq->mrq);
-
- /* Postprocess the old request at this point */
- if (host->areq)
- mmc_post_req(host, host->areq->mrq, 0);
-
- /* Cancel a prepared request if it was not started. */
- if ((status != MMC_BLK_SUCCESS || start_err) && areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
-
- if (status != MMC_BLK_SUCCESS)
- host->areq = NULL;
- else
- host->areq = areq;
-
- return previous;
-}
-EXPORT_SYMBOL(mmc_start_areq);
-
-/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
* @mrq: MMC request to start
@@ -2959,6 +2761,14 @@ static int mmc_pm_notify(struct notifier_block *notify_block,
if (!err)
break;
+ if (!mmc_card_is_removable(host)) {
+ dev_warn(mmc_dev(host),
+ "pre_suspend failed for non-removable host: "
+ "%d\n", err);
+ /* Avoid removing non-removable hosts */
+ break;
+ }
+
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
@@ -2994,22 +2804,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host)
}
#endif
-/**
- * mmc_init_context_info() - init synchronization context
- * @host: mmc host
- *
- * Init struct context_info needed to implement asynchronous
- * request mechanism, used by mmc core, host driver and mmc requests
- * supplier.
- */
-void mmc_init_context_info(struct mmc_host *host)
-{
- host->context_info.is_new_req = false;
- host->context_info.is_done_rcv = false;
- host->context_info.is_waiting_last_req = false;
- init_waitqueue_head(&host->context_info.wait);
-}
-
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 71e6c6d7ceb7..d6303d69071b 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -62,12 +62,10 @@ void mmc_set_initial_state(struct mmc_host *host);
static inline void mmc_delay(unsigned int ms)
{
- if (ms < 1000 / HZ) {
- cond_resched();
- mdelay(ms);
- } else {
+ if (ms <= 20)
+ usleep_range(ms * 1000, ms * 1250);
+ else
msleep(ms);
- }
}
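
The new bounds mean short delays use usleep_range() with 25% slack, so
the timer subsystem can coalesce nearby wakeups, while longer delays keep
using jiffy-granular msleep(). Illustrative effect:

	/* mmc_delay(10) -> usleep_range(10000, 12500): 10 to 12.5 ms
	 * mmc_delay(50) -> msleep(50): at least 50 ms, rounded to jiffies
	 */
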
void mmc_rescan(struct work_struct *work);
@@ -91,8 +89,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
-void mmc_init_context_info(struct mmc_host *host);
-
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
@@ -110,12 +106,6 @@ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
-struct mmc_async_req;
-
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat);
-
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg);
int mmc_can_erase(struct mmc_card *card);
@@ -152,4 +142,35 @@ int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_cqe_recovery(struct mmc_host *host);
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ *
+ * mmc_pre_req() is called prior to mmc_start_req() to let
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static inline void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq);
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error, if non zero, clean up any resources made in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another request is running.
+ */
+static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req)
+ host->ops->post_req(host, mrq, err);
+}
+
#endif
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb689a1065ed..06ec19b5bf9f 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -41,6 +41,11 @@ static inline int mmc_host_cmd23(struct mmc_host *host)
return host->caps & MMC_CAP_CMD23;
}
+static inline bool mmc_host_done_complete(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_DONE_COMPLETE;
+}
+
static inline int mmc_boot_partition_access(struct mmc_host *host)
{
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
@@ -74,6 +79,5 @@ static inline bool mmc_card_hs400es(struct mmc_card *card)
return card->host->ios.enhanced_strobe;
}
-
#endif
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 478869805b96..ef18daeaa4cc 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -101,7 +101,7 @@ struct mmc_test_transfer_result {
struct list_head link;
unsigned int count;
unsigned int sectors;
- struct timespec ts;
+ struct timespec64 ts;
unsigned int rate;
unsigned int iops;
};
@@ -171,11 +171,6 @@ struct mmc_test_multiple_rw {
enum mmc_test_prep_media prepare;
};
-struct mmc_test_async_req {
- struct mmc_async_req areq;
- struct mmc_test_card *test;
-};
-
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
@@ -515,14 +510,11 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
/*
* Calculate transfer rate in bytes per second.
*/
-static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
uint64_t ns;
- ns = ts->tv_sec;
- ns *= 1000000000;
- ns += ts->tv_nsec;
-
+ ns = timespec64_to_ns(ts);
bytes *= 1000000000;
while (ns > UINT_MAX) {
@@ -542,7 +534,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
* Save transfer results for future usage
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
- unsigned int count, unsigned int sectors, struct timespec ts,
+ unsigned int count, unsigned int sectors, struct timespec64 ts,
unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
@@ -567,21 +559,21 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
* Print the transfer rate.
*/
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
- struct timespec *ts1, struct timespec *ts2)
+ struct timespec64 *ts1, struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
- pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+ pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
- (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+ (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
+ (u32)ts.tv_nsec, rate / 1000, rate / 1024,
iops / 100, iops % 100);
mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
@@ -591,24 +583,24 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
* Print the average transfer rate.
*/
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
- unsigned int count, struct timespec *ts1,
- struct timespec *ts2)
+ unsigned int count, struct timespec64 *ts1,
+ struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+ "%llu.%09u seconds (%u kB/s, %u KiB/s, "
"%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
- (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+ (u64)ts.tv_sec, (u32)ts.tv_nsec,
rate / 1000, rate / 1024, iops / 100, iops % 100,
test->area.sg_len);
@@ -741,30 +733,6 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
-static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_test_async_req *test_async =
- container_of(areq, struct mmc_test_async_req, areq);
- int ret;
-
- mmc_test_wait_busy(test_async->test);
-
- /*
- * FIXME: this would earlier just cast a regular error code,
- * either of the kernel type -ERRORCODE or the local test framework
- * RESULT_* errorcode, into an enum mmc_blk_status and return as
- * result check. Instead, convert it to some reasonable type by just
- * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
- * If possible, a reasonable error code should be returned.
- */
- ret = mmc_test_check_result(test_async->test, areq->mrq);
- if (ret)
- return MMC_BLK_CMD_ERR;
-
- return MMC_BLK_SUCCESS;
-}
-
/*
* Checks that a "short transfer" behaved as expected
*/
@@ -831,6 +799,45 @@ static struct mmc_test_req *mmc_test_req_alloc(void)
return rq;
}
+static void mmc_test_wait_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+static int mmc_test_start_areq(struct mmc_test_card *test,
+ struct mmc_request *mrq,
+ struct mmc_request *prev_mrq)
+{
+ struct mmc_host *host = test->card->host;
+ int err = 0;
+
+ if (mrq) {
+ init_completion(&mrq->completion);
+ mrq->done = mmc_test_wait_done;
+ mmc_pre_req(host, mrq);
+ }
+
+ if (prev_mrq) {
+ wait_for_completion(&prev_mrq->completion);
+ err = mmc_test_wait_busy(test);
+ if (!err)
+ err = mmc_test_check_result(test, prev_mrq);
+ }
+
+ if (!err && mrq) {
+ err = mmc_start_request(host, mrq);
+ if (err)
+ mmc_retune_release(host);
+ }
+
+ if (prev_mrq)
+ mmc_post_req(host, prev_mrq, 0);
+
+ if (err && mrq)
+ mmc_post_req(host, mrq, err);
+
+ return err;
+}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len,
@@ -838,17 +845,10 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
unsigned blksz, int write, int count)
{
struct mmc_test_req *rq1, *rq2;
- struct mmc_test_async_req test_areq[2];
- struct mmc_async_req *done_areq;
- struct mmc_async_req *cur_areq = &test_areq[0].areq;
- struct mmc_async_req *other_areq = &test_areq[1].areq;
- enum mmc_blk_status status;
+ struct mmc_request *mrq, *prev_mrq;
int i;
int ret = RESULT_OK;
- test_areq[0].test = test;
- test_areq[1].test = test;
-
rq1 = mmc_test_req_alloc();
rq2 = mmc_test_req_alloc();
if (!rq1 || !rq2) {
@@ -856,33 +856,25 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
goto err;
}
- cur_areq->mrq = &rq1->mrq;
- cur_areq->err_check = mmc_test_check_result_async;
- other_areq->mrq = &rq2->mrq;
- other_areq->err_check = mmc_test_check_result_async;
+ mrq = &rq1->mrq;
+ prev_mrq = NULL;
for (i = 0; i < count; i++) {
- mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
- blocks, blksz, write);
- done_areq = mmc_start_areq(test->card->host, cur_areq, &status);
-
- if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
- ret = RESULT_FAIL;
+ mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
+ mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
+ blksz, write);
+ ret = mmc_test_start_areq(test, mrq, prev_mrq);
+ if (ret)
goto err;
- }
- if (done_areq)
- mmc_test_req_reset(container_of(done_areq->mrq,
- struct mmc_test_req, mrq));
+ if (!prev_mrq)
+ prev_mrq = &rq2->mrq;
- swap(cur_areq, other_areq);
+ swap(mrq, prev_mrq);
dev_addr += blocks;
}
- done_areq = mmc_start_areq(test->card->host, NULL, &status);
- if (status != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
-
+ ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
kfree(rq1);
kfree(rq2);
@@ -1449,7 +1441,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
int max_scatter, int timed, int count,
bool nonblock, int min_sg_len)
{
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret = 0;
int i;
struct mmc_test_area *t = &test->area;
@@ -1475,7 +1467,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
if (nonblock)
ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
dev_addr, t->blocks, 512, write, count);
@@ -1489,7 +1481,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
if (timed)
mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
@@ -1747,7 +1739,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1758,19 +1750,19 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
for (sz = 512; sz < t->max_sz; sz <<= 1) {
dev_addr = t->dev_addr + (sz >> 9);
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
}
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
return 0;
}
@@ -1779,19 +1771,19 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1818,7 +1810,7 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
ret = mmc_test_area_erase(test);
@@ -1826,14 +1818,14 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1864,7 +1856,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1882,7 +1874,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_erase(test->card, dev_addr, sz >> 9,
MMC_TRIM_ARG);
@@ -1890,7 +1882,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
}
return 0;
@@ -1912,7 +1904,7 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
{
unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
unsigned int ssz;
- struct timespec ts1, ts2, ts;
+ struct timespec64 ts1, ts2, ts;
int ret;
ssz = sz >> 9;
@@ -1921,10 +1913,10 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
range1 = rnd_addr / test->card->pref_erase;
range2 = range1 / ssz;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (cnt = 0; cnt < UINT_MAX; cnt++) {
- getnstimeofday(&ts2);
- ts = timespec_sub(ts2, ts1);
+ ktime_get_ts64(&ts2);
+ ts = timespec64_sub(ts2, ts1);
if (ts.tv_sec >= 10)
break;
ea = mmc_test_rnd_num(range1);
@@ -1998,7 +1990,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt, sz, ssz;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
sz = t->max_tfr;
@@ -2025,7 +2017,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
cnt = tot_sz / sz;
dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, write,
max_scatter, 0);
@@ -2033,7 +2025,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
return ret;
dev_addr += ssz;
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
@@ -2328,10 +2320,17 @@ static int mmc_test_reset(struct mmc_test_card *test)
int err;
err = mmc_hw_reset(host);
- if (!err)
+ if (!err) {
+ /*
+ * Reset will re-enable the card's command queue, but tests
+ * expect it to be disabled.
+ */
+ if (card->ext_csd.cmdq_en)
+ mmc_cmdq_disable(card);
return RESULT_OK;
- else if (err == -EOPNOTSUPP)
+ } else if (err == -EOPNOTSUPP) {
return RESULT_UNSUP_HOST;
+ }
return RESULT_FAIL;
}
@@ -2356,11 +2355,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_test_req *rq = mmc_test_req_alloc();
struct mmc_host *host = test->card->host;
struct mmc_test_area *t = &test->area;
- struct mmc_test_async_req test_areq = { .test = test };
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
- enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
int ret = 0, cmd_ret;
u32 status = 0;
int count = 0;
@@ -2373,9 +2370,6 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
mrq->sbc = &rq->sbc;
mrq->cap_cmd_during_tfr = true;
- test_areq.areq.mrq = mrq;
- test_areq.areq.err_check = mmc_test_check_result_async;
-
mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
512, write);
@@ -2388,11 +2382,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_areq(host, &test_areq.areq, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS) {
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, mrq, NULL);
+ if (ret)
goto out_free;
- }
} else {
mmc_wait_for_req(host, mrq);
}
@@ -2426,9 +2418,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Wait for data request to complete */
if (use_areq) {
- mmc_start_areq(host, NULL, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, NULL, mrq);
} else {
mmc_wait_for_req_done(test->card->host, mrq);
}
@@ -3066,10 +3056,9 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
+ seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
tr->count, tr->sectors,
- (unsigned long)tr->ts.tv_sec,
- (unsigned long)tr->ts.tv_nsec,
+ (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
tr->rate, tr->iops / 100, tr->iops % 100);
}
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4f33d277b125..421fab7250ac 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -22,100 +22,147 @@
#include "block.h"
#include "core.h"
#include "card.h"
+#include "host.h"
-/*
- * Prepare a MMC request. This just filters out odd stuff.
- */
-static int mmc_prep_request(struct request_queue *q, struct request *req)
+static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
- struct mmc_queue *mq = q->queuedata;
+ /* Allow only 1 DCMD at a time */
+ return mq->in_flight[MMC_ISSUE_DCMD];
+}
- if (mq && mmc_card_removed(mq->card))
- return BLKPREP_KILL;
+void mmc_cqe_check_busy(struct mmc_queue *mq)
+{
+ if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
+ mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
- req->rq_flags |= RQF_DONTPREP;
+ mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
+}
- return BLKPREP_OK;
+static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_CQE_DCMD;
}
-static int mmc_queue_thread(void *d)
+static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
+ struct request *req)
{
- struct mmc_queue *mq = d;
- struct request_queue *q = mq->queue;
- struct mmc_context_info *cntx = &mq->card->host->context_info;
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ return MMC_ISSUE_SYNC;
+ case REQ_OP_FLUSH:
+ return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
+ default:
+ return MMC_ISSUE_ASYNC;
+ }
+}
- current->flags |= PF_MEMALLOC;
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
- down(&mq->thread_sem);
- do {
- struct request *req;
+ if (mq->use_cqe)
+ return mmc_cqe_issue_type(host, req);
- spin_lock_irq(q->queue_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- req = blk_fetch_request(q);
- mq->asleep = false;
- cntx->is_waiting_last_req = false;
- cntx->is_new_req = false;
- if (!req) {
- /*
- * Dispatch queue is empty so set flags for
- * mmc_request_fn() to wake us up.
- */
- if (mq->qcnt)
- cntx->is_waiting_last_req = true;
- else
- mq->asleep = true;
- }
- spin_unlock_irq(q->queue_lock);
+ if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
+ return MMC_ISSUE_ASYNC;
- if (req || mq->qcnt) {
- set_current_state(TASK_RUNNING);
- mmc_blk_issue_rq(mq, req);
- cond_resched();
- } else {
- if (kthread_should_stop()) {
- set_current_state(TASK_RUNNING);
- break;
- }
- up(&mq->thread_sem);
- schedule();
- down(&mq->thread_sem);
- }
- } while (1);
- up(&mq->thread_sem);
+ return MMC_ISSUE_SYNC;
+}
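
How the classification above plays out, assuming a CQE host with
MMC_CAP2_CQE_DCMD set (summary only):

	/* REQ_OP_READ/WRITE -> MMC_ISSUE_ASYNC (hardware queue slot)
	 * REQ_OP_FLUSH      -> MMC_ISSUE_DCMD  (direct command slot)
	 * REQ_OP_DISCARD,
	 * REQ_OP_DRV_IN/OUT -> MMC_ISSUE_SYNC  (issued and waited inline)
	 *
	 * Without a CQE, reads and writes are still MMC_ISSUE_ASYNC and
	 * everything else is MMC_ISSUE_SYNC.
	 */
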
- return 0;
+static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
+{
+ if (!mq->recovery_needed) {
+ mq->recovery_needed = true;
+ schedule_work(&mq->recovery_work);
+ }
}
-/*
- * Generic MMC request handler. This is called for any queue on a
- * particular host. When the host is not busy, we look for a request
- * on any queue on this host, and attempt to issue it. This may
- * not be the queue we were asked to process.
- */
-static void mmc_request_fn(struct request_queue *q)
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
- struct request *req;
- struct mmc_context_info *cntx;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __mmc_cqe_recovery_notifier(mq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
- if (!mq) {
- while ((req = blk_fetch_request(q)) != NULL) {
- req->rq_flags |= RQF_QUIET;
- __blk_end_request_all(req, BLK_STS_IOERR);
+static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_queue *mq = req->q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ bool recovery_needed = false;
+
+ switch (issue_type) {
+ case MMC_ISSUE_ASYNC:
+ case MMC_ISSUE_DCMD:
+ if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
+ if (recovery_needed)
+ __mmc_cqe_recovery_notifier(mq);
+ return BLK_EH_RESET_TIMER;
}
- return;
+ /* No timeout */
+ return BLK_EH_HANDLED;
+ default:
+ /* Timeout is handled by mmc core */
+ return BLK_EH_RESET_TIMER;
}
+}
+
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
+ bool reserved)
+{
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ unsigned long flags;
+ enum blk_eh_timer_return ret;
- cntx = &mq->card->host->context_info;
+ spin_lock_irqsave(q->queue_lock, flags);
- if (cntx->is_waiting_last_req) {
- cntx->is_new_req = true;
- wake_up_interruptible(&cntx->wait);
- }
+ if (mq->recovery_needed || !mq->use_cqe)
+ ret = BLK_EH_RESET_TIMER;
+ else
+ ret = mmc_cqe_timed_out(req);
- if (mq->asleep)
- wake_up_process(mq->thread);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return ret;
+}
+
+static void mmc_mq_recovery_handler(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ recovery_work);
+ struct request_queue *q = mq->queue;
+
+ mmc_get_card(mq->card, &mq->ctx);
+
+ mq->in_recovery = true;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_recovery(mq);
+ else
+ mmc_blk_mq_recovery(mq);
+
+ mq->in_recovery = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->recovery_needed = false;
+ spin_unlock_irq(q->queue_lock);
+
+ mmc_put_card(mq->card, &mq->ctx);
+
+ blk_mq_run_hw_queues(q, true);
}
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
@@ -154,11 +201,10 @@ static void mmc_queue_setup_discard(struct request_queue *q,
* @req: the request
* @gfp: memory allocation policy
*/
-static int mmc_init_request(struct request_queue *q, struct request *req,
- gfp_t gfp)
+static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
+ gfp_t gfp)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
- struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
@@ -177,6 +223,131 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
+}
+
+static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
+{
+ struct mmc_queue *mq = set->driver_data;
+
+ mmc_exit_request(mq->queue, req);
+}
+
+/*
+ * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
+ * will not be dispatched in parallel.
+ */
+static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *req = bd->rq;
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ enum mmc_issue_type issue_type;
+ enum mmc_issued issued;
+ bool get_card, cqe_retune_ok;
+ blk_status_t ret;
+
+ if (mmc_card_removed(mq->card)) {
+ req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
+ }
+
+ issue_type = mmc_issue_type(mq, req);
+
+ spin_lock_irq(q->queue_lock);
+
+ if (mq->recovery_needed) {
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+
+ switch (issue_type) {
+ case MMC_ISSUE_DCMD:
+ if (mmc_cqe_dcmd_busy(mq)) {
+ mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+ break;
+ case MMC_ISSUE_ASYNC:
+ break;
+ default:
+ /*
+ * Timeouts are handled by mmc core, and we don't have a host
+ * API to abort requests, so we can't handle the timeout anyway.
+ * However, when the timeout happens, blk_mq_complete_request()
+ * no longer works (to stop the request disappearing under us).
+ * To avoid racing with that, set a large timeout.
+ */
+ req->timeout = 600 * HZ;
+ break;
+ }
+
+ mq->in_flight[issue_type] += 1;
+ get_card = (mmc_tot_in_flight(mq) == 1);
+ cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
+
+ spin_unlock_irq(q->queue_lock);
+
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ req_to_mmc_queue_req(req)->retries = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+
+ if (get_card)
+ mmc_get_card(card, &mq->ctx);
+
+ if (mq->use_cqe) {
+ host->retune_now = host->need_retune && cqe_retune_ok &&
+ !host->hold_retune;
+ }
+
+ blk_mq_start_request(req);
+
+ issued = mmc_blk_mq_issue_rq(mq, req);
+
+ switch (issued) {
+ case MMC_REQ_BUSY:
+ ret = BLK_STS_RESOURCE;
+ break;
+ case MMC_REQ_FAILED_TO_START:
+ ret = BLK_STS_IOERR;
+ break;
+ default:
+ ret = BLK_STS_OK;
+ break;
+ }
+
+ if (issued != MMC_REQ_STARTED) {
+ bool put_card = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->in_flight[issue_type] -= 1;
+ if (mmc_tot_in_flight(mq) == 0)
+ put_card = true;
+ spin_unlock_irq(q->queue_lock);
+ if (put_card)
+ mmc_put_card(card, &mq->ctx);
+ }
+
+ return ret;
+}
+
+static const struct blk_mq_ops mmc_mq_ops = {
+ .queue_rq = mmc_mq_queue_rq,
+ .init_request = mmc_mq_init_request,
+ .exit_request = mmc_mq_exit_request,
+ .complete = mmc_blk_mq_complete,
+ .timeout = mmc_mq_timed_out,
+};
+
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -196,124 +367,139 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- /* Initialize thread_sem even if it is not used */
- sema_init(&mq->thread_sem, 1);
+ INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
+ INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
+
+ mutex_init(&mq->complete_lock);
+
+ init_waitqueue_head(&mq->wait);
}
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock, const char *subname)
+static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
+ const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
- struct mmc_host *host = card->host;
- int ret = -ENOMEM;
-
- mq->card = card;
- mq->queue = blk_alloc_queue(GFP_KERNEL);
- if (!mq->queue)
- return -ENOMEM;
- mq->queue->queue_lock = lock;
- mq->queue->request_fn = mmc_request_fn;
- mq->queue->init_rq_fn = mmc_init_request;
- mq->queue->exit_rq_fn = mmc_exit_request;
- mq->queue->cmd_size = sizeof(struct mmc_queue_req);
- mq->queue->queuedata = mq;
- mq->qcnt = 0;
- ret = blk_init_allocated_queue(mq->queue);
- if (ret) {
- blk_cleanup_queue(mq->queue);
+ int ret;
+
+ memset(&mq->tag_set, 0, sizeof(mq->tag_set));
+ mq->tag_set.ops = mq_ops;
+ mq->tag_set.queue_depth = q_depth;
+ mq->tag_set.numa_node = NUMA_NO_NODE;
+ mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
+ BLK_MQ_F_BLOCKING;
+ mq->tag_set.nr_hw_queues = 1;
+ mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
+ mq->tag_set.driver_data = mq;
+
+ ret = blk_mq_alloc_tag_set(&mq->tag_set);
+ if (ret)
return ret;
- }
-
- blk_queue_prep_rq(mq->queue, mmc_prep_request);
-
- mmc_setup_queue(mq, card);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
- host->index, subname ? subname : "");
-
- if (IS_ERR(mq->thread)) {
- ret = PTR_ERR(mq->thread);
- goto cleanup_queue;
+ mq->queue = blk_mq_init_queue(&mq->tag_set);
+ if (IS_ERR(mq->queue)) {
+ ret = PTR_ERR(mq->queue);
+ goto free_tag_set;
}
+ mq->queue->queue_lock = lock;
+ mq->queue->queuedata = mq;
+
return 0;
-cleanup_queue:
- blk_cleanup_queue(mq->queue);
+free_tag_set:
+ blk_mq_free_tag_set(&mq->tag_set);
+
return ret;
}
-void mmc_cleanup_queue(struct mmc_queue *mq)
-{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#define MMC_QUEUE_DEPTH 64
- /* Make sure the queue isn't suspended, as that will deadlock */
- mmc_queue_resume(mq);
+static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock)
+{
+ struct mmc_host *host = card->host;
+ int q_depth;
+ int ret;
+
+ /*
+ * The queue depth for CQE must match the hardware because the request
+ * tag is used to index the hardware queue.
+ */
+ if (mq->use_cqe)
+ q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+ else
+ q_depth = MMC_QUEUE_DEPTH;
+
+ ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
+ if (ret)
+ return ret;
- /* Then terminate our worker thread */
- kthread_stop(mq->thread);
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
- /* Empty the queue */
- spin_lock_irqsave(q->queue_lock, flags);
- q->queuedata = NULL;
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mmc_setup_queue(mq, card);
- mq->card = NULL;
+ return 0;
}
-EXPORT_SYMBOL(mmc_cleanup_queue);
/**
- * mmc_queue_suspend - suspend a MMC request queue
- * @mq: MMC queue to suspend
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ * @subname: partition subname
*
- * Stop the block request queue, and wait for our thread to
- * complete any outstanding requests. This ensures that we
- * won't suspend while a request is being processed.
+ * Initialise a MMC card request queue.
*/
-void mmc_queue_suspend(struct mmc_queue *mq)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+ struct mmc_host *host = card->host;
- if (!mq->suspended) {
- mq->suspended |= true;
+ mq->card = card;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mq->use_cqe = host->cqe_enabled;
- down(&mq->thread_sem);
- }
+ return mmc_mq_init(mq, card, lock);
+}
+
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+ blk_mq_quiesce_queue(mq->queue);
+
+ /*
+ * The host remains claimed while there are outstanding requests, so
+ * simply claiming and releasing here ensures there are none.
+ */
+ mmc_claim_host(mq->card->host);
+ mmc_release_host(mq->card->host);
}
-/**
- * mmc_queue_resume - resume a previously suspended MMC request queue
- * @mq: MMC queue to resume
- */
void mmc_queue_resume(struct mmc_queue *mq)
{
+ blk_mq_unquiesce_queue(mq->queue);
+}
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
struct request_queue *q = mq->queue;
- unsigned long flags;
- if (mq->suspended) {
- mq->suspended = false;
+ /*
+ * The legacy code handled the possibility of being suspended,
+ * so do that here too.
+ */
+ if (blk_queue_quiesced(q))
+ blk_mq_unquiesce_queue(q);
- up(&mq->thread_sem);
+ blk_cleanup_queue(q);
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ /*
+ * A request can be completed before the next request, potentially
+ * leaving a complete_work with nothing to do. Such a work item might
+ * still be queued at this point. Flush it.
+ */
+ flush_work(&mq->complete_work);
+
+ mq->card = NULL;
}
/*
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 547b457c4251..17e59d50b496 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -8,6 +8,20 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+enum mmc_issued {
+ MMC_REQ_STARTED,
+ MMC_REQ_BUSY,
+ MMC_REQ_FAILED_TO_START,
+ MMC_REQ_FINISHED,
+};
+
+enum mmc_issue_type {
+ MMC_ISSUE_SYNC,
+ MMC_ISSUE_DCMD,
+ MMC_ISSUE_ASYNC,
+ MMC_ISSUE_MAX,
+};
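+
+/*
+ * MMC_ISSUE_SYNC requests are issued and completed synchronously from
+ * ->queue_rq(). MMC_ISSUE_DCMD is a CQE direct command (e.g. a cache
+ * flush), of which only one may be in flight at a time. MMC_ISSUE_ASYNC
+ * covers reads and writes, which complete asynchronously.
+ */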
+
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
return blk_mq_rq_to_pdu(rq);
@@ -20,7 +34,6 @@ static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
return blk_mq_rq_from_pdu(mqr);
}
-struct task_struct;
struct mmc_blk_data;
struct mmc_blk_ioc_data;
@@ -30,7 +43,6 @@ struct mmc_blk_request {
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
- int retune_retry_done;
};
/**
@@ -52,28 +64,34 @@ enum mmc_drv_op {
struct mmc_queue_req {
struct mmc_blk_request brq;
struct scatterlist *sg;
- struct mmc_async_req areq;
enum mmc_drv_op drv_op;
int drv_op_result;
void *drv_op_data;
unsigned int ioc_count;
+ int retries;
};
struct mmc_queue {
struct mmc_card *card;
- struct task_struct *thread;
- struct semaphore thread_sem;
- bool suspended;
- bool asleep;
+ struct mmc_ctx ctx;
+ struct blk_mq_tag_set tag_set;
struct mmc_blk_data *blkdata;
struct request_queue *queue;
- /*
- * FIXME: this counter is not a very reliable way of keeping
- * track of how many requests that are ongoing. Switch to just
- * letting the block core keep track of requests and per-request
- * associated mmc_queue_req data.
- */
- int qcnt;
+ int in_flight[MMC_ISSUE_MAX];
+ unsigned int cqe_busy;
+#define MMC_CQE_DCMD_BUSY BIT(0)
+#define MMC_CQE_QUEUE_FULL BIT(1)
+ bool use_cqe;
+ bool recovery_needed;
+ bool in_recovery;
+ bool rw_wait;
+ bool waiting;
+ struct work_struct recovery_work;
+ wait_queue_head_t wait;
+ struct request *recovery_req;
+ struct request *complete_req;
+ struct mutex complete_lock;
+ struct work_struct complete_work;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -84,4 +102,22 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
+void mmc_cqe_check_busy(struct mmc_queue *mq);
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq);
+
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
+
+static inline int mmc_tot_in_flight(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_SYNC] +
+ mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
+static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
#endif
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 863f1dbbfc1b..3698b0576009 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -121,20 +121,18 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
void mmc_gpiod_request_cd_irq(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
- int ret, irq;
+ int irq = -EINVAL;
+ int ret;
if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio)
return;
- irq = gpiod_to_irq(ctx->cd_gpio);
-
/*
- * Even if gpiod_to_irq() returns a valid IRQ number, the platform might
- * still prefer to poll, e.g., because that IRQ number is already used
- * by another unit and cannot be shared.
+ * Do not use IRQ if the platform prefers to poll, e.g., because that
+ * IRQ number is already used by another unit and cannot be shared.
*/
- if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
- irq = -EINVAL;
+ if (!(host->caps & MMC_CAP_NEEDS_POLL))
+ irq = gpiod_to_irq(ctx->cd_gpio);
if (irq >= 0) {
if (!ctx->cd_gpio_isr)
@@ -307,3 +305,11 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
return 0;
}
EXPORT_SYMBOL(mmc_gpiod_request_ro);
+
+bool mmc_can_gpio_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return ctx->ro_gpio ? true : false;
+}
+EXPORT_SYMBOL(mmc_can_gpio_ro);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 567028c9219a..67bd3344dd03 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,6 +81,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
depends on MMC_SDHCI && PCI
+ select MMC_CQHCI
help
This selects the PCI Secure Digital Host Controller Interface.
Most controllers found today are PCI devices.
@@ -132,6 +133,7 @@ config MMC_SDHCI_OF_ARASAN
depends on MMC_SDHCI_PLTFM
depends on OF
depends on COMMON_CLK
+ select MMC_CQHCI
help
This selects the Arasan Secure Digital Host Controller Interface
(SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
@@ -320,7 +322,7 @@ config MMC_SDHCI_BCM_KONA
config MMC_SDHCI_F_SDH30
tristate "SDHCI support for Fujitsu Semiconductor F_SDH30"
depends on MMC_SDHCI_PLTFM
- depends on OF
+ depends on OF || ACPI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
Needed by some Fujitsu SoCs for MMC / SD / SDIO support.
@@ -595,11 +597,8 @@ config MMC_TMIO
config MMC_SDHI
tristate "Renesas SDHI SD/SDIO controller support"
- depends on SUPERH || ARM || ARM64
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select MMC_TMIO_CORE
- select MMC_SDHI_SYS_DMAC if (SUPERH || ARM)
- select MMC_SDHI_INTERNAL_DMAC if ARM64
help
This provides support for the SDHI SD/SDIO controller found in
Renesas SuperH, ARM and ARM64 based SoCs.
@@ -607,6 +606,7 @@ config MMC_SDHI
config MMC_SDHI_SYS_DMAC
tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC"
depends on MMC_SDHI
+ default MMC_SDHI if (SUPERH || ARM)
help
This provides DMA support for SDHI SD/SDIO controllers
using SYS-DMAC via DMA Engine. This supports the controllers
@@ -616,6 +616,7 @@ config MMC_SDHI_INTERNAL_DMAC
tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering"
depends on ARM64 || COMPILE_TEST
depends on MMC_SDHI
+ default MMC_SDHI if ARM64
help
This provides DMA support for SDHI SD/SDIO controllers
using on-chip bus mastering. This supports the controllers
@@ -838,14 +839,14 @@ config MMC_USDHI6ROL0
config MMC_REALTEK_PCI
tristate "Realtek PCI-E SD/MMC Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek PCI-E card reader
config MMC_REALTEK_USB
tristate "Realtek USB SD/MMC Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek RTS5129/39 series card reader
@@ -857,6 +858,19 @@ config MMC_SUNXI
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
+config MMC_CQHCI
+ tristate "Command Queue Host Controller Interface support"
+ depends on HAS_DMA
+ help
+ This selects the Command Queue Host Controller Interface (CQHCI)
+ support present in host controllers of Qualcomm Technologies, Inc.,
+ amongst others.
+ This controller supports eMMC devices with command queue support.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index a43cf0d5a5d3..84cd1388abc3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o
+sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
@@ -39,12 +39,8 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_SYS_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_sys_dmac.o
-endif
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_INTERNAL_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_internal_dmac.o
-endif
+obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o
+obj-$(CONFIG_MMC_SDHI_INTERNAL_DMAC) += renesas_sdhi_internal_dmac.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
@@ -92,6 +88,7 @@ obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
+obj-$(CONFIG_MMC_CQHCI) += cqhci.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 63fe5091ca59..63d27589cd89 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -42,13 +42,11 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>
-#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/types.h>
-#include <asm/io.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
new file mode 100644
index 000000000000..159270e947cf
--- /dev/null
+++ b/drivers/mmc/host/cqhci.c
@@ -0,0 +1,1150 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <linux/ktime.h>
+
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include "cqhci.h"
+
+#define DCMD_SLOT 31
+#define NUM_SLOTS 32
+
+struct cqhci_slot {
+ struct mmc_request *mrq;
+ unsigned int flags;
+#define CQHCI_EXTERNAL_TIMEOUT BIT(0)
+#define CQHCI_COMPLETED BIT(1)
+#define CQHCI_HOST_CRC BIT(2)
+#define CQHCI_HOST_TIMEOUT BIT(3)
+#define CQHCI_HOST_OTHER BIT(4)
+};
+
+static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->desc_base + (tag * cq_host->slot_sz);
+}
+
+static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *desc = get_desc(cq_host, tag);
+
+ return desc + cq_host->task_desc_len;
+}
+
+static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_dma_base +
+ (cq_host->mmc->max_segs * tag *
+ cq_host->trans_desc_len);
+}
+
+static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_base +
+ (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+}
+
+static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *link_temp;
+ dma_addr_t trans_temp;
+
+ link_temp = get_link_desc(cq_host, tag);
+ trans_temp = get_trans_desc_dma(cq_host, tag);
+
+ memset(link_temp, 0, cq_host->link_desc_len);
+ if (cq_host->link_desc_len > 8)
+ *(link_temp + 8) = 0;
+
+ if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
+ *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
+ return;
+ }
+
+ *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
+
+ if (cq_host->dma64) {
+ __le64 *data_addr = (__le64 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le64(trans_temp);
+ } else {
+ __le32 *data_addr = (__le32 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le32(trans_temp);
+ }
+}
+
+static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
+{
+ cqhci_writel(cq_host, set, CQHCI_ISTE);
+ cqhci_writel(cq_host, set, CQHCI_ISGE);
+}
+
+#define DRV_NAME "cqhci"
+
+#define CQHCI_DUMP(f, x...) \
+ pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
+
+static void cqhci_dumpregs(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+
+ CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
+
+ CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CAP),
+ cqhci_readl(cq_host, CQHCI_VER));
+ CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CFG),
+ cqhci_readl(cq_host, CQHCI_CTL));
+ CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_IS),
+ cqhci_readl(cq_host, CQHCI_ISTE));
+ CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_ISGE),
+ cqhci_readl(cq_host, CQHCI_IC));
+ CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDLBA),
+ cqhci_readl(cq_host, CQHCI_TDLBAU));
+ CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDBR),
+ cqhci_readl(cq_host, CQHCI_TCN));
+ CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_DQS),
+ cqhci_readl(cq_host, CQHCI_DPT));
+ CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TCLR),
+ cqhci_readl(cq_host, CQHCI_SSC1));
+ CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_SSC2),
+ cqhci_readl(cq_host, CQHCI_CRDCT));
+ CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_RMEM),
+ cqhci_readl(cq_host, CQHCI_TERRI));
+ CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CRI),
+ cqhci_readl(cq_host, CQHCI_CRA));
+
+ if (cq_host->ops->dumpregs)
+ cq_host->ops->dumpregs(mmc);
+ else
+ CQHCI_DUMP(": ===========================================\n");
+}
+
+/*
+ * The allocated descriptor table for task, link & transfer descriptors
+ * looks like:
+ * |----------|
+ * |task desc | |->|----------|
+ * |----------| | |trans desc|
+ * |link desc-|->| |----------|
+ * |----------| .
+ * . .
+ * no. of slots max-segs
+ * . |----------|
+ * |----------|
+ * The idea here is to create the [task+trans] table and mark & point the
+ * link desc to the transfer desc table on a per slot basis.
+ */
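+/*
+ * A worked example (illustrative numbers): with 128-bit task descriptors
+ * (16 bytes) and 64-bit DMA (16-byte link descriptors), slot_sz is 32, so
+ * with 32 slots the descriptor table is 1024 bytes and the task descriptor
+ * for tag 3 lives at desc_base + 3 * 32. The transfer descriptors for tag 3
+ * start at trans_desc_base + 3 * max_segs * trans_desc_len.
+ */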
+static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
+{
+ int i;
+
+ /* task descriptor can be 64/128 bit irrespective of arch */
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
+ cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
+ CQHCI_TASK_DESC_SZ, CQHCI_CFG);
+ cq_host->task_desc_len = 16;
+ } else {
+ cq_host->task_desc_len = 8;
+ }
+
+ /*
+ * The transfer descriptor is 96 bits long rather than 128 bits, which
+ * means ADMA would expect the next valid descriptor at either the
+ * 96th or the 128th bit.
+ */
+ if (cq_host->dma64) {
+ if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
+ cq_host->trans_desc_len = 12;
+ else
+ cq_host->trans_desc_len = 16;
+ cq_host->link_desc_len = 16;
+ } else {
+ cq_host->trans_desc_len = 8;
+ cq_host->link_desc_len = 8;
+ }
+
+ /* total size of a slot: 1 task & 1 transfer (link) */
+ cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
+
+ cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
+
+ cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
+ (cq_host->num_slots - 1);
+
+ pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
+ cq_host->slot_sz);
+
+ /*
+ * Allocate one DMA-mapped chunk of memory for the task/link descriptor
+ * table and another for the transfer descriptors, then set up each
+ * slot's link descriptor to point at its transfer descriptor list.
+ */
+ cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->desc_size,
+ &cq_host->desc_dma_base,
+ GFP_KERNEL);
+ cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->data_size,
+ &cq_host->trans_desc_dma_base,
+ GFP_KERNEL);
+ if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ return -ENOMEM;
+
+ pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
+ (unsigned long long)cq_host->desc_dma_base,
+ (unsigned long long)cq_host->trans_desc_dma_base);
+
+ for (i = 0; i < cq_host->num_slots; i++)
+ setup_trans_desc(cq_host, i);
+
+ return 0;
+}
+
+static void __cqhci_enable(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+
+ /* Configuration must not be changed while enabled */
+ if (cqcfg & CQHCI_ENABLE) {
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ }
+
+ cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
+
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ cqcfg |= CQHCI_DCMD;
+
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
+ cqcfg |= CQHCI_TASK_DESC_SZ;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBA);
+ cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBAU);
+
+ cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ cqcfg |= CQHCI_ENABLE;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ mmc->cqe_on = true;
+
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+
+ /* Ensure all writes are done before interrupts are enabled */
+ wmb();
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ cq_host->activated = true;
+}
+
+static void __cqhci_disable(struct cqhci_host *cq_host)
+{
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cq_host->mmc->cqe_on = false;
+
+ cq_host->activated = false;
+}
+
+int cqhci_suspend(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (cq_host->enabled)
+ __cqhci_disable(cq_host);
+
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_suspend);
+
+int cqhci_resume(struct mmc_host *mmc)
+{
+ /* Re-enable is done upon first request */
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_resume);
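+
+/*
+ * A minimal usage sketch, assuming a glue driver named "foo" that stores the
+ * mmc_host in drvdata (both names are illustrative, not part of this API):
+ *
+ *	static int foo_suspend(struct device *dev)
+ *	{
+ *		struct mmc_host *mmc = dev_get_drvdata(dev);
+ *
+ *		return cqhci_suspend(mmc);
+ *	}
+ */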
+
+static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int err;
+
+ if (cq_host->enabled)
+ return 0;
+
+ cq_host->rca = card->rca;
+
+ err = cqhci_host_alloc_tdl(cq_host);
+ if (err)
+ return err;
+
+ __cqhci_enable(cq_host);
+
+ cq_host->enabled = true;
+
+#ifdef DEBUG
+ cqhci_dumpregs(cq_host);
+#endif
+ return 0;
+}
+
+/* CQHCI is idle and should halt immediately, so set a small timeout, in us */
+#define CQHCI_OFF_TIMEOUT 100
+
+static void cqhci_off(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ ktime_t timeout;
+ bool timed_out;
+ u32 reg;
+
+ if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
+ return;
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, false);
+
+ cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
+
+ timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
+ while (1) {
+ timed_out = ktime_compare(ktime_get(), timeout) > 0;
+ reg = cqhci_readl(cq_host, CQHCI_CTL);
+ if ((reg & CQHCI_HALT) || timed_out)
+ break;
+ }
+
+ if (timed_out)
+ pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
+ else
+ pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
+
+ mmc->cqe_on = false;
+}
+
+static void cqhci_disable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->enabled)
+ return;
+
+ cqhci_off(mmc);
+
+ __cqhci_disable(cq_host);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
+ cq_host->trans_desc_base,
+ cq_host->trans_desc_dma_base);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
+ cq_host->desc_base,
+ cq_host->desc_dma_base);
+
+ cq_host->trans_desc_base = NULL;
+ cq_host->desc_base = NULL;
+
+ cq_host->enabled = false;
+}
+
+static void cqhci_prep_task_desc(struct mmc_request *mrq,
+ u64 *data, bool intr)
+{
+ u32 req_flags = mrq->data->flags;
+
+ *data = CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(intr) |
+ CQHCI_ACT(0x5) |
+ CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
+ CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
+ CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
+ CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
+ CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
+ CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
+ CQHCI_BLK_COUNT(mrq->data->blocks) |
+ CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
+
+ pr_debug("%s: cqhci: tag %d task descriptor 0x016%llx\n",
+ mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
+}
+
+static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int sg_count;
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return -EINVAL;
+
+ sg_count = dma_map_sg(mmc_dev(host), data->sg,
+ data->sg_len,
+ (data->flags & MMC_DATA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!sg_count) {
+ pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
+ return -ENOMEM;
+ }
+
+ return sg_count;
+}
+
+static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
+ bool dma64)
+{
+ __le32 *attr = (__le32 __force *)desc;
+
+ *attr = (CQHCI_VALID(1) |
+ CQHCI_END(end ? 1 : 0) |
+ CQHCI_INT(0) |
+ CQHCI_ACT(0x4) |
+ CQHCI_DAT_LENGTH(len));
+
+ if (dma64) {
+ __le64 *dataddr = (__le64 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le64(addr);
+ } else {
+ __le32 *dataddr = (__le32 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le32(addr);
+ }
+}
+
+static int cqhci_prep_tran_desc(struct mmc_request *mrq,
+ struct cqhci_host *cq_host, int tag)
+{
+ struct mmc_data *data = mrq->data;
+ int i, sg_count, len;
+ bool end = false;
+ bool dma64 = cq_host->dma64;
+ dma_addr_t addr;
+ u8 *desc;
+ struct scatterlist *sg;
+
+ sg_count = cqhci_dma_map(mrq->host, mrq);
+ if (sg_count < 0) {
+ pr_err("%s: %s: unable to map sg lists, %d\n",
+ mmc_hostname(mrq->host), __func__, sg_count);
+ return sg_count;
+ }
+
+ desc = get_trans_desc(cq_host, tag);
+
+ for_each_sg(data->sg, sg, sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((i+1) == sg_count)
+ end = true;
+ cqhci_set_tran_desc(desc, addr, len, end, dma64);
+ desc += cq_host->trans_desc_len;
+ }
+
+ return 0;
+}
+
+static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ u64 *task_desc = NULL;
+ u64 data = 0;
+ u8 resp_type;
+ u8 *desc;
+ __le64 *dataddr;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u8 timing;
+
+ if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
+ resp_type = 0x0;
+ timing = 0x1;
+ } else {
+ if (mrq->cmd->flags & MMC_RSP_R1B) {
+ resp_type = 0x3;
+ timing = 0x0;
+ } else {
+ resp_type = 0x2;
+ timing = 0x1;
+ }
+ }
+
+ task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
+ memset(task_desc, 0, cq_host->task_desc_len);
+ data |= (CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(1) |
+ CQHCI_QBAR(1) |
+ CQHCI_ACT(0x5) |
+ CQHCI_CMD_INDEX(mrq->cmd->opcode) |
+ CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
+ *task_desc |= data;
+ desc = (u8 *)task_desc;
+ pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
+ mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
+ dataddr = (__le64 __force *)(desc + 4);
+ dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
+}
+
+static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (data) {
+ dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static inline int cqhci_tag(struct mmc_request *mrq)
+{
+ return mrq->cmd ? DCMD_SLOT : mrq->tag;
+}
+
+static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ int err = 0;
+ u64 data = 0;
+ u64 *task_desc = NULL;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+
+ if (!cq_host->enabled) {
+ pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
+ return -EINVAL;
+ }
+
+ /* First request after resume has to re-enable */
+ if (!cq_host->activated)
+ __cqhci_enable(cq_host);
+
+ if (!mmc->cqe_on) {
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+ mmc->cqe_on = true;
+ pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+ pr_err("%s: cqhci: CQE failed to exit halt state\n",
+ mmc_hostname(mmc));
+ }
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+ }
+
+ if (mrq->data) {
+ task_desc = (__le64 __force *)get_desc(cq_host, tag);
+ cqhci_prep_task_desc(mrq, &data, 1);
+ *task_desc = cpu_to_le64(data);
+ err = cqhci_prep_tran_desc(mrq, cq_host, tag);
+ if (err) {
+ pr_err("%s: cqhci: failed to setup tx desc: %d\n",
+ mmc_hostname(mmc), err);
+ return err;
+ }
+ } else {
+ cqhci_prep_dcmd_desc(mmc, mrq);
+ }
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+
+ if (cq_host->recovery_halt) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ cq_host->slot[tag].mrq = mrq;
+ cq_host->slot[tag].flags = 0;
+
+ cq_host->qcnt += 1;
+
+ cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
+ if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
+ pr_debug("%s: cqhci: doorbell not set for tag %d\n",
+ mmc_hostname(mmc), tag);
+out_unlock:
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (err)
+ cqhci_post_req(mmc, mrq);
+
+ return err;
+}
+
+static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool notify)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->recovery_halt) {
+ cq_host->recovery_halt = true;
+ pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
+ wake_up(&cq_host->wait_queue);
+ if (notify && mrq->recovery_notifier)
+ mrq->recovery_notifier(mrq);
+ }
+}
+
+static unsigned int cqhci_error_flags(int error1, int error2)
+{
+ int error = error1 ? error1 : error2;
+
+ switch (error) {
+ case -EILSEQ:
+ return CQHCI_HOST_CRC;
+ case -ETIMEDOUT:
+ return CQHCI_HOST_TIMEOUT;
+ default:
+ return CQHCI_HOST_OTHER;
+ }
+}
+
+static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
+ int data_error)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot;
+ u32 terri;
+ int tag;
+
+ spin_lock(&cq_host->lock);
+
+ terri = cqhci_readl(cq_host, CQHCI_TERRI);
+
+ pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error, terri);
+
+ /* Forget about errors when recovery has already been triggered */
+ if (cq_host->recovery_halt)
+ goto out_unlock;
+
+ if (!cq_host->qcnt) {
+ WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error,
+ terri);
+ goto out_unlock;
+ }
+
+ if (CQHCI_TERRI_C_VALID(terri)) {
+ tag = CQHCI_TERRI_C_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(cmd_error, data_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (CQHCI_TERRI_D_VALID(terri)) {
+ tag = CQHCI_TERRI_D_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (!cq_host->recovery_halt) {
+ /*
+ * The only way to guarantee forward progress is to mark at
+ * least one task in error, so if none is indicated, pick one.
+ */
+ for (tag = 0; tag < NUM_SLOTS; tag++) {
+ slot = &cq_host->slot[tag];
+ if (!slot->mrq)
+ continue;
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ break;
+ }
+ }
+
+out_unlock:
+ spin_unlock(&cq_host->lock);
+}
+
+static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq) {
+ WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
+ mmc_hostname(mmc), tag);
+ return;
+ }
+
+ /* No completions allowed during recovery */
+ if (cq_host->recovery_halt) {
+ slot->flags |= CQHCI_COMPLETED;
+ return;
+ }
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ if (data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blksz * data->blocks;
+ }
+
+ mmc_cqe_request_done(mmc, mrq);
+}
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error)
+{
+ u32 status;
+ unsigned long tag = 0, comp_status;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ status = cqhci_readl(cq_host, CQHCI_IS);
+ cqhci_writel(cq_host, status, CQHCI_IS);
+
+ pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
+
+ if ((status & CQHCI_IS_RED) || cmd_error || data_error)
+ cqhci_error_irq(mmc, status, cmd_error, data_error);
+
+ if (status & CQHCI_IS_TCC) {
+ /* read TCN and complete the request */
+ comp_status = cqhci_readl(cq_host, CQHCI_TCN);
+ cqhci_writel(cq_host, comp_status, CQHCI_TCN);
+ pr_debug("%s: cqhci: TCN: 0x%08lx\n",
+ mmc_hostname(mmc), comp_status);
+
+ spin_lock(&cq_host->lock);
+
+ for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
+ /* complete the corresponding mrq */
+ pr_debug("%s: cqhci: completing tag %lu\n",
+ mmc_hostname(mmc), tag);
+ cqhci_finish_mrq(mmc, tag);
+ }
+
+ if (cq_host->waiting_for_idle && !cq_host->qcnt) {
+ cq_host->waiting_for_idle = false;
+ wake_up(&cq_host->wait_queue);
+ }
+
+ spin_unlock(&cq_host->lock);
+ }
+
+ if (status & CQHCI_IS_TCL)
+ wake_up(&cq_host->wait_queue);
+
+ if (status & CQHCI_IS_HAC)
+ wake_up(&cq_host->wait_queue);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(cqhci_irq);
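+
+/*
+ * A sketch of the expected call site (illustrative; the variable names are
+ * assumptions): a host controller's own interrupt handler forwards to
+ * cqhci_irq() while the CQE is active, e.g.:
+ *
+ *	if (mmc->cqe_on)
+ *		return cqhci_irq(mmc, intmask, cmd_error, data_error);
+ */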
+
+static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
+{
+ unsigned long flags;
+ bool is_idle;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ is_idle = !cq_host->qcnt || cq_host->recovery_halt;
+ *ret = cq_host->recovery_halt ? -EBUSY : 0;
+ cq_host->waiting_for_idle = !is_idle;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ return is_idle;
+}
+
+static int cqhci_wait_for_idle(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int ret;
+
+ wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
+
+ return ret;
+}
+
+static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool *recovery_needed)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ unsigned long flags;
+ bool timed_out;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ timed_out = slot->mrq == mrq;
+ if (timed_out) {
+ slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
+ cqhci_recovery_needed(mmc, mrq, false);
+ *recovery_needed = cq_host->recovery_halt;
+ }
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (timed_out) {
+ pr_err("%s: cqhci: timeout for tag %d\n",
+ mmc_hostname(mmc), tag);
+ cqhci_dumpregs(cq_host);
+ }
+
+ return timed_out;
+}
+
+static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
+{
+ return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
+}
+
+static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_CLEAR_ALL_TASKS;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_tasks_cleared(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to clear tasks\n",
+ mmc_hostname(mmc));
+
+ return ret;
+}
+
+static bool cqhci_halted(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
+}
+
+static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ if (cqhci_halted(cq_host))
+ return true;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_HALT;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_halted(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+
+ return ret;
+}
+
+/*
+ * After halting we expect to be able to use the command line. We interpret the
+ * failure to halt to mean the data lines might still be in use (and the upper
+ * layers will need to send a STOP command), so we set the timeout based on a
+ * generous command timeout.
+ */
+#define CQHCI_START_HALT_TIMEOUT 5
+
+static void cqhci_recovery_start(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, true);
+
+ mmc->cqe_on = false;
+}
+
+static int cqhci_error_from_flags(unsigned int flags)
+{
+ if (!flags)
+ return 0;
+
+ /* CRC errors might indicate re-tuning so prefer to report that */
+ if (flags & CQHCI_HOST_CRC)
+ return -EILSEQ;
+
+ if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
+ return -ETIMEDOUT;
+
+ return -EIO;
+}
+
+static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
+{
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq)
+ return;
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ data->bytes_xfered = 0;
+ data->error = cqhci_error_from_flags(slot->flags);
+ } else {
+ mrq->cmd->error = cqhci_error_from_flags(slot->flags);
+ }
+
+ mmc_cqe_request_done(cq_host->mmc, mrq);
+}
+
+static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
+{
+ int i;
+
+ for (i = 0; i < cq_host->num_slots; i++)
+ cqhci_recover_mrq(cq_host, i);
+}
+
+/*
+ * By now the command and data lines should be unused so there is no reason for
+ * CQHCI to take a long time to halt, but if it doesn't halt there could be
+ * problems clearing tasks, so be generous.
+ */
+#define CQHCI_FINISH_HALT_TIMEOUT 20
+
+/* CQHCI could be expected to clear its internal state pretty quickly */
+#define CQHCI_CLEAR_TIMEOUT 20
+
+static void cqhci_recovery_finish(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+ u32 cqcfg;
+ bool ok;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+
+ /*
+ * The specification contradicts itself: it says tasks cannot be
+ * cleared if CQHCI does not halt, but also that if CQHCI does not
+ * halt it should be disabled and re-enabled, yet not disabled before
+ * clearing tasks. Have a go anyway.
+ */
+ if (!ok) {
+ pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ cqcfg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ /* Be sure that there are no tasks */
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+ WARN_ON(!ok);
+ }
+
+ cqhci_recover_mrqs(cq_host);
+
+ WARN_ON(cq_host->qcnt);
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ cq_host->qcnt = 0;
+ cq_host->recovery_halt = false;
+ mmc->cqe_on = false;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ /* Ensure all writes are done before interrupts are re-enabled */
+ wmb();
+
+ cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
+}
+
+static const struct mmc_cqe_ops cqhci_cqe_ops = {
+ .cqe_enable = cqhci_enable,
+ .cqe_disable = cqhci_disable,
+ .cqe_request = cqhci_request,
+ .cqe_post_req = cqhci_post_req,
+ .cqe_off = cqhci_off,
+ .cqe_wait_for_idle = cqhci_wait_for_idle,
+ .cqe_timeout = cqhci_timeout,
+ .cqe_recovery_start = cqhci_recovery_start,
+ .cqe_recovery_finish = cqhci_recovery_finish,
+};
+
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
+{
+ struct cqhci_host *cq_host;
+ struct resource *cqhci_memres = NULL;
+
+ /* check and setup CMDQ interface */
+ cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cqhci_mem");
+ if (!cqhci_memres) {
+ dev_dbg(&pdev->dev, "CMDQ not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host)
+ return ERR_PTR(-ENOMEM);
+ cq_host->mmio = devm_ioremap(&pdev->dev,
+ cqhci_memres->start,
+ resource_size(cqhci_memres));
+ if (!cq_host->mmio) {
+ dev_err(&pdev->dev, "failed to remap cqhci regs\n");
+ return ERR_PTR(-EBUSY);
+ }
+ dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
+
+ return cq_host;
+}
+EXPORT_SYMBOL(cqhci_pltfm_init);
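+
+/*
+ * A probe-time usage sketch (hedged; "foo_cqhci_ops" and the source of the
+ * dma64 flag are assumptions of this example):
+ *
+ *	cq_host = cqhci_pltfm_init(pdev);
+ *	if (IS_ERR(cq_host))
+ *		return PTR_ERR(cq_host);
+ *	cq_host->ops = &foo_cqhci_ops;
+ *	err = cqhci_init(cq_host, mmc, dma64);
+ */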
+
+static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
+{
+ return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
+}
+
+static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
+{
+ u32 ver = cqhci_readl(cq_host, CQHCI_VER);
+
+ return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
+}
+
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
+ bool dma64)
+{
+ int err;
+
+ cq_host->dma64 = dma64;
+ cq_host->mmc = mmc;
+ cq_host->mmc->cqe_private = cq_host;
+
+ cq_host->num_slots = NUM_SLOTS;
+ cq_host->dcmd_slot = DCMD_SLOT;
+
+ mmc->cqe_ops = &cqhci_cqe_ops;
+
+ mmc->cqe_qdepth = NUM_SLOTS;
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ mmc->cqe_qdepth -= 1;
+
+ cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
+ sizeof(*cq_host->slot), GFP_KERNEL);
+ if (!cq_host->slot) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ spin_lock_init(&cq_host->lock);
+
+ init_completion(&cq_host->halt_comp);
+ init_waitqueue_head(&cq_host->wait_queue);
+
+ pr_info("%s: CQHCI version %u.%02u\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host));
+
+ return 0;
+
+out_err:
+ pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host), err);
+ return err;
+}
+EXPORT_SYMBOL(cqhci_init);
+
+MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
+MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
new file mode 100644
index 000000000000..9e68286a07b4
--- /dev/null
+++ b/drivers/mmc/host/cqhci.h
@@ -0,0 +1,240 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef LINUX_MMC_CQHCI_H
+#define LINUX_MMC_CQHCI_H
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/irqreturn.h>
+#include <asm/io.h>
+
+/* registers */
+/* version */
+#define CQHCI_VER 0x00
+#define CQHCI_VER_MAJOR(x) (((x) & GENMASK(11, 8)) >> 8)
+#define CQHCI_VER_MINOR1(x) (((x) & GENMASK(7, 4)) >> 4)
+#define CQHCI_VER_MINOR2(x) ((x) & GENMASK(3, 0))
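+/* e.g. VER = 0x0510 decodes as version 5.10: major 5, minor digits 1 and 0 */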
+
+/* capabilities */
+#define CQHCI_CAP 0x04
+/* configuration */
+#define CQHCI_CFG 0x08
+#define CQHCI_DCMD 0x00001000
+#define CQHCI_TASK_DESC_SZ 0x00000100
+#define CQHCI_ENABLE 0x00000001
+
+/* control */
+#define CQHCI_CTL 0x0C
+#define CQHCI_CLEAR_ALL_TASKS 0x00000100
+#define CQHCI_HALT 0x00000001
+
+/* interrupt status */
+#define CQHCI_IS 0x10
+#define CQHCI_IS_HAC BIT(0)
+#define CQHCI_IS_TCC BIT(1)
+#define CQHCI_IS_RED BIT(2)
+#define CQHCI_IS_TCL BIT(3)
+
+#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED)
+
+/* interrupt status enable */
+#define CQHCI_ISTE 0x14
+
+/* interrupt signal enable */
+#define CQHCI_ISGE 0x18
+
+/* interrupt coalescing */
+#define CQHCI_IC 0x1C
+#define CQHCI_IC_ENABLE BIT(31)
+#define CQHCI_IC_RESET BIT(16)
+#define CQHCI_IC_ICCTHWEN BIT(15)
+#define CQHCI_IC_ICCTH(x) (((x) & 0x1F) << 8)
+#define CQHCI_IC_ICTOVALWEN BIT(7)
+#define CQHCI_IC_ICTOVAL(x) ((x) & 0x7F)
+
+/* task list base address */
+#define CQHCI_TDLBA 0x20
+
+/* task list base address upper */
+#define CQHCI_TDLBAU 0x24
+
+/* door-bell */
+#define CQHCI_TDBR 0x28
+
+/* task completion notification */
+#define CQHCI_TCN 0x2C
+
+/* device queue status */
+#define CQHCI_DQS 0x30
+
+/* device pending tasks */
+#define CQHCI_DPT 0x34
+
+/* task clear */
+#define CQHCI_TCLR 0x38
+
+/* send status config 1 */
+#define CQHCI_SSC1 0x40
+
+/* send status config 2 */
+#define CQHCI_SSC2 0x44
+
+/* response for dcmd */
+#define CQHCI_CRDCT 0x48
+
+/* response mode error mask */
+#define CQHCI_RMEM 0x50
+
+/* task error info */
+#define CQHCI_TERRI 0x54
+
+#define CQHCI_TERRI_C_INDEX(x) ((x) & GENMASK(5, 0))
+#define CQHCI_TERRI_C_TASK(x) (((x) & GENMASK(12, 8)) >> 8)
+#define CQHCI_TERRI_C_VALID(x) ((x) & BIT(15))
+#define CQHCI_TERRI_D_INDEX(x) (((x) & GENMASK(21, 16)) >> 16)
+#define CQHCI_TERRI_D_TASK(x) (((x) & GENMASK(28, 24)) >> 24)
+#define CQHCI_TERRI_D_VALID(x) ((x) & BIT(31))
+
+/* command response index */
+#define CQHCI_CRI 0x58
+
+/* command response argument */
+#define CQHCI_CRA 0x5C
+
+#define CQHCI_INT_ALL 0xF
+#define CQHCI_IC_DEFAULT_ICCTH 31
+#define CQHCI_IC_DEFAULT_ICTOVAL 1
+
+/* attribute fields */
+#define CQHCI_VALID(x) (((x) & 1) << 0)
+#define CQHCI_END(x) (((x) & 1) << 1)
+#define CQHCI_INT(x) (((x) & 1) << 2)
+#define CQHCI_ACT(x) (((x) & 0x7) << 3)
+
+/* data command task descriptor fields */
+#define CQHCI_FORCED_PROG(x) (((x) & 1) << 6)
+#define CQHCI_CONTEXT(x) (((x) & 0xF) << 7)
+#define CQHCI_DATA_TAG(x) (((x) & 1) << 11)
+#define CQHCI_DATA_DIR(x) (((x) & 1) << 12)
+#define CQHCI_PRIORITY(x) (((x) & 1) << 13)
+#define CQHCI_QBAR(x) (((x) & 1) << 14)
+#define CQHCI_REL_WRITE(x) (((x) & 1) << 15)
+#define CQHCI_BLK_COUNT(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_BLK_ADDR(x) (((x) & 0xFFFFFFFF) << 32)
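+/*
+ * e.g. (illustrative values) an 8-block read at block address 0x1000 with
+ * VALID, END, INT and ACT(0x5) set, and DATA_DIR = 1 for a read, encodes as
+ * the 64-bit task descriptor word 0x000010000008102F.
+ */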
+
+/* direct command task descriptor fields */
+#define CQHCI_CMD_INDEX(x) (((x) & 0x3F) << 16)
+#define CQHCI_CMD_TIMING(x) (((x) & 1) << 22)
+#define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23)
+
+/* transfer descriptor fields */
+#define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32)
+#define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0)
+
+struct cqhci_host_ops;
+struct mmc_host;
+struct cqhci_slot;
+
+struct cqhci_host {
+ const struct cqhci_host_ops *ops;
+ void __iomem *mmio;
+ struct mmc_host *mmc;
+
+ spinlock_t lock;
+
+ /* relative card address of device */
+ unsigned int rca;
+
+ /* 64 bit DMA */
+ bool dma64;
+ int num_slots;
+ int qcnt;
+
+ u32 dcmd_slot;
+ u32 caps;
+#define CQHCI_TASK_DESC_SZ_128 0x1
+
+ u32 quirks;
+#define CQHCI_QUIRK_SHORT_TXFR_DESC_SZ 0x1
+
+ bool enabled;
+ bool halted;
+ bool init_done;
+ bool activated;
+ bool waiting_for_idle;
+ bool recovery_halt;
+
+ size_t desc_size;
+ size_t data_size;
+
+ u8 *desc_base;
+
+ /* total size of one slot: task + link descriptor */
+ u8 slot_sz;
+
+ /* 64/128 bit depends on CQHCI_CFG */
+ u8 task_desc_len;
+
+ /* 64 bit on 32-bit arch, 128 bit on 64-bit */
+ u8 link_desc_len;
+
+ u8 *trans_desc_base;
+ /* same length as transfer descriptor */
+ u8 trans_desc_len;
+
+ dma_addr_t desc_dma_base;
+ dma_addr_t trans_desc_dma_base;
+
+ struct completion halt_comp;
+ wait_queue_head_t wait_queue;
+ struct cqhci_slot *slot;
+};
+
+struct cqhci_host_ops {
+ void (*dumpregs)(struct mmc_host *mmc);
+ void (*write_l)(struct cqhci_host *host, u32 val, int reg);
+ u32 (*read_l)(struct cqhci_host *host, int reg);
+ void (*enable)(struct mmc_host *mmc);
+ void (*disable)(struct mmc_host *mmc, bool recovery);
+};
+
+static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
+{
+ if (unlikely(host->ops->write_l))
+ host->ops->write_l(host, val, reg);
+ else
+ writel_relaxed(val, host->mmio + reg);
+}
+
+static inline u32 cqhci_readl(struct cqhci_host *host, int reg)
+{
+ if (unlikely(host->ops->read_l))
+ return host->ops->read_l(host, reg);
+ else
+ return readl_relaxed(host->mmio + reg);
+}
+
+struct platform_device;
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error);
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
+int cqhci_suspend(struct mmc_host *mmc);
+int cqhci_resume(struct mmc_host *mmc);
+
+#endif
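
A minimal decoding sketch (illustrative only, not part of the patch) showing how the CQHCI_TERRI_* field macros above pull the command- and data-line error fields out of a raw task error information register; the helper name is hypothetical:

static void cqhci_decode_terri(u32 terri)
{
	/* Command-line fields are meaningful only when C_VALID is set */
	if (CQHCI_TERRI_C_VALID(terri))
		pr_info("cmd error: task %u, cmd index %u\n",
			(unsigned int)CQHCI_TERRI_C_TASK(terri),
			(unsigned int)CQHCI_TERRI_C_INDEX(terri));

	/* Data-line fields are meaningful only when D_VALID is set */
	if (CQHCI_TERRI_D_VALID(terri))
		pr_info("data error: task %u, cmd index %u\n",
			(unsigned int)CQHCI_TERRI_D_TASK(terri),
			(unsigned int)CQHCI_TERRI_D_INDEX(terri));
}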
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 351330dfb954..8e363174f9d6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -174,7 +174,7 @@ module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
"Maximum polling loop count. Default = 32");
-static unsigned __initdata use_dma = 1;
+static unsigned use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
@@ -496,8 +496,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
return ret;
}
-static void __init_or_module
-davinci_release_dma_channels(struct mmc_davinci_host *host)
+static void davinci_release_dma_channels(struct mmc_davinci_host *host)
{
if (!host->use_dma)
return;
@@ -506,7 +505,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
dma_release_channel(host->dma_rx);
}
-static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
if (IS_ERR(host->dma_tx)) {
@@ -1201,7 +1200,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
return 0;
}
-static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+static int davinci_mmcsd_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
@@ -1254,8 +1253,9 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
pdev->id_entry = match->data;
ret = mmc_of_parse(mmc);
if (ret) {
- dev_err(&pdev->dev,
- "could not parse of data: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "could not parse of data: %d\n", ret);
goto parse_fail;
}
} else {
@@ -1414,11 +1414,12 @@ static struct platform_driver davinci_mmcsd_driver = {
.pm = davinci_mmcsd_pm_ops,
.of_match_table = davinci_mmc_dt_ids,
},
+ .probe = davinci_mmcsd_probe,
.remove = __exit_p(davinci_mmcsd_remove),
.id_table = davinci_mmc_devtype,
};
-module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);
+module_platform_driver(davinci_mmcsd_driver);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index e0862d3f65b3..32a6a228cd12 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1208,7 +1208,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto free_host;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e8a1bb1ae694..70b0df8b9c78 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -82,6 +82,10 @@ static unsigned int fmax = 515633;
* @qcom_fifo: enables qcom specific fifo pio read logic.
* @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
+ * @mmcimask1: true if the variant has an MMCIMASK1 register.
+ * @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS
+ * register.
+ * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register.
*/
struct variant_data {
unsigned int clkreg;
@@ -111,6 +115,9 @@ struct variant_data {
bool qcom_fifo;
bool qcom_dml;
bool reversed_irq_handling;
+ bool mmcimask1;
+ u32 start_err;
+ u32 opendrain;
};
static struct variant_data variant_arm = {
@@ -120,6 +127,9 @@ static struct variant_data variant_arm = {
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.reversed_irq_handling = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo = {
@@ -128,6 +138,9 @@ static struct variant_data variant_arm_extended_fifo = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo_hwfc = {
@@ -137,6 +150,9 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_u300 = {
@@ -152,6 +168,9 @@ static struct variant_data variant_u300 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_nomadik = {
@@ -168,6 +187,9 @@ static struct variant_data variant_nomadik = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500 = {
@@ -190,6 +212,9 @@ static struct variant_data variant_ux500 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500v2 = {
@@ -214,6 +239,26 @@ static struct variant_data variant_ux500v2 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
+};
+
+static struct variant_data variant_stm32 = {
+ .fifosize = 32 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .datalength_bits = 24,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .st_sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 48000000,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_qcom = {
@@ -232,6 +277,9 @@ static struct variant_data variant_qcom = {
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
/* Busy detection for the ST Micro variant */
@@ -396,6 +444,7 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
void __iomem *base = host->base;
+ struct variant_data *variant = host->variant;
if (host->singleirq) {
unsigned int mask0 = readl(base + MMCIMASK0);
@@ -406,7 +455,10 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
writel(mask0, base + MMCIMASK0);
}
- writel(mask, base + MMCIMASK1);
+ if (variant->mmcimask1)
+ writel(mask, base + MMCIMASK1);
+
+ host->mask1_reg = mask;
}
static void mmci_stop_data(struct mmci_host *host)
@@ -921,8 +973,9 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
return;
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
- MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+ host->variant->start_err |
+ MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
u32 remain, success;
/* Terminate the DMA transfer */
@@ -1286,7 +1339,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
status = readl(host->base + MMCISTATUS);
if (host->singleirq) {
- if (status & readl(host->base + MMCIMASK1))
+ if (status & host->mask1_reg)
mmci_pio_irq(irq, dev_id);
status &= ~MCI_IRQ1MASK;
@@ -1429,16 +1482,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
~MCI_ST_DATA2DIREN);
}
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
- if (host->hw_designer != AMBA_VENDOR_ST)
- pwr |= MCI_ROD;
- else {
- /*
- * The ST Micro variant use the ROD bit for something
- * else and only has OD (Open Drain).
- */
- pwr |= MCI_OD;
- }
+ if (variant->opendrain) {
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pwr |= variant->opendrain;
+ } else {
+ /*
+ * If the variant cannot configure the pads on its own, we expect
+ * pinctrl to be able to do that for us
+ */
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pinctrl_select_state(host->pinctrl, host->pins_opendrain);
+ else
+ pinctrl_select_state(host->pinctrl, host->pins_default);
}
/*
@@ -1583,6 +1638,35 @@ static int mmci_probe(struct amba_device *dev,
host = mmc_priv(mmc);
host->mmc = mmc;
+ /*
+ * Some variants (STM32) don't have an opendrain bit; nevertheless,
+ * the pins can be set accordingly using pinctrl
+ */
+ if (!variant->opendrain) {
+ host->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(host->pinctrl)) {
+ dev_err(&dev->dev, "failed to get pinctrl");
+ ret = PTR_ERR(host->pinctrl);
+ goto host_free;
+ }
+
+ host->pins_default = pinctrl_lookup_state(host->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(host->pins_default)) {
+ dev_err(mmc_dev(mmc), "Can't select default pins\n");
+ ret = PTR_ERR(host->pins_default);
+ goto host_free;
+ }
+
+ host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
+ MMCI_PINCTRL_STATE_OPENDRAIN);
+ if (IS_ERR(host->pins_opendrain)) {
+ dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
+ ret = PTR_ERR(host->pins_opendrain);
+ goto host_free;
+ }
+ }
+
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
@@ -1729,7 +1813,10 @@ static int mmci_probe(struct amba_device *dev,
spin_lock_init(&host->lock);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
+
writel(0xfff, host->base + MMCICLEAR);
/*
@@ -1809,6 +1896,7 @@ static int mmci_remove(struct amba_device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
/*
* Undo pm_runtime_put() in probe. We use the _sync
@@ -1819,7 +1907,9 @@ static int mmci_remove(struct amba_device *dev)
mmc_remove_host(mmc);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
@@ -1951,6 +2041,11 @@ static const struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_ux500v2,
},
+ {
+ .id = 0x00880180,
+ .mask = 0x00ffffff,
+ .data = &variant_stm32,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4a8bef1aac8f..f91cdf7f6dae 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -192,6 +192,8 @@
#define NR_SG 128
+#define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain"
+
struct clk;
struct variant_data;
struct dma_chan;
@@ -223,9 +225,13 @@ struct mmci_host {
u32 clk_reg;
u32 datactrl_reg;
u32 busy_status;
+ u32 mask1_reg;
bool vqmmc_enabled;
struct mmci_platform_data *plat;
struct variant_data *variant;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_opendrain;
u8 hw_designer;
u8 hw_revision:4;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index b9dfea5d8193..f13f798d8506 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -35,6 +35,28 @@ struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+struct tmio_mmc_dma {
+ enum dma_slave_buswidth dma_buswidth;
+ bool (*filter)(struct dma_chan *chan, void *arg);
+ void (*enable)(struct tmio_mmc_host *host, bool enable);
+ struct completion dma_dataend;
+ struct tasklet_struct dma_complete;
+};
+
+struct renesas_sdhi {
+ struct clk *clk;
+ struct clk *clk_cd;
+ struct tmio_mmc_data mmc_data;
+ struct tmio_mmc_dma dma_priv;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default, *pins_uhs;
+ void __iomem *scc_ctl;
+ u32 scc_tappos;
+};
+
+#define host_to_priv(host) \
+ container_of((host)->pdata, struct renesas_sdhi, mmc_data)
+
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops);
int renesas_sdhi_remove(struct platform_device *pdev);
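
The relocated host_to_priv() relies on struct renesas_sdhi embedding its tmio_mmc_data and on host->pdata pointing at that member; container_of() then walks back to the enclosing structure. Illustrative use (not part of the patch):

static struct renesas_sdhi *example_priv(struct tmio_mmc_host *host)
{
	/* container_of() subtracts offsetof(struct renesas_sdhi,
	 * mmc_data) from host->pdata to recover the wrapper. */
	return container_of(host->pdata, struct renesas_sdhi, mmc_data);
}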
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index fcf7235d5742..80943fa07db6 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
@@ -46,19 +47,6 @@
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
-#define host_to_priv(host) \
- container_of((host)->pdata, struct renesas_sdhi, mmc_data)
-
-struct renesas_sdhi {
- struct clk *clk;
- struct clk *clk_cd;
- struct tmio_mmc_data mmc_data;
- struct tmio_mmc_dma dma_priv;
- struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default, *pins_uhs;
- void __iomem *scc_ctl;
-};
-
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
@@ -280,7 +268,7 @@ static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, host->scc_tappos);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
/* Read TAPNUM */
return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
@@ -497,7 +485,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
- goto eprobe;
+ return ret;
}
/*
@@ -523,11 +511,9 @@ int renesas_sdhi_probe(struct platform_device *pdev,
"state_uhs");
}
- host = tmio_mmc_host_alloc(pdev);
- if (!host) {
- ret = -ENOMEM;
- goto eprobe;
- }
+ host = tmio_mmc_host_alloc(pdev, mmc_data);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
@@ -541,18 +527,18 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->bus_shift = of_data->bus_shift;
}
- host->dma = dma_priv;
host->write16_hook = renesas_sdhi_write16_hook;
host->clk_enable = renesas_sdhi_clk_enable;
host->clk_update = renesas_sdhi_clk_update;
host->clk_disable = renesas_sdhi_clk_disable;
host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
+ host->dma_ops = dma_ops;
/* SDR speeds are only available on Gen2+ */
if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
/* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
- host->card_busy = renesas_sdhi_card_busy;
- host->start_signal_voltage_switch =
+ host->ops.card_busy = renesas_sdhi_card_busy;
+ host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
}
@@ -586,10 +572,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
- ret = tmio_mmc_host_probe(host, mmc_data, dma_ops);
- if (ret < 0)
+ ret = renesas_sdhi_clk_enable(host);
+ if (ret)
goto efree;
+ ret = tmio_mmc_host_probe(host);
+ if (ret < 0)
+ goto edisclk;
+
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
@@ -606,7 +596,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
- host->scc_tappos = taps->tap;
+ priv->scc_tappos = taps->tap;
hit = true;
break;
}
@@ -650,20 +640,24 @@ int renesas_sdhi_probe(struct platform_device *pdev,
eirq:
tmio_mmc_host_remove(host);
+edisclk:
+ renesas_sdhi_clk_disable(host);
efree:
tmio_mmc_host_free(host);
-eprobe:
+
return ret;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
int renesas_sdhi_remove(struct platform_device *pdev)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
+ renesas_sdhi_clk_disable(host);
return 0;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 41cbe84c1d18..7c03cfead6f9 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -103,6 +103,8 @@ renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host,
static void
renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
@@ -110,8 +112,8 @@ renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1,
INFO1_CLEAR);
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void
@@ -130,7 +132,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
static void
renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) {
- tasklet_schedule(&host->dma_complete);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ tasklet_schedule(&priv->dma_priv.dma_complete);
}
static void
@@ -220,10 +224,12 @@ static void
renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
/* Each value is set to non-zero to assume "enabling" each DMA */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
- tasklet_init(&host->dma_complete,
+ tasklet_init(&priv->dma_priv.dma_complete,
renesas_sdhi_internal_dmac_complete_tasklet_fn,
(unsigned long)host);
tasklet_init(&host->dma_issue,
@@ -255,6 +261,7 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ .soc_id = "r8a7795", .revision = "ES2.0" },
{ .soc_id = "r8a7796", .revision = "ES1.0" },
+ { .soc_id = "r8a77995", .revision = "ES1.0" },
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 9ab10436e4b8..82d757c480b2 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -117,11 +117,13 @@ MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
@@ -138,12 +140,15 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
- complete(&host->dma_dataend);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ complete(&priv->dma_priv.dma_dataend);
}
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
struct tmio_mmc_host *host = arg;
+ struct renesas_sdhi *priv = host_to_priv(host);
spin_lock_irq(&host->lock);
@@ -161,7 +166,7 @@ static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
spin_unlock_irq(&host->lock);
- wait_for_completion(&host->dma_dataend);
+ wait_for_completion(&priv->dma_priv.dma_dataend);
spin_lock_irq(&host->lock);
tmio_mmc_do_data_irq(host);
@@ -171,6 +176,7 @@ out:
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
@@ -214,7 +220,7 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -245,6 +251,7 @@ pio:
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
@@ -293,7 +300,7 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -341,7 +348,7 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
spin_lock_irq(&host->lock);
- if (host && host->data) {
+ if (host->data) {
if (host->data->flags & MMC_DATA_READ)
chan = host->chan_rx;
else
@@ -359,9 +366,11 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
/* We can only either use DMA for both Tx and Rx or not use it at all */
- if (!host->dma || (!host->pdev->dev.of_node &&
- (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
+ if (!host->pdev->dev.of_node &&
+ (!pdata->chan_priv_tx || !pdata->chan_priv_rx))
return;
if (!host->chan_tx && !host->chan_rx) {
@@ -378,7 +387,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_tx,
+ priv->dma_priv.filter, pdata->chan_priv_tx,
&host->pdev->dev, "tx");
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
@@ -389,7 +398,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start +
(CTL_SD_DATA_PORT << host->bus_shift);
- cfg.dst_addr_width = host->dma->dma_buswidth;
+ cfg.dst_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.dst_addr_width)
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.src_addr = 0;
@@ -398,7 +407,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
goto ecfgtx;
host->chan_rx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_rx,
+ priv->dma_priv.filter, pdata->chan_priv_rx,
&host->pdev->dev, "rx");
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
@@ -408,7 +417,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
- cfg.src_addr_width = host->dma->dma_buswidth;
+ cfg.src_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.src_addr_width)
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.dst_addr = 0;
@@ -420,7 +429,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
if (!host->bounce_buf)
goto ebouncebuf;
- init_completion(&host->dma_dataend);
+ init_completion(&priv->dma_priv.dma_dataend);
tasklet_init(&host->dma_issue,
renesas_sdhi_sys_dmac_issue_tasklet_fn,
(unsigned long)host);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 0848dc0f882e..30bd8081307e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -30,7 +30,7 @@
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/card.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_sdmmc {
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 76da1687ab37..78422079ecfa 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -31,7 +31,7 @@
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <asm/unaligned.h>
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index f7f157a62a4a..f77493604312 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = {
struct s3cmci_reg {
unsigned short addr;
unsigned char *name;
-} debug_regs[] = {
+};
+
+static const struct s3cmci_reg debug_regs[] = {
DBG_REG(CON),
DBG_REG(PRE),
DBG_REG(CMDARG),
@@ -1446,7 +1448,7 @@ struct s3cmci_reg {
static int s3cmci_regs_show(struct seq_file *seq, void *v)
{
struct s3cmci_host *host = seq->private;
- struct s3cmci_reg *rptr = debug_regs;
+ const struct s3cmci_reg *rptr = debug_regs;
for (; rptr->name; rptr++)
seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
@@ -1658,7 +1660,7 @@ static int s3cmci_probe(struct platform_device *pdev)
}
host->irq = platform_get_irq(pdev, 0);
- if (host->irq == 0) {
+ if (host->irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto probe_iounmap;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b988997a1e80..4065da58789d 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -76,6 +76,7 @@ struct sdhci_acpi_slot {
size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
+ int (*setup_host)(struct platform_device *pdev);
};
struct sdhci_acpi_host {
@@ -96,14 +97,21 @@ static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
return c->slot && (c->slot->flags & flag);
}
+#define INTEL_DSM_HS_CAPS_SDR25 BIT(0)
+#define INTEL_DSM_HS_CAPS_DDR50 BIT(1)
+#define INTEL_DSM_HS_CAPS_SDR50 BIT(2)
+#define INTEL_DSM_HS_CAPS_SDR104 BIT(3)
+
enum {
INTEL_DSM_FNS = 0,
INTEL_DSM_V18_SWITCH = 3,
INTEL_DSM_V33_SWITCH = 4,
+ INTEL_DSM_HS_CAPS = 8,
};
struct intel_host {
u32 dsm_fns;
+ u32 hs_caps;
};
static const guid_t intel_dsm_guid =
@@ -152,6 +160,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
{
int err;
+ intel_host->hs_caps = ~0;
+
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
@@ -161,6 +171,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
pr_debug("%s: DSM function mask %#x\n",
mmc_hostname(mmc), intel_host->dsm_fns);
+
+ intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps);
}
static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
@@ -398,6 +410,26 @@ static int intel_probe_slot(struct platform_device *pdev, const char *hid,
return 0;
}
+static int intel_setup_host(struct platform_device *pdev)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
+
+ return 0;
+}
+
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -409,6 +441,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -421,6 +454,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -432,6 +466,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -446,6 +481,83 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
.caps = MMC_CAP_NONREMOVABLE,
};
+/* AMD sdhci reset dll register. */
+#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
+
+static int amd_select_drive_strength(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type)
+{
+ return MMC_SET_DRIVER_TYPE_A;
+}
+
+static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
+{
+ /* AMD platforms require a DLL setting */
+ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
+ usleep_range(10, 20);
+ sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+}
+
+/*
+ * On AMD platforms the controller's tuning bit must first be
+ * disabled to bring the interface from HS200 mode down to HS mode;
+ * it is re-enabled later when tuning up to HS400 mode.
+ */
+static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned int old_timing = host->timing;
+
+ sdhci_set_ios(mmc, ios);
+ if (old_timing == MMC_TIMING_MMC_HS200 &&
+ ios->timing == MMC_TIMING_MMC_HS)
+ sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
+ if (old_timing != MMC_TIMING_MMC_HS400 &&
+ ios->timing == MMC_TIMING_MMC_HS400) {
+ sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
+ sdhci_acpi_amd_hs400_dll(host);
+ }
+}
+
+static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+ .ops = &sdhci_acpi_ops_amd,
+};
+
+static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host = c->host;
+
+ sdhci_read_caps(host);
+ if (host->caps1 & SDHCI_SUPPORT_DDR50)
+ host->mmc->caps = MMC_CAP_1_8V_DDR;
+
+ if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
+ (host->mmc->caps & MMC_CAP_1_8V_DDR))
+ host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+
+ host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
+ host->mmc_host_ops.set_ios = amd_set_ios;
+ return 0;
+}
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+};
+
struct sdhci_acpi_uid_slot {
const char *hid;
const char *uid;
@@ -469,6 +581,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "PNP0D40" },
{ "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
{ "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
+ { "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
{ },
};
@@ -485,6 +598,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "PNP0D40" },
{ "QCOM8051" },
{ "QCOM8052" },
+ { "AMDI0040" },
{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
@@ -566,6 +680,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->hw_name = "ACPI";
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ err = -EINVAL;
+ goto err_free;
+ }
host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
resource_size(iomem));
@@ -609,10 +727,20 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
}
}
- err = sdhci_add_host(host);
+ err = sdhci_setup_host(host);
if (err)
goto err_free;
+ if (c->slot && c->slot->setup_host) {
+ err = c->slot->setup_host(pdev);
+ if (err)
+ goto err_cleanup;
+ }
+
+ err = __sdhci_add_host(host);
+ if (err)
+ goto err_cleanup;
+
if (c->use_runtime_pm) {
pm_runtime_set_active(dev);
pm_suspend_ignore_children(dev, 1);
@@ -625,6 +753,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return 0;
+err_cleanup:
+ sdhci_cleanup_host(c->host);
err_free:
sdhci_free_host(c->host);
return err;
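
The probe rework above splits registration in two so the new setup_host() hook can trim capabilities in between. A condensed sketch of the ordering and its unwinding (illustrative only, assuming the sdhci core semantics of these helpers):

static int example_two_phase_add(struct sdhci_host *host)
{
	int err;

	err = sdhci_setup_host(host);	/* read caps, claim resources */
	if (err)
		return err;

	/* window where a platform hook may adjust host->mmc->caps */

	err = __sdhci_add_host(host);	/* request irq, mmc_add_host() */
	if (err)
		sdhci_cleanup_host(host);	/* undo sdhci_setup_host() */
	return err;
}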
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 85140c9af581..cd2b5f643a15 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -193,6 +193,7 @@ struct pltfm_imx_data {
struct clk *clk_ipg;
struct clk *clk_ahb;
struct clk *clk_per;
+ unsigned int actual_clock;
enum {
NO_CMD_PENDING, /* no multiblock command pending */
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
@@ -687,6 +688,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
return;
}
+ /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
+ if (is_imx53_esdhc(imx_data)) {
+ /*
+ * According to the i.MX53 reference manual, if DLLCTRL[10] can
+ * be set, then the controller is eSDHCv3, else it is eSDHCv2.
+ */
+ val = readl(host->ioaddr + ESDHC_DLL_CTRL);
+ writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
+ temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
+ writel(val, host->ioaddr + ESDHC_DLL_CTRL);
+ if (temp & BIT(10))
+ pre_div = 2;
+ }
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
| ESDHC_CLOCK_MASK);
@@ -1389,11 +1404,15 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
int ret;
ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
if (!sdhci_sdio_irq_enabled(host)) {
+ imx_data->actual_clock = host->mmc->actual_clock;
+ esdhc_pltfm_set_clock(host, 0);
clk_disable_unprepare(imx_data->clk_per);
clk_disable_unprepare(imx_data->clk_ipg);
}
@@ -1409,31 +1428,34 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int err;
+ err = clk_prepare_enable(imx_data->clk_ahb);
+ if (err)
+ return err;
+
if (!sdhci_sdio_irq_enabled(host)) {
err = clk_prepare_enable(imx_data->clk_per);
if (err)
- return err;
+ goto disable_ahb_clk;
err = clk_prepare_enable(imx_data->clk_ipg);
if (err)
goto disable_per_clk;
+ esdhc_pltfm_set_clock(host, imx_data->actual_clock);
}
- err = clk_prepare_enable(imx_data->clk_ahb);
- if (err)
- goto disable_ipg_clk;
+
err = sdhci_runtime_resume_host(host);
if (err)
- goto disable_ahb_clk;
+ goto disable_ipg_clk;
return 0;
-disable_ahb_clk:
- clk_disable_unprepare(imx_data->clk_ahb);
disable_ipg_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_ipg);
disable_per_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_per);
+disable_ahb_clk:
+ clk_disable_unprepare(imx_data->clk_ahb);
return err;
}
#endif
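
The runtime-PM rework above also restores the canonical unwind rule: release in the reverse order of acquisition. The general clock pattern, as a sketch:

static int example_enable_clocks(struct pltfm_imx_data *imx_data)
{
	int err;

	err = clk_prepare_enable(imx_data->clk_ahb);
	if (err)
		return err;

	err = clk_prepare_enable(imx_data->clk_per);
	if (err)
		goto disable_ahb;

	err = clk_prepare_enable(imx_data->clk_ipg);
	if (err)
		goto disable_per;

	return 0;

disable_per:
	clk_disable_unprepare(imx_data->clk_per);
disable_ahb:
	clk_disable_unprepare(imx_data->clk_ahb);
	return err;
}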
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 0720ea717011..c33a5f7393bd 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -25,11 +25,13 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
-#include "sdhci-pltfm.h"
#include <linux/of.h>
-#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#include "cqhci.h"
+#include "sdhci-pltfm.h"
+#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
#define PHY_CLK_TOO_SLOW_HZ 400000
@@ -90,6 +92,7 @@ struct sdhci_arasan_data {
struct phy *phy;
bool is_phy_on;
+ bool has_cqe;
struct clk_hw sdcardclk_hw;
struct clk *sdcardclk;
@@ -262,6 +265,17 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
+static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -269,6 +283,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
};
static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
@@ -278,6 +293,62 @@ static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_arasan_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_arasan_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_arasan_cqhci_ops = {
+ .enable = sdhci_arasan_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_arasan_dumpregs,
+};
+
+static const struct sdhci_ops sdhci_arasan_cqe_ops = {
+ .set_clock = sdhci_arasan_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_arasan_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
+ .irq = sdhci_arasan_cqhci_irq,
+};
+
+static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
+ .ops = &sdhci_arasan_cqe_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -297,6 +368,12 @@ static int sdhci_arasan_suspend(struct device *dev)
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
+ if (sdhci_arasan->has_cqe) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
ret = sdhci_suspend_host(host);
if (ret)
return ret;
@@ -353,7 +430,16 @@ static int sdhci_arasan_resume(struct device *dev)
sdhci_arasan->is_phy_on = true;
}
- return sdhci_resume_host(host);
+ ret = sdhci_resume_host(host);
+ if (ret) {
+ dev_err(dev, "Cannot resume host.\n");
+ return ret;
+ }
+
+ if (sdhci_arasan->has_cqe)
+ return cqhci_resume(host->mmc);
+
+ return 0;
}
#endif /* ! CONFIG_PM_SLEEP */
@@ -556,6 +642,49 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
of_clk_del_provider(dev->of_node);
}
+static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
+{
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!sdhci_arasan->has_cqe)
+ return sdhci_add_host(host);
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(host->mmc->parent,
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_arasan_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
@@ -566,9 +695,15 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
struct device_node *np = pdev->dev.of_node;
+ const struct sdhci_pltfm_data *pdata;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-5.1"))
+ pdata = &sdhci_arasan_cqe_pdata;
+ else
+ pdata = &sdhci_arasan_pdata;
+
+ host = sdhci_pltfm_init(pdev, pdata, sizeof(*sdhci_arasan));
- host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata,
- sizeof(*sdhci_arasan));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -663,9 +798,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan_hs400_enhanced_strobe;
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_arasan_voltage_switch;
+ sdhci_arasan->has_cqe = true;
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
}
- ret = sdhci_add_host(host);
+ ret = sdhci_arasan_add_host(sdhci_arasan);
if (ret)
goto err_add_host;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1f424374bbbb..4ffa6b173a21 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -589,10 +589,18 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
+ u32 val;
+
sdhci_reset(host, mask);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ if (mask & SDHCI_RESET_ALL) {
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_EN;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+ }
}
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
diff --git a/drivers/mmc/host/sdhci-pci-arasan.c b/drivers/mmc/host/sdhci-pci-arasan.c
new file mode 100644
index 000000000000..499f3205ec5c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-arasan.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sdhci-pci-arasan.c - Driver for Arasan PCI Controller with
+ * integrated phy.
+ *
+ * Copyright (C) 2017 Arasan Chip Systems Inc.
+ *
+ * Author: Atul Garg <agarg@arasan.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+
+/* Extra registers for Arasan SD/SDIO/MMC Host Controller with PHY */
+#define PHY_ADDR_REG 0x300
+#define PHY_DAT_REG 0x304
+
+#define PHY_WRITE BIT(8)
+#define PHY_BUSY BIT(9)
+#define DATA_MASK 0xFF
+
+/* PHY Specific Registers */
+#define DLL_STATUS 0x00
+#define IPAD_CTRL1 0x01
+#define IPAD_CTRL2 0x02
+#define IPAD_STS 0x03
+#define IOREN_CTRL1 0x06
+#define IOREN_CTRL2 0x07
+#define IOPU_CTRL1 0x08
+#define IOPU_CTRL2 0x09
+#define ITAP_DELAY 0x0C
+#define OTAP_DELAY 0x0D
+#define STRB_SEL 0x0E
+#define CLKBUF_SEL 0x0F
+#define MODE_CTRL 0x11
+#define DLL_TRIM 0x12
+#define CMD_CTRL 0x20
+#define DATA_CTRL 0x21
+#define STRB_CTRL 0x22
+#define CLK_CTRL 0x23
+#define PHY_CTRL 0x24
+
+#define DLL_ENBL BIT(3)
+#define RTRIM_EN BIT(1)
+#define PDB_ENBL BIT(1)
+#define RETB_ENBL BIT(6)
+#define ODEN_CMD BIT(1)
+#define ODEN_DAT 0xFF
+#define REN_STRB BIT(0)
+#define REN_CMND BIT(1)
+#define REN_DATA 0xFF
+#define PU_CMD BIT(1)
+#define PU_DAT 0xFF
+#define ITAPDLY_EN BIT(0)
+#define OTAPDLY_EN BIT(0)
+#define OD_REL_CMD BIT(1)
+#define OD_REL_DAT 0xFF
+#define DLLTRM_ICP 0x8
+#define PDB_CMND BIT(0)
+#define PDB_DATA 0xFF
+#define PDB_STRB BIT(0)
+#define PDB_CLOCK BIT(0)
+#define CALDONE_MASK 0x10
+#define DLL_RDY_MASK 0x10
+#define MAX_CLK_BUF 0x7
+
+/* Mode Controls */
+#define ENHSTRB_MODE BIT(0)
+#define HS400_MODE BIT(1)
+#define LEGACY_MODE BIT(2)
+#define DDR50_MODE BIT(3)
+
+/*
+ * The controller has no dedicated mode bits for HS200/HS, so BIT(4)
+ * and BIT(5) are used purely for software bookkeeping.
+ */
+#define HS200_MODE BIT(4)
+#define HISPD_MODE BIT(5)
+
+#define OTAPDLY(x) (((x) << 1) | OTAPDLY_EN)
+#define ITAPDLY(x) (((x) << 1) | ITAPDLY_EN)
+#define FREQSEL(x) (((x) << 5) | DLL_ENBL)
+#define IOPAD(x, y) ((x) | ((y) << 2))
+
+/* Arasan private data */
+struct arasan_host {
+ u32 chg_clk;
+};
+
+static int arasan_phy_addr_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ val = sdhci_readw(host, PHY_ADDR_REG);
+ if (!(val & mask))
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+static int arasan_phy_write(struct sdhci_host *host, u8 data, u8 offset)
+{
+ sdhci_writew(host, data, PHY_DAT_REG);
+ sdhci_writew(host, (PHY_WRITE | offset), PHY_ADDR_REG);
+ return arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+}
+
+static int arasan_phy_read(struct sdhci_host *host, u8 offset, u8 *data)
+{
+ int ret;
+
+ sdhci_writew(host, 0, PHY_DAT_REG);
+ sdhci_writew(host, offset, PHY_ADDR_REG);
+ ret = arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+
+ /* Masking valid data bits */
+ *data = sdhci_readw(host, PHY_DAT_REG) & DATA_MASK;
+ return ret;
+}
+
+static int arasan_phy_sts_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ int ret;
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ ret = arasan_phy_read(host, offset, &val);
+ if (ret)
+ return -EBUSY;
+ else if (val & mask)
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+/* Initialize the Arasan PHY */
+static int arasan_phy_init(struct sdhci_host *host)
+{
+ int ret;
+ u8 val;
+
+ /* Program IOPADs and wait for calibration to be done */
+ if (arasan_phy_read(host, IPAD_CTRL1, &val) ||
+ arasan_phy_write(host, val | RETB_ENBL | PDB_ENBL, IPAD_CTRL1) ||
+ arasan_phy_read(host, IPAD_CTRL2, &val) ||
+ arasan_phy_write(host, val | RTRIM_EN, IPAD_CTRL2))
+ return -EBUSY;
+ ret = arasan_phy_sts_poll(host, IPAD_STS, CALDONE_MASK);
+ if (ret)
+ return -EBUSY;
+
+ /* Program CMD/Data lines */
+ if (arasan_phy_read(host, IOREN_CTRL1, &val) ||
+ arasan_phy_write(host, val | REN_CMND | REN_STRB, IOREN_CTRL1) ||
+ arasan_phy_read(host, IOPU_CTRL1, &val) ||
+ arasan_phy_write(host, val | PU_CMD, IOPU_CTRL1) ||
+ arasan_phy_read(host, CMD_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CMND, CMD_CTRL) ||
+ arasan_phy_read(host, IOREN_CTRL2, &val) ||
+ arasan_phy_write(host, val | REN_DATA, IOREN_CTRL2) ||
+ arasan_phy_read(host, IOPU_CTRL2, &val) ||
+ arasan_phy_write(host, val | PU_DAT, IOPU_CTRL2) ||
+ arasan_phy_read(host, DATA_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_DATA, DATA_CTRL) ||
+ arasan_phy_read(host, STRB_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_STRB, STRB_CTRL) ||
+ arasan_phy_read(host, CLK_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CLOCK, CLK_CTRL) ||
+ arasan_phy_read(host, CLKBUF_SEL, &val) ||
+ arasan_phy_write(host, val | MAX_CLK_BUF, CLKBUF_SEL) ||
+ arasan_phy_write(host, LEGACY_MODE, MODE_CTRL))
+ return -EBUSY;
+ return 0;
+}
+
+/* Set Arasan PHY for different modes */
+static int arasan_phy_set(struct sdhci_host *host, u8 mode, u8 otap,
+ u8 drv_type, u8 itap, u8 trim, u8 clk)
+{
+ u8 val;
+ int ret;
+
+ if (mode == HISPD_MODE || mode == HS200_MODE)
+ ret = arasan_phy_write(host, 0x0, MODE_CTRL);
+ else
+ ret = arasan_phy_write(host, mode, MODE_CTRL);
+ if (ret)
+ return ret;
+ if (mode == HS400_MODE || mode == HS200_MODE) {
+ ret = arasan_phy_read(host, IPAD_CTRL1, &val);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, IOPAD(val, drv_type), IPAD_CTRL1);
+ if (ret)
+ return ret;
+ }
+ if (mode == LEGACY_MODE) {
+ ret = arasan_phy_write(host, 0x0, OTAP_DELAY);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ } else {
+ ret = arasan_phy_write(host, OTAPDLY(otap), OTAP_DELAY);
+ if (ret)
+ return ret;
+ if (mode != HS200_MODE)
+ ret = arasan_phy_write(host, ITAPDLY(itap), ITAP_DELAY);
+ else
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ }
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, trim, DLL_TRIM);
+ if (ret)
+ return ret;
+ }
+ ret = arasan_phy_write(host, 0, DLL_STATUS);
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, FREQSEL(clk), DLL_STATUS);
+ if (ret)
+ return ret;
+ ret = arasan_phy_sts_poll(host, DLL_STATUS, DLL_RDY_MASK);
+ if (ret)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int arasan_select_phy_clock(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct arasan_host *arasan_host = sdhci_pci_priv(slot);
+ u8 clk;
+
+ if (arasan_host->chg_clk == host->mmc->ios.clock)
+ return 0;
+
+ arasan_host->chg_clk = host->mmc->ios.clock;
+ if (host->mmc->ios.clock == 200000000)
+ clk = 0x0;
+ else if (host->mmc->ios.clock == 100000000)
+ clk = 0x2;
+ else if (host->mmc->ios.clock == 50000000)
+ clk = 0x1;
+ else
+ clk = 0x0;
+
+ if (host->mmc_host_ops.hs400_enhanced_strobe) {
+ arasan_phy_set(host, ENHSTRB_MODE, 1, 0x0, 0x0,
+ DLLTRM_ICP, clk);
+ } else {
+ switch (host->mmc->ios.timing) {
+ case MMC_TIMING_LEGACY:
+ arasan_phy_set(host, LEGACY_MODE, 0x0, 0x0, 0x0,
+ 0x0, 0x0);
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ arasan_phy_set(host, HISPD_MODE, 0x3, 0x0, 0x2,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_UHS_SDR104:
+ arasan_phy_set(host, HS200_MODE, 0x2,
+ host->mmc->ios.drv_type, 0x0,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ arasan_phy_set(host, DDR50_MODE, 0x1, 0x0,
+ 0x0, DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ arasan_phy_set(host, HS400_MODE, 0x1,
+ host->mmc->ios.drv_type, 0xa,
+ DLLTRM_ICP, clk);
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int arasan_pci_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int err;
+
+ slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_8_BIT_DATA;
+ err = arasan_phy_init(slot->host);
+ if (err)
+ return -ENODEV;
+ return 0;
+}
+
+static void arasan_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ sdhci_set_clock(host, clock);
+
+ /* Change phy settings for the new clock */
+ arasan_select_phy_clock(host);
+}
+
+static const struct sdhci_ops arasan_sdhci_pci_ops = {
+ .set_clock = arasan_sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+const struct sdhci_pci_fixes sdhci_arasan = {
+ .probe_slot = arasan_pci_probe_slot,
+ .ops = &arasan_sdhci_pci_ops,
+ .priv_size = sizeof(struct arasan_host),
+};
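
The open-coded busy-wait in arasan_phy_addr_poll() could also be written with the generic iopoll helper; a sketch assuming read_poll_timeout() from <linux/iopoll.h> (available in later kernels):

static int example_phy_addr_poll(struct sdhci_host *host)
{
	u16 val;

	/* Poll PHY_ADDR_REG until PHY_BUSY clears, for up to 100 us */
	return read_poll_timeout(sdhci_readw, val, !(val & PHY_BUSY),
				 0, 100, false, host, PHY_ADDR_REG);
}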
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 3e4f04fd5175..6d1a983e6227 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -30,17 +30,37 @@
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
+#include "cqhci.h"
+
#include "sdhci.h"
#include "sdhci-pci.h"
-static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
#ifdef CONFIG_PM_SLEEP
-static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
+static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+{
+ mmc_pm_flag_t pm_flags = 0;
+ int i;
+
+ for (i = 0; i < chip->num_slots; i++) {
+ struct sdhci_pci_slot *slot = chip->slots[i];
+
+ if (slot)
+ pm_flags |= slot->host->mmc->pm_flags;
+ }
+
+ return device_set_wakeup_enable(&chip->pdev->dev,
+ (pm_flags & MMC_PM_KEEP_POWER) &&
+ (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+}
+
+static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
{
int i, ret;
+ sdhci_pci_init_wakeup(chip);
+
for (i = 0; i < chip->num_slots; i++) {
struct sdhci_pci_slot *slot = chip->slots[i];
struct sdhci_host *host;
@@ -56,9 +76,6 @@ static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
ret = sdhci_suspend_host(host);
if (ret)
goto err_pci_suspend;
-
- if (host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)
- sdhci_enable_irq_wakeups(host);
}
return 0;
@@ -69,52 +86,44 @@ err_pci_suspend:
return ret;
}
-static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
{
- mmc_pm_flag_t pm_flags = 0;
- int i;
+ struct sdhci_pci_slot *slot;
+ int i, ret;
for (i = 0; i < chip->num_slots; i++) {
- struct sdhci_pci_slot *slot = chip->slots[i];
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
- if (slot)
- pm_flags |= slot->host->mmc->pm_flags;
+ ret = sdhci_resume_host(slot->host);
+ if (ret)
+ return ret;
}
- return device_init_wakeup(&chip->pdev->dev,
- (pm_flags & MMC_PM_KEEP_POWER) &&
- (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+ return 0;
}
-static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
{
int ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
if (ret)
return ret;
- sdhci_pci_init_wakeup(chip);
-
- return 0;
+ return sdhci_pci_suspend_host(chip);
}
-int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
{
- struct sdhci_pci_slot *slot;
- int i, ret;
-
- for (i = 0; i < chip->num_slots; i++) {
- slot = chip->slots[i];
- if (!slot)
- continue;
+ int ret;
- ret = sdhci_resume_host(slot->host);
- if (ret)
- return ret;
- }
+ ret = sdhci_pci_resume_host(chip);
+ if (ret)
+ return ret;
- return 0;
+ return cqhci_resume(chip->slots[0]->host->mmc);
}
#endif
@@ -166,8 +175,48 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
return 0;
}
+
+static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
+ if (ret)
+ return ret;
+
+ return sdhci_pci_runtime_suspend_host(chip);
+}
+
+static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = sdhci_pci_runtime_resume_host(chip);
+ if (ret)
+ return ret;
+
+ return cqhci_resume(chip->slots[0]->host->mmc);
+}
#endif
+static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_pci_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -583,6 +632,18 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
.voltage_switch = sdhci_intel_voltage_switch,
};
+static const struct sdhci_ops sdhci_intel_glk_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_intel_set_power,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ .voltage_switch = sdhci_intel_voltage_switch,
+ .irq = sdhci_cqhci_irq,
+};
+
static void byt_read_dsm(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
@@ -612,12 +673,80 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
slot->host->mmc_host_ops.hs400_enhanced_strobe =
intel_hs400_enhanced_strobe;
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
+ }
+
+ return ret;
+}
+
+static void glk_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ /*
+ * CQE gets stuck if it sees the Buffer Read Enable bit set, which can
+ * be the case after tuning, so ensure the buffer is drained.
+ */
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
}
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops glk_cqhci_ops = {
+ .enable = glk_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_pci_dumpregs,
+};
+
+static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
+{
+ struct device *dev = &slot->chip->pdev->dev;
+ struct sdhci_host *host = slot->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
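+ /* The CQHCI register block sits at offset 0x200 from the SDHCI base */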
+ cq_host->mmio = host->ioaddr + 0x200;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ cq_host->ops = &glk_cqhci_ops;
+
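+ /* Hosts using 64-bit DMA need the larger 128-bit task descriptor format */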
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
return ret;
}
@@ -699,11 +828,20 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
.allow_runtime_pm = true,
.probe_slot = glk_emmc_probe_slot,
+ .add_host = glk_emmc_add_host,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = sdhci_cqhci_suspend,
+ .resume = sdhci_cqhci_resume,
+#endif
+#ifdef CONFIG_PM
+ .runtime_suspend = sdhci_cqhci_runtime_suspend,
+ .runtime_resume = sdhci_cqhci_runtime_resume,
+#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
- .ops = &sdhci_intel_byt_ops,
+ .ops = &sdhci_intel_glk_ops,
.priv_size = sizeof(struct intel_host),
};
@@ -778,6 +916,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
+ /* Advertise 2.0v for compatibility with the SDIO card's OCR */
+ slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
MMC_CAP_POWER_OFF_CARD;
break;
@@ -955,7 +1095,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i, ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = sdhci_pci_suspend_host(chip);
if (ret)
return ret;
@@ -965,8 +1105,6 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
jmicron_enable_mmc(chip->slots[i]->host, 0);
}
- sdhci_pci_init_wakeup(chip);
-
return 0;
}
@@ -1306,6 +1444,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(O2, SDS1, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
+ SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
@@ -1320,7 +1459,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids);
* *
\*****************************************************************************/
-static int sdhci_pci_enable_dma(struct sdhci_host *host)
+int sdhci_pci_enable_dma(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot;
struct pci_dev *pdev;
@@ -1543,10 +1682,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
}
- host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ host->mmc->pm_caps = MMC_PM_KEEP_POWER;
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+ if (device_can_wakeup(&pdev->dev))
+ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
slot->cd_override_level, 0, NULL);
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 0056f08a29cc..5cbcdc448f98 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -55,6 +55,9 @@
#define PCI_SUBDEVICE_ID_NI_7884 0x7884
+#define PCI_VENDOR_ID_ARASAN 0x16e6
+#define PCI_DEVICE_ID_ARASAN_PHY_EMMC 0x0670
+
/*
* PCI device class and mask
*/
@@ -170,11 +173,13 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
-
+int sdhci_pci_enable_dma(struct sdhci_host *host);
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
#endif
+extern const struct sdhci_pci_fixes sdhci_arasan;
+
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 8c0f88428556..14511526a3a8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -82,6 +82,10 @@ static int sdhci_probe(struct platform_device *pdev)
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto err_host;
+ }
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
sdhci = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 0842bbc2d7ad..4d0791f6ec23 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -230,7 +230,14 @@ static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}
+static void xenon_voltage_switch(struct sdhci_host *host)
+{
+ /* Wait 5ms after setting the 1.8V signal enable bit */
+ usleep_range(5000, 5500);
+}
+
static const struct sdhci_ops sdhci_xenon_ops = {
+ .voltage_switch = xenon_voltage_switch,
.set_clock = sdhci_set_clock,
.set_power = xenon_set_power,
.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e9290a3439d5..070aff9c108f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1434,6 +1434,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
if (mode != MMC_POWER_OFF) {
switch (1 << vdd) {
case MMC_VDD_165_195:
+ /*
+ * Without a regulator, SDHCI does not support 2.0v
+ * so we only get here if the driver deliberately
+ * added the 2.0v range to ocr_avail. Map it to 1.8v
+ * for the purpose of turning on the power.
+ */
+ case MMC_VDD_20_21:
pwr = SDHCI_POWER_180;
break;
case MMC_VDD_29_30:
@@ -2821,25 +2828,33 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
* sdhci_disable_irq_wakeups() since it will be set by
* sdhci_enable_card_detection() or sdhci_init().
*/
-void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
+ SDHCI_WAKE_ON_INT;
+ u32 irq_val = 0;
+ u8 wake_val = 0;
u8 val;
- u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
- | SDHCI_WAKE_ON_INT;
- u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
- SDHCI_INT_CARD_INT;
- val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
- val |= mask ;
- /* Avoid fake wake up */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
- val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
- irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
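+ /* Skip card-detect wakeups when card detection is broken, to avoid fake wakeups */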
+ if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
+ wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
+ irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
}
+
+ wake_val |= SDHCI_WAKE_ON_INT;
+ irq_val |= SDHCI_INT_CARD_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val &= ~mask;
+ val |= wake_val;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
+
+ host->irq_wake_enabled = !enable_irq_wake(host->irq);
+
+ return host->irq_wake_enabled;
}
-EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
@@ -2850,6 +2865,10 @@ static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val &= ~mask;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
+ disable_irq_wake(host->irq);
+
+ host->irq_wake_enabled = false;
}
int sdhci_suspend_host(struct sdhci_host *host)
@@ -2858,15 +2877,14 @@ int sdhci_suspend_host(struct sdhci_host *host)
mmc_retune_timer_stop(host->mmc);
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (!device_may_wakeup(mmc_dev(host->mmc)) ||
+ !sdhci_enable_irq_wakeups(host)) {
host->ier = 0;
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
- } else {
- sdhci_enable_irq_wakeups(host);
- enable_irq_wake(host->irq);
}
+
return 0;
}
@@ -2894,15 +2912,14 @@ int sdhci_resume_host(struct sdhci_host *host)
mmiowb();
}
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (host->irq_wake_enabled) {
+ sdhci_disable_irq_wakeups(host);
+ } else {
ret = request_threaded_irq(host->irq, sdhci_irq,
sdhci_thread_irq, IRQF_SHARED,
mmc_hostname(host->mmc), host);
if (ret)
return ret;
- } else {
- sdhci_disable_irq_wakeups(host);
- disable_irq_wake(host->irq);
}
sdhci_enable_card_detection(host);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 54bc444c317f..afab26fd70e6 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -484,6 +484,7 @@ struct sdhci_host {
bool bus_on; /* Bus power prevents runtime suspend */
bool preset_enabled; /* Preset is enabled */
bool pending_reset; /* Cmd/data reset is pending */
+ bool irq_wake_enabled; /* IRQ wakeup is enabled */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -718,7 +719,6 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
#ifdef CONFIG_PM
int sdhci_suspend_host(struct sdhci_host *host);
int sdhci_resume_host(struct sdhci_host *host);
-void sdhci_enable_irq_wakeups(struct sdhci_host *host);
int sdhci_runtime_suspend_host(struct sdhci_host *host);
int sdhci_runtime_resume_host(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 04ca0d33a521..485f7591fae4 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -10,9 +10,11 @@
* the Free Software Foundation, version 2 of the License.
*/
+#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/clk.h>
@@ -146,7 +148,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- sdhci_get_of_property(pdev);
host->hw_name = "f_sdh30";
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
@@ -158,25 +159,29 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
goto err;
}
- priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(priv->clk_iface)) {
- ret = PTR_ERR(priv->clk_iface);
- goto err;
- }
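+ /* Named core/iface clocks are only described on DT platforms */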
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
- ret = clk_prepare_enable(priv->clk_iface);
- if (ret)
- goto err;
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
- priv->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto err_clk;
- }
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- goto err_clk;
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+ }
/* init vendor specific regs */
ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
@@ -226,16 +231,27 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id f_sdh30_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-sdhci-3.0" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id f_sdh30_acpi_ids[] = {
+ { "SCX0002" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, f_sdh30_acpi_ids);
+#endif
static struct platform_driver sdhci_f_sdh30_driver = {
.driver = {
.name = "f_sdh30",
- .of_match_table = f_sdh30_dt_ids,
+ .of_match_table = of_match_ptr(f_sdh30_dt_ids),
+ .acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_f_sdh30_probe,
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 53fb18bb7bee..7bb00c68a756 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -916,7 +916,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
- u32 opc = cmd->opcode;
+ u32 opc;
u32 mask = 0;
unsigned long flags;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index cc98355dbdb9..bad612d6f879 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -3,7 +3,7 @@
* (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd.
* (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com>
* (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
- * (C) Copyright 2013-2014 David Lanzend�rfer <david.lanzendoerfer@o2s.ch>
+ * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
* (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
* (C) Copyright 2017 Sootech SA
*
@@ -1255,6 +1255,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
goto error_assert_reset;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto error_assert_reset;
+ }
+
return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq,
sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host);
@@ -1393,5 +1398,5 @@ module_platform_driver(sunxi_mmc_driver);
MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver");
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("David Lanzend�rfer <david.lanzendoerfer@o2s.ch>");
+MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch>");
MODULE_ALIAS("platform:sunxi-mmc");
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 64b7e9f18361..43a2ea5cff24 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -92,14 +92,19 @@ static int tmio_mmc_probe(struct platform_device *pdev)
pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
- host = tmio_mmc_host_alloc(pdev);
- if (!host)
+ host = tmio_mmc_host_alloc(pdev, pdata);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
goto cell_disable;
+ }
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res) >> 10;
- ret = tmio_mmc_host_probe(host, pdata, NULL);
+ host->mmc->f_max = pdata->hclk;
+ host->mmc->f_min = pdata->hclk / 512;
+
+ ret = tmio_mmc_host_probe(host);
if (ret)
goto host_free;
@@ -128,15 +133,11 @@ out:
static int tmio_mmc_remove(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
- if (mmc) {
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- tmio_mmc_host_remove(host);
- if (cell->disable)
- cell->disable(pdev);
- }
+ tmio_mmc_host_remove(host);
+ if (cell->disable)
+ cell->disable(pdev);
return 0;
}
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 3e6ff8921440..e7d651352dc9 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -112,12 +112,6 @@
struct tmio_mmc_data;
struct tmio_mmc_host;
-struct tmio_mmc_dma {
- enum dma_slave_buswidth dma_buswidth;
- bool (*filter)(struct dma_chan *chan, void *arg);
- void (*enable)(struct tmio_mmc_host *host, bool enable);
-};
-
struct tmio_mmc_dma_ops {
void (*start)(struct tmio_mmc_host *host, struct mmc_data *data);
void (*enable)(struct tmio_mmc_host *host, bool enable);
@@ -134,6 +128,7 @@ struct tmio_mmc_host {
struct mmc_request *mrq;
struct mmc_data *data;
struct mmc_host *mmc;
+ struct mmc_host_ops ops;
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
@@ -144,18 +139,15 @@ struct tmio_mmc_host {
struct scatterlist *sg_orig;
unsigned int sg_len;
unsigned int sg_off;
- unsigned long bus_shift;
+ unsigned int bus_shift;
struct platform_device *pdev;
struct tmio_mmc_data *pdata;
- struct tmio_mmc_dma *dma;
/* DMA support */
bool force_pio;
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
- struct completion dma_dataend;
- struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
struct scatterlist bounce_sg;
u8 *bounce_buf;
@@ -174,7 +166,6 @@ struct tmio_mmc_host {
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
bool sdio_irq_enabled;
- u32 scc_tappos;
/* Mandatory callback */
int (*clk_enable)(struct tmio_mmc_host *host);
@@ -185,9 +176,6 @@ struct tmio_mmc_host {
void (*clk_disable)(struct tmio_mmc_host *host);
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
- int (*card_busy)(struct mmc_host *mmc);
- int (*start_signal_voltage_switch)(struct mmc_host *mmc,
- struct mmc_ios *ios);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
void (*hw_reset)(struct tmio_mmc_host *host);
void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
@@ -207,11 +195,10 @@ struct tmio_mmc_host {
const struct tmio_mmc_dma_ops *dma_ops;
};
-struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata);
void tmio_mmc_host_free(struct tmio_mmc_host *host);
-int tmio_mmc_host_probe(struct tmio_mmc_host *host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops);
+int tmio_mmc_host_probe(struct tmio_mmc_host *host);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
@@ -240,26 +227,26 @@ int tmio_mmc_host_runtime_resume(struct device *dev);
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->bus_shift));
+ return ioread16(host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
int addr)
{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+ return ioread16(host->ctl + (addr << host->bus_shift)) |
+ ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
u32 *buf, int count)
{
- readsl(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
@@ -270,26 +257,26 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
*/
if (host->write16_hook && host->write16_hook(host, addr))
return;
- writew(val, host->ctl + (addr << host->bus_shift));
+ iowrite16(val, host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
int addr, u32 val)
{
- writew(val & 0xffff, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+ iowrite16(val & 0xffff, host->ctl + (addr << host->bus_shift));
+ iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{
- writesl(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
#endif
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 583bf3262df5..33494241245a 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -806,7 +806,7 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (ret == 0)
set_bit(i, host->taps);
- mdelay(1);
+ usleep_range(1000, 1200);
}
ret = host->select_tuning(host);
@@ -926,20 +926,6 @@ static void tmio_mmc_done_work(struct work_struct *work)
tmio_mmc_finish_request(host);
}
-static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
-{
- if (!host->clk_enable)
- return -ENOTSUPP;
-
- return host->clk_enable(host);
-}
-
-static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
-{
- if (host->clk_disable)
- host->clk_disable(host);
-}
-
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
@@ -958,7 +944,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* 100us were not enough. Is this the same 140us delay, as in
* tmio_mmc_set_ios()?
*/
- udelay(200);
+ usleep_range(200, 300);
}
/*
* It seems, VccQ should be switched on after Vcc, this is also what the
@@ -966,7 +952,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
*/
if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
ret = regulator_enable(mmc->supply.vqmmc);
- udelay(200);
+ usleep_range(200, 300);
}
if (ret < 0)
@@ -1059,7 +1045,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* Let things settle. delay taken from winCE driver */
- udelay(140);
+ usleep_range(140, 200);
if (PTR_ERR(host->mrq) == -EINTR)
dev_dbg(&host->pdev->dev,
"%s.%d: IOS interrupted: clk %u, mode %u",
@@ -1076,15 +1062,9 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
- int ret = mmc_gpio_get_ro(mmc);
-
- if (ret >= 0)
- return ret;
-
- ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
- return ret;
+ return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_multi_io_quirk(struct mmc_card *card,
@@ -1098,7 +1078,7 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static struct mmc_host_ops tmio_mmc_ops = {
+static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
@@ -1145,19 +1125,45 @@ static void tmio_mmc_of_parse(struct platform_device *pdev,
pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
-struct tmio_mmc_host*
-tmio_mmc_host_alloc(struct platform_device *pdev)
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata)
{
struct tmio_mmc_host *host;
struct mmc_host *mmc;
+ struct resource *res;
+ void __iomem *ctl;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctl = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctl))
+ return ERR_CAST(ctl);
mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
if (!mmc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
host = mmc_priv(mmc);
+ host->ctl = ctl;
host->mmc = mmc;
host->pdev = pdev;
+ host->pdata = pdata;
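+ /* Keep a per-host copy of the ops so individual hooks can be overridden */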
+ host->ops = tmio_mmc_ops;
+ mmc->ops = &host->ops;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret) {
+ host = ERR_PTR(ret);
+ goto free;
+ }
+
+ tmio_mmc_of_parse(pdev, pdata);
+
+ platform_set_drvdata(pdev, host);
+
+ return host;
+free:
+ mmc_free_host(mmc);
return host;
}
@@ -1169,32 +1175,24 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host)
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
-int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops)
+int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
struct platform_device *pdev = _host->pdev;
+ struct tmio_mmc_data *pdata = _host->pdata;
struct mmc_host *mmc = _host->mmc;
- struct resource *res_ctl;
int ret;
u32 irq_mask = TMIO_MASK_CMD;
- tmio_mmc_of_parse(pdev, pdata);
+ /*
+ * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
+ * looping forever...
+ */
+ if (mmc->f_min == 0)
+ return -EINVAL;
if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
_host->write16_hook = NULL;
- res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_ctl)
- return -EINVAL;
-
- ret = mmc_of_parse(mmc);
- if (ret < 0)
- return ret;
-
- _host->pdata = pdata;
- platform_set_drvdata(pdev, mmc);
-
_host->set_pwr = pdata->set_pwr;
_host->set_clk_div = pdata->set_clk_div;
@@ -1202,15 +1200,11 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (ret < 0)
return ret;
- _host->ctl = devm_ioremap(&pdev->dev,
- res_ctl->start, resource_size(res_ctl));
- if (!_host->ctl)
- return -ENOMEM;
-
- tmio_mmc_ops.card_busy = _host->card_busy;
- tmio_mmc_ops.start_signal_voltage_switch =
- _host->start_signal_voltage_switch;
- mmc->ops = &tmio_mmc_ops;
+ if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
+ if (ret)
+ return ret;
+ }
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
@@ -1233,7 +1227,10 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
}
mmc->max_seg_size = mmc->max_req_size;
- _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
+ if (mmc_can_gpio_ro(mmc))
+ _host->ops.get_ro = mmc_gpio_get_ro;
+
+ _host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
@@ -1246,18 +1243,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (pdata->flags & TMIO_MMC_MIN_RCAR2)
_host->native_hotplug = true;
- if (tmio_mmc_clk_enable(_host) < 0) {
- mmc->f_max = pdata->hclk;
- mmc->f_min = mmc->f_max / 512;
- }
-
- /*
- * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
- * looping forever...
- */
- if (mmc->f_min == 0)
- return -EINVAL;
-
/*
* While using internal tmio hardware logic for card detection, we need
* to ensure it stays powered for it to work.
@@ -1293,7 +1278,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
INIT_WORK(&_host->done, tmio_mmc_done_work);
/* See if we also get DMA */
- _host->dma_ops = dma_ops;
tmio_mmc_request_dma(_host, pdata);
pm_runtime_set_active(&pdev->dev);
@@ -1307,14 +1291,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
- if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret)
- goto remove_host;
-
- mmc_gpiod_request_cd_irq(mmc);
- }
-
return 0;
remove_host:
@@ -1343,16 +1319,27 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
+static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
+{
+ if (!host->clk_enable)
+ return -ENOTSUPP;
+
+ return host->clk_enable(host);
+}
+
+static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
+{
+ if (host->clk_disable)
+ host->clk_disable(host);
+}
+
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
@@ -1372,8 +1359,7 @@ static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
int tmio_mmc_host_runtime_resume(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_reset(host);
tmio_mmc_clk_enable(host);
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 0806f72102c0..a85af236b44d 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -904,9 +904,6 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
if (ooblen % DOC_LAYOUT_OOB_SIZE)
return -EINVAL;
- if (from + len > mtd->size)
- return -EINVAL;
-
ops->oobretlen = 0;
ops->retlen = 0;
ret = 0;
@@ -990,36 +987,6 @@ err_in_read:
goto out;
}
-/**
- * doc_read - Read bytes from flash
- * @mtd: the device
- * @from: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
- *
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
- *
- * Returns 0 if read successful, of -EIO, -EINVAL if an error occurred
- */
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct mtd_oob_ops ops;
- size_t ret;
-
- memset(&ops, 0, sizeof(ops));
- ops.datbuf = buf;
- ops.len = len;
- ops.mode = MTD_OPS_AUTO_OOB;
-
- ret = doc_read_oob(mtd, from, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static int doc_reload_bbt(struct docg3 *docg3)
{
int block = DOC_LAYOUT_BLOCK_BBT;
@@ -1471,8 +1438,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
if (len && ooblen &&
(len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
return -EINVAL;
- if (ofs + len > mtd->size)
- return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
@@ -1513,39 +1478,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
return ret;
}
-/**
- * doc_write - Write a buffer to the chip
- * @mtd: the device
- * @to: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to write (must be a full page size, ie. 512)
- * @retlen: the number of bytes actually written (0 or 512)
- * @buf: the buffer to get bytes from
- *
- * Writes data to the chip.
- *
- * Returns 0 if write successful, -EIO if write error
- */
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct docg3 *docg3 = mtd->priv;
- int ret;
- struct mtd_oob_ops ops;
-
- doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
- ops.datbuf = (char *)buf;
- ops.len = len;
- ops.mode = MTD_OPS_PLACE_OOB;
- ops.oobbuf = NULL;
- ops.ooblen = 0;
- ops.ooboffs = 0;
-
- ret = doc_write_oob(mtd, to, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static struct docg3 *sysfs_dev2docg3(struct device *dev,
struct device_attribute *attr)
{
@@ -1866,8 +1798,6 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
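+ /* Plain reads/writes are now emulated by mtdcore via _read_oob/_write_oob */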
mtd->_read_oob = doc_read_oob;
mtd->_write_oob = doc_write_oob;
mtd->_block_isbad = doc_block_isbad;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index dbe6a1de2bb8..a4e18f6aaa33 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -307,10 +307,18 @@ static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = spi_get_drvdata(spi);
+ spi_nor_restore(&flash->spi_nor);
+
/* Clean up MTD stuff. */
return mtd_device_unregister(&flash->spi_nor.mtd);
}
+static void m25p_shutdown(struct spi_device *spi)
+{
+ struct m25p *flash = spi_get_drvdata(spi);
+
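+ /* Put the flash back into a state the boot code expects before power-off */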
+ spi_nor_restore(&flash->spi_nor);
+}
/*
* Do NOT add to this array without reading the following:
*
@@ -386,6 +394,7 @@ static struct spi_driver m25p80_driver = {
.id_table = m25p_ids,
.probe = m25p_probe,
.remove = m25p_remove,
+ .shutdown = m25p_shutdown,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 8956b7dcc984..75f71d166fd6 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -68,6 +68,7 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -84,12 +85,16 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
@@ -100,6 +105,7 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -117,12 +123,16 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 73b605577447..28553c840d32 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -503,6 +503,11 @@ int add_mtd_device(struct mtd_info *mtd)
return -EEXIST;
BUG_ON(mtd->writesize == 0);
+
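+ /* Erase support is mandatory unless the device is flagged MTD_NO_ERASE */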
+ if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
+ !(mtd->flags & MTD_NO_ERASE)))
+ return -EINVAL;
+
mutex_lock(&mtd_table_mutex);
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
@@ -1053,7 +1058,20 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
* representing the maximum number of bitflips that were corrected on
* any one ecc region (if applicable; zero otherwise).
*/
- ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ if (mtd->_read) {
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ } else if (mtd->_read_oob) {
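+ /* Emulate a plain data read through the OOB path */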
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+
+ ret_code = mtd->_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ } else {
+ return -ENOTSUPP;
+ }
+
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -1068,11 +1086,25 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
*retlen = 0;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
- if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+ if ((!mtd->_write && !mtd->_write_oob) ||
+ !(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (!len)
return 0;
ledtrig_mtd_activity();
+
+ if (!mtd->_write) {
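+ /* Emulate a plain data write through ->_write_oob() */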
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = mtd->_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+ }
+
return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index be088bccd593..76cd21d1171b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -105,34 +105,17 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct mtd_part *part = mtd_to_part(mtd);
+ struct mtd_ecc_stats stats;
int res;
- if (from >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && from + ops->len > mtd->size)
- return -EINVAL;
-
- /*
- * If OOB is also requested, make sure that we do not read past the end
- * of this partition.
- */
- if (ops->oobbuf) {
- size_t len, pages;
-
- len = mtd_oobavail(mtd, ops);
- pages = mtd_div_by_ws(mtd->size, mtd);
- pages -= mtd_div_by_ws(from, mtd);
- if (ops->ooboffs + ops->ooblen > pages * len)
- return -EINVAL;
- }
-
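+ /* Snapshot parent ECC stats so only this read's delta is added to the partition */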
+ stats = part->parent->ecc_stats;
res = part->parent->_read_oob(part->parent, from + part->offset, ops);
- if (unlikely(res)) {
- if (mtd_is_bitflip(res))
- mtd->ecc_stats.corrected++;
- if (mtd_is_eccerr(res))
- mtd->ecc_stats.failed++;
- }
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ part->parent->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ part->parent->ecc_stats.corrected - stats.corrected;
return res;
}
@@ -189,10 +172,6 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
{
struct mtd_part *part = mtd_to_part(mtd);
- if (to >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && to + ops->len > mtd->size)
- return -EINVAL;
return part->parent->_write_oob(part->parent, to + part->offset, ops);
}
@@ -435,8 +414,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
parent->dev.parent;
slave->mtd.dev.of_node = part->of_node;
- slave->mtd._read = part_read;
- slave->mtd._write = part_write;
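+ /* Only provide data read/write hooks when the parent implements them */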
+ if (parent->_read)
+ slave->mtd._read = part_read;
+ if (parent->_write)
+ slave->mtd._write = part_write;
if (parent->_panic_write)
slave->mtd._panic_write = part_panic_write;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index f07492c6f4b2..7eb0e1f4f980 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1223,8 +1223,9 @@ static int mtdswap_show(struct seq_file *s, void *data)
unsigned int max[MTDSWAP_TREE_CNT];
unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
uint64_t use_size;
- char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
- "failing"};
+ static const char * const name[] = {
+ "clean", "used", "low", "high", "dirty", "bitflip", "failing"
+ };
mutex_lock(&d->mbd_dev->lock);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index bb48aafed9a2..e6b8c59f2c0d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -315,6 +315,7 @@ config MTD_NAND_ATMEL
config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
+ depends on !MTD_NAND_MARVELL
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
@@ -323,6 +324,18 @@ config MTD_NAND_PXA3xx
platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
platforms (7K, 8K) (NFCv2).
+config MTD_NAND_MARVELL
+ tristate "NAND controller support on Marvell boards"
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
+ COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller driver for Marvell boards,
+ including:
+ - PXA3xx processors (NFCv1)
+ - 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
+ - 64-bit Armada platforms (7K, 8K) (NFCv2)
+
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
depends on ARCH_LPC32XX
@@ -376,9 +389,7 @@ config MTD_NAND_GPMI_NAND
Enables NAND Flash support for IMX23, IMX28 or IMX6.
The GPMI controller is very powerful, with the help of BCH
module, it can do the hardware ECC. The GPMI supports several
- NAND flashs at the same time. The GPMI may conflicts with other
- block, such as SD card. So pay attention to it when you enable
- the GPMI.
+ NAND flashes at the same time.
config MTD_NAND_BRCMNAND
tristate "Broadcom STB NAND controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 118a1349aad3..921634ba400c 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
+obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index 90a71a56bc23..b2f00b398490 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -841,6 +841,8 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
struct atmel_nand *nand = to_atmel_nand(chip);
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
if (ret)
return ret;
@@ -857,7 +859,7 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -881,6 +883,8 @@ static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
if (ret)
return ret;
@@ -1000,7 +1004,7 @@ static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
* to the non-optimized one.
*/
if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
raw);
@@ -1178,7 +1182,6 @@ static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
return 0;
}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 5655dca6ce43..87bbd177b3e5 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -572,6 +572,8 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -582,10 +584,10 @@ static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/*
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index dd56a671ea42..c28fd2bc1a84 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1071,7 +1071,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
return;
brcmnand_set_wp(ctrl, wp);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(chip, NULL);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
ret = bcmnand_ctrl_poll_status(ctrl,
NAND_CTRL_RDY |
@@ -1453,7 +1453,7 @@ static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
/* At FC_BYTES boundary, switch to next column */
if (host->last_byte > 0 && offs == 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
+ nand_change_read_column_op(chip, addr, NULL, 0, false);
ret = ctrl->flash_cache[offs];
break;
@@ -1681,7 +1681,7 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
int ret;
if (!buf) {
- buf = chip->buffers->databuf;
+ buf = chip->data_buf;
/* Invalidate page cache */
chip->pagebuf = -1;
}
@@ -1689,7 +1689,6 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
sas = mtd->oobsize / chip->ecc.steps;
/* read without ecc for verification */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
if (ret)
return ret;
@@ -1793,6 +1792,8 @@ static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
return brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}
@@ -1804,6 +1805,8 @@ static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
@@ -1909,8 +1912,10 @@ static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_page_raw(struct mtd_info *mtd,
@@ -1920,10 +1925,12 @@ static int brcmnand_write_page_raw(struct mtd_info *mtd,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_set_ecc_enabled(host, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
@@ -2193,16 +2200,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
if (ctrl->nand_version >= 0x0702)
tmp |= ACC_CONTROL_RD_ERASED;
tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
- if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
- /*
- * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
- * errors
- */
- if (has_flash_dma(ctrl))
- tmp &= ~ACC_CONTROL_PREFETCH;
- else
- tmp |= ACC_CONTROL_PREFETCH;
- }
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH)
+ tmp &= ~ACC_CONTROL_PREFETCH;
+
nand_writereg(ctrl, offs, tmp);
return 0;
@@ -2230,6 +2230,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_set_controller_data(chip, host);
mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
@@ -2369,12 +2372,11 @@ static int brcmnand_resume(struct device *dev)
list_for_each_entry(host, &ctrl->host_list, node) {
struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(chip);
}
return 0;
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index bc558c438a57..567ff972d5fc 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -353,23 +353,15 @@ static void cafe_nand_bug(struct mtd_info *mtd)
static int cafe_nand_write_oob(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status = 0;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/* Don't use -- use nand_read_oob_std for now */
static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/**
* cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
@@ -391,7 +383,7 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
cafe_readl(cafe, NAND_ECC_RESULT),
cafe_readl(cafe, NAND_ECC_SYN01));
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
@@ -549,13 +541,13 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/* Set up ECC autogeneration */
cafe->ctl2 |= (1<<30);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -613,7 +605,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
uint32_t ctrl;
int err = 0;
int old_dma;
- struct nand_buffers *nbuf;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -661,7 +652,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
- cafe->nand.options = NAND_OWN_BUFFERS;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -731,32 +721,20 @@ static int cafe_nand_probe(struct pci_dev *pdev,
if (err)
goto out_irq;
- cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- &cafe->dmaaddr, GFP_KERNEL);
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
+ &cafe->dmaaddr, GFP_KERNEL);
if (!cafe->dmabuf) {
err = -ENOMEM;
goto out_irq;
}
- cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
/* Set up DMA address */
- cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
- if (sizeof(cafe->dmaaddr) > 4)
- /* Shift in two parts to shut the compiler up */
- cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
- else
- cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+ cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+ cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
- /* this driver does not need the @ecccalc and @ecccode */
- nbuf->ecccalc = NULL;
- nbuf->ecccode = NULL;
- nbuf->databuf = (uint8_t *)(nbuf + 1);
-
/* Restore the DMA flag */
usedma = old_dma;
@@ -801,10 +779,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
goto out;
out_free_dma:
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
@@ -829,10 +804,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
nand_release(mtd);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
}
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 5124f8ae8c04..313c7f50621b 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -330,16 +330,12 @@ static int denali_check_erased_page(struct mtd_info *mtd,
unsigned long uncor_ecc_flags,
unsigned int max_bitflips)
{
- uint8_t *ecc_code = chip->buffers->ecccode;
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- int i, ret, stat;
-
- ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
+ int i, stat;
for (i = 0; i < ecc_steps; i++) {
if (!(uncor_ecc_flags & BIT(i)))
@@ -645,8 +641,6 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int page, int write)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
- unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
int writesize = mtd->writesize;
int oobsize = mtd->oobsize;
uint8_t *bufpoi = chip->oob_poi;
@@ -658,11 +652,11 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int i, pos, len;
/* BBM at the beginning of the OOB area */
- chip->cmdfunc(mtd, start_cmd, writesize, page);
if (write)
- chip->write_buf(mtd, bufpoi, oob_skip);
+ nand_prog_page_begin_op(chip, page, writesize, bufpoi,
+ oob_skip);
else
- chip->read_buf(mtd, bufpoi, oob_skip);
+ nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
bufpoi += oob_skip;
/* OOB ECC */
@@ -675,30 +669,35 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- chip->cmdfunc(mtd, rnd_cmd, pos, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, pos, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, pos, bufpoi, len,
+ false);
bufpoi += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
bufpoi += len;
}
}
/* OOB free */
len = oobsize - (bufpoi - chip->oob_poi);
- chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, size - len, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, size - len, bufpoi, len,
+ false);
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
@@ -710,12 +709,12 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int ret, i, pos, len;
- ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
+ ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
if (ret)
return ret;
@@ -730,11 +729,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(buf, dma_buf + pos, len);
+ memcpy(buf, tmp_buf + pos, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(buf, dma_buf + writesize + oob_skip,
+ memcpy(buf, tmp_buf + writesize + oob_skip,
len);
buf += len;
}
@@ -745,7 +744,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(oob, dma_buf + writesize, oob_skip);
+ memcpy(oob, tmp_buf + writesize, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -758,11 +757,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(oob, dma_buf + pos, len);
+ memcpy(oob, tmp_buf + pos, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(oob, dma_buf + writesize + oob_skip,
+ memcpy(oob, tmp_buf + writesize + oob_skip,
len);
oob += len;
}
@@ -770,7 +769,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(oob, dma_buf + size - len, len);
+ memcpy(oob, tmp_buf + size - len, len);
}
return 0;
@@ -788,16 +787,12 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- int status;
denali_reset_irq(denali);
denali_oob_xfer(mtd, chip, page, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -841,7 +836,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int i, pos, len;
@@ -851,7 +846,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* This simplifies the logic.
*/
if (!buf || !oob_required)
- memset(dma_buf, 0xff, size);
+ memset(tmp_buf, 0xff, size);
/* Arrange the buffer for syndrome payload/ecc layout */
if (buf) {
@@ -864,11 +859,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, buf, len);
+ memcpy(tmp_buf + pos, buf, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(dma_buf + writesize + oob_skip, buf,
+ memcpy(tmp_buf + writesize + oob_skip, buf,
len);
buf += len;
}
@@ -879,7 +874,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(dma_buf + writesize, oob, oob_skip);
+ memcpy(tmp_buf + writesize, oob, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -892,11 +887,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, oob, len);
+ memcpy(tmp_buf + pos, oob, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(dma_buf + writesize + oob_skip, oob,
+ memcpy(tmp_buf + writesize + oob_skip, oob,
len);
oob += len;
}
@@ -904,10 +899,10 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(dma_buf + size - len, oob, len);
+ memcpy(tmp_buf + size - len, oob, len);
}
- return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
+ return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -951,7 +946,7 @@ static int denali_erase(struct mtd_info *mtd, int page)
irq_status = denali_wait_for_irq(denali,
INTR__ERASE_COMP | INTR__ERASE_FAIL);
- return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
+ return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
@@ -1359,7 +1354,6 @@ int denali_init(struct denali_nand_info *denali)
chip->read_buf = denali_read_buf;
chip->write_buf = denali_write_buf;
}
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.read_page = denali_read_page;
chip->ecc.read_page_raw = denali_read_page_raw;
chip->ecc.write_page = denali_write_page;
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 2911066dacac..9ad33d237378 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -329,7 +329,7 @@ struct denali_nand_info {
#define DENALI_CAP_DMA_64BIT BIT(1)
int denali_calc_ecc_bytes(int step_size, int strength);
-extern int denali_init(struct denali_nand_info *denali);
-extern void denali_remove(struct denali_nand_info *denali);
+int denali_init(struct denali_nand_info *denali);
+void denali_remove(struct denali_nand_info *denali);
#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 57fb7ae31412..49cb3e1f8bd0 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -125,3 +125,7 @@ static struct pci_driver denali_pci_driver = {
.remove = denali_pci_remove,
};
module_pci_driver(denali_pci_driver);
+
+MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 72671dc52e2e..6bc93ea66f50 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -448,7 +448,7 @@ static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
int status;
DoC_WaitReady(doc);
- this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(this, NULL);
DoC_WaitReady(doc);
status = (int)this->read_byte(mtd);
@@ -595,7 +595,7 @@ static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
/* Assert ChipEnable and deassert WriteProtect */
WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(this);
doc->curchip = chip;
doc->curfloor = floor;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 2436cbc71662..72f1327c4430 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -785,6 +785,8 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_READ_MODE |
DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
@@ -864,7 +866,7 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
- docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+ nand_read_page_op(nand, page, nand->ecc.size, NULL, 0);
writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
write_nop(docptr);
@@ -900,6 +902,7 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
uint16_t g4_page;
+ int status;
dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
@@ -939,11 +942,15 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
poll_status(doc);
write_nop(docptr);
- return nand->waitfunc(mtd, nand);
+ status = nand->waitfunc(mtd, nand);
+ if (status < 0)
+ return status;
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, bool use_ecc)
+ const uint8_t *buf, int page, bool use_ecc)
{
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
@@ -951,6 +958,8 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s...\n", __func__);
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
DOCG4_BCH_SIZE,
@@ -995,19 +1004,19 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
- return 0;
+ return nand_prog_page_end_op(nand);
}
static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, false);
+ return write_page(mtd, nand, buf, page, false);
}
static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, true);
+ return write_page(mtd, nand, buf, page, true);
}
static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 17db2f90aa2c..8b6dcd739ecb 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -713,7 +713,7 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
- fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -729,10 +729,10 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/* ECC will be calculated automatically, and errors will be detected in
@@ -742,10 +742,10 @@ static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offset, uint32_t data_len,
const uint8_t *buf, int oob_required, int page)
{
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 9e03bac7f34c..4872a7ba6503 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -688,7 +688,7 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
- fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -711,10 +711,10 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
@@ -916,6 +916,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
if (ctrl->version >= FSL_IFC_VERSION_1_1_0)
fsl_ifc_sram_init(priv);
+ /*
+ * IFC version 2.0.0 has 16KB of internal SRAM, compared to 8KB on older
+ * versions, so the bufnum mask needs to be updated (e.g. a bufnum_mask of
+ * 0x3 becomes 0x7).
+ */
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
+
return 0;
}
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index eac15d9bf49e..f49ed46fa770 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -684,8 +684,8 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int off, len, group = 0;
/*
* ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
@@ -697,7 +697,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+ nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(mtd, NAND_ECC_READ);
chip->read_buf(mtd, p, eccsize);
@@ -720,8 +720,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->options & NAND_BUSWIDTH_16)
len = roundup(len, 2);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
- chip->read_buf(mtd, oob + j, len);
+ nand_read_oob_op(chip, page, off, oob + j, len);
j += len;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index d4d824ef64e9..61fdd733492f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1029,11 +1029,13 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
-static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int gpmi_ecc_read_page_data(struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(chip);
void *payload_virt;
dma_addr_t payload_phys;
void *auxiliary_virt;
@@ -1094,8 +1096,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
offset /= 8;
eccbytes -= offset;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
- chip->read_buf(mtd, eccbuf, eccbytes);
+ nand_change_read_column_op(chip, offset, eccbuf,
+ eccbytes, false);
/*
* ECC data are not byte aligned and we may have
@@ -1176,6 +1178,14 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return max_bitflips;
}
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
+}
+
/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offs, uint32_t len, uint8_t *buf, int page)
@@ -1220,12 +1230,12 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
meta = geo->metadata_size;
if (first) {
col = meta + (size + ecc_parity_size) * first;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
-
meta = 0;
buf = buf + first * size;
}
+ nand_read_page_op(chip, page, col, NULL, 0);
+
/* Save the old environment */
r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
@@ -1254,7 +1264,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
/* Read the subpage now */
this->swap_block_mark = false;
- max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);
/* Restore */
writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
@@ -1277,6 +1287,9 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
int ret;
dev_dbg(this->dev, "ecc write page.\n");
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (this->swap_block_mark) {
/*
* If control arrives here, we're doing block mark swapping.
@@ -1338,7 +1351,10 @@ exit_auxiliary:
payload_virt, payload_phys);
}
- return 0;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -1411,7 +1427,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
memset(chip->oob_poi, ~0, mtd->oobsize);
/* Read out the conventional OOB. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
@@ -1421,7 +1437,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
*/
if (GPMI_IS_MX23(this)) {
/* Read the block mark into the first byte of the OOB buffer. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
chip->oob_poi[0] = chip->read_byte(mtd);
}
@@ -1432,7 +1448,6 @@ static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
struct mtd_oob_region of = { };
- int status = 0;
/* Do we have available oob area? */
mtd_ooblayout_free(mtd, 0, &of);
@@ -1442,12 +1457,8 @@ gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
if (!nand_is_slc(chip))
return -EPERM;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
- chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
+ chip->oob_poi + of.offset, of.length);
}
/*
@@ -1477,8 +1488,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
uint8_t *oob = chip->oob_poi;
int step;
- chip->read_buf(mtd, tmp_buf,
- mtd->writesize + mtd->oobsize);
+ nand_read_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
/*
* If required, swap the bad block marker and the data stored in the
@@ -1487,12 +1498,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
/*
* Copy the metadata section into the oob buffer (this section is
@@ -1615,31 +1622,22 @@ static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
- chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
-
- return 0;
+ return nand_prog_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
}
static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
}
static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
-
return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1649,7 +1647,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret = 0;
uint8_t *block_mark;
- int column, page, status, chipnr;
+ int column, page, chipnr;
chipnr = (int)(ofs >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -1663,13 +1661,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* Shift to get page */
page = (int)(ofs >> chip->page_shift);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
- chip->write_buf(mtd, block_mark, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- ret = -EIO;
+ ret = nand_prog_page_op(chip, page, column, block_mark, 1);
chip->select_chip(mtd, -1);
@@ -1712,7 +1704,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int found_an_ncb_fingerprint = false;
@@ -1737,7 +1729,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
* Read the NCB fingerprint. The fingerprint is four bytes long
* and starts in the 12th byte of the page.
*/
- chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+ nand_read_page_op(chip, page, 12, NULL, 0);
chip->read_buf(mtd, buffer, strlen(fingerprint));
/* Look for the fingerprint. */
@@ -1771,7 +1763,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
unsigned int block;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int status;
@@ -1797,17 +1789,10 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
dev_dbg(dev, "Erasing the search area...\n");
for (block = 0; block < search_area_size_in_blocks; block++) {
- /* Compute the page address. */
- page = block * block_size_in_pages;
-
/* Erase this block. */
dev_dbg(dev, "\tErasing block 0x%x\n", block);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-
- /* Wait for the erase to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = nand_erase_op(chip, block);
+ if (status)
dev_err(dev, "[%s] Erase failed.\n", __func__);
}
@@ -1823,13 +1808,9 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- /* Wait for the write to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
+ if (status)
dev_err(dev, "[%s] Write failed.\n", __func__);
}
@@ -1884,7 +1865,7 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
/* Send the command to read the conventional block mark. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
block_mark = chip->read_byte(mtd);
chip->select_chip(mtd, -1);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index a45e4ce13d10..06c1f993912c 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -268,31 +268,31 @@ struct timing_threshold {
};
/* Common Services */
-extern int common_nfc_set_geometry(struct gpmi_nand_data *);
-extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
-extern void prepare_data_dma(struct gpmi_nand_data *,
- enum dma_data_direction dr);
-extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
-extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
+int common_nfc_set_geometry(struct gpmi_nand_data *);
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+void prepare_data_dma(struct gpmi_nand_data *,
+ enum dma_data_direction dr);
+int start_dma_without_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+int start_dma_with_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
/* GPMI-NAND helper function library */
-extern int gpmi_init(struct gpmi_nand_data *);
-extern int gpmi_extra_init(struct gpmi_nand_data *);
-extern void gpmi_clear_bch(struct gpmi_nand_data *);
-extern void gpmi_dump_info(struct gpmi_nand_data *);
-extern int bch_set_geometry(struct gpmi_nand_data *);
-extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
-extern int gpmi_send_command(struct gpmi_nand_data *);
-extern void gpmi_begin(struct gpmi_nand_data *);
-extern void gpmi_end(struct gpmi_nand_data *);
-extern int gpmi_read_data(struct gpmi_nand_data *);
-extern int gpmi_send_data(struct gpmi_nand_data *);
-extern int gpmi_send_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
-extern int gpmi_read_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_init(struct gpmi_nand_data *);
+int gpmi_extra_init(struct gpmi_nand_data *);
+void gpmi_clear_bch(struct gpmi_nand_data *);
+void gpmi_dump_info(struct gpmi_nand_data *);
+int bch_set_geometry(struct gpmi_nand_data *);
+int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+int gpmi_send_command(struct gpmi_nand_data *);
+void gpmi_begin(struct gpmi_nand_data *);
+void gpmi_end(struct gpmi_nand_data *);
+int gpmi_read_data(struct gpmi_nand_data *);
+int gpmi_send_data(struct gpmi_nand_data *);
+int gpmi_send_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_read_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
const u8 *src, size_t src_bit_off,
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 0897261c3e17..cb862793ab6d 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -544,7 +544,7 @@ static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
int stat_1, stat_2;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/* errors which can not be corrected by ECC */
@@ -574,8 +574,7 @@ static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
{
struct hinfc_host *host = nand_get_controller_data(chip);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
if (host->irq_status & HINFC504_INTS_UE) {
host->irq_status = 0;
@@ -590,11 +589,11 @@ static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static void hisi_nfc_host_init(struct hinfc_host *host)
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ad827d4af3e9..613b00a9604b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -313,6 +313,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
uint32_t ctrl;
struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 id[2];
/* Request I/O resource. */
sprintf(res_name, "bank%d", bank);
@@ -335,17 +336,16 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
/* Retrieve the IDs from the first chip. */
chip->select_chip(mtd, 0);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- *nand_maf_id = chip->read_byte(mtd);
- *nand_dev_id = chip->read_byte(mtd);
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ *nand_maf_id = id[0];
+ *nand_dev_id = id[1];
} else {
/* Detect additional chip. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- if (*nand_maf_id != chip->read_byte(mtd)
- || *nand_dev_id != chip->read_byte(mtd)) {
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) {
ret = -ENODEV;
goto notfound_id;
}
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5796468db653..e357948a7505 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -461,7 +461,7 @@ static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
}
/* Writing Command and Address */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* For all sub-pages */
for (i = 0; i < host->mlcsubpages; i++) {
@@ -522,6 +522,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
memcpy(dma_buf, buf, mtd->writesize);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
for (i = 0; i < host->mlcsubpages; i++) {
/* Start Encode */
writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
@@ -550,7 +552,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
/* Wait for Controller Ready */
lpc32xx_waitfunc_controller(mtd, chip);
}
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index b61f28a1554d..5f7cc6da0a7f 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -399,10 +399,7 @@ static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int
static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -411,17 +408,8 @@ static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/*
@@ -632,7 +620,7 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Read data and oob, calculate ECC */
status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
@@ -675,7 +663,7 @@ static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
int page)
{
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Raw reads can just use the FIFO interface */
chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
@@ -698,6 +686,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
uint8_t *pb;
int error;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
/* Write data, calculate ECC on outbound data */
error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
if (error)
@@ -716,7 +706,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
/* Write ECC data to device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -729,9 +720,11 @@ static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
int oob_required, int page)
{
/* Raw writes can just use the FIFO interface */
- chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ nand_prog_page_begin_op(chip, page, 0, buf,
+ chip->ecc.size * chip->ecc.steps);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
new file mode 100644
index 000000000000..2196f2a233d6
--- /dev/null
+++ b/drivers/mtd/nand/marvell_nand.c
@@ -0,0 +1,2896 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell NAND flash controller driver
+ *
+ * Copyright (C) 2017 Marvell
+ * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <asm/unaligned.h>
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
+
+/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
+#define FIFO_DEPTH 8
+#define FIFO_REP(x) (x / sizeof(u32))
+#define BCH_SEQ_READS (32 / FIFO_DEPTH)
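+/*
+ * Illustrative note (not from the original source): since FIFO accesses must
+ * be multiples of FIFO_DEPTH, a 10-byte PIO read would be done as one full
+ * 8-byte access plus one padded 8-byte access whose last 6 bytes are
+ * discarded; see marvell_nfc_xfer_data_in_pio() below.
+ */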
+/* The NFC does not support transferring chunks larger than this at a time */
+#define MAX_CHUNK_SIZE 2112
+/* NFCv1 cannot read more than 7 bytes of ID */
+#define NFCV1_READID_LEN 7
+/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
+#define POLL_PERIOD 0
+#define POLL_TIMEOUT 100000
+/* Interrupt maximum wait period in ms */
+#define IRQ_TIMEOUT 1000
+/* Latency in clock cycles between SoC pins and NFC logic */
+#define MIN_RD_DEL_CNT 3
+/* Maximum number of contiguous address cycles */
+#define MAX_ADDRESS_CYC_NFCV1 5
+#define MAX_ADDRESS_CYC_NFCV2 7
+/* System control registers/bits to enable the NAND controller on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
+#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
+#define GENCONF_CLK_GATING_CTRL 0x220
+#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
+#define GENCONF_ND_CLK_CTRL 0x700
+#define GENCONF_ND_CLK_CTRL_EN BIT(0)
+
+/* NAND controller data flash control register */
+#define NDCR 0x00
+#define NDCR_ALL_INT GENMASK(11, 0)
+#define NDCR_CS1_CMDDM BIT(7)
+#define NDCR_CS0_CMDDM BIT(8)
+#define NDCR_RDYM BIT(11)
+#define NDCR_ND_ARB_EN BIT(12)
+#define NDCR_RA_START BIT(15)
+#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
+#define NDCR_DWIDTH_M BIT(26)
+#define NDCR_DWIDTH_C BIT(27)
+#define NDCR_ND_RUN BIT(28)
+#define NDCR_DMA_EN BIT(29)
+#define NDCR_ECC_EN BIT(30)
+#define NDCR_SPARE_EN BIT(31)
+#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
+ NDCR_DWIDTH_M | NDCR_DWIDTH_C))
+
+/* NAND interface timing parameter 0 register */
+#define NDTR0 0x04
+#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
+#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
+#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
+#define NDTR0_SEL_NRE_EDGE BIT(7)
+#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
+#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
+#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
+#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
+#define NDTR0_SELCNTR BIT(26)
+#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
+
+/* NAND interface timing parameter 1 register */
+#define NDTR1 0x0C
+#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
+#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
+#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
+#define NDTR1_PRESCALE BIT(14)
+#define NDTR1_WAIT_MODE BIT(15)
+#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
+
+/* NAND controller status register */
+#define NDSR 0x14
+#define NDSR_WRCMDREQ BIT(0)
+#define NDSR_RDDREQ BIT(1)
+#define NDSR_WRDREQ BIT(2)
+#define NDSR_CORERR BIT(3)
+#define NDSR_UNCERR BIT(4)
+#define NDSR_CMDD(cs) BIT(8 - cs)
+#define NDSR_RDY(rb) BIT(11 + rb)
+#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
+
+/* NAND ECC control register */
+#define NDECCCTRL 0x28
+#define NDECCCTRL_BCH_EN BIT(0)
+
+/* NAND controller data buffer register */
+#define NDDB 0x40
+
+/* NAND controller command buffer 0 register */
+#define NDCB0 0x48
+#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
+#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
+#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
+#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
+#define NDCB0_DBC BIT(19)
+#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
+#define NDCB0_CSEL BIT(24)
+#define NDCB0_RDY_BYP BIT(27)
+#define NDCB0_LEN_OVRD BIT(28)
+#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
+
+/* NAND controller command buffer 1 register */
+#define NDCB1 0x4C
+#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
+#define NDCB1_ADDRS_PAGE(x) (x << 16)
+
+/* NAND controller command buffer 2 register */
+#define NDCB2 0x50
+#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
+#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
+
+/* NAND controller command buffer 3 register */
+#define NDCB3 0x54
+#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
+#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
+
+/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
+#define TYPE_READ 0
+#define TYPE_WRITE 1
+#define TYPE_ERASE 2
+#define TYPE_READ_ID 3
+#define TYPE_STATUS 4
+#define TYPE_RESET 5
+#define TYPE_NAKED_CMD 6
+#define TYPE_NAKED_ADDR 7
+#define TYPE_MASK 7
+#define XTYPE_MONOLITHIC_RW 0
+#define XTYPE_LAST_NAKED_RW 1
+#define XTYPE_FINAL_COMMAND 3
+#define XTYPE_READ 4
+#define XTYPE_WRITE_DISPATCH 4
+#define XTYPE_NAKED_RW 5
+#define XTYPE_COMMAND_DISPATCH 6
+#define XTYPE_MASK 7
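+
+/*
+ * Illustrative sketch (not from the original source): a monolithic page read
+ * composes its NDCB0 word from the fields above, for instance:
+ *
+ *	ndcb0 = NDCB0_CMD_TYPE(TYPE_READ) |
+ *		NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ *		NDCB0_ADDR_CYC(5) |
+ *		NDCB0_DBC |
+ *		NDCB0_CMD1(NAND_CMD_READ0) |
+ *		NDCB0_CMD2(NAND_CMD_READSTART);
+ *
+ * The 5-cycle address count is an assumption for a large-page chip; real
+ * values come from marvell_nand->addr_cyc (see the read helpers below).
+ */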
+
+/**
+ * The Marvell ECC engine works differently from the others: in order to limit
+ * the size of the IP, hardware engineers chose to set a fixed strength of 16
+ * bits per subpage. Depending on the strength needed by the NAND chip, a
+ * particular layout mixing data/spare/ECC is defined, with a possible last
+ * chunk smaller than the others.
+ *
+ * @writesize: Full page size on which the layout applies
+ * @chunk: Desired ECC chunk size on which the layout applies
+ * @strength: Desired ECC strength (per chunk size bytes) on which the
+ * layout applies
+ * @nchunks: Total number of chunks
+ * @full_chunk_cnt: Number of full-sized chunks, which is the number of
+ * repetitions of the pattern:
+ * (data_bytes + spare_bytes + ecc_bytes).
+ * @data_bytes: Number of data bytes per chunk
+ * @spare_bytes: Number of spare bytes per chunk
+ * @ecc_bytes: Number of ecc bytes per chunk
+ * @last_data_bytes: Number of data bytes in the last chunk
+ * @last_spare_bytes: Number of spare bytes in the last chunk
+ * @last_ecc_bytes: Number of ecc bytes in the last chunk
+ */
+struct marvell_hw_ecc_layout {
+ /* Constraints */
+ int writesize;
+ int chunk;
+ int strength;
+ /* Corresponding layout */
+ int nchunks;
+ int full_chunk_cnt;
+ int data_bytes;
+ int spare_bytes;
+ int ecc_bytes;
+ int last_data_bytes;
+ int last_spare_bytes;
+ int last_ecc_bytes;
+};
+
+#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
+ { \
+ .writesize = ws, \
+ .chunk = dc, \
+ .strength = ds, \
+ .nchunks = nc, \
+ .full_chunk_cnt = fcc, \
+ .data_bytes = db, \
+ .spare_bytes = sb, \
+ .ecc_bytes = eb, \
+ .last_data_bytes = ldb, \
+ .last_spare_bytes = lsb, \
+ .last_ecc_bytes = leb, \
+ }
+
+/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
+static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
+ MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
+};
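+
+/*
+ * Reading example (derived from the table above): the last entry describes a
+ * 4096-byte page with 8-bit strength per 512 bytes as 5 chunks, 4 of them
+ * full-sized (1024 data + 0 spare + 30 ECC bytes) and a smaller last chunk
+ * carrying 0 data, 64 spare and 30 ECC bytes.
+ */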
+
+/**
+ * The NAND Flash Controller has up to 4 CE and 2 RB pins. The CE selection
+ * is made by a field in the NDCB0 register and by another field in the NDCB2
+ * register. The datasheet describes the logic with an error: the ADDR5 field
+ * is declared once at the beginning of NDCB2 and a second time at its end.
+ * Because the ADDR5 field of NDCB2 may be used by other bytes, it is more
+ * logical to use the last bits of this field instead of the first ones.
+ *
+ * @cs: Wanted CE lane.
+ * @ndcb0_csel: Value of the NDCB0 register with or without the flag
+ * selecting the wanted CE lane. This is set once when
+ * the Device Tree is probed.
+ * @rb: Ready/Busy pin for the flash chip
+ */
+struct marvell_nand_chip_sel {
+ unsigned int cs;
+ u32 ndcb0_csel;
+ unsigned int rb;
+};
+
+/**
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @chip: Base NAND chip structure
+ * @node: Used to store NAND chips into a list
+ * @layout: NAND layout when using hardware ECC
+ * @ndcr: Controller register value for this NAND chip
+ * @ndtr0: Timing registers 0 value for this NAND chip
+ * @ndtr1: Timing registers 1 value for this NAND chip
+ * @addr_cyc: Number of address cycles required by the NAND chip
+ * @selected_die: Current active CS
+ * @nsels: Number of CS lines required by the NAND chip
+ * @sels: Array of CS lines descriptions
+ */
+struct marvell_nand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ const struct marvell_hw_ecc_layout *layout;
+ u32 ndcr;
+ u32 ndtr0;
+ u32 ndtr1;
+ int addr_cyc;
+ int selected_die;
+ unsigned int nsels;
+ struct marvell_nand_chip_sel sels[0];
+};
+
+static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct marvell_nand_chip, chip);
+}
+
+static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
+ *nand)
+{
+ return &nand->sels[nand->selected_die];
+}
+
+/**
+ * NAND controller capabilities for distinction between compatible strings
+ *
+ * @max_cs_nb: Number of Chip Select lines available
+ * @max_rb_nb: Number of Ready/Busy lines available
+ * @need_system_controller: Indicates if the SoC needs to have access to the
+ * system controller (i.e. to enable the NAND controller)
+ * @legacy_of_bindings: Indicates if DT parsing must be done using the
+ * legacy (old fashioned) bindings
+ * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, e.g.
+ * the BCH error detection and correction algorithm and
+ * the addition of the NDCB3 register
+ * @use_dma: Use dma for data transfers
+ */
+struct marvell_nfc_caps {
+ unsigned int max_cs_nb;
+ unsigned int max_rb_nb;
+ bool need_system_controller;
+ bool legacy_of_bindings;
+ bool is_nfcv2;
+ bool use_dma;
+};
+
+/**
+ * NAND controller structure: stores Marvell NAND controller information
+ *
+ * @controller: Base controller structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ecc_clk: ECC block clock, two times the NAND controller clock
+ * @complete: Completion object to wait for NAND controller events
+ * @assigned_cs: Bitmask describing already assigned CS lines
+ * @chips: List containing all the NAND chips attached to
+ * this NAND controller
+ * @selected_chip: Currently selected target chip
+ * @caps: NAND controller capabilities for each compatible string
+ * @use_dma: Whether DMA is used for data transfers (NFCv1 only)
+ * @dma_chan: DMA channel (NFCv1 only)
+ * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
+ */
+struct marvell_nfc {
+ struct nand_hw_control controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ecc_clk;
+ struct completion complete;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ const struct marvell_nfc_caps *caps;
+
+ /* DMA (NFCv1 only) */
+ bool use_dma;
+ struct dma_chan *dma_chan;
+ u8 *dma_buf;
+};
+
+static inline struct marvell_nfc *to_marvell_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct marvell_nfc, controller);
+}
+
+/**
+ * NAND controller timings expressed in NAND Controller clock cycles
+ *
+ * @tRP: ND_nRE pulse width
+ * @tRH: ND_nRE high duration
+ * @tWP: ND_nWE pulse time
+ * @tWH: ND_nWE high duration
+ * @tCS: Enable signal setup time
+ * @tCH: Enable signal hold time
+ * @tADL: Address to write data delay
+ * @tAR: ND_ALE low to ND_nRE low delay
+ * @tWHR: ND_nWE high to ND_nRE low for status read
+ * @tRHW: ND_nRE high duration, read to write delay
+ * @tR: ND_nWE high to ND_nRE low for read
+ */
+struct marvell_nfc_timings {
+ /* NDTR0 fields */
+ unsigned int tRP;
+ unsigned int tRH;
+ unsigned int tWP;
+ unsigned int tWH;
+ unsigned int tCS;
+ unsigned int tCH;
+ unsigned int tADL;
+ /* NDTR1 fields */
+ unsigned int tAR;
+ unsigned int tWHR;
+ unsigned int tRHW;
+ unsigned int tR;
+};
+
+/**
+ * Derives a duration in number of clock cycles.
+ *
+ * @ps: Duration in pico-seconds
+ * @period_ns: Clock period in nano-seconds
+ *
+ * Convert the duration to nano-seconds, then divide by the clock period,
+ * rounding up, to get the number of clock cycles.
+ */
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
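+/*
+ * Worked example (illustrative): with a 100 MHz NFC clock (period_ns = 10),
+ * a tADL of 70000 ps gives TO_CYCLES(70000, 10) = DIV_ROUND_UP(70, 10) = 7
+ * clock cycles.
+ */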
+
+/**
+ * NAND driver structure filled during the parsing of the ->exec_op() subop
+ * subset of instructions.
+ *
+ * @ndcb: Array of values written to NDCBx registers
+ * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @rdy_delay_ns: Optional delay after waiting for the RB pin
+ * @data_delay_ns: Optional delay after the data xfer
+ * @data_instr_idx: Index of the data instruction in the subop
+ * @data_instr: Pointer to the data instruction in the subop
+ */
+struct marvell_nfc_op {
+ u32 ndcb[4];
+ unsigned int cle_ale_delay_ns;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int data_delay_ns;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/*
+ * Internal helper to conditionally apply a delay (from the above structure,
+ * most of the time).
+ */
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/*
+ * The controller has many flags that could generate interrupts; most of them
+ * are disabled and polling is used. For the very slow signals, using
+ * interrupts may reduce the CPU load.
+ */
+static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 1 disables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 0 enables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ writel_relaxed(int_mask, nfc->regs + NDSR);
+}
+
+static void marvell_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr;
+
+ /*
+ * Callers of this function do not verify if the NAND is using a 16-bit
+ * or an 8-bit bus for normal operations, so we need to take care of that
+ * here by leaving the configuration unchanged if the NAND does not have
+ * the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return;
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (force_8bit)
+ ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
+ else
+ ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ writel_relaxed(ndcr, nfc->regs + NDCR);
+}
+
+static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ /*
+ * The command is being processed; wait for the ND_RUN bit to be
+ * cleared by the NFC. If it is not, we must clear it by hand.
+ */
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
+ (val & NDCR_ND_RUN) == 0,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Any time a command has to be sent to the controller, the following sequence
+ * has to be followed:
+ * - call marvell_nfc_prepare_cmd()
+ * -> activate the ND_RUN bit that will kind of 'start a job'
+ * -> wait for the signal indicating the NFC is waiting for a command
+ * - send the command (cmd and address cycles)
+ * - optionally send or receive the data
+ * - call marvell_nfc_end_cmd() with the corresponding flag
+ * -> wait for the flag to be set or cancel the job with a timeout
+ *
+ * The following helpers are here to factorize the code a bit so that
+ * specialized functions responsible for executing the actual NAND
+ * operations do not have to replicate the same code blocks; a minimal
+ * usage sketch follows this comment.
+ */
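+/*
+ * Minimal sketch of the sequence above (illustrative only; nfc_op is a
+ * hypothetical descriptor already filled by the caller):
+ *
+ *	ret = marvell_nfc_prepare_cmd(chip);
+ *	if (ret)
+ *		return ret;
+ *	marvell_nfc_send_cmd(chip, &nfc_op);
+ *	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ, "RDDREQ");
+ *	if (ret)
+ *		return ret;
+ *	... drain or fill the data FIFO, then marvell_nfc_wait_cmdd() ...
+ */
+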
+static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr, val;
+ int ret;
+
+ /* Poll ND_RUN and clear NDSR before issuing any command */
+ ret = marvell_nfc_wait_ndrun(chip);
+ if (ret) {
+ dev_err(nfc->dev, "Last operation did not succeed\n");
+ return ret;
+ }
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
+
+ /* Assert ND_RUN bit and wait the NFC to be ready */
+ writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & NDSR_WRCMDREQ,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on WRCMDRE\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Command may be written, clear WRCMDREQ status bit */
+ writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static void marvell_nfc_send_cmd(struct nand_chip *chip,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
+ "NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
+ (u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
+ nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);
+
+ writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
+ nfc->regs + NDCB0);
+ writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
+ writel(nfc_op->ndcb[2], nfc->regs + NDCB0);
+
+ /*
+ * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
+ * fields are used (only available on NFCv2).
+ */
+ if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
+ NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
+ if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
+ writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
+ }
+}
+
+static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
+ const char *label)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & flag,
+ POLL_PERIOD, POLL_TIMEOUT);
+
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
+ label, val);
+ if (nfc->dma_chan)
+ dmaengine_terminate_all(nfc->dma_chan);
+ return ret;
+ }
+
+ /*
+ * The DMA path uses this helper to poll on the CMDD bits without
+ * clearing them.
+ */
+ if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
+ return 0;
+
+ writel_relaxed(flag, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
+
+ return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
+}
+
+static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ /* Timeout is expressed in ms */
+ if (!timeout_ms)
+ timeout_ms = IRQ_TIMEOUT;
+
+ init_completion(&nfc->complete);
+
+ marvell_nfc_enable_int(nfc, NDCR_RDYM);
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ marvell_nfc_disable_int(nfc, NDCR_RDYM);
+ marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+ if (!ret) {
+ dev_err(nfc->dev, "Timeout waiting for RB signal\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr_generic;
+
+ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+ return;
+
+ if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
+ nfc->selected_chip = NULL;
+ marvell_nand->selected_die = -1;
+ return;
+ }
+
+ /*
+ * Do not change the timing registers when using the DT property
+ * marvell,nand-keep-config; in that case ->ndtr0 and ->ndtr1 from the
+ * marvell_nand structure are supposedly empty.
+ */
+ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
+ /*
+ * Reset the NDCR register to a clean state for this particular chip,
+ * also clear ND_RUN bit.
+ */
+ ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
+ NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
+ writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
+
+ /* Also reset the interrupt status register */
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+
+ nfc->selected_chip = chip;
+ marvell_nand->selected_die = die_nr;
+}
+
+static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
+{
+ struct marvell_nfc *nfc = dev_id;
+ u32 st = readl_relaxed(nfc->regs + NDSR);
+ u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;
+
+ /*
+ * RDY interrupt mask is one bit in NDCR while there are two status
+ * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
+ */
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if (!(st & ien))
+ return IRQ_NONE;
+
+ marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
+
+ if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
+}
+
+/* HW ECC related functions */
+static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (!(ndcr & NDCR_ECC_EN)) {
+ writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
+
+ /*
+ * When enabling BCH, set threshold to 0 to always know the
+ * number of corrected bitflips.
+ */
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
+ }
+}
+
+static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (ndcr & NDCR_ECC_EN) {
+ writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+ }
+}
+
+/* DMA related helpers */
+static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+/* Read/write PIO/DMA accessors */
+static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
+ enum dma_data_direction direction,
+ unsigned int len)
+{
+ unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ dma_cookie_t cookie;
+ int ret;
+
+ marvell_nfc_enable_dma(nfc);
+ /* Prepare the DMA transfer */
+ sg_init_one(&sg, nfc->dma_buf, dma_len);
+ dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
+ direction == DMA_FROM_DEVICE ?
+ DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
+ return -ENXIO;
+ }
+
+ /* Do the task and wait for it to finish */
+ cookie = dmaengine_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return -EIO;
+
+ dma_async_issue_pending(nfc->dma_chan);
+ ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ marvell_nfc_disable_dma(nfc);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
+ dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
+ dmaengine_terminate_all(nfc->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ memcpy(in + last_full_offset, tmp_buf, last_len);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ memcpy(tmp_buf, out + last_full_offset, last_len);
+ iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
+ u8 *data, int data_len,
+ u8 *spare, int spare_len,
+ u8 *ecc, int ecc_len,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bf;
+
+ /*
+ * Blank pages (all 0xFF) that have not been written may be recognized
+ * as bad if bitflips occur, so whenever an uncorrectable error occurs,
+ * check if the entire page (with ECC bytes) is actually blank or not.
+ */
+ if (!data)
+ data_len = 0;
+ if (!spare)
+ spare_len = 0;
+ if (!ecc)
+ ecc_len = 0;
+
+ bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
+ spare, spare_len, chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ return;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+}
+
+/*
+ * Check whether a chunk is correct or not according to the hardware ECC
+ * engine. mtd->ecc_stats.corrected is updated, as well as max_bitflips;
+ * however, mtd->ecc_stats.failed is not: the function instead returns a
+ * non-zero value, indicating that a check on the emptiness of the subpage
+ * must be performed before declaring the subpage corrupted.
+ */
+static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int bf = 0;
+ u32 ndsr;
+
+ ndsr = readl_relaxed(nfc->regs + NDSR);
+
+ /* Check uncorrectable error flag */
+ if (ndsr & NDSR_UNCERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ /*
+ * Do not increment ->ecc_stats.failed now; instead, return a
+ * non-zero value to indicate that this chunk was apparently
+ * bad, and it should be checked to see if it is empty or not. If
+ * the chunk (with ECC bytes) is not declared empty, the calling
+ * function must increment the failure count.
+ */
+ return -EBADMSG;
+ }
+
+ /* Check correctable error flag */
+ if (ndsr & NDSR_CORERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ bf = NDSR_ERRCNT(ndsr);
+ else
+ bf = 1;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+
+ return 0;
+}
+
+/* Hamming read helpers */
+static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf,
+ bool raw, int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART),
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data/oob)");
+ if (ret)
+ return ret;
+
+	/*
+	 * Read the page then the OOB area. Unlike what is shown in the
+	 * current documentation, spare bytes are protected by the ECC engine
+	 * and must be at the beginning of the OOB area, otherwise running
+	 * this driver on legacy systems would prevent the discovery of the
+	 * BBM/BBT.
+	 */
+ if (nfc->use_dma) {
+ marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
+ lt->data_bytes + oob_bytes);
+ memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
+ memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
+ } else {
+ marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int max_bitflips = 0, ret;
+ u8 *raw_buf;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
+ page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!ret)
+ return max_bitflips;
+
+ /*
+ * When ECC failures are detected, check if the full page has been
+ * written or not. Ignore the failure if it is actually empty.
+ */
+ raw_buf = kmalloc(full_sz, GFP_KERNEL);
+ if (!raw_buf)
+ return -ENOMEM;
+
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
+ lt->data_bytes, true, page);
+ marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
+ &max_bitflips);
+ kfree(raw_buf);
+
+ return max_bitflips;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine (even
+ * if it appears before the ECC bytes when reading), so the ->read_oob_raw()
+ * function also stands for ->read_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* Hamming write helpers */
+static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ const u8 *data_buf,
+ const u8 *oob_buf, bool raw,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN) |
+ NDCB0_CMD2(NAND_CMD_PAGEPROG) |
+ NDCB0_DBC,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Write the page then the OOB area */
+ if (nfc->use_dma) {
+ memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
+ memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
+ marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
+ lt->ecc_bytes + lt->spare_bytes);
+ } else {
+ marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ false, page);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ return ret;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine (even
+ * if it appears before the ECC bytes when reading), so the ->write_oob_raw()
+ * function also stands for ->write_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* BCH read helpers */
+static int marvell_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u8 *oob = chip->oob_poi;
+ int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int chunk;
+
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update last chunk length */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+		/* Read data bytes */
+ nand_change_read_column_op(chip, chunk * chunk_size,
+ buf + (lt->data_bytes * chunk),
+ data_len, false);
+
+ /* Read spare bytes */
+ nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
+ spare_len, false);
+
+ /* Read ECC bytes */
+ nand_read_data_op(chip, oob + ecc_offset +
+ (ALIGN(lt->ecc_bytes, 32) * chunk),
+ ecc_len, false);
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
+ u8 *data, unsigned int data_len,
+ u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int i, ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_LEN_OVRD,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return;
+
+ if (chunk == 0)
+ nfc_op.ndcb[0] |= NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART);
+
+ /*
+ * Trigger the naked read operation only on the last chunk.
+ * Otherwise, use monolithic read.
+ */
+ if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+ else
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+	/*
+	 * According to the datasheet, when reading from NDDB
+	 * with BCH enabled, after each 32-byte read, we have
+	 * to make sure that the NDSR.RDDREQ bit is set.
+	 *
+	 * Drain the FIFO, 8 32-bit reads at a time, and skip
+	 * the polling on the last read.
+	 *
+	 * The length is a multiple of 32 bytes, hence it is a multiple of 8
+	 * too.
+	 */
+ for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data)");
+ marvell_nfc_xfer_data_in_pio(nfc, data,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ data += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+
+ for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (OOB)");
+ marvell_nfc_xfer_data_in_pio(nfc, spare,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ spare += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+}
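+
+/*
+ * Illustration of the drain loops above (assuming FIFO_DEPTH * BCH_SEQ_READS
+ * evaluates to 32 bytes, as the "8 32-bit reads" comment implies): a
+ * 2048-byte data area is drained in 64 rounds of 8 ioread32() accesses,
+ * with one RDDREQ check per round, which matches the datasheet requirement
+ * of verifying NDSR.RDDREQ after every 32 bytes.
+ */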
+
+static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int data_len = lt->data_bytes, spare_len = lt->spare_bytes, ecc_len;
+ u8 *data = buf, *spare = chip->oob_poi, *ecc;
+ int max_bitflips = 0;
+ u32 failure_mask = 0;
+ int chunk, ecc_offset_in_page, ret;
+
+	/*
+	 * With BCH, the OOB is not fully used (and thus not read entirely):
+	 * unexpected bytes could show up at the end of the OOB buffer if it
+	 * is not explicitly erased.
+	 */
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update length for the last chunk */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ /* Read the chunk and detect number of bitflips */
+ marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ if (ret)
+ failure_mask |= BIT(chunk);
+
+ data += data_len;
+ spare += spare_len;
+ }
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!failure_mask)
+ return max_bitflips;
+
+	/*
+	 * Please note that dumping the ECC bytes during a normal read with the
+	 * OOB area would add a significant overhead as ECC bytes are
+	 * "consumed" by the controller in normal mode and must be re-read in
+	 * raw mode. To avoid degrading performance, we prefer not to include
+	 * them. The user should re-read the page in raw mode if ECC bytes are
+	 * required.
+	 *
+	 * However, for any subpage read error reported by ->correct(), the ECC
+	 * bytes must be read in raw mode and the full subpage must be checked
+	 * to see if it is entirely empty or if there was an actual error.
+	 */
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* No failure reported for this chunk, move to the next one */
+ if (!(failure_mask & BIT(chunk)))
+ continue;
+
+ /* Derive ECC bytes positions (in page/buffer) and length */
+ ecc = chip->oob_poi +
+ (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes +
+ (chunk * ALIGN(lt->ecc_bytes, 32));
+ ecc_offset_in_page =
+ (chunk * (lt->data_bytes + lt->spare_bytes +
+ lt->ecc_bytes)) +
+ (chunk < lt->full_chunk_cnt ?
+ lt->data_bytes + lt->spare_bytes :
+ lt->last_data_bytes + lt->last_spare_bytes);
+ ecc_len = chunk < lt->full_chunk_cnt ?
+ lt->ecc_bytes : lt->last_ecc_bytes;
+
+ /* Do the actual raw read of the ECC bytes */
+ nand_change_read_column_op(chip, ecc_offset_in_page,
+ ecc, ecc_len, false);
+
+ /* Derive data/spare bytes positions (in buffer) and length */
+ data = buf + (chunk * lt->data_bytes);
+ data_len = chunk < lt->full_chunk_cnt ?
+ lt->data_bytes : lt->last_data_bytes;
+ spare = chip->oob_poi + (chunk * (lt->spare_bytes +
+ lt->ecc_bytes));
+ spare_len = chunk < lt->full_chunk_cnt ?
+ lt->spare_bytes : lt->last_spare_bytes;
+
+		/* Check the entire chunk (data + spare + ecc) for emptiness */
+ marvell_nfc_check_empty_chunk(chip, data, data_len, spare,
+ spare_len, ecc, ecc_len,
+ &max_bitflips);
+ }
+
+ return max_bitflips;
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* BCH write helpers */
+static int marvell_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int spare_offset = 0;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int chunk;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Point to the column of the next chunk */
+ nand_change_write_column_op(chip, chunk * full_chunk_size,
+ NULL, 0, false);
+
+ /* Write the data */
+ nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
+ data_len, false);
+
+ if (!oob_required)
+ continue;
+
+ /* Write the spare bytes */
+ if (spare_len)
+ nand_write_data_op(chip, chip->oob_poi + spare_offset,
+ spare_len, false);
+
+ /* Write the ECC bytes */
+ if (ecc_len)
+ nand_write_data_op(chip, chip->oob_poi + ecc_offset,
+ ecc_len, false);
+
+ spare_offset += spare_len;
+ ecc_offset += ALIGN(ecc_len, 32);
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int
+marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ const u8 *data, unsigned int data_len,
+ const u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+ .ndcb[3] = data_len + spare_len,
+ };
+
+	/*
+	 * The first operation dispatches the CMD_SEQIN command, issues the
+	 * address cycles and asks for the first chunk of data.
+	 * All operations in the middle (if any) will issue a naked write and
+	 * also ask for data.
+	 * The last operation (if any) asks for the last chunk of data through
+	 * a last naked write.
+	 */
+ if (chunk == 0) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN);
+ nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
+ nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
+ } else if (chunk < lt->nchunks - 1) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ } else {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ }
+
+ /* Always dispatch the PAGEPROG command on the last chunk */
+ if (chunk == lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Transfer the contents */
+ iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
+ iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
+
+ return 0;
+}
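+
+/*
+ * Example sequence for the helper above (informative): with a 3-chunk
+ * layout, chunk 0 is sent as WRITE_DISPATCH (SEQIN + address cycles +
+ * data), chunk 1 as a NAKED_RW write and chunk 2 as a LAST_NAKED_RW write
+ * carrying the PAGEPROG command, so programming only starts once all the
+ * chunks have been loaded in the controller.
+ */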
+
+static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ const u8 *data = buf;
+ const u8 *spare = chip->oob_poi;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int chunk, ret;
+
+ /* Spare data will be written anyway, so clear it to avoid garbage */
+ if (!oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ data += data_len;
+ spare += spare_len;
+
+		/*
+		 * Waiting only for CMDD or PAGED is not enough: the ECC bytes
+		 * are only partially written at this point. No flag is set
+		 * once the operation is really finished, but the ND_RUN bit
+		 * is cleared, so wait for it before stepping into the next
+		 * command.
+		 */
+ marvell_nfc_wait_ndrun(chip);
+ }
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void marvell_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ bool first_cmd = true;
+ unsigned int op_id;
+ int i;
+
+ /* Reset the input structure as most of its fields will be OR'ed */
+ memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ int len = nand_subop_get_data_len(subop, op_id);
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD2(instr->ctx.cmd.opcode) |
+ NDCB0_DBC;
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
+
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ nfc_op->ndcb[1] |= addrs[i] << (8 * i);
+
+ if (naddrs >= 5)
+ nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
+ if (naddrs >= 6)
+ nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
+ if (naddrs == 7)
+ nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
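+
+/*
+ * Example of the address packing done above (illustration only): a typical
+ * 5-cycle page read address {col1, col2, row1, row2, row3} ends up as:
+ *
+ *	ndcb[1] = col1 | (col2 << 8) | (row1 << 16) | (row2 << 24);
+ *	ndcb[2] = NDCB2_ADDR5_CYC(row3);
+ *
+ * with NDCB0_ADDR_CYC(5) OR'ed into ndcb[0].
+ */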
+
+static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct nand_op_instr *instr = nfc_op->data_instr;
+ unsigned int op_id = nfc_op->data_instr_idx;
+ unsigned int len = nand_subop_get_data_len(subop, op_id);
+ unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
+ bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
+ int ret;
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, true);
+
+ if (reading) {
+ u8 *in = instr->ctx.data.buf.in + offset;
+
+ ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
+ } else {
+ const u8 *out = instr->ctx.data.buf.out + offset;
+
+ ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
+ }
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, false);
+
+ return ret;
+}
+
+static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ bool reading;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ if (!reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+	/*
+	 * The NDCR ND_RUN bit should be cleared automatically at the end of
+	 * each operation but experience shows that the behavior is buggy when
+	 * it comes to writes (with LEN_OVRD). Clear it by hand in this case.
+	 */
+ if (!reading) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+	/*
+	 * Naked accesses are different in that they need to be flagged as
+	 * naked by the controller. Reset the controller register fields that
+	 * describe the operation type and refill them according to the
+	 * ongoing operation.
+	 */
+ nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
+ NDCB0_CMD_XTYPE(XTYPE_MASK));
+ switch (subop->instrs[0].type) {
+ case NAND_OP_CMD_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ default:
+ /* This should never happen */
+ break;
+ }
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ if (!nfc_op.data_instr) {
+ ret = marvell_nfc_wait_cmdd(chip);
+ cond_delay(nfc_op.cle_ale_delay_ns);
+ return ret;
+ }
+
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+	/*
+	 * The NDCR ND_RUN bit should be cleared automatically at the end of
+	 * each operation but experience shows that the behavior is buggy when
+	 * it comes to writes (with LEN_OVRD). Clear it by hand in this case.
+	 */
+ if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return ret;
+}
+
+static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading ID");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading status");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
+ /* Monolithic reads/writes */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ /* Naked commands */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
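+
+/*
+ * For instance (informative), a regular page read issued by the core as
+ * CMD(READ0) -> ADDR(5 cycles) -> CMD(READSTART) -> WAITRDY -> DATA_IN
+ * matches the first monolithic pattern above and is handled by
+ * marvell_nfc_monolithic_access_exec(); sequences that do not fit a
+ * monolithic pattern are split by the core parser into subops matching
+ * the naked patterns.
+ */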
+
+static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
+ /* Naked commands not supported, use a function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_erase_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int marvell_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ if (nfc->caps->is_nfcv2)
+ return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
+ op, check_only);
+ else
+ return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
+ op, check_only);
+}
+
+/*
+ * Layouts were broken in the old pxa3xx_nand driver; these are supposed to
+ * be usable.
+ */
+static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
+ lt->last_ecc_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+	/*
+	 * The bootrom looks at bytes 0 and 5 for bad block markers with the
+	 * 4KB page / 4-bit BCH combination.
+	 */
+ if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
+ oobregion->offset = 6;
+ else
+ oobregion->offset = 2;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
+ .ecc = marvell_nand_ooblayout_ecc,
+ .free = marvell_nand_ooblayout_free,
+};
+
+static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *l;
+ int i;
+
+ if (!nfc->caps->is_nfcv2 &&
+ (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
+ dev_err(nfc->dev,
+ "NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
+ mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
+ return -ENOTSUPP;
+ }
+
+ to_marvell_nand(chip)->layout = NULL;
+ for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
+ l = &marvell_nfc_layouts[i];
+ if (mtd->writesize == l->writesize &&
+ ecc->size == l->chunk && ecc->strength == l->strength) {
+ to_marvell_nand(chip)->layout = l;
+ break;
+ }
+ }
+
+ if (!to_marvell_nand(chip)->layout ||
+ (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
+ dev_err(nfc->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ ecc->strength, mtd->writesize);
+ return -ENOTSUPP;
+ }
+
+ mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
+ ecc->steps = l->nchunks;
+ ecc->size = l->data_bytes;
+
+ if (ecc->strength == 1) {
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
+ ecc->read_oob = ecc->read_oob_raw;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
+ ecc->write_oob = ecc->write_oob_raw;
+ } else {
+ chip->ecc.algo = NAND_ECC_BCH;
+ ecc->strength = 16;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
+ ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
+ ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
+ }
+
+ return 0;
+}
+
+static int marvell_nand_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
+ if (chip->ecc_step_ds && chip->ecc_strength_ds) {
+ ecc->size = chip->ecc_step_ds;
+ ecc->strength = chip->ecc_strength_ds;
+ } else {
+ dev_info(nfc->dev,
+ "No minimum ECC strength, using 1b/512B\n");
+ ecc->size = 512;
+ ecc->strength = 1;
+ }
+ }
+
+ switch (ecc->mode) {
+ case NAND_ECC_HW:
+ ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_NONE:
+ case NAND_ECC_SOFT:
+ if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
+ mtd->writesize != SZ_2K) {
+ dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
+ mtd->writesize);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
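+
+/*
+ * Informative: with NAND_BBT_LASTBLOCK, the main and mirror bad block
+ * tables are looked up (and stored) in the last 8 blocks of each chip,
+ * identified by the "MVBbt0"/"1tbBVM" patterns at OOB offset 8, with a
+ * 2-bit entry per block and a version byte at OOB offset 14.
+ */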
+
+static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface
+ *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ unsigned int period_ns = 1000000000 / clk_get_rate(nfc->ecc_clk) * 2;
+ const struct nand_sdr_timings *sdr;
+ struct marvell_nfc_timings nfc_tmg;
+ int read_delay;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+	/*
+	 * SDR timings are given in picoseconds while NFC timings must be
+	 * expressed in NAND controller clock cycles, which is half of the
+	 * frequency of the accessible ECC clock retrieved by clk_get_rate().
+	 * This is not written anywhere in the datasheet but was observed
+	 * with an oscilloscope.
+	 *
+	 * The NFC datasheet gives equations from which those calculations
+	 * are derived. They tend to be slightly more restrictive than the
+	 * given core timings and may improve the overall speed.
+	 */
+ nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
+ nfc_tmg.tRH = nfc_tmg.tRP;
+ nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
+ nfc_tmg.tWH = nfc_tmg.tWP;
+ nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
+ nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
+ nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
+ /*
+ * Read delay is the time of propagation from SoC pins to NFC internal
+ * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
+ * EDO mode, an additional delay of tRH must be taken into account so
+ * the data is sampled on the falling edge instead of the rising edge.
+ */
+ read_delay = sdr->tRC_min >= 30000 ?
+ MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
+
+ nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
+	/*
+	 * tWHR and tRHW are supposed to be read-to-write delays (and vice
+	 * versa) but in some cases, i.e. when doing a change column, they
+	 * must be greater than that to be sure the tCCS delay is respected.
+	 */
+	nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
+				 period_ns) - 2;
+ nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
+ period_ns);
+
+ /* Use WAIT_MODE (wait for RB line) instead of only relying on delays */
+ nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
+
+ if (chipnr < 0)
+ return 0;
+
+ marvell_nand->ndtr0 =
+ NDTR0_TRP(nfc_tmg.tRP) |
+ NDTR0_TRH(nfc_tmg.tRH) |
+ NDTR0_ETRP(nfc_tmg.tRP) |
+ NDTR0_TWP(nfc_tmg.tWP) |
+ NDTR0_TWH(nfc_tmg.tWH) |
+ NDTR0_TCS(nfc_tmg.tCS) |
+ NDTR0_TCH(nfc_tmg.tCH) |
+ NDTR0_RD_CNT_DEL(read_delay) |
+ NDTR0_SELCNTR |
+ NDTR0_TADL(nfc_tmg.tADL);
+
+ marvell_nand->ndtr1 =
+ NDTR1_TAR(nfc_tmg.tAR) |
+ NDTR1_TWHR(nfc_tmg.tWHR) |
+ NDTR1_TRHW(nfc_tmg.tRHW) |
+ NDTR1_WAIT_MODE |
+ NDTR1_TR(nfc_tmg.tR);
+
+ return 0;
+}
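+
+/*
+ * Numerical example for the conversions above (illustration only, assuming
+ * TO_CYCLES() divides a picosecond timing by 1000 and rounds it up to
+ * controller clock cycles): with a 250 MHz ECC clock, period_ns evaluates
+ * to (1000000000 / 250000000) * 2 = 8 ns per controller cycle. An SDR mode
+ * with tRC_min = 30000 ps then gives DIV_ROUND_UP(30000, 2) = 15000 ps,
+ * i.e. 15 ns rounded up to 2 cycles, so tRP = 2 - 1 = 1, the trailing "- 1"
+ * suggesting the register field encodes the cycle count minus one.
+ */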
+
+static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
+ struct device_node *np)
+{
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
+ struct marvell_nand_chip *marvell_nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs, rb;
+
+	/*
+	 * The legacy "num-cs" property indicates the number of CS on the only
+	 * chip connected to the controller (legacy bindings do not support
+	 * more than one chip). CS IDs are only incremented one by one while
+	 * the RB pin is always #0.
+	 *
+	 * When not using legacy bindings, a couple of "reg" and "nand-rb"
+	 * properties must be filled in. For each chip, expressed as a subnode,
+	 * "reg" points to the CS lines and "nand-rb" to the RB line.
+	 */
+ if (pdata) {
+ nsels = 1;
+ } else if (nfc->caps->legacy_of_bindings &&
+ !of_get_property(np, "num-cs", &nsels)) {
+ dev_err(dev, "missing num-cs property\n");
+ return -EINVAL;
+ } else if (!of_get_property(np, "reg", &nsels)) {
+ dev_err(dev, "missing reg property\n");
+ return -EINVAL;
+ }
+
+ if (!pdata)
+ nsels /= sizeof(u32);
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ /* Alloc the nand chip structure */
+ marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
+ (nsels *
+ sizeof(struct marvell_nand_chip_sel)),
+ GFP_KERNEL);
+ if (!marvell_nand) {
+ dev_err(dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ marvell_nand->nsels = nsels;
+ marvell_nand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * Legacy bindings use the CS lines in natural
+ * order (0, 1, ...)
+ */
+ cs = i;
+ } else {
+ /* Retrieve CS id */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (cs >= nfc->caps->max_cs_nb) {
+ dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
+ cs, nfc->caps->max_cs_nb);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+		/*
+		 * The cs variable represents the chip select id, which must be
+		 * converted into bit fields for NDCB0 and NDCB2 to select the
+		 * right chip. Unfortunately, due to a lack of information on
+		 * the subject and incoherent documentation, the user should
+		 * not use CS1 and CS3 at all as asserting them is not
+		 * supported in a reliable way (due to multiplexing inside the
+		 * ADDR5 field).
+		 */
+ marvell_nand->sels[i].cs = cs;
+ switch (cs) {
+ case 0:
+ case 2:
+ marvell_nand->sels[i].ndcb0_csel = 0;
+ break;
+ case 1:
+ case 3:
+ marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Retrieve RB id */
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /* Legacy bindings always use RB #0 */
+ rb = 0;
+ } else {
+ ret = of_property_read_u32_index(np, "nand-rb", i,
+ &rb);
+ if (ret) {
+ dev_err(dev,
+ "could not retrieve RB property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (rb >= nfc->caps->max_rb_nb) {
+ dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
+ rb, nfc->caps->max_rb_nb);
+ return -EINVAL;
+ }
+
+ marvell_nand->sels[i].rb = rb;
+ }
+
+ chip = &marvell_nand->chip;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ chip->exec_op = marvell_nfc_exec_op;
+ chip->select_chip = marvell_nfc_select_chip;
+ if (nfc->caps->is_nfcv2 &&
+ !of_property_read_bool(np, "marvell,nand-keep-config"))
+ chip->setup_data_interface = marvell_nfc_setup_data_interface;
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.mode = NAND_ECC_HW;
+
+ /*
+ * Save a reference value for timing registers before
+ * ->setup_data_interface() is called.
+ */
+ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
+ marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
+
+ chip->options |= NAND_BUSWIDTH_AUTO;
+ ret = nand_scan_ident(mtd, marvell_nand->nsels, NULL);
+ if (ret) {
+ dev_err(dev, "could not identify the nand chip\n");
+ return ret;
+ }
+
+ if (pdata && pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Save the chip-specific fields of NDCR */
+ marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
+ if (chip->options & NAND_BUSWIDTH_16)
+ marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ /*
+ * On small page NANDs, only one cycle is needed to pass the
+ * column address.
+ */
+ if (mtd->writesize <= 512) {
+ marvell_nand->addr_cyc = 1;
+ } else {
+ marvell_nand->addr_cyc = 2;
+ marvell_nand->ndcr |= NDCR_RA_START;
+ }
+
+	/*
+	 * Now add the number of cycles needed to pass the row
+	 * address.
+	 *
+	 * Addressing a chip using CS 2 or 3 should also need a third row
+	 * cycle, but due to inconsistencies in the documentation and the
+	 * lack of hardware to test this situation, this case is not
+	 * supported.
+	 */
+ if (chip->options & NAND_ROW_ADDR_3)
+ marvell_nand->addr_cyc += 3;
+ else
+ marvell_nand->addr_cyc += 2;
+
+ if (pdata) {
+ chip->ecc.size = pdata->ecc_step_size;
+ chip->ecc.strength = pdata->ecc_strength;
+ }
+
+ ret = marvell_nand_ecc_init(mtd, &chip->ecc);
+ if (ret) {
+ dev_err(dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+		/*
+		 * Subpage write is not available with hardware ECC; also
+		 * prohibit subpage read, as otherwise subpage access would
+		 * still be allowed from userspace and subpage write, if used,
+		 * would lead to numerous uncorrectable ECC errors.
+		 */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+		 * should define the following property in your NAND node, e.g.:
+ * should define the following property in your NAND node, ie:
+ *
+ * label = "main-storage";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nfc->dev),
+ marvell_nand->sels[0].cs);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ if (pdata)
+ /* Legacy bindings support only one chip */
+ ret = mtd_device_register(mtd, pdata->parts[0],
+ pdata->nr_parts[0]);
+ else
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&marvell_nand->node, &nfc->chips);
+
+ return 0;
+}
+
+static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = nfc->caps->max_cs_nb;
+ int nchips;
+ int ret;
+
+ if (!np)
+ nchips = 1;
+ else
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
+ max_cs);
+ return -EINVAL;
+ }
+
+ /*
+ * Legacy bindings do not use child nodes to exhibit NAND chip
+ * properties and layout. Instead, NAND properties are mixed with the
+ * controller ones, and partitions are defined as direct subnodes of the
+ * NAND controller node.
+ */
+ if (nfc->caps->legacy_of_bindings) {
+ ret = marvell_nand_chip_init(dev, nfc, np);
+ return ret;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = marvell_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+ struct marvell_nand_chip *entry, *temp;
+
+ list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+ nand_release(nand_to_mtd(&entry->chip));
+ list_del(&entry->node);
+ }
+}
+
+static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
+{
+ struct platform_device *pdev = container_of(nfc->dev,
+ struct platform_device,
+ dev);
+ struct dma_slave_config config = {};
+ struct resource *r;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PXA_DMA)) {
+ dev_warn(nfc->dev,
+ "DMA not enabled in configuration\n");
+ return -ENOTSUPP;
+ }
+
+ ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(nfc->dev, "No resource defined for data DMA\n");
+ return -ENXIO;
+ }
+
+ param.drcmr = r->start;
+ param.prio = PXAD_PRIO_LOWEST;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ nfc->dma_chan =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, nfc->dev,
+ "data");
+ if (!nfc->dma_chan) {
+ dev_err(nfc->dev,
+ "Unable to request data DMA channel\n");
+ return -ENODEV;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENXIO;
+
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = r->start + NDDB;
+ config.dst_addr = r->start + NDDB;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(nfc->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(nfc->dev, "Failed to configure DMA channel\n");
+ return ret;
+ }
+
+	/*
+	 * The DMA must act on lengths that are a multiple of 32 and this
+	 * length may be bigger than the destination buffer. Use this bounce
+	 * buffer for the DMA transfers instead, then copy the desired amount
+	 * of data to the buffer provided by the caller.
+	 */
+ nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!nfc->dma_buf)
+ return -ENOMEM;
+
+ nfc->use_dma = true;
+
+ return 0;
+}
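+
+/*
+ * Illustration of the bounce buffer use (informative): a 2050-byte read
+ * request may be executed as a 2080-byte DMA transfer (rounded up to a
+ * multiple of 32) into nfc->dma_buf, with only the first 2050 bytes copied
+ * back to the buffer provided by the caller.
+ */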
+
+static int marvell_nfc_init(struct marvell_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node;
+
+	/*
+	 * On some SoCs like A7k/A8k, the NAND controller, its gated clocks
+	 * and its reset bits must be enabled manually to avoid depending on
+	 * the bootloader. This is done through the use of the System
+	 * Functions registers.
+	 */
+ if (nfc->caps->need_system_controller) {
+ struct regmap *sysctrl_base =
+ syscon_regmap_lookup_by_phandle(np,
+ "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ reg = GENCONF_SOC_DEVICE_MUX_NFC_EN |
+ GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
+ GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
+ GENCONF_SOC_DEVICE_MUX_NFC_INT_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+
+ regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, &reg);
+ reg |= GENCONF_CLK_GATING_CTRL_ND_GATE;
+ regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg);
+
+ regmap_read(sysctrl_base, GENCONF_ND_CLK_CTRL, &reg);
+ reg |= GENCONF_ND_CLK_CTRL_EN;
+ regmap_write(sysctrl_base, GENCONF_ND_CLK_CTRL, reg);
+ }
+
+ /* Configure the DMA if appropriate */
+ if (!nfc->caps->is_nfcv2)
+ marvell_nfc_init_dma(nfc);
+
+	/*
+	 * ECC operations and interrupts are only enabled when specifically
+	 * needed. ECC shall not be activated in the early stages (it makes
+	 * probing fail). The arbiter flag, even if marked as "reserved", must
+	 * be set (empirical finding). The SPARE_EN bit must always be set or
+	 * the ECC bytes will not be at the same offset in the read page,
+	 * which would break the protection.
+	 */
+ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
+ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
+ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+
+ return 0;
+}
+
+static int marvell_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct marvell_nfc *nfc;
+ int ret;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
+ GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_hw_control_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to retrieve irq\n");
+ return irq;
+ }
+
+ nfc->ecc_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nfc->ecc_clk))
+ return PTR_ERR(nfc->ecc_clk);
+
+ ret = clk_prepare_enable(nfc->ecc_clk);
+ if (ret)
+ return ret;
+
+ marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+ ret = devm_request_irq(dev, irq, marvell_nfc_isr,
+ 0, "marvell-nfc", nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ /* Get NAND controller capabilities */
+ if (pdev->id_entry)
+ nfc->caps = (void *)pdev->id_entry->driver_data;
+ else
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+
+ if (!nfc->caps) {
+ dev_err(dev, "Could not retrieve NFC caps\n");
+ ret = -EINVAL;
+ goto unprepare_clk;
+ }
+
+ /* Init the controller and then probe the chips */
+ ret = marvell_nfc_init(nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = marvell_nand_chips_init(dev, nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ return 0;
+
+unprepare_clk:
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return ret;
+}
+
+static int marvell_nfc_remove(struct platform_device *pdev)
+{
+ struct marvell_nfc *nfc = platform_get_drvdata(pdev);
+
+ marvell_nand_chips_cleanup(nfc);
+
+ if (nfc->use_dma) {
+ dmaengine_terminate_all(nfc->dma_chan);
+ dma_release_channel(nfc->dma_chan);
+ }
+
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return 0;
+}
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .use_dma = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .legacy_of_bindings = true,
+ .use_dma = true,
+};
+
+static const struct platform_device_id marvell_nfc_platform_ids[] = {
+ {
+ .name = "pxa3xx-nand",
+ .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
+
+static const struct of_device_id marvell_nfc_of_ids[] = {
+ {
+ .compatible = "marvell,armada-8k-nand-controller",
+ .data = &marvell_armada_8k_nfc_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand-controller",
+ .data = &marvell_armada370_nfc_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand-controller",
+ .data = &marvell_pxa3xx_nfc_caps,
+ },
+ /* Support for old/deprecated bindings: */
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = &marvell_armada_8k_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = &marvell_armada370_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = &marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
+
+static struct platform_driver marvell_nfc_driver = {
+ .driver = {
+ .name = "marvell-nfc",
+ .of_match_table = marvell_nfc_of_ids,
+ },
+ .id_table = marvell_nfc_platform_ids,
+ .probe = marvell_nfc_probe,
+ .remove = marvell_nfc_remove,
+};
+module_platform_driver(marvell_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Marvell NAND controller driver");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index c51d214d169e..40d86a861a70 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -34,34 +34,28 @@
#define ECC_ENCCON (0x00)
#define ECC_ENCCNFG (0x04)
-#define ECC_MODE_SHIFT (5)
#define ECC_MS_SHIFT (16)
#define ECC_ENCDIADDR (0x08)
#define ECC_ENCIDLE (0x0C)
-#define ECC_ENCIRQ_EN (0x80)
-#define ECC_ENCIRQ_STA (0x84)
#define ECC_DECCON (0x100)
#define ECC_DECCNFG (0x104)
#define DEC_EMPTY_EN BIT(31)
#define DEC_CNFG_CORRECT (0x3 << 12)
#define ECC_DECIDLE (0x10C)
#define ECC_DECENUM0 (0x114)
-#define ECC_DECDONE (0x124)
-#define ECC_DECIRQ_EN (0x200)
-#define ECC_DECIRQ_STA (0x204)
#define ECC_TIMEOUT (500000)
#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
-#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
- ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
struct mtk_ecc_caps {
u32 err_mask;
const u8 *ecc_strength;
+ const u32 *ecc_regs;
u8 num_ecc_strength;
- u32 encode_parity_reg0;
+ u8 ecc_mode_shift;
+ u32 parity_bits;
int pg_irq_sel;
};
@@ -89,6 +83,46 @@ static const u8 ecc_strength_mt2712[] = {
40, 44, 48, 52, 56, 60, 68, 72, 80
};
+static const u8 ecc_strength_mt7622[] = {
+ 4, 6, 8, 10, 12, 14, 16
+};
+
+enum mtk_ecc_regs {
+ ECC_ENCPAR00,
+ ECC_ENCIRQ_EN,
+ ECC_ENCIRQ_STA,
+ ECC_DECDONE,
+ ECC_DECIRQ_EN,
+ ECC_DECIRQ_STA,
+};
+
+static int mt2701_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static const u32 mt2712_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x300,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static const u32 mt7622_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x30,
+ [ECC_ENCIRQ_STA] = 0x34,
+ [ECC_DECDONE] = 0x11c,
+ [ECC_DECIRQ_EN] = 0x140,
+ [ECC_DECIRQ_STA] = 0x144,
+};
+
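+/*
+ * With the per-SoC layout tables above, a fixed register offset such as
+ * the removed ECC_DECIRQ_STA macro becomes an indexed lookup, e.g.:
+ *
+ * dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
+ */
+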
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
enum mtk_ecc_operation op)
{
@@ -107,32 +141,30 @@ static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
static irqreturn_t mtk_ecc_irq(int irq, void *id)
{
struct mtk_ecc *ecc = id;
- enum mtk_ecc_operation op;
u32 dec, enc;
- dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
+ & ECC_IRQ_EN;
if (dec) {
- op = ECC_DECODE;
- dec = readw(ecc->regs + ECC_DECDONE);
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
if (dec & ecc->sectors) {
/*
* Clear decode IRQ status once again to ensure that
* there will be no extra IRQ.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
ecc->sectors = 0;
complete(&ecc->done);
} else {
return IRQ_HANDLED;
}
} else {
- enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
- if (enc) {
- op = ECC_ENCODE;
+ enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
+ & ECC_IRQ_EN;
+ if (enc)
complete(&ecc->done);
- } else {
+ else
return IRQ_NONE;
- }
}
return IRQ_HANDLED;
@@ -160,7 +192,7 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
/* configure ECC encoder (in bits) */
enc_sz = config->len << 3;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (enc_sz << ECC_MS_SHIFT);
writel(reg, ecc->regs + ECC_ENCCNFG);
@@ -171,9 +203,9 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
} else {
/* configure ECC decoder (in bits) */
dec_sz = (config->len << 3) +
- config->strength * ECC_PARITY_BITS;
+ config->strength * ecc->caps->parity_bits;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
reg |= DEC_EMPTY_EN;
writel(reg, ecc->regs + ECC_DECCNFG);
@@ -291,7 +323,12 @@ int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
*/
if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
reg_val |= ECC_PG_IRQ_SEL;
- writew(reg_val, ecc->regs + ECC_IRQ_REG(op));
+ if (op == ECC_ENCODE)
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ else
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
}
writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
@@ -310,13 +347,17 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
- if (op == ECC_DECODE)
+ if (op == ECC_DECODE) {
/*
* Clear decode IRQ status in case waiting for the decode IRQ
* timed out.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
- writew(0, ecc->regs + ECC_IRQ_REG(op));
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
+ } else {
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ }
+
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
mutex_unlock(&ecc->lock);
@@ -367,11 +408,11 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
mtk_ecc_wait_idle(ecc, ECC_ENCODE);
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
- len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
/* write the parity bytes generated by the ECC back to temp buffer */
__ioread32_copy(ecc->eccdata,
- ecc->regs + ecc->caps->encode_parity_reg0,
+ ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
round_up(len, 4));
/* copy into possibly unaligned OOB region with actual length */
@@ -404,22 +445,42 @@ void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
}
EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
+{
+ return ecc->caps->parity_bits;
+}
+EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
+
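+/*
+ * Example (sketch): NAND drivers size the per-sector parity area with this
+ * accessor instead of the removed ECC_PARITY_BITS constant:
+ *
+ * len = (strength * mtk_ecc_get_parity_bits(ecc) + 7) >> 3;
+ */
+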
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = 0x3f,
.ecc_strength = ecc_strength_mt2701,
+ .ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
- .encode_parity_reg0 = 0x10,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 0,
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = 0x7f,
.ecc_strength = ecc_strength_mt2712,
+ .ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
- .encode_parity_reg0 = 0x300,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 1,
};
+static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
+ .err_mask = 0x3f,
+ .ecc_strength = ecc_strength_mt7622,
+ .ecc_regs = mt7622_ecc_regs,
+ .num_ecc_strength = 7,
+ .ecc_mode_shift = 4,
+ .parity_bits = 13,
+ .pg_irq_sel = 0,
+};
+
static const struct of_device_id mtk_ecc_dt_match[] = {
{
.compatible = "mediatek,mt2701-ecc",
@@ -427,6 +488,9 @@ static const struct of_device_id mtk_ecc_dt_match[] = {
}, {
.compatible = "mediatek,mt2712-ecc",
.data = &mtk_ecc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-ecc",
+ .data = &mtk_ecc_caps_mt7622,
},
{},
};
@@ -452,7 +516,7 @@ static int mtk_ecc_probe(struct platform_device *pdev)
max_eccdata_size = ecc->caps->num_ecc_strength - 1;
max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
- max_eccdata_size = (max_eccdata_size * ECC_PARITY_BITS + 7) >> 3;
+ max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
max_eccdata_size = round_up(max_eccdata_size, 4);
ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
if (!ecc->eccdata)
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
index d245c14f1b80..a455df080952 100644
--- a/drivers/mtd/nand/mtk_ecc.h
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -14,8 +14,6 @@
#include <linux/types.h>
-#define ECC_PARITY_BITS (14)
-
enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
@@ -43,6 +41,7 @@ int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
void mtk_ecc_disable(struct mtk_ecc *);
void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
void mtk_ecc_release(struct mtk_ecc *);
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index d86a7d131cc0..6977da3a26aa 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -97,7 +97,6 @@
#define MTK_TIMEOUT (500000)
#define MTK_RESET_TIMEOUT (1000000)
-#define MTK_MAX_SECTOR (16)
#define MTK_NAND_MAX_NSELS (2)
#define MTK_NFC_MIN_SPARE (16)
#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
@@ -109,6 +108,8 @@ struct mtk_nfc_caps {
u8 num_spare_size;
u8 pageformat_spare_shift;
u8 nfi_clk_div;
+ u8 max_sector;
+ u32 max_sector_size;
};
struct mtk_nfc_bad_mark_ctl {
@@ -173,6 +174,10 @@ static const u8 spare_size_mt2712[] = {
74
};
+static const u8 spare_size_mt7622[] = {
+ 16, 26, 27, 28
+};
+
static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
return container_of(nand, struct mtk_nfc_nand_chip, nand);
@@ -450,7 +455,7 @@ static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
* set to max sector to allow the HW to continue reading over
* unaligned accesses
*/
- reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+ reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
nfi_writel(nfc, reg, NFI_CON);
/* trigger to fetch data */
@@ -481,7 +486,7 @@ static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
nfi_writew(nfc, reg, NFI_CNFG);
- reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+ reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
nfi_writel(nfc, reg, NFI_CON);
nfi_writew(nfc, STAR_EN, NFI_STRDATA);
@@ -761,6 +766,8 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u32 reg;
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (!raw) {
/* OOB => FDM: from register, ECC: from HW */
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
@@ -794,7 +801,10 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (!raw)
mtk_ecc_disable(nfc->ecc);
- return ret;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
@@ -832,18 +842,7 @@ static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- int ret;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
- ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
- if (ret < 0)
- return -EIO;
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- ret = chip->waitfunc(mtd, chip);
-
- return ret & NAND_STATUS_FAIL ? -EIO : 0;
+ return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
}
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
@@ -892,8 +891,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
buf = bufpoi + start * chip->ecc.size;
- if (column != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
+ nand_read_page_op(chip, page, column, NULL, 0);
addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
rc = dma_mapping_error(nfc->dev, addr);
@@ -1016,8 +1014,6 @@ static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1126,9 +1122,11 @@ static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
u32 ecc_bytes;
- ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+ ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
+ mtk_ecc_get_parity_bits(nfc->ecc), 8);
fdm->reg_size = chip->spare_per_sector - ecc_bytes;
if (fdm->reg_size > NFI_FDM_MAX_SIZE)
@@ -1208,7 +1206,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
* this controller only supports 512 and 1024 sizes
*/
if (nand->ecc.size < 1024) {
- if (mtd->writesize > 512) {
+ if (mtd->writesize > 512 &&
+ nfc->caps->max_sector_size > 512) {
nand->ecc.size = 1024;
nand->ecc.strength <<= 1;
} else {
@@ -1223,7 +1222,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
return ret;
/* calculate oob bytes except ecc parity data */
- free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
+ free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
+ + 7) >> 3;
free = spare - free;
/*
@@ -1233,10 +1233,12 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
*/
if (free > NFI_FDM_MAX_SIZE) {
spare -= NFI_FDM_MAX_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
} else if (free < 0) {
spare -= NFI_FDM_MIN_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
}
}
@@ -1389,6 +1391,8 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
.num_spare_size = 16,
.pageformat_spare_shift = 4,
.nfi_clk_div = 1,
+ .max_sector = 16,
+ .max_sector_size = 1024,
};
static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
@@ -1396,6 +1400,17 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
.num_spare_size = 19,
.pageformat_spare_shift = 16,
.nfi_clk_div = 2,
+ .max_sector = 16,
+ .max_sector_size = 1024,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
+ .spare_size = spare_size_mt7622,
+ .num_spare_size = 4,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+ .max_sector = 8,
+ .max_sector_size = 512,
};
static const struct of_device_id mtk_nfc_id_table[] = {
@@ -1405,6 +1420,9 @@ static const struct of_device_id mtk_nfc_id_table[] = {
}, {
.compatible = "mediatek,mt2712-nfc",
.data = &mtk_nfc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-nfc",
+ .data = &mtk_nfc_caps_mt7622,
},
{}
};
@@ -1540,7 +1558,6 @@ static int mtk_nfc_resume(struct device *dev)
struct mtk_nfc *nfc = dev_get_drvdata(dev);
struct mtk_nfc_nand_chip *chip;
struct nand_chip *nand;
- struct mtd_info *mtd;
int ret;
u32 i;
@@ -1553,11 +1570,8 @@ static int mtk_nfc_resume(struct device *dev)
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
nand = &chip->nand;
- mtd = nand_to_mtd(nand);
- for (i = 0; i < chip->nsels; i++) {
- nand->select_chip(mtd, i);
- nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- }
+ for (i = 0; i < chip->nsels; i++)
+ nand_reset(nand, i);
}
return 0;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 6135d007a068..e70ca16a5118 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -561,14 +561,19 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ u8 status;
+ int ret;
/* Broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
/* Check the WP bit */
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_WP ? 0 : 1;
}
/**
@@ -667,16 +672,83 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
{
register struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
timeo = jiffies + msecs_to_jiffies(timeo);
do {
- if ((chip->read_byte(mtd) & NAND_STATUS_READY))
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
touch_softlockup_watchdog();
} while (time_before(jiffies, timeo));
};
/**
+ * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
+ * @chip: NAND chip structure
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
+ * If that does not happen within the specified timeout, -ETIMEDOUT is
+ * returned.
+ *
+ * This helper is intended to be used when the controller does not have access
+ * to the NAND R/B pin.
+ *
+ * Be aware that calling this helper from an ->exec_op() implementation means
+ * ->exec_op() must be re-entrant.
+ *
+ * Returns 0 if the NAND chip is ready, a negative error code otherwise.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ u8 status = 0;
+ int ret;
+
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ break;
+
+ if (status & NAND_STATUS_READY)
+ break;
+
+ /*
+ * The typical lowest tR execution time on most NANDs is 10us;
+ * use this as the polling delay until something smarter (i.e.
+ * deriving a delay from the timeout value, timeout_ms/ratio) is
+ * implemented.
+ */
+ udelay(10);
+ } while (time_before(jiffies, timeout_ms));
+
+ /*
+ * We have to exit READ_STATUS mode in order to read real data on the
+ * bus in case the WAITRDY instruction is preceding a DATA_IN
+ * instruction.
+ */
+ nand_exit_status_op(chip);
+
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
+
+/**
* nand_command - [DEFAULT] Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
@@ -710,7 +782,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, readcmd, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
- chip->cmd_ctrl(mtd, command, ctrl);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command, ctrl);
/* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@@ -738,6 +811,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
@@ -802,8 +876,8 @@ static void nand_ccs_delay(struct nand_chip *chip)
* Wait tCCS_min if it is correctly defined, otherwise wait 500ns
* (which should be safe for all NANDs).
*/
- if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
- ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
+ if (chip->setup_data_interface)
+ ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
else
ndelay(500);
}
@@ -831,7 +905,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -866,6 +942,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
@@ -1014,7 +1091,15 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ int ret;
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status),
+ true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
}
mdelay(1);
@@ -1031,8 +1116,9 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- int status;
unsigned long timeo = 400;
+ u8 status;
+ int ret;
/*
* Apply this short delay always to ensure that we do wait tWB in any
@@ -1040,7 +1126,9 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
*/
ndelay(100);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
@@ -1051,14 +1139,22 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
break;
}
cond_resched();
} while (time_before(jiffies, timeo));
}
- status = (int)chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return ret;
+
/* This can happen in case of timeout or buggy dev_ready */
WARN_ON(!(status & NAND_STATUS_READY));
return status;
@@ -1076,7 +1172,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- const struct nand_data_interface *conf;
int ret;
if (!chip->setup_data_interface)
@@ -1096,8 +1191,8 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
* timings to timing mode 0.
*/
- conf = nand_get_default_data_interface();
- ret = chip->setup_data_interface(mtd, chipnr, conf);
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
@@ -1122,7 +1217,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- if (!chip->setup_data_interface || !chip->data_interface)
+ if (!chip->setup_data_interface)
return 0;
/*
@@ -1143,7 +1238,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
goto err;
}
- ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
err:
return ret;
}
@@ -1183,21 +1278,19 @@ static int nand_init_data_interface(struct nand_chip *chip)
modes = GENMASK(chip->onfi_timing_mode_default, 0);
}
- chip->data_interface = kzalloc(sizeof(*chip->data_interface),
- GFP_KERNEL);
- if (!chip->data_interface)
- return -ENOMEM;
for (mode = fls(modes) - 1; mode >= 0; mode--) {
- ret = onfi_init_data_interface(chip, chip->data_interface,
- NAND_SDR_IFACE, mode);
+ ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
if (ret)
continue;
- /* Pass -1 to only */
+ /*
+ * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
+ * controller supports the requested timings.
+ */
ret = chip->setup_data_interface(mtd,
NAND_DATA_IFACE_CHECK_ONLY,
- chip->data_interface);
+ &chip->data_interface);
if (!ret) {
chip->onfi_timing_mode_default = mode;
break;
@@ -1207,21 +1300,1429 @@ static int nand_init_data_interface(struct nand_chip *chip)
return 0;
}
-static void nand_release_data_interface(struct nand_chip *chip)
+/**
+ * nand_fill_column_cycles - fill the column cycles of an address
+ * @chip: The NAND chip
+ * @addrs: Array of address cycles to fill
+ * @offset_in_page: The offset in the page
+ *
+ * Fills the first or the first two bytes of the @addrs field depending
+ * on the NAND bus width and the page size.
+ *
+ * Returns the number of cycles needed to encode the column, or a negative
+ * error code in case one of the arguments is invalid.
+ */
+static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ unsigned int offset_in_page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Make sure the offset does not exceed the full page size (data + OOB). */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+ * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
+
+ /*
+ * The offset in the page is expressed in bytes. If the NAND bus is
+ * 16-bit wide, it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
+
+ offset_in_page /= 2;
+ }
+
+ addrs[0] = offset_in_page;
+
+ /*
+ * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ * need 2.
+ */
+ if (mtd->writesize <= 512)
+ return 1;
+
+ addrs[1] = offset_in_page >> 8;
+
+ return 2;
+}
+
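+/*
+ * Worked example (illustrative): on an 8-bit bus, 2048-byte page NAND, an
+ * offset of 0x810 in the page yields addrs[0] = 0x10 and addrs[1] = 0x08,
+ * and the function returns 2 column cycles; a 512-byte page device would
+ * need a single cycle instead.
+ */
+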
+static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[4];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[1] = page;
+ addrs[2] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[3] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[2] = page;
+ addrs[3] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+/**
+ * nand_read_page_op - Do a READ PAGE operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ if (mtd->writesize > 512)
+ return nand_lp_exec_read_page_op(chip, page,
+ offset_in_page, buf,
+ len);
+
+ return nand_sp_exec_read_page_op(chip, page, offset_in_page,
+ buf, len);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_op);
+
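+/*
+ * Usage example (illustrative): reading a full page of data into a raw
+ * buffer, leaving ECC handling to the caller:
+ *
+ * ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ * if (ret)
+ * return ret;
+ */
+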
+/**
+ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
+ * @chip: The NAND chip
+ * @page: parameter page to read
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PARAMETER PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *p = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PARAM, 0),
+ NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE READ COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2] = {};
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
+ PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ instrs[3].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_read_column_op);
+
+/**
+ * nand_read_oob_op - Do a READ OOB operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_oob: offset within the OOB area
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ OOB operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_oob, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_oob + len > mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_read_page_op(chip, page,
+ mtd->writesize + offset_in_oob,
+ buf, len);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_oob_op);
+
+static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool prog)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5] = {};
+ struct nand_op_instr instrs[] = {
+ /*
+ * The first instruction will be dropped if we're dealing
+ * with a large page NAND and adjusted if we're dealing
+ * with a small page NAND and the page offset is > 255.
+ */
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_CMD(NAND_CMD_SEQIN, 0),
+ NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ int ret;
+ u8 status;
+
+ if (naddrs < 0)
+ return naddrs;
+
+ addrs[naddrs++] = page;
+ addrs[naddrs++] = page >> 8;
+ if (chip->options & NAND_ROW_ADDR_3)
+ addrs[naddrs++] = page >> 16;
+
+ instrs[2].ctx.addr.naddrs = naddrs;
+
+ /* Drop the last two instructions if we're not programming the page. */
+ if (!prog) {
+ op.ninstrs -= 2;
+ /* Also drop the DATA_OUT instruction if empty. */
+ if (!len)
+ op.ninstrs--;
+ }
+
+ if (mtd->writesize <= 512) {
+ /*
+ * Small pages need some more tweaking: we have to adjust the
+ * first instruction depending on the page offset we're trying
+ * to access.
+ */
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+ } else {
+ /*
+ * Drop the first command if we're dealing with a large page
+ * NAND.
+ */
+ op.instrs++;
+ op.ninstrs--;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (!prog || ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status;
+}
+
+/**
+ * nand_prog_page_begin_op - starts a PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues the first half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, false);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+
+ if (buf)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
+
+/**
+ * nand_prog_page_end_op - ends a PROG PAGE operation
+ * @chip: The NAND chip
+ *
+ * This function issues the second half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_end_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
+
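+/*
+ * Usage example (illustrative): controllers that push the page data through
+ * their own DMA path pair the two halves around the transfer, as the mtk
+ * driver does earlier in this patch:
+ *
+ * nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ * ... transfer the data to the NAND array ...
+ * return nand_prog_page_end_op(chip);
+ */
+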
+/**
+ * nand_prog_page_op - Do a full PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues a full PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
{
- kfree(chip->data_interface);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, true);
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+ chip->write_buf(mtd, buf, len);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_prog_page_op);
+
+/**
+ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to send to the NAND
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE WRITE COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDIN, 0),
+ NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ instrs[2].ctx.data.force_8bit = force_8bit;
+
+ /* Drop the DATA_OUT instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
+ if (len)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_write_column_op);
+
+/**
+ * nand_readid_op - Do a READID operation
+ * @chip: The NAND chip
+ * @addr: address cycle to pass after the READID command
+ * @buf: buffer used to store the ID
+ * @len: length of the buffer
+ *
+ * This function sends a READID command and reads back the ID returned by the
+ * NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *id = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
+
+ for (i = 0; i < len; i++)
+ id[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_readid_op);
+
+/**
+ * nand_status_op - Do a STATUS operation
+ * @chip: The NAND chip
+ * @status: out variable to store the NAND status
+ *
+ * This function sends a STATUS command and reads back the status returned by
+ * the NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_status_op(struct nand_chip *chip, u8 *status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_STATUS,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(1, status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (!status)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ if (status)
+ *status = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_status_op);
+
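+/*
+ * Usage example (illustrative): polling the status register, then leaving
+ * status mode so later data reads return real data; this is the pattern
+ * used by nand_soft_waitrdy() above:
+ *
+ * ret = nand_status_op(chip, NULL);
+ * ...
+ * ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ * ...
+ * nand_exit_status_op(chip);
+ */
+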
+/**
+ * nand_exit_status_op - Exit a STATUS operation
+ * @chip: The NAND chip
+ *
+ * This function sends a READ0 command to cancel the effect of the STATUS
+ * command, which would otherwise keep the chip returning only its status
+ * until a new read command is sent.
+ *
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_exit_status_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
+
+/**
+ * nand_erase_op - Do an erase operation
+ * @chip: The NAND chip
+ * @eraseblock: block to erase
+ *
+ * This function sends an ERASE command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int page = eraseblock <<
+ (chip->phys_erase_shift - chip->page_shift);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[3] = { page, page >> 8, page >> 16 };
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_ERASE2,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ instrs[1].ctx.addr.naddrs++;
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_erase_op);
+
+/**
+ * nand_set_features_op - Do a SET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a SET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ const void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *params = data;
+ int i, ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, params[i]);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nand_get_features_op - Do a GET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a GET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_get_features_op(struct nand_chip *chip, u8 feature,
+ void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *params = data;
+ int i;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
+ data, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ params[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_reset_op - Do a reset operation
+ * @chip: The NAND chip
+ *
+ * This function sends a RESET command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset_op);
+
+/**
+ * nand_read_data_op - Read data from the NAND
+ * @chip: The NAND chip
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data read on the bus. Usually used after launching
+ * another NAND operation like nand_read_page_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+ } else {
+ chip->read_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_data_op);
+
+/**
+ * nand_write_data_op - Write data to the NAND
+ * @chip: The NAND chip
+ * @buf: buffer containing the data to send on the bus
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data write on the bus. Usually used after launching
+ * another NAND operation like nand_prog_page_begin_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ const u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ chip->write_byte(mtd, p[i]);
+ } else {
+ chip->write_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_write_data_op);
+
+/**
+ * struct nand_op_parser_ctx - Context used by the parser
+ * @instrs: array of all the instructions that must be addressed
+ * @ninstrs: length of the @instrs array
+ * @subop: Sub-operation to be passed to the NAND controller
+ *
+ * This structure is used by the core to split NAND operations into
+ * sub-operations that can be handled by the NAND controller.
+ */
+struct nand_op_parser_ctx {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ struct nand_subop subop;
+};
+
+/**
+ * nand_op_parser_must_split_instr - Checks if an instruction must be split
+ * @pat: the parser pattern element that matches @instr
+ * @instr: pointer to the instruction to check
+ * @start_offset: this is an in/out parameter. If @instr has already been
+ * split, then @start_offset is the offset from which to start
+ * (either an address cycle or an offset in the data buffer).
+ * Conversely, if the function returns true (i.e. @instr must be
+ * split), this parameter is updated to point to the first
+ * data/address cycle that has not been taken care of.
+ *
+ * Some NAND controllers are limited and cannot send X address cycles in a
+ * single operation, or cannot read/write more than Y bytes at a time.
+ * In this case, split the instruction that does not fit in a single
+ * controller-operation into two or more chunks.
+ *
+ * Returns true if the instruction must be split, false otherwise.
+ * The @start_offset parameter is also updated to the offset at which the next
+ * bundle of instructions must start (for an address or a data instruction).
+ */
+static bool
+nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
+ const struct nand_op_instr *instr,
+ unsigned int *start_offset)
+{
+ switch (pat->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (!pat->ctx.addr.maxcycles)
+ break;
+
+ if (instr->ctx.addr.naddrs - *start_offset >
+ pat->ctx.addr.maxcycles) {
+ *start_offset += pat->ctx.addr.maxcycles;
+ return true;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (!pat->ctx.data.maxlen)
+ break;
+
+ if (instr->ctx.data.len - *start_offset >
+ pat->ctx.data.maxlen) {
+ *start_offset += pat->ctx.data.maxlen;
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * nand_op_parser_match_pat - Checks if a pattern matches the instructions
+ * remaining in the parser context
+ * @pat: the pattern to test
+ * @ctx: the parser context structure to match with the pattern @pat
+ *
+ * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
+ * Returns true if this is the case, false otherwise. When true is returned,
+ * @ctx->subop is updated with the set of instructions to be passed to the
+ * controller driver.
+ */
+static bool
+nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
+ struct nand_op_parser_ctx *ctx)
+{
+ unsigned int instr_offset = ctx->subop.first_instr_start_off;
+ const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
+ const struct nand_op_instr *instr = ctx->subop.instrs;
+ unsigned int i, ninstrs;
+
+ for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
+ /*
+ * The pattern instruction does not match the operation
+ * instruction. If the instruction is marked optional in the
+ * pattern definition, we skip the pattern element and continue
+ * to the next one. If the element is mandatory, there's no
+ * match and we can return false directly.
+ */
+ if (instr->type != pat->elems[i].type) {
+ if (!pat->elems[i].optional)
+ return false;
+
+ continue;
+ }
+
+ /*
+ * Now check the pattern element constraints. If the pattern is
+ * not able to handle the whole instruction in a single step,
+ * we have to split it.
+ * nand_op_parser_must_split_instr() returns with instr_offset
+ * updated to the position where the instruction must be split
+ * (the start of the next subop chunk).
+ */
+ if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
+ &instr_offset)) {
+ ninstrs++;
+ i++;
+ break;
+ }
+
+ instr++;
+ ninstrs++;
+ instr_offset = 0;
+ }
+
+ /*
+ * This can happen if all instructions of a pattern are optional.
+ * Still, if there's not at least one instruction handled by this
+ * pattern, this is not a match, and we should try the next one (if
+ * any).
+ */
+ if (!ninstrs)
+ return false;
+
+ /*
+ * We had a match on the pattern head, but the pattern may be longer
+ * than the instructions we're asked to execute. We need to make sure
+ * there are no mandatory elements in the pattern tail.
+ */
+ for (; i < pat->nelems; i++) {
+ if (!pat->elems[i].optional)
+ return false;
+ }
+
+ /*
+ * We have a match: update the subop structure accordingly and return
+ * true.
+ */
+ ctx->subop.ninstrs = ninstrs;
+ ctx->subop.last_instr_end_off = instr_offset;
+
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ const struct nand_op_instr *instr;
+ char *prefix = " ";
+ unsigned int i;
+
+ pr_debug("executing subop:\n");
+
+ for (i = 0; i < ctx->ninstrs; i++) {
+ instr = &ctx->instrs[i];
+
+ if (instr == &ctx->subop.instrs[0])
+ prefix = " ->";
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ pr_debug("%sCMD [0x%02x]\n", prefix,
+ instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
+ instr->ctx.addr.naddrs,
+ instr->ctx.addr.naddrs < 64 ?
+ instr->ctx.addr.naddrs : 64,
+ instr->ctx.addr.addrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ pr_debug("%sDATA_IN [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ pr_debug("%sWAITRDY [max %d ms]\n", prefix,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
+ prefix = " ";
+ }
+}
+#else
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ /* NOP */
+}
+#endif
+
+/**
+ * nand_op_parser_exec_op - exec_op parser
+ * @chip: the NAND chip
+ * @parser: patterns description provided by the controller driver
+ * @op: the NAND operation to address
+ * @check_only: when true, the function only checks if @op can be handled but
+ * does not execute the operation
+ *
+ * Helper function designed to ease integration of NAND controller drivers that
+ * only support a limited set of instruction sequences. The supported sequences
+ * are described in @parser, and the framework takes care of splitting @op into
+ * multiple sub-operations (if required) and passes them back to the ->exec()
+ * callback of the matching pattern if @check_only is set to false.
+ *
+ * NAND controller drivers should call this function from their own ->exec_op()
+ * implementation.
+ *
+ * Returns 0 on success, a negative error code otherwise. A failure can be
+ * caused by an unsupported operation (none of the supported patterns is able
+ * to handle the requested operation), or an error returned by a matching
+ * pattern's ->exec() hook.
+ */
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only)
+{
+ struct nand_op_parser_ctx ctx = {
+ .subop.instrs = op->instrs,
+ .instrs = op->instrs,
+ .ninstrs = op->ninstrs,
+ };
+ unsigned int i;
+
+ while (ctx.subop.instrs < op->instrs + op->ninstrs) {
+ int ret;
+
+ for (i = 0; i < parser->npatterns; i++) {
+ const struct nand_op_parser_pattern *pattern;
+
+ pattern = &parser->patterns[i];
+ if (!nand_op_parser_match_pat(pattern, &ctx))
+ continue;
+
+ nand_op_parser_trace(&ctx);
+
+ if (check_only)
+ break;
+
+ ret = pattern->exec(chip, &ctx.subop);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (i == parser->npatterns) {
+ pr_debug("->exec_op() parser: pattern not found!\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Update the context structure by pointing to the start of the
+ * next subop.
+ */
+ ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
+ if (ctx.subop.last_instr_end_off)
+ ctx.subop.instrs -= 1;
+
+ ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
+
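+/*
+ * Example (illustrative sketch; my_read_exec and my_parser are hypothetical
+ * names): a controller limited to 5 address cycles and 512-byte transfers
+ * could describe its read sequence as:
+ *
+ * static const struct nand_op_parser my_parser = NAND_OP_PARSER(
+ * NAND_OP_PARSER_PATTERN(my_read_exec,
+ * NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ * NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ * NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ * NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ * NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)));
+ *
+ * and call nand_op_parser_exec_op(chip, &my_parser, op, check_only) from
+ * its ->exec_op() hook.
+ */
+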
+static bool nand_instr_is_data(const struct nand_op_instr *instr)
+{
+ return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
+ instr->type == NAND_OP_DATA_OUT_INSTR);
+}
+
+static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ return subop && instr_idx < subop->ninstrs;
+}
+
+static int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (instr_idx)
+ return 0;
+
+ return subop->first_instr_start_off;
+}
+
+/**
+ * nand_subop_get_addr_start_off - Get the start offset in an address array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.addrs field of address instructions. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the offset of the first cycle to issue.
+ */
+int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
+
+/**
+ * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.naddrs field of an address instruction. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the number of address cycles to issue.
+ */
+int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ start_off = nand_subop_get_addr_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
+
+/**
+ * nand_subop_get_data_start_off - Get the start offset in a data array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.buf.{in,out} field of data instructions. This is wrong as data
+ * instructions might be split.
+ *
+ * Given a data instruction, returns the offset to start from.
+ */
+int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
+
+/**
+ * nand_subop_get_data_len - Get the number of bytes to retrieve
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.len field of a data instruction. This is wrong as data instructions
+ * might be split.
+ *
+ * Returns the length of the chunk of data to send/receive.
+ */
+int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off = 0, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ start_off = nand_subop_get_data_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.data.len;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
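Putting the four accessors together, a hedged sketch of what a pattern's ->exec() hook might look like (mycontroller_send_addrs() and mycontroller_read_data() are hypothetical controller primitives); the point is that start offsets and lengths always come from the helpers, never from the raw instruction fields:

static int mycontroller_exec_subop(struct nand_chip *chip,
				   const struct nand_subop *subop)
{
	unsigned int i;

	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];
		int start, len;

		switch (instr->type) {
		case NAND_OP_ADDR_INSTR:
			/* Only issue the cycles assigned to this subop. */
			start = nand_subop_get_addr_start_off(subop, i);
			len = nand_subop_get_num_addr_cyc(subop, i);
			mycontroller_send_addrs(chip,
					&instr->ctx.addr.addrs[start], len);
			break;
		case NAND_OP_DATA_IN_INSTR:
			start = nand_subop_get_data_start_off(subop, i);
			len = nand_subop_get_data_len(subop, i);
			mycontroller_read_data(chip,
					(u8 *)instr->ctx.data.buf.in + start,
					len);
			break;
		default:
			break;
		}
	}

	return 0;
}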
/**
* nand_reset - Reset and initialize a NAND device
* @chip: The NAND chip
* @chipnr: Internal die id
*
- * Returns 0 for success or negative error code otherwise
+ * Save the current data interface setup, apply SDR timings mode 0 (see
+ * nand_reset_data_interface() for details), perform the reset operation, and
+ * restore the saved timings afterwards.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
int nand_reset(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_data_interface saved_data_intf = chip->data_interface;
int ret;
ret = nand_reset_data_interface(chip, chipnr);
@@ -1233,10 +2734,13 @@ int nand_reset(struct nand_chip *chip, int chipnr)
* interface settings, hence this weird ->select_chip() dance.
*/
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
chip->select_chip(mtd, -1);
+ if (ret)
+ return ret;
chip->select_chip(mtd, chipnr);
+ chip->data_interface = saved_data_intf;
ret = nand_setup_data_interface(chip, chipnr);
chip->select_chip(mtd, -1);
if (ret)
@@ -1390,9 +2894,19 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);
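Note the boolean last argument to nand_read_data_op() (and to nand_write_data_op() below): it requests a forced 8-bit transfer. Regular page payloads pass false, while the ONFI/JEDEC parameter-page reads later in this patch pass true, since that data is always transferred over an 8-bit bus even on x16 devices.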
@@ -1414,29 +2928,50 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->read_buf(mtd, buf, eccsize);
+ ret = nand_read_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->read_buf(mtd, oob, eccbytes);
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->read_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_read_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1456,8 +2991,8 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
@@ -1521,15 +3056,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
data_col_addr = start_step * chip->ecc.size;
/* If the read is not page aligned */
- if (data_col_addr != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
-
p = bufpoi + data_col_addr;
- chip->read_buf(mtd, p, datafrag_len);
+ ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
+ if (ret)
+ return ret;
/* Calculate ECC */
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
- chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+ chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);
/*
* The performance is faster if we position offsets according to
@@ -1543,8 +3077,11 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
gaps = 1;
if (gaps) {
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
} else {
/*
* Send the command to read the particular ECC bytes take care
@@ -1558,12 +3095,15 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
(busw - 1))
aligned_len++;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + aligned_pos, -1);
- chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + aligned_pos,
+ &chip->oob_poi[aligned_pos],
+ aligned_len, false);
+ if (ret)
+ return ret;
}
- ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
chip->oob_poi, index, eccfrag_len);
if (ret)
return ret;
@@ -1572,13 +3112,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
int stat;
- stat = chip->ecc.correct(mtd, p,
- &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
+ &chip->ecc.calc_buf[i]);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
- &chip->buffers->ecccode[i],
+ &chip->ecc.code_buf[i],
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
@@ -1611,16 +3151,27 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1674,14 +3225,18 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
unsigned int max_bitflips = 0;
/* Read the OOB area first */
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1692,7 +3247,11 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
@@ -1729,7 +3288,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
+ int ret, i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -1737,25 +3296,44 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
- chip->read_buf(mtd, oob, eccbytes);
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
stat = chip->ecc.correct(mtd, p, oob, NULL);
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
@@ -1779,8 +3357,11 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->read_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_read_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
return max_bitflips;
}
@@ -1894,16 +3475,13 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
- bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
+ bufpoi = use_bufpoi ? chip->data_buf : buf;
if (use_bufpoi && aligned)
pr_debug("%s: using read bounce buffer for buf@%p\n",
__func__, buf);
read_retry:
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-
/*
* Now read the page into the buffer. Absent an error,
* the read methods return max bitflips per ecc step.
@@ -1938,7 +3516,7 @@ read_retry:
/* Invalidate page cache */
chip->pagebuf = -1;
}
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
}
if (unlikely(oob)) {
@@ -1979,7 +3557,7 @@ read_retry:
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips, ret);
} else {
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
chip->pagebuf_bitflips);
@@ -2027,33 +3605,6 @@ read_retry:
}
/**
- * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
- *
- * Get hold of the chip and call nand_do_read.
- */
-static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_READING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_read_ops(mtd, from, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -2061,9 +3612,7 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
@@ -2081,25 +3630,43 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size;
uint8_t *bufpoi = chip->oob_poi;
- int i, toread, sndrnd = 0, pos;
+ int i, toread, sndrnd = 0, pos, ret;
+
+ ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
for (i = 0; i < chip->ecc.steps; i++) {
if (sndrnd) {
+ int ret;
+
pos = eccsize + i * (eccsize + chunk);
if (mtd->writesize > 512)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+ ret = nand_change_read_column_op(chip, pos,
+ NULL, 0,
+ false);
else
- chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+ ret = nand_read_page_op(chip, page, pos, NULL,
+ 0);
+
+ if (ret)
+ return ret;
} else
sndrnd = 1;
toread = min_t(int, length, chunk);
- chip->read_buf(mtd, bufpoi, toread);
+
+ ret = nand_read_data_op(chip, bufpoi, toread, false);
+ if (ret)
+ return ret;
+
bufpoi += toread;
length -= toread;
}
- if (length > 0)
- chip->read_buf(mtd, bufpoi, length);
+ if (length > 0) {
+ ret = nand_read_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -2113,18 +3680,8 @@ EXPORT_SYMBOL(nand_read_oob_syndrome);
*/
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- int status = 0;
- const uint8_t *buf = chip->oob_poi;
- int length = mtd->oobsize;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, buf, length);
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
@@ -2140,7 +3697,7 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
{
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size, length = mtd->oobsize;
- int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+ int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
const uint8_t *bufpoi = chip->oob_poi;
/*
@@ -2154,7 +3711,10 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
} else
pos = eccsize;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+ ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; i < steps; i++) {
if (sndcmd) {
if (mtd->writesize <= 512) {
@@ -2163,28 +3723,40 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
len = eccsize;
while (len > 0) {
int num = min_t(int, len, 4);
- chip->write_buf(mtd, (uint8_t *)&fill,
- num);
+
+ ret = nand_write_data_op(chip, &fill,
+ num, false);
+ if (ret)
+ return ret;
+
len -= num;
}
} else {
pos = eccsize + i * (eccsize + chunk);
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+ ret = nand_change_write_column_op(chip, pos,
+ NULL, 0,
+ false);
+ if (ret)
+ return ret;
}
} else
sndcmd = 1;
len = min_t(int, length, chunk);
- chip->write_buf(mtd, bufpoi, len);
+
+ ret = nand_write_data_op(chip, bufpoi, len, false);
+ if (ret)
+ return ret;
+
bufpoi += len;
length -= len;
}
- if (length > 0)
- chip->write_buf(mtd, bufpoi, length);
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ if (length > 0) {
+ ret = nand_write_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_oob_syndrome);
@@ -2199,6 +3771,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome);
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ unsigned int max_bitflips = 0;
int page, realpage, chipnr;
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtd_ecc_stats stats;
@@ -2214,21 +3787,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = mtd_oobavail(mtd, ops);
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start read outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
- (from >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(from >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -2256,6 +3814,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
nand_wait_ready(mtd);
}
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
readlen -= len;
if (!readlen)
break;
@@ -2281,7 +3841,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}
/**
@@ -2299,13 +3859,6 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->retlen = 0;
- /* Do not allow reads past end of device */
- if (ops->datbuf && (from + ops->len) > mtd->size) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
if (ops->mode != MTD_OPS_PLACE_OOB &&
ops->mode != MTD_OPS_AUTO_OOB &&
ops->mode != MTD_OPS_RAW)
@@ -2336,11 +3889,20 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
- return 0;
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
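The nand_prog_page_begin_op()/nand_prog_page_end_op() split mirrors the raw command sequence: begin issues SEQIN (optionally with an initial data burst), end issues PAGEPROG and checks the status. This is what lets the raw and syndrome writers below interleave extra nand_write_data_op() calls between the two.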
@@ -2362,31 +3924,52 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->write_buf(mtd, buf, eccsize);
+ ret = nand_write_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->write_buf(mtd, oob, eccbytes);
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->write_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_write_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
* nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
@@ -2403,7 +3986,7 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
/* Software ECC calculation */
@@ -2433,12 +4016,20 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
@@ -2447,9 +4038,11 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (ret)
return ret;
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2469,7 +4062,7 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_required, int page)
{
uint8_t *oob_buf = chip->oob_poi;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -2478,12 +4071,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_bytes = mtd->oobsize / ecc_steps;
int step, ret;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (step = 0; step < ecc_steps; step++) {
/* configure controller for WRITE access */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
/* write data (untouched subpages already masked by 0xFF) */
- chip->write_buf(mtd, buf, ecc_size);
+ ret = nand_write_data_op(chip, buf, ecc_size, false);
+ if (ret)
+ return ret;
/* mask ECC of un-touched subpages by padding 0xFF */
if ((step < start_step) || (step > end_step))
@@ -2503,16 +4102,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
/* copy the calculated ECC for the whole page to chip->oob_poi */
/* this includes the masked value (0xFF) for unwritten subpages */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* write OOB buffer to NAND device */
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2537,33 +4138,55 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
const uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
+ int ret;
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.calculate(mtd, p, oob);
- chip->write_buf(mtd, oob, eccbytes);
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->write_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_write_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -2589,9 +4212,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
else
subpage = 0;
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
if (unlikely(raw))
status = chip->ecc.write_page_raw(mtd, chip, buf,
oob_required, page);
@@ -2605,14 +4225,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (status < 0)
return status;
- if (nand_standard_page_accessors(&chip->ecc)) {
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- }
-
return 0;
}
@@ -2737,9 +4349,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (part_pagewr)
bytes = min_t(int, bytes - column, writelen);
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- memcpy(&chip->buffers->databuf[column], buf, bytes);
- wbuf = chip->buffers->databuf;
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ memcpy(&chip->data_buf[column], buf, bytes);
+ wbuf = chip->data_buf;
}
if (unlikely(oob)) {
@@ -2822,33 +4434,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
}
/**
- * nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write with ECC.
- */
-static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_WRITING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = (uint8_t *)buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_write_ops(mtd, to, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_do_write_oob - [MTD Interface] NAND write out-of-band
* @mtd: MTD device structure
* @to: offset to write to
@@ -2874,22 +4459,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start write outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow write past end of device */
- if (unlikely(to >= mtd->size ||
- ops->ooboffs + ops->ooblen >
- ((mtd->size >> chip->page_shift) -
- (to >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(to >> chip->chip_shift);
/*
@@ -2945,13 +4514,6 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
ops->retlen = 0;
- /* Do not allow writes past end of device */
- if (ops->datbuf && (to + ops->len) > mtd->size) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
nand_get_device(mtd, FL_WRITING);
switch (ops->mode) {
@@ -2984,11 +4546,12 @@ out:
static int single_erase(struct mtd_info *mtd, int page)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eraseblock;
+
/* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+ eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
- return chip->waitfunc(mtd, chip);
+ return nand_erase_op(chip, eraseblock);
}
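As a worked example of the page-to-eraseblock conversion: with 2KiB pages (page_shift = 11) and 128KiB blocks (phys_erase_shift = 17), the shift is 6, so page 0x1c0 maps to eraseblock 0x7.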
/**
@@ -3072,7 +4635,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
status = chip->erase(mtd, page & chip->pagemask);
/* See if block erase succeeded */
- if (status & NAND_STATUS_FAIL) {
+ if (status) {
pr_debug("%s: failed erase, page 0x%08x\n",
__func__, page);
instr->state = MTD_ERASE_FAILED;
@@ -3215,22 +4778,12 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int status;
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->write_byte(mtd, subfeature_param[i]);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- return 0;
+ return nand_set_features_op(chip, addr, subfeature_param);
}
/**
@@ -3243,17 +4796,12 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- *subfeature_param++ = chip->read_byte(mtd);
- return 0;
+ return nand_get_features_op(chip, addr, subfeature_param);
}
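The core's main consumer of these hooks is the timing-mode change; assuming the pre-existing ONFI_FEATURE_ADDR_TIMING_MODE (0x01) and ONFI_SUBFEATURE_PARAM_LEN (4) definitions, a caller looks roughly like this hypothetical sketch:

static int mycontroller_set_timing_mode(struct mtd_info *mtd,
					struct nand_chip *chip, u8 mode)
{
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };

	/* Ask the chip to switch to ONFI timing mode <mode>. */
	return chip->onfi_set_features(mtd, chip,
				       ONFI_FEATURE_ADDR_TIMING_MODE,
				       tmode_param);
}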
/**
@@ -3319,7 +4867,7 @@ static void nand_set_defaults(struct nand_chip *chip)
chip->chip_delay = 20;
/* Check if a user-supplied command function was given */
- if (chip->cmdfunc == NULL)
+ if (!chip->cmdfunc && !chip->exec_op)
chip->cmdfunc = nand_command;
/* Check if a user-supplied wait function was given */
@@ -3396,12 +4944,11 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
struct nand_onfi_params *p)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
struct onfi_ext_param_page *ep;
struct onfi_ext_section *s;
struct onfi_ext_ecc_info *ecc;
uint8_t *cursor;
- int ret = -EINVAL;
+ int ret;
int len;
int i;
@@ -3411,14 +4958,18 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
return -ENOMEM;
/* Send our own NAND_CMD_PARAM. */
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
+ goto ext_out;
/* Use the Change Read Column command to skip the ONFI param pages. */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- sizeof(*p) * p->num_of_param_pages , -1);
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
- /* Read out the Extended Parameter Page. */
- chip->read_buf(mtd, (uint8_t *)ep, len);
+ ret = -EINVAL;
if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
!= le16_to_cpu(ep->crc))) {
pr_debug("fail in the CRC.\n");
@@ -3471,19 +5022,23 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_onfi_params *p = &chip->onfi_params;
- int i, j;
- int val;
+ char id[4];
+ int i, ret, val;
/* Try ONFI for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
- if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
- chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
+
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
break;
@@ -3574,20 +5129,22 @@ static int nand_flash_detect_jedec(struct nand_chip *chip)
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_jedec_params *p = &chip->jedec_params;
struct jedec_ecc_info *ecc;
- int val;
- int i, j;
+ char id[5];
+ int i, val, ret;
/* Try JEDEC for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'C')
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
le16_to_cpu(p->crc))
@@ -3866,8 +5423,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
const struct nand_manufacturer *manufacturer;
struct mtd_info *mtd = nand_to_mtd(chip);
- int busw;
- int i;
+ int busw, ret;
u8 *id_data = chip->id.data;
u8 maf_id, dev_id;
@@ -3875,17 +5431,21 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
* after power-up.
*/
- nand_reset(chip, 0);
+ ret = nand_reset(chip, 0);
+ if (ret)
+ return ret;
/* Select the device */
chip->select_chip(mtd, 0);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ ret = nand_readid_op(chip, 0, id_data, 2);
+ if (ret)
+ return ret;
/* Read manufacturer and device IDs */
- maf_id = chip->read_byte(mtd);
- dev_id = chip->read_byte(mtd);
+ maf_id = id_data[0];
+ dev_id = id_data[1];
/*
* Try again to make sure, as some systems the bus-hold or other
@@ -3894,11 +5454,10 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* not match, ignore the device completely.
*/
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
/* Read entire ID string */
- for (i = 0; i < ARRAY_SIZE(chip->id.data); i++)
- id_data[i] = chip->read_byte(mtd);
+ ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
+ if (ret)
+ return ret;
if (id_data[0] != maf_id || id_data[1] != dev_id) {
pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
@@ -4190,6 +5749,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
+ /* Enforce the right timings for reset/detection */
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+
ret = nand_dt_init(chip);
if (ret)
return ret;
@@ -4197,15 +5759,21 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ /*
+ * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
+ * populated.
+ */
+ if (!chip->exec_op) {
/*
- * Default functions assigned for chip_select() and
- * cmdfunc() both expect cmd_ctrl() to be populated,
- * so we need to check that that's the case
+ * Default functions assigned for ->cmdfunc() and
+ * ->select_chip() both expect ->cmd_ctrl() to be populated.
*/
- pr_err("chip.cmd_ctrl() callback is not provided");
- return -EINVAL;
+ if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ pr_err("->cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
}
+
/* Set the default functions */
nand_set_defaults(chip);
@@ -4225,15 +5793,16 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
+ u8 id[2];
+
/* See comment in nand_get_flash_type for reset */
nand_reset(chip, i);
chip->select_chip(mtd, i);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ nand_readid_op(chip, 0, id, sizeof(id));
/* Read manufacturer and device IDs */
- if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd)) {
+ if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
chip->select_chip(mtd, -1);
break;
}
@@ -4600,26 +6169,6 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
-static bool invalid_ecc_page_accessors(struct nand_chip *chip)
-{
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (nand_standard_page_accessors(ecc))
- return false;
-
- /*
- * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
- * controller driver implements all the page accessors because
- * default helpers are not suitable when the core does not
- * send the READ0/PAGEPROG commands.
- */
- return (!ecc->read_page || !ecc->write_page ||
- !ecc->read_page_raw || !ecc->write_page_raw ||
- (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
- (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
- ecc->hwctl && ecc->calculate));
-}
-
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
@@ -4632,7 +6181,6 @@ int nand_scan_tail(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct nand_buffers *nbuf = NULL;
int ret, i;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
@@ -4641,39 +6189,9 @@ int nand_scan_tail(struct mtd_info *mtd)
return -EINVAL;
}
- if (invalid_ecc_page_accessors(chip)) {
- pr_err("Invalid ECC page accessors setup\n");
- return -EINVAL;
- }
-
- if (!(chip->options & NAND_OWN_BUFFERS)) {
- nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
- if (!nbuf)
- return -ENOMEM;
-
- nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccalc) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccode) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
- GFP_KERNEL);
- if (!nbuf->databuf) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- chip->buffers = nbuf;
- } else if (!chip->buffers) {
+ chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!chip->data_buf)
return -ENOMEM;
- }
/*
* FIXME: some NAND manufacturer drivers expect the first die to be
@@ -4685,10 +6203,10 @@ int nand_scan_tail(struct mtd_info *mtd)
ret = nand_manufacturer_init(chip);
chip->select_chip(mtd, -1);
if (ret)
- goto err_free_nbuf;
+ goto err_free_buf;
/* Set the internal oob buffer location, just after the page data */
- chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+ chip->oob_poi = chip->data_buf + mtd->writesize;
/*
* If no default placement scheme is given, select an appropriate one.
@@ -4836,6 +6354,15 @@ int nand_scan_tail(struct mtd_info *mtd)
goto err_nand_manuf_cleanup;
}
+ if (ecc->correct || ecc->calculate) {
+ ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!ecc->calc_buf || !ecc->code_buf) {
+ ret = -ENOMEM;
+ goto err_nand_manuf_cleanup;
+ }
+ }
+
/* For many systems, the standard OOB write also works for raw */
if (!ecc->read_oob_raw)
ecc->read_oob_raw = ecc->read_oob;
@@ -4917,8 +6444,6 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_erase = nand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = nand_read;
- mtd->_write = nand_write;
mtd->_panic_write = panic_nand_write;
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
@@ -4954,7 +6479,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->select_chip(mtd, -1);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
}
/* Check, if we should skip the bad block table scan */
@@ -4964,23 +6489,18 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Build bad block table */
ret = chip->scan_bbt(mtd);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
return 0;
-err_nand_data_iface_cleanup:
- nand_release_data_interface(chip);
err_nand_manuf_cleanup:
nand_manufacturer_cleanup(chip);
-err_free_nbuf:
- if (nbuf) {
- kfree(nbuf->databuf);
- kfree(nbuf->ecccode);
- kfree(nbuf->ecccalc);
- kfree(nbuf);
- }
+err_free_buf:
+ kfree(chip->data_buf);
+ kfree(ecc->code_buf);
+ kfree(ecc->calc_buf);
return ret;
}
@@ -5028,16 +6548,11 @@ void nand_cleanup(struct nand_chip *chip)
chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
- nand_release_data_interface(chip);
-
/* Free bad block table memory */
kfree(chip->bbt);
- if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
- kfree(chip->buffers->databuf);
- kfree(chip->buffers->ecccode);
- kfree(chip->buffers->ecccalc);
- kfree(chip->buffers);
- }
+ kfree(chip->data_buf);
+ kfree(chip->ecc.code_buf);
+ kfree(chip->ecc.calc_buf);
/* Free bad block descriptor memory */
if (chip->badblock_pattern && chip->badblock_pattern->options
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 2915b6739bf8..36092850be2c 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -898,7 +898,7 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
{
struct nand_chip *this = mtd_to_nand(mtd);
- return create_bbt(mtd, this->buffers->databuf, bd, -1);
+ return create_bbt(mtd, this->data_buf, bd, -1);
}
/**
diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
index 985751eda317..d542908a0ebb 100644
--- a/drivers/mtd/nand/nand_hynix.c
+++ b/drivers/mtd/nand/nand_hynix.c
@@ -67,15 +67,43 @@ struct hynix_read_retry_otp {
static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
+ u8 jedecid[5] = { };
+ int ret;
+
+ ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
+ if (ret)
+ return false;
+
+ return !strncmp("JEDEC", jedecid, sizeof(jedecid));
+}
+
+static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(cmd, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, cmd, -1, -1);
+
+ return 0;
+}
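This helper is the transition idiom used across vendor code: prefer ->exec_op() with a single NAND_OP_CMD() instruction when the controller provides it, and fall back to the legacy ->cmdfunc() path otherwise, so vendor-specific sequences keep working during the conversion.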
+
+static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
+{
struct mtd_info *mtd = nand_to_mtd(chip);
- u8 jedecid[6] = { };
- int i = 0;
+ u16 column = ((u16)addr << 8) | addr;
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- for (i = 0; i < 5; i++)
- jedecid[i] = chip->read_byte(mtd);
+ chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
+ chip->write_byte(mtd, val);
- return !strcmp("JEDEC", jedecid);
+ return 0;
}
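Note the column encoding preserved from the removed code: the register address is sent on both address cycles, so e.g. addr 0x38 becomes column 0x3838, matching the column |= column << 8 computation this helper replaces.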
static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
@@ -83,14 +111,15 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
struct nand_chip *chip = mtd_to_nand(mtd);
struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
const u8 *values;
- int status;
- int i;
+ int i, ret;
values = hynix->read_retry->values +
(retry_mode * hynix->read_retry->nregs);
/* Enter 'Set Hynix Parameters' mode */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
/*
* Configure the NAND in the requested read-retry mode.
@@ -102,21 +131,14 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
* probably tweaked at production in this case).
*/
for (i = 0; i < hynix->read_retry->nregs; i++) {
- int column = hynix->read_retry->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, values[i]);
+ ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
+ values[i]);
+ if (ret)
+ return ret;
}
/* Apply the new settings. */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}
/**
@@ -172,40 +194,63 @@ static int hynix_read_rr_otp(struct nand_chip *chip,
const struct hynix_read_retry_otp *info,
void *buf)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i;
+ int i, ret;
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
for (i = 0; i < info->nregs; i++) {
- int column = info->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, info->values[i]);
+ ret = hynix_nand_reg_write_op(chip, info->regs[i],
+ info->values[i]);
+ if (ret)
+ return ret;
}
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
/* Sequence to enter OTP mode? */
- chip->cmdfunc(mtd, 0x17, -1, -1);
- chip->cmdfunc(mtd, 0x04, -1, -1);
- chip->cmdfunc(mtd, 0x19, -1, -1);
+ ret = hynix_nand_cmd_op(chip, 0x17);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x4);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x19);
+ if (ret)
+ return ret;
/* Now read the page */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, info->page);
- chip->read_buf(mtd, buf, info->size);
+ ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
+ if (ret)
+ return ret;
/* Put everything back to normal */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, 0x38, -1);
- chip->write_byte(mtd, 0x0);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- return 0;
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_reg_write_op(chip, 0x38, 0);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ return nand_read_page_op(chip, 0, 0, NULL, 0);
}
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c
index abf6a3c376e8..02e109ae73f1 100644
--- a/drivers/mtd/nand/nand_micron.c
+++ b/drivers/mtd/nand/nand_micron.c
@@ -117,16 +117,28 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required,
int page)
{
- int status;
- int max_bitflips = 0;
+ u8 status;
+ int ret, max_bitflips = 0;
- micron_nand_on_die_ecc_setup(chip, true);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ goto out;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto out;
+
+ ret = nand_exit_status_op(chip);
+ if (ret)
+ goto out;
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = chip->read_byte(mtd);
if (status & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
+
/*
* The internal ECC doesn't tell us the number of bitflips
* that have been corrected, but tells us if it recommends to
@@ -137,13 +149,15 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
else if (status & NAND_STATUS_WRITE_RECOMMENDED)
max_bitflips = chip->ecc.strength;
- chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
-
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false);
+ if (!ret && oob_required)
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+out:
micron_nand_on_die_ecc_setup(chip, false);
- return max_bitflips;
+ return ret ? ret : max_bitflips;
}
static int
@@ -151,46 +165,16 @@ micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- int status;
-
- micron_nand_on_die_ecc_setup(chip, true);
+ int ret;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+ ret = nand_write_page_raw(mtd, chip, buf, oob_required, page);
micron_nand_on_die_ecc_setup(chip, false);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-static int
-micron_nand_read_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int oob_required,
- int page)
-{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
-
- return 0;
-}
-
-static int
-micron_nand_write_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
-{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return ret;
}
enum {
@@ -285,17 +269,14 @@ static int micron_nand_init(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.options = NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.bytes = 8;
chip->ecc.size = 512;
chip->ecc.strength = 4;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
- chip->ecc.read_page_raw =
- micron_nand_read_page_raw_on_die_ecc;
- chip->ecc.write_page_raw =
- micron_nand_write_page_raw_on_die_ecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops);
}
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
index d348f0129ae7..ef022f62f74c 100644
--- a/drivers/mtd/nand/nand_samsung.c
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -91,6 +91,25 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
}
} else {
nand_decode_ext_id(chip);
+
+ if (nand_is_slc(chip)) {
+ switch (chip->id.data[1]) {
+ /* K9F4G08U0D-S[I|C]B0(T00) */
+ case 0xDC:
+ chip->ecc_step_ds = 512;
+ chip->ecc_strength_ds = 1;
+ break;
+
+ /* K9F1G08U0E 21nm chips do not support subpage write */
+ case 0xF1:
+ if (chip->id.len > 4 &&
+ (chip->id.data[4] & GENMASK(1, 0)) == 0x1)
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ break;
+ default:
+ break;
+ }
+ }
}
}
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 5d1533bcc5bd..9400d039ddbd 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -283,16 +283,16 @@ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
/**
- * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
+ * onfi_fill_data_interface - [NAND Interface] Initialize a data interface from
* given ONFI mode
- * @iface: The data interface to be initialized
* @mode: The ONFI timing mode
*/
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
+int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode)
{
+ struct nand_data_interface *iface = &chip->data_interface;
+
if (type != NAND_SDR_IFACE)
return -EINVAL;
@@ -321,15 +321,4 @@ int onfi_init_data_interface(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL(onfi_init_data_interface);
-
-/**
- * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
- * data interface for mode 0. This is used as default timing after
- * reset.
- */
-const struct nand_data_interface *nand_get_default_data_interface(void)
-{
- return &onfi_sdr_timings[0];
-}
-EXPORT_SYMBOL(nand_get_default_data_interface);
+EXPORT_SYMBOL(onfi_fill_data_interface);
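With the iface argument gone, callers only name the chip; nand_scan_ident() above uses onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0) to enforce SDR mode-0 timings before reset and detection.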
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index dad438c4906a..8cdf7d3d8fa7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1530,7 +1530,9 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
int ret;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1548,7 +1550,8 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
/* Write ecc vector to OOB area */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1568,7 +1571,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
- u8 *ecc_calc = chip->buffers->ecccalc;
+ u8 *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -1582,6 +1585,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
* ECC is calculated for all subpages but we choose
* only what we want.
*/
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ECC engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1605,7 +1609,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
/* copy the calculated ECC for the whole page to chip->oob_poi */
/* this includes the masked value (0xFF) for unwritten subpages */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
@@ -1614,7 +1618,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
/* write OOB buffer to NAND device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1635,11 +1639,13 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int stat, ret;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1647,10 +1653,10 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, buf, mtd->writesize);
/* Read oob bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + BADBLOCK_MARKER_LENGTH, -1);
- chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH,
- chip->ecc.total);
+ nand_change_read_column_op(chip,
+ mtd->writesize + BADBLOCK_MARKER_LENGTH,
+ chip->oob_poi + BADBLOCK_MARKER_LENGTH,
+ chip->ecc.total, false);
/* Calculate ecc bytes */
omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 90b9a9ccbe60..d1979c7dbe7e 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -520,15 +520,13 @@ static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
struct nand_chip *chip = &host->chip;
struct pxa3xx_nand_info *info = host->info_data;
const struct pxa3xx_nand_flash *f = NULL;
- struct mtd_info *mtd = nand_to_mtd(&host->chip);
int i, id, ntypes;
+ u8 idbuf[2];
ntypes = ARRAY_SIZE(builtin_flash_types);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
- id = chip->read_byte(mtd);
- id |= chip->read_byte(mtd) << 0x8;
+ nand_readid_op(chip, 0, idbuf, sizeof(idbuf));
+ id = idbuf[0] | (idbuf[1] << 8);
for (i = 0; i < ntypes; i++) {
f = &builtin_flash_types[i];
@@ -963,6 +961,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
switch (command) {
case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
break;
@@ -1350,10 +1349,10 @@ static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
@@ -1363,7 +1362,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
struct pxa3xx_nand_info *info = host->info_data;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (info->retcode == ERR_CORERR && info->use_ecc) {
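
nand_readid_op(), used in the timing-compat path above, bundles the READID command, the address cycle and the ID byte reads into one call. A short sketch of the replacement for the old cmdfunc()/read_byte() pair:

	u8 idbuf[2];
	int id;

	/* READID at address 0x00, then fetch two ID bytes. */
	nand_readid_op(chip, 0, idbuf, sizeof(idbuf));
	id = idbuf[0] | (idbuf[1] << 8);
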
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 2656c1ac5646..563b759ffca6 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
+#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -1725,6 +1726,7 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf = NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -1750,6 +1752,7 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
int i, ret;
int read_loc;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = chip->oob_poi;
@@ -1850,6 +1853,8 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1902,6 +1907,9 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1916,6 +1924,7 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1970,6 +1979,9 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1990,7 +2002,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *oob = chip->oob_poi;
int data_size, oob_size;
- int ret, status = 0;
+ int ret;
host->use_ecc = true;
@@ -2027,11 +2039,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -2081,7 +2089,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int page, ret, status = 0;
+ int page, ret;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2114,11 +2122,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2636,6 +2640,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
nand_set_flash_node(chip, dn);
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
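
The qcom write paths keep the controller-side error if there was one and only then let nand_prog_page_end_op() report the PAGEPROG status. A sketch of that bracketing, with do_dma_transfer() as a hypothetical stand-in for the driver's descriptor machinery:

static int write_page_sketch(struct nand_chip *chip, int page)
{
	int ret;

	/* SEQIN + address cycles only; the data phase is external. */
	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	ret = do_dma_transfer();	/* hypothetical controller step */

	/* Fire PAGEPROG and poll status only if the transfer worked,
	 * so the first failure is the one the caller sees. */
	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
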
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index fc9287af4614..595635b9e9de 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -364,7 +364,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct r852_device *dev = nand_get_controller_data(chip);
unsigned long timeout;
- int status;
+ u8 status;
timeout = jiffies + (chip->state == FL_ERASING ?
msecs_to_jiffies(400) : msecs_to_jiffies(20));
@@ -373,8 +373,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = (int)chip->read_byte(mtd);
+ nand_status_op(chip, &status);
/* Unfortunately, no way to send detailed error status... */
if (dev->dma_error) {
@@ -522,9 +521,7 @@ exit:
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -1046,7 +1043,7 @@ static int r852_resume(struct device *device)
if (dev->card_registred) {
r852_engine_enable(dev);
dev->chip->select_chip(mtd, 0);
- dev->chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(dev->chip);
dev->chip->select_chip(mtd, -1);
}
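
Both the r852 conversion here and the tmio one further down read the status register through nand_status_op(), which issues NAND_CMD_STATUS and returns one status byte. A minimal usage sketch:

	u8 status;

	nand_status_op(chip, &status);
	if (status & NAND_STATUS_FAIL)
		return -EIO;	/* program/erase reported a failure */
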
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 3c5008a4f5f3..c4e7755448e6 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -614,7 +614,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
@@ -624,9 +624,9 @@ static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
index d3e028e58b0f..1581671b05ae 100644
--- a/drivers/mtd/nand/sm_common.h
+++ b/drivers/mtd/nand/sm_common.h
@@ -36,7 +36,7 @@ struct sm_oob {
#define SM_SMALL_OOB_SIZE 8
-extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+int sm_register_device(struct mtd_info *mtd, int smartmedia);
static inline int sm_sector_valid(struct sm_oob *oob)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 82244be3e766..f5a55c63935c 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -958,12 +958,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
int ret;
if (*cur_off != data_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+ nand_change_read_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -991,16 +991,15 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
* Re-read the data with the randomizer disabled to identify
* bitflips in erased pages.
*/
- if (nand->options & NAND_NEED_SCRAMBLING) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- } else {
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand_change_read_column_op(nand, data_off, data,
+ ecc->size, false);
+ else
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
ecc->size);
- }
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
+ false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1011,7 +1010,8 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
if (oob_required) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0,
+ false);
sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
true, page);
@@ -1038,8 +1038,8 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- offset + mtd->writesize, -1);
+ nand_change_read_column_op(nand, offset + mtd->writesize, NULL, 0,
+ false);
if (!randomize)
sunxi_nfc_read_buf(mtd, oob + offset, len);
@@ -1116,9 +1116,9 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (oob_required && !erased) {
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
!i, page);
@@ -1143,18 +1143,17 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
/*
* Re-read the data with the randomizer disabled to
* identify bitflips in erased pages.
+ * TODO: use DMA to read page in raw mode
*/
- if (randomized) {
- /* TODO: use DMA to read page in raw mode */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- }
+ if (randomized)
+ nand_change_read_column_op(nand, data_off,
+ data, ecc->size,
+ false);
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1187,12 +1186,12 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
int ret;
if (data_off != *cur_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
+ nand_change_write_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
+ nand_change_write_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -1228,8 +1227,8 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN,
- offset + mtd->writesize, -1);
+ nand_change_write_column_op(nand, offset + mtd->writesize,
+ NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
@@ -1246,6 +1245,8 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1279,14 +1280,14 @@ static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
{
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
chip->ecc.steps);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
}
@@ -1299,6 +1300,8 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
int ret, i, cur_off = 0;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1330,13 +1333,13 @@ static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
buf, page);
}
@@ -1349,6 +1352,8 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1370,7 +1375,7 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
@@ -1382,6 +1387,8 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1400,7 +1407,7 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
@@ -1430,6 +1437,8 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
sunxi_nfc_randomizer_config(mtd, page, false);
sunxi_nfc_randomizer_enable(mtd);
@@ -1460,7 +1469,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
NULL, page);
- return 0;
+ return nand_prog_page_end_op(chip);
pio_fallback:
return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
@@ -1476,6 +1485,8 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1512,6 +1523,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1533,41 +1546,33 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
chip->pagebuf = -1;
- return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, 1, page);
}
static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- int ret, status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ int ret;
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ ret = chip->ecc.write_page(mtd, chip, chip->data_buf, 1, page);
if (ret)
return ret;
/* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static const s32 tWB_lut[] = {6, 12, 16, 20};
@@ -1853,8 +1858,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
/* Add ECC info retrieval from DT */
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
- if (ecc->strength <= strengths[i])
+ if (ecc->strength <= strengths[i]) {
+ /*
+ * Update ecc->strength value with the actual strength
+ * that will be used by the ECC engine.
+ */
+ ecc->strength = strengths[i];
break;
+ }
}
if (i >= ARRAY_SIZE(strengths)) {
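
The ecc->strength update above matters because the engine can only be programmed with a few discrete strengths. A sketch with hypothetical values, rounding the requested strength up to the next supported level and recording the choice so the rest of the ECC layer sees the real value:

	static const u8 strengths[] = { 16, 24, 28, 32, 40 };	/* assumed */
	int i;

	for (i = 0; i < ARRAY_SIZE(strengths); i++) {
		if (ecc->strength <= strengths[i]) {
			ecc->strength = strengths[i];	/* value actually used */
			break;
		}
	}
	if (i >= ARRAY_SIZE(strengths))
		return -ENOTSUPP;	/* requested strength too high */
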
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 766906f03943..c5bee00b7f5e 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -329,7 +329,7 @@ static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, *pos, -1);
+ nand_change_read_column_op(chip, *pos, NULL, 0, false);
} else {
tango_read_buf(mtd, *buf, len);
*buf += len;
@@ -344,7 +344,7 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, *pos, -1);
+ nand_change_write_column_op(chip, *pos, NULL, 0, false);
} else {
tango_write_buf(mtd, *buf, len);
*buf += len;
@@ -427,7 +427,7 @@ static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *buf, int oob_required, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, buf, chip->oob_poi);
return 0;
}
@@ -435,23 +435,15 @@ static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const u8 *buf, int oob_required, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, buf, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, NULL, chip->oob_poi);
return 0;
}
@@ -459,11 +451,9 @@ static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, NULL, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- chip->waitfunc(mtd, chip);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
@@ -590,7 +580,6 @@ static int chip_init(struct device *dev, struct device_node *np)
ecc->write_page = tango_write_page;
ecc->read_oob = tango_read_oob;
ecc->write_oob = tango_write_oob;
- ecc->options = NAND_ECC_CUSTOM_PAGE_ACCESS;
err = nand_scan_tail(mtd);
if (err)
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 84dbf32332e1..dcaa924502de 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -192,6 +192,7 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
{
struct tmio_nand *tmio = mtd_to_tmio(mtd);
long timeout;
+ u8 status;
/* enable RDYREQ interrupt */
tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
@@ -212,8 +213,8 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
}
- nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return nand_chip->read_byte(mtd);
+ nand_status_op(nand_chip, &status);
+ return status;
}
/*
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 8037d4b48a05..80d31a58e558 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -560,7 +560,7 @@ static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
int eccsize = chip->ecc.size;
int stat;
- vf610_nfc_read_buf(mtd, buf, eccsize);
+ nand_read_page_op(chip, page, 0, buf, eccsize);
if (oob_required)
vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -580,7 +580,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
{
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
- vf610_nfc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -588,7 +588,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc->use_hw_ecc = true;
nfc->write_sz = mtd->writesize + mtd->oobsize;
- return 0;
+ return nand_prog_page_end_op(chip);
}
static const struct of_device_id vf610_nfc_dt_ids[] = {
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index dcae2f6a2b11..9dc15748947b 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -4,8 +4,7 @@ menuconfig MTD_ONENAND
depends on HAS_IOMEM
help
This enables support for accessing all types of OneNAND flash
- devices. For further information see
- <http://www.samsung.com/Products/Semiconductor/OneNAND/index.htm>
+ devices.
if MTD_ONENAND
@@ -26,9 +25,11 @@ config MTD_ONENAND_GENERIC
config MTD_ONENAND_OMAP2
tristate "OneNAND on OMAP2/OMAP3 support"
depends on ARCH_OMAP2 || ARCH_OMAP3
+ depends on OF || COMPILE_TEST
help
- Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
+ Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
via the GPMC memory controller.
+ Enable dmaengine and gpiolib for better performance.
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 24a1388d3031..87c34f607a75 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -28,19 +28,18 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/mach/flash.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-
-#include <linux/omap-dma.h>
#define DRIVER_NAME "omap2-onenand"
@@ -50,24 +49,17 @@ struct omap2_onenand {
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
- unsigned int mem_size;
- int gpio_irq;
+ struct gpio_desc *int_gpiod;
struct mtd_info mtd;
struct onenand_chip onenand;
struct completion irq_done;
struct completion dma_done;
- int dma_channel;
- int freq;
- int (*setup)(void __iomem *base, int *freq_ptr);
- struct regulator *regulator;
- u8 flags;
+ struct dma_chan *dma_chan;
};
-static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap2_onenand_dma_complete_func(void *completion)
{
- struct omap2_onenand *c = data;
-
- complete(&c->dma_done);
+ complete(completion);
}
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
@@ -90,6 +82,65 @@ static inline void write_reg(struct omap2_onenand *c, unsigned short value,
writew(value, c->onenand.base + reg);
}
+static int omap2_onenand_set_cfg(struct omap2_onenand *c,
+ bool sr, bool sw,
+ int latency, int burst_len)
+{
+ unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+
+ reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
+
+ switch (burst_len) {
+ case 0: /* continuous */
+ break;
+ case 4:
+ reg |= ONENAND_SYS_CFG1_BL_4;
+ break;
+ case 8:
+ reg |= ONENAND_SYS_CFG1_BL_8;
+ break;
+ case 16:
+ reg |= ONENAND_SYS_CFG1_BL_16;
+ break;
+ case 32:
+ reg |= ONENAND_SYS_CFG1_BL_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (latency > 5)
+ reg |= ONENAND_SYS_CFG1_HF;
+ if (latency > 7)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ if (sr)
+ reg |= ONENAND_SYS_CFG1_SYNC_READ;
+ if (sw)
+ reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
+
+ write_reg(c, reg, ONENAND_REG_SYS_CFG1);
+
+ return 0;
+}
+
+static int omap2_onenand_get_freq(int ver)
+{
+ switch ((ver >> 4) & 0xf) {
+ case 0:
+ return 40;
+ case 1:
+ return 54;
+ case 2:
+ return 66;
+ case 3:
+ return 83;
+ case 4:
+ return 104;
+ }
+
+ return -EINVAL;
+}
+
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
@@ -153,28 +204,22 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
- if (c->flags & ONENAND_IN_OMAP34XX)
- /* Add a delay to let GPIO settle */
- syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ /* Add a delay to let GPIO settle */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
reinit_completion(&c->irq_done);
- if (c->gpio_irq) {
- result = gpio_get_value(c->gpio_irq);
- if (result == -1) {
- ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
- intr = read_reg(c, ONENAND_REG_INTERRUPT);
- wait_err("gpio error", state, ctrl, intr);
- return -EIO;
- }
- } else
- result = 0;
- if (result == 0) {
+ result = gpiod_get_value(c->int_gpiod);
+ if (result < 0) {
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ wait_err("gpio error", state, ctrl, intr);
+ return result;
+ } else if (result == 0) {
int retry_cnt = 0;
retry:
- result = wait_for_completion_timeout(&c->irq_done,
- msecs_to_jiffies(20));
- if (result == 0) {
+ if (!wait_for_completion_io_timeout(&c->irq_done,
+ msecs_to_jiffies(20))) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO &&
@@ -291,9 +336,42 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
return 0;
}
-#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
+static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
+ dma_addr_t src, dma_addr_t dst,
+ size_t count)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+ if (!tx) {
+ dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ reinit_completion(&c->dma_done);
+
+ tx->callback = omap2_onenand_dma_complete_func;
+ tx->callback_param = &c->dma_done;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
+ return -EIO;
+ }
+
+ dma_async_issue_pending(c->dma_chan);
+
+ if (!wait_for_completion_io_timeout(&c->dma_done,
+ msecs_to_jiffies(20))) {
+ dmaengine_terminate_sync(c->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count)
{
@@ -301,10 +379,9 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
size_t xtra;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -341,25 +418,10 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -371,7 +433,7 @@ out_copy:
return 0;
}
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count)
{
@@ -379,9 +441,8 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -412,25 +473,10 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
return -1;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -442,136 +488,6 @@ out_copy:
return 0;
}
-#else
-
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy(buffer, (__force void *)(this->base + bram_offset),
- count);
- return 0;
- }
-
- dma_src = c->phys_base + bram_offset;
- dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count / 4, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
-
- return 0;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy((__force void *)(this->base + bram_offset), buffer,
- count);
- return 0;
- }
-
- dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
- DMA_TO_DEVICE);
- dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_src)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
- count / 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
-
- return 0;
-}
-
-#else
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-static struct platform_driver omap2_onenand_driver;
-
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -583,168 +499,117 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
-static int omap2_onenand_enable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_enable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't enable regulator\n");
-
- return ret;
-}
-
-static int omap2_onenand_disable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_disable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't disable regulator\n");
-
- return ret;
-}
-
static int omap2_onenand_probe(struct platform_device *pdev)
{
- struct omap_onenand_platform_data *pdata;
- struct omap2_onenand *c;
- struct onenand_chip *this;
- int r;
+ u32 val;
+ dma_cap_mask_t mask;
+ int freq, latency, r;
struct resource *res;
+ struct omap2_onenand *c;
+ struct gpmc_onenand_info info;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "error getting memory resource\n");
+ return -EINVAL;
+ }
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata == NULL) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -ENODEV;
+ r = of_property_read_u32(np, "reg", &val);
+ if (r) {
+ dev_err(dev, "reg not found in DT\n");
+ return r;
}
- c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
+ c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
if (!c)
return -ENOMEM;
init_completion(&c->irq_done);
init_completion(&c->dma_done);
- c->flags = pdata->flags;
- c->gpmc_cs = pdata->cs;
- c->gpio_irq = pdata->gpio_irq;
- c->dma_channel = pdata->dma_channel;
- if (c->dma_channel < 0) {
- /* if -1, don't use DMA */
- c->gpio_irq = 0;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- r = -EINVAL;
- dev_err(&pdev->dev, "error getting memory resource\n");
- goto err_kfree;
- }
-
+ c->gpmc_cs = val;
c->phys_base = res->start;
- c->mem_size = resource_size(res);
-
- if (request_mem_region(c->phys_base, c->mem_size,
- pdev->dev.driver->name) == NULL) {
- dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
- c->phys_base, c->mem_size);
- r = -EBUSY;
- goto err_kfree;
- }
- c->onenand.base = ioremap(c->phys_base, c->mem_size);
- if (c->onenand.base == NULL) {
- r = -ENOMEM;
- goto err_release_mem_region;
- }
- if (pdata->onenand_setup != NULL) {
- r = pdata->onenand_setup(c->onenand.base, &c->freq);
- if (r < 0) {
- dev_err(&pdev->dev, "Onenand platform setup failed: "
- "%d\n", r);
- goto err_iounmap;
- }
- c->setup = pdata->onenand_setup;
+ c->onenand.base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(c->onenand.base))
+ return PTR_ERR(c->onenand.base);
+
+ c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
+ if (IS_ERR(c->int_gpiod)) {
+ r = PTR_ERR(c->int_gpiod);
+ /* Just try again if this happens */
+ if (r != -EPROBE_DEFER)
+ dev_err(dev, "error getting gpio: %d\n", r);
+ return r;
}
- if (c->gpio_irq) {
- if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
- dev_err(&pdev->dev, "Failed to request GPIO%d for "
- "OneNAND\n", c->gpio_irq);
- goto err_iounmap;
- }
- gpio_direction_input(c->gpio_irq);
+ if (c->int_gpiod) {
+ r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
+ omap2_onenand_interrupt,
+ IRQF_TRIGGER_RISING, "onenand", c);
+ if (r)
+ return r;
- if ((r = request_irq(gpio_to_irq(c->gpio_irq),
- omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
- pdev->dev.driver->name, c)) < 0)
- goto err_release_gpio;
+ c->onenand.wait = omap2_onenand_wait;
}
- if (c->dma_channel >= 0) {
- r = omap_request_dma(0, pdev->dev.driver->name,
- omap2_onenand_dma_cb, (void *) c,
- &c->dma_channel);
- if (r == 0) {
- omap_set_dma_write_mode(c->dma_channel,
- OMAP_DMA_WRITE_NON_POSTED);
- omap_set_dma_src_data_pack(c->dma_channel, 1);
- omap_set_dma_src_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- omap_set_dma_dest_data_pack(c->dma_channel, 1);
- omap_set_dma_dest_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- } else {
- dev_info(&pdev->dev,
- "failed to allocate DMA for OneNAND, "
- "using PIO instead\n");
- c->dma_channel = -1;
- }
- }
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
- dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
- "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
- c->onenand.base, c->freq);
+ c->dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (c->dma_chan) {
+ c->onenand.read_bufferram = omap2_onenand_read_bufferram;
+ c->onenand.write_bufferram = omap2_onenand_write_bufferram;
+ }
c->pdev = pdev;
c->mtd.priv = &c->onenand;
+ c->mtd.dev.parent = dev;
+ mtd_set_of_node(&c->mtd, dev->of_node);
- c->mtd.dev.parent = &pdev->dev;
- mtd_set_of_node(&c->mtd, pdata->of_node);
-
- this = &c->onenand;
- if (c->dma_channel >= 0) {
- this->wait = omap2_onenand_wait;
- if (c->flags & ONENAND_IN_OMAP34XX) {
- this->read_bufferram = omap3_onenand_read_bufferram;
- this->write_bufferram = omap3_onenand_write_bufferram;
- } else {
- this->read_bufferram = omap2_onenand_read_bufferram;
- this->write_bufferram = omap2_onenand_write_bufferram;
- }
- }
+ dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
+ c->gpmc_cs, c->phys_base, c->onenand.base,
+ c->dma_chan ? "DMA" : "PIO");
- if (pdata->regulator_can_sleep) {
- c->regulator = regulator_get(&pdev->dev, "vonenand");
- if (IS_ERR(c->regulator)) {
- dev_err(&pdev->dev, "Failed to get regulator\n");
- r = PTR_ERR(c->regulator);
- goto err_release_dma;
+ if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ goto err_release_dma;
+
+ freq = omap2_onenand_get_freq(c->onenand.version_id);
+ if (freq > 0) {
+ switch (freq) {
+ case 104:
+ latency = 7;
+ break;
+ case 83:
+ latency = 6;
+ break;
+ case 66:
+ latency = 5;
+ break;
+ case 54:
+ latency = 4;
+ break;
+ default: /* 40 MHz or lower */
+ latency = 3;
+ break;
}
- c->onenand.enable = omap2_onenand_enable;
- c->onenand.disable = omap2_onenand_disable;
- }
- if (pdata->skip_initial_unlocking)
- this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;
+ r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
+ freq, latency, &info);
+ if (r)
+ goto err_release_onenand;
- if ((r = onenand_scan(&c->mtd, 1)) < 0)
- goto err_release_regulator;
+ r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
+ latency, info.burst_len);
+ if (r)
+ goto err_release_onenand;
- r = mtd_device_register(&c->mtd, pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ if (info.sync_read || info.sync_write)
+ dev_info(dev, "optimized timings for %d MHz\n", freq);
+ }
+
+ r = mtd_device_register(&c->mtd, NULL, 0);
if (r)
goto err_release_onenand;
@@ -754,22 +619,9 @@ static int omap2_onenand_probe(struct platform_device *pdev)
err_release_onenand:
onenand_release(&c->mtd);
-err_release_regulator:
- regulator_put(c->regulator);
err_release_dma:
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
- if (c->gpio_irq)
- free_irq(gpio_to_irq(c->gpio_irq), c);
-err_release_gpio:
- if (c->gpio_irq)
- gpio_free(c->gpio_irq);
-err_iounmap:
- iounmap(c->onenand.base);
-err_release_mem_region:
- release_mem_region(c->phys_base, c->mem_size);
-err_kfree:
- kfree(c);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
return r;
}
@@ -779,27 +631,26 @@ static int omap2_onenand_remove(struct platform_device *pdev)
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
onenand_release(&c->mtd);
- regulator_put(c->regulator);
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
omap2_onenand_shutdown(pdev);
- if (c->gpio_irq) {
- free_irq(gpio_to_irq(c->gpio_irq), c);
- gpio_free(c->gpio_irq);
- }
- iounmap(c->onenand.base);
- release_mem_region(c->phys_base, c->mem_size);
- kfree(c);
return 0;
}
+static const struct of_device_id omap2_onenand_id_table[] = {
+ { .compatible = "ti,omap2-onenand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
+
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
.remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = omap2_onenand_id_table,
},
};
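
The omap2 OneNAND rework replaces the legacy omap-dma calls with the generic dmaengine memcpy flow: request any DMA_MEMCPY-capable channel, prepare a descriptor, complete a struct completion from its callback, then issue and wait. A condensed, self-contained sketch of that sequence:

static int memcpy_dma_sketch(struct dma_chan *chan, dma_addr_t dst,
			     dma_addr_t src, size_t count)
{
	struct dma_async_tx_descriptor *tx;
	DECLARE_COMPLETION_ONSTACK(done);
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, count, 0);
	if (!tx)
		return -EIO;

	tx->callback = omap2_onenand_dma_complete_func;
	tx->callback_param = &done;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Nothing moves until the pending queue is kicked. */
	dma_async_issue_pending(chan);

	if (!wait_for_completion_io_timeout(&done, msecs_to_jiffies(20))) {
		dmaengine_terminate_sync(chan);
		return -ETIMEDOUT;
	}

	return 0;
}
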
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1a6d0e367b89..979f4031f23c 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1383,15 +1383,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (from >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
stats = mtd->ecc_stats;
readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
@@ -1448,38 +1439,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
}
/**
- * onenand_read - [MTD Interface] Read data from flash
- * @param mtd MTD device structure
- * @param from offset to read from
- * @param len number of bytes to read
- * @param retlen pointer to variable to store the number of read bytes
- * @param buf the databuffer to put data
- *
- * Read with ecc
-*/
-static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_4KB_PAGE(this) ?
- onenand_mlc_read_ops_nolock(mtd, from, &ops) :
- onenand_read_ops_nolock(mtd, from, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_read_oob - [MTD Interface] Read main and/or out-of-band
* @param mtd: MTD device structure
* @param from: offset to read from
@@ -2056,15 +2015,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(to >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (to >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to write past end of device\n",
- __func__);
- return -EINVAL;
- }
-
oobbuf = this->oob_buf;
oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
@@ -2129,35 +2079,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
}
/**
- * onenand_write - [MTD Interface] write buffer to FLASH
- * @param mtd MTD device structure
- * @param to offset to write to
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of written bytes
- * @param buf the data to write
- *
- * Write with ECC
- */
-static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = (u_char *) buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_WRITING);
- ret = onenand_write_ops_nolock(mtd, to, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band
* @param mtd: MTD device structure
* @param to: offset to write
@@ -4038,8 +3959,6 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->_erase = onenand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = onenand_read;
- mtd->_write = onenand_write;
mtd->_read_oob = onenand_read_oob;
mtd->_write_oob = onenand_write_oob;
mtd->_panic_write = onenand_panic_write;
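
Dropping mtd->_read/_write works because the mtd core is assumed to route plain data reads and writes through the _read_oob/_write_oob handlers when no dedicated handler is set. A sketch of the shape of that fallback (not the exact mtdcore code):

	/* A data-only request is just an OOB request with no oobbuf. */
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
		/* .ooblen = 0, .oobbuf = NULL */
	};
	int ret = mtd_read_oob(mtd, from, &ops);

	*retlen = ops.retlen;
	return ret;
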
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index af0ac1a7bf8f..2e9d076e445a 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -25,8 +25,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <asm/mach/flash.h>
-
#include "samsung.h"
enum soc_type {
@@ -129,16 +127,13 @@ struct s3c_onenand {
struct platform_device *pdev;
enum soc_type type;
void __iomem *base;
- struct resource *base_res;
void __iomem *ahb_addr;
- struct resource *ahb_res;
int bootram_command;
- void __iomem *page_buf;
- void __iomem *oob_buf;
+ void *page_buf;
+ void *oob_buf;
unsigned int (*mem_addr)(int fba, int fpa, int fsa);
unsigned int (*cmd_map)(unsigned int type, unsigned int val);
void __iomem *dma_addr;
- struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
};
@@ -413,8 +408,8 @@ static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
/*
* Emulate Two BufferRAMs and access with 4 bytes pointer
*/
- m = (unsigned int *) onenand->page_buf;
- s = (unsigned int *) onenand->oob_buf;
+ m = onenand->page_buf;
+ s = onenand->oob_buf;
if (index) {
m += (this->writesize >> 2);
@@ -486,11 +481,11 @@ static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
unsigned char *p;
if (area == ONENAND_DATARAM) {
- p = (unsigned char *) onenand->page_buf;
+ p = onenand->page_buf;
if (index == 1)
p += this->writesize;
} else {
- p = (unsigned char *) onenand->oob_buf;
+ p = onenand->oob_buf;
if (index == 1)
p += mtd->oobsize;
}
@@ -851,15 +846,14 @@ static int s3c_onenand_probe(struct platform_device *pdev)
/* No need to check pdata. the platform data is optional */
size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
- mtd = kzalloc(size, GFP_KERNEL);
+ mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!mtd)
return -ENOMEM;
- onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
- if (!onenand) {
- err = -ENOMEM;
- goto onenand_fail;
- }
+ onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
+ GFP_KERNEL);
+ if (!onenand)
+ return -ENOMEM;
this = (struct onenand_chip *) &mtd[1];
mtd->priv = this;
@@ -870,26 +864,12 @@ static int s3c_onenand_probe(struct platform_device *pdev)
s3c_onenand_setup(mtd);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENOENT;
- goto ahb_resource_failed;
- }
+ onenand->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->base))
+ return PTR_ERR(onenand->base);
- onenand->base_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->base_res) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- err = -EBUSY;
- goto resource_failed;
- }
+ onenand->phys_base = r->start;
- onenand->base = ioremap(r->start, resource_size(r));
- if (!onenand->base) {
- dev_err(&pdev->dev, "failed to map memory resource\n");
- err = -EFAULT;
- goto ioremap_failed;
- }
/* Set onenand_chip also */
this->base = onenand->base;
@@ -898,40 +878,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (onenand->type != TYPE_S5PC110) {
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no buffer memory resource defined\n");
- err = -ENOENT;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->ahb_res) {
- dev_err(&pdev->dev, "failed to request buffer memory resource\n");
- err = -EBUSY;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_addr = ioremap(r->start, resource_size(r));
- if (!onenand->ahb_addr) {
- dev_err(&pdev->dev, "failed to map buffer memory resource\n");
- err = -EINVAL;
- goto ahb_ioremap_failed;
- }
+ onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->ahb_addr))
+ return PTR_ERR(onenand->ahb_addr);
/* Allocate 4KiB BufferRAM */
- onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!onenand->page_buf) {
- err = -ENOMEM;
- goto page_buf_fail;
- }
+ onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
+ GFP_KERNEL);
+ if (!onenand->page_buf)
+ return -ENOMEM;
/* Allocate 128 SpareRAM */
- onenand->oob_buf = kzalloc(128, GFP_KERNEL);
- if (!onenand->oob_buf) {
- err = -ENOMEM;
- goto oob_buf_fail;
- }
+ onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
+ if (!onenand->oob_buf)
+ return -ENOMEM;
/* S3C doesn't handle subpage write */
mtd->subpage_sft = 0;
@@ -939,28 +899,9 @@ static int s3c_onenand_probe(struct platform_device *pdev)
} else { /* S5PC110 */
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no dma memory resource defined\n");
- err = -ENOENT;
- goto dma_resource_failed;
- }
-
- onenand->dma_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->dma_res) {
- dev_err(&pdev->dev, "failed to request dma memory resource\n");
- err = -EBUSY;
- goto dma_resource_failed;
- }
-
- onenand->dma_addr = ioremap(r->start, resource_size(r));
- if (!onenand->dma_addr) {
- dev_err(&pdev->dev, "failed to map dma memory resource\n");
- err = -EINVAL;
- goto dma_ioremap_failed;
- }
-
- onenand->phys_base = onenand->base_res->start;
+ onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->dma_addr))
+ return PTR_ERR(onenand->dma_addr);
s5pc110_dma_ops = s5pc110_dma_poll;
/* Interrupt support */
@@ -968,19 +909,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (r) {
init_completion(&onenand->complete);
s5pc110_dma_ops = s5pc110_dma_irq;
- err = request_irq(r->start, s5pc110_onenand_irq,
- IRQF_SHARED, "onenand", &onenand);
+ err = devm_request_irq(&pdev->dev, r->start,
+ s5pc110_onenand_irq,
+ IRQF_SHARED, "onenand",
+ &onenand);
if (err) {
dev_err(&pdev->dev, "failed to get irq\n");
- goto scan_failed;
+ return err;
}
}
}
- if (onenand_scan(mtd, 1)) {
- err = -EFAULT;
- goto scan_failed;
- }
+ err = onenand_scan(mtd, 1);
+ if (err)
+ return err;
if (onenand->type != TYPE_S5PC110) {
/* S3C doesn't handle subpage write */
@@ -994,40 +936,15 @@ static int s3c_onenand_probe(struct platform_device *pdev)
err = mtd_device_parse_register(mtd, NULL, NULL,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
+ if (err) {
+ dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
+ onenand_release(mtd);
+ return err;
+ }
platform_set_drvdata(pdev, mtd);
return 0;
-
-scan_failed:
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
-dma_ioremap_failed:
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
- kfree(onenand->oob_buf);
-oob_buf_fail:
- kfree(onenand->page_buf);
-page_buf_fail:
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
-ahb_ioremap_failed:
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
-dma_resource_failed:
-ahb_resource_failed:
- iounmap(onenand->base);
-ioremap_failed:
- if (onenand->base_res)
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-resource_failed:
- kfree(onenand);
-onenand_fail:
- kfree(mtd);
- return err;
}
static int s3c_onenand_remove(struct platform_device *pdev)
@@ -1035,25 +952,7 @@ static int s3c_onenand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
onenand_release(mtd);
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
-
- iounmap(onenand->base);
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-
- kfree(onenand->oob_buf);
- kfree(onenand->page_buf);
- kfree(onenand);
- kfree(mtd);
+
return 0;
}
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
index 5fe0079ea5ed..8893dc82a5c8 100644
--- a/drivers/mtd/parsers/sharpslpart.c
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -192,7 +192,7 @@ static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
/* create physical-logical table */
for (block_num = 0; block_num < phymax; block_num++) {
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
if (mtd_block_isbad(mtd, block_adr))
continue;
@@ -219,7 +219,7 @@ exit:
return ret;
}
-void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
{
kfree(ftl->log2phy);
}
@@ -244,7 +244,7 @@ static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
return -EINVAL;
block_num = ftl->log2phy[log_num];
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
block_ofs = mtd_mod_by_eb((u32)from, mtd);
err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
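
The (loff_t) casts above fix a 32-bit multiply: block_num and erasesize are both 32-bit, so their product wraps before it is widened to loff_t. A worked illustration:

	u32 block_num = 70000;
	u32 erasesize = 0x20000;			/* 128 KiB */
	loff_t bad  = block_num * erasesize;		/* wraps to 0x22E00000 */
	loff_t good = (loff_t)block_num * erasesize;	/* 0x222E00000, correct */
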
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 75a2bc447a99..4b8e9183489a 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -58,6 +58,7 @@ struct cqspi_flash_pdata {
u8 data_width;
u8 cs;
bool registered;
+ bool use_direct_mode;
};
struct cqspi_st {
@@ -68,6 +69,7 @@ struct cqspi_st {
void __iomem *iobase;
void __iomem *ahb_base;
+ resource_size_t ahb_size;
struct completion transfer_complete;
struct mutex bus_mutex;
@@ -103,6 +105,7 @@ struct cqspi_st {
/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
@@ -450,8 +453,7 @@ static int cqspi_command_write_addr(struct spi_nor *nor,
return cqspi_exec_flash_cmd(cqspi, reg);
}
-static int cqspi_indirect_read_setup(struct spi_nor *nor,
- const unsigned int from_addr)
+static int cqspi_read_setup(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -459,8 +461,6 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
unsigned int dummy_clk = 0;
unsigned int reg;
- writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
-
reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
@@ -493,8 +493,8 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_read_execute(struct spi_nor *nor,
- u8 *rxbuf, const unsigned n_rx)
+static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
+ loff_t from_addr, const size_t n_rx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -504,6 +504,7 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor,
unsigned int bytes_to_read = 0;
int ret = 0;
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
/* Clear all interrupts. */
@@ -570,8 +571,7 @@ failrd:
return ret;
}
-static int cqspi_indirect_write_setup(struct spi_nor *nor,
- const unsigned int to_addr)
+static int cqspi_write_setup(struct spi_nor *nor)
{
unsigned int reg;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -584,8 +584,6 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
reg = cqspi_calc_rdreg(nor, nor->program_opcode);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
- writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
-
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (nor->addr_width - 1);
@@ -593,8 +591,8 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_write_execute(struct spi_nor *nor,
- const u8 *txbuf, const unsigned n_tx)
+static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
+ const u8 *txbuf, const size_t n_tx)
{
const unsigned int page_size = nor->page_size;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -604,6 +602,7 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
unsigned int write_bytes;
int ret;
+ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
/* Clear all interrupts. */
@@ -894,17 +893,22 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
size_t len, const u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 0);
if (ret)
return ret;
- ret = cqspi_indirect_write_setup(nor, to);
+ ret = cqspi_write_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_write_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_toio(cqspi->ahb_base + to, buf, len);
+ else
+ ret = cqspi_indirect_write_execute(nor, to, buf, len);
if (ret)
return ret;
@@ -914,17 +918,22 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
size_t len, u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 1);
if (ret)
return ret;
- ret = cqspi_indirect_read_setup(nor, from);
+ ret = cqspi_read_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_read_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_fromio(buf, cqspi->ahb_base + from, len);
+ else
+ ret = cqspi_indirect_read_execute(nor, buf, from, len);
if (ret)
return ret;
@@ -1059,6 +1068,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
+ u32 reg;
+
cqspi_controller_enable(cqspi, 0);
/* Configure the remap address register, no remap */
@@ -1081,6 +1092,11 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+ /* Enable Direct Access Controller */
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
cqspi_controller_enable(cqspi, 1);
}
@@ -1156,6 +1172,12 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
goto err;
f_pdata->registered = true;
+
+ if (mtd->size <= cqspi->ahb_size) {
+ f_pdata->use_direct_mode = true;
+ dev_dbg(nor->dev, "using direct mode for %s\n",
+ mtd->name);
+ }
}
return 0;
@@ -1215,6 +1237,7 @@ static int cqspi_probe(struct platform_device *pdev)
dev_err(dev, "Cannot remap AHB address.\n");
return PTR_ERR(cqspi->ahb_base);
}
+ cqspi->ahb_size = resource_size(res_ahb);
init_completion(&cqspi->transfer_complete);
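Taken together, the cadence-quadspi hunks move the start-address writes out of the setup helpers and into the execute paths, so setup no longer depends on the access mode, and each transfer is then dispatched at run time: flashes that fit entirely inside the memory-mapped AHB window (mtd->size <= cqspi->ahb_size, recorded at probe) are accessed directly via memcpy_toio()/memcpy_fromio(), everything else falls back to the indirect FIFO engine. Condensed into one hypothetical helper (cqspi_do_read() is a sketch, not a function from the patch):

static ssize_t cqspi_do_read(struct spi_nor *nor, loff_t from,
			     size_t len, u_char *buf)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int ret = 0;

	if (f_pdata->use_direct_mode)
		/* whole flash is visible through the AHB window */
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
	else
		/* FIFO engine; it programs INDIRECTRDSTARTADDR itself now */
		ret = cqspi_indirect_read_execute(nor, buf, from, len);

	return ret ? ret : len;
}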
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index f17d22435bfc..2901c7bd9e30 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -801,10 +801,10 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
}
static const struct of_device_id fsl_qspi_dt_ids[] = {
- { .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
- { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
- { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
- { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
+ { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
{ .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
{ /* sentinel */ }
};
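The fsl-quadspi hunk can drop the (void *) casts because of_device_id.data is declared const void *: a pointer to a const object assigns to it directly, and the cast only served to silently discard the const qualifier. (The cast on the ls1021a line is left untouched by this patch.) Abridged:

/* include/linux/mod_devicetable.h (abridged) */
struct of_device_id {
	char		compatible[128];
	const void	*data;		/* const - no cast needed */
};

static const struct fsl_qspi_devtype_data vybrid_data = { /* ... */ };

static const struct of_device_id ids[] = {
	{ .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
	{ /* sentinel */ }
};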
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index ef034d898a23..699951523179 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -138,7 +138,6 @@
* @erase_64k: 64k erase supported
 * @opcodes: Opcodes which are supported. These are programmed by BIOS
* before it locks down the controller.
- * @preopcodes: Preopcodes which are supported.
*/
struct intel_spi {
struct device *dev;
@@ -155,7 +154,6 @@ struct intel_spi {
bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
- u8 preopcodes[2];
};
static bool writeable;
@@ -400,10 +398,6 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->opcodes[i] = opmenu0 >> i * 8;
ispi->opcodes[i + 4] = opmenu1 >> i * 8;
}
-
- val = readl(ispi->sregs + PREOP_OPTYPE);
- ispi->preopcodes[0] = val;
- ispi->preopcodes[1] = val >> 8;
}
}
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index abe455ccd68b..5442993b71ff 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -110,7 +110,7 @@
#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
-struct mt8173_nor {
+struct mtk_nor {
struct spi_nor nor;
struct device *dev;
void __iomem *base; /* nor flash base address */
@@ -118,48 +118,48 @@ struct mt8173_nor {
struct clk *nor_clk;
};
-static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor)
{
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
switch (nor->read_proto) {
case SNOR_PROTO_1_1_1:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
+ writeb(MTK_NOR_FAST_READ, mtk_nor->base +
MTK_NOR_CFG1_REG);
break;
case SNOR_PROTO_1_1_2:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
case SNOR_PROTO_1_1_4:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA4_REG);
- writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
default:
- writeb(MTK_NOR_DUAL_DISABLE, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
}
}
-static int mt8173_nor_execute_cmd(struct mt8173_nor *mt8173_nor, u8 cmdval)
+static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
{
int reg;
u8 val = cmdval & 0x1f;
- writeb(cmdval, mt8173_nor->base + MTK_NOR_CMD_REG);
- return readl_poll_timeout(mt8173_nor->base + MTK_NOR_CMD_REG, reg,
+ writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG);
+ return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg,
!(reg & val), 100, 10000);
}
-static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
- u8 *tx, int txlen, u8 *rx, int rxlen)
+static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
+ u8 *tx, int txlen, u8 *rx, int rxlen)
{
int len = 1 + txlen + rxlen;
int i, ret, idx;
@@ -167,26 +167,26 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
if (len > MTK_NOR_MAX_SHIFT)
return -EINVAL;
- writeb(len * 8, mt8173_nor->base + MTK_NOR_CNT_REG);
+ writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG);
/* start at PRGDATA5, go down to PRGDATA0 */
idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
/* opcode */
- writeb(op, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
/* program TX data */
for (i = 0; i < txlen; i++, idx--)
- writeb(tx[i], mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx));
/* clear out rest of TX registers */
while (idx >= 0) {
- writeb(0, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
}
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PRG_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD);
if (ret)
return ret;
@@ -195,20 +195,20 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
/* read out RX data */
for (i = 0; i < rxlen; i++, idx--)
- rx[i] = readb(mt8173_nor->base + MTK_NOR_SHREG(idx));
+ rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx));
return 0;
}
/* Do a WRSR (Write Status Register) command */
-static int mt8173_nor_wr_sr(struct mt8173_nor *mt8173_nor, u8 sr)
+static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
{
- writeb(sr, mt8173_nor->base + MTK_NOR_PRGDATA5_REG);
- writeb(8, mt8173_nor->base + MTK_NOR_CNT_REG);
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WRSR_CMD);
+ writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
+ writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD);
}
-static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor)
{
u8 reg;
@@ -216,27 +216,27 @@ static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
* 0: pre-fetch buffer use for read
* 1: pre-fetch buffer use for page program
*/
- writel(MTK_NOR_WR_BUF_ENABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
0x01 == (reg & 0x01), 100, 10000);
}
-static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor)
{
u8 reg;
- writel(MTK_NOR_WR_BUF_DISABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
10000);
}
-static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor)
{
u8 val;
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
- val = readb(mt8173_nor->base + MTK_NOR_DUAL_REG);
+ val = readb(mtk_nor->base + MTK_NOR_DUAL_REG);
switch (nor->addr_width) {
case 3:
@@ -246,115 +246,115 @@ static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
val |= MTK_NOR_4B_ADDR_EN;
break;
default:
- dev_warn(mt8173_nor->dev, "Unexpected address width %u.\n",
+ dev_warn(mtk_nor->dev, "Unexpected address width %u.\n",
nor->addr_width);
break;
}
- writeb(val, mt8173_nor->base + MTK_NOR_DUAL_REG);
+ writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG);
}
-static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
+static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr)
{
int i;
- mt8173_nor_set_addr_width(mt8173_nor);
+ mtk_nor_set_addr_width(mtk_nor);
for (i = 0; i < 3; i++) {
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR0_REG + i * 4);
addr >>= 8;
}
/* Last register is non-contiguous */
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG);
}
-static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
- u_char *buffer)
+static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length,
+ u_char *buffer)
{
int i, ret;
int addr = (int)from;
u8 *buf = (u8 *)buffer;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
	/* set mode for fast read mode, dual mode or quad mode */
- mt8173_nor_set_read_mode(mt8173_nor);
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_read_mode(mtk_nor);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD);
if (ret < 0)
return ret;
- buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
+ buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG);
}
return length;
}
-static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
- int addr, int length, u8 *data)
+static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor,
+ int addr, int length, u8 *data)
{
int i, ret;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- writeb(*data++, mt8173_nor->base + MTK_NOR_WDATA_REG);
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_WR_CMD);
+ writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD);
if (ret < 0)
return ret;
}
return 0;
}
-static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
- const u8 *buf)
+static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr,
+ const u8 *buf)
{
int i, bufidx, data;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
bufidx = 0;
for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
buf[bufidx + 1]<<8 | buf[bufidx];
bufidx += 4;
- writel(data, mt8173_nor->base + MTK_NOR_PP_DATA_REG);
+ writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG);
}
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD);
}
-static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *buf)
+static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
size_t i;
- ret = mt8173_nor_write_buffer_enable(mt8173_nor);
+ ret = mtk_nor_write_buffer_enable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer enable failed!\n");
return ret;
}
for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
- ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
+ ret = mtk_nor_write_buffer(mtk_nor, to, buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write buffer failed!\n");
+ dev_err(mtk_nor->dev, "write buffer failed!\n");
return ret;
}
to += SFLASH_WRBUF_SIZE;
buf += SFLASH_WRBUF_SIZE;
}
- ret = mt8173_nor_write_buffer_disable(mt8173_nor);
+ ret = mtk_nor_write_buffer_disable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer disable failed!\n");
return ret;
}
if (i < len) {
- ret = mt8173_nor_write_single_byte(mt8173_nor, to,
- (int)(len - i), (u8 *)buf);
+ ret = mtk_nor_write_single_byte(mtk_nor, to,
+ (int)(len - i), (u8 *)buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write single byte failed!\n");
+ dev_err(mtk_nor->dev, "write single byte failed!\n");
return ret;
}
}
@@ -362,72 +362,72 @@ static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
return len;
}
-static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_RDSR:
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_RDSR_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD);
if (ret < 0)
return ret;
if (len == 1)
- *buf = readb(mt8173_nor->base + MTK_NOR_RDSR_REG);
+ *buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG);
else
- dev_err(mt8173_nor->dev, "len should be 1 for read status!\n");
+ dev_err(mtk_nor->dev, "len should be 1 for read status!\n");
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, NULL, 0, buf, len);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len);
break;
}
return ret;
}
-static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_WRSR:
/* We only handle 1 byte */
- ret = mt8173_nor_wr_sr(mt8173_nor, *buf);
+ ret = mtk_nor_wr_sr(mtk_nor, *buf);
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, buf, len, NULL, 0);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0);
if (ret)
- dev_warn(mt8173_nor->dev, "write reg failure!\n");
+ dev_warn(mtk_nor->dev, "write reg failure!\n");
break;
}
return ret;
}
-static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor)
{
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->nor_clk);
}
-static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
{
int ret;
- ret = clk_prepare_enable(mt8173_nor->spi_clk);
+ ret = clk_prepare_enable(mtk_nor->spi_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(mt8173_nor->nor_clk);
+ ret = clk_prepare_enable(mtk_nor->nor_clk);
if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
return ret;
}
return 0;
}
-static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
+static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
const struct spi_nor_hwcaps hwcaps = {
@@ -439,18 +439,18 @@ static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct spi_nor *nor;
/* initialize controller to accept commands */
- writel(MTK_NOR_ENABLE_SF_CMD, mt8173_nor->base + MTK_NOR_WRPROT_REG);
+ writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG);
- nor = &mt8173_nor->nor;
- nor->dev = mt8173_nor->dev;
- nor->priv = mt8173_nor;
+ nor = &mtk_nor->nor;
+ nor->dev = mtk_nor->dev;
+ nor->priv = mtk_nor;
spi_nor_set_flash_node(nor, flash_node);
/* fill the hooks to spi nor */
- nor->read = mt8173_nor_read;
- nor->read_reg = mt8173_nor_read_reg;
- nor->write = mt8173_nor_write;
- nor->write_reg = mt8173_nor_write_reg;
+ nor->read = mtk_nor_read;
+ nor->read_reg = mtk_nor_read_reg;
+ nor->write = mtk_nor_write;
+ nor->write_reg = mtk_nor_write_reg;
nor->mtd.name = "mtk_nor";
/* initialized with NULL */
ret = spi_nor_scan(nor, NULL, &hwcaps);
@@ -465,34 +465,34 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
struct device_node *flash_np;
struct resource *res;
int ret;
- struct mt8173_nor *mt8173_nor;
+ struct mtk_nor *mtk_nor;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
- mt8173_nor = devm_kzalloc(&pdev->dev, sizeof(*mt8173_nor), GFP_KERNEL);
- if (!mt8173_nor)
+ mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL);
+ if (!mtk_nor)
return -ENOMEM;
- platform_set_drvdata(pdev, mt8173_nor);
+ platform_set_drvdata(pdev, mtk_nor);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt8173_nor->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mt8173_nor->base))
- return PTR_ERR(mt8173_nor->base);
+ mtk_nor->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mtk_nor->base))
+ return PTR_ERR(mtk_nor->base);
- mt8173_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
- if (IS_ERR(mt8173_nor->spi_clk))
- return PTR_ERR(mt8173_nor->spi_clk);
+ mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(mtk_nor->spi_clk))
+ return PTR_ERR(mtk_nor->spi_clk);
- mt8173_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
- if (IS_ERR(mt8173_nor->nor_clk))
- return PTR_ERR(mt8173_nor->nor_clk);
+ mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
+ if (IS_ERR(mtk_nor->nor_clk))
+ return PTR_ERR(mtk_nor->nor_clk);
- mt8173_nor->dev = &pdev->dev;
+ mtk_nor->dev = &pdev->dev;
- ret = mt8173_nor_enable_clk(mt8173_nor);
+ ret = mtk_nor_enable_clk(mtk_nor);
if (ret)
return ret;
@@ -503,20 +503,20 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = -ENODEV;
goto nor_free;
}
- ret = mtk_nor_init(mt8173_nor, flash_np);
+ ret = mtk_nor_init(mtk_nor, flash_np);
nor_free:
if (ret)
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return ret;
}
static int mtk_nor_drv_remove(struct platform_device *pdev)
{
- struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
+ struct mtk_nor *mtk_nor = platform_get_drvdata(pdev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
@@ -524,18 +524,18 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int mtk_nor_suspend(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
static int mtk_nor_resume(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- return mt8173_nor_enable_clk(mt8173_nor);
+ return mtk_nor_enable_clk(mtk_nor);
}
static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index bc266f70a15b..d445a4d3b770 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -330,8 +330,22 @@ static inline int spi_nor_fsr_ready(struct spi_nor *nor)
int fsr = read_fsr(nor);
if (fsr < 0)
return fsr;
- else
- return fsr & FSR_READY;
+
+ if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
+ if (fsr & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (fsr & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ return -EIO;
+ }
+
+ return fsr & FSR_READY;
}
static int spi_nor_ready(struct spi_nor *nor)
@@ -552,6 +566,27 @@ erase_err:
return ret;
}
+/* Write status register and ensure bits in mask match written values */
+static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
+{
+ int ret;
+
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (ret < 0)
+ return ret;
+
+ return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
+}
+
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
@@ -650,7 +685,6 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -714,11 +748,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
/*
@@ -735,7 +765,6 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -802,11 +831,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
/*
@@ -1020,7 +1045,13 @@ static const struct flash_info spi_nor_ids[] = {
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
@@ -1065,7 +1096,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
- /* Spansion -- single (large) sector size only, at least
+ /* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -1094,6 +1125,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -2713,6 +2746,16 @@ static void spi_nor_resume(struct mtd_info *mtd)
dev_err(dev, "resume() failed\n");
}
+void spi_nor_restore(struct spi_nor *nor)
+{
+ /* restore the addressing mode */
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ set_4byte(nor, nor->info, 0);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
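Two behavioral changes in spi-nor.c deserve a note. spi_nor_fsr_ready() now decodes the flag status register's erase/program error bits and clears them with SPINOR_OP_CLFSR instead of silently reporting the part as ready, and the new write_sr_and_check() turns the lock/unlock paths into write-then-verify operations: the status register is read back and the update fails with -EIO if the masked bits did not stick (as on a hardware-write-protected device). The read-back idiom in isolation (read_reg()/write_reg() are stand-ins for the real accessors):

static int write_and_verify(u8 new_val, u8 mask)
{
	int ret;

	ret = write_reg(new_val);	/* stand-in for write_sr() + wait */
	if (ret)
		return ret;

	ret = read_reg();		/* stand-in: returns value or -errno */
	if (ret < 0)
		return ret;

	/* only the bits covered by the mask must match */
	return ((ret & mask) != (new_val & mask)) ? -EIO : 0;
}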
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 5f03b8c885a9..cde19c99e77b 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -151,7 +151,7 @@ static int read_page(int log)
memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
- if (err == -EUCLEAN)
+ if (!err || err == -EUCLEAN)
err = mtd->ecc_stats.corrected - oldstats.corrected;
if (err < 0 || read != mtd->writesize) {
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 1cb3f7758fb6..766b2c385682 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != use_len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != mtd->oobavail) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
/* read entire block's OOB at one go */
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to start read past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err)
goto out;
if (memcmpshow(addr, readbuf, writebuf,
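Every oobtest hunk applies the same normalization: mtd_read_oob() returns -EUCLEAN when ECC had to correct bitflips, which is still a successful read as far as the test is concerned, so it is folded to zero before the generic error check. The idiom, using the mtd_is_bitflip() helper from include/linux/mtd/mtd.h:

err = mtd_read_oob(mtd, addr, &ops);
if (mtd_is_bitflip(err))	/* true exactly for -EUCLEAN */
	err = 0;		/* corrected bitflips are not a failure */
if (err || ops.oobretlen != use_len)
	goto fail;		/* 'fail' is a stand-in label */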
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index b210fdb31c98..b1fc28f63882 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -99,6 +99,8 @@ struct ubiblock {
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
+static DEFINE_IDR(ubiblock_minor_idr);
+/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
@@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = {
.init_request = ubiblock_init_request,
};
-static DEFINE_IDR(ubiblock_minor_idr);
-
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
@@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
- mutex_unlock(&devices_mutex);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out_unlock;
}
- mutex_unlock(&devices_mutex);
dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
mutex_init(&dev->dev_mutex);
@@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
goto out_free_queue;
}
- mutex_lock(&devices_mutex);
list_add_tail(&dev->list, &ubiblock_devices);
- mutex_unlock(&devices_mutex);
/* Must be the last step: anyone can call file ops from now on */
add_disk(dev->gd);
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
+ mutex_unlock(&devices_mutex);
return 0;
out_free_queue:
@@ -457,6 +457,8 @@ out_put_disk:
put_disk(dev->gd);
out_free_dev:
kfree(dev);
+out_unlock:
+ mutex_unlock(&devices_mutex);
return ret;
}
@@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
int ubiblock_remove(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
+ int ret;
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
- mutex_unlock(&devices_mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_unlock;
}
/* Found a device, let's lock it so we can check if it's busy */
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
- mutex_unlock(&dev->dev_mutex);
- mutex_unlock(&devices_mutex);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_dev;
}
/* Remove from device list */
list_del(&dev->list);
- mutex_unlock(&devices_mutex);
-
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+
kfree(dev);
return 0;
+
+out_unlock_dev:
+ mutex_unlock(&dev->dev_mutex);
+out_unlock:
+ mutex_unlock(&devices_mutex);
+ return ret;
}
static int ubiblock_resize(struct ubi_volume_info *vi)
@@ -630,6 +638,7 @@ static void ubiblock_remove_all(void)
struct ubiblock *next;
struct ubiblock *dev;
+ mutex_lock(&devices_mutex);
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
/* The module is being forcefully removed */
WARN_ON(dev->desc);
@@ -638,6 +647,7 @@ static void ubiblock_remove_all(void)
ubiblock_cleanup(dev);
kfree(dev);
}
+ mutex_unlock(&devices_mutex);
}
int __init ubiblock_init(void)
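The ubiblock changes widen the devices_mutex critical sections so that the device list, the minor IDR and disk visibility are updated atomically (the new comment documents that the mutex protects both), and they funnel every failure through unlock labels instead of unlocking in each branch. The resulting shape is the standard goto-unwind pattern, roughly:

static int demo_create(void)
{
	int ret = 0;

	mutex_lock(&devices_mutex);

	if (find_dev_nolock(ubi_num, vol_id)) {	/* args elided */
		ret = -EEXIST;
		goto out_unlock;
	}

	ret = do_create_work();	/* hypothetical stand-in, all under the lock */

out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}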
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 136ce05d2328..e941395de3ae 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -535,8 +535,17 @@ static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
int limit, device_pebs;
uint64_t device_size;
- if (!max_beb_per1024)
- return 0;
+ if (!max_beb_per1024) {
+ /*
+ * Since max_beb_per1024 has not been set by the user in either
+ * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
+ * limit if it is supported by the device.
+ */
+ limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
+ if (limit < 0)
+ return 0;
+ return limit;
+ }
/*
* Here we are using size of the entire flash chip and
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 388e46be6ad9..250e30fac61b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -384,7 +384,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
}
/**
- * leb_write_lock - lock logical eraseblock for writing.
+ * leb_write_trylock - try to lock logical eraseblock for writing.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 4f0bd6b4422a..590d967011bb 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -66,7 +66,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
-static int anchor_pebs_avalible(struct rb_root *root)
+static int anchor_pebs_available(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 5a832bc79b1b..91705962ba73 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -214,9 +214,8 @@ static void assign_aeb_to_av(struct ubi_attach_info *ai,
struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *tmp_aeb;
- struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
- p = &av->root.rb_node;
while (*p) {
parent = *p;
@@ -1063,7 +1062,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
while (i--)
- kfree(fm->e[i]);
+ kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
ret = -ENOMEM;
goto free_hdr;
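The fastmap fix above pairs allocator with deallocator: entries obtained from a dedicated slab cache via kmem_cache_alloc(ubi_wl_entry_slab, ...) must be returned with kmem_cache_free() on the same cache; freeing them with kfree() bypasses the cache and is not guaranteed to be safe. The rule in miniature:

struct ubi_wl_entry *e;

e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
	return -ENOMEM;

/* ... on an error path ... */
kmem_cache_free(ubi_wl_entry_slab, e);	/* not kfree(e) */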
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 85237cf661f9..3fd8d7ff7a02 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->last_eb_bytes = vol->usable_leb_size;
}
+ /* Make volume "available" before it becomes accessible via sysfs */
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
@@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_sysfs;
- spin_lock(&ubi->volumes_lock);
- ubi->volumes[vol_id] = vol;
- ubi->vol_count += 1;
- spin_unlock(&ubi->volumes_lock);
-
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
@@ -315,6 +316,10 @@ out_sysfs:
*/
cdev_device_del(&vol->cdev, &vol->dev);
out_mapping:
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = NULL;
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index b5b8cd6f481c..2052a647220e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -692,7 +692,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
/* Check whether we need to produce an anchor PEB */
if (!anchor)
- anchor = !anchor_pebs_avalible(&ubi->free);
+ anchor = !anchor_pebs_available(&ubi->free);
if (anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
@@ -1529,6 +1529,46 @@ static void shutdown_work(struct ubi_device *ubi)
}
/**
+ * erase_aeb - erase a PEB given in UBI attach info PEB
+ * @ubi: UBI device description object
+ * @aeb: UBI attach info PEB
+ * @sync: If true, erase synchronously. Otherwise schedule for erasure
+ */
+static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
+{
+ struct ubi_wl_entry *e;
+ int err;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (sync) {
+ err = sync_erase(ubi, e, false);
+ if (err)
+ goto out_free;
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ } else {
+ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
+ if (err)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ wl_entry_destroy(ubi, e);
+
+ return err;
+}
+
+/**
* ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
* @ai: attaching information
@@ -1566,17 +1606,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
-
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, false);
+ if (err)
goto out_free;
- }
found_pebs++;
}
@@ -1585,8 +1617,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1605,8 +1639,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1635,6 +1671,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
} else {
+ bool sync = false;
+
/*
* Usually old Fastmap PEBs are scheduled for erasure
* and we don't have to care about them but if we face
@@ -1644,18 +1682,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->lookuptbl[aeb->pnum])
continue;
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
+ /*
+ * The fastmap update code might not find a free PEB for
+ * writing the fastmap anchor to and then reuses the
+ * current fastmap anchor PEB. When this PEB gets erased
+ * and a power cut happens before it is written again we
+ * must make sure that the fastmap attach code doesn't
+ * find any outdated fastmap anchors, hence we erase the
+ * outdated fastmap anchor PEBs synchronously here.
+ */
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
+ sync = true;
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi_assert(!ubi->lookuptbl[e->pnum]);
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, sync);
+ if (err)
goto out_free;
- }
}
found_pebs++;
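erase_aeb() consolidates three near-identical open-coded sequences in ubi_wl_init() and adds the sync flag so that a stale fastmap anchor PEB can be erased synchronously before attach completes, closing the power-cut window described in the comment. The fastmap-PEB call site, for instance, reduces to:

/* synchronous only for outdated fastmap anchors */
err = erase_aeb(ubi, aeb, aeb->vol_id == UBI_FM_SB_VOLUME_ID);
if (err)
	goto out_free;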
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index 2aaa3f7f2ba9..a9e2d669acd8 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -2,7 +2,7 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
-static int anchor_pebs_avalible(struct rb_root *root);
+static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 959d22aaa063..d1271c1ee23c 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -410,6 +410,7 @@ static int of_dev_node_match(struct device *dev, const void *data)
return dev->of_node == data;
}
+/* Note this function returns a reference to the mux_chip dev. */
static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
{
struct device *dev;
@@ -463,6 +464,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
(!args.args_count && (mux_chip->controllers > 1))) {
dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
np, args.np);
+ put_device(&mux_chip->dev);
return ERR_PTR(-EINVAL);
}
@@ -473,10 +475,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
if (controller >= mux_chip->controllers) {
dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
np, controller, args.np);
+ put_device(&mux_chip->dev);
return ERR_PTR(-EINVAL);
}
- get_device(&mux_chip->dev);
return &mux_chip->mux[controller];
}
EXPORT_SYMBOL_GPL(mux_control_get);
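The mux fix balances the reference taken by of_find_mux_chip_by_node(): the lookup (a class_find_device() wrapper, as the new comment notes) already returns the device with an elevated refcount, so the later get_device() was redundant and the early error returns were leaking a reference. The corrected shape, with the argument checks abbreviated:

struct mux_chip *chip = of_find_mux_chip_by_node(np);

if (!chip)
	return ERR_PTR(-EPROBE_DEFER);	/* plausible errno; see the driver */

if (bad_args) {				/* stand-in for the cells/index checks */
	put_device(&chip->dev);		/* drop the lookup's reference */
	return ERR_PTR(-EINVAL);
}

return &chip->mux[controller];	/* reference dropped later via mux_control_put() */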
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0936da592e12..944ec3c9282c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -497,4 +497,15 @@ config THUNDERBOLT_NET
source "drivers/net/hyperv/Kconfig"
+config NETDEVSIM
+ tristate "Simulated networking device"
+ depends on DEBUG_FS
+ help
+ This driver is a developer testing tool and software model that can
+ be used to test various control path networking APIs, especially
+ HW-offload related.
+
+ To compile this driver as a module, choose M here: the module
+ will be called netdevsim.
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 766f62d02a0b..04c3b747812c 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_FUJITSU_ES) += fjes/
thunderbolt-net-y += thunderbolt.o
obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
+obj-$(CONFIG_NETDEVSIM) += netdevsim/
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 8a9b085c2a98..58c705f24f96 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1431,13 +1431,9 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
{
u8 macaddr[ETH_ALEN];
u8 *mac;
- int i;
if (newval->string) {
- i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
- &macaddr[0], &macaddr[1], &macaddr[2],
- &macaddr[3], &macaddr[4], &macaddr[5]);
- if (i != ETH_ALEN)
+ if (!mac_pton(newval->string, macaddr))
goto err;
mac = macaddr;
} else {
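The bonding hunk replaces a hand-rolled sscanf("%hhx:%hhx:...") parse with mac_pton() from lib/net_utils.c, which converts a canonical xx:xx:xx:xx:xx:xx string into a 6-byte array and returns true only for a complete, well-formed address:

u8 mac[ETH_ALEN];

if (!mac_pton("00:11:22:33:44:55", mac))
	return -EINVAL;	/* rejects short or non-hex input */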
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b8029ea03307..433a14b9f731 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -264,7 +264,6 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
}
/* Create payload CAIF frames. */
- pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
while (nfrms < CFHSI_MAX_PKTS) {
struct caif_payload_info *info;
int hpad;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index d065c0e2d18e..406b4847e5dc 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -251,14 +251,14 @@ static void c_can_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct c_can_pci_data c_can_sta2x11= {
+static const struct c_can_pci_data c_can_sta2x11= {
.type = BOSCH_C_CAN,
.reg_align = C_CAN_REG_ALIGN_32,
	.freq = 52000000, /* 52 MHz */
.bar = 0,
};
-static struct c_can_pci_data c_can_pch = {
+static const struct c_can_pci_data c_can_pch = {
.type = BOSCH_C_CAN,
.reg_align = C_CAN_REG_32,
.freq = 50000000, /* 50 MHz */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 365a8cc62405..b1779566c5bb 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -27,6 +27,7 @@
#include <linux/can/skb.h>
#include <linux/can/netlink.h>
#include <linux/can/led.h>
+#include <linux/of.h>
#include <net/rtnetlink.h>
#define MOD_DESC "CAN device driver interface"
@@ -411,7 +412,7 @@ EXPORT_SYMBOL_GPL(can_change_state);
* Local echo of CAN messages
*
* CAN network devices *should* support a local echo functionality
- * (see Documentation/networking/can.txt). To test the handling of CAN
+ * (see Documentation/networking/can.rst). To test the handling of CAN
* interfaces that do not support the local echo both driver types are
* implemented. In the case that the driver does not support the echo
* the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
@@ -814,6 +815,29 @@ int open_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(open_candev);
+#ifdef CONFIG_OF
+/* Common function that can be used to understand the limitation of
+ * a transceiver when it provides no means to determine these limitations
+ * at runtime.
+ */
+void of_can_transceiver(struct net_device *dev)
+{
+ struct device_node *dn;
+ struct can_priv *priv = netdev_priv(dev);
+ struct device_node *np = dev->dev.parent->of_node;
+ int ret;
+
+ dn = of_get_child_by_name(np, "can-transceiver");
+ if (!dn)
+ return;
+
+ ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+ if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+ netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+}
+EXPORT_SYMBOL_GPL(of_can_transceiver);
+#endif
+
/*
* Common close function for cleanup before the device gets closed.
*
@@ -913,6 +937,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
priv->bitrate_const_cnt);
if (err)
return err;
+
+ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
memcpy(&priv->bittiming, &bt, sizeof(bt));
if (priv->do_set_bittiming) {
@@ -997,6 +1028,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
priv->data_bitrate_const_cnt);
if (err)
return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
if (priv->do_set_data_bittiming) {
@@ -1064,6 +1102,7 @@ static size_t can_get_size(const struct net_device *dev)
if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt);
+ size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
return size;
}
@@ -1121,7 +1160,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt,
- priv->data_bitrate_const))
+ priv->data_bitrate_const)) ||
+
+ (nla_put(skb, IFLA_CAN_BITRATE_MAX,
+ sizeof(priv->bitrate_max),
+ &priv->bitrate_max))
)
return -EMSGSIZE;
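of_can_transceiver() gives drivers a common way to pick up a fixed transceiver limit from the device tree: an optional can-transceiver child node with a max-bitrate property fills priv->bitrate_max, and can_changelink() then rejects arbitration or CAN FD data bitrates above it. A driver opts in with one call from probe, after the parent device is set (a hedged sketch; demo_can_probe is invented):

static int demo_can_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	int err;

	dev = alloc_candev(sizeof(struct can_priv), 1);
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);	/* needed: the limit is read
						 * from the parent's of_node */
	of_can_transceiver(dev);		/* no-op without the DT node */

	err = register_candev(dev);
	if (err)
		free_candev(dev);
	return err;
}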
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0626dcfd1f3d..634c51e6b8ae 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -190,6 +190,7 @@
* MX53 FlexCAN2 03.00.00.00 yes no no no no
* MX6s FlexCAN3 10.00.12.00 yes yes no no yes
* VF610 FlexCAN3 ? no yes no yes yes?
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@@ -279,6 +280,10 @@ struct flexcan_priv {
struct clk *clk_per;
const struct flexcan_devtype_data *devtype_data;
struct regulator *reg_xceiver;
+
+ /* Read and Write APIs */
+ u32 (*read)(void __iomem *addr);
+ void (*write)(u32 val, void __iomem *addr);
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
@@ -301,6 +306,12 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
FLEXCAN_QUIRK_BROKEN_PERR_STATE,
};
+static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+};
+
static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
@@ -313,39 +324,45 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
.brp_inc = 1,
};
-/* Abstract off the read/write for arm versus ppc. This
- * assumes that PPC uses big-endian registers and everything
- * else uses little-endian registers, independent of CPU
- * endianness.
+/* FlexCAN module is essentially modelled as a little-endian IP in most
+ * SoCs, i.e the registers as well as the message buffer areas are
+ * implemented in a little-endian fashion.
+ *
+ * However there are some SoCs (e.g. LS1021A) which implement the FlexCAN
+ * module in a big-endian fashion (i.e the registers as well as the
+ * message buffer areas are implemented in a big-endian way).
+ *
+ * In addition, the FlexCAN module can be found on SoCs having ARM or
+ * PPC cores. So, we need to abstract off the register read/write
+ * functions, ensuring that these cater to all the combinations of module
+ * endianness and underlying CPU endianness.
*/
-#if defined(CONFIG_PPC)
-static inline u32 flexcan_read(void __iomem *addr)
+static inline u32 flexcan_read_be(void __iomem *addr)
{
- return in_be32(addr);
+ return ioread32be(addr);
}
-static inline void flexcan_write(u32 val, void __iomem *addr)
+static inline void flexcan_write_be(u32 val, void __iomem *addr)
{
- out_be32(addr, val);
+ iowrite32be(val, addr);
}
-#else
-static inline u32 flexcan_read(void __iomem *addr)
+
+static inline u32 flexcan_read_le(void __iomem *addr)
{
- return readl(addr);
+ return ioread32(addr);
}
-static inline void flexcan_write(u32 val, void __iomem *addr)
+static inline void flexcan_write_le(u32 val, void __iomem *addr)
{
- writel(val, addr);
+ iowrite32(val, addr);
}
-#endif
static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
- flexcan_write(reg_ctrl, &regs->ctrl);
+ priv->write(reg_ctrl, &regs->ctrl);
}
static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
@@ -353,7 +370,7 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
- flexcan_write(reg_ctrl, &regs->ctrl);
+ priv->write(reg_ctrl, &regs->ctrl);
}
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
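The flexcan conversion replaces the compile-time CONFIG_PPC switch with per-device read/write function pointers, since register endianness is a property of the SoC's FlexCAN integration (the big-endian LS1021A carries ARM cores) rather than of the CPU architecture. Probe code can then bind the accessors once per device; one plausible selection, keyed off a DT property (the exact condition used by the driver lives in a probe hunk outside this excerpt):

/* assumption: a "big-endian" DT boolean marks BE-integrated modules */
priv->read = flexcan_read_le;
priv->write = flexcan_write_le;
if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
	priv->read = flexcan_read_be;
	priv->write = flexcan_write_be;
}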
@@ -378,14 +395,14 @@ static int flexcan_chip_enable(struct flexcan_priv *priv)
unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg &= ~FLEXCAN_MCR_MDIS;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
- while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
udelay(10);
- if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
return -ETIMEDOUT;
return 0;
@@ -397,14 +414,14 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg |= FLEXCAN_MCR_MDIS;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
- while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
udelay(10);
- if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
return -ETIMEDOUT;
return 0;
@@ -416,14 +433,14 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
u32 reg;
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg |= FLEXCAN_MCR_HALT;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
- while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
udelay(100);
- if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
return -ETIMEDOUT;
return 0;
@@ -435,14 +452,14 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg &= ~FLEXCAN_MCR_HALT;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
- while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
udelay(10);
- if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
return -ETIMEDOUT;
return 0;
@@ -453,11 +470,11 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
struct flexcan_regs __iomem *regs = priv->regs;
unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
- flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
- while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+ priv->write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
udelay(10);
- if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
return -ETIMEDOUT;
return 0;
@@ -468,7 +485,7 @@ static int __flexcan_get_berr_counter(const struct net_device *dev,
{
const struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
- u32 reg = flexcan_read(&regs->ecr);
+ u32 reg = priv->read(&regs->ecr);
bec->txerr = (reg >> 0) & 0xff;
bec->rxerr = (reg >> 8) & 0xff;
@@ -524,24 +541,24 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (cf->can_dlc > 0) {
data = be32_to_cpup((__be32 *)&cf->data[0]);
- flexcan_write(data, &priv->tx_mb->data[0]);
+ priv->write(data, &priv->tx_mb->data[0]);
}
- if (cf->can_dlc > 3) {
+ if (cf->can_dlc > 4) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
- flexcan_write(data, &priv->tx_mb->data[1]);
+ priv->write(data, &priv->tx_mb->data[1]);
}
can_put_echo_skb(skb, dev, 0);
- flexcan_write(can_id, &priv->tx_mb->can_id);
- flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
+ priv->write(can_id, &priv->tx_mb->can_id);
+ priv->write(ctrl, &priv->tx_mb->can_ctrl);
/* Errata ERR005829 step8:
* Write twice INACTIVE(0x8) code to first MB.
*/
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
&priv->tx_mb_reserved->can_ctrl);
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
&priv->tx_mb_reserved->can_ctrl);
return NETDEV_TX_OK;
@@ -660,7 +677,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
u32 code;
do {
- reg_ctrl = flexcan_read(&mb->can_ctrl);
+ reg_ctrl = priv->read(&mb->can_ctrl);
} while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
/* is this MB empty? */
@@ -675,17 +692,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
offload->dev->stats.rx_errors++;
}
} else {
- reg_iflag1 = flexcan_read(&regs->iflag1);
+ reg_iflag1 = priv->read(&regs->iflag1);
if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
return 0;
- reg_ctrl = flexcan_read(&mb->can_ctrl);
+ reg_ctrl = priv->read(&mb->can_ctrl);
}
/* increase timstamp to full 32 bit */
*timestamp = reg_ctrl << 16;
- reg_id = flexcan_read(&mb->can_id);
+ reg_id = priv->read(&mb->can_id);
if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
@@ -695,19 +712,19 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
cf->can_id |= CAN_RTR_FLAG;
cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
- *(__be32 *)(cf->data + 0) = cpu_to_be32(flexcan_read(&mb->data[0]));
- *(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
+ *(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0]));
+ *(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1]));
/* mark as read */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
/* Clear IRQ */
if (n < 32)
- flexcan_write(BIT(n), &regs->iflag1);
+ priv->write(BIT(n), &regs->iflag1);
else
- flexcan_write(BIT(n - 32), &regs->iflag2);
+ priv->write(BIT(n - 32), &regs->iflag2);
} else {
- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
- flexcan_read(&regs->timer);
+ priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+ priv->read(&regs->timer);
}
return 1;
@@ -719,8 +736,8 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
struct flexcan_regs __iomem *regs = priv->regs;
u32 iflag1, iflag2;
- iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
- iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+ iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
+ iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
return (u64)iflag2 << 32 | iflag1;
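
Note on the helper above: mailbox interrupt flags are split across two 32-bit registers, so mailbox n sets bit n of iflag1 for n < 32 and bit n - 32 of iflag2 otherwise. Folding both registers into one u64 lets callers test a mailbox with a single mask; a minimal sketch of the intended use (not a verbatim call site from this patch):

	u64 iflag = flexcan_read_reg_iflag_rx(priv);

	/* does mailbox n have a pending RX frame? */
	if (iflag & BIT_ULL(n))
		/* ... read the mailbox ... */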
@@ -736,7 +753,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
u32 reg_iflag1, reg_esr;
enum can_state last_state = priv->can.state;
- reg_iflag1 = flexcan_read(&regs->iflag1);
+ reg_iflag1 = priv->read(&regs->iflag1);
/* reception interrupt */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -759,7 +776,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* FIFO overflow interrupt */
if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
handled = IRQ_HANDLED;
- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+ priv->write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW,
+ &regs->iflag1);
dev->stats.rx_over_errors++;
dev->stats.rx_errors++;
}
@@ -773,18 +791,18 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
can_led_event(dev, CAN_LED_EVENT_TX);
/* after sending a RTR frame MB is in RX mode */
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb->can_ctrl);
- flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ &priv->tx_mb->can_ctrl);
+ priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
netif_wake_queue(dev);
}
- reg_esr = flexcan_read(&regs->esr);
+ reg_esr = priv->read(&regs->esr);
/* ACK all bus error and state change IRQ sources */
if (reg_esr & FLEXCAN_ESR_ALL_INT) {
handled = IRQ_HANDLED;
- flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+ priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
}
/* state change interrupt or broken error state quirk fix is enabled */
@@ -846,7 +864,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg;
- reg = flexcan_read(&regs->ctrl);
+ reg = priv->read(&regs->ctrl);
reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
FLEXCAN_CTRL_RJW(0x3) |
FLEXCAN_CTRL_PSEG1(0x7) |
@@ -870,11 +888,11 @@ static void flexcan_set_bittiming(struct net_device *dev)
reg |= FLEXCAN_CTRL_SMP;
netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
- flexcan_write(reg, &regs->ctrl);
+ priv->write(reg, &regs->ctrl);
/* print chip status */
netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
- flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+ priv->read(&regs->mcr), priv->read(&regs->ctrl));
}
/* flexcan_chip_start
@@ -913,7 +931,7 @@ static int flexcan_chip_start(struct net_device *dev)
* choose format C
* set max mailbox number
*/
- reg_mcr = flexcan_read(&regs->mcr);
+ reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
@@ -927,7 +945,7 @@ static int flexcan_chip_start(struct net_device *dev)
FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
}
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
- flexcan_write(reg_mcr, &regs->mcr);
+ priv->write(reg_mcr, &regs->mcr);
/* CTRL
*
@@ -940,7 +958,7 @@ static int flexcan_chip_start(struct net_device *dev)
* enable bus off interrupt
* (== FLEXCAN_CTRL_ERR_STATE)
*/
- reg_ctrl = flexcan_read(&regs->ctrl);
+ reg_ctrl = priv->read(&regs->ctrl);
reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
FLEXCAN_CTRL_ERR_STATE;
@@ -960,45 +978,45 @@ static int flexcan_chip_start(struct net_device *dev)
/* leave interrupts disabled for now */
reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
- flexcan_write(reg_ctrl, &regs->ctrl);
+ priv->write(reg_ctrl, &regs->ctrl);
if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
- reg_ctrl2 = flexcan_read(&regs->ctrl2);
+ reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
- flexcan_write(reg_ctrl2, &regs->ctrl2);
+ priv->write(reg_ctrl2, &regs->ctrl2);
}
/* clear and invalidate all mailboxes first */
for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
- flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
- &regs->mb[i].can_ctrl);
+ priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+ &regs->mb[i].can_ctrl);
}
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
- flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
- &regs->mb[i].can_ctrl);
+ priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
+ &regs->mb[i].can_ctrl);
}
/* Errata ERR005829: mark first TX mailbox as INACTIVE */
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb_reserved->can_ctrl);
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ &priv->tx_mb_reserved->can_ctrl);
/* mark TX mailbox as INACTIVE */
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb->can_ctrl);
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ &priv->tx_mb->can_ctrl);
/* acceptance mask/acceptance code (accept everything) */
- flexcan_write(0x0, &regs->rxgmask);
- flexcan_write(0x0, &regs->rx14mask);
- flexcan_write(0x0, &regs->rx15mask);
+ priv->write(0x0, &regs->rxgmask);
+ priv->write(0x0, &regs->rx14mask);
+ priv->write(0x0, &regs->rx15mask);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
- flexcan_write(0x0, &regs->rxfgmask);
+ priv->write(0x0, &regs->rxfgmask);
/* clear acceptance filters */
for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
- flexcan_write(0, &regs->rximr[i]);
+ priv->write(0, &regs->rximr[i]);
/* On Vybrid, disable memory error detection interrupts
* and freeze mode.
@@ -1011,16 +1029,16 @@ static int flexcan_chip_start(struct net_device *dev)
* and Correction of Memory Errors" to write to
* MECR register
*/
- reg_ctrl2 = flexcan_read(&regs->ctrl2);
+ reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
- flexcan_write(reg_ctrl2, &regs->ctrl2);
+ priv->write(reg_ctrl2, &regs->ctrl2);
- reg_mecr = flexcan_read(&regs->mecr);
+ reg_mecr = priv->read(&regs->mecr);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
- flexcan_write(reg_mecr, &regs->mecr);
+ priv->write(reg_mecr, &regs->mecr);
reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
FLEXCAN_MECR_FANCEI_MSK);
- flexcan_write(reg_mecr, &regs->mecr);
+ priv->write(reg_mecr, &regs->mecr);
}
err = flexcan_transceiver_enable(priv);
@@ -1036,14 +1054,14 @@ static int flexcan_chip_start(struct net_device *dev)
/* enable interrupts atomically */
disable_irq(dev->irq);
- flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
- flexcan_write(priv->reg_imask1_default, &regs->imask1);
- flexcan_write(priv->reg_imask2_default, &regs->imask2);
+ priv->write(priv->reg_ctrl_default, &regs->ctrl);
+ priv->write(priv->reg_imask1_default, &regs->imask1);
+ priv->write(priv->reg_imask2_default, &regs->imask2);
enable_irq(dev->irq);
/* print chip status */
netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
- flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+ priv->read(&regs->mcr), priv->read(&regs->ctrl));
return 0;
@@ -1068,10 +1086,10 @@ static void flexcan_chip_stop(struct net_device *dev)
flexcan_chip_disable(priv);
/* Disable all interrupts */
- flexcan_write(0, &regs->imask2);
- flexcan_write(0, &regs->imask1);
- flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
- &regs->ctrl);
+ priv->write(0, &regs->imask2);
+ priv->write(0, &regs->imask1);
+ priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ &regs->ctrl);
flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
@@ -1186,26 +1204,26 @@ static int register_flexcandev(struct net_device *dev)
err = flexcan_chip_disable(priv);
if (err)
goto out_disable_per;
- reg = flexcan_read(&regs->ctrl);
+ reg = priv->read(&regs->ctrl);
reg |= FLEXCAN_CTRL_CLK_SRC;
- flexcan_write(reg, &regs->ctrl);
+ priv->write(reg, &regs->ctrl);
err = flexcan_chip_enable(priv);
if (err)
goto out_chip_disable;
/* set freeze, halt and activate FIFO, restrict register access */
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
/* Currently we only support newer versions of this core
* featuring a RX hardware FIFO (although this driver doesn't
* make use of it on some cores). Older cores, found on some
* Coldfire derivatives, are not tested.
*/
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
if (!(reg & FLEXCAN_MCR_FEN)) {
netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
err = -ENODEV;
@@ -1233,8 +1251,12 @@ static void unregister_flexcandev(struct net_device *dev)
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+ { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+ { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1314,6 +1336,21 @@ static int flexcan_probe(struct platform_device *pdev)
dev->flags |= IFF_ECHO;
priv = netdev_priv(dev);
+
+ if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
+ priv->read = flexcan_read_be;
+ priv->write = flexcan_write_be;
+ } else {
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "fsl,p1010-flexcan")) {
+ priv->read = flexcan_read_be;
+ priv->write = flexcan_write_be;
+ } else {
+ priv->read = flexcan_read_le;
+ priv->write = flexcan_write_le;
+ }
+ }
+
priv->can.clock.freq = clock_freq;
priv->can.bittiming_const = &flexcan_bittiming_const;
priv->can.do_set_mode = flexcan_set_mode;
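
With the probe logic above, every register access goes through priv->read/priv->write, chosen once per device: big-endian accessors for the PowerPC-based P1010 and for any node carrying the "big-endian" property, little-endian accessors otherwise. The helpers themselves are thin ioread/iowrite wrappers defined earlier in this patch; a sketch of their likely shape, for reference:

	static inline u32 flexcan_read_be(void __iomem *addr)
	{
		return ioread32be(addr);
	}

	static inline void flexcan_write_be(u32 val, void __iomem *addr)
	{
		iowrite32be(val, addr);
	}

	static inline u32 flexcan_read_le(void __iomem *addr)
	{
		return ioread32(addr);
	}

	static inline void flexcan_write_le(u32 val, void __iomem *addr)
	{
		iowrite32(val, addr);
	}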
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index f4947a74b65f..2594f7779c6f 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
@@ -126,6 +127,12 @@ enum m_can_mram_cfg {
#define DBTP_DSJW_SHIFT 0
#define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT)
+/* Transmitter Delay Compensation Register (TDCR) */
+#define TDCR_TDCO_SHIFT 8
+#define TDCR_TDCO_MASK (0x7F << TDCR_TDCO_SHIFT)
+#define TDCR_TDCF_SHIFT 0
+#define TDCR_TDCF_MASK (0x7F << TDCR_TDCF_SHIFT)
+
/* Test Register (TEST) */
#define TEST_LBCK BIT(4)
@@ -625,21 +632,16 @@ static int m_can_clk_start(struct m_can_priv *priv)
{
int err;
- err = clk_prepare_enable(priv->hclk);
+ err = pm_runtime_get_sync(priv->device);
if (err)
- return err;
-
- err = clk_prepare_enable(priv->cclk);
- if (err)
- clk_disable_unprepare(priv->hclk);
+ pm_runtime_put_noidle(priv->device);
return err;
}
static void m_can_clk_stop(struct m_can_priv *priv)
{
- clk_disable_unprepare(priv->cclk);
- clk_disable_unprepare(priv->hclk);
+ pm_runtime_put_sync(priv->device);
}
static int m_can_get_berr_counter(const struct net_device *dev,
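
A note on the runtime-PM conversion above: pm_runtime_get_sync() returns 0 or 1 on success (1 when the device was already active) and a negative errno on failure, and it increments the usage counter even when it fails, which is why the failure path calls pm_runtime_put_noidle(). Callers that must not see 1 as an error can normalize the result; a sketch, under those semantics:

	err = pm_runtime_get_sync(priv->device);
	if (err < 0) {
		/* balance the usage counter taken by get_sync() */
		pm_runtime_put_noidle(priv->device);
		return err;
	}
	return 0;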
@@ -987,13 +989,47 @@ static int m_can_set_bittiming(struct net_device *dev)
m_can_write(priv, M_CAN_NBTP, reg_btp);
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ reg_btp = 0;
brp = dbt->brp - 1;
sjw = dbt->sjw - 1;
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
- reg_btp = (brp << DBTP_DBRP_SHIFT) | (sjw << DBTP_DSJW_SHIFT) |
- (tseg1 << DBTP_DTSEG1_SHIFT) |
- (tseg2 << DBTP_DTSEG2_SHIFT);
+
+ /* TDC is only needed for bitrates beyond 2.5 MBit/s.
+ * This is mentioned in the "Bit Time Requirements for CAN FD"
+ * paper presented at the International CAN Conference 2013
+ */
+ if (dbt->bitrate > 2500000) {
+ u32 tdco, ssp;
+
+ /* Use the same value for the secondary sample point
+ * as for the data sample point
+ */
+ ssp = dbt->sample_point;
+
+ /* Equation based on Bosch's M_CAN User Manual's
+ * Transmitter Delay Compensation Section
+ */
+ tdco = (priv->can.clock.freq / 1000) *
+ ssp / dbt->bitrate;
+
+ /* Max valid TDCO value is 127 */
+ if (tdco > 127) {
+ netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
+ tdco);
+ tdco = 127;
+ }
+
+ reg_btp |= DBTP_TDC;
+ m_can_write(priv, M_CAN_TDCR,
+ tdco << TDCR_TDCO_SHIFT);
+ }
+
+ reg_btp |= (brp << DBTP_DBRP_SHIFT) |
+ (sjw << DBTP_DSJW_SHIFT) |
+ (tseg1 << DBTP_DTSEG1_SHIFT) |
+ (tseg2 << DBTP_DTSEG2_SHIFT);
+
m_can_write(priv, M_CAN_DBTP, reg_btp);
}
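
The TDCO computation above works in units of CAN-clock periods: priv->can.clock.freq / 1000 gives the clock in kHz, dbt->sample_point is expressed in tenths of a percent, so the product divided by the bitrate yields the secondary sample point offset in clock ticks. A worked example, assuming a 40 MHz CAN clock, a 75.0% sample point (sample_point = 750) and a 4 Mbit/s data bitrate:

	tdco = (40000000 / 1000) * 750 / 4000000 = 7   /* integer division of 7.5 */

which matches 75% of the 10 clock periods that make up one bit time at 4 Mbit/s, and stays well below the 127 cap enforced above.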
@@ -1143,11 +1179,6 @@ static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
return 0;
}
-static void free_m_can_dev(struct net_device *dev)
-{
- free_candev(dev);
-}
-
/* Checks core release number of M_CAN
* returns 0 if an unsupported device is detected
* else it returns the release and step coded as:
@@ -1207,31 +1238,20 @@ static bool m_can_niso_supported(const struct m_can_priv *priv)
return !niso_timeout;
}
-static struct net_device *alloc_m_can_dev(struct platform_device *pdev,
- void __iomem *addr, u32 tx_fifo_size)
+static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev,
+ void __iomem *addr)
{
- struct net_device *dev;
struct m_can_priv *priv;
int m_can_version;
- unsigned int echo_buffer_count;
m_can_version = m_can_check_core_release(addr);
/* return if unsupported version */
if (!m_can_version) {
- dev = NULL;
- goto return_dev;
+ dev_err(&pdev->dev, "Unsupported version number: %2d",
+ m_can_version);
+ return -EINVAL;
}
- /* If version < 3.1.x, then only one echo buffer is used */
- echo_buffer_count = ((m_can_version == 30)
- ? 1U
- : (unsigned int)tx_fifo_size);
-
- dev = alloc_candev(sizeof(*priv), echo_buffer_count);
- if (!dev) {
- dev = NULL;
- goto return_dev;
- }
priv = netdev_priv(dev);
netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);
@@ -1273,16 +1293,12 @@ static struct net_device *alloc_m_can_dev(struct platform_device *pdev,
: 0);
break;
default:
- /* Unsupported device: free candev */
- free_m_can_dev(dev);
dev_err(&pdev->dev, "Unsupported version number: %2d",
priv->version);
- dev = NULL;
- break;
+ return -EINVAL;
}
-return_dev:
- return dev;
+ return 0;
}
static int m_can_open(struct net_device *dev)
@@ -1574,37 +1590,26 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto failed_ret;
}
- /* Enable clocks. Necessary to read Core Release in order to determine
- * M_CAN version
- */
- ret = clk_prepare_enable(hclk);
- if (ret)
- goto disable_hclk_ret;
-
- ret = clk_prepare_enable(cclk);
- if (ret)
- goto disable_cclk_ret;
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
addr = devm_ioremap_resource(&pdev->dev, res);
irq = platform_get_irq_byname(pdev, "int0");
if (IS_ERR(addr) || irq < 0) {
ret = -EINVAL;
- goto disable_cclk_ret;
+ goto failed_ret;
}
/* message ram could be shared */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
if (!res) {
ret = -ENODEV;
- goto disable_cclk_ret;
+ goto failed_ret;
}
mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!mram_addr) {
ret = -ENOMEM;
- goto disable_cclk_ret;
+ goto failed_ret;
}
/* get message ram configuration */
@@ -1613,7 +1618,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
sizeof(mram_config_vals) / 4);
if (ret) {
dev_err(&pdev->dev, "Could not get Message RAM configuration.");
- goto disable_cclk_ret;
+ goto failed_ret;
}
/* Get TX FIFO size
@@ -1622,11 +1627,12 @@ static int m_can_plat_probe(struct platform_device *pdev)
tx_fifo_size = mram_config_vals[7];
/* allocate the m_can device */
- dev = alloc_m_can_dev(pdev, addr, tx_fifo_size);
+ dev = alloc_candev(sizeof(*priv), tx_fifo_size);
if (!dev) {
ret = -ENOMEM;
- goto disable_cclk_ret;
+ goto failed_ret;
}
+
priv = netdev_priv(dev);
dev->irq = irq;
priv->device = &pdev->dev;
@@ -1640,30 +1646,42 @@ static int m_can_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
+ /* Enable clocks. Necessary to read Core Release in order to determine
+ * M_CAN version
+ */
+ pm_runtime_enable(&pdev->dev);
+ ret = m_can_clk_start(priv);
+ if (ret)
+ goto pm_runtime_fail;
+
+ ret = m_can_dev_setup(pdev, dev, addr);
+ if (ret)
+ goto clk_disable;
+
ret = register_m_can_dev(dev);
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
- goto failed_free_dev;
+ goto clk_disable;
}
devm_can_led_init(dev);
+ of_can_transceiver(dev);
+
dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
KBUILD_MODNAME, dev->irq, priv->version);
/* Probe finished
* Stop clocks. They will be reactivated once the M_CAN device is opened
*/
-
- goto disable_cclk_ret;
-
-failed_free_dev:
- free_m_can_dev(dev);
-disable_cclk_ret:
- clk_disable_unprepare(cclk);
-disable_hclk_ret:
- clk_disable_unprepare(hclk);
+clk_disable:
+ m_can_clk_stop(priv);
+pm_runtime_fail:
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ free_candev(dev);
+ }
failed_ret:
return ret;
}
@@ -1721,14 +1739,47 @@ static int m_can_plat_remove(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
unregister_m_can_dev(dev);
+
+ pm_runtime_disable(&pdev->dev);
+
platform_set_drvdata(pdev, NULL);
- free_m_can_dev(dev);
+ free_candev(dev);
return 0;
}
+static int __maybe_unused m_can_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_priv *priv = netdev_priv(ndev);
+
+ clk_disable_unprepare(priv->cclk);
+ clk_disable_unprepare(priv->hclk);
+
+ return 0;
+}
+
+static int __maybe_unused m_can_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(priv->hclk);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(priv->cclk);
+ if (err)
+ clk_disable_unprepare(priv->hclk);
+
+ return err;
+}
+
static const struct dev_pm_ops m_can_pmops = {
+ SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
+ m_can_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
};
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index f394f77d7528..d94dae216820 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -256,7 +256,7 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
weight = offload->mb_first - offload->mb_last;
}
- return can_rx_offload_init_queue(dev, offload, weight);;
+ return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 5d067c1b987f..89d60d8e467c 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -508,7 +508,7 @@ static void slc_sync(void)
}
/* Find a free SLCAN channel, and link in this `tty' line. */
-static struct slcan *slc_alloc(dev_t line)
+static struct slcan *slc_alloc(void)
{
int i;
char name[IFNAMSIZ];
@@ -583,7 +583,7 @@ static int slcan_open(struct tty_struct *tty)
/* OK. Find a free SLCAN channel to use. */
err = -ENFILE;
- sl = slc_alloc(tty_devnum(tty));
+ sl = slc_alloc();
if (sl == NULL)
goto err_exit;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index f3f05fea8e1f..98d118b3aaf4 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -612,8 +612,7 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
return 0;
}
-static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
- struct spi_device *spi)
+static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
{
mcp251x_do_set_bittiming(net);
@@ -775,7 +774,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
mcp251x_hw_reset(spi);
- mcp251x_setup(net, priv, spi);
+ mcp251x_setup(net, spi);
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
mcp251x_set_normal_mode(spi);
} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
@@ -971,7 +970,7 @@ static int mcp251x_open(struct net_device *net)
mcp251x_open_clean(net);
goto open_unlock;
}
- ret = mcp251x_setup(net, priv, spi);
+ ret = mcp251x_setup(net, spi);
if (ret) {
mcp251x_open_clean(net);
goto open_unlock;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b00358297424..12ff0020ecd6 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
if (dev->can.state == CAN_STATE_ERROR_WARNING ||
dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 68ac3e88a8ce..17c21ad3b95e 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -243,7 +243,7 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
return NULL;
}
-static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
+static int gs_cmd_reset(struct gs_can *gsdev)
{
struct gs_device_mode *dm;
struct usb_interface *intf = gsdev->iface;
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
rc);
- return rc;
+ return (rc > 0) ? 0 : rc;
}
static void gs_usb_xmit_callback(struct urb *urb)
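
The bittiming fix above accounts for the fact that the underlying USB control transfer returns the number of bytes transferred on success; only a negative value is a real error, so a positive count has to be normalized to 0 before being propagated as a return code, otherwise callers doing "if (ret)" would treat a successful transfer as a failure.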
@@ -709,7 +709,7 @@ static int gs_can_close(struct net_device *netdev)
atomic_set(&dev->active_tx_urbs, 0);
/* reset the device */
- rc = gs_cmd_reset(parent, dev);
+ rc = gs_cmd_reset(dev);
if (rc < 0)
netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25a9b79cc42d..f530a80f5051 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -408,7 +408,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
{
struct sk_buff *skb;
struct can_frame *cf;
- struct timeval tv;
enum can_state new_state;
/* ignore this error until 1st ts received */
@@ -525,8 +524,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
- peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16,
+ &hwts->hwtstamp);
}
mc->netdev->stats.rx_packets++;
@@ -610,7 +609,6 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC;
struct sk_buff *skb;
struct can_frame *cf;
- struct timeval tv;
struct skb_shared_hwtstamps *hwts;
skb = alloc_can_skb(mc->netdev, &cf);
@@ -658,9 +656,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
}
/* convert timestamp into kernel time */
- peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
hwts = skb_hwtstamps(skb);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp);
/* update statistics */
mc->netdev->stats.rx_packets++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1ca76e03e965..50e911428638 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -80,21 +80,6 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
}
}
-static void peak_usb_add_us(struct timeval *tv, u32 delta_us)
-{
- /* number of s. to add to final time */
- u32 delta_s = delta_us / 1000000;
-
- delta_us -= delta_s * 1000000;
-
- tv->tv_usec += delta_us;
- if (tv->tv_usec >= 1000000) {
- tv->tv_usec -= 1000000;
- delta_s++;
- }
- tv->tv_sec += delta_s;
-}
-
/*
* sometimes, another now may be more recent than current one...
*/
@@ -103,7 +88,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
time_ref->ts_dev_2 = ts_now;
/* should wait at least two passes before computing */
- if (time_ref->tv_host.tv_sec > 0) {
+ if (ktime_to_ns(time_ref->tv_host) > 0) {
u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
@@ -118,26 +103,26 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
*/
void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
{
- if (time_ref->tv_host_0.tv_sec == 0) {
+ if (ktime_to_ns(time_ref->tv_host_0) == 0) {
/* use monotonic clock to correctly compute further deltas */
- time_ref->tv_host_0 = ktime_to_timeval(ktime_get());
- time_ref->tv_host.tv_sec = 0;
+ time_ref->tv_host_0 = ktime_get();
+ time_ref->tv_host = ktime_set(0, 0);
} else {
/*
- * delta_us should not be >= 2^32 => delta_s should be < 4294
+ * delta_us should not be >= 2^32 => delta should be < 4294s
* handle 32-bits wrapping here: if count of s. reaches 4200,
* reset counters and change time base
*/
- if (time_ref->tv_host.tv_sec != 0) {
- u32 delta_s = time_ref->tv_host.tv_sec
- - time_ref->tv_host_0.tv_sec;
- if (delta_s > 4200) {
+ if (ktime_to_ns(time_ref->tv_host)) {
+ ktime_t delta = ktime_sub(time_ref->tv_host,
+ time_ref->tv_host_0);
+ if (ktime_to_ns(delta) > (4200ull * NSEC_PER_SEC)) {
time_ref->tv_host_0 = time_ref->tv_host;
time_ref->ts_total = 0;
}
}
- time_ref->tv_host = ktime_to_timeval(ktime_get());
+ time_ref->tv_host = ktime_get();
time_ref->tick_count++;
}
@@ -146,13 +131,12 @@ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
}
/*
- * compute timeval according to current ts and time_ref data
+ * compute time according to current ts and time_ref data
*/
-void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
- struct timeval *tv)
+void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
{
- /* protect from getting timeval before setting now */
- if (time_ref->tv_host.tv_sec > 0) {
+ /* protect from getting time before setting now */
+ if (ktime_to_ns(time_ref->tv_host)) {
u64 delta_us;
delta_us = ts - time_ref->ts_dev_2;
@@ -164,10 +148,9 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
delta_us *= time_ref->adapter->us_per_ts_scale;
delta_us >>= time_ref->adapter->us_per_ts_shift;
- *tv = time_ref->tv_host_0;
- peak_usb_add_us(tv, (u32)delta_us);
+ *time = ktime_add_us(time_ref->tv_host_0, delta_us);
} else {
- *tv = ktime_to_timeval(ktime_get());
+ *time = ktime_get();
}
}
@@ -175,13 +158,11 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
* post received skb after having set any hw timestamp
*/
int peak_usb_netif_rx(struct sk_buff *skb,
- struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high)
+ struct peak_time_ref *time_ref, u32 ts_low)
{
struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
- struct timeval tv;
- peak_usb_get_ts_tv(time_ref, ts_low, &tv);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(time_ref, ts_low, &hwts->hwtstamp);
return netif_rx(skb);
}
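
The ktime_t conversion above replaces the hand-rolled seconds/microseconds carry (the removed peak_usb_add_us()) with ktime_add_us(), since ktime_t is a plain 64-bit count of nanoseconds. For reference, the helper is essentially:

	static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
	{
		return ktime_add_ns(kt, usec * NSEC_PER_USEC);
	}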
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index c01316cac354..fb23489e1cb8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -96,7 +96,7 @@ extern const struct peak_usb_adapter pcan_usb_pro_fd;
extern const struct peak_usb_adapter pcan_usb_x6;
struct peak_time_ref {
- struct timeval tv_host_0, tv_host;
+ ktime_t tv_host_0, tv_host;
u32 ts_dev_1, ts_dev_2;
u64 ts_total;
u32 tick_count;
@@ -151,10 +151,9 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
const struct peak_usb_adapter *adapter);
void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
-void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
- struct timeval *tv);
+void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
int peak_usb_netif_rx(struct sk_buff *skb,
- struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high);
+ struct peak_time_ref *time_ref, u32 ts_low);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 7ccdc3e30c98..dd161c5eea8e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
int err = 0;
u8 *packet_ptr;
- int i, n = 1, packet_len;
+ int packet_len;
ptrdiff_t cmd_len;
/* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
}
packet_ptr = cmd_head;
+ packet_len = cmd_len;
/* firmware is not able to re-assemble a 512 byte buffer in full-speed */
- if ((dev->udev->speed != USB_SPEED_HIGH) &&
- (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
- packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
- n += cmd_len / packet_len;
- } else {
- packet_len = cmd_len;
- }
+ if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
+ packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
- for (i = 0; i < n; i++) {
+ do {
err = usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
}
packet_ptr += packet_len;
- }
+ cmd_len -= packet_len;
+
+ if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
+ packet_len = cmd_len;
+
+ } while (packet_len > 0);
return err;
}
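
The rewritten loop above sends the command buffer in device-sized chunks on full-speed links. Assuming PCAN_UFD_LOSPD_PKT_SIZE is 64 bytes (an assumption for illustration only), a 150-byte command goes out as three bulk transfers of 64 + 64 + 22 bytes: after each transfer cmd_len shrinks by packet_len, once the remainder drops below the chunk size packet_len becomes that remainder, and the loop exits when packet_len reaches 0. On high-speed links packet_len starts out as the full cmd_len, so exactly one transfer is made.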
@@ -513,8 +514,7 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
else
memcpy(cfd->data, rm->d, cfd->len);
- peak_usb_netif_rx(skb, &usb_if->time_ref,
- le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high));
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
@@ -574,8 +574,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- peak_usb_netif_rx(skb, &usb_if->time_ref,
- le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high));
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->can_dlc;
@@ -617,8 +616,7 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- peak_usb_netif_rx(skb, &usb_if->time_ref,
- le32_to_cpu(ov->ts_low), le32_to_cpu(ov->ts_high));
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low));
netdev->stats.rx_over_errors++;
netdev->stats.rx_errors++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index bbdd6058cd2f..0105fbfea273 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -531,7 +531,6 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
struct net_device *netdev = dev->netdev;
struct can_frame *can_frame;
struct sk_buff *skb;
- struct timeval tv;
struct skb_shared_hwtstamps *hwts;
skb = alloc_can_skb(netdev, &can_frame);
@@ -549,9 +548,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
else
memcpy(can_frame->data, rx->data, can_frame->can_dlc);
- peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
hwts = skb_hwtstamps(skb);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32),
+ &hwts->hwtstamp);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
@@ -571,7 +570,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
u8 err_mask = 0;
struct sk_buff *skb;
- struct timeval tv;
struct skb_shared_hwtstamps *hwts;
/* nothing should be sent while in BUS_OFF state */
@@ -667,9 +665,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
dev->can.state = new_state;
- peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
hwts = skb_hwtstamps(skb);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
netif_rx(skb);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index a8cb33264ff1..c2b04f505e16 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -61,7 +61,7 @@ MODULE_ALIAS_RTNL_LINK(DRV_NAME);
/*
* CAN test feature:
* Enable the echo on driver level for testing the CAN core echo modes.
- * See Documentation/networking/can.txt for details.
+ * See Documentation/networking/can.rst for details.
*/
static bool echo; /* echo testing. Default: 0 (Off) */
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8404e8852a0f..ed6828821fbd 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
tbp = peer_tb;
}
- if (tbp[IFLA_IFNAME]) {
+ if (ifmp && tbp[IFLA_IFNAME]) {
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
@@ -227,10 +227,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
netif_carrier_off(peer);
err = rtnl_configure_link(peer, ifmp);
- if (err < 0) {
- unregister_netdevice(peer);
- return err;
- }
+ if (err < 0)
+ goto unregister_network_device;
/* register first device */
if (tb[IFLA_IFNAME])
@@ -239,10 +237,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
err = register_netdevice(dev);
- if (err < 0) {
- unregister_netdevice(peer);
- return err;
- }
+ if (err < 0)
+ goto unregister_network_device;
netif_carrier_off(dev);
@@ -254,6 +250,10 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
rcu_assign_pointer(priv->peer, dev);
return 0;
+
+unregister_network_device:
+ unregister_netdevice(peer);
+ return err;
}
static void vxcan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 83a9bc892a3b..2b81b97e994f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -33,7 +33,7 @@ config NET_DSA_MT7530
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
- depends on NET_DSA
+ depends on NET_DSA && NET_DSA_LEGACY
select NET_DSA_TAG_TRAILER
---help---
This enables support for the Marvell 88E6060 ethernet switch
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index f5a8dd96fd75..db830a1141d9 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1029,8 +1029,7 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
EXPORT_SYMBOL(b53_vlan_filtering);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
@@ -1047,8 +1046,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_vlan_prepare);
void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1495,15 +1493,17 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
return false;
}
-static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
- int port)
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- /* Older models support a different tag format that we do not
- * support in net/dsa/tag_brcm.c yet.
+ /* Older models (5325, 5365) support a different tag format that we do
+ * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
+ * mode to be turned on, which means we need to specifically manage ARL
+ * misses on multicast addresses (TBD).
*/
- if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
+ if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
+ !b53_can_enable_brcm_tags(ds, port))
return DSA_TAG_PROTO_NONE;
/* Broadcom BCM58xx chips have a flow accelerator on Port 8
@@ -1514,6 +1514,7 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_BRCM;
}
+EXPORT_SYMBOL(b53_get_tag_protocol);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index daaaa1ecb996..d954cf36ecd8 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -295,11 +295,9 @@ void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
+ const struct switchdev_obj_port_vlan *vlan);
void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
+ const struct switchdev_obj_port_vlan *vlan);
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
@@ -310,6 +308,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b62d47210db8..0378eded31f2 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -34,12 +34,6 @@
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
-static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds,
- int port)
-{
- return DSA_TAG_PROTO_BRCM;
-}
-
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -860,7 +854,7 @@ static const struct b53_io_ops bcm_sf2_io_ops = {
};
static const struct dsa_switch_ops bcm_sf2_ops = {
- .get_tag_protocol = bcm_sf2_sw_get_tag_protocol,
+ .get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
@@ -954,6 +948,9 @@ static const struct of_device_id bcm_sf2_of_match[] = {
{ .compatible = "brcm,bcm7278-switch-v4.0",
.data = &bcm_sf2_7278_data
},
+ { .compatible = "brcm,bcm7278-switch-v4.8",
+ .data = &bcm_sf2_7278_data
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index bb71d3d6f65b..7aa84ee4e771 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -174,9 +174,9 @@ static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static int
+dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
@@ -193,8 +193,7 @@ static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
}
static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index b24566bb74d2..6171c0853ff1 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -249,6 +249,28 @@ static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
return -EIO;
}
+static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask)
+{
+ int i;
+
+ for (i = 0; i < 25; i++) {
+ u32 reg;
+ int ret;
+
+ ret = lan9303_read(chip->regmap, offset, &reg);
+ if (ret) {
+ dev_err(chip->dev, "%s failed to read offset %d: %d\n",
+ __func__, offset, ret);
+ return ret;
+ }
+ if (!(reg & mask))
+ return 0;
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum)
{
int ret;
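
The new lan9303_read_wait() helper centralizes the busy-poll pattern used below: up to 25 reads with a 1-2 ms sleep between them, i.e. a worst-case wait of roughly 25-50 ms, now returning -ETIMEDOUT instead of the previous -EIO on expiry. Both completion waiters that follow reduce to one-line wrappers around it.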
@@ -274,22 +296,8 @@ static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val)
static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip)
{
- int ret, i;
- u32 reg;
-
- for (i = 0; i < 25; i++) {
- ret = lan9303_read(chip->regmap, LAN9303_PMI_ACCESS, &reg);
- if (ret) {
- dev_err(chip->dev,
- "Failed to read pmi access status: %d\n", ret);
- return ret;
- }
- if (!(reg & LAN9303_PMI_ACCESS_MII_BUSY))
- return 0;
- usleep_range(1000, 2000);
- }
-
- return -EIO;
+ return lan9303_read_wait(chip, LAN9303_PMI_ACCESS,
+ LAN9303_PMI_ACCESS_MII_BUSY);
}
static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum)
@@ -366,22 +374,8 @@ EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops);
static int lan9303_switch_wait_for_completion(struct lan9303 *chip)
{
- int ret, i;
- u32 reg;
-
- for (i = 0; i < 25; i++) {
- ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_CMD, &reg);
- if (ret) {
- dev_err(chip->dev,
- "Failed to read csr command status: %d\n", ret);
- return ret;
- }
- if (!(reg & LAN9303_SWITCH_CSR_CMD_BUSY))
- return 0;
- usleep_range(1000, 2000);
- }
-
- return -EIO;
+ return lan9303_read_wait(chip, LAN9303_SWITCH_CSR_CMD,
+ LAN9303_SWITCH_CSR_CMD_BUSY);
}
static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val)
@@ -485,7 +479,8 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
{
int reg;
- /* depending on the 'phy_addr_sel_strap' setting, the three phys are
+ /* Calculate chip->phy_addr_base:
+ * Depending on the 'phy_addr_sel_strap' setting, the three phys are
* using IDs 0-1-2 or IDs 1-2-3. We cannot read back the
* 'phy_addr_sel_strap' setting directly, so we need a test, which
* configuration is active:
@@ -500,13 +495,10 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
return reg;
}
- if ((reg != 0) && (reg != 0xffff))
- chip->phy_addr_sel_strap = 1;
- else
- chip->phy_addr_sel_strap = 0;
+ chip->phy_addr_base = reg != 0 && reg != 0xffff;
dev_dbg(chip->dev, "Phy setup '%s' detected\n",
- chip->phy_addr_sel_strap ? "1-2-3" : "0-1-2");
+ chip->phy_addr_base ? "1-2-3" : "0-1-2");
return 0;
}
@@ -546,20 +538,19 @@ lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr)
return NULL;
}
-/* Wait a while until mask & reg == value. Otherwise return timeout. */
-static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno,
- int mask, char value)
+static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask)
{
int i;
- for (i = 0; i < 0x1000; i++) {
+ for (i = 0; i < 25; i++) {
u32 reg;
lan9303_read_switch_reg(chip, regno, &reg);
- if ((reg & mask) == value)
+ if (!(reg & mask))
return 0;
usleep_range(1000, 2000);
}
+
return -ETIMEDOUT;
}
@@ -569,8 +560,7 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_MAKE_ENTRY);
- lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND,
- 0);
+ lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
return 0;
@@ -583,6 +573,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
{
int i;
+ mutex_lock(&chip->alr_mutex);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_GET_FIRST);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
@@ -606,6 +597,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
LAN9303_ALR_CMD_GET_NEXT);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
}
+ mutex_unlock(&chip->alr_mutex);
}
static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -694,16 +686,20 @@ static int lan9303_alr_add_port(struct lan9303 *chip, const u8 *mac, int port,
{
struct lan9303_alr_cache_entry *entr;
+ mutex_lock(&chip->alr_mutex);
entr = lan9303_alr_cache_find_mac(chip, mac);
if (!entr) { /* New entry */
entr = lan9303_alr_cache_find_free(chip);
- if (!entr)
+ if (!entr) {
+ mutex_unlock(&chip->alr_mutex);
return -ENOSPC;
+ }
ether_addr_copy(entr->mac_addr, mac);
}
entr->port_map |= BIT(port);
entr->stp_override = stp_override;
lan9303_alr_set_entry(chip, mac, entr->port_map, stp_override);
+ mutex_unlock(&chip->alr_mutex);
return 0;
}
@@ -713,15 +709,18 @@ static int lan9303_alr_del_port(struct lan9303 *chip, const u8 *mac, int port)
{
struct lan9303_alr_cache_entry *entr;
+ mutex_lock(&chip->alr_mutex);
entr = lan9303_alr_cache_find_mac(chip, mac);
if (!entr)
- return 0; /* no static entry found */
+ goto out; /* no static entry found */
entr->port_map &= ~BIT(port);
if (entr->port_map == 0) /* zero means it's free again */
eth_zero_addr(entr->mac_addr);
lan9303_alr_set_entry(chip, mac, entr->port_map, entr->stp_override);
+out:
+ mutex_unlock(&chip->alr_mutex);
return 0;
}
@@ -818,18 +817,16 @@ static void lan9303_bridge_ports(struct lan9303 *chip)
lan9303_alr_add_port(chip, eth_stp_addr, 0, true);
}
-static int lan9303_handle_reset(struct lan9303 *chip)
+static void lan9303_handle_reset(struct lan9303 *chip)
{
if (!chip->reset_gpio)
- return 0;
+ return;
if (chip->reset_duration != 0)
msleep(chip->reset_duration);
/* release (deassert) reset and activate the device */
gpiod_set_value_cansleep(chip->reset_gpio, 0);
-
- return 0;
}
/* stop processing packets for all ports */
@@ -866,7 +863,7 @@ static int lan9303_check_device(struct lan9303 *chip)
if ((reg >> 16) != LAN9303_CHIP_ID) {
dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
reg >> 16);
- return ret;
+ return -ENODEV;
}
/* The default state of the LAN9303 device is to forward packets between
@@ -1018,7 +1015,7 @@ static int lan9303_get_sset_count(struct dsa_switch *ds)
static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct lan9303 *chip = ds->priv;
- int phy_base = chip->phy_addr_sel_strap;
+ int phy_base = chip->phy_addr_base;
if (phy == phy_base)
return lan9303_virt_phy_reg_read(chip, regnum);
@@ -1032,7 +1029,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
u16 val)
{
struct lan9303 *chip = ds->priv;
- int phy_base = chip->phy_addr_sel_strap;
+ int phy_base = chip->phy_addr_base;
if (phy == phy_base)
return lan9303_virt_phy_reg_write(chip, regnum, val);
@@ -1069,7 +1066,7 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
res = lan9303_phy_write(ds, port, MII_BMCR, ctl);
- if (port == chip->phy_addr_sel_strap) {
+ if (port == chip->phy_addr_base) {
/* Virtual Phy: Remove Turbo 200Mbit mode */
lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
@@ -1093,8 +1090,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
lan9303_disable_processing_port(chip, port);
- lan9303_phy_write(ds, chip->phy_addr_sel_strap + port,
- MII_BMCR, BMCR_PDOWN);
+ lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
@@ -1217,8 +1213,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct lan9303 *chip = ds->priv;
@@ -1235,8 +1230,7 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
}
static void lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct lan9303 *chip = ds->priv;
@@ -1284,26 +1278,31 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
static int lan9303_register_switch(struct lan9303 *chip)
{
+ int base;
+
chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
if (!chip->ds)
return -ENOMEM;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
- chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 0xe : 0x7;
+ base = chip->phy_addr_base;
+ chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
return dsa_register_switch(chip->ds);
}
-static void lan9303_probe_reset_gpio(struct lan9303 *chip,
+static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
GPIOD_OUT_LOW);
+ if (IS_ERR(chip->reset_gpio))
+ return PTR_ERR(chip->reset_gpio);
- if (IS_ERR(chip->reset_gpio)) {
+ if (!chip->reset_gpio) {
dev_dbg(chip->dev, "No reset GPIO defined\n");
- return;
+ return 0;
}
chip->reset_duration = 200;
@@ -1318,6 +1317,8 @@ static void lan9303_probe_reset_gpio(struct lan9303 *chip,
/* A sane reset duration should not be longer than 1s */
if (chip->reset_duration > 1000)
chip->reset_duration = 1000;
+
+ return 0;
}
int lan9303_probe(struct lan9303 *chip, struct device_node *np)
@@ -1325,13 +1326,14 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
int ret;
mutex_init(&chip->indirect_mutex);
+ mutex_init(&chip->alr_mutex);
- lan9303_probe_reset_gpio(chip, np);
-
- ret = lan9303_handle_reset(chip);
+ ret = lan9303_probe_reset_gpio(chip, np);
if (ret)
return ret;
+ lan9303_handle_reset(chip);
+
ret = lan9303_check_device(chip);
if (ret)
return ret;
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index b5be93a1e0df..663b0d5b982b 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -559,8 +559,7 @@ static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag)
}
static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
/* nothing needed */
@@ -568,8 +567,7 @@ static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
}
static void ksz_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
@@ -858,16 +856,14 @@ exit:
}
static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
/* nothing to do */
return 0;
}
static void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
u32 static_table[4];
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 2820d69810b3..8a0bb000d056 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -805,6 +805,69 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
}
static void
+mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+ bool all_user_ports_removed = true;
+ int i;
+
+ /* When a port is removed from the bridge, the port is set back to
+ * its default state at initial boot, which is a VLAN-unaware port.
+ */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_MATRIX_MODE);
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
+
+ priv->ports[port].vlan_filtering = false;
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ if (dsa_is_user_port(ds, i) &&
+ priv->ports[i].vlan_filtering) {
+ all_user_ports_removed = false;
+ break;
+ }
+ }
+
+ /* The CPU port is reset the same way once all user ports belonging
+ * to it have left VLAN filtering mode.
+ */
+ if (all_user_ports_removed) {
+ mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
+ PCR_MATRIX(dsa_user_ports(priv->ds)));
+ mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT),
+ PORT_SPEC_TAG);
+ }
+}
+
+static void
+mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* The real fabric path is decided by the port membership in the
+ * VLAN table entry. Setting PCR_MATRIX to ALL_MEMBERS here means
+ * a VLAN may consist of any subset of the ports.
+ */
+ mt7530_rmw(priv, MT7530_PCR_P(port),
+ PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
+
+ /* Trapping the port into security mode makes packet forwarding
+ * subject to the VLAN table lookup.
+ */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_SECURITY_MODE);
+
+ /* Set the port as a user port, so the VID of incoming packets is
+ * recognized before the entry is fetched from the VLAN table.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER));
+}
+
+static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
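
The per-port reconfiguration above relies on mt7530_rmw(), the driver's read-modify-write primitive. A minimal sketch of the assumed semantics (the real helper also serializes access to the switch's register bus):

	static void mt7530_rmw(struct mt7530_priv *priv, u32 reg,
			       u32 mask, u32 set)
	{
		u32 val;

		val = mt7530_read(priv, reg);
		val &= ~mask;
		val |= set;
		mt7530_write(priv, reg, val);
	}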
@@ -817,8 +880,11 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
/* Remove this port from the port matrix of the other ports
* in the same bridge. If the port is disabled, port matrix
* is kept and not being setup until the port becomes enabled.
+ * Also, another port's port matrix must not be cleared while that
+ * port is still VLAN-aware.
*/
- if (dsa_is_user_port(ds, i) && i != port) {
+ if (!priv->ports[i].vlan_filtering &&
+ dsa_is_user_port(ds, i) && i != port) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
@@ -836,6 +902,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
PCR_MATRIX(BIT(MT7530_CPU_PORT)));
priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ mt7530_port_set_vlan_unaware(ds, port);
+
mutex_unlock(&priv->reg_mutex);
}
@@ -906,6 +974,220 @@ err:
return 0;
}
+static int
+mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
+{
+ struct mt7530_dummy_poll p;
+ u32 val;
+ int ret;
+
+ val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
+ mt7530_write(priv, MT7530_VTCR, val);
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
+ ret = readx_poll_timeout(_mt7530_read, &p, val,
+ !(val & VTCR_BUSY), 20, 20000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ return ret;
+ }
+
+ val = mt7530_read(priv, MT7530_VTCR);
+ if (val & VTCR_INVALID) {
+ dev_err(priv->dev, "read VTCR invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
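+ /* Note: readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us)
+ * above repeatedly stores op(addr) into val until cond holds or the
+ * timeout (here 20 ms, polling every 20 us) expires. The dummy-poll
+ * object packages the private data and register offset so a
+ * one-argument read routine can be passed in; a sketch of the assumed
+ * shape:
+ *
+ *	struct mt7530_dummy_poll {
+ *		struct mt7530_priv *priv;
+ *		u32 reg;
+ *	};
+ *
+ *	static u32 _mt7530_read(struct mt7530_dummy_poll *p)
+ *	{
+ *		return mt7530_read(p->priv, p->reg);
+ *	}
+ */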
+static int
+mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ priv->ports[port].vlan_filtering = vlan_filtering;
+
+ if (vlan_filtering) {
+ /* The port is kept VLAN-unaware when the bridge is set up without
+ * vlan_filtering. Otherwise, the port and the corresponding CPU
+ * port must be set up as VLAN-aware ports.
+ */
+ mt7530_port_set_vlan_aware(ds, port);
+ mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+ }
+
+ return 0;
+}
+
+static int
+mt7530_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ /* nothing needed */
+
+ return 0;
+}
+
+static void
+mt7530_hw_vlan_add(struct mt7530_priv *priv,
+ struct mt7530_hw_vlan_entry *entry)
+{
+ u8 new_members;
+ u32 val;
+
+ new_members = entry->old_members | BIT(entry->port) |
+ BIT(MT7530_CPU_PORT);
+
+	/* Mark the entry valid with independent VLAN learning and
+	 * per-VLAN egress tagging, and join the port to the member set.
+	 */
+ val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | VLAN_VALID;
+ mt7530_write(priv, MT7530_VAWD1, val);
+
+	/* Decide whether packets leaving the port on this VLAN are sent
+	 * tagged or untagged.
+	 */
+ val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG :
+ MT7530_VLAN_EGRESS_TAG;
+ mt7530_rmw(priv, MT7530_VAWD2,
+ ETAG_CTRL_P_MASK(entry->port),
+ ETAG_CTRL_P(entry->port, val));
+
+	/* The CPU port is always a tagged member so it can serve several
+	 * VLANs at once, and uses egress stack mode so that the VLAN tag
+	 * is appended after the hardware special tag used as the DSA tag.
+	 */
+ mt7530_rmw(priv, MT7530_VAWD2,
+ ETAG_CTRL_P_MASK(MT7530_CPU_PORT),
+ ETAG_CTRL_P(MT7530_CPU_PORT,
+ MT7530_VLAN_EGRESS_STACK));
+}
+
+static void
+mt7530_hw_vlan_del(struct mt7530_priv *priv,
+ struct mt7530_hw_vlan_entry *entry)
+{
+ u8 new_members;
+ u32 val;
+
+ new_members = entry->old_members & ~BIT(entry->port);
+
+ val = mt7530_read(priv, MT7530_VAWD1);
+ if (!(val & VLAN_VALID)) {
+ dev_err(priv->dev,
+ "Cannot be deleted due to invalid entry\n");
+ return;
+ }
+
+	/* Keep the entry valid as long as any member other than the CPU
+	 * port remains in the VLAN. Otherwise, disable the entry.
+	 */
+ if (new_members && new_members != BIT(MT7530_CPU_PORT)) {
+ val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
+ VLAN_VALID;
+ mt7530_write(priv, MT7530_VAWD1, val);
+ } else {
+ mt7530_write(priv, MT7530_VAWD1, 0);
+ mt7530_write(priv, MT7530_VAWD2, 0);
+ }
+}
+
+static void
+mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
+ struct mt7530_hw_vlan_entry *entry,
+ mt7530_vlan_op vlan_op)
+{
+ u32 val;
+
+ /* Fetch entry */
+ mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);
+
+ val = mt7530_read(priv, MT7530_VAWD1);
+
+ entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;
+
+ /* Manipulate entry */
+ vlan_op(priv, entry);
+
+ /* Flush result to hardware */
+ mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
+}
+
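
mt7530_hw_vlan_update() above factors the add and delete paths into a single
fetch/modify/flush sequence, with the modification step passed in as a
callback. The same shape reduced to self-contained C (all names illustrative):

    struct entry { unsigned int members; };
    typedef void (*entry_op)(struct entry *e, int port);

    static void entry_add(struct entry *e, int port) { e->members |= 1u << port; }
    static void entry_del(struct entry *e, int port) { e->members &= ~(1u << port); }

    static void update_entry(struct entry *table, unsigned int vid,
                             int port, entry_op op)
    {
            struct entry e = table[vid];    /* fetch entry       */
            op(&e, port);                   /* manipulate entry  */
            table[vid] = e;                 /* flush result back */
    }
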
+static void
+mt7530_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct mt7530_hw_vlan_entry new_entry;
+ struct mt7530_priv *priv = ds->priv;
+ u16 vid;
+
+	/* The port stays VLAN-unaware if the bridge was set up without
+	 * vlan_filtering.
+	 */
+ if (!priv->ports[port].vlan_filtering)
+ return;
+
+ mutex_lock(&priv->reg_mutex);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
+ mt7530_hw_vlan_update(priv, vid, &new_entry,
+ mt7530_hw_vlan_add);
+ }
+
+ if (pvid) {
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID(vlan->vid_end));
+ priv->ports[port].pvid = vlan->vid_end;
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+mt7530_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mt7530_hw_vlan_entry target_entry;
+ struct mt7530_priv *priv = ds->priv;
+ u16 vid, pvid;
+
+	/* The port stays VLAN-unaware if the bridge was set up without
+	 * vlan_filtering.
+	 */
+ if (!priv->ports[port].vlan_filtering)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+
+ pvid = priv->ports[port].pvid;
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ mt7530_hw_vlan_entry_init(&target_entry, port, 0);
+ mt7530_hw_vlan_update(priv, vid, &target_entry,
+ mt7530_hw_vlan_del);
+
+		/* Restore the PVID to the default whenever the port is
+		 * removed from the VLAN it uses as PVID.
+		 */
+ if (pvid == vid)
+ pvid = G0_PORT_VID_DEF;
+ }
+
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid);
+ priv->ports[port].pvid = pvid;
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
static enum dsa_tag_protocol
mtk_get_tag_protocol(struct dsa_switch *ds, int port)
{
@@ -1035,6 +1317,10 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_fdb_add = mt7530_port_fdb_add,
.port_fdb_del = mt7530_port_fdb_del,
.port_fdb_dump = mt7530_port_fdb_dump,
+ .port_vlan_filtering = mt7530_port_vlan_filtering,
+ .port_vlan_prepare = mt7530_port_vlan_prepare,
+ .port_vlan_add = mt7530_port_vlan_add,
+ .port_vlan_del = mt7530_port_vlan_del,
};
static int
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 74db9822eb40..d9b407a22a58 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -17,6 +17,7 @@
#define MT7530_NUM_PORTS 7
#define MT7530_CPU_PORT 6
#define MT7530_NUM_FDB_RECORDS 2048
+#define MT7530_ALL_MEMBERS 0xff
#define NUM_TRGMII_CTRL 5
@@ -88,21 +89,42 @@ enum mt7530_fdb_cmd {
/* Register for vlan table control */
#define MT7530_VTCR 0x90
#define VTCR_BUSY BIT(31)
-#define VTCR_FUNC (((x) & 0xf) << 12)
-#define VTCR_FUNC_RD_VID 0x1
-#define VTCR_FUNC_WR_VID 0x2
-#define VTCR_FUNC_INV_VID 0x3
-#define VTCR_FUNC_VAL_VID 0x4
+#define VTCR_INVALID BIT(16)
+#define VTCR_FUNC(x) (((x) & 0xf) << 12)
#define VTCR_VID ((x) & 0xfff)
+enum mt7530_vlan_cmd {
+	/* Read/write the VLAN entry for the specified VID through the
+	 * VAWD registers.
+	 */
+ MT7530_VTCR_RD_VID = 0,
+ MT7530_VTCR_WR_VID = 1,
+};
+
/* Register for setup vlan and acl write data */
#define MT7530_VAWD1 0x94
#define PORT_STAG BIT(31)
+/* Independent VLAN Learning */
#define IVL_MAC BIT(30)
+/* Per VLAN Egress Tag Control */
+#define VTAG_EN BIT(28)
+/* VLAN Member Control */
#define PORT_MEM(x) (((x) & 0xff) << 16)
-#define VALID BIT(1)
+/* VLAN Entry Valid */
+#define VLAN_VALID BIT(0)
+#define PORT_MEM_SHFT 16
+#define PORT_MEM_MASK 0xff
#define MT7530_VAWD2 0x98
+/* Egress Tag Control */
+#define ETAG_CTRL_P(p, x) (((x) & 0x3) << ((p) << 1))
+#define ETAG_CTRL_P_MASK(p) ETAG_CTRL_P(p, 3)
+
+enum mt7530_vlan_egress_attr {
+ MT7530_VLAN_EGRESS_UNTAG = 0,
+ MT7530_VLAN_EGRESS_TAG = 2,
+ MT7530_VLAN_EGRESS_STACK = 3,
+};
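
ETAG_CTRL_P() packs one 2-bit egress-tag control field per port into a single
register, with port p's field at bit offset 2*p. The packing in isolation, as
a standalone sketch:

    #include <stdint.h>

    #define FIELD2(p, x)    (((uint32_t)(x) & 0x3) << ((p) << 1))
    #define FIELD2_MASK(p)  FIELD2(p, 0x3)

    /* Replace one port's 2-bit field, leaving the other ports untouched. */
    static uint32_t set_port_field(uint32_t reg, int port, unsigned int val)
    {
            reg &= ~FIELD2_MASK(port);
            reg |= FIELD2(port, val);
            return reg;
    }
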
/* Register for port STP state control */
#define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100))
@@ -120,11 +142,23 @@ enum mt7530_stp_state {
/* Register for port control */
#define MT7530_PCR_P(x) (0x2004 + ((x) * 0x100))
#define PORT_VLAN(x) ((x) & 0x3)
+
+enum mt7530_port_mode {
+ /* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
+ MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
+
+	/* Security Mode: Discard any frame with an ingress membership
+	 * violation or whose VID is missing from the VLAN table.
+	 */
+ MT7530_PORT_SECURITY_MODE = PORT_VLAN(3),
+};
+
#define PCR_MATRIX(x) (((x) & 0xff) << 16)
#define PORT_PRI(x) (((x) & 0x7) << 24)
#define EG_TAG(x) (((x) & 0x3) << 28)
#define PCR_MATRIX_MASK PCR_MATRIX(0xff)
#define PCR_MATRIX_CLR PCR_MATRIX(0)
+#define PCR_PORT_VLAN_MASK PORT_VLAN(3)
/* Register for port security control */
#define MT7530_PSC_P(x) (0x200c + ((x) * 0x100))
@@ -134,10 +168,20 @@ enum mt7530_stp_state {
#define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100))
#define PORT_SPEC_TAG BIT(5)
#define VLAN_ATTR(x) (((x) & 0x3) << 6)
+#define VLAN_ATTR_MASK VLAN_ATTR(3)
+
+enum mt7530_vlan_port_attr {
+ MT7530_VLAN_USER = 0,
+ MT7530_VLAN_TRANSPARENT = 3,
+};
+
#define STAG_VPID (((x) & 0xffff) << 16)
/* Register for port port-and-protocol based vlan 1 control */
#define MT7530_PPBV1_P(x) (0x2014 + ((x) * 0x100))
+#define G0_PORT_VID(x) (((x) & 0xfff) << 0)
+#define G0_PORT_VID_MASK G0_PORT_VID(0xfff)
+#define G0_PORT_VID_DEF G0_PORT_VID(1)
/* Register for port MAC control register */
#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
@@ -345,9 +389,20 @@ struct mt7530_fdb {
bool noarp;
};
+/* struct mt7530_port - The main data structure holding the state of
+ * a port.
+ * @enable: Whether the port is enabled.
+ * @pm: The port matrix recording all connections with the port.
+ * @pvid: The VLAN treated as the PVID at ingress; untagged frames
+ * are assigned to this VLAN.
+ * @vlan_filtering: Whether the port can recognize VLAN-tagged
+ * frames.
+ */
struct mt7530_port {
bool enable;
u32 pm;
+ u16 pvid;
+ bool vlan_filtering;
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -382,6 +437,22 @@ struct mt7530_priv {
struct mutex reg_mutex;
};
+struct mt7530_hw_vlan_entry {
+ int port;
+ u8 old_members;
+ bool untagged;
+};
+
+static inline void mt7530_hw_vlan_entry_init(struct mt7530_hw_vlan_entry *e,
+ int port, bool untagged)
+{
+ e->port = port;
+ e->untagged = untagged;
+}
+
+typedef void (*mt7530_vlan_op)(struct mt7530_priv *,
+ struct mt7530_hw_vlan_entry *);
+
struct mt7530_hw_stats {
const char *string;
u16 reg;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 66d33e97cbc5..eb328bade225 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1185,8 +1185,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
static int
mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -1295,8 +1294,7 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
}
static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1725,9 +1723,11 @@ static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
{
- bool flood = port == dsa_upstream_port(chip->ds);
+ struct dsa_switch *ds = chip->ds;
+ bool flood;
/* Upstream ports flood frames with unknown unicast or multicast DA */
+ flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port);
if (chip->info->ops->port_set_egress_floods)
return chip->info->ops->port_set_egress_floods(chip, port,
flood, flood);
@@ -1744,6 +1744,39 @@ static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
+{
+ struct dsa_switch *ds = chip->ds;
+ int upstream_port;
+ int err;
+
+ upstream_port = dsa_upstream_port(ds, port);
+ if (chip->info->ops->port_set_upstream_port) {
+ err = chip->info->ops->port_set_upstream_port(chip, port,
+ upstream_port);
+ if (err)
+ return err;
+ }
+
+ if (port == upstream_port) {
+ if (chip->info->ops->set_cpu_port) {
+ err = chip->info->ops->set_cpu_port(chip,
+ upstream_port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->set_egress_port) {
+ err = chip->info->ops->set_egress_port(chip,
+ upstream_port);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
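
The helper above, like much of mv88e6xxx, guards every chip-specific step with
a check of the per-chip ops table, so chips that lack a feature simply skip
it. The optional-op dispatch pattern in miniature (names illustrative):

    struct dev_ops {
            int (*set_thing)(void *dev, int val);
    };

    static int maybe_set_thing(const struct dev_ops *ops, void *dev, int val)
    {
            if (!ops->set_thing)
                    return 0;       /* chip lacks the feature: not an error */

            return ops->set_thing(dev, val);
    }
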
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
@@ -1814,13 +1847,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg = 0;
- if (chip->info->ops->port_set_upstream_port) {
- err = chip->info->ops->port_set_upstream_port(
- chip, port, dsa_upstream_port(ds));
- if (err)
- return err;
- }
+ err = mv88e6xxx_setup_upstream_port(chip, port);
+ if (err)
+ return err;
err = mv88e6xxx_port_set_8021q_mode(chip, port,
MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
@@ -1946,21 +1975,8 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
{
struct dsa_switch *ds = chip->ds;
- u32 upstream_port = dsa_upstream_port(ds);
int err;
- if (chip->info->ops->set_cpu_port) {
- err = chip->info->ops->set_cpu_port(chip, upstream_port);
- if (err)
- return err;
- }
-
- if (chip->info->ops->set_egress_port) {
- err = chip->info->ops->set_egress_port(chip, upstream_port);
- if (err)
- return err;
- }
-
/* Disable remote management, and set the switch's DSA device number. */
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2,
MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE |
@@ -3741,6 +3757,7 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->info->tag_protocol;
}
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
void **priv)
@@ -3788,10 +3805,10 @@ free:
return NULL;
}
+#endif
static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
/* We don't need any dynamic resource from the kernel (yet),
* so skip the prepare phase.
@@ -3801,8 +3818,7 @@ static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
}
static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -3829,7 +3845,9 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
}
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
.probe = mv88e6xxx_drv_probe,
+#endif
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
.adjust_link = mv88e6xxx_adjust_link,
@@ -3958,11 +3976,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out_g1_irq;
}
+
+ err = mv88e6xxx_g1_atu_prob_irq_setup(chip);
+ if (err)
+ goto out_g2_irq;
+
+ err = mv88e6xxx_g1_vtu_prob_irq_setup(chip);
+ if (err)
+ goto out_g1_atu_prob_irq;
}
err = mv88e6xxx_mdios_register(chip, np);
if (err)
- goto out_g2_irq;
+ goto out_g1_vtu_prob_irq;
err = mv88e6xxx_register_switch(chip);
if (err)
@@ -3972,6 +3998,12 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
out_mdio:
mv88e6xxx_mdios_unregister(chip);
+out_g1_vtu_prob_irq:
+ if (chip->irq > 0)
+ mv88e6xxx_g1_vtu_prob_irq_free(chip);
+out_g1_atu_prob_irq:
+ if (chip->irq > 0)
+ mv88e6xxx_g1_atu_prob_irq_free(chip);
out_g2_irq:
if (chip->info->g2_irqs > 0 && chip->irq > 0)
mv88e6xxx_g2_irq_free(chip);
@@ -3995,6 +4027,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_mdios_unregister(chip);
if (chip->irq > 0) {
+ mv88e6xxx_g1_vtu_prob_irq_free(chip);
+ mv88e6xxx_g1_atu_prob_irq_free(chip);
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
mutex_lock(&chip->reg_lock);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 334f6f7544ba..3dba6e90adcf 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -207,6 +207,8 @@ struct mv88e6xxx_chip {
int irq;
int device_irq;
int watchdog_irq;
+ int atu_prob_irq;
+ int vtu_prob_irq;
};
struct mv88e6xxx_bus_ops {
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index b0dc7518b47f..6aee7316fea6 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -29,9 +29,9 @@
#define MV88E6XXX_G1_STS_IRQ_AVB 8
#define MV88E6XXX_G1_STS_IRQ_DEVICE 7
#define MV88E6XXX_G1_STS_IRQ_STATS 6
-#define MV88E6XXX_G1_STS_IRQ_VTU_PROBLEM 5
+#define MV88E6XXX_G1_STS_IRQ_VTU_PROB 5
#define MV88E6XXX_G1_STS_IRQ_VTU_DONE 4
-#define MV88E6XXX_G1_STS_IRQ_ATU_PROBLEM 3
+#define MV88E6XXX_G1_STS_IRQ_ATU_PROB 3
#define MV88E6XXX_G1_STS_IRQ_ATU_DONE 2
#define MV88E6XXX_G1_STS_IRQ_TCAM_DONE 1
#define MV88E6XXX_G1_STS_IRQ_EEPROM_DONE 0
@@ -82,6 +82,10 @@
#define MV88E6XXX_G1_VTU_OP_VTU_GET_NEXT 0x4000
#define MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE 0x5000
#define MV88E6XXX_G1_VTU_OP_STU_GET_NEXT 0x6000
+#define MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION 0x7000
+#define MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION BIT(6)
+#define MV88E6XXX_G1_VTU_OP_MISS_VIOLATION BIT(5)
+#define MV88E6XXX_G1_VTU_OP_SPID_MASK 0xf
/* Offset 0x06: VTU VID Register */
#define MV88E6XXX_G1_VTU_VID 0x06
@@ -122,6 +126,10 @@
#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL_DB 0x5000
#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC_DB 0x6000
#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
+#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
+#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
+#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5)
+#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
/* Offset 0x0C: ATU Data Register */
#define MV88E6XXX_G1_ATU_DATA 0x0c
@@ -255,6 +263,8 @@ int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all);
int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
bool all);
+int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
@@ -269,5 +279,7 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index efeef4b01442..20d941f4273b 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -9,6 +9,8 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include "chip.h"
#include "global1.h"
@@ -307,3 +309,88 @@ int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all);
}
+
+static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ struct mv88e6xxx_atu_entry entry;
+ int err;
+ u16 val;
+
+ mutex_lock(&chip->reg_lock);
+
+ err = mv88e6xxx_g1_atu_op(chip, 0,
+ MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_atu_data_read(chip, &entry);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_atu_mac_read(chip, &entry);
+ if (err)
+ goto out;
+
+ mutex_unlock(&chip->reg_lock);
+
+ if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
+ dev_err_ratelimited(chip->dev,
+ "ATU age out violation for %pM\n",
+ entry.mac);
+ }
+
+ if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+ dev_err_ratelimited(chip->dev,
+ "ATU member violation for %pM portvec %x\n",
+ entry.mac, entry.portvec);
+ }
+
+	if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION)
+ dev_err_ratelimited(chip->dev,
+ "ATU miss violation for %pM portvec %x\n",
+ entry.mac, entry.portvec);
+
+ if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION)
+ dev_err_ratelimited(chip->dev,
+ "ATU full violation for %pM portvec %x\n",
+ entry.mac, entry.portvec);
+
+ return IRQ_HANDLED;
+
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
+ err);
+ return IRQ_HANDLED;
+}
+
+int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ chip->atu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
+ MV88E6XXX_G1_STS_IRQ_ATU_PROB);
+ if (chip->atu_prob_irq < 0)
+ return chip->atu_prob_irq;
+
+ err = request_threaded_irq(chip->atu_prob_irq, NULL,
+ mv88e6xxx_g1_atu_prob_irq_thread_fn,
+ IRQF_ONESHOT, "mv88e6xxx-g1-atu-prob",
+ chip);
+ if (err)
+ irq_dispose_mapping(chip->atu_prob_irq);
+
+ return err;
+}
+
+void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip)
+{
+ free_irq(chip->atu_prob_irq, chip);
+ irq_dispose_mapping(chip->atu_prob_irq);
+}
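
Both violation interrupts are registered with a NULL primary handler plus
IRQF_ONESHOT, so all handling runs in a kernel thread (needed here because the
registers sit behind a sleeping MDIO bus) while the line stays masked until
the thread returns. The minimal shape (names illustrative):

    #include <linux/interrupt.h>

    static irqreturn_t my_irq_thread(int irq, void *dev_id)
    {
            /* slow-path work: register reads over MDIO, logging, ... */
            return IRQ_HANDLED;
    }

    static int my_irq_setup(int irq, void *ctx)
    {
            /* NULL primary handler: the core wakes the thread directly */
            return request_threaded_irq(irq, NULL, my_irq_thread,
                                        IRQF_ONESHOT, "my-dev", ctx);
    }
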
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 8c8a0ec3d6e9..7997961647de 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -11,6 +11,9 @@
* (at your option) any later version.
*/
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+
#include "chip.h"
#include "global1.h"
@@ -513,3 +516,74 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
}
+
+static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ struct mv88e6xxx_vtu_entry entry;
+ int spid;
+ int err;
+ u16 val;
+
+ mutex_lock(&chip->reg_lock);
+
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_vtu_vid_read(chip, &entry);
+ if (err)
+ goto out;
+
+ mutex_unlock(&chip->reg_lock);
+
+ spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
+
+ if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
+ dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
+ entry.vid, spid);
+ }
+
+ if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION)
+ dev_err_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
+ entry.vid, spid);
+
+ return IRQ_HANDLED;
+
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ dev_err(chip->dev, "VTU problem: error %d while handling interrupt\n",
+ err);
+
+ return IRQ_HANDLED;
+}
+
+int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ chip->vtu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
+ MV88E6XXX_G1_STS_IRQ_VTU_PROB);
+ if (chip->vtu_prob_irq < 0)
+ return chip->vtu_prob_irq;
+
+ err = request_threaded_irq(chip->vtu_prob_irq, NULL,
+ mv88e6xxx_g1_vtu_prob_irq_thread_fn,
+ IRQF_ONESHOT, "mv88e6xxx-g1-vtu-prob",
+ chip);
+ if (err)
+ irq_dispose_mapping(chip->vtu_prob_irq);
+
+ return err;
+}
+
+void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip)
+{
+ free_irq(chip->vtu_prob_irq, chip);
+ irq_dispose_mapping(chip->vtu_prob_irq);
+}
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 58483af80bdb..30b1c8512049 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -42,48 +42,7 @@
#define DRV_NAME "dummy"
#define DRV_VERSION "1.0"
-#undef pr_fmt
-#define pr_fmt(fmt) DRV_NAME ": " fmt
-
static int numdummies = 1;
-static int num_vfs;
-
-struct vf_data_storage {
- u8 vf_mac[ETH_ALEN];
- u16 pf_vlan; /* When set, guest VLAN config not allowed. */
- u16 pf_qos;
- __be16 vlan_proto;
- u16 min_tx_rate;
- u16 max_tx_rate;
- u8 spoofchk_enabled;
- bool rss_query_enabled;
- u8 trusted;
- int link_state;
-};
-
-struct dummy_priv {
- struct vf_data_storage *vfinfo;
-};
-
-static int dummy_num_vf(struct device *dev)
-{
- return num_vfs;
-}
-
-static struct bus_type dummy_bus = {
- .name = "dummy",
- .num_vf = dummy_num_vf,
-};
-
-static void release_dummy_parent(struct device *dev)
-{
-}
-
-static struct device dummy_parent = {
- .init_name = "dummy",
- .bus = &dummy_bus,
- .release = release_dummy_parent,
-};
/* fake multicast ability */
static void set_multicast_list(struct net_device *dev)
@@ -133,25 +92,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- struct dummy_priv *priv = netdev_priv(dev);
-
dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
if (!dev->dstats)
return -ENOMEM;
- priv->vfinfo = NULL;
-
- if (!num_vfs)
- return 0;
-
- dev->dev.parent = &dummy_parent;
- priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
- GFP_KERNEL);
- if (!priv->vfinfo) {
- free_percpu(dev->dstats);
- return -ENOMEM;
- }
-
return 0;
}
@@ -169,117 +113,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
return 0;
}
-static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
- return -EINVAL;
-
- memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN);
-
- return 0;
-}
-
-static int dummy_set_vf_vlan(struct net_device *dev, int vf,
- u16 vlan, u8 qos, __be16 vlan_proto)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7))
- return -EINVAL;
-
- priv->vfinfo[vf].pf_vlan = vlan;
- priv->vfinfo[vf].pf_qos = qos;
- priv->vfinfo[vf].vlan_proto = vlan_proto;
-
- return 0;
-}
-
-static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].min_tx_rate = min;
- priv->vfinfo[vf].max_tx_rate = max;
-
- return 0;
-}
-
-static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].spoofchk_enabled = val;
-
- return 0;
-}
-
-static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].rss_query_enabled = val;
-
- return 0;
-}
-
-static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].trusted = val;
-
- return 0;
-}
-
-static int dummy_get_vf_config(struct net_device *dev,
- int vf, struct ifla_vf_info *ivi)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- ivi->vf = vf;
- memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN);
- ivi->vlan = priv->vfinfo[vf].pf_vlan;
- ivi->qos = priv->vfinfo[vf].pf_qos;
- ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled;
- ivi->linkstate = priv->vfinfo[vf].link_state;
- ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate;
- ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate;
- ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled;
- ivi->trusted = priv->vfinfo[vf].trusted;
- ivi->vlan_proto = priv->vfinfo[vf].vlan_proto;
-
- return 0;
-}
-
-static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].link_state = state;
-
- return 0;
-}
-
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
.ndo_uninit = dummy_dev_uninit,
@@ -289,14 +122,6 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
.ndo_change_carrier = dummy_change_carrier,
- .ndo_set_vf_mac = dummy_set_vf_mac,
- .ndo_set_vf_vlan = dummy_set_vf_vlan,
- .ndo_set_vf_rate = dummy_set_vf_rate,
- .ndo_set_vf_spoofchk = dummy_set_vf_spoofchk,
- .ndo_set_vf_trust = dummy_set_vf_trust,
- .ndo_get_vf_config = dummy_get_vf_config,
- .ndo_set_vf_link_state = dummy_set_vf_link_state,
- .ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en,
};
static void dummy_get_drvinfo(struct net_device *dev,
@@ -323,13 +148,6 @@ static const struct ethtool_ops dummy_ethtool_ops = {
.get_ts_info = dummy_get_ts_info,
};
-static void dummy_free_netdev(struct net_device *dev)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- kfree(priv->vfinfo);
-}
-
static void dummy_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -338,7 +156,6 @@ static void dummy_setup(struct net_device *dev)
dev->netdev_ops = &dummy_netdev_ops;
dev->ethtool_ops = &dummy_ethtool_ops;
dev->needs_free_netdev = true;
- dev->priv_destructor = dummy_free_netdev;
/* Fill in device structure with ethernet-generic values. */
dev->flags |= IFF_NOARP;
@@ -370,7 +187,6 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[],
static struct rtnl_link_ops dummy_link_ops __read_mostly = {
.kind = DRV_NAME,
- .priv_size = sizeof(struct dummy_priv),
.setup = dummy_setup,
.validate = dummy_validate,
};
@@ -379,16 +195,12 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = {
module_param(numdummies, int, 0);
MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
-module_param(num_vfs, int, 0);
-MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device");
-
static int __init dummy_init_one(void)
{
struct net_device *dev_dummy;
int err;
- dev_dummy = alloc_netdev(sizeof(struct dummy_priv),
- "dummy%d", NET_NAME_ENUM, dummy_setup);
+ dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_ENUM, dummy_setup);
if (!dev_dummy)
return -ENOMEM;
@@ -407,21 +219,6 @@ static int __init dummy_init_module(void)
{
int i, err = 0;
- if (num_vfs) {
- err = bus_register(&dummy_bus);
- if (err < 0) {
- pr_err("registering dummy bus failed\n");
- return err;
- }
-
- err = device_register(&dummy_parent);
- if (err < 0) {
- pr_err("registering dummy parent device failed\n");
- bus_unregister(&dummy_bus);
- return err;
- }
- }
-
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
if (err < 0)
@@ -437,22 +234,12 @@ static int __init dummy_init_module(void)
out:
rtnl_unlock();
- if (err && num_vfs) {
- device_unregister(&dummy_parent);
- bus_unregister(&dummy_bus);
- }
-
return err;
}
static void __exit dummy_cleanup_module(void)
{
rtnl_link_unregister(&dummy_link_ops);
-
- if (num_vfs) {
- device_unregister(&dummy_parent);
- bus_unregister(&dummy_bus);
- }
}
module_init(dummy_init_module);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f4e13a7014bd..36c8950dbd2d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -602,7 +602,7 @@ struct vortex_private {
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
unsigned int cur_rx, cur_tx; /* The next free ring entry */
- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ unsigned int dirty_tx; /* The ring entries to be free()ed. */
struct vortex_extra_stats xstats; /* NIC-specific extra stats */
struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -618,7 +618,6 @@ struct vortex_private {
/* The remainder are related to chip state, mostly media selection. */
struct timer_list timer; /* Media selection timer. */
- struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
int options; /* User-settable misc. driver options. */
unsigned int media_override:4, /* Passed-in media type. */
default_media:4, /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(struct timer_list *t);
-static void rx_oom_timer(struct timer_list *t);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
timer_setup(&vp->timer, vortex_timer, 0);
mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
- timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
if (vortex_debug > 1)
pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
window_write16(vp, 0x0040, 4, Wn4_NetDiag);
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
- vp->cur_rx = vp->dirty_rx = 0;
+ vp->cur_rx = 0;
/* Initialize the RxEarly register as recommended. */
iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
int i;
int retval;
+ dma_addr_t dma;
/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
break; /* Bad news! */
skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
- vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+ dma = pci_map_single(VORTEX_PCI(vp), skb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+ break;
+ vp->rx_ring[i].addr = cpu_to_le32(dma);
}
if (i != RX_RING_SIZE) {
pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
int len = (skb->len + 3) & ~3;
vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
spin_lock_irq(&vp->window_lock);
window_set(vp, 7);
iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
int entry = vp->cur_rx % RX_RING_SIZE;
void __iomem *ioaddr = vp->ioaddr;
int rx_status;
- int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+ int rx_work_limit = RX_RING_SIZE;
if (vortex_debug > 5)
pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
- struct sk_buff *skb;
+ struct sk_buff *skb, *newskb;
+ dma_addr_t newdma;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
+			/* Pre-allocate the replacement skb. If it or its
+			 * mapping fails then recycle the buffer that's
+			 * already in place.
+			 */
+ newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+ if (!newskb) {
+ dev->stats.rx_dropped++;
+ goto clear_complete;
+ }
+ newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+ dev->stats.rx_dropped++;
+ consume_skb(newskb);
+ goto clear_complete;
+ }
+
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
- vp->rx_skbuff[entry] = NULL;
+ vp->rx_skbuff[entry] = newskb;
+ vp->rx_ring[entry].addr = cpu_to_le32(newdma);
skb_put(skb, pkt_len);
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
netif_rx(skb);
dev->stats.rx_packets++;
}
- entry = (++vp->cur_rx) % RX_RING_SIZE;
- }
- /* Refill the Rx ring buffers. */
- for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
- struct sk_buff *skb;
- entry = vp->dirty_rx % RX_RING_SIZE;
- if (vp->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
- if (skb == NULL) {
- static unsigned long last_jif;
- if (time_after(jiffies, last_jif + 10 * HZ)) {
- pr_warn("%s: memory shortage\n",
- dev->name);
- last_jif = jiffies;
- }
- if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
- mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
- break; /* Bad news! */
- }
- vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
- vp->rx_skbuff[entry] = skb;
- }
+clear_complete:
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
iowrite16(UpUnstall, ioaddr + EL3_CMD);
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
}
return 0;
}
-/*
- * If we've hit a total OOM refilling the Rx ring we poll once a second
- * for some memory. Otherwise there is no way to restart the rx process.
- */
-static void
-rx_oom_timer(struct timer_list *t)
-{
- struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
- struct net_device *dev = vp->mii.dev;
-
- spin_lock_irq(&vp->lock);
- if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
- boomerang_rx(dev);
- if (vortex_debug > 1) {
- pr_debug("%s: rx_oom_timer %s\n", dev->name,
- ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
- }
- spin_unlock_irq(&vp->lock);
-}
-
static void
vortex_down(struct net_device *dev, int final_down)
{
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);
- del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */
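
Each new pci_map_single() in this patch is followed by a dma_mapping_error()
check before the address is handed to the NIC; on failure the old buffer is
recycled instead of leaving a hole in the ring. The bare idiom (map_rx_buf()
is an illustrative name; the pci_* wrappers map onto the dma_* API):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    static int map_rx_buf(struct device *dev, struct sk_buff *skb,
                          unsigned int len, dma_addr_t *out)
    {
            dma_addr_t dma = dma_map_single(dev, skb->data, len,
                                            DMA_FROM_DEVICE);

            if (dma_mapping_error(dev, dma))
                    return -ENOMEM; /* caller keeps or recycles the old buffer */

            *out = dma;
            return 0;
    }
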
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 9497f18eaba0..2f91ce8dc614 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -123,7 +123,8 @@ enum mac8390_access {
};
extern int mac8390_memtest(struct net_device *dev);
-static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
+static int mac8390_initdev(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type);
static int mac8390_open(struct net_device *dev);
@@ -169,11 +170,11 @@ static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
static u32 mac8390_msg_enable;
-static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
+static enum mac8390_type __init mac8390_ident(struct nubus_rsrc *fres)
{
- switch (dev->dr_sw) {
+ switch (fres->dr_sw) {
case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_APPLE_SONIC_NB:
case NUBUS_DRHW_APPLE_SONIC_LC:
case NUBUS_DRHW_SONNET:
@@ -184,7 +185,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_ASANTE_LC:
return MAC8390_NONE;
case NUBUS_DRHW_CABLETRON:
@@ -201,7 +202,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
case NUBUS_DRSW_TECHWORKS:
case NUBUS_DRSW_DAYNA2:
case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ if (fres->dr_hw == NUBUS_DRHW_CABLETRON)
return MAC8390_CABLETRON;
else
return MAC8390_APPLE;
@@ -212,7 +213,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_INTERLAN:
return MAC8390_INTERLAN;
default:
@@ -225,8 +226,8 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
* These correspond to Dayna Sonic cards
* which use the macsonic driver
*/
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 ||
+ fres->dr_hw == NUBUS_DRHW_INTERLAN)
return MAC8390_NONE;
else
return MAC8390_DAYNA;
@@ -289,7 +290,8 @@ static int __init mac8390_memsize(unsigned long membase)
return i * 0x1000;
}
-static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+static bool __init mac8390_init(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type cardtype)
{
struct nubus_dir dir;
@@ -394,7 +396,7 @@ static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- struct nubus_dev *ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
int err = -ENODEV;
struct ei_device *ei_local;
@@ -414,8 +416,11 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
- ndev))) {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1 << ndev->board->slot))
continue;
@@ -489,7 +494,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
};
static int __init mac8390_initdev(struct net_device *dev,
- struct nubus_dev *ndev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type)
{
static u32 fwrd4_offsets[16] = {
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c60421339a98..b6cf4b6962f5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -42,6 +42,7 @@ source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -170,6 +171,7 @@ source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
+source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 39f6273358ed..3cdf01e96e0b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_NET_VENDOR_CORTINA) += cortina/
obj-$(CONFIG_CX_ECAT) += ec_bhf.o
obj-$(CONFIG_DM9000) += davicom/
obj-$(CONFIG_DNET) += dnet.o
@@ -82,6 +83,7 @@ obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
+obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index a1a52eb53b14..8f71b79b4949 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1436,13 +1436,13 @@ static int ace_init(struct net_device *dev)
ace_set_txprd(regs, ap, 0);
writel(0, &regs->RxRetCsm);
- /*
- * Enable DMA engine now.
- * If we do this sooner, Mckinley box pukes.
- * I assume it's because Tigon II DMA engine wants to check
- * *something* even before the CPU is started.
- */
- writel(1, &regs->AssistState); /* enable DMA */
+ /*
+ * Enable DMA engine now.
+ * If we do this sooner, Mckinley box pukes.
+ * I assume it's because Tigon II DMA engine wants to check
+ * *something* even before the CPU is started.
+ */
+ writel(1, &regs->AssistState); /* enable DMA */
/*
* Start the NIC CPU
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index b11e573ad57a..ea149c134e15 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -504,3 +504,14 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
return 0;
}
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+	return !cdesc;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index bb53c3a4f8e9..2f7657227cfe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -88,6 +88,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
struct ena_eth_io_intr_reg *intr_reg)
{
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 97c5a89a9cf7..6975150d144e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
+static void check_for_admin_com_state(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter);
+static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
@@ -158,6 +161,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
ring->per_napi_packets = 0;
ring->per_napi_bytes = 0;
ring->cpu = 0;
+ ring->first_interrupt = false;
+ ring->no_interrupt_event_cnt = 0;
u64_stats_init(&ring->syncp);
}
@@ -1274,6 +1279,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
+ ena_napi->tx_ring->first_interrupt = true;
+ ena_napi->rx_ring->first_interrupt = true;
+
napi_schedule_irqoff(&ena_napi->napi);
return IRQ_HANDLED;
@@ -1565,7 +1573,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
static int ena_up_complete(struct ena_adapter *adapter)
{
- int rc, i;
+ int rc;
rc = ena_rss_configure(adapter);
if (rc)
@@ -1584,17 +1592,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
ena_napi_enable_all(adapter);
- /* Enable completion queues interrupt */
- for (i = 0; i < adapter->num_queues; i++)
- ena_unmask_interrupt(&adapter->tx_ring[i],
- &adapter->rx_ring[i]);
-
- /* schedule napi in case we had pending packets
- * from the last time we disable napi
- */
- for (i = 0; i < adapter->num_queues; i++)
- napi_schedule(&adapter->ena_napi[i].napi);
-
return 0;
}
@@ -1731,7 +1728,7 @@ create_err:
static int ena_up(struct ena_adapter *adapter)
{
- int rc;
+ int rc, i;
netdev_dbg(adapter->netdev, "%s\n", __func__);
@@ -1774,6 +1771,17 @@ static int ena_up(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ /* Enable completion queues interrupt */
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_unmask_interrupt(&adapter->tx_ring[i],
+ &adapter->rx_ring[i]);
+
+	/* schedule napi in case there were pending packets from the
+	 * last time napi was disabled
+	 */
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_schedule(&adapter->ena_napi[i].napi);
+
return rc;
err_up:
@@ -1884,6 +1892,17 @@ static int ena_close(struct net_device *netdev)
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
+	/* Check the device status and issue a reset if needed */
+ check_for_admin_com_state(adapter);
+ if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ netif_err(adapter, ifdown, adapter->netdev,
+ "Destroy failure, restarting device\n");
+ ena_dump_stats_to_dmesg(adapter);
+ /* rtnl lock already obtained in dev_ioctl() layer */
+ ena_destroy_device(adapter);
+ ena_restore_device(adapter);
+ }
+
return 0;
}
@@ -2544,11 +2563,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
ena_com_set_admin_running_state(ena_dev, false);
- ena_close(netdev);
+ if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ ena_down(adapter);
/* Before releasing the ENA resources, a device reset is required.
* (to prevent the device from accessing them).
- * In case the reset flag is set and the device is up, ena_close
+ * In case the reset flag is set and the device is up, ena_down()
* already perform the reset, so it can be skipped.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
@@ -2648,8 +2668,32 @@ static void ena_fw_reset_device(struct work_struct *work)
rtnl_unlock();
}
-static int check_missing_comp_in_queue(struct ena_adapter *adapter,
- struct ena_ring *tx_ring)
+static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
+ struct ena_ring *rx_ring)
+{
+ if (likely(rx_ring->first_interrupt))
+ return 0;
+
+ if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
+ return 0;
+
+ rx_ring->no_interrupt_event_cnt++;
+
+ if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
+ netif_err(adapter, rx_err, adapter->netdev,
+ "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
+ rx_ring->qid);
+ adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+ struct ena_ring *tx_ring)
{
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
@@ -2659,8 +2703,27 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
for (i = 0; i < tx_ring->ring_size; i++) {
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
- if (unlikely(last_jiffies &&
- time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
+
+ if (last_jiffies == 0)
+ /* no pending Tx at this location */
+ continue;
+
+ if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to))) {
+			/* If no interrupt has been received after the grace
+			 * period, schedule a reset.
+			 */
+ netif_err(adapter, tx_err, adapter->netdev,
+ "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
+ tx_ring->qid);
+ adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ return -EIO;
+ }
+
+ if (unlikely(time_is_before_jiffies(last_jiffies +
+ adapter->missing_tx_completion_to))) {
if (!tx_buf->print_once)
netif_notice(adapter, tx_err, adapter->netdev,
"Found a Tx that wasn't completed on time, qid %d, index %d.\n",
@@ -2689,9 +2752,10 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
return rc;
}
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
int i, budget, rc;
/* Make sure the driver doesn't turn the device in other process */
@@ -2710,8 +2774,13 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
tx_ring = &adapter->tx_ring[i];
+ rx_ring = &adapter->rx_ring[i];
+
+ rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+ if (unlikely(rc))
+ return;
- rc = check_missing_comp_in_queue(adapter, tx_ring);
+ rc = check_for_rx_interrupt_queue(adapter, rx_ring);
if (unlikely(rc))
return;
@@ -2870,7 +2939,7 @@ static void ena_timer_service(struct timer_list *t)
check_for_admin_com_state(adapter);
- check_for_missing_tx_completions(adapter);
+ check_for_missing_completions(adapter);
check_for_empty_rx_ring(adapter);
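
The watchdog above only declares a missed MSI-X interrupt once a queue has had
pending completions but no interrupt across several one-second polls. Its
decision logic isolated as a sketch (names illustrative):

    #include <linux/types.h>

    /* true once work has been pending with no interrupt for 'limit' polls */
    static bool irq_looks_stuck(bool seen_irq, bool work_pending,
                                u16 *poll_cnt, u16 limit)
    {
            if (seen_irq || !work_pending)
                    return false;

            return ++(*poll_cnt) == limit;
    }
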
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 3bbc003871de..f1972b5ab650 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,7 +44,7 @@
#include "ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 3
+#define DRV_MODULE_VER_MINOR 5
#define DRV_MODULE_VER_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
@@ -122,6 +122,7 @@
* We wait for 6 sec just to be on the safe side.
*/
#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ)
+#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
@@ -236,6 +237,9 @@ struct ena_ring {
/* The maximum header length the device can handle */
u8 tx_max_header_size;
+ bool first_interrupt;
+ u16 no_interrupt_event_cnt;
+
/* cpu for TPH */
int cpu;
/* number of tx/rx_buffer_info's entries */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 9aec43c5bba8..48ca97fbe7bc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -60,6 +60,8 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
+
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a74a8fbad53a..7a3ebfd236f5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2930,9 +2930,8 @@ void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
struct ethhdr *eth = (struct ethhdr *)skb->data;
- unsigned char *buf = skb->data;
unsigned char buffer[128];
- unsigned int i, j;
+ unsigned int i;
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
@@ -2943,22 +2942,13 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
- for (i = 0, j = 0; i < skb->len;) {
- j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
- buf[i++]);
-
- if ((i % 32) == 0) {
- netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
- j = 0;
- } else if ((i % 16) == 0) {
- buffer[j++] = ' ';
- buffer[j++] = ' ';
- } else if ((i % 4) == 0) {
- buffer[j++] = ' ';
- }
+ for (i = 0; i < skb->len; i += 32) {
+ unsigned int len = min(skb->len - i, 32U);
+
+ hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+ buffer, sizeof(buffer), false);
+ netdev_dbg(netdev, " %#06x: %s\n", i, buffer);
}
- if (i % 32)
- netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index e4ae696920ef..686f6d8c9e79 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -39,4 +39,5 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
+ hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 105fdb958cef..0b49f1aeebd3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -65,7 +65,13 @@
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
-#define AQ_CFG_FC_MODE 3U
+#define AQ_NIC_FC_OFF 0U
+#define AQ_NIC_FC_TX 1U
+#define AQ_NIC_FC_RX 2U
+#define AQ_NIC_FC_FULL 3U
+#define AQ_NIC_FC_AUTO 4U
+
+#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 9eb5e222a234..d52b088ff8f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -16,8 +16,45 @@
#include <linux/pci.h>
#include "ver.h"
-#include "aq_nic.h"
#include "aq_cfg.h"
#include "aq_utils.h"
+#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
+
+#define AQ_DEVICE_ID_0001 0x0001
+#define AQ_DEVICE_ID_D100 0xD100
+#define AQ_DEVICE_ID_D107 0xD107
+#define AQ_DEVICE_ID_D108 0xD108
+#define AQ_DEVICE_ID_D109 0xD109
+
+#define AQ_DEVICE_ID_AQC100 0x00B1
+#define AQ_DEVICE_ID_AQC107 0x07B1
+#define AQ_DEVICE_ID_AQC108 0x08B1
+#define AQ_DEVICE_ID_AQC109 0x09B1
+#define AQ_DEVICE_ID_AQC111 0x11B1
+#define AQ_DEVICE_ID_AQC112 0x12B1
+
+#define AQ_DEVICE_ID_AQC100S 0x80B1
+#define AQ_DEVICE_ID_AQC107S 0x87B1
+#define AQ_DEVICE_ID_AQC108S 0x88B1
+#define AQ_DEVICE_ID_AQC109S 0x89B1
+#define AQ_DEVICE_ID_AQC111S 0x91B1
+#define AQ_DEVICE_ID_AQC112S 0x92B1
+
+#define AQ_DEVICE_ID_AQC111E 0x51B1
+#define AQ_DEVICE_ID_AQC112E 0x52B1
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
+
+#define AQ_HWREV_ANY 0
+#define AQ_HWREV_1 1
+#define AQ_HWREV_2 2
+
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_5GSR BIT(2)
+#define AQ_NIC_RATE_2GS BIT(3)
+#define AQ_NIC_RATE_1G BIT(4)
+#define AQ_NIC_RATE_100M BIT(5)
+
#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index b3825de6cdfb..a2d416b24ffc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -7,7 +7,7 @@
* version 2, as published by the Free Software Foundation.
*/
-/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
* functions.
*/
@@ -15,12 +15,15 @@
#define AQ_HW_H
#include "aq_common.h"
+#include "aq_rss.h"
+#include "hw_atl/hw_atl_utils.h"
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
u64 link_speed_msk;
unsigned int hw_priv_flags;
+ u32 media_type;
u32 rxds;
u32 txds;
u32 txhwb_alignment;
@@ -28,7 +31,7 @@ struct aq_hw_caps_s {
u32 vecs;
u32 mtu;
u32 mac_regs_count;
- u8 ports;
+ u32 hw_alive_check_addr;
u8 msix_irqs;
u8 tcs;
u8 rxd_alignment;
@@ -39,7 +42,6 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
- u32 fw_ver_expected;
};
struct aq_hw_link_status_s {
@@ -86,30 +88,43 @@ struct aq_stats_s {
#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+ AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+ AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+ AQ_NIC_LINK_DOWN)
+
+#define AQ_HW_MEDIA_TYPE_TP 1U
+#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+
struct aq_hw_s {
- struct aq_obj_s header;
+ atomic_t flags;
+ u8 rbl_enabled:1;
struct aq_nic_cfg_s *aq_nic_cfg;
- struct aq_pci_func_s *aq_pci_func;
+ const struct aq_fw_ops *aq_fw_ops;
void __iomem *mmio;
- unsigned int not_ff_addr;
struct aq_hw_link_status_s aq_link_status;
+ struct hw_aq_atl_utils_mbox mbox;
+ struct hw_atl_stats_s last_stats;
+ struct aq_stats_s curr_stats;
+ u64 speed;
+ u32 itr_tx;
+ u32 itr_rx;
+ unsigned int chip_features;
+ u32 fw_ver_actual;
+ atomic_t dpc;
+ u32 mbox_addr;
+ u32 rpc_addr;
+ u32 rpc_tid;
+ struct hw_aq_atl_utils_fw_rpc rpc;
};
struct aq_ring_s;
struct aq_ring_param_s;
-struct aq_nic_cfg_s;
struct sk_buff;
struct aq_hw_ops {
- struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
- unsigned int port, struct aq_hw_ops *ops);
-
- void (*destroy)(struct aq_hw_s *self);
-
- int (*get_hw_caps)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
- unsigned short device,
- unsigned short subsystem_device);
int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
unsigned int frags);
@@ -123,20 +138,11 @@ struct aq_hw_ops {
int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
- int (*hw_get_mac_permanent)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
- u8 *mac);
-
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
- int (*hw_get_link_status)(struct aq_hw_s *self);
-
- int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
-
int (*hw_reset)(struct aq_hw_s *self);
- int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr);
+ int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
@@ -184,9 +190,8 @@ struct aq_hw_ops {
struct aq_rss_parameters *rss_params);
int (*hw_get_regs)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
-
- int (*hw_update_stats)(struct aq_hw_s *self);
+ const struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff);
struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
@@ -197,4 +202,20 @@ struct aq_hw_ops {
int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
};
+struct aq_fw_ops {
+ int (*init)(struct aq_hw_s *self);
+
+ int (*reset)(struct aq_hw_s *self);
+
+ int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
+
+ int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
+
+ int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
+
+ int (*update_link_status)(struct aq_hw_s *self);
+
+ int (*update_stats)(struct aq_hw_s *self);
+};
+
#endif /* AQ_HW_H */
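The structural heart of this header change is the aq_hw_ops/aq_fw_ops split: firmware-mediated operations (permanent MAC, link speed, MPI state, link status, statistics) move out of the datapath vtable into their own table, so one MAC implementation can pair with more than one firmware ABI (the Makefile hunk above adds the fw2x object for the second one). A hedged sketch of dispatching through both tables (the wrapper function is hypothetical):

/* Sketch: datapath ops and firmware ops are now separate vtables. */
static int sketch_reset_and_set_speed(struct aq_nic_s *self, u32 speed)
{
	int err;

	err = self->aq_hw_ops->hw_reset(self->aq_hw);	/* datapath table */
	if (err < 0)
		return err;

	return self->aq_fw_ops->set_link_speed(self->aq_hw, speed); /* fw table */
}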
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 5f13465995f6..d526c4f19d34 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -13,6 +13,7 @@
#include "aq_hw_utils.h"
#include "aq_hw.h"
+#include "aq_nic.h"
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)
@@ -39,8 +40,10 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
{
u32 value = readl(hw->mmio + reg);
- if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
- aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
+ if ((~0U) == value &&
+ (~0U) == readl(hw->mmio +
+ hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr))
+ aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
return value;
}
@@ -54,11 +57,11 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
- if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
- if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
err = -EIO;
goto err_exit;
}
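The unplug detection above deserves a note: reads from a surprise-removed PCI device return all-ones, but ~0U is also a legal register value, so the code double-checks against the per-board hw_alive_check_addr register, chosen because it never legitimately reads as all-ones on live hardware. The idiom, restated as a sketch:

/* Sketch: confirm surprise removal with a second, known-good register. */
static bool sketch_device_gone(struct aq_hw_s *hw, u32 alive_reg)
{
	return readl(hw->mmio + alive_reg) == ~0U;
}

The patch also moves that address from a per-instance field (not_ff_addr) into the shared capability table, since it is a property of the board, not of the instance.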
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index 03b72ddbffb9..dc88a1221f1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,9 @@ do { \
} \
} while (0)
+#define aq_pr_err(...) pr_err(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+#define aq_pr_trace(...) pr_info(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+
struct aq_hw_s;
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
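The new aq_pr_err()/aq_pr_trace() macros cover paths that run before a net_device exists, where netdev_err() has nothing to print against; they rely on C string-literal concatenation to prefix every message with the driver name (the expansion below assumes AQ_CFG_DRV_NAME is a plain string literal, as elsewhere in the driver):

/* Sketch: aq_pr_err("fw kickstart failed: %d\n", err) expands to
 * pr_err(AQ_CFG_DRV_NAME ": " "fw kickstart failed: %d\n", err),
 * i.e. one concatenated format string at compile time.
 * (The message text here is illustrative.)
 */
aq_pr_err("fw kickstart failed: %d\n", err);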
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 5d6c40d86775..ba5fe8c4125d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,49 +13,39 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
-#include "hw_atl/hw_atl_a0.h"
-#include "hw_atl/hw_atl_b0.h"
#include <linux/netdevice.h>
#include <linux/module.h>
-static const struct pci_device_id aq_pci_tbl[] = {
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
-
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
-static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+static const struct net_device_ops aq_ndev_ops;
+
+struct net_device *aq_ndev_alloc(void)
{
- struct aq_hw_ops *ops = NULL;
+ struct net_device *ndev = NULL;
+ struct aq_nic_s *aq_nic = NULL;
+
+ ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+ if (!ndev)
+ return NULL;
- ops = hw_atl_a0_get_ops_by_id(pdev);
- if (!ops)
- ops = hw_atl_b0_get_ops_by_id(pdev);
+ aq_nic = netdev_priv(ndev);
+ aq_nic->ndev = ndev;
+ ndev->netdev_ops = &aq_ndev_ops;
+ ndev->ethtool_ops = &aq_ethtool_ops;
- return ops;
+ return ndev;
}
static int aq_ndev_open(struct net_device *ndev)
{
- struct aq_nic_s *aq_nic = NULL;
int err = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
- aq_nic = aq_nic_alloc_hot(ndev);
- if (!aq_nic) {
- err = -ENOMEM;
- goto err_exit;
- }
err = aq_nic_init(aq_nic);
if (err < 0)
goto err_exit;
@@ -78,7 +68,6 @@ static int aq_ndev_close(struct net_device *ndev)
if (err < 0)
goto err_exit;
aq_nic_deinit(aq_nic);
- aq_nic_free_hot_resources(aq_nic);
err_exit:
return err;
@@ -150,15 +139,13 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
if (err < 0)
- goto err_exit;
+ return;
if (netdev_mc_count(ndev)) {
err = aq_nic_set_multicast_list(aq_nic, ndev);
if (err < 0)
- goto err_exit;
+ return;
}
-
-err_exit:;
}
static const struct net_device_ops aq_ndev_ops = {
@@ -170,66 +157,3 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features
};
-
-static int aq_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
-{
- struct aq_hw_ops *aq_hw_ops = NULL;
- struct aq_pci_func_s *aq_pci_func = NULL;
- int err = 0;
-
- err = pci_enable_device(pdev);
- if (err < 0)
- goto err_exit;
- aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
- aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
- &aq_ndev_ops, &aq_ethtool_ops);
- if (!aq_pci_func) {
- err = -ENOMEM;
- goto err_exit;
- }
- err = aq_pci_func_init(aq_pci_func);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- if (err < 0) {
- if (aq_pci_func)
- aq_pci_func_free(aq_pci_func);
- }
- return err;
-}
-
-static void aq_pci_remove(struct pci_dev *pdev)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
- aq_pci_func_deinit(aq_pci_func);
- aq_pci_func_free(aq_pci_func);
-}
-
-static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
- return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static int aq_pci_resume(struct pci_dev *pdev)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
- pm_message_t pm_msg = PMSG_RESTORE;
-
- return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static struct pci_driver aq_pci_ops = {
- .name = AQ_CFG_DRV_NAME,
- .id_table = aq_pci_tbl,
- .probe = aq_pci_probe,
- .remove = aq_pci_remove,
- .suspend = aq_pci_suspend,
- .resume = aq_pci_resume,
-};
-
-module_pci_driver(aq_pci_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index 9748e7e575e0..ce92152eb43e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -14,4 +14,6 @@
#include "aq_common.h"
+struct net_device *aq_ndev_alloc(void);
+
#endif /* AQ_MAIN_H */
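aq_ndev_alloc() (declared here, defined in the aq_main.c hunk above) replaces the old alloc_cold/alloc_hot two-phase construction: the whole struct aq_nic_s now lives in the net_device private area, allocated in one shot at probe time. A sketch of the embedding pattern (error handling elided):

/* Sketch: one allocation carries both objects; netdev_priv() and the
 * stored back-pointer convert between them in O(1).
 */
struct net_device *ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s),
					    AQ_CFG_VECS_MAX);
struct aq_nic_s *nic = netdev_priv(ndev);	/* no separate kzalloc() */

nic->ndev = ndev;				/* reverse link */

One consequence, visible in the aq_main.c hunk: aq_ndev_open() no longer allocates anything and can fail only on hardware init.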
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 75a894a9251c..ebbaf63eaf47 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -14,7 +14,6 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
-#include "aq_nic_internal.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -61,19 +60,13 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}
-/* Fills aq_nic_cfg with valid defaults */
-static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
+/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
+void aq_nic_cfg_start(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
- cfg->aq_hw_caps = &self->aq_hw_caps;
-
- cfg->vecs = AQ_CFG_VECS_DEF;
cfg->tcs = AQ_CFG_TCS_DEF;
- cfg->rxds = AQ_CFG_RXDS_DEF;
- cfg->txds = AQ_CFG_TXDS_DEF;
-
cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
cfg->itr = aq_itr;
@@ -94,19 +87,13 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
cfg->vlan_id = 0U;
aq_nic_rss_init(self, cfg->num_rss_queues);
-}
-
-/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
-int aq_nic_cfg_start(struct aq_nic_s *self)
-{
- struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
/*descriptors */
- cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
- cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);
+ cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
+ cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
/*rss rings */
- cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
+ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
cfg->vecs = min(cfg->vecs, num_online_cpus());
/* cfg->vecs should be power of 2 for RSS */
if (cfg->vecs >= 8U)
@@ -120,23 +107,22 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
- cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
+ cfg->irq_type = aq_pci_func_get_irq_type(self);
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
- (self->aq_hw_caps.vecs == 1U) ||
+ (cfg->aq_hw_caps->vecs == 1U) ||
(cfg->vecs == 1U)) {
cfg->is_rss = 0U;
cfg->vecs = 1U;
}
- cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
- cfg->hw_features = self->aq_hw_caps.hw_features;
- return 0;
+ cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
+ cfg->hw_features = cfg->aq_hw_caps->hw_features;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
- int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
+ int err = self->aq_fw_ops->update_link_status(self->aq_hw);
if (err)
return err;
@@ -150,9 +136,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
self->link_status = self->aq_hw->aq_link_status;
if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
- aq_utils_obj_set(&self->header.flags,
+ aq_utils_obj_set(&self->flags,
AQ_NIC_FLAG_STARTED);
- aq_utils_obj_clear(&self->header.flags,
+ aq_utils_obj_clear(&self->flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
netif_tx_wake_all_queues(self->ndev);
@@ -160,7 +146,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
netif_carrier_off(self->ndev);
netif_tx_disable(self->ndev);
- aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+ aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
}
return 0;
}
@@ -171,15 +157,15 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
int err = 0;
- if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+ if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
err = aq_nic_update_link_status(self);
if (err)
goto err_exit;
- if (self->aq_hw_ops.hw_update_stats)
- self->aq_hw_ops.hw_update_stats(self->aq_hw);
+ if (self->aq_fw_ops->update_stats)
+ self->aq_fw_ops->update_stats(self->aq_hw);
aq_nic_update_ndev_stats(self);
@@ -205,60 +191,6 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
AQ_CFG_POLLING_TIMER_INTERVAL);
}
-static struct net_device *aq_nic_ndev_alloc(void)
-{
- return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
-}
-
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *et_ops,
- struct pci_dev *pdev,
- struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- const struct aq_hw_ops *aq_hw_ops)
-{
- struct net_device *ndev = NULL;
- struct aq_nic_s *self = NULL;
- int err = 0;
-
- ndev = aq_nic_ndev_alloc();
- if (!ndev) {
- err = -ENOMEM;
- goto err_exit;
- }
-
- self = netdev_priv(ndev);
-
- ndev->netdev_ops = ndev_ops;
- ndev->ethtool_ops = et_ops;
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- ndev->if_port = port;
- self->ndev = ndev;
-
- self->aq_pci_func = aq_pci_func;
-
- self->aq_hw_ops = *aq_hw_ops;
- self->port = (u8)port;
-
- self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
- &self->aq_hw_ops);
- err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
- pdev->device, pdev->subsystem_device);
- if (err < 0)
- goto err_exit;
-
- aq_nic_cfg_init_defaults(self);
-
-err_exit:
- if (err < 0) {
- aq_nic_free_hot_resources(self);
- self = NULL;
- }
- return self;
-}
-
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
@@ -267,10 +199,14 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
err = -EINVAL;
goto err_exit;
}
- err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
- self->aq_nic_cfg.aq_hw_caps,
+
+ err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
+ if (err)
+ goto err_exit;
+
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
self->ndev->dev_addr);
- if (err < 0)
+ if (err)
goto err_exit;
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -281,83 +217,39 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
}
#endif
+ for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
+ self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] =
+ aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
+ if (!self->aq_vec[self->aq_vecs]) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+
netif_carrier_off(self->ndev);
netif_tx_disable(self->ndev);
err = register_netdev(self->ndev);
- if (err < 0)
+ if (err)
goto err_exit;
err_exit:
return err;
}
-int aq_nic_ndev_init(struct aq_nic_s *self)
+void aq_nic_ndev_init(struct aq_nic_s *self)
{
- struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+ const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
self->ndev->hw_features |= aq_hw_caps->hw_features;
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
- self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
-
- return 0;
-}
-
-void aq_nic_ndev_free(struct aq_nic_s *self)
-{
- if (!self->ndev)
- goto err_exit;
-
- if (self->ndev->reg_state == NETREG_REGISTERED)
- unregister_netdev(self->ndev);
-
- if (self->aq_hw)
- self->aq_hw_ops.destroy(self->aq_hw);
-
- free_netdev(self->ndev);
-
-err_exit:;
-}
-
-struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
-{
- struct aq_nic_s *self = NULL;
- int err = 0;
-
- if (!ndev) {
- err = -EINVAL;
- goto err_exit;
- }
- self = netdev_priv(ndev);
-
- if (!self) {
- err = -EINVAL;
- goto err_exit;
- }
- if (netif_running(ndev))
- netif_tx_disable(ndev);
- netif_carrier_off(self->ndev);
-
- for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
- self->aq_vecs++) {
- self->aq_vec[self->aq_vecs] =
- aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
- if (!self->aq_vec[self->aq_vecs]) {
- err = -ENOMEM;
- goto err_exit;
- }
- }
+ self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
-err_exit:
- if (err < 0) {
- aq_nic_free_hot_resources(self);
- self = NULL;
- }
- return self;
}
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
@@ -366,11 +258,6 @@ void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
self->aq_ring_tx[idx] = ring;
}
-struct device *aq_nic_get_dev(struct aq_nic_s *self)
-{
- return self->ndev->dev.parent;
-}
-
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
return self->ndev;
@@ -383,18 +270,20 @@ int aq_nic_init(struct aq_nic_s *self)
unsigned int i = 0U;
self->power_state = AQ_HW_POWER_STATE_D0;
- err = self->aq_hw_ops.hw_reset(self->aq_hw);
+ err = self->aq_hw_ops->hw_reset(self->aq_hw);
if (err < 0)
goto err_exit;
- err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
- aq_nic_get_ndev(self)->dev_addr);
+ err = self->aq_hw_ops->hw_init(self->aq_hw,
+ aq_nic_get_ndev(self)->dev_addr);
if (err < 0)
goto err_exit;
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);
+ aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+
+ netif_carrier_off(self->ndev);
err_exit:
return err;
@@ -406,13 +295,13 @@ int aq_nic_start(struct aq_nic_s *self)
int err = 0;
unsigned int i = 0U;
- err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
if (err < 0)
goto err_exit;
- err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+ err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
self->packet_filter);
if (err < 0)
goto err_exit;
@@ -424,7 +313,7 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
- err = self->aq_hw_ops.hw_start(self->aq_hw);
+ err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
@@ -442,14 +331,14 @@ int aq_nic_start(struct aq_nic_s *self)
} else {
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
- err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
+ err = aq_pci_func_alloc_irq(self, i,
self->ndev->name, aq_vec,
- aq_vec_get_affinity_mask(aq_vec));
+ aq_vec_get_affinity_mask(aq_vec));
if (err < 0)
goto err_exit;
}
- err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
+ err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
AQ_CFG_IRQ_MASK);
if (err < 0)
goto err_exit;
@@ -634,9 +523,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
frags = aq_nic_map_skb(self, skb, ring);
if (likely(frags)) {
- err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
- ring,
- frags);
+ err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
+ ring, frags);
if (err >= 0) {
++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len;
@@ -651,14 +539,14 @@ err_exit:
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
- return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+ return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
int err = 0;
- err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
+ err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
if (err < 0)
goto err_exit;
@@ -690,11 +578,11 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
* multicast mask
*/
self->packet_filter |= IFF_ALLMULTI;
- self->aq_hw->aq_nic_cfg->mc_list_count = 0;
- return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
- self->packet_filter);
+ self->aq_nic_cfg.mc_list_count = 0;
+ return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
+ self->packet_filter);
} else {
- return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
}
@@ -709,7 +597,7 @@ int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
- return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
+ return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}
unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
@@ -724,8 +612,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
regs->version = 1;
- err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
- &self->aq_hw_caps, regs_buff);
+ err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
+ self->aq_nic_cfg.aq_hw_caps,
+ regs_buff);
if (err < 0)
goto err_exit;
@@ -735,7 +624,7 @@ err_exit:
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
- return self->aq_hw_caps.mac_regs_count;
+ return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
@@ -743,7 +632,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
unsigned int i = 0U;
unsigned int count = 0U;
struct aq_vec_s *aq_vec = NULL;
- struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+ struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
if (!stats)
goto err_exit;
@@ -774,7 +663,6 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
i++;
data += i;
- count = 0U;
for (i = 0U, aq_vec = self->aq_vec[0];
aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
@@ -788,7 +676,7 @@ err_exit:;
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
struct net_device *ndev = self->ndev;
- struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+ struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
@@ -802,39 +690,46 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
struct ethtool_link_ksettings *cmd)
{
- cmd->base.port = PORT_TP;
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ cmd->base.port = PORT_FIBRE;
+ else
+ cmd->base.port = PORT_TP;
/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
cmd->base.duplex = DUPLEX_FULL;
cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
ethtool_link_ksettings_zero_link_mode(cmd, supported);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
ethtool_link_ksettings_add_link_mode(cmd, supported,
5000baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
ethtool_link_ksettings_add_link_mode(cmd, supported,
2500baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
ethtool_link_ksettings_add_link_mode(cmd, supported,
1000baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
- if (self->aq_hw_caps.flow_control)
+ if (self->aq_nic_cfg.aq_hw_caps->flow_control)
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ else
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
@@ -865,7 +760,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+ else
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}
int aq_nic_set_link_ksettings(struct aq_nic_s *self,
@@ -876,7 +774,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
int err = 0;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
- rate = self->aq_hw_caps.link_speed_msk;
+ rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
self->aq_nic_cfg.is_autoneg = true;
} else {
speed = cmd->base.speed;
@@ -907,7 +805,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
goto err_exit;
break;
}
- if (!(self->aq_hw_caps.link_speed_msk & rate)) {
+ if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
err = -1;
goto err_exit;
}
@@ -915,7 +813,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
self->aq_nic_cfg.is_autoneg = false;
}
- err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
+ err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
if (err < 0)
goto err_exit;
@@ -934,7 +832,7 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
u32 fw_version = 0U;
- self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);
+ self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);
return fw_version;
}
@@ -949,18 +847,18 @@ int aq_nic_stop(struct aq_nic_s *self)
del_timer_sync(&self->service_timer);
- self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
+ self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
if (self->aq_nic_cfg.is_polling)
del_timer_sync(&self->polling_timer);
else
- aq_pci_func_free_irqs(self->aq_pci_func);
+ aq_pci_func_free_irqs(self);
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_stop(aq_vec);
- return self->aq_hw_ops.hw_stop(self->aq_hw);
+ return self->aq_hw_ops->hw_stop(self->aq_hw);
}
void aq_nic_deinit(struct aq_nic_s *self)
@@ -976,23 +874,23 @@ void aq_nic_deinit(struct aq_nic_s *self)
aq_vec_deinit(aq_vec);
if (self->power_state == AQ_HW_POWER_STATE_D0) {
- (void)self->aq_hw_ops.hw_deinit(self->aq_hw);
+ (void)self->aq_hw_ops->hw_deinit(self->aq_hw);
} else {
- (void)self->aq_hw_ops.hw_set_power(self->aq_hw,
+ (void)self->aq_hw_ops->hw_set_power(self->aq_hw,
self->power_state);
}
err_exit:;
}
-void aq_nic_free_hot_resources(struct aq_nic_s *self)
+void aq_nic_free_vectors(struct aq_nic_s *self)
{
unsigned int i = 0U;
if (!self)
goto err_exit;
- for (i = AQ_DIMOF(self->aq_vec); i--;) {
+ for (i = ARRAY_SIZE(self->aq_vec); i--;) {
if (self->aq_vec[i]) {
aq_vec_free(self->aq_vec[i]);
self->aq_vec[i] = NULL;
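With the defaults/start split collapsed, aq_nic_cfg_start() derives every runtime value by clamping a compile-time default against the capability table, then rounds the vector count for RSS. The power-of-two rounding is spelled out as an if/else ladder in the source; a hedged equivalent using the log2 helper (illustrative, not what the patch does):

#include <linux/log2.h>

/* Sketch: bound by caps and online CPUs, then round down to a power of
 * two (cfg->vecs is at least 1 here, so rounddown_pow_of_two() is safe).
 */
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
cfg->vecs = min(cfg->vecs, num_online_cpus());
cfg->vecs = rounddown_pow_of_two(min(cfg->vecs, 8U));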
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 3c9f8db03d5f..d16b0f1a95aa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -14,26 +14,15 @@
#include "aq_common.h"
#include "aq_rss.h"
+#include "aq_hw.h"
struct aq_ring_s;
-struct aq_pci_func_s;
struct aq_hw_ops;
-
-#define AQ_NIC_FC_OFF 0U
-#define AQ_NIC_FC_TX 1U
-#define AQ_NIC_FC_RX 2U
-#define AQ_NIC_FC_FULL 3U
-#define AQ_NIC_FC_AUTO 4U
-
-#define AQ_NIC_RATE_10G BIT(0)
-#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2GS BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
+struct aq_fw_s;
+struct aq_vec_s;
struct aq_nic_cfg_s {
- struct aq_hw_caps_s *aq_hw_caps;
+ const struct aq_hw_caps_s *aq_hw_caps;
u64 hw_features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
@@ -44,7 +33,6 @@ struct aq_nic_cfg_s {
u16 tx_itr;
u32 num_rss_queues;
u32 mtu;
- u32 ucp_0x364;
u32 flow_control;
u32 link_speed_msk;
u32 vlan_id;
@@ -69,20 +57,43 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *et_ops,
- struct pci_dev *pdev,
- struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- const struct aq_hw_ops *aq_hw_ops);
-int aq_nic_ndev_init(struct aq_nic_s *self);
+struct aq_nic_s {
+ atomic_t flags;
+ struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+ struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+ struct aq_hw_s *aq_hw;
+ struct net_device *ndev;
+ unsigned int aq_vecs;
+ unsigned int packet_filter;
+ unsigned int power_state;
+ u8 port;
+ const struct aq_hw_ops *aq_hw_ops;
+ const struct aq_fw_ops *aq_fw_ops;
+ struct aq_nic_cfg_s aq_nic_cfg;
+ struct timer_list service_timer;
+ struct timer_list polling_timer;
+ struct aq_hw_link_status_s link_status;
+ struct {
+ u32 count;
+ u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ } mc_list;
+
+ struct pci_dev *pdev;
+ unsigned int msix_entry_mask;
+};
+
+static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+ return self->ndev->dev.parent;
+}
+
+void aq_nic_ndev_init(struct aq_nic_s *self);
struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev);
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
struct aq_ring_s *ring);
-struct device *aq_nic_get_dev(struct aq_nic_s *self);
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
int aq_nic_init(struct aq_nic_s *self);
-int aq_nic_cfg_start(struct aq_nic_s *self);
+void aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
@@ -93,6 +104,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
void aq_nic_deinit(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
+void aq_nic_free_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
deleted file mode 100644
index e7d2711dc165..000000000000
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-/* File aq_nic_internal.h: Definition of private object structure. */
-
-#ifndef AQ_NIC_INTERNAL_H
-#define AQ_NIC_INTERNAL_H
-
-struct aq_nic_s {
- struct aq_obj_s header;
- struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
- struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
- struct aq_hw_s *aq_hw;
- struct net_device *ndev;
- struct aq_pci_func_s *aq_pci_func;
- unsigned int aq_vecs;
- unsigned int packet_filter;
- unsigned int power_state;
- u8 port;
- struct aq_hw_ops aq_hw_ops;
- struct aq_hw_caps_s aq_hw_caps;
- struct aq_nic_cfg_s aq_nic_cfg;
- struct timer_list service_timer;
- struct timer_list polling_timer;
- struct aq_hw_link_status_s link_status;
- struct {
- u32 count;
- u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
- } mc_list;
-};
-
-#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
- AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
- AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
-
-#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
- AQ_NIC_LINK_DOWN)
-
-#endif /* AQ_NIC_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 58c29d04b186..22889fc158f2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -9,181 +9,135 @@
/* File aq_pci_func.c: Definition of PCI functions. */
-#include "aq_pci_func.h"
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
-#include <linux/interrupt.h>
-
-struct aq_pci_func_s {
- struct pci_dev *pdev;
- struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
- void __iomem *mmio;
- void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
- resource_size_t mmio_pa;
- unsigned int msix_entry_mask;
- unsigned int ports;
- bool is_pci_enabled;
- bool is_regions;
- bool is_pci_using_dac;
- struct aq_hw_caps_s aq_hw_caps;
+#include "aq_pci_func.h"
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
+
+static const struct pci_device_id aq_pci_tbl[] = {
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), },
+
+ {}
};
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
- struct pci_dev *pdev,
- const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *eth_ops)
-{
- struct aq_pci_func_s *self = NULL;
- int err = 0;
- unsigned int port = 0U;
-
- if (!aq_hw_ops) {
- err = -EFAULT;
- goto err_exit;
- }
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self) {
- err = -ENOMEM;
- goto err_exit;
- }
-
- pci_set_drvdata(pdev, self);
- self->pdev = pdev;
+static const struct aq_board_revision_s hw_atl_boards[] = {
+ { AQ_DEVICE_ID_0001, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+ { AQ_DEVICE_ID_D100, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
+ { AQ_DEVICE_ID_D107, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+ { AQ_DEVICE_ID_D108, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
+ { AQ_DEVICE_ID_D109, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },
+
+ { AQ_DEVICE_ID_0001, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_D100, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
+ { AQ_DEVICE_ID_D107, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
+ { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
+
+ { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
+ { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
+ { AQ_DEVICE_ID_AQC111, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
+ { AQ_DEVICE_ID_AQC112, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },
+
+ { AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
+ { AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
+ { AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
+ { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
+ { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
+ { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
+
+ { AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, },
+ { AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, },
+};
- err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
- pdev->subsystem_device);
- if (err < 0)
- goto err_exit;
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
- self->ports = self->aq_hw_caps.ports;
+static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
+ const struct aq_hw_ops **ops,
+ const struct aq_hw_caps_s **caps)
+{
+ int i = 0;
- for (port = 0; port < self->ports; ++port) {
- struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
- pdev, self,
- port, aq_hw_ops);
+ if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
+ return -EINVAL;
- if (!aq_nic) {
- err = -ENOMEM;
- goto err_exit;
+ for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
+ if (hw_atl_boards[i].devid == pdev->device &&
+ (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
+ hw_atl_boards[i].revision == pdev->revision)) {
+ *ops = hw_atl_boards[i].ops;
+ *caps = hw_atl_boards[i].caps;
+ break;
}
- self->port[port] = aq_nic;
}
-err_exit:
- if (err < 0) {
- if (self)
- aq_pci_func_free(self);
- self = NULL;
- }
+ if (i == ARRAY_SIZE(hw_atl_boards))
+ return -EINVAL;
- (void)err;
- return self;
+ return 0;
}
-int aq_pci_func_init(struct aq_pci_func_s *self)
+int aq_pci_func_init(struct pci_dev *pdev)
{
int err = 0;
- unsigned int bar = 0U;
- unsigned int port = 0U;
- unsigned int numvecs = 0U;
-
- err = pci_enable_device(self->pdev);
- if (err < 0)
- goto err_exit;
- self->is_pci_enabled = true;
-
- err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
- self->is_pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+
}
if (err) {
- err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
- err = pci_set_consistent_dma_mask(self->pdev,
+ err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
- self->is_pci_using_dac = 0;
}
if (err != 0) {
err = -ENOSR;
goto err_exit;
}
- err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
+ err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
if (err < 0)
goto err_exit;
- self->is_regions = true;
+ pci_set_master(pdev);
- pci_set_master(self->pdev);
-
- for (bar = 0; bar < 4; ++bar) {
- if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
- resource_size_t reg_sz;
-
- self->mmio_pa = pci_resource_start(self->pdev, bar);
- if (self->mmio_pa == 0U) {
- err = -EIO;
- goto err_exit;
- }
-
- reg_sz = pci_resource_len(self->pdev, bar);
- if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
- err = -EIO;
- goto err_exit;
- }
-
- self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
- if (!self->mmio) {
- err = -EIO;
- goto err_exit;
- }
- break;
- }
- }
-
- numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
- numvecs = min(numvecs, num_online_cpus());
-
- /* enable interrupts */
-#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
-
- if (err < 0) {
- err = pci_alloc_irq_vectors(self->pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
- if (err < 0)
- goto err_exit;
- }
-#endif /* AQ_CFG_FORCE_LEGACY_INT */
-
- /* net device init */
- for (port = 0; port < self->ports; ++port) {
- if (!self->port[port])
- continue;
-
- err = aq_nic_cfg_start(self->port[port]);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_ndev_init(self->port[port]);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_ndev_register(self->port[port]);
- if (err < 0)
- goto err_exit;
- }
+ return 0;
err_exit:
- if (err < 0)
- aq_pci_func_deinit(self);
return err;
}
-int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
char *name, void *aq_vec, cpumask_t *affinity_mask)
{
struct pci_dev *pdev = self->pdev;
@@ -204,11 +158,10 @@ int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
irq_set_affinity_hint(pci_irq_vector(pdev, i),
affinity_mask);
}
-
return err;
}
-void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
+void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
struct pci_dev *pdev = self->pdev;
unsigned int i = 0U;
@@ -224,12 +177,7 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
}
}
-void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
-{
- return self->mmio;
-}
-
-unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
if (self->pdev->msix_enabled)
return AQ_HW_IRQ_MSIX;
@@ -238,62 +186,159 @@ unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
return AQ_HW_IRQ_LEGACY;
}
-void aq_pci_func_deinit(struct aq_pci_func_s *self)
+static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
- if (!self)
- goto err_exit;
-
- aq_pci_func_free_irqs(self);
pci_free_irq_vectors(self->pdev);
-
- if (self->is_regions)
- pci_release_regions(self->pdev);
-
- if (self->is_pci_enabled)
- pci_disable_device(self->pdev);
-
-err_exit:;
}
-void aq_pci_func_free(struct aq_pci_func_s *self)
+static int aq_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
{
- unsigned int port = 0U;
+ struct aq_nic_s *self = NULL;
+ int err = 0;
+ struct net_device *ndev;
+ resource_size_t mmio_pa;
+ u32 bar;
+ u32 numvecs;
- if (!self)
- goto err_exit;
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
- for (port = 0; port < self->ports; ++port) {
- if (!self->port[port])
- continue;
+ err = aq_pci_func_init(pdev);
+ if (err)
+ goto err_pci_func;
- aq_nic_ndev_free(self->port[port]);
+ ndev = aq_ndev_alloc();
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_ndev;
}
- if (self->mmio)
- iounmap(self->mmio);
+ self = netdev_priv(ndev);
+ self->pdev = pdev;
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ pci_set_drvdata(pdev, self);
- kfree(self);
+ err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
+ &aq_nic_get_cfg(self)->aq_hw_caps);
+ if (err)
+ goto err_ioremap;
-err_exit:;
-}
+ self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
+ self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
-int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
- pm_message_t *pm_msg)
-{
- int err = 0;
- unsigned int port = 0U;
+ for (bar = 0; bar < 4; ++bar) {
+ if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
+ resource_size_t reg_sz;
- if (!self) {
- err = -EFAULT;
- goto err_exit;
+ mmio_pa = pci_resource_start(pdev, bar);
+ if (mmio_pa == 0U) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ reg_sz = pci_resource_len(pdev, bar);
+ if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
+ if (!self->aq_hw->mmio) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+ break;
+ }
}
- for (port = 0; port < self->ports; ++port) {
- if (!self->port[port])
- continue;
- (void)aq_nic_change_pm_state(self->port[port], pm_msg);
+ if (bar == 4) {
+ err = -EIO;
+ goto err_ioremap;
}
-err_exit:
+ numvecs = min((u8)AQ_CFG_VECS_DEF,
+ aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
+ numvecs = min(numvecs, num_online_cpus());
+ /*enable interrupts */
+#if !AQ_CFG_FORCE_LEGACY_INT
+ err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs,
+ PCI_IRQ_MSIX);
+
+ if (err < 0) {
+ err = pci_alloc_irq_vectors(self->pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (err < 0)
+ goto err_hwinit;
+ }
+#endif
+
+ /* net device init */
+ aq_nic_cfg_start(self);
+
+ aq_nic_ndev_init(self);
+
+ err = aq_nic_ndev_register(self);
+ if (err < 0)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ aq_nic_free_vectors(self);
+ aq_pci_free_irq_vectors(self);
+err_hwinit:
+ iounmap(self->aq_hw->mmio);
+err_ioremap:
+ free_netdev(ndev);
+err_pci_func:
+ pci_release_regions(pdev);
+err_ndev:
+ pci_disable_device(pdev);
return err;
}
+
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+ if (self->ndev) {
+ if (self->ndev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(self->ndev);
+ aq_nic_free_vectors(self);
+ aq_pci_free_irq_vectors(self);
+ iounmap(self->aq_hw->mmio);
+ kfree(self->aq_hw);
+ pci_release_regions(pdev);
+ free_netdev(self->ndev);
+ }
+
+ pci_disable_device(pdev);
+}
+
+static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+ return aq_nic_change_pm_state(self, &pm_msg);
+}
+
+static int aq_pci_resume(struct pci_dev *pdev)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+ pm_message_t pm_msg = PMSG_RESTORE;
+
+ return aq_nic_change_pm_state(self, &pm_msg);
+}
+
+static struct pci_driver aq_pci_ops = {
+ .name = AQ_CFG_DRV_NAME,
+ .id_table = aq_pci_tbl,
+ .probe = aq_pci_probe,
+ .remove = aq_pci_remove,
+ .suspend = aq_pci_suspend,
+ .resume = aq_pci_resume,
+};
+
+module_pci_driver(aq_pci_ops);
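The probe path above follows the standard kernel goto-unwind ladder: resources are acquired in order and released in reverse from labeled exit points, so each failure site jumps to exactly the teardown it needs. A condensed sketch of the idiom (both helper functions are hypothetical):

/* Condensed sketch of the unwind idiom; not the driver's actual flow. */
static int sketch_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;			/* nothing to undo yet */

	err = sketch_map_bars(pdev);		/* hypothetical helper */
	if (err)
		goto err_disable;

	err = sketch_register(pdev);		/* hypothetical helper */
	if (err)
		goto err_unmap;

	return 0;

err_unmap:
	sketch_unmap_bars(pdev);		/* hypothetical helper */
err_disable:
	pci_disable_device(pdev);
	return err;
}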
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
index ecb033791203..aeee67bf69fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -13,22 +13,20 @@
#define AQ_PCI_FUNC_H
#include "aq_common.h"
+#include "aq_nic.h"
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
- struct pci_dev *pdev,
- const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *eth_ops);
-int aq_pci_func_init(struct aq_pci_func_s *self);
-int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+struct aq_board_revision_s {
+ unsigned short devid;
+ unsigned short revision;
+ const struct aq_hw_ops *ops;
+ const struct aq_hw_caps_s *caps;
+};
+
+int aq_pci_func_init(struct pci_dev *pdev);
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
char *name, void *aq_vec,
cpumask_t *affinity_mask);
-void aq_pci_func_free_irqs(struct aq_pci_func_s *self);
-int aq_pci_func_start(struct aq_pci_func_s *self);
-void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self);
-unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self);
-void aq_pci_func_deinit(struct aq_pci_func_s *self);
-void aq_pci_func_free(struct aq_pci_func_s *self);
-int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
- pm_message_t *pm_msg);
+void aq_pci_func_free_irqs(struct aq_nic_s *self);
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self);
#endif /* AQ_PCI_FUNC_H */
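aq_board_revision_s turns hardware selection into a data problem: probe walks hw_atl_boards[] (defined in the aq_pci_func.c hunk above) and takes the first entry whose device ID matches and whose revision either matches exactly or is the AQ_HWREV_ANY wildcard. The match predicate, restated as a sketch:

/* Sketch: the match rule applied by aq_pci_probe_get_hw_by_id(). */
static bool sketch_board_matches(const struct aq_board_revision_s *b,
				 const struct pci_dev *pdev)
{
	return b->devid == pdev->device &&
	       (b->revision == AQ_HWREV_ANY ||
		b->revision == pdev->revision);
}

Supporting a new board therefore means adding a table row, not another ops-probing function.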
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 519ca6534b85..0be6a11370bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -279,10 +279,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_record_rx_queue(skb, self->idx);
- napi_gro_receive(napi, skb);
-
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
+
+ napi_gro_receive(napi, skb);
}
err_exit:
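This two-line move in aq_ring_rx_clean() is a use-after-free fix, not a cleanup: napi_gro_receive() transfers skb ownership to the stack, which may free it immediately, so skb->len must be read into the ring statistics first. The invariant, restated:

/* Sketch: touch skb fields only while the driver still owns the skb. */
++ring->stats.rx.packets;
ring->stats.rx.bytes += skb->len;	/* safe: not yet handed off */

napi_gro_receive(napi, skb);		/* skb may be freed from here on */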
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 5844078764bd..965fae0fb6e0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -15,6 +15,7 @@
#include "aq_common.h"
struct page;
+struct aq_nic_cfg_s;
/* TxC SOP DX EOP
* +----------+----------+----------+-----------
@@ -105,7 +106,6 @@ union aq_ring_stats_s {
};
struct aq_ring_s {
- struct aq_obj_s header;
struct aq_ring_buff_s *buff_ring;
u8 *dx_ring; /* descriptors ring, dma shared mem */
struct aq_nic_s *aq_nic;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index e12bcdfb874a..786ea8187c69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -14,12 +14,6 @@
#include "aq_common.h"
-#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
-
-struct aq_obj_s {
- atomic_t flags;
-};
-
static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
{
unsigned long flags_old, flags_new;
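With struct aq_obj_s removed, the flag helpers take a bare atomic_t, and every former header.flags user in this series becomes a direct flags field. The helper whose opening lines appear in the context above completes as a lock-free compare-and-swap loop; a sketch following the same pattern:

/* Sketch: lock-free OR into a flags word via a cmpxchg retry loop. */
static inline void sketch_obj_set(atomic_t *flags, u32 mask)
{
	unsigned long flags_old, flags_new;

	do {
		flags_old = atomic_read(flags);
		flags_new = flags_old | mask;
	} while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);
}

A matching clear helper would AND with ~mask in the same loop shape.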
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 5fecc9a099ef..f890b8a5a862 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -19,8 +19,7 @@
#include <linux/netdevice.h>
struct aq_vec_s {
- struct aq_obj_s header;
- struct aq_hw_ops *aq_hw_ops;
+ const struct aq_hw_ops *aq_hw_ops;
struct aq_hw_s *aq_hw;
struct aq_nic_s *aq_nic;
unsigned int tx_rings;
@@ -166,7 +165,7 @@ err_exit:
return self;
}
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw)
{
struct aq_ring_s *ring = NULL;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 6c68b184236c..8bdf60bb3f63 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -19,6 +19,8 @@
struct aq_hw_s;
struct aq_hw_ops;
+struct aq_nic_s;
+struct aq_nic_cfg_s;
struct aq_ring_stats_rx_s;
struct aq_ring_stats_tx_s;
@@ -26,7 +28,7 @@ irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index f18dce14c93c..67e2f9fb9402 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -12,78 +12,100 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
+#include "../aq_nic.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"
-static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
- unsigned short device,
- unsigned short subsystem_device)
-{
- memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
-
- if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
- aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
-
- if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
- aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
- aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
- }
-
- return 0;
-}
-
-static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- struct aq_hw_ops *ops)
-{
- struct hw_atl_s *self = NULL;
-
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self)
- goto err_exit;
-
- self->base.aq_pci_func = aq_pci_func;
+#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_A0_RSS_MAX, \
+ .tcs = HW_ATL_A0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_A0_RXD_SIZE, \
+ .rxds = 248U, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_A0_TXD_SIZE, \
+ .txds = 8U * 1024U, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_A0_TX_RINGS, \
+ .rx_rings = HW_ATL_A0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_SG | \
+ NETIF_F_TSO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_A0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
+ .hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+ .link_speed_msk = HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
- self->base.not_ff_addr = 0x10U;
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_A0_RATE_10G |
+ HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
-err_exit:
- return (struct aq_hw_s *)self;
-}
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
-static void hw_atl_a0_destroy(struct aq_hw_s *self)
-{
- kfree(self);
-}
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
- glb_glb_reg_res_dis_set(self, 1U);
- pci_pci_reg_res_dis_set(self, 0U);
- rx_rx_reg_res_dis_set(self, 0U);
- tx_tx_reg_res_dis_set(self, 0U);
+ hw_atl_glb_glb_reg_res_dis_set(self, 1U);
+ hw_atl_pci_pci_reg_res_dis_set(self, 0U);
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
HW_ATL_FLUSH();
- glb_soft_res_set(self, 1);
+ hw_atl_glb_soft_res_set(self, 1);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
- itr_irq_reg_res_dis_set(self, 0U);
- itr_res_irq_set(self, 1U);
+ hw_atl_itr_irq_reg_res_dis_set(self, 0U);
+ hw_atl_itr_res_irq_set(self, 1U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
- hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+ self->aq_fw_ops->set_state(self, MPI_RESET);
err = aq_hw_err_from_flags(self);
@@ -99,51 +121,53 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
- tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
- tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
- tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
- tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_A0_TXBUF_MAX;
- tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 66U) /
- 100U, tc);
- tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 50U) /
- 100U, tc);
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_A0_RXBUF_MAX;
- rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
- rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
- rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
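[annotation] The watermark math in the QoS hunk is unchanged by the rename, only re-wrapped. Reading it: buff_size appears to be in KiB and the threshold registers in 32-byte units, so (1024 / 32) is a unit conversion and 66/50 are percent fill levels; that interpretation is an assumption, not documented in the patch. As a helper:

/* Watermark as computed inline in hw_atl_*_hw_qos_set(); KiB-in,
 * 32-byte-units-out is assumed from the (1024 / 32U) factor. */
static unsigned int buf_threshold_32b(unsigned int buff_size_kib,
                                      unsigned int percent)
{
        return (buff_size_kib * (1024U / 32U) * percent) / 100U;
}

/* e.g. a 160 KiB Tx buffer at the 66% high watermark:
 * 160 * 32 * 66 / 100 == 3379 (32-byte units) */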
@@ -151,20 +175,19 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- struct aq_nic_cfg_s *cfg = NULL;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
- cfg = self->aq_nic_cfg;
-
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
- rpf_rss_key_wr_data_set(self, key_data);
- rpf_rss_key_addr_set(self, addr);
- rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
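[annotation] Beyond the prefix rename, the RSS key loop keeps its shape: the ten-word secret key is written highest word index first, byte-swapped via __swab32(), one indexed register write per word. A self-contained sketch of that ordering, where write_word() is a hypothetical stand-in for the rpf_rss_key_* set-data/set-addr/pulse-enable sequence:

#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
}

static void rss_key_program(const uint32_t key[10],
                            void (*write_word)(uint32_t addr, uint32_t data),
                            int rss_enabled)
{
        uint32_t addr = 0;
        unsigned int i;

        for (i = 10; i--; ++addr)       /* key[9] first, addr 0..9 */
                write_word(addr, rss_enabled ? swab32(key[i]) : 0);
}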
@@ -193,11 +216,12 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
- for (i = AQ_DIMOF(bitary); i--;) {
- rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
- rpf_rss_redir_tbl_addr_set(self, i);
- rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
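[annotation] AQ_DIMOF was a driver-private spelling of a standard kernel helper; switching to ARRAY_SIZE() drops the duplicate. For reference, the classic form:

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
/* the in-tree version in <linux/kernel.h> additionally folds in a
 * __must_be_array() check so plain pointers fail to compile */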
@@ -212,35 +236,35 @@ static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg)
{
/* TX checksums offloads*/
- tpo_ipv4header_crc_offload_en_set(self, 1);
- tpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- rpo_ipv4header_crc_offload_en_set(self, 1);
- rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads*/
- tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
- thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
- tdm_tx_dca_en_set(self, 0U);
- tdm_tx_dca_mode_set(self, 0U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
- tpb_tx_path_scp_ins_en_set(self, 1U);
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
@@ -251,38 +275,38 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
- rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
- rpb_rx_flow_ctl_mode_set(self, 1U);
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_A0_MAC_MAX; i--;) {
- rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
- rpfl2unicast_flr_act_set(self, 1U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
- reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
- reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
- rpf_vlan_outer_etht_set(self, 0x88A8U);
- rpf_vlan_inner_etht_set(self, 0x8100U);
- rpf_vlan_prom_mode_en_set(self, 1);
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
/* Rx Interrupts */
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- rpfl2broadcast_flr_act_set(self, 1U);
- rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
- rdm_rx_dca_en_set(self, 0U);
- rdm_rx_dca_mode_set(self, 0U);
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
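[annotation] A side note on the broadcast threshold above: with 32-bit unsigned int (true on every ABI the kernel supports), ~0U / 256U is 0x00FFFFFF, so the whole expression folds to plain 0xFFFF; the obscure spelling survives the rename untouched. A compile-time check of that fold:

_Static_assert((0xFFFFU & (~0U / 256U)) == 0xFFFFU,
               "broadcast count threshold folds to 0xFFFF");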
@@ -301,10 +325,10 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
- rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
- rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
- rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
err = aq_hw_err_from_flags(self);
@@ -312,9 +336,7 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_init(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
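[annotation] hw_atl_a0_hw_init() loses the aq_nic_cfg parameter (the config is now read back from self->aq_nic_cfg) and, in the lines that follow, MPI state changes go through self->aq_fw_ops, a per-firmware vtable. Its shape can only be inferred from the calls visible in this patch; member names below match those calls, types are illustrative stand-ins for the real definitions elsewhere in the series:

#include <stdint.h>
typedef uint32_t u32;
struct aq_hw_s;                         /* opaque for this sketch */

struct aq_fw_ops_sketch {
        int (*set_state)(struct aq_hw_s *self, int state);  /* MPI_RESET, MPI_INIT */
        int (*set_link_speed)(struct aq_hw_s *self, u32 speed_msk);
        int (*update_stats)(struct aq_hw_s *self);
};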
@@ -325,20 +347,18 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
int err = 0;
- self->aq_nic_cfg = aq_nic_cfg;
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_A0->chip_features);
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
hw_atl_a0_hw_mac_addr_set(self, mac_addr);
- hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
- reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
- reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
hw_atl_a0_hw_qos_set(self);
hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
@@ -346,26 +366,25 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
/* Reset link status and read out initial hardware counters */
self->aq_link_status.mbps = 0;
- hw_atl_utils_update_stats(self);
+ self->aq_fw_ops->update_stats(self);
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
/* Interrupts */
- reg_irq_glb_ctl_set(self,
- aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
- [(aq_nic_cfg->vecs > 1U) ?
- 1 : 0]);
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);
- itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
- reg_gen_irq_map_set(self,
- ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
- ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
- ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
- ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+ ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+ ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
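[annotation] The gen_irq_map word a few lines up packs four map entries, one per byte: the low bits of each byte carry the cause (HW_ATL_A0_ERR_INT here) and the (1U << 0x1F/0x17/0xF/0x7) terms set bit 7 of each byte, which reads as a per-entry enable. A sketch of the packing, on that assumption:

#include <stdint.h>

/* One cause per byte; bit 7 of each byte assumed to be the enable,
 * matching the shifts 0x18/0x10/8/0 and bits 0x1F/0x17/0xF/0x7 above. */
static uint32_t gen_irq_map_word(uint8_t cause)
{
        uint32_t word = 0;
        int byte;

        for (byte = 0; byte < 4; byte++)
                word |= ((uint32_t)cause << (8 * byte)) |
                        (1U << (8 * byte + 7));
        return word;
}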
@@ -376,28 +395,28 @@ err_exit:
static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 1, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 1, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
- tpb_tx_buff_en_set(self, 1);
- rpb_rx_buff_en_set(self, 1);
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@@ -483,36 +502,37 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- rdm_rx_desc_en_set(self, false, aq_ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
- aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
- reg_rx_dma_desc_base_addressmswset(self,
- dma_desc_addr_msw, aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw,
+ aq_ring->idx);
- rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
- rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
- rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
- itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
- rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -524,25 +544,25 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
- reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
- tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
- tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
- itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
- tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -563,7 +583,7 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
- reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -572,13 +592,13 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
- unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+ unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
- if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
- ring->hw_head = hw_head_;
+ ring->hw_head = hw_head;
err = aq_hw_err_from_flags(self);
err_exit:
@@ -602,15 +622,16 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
if ((1U << 4) &
- reg_rx_dma_desc_status_get(self, ring->idx)) {
- rdm_rx_desc_en_set(self, false, ring->idx);
- rdm_rx_desc_res_set(self, true, ring->idx);
- rdm_rx_desc_res_set(self, false, ring->idx);
- rdm_rx_desc_en_set(self, true, ring->idx);
+ hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
+ hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
}
if (ring->hw_head ||
- (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
+ (hw_atl_rdm_rx_desc_head_ptr_get(self,
+ ring->idx) < 2U)) {
break;
} else if (!(rxd_wb->status & 0x1U)) {
struct hw_atl_rxd_wb_s *rxd_wb1 =
@@ -693,26 +714,25 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
(1U << HW_ATL_A0_ERR_INT));
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_clearlsw_set(self, LODWORD(mask));
- itr_irq_status_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
- if ((1U << 16) & reg_gen_irq_status_get(self))
-
- atomic_inc(&PHAL_ATLANTIC_A0->dpc);
+ if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
+ atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
- *mask = itr_irq_statuslsw_get(self);
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
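[annotation] LODWORD() in the irq enable/disable paths takes the low 32 bits of the 64-bit vector mask; only the LSW mask/status registers are touched on this hardware. The assumed definition (the driver's own spelling lives in its utility headers):

#include <stdint.h>

#define LODWORD(qw) ((uint32_t)((qw) & 0xffffffffULL))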
@@ -723,18 +743,20 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
- rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
- rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
- rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+ hw_atl_rpfl2promiscuous_mode_en_set(self,
+ IS_FILTER_ENABLED(IFF_PROMISC));
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled =
IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
- (i <= self->aq_nic_cfg->mc_list_count)) ?
- 1U : 0U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
+ (i <= self->aq_nic_cfg->mc_list_count)) ?
+ 1U : 0U, i);
return aq_hw_err_from_flags(self);
}
@@ -761,17 +783,19 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
- rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+ l,
+ HW_ATL_A0_MAC_MIN + i);
- rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+ h,
+ HW_ATL_A0_MAC_MIN + i);
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
- HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
+ HW_ATL_A0_MAC_MIN + i);
}
err = aq_hw_err_from_flags(self);
@@ -823,7 +847,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
for (i = HW_ATL_A0_RINGS_MAX; i--;)
- reg_irq_thr_set(self, itr_rx, i);
+ hw_atl_reg_irq_thr_set(self, itr_rx, i);
return aq_hw_err_from_flags(self);
}
@@ -837,38 +861,19 @@ static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
-{
- int err = 0;
-
- err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- return err;
-}
-
-static struct aq_hw_ops hw_atl_ops_ = {
- .create = hw_atl_a0_create,
- .destroy = hw_atl_a0_destroy,
- .get_hw_caps = hw_atl_a0_get_hw_caps,
-
- .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
- .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
- .hw_set_link_speed = hw_atl_a0_hw_set_speed,
.hw_init = hw_atl_a0_hw_init,
.hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
@@ -898,21 +903,6 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_a0_hw_rss_set,
.hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
- .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
-
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
-{
- bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
- bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
- (pdev->device == HW_ATL_DEVICE_ID_D100) ||
- (pdev->device == HW_ATL_DEVICE_ID_D107) ||
- (pdev->device == HW_ATL_DEVICE_ID_D108) ||
- (pdev->device == HW_ATL_DEVICE_ID_D109));
-
- bool is_rev_ok = (pdev->revision == 1U);
-
- return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
-}
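[annotation] With create/destroy and hw_atl_a0_get_ops_by_id() removed, the hw layer just exports const ops and caps symbols and the PCI layer owns the matching. A hypothetical sketch of such a binding table, reusing only IDs visible in the removed code (device 0xD108; revision 1 was A0, revision 2 B0); the real mapping added by this series lives in the aq_pci_func code, and this fragment presumes the driver's include context:

#include "hw_atl_a0.h"
#include "hw_atl_b0.h"

struct hw_atl_board_sketch {
        unsigned short device;
        unsigned char revision;
        const struct aq_hw_ops *ops;
        const struct aq_hw_caps_s *caps;
};

static const struct hw_atl_board_sketch boards[] = {
        { 0xD108, 1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108 },
        { 0xD108, 2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108 },
        /* ... */
};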
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
index 6e1d527954c9..25fe954def03 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
@@ -16,19 +16,11 @@
#include "../aq_common.h"
-#ifndef PCI_VENDOR_ID_AQUANTIA
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc109;
-#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
-#define HW_ATL_DEVICE_ID_0001 0x0001
-#define HW_ATL_DEVICE_ID_D100 0xD100
-#define HW_ATL_DEVICE_ID_D107 0xD107
-#define HW_ATL_DEVICE_ID_D108 0xD108
-#define HW_ATL_DEVICE_ID_D109 0xD109
-
-#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
-
-#endif
-
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
+extern const struct aq_hw_ops hw_atl_ops_a0;
#endif /* HW_ATL_A0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 0592a0330cf0..1d8855558d74 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,69 +88,4 @@
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
- u64 buf_addr;
- u32 ctl;
- u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
- u32 rsvd;
- u32 len;
- u32 ctl;
- u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
- u64 buf_addr;
- u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
- u32 type;
- u32 rss_hash;
- u16 status;
- u16 pkt_len;
- u16 next_desc_ptr;
- u16 vlan;
-};
-
-/* HW layer capabilities */
-static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
- .ports = 1U,
- .is_64_dma = true,
- .msix_irqs = 4U,
- .irq_mask = ~0U,
- .vecs = HW_ATL_A0_RSS_MAX,
- .tcs = HW_ATL_A0_TC_MAX,
- .rxd_alignment = 1U,
- .rxd_size = HW_ATL_A0_RXD_SIZE,
- .rxds = 248U,
- .txd_alignment = 1U,
- .txd_size = HW_ATL_A0_TXD_SIZE,
- .txds = 8U * 1024U,
- .txhwb_alignment = 4096U,
- .tx_rings = HW_ATL_A0_TX_RINGS,
- .rx_rings = HW_ATL_A0_RX_RINGS,
- .hw_features = NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- NETIF_F_SG |
- NETIF_F_TSO,
- .hw_priv_flags = IFF_UNICAST_FLT,
- .link_speed_msk = (HW_ATL_A0_RATE_10G |
- HW_ATL_A0_RATE_5G |
- HW_ATL_A0_RATE_2G5 |
- HW_ATL_A0_RATE_1G |
- HW_ATL_A0_RATE_100M),
- .flow_control = true,
- .mtu = HW_ATL_A0_MTU_JUMBO,
- .mac_regs_count = 88,
- .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED,
-};
-
#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index e4a22ce7bf09..819f6bcf9b4e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -12,83 +12,89 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
+#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"
-static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
- unsigned short device,
- unsigned short subsystem_device)
-{
- memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
-
- if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
- aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
-
- if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
- aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
- aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
- }
-
- return 0;
-}
-
-static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- struct aq_hw_ops *ops)
-{
- struct hw_atl_s *self = NULL;
-
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self)
- goto err_exit;
-
- self->base.aq_pci_func = aq_pci_func;
+#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_B0_RSS_MAX, \
+ .tcs = HW_ATL_B0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_B0_RXD_SIZE, \
+ .rxds = 4U * 1024U, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_B0_TXD_SIZE, \
+ .txds = 8U * 1024U, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_B0_TX_RINGS, \
+ .rx_rings = HW_ATL_B0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_LRO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_B0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
+ .hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+ .link_speed_msk = HW_ATL_B0_RATE_10G |
+ HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
- self->base.not_ff_addr = 0x10U;
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_B0_RATE_10G |
+ HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
-err_exit:
- return (struct aq_hw_s *)self;
-}
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
-static void hw_atl_b0_destroy(struct aq_hw_s *self)
-{
- kfree(self);
-}
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
- glb_glb_reg_res_dis_set(self, 1U);
- pci_pci_reg_res_dis_set(self, 0U);
- rx_rx_reg_res_dis_set(self, 0U);
- tx_tx_reg_res_dis_set(self, 0U);
-
- HW_ATL_FLUSH();
- glb_soft_res_set(self, 1);
-
- /* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
- if (err < 0)
- goto err_exit;
-
- itr_irq_reg_res_dis_set(self, 0U);
- itr_res_irq_set(self, 1U);
-
- /* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
- if (err < 0)
- goto err_exit;
+ err = hw_atl_utils_soft_reset(self);
+ if (err)
+ return err;
- hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+ self->aq_fw_ops->set_state(self, MPI_RESET);
err = aq_hw_err_from_flags(self);
-err_exit:
return err;
}
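[annotation] DEFAULT_B0_BOARD_BASIC_CAPABILITIES mirrors the A0 macro earlier in the patch: the common designated initializers live in one macro, and each board struct appends only media_type and its speed mask. A minimal standalone illustration of the idiom, with placeholder field names rather than the driver's:

#define COMMON_CAPS_INIT \
        .msix_irqs = 4U, \
        .flow_control = 1

struct caps_sketch {
        unsigned int msix_irqs;
        int flow_control;
        int media_type;
        unsigned int link_speed_msk;
};

static const struct caps_sketch caps_fibre = {
        COMMON_CAPS_INIT,
        .media_type = 1,        /* stands in for AQ_HW_MEDIA_TYPE_FIBRE */
        .link_speed_msk = 0x1fU,
};

Because the macro and the per-board lines initialize disjoint fields, no initializer overrides another, so this stays warning-clean even with -Woverride-init.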
@@ -100,51 +106,53 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
- tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
- tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
- tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
- tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_B0_TXBUF_MAX;
- tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 66U) /
- 100U, tc);
- tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 50U) /
- 100U, tc);
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
- rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
- rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
- rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
@@ -152,20 +160,19 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- struct aq_nic_cfg_s *cfg = NULL;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
- cfg = self->aq_nic_cfg;
-
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
- rpf_rss_key_wr_data_set(self, key_data);
- rpf_rss_key_addr_set(self, addr);
- rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -194,11 +201,12 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
- for (i = AQ_DIMOF(bitary); i--;) {
- rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
- rpf_rss_redir_tbl_addr_set(self, i);
- rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -215,15 +223,15 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
unsigned int i;
/* TX checksums offloads*/
- tpo_ipv4header_crc_offload_en_set(self, 1);
- tpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- rpo_ipv4header_crc_offload_en_set(self, 1);
- rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads*/
- tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
/* LRO offloads */
{
@@ -232,43 +240,44 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
- rpo_lro_max_num_of_descriptors_set(self, val, i);
+ hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
- rpo_lro_time_base_divider_set(self, 0x61AU);
- rpo_lro_inactive_interval_set(self, 0);
- rpo_lro_max_coalescing_interval_set(self, 2);
+ hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
+ hw_atl_rpo_lro_inactive_interval_set(self, 0);
+ hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
- rpo_lro_qsessions_lim_set(self, 1U);
+ hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
- rpo_lro_total_desc_lim_set(self, 2U);
+ hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
- rpo_lro_patch_optimization_en_set(self, 0U);
+ hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);
- rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+ hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
- rpo_lro_pkt_lim_set(self, 1U);
+ hw_atl_rpo_lro_pkt_lim_set(self, 1U);
- rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ hw_atl_rpo_lro_en_set(self,
+ aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
- thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
- tdm_tx_dca_en_set(self, 0U);
- tdm_tx_dca_mode_set(self, 0U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
- tpb_tx_path_scp_ins_en_set(self, 1U);
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
@@ -279,55 +288,55 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
- rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
- rpb_rx_flow_ctl_mode_set(self, 1U);
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_B0_MAC_MAX; i--;) {
- rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
- rpfl2unicast_flr_act_set(self, 1U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
- reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
- reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
- rpf_vlan_outer_etht_set(self, 0x88A8U);
- rpf_vlan_inner_etht_set(self, 0x8100U);
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
if (cfg->vlan_id) {
- rpf_vlan_flr_act_set(self, 1U, 0U);
- rpf_vlan_id_flr_set(self, 0U, 0U);
- rpf_vlan_flr_en_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
+ hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
- rpf_vlan_accept_untagged_packets_set(self, 1U);
- rpf_vlan_untagged_act_set(self, 1U);
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
- rpf_vlan_flr_act_set(self, 1U, 1U);
- rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
- rpf_vlan_flr_en_set(self, 1U, 1U);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
+ hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
} else {
- rpf_vlan_prom_mode_en_set(self, 1);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
}
/* Rx Interrupts */
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00005040U,
IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
- rpfl2broadcast_flr_act_set(self, 1U);
- rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
- rdm_rx_dca_en_set(self, 0U);
- rdm_rx_dca_mode_set(self, 0U);
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
@@ -346,10 +355,10 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
- rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
- rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
- rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
err = aq_hw_err_from_flags(self);
@@ -357,9 +366,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_init(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -371,51 +378,50 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
int err = 0;
u32 val;
- self->aq_nic_cfg = aq_nic_cfg;
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_B0->chip_features);
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
hw_atl_b0_hw_mac_addr_set(self, mac_addr);
- hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
hw_atl_b0_hw_qos_set(self);
hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
/* Force limit MRRS on RDM/TDM to 2K */
- val = aq_hw_read_reg(self, pci_reg_control6_adr);
- aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
+ val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
+ aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
+ (val & ~0x707) | 0x404);
/* TX DMA total request limit. B0 hardware is not capable to
* handle more than (8K-MRRS) incoming DMA data.
* Value 24 in 256byte units
*/
- aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
+ aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
/* Reset link status and read out initial hardware counters */
self->aq_link_status.mbps = 0;
- hw_atl_utils_update_stats(self);
+ self->aq_fw_ops->update_stats(self);
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
/* Interrupts */
- reg_irq_glb_ctl_set(self,
- aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
[(aq_nic_cfg->vecs > 1U) ?
1 : 0]);
- itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
- reg_gen_irq_map_set(self,
- ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
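[annotation] On the MRRS lines above: (val & ~0x707) | 0x404 clears a 3-bit field at [2:0] and another at [10:8] and sets both to 4, which in the usual PCIe max-read-request encoding (128 << n) means 2048 bytes, matching the "limit MRRS ... to 2K" comment; that bitfield reading is an inference, not documented here. The request limit of 24 × 256 B = 6 KiB then leaves exactly 2 KiB of headroom under the 8 KiB the comment says B0 can buffer. Checking the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int mrrs = 128u << 4;          /* field value 4 -> 2048 B */
        unsigned int req_limit = 24u * 256u;    /* "value 24 in 256byte units" */

        /* request limit plus MRRS fills the 8 KiB on-chip budget */
        printf("%u + %u = %u\n", req_limit, mrrs, req_limit + mrrs);
        return 0;                               /* prints 6144 + 2048 = 8192 */
}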
@@ -427,28 +433,28 @@ err_exit:
static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 1, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 1, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
- tpb_tx_buff_en_set(self, 1);
- rpb_rx_buff_en_set(self, 1);
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@@ -534,36 +540,36 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- rdm_rx_desc_en_set(self, false, aq_ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
- aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
- reg_rx_dma_desc_base_addressmswset(self,
- dma_desc_addr_msw, aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw, aq_ring->idx);
- rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
- rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
- rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
- itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
- rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -575,25 +581,25 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
- reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
- tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
- tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
- itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
- tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -614,7 +620,7 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
- reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -623,9 +629,9 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
- unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+ unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
- if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
@@ -728,22 +734,22 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_setlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_clearlsw_set(self, LODWORD(mask));
- itr_irq_status_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
- atomic_inc(&PHAL_ATLANTIC_B0->dpc);
+ atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
- *mask = itr_irq_statuslsw_get(self);
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
@@ -754,20 +760,20 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
- rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
- rpfl2multicast_flr_en_set(self,
- IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
- rpfl2_accept_all_mc_packets_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI));
+ hw_atl_rpfl2_accept_all_mc_packets_set(self,
+ IS_FILTER_ENABLED(IFF_ALLMULTI));
- rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
1U : 0U, i);
@@ -796,16 +802,16 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
- rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+ l, HW_ATL_B0_MAC_MIN + i);
- rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+ h, HW_ATL_B0_MAC_MIN + i);
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
HW_ATL_B0_MAC_MIN + i);
}
@@ -824,10 +830,10 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
switch (self->aq_nic_cfg->itr) {
case AQ_CFG_INTERRUPT_MODERATION_ON:
case AQ_CFG_INTERRUPT_MODERATION_AUTO:
- tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
- tdm_tdm_intr_moder_en_set(self, 1U);
- rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
- rdm_rdm_intr_moder_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
/* HW timers are in 2us units */
@@ -887,18 +893,18 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
break;
case AQ_CFG_INTERRUPT_MODERATION_OFF:
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
- tdm_tdm_intr_moder_en_set(self, 0U);
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
- rdm_rdm_intr_moder_en_set(self, 0U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
itr_tx = 0U;
itr_rx = 0U;
break;
}
for (i = HW_ATL_B0_RINGS_MAX; i--;) {
- reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
- reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+ hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
}
return aq_hw_err_from_flags(self);
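[annotation] Per the "HW timers are in 2us units" comment earlier in this function, the itr_tx/itr_rx values written in the per-ring loop just above are tick counts, not microseconds. The conversion as a hedged one-liner; the rounding behaviour is assumed, since the actual computation falls outside the lines this hunk touches:

/* microseconds -> 2us hardware ticks; truncation assumed */
static unsigned int itr_us_to_ticks(unsigned int us)
{
        return us / 2U;
}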
@@ -913,38 +919,19 @@ static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
-{
- int err = 0;
-
- err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- return err;
-}
-
-static struct aq_hw_ops hw_atl_ops_ = {
- .create = hw_atl_b0_create,
- .destroy = hw_atl_b0_destroy,
- .get_hw_caps = hw_atl_b0_get_hw_caps,
-
- .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
- .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
- .hw_set_link_speed = hw_atl_b0_hw_set_speed,
.hw_init = hw_atl_b0_hw_init,
.hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
@@ -974,21 +961,6 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_b0_hw_rss_set,
.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
- .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
-
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
-{
- bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
- bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
- (pdev->device == HW_ATL_DEVICE_ID_D100) ||
- (pdev->device == HW_ATL_DEVICE_ID_D107) ||
- (pdev->device == HW_ATL_DEVICE_ID_D108) ||
- (pdev->device == HW_ATL_DEVICE_ID_D109));
-
- bool is_rev_ok = (pdev->revision == 2U);
-
- return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
-}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index a1e1bce6c1f3..2cc8dacfdc27 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -16,19 +16,27 @@
#include "../aq_common.h"
-#ifndef PCI_VENDOR_ID_AQUANTIA
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
-#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
-#define HW_ATL_DEVICE_ID_0001 0x0001
-#define HW_ATL_DEVICE_ID_D100 0xD100
-#define HW_ATL_DEVICE_ID_D107 0xD107
-#define HW_ATL_DEVICE_ID_D108 0xD108
-#define HW_ATL_DEVICE_ID_D109 0xD109
+#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109
-#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100
+#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107
+#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109
-#endif
+#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
+#define hw_atl_b0_caps_aqc111e hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112e hw_atl_b0_caps_aqc109
+
+extern const struct aq_hw_ops hw_atl_ops_b0;
+
+#define hw_atl_ops_b1 hw_atl_ops_b0
#endif /* HW_ATL_B0_H */
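[annotation] The alias #defines above fold the AQC111/112 boards (and their S/E variants) onto the 108/109 capability sets, presumably because those parts share the same 5G and 2.5G speed grades, so no new tables are needed. Usage sketch:

#include "hw_atl_b0.h"

/* Both names resolve to the same object at compile time. */
static const struct aq_hw_caps_s *aqc111_caps = &hw_atl_b0_caps_aqc111;
/* aqc111_caps == &hw_atl_b0_caps_aqc108 */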
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 9aa2c6edfca2..405d1455c222 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,70 +142,6 @@
#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
- u64 buf_addr;
- u32 ctl;
- u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
- u32 rsvd;
- u32 len;
- u32 ctl;
- u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
- u64 buf_addr;
- u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
- u32 type;
- u32 rss_hash;
- u16 status;
- u16 pkt_len;
- u16 next_desc_ptr;
- u16 vlan;
-};
-
/* HW layer capabilities */
-static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
- .ports = 1U,
- .is_64_dma = true,
- .msix_irqs = 4U,
- .irq_mask = ~0U,
- .vecs = HW_ATL_B0_RSS_MAX,
- .tcs = HW_ATL_B0_TC_MAX,
- .rxd_alignment = 1U,
- .rxd_size = HW_ATL_B0_RXD_SIZE,
- .rxds = 8U * 1024U,
- .txd_alignment = 1U,
- .txd_size = HW_ATL_B0_TXD_SIZE,
- .txds = 8U * 1024U,
- .txhwb_alignment = 4096U,
- .tx_rings = HW_ATL_B0_TX_RINGS,
- .rx_rings = HW_ATL_B0_RX_RINGS,
- .hw_features = NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_LRO,
- .hw_priv_flags = IFF_UNICAST_FLT,
- .link_speed_msk = (HW_ATL_B0_RATE_10G |
- HW_ATL_B0_RATE_5G |
- HW_ATL_B0_RATE_2G5 |
- HW_ATL_B0_RATE_1G |
- HW_ATL_B0_RATE_100M),
- .flow_control = true,
- .mtu = HW_ATL_B0_MTU_JUMBO,
- .mac_regs_count = 88,
- .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED,
-};
#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 3de651afa8c7..10ba035dadb1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -16,111 +16,115 @@
#include "../aq_hw_utils.h"
/* global */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore)
{
- aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem);
}
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
{
- return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
}
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
- glb_reg_res_dis_msk,
- glb_reg_res_dis_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
+ HW_ATL_GLB_REG_RES_DIS_MSK,
+ HW_ATL_GLB_REG_RES_DIS_SHIFT,
glb_reg_res_dis);
}
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
{
- aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
- glb_soft_res_shift, soft_res);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
}
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
- glb_soft_res_msk,
- glb_soft_res_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT);
}
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR);
}
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
}
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
}
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
}
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
}
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
}
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
}
/* interrupt */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw)
{
- aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw);
}
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx)
{
/* register address for bitfield imr_rx{r}_en */
static u32 itr_imr_rxren_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}_en */
@@ -149,18 +153,19 @@ void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
irq_map_en_rx);
}
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx)
{
/* register address for bitfield imr_tx{t}_en */
static u32 itr_imr_txten_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}_en */
@@ -189,30 +194,30 @@ void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
irq_map_en_tx);
}
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
{
/* register address for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_msk[32] = {
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU
};
/* lower bit position of bitfield imr_rx{r}[4:0] */
@@ -229,30 +234,30 @@ void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
irq_map_rx);
}
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
{
/* register address for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_msk[32] = {
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U
};
/* lower bit position of bitfield imr_tx{t}[4:0] */
@@ -269,429 +274,463 @@ void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
irq_map_tx);
}
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw)
{
- aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw);
}
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
{
- aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
}
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
- itr_reg_res_dsbl_msk,
- itr_reg_res_dsbl_shift, irq_reg_res_dis);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
+ HW_ATL_ITR_REG_RES_DSBL_MSK,
+ HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
}
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
- u32 irq_status_clearlsw)
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw)
{
- aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw);
}
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
}
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT);
}
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
{
- aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift, res_irq);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT, res_irq);
}
/* rdm */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
- rdm_dcadcpuid_msk,
- rdm_dcadcpuid_shift, cpuid);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca),
+ HW_ATL_RDM_DCADCPUID_MSK,
+ HW_ATL_RDM_DCADCPUID_SHIFT, cpuid);
}
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
- rdm_dca_en_shift, rx_dca_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK,
+ HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en);
}
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
- rdm_dca_mode_shift, rx_dca_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR,
+ HW_ATL_RDM_DCA_MODE_MSK,
+ HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode);
}
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_data_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
- rdm_descddata_size_msk,
- rdm_descddata_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDDATA_SIZE_MSK,
+ HW_ATL_RDM_DESCDDATA_SIZE_SHIFT,
rx_desc_data_buff_size);
}
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
- rdm_dcaddesc_en_msk,
- rdm_dcaddesc_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_RDM_DCADDESC_EN_MSK,
+ HW_ATL_RDM_DCADDESC_EN_SHIFT,
rx_desc_dca_en);
}
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
- rdm_descden_msk,
- rdm_descden_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDEN_MSK,
+ HW_ATL_RDM_DESCDEN_SHIFT,
rx_desc_en);
}
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
- rdm_descdhdr_size_msk,
- rdm_descdhdr_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SIZE_MSK,
+ HW_ATL_RDM_DESCDHDR_SIZE_SHIFT,
rx_desc_head_buff_size);
}
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_splitting, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
- rdm_descdhdr_split_msk,
- rdm_descdhdr_split_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SPLIT_MSK,
+ HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT,
rx_desc_head_splitting);
}
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
- rdm_descdhd_msk, rdm_descdhd_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
+ HW_ATL_RDM_DESCDHD_MSK,
+ HW_ATL_RDM_DESCDHD_SHIFT);
}
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
- rdm_descdlen_msk, rdm_descdlen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT,
rx_desc_len);
}
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
- rdm_descdreset_msk, rdm_descdreset_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
+ HW_ATL_RDM_DESCDRESET_MSK,
+ HW_ATL_RDM_DESCDRESET_SHIFT,
rx_desc_res);
}
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_wr_wb_irq_en)
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
- rdm_int_desc_wrb_en_msk,
- rdm_int_desc_wrb_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_RDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT,
rx_desc_wr_wb_irq_en);
}
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
- rdm_dcadhdr_en_msk,
- rdm_dcadhdr_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca),
+ HW_ATL_RDM_DCADHDR_EN_MSK,
+ HW_ATL_RDM_DCADHDR_EN_SHIFT,
rx_head_dca_en);
}
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
- rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca),
+ HW_ATL_RDM_DCADPAY_EN_MSK,
+ HW_ATL_RDM_DCADPAY_EN_SHIFT,
rx_pld_dca_en);
}
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
- rdm_int_rim_en_msk,
- rdm_int_rim_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
+ HW_ATL_RDM_INT_RIM_EN_MSK,
+ HW_ATL_RDM_INT_RIM_EN_SHIFT,
rdm_intr_moder_en);
}
/* reg */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx)
{
- aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+ aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
}
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
}
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
{
- aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
}
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
{
- aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
}
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
- u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
rx_dma_desc_base_addrlsw);
}
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
- u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
rx_dma_desc_base_addrmsw);
}
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
}
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor),
rx_dma_desc_tail_ptr);
}
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk)
{
- aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR,
+ rx_flr_mcst_flr_msk);
}
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter)
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter)
{
- aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter),
+ rx_flr_mcst_flr);
}
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1)
{
- aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR,
+ rx_flr_rss_control1);
}
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
+ u32 rx_filter_control2)
{
- aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
}
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 rx_intr_moderation_ctl,
- u32 queue)
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue)
{
- aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
rx_intr_moderation_ctl);
}
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl)
{
- aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
}
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
- u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
tx_dma_desc_base_addrlsw);
}
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
- u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
tx_dma_desc_base_addrmsw);
}
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor),
tx_dma_desc_tail_ptr);
}
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue)
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
{
- aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
tx_intr_moderation_ctl);
}
/* RPB: rx packet buffer */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
{
- aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
- rpb_dma_sys_lbk_msk,
- rpb_dma_sys_lbk_shift, dma_sys_lbk);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
+ HW_ATL_RPB_DMA_SYS_LBK_MSK,
+ HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
}
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
- u32 rx_traf_class_mode)
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
- rpb_rpf_rx_tc_mode_msk,
- rpb_rpf_rx_tc_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT,
rx_traf_class_mode);
}
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
- rpb_rx_buf_en_shift, rx_buff_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
+ HW_ATL_RPB_RX_BUF_EN_MSK,
+ HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
}
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
- rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBHI_THRESH_MSK,
+ HW_ATL_RPB_RXBHI_THRESH_SHIFT,
rx_buff_hi_threshold_per_tc);
}
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
- rpb_rxblo_thresh_msk,
- rpb_rxblo_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBLO_THRESH_MSK,
+ HW_ATL_RPB_RXBLO_THRESH_SHIFT,
rx_buff_lo_threshold_per_tc);
}
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
- rpb_rx_fc_mode_msk,
- rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR,
+ HW_ATL_RPB_RX_FC_MODE_MSK,
+ HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc, u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
- rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer),
+ HW_ATL_RPB_RXBBUF_SIZE_MSK,
+ HW_ATL_RPB_RXBBUF_SIZE_SHIFT,
rx_pkt_buff_size_per_tc);
}
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
- rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
+ HW_ATL_RPB_RXBXOFF_EN_MSK,
+ HW_ATL_RPB_RXBXOFF_EN_SHIFT,
rx_xoff_en_per_tc);
}
/* rpf */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_count_threshold)
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
- rpfl2bc_thresh_msk,
- rpfl2bc_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR,
+ HW_ATL_RPFL2BC_THRESH_MSK,
+ HW_ATL_RPFL2BC_THRESH_SHIFT,
l2broadcast_count_threshold);
}
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
- rpfl2bc_en_shift, l2broadcast_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK,
+ HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en);
}
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
- rpfl2bc_act_shift, l2broadcast_flr_act);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR,
+ HW_ATL_RPFL2BC_ACT_MSK,
+ HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
}
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
- u32 filter)
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
- rpfl2mc_enf_msk,
- rpfl2mc_enf_shift, l2multicast_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
+ HW_ATL_RPFL2MC_ENF_MSK,
+ HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
}
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
- u32 l2promiscuous_mode_en)
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
- rpfl2promis_mode_msk,
- rpfl2promis_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT,
l2promiscuous_mode_en);
}
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
- u32 filter)
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
- rpfl2uc_actf_msk, rpfl2uc_actf_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter),
+ HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT,
l2unicast_flr_act);
}
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
- u32 filter)
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
- rpfl2uc_enf_msk,
- rpfl2uc_enf_shift, l2unicast_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter),
+ HW_ATL_RPFL2UC_ENF_MSK,
+ HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en);
}
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addresslsw,
- u32 filter)
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter)
{
- aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
+ aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter),
l2unicast_dest_addresslsw);
}
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addressmsw,
- u32 filter)
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
- rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter),
+ HW_ATL_RPFL2UC_DAFMSW_MSK,
+ HW_ATL_RPFL2UC_DAFMSW_SHIFT,
l2unicast_dest_addressmsw);
}
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
- u32 l2_accept_all_mc_packets)
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
- rpfl2mc_accept_all_msk,
- rpfl2mc_accept_all_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_MSK,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT,
l2_accept_all_mc_packets);
}
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc)
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
static u32 rpf_rpb_rx_tc_upt_adr[8] = {
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U,
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U
};
/* bitmask for bitfield rx_tc_up{t}[2:0] */
@@ -711,273 +750,290 @@ void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
user_priority_tc_map);
}
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
- rpf_rss_key_addr_msk,
- rpf_rss_key_addr_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
+ HW_ATL_RPF_RSS_KEY_ADDR_MSK,
+ HW_ATL_RPF_RSS_KEY_ADDR_SHIFT,
rss_key_addr);
}
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
{
- aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR,
rss_key_wr_data);
}
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT);
}
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT,
rss_key_wr_en);
}
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
- rpf_rss_redir_addr_msk,
- rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR,
+ HW_ATL_RPF_RSS_REDIR_ADDR_MSK,
+ HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT,
+ rss_redir_tbl_addr);
}
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_wr_data)
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
- rpf_rss_redir_wr_data_msk,
- rpf_rss_redir_wr_data_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT,
rss_redir_tbl_wr_data);
}
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT);
}
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
}
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk)
{
- aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
- rpf_tpo_rpf_sys_lbk_msk,
- rpf_tpo_rpf_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
tpo_to_rpf_sys_lbk);
}
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
- rpf_vl_inner_tpid_msk,
- rpf_vl_inner_tpid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
+ HW_ATL_RPF_VL_INNER_TPID_MSK,
+ HW_ATL_RPF_VL_INNER_TPID_SHIFT,
vlan_inner_etht);
}
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
- rpf_vl_outer_tpid_msk,
- rpf_vl_outer_tpid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
+ HW_ATL_RPF_VL_OUTER_TPID_MSK,
+ HW_ATL_RPF_VL_OUTER_TPID_SHIFT,
vlan_outer_etht);
}
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
- rpf_vl_promis_mode_msk,
- rpf_vl_promis_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT,
vlan_prom_mode_en);
}
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
- u32 vlan_accept_untagged_packets)
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
- rpf_vl_accept_untagged_mode_msk,
- rpf_vl_accept_untagged_mode_shift,
- vlan_accept_untagged_packets);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
+ vlan_acc_untagged_packets);
}
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
- rpf_vl_untagged_act_msk,
- rpf_vl_untagged_act_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
vlan_untagged_act);
}
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
- rpf_vl_en_f_msk,
- rpf_vl_en_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_EN_F_MSK,
+ HW_ATL_RPF_VL_EN_F_SHIFT,
vlan_flr_en);
}
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
- rpf_vl_act_f_msk,
- rpf_vl_act_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
+ HW_ATL_RPF_VL_ACT_F_MSK,
+ HW_ATL_RPF_VL_ACT_F_SHIFT,
vlan_flr_act);
}
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
- rpf_vl_id_f_msk,
- rpf_vl_id_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
+ HW_ATL_RPF_VL_ID_F_MSK,
+ HW_ATL_RPF_VL_ID_F_SHIFT,
vlan_id_flr);
}
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
- rpf_et_enf_msk,
- rpf_et_enf_shift, etht_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
+ HW_ATL_RPF_ET_ENF_MSK,
+ HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
}
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority_en, u32 filter)
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
- rpf_et_upfen_msk, rpf_et_upfen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
+ HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
etht_user_priority_en);
}
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
- u32 filter)
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
- rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
+ HW_ATL_RPF_ET_RXQFEN_MSK,
+ HW_ATL_RPF_ET_RXQFEN_SHIFT,
etht_rx_queue_en);
}
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
- u32 filter)
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
- rpf_et_upf_msk,
- rpf_et_upf_shift, etht_user_priority);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
+ HW_ATL_RPF_ET_UPF_MSK,
+ HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
}
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
- u32 filter)
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
- rpf_et_rxqf_msk,
- rpf_et_rxqf_shift, etht_rx_queue);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_RXQF_MSK,
+ HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
}
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
- u32 filter)
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
- rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_MNG_RXQF_MSK,
+ HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
etht_mgt_queue);
}
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
- rpf_et_actf_msk,
- rpf_et_actf_shift, etht_flr_act);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
+ HW_ATL_RPF_ET_ACTF_MSK,
+ HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
}
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
- rpf_et_valf_msk,
- rpf_et_valf_shift, etht_flr);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
+ HW_ATL_RPF_ET_VALF_MSK,
+ HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
/* RPO: rx packet offload */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en)
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
- rpo_ipv4chk_en_msk,
- rpo_ipv4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR,
+ HW_ATL_RPO_IPV4CHK_EN_MSK,
+ HW_ATL_RPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_vlan_stripping, u32 descriptor)
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
- rpo_descdvl_strip_msk,
- rpo_descdvl_strip_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor),
+ HW_ATL_RPO_DESCDVL_STRIP_MSK,
+ HW_ATL_RPO_DESCDVL_STRIP_SHIFT,
rx_desc_vlan_stripping);
}
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
- rpol4chk_en_shift, tcp_udp_crc_offload_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR,
+ HW_ATL_RPOL4CHK_EN_MSK,
+ HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en);
}
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
{
- aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en);
}
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
- u32 lro_patch_optimization_en)
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
- rpo_lro_ptopt_en_msk,
- rpo_lro_ptopt_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR,
+ HW_ATL_RPO_LRO_PTOPT_EN_MSK,
+ HW_ATL_RPO_LRO_PTOPT_EN_SHIFT,
lro_patch_optimization_en);
}
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
- u32 lro_qsessions_lim)
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
- rpo_lro_qses_lmt_msk,
- rpo_lro_qses_lmt_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR,
+ HW_ATL_RPO_LRO_QSES_LMT_MSK,
+ HW_ATL_RPO_LRO_QSES_LMT_SHIFT,
lro_qsessions_lim);
}
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
- rpo_lro_tot_dsc_lmt_msk,
- rpo_lro_tot_dsc_lmt_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT,
lro_total_desc_lim);
}
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lro_min_pld_of_first_pkt)
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
- rpo_lro_pkt_min_msk,
- rpo_lro_pkt_min_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR,
+ HW_ATL_RPO_LRO_PKT_MIN_MSK,
+ HW_ATL_RPO_LRO_PKT_MIN_SHIFT,
lro_min_pld_of_first_pkt);
}
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
{
- aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim);
}
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
- u32 lro_max_number_of_descriptors,
- u32 lro)
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_number_of_descriptors,
+ u32 lro)
{
/* Register address for bitfield lro{L}_des_max[1:0] */
static u32 rpo_lro_ldes_max_adr[32] = {
@@ -1017,378 +1073,390 @@ void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
lro_max_number_of_descriptors);
}
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
- u32 lro_time_base_divider)
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
- rpo_lro_tb_div_msk,
- rpo_lro_tb_div_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
+ HW_ATL_RPO_LRO_TB_DIV_MSK,
+ HW_ATL_RPO_LRO_TB_DIV_SHIFT,
lro_time_base_divider);
}
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_inactive_interval)
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
- rpo_lro_ina_ival_msk,
- rpo_lro_ina_ival_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
+ HW_ATL_RPO_LRO_INA_IVAL_MSK,
+ HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
lro_inactive_interval);
}
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_max_coalescing_interval)
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
- rpo_lro_max_ival_msk,
- rpo_lro_max_ival_shift,
- lro_max_coalescing_interval);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
+ HW_ATL_RPO_LRO_MAX_IVAL_MSK,
+ HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
+ lro_max_coal_interval);
}
/* rx */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
- rx_reg_res_dsbl_msk,
- rx_reg_res_dsbl_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR,
+ HW_ATL_RX_REG_RES_DSBL_MSK,
+ HW_ATL_RX_REG_RES_DSBL_SHIFT,
rx_reg_res_dis);
}
/* tdm */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
- tdm_dcadcpuid_msk,
- tdm_dcadcpuid_shift, cpuid);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca),
+ HW_ATL_TDM_DCADCPUID_MSK,
+ HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
}
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
- u32 large_send_offload_en)
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en)
{
- aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+ aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en);
}
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
- tdm_dca_en_shift, tx_dca_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK,
+ HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en);
}
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
- tdm_dca_mode_shift, tx_dca_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR,
+ HW_ATL_TDM_DCA_MODE_MSK,
+ HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode);
}
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
- tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_TDM_DCADDESC_EN_MSK,
+ HW_ATL_TDM_DCADDESC_EN_SHIFT,
tx_desc_dca_en);
}
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
- tdm_descden_msk,
- tdm_descden_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDEN_MSK,
+ HW_ATL_TDM_DESCDEN_SHIFT,
tx_desc_en);
}
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
- tdm_descdhd_msk, tdm_descdhd_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
+ HW_ATL_TDM_DESCDHD_MSK,
+ HW_ATL_TDM_DESCDHD_SHIFT);
}
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
- u32 descriptor)
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
- tdm_descdlen_msk,
- tdm_descdlen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDLEN_MSK,
+ HW_ATL_TDM_DESCDLEN_SHIFT,
tx_desc_len);
}
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_irq_en)
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
- tdm_int_desc_wrb_en_msk,
- tdm_int_desc_wrb_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_TDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT,
tx_desc_wr_wb_irq_en);
}
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_threshold,
- u32 descriptor)
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
- tdm_descdwrb_thresh_msk,
- tdm_descdwrb_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor),
+ HW_ATL_TDM_DESCDWRB_THRESH_MSK,
+ HW_ATL_TDM_DESCDWRB_THRESH_SHIFT,
tx_desc_wr_wb_threshold);
}
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
- u32 tdm_irq_moderation_en)
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
- tdm_int_mod_en_msk,
- tdm_int_mod_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
+ HW_ATL_TDM_INT_MOD_EN_MSK,
+ HW_ATL_TDM_INT_MOD_EN_SHIFT,
tdm_irq_moderation_en);
}
/* thm */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_first_pkt)
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
- thm_lso_tcp_flag_first_msk,
- thm_lso_tcp_flag_first_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT,
lso_tcp_flag_of_first_pkt);
}
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_last_pkt)
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
- thm_lso_tcp_flag_last_msk,
- thm_lso_tcp_flag_last_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT,
lso_tcp_flag_of_last_pkt);
}
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt)
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
- thm_lso_tcp_flag_mid_msk,
- thm_lso_tcp_flag_mid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT,
lso_tcp_flag_of_middle_pkt);
}
/* TPB: tx packet buffer */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
- tpb_tx_buf_en_shift, tx_buff_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
+ HW_ATL_TPB_TX_BUF_EN_MSK,
+ HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
- tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBHI_THRESH_MSK,
+ HW_ATL_TPB_TXBHI_THRESH_SHIFT,
tx_buff_hi_threshold_per_tc);
}
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
- tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBLO_THRESH_MSK,
+ HW_ATL_TPB_TXBLO_THRESH_SHIFT,
tx_buff_lo_threshold_per_tc);
}
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
- tpb_dma_sys_lbk_msk,
- tpb_dma_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
+ HW_ATL_TPB_DMA_SYS_LBK_MSK,
+ HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
tx_dma_sys_lbk_en);
}
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
- tpb_txbbuf_size_msk,
- tpb_txbbuf_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
+ HW_ATL_TPB_TXBBUF_SIZE_MSK,
+ HW_ATL_TPB_TXBBUF_SIZE_SHIFT,
tx_pkt_buff_size_per_tc);
}
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_path_scp_ins_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
- tpb_tx_scp_ins_en_msk,
- tpb_tx_scp_ins_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR,
+ HW_ATL_TPB_TX_SCP_INS_EN_MSK,
+ HW_ATL_TPB_TX_SCP_INS_EN_SHIFT,
tx_path_scp_ins_en);
}
/* TPO: tx packet offload */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en)
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
- tpo_ipv4chk_en_msk,
- tpo_ipv4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR,
+ HW_ATL_TPO_IPV4CHK_EN_MSK,
+ HW_ATL_TPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
- tpol4chk_en_msk,
- tpol4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR,
+ HW_ATL_TPOL4CHK_EN_MSK,
+ HW_ATL_TPOL4CHK_EN_SHIFT,
tcp_udp_crc_offload_en);
}
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en)
{
- aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
- tpo_pkt_sys_lbk_msk,
- tpo_pkt_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
+ HW_ATL_TPO_PKT_SYS_LBK_MSK,
+ HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
tx_pkt_sys_lbk_en);
}
/* TPS: tx packet scheduler */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_data_arb_mode)
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
- tps_data_tc_arb_mode_msk,
- tps_data_tc_arb_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT,
tx_pkt_shed_data_arb_mode);
}
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
- u32 curr_time_res)
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
- tps_desc_rate_ta_rst_msk,
- tps_desc_rate_ta_rst_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR,
+ HW_ATL_TPS_DESC_RATE_TA_RST_MSK,
+ HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT,
curr_time_res);
}
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim)
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
- tps_desc_rate_lim_msk,
- tps_desc_rate_lim_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR,
+ HW_ATL_TPS_DESC_RATE_LIM_MSK,
+ HW_ATL_TPS_DESC_RATE_LIM_SHIFT,
tx_pkt_shed_desc_rate_lim);
}
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
- tps_desc_tc_arb_mode_msk,
- tps_desc_tc_arb_mode_shift,
- tx_pkt_shed_desc_tc_arb_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT,
+ arb_mode);
}
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
- u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
- tps_desc_tctcredit_max_msk,
- tps_desc_tctcredit_max_shift,
- tx_pkt_shed_desc_tc_max_credit);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT,
+ max_credit);
}
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
- tps_desc_tctweight_msk,
- tps_desc_tctweight_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
tx_pkt_shed_desc_tc_weight);
}
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
- tps_desc_vm_arb_mode_msk,
- tps_desc_vm_arb_mode_shift,
- tx_pkt_shed_desc_vm_arb_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT,
+ arb_mode);
}
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
- u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
- tps_data_tctcredit_max_msk,
- tps_data_tctcredit_max_shift,
- tx_pkt_shed_tc_data_max_credit);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
}
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
- tps_data_tctweight_msk,
- tps_data_tctweight_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
tx_pkt_shed_tc_data_weight);
}
/* tx */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
- tx_reg_res_dsbl_msk,
- tx_reg_res_dsbl_shift, tx_reg_res_dis);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR,
+ HW_ATL_TX_REG_RES_DSBL_MSK,
+ HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
}
/* msm */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
- msm_reg_access_busy_msk,
- msm_reg_access_busy_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
+ HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
+ HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
}
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
- u32 reg_addr_for_indirect_addr)
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
- msm_reg_addr_msk,
- msm_reg_addr_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
+ HW_ATL_MSM_REG_ADDR_MSK,
+ HW_ATL_MSM_REG_ADDR_SHIFT,
reg_addr_for_indirect_addr);
}
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
- msm_reg_rd_strobe_msk,
- msm_reg_rd_strobe_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
+ HW_ATL_MSM_REG_RD_STROBE_MSK,
+ HW_ATL_MSM_REG_RD_STROBE_SHIFT,
reg_rd_strobe);
}
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
}
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
{
- aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+ aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
}
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
- msm_reg_wr_strobe_msk,
- msm_reg_wr_strobe_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
+ HW_ATL_MSM_REG_WR_STROBE_MSK,
+ HW_ATL_MSM_REG_WR_STROBE_SHIFT,
reg_wr_strobe);
}
/* pci */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
- pci_reg_res_dsbl_msk,
- pci_reg_res_dsbl_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
+ HW_ATL_PCI_REG_RES_DSBL_MSK,
+ HW_ATL_PCI_REG_RES_DSBL_SHIFT,
pci_reg_res_dis);
}
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
- u32 scratch_scp)
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp)
{
- aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
glb_cpu_scratch_scp);
}
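
Note: every setter in this file reduces to the same read-modify-write idiom -- look the field up by its _ADR/_MSK/_SHIFT triple and splice the new value into the register -- so the rename above changes names only, never behavior. A minimal standalone sketch of that idiom follows; read_reg()/write_reg() are placeholders for the driver's MMIO accessors, and the driver's real aq_hw_write_reg_bit() may differ in detail.

#include <stdint.h>

extern uint32_t read_reg(uint32_t addr);		/* placeholder MMIO read  */
extern void write_reg(uint32_t addr, uint32_t val);	/* placeholder MMIO write */

static void write_reg_bit(uint32_t addr, uint32_t msk,
			  uint32_t shift, uint32_t val)
{
	uint32_t reg = read_reg(addr);	/* fetch the whole register   */

	reg &= ~msk;			/* clear the target bitfield  */
	reg |= (val << shift) & msk;	/* merge the new field value  */
	write_reg(addr, reg);		/* write the merged word back */
}

With such a helper, hw_atl_tpb_tx_buff_en_set() above is just write_reg_bit(HW_ATL_TPB_TX_BUF_EN_ADR, HW_ATL_TPB_TX_BUF_EN_MSK, HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en) routed through the aq_hw_s handle.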
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index ed1085b95adb..dfb426f2dc2c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -21,657 +21,681 @@ struct aq_hw_s;
/* global */
/* set global microprocessor semaphore */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
- u32 semaphore);
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore);
/* get global microprocessor semaphore */
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
/* set global register reset disable */
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
/* set soft reset */
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
/* get soft reset */
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter lsw */
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter lsw */
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter lsw */
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter lsw */
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter msw */
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter msw */
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter msw */
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter msw */
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get msm rx errors counter register */
-u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx unicast frames counter register */
-u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx multicast frames counter register */
-u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast frames counter register */
-u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast octets counter register 1 */
-u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm rx unicast octets counter register 0 */
-u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get rx dma statistics counter 7 */
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
/* get msm tx errors counter register */
-u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx unicast frames counter register */
-u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast frames counter register */
-u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast frames counter register */
-u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast octets counter register 1 */
-u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast octets counter register 1 */
-u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx unicast octets counter register 0 */
-u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get global mif identification */
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
/* interrupt */
/* set interrupt auto mask lsw */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw);
/* set interrupt mapping enable rx */
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx);
/* set interrupt mapping enable tx */
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx);
/* set interrupt mapping rx */
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
/* set interrupt mapping tx */
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
/* set interrupt mask clear lsw */
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw);
/* set interrupt mask set lsw */
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
/* set interrupt register reset disable */
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
/* set interrupt status clear lsw */
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
- u32 irq_status_clearlsw);
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw);
/* get interrupt status lsw */
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
/* get reset interrupt */
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
/* set reset interrupt */
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
/* rdm */
/* set cpu id */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set rx dca enable */
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
/* set rx dca mode */
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
/* set rx descriptor data buffer size */
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_data_buff_size,
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
u32 descriptor);
/* set rx descriptor dca enable */
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
- u32 dca);
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca);
/* set rx descriptor enable */
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor);
/* set rx descriptor header splitting */
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_splitting,
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
u32 descriptor);
/* get rx descriptor head pointer */
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx descriptor length */
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor);
/* set rx descriptor write-back interrupt enable */
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_wr_wb_irq_en);
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en);
/* set rx header dca enable */
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
- u32 dca);
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca);
/* set rx payload dca enable */
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca);
/* set rx descriptor header buffer size */
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_buff_size,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor);
/* set rx descriptor reset */
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor);
/* Set RDM Interrupt Moderation Enable */
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en);
/* reg */
/* set general interrupt mapping register */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx);
/* get general interrupt status register */
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
/* set interrupt global control register */
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
/* set interrupt throttle register */
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
/* set rx dma descriptor base address lsw */
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
u32 descriptor);
/* set rx dma descriptor base address msw */
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
u32 descriptor);
/* get rx dma descriptor status register */
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx dma descriptor tail pointer register */
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_tail_ptr,
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
u32 descriptor);
/* set rx filter multicast filter mask register */
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
- u32 rx_flr_mcst_flr_msk);
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk);
/* set rx filter multicast filter register */
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter);
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter);
/* set rx filter rss control register 1 */
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
- u32 rx_flr_rss_control1);
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1);
/* Set RX Filter Control Register 2 */
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
/* Set RX Interrupt Moderation Control Register */
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 rx_intr_moderation_ctl,
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
u32 queue);
/* set tx dma debug control */
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl);
/* set tx dma descriptor base address lsw */
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
u32 descriptor);
/* set tx dma descriptor base address msw */
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
u32 descriptor);
/* set tx dma descriptor tail pointer register */
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_tail_ptr,
- u32 descriptor);
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor);
/* Set TX Interrupt Moderation Control Register */
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue);
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
/* set global microprocessor scratch pad */
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
- u32 glb_cpu_scratch_scp, u32 scratch_scp);
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp);
/* rpb */
/* set dma system loopback */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
/* set rx traffic class mode */
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
- u32 rx_traf_class_mode);
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode);
/* set rx buffer enable */
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
/* set rx buffer high threshold (per tc) */
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer);
/* set rx buffer low threshold (per tc) */
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
u32 buffer);
/* set rx flow control mode */
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_pkt_buff_size_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer);
/* set rx xoff enable (per tc) */
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc,
+ u32 buffer);
/* rpf */
/* set l2 broadcast count threshold */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_count_threshold);
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold);
/* set l2 broadcast enable */
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
/* set l2 broadcast filter action */
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_flr_act);
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act);
/* set l2 multicast filter enable */
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
- u32 filter);
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter);
/* set l2 promiscuous mode enable */
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
- u32 l2promiscuous_mode_en);
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en);
/* set l2 unicast filter action */
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
- u32 filter);
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter);
/* set l2 unicast filter enable */
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
- u32 filter);
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter);
/* set l2 unicast destination address lsw */
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addresslsw,
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
u32 filter);
/* set l2 unicast destination address msw */
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addressmsw,
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
u32 filter);
/* Set L2 Accept all Multicast packets */
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
- u32 l2_accept_all_mc_packets);
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets);
/* set user-priority tc mapping */
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc);
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc);
/* set rss key address */
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
/* set rss key write data */
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
/* get rss key write enable */
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss key write enable */
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
/* set rss redirection table address */
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_addr);
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr);
/* set rss redirection table write data */
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_wr_data);
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data);
/* get rss redirection write enable */
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss redirection write enable */
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
/* set tpo to rpf system loopback */
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
- u32 tpo_to_rpf_sys_lbk);
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk);
/* set vlan inner ethertype */
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
/* set vlan outer ethertype */
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
/* set vlan promiscuous mode enable */
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en);
/* Set VLAN untagged action */
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act);
/* Set VLAN accept untagged packets */
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
- u32 vlan_accept_untagged_packets);
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets);
/* Set VLAN filter enable */
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter);
/* Set VLAN Filter Action */
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
- u32 filter);
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+ u32 filter);
/* Set VLAN ID Filter */
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter);
/* set ethertype filter enable */
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter);
/* set ethertype user-priority enable */
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority_en, u32 filter);
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en,
+ u32 filter);
/* set ethertype rx queue enable */
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter);
/* set ethertype rx queue */
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter);
/* set ethertype user-priority */
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
- u32 filter);
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter);
/* set ethertype management queue */
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
- u32 filter);
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter);
/* set ethertype filter action */
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
- u32 filter);
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter);
/* set ethertype filter */
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
/* rpo */
/* set ipv4 header checksum offload enable */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en);
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
/* set rx descriptor vlan stripping */
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_vlan_stripping,
- u32 descriptor);
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor);
/* set tcp/udp checksum offload enable */
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en);
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
/* Set LRO Patch Optimization Enable. */
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
- u32 lro_patch_optimization_en);
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en);
/* Set Large Receive Offload Enable */
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
/* Set LRO Q Sessions Limit */
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim);
/* Set LRO Total Descriptor Limit */
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim);
/* Set LRO Min Payload of First Packet */
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lro_min_pld_of_first_pkt);
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt);
/* Set LRO Packet Limit */
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
/* Set LRO Max Number of Descriptors */
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
- u32 lro_max_desc_num, u32 lro);
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_desc_num, u32 lro);
/* Set LRO Time Base Divider */
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
- u32 lro_time_base_divider);
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider);
/* Set LRO Inactive Interval */
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_inactive_interval);
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval);
/* Set LRO Max Coalescing Interval */
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_max_coalescing_interval);
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval);
/* rx */
/* set rx register reset disable */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
/* tdm */
/* set cpu id */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set large send offload enable */
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
- u32 large_send_offload_en);
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en);
/* set tx descriptor enable */
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor);
/* set tx dca enable */
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
/* set tx dca mode */
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
/* set tx descriptor dca enable */
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca);
/* get tx descriptor head pointer */
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set tx descriptor length */
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
- u32 descriptor);
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor);
/* set tx descriptor write-back interrupt enable */
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_irq_en);
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en);
/* set tx descriptor write-back threshold */
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_threshold,
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
u32 descriptor);
/* Set TDM Interrupt Moderation Enable */
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
- u32 tdm_irq_moderation_en);
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en);
/* thm */
/* set lso tcp flag of first packet */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_first_pkt);
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt);
/* set lso tcp flag of last packet */
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_last_pkt);
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt);
/* set lso tcp flag of middle packet */
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt);
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt);
/* tpb */
/* set tx buffer enable */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
/* set tx buffer high threshold (per tc) */
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
u32 buffer);
/* set tx buffer low threshold (per tc) */
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
u32 buffer);
/* set tx dma system loopback enable */
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_sys_lbk_en);
/* set tx packet buffer size (per tc) */
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer);
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer);
/* set tx path pad insert enable */
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_path_scp_ins_en);
/* tpo */
/* set ipv4 header checksum offload enable */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en);
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
/* set tcp/udp checksum offload enable */
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en);
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
/* set tx pkt system loopback enable */
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en);
/* tps */
/* set tx packet scheduler data arbitration mode */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_data_arb_mode);
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode);
/* set tx packet scheduler descriptor rate current time reset */
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
- u32 curr_time_res);
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res);
/* set tx packet scheduler descriptor rate limit */
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim);
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim);
/* set tx packet scheduler descriptor tc arbitration mode */
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
/* set tx packet scheduler descriptor tc max credit */
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
u32 tc);
/* set tx packet scheduler descriptor tc weight */
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight,
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
u32 tc);
/* set tx packet scheduler descriptor vm arbitration mode */
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
/* set tx packet scheduler tc data max credit */
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
u32 tc);
/* set tx packet scheduler tc data weight */
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight,
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
u32 tc);
/* tx */
/* set tx register reset disable */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
/* msm */
/* get register access status */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
/* set register address for indirect address */
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
- u32 reg_addr_for_indirect_addr);
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr);
/* set register read strobe */
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
/* get register read data */
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
/* set register write data */
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
/* set register write strobe */
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* pci */
/* set pci register reset disable */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
#endif /* HW_ATL_LLH_H */
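
Note: the msm declarations above describe an indirect register window -- software latches an address, pulses a read or write strobe, polls the busy flag, and moves data through the rd/wr data registers. Below is a hedged sketch of an indirect read built only from those declarations; the poll bound, the timeout convention, and the exact strobe/poll ordering are assumptions for illustration, not taken from the driver.

#include <stdint.h>

typedef uint32_t u32;
struct aq_hw_s;

/* declared in hw_atl_llh.h above */
u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
 u32 reg_addr_for_indirect_addr);
void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);

#define MSM_POLL_MAX 100	/* assumed retry bound, illustration only */

/* read one MSM register through the indirect window */
static int msm_indirect_read(struct aq_hw_s *hw, u32 addr, u32 *val)
{
	int i;

	hw_atl_msm_reg_addr_for_indirect_addr_set(hw, addr);	/* latch address  */
	hw_atl_msm_reg_rd_strobe_set(hw, 1);			/* start the read */

	for (i = 0; i < MSM_POLL_MAX; i++) {
		if (!hw_atl_msm_reg_access_status_get(hw)) {	/* busy cleared?  */
			*val = hw_atl_msm_reg_rd_data_get(hw);
			return 0;
		}
	}
	return -1;	/* assumed timeout convention for the sketch */
}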
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 93450ec930e8..e0cf70120f1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -18,91 +18,91 @@
* base address: 0x000003a0
* parameter: semaphore {s} | stride size 0x4 | range [0, 15]
*/
-#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4)
+#define HW_ATL_GLB_CPU_SEM_ADR(semaphore) (0x000003a0u + (semaphore) * 0x4)
/* register address for bitfield rx dma good octet counter lsw [1f:0] */
-#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808
/* register address for bitfield rx dma good packet counter lsw [1f:0] */
-#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800
/* register address for bitfield tx dma good octet counter lsw [1f:0] */
-#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808
/* register address for bitfield tx dma good packet counter lsw [1f:0] */
-#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800
/* register address for bitfield rx dma good octet counter msw [3f:20] */
-#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c
/* register address for bitfield rx dma good packet counter msw [3f:20] */
-#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804
/* register address for bitfield tx dma good octet counter msw [3f:20] */
-#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c
/* register address for bitfield tx dma good packet counter msw [3f:20] */
-#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804
/* preprocessor definitions for msm rx errors counter register */
-#define mac_msm_rx_errs_cnt_adr 0x00000120u
+#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u
/* preprocessor definitions for msm rx unicast frames counter register */
-#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
+#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u
/* preprocessor definitions for msm rx multicast frames counter register */
-#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
+#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u
/* preprocessor definitions for msm rx broadcast frames counter register */
-#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
+#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u
/* preprocessor definitions for msm rx broadcast octets counter register 1 */
-#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u
/* preprocessor definitions for msm rx broadcast octets counter register 2 */
-#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u
/* preprocessor definitions for msm rx unicast octets counter register 0 */
-#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
+#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
/* preprocessor definitions for rx dma statistics counter 7 */
-#define rx_dma_stat_counter7_adr 0x00006818u
+#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u
/* preprocessor definitions for msm tx unicast frames counter register */
-#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
+#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
/* preprocessor definitions for msm tx multicast frames counter register */
-#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
+#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u
/* preprocessor definitions for global mif identification */
-#define glb_mif_id_adr 0x0000001cu
+#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu
/* register address for bitfield iamr_lsw[1f:0] */
-#define itr_iamrlsw_adr 0x00002090
+#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090
/* register address for bitfield rx dma drop packet counter [1f:0] */
-#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
+#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818
/* register address for bitfield imcr_lsw[1f:0] */
-#define itr_imcrlsw_adr 0x00002070
+#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070
/* register address for bitfield imsr_lsw[1f:0] */
-#define itr_imsrlsw_adr 0x00002060
+#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060
/* register address for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_adr 0x00002300
+#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300
/* bitmask for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000
/* lower bit position of bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_shift 29
+#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29
/* register address for bitfield iscr_lsw[1f:0] */
-#define itr_iscrlsw_adr 0x00002050
+#define HW_ATL_ITR_ISCRLSW_ADR 0x00002050
/* register address for bitfield isr_lsw[1f:0] */
-#define itr_isrlsw_adr 0x00002000
+#define HW_ATL_ITR_ISRLSW_ADR 0x00002000
/* register address for bitfield itr_reset */
-#define itr_res_adr 0x00002300
+#define HW_ATL_ITR_RES_ADR 0x00002300
/* bitmask for bitfield itr_reset */
-#define itr_res_msk 0x80000000
+#define HW_ATL_ITR_RES_MSK 0x80000000
/* lower bit position of bitfield itr_reset */
-#define itr_res_shift 31
+#define HW_ATL_ITR_RES_SHIFT 31
/* register address for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_shift 0
+#define HW_ATL_RDM_DCADCPUID_SHIFT 0
/* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
/* rx dca_en bitfield definitions
* preprocessor definitions for the bitfield "dca_en".
@@ -110,17 +110,17 @@
*/
/* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
/* bitmask for bitfield dca_en */
-#define rdm_dca_en_msk 0x80000000
+#define HW_ATL_RDM_DCA_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca_en */
-#define rdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca_en */
-#define rdm_dca_en_shift 31
+#define HW_ATL_RDM_DCA_EN_SHIFT 31
/* width of bitfield dca_en */
-#define rdm_dca_en_width 1
+#define HW_ATL_RDM_DCA_EN_WIDTH 1
/* default value of bitfield dca_en */
-#define rdm_dca_en_default 0x1
+#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1
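
Note: each bitfield in this header carries the same family of macros, and the values are mutually consistent by construction: MSK is WIDTH one-bits shifted up by SHIFT, and MSKN is simply ~MSK. A tiny standalone check of that invariant against the dca_en values above (an illustration, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t shift = 31, width = 1;	/* dca_en field above */
	const uint32_t msk = ((1u << width) - 1u) << shift;

	assert(msk == 0x80000000u);	/* HW_ATL_RDM_DCA_EN_MSK  */
	assert(~msk == 0x7fffffffu);	/* HW_ATL_RDM_DCA_EN_MSKN */
	return 0;
}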
/* rx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -128,17 +128,17 @@
*/
/* register address for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_adr 0x00006180
+#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180
/* bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_msk 0x0000000f
+#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_shift 0
+#define HW_ATL_RDM_DCA_MODE_SHIFT 0
/* width of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_width 4
+#define HW_ATL_RDM_DCA_MODE_WIDTH 4
/* default value of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_default 0x0
+#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0
/* rx desc{d}_data_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
@@ -147,17 +147,18 @@
*/
/* register address for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_msk 0x0000001f
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f
/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_mskn 0xffffffe0
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0
/* lower bit position of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_shift 0
+#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0
/* width of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_width 5
+#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5
/* default value of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_default 0x0
+#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0
/* rx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -166,17 +167,17 @@
*/
/* register address for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_shift 31
+#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31
/* width of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_width 1
+#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1
/* default value of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_default 0x0
+#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0
/* rx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
@@ -185,17 +186,17 @@
*/
/* register address for bitfield desc{d}_en */
-#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_en */
-#define rdm_descden_msk 0x80000000
+#define HW_ATL_RDM_DESCDEN_MSK 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
-#define rdm_descden_mskn 0x7fffffff
+#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
-#define rdm_descden_shift 31
+#define HW_ATL_RDM_DESCDEN_SHIFT 31
/* width of bitfield desc{d}_en */
-#define rdm_descden_width 1
+#define HW_ATL_RDM_DESCDEN_WIDTH 1
/* default value of bitfield desc{d}_en */
-#define rdm_descden_default 0x0
+#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0
/* rx desc{d}_hdr_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
@@ -204,17 +205,18 @@
*/
/* register address for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_msk 0x00001f00
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00
/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_mskn 0xffffe0ff
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff
/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_shift 8
+#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8
/* width of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_width 5
+#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5
/* default value of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0
/* rx desc{d}_hdr_split bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_split".
@@ -223,17 +225,18 @@
*/
/* register address for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_msk 0x10000000
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000
/* inverted bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_mskn 0xefffffff
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff
/* lower bit position of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_shift 28
+#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28
/* width of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_width 1
+#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1
/* default value of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0
/* rx desc{d}_hd[c:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -242,15 +245,15 @@
*/
/* register address for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_msk 0x00001fff
+#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_mskn 0xffffe000
+#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_shift 0
+#define HW_ATL_RDM_DESCDHD_SHIFT 0
/* width of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_width 13
+#define HW_ATL_RDM_DESCDHD_WIDTH 13
/* rx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -259,17 +262,17 @@
*/
/* register address for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_msk 0x00001ff8
+#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_mskn 0xffffe007
+#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_shift 3
+#define HW_ATL_RDM_DESCDLEN_SHIFT 3
/* width of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_width 10
+#define HW_ATL_RDM_DESCDLEN_WIDTH 10
/* default value of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_default 0x0
+#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0
/* rx desc{d}_reset bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_reset".
@@ -278,17 +281,17 @@
*/
/* register address for bitfield desc{d}_reset */
-#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_msk 0x02000000
+#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000
/* inverted bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_mskn 0xfdffffff
+#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff
/* lower bit position of bitfield desc{d}_reset */
-#define rdm_descdreset_shift 25
+#define HW_ATL_RDM_DESCDRESET_SHIFT 25
/* width of bitfield desc{d}_reset */
-#define rdm_descdreset_width 1
+#define HW_ATL_RDM_DESCDRESET_WIDTH 1
/* default value of bitfield desc{d}_reset */
-#define rdm_descdreset_default 0x0
+#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -296,17 +299,17 @@
*/
/* register address for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_adr 0x00005a30
+#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30
/* bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_msk 0x00000004
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004
/* inverted bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb
/* lower bit position of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_shift 2
+#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2
/* width of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_width 1
+#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1
/* default value of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0
/* rx dca{d}_hdr_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_hdr_en".
@@ -315,17 +318,17 @@
*/
/* register address for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_msk 0x40000000
+#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000
/* inverted bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_mskn 0xbfffffff
+#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff
/* lower bit position of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_shift 30
+#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30
/* width of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_width 1
+#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1
/* default value of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_default 0x0
+#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0
/* rx dca{d}_pay_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_pay_en".
@@ -334,17 +337,17 @@
*/
/* register address for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_msk 0x20000000
+#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000
/* inverted bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_mskn 0xdfffffff
+#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff
/* lower bit position of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_shift 29
+#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29
/* width of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_width 1
+#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1
/* default value of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_default 0x0
+#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0
/* RX rdm_int_rim_en Bitfield Definitions
* Preprocessor definitions for the bitfield "rdm_int_rim_en".
@@ -352,51 +355,51 @@
*/
/* Register address for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_adr 0x00005A30
+#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30
/* Bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_msk 0x00000008
+#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008
/* Inverted bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_mskn 0xFFFFFFF7
+#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7
/* Lower bit position of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_shift 3
+#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3
/* Width of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_width 1
+#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1
/* Default value of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_default 0x0
+#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0
/* general interrupt mapping register definitions
* preprocessor definitions for general interrupt mapping register
* base address: 0x00002180
* parameter: regidx {f} | stride size 0x4 | range [0, 3]
*/
-#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
+#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4)
/* general interrupt status register definitions
* preprocessor definitions for general interrupt status register
* address: 0x000021A4
*/
-#define gen_intr_stat_adr 0x000021A4U
+#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U
/* interrupt global control register definitions
* preprocessor definitions for interrupt global control register
* address: 0x00002300
*/
-#define intr_glb_ctl_adr 0x00002300u
+#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u
/* interrupt throttle register definitions
* preprocessor definitions for interrupt throttle register
* base address: 0x00002800
* parameter: throttle {t} | stride size 0x4 | range [0, 31]
*/
-#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
+#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4)
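
The parameterised *_ADR(x) macros encode the "base + index * stride" layouts described in the comments, so sweeping a whole register bank is a loop over the documented index range. A sketch reusing the hypothetical reg_write() helper from the earlier example:

/* Program the same moderation value into all 32 interrupt throttle
 * registers (stride 0x4, range [0, 31] per the comment above). */
static void intr_throttle_set_all(uint32_t ticks)
{
	uint32_t t;

	for (t = 0U; t < 32U; t++)
		reg_write(HW_ATL_INTR_THR_ADR(t), ticks);
}
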
/* rx dma descriptor base address lsw definitions
* preprocessor definitions for rx dma descriptor base address lsw
* base address: 0x00005b00
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
(0x00005b00u + (descriptor) * 0x20)
/* rx dma descriptor base address msw definitions
@@ -404,7 +407,7 @@
* base address: 0x00005b04
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
(0x00005b04u + (descriptor) * 0x20)
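
Ring base addresses are 64-bit values split across LSW/MSW register pairs at the same per-ring stride. A sketch of loading one RX ring base, again on the assumed reg_write() helper:

/* Write the 64-bit DMA base of RX ring 'descriptor' as two 32-bit
 * halves; the LSW and MSW registers are latched independently. */
static void rx_ring_base_set(uint32_t descriptor, uint64_t dma_base)
{
	reg_write(HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
		  (uint32_t)(dma_base & 0xffffffffU));
	reg_write(HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
		  (uint32_t)(dma_base >> 32));
}
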
/* rx dma descriptor status register definitions
@@ -412,46 +415,48 @@
* base address: 0x00005b14
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \
+ (0x00005b14u + (descriptor) * 0x20)
/* rx dma descriptor tail pointer register definitions
* preprocessor definitions for rx dma descriptor tail pointer register
* base address: 0x00005b10
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00005b10u + (descriptor) * 0x20)
/* rx interrupt moderation control register definitions
* Preprocessor definitions for RX Interrupt Moderation Control Register
* Base Address: 0x00005A40
* Parameter: RIM {R} | stride size 0x4 | range [0, 31]
*/
-#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
+#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4)
/* rx filter multicast filter mask register definitions
* preprocessor definitions for rx filter multicast filter mask register
* address: 0x00005270
*/
-#define rx_flr_mcst_flr_msk_adr 0x00005270u
+#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u
/* rx filter multicast filter register definitions
* preprocessor definitions for rx filter multicast filter register
* base address: 0x00005250
* parameter: filter {f} | stride size 0x4 | range [0, 7]
*/
-#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
+#define HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4)
/* RX Filter RSS Control Register 1 Definitions
* Preprocessor definitions for RX Filter RSS Control Register 1
* Address: 0x000054C0
*/
-#define rx_flr_rss_control1_adr 0x000054C0u
+#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u
/* RX Filter Control Register 2 Definitions
* Preprocessor definitions for RX Filter Control Register 2
* Address: 0x00005104
*/
-#define rx_flr_control2_adr 0x00005104u
+#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u
/* tx tx dma debug control [1f:0] bitfield definitions
* preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
@@ -459,24 +464,24 @@
*/
/* register address for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_adr 0x00008920
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920
/* bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_msk 0xffffffff
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff
/* inverted bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_mskn 0x00000000
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000
/* lower bit position of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_shift 0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0
/* width of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_width 32
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32
/* default value of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_default 0x0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0
/* tx dma descriptor base address lsw definitions
* preprocessor definitions for tx dma descriptor base address lsw
* base address: 0x00007c00
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
-#define tx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
(0x00007c00u + (descriptor) * 0x40)
/* tx dma descriptor tail pointer register definitions
@@ -484,7 +489,8 @@
* base address: 0x00007c10
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
-#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
+#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00007c10u + (descriptor) * 0x40)
/* rx dma_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -492,17 +498,17 @@
*/
/* register address for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_adr 0x00005000
+#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000
/* bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_shift 6
+#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6
/* width of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_width 1
+#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1
/* default value of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_default 0x0
+#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
/* rx rx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "rx_tc_mode".
@@ -510,17 +516,17 @@
*/
/* register address for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_adr 0x00005700
+#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700
/* bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_msk 0x00000100
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100
/* inverted bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff
/* lower bit position of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_shift 8
+#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8
/* width of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_width 1
+#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1
/* default value of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_default 0x0
+#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0
/* rx rx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "rx_buf_en".
@@ -528,17 +534,17 @@
*/
/* register address for bitfield rx_buf_en */
-#define rpb_rx_buf_en_adr 0x00005700
+#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700
/* bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_msk 0x00000001
+#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001
/* inverted bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_mskn 0xfffffffe
+#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe
/* lower bit position of bitfield rx_buf_en */
-#define rpb_rx_buf_en_shift 0
+#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0
/* width of bitfield rx_buf_en */
-#define rpb_rx_buf_en_width 1
+#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1
/* default value of bitfield rx_buf_en */
-#define rpb_rx_buf_en_default 0x0
+#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0
/* rx rx{b}_hi_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
@@ -547,17 +553,17 @@
*/
/* register address for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_msk 0x3fff0000
+#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000
/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_mskn 0xc000ffff
+#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff
/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_shift 16
+#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16
/* width of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_width 14
+#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14
/* default value of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_default 0x0
+#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0
/* rx rx{b}_lo_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
@@ -566,17 +572,17 @@
*/
/* register address for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_msk 0x00003fff
+#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff
/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_mskn 0xffffc000
+#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000
/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_shift 0
+#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0
/* width of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_width 14
+#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14
/* default value of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_default 0x0
+#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0
/* rx rx_fc_mode[1:0] bitfield definitions
* preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
@@ -584,17 +590,17 @@
*/
/* register address for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_adr 0x00005700
+#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700
/* bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_msk 0x00000030
+#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030
/* inverted bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_mskn 0xffffffcf
+#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf
/* lower bit position of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_shift 4
+#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4
/* width of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_width 2
+#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2
/* default value of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_default 0x0
+#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0
/* rx rx{b}_buf_size[8:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
@@ -603,17 +609,17 @@
*/
/* register address for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_msk 0x000001ff
+#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff
/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_mskn 0xfffffe00
+#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00
/* lower bit position of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_shift 0
+#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0
/* width of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_width 9
+#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9
/* default value of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_default 0x0
+#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0
/* rx rx{b}_xoff_en bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_xoff_en".
@@ -622,17 +628,17 @@
*/
/* register address for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_msk 0x80000000
+#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000
/* inverted bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_mskn 0x7fffffff
+#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff
/* lower bit position of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_shift 31
+#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31
/* width of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_width 1
+#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1
/* default value of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_default 0x0
+#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0
/* rx l2_bc_thresh[f:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
@@ -640,17 +646,17 @@
*/
/* register address for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_adr 0x00005100
+#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100
/* bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_msk 0xffff0000
+#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000
/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_mskn 0x0000ffff
+#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff
/* lower bit position of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_shift 16
+#define HW_ATL_RPFL2BC_THRESH_SHIFT 16
/* width of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_width 16
+#define HW_ATL_RPFL2BC_THRESH_WIDTH 16
/* default value of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_default 0x0
+#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0
/* rx l2_bc_en bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_en".
@@ -658,17 +664,17 @@
*/
/* register address for bitfield l2_bc_en */
-#define rpfl2bc_en_adr 0x00005100
+#define HW_ATL_RPFL2BC_EN_ADR 0x00005100
/* bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_msk 0x00000001
+#define HW_ATL_RPFL2BC_EN_MSK 0x00000001
/* inverted bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_mskn 0xfffffffe
+#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l2_bc_en */
-#define rpfl2bc_en_shift 0
+#define HW_ATL_RPFL2BC_EN_SHIFT 0
/* width of bitfield l2_bc_en */
-#define rpfl2bc_en_width 1
+#define HW_ATL_RPFL2BC_EN_WIDTH 1
/* default value of bitfield l2_bc_en */
-#define rpfl2bc_en_default 0x0
+#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0
/* rx l2_bc_act[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_act[2:0]".
@@ -676,17 +682,17 @@
*/
/* register address for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_adr 0x00005100
+#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100
/* bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_msk 0x00007000
+#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000
/* inverted bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_mskn 0xffff8fff
+#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff
/* lower bit position of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_shift 12
+#define HW_ATL_RPFL2BC_ACT_SHIFT 12
/* width of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_width 3
+#define HW_ATL_RPFL2BC_ACT_WIDTH 3
/* default value of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_default 0x0
+#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0
/* rx l2_mc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_mc_en{f}".
@@ -695,17 +701,17 @@
*/
/* register address for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
+#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4)
/* bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_msk 0x80000000
+#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000
/* inverted bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_shift 31
+#define HW_ATL_RPFL2MC_ENF_SHIFT 31
/* width of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_width 1
+#define HW_ATL_RPFL2MC_ENF_WIDTH 1
/* default value of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_default 0x0
+#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0
/* rx l2_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "l2_promis_mode".
@@ -713,17 +719,17 @@
*/
/* register address for bitfield l2_promis_mode */
-#define rpfl2promis_mode_adr 0x00005100
+#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100
/* bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_msk 0x00000008
+#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008
/* inverted bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_mskn 0xfffffff7
+#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7
/* lower bit position of bitfield l2_promis_mode */
-#define rpfl2promis_mode_shift 3
+#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3
/* width of bitfield l2_promis_mode */
-#define rpfl2promis_mode_width 1
+#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1
/* default value of bitfield l2_promis_mode */
-#define rpfl2promis_mode_default 0x0
+#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0
/* rx l2_uc_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
@@ -732,17 +738,17 @@
*/
/* register address for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_msk 0x00070000
+#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000
/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_mskn 0xfff8ffff
+#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff
/* lower bit position of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_shift 16
+#define HW_ATL_RPFL2UC_ACTF_SHIFT 16
/* width of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_width 3
+#define HW_ATL_RPFL2UC_ACTF_WIDTH 3
/* default value of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_default 0x0
+#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0
/* rx l2_uc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_en{f}".
@@ -751,26 +757,26 @@
*/
/* register address for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_msk 0x80000000
+#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000
/* inverted bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_shift 31
+#define HW_ATL_RPFL2UC_ENF_SHIFT 31
/* width of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_width 1
+#define HW_ATL_RPFL2UC_ENF_WIDTH 1
/* default value of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_default 0x0
+#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0
/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
-#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8)
/* register address for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_msk 0x0000ffff
+#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff
/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_shift 0
+#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0
/* rx l2_mc_accept_all bitfield definitions
* Preprocessor definitions for the bitfield "l2_mc_accept_all".
@@ -778,22 +784,22 @@
*/
/* Register address for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_adr 0x00005270
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270
/* Bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_msk 0x00004000
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000
/* Inverted bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF
/* Lower bit position of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_shift 14
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14
/* Width of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_width 1
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1
/* Default value of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_default 0x0
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0
/* width of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_width 3
+#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3
/* default value of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_default 0x0
+#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0
/* rx rss_key_addr[4:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_addr[4:0]".
@@ -801,17 +807,17 @@
*/
/* register address for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0
/* bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_msk 0x0000001f
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f
/* inverted bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_mskn 0xffffffe0
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0
/* lower bit position of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_shift 0
+#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0
/* width of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_width 5
+#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5
/* default value of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_default 0x0
+#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0
/* rx rss_key_wr_data[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
@@ -819,17 +825,17 @@
*/
/* register address for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_adr 0x000054d4
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4
/* bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_msk 0xffffffff
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_mskn 0x00000000
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000
/* lower bit position of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_shift 0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0
/* width of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_width 32
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32
/* default value of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0
/* rx rss_key_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_en_i".
@@ -837,17 +843,17 @@
*/
/* register address for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0
/* bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_msk 0x00000020
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020
/* inverted bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_mskn 0xffffffdf
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf
/* lower bit position of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_shift 5
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5
/* width of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_width 1
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1
/* default value of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0
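
The RSS key is loaded through an indirect addr/wr_data/wr_en interface rather than being directly memory-mapped. The sequence below is a hedged sketch of one plausible ordering — stage the data word, then select the slot with write-enable set; real hardware may additionally require polling the enable bit back to zero:

/* Load one 32-bit word of the RSS hash key into slot 'addr' ([4:0])
 * via the indirect write interface; WR_ENI shares register 0x54d0
 * with the address field, so both are written in one access. */
static void rpf_rss_key_word_set(uint32_t addr, uint32_t key_word)
{
	uint32_t v;

	reg_write(HW_ATL_RPF_RSS_KEY_WR_DATA_ADR, key_word);

	v = reg_read(HW_ATL_RPF_RSS_KEY_ADDR_ADR);
	v &= HW_ATL_RPF_RSS_KEY_ADDR_MSKN;
	v |= (addr << HW_ATL_RPF_RSS_KEY_ADDR_SHIFT) &
	     HW_ATL_RPF_RSS_KEY_ADDR_MSK;
	v |= HW_ATL_RPF_RSS_KEY_WR_ENI_MSK;	/* pulse write-enable */
	reg_write(HW_ATL_RPF_RSS_KEY_ADDR_ADR, v);
}
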
/* rx rss_redir_addr[3:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
@@ -855,17 +861,17 @@
*/
/* register address for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0
/* bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_msk 0x0000000f
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f
/* inverted bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_mskn 0xfffffff0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0
/* lower bit position of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_shift 0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0
/* width of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_width 4
+#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4
/* default value of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0
/* rx rss_redir_wr_data[f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
@@ -873,17 +879,17 @@
*/
/* register address for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_adr 0x000054e4
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4
/* bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_msk 0x0000ffff
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff
/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_mskn 0xffff0000
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000
/* lower bit position of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_shift 0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0
/* width of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_width 16
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16
/* default value of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0
/* rx rss_redir_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_en_i".
@@ -891,17 +897,17 @@
*/
/* register address for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0
/* bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_msk 0x00000010
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010
/* inverted bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_mskn 0xffffffef
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef
/* lower bit position of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_shift 4
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4
/* width of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_width 1
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1
/* default value of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0
/* rx tpo_rpf_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
@@ -909,17 +915,17 @@
*/
/* register address for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000
/* bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100
/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff
/* lower bit position of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_shift 8
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8
/* width of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_width 1
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1
/* default value of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_default 0x0
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0
/* rx vl_inner_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
@@ -927,17 +933,17 @@
*/
/* register address for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284
/* bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_msk 0x0000ffff
+#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff
/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_mskn 0xffff0000
+#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000
/* lower bit position of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_shift 0
+#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0
/* width of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_width 16
+#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16
/* default value of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_default 0x8100
+#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100
/* rx vl_outer_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
@@ -945,17 +951,17 @@
*/
/* register address for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284
/* bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_msk 0xffff0000
+#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000
/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_mskn 0x0000ffff
+#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff
/* lower bit position of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_shift 16
+#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16
/* width of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_width 16
+#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16
/* default value of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_default 0x88a8
+#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8
/* rx vl_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "vl_promis_mode".
@@ -963,17 +969,17 @@
*/
/* register address for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280
/* bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_msk 0x00000002
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002
/* inverted bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_mskn 0xfffffffd
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd
/* lower bit position of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_shift 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1
/* width of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_width 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1
/* default value of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_default 0x0
+#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0
/* RX vl_accept_untagged_mode Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
@@ -981,17 +987,17 @@
*/
/* Register address for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280
/* Bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_msk 0x00000004
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004
/* Inverted bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB
/* Lower bit position of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_shift 2
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2
/* Width of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_width 1
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1
/* Default value of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_default 0x0
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0
/* RX vl_untagged_act[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
@@ -999,17 +1005,17 @@
*/
/* Register address for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_adr 0x00005280
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280
/* Bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_msk 0x00000038
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038
/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7
/* Lower bit position of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_shift 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3
/* Width of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_width 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3
/* Default value of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_default 0x0
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0
/* RX vl_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_en{F}".
@@ -1018,17 +1024,17 @@
*/
/* Register address for bitfield vl_en{F} */
-#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_msk 0x80000000
+#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000
/* Inverted bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield vl_en{F} */
-#define rpf_vl_en_f_shift 31
+#define HW_ATL_RPF_VL_EN_F_SHIFT 31
/* Width of bitfield vl_en{F} */
-#define rpf_vl_en_f_width 1
+#define HW_ATL_RPF_VL_EN_F_WIDTH 1
/* Default value of bitfield vl_en{F} */
-#define rpf_vl_en_f_default 0x0
+#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0
/* RX vl_act{F}[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
@@ -1037,17 +1043,17 @@
*/
/* Register address for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_msk 0x00070000
+#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000
/* Inverted bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_mskn 0xFFF8FFFF
+#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF
/* Lower bit position of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_shift 16
+#define HW_ATL_RPF_VL_ACT_F_SHIFT 16
/* Width of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_width 3
+#define HW_ATL_RPF_VL_ACT_F_WIDTH 3
/* Default value of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_default 0x0
+#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0
/* RX vl_id{F}[B:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
@@ -1056,17 +1062,17 @@
*/
/* Register address for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_msk 0x00000FFF
+#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF
/* Inverted bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_mskn 0xFFFFF000
+#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000
/* Lower bit position of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_shift 0
+#define HW_ATL_RPF_VL_ID_F_SHIFT 0
/* Width of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_width 12
+#define HW_ATL_RPF_VL_ID_F_WIDTH 12
/* Default value of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_default 0x0
+#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
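
vl_en{F}, vl_act{F} and vl_id{F} all live in the same per-filter register (0x5290 + F * 4), so a complete VLAN filter entry can be composed as one 32-bit image and written in a single access. A sketch under the same assumed helpers:

/* Install VLAN filter 'filter': enable it, attach action code 'act'
 * and match VLAN ID 'vid'; all three fields share one register. */
static void rpf_vlan_filter_set(uint32_t filter, uint32_t act, uint32_t vid)
{
	uint32_t v = HW_ATL_RPF_VL_EN_F_MSK;	/* vl_en{F} = 1 */

	v |= (act << HW_ATL_RPF_VL_ACT_F_SHIFT) & HW_ATL_RPF_VL_ACT_F_MSK;
	v |= (vid << HW_ATL_RPF_VL_ID_F_SHIFT) & HW_ATL_RPF_VL_ID_F_MSK;
	reg_write(HW_ATL_RPF_VL_EN_F_ADR(filter), v);
}
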
/* RX et_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "et_en{F}".
@@ -1075,17 +1081,17 @@
*/
/* Register address for bitfield et_en{F} */
-#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
/* Bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_msk 0x80000000
+#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
/* Inverted bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield et_en{F} */
-#define rpf_et_en_f_shift 31
+#define HW_ATL_RPF_ET_EN_F_SHIFT 31
/* Width of bitfield et_en{F} */
-#define rpf_et_en_f_width 1
+#define HW_ATL_RPF_ET_EN_F_WIDTH 1
/* Default value of bitfield et_en{F} */
-#define rpf_et_en_f_default 0x0
+#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
@@ -1094,17 +1100,17 @@
*/
/* register address for bitfield et_en{f} */
-#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_en{f} */
-#define rpf_et_enf_msk 0x80000000
+#define HW_ATL_RPF_ET_ENF_MSK 0x80000000
/* inverted bitmask for bitfield et_en{f} */
-#define rpf_et_enf_mskn 0x7fffffff
+#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield et_en{f} */
-#define rpf_et_enf_shift 31
+#define HW_ATL_RPF_ET_ENF_SHIFT 31
/* width of bitfield et_en{f} */
-#define rpf_et_enf_width 1
+#define HW_ATL_RPF_ET_ENF_WIDTH 1
/* default value of bitfield et_en{f} */
-#define rpf_et_enf_default 0x0
+#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0
/* rx et_up{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}_en".
@@ -1113,17 +1119,17 @@
*/
/* register address for bitfield et_up{f}_en */
-#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_msk 0x40000000
+#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000
/* inverted bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_mskn 0xbfffffff
+#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff
/* lower bit position of bitfield et_up{f}_en */
-#define rpf_et_upfen_shift 30
+#define HW_ATL_RPF_ET_UPFEN_SHIFT 30
/* width of bitfield et_up{f}_en */
-#define rpf_et_upfen_width 1
+#define HW_ATL_RPF_ET_UPFEN_WIDTH 1
/* default value of bitfield et_up{f}_en */
-#define rpf_et_upfen_default 0x0
+#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0
/* rx et_rxq{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}_en".
@@ -1132,17 +1138,17 @@
*/
/* register address for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_msk 0x20000000
+#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000
/* inverted bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_mskn 0xdfffffff
+#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff
/* lower bit position of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_shift 29
+#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29
/* width of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_width 1
+#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1
/* default value of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_default 0x0
+#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0
/* rx et_up{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}[2:0]".
@@ -1151,17 +1157,17 @@
*/
/* register address for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_msk 0x1c000000
+#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000
/* inverted bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_mskn 0xe3ffffff
+#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff
/* lower bit position of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_shift 26
+#define HW_ATL_RPF_ET_UPF_SHIFT 26
/* width of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_width 3
+#define HW_ATL_RPF_ET_UPF_WIDTH 3
/* default value of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_default 0x0
+#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0
/* rx et_rxq{f}[4:0] bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
@@ -1170,17 +1176,17 @@
*/
/* register address for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_msk 0x01f00000
+#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000
/* inverted bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_mskn 0xfe0fffff
+#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff
/* lower bit position of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_shift 20
+#define HW_ATL_RPF_ET_RXQF_SHIFT 20
/* width of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_width 5
+#define HW_ATL_RPF_ET_RXQF_WIDTH 5
/* default value of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_default 0x0
+#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0
/* rx et_mng_rxq{f} bitfield definitions
* preprocessor definitions for the bitfield "et_mng_rxq{f}".
@@ -1189,17 +1195,17 @@
*/
/* register address for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_msk 0x00080000
+#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000
/* inverted bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_mskn 0xfff7ffff
+#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff
/* lower bit position of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_shift 19
+#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19
/* width of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_width 1
+#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1
/* default value of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_default 0x0
+#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0
/* rx et_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_act{f}[2:0]".
@@ -1208,17 +1214,17 @@
*/
/* register address for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_msk 0x00070000
+#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000
/* inverted bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_mskn 0xfff8ffff
+#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff
/* lower bit position of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_shift 16
+#define HW_ATL_RPF_ET_ACTF_SHIFT 16
/* width of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_width 3
+#define HW_ATL_RPF_ET_ACTF_WIDTH 3
/* default value of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_default 0x0
+#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0
/* rx et_val{f}[f:0] bitfield definitions
* preprocessor definitions for the bitfield "et_val{f}[f:0]".
@@ -1227,17 +1233,17 @@
*/
/* register address for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_msk 0x0000ffff
+#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff
/* inverted bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_mskn 0xffff0000
+#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000
/* lower bit position of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_shift 0
+#define HW_ATL_RPF_ET_VALF_SHIFT 0
/* width of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_width 16
+#define HW_ATL_RPF_ET_VALF_WIDTH 16
/* default value of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_default 0x0
+#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
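
Decoding runs the other way: mask first, then shift down. A one-field example reading back which RX queue an Ethertype filter steers to, assuming the same reg_read() helper:

/* Extract et_rxq{f}[4:0] from Ethertype filter 'filter'. */
static uint32_t rpf_et_rxq_get(uint32_t filter)
{
	uint32_t v = reg_read(HW_ATL_RPF_ET_RXQF_ADR(filter));

	return (v & HW_ATL_RPF_ET_RXQF_MSK) >> HW_ATL_RPF_ET_RXQF_SHIFT;
}
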
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1245,17 +1251,17 @@
*/
/* register address for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_adr 0x00005580
+#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580
/* bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_shift 1
+#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1
/* width of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_width 1
+#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1
/* default value of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_default 0x0
+#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0
/* rx desc{d}_vl_strip bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_vl_strip".
@@ -1264,17 +1270,18 @@
*/
/* register address for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_msk 0x20000000
+#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000
/* inverted bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_mskn 0xdfffffff
+#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff
/* lower bit position of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_shift 29
+#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29
/* width of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_width 1
+#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1
/* default value of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_default 0x0
+#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0
/* rx l4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "l4_chk_en".
@@ -1282,17 +1289,17 @@
*/
/* register address for bitfield l4_chk_en */
-#define rpol4chk_en_adr 0x00005580
+#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580
/* bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_msk 0x00000001
+#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
-#define rpol4chk_en_shift 0
+#define HW_ATL_RPOL4CHK_EN_SHIFT 0
/* width of bitfield l4_chk_en */
-#define rpol4chk_en_width 1
+#define HW_ATL_RPOL4CHK_EN_WIDTH 1
/* default value of bitfield l4_chk_en */
-#define rpol4chk_en_default 0x0
+#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0
/* rx reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -1300,17 +1307,17 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_adr 0x00005000
+#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000
/* bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_shift 29
+#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_width 1
+#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_default 0x1
+#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1
/* tx dca{d}_cpuid[7:0] bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
@@ -1319,17 +1326,17 @@
*/
/* register address for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff
/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_mskn 0xffffff00
+#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_shift 0
+#define HW_ATL_TDM_DCADCPUID_SHIFT 0
/* width of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_width 8
+#define HW_ATL_TDM_DCADCPUID_WIDTH 8
/* default value of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_default 0x0
+#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0
/* tx lso_en[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_en[1f:0]".
@@ -1337,17 +1344,17 @@
*/
/* register address for bitfield lso_en[1f:0] */
-#define tdm_lso_en_adr 0x00007810
+#define HW_ATL_TDM_LSO_EN_ADR 0x00007810
/* bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_msk 0xffffffff
+#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff
/* inverted bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_mskn 0x00000000
+#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000
/* lower bit position of bitfield lso_en[1f:0] */
-#define tdm_lso_en_shift 0
+#define HW_ATL_TDM_LSO_EN_SHIFT 0
/* width of bitfield lso_en[1f:0] */
-#define tdm_lso_en_width 32
+#define HW_ATL_TDM_LSO_EN_WIDTH 32
/* default value of bitfield lso_en[1f:0] */
-#define tdm_lso_en_default 0x0
+#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0
/* tx dca_en bitfield definitions
* preprocessor definitions for the bitfield "dca_en".
@@ -1355,17 +1362,17 @@
*/
/* register address for bitfield dca_en */
-#define tdm_dca_en_adr 0x00008480
+#define HW_ATL_TDM_DCA_EN_ADR 0x00008480
/* bitmask for bitfield dca_en */
-#define tdm_dca_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca_en */
-#define tdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca_en */
-#define tdm_dca_en_shift 31
+#define HW_ATL_TDM_DCA_EN_SHIFT 31
/* width of bitfield dca_en */
-#define tdm_dca_en_width 1
+#define HW_ATL_TDM_DCA_EN_WIDTH 1
/* default value of bitfield dca_en */
-#define tdm_dca_en_default 0x1
+#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1
/* tx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -1373,17 +1380,17 @@
*/
/* register address for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_adr 0x00008480
+#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480
/* bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_msk 0x0000000f
+#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_shift 0
+#define HW_ATL_TDM_DCA_MODE_SHIFT 0
/* width of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_width 4
+#define HW_ATL_TDM_DCA_MODE_WIDTH 4
/* default value of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_default 0x0
+#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0
/* tx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -1392,17 +1399,17 @@
*/
/* register address for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_shift 31
+#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31
/* width of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_width 1
+#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1
/* default value of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_default 0x0
+#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0
/* tx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
@@ -1411,17 +1418,17 @@
*/
/* register address for bitfield desc{d}_en */
-#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_en */
-#define tdm_descden_msk 0x80000000
+#define HW_ATL_TDM_DESCDEN_MSK 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
-#define tdm_descden_mskn 0x7fffffff
+#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
-#define tdm_descden_shift 31
+#define HW_ATL_TDM_DESCDEN_SHIFT 31
/* width of bitfield desc{d}_en */
-#define tdm_descden_width 1
+#define HW_ATL_TDM_DESCDEN_WIDTH 1
/* default value of bitfield desc{d}_en */
-#define tdm_descden_default 0x0
+#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0
/* tx desc{d}_hd[c:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -1430,15 +1437,15 @@
*/
/* register address for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_msk 0x00001fff
+#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_mskn 0xffffe000
+#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_shift 0
+#define HW_ATL_TDM_DESCDHD_SHIFT 0
/* width of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_width 13
+#define HW_ATL_TDM_DESCDHD_WIDTH 13
/* tx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -1447,17 +1454,17 @@
*/
/* register address for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_msk 0x00001ff8
+#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_mskn 0xffffe007
+#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_shift 3
+#define HW_ATL_TDM_DESCDLEN_SHIFT 3
/* width of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_width 10
+#define HW_ATL_TDM_DESCDLEN_WIDTH 10
/* default value of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_default 0x0
+#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0
/* tx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -1465,17 +1472,17 @@
*/
/* register address for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_adr 0x00007b40
+#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40
/* bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_msk 0x00000002
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002
/* inverted bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_mskn 0xfffffffd
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd
/* lower bit position of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_shift 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1
/* width of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_width 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1
/* default value of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0
/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
@@ -1484,17 +1491,18 @@
*/
/* register address for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \
+ (0x00007c18 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_msk 0x00007f00
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00
/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_mskn 0xffff80ff
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff
/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_shift 8
+#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8
/* width of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_width 7
+#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7
/* default value of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0
/* tx lso_tcp_flag_first[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
@@ -1502,17 +1510,17 @@
*/
/* register address for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820
/* bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0
/* width of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12
/* default value of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0
/* tx lso_tcp_flag_last[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
@@ -1520,17 +1528,17 @@
*/
/* register address for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_adr 0x00007824
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824
/* bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0
/* width of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12
/* default value of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0
/* tx lso_tcp_flag_mid[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]".
@@ -1538,17 +1546,17 @@
*/
/* Register address for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_adr 0x00005598
+#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598
/* Bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_mskn 0x00000000
+#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000
/* Lower bit position of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_shift 0
+#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0
/* Width of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_width 32
+#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32
/* Default value of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_default 0x0
+#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0
/* RX lro_en[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_en[1F:0]".
@@ -1556,17 +1564,17 @@
*/
/* Register address for bitfield lro_en[1F:0] */
-#define rpo_lro_en_adr 0x00005590
+#define HW_ATL_RPO_LRO_EN_ADR 0x00005590
/* Bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_mskn 0x00000000
+#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000
/* Lower bit position of bitfield lro_en[1F:0] */
-#define rpo_lro_en_shift 0
+#define HW_ATL_RPO_LRO_EN_SHIFT 0
/* Width of bitfield lro_en[1F:0] */
-#define rpo_lro_en_width 32
+#define HW_ATL_RPO_LRO_EN_WIDTH 32
/* Default value of bitfield lro_en[1F:0] */
-#define rpo_lro_en_default 0x0
+#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0
/* RX lro_ptopt_en Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_ptopt_en".
@@ -1574,17 +1582,17 @@
*/
/* Register address for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_adr 0x00005594
+#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594
/* Bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_msk 0x00008000
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000
/* Inverted bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF
/* Lower bit position of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_shift 15
+#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15
/* Width of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_width 1
+#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1
/* Default value of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_defalt 0x1
+#define HW_ATL_RPO_LRO_PTOPT_EN_DEFALT 0x1
/* RX lro_q_ses_lmt Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_q_ses_lmt".
@@ -1592,17 +1600,17 @@
*/
/* Register address for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594
/* Bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_msk 0x00003000
+#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000
/* Inverted bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
+#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF
/* Lower bit position of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_shift 12
+#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12
/* Width of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_width 2
+#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2
/* Default value of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_default 0x1
+#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1
/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
@@ -1610,17 +1618,17 @@
*/
/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594
/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_msk 0x00000060
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060
/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F
/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_shift 5
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5
/* Width of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_width 2
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2
/* Default value of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_defalt 0x1
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFALT 0x1
/* RX lro_pkt_min[4:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
@@ -1628,22 +1636,22 @@
*/
/* Register address for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_adr 0x00005594
+#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594
/* Bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_msk 0x0000001F
+#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F
/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
+#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0
/* Lower bit position of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_shift 0
+#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0
/* Width of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_width 5
+#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5
/* Default value of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_default 0x8
+#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8
/* Width of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_width 2
+#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2
/* Default value of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_default 0x0
+#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0
/* RX lro_tb_div[11:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
@@ -1651,17 +1659,17 @@
*/
/* Register address for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_adr 0x00005620
+#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620
/* Bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_msk 0xFFF00000
+#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000
/* Inverted bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_mskn 0x000FFFFF
+#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF
/* Lower bit position of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_shift 20
+#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20
/* Width of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_width 12
+#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12
/* Default value of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_default 0xC35
+#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35
/* RX lro_ina_ival[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
@@ -1669,17 +1677,17 @@
*/
/* Register address for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620
/* Bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_msk 0x000FFC00
+#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00
/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_mskn 0xFFF003FF
+#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF
/* Lower bit position of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_shift 10
+#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10
/* Width of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_width 10
+#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10
/* Default value of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_default 0xA
+#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA
/* RX lro_max_ival[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
@@ -1687,17 +1695,17 @@
*/
/* Register address for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620
/* Bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_msk 0x000003FF
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF
/* Inverted bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_mskn 0xFFFFFC00
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00
/* Lower bit position of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_shift 0
+#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0
/* Width of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_width 10
+#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10
/* Default value of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_default 0x19
+#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19
/* TX dca{D}_cpuid[7:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
@@ -1706,17 +1714,17 @@
*/
/* Register address for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_msk 0x000000FF
+#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF
/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_mskn 0xFFFFFF00
+#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00
/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_shift 0
+#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0
/* Width of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_width 8
+#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8
/* Default value of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_default 0x0
+#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0
/* TX dca{D}_desc_en Bitfield Definitions
* Preprocessor definitions for the bitfield "dca{D}_desc_en".
@@ -1725,17 +1733,17 @@
*/
/* Register address for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000
/* Inverted bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_shift 31
+#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31
/* Width of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_width 1
+#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1
/* Default value of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_default 0x0
+#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0
/* TX desc{D}_en Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_en".
@@ -1744,17 +1752,17 @@
*/
/* Register address for bitfield desc{D}_en */
-#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_msk 0x80000000
+#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000
/* Inverted bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield desc{D}_en */
-#define tdm_desc_den_shift 31
+#define HW_ATL_TDM_DESC_DEN_SHIFT 31
/* Width of bitfield desc{D}_en */
-#define tdm_desc_den_width 1
+#define HW_ATL_TDM_DESC_DEN_WIDTH 1
/* Default value of bitfield desc{D}_en */
-#define tdm_desc_den_default 0x0
+#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0
/* TX desc{D}_hd[C:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
@@ -1763,15 +1771,15 @@
*/
/* Register address for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_msk 0x00001FFF
+#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF
/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_mskn 0xFFFFE000
+#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000
/* Lower bit position of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_shift 0
+#define HW_ATL_TDM_DESC_DHD_SHIFT 0
/* Width of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_width 13
+#define HW_ATL_TDM_DESC_DHD_WIDTH 13
/* TX desc{D}_len[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
@@ -1780,17 +1788,17 @@
*/
/* Register address for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_msk 0x00001FF8
+#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8
/* Inverted bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_mskn 0xFFFFE007
+#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007
/* Lower bit position of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_shift 3
+#define HW_ATL_TDM_DESC_DLEN_SHIFT 3
/* Width of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_width 10
+#define HW_ATL_TDM_DESC_DLEN_WIDTH 10
/* Default value of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_default 0x0
+#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0
/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
@@ -1799,18 +1807,18 @@
*/
/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_adr(descriptor) \
+#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \
(0x00007C18 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_msk 0x00007F00
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00
/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF
/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_shift 8
+#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8
/* Width of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_width 7
+#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7
/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0
/* TX tdm_int_mod_en Bitfield Definitions
* Preprocessor definitions for the bitfield "tdm_int_mod_en".
@@ -1818,34 +1826,34 @@
*/
/* Register address for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_adr 0x00007B40
+#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40
/* Bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_msk 0x00000010
+#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010
/* Inverted bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_mskn 0xFFFFFFEF
+#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF
/* Lower bit position of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_shift 4
+#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4
/* Width of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_width 1
+#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1
/* Default value of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_default 0x0
+#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0
/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
* PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
*/
/* register address for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820
/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_msk 0x0fff0000
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000
/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff
/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_shift 16
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16
/* width of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12
/* default value of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
/* tx tx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buf_en".
@@ -1853,17 +1861,17 @@
*/
/* register address for bitfield tx_buf_en */
-#define tpb_tx_buf_en_adr 0x00007900
+#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900
/* bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_msk 0x00000001
+#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001
/* inverted bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_mskn 0xfffffffe
+#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe
/* lower bit position of bitfield tx_buf_en */
-#define tpb_tx_buf_en_shift 0
+#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0
/* width of bitfield tx_buf_en */
-#define tpb_tx_buf_en_width 1
+#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1
/* default value of bitfield tx_buf_en */
-#define tpb_tx_buf_en_default 0x0
+#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
@@ -1872,17 +1880,17 @@
*/
/* register address for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_msk 0x1fff0000
+#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000
/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_mskn 0xe000ffff
+#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff
/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_shift 16
+#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16
/* width of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_width 13
+#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13
/* default value of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_default 0x0
+#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0
/* tx tx{b}_lo_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
@@ -1891,17 +1899,17 @@
*/
/* register address for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_msk 0x00001fff
+#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff
/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_mskn 0xffffe000
+#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000
/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_shift 0
+#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0
/* width of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_width 13
+#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13
/* default value of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_default 0x0
+#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0
/* tx dma_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -1909,17 +1917,17 @@
*/
/* register address for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_adr 0x00007000
+#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000
/* bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_shift 6
+#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6
/* width of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_width 1
+#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1
/* default value of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_default 0x0
+#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
/* tx tx{b}_buf_size[7:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
@@ -1928,17 +1936,17 @@
*/
/* register address for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_msk 0x000000ff
+#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff
/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_mskn 0xffffff00
+#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00
/* lower bit position of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_shift 0
+#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0
/* width of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_width 8
+#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8
/* default value of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_default 0x0
+#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0
/* tx tx_scp_ins_en bitfield definitions
* preprocessor definitions for the bitfield "tx_scp_ins_en".
@@ -1946,17 +1954,17 @@
*/
/* register address for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_adr 0x00007900
+#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900
/* bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_msk 0x00000004
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004
/* inverted bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_mskn 0xfffffffb
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb
/* lower bit position of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_shift 2
+#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2
/* width of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_width 1
+#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1
/* default value of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_default 0x0
+#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
/* tx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1964,17 +1972,17 @@
*/
/* register address for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_adr 0x00007800
+#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800
/* bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_shift 1
+#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1
/* width of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_width 1
+#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1
/* default value of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_default 0x0
+#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0
/* tx l4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "l4_chk_en".
@@ -1982,17 +1990,17 @@
*/
/* register address for bitfield l4_chk_en */
-#define tpol4chk_en_adr 0x00007800
+#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800
/* bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_msk 0x00000001
+#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
-#define tpol4chk_en_shift 0
+#define HW_ATL_TPOL4CHK_EN_SHIFT 0
/* width of bitfield l4_chk_en */
-#define tpol4chk_en_width 1
+#define HW_ATL_TPOL4CHK_EN_WIDTH 1
/* default value of bitfield l4_chk_en */
-#define tpol4chk_en_default 0x0
+#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0
/* tx pkt_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "pkt_sys_loopback".
@@ -2000,17 +2008,17 @@
*/
/* register address for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_adr 0x00007000
+#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000
/* bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_msk 0x00000080
+#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080
/* inverted bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_mskn 0xffffff7f
+#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f
/* lower bit position of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_shift 7
+#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7
/* width of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_width 1
+#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1
/* default value of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_default 0x0
+#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0
/* tx data_tc_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "data_tc_arb_mode".
@@ -2018,17 +2026,17 @@
*/
/* register address for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_adr 0x00007100
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
/* bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001
/* inverted bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe
/* lower bit position of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0
/* width of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_width 1
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1
/* default value of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
/* tx desc_rate_ta_rst bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_ta_rst".
@@ -2036,17 +2044,17 @@
*/
/* register address for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310
/* bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_msk 0x80000000
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000
/* inverted bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_mskn 0x7fffffff
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff
/* lower bit position of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_shift 31
+#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31
/* width of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_width 1
+#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1
/* default value of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_default 0x0
+#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0
/* tx desc_rate_limit[a:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
@@ -2054,17 +2062,17 @@
*/
/* register address for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310
/* bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_msk 0x000007ff
+#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff
/* inverted bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_mskn 0xfffff800
+#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800
/* lower bit position of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_shift 0
+#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0
/* width of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_width 11
+#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11
/* default value of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_default 0x0
+#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0
/* tx desc_tc_arb_mode[1:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
@@ -2072,17 +2080,17 @@
*/
/* register address for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_adr 0x00007200
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200
/* bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_msk 0x00000003
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003
/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_mskn 0xfffffffc
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc
/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0
/* width of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_width 2
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2
/* default value of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0
/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
@@ -2091,17 +2099,17 @@
*/
/* register address for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_shift 16
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_width 12
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0
/* tx desc_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
@@ -2110,17 +2118,17 @@
*/
/* register address for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_shift 0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0
/* width of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_width 9
+#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9
/* default value of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_default 0x0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0
/* tx desc_vm_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "desc_vm_arb_mode".
@@ -2128,17 +2136,17 @@
*/
/* register address for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_adr 0x00007300
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300
/* bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001
/* inverted bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe
/* lower bit position of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0
/* width of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_width 1
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1
/* default value of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0
/* tx data_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
@@ -2147,17 +2155,17 @@
*/
/* register address for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_shift 16
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_width 12
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
/* tx data_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
@@ -2166,17 +2174,17 @@
*/
/* register address for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_shift 0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0
/* width of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_width 9
+#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9
/* default value of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_default 0x0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
/* tx reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2184,17 +2192,17 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_adr 0x00007000
+#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000
/* bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_shift 29
+#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_width 1
+#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_default 0x1
+#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1
/* mac_phy register access busy bitfield definitions
* preprocessor definitions for the bitfield "register access busy".
@@ -2202,15 +2210,15 @@
*/
/* register address for bitfield register access busy */
-#define msm_reg_access_busy_adr 0x00004400
+#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400
/* bitmask for bitfield register access busy */
-#define msm_reg_access_busy_msk 0x00001000
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000
/* inverted bitmask for bitfield register access busy */
-#define msm_reg_access_busy_mskn 0xffffefff
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff
/* lower bit position of bitfield register access busy */
-#define msm_reg_access_busy_shift 12
+#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12
/* width of bitfield register access busy */
-#define msm_reg_access_busy_width 1
+#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1
/* mac_phy msm register address[7:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register address[7:0]".
@@ -2218,17 +2226,17 @@
*/
/* register address for bitfield msm register address[7:0] */
-#define msm_reg_addr_adr 0x00004400
+#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400
/* bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_msk 0x000000ff
+#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff
/* inverted bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_mskn 0xffffff00
+#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00
/* lower bit position of bitfield msm register address[7:0] */
-#define msm_reg_addr_shift 0
+#define HW_ATL_MSM_REG_ADDR_SHIFT 0
/* width of bitfield msm register address[7:0] */
-#define msm_reg_addr_width 8
+#define HW_ATL_MSM_REG_ADDR_WIDTH 8
/* default value of bitfield msm register address[7:0] */
-#define msm_reg_addr_default 0x0
+#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0
/* mac_phy register read strobe bitfield definitions
* preprocessor definitions for the bitfield "register read strobe".
@@ -2236,17 +2244,17 @@
*/
/* register address for bitfield register read strobe */
-#define msm_reg_rd_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400
/* bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_msk 0x00000200
+#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200
/* inverted bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_mskn 0xfffffdff
+#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff
/* lower bit position of bitfield register read strobe */
-#define msm_reg_rd_strobe_shift 9
+#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9
/* width of bitfield register read strobe */
-#define msm_reg_rd_strobe_width 1
+#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1
/* default value of bitfield register read strobe */
-#define msm_reg_rd_strobe_default 0x0
+#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0
/* mac_phy msm register read data[31:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register read data[31:0]".
@@ -2254,15 +2262,15 @@
*/
/* register address for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_adr 0x00004408
+#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408
/* bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000
/* lower bit position of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_shift 0
+#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0
/* width of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_width 32
+#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32
/* mac_phy msm register write data[31:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register write data[31:0]".
@@ -2270,17 +2278,17 @@
*/
/* register address for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_adr 0x00004404
+#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404
/* bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000
/* lower bit position of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_shift 0
+#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0
/* width of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_width 32
+#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32
/* default value of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_default 0x0
+#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0
/* mac_phy register write strobe bitfield definitions
* preprocessor definitions for the bitfield "register write strobe".
@@ -2288,17 +2296,17 @@
*/
/* register address for bitfield register write strobe */
-#define msm_reg_wr_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400
/* bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_msk 0x00000100
+#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100
/* inverted bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_mskn 0xfffffeff
+#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff
/* lower bit position of bitfield register write strobe */
-#define msm_reg_wr_strobe_shift 8
+#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8
/* width of bitfield register write strobe */
-#define msm_reg_wr_strobe_width 1
+#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1
/* default value of bitfield register write strobe */
-#define msm_reg_wr_strobe_default 0x0
+#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
/* mif soft reset bitfield definitions
* preprocessor definitions for the bitfield "soft reset".
@@ -2306,17 +2314,17 @@
*/
/* register address for bitfield soft reset */
-#define glb_soft_res_adr 0x00000000
+#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000
/* bitmask for bitfield soft reset */
-#define glb_soft_res_msk 0x00008000
+#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000
/* inverted bitmask for bitfield soft reset */
-#define glb_soft_res_mskn 0xffff7fff
+#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff
/* lower bit position of bitfield soft reset */
-#define glb_soft_res_shift 15
+#define HW_ATL_GLB_SOFT_RES_SHIFT 15
/* width of bitfield soft reset */
-#define glb_soft_res_width 1
+#define HW_ATL_GLB_SOFT_RES_WIDTH 1
/* default value of bitfield soft reset */
-#define glb_soft_res_default 0x0
+#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0
/* mif register reset disable bitfield definitions
* preprocessor definitions for the bitfield "register reset disable".
@@ -2324,27 +2332,27 @@
*/
/* register address for bitfield register reset disable */
-#define glb_reg_res_dis_adr 0x00000000
+#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000
/* bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_msk 0x00004000
+#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000
/* inverted bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_mskn 0xffffbfff
+#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff
/* lower bit position of bitfield register reset disable */
-#define glb_reg_res_dis_shift 14
+#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14
/* width of bitfield register reset disable */
-#define glb_reg_res_dis_width 1
+#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1
/* default value of bitfield register reset disable */
-#define glb_reg_res_dis_default 0x1
+#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1
/* tx dma debug control definitions */
-#define tx_dma_debug_ctl_adr 0x00008920u
+#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u
/* tx dma descriptor base address msw definitions */
-#define tx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
(0x00007c04u + (descriptor) * 0x40)
/* tx dma total request limit */
-#define tx_dma_total_req_limit_adr 0x00007b20u
+#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u
/* tx interrupt moderation control register definitions
* Preprocessor definitions for TX Interrupt Moderation Control Register
@@ -2352,7 +2360,7 @@
* Parameter: queue {Q} | stride size 0x4 | range [0, 31]
*/
-#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
+#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4)
/* pcie reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2360,22 +2368,23 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_adr 0x00001000
+#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000
/* bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_shift 29
+#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_width 1
+#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_default 0x1
+#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1
/* PCI core control register */
-#define pci_reg_control6_adr 0x1014u
+#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u
/* global microprocessor scratch pad definitions */
-#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \
+ (0x00000300u + (scratch_scp) * 0x4)
#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index f2ce12ed4218..967f0fd07fcf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -11,41 +11,244 @@
* abstraction layer.
*/
-#include "../aq_hw.h"
+#include "../aq_nic.h"
#include "../aq_hw_utils.h"
-#include "../aq_pci_func.h"
-#include "../aq_ring.h"
-#include "../aq_vec.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
#include <linux/random.h>
#define HW_ATL_UCP_0X370_REG 0x0370U
#define HW_ATL_FW_SM_RAM 0x2U
+#define HW_ATL_MPI_FW_VERSION 0x18
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_MPI_STATE_ADR 0x036CU
#define HW_ATL_MPI_STATE_MSK 0x00FFU
#define HW_ATL_MPI_STATE_SHIFT 0U
-#define HW_ATL_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U
#define HW_ATL_MPI_SPEED_SHIFT 16U
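Note the widened HW_ATL_MPI_SPEED_MSK: 0xFFFF0000U now describes the speed bits in place (bits 31:16), matching the (reg & msk) >> shift extraction idiom the mask/shift pairs above imply. A hedged sketch of the extraction (the choice of HW_ATL_MPI_STATE_ADR as the source register is an assumption for illustration):

/* Sketch: in-place field extraction enabled by the corrected mask. */
u32 mpi = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
u32 speed = (mpi & HW_ATL_MPI_SPEED_MSK) >> HW_ATL_MPI_SPEED_SHIFT;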
-static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
- u32 *p, u32 cnt)
+#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704
+#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388
+
+#define HW_ATL_MAC_PHY_CONTROL 0x4000
+#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D
+
+#define HW_ATL_FW_VER_1X 0x01050006U
+#define HW_ATL_FW_VER_2X 0x02000000U
+#define HW_ATL_FW_VER_3X 0x03000000U
+
+#define FORCE_FLASHLESS 0
+
+static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
int err = 0;
- AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
- HW_ATL_FW_SM_RAM) == 1U,
- 1U, 10000U);
+ err = hw_atl_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &self->chip_features);
+
+ hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);
+
+ if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_1x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else {
+ aq_pr_err("Bad FW version detected: %x\n",
+ self->fw_ver_actual);
+ return -EOPNOTSUPP;
+ }
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+ return err;
+}
+
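A hedged usage sketch of the new entry point: a probe path hands in an ops pointer that comes back aimed at the table matching the detected firmware major version (the caller shape is illustrative; note that 3.x firmware deliberately reuses aq_fw_2x_ops):

/* Illustrative caller; error handling trimmed to the essentials. */
const struct aq_fw_ops *fw_ops = NULL;
int err;

err = hw_atl_utils_initfw(self, &fw_ops);
if (err < 0)
	return err;	/* soft reset failed or FW version unsupported */
/* fw_ops now points at aq_fw_1x_ops or aq_fw_2x_ops and has had its
 * ->init() callback run.
 */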
+static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
+{
+ int k = 0;
+ u32 gsr;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ AQ_HW_SLEEP(50);
+
+ /* Cleanup SPI */
+ aq_hw_write_reg(self, 0x534, 0xA0);
+ aq_hw_write_reg(self, 0x100, 0x9F);
+ aq_hw_write_reg(self, 0x100, 0x809F);
+
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ /* Kickstart MAC */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+ aq_hw_write_reg(self, 0x520, 0x1);
+ AQ_HW_SLEEP(10);
+ aq_hw_write_reg(self, 0x404, 0x180e0);
+
+ for (k = 0; k < 1000; k++) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+
+ flb_status = flb_status & 0x10;
+ if (flb_status)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("MAC kickstart failed\n");
+ return -EIO;
+ }
+
+ /* FW reset */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ AQ_HW_SLEEP(50);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+
+ /* Kickstart PHY - skipped */
+
+	/* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("FW kickstart failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
+{
+ u32 gsr, rbl_status;
+ int k;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+
+ /* Alter RBL status */
+ aq_hw_write_reg(self, 0x388, 0xDEAD);
+
+	/* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
+ (gsr & 0xFFFFBFFF) | 0x8000);
+
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0x0);
+
+ aq_hw_write_reg(self, 0x404, 0x40e0);
+
+ /* Wait for RBL boot */
+ for (k = 0; k < 1000; k++) {
+ rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
+ if (rbl_status && rbl_status != 0xDEAD)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (!rbl_status || rbl_status == 0xDEAD) {
+ aq_pr_err("RBL Restart failed");
+ return -EIO;
+ }
+
+ /* Restore NVR */
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0xA0);
+
+ if (rbl_status == 0xF1A7) {
+ aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
+ return -ENOTSUPP;
+ }
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("FW kickstart failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self)
+{
+ int k;
+ u32 boot_exit_code = 0;
+
+ for (k = 0; k < 1000; ++k) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+ boot_exit_code = aq_hw_read_reg(self,
+ HW_ATL_MPI_BOOT_EXIT_CODE);
+ if (flb_status != 0x06000000 || boot_exit_code != 0)
+ break;
+ }
+
+ if (k == 1000) {
+ aq_pr_err("Neither RBL nor FLB firmware started\n");
+ return -EOPNOTSUPP;
+ }
+
+ self->rbl_enabled = (boot_exit_code != 0);
+
+ if (self->rbl_enabled)
+ return hw_atl_utils_soft_reset_rbl(self);
+ else
+ return hw_atl_utils_soft_reset_flb(self);
+}
+
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt)
+{
+ int err = 0;
+
+ AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
+ HW_ATL_FW_SM_RAM) == 1U,
+ 1U, 10000U);
if (err < 0) {
bool is_locked;
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
- is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -66,7 +269,7 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
*(p++) = aq_hw_read_reg(self, 0x0000020CU);
}
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
@@ -78,7 +281,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
int err = 0;
bool is_locked;
- is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -97,7 +300,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
}
}
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
@@ -119,7 +322,7 @@ err_exit:
}
static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+ const struct aq_hw_caps_s *aq_hw_caps)
{
int err = 0;
@@ -133,20 +336,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
- reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+ hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
+ AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
- err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
- aq_hw_read_reg(self, 0x18U));
-
- if (err < 0)
- pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
- AQ_CFG_DRV_NAME,
- aq_hw_caps->fw_ver_expected,
- aq_hw_read_reg(self, 0x18U));
return err;
}
@@ -174,14 +369,14 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
err = -1;
goto err_exit;
}
- err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
- (u32 *)(void *)&PHAL_ATLANTIC->rpc,
+ err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
+ (u32 *)(void *)&self->rpc,
(rpc_size + sizeof(u32) -
sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
- sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
+ sw.tid = 0xFFFFU & (++self->rpc_tid);
sw.len = (u16)rpc_size;
aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
@@ -199,7 +394,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
do {
sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
- PHAL_ATLANTIC->rpc_tid = sw.tid;
+ self->rpc_tid = sw.tid;
AQ_HW_WAIT_FOR(sw.tid ==
(fw.val =
@@ -221,9 +416,9 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
if (fw.len) {
err =
hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->rpc_addr,
+ self->rpc_addr,
(u32 *)(void *)
- &PHAL_ATLANTIC->rpc,
+ &self->rpc,
(fw.len + sizeof(u32) -
sizeof(u8)) /
sizeof(u32));
@@ -231,19 +426,18 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
- *rpc = &PHAL_ATLANTIC->rpc;
+ *rpc = &self->rpc;
}
err_exit:
return err;
}
-static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
{
int err = 0;
- err = hw_atl_utils_init_ucp(self, aq_hw_caps);
+ err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
if (err < 0)
goto err_exit;
@@ -259,7 +453,7 @@ int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox_header *pmbox)
{
return hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->mbox_addr,
+ self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
}
@@ -270,7 +464,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
int err = 0;
err = hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->mbox_addr,
+ self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
if (err < 0)
@@ -281,27 +475,27 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
self->aq_nic_cfg->mtu : 1514U;
pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
- pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
+ pmbox->stats.dpc = atomic_read(&self->dpc);
} else {
- pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
+ pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self);
}
err_exit:;
}
-int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
- enum hal_atl_utils_fw_state_e state)
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
- u32 ucp_0x368 = 0;
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
- ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state;
- aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368);
+ val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT);
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
return 0;
}
void hw_atl_utils_mpi_set(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state, u32 speed)
+ enum hal_atl_utils_fw_state_e state,
+ u32 speed)
{
int err = 0;
u32 transaction_id = 0;
@@ -320,11 +514,22 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
goto err_exit;
}
- err = hw_atl_utils_mpi_set_speed(self, speed, state);
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
+ (speed << HW_ATL_MPI_SPEED_SHIFT) | state);
err_exit:;
}
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+
+ val = state | (val & HW_ATL_MPI_SPEED_MSK);
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+ return 0;
+}
+
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
@@ -365,7 +570,6 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
}
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
u8 *mac)
{
int err = 0;
@@ -373,15 +577,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
u32 l = 0U;
u32 mac_addr[2];
- self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_A0->chip_features);
-
- err = hw_atl_utils_mpi_create(self, aq_hw_caps);
- if (err < 0)
- goto err_exit;
-
if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
unsigned int rnd = 0;
unsigned int ucp_0x370 = 0;
@@ -396,7 +591,7 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_read_reg(self, 0x00000374U) +
(40U * 4U),
mac_addr,
- AQ_DIMOF(mac_addr));
+ ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
@@ -427,7 +622,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
mac[0] = (u8)(0xFFU & h);
}
-err_exit:
return err;
}
@@ -465,7 +659,7 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
u32 chip_features = 0U;
- u32 val = reg_glb_mif_id_get(self);
+ u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
if ((3U & mif_rev) == 1U) {
@@ -500,13 +694,13 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
- struct hw_atl_s *hw_self = PHAL_ATLANTIC;
struct hw_aq_atl_utils_mbox mbox;
hw_atl_utils_mpi_read_stats(self, &mbox);
-#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
- mbox.stats._N_ - hw_self->last_stats._N_)
+#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
+ mbox.stats._N_ - self->last_stats._N_)
+
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc);
AQ_SDELTA(mprc);
@@ -527,19 +721,19 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(dpc);
}
#undef AQ_SDELTA
- hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
- hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
- hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
- hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
+ self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self);
+ self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self);
+ self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self);
+ self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self);
- memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+ memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
return 0;
}
struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
- return &PHAL_ATLANTIC->curr_stats;
+ return &self->curr_stats;
}
static const u32 hw_atl_utils_hw_mac_regs[] = {
@@ -568,14 +762,14 @@ static const u32 hw_atl_utils_hw_mac_regs[] = {
};
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
+ const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff)
{
unsigned int i = 0U;
for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
regs_buff[i] = aq_hw_read_reg(self,
- hw_atl_utils_hw_mac_regs[i]);
+ hw_atl_utils_hw_mac_regs[i]);
return 0;
}
@@ -584,3 +778,13 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
*fw_version = aq_hw_read_reg(self, 0x18U);
return 0;
}
+
+const struct aq_fw_ops aq_fw_1x_ops = {
+ .init = hw_atl_utils_mpi_create,
+ .reset = NULL,
+ .get_mac_permanent = hw_atl_utils_get_mac_permanent,
+ .set_link_speed = hw_atl_utils_mpi_set_speed,
+ .set_state = hw_atl_utils_mpi_set_state,
+ .update_link_status = hw_atl_utils_mpi_get_link_status,
+ .update_stats = hw_atl_utils_update_stats,
+};
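hw_atl_utils_initfw() above becomes the single firmware entry point: soft-reset the NIC, read the firmware version register, and bind aq_fw_1x_ops or aq_fw_2x_ops (3.x firmware reuses the 2.x table) before calling the table's init hook. A hedged sketch of a caller, using only names visible in this patch; the wrapper function itself is illustrative, not driver code:

/* Illustrative only: mirrors the dispatch pattern introduced above. */
static int example_hw_start(struct aq_hw_s *self, u32 speed)
{
	const struct aq_fw_ops *fw_ops = NULL;
	int err;

	err = hw_atl_utils_initfw(self, &fw_ops);  /* reset, probe, bind */
	if (err)
		return err;

	/* From here on, callers stay version-neutral. */
	err = fw_ops->set_link_speed(self, speed);
	if (err)
		return err;

	return fw_ops->update_link_status(self);
}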
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 21aeca6908d3..2c690947910a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -14,10 +14,39 @@
#ifndef HW_ATL_UTILS_H
#define HW_ATL_UTILS_H
-#include "../aq_common.h"
-
#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+ u64 buf_addr;
+ u32 ctl;
+ u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+ u32 rsvd;
+ u32 len;
+ u32 ctl;
+ u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+ u32 type;
+ u32 rss_hash;
+ u16 status;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ u16 vlan;
+};
+
struct __packed hw_atl_stats_s {
u32 uprc;
u32 mprc;
@@ -126,26 +155,6 @@ struct __packed hw_aq_atl_utils_mbox {
struct hw_atl_stats_s stats;
};
-struct __packed hw_atl_s {
- struct aq_hw_s base;
- struct hw_atl_stats_s last_stats;
- struct aq_stats_s curr_stats;
- u64 speed;
- unsigned int chip_features;
- u32 fw_ver_actual;
- atomic_t dpc;
- u32 mbox_addr;
- u32 rpc_addr;
- u32 rpc_tid;
- struct hw_aq_atl_utils_fw_rpc rpc;
-};
-
-#define SELF ((struct hw_atl_s *)self)
-
-#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
-
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
@@ -154,7 +163,7 @@ struct __packed hw_atl_s {
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
- PHAL_ATLANTIC->chip_features)
+ self->chip_features)
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
@@ -171,6 +180,73 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
+enum hw_atl_fw2x_rate {
+ FW2X_RATE_100M = 0x20,
+ FW2X_RATE_1G = 0x100,
+ FW2X_RATE_2G5 = 0x200,
+ FW2X_RATE_5G = 0x400,
+ FW2X_RATE_10G = 0x800,
+};
+
+enum hw_atl_fw2x_caps_lo {
+ CAPS_LO_10BASET_HD = 0x00,
+ CAPS_LO_10BASET_FD,
+ CAPS_LO_100BASETX_HD,
+ CAPS_LO_100BASET4_HD,
+ CAPS_LO_100BASET2_HD,
+ CAPS_LO_100BASETX_FD,
+ CAPS_LO_100BASET2_FD,
+ CAPS_LO_1000BASET_HD,
+ CAPS_LO_1000BASET_FD,
+ CAPS_LO_2P5GBASET_FD,
+ CAPS_LO_5GBASET_FD,
+ CAPS_LO_10GBASET_FD,
+};
+
+enum hw_atl_fw2x_caps_hi {
+ CAPS_HI_RESERVED1 = 0x00,
+ CAPS_HI_10BASET_EEE,
+ CAPS_HI_RESERVED2,
+ CAPS_HI_PAUSE,
+ CAPS_HI_ASYMMETRIC_PAUSE,
+ CAPS_HI_100BASETX_EEE,
+ CAPS_HI_RESERVED3,
+ CAPS_HI_RESERVED4,
+ CAPS_HI_1000BASET_FD_EEE,
+ CAPS_HI_2P5GBASET_FD_EEE,
+ CAPS_HI_5GBASET_FD_EEE,
+ CAPS_HI_10GBASET_FD_EEE,
+ CAPS_HI_RESERVED5,
+ CAPS_HI_RESERVED6,
+ CAPS_HI_RESERVED7,
+ CAPS_HI_RESERVED8,
+ CAPS_HI_RESERVED9,
+ CAPS_HI_CABLE_DIAG,
+ CAPS_HI_TEMPERATURE,
+ CAPS_HI_DOWNSHIFT,
+ CAPS_HI_PTP_AVB_EN,
+ CAPS_HI_MEDIA_DETECT,
+ CAPS_HI_LINK_DROP,
+ CAPS_HI_SLEEP_PROXY,
+ CAPS_HI_WOL,
+ CAPS_HI_MAC_STOP,
+ CAPS_HI_EXT_LOOPBACK,
+ CAPS_HI_INT_LOOPBACK,
+ CAPS_HI_EFUSE_AGENT,
+ CAPS_HI_WOL_TIMER,
+ CAPS_HI_STATISTICS,
+ CAPS_HI_TRANSACTION_ID,
+};
+
+struct aq_hw_s;
+struct aq_fw_ops;
+struct aq_hw_caps_s;
+struct aq_hw_link_status_s;
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self);
+
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
@@ -183,19 +259,15 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state,
u32 speed);
-int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
- enum hal_atl_utils_fw_state_e state);
-
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
u8 *mac);
unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
+ const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
@@ -208,5 +280,10 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt);
+
+extern const struct aq_fw_ops aq_fw_1x_ops;
+extern const struct aq_fw_ops aq_fw_2x_ops;
#endif /* HW_ATL_UTILS_H */
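Each of the four __packed descriptor layouts added to this header works out to 16 bytes (8+4+4, 4x4, 8+8, and 4+4+2+2+2+2), consistent with a 16-byte hardware descriptor stride. A C11 compile-time check makes that invariant explicit; the asserts below are an illustration, not part of the patch:

/* Assumes the struct definitions from hw_atl_utils.h above. */
_Static_assert(sizeof(struct hw_atl_txd_s)    == 16, "tx descriptor stride");
_Static_assert(sizeof(struct hw_atl_txc_s)    == 16, "tx context stride");
_Static_assert(sizeof(struct hw_atl_rxd_s)    == 16, "rx descriptor stride");
_Static_assert(sizeof(struct hw_atl_rxd_wb_s) == 16, "rx writeback stride");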
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
new file mode 100644
index 000000000000..8cfce95c82fc
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -0,0 +1,184 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
+ * Atlantic hardware abstraction layer.
+ */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_pci_func.h"
+#include "../aq_ring.h"
+#include "../aq_vec.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
+#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+
+#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
+#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
+
+#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
+#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+
+static int aq_fw2x_init(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ /* check 10 times by 1ms */
+ AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
+ aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)),
+ 1000U, 10U);
+ return err;
+}
+
+static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
+{
+ enum hw_atl_fw2x_rate rate = 0;
+
+ if (speed & AQ_NIC_RATE_10G)
+ rate |= FW2X_RATE_10G;
+
+ if (speed & AQ_NIC_RATE_5G)
+ rate |= FW2X_RATE_5G;
+
+ if (speed & AQ_NIC_RATE_5GSR)
+ rate |= FW2X_RATE_5G;
+
+ if (speed & AQ_NIC_RATE_2GS)
+ rate |= FW2X_RATE_2G5;
+
+ if (speed & AQ_NIC_RATE_1G)
+ rate |= FW2X_RATE_1G;
+
+ if (speed & AQ_NIC_RATE_100M)
+ rate |= FW2X_RATE_100M;
+
+ return rate;
+}
+
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+ u32 val = link_speed_mask_2fw2x_ratemask(speed);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, val);
+
+ return 0;
+}
+
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ /* No explicit state in 2x fw */
+ return 0;
+}
+
+static int aq_fw2x_update_link_status(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+ u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+
+ if (speed) {
+ if (speed & FW2X_RATE_10G)
+ link_status->mbps = 10000;
+ else if (speed & FW2X_RATE_5G)
+ link_status->mbps = 5000;
+ else if (speed & FW2X_RATE_2G5)
+ link_status->mbps = 2500;
+ else if (speed & FW2X_RATE_1G)
+ link_status->mbps = 1000;
+ else if (speed & FW2X_RATE_100M)
+ link_status->mbps = 100;
+ else
+ link_status->mbps = 10000;
+ } else {
+ link_status->mbps = 0;
+ }
+
+ return 0;
+}
+
+int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+ int err = 0;
+ u32 h = 0U;
+ u32 l = 0U;
+ u32 mac_addr[2] = { 0 };
+ u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
+
+ if (efuse_addr != 0) {
+ err = hw_atl_utils_fw_downld_dwords(self,
+ efuse_addr + (40U * 4U),
+ mac_addr,
+ ARRAY_SIZE(mac_addr));
+ if (err)
+ return err;
+ mac_addr[0] = __swab32(mac_addr[0]);
+ mac_addr[1] = __swab32(mac_addr[1]);
+ }
+
+ ether_addr_copy(mac, (u8 *)mac_addr);
+
+ if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
+ unsigned int rnd = 0;
+
+ get_random_bytes(&rnd, sizeof(unsigned int));
+
+ l = 0xE3000000U
+ | (0xFFFFU & rnd)
+ | (0x00 << 16);
+ h = 0x8001300EU;
+
+ mac[5] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[4] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[3] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[2] = (u8)(0xFFU & l);
+ mac[1] = (u8)(0xFFU & h);
+ h >>= 8;
+ mac[0] = (u8)(0xFFU & h);
+ }
+ return err;
+}
+
+static int aq_fw2x_update_stats(struct aq_hw_s *self)
+{
+ int err = 0;
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
+
+ /* Toggle statistics bit for FW to update */
+ mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+	/* Wait for FW to report back */
+ AQ_HW_WAIT_FOR(orig_stats_val !=
+ (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+ BIT(CAPS_HI_STATISTICS)),
+ 1U, 10000U);
+ if (err)
+ return err;
+
+ return hw_atl_utils_update_stats(self);
+}
+
+const struct aq_fw_ops aq_fw_2x_ops = {
+ .init = aq_fw2x_init,
+ .reset = NULL,
+ .get_mac_permanent = aq_fw2x_get_mac_permanent,
+ .set_link_speed = aq_fw2x_set_link_speed,
+ .set_state = aq_fw2x_set_state,
+ .update_link_status = aq_fw2x_update_link_status,
+ .update_stats = aq_fw2x_update_stats,
+};
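Several functions in this new file declare err and then appear never to assign it; AQ_HW_WAIT_FOR is a statement macro that writes the enclosing err (-ETIME on timeout), with arguments (condition, delay in microseconds, max tries). A rough reconstruction of its shape, offered as a reading aid rather than the driver's exact definition in aq_hw_utils.h:

/* Illustrative reconstruction, assuming the (cond, delay_us, tries)
 * signature used by the callers above. */
#define AQ_HW_WAIT_FOR(cond, delay_us, max_tries)		\
do {								\
	unsigned int i_;					\
	for (i_ = (max_tries); !(cond) && i_; --i_)		\
		udelay(delay_us);				\
	if (!i_)						\
		err = -ETIME;					\
} while (0)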
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 9009f2651e70..5265b937677b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -10,9 +10,9 @@
#ifndef VER_H
#define VER_H
-#define NIC_MAJOR_DRIVER_VERSION 1
-#define NIC_MINOR_DRIVER_VERSION 6
-#define NIC_BUILD_DRIVER_VERSION 13
+#define NIC_MAJOR_DRIVER_VERSION 2
+#define NIC_MINOR_DRIVER_VERSION 0
+#define NIC_BUILD_DRIVER_VERSION 2
#define NIC_REVISION_DRIVER_VERSION 0
#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d9346e2ac720..14a59e51db67 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1716,7 +1716,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm63xx_enet_platform_data *pd;
struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
- const char *clk_name;
int i, ret;
if (!bcm_enet_shared_base[0])
@@ -1751,20 +1750,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
dev->irq = priv->irq = res_irq->start;
priv->irq_rx = res_irq_rx->start;
priv->irq_tx = res_irq_tx->start;
- priv->mac_id = pdev->id;
- /* get rx & tx dma channel id for this mac */
- if (priv->mac_id == 0) {
- priv->rx_chan = 0;
- priv->tx_chan = 1;
- clk_name = "enet0";
- } else {
- priv->rx_chan = 2;
- priv->tx_chan = 3;
- clk_name = "enet1";
- }
-
- priv->mac_clk = devm_clk_get(&pdev->dev, clk_name);
+ priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
goto out;
@@ -1795,9 +1782,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->dma_chan_width = pd->dma_chan_width;
priv->dma_has_sram = pd->dma_has_sram;
priv->dma_desc_shift = pd->dma_desc_shift;
+ priv->rx_chan = pd->rx_chan;
+ priv->tx_chan = pd->tx_chan;
}
- if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
+ if (priv->has_phy && !priv->use_external_mii) {
/* using internal PHY, enable clock */
priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
if (IS_ERR(priv->phy_clk)) {
@@ -1828,7 +1817,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
bus->priv = priv;
bus->read = bcm_enet_mdio_read_phylib;
bus->write = bcm_enet_mdio_write_phylib;
- sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
+ sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
/* only probe bus where we think the PHY is, because
* the mdio read operation return 0 instead of 0xffff
@@ -2139,27 +2128,25 @@ static int bcm_enetsw_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
- memset(p, 0, size);
priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
- memset(p, 0, size);
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
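The two ring-allocation hunks above are a mechanical conversion: dma_zalloc_coherent() is dma_alloc_coherent() plus the zeroing the old code did with a separate memset(). Its definition in this era's linux/dma-mapping.h is essentially:

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle,
				  flag | __GFP_ZERO);
}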
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 5a66728d4776..1d3c917eb830 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -193,9 +193,6 @@ struct bcm_enet_mib_counters {
struct bcm_enet_priv {
- /* mac id (from platform device id) */
- int mac_id;
-
/* base remapped address of device */
void __iomem *base;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 087f01b4dc3a..f15a8fc6dfc9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1216,18 +1216,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
goto out;
}
- /* The Ethernet switch we are interfaced with needs packets to be at
- * least 64 bytes (including FCS) otherwise they will be discarded when
- * they enter the switch port logic. When Broadcom tags are enabled, we
- * need to make sure that packets are at least 68 bytes
- * (including FCS and tag) because the length verification is done after
- * the Broadcom tag is stripped off the ingress packet.
- */
- if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
- ret = NETDEV_TX_OK;
- goto out;
- }
-
/* Insert TSB and checksum infos */
if (priv->tsb_en) {
skb = bcm_sysport_insert_tsb(skb, dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 1d96cd594ade..8eef9fb6b1fe 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -128,8 +128,6 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
dma_desc->ctl1 = cpu_to_le32(ctl1);
}
-#define ENET_BRCM_TAG_LEN 4
-
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
struct bgmac_dma_ring *ring,
struct sk_buff *skb)
@@ -142,18 +140,6 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
u32 flags;
int i;
- /* The Ethernet switch we are interfaced with needs packets to be at
- * least 64 bytes (including FCS) otherwise they will be discarded when
- * they enter the switch port logic. When Broadcom tags are enabled, we
- * need to make sure that packets are at least 68 bytes
- * (including FCS and tag) because the length verification is done after
- * the Broadcom tag is stripped off the ingress packet.
- */
- if (netdev_uses_dsa(net_dev)) {
- if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
- goto err_stats;
- }
-
if (skb->len > BGMAC_DESC_CTL1_LEN) {
netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
goto err_drop;
@@ -240,7 +226,6 @@ err_dma_head:
err_drop:
dev_kfree_skb(skb);
-err_stats:
net_dev->stats.tx_dropped++;
net_dev->stats.tx_errors++;
return NETDEV_TX_OK;
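bcmsysport and bgmac delete the same open-coded minimum-length check, but the constraint itself still holds: frames entering the switch must be at least 64 bytes including FCS after the Broadcom tag is stripped. The padding is now done once, centrally, in the DSA Broadcom tagger. A hedged sketch of the tagger-side check, modeled on net/dsa/tag_brcm.c of this kernel and shown from memory:

/* Pad before the 4-byte Broadcom tag is inserted so the switch's
 * post-untag length check still passes; returning NULL makes the
 * DSA core drop the frame. */
if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
	return NULL;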
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 7919f6112ecf..5e34b34f7740 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5818,8 +5818,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
struct l2_fhdr *rx_hdr;
int ret = -ENODEV;
struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
- struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
- struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnx2_tx_ring_info *txr;
+ struct bnx2_rx_ring_info *rxr;
tx_napi = bnapi;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8ae269ec17a1..d7c98e807ca8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -738,8 +738,9 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
break;
default:
- WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
- be16_to_cpu(skb->protocol));
+ netdev_WARN_ONCE(bp->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ be16_to_cpu(skb->protocol));
}
}
#endif
@@ -2482,8 +2483,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
*/
if (bp->dev->features & NETIF_F_LRO)
fp->mode = TPA_MODE_LRO;
- else if (bp->dev->features & NETIF_F_GRO &&
- bnx2x_mtu_allows_gro(bp->dev->mtu))
+ else if (bp->dev->features & NETIF_F_GRO_HW)
fp->mode = TPA_MODE_GRO;
else
fp->mode = TPA_MODE_DISABLED;
@@ -4874,6 +4874,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
+ if (!bnx2x_mtu_allows_gro(new_mtu))
+ dev->features &= ~NETIF_F_GRO_HW;
+
if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
@@ -4903,10 +4906,13 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
}
/* TPA requires Rx CSUM offloading */
- if (!(features & NETIF_F_RXCSUM)) {
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
+ features &= ~NETIF_F_GRO_HW;
+ if (features & NETIF_F_GRO_HW)
features &= ~NETIF_F_LRO;
- features &= ~NETIF_F_GRO;
- }
return features;
}
@@ -4933,13 +4939,8 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
}
}
- /* if GRO is changed while LRO is enabled, don't force a reload */
- if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
- changes &= ~NETIF_F_GRO;
-
- /* if GRO is changed while HW TPA is off, don't force a reload */
- if ((changes & NETIF_F_GRO) && bp->disable_tpa)
- changes &= ~NETIF_F_GRO;
+ /* Don't care about GRO changes */
+ changes &= ~NETIF_F_GRO;
if (changes)
bnx2x_reload = true;
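The rewritten bnx2x_fix_features() encodes a small dependency graph: LRO needs Rx checksumming, hardware GRO needs both software GRO and an MTU that TPA can aggregate, and GRO_HW and LRO exclude each other. Restated as a self-contained sketch with hypothetical flag values (the real code operates on netdev_features_t bits):

#include <stdbool.h>
#include <stdint.h>

enum { F_RXCSUM = 1u << 0, F_LRO = 1u << 1,
       F_GRO = 1u << 2, F_GRO_HW = 1u << 3 };

static uint32_t fix_features(uint32_t f, bool mtu_allows_gro)
{
	if (!(f & F_RXCSUM))
		f &= ~F_LRO;		/* TPA requires Rx checksum offload */
	if (!(f & F_GRO) || !mtu_allows_gro)
		f &= ~F_GRO_HW;		/* HW GRO piggybacks on GRO */
	if (f & F_GRO_HW)
		f &= ~F_LRO;		/* the two TPA modes are exclusive */
	return f;
}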
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ddd5d3ebd201..7b08323e3f3d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12409,8 +12409,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
/* Set TPA flags */
if (bp->disable_tpa) {
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
+ bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
}
if (CHIP_IS_E1(bp))
@@ -13282,7 +13282,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
- NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
@@ -13318,6 +13318,8 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
+ if (dev->features & NETIF_F_LRO)
+ dev->features &= ~NETIF_F_GRO_HW;
/* Add Loopback capability to the device */
dev->hw_features |= NETIF_F_LOOPBACK;
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 59c8ec9c1cad..7c560d545c03 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 61ca4eb7c6fa..1500243b9886 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -107,6 +107,7 @@ enum board_idx {
BCM57416_NPAR,
BCM57452,
BCM57454,
+ BCM5745x_NPAR,
BCM58802,
BCM58804,
BCM58808,
@@ -147,6 +148,7 @@ static const struct {
[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+ [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -156,6 +158,8 @@ static const struct {
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
@@ -209,6 +213,7 @@ MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
HWRM_FUNC_CFG,
+ HWRM_FUNC_VF_CFG,
HWRM_PORT_PHY_QCFG,
HWRM_CFA_L2_FILTER_ALLOC,
};
@@ -1510,7 +1515,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
*event |= BNXT_RX_EVENT;
- goto next_rx_no_prod;
+ goto next_rx_no_prod_no_len;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
@@ -1526,7 +1531,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = 1;
}
*event |= BNXT_RX_EVENT;
- goto next_rx_no_prod;
+ goto next_rx_no_prod_no_len;
}
cons = rxcmp->rx_cmp_opaque;
@@ -1644,7 +1649,10 @@ next_rx:
rxr->rx_prod = NEXT_RX(prod);
rxr->rx_next_cons = NEXT_RX(cons);
-next_rx_no_prod:
+ cpr->rx_packets += 1;
+ cpr->rx_bytes += len;
+
+next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
@@ -1706,12 +1714,16 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (BNXT_VF(bp))
goto async_event_process_exit;
- if (data1 & 0x20000) {
+
+ /* print unsupported speed warning in forced speed mode only */
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
+ (data1 & 0x20000)) {
u16 fw_speed = link_info->force_link_speed;
u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
- netdev_warn(bp->dev, "Link speed %d no longer supported\n",
- speed);
+ if (speed != SPEED_UNKNOWN)
+ netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+ speed);
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
/* fall thru */
@@ -1798,6 +1810,7 @@ static irqreturn_t bnxt_msix(int irq, void *dev_instance)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 cons = RING_CMP(cpr->cp_raw_cons);
+ cpr->event_ctr++;
prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
@@ -2021,6 +2034,15 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
+ if (bp->flags & BNXT_FLAG_DIM) {
+ struct net_dim_sample dim_sample;
+
+ net_dim_sample(cpr->event_ctr,
+ cpr->rx_packets,
+ cpr->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, dim_sample);
+ }
mmiowb();
return work_done;
}
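The net_dim() call above only files a per-NAPI sample (event count, packets, bytes); when the algorithm decides to switch moderation profiles it schedules a work item, which the new bnxt_dim.o from the Makefile change above services by pushing the chosen profile to firmware through bnxt_hwrm_set_ring_coal(), added later in this patch. A hedged sketch of that handler's shape, assuming the net_dim API of this kernel; field and helper names follow the patch, but the function as a whole is a reconstruction:

static void example_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct bnxt_cp_ring_info *cpr = container_of(dim,
					struct bnxt_cp_ring_info, dim);
	struct bnxt_napi *bnapi = container_of(cpr,
					struct bnxt_napi, cp_ring);
	struct net_dim_cq_moder m = net_dim_get_profile(dim->mode,
							dim->profile_ix);

	cpr->rx_ring_coal.coal_ticks = m.usec;	/* usecs between interrupts */
	cpr->rx_ring_coal.coal_bufs = m.pkts;	/* completions per interrupt */

	bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
	dim->state = NET_DIM_START_MEASURE;
}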
@@ -2243,6 +2265,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
if (rxr->xdp_prog)
bpf_prog_put(rxr->xdp_prog);
+ if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
+ xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -2276,6 +2301,10 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring = &rxr->rx_ring_struct;
+ rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
+ if (rc < 0)
+ return rc;
+
rc = bnxt_alloc_ring(bp, ring);
if (rc)
return rc;
@@ -2606,6 +2635,8 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+ cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+ cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
}
}
@@ -2751,7 +2782,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
return;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
- if (bp->dev->features & NETIF_F_GRO)
+ else if (bp->dev->features & NETIF_F_GRO_HW)
bp->flags |= BNXT_FLAG_GRO;
}
@@ -2830,6 +2861,9 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->cp_ring_mask = bp->cp_bit - 1;
}
+/* Changing allocation mode of RX rings.
+ * TODO: Update when extending xdp_rxq_info to support allocation modes.
+ */
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
if (page_mode) {
@@ -2839,10 +2873,10 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
bp->rx_dir = DMA_BIDIRECTIONAL;
bp->rx_skb_func = bnxt_rx_page_skb;
+ /* Disable LRO or GRO_HW */
+ netdev_update_features(bp->dev);
} else {
bp->dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
@@ -4469,6 +4503,42 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int bnxt_hwrm_get_rings(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ struct hwrm_func_qcfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return -EIO;
+ }
+
+ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ u16 cp, stats;
+
+ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+ hw_resc->resv_hw_ring_grps =
+ le32_to_cpu(resp->alloc_hw_ring_grps);
+ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ cp = le16_to_cpu(resp->alloc_cmpl_rings);
+ stats = le16_to_cpu(resp->alloc_stat_ctx);
+ cp = min_t(u16, cp, stats);
+ hw_resc->resv_cp_rings = cp;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
@@ -4488,55 +4558,283 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
return rc;
}
-static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
{
struct hwrm_func_cfg_input req = {0};
+ u32 enables = 0;
int rc;
- if (bp->hwrm_spec_code < 0x10601)
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_stat_ctxs = req.num_cmpl_rings;
+ req.num_vnics = cpu_to_le16(vnics);
+ }
+ if (!enables)
return 0;
- if (BNXT_VF(bp))
+ req.enables = cpu_to_le32(enables);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ bp->hw_resc.resv_tx_rings = tx_rings;
+
+ rc = bnxt_hwrm_get_rings(bp);
+ return rc;
+}
+
+static int
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ u32 enables = 0;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+ bp->hw_resc.resv_tx_rings = tx_rings;
return 0;
+ }
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
- req.num_tx_rings = cpu_to_le16(*tx_rings);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_stat_ctxs = req.num_cmpl_rings;
+ req.num_vnics = cpu_to_le16(vnics);
+
+ req.enables = cpu_to_le32(enables);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
+ return -ENOMEM;
+
+ rc = bnxt_hwrm_get_rings(bp);
+ return rc;
+}
+
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
+ int cp, int vnic)
+{
+ if (BNXT_PF(bp))
+ return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
+ else
+ return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
+}
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int tx = bp->tx_nr_rings;
+ int rx = bp->rx_nr_rings;
+ int cp = bp->cp_nr_rings;
+ int grp, rx_rings, rc;
+ bool sh = false;
+ int vnic = 1;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+
+ grp = bp->rx_nr_rings;
+ if (tx == hw_resc->resv_tx_rings &&
+ (!(bp->flags & BNXT_FLAG_NEW_RM) ||
+ (rx == hw_resc->resv_rx_rings &&
+ grp == hw_resc->resv_hw_ring_grps &&
+ cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
+ return 0;
+
+ rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
+ if (rc)
return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
- mutex_unlock(&bp->hwrm_cmd_lock);
- if (!rc)
- bp->tx_reserved_rings = *tx_rings;
+ tx = hw_resc->resv_tx_rings;
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ rx = hw_resc->resv_rx_rings;
+ cp = hw_resc->resv_cp_rings;
+ grp = hw_resc->resv_hw_ring_grps;
+ vnic = hw_resc->resv_vnics;
+ }
+
+ rx_rings = rx;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (rx >= 2) {
+ rx_rings = rx >> 1;
+ } else {
+ if (netif_running(bp->dev))
+ return -ENOMEM;
+
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bnxt_set_ring_params(bp);
+ }
+ }
+ rx_rings = min_t(int, rx_rings, grp);
+ rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx = rx_rings << 1;
+ cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
+ bp->tx_nr_rings = tx;
+ bp->rx_nr_rings = rx_rings;
+ bp->cp_nr_rings = cp;
+
+ if (!tx || !rx || !cp || !grp || !vnic)
+ return -ENOMEM;
+
return rc;
}
-static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
- struct hwrm_func_cfg_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rx = bp->rx_nr_rings;
+ int vnic = 1;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return false;
+
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+ return true;
+
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ (hw_resc->resv_rx_rings != rx ||
+ hw_resc->resv_cp_rings != bp->cp_nr_rings ||
+ hw_resc->resv_vnics != vnic))
+ return true;
+ return false;
+}
+
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ u32 flags, enables;
int rc;
- if (bp->hwrm_spec_code < 0x10801)
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
return 0;
- if (BNXT_VF(bp))
- return 0;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
+
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(enables);
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_stat_ctxs = cpu_to_le16(cp_rings);
+ req.num_vnics = cpu_to_le16(1);
+ if (bp->flags & BNXT_FLAG_RFS)
+ req.num_vnics = cpu_to_le16(rx_rings + 1);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+ return 0;
+}
+
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ u32 flags, enables;
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
- req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+ flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
+ enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
req.num_tx_rings = cpu_to_le16(tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS;
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_stat_ctxs = cpu_to_le16(cp_rings);
+ req.num_vnics = cpu_to_le16(1);
+ if (bp->flags & BNXT_FLAG_RFS)
+ req.num_vnics = cpu_to_le16(rx_rings + 1);
+ }
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(enables);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
return 0;
}
+static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ if (bp->hwrm_spec_code < 0x10801)
+ return 0;
+
+ if (BNXT_PF(bp))
+ return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
+ ring_grps, cp_rings);
+
+ return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+ cp_rings);
+}
+
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
@@ -4579,6 +4877,36 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
req->flags = cpu_to_le16(flags);
}
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_coal coal;
+ unsigned int grp_idx;
+
+	/* Tick values in microseconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+ memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
+
+ coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
+ coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
+
+ if (!bnapi->rx_ring)
+ return -ENODEV;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+
+ bnxt_hwrm_set_coal_params(&coal, &req_rx);
+
+ grp_idx = bnapi->index;
+ req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+
+ return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
+ HWRM_CMD_TIMEOUT);
+}
+
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
int i, rc = 0;
@@ -4732,11 +5060,60 @@ func_qcfg_exit:
return rc;
}
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+ struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_resource_qcaps_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ rc = -EIO;
+ goto hwrm_func_resc_qcaps_exit;
+ }
+
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ pf->vf_resv_strategy =
+ le16_to_cpu(resp->vf_reservation_strategy);
+ if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+ pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
+ }
+hwrm_func_resc_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {0};
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u32 flags;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -4746,16 +5123,27 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+ flags = le32_to_cpu(resp->flags);
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
bp->tx_push_thresh = 0;
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+ if (!hw_resc->max_hw_ring_grps)
+ hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -4763,16 +5151,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->port_id = le16_to_cpu(resp->port_id);
bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
- pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
- pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
- pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
- pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
- pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
- if (!pf->max_hw_ring_grps)
- pf->max_hw_ring_grps = pf->max_tx_rings;
- pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
- pf->max_vnics = le16_to_cpu(resp->max_vnics);
- pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
@@ -4781,26 +5159,13 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
-
- vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
- vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
- vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
- vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
- vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
- if (!vf->max_hw_ring_grps)
- vf->max_hw_ring_grps = vf->max_tx_rings;
- vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
- vf->max_vnics = le16_to_cpu(resp->max_vnics);
- vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
-
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
@@ -4810,6 +5175,21 @@ hwrm_func_qcaps_exit:
return rc;
}
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc;
+
+ rc = __bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ rc = bnxt_hwrm_func_resc_qcaps(bp);
+ if (!rc)
+ bp->flags |= BNXT_FLAG_NEW_RM;
+ }
+ return 0;
+}
+
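
The bnxt_hwrm_func_qcaps() wrapper above layers the new FUNC_RESOURCE_QCAPS query on top of the legacy capability query: a legacy failure aborts, while the resource query is only attempted on firmware spec 0x10803 or newer, and its success is what turns on the new resource manager. A minimal userspace sketch of that sequencing, with both HWRM calls stubbed and a made-up flags word standing in for bp->flags:

#include <stdio.h>

#define FLAG_NEW_RM 0x8000000u

/* Stubs standing in for the two HWRM queries; 0 means success. */
static int legacy_qcaps(void) { return 0; }
static int resc_qcaps(void)   { return 0; }

static int query_caps(unsigned int hwrm_spec_code, unsigned int *flags)
{
	int rc = legacy_qcaps();

	if (rc)
		return rc;		/* legacy failure is fatal */
	if (hwrm_spec_code >= 0x10803 && !resc_qcaps())
		*flags |= FLAG_NEW_RM;	/* new-RM failure is non-fatal */
	return 0;
}

int main(void)
{
	unsigned int flags = 0;

	query_caps(0x10803, &flags);
	printf("NEW_RM %s\n", (flags & FLAG_NEW_RM) ? "on" : "off");
	return 0;
}
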
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
struct hwrm_func_reset_input req = {0};
@@ -4879,23 +5259,24 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
- bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
- resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
- if (resp->hwrm_intf_maj < 1) {
+ bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+ resp->hwrm_intf_min_8b << 8 |
+ resp->hwrm_intf_upd_8b;
+ if (resp->hwrm_intf_maj_8b < 1) {
netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
- resp->hwrm_intf_maj, resp->hwrm_intf_min,
- resp->hwrm_intf_upd);
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
- resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
- resp->hwrm_fw_rsvd);
+ resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
+ resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
- if (resp->hwrm_intf_maj >= 1)
+ if (resp->hwrm_intf_maj_8b >= 1)
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
bp->chip_num = le16_to_cpu(resp->chip_num);
@@ -5031,6 +5412,28 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
return rc;
}
+static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
+ req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+ if (size == 128)
+ req.cache_linesize =
+ FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
+
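
bnxt_hwrm_set_cache_line_size() above is skipped on VFs and pre-1.8.3 firmware, and otherwise just maps the host cache line size onto the firmware's two-value enum, defaulting to 64 bytes. A sketch of the mapping with placeholder enum values (the real constants live in bnxt_hsi.h):

#include <stdio.h>

enum { LINESIZE_64 = 0, LINESIZE_128 = 1 };	/* placeholder values */

static int linesize_to_enum(int size)
{
	/* anything other than 128 falls back to the 64-byte setting */
	return (size == 128) ? LINESIZE_128 : LINESIZE_64;
}

int main(void)
{
	printf("%d -> %d\n", 64, linesize_to_enum(64));
	printf("%d -> %d\n", 128, linesize_to_enum(128));
	return 0;
}
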
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5166,15 +5569,6 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc);
goto err_out;
}
- if (bp->tx_reserved_rings != bp->tx_nr_rings) {
- int tx = bp->tx_nr_rings;
-
- if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
- tx < bp->tx_nr_rings) {
- rc = -ENOMEM;
- goto err_out;
- }
- }
}
rc = bnxt_hwrm_ring_alloc(bp);
@@ -5394,79 +5788,45 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_rsscos_ctxs;
-#endif
- return bp->pf.max_rsscos_ctxs;
+ return bp->hw_resc.max_rsscos_ctxs;
}
static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_vnics;
-#endif
- return bp->pf.max_vnics;
+ return bp->hw_resc.max_vnics;
}
#endif
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_stat_ctxs;
-#endif
- return bp->pf.max_stat_ctxs;
+ return bp->hw_resc.max_stat_ctxs;
}
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_stat_ctxs = max;
- else
-#endif
- bp->pf.max_stat_ctxs = max;
+ bp->hw_resc.max_stat_ctxs = max;
}
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_cp_rings;
-#endif
- return bp->pf.max_cp_rings;
+ return bp->hw_resc.max_cp_rings;
}
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_cp_rings = max;
- else
-#endif
- bp->pf.max_cp_rings = max;
+ bp->hw_resc.max_cp_rings = max;
}
static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return min_t(unsigned int, bp->vf.max_irqs,
- bp->vf.max_cp_rings);
-#endif
- return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}
void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_irqs = max_irqs;
- else
-#endif
- bp->pf.max_irqs = max_irqs;
+ bp->hw_resc.max_irqs = max_irqs;
}
static int bnxt_init_msix(struct bnxt *bp)
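
With the maxima moved into the shared bnxt_hw_resc table, the accessors above lose their CONFIG_BNXT_SRIOV branching and become plain reads and writes. A toy version of the min-of-irqs-and-cp-rings getter to show the shape:

#include <stdio.h>

struct hw_resc { unsigned int max_irqs, max_cp_rings; };

/* one table serves PF and VF alike; no #ifdef branching needed */
static unsigned int max_func_irqs(const struct hw_resc *r)
{
	return r->max_irqs < r->max_cp_rings ? r->max_irqs : r->max_cp_rings;
}

int main(void)
{
	struct hw_resc r = { .max_irqs = 16, .max_cp_rings = 12 };

	printf("usable irqs: %u\n", max_func_irqs(&r));
	return 0;
}
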
@@ -5567,6 +5927,36 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
+static int bnxt_reserve_rings(struct bnxt *bp)
+{
+ int orig_cp = bp->hw_resc.resv_cp_rings;
+ int tcs = netdev_get_num_tc(bp->dev);
+ int rc;
+
+ if (!bnxt_need_reserve_rings(bp))
+ return 0;
+
+ rc = __bnxt_reserve_rings(bp);
+ if (rc) {
+ netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
+ return rc;
+ }
+ if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ return rc;
+ }
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ netdev_err(bp->dev, "tx ring reservation failure\n");
+ netdev_reset_tc(bp->dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ return -ENOMEM;
+ }
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+ return 0;
+}
+
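
bnxt_reserve_rings() proceeds in three steps: reserve rings with firmware, rebuild the interrupt setup if the completion ring count grew under the new resource manager, then verify the TX rings still divide evenly across traffic classes. A condensed userspace model of that control flow, all firmware and IRQ helpers stubbed:

#include <stdio.h>

struct dev {
	int resv_cp, cp_nr, tx_nr, tx_per_tc, tcs;
	int new_rm;
};

static int hw_reserve(struct dev *d)  { (void)d; return 0; }	/* stub */
static int reinit_irqs(struct dev *d) { (void)d; return 0; }	/* stub */

static int reserve_rings(struct dev *d)
{
	int orig_cp = d->resv_cp;
	int rc = hw_reserve(d);

	if (rc)
		return rc;
	if (d->new_rm && d->cp_nr > orig_cp && reinit_irqs(d))
		return -1;	/* IRQ vectors must match cp rings */
	if (d->tcs && d->tx_per_tc * d->tcs != d->tx_nr)
		return -1;	/* uneven TC split: bail out */
	return 0;
}

int main(void)
{
	struct dev d = { .resv_cp = 4, .cp_nr = 8, .tx_nr = 8,
			 .tx_per_tc = 4, .tcs = 2, .new_rm = 1 };

	printf("reserve: %d\n", reserve_rings(&d));
	return 0;
}
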
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -5692,8 +6082,14 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->cp_nr_rings; i++)
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+ if (bp->bnapi[i]->rx_ring)
+ cancel_work_sync(&cpr->dim.work);
+
napi_disable(&bp->bnapi[i]->napi);
+ }
}
static void bnxt_enable_napi(struct bnxt *bp)
@@ -5701,7 +6097,13 @@ static void bnxt_enable_napi(struct bnxt *bp)
int i;
for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
bp->bnapi[i]->in_reset = false;
+
+ if (bp->bnapi[i]->rx_ring) {
+ INIT_WORK(&cpr->dim.work, bnxt_dim_work);
+ cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
napi_enable(&bp->bnapi[i]->napi);
}
}
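
The NAPI paths above now own the per-ring DIM work item: enable initializes it (and the EQE-based mode) before NAPI starts, and disable cancels it synchronously before NAPI stops, so no dim work can touch a dead ring. A kernel-style sketch of the pairing, assuming a toy ring struct rather than the real bnxt types (not meant to build outside the kernel tree):

#include <linux/workqueue.h>

struct toy_ring {
	struct work_struct dim_work;
	int has_rx;
};

static void toy_dim_fn(struct work_struct *work) { }

static void toy_enable(struct toy_ring *r)
{
	if (r->has_rx)
		INIT_WORK(&r->dim_work, toy_dim_fn);	/* arm before NAPI runs */
}

static void toy_disable(struct toy_ring *r)
{
	if (r->has_rx)
		cancel_work_sync(&r->dim_work);		/* wait for in-flight work */
}
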
@@ -6311,6 +6713,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
+ rc = bnxt_reserve_rings(bp);
+ if (rc)
+ return rc;
+
rc = bnxt_setup_int_mode(bp);
if (rc) {
netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
@@ -6446,23 +6852,13 @@ static bool bnxt_drv_busy(struct bnxt *bp)
test_bit(BNXT_STATE_READ_STATS, &bp->state));
}
-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init)
{
- int rc = 0;
-
-#ifdef CONFIG_BNXT_SRIOV
- if (bp->sriov_cfg) {
- rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
- !bp->sriov_cfg,
- BNXT_SRIOV_CFG_WAIT_TMO);
- if (rc)
- netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
- }
-
/* Close the VF-reps before closing PF */
if (BNXT_PF(bp))
bnxt_vf_reps_close(bp);
-#endif
+
/* Change device state to avoid TX queue wake up's */
bnxt_tx_disable(bp);
@@ -6485,6 +6881,22 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_del_napi(bp);
}
bnxt_free_mem(bp, irq_re_init);
+}
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (bp->sriov_cfg) {
+ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ !bp->sriov_cfg,
+ BNXT_SRIOV_CFG_WAIT_TMO);
+ if (rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ }
+#endif
+ __bnxt_close_nic(bp, irq_re_init, link_re_init);
return rc;
}
@@ -6764,13 +7176,26 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
max_rss_ctxs = max_vnics;
if (vnics > max_vnics || vnics > max_rss_ctxs) {
- netdev_warn(bp->dev,
- "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
- min(max_rss_ctxs - 1, max_vnics - 1));
+ if (bp->rx_nr_rings > 1)
+ netdev_warn(bp->dev,
+ "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+ min(max_rss_ctxs - 1, max_vnics - 1));
return false;
}
- return true;
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ return true;
+
+ if (vnics == bp->hw_resc.resv_vnics)
+ return true;
+
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
+ if (vnics <= bp->hw_resc.resv_vnics)
+ return true;
+
+ netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
+ return false;
#else
return false;
#endif
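
Under the new resource manager, bnxt_rfs_capable() no longer trusts static maxima: it asks firmware to actually reserve one VNIC per RX ring plus the default VNIC, and rolls back to a single VNIC when the reservation falls short. A userspace model of that probe-and-rollback, with the reservation stubbed by a fixed capacity:

#include <stdio.h>

static int capacity = 6;	/* stub: firmware-side VNIC pool */
static int resv_vnics;

static void reserve_vnics(int want)
{
	resv_vnics = want <= capacity ? want : capacity;
}

static int rfs_capable(int rx_rings)
{
	int vnics = rx_rings + 1;	/* one per ring + default VNIC */

	if (vnics == resv_vnics)
		return 1;
	reserve_vnics(vnics);
	if (vnics <= resv_vnics)
		return 1;
	reserve_vnics(1);		/* roll back to the default VNIC only */
	return 0;
}

int main(void)
{
	printf("4 rings: %d\n", rfs_capable(4));	/* needs 5 <= 6: ok */
	printf("8 rings: %d\n", rfs_capable(8));	/* needs 9 > 6: falls back */
	return 0;
}
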
@@ -6784,6 +7209,15 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
+ if (!(features & NETIF_F_GRO))
+ features &= ~NETIF_F_GRO_HW;
+
+ if (features & NETIF_F_GRO_HW)
+ features &= ~NETIF_F_LRO;
+
/* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
@@ -6817,9 +7251,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
- if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+ if (features & NETIF_F_GRO_HW)
flags |= BNXT_FLAG_GRO;
- if (features & NETIF_F_LRO)
+ else if (features & NETIF_F_LRO)
flags |= BNXT_FLAG_LRO;
if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
@@ -7096,7 +7530,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
{
int max_rx, max_tx, tx_sets = 1;
int tx_rings_needed;
- int rc;
+ int rx_rings = rx;
+ int cp, rc;
if (tcs)
tx_sets = tcs;
@@ -7112,7 +7547,10 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (max_tx < tx_rings_needed)
return -ENOMEM;
- return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+ cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+ return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
}
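
bnxt_check_rings() now sizes the full set for the firmware check: with aggregation rings enabled each RX ring consumes two hardware RX rings, and the completion ring count is max(tx, rx) for shared rings but tx + rx otherwise. The arithmetic in isolation:

#include <stdio.h>

static int cp_rings(int tx, int rx, int shared)
{
	return shared ? (tx > rx ? tx : rx) : tx + rx;
}

int main(void)
{
	int tx = 8, rx = 8, agg = 1;
	int hw_rx = agg ? rx << 1 : rx;	/* agg doubles hardware RX rings */

	printf("hw rx %d, cp shared %d, cp separate %d\n",
	       hw_rx, cp_rings(tx, rx, 1), cp_rings(tx, rx, 0));
	return 0;
}
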
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -7346,7 +7784,8 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct bnxt *bp = cb_priv;
- if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev))
+ if (!bnxt_tc_flower_enabled(bp) ||
+ !tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -7717,12 +8156,8 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- /* In SRIOV each PF-pool (PF + child VFs) serves as a
- * switching domain, the PF's perm mac-addr can be used
- * as the unique parent-id
- */
- attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
+ attr->u.ppid.id_len = sizeof(bp->switch_id);
+ memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
break;
default:
return -EOPNOTSUPP;
@@ -7800,8 +8235,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
- if (bp->xdp_prog)
- bpf_prog_put(bp->xdp_prog);
bnxt_cleanup_pci(bp);
free_netdev(dev);
}
@@ -7869,24 +8302,14 @@ static int bnxt_get_max_irq(struct pci_dev *pdev)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp)
{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int max_ring_grps = 0;
-#ifdef CONFIG_BNXT_SRIOV
- if (!BNXT_PF(bp)) {
- *max_tx = bp->vf.max_tx_rings;
- *max_rx = bp->vf.max_rx_rings;
- *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
- *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
- max_ring_grps = bp->vf.max_hw_ring_grps;
- } else
-#endif
- {
- *max_tx = bp->pf.max_tx_rings;
- *max_rx = bp->pf.max_rx_rings;
- *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
- *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
- max_ring_grps = bp->pf.max_hw_ring_grps;
- }
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+ *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+ *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
*max_cp -= 1;
*max_rx -= 2;
@@ -7922,8 +8345,8 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
if (rc)
return rc;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
+ bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bnxt_set_ring_params(bp);
}
@@ -7951,6 +8374,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
return rc;
}
+/* In the initial default shared ring setting, each shared ring must have an
+ * RX/TX ring pair.
+ */
+static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
+{
+ bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
+ bp->rx_nr_rings = bp->cp_nr_rings;
+ bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+}
+
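
bnxt_trim_dflt_sh_rings() pins the default shared-ring configuration to whole RX/TX pairs by clamping every count to min(tx per TC, rx). The clamp on its own:

#include <stdio.h>

int main(void)
{
	int tx_per_tc = 6, rx = 4;
	int cp = tx_per_tc < rx ? tx_per_tc : rx;	/* one pair per shared ring */

	printf("cp=%d rx=%d tx_per_tc=%d\n", cp, cp, cp);
	return 0;
}
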
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -7966,14 +8400,26 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+ if (sh)
+ bnxt_trim_dflt_sh_rings(bp);
+ else
+ bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
- rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+ rc = __bnxt_reserve_rings(bp);
if (rc)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (sh)
+ bnxt_trim_dflt_sh_rings(bp);
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ /* Rings may have been trimmed, re-reserve the trimmed rings. */
+ if (bnxt_need_reserve_rings(bp)) {
+ rc = __bnxt_reserve_rings(bp);
+ if (rc)
+ netdev_warn(bp->dev, "2nd ring reservation failed.\n");
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ }
bp->num_stat_ctxs = bp->cp_nr_rings;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -7982,11 +8428,23 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
}
-void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
+ int rc;
+
ASSERT_RTNL();
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ return 0;
+
bnxt_hwrm_func_qcaps(bp);
- bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+ __bnxt_close_nic(bp, true, false);
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ dev_close(bp->dev);
+ else
+ rc = bnxt_open_nic(bp, true, false);
+ return rc;
}
static int bnxt_init_mac_addr(struct bnxt *bp)
@@ -8000,7 +8458,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
if (is_valid_ether_addr(vf->mac_addr)) {
- /* overwrite netdev dev_adr with admin VF MAC */
+ /* overwrite netdev dev_addr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
eth_hw_addr_random(bp->dev);
@@ -8106,7 +8564,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ dev->hw_features |= NETIF_F_GRO_HW;
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+ if (dev->features & NETIF_F_GRO_HW)
+ dev->features &= ~NETIF_F_LRO;
dev->priv_flags |= IFF_UNICAST_FLT;
#ifdef CONFIG_BNXT_SRIOV
@@ -8208,6 +8670,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
+ bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5359a1f0045f..1989c470172c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,10 +12,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.8.0"
+#define DRV_MODULE_VERSION "1.9.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 8
+#define DRV_VER_MIN 9
#define DRV_VER_UPD 0
#include <linux/interrupt.h>
@@ -23,6 +23,8 @@
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -607,6 +609,17 @@ struct bnxt_tx_ring_info {
struct bnxt_ring_struct tx_ring_struct;
};
+struct bnxt_coal {
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+ /* RING_IDLE enabled when coal ticks < idle_thresh */
+ u16 idle_thresh;
+ u8 bufs_per_record;
+ u8 budget;
+};
+
struct bnxt_tpa_info {
void *data;
u8 *data_ptr;
@@ -664,12 +677,20 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_ring_struct;
struct bnxt_ring_struct rx_agg_ring_struct;
+ struct xdp_rxq_info xdp_rxq;
};
struct bnxt_cp_ring_info {
u32 cp_raw_cons;
void __iomem *cp_doorbell;
+ struct bnxt_coal rx_ring_coal;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 event_ctr;
+
+ struct net_dim dim;
+
struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
@@ -755,19 +776,38 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
};
-#if defined(CONFIG_BNXT_SRIOV)
-struct bnxt_vf_info {
- u16 fw_fid;
- u8 mac_addr[ETH_ALEN];
+struct bnxt_hw_resc {
+ u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 min_cp_rings;
u16 max_cp_rings;
+ u16 resv_cp_rings;
+ u16 min_tx_rings;
u16 max_tx_rings;
+ u16 resv_tx_rings;
+ u16 min_rx_rings;
u16 max_rx_rings;
+ u16 resv_rx_rings;
+ u16 min_hw_ring_grps;
u16 max_hw_ring_grps;
+ u16 resv_hw_ring_grps;
+ u16 min_l2_ctxs;
u16 max_l2_ctxs;
- u16 max_irqs;
+ u16 min_vnics;
u16 max_vnics;
+ u16 resv_vnics;
+ u16 min_stat_ctxs;
u16 max_stat_ctxs;
+ u16 max_irqs;
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+ u16 fw_fid;
+ u8 mac_addr[ETH_ALEN]; /* PF assigned MAC Address */
+ u8 vf_mac_addr[ETH_ALEN]; /* VF assigned MAC address, only
+ * stored by PF.
+ */
u16 vlan;
u32 flags;
#define BNXT_VF_QOS 0x1
@@ -788,15 +828,6 @@ struct bnxt_pf_info {
u16 fw_fid;
u16 port_id;
u8 mac_addr[ETH_ALEN];
- u16 max_rsscos_ctxs;
- u16 max_cp_rings;
- u16 max_tx_rings; /* HW assigned max tx rings for this PF */
- u16 max_rx_rings; /* HW assigned max rx rings for this PF */
- u16 max_hw_ring_grps;
- u16 max_irqs;
- u16 max_l2_ctxs;
- u16 max_vnics;
- u16 max_stat_ctxs;
u32 first_vf_id;
u16 active_vfs;
u16 max_vfs;
@@ -808,6 +839,9 @@ struct bnxt_pf_info {
u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
+ u8 vf_resv_strategy;
+#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0
+#define BNXT_VF_RESV_STRATEGY_MINIMAL 1
void *hwrm_cmd_req_addr[4];
dma_addr_t hwrm_cmd_req_dma_addr[4];
struct bnxt_vf_info *vf;
@@ -944,17 +978,6 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
-struct bnxt_coal {
- u16 coal_ticks;
- u16 coal_ticks_irq;
- u16 coal_bufs;
- u16 coal_bufs_irq;
- /* RING_IDLE enabled when coal ticks < idle_thresh */
- u16 idle_thresh;
- u8 bufs_per_record;
- u8 budget;
-};
-
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
@@ -1126,6 +1149,8 @@ struct bnxt {
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
+ #define BNXT_FLAG_DIM 0x2000000
+ #define BNXT_FLAG_NEW_RM 0x8000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1185,7 +1210,6 @@ struct bnxt {
int tx_nr_rings;
int tx_nr_rings_per_tc;
int tx_nr_rings_xdp;
- int tx_reserved_rings;
int tx_wake_thresh;
int tx_push_thresh;
@@ -1297,6 +1321,7 @@ struct bnxt {
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
+ struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
int nr_vfs;
@@ -1346,6 +1371,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
+ u8 switch_id[8];
struct bnxt_tc_info *tc_info;
};
@@ -1421,6 +1447,9 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
-void bnxt_restore_pf_fw_resources(struct bnxt *bp);
+int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+void bnxt_dim_work(struct work_struct *work);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fed37cd9ae1d..3c746f2d9ed8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -278,12 +278,11 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
- data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
- GFP_KERNEL);
+ data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
- memset(data, 0, data_len);
bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
get.dest_data_addr = cpu_to_le64(mapping);
get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
new file mode 100644
index 000000000000..408dd190331e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -0,0 +1,32 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/net_dim.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+void bnxt_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct bnxt_cp_ring_info *cpr = container_of(dim,
+ struct bnxt_cp_ring_info,
+ dim);
+ struct bnxt_napi *bnapi = container_of(cpr,
+ struct bnxt_napi,
+ cp_ring);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ cpr->rx_ring_coal.coal_ticks = cur_profile.usec;
+ cpr->rx_ring_coal.coal_bufs = cur_profile.pkts;
+
+ bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
+ dim->state = NET_DIM_START_MEASURE;
+}
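
bnxt_dim_work() recovers its completion ring and NAPI container from the embedded struct net_dim using nothing but container_of(). A standalone demonstration of that pointer walk, with toy types mirroring the nesting and container_of defined locally since this runs outside the kernel:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dim  { int profile_ix; };
struct cpr  { struct dim dim; };
struct napi { struct cpr cp_ring; };

int main(void)
{
	struct napi n = { .cp_ring = { .dim = { .profile_ix = 3 } } };
	struct dim *d = &n.cp_ring.dim;		/* what the work callback gets */

	struct cpr  *c = container_of(d, struct cpr, dim);
	struct napi *b = container_of(c, struct napi, cp_ring);

	printf("recovered napi %p (== %p), ix %d\n",
	       (void *)b, (void *)&n, d->profile_ix);
	return 0;
}
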
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b13ce5ebde8d..1801582076be 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -49,6 +49,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
memset(coal, 0, sizeof(*coal));
+ coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
coal->rx_coalesce_usecs = hw_coal->coal_ticks;
@@ -77,6 +79,15 @@ static int bnxt_set_coalesce(struct net_device *dev,
int rc = 0;
u16 mult;
+ if (coal->use_adaptive_rx_coalesce) {
+ bp->flags |= BNXT_FLAG_DIM;
+ } else {
+ if (bp->flags & BNXT_FLAG_DIM) {
+ bp->flags &= ~(BNXT_FLAG_DIM);
+ goto reset_coalesce;
+ }
+ }
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
@@ -104,6 +115,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
update_stats = true;
}
+reset_coalesce:
if (netif_running(dev)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
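
The set_coalesce path treats use_adaptive_rx_coalesce as a mode switch: enabling it sets BNXT_FLAG_DIM and still records the new static values, while disabling it jumps to reset_coalesce so the previously stored settings are re-applied untouched. The toggle in isolation, with the flag word and apply step stubbed:

#include <stdio.h>

#define FLAG_DIM 0x2000000u

static unsigned int flags;
static int stored_usecs = 50;

static void apply(void) { printf("apply %d usecs\n", stored_usecs); }

static void set_coalesce(int adaptive, int usecs)
{
	if (adaptive) {
		flags |= FLAG_DIM;	/* DIM owns RX coalescing from now on */
	} else if (flags & FLAG_DIM) {
		flags &= ~FLAG_DIM;
		goto reset_coalesce;	/* keep stored values, just re-apply */
	}
	stored_usecs = usecs;		/* normal path: record new settings */
reset_coalesce:
	apply();
}

int main(void)
{
	set_coalesce(1, 100);	/* enable DIM; 100 is still recorded */
	set_coalesce(0, 25);	/* disable DIM; 25 ignored, 100 re-applied */
	return 0;
}
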
@@ -1376,6 +1388,9 @@ static int bnxt_firmware_reset(struct net_device *dev,
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
break;
+ case BNXT_FW_RESET_AP:
+ req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+ break;
default:
return -EINVAL;
}
@@ -2522,6 +2537,14 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
if (!rc)
netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+ } else if (*flags == ETH_RESET_AP) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code < 0x10803)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
+ if (!rc)
+ netdev_info(dev, "Reset Application Processor request successful.\n");
} else {
rc = -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ff601b42fcc8..836ef682f24c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -34,6 +34,7 @@ struct bnxt_led_cfg {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+#define BNXT_FW_RESET_AP 0xfffe
#define BNXT_FW_RESET_CHIP 0xffff
extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index c99f4d0880e4..82d17f8cc0db 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,2437 +1,2700 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
*/
-#ifndef BNXT_HSI_H
-#define BNXT_HSI_H
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
-/* HSI and HWRM Specification 1.8.3 */
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 8
-#define HWRM_VERSION_UPDATE 3
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
-#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 unused_0;
+ __le16 size;
+ __le64 req_addr;
+};
-#define HWRM_VERSION_STR "1.8.3.1"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
-
-/* Statistics Ejection Buffer Completion Record (16 bytes) */
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_ENGINE_CKV_HELLO 0x12dUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 280
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_RSVD 0
+#define HWRM_VERSION_STR "1.9.0.0"
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ u8 reserved2[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* eject_cmpl (size:128b/16B) */
struct eject_cmpl {
- __le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- __le16 len;
- __le32 opaque;
- __le32 v;
- #define EJECT_CMPL_V 0x1UL
- __le32 unused_2;
-};
-
-/* HWRM Completion Record (16 bytes) */
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ __le16 len;
+ __le32 opaque;
+ __le32 v;
+ #define EJECT_CMPL_V 0x1UL
+ __le32 unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
struct hwrm_cmpl {
- __le16 type;
- #define CMPL_TYPE_MASK 0x3fUL
- #define CMPL_TYPE_SFT 0
- #define CMPL_TYPE_HWRM_DONE 0x20UL
- __le16 sequence_id;
- __le32 unused_1;
- __le32 v;
- #define CMPL_V 0x1UL
- __le32 unused_3;
-};
-
-/* HWRM Forwarded Request (16 bytes) */
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
struct hwrm_fwd_req_cmpl {
- __le16 req_len_type;
- #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define FWD_REQ_CMPL_TYPE_SFT 0
- #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define FWD_REQ_CMPL_REQ_LEN_SFT 6
- __le16 source_id;
- __le32 unused_0;
- __le32 req_buf_addr_v[2];
- #define FWD_REQ_CMPL_V 0x1UL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
-};
-
-/* HWRM Forwarded Response (16 bytes) */
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
struct hwrm_fwd_resp_cmpl {
- __le16 type;
- #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define FWD_RESP_CMPL_TYPE_SFT 0
- #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
- __le16 source_id;
- __le16 resp_len;
- __le16 unused_1;
- __le32 resp_buf_addr_v[2];
- #define FWD_RESP_CMPL_V 0x1UL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
-};
-
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
struct hwrm_async_event_cmpl {
- __le16 type;
- #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_V 0x1UL
- #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
-};
-
-/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
-struct hwrm_async_event_cmpl_link_mtu_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
-struct hwrm_async_event_cmpl_dcb_config_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
-};
-
-/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+};
+
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
struct hwrm_async_event_cmpl_port_conn_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_speed_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_unload {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_load {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
-struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
struct hwrm_async_event_cmpl_pf_drvr_unload {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_pf_drvr_load {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
-struct hwrm_async_event_cmpl_vf_flr {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
-struct hwrm_async_event_cmpl_vf_mac_addr_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
-struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
-};
-
-/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
-};
-
-/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
-struct hwrm_async_event_cmpl_hwrm_error {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* hwrm_ver_get */
-/* Input (24 bytes) */
-struct hwrm_ver_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 unused_0[5];
-};
-
-/* Output (128 bytes) */
-struct hwrm_ver_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 hwrm_intf_rsvd;
- u8 hwrm_fw_maj;
- u8 hwrm_fw_min;
- u8 hwrm_fw_bld;
- u8 hwrm_fw_rsvd;
- u8 mgmt_fw_maj;
- u8 mgmt_fw_min;
- u8 mgmt_fw_bld;
- u8 mgmt_fw_rsvd;
- u8 netctrl_fw_maj;
- u8 netctrl_fw_min;
- u8 netctrl_fw_bld;
- u8 netctrl_fw_rsvd;
- __le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
- u8 roce_fw_maj;
- u8 roce_fw_min;
- u8 roce_fw_bld;
- u8 roce_fw_rsvd;
- char hwrm_fw_name[16];
- char mgmt_fw_name[16];
- char netctrl_fw_name[16];
- __le32 reserved2[4];
- char roce_fw_name[16];
- __le16 chip_num;
- u8 chip_rev;
- u8 chip_metal;
- u8 chip_bond_id;
- u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
- __le16 max_req_win_len;
- __le16 max_resp_len;
- __le16 def_req_timeout;
- u8 init_pending;
- #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL
- u8 unused_0;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_func_reset */
-/* Input (24 bytes) */
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+};
+
+/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
- __le16 vf_id;
- u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
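func_reset_level selects the reset scope: RESETALL the whole chip, RESETME the calling function, RESETCHILDREN all child VFs, and RESETVF a single VF named by vf_id, which firmware only honors when the VF_ID_VALID enables bit is set. A sketch of filling the request for the single-VF case, with a hypothetical helper name and the driver-specific message transport omitted:

	static void sample_fill_func_reset_vf(struct hwrm_func_reset_input *req, u16 vf_id)
	{
		req->enables = cpu_to_le32(FUNC_RESET_REQ_ENABLES_VF_ID_VALID);
		req->vf_id = cpu_to_le16(vf_id);
		req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF;
	}
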
+/* hwrm_func_reset_output (size:128b/16B) */
struct hwrm_func_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_getfid */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
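Every response in the new format ends with padding bytes and a trailing valid byte; by HWRM convention firmware writes the valid byte last, so the host can poll it to detect that the DMA'd response is complete. A sketch of that check under those assumptions (the helper name is made up; the barrier mirrors common driver practice):

	static bool sample_resp_complete(struct hwrm_func_reset_output *resp)
	{
		if (!READ_ONCE(resp->valid))
			return false;
		/* Order the valid-byte read before reads of the response body. */
		dma_rmb();
		return true;
	}
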
+/* hwrm_func_getfid_input (size:192b/24B) */
struct hwrm_func_getfid_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
- __le16 pci_id;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
struct hwrm_func_getfid_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
struct hwrm_func_vf_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
struct hwrm_func_vf_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 first_vf_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
struct hwrm_func_vf_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
struct hwrm_func_vf_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_cfg */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
struct hwrm_func_vf_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- __le16 mtu;
- __le16 guest_vlan;
- __le16 async_event_cr;
- u8 dflt_mac_addr[6];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 unused_0[4];
+};
+
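The request follows the usual HWRM enables convention: a field is acted on only when its bit in enables is set, and the new *_ASSETS_TEST flags ask firmware to verify that the named resources could be granted without actually reserving them. A sketch with a hypothetical helper name:

	static void sample_fill_vf_cfg(struct hwrm_func_vf_cfg_input *req,
				       u16 mtu, u16 tx_rings)
	{
		req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_MTU |
					   FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS);
		req->mtu = cpu_to_le16(mtu);
		req->num_tx_rings = cpu_to_le16(tx_rings);
		/* Check, but do not reserve, the requested TX rings. */
		req->flags = cpu_to_le32(FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST);
	}
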
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
struct hwrm_func_vf_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
struct hwrm_func_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};

-/* Output (80 bytes) */
+/* hwrm_func_qcaps_output (size:640b/80B) */
struct hwrm_func_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- u8 mac_address[6];
- __le16 max_rsscos_ctx;
- __le16 max_cmpl_rings;
- __le16 max_tx_rings;
- __le16 max_rx_rings;
- __le16 max_l2_ctxs;
- __le16 max_vnics;
- __le16 first_vf_id;
- __le16 max_vfs;
- __le16 max_stat_ctx;
- __le32 max_encap_records;
- __le32 max_decap_records;
- __le32 max_tx_em_flows;
- __le32 max_tx_wm_flows;
- __le32 max_rx_em_flows;
- __le32 max_rx_wm_flows;
- __le32 max_mcast_filters;
- __le32 max_flow_id;
- __le32 max_hw_ring_grps;
- __le16 max_sp_tx_rings;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_func_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ u8 unused_0;
+ u8 valid;
+};
+
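flags in the qcaps response is a plain capability bitmap; each FUNC_QCAPS_RESP_FLAGS_* bit is tested after converting from little endian. For example (hypothetical helper, assuming kernel context):

	static bool sample_roce_v2_supported(struct hwrm_func_qcaps_output *resp)
	{
		return !!(le32_to_cpu(resp->flags) &
			  FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED);
	}
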
+/* hwrm_func_qcfg_input (size:192b/24B) */
struct hwrm_func_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};

-/* Output (72 bytes) */
+/* hwrm_func_qcfg_output (size:640b/80B) */
struct hwrm_func_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le16 vlan;
- __le16 flags;
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
- u8 mac_address[6];
- __le16 pci_id;
- __le16 alloc_rsscos_ctx;
- __le16 alloc_cmpl_rings;
- __le16 alloc_tx_rings;
- __le16 alloc_rx_rings;
- __le16 alloc_l2_ctx;
- __le16 alloc_vnics;
- __le16 mtu;
- __le16 mru;
- __le16 stat_ctx_id;
- u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- u8 port_pf_cnt;
- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
- __le16 dflt_vnic_id;
- __le16 max_mtu_configured;
- __le32 min_bw;
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_0;
- __le16 alloc_vfs;
- __le32 alloc_mcast_filters;
- __le32 alloc_hw_ring_grps;
- __le16 alloc_sp_tx_rings;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_func_vlan_cfg */
-/* Input (48 bytes) */
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ u8 unused_2[7];
+ u8 valid;
+};
+
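min_bw and max_bw pack a 28-bit bandwidth value, a scale bit (bits vs. bytes), and a 3-bit unit selector into a single 32-bit word; the _MASK/_SFT pairs recover each piece. A decode sketch for the raw value (hypothetical helper):

	static u32 sample_min_bw_value(struct hwrm_func_qcfg_output *resp)
	{
		u32 bw = le32_to_cpu(resp->min_bw);

		/* 28-bit raw value; interpret it using the SCALE and UNIT bits. */
		return (bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK) >>
		       FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT;
	}
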
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
struct hwrm_func_vlan_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 enables;
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
- __le16 stag_vid;
- u8 stag_pcp;
- u8 unused_2;
- __be16 stag_tpid;
- __le16 ctag_vid;
- u8 ctag_pcp;
- u8 unused_3;
- __be16 ctag_tpid;
- __le32 rsvd1;
- __le32 rsvd2;
- __le32 unused_4;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 enables;
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
+ __le16 stag_vid;
+ u8 stag_pcp;
+ u8 unused_1;
+ __be16 stag_tpid;
+ __le16 ctag_vid;
+ u8 ctag_pcp;
+ u8 unused_2;
+ __be16 ctag_tpid;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ u8 unused_3[4];
+};
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
struct hwrm_func_vlan_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_cfg */
-/* Input (88 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_cfg_input (size:704b/88B) */
struct hwrm_func_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 flags;
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
- __le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- __le16 mtu;
- __le16 mru;
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- u8 dflt_mac_addr[6];
- __le16 dflt_vlan;
- __be32 dflt_ip_addr[4];
- __le32 min_bw;
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- __le16 async_event_cr;
- u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ __le16 mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
- u8 allowed_vlan_pris;
- u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
- u8 unused_2;
- __le16 num_mcast_filters;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 num_mcast_filters;
+};
+
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
};

-/* Output (16 bytes) */
-struct hwrm_func_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_qstats */
-/* Input (24 bytes) */
+/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};

-/* Output (176 bytes) */
+/* hwrm_func_qstats_output (size:1408b/176B) */
struct hwrm_func_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};

-/* Output (16 bytes) */
+/* hwrm_func_clr_stats_output (size:128b/16B) */
struct hwrm_func_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_resc_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
struct hwrm_func_vf_resc_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
};

-/* Output (16 bytes) */
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
struct hwrm_func_vf_resc_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_vnic_ids_query */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
struct hwrm_func_vf_vnic_ids_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0;
- u8 unused_1;
- __le32 max_vnic_id_cnt;
- __le64 vnic_id_tbl_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[2];
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
struct hwrm_func_vf_vnic_ids_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id_cnt;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_func_drv_rgtr */
-/* Input (80 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:832b/104B) */
struct hwrm_func_drv_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- __le32 enables;
- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
- __le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- __le16 unused_1;
- __le32 timestamp;
- __le32 unused_2;
- __le32 vf_req_fwd[8];
- __le32 async_event_fwd[8];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+};
+
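vf_req_fwd and async_event_fwd are 256-bit bitmaps indexed by request type and async event id respectively; the driver sets a bit for each item it wants firmware to forward to it. A sketch for event ids 0-255 (hypothetical helper):

	static void sample_fwd_async_event(struct hwrm_func_drv_rgtr_input *req,
					   u16 event_id)
	{
		req->async_event_fwd[event_id / 32] |=
			cpu_to_le32(1u << (event_id % 32));
	}
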
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
struct hwrm_func_drv_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_unrgtr */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
struct hwrm_func_drv_unrgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
- __le32 unused_0;
+ u8 unused_0[4];
};

-/* Output (16 bytes) */
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
struct hwrm_func_drv_unrgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_buf_rgtr */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
struct hwrm_func_buf_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
- __le16 vf_id;
- __le16 req_buf_num_pages;
- __le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
- __le16 req_buf_len;
- __le16 resp_buf_len;
- u8 unused_0;
- u8 unused_1;
- __le64 req_buf_page_addr0;
- __le64 req_buf_page_addr1;
- __le64 req_buf_page_addr2;
- __le64 req_buf_page_addr3;
- __le64 req_buf_page_addr4;
- __le64 req_buf_page_addr5;
- __le64 req_buf_page_addr6;
- __le64 req_buf_page_addr7;
- __le64 req_buf_page_addr8;
- __le64 req_buf_page_addr9;
- __le64 error_buf_addr;
- __le64 resp_buf_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
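(Editorial aside, not part of the patch: the req_buf_page_size codes above are simply log2 of the page size in bytes -- 0x4 is 16 B, 0xc is 4 KiB, 0x1e is 1 GiB -- so a caller can compute the code instead of keeping a lookup table. A small standalone check of that reading:)

#include <assert.h>
#include <stdint.h>

/* log2 of a power-of-two size in bytes, e.g. 4096 -> 12 (0xc above). */
static uint16_t buf_page_size_code(uint64_t bytes)
{
	uint16_t code = 0;

	while (bytes > 1) {
		bytes >>= 1;
		code++;
	}
	return code;
}

int main(void)
{
	assert(buf_page_size_code(16) == 0x4);          /* ..._16B */
	assert(buf_page_size_code(4096) == 0xc);        /* ..._4K  */
	assert(buf_page_size_code(1ULL << 30) == 0x1e); /* ..._1G  */
	return 0;
}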
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
struct hwrm_func_buf_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_qver */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
struct hwrm_func_drv_qver_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 reserved;
- __le16 fid;
- __le16 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 unused_0[2];
};
-/* Output (16 bytes) */
+/* hwrm_func_drv_qver_output (size:128b/16B) */
struct hwrm_func_drv_qver_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:384b/48B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0[4];
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
struct hwrm_port_phy_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
- __le16 port_id;
- __le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
- u8 auto_pause;
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 unused_0;
- __le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
- u8 unused_1;
- __le32 preemphasis;
- __le16 eee_link_speed_mask;
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2;
- u8 unused_3;
- __le32 tx_lpi_timer;
- __le32 unused_4;
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 unused_0;
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ u8 unused_2[2];
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le32 unused_3;
+};
+
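(Editorial aside, not part of the patch: apart from the 0xffff sentinel for 10 Mb/s, the force/auto link-speed codes in the tables above are the speed in units of 100 Mb/s -- 0xa is 1 Gb, 0x64 is 10 Gb, 0x3e8 is 100 Gb. A quick check of that encoding:)

#include <assert.h>
#include <stdint.h>

/* Speed code in 100 Mb/s units; 10 Mb/s uses the 0xffff sentinel. */
static uint16_t link_speed_code(uint32_t mbps)
{
	return mbps == 10 ? 0xffff : (uint16_t)(mbps / 100);
}

int main(void)
{
	assert(link_speed_code(100) == 0x1);      /* ..._100MB */
	assert(link_speed_code(1000) == 0xa);     /* ..._1GB   */
	assert(link_speed_code(25000) == 0xfa);   /* ..._25GB  */
	assert(link_speed_code(100000) == 0x3e8); /* ..._100GB */
	assert(link_speed_code(10) == 0xffff);    /* ..._10MB  */
	return 0;
}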
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
struct hwrm_port_phy_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_phy_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
struct hwrm_port_phy_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (96 bytes) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
struct hwrm_port_phy_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- u8 unused_0;
- __le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- u8 duplex_cfg;
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
- u8 pause;
- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
- __le16 support_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- __le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_pause;
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- __le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
- u8 eee_config_phy_addr;
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
- u8 parallel_detect;
- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
- #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
- #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
- __le16 link_partner_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
- u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 unused_0;
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
- u8 link_partner_adv_pause;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
- __le16 adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 link_partner_adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le32 xcvr_identifier_type_tx_lpi_timer;
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- __le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- u8 duplex_state;
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
- u8 unused_1;
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_port_mac_cfg */
-/* Input (40 bytes) */
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ u8 unused_2[7];
+ u8 valid;
+};
+
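(Editorial aside, not part of the patch: xcvr_identifier_type_tx_lpi_timer above packs two fields into one 32-bit word, extracted with the MASK/SFT pairs. A sketch of the unpacking, using an arbitrary timer value since the hunk does not state the timer's units:)

#include <assert.h>
#include <stdint.h>

/* Copied from the defines in the hunk above. */
#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK         0xffffffUL
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT  24

int main(void)
{
	/* A QSFP28 module (identifier 0x11) and a timer value of 50,
	 * packed the way the firmware returns them in the shared word.
	 */
	uint32_t word = (0x11UL << PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT) | 50;

	assert((word & PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK) == 50);
	assert(((word & PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK) >>
		PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT) == 0x11);
	return 0;
}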
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
struct hwrm_port_mac_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- __le32 enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- __le16 port_id;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- u8 vlan_pri2cos_map_pri;
- u8 reserved1;
- u8 tunnel_pri2cos_map_pri;
- u8 dscp2pri_map_pri;
- __le16 rx_ts_capture_ptp_msg_type;
- __le16 tx_ts_capture_ptp_msg_type;
- u8 cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+};
+
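(Editorial aside, not part of the patch: cos_field_cfg above carries three subfields in one byte -- VLAN PRI select at bit 1, tunnel VLAN PRI select at bit 3, default CoS at bit 5 -- so a request composes it by OR-ing shifted values. The queue number 4 below is an arbitrary illustration:)

#include <assert.h>
#include <stdint.h>

/* Copied from the defines in the hunk above. */
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER   (0x1UL << 1)
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT      5

int main(void)
{
	/* Classify on the outer VLAN PRI for both plain and tunnelled
	 * traffic, with CoS queue 4 as the fallback.
	 */
	uint8_t cos = PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER |
		      PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER |
		      (4 << PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT);

	assert(cos == 0x8a); /* 0x02 | 0x08 | 0x80 */
	return 0;
}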
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
struct hwrm_port_mac_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- __le16 mtu;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_port_mac_ptp_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
struct hwrm_port_mac_ptp_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (80 bytes) */
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
struct hwrm_port_mac_ptp_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
- u8 unused_0;
- __le16 unused_1;
- __le32 rx_ts_reg_off_lower;
- __le32 rx_ts_reg_off_upper;
- __le32 rx_ts_reg_off_seq_id;
- __le32 rx_ts_reg_off_src_id_0;
- __le32 rx_ts_reg_off_src_id_1;
- __le32 rx_ts_reg_off_src_id_2;
- __le32 rx_ts_reg_off_domain_id;
- __le32 rx_ts_reg_off_fifo;
- __le32 rx_ts_reg_off_fifo_adv;
- __le32 rx_ts_reg_off_granularity;
- __le32 tx_ts_reg_off_lower;
- __le32 tx_ts_reg_off_upper;
- __le32 tx_ts_reg_off_seq_id;
- __le32 tx_ts_reg_off_fifo;
- __le32 tx_ts_reg_off_granularity;
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_port_qstats */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_output (size:128b/16B) */
struct hwrm_port_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_port_lpbk_qstats */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
struct hwrm_port_lpbk_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (96 bytes) */
+/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
struct hwrm_port_lpbk_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 lpbk_ucast_frames;
- __le64 lpbk_mcast_frames;
- __le64 lpbk_bcast_frames;
- __le64 lpbk_ucast_bytes;
- __le64 lpbk_mcast_bytes;
- __le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_error;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
struct hwrm_port_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_port_clr_stats_output (size:128b/16B) */
struct hwrm_port_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_lpbk_clr_stats */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
struct hwrm_port_lpbk_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
struct hwrm_port_lpbk_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_phy_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
struct hwrm_port_phy_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (24 bytes) */
+/* hwrm_port_phy_qcaps_output (size:192b/24B) */
struct hwrm_port_phy_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
- u8 port_cnt;
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- __le16 supported_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- __le16 supported_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- __le16 supported_speeds_eee_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
- __le32 tx_lpi_timer_low;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
- __le32 valid_tx_lpi_timer_high;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
-};
-
-/* hwrm_port_phy_i2c_read */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+};
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
struct hwrm_port_phy_i2c_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
- __le16 port_id;
- u8 i2c_slave_addr;
- u8 unused_0;
- __le16 page_number;
- __le16 page_offset;
- u8 data_length;
- u8 unused_1[7];
-};
-
-/* Output (80 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 unused_0;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
struct hwrm_port_phy_i2c_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 data[16];
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_led_cfg */
-/* Input (64 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
struct hwrm_port_led_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
- __le16 port_id;
- u8 num_leds;
- u8 rsvd;
- u8 led0_id;
- u8 led0_state;
- #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
- u8 led0_color;
- #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 rsvd0;
- u8 led1_id;
- u8 led1_state;
- #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
- u8 led1_color;
- #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 rsvd1;
- u8 led2_id;
- u8 led2_state;
- #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
- u8 led2_color;
- #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 rsvd2;
- u8 led3_id;
- u8 led3_state;
- #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
- u8 led3_color;
- #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 rsvd3;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
struct hwrm_port_led_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_led_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
struct hwrm_port_led_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (48 bytes) */
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
struct hwrm_port_led_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 unused_0[3];
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
- u8 led0_group_id;
- u8 unused_1;
- __le16 led0_state_caps;
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led0_color_caps;
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
- u8 led1_group_id;
- u8 unused_2;
- __le16 led1_state_caps;
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led1_color_caps;
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
- u8 led2_group_id;
- u8 unused_3;
- __le16 led2_state_caps;
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led2_color_caps;
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
- u8 led3_group_id;
- u8 unused_4;
- __le16 led3_state_caps;
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led3_color_caps;
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 unused_5;
- u8 unused_6;
- u8 unused_7;
- u8 valid;
-};
-
-/* hwrm_queue_qportcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 unused_4[3];
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
struct hwrm_queue_qportcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
- __le16 port_id;
- __le16 unused_0;
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
struct hwrm_queue_qportcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 max_configurable_queues;
- u8 max_configurable_lossless_queues;
- u8 queue_cfg_allowed;
- u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 queue_pfcenable_cfg_allowed;
- u8 queue_pri2cos_cfg_allowed;
- u8 queue_cos2bw_cfg_allowed;
- u8 queue_id0;
- u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id1;
- u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id2;
- u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id3;
- u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id4;
- u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id5;
- u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id6;
- u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id7;
- u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 valid;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 valid;
};
-/* hwrm_queue_cfg */
-/* Input (40 bytes) */
+/* hwrm_queue_cfg_input (size:320b/40B) */
struct hwrm_queue_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
- __le32 enables;
- #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
- #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
- __le32 queue_id;
- __le32 dflt_len;
- u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
struct hwrm_queue_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
struct hwrm_queue_pfcenable_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_cfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
struct hwrm_queue_pfcenable_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
@@ -2440,1729 +2703,1664 @@ struct hwrm_queue_pfcenable_cfg_input {
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
- __le16 port_id;
- __le16 unused_0;
+ __le16 port_id;
+ u8 unused_0[2];
};
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
struct hwrm_queue_pri2cos_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
- u8 port_id;
- u8 unused_0[3];
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
struct hwrm_queue_pri2cos_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 queue_cfg_info;
- #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
struct hwrm_queue_pri2cos_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
- __le32 enables;
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
- u8 port_id;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
struct hwrm_queue_pri2cos_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
struct hwrm_queue_cos2bw_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (112 bytes) */
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
struct hwrm_queue_cos2bw_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 queue_id0;
- u8 unused_0;
- __le16 unused_1;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_cfg */
-/* Input (128 bytes) */
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_2[4];
+ u8 valid;
+};
+
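The min_bw/max_bw words in these structures pack three fields into one
__le32: a 28-bit bandwidth magnitude (BW_VALUE_MASK/BW_VALUE_SFT), a
scale selector at bit 28 (SCALE_BITS vs. SCALE_BYTES), and a 3-bit unit
code at bits 29-31 (MEGA/KILO/BASE/GIGA/PERCENT1_100, with INVALID
meaning "unspecified"). A minimal sketch of composing such a word for
the cfg request follows; it uses only macros #defined in this header,
but the helper name and the Mbit/s reading of SCALE_BITS combined with
UNIT_MEGA are illustrative assumptions, not driver code.

	/* Sketch: pack a rate given in Mbit/s into the queue_id0_min_bw
	 * layout of struct hwrm_queue_cos2bw_cfg_input.  The masks and
	 * shifts are the ones defined above; the function itself is
	 * hypothetical.
	 */
	static u32 bnxt_encode_min_bw_mbps(u32 mbps)
	{
		u32 bw;

		/* 28-bit magnitude in the low bits (the shift is 0 here) */
		bw = (mbps << QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT) &
		     QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;
		/* scale in bits (bit 28 clear) with a unit of megabits */
		bw |= QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS |
		      QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA;
		return bw;
	}

	/* The field is little-endian on the wire, e.g.:
	 *	req.queue_id0_min_bw = cpu_to_le32(bnxt_encode_min_bw_mbps(1000));
	 */
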
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
struct hwrm_queue_cos2bw_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
- __le16 port_id;
- u8 queue_id0;
- u8 unused_0;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_1[5];
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
};
-/* Output (16 bytes) */
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
struct hwrm_queue_cos2bw_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
struct hwrm_queue_dscp_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 port_id;
- u8 unused_0[7];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
};
-/* Output (16 bytes) */
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
struct hwrm_queue_dscp_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_dscp_bits;
- u8 unused_0;
- __le16 max_entries;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_qcfg */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
struct hwrm_queue_dscp2pri_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- u8 port_id;
- u8 unused_0;
- __le16 dest_data_buffer_size;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 entry_cnt;
- u8 default_pri;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
struct hwrm_queue_dscp2pri_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le32 flags;
- #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
- __le32 enables;
- #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
- u8 port_id;
- u8 default_pri;
- __le16 entry_cnt;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
struct hwrm_vnic_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_alloc_output (size:128b/16B) */
struct hwrm_vnic_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_vnic_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_free_input (size:192b/24B) */
struct hwrm_vnic_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_free_output (size:128b/16B) */
struct hwrm_vnic_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:320b/40B) */
struct hwrm_vnic_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
- #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
- #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
- __le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
- __le16 vnic_id;
- __le16 dflt_ring_grp;
- __le16 rss_rule;
- __le16 cos_rule;
- __le16 lb_rule;
- __le16 mru;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
struct hwrm_vnic_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
struct hwrm_vnic_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
};
-/* Output (24 bytes) */
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
struct hwrm_vnic_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- u8 unused_0;
- u8 unused_1;
- __le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
struct hwrm_vnic_tpa_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
- #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
- __le32 enables;
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
- __le16 vnic_id;
- __le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
- __le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
- u8 unused_0;
- u8 unused_1;
- __le32 max_agg_timer;
- __le32 min_agg_len;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+};
+
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
struct hwrm_vnic_tpa_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
struct hwrm_vnic_rss_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- __le32 unused_0;
- __le64 ring_grp_tbl_addr;
- __le64 hash_key_tbl_addr;
- __le16 rss_ctx_idx;
- __le16 unused_1[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ u8 unused_0[4];
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 unused_1[6];
+};
+
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
struct hwrm_vnic_rss_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_plcmodes_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
- __le32 enables;
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
- __le32 vnic_id;
- __le16 jumbo_thresh;
- __le16 hds_offset;
- __le16 hds_threshold;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
struct hwrm_vnic_plcmodes_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_cos_lb_ctx_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_alloc */
-/* Input (80 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:640b/80B) */
struct hwrm_ring_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
- #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
- #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 unused_1;
- __le64 page_tbl_addr;
- __le32 fbo;
- u8 page_size;
- u8 page_tbl_depth;
- u8 unused_2;
- u8 unused_3;
- __le32 length;
- __le16 logical_id;
- __le16 cmpl_ring_id;
- __le16 queue_id;
- u8 unused_4;
- u8 unused_5;
- __le32 reserved1;
- __le16 ring_arb_cfg;
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0)
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0)
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- u8 unused_6;
- u8 unused_7;
- __le32 reserved3;
- __le32 stat_ctx_id;
- __le32 reserved4;
- __le32 max_bw;
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0[3];
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ u8 unused_1[2];
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ u8 unused_2[2];
+ __le32 reserved1;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 unused_3;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
- #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
- #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
- #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
- u8 unused_8[3];
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 unused_4[3];
};
-/* Output (16 bytes) */
+/* hwrm_ring_alloc_output (size:128b/16B) */
struct hwrm_ring_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ring_id;
- __le16 logical_ring_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_ring_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_free_input (size:192b/24B) */
struct hwrm_ring_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
- #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
- #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 ring_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
struct hwrm_ring_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
struct hwrm_ring_cmpl_ring_qaggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ u8 unused_0[6];
};
-/* Output (32 bytes) */
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
struct hwrm_ring_cmpl_ring_qaggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[6];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_reset */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
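The qaggint_params/cfg_aggint_params pair above queries and programs interrupt coalescing for a completion ring. A sketch of filling the config request; field and flag names come from the structures themselves, while the numeric values and the helper are purely illustrative:

	/* Sketch: program completion-ring interrupt coalescing. */
	static void fill_coal_req(struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req,
				  u16 ring_id)
	{
		req->ring_id = cpu_to_le16(ring_id);
		/* restart the latency timer whenever a completion arrives */
		req->flags =
			cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET);
		req->num_cmpl_dma_aggr = cpu_to_le16(8);	/* example: 8 completions */
		req->int_lat_tmr_min = cpu_to_le16(10);		/* example timer value */
		req->int_lat_tmr_max = cpu_to_le16(100);	/* example timer value */
	}
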
+/* hwrm_ring_reset_input (size:192b/24B) */
struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 ring_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_grp_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
struct hwrm_ring_grp_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 cr;
- __le16 rr;
- __le16 ar;
- __le16 sc;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
struct hwrm_ring_grp_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 ring_group_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_ring_grp_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
struct hwrm_ring_grp_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 ring_group_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_ring_grp_free_output (size:128b/16B) */
struct hwrm_ring_grp_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_alloc */
-/* Input (96 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
struct hwrm_cfa_l2_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
- __le32 enables;
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- u8 l2_addr[6];
- u8 unused_0;
- u8 unused_1;
- u8 l2_addr_mask[6];
- __le16 l2_ovlan;
- __le16 l2_ovlan_mask;
- __le16 l2_ivlan;
- __le16 l2_ivlan_mask;
- u8 unused_2;
- u8 unused_3;
- u8 t_l2_addr[6];
- u8 unused_4;
- u8 unused_5;
- u8 t_l2_addr_mask[6];
- __le16 t_l2_ovlan;
- __le16 t_l2_ovlan_mask;
- __le16 t_l2_ivlan;
- __le16 t_l2_ivlan_mask;
- u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
- u8 unused_6;
- __le32 src_id;
- u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_7;
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
- u8 unused_8;
- __le32 unused_9;
- __le64 l2_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ u8 l2_addr[6];
+ u8 unused_0[2];
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
+
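Worth noting in the filter-alloc request above: the 'enables' bitmask declares which of the optional fields after it firmware should read at all. A sketch of an exact-match RX MAC filter, using only field and flag names from the structure; the helper itself and the chosen bits are illustrative:

	#include <linux/if_ether.h>	/* ETH_ALEN */
	#include <linux/string.h>

	/* Sketch: request an RX L2 filter for one MAC address. */
	static void fill_l2_filter_req(struct hwrm_cfa_l2_filter_alloc_input *req,
				       const u8 *mac, u16 dst_id)
	{
		req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
		req->enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
					   CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK |
					   CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID);
		memcpy(req->l2_addr, mac, ETH_ALEN);
		memset(req->l2_addr_mask, 0xff, ETH_ALEN);	/* all bits significant */
		req->dst_id = cpu_to_le16(dst_id);
	}
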
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_l2_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 l2_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
struct hwrm_cfa_l2_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 l2_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
struct hwrm_cfa_l2_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- __le32 enables;
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- __le64 l2_filter_id;
- __le32 dst_id;
- __le32 new_mirror_vnic_id;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
struct hwrm_cfa_l2_set_rx_mask_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 mask;
- #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED 0x1UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
- __le64 mc_tbl_addr;
- __le32 num_mc_entries;
- __le32 unused_0;
- __le64 vlan_tag_tbl_addr;
- __le32 num_vlan_tags;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
struct hwrm_cfa_l2_set_rx_mask_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- u8 code;
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
- u8 unused_0[7];
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ u8 unused_0[7];
};
-/* hwrm_cfa_tunnel_filter_alloc */
-/* Input (88 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
struct hwrm_cfa_tunnel_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- __le32 enables;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
- __le64 l2_filter_id;
- u8 l2_addr[6];
- __le16 l2_ivlan;
- __le32 l3_addr[4];
- __le32 t_l3_addr[4];
- u8 l3_addr_type;
- u8 t_l3_addr_type;
- u8 tunnel_type;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
@@ -4174,158 +4372,204 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_0;
- __le32 vni;
- __le32 dst_vnic_id;
- __le32 mirror_vnic_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
};
-/* Output (24 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_tunnel_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tunnel_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_tunnel_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
struct hwrm_cfa_tunnel_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 tunnel_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
struct hwrm_cfa_tunnel_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ u8 ver_hlen;
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ u8 tos;
+ __be16 ip_id;
+ __be16 flags_frag_offset;
+ u8 ttl;
+ u8 protocol;
+ __be32 src_ip_addr;
+ __be32 dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ __be32 ver_tc_flow_label;
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 ttl;
+ __be32 src_ip_addr[4];
+ __be32 dest_ip_addr[4];
+};
+
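The shift values above are written in hex (VER_SFT 0x1c = 28, TC_SFT 0x14 = 20), which is easy to misread; packing the version/traffic-class/flow-label word is plain shifting against those masks. A worked sketch, derived only from the definitions above:

	/* Sketch: pack hwrm_vxlan_ipv6_hdr.ver_tc_flow_label. */
	static __be32 vxlan_ipv6_vtf(u8 tc, u32 flow_label)
	{
		u32 v = (6u << 28) |			/* IP version, VER_SFT = 0x1c */
			((u32)tc << 20) |		/* traffic class, TC_SFT = 0x14 */
			(flow_label & 0xfffffu);	/* 20-bit flow label */

		return cpu_to_be32(v);
	}
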
+/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+struct hwrm_cfa_encap_data_vxlan {
+ u8 src_mac_addr[6];
+ __le16 unused_0;
+ u8 dst_mac_addr[6];
+ u8 num_vlan_tags;
+ u8 unused_1;
+ __be16 ovlan_tpid;
+ __be16 ovlan_tci;
+ __be16 ivlan_tpid;
+ __be16 ivlan_tci;
+ __le32 l3[10];
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 vni;
+};
+
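The 72-byte VXLAN layout above is sized to travel inside the 80-byte encap_data[20] array of hwrm_cfa_encap_record_alloc_input, defined just below. One plausible way to build such a request is to overlay the two; the cast is illustrative and not something the header itself mandates:

	/* Sketch: build a VXLAN encap record request via an overlay. */
	static void fill_vxlan_encap(struct hwrm_cfa_encap_record_alloc_input *req,
				     __be32 vni, __be16 dst_port)
	{
		struct hwrm_cfa_encap_data_vxlan *encap =
			(struct hwrm_cfa_encap_data_vxlan *)req->encap_data;

		req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
		encap->vni = vni;		/* __be32, per the structure above */
		encap->dst_port = dst_port;	/* network order, e.g. VXLAN's 4789 */
	}
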
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
struct hwrm_cfa_encap_record_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- u8 unused_0;
- __le16 unused_1;
- __le32 encap_data[20];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
+ u8 unused_0[3];
+ __le32 encap_data[20];
+};
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
struct hwrm_cfa_encap_record_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 encap_record_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 encap_record_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
struct hwrm_cfa_encap_record_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_record_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_record_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
struct hwrm_cfa_encap_record_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
struct hwrm_cfa_ntuple_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- __le32 enables;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
- __le64 l2_filter_id;
- u8 src_macaddr[6];
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 tunnel_type;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 tunnel_type;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
@@ -4337,2221 +4581,1723 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
- __be32 src_ipaddr[4];
- __be32 src_ipaddr_mask[4];
- __be32 dst_ipaddr[4];
- __be32 dst_ipaddr_mask[4];
- __be16 src_port;
- __be16 src_port_mask;
- __be16 dst_port;
- __be16 dst_port_mask;
- __le64 ntuple_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 pri_hint;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ntuple_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- u8 code;
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
- u8 unused_0[7];
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+ u8 unused_0[7];
};
-/* hwrm_cfa_ntuple_filter_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 ntuple_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_cfg */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_ntuple_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
- __le32 unused_0;
- __le64 ntuple_filter_id;
- __le32 new_dst_id;
- __le32 new_mirror_vnic_id;
- __le16 new_meter_instance_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ u8 unused_0[4];
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
- __le16 unused_1[3];
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_alloc */
-/* Input (104 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
struct hwrm_cfa_decap_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
- __le32 enables;
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- __be32 tunnel_id;
- u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_0;
- __le16 unused_1;
- u8 src_macaddr[6];
- u8 unused_2;
- u8 unused_3;
- u8 dst_macaddr[6];
- __be16 ovlan_vid;
- __be16 ivlan_vid;
- __be16 t_ovlan_vid;
- __be16 t_ivlan_vid;
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- u8 ip_protocol;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- u8 unused_4;
- u8 unused_5;
- u8 unused_6[3];
- u8 unused_7;
- __be32 src_ipaddr[4];
- __be32 dst_ipaddr[4];
- __be16 src_port;
- __be16 dst_port;
- __le16 dst_id;
- __le16 l2_ctxt_ref_id;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+ __le32 enables;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ __be32 tunnel_id;
+ u8 tunnel_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 src_macaddr[6];
+ u8 unused_2[2];
+ u8 dst_macaddr[6];
+ __be16 ovlan_vid;
+ __be16 ivlan_vid;
+ __be16 t_ovlan_vid;
+ __be16 t_ivlan_vid;
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 unused_3;
+ __le32 unused_4;
+ __be32 src_ipaddr[4];
+ __be32 dst_ipaddr[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __le16 dst_id;
+ __le16 l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 decap_filter_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 decap_filter_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
struct hwrm_cfa_decap_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 decap_filter_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 decap_filter_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
struct hwrm_cfa_flow_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
- __le16 src_fid;
- __le32 tunnel_handle;
- __le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- __le16 dst_fid;
- __be16 l2_rewrite_vlan_tpid;
- __be16 l2_rewrite_vlan_tci;
- __le16 act_meter_id;
- __le16 ref_flow_handle;
- __be16 ethertype;
- __be16 outer_vlan_tci;
- __be16 dmac[3];
- __be16 inner_vlan_tci;
- __be16 smac[3];
- u8 ip_dst_mask_len;
- u8 ip_src_mask_len;
- __be32 ip_dst[4];
- __be32 ip_src[4];
- __be16 l4_src_port;
- __be16 l4_src_port_mask;
- __be16 l4_dst_port;
- __be16 l4_dst_port_mask;
- __be32 nat_ip_address[4];
- __be16 l2_rewrite_dmac[3];
- __be16 nat_port;
- __be16 l2_rewrite_smac[3];
- u8 ip_proto;
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ __le16 src_fid;
+ __le32 tunnel_handle;
+ __le16 action_flags;
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ __le16 dst_fid;
+ __be16 l2_rewrite_vlan_tpid;
+ __be16 l2_rewrite_vlan_tci;
+ __le16 act_meter_id;
+ __le16 ref_flow_handle;
+ __be16 ethertype;
+ __be16 outer_vlan_tci;
+ __be16 dmac[3];
+ __be16 inner_vlan_tci;
+ __be16 smac[3];
+ u8 ip_dst_mask_len;
+ u8 ip_src_mask_len;
+ __be32 ip_dst[4];
+ __be32 ip_src[4];
+ __be16 l4_src_port;
+ __be16 l4_src_port_mask;
+ __be16 l4_dst_port;
+ __be16 l4_dst_port_mask;
+ __be32 nat_ip_address[4];
+ __be16 l2_rewrite_dmac[3];
+ __be16 nat_port;
+ __be16 l2_rewrite_smac[3];
+ u8 ip_proto;
+ u8 unused_0;
+};
+
+/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
struct hwrm_cfa_flow_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flow_handle;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flow_handle;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_free_input (size:192b/24B) */
struct hwrm_cfa_flow_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ u8 unused_0[6];
};
-/* Output (32 bytes) */
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
struct hwrm_cfa_flow_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet;
- __le64 byte;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_stats */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet;
+ __le64 byte;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
struct hwrm_cfa_flow_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 num_flows;
- __le16 flow_handle_0;
- __le16 flow_handle_1;
- __le16 flow_handle_2;
- __le16 flow_handle_3;
- __le16 flow_handle_4;
- __le16 flow_handle_5;
- __le16 flow_handle_6;
- __le16 flow_handle_7;
- __le16 flow_handle_8;
- __le16 flow_handle_9;
- __le16 unused_0;
-};
-
-/* Output (176 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_flows;
+ __le16 flow_handle_0;
+ __le16 flow_handle_1;
+ __le16 flow_handle_2;
+ __le16 flow_handle_3;
+ __le16 flow_handle_4;
+ __le16 flow_handle_5;
+ __le16 flow_handle_6;
+ __le16 flow_handle_7;
+ __le16 flow_handle_8;
+ __le16 flow_handle_9;
+ u8 unused_0[2];
+};
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
struct hwrm_cfa_flow_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet_0;
- __le64 packet_1;
- __le64 packet_2;
- __le64 packet_3;
- __le64 packet_4;
- __le64 packet_5;
- __le64 packet_6;
- __le64 packet_7;
- __le64 packet_8;
- __le64 packet_9;
- __le64 byte_0;
- __le64 byte_1;
- __le64 byte_2;
- __le64 byte_3;
- __le64 byte_4;
- __le64 byte_5;
- __le64 byte_6;
- __le64 byte_7;
- __le64 byte_8;
- __le64 byte_9;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet_0;
+ __le64 packet_1;
+ __le64 packet_2;
+ __le64 packet_3;
+ __le64 packet_4;
+ __le64 packet_5;
+ __le64 packet_6;
+ __le64 packet_7;
+ __le64 packet_8;
+ __le64 packet_9;
+ __le64 byte_0;
+ __le64 byte_1;
+ __le64 byte_2;
+ __le64 byte_3;
+ __le64 byte_4;
+ __le64 byte_5;
+ __le64 byte_6;
+ __le64 byte_7;
+ __le64 byte_8;
+ __le64 byte_9;
+ u8 unused_0[7];
+ u8 valid;
+};
+
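The stats command above batches up to ten flow handles per request: num_flows says how many of flow_handle_0..flow_handle_9 are populated, and packet_N/byte_N in the response pair up positionally. A sketch that relies on the ten handle fields being contiguous __le16s, which they are in this layout, though indexing across members like this is strictly an illustration:

	/* Sketch: batch up to 10 flow handles into one stats request. */
	static void fill_flow_stats_req(struct hwrm_cfa_flow_stats_input *req,
					const u16 *handles, unsigned int n)
	{
		__le16 *slot = &req->flow_handle_0;
		unsigned int i;

		if (n > 10)
			n = 10;		/* the request carries at most 10 handles */
		req->num_flows = cpu_to_le16(n);
		for (i = 0; i < n; i++)
			slot[i] = cpu_to_le16(handles[i]);
	}
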
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
struct hwrm_cfa_vfr_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 reserved;
- __le32 unused_0;
- char vfr_name[32];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+ char vfr_name[32];
+};
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
struct hwrm_cfa_vfr_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rx_cfa_code;
- __le16 tx_cfa_action;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rx_cfa_code;
+ __le16 tx_cfa_action;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
struct hwrm_cfa_vfr_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- char vfr_name[32];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ char vfr_name[32];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
struct hwrm_cfa_vfr_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_query */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0[7];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- __be16 tunnel_dst_port_val;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0;
- __be16 tunnel_dst_port_val;
- __be32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0;
+ __be16 tunnel_dst_port_val;
+ u8 unused_1[4];
+};
+
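One detail worth calling out in the alloc request above: tunnel_dst_port_val is big-endian (network order) while the surrounding command fields are little-endian, both per the field types in the structure. A sketch:

	/* Sketch: advertise a UDP tunnel destination port to firmware. */
	static void fill_tunnel_port_req(struct hwrm_tunnel_dst_port_alloc_input *req,
					 __be16 udp_port)
	{
		req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
		req->tunnel_dst_port_val = udp_port;	/* already network order */
	}
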
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0;
- __le16 tunnel_dst_port_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0;
+ __le16 tunnel_dst_port_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
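ctx_hw_stats, new in this revision, is the 160-byte (20 x __le64) block that firmware DMA-writes for a statistics context; pairing it with the stats_dma_addr field of hwrm_stat_ctx_alloc_input below is a reading of the two definitions rather than something the header states. A sketch of the allocation side, using the standard coherent-DMA API:

	#include <linux/dma-mapping.h>

	/* Sketch: back a stat context with a coherent ctx_hw_stats block. */
	static int alloc_stats_mem(struct device *dev,
				   struct hwrm_stat_ctx_alloc_input *req,
				   struct ctx_hw_stats **stats)
	{
		dma_addr_t mapping;

		*stats = dma_alloc_coherent(dev, sizeof(**stats), &mapping,
					    GFP_KERNEL);
		if (!*stats)
			return -ENOMEM;
		req->stats_dma_addr = cpu_to_le64(mapping);
		req->update_period_ms = cpu_to_le32(1000);	/* example: 1s */
		return 0;
	}
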
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
struct hwrm_stat_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 stats_dma_addr;
- __le32 update_period_ms;
- u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
struct hwrm_stat_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
struct hwrm_stat_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
struct hwrm_stat_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_query */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
struct hwrm_stat_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (176 bytes) */
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
struct hwrm_stat_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
struct hwrm_stat_ctx_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
struct hwrm_stat_ctx_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fw_reset */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 host_idx;
- u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
+ u8 host_idx;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
struct hwrm_fw_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_fw_qstatus */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
struct hwrm_fw_qstatus_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
struct hwrm_fw_qstatus_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_fw_set_time */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
struct hwrm_fw_set_time_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 year;
- #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
- u8 month;
- u8 day;
- u8 hour;
- u8 minute;
- u8 second;
- u8 unused_0;
- __le16 millisecond;
- __le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
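
The fw_set_time request carries a broken-down UTC timestamp; every field wider than a byte is little-endian. A minimal sketch of filling it from a time64_t, assuming the common message-header fields (req_type, seq_id, ...) are set by the usual request-init path, which is not shown:

#include <linux/time.h>

static void fw_set_time_fill(struct hwrm_fw_set_time_input *req, time64_t now)
{
	struct tm tm;

	time64_to_tm(now, 0, &tm);
	req->year   = cpu_to_le16(tm.tm_year + 1900);
	req->month  = tm.tm_mon + 1;	/* struct tm months are 0-based */
	req->day    = tm.tm_mday;
	req->hour   = tm.tm_hour;
	req->minute = tm.tm_min;
	req->second = tm.tm_sec;
	req->zone   = cpu_to_le16(FW_SET_TIME_REQ_ZONE_UTC);
}
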
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
struct hwrm_fw_set_time_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fw_set_structured_data */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2
+ __le16 len;
+ u8 version;
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
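
hwrm_struct_hdr entries can be chained through next_offset, with STRUCT_HDR_NEXT_OFFSET_LAST (0) terminating the chain. A sketch of walking such a buffer; the traversal rule (next_offset relative to the start of the current header) is an assumption here, not something the header spells out:

#include <linux/kernel.h>

static void struct_hdr_walk(const void *buf, u16 buf_len)
{
	const u8 *p = buf, *end = p + buf_len;

	while (p + sizeof(struct hwrm_struct_hdr) <= end) {
		const struct hwrm_struct_hdr *hdr = (const void *)p;
		u16 next = le16_to_cpu(hdr->next_offset);

		pr_debug("struct_id 0x%x len %u count %u\n",
			 le16_to_cpu(hdr->struct_id),
			 le16_to_cpu(hdr->len), hdr->count);
		if (next == STRUCT_HDR_NEXT_OFFSET_LAST)
			break;
		p += next;	/* assumed: offset from current header */
	}
}
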
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
struct hwrm_fw_set_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- u8 hdr_cnt;
- u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
struct hwrm_fw_set_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
struct hwrm_fw_set_structured_data_cmd_err {
- u8 code;
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
};
-/* hwrm_fw_get_structured_data */
-/* Input (32 bytes) */
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
struct hwrm_fw_get_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 structure_id;
- __le16 subtype;
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
- u8 count;
- u8 unused_0;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
+ u8 count;
+ u8 unused_0;
};
-/* Output (16 bytes) */
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
struct hwrm_fw_get_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hdr_cnt;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
struct hwrm_fw_get_structured_data_cmd_err {
- u8 code;
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
};
-/* hwrm_exec_fwd_resp */
-/* Input (128 bytes) */
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
struct hwrm_exec_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
struct hwrm_exec_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_reject_fwd_resp */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
struct hwrm_reject_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
struct hwrm_reject_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_resp */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
struct hwrm_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_resp_target_id;
- __le16 encap_resp_cmpl_ring;
- __le16 encap_resp_len;
- u8 unused_0;
- u8 unused_1;
- __le64 encap_resp_addr;
- __le32 encap_resp[24];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
struct hwrm_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
struct hwrm_fwd_async_event_cmpl_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_async_event_target_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le32 encap_async_event_cmpl[4];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
struct hwrm_fwd_async_event_cmpl_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_temp_monitor_query */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
struct hwrm_temp_monitor_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_temp_monitor_query_output (size:128b/16B) */
struct hwrm_temp_monitor_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 temp;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_wol_filter_alloc */
-/* Input (64 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
struct hwrm_wol_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
- __le16 port_id;
- u8 wol_type;
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
- u8 unused_0;
- __le32 unused_1;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_buf_size;
- __le16 pattern_mask_size;
- __le32 unused_2;
- __le64 pattern_buf_addr;
- __le64 pattern_mask_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
struct hwrm_wol_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_wol_filter_free */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
struct hwrm_wol_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
- __le32 enables;
- #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
- __le16 port_id;
- u8 wol_filter_id;
- u8 unused_0[5];
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
};
-/* Output (16 bytes) */
+/* hwrm_wol_filter_free_output (size:128b/16B) */
struct hwrm_wol_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_wol_filter_qcfg */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
struct hwrm_wol_filter_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 handle;
- __le32 unused_0;
- __le64 pattern_buf_addr;
- __le16 pattern_buf_size;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3[3];
- u8 unused_4;
- __le64 pattern_mask_addr;
- __le16 pattern_mask_size;
- __le16 unused_5[3];
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
struct hwrm_wol_filter_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 next_handle;
- u8 wol_filter_id;
- u8 wol_type;
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
- __le32 unused_0;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_size;
- __le16 pattern_mask_size;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_wol_reason_qcfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
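
The wol_filter_qcfg pair behaves as a cursor: the request carries a handle and the response hands back next_handle for the following filter. A sketch of the iteration, with wol_qcfg_fn standing in for the real request/response transport and the 0xffff end-of-list sentinel being an assumption:

/* Hypothetical transport hook: sends the request, fills the response,
 * returns 0 on success.
 */
typedef int (*wol_qcfg_fn)(struct hwrm_wol_filter_qcfg_input *req,
			   struct hwrm_wol_filter_qcfg_output *resp);

static int wol_filter_walk(u16 port_id, wol_qcfg_fn issue_wol_qcfg)
{
	struct hwrm_wol_filter_qcfg_input req = {};
	struct hwrm_wol_filter_qcfg_output resp = {};
	u16 handle = 0;
	int rc;

	do {
		req.port_id = cpu_to_le16(port_id);
		req.handle = cpu_to_le16(handle);
		rc = issue_wol_qcfg(&req, &resp);
		if (rc)
			return rc;
		/* resp.wol_filter_id / resp.wol_type describe this entry */
		handle = le16_to_cpu(resp.next_handle);
	} while (handle != 0xffff);	/* assumed end-of-list sentinel */

	return 0;
}
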
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
struct hwrm_wol_reason_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le64 wol_pkt_buf_addr;
- __le16 wol_pkt_buf_size;
- __le16 unused_4[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
struct hwrm_wol_reason_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 wol_reason;
- #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
- u8 wol_pkt_len;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_dbg_read_direct */
-/* Input (32 bytes) */
-struct hwrm_dbg_read_direct_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 read_addr;
- __le32 read_len32;
-};
-
-/* Output (16 bytes) */
-struct hwrm_dbg_read_direct_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
- u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
- __le32 unused_2;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
struct hwrm_nvm_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_entries */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
struct hwrm_nvm_get_dir_entries_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
};
-/* Output (16 bytes) */
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
struct hwrm_nvm_get_dir_entries_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_info */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
struct hwrm_nvm_get_dir_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (24 bytes) */
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
struct hwrm_nvm_get_dir_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 entries;
- __le32 entry_length;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_write */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_write_input (size:384b/48B) */
struct hwrm_nvm_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 dir_data_length;
- __le16 option;
- __le16 flags;
- #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
- __le32 dir_item_length;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ __le32 dir_item_length;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
struct hwrm_nvm_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le16 dir_idx;
- u8 unused_0;
- u8 valid;
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
};
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
struct hwrm_nvm_write_cmd_err {
- u8 code;
- #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ u8 unused_0[7];
};
-/* hwrm_nvm_modify */
-/* Input (40 bytes) */
+/* hwrm_nvm_modify_input (size:320b/40B) */
struct hwrm_nvm_modify_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_idx;
- u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
- __le32 unused_2;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
struct hwrm_nvm_modify_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_find_dir_entry */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
struct hwrm_nvm_find_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
- __le16 dir_idx;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 opt_ordinal;
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
- u8 unused_1[3];
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
struct hwrm_nvm_find_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le32 dir_data_length;
- __le32 fw_ver;
- __le16 dir_ordinal;
- __le16 dir_idx;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_erase_dir_entry */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
struct hwrm_nvm_erase_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_idx;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_erase_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dev_info */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
struct hwrm_nvm_get_dev_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (32 bytes) */
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
struct hwrm_nvm_get_dev_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 manufacturer_id;
- __le16 device_id;
- __le32 sector_size;
- __le32 nvram_size;
- __le32 reserved_size;
- __le32 available_size;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_nvm_mod_dir_entry */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
struct hwrm_nvm_mod_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
- __le16 dir_idx;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 checksum;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_mod_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_verify_update */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
struct hwrm_nvm_verify_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
struct hwrm_nvm_verify_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_install_update */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
struct hwrm_nvm_install_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 install_type;
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- __le16 flags;
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
- __le16 unused_0;
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
struct hwrm_nvm_install_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 installed_items;
- u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- u8 problem_item;
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
- u8 reset_required;
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
- u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+ u8 unused_0[7];
};
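
After nvm_install_update completes, result, problem_item and reset_required in the response tell the caller whether the package took and what kind of reset is still owed. A sketch of the decode, using only the constants defined above; the return convention is illustrative:

#include <linux/errno.h>

static int nvm_install_check(const struct hwrm_nvm_install_update_output *resp)
{
	if (resp->result != NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS)
		return -EIO;

	switch (resp->reset_required) {
	case NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE:
		return 0;
	case NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI:
		return 1;	/* illustrative: caller schedules a PCI reset */
	case NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER:
		return 2;	/* illustrative: needs a power cycle */
	default:
		return -EINVAL;
	}
}
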
-/* hwrm_nvm_get_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
struct hwrm_nvm_get_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
struct hwrm_nvm_get_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_get_variable_cmd_err {
- u8 code;
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ u8 unused_0[7];
};
-/* hwrm_nvm_set_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
struct hwrm_nvm_set_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+ u8 unused_0;
+};
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
struct hwrm_nvm_set_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_set_variable_cmd_err {
- u8 code;
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ u8 unused_0[7];
};
-/* hwrm_selftest_qlist */
-/* Input (16 bytes) */
+/* hwrm_selftest_qlist_input (size:128b/16B) */
struct hwrm_selftest_qlist_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (280 bytes) */
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
struct hwrm_selftest_qlist_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_tests;
- u8 available_tests;
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 offline_tests;
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- __le16 test_timeout;
- u8 unused_1;
- u8 unused_2;
- char test0_name[32];
- char test1_name[32];
- char test2_name[32];
- char test3_name[32];
- char test4_name[32];
- char test5_name[32];
- char test6_name[32];
- char test7_name[32];
- __le32 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 unused_6;
- u8 valid;
-};
-
-/* hwrm_selftest_exec */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test0_name[32];
+ char test1_name[32];
+ char test2_name[32];
+ char test3_name[32];
+ char test4_name[32];
+ char test5_name[32];
+ char test6_name[32];
+ char test7_name[32];
+ u8 unused_2[7];
+ u8 valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
struct hwrm_selftest_exec_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 pcie_lane_num;
+ u8 unused_0[6];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
struct hwrm_selftest_exec_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 requested_tests;
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 test_success;
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_selftest_irq */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
struct hwrm_selftest_irq_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_selftest_irq_output (size:128b/16B) */
struct hwrm_selftest_irq_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_selftest_retrieve_serdes_data */
-/* Input (32 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 resp_data_addr;
- __le32 resp_data_offset;
- __le16 data_len;
- u8 flags;
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 total_data_len;
- __le16 copied_data_len;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Hardware Resource Manager Specification */
-/* Input (16 bytes) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* Output (8 bytes) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* Short Command Structure (16 bytes) */
-struct hwrm_short_input {
- __le16 req_type;
- __le16 signature;
- #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
- __le16 unused_0;
- __le16 size;
- __le64 req_addr;
-};
-
-/* Command numbering (8 bytes) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET (0x0UL)
- #define HWRM_FUNC_BUF_UNRGTR (0xeUL)
- #define HWRM_FUNC_VF_CFG (0xfUL)
- #define RESERVED1 (0x10UL)
- #define HWRM_FUNC_RESET (0x11UL)
- #define HWRM_FUNC_GETFID (0x12UL)
- #define HWRM_FUNC_VF_ALLOC (0x13UL)
- #define HWRM_FUNC_VF_FREE (0x14UL)
- #define HWRM_FUNC_QCAPS (0x15UL)
- #define HWRM_FUNC_QCFG (0x16UL)
- #define HWRM_FUNC_CFG (0x17UL)
- #define HWRM_FUNC_QSTATS (0x18UL)
- #define HWRM_FUNC_CLR_STATS (0x19UL)
- #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
- #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
- #define HWRM_FUNC_DRV_RGTR (0x1dUL)
- #define HWRM_FUNC_DRV_QVER (0x1eUL)
- #define HWRM_FUNC_BUF_RGTR (0x1fUL)
- #define HWRM_PORT_PHY_CFG (0x20UL)
- #define HWRM_PORT_MAC_CFG (0x21UL)
- #define HWRM_PORT_TS_QUERY (0x22UL)
- #define HWRM_PORT_QSTATS (0x23UL)
- #define HWRM_PORT_LPBK_QSTATS (0x24UL)
- #define HWRM_PORT_CLR_STATS (0x25UL)
- #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
- #define HWRM_PORT_PHY_QCFG (0x27UL)
- #define HWRM_PORT_MAC_QCFG (0x28UL)
- #define HWRM_PORT_MAC_PTP_QCFG (0x29UL)
- #define HWRM_PORT_PHY_QCAPS (0x2aUL)
- #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
- #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
- #define HWRM_PORT_LED_CFG (0x2dUL)
- #define HWRM_PORT_LED_QCFG (0x2eUL)
- #define HWRM_PORT_LED_QCAPS (0x2fUL)
- #define HWRM_QUEUE_QPORTCFG (0x30UL)
- #define HWRM_QUEUE_QCFG (0x31UL)
- #define HWRM_QUEUE_CFG (0x32UL)
- #define HWRM_FUNC_VLAN_CFG (0x33UL)
- #define HWRM_FUNC_VLAN_QCFG (0x34UL)
- #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
- #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
- #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
- #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
- #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
- #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
- #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL)
- #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL)
- #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL)
- #define HWRM_VNIC_ALLOC (0x40UL)
- #define HWRM_VNIC_FREE (0x41UL)
- #define HWRM_VNIC_CFG (0x42UL)
- #define HWRM_VNIC_QCFG (0x43UL)
- #define HWRM_VNIC_TPA_CFG (0x44UL)
- #define HWRM_VNIC_TPA_QCFG (0x45UL)
- #define HWRM_VNIC_RSS_CFG (0x46UL)
- #define HWRM_VNIC_RSS_QCFG (0x47UL)
- #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
- #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
- #define HWRM_VNIC_QCAPS (0x4aUL)
- #define HWRM_RING_ALLOC (0x50UL)
- #define HWRM_RING_FREE (0x51UL)
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
- #define HWRM_RING_RESET (0x5eUL)
- #define HWRM_RING_GRP_ALLOC (0x60UL)
- #define HWRM_RING_GRP_FREE (0x61UL)
- #define RESERVED5 (0x64UL)
- #define RESERVED6 (0x65UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
- #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
- #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
- #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
- #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL)
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
- #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
- #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
- #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
- #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
- #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
- #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL)
- #define HWRM_CFA_EM_FLOW_FREE (0x9dUL)
- #define HWRM_CFA_EM_FLOW_CFG (0x9eUL)
- #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
- #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
- #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
- #define HWRM_STAT_CTX_ALLOC (0xb0UL)
- #define HWRM_STAT_CTX_FREE (0xb1UL)
- #define HWRM_STAT_CTX_QUERY (0xb2UL)
- #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
- #define HWRM_FW_RESET (0xc0UL)
- #define HWRM_FW_QSTATUS (0xc1UL)
- #define HWRM_FW_SET_TIME (0xc8UL)
- #define HWRM_FW_GET_TIME (0xc9UL)
- #define HWRM_FW_SET_STRUCTURED_DATA (0xcaUL)
- #define HWRM_FW_GET_STRUCTURED_DATA (0xcbUL)
- #define HWRM_FW_IPC_MAILBOX (0xccUL)
- #define HWRM_EXEC_FWD_RESP (0xd0UL)
- #define HWRM_REJECT_FWD_RESP (0xd1UL)
- #define HWRM_FWD_RESP (0xd2UL)
- #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
- #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
- #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
- #define HWRM_WOL_FILTER_FREE (0xf1UL)
- #define HWRM_WOL_FILTER_QCFG (0xf2UL)
- #define HWRM_WOL_REASON_QCFG (0xf3UL)
- #define HWRM_CFA_METER_PROFILE_ALLOC (0xf5UL)
- #define HWRM_CFA_METER_PROFILE_FREE (0xf6UL)
- #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL)
- #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL)
- #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL)
- #define HWRM_CFA_VFR_ALLOC (0xfdUL)
- #define HWRM_CFA_VFR_FREE (0xfeUL)
- #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL)
- #define HWRM_CFA_VF_PAIR_FREE (0x101UL)
- #define HWRM_CFA_VF_PAIR_INFO (0x102UL)
- #define HWRM_CFA_FLOW_ALLOC (0x103UL)
- #define HWRM_CFA_FLOW_FREE (0x104UL)
- #define HWRM_CFA_FLOW_FLUSH (0x105UL)
- #define HWRM_CFA_FLOW_STATS (0x106UL)
- #define HWRM_CFA_FLOW_INFO (0x107UL)
- #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL)
- #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL)
- #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL)
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC (0x10bUL)
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE (0x10cUL)
- #define HWRM_CFA_PAIR_ALLOC (0x10dUL)
- #define HWRM_CFA_PAIR_FREE (0x10eUL)
- #define HWRM_CFA_PAIR_INFO (0x10fUL)
- #define HWRM_FW_IPC_MSG (0x110UL)
- #define HWRM_SELFTEST_QLIST (0x200UL)
- #define HWRM_SELFTEST_EXEC (0x201UL)
- #define HWRM_SELFTEST_IRQ (0x202UL)
- #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA (0x203UL)
- #define HWRM_DBG_READ_DIRECT (0xff10UL)
- #define HWRM_DBG_READ_INDIRECT (0xff11UL)
- #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
- #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
- #define HWRM_DBG_DUMP (0xff14UL)
- #define HWRM_DBG_ERASE_NVM (0xff15UL)
- #define HWRM_DBG_CFG (0xff16UL)
- #define HWRM_DBG_COREDUMP_LIST (0xff17UL)
- #define HWRM_DBG_COREDUMP_INITIATE (0xff18UL)
- #define HWRM_DBG_COREDUMP_RETRIEVE (0xff19UL)
- #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
- #define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
- #define HWRM_NVM_FLUSH (0xfff0UL)
- #define HWRM_NVM_GET_VARIABLE (0xfff1UL)
- #define HWRM_NVM_SET_VARIABLE (0xfff2UL)
- #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
- #define HWRM_NVM_MODIFY (0xfff4UL)
- #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
- #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
- #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
- #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
- #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
- #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
- #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
- #define HWRM_NVM_RAW_DUMP (0xfffcUL)
- #define HWRM_NVM_READ (0xfffdUL)
- #define HWRM_NVM_WRITE (0xfffeUL)
- #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS (0x0UL)
- #define HWRM_ERR_CODE_FAIL (0x1UL)
- #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
- #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
- #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
- #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
- #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-
-/* Port Tx Statistics Formats (408 bytes) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* Port Rx Statistics Formats (528 bytes) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* VXLAN IPv4 encapsulation structure (16 bytes) */
-struct hwrm_vxlan_ipv4_hdr {
- u8 ver_hlen;
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
- u8 tos;
- __be16 ip_id;
- __be16 flags_frag_offset;
- u8 ttl;
- u8 protocol;
- __be32 src_ip_addr;
- __be32 dest_ip_addr;
-};
-
-/* VXLAN IPv6 encapsulation structure (32 bytes) */
-struct hwrm_vxlan_ipv6_hdr {
- __be32 ver_tc_flow_label;
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
- __be16 payload_len;
- u8 next_hdr;
- u8 ttl;
- __be32 src_ip_addr[4];
- __be32 dest_ip_addr[4];
-};
-
-/* VXLAN encapsulation structure (72 bytes) */
-struct hwrm_cfa_encap_data_vxlan {
- u8 src_mac_addr[6];
- __le16 unused_0;
- u8 dst_mac_addr[6];
- u8 num_vlan_tags;
- u8 unused_1;
- __be16 ovlan_tpid;
- __be16 ovlan_tci;
- __be16 ivlan_tpid;
- __be16 ivlan_tci;
- __le32 l3[10];
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
- __be16 src_port;
- __be16 dst_port;
- __be32 vni;
-};
-
-/* Periodic Statistics Context DMA to host (160 bytes) */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
-
-/* Structure data header (16 bytes) */
-struct hwrm_struct_hdr {
- __le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- __le16 len;
- u8 version;
- u8 count;
- __le16 subtype;
- __le16 next_offset;
- #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
- __le16 unused_0[3];
-};
-
-/* DCBX Application configuration structure (1057) (8 bytes) */
-struct hwrm_struct_data_dcbx_app {
- __be16 protocol_id;
- u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
- u8 priority;
- u8 valid;
- u8 unused_0[3];
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
};
-#endif
+#endif /* _BNXT_HSI_H_ */
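
A minimal sketch of how a caller might drive the self-test structures above, assuming the driver's usual request plumbing (bnxt_hwrm_cmd_hdr_init() and hwrm_send_message(), both visible elsewhere in this patch); the function name is illustrative:

	static int example_run_selftests(struct bnxt *bp)
	{
		struct hwrm_selftest_exec_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
		/* Request the NVM and link tests using the flag bits above */
		req.flags = SELFTEST_EXEC_REQ_FLAGS_NVM_TEST |
			    SELFTEST_EXEC_REQ_FLAGS_LINK_TEST;
		return hwrm_send_message(bp, &req, sizeof(req),
					 HWRM_CMD_TIMEOUT);
	}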
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ee18660bc33..d87faad901fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
return -EINVAL;
}
- if (vf_id >= bp->pf.max_vfs) {
+ if (vf_id >= bp->pf.active_vfs) {
netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
return -EINVAL;
}
@@ -135,7 +135,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
ivi->vf = vf_id;
vf = &bp->pf.vf[vf_id];
- memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ if (is_valid_ether_addr(vf->mac_addr))
+ memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ else
+ memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
ivi->vlan = vf->vlan;
@@ -416,29 +419,126 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-/* only call by PF to reserve resources for VF */
+/* Only called by the PF to reserve resources for VFs; returns the actual
+ * number of VFs configured, or < 0 on error.
+ */
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_vf_resource_cfg_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
+ u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int i, rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+
+ vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+ else
+ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
+ vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+ vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
+ vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+ req.min_rsscos_ctx = cpu_to_le16(1);
+ req.max_rsscos_ctx = cpu_to_le16(1);
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
+ req.min_cmpl_rings = cpu_to_le16(1);
+ req.min_tx_rings = cpu_to_le16(1);
+ req.min_rx_rings = cpu_to_le16(1);
+ req.min_l2_ctxs = cpu_to_le16(1);
+ req.min_vnics = cpu_to_le16(1);
+ req.min_stat_ctx = cpu_to_le16(1);
+ req.min_hw_ring_grps = cpu_to_le16(1);
+ } else {
+ vf_cp_rings /= num_vfs;
+ vf_tx_rings /= num_vfs;
+ vf_rx_rings /= num_vfs;
+ vf_vnics /= num_vfs;
+ vf_stat_ctx /= num_vfs;
+ vf_ring_grps /= num_vfs;
+
+ req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.min_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.min_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.min_l2_ctxs = cpu_to_le16(4);
+ req.min_vnics = cpu_to_le16(vf_vnics);
+ req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ }
+ req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.max_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.max_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.max_l2_ctxs = cpu_to_le16(4);
+ req.max_vnics = cpu_to_le16(vf_vnics);
+ req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < num_vfs; i++) {
+ req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc) {
+ rc = -ENOMEM;
+ break;
+ }
+ pf->active_vfs = i + 1;
+ pf->vf[i].fw_fid = pf->first_vf_id + i;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (pf->active_vfs) {
+ u16 n = 1;
+
+ if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
+ n = pf->active_vfs;
+
+ hw_resc->max_tx_rings -= vf_tx_rings * n;
+ hw_resc->max_rx_rings -= vf_rx_rings * n;
+ hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
+ hw_resc->max_cp_rings -= vf_cp_rings * n;
+ hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+ hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
+ hw_resc->max_vnics -= vf_vnics * n;
+
+ rc = pf->active_vfs;
+ }
+ return rc;
+}
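
In the non-minimal branch above, the resources left over after the PF's own usage are split across VFs with plain integer division, so any remainder stays with the PF. A worked example with assumed figures:

	/* Assumed: hw_resc->max_tx_rings = 64, bp->tx_nr_rings = 8.
	 * num_vfs = 7: vf_tx_rings = (64 - 8) / 7 = 8 rings per VF.
	 * num_vfs = 6: vf_tx_rings = 56 / 6 = 9 rings per VF, with the
	 * remainder of 2 rings left unreserved.
	 */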
+
+/* Only called by the PF to reserve resources for VFs; returns the actual
+ * number of VFs configured, or < 0 on error.
+ */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
u32 rc = 0, mtu, i;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
- u16 vf_ring_grps;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u16 vf_ring_grps, max_stat_ctxs;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
int total_vf_tx_rings = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ max_stat_ctxs = hw_resc->max_stat_ctxs;
+
	/* Remaining rings are distributed equally amongst VFs for now */
- vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
- vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+ vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+ vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
+ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
num_vfs;
else
- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
- vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
- vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
- vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
+ num_vfs;
+ vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+ vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+ vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
@@ -485,22 +585,34 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (!rc) {
- pf->max_tx_rings -= total_vf_tx_rings;
- pf->max_rx_rings -= vf_rx_rings * num_vfs;
- pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
- pf->max_cp_rings -= vf_cp_rings * num_vfs;
- pf->max_rsscos_ctxs -= num_vfs;
- pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
- pf->max_vnics -= vf_vnics * num_vfs;
+ if (rc)
+ rc = -ENOMEM;
+ if (pf->active_vfs) {
+ hw_resc->max_tx_rings -= total_vf_tx_rings;
+ hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
+ hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+ hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
+ hw_resc->max_rsscos_ctxs -= num_vfs;
+ hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+ hw_resc->max_vnics -= vf_vnics * num_vfs;
+ rc = pf->active_vfs;
}
return rc;
}
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+{
+ if (bp->flags & BNXT_FLAG_NEW_RM)
+ return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+ else
+ return bnxt_hwrm_func_cfg(bp, num_vfs);
+}
+
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
int rc = 0, vfs_supported;
int min_rx_rings, min_tx_rings, min_rss_ctxs;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
@@ -510,8 +622,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
- avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
- avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
avail_cp = min_t(int, avail_cp, avail_stat);
while (vfs_supported) {
@@ -520,23 +632,24 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rss_ctxs = vfs_supported;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+ if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
min_rx_rings)
rx_ok = 1;
} else {
- if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+ if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
min_rx_rings)
rx_ok = 1;
}
- if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+ if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
avail_cp < min_rx_rings)
rx_ok = 0;
- if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+ if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
avail_cp >= min_tx_rings)
tx_ok = 1;
- if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+ if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
+ min_rss_ctxs)
rss_ok = 1;
if (tx_ok && rx_ok && rss_ok)
@@ -561,9 +674,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out1;
/* Reserve resources for VFs */
- rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
- if (rc)
- goto err_out2;
+ rc = bnxt_func_cfg(bp, *num_vfs);
+ if (rc != *num_vfs) {
+ if (rc <= 0) {
+ netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+ *num_vfs = 0;
+ goto err_out2;
+ }
+ netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
+ *num_vfs = rc;
+ }
/* Register buffers for VFs */
rc = bnxt_hwrm_func_buf_rgtr(bp);
@@ -766,17 +886,51 @@ exec_fwd_resp_exit:
return rc;
}
+static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
+ struct hwrm_func_vf_cfg_input *req =
+ (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
+
+	/* Only allow the VF to set a valid MAC address if the PF-assigned
+	 * MAC address is zero
+ */
+ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+ if (is_valid_ether_addr(req->dflt_mac_addr) &&
+ !is_valid_ether_addr(vf->mac_addr)) {
+ ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ }
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+ }
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+}
+
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
struct hwrm_cfa_l2_filter_alloc_input *req =
(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+ bool mac_ok = false;
- if (!is_valid_ether_addr(vf->mac_addr) ||
- ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+	/* The requested MAC address must first match the PF-assigned MAC
+	 * address, if one is set. Otherwise, it must match the stored VF
+	 * MAC address (firmware spec >= 1.2.2).
+ */
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ mac_ok = true;
+ } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
+ if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
+ mac_ok = true;
+ } else if (bp->hwrm_spec_code < 0x10202) {
+ mac_ok = true;
+ } else {
+ mac_ok = true;
+ }
+ if (mac_ok)
return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
- else
- return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
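
The precedence above can be restated as a standalone predicate; a hedged sketch (the helper name is illustrative, the fields and comparisons are the ones used by the real checks, and both trailing spec-code branches collapse to "allow"):

	static bool example_vf_mac_allowed(struct bnxt_vf_info *vf,
					   const u8 *l2_addr)
	{
		if (is_valid_ether_addr(vf->mac_addr))	/* PF-assigned wins */
			return ether_addr_equal(l2_addr, vf->mac_addr);
		if (is_valid_ether_addr(vf->vf_mac_addr))	/* VF-stored next */
			return ether_addr_equal(l2_addr, vf->vf_mac_addr);
		return true;	/* nothing stored: allow, whatever the spec level */
	}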
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
@@ -838,6 +992,9 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
u32 req_type = le16_to_cpu(encap_req->req_type);
switch (req_type) {
+ case HWRM_FUNC_VF_CFG:
+ rc = bnxt_vf_store_mac(bp, vf);
+ break;
case HWRM_CFA_L2_FILTER_ALLOC:
rc = bnxt_vf_validate_set_mac(bp, vf);
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 3d201d7324bd..fbe6e208e17b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -43,7 +43,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
}
/* Is dev a VF-rep? */
- if (dev != pf_bp->dev)
+ if (bnxt_dev_is_vf_rep(dev))
return bnxt_vf_rep_get_fid(dev);
bp = netdev_priv(dev);
@@ -54,12 +54,10 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
struct bnxt_tc_actions *actions,
const struct tc_action *tc_act)
{
- int ifindex = tcf_mirred_ifindex(tc_act);
- struct net_device *dev;
+ struct net_device *dev = tcf_mirred_dev(tc_act);
- dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
if (!dev) {
- netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
+ netdev_info(bp->dev, "no dev in mirred action");
return -EINVAL;
}
@@ -148,9 +146,6 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
}
}
- if (rc)
- return rc;
-
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
/* dst_fid is PF's fid */
@@ -164,7 +159,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
}
}
- return rc;
+ return 0;
}
#define GET_KEY(flow_cmd, key_type) \
@@ -421,7 +416,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}
/* If all IP and L4 fields are wildcarded then this is an L2 flow */
- if (is_wildcard(&l3_mask, sizeof(l3_mask)) &&
+ if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
} else {
@@ -1417,11 +1412,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
void *flow_node;
int rc, i;
- rc = rhashtable_walk_start(iter);
- if (rc && rc != -EAGAIN) {
- i = 0;
- goto done;
- }
+ rhashtable_walk_start(iter);
rc = 0;
for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
@@ -1483,9 +1474,6 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
{
int rc = 0;
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 69186d188c43..26290403f38f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -124,7 +124,8 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
struct bnxt *bp = vf_rep->bp;
int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;
- if (!bnxt_tc_flower_enabled(vf_rep->bp) || !tc_can_offload(bp->dev))
+ if (!bnxt_tc_flower_enabled(vf_rep->bp) ||
+ !tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -241,6 +242,11 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};
+bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+ return dev->netdev_ops == &bnxt_vf_rep_netdev_ops;
+}
+
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
* happens under the rtnl_lock() this routine is safe
@@ -376,6 +382,26 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
ether_addr_copy(dev->dev_addr, dev->perm_addr);
}
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+ struct pci_dev *pdev = bp->pdev;
+ int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ u32 dw;
+
+ if (!pos) {
+		netdev_info(bp->dev, "Unable to read adapter's DSN");
+ return -EOPNOTSUPP;
+ }
+
+ /* DSN (two dw) is at an offset of 4 from the cap pos */
+ pos += 4;
+ pci_read_config_dword(pdev, pos, &dw);
+ put_unaligned_le32(dw, &dsn[0]);
+ pci_read_config_dword(pdev, pos + 4, &dw);
+ put_unaligned_le32(dw, &dsn[4]);
+ return 0;
+}
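
The two config-space dwords read above form the 64-bit Device Serial Number defined by the PCIe extended capability; a hedged usage sketch (the print format is an assumption):

	u8 dsn[8];

	if (!bnxt_pcie_dsn_get(bp, dsn))
		/* dsn[] now holds the serial in little-endian byte order */
		netdev_info(bp->dev, "DSN %8phN\n", dsn);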
+
static int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -440,6 +466,11 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
}
}
+ /* Read the adapter's DSN to use as the eswitch switch_id */
+ rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+ if (rc)
+ goto err;
+
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index fb06bbe70e42..38b9a75ad724 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -28,6 +28,7 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
return bp->pf.vf[vf_rep->vf_idx].fw_fid;
}
+bool bnxt_dev_is_vf_rep(struct net_device *dev);
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
@@ -54,5 +55,10 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
return 0;
}
+
+static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+ return false;
+}
#endif /* CONFIG_BNXT_SRIOV */
#endif /* BNXT_VFR_H */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 261e5847557a..1389ab5e05df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -96,6 +96,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
xdp.data = *data_ptr;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = *data_ptr + *len;
+ xdp.rxq = &rxr->xdp_rxq;
orig_data = xdp.data;
mapping = rx_buf->mapping - bp->rx_dma_offset;
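
xdp.rxq must point at an xdp_rxq_info that was registered when the ring was brought up; that presumably happens elsewhere in the driver along these lines (a sketch; ring_nr is illustrative):

	/* Sketch: register once per RX ring during ring setup. */
	rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, ring_nr);
	if (rc < 0)
		return rc;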
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 24b4f4ceceef..b1e35a9accf1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2527,9 +2527,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
spin_unlock_irq(&priv->lock);
/* Link UP/DOWN event */
- if (status & UMAC_IRQ_LINK_EVENT)
- phy_mac_interrupt(priv->dev->phydev,
- !!(status & UMAC_IRQ_LINK_UP));
+ if (status & UMAC_IRQ_LINK_EVENT) {
+ priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+ phy_mac_interrupt(priv->dev->phydev);
+ }
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8995cfefbfcf..a77ee2f8fb8d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3227,7 +3227,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
return 0;
}
-#define NVRAM_CMD_TIMEOUT 5000
+#define NVRAM_CMD_TIMEOUT 10000
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
@@ -14789,7 +14789,7 @@ static void tg3_get_5717_nvram_info(struct tg3 *tp)
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
- u32 nvcfg1, nvmpinstrp;
+ u32 nvcfg1, nvmpinstrp, nv_status;
nvcfg1 = tr32(NVRAM_CFG1);
nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
@@ -14801,6 +14801,23 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
}
switch (nvmpinstrp) {
+ case FLASH_5762_MX25L_100:
+ case FLASH_5762_MX25L_200:
+ case FLASH_5762_MX25L_400:
+ case FLASH_5762_MX25L_800:
+ case FLASH_5762_MX25L_160_320:
+ tp->nvram_pagesize = 4096;
+ tp->nvram_jedecnum = JEDEC_MACRONIX;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+ tg3_flag_set(tp, FLASH);
+ nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
+ tp->nvram_size =
+ (1 << (nv_status >> AUTOSENSE_DEVID &
+ AUTOSENSE_DEVID_MASK)
+ << AUTOSENSE_SIZE_IN_MB);
+ return;
+
case FLASH_5762_EEPROM_HD:
nvmpinstrp = FLASH_5720_EEPROM_HD;
break;
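
The Macronix branch computes the flash size from the autosense status word: the 3-bit device-id field selects a power of two, which is then scaled by 2^17. Worked values under that reading (the masks are defined in tg3.h below):

	/* devid = (nv_status >> AUTOSENSE_DEVID) & AUTOSENSE_DEVID_MASK;
	 * nvram_size = 1 << devid << AUTOSENSE_SIZE_IN_MB, so:
	 * devid 2 -> 512 KiB, devid 3 -> 1 MiB, devid 4 -> 2 MiB.
	 */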
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1f0271fa7c74..47f51cc0566d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1863,7 +1863,7 @@
#define NVRAM_STAT 0x00007004
#define NVRAM_WRDATA 0x00007008
#define NVRAM_ADDR 0x0000700c
-#define NVRAM_ADDR_MSK 0x00ffffff
+#define NVRAM_ADDR_MSK 0x07ffffff
#define NVRAM_RDDATA 0x00007010
#define NVRAM_CFG1 0x00007014
#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001
@@ -1945,6 +1945,11 @@
#define FLASH_5720_EEPROM_LD 0x00000003
#define FLASH_5762_EEPROM_HD 0x02000001
#define FLASH_5762_EEPROM_LD 0x02000003
+#define FLASH_5762_MX25L_100 0x00800000
+#define FLASH_5762_MX25L_200 0x00800002
+#define FLASH_5762_MX25L_400 0x00800001
+#define FLASH_5762_MX25L_800 0x00800003
+#define FLASH_5762_MX25L_160_320 0x03800002
#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
@@ -2009,7 +2014,11 @@
/* 0x702c unused */
#define NVRAM_ADDR_LOCKOUT 0x00007030
-/* 0x7034 --> 0x7500 unused */
+#define NVRAM_AUTOSENSE_STATUS 0x00007038
+#define AUTOSENSE_DEVID 0x00000010
+#define AUTOSENSE_DEVID_MASK 0x00000007
+#define AUTOSENSE_SIZE_IN_MB 17
+/* 0x703c --> 0x7500 unused */
#define OTP_MODE 0x00007500
#define OTP_MODE_OTP_THRU_GRC 0x00000001
@@ -3378,6 +3387,7 @@ struct tg3 {
#define JEDEC_ST 0x20
#define JEDEC_SAIFUN 0x4f
#define JEDEC_SST 0xbf
+#define JEDEC_MACRONIX 0xc2
#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB
#define ATMEL_AT24C02_PAGE_SIZE (8)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c93f3a2dc6c1..86659823b259 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
+#include <linux/interrupt.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -164,14 +165,38 @@
#define GEM_DCFG5 0x0290 /* Design Config 5 */
#define GEM_DCFG6 0x0294 /* Design Config 6 */
#define GEM_DCFG7 0x0298 /* Design Config 7 */
+#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
+/* Screener Type 2 match registers */
+#define GEM_SCRT2 0x540
+
+/* EtherType registers */
+#define GEM_ETHT 0x06E0
+
+/* Type 2 compare registers */
+#define GEM_T2CMPW0 0x0700
+#define GEM_T2CMPW1 0x0704
+#define T2CMP_OFST(t2idx) (t2idx * 2)
+
+/* type 2 compare registers
+ * each location requires 3 compare regs
+ */
+#define GEM_IP4SRC_CMP(idx) (idx * 3)
+#define GEM_IP4DST_CMP(idx) (idx * 3 + 1)
+#define GEM_PORT_CMP(idx) (idx * 3 + 2)
+
+/* Which screening type 2 EtherType register will be used (0 - 7) */
+#define SCRT2_ETHT 0
+
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
+#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
+#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
@@ -455,6 +480,16 @@
#define GEM_DAW64_OFFSET 23
#define GEM_DAW64_SIZE 1
+/* Bitfields in DCFG8. */
+#define GEM_T1SCR_OFFSET 24
+#define GEM_T1SCR_SIZE 8
+#define GEM_T2SCR_OFFSET 16
+#define GEM_T2SCR_SIZE 8
+#define GEM_SCR2ETH_OFFSET 8
+#define GEM_SCR2ETH_SIZE 8
+#define GEM_SCR2CMP_OFFSET 0
+#define GEM_SCR2CMP_SIZE 8
+
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
#define GEM_SUBNSINCR_SIZE 16
@@ -483,6 +518,66 @@
#define GEM_RXTSMODE_OFFSET 4 /* RX Descriptor Timestamp Insertion mode */
#define GEM_RXTSMODE_SIZE 2
+/* Bitfields in SCRT2 */
+#define GEM_QUEUE_OFFSET 0 /* Queue Number */
+#define GEM_QUEUE_SIZE 4
+#define GEM_VLANPR_OFFSET 4 /* VLAN Priority */
+#define GEM_VLANPR_SIZE 3
+#define GEM_VLANEN_OFFSET 8 /* VLAN Enable */
+#define GEM_VLANEN_SIZE 1
+#define GEM_ETHT2IDX_OFFSET 9 /* Index to screener type 2 EtherType register */
+#define GEM_ETHT2IDX_SIZE 3
+#define GEM_ETHTEN_OFFSET 12 /* EtherType Enable */
+#define GEM_ETHTEN_SIZE 1
+#define GEM_CMPA_OFFSET 13 /* Compare A - Index to screener type 2 Compare register */
+#define GEM_CMPA_SIZE 5
+#define GEM_CMPAEN_OFFSET 18 /* Compare A Enable */
+#define GEM_CMPAEN_SIZE 1
+#define GEM_CMPB_OFFSET 19 /* Compare B - Index to screener type 2 Compare register */
+#define GEM_CMPB_SIZE 5
+#define GEM_CMPBEN_OFFSET 24 /* Compare B Enable */
+#define GEM_CMPBEN_SIZE 1
+#define GEM_CMPC_OFFSET 25 /* Compare C - Index to screener type 2 Compare register */
+#define GEM_CMPC_SIZE 5
+#define GEM_CMPCEN_OFFSET 30 /* Compare C Enable */
+#define GEM_CMPCEN_SIZE 1
+
+/* Bitfields in ETHT */
+#define GEM_ETHTCMP_OFFSET 0 /* EtherType compare value */
+#define GEM_ETHTCMP_SIZE 16
+
+/* Bitfields in T2CMPW0 */
+#define GEM_T2CMP_OFFSET 16 /* 0xFFFF0000 compare value */
+#define GEM_T2CMP_SIZE 16
+#define GEM_T2MASK_OFFSET 0 /* 0x0000FFFF compare value or mask */
+#define GEM_T2MASK_SIZE 16
+
+/* Bitfields in T2CMPW1 */
+#define GEM_T2DISMSK_OFFSET 9 /* disable mask */
+#define GEM_T2DISMSK_SIZE 1
+#define GEM_T2CMPOFST_OFFSET 7 /* compare offset */
+#define GEM_T2CMPOFST_SIZE 2
+#define GEM_T2OFST_OFFSET 0 /* offset value */
+#define GEM_T2OFST_SIZE 7
+
+/* Offset for screener type 2 compare values (T2CMPOFST).
+ * Note the offset is applied after the specified point,
+ * e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
+ * of 12 bytes from this would be the source IP address in an IP header
+ */
+#define GEM_T2COMPOFST_SOF 0
+#define GEM_T2COMPOFST_ETYPE 1
+#define GEM_T2COMPOFST_IPHDR 2
+#define GEM_T2COMPOFST_TCPUDP 3
+
+/* offset from EtherType to IP address */
+#define ETYPE_SRCIP_OFFSET 12
+#define ETYPE_DSTIP_OFFSET 16
+
+/* offset from IP header to port */
+#define IPHDR_SRCPORT_OFFSET 0
+#define IPHDR_DSTPORT_OFFSET 2
+
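Putting these definitions together, matching a destination TCP/UDP port anchors the comparer at the end of the IP header and compares 16 bits at offset 2. A hedged sketch using the driver's existing GEM_BFINS insert macro (idx and the screener-enable wiring are assumptions):

	u16 dst_port = 80;	/* assumed example value */
	u32 w0 = 0, w1 = 0;

	w0 = GEM_BFINS(T2CMP, dst_port, w0);	/* compare value, upper 16 bits */
	w0 = GEM_BFINS(T2MASK, 0xffff, w0);	/* compare all 16 bits */
	w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
	w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
	gem_writel_n(bp, T2CMPW0, T2CMP_OFST(idx), w0);
	gem_writel_n(bp, T2CMPW1, T2CMP_OFST(idx), w1);
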
/* Transmit DMA buffer descriptor Word 1 */
#define GEM_DMA_TXVALID_OFFSET 23 /* timestamp has been captured in the Buffer Descriptor */
#define GEM_DMA_TXVALID_SIZE 1
@@ -583,6 +678,8 @@
#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
+#define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + idx * 4)
+#define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
@@ -920,13 +1017,42 @@ static const struct gem_statistic gem_statistics[] = {
#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
+#define QUEUE_STAT_TITLE(title) { \
+ .stat_string = title, \
+}
+
+/* per-queue statistics, each must be of unsigned long type */
+struct queue_stats {
+ union {
+ unsigned long first;
+ unsigned long rx_packets;
+ };
+ unsigned long rx_bytes;
+ unsigned long rx_dropped;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_dropped;
+};
+
+static const struct gem_statistic queue_statistics[] = {
+ QUEUE_STAT_TITLE("rx_packets"),
+ QUEUE_STAT_TITLE("rx_bytes"),
+ QUEUE_STAT_TITLE("rx_dropped"),
+ QUEUE_STAT_TITLE("tx_packets"),
+ QUEUE_STAT_TITLE("tx_bytes"),
+ QUEUE_STAT_TITLE("tx_dropped"),
+};
+
+#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics)
+
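The anonymous union above gives the first counter a stable name so the block can be walked as a flat array when filling the ethtool buffer; a hedged sketch (q and the destination array follow the macb struct further below):

	unsigned long *stat = &queue->stats.first;
	unsigned int i;

	for (i = 0; i < QUEUE_STATS_LEN; i++)
		bp->ethtool_stats[GEM_STATS_LEN + q * QUEUE_STATS_LEN + i] =
			stat[i];
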
struct macb;
+struct macb_queue;
struct macb_or_gem_ops {
int (*mog_alloc_rx_buffers)(struct macb *bp);
void (*mog_free_rx_buffers)(struct macb *bp);
void (*mog_init_rings)(struct macb *bp);
- int (*mog_rx)(struct macb *bp, int budget);
+ int (*mog_rx)(struct macb_queue *queue, int budget);
};
/* MACB-PTP interface: adapt to platform needs. */
@@ -968,6 +1094,9 @@ struct macb_queue {
unsigned int IMR;
unsigned int TBQP;
unsigned int TBQPH;
+ unsigned int RBQS;
+ unsigned int RBQP;
+ unsigned int RBQPH;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
@@ -975,6 +1104,16 @@ struct macb_queue {
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t rx_buffers_dma;
+ unsigned int rx_tail;
+ unsigned int rx_prepared_head;
+ struct macb_dma_desc *rx_ring;
+ struct sk_buff **rx_skbuff;
+ void *rx_buffers;
+ struct napi_struct napi;
+ struct queue_stats stats;
+
#ifdef CONFIG_MACB_USE_HWSTAMP
struct work_struct tx_ts_task;
unsigned int tx_ts_head, tx_ts_tail;
@@ -982,6 +1121,16 @@ struct macb_queue {
#endif
};
+struct ethtool_rx_fs_item {
+ struct ethtool_rx_flow_spec fs;
+ struct list_head list;
+};
+
+struct ethtool_rx_fs_list {
+ struct list_head list;
+ unsigned int count;
+};
+
struct macb {
void __iomem *regs;
bool native_io;
@@ -990,11 +1139,6 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
- unsigned int rx_tail;
- unsigned int rx_prepared_head;
- struct macb_dma_desc *rx_ring;
- struct sk_buff **rx_skbuff;
- void *rx_buffers;
size_t rx_buffer_size;
unsigned int rx_ring_size;
@@ -1011,15 +1155,11 @@ struct macb {
struct clk *tx_clk;
struct clk *rx_clk;
struct net_device *dev;
- struct napi_struct napi;
union {
struct macb_stats macb;
struct gem_stats gem;
} hw_stats;
- dma_addr_t rx_ring_dma;
- dma_addr_t rx_buffers_dma;
-
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
@@ -1032,7 +1172,6 @@ struct macb {
unsigned int dma_burst_length;
phy_interface_t phy_interface;
- struct gpio_desc *reset_gpio;
/* AT91RM9200 transmit */
struct sk_buff *skb; /* holds skb until xmit interrupt completes */
@@ -1040,7 +1179,7 @@ struct macb {
int skb_length; /* saved skb length for pci_unmap_single */
unsigned int max_tx_length;
- u64 ethtool_stats[GEM_STATS_LEN];
+ u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
unsigned int rx_frm_len_mask;
unsigned int jumbo_max_len;
@@ -1057,6 +1196,13 @@ struct macb {
struct ptp_clock_info ptp_clock_info;
struct tsu_incr tsu_incr;
struct hwtstamp_config tstamp_config;
+
+	/* RX queue filter rule set */
+ struct ethtool_rx_fs_list rx_fs_list;
+ spinlock_t rx_fs_lock;
+ unsigned int max_tuples;
+
+ struct tasklet_struct hresp_err_tasklet;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 72a67f74b97b..e84afcf1ecb5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -194,17 +194,17 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
return index & (bp->rx_ring_size - 1);
}
-static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
- index = macb_rx_ring_wrap(bp, index);
- index = macb_adj_dma_desc_idx(bp, index);
- return &bp->rx_ring[index];
+ index = macb_rx_ring_wrap(queue->bp, index);
+ index = macb_adj_dma_desc_idx(queue->bp, index);
+ return &queue->rx_ring[index];
}
-static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
- return bp->rx_buffers + bp->rx_buffer_size *
- macb_rx_ring_wrap(bp, index);
+ return queue->rx_buffers + queue->bp->rx_buffer_size *
+ macb_rx_ring_wrap(queue->bp, index);
}
/* I/O accessors */
@@ -759,7 +759,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->dev->stats.tx_packets++;
+ queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
+ queue->stats.tx_bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -859,7 +861,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->dev->stats.tx_packets++;
+ queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
+ queue->stats.tx_bytes += skb->len;
}
/* Now we can safely release resources */
@@ -881,24 +885,25 @@ static void macb_tx_interrupt(struct macb_queue *queue)
netif_wake_subqueue(bp->dev, queue_index);
}
-static void gem_rx_refill(struct macb *bp)
+static void gem_rx_refill(struct macb_queue *queue)
{
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
+ struct macb *bp = queue->bp;
struct macb_dma_desc *desc;
- while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
- bp->rx_ring_size) > 0) {
- entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
+ while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
+ bp->rx_ring_size) > 0) {
+ entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */
rmb();
- bp->rx_prepared_head++;
- desc = macb_rx_desc(bp, entry);
+ queue->rx_prepared_head++;
+ desc = macb_rx_desc(queue, entry);
- if (!bp->rx_skbuff[entry]) {
+ if (!queue->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
if (unlikely(!skb)) {
@@ -916,7 +921,7 @@ static void gem_rx_refill(struct macb *bp)
break;
}
- bp->rx_skbuff[entry] = skb;
+ queue->rx_skbuff[entry] = skb;
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
@@ -934,18 +939,18 @@ static void gem_rx_refill(struct macb *bp)
/* Make descriptor updates visible to hardware */
wmb();
- netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
- bp->rx_prepared_head, bp->rx_tail);
+ netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
+ queue, queue->rx_prepared_head, queue->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
+static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
unsigned int end)
{
unsigned int frag;
for (frag = begin; frag != end; frag++) {
- struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+ struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
}
@@ -959,8 +964,9 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
*/
}
-static int gem_rx(struct macb *bp, int budget)
+static int gem_rx(struct macb_queue *queue, int budget)
{
+ struct macb *bp = queue->bp;
unsigned int len;
unsigned int entry;
struct sk_buff *skb;
@@ -972,8 +978,8 @@ static int gem_rx(struct macb *bp, int budget)
dma_addr_t addr;
bool rxused;
- entry = macb_rx_ring_wrap(bp, bp->rx_tail);
- desc = macb_rx_desc(bp, entry);
+ entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+ desc = macb_rx_desc(queue, entry);
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -985,24 +991,26 @@ static int gem_rx(struct macb *bp, int budget)
if (!rxused)
break;
- bp->rx_tail++;
+ queue->rx_tail++;
count++;
if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
netdev_err(bp->dev,
"not whole frame pointed by descriptor\n");
bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
break;
}
- skb = bp->rx_skbuff[entry];
+ skb = queue->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(bp->dev,
"inconsistent Rx descriptor chain\n");
bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
break;
}
/* now everything is ready for receiving packet */
- bp->rx_skbuff[entry] = NULL;
+ queue->rx_skbuff[entry] = NULL;
len = ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -1019,7 +1027,9 @@ static int gem_rx(struct macb *bp, int budget)
skb->ip_summed = CHECKSUM_UNNECESSARY;
bp->dev->stats.rx_packets++;
+ queue->stats.rx_packets++;
bp->dev->stats.rx_bytes += skb->len;
+ queue->stats.rx_bytes += skb->len;
gem_ptp_do_rxstamp(bp, skb, desc);
@@ -1035,12 +1045,12 @@ static int gem_rx(struct macb *bp, int budget)
netif_receive_skb(skb);
}
- gem_rx_refill(bp);
+ gem_rx_refill(queue);
return count;
}
-static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
unsigned int last_frag)
{
unsigned int len;
@@ -1048,8 +1058,9 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
unsigned int offset;
struct sk_buff *skb;
struct macb_dma_desc *desc;
+ struct macb *bp = queue->bp;
- desc = macb_rx_desc(bp, last_frag);
+ desc = macb_rx_desc(queue, last_frag);
len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
@@ -1068,7 +1079,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
if (!skb) {
bp->dev->stats.rx_dropped++;
for (frag = first_frag; ; frag++) {
- desc = macb_rx_desc(bp, frag);
+ desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
@@ -1096,10 +1107,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- macb_rx_buffer(bp, frag),
+ macb_rx_buffer(queue, frag),
frag_len);
offset += bp->rx_buffer_size;
- desc = macb_rx_desc(bp, frag);
+ desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
@@ -1121,32 +1132,34 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
return 0;
}
-static inline void macb_init_rx_ring(struct macb *bp)
+static inline void macb_init_rx_ring(struct macb_queue *queue)
{
+ struct macb *bp = queue->bp;
dma_addr_t addr;
struct macb_dma_desc *desc = NULL;
int i;
- addr = bp->rx_buffers_dma;
+ addr = queue->rx_buffers_dma;
for (i = 0; i < bp->rx_ring_size; i++) {
- desc = macb_rx_desc(bp, i);
+ desc = macb_rx_desc(queue, i);
macb_set_addr(bp, desc, addr);
desc->ctrl = 0;
addr += bp->rx_buffer_size;
}
desc->addr |= MACB_BIT(RX_WRAP);
- bp->rx_tail = 0;
+ queue->rx_tail = 0;
}
-static int macb_rx(struct macb *bp, int budget)
+static int macb_rx(struct macb_queue *queue, int budget)
{
+ struct macb *bp = queue->bp;
bool reset_rx_queue = false;
int received = 0;
unsigned int tail;
int first_frag = -1;
- for (tail = bp->rx_tail; budget > 0; tail++) {
- struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+ for (tail = queue->rx_tail; budget > 0; tail++) {
+ struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
u32 ctrl;
/* Make hw descriptor updates visible to CPU */
@@ -1159,7 +1172,7 @@ static int macb_rx(struct macb *bp, int budget)
if (ctrl & MACB_BIT(RX_SOF)) {
if (first_frag != -1)
- discard_partial_frame(bp, first_frag, tail);
+ discard_partial_frame(queue, first_frag, tail);
first_frag = tail;
}
@@ -1171,7 +1184,7 @@ static int macb_rx(struct macb *bp, int budget)
continue;
}
- dropped = macb_rx_frame(bp, first_frag, tail);
+ dropped = macb_rx_frame(queue, first_frag, tail);
first_frag = -1;
if (unlikely(dropped < 0)) {
reset_rx_queue = true;
@@ -1195,8 +1208,8 @@ static int macb_rx(struct macb *bp, int budget)
ctrl = macb_readl(bp, NCR);
macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
- macb_init_rx_ring(bp);
- macb_writel(bp, RBQP, bp->rx_ring_dma);
+ macb_init_rx_ring(queue);
+ queue_writel(queue, RBQP, queue->rx_ring_dma);
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
@@ -1205,16 +1218,17 @@ static int macb_rx(struct macb *bp, int budget)
}
if (first_frag != -1)
- bp->rx_tail = first_frag;
+ queue->rx_tail = first_frag;
else
- bp->rx_tail = tail;
+ queue->rx_tail = tail;
return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
- struct macb *bp = container_of(napi, struct macb, napi);
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
+ struct macb *bp = queue->bp;
int work_done;
u32 status;
@@ -1224,7 +1238,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
- work_done = bp->macbgem_ops.mog_rx(bp, budget);
+ work_done = bp->macbgem_ops.mog_rx(queue, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
@@ -1232,10 +1246,10 @@ static int macb_poll(struct napi_struct *napi, int budget)
status = macb_readl(bp, RSR);
if (status) {
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(RCOMP));
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
napi_reschedule(napi);
} else {
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IER, MACB_RX_INT_FLAGS);
}
}
@@ -1244,6 +1258,57 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void macb_hresp_error_task(unsigned long data)
+{
+ struct macb *bp = (struct macb *)data;
+ struct net_device *dev = bp->dev;
+ struct macb_queue *queue = bp->queues;
+ unsigned int q;
+ u32 ctrl;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+ ctrl = macb_readl(bp, NCR);
+ ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, ctrl);
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+
+ bp->macbgem_ops.mog_init_rings(bp);
+
+ /* Initialize TX and RX buffers */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH,
+ upper_32_bits(queue->rx_ring_dma));
+#endif
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH,
+ upper_32_bits(queue->tx_ring_dma));
+#endif
+
+ /* Enable interrupts */
+ queue_writel(queue, IER,
+ MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+
+ ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
+ macb_writel(bp, NCR, ctrl);
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+}
+
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -1282,9 +1347,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- if (napi_schedule_prep(&bp->napi)) {
+ if (napi_schedule_prep(&queue->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
- __napi_schedule(&bp->napi);
+ __napi_schedule(&queue->napi);
}
}
@@ -1333,10 +1398,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- /* TODO: Reset the hardware, and maybe move the
- * netdev_err to a lower-priority context as well
- * (work queue?)
- */
+ tasklet_schedule(&bp->hresp_err_tasklet);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1708,38 +1770,44 @@ static void gem_free_rx_buffers(struct macb *bp)
{
struct sk_buff *skb;
struct macb_dma_desc *desc;
+ struct macb_queue *queue;
dma_addr_t addr;
+ unsigned int q;
int i;
- if (!bp->rx_skbuff)
- return;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ if (!queue->rx_skbuff)
+ continue;
- for (i = 0; i < bp->rx_ring_size; i++) {
- skb = bp->rx_skbuff[i];
+ for (i = 0; i < bp->rx_ring_size; i++) {
+ skb = queue->rx_skbuff[i];
- if (!skb)
- continue;
+ if (!skb)
+ continue;
- desc = macb_rx_desc(bp, i);
- addr = macb_get_addr(bp, desc);
+ desc = macb_rx_desc(queue, i);
+ addr = macb_get_addr(bp, desc);
- dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- skb = NULL;
- }
+ dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
- kfree(bp->rx_skbuff);
- bp->rx_skbuff = NULL;
+ kfree(queue->rx_skbuff);
+ queue->rx_skbuff = NULL;
+ }
}
static void macb_free_rx_buffers(struct macb *bp)
{
- if (bp->rx_buffers) {
+ struct macb_queue *queue = &bp->queues[0];
+
+ if (queue->rx_buffers) {
dma_free_coherent(&bp->pdev->dev,
bp->rx_ring_size * bp->rx_buffer_size,
- bp->rx_buffers, bp->rx_buffers_dma);
- bp->rx_buffers = NULL;
+ queue->rx_buffers, queue->rx_buffers_dma);
+ queue->rx_buffers = NULL;
}
}
@@ -1748,11 +1816,12 @@ static void macb_free_consistent(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
+ queue = &bp->queues[0];
bp->macbgem_ops.mog_free_rx_buffers(bp);
- if (bp->rx_ring) {
+ if (queue->rx_ring) {
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
- bp->rx_ring, bp->rx_ring_dma);
- bp->rx_ring = NULL;
+ queue->rx_ring, queue->rx_ring_dma);
+ queue->rx_ring = NULL;
}
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -1768,32 +1837,37 @@ static void macb_free_consistent(struct macb *bp)
static int gem_alloc_rx_buffers(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
int size;
- size = bp->rx_ring_size * sizeof(struct sk_buff *);
- bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
- if (!bp->rx_skbuff)
- return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated %d RX struct sk_buff entries at %p\n",
- bp->rx_ring_size, bp->rx_skbuff);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ size = bp->rx_ring_size * sizeof(struct sk_buff *);
+ queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
+ if (!queue->rx_skbuff)
+ return -ENOMEM;
+ else
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ bp->rx_ring_size, queue->rx_skbuff);
+ }
return 0;
}
static int macb_alloc_rx_buffers(struct macb *bp)
{
+ struct macb_queue *queue = &bp->queues[0];
int size;
size = bp->rx_ring_size * bp->rx_buffer_size;
- bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->rx_buffers_dma, GFP_KERNEL);
- if (!bp->rx_buffers)
+ queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->rx_buffers_dma, GFP_KERNEL);
+ if (!queue->rx_buffers)
return -ENOMEM;
netdev_dbg(bp->dev,
"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+ size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
return 0;
}
@@ -1819,17 +1893,16 @@ static int macb_alloc_consistent(struct macb *bp)
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
- }
-
- size = RX_RING_BYTES(bp);
- bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->rx_ring_dma, GFP_KERNEL);
- if (!bp->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+ size = RX_RING_BYTES(bp);
+ queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->rx_ring_dma, GFP_KERNEL);
+ if (!queue->rx_ring)
+ goto out_err;
+ netdev_dbg(bp->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
+ }
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
@@ -1856,12 +1929,13 @@ static void gem_init_rings(struct macb *bp)
desc->ctrl |= MACB_BIT(TX_WRAP);
queue->tx_head = 0;
queue->tx_tail = 0;
- }
- bp->rx_tail = 0;
- bp->rx_prepared_head = 0;
+ queue->rx_tail = 0;
+ queue->rx_prepared_head = 0;
+
+ gem_rx_refill(queue);
+ }
- gem_rx_refill(bp);
}
static void macb_init_rings(struct macb *bp)
@@ -1869,7 +1943,7 @@ static void macb_init_rings(struct macb *bp)
int i;
struct macb_dma_desc *desc = NULL;
- macb_init_rx_ring(bp);
+ macb_init_rx_ring(&bp->queues[0]);
for (i = 0; i < bp->tx_ring_size; i++) {
desc = macb_tx_desc(&bp->queues[0], i);
@@ -1978,11 +2052,20 @@ static u32 macb_dbw(struct macb *bp)
*/
static void macb_configure_dma(struct macb *bp)
{
+ struct macb_queue *queue;
+ u32 buffer_size;
+ unsigned int q;
u32 dmacfg;
+ buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
- dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
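+ /* Queue 0 takes its RX buffer size from the global DMACFG field;
+ * queues 1..n are programmed through their per-queue RBQS register.
+ */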
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ if (q)
+ queue_writel(queue, RBQS, buffer_size);
+ else
+ dmacfg |= GEM_BF(RXBS, buffer_size);
+ }
if (bp->dma_burst_length)
dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
@@ -2051,12 +2134,12 @@ static void macb_init_hw(struct macb *bp)
macb_configure_dma(bp);
/* Initialize TX and RX buffers */
- macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
#endif
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
@@ -2197,6 +2280,8 @@ static int macb_open(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+ struct macb_queue *queue;
+ unsigned int q;
int err;
netdev_dbg(bp->dev, "open\n");
@@ -2218,11 +2303,12 @@ static int macb_open(struct net_device *dev)
return err;
}
- napi_enable(&bp->napi);
-
bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ napi_enable(&queue->napi);
+
/* schedule a link state check */
phy_start(dev->phydev);
@@ -2237,10 +2323,14 @@ static int macb_open(struct net_device *dev)
static int macb_close(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned long flags;
+ unsigned int q;
netif_tx_stop_all_queues(dev);
- napi_disable(&bp->napi);
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ napi_disable(&queue->napi);
if (dev->phydev)
phy_stop(dev->phydev);
@@ -2270,7 +2360,10 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
static void gem_update_stats(struct macb *bp)
{
- unsigned int i;
+ struct macb_queue *queue;
+ unsigned int i, q, idx;
+ unsigned long *stat;
+
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
@@ -2287,6 +2380,11 @@ static void gem_update_stats(struct macb *bp)
*(++p) += val;
}
}
+
+ idx = GEM_STATS_LEN;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
+ bp->ethtool_stats[idx++] = *stat;
}
static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -2334,14 +2432,17 @@ static void gem_get_ethtool_stats(struct net_device *dev,
bp = netdev_priv(dev);
gem_update_stats(bp);
- memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+ memcpy(data, &bp->ethtool_stats, sizeof(u64)
+ * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}
static int gem_get_sset_count(struct net_device *dev, int sset)
{
+ struct macb *bp = netdev_priv(dev);
+
switch (sset) {
case ETH_SS_STATS:
- return GEM_STATS_LEN;
+ return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2349,13 +2450,25 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
+ char stat_string[ETH_GSTRING_LEN];
+ struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned int i;
+ unsigned int q;
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
memcpy(p, gem_statistics[i].stat_string,
ETH_GSTRING_LEN);
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
+ snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
+ q, queue_statistics[i].stat_string);
+ memcpy(p, stat_string, ETH_GSTRING_LEN);
+ }
+ }
break;
}
}
@@ -2603,6 +2716,307 @@ static int macb_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
}
+static void gem_enable_flow_filters(struct macb *bp, bool enable)
+{
+ struct ethtool_rx_fs_item *item;
+ u32 t2_scr;
+ int num_t2_scr;
+
+ num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ struct ethtool_rx_flow_spec *fs = &item->fs;
+ struct ethtool_tcpip4_spec *tp4sp_m;
+
+ if (fs->location >= num_t2_scr)
+ continue;
+
+ t2_scr = gem_readl_n(bp, SCRT2, fs->location);
+
+ /* enable/disable screener regs for the flow entry */
+ t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
+
+ /* only enable fields with no masking */
+ tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+ if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
+ t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
+
+ if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
+ t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
+
+ if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
+ t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
+
+ gem_writel_n(bp, SCRT2, fs->location, t2_scr);
+ }
+}
+
+static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
+ uint16_t index = fs->location;
+ u32 w0, w1, t2_scr;
+ bool cmp_a = false;
+ bool cmp_b = false;
+ bool cmp_c = false;
+
+ tp4sp_v = &(fs->h_u.tcp_ip4_spec);
+ tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+ /* use the field only when it is fully specified; otherwise ignore it */
+ if (tp4sp_m->ip4src == 0xFFFFFFFF) {
+ /* 1st compare reg - IP source address */
+ w0 = 0;
+ w1 = 0;
+ w0 = tp4sp_v->ip4src;
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+ w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
+ cmp_a = true;
+ }
+
+ /* use the field only when it is fully specified; otherwise ignore it */
+ if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
+ /* 2nd compare reg - IP destination address */
+ w0 = 0;
+ w1 = 0;
+ w0 = tp4sp_v->ip4dst;
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+ w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
+ cmp_b = true;
+ }
+
+ /* program the port comparer when at least one port is fully specified */
+ if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
+ /* 3rd compare reg - source port, destination port */
+ w0 = 0;
+ w1 = 0;
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
+ if (tp4sp_m->psrc == tp4sp_m->pdst) {
+ w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+ } else {
+ /* only one port definition */
+ w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
+ w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
+ if (tp4sp_m->psrc == 0xFFFF) { /* src port */
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
+ w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+ } else { /* dst port */
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+ w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
+ }
+ }
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
+ cmp_c = true;
+ }
+
+ t2_scr = 0;
+ t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
+ t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
+ if (cmp_a)
+ t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
+ if (cmp_b)
+ t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
+ if (cmp_c)
+ t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
+ gem_writel_n(bp, SCRT2, index, t2_scr);
+}
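+/* Layout per rule, as programmed above: a rule at location n uses the
+ * type-2 comparers GEM_IP4SRC_CMP(n), GEM_IP4DST_CMP(n) and
+ * GEM_PORT_CMP(n) for whichever fields were fully specified, while
+ * screener register SCRT2[n] ties the enabled comparers to the target
+ * queue taken from fs->ring_cookie.
+ */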
+
+static int gem_add_flow_filter(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct ethtool_rx_fs_item *item, *newfs;
+ unsigned long flags;
+ int ret = -EINVAL;
+ bool added = false;
+
+ newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
+ if (newfs == NULL)
+ return -ENOMEM;
+ memcpy(&newfs->fs, fs, sizeof(newfs->fs));
+
+ netdev_dbg(netdev,
+ "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+ fs->flow_type, (int)fs->ring_cookie, fs->location,
+ htonl(fs->h_u.tcp_ip4_spec.ip4src),
+ htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+ htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+ /* find correct place to add in list */
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location > newfs->fs.location) {
+ list_add_tail(&newfs->list, &item->list);
+ added = true;
+ break;
+ } else if (item->fs.location == fs->location) {
+ netdev_err(netdev, "Rule not added: location %d not free!\n",
+ fs->location);
+ ret = -EBUSY;
+ goto err;
+ }
+ }
+ if (!added)
+ list_add_tail(&newfs->list, &bp->rx_fs_list.list);
+
+ gem_prog_cmp_regs(bp, fs);
+ bp->rx_fs_list.count++;
+ /* enable filtering if NTUPLE on */
+ if (netdev->features & NETIF_F_NTUPLE)
+ gem_enable_flow_filters(bp, 1);
+
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ kfree(newfs);
+ return ret;
+}
+
+static int gem_del_flow_filter(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+ struct ethtool_rx_flow_spec *fs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location == cmd->fs.location) {
+ /* disable screener regs for the flow entry */
+ fs = &(item->fs);
+ netdev_dbg(netdev,
+ "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+ fs->flow_type, (int)fs->ring_cookie, fs->location,
+ htonl(fs->h_u.tcp_ip4_spec.ip4src),
+ htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+ htons(fs->h_u.tcp_ip4_spec.psrc),
+ htons(fs->h_u.tcp_ip4_spec.pdst));
+
+ gem_writel_n(bp, SCRT2, fs->location, 0);
+
+ list_del(&item->list);
+ bp->rx_fs_list.count--;
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ kfree(item);
+ return 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ return -EINVAL;
+}
+
+static int gem_get_flow_entry(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location == cmd->fs.location) {
+ memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int gem_get_all_flow_entries(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+ uint32_t cnt = 0;
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = item->fs.location;
+ cnt++;
+ }
+ cmd->data = bp->max_tuples;
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct macb *bp = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = bp->num_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bp->rx_fs_list.count;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = gem_get_flow_entry(netdev, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
+ break;
+ default:
+ netdev_err(netdev,
+ "Command parameter %d is not supported\n", cmd->cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ int ret;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if ((cmd->fs.location >= bp->max_tuples)
+ || (cmd->fs.ring_cookie >= bp->num_queues)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = gem_add_flow_filter(netdev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = gem_del_flow_filter(netdev, cmd);
+ break;
+ default:
+ netdev_err(netdev,
+ "Command parameter %d is not supported\n", cmd->cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
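+/* Example usage from userspace (hypothetical interface name): enable
+ * NTUPLE filtering and steer a fully specified TCPv4 flow to queue 1:
+ *   ethtool -K eth0 ntuple on
+ *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.10 dst-port 5001 \
+ *           action 1 loc 0
+ */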
+
static const struct ethtool_ops macb_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
@@ -2628,6 +3042,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
+ .get_rxnfc = gem_get_rxnfc,
+ .set_rxnfc = gem_set_rxnfc,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2685,6 +3101,12 @@ static int macb_set_features(struct net_device *netdev,
gem_writel(bp, NCFGR, netcfg);
}
+ /* RX Flow Filters */
+ if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
+ bool turn_on = features & NETIF_F_NTUPLE;
+
+ gem_enable_flow_filters(bp, turn_on);
+ }
return 0;
}
@@ -2850,7 +3272,7 @@ static int macb_init(struct platform_device *pdev)
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue;
int err;
- u32 val;
+ u32 val, reg;
bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
@@ -2865,15 +3287,20 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
+ netif_napi_add(dev, &queue->napi, macb_poll, 64);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
queue->IDR = GEM_IDR(hw_q - 1);
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
+ queue->RBQP = GEM_RBQP(hw_q - 1);
+ queue->RBQS = GEM_RBQS(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
queue->TBQPH = GEM_TBQPH(hw_q - 1);
+ queue->RBQPH = GEM_RBQPH(hw_q - 1);
+ }
#endif
} else {
/* queue0 uses legacy registers */
@@ -2882,9 +3309,12 @@ static int macb_init(struct platform_device *pdev)
queue->IDR = MACB_IDR;
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
+ queue->RBQP = MACB_RBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
queue->TBQPH = MACB_TBQPH;
+ queue->RBQPH = MACB_RBQPH;
+ }
#endif
}
@@ -2908,7 +3338,6 @@ static int macb_init(struct platform_device *pdev)
}
dev->netdev_ops = &macb_netdev_ops;
- netif_napi_add(dev, &bp->napi, macb_poll, 64);
/* setup appropriated routines according to adapter type */
if (macb_is_gem(bp)) {
@@ -2941,6 +3370,30 @@ static int macb_init(struct platform_device *pdev)
dev->hw_features &= ~NETIF_F_SG;
dev->features = dev->hw_features;
+ /* Check RX Flow Filters support.
+ * Max Rx flows set by availability of screeners & compare regs:
+ * each 4-tuple definition requires 1 T2 screener reg + 3 compare regs
+ */
+ reg = gem_readl(bp, DCFG8);
+ bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
+ GEM_BFEXT(T2SCR, reg));
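+ /* e.g. (illustrative DCFG8 values): 30 compare regs and 16 screeners
+ * would give max_tuples = min(30 / 3, 16) = 10 flows.
+ */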
+ if (bp->max_tuples > 0) {
+ /* also needs one ethtype match to check IPv4 */
+ if (GEM_BFEXT(SCR2ETH, reg) > 0) {
+ /* program this reg now */
+ reg = 0;
+ reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
+ gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
+ /* Filtering is supported in hw but leave it disabled in the kernel for now */
+ dev->hw_features |= NETIF_F_NTUPLE;
+ /* init Rx flow definitions */
+ INIT_LIST_HEAD(&bp->rx_fs_list.list);
+ bp->rx_fs_list.count = 0;
+ spin_lock_init(&bp->rx_fs_lock);
+ } else
+ bp->max_tuples = 0;
+ }
+
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
@@ -2977,34 +3430,35 @@ static int macb_init(struct platform_device *pdev)
static int at91ether_start(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
struct macb_dma_desc *desc;
dma_addr_t addr;
u32 ctl;
int i;
- lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+ q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp)),
- &lp->rx_ring_dma, GFP_KERNEL);
- if (!lp->rx_ring)
+ &q->rx_ring_dma, GFP_KERNEL);
+ if (!q->rx_ring)
return -ENOMEM;
- lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+ q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
AT91ETHER_MAX_RBUFF_SZ,
- &lp->rx_buffers_dma, GFP_KERNEL);
- if (!lp->rx_buffers) {
+ &q->rx_buffers_dma, GFP_KERNEL);
+ if (!q->rx_buffers) {
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
return -ENOMEM;
}
- addr = lp->rx_buffers_dma;
+ addr = q->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
- desc = macb_rx_desc(lp, i);
+ desc = macb_rx_desc(q, i);
macb_set_addr(lp, desc, addr);
desc->ctrl = 0;
addr += AT91ETHER_MAX_RBUFF_SZ;
@@ -3014,10 +3468,10 @@ static int at91ether_start(struct net_device *dev)
desc->addr |= MACB_BIT(RX_WRAP);
/* Reset buffer index */
- lp->rx_tail = 0;
+ q->rx_tail = 0;
/* Program address of descriptor list in Rx Buffer Queue register */
- macb_writel(lp, RBQP, lp->rx_ring_dma);
+ macb_writel(lp, RBQP, q->rx_ring_dma);
/* Enable Receive and Transmit */
ctl = macb_readl(lp, NCR);
@@ -3064,6 +3518,7 @@ static int at91ether_open(struct net_device *dev)
static int at91ether_close(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
u32 ctl;
/* Disable Receiver and Transmitter */
@@ -3084,13 +3539,13 @@ static int at91ether_close(struct net_device *dev)
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
- lp->rx_buffers, lp->rx_buffers_dma);
- lp->rx_buffers = NULL;
+ q->rx_buffers, q->rx_buffers_dma);
+ q->rx_buffers = NULL;
return 0;
}
@@ -3134,14 +3589,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void at91ether_rx(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
struct macb_dma_desc *desc;
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
- desc = macb_rx_desc(lp, lp->rx_tail);
+ desc = macb_rx_desc(q, q->rx_tail);
while (desc->addr & MACB_BIT(RX_USED)) {
- p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+ p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb) {
@@ -3163,12 +3619,12 @@ static void at91ether_rx(struct net_device *dev)
desc->addr &= ~MACB_BIT(RX_USED);
/* wrap after last buffer */
- if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
- lp->rx_tail = 0;
+ if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+ q->rx_tail = 0;
else
- lp->rx_tail++;
+ q->rx_tail++;
- desc = macb_rx_desc(lp, lp->rx_tail);
+ desc = macb_rx_desc(q, q->rx_tail);
}
}
@@ -3394,7 +3850,6 @@ static int macb_probe(struct platform_device *pdev)
= macb_config->clk_init;
int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
- struct device_node *phy_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
@@ -3500,18 +3955,6 @@ static int macb_probe(struct platform_device *pdev)
else
macb_get_hwaddr(bp);
- /* Power up the PHY if there is a GPIO reset */
- phy_node = of_get_next_available_child(np, NULL);
- if (phy_node) {
- int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
-
- if (gpio_is_valid(gpio)) {
- bp->reset_gpio = gpio_to_desc(gpio);
- gpiod_direction_output(bp->reset_gpio, 1);
- }
- }
- of_node_put(phy_node);
-
err = of_get_phy_mode(np);
if (err < 0) {
pdata = dev_get_platdata(&pdev->dev);
@@ -3542,6 +3985,9 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
+ tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
+ (unsigned long)bp);
+
phy_attached_info(phydev);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -3558,10 +4004,6 @@ err_out_unregister_mdio:
of_phy_deregister_fixed_link(np);
mdiobus_free(bp->mii_bus);
- /* Shutdown the PHY if there is a GPIO reset */
- if (bp->reset_gpio)
- gpiod_set_value(bp->reset_gpio, 0);
-
err_out_free_netdev:
free_netdev(dev);
@@ -3592,10 +4034,6 @@ static int macb_remove(struct platform_device *pdev)
dev->phydev = NULL;
mdiobus_free(bp->mii_bus);
- /* Shutdown the PHY if there is a GPIO reset */
- if (bp->reset_gpio)
- gpiod_set_value(bp->reset_gpio, 0);
-
unregister_netdev(dev);
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 63be75eb34d2..043e3c11c42b 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -27,6 +27,7 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
+ imply CAVIUM_PTP
depends on 64BIT
---help---
This driver supports Thunder's NIC virtual function
@@ -50,6 +51,18 @@ config THUNDER_NIC_RGX
This driver supports configuring XCV block of RGX interface
present on CN81XX chip.
+config CAVIUM_PTP
+ tristate "Cavium PTP coprocessor as PTP clock"
+ depends on 64BIT
+ imply PTP_1588_CLOCK
+ default y
+ ---help---
+ This driver adds support for the Precision Time Protocol Clocks and
+ Timestamping coprocessor (PTP) found on Cavium processors.
+ PTP provides a timestamping mechanism suitable for use with the
+ IEEE 1588 Precision Time Protocol or for other purposes.
+ Timestamps can be used in BGX, TNS, GTI, and NIC blocks.
+
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
index 872da9f7c31a..946bba84e81d 100644
--- a/drivers/net/ethernet/cavium/Makefile
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the Cavium ethernet device drivers.
#
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += common/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/
diff --git a/drivers/net/ethernet/cavium/common/Makefile b/drivers/net/ethernet/cavium/common/Makefile
new file mode 100644
index 000000000000..dd8561b8060b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CAVIUM_PTP) += cavium_ptp.o
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
new file mode 100644
index 000000000000..c87c9c684a33
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timecounter.h>
+#include <linux/pci.h>
+
+#include "cavium_ptp.h"
+
+#define DRV_NAME "Cavium PTP Driver"
+
+#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 ptp_cavium_clock_get(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ u64 ret = CLOCK_BASE_RATE * 16;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+struct cavium_ptp *cavium_ptp_get(void)
+{
+ struct cavium_ptp *ptp;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+EXPORT_SYMBOL(cavium_ptp_get);
+
+void cavium_ptp_put(struct cavium_ptp *ptp)
+{
+ pci_dev_put(ptp->pdev);
+}
+EXPORT_SYMBOL(cavium_ptp_put);
+
+/**
+ * cavium_ptp_adjfine() - Adjust ptp frequency
+ * @ptp_info: PTP clock info
+ * @scaled_ppm: how much to adjust by, in parts per million, but with a
+ * 16 bit binary fractional field
+ */
+static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 comp;
+ u64 adj;
+ bool neg_adj = false;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. The convention is that it
+ * represents the number of nanoseconds between each cycle. The
+ * compensation value is therefore a 64-bit fixed-point number whose
+ * upper 32 bits are whole nanoseconds and whose lower 32 bits are
+ * fractions of a nanosecond.
+ * scaled_ppm is the ratio, in parts per million with a 16-bit binary
+ * fractional field, by which the compensation value should be
+ * corrected.
+ * The new compensation value is computed with 64-bit fixed-point
+ * arithmetic from the formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the base compensation value computed initially in
+ * cavium_ptp_probe() -> tbase = 1/Hz. An endian-independent structure
+ * definition is then used to write the data to the PTP register.
+ */
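+ /* Worked example (illustrative rate): with clock_rate = 1 GHz,
+ * tbase = (10^9 << 32) / 10^9 = 1 << 32, i.e. exactly 1 ns per cycle.
+ * For scaled_ppm = 65536 (+1 ppm),
+ * adj = ((1 << 32) * 65536 >> 16) / 10^6 = 4294, so the new
+ * comp is (1 << 32) + 4294 and the clock runs ~1 ppm faster.
+ */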
+ comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ adj = comp * scaled_ppm;
+ adj >>= 16;
+ adj = div_u64(adj, 1000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_adjtime() - Adjust ptp time
+ * @ptp_info: PTP clock info
+ * @delta: how much to adjust by, in nanosecs
+ */
+static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_adjtime(&clock->time_counter, delta);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ /* Sync, for network driver to get latest value */
+ smp_mb();
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_gettime() - Get hardware clock time with adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ nsec = timecounter_read(&clock->time_counter);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
+ * @ptp_info: PTP clock info
+ * @rq: request
+ * @on: whether to enable (nonzero) or disable the feature
+ */
+static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct cavium_ptp *clock =
+ container_of(cc, struct cavium_ptp, cycle_counter);
+
+ return readq(clock->reg_base + PTP_CLOCK_HI);
+}
+
+static int cavium_ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct cavium_ptp *clock;
+ struct cyclecounter *cc;
+ u64 clock_cfg;
+ u64 clock_comp;
+ int err;
+
+ clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ clock->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ spin_lock_init(&clock->spin_lock);
+
+ cc = &clock->cycle_counter;
+ cc->read = cavium_ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&clock->time_counter, &clock->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ clock->clock_rate = ptp_cavium_clock_get();
+
+ clock->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "ThunderX PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = cavium_ptp_adjfine,
+ .adjtime = cavium_ptp_adjtime,
+ .gettime64 = cavium_ptp_gettime,
+ .settime64 = cavium_ptp_settime,
+ .enable = cavium_ptp_enable,
+ };
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
+
+ clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
+ if (!clock->ptp_clock) {
+ err = -ENODEV;
+ goto error_stop;
+ }
+ if (IS_ERR(clock->ptp_clock)) {
+ err = PTR_ERR(clock->ptp_clock);
+ goto error_stop;
+ }
+
+ pci_set_drvdata(pdev, clock);
+ return 0;
+
+error_stop:
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+ pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+
+error_free:
+ devm_kfree(dev, clock);
+
+error:
+ /* For `cavium_ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void cavium_ptp_remove(struct pci_dev *pdev)
+{
+ struct cavium_ptp *clock = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ ptp_clock_unregister(clock->ptp_clock);
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id cavium_ptp_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+ { 0, }
+};
+
+static struct pci_driver cavium_ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = cavium_ptp_id_table,
+ .probe = cavium_ptp_probe,
+ .remove = cavium_ptp_remove,
+};
+
+static int __init cavium_ptp_init_module(void)
+{
+ return pci_register_driver(&cavium_ptp_driver);
+}
+
+static void __exit cavium_ptp_cleanup_module(void)
+{
+ pci_unregister_driver(&cavium_ptp_driver);
+}
+
+module_init(cavium_ptp_init_module);
+module_exit(cavium_ptp_cleanup_module);
+
+MODULE_DESCRIPTION(DRV_NAME);
+MODULE_AUTHOR("Cavium Networks <support@cavium.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
new file mode 100644
index 000000000000..be2bafc7beeb
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#ifndef CAVIUM_PTP_H
+#define CAVIUM_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+struct cavium_ptp {
+ struct pci_dev *pdev;
+
+ /* Serialize access to cycle_counter, time_counter and hw_registers */
+ spinlock_t spin_lock;
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+ void __iomem *reg_base;
+
+ u32 clock_rate;
+
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+};
+
+#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+
+struct cavium_ptp *cavium_ptp_get(void);
+void cavium_ptp_put(struct cavium_ptp *ptp);
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ unsigned long flags;
+ u64 ret;
+
+ spin_lock_irqsave(&ptp->spin_lock, flags);
+ ret = timecounter_cyc2time(&ptp->time_counter, tstamp);
+ spin_unlock_irqrestore(&ptp->spin_lock, flags);
+
+ return ret;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return ptp_clock_index(clock->ptp_clock);
+}
+
+#else
+
+static inline struct cavium_ptp *cavium_ptp_get(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void cavium_ptp_put(struct cavium_ptp *ptp) {}
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ return 0;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return -1;
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 2c615ab09e64..f38abf626412 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -702,12 +702,10 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
size = octdevsize + priv_size + configsize +
(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
- buf = vmalloc(size);
+ buf = vzalloc(size);
if (!buf)
return NULL;
- memset(buf, 0, size);
-
oct = (struct octeon_device *)buf;
oct->priv = (void *)(buf + octdevsize);
oct->chip = (void *)(buf + octdevsize + priv_size);
@@ -840,10 +838,9 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
size = sizeof(struct octeon_ioq_vector) * num_ioqs;
- oct->ioq_vector = vmalloc(size);
+ oct->ioq_vector = vzalloc(size);
if (!oct->ioq_vector)
return 1;
- memset(oct->ioq_vector, 0, size);
for (i = 0; i < num_ioqs; i++) {
ioq_vector = &oct->ioq_vector[i];
ioq_vector->oct_dev = oct;
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 4a02e618e318..4cacce5d2b16 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -263,6 +263,8 @@ struct nicvf_drv_stats {
struct u64_stats_sync syncp;
};
+struct cavium_ptp;
+
struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev;
@@ -312,6 +314,33 @@ struct nicvf {
struct tasklet_struct qs_err_task;
struct work_struct reset_task;
+ /* PTP timestamp */
+ struct cavium_ptp *ptp_clock;
+ /* Inbound timestamping is on */
+ bool hw_rx_tstamp;
+ /* When a packet that requires timestamping is sent, the hardware
+ * inserts two entries into the completion queue. The first is the
+ * regular CQE_TYPE_SEND entry that signals that the packet was sent.
+ * The second is a CQE_TYPE_SEND_PTP entry that contains the actual
+ * timestamp for that packet.
+ * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND entry
+ * and is used and zeroed in the handler for the CQE_TYPE_SEND_PTP
+ * entry.
+ * So `ptp_skb` holds the pointer to the packet between the
+ * CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers.
+ */
+ struct sk_buff *ptp_skb;
+ /* `tx_ptp_skbs` is set when the hardware is sending a packet that
+ * requires timestamping. Cavium hardware cannot process more than one
+ * such packet at once, so this is set each time the driver submits
+ * a packet that requires timestamping to the send queue and is cleared
+ * each time it receives the completion-queue entry saying
+ * that such a packet was sent.
+ * So `tx_ptp_skbs` prevents the driver from submitting more than one
+ * packet that requires timestamping to the hardware for transmission.
+ */
+ atomic_t tx_ptp_skbs;
+
/* Interrupt coalescing settings */
u32 cq_coalesce_usecs;
u32 msg_enable;
@@ -371,6 +400,7 @@ struct nicvf {
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
#define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */
+#define NIC_MBOX_MSG_PTP_CFG 0x19 /* HW packet timestamp */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
@@ -521,6 +551,11 @@ struct pfc {
u8 fc_tx;
};
+struct set_ptp {
+ u8 msg;
+ bool enable;
+};
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -540,6 +575,7 @@ union nic_mbx {
struct set_loopback lbk;
struct reset_stat_cfg reset_stat;
struct pfc pfc;
+ struct set_ptp ptp;
};
#define NIC_NODE_ID_MASK 0x03
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 8f1dd55b3e08..7ff66a8194e2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -18,7 +18,7 @@
#include "q_struct.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-nic"
+#define DRV_NAME "nicpf"
#define DRV_VERSION "1.0"
struct hw_info {
@@ -426,13 +426,22 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable backpressure */
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
- /* TNS and TNS bypass modes are present only on 88xx */
+ /* TNS and TNS bypass modes are present only on 88xx
+ * Also, the offset of this CSR has changed on 81xx and 83xx.
+ */
if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
/* Disable TNS mode on both interfaces */
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
- (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX0_BLOCK | (1ULL << 16));
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
- (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX1_BLOCK | (1ULL << 16));
+ } else {
+ /* Configure timestamp generation timeout to 10us */
+ for (i = 0; i < nic->hw->bgx_cnt; i++)
+ nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3),
+ (1ULL << 16));
}
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
@@ -880,6 +889,44 @@ static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
}
}
+/* Enable or disable HW timestamping by BGX for pkts received on a LMAC */
+static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
+{
+ struct pkind_cfg *pkind;
+ u8 lmac, bgx_idx;
+ u64 pkind_val, pkind_idx;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX;
+ pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3));
+ pkind = (struct pkind_cfg *)&pkind_val;
+
+ if (ptp->enable && !pkind->hdr_sl) {
+ /* Set the skip length to exclude the 8-byte timestamp while
+ * parsing the packet; if not configured, this results in L2 errors.
+ */
+ pkind->hdr_sl = 4;
+ /* Adjust max packet length allowed */
+ pkind->maxlen += (pkind->hdr_sl * 2);
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588);
+ } else if (!ptp->enable && pkind->hdr_sl) {
+ pkind->maxlen -= (pkind->hdr_sl * 2);
+ pkind->hdr_sl = 0;
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q);
+ }
+
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
+}
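+/* Note on the arithmetic above: hdr_sl counts 2-byte units, so a skip
+ * length of 4 hides the 8-byte timestamp from the parser, and maxlen
+ * grows or shrinks by hdr_sl * 2 = 8 bytes to keep the allowed payload
+ * size unchanged.
+ */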
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -1022,6 +1069,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_PFC:
nic_pause_frame(nic, vf, &mbx.pfc);
goto unlock;
+ case NIC_MBOX_MSG_PTP_CFG:
+ nic_config_timestamp(nic, vf, &mbx.ptp);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index 80d46337cf29..a16c48a1ebb2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -99,6 +99,7 @@
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_INTFX_SEND_CFG (0x4000)
#define NIC_PF_MCAM_0_191_ENA (0x100000)
#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
#define NIC_PF_MCAM_CTRL (0x120000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index b9ece9cbf98b..5603f5ab1fee 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -9,14 +9,16 @@
/* ETHTOOL Support for VNIC_VF Device*/
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
-#define DRV_NAME "thunder-nicvf"
+#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
struct nicvf_stat {
@@ -824,6 +826,31 @@ static int nicvf_set_pauseparam(struct net_device *dev,
return 0;
}
+static int nicvf_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
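+/* The capabilities reported here, including the PHC index consumed by
+ * tools such as ptp4l/phc2sys, can be queried from userspace with
+ * "ethtool -T <ifname>" (hypothetical interface name).
+ */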
+
static const struct ethtool_ops nicvf_ethtool_ops = {
.get_link = nicvf_get_link,
.get_drvinfo = nicvf_get_drvinfo,
@@ -847,7 +874,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
.set_pauseparam = nicvf_set_pauseparam,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = nicvf_get_ts_info,
.get_link_ksettings = nicvf_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a063c36c4c58..b68cde9f17d2 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -20,13 +20,15 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
-#define DRV_NAME "thunder-nicvf"
+#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
/* Supported devices */
@@ -65,6 +67,11 @@ module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+struct nicvf_xdp_tx {
+ u64 dma_addr;
+ u8 qidx;
+};
+
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
if (nic->sqs_mode)
@@ -500,14 +507,29 @@ static int nicvf_init_resources(struct nicvf *nic)
return 0;
}
+static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
+{
+ /* Check if it's a recycled page; if not, unmap the DMA mapping.
+ * A recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+}
+
static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
- struct sk_buff **skb)
+ struct rcv_queue *rq, struct sk_buff **skb)
{
struct xdp_buff xdp;
struct page *page;
+ struct nicvf_xdp_tx *xdp_tx = NULL;
u32 action;
- u16 len, offset = 0;
+ u16 len, offset = 0;
+ int err;
u64 dma_addr, cpu_addr;
void *orig_data;
@@ -521,10 +543,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
cpu_addr = (u64)phys_to_virt(cpu_addr);
page = virt_to_page((void *)cpu_addr);
- xdp.data_hard_start = page_address(page);
+ xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
xdp.data = (void *)cpu_addr;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
rcu_read_lock();
@@ -540,18 +563,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
switch (action) {
case XDP_PASS:
- /* Check if it's a recycled page, if not
- * unmap the DMA mapping.
- *
- * Recycled page holds an extra reference.
- */
- if (page_ref_count(page) == 1) {
- dma_addr &= PAGE_MASK;
- dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
- RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
+ nicvf_unmap_page(nic, page, dma_addr);
/* Build SKB and pass on packet to network stack */
*skb = build_skb(xdp.data,
@@ -564,6 +576,20 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
case XDP_TX:
nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
return true;
+ case XDP_REDIRECT:
+ /* Save DMA address for use while transmitting */
+ xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+ xdp_tx->dma_addr = dma_addr;
+ xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+ err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
+ if (!err)
+ return true;
+
+ /* Free the page on error */
+ nicvf_unmap_page(nic, page, dma_addr);
+ put_page(page);
+ break;
default:
bpf_warn_invalid_xdp_action(action);
/* fall through */
@@ -571,24 +597,51 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
trace_xdp_exception(nic->netdev, prog, action);
/* fall through */
case XDP_DROP:
- /* Check if it's a recycled page, if not
- * unmap the DMA mapping.
- *
- * Recycled page holds an extra reference.
- */
- if (page_ref_count(page) == 1) {
- dma_addr &= PAGE_MASK;
- dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
- RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
+ nicvf_unmap_page(nic, page, dma_addr);
put_page(page);
return true;
}
return false;
}
+static void nicvf_snd_ptp_handler(struct net_device *netdev,
+ struct cqe_send_t *cqe_tx)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct skb_shared_hwtstamps ts;
+ u64 ns;
+
+ nic = nic->pnicvf;
+
+ /* Sync for 'ptp_skb' */
+ smp_rmb();
+
+ /* New timestamp request can be queued now */
+ atomic_set(&nic->tx_ptp_skbs, 0);
+
+ /* Check for timestamp requested skb */
+ if (!nic->ptp_skb)
+ return;
+
+ /* Check if timestamping timed out (the timeout is configured to 10us) */
+ if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
+ cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
+ goto no_tstamp;
+
+ /* Get the timestamp */
+ memset(&ts, 0, sizeof(ts));
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
+ ts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(nic->ptp_skb, &ts);
+
+no_tstamp:
+ /* Free the original skb */
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ /* Sync 'ptp_skb' */
+ smp_wmb();
+}
+
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cqe_send_t *cqe_tx,
int budget, int *subdesc_cnt,
@@ -645,7 +698,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
prefetch(skb);
(*tx_pkts)++;
*tx_bytes += skb->len;
- napi_consume_skb(skb, budget);
+ /* If timestamp is requested for this skb, don't free it */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ !nic->pnicvf->ptp_skb)
+ nic->pnicvf->ptp_skb = skb;
+ else
+ napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
/* In case of SW TSO on 88xx, only last segment will have
@@ -684,9 +742,25 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
skb_set_hash(skb, hash, hash_type);
}
+static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
+{
+ u64 ns;
+
+ if (!nic->ptp_clock || !nic->hw_rx_tstamp)
+ return;
+
+ /* The first 8 bytes are the timestamp */
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock,
+ be64_to_cpu(*(__be64 *)skb->data));
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+ __skb_pull(skb, 8);
+}
+
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
+ struct cqe_rx_t *cqe_rx,
+ struct snd_queue *sq, struct rcv_queue *rq)
{
struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
@@ -712,7 +786,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
/* For XDP, ignore pkts spanning multiple pages */
if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
/* Packet consumed by XDP */
- if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
+ if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
return;
} else {
skb = nicvf_get_rcv_skb(snic, cqe_rx,
@@ -734,6 +808,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
}
+ nicvf_set_rxtstamp(nic, skb);
nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
@@ -769,6 +844,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
struct snd_queue *sq = &qs->sq[cq_idx];
+ struct rcv_queue *rq = &qs->rq[cq_idx];
unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
spin_lock_bh(&cq->lock);
@@ -799,7 +875,7 @@ loop:
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
work_done++;
break;
case CQE_TYPE_SEND:
@@ -808,10 +884,12 @@ loop:
&tx_pkts, &tx_bytes);
tx_done++;
break;
+ case CQE_TYPE_SEND_PTP:
+ nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
+ break;
case CQE_TYPE_INVALID:
case CQE_TYPE_RX_SPLIT:
case CQE_TYPE_RX_TCP:
- case CQE_TYPE_SEND_PTP:
/* Ignore for now */
break;
}
@@ -1307,12 +1385,28 @@ int nicvf_stop(struct net_device *netdev)
nicvf_free_cq_poll(nic);
+ /* Free any pending SKB saved to receive timestamp */
+ if (nic->ptp_skb) {
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ }
+
/* Clear multiqset info */
nic->pnicvf = nic;
return 0;
}
+static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+
+ mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
+ mbx.ptp.enable = enable;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
union nic_mbx mbx = {};
@@ -1382,6 +1476,12 @@ int nicvf_open(struct net_device *netdev)
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
+ /* Configure PTP timestamp */
+ if (nic->ptp_clock)
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+ atomic_set(&nic->tx_ptp_skbs, 0);
+ nic->ptp_skb = NULL;
+
/* Configure receive side scaling and MTU */
if (!nic->sqs_mode) {
nicvf_rss_init(nic);
@@ -1764,6 +1864,117 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
}
}
+static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf *snic = nic;
+ struct nicvf_xdp_tx *xdp_tx;
+ struct snd_queue *sq;
+ struct page *page;
+ int err, qidx;
+
+ if (!netif_running(netdev) || !nic->xdp_prog)
+ return -EINVAL;
+
+ page = virt_to_page(xdp->data);
+ xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+ qidx = xdp_tx->qidx;
+
+ if (xdp_tx->qidx >= nic->xdp_tx_queues)
+ return -EINVAL;
+
+ /* Get secondary Qset's info */
+ if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
+ qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
+ snic = (struct nicvf *)nic->snicvf[qidx - 1];
+ if (!snic)
+ return -EINVAL;
+ qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
+ }
+
+ sq = &snic->qs->sq[qidx];
+ err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
+ xdp_tx->dma_addr,
+ xdp->data_end - xdp->data);
+ if (err)
+ return -ENOMEM;
+
+ nicvf_xdp_sq_doorbell(snic, sq, qidx);
+ return 0;
+}
+
+static void nicvf_xdp_flush(struct net_device *dev)
+{
+ return;
+}
+
+static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ nic->hw_rx_tstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ nic->hw_rx_tstamp = true;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (netif_running(netdev))
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return nicvf_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open,
.ndo_stop = nicvf_stop,
@@ -1775,6 +1986,9 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
+ .ndo_xdp_xmit = nicvf_xdp_xmit,
+ .ndo_xdp_flush = nicvf_xdp_flush,
+ .ndo_do_ioctl = nicvf_ioctl,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1784,6 +1998,16 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct nicvf *nic;
int err, qcount;
u16 sdevid;
+ struct cavium_ptp *ptp_clock;
+
+ ptp_clock = cavium_ptp_get();
+ if (IS_ERR(ptp_clock)) {
+ if (PTR_ERR(ptp_clock) == -ENODEV)
+ /* In a virtualized environment we proceed without PTP */
+ ptp_clock = NULL;
+ else
+ return PTR_ERR(ptp_clock);
+ }
err = pci_enable_device(pdev);
if (err) {
@@ -1833,6 +2057,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->pdev = pdev;
nic->pnicvf = nic;
nic->max_queues = qcount;
+ /* If the number of CPUs is too low, there won't be any queues left
+ * for XDP_TX, hence double it.
+ */
+ if (!nic->t88)
+ nic->max_queues *= 2;
+ nic->ptp_clock = ptp_clock;
/* MAP VF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -1946,6 +2176,7 @@ static void nicvf_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
if (nic->drv_stats)
free_percpu(nic->drv_stats);
+ cavium_ptp_put(nic->ptp_clock);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
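The nicvf_config_hwtstamp() handler added above implements the standard SIOCSHWTSTAMP UAPI, so it can be exercised from userspace with the stock ioctl. Below is a minimal sketch of such a caller; the interface name "eth0" is a placeholder and error handling is abbreviated. Note the driver coerces every accepted rx_filter to HWTSTAMP_FILTER_ALL, as the switch statement above shows.

/* Hedged userspace sketch: enable HW timestamping via SIOCSHWTSTAMP.
 * "eth0" is a placeholder interface name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.flags = 0,                       /* reserved, must be 0 */
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL, /* driver reports ALL back */
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	close(fd);
	return 0;
}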
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a3d12dbde95b..3eae9ff9b53a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
/* Reserve space for header modifications by BPF program */
if (rbdr->is_xdp)
- buf_len += XDP_PACKET_HEADROOM;
+ buf_len += XDP_HEADROOM;
/* Check if it's recycled */
if (pgcache)
@@ -224,8 +224,9 @@ ret:
nic->rb_page = NULL;
return -ENOMEM;
}
+
if (pgcache)
- pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
+ pgcache->dma_addr = *rbuf + XDP_HEADROOM;
nic->rb_page_offset += buf_len;
}
@@ -759,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
if (!rq->enable) {
nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
return;
}
@@ -771,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
/* All writes of RBDR data are to be loaded into L2 cache as well */
rq->caching = 1;
+ /* Driver has no proper error path for a failed XDP RX-queue info reg */
+ WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
+
/* Send a mailbox msg to PF to config RQ */
mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
mbx.rq.qs_num = qs->vnic_id;
@@ -977,6 +982,9 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
qs_cfg->be = 1;
#endif
qs_cfg->vnic = qs->vnic_id;
+ /* Enable Tx timestamping capability */
+ if (nic->ptp_clock)
+ qs_cfg->send_tstmp_ena = 1;
}
nicvf_send_msg_to_pf(nic, &mbx);
}
@@ -1236,7 +1244,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
int qentry;
if (subdesc_cnt > sq->xdp_free_cnt)
- return 0;
+ return -1;
qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
@@ -1247,7 +1255,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
sq->xdp_desc_cnt += subdesc_cnt;
- return 1;
+ return 0;
}
/* Calculate the number of SQ subdescriptors needed to transmit all
@@ -1384,6 +1392,29 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
+
+ /* Check if timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_tx_timestamp(skb);
+ return;
+ }
+
+ /* Tx timestamping is not supported along with TSO, so ignore the request */
+ if (skb_shinfo(skb)->gso_size)
+ return;
+
+ /* HW supports only a single outstanding packet to timestamp */
+ if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
+ return;
+
+ /* Mark the SKB for later reference */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Finally, enable timestamp generation.
+ * Since 'post_cqe' is also set, two CQEs will be posted
+ * for this packet, i.e. CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
+ */
+ hdr->tstmp = 1;
}
/* SQ GATHER subdescriptor
@@ -1625,7 +1656,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
if (page_ref_count(page) != 1)
return;
- len += XDP_PACKET_HEADROOM;
+ len += XDP_HEADROOM;
/* Receive buffers in XDP mode are mapped from page start */
dma_addr &= PAGE_MASK;
}
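A note on the Tx-timestamp gating used in both nicvf_main.c and nicvf_queues.c above: atomic_add_unless(v, a, u) adds a to v unless v already equals u, returning non-zero only when the add happened. With a = 1 and u = 1, tx_ptp_skbs acts as a single-entry slot, matching the "HW supports only a single outstanding packet" comment. A minimal standalone sketch of that idea (not driver code):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t tx_slot = ATOMIC_INIT(0);

/* Returns true if we won the single outstanding-timestamp slot (0 -> 1). */
static bool claim_tstamp_slot(void)
{
	return atomic_add_unless(&tx_slot, 1, 1);
}

/* The completion path resets the slot, cf. nicvf_snd_ptp_handler(). */
static void release_tstamp_slot(void)
{
	atomic_set(&tx_slot, 0);
}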
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 67d1a3230773..7d1e4e2aaad0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,6 +11,8 @@
#include <linux/netdevice.h>
#include <linux/iommu.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
#include "q_struct.h"
#define MAX_QUEUE_SET 128
@@ -92,6 +94,9 @@
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */
+#define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
+
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
@@ -251,6 +256,7 @@ struct rcv_queue {
u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
u8 caching;
struct rx_tx_queue_stats stats;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct cmp_queue {
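The new xdp_rxq member pairs with the xdp_rxq_info_reg()/xdp_rxq_info_unreg() calls added to nicvf_rcv_queue_config() above. A hedged sketch of that register/unregister lifetime follows; the flow is illustrative, not the actual driver code.

#include <net/xdp.h>
#include <linux/netdevice.h>

/* Illustrative only: register on queue bring-up, unregister on teardown. */
static int example_rxq_xdp_setup(struct xdp_rxq_info *xdp_rxq,
				 struct net_device *dev, u16 qidx,
				 bool enable)
{
	if (!enable) {
		xdp_rxq_info_unreg(xdp_rxq);
		return 0;
	}
	/* The driver above only WARNs on failure; a caller with a proper
	 * error path would unwind queue setup here instead.
	 */
	return xdp_rxq_info_reg(xdp_rxq, dev, qidx);
}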
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5e5c4d7796b8..91d34ea40e2c 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -21,7 +21,7 @@
#include "nic.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-BGX"
+#define DRV_NAME "thunder_bgx"
#define DRV_VERSION "1.0"
struct lmac {
@@ -245,6 +245,35 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+/* Enables or disables timestamp insertion by BGX for Rx packets */
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac;
+ u64 csr_offset, cfg;
+
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+
+ if (lmac->lmac_type == BGX_MODE_SGMII ||
+ lmac->lmac_type == BGX_MODE_QSGMII ||
+ lmac->lmac_type == BGX_MODE_RGMII)
+ csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
+ else
+ csr_offset = BGX_SMUX_RX_FRM_CTL;
+
+ cfg = bgx_reg_read(bgx, lmacid, csr_offset);
+
+ if (enable)
+ cfg |= BGX_PKT_RX_PTP_EN;
+ else
+ cfg &= ~BGX_PKT_RX_PTP_EN;
+ bgx_reg_write(bgx, lmacid, csr_offset, cfg);
+}
+EXPORT_SYMBOL(bgx_config_timestamping);
+
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 23acdc5ab896..5a7567d31138 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -122,6 +122,8 @@
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_FRM_CTL 0x20020
+#define BGX_PKT_RX_PTP_EN BIT_ULL(12)
#define BGX_SMUX_RX_JABBER 0x20030
#define BGX_SMUX_RX_CTL 0x20048
#define SMU_RX_CTL_STATUS (3ull << 0)
@@ -172,6 +174,7 @@
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12)
#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13)
+#define BGX_GMP_GMI_RXX_FRM_CTL 0x38028
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
@@ -223,6 +226,7 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 578c7f8f11bf..2d5e8dab1f70 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -20,7 +20,7 @@
#include "nic.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-xcv"
+#define DRV_NAME "thunder_xcv"
#define DRV_VERSION "1.0"
/* Register offsets */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 5713e83be08c..e2cdfa75673f 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -69,6 +69,7 @@ config CHELSIO_T4
depends on PCI && (IPV6 || IPV6=n)
select FW_LOADER
select MDIO
+ select ZLIB_DEFLATE
---help---
This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
adapters and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
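ZLIB_DEFLATE is selected because the new cudbg_zlib.o object (added to the Makefile below) compresses dump chunks with the kernel's zlib. cudbg_zlib.c itself is not part of this excerpt; the following is a hedged sketch of the <linux/zlib.h> setup it relies on, with illustrative parameters rather than the driver's actual ones.

#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Illustrative deflate-stream setup; parameters are assumptions. */
static int example_deflate_init(struct z_stream_s *strm)
{
	strm->workspace = vzalloc(zlib_deflate_workspacesize(MAX_WBITS,
							     MAX_MEM_LEVEL));
	if (!strm->workspace)
		return -ENOMEM;

	if (zlib_deflateInit2(strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			      MAX_WBITS, MAX_MEM_LEVEL,
			      Z_DEFAULT_STRATEGY) != Z_OK) {
		vfree(strm->workspace);
		return -EIO;
	}
	return 0;
}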
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 6a015362c340..185fe8df7628 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3304,6 +3304,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->ethtool_ops = &cxgb_ethtool_ops;
netdev->min_mtu = 81;
netdev->max_mtu = ETH_MAX_MTU;
+ netdev->dev_port = pi->port_id;
}
pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 8c9c6b0d2e5d..53b6a02c778e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
- cudbg_common.o cudbg_lib.o
+ cudbg_common.o cudbg_lib.o cudbg_zlib.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
index f78ba1743b5a..8edc49827af0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
@@ -19,7 +19,8 @@
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff)
{
u32 offset;
@@ -28,17 +29,30 @@ int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
if (offset + size > pdbg_buff->size)
return CUDBG_STATUS_NO_MEM;
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE) {
+ if (size > pdbg_init->compress_buff_size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_init->compress_buff;
+ pin_buff->offset = 0;
+ pin_buff->size = size;
+ return 0;
+ }
+
pin_buff->data = (char *)pdbg_buff->data + offset;
pin_buff->offset = offset;
pin_buff->size = size;
- pdbg_buff->size -= size;
return 0;
}
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff)
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff)
{
- pdbg_buff->size += pin_buff->size;
+ /* Clear compression buffer for re-use */
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE)
+ memset(pdbg_init->compress_buff, 0,
+ pdbg_init->compress_buff_size);
+
pin_buff->data = NULL;
pin_buff->offset = 0;
pin_buff->size = 0;
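The reworked cudbg_get_buff()/cudbg_put_buff() pair above underpins the collection pattern that the cudbg_lib.c hunks below converge on: reserve scratch space, fill it, then hand it to cudbg_write_and_release_buff(), which copies or compresses and releases in one step. A hedged sketch of that pattern; cudbg_collect_example() is hypothetical, not a real entity collector.

/* Hypothetical collector illustrating the get/fill/write-and-release flow. */
static int cudbg_collect_example(struct cudbg_init *pdbg_init,
				 struct cudbg_buffer *dbg_buff,
				 struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, 64, &temp_buff);
	if (rc)
		return rc;

	memset(temp_buff.data, 0, temp_buff.size); /* fill with entity data */

	/* On a hardware-read failure the pattern is:
	 *	cudbg_err->sys_err = rc;
	 *	cudbg_put_buff(pdbg_init, &temp_buff);
	 */
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}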
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 605689957496..b57acb8dc35b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -18,17 +18,15 @@
#ifndef __CUDBG_ENTITY_H__
#define __CUDBG_ENTITY_H__
-#define EDC0_FLAG 3
-#define EDC1_FLAG 4
+#define EDC0_FLAG 0
+#define EDC1_FLAG 1
+#define MC_FLAG 2
+#define MC0_FLAG 3
+#define MC1_FLAG 4
+#define HMA_FLAG 5
#define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
-struct card_mem {
- u16 size_edc0;
- u16 size_edc1;
- u16 mem_flag;
-};
-
struct cudbg_mbox_log {
struct mbox_cmd entry;
u32 hi[MBOX_LEN / 8];
@@ -87,6 +85,48 @@ struct cudbg_tp_la {
u8 data[0];
};
+static const char * const cudbg_region[] = {
+ "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+ "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+ "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+ "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+ "RQUDP region:", "PBL region:", "TXPBL region:",
+ "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+ "On-chip queues:"
+};
+
+/* Memory region info relative to current memory (i.e. wrt 0). */
+struct cudbg_region_info {
+ bool exist; /* Does the region exist in current memory? */
+ u32 start; /* Start wrt 0 */
+ u32 end; /* End wrt 0 */
+};
+
+struct cudbg_mem_desc {
+ u32 base;
+ u32 limit;
+ u32 idx;
+};
+
+struct cudbg_meminfo {
+ struct cudbg_mem_desc avail[4];
+ struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
+ u32 avail_c;
+ u32 mem_c;
+ u32 up_ram_lo;
+ u32 up_ram_hi;
+ u32 up_extmem2_lo;
+ u32 up_extmem2_hi;
+ u32 rx_pages_data[3];
+ u32 tx_pages_data[4];
+ u32 p_structs;
+ u32 reserved[12];
+ u32 port_used[4];
+ u32 port_alloc[4];
+ u32 loopback_used[NCHAN];
+ u32 loopback_alloc[NCHAN];
+};
+
struct cudbg_cim_pif_la {
int size;
u8 data[0];
@@ -145,6 +185,7 @@ struct cudbg_tid_info_region_rev1 {
u32 reserved[16];
};
+#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256
#define CUDBG_MAX_FL_QIDS 1024
struct cudbg_ch_cntxt {
@@ -334,6 +375,25 @@ static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
};
+#define CUDBG_NUM_PCIE_CONFIG_REGS 0x61
+
+static const u32 t5_pcie_config_array[][2] = {
+ {0x0, 0x34},
+ {0x3c, 0x40},
+ {0x50, 0x64},
+ {0x70, 0x80},
+ {0x94, 0xa0},
+ {0xb0, 0xb8},
+ {0xd0, 0xd4},
+ {0x100, 0x128},
+ {0x140, 0x148},
+ {0x150, 0x164},
+ {0x170, 0x178},
+ {0x180, 0x194},
+ {0x1a0, 0x1b8},
+ {0x1c0, 0x208},
+};
+
static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
@@ -345,37 +405,55 @@ static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
};
-static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = {
- {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
- {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
- {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
- {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
- {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
- {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
- {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
- {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
- {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
- {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
- {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
- {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
- {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
-
-};
-
-static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = {
- {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
- {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
- {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
- {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
- {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
- {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
- {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
- {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
- {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
- {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
- {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
- {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
- {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
+ {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+ {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+ {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+ {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+ {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+ {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+ {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
+ {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+ {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+ {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+};
+
+static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
+ {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+ {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+ {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+ {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+ {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+ {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+ {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
+ {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
+ {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
+ {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+ {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
};
static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index e10ff1ee62c5..8568a51f6414 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -21,6 +21,7 @@
/* Error codes */
#define CUDBG_STATUS_NO_MEM -19
#define CUDBG_STATUS_ENTITY_NOT_FOUND -24
+#define CUDBG_STATUS_NOT_IMPLEMENTED -28
#define CUDBG_SYSTEM_ERROR -29
#define CUDBG_STATUS_CCLK_NOT_DEFINED -32
@@ -47,6 +48,8 @@ enum cudbg_dbg_entity_type {
CUDBG_CIM_OBQ_NCSI = 17,
CUDBG_EDC0 = 18,
CUDBG_EDC1 = 19,
+ CUDBG_MC0 = 20,
+ CUDBG_MC1 = 21,
CUDBG_RSS = 22,
CUDBG_RSS_VF_CONF = 25,
CUDBG_PATH_MTU = 27,
@@ -56,6 +59,7 @@ enum cudbg_dbg_entity_type {
CUDBG_SGE_INDIRECT = 37,
CUDBG_ULPRX_LA = 41,
CUDBG_TP_LA = 43,
+ CUDBG_MEMINFO = 44,
CUDBG_CIM_PIF_LA = 45,
CUDBG_CLK = 46,
CUDBG_CIM_OBQ_RXQ0 = 47,
@@ -63,6 +67,7 @@ enum cudbg_dbg_entity_type {
CUDBG_PCIE_INDIRECT = 50,
CUDBG_PM_INDIRECT = 51,
CUDBG_TID_INFO = 54,
+ CUDBG_PCIE_CONFIG = 55,
CUDBG_DUMP_CONTEXT = 56,
CUDBG_MPS_TCAM = 57,
CUDBG_VPD_DATA = 58,
@@ -74,6 +79,7 @@ enum cudbg_dbg_entity_type {
CUDBG_PBT_TABLE = 65,
CUDBG_MBOX_LOG = 66,
CUDBG_HMA_INDIRECT = 67,
+ CUDBG_HMA = 68,
CUDBG_MAX_ENTITY = 70,
};
@@ -81,6 +87,10 @@ struct cudbg_init {
struct adapter *adap; /* Pointer to adapter structure */
void *outbuf; /* Output buffer */
u32 outbuf_size; /* Output buffer size */
+ u8 compress_type; /* Type of compression to use */
+ void *compress_buff; /* Compression buffer */
+ u32 compress_buff_size; /* Compression buffer size */
+ void *workspace; /* Workspace for zlib */
};
static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d699bf88d18f..557fd8bfd54e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -15,18 +15,65 @@
*
*/
+#include <linux/sort.h>
+
#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
-#include "cudbg_lib.h"
#include "cudbg_entity.h"
+#include "cudbg_lib.h"
+#include "cudbg_zlib.h"
-static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *dbg_buff)
+static int cudbg_do_compression(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
{
- cudbg_update_buff(pin_buff, dbg_buff);
- cudbg_put_buff(pin_buff, dbg_buff);
+ struct cudbg_buffer temp_in_buff = { 0 };
+ int bytes_left, bytes_read, bytes;
+ u32 offset = dbg_buff->offset;
+ int rc;
+
+ temp_in_buff.offset = pin_buff->offset;
+ temp_in_buff.data = pin_buff->data;
+ temp_in_buff.size = pin_buff->size;
+
+ bytes_left = pin_buff->size;
+ bytes_read = 0;
+ while (bytes_left > 0) {
+ /* Do compression in smaller chunks */
+ bytes = min_t(unsigned long, bytes_left,
+ (unsigned long)CUDBG_CHUNK_SIZE);
+ temp_in_buff.data = (char *)pin_buff->data + bytes_read;
+ temp_in_buff.size = bytes;
+ rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
+ if (rc)
+ return rc;
+ bytes_left -= bytes;
+ bytes_read += bytes;
+ }
+
+ pin_buff->size = dbg_buff->offset - offset;
+ return 0;
+}
+
+static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
+{
+ int rc = 0;
+
+ if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
+ cudbg_update_buff(pin_buff, dbg_buff);
+ } else {
+ rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
+ if (rc)
+ goto out;
+ }
+
+out:
+ cudbg_put_buff(pdbg_init, pin_buff);
+ return rc;
}
static int is_fw_attached(struct cudbg_init *pdbg_init)
@@ -84,6 +131,277 @@ static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
return 0;
}
+static int cudbg_mem_desc_cmp(const void *a, const void *b)
+{
+ return ((const struct cudbg_mem_desc *)a)->base -
+ ((const struct cudbg_mem_desc *)b)->base;
+}
+
+int cudbg_fill_meminfo(struct adapter *padap,
+ struct cudbg_meminfo *meminfo_buff)
+{
+ struct cudbg_mem_desc *md;
+ u32 lo, hi, used, alloc;
+ int n, i;
+
+ memset(meminfo_buff->avail, 0,
+ ARRAY_SIZE(meminfo_buff->avail) *
+ sizeof(struct cudbg_mem_desc));
+ memset(meminfo_buff->mem, 0,
+ (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
+ md = meminfo_buff->mem;
+
+ for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
+ meminfo_buff->mem[i].limit = 0;
+ meminfo_buff->mem[i].idx = i;
+ }
+
+ /* Find and sort the populated memory ranges */
+ i = 0;
+ lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
+ if (lo & EDRAM0_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 0;
+ i++;
+ }
+
+ if (lo & EDRAM1_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 1;
+ i++;
+ }
+
+ if (is_t5(padap->params.chip)) {
+ if (lo & EXT_MEM0_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 3;
+ i++;
+ }
+
+ if (lo & EXT_MEM1_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 4;
+ i++;
+ }
+ } else {
+ if (lo & EXT_MEM_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 2;
+ i++;
+ }
+
+ if (lo & HMA_MUX_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 5;
+ i++;
+ }
+ }
+
+ if (!i) /* no memory available */
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+ meminfo_buff->avail_c = i;
+ sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
+ cudbg_mem_desc_cmp, NULL);
+ (md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
+
+ /* the next few have explicit upper bounds */
+ md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
+ PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
+ md++;
+
+ md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
+ PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
+ md++;
+
+ if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
+ hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
+ md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+ } else {
+ hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+ md->base = t4_read_reg(padap,
+ LE_DB_HASH_TBL_BASE_ADDR_A);
+ }
+ md->limit = 0;
+ } else {
+ md->base = 0;
+ md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
+ }
+ md++;
+
+#define ulp_region(reg) do { \
+ md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
+ (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
+} while (0)
+
+ ulp_region(RX_ISCSI);
+ ulp_region(RX_TDDP);
+ ulp_region(TX_TPT);
+ ulp_region(RX_STAG);
+ ulp_region(RX_RQ);
+ ulp_region(RX_RQUDP);
+ ulp_region(RX_PBL);
+ ulp_region(TX_PBL);
+#undef ulp_region
+ md->base = 0;
+ md->idx = ARRAY_SIZE(cudbg_region);
+ if (!is_t4(padap->params.chip)) {
+ u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
+ u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
+ u32 size = 0;
+
+ if (is_t5(padap->params.chip)) {
+ if (sge_ctrl & VFIFO_ENABLE_F)
+ size = DBVFIFO_SIZE_G(fifo_size);
+ } else {
+ size = T6_DBVFIFO_SIZE_G(fifo_size);
+ }
+
+ if (size) {
+ md->base = BASEADDR_G(t4_read_reg(padap,
+ SGE_DBVFIFO_BADDR_A));
+ md->limit = md->base + (size << 2) - 1;
+ }
+ }
+
+ md++;
+
+ md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
+ md->limit = 0;
+ md++;
+ md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
+ md->limit = 0;
+ md++;
+
+ md->base = padap->vres.ocq.start;
+ if (padap->vres.ocq.size)
+ md->limit = md->base + padap->vres.ocq.size - 1;
+ else
+ md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
+ md++;
+
+ /* Add any address-space holes; there can be up to 3 */
+ for (n = 0; n < i - 1; n++)
+ if (meminfo_buff->avail[n].limit <
+ meminfo_buff->avail[n + 1].base)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ if (meminfo_buff->avail[n].limit)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ n = md - meminfo_buff->mem;
+ meminfo_buff->mem_c = n;
+
+ sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
+ cudbg_mem_desc_cmp, NULL);
+
+ lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
+ hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+ meminfo_buff->up_ram_lo = lo;
+ meminfo_buff->up_ram_hi = hi;
+
+ lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
+ hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+ meminfo_buff->up_extmem2_lo = lo;
+ meminfo_buff->up_extmem2_hi = hi;
+
+ lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
+ meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
+ meminfo_buff->rx_pages_data[1] =
+ t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
+ meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;
+
+ lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
+ hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
+ meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
+ meminfo_buff->tx_pages_data[1] =
+ hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
+ meminfo_buff->tx_pages_data[2] =
+ hi >= (1 << 20) ? 'M' : 'K';
+ meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
+
+ meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
+
+ for (i = 0; i < 4; i++) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
+ if (is_t5(padap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ meminfo_buff->port_used[i] = used;
+ meminfo_buff->port_alloc[i] = alloc;
+ }
+
+ for (i = 0; i < padap->params.arch.nchan; i++) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
+ if (is_t5(padap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ meminfo_buff->loopback_used[i] = used;
+ meminfo_buff->loopback_alloc[i] = alloc;
+ }
+
+ return 0;
+}
+
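cudbg_fill_meminfo() above leans on the kernel's sort() from <linux/sort.h>: a comparison callback plus an optional swap callback, where NULL selects the generic byte-wise swap. A tiny standalone sketch, using an overflow-safe compare rather than plain subtraction:

#include <linux/sort.h>
#include <linux/types.h>

static int cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return (x > y) - (x < y); /* avoids signed overflow of x - y */
}

static void sort_bases(u32 *bases, size_t n)
{
	sort(bases, n, sizeof(*bases), cmp_u32, NULL);
}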
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
@@ -98,12 +416,11 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
buf_size = T5_REGMAP_SIZE;
- rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
if (rc)
return rc;
t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
@@ -122,7 +439,7 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
}
dparams = &padap->params.devlog;
- rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
if (rc)
return rc;
@@ -137,12 +454,11 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
@@ -163,14 +479,14 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
}
size += sizeof(cfg);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -180,11 +496,10 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
NULL);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
@@ -196,7 +511,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
int size, rc;
size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -204,8 +519,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
(u32 *)temp_buff.data,
(u32 *)((char *)temp_buff.data +
5 * CIM_MALA_SIZE));
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
@@ -217,7 +531,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
struct cudbg_cim_qcfg *cim_qcfg_data;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
&temp_buff);
if (rc)
return rc;
@@ -228,7 +542,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -237,14 +551,13 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
cim_qcfg_data->obq_wr);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
cim_qcfg_data->thres);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
@@ -258,7 +571,7 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
/* collect CIM IBQ */
qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -272,11 +585,10 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
@@ -343,7 +655,7 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
/* collect CIM OBQ */
qsize = cudbg_cim_obq_size(padap, qid);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -357,11 +669,10 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
@@ -420,23 +731,211 @@ int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
+static int cudbg_meminfo_get_mem_index(struct adapter *padap,
+ struct cudbg_meminfo *mem_info,
+ u8 mem_type, u8 *idx)
+{
+ u8 i, flag;
+
+ switch (mem_type) {
+ case MEM_EDC0:
+ flag = EDC0_FLAG;
+ break;
+ case MEM_EDC1:
+ flag = EDC1_FLAG;
+ break;
+ case MEM_MC0:
+ /* Some T5 cards have both MC0 and MC1. */
+ flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
+ break;
+ case MEM_MC1:
+ flag = MC1_FLAG;
+ break;
+ case MEM_HMA:
+ flag = HMA_FLAG;
+ break;
+ default:
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+ }
+
+ for (i = 0; i < mem_info->avail_c; i++) {
+ if (mem_info->avail[i].idx == flag) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+}
+
+/* Fetch the @region_name's start and end from @meminfo. */
+static int cudbg_get_mem_region(struct adapter *padap,
+ struct cudbg_meminfo *meminfo,
+ u8 mem_type, const char *region_name,
+ struct cudbg_mem_desc *mem_desc)
+{
+ u8 mc, found = 0;
+ u32 i, idx = 0;
+ int rc;
+
+ rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
+ if (!strcmp(cudbg_region[i], region_name)) {
+ found = 1;
+ idx = i;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ found = 0;
+ for (i = 0; i < meminfo->mem_c; i++) {
+ if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
+ continue; /* Skip holes */
+
+ if (!(meminfo->mem[i].limit))
+ meminfo->mem[i].limit =
+ i < meminfo->mem_c - 1 ?
+ meminfo->mem[i + 1].base - 1 : ~0;
+
+ if (meminfo->mem[i].idx == idx) {
+ /* Check if the region exists in @mem_type memory */
+ if (meminfo->mem[i].base < meminfo->avail[mc].base &&
+ meminfo->mem[i].limit < meminfo->avail[mc].base)
+ return -EINVAL;
+
+ if (meminfo->mem[i].base > meminfo->avail[mc].limit)
+ return -EINVAL;
+
+ memcpy(mem_desc, &meminfo->mem[i],
+ sizeof(struct cudbg_mem_desc));
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Fetch and update the start and end of the requested memory region w.r.t 0
+ * in the corresponding EDC/MC/HMA.
+ */
+static int cudbg_get_mem_relative(struct adapter *padap,
+ struct cudbg_meminfo *meminfo,
+ u8 mem_type, u32 *out_base, u32 *out_end)
+{
+ u8 mc_idx;
+ int rc;
+
+ rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
+ if (rc)
+ return rc;
+
+ if (*out_base < meminfo->avail[mc_idx].base)
+ *out_base = 0;
+ else
+ *out_base -= meminfo->avail[mc_idx].base;
+
+ if (*out_end > meminfo->avail[mc_idx].limit)
+ *out_end = meminfo->avail[mc_idx].limit;
+ else
+ *out_end -= meminfo->avail[mc_idx].base;
+
+ return 0;
+}
+
+/* Get TX and RX Payload region */
+static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
+ const char *region_name,
+ struct cudbg_region_info *payload)
+{
+ struct cudbg_mem_desc mem_desc = { 0 };
+ struct cudbg_meminfo meminfo;
+ int rc;
+
+ rc = cudbg_fill_meminfo(padap, &meminfo);
+ if (rc)
+ return rc;
+
+ rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
+ &mem_desc);
+ if (rc) {
+ payload->exist = false;
+ return 0;
+ }
+
+ payload->exist = true;
+ payload->start = mem_desc.base;
+ payload->end = mem_desc.limit;
+
+ return cudbg_get_mem_relative(padap, &meminfo, mem_type,
+ &payload->start, &payload->end);
+}
+
+#define CUDBG_YIELD_ITERATION 256
+
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff, u8 mem_type,
unsigned long tot_len,
struct cudbg_error *cudbg_err)
{
+ static const char * const region_name[] = { "Tx payload:",
+ "Rx payload:" };
unsigned long bytes, bytes_left, bytes_read = 0;
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_region_info payload[2];
+ u32 yield_count = 0;
int rc = 0;
+ u8 i;
+
+ /* Get TX/RX Payload region range if they exist */
+ memset(payload, 0, sizeof(payload));
+ for (i = 0; i < ARRAY_SIZE(region_name); i++) {
+ rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
+ &payload[i]);
+ if (rc)
+ return rc;
+
+ if (payload[i].exist) {
+ /* Align start and end to avoid wrap around */
+ payload[i].start = roundup(payload[i].start,
+ CUDBG_CHUNK_SIZE);
+ payload[i].end = rounddown(payload[i].end,
+ CUDBG_CHUNK_SIZE);
+ }
+ }
bytes_left = tot_len;
while (bytes_left > 0) {
+ /* As the MC size is huge and it is read through PIO access, this
+ * loop can hold the CPU for a long time. The OS may think that
+ * the process has hung and generate CPU stall traces,
+ * so yield the CPU regularly.
+ */
+ yield_count++;
+ if (!(yield_count % CUDBG_YIELD_ITERATION))
+ schedule();
+
bytes = min_t(unsigned long, bytes_left,
(unsigned long)CUDBG_CHUNK_SIZE);
- rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
if (rc)
return rc;
+
+ for (i = 0; i < ARRAY_SIZE(payload); i++)
+ if (payload[i].exist &&
+ bytes_read >= payload[i].start &&
+ bytes_read + bytes <= payload[i].end)
+ /* TX and RX Payload regions can't overlap */
+ goto skip_read;
+
spin_lock(&padap->win0_lock);
rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
bytes_read, bytes,
@@ -445,37 +944,23 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
+
+skip_read:
bytes_left -= bytes;
bytes_read += bytes;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
+ dbg_buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
}
return rc;
}
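The yield in cudbg_read_fw_mem() above is a standard long-loop courtesy: without it, a multi-gigabyte PIO read could trigger soft-lockup or RCU stall warnings. A generic sketch of the pattern; cond_resched() is the more common idiom, while the driver calls schedule() directly.

#include <linux/sched.h>

/* Illustrative: yield every 256 chunks of a long process-context loop. */
static void process_chunks(unsigned long nchunks)
{
	unsigned long i;

	for (i = 0; i < nchunks; i++) {
		/* ... read/copy one chunk here ... */
		if (!((i + 1) % 256))
			cond_resched();
	}
}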
-static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
- struct card_mem *mem_info)
-{
- struct adapter *padap = pdbg_init->adap;
- u32 value;
-
- value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
- value = EDRAM0_SIZE_G(value);
- mem_info->size_edc0 = (u16)value;
-
- value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
- value = EDRAM1_SIZE_G(value);
- mem_info->size_edc1 = (u16)value;
-
- value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
- if (value & EDRAM0_ENABLE_F)
- mem_info->mem_flag |= (1 << EDC0_FLAG);
- if (value & EDRAM1_ENABLE_F)
- mem_info->mem_flag |= (1 << EDC1_FLAG);
-}
-
static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err)
{
@@ -495,37 +980,25 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
{
- struct card_mem mem_info = {0};
- unsigned long flag, size;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_meminfo mem_info;
+ unsigned long size;
+ u8 mc_idx;
int rc;
+ memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
+ rc = cudbg_fill_meminfo(padap, &mem_info);
+ if (rc)
+ return rc;
+
cudbg_t4_fwcache(pdbg_init, cudbg_err);
- cudbg_collect_mem_info(pdbg_init, &mem_info);
- switch (mem_type) {
- case MEM_EDC0:
- flag = (1 << EDC0_FLAG);
- size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
- break;
- case MEM_EDC1:
- flag = (1 << EDC1_FLAG);
- size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
- break;
- default:
- rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
- goto err;
- }
+ rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
+ if (rc)
+ return rc;
- if (mem_info.mem_flag & flag) {
- rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
- size, cudbg_err);
- if (rc)
- goto err;
- } else {
- rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
- goto err;
- }
-err:
- return rc;
+ size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+ return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+ cudbg_err);
}
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
@@ -544,26 +1017,51 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
MEM_EDC1);
}
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_MC0);
+}
+
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_MC1);
+}
+
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_HMA);
+}
+
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
- int rc;
+ int rc, nentries;
- rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
+ nentries = t4_chip_rss_size(padap);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
rc = t4_read_rss(padap, (u16 *)temp_buff.data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
@@ -576,7 +1074,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
int vf, rc, vf_count;
vf_count = padap->params.arch.vfcount;
- rc = cudbg_get_buff(dbg_buff,
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
vf_count * sizeof(struct cudbg_rss_vf_conf),
&temp_buff);
if (rc)
@@ -586,8 +1084,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
for (vf = 0; vf < vf_count; vf++)
t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh, true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
@@ -598,13 +1095,13 @@ int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
struct cudbg_buffer temp_buff = { 0 };
int rc;
- rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
@@ -616,7 +1113,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
struct cudbg_pm_stats *pm_stats_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
&temp_buff);
if (rc)
return rc;
@@ -624,8 +1121,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
@@ -640,7 +1136,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
&temp_buff);
hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
@@ -649,8 +1145,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
for (i = 0; i < NTX_SCHED; ++i)
t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
&hw_sched_buff->ipg[i], true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
@@ -674,7 +1169,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
n = n / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -763,8 +1258,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
tp_pio->ireg_local_offset, true);
ch_tp_pio++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
@@ -776,7 +1270,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct ireg_buf *ch_sge_dbg;
int i, rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2,
+ &temp_buff);
if (rc)
return rc;
@@ -797,8 +1292,7 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
sge_pio->ireg_local_offset);
ch_sge_dbg++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
@@ -810,7 +1304,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
struct cudbg_ulprx_la *ulprx_la_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
&temp_buff);
if (rc)
return rc;
@@ -818,8 +1312,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
ulprx_la_buff->size = ULPRX_LA_SIZE;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
@@ -832,15 +1325,39 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
int size, rc;
size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
+
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_meminfo *meminfo_buff;
+ int rc;
+
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
+ rc = cudbg_fill_meminfo(padap, meminfo_buff);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
+
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
@@ -854,7 +1371,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_cim_pif_la) +
2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -863,8 +1380,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
(u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
NULL, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
@@ -880,7 +1396,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
&temp_buff);
if (rc)
return rc;
@@ -912,8 +1428,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
clk_info_buff->finwait2_timer =
tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
@@ -928,7 +1443,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -969,8 +1484,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
pcie_pio->ireg_local_offset);
ch_pcie++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
@@ -985,7 +1499,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1026,8 +1540,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
pm_pio->ireg_local_offset);
ch_pm++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
@@ -1041,7 +1554,8 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
u32 para[2], val[2];
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_tid_info_region_rev1),
&temp_buff);
if (rc)
return rc;
@@ -1053,6 +1567,12 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
sizeof(struct cudbg_ver_hdr);
+ /* If firmware is not attached/alive, use backdoor register
+ * access to collect dump.
+ */
+ if (!is_fw_attached(pdbg_init))
+ goto fill_tid;
+
#define FW_PARAM_PFVF_A(param) \
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
@@ -1064,7 +1584,7 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->uotid_base = val[0];
@@ -1083,13 +1603,16 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->hpftid_base = val[0];
tid->nhpftids = val[1] - val[0] + 1;
}
+#undef FW_PARAM_PFVF_A
+
+fill_tid:
tid->ntids = padap->tids.ntids;
tid->nstids = padap->tids.nstids;
tid->stid_base = padap->tids.stid_base;
@@ -1109,28 +1632,137 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
-#undef FW_PARAM_PFVF_A
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
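
The goto in the tid collector is the first instance of a guard that recurs through the rest of this patch: use the firmware mailbox when it is attached and alive, otherwise (or when the mailbox call fails) fall back to direct register reads. A condensed sketch of that control flow, with fw_read() and reg_read() as hypothetical stand-ins for the mailbox and backdoor paths:

/* Sketch only: fw_read()/reg_read() stand in for calls such as
 * t4_query_params()/t4_wr_mbox() and plain t4_read_reg() loops.
 */
static int collect_with_fallback(struct cudbg_init *pdbg_init, u32 *val)
{
	int rc = 0;

	if (is_fw_attached(pdbg_init))
		rc = fw_read(pdbg_init->adap, val);	/* mailbox path */

	if (rc || !is_fw_attached(pdbg_init)) {
		reg_read(pdbg_init->adap, val);		/* backdoor path */
		rc = 0;	/* register reads supersede the mailbox error */
	}

	return rc;
}
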
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ u32 size, *value, j;
+ int i, rc, n;
+
+ size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+ n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ value = (u32 *)temp_buff.data;
+ for (i = 0; i < n; i++) {
+ for (j = t5_pcie_config_array[i][0];
+ j <= t5_pcie_config_array[i][1]; j += 4) {
+ t4_hw_pci_read_cfg4(padap, j, value);
+ value++;
+ }
+ }
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
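
Each row of t5_pcie_config_array holds an inclusive [start, end] address pair that the loop above walks in 4-byte steps, so CUDBG_NUM_PCIE_CONFIG_REGS must equal the sum of the per-row register counts. A worked check against a hypothetical two-row table:

/* Illustrative table only; the real t5_pcie_config_array is larger. */
static const u32 example_pcie_config_array[][2] = {
	{ 0x00, 0x3c },	/* (0x3c - 0x00) / 4 + 1 = 16 registers */
	{ 0x70, 0x80 },	/* (0x80 - 0x70) / 4 + 1 =  5 registers */
};
/* => 21 u32s dumped, so the size constant would have to be 21. */
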
-int cudbg_dump_context_size(struct adapter *padap)
+static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
+{
+ int index, bit, bit_pos = 0;
+
+ switch (type) {
+ case CTXT_EGRESS:
+ bit_pos = 176;
+ break;
+ case CTXT_INGRESS:
+ bit_pos = 141;
+ break;
+ case CTXT_FLM:
+ bit_pos = 89;
+ break;
+ }
+ index = bit_pos / 32;
+ bit = bit_pos % 32;
+ return buf[index] & (1U << bit);
+}
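
cudbg_sge_ctxt_check_valid() treats the raw context as a flat bit array spread over u32 words, so an egress context's valid bit at absolute position 176 lands in word 176 / 32 = 5, bit 176 % 32 = 16. A worked example:

/* Worked example for CTXT_EGRESS (bit_pos = 176). */
u32 ctxt[SGE_CTXT_SIZE / sizeof(u32)] = { 0 };

ctxt[176 / 32] |= 1U << (176 % 32);	/* set word 5, bit 16 */
/* cudbg_sge_ctxt_check_valid(ctxt, CTXT_EGRESS) now returns non-zero. */
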
+
+static int cudbg_get_ctxt_region_info(struct adapter *padap,
+ struct cudbg_region_info *ctx_info,
+ u8 *mem_type)
{
- u32 value, size;
+ struct cudbg_mem_desc mem_desc;
+ struct cudbg_meminfo meminfo;
+ u32 i, j, value, found;
u8 flq;
+ int rc;
+
+ rc = cudbg_fill_meminfo(padap, &meminfo);
+ if (rc)
+ return rc;
+
+ /* Get EGRESS and INGRESS context region size */
+ for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+ found = 0;
+ memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
+ for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
+ rc = cudbg_get_mem_region(padap, &meminfo, j,
+ cudbg_region[i],
+ &mem_desc);
+ if (!rc) {
+ found = 1;
+ rc = cudbg_get_mem_relative(padap, &meminfo, j,
+ &mem_desc.base,
+ &mem_desc.limit);
+ if (rc) {
+ ctx_info[i].exist = false;
+ break;
+ }
+ ctx_info[i].exist = true;
+ ctx_info[i].start = mem_desc.base;
+ ctx_info[i].end = mem_desc.limit;
+ mem_type[i] = j;
+ break;
+ }
+ }
+ if (!found)
+ ctx_info[i].exist = false;
+ }
+ /* Get FLM and CNM max qid. */
value = t4_read_reg(padap, SGE_FLM_CFG_A);
/* Get number of data freelist queues */
flq = HDRSTARTFLQ_G(value);
- size = CUDBG_MAX_FL_QIDS >> flq;
+ ctx_info[CTXT_FLM].exist = true;
+ ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
- /* Add extra space for congestion manager contexts.
- * The number of CONM contexts are same as number of freelist
+	/* The number of CONM contexts is the same as the number of freelist
* queues.
*/
- size += size;
+ ctx_info[CTXT_CNM].exist = true;
+ ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
+
+ return 0;
+}
+
+int cudbg_dump_context_size(struct adapter *padap)
+{
+ struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
+ u8 mem_type[CTXT_INGRESS + 1] = { 0 };
+ u32 i, size = 0;
+ int rc;
+
+ /* Get max valid qid for each type of queue */
+ rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < CTXT_CNM; i++) {
+ if (!region_info[i].exist) {
+ if (i == CTXT_EGRESS || i == CTXT_INGRESS)
+ size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
+ SGE_CTXT_SIZE;
+ continue;
+ }
+
+ size += (region_info[i].end - region_info[i].start + 1) /
+ SGE_CTXT_SIZE;
+ }
return size * sizeof(struct cudbg_ch_cntxt);
}
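
Note that "size" accumulates queue counts, not bytes; the byte total only appears in the final multiply. A worked example with illustrative numbers (the SGE_CTXT_SIZE value is assumed here purely for the arithmetic):

/* Illustrative arithmetic only:
 *   egress region span:  end - start + 1 = 2400 bytes
 *   SGE_CTXT_SIZE:       24 bytes (assumed)
 *   => 2400 / 24 = 100 egress entries counted into "size"
 *   return: size * sizeof(struct cudbg_ch_cntxt) bytes
 */
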
@@ -1153,44 +1785,144 @@ static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}
+static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
+ u8 ctxt_type,
+ struct cudbg_ch_cntxt **out_buff)
+{
+ struct cudbg_ch_cntxt *buff = *out_buff;
+ int rc;
+ u32 j;
+
+ for (j = 0; j < max_qid; j++) {
+ cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
+ rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
+ if (!rc)
+ continue;
+
+ buff->cntxt_type = ctxt_type;
+ buff->cntxt_id = j;
+ buff++;
+ if (ctxt_type == CTXT_FLM) {
+ cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
+ buff->cntxt_type = CTXT_CNM;
+ buff->cntxt_id = j;
+ buff++;
+ }
+ }
+
+ *out_buff = buff;
+}
+
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
+ struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
struct adapter *padap = pdbg_init->adap;
+ u32 j, size, max_ctx_size, max_ctx_qid;
+ u8 mem_type[CTXT_INGRESS + 1] = { 0 };
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ch_cntxt *buff;
- u32 size, i = 0;
+ u64 *dst_off, *src_off;
+ u8 *ctx_buf;
+ u8 i, k;
int rc;
+ /* Get max valid qid for each type of queue */
+ rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+ if (rc)
+ return rc;
+
rc = cudbg_dump_context_size(padap);
if (rc <= 0)
return CUDBG_STATUS_ENTITY_NOT_FOUND;
size = rc;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
+ /* Get buffer with enough space to read the biggest context
+ * region in memory.
+ */
+ max_ctx_size = max(region_info[CTXT_EGRESS].end -
+ region_info[CTXT_EGRESS].start + 1,
+ region_info[CTXT_INGRESS].end -
+ region_info[CTXT_INGRESS].start + 1);
+
+ ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
+ if (!ctx_buf) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return -ENOMEM;
+ }
+
buff = (struct cudbg_ch_cntxt *)temp_buff.data;
- while (size > 0) {
- buff->cntxt_type = CTXT_FLM;
- buff->cntxt_id = i;
- cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
- buff++;
- size -= sizeof(struct cudbg_ch_cntxt);
- buff->cntxt_type = CTXT_CNM;
- buff->cntxt_id = i;
- cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
- buff++;
- size -= sizeof(struct cudbg_ch_cntxt);
+ /* Collect EGRESS and INGRESS context data.
+	 * In case of failures, fall back to collecting via FW or
+	 * backdoor access.
+ */
+ for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+ if (!region_info[i].exist) {
+ max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+ &buff);
+ continue;
+ }
- i++;
+ max_ctx_size = region_info[i].end - region_info[i].start + 1;
+ max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+
+ /* If firmware is not attached/alive, use backdoor register
+ * access to collect dump.
+ */
+ if (is_fw_attached(pdbg_init)) {
+ t4_sge_ctxt_flush(padap, padap->mbox, i);
+
+ rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
+ region_info[i].start, max_ctx_size,
+ (__be32 *)ctx_buf, 1);
+ }
+
+ if (rc || !is_fw_attached(pdbg_init)) {
+ max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+ &buff);
+ continue;
+ }
+
+ for (j = 0; j < max_ctx_qid; j++) {
+ src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
+ dst_off = (u64 *)buff->data;
+
+ /* The data is stored in 64-bit cpu order. Convert it
+ * to big endian before parsing.
+ */
+ for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
+ dst_off[k] = cpu_to_be64(src_off[k]);
+
+ rc = cudbg_sge_ctxt_check_valid(buff->data, i);
+ if (!rc)
+ continue;
+
+ buff->cntxt_type = i;
+ buff->cntxt_id = j;
+ buff++;
+ }
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ kvfree(ctx_buf);
+
+ /* Collect FREELIST and CONGESTION MANAGER contexts */
+ max_ctx_size = region_info[CTXT_FLM].end -
+ region_info[CTXT_FLM].start + 1;
+ max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+	/* Since FLM and CONM are mapped 1-to-1, the function below
+	 * fetches both FLM and CONM contexts.
+ */
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
+
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
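
The in-memory contexts fetched with t4_memory_rw() arrive as 64-bit words in CPU order, while the valid-bit check expects the hardware's big-endian layout; hence the cpu_to_be64() pass before parsing. The per-queue conversion, pulled out as a standalone sketch:

/* Standalone view of the byte-swap loop above; src points at one raw
 * SGE_CTXT_SIZE-byte context read from adapter memory.
 */
static void ctxt_to_be64(const u64 *src, u64 *dst)
{
	unsigned int k;

	for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
		dst[k] = cpu_to_be64(src[k]);	/* no-op on big-endian hosts */
}
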
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -1228,9 +1960,10 @@ static void cudbg_mps_rpl_backdoor(struct adapter *padap,
mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}
-static int cudbg_collect_tcam_index(struct adapter *padap,
+static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
struct cudbg_mps_tcam *tcam, u32 idx)
{
+ struct adapter *padap = pdbg_init->adap;
u64 tcamy, tcamx, val;
u32 ctl, data2;
int rc = 0;
@@ -1315,12 +2048,22 @@ static int cudbg_collect_tcam_index(struct adapter *padap,
htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
FW_LDST_CMD_IDX_V(idx));
- rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
- &ldst_cmd);
- if (rc)
+ /* If firmware is not attached/alive, use backdoor register
+ * access to collect dump.
+ */
+ if (is_fw_attached(pdbg_init))
+ rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+
+ if (rc || !is_fw_attached(pdbg_init)) {
cudbg_mps_rpl_backdoor(padap, &mps_rplc);
- else
+		/* Ignore the error since the data was collected directly
+		 * by reading registers.
+ */
+ rc = 0;
+ } else {
mps_rplc = ldst_cmd.u.mps.rplc;
+ }
tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
@@ -1351,16 +2094,16 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
n = padap->params.arch.mps_tcam_size;
size = sizeof(struct cudbg_mps_tcam) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
tcam = (struct cudbg_mps_tcam *)temp_buff.data;
for (i = 0; i < n; i++) {
- rc = cudbg_collect_tcam_index(padap, tcam, i);
+ rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
total_size += sizeof(struct cudbg_mps_tcam);
@@ -1370,11 +2113,10 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
if (!total_size) {
rc = CUDBG_SYSTEM_ERROR;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
@@ -1425,7 +2167,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
if (rc)
return rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
&temp_buff);
if (rc)
return rc;
@@ -1441,8 +2183,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
@@ -1593,7 +2334,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
size += sizeof(struct cudbg_tcam);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1605,7 +2346,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
rc = cudbg_read_tid(pdbg_init, i, tid_data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -1616,8 +2357,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
bytes += sizeof(struct cudbg_tid_data);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
@@ -1630,13 +2370,12 @@ int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
int rc;
size = sizeof(u16) * NMTUS * NCCTRL_WIN;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
t4_read_cong_tbl(padap, (void *)temp_buff.data);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
@@ -1654,7 +2393,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1690,8 +2429,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
}
ma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
@@ -1704,7 +2442,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
u32 i, j;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;
@@ -1725,8 +2463,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
@@ -1735,13 +2472,23 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ u32 local_offset, local_range;
struct ireg_buf *up_cim;
+ u32 size, j, iter;
+ u32 instance = 0;
int i, rc, n;
- u32 size;
- n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ if (is_t5(padap->params.chip))
+ n = sizeof(t5_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else if (is_t6(padap->params.chip))
+ n = sizeof(t6_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else
+ return CUDBG_STATUS_NOT_IMPLEMENTED;
+
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1757,6 +2504,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
t5_up_cim_reg_array[i][2];
up_cim_reg->ireg_offset_range =
t5_up_cim_reg_array[i][3];
+ instance = t5_up_cim_reg_array[i][4];
} else if (is_t6(padap->params.chip)) {
up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
@@ -1764,18 +2512,39 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
t6_up_cim_reg_array[i][2];
up_cim_reg->ireg_offset_range =
t6_up_cim_reg_array[i][3];
+ instance = t6_up_cim_reg_array[i][4];
}
- rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
- up_cim_reg->ireg_offset_range, buff);
- if (rc) {
- cudbg_put_buff(&temp_buff, dbg_buff);
- return rc;
+ switch (instance) {
+ case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
+ iter = up_cim_reg->ireg_offset_range;
+ local_offset = 0x120;
+ local_range = 1;
+ break;
+ case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
+ iter = up_cim_reg->ireg_offset_range;
+ local_offset = 0x10;
+ local_range = 1;
+ break;
+ default:
+ iter = 1;
+ local_offset = 0;
+ local_range = up_cim_reg->ireg_offset_range;
+ break;
+ }
+
+ for (j = 0; j < iter; j++, buff++) {
+ rc = t4_cim_read(padap,
+ up_cim_reg->ireg_local_offset +
+ (j * local_offset), local_range, buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
}
up_cim++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
@@ -1788,7 +2557,8 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
int i, rc;
u32 addr;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_pbt_tables),
&temp_buff);
if (rc)
return rc;
@@ -1801,7 +2571,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_dynamic[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1814,7 +2584,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_static[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1826,7 +2596,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->lrf_table[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1838,12 +2608,11 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_data[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
@@ -1864,7 +2633,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
log = padap->mbox_log;
mbox_cmds = padap->mbox_log->size;
size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1887,8 +2656,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
}
mboxlog++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
@@ -1906,7 +2674,7 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1924,6 +2692,5 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
hma_fli->ireg_local_offset);
hma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
index caeee8e33e86..eebefe7cd18e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
@@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -102,6 +108,9 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -123,6 +132,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -156,6 +168,9 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
@@ -163,7 +178,8 @@ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
int cudbg_dump_context_size(struct adapter *padap);
-struct cudbg_tcam;
+int cudbg_fill_meminfo(struct adapter *padap,
+ struct cudbg_meminfo *meminfo_buff);
void cudbg_fill_le_tcam_info(struct adapter *padap,
struct cudbg_tcam *tcam_region);
#endif /* __CUDBG_LIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
index 24b33f28e548..8150ea85d6a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
@@ -26,6 +26,7 @@ enum cudbg_dump_type {
enum cudbg_compression_type {
CUDBG_COMPRESSION_NONE = 1,
+ CUDBG_COMPRESSION_ZLIB,
};
struct cudbg_hdr {
@@ -78,10 +79,11 @@ struct cudbg_error {
#define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1)
#define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024)
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff);
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff);
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff);
void cudbg_update_buff(struct cudbg_buffer *pin_buff,
struct cudbg_buffer *pout_buff);
#endif /* __CUDBG_LIB_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
new file mode 100644
index 000000000000..25cc06d75cff
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/zlib.h>
+
+#include "cxgb4.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_zlib.h"
+
+static int cudbg_get_compress_hdr(struct cudbg_buffer *pdbg_buff,
+ struct cudbg_buffer *pin_buff)
+{
+ if (pdbg_buff->offset + sizeof(struct cudbg_compress_hdr) >
+ pdbg_buff->size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_buff->data + pdbg_buff->offset;
+ pin_buff->offset = 0;
+ pin_buff->size = sizeof(struct cudbg_compress_hdr);
+ pdbg_buff->offset += sizeof(struct cudbg_compress_hdr);
+ return 0;
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff)
+{
+ struct cudbg_buffer temp_buff = { 0 };
+ struct z_stream_s compress_stream;
+ struct cudbg_compress_hdr *c_hdr;
+ int rc;
+
+ /* Write compression header to output buffer before compression */
+ rc = cudbg_get_compress_hdr(pout_buff, &temp_buff);
+ if (rc)
+ return rc;
+
+ c_hdr = (struct cudbg_compress_hdr *)temp_buff.data;
+ c_hdr->compress_id = CUDBG_ZLIB_COMPRESS_ID;
+
+ memset(&compress_stream, 0, sizeof(struct z_stream_s));
+ compress_stream.workspace = pdbg_init->workspace;
+ rc = zlib_deflateInit2(&compress_stream, Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED, CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL, Z_DEFAULT_STRATEGY);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ compress_stream.next_in = pin_buff->data;
+ compress_stream.avail_in = pin_buff->size;
+ compress_stream.next_out = pout_buff->data + pout_buff->offset;
+ compress_stream.avail_out = pout_buff->size - pout_buff->offset;
+
+ rc = zlib_deflate(&compress_stream, Z_FINISH);
+ if (rc != Z_STREAM_END)
+ return CUDBG_SYSTEM_ERROR;
+
+ rc = zlib_deflateEnd(&compress_stream);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ c_hdr->compress_size = compress_stream.total_out;
+ c_hdr->decompress_size = pin_buff->size;
+ pout_buff->offset += compress_stream.total_out;
+
+ return 0;
+}
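
cudbg_zlib.c carries only the deflate side; whoever reads the dump is expected to locate each cudbg_compress_hdr and inflate compress_size bytes back into decompress_size bytes. A hedged sketch of that consumer, written against the kernel zlib API for symmetry (the real consumer would normally live in user space, and this function is not part of the patch):

/* Sketch only. Window bits must match CUDBG_ZLIB_WIN_BITS used at
 * compression time; workspace must hold zlib_inflate_workspacesize()
 * bytes. Compressed data follows the header, as laid out above.
 */
static int cudbg_uncompress_buff(void *workspace,
				 struct cudbg_compress_hdr *c_hdr,
				 void *out_buff)
{
	struct z_stream_s stream;

	memset(&stream, 0, sizeof(stream));
	stream.workspace = workspace;
	if (zlib_inflateInit2(&stream, CUDBG_ZLIB_WIN_BITS) != Z_OK)
		return CUDBG_SYSTEM_ERROR;

	stream.next_in = (u8 *)(c_hdr + 1);	/* data follows the header */
	stream.avail_in = c_hdr->compress_size;
	stream.next_out = out_buff;
	stream.avail_out = c_hdr->decompress_size;

	if (zlib_inflate(&stream, Z_FINISH) != Z_STREAM_END)
		return CUDBG_SYSTEM_ERROR;

	return zlib_inflateEnd(&stream) == Z_OK ? 0 : CUDBG_SYSTEM_ERROR;
}
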
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
new file mode 100644
index 000000000000..60d23805dfc3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_ZLIB_H__
+#define __CUDBG_ZLIB_H__
+
+#include <linux/zlib.h>
+
+#define CUDBG_ZLIB_COMPRESS_ID 17
+#define CUDBG_ZLIB_WIN_BITS 12
+#define CUDBG_ZLIB_MEM_LVL 4
+
+struct cudbg_compress_hdr {
+ u32 compress_id;
+ u64 decompress_size;
+ u64 compress_size;
+ u64 rsvd[32];
+};
+
+static inline int cudbg_get_workspace_size(void)
+{
+ return zlib_deflate_workspacesize(CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL);
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff);
+#endif /* __CUDBG_ZLIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6f9fa6e3c42a..9040e13ce4b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -58,6 +58,13 @@
extern struct list_head adapter_list;
extern struct mutex uld_mutex;
+/* Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+ (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+
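
With the usual MAX_SKB_FRAGS of 17 (64 KiB max payload over 4 KiB pages; an assumption, since the value is config-dependent), the threshold arithmetic works out as:

/* Worked example, assuming MAX_SKB_FRAGS == 17:
 *   SGL flits: (3 * 17) / 2 + (17 & 1) = 25 + 1 = 26
 *   descriptors for the SGL: DIV_ROUND_UP(26, 8) = 4
 *   plus 1 for the WR/CPL headers => ETHTXQ_STOP_THRES = 5
 */
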
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -77,7 +84,8 @@ enum {
MEM_EDC1,
MEM_MC,
MEM_MC0 = MEM_MC,
- MEM_MC1
+ MEM_MC1,
+ MEM_HMA,
};
enum {
@@ -311,6 +319,7 @@ struct vpd_params {
};
struct pci_params {
+ unsigned int vpd_cap_addr;
unsigned char speed;
unsigned char width;
};
@@ -344,7 +353,6 @@ struct adapter_params {
unsigned int sf_size; /* serial flash size in bytes */
unsigned int sf_nsec; /* # of flash sectors */
- unsigned int sf_fw_start; /* start of FW image in flash */
unsigned int fw_vers; /* firmware version */
unsigned int bs_vers; /* bootstrap version */
@@ -564,6 +572,7 @@ enum { /* adapter flags */
enum {
ULP_CRYPTO_LOOKASIDE = 1 << 0,
+ ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
};
struct rx_sw_desc;
@@ -819,12 +828,17 @@ struct vf_info {
unsigned char vf_mac_addr[ETH_ALEN];
unsigned int tx_rate;
bool pf_set_mac;
+ u16 vlan;
};
struct mbox_list {
struct list_head list;
};
+struct mps_encap_entry {
+ atomic_t refcnt;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -839,6 +853,10 @@ struct adapter {
enum chip_type chip;
int msg_enable;
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
+ __be16 geneve_port;
+ u8 geneve_port_cnt;
struct adapter_params params;
struct cxgb4_virt_res vres;
@@ -868,7 +886,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
+ unsigned int rawf_start;
+ unsigned int rawf_cnt;
struct smt_data *smt;
+ struct mps_encap_entry *mps_encap;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -968,6 +989,11 @@ enum {
SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
};
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ struct ulptx_sgl *sgl;
+};
+
/* Support for "sched_queue" command to allow one or more NIC TX Queues
* to be bound to a TX Scheduling Class.
*/
@@ -1305,6 +1331,7 @@ void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
extern int dbfifo_int_thresh;
#define for_each_port(adapter, iter) \
@@ -1423,6 +1450,21 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
q->size = size;
}
+/**
+ * t4_is_inserted_mod_type - is a plugged-in Firmware Module Type
+ * @fw_mod_type: the Firmware Module Type
+ *
+ * Return whether the Firmware Module Type represents a real Transceiver
+ * Module/Cable Module Type which has been inserted.
+ */
+static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
+{
+ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
+ fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
+ fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
@@ -1512,6 +1554,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
+unsigned int t4_chip_rss_size(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -1621,6 +1664,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
@@ -1653,7 +1702,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_update_port_info(struct port_info *pi);
int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
@@ -1696,8 +1745,23 @@ void t4_uld_mem_free(struct adapter *adap);
int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
void t4_register_netevent_notifier(void);
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ unsigned int devid, unsigned int offset,
+ unsigned int len, u8 *buf);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
void free_txq(struct adapter *adap, struct sge_txq *q);
+void cxgb4_reclaim_completed_tx(struct adapter *adap,
+ struct sge_txq *q, bool unmap);
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr);
+void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+ void *pos);
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr);
+void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+ u16 vlan);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 29cc625e9833..30485f9a598f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -18,11 +18,14 @@
#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
-#include "cudbg_entity.h"
+#include "cudbg_zlib.h"
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
+ { CUDBG_MC0, cudbg_collect_mc0_meminfo },
+ { CUDBG_MC1, cudbg_collect_mc1_meminfo },
+ { CUDBG_HMA, cudbg_collect_hma_meminfo },
};
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
@@ -53,6 +56,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
{ CUDBG_TP_LA, cudbg_collect_tp_la },
+ { CUDBG_MEMINFO, cudbg_collect_meminfo },
{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
{ CUDBG_CLK, cudbg_collect_clk_info },
{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
@@ -60,6 +64,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
{ CUDBG_TID_INFO, cudbg_collect_tid },
+ { CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
@@ -158,8 +163,24 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
len = cudbg_mbytes_to_bytes(len);
break;
+ case CUDBG_MC0:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EXT_MEM0_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ len = EXT_MEM0_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
+ case CUDBG_MC1:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EXT_MEM1_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ len = EXT_MEM1_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
case CUDBG_RSS:
- len = RSS_NENTRIES * sizeof(u16);
+ len = t4_chip_rss_size(adap) * sizeof(u16);
break;
case CUDBG_RSS_VF_CONF:
len = adap->params.arch.vfcount *
@@ -201,6 +222,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_TP_LA:
len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
break;
+ case CUDBG_MEMINFO:
+ len = sizeof(struct cudbg_meminfo);
+ break;
case CUDBG_CIM_PIF_LA:
len = sizeof(struct cudbg_cim_pif_la);
len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
@@ -219,6 +243,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_TID_INFO:
len = sizeof(struct cudbg_tid_info_region_rev1);
break;
+ case CUDBG_PCIE_CONFIG:
+ len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+ break;
case CUDBG_DUMP_CONTEXT:
len = cudbg_dump_context_size(adap);
break;
@@ -248,7 +275,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct cudbg_ulptx_la);
break;
case CUDBG_UP_CIM_INDIRECT:
- n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ n = 0;
+ if (is_t5(adap->params.chip))
+ n = sizeof(t5_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else if (is_t6(adap->params.chip))
+ n = sizeof(t6_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
len = sizeof(struct ireg_buf) * n;
break;
case CUDBG_PBT_TABLE:
@@ -264,6 +297,17 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct ireg_buf) * n;
}
break;
+ case CUDBG_HMA:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & HMA_MUX_F) {
+ /* In T6, there's no MC1. So, HMA shares MC1
+ * address space.
+ */
+ value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ len = EXT_MEM1_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
default:
break;
}
@@ -275,6 +319,7 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
u32 i, entity;
u32 len = 0;
+ u32 wsize;
if (flag & CXGB4_ETH_DUMP_HW) {
for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
@@ -290,6 +335,11 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
}
}
+ /* If compression is enabled, a smaller destination buffer is enough */
+ wsize = cudbg_get_workspace_size();
+ if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
+ len = CUDBG_DUMP_BUFF_SIZE;
+
return len;
}
@@ -298,22 +348,14 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
const struct cxgb4_collect_entity *e_arr,
u32 arr_size, void *buf, u32 *tot_size)
{
- struct adapter *adap = pdbg_init->adap;
struct cudbg_error cudbg_err = { 0 };
struct cudbg_entity_hdr *entity_hdr;
- u32 entity_size, i;
- u32 total_size = 0;
+ u32 i, total_size = 0;
int ret;
for (i = 0; i < arr_size; i++) {
const struct cxgb4_collect_entity *e = &e_arr[i];
- /* Skip entities that won't fit in output buffer */
- entity_size = cxgb4_get_entity_length(adap, e->entity);
- if (entity_size >
- pdbg_init->outbuf_size - *tot_size - total_size)
- continue;
-
entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
entity_hdr->entity_type = e->entity;
entity_hdr->start_offset = dbg_buff->offset;
@@ -339,16 +381,40 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
*tot_size += total_size;
}
+static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
+{
+ u32 workspace_size;
+
+ workspace_size = cudbg_get_workspace_size();
+ pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
+ workspace_size);
+ if (!pdbg_init->compress_buff)
+ return -ENOMEM;
+
+ pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
+ pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
+ CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
+ return 0;
+}
+
+static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
+{
+ if (pdbg_init->compress_buff)
+ vfree(pdbg_init->compress_buff);
+}
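
cudbg_alloc_compress_buff() makes a single vzalloc() and derives both pointers from it; note the workspace pointer is placed wsize bytes before the end of the staging region, leaving the final wsize bytes of the allocation as slack. The pointer arithmetic, spelled out:

/* With wsize = cudbg_get_workspace_size():
 *   vzalloc length:  CUDBG_COMPRESS_BUFF_SIZE + wsize
 *   compress_buff:   offset 0 (the 4 MB staging area)
 *   workspace:       offset CUDBG_COMPRESS_BUFF_SIZE - wsize
 */
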
+
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
u32 flag)
{
- struct cudbg_init cudbg_init = { 0 };
struct cudbg_buffer dbg_buff = { 0 };
u32 size, min_size, total_size = 0;
+ struct cudbg_init cudbg_init;
struct cudbg_hdr *cudbg_hdr;
+ int rc;
size = *buf_size;
+ memset(&cudbg_init, 0, sizeof(struct cudbg_init));
cudbg_init.adap = adap;
cudbg_init.outbuf = buf;
cudbg_init.outbuf_size = size;
@@ -365,7 +431,6 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
cudbg_hdr->chip_ver = adap->params.chip;
cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
- cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE;
min_size = sizeof(struct cudbg_hdr) +
sizeof(struct cudbg_entity_hdr) *
@@ -373,6 +438,24 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
if (size < min_size)
return -ENOMEM;
+ rc = cudbg_get_workspace_size();
+ if (rc) {
+		/* Zlib is available, so use zlib deflate */
+ cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
+ rc = cudbg_alloc_compress_buff(&cudbg_init);
+ if (rc) {
+ /* Ignore error and continue without compression. */
+ dev_warn(adap->pdev_dev,
+ "Fail allocating compression buffer ret: %d. Continuing without compression.\n",
+ rc);
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ rc = 0;
+ }
+ } else {
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ }
+
+ cudbg_hdr->compress_type = cudbg_init.compress_type;
dbg_buff.offset += min_size;
total_size = dbg_buff.offset;
@@ -390,8 +473,12 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
buf,
&total_size);
+ cudbg_free_compress_buff(&cudbg_init);
cudbg_hdr->data_len = total_size;
- *buf_size = total_size;
+ if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
+ *buf_size = size;
+ else
+ *buf_size = total_size;
return 0;
}
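
When compression is enabled the entity payloads are compressed chunks, so the usable dump length can no longer be taken from total_size alone; the routine reports the full destination size and leaves it to the reader to walk the entity headers. A hedged consumer-side branch on the header written above:

/* Sketch of how a dump reader would branch; buf is the collected dump. */
struct cudbg_hdr *hdr = (struct cudbg_hdr *)buf;

if (hdr->compress_type == CUDBG_COMPRESSION_ZLIB) {
	/* each entity payload starts with a cudbg_compress_hdr to inflate */
} else {
	/* hdr->data_len bytes of raw entity data follow the headers */
}
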
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
index c099b5aa2214..ce1ac9a1c878 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -20,8 +20,12 @@
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#define CUDBG_DUMP_BUFF_SIZE (32 * 1024 * 1024) /* 32 MB */
+#define CUDBG_COMPRESS_BUFF_SIZE (4 * 1024 * 1024) /* 4 MB */
+
typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 917663b35603..2822bbff73e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -45,6 +45,10 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
+#include "cudbg_lib.h"
/* generic seq_file support for showing a table of size rows x width. */
static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -1739,7 +1743,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
*/
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
/* Inner header VNI */
- vniy = ((data2 & DATAVIDH2_F) << 23) |
+ vniy = (data2 & DATAVIDH2_F) |
(DATAVIDH1_G(data2) << 16) | VIDL_G(val);
dip_hit = data2 & DATADIPHIT_F;
} else {
@@ -1749,6 +1753,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
port_num = DATAPORTNUM_G(data2);
/* Read tcamx. Change the control param */
+ vnix = 0;
ctl |= CTLXYBITSEL_V(1);
t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
@@ -1757,7 +1762,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
/* Inner header VNI mask */
- vnix = ((data2 & DATAVIDH2_F) << 23) |
+ vnix = (data2 & DATAVIDH2_F) |
(DATAVIDH1_G(data2) << 16) | VIDL_G(val);
}
} else {
@@ -1830,7 +1835,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
addr[1], addr[2], addr[3],
addr[4], addr[5],
(unsigned long long)mask,
- vniy, vnix, dip_hit ? 'Y' : 'N',
+ vniy, (vnix | vniy),
+ dip_hit ? 'Y' : 'N',
port_num,
(cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
PORTMAP_G(cls_hi),
@@ -2017,11 +2023,12 @@ static int rss_show(struct seq_file *seq, void *v, int idx)
static int rss_open(struct inode *inode, struct file *file)
{
- int ret;
- struct seq_tab *p;
struct adapter *adap = inode->i_private;
+ int ret, nentries;
+ struct seq_tab *p;
- p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+ nentries = t4_chip_rss_size(adap);
+ p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show);
if (!p)
return -ENOMEM;
@@ -2664,10 +2671,14 @@ static const struct file_operations mem_debugfs_fops = {
static int tid_info_show(struct seq_file *seq, void *v)
{
+ unsigned int tid_start = 0;
struct adapter *adap = seq->private;
const struct tid_info *t = &adap->tids;
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+ if (chip > CHELSIO_T5)
+ tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+
if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
unsigned int sb;
seq_printf(seq, "Connections in use: %u\n",
@@ -2679,8 +2690,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
if (sb) {
- seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
- adap->tids.hash_base,
+ seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+ sb - 1, adap->tids.hash_base,
t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
@@ -2705,7 +2716,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "Connections in use: %u\n",
atomic_read(&t->conns_in_use));
- seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+ seq_printf(seq, "TID range: %u..%u", tid_start,
+ tid_start + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->tids_in_use));
}
@@ -2794,18 +2806,6 @@ static const struct file_operations blocked_fl_fops = {
.llseek = generic_file_llseek,
};
-struct mem_desc {
- unsigned int base;
- unsigned int limit;
- unsigned int idx;
-};
-
-static int mem_desc_cmp(const void *a, const void *b)
-{
- return ((const struct mem_desc *)a)->base -
- ((const struct mem_desc *)b)->base;
-}
-
static void mem_region_show(struct seq_file *seq, const char *name,
unsigned int from, unsigned int to)
{
@@ -2819,250 +2819,60 @@ static void mem_region_show(struct seq_file *seq, const char *name,
static int meminfo_show(struct seq_file *seq, void *v)
{
static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
- "MC0:", "MC1:"};
- static const char * const region[] = {
- "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
- "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
- "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
- "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
- "RQUDP region:", "PBL region:", "TXPBL region:",
- "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
- "On-chip queues:"
- };
-
- int i, n;
- u32 lo, hi, used, alloc;
- struct mem_desc avail[4];
- struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
- struct mem_desc *md = mem;
+ "MC0:", "MC1:", "HMA:"};
struct adapter *adap = seq->private;
+ struct cudbg_meminfo meminfo;
+ int i, rc;
- for (i = 0; i < ARRAY_SIZE(mem); i++) {
- mem[i].limit = 0;
- mem[i].idx = i;
- }
-
- /* Find and sort the populated memory ranges */
- i = 0;
- lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
- if (lo & EDRAM0_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
- avail[i].base = EDRAM0_BASE_G(hi) << 20;
- avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
- avail[i].idx = 0;
- i++;
- }
- if (lo & EDRAM1_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
- avail[i].base = EDRAM1_BASE_G(hi) << 20;
- avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
- avail[i].idx = 1;
- i++;
- }
-
- if (is_t5(adap->params.chip)) {
- if (lo & EXT_MEM0_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
- avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
- avail[i].idx = 3;
- i++;
- }
- if (lo & EXT_MEM1_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
- avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
- avail[i].idx = 4;
- i++;
- }
- } else {
- if (lo & EXT_MEM_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
- avail[i].base = EXT_MEM_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
- avail[i].idx = 2;
- i++;
- }
- }
- if (!i) /* no memory available */
- return 0;
- sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
-
- (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
- (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
- (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
-
- /* the next few have explicit upper bounds */
- md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
- md->limit = md->base - 1 +
- t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
- PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
- md++;
-
- md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
- md->limit = md->base - 1 +
- t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
- PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
- md++;
-
- if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
- hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
- md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
- } else {
- hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
- md->base = t4_read_reg(adap,
- LE_DB_HASH_TBL_BASE_ADDR_A);
- }
- md->limit = 0;
- } else {
- md->base = 0;
- md->idx = ARRAY_SIZE(region); /* hide it */
- }
- md++;
-
-#define ulp_region(reg) do { \
- md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
- (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
-} while (0)
-
- ulp_region(RX_ISCSI);
- ulp_region(RX_TDDP);
- ulp_region(TX_TPT);
- ulp_region(RX_STAG);
- ulp_region(RX_RQ);
- ulp_region(RX_RQUDP);
- ulp_region(RX_PBL);
- ulp_region(TX_PBL);
-#undef ulp_region
- md->base = 0;
- md->idx = ARRAY_SIZE(region);
- if (!is_t4(adap->params.chip)) {
- u32 size = 0;
- u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
- u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
-
- if (is_t5(adap->params.chip)) {
- if (sge_ctrl & VFIFO_ENABLE_F)
- size = DBVFIFO_SIZE_G(fifo_size);
- } else {
- size = T6_DBVFIFO_SIZE_G(fifo_size);
- }
-
- if (size) {
- md->base = BASEADDR_G(t4_read_reg(adap,
- SGE_DBVFIFO_BADDR_A));
- md->limit = md->base + (size << 2) - 1;
- }
- }
-
- md++;
-
- md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
- md->limit = 0;
- md++;
- md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
- md->limit = 0;
- md++;
-
- md->base = adap->vres.ocq.start;
- if (adap->vres.ocq.size)
- md->limit = md->base + adap->vres.ocq.size - 1;
- else
- md->idx = ARRAY_SIZE(region); /* hide it */
- md++;
-
- /* add any address-space holes, there can be up to 3 */
- for (n = 0; n < i - 1; n++)
- if (avail[n].limit < avail[n + 1].base)
- (md++)->base = avail[n].limit;
- if (avail[n].limit)
- (md++)->base = avail[n].limit;
-
- n = md - mem;
- sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+ memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
+ rc = cudbg_fill_meminfo(adap, &meminfo);
+ if (rc)
+ return -ENXIO;
- for (lo = 0; lo < i; lo++)
- mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
- avail[lo].limit - 1);
+ for (i = 0; i < meminfo.avail_c; i++)
+ mem_region_show(seq, memory[meminfo.avail[i].idx],
+ meminfo.avail[i].base,
+ meminfo.avail[i].limit - 1);
seq_putc(seq, '\n');
- for (i = 0; i < n; i++) {
- if (mem[i].idx >= ARRAY_SIZE(region))
+ for (i = 0; i < meminfo.mem_c; i++) {
+ if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
continue; /* skip holes */
- if (!mem[i].limit)
- mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
- mem_region_show(seq, region[mem[i].idx], mem[i].base,
- mem[i].limit);
+ if (!meminfo.mem[i].limit)
+ meminfo.mem[i].limit =
+ i < meminfo.mem_c - 1 ?
+ meminfo.mem[i + 1].base - 1 : ~0;
+ mem_region_show(seq, cudbg_region[meminfo.mem[i].idx],
+ meminfo.mem[i].base, meminfo.mem[i].limit);
}
seq_putc(seq, '\n');
- lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
- hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
- mem_region_show(seq, "uP RAM:", lo, hi);
-
- lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
- hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
- mem_region_show(seq, "uP Extmem2:", lo, hi);
+ mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi);
+ mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
+ meminfo.up_extmem2_hi);
- lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
- PMRXMAXPAGE_G(lo),
- t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
- (lo & PMRXNUMCHN_F) ? 2 : 1);
+ meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
+ meminfo.rx_pages_data[2]);
- lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
- hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
- PMTXMAXPAGE_G(lo),
- hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
- hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
- seq_printf(seq, "%u p-structs\n\n",
- t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
-
- for (i = 0; i < 4; i++) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
- lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
- else
- lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
- if (is_t5(adap->params.chip)) {
- used = T5_USED_G(lo);
- alloc = T5_ALLOC_G(lo);
- } else {
- used = USED_G(lo);
- alloc = ALLOC_G(lo);
- }
+ meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
+ meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
+
+ seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
+
+ for (i = 0; i < 4; i++)
/* For T6 these are MAC buffer groups */
seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
- i, used, alloc);
- }
- for (i = 0; i < adap->params.arch.nchan; i++) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
- lo = t4_read_reg(adap,
- MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
- else
- lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
- if (is_t5(adap->params.chip)) {
- used = T5_USED_G(lo);
- alloc = T5_ALLOC_G(lo);
- } else {
- used = USED_G(lo);
- alloc = ALLOC_G(lo);
- }
+ i, meminfo.port_used[i], meminfo.port_alloc[i]);
+
+ for (i = 0; i < adap->params.arch.nchan; i++)
/* For T6 these are MAC buffer groups */
seq_printf(seq,
"Loopback %d using %u pages out of %u allocated\n",
- i, used, alloc);
- }
+ i, meminfo.loopback_used[i],
+ meminfo.loopback_alloc[i]);
+
return 0;
}
@@ -3096,6 +2906,8 @@ static int chcr_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.error));
seq_printf(seq, "Fallback: %10u \n",
atomic_read(&adap->chcr_stats.fallback));
+ seq_printf(seq, "IPSec PDU: %10u\n",
+ atomic_read(&adap->chcr_stats.ipsec_cnt));
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index eb338212f5af..7852d98bad75 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -517,7 +517,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
else
return PORT_OTHER;
} else if (port_type == FW_PORT_TYPE_KR4_100G ||
- port_type == FW_PORT_TYPE_KR_SFP28) {
+ port_type == FW_PORT_TYPE_KR_SFP28 ||
+ port_type == FW_PORT_TYPE_KR_XLAUI) {
return PORT_NONE;
}
@@ -645,6 +646,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
break;
+ case FW_PORT_TYPE_KR_XLAUI:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+ break;
+
case FW_PORT_TYPE_CR2_QSFP:
SET_LMM(FIBRE);
SET_LMM(50000baseSR2_Full);
@@ -1396,6 +1404,101 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
return 0;
}
+static int cxgb4_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct port_info *pi = netdev_priv(dev);
+ u8 sff8472_comp, sff_diag_type, sff_rev;
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ if (!t4_is_inserted_mod_type(pi->mod_type))
+ return -EINVAL;
+
+ switch (pi->port_type) {
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSA:
+ case FW_PORT_TYPE_SFP28:
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
+ SFF_8472_COMP_LEN, &sff8472_comp);
+ if (ret)
+ return ret;
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
+ SFP_DIAG_TYPE_LEN, &sff_diag_type);
+ if (ret)
+ return ret;
+
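+ /* No SFF-8472 compliance byte, or a diagnostic page that
+ * needs an address change to access (bit 2), means only the
+ * lower A0h page is readable: report plain SFF-8079.
+ */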
+ if (!sff8472_comp || (sff_diag_type & 4)) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+
+ case FW_PORT_TYPE_QSFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_CR2_QSFP:
+ case FW_PORT_TYPE_CR4_QSFP:
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFF_REV_ADDR,
+ SFF_REV_LEN, &sff_rev);
+ if (ret)
+ return ret;
+
+ /* For QSFP type ports, revision value >= 3
+ * means the SFP is 8636 compliant.
+ */
+ if (sff_rev >= 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxgb4_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eprom, u8 *data)
+{
+ int ret = 0, offset = eprom->offset, len = eprom->len;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ memset(data, 0, eprom->len);
+ if (offset + len <= I2C_PAGE_SIZE)
+ return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, offset, len, data);
+
+ /* offset + len spans the 0xa0 and 0xa2 pages */
+ if (offset <= I2C_PAGE_SIZE) {
+ /* read 0xa0 page */
+ len = I2C_PAGE_SIZE - offset;
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, offset, len, data);
+ if (ret)
+ return ret;
+ offset = I2C_PAGE_SIZE;
+ /* Remaining bytes to be read from second page =
+ * Total length - bytes read from first page
+ */
+ len = eprom->len - len;
+ }
+ /* Read additional optical diagnostics from page 0xa2 if supported */
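+ /* 'len' now holds only the remaining byte count, so the copy
+ * resumes at &data[eprom->len - len].
+ */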
+ return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
+ offset, len, &data[eprom->len - len]);
+}
+
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1430,6 +1533,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_dump = set_dump,
.get_dump_flag = get_dump_flag,
.get_dump_data = get_dump_data,
+ .get_module_info = cxgb4_get_module_info,
+ .get_module_eeprom = cxgb4_get_module_eeprom,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 5980f308a253..3177b0c9bd2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -439,19 +439,32 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
if (ftid >= t->nftids)
ftid = -1;
} else {
- ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2);
- if (ftid < 0)
- goto out_unlock;
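+ /* IPv6 filters span four ftid slots before T6; with CLIP,
+ * T6 needs only two, hence bitmap order 1 instead of 2.
+ */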
+ if (is_t6(adap->params.chip)) {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 1);
+ if (ftid < 0)
+ goto out_unlock;
+
+ /* this is only a lookup, keep the found region
+ * unallocated
+ */
+ bitmap_release_region(t->ftid_bmap, ftid, 1);
+ } else {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 2);
+ if (ftid < 0)
+ goto out_unlock;
- /* this is only a lookup, keep the found region unallocated */
- bitmap_release_region(t->ftid_bmap, ftid, 2);
+ bitmap_release_region(t->ftid_bmap, ftid, 2);
+ }
}
out_unlock:
spin_unlock_bh(&t->ftid_lock);
return ftid;
}
-static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
@@ -460,22 +473,31 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
return -EBUSY;
}
- if (family == PF_INET)
+ if (family == PF_INET) {
__set_bit(fidx, t->ftid_bmap);
- else
- bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_allocate_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
return 0;
}
-static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
- if (family == PF_INET)
+ if (family == PF_INET) {
__clear_bit(fidx, t->ftid_bmap);
- else
- bitmap_release_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_release_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_release_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
}
@@ -694,7 +716,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
if (f->smt)
cxgb4_smt_release(f->smt);
- if (f->fs.hash && f->fs.type)
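+ /* On T6, even non-hash IPv6 filters hold a CLIP reference
+ * (taken in __cxgb4_set_filter), so release it here too.
+ */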
+ if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
/* The zeroing of the filter rule below clears the filter valid,
@@ -1189,6 +1211,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
unsigned int max_fidx, fidx;
struct filter_entry *f;
u32 iconf;
@@ -1225,12 +1248,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
* insertion.
*/
if (fs->type == 0) { /* IPv4 */
- /* If our IPv4 filter isn't being written to a
- * multiple of four filter index and there's an IPv6
- * filter at the multiple of 4 base slot, then we
- * prevent insertion.
+ /* For T6, if our IPv4 filter isn't being written to a
+ * multiple-of-two filter index and there's an IPv6
+ * filter at the multiple-of-2 base slot, then we need
+ * to delete that IPv6 filter ...
+ * On adapters earlier than T6, an IPv6 filter occupies
+ * 4 entries, so the filter at the multiple-of-4 base
+ * slot must be deleted instead.
+ */
- fidx = filter_id & ~0x3;
+ if (chip_ver < CHELSIO_T6)
+ fidx = filter_id & ~0x3;
+ else
+ fidx = filter_id & ~0x1;
+
if (fidx != filter_id &&
adapter->tids.ftid_tab[fidx].fs.type) {
f = &adapter->tids.ftid_tab[fidx];
@@ -1242,23 +1271,42 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
}
}
} else { /* IPv6 */
- /* Ensure that the IPv6 filter is aligned on a
- * multiple of 4 boundary.
- */
- if (filter_id & 0x3) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
- return -EINVAL;
- }
+ if (chip_ver < CHELSIO_T6) {
+ /* Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+ return -EINVAL;
+ }
- /* Check all except the base overlapping IPv4 filter slots. */
- for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+ /* Check all except the base overlapping IPv4 filter
+ * slots.
+ */
+ for (fidx = filter_id + 1; fidx < filter_id + 4;
+ fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+ fidx);
+ return -EBUSY;
+ }
+ }
+ } else {
+ /* On T6, with CLIP enabled, an IPv6 filter
+ * occupies only 2 entries.
+ */
+ if (filter_id & 0x1)
+ return -EINVAL;
+ /* Check overlapping IPv4 filter slot */
+ fidx = filter_id + 1;
f = &adapter->tids.ftid_tab[fidx];
if (f->valid) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
- fidx);
- return -EINVAL;
+ pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
+ __func__, fidx);
+ return -EBUSY;
}
}
}
@@ -1272,16 +1320,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
fidx = filter_id + adapter->tids.ftid_base;
ret = cxgb4_set_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
if (ret)
return ret;
/* Check to make sure the filter requested is writable ... */
ret = writable_filter(f);
if (ret) {
/* Clear the bits we have set above */
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
return ret;
}
@@ -1291,6 +1341,17 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
if (f->valid)
clear_filter(adapter, f);
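+ /* T6 resolves IPv6 filter addresses through the compressed
+ * LIP (CLIP) table, so take a CLIP reference for any
+ * non-wildcard local address before writing the filter.
+ */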
+ if (is_t6(adapter->params.chip) && fs->type &&
+ ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
+ IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
+ if (ret) {
+ cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
+ chip_ver);
+ return ret;
+ }
+ }
+
/* Convert the filter specification into our internal format.
* We copy the PF/VF specification into the Outer VLAN field
* here so the rest of the code -- including the interface to
@@ -1316,7 +1377,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
ret = set_filter_wr(adapter, filter_id);
if (ret) {
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
clear_filter(adapter, f);
}
@@ -1394,6 +1456,7 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
struct filter_entry *f;
unsigned int max_fidx;
int ret;
@@ -1419,7 +1482,8 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
if (f->valid) {
f->ctx = ctx;
cxgb4_clear_ftid(&adapter->tids, filter_id,
- f->fs.type ? PF_INET6 : PF_INET);
+ f->fs.type ? PF_INET6 : PF_INET,
+ chip_ver);
return del_filter_wr(adapter, filter_id);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6f900ffe25cc..1ca2a39ed0f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
#include <net/addrconf.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
+#include <net/udp_tunnel.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -101,7 +102,9 @@ const char cxgb4_driver_version[] = DRV_VERSION;
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
static const struct pci_device_id cxgb4_pci_tbl[] = {
-#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+#define CXGB4_UNIFIED_PF 0x4
+
+#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
* called for both.
@@ -109,7 +112,7 @@ const char cxgb4_driver_version[] = DRV_VERSION;
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
#define CH_PCI_ID_TABLE_ENTRY(devid) \
- {PCI_VDEVICE(CHELSIO, (devid)), 4}
+ {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
{ 0, } \
@@ -1673,7 +1676,7 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- return t4_sge_ctxt_flush(adap, adap->mbox);
+ return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
@@ -2604,7 +2607,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
}
#ifdef CONFIG_PCI_IOV
-static int dummy_open(struct net_device *dev)
+static int cxgb4_mgmt_open(struct net_device *dev)
{
/* Turn carrier off since we don't have to transmit anything on this
* interface.
@@ -2614,39 +2617,44 @@ static int dummy_open(struct net_device *dev)
}
/* Fill MAC address that will be assigned by the FW */
-static void fill_vf_station_mac_addr(struct adapter *adap)
+static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
- unsigned int i;
u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
+ unsigned int i, vf, nvfs;
+ u16 a, b;
int err;
u8 *na;
- u16 a, b;
+ adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
+ PCI_CAP_ID_VPD);
err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
- if (!err) {
- na = adap->params.vpd.na;
- for (i = 0; i < ETH_ALEN; i++)
- hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
- hex2val(na[2 * i + 1]));
- a = (hw_addr[0] << 8) | hw_addr[1];
- b = (hw_addr[1] << 8) | hw_addr[2];
- a ^= b;
- a |= 0x0200; /* locally assigned Ethernet MAC address */
- a &= ~0x0100; /* not a multicast Ethernet MAC address */
- macaddr[0] = a >> 8;
- macaddr[1] = a & 0xff;
-
- for (i = 2; i < 5; i++)
- macaddr[i] = hw_addr[i + 1];
-
- for (i = 0; i < adap->num_vfs; i++) {
- macaddr[5] = adap->pf * 16 + i;
- ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
- }
+ if (err)
+ return;
+
+ na = adap->params.vpd.na;
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+
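+ /* Fold the leading VPD bytes into a 16-bit prefix and force
+ * a valid unicast, locally administered address.
+ */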
+ a = (hw_addr[0] << 8) | hw_addr[1];
+ b = (hw_addr[1] << 8) | hw_addr[2];
+ a ^= b;
+ a |= 0x0200; /* locally assigned Ethernet MAC address */
+ a &= ~0x0100; /* not a multicast Ethernet MAC address */
+ macaddr[0] = a >> 8;
+ macaddr[1] = a & 0xff;
+
+ for (i = 2; i < 5; i++)
+ macaddr[i] = hw_addr[i + 1];
+
+ for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
+ vf < nvfs; vf++) {
+ macaddr[5] = adap->pf * 16 + vf;
+ ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
}
}
-static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2668,8 +2676,8 @@ static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
return ret;
}
-static int cxgb_get_vf_config(struct net_device *dev,
- int vf, struct ifla_vf_info *ivi)
+static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2683,8 +2691,8 @@ static int cxgb_get_vf_config(struct net_device *dev,
return 0;
}
-static int cxgb_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct port_info *pi = netdev_priv(dev);
unsigned int phy_port_id;
@@ -2695,8 +2703,8 @@ static int cxgb_get_phys_port_id(struct net_device *dev,
return 0;
}
-static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
- int max_tx_rate)
+static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2775,7 +2783,30 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
return 0;
}
-#endif
+static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int ret;
+
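+ /* VLAN IDs are 12 bits and priorities 3 bits; only plain
+ * 802.1Q without a QoS remap is supported here.
+ */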
+ if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+
+ if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
+ return -EPROTONOSUPPORT;
+
+ ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
+ if (!ret) {
+ adap->vfinfo[vf].vlan = vlan;
+ return 0;
+ }
+
+ dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
+ ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
+ return ret;
+}
+#endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
@@ -2897,9 +2928,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
static int cxgb_setup_tc_flower(struct net_device *dev,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return cxgb4_tc_flower_replace(dev, cls_flower);
@@ -2915,9 +2943,6 @@ static int cxgb_setup_tc_flower(struct net_device *dev,
static int cxgb_setup_tc_cls_u32(struct net_device *dev,
struct tc_cls_u32_offload *cls_u32)
{
- if (cls_u32->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
@@ -2943,7 +2968,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return -EINVAL;
}
- if (!tc_can_offload(dev))
+ if (!tc_cls_can_offload_and_chain0(dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -2987,6 +3012,174 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static void cxgb_del_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int ret = 0, i;
+
+ if (chip_ver < CHELSIO_T6)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!adapter->vxlan_port_cnt ||
+ adapter->vxlan_port != ti->port)
+ return; /* Invalid VxLAN destination port */
+
+ adapter->vxlan_port_cnt--;
+ if (adapter->vxlan_port_cnt)
+ return;
+
+ adapter->vxlan_port = 0;
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!adapter->geneve_port_cnt ||
+ adapter->geneve_port != ti->port)
+ return; /* Invalid GENEVE destination port */
+
+ adapter->geneve_port_cnt--;
+ if (adapter->geneve_port_cnt)
+ return;
+
+ adapter->geneve_port = 0;
+ t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
+ break;
+ default:
+ return;
+ }
+
+ /* Matchall mac entries can be deleted only after all tunnel ports
+ * are brought down or removed.
+ */
+ if (!adapter->rawf_cnt)
+ return;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ ret = t4_free_raw_mac_filt(adapter, pi->viid,
+ match_all_mac, match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
+ i);
+ return;
+ }
+ atomic_dec(&adapter->mps_encap[adapter->rawf_start +
+ pi->port_id].refcnt);
+ }
+}
+
+static void cxgb_add_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int i, ret;
+
+ if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ /* Callback for adding vxlan port can be called with the same
+ * port for both IPv4 and IPv6. We should not disable the
+ * offloading when the same port for both protocols is added
+ * and later one of them is removed.
+ */
+ if (adapter->vxlan_port_cnt &&
+ adapter->vxlan_port == ti->port) {
+ adapter->vxlan_port_cnt++;
+ return;
+ }
+
+ /* We will support only one VxLAN port */
+ if (adapter->vxlan_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->vxlan_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->vxlan_port = ti->port;
+ adapter->vxlan_port_cnt = 1;
+
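+ /* Program the hardware with the UDP port to classify
+ * as VxLAN on receive.
+ */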
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
+ VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (adapter->geneve_port_cnt &&
+ adapter->geneve_port == ti->port) {
+ adapter->geneve_port_cnt++;
+ return;
+ }
+
+ /* We will support only one GENEVE port */
+ if (adapter->geneve_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->geneve_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->geneve_port = ti->port;
+ adapter->geneve_port_cnt = 1;
+
+ t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
+ GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
+ break;
+ default:
+ return;
+ }
+
+ /* Create a 'match all' mac filter entry for inner mac,
+ * if the raw mac interface is supported. Once the Linux kernel
+ * provides driver entry points for adding/deleting the inner mac
+ * addresses, we will remove this 'match all' entry and fall back
+ * to adding exact match filters.
+ */
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+
+ ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
+ be16_to_cpu(ti->port));
+ cxgb_del_udp_tunnel(netdev, ti);
+ return;
+ }
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ }
+}
+
+static netdev_features_t cxgb_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+ return features;
+
+ /* Check if hw supports offload for this packet */
+ if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
+ return features;
+
+ /* Offload is not supported for this encapsulated packet */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
static netdev_features_t cxgb_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -3018,20 +3211,25 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
+ .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
+ .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
+ .ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
- .ndo_open = dummy_open,
- .ndo_set_vf_mac = cxgb_set_vf_mac,
- .ndo_get_vf_config = cxgb_get_vf_config,
- .ndo_set_vf_rate = cxgb_set_vf_rate,
- .ndo_get_phys_port_id = cxgb_get_phys_port_id,
+ .ndo_open = cxgb4_mgmt_open,
+ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
+ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
+ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
+ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
+ .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
};
#endif
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
@@ -3043,7 +3241,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
- .get_drvinfo = get_drvinfo,
+ .get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
@@ -4096,7 +4294,7 @@ static int adap_init0(struct adapter *adap)
} else {
adap->vres.ncrypto_fc = val[0];
}
- adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
+ adap->params.crypto = ntohs(caps_cmd.cryptocaps);
adap->num_uld += 1;
}
#undef FW_PARAM_PFVF
@@ -4759,7 +4957,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
}
#ifdef CONFIG_PCI_IOV
-static void dummy_setup(struct net_device *dev)
+static void cxgb4_mgmt_setup(struct net_device *dev)
{
dev->type = ARPHRD_NONE;
dev->mtu = 0;
@@ -4775,38 +4973,6 @@ static void dummy_setup(struct net_device *dev)
dev->needs_free_netdev = true;
}
-static int config_mgmt_dev(struct pci_dev *pdev)
-{
- struct adapter *adap = pci_get_drvdata(pdev);
- struct net_device *netdev;
- struct port_info *pi;
- char name[IFNAMSIZ];
- int err;
-
- snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
- netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
- dummy_setup);
- if (!netdev)
- return -ENOMEM;
-
- pi = netdev_priv(netdev);
- pi->adapter = adap;
- pi->tx_chan = adap->pf % adap->params.nports;
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- adap->port[0] = netdev;
- pi->port_id = 0;
-
- err = register_netdev(adap->port[0]);
- if (err) {
- pr_info("Unable to register VF mgmt netdev %s\n", name);
- free_netdev(adap->port[0]);
- adap->port[0] = NULL;
- return err;
- }
- return 0;
-}
-
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
struct adapter *adap = pci_get_drvdata(pdev);
@@ -4818,7 +4984,7 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
/* Check if cxgb4 is the MASTER and fw is initialized */
if (!(pcie_fw & PCIE_FW_INIT_F) ||
!(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
- PCIE_FW_MASTER_G(pcie_fw) != 4) {
+ PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
dev_warn(&pdev->dev,
"cxgb4 driver needs to be MASTER to support SRIOV\n");
return -EOPNOTSUPP;
@@ -4830,46 +4996,132 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
if (current_vfs && pci_vfs_assigned(pdev)) {
dev_err(&pdev->dev,
"Cannot modify SR-IOV while VFs are assigned\n");
- num_vfs = current_vfs;
- return num_vfs;
+ return current_vfs;
}
-
- /* Disable SRIOV when zero is passed.
- * One needs to disable SRIOV before modifying it, else
- * stack throws the below warning:
- * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+ /* Note that the upper-level code ensures that we're never called with
+ * a non-zero "num_vfs" when we already have VFs instantiated. But
+ * it never hurts to code defensively.
*/
+ if (num_vfs != 0 && current_vfs != 0)
+ return -EBUSY;
+
+ /* Nothing to do for no change. */
+ if (num_vfs == current_vfs)
+ return num_vfs;
+
+ /* Disable SRIOV when zero is passed. */
if (!num_vfs) {
pci_disable_sriov(pdev);
- if (adap->port[0]) {
- unregister_netdev(adap->port[0]);
- adap->port[0] = NULL;
- }
+ /* free VF Management Interface */
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+
/* free VF resources */
+ adap->num_vfs = 0;
kfree(adap->vfinfo);
adap->vfinfo = NULL;
- adap->num_vfs = 0;
- return num_vfs;
+ return 0;
}
- if (num_vfs != current_vfs) {
- err = pci_enable_sriov(pdev, num_vfs);
+ if (!current_vfs) {
+ struct fw_pfvf_cmd port_cmd, port_rpl;
+ struct net_device *netdev;
+ unsigned int pmask, port;
+ struct pci_dev *pbridge;
+ struct port_info *pi;
+ char name[IFNAMSIZ];
+ u32 devcap2;
+ u16 flags;
+ int pos;
+
+ /* If we want to instantiate Virtual Functions, then our
+ * parent bridge's PCI-E needs to support Alternative Routing
+ * ID (ARI) because our VFs will show up at function offset 8
+ * and above.
+ */
+ pbridge = pdev->bus->self;
+ pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
+ pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
+ pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
+
+ if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
+ !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
+ /* Our parent bridge does not support ARI so issue a
+ * warning and skip instantiating the VFs. They
+ * won't be reachable.
+ */
+ dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
+ pbridge->bus->number, PCI_SLOT(pbridge->devfn),
+ PCI_FUNC(pbridge->devfn));
+ return -ENOTSUPP;
+ }
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PFVF_CMD_PFN_V(adap->pf) |
+ FW_PFVF_CMD_VFN_V(0));
+ port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
+ err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
if (err)
return err;
+ pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
+ port = ffs(pmask) - 1;
+ /* Allocate VF Management Interface. */
+ snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
+ adap->pf);
+ netdev = alloc_netdev(sizeof(struct port_info),
+ name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
+ if (!netdev)
+ return -ENOMEM;
- adap->num_vfs = num_vfs;
- err = config_mgmt_dev(pdev);
- if (err)
+ pi = netdev_priv(netdev);
+ pi->adapter = adap;
+ pi->lport = port;
+ pi->tx_chan = port;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adap->port[0] = netdev;
+ pi->port_id = 0;
+
+ err = register_netdev(adap->port[0]);
+ if (err) {
+ pr_info("Unable to register VF mgmt netdev %s\n", name);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
return err;
+ }
+ /* Allocate and set up VF Information. */
+ adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
+ sizeof(struct vf_info), GFP_KERNEL);
+ if (!adap->vfinfo) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ return -ENOMEM;
+ }
+ cxgb4_mgmt_fill_vf_station_mac_addr(adap);
+ }
+ /* Instantiate the requested number of VFs. */
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ pr_info("Unable to instantiate %d VFs\n", num_vfs);
+ if (!current_vfs) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ kfree(adap->vfinfo);
+ adap->vfinfo = NULL;
+ }
+ return err;
}
- adap->vfinfo = kcalloc(adap->num_vfs,
- sizeof(struct vf_info), GFP_KERNEL);
- if (adap->vfinfo)
- fill_vf_station_mac_addr(adap);
+ adap->num_vfs = num_vfs;
return num_vfs;
}
-#endif
+#endif /* CONFIG_PCI_IOV */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -4882,9 +5134,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 whoami, pl_rev;
enum chip_type chip;
static int adap_idx = 1;
-#ifdef CONFIG_PCI_IOV
- u32 v, port_vec;
-#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4908,6 +5157,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
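+ /* Allocate the adapter up front so per-PF state (drvdata,
+ * mailbox log) exists even for the non-master PFs, which now
+ * return early from probe below.
+ */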
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_unmap_bar0;
+ }
+
+ adapter->regs = regs;
err = t4_wait_dev_ready(regs);
if (err < 0)
goto out_unmap_bar0;
@@ -4918,13 +5174,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
chip = get_chip_type(pdev, pl_rev);
func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
+ adapter->mbox = func;
+ adapter->pf = func;
+ adapter->msg_enable = DFLT_MSG_ENABLE;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
+ pci_set_drvdata(pdev, adapter);
+
if (func != ent->driver_data) {
-#ifndef CONFIG_PCI_IOV
- iounmap(regs);
-#endif
pci_disable_device(pdev);
pci_save_state(pdev); /* to restore SR-IOV later */
- goto sriov;
+ return 0;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
@@ -4933,53 +5205,30 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
-
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto out_unmap_bar0;
- }
adap_idx++;
-
adapter->workq = create_singlethread_workqueue("cxgb4");
if (!adapter->workq) {
err = -ENOMEM;
goto out_free_adapter;
}
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto out_free_adapter;
- }
adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
-
- adapter->regs = regs;
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5002,9 +5251,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
- spin_lock_init(&adapter->mbox_lock);
-
- INIT_LIST_HEAD(&adapter->mlist.list);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -5080,6 +5326,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
+
+ if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5273,58 +5523,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_fw_sge_queues(adapter);
return 0;
-sriov:
-#ifdef CONFIG_PCI_IOV
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto free_pci_region;
- }
-
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->regs = regs;
- adapter->adap_idx = adap_idx;
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto free_adapter;
- }
- spin_lock_init(&adapter->mbox_lock);
- INIT_LIST_HEAD(&adapter->mlist.list);
-
- v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
- &v, &port_vec);
- if (err < 0) {
- dev_err(adapter->pdev_dev, "Could not fetch port params\n");
- goto free_mbox_log;
- }
-
- adapter->params.nports = hweight32(port_vec);
- pci_set_drvdata(pdev, adapter);
- return 0;
-
-free_mbox_log:
- kfree(adapter->mbox_log);
- free_adapter:
- kfree(adapter);
- free_pci_region:
- iounmap(regs);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- return err;
-#else
- return 0;
-#endif
-
out_free_dev:
free_some_resources(adapter);
if (adapter->flags & USING_MSIX)
@@ -5416,14 +5614,7 @@ static void remove_one(struct pci_dev *pdev)
}
#ifdef CONFIG_PCI_IOV
else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
+ cxgb4_iov_configure(adapter->pdev, 0);
}
#endif
}
@@ -5467,18 +5658,6 @@ static void shutdown_one(struct pci_dev *pdev)
if (adapter->flags & FW_OK)
t4_fw_bye(adapter, adapter->mbox);
}
-#ifdef CONFIG_PCI_IOV
- else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- }
-#endif
}
static struct pci_driver cxgb4_driver = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index d4a548a6a55c..36563364bae7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -43,7 +43,7 @@
#define STATS_CHECK_PERIOD (HZ / 2)
-struct ch_tc_pedit_fields pedits[] = {
+static struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
ethtype_mask = 0;
}
+ if (ethtype_key == ETH_P_IPV6)
+ fs->type = 1;
+
fs->val.ethtype = ethtype_key;
fs->mask.ethtype = ethtype_mask;
fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
VLAN_PRIO_SHIFT);
vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
VLAN_PRIO_SHIFT);
- fs->val.ivlan = cpu_to_be16(vlan_tci);
- fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
+ fs->val.ivlan = vlan_tci;
+ fs->mask.ivlan = vlan_tci_mask;
/* Chelsio adapters use ivlan_vld bit to match vlan packets
* as 802.1Q. Also, when vlan tag is present in packets,
@@ -405,9 +408,7 @@ static void cxgb4_process_flow_actions(struct net_device *in,
} else if (is_tcf_gact_shot(a)) {
fs->action = FILTER_DROP;
} else if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
- struct net_device *out = __dev_get_by_index(dev_net(in),
- ifindex);
+ struct net_device *out = tcf_mirred_dev(a);
struct port_info *pi = netdev_priv(out);
fs->action = FILTER_SWITCH;
@@ -582,14 +583,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
/* Do nothing */
} else if (is_tcf_mirred_egress_redirect(a)) {
struct adapter *adap = netdev2adap(dev);
- struct net_device *n_dev;
- unsigned int i, ifindex;
+ struct net_device *n_dev, *target_dev;
+ unsigned int i;
bool found = false;
- ifindex = tcf_mirred_ifindex(a);
+ target_dev = tcf_mirred_dev(a);
for_each_port(adap, i) {
n_dev = adap->port[i];
- if (ifindex == n_dev->ifindex) {
+ if (target_dev == n_dev) {
found = true;
break;
}
@@ -765,9 +766,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
rhashtable_walk_enter(&adap->flower_tbl, &iter);
do {
- flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
- if (IS_ERR(flower_entry))
- goto walk_stop;
+ rhashtable_walk_start(&iter);
while ((flower_entry = rhashtable_walk_next(&iter)) &&
!IS_ERR(flower_entry)) {
@@ -786,8 +785,9 @@ static void ch_flower_stats_handler(struct work_struct *work)
spin_unlock(&flower_entry->lock);
}
}
-walk_stop:
+
rhashtable_walk_stop(&iter);
+
} while (flower_entry == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index cd0cd13a964d..ab174bcfbfb0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -114,14 +114,14 @@ static int fill_action_fields(struct adapter *adap,
/* Re-direct to specified port in hardware. */
if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *n_dev;
- unsigned int i, index;
+ struct net_device *n_dev, *target_dev;
bool found = false;
+ unsigned int i;
- index = tcf_mirred_ifindex(a);
+ target_dev = tcf_mirred_dev(a);
for_each_port(adap, i) {
n_dev = adap->port[i];
- if (index == n_dev->ifindex) {
+ if (target_dev == n_dev) {
fs->action = FILTER_SWITCH;
fs->eport = i;
found = true;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 71a315bc1409..6b5fea4532f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -637,6 +637,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->crypto = adap->params.crypto;
lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 08e709ab6dd4..1d37672902da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -297,6 +297,7 @@ struct chcr_stats_debug {
atomic_t complete;
atomic_t error;
atomic_t fallback;
+ atomic_t ipsec_cnt;
};
#define OCQ_WIN_OFFSET(pdev, vres) \
@@ -322,6 +323,7 @@ struct cxgb4_lld_info {
unsigned char wr_cred; /* WR 16-byte credits */
unsigned char adapter_type; /* type of adapter */
unsigned char fw_api_ver; /* FW API version */
+ unsigned char crypto; /* crypto support */
unsigned int fw_vers; /* FW version */
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned int cclk_ps; /* Core clock period in psec */
@@ -370,6 +372,7 @@ struct cxgb4_uld_info {
struct t4_lro_mgr *lro_mgr,
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
+ int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
};
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 922f2f937789..6e310a0da7c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -41,6 +41,7 @@
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
+#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
@@ -53,6 +54,7 @@
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
+#include "cxgb4_uld.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -110,14 +112,6 @@
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/*
- * Suspend an Ethernet Tx queue with fewer available descriptors than this.
- * This is the same as calc_tx_descs() for a TSO packet with
- * nr_frags == MAX_SKB_FRAGS.
- */
-#define ETHTXQ_STOP_THRES \
- (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
-
-/*
* Suspension threshold for non-Ethernet Tx queues. We require enough room
* for a full sized WR.
*/
@@ -134,11 +128,6 @@
*/
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- struct ulptx_sgl *sgl;
-};
-
struct rx_sw_desc { /* SW state per Rx descriptor */
struct page *page;
dma_addr_t dma_addr;
@@ -248,8 +237,8 @@ static inline bool fl_starving(const struct adapter *adapter,
return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
-static int map_skb(struct device *dev, const struct sk_buff *skb,
- dma_addr_t *addr)
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
{
const skb_frag_t *fp, *end;
const struct skb_shared_info *si;
@@ -277,6 +266,7 @@ unwind:
out_err:
return -ENOMEM;
}
+EXPORT_SYMBOL(cxgb4_map_skb);
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
@@ -411,7 +401,7 @@ static inline int reclaimable(const struct sge_txq *q)
}
/**
- * reclaim_completed_tx - reclaims completed Tx descriptors
+ * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
* @adap: the adapter
* @q: the Tx queue to reclaim completed descriptors from
* @unmap: whether the buffers should be unmapped for DMA
@@ -420,7 +410,7 @@ static inline int reclaimable(const struct sge_txq *q)
* and frees the associated buffers if possible. Called with the Tx
* queue locked.
*/
-static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
bool unmap)
{
int avail = reclaimable(q);
@@ -437,6 +427,7 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
q->in_use -= avail;
}
}
+EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
static inline int get_buf_size(struct adapter *adapter,
const struct rx_sw_desc *d)
@@ -770,12 +761,19 @@ static inline unsigned int flits_to_desc(unsigned int n)
* Returns whether an Ethernet packet is small enough to fit as
* immediate data. Return value corresponds to headroom required.
*/
-static inline int is_eth_imm(const struct sk_buff *skb)
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
- int hdrlen = skb_shinfo(skb)->gso_size ?
- sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ int hdrlen = 0;
- hdrlen += sizeof(struct cpl_tx_pkt);
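+ /* T6 tunnel GSO packets carry the larger cpl_tx_tnl_lso
+ * header, leaving less room for immediate data.
+ */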
+ if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
+ chip_ver > CHELSIO_T5) {
+ hdrlen = sizeof(struct cpl_tx_tnl_lso);
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ } else {
+ hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ }
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
@@ -788,10 +786,11 @@ static inline int is_eth_imm(const struct sk_buff *skb)
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
unsigned int flits;
- int hdrlen = is_eth_imm(skb);
+ int hdrlen = is_eth_imm(skb, chip_ver);
/* If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the
@@ -810,13 +809,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
- if (skb_shinfo(skb)->gso_size)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
- else
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_tnl_lso);
+ else
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core);
+
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ flits += (hdrlen / sizeof(__be64));
+ } else {
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ }
return flits;
}
@@ -827,13 +833,14 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* Returns the number of Tx descriptors needed for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
- return flits_to_desc(calc_tx_flits(skb));
+ return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
/**
- * write_sgl - populate a scatter/gather list for a packet
+ * cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
* @sgl: starting location for writing the SGL
@@ -849,9 +856,9 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
* right after the end of the SGL but does not account for any potential
* wrap around, i.e., @end > @sgl.
*/
-static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
- struct ulptx_sgl *sgl, u64 *end, unsigned int start,
- const dma_addr_t *addr)
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
{
unsigned int i, len;
struct ulptx_sge_pair *to;
@@ -903,6 +910,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
*end = 0;
}
+EXPORT_SYMBOL(cxgb4_write_sgl);
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
@@ -921,14 +929,14 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
}
/**
- * ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
* @q: the Tx queue
* @n: number of new descriptors to give to HW
*
* Ring the doorbell for a Tx queue.
*/
-static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
/* Make sure that all writes to the TX Descriptors are committed
* before we tell the hardware about them.
@@ -995,9 +1003,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
wmb();
}
}
+EXPORT_SYMBOL(cxgb4_ring_tx_db);
/**
- * inline_tx_skb - inline a packet's data into Tx descriptors
+ * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
* @skb: the packet
* @q: the Tx queue where the packet will be inlined
* @pos: starting position in the Tx queue where to inline the packet
@@ -1007,8 +1016,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* Most of the complexity of this operation is dealing with wrap arounds
* in the middle of the packet we want to inline.
*/
-static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
- void *pos)
+void cxgb4_inline_tx_skb(const struct sk_buff *skb,
+ const struct sge_txq *q, void *pos)
{
u64 *p;
int left = (void *)q->stat - pos;
@@ -1030,6 +1039,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
if ((uintptr_t)p & 8)
*p = 0;
}
+EXPORT_SYMBOL(cxgb4_inline_tx_skb);
static void *inline_tx_skb_header(const struct sk_buff *skb,
const struct sge_txq *q, void *pos,
@@ -1154,6 +1164,105 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
+/* Returns the tunnel type if the hardware can offload this packet's
+ * encapsulation. Called only on T6 and later chips.
+ */
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
+{
+ u8 l4_hdr = 0;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ struct port_info *pi = netdev_priv(skb->dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return tnl_type;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_UDP:
+ if (adapter->vxlan_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_VXLAN;
+ else if (adapter->geneve_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_GENEVE;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ return tnl_type;
+}
+
+static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+ struct cpl_tx_tnl_lso *tnl_lso,
+ enum cpl_tx_tnl_lso_type tnl_type)
+{
+ u32 val;
+ int in_eth_xtra_len;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ bool v6 = (ip_hdr(skb)->version == 6);
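+ /* The version nibble sits at the same offset in IPv4 and
+ * IPv6 headers, so ip_hdr() is safe for this test on either.
+ */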
+
+ val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
+ CPL_TX_TNL_LSO_FIRST_F |
+ CPL_TX_TNL_LSO_LAST_F |
+ (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
+ CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
+ CPL_TX_TNL_LSO_IPLENSETOUT_F |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
+ tnl_lso->op_to_IpIdSplitOut = htonl(val);
+
+ tnl_lso->IpIdOffsetOut = 0;
+
+ /* Get the tunnel header length */
+ val = skb_inner_mac_header(skb) - skb_mac_header(skb);
+ in_eth_xtra_len = skb_inner_network_header(skb) -
+ skb_inner_mac_header(skb) - ETH_HLEN;
+
+ switch (tnl_type) {
+ case TX_TNL_TYPE_VXLAN:
+ case TX_TNL_TYPE_GENEVE:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen =
+ htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
+ CPL_TX_TNL_LSO_UDPLENSETOUT_F);
+ break;
+ default:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
+ break;
+ }
+
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
+ htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
+ CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
+
+ tnl_lso->r1 = 0;
+
+ val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
+ CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
+ CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
+ tnl_lso->Flow_to_TcpHdrLen = htonl(val);
+
+ tnl_lso->IpIdOffset = htons(0);
+
+ tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
+ tnl_lso->TCPSeqOffset = htonl(0);
+ tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
+}
+
/**
* t4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1177,6 +1286,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
bool immediate = false;
int len, max_pkt_len;
bool ptp_enabled = is_ptp_enabled(skb, dev);
+ unsigned int chip_ver;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1199,6 +1311,12 @@ out_free: dev_kfree_skb_any(skb);
pi = netdev_priv(dev);
adap = pi->adapter;
+ ssi = skb_shinfo(skb);
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
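+ /* Hand non-GSO ESP traffic to the crypto ULD's inline
+ * IPsec transmit handler.
+ */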
+ if (xfrm_offload(skb) && !ssi->gso_size)
+ return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+#endif /* CHELSIO_IPSEC_INLINE */
+
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
spin_lock(&adap->ptp_lock);
@@ -1215,7 +1333,7 @@ out_free: dev_kfree_skb_any(skb);
}
skb_tx_timestamp(skb);
- reclaim_completed_tx(adap, &q->q, true);
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
@@ -1227,7 +1345,8 @@ out_free: dev_kfree_skb_any(skb);
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
- flits = calc_tx_flits(skb);
+ chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+ flits = calc_tx_flits(skb, chip_ver);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -1241,11 +1360,14 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb))
+ if (is_eth_imm(skb, chip_ver))
immediate = true;
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ tnl_type = cxgb_encap_offload_supported(skb);
+
if (!immediate &&
- unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
@@ -1264,39 +1386,63 @@ out_free: dev_kfree_skb_any(skb);
end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
- ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
+
+ if (tnl_type)
+ len += sizeof(*tnl_lso);
+ else
+ len += sizeof(*lso);
- len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->c.ipid_ofst = htons(0);
- lso->c.mss = htons(ssi->gso_size);
- lso->c.seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->c.len = htonl(skb->len);
- else
- lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
+ if (tnl_type) {
+ struct iphdr *iph = ip_hdr(skb);
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
+ cpl = (void *)(tnl_lso + 1);
+ /* The driver is expected to compute a partial checksum
+ * that does not include the IP Total Length.
+ */
+ if (iph->version == 4) {
+ iph->check = 0;
+ iph->tot_len = 0;
+ iph->check = (u16)(~ip_fast_csum((u8 *)iph,
+ iph->ihl));
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = hwcsum(adap->params.chip, skb);
+ } else {
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->c.ipid_ofst = htons(0);
+ lso->c.mss = htons(ssi->gso_size);
+ lso->c.seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->c.len = htonl(skb->len);
+ else
+ lso->c.len =
+ htonl(LSO_T5_XFER_SIZE_V(skb->len));
+ cpl = (void *)(lso + 1);
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip)
+ <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ }
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
@@ -1341,13 +1487,13 @@ out_free: dev_kfree_skb_any(skb);
cpl->ctrl1 = cpu_to_be64(cntrl);
if (immediate) {
- inline_tx_skb(skb, &q->q, cpl + 1);
+ cxgb4_inline_tx_skb(skb, &q->q, cpl + 1);
dev_consume_skb_any(skb);
} else {
int last_desc;
- write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
- addr);
+ cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1),
+ end, 0, addr);
skb_orphan(skb);
last_desc = q->q.pidx + ndesc - 1;
@@ -1359,7 +1505,7 @@ out_free: dev_kfree_skb_any(skb);
txq_advance(&q->q, ndesc);
- ring_tx_db(adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
@@ -1369,9 +1515,9 @@ out_free: dev_kfree_skb_any(skb);
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
*
- * This is a variant of reclaim_completed_tx() that is used for Tx queues
- * that send only immediate data (presently just the control queues) and
- * thus do not have any sk_buffs to release.
+ * This is a variant of cxgb4_reclaim_completed_tx() that is used
+ * for Tx queues that send only immediate data (presently just
+ * the control queues) and thus do not have any sk_buffs to release.
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
@@ -1446,13 +1592,13 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
}
wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
ctrlq_check_stop(q, wr);
- ring_tx_db(q->adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
spin_unlock(&q->sendq.lock);
kfree_skb(skb);
@@ -1487,7 +1633,7 @@ static void restart_ctrlq(unsigned long data)
txq_advance(&q->q, ndesc);
spin_unlock(&q->sendq.lock);
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
kfree_skb(skb);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
@@ -1500,14 +1646,15 @@ static void restart_ctrlq(unsigned long data)
}
}
if (written > 16) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
spin_lock(&q->sendq.lock);
}
q->full = 0;
-ringdb: if (written)
- ring_tx_db(q->adap, &q->q, written);
+ringdb:
+ if (written)
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
spin_unlock(&q->sendq.lock);
}
@@ -1650,7 +1797,7 @@ static void service_ofldq(struct sge_uld_txq *q)
*/
spin_unlock(&q->sendq.lock);
- reclaim_completed_tx(q->adap, &q->q, false);
+ cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
flits = skb->priority; /* previously saved */
ndesc = flits_to_desc(flits);
@@ -1661,9 +1808,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (u64 *)&q->q.desc[q->q.pidx];
if (is_ofld_imm(skb))
- inline_tx_skb(skb, &q->q, pos);
- else if (map_skb(q->adap->pdev_dev, skb,
- (dma_addr_t *)skb->head)) {
+ cxgb4_inline_tx_skb(skb, &q->q, pos);
+ else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
+ (dma_addr_t *)skb->head)) {
txq_stop_maperr(q);
spin_lock(&q->sendq.lock);
break;
@@ -1694,9 +1841,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (void *)txq->desc;
}
- write_sgl(skb, &q->q, (void *)pos,
- end, hdr_len,
- (dma_addr_t *)skb->head);
+ cxgb4_write_sgl(skb, &q->q, (void *)pos,
+ end, hdr_len,
+ (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
skb->dev = q->adap->port[0];
skb->destructor = deferred_unmap_destructor;
@@ -1710,7 +1857,7 @@ static void service_ofldq(struct sge_uld_txq *q)
txq_advance(&q->q, ndesc);
written += ndesc;
if (unlikely(written > 32)) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
@@ -1725,7 +1872,7 @@ static void service_ofldq(struct sge_uld_txq *q)
kfree_skb(skb);
}
if (likely(written))
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
/* Indicate that no thread is processing the Pending Send Queue
* currently.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f63210f15579..047609ef0515 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -195,9 +195,11 @@ static void t4_report_fw_error(struct adapter *adap)
u32 pcie_fw;
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
- if (pcie_fw & PCIE_FW_ERR_F)
+ if (pcie_fw & PCIE_FW_ERR_F) {
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
+ adap->flags &= ~FW_OK;
+ }
}
/*
@@ -317,9 +319,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
* wait [for a while] till we're at the front [or bail out with an
* EBUSY] ...
*/
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_add_tail(&entry.list, &adap->mlist.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
delay_idx = 0;
ms = delay[0];
@@ -332,9 +334,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
*/
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
@@ -365,9 +367,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
if (v != MBOX_OWNER_DRV) {
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
@@ -418,9 +420,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
execute = i + ms;
t4_record_mbox(adap, cmd_rpl,
MBOX_LEN, access, execute);
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
return -FW_CMD_RETVAL_G((int)res);
}
}
@@ -430,9 +432,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
t4_fatal_err(adap);
return ret;
}
@@ -524,11 +526,14 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* MEM_EDC1 = 1
* MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
* MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ * MEM_HMA = 4
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
- if (mtype != MEM_MC1)
+ if (mtype == MEM_HMA) {
+ memoffset = 2 * (edc_size * 1024 * 1024);
+ } else if (mtype != MEM_MC1) {
memoffset = (mtype * (edc_size * 1024 * 1024));
- else {
+ } else {
mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
MA_EXT_MEMORY0_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
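
/* Editorial worked example, not part of the patch: edc_size and mc_size are
 * in MB.  With two 256 MB EDC regions and a 1024 MB MC0, MEM_EDC1 starts at
 * 1 * 256 = 256 MB, the new MEM_HMA at 2 * 256 = 512 MB (it occupies the
 * MC0 slot), and MEM_MC1 at (2 * 256 + 1024) = 1536 MB into adapter memory.
 */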
@@ -2844,8 +2849,6 @@ enum {
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
-
- FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
/**
@@ -3558,8 +3561,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
const __be32 *p = (const __be32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- unsigned int fw_img_start = adap->params.sf_fw_start;
- unsigned int fw_start_sec = fw_img_start / sf_sec_size;
+ unsigned int fw_start_sec = FLASH_FW_START_SEC;
+ unsigned int fw_size = FLASH_FW_MAX_SIZE;
+ unsigned int fw_start = FLASH_FW_START;
if (!size) {
dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3575,9 +3579,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
"FW image size differs from size in FW header\n");
return -EINVAL;
}
- if (size > FW_MAX_SIZE) {
+ if (size > fw_size) {
dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
- FW_MAX_SIZE);
+ fw_size);
return -EFBIG;
}
if (!t4_fw_matches_chip(adap, hdr))
@@ -3604,11 +3608,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
- ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
- addr = fw_img_start;
+ addr = fw_start;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
@@ -3618,7 +3622,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
}
ret = t4_write_flash(adap,
- fw_img_start + offsetof(struct fw_hdr, fw_ver),
+ fw_start + offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
if (ret)
@@ -4924,6 +4928,14 @@ void t4_intr_disable(struct adapter *adapter)
t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
}
+/* Return the size of the chip's RSS mapping table: T6 doubles the
+ * number of entries relative to T4/T5.
+ */
+unsigned int t4_chip_rss_size(struct adapter *adap)
+{
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ return RSS_NENTRIES;
+ else
+ return T6_RSS_NENTRIES;
+}
+
/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
@@ -5062,10 +5074,11 @@ static int rd_rss_row(struct adapter *adap, int row, u32 *val)
*/
int t4_read_rss(struct adapter *adapter, u16 *map)
{
+ int i, ret, nentries;
u32 val;
- int i, ret;
- for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+ nentries = t4_chip_rss_size(adapter);
+ for (i = 0; i < nentries / 2; ++i) {
ret = rd_rss_row(adapter, i, &val);
if (ret)
return ret;
@@ -5077,7 +5090,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
static unsigned int t4_use_ldst(struct adapter *adap)
{
- return (adap->flags & FW_OK) || !adap->use_bd;
+ return (adap->flags & FW_OK) && !adap->use_bd;
}
/**
@@ -6072,6 +6085,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"CR2_QSFP",
"SFP28",
"KR_SFP28",
+ "KR_XLAUI"
};
if (port_type < ARRAY_SIZE(port_type_description))
@@ -6527,18 +6541,21 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
* t4_sge_ctxt_flush - flush the SGE context cache
* @adap: the adapter
* @mbox: mailbox to use for the FW command
+ * @ctx_type: Egress or Ingress
*
* Issues a FW command through the given mailbox to flush the
* SGE context cache.
*/
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
{
int ret;
u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
+ FW_LDST_ADDRSPC_SGE_EGRC :
+ FW_LDST_ADDRSPC_SGE_INGC);
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
ldst_addrspace);
@@ -7452,6 +7469,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_free_raw_mac_filt - Frees a raw MAC entry in the MPS TCAM
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index of the entry in mps tcam
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Removes the MAC entry at the specified index using the raw MAC interface.
+ *
+ * Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ FW_CMD_LEN16_V(val));
+
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
+ FW_VI_MAC_ID_BASED_FREE);
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
+ * t4_alloc_raw_mac_filt - Adds a MAC entry in the MPS TCAM
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index at which to add this entry
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Adds the MAC entry at the specified index using the raw MAC interface.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ int ret = 0;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+
+ /* Specify the raw index at which to install this entry */
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0) {
+ ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
+ if (ret != idx)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
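
/* Editorial usage sketch, not part of the patch: allocate a raw MPS TCAM
 * entry matching an inner (tunnelled) MAC and free it again.  The start_idx
 * and lookup-type values here are illustrative assumptions only.
 *
 *	int idx = t4_alloc_raw_mac_filt(adap, viid, addr, mask, start_idx,
 *					1, pi->port_id, true);
 *	if (idx >= 0)
 *		t4_free_raw_mac_filt(adap, viid, addr, mask, idx,
 *				     1, pi->port_id, true);
 */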
+
+/**
* t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -8492,22 +8615,6 @@ found:
return 0;
}
-static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
-{
- u16 val;
- u32 pcie_cap;
-
- pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (pcie_cap) {
- pci_read_config_word(adapter->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, &val);
- val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
- val |= range;
- pci_write_config_word(adapter->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, val);
- }
-}
-
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -8593,8 +8700,9 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
- /* Set pci completion timeout value to 4 seconds. */
- set_pcie_completion_timeout(adapter, 0xd);
+ /* Set PCIe completion timeout to 4 seconds. */
+ pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
return 0;
}
@@ -9737,3 +9845,91 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
NULL, 1);
}
+
+/**
+ * t4_i2c_rd - read I2C data from adapter
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: Port number if per-port device; <0 if not
+ * @devid: per-port device ID or absolute device ID
+ * @offset: byte offset into device I2C space
+ * @len: byte length of I2C space data
+ * @buf: buffer in which to return I2C data
+ *
+ * Reads the I2C data from the indicated device and location.
+ */
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ unsigned int devid, unsigned int offset,
+ unsigned int len, u8 *buf)
+{
+ struct fw_ldst_cmd ldst_cmd, ldst_rpl;
+ unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
+ int ret = 0;
+
+ if (len > I2C_PAGE_SIZE)
+ return -EINVAL;
+
+ /* Don't allow reads that span multiple pages */
+ if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
+ return -EINVAL;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
+ ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
+ ldst_cmd.u.i2c.did = devid;
+
+ while (len > 0) {
+ unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
+
+ ldst_cmd.u.i2c.boffset = offset;
+ ldst_cmd.u.i2c.blen = i2c_len;
+
+ ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
+ &ldst_rpl);
+ if (ret)
+ break;
+
+ memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
+ offset += i2c_len;
+ buf += i2c_len;
+ len -= i2c_len;
+ }
+
+ return ret;
+}
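
/* Editorial usage sketch, not part of the patch: read the one-byte SFP
 * diagnostic-monitoring-type field from a module EEPROM at I2C device
 * address 0xa0, using the constants this patch adds to t4_hw.h; "port" and
 * "diag" are assumptions of the example.
 *
 *	u8 diag;
 *	ret = t4_i2c_rd(adap, adap->mbox, port, I2C_DEV_ADDR_A0,
 *			SFP_DIAG_TYPE_ADDR, SFP_DIAG_TYPE_LEN, &diag);
 */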
+
+/**
+ * t4_set_vlan_acl - Set a VLAN id for the specified VF
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @vf: one of the VFs instantiated by the specified PF
+ * @vlan: the VLAN ID to be set
+ */
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+ u16 vlan)
+{
+ struct fw_acl_vlan_cmd vlan_cmd;
+ unsigned int enable;
+
+ enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
+ memset(&vlan_cmd, 0, sizeof(vlan_cmd));
+ vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F |
+ FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
+ FW_ACL_VLAN_CMD_VFN_V(vf));
+ vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
+ /* Drop all packets that do not match the VLAN id */
+ vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
+ if (enable != 0) {
+ vlan_cmd.nvlan = 1;
+ vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
+ }
+
+ return t4_wr_mbox(adap, mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
+}
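
/* Editorial note, not part of the patch: a PF-side .ndo_set_vf_vlan handler
 * would typically program a VST VLAN for VF "vf" with something like the
 * call below (the vf + 1 indexing is an assumption of this sketch); the VF
 * then picks the value up via t4vf_get_vf_vlan_acl(), added to t4vf_hw.c
 * later in this patch, when its interface is opened.
 *
 *	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
 */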
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index a964ed184356..361d5032c288 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -38,21 +38,22 @@
#include <linux/types.h>
enum {
- NCHAN = 4, /* # of HW channels */
- MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
- EEPROMSIZE = 17408, /* Serial EEPROM physical size */
- EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
- EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
- RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
- TCB_SIZE = 128, /* TCB size */
- NMTUS = 16, /* size of MTU table */
- NCCTRL_WIN = 32, /* # of congestion control windows */
- NTX_SCHED = 8, /* # of HW Tx scheduling queues */
- PM_NSTATS = 5, /* # of PM stats */
- T6_PM_NSTATS = 7, /* # of PM stats in T6 */
- MBOX_LEN = 64, /* mailbox size in bytes */
- TRACE_LEN = 112, /* length of trace data and mask */
- FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
+ NCHAN = 4, /* # of HW channels */
+ MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
+ RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
+ T6_RSS_NENTRIES = 4096, /* # of entries in RSS mapping table */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ NTX_SCHED = 8, /* # of HW Tx scheduling queues */
+ PM_NSTATS = 5, /* # of PM stats */
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ TRACE_LEN = 112, /* length of trace data and mask */
+ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
};
enum {
@@ -70,7 +71,9 @@ enum {
/* SGE context types */
enum ctxt_type {
- CTXT_FLM = 2,
+ CTXT_EGRESS,
+ CTXT_INGRESS,
+ CTXT_FLM,
CTXT_CNM,
};
@@ -284,4 +287,14 @@ enum {
#define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
#define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+#define I2C_PAGE_SIZE 0x100
+#define SFP_DIAG_TYPE_ADDR 0x5c
+#define SFP_DIAG_TYPE_LEN 0x1
+#define SFF_8472_COMP_ADDR 0x5e
+#define SFF_8472_COMP_LEN 0x1
+#define SFF_REV_ADDR 0x1
+#define SFF_REV_LEN 0x1
+
#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 7e12f241145b..d0db4427b77e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -107,6 +107,7 @@ enum {
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -1479,6 +1480,169 @@ struct ulp_txpkt {
#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
+enum cpl_tx_tnl_lso_type {
+ TX_TNL_TYPE_OPAQUE,
+ TX_TNL_TYPE_NVGRE,
+ TX_TNL_TYPE_VXLAN,
+ TX_TNL_TYPE_GENEVE,
+};
+
+struct cpl_tx_tnl_lso {
+ __be32 op_to_IpIdSplitOut;
+ __be16 IpIdOffsetOut;
+ __be16 UdpLenSetOut_to_TnlHdrLen;
+ __be64 r1;
+ __be32 Flow_to_TcpHdrLen;
+ __be16 IpIdOffset;
+ __be16 IpIdSplit_to_Mss;
+ __be32 TCPSeqOffset;
+ __be32 EthLenOffset_Size;
+ /* encapsulated CPL (TX_PKT_XT) follows here */
+};
+
+#define CPL_TX_TNL_LSO_OPCODE_S 24
+#define CPL_TX_TNL_LSO_OPCODE_M 0xff
+#define CPL_TX_TNL_LSO_OPCODE_V(x) ((x) << CPL_TX_TNL_LSO_OPCODE_S)
+#define CPL_TX_TNL_LSO_OPCODE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M)
+
+#define CPL_TX_TNL_LSO_FIRST_S 23
+#define CPL_TX_TNL_LSO_FIRST_M 0x1
+#define CPL_TX_TNL_LSO_FIRST_V(x) ((x) << CPL_TX_TNL_LSO_FIRST_S)
+#define CPL_TX_TNL_LSO_FIRST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M)
+#define CPL_TX_TNL_LSO_FIRST_F CPL_TX_TNL_LSO_FIRST_V(1U)
+
+#define CPL_TX_TNL_LSO_LAST_S 22
+#define CPL_TX_TNL_LSO_LAST_M 0x1
+#define CPL_TX_TNL_LSO_LAST_V(x) ((x) << CPL_TX_TNL_LSO_LAST_S)
+#define CPL_TX_TNL_LSO_LAST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M)
+#define CPL_TX_TNL_LSO_LAST_F CPL_TX_TNL_LSO_LAST_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S 21
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M 0x1
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \
+ CPL_TX_TNL_LSO_ETHHDRLENXOUT_M)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPV6OUT_S 20
+#define CPL_TX_TNL_LSO_IPV6OUT_M 0x1
+#define CPL_TX_TNL_LSO_IPV6OUT_V(x) ((x) << CPL_TX_TNL_LSO_IPV6OUT_S)
+#define CPL_TX_TNL_LSO_IPV6OUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M)
+#define CPL_TX_TNL_LSO_IPV6OUT_F CPL_TX_TNL_LSO_IPV6OUT_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLEN_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S)
+#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLEN_S 4
+#define CPL_TX_TNL_LSO_IPHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLEN_S)
+#define CPL_TX_TNL_LSO_IPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_TCPHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TCPHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S)
+#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_MSS_S 0
+#define CPL_TX_TNL_LSO_MSS_M 0x3fff
+#define CPL_TX_TNL_LSO_MSS_V(x) ((x) << CPL_TX_TNL_LSO_MSS_S)
+#define CPL_TX_TNL_LSO_MSS_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M)
+
+#define CPL_TX_TNL_LSO_SIZE_S 0
+#define CPL_TX_TNL_LSO_SIZE_M 0xfffffff
+#define CPL_TX_TNL_LSO_SIZE_V(x) ((x) << CPL_TX_TNL_LSO_SIZE_S)
+#define CPL_TX_TNL_LSO_SIZE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_S 4
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S 3
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPLENSETOUT_S 2
+#define CPL_TX_TNL_LSO_IPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_F CPL_TX_TNL_LSO_IPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPIDINCOUT_S 1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_F CPL_TX_TNL_LSO_IPIDINCOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S 14
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \
+ CPL_TX_TNL_LSO_UDPCHKCLROUT_M)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_S 15
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \
+ CPL_TX_TNL_LSO_UDPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_F CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_TNLTYPE_S 12
+#define CPL_TX_TNL_LSO_TNLTYPE_M 0x3
+#define CPL_TX_TNL_LSO_TNLTYPE_V(x) ((x) << CPL_TX_TNL_LSO_TNLTYPE_S)
+#define CPL_TX_TNL_LSO_TNLTYPE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLEN 16
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN 0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN)
+#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN)
+
+#define CPL_TX_TNL_LSO_TNLHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TNLHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S)
+#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPV6_S 20
+#define CPL_TX_TNL_LSO_IPV6_M 0x1
+#define CPL_TX_TNL_LSO_IPV6_V(x) ((x) << CPL_TX_TNL_LSO_IPV6_S)
+#define CPL_TX_TNL_LSO_IPV6_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M)
+#define CPL_TX_TNL_LSO_IPV6_F CPL_TX_TNL_LSO_IPV6_V(1U)
+
#define ULP_TX_SC_MORE_S 23
#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60cf9e02de5d..51b18035d691 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -183,6 +183,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
/* T6 adapters:
*/
@@ -206,6 +207,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a7cfece72828..a6df73398d17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -45,6 +45,9 @@
#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4
+#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
+
#define MYPORT_BASE 0x1c000
#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
@@ -961,6 +964,10 @@
#define MA_EXT_MEMORY1_BAR_A 0x7808
+#define HMA_MUX_S 5
+#define HMA_MUX_V(x) ((x) << HMA_MUX_S)
+#define HMA_MUX_F HMA_MUX_V(1U)
+
#define EXT_MEM1_BASE_S 16
#define EXT_MEM1_BASE_M 0xfffU
#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
@@ -2504,6 +2511,28 @@
#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
+#define MPS_RX_VXLAN_TYPE_A 0x11234
+
+#define VXLAN_EN_S 16
+#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S)
+#define VXLAN_EN_F VXLAN_EN_V(1U)
+
+#define VXLAN_S 0
+#define VXLAN_M 0xffffU
+#define VXLAN_V(x) ((x) << VXLAN_S)
+#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M)
+
+#define MPS_RX_GENEVE_TYPE_A 0x11238
+
+#define GENEVE_EN_S 16
+#define GENEVE_EN_V(x) ((x) << GENEVE_EN_S)
+#define GENEVE_EN_F GENEVE_EN_V(1U)
+
+#define GENEVE_S 0
+#define GENEVE_M 0xffffU
+#define GENEVE_V(x) ((x) << GENEVE_S)
+#define GENEVE_G(x) (((x) >> GENEVE_S) & GENEVE_M)
+
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
@@ -2530,8 +2559,14 @@
#define DATAPORTNUM_S 12
#define DATAPORTNUM_M 0xfU
+#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S)
#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
+#define DATALKPTYPE_S 10
+#define DATALKPTYPE_M 0x3U
+#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S)
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
#define DATADIPHIT_S 8
#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
#define DATADIPHIT_F DATADIPHIT_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 57eb4ad3485d..0d83b4064a78 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -513,6 +513,13 @@ struct fw_ulptx_wr {
u64 cookie;
};
+#define FW_ULPTX_WR_DATA_S 28
+#define FW_ULPTX_WR_DATA_M 0x1
+#define FW_ULPTX_WR_DATA_V(x) ((x) << FW_ULPTX_WR_DATA_S)
+#define FW_ULPTX_WR_DATA_G(x) \
+ (((x) >> FW_ULPTX_WR_DATA_S) & FW_ULPTX_WR_DATA_M)
+#define FW_ULPTX_WR_DATA_F FW_ULPTX_WR_DATA_V(1U)
+
struct fw_tp_wr {
__be32 op_to_immdlen;
__be32 flowid_len16;
@@ -828,6 +835,7 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_MPS = 0x0020,
FW_LDST_ADDRSPC_FUNC = 0x0028,
FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
+ FW_LDST_ADDRSPC_I2C = 0x0038,
};
enum fw_ldst_mps_fid {
@@ -2059,6 +2067,7 @@ struct fw_vi_cmd {
#define FW_VI_MAC_ADD_MAC 0x3FF
#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+#define FW_VI_MAC_ID_BASED_FREE 0x3FC
#define FW_CLS_TCAM_NUM_ENTRIES 336
enum fw_vi_mac_smac {
@@ -2075,6 +2084,13 @@ enum fw_vi_mac_result {
FW_VI_MAC_R_F_ACL_CHECK
};
+enum fw_vi_mac_entry_types {
+ FW_VI_MAC_TYPE_EXACTMAC,
+ FW_VI_MAC_TYPE_HASHVEC,
+ FW_VI_MAC_TYPE_RAW,
+ FW_VI_MAC_TYPE_EXACTMAC_VNI,
+};
+
struct fw_vi_mac_cmd {
__be32 op_to_viid;
__be32 freemacs_to_len16;
@@ -2086,6 +2102,13 @@ struct fw_vi_mac_cmd {
struct fw_vi_mac_hash {
__be64 hashvec;
} hash;
+ struct fw_vi_mac_raw {
+ __be32 raw_idx_pkd;
+ __be32 data0_pkd;
+ __be32 data1[2];
+ __be64 data0m_pkd;
+ __be32 data1m[2];
+ } raw;
} u;
};
@@ -2095,6 +2118,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_FREEMACS_S 31
#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_S 23
+#define FW_VI_MAC_CMD_ENTRY_TYPE_M 0x7
+#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x) ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x) \
+ (((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M)
+
#define FW_VI_MAC_CMD_HASHVECEN_S 23
#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S)
#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U)
@@ -2121,6 +2150,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_IDX_G(x) \
(((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
+#define FW_VI_MAC_CMD_RAW_IDX_S 16
+#define FW_VI_MAC_CMD_RAW_IDX_M 0xffff
+#define FW_VI_MAC_CMD_RAW_IDX_V(x) ((x) << FW_VI_MAC_CMD_RAW_IDX_S)
+#define FW_VI_MAC_CMD_RAW_IDX_G(x) \
+ (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+
#define FW_RXMODE_MTU_NO_CHG 65535
struct fw_vi_rxmode_cmd {
@@ -2325,14 +2360,22 @@ struct fw_acl_vlan_cmd {
#define FW_ACL_VLAN_CMD_VFN_S 0
#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S)
-#define FW_ACL_VLAN_CMD_EN_S 31
-#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_S 31
+#define FW_ACL_VLAN_CMD_EN_M 0x1
+#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_G(x) \
+ (((x) >> FW_ACL_VLAN_CMD_EN_S) & FW_ACL_VLAN_CMD_EN_M)
+#define FW_ACL_VLAN_CMD_EN_F FW_ACL_VLAN_CMD_EN_V(1U)
#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7
#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S)
-#define FW_ACL_VLAN_CMD_FM_S 6
-#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_S 6
+#define FW_ACL_VLAN_CMD_FM_M 0x1
+#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_G(x) \
+ (((x) >> FW_ACL_VLAN_CMD_FM_S) & FW_ACL_VLAN_CMD_FM_M)
+#define FW_ACL_VLAN_CMD_FM_F FW_ACL_VLAN_CMD_FM_V(1U)
/* old 16-bit port capabilities bitmap (fw_port_cap16_t) */
enum fw_port_cap {
@@ -2828,6 +2871,7 @@ enum fw_port_type {
FW_PORT_TYPE_CR2_QSFP,
FW_PORT_TYPE_SFP28,
FW_PORT_TYPE_KR_SFP28,
+ FW_PORT_TYPE_KR_XLAUI,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 08c6ddb84a04..5883f09e3804 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -92,6 +92,7 @@ struct sge_rspq;
*/
struct port_info {
struct adapter *adapter; /* our adapter */
+ u32 vlan_id; /* vlan id for VST */
u16 viid; /* virtual interface ID */
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b48361cfdc78..b7e79e64d2ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -791,6 +791,8 @@ static int cxgb4vf_open(struct net_device *dev)
if (err)
goto err_unwind;
+ pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);
+
netif_tx_start_all_queues(dev);
set_bit(pi->port_id, &adapter->open_device_map);
return 0;
@@ -1229,7 +1231,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
else
return PORT_OTHER;
} else if (port_type == FW_PORT_TYPE_KR4_100G ||
- port_type == FW_PORT_TYPE_KR_SFP28) {
+ port_type == FW_PORT_TYPE_KR_SFP28 ||
+ port_type == FW_PORT_TYPE_KR_XLAUI) {
return PORT_NONE;
}
@@ -1323,6 +1326,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
SET_LMM(25000baseKR_Full);
break;
+ case FW_PORT_TYPE_KR_XLAUI:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+ break;
+
case FW_PORT_TYPE_CR2_QSFP:
SET_LMM(FIBRE);
SET_LMM(50000baseSR2_Full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 14d7e673c656..dfce5df7538e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1202,6 +1202,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(qidx >= pi->nqsets);
txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+ if (pi->vlan_id && !skb_vlan_tag_present(skb))
+ __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+ pi->vlan_id);
+
/*
* Take this opportunity to reclaim any TX Descriptors whose DMA
* transfers have completed.
@@ -1570,6 +1574,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
{
struct adapter *adapter = rxq->rspq.adapter;
struct sge *s = &adapter->sge;
+ struct port_info *pi;
int ret;
struct sk_buff *skb;
@@ -1586,8 +1591,9 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
+ pi = netdev_priv(skb->dev);
- if (pkt->vlan_ex) {
+ if (pkt->vlan_ex && !pi->vlan_id) {
__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
be16_to_cpu(pkt->vlan));
rxq->stats.vlan_ex++;
@@ -1620,6 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
struct adapter *adapter = rspq->adapter;
struct sge *s = &adapter->sge;
+ struct port_info *pi;
/*
* If this is a good TCP packet and we have Generic Receive Offload
@@ -1644,6 +1651,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
__skb_pull(skb, s->pktshift);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
+ pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
if (csum_ok && !pkt->err_vec &&
@@ -1660,9 +1668,10 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
} else
skb_checksum_none_assert(skb);
- if (pkt->vlan_ex) {
+ if (pkt->vlan_ex && !pi->vlan_id) {
rxq->stats.vlan_ex++;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(pkt->vlan));
}
netif_receive_skb(skb);
@@ -2619,8 +2628,8 @@ void t4vf_sge_stop(struct adapter *adapter)
int t4vf_sge_init(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
- u32 fl0 = sge_params->sge_fl_buffer_size[0];
- u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+ u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
/*
@@ -2628,9 +2637,20 @@ int t4vf_sge_init(struct adapter *adapter)
* the Physical Function Driver. Ideally we should be able to deal
* with _any_ configuration. Practice is different ...
*/
- if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
- fl0, fl1);
+ fl_small_pg, fl_large_pg);
return -EINVAL;
}
if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2642,8 +2662,8 @@ int t4vf_sge_init(struct adapter *adapter)
/*
* Now translate the adapter parameters into our internal forms.
*/
- if (fl1)
- s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
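
/* Editorial worked example, not part of the patch: with 4 KB pages
 * (PAGE_SHIFT = 12) and a 64 KB large-page buffer reported by the PF,
 * fl_pg_order = ilog2(65536) - 12 = 4, i.e. large free-list buffers are
 * order-4 page allocations.  The power-of-two check above guarantees the
 * ilog2() here is exact.
 */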
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 9cf9c56b0f73..712e8f0c71b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -413,5 +413,6 @@ int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
int t4vf_prep_adapter(struct adapter *);
int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
unsigned int *naddr, u8 *addr);
+int t4vf_get_vf_vlan_acl(struct adapter *adapter);
#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 67aec59a14e6..798695bf8678 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -2147,3 +2147,31 @@ int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
return ret;
}
+
+/**
+ * t4vf_get_vf_vlan_acl - Get the VLAN ID to be set on
+ * the VI of this VF.
+ * @adapter: the adapter
+ *
+ * Find the VLAN ID to be set on the VF's VI. The requested VLAN ID
+ * is provided by the host OS via a callback in the PF driver.
+ */
+int t4vf_get_vf_vlan_acl(struct adapter *adapter)
+{
+ struct fw_acl_vlan_cmd cmd;
+ int vlan = 0;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = htonl(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+
+ /* Note: Do not enable the ACL */
+ cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
+
+ if (!ret)
+ vlan = be16_to_cpu(cmd.vlanid[0]);
+
+ return vlan;
+}
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 410a0a95130b..b3e7fafee3df 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
#endif /* CONFIG_CS89x0_PLATFORM */
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
+MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index f910f0f386d6..977d4c2c759d 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -187,6 +187,7 @@ struct net_device * __init mac89x0_probe(int unit)
unsigned long ioaddr;
unsigned short sig;
int err = -ENODEV;
+ struct nubus_rsrc *fres;
if (!MACH_IS_MAC)
return ERR_PTR(-ENODEV);
@@ -207,8 +208,9 @@ struct net_device * __init mac89x0_probe(int unit)
/* We might have to parameterize this later */
slot = 0xE;
/* Get out now if there's a real NuBus card in slot E */
- if (nubus_find_slot(slot, NULL) != NULL)
- goto out;
+ for_each_func_rsrc(fres)
+ if (fres->board->slot == slot)
+ goto out;
/* The pseudo-ISA bits always live at offset 0x300 (gee,
wonder why...) */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 6a9527004cb1..9b218f0e5a4c 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_WQ_NAPI_BUDGET 256
+
#define ENIC_AIC_LARGE_PKT_DIFF 3
struct enic_msix_entry {
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 462d0ce51240..efb9333c7cf8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
#include "enic_res.h"
#include "enic.h"
@@ -578,6 +579,16 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
return __enic_set_rsskey(enic);
}
+static int enic_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ return 0;
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
@@ -597,6 +608,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
.get_link_ksettings = enic_get_ksettings,
+ .get_ts_info = enic_get_ts_info,
};
void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index e130fb757e7b..f202ba72a811 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -856,6 +856,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
netif_tx_stop_queue(txq);
+ skb_tx_timestamp(skb);
if (!skb->xmit_more || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
@@ -1499,7 +1500,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int cq_wq = enic_cq_wq(enic, 0);
unsigned int intr = enic_legacy_io_intr();
unsigned int rq_work_to_do = budget;
- unsigned int wq_work_to_do = -1; /* no limit */
+ unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
@@ -1597,7 +1598,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
struct vnic_wq *wq = &enic->wq[wq_index];
unsigned int cq;
unsigned int intr;
- unsigned int wq_work_to_do = -1; /* clean all desc possible */
+ unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
unsigned int wq_work_done;
unsigned int wq_irq;
diff --git a/drivers/net/ethernet/cortina/Kconfig b/drivers/net/ethernet/cortina/Kconfig
new file mode 100644
index 000000000000..89bc4579724d
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Cortina ethernet devices
+
+config NET_VENDOR_CORTINA
+ bool "Cortina Gemini devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+if NET_VENDOR_CORTINA
+
+config GEMINI_ETHERNET
+ tristate "Gemini Gigabit Ethernet support"
+ depends on OF
+ depends on HAS_IOMEM
+ select PHYLIB
+ select CRC32
+ ---help---
+ This driver supports StorLink SL351x (Gemini) dual Gigabit Ethernet.
+
+endif # NET_VENDOR_CORTINA
diff --git a/drivers/net/ethernet/cortina/Makefile b/drivers/net/ethernet/cortina/Makefile
new file mode 100644
index 000000000000..4e86d398a89c
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Cortina Gemini network device drivers.
+
+obj-$(CONFIG_GEMINI_ETHERNET) += gemini.o
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
new file mode 100644
index 000000000000..5eb999af2c40
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -0,0 +1,2593 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Ethernet device driver for Cortina Systems Gemini SoC
+ * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus
+ * Net Engine and Gigabit Ethernet MAC (GMAC)
+ * This hardware contains a TCP Offload Engine (TOE) but currently the
+ * driver does not make use of it.
+ *
+ * Authors:
+ * Linus Walleij <linus.walleij@linaro.org>
+ * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT)
+ * Michał Mirosław <mirq-linux@rere.qmqm.pl>
+ * Paulius Zaleckas <paulius.zaleckas@gmail.com>
+ * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it>
+ * Gary Chen & Ch Hsu Storlink Semiconductor
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include "gemini.h"
+
+#define DRV_NAME "gmac-gemini"
+#define DRV_VERSION "1.0"
+
+#define HSIZE_8 0x00
+#define HSIZE_16 0x01
+#define HSIZE_32 0x02
+
+#define HBURST_SINGLE 0x00
+#define HBURST_INCR 0x01
+#define HBURST_INCR4 0x02
+#define HBURST_INCR8 0x03
+
+#define HPROT_DATA_CACHE BIT(0)
+#define HPROT_PRIVILIGED BIT(1)
+#define HPROT_BUFFERABLE BIT(2)
+#define HPROT_CACHABLE BIT(3)
+
+#define DEFAULT_RX_COALESCE_NSECS 0
+#define DEFAULT_GMAC_RXQ_ORDER 9
+#define DEFAULT_GMAC_TXQ_ORDER 8
+#define DEFAULT_RX_BUF_ORDER 11
+#define DEFAULT_NAPI_WEIGHT 64
+#define TX_MAX_FRAGS 16
+#define TX_QUEUE_NUM 1 /* max: 6 */
+#define RX_MAX_ALLOC_ORDER 2
+
+#define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \
+ GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT)
+#define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \
+ GMAC0_SWTQ00_FIN_INT_BIT)
+#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
+
+#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
+/**
+ * struct gmac_queue_page - page buffer per-page info
+ * @page: page allocated for this free-queue buffer slot
+ * @mapping: DMA address the page is mapped to
+ */
+struct gmac_queue_page {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
+struct gmac_txq {
+ struct gmac_txdesc *ring;
+ struct sk_buff **skb;
+ unsigned int cptr;
+ unsigned int noirq_packets;
+};
+
+struct gemini_ethernet;
+
+struct gemini_ethernet_port {
+ u8 id; /* 0 or 1 */
+
+ struct gemini_ethernet *geth;
+ struct net_device *netdev;
+ struct device *dev;
+ void __iomem *dma_base;
+ void __iomem *gmac_base;
+ struct clk *pclk;
+ struct reset_control *reset;
+ int irq;
+ __le32 mac_addr[3];
+
+ void __iomem *rxq_rwptr;
+ struct gmac_rxdesc *rxq_ring;
+ unsigned int rxq_order;
+
+ struct napi_struct napi;
+ struct hrtimer rx_coalesce_timer;
+ unsigned int rx_coalesce_nsecs;
+ unsigned int freeq_refill;
+ struct gmac_txq txq[TX_QUEUE_NUM];
+ unsigned int txq_order;
+ unsigned int irq_every_tx_packets;
+
+ dma_addr_t rxq_dma_base;
+ dma_addr_t txq_dma_base;
+
+ unsigned int msg_enable;
+ spinlock_t config_lock; /* Locks config register */
+
+ struct u64_stats_sync tx_stats_syncp;
+ struct u64_stats_sync rx_stats_syncp;
+ struct u64_stats_sync ir_stats_syncp;
+
+ struct rtnl_link_stats64 stats;
+ u64 hw_stats[RX_STATS_NUM];
+ u64 rx_stats[RX_STATUS_NUM];
+ u64 rx_csum_stats[RX_CHKSUM_NUM];
+ u64 rx_napi_exits;
+ u64 tx_frag_stats[TX_MAX_FRAGS];
+ u64 tx_frags_linearized;
+ u64 tx_hw_csummed;
+};
+
+struct gemini_ethernet {
+ struct device *dev;
+ void __iomem *base;
+ struct gemini_ethernet_port *port0;
+ struct gemini_ethernet_port *port1;
+
+ spinlock_t irq_lock; /* Locks IRQ-related registers */
+ unsigned int freeq_order;
+ unsigned int freeq_frag_order;
+ struct gmac_rxdesc *freeq_ring;
+ dma_addr_t freeq_dma_base;
+ struct gmac_queue_page *freeq_pages;
+ unsigned int num_freeq_pages;
+ spinlock_t freeq_lock; /* Locks queue from reentrance */
+};
+
+#define GMAC_STATS_NUM ( \
+ RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
+ TX_MAX_FRAGS + 2)
+
+static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = {
+ "GMAC_IN_DISCARDS",
+ "GMAC_IN_ERRORS",
+ "GMAC_IN_MCAST",
+ "GMAC_IN_BCAST",
+ "GMAC_IN_MAC1",
+ "GMAC_IN_MAC2",
+ "RX_STATUS_GOOD_FRAME",
+ "RX_STATUS_TOO_LONG_GOOD_CRC",
+ "RX_STATUS_RUNT_FRAME",
+ "RX_STATUS_SFD_NOT_FOUND",
+ "RX_STATUS_CRC_ERROR",
+ "RX_STATUS_TOO_LONG_BAD_CRC",
+ "RX_STATUS_ALIGNMENT_ERROR",
+ "RX_STATUS_TOO_LONG_BAD_ALIGN",
+ "RX_STATUS_RX_ERR",
+ "RX_STATUS_DA_FILTERED",
+ "RX_STATUS_BUFFER_FULL",
+ "RX_STATUS_11",
+ "RX_STATUS_12",
+ "RX_STATUS_13",
+ "RX_STATUS_14",
+ "RX_STATUS_15",
+ "RX_CHKSUM_IP_UDP_TCP_OK",
+ "RX_CHKSUM_IP_OK_ONLY",
+ "RX_CHKSUM_NONE",
+ "RX_CHKSUM_3",
+ "RX_CHKSUM_IP_ERR_UNKNOWN",
+ "RX_CHKSUM_IP_ERR",
+ "RX_CHKSUM_TCP_UDP_ERR",
+ "RX_CHKSUM_7",
+ "RX_NAPI_EXITS",
+ "TX_FRAGS[1]",
+ "TX_FRAGS[2]",
+ "TX_FRAGS[3]",
+ "TX_FRAGS[4]",
+ "TX_FRAGS[5]",
+ "TX_FRAGS[6]",
+ "TX_FRAGS[7]",
+ "TX_FRAGS[8]",
+ "TX_FRAGS[9]",
+ "TX_FRAGS[10]",
+ "TX_FRAGS[11]",
+ "TX_FRAGS[12]",
+ "TX_FRAGS[13]",
+ "TX_FRAGS[14]",
+ "TX_FRAGS[15]",
+ "TX_FRAGS[16+]",
+ "TX_FRAGS_LINEARIZED",
+ "TX_HW_CSUMMED",
+};
+
+static void gmac_dump_dma_state(struct net_device *netdev);
+
+static void gmac_update_config0_reg(struct net_device *netdev,
+ u32 val, u32 vmask)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg = (reg & ~vmask) | val;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_enable_tx_rx(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg &= ~CONFIG0_TX_RX_DISABLE;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_disable_tx_rx(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ val = readl(port->gmac_base + GMAC_CONFIG0);
+ val |= CONFIG0_TX_RX_DISABLE;
+ writel(val, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+
+ mdelay(10); /* let GMAC consume packet */
+}
+
+static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ val = readl(port->gmac_base + GMAC_CONFIG0);
+ val &= ~CONFIG0_FLOW_CTL;
+ if (tx)
+ val |= CONFIG0_FLOW_TX;
+ if (rx)
+ val |= CONFIG0_FLOW_RX;
+ writel(val, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_speed_set(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ union gmac_status status, old_status;
+ int pause_tx = 0;
+ int pause_rx = 0;
+
+ status.bits32 = readl(port->gmac_base + GMAC_STATUS);
+ old_status.bits32 = status.bits32;
+ status.bits.link = phydev->link;
+ status.bits.duplex = phydev->duplex;
+
+ switch (phydev->speed) {
+ case 1000:
+ status.bits.speed = GMAC_SPEED_1000;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
+		netdev_info(netdev, "connected to RGMII @ 1 Gbit\n");
+ break;
+ case 100:
+ status.bits.speed = GMAC_SPEED_100;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+		netdev_info(netdev, "connected to RGMII @ 100 Mbit\n");
+ break;
+ case 10:
+ status.bits.speed = GMAC_SPEED_10;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+		netdev_info(netdev, "connected to RGMII @ 10 Mbit\n");
+ break;
+ default:
+		netdev_warn(netdev, "Unsupported PHY speed (%d)\n",
+			    phydev->speed);
+ }
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ u16 lcladv = phy_read(phydev, MII_ADVERTISE);
+ u16 rmtadv = phy_read(phydev, MII_LPA);
+ u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_RX)
+ pause_rx = 1;
+ if (cap & FLOW_CTRL_TX)
+ pause_tx = 1;
+ }
+
+ gmac_set_flow_control(netdev, pause_tx, pause_rx);
+
+ if (old_status.bits32 == status.bits32)
+ return;
+
+ if (netif_msg_link(port)) {
+ phy_print_status(phydev);
+ netdev_info(netdev, "link flow control: %s\n",
+ phydev->pause
+ ? (phydev->asym_pause ? "tx" : "both")
+ : (phydev->asym_pause ? "rx" : "none")
+ );
+ }
+
+ gmac_disable_tx_rx(netdev);
+ writel(status.bits32, port->gmac_base + GMAC_STATUS);
+ gmac_enable_tx_rx(netdev);
+}
+
+static int gmac_setup_phy(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_status status = { .bits32 = 0 };
+ struct device *dev = port->dev;
+ struct phy_device *phy;
+
+ phy = of_phy_get_and_connect(netdev,
+ dev->of_node,
+ gmac_speed_set);
+ if (!phy)
+ return -ENODEV;
+ netdev->phydev = phy;
+
+ netdev_info(netdev, "connected to PHY \"%s\"\n",
+ phydev_name(phy));
+ phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+ (unsigned long)phy->phy_id,
+ phy_modes(phy->interface));
+
+ phy->supported &= PHY_GBIT_FEATURES;
+ phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
+ phy->advertising = phy->supported;
+
+ /* set PHY interface type */
+ switch (phy->interface) {
+ case PHY_INTERFACE_MODE_MII:
+		netdev_info(netdev, "set GMAC0 to MII mode, GMAC1 disabled\n");
+ status.bits.mii_rmii = GMAC_PHY_MII;
+ netdev_info(netdev, "connect to MII\n");
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ status.bits.mii_rmii = GMAC_PHY_GMII;
+ netdev_info(netdev, "connect to GMII\n");
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII\n");
+ break;
+ default:
+ netdev_err(netdev, "Unsupported MII interface\n");
+ phy_disconnect(phy);
+ netdev->phydev = NULL;
+ return -EINVAL;
+ }
+ writel(status.bits32, port->gmac_base + GMAC_STATUS);
+
+ return 0;
+}
+
+static int gmac_pick_rx_max_len(int max_l3_len)
+{
+ /* index = CONFIG_MAXLEN_XXX values */
+ static const int max_len[8] = {
+ 1536, 1518, 1522, 1542,
+ 9212, 10236, 1518, 1518
+ };
+ int i, n = 5;
+
+ max_l3_len += ETH_HLEN + VLAN_HLEN;
+
+ if (max_l3_len > max_len[n])
+ return -1;
+
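+	/* Pick the smallest table entry that still fits the requested
+	 * frame length
+	 */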
+ for (i = 0; i < 5; i++) {
+ if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
+ n = i;
+ }
+
+ return n;
+}
+
+static int gmac_init(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0 = { .bits = {
+ .dis_tx = 1,
+ .dis_rx = 1,
+ .ipv4_rx_chksum = 1,
+ .ipv6_rx_chksum = 1,
+ .rx_err_detect = 1,
+ .rgmm_edge = 1,
+ .port0_chk_hwq = 1,
+ .port1_chk_hwq = 1,
+ .port0_chk_toeq = 1,
+ .port1_chk_toeq = 1,
+ .port0_chk_classq = 1,
+ .port1_chk_classq = 1,
+ } };
+ union gmac_ahb_weight ahb_weight = { .bits = {
+ .rx_weight = 1,
+ .tx_weight = 1,
+ .hash_weight = 1,
+ .pre_req = 0x1f,
+ .tq_dv_threshold = 0,
+ } };
+ union gmac_tx_wcr0 hw_weigh = { .bits = {
+ .hw_tq3 = 1,
+ .hw_tq2 = 1,
+ .hw_tq1 = 1,
+ .hw_tq0 = 1,
+ } };
+ union gmac_tx_wcr1 sw_weigh = { .bits = {
+ .sw_tq5 = 1,
+ .sw_tq4 = 1,
+ .sw_tq3 = 1,
+ .sw_tq2 = 1,
+ .sw_tq1 = 1,
+ .sw_tq0 = 1,
+ } };
+ union gmac_config1 config1 = { .bits = {
+ .set_threshold = 16,
+ .rel_threshold = 24,
+ } };
+ union gmac_config2 config2 = { .bits = {
+ .set_threshold = 16,
+ .rel_threshold = 32,
+ } };
+ union gmac_config3 config3 = { .bits = {
+ .set_threshold = 0,
+ .rel_threshold = 0,
+ } };
+ union gmac_config0 tmp;
+ u32 val;
+
+ config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu);
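+	/* Preserve the hardware's reserved bits when rewriting CONFIG0 */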
+ tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+ config0.bits.reserved = tmp.bits.reserved;
+ writel(config0.bits32, port->gmac_base + GMAC_CONFIG0);
+ writel(config1.bits32, port->gmac_base + GMAC_CONFIG1);
+ writel(config2.bits32, port->gmac_base + GMAC_CONFIG2);
+ writel(config3.bits32, port->gmac_base + GMAC_CONFIG3);
+
+ val = readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
+ writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG);
+
+ writel(hw_weigh.bits32,
+ port->dma_base + GMAC_TX_WEIGHTING_CTRL_0_REG);
+ writel(sw_weigh.bits32,
+ port->dma_base + GMAC_TX_WEIGHTING_CTRL_1_REG);
+
+ port->rxq_order = DEFAULT_GMAC_RXQ_ORDER;
+ port->txq_order = DEFAULT_GMAC_TXQ_ORDER;
+ port->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS;
+
+	/* Mark a packet for interrupt every quarter of the queue so that
+	 * a stopped queue can be woken up again
+	 */
+ port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+
+ return 0;
+}
+
+static void gmac_uninit(struct net_device *netdev)
+{
+ if (netdev->phydev)
+ phy_disconnect(netdev->phydev);
+}
+
+static int gmac_setup_txqs(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int n_txq = netdev->num_tx_queues;
+ struct gemini_ethernet *geth = port->geth;
+ size_t entries = 1 << port->txq_order;
+ struct gmac_txq *txq = port->txq;
+ struct gmac_txdesc *desc_ring;
+ size_t len = n_txq * entries;
+ struct sk_buff **skb_tab;
+ void __iomem *rwptr_reg;
+ unsigned int r;
+ int i;
+
+ rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
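+	/* A single allocation backs the skb tables and descriptor rings
+	 * for all TX queues; each queue gets a slice of it below
+	 */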
+ skb_tab = kcalloc(len, sizeof(*skb_tab), GFP_KERNEL);
+ if (!skb_tab)
+ return -ENOMEM;
+
+ desc_ring = dma_alloc_coherent(geth->dev, len * sizeof(*desc_ring),
+ &port->txq_dma_base, GFP_KERNEL);
+
+ if (!desc_ring) {
+ kfree(skb_tab);
+ return -ENOMEM;
+ }
+
+	if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
+		dev_warn(geth->dev, "TX queue base is not aligned\n");
+		dma_free_coherent(geth->dev, len * sizeof(*desc_ring),
+				  desc_ring, port->txq_dma_base);
+		kfree(skb_tab);
+		return -ENOMEM;
+	}
+
+ writel(port->txq_dma_base | port->txq_order,
+ port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+ for (i = 0; i < n_txq; i++) {
+ txq->ring = desc_ring;
+ txq->skb = skb_tab;
+ txq->noirq_packets = 0;
+
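+		/* Start the queue empty: sync the software write pointer
+		 * to the hardware read pointer
+		 */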
+ r = readw(rwptr_reg);
+ rwptr_reg += 2;
+ writew(r, rwptr_reg);
+ rwptr_reg += 2;
+ txq->cptr = r;
+
+ txq++;
+ desc_ring += entries;
+ skb_tab += entries;
+ }
+
+ return 0;
+}
+
+static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
+ unsigned int r)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int m = (1 << port->txq_order) - 1;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int c = txq->cptr;
+ union gmac_txdesc_0 word0;
+ union gmac_txdesc_1 word1;
+ unsigned int hwchksum = 0;
+ unsigned long bytes = 0;
+ struct gmac_txdesc *txd;
+ unsigned short nfrags;
+ unsigned int errs = 0;
+ unsigned int pkts = 0;
+ unsigned int word3;
+ dma_addr_t mapping;
+
+ if (c == r)
+ return;
+
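+	/* Walk the ring from the last cleaned descriptor up to the
+	 * current hardware read pointer, unmapping buffers and
+	 * collecting stats
+	 */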
+ while (c != r) {
+ txd = txq->ring + c;
+ word0 = txd->word0;
+ word1 = txd->word1;
+ mapping = txd->word2.buf_adr;
+ word3 = txd->word3.bits32;
+
+ dma_unmap_single(geth->dev, mapping,
+ word0.bits.buffer_size, DMA_TO_DEVICE);
+
+ if (word3 & EOF_BIT)
+ dev_kfree_skb(txq->skb[c]);
+
+ c++;
+ c &= m;
+
+ if (!(word3 & SOF_BIT))
+ continue;
+
+ if (!word0.bits.status_tx_ok) {
+ errs++;
+ continue;
+ }
+
+ pkts++;
+ bytes += txd->word1.bits.byte_count;
+
+ if (word1.bits32 & TSS_CHECKUM_ENABLE)
+ hwchksum++;
+
+ nfrags = word0.bits.desc_count - 1;
+ if (nfrags) {
+ if (nfrags >= TX_MAX_FRAGS)
+ nfrags = TX_MAX_FRAGS - 1;
+
+			u64_stats_update_begin(&port->tx_stats_syncp);
+			port->tx_frag_stats[nfrags]++;
+			u64_stats_update_end(&port->tx_stats_syncp);
+ }
+ }
+
+ u64_stats_update_begin(&port->ir_stats_syncp);
+ port->stats.tx_errors += errs;
+ port->stats.tx_packets += pkts;
+ port->stats.tx_bytes += bytes;
+ port->tx_hw_csummed += hwchksum;
+ u64_stats_update_end(&port->ir_stats_syncp);
+
+ txq->cptr = c;
+}
+
+static void gmac_cleanup_txqs(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int n_txq = netdev->num_tx_queues;
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *rwptr_reg;
+ unsigned int r, i;
+
+ rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ for (i = 0; i < n_txq; i++) {
+ r = readw(rwptr_reg);
+ rwptr_reg += 2;
+ writew(r, rwptr_reg);
+ rwptr_reg += 2;
+
+ gmac_clean_txq(netdev, port->txq + i, r);
+ }
+ writel(0, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+ kfree(port->txq->skb);
+ dma_free_coherent(geth->dev,
+ n_txq * sizeof(*port->txq->ring) << port->txq_order,
+ port->txq->ring, port->txq_dma_base);
+}
+
+static int gmac_setup_rxq(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ struct nontoe_qhdr __iomem *qhdr;
+
+ qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+ port->rxq_rwptr = &qhdr->word1;
+
+ /* Remap a slew of memory to use for the RX queue */
+ port->rxq_ring = dma_alloc_coherent(geth->dev,
+ sizeof(*port->rxq_ring) << port->rxq_order,
+ &port->rxq_dma_base, GFP_KERNEL);
+ if (!port->rxq_ring)
+ return -ENOMEM;
+	if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) {
+		dev_warn(geth->dev, "RX queue base is not aligned\n");
+		dma_free_coherent(geth->dev,
+				  sizeof(*port->rxq_ring) << port->rxq_order,
+				  port->rxq_ring, port->rxq_dma_base);
+		port->rxq_ring = NULL;
+		return -ENOMEM;
+	}
+
+ writel(port->rxq_dma_base | port->rxq_order, &qhdr->word0);
+ writel(0, port->rxq_rwptr);
+ return 0;
+}
+
+static struct gmac_queue_page *
+gmac_get_queue_page(struct gemini_ethernet *geth,
+ struct gemini_ethernet_port *port,
+ dma_addr_t addr)
+{
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+ int i;
+
+	/* The page table stores page-aligned mappings, so mask off the
+	 * in-page offset before looking the address up
+	 */
+	mapping = addr & PAGE_MASK;
+
+ if (!geth->freeq_pages) {
+ dev_err(geth->dev, "try to get page with no page list\n");
+ return NULL;
+ }
+
+ /* Look up a ring buffer page from virtual mapping */
+ for (i = 0; i < geth->num_freeq_pages; i++) {
+ gpage = &geth->freeq_pages[i];
+ if (gpage->mapping == mapping)
+ return gpage;
+ }
+
+ return NULL;
+}
+
+static void gmac_cleanup_rxq(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ struct gmac_rxdesc *rxd = port->rxq_ring;
+	struct gmac_queue_page *gpage;
+ struct nontoe_qhdr __iomem *qhdr;
+ void __iomem *dma_reg;
+ void __iomem *ptr_reg;
+ dma_addr_t mapping;
+ union dma_rwptr rw;
+ unsigned int r, w;
+
+ qhdr = geth->base +
+ TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+ dma_reg = &qhdr->word0;
+ ptr_reg = &qhdr->word1;
+
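+	/* Stop the queue by syncing the write pointer to the read
+	 * pointer and clearing the DMA base register
+	 */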
+ rw.bits32 = readl(ptr_reg);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+ writew(r, ptr_reg + 2);
+
+ writel(0, dma_reg);
+
+ /* Loop from read pointer to write pointer of the RX queue
+ * and free up all pages by the queue.
+ */
+ while (r != w) {
+ mapping = rxd[r].word2.buf_adr;
+ r++;
+ r &= ((1 << port->rxq_order) - 1);
+
+ if (!mapping)
+ continue;
+
+ /* Freeq pointers are one page off */
+ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+ if (!gpage) {
+ dev_err(geth->dev, "could not find page\n");
+ continue;
+ }
+ /* Release the RX queue reference to the page */
+ put_page(gpage->page);
+ }
+
+ dma_free_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order,
+ port->rxq_ring, port->rxq_dma_base);
+}
+
+static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth,
+ int pn)
+{
+ struct gmac_rxdesc *freeq_entry;
+ struct gmac_queue_page *gpage;
+ unsigned int fpp_order;
+ unsigned int frag_len;
+ dma_addr_t mapping;
+ struct page *page;
+ int i;
+
+ /* First allocate and DMA map a single page */
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return NULL;
+
+ mapping = dma_map_single(geth->dev, page_address(page),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(geth->dev, mapping)) {
+ put_page(page);
+ return NULL;
+ }
+
+	/* Then assign the page mapping (physical address) to the buffer address
+	 * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes,
+	 * 4k), and the default RX frag order is 11 (fragments are up to 2048
+	 * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus
+	 * each page normally needs two entries in the queue.
+	 */
+ frag_len = 1 << geth->freeq_frag_order; /* Usually 2048 */
+ fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ freeq_entry = geth->freeq_ring + (pn << fpp_order);
+ dev_dbg(geth->dev, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n",
+ pn, frag_len, (1 << fpp_order), freeq_entry);
+ for (i = (1 << fpp_order); i > 0; i--) {
+ freeq_entry->word2.buf_adr = mapping;
+ freeq_entry++;
+ mapping += frag_len;
+ }
+
+ /* If the freeq entry already has a page mapped, then unmap it. */
+ gpage = &geth->freeq_pages[pn];
+ if (gpage->page) {
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+ /* This should be the last reference to the page so it gets
+ * released
+ */
+ put_page(gpage->page);
+ }
+
+ /* Then put our new mapping into the page table */
+ dev_dbg(geth->dev, "page %d, DMA addr: %08x, page %p\n",
+ pn, (unsigned int)mapping, page);
+ gpage->mapping = mapping;
+ gpage->page = page;
+
+ return page;
+}
+
+/**
+ * geth_fill_freeq() - Fill the freeq with empty fragments to use
+ * @geth: the ethernet adapter
+ * @refill: if true, just top the queue up from the current write pointer;
+ * if false, reset the queue by filling in every freeq entry. The refill
+ * interrupt usually fires when the queue is half empty.
+ */
+static unsigned int geth_fill_freeq(struct gemini_ethernet *geth, bool refill)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int count = 0;
+ unsigned int pn, epn;
+ unsigned long flags;
+ union dma_rwptr rw;
+ unsigned int m_pn;
+
+ /* Mask for page */
+	/* Mask for the freeq page number */
+
+ spin_lock_irqsave(&geth->freeq_lock, flags);
+
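+	/* Top up from the write pointer, or rebuild the whole queue from
+	 * the read pointer when resetting; stop one page short of the
+	 * read pointer
+	 */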
+ rw.bits32 = readl(geth->base + GLOBAL_SWFQ_RWPTR_REG);
+ pn = (refill ? rw.bits.wptr : rw.bits.rptr) >> fpp_order;
+ epn = (rw.bits.rptr >> fpp_order) - 1;
+ epn &= m_pn;
+
+ /* Loop over the freeq ring buffer entries */
+ while (pn != epn) {
+ struct gmac_queue_page *gpage;
+ struct page *page;
+
+ gpage = &geth->freeq_pages[pn];
+ page = gpage->page;
+
+ dev_dbg(geth->dev, "fill entry %d page ref count %d add %d refs\n",
+ pn, page_ref_count(page), 1 << fpp_order);
+
+ if (page_ref_count(page) > 1) {
+ unsigned int fl = (pn - epn) & m_pn;
+
+ if (fl > 64 >> fpp_order)
+ break;
+
+ page = geth_freeq_alloc_map_page(geth, pn);
+ if (!page)
+ break;
+ }
+
+ /* Add one reference per fragment in the page */
+ page_ref_add(page, 1 << fpp_order);
+ count += 1 << fpp_order;
+ pn++;
+ pn &= m_pn;
+ }
+
+ writew(pn << fpp_order, geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+
+ spin_unlock_irqrestore(&geth->freeq_lock, flags);
+
+ return count;
+}
+
+static int geth_setup_freeq(struct gemini_ethernet *geth)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int frag_len = 1 << geth->freeq_frag_order;
+ unsigned int len = 1 << geth->freeq_order;
+ unsigned int pages = len >> fpp_order;
+ union queue_threshold qt;
+ union dma_skb_size skbsz;
+ unsigned int filled;
+ unsigned int pn;
+
+ geth->freeq_ring = dma_alloc_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ &geth->freeq_dma_base, GFP_KERNEL);
+ if (!geth->freeq_ring)
+ return -ENOMEM;
+ if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) {
+		dev_warn(geth->dev, "queue ring base is not aligned\n");
+ goto err_freeq;
+ }
+
+ /* Allocate a mapping to page look-up index */
+	geth->freeq_pages = kcalloc(pages, sizeof(*geth->freeq_pages),
+				    GFP_KERNEL);
+ if (!geth->freeq_pages)
+ goto err_freeq;
+ geth->num_freeq_pages = pages;
+
+ dev_info(geth->dev, "allocate %d pages for queue\n", pages);
+ for (pn = 0; pn < pages; pn++)
+ if (!geth_freeq_alloc_map_page(geth, pn))
+ goto err_freeq_alloc;
+
+ filled = geth_fill_freeq(geth, false);
+ if (!filled)
+ goto err_freeq_alloc;
+
+ qt.bits32 = readl(geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+ qt.bits.swfq_empty = 32;
+ writel(qt.bits32, geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+
+ skbsz.bits.sw_skb_size = 1 << geth->freeq_frag_order;
+ writel(skbsz.bits32, geth->base + GLOBAL_DMA_SKB_SIZE_REG);
+ writel(geth->freeq_dma_base | geth->freeq_order,
+ geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+ return 0;
+
+err_freeq_alloc:
+ while (pn > 0) {
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+
+ --pn;
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+ gpage = &geth->freeq_pages[pn];
+ put_page(gpage->page);
+ }
+
+ kfree(geth->freeq_pages);
+err_freeq:
+ dma_free_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ geth->freeq_ring, geth->freeq_dma_base);
+ geth->freeq_ring = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue
+ * @geth: the Gemini global ethernet state
+ */
+static void geth_cleanup_freeq(struct gemini_ethernet *geth)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int frag_len = 1 << geth->freeq_frag_order;
+ unsigned int len = 1 << geth->freeq_order;
+ unsigned int pages = len >> fpp_order;
+ unsigned int pn;
+
+ writew(readw(geth->base + GLOBAL_SWFQ_RWPTR_REG),
+ geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+ writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+ for (pn = 0; pn < pages; pn++) {
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+
+ gpage = &geth->freeq_pages[pn];
+ while (page_ref_count(gpage->page) > 0)
+ put_page(gpage->page);
+ }
+
+ kfree(geth->freeq_pages);
+
+ dma_free_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ geth->freeq_ring, geth->freeq_dma_base);
+}
+
+/**
+ * geth_resize_freeq() - resize the software queue depth
+ * @port: the port requesting the change
+ *
+ * This gets called at least once during probe() so the device queue gets
+ * "resized" from the hardware defaults. Since both ports/net devices share
+ * the same hardware queue, some synchronization between the ports is
+ * needed.
+ */
+static int geth_resize_freeq(struct gemini_ethernet_port *port)
+{
+ struct gemini_ethernet *geth = port->geth;
+ struct net_device *netdev = port->netdev;
+ struct gemini_ethernet_port *other_port;
+ struct net_device *other_netdev;
+ unsigned int new_size = 0;
+ unsigned int new_order;
+ unsigned long flags;
+ u32 en;
+ int ret;
+
+ if (netdev->dev_id == 0)
+ other_netdev = geth->port1->netdev;
+ else
+ other_netdev = geth->port0->netdev;
+
+ if (other_netdev && netif_running(other_netdev))
+ return -EBUSY;
+
+ new_size = 1 << (port->rxq_order + 1);
+ netdev_dbg(netdev, "port %d size: %d order %d\n",
+ netdev->dev_id,
+ new_size,
+ port->rxq_order);
+ if (other_netdev) {
+ other_port = netdev_priv(other_netdev);
+ new_size += 1 << (other_port->rxq_order + 1);
+ netdev_dbg(other_netdev, "port %d size: %d order %d\n",
+ other_netdev->dev_id,
+ (1 << (other_port->rxq_order + 1)),
+ other_port->rxq_order);
+ }
+
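+	/* Round the combined size up to the next power of two, capped at
+	 * 2^15 entries
+	 */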
+ new_order = min(15, ilog2(new_size - 1) + 1);
+ dev_dbg(geth->dev, "set shared queue to size %d order %d\n",
+ new_size, new_order);
+ if (geth->freeq_order == new_order)
+ return 0;
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
+ /* Disable the software queue IRQs */
+ en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ en &= ~SWFQ_EMPTY_INT_BIT;
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ /* Drop the old queue */
+ if (geth->freeq_ring)
+ geth_cleanup_freeq(geth);
+
+ /* Allocate a new queue with the desired order */
+ geth->freeq_order = new_order;
+ ret = geth_setup_freeq(geth);
+
+ /* Restart the interrupts - NOTE if this is the first resize
+ * after probe(), this is where the interrupts get turned on
+ * in the first place.
+ */
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ en |= SWFQ_EMPTY_INT_BIT;
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ return ret;
+}
+
+static void gmac_tx_irq_enable(struct net_device *netdev,
+ unsigned int txq, int en)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ u32 val, mask;
+
+ netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+
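+	/* Each port has six TX queue interrupt bits in this register */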
+ mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
+
+ if (en)
+ writel(mask, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ val = en ? val | mask : val & ~mask;
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+}
+
+static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
+{
+ struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num);
+
+ gmac_tx_irq_enable(netdev, txq_num, 0);
+ netif_tx_wake_queue(ntxq);
+}
+
+static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ struct gmac_txq *txq, unsigned short *desc)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct skb_shared_info *skb_si = skb_shinfo(skb);
+ unsigned short m = (1 << port->txq_order) - 1;
+ short frag, last_frag = skb_si->nr_frags - 1;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int word1, word3, buflen;
+ unsigned short w = *desc;
+ struct gmac_txdesc *txd;
+ skb_frag_t *skb_frag;
+ dma_addr_t mapping;
+ unsigned short mtu;
+ void *buffer;
+
+ mtu = ETH_HLEN;
+ mtu += netdev->mtu;
+ if (skb->protocol == htons(ETH_P_8021Q))
+ mtu += VLAN_HLEN;
+
+ word1 = skb->len;
+ word3 = SOF_BIT;
+
+ if (word1 > mtu) {
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mtu;
+ }
+
+ if (skb->ip_summed != CHECKSUM_NONE) {
+ int tcp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ word1 |= TSS_IP_CHKSUM_BIT;
+ tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+ } else { /* IPv6 */
+ word1 |= TSS_IPV6_ENABLE_BIT;
+ tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
+ }
+
+ word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
+ }
+
+ frag = -1;
+ while (frag <= last_frag) {
+ if (frag == -1) {
+ buffer = skb->data;
+ buflen = skb_headlen(skb);
+ } else {
+ skb_frag = skb_si->frags + frag;
+ buffer = page_address(skb_frag_page(skb_frag)) +
+ skb_frag->page_offset;
+ buflen = skb_frag->size;
+ }
+
+ if (frag == last_frag) {
+ word3 |= EOF_BIT;
+ txq->skb[w] = skb;
+ }
+
+ mapping = dma_map_single(geth->dev, buffer, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(geth->dev, mapping))
+ goto map_error;
+
+ txd = txq->ring + w;
+ txd->word0.bits32 = buflen;
+ txd->word1.bits32 = word1;
+ txd->word2.buf_adr = mapping;
+ txd->word3.bits32 = word3;
+
+ word3 &= MTU_SIZE_BIT_MASK;
+ w++;
+ w &= m;
+ frag++;
+ }
+
+ *desc = w;
+ return 0;
+
+map_error:
+ while (w != *desc) {
+ w--;
+ w &= m;
+
+		dma_unmap_single(geth->dev, txq->ring[w].word2.buf_adr,
+				 txq->ring[w].word0.bits.buffer_size,
+				 DMA_TO_DEVICE);
+ }
+ return -ENOMEM;
+}
+
+static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned short m = (1 << port->txq_order) - 1;
+ struct netdev_queue *ntxq;
+ unsigned short r, w, d;
+ void __iomem *ptr_reg;
+ struct gmac_txq *txq;
+ int txq_num, nfrags;
+ union dma_rwptr rw;
+
+ SKB_FRAG_ASSERT(skb);
+
+ if (skb->len >= 0x10000)
+ goto out_drop_free;
+
+ txq_num = skb_get_queue_mapping(skb);
+ ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE_PTR_REG(txq_num);
+ txq = &port->txq[txq_num];
+ ntxq = netdev_get_tx_queue(netdev, txq_num);
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ rw.bits32 = readl(ptr_reg);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
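+	/* Free descriptors: ring distance from the write pointer back to
+	 * the cleanup pointer, keeping one slot as a guard
+	 */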
+ d = txq->cptr - w - 1;
+ d &= m;
+
+ if (d < nfrags + 2) {
+ gmac_clean_txq(netdev, txq, r);
+ d = txq->cptr - w - 1;
+ d &= m;
+
+ if (d < nfrags + 2) {
+ netif_tx_stop_queue(ntxq);
+
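+			/* Request an EOF interrupt on a descriptor well
+			 * ahead of the cleanup pointer so the queue gets
+			 * woken up once it has drained
+			 */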
+ d = txq->cptr + nfrags + 16;
+ d &= m;
+ txq->ring[d].word3.bits.eofie = 1;
+ gmac_tx_irq_enable(netdev, txq_num, 1);
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ netdev->stats.tx_fifo_errors++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ if (gmac_map_tx_bufs(netdev, skb, txq, &w)) {
+ if (skb_linearize(skb))
+ goto out_drop;
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->tx_frags_linearized++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+
+ if (gmac_map_tx_bufs(netdev, skb, txq, &w))
+ goto out_drop_free;
+ }
+
+ writew(w, ptr_reg + 2);
+
+ gmac_clean_txq(netdev, txq, r);
+ return NETDEV_TX_OK;
+
+out_drop_free:
+ dev_kfree_skb(skb);
+out_drop:
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->stats.tx_dropped++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ return NETDEV_TX_OK;
+}
+
+static void gmac_tx_timeout(struct net_device *netdev)
+{
+ netdev_err(netdev, "Tx timeout\n");
+ gmac_dump_dma_state(netdev);
+}
+
+static void gmac_enable_irq(struct net_device *netdev, int enable)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+ u32 val, mask;
+
+ netdev_info(netdev, "%s device %d %s\n", __func__,
+ netdev->dev_id, enable ? "enable" : "disable");
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
+ mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+ mask = GMAC0_IRQ4_8 << (netdev->dev_id * 8);
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static void gmac_enable_rx_irq(struct net_device *netdev, int enable)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+ u32 val, mask;
+
+ netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id,
+ enable ? "enable" : "disable");
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static struct sk_buff *gmac_skb_if_good_frame(struct gemini_ethernet_port *port,
+ union gmac_rxdesc_0 word0,
+ unsigned int frame_len)
+{
+ unsigned int rx_csum = word0.bits.chksum_status;
+ unsigned int rx_status = word0.bits.status;
+ struct sk_buff *skb = NULL;
+
+ port->rx_stats[rx_status]++;
+ port->rx_csum_stats[rx_csum]++;
+
+ if (word0.bits.derr || word0.bits.perr ||
+ rx_status || frame_len < ETH_ZLEN ||
+ rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
+ port->stats.rx_errors++;
+
+ if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status))
+ port->stats.rx_length_errors++;
+ if (RX_ERROR_OVER(rx_status))
+ port->stats.rx_over_errors++;
+ if (RX_ERROR_CRC(rx_status))
+ port->stats.rx_crc_errors++;
+ if (RX_ERROR_FRAME(rx_status))
+ port->stats.rx_frame_errors++;
+ return NULL;
+ }
+
+ skb = napi_get_frags(&port->napi);
+ if (!skb)
+ goto update_exit;
+
+ if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+update_exit:
+ port->stats.rx_bytes += frame_len;
+ port->stats.rx_packets++;
+ return skb;
+}
+
+static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned short m = (1 << port->rxq_order) - 1;
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *ptr_reg = port->rxq_rwptr;
+ unsigned int frame_len, frag_len;
+ struct gmac_rxdesc *rx = NULL;
+ struct gmac_queue_page *gpage;
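+	/* Static so that a frame whose fragments span two NAPI polls
+	 * survives between calls to this function (NB: shared between
+	 * both ports)
+	 */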
+ static struct sk_buff *skb;
+ union gmac_rxdesc_0 word0;
+ union gmac_rxdesc_1 word1;
+ union gmac_rxdesc_3 word3;
+ struct page *page = NULL;
+ unsigned int page_offs;
+ unsigned short r, w;
+ union dma_rwptr rw;
+ dma_addr_t mapping;
+ int frag_nr = 0;
+
+ rw.bits32 = readl(ptr_reg);
+	/* Reset the interrupt: all packets up to here have been accounted for */
+ writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
+ geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
+ while (budget && w != r) {
+ rx = port->rxq_ring + r;
+ word0 = rx->word0;
+ word1 = rx->word1;
+ mapping = rx->word2.buf_adr;
+ word3 = rx->word3;
+
+ r++;
+ r &= m;
+
+ frag_len = word0.bits.buffer_size;
+ frame_len = word1.bits.byte_count;
+ page_offs = mapping & ~PAGE_MASK;
+
+ if (!mapping) {
+ netdev_err(netdev,
+ "rxq[%u]: HW BUG: zero DMA desc\n", r);
+ goto err_drop;
+ }
+
+ /* Freeq pointers are one page off */
+ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+ if (!gpage) {
+ dev_err(geth->dev, "could not find mapping\n");
+ continue;
+ }
+ page = gpage->page;
+
+ if (word3.bits32 & SOF_BIT) {
+ if (skb) {
+ napi_free_frags(&port->napi);
+ port->stats.rx_dropped++;
+ }
+
+ skb = gmac_skb_if_good_frame(port, word0, frame_len);
+ if (!skb)
+ goto err_drop;
+
+ page_offs += NET_IP_ALIGN;
+ frag_len -= NET_IP_ALIGN;
+ frag_nr = 0;
+
+ } else if (!skb) {
+ put_page(page);
+ continue;
+ }
+
+ if (word3.bits32 & EOF_BIT)
+ frag_len = frame_len - skb->len;
+
+ /* append page frag to skb */
+ if (frag_nr == MAX_SKB_FRAGS)
+ goto err_drop;
+
+ if (frag_len == 0)
+ netdev_err(netdev, "Received fragment with len = 0\n");
+
+ skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
+ skb->len += frag_len;
+ skb->data_len += frag_len;
+ skb->truesize += frag_len;
+ frag_nr++;
+
+ if (word3.bits32 & EOF_BIT) {
+ napi_gro_frags(&port->napi);
+ skb = NULL;
+ --budget;
+ }
+ continue;
+
+err_drop:
+ if (skb) {
+ napi_free_frags(&port->napi);
+ skb = NULL;
+ }
+
+ if (mapping)
+ put_page(page);
+
+ port->stats.rx_dropped++;
+ }
+
+ writew(r, ptr_reg);
+ return budget;
+}
+
+static int gmac_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct gemini_ethernet_port *port = netdev_priv(napi->dev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int freeq_threshold;
+ unsigned int received;
+
+ freeq_threshold = 1 << (geth->freeq_order - 1);
+ u64_stats_update_begin(&port->rx_stats_syncp);
+
+ received = gmac_rx(napi->dev, budget);
+ if (received < budget) {
+ napi_gro_flush(napi, false);
+ napi_complete_done(napi, received);
+ gmac_enable_rx_irq(napi->dev, 1);
+ ++port->rx_napi_exits;
+ }
+
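+	/* Top up the shared free queue once half a queue's worth of
+	 * packets has been consumed
+	 */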
+ port->freeq_refill += (budget - received);
+ if (port->freeq_refill > freeq_threshold) {
+ port->freeq_refill -= freeq_threshold;
+ geth_fill_freeq(geth, true);
+ }
+
+ u64_stats_update_end(&port->rx_stats_syncp);
+ return received;
+}
+
+static void gmac_dump_dma_state(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *ptr_reg;
+ u32 reg[5];
+
+ /* Interrupt status */
+ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+ reg[1] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ reg[2] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+ reg[3] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+ reg[4] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ netdev_err(netdev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+ /* Interrupt enable */
+ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ reg[1] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ reg[2] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+ reg[3] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+ reg[4] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ netdev_err(netdev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+ /* RX DMA status */
+ reg[0] = readl(port->dma_base + GMAC_DMA_RX_FIRST_DESC_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_RX_CURR_DESC_REG);
+ reg[2] = GET_RPTR(port->rxq_rwptr);
+ reg[3] = GET_WPTR(port->rxq_rwptr);
+ netdev_err(netdev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD0_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD1_REG);
+ reg[2] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD2_REG);
+ reg[3] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD3_REG);
+ netdev_err(netdev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ /* TX DMA status */
+ ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_TX_FIRST_DESC_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_TX_CURR_DESC_REG);
+ reg[2] = GET_RPTR(ptr_reg);
+ reg[3] = GET_WPTR(ptr_reg);
+ netdev_err(netdev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD0_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD1_REG);
+ reg[2] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD2_REG);
+ reg[3] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD3_REG);
+ netdev_err(netdev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ /* FREE queues status */
+ ptr_reg = geth->base + GLOBAL_SWFQ_RWPTR_REG;
+
+ reg[0] = GET_RPTR(ptr_reg);
+ reg[1] = GET_WPTR(ptr_reg);
+
+ ptr_reg = geth->base + GLOBAL_HWFQ_RWPTR_REG;
+
+ reg[2] = GET_RPTR(ptr_reg);
+ reg[3] = GET_WPTR(ptr_reg);
+ netdev_err(netdev, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+}
+
+static void gmac_update_hw_stats(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int rx_discards, rx_mcast, rx_bcast;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ u64_stats_update_begin(&port->ir_stats_syncp);
+
+ rx_discards = readl(port->gmac_base + GMAC_IN_DISCARDS);
+ port->hw_stats[0] += rx_discards;
+ port->hw_stats[1] += readl(port->gmac_base + GMAC_IN_ERRORS);
+ rx_mcast = readl(port->gmac_base + GMAC_IN_MCAST);
+ port->hw_stats[2] += rx_mcast;
+ rx_bcast = readl(port->gmac_base + GMAC_IN_BCAST);
+ port->hw_stats[3] += rx_bcast;
+ port->hw_stats[4] += readl(port->gmac_base + GMAC_IN_MAC1);
+ port->hw_stats[5] += readl(port->gmac_base + GMAC_IN_MAC2);
+
+ port->stats.rx_missed_errors += rx_discards;
+ port->stats.multicast += rx_mcast;
+ port->stats.multicast += rx_bcast;
+
+ writel(GMAC0_MIB_INT_BIT << (netdev->dev_id * 8),
+ geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ u64_stats_update_end(&port->ir_stats_syncp);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+/**
+ * gmac_get_intr_flags() - get the unmasked interrupt flags for a port
+ * @netdev: the net device for the port to get flags from
+ * @i: the interrupt status register 0..4
+ */
+static u32 gmac_get_intr_flags(struct net_device *netdev, int i)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *irqif_reg, *irqen_reg;
+ unsigned int offs, val;
+
+ /* Calculate the offset using the stride of the status registers */
+ offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG -
+ GLOBAL_INTERRUPT_STATUS_0_REG);
+
+ irqif_reg = geth->base + GLOBAL_INTERRUPT_STATUS_0_REG + offs;
+ irqen_reg = geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG + offs;
+
+ val = readl(irqif_reg) & readl(irqen_reg);
+ return val;
+}
+
+static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer)
+{
+ struct gemini_ethernet_port *port =
+ container_of(timer, struct gemini_ethernet_port,
+ rx_coalesce_timer);
+
+ napi_schedule(&port->napi);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gmac_irq(int irq, void *data)
+{
+ struct gemini_ethernet_port *port;
+ struct net_device *netdev = data;
+ struct gemini_ethernet *geth;
+ u32 val, orr = 0;
+
+ port = netdev_priv(netdev);
+ geth = port->geth;
+
+ val = gmac_get_intr_flags(netdev, 0);
+ orr |= val;
+
+ if (val & (GMAC0_IRQ0_2 << (netdev->dev_id * 2))) {
+ /* Oh, crap */
+ netdev_err(netdev, "hw failure/sw bug\n");
+ gmac_dump_dma_state(netdev);
+
+ /* don't know how to recover, just reduce losses */
+ gmac_enable_irq(netdev, 0);
+ return IRQ_HANDLED;
+ }
+
+ if (val & (GMAC0_IRQ0_TXQ0_INTS << (netdev->dev_id * 6)))
+ gmac_tx_irq(netdev, 0);
+
+ val = gmac_get_intr_flags(netdev, 1);
+ orr |= val;
+
+ if (val & (DEFAULT_Q0_INT_BIT << netdev->dev_id)) {
+ gmac_enable_rx_irq(netdev, 0);
+
+ if (!port->rx_coalesce_nsecs) {
+ napi_schedule(&port->napi);
+ } else {
+ ktime_t ktime;
+
+ ktime = ktime_set(0, port->rx_coalesce_nsecs);
+ hrtimer_start(&port->rx_coalesce_timer, ktime,
+ HRTIMER_MODE_REL);
+ }
+ }
+
+ val = gmac_get_intr_flags(netdev, 4);
+ orr |= val;
+
+ if (val & (GMAC0_MIB_INT_BIT << (netdev->dev_id * 8)))
+ gmac_update_hw_stats(netdev);
+
+ if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
+ geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ spin_lock(&geth->irq_lock);
+ u64_stats_update_begin(&port->ir_stats_syncp);
+ ++port->stats.rx_fifo_errors;
+ u64_stats_update_end(&port->ir_stats_syncp);
+ spin_unlock(&geth->irq_lock);
+ }
+
+ return orr ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void gmac_start_dma(struct gemini_ethernet_port *port)
+{
+ void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+ union gmac_dma_ctrl dma_ctrl;
+
+ dma_ctrl.bits32 = readl(dma_ctrl_reg);
+ dma_ctrl.bits.rd_enable = 1;
+ dma_ctrl.bits.td_enable = 1;
+ dma_ctrl.bits.loopback = 0;
+ dma_ctrl.bits.drop_small_ack = 0;
+ dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN;
+ dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED;
+ dma_ctrl.bits.rd_burst_size = HBURST_INCR8;
+ dma_ctrl.bits.rd_bus = HSIZE_8;
+ dma_ctrl.bits.td_prot = HPROT_DATA_CACHE;
+ dma_ctrl.bits.td_burst_size = HBURST_INCR8;
+ dma_ctrl.bits.td_bus = HSIZE_8;
+
+ writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static void gmac_stop_dma(struct gemini_ethernet_port *port)
+{
+ void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+ union gmac_dma_ctrl dma_ctrl;
+
+ dma_ctrl.bits32 = readl(dma_ctrl_reg);
+ dma_ctrl.bits.rd_enable = 0;
+ dma_ctrl.bits.td_enable = 0;
+ writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static int gmac_open(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int err;
+
+ if (!netdev->phydev) {
+ err = gmac_setup_phy(netdev);
+ if (err) {
+ netif_err(port, ifup, netdev,
+ "PHY init failed: %d\n", err);
+ return err;
+ }
+ }
+
+ err = request_irq(netdev->irq, gmac_irq,
+ IRQF_SHARED, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "no IRQ\n");
+ return err;
+ }
+
+ netif_carrier_off(netdev);
+ phy_start(netdev->phydev);
+
+ err = geth_resize_freeq(port);
+ if (err) {
+ netdev_err(netdev, "could not resize freeq\n");
+ goto err_stop_phy;
+ }
+
+ err = gmac_setup_rxq(netdev);
+ if (err) {
+ netdev_err(netdev, "could not setup RXQ\n");
+ goto err_stop_phy;
+ }
+
+ err = gmac_setup_txqs(netdev);
+ if (err) {
+ netdev_err(netdev, "could not setup TXQs\n");
+ gmac_cleanup_rxq(netdev);
+ goto err_stop_phy;
+ }
+
+ napi_enable(&port->napi);
+
+ gmac_start_dma(port);
+ gmac_enable_irq(netdev, 1);
+ gmac_enable_tx_rx(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+
+ netdev_info(netdev, "opened\n");
+
+ return 0;
+
+err_stop_phy:
+ phy_stop(netdev->phydev);
+ free_irq(netdev->irq, netdev);
+ return err;
+}
+
+static int gmac_stop(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ hrtimer_cancel(&port->rx_coalesce_timer);
+ netif_tx_stop_all_queues(netdev);
+ gmac_disable_tx_rx(netdev);
+ gmac_stop_dma(port);
+ napi_disable(&port->napi);
+
+ gmac_enable_irq(netdev, 0);
+ gmac_cleanup_rxq(netdev);
+ gmac_cleanup_txqs(netdev);
+
+ phy_stop(netdev->phydev);
+ free_irq(netdev->irq, netdev);
+
+ gmac_update_hw_stats(netdev);
+ return 0;
+}
+
+static void gmac_set_rx_mode(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_rx_fltr filter = { .bits = {
+ .broadcast = 1,
+ .multicast = 1,
+ .unicast = 1,
+ } };
+ struct netdev_hw_addr *ha;
+ unsigned int bit_nr;
+ u32 mc_filter[2];
+
+ mc_filter[1] = 0;
+ mc_filter[0] = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ filter.bits.error = 1;
+ filter.bits.promiscuous = 1;
+ mc_filter[1] = ~0;
+ mc_filter[0] = ~0;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ mc_filter[1] = ~0;
+ mc_filter[0] = ~0;
+ } else {
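+		/* Hash each multicast address into one of 64 filter bits
+		 * using the low six bits of its inverted CRC-32
+		 */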
+ netdev_for_each_mc_addr(ha, netdev) {
+ bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
+ }
+ }
+
+ writel(mc_filter[0], port->gmac_base + GMAC_MCAST_FIL0);
+ writel(mc_filter[1], port->gmac_base + GMAC_MCAST_FIL1);
+ writel(filter.bits32, port->gmac_base + GMAC_RX_FLTR);
+}
+
+static void gmac_write_mac_address(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ __le32 addr[3];
+
+ memset(addr, 0, sizeof(addr));
+ memcpy(addr, netdev->dev_addr, ETH_ALEN);
+
+ writel(le32_to_cpu(addr[0]), port->gmac_base + GMAC_STA_ADD0);
+ writel(le32_to_cpu(addr[1]), port->gmac_base + GMAC_STA_ADD1);
+ writel(le32_to_cpu(addr[2]), port->gmac_base + GMAC_STA_ADD2);
+}
+
+static int gmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ gmac_write_mac_address(netdev);
+
+ return 0;
+}
+
+static void gmac_clear_hw_stats(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ readl(port->gmac_base + GMAC_IN_DISCARDS);
+ readl(port->gmac_base + GMAC_IN_ERRORS);
+ readl(port->gmac_base + GMAC_IN_MCAST);
+ readl(port->gmac_base + GMAC_IN_BCAST);
+ readl(port->gmac_base + GMAC_IN_MAC1);
+ readl(port->gmac_base + GMAC_IN_MAC2);
+}
+
+static void gmac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int start;
+
+ gmac_update_hw_stats(netdev);
+
+ /* Racing with RX NAPI */
+ do {
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+ stats->rx_packets = port->stats.rx_packets;
+ stats->rx_bytes = port->stats.rx_bytes;
+ stats->rx_errors = port->stats.rx_errors;
+ stats->rx_dropped = port->stats.rx_dropped;
+
+ stats->rx_length_errors = port->stats.rx_length_errors;
+ stats->rx_over_errors = port->stats.rx_over_errors;
+ stats->rx_crc_errors = port->stats.rx_crc_errors;
+ stats->rx_frame_errors = port->stats.rx_frame_errors;
+
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+
+ /* Racing with MIB and TX completion interrupts */
+ do {
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+ stats->tx_errors = port->stats.tx_errors;
+ stats->tx_packets = port->stats.tx_packets;
+ stats->tx_bytes = port->stats.tx_bytes;
+
+ stats->multicast = port->stats.multicast;
+ stats->rx_missed_errors = port->stats.rx_missed_errors;
+ stats->rx_fifo_errors = port->stats.rx_fifo_errors;
+
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+
+ /* Racing with hard_start_xmit */
+ do {
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+ stats->tx_dropped = port->stats.tx_dropped;
+
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+
+ stats->rx_dropped += stats->rx_missed_errors;
+}
+
+static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int max_len = gmac_pick_rx_max_len(new_mtu);
+
+ if (max_len < 0)
+ return -EINVAL;
+
+ gmac_disable_tx_rx(netdev);
+
+ netdev->mtu = new_mtu;
+ gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
+ CONFIG0_MAXLEN_MASK);
+
+ netdev_update_features(netdev);
+
+ gmac_enable_tx_rx(netdev);
+
+ return 0;
+}
+
+static netdev_features_t gmac_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+ features &= ~GMAC_OFFLOAD_FEATURES;
+
+ return features;
+}
+
+static int gmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int enable = features & NETIF_F_RXCSUM;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+ return 0;
+}
+
+static int gmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0;
+}
+
+static void gmac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings));
+}
+
+static void gmac_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *estats, u64 *values)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int start;
+ u64 *p;
+ int i;
+
+ gmac_update_hw_stats(netdev);
+
+ /* Racing with MIB interrupt */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+ for (i = 0; i < RX_STATS_NUM; i++)
+ *p++ = port->hw_stats[i];
+
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ values = p;
+
+ /* Racing with RX NAPI */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+ for (i = 0; i < RX_STATUS_NUM; i++)
+ *p++ = port->rx_stats[i];
+ for (i = 0; i < RX_CHKSUM_NUM; i++)
+ *p++ = port->rx_csum_stats[i];
+ *p++ = port->rx_napi_exits;
+
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ values = p;
+
+ /* Racing with TX start_xmit */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+ for (i = 0; i < TX_MAX_FRAGS; i++) {
+ *values++ = port->tx_frag_stats[i];
+ port->tx_frag_stats[i] = 0;
+ }
+ *values++ = port->tx_frags_linearized;
+ *values++ = port->tx_hw_csummed;
+
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+}
+
+static int gmac_get_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
+
+ return 0;
+}
+
+static int gmac_set_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+}
+
+static int gmac_nway_reset(struct net_device *netdev)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ return phy_start_aneg(netdev->phydev);
+}
+
+static void gmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pparam)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0;
+
+ config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+
+ pparam->rx_pause = config0.bits.rx_fc_en;
+ pparam->tx_pause = config0.bits.tx_fc_en;
+ pparam->autoneg = true;
+}
+
+static void gmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *rp)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0;
+
+ config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+
+ rp->rx_max_pending = 1 << 15;
+ rp->rx_mini_max_pending = 0;
+ rp->rx_jumbo_max_pending = 0;
+ rp->tx_max_pending = 1 << 15;
+
+ rp->rx_pending = 1 << port->rxq_order;
+ rp->rx_mini_pending = 0;
+ rp->rx_jumbo_pending = 0;
+ rp->tx_pending = 1 << port->txq_order;
+}
+
+static int gmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *rp)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int err = 0;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (rp->rx_pending) {
+ port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
+ err = geth_resize_freeq(port);
+ }
+ if (rp->tx_pending) {
+ port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
+ port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+ }
+
+ return err;
+}
+
+static int gmac_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ ecmd->rx_max_coalesced_frames = 1;
+ ecmd->tx_max_coalesced_frames = port->irq_every_tx_packets;
+ ecmd->rx_coalesce_usecs = port->rx_coalesce_nsecs / 1000;
+
+ return 0;
+}
+
+static int gmac_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ if (ecmd->tx_max_coalesced_frames < 1)
+ return -EINVAL;
+ if (ecmd->tx_max_coalesced_frames >= 1 << port->txq_order)
+ return -EINVAL;
+
+ port->irq_every_tx_packets = ecmd->tx_max_coalesced_frames;
+ port->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static u32 gmac_get_msglevel(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ return port->msg_enable;
+}
+
+static void gmac_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ port->msg_enable = level;
+}
+
+static void gmac_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, netdev->dev_id ? "1" : "0");
+}
+
+static const struct net_device_ops gmac_351x_ops = {
+ .ndo_init = gmac_init,
+ .ndo_uninit = gmac_uninit,
+ .ndo_open = gmac_open,
+ .ndo_stop = gmac_stop,
+ .ndo_start_xmit = gmac_start_xmit,
+ .ndo_tx_timeout = gmac_tx_timeout,
+ .ndo_set_rx_mode = gmac_set_rx_mode,
+ .ndo_set_mac_address = gmac_set_mac_address,
+ .ndo_get_stats64 = gmac_get_stats64,
+ .ndo_change_mtu = gmac_change_mtu,
+ .ndo_fix_features = gmac_fix_features,
+ .ndo_set_features = gmac_set_features,
+};
+
+static const struct ethtool_ops gmac_351x_ethtool_ops = {
+ .get_sset_count = gmac_get_sset_count,
+ .get_strings = gmac_get_strings,
+ .get_ethtool_stats = gmac_get_ethtool_stats,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = gmac_get_ksettings,
+ .set_link_ksettings = gmac_set_ksettings,
+ .nway_reset = gmac_nway_reset,
+ .get_pauseparam = gmac_get_pauseparam,
+ .get_ringparam = gmac_get_ringparam,
+ .set_ringparam = gmac_set_ringparam,
+ .get_coalesce = gmac_get_coalesce,
+ .set_coalesce = gmac_set_coalesce,
+ .get_msglevel = gmac_get_msglevel,
+ .set_msglevel = gmac_set_msglevel,
+ .get_drvinfo = gmac_get_drvinfo,
+};
+
+static irqreturn_t gemini_port_irq_thread(int irq, void *data)
+{
+ unsigned long irqmask = SWFQ_EMPTY_INT_BIT;
+ struct gemini_ethernet_port *port = data;
+ struct gemini_ethernet *geth;
+ unsigned long flags;
+
+ geth = port->geth;
+ /* The queue is half empty so refill it */
+ geth_fill_freeq(geth, true);
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ /* ACK queue interrupt */
+ writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ /* Enable queue interrupt again */
+ irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gemini_port_irq(int irq, void *data)
+{
+ struct gemini_ethernet_port *port = data;
+ struct gemini_ethernet *geth;
+ irqreturn_t ret = IRQ_NONE;
+ u32 val, en;
+
+ geth = port->geth;
+ spin_lock(&geth->irq_lock);
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ if (val & en & SWFQ_EMPTY_INT_BIT) {
+ /* Disable the queue empty interrupt while we work on
+ * processing the queue. Also disable overrun interrupts
+ * as there is not much we can do about it here.
+ */
+ en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT
+ | GMAC1_RX_OVERRUN_INT_BIT);
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ spin_unlock(&geth->irq_lock);
+
+ return ret;
+}
+
+static void gemini_port_remove(struct gemini_ethernet_port *port)
+{
+ if (port->netdev)
+ unregister_netdev(port->netdev);
+ clk_disable_unprepare(port->pclk);
+ geth_cleanup_freeq(port->geth);
+}
+
+static void gemini_ethernet_init(struct gemini_ethernet *geth)
+{
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ /* Interrupt config:
+ *
+ * GMAC0 intr bits ------> int0 ----> eth0
+ * GMAC1 intr bits ------> int1 ----> eth1
+ * TOE intr -------------> int1 ----> eth1
+ * Classification Intr --> int0 ----> eth0
+ * Default Q0 -----------> int0 ----> eth0
+ * Default Q1 -----------> int1 ----> eth1
+ * FreeQ intr -----------> int1 ----> eth1
+ */
+ writel(0xCCFC0FC0, geth->base + GLOBAL_INTERRUPT_SELECT_0_REG);
+ writel(0x00F00002, geth->base + GLOBAL_INTERRUPT_SELECT_1_REG);
+ writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_2_REG);
+ writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_3_REG);
+ writel(0xFF000003, geth->base + GLOBAL_INTERRUPT_SELECT_4_REG);
+
+	/* Edge-triggered interrupts are latched and packed into one
+	 * level-triggered line, so ACK anything outstanding
+	 */
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ /* Set up queue */
+ writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+ writel(0, geth->base + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
+ writel(0, geth->base + GLOBAL_SWFQ_RWPTR_REG);
+ writel(0, geth->base + GLOBAL_HWFQ_RWPTR_REG);
+
+ geth->freeq_frag_order = DEFAULT_RX_BUF_ORDER;
+	/* Start with a dummy order so the first geth_resize_freeq() call
+	 * at probe() actually resizes the queue, which also sets up and
+	 * enables the queue IRQ. FIXME: fragile.
+	 */
+ geth->freeq_order = 1;
+}
+
+static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
+{
+ port->mac_addr[0] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD0));
+ port->mac_addr[1] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD1));
+ port->mac_addr[2] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD2));
+}
+
+static int gemini_ethernet_port_probe(struct platform_device *pdev)
+{
+ char *port_names[2] = { "ethernet0", "ethernet1" };
+ struct gemini_ethernet_port *port;
+ struct device *dev = &pdev->dev;
+ struct gemini_ethernet *geth;
+ struct net_device *netdev;
+ struct resource *gmacres;
+ struct resource *dmares;
+ struct device *parent;
+ unsigned int id;
+ int irq;
+ int ret;
+
+ parent = dev->parent;
+ geth = dev_get_drvdata(parent);
+
+ if (!strcmp(dev_name(dev), "60008000.ethernet-port"))
+ id = 0;
+ else if (!strcmp(dev_name(dev), "6000c000.ethernet-port"))
+ id = 1;
+ else
+ return -ENODEV;
+
+ dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
+
+ netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
+ if (!netdev) {
+ dev_err(dev, "Can't allocate ethernet device #%d\n", id);
+ return -ENOMEM;
+ }
+
+ port = netdev_priv(netdev);
+ SET_NETDEV_DEV(netdev, dev);
+ port->netdev = netdev;
+ port->id = id;
+ port->geth = geth;
+ port->dev = dev;
+
+ /* DMA memory */
+ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!dmares) {
+ dev_err(dev, "no DMA resource\n");
+ return -ENODEV;
+ }
+ port->dma_base = devm_ioremap_resource(dev, dmares);
+ if (IS_ERR(port->dma_base))
+ return PTR_ERR(port->dma_base);
+
+ /* GMAC config memory */
+ gmacres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!gmacres) {
+ dev_err(dev, "no GMAC resource\n");
+ return -ENODEV;
+ }
+ port->gmac_base = devm_ioremap_resource(dev, gmacres);
+ if (IS_ERR(port->gmac_base))
+ return PTR_ERR(port->gmac_base);
+
+ /* Interrupt */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no IRQ\n");
+ return irq ? irq : -ENODEV;
+ }
+ port->irq = irq;
+
+ /* Clock the port */
+ port->pclk = devm_clk_get(dev, "PCLK");
+ if (IS_ERR(port->pclk)) {
+ dev_err(dev, "no PCLK\n");
+ return PTR_ERR(port->pclk);
+ }
+ ret = clk_prepare_enable(port->pclk);
+ if (ret)
+ return ret;
+
+ /* Maybe there is a nice ethernet address we should use */
+ gemini_port_save_mac_addr(port);
+
+ /* Reset the port */
+ port->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(port->reset)) {
+ dev_err(dev, "no reset\n");
+ return PTR_ERR(port->reset);
+ }
+ reset_control_reset(port->reset);
+ usleep_range(100, 500);
+
+ /* Assign pointer in the main state container */
+ if (!id)
+ geth->port0 = port;
+ else
+ geth->port1 = port;
+ platform_set_drvdata(pdev, port);
+
+ /* Set up and register the netdev */
+ netdev->dev_id = port->id;
+ netdev->irq = irq;
+ netdev->netdev_ops = &gmac_351x_ops;
+ netdev->ethtool_ops = &gmac_351x_ethtool_ops;
+
+ spin_lock_init(&port->config_lock);
+ gmac_clear_hw_stats(netdev);
+
+ netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+ netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+
+ port->freeq_refill = 0;
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll,
+ DEFAULT_NAPI_WEIGHT);
+
+ if (is_valid_ether_addr((void *)port->mac_addr)) {
+ memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
+ } else {
+ dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
+ port->mac_addr[0], port->mac_addr[1],
+ port->mac_addr[2]);
+ dev_info(dev, "using a random ethernet address\n");
+ random_ether_addr(netdev->dev_addr);
+ }
+ gmac_write_mac_address(netdev);
+
+ ret = devm_request_threaded_irq(port->dev,
+ port->irq,
+ gemini_port_irq,
+ gemini_port_irq_thread,
+ IRQF_SHARED,
+ port_names[port->id],
+ port);
+ if (ret)
+ return ret;
+
+ ret = register_netdev(netdev);
+ if (!ret) {
+ netdev_info(netdev,
+ "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
+ port->irq, &dmares->start,
+ &gmacres->start);
+ ret = gmac_setup_phy(netdev);
+ if (ret)
+ netdev_info(netdev,
+ "PHY init failed, deferring to ifup time\n");
+ return 0;
+ }
+
+ port->netdev = NULL;
+ free_netdev(netdev);
+ return ret;
+}
+
+static int gemini_ethernet_port_remove(struct platform_device *pdev)
+{
+ struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
+
+ gemini_port_remove(port);
+ return 0;
+}
+
+static const struct of_device_id gemini_ethernet_port_of_match[] = {
+ {
+ .compatible = "cortina,gemini-ethernet-port",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);
+
+static struct platform_driver gemini_ethernet_port_driver = {
+ .driver = {
+ .name = "gemini-ethernet-port",
+ .of_match_table = of_match_ptr(gemini_ethernet_port_of_match),
+ },
+ .probe = gemini_ethernet_port_probe,
+ .remove = gemini_ethernet_port_remove,
+};
+
+static int gemini_ethernet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gemini_ethernet *geth;
+ unsigned int retry = 5;
+ struct resource *res;
+ u32 val;
+
+ /* Global registers */
+ geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL);
+ if (!geth)
+ return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ geth->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(geth->base))
+ return PTR_ERR(geth->base);
+ geth->dev = dev;
+
+ /* Wait for ports to stabilize */
+ do {
+ udelay(2);
+ val = readl(geth->base + GLOBAL_TOE_VERSION_REG);
+ barrier();
+ } while (!val && --retry);
+ if (!retry) {
+ dev_err(dev, "failed to reset ethernet\n");
+ return -EIO;
+ }
+ dev_info(dev, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
+ (val >> 4) & 0xFFFU, val & 0xFU);
+
+ spin_lock_init(&geth->irq_lock);
+ spin_lock_init(&geth->freeq_lock);
+ gemini_ethernet_init(geth);
+
+ /* The children will use this */
+ platform_set_drvdata(pdev, geth);
+
+ /* Spawn child devices for the two ports */
+ return devm_of_platform_populate(dev);
+}
+
+static int gemini_ethernet_remove(struct platform_device *pdev)
+{
+ struct gemini_ethernet *geth = platform_get_drvdata(pdev);
+
+ gemini_ethernet_init(geth);
+ geth_cleanup_freeq(geth);
+
+ return 0;
+}
+
+static const struct of_device_id gemini_ethernet_of_match[] = {
+ {
+ .compatible = "cortina,gemini-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);
+
+static struct platform_driver gemini_ethernet_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(gemini_ethernet_of_match),
+ },
+ .probe = gemini_ethernet_probe,
+ .remove = gemini_ethernet_remove,
+};
+
+static int __init gemini_ethernet_module_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&gemini_ethernet_port_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&gemini_ethernet_driver);
+ if (ret) {
+ platform_driver_unregister(&gemini_ethernet_port_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(gemini_ethernet_module_init);
+
+static void __exit gemini_ethernet_module_exit(void)
+{
+ platform_driver_unregister(&gemini_ethernet_driver);
+ platform_driver_unregister(&gemini_ethernet_port_driver);
+}
+module_exit(gemini_ethernet_module_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
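gemini_port_irq() and gemini_port_irq_thread() above follow the usual split
hard/threaded IRQ model: the hard handler only identifies the cause, masks it
under the spinlock and returns IRQ_WAKE_THREAD, while the sleepable refill
work runs in the thread. A minimal sketch of the same pattern follows; the
demo_* names, register offsets and the demo_refill_queue() helper are
illustrative placeholders, not symbols from this driver.

    #define DEMO_STAT        0x60            /* interrupt status */
    #define DEMO_MASK        0x64            /* interrupt enable mask */
    #define DEMO_QUEUE_EMPTY BIT(0)

    struct demo_priv {
            void __iomem *base;
            spinlock_t lock;
    };

    static irqreturn_t demo_hard_irq(int irq, void *data)
    {
            struct demo_priv *priv = data;
            irqreturn_t ret = IRQ_NONE;
            u32 stat, mask;

            spin_lock(&priv->lock);
            stat = readl(priv->base + DEMO_STAT);
            mask = readl(priv->base + DEMO_MASK);
            if (stat & mask & DEMO_QUEUE_EMPTY) {
                    /* Quiesce the cause, then defer the slow work */
                    writel(mask & ~DEMO_QUEUE_EMPTY, priv->base + DEMO_MASK);
                    ret = IRQ_WAKE_THREAD;
            }
            spin_unlock(&priv->lock);

            return ret;
    }

    static irqreturn_t demo_thread_irq(int irq, void *data)
    {
            struct demo_priv *priv = data;
            unsigned long flags;

            demo_refill_queue(priv);        /* placeholder; may sleep */

            /* Re-enable the cause now that the queue has room again */
            spin_lock_irqsave(&priv->lock, flags);
            writel(readl(priv->base + DEMO_MASK) | DEMO_QUEUE_EMPTY,
                   priv->base + DEMO_MASK);
            spin_unlock_irqrestore(&priv->lock, flags);

            return IRQ_HANDLED;
    }

Both handlers would be requested with devm_request_threaded_irq(dev, irq,
demo_hard_irq, demo_thread_irq, IRQF_SHARED, "demo-eth", priv) — the same
call shape the port probe above uses.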
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
new file mode 100644
index 000000000000..0b12f89bf89a
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -0,0 +1,958 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Register definitions for Gemini GMAC Ethernet device driver
+ *
+ * Copyright (C) 2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2010 Michał Mirosław <mirq-linux@rere.qmqm.pl>
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ */
+#ifndef _GEMINI_ETHERNET_H
+#define _GEMINI_ETHERNET_H
+
+#include <linux/bitops.h>
+
+/* Base Registers */
+#define TOE_NONTOE_QUE_HDR_BASE 0x2000
+#define TOE_TOE_QUE_HDR_BASE 0x3000
+
+/* Queue ID */
+#define TOE_SW_FREE_QID 0x00
+#define TOE_HW_FREE_QID 0x01
+#define TOE_GMAC0_SW_TXQ0_QID 0x02
+#define TOE_GMAC0_SW_TXQ1_QID 0x03
+#define TOE_GMAC0_SW_TXQ2_QID 0x04
+#define TOE_GMAC0_SW_TXQ3_QID 0x05
+#define TOE_GMAC0_SW_TXQ4_QID 0x06
+#define TOE_GMAC0_SW_TXQ5_QID 0x07
+#define TOE_GMAC0_HW_TXQ0_QID 0x08
+#define TOE_GMAC0_HW_TXQ1_QID 0x09
+#define TOE_GMAC0_HW_TXQ2_QID 0x0A
+#define TOE_GMAC0_HW_TXQ3_QID 0x0B
+#define TOE_GMAC1_SW_TXQ0_QID 0x12
+#define TOE_GMAC1_SW_TXQ1_QID 0x13
+#define TOE_GMAC1_SW_TXQ2_QID 0x14
+#define TOE_GMAC1_SW_TXQ3_QID 0x15
+#define TOE_GMAC1_SW_TXQ4_QID 0x16
+#define TOE_GMAC1_SW_TXQ5_QID 0x17
+#define TOE_GMAC1_HW_TXQ0_QID 0x18
+#define TOE_GMAC1_HW_TXQ1_QID 0x19
+#define TOE_GMAC1_HW_TXQ2_QID 0x1A
+#define TOE_GMAC1_HW_TXQ3_QID 0x1B
+#define TOE_GMAC0_DEFAULT_QID 0x20
+#define TOE_GMAC1_DEFAULT_QID 0x21
+#define TOE_CLASSIFICATION_QID(x) (0x22 + (x)) /* 0x22 ~ 0x2F */
+#define TOE_TOE_QID(x) (0x40 + (x)) /* 0x40 ~ 0x7F */
+
+/* TOE DMA Queue Size should be 2^n, n = 6...12
+ * TOE DMA Queues are the following queue types:
+ * SW Free Queue, HW Free Queue,
+ * GMAC 0/1 SW TX Q0-5, and GMAC 0/1 HW TX Q0-5
+ * The base address and descriptor number are configured at
+ * DMA Queues Descriptor Ring Base Address/Size Register (offset 0x0004)
+ */
+#define GET_WPTR(addr) readw((addr) + 2)
+#define GET_RPTR(addr) readw((addr))
+#define SET_WPTR(addr, data) writew((data), (addr) + 2)
+#define SET_RPTR(addr, data) writew((data), (addr))
+#define __RWPTR_NEXT(x, mask) (((unsigned int)(x) + 1) & (mask))
+#define __RWPTR_PREV(x, mask) (((unsigned int)(x) - 1) & (mask))
+#define __RWPTR_DISTANCE(r, w, mask) (((unsigned int)(w) - (r)) & (mask))
+#define __RWPTR_MASK(order) ((1 << (order)) - 1)
+#define RWPTR_NEXT(x, order) __RWPTR_NEXT((x), __RWPTR_MASK((order)))
+#define RWPTR_PREV(x, order) __RWPTR_PREV((x), __RWPTR_MASK((order)))
+#define RWPTR_DISTANCE(r, w, order) __RWPTR_DISTANCE((r), (w), \
+ __RWPTR_MASK((order)))
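+/* For example, with a queue of order 6 (64 entries) the mask is 0x3f
+ * and the pointer arithmetic wraps modulo the queue size:
+ *
+ * RWPTR_NEXT(63, 6) == 0
+ * RWPTR_PREV(0, 6) == 63
+ * RWPTR_DISTANCE(60, 3, 6) == 7 (the write pointer has wrapped)
+ */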
+
+/* Global registers */
+#define GLOBAL_TOE_VERSION_REG 0x0000
+#define GLOBAL_SW_FREEQ_BASE_SIZE_REG 0x0004
+#define GLOBAL_HW_FREEQ_BASE_SIZE_REG 0x0008
+#define GLOBAL_DMA_SKB_SIZE_REG 0x0010
+#define GLOBAL_SWFQ_RWPTR_REG 0x0014
+#define GLOBAL_HWFQ_RWPTR_REG 0x0018
+#define GLOBAL_INTERRUPT_STATUS_0_REG 0x0020
+#define GLOBAL_INTERRUPT_ENABLE_0_REG 0x0024
+#define GLOBAL_INTERRUPT_SELECT_0_REG 0x0028
+#define GLOBAL_INTERRUPT_STATUS_1_REG 0x0030
+#define GLOBAL_INTERRUPT_ENABLE_1_REG 0x0034
+#define GLOBAL_INTERRUPT_SELECT_1_REG 0x0038
+#define GLOBAL_INTERRUPT_STATUS_2_REG 0x0040
+#define GLOBAL_INTERRUPT_ENABLE_2_REG 0x0044
+#define GLOBAL_INTERRUPT_SELECT_2_REG 0x0048
+#define GLOBAL_INTERRUPT_STATUS_3_REG 0x0050
+#define GLOBAL_INTERRUPT_ENABLE_3_REG 0x0054
+#define GLOBAL_INTERRUPT_SELECT_3_REG 0x0058
+#define GLOBAL_INTERRUPT_STATUS_4_REG 0x0060
+#define GLOBAL_INTERRUPT_ENABLE_4_REG 0x0064
+#define GLOBAL_INTERRUPT_SELECT_4_REG 0x0068
+#define GLOBAL_HASH_TABLE_BASE_REG 0x006C
+#define GLOBAL_QUEUE_THRESHOLD_REG 0x0070
+
+/* GMAC 0/1 DMA/TOE register */
+#define GMAC_DMA_CTRL_REG 0x0000
+#define GMAC_TX_WEIGHTING_CTRL_0_REG 0x0004
+#define GMAC_TX_WEIGHTING_CTRL_1_REG 0x0008
+#define GMAC_SW_TX_QUEUE0_PTR_REG 0x000C
+#define GMAC_SW_TX_QUEUE1_PTR_REG 0x0010
+#define GMAC_SW_TX_QUEUE2_PTR_REG 0x0014
+#define GMAC_SW_TX_QUEUE3_PTR_REG 0x0018
+#define GMAC_SW_TX_QUEUE4_PTR_REG 0x001C
+#define GMAC_SW_TX_QUEUE5_PTR_REG 0x0020
+#define GMAC_SW_TX_QUEUE_PTR_REG(i) (GMAC_SW_TX_QUEUE0_PTR_REG + 4 * (i))
+#define GMAC_HW_TX_QUEUE0_PTR_REG 0x0024
+#define GMAC_HW_TX_QUEUE1_PTR_REG 0x0028
+#define GMAC_HW_TX_QUEUE2_PTR_REG 0x002C
+#define GMAC_HW_TX_QUEUE3_PTR_REG 0x0030
+#define GMAC_HW_TX_QUEUE_PTR_REG(i) (GMAC_HW_TX_QUEUE0_PTR_REG + 4 * (i))
+#define GMAC_DMA_TX_FIRST_DESC_REG 0x0038
+#define GMAC_DMA_TX_CURR_DESC_REG 0x003C
+#define GMAC_DMA_TX_DESC_WORD0_REG 0x0040
+#define GMAC_DMA_TX_DESC_WORD1_REG 0x0044
+#define GMAC_DMA_TX_DESC_WORD2_REG 0x0048
+#define GMAC_DMA_TX_DESC_WORD3_REG 0x004C
+#define GMAC_SW_TX_QUEUE_BASE_REG 0x0050
+#define GMAC_HW_TX_QUEUE_BASE_REG 0x0054
+#define GMAC_DMA_RX_FIRST_DESC_REG 0x0058
+#define GMAC_DMA_RX_CURR_DESC_REG 0x005C
+#define GMAC_DMA_RX_DESC_WORD0_REG 0x0060
+#define GMAC_DMA_RX_DESC_WORD1_REG 0x0064
+#define GMAC_DMA_RX_DESC_WORD2_REG 0x0068
+#define GMAC_DMA_RX_DESC_WORD3_REG 0x006C
+#define GMAC_HASH_ENGINE_REG0 0x0070
+#define GMAC_HASH_ENGINE_REG1 0x0074
+/* matching rule 0 Control register 0 */
+#define GMAC_MR0CR0 0x0078
+#define GMAC_MR0CR1 0x007C
+#define GMAC_MR0CR2 0x0080
+#define GMAC_MR1CR0 0x0084
+#define GMAC_MR1CR1 0x0088
+#define GMAC_MR1CR2 0x008C
+#define GMAC_MR2CR0 0x0090
+#define GMAC_MR2CR1 0x0094
+#define GMAC_MR2CR2 0x0098
+#define GMAC_MR3CR0 0x009C
+#define GMAC_MR3CR1 0x00A0
+#define GMAC_MR3CR2 0x00A4
+/* Support Protocol Register 0 */
+#define GMAC_SPR0 0x00A8
+#define GMAC_SPR1 0x00AC
+#define GMAC_SPR2 0x00B0
+#define GMAC_SPR3 0x00B4
+#define GMAC_SPR4 0x00B8
+#define GMAC_SPR5 0x00BC
+#define GMAC_SPR6 0x00C0
+#define GMAC_SPR7 0x00C4
+/* GMAC Hash/Rx/Tx AHB Weighting register */
+#define GMAC_AHB_WEIGHT_REG 0x00C8
+
+/* TOE GMAC 0/1 register */
+#define GMAC_STA_ADD0 0x0000
+#define GMAC_STA_ADD1 0x0004
+#define GMAC_STA_ADD2 0x0008
+#define GMAC_RX_FLTR 0x000c
+#define GMAC_MCAST_FIL0 0x0010
+#define GMAC_MCAST_FIL1 0x0014
+#define GMAC_CONFIG0 0x0018
+#define GMAC_CONFIG1 0x001c
+#define GMAC_CONFIG2 0x0020
+#define GMAC_CONFIG3 0x0024
+#define GMAC_RESERVED 0x0028
+#define GMAC_STATUS 0x002c
+#define GMAC_IN_DISCARDS 0x0030
+#define GMAC_IN_ERRORS 0x0034
+#define GMAC_IN_MCAST 0x0038
+#define GMAC_IN_BCAST 0x003c
+#define GMAC_IN_MAC1 0x0040 /* for STA 1 MAC Address */
+#define GMAC_IN_MAC2 0x0044 /* for STA 2 MAC Address */
+
+#define RX_STATS_NUM 6
+
+/* DMA Queues description Ring Base Address/Size Register (offset 0x0004) */
+union dma_q_base_size {
+ unsigned int bits32;
+ unsigned int base_size;
+};
+
+#define DMA_Q_BASE_MASK (~0x0f)
+
+/* DMA SKB Buffer register (offset 0x0008) */
+union dma_skb_size {
+ unsigned int bits32;
+ struct bit_0008 {
+ unsigned int sw_skb_size : 16; /* SW Free pool SKB Size */
+ unsigned int hw_skb_size : 16; /* HW Free pool SKB Size */
+ } bits;
+};
+
+/* DMA SW Free Queue Read/Write Pointer Register (offset 0x000c) */
+union dma_rwptr {
+ unsigned int bits32;
+ struct bit_000c {
+ unsigned int rptr : 16; /* Read Ptr, RO */
+ unsigned int wptr : 16; /* Write Ptr, RW */
+ } bits;
+};
+
+/* Interrupt Status Register 0 (offset 0x0020)
+ * Interrupt Mask Register 0 (offset 0x0024)
+ * Interrupt Select Register 0 (offset 0x0028)
+ */
+#define GMAC1_TXDERR_INT_BIT BIT(31)
+#define GMAC1_TXPERR_INT_BIT BIT(30)
+#define GMAC0_TXDERR_INT_BIT BIT(29)
+#define GMAC0_TXPERR_INT_BIT BIT(28)
+#define GMAC1_RXDERR_INT_BIT BIT(27)
+#define GMAC1_RXPERR_INT_BIT BIT(26)
+#define GMAC0_RXDERR_INT_BIT BIT(25)
+#define GMAC0_RXPERR_INT_BIT BIT(24)
+#define GMAC1_SWTQ15_FIN_INT_BIT BIT(23)
+#define GMAC1_SWTQ14_FIN_INT_BIT BIT(22)
+#define GMAC1_SWTQ13_FIN_INT_BIT BIT(21)
+#define GMAC1_SWTQ12_FIN_INT_BIT BIT(20)
+#define GMAC1_SWTQ11_FIN_INT_BIT BIT(19)
+#define GMAC1_SWTQ10_FIN_INT_BIT BIT(18)
+#define GMAC0_SWTQ05_FIN_INT_BIT BIT(17)
+#define GMAC0_SWTQ04_FIN_INT_BIT BIT(16)
+#define GMAC0_SWTQ03_FIN_INT_BIT BIT(15)
+#define GMAC0_SWTQ02_FIN_INT_BIT BIT(14)
+#define GMAC0_SWTQ01_FIN_INT_BIT BIT(13)
+#define GMAC0_SWTQ00_FIN_INT_BIT BIT(12)
+#define GMAC1_SWTQ15_EOF_INT_BIT BIT(11)
+#define GMAC1_SWTQ14_EOF_INT_BIT BIT(10)
+#define GMAC1_SWTQ13_EOF_INT_BIT BIT(9)
+#define GMAC1_SWTQ12_EOF_INT_BIT BIT(8)
+#define GMAC1_SWTQ11_EOF_INT_BIT BIT(7)
+#define GMAC1_SWTQ10_EOF_INT_BIT BIT(6)
+#define GMAC0_SWTQ05_EOF_INT_BIT BIT(5)
+#define GMAC0_SWTQ04_EOF_INT_BIT BIT(4)
+#define GMAC0_SWTQ03_EOF_INT_BIT BIT(3)
+#define GMAC0_SWTQ02_EOF_INT_BIT BIT(2)
+#define GMAC0_SWTQ01_EOF_INT_BIT BIT(1)
+#define GMAC0_SWTQ00_EOF_INT_BIT BIT(0)
+
+/* Interrupt Status Register 1 (offset 0x0030)
+ * Interrupt Mask Register 1 (offset 0x0034)
+ * Interrupt Select Register 1 (offset 0x0038)
+ */
+#define TOE_IQ3_FULL_INT_BIT BIT(31)
+#define TOE_IQ2_FULL_INT_BIT BIT(30)
+#define TOE_IQ1_FULL_INT_BIT BIT(29)
+#define TOE_IQ0_FULL_INT_BIT BIT(28)
+#define TOE_IQ3_INT_BIT BIT(27)
+#define TOE_IQ2_INT_BIT BIT(26)
+#define TOE_IQ1_INT_BIT BIT(25)
+#define TOE_IQ0_INT_BIT BIT(24)
+#define GMAC1_HWTQ13_EOF_INT_BIT BIT(23)
+#define GMAC1_HWTQ12_EOF_INT_BIT BIT(22)
+#define GMAC1_HWTQ11_EOF_INT_BIT BIT(21)
+#define GMAC1_HWTQ10_EOF_INT_BIT BIT(20)
+#define GMAC0_HWTQ03_EOF_INT_BIT BIT(19)
+#define GMAC0_HWTQ02_EOF_INT_BIT BIT(18)
+#define GMAC0_HWTQ01_EOF_INT_BIT BIT(17)
+#define GMAC0_HWTQ00_EOF_INT_BIT BIT(16)
+#define CLASS_RX_INT_BIT(x) BIT((x) + 2)
+#define DEFAULT_Q1_INT_BIT BIT(1)
+#define DEFAULT_Q0_INT_BIT BIT(0)
+
+#define TOE_IQ_INT_BITS (TOE_IQ0_INT_BIT | TOE_IQ1_INT_BIT | \
+ TOE_IQ2_INT_BIT | TOE_IQ3_INT_BIT)
+#define TOE_IQ_FULL_BITS (TOE_IQ0_FULL_INT_BIT | TOE_IQ1_FULL_INT_BIT | \
+ TOE_IQ2_FULL_INT_BIT | TOE_IQ3_FULL_INT_BIT)
+#define TOE_IQ_ALL_BITS (TOE_IQ_INT_BITS | TOE_IQ_FULL_BITS)
+#define TOE_CLASS_RX_INT_BITS 0xfffc
+
+/* Interrupt Status Register 2 (offset 0x0040)
+ * Interrupt Mask Register 2 (offset 0x0044)
+ * Interrupt Select Register 2 (offset 0x0048)
+ */
+#define TOE_QL_FULL_INT_BIT(x) BIT(x)
+
+/* Interrupt Status Register 3 (offset 0x0050)
+ * Interrupt Mask Register 3 (offset 0x0054)
+ * Interrupt Select Register 3 (offset 0x0058)
+ */
+#define TOE_QH_FULL_INT_BIT(x) BIT((x) - 32)
+
+/* Interrupt Status Register 4 (offset 0x0060)
+ * Interrupt Mask Register 4 (offset 0x0064)
+ * Interrupt Select Register 4 (offset 0x0068)
+ */
+#define GMAC1_RESERVED_INT_BIT BIT(31)
+#define GMAC1_MIB_INT_BIT BIT(30)
+#define GMAC1_RX_PAUSE_ON_INT_BIT BIT(29)
+#define GMAC1_TX_PAUSE_ON_INT_BIT BIT(28)
+#define GMAC1_RX_PAUSE_OFF_INT_BIT BIT(27)
+#define GMAC1_TX_PAUSE_OFF_INT_BIT BIT(26)
+#define GMAC1_RX_OVERRUN_INT_BIT BIT(25)
+#define GMAC1_STATUS_CHANGE_INT_BIT BIT(24)
+#define GMAC0_RESERVED_INT_BIT BIT(23)
+#define GMAC0_MIB_INT_BIT BIT(22)
+#define GMAC0_RX_PAUSE_ON_INT_BIT BIT(21)
+#define GMAC0_TX_PAUSE_ON_INT_BIT BIT(20)
+#define GMAC0_RX_PAUSE_OFF_INT_BIT BIT(19)
+#define GMAC0_TX_PAUSE_OFF_INT_BIT BIT(18)
+#define GMAC0_RX_OVERRUN_INT_BIT BIT(17)
+#define GMAC0_STATUS_CHANGE_INT_BIT BIT(16)
+#define CLASS_RX_FULL_INT_BIT(x) BIT((x) + 2)
+#define HWFQ_EMPTY_INT_BIT BIT(1)
+#define SWFQ_EMPTY_INT_BIT BIT(0)
+
+#define GMAC0_INT_BITS (GMAC0_RESERVED_INT_BIT | GMAC0_MIB_INT_BIT | \
+ GMAC0_RX_PAUSE_ON_INT_BIT | \
+ GMAC0_TX_PAUSE_ON_INT_BIT | \
+ GMAC0_RX_PAUSE_OFF_INT_BIT | \
+ GMAC0_TX_PAUSE_OFF_INT_BIT | \
+ GMAC0_RX_OVERRUN_INT_BIT | \
+ GMAC0_STATUS_CHANGE_INT_BIT)
+#define GMAC1_INT_BITS (GMAC1_RESERVED_INT_BIT | GMAC1_MIB_INT_BIT | \
+ GMAC1_RX_PAUSE_ON_INT_BIT | \
+ GMAC1_TX_PAUSE_ON_INT_BIT | \
+ GMAC1_RX_PAUSE_OFF_INT_BIT | \
+ GMAC1_TX_PAUSE_OFF_INT_BIT | \
+ GMAC1_RX_OVERRUN_INT_BIT | \
+ GMAC1_STATUS_CHANGE_INT_BIT)
+
+#define CLASS_RX_FULL_INT_BITS 0xfffc
+
+/* GLOBAL_QUEUE_THRESHOLD_REG (offset 0x0070) */
+union queue_threshold {
+ unsigned int bits32;
+ struct bit_0070_2 {
+ /* 7:0 Software Free Queue Empty Threshold */
+ unsigned int swfq_empty:8;
+ /* 15:8 Hardware Free Queue Empty Threshold */
+ unsigned int hwfq_empty:8;
+ /* 23:16 */
+ unsigned int intrq:8;
+ /* 31:24 */
+ unsigned int toe_class:8;
+ } bits;
+};
+
+/* GMAC DMA Control Register
+ * GMAC0 offset 0x8000
+ * GMAC1 offset 0xC000
+ */
+union gmac_dma_ctrl {
+ unsigned int bits32;
+ struct bit_8000 {
+ /* bit 1:0 Peripheral Bus Width */
+ unsigned int td_bus:2;
+ /* bit 3:2 TxDMA max burst size for every AHB request */
+ unsigned int td_burst_size:2;
+ /* bit 7:4 TxDMA protection control */
+ unsigned int td_prot:4;
+ /* bit 9:8 Peripheral Bus Width */
+ unsigned int rd_bus:2;
+ /* bit 11:10 DMA max burst size for every AHB request */
+ unsigned int rd_burst_size:2;
+ /* bit 15:12 DMA Protection Control */
+ unsigned int rd_prot:4;
+ /* bit 17:16 */
+ unsigned int rd_insert_bytes:2;
+ /* bit 27:18 */
+ unsigned int reserved:10;
+ /* bit 28 1: Drop, 0: Accept */
+ unsigned int drop_small_ack:1;
+ /* bit 29 Loopback TxDMA to RxDMA */
+ unsigned int loopback:1;
+ /* bit 30 Tx DMA Enable */
+ unsigned int td_enable:1;
+ /* bit 31 Rx DMA Enable */
+ unsigned int rd_enable:1;
+ } bits;
+};
+
+/* GMAC Tx Weighting Control Register 0
+ * GMAC0 offset 0x8004
+ * GMAC1 offset 0xC004
+ */
+union gmac_tx_wcr0 {
+ unsigned int bits32;
+ struct bit_8004 {
+ /* bit 5:0 HW TX Queue 3 */
+ unsigned int hw_tq0:6;
+ /* bit 11:6 HW TX Queue 2 */
+ unsigned int hw_tq1:6;
+ /* bit 17:12 HW TX Queue 1 */
+ unsigned int hw_tq2:6;
+ /* bit 23:18 HW TX Queue 0 */
+ unsigned int hw_tq3:6;
+ /* bit 31:24 */
+ unsigned int reserved:8;
+ } bits;
+};
+
+/* GMAC Tx Weighting Control Register 1
+ * GMAC0 offset 0x8008
+ * GMAC1 offset 0xC008
+ */
+union gmac_tx_wcr1 {
+ unsigned int bits32;
+ struct bit_8008 {
+ /* bit 4:0 SW TX Queue 0 */
+ unsigned int sw_tq0:5;
+ /* bit 9:5 SW TX Queue 1 */
+ unsigned int sw_tq1:5;
+ /* bit 14:10 SW TX Queue 2 */
+ unsigned int sw_tq2:5;
+ /* bit 19:15 SW TX Queue 3 */
+ unsigned int sw_tq3:5;
+ /* bit 24:20 SW TX Queue 4 */
+ unsigned int sw_tq4:5;
+ /* bit 29:25 SW TX Queue 5 */
+ unsigned int sw_tq5:5;
+ /* bit 31:30 */
+ unsigned int reserved:2;
+ } bits;
+};
+
+/* GMAC DMA Tx Description Word 0 Register
+ * GMAC0 offset 0x8040
+ * GMAC1 offset 0xC040
+ */
+union gmac_txdesc_0 {
+ unsigned int bits32;
+ struct bit_8040 {
+ /* bit 15:0 Transfer size */
+ unsigned int buffer_size:16;
+ /* bit 21:16 number of descriptors used for the current frame */
+ unsigned int desc_count:6;
+ /* bit 22 Tx Status, 1: Successful 0: Failed */
+ unsigned int status_tx_ok:1;
+ /* bit 28:23 Tx Status, Reserved bits */
+ unsigned int status_rvd:6;
+ /* bit 29 protocol error during processing this descriptor */
+ unsigned int perr:1;
+ /* bit 30 data error during processing this descriptor */
+ unsigned int derr:1;
+ /* bit 31 */
+ unsigned int reserved:1;
+ } bits;
+};
+
+/* GMAC DMA Tx Description Word 1 Register
+ * GMAC0 offset 0x8044
+ * GMAC1 offset 0xC044
+ */
+union gmac_txdesc_1 {
+ unsigned int bits32;
+ struct txdesc_word1 {
+ /* bit 15: 0 Tx Frame Byte Count */
+ unsigned int byte_count:16;
+ /* bit 16 TSS segmentation use MTU setting */
+ unsigned int mtu_enable:1;
+ /* bit 17 IPV4 Header Checksum Enable */
+ unsigned int ip_chksum:1;
+ /* bit 18 IPV6 Tx Enable */
+ unsigned int ipv6_enable:1;
+ /* bit 19 TCP Checksum Enable */
+ unsigned int tcp_chksum:1;
+ /* bit 20 UDP Checksum Enable */
+ unsigned int udp_chksum:1;
+ /* bit 21 Bypass HW offload engine */
+ unsigned int bypass_tss:1;
+ /* bit 22 Don't update IP length field */
+ unsigned int ip_fixed_len:1;
+ /* bit 31:23 Tx Flag, Reserved */
+ unsigned int reserved:9;
+ } bits;
+};
+
+#define TSS_IP_FIXED_LEN_BIT BIT(22)
+#define TSS_BYPASS_BIT BIT(21)
+#define TSS_UDP_CHKSUM_BIT BIT(20)
+#define TSS_TCP_CHKSUM_BIT BIT(19)
+#define TSS_IPV6_ENABLE_BIT BIT(18)
+#define TSS_IP_CHKSUM_BIT BIT(17)
+#define TSS_MTU_ENABLE_BIT BIT(16)
+
+#define TSS_CHECKUM_ENABLE \
+ (TSS_IP_CHKSUM_BIT | TSS_IPV6_ENABLE_BIT | \
+ TSS_TCP_CHKSUM_BIT | TSS_UDP_CHKSUM_BIT)
+
+/* GMAC DMA Tx Description Word 2 Register
+ * GMAC0 offset 0x8048
+ * GMAC1 offset 0xC048
+ */
+union gmac_txdesc_2 {
+ unsigned int bits32;
+ unsigned int buf_adr;
+};
+
+/* GMAC DMA Tx Description Word 3 Register
+ * GMAC0 offset 0x804C
+ * GMAC1 offset 0xC04C
+ */
+union gmac_txdesc_3 {
+ unsigned int bits32;
+ struct txdesc_word3 {
+ /* bit 12:0 MTU size */
+ unsigned int mtu_size:13;
+ /* bit 28:13 */
+ unsigned int reserved:16;
+ /* bit 29 End of frame interrupt enable */
+ unsigned int eofie:1;
+ /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+ unsigned int sof_eof:2;
+ } bits;
+};
+
+#define SOF_EOF_BIT_MASK 0x3fffffff
+#define SOF_BIT 0x80000000
+#define EOF_BIT 0x40000000
+#define EOFIE_BIT BIT(29)
+#define MTU_SIZE_BIT_MASK 0x1fff
+
+/* GMAC Tx Descriptor */
+struct gmac_txdesc {
+ union gmac_txdesc_0 word0;
+ union gmac_txdesc_1 word1;
+ union gmac_txdesc_2 word2;
+ union gmac_txdesc_3 word3;
+};
+
+/* GMAC DMA Rx Description Word 0 Register
+ * GMAC0 offset 0x8060
+ * GMAC1 offset 0xC060
+ */
+union gmac_rxdesc_0 {
+ unsigned int bits32;
+ struct bit_8060 {
+ /* bit 15:0 buffer size of the received frame */
+ unsigned int buffer_size:16;
+ /* bit 21:16 number of descriptors used for the current frame */
+ unsigned int desc_count:6;
+ /* bit 25:22 Status of rx frame */
+ unsigned int status:4;
+ /* bit 28:26 Check Sum Status */
+ unsigned int chksum_status:3;
+ /* bit 29 protocol error during processing this descriptor */
+ unsigned int perr:1;
+ /* bit 30 data error during processing this descriptor */
+ unsigned int derr:1;
+ /* bit 31 TOE/CIS Queue Full dropped packet to default queue */
+ unsigned int drop:1;
+ } bits;
+};
+
+#define GMAC_RXDESC_0_T_derr BIT(30)
+#define GMAC_RXDESC_0_T_perr BIT(29)
+#define GMAC_RXDESC_0_T_chksum_status(x) BIT((x) + 26)
+#define GMAC_RXDESC_0_T_status(x) BIT((x) + 22)
+#define GMAC_RXDESC_0_T_desc_count(x) BIT((x) + 16)
+
+#define RX_CHKSUM_IP_UDP_TCP_OK 0
+#define RX_CHKSUM_IP_OK_ONLY 1
+#define RX_CHKSUM_NONE 2
+#define RX_CHKSUM_IP_ERR_UNKNOWN 4
+#define RX_CHKSUM_IP_ERR 5
+#define RX_CHKSUM_TCP_UDP_ERR 6
+#define RX_CHKSUM_NUM 8
+
+#define RX_STATUS_GOOD_FRAME 0
+#define RX_STATUS_TOO_LONG_GOOD_CRC 1
+#define RX_STATUS_RUNT_FRAME 2
+#define RX_STATUS_SFD_NOT_FOUND 3
+#define RX_STATUS_CRC_ERROR 4
+#define RX_STATUS_TOO_LONG_BAD_CRC 5
+#define RX_STATUS_ALIGNMENT_ERROR 6
+#define RX_STATUS_TOO_LONG_BAD_ALIGN 7
+#define RX_STATUS_RX_ERR 8
+#define RX_STATUS_DA_FILTERED 9
+#define RX_STATUS_BUFFER_FULL 10
+#define RX_STATUS_NUM 16
+
+#define RX_ERROR_LENGTH(s) \
+ ((s) == RX_STATUS_TOO_LONG_GOOD_CRC || \
+ (s) == RX_STATUS_TOO_LONG_BAD_CRC || \
+ (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_OVER(s) \
+ ((s) == RX_STATUS_BUFFER_FULL)
+#define RX_ERROR_CRC(s) \
+ ((s) == RX_STATUS_CRC_ERROR || \
+ (s) == RX_STATUS_TOO_LONG_BAD_CRC)
+#define RX_ERROR_FRAME(s) \
+ ((s) == RX_STATUS_ALIGNMENT_ERROR || \
+ (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_FIFO(s) \
+ (0)
+
+/* GMAC DMA Rx Description Word 1 Register
+ * GMAC0 offset 0x8064
+ * GMAC1 offset 0xC064
+ */
+union gmac_rxdesc_1 {
+ unsigned int bits32;
+ struct rxdesc_word1 {
+ /* bit 15: 0 Rx Frame Byte Count */
+ unsigned int byte_count:16;
+ /* bit 31:16 Software ID */
+ unsigned int sw_id:16;
+ } bits;
+};
+
+/* GMAC DMA Rx Description Word 2 Register
+ * GMAC0 offset 0x8068
+ * GMAC1 offset 0xC068
+ */
+union gmac_rxdesc_2 {
+ unsigned int bits32;
+ unsigned int buf_adr;
+};
+
+#define RX_INSERT_NONE 0
+#define RX_INSERT_1_BYTE 1
+#define RX_INSERT_2_BYTE 2
+#define RX_INSERT_3_BYTE 3
+
+/* GMAC DMA Rx Description Word 3 Register
+ * GMAC0 offset 0x806C
+ * GMAC1 offset 0xC06C
+ */
+union gmac_rxdesc_3 {
+ unsigned int bits32;
+ struct rxdesc_word3 {
+ /* bit 7: 0 L3 data offset */
+ unsigned int l3_offset:8;
+ /* bit 15: 8 L4 data offset */
+ unsigned int l4_offset:8;
+ /* bit 23: 16 L7 data offset */
+ unsigned int l7_offset:8;
+ /* bit 24 Duplicated ACK detected */
+ unsigned int dup_ack:1;
+ /* bit 25 abnormal case found */
+ unsigned int abnormal:1;
+ /* bit 26 IPV4 option or IPV6 extension header */
+ unsigned int option:1;
+ /* bit 27 Out of Sequence packet */
+ unsigned int out_of_seq:1;
+ /* bit 28 Control Flag is present */
+ unsigned int ctrl_flag:1;
+ /* bit 29 End of frame interrupt enable */
+ unsigned int eofie:1;
+ /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+ unsigned int sof_eof:2;
+ } bits;
+};
+
+/* GMAC Rx Descriptor, this is simply fitted over the queue registers */
+struct gmac_rxdesc {
+ union gmac_rxdesc_0 word0;
+ union gmac_rxdesc_1 word1;
+ union gmac_rxdesc_2 word2;
+ union gmac_rxdesc_3 word3;
+};
+
+/* GMAC Matching Rule Control Register 0
+ * GMAC0 offset 0x8078
+ * GMAC1 offset 0xC078
+ */
+#define MR_L2_BIT BIT(31)
+#define MR_L3_BIT BIT(30)
+#define MR_L4_BIT BIT(29)
+#define MR_L7_BIT BIT(28)
+#define MR_PORT_BIT BIT(27)
+#define MR_PRIORITY_BIT BIT(26)
+#define MR_DA_BIT BIT(23)
+#define MR_SA_BIT BIT(22)
+#define MR_ETHER_TYPE_BIT BIT(21)
+#define MR_VLAN_BIT BIT(20)
+#define MR_PPPOE_BIT BIT(19)
+#define MR_IP_VER_BIT BIT(15)
+#define MR_IP_HDR_LEN_BIT BIT(14)
+#define MR_FLOW_LABLE_BIT BIT(13)
+#define MR_TOS_TRAFFIC_BIT BIT(12)
+#define MR_SPR_BIT(x) BIT(x)
+#define MR_SPR_BITS 0xff
+
+/* GMAC_AHB_WEIGHT registers
+ * GMAC0 offset 0x80C8
+ * GMAC1 offset 0xC0C8
+ */
+union gmac_ahb_weight {
+ unsigned int bits32;
+ struct bit_80C8 {
+ /* 4:0 */
+ unsigned int hash_weight:5;
+ /* 9:5 */
+ unsigned int rx_weight:5;
+ /* 14:10 */
+ unsigned int tx_weight:5;
+ /* 19:15 Rx Data Pre Request FIFO Threshold */
+ unsigned int pre_req:5;
+ /* 24:20 DMA TqCtrl to Start tqDV FIFO Threshold */
+ unsigned int tq_dv_threshold:5;
+ /* 31:25 */
+ unsigned int reserved:7;
+ } bits;
+};
+
+/* GMAC RX FLTR
+ * GMAC0 Offset 0xA00C
+ * GMAC1 Offset 0xE00C
+ */
+union gmac_rx_fltr {
+ unsigned int bits32;
+ struct bit1_000c {
+ /* Enable receive of unicast frames that are sent to STA
+ * address
+ */
+ unsigned int unicast:1;
+ /* Enable receive of multicast frames that pass multicast
+ * filter
+ */
+ unsigned int multicast:1;
+ /* Enable receive of broadcast frames */
+ unsigned int broadcast:1;
+ /* Enable receive of all frames */
+ unsigned int promiscuous:1;
+ /* Enable receive of all error frames */
+ unsigned int error:1;
+ unsigned int reserved:27;
+ } bits;
+};
+
+/* GMAC Configuration 0
+ * GMAC0 Offset 0xA018
+ * GMAC1 Offset 0xE018
+ */
+union gmac_config0 {
+ unsigned int bits32;
+ struct bit1_0018 {
+ /* 0: disable transmit */
+ unsigned int dis_tx:1;
+ /* 1: disable receive */
+ unsigned int dis_rx:1;
+ /* 2: transmit data loopback enable */
+ unsigned int loop_back:1;
+ /* 3: flow control also trigged by Rx queues */
+ unsigned int flow_ctrl:1;
+ /* 4-7: adjust IFG from 96+/-56 */
+ unsigned int adj_ifg:4;
+ /* 8-10 maximum receive frame length allowed */
+ unsigned int max_len:3;
+ /* 11: disable back-off function */
+ unsigned int dis_bkoff:1;
+ /* 12: disable 16 collisions abort function */
+ unsigned int dis_col:1;
+ /* 13: speed up timers in simulation */
+ unsigned int sim_test:1;
+ /* 14: RX flow control enable */
+ unsigned int rx_fc_en:1;
+ /* 15: TX flow control enable */
+ unsigned int tx_fc_en:1;
+ /* 16: RGMII in-band status enable */
+ unsigned int rgmii_en:1;
+ /* 17: IPv4 RX Checksum enable */
+ unsigned int ipv4_rx_chksum:1;
+ /* 18: IPv6 RX Checksum enable */
+ unsigned int ipv6_rx_chksum:1;
+ /* 19: Remove Rx VLAN tag */
+ unsigned int rx_tag_remove:1;
+ /* 20 */
+ unsigned int rgmm_edge:1;
+ /* 21 */
+ unsigned int rxc_inv:1;
+ /* 22 */
+ unsigned int ipv6_exthdr_order:1;
+ /* 23 */
+ unsigned int rx_err_detect:1;
+ /* 24 */
+ unsigned int port0_chk_hwq:1;
+ /* 25 */
+ unsigned int port1_chk_hwq:1;
+ /* 26 */
+ unsigned int port0_chk_toeq:1;
+ /* 27 */
+ unsigned int port1_chk_toeq:1;
+ /* 28 */
+ unsigned int port0_chk_classq:1;
+ /* 29 */
+ unsigned int port1_chk_classq:1;
+ /* 30, 31 */
+ unsigned int reserved:2;
+ } bits;
+};
+
+#define CONFIG0_TX_RX_DISABLE (BIT(1) | BIT(0))
+#define CONFIG0_RX_CHKSUM (BIT(18) | BIT(17))
+#define CONFIG0_FLOW_RX BIT(14)
+#define CONFIG0_FLOW_TX BIT(15)
+#define CONFIG0_FLOW_TX_RX (BIT(14) | BIT(15))
+#define CONFIG0_FLOW_CTL (BIT(14) | BIT(15))
+
+#define CONFIG0_MAXLEN_SHIFT 8
+#define CONFIG0_MAXLEN_MASK (7 << CONFIG0_MAXLEN_SHIFT)
+#define CONFIG0_MAXLEN_1536 0
+#define CONFIG0_MAXLEN_1518 1
+#define CONFIG0_MAXLEN_1522 2
+#define CONFIG0_MAXLEN_1542 3
+#define CONFIG0_MAXLEN_9k 4 /* 9212 */
+#define CONFIG0_MAXLEN_10k 5 /* 10236 */
+#define CONFIG0_MAXLEN_1518__6 6
+#define CONFIG0_MAXLEN_1518__7 7
+
+/* GMAC Configuration 1
+ * GMAC0 Offset 0xA01C
+ * GMAC1 Offset 0xE01C
+ */
+union gmac_config1 {
+ unsigned int bits32;
+ struct bit1_001c {
+ /* Flow control set threshold */
+ unsigned int set_threshold:8;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:8;
+ unsigned int reserved:16;
+ } bits;
+};
+
+#define GMAC_FLOWCTRL_SET_MAX 32
+#define GMAC_FLOWCTRL_SET_MIN 0
+#define GMAC_FLOWCTRL_RELEASE_MAX 32
+#define GMAC_FLOWCTRL_RELEASE_MIN 0
+
+/* GMAC Configuration 2
+ * GMAC0 Offset 0xA020
+ * GMAC1 Offset 0xE020
+ */
+union gmac_config2 {
+ unsigned int bits32;
+ struct bit1_0020 {
+ /* Flow control set threshold */
+ unsigned int set_threshold:16;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:16;
+ } bits;
+};
+
+/* GMAC Configuration 3
+ * GMAC0 Offset 0xA024
+ * GMAC1 Offset 0xE024
+ */
+union gmac_config3 {
+ unsigned int bits32;
+ struct bit1_0024 {
+ /* Flow control set threshold */
+ unsigned int set_threshold:16;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:16;
+ } bits;
+};
+
+/* GMAC STATUS
+ * GMAC0 Offset 0xA02C
+ * GMAC1 Offset 0xE02C
+ */
+union gmac_status {
+ unsigned int bits32;
+ struct bit1_002c {
+ /* Link status */
+ unsigned int link:1;
+ /* Link speed (00->2.5M, 01->25M, 10->125M) */
+ unsigned int speed:2;
+ /* Duplex mode */
+ unsigned int duplex:1;
+ unsigned int reserved_1:1;
+ /* PHY interface type */
+ unsigned int mii_rmii:2;
+ unsigned int reserved_2:25;
+ } bits;
+};
+
+#define GMAC_SPEED_10 0
+#define GMAC_SPEED_100 1
+#define GMAC_SPEED_1000 2
+
+#define GMAC_PHY_MII 0
+#define GMAC_PHY_GMII 1
+#define GMAC_PHY_RGMII_100_10 2
+#define GMAC_PHY_RGMII_1000 3
+
+/* Queue Header
+ * (1) TOE Queue Header
+ * (2) Non-TOE Queue Header
+ * (3) Interrupt Queue Header
+ *
+ * Memory layout
+ * TOE Queue Header
+ * 0x60003000 +---------------------------+ 0x0000
+ * | TOE Queue 0 Header |
+ * | 8 * 4 Bytes |
+ * +---------------------------+ 0x0020
+ * | TOE Queue 1 Header |
+ * | 8 * 4 Bytes |
+ * +---------------------------+ 0x0040
+ * | ...... |
+ * | |
+ * +---------------------------+
+ *
+ * Non TOE Queue Header
+ * 0x60002000 +---------------------------+ 0x0000
+ * | Default Queue 0 Header |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x0008
+ * | Default Queue 1 Header |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x0010
+ * | Classification Queue 0 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Classification Queue 1 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ (n * 8 + 0x10)
+ * | ... |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ (13 * 8 + 0x10)
+ * | Classification Queue 13 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x80
+ * | Interrupt Queue 0 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 1 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 2 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 3 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ *
+ */
+#define TOE_QUEUE_HDR_ADDR(n) (TOE_TOE_QUE_HDR_BASE + (n) * 32)
+#define TOE_Q_HDR_AREA_END (TOE_QUEUE_HDR_ADDR(TOE_TOE_QUEUE_MAX + 1))
+#define TOE_DEFAULT_Q_HDR_BASE(x) (TOE_NONTOE_QUE_HDR_BASE + 0x08 * (x))
+#define TOE_CLASS_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x10)
+#define TOE_INTR_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x80)
+#define INTERRUPT_QUEUE_HDR_ADDR(n) (TOE_INTR_Q_HDR_BASE + (n) * 8)
+#define NONTOE_Q_HDR_AREA_END (INTERRUPT_QUEUE_HDR_ADDR(TOE_INTR_QUEUE_MAX + 1))
+
+/* NONTOE Queue Header Word 0 */
+union nontoe_qhdr0 {
+ unsigned int bits32;
+ unsigned int base_size;
+};
+
+#define NONTOE_QHDR0_BASE_MASK (~0x0f)
+
+/* NONTOE Queue Header Word 1 */
+union nontoe_qhdr1 {
+ unsigned int bits32;
+ struct bit_nonqhdr1 {
+ /* bit 15:0 */
+ unsigned int rptr:16;
+ /* bit 31:16 */
+ unsigned int wptr:16;
+ } bits;
+};
+
+/* Non-TOE Queue Header */
+struct nontoe_qhdr {
+ union nontoe_qhdr0 word0;
+ union nontoe_qhdr1 word1;
+};
+
+#endif /* _GEMINI_ETHERNET_H */
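Each register union in this header pairs a raw bits32 word with a named
bitfield view, so a single readl() can be decoded field by field. A hedged
sketch of how union gmac_status would typically be consumed (the port and
netdev variables are assumed from the driver above):

    union gmac_status status;

    status.bits32 = readl(port->gmac_base + GMAC_STATUS);
    if (status.bits.link)
            netdev_info(netdev, "link up, speed code %u, %s duplex\n",
                        status.bits.speed,
                        status.bits.duplex ? "full" : "half");

Since C leaves bitfield ordering implementation-defined, layouts like this
only hold for the little-endian ABI the Gemini SoC is built for; the bits32
member remains the escape hatch for raw, portable accesses.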
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 02dd5246dfae..1a49297224ed 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -103,7 +103,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
int i;
- int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
+ int num_entries = ARRAY_SIZE(cmd_priv_map);
u32 cmd_privileges = adapter->cmd_privileges;
for (i = 0; i < num_entries; i++)
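The one-line change above swaps an open-coded element count for the kernel's
ARRAY_SIZE() macro, which divides sizeof(arr) by the size of one element and,
in the kernel's definition, also fails the build if the argument is a pointer
rather than an array. A minimal user-space analogue of the idiom:

    #include <stdio.h>

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    static const int cmd_map[] = { 1, 2, 3, 5, 8 };

    int main(void)
    {
            /* Prints 5; the count tracks the array definition */
            printf("%zu entries\n", ARRAY_SIZE(cmd_map));
            return 0;
    }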
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c6e859a27ee6..c36c81959198 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,6 +4634,14 @@ int be_update_queues(struct be_adapter *adapter)
be_schedule_worker(adapter);
+ /* The IF was destroyed and re-created. We need to clear
+ * all promiscuous flags valid for the destroyed IF.
+ * Without this promisc mode is not restored during
+ * be_open() because the driver thinks that it is
+ * already enabled in HW.
+ */
+ adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+
if (netif_running(netdev))
status = be_open(netdev);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5385074b3b7d..e7381f8ef89d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -20,7 +20,8 @@
#include <linux/timecounter.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -195,7 +196,7 @@
* Evidently, ARM SoCs have the FEC block generated in a
* little endian mode so adjust endianness accordingly.
*/
-#if defined(CONFIG_ARM)
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
#define fec32_to_cpu le32_to_cpu
#define fec16_to_cpu le16_to_cpu
#define cpu_to_fec32 cpu_to_le32
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8184d2fca9be..7a7f3a42b2aa 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -195,7 +195,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -1868,6 +1869,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
ret = clk_prepare_enable(fep->clk_ref);
if (ret)
goto failed_clk_ref;
+
+ phy_reset_after_clk_enable(ndev->phydev);
} else {
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_enet_out);
@@ -2107,7 +2110,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
/* List of registers that can safely be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2840,6 +2844,7 @@ fec_enet_open(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ bool reset_again;
ret = pm_runtime_get_sync(&fep->pdev->dev);
if (ret < 0)
@@ -2850,6 +2855,17 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto clk_enable;
+ /* During the first fec_enet_open call the PHY isn't probed yet at
+ * this point, so the phy_reset_after_clk_enable() call within
+ * fec_enet_clk_enable() fails. As we need this reset to be sure the
+ * PHY is working correctly, we check whether we need to reset again
+ * later, once the PHY has been probed.
+ */
+ if (ndev->phydev && ndev->phydev->drv)
+ reset_again = false;
+ else
+ reset_again = true;
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -2866,6 +2882,12 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto err_enet_mii_probe;
+ /* Call phy_reset_after_clk_enable() again if the earlier call in
+ * fec_enet_clk_enable() failed because the PHY wasn't probed yet.
+ */
+ if (reset_again)
+ phy_reset_after_clk_enable(ndev->phydev);
+
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
@@ -3119,7 +3141,7 @@ static int fec_enet_init(struct net_device *ndev)
unsigned dsize_log2 = __fls(dsize);
WARN_ON(dsize != (1 << dsize_log2));
-#if defined(CONFIG_ARM)
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
fep->rx_align = 0xf;
fep->tx_align = 0xf;
#else
@@ -3469,6 +3491,10 @@ fec_probe(struct platform_device *pdev)
goto failed_regulator;
}
} else {
+ if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto failed_regulator;
+ }
fep->reg_phy = NULL;
}
@@ -3552,8 +3578,9 @@ failed_clk_ipg:
failed_clk:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
-failed_phy:
of_node_put(phy_node);
+failed_phy:
+ dev_id--;
failed_ioremap:
free_netdev(ndev);
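The added check restores the usual optional-regulator pattern:
-EPROBE_DEFER has to be propagated so the probe is retried once the supply's
provider shows up, while any other error is treated as "no PHY regulator,
continue without one". The general shape of that idiom, sketched with an
illustrative name:

    reg = devm_regulator_get_optional(dev, "phy");
    if (IS_ERR(reg)) {
            if (PTR_ERR(reg) == -EPROBE_DEFER)
                    return -EPROBE_DEFER;   /* provider not bound yet */
            reg = NULL;                     /* genuinely absent: optional */
    }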
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 7892f2f0c6b5..2c2976a2dda6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void fs_timeout(struct net_device *dev)
+static void fs_timeout_work(struct work_struct *work)
{
- struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
+ timeout_work);
+ struct net_device *dev = fep->ndev;
unsigned long flags;
int wake = 0;
@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
phy_stop(dev->phydev);
(*fep->ops->stop)(dev);
(*fep->ops->restart)(dev);
- phy_start(dev->phydev);
}
phy_start(dev->phydev);
@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void fs_timeout(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ schedule_work(&fep->timeout_work);
+}
+
/*-----------------------------------------------------------------------------
* generic link-change handler - should be sufficient for most cases
*-----------------------------------------------------------------------------*/
@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
napi_disable(&fep->napi);
+ cancel_work_sync(&fep->timeout_work);
phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
+ INIT_WORK(&fep->timeout_work, fs_timeout_work);
netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
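This conversion is the standard recipe for ndo_tx_timeout handlers that need
to sleep (phy_stop() and phy_start() may): the timeout callback, which runs
in atomic context, only schedules a work item; the work item performs the
actual restart; and close cancels it synchronously so the work can never
race a torn-down device. In outline, using the fields and callbacks the
patch itself adds:

    /* probe */
    INIT_WORK(&fep->timeout_work, fs_timeout_work);

    /* ndo_tx_timeout: atomic context, so just defer */
    static void fs_timeout(struct net_device *dev)
    {
            struct fs_enet_private *fep = netdev_priv(dev);

            schedule_work(&fep->timeout_work);
    }

    /* ndo_stop */
    cancel_work_sync(&fep->timeout_work);

Note the patch also drops the duplicated phy_start() call so the restart
path starts the PHY exactly once.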
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 92e06b37a199..195fae6aec4a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -125,6 +125,7 @@ struct fs_enet_private {
spinlock_t lock; /* during all ops except TX pckt processing */
spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
struct fs_platform_info *fpi;
+ struct work_struct timeout_work;
const struct fs_ops *ops;
int rx_ring, tx_ring;
dma_addr_t ring_mem_addr;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7f837006bb6a..3bdeb295514b 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2932,7 +2932,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
struct sk_buff *skb, bool first)
{
- unsigned int size = lstatus & BD_LENGTH_MASK;
+ int size = lstatus & BD_LENGTH_MASK;
struct page *page = rxb->page;
bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
@@ -2947,11 +2947,16 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
if (last)
size -= skb->len;
- /* in case the last fragment consisted only of the FCS */
+ /* Add the last fragment if it contains something other than
+ * the FCS, otherwise drop it and trim off any part of the FCS
+ * that was already received.
+ */
if (size > 0)
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
rxb->page_offset + RXBUF_ALIGNMENT,
size, GFAR_RXB_TRUESIZE);
+ else if (size < 0)
+ pskb_trim(skb, skb->len + size);
}
/* try reuse page */
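The type change from unsigned int to int is what makes the new branch
reachable: after size -= skb->len on the last buffer descriptor, a negative
size means the skb has already absorbed -size bytes of the FCS from earlier
fragments. For example, with size == -2, pskb_trim(skb, skb->len + size)
shortens the skb by exactly those two FCS bytes; with the old unsigned type
the same subtraction would wrap to a huge positive value, the size > 0 test
would pass, and a bogus fragment would be appended.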
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 544114281ea7..9f8d4f8e57e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
now = tmr_cnt_read(etsects);
now += delta;
tmr_cnt_write(etsects, now);
+ set_fipers(etsects);
spin_unlock_irqrestore(&etsects->lock, flags);
- set_fipers(etsects);
-
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 30000b6aa7b8..8bcf470ff5f3 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -94,15 +94,6 @@ config HNS3_HCLGE
compatibility layer. The engine would be used in Hisilicon hip08 family of
SoCs and further upcoming SoCs.
-config HNS3_ENET
- tristate "Hisilicon HNS3 Ethernet Device Support"
- depends on 64BIT && PCI
- depends on HNS3 && HNS3_HCLGE
- ---help---
- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
- family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
- devices and their associated operations.
-
config HNS3_DCB
bool "Hisilicon HNS3 Data Center Bridge Support"
default n
@@ -112,4 +103,23 @@ config HNS3_DCB
If unsure, say N.
+config HNS3_HCLGEVF
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
+ depends on HNS3
+ depends on HNS3_HCLGE
+ ---help---
+ This selects the HNS3 VF driver's network acceleration engine & its hardware
+ compatibility layer. The engine would be used in Hisilicon hip08 family of
+ SoCs and further upcoming SoCs.
+
+config HNS3_ENET
+ tristate "Hisilicon HNS3 Ethernet Device Support"
+ depends on 64BIT && PCI
+ depends on HNS3
+ ---help---
+ This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
+ family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
+ devices and their associated operations.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8b5cdf490850..cac86e9ae0dd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1168,7 +1168,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
- if (!mac_cb || !mac_cb->cpld_ctrl)
+ if (!mac_cb)
return 0;
return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 408b63faf9a8..acf29633ec79 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -18,6 +18,7 @@ enum _dsm_op_index {
HNS_OP_LED_SET_FUNC = 0x3,
HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
HNS_OP_GET_SFP_STAT_FUNC = 0x5,
+ HNS_OP_LOCATE_LED_SET_FUNC = 0x6,
};
enum _dsm_rst_type {
@@ -43,12 +44,17 @@ static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
{
- u32 ret;
-
- if (dsaf_dev->sub_ctrl)
- ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
- else
+ u32 ret = 0;
+ int err;
+
+ if (dsaf_dev->sub_ctrl) {
+ err = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg, &ret);
+ if (err)
+ dev_err(dsaf_dev->dev, "dsaf_read_syscon error %d!\n",
+ err);
+ } else {
ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+ }
return ret;
}
@@ -81,6 +87,33 @@ static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
ACPI_FREE(obj);
}
+static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
+ u8 op_type, u32 locate,
+ u32 port)
+{
+ union acpi_object obj_args[2], argv4;
+ union acpi_object *obj;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = locate;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n",
+ locate, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
u16 speed, int data)
{
@@ -160,15 +193,23 @@ static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
+ u32 val = 0;
+ int ret;
+
+ if (!mac_cb->cpld_ctrl)
+ return 0;
+
switch (status) {
case HNAE_LED_ACTIVE:
- mac_cb->cpld_led_value =
- dsaf_read_syscon(mac_cb->cpld_ctrl,
- mac_cb->cpld_ctrl_reg);
- dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
- CPLD_LED_ON_VALUE);
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ &val);
+ if (ret)
+ return ret;
+
+ dsaf_set_bit(val, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE);
dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
- mac_cb->cpld_led_value);
+ val);
+ mac_cb->cpld_led_value = val;
break;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
@@ -184,6 +225,30 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
return 0;
}
+static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_ON_VALUE,
+ mac_cb->mac_id);
+ break;
+ case HNAE_LED_INACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_DEFAULT_VALUE,
+ mac_cb->mac_id);
+ break;
+ default:
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
#define RESET_REQ_OR_DREQ 1
static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
@@ -505,12 +570,19 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
+ u32 val = 0;
+ int ret;
+
if (!mac_cb->cpld_ctrl)
return -ENODEV;
- *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
- + MAC_SFP_PORT_OFFSET);
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg + MAC_SFP_PORT_OFFSET,
+ &val);
+ if (ret)
+ return ret;
+ *sfp_prsnt = !val;
return 0;
}
@@ -560,7 +632,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
- int sfp_prsnt;
+ int sfp_prsnt = 0;
int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
if (!mac_cb->phy_dev) {
@@ -572,7 +644,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
}
if (mac_cb->serdes_ctrl) {
- u32 origin;
+ u32 origin = 0;
if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
#define HILINK_ACCESS_SEL_CFG 0x40008
@@ -589,7 +661,10 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
HILINK_ACCESS_SEL_CFG, 3);
}
- origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+ ret = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset,
+ &origin);
+ if (ret)
+ return ret;
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
@@ -660,7 +735,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
} else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
misc_op->cpld_set_led = hns_cpld_set_led_acpi;
misc_op->cpld_reset_led = cpld_led_reset_acpi;
- misc_op->cpld_set_led_id = cpld_set_led_id;
+ misc_op->cpld_set_led_id = cpld_set_led_id_acpi;
misc_op->dsaf_reset = hns_dsaf_rst_acpi;
misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 46a52d9bb196..886cbbf25761 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1034,12 +1034,9 @@ static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
regmap_write(base, reg, value);
}
-static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
{
- unsigned int val;
-
- regmap_read(base, reg, &val);
- return val;
+ return regmap_read(base, reg, val);
}
#define dsaf_read_dev(a, reg) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index a9349e1f3e51..002534f12b66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -1,7 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
obj-$(CONFIG_HNS3) += hns3pf/
+obj-$(CONFIG_HNS3) += hns3vf/
obj-$(CONFIG_HNS3) += hnae3.o
+
+obj-$(CONFIG_HNS3_ENET) += hns3.o
+hns3-objs = hns3_enet.o hns3_ethtool.o
+
+hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
new file mode 100644
index 000000000000..3e9203ea42a6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_MBX_H
+#define __HCLGE_MBX_H
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define HCLGE_MBX_VF_MSG_DATA_NUM 16
+
+enum HCLGE_MBX_OPCODE {
+ HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
+ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */
+ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */
+ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */
+ HCLGE_MBX_MAP_RING_TO_VECTOR, /* (VF -> PF) map ring-to-vector */
+ HCLGE_MBX_UNMAP_RING_TO_VECTOR, /* (VF -> PF) unmap ring-to-vector */
+ HCLGE_MBX_SET_PROMISC_MODE, /* (VF -> PF) set promiscuous mode */
+ HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */
+ HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
+ HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
+ HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */
+ HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
+ HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
+ HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
+ HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
+ HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
+ HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
+ HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
+ HCLGE_MBX_SET_AESTART, /* (VF -> PF) start ae */
+ HCLGE_MBX_SET_TSOSTATS, /* (VF -> PF) get tso stats */
+ HCLGE_MBX_LINK_STAT_CHANGE, /* (PF -> VF) link status has changed */
+ HCLGE_MBX_GET_BASE_CONFIG, /* (VF -> PF) get config */
+ HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
+ HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
+ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+};
+
+/* below are per-VF mac-vlan subcodes */
+enum hclge_mbx_mac_vlan_subcode {
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0, /* modify UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_ADD, /* add a new UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE, /* remove a UC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
+};
+
+/* below are per-VF vlan cfg subcodes */
+enum hclge_mbx_vlan_cfg_subcode {
+ HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
+ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
+ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
+};
+
+#define HCLGE_MBX_MAX_MSG_SIZE 16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
+
+struct hclgevf_mbx_resp_status {
+ struct mutex mbx_mutex; /* protects against contending sync cmd resp */
+ u32 origin_mbx_msg;
+ bool received_resp;
+ int resp_status;
+ u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_mbx_vf_to_pf_cmd {
+ u8 rsv;
+ u8 mbx_src_vfid; /* Auto filled by IMP */
+ u8 rsv1[2];
+ u8 msg_len;
+ u8 rsv2[3];
+ u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+};
+
+struct hclge_mbx_pf_to_vf_cmd {
+ u8 dest_vfid;
+ u8 rsv[3];
+ u8 msg_len;
+ u8 rsv1[3];
+ u16 msg[8];
+};
+
+#define hclge_mbx_ring_ptr_move_crq(crq) \
+ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#endif
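[Editor's note] For readers skimming the new mailbox header, a minimal sketch of how a VF-side caller might fill a hclge_mbx_vf_to_pf_cmd before queuing it. The helper name and the send step are hypothetical; only the struct layout and the HCLGE_MBX_* opcodes come from the header above.

#include <linux/string.h>
#include "hclge_mbx.h"

/* Illustrative sketch only: example_build_mbx_msg() is hypothetical;
 * the struct layout and opcodes are from hclge_mbx.h above.
 */
static void example_build_mbx_msg(struct hclge_mbx_vf_to_pf_cmd *req,
				  u8 code, u8 subcode,
				  const u8 *data, u8 len)
{
	memset(req, 0, sizeof(*req));
	req->msg[0] = code;	/* e.g. HCLGE_MBX_SET_VLAN */
	req->msg[1] = subcode;	/* e.g. HCLGE_MBX_VLAN_FILTER */
	if (data && len <= HCLGE_MBX_MAX_MSG_SIZE - 2)
		memcpy(&req->msg[2], data, len);
	req->msg_len = len + 2;	/* code + subcode + payload */
	/* mbx_src_vfid is auto-filled by the IMP, not by the driver */
}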
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 5bcb2238acb2..02145f2de820 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -196,9 +196,18 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0;
+ int ret = 0, lock_acquired;
+
+ /* We can get deadlocked if SRIOV is being enabled in the probe context
+ * and probe gets called again in the same context. This can happen when
+ * pci_enable_sriov() is called to create VFs from the PF's probe context.
+ * Therefore, for simplicity, we uniformly defer further probing in all
+ * cases where we detect contention.
+ */
+ lock_acquired = mutex_trylock(&hnae3_common_lock);
+ if (!lock_acquired)
+ return -EPROBE_DEFER;
- mutex_lock(&hnae3_common_lock);
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
/* Check if there are matched ae_algo */
@@ -211,6 +220,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+ ret = -EOPNOTSUPP;
goto out_err;
}
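[Editor's note] The hunk above replaces an unconditional mutex_lock() with a trylock that bails out with -EPROBE_DEFER, so the driver core retries the probe instead of deadlocking. A generic sketch of that pattern; everything except mutex_trylock() and -EPROBE_DEFER is a placeholder.

#include <linux/errno.h>
#include <linux/mutex.h>

static int example_register(struct mutex *lock)
{
	if (!mutex_trylock(lock))
		return -EPROBE_DEFER;	/* contended: let the core retry probe */

	/* ... registration work done under the lock ... */

	mutex_unlock(lock);
	return 0;
}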
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 67c59e1039f2..fd06bc78c58e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -133,11 +133,16 @@ struct hnae3_vector_info {
#define HNAE3_RING_TYPE_B 0
#define HNAE3_RING_TYPE_TX 0
#define HNAE3_RING_TYPE_RX 1
+#define HNAE3_RING_GL_IDX_S 0
+#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
+#define HNAE3_RING_GL_RX 0
+#define HNAE3_RING_GL_TX 1
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
u32 flag;
+ u32 int_gl_idx;
};
#define HNAE3_IS_TX_RING(node) \
@@ -274,10 +279,14 @@ struct hnae3_ae_dev {
* Get firmware version
* get_mdix_mode()
* Get media type of phy
+ * enable_vlan_filter()
+ * Enable vlan filter
* set_vlan_filter()
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ * Enable/disable hardware strip vlan tag of packets received
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -347,7 +356,8 @@ struct hnae3_ae_ops {
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
- void (*get_regs)(struct hnae3_handle *handle, void *data);
+ void (*get_regs)(struct hnae3_handle *handle, u32 *version,
+ void *data);
int (*get_regs_len)(struct hnae3_handle *handle);
u32 (*get_rss_key_size)(struct hnae3_handle *handle);
@@ -380,12 +390,23 @@ struct hnae3_ae_ops {
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
+ void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto);
+ int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct hnae3_handle *handle,
enum hnae3_reset_type reset);
+ void (*get_channels)(struct hnae3_handle *handle,
+ struct ethtool_channels *ch);
+ void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+ u16 *free_tqps, u16 *max_rss_size);
+ int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+ void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+ u32 *flowctrl_adv);
+ int (*set_led_id)(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status);
};
struct hnae3_dcb_ops {
@@ -435,6 +456,8 @@ struct hnae3_knic_private_info {
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
const struct hnae3_dcb_ops *dcb_ops;
+
+ u16 int_rl_setting;
};
struct hnae3_roce_private_info {
@@ -452,9 +475,10 @@ struct hnae3_unic_private_info {
struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
};
-#define HNAE3_SUPPORT_MAC_LOOPBACK 1
-#define HNAE3_SUPPORT_PHY_LOOPBACK 2
-#define HNAE3_SUPPORT_SERDES_LOOPBACK 4
+#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0)
+#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
+#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_VF BIT(3)
struct hnae3_handle {
struct hnae3_client *client;
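[Editor's note] Two small conventions appear in the hnae3.h changes above: the per-ring GL index is packed with a GENMASK()/shift pair, and the loopback/VF capabilities become BIT() flags. As a rough sketch of the mask/shift semantics the driver's hnae_set_field() helper is assumed to implement (the helper itself is defined elsewhere in the driver):

#include <linux/bits.h>
#include <linux/types.h>

/* Assumed semantics of a mask/shift field setter like hnae_set_field() */
#define example_set_field(origin, mask, shift, val) \
	((origin) = ((origin) & ~(mask)) | (((u32)(val) << (shift)) & (mask)))

static void example_mark_rx_ring(u32 *int_gl_idx)
{
	/* store HNAE3_RING_GL_RX (0) in bits 1:0 of *int_gl_idx */
	example_set_field(*int_gl_idx, HNAE3_RING_GL_IDX_M,
			  HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
}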
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 925619a7c50a..eb82700da7d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -93,7 +93,7 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle)
{
struct net_device *dev = handle->kinfo.netdev;
- if (!handle->kinfo.dcb_ops)
+ if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF))
return;
dev->dcbnl_ops = &hns3_dcbnl_ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 59415090ff0f..601b6295d3f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -52,6 +52,8 @@ static const struct pci_device_id hns3_pci_tbl[] = {
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
/* required last entry */
{0, }
};
@@ -156,43 +158,68 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
napi_disable(&tqp_vector->napi);
}
-static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 gl_value)
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value)
{
- /* this defines the configuration for GL (Interrupt Gap Limiter)
- * GL defines inter interrupt gap.
- * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
- */
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
-}
+ u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
-static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 rl_value)
-{
/* this defines the configuration for RL (Interrupt Rate Limiter).
 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
 * GL and RL are two ways to achieve interrupt coalescing.
 */
- writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+
+ if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
+ !tqp_vector->rx_group.gl_adapt_enable)
+ /* According to the hardware, the range of rl_reg is
+ * 0-59 and the unit is 4.
+ */
+ rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+ writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
-static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
{
+ struct hnae3_handle *h = priv->ae_handle;
+
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
*/
- /* Default :enable interrupt coalesce */
- tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+ /* Default: enable self-adaptive interrupt coalescing and GL */
+ tqp_vector->tx_group.gl_adapt_enable = 1;
+ tqp_vector->rx_group.gl_adapt_enable = 1;
+
tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
- hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
- /* for now we are disabling Interrupt RL - we
- * will re-enable later
- */
- hns3_set_vector_coalesc_rl(tqp_vector, 0);
+ tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tqp_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ tqp_vector->rx_group.int_gl);
+
+ /* Default: disable RL */
+ h->kinfo.int_rl_setting = 0;
+ hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}
@@ -245,6 +272,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
goto out_start_err;
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
return 0;
out_start_err:
@@ -284,6 +313,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
const struct hnae3_ae_ops *ops;
int i;
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -721,6 +753,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
+static int hns3_fill_desc_vtags(struct sk_buff *skb,
+ struct hns3_enet_ring *tx_ring,
+ u32 *inner_vlan_flag,
+ u32 *out_vlan_flag,
+ u16 *inner_vtag,
+ u16 *out_vtag)
+{
+#define HNS3_TX_VLAN_PRIO_SHIFT 13
+
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->tqp->handle->kinfo.netdev->features &
+ NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off, and the stack
+ * sets the protocol to 802.1q, the driver just needs to
+ * set the protocol to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag;
+
+ vlan_tag = skb_vlan_tag_get(skb);
+ vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
+
+ /* Based on hw strategy, use out_vtag in the two-layer tag case,
+ * and use inner_vtag in the single-tag case.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ *out_vtag = vlan_tag;
+ } else {
+ hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ *inner_vtag = vlan_tag;
+ }
+ } else if (skb->protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ rc = skb_cow_head(skb, 0);
+ if (rc < 0)
+ return rc;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
+ << HNS3_TX_VLAN_PRIO_SHIFT);
+ }
+
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+}
+
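[Editor's note] For context on the HNS3_TX_VLAN_PRIO_SHIFT value used above: in the 802.1Q TCI, the 3-bit priority (PCP) sits in bits 15:13 above the 12-bit VID, hence the shift of 13. A worked example, not driver code:

static u16 example_build_tci(u16 vid, u8 prio)
{
	/* TCI layout: PCP (bits 15:13) | DEI (bit 12) | VID (bits 11:0) */
	return ((u16)(prio & 0x7) << 13) | (vid & 0xFFF);
	/* e.g. prio 5, vid 100 -> 0xA064 */
}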
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
enum hns_desc_type type)
@@ -731,6 +815,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 bdtp_fe_sc_vld_ra_ri = 0;
u32 type_cs_vlan_tso = 0;
struct sk_buff *skb;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
__be16 protocol;
@@ -754,15 +840,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
skb = (struct sk_buff *)priv;
paylen = skb->len;
+ ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec,
+ &inner_vtag, &out_vtag);
+ if (unlikely(ret))
+ return ret;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
protocol = skb->protocol;
- /* vlan packet*/
- if (protocol == htons(ETH_P_8021Q)) {
- protocol = vlan_get_protocol(skb);
- skb->protocol = protocol;
- }
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
return ret;
@@ -788,6 +875,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
}
/* move ring pointer to next.*/
@@ -1029,25 +1118,50 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
+ netdev_features_t changed = netdev->features ^ features;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret;
- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
- priv->ops.fill_desc = hns3_fill_desc_tso;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- } else {
- priv->ops.fill_desc = hns3_fill_desc;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ priv->ops.fill_desc = hns3_fill_desc_tso;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+ } else {
+ priv->ops.fill_desc = hns3_fill_desc;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ h->ae_algo->ops->enable_vlan_filter) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+ else
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
+ h->ae_algo->ops->enable_hw_strip_rxvtag) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
+ else
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
+
+ if (ret)
+ return ret;
}
netdev->features = features;
return 0;
}
-static void
-hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+static void hns3_nic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *handle = priv->ae_handle;
struct hns3_enet_ring *ring;
unsigned int start;
unsigned int idx;
@@ -1055,6 +1169,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
u64 rx_bytes = 0;
u64 tx_pkts = 0;
u64 rx_pkts = 0;
+ u64 tx_drop = 0;
+ u64 rx_drop = 0;
+
+ if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ handle->ae_algo->ops->update_stats(handle, &netdev->stats);
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
@@ -1063,6 +1184,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
+ tx_drop += ring->stats.tx_busy;
+ tx_drop += ring->stats.sw_err_cnt;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1071,6 +1194,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
+ rx_drop += ring->stats.non_vld_descs;
+ rx_drop += ring->stats.err_pkt_len;
+ rx_drop += ring->stats.l2_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
@@ -1086,8 +1212,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
stats->tx_errors = netdev->stats.tx_errors;
- stats->rx_dropped = netdev->stats.rx_dropped;
- stats->tx_dropped = netdev->stats.tx_dropped;
+ stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
+ stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1317,6 +1443,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+ netdev->mtu = new_mtu;
+
/* if the netdev was running earlier, bring it up again */
if (if_running && hns3_nic_net_open(netdev))
ret = -EINVAL;
@@ -1476,6 +1604,8 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1490,6 +1620,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -1503,11 +1634,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (!(h->flags & HNAE3_SUPPORT_VF))
+ netdev->hw_features |=
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2083,6 +2218,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetchw(skb->data);
+ /* Based on hw strategy, the offloaded tag is stored in
+ * ot_vlan_tag in the two-layer tag case, and in vlan_tag
+ * in the single-tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(vlan_tag & VLAN_VID_MASK))
+ vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
bnum = 1;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2301,25 +2452,22 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
- u16 rx_int_gl, tx_int_gl;
- bool rx, tx;
-
- rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
- tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
- rx_int_gl = tqp_vector->rx_group.int_gl;
- tx_int_gl = tqp_vector->tx_group.int_gl;
- if (rx && tx) {
- if (rx_int_gl > tx_int_gl) {
- tqp_vector->tx_group.int_gl = rx_int_gl;
- tqp_vector->tx_group.flow_level =
- tqp_vector->rx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
- } else {
- tqp_vector->rx_group.int_gl = tx_int_gl;
- tqp_vector->rx_group.flow_level =
- tqp_vector->tx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
- }
+ struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+ struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
+ bool rx_update, tx_update;
+
+ if (rx_group->gl_adapt_enable) {
+ rx_update = hns3_get_new_int_gl(rx_group);
+ if (rx_update)
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ rx_group->int_gl);
+ }
+
+ if (tx_group->gl_adapt_enable) {
+ tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
+ if (tx_update)
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tx_group->int_gl);
}
}
@@ -2380,6 +2528,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2395,6 +2545,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2406,6 +2560,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2419,6 +2575,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+
cur_chain = chain;
rx_ring = rx_ring->next;
@@ -2507,7 +2666,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->rx_group.total_packets = 0;
tqp_vector->tx_group.total_bytes = 0;
tqp_vector->tx_group.total_packets = 0;
- hns3_vector_gl_rl_init(tqp_vector);
+ hns3_vector_gl_rl_init(tqp_vector, priv);
tqp_vector->handle = h;
ret = hns3_get_vector_ring_chain(tqp_vector,
@@ -2649,6 +2808,19 @@ err:
return ret;
}
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
+ }
+ devm_kfree(priv->dev, priv->ring_data);
+}
+
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
int ret;
@@ -2785,8 +2957,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
h->ae_algo->ops->reset_queue(h, i);
hns3_fini_ring(priv->ring_data[i].ring);
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
}
+ devm_kfree(priv->dev, priv->ring_data);
return 0;
}
@@ -3142,8 +3318,8 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
switch (type) {
case HNAE3_UP_CLIENT:
- ret = hns3_reset_notify_up_enet(handle);
- break;
+ ret = hns3_reset_notify_up_enet(handle);
+ break;
case HNAE3_DOWN_CLIENT:
ret = hns3_reset_notify_down_enet(handle);
break;
@@ -3160,6 +3336,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
return ret;
}
+static u16 hns3_get_max_available_channels(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 free_tqps, max_rss_size, max_tqps;
+
+ h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
+ max_tqps = h->kinfo.num_tc * max_rss_size;
+
+ return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret;
+
+ ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
+ if (ret)
+ return ret;
+
+ ret = hns3_get_ring_config(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_nic_init_vector_data(priv);
+ if (ret)
+ goto err_uninit_vector;
+
+ ret = hns3_init_all_ring(priv);
+ if (ret)
+ goto err_put_ring;
+
+ return 0;
+
+err_put_ring:
+ hns3_put_ring_config(priv);
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ return ret;
+}
+
+static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
+{
+ return (new_tqp_num / num_tc) * num_tc;
+}
+
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ bool if_running = netif_running(netdev);
+ u32 new_tqp_num = ch->combined_count;
+ u16 org_tqp_num;
+ int ret;
+
+ if (ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
+ new_tqp_num < kinfo->num_tc) {
+ dev_err(&netdev->dev,
+ "Change tqps fail, the tqp range is from %d to %d",
+ kinfo->num_tc,
+ hns3_get_max_available_channels(netdev));
+ return -EINVAL;
+ }
+
+ new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
+ if (kinfo->num_tqps == new_tqp_num)
+ return 0;
+
+ if (if_running)
+ dev_close(netdev);
+
+ hns3_clear_all_ring(h);
+
+ ret = hns3_nic_uninit_vector_data(priv);
+ if (ret) {
+ dev_err(&netdev->dev,
+ "Unbind vector with tqp fail, nothing is changed");
+ goto open_netdev;
+ }
+
+ hns3_uninit_all_ring(priv);
+
+ org_tqp_num = h->kinfo.num_tqps;
+ ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+ if (ret) {
+ ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+ if (ret) {
+ /* If reverting to the old tqp num also failed, a fatal error occurred */
+ dev_err(&netdev->dev,
+ "Revert to old tqp num failed, ret=%d", ret);
+ return ret;
+ }
+ dev_info(&netdev->dev,
+ "Change tqp num failed, reverted to old tqp num");
+ }
+
+open_netdev:
+ if (if_running)
+ dev_open(netdev);
+
+ return ret;
+}
+
static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 8a9de759957b..213f501b30bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -451,10 +451,14 @@ enum hns3_link_mode_bits {
HNS3_LM_COUNT = 15
};
-#define HNS3_INT_GL_50K 0x000A
-#define HNS3_INT_GL_20K 0x0019
-#define HNS3_INT_GL_18K 0x001B
-#define HNS3_INT_GL_8K 0x003E
+#define HNS3_INT_GL_MAX 0x1FE0
+#define HNS3_INT_GL_50K 0x0014
+#define HNS3_INT_GL_20K 0x0032
+#define HNS3_INT_GL_18K 0x0036
+#define HNS3_INT_GL_8K 0x007C
+
+#define HNS3_INT_RL_MAX 0x00EC
+#define HNS3_INT_RL_ENABLE_MASK 0x40
struct hns3_enet_ring_group {
/* array of pointers to rings */
@@ -464,6 +468,7 @@ struct hns3_enet_ring_group {
u16 count;
enum hns3_flow_level_range flow_level;
u16 int_gl;
+ u8 gl_adapt_enable;
};
struct hns3_enet_tqp_vector {
@@ -594,7 +599,15 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
+
+#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
+
void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch);
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
@@ -604,6 +617,13 @@ int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value);
+
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
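[Editor's note] The new conversion macros above encode the hardware units: GL registers count in 2 us steps (hence >> 1 and rounding down to multiples of 2) and RL registers in 4 us steps (>> 2, multiples of 4). That also matches the 0-59 rl_reg range noted earlier, since HNS3_INT_RL_MAX (0xEC = 236 us) >> 2 == 59. A worked example, assuming this header is included:

static void example_usec_to_reg(void)
{
	u32 gl_reg, rl_reg;

	gl_reg = hns3_gl_usec_to_reg(hns3_gl_round_down(50));	/* 50 us -> 25 */
	rl_reg = hns3_rl_usec_to_reg(hns3_rl_round_down(238));	/* 236 us -> 59 */

	(void)gl_reg;
	(void)rl_reg;
}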
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a21470c72da3..b034c7f24eda 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -15,26 +15,25 @@
struct hns3_stats {
char stats_string[ETH_GSTRING_LEN];
- int stats_size;
int stats_offset;
};
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
- .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \
- .stats_offset = offsetof(struct hns3_enet_ring, stats), \
-} \
+ .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+ offsetof(struct ring_stats, _member), \
+}
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
- HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("tx_pkts", tx_pkts),
- HNS3_TQP_STAT("tx_bytes", tx_bytes),
- HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
- HNS3_TQP_STAT("tx_restart_queue", restart_queue),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", tx_pkts),
+ HNS3_TQP_STAT("bytes", tx_bytes),
+ HNS3_TQP_STAT("errors", tx_err_cnt),
+ HNS3_TQP_STAT("tx_wake", restart_queue),
HNS3_TQP_STAT("tx_busy", tx_busy),
};
@@ -42,18 +41,18 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
- HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("rx_pkts", rx_pkts),
- HNS3_TQP_STAT("rx_bytes", rx_bytes),
- HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
- HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
- HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
- HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
- HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
- HNS3_TQP_STAT("rx_l2_err", l2_err),
- HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", rx_pkts),
+ HNS3_TQP_STAT("bytes", rx_bytes),
+ HNS3_TQP_STAT("errors", rx_err_cnt),
+ HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
+ HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
+ HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
+ HNS3_TQP_STAT("err_bd_num", err_bd_num),
+ HNS3_TQP_STAT("l2_err", l2_err),
+ HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -389,9 +388,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
}
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
- u32 stat_count, u32 num_tqps)
+ u32 stat_count, u32 num_tqps, const char *prefix)
{
-#define MAX_PREFIX_SIZE (8 + 4)
+#define MAX_PREFIX_SIZE (6 + 4)
u32 size_left;
u32 i, j;
u32 n1;
@@ -401,7 +400,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
+ n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
+ prefix, i);
n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -417,14 +417,16 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ const char tx_prefix[] = "txq";
+ const char rx_prefix[] = "rxq";
/* get strings for Tx */
data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, tx_prefix);
/* get strings for Rx */
data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, rx_prefix);
return data;
}
@@ -455,13 +457,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hns3_enet_ring *ring;
u8 *stat;
- u32 i;
+ int i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i].ring;
- for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
@@ -469,8 +471,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Rx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
- for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
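[Editor's note] The i/j loop fix above also highlights the ring_data[] layout this file relies on: the TX ring of queue i is at index i, and its RX counterpart at index num_tqps + i. A minimal sketch of that addressing, assuming the driver's types are in scope:

static void example_get_ring_pair(struct hns3_nic_priv *priv, u16 num_tqps,
				  u16 i, struct hns3_enet_ring **tx,
				  struct hns3_enet_ring **rx)
{
	*tx = priv->ring_data[i].ring;			/* TX ring of queue i */
	*rx = priv->ring_data[i + num_tqps].ring;	/* RX ring of queue i */
}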
@@ -559,10 +561,23 @@ static void hns3_get_pauseparam(struct net_device *netdev,
&param->rx_pause, &param->tx_pause);
}
+static int hns3_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *param)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_pauseparam)
+ return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+ param->rx_pause,
+ param->tx_pause);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 flowctrl_adv = 0;
u32 supported_caps;
u32 advertised_caps;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
@@ -638,6 +653,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
if (!cmd->base.autoneg)
advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
+ advertised_caps &= ~HNS3_LM_PAUSE_BIT;
+
/* now, map driver link modes to ethtool link modes */
hns3_driv_to_eth_caps(supported_caps, cmd, false);
hns3_driv_to_eth_caps(advertised_caps, cmd, true);
@@ -650,6 +667,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
/* 4.mdio_support */
cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
+ /* 5. get flow control settings */
+ if (h->ae_algo->ops->get_flowctrl_adv)
+ h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
+
+ if (flowctrl_adv & ADVERTISED_Pause)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
+
+ if (flowctrl_adv & ADVERTISED_Asym_Pause)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+
return 0;
}
@@ -730,7 +759,7 @@ static int hns3_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
+ cmd->data = h->kinfo.rss_size;
break;
case ETHTOOL_GRXFH:
return h->ae_algo->ops->get_rss_tuple(h, cmd);
@@ -849,6 +878,241 @@ static int hns3_nway_reset(struct net_device *netdev)
return genphy_restart_aneg(phy);
}
+static void hns3_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_channels)
+ h->ae_algo->ops->get_channels(h, ch);
+}
+
+static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *cmd)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ if (queue >= queue_num) {
+ netdev_err(netdev,
+ "Invalid queue value %d! Queue max id=%d\n",
+ queue, queue_num - 1);
+ return -EINVAL;
+ }
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable;
+
+ cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl;
+ cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl;
+
+ cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+
+ return 0;
+}
+
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ return hns3_get_coalesce_per_queue(netdev, 0, cmd);
+}
+
+static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rx_gl, tx_gl;
+
+ if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
+ if (rx_gl != cmd->rx_coalesce_usecs) {
+ netdev_info(netdev,
+ "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ cmd->rx_coalesce_usecs, rx_gl);
+ }
+
+ tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
+ if (tx_gl != cmd->tx_coalesce_usecs) {
+ netdev_info(netdev,
+ "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ cmd->tx_coalesce_usecs, tx_gl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_rl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rl;
+
+ if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
+ netdev_err(netdev,
+ "tx_usecs_high must be same as rx_usecs_high.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
+ netdev_err(netdev,
+ "Invalid usecs_high value, usecs_high range is 0-%d\n",
+ HNS3_INT_RL_MAX);
+ return -EINVAL;
+ }
+
+ rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ if (rl != cmd->rx_coalesce_usecs_high) {
+ netdev_info(netdev,
+ "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+ cmd->rx_coalesce_usecs_high, rl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ int ret;
+
+ ret = hns3_check_gl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check gl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hns3_check_rl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check rl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ if (cmd->use_adaptive_tx_coalesce == 1 ||
+ cmd->use_adaptive_rx_coalesce == 1) {
+ netdev_info(netdev,
+ "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+ cmd->use_adaptive_tx_coalesce,
+ cmd->use_adaptive_rx_coalesce);
+ }
+
+ return 0;
+}
+
+static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ u32 queue)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
+ rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
+
+ hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
+
+ hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
+ hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+}
+
+static int hns3_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret;
+ int i;
+
+ ret = hns3_check_coalesce_para(netdev, cmd);
+ if (ret)
+ return ret;
+
+ h->kinfo.int_rl_setting =
+ hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+
+ for (i = 0; i < queue_num; i++)
+ hns3_set_coalesce_per_queue(netdev, cmd, i);
+
+ return 0;
+}
+
+static int hns3_get_regs_len(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_regs_len(h);
+}
+
+static void hns3_get_regs(struct net_device *netdev,
+ struct ethtool_regs *cmd, void *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs)
+ return;
+
+ h->ae_algo->ops->get_regs(h, &cmd->version, data);
+}
+
+static int hns3_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_led_id(h, state);
+}
+
+static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .get_drvinfo = hns3_get_drvinfo,
+ .get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
+ .get_strings = hns3_get_strings,
+ .get_ethtool_stats = hns3_get_stats,
+ .get_sset_count = hns3_get_sset_count,
+ .get_rxnfc = hns3_get_rxnfc,
+ .get_rxfh_key_size = hns3_get_rss_key_size,
+ .get_rxfh_indir_size = hns3_get_rss_indir_size,
+ .get_rxfh = hns3_get_rss,
+ .set_rxfh = hns3_set_rss,
+ .get_link_ksettings = hns3_get_link_ksettings,
+ .get_channels = hns3_get_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+};
+
static const struct ethtool_ops hns3_ethtool_ops = {
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
@@ -856,6 +1120,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
.get_pauseparam = hns3_get_pauseparam,
+ .set_pauseparam = hns3_set_pauseparam,
.get_strings = hns3_get_strings,
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
@@ -868,9 +1133,21 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
+ .get_channels = hns3_get_channels,
+ .set_channels = hns3_set_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .set_phys_id = hns3_set_phys_id,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &hns3_ethtool_ops;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->flags & HNAE3_SUPPORT_VF)
+ netdev->ethtool_ops = &hns3vf_ethtool_ops;
+ else
+ netdev->ethtool_ops = &hns3_ethtool_ops;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index d2b20d01a58c..cb8ddd043476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
@@ -5,11 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
-
-obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
-
-hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index ce5ed8845042..3fd10a6bec53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -102,6 +102,10 @@ enum hclge_opcode_type {
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
+
+ HCLGE_OPC_QUERY_REG_NUM = 0x0040,
+ HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
+ HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
/* Device management command */
/* MAC command */
@@ -111,6 +115,7 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314,
/* MACSEC command */
/* PFC/Pause CMD*/
@@ -180,6 +185,10 @@ enum hclge_opcode_type {
/* Promiscuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+ /* Vlan offload command */
+ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
/* Interrupts cmd */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
@@ -191,6 +200,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+ HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
/* Multicast linear table cmd */
HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
@@ -218,6 +228,9 @@ enum hclge_opcode_type {
/* Mailbox cmd */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
+
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -399,6 +412,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
#define HCLGE_CFG_DEFAULT_SPEED_S 16
#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S 24
+#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
struct hclge_cfg_param_cmd {
__le32 offset;
@@ -549,8 +564,6 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
-#define HCLGE_MAC_MIN_MTU 64
-#define HCLGE_MAC_MAX_MTU 9728
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
@@ -587,6 +600,37 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
+#define HCLGE_VLAN_MASK_EN_B 0x0
+struct hclge_mac_vlan_mask_entry_cmd {
+ u8 rsv0[2];
+ u8 vlan_mask;
+ u8 rsv1;
+ u8 mac_mask[6];
+ u8 rsv2[14];
+};
+
+#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
+#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1)
+#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
+#define HCLGE_MAC_ETHERTYPE_LLDP 0x88cc
+
+struct hclge_mac_mgr_tbl_entry_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ __le32 mac_addr_hi32;
+ __le16 mac_addr_lo16;
+ __le16 rsv1;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ u8 sw_port_id_aware;
+ u8 rsv2;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rsv3[2];
+};
+
#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
#define HCLGE_CFG_MTA_MAC_EN_B 0x7
@@ -658,6 +702,47 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
+#define HCLGE_ACCEPT_TAG_B 0
+#define HCLGE_ACCEPT_UNTAG_B 1
+#define HCLGE_PORT_INS_TAG1_EN_B 2
+#define HCLGE_PORT_INS_TAG2_EN_B 3
+#define HCLGE_CFG_NIC_ROCE_SEL_B 4
+struct hclge_vport_vtag_tx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[2];
+ __le16 def_vlan_tag1;
+ __le16 def_vlan_tag2;
+ u8 vf_bitmap[8];
+ u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B 0
+#define HCLGE_REM_TAG2_EN_B 1
+#define HCLGE_SHOW_TAG1_EN_B 2
+#define HCLGE_SHOW_TAG2_EN_B 3
+struct hclge_vport_vtag_rx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[6];
+ u8 vf_bitmap[8];
+ u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+ __le16 ot_vlan_type;
+ __le16 in_vlan_type;
+ u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+ __le16 ot_fst_vlan_type;
+ __le16 ot_sec_vlan_type;
+ __le16 in_fst_vlan_type;
+ __le16 in_sec_vlan_type;
+ u8 rsv[16];
+};
+
struct hclge_cfg_com_tqp_queue_cmd {
__le16 tqp_id;
__le16 stream_id;
@@ -726,6 +811,23 @@ struct hclge_reset_cmd {
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
+#define HCLGE_LED_PORT_SPEED_STATE_S 0
+#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0)
+#define HCLGE_LED_ACTIVITY_STATE_S 0
+#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0)
+#define HCLGE_LED_LINK_STATE_S 0
+#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0)
+#define HCLGE_LED_LOCATE_STATE_S 0
+#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
+
+struct hclge_set_led_state_cmd {
+ u8 port_speed_led_config;
+ u8 link_led_config;
+ u8 activity_led_config;
+ u8 locate_led_config;
+ u8 rsv[20];
+};
+
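[Editor's note] A hedged sketch of how the new LED command might be issued, mirroring the desc setup/send pattern used elsewhere in this driver (see hclge_mac_get_traffic_stats() below). The "locate on" value of 1 and the use of hnae_set_field() on this register are assumptions, not confirmed by this patch.

static int example_set_locate_led(struct hclge_dev *hdev, bool on)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
	req = (struct hclge_set_led_state_cmd *)desc.data;
	/* pack the 2-bit locate state field; 1 == "locate on" (assumed) */
	hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
		       HCLGE_LED_LOCATE_STATE_S, on ? 1 : 0);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}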
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 59ed806a52c3..32bc6f68e297 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -17,10 +17,12 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-
+#include <linux/if_vlan.h>
+#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
+#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"
@@ -34,8 +36,10 @@
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
enum hclge_mta_dmac_sel_type mta_mac_sel,
bool enable);
+static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_update_led_status(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -278,8 +282,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
{"mac_tx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
+ {"mac_tx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
{"mac_tx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
{"mac_tx_65_127_oct_pkt_num",
@@ -292,8 +296,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
{"mac_tx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
+ {"mac_tx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+ {"mac_tx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+ {"mac_tx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+ {"mac_tx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
+ {"mac_tx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+ {"mac_tx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+ {"mac_tx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+ {"mac_tx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+ {"mac_tx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
{"mac_rx_total_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
{"mac_rx_total_oct_num",
@@ -314,8 +334,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
{"mac_rx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
+ {"mac_rx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
{"mac_rx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
{"mac_rx_65_127_oct_pkt_num",
@@ -328,33 +348,59 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
{"mac_rx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
-
- {"mac_trans_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
- {"mac_trans_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
- {"mac_trans_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
- {"mac_trans_err_all_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
- {"mac_trans_from_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
- {"mac_trans_from_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
- {"mac_rcv_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
- {"mac_rcv_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
- {"mac_rcv_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
- {"mac_rcv_fcs_err_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
- {"mac_rcv_send_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
- {"mac_rcv_send_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
+ {"mac_rx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+ {"mac_rx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+ {"mac_rx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+ {"mac_rx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
+ {"mac_rx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+ {"mac_rx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+ {"mac_rx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+ {"mac_rx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+ {"mac_rx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
+
+ {"mac_tx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+ {"mac_tx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+ {"mac_tx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+ {"mac_tx_err_all_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+ {"mac_tx_from_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+ {"mac_tx_from_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+ {"mac_rx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+ {"mac_rx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+ {"mac_rx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+ {"mac_rx_fcs_err_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+ {"mac_rx_send_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+ {"mac_rx_send_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
+};
+
+static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
+ {
+ .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
+ .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
+ .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
+ .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+ .i_port_bitmap = 0x1,
+ },
};
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
@@ -460,9 +506,41 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
return 0;
}
+static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
+{
+ struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
+ struct hclge_desc desc;
+ __le64 *desc_data;
+ int ret;
+
+ /* for fiber ports, we need to query the total rx/tx packet statistics,
+ * used for checking data transfer.
+ */
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return 0;
+
+ if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get MAC total pkt stats fail, ret = %d\n", ret);
+
+ return ret;
+ }
+
+ desc_data = (__le64 *)(&desc.data[0]);
+ mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
+ mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);
+
+ return 0;
+}
+
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
-#define HCLGE_MAC_CMD_NUM 17
+#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
@@ -524,7 +602,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -544,7 +622,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
return 0;
@@ -586,7 +664,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -594,7 +672,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -642,23 +720,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
- net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
- net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->rx_length_errors =
hw_stats->mac_stats.mac_rx_undersize_pkt_num;
net_stats->rx_length_errors +=
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_over_errors =
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -698,6 +775,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
int status;
+ if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return;
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -723,6 +803,8 @@ static void hclge_update_stats(struct hnae3_handle *handle,
status);
hclge_update_netstat(hw_stats, net_stats);
+
+ clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
@@ -981,6 +1063,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_DEFAULT_SPEED_M,
HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
+
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1058,7 +1144,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
- hdev->rss_size_max = 1;
+ hdev->rss_size_max = cfg.rss_size_max;
hdev->rx_buf_len = cfg.rx_buf_len;
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
@@ -1095,10 +1181,7 @@ static int hclge_configure(struct hclge_dev *hdev)
for (i = 0; i < hdev->tm_info.num_tc; i++)
hnae_set_bit(hdev->hw_tc_map, i, 1);
- if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
- else
- hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
return ret;
}
@@ -2132,28 +2215,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
return 0;
}
-static int hclge_query_autoneg_result(struct hclge_dev *hdev)
-{
- struct hclge_mac *mac = &hdev->hw.mac;
- struct hclge_query_an_speed_dup_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "autoneg result query cmd failed %d.\n", ret);
- return ret;
- }
-
- mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
-
- return 0;
-}
-
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
struct hclge_config_auto_neg_cmd *req;
@@ -2189,15 +2250,45 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
- hclge_query_autoneg_result(hdev);
+ if (phydev)
+ return phydev->autoneg;
return hdev->hw.mac.autoneg;
}
+static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
+ bool mask_vlan,
+ u8 *mac_mask)
+{
+ struct hclge_mac_vlan_mask_entry_cmd *req;
+ struct hclge_desc desc;
+ int status;
+
+ req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
+
+ hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+ mask_vlan ? 1 : 0);
+ ether_addr_copy(req->mac_mask, mac_mask);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_mac_init(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
+ u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ int mtu;
int ret;
ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2223,7 +2314,45 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
- return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mta filter mode fail ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set default mac_vlan_mask fail ret=%d\n", ret);
+ return ret;
+ }
+
+ if (netdev)
+ mtu = netdev->mtu;
+ else
+ mtu = ETH_DATA_LEN;
+
+ ret = hclge_set_mtu(handle, mtu);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->rst_service_task);
}
static void hclge_task_schedule(struct hclge_dev *hdev)
@@ -2350,6 +2479,7 @@ static void hclge_service_timer(struct timer_list *t)
struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ hdev->hw_stats.stats_timer++;
hclge_task_schedule(hdev);
}
@@ -2362,6 +2492,64 @@ static void hclge_service_complete(struct hclge_dev *hdev)
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+ u32 rst_src_reg;
+ u32 cmdq_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+
+ /* Assumption: if reset and mailbox events are reported together,
+ * only the reset event is processed in this pass and the mailbox
+ * event is deferred. Since the RX CMDQ event has not been cleared
+ * this time, the H/W will raise another interrupt just for the
+ * mailbox.
+ */
+
+ /* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGE_VECTOR0_EVENT_MBX;
+ }
+
+ return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ u32 regclr)
+{
+ switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+ break;
+ }
+}
+
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
writel(enable ? 1 : 0, vector->addr);
@@ -2370,10 +2558,38 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ u32 event_cause;
+ u32 clearval;
hclge_enable_vector(&hdev->misc_vector, false);
- if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->service_task);
+ event_cause = hclge_check_event_cause(hdev, &clearval);
+
+ /* vector 0 interrupt is shared by reset and mailbox source events. */
+ switch (event_cause) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_reset_task_schedule(hdev);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ /* If we are here then either:
+ * 1. no mbx task is being handled and none is scheduled, or
+ * 2. an mbx task is being handled but nothing more is scheduled.
+ * In both cases schedule the mbx task, since this interrupt
+ * reports more mbx messages.
+ */
+ hclge_mbx_task_schedule(hdev);
+ break;
+
+ default:
+ dev_dbg(&hdev->pdev->dev,
+ "received unknown or unhandled event of vector0\n");
+ break;
+ }
+
+ /* clear the interrupt source before re-enabling the vector */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+ hclge_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
@@ -2404,9 +2620,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
hclge_get_misc_vector(hdev);
- ret = devm_request_irq(&hdev->pdev->dev,
- hdev->misc_vector.vector_irq,
- hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+ /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
+ ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+ 0, "hclge_misc", hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -2416,6 +2632,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
return ret;
}
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclge_free_vector(hdev, 0);
+}
+
static int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
@@ -2471,12 +2693,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
cnt++;
}
- /* must clear reset status register to
- * prevent driver detect reset interrupt again
- */
- reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
-
if (cnt >= HCLGE_RESET_WAIT_CNT) {
dev_warn(&hdev->pdev->dev,
"Wait for reset timeout: %d\n", hdev->reset_type);
@@ -2505,12 +2721,12 @@ static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
return ret;
}
-static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+static void hclge_do_reset(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
u32 val;
- switch (type) {
+ switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
@@ -2526,30 +2742,62 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
hclge_func_reset_cmd(hdev, 0);
+ /* schedule again to check later */
+ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
- "Unsupported reset type: %d\n", type);
+ "Unsupported reset type: %d\n", hdev->reset_type);
break;
}
}
-static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+ unsigned long *addr)
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
- u32 rst_reg_val;
- rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_GLOBAL_RESET, addr))
rst_level = HNAE3_GLOBAL_RESET;
- else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_CORE_RESET, addr))
rst_level = HNAE3_CORE_RESET;
- else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_IMP_RESET, addr))
rst_level = HNAE3_IMP_RESET;
+ else if (test_bit(HNAE3_FUNC_RESET, addr))
+ rst_level = HNAE3_FUNC_RESET;
+
+ /* now, clear all the recorded reset requests */
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
return rst_level;
}
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ /* perform reset of the stack & ae device for a client */
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+ if (!hclge_reset_wait(hdev)) {
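+ /* hold rtnl so the client uninit/re-init cannot race with netdev ops */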
+ rtnl_lock();
+ hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ hclge_reset_ae_dev(hdev->ae_dev);
+ hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ rtnl_unlock();
+ } else {
+ /* schedule again to check pending resets later */
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ }
+
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
static void hclge_reset_event(struct hnae3_handle *handle,
enum hnae3_reset_type reset)
{
@@ -2563,14 +2811,9 @@ static void hclge_reset_event(struct hnae3_handle *handle,
case HNAE3_FUNC_RESET:
case HNAE3_CORE_RESET:
case HNAE3_GLOBAL_RESET:
- if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
- dev_err(&hdev->pdev->dev, "Already in reset state");
- return;
- }
- hdev->reset_type = reset;
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->service_task);
+ /* request reset & schedule reset task */
+ set_bit(reset, &hdev->reset_request);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
@@ -2580,49 +2823,55 @@ static void hclge_reset_event(struct hnae3_handle *handle,
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
- bool do_reset;
+ /* Check whether there is any ongoing reset in the hardware; this
+ * status is tracked in reset_pending. If there is one, wait for
+ * the hardware to complete it:
+ * a. if the hardware is seen to have fully reset within a
+ * reasonable time, proceed with the driver and client reset;
+ * b. otherwise, re-schedule now and check the status again later.
+ */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_reset(hdev);
- do_reset = hdev->reset_type != HNAE3_NONE_RESET;
+ /* check if we got any *new* reset requests to be honored */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_do_reset(hdev);
- /* Reset is detected by interrupt */
- if (hdev->reset_type == HNAE3_NONE_RESET)
- hdev->reset_type = hclge_detected_reset_event(hdev);
+ hdev->reset_type = HNAE3_NONE_RESET;
+}
- if (hdev->reset_type == HNAE3_NONE_RESET)
+static void hclge_reset_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, rst_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
return;
- switch (hdev->reset_type) {
- case HNAE3_FUNC_RESET:
- case HNAE3_CORE_RESET:
- case HNAE3_GLOBAL_RESET:
- case HNAE3_IMP_RESET:
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
- if (do_reset)
- hclge_do_reset(hdev, hdev->reset_type);
- else
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+ hclge_reset_subtask(hdev);
- if (!hclge_reset_wait(hdev)) {
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- }
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- break;
- default:
- dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
- hdev->reset_type);
- break;
- }
- hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}
-static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
+static void hclge_mailbox_service_task(struct work_struct *work)
{
- hclge_reset_subtask(hdev);
- hclge_enable_vector(&hdev->misc_vector, true);
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, mbx_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_service_task(struct work_struct *work)
@@ -2630,10 +2879,20 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
- hclge_misc_irq_service_task(hdev);
+ /* The total rx/tx packet statistics need to be refreshed every
+ * second. Either hclge_update_stats_for_all() or
+ * hclge_mac_get_traffic_stats() can do it.
+ */
+ if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
+ hclge_update_stats_for_all(hdev);
+ hdev->hw_stats.stats_timer = 0;
+ } else {
+ hclge_mac_get_traffic_stats(hdev);
+ }
+
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
- hclge_update_stats_for_all(hdev);
+ hclge_update_led_status(hdev);
hclge_service_complete(hdev);
}
@@ -3174,49 +3433,53 @@ err:
return ret;
}
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
- struct hnae3_ring_chain_node *ring_chain)
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- int ret;
+ struct hclge_ctrl_vector_chain_cmd *req
+ = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ enum hclge_cmd_status status;
+ enum hclge_opcode_type op;
+ u16 tqp_type_and_id;
int i;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+ hclge_cmd_setup_basic_desc(&desc, op, false);
req->int_vector_id = vector_id;
i = 0;
for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
+ hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
+ hnae_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
+ req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+ req->vfid = vport->vport_id;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
"Map TQP fail, status is %d.\n",
- ret);
- return ret;
+ status);
+ return -EIO;
}
i = 0;
hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_ADD_RING_TO_VECTOR,
+ op,
false);
req->int_vector_id = vector_id;
}
@@ -3224,21 +3487,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
if (i > 0) {
req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ req->vfid = vport->vport_id;
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
- "Map TQP fail, status is %d.\n", ret);
- return ret;
+ "Map TQP fail, status is %d.\n", status);
+ return -EIO;
}
}
return 0;
}
-static int hclge_map_handle_ring_to_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -3247,24 +3510,20 @@ static int hclge_map_handle_ring_to_vector(
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. ret =%d\n", vector_id);
+ "Get vector index fail. vector_id =%d\n", vector_id);
return vector_id;
}
- return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
+ return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
-static int hclge_unmap_ring_from_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
- struct hnae3_ring_chain_node *node;
- struct hclge_desc desc;
- int i, vector_id;
- int ret;
+ int vector_id, ret;
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
@@ -3273,54 +3532,17 @@ static int hclge_unmap_ring_from_vector(
return vector_id;
}
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
- req->int_vector_id = vector_id;
-
- i = 0;
- for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
- if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
- req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n",
- ret);
- return ret;
- }
- i = 0;
- hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_DEL_RING_TO_VECTOR,
- false);
- req->int_vector_id = vector_id;
- }
+ ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+ vector_id,
+ ret);
+ return ret;
}
- if (i > 0) {
- req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n", ret);
- return ret;
- }
- }
+ /* Free this MSIX or MSI vector */
+ hclge_free_vector(hdev, vector_id);
return 0;
}
@@ -4077,6 +4299,91 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
return status;
}
+static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
+ u16 cmdq_resp, u8 resp_code)
+{
+#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
+#define HCLGE_ETHERTYPE_ALREADY_ADD 1
+#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
+#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
+
+ int return_status;
+
+ if (cmdq_resp) {
+ dev_err(&hdev->pdev->dev,
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ cmdq_resp);
+ return -EIO;
+ }
+
+ switch (resp_code) {
+ case HCLGE_ETHERTYPE_SUCCESS_ADD:
+ case HCLGE_ETHERTYPE_ALREADY_ADD:
+ return_status = 0;
+ break;
+ case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for manager table overflow.\n");
+ return_status = -EIO;
+ break;
+ case HCLGE_ETHERTYPE_KEY_CONFLICT:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for key conflict.\n");
+ return_status = -EIO;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for undefined, code=%d.\n",
+ resp_code);
+ return_status = -EIO;
+ }
+
+ return return_status;
+}
+
+static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
+ const struct hclge_mac_mgr_tbl_entry_cmd *req)
+{
+ struct hclge_desc desc;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
+ memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
+}
+
+static int init_mgr_tbl(struct hclge_dev *hdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
+ ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4090,6 +4397,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
/* mac addr check */
if (is_zero_ether_addr(new_addr) ||
@@ -4101,14 +4409,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
return -EINVAL;
}
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "remove old uc mac address fail, ret =%d.\n",
+ ret);
- if (!hclge_add_uc_addr(handle, new_addr)) {
- ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
- return 0;
+ ret = hclge_add_uc_addr(handle, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add uc mac address fail, ret =%d.\n",
+ ret);
+
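+ /* roll back to the old address so the port keeps a valid unicast entry */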
+ ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore uc mac address fail, ret =%d.\n",
+ ret);
+ }
+
+ return -EIO;
}
- return -EIO;
+ ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "configure mac pause address fail, ret =%d.\n",
+ ret);
+ return -EIO;
+ }
+
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+
+ return 0;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
@@ -4134,6 +4467,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
return 0;
}
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+
+static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+}
+
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
@@ -4250,43 +4594,204 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+ req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
+ vcfg->accept_tag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
+ vcfg->accept_untag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
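+ /* locate this vport's bit: 64 VFs per command, 8 bits per bitmap byte */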
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port txvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port rxvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+ struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+ struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+ rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+ rx_req->ot_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+ rx_req->ot_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+ rx_req->in_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+ rx_req->in_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Send rxvlan protocol type command fail, ret =%d\n",
+ status);
+ return status;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+ tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+ tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send txvlan protocol type command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
-#define HCLGE_VLAN_TYPE_VF_TABLE 0
-#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+#define HCLGE_DEF_VLAN_TYPE 0x8100
+
struct hnae3_handle *handle;
+ struct hclge_vport *vport;
int ret;
+ int i;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
if (ret)
return ret;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
if (ret)
return ret;
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+ ret = hclge_set_vlan_protocol_type(hdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->txvlan_cfg.accept_tag = true;
+ vport->txvlan_cfg.accept_untag = true;
+ vport->txvlan_cfg.insert_tag1_en = false;
+ vport->txvlan_cfg.insert_tag2_en = false;
+ vport->txvlan_cfg.default_tag1 = 0;
+ vport->txvlan_cfg.default_tag2 = 0;
+
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ ret = hclge_set_vlan_rx_offload_cfg(vport);
+ if (ret)
+ return ret;
+ }
+
handle = &hdev->vport[0].nic;
return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
+static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = enable;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_config_max_frm_size_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
+ int max_frm_size;
int ret;
- if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
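+ /* the hardware limit applies to the whole frame: MTU plus L2
+ * header, one VLAN tag and the FCS
+ */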
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
return -EINVAL;
- hdev->mps = new_mtu;
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(new_mtu);
+ req->max_frm_size = cpu_to_le16(max_frm_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -4294,6 +4799,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
return ret;
}
+ hdev->mps = max_frm_size;
+
return 0;
}
@@ -4341,7 +4848,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
-static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4392,6 +4899,100 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
+static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
+ u32 *flowctrl_adv)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
+ (phydev->advertising & ADVERTISED_Asym_Pause);
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ if (rx_en)
+ phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+ if (tx_en)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+}
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ int ret;
+
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+ return 0;
+
+ ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+
+ return 0;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u16 remote_advertising = 0;
+ u16 local_advertising = 0;
+ u32 rx_pause, tx_pause;
+ u8 flowctl;
+
+ if (!phydev->link || !phydev->autoneg)
+ return 0;
+
+ if (phydev->advertising & ADVERTISED_Pause)
+ local_advertising = ADVERTISE_PAUSE_CAP;
+
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ local_advertising |= ADVERTISE_PAUSE_ASYM;
+
+ if (phydev->pause)
+ remote_advertising = LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_advertising |= LPA_PAUSE_ASYM;
+
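+ /* resolve the pause mode agreed on by local and link-partner advertisements */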
+ flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+ remote_advertising);
+ tx_pause = flowctl & FLOW_CTRL_TX;
+ rx_pause = flowctl & FLOW_CTRL_RX;
+
+ if (phydev->duplex == HCLGE_MAC_HALF) {
+ tx_pause = 0;
+ rx_pause = 0;
+ }
+
+ return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
+
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
u32 *rx_en, u32 *tx_en)
{
@@ -4421,6 +5022,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+ u32 rx_en, u32 tx_en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u32 fc_autoneg;
+
+ /* For now, flow control negotiation is only supported for a
+ * netdev with a PHY attached.
+ */
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ fc_autoneg = hclge_get_autoneg(handle);
+ if (auto_neg != fc_autoneg) {
+ dev_info(&hdev->pdev->dev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ dev_info(&hdev->pdev->dev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
+
+ hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+ if (!fc_autoneg)
+ return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+ return phy_start_aneg(phydev);
+}
+
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex)
{
@@ -4661,6 +5297,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_request = 0;
+ hdev->reset_pending = 0;
ae_dev->priv = hdev;
ret = hclge_pci_init(hdev);
@@ -4768,16 +5406,28 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
+ return ret;
+ }
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
+ INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
+ INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -4889,25 +5539,471 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
+ if (hdev->rst_service_task.func)
+ cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- hclge_free_vector(hdev, 0);
hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
ae_dev->priv = NULL;
}
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ ch->max_combined = hclge_get_max_channels(handle);
+ ch->other_count = 1;
+ ch->max_other = 1;
+ ch->combined_count = vport->alloc_tqps;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+ u16 *free_tqps, u16 *max_rss_size)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 temp_tqps = 0;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ if (!hdev->htqp[i].alloced)
+ temp_tqps++;
+ }
+ *free_tqps = temp_tqps;
+ *max_rss_size = hdev->rss_size_max;
+}
+
+static void hclge_release_tqp(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_tqp, q);
+
+ tqp->q.handle = NULL;
+ tqp->q.tqp_index = 0;
+ tqp->alloced = false;
+ }
+
+ devm_kfree(&hdev->pdev->dev, kinfo->tqp);
+ kinfo->tqp = NULL;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int cur_rss_size = kinfo->rss_size;
+ int cur_tqps = kinfo->num_tqps;
+ u16 tc_offset[HCLGE_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 tc_size[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ u32 *rss_indir;
+ int ret, i;
+
+ hclge_release_tqp(vport);
+
+ ret = hclge_knic_setup(vport, new_tqps_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_map_tqp_to_vport(hdev, vport);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_tm_schd_init(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ return ret;
+ }
+
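+ /* tc_size is programmed as log2 of the rounded-up per-TC queue count */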
+ roundup_size = roundup_pow_of_two(kinfo->rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = kinfo->rss_size * i;
+ }
+ ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ if (ret)
+ return ret;
+
+ /* Reinitializes the rss indirect table according to the new RSS size */
+ rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!rss_indir)
+ return -ENOMEM;
+
+ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ rss_indir[i] = i % kinfo->rss_size;
+
+ ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+ ret);
+
+ kfree(rss_indir);
+
+ if (!ret)
+ dev_info(&hdev->pdev->dev,
+ "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+ cur_rss_size, kinfo->rss_size,
+ cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+ return ret;
+}
+
+static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+ u32 *regs_num_64_bit)
+{
+ struct hclge_desc desc;
+ u32 total_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query register number cmd failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *regs_num_32_bit = le32_to_cpu(desc.data[0]);
+ *regs_num_64_bit = le32_to_cpu(desc.data[1]);
+
+ total_num = *regs_num_32_bit + *regs_num_64_bit;
+ if (!total_num)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+
+ struct hclge_desc *desc;
+ u32 *reg_val = data;
+ __le32 *desc_data;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
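+ /* the first descriptor yields two fewer registers, hence the "+ 2" */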
+ cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 32 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le32 *)(&desc[i].data[0]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
+ } else {
+ desc_data = (__le32 *)(&desc[i]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le32_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+
+ struct hclge_desc *desc;
+ u64 *reg_val = data;
+ __le64 *desc_data;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
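+ /* the first descriptor yields one fewer register, hence the "+ 1" */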
+ cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 64 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le64_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return -EOPNOTSUPP;
+ }
+
+ return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+}
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int ret;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 32 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+
+ data = (u32 *)data + regs_num_32_bit;
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
+ data);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get 64 bit register failed, ret = %d.\n", ret);
+}
+
+static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
+ u8 act_led_status, u8 link_led_status,
+ u8 locate_led_status)
+{
+ struct hclge_set_led_state_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
+
+ req = (struct hclge_set_led_state_cmd *)desc.data;
+ hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
+ HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
+ hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
+ HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
+ hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
+ HCLGE_LED_LINK_STATE_S, link_led_status);
+ hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Send set led state cmd error, ret =%d\n", ret);
+
+ return ret;
+}
+
+enum hclge_led_status {
+ HCLGE_LED_OFF,
+ HCLGE_LED_ON,
+ HCLGE_LED_NO_CHANGE = 0xFF,
+};
+
+static int hclge_set_led_id(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status)
+{
+#define BLINK_FREQUENCY 2
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret = 0;
+
+ if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ switch (status) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = hclge_set_led_status_sfp(hdev,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_ON);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = hclge_set_led_status_sfp(hdev,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_OFF);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+enum hclge_led_port_speed {
+ HCLGE_SPEED_LED_FOR_1G,
+ HCLGE_SPEED_LED_FOR_10G,
+ HCLGE_SPEED_LED_FOR_25G,
+ HCLGE_SPEED_LED_FOR_40G,
+ HCLGE_SPEED_LED_FOR_50G,
+ HCLGE_SPEED_LED_FOR_100G,
+};
+
+static u8 hclge_led_get_speed_status(u32 speed)
+{
+ u8 speed_led;
+
+ switch (speed) {
+ case HCLGE_MAC_SPEED_1G:
+ speed_led = HCLGE_SPEED_LED_FOR_1G;
+ break;
+ case HCLGE_MAC_SPEED_10G:
+ speed_led = HCLGE_SPEED_LED_FOR_10G;
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ speed_led = HCLGE_SPEED_LED_FOR_25G;
+ break;
+ case HCLGE_MAC_SPEED_40G:
+ speed_led = HCLGE_SPEED_LED_FOR_40G;
+ break;
+ case HCLGE_MAC_SPEED_50G:
+ speed_led = HCLGE_SPEED_LED_FOR_50G;
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ speed_led = HCLGE_SPEED_LED_FOR_100G;
+ break;
+ default:
+ speed_led = HCLGE_LED_NO_CHANGE;
+ }
+
+ return speed_led;
+}
+
+static int hclge_update_led_status(struct hclge_dev *hdev)
+{
+ u8 port_speed_status, link_status, activity_status;
+ u64 rx_pkts, tx_pkts;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return 0;
+
+ port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);
+
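+ /* any change in the total packet counters since the last pass counts as activity */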
+ rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
+ tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
+ if (rx_pkts != hdev->rx_pkts_for_led ||
+ tx_pkts != hdev->tx_pkts_for_led)
+ activity_status = HCLGE_LED_ON;
+ else
+ activity_status = HCLGE_LED_OFF;
+ hdev->rx_pkts_for_led = rx_pkts;
+ hdev->tx_pkts_for_led = tx_pkts;
+
+ if (hdev->hw.mac.link)
+ link_status = HCLGE_LED_ON;
+ else
+ link_status = HCLGE_LED_OFF;
+
+ return hclge_set_led_status_sfp(hdev, port_speed_status,
+ activity_status, link_status,
+ HCLGE_LED_NO_CHANGE);
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
- .map_ring_to_vector = hclge_map_handle_ring_to_vector,
- .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
+ .map_ring_to_vector = hclge_map_ring_to_vector,
+ .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
.get_vector = hclge_get_vector,
.set_promisc_mode = hclge_set_promisc_mode,
.set_loopback = hclge_set_loopback,
@@ -4934,6 +6030,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
+ .set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
@@ -4942,9 +6039,18 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_sset_count = hclge_get_sset_count,
.get_fw_version = hclge_get_fw_version,
.get_mdix_mode = hclge_get_mdix_mode,
+ .enable_vlan_filter = hclge_enable_vlan_filter,
.set_vlan_filter = hclge_set_port_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+ .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+ .set_channels = hclge_set_channels,
+ .get_channels = hclge_get_channels,
+ .get_flowctrl_adv = hclge_get_flowctrl_adv,
+ .get_regs_len = hclge_get_regs_len,
+ .get_regs = hclge_get_regs,
+ .set_led_id = hclge_set_led_id,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7027814ea5d7..d99a76a9557c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -79,6 +79,10 @@
#define HCLGE_PHY_MDIX_STATUS_B (6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD 64
+#define HCLGE_VF_NUM_PER_BYTE 8
+
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_GLOBAL_RESET_REG 0x20A00
@@ -92,6 +96,16 @@
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGE_MAC_DEFAULT_FRAME \
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define HCLGE_MAC_MIN_FRAME 64
+#define HCLGE_MAC_MAX_FRAME 9728
+
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
HCLGE_STATE_DOWN,
@@ -99,12 +113,20 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_REMOVING,
HCLGE_STATE_SERVICE_INITED,
HCLGE_STATE_SERVICE_SCHED,
+ HCLGE_STATE_RST_SERVICE_SCHED,
+ HCLGE_STATE_RST_HANDLING,
+ HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
- HCLGE_STATE_MBX_IRQ,
- HCLGE_STATE_RESET_INT,
+ HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_MAX
};
+enum hclge_evt_cause {
+ HCLGE_VECTOR0_EVENT_RST,
+ HCLGE_VECTOR0_EVENT_MBX,
+ HCLGE_VECTOR0_EVENT_OTHER,
+};
+
#define HCLGE_MPF_ENBALE 1
struct hclge_caps {
u16 num_tqp;
@@ -208,6 +230,7 @@ struct hclge_cfg {
u8 tc_num;
u16 tqp_desc_num;
u16 rx_buf_len;
+ u16 rss_size_max;
u8 phy_addr;
u8 media_type;
u8 mac_addr[ETH_ALEN];
@@ -364,14 +387,23 @@ struct hclge_mac_stats {
u64 mac_tx_multi_pkt_num;
u64 mac_tx_broad_pkt_num;
u64 mac_tx_undersize_pkt_num;
- u64 mac_tx_overrsize_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
u64 mac_tx_64_oct_pkt_num;
u64 mac_tx_65_127_oct_pkt_num;
u64 mac_tx_128_255_oct_pkt_num;
u64 mac_tx_256_511_oct_pkt_num;
u64 mac_tx_512_1023_oct_pkt_num;
u64 mac_tx_1024_1518_oct_pkt_num;
- u64 mac_tx_1519_max_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_good_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_oct_pkt_num;
+
u64 mac_rx_total_pkt_num;
u64 mac_rx_total_oct_num;
u64 mac_rx_good_pkt_num;
@@ -382,33 +414,52 @@ struct hclge_mac_stats {
u64 mac_rx_multi_pkt_num;
u64 mac_rx_broad_pkt_num;
u64 mac_rx_undersize_pkt_num;
- u64 mac_rx_overrsize_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
u64 mac_rx_64_oct_pkt_num;
u64 mac_rx_65_127_oct_pkt_num;
u64 mac_rx_128_255_oct_pkt_num;
u64 mac_rx_256_511_oct_pkt_num;
u64 mac_rx_512_1023_oct_pkt_num;
u64 mac_rx_1024_1518_oct_pkt_num;
- u64 mac_rx_1519_max_oct_pkt_num;
-
- u64 mac_trans_fragment_pkt_num;
- u64 mac_trans_undermin_pkt_num;
- u64 mac_trans_jabber_pkt_num;
- u64 mac_trans_err_all_pkt_num;
- u64 mac_trans_from_app_good_pkt_num;
- u64 mac_trans_from_app_bad_pkt_num;
- u64 mac_rcv_fragment_pkt_num;
- u64 mac_rcv_undermin_pkt_num;
- u64 mac_rcv_jabber_pkt_num;
- u64 mac_rcv_fcs_err_pkt_num;
- u64 mac_rcv_send_app_good_pkt_num;
- u64 mac_rcv_send_app_bad_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_good_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_oct_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
};
+#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
struct hclge_hw_stats {
struct hclge_mac_stats mac_stats;
struct hclge_64_bit_stats all_64_bit_stats;
struct hclge_32_bit_stats all_32_bit_stats;
+ u32 stats_timer;
+};
+
+struct hclge_vlan_type_cfg {
+ u16 rx_ot_fst_vlan_type;
+ u16 rx_ot_sec_vlan_type;
+ u16 rx_in_fst_vlan_type;
+ u16 rx_in_sec_vlan_type;
+ u16 tx_ot_vlan_type;
+ u16 tx_in_vlan_type;
};
struct hclge_dev {
@@ -420,6 +471,8 @@ struct hclge_dev {
unsigned long state;
enum hnae3_reset_type reset_type;
+ unsigned long reset_request; /* reset has been requested */
+ unsigned long reset_pending; /* client reset is pending to be served */
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -469,6 +522,8 @@ struct hclge_dev {
unsigned long service_timer_previous;
struct timer_list service_timer;
struct work_struct service_task;
+ struct work_struct rst_service_task;
+ struct work_struct mbx_service_task;
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
@@ -493,6 +548,29 @@ struct hclge_dev {
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
 bool enable_mta; /* Multicast filter enable */
bool accept_mta_mc; /* Whether accept mta filter multicast */
+
+ struct hclge_vlan_type_cfg vlan_type_cfg;
+
+ u64 rx_pkts_for_led;
+ u64 tx_pkts_for_led;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+ bool accept_tag; /* Whether to accept tagged packets from the host */
+ bool accept_untag; /* Whether to accept untagged packets from the host */
+ bool insert_tag1_en; /* Whether to insert the inner vlan tag */
+ bool insert_tag2_en; /* Whether to insert the outer vlan tag */
+ u16 default_tag1; /* The default inner vlan tag to insert */
+ u16 default_tag2; /* The default outer vlan tag to insert */
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+ bool strip_tag1_en; /* Whether to strip the inner vlan tag */
+ bool strip_tag2_en; /* Whether to strip the outer vlan tag */
+ bool vlan1_vlan_prionly; /* Inner VLAN tag up to descriptor enable */
+ bool vlan2_vlan_prionly; /* Outer VLAN tag up to descriptor enable */
};
struct hclge_vport {
@@ -507,6 +585,9 @@ struct hclge_vport {
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ struct hclge_tx_vtag_cfg txvlan_cfg;
+ struct hclge_rx_vtag_cfg rxvlan_cfg;
+
int vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
@@ -529,8 +610,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
u8 func_id,
bool enable);
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
- struct hnae3_ring_chain_node *ring_chain);
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain);
+
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
@@ -544,4 +627,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
+
+void hclge_mbx_handler(struct hclge_dev *hdev);
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
#endif
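
A quick illustration of the arithmetic behind two of the new header constants. This is a standalone sketch, not part of the patch; the vf_id decomposition follows the comment above HCLGE_VF_NUM_PER_CMD, and the frame-size sum expands the standard Ethernet header constants.

#include <stdio.h>

#define HCLGE_VF_NUM_PER_CMD  64 /* VFs covered by one command */
#define HCLGE_VF_NUM_PER_BYTE 8  /* VFs covered by one bitmap byte */

int main(void)
{
	unsigned int vf_id = 70; /* hypothetical VF function id */

	/* which command block, which byte of its bitmap, which bit */
	printf("vf %u -> cmd %u, byte %u, bit %u\n", vf_id,
	       vf_id / HCLGE_VF_NUM_PER_CMD,
	       (vf_id % HCLGE_VF_NUM_PER_CMD) / HCLGE_VF_NUM_PER_BYTE,
	       vf_id % HCLGE_VF_NUM_PER_BYTE);

	/* HCLGE_MAC_DEFAULT_FRAME: ETH_HLEN(14) + ETH_FCS_LEN(4) +
	 * VLAN_HLEN(4) + ETH_DATA_LEN(1500) = 1522 octets
	 */
	printf("default frame = %d octets\n", 14 + 4 + 4 + 1500);
	return 0;
}
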
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
new file mode 100644
index 000000000000..f38fc5ce9f51
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+/* hclge_gen_resp_to_vf: used to generate a synchronous response to the VF
+ * when the PF receives a mailbox message from the VF.
+ * @vport: pointer to struct hclge_vport
+ * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
+ * message
+ * @resp_status: indicates to the VF whether its request succeeded (0) or failed
+ */
+static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
+ int resp_status,
+ u8 *resp_data, u16 resp_data_len)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "PF fail to gen resp to VF len %d exceeds max len %d\n",
+ resp_data_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
+ resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+
+ resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
+ resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
+ resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
+ resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;
+
+ if (resp_data && resp_data_len > 0)
+ memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(=%d) to send response to VF\n", status);
+
+ return status;
+}
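
For clarity, a minimal sketch of how the VF side could decode the response assembled above; the slot meanings come from the assignments in hclge_gen_resp_to_vf, and the helper name decode_pf_resp is hypothetical.

/* sketch: parse a PF->VF response (msg layout as assembled above) */
static int decode_pf_resp(const u8 *msg, u8 *data, u16 len)
{
	/* msg[0]: HCLGE_MBX_PF_VF_RESP marks this as a response
	 * msg[1]/msg[2]: echo of the original request code/subcode
	 * msg[3]: 0 on success, non-zero on failure
	 */
	if (msg[3])
		return -EIO;

	memcpy(data, &msg[4], len); /* payload starts at msg[4] */
	return 0;
}
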
+
+static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
+ u16 mbx_opcode, u8 dest_vfid)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = dest_vfid;
+ resp_pf_to_vf->msg_len = msg_len;
+ resp_pf_to_vf->msg[0] = mbx_opcode;
+
+ memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(=%d) to send mailbox message to VF\n",
+ status);
+
+ return status;
+}
+
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+ struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+ chain = head->next;
+
+ while (chain) {
+ chain_tmp = chain->next;
+ kzfree(chain);
+ chain = chain_tmp;
+ }
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type, tqp id and int_gl index
+ * from the mailbox message
+ * msg[0]: opcode
+ * msg[1]: <not relevant to this function>
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5]: first int_gl index
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl index
+ */
+static int hclge_get_ring_chain_from_mbx(
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_vport *vport)
+{
+#define HCLGE_RING_NODE_VARIABLE_NUM 3
+#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM 3
+ struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ int ring_num;
+ int i;
+
+ ring_num = req->msg[2];
+
+ hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ ring_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+ hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[5]);
+
+ cur_chain = ring_chain;
+
+ for (i = 1; i < ring_num; i++) {
+ new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+ if (!new_chain)
+ goto err;
+
+ hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]);
+
+ new_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]);
+
+ hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 2]);
+
+ cur_chain->next = new_chain;
+ cur_chain = new_chain;
+ }
+
+ return 0;
+err:
+ hclge_free_vector_ring_chain(ring_chain);
+ return -ENOMEM;
+}
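
The index arithmetic above is easier to see factored out; a sketch, with the hypothetical helper name ring_field_base:

/* base offset of ring i's three message words (3*i + 3) */
static inline int ring_field_base(int i)
{
	return HCLGE_RING_NODE_VARIABLE_NUM * i +
	       HCLGE_RING_MAP_MBX_BASIC_MSG_NUM;
}
/* ring 0 -> msg[3..5], ring 1 -> msg[6..8], ring 3 -> msg[12..14] */
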
+
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ struct hnae3_ring_chain_node ring_chain;
+ int vector_id = req->msg[1];
+ int ret;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+ if (ret)
+ return ret;
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+ return 0;
+}
+
+static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ bool en = req->msg[1] ? true : false;
+ struct hclge_promisc_param param;
+
+ /* always enable broadcast promisc bit */
+ hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+ return hclge_cmd_set_promisc_mode(vport->back, &param);
+}
+
+static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+ const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+
+ hclge_rm_uc_addr_common(vport, old_addr);
+ status = hclge_add_uc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+ status = hclge_add_uc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+ status = hclge_rm_uc_addr_common(vport, mac_addr);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set unicast mac addr, unknown subcode %d\n",
+ mbx_req->msg[1]);
+ return -EIO;
+ }
+
+ if (gen_resp)
+ hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return 0;
+}
+
+static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+ status = hclge_add_mc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+ status = hclge_rm_mc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
+ u8 func_id = vport->vport_id;
+ bool enable = mbx_req->msg[2];
+
+ status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set mcast mac addr, unknown subcode %d\n",
+ mbx_req->msg[1]);
+ return -EIO;
+ }
+
+ if (gen_resp)
+ hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return 0;
+}
+
+static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ struct hclge_dev *hdev = vport->back;
+ int status = 0;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+ u16 vlan, proto;
+ bool is_kill;
+
+ is_kill = !!mbx_req->msg[2];
+ memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
+ memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
+ status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ is_kill, vlan, 0,
+ cpu_to_be16(proto));
+ }
+
+ if (gen_resp)
+ status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return status;
+}
+
+static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
+ sizeof(u8));
+
+ return ret;
+}
+
+static int hclge_get_vf_queue_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+#define HCLGE_TQPS_RSS_INFO_LEN 8
+ u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
+ struct hclge_dev *hdev = vport->back;
+
+ /* get the queue related info */
+ memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
+ memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16));
+ memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
+ memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ HCLGE_TQPS_RSS_INFO_LEN);
+}
+
+static int hclge_get_link_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ u16 link_status;
+ u8 msg_data[2];
+ u8 dest_vfid;
+
+ /* mac.link can only be 0 or 1 */
+ link_status = (u16)hdev->hw.mac.link;
+ memcpy(&msg_data[0], &link_status, sizeof(u16));
+ dest_vfid = mbx_req->mbx_src_vfid;
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+ HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
+}
+
+static void hclge_reset_vf_queue(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ u16 queue_id;
+
+ memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+
+ hclge_reset_tqp(&vport->nic, queue_id);
+}
+
+void hclge_mbx_handler(struct hclge_dev *hdev)
+{
+ struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_vport *vport;
+ struct hclge_desc *desc;
+ int ret;
+
+ /* handle all the mailbox requests in the queue */
+ while (hnae_get_bit(crq->desc[crq->next_to_use].flag,
+ HCLGE_CMDQ_RX_OUTVLD_B)) {
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+
+ vport = &hdev->vport[req->mbx_src_vfid];
+
+ switch (req->msg[0]) {
+ case HCLGE_MBX_MAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+ req);
+ break;
+ case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
+ req);
+ break;
+ case HCLGE_MBX_SET_PROMISC_MODE:
+ ret = hclge_set_vf_promisc_mode(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF promisc mode\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_UNICAST:
+ ret = hclge_set_vf_uc_mac_addr(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_MULTICAST:
+ ret = hclge_set_vf_mc_mac_addr(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_VLAN:
+ ret = hclge_set_vf_vlan_cfg(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_QINFO:
+ ret = hclge_get_vf_queue_info(vport, req, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get Q info for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_TCINFO:
+ ret = hclge_get_vf_tcinfo(vport, req, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get TC info for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_LINK_STATUS:
+ ret = hclge_get_link_info(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to get link stat for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_QUEUE_RESET:
+ hclge_reset_vf_queue(vport, req);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %d\n",
+ req->msg[0]);
+ break;
+ }
+ hclge_mbx_ring_ptr_move_crq(crq);
+ }
+
+ /* Write back the CMDQ_RQ head pointer; the M7 firmware needs it */
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+}
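
Stripped of the dispatch details, the CRQ loop above follows a common producer/consumer shape; a sketch with hypothetical helpers desc_valid, dispatch and write_head_reg:

/* consume while the OUTVLD bit marks the descriptor as valid */
while (desc_valid(&crq->desc[crq->next_to_use])) {
	dispatch(&crq->desc[crq->next_to_use]);
	crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num;
}
/* tell firmware how far we got so the slots can be reused */
write_head_reg(crq->next_to_use);
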
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 7069e9408d7d..c1dea3a47bdd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -17,6 +17,7 @@
#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
SUPPORTED_Pause | \
+ SUPPORTED_Asym_Pause | \
PHY_10BT_FEATURES | \
PHY_100BT_FEATURES | \
PHY_1000BT_FEATURES)
@@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
+
+ ret = hclge_cfg_flowctrl(hdev);
+ if (ret)
+ netdev_err(netdev, "failed to configure flow control.\n");
}
int hclge_mac_start_phy(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 7bfa2e5497cb..36bd79a77940 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -23,8 +23,8 @@ enum hclge_shaper_level {
HCLGE_SHAPER_LVL_PF = 1,
};
-#define HCLGE_SHAPER_BS_U_DEF 1
-#define HCLGE_SHAPER_BS_S_DEF 4
+#define HCLGE_SHAPER_BS_U_DEF 5
+#define HCLGE_SHAPER_BS_S_DEF 20
#define HCLGE_ETHER_MAX_RATE 100000
@@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
return 0;
}
-static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
struct hclge_desc desc;
@@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+ u8 pause_trans_gap, u16 pause_trans_time)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+
+ ether_addr_copy(pause_param->mac_addr, addr);
+ pause_param->pause_trans_gap = pause_trans_gap;
+ pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ u16 trans_time;
+ u8 trans_gap;
+ int ret;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ trans_gap = pause_param->pause_trans_gap;
+ trans_time = le16_to_cpu(pause_param->pause_trans_time);
+
+ return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap,
+ trans_time);
+}
+
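
hclge_mac_pause_addr_cfg above is a read-modify-write: the same command is issued first as a query and then as a config, so the firmware-provided gap and time are preserved while only the MAC address changes. A condensed sketch of the pattern:

/* read current gap/time, then rewrite them with the new MAC address */
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); /* read */
hclge_cmd_send(&hdev->hw, &desc, 1);
/* extract pause_trans_gap / pause_trans_time from desc.data, then: */
hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time); /* write */
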
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
@@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
return hclge_tm_schd_mode_hw(hdev);
}
+static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ return hclge_mac_pause_param_cfg(hdev, mac->mac_addr,
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+}
+
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
u8 enable_bitmap = 0;
@@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
int ret;
u8 i;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_mac_pause_param_setup_hw(hdev);
+ }
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index bf59961918ab..5401e7559437 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -18,6 +18,9 @@
#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
+
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
@@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd {
u8 pri_en_bitmap;
};
+struct hclge_cfg_pause_param_cmd {
+ u8 mac_addr[ETH_ALEN];
+ u8 pause_trans_gap;
+ u8 rsvd;
+ __le16 pause_trans_time;
+};
+
struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
};
@@ -118,4 +128,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
new file mode 100644
index 000000000000..fb93bbd35845
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
new file mode 100644
index 000000000000..85985e731311
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
+#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
+
+static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int used;
+
+ used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
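
Plugging numbers into the formula above makes the invariant visible; a worked example in comment form:

/* worked example (sketch): desc_num = 1024, ntu = 10, ntc = 1000
 *   used  = (10 - 1000 + 1024) % 1024 = 34
 *   space = 1024 - 34 - 1 = 989
 * one slot is deliberately kept unused so that a full ring and an
 * empty ring (head == tail) remain distinguishable
 */
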
+
+static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
+{
+ struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
+ u16 ntc = csq->next_to_clean;
+ struct hclgevf_desc *desc;
+ int clean = 0;
+ u32 head;
+
+ desc = &csq->desc[ntc];
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+ while (head != ntc) {
+ memset(desc, 0, sizeof(*desc));
+ ntc++;
+ if (ntc == csq->desc_num)
+ ntc = 0;
+ desc = &csq->desc[ntc];
+ clean++;
+ }
+ csq->next_to_clean = ntc;
+
+ return clean;
+}
+
+static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
+{
+ u32 head;
+
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static bool hclgevf_is_special_opcode(u16 opcode)
+{
+ u16 spec_opcode[] = {0x30, 0x31, 0x32};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
+ if (spec_opcode[i] == opcode)
+ return true;
+ }
+
+ return false;
+}
+
+static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
+ size, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
+ ring->desc_num * sizeof(ring->desc[0]),
+ hclgevf_ring_to_dma_dir(ring));
+
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+}
+
+static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
+ struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_hw *hw = &hdev->hw;
+ int ring_type = ring->flag;
+ u32 reg_val;
+ int ret;
+
+ ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ spin_lock_init(&ring->lock);
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->dev = hdev;
+
+ /* allocate CSQ/CRQ descriptor */
+ ret = hclgevf_alloc_cmd_desc(ring);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
+ (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
+ return ret;
+ }
+
+ /* initialize the hardware registers with csq/crq dma-address,
+ * descriptor number, head & tail pointers
+ */
+ switch (ring_type) {
+ case HCLGEVF_TYPE_CSQ:
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ break;
+ case HCLGEVF_TYPE_CRQ:
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ break;
+ }
+
+ return 0;
+}
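
The odd-looking double shift when programming the base-address registers is deliberate; a sketch of what it computes:

dma_addr_t addr = ring->desc_dma_addr;
u32 lo = (u32)addr;                /* low 32 bits */
u32 hi = (u32)((addr >> 31) >> 1); /* high 32 bits */
/* "(addr >> 31) >> 1" equals "addr >> 32" for a 64-bit dma_addr_t, but
 * stays well-defined C when dma_addr_t is only 32 bits wide, where a
 * single shift by 32 would be undefined behaviour
 */
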
+
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode, bool is_read)
+{
+ memset(desc, 0, sizeof(struct hclgevf_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
+ HCLGEVF_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
+}
+
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue: it posts the
+ * descriptors, waits for synchronous completion and cleans the queue
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
+{
+ struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ struct hclgevf_desc *desc_to_use;
+ bool complete = false;
+ u32 timeout = 0;
+ int handle = 0;
+ int status = 0;
+ u16 retval;
+ u16 opcode;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ /* Record the location of desc in the ring for this command,
+ * which will be used to read back the hardware's write-back result
+ */
+ ntc = hw->cmq.csq.next_to_use;
+ opcode = le16_to_cpu(desc[0].opcode);
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+
+ /* Write to hardware */
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
+ hw->cmq.csq.next_to_use);
+
+ /* If the command is synchronous, wait for the firmware to write back;
+ * if multiple descriptors are sent, use the first one to check completion
+ */
+ if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclgevf_cmd_csq_done(hw))
+ break;
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (hclgevf_cmd_csq_done(hw)) {
+ complete = true;
+ handle = 0;
+
+ while (handle < num) {
+ /* Get the result of hardware write back */
+ desc_to_use = &hw->cmq.csq.desc[ntc];
+ desc[handle] = *desc_to_use;
+
+ if (likely(!hclgevf_is_special_opcode(opcode)))
+ retval = le16_to_cpu(desc[handle].retval);
+ else
+ retval = le16_to_cpu(desc[0].retval);
+
+ if ((enum hclgevf_cmd_return_status)retval ==
+ HCLGEVF_CMD_EXEC_SUCCESS)
+ status = 0;
+ else
+ status = -EIO;
+ hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
+ ntc++;
+ handle++;
+ if (ntc == hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ }
+
+ if (!complete)
+ status = -EAGAIN;
+
+ /* Clean the command send queue */
+ handle = hclgevf_cmd_csq_clean(hw);
+ if (handle != num) {
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ }
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return status;
+}
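
Given HCLGEVF_CMDQ_TX_TIMEOUT = 200 and udelay(1) per iteration, a synchronous command is polled for roughly 200 us before -EAGAIN is returned. A caller-side retry is one way to absorb that; a sketch only, not something this driver mandates:

int ret, tries = 3;

do {
	ret = hclgevf_cmd_send(hw, &desc, 1);
} while (ret == -EAGAIN && --tries);
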
+
+static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
+ u32 *version)
+{
+ struct hclgevf_query_version_cmd *resp;
+ struct hclgevf_desc desc;
+ int status;
+
+ resp = (struct hclgevf_query_version_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
+ status = hclgevf_cmd_send(hw, &desc, 1);
+ if (!status)
+ *version = le32_to_cpu(resp->firmware);
+
+ return status;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ /* setup Tx write back timeout */
+ hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+
+ /* setup queue CSQ/CRQ rings */
+ hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
+ ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize CSQ ring\n", ret);
+ return ret;
+ }
+
+ hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
+ ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize CRQ ring\n", ret);
+ goto err_csq;
+ }
+
+ /* get firmware version */
+ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to query firmware version\n", ret);
+ goto err_crq;
+ }
+ hdev->fw_version = version;
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+ return 0;
+err_crq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+
+ return ret;
+}
+
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
+{
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
new file mode 100644
index 000000000000..2caca9317f8c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_CMD_H
+#define __HCLGEVF_CMD_H
+#include <linux/io.h>
+#include <linux/types.h>
+#include "hnae3.h"
+
+#define HCLGEVF_CMDQ_TX_TIMEOUT 200
+#define HCLGEVF_CMDQ_RX_INVLD_B 0
+#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
+
+struct hclgevf_hw;
+struct hclgevf_dev;
+
+struct hclgevf_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[6];
+};
+
+struct hclgevf_desc_cb {
+ dma_addr_t dma;
+ void *va;
+ u32 length;
+};
+
+struct hclgevf_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclgevf_desc *desc;
+ struct hclgevf_desc_cb *desc_cb;
+ struct hclgevf_dev *dev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 flag;
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclgevf_cmd_return_status {
+ HCLGEVF_CMD_EXEC_SUCCESS = 0,
+ HCLGEVF_CMD_NO_AUTH = 1,
+ HCLGEVF_CMD_NOT_EXEC = 2,
+ HCLGEVF_CMD_QUEUE_FULL = 3,
+};
+
+enum hclgevf_cmd_status {
+ HCLGEVF_STATUS_SUCCESS = 0,
+ HCLGEVF_ERR_CSQ_FULL = -1,
+ HCLGEVF_ERR_CSQ_TIMEOUT = -2,
+ HCLGEVF_ERR_CSQ_ERROR = -3
+};
+
+struct hclgevf_cmq {
+ struct hclgevf_cmq_ring csq;
+ struct hclgevf_cmq_ring crq;
+ u16 tx_timeout; /* Tx timeout */
+ enum hclgevf_cmd_status last_status;
+};
+
+#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
+#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
+#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
+#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
+#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
+#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
+
+#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
+#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
+#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
+
+enum hclgevf_opcode_type {
+ /* Generic command */
+ HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
+ /* TQP command */
+ HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
+ HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* RSS cmd */
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
+ /* Mailbox cmd */
+ HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+};
+
+#define HCLGEVF_TQP_REG_OFFSET 0x80000
+#define HCLGEVF_TQP_REG_SIZE 0x200
+
+struct hclgevf_tqp_map {
+ __le16 tqp_id; /* Absolute tqp id in this pf */
+ u8 tqp_vf; /* VF id */
+#define HCLGEVF_TQP_MAP_TYPE_PF 0
+#define HCLGEVF_TQP_MAP_TYPE_VF 1
+#define HCLGEVF_TQP_MAP_TYPE_B 0
+#define HCLGEVF_TQP_MAP_EN_B 1
+ u8 tqp_flag; /* Indicate it's pf or vf tqp */
+ __le16 tqp_vid; /* Virtual id in this pf/vf */
+ u8 rsv[18];
+};
+
+#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10
+
+enum hclgevf_int_type {
+ HCLGEVF_INT_TX = 0,
+ HCLGEVF_INT_RX,
+ HCLGEVF_INT_EVENT,
+};
+
+struct hclgevf_ctrl_vector_chain {
+ u8 int_vector_id;
+ u8 int_cause_num;
+#define HCLGEVF_INT_TYPE_S 0
+#define HCLGEVF_INT_TYPE_M 0x3
+#define HCLGEVF_TQP_ID_S 2
+#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S)
+ __le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+ u8 resv;
+};
+
+struct hclgevf_query_version_cmd {
+ __le32 firmware;
+ __le32 firmware_rsv[5];
+};
+
+#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
+#define HCLGEVF_RSS_HASH_KEY_NUM 16
+struct hclgevf_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
+};
+
+struct hclgevf_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_stcp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_stcp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+#define HCLGEVF_RSS_CFG_TBL_SIZE 16
+
+struct hclgevf_rss_indirection_table_cmd {
+ u16 start_table_index;
+ u16 rss_set_bitmap;
+ u8 rsv[4];
+ u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
+};
+
+#define HCLGEVF_RSS_TC_OFFSET_S 0
+#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
+#define HCLGEVF_RSS_TC_SIZE_S 12
+#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S)
+#define HCLGEVF_RSS_TC_VALID_B 15
+#define HCLGEVF_MAX_TC_NUM 8
+struct hclgevf_rss_tc_mode_cmd {
+ u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+ u8 rsv[8];
+};
+
+#define HCLGEVF_LINK_STS_B 0
+#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
+struct hclgevf_link_status_cmd {
+ u8 status;
+ u8 rsv[23];
+};
+
+#define HCLGEVF_RING_ID_MASK 0x3ff
+#define HCLGEVF_TQP_ENABLE_B 0
+
+struct hclgevf_cfg_com_tqp_queue_cmd {
+ __le16 tqp_id;
+ __le16 stream_id;
+ u8 enable;
+ u8 rsv[19];
+};
+
+struct hclgevf_cfg_tx_queue_pointer_cmd {
+ __le16 tqp_id;
+ __le16 tx_tail;
+ __le16 tx_head;
+ __le16 fbd_num;
+ __le16 ring_offset;
+ u8 rsv[14];
+};
+
+#define HCLGEVF_TYPE_CRQ 0
+#define HCLGEVF_TYPE_CSQ 1
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c
+#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
+#define HCLGEVF_NIC_CMQ_EN_B 16
+#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B)
+#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
+#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
+#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
+
+static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define hclgevf_write_dev(a, reg, value) \
+ hclgevf_write_reg((a)->io_base, (reg), (value))
+#define hclgevf_read_dev(a, reg) \
+ hclgevf_read_reg((a)->io_base, (reg))
+
+#define HCLGEVF_SEND_SYNC(flag) \
+ ((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev);
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode,
+ bool is_read);
+#endif
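
A usage sketch tying the declarations above together; the result location assumes hclgevf_query_version_cmd, whose 'firmware' field is the first word of the descriptor data area:

struct hclgevf_desc desc;
u32 fw;
int ret;

/* is_read = true sets HCLGEVF_CMD_FLAG_WR, marking the command a query */
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
ret = hclgevf_cmd_send(hw, &desc, 1);
if (!ret)
	fw = le32_to_cpu(desc.data[0]);
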
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
new file mode 100644
index 000000000000..0d89965f7928
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -0,0 +1,1505 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+#define HCLGEVF_NAME "hclgevf"
+
+static struct hnae3_ae_algo ae_algovf;
+
+static const struct pci_device_id ae_algovf_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ /* required last entry */
+ {0, }
+};
+
+static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
+ struct hnae3_handle *handle)
+{
+ return container_of(handle, struct hclgevf_dev, nic);
+}
+
+static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *queue;
+ struct hclgevf_desc desc;
+ struct hclgevf_tqp *tqp;
+ int status;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclgevf_tqp, q);
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_QUERY_RX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+ }
+
+ return 0;
+}
+
+static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_tqp *tqp;
+ u64 *buff = data;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->num_tqps * 2;
+}
+
+static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *buff = data;
+ int i = 0;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+static void hclgevf_update_stats(struct hnae3_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int status;
+
+ status = hclgevf_tqps_update_stats(handle);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF update of TQPS stats fail, status = %d.\n",
+ status);
+}
+
+static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ if (strset == ETH_SS_TEST)
+ return -EOPNOTSUPP;
+ else if (strset == ETH_SS_STATS)
+ return hclgevf_tqps_get_sset_count(handle, strset);
+
+ return 0;
+}
+
+static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
+ u8 *data)
+{
+ u8 *p = (char *)data;
+
+ if (strset == ETH_SS_STATS)
+ p = hclgevf_tqps_get_strings(handle, p);
+}
+
+static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ hclgevf_tqps_get_stats(handle, data);
+}
+
+static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+{
+ u8 resp_msg;
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
+ true, &resp_msg, sizeof(u8));
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get TC info from PF failed %d",
+ status);
+ return status;
+ }
+
+ hdev->hw_tc_map = resp_msg;
+
+ return 0;
+}
+
+static int hclge_get_queue_info(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_RSS_INFO_LEN 8
+ u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
+ true, resp_msg,
+ HCLGEVF_TQPS_RSS_INFO_LEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get tqp info from PF failed %d",
+ status);
+ return status;
+ }
+
+ memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
+ memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
+ memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
+ memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
+
+ return 0;
+}
+
+static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+ sizeof(struct hclgevf_tqp), GFP_KERNEL);
+ if (!hdev->htqp)
+ return -ENOMEM;
+
+ tqp = hdev->htqp;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp->dev = &hdev->pdev->dev;
+ tqp->index = i;
+
+ tqp->q.ae_algo = &ae_algovf;
+ tqp->q.buf_size = hdev->rx_buf_len;
+ tqp->q.desc_num = hdev->num_desc;
+ tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
+ i * HCLGEVF_TQP_REG_SIZE;
+
+ tqp++;
+ }
+
+ return 0;
+}
+
+static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo;
+ u16 new_tqps = hdev->num_tqps;
+ int i;
+
+ kinfo = &nic->kinfo;
+ kinfo->num_tc = 0;
+ kinfo->num_desc = hdev->num_desc;
+ kinfo->rx_buf_len = hdev->rx_buf_len;
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+ if (hdev->hw_tc_map & BIT(i))
+ kinfo->num_tc++;
+
+ kinfo->rss_size
+ = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
+ new_tqps = kinfo->rss_size * kinfo->num_tc;
+ kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
+
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+ sizeof(struct hnae3_queue *), GFP_KERNEL);
+ if (!kinfo->tqp)
+ return -ENOMEM;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ hdev->htqp[i].q.handle = &hdev->nic;
+ hdev->htqp[i].q.tqp_index = i;
+ kinfo->tqp[i] = &hdev->htqp[i].q;
+ }
+
+ return 0;
+}
+
+static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
+{
+ int status;
+ u8 resp_msg;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
+ 0, false, &resp_msg, sizeof(u8));
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed to fetch link status(%d) from PF", status);
+}
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ struct hnae3_client *client;
+
+ client = handle->client;
+
+ if (link_state != hdev->hw.mac.link) {
+ client->ops->link_status_change(handle, !!link_state);
+ hdev->hw.mac.link = link_state;
+ }
+}
+
+static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ nic->ae_algo = &ae_algovf;
+ nic->pdev = hdev->pdev;
+ nic->numa_node_mask = hdev->numa_node_mask;
+ nic->flags |= HNAE3_SUPPORT_VF;
+
+ if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
+ dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
+ hdev->ae_dev->dev_type);
+ return -EINVAL;
+ }
+
+ ret = hclgevf_knic_setup(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
+ ret);
+ return ret;
+}
+
+static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
+{
+ hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
+ hdev->num_msi_left += 1;
+ hdev->num_msi_used -= 1;
+}
+
+static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_vector_info *vector = vector_info;
+ int alloc = 0;
+ int i, j;
+
+ vector_num = min(hdev->num_msi_left, vector_num);
+
+ for (j = 0; j < vector_num; j++) {
+ for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
+ if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
+ vector->vector = pci_irq_vector(hdev->pdev, i);
+ vector->io_addr = hdev->hw.io_base +
+ HCLGEVF_VECTOR_REG_BASE +
+ (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
+ hdev->vector_status[i] = 0;
+ hdev->vector_irq[i] = vector->vector;
+
+ vector++;
+ alloc++;
+
+ break;
+ }
+ }
+ }
+ hdev->num_msi_left -= alloc;
+ hdev->num_msi_used += alloc;
+
+ return alloc;
+}
+
+static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_KEY_SIZE;
+}
+
+static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_IND_TBL_SIZE;
+}
+
+static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
+{
+ const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
+ struct hclgevf_rss_indirection_table_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+ int i, j;
+
+ req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+
+ for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
+ false);
+ req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
+ req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+ for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
+ req->rss_result[j] =
+ indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set RSS indirection table\n",
+ status);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
+{
+ struct hclgevf_rss_tc_mode_cmd *req;
+ u16 tc_offset[HCLGEVF_MAX_TC_NUM];
+ u16 tc_valid[HCLGEVF_MAX_TC_NUM];
+ u16 tc_size[HCLGEVF_MAX_TC_NUM];
+ struct hclgevf_desc desc;
+ u16 roundup_size;
+ int status;
+ int i;
+
+ req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+ tc_size[i] = roundup_size;
+ tc_offset[i] = rss_size * i;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ }
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set rss tc mode\n", status);
+
+ return status;
+}
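
The tc_size/tc_offset encoding above in numbers; a worked example:

/* sketch: rss_size = 6
 *   roundup_pow_of_two(6) = 8, ilog2(8) = 3
 * so tc_size[i] stores log2 of the per-TC queue span (3), while
 * tc_offset[i] advances by the real rss_size: 0, 6, 12, ...
 */
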
+
+static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
+ u8 *key)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_config_cmd *req;
+ int lkup_times = key ? 3 : 1;
+ struct hclgevf_desc desc;
+ int key_offset;
+ int key_size;
+ int status;
+
+ req = (struct hclgevf_rss_config_cmd *)desc.data;
+ lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
+
+ for (key_offset = 0; key_offset < lkup_times; key_offset++) {
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+ true);
+ req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get hardware RSS cfg, status = %d\n",
+ status);
+ return status;
+ }
+
+ if (key_offset == 2)
+ key_size =
+ HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+ else
+ key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+
+ if (key)
+ memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
+ req->hash_key,
+ key_size);
+ }
+
+ if (hash) {
+ if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
+ *hash = ETH_RSS_HASH_TOP;
+ else
+ *hash = ETH_RSS_HASH_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
+
+ if (indir)
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ indir[i] = rss_cfg->rss_indirection_tbl[i];
+
+ return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+}
+
+static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
+
+ /* update the shadow RSS table with user specified qids */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
+
+ /* update the hardware */
+ return hclgevf_set_rss_indir_table(hdev);
+}
+
+static int hclgevf_get_tc_size(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+
+ return rss_cfg->rss_size;
+}
+
+static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+#define HCLGEVF_RING_NODE_VARIABLE_NUM 3
+#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ring_chain_node *node;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int i, vector_id;
+ int status;
+ u8 type;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ type = en ?
+ HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+ req->msg[0] = type;
+ req->msg[1] = vector_id; /* vector_id should be id in VF */
+
+ i = 0;
+ for (node = ring_chain; node; node = node->next) {
+ i++;
+ /* msg[2] carries the ring count and is filled in before each send */
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
+ hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
+ node->tqp_index;
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] =
+ hnae_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
+
+ if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
+ HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
+ HCLGEVF_RING_NODE_VARIABLE_NUM) {
+ req->msg[2] = i;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n",
+ status);
+ return status;
+ }
+ i = 0;
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_MBX_VF_TO_PF,
+ false);
+ req->msg[0] = type;
+ req->msg[1] = vector_id;
+ }
+ }
+
+ if (i > 0) {
+ req->msg[2] = i;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n", status);
+ return status;
+ }
+ }
+
+ return 0;
+}
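
The flush threshold in the loop above falls out of simple capacity arithmetic; with the customary HCLGE_MBX_VF_MSG_DATA_NUM of 16 (an assumption here, since the macro is defined in hclge_mbx.h outside this excerpt):

/* rings per mailbox message:
 *   (HCLGE_MBX_VF_MSG_DATA_NUM - HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM)
 *       / HCLGEVF_RING_NODE_VARIABLE_NUM
 *   = (16 - 3) / 3 = 4
 * after 4 rings the command is sent and a fresh message is started
 */
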
+
+static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+}
+
+static int hclgevf_unmap_ring_from_vector(
+ struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret, vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vector=%d, ret =%d\n",
+ vector_id,
+ ret);
+ return ret;
+ }
+
+ hclgevf_free_vector(hdev, vector);
+
+ return 0;
+}
+
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
+ req->msg[1] = en;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Set promisc mode fail, status is %d.\n", status);
+
+ return status;
+}
+
+static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ hclgevf_cmd_set_promisc_mode(hdev, en);
+}
+
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
+ int stream_id, bool enable)
+{
+ struct hclgevf_cfg_com_tqp_queue_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
+ false);
+ req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
+ req->stream_id = cpu_to_le16(stream_id);
+ req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "TQP enable fail, status =%d.\n", status);
+
+ return status;
+}
+
+static int hclgevf_get_queue_id(struct hnae3_queue *queue)
+{
+ struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
+
+ return tqp->index;
+}
+
+static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *queue;
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclgevf_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
+
+static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg[2] = {0};
+
+ msg[0] = en;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
+ msg, 1, false, NULL, 0);
+}
+
+static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+ u8 *new_mac_addr = (u8 *)p;
+ u8 msg_data[ETH_ALEN * 2];
+ int status;
+
+ ether_addr_copy(msg_data, new_mac_addr);
+ ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY,
+ msg_data, ETH_ALEN * 2,
+ false, NULL, 0);
+ if (!status)
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
+
+ return status;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_ADD,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_ADD,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ __be16 proto, u16 vlan_id,
+ bool is_kill)
+{
+#define HCLGEVF_VLAN_MBX_MSG_LEN 5
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+
+ if (vlan_id > 4095)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
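+ /* mailbox message layout (HCLGEVF_VLAN_MBX_MSG_LEN bytes):
+ * byte 0: is_kill flag, bytes 1-2: VLAN id, bytes 3-4: VLAN proto
+ */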
+ msg_data[0] = is_kill;
+ memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
+ memcpy(&msg_data[3], &proto, sizeof(proto));
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER, msg_data,
+ HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+}
+
+static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[2];
+
+ memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
+ NULL, 0);
+}
+
+static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->fw_version;
+}
+
+static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_misc_vector *vector = &hdev->misc_vector;
+
+ vector->vector_irq = pci_irq_vector(hdev->pdev,
+ HCLGEVF_MISC_VECTOR_NUM);
+ vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+ /* vector status always valid for Vector 0 */
+ hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
+ hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
+
+ hdev->num_msi_left -= 1;
+ hdev->num_msi_used += 1;
+}
+
+static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->service_task);
+}
+
+static void hclgevf_service_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
+
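+ /* re-arm first; the service task is then scheduled roughly every 5 seconds */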
+ mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+
+ hclgevf_task_schedule(hdev);
+}
+
+static void hclgevf_mailbox_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+
+ hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+
+ if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+ hclgevf_mbx_handler(hdev);
+
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+
+ hdev = container_of(work, struct hclgevf_dev, service_task);
+
+ /* request the link status from the PF. The PF should be able to push
+ * such updates to the VF in the future, so this may be removed later
+ */
+ hclgevf_request_link_info(hdev);
+
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+}
+
+static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
+{
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+}
+
+static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+{
+ u32 cmdq_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+
+ /* check for vector0 mailbox (=CMDQ RX) event source */
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return true;
+ }
+
+ dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
+
+ return false;
+}
+
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+ writel(en ? 1 : 0, vector->addr);
+}
+
+static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+{
+ struct hclgevf_dev *hdev = data;
+ u32 clearval;
+
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ if (!hclgevf_check_event_cause(hdev, &clearval))
+ goto skip_sched;
+
+ /* schedule the VF mailbox service task, if not already scheduled */
+ hclgevf_mbx_task_schedule(hdev);
+
+ hclgevf_clear_event_cause(hdev, clearval);
+
+skip_sched:
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return IRQ_HANDLED;
+}
+
+static int hclgevf_configure(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ /* get queue configuration from PF */
+ ret = hclge_get_queue_info(hdev);
+ if (ret)
+ return ret;
+ /* get tc configuration from PF */
+ return hclgevf_get_tc_info(hdev);
+}
+
+static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *roce = &hdev->roce;
+ struct hnae3_handle *nic = &hdev->nic;
+
+ roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+
+ if (hdev->num_msi_left < roce->rinfo.num_vectors ||
+ hdev->num_msi_left == 0)
+ return -EINVAL;
+
+ roce->rinfo.base_vector =
+ hdev->vector_status[hdev->num_msi_used];
+
+ roce->rinfo.netdev = nic->kinfo.netdev;
+ roce->rinfo.roce_io_base = hdev->hw.io_base;
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+ roce->numa_node_mask = nic->numa_node_mask;
+
+ return 0;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i, ret;
+
+ rss_cfg->rss_size = hdev->rss_size_max;
+
+ /* initialize the RSS indirection table: entry i maps round-robin to queue (i % rss_size_max) */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
+
+ ret = hclgevf_set_rss_indir_table(hdev);
+ if (ret)
+ return ret;
+
+ return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
+}
+
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+{
+ /* other VLAN configuration (e.g. VLAN TX/RX offload) will also be
+ * added here later
+ */
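+ /* install the filter entry for VLAN 0 (is_kill = false) by default */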
+ return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
+ false);
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, queue_id;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ /* ring enable */
+ queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ if (queue_id < 0) {
+ dev_warn(&hdev->pdev->dev,
+ "Get invalid queue id, ignore it\n");
+ continue;
+ }
+
+ hclgevf_tqp_enable(hdev, queue_id, 0, true);
+ }
+
+ /* reset tqp stats */
+ hclgevf_reset_tqp_stats(handle);
+
+ hclgevf_request_link_info(hdev);
+
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ mod_timer(&hdev->service_timer, jiffies + HZ);
+
+ return 0;
+}
+
+static void hclgevf_ae_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, queue_id;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ /* Ring disable */
+ queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ if (queue_id < 0) {
+ dev_warn(&hdev->pdev->dev,
+ "Get invalid queue id, ignore it\n");
+ continue;
+ }
+
+ hclgevf_tqp_enable(hdev, queue_id, 0, false);
+ }
+
+ /* reset tqp stats */
+ hclgevf_reset_tqp_stats(handle);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
+ /* setup tasks for the MBX */
+ INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+
+ /* setup tasks for service timer */
+ timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
+
+ INIT_WORK(&hdev->service_task, hclgevf_service_task);
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+
+ mutex_init(&hdev->mbx_resp.mbx_mutex);
+
+ /* bring the device down */
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+}
+
+static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
+{
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ if (hdev->service_timer.function)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
+ cancel_work_sync(&hdev->service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
+
+ mutex_destroy(&hdev->mbx_resp.mbx_mutex);
+}
+
+static int hclgevf_init_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int vectors;
+ int i;
+
+ hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
+ }
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+ hdev->base_msi_vector = pdev->irq;
+
+ hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(u16), GFP_KERNEL);
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hdev->num_msi; i++)
+ hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
+
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pci_free_irq_vectors(pdev);
+}
+
+static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
+{
+ int ret = 0;
+
+ hclgevf_get_misc_vector(hdev);
+
+ ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
+ 0, "hclgevf_cmd", hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
+ hdev->misc_vector.vector_irq);
+ return ret;
+ }
+
+ /* enable misc. vector (vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return ret;
+}
+
+static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
+{
+ /* disable misc vector (vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclgevf_free_vector(hdev, 0);
+}
+
+static int hclgevf_init_instance(struct hclgevf_dev *hdev,
+ struct hnae3_client *client)
+{
+ int ret;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+
+ if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ struct hnae3_client *rc = hdev->roce_client;
+
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+ ret = rc->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+ }
+ break;
+ case HNAE3_CLIENT_UNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+ break;
+ case HNAE3_CLIENT_ROCE:
+ hdev->roce_client = client;
+ hdev->roce.client = client;
+
+ if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
+ struct hnae3_client *client)
+{
+ /* un-init roce, if it exists */
+ if (hdev->roce_client)
+ hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+
+ /* un-init nic/unic, if this was not called by roce client */
+ if ((client->ops->uninit_instance) &&
+ (client->type != HNAE3_CLIENT_ROCE))
+ client->ops->uninit_instance(&hdev->nic, 0);
+}
+
+static int hclgevf_register_client(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ return hclgevf_init_instance(hdev, client);
+}
+
+static void hclgevf_unregister_client(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_uninit_instance(hdev, client);
+}
+
+static int hclgevf_pci_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_hw *hw;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ goto err_no_drvdata;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+ hw = &hdev->hw;
+ hw->hdev = hdev;
+ hw->io_base = pci_iomap(pdev, 2, 0);
+ if (!hw->io_base) {
+ dev_err(&pdev->dev, "can't map configuration register space\n");
+ ret = -ENOMEM;
+ goto err_clr_master;
+ }
+
+ return 0;
+
+err_clr_master:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_no_drvdata:
+ pci_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pci_iounmap(pdev, hdev->hw.io_base);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
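+/* VF ae device bring-up order: PCI -> MSI/MSI-X -> state/tasks -> misc
+ * IRQ (vector 0) -> command queue -> configuration fetched from the PF;
+ * the error labels at the bottom unwind these steps in reverse.
+ */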
+static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
+ int ret;
+
+ hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->pdev = pdev;
+ hdev->ae_dev = ae_dev;
+ ae_dev->priv = hdev;
+
+ ret = hclgevf_pci_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI initialization failed\n");
+ return ret;
+ }
+
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
+ goto err_irq_init;
+ }
+
+ hclgevf_state_init(hdev);
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ goto err_misc_irq_init;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ ret = hclgevf_configure(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_alloc_tqps(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_set_handle_info(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+ goto err_config;
+ }
+
+ /* Initialize VF's MTA */
+ hdev->accept_mta_mc = true;
+ ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to set mta filter mode\n", ret);
+ goto err_config;
+ }
+
+ /* Initialize RSS for this VF */
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ goto err_config;
+ }
+
+ pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+
+ return 0;
+
+err_config:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
+ hclgevf_misc_irq_uninit(hdev);
+err_misc_irq_init:
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+err_irq_init:
+ hclgevf_pci_uninit(hdev);
+ return ret;
+}
+
+static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_cmd_uninit(hdev);
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ hclgevf_pci_uninit(hdev);
+ ae_dev->priv = NULL;
+}
+
+static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+/**
+ * hclgevf_get_channels - Get the current channels enabled and max supported.
+ * @handle: hardware information for network interface
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void hclgevf_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ch->max_combined = hclgevf_get_max_channels(hdev);
+ ch->other_count = 0;
+ ch->max_other = 0;
+ ch->combined_count = hdev->num_tqps;
+}
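+/* these values back "ethtool -l <ifname>" on a VF netdev: max_combined is
+ * the upper bound a user may request, combined_count the TQPs currently used
+ */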
+
+static const struct hnae3_ae_ops hclgevf_ops = {
+ .init_ae_dev = hclgevf_init_ae_dev,
+ .uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .init_client_instance = hclgevf_register_client,
+ .uninit_client_instance = hclgevf_unregister_client,
+ .start = hclgevf_ae_start,
+ .stop = hclgevf_ae_stop,
+ .map_ring_to_vector = hclgevf_map_ring_to_vector,
+ .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
+ .get_vector = hclgevf_get_vector,
+ .reset_queue = hclgevf_reset_tqp,
+ .set_promisc_mode = hclgevf_set_promisc_mode,
+ .get_mac_addr = hclgevf_get_mac_addr,
+ .set_mac_addr = hclgevf_set_mac_addr,
+ .add_uc_addr = hclgevf_add_uc_addr,
+ .rm_uc_addr = hclgevf_rm_uc_addr,
+ .add_mc_addr = hclgevf_add_mc_addr,
+ .rm_mc_addr = hclgevf_rm_mc_addr,
+ .get_stats = hclgevf_get_stats,
+ .update_stats = hclgevf_update_stats,
+ .get_strings = hclgevf_get_strings,
+ .get_sset_count = hclgevf_get_sset_count,
+ .get_rss_key_size = hclgevf_get_rss_key_size,
+ .get_rss_indir_size = hclgevf_get_rss_indir_size,
+ .get_rss = hclgevf_get_rss,
+ .set_rss = hclgevf_set_rss,
+ .get_tc_size = hclgevf_get_tc_size,
+ .get_fw_version = hclgevf_get_fw_version,
+ .set_vlan_filter = hclgevf_set_vlan_filter,
+ .get_channels = hclgevf_get_channels,
+};
+
+static struct hnae3_ae_algo ae_algovf = {
+ .ops = &hclgevf_ops,
+ .name = HCLGEVF_NAME,
+ .pdev_id_table = ae_algovf_pci_tbl,
+};
+
+static int hclgevf_init(void)
+{
+ pr_info("%s is initializing\n", HCLGEVF_NAME);
+
+ return hnae3_register_ae_algo(&ae_algovf);
+}
+
+static void hclgevf_exit(void)
+{
+ hnae3_unregister_ae_algo(&ae_algovf);
+}
+module_init(hclgevf_init);
+module_exit(hclgevf_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGEVF Driver");
+MODULE_VERSION(HCLGEVF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
new file mode 100644
index 000000000000..a63bee4a3674
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_MAIN_H
+#define __HCLGEVF_MAIN_H
+#include <linux/fs.h>
+#include <linux/types.h>
+#include "hclge_mbx.h"
+#include "hclgevf_cmd.h"
+#include "hnae3.h"
+
+#define HCLGEVF_MOD_VERSION "v1.0"
+#define HCLGEVF_DRIVER_NAME "hclgevf"
+
+#define HCLGEVF_ROCEE_VECTOR_NUM 0
+#define HCLGEVF_MISC_VECTOR_NUM 0
+
+#define HCLGEVF_INVALID_VPORT 0xffff
+
+/* This number actually depends upon the total number of VFs
+ * created by the physical function. But the maximum number of
+ * possible vectors per VF is {VFn(1-32), VECTn(32 + 1)}.
+ */
+#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1)
+
+#define HCLGEVF_VECTOR_REG_BASE 0x20000
+#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400
+#define HCLGEVF_VECTOR_REG_OFFSET 0x4
+#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
+
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event (=MBX event) */
+#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGEVF_TQP_RESET_TRY_TIMES 10
+
+#define HCLGEVF_RSS_IND_TBL_SIZE 512
+#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
+#define HCLGEVF_RSS_KEY_SIZE 40
+#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
+#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
+#define HCLGEVF_RSS_CFG_TBL_NUM \
+ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+
+/* states of hclgevf device & tasks */
+enum hclgevf_states {
+ /* device states */
+ HCLGEVF_STATE_DOWN,
+ HCLGEVF_STATE_DISABLED,
+ /* task states */
+ HCLGEVF_STATE_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_HANDLING,
+};
+
+#define HCLGEVF_MPF_ENBALE 1
+
+struct hclgevf_mac {
+ u8 mac_addr[ETH_ALEN];
+ int link;
+};
+
+struct hclgevf_hw {
+ void __iomem *io_base;
+ int num_vec;
+ struct hclgevf_cmq cmq;
+ struct hclgevf_mac mac;
+ void *hdev; /* hclgevf device it is part of */
+};
+
+/* TQP stats */
+struct hlcgevf_tqp_stats {
+ /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+ /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclgevf_tqp {
+ struct device *dev; /* device for DMA mapping */
+ struct hnae3_queue q;
+ struct hlcgevf_tqp_stats tqp_stats;
+ u16 index; /* global index in a NIC controller */
+
+ bool alloced;
+};
+
+struct hclgevf_cfg {
+ u8 vmdq_vport_num;
+ u8 tc_num;
+ u16 tqp_desc_num;
+ u16 rx_buf_len;
+ u8 phy_addr;
+ u8 media_type;
+ u8 mac_addr[ETH_ALEN];
+ u32 numa_node_map;
+};
+
+struct hclgevf_rss_cfg {
+ u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
+ u32 hash_algo;
+ u32 rss_size;
+ u8 hw_tc_map;
+ u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+};
+
+struct hclgevf_misc_vector {
+ u8 __iomem *addr;
+ int vector_irq;
+};
+
+struct hclgevf_dev {
+ struct pci_dev *pdev;
+ struct hnae3_ae_dev *ae_dev;
+ struct hclgevf_hw hw;
+ struct hclgevf_misc_vector misc_vector;
+ struct hclgevf_rss_cfg rss_cfg;
+ unsigned long state;
+
+ u32 fw_version;
+ u16 num_tqps; /* num task queue pairs of this VF */
+
+ u16 alloc_rss_size; /* allocated RSS task queue */
+ u16 rss_size_max; /* HW defined max RSS task queue */
+
+ u16 num_alloc_vport; /* num vports this driver supports */
+ u32 numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_desc;
+ u8 hw_tc_map;
+
+ u16 num_msi;
+ u16 num_msi_left;
+ u16 num_msi_used;
+ u32 base_msi_vector;
+ u16 *vector_status;
+ int *vector_irq;
+
+ bool accept_mta_mc; /* whether to accept multicast via the MTA filter */
+ struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+
+ struct timer_list service_timer;
+ struct work_struct service_task;
+ struct work_struct mbx_service_task;
+
+ struct hclgevf_tqp *htqp;
+
+ struct hnae3_handle nic;
+ struct hnae3_handle roce;
+
+ struct hnae3_client *nic_client;
+ struct hnae3_client *roce_client;
+ u32 flag;
+};
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+ const u8 *msg_data, u8 msg_len, bool need_resp,
+ u8 *resp_data, u16 resp_len);
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
new file mode 100644
index 000000000000..e39cad285fa9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_mbx.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
+{
+ /* this function should be called with mbx_resp.mbx_mutex held
+ * to protect the received_resp from a race condition
+ */
+ hdev->mbx_resp.received_resp = false;
+ hdev->mbx_resp.origin_mbx_msg = 0;
+ hdev->mbx_resp.resp_status = 0;
+ memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
+}
+
+/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
+ * message to PF.
+ * @hdev: pointer to struct hclgevf_dev
+ * @code0: the code of the original mailbox request
+ * @code1: the subcode of the original mailbox request
+ * @resp_data: buffer to store the response data, may be NULL
+ * @resp_len: length of the resp_data buffer
+ */
+static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ u8 *resp_data, u16 resp_len)
+{
+#define HCLGEVF_MAX_TRY_TIMES 500
+#define HCLGEVF_SLEEP_USECOND 1000
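+ /* poll budget: up to 500 tries of ~1000us each, i.e. roughly 500ms */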
+ struct hclgevf_mbx_resp_status *mbx_resp;
+ u16 r_code0, r_code1;
+ int i = 0;
+
+ if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "VF mbx response len(=%d) exceeds maximum(=%d)\n",
+ resp_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+ udelay(HCLGEVF_SLEEP_USECOND);
+ i++;
+ }
+
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not get mbx resp(=%d) from PF in %d tries\n",
+ hdev->mbx_resp.received_resp, i);
+ return -EIO;
+ }
+
+ mbx_resp = &hdev->mbx_resp;
+ r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
+ r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
+ if (resp_data)
+ memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
+
+ hclgevf_reset_mbx_resp_status(hdev);
+
+ if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp code(code0=%d,code1=%d), %d",
+ code0, code1, mbx_resp->resp_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+ const u8 *msg_data, u8 msg_len, bool need_resp,
+ u8 *resp_data, u16 resp_len)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ /* first two bytes are reserved for code & subcode */
+ if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+ dev_err(&hdev->pdev->dev,
+ "VF send mbx msg fail, msg len %d exceeds max len %d\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EINVAL;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->msg[0] = code;
+ req->msg[1] = subcode;
+ memcpy(&req->msg[2], msg_data, msg_len);
+
+ /* synchronous send */
+ if (need_resp) {
+ mutex_lock(&hdev->mbx_resp.mbx_mutex);
+ hclgevf_reset_mbx_resp_status(hdev);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ return status;
+ }
+
+ status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+ resp_len);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ } else {
+ /* asynchronous send */
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ return status;
+ }
+ }
+
+ return status;
+}
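+/* Usage sketch (illustrative only, not a caller in this patch): a
+ * synchronous request that blocks until the PF answers with two bytes:
+ *
+ *	u8 resp[2];
+ *	int ret;
+ *
+ *	ret = hclgevf_send_mbx_msg(hdev, code, subcode, NULL, 0,
+ *				   true, resp, sizeof(resp));
+ *
+ * With need_resp == false the send is fire-and-forget and only the CMDQ
+ * transmit status is returned.
+ */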
+
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_mbx_resp_status *resp;
+ struct hclge_mbx_pf_to_vf_cmd *req;
+ struct hclgevf_cmq_ring *crq;
+ struct hclgevf_desc *desc;
+ u16 link_status, flag;
+ u8 *temp;
+ int i;
+
+ resp = &hdev->mbx_resp;
+ crq = &hdev->hw.cmq.crq;
+
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+
+ switch (req->msg[0]) {
+ case HCLGE_MBX_PF_VF_RESP:
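+ /* response layout: msg[1]/msg[2] echo the request's
+ * code/subcode, msg[3] carries the status and msg[4..]
+ * the additional response data
+ */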
+ if (resp->received_resp)
+ dev_warn(&hdev->pdev->dev,
+ "VF mbx resp flag not clear(%d)\n",
+ req->msg[1]);
+ resp->received_resp = true;
+
+ resp->origin_mbx_msg = (req->msg[1] << 16);
+ resp->origin_mbx_msg |= req->msg[2];
+ resp->resp_status = req->msg[3];
+
+ temp = (u8 *)&req->msg[4];
+ for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
+ resp->additional_info[i] = *temp;
+ temp++;
+ }
+ break;
+ case HCLGE_MBX_LINK_STAT_CHANGE:
+ link_status = le16_to_cpu(req->msg[1]);
+
+ /* update the upper layer with the new link status */
+ hclgevf_update_link_status(hdev, link_status);
+
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "VF received unsupported(%d) mbx msg from PF\n",
+ req->msg[0]);
+ break;
+ }
+ hclge_mbx_ring_ptr_move_crq(crq);
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ }
+
+ /* Write back the CMDQ RQ (CRQ) head pointer; the M7 needs this pointer */
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 7feff2450ed6..354c0982847b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -199,18 +199,18 @@ static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
- return phy_mode == PHY_MODE_GMII ||
- phy_mode == PHY_MODE_RGMII ||
- phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
+ return phy_interface_mode_is_rgmii(phy_mode) ||
+ phy_mode == PHY_INTERFACE_MODE_GMII ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline int emac_phy_gpcs(int phy_mode)
{
- return phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
+ return phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
@@ -494,6 +494,9 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
case 16384:
ret |= EMAC_MR1_RFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_RFS_8K;
+ break;
case 4096:
ret |= EMAC_MR1_RFS_4K;
break;
@@ -516,6 +519,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_TFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_TFS_8K;
+ break;
case 4096:
ret |= EMAC4_MR1_TFS_4K;
break;
@@ -2865,7 +2871,7 @@ static int emac_init_config(struct emac_instance *dev)
/* PHY mode needs some decoding */
dev->phy_mode = of_get_phy_mode(np);
if (dev->phy_mode < 0)
- dev->phy_mode = PHY_MODE_NA;
+ dev->phy_mode = PHY_INTERFACE_MODE_NA;
/* Check EMAC version */
if (of_device_is_compatible(np, "ibm,emac4sync")) {
@@ -3168,7 +3174,7 @@ static int emac_probe(struct platform_device *ofdev)
printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
ndev->name, dev->cell_index, np, ndev->dev_addr);
- if (dev->phy_mode == PHY_MODE_SGMII)
+ if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
if (dev->phy.address >= 0)
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 5afcc27ceebb..e2f80cca9bed 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -104,19 +104,6 @@ struct emac_regs {
} u1;
};
-/*
- * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
- */
-#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
-#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
-#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
-#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
-#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
-#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
-#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
-#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
-#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
-
/* EMACx_MR0 */
#define EMAC_MR0_RXI 0x80000000
#define EMAC_MR0_TXI 0x40000000
@@ -151,9 +138,11 @@ struct emac_regs {
#define EMAC4_MR1_RFS_2K 0x00100000
#define EMAC4_MR1_RFS_4K 0x00180000
+#define EMAC4_MR1_RFS_8K 0x00200000
#define EMAC4_MR1_RFS_16K 0x00280000
#define EMAC4_MR1_TFS_2K 0x00020000
#define EMAC4_MR1_TFS_4K 0x00030000
+#define EMAC4_MR1_TFS_8K 0x00040000
#define EMAC4_MR1_TFS_16K 0x00050000
#define EMAC4_MR1_TR 0x00008000
#define EMAC4_MR1_MWSW_001 0x00001000
@@ -242,7 +231,7 @@ struct emac_regs {
#define EMAC_STACR_PHYE 0x00004000
#define EMAC_STACR_STAC_MASK 0x00003000
#define EMAC_STACR_STAC_READ 0x00001000
-#define EMAC_STACR_STAC_WRITE 0x00002000
+#define EMAC_STACR_STAC_WRITE 0x00000800
#define EMAC_STACR_OPBC_MASK 0x00000C00
#define EMAC_STACR_OPBC_50 0x00000000
#define EMAC_STACR_OPBC_66 0x00000400
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index 35865d05fccd..aa070c063e48 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -96,7 +96,7 @@ int emac_mii_reset_gpcs(struct mii_phy *phy)
if ((val & BMCR_ISOLATE) && limit > 0)
gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
- if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
+ if (limit > 0 && phy->mode == PHY_INTERFACE_MODE_SGMII) {
/* Configure GPCS interface to recommended setting for SGMII */
gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
@@ -313,16 +313,16 @@ static int cis8201_init(struct mii_phy *phy)
epcr &= ~EPCR_MODE_MASK;
switch (phy->mode) {
- case PHY_MODE_TBI:
+ case PHY_INTERFACE_MODE_TBI:
epcr |= EPCR_TBI_MODE;
break;
- case PHY_MODE_RTBI:
+ case PHY_INTERFACE_MODE_RTBI:
epcr |= EPCR_RTBI_MODE;
break;
- case PHY_MODE_GMII:
+ case PHY_INTERFACE_MODE_GMII:
epcr |= EPCR_GMII_MODE;
break;
- case PHY_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII:
default:
epcr |= EPCR_RGMII_MODE;
}
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index c4a1ac38bba8..00f5999de3cf 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -52,43 +52,28 @@
/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
static inline int rgmii_valid_mode(int phy_mode)
{
- return phy_mode == PHY_MODE_GMII ||
- phy_mode == PHY_MODE_MII ||
- phy_mode == PHY_MODE_RGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
-}
-
-static inline const char *rgmii_mode_name(int mode)
-{
- switch (mode) {
- case PHY_MODE_RGMII:
- return "RGMII";
- case PHY_MODE_TBI:
- return "TBI";
- case PHY_MODE_GMII:
- return "GMII";
- case PHY_MODE_MII:
- return "MII";
- case PHY_MODE_RTBI:
- return "RTBI";
- default:
- BUG();
- }
+ return phy_interface_mode_is_rgmii(phy_mode) ||
+ phy_mode == PHY_INTERFACE_MODE_GMII ||
+ phy_mode == PHY_INTERFACE_MODE_MII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline u32 rgmii_mode_mask(int mode, int input)
{
switch (mode) {
- case PHY_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
return RGMII_FER_RGMII(input);
- case PHY_MODE_TBI:
+ case PHY_INTERFACE_MODE_TBI:
return RGMII_FER_TBI(input);
- case PHY_MODE_GMII:
+ case PHY_INTERFACE_MODE_GMII:
return RGMII_FER_GMII(input);
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return RGMII_FER_MII(input);
- case PHY_MODE_RTBI:
+ case PHY_INTERFACE_MODE_RTBI:
return RGMII_FER_RTBI(input);
default:
BUG();
@@ -115,7 +100,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode)
out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
printk(KERN_NOTICE "%pOF: input %d in %s mode\n",
- ofdev->dev.of_node, input, rgmii_mode_name(mode));
+ ofdev->dev.of_node, input, phy_modes(mode));
++dev->users;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 89c42d362292..fdcc734541fe 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -49,20 +49,20 @@
*/
static inline int zmii_valid_mode(int mode)
{
- return mode == PHY_MODE_MII ||
- mode == PHY_MODE_RMII ||
- mode == PHY_MODE_SMII ||
- mode == PHY_MODE_NA;
+ return mode == PHY_INTERFACE_MODE_MII ||
+ mode == PHY_INTERFACE_MODE_RMII ||
+ mode == PHY_INTERFACE_MODE_SMII ||
+ mode == PHY_INTERFACE_MODE_NA;
}
static inline const char *zmii_mode_name(int mode)
{
switch (mode) {
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return "MII";
- case PHY_MODE_RMII:
+ case PHY_INTERFACE_MODE_RMII:
return "RMII";
- case PHY_MODE_SMII:
+ case PHY_INTERFACE_MODE_SMII:
return "SMII";
default:
BUG();
@@ -72,11 +72,11 @@ static inline const char *zmii_mode_name(int mode)
static inline u32 zmii_mode_mask(int mode, int input)
{
switch (mode) {
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return ZMII_FER_MII(input);
- case PHY_MODE_RMII:
+ case PHY_INTERFACE_MODE_RMII:
return ZMII_FER_RMII(input);
- case PHY_MODE_SMII:
+ case PHY_INTERFACE_MODE_SMII:
return ZMII_FER_SMII(input);
default:
return 0;
@@ -106,27 +106,27 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
* Please, always specify PHY mode in your board port to avoid
* any surprises.
*/
- if (dev->mode == PHY_MODE_NA) {
- if (*mode == PHY_MODE_NA) {
+ if (dev->mode == PHY_INTERFACE_MODE_NA) {
+ if (*mode == PHY_INTERFACE_MODE_NA) {
u32 r = dev->fer_save;
ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
- dev->mode = PHY_MODE_MII;
+ dev->mode = PHY_INTERFACE_MODE_MII;
else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
- dev->mode = PHY_MODE_RMII;
+ dev->mode = PHY_INTERFACE_MODE_RMII;
else
- dev->mode = PHY_MODE_SMII;
- } else
+ dev->mode = PHY_INTERFACE_MODE_SMII;
+ } else {
dev->mode = *mode;
-
+ }
printk(KERN_NOTICE "%pOF: bridge in %s mode\n",
ofdev->dev.of_node,
zmii_mode_name(dev->mode));
} else {
/* All inputs must use the same mode */
- if (*mode != PHY_MODE_NA && *mode != dev->mode) {
+ if (*mode != PHY_INTERFACE_MODE_NA && *mode != dev->mode) {
printk(KERN_ERR
"%pOF: invalid mode %d specified for input %d\n",
ofdev->dev.of_node, *mode, input);
@@ -246,7 +246,7 @@ static int zmii_probe(struct platform_device *ofdev)
mutex_init(&dev->lock);
dev->ofdev = ofdev;
- dev->mode = PHY_MODE_NA;
+ dev->mode = PHY_INTERFACE_MODE_NA;
rc = -ENXIO;
if (of_address_to_resource(np, 0, &regs)) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1dc4aef37d3a..56d6f1a4205a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -59,6 +59,7 @@
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
+#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -410,6 +411,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
struct ibmvnic_rx_pool *rx_pool;
int rx_scrqs;
int i, j, rc;
+ u64 *size_array;
+
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +422,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+ if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+ free_long_term_buff(adapter, &rx_pool->long_term_buff);
+ rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size *
+ rx_pool->buff_size);
+ } else {
+ rc = reset_long_term_buff(adapter,
+ &rx_pool->long_term_buff);
+ }
+
if (rc)
return rc;
@@ -439,14 +454,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
- int rx_scrqs;
int i, j;
if (!adapter->rx_pool)
return;
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- for (i = 0; i < rx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_rx_pools; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +482,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->rx_pool);
adapter->rx_pool = NULL;
+ adapter->num_active_rx_pools = 0;
}
static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +507,8 @@ static int init_rx_pools(struct net_device *netdev)
return -1;
}
+ adapter->num_active_rx_pools = 0;
+
for (i = 0; i < rxadd_subcrqs; i++) {
rx_pool = &adapter->rx_pool[i];
@@ -536,6 +552,8 @@ static int init_rx_pools(struct net_device *netdev)
rx_pool->next_free = 0;
}
+ adapter->num_active_rx_pools = rxadd_subcrqs;
+
return 0;
}
@@ -586,13 +604,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
- int i, tx_scrqs;
+ int i;
if (!adapter->tx_pool)
return;
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
- for (i = 0; i < tx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_tx_pools; i++) {
netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
tx_pool = &adapter->tx_pool[i];
kfree(tx_pool->tx_buff);
@@ -603,6 +620,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
+ adapter->num_active_tx_pools = 0;
}
static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +637,8 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tx_pool)
return -1;
+ adapter->num_active_tx_pools = 0;
+
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
@@ -666,6 +686,8 @@ static int init_tx_pools(struct net_device *netdev)
tx_pool->producer_index = 0;
}
+ adapter->num_active_tx_pools = tx_subcrqs;
+
return 0;
}
@@ -756,6 +778,12 @@ static int ibmvnic_login(struct net_device *netdev)
}
} while (adapter->renegotiate);
+ /* handle pending MAC address changes after successful login */
+ if (adapter->mac_change_pending) {
+ __ibmvnic_set_mac(netdev, &adapter->desired.mac);
+ adapter->mac_change_pending = false;
+ }
+
return 0;
}
@@ -854,7 +882,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (adapter->vpd->buff)
len = adapter->vpd->len;
- reinit_completion(&adapter->fw_done);
+ init_completion(&adapter->fw_done);
crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
crq.get_vpd_size.cmd = GET_VPD_SIZE;
ibmvnic_send_crq(adapter, &crq);
@@ -916,6 +944,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (!adapter->vpd)
return -ENOMEM;
+ /* Vital Product Data (VPD) */
+ rc = ibmvnic_get_vpd(adapter);
+ if (rc) {
+ netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
+ return rc;
+ }
+
adapter->map_id = 1;
adapter->napi = kcalloc(adapter->req_rx_queues,
sizeof(struct napi_struct), GFP_KERNEL);
@@ -989,15 +1024,10 @@ static int __ibmvnic_open(struct net_device *netdev)
static int ibmvnic_open(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int rc, vpd;
+ int rc;
mutex_lock(&adapter->reset_lock);
- if (adapter->mac_change_pending) {
- __ibmvnic_set_mac(netdev, &adapter->desired.mac);
- adapter->mac_change_pending = false;
- }
-
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc) {
@@ -1017,11 +1047,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
netif_carrier_on(netdev);
- /* Vital Product Data (VPD) */
- vpd = ibmvnic_get_vpd(adapter);
- if (vpd)
- netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
-
mutex_unlock(&adapter->reset_lock);
return rc;
@@ -1153,6 +1178,9 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
hdr_len[2] = tcp_hdrlen(skb);
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
+ } else if (skb->protocol == htons(ETH_P_ARP)) {
+ hdr_len[1] = arp_hdr_len(skb->dev);
+ hdr_len[2] = 0;
}
memset(hdr_data, 0, 120);
@@ -1275,6 +1303,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned char *dst;
u64 *handle_array;
int index = 0;
+ u8 proto = 0;
int ret = 0;
if (adapter->resetting) {
@@ -1363,17 +1392,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
}
if (skb->protocol == htons(ETH_P_IP)) {
- if (ip_hdr(skb)->version == 4)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
- else if (ip_hdr(skb)->version == 6)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
-
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
- else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
+ proto = ip_hdr(skb)->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
+ proto = ipv6_hdr(skb)->nexthdr;
}
+ if (proto == IPPROTO_TCP)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
+ else if (proto == IPPROTO_UDP)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
hdrs += 2;
@@ -1386,7 +1416,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
/* determine if l2/3/4 headers are sent to firmware */
if ((*hdrs >> 7) & 1 &&
(skb->protocol == htons(ETH_P_IP) ||
- skb->protocol == htons(ETH_P_IPV6))) {
+ skb->protocol == htons(ETH_P_IPV6) ||
+ skb->protocol == htons(ETH_P_ARP))) {
build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
tx_crq.v1.n_crq_elem = num_entries;
tx_buff->indir_arr[0] = tx_crq;
@@ -1517,25 +1548,29 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
+
+ init_completion(&adapter->fw_done);
ibmvnic_send_crq(adapter, &crq);
+ wait_for_completion(&adapter->fw_done);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
- return 0;
+ return adapter->fw_done_rc ? -EIO : 0;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
+ int rc;
- if (adapter->state != VNIC_OPEN) {
+ if (adapter->state == VNIC_PROBED) {
memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
adapter->mac_change_pending = true;
return 0;
}
- __ibmvnic_set_mac(netdev, addr);
+ rc = __ibmvnic_set_mac(netdev, addr);
- return 0;
+ return rc;
}
/**
@@ -1545,6 +1580,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
+ u64 old_num_rx_queues, old_num_tx_queues;
struct net_device *netdev = adapter->netdev;
int i, rc;
@@ -1554,6 +1590,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+
if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
rc = ibmvnic_reenable_crq_queue(adapter);
if (rc)
@@ -1598,6 +1637,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = init_resources(adapter);
if (rc)
return rc;
+ } else if (adapter->req_rx_queues != old_num_rx_queues ||
+ adapter->req_tx_queues != old_num_tx_queues) {
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ init_rx_pools(netdev);
+ init_tx_pools(netdev);
} else {
rc = reset_tx_pools(adapter);
if (rc)
@@ -2448,6 +2493,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
struct ibmvnic_sub_crq_queue *scrq = instance;
struct ibmvnic_adapter *adapter = scrq->adapter;
+ /* When booting a kdump kernel we can hit pending interrupts
+ * prior to completing driver initialization.
+ */
+ if (unlikely(adapter->state != VNIC_OPEN))
+ return IRQ_NONE;
+
adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
@@ -3345,7 +3396,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
return;
}
+ adapter->ip_offload_ctrl.len =
+ cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
+ adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
+ adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
@@ -3518,8 +3573,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
-static void handle_change_mac_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
+static int handle_change_mac_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->vdev->dev;
@@ -3528,10 +3583,13 @@ static void handle_change_mac_rsp(union ibmvnic_crq *crq,
rc = crq->change_mac_addr_rsp.rc.code;
if (rc) {
dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
- return;
+ goto out;
}
memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
ETH_ALEN);
+out:
+ complete(&adapter->fw_done);
+ return rc;
}
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
@@ -3585,7 +3643,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
- *req_value = be64_to_cpu(crq->request_capability_rsp.number);
+
+ if (be16_to_cpu(crq->request_capability_rsp.capability) ==
+ REQ_MTU) {
+ pr_err("mtu of %llu is not supported. Reverting.\n",
+ *req_value);
+ *req_value = adapter->fallback.mtu;
+ } else {
+ *req_value =
+ be64_to_cpu(crq->request_capability_rsp.number);
+ }
+
ibmvnic_send_req_caps(adapter, 1);
return;
default:
@@ -3981,7 +4049,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
break;
case CHANGE_MAC_ADDR_RSP:
netdev_dbg(netdev, "Got MAC address change Response\n");
- handle_change_mac_rsp(crq, adapter);
+ adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
break;
case ERROR_INDICATION:
netdev_dbg(netdev, "Got Error Indication\n");
@@ -4285,7 +4353,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
- IBMVNIC_MAX_TX_QUEUES);
+ IBMVNIC_MAX_QUEUES);
if (!netdev)
return -ENOMEM;
@@ -4411,7 +4479,7 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(failover, 0200, NULL, failover_store);
+static DEVICE_ATTR_WO(failover);
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4487f1e2c266..fe21a6e2ddae 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,7 +39,7 @@
#define IBMVNIC_RX_WEIGHT 16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL 100
-#define IBMVNIC_MAX_TX_QUEUES 5
+#define IBMVNIC_MAX_QUEUES 10
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
+ u64 num_active_rx_pools;
+ u64 num_active_tx_pools;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index d7bdea79e9fa..8fd2458060a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
- __E1000_DOWN
+ __E1000_DOWN,
+ __E1000_DISABLED
};
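+/* __E1000_DISABLED tracks whether pci_disable_device() has already been
+ * called, so the remove/shutdown/error paths disable the device exactly
+ * once.
+ */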
#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3b3983a1ffbb..dc71e87c3260 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1838,8 +1838,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
p = (char *)adapter + stat->stat_offset;
break;
default:
- WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
- stat->type, i);
+ netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n",
+ stat->type, i);
continue;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1982f7917a8d..3dd4aeb2706d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
- struct e1000_adapter *adapter;
+ struct e1000_adapter *adapter = NULL;
struct e1000_hw *hw;
static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
+ bool disable_dev = false;
/* do not allocate ioport bars when not needed */
need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
- pci_disable_device(pdev);
+ if (!adapter || disable_dev)
+ pci_disable_device(pdev);
return err;
}
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ bool disable_dev;
e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev, adapter->bars);
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
- pci_disable_device(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
}
/**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
return 0;
}
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
pr_err("Cannot enable PCI device from suspend\n");
return err;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
e1000_down(adapter);
- pci_disable_device(pdev);
+
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d6d4ed7acf03..31277d3bb7dc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,6 +1367,9 @@ out:
* Checks to see if the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
+ *
+ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ * up).
**/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
- return 0;
+ return 1;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val)
+ if (ret_val) {
e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
- return ret_val;
+ return 1;
}
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
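With this change e1000_check_for_copper_link_ich8lan() follows a tri-state contract: a negative -E1000_ERR_* value on failure, 0 when the link is down, and 1 when the link is up, so callers can tell "no error, no link" apart from "link up". A hypothetical caller (not taken from the driver) would decode it as:

    s32 ret = e1000_check_for_copper_link_ich8lan(hw);

    if (ret < 0)
            return ret;        /* -E1000_ERR_*: bail out */
    else if (ret)
            ;                  /* 1: link up, speed/duplex now valid */
    else
            ;                  /* 0: link down, nothing to do */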
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9f18d39bdc8f..1298b69f990b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3303,9 +3303,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
- ew32(RXDCTL(0), rxdctl | 0x3);
+ ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8));
}
+ dev_info(&adapter->pdev->dev,
+ "Some CPU C-states have been disabled in order to enable jumbo frames\n");
pm_qos_update_request(&adapter->pm_qos_req, lat);
} else {
pm_qos_update_request(&adapter->pm_qos_req,
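On ICH/PCH parts, jumbo-frame reception cannot tolerate the exit latency of deep package C-states, so the driver tightens its CPU-latency PM QoS request (and now also logs that it did so). The mechanism in isolation, with an illustrative latency value:

    /* Cap the allowed CPU wakeup latency; cpuidle then avoids any
     * C-state whose exit latency exceeds the request. 'lat' is the
     * value derived from the Rx buffer drain time.
     */
    pm_qos_update_request(&adapter->pm_qos_req, lat);   /* e.g. tens of us */

    /* When jumbo frames are turned off again, drop the constraint: */
    pm_qos_update_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE);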
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index ea3ab24265ee..760cfa52d02c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -353,7 +353,7 @@ int fm10k_iov_resume(struct pci_dev *pdev)
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
/* allocate all but the last GLORT to the VFs */
- if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
+ if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
break;
/* assign GLORT to VF, and restrict it to multicast */
@@ -511,7 +511,7 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return err;
/* allocate VFs if not already allocated */
- if (num_vfs && (num_vfs != current_vfs)) {
+ if (num_vfs && num_vfs != current_vfs) {
/* Disable completer abort error reporting as
* the VFs can trigger this any time they read a queue
* that they don't own.
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 538b42d5c187..8e12aae065d8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -446,13 +446,13 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring,
skb->protocol = eth_type_trans(skb, dev);
+ /* Record Rx queue, or update macvlan statistics */
if (!l2_accel)
- return;
-
- /* update MACVLAN statistics */
- macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
- !!(rx_desc->w.hdr_info &
- cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ else
+ macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+ (skb->pkt_type == PACKET_BROADCAST) ||
+ (skb->pkt_type == PACKET_MULTICAST));
}
/**
@@ -479,8 +479,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
- skb_record_rx_queue(skb, rx_ring->queue_index);
-
FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;
if (rx_desc->w.vlan) {
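macvlan_count_rx() takes (vlan, len, success, multicast); the multicast flag now comes from skb->pkt_type, which eth_type_trans() has already derived from the destination MAC, rather than from re-parsing the descriptor's header-info word. The two Rx paths in condensed form:

    skb->protocol = eth_type_trans(skb, dev);       /* sets skb->pkt_type */

    if (!l2_accel)                                  /* ordinary traffic */
            skb_record_rx_queue(skb, rx_ring->queue_index);
    else                                            /* macvlan offload */
            macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
                             skb->pkt_type == PACKET_BROADCAST ||
                             skb->pkt_type == PACKET_MULTICAST);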
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index adc62fb38c49..a38ae5c54da3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -934,8 +934,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
if (vid >= VLAN_N_VID)
return -EINVAL;
- /* Verify we have permission to add VLANs */
- if (hw->mac.vlan_override)
+ /* Verify that we have permission to add VLANs. If this is a request
+ * to remove a VLAN, we still want to allow the user to remove the
+ * VLAN device. In that case, we need to clear the bit in the
+ * active_vlans bitmask.
+ */
+ if (set && hw->mac.vlan_override)
return -EACCES;
/* update active_vlans bitmask */
@@ -954,6 +958,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
rx_ring->vid &= ~FM10K_VLAN_CLEAR;
}
+ /* If our VLAN has been overridden, there is no reason to send VLAN
+ * removal requests as they will be silently ignored.
+ */
+ if (hw->mac.vlan_override)
+ return 0;
+
/* Do not remove default VLAN ID related entries from VLAN and MAC
* tables
*/
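The resulting policy separates local bookkeeping from switch-manager traffic when the PF has overridden the VF's VLAN: adds are rejected up front, while removals still update the local bitmask but skip the hardware request that would be silently ignored. Roughly, as a sketch of the control flow above (the bitmask helper is shorthand for the code shown):

    if (set && hw->mac.vlan_override)
            return -EACCES;          /* adds need privilege */

    update_active_vlans(netdev, vid, set);   /* local state always tracked */

    if (hw->mac.vlan_override)
            return 0;                /* removal: skip futile switch request */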
@@ -1040,14 +1050,13 @@ static int __fm10k_uc_sync(struct net_device *dev,
const unsigned char *addr, bool sync)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
s32 err;
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+ for (vid = fm10k_find_next_vlan(interface, 0);
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = fm10k_queue_mac_request(interface, glort,
@@ -1106,14 +1115,13 @@ static int __fm10k_mc_sync(struct net_device *dev,
const unsigned char *addr, bool sync)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
s32 err;
if (!is_multicast_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+ for (vid = fm10k_find_next_vlan(interface, 0);
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = fm10k_queue_mac_request(interface, glort,
@@ -1157,10 +1165,12 @@ static void fm10k_set_rx_mode(struct net_device *dev)
/* update xcast mode first, but only if it changed */
if (interface->xcast_mode != xcast_mode) {
- /* update VLAN table */
+ /* update VLAN table when entering promiscuous mode */
if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL,
0, true);
+
+ /* clear VLAN table when exiting promiscuous mode */
if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
fm10k_clear_unused_vlans(interface);
@@ -1182,9 +1192,10 @@ static void fm10k_set_rx_mode(struct net_device *dev)
void fm10k_restore_rx_state(struct fm10k_intfc *interface)
{
+ struct fm10k_l2_accel *l2_accel = interface->l2_accel;
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
- int xcast_mode;
+ int xcast_mode, i;
u16 vid, glort;
/* record glort for this interface */
@@ -1211,11 +1222,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0,
xcast_mode == FM10K_XCAST_MODE_PROMISC);
- /* Add filter for VLAN 0 */
- fm10k_queue_vlan_request(interface, 0, 0, true);
-
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+ for (vid = fm10k_find_next_vlan(interface, 0);
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
fm10k_queue_vlan_request(interface, vid, 0, true);
@@ -1234,6 +1242,24 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
+ /* synchronize macvlan addresses */
+ if (l2_accel) {
+ for (i = 0; i < l2_accel->size; i++) {
+ struct net_device *sdev = l2_accel->macvlan[i];
+
+ if (!sdev)
+ continue;
+
+ glort = l2_accel->dglort + 1 + i;
+
+ hw->mac.ops.update_xcast_mode(hw, glort,
+ FM10K_XCAST_MODE_MULTI);
+ fm10k_queue_mac_request(interface, glort,
+ sdev->dev_addr,
+ hw->mac.default_vid, true);
+ }
+ }
+
fm10k_mbx_unlock(interface);
/* record updated xcast mode state */
@@ -1490,7 +1516,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_MULTI);
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- 0, true);
+ hw->mac.default_vid, true);
}
fm10k_mbx_unlock(interface);
@@ -1530,7 +1556,7 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- 0, false);
+ hw->mac.default_vid, false);
}
fm10k_mbx_unlock(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 7f605221a686..a434fecfdfeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
return err;
}
-#ifdef CONFIG_PM
/**
* fm10k_resume - Generic PM resume hook
* @dev: generic device structure
@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
* suspend or hibernation. This function does not need to handle lower PCIe
* device state as the stack takes care of that for us.
**/
-static int fm10k_resume(struct device *dev)
+static int __maybe_unused fm10k_resume(struct device *dev)
{
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
* system suspend or hibernation. This function does not need to handle lower
* PCIe device state as the stack takes care of that for us.
**/
-static int fm10k_suspend(struct device *dev)
+static int __maybe_unused fm10k_suspend(struct device *dev)
{
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
-
/**
* fm10k_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
.id_table = fm10k_pci_tbl,
.probe = fm10k_probe,
.remove = fm10k_remove,
-#ifdef CONFIG_PM
.driver = {
.pm = &fm10k_pm_ops,
},
-#endif /* CONFIG_PM */
.sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler
};
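Replacing the #ifdef CONFIG_PM guards with __maybe_unused is the preferred kernel pattern: the PM callbacks always compile (so they cannot bit-rot), and the optimizer discards them when power management is configured out. Assuming the driver wires its ops up with the usual helper, the shape is:

    static int __maybe_unused fm10k_suspend(struct device *dev)
    {
            /* ... quiesce the interface ... */
            return 0;
    }

    /* SIMPLE_DEV_PM_OPS() itself degrades to an empty table when
     * CONFIG_PM_SLEEP is not set, so .driver.pm needs no #ifdef either.
     */
    static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);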
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 425d814aed4d..d6406fc31ffb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -866,7 +866,7 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
* used here to indicate to the VF that it will not have privilege to
* write VLAN_TABLE. All policy is enforced on the PF but this allows
- * the VF to correctly report errors to userspace rqeuests.
+ * the VF to correctly report errors to userspace requests.
*/
if (vf_info->pf_vid)
vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index e019baa905c5..46e9f4e0a02c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -508,39 +508,40 @@ struct i40e_pf {
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
- u32 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40E_FLAG_MSI_ENABLED BIT(1)
-#define I40E_FLAG_MSIX_ENABLED BIT(2)
-#define I40E_FLAG_RSS_ENABLED BIT(3)
-#define I40E_FLAG_VMDQ_ENABLED BIT(4)
-#define I40E_FLAG_FILTER_SYNC BIT(5)
-#define I40E_FLAG_SRIOV_ENABLED BIT(6)
-#define I40E_FLAG_DCB_CAPABLE BIT(7)
-#define I40E_FLAG_DCB_ENABLED BIT(8)
-#define I40E_FLAG_FD_SB_ENABLED BIT(9)
-#define I40E_FLAG_FD_ATR_ENABLED BIT(10)
-#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT(11)
-#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT(12)
-#define I40E_FLAG_MFP_ENABLED BIT(13)
-#define I40E_FLAG_UDP_FILTER_SYNC BIT(14)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(15)
-#define I40E_FLAG_VEB_MODE_ENABLED BIT(16)
-#define I40E_FLAG_VEB_STATS_ENABLED BIT(17)
-#define I40E_FLAG_LINK_POLLING_ENABLED BIT(18)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(19)
-#define I40E_FLAG_TEMP_LINK_POLLING BIT(20)
-#define I40E_FLAG_LEGACY_RX BIT(21)
-#define I40E_FLAG_PTP BIT(22)
-#define I40E_FLAG_IWARP_ENABLED BIT(23)
-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT(24)
-#define I40E_FLAG_CLIENT_L2_CHANGE BIT(25)
-#define I40E_FLAG_CLIENT_RESET BIT(26)
-#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27)
-#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28)
-#define I40E_FLAG_TC_MQPRIO BIT(29)
-#define I40E_FLAG_FD_SB_INACTIVE BIT(30)
-#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(31)
+ u64 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0)
+#define I40E_FLAG_MSI_ENABLED BIT_ULL(1)
+#define I40E_FLAG_MSIX_ENABLED BIT_ULL(2)
+#define I40E_FLAG_RSS_ENABLED BIT_ULL(3)
+#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(4)
+#define I40E_FLAG_FILTER_SYNC BIT_ULL(5)
+#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(6)
+#define I40E_FLAG_DCB_CAPABLE BIT_ULL(7)
+#define I40E_FLAG_DCB_ENABLED BIT_ULL(8)
+#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(9)
+#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(10)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(11)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(12)
+#define I40E_FLAG_MFP_ENABLED BIT_ULL(13)
+#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(14)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(15)
+#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(16)
+#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(17)
+#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(18)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(19)
+#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(20)
+#define I40E_FLAG_LEGACY_RX BIT_ULL(21)
+#define I40E_FLAG_PTP BIT_ULL(22)
+#define I40E_FLAG_IWARP_ENABLED BIT_ULL(23)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(24)
+#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(25)
+#define I40E_FLAG_CLIENT_RESET BIT_ULL(26)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(27)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(28)
+#define I40E_FLAG_TC_MQPRIO BIT_ULL(29)
+#define I40E_FLAG_FD_SB_INACTIVE BIT_ULL(30)
+#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT_ULL(31)
+#define I40E_FLAG_DISABLE_FW_LLDP BIT_ULL(32)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
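Widening pf->flags to u64 is what makes bit 32 representable at all: BIT(n) expands to (1UL << n), which is undefined behaviour for n >= 32 on 32-bit builds, whereas BIT_ULL(n) expands to (1ULL << n) and is valid up to bit 63 everywhere. Converting every existing flag in the same patch keeps the masking width uniform:

    /* from include/linux/bitops.h (essentially): */
    #define BIT(nr)         (1UL << (nr))    /* only safe below the word size */
    #define BIT_ULL(nr)     (1ULL << (nr))   /* safe for bits 0..63 */

    u64 flags = I40E_FLAG_DISABLE_FW_LLDP;           /* BIT_ULL(32) */
    if (flags & I40E_FLAG_DISABLE_FW_LLDP)
            ;                                        /* full 64-bit test */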
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9af74253c3f7..e78971605e0b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -907,10 +907,15 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
}
asq_send_command_error:
@@ -971,7 +976,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1027,7 +1032,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
+ i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b0188b8f91ba..a852775d3059 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -198,13 +198,14 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@@ -1594,7 +1595,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1605,7 +1606,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1614,8 +1615,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
@@ -2231,8 +2232,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
@@ -2492,6 +2497,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response
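Set DCB (0x0303) is a "direct" admin-queue command: all parameters travel in the descriptor's 16-byte params area, with no separate DMA buffer. I40E_CHECK_CMD_LENGTH() is a compile-time size assertion against those 16 bytes, which is why the unused tail must be spelled out explicitly:

    /* 1 (command) + 1 (valid_flags) + 14 (reserved) == 16 bytes,
     * exactly the size of the raw params area in struct i40e_aq_desc.
     */
    struct i40e_aqc_set_dcb_parameters {
            u8 command;
            u8 valid_flags;
            u8 reserved[14];
    };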
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 1b1e2acbd07f..0de9610c1d8d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -378,11 +378,11 @@ void i40e_client_subtask(struct i40e_pf *pf)
if (!client || !cdev)
return;
- /* Here we handle client opens. If the client is down, but
- * the netdev is up, then open the client.
+ /* Here we handle client opens. If the client is down, and
+ * the netdev is registered, then open the client.
*/
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
- if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
+ if (vsi->netdev_registered &&
client->ops && client->ops->open) {
set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
ret = client->ops->open(&cdev->lan_info, client);
@@ -393,17 +393,19 @@ void i40e_client_subtask(struct i40e_pf *pf)
i40e_client_del_instance(pf);
}
}
- } else {
- /* Likewise for client close. If the client is up, but the netdev
- * is down, then close the client.
- */
- if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
- client->ops && client->ops->close) {
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
- client->ops->close(&cdev->lan_info, client, false);
- i40e_client_release_qvlist(&cdev->lan_info);
- }
}
+
+ /* enable/disable PE TCP_ENA flag based on netdev down/up */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+ 0, 0, 0,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
+ else
+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+ 0, 0,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
}
/**
@@ -717,13 +719,13 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
return -ENOENT;
}
- if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
- (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+ (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
- } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
- !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+ !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 15b21a5315b5..ba55c889e4c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -132,7 +132,7 @@ struct i40e_info {
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
-#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1)
+#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
struct i40e_ops {
/* setup_q_vector_list enables queues with a particular vector */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 095965f268bd..ef5a868aae46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -278,6 +278,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -1486,6 +1488,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1534,6 +1537,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1544,9 +1548,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
- if (mode == I40E_LINK_ACTIVITY)
- blink = false;
-
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
@@ -3465,13 +3466,14 @@ exit:
* @length: length of the section to be written (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
* @cmd_details: pointer to command details structure or NULL
*
* Update the NVM using the admin queue commands
**/
i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -3490,6 +3492,16 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ }
cmd->module_pointer = module_pointer;
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
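The preservation request rides in bits 1-2 of command_flags (shift 1, two-bit field), leaving bit 0 for LAST_CMD and bit 7 for FLASH_ONLY, and it is only honoured on X722 firmware per the check above. The encoding in isolation:

    /* command_flags layout: bit 0 LAST_CMD, bits 1-2 preservation,
     * bit 7 FLASH_ONLY. "Preserve all" == 0x01, "preserve selected"
     * == 0x03, each shifted into place.
     */
    cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
    cmd->command_flags |= I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
                          I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT;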
@@ -3629,7 +3641,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ * @dcb_enable: True if DCB configuration needs to be applied
+ *
+ **/
+enum i40e_status_code
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_dcb_parameters *cmd =
+ (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_dcb_parameters);
+
+ if (dcb_enable) {
+ cmd->valid_flags = I40E_DCB_VALID;
+ cmd->command = I40E_AQ_DCB_SET_AGENT;
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -5236,7 +5275,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -5246,7 +5285,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -5255,7 +5294,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -5271,7 +5310,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -5282,14 +5321,14 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -5364,11 +5403,6 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -5378,7 +5412,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -5397,7 +5431,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -5439,10 +5473,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5f6cf7212d4f..2f5bee713fef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -126,6 +126,10 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx),
+ I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx),
+ I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx),
+ I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx),
I40E_PF_STAT("rx_size_64", stats.rx_size_64),
I40E_PF_STAT("rx_size_127", stats.rx_size_127),
I40E_PF_STAT("rx_size_255", stats.rx_size_255),
@@ -229,6 +233,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
I40E_PRIV_FLAG("disable-source-pruning",
I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
+ I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1585,6 +1590,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL;
+ /* Clear cloned XDP RX-queue info before setup call */
+ memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
/* this is to allow wr32 to have something to write to
* during early allocation of Rx buffers
*/
@@ -2299,6 +2306,8 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
{
+ struct i40e_ring *rx_ring = vsi->rx_rings[queue];
+ struct i40e_ring *tx_ring = vsi->tx_rings[queue];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_q_vector *q_vector;
@@ -2306,26 +2315,26 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
- vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
- vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
+ rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
+ tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
if (ec->use_adaptive_rx_coalesce)
- vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
else
- vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ rx_ring->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
if (ec->use_adaptive_tx_coalesce)
- vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
else
- vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ tx_ring->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
- q_vector = vsi->rx_rings[queue]->q_vector;
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
+ q_vector = rx_ring->q_vector;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
- q_vector = vsi->tx_rings[queue]->q_vector;
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
+ q_vector = tx_ring->q_vector;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
@@ -2740,16 +2749,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
no_input_set:
if (input_set & I40E_L3_SRC_MASK)
- fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF);
+ fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
if (input_set & I40E_L3_DST_MASK)
- fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF);
+ fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
if (input_set & I40E_L4_SRC_MASK)
- fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF);
+ fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
if (input_set & I40E_L4_DST_MASK)
- fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF);
+ fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
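The previous code had the mask widths crossed: an IPv4 address is a 32-bit quantity (__be32, so htonl() of a full 0xFFFFFFFF), while an L4 port is 16-bit (__be16, htons() of 0xFFFF). With htonl(0xFFFF) the reported IP masks covered only half the address, and htons(0xFFFFFFFF) silently truncated. The correct pairing:

    fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);  /* __be32 address mask */
    fsp->m_u.tcp_ip4_spec.psrc   = htons(0xFFFF);      /* __be16 port mask */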
@@ -3800,6 +3809,16 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
i40e_write_fd_input_set(pf, index, new_mask);
+ /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
+ * frames. If we're programming the input set for IPv4/Other, we also
+ * need to program the IPv4/Fragmented input set. Since we don't have
+ * separate support, we'll always assume and enforce that the two flow
+ * types must have matching input sets.
+ */
+ if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ new_mask);
+
/* Add the new offset and update table, if necessary */
if (new_flex_offset) {
err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
@@ -3822,6 +3841,87 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
}
/**
+ * i40e_match_fdir_filter - Return true if two filters match
+ * @a: pointer to filter struct
+ * @b: pointer to filter struct
+ *
+ * Returns true if the two filters match exactly the same criteria. I.e. they
+ * match the same flow type and have the same parameters. We don't need to
+ * check any input-set since all filters of the same flow type must use the
+ * same input set.
+ **/
+static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
+ struct i40e_fdir_filter *b)
+{
+ /* The filters do not match if any of these criteria differ. */
+ if (a->dst_ip != b->dst_ip ||
+ a->src_ip != b->src_ip ||
+ a->dst_port != b->dst_port ||
+ a->src_port != b->src_port ||
+ a->flow_type != b->flow_type ||
+ a->ip4_proto != b->ip4_proto)
+ return false;
+
+ return true;
+}
+
+/**
+ * i40e_disallow_matching_filters - Check that new filters differ
+ * @vsi: pointer to the targeted VSI
+ * @input: new filter to check
+ *
+ * Due to hardware limitations, it is not possible for two filters that match
+ * similar criteria to be programmed at the same time. This is true for a few
+ * reasons:
+ *
+ * (a) all filters matching a particular flow type must use the same input
+ * set, that is they must match the same criteria.
+ * (b) different flow types will never match the same packet, as the flow type
+ * is decided by hardware before checking which rules apply.
+ * (c) hardware has no way to distinguish which order filters apply in.
+ *
+ * Due to this, we can't really support using the location data to order
+ * filters in the hardware parsing. It is technically possible for the user to
+ * request two filters matching the same criteria but which select different
+ * queues. In this case, rather than keep both filters in the list, we reject
+ * the 2nd filter when the user requests adding it.
+ *
+ * This avoids needing to track location for programming the filter to
+ * hardware, and ensures that we avoid some strange scenarios involving
+ * deleting filters which match the same criteria.
+ **/
+static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
+
+ /* Loop through every filter, and check that it doesn't match */
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ /* Don't check the filters match if they share the same fd_id,
+ * since the new filter is actually just updating the target
+ * of the old filter.
+ */
+ if (rule->fd_id == input->fd_id)
+ continue;
+
+ /* If any filters match, then print a warning message to the
+ * kernel message buffer and bail out.
+ */
+ if (i40e_match_fdir_filter(rule, input)) {
+ dev_warn(&pf->pdev->dev,
+ "Existing user defined filter %d already matches this flow.\n",
+ rule->fd_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
* i40e_add_fdir_ethtool - Add/Remove Flow Director filters
* @vsi: pointer to the targeted VSI
* @cmd: command to get or set RX flow classification rules
@@ -3933,19 +4033,25 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->flex_offset = userdef.flex_offset;
}
- ret = i40e_add_del_fdir(vsi, input, true);
+ /* Avoid programming two filters with identical match criteria. */
+ ret = i40e_disallow_matching_filters(vsi, input);
if (ret)
- goto free_input;
+ goto free_filter_memory;
/* Add the input filter to the fdir_input_list, possibly replacing
* a previous filter. Do not free the input structure after adding it
* to the list as this would cause a use-after-free bug.
*/
i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
-
+ ret = i40e_add_del_fdir(vsi, input, true);
+ if (ret)
+ goto remove_sw_rule;
return 0;
-free_input:
+remove_sw_rule:
+ hlist_del(&input->fdir_node);
+ pf->fdir_pf_active_filters--;
+free_filter_memory:
kfree(input);
return ret;
}
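The reordered add path validates before touching any state and unwinds in reverse order on failure: duplicates are rejected first (nothing to undo), the filter is linked into the software list, and only then programmed into hardware; a hardware failure unlinks the node again before freeing it, avoiding both a use-after-free and a stale list entry. A condensed view of the flow above:

    ret = i40e_disallow_matching_filters(vsi, input);
    if (ret)
            goto free_filter_memory;        /* nothing to undo yet */

    i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);

    ret = i40e_add_del_fdir(vsi, input, true);
    if (ret)
            goto remove_sw_rule;            /* roll back the list insert */
    return 0;

    remove_sw_rule:
            hlist_del(&input->fdir_node);
            pf->fdir_pf_active_filters--;
    free_filter_memory:
            kfree(input);
            return ret;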
@@ -4258,7 +4364,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u32 orig_flags, new_flags, changed_flags;
+ u64 orig_flags, new_flags, changed_flags;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
@@ -4309,13 +4415,32 @@ flags_complete:
!(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
return -EOPNOTSUPP;
+ /* Disabling FW LLDP is not supported if NPAR is active or if the FW
+ * API version is < 1.7
+ */
+ if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (pf->hw.func_caps.npar_enable) {
+ dev_warn(&pf->pdev->dev,
+ "Unable to stop FW LLDP if NPAR active\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pf->hw.aq.api_maj_ver < 1 ||
+ (pf->hw.aq.api_maj_ver == 1 &&
+ pf->hw.aq.api_min_ver < 7)) {
+ dev_warn(&pf->pdev->dev,
+ "FW ver does not support stopping FW LLDP\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
/* Compare and exchange the new flags into place. If we failed, that
* is if cmpxchg returns anything but the old value, this means that
* something else has modified the flags variable since we copied it
* originally. We'll just punt with an error and log something in the
* message buffer.
*/
- if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) {
+ if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
dev_warn(&pf->pdev->dev,
"Unable to update pf->flags as it was modified by another thread...\n");
return -EAGAIN;
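cmpxchg64() is the 64-bit counterpart required once pf->flags grew past 32 bits; the copy-modify-compare-exchange sequence detects concurrent writers without holding a lock. The general pattern looks like this (the driver fails with -EAGAIN rather than looping; the transform helper is hypothetical):

    u64 old, new;

    do {
            old = READ_ONCE(pf->flags);
            new = adjust_flags(old);        /* hypothetical transform */
    } while (cmpxchg64(&pf->flags, old, new) != old);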
@@ -4354,12 +4479,37 @@ flags_complete:
}
}
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ struct i40e_dcbx_config *dcbcfg;
+ int i;
+
+ i40e_aq_stop_lldp(&pf->hw, true, NULL);
+ i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
+ /* reset local_dcbx_config to default */
+ dcbcfg = &pf->hw.local_dcbx_config;
+ dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.maxtcs = 0;
+ dcbcfg->etscfg.tcbwtable[0] = 100;
+ for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = 0;
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcbcfg->etscfg.prioritytable[i] = 0;
+ dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
+ dcbcfg->pfc.willing = 1;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ } else {
+ i40e_aq_start_lldp(&pf->hw, NULL);
+ }
+ }
+
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
I40E_FLAG_LEGACY_RX |
- I40E_FLAG_SOURCE_PRUNING_DISABLED))
+ I40E_FLAG_SOURCE_PRUNING_DISABLED |
+ I40E_FLAG_DISABLE_FW_LLDP))
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 321d8be80871..f95ce9b5e4fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -47,8 +47,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+ /* Copy the address first, so that we avoid a possible race with
+ * .set_rx_mode(). If we copy after changing the address in the filter
+ * list, we might open ourselves to a narrow race window where
+ * .set_rx_mode could delete our dev_addr filter and prevent traffic
+ * from passing.
+ */
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
@@ -1811,6 +1818,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
+ /* Do not allow using more TC queue pairs than MSI-X vectors exist */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
+
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* See if the given TC is enabled for the given VSI */
@@ -1923,6 +1934,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ /* Under some circumstances, we might receive a request to delete
+ * our own device address from our uc list. Because we store the
+ * device address in the VSI's MAC/VLAN filter list, we need to ignore
+ * such requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
i40e_del_mac_filter(vsi, addr);
return 0;
@@ -4107,6 +4126,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
q_vector->num_ringpairs = num_ringpairs;
+ q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
q_vector->rx.count = 0;
q_vector->tx.count = 0;
@@ -4862,104 +4882,6 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
#endif
/**
- * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
- * @q_idx: TX queue number
- * @vsi: Pointer to VSI struct
- *
- * This function checks specified queue for given VSI. Detects hung condition.
- * We proactively detect hung TX queues by checking if interrupts are disabled
- * but there are pending descriptors. If it appears hung, attempt to recover
- * by triggering a SW interrupt.
- **/
-static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
-{
- struct i40e_ring *tx_ring = NULL;
- struct i40e_pf *pf;
- u32 val, tx_pending;
- int i;
-
- pf = vsi->back;
-
- /* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
- if (q_idx == vsi->tx_rings[i]->queue_index) {
- tx_ring = vsi->tx_rings[i];
- break;
- }
- }
- }
-
- if (!tx_ring)
- return;
-
- /* Read interrupt register */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
- val = rd32(&pf->hw,
- I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
- tx_ring->vsi->base_vector - 1));
- else
- val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
-
- tx_pending = i40e_get_tx_pending(tx_ring);
-
- /* Interrupts are disabled and TX pending is non-zero,
- * trigger the SW interrupt (don't wait). Worst case
- * there will be one extra interrupt which may result
- * into not cleaning any queues because queues are cleaned.
- */
- if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
- i40e_force_wb(vsi, tx_ring->q_vector);
-}
-
-/**
- * i40e_detect_recover_hung - Function to detect and recover hung_queues
- * @pf: pointer to PF struct
- *
- * LAN VSI has netdev and netdev has TX queues. This function is to check
- * each of those TX queues if they are hung, trigger recovery by issuing
- * SW interrupt.
- **/
-static void i40e_detect_recover_hung(struct i40e_pf *pf)
-{
- struct net_device *netdev;
- struct i40e_vsi *vsi;
- unsigned int i;
-
- /* Only for LAN VSI */
- vsi = pf->vsi[pf->lan_vsi];
-
- if (!vsi)
- return;
-
- /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
- if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
- return;
-
- /* Make sure type is MAIN VSI */
- if (vsi->type != I40E_VSI_MAIN)
- return;
-
- netdev = vsi->netdev;
- if (!netdev)
- return;
-
- /* Bail out if netif_carrier is not OK */
- if (!netif_carrier_ok(netdev))
- return;
-
- /* Go thru' TX queues for netdev */
- for (i = 0; i < netdev->num_tx_queues; i++) {
- struct netdev_queue *q;
-
- q = netdev_get_tx_queue(netdev, i);
- if (q)
- i40e_detect_recover_hung_queue(i, vsi);
- }
-}
-
-/**
* i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
* @pf: pointer to PF
*
@@ -5327,6 +5249,8 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
int ret = 0;
int i;
@@ -5344,10 +5268,40 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+
+ dev_info(&pf->pdev->dev,
"Failed configuring TC map %d for VSI %d\n",
enabled_tc, vsi->seid);
- goto out;
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
+ &bw_config, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed querying vsi bw info, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+ if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
+ u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
+
+ if (!valid_tc)
+ valid_tc = bw_config.tc_valid_bits;
+ /* Always enable TC0, no matter what */
+ valid_tc |= 1;
+ dev_info(&pf->pdev->dev,
+ "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
+ enabled_tc, bw_config.tc_valid_bits, valid_tc);
+ enabled_tc = valid_tc;
+ }
+
+ ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Unable to configure TC map %d for VSI %d\n",
+ enabled_tc, vsi->seid);
+ goto out;
+ }
}
/* Update Queue Pairs Mapping for currently enabled UPs */
@@ -5387,13 +5341,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Update the VSI after updating the VSI queue-mapping
* information
*/
- ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -5403,11 +5356,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Update current VSI BW information */
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"Failed updating vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -6038,8 +5990,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
/* Set Bit 7 to be valid */
mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
- /* Set L4type to both TCP and UDP support */
- mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
+ /* Set L4type for TCP support */
+ mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
/* Set cloud filter mode */
mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6373,8 +6325,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;
- /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
- if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
+ /* Do not enable DCB for SW1 and SW2 images even if the FW is capable
+ * Also do not enable DCBx if FW LLDP agent is disabled
+ */
+ if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
+ (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
goto out;
/* Get the initial DCB configuration */
@@ -6401,6 +6356,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
+ } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
+ pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %s aq_err %s\n",
@@ -6969,18 +6927,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_valid_ether_addr(filter->src_mac)) ||
(is_multicast_ether_addr(filter->dst_mac) &&
is_multicast_ether_addr(filter->src_mac)))
- return -EINVAL;
+ return -EOPNOTSUPP;
- /* Make sure port is specified, otherwise bail out, for channel
- * specific cloud filter needs 'L4 port' to be non-zero
+ /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
+ * ports are not supported via big buffer now.
*/
- if (!filter->dst_port)
- return -EINVAL;
+ if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
+ return -EOPNOTSUPP;
/* adding filter using src_port/src_ip is not supported at this stage */
if (filter->src_port || filter->src_ipv4 ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +6949,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_multicast_ether_addr(filter->src_mac)) {
/* MAC + IP : unsupported mode */
if (filter->dst_ipv4)
- return -EINVAL;
+ return -EOPNOTSUPP;
/* since we validated that L4 port must be valid before
* we get here, start with respective "flags" value
@@ -7356,7 +7314,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
if (tc < 0) {
dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -7490,9 +7448,6 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
{
struct i40e_vsi *vsi = np->vsi;
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return i40e_configure_clsflower(vsi, cls_flower);
@@ -7510,6 +7465,9 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct i40e_netdev_priv *np = cb_priv;
+ if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return i40e_setup_tc_cls_flower(np, type_data);
@@ -7727,6 +7685,9 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
/* Reprogram the default input set for Other/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
@@ -9060,6 +9021,17 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
vsi->uplink_seid);
return ret;
}
+ /* Reconfigure TX queues using QTX_CTL register */
+ ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "failed to configure TX rings for channel %u\n",
+ ch->seid);
+ return ret;
+ }
+ /* update 'next_base_queue' */
+ vsi->next_base_queue = vsi->next_base_queue +
+ ch->num_queue_pairs;
if (ch->max_tx_rate) {
u64 credits = ch->max_tx_rate;
@@ -9263,6 +9235,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
goto end_core_reset;
}
+ /* Enable FW to write a default DCB config on link-up */
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+
#ifdef CONFIG_I40E_DCB
ret = i40e_init_pf_dcb(pf);
if (ret) {
@@ -9680,7 +9655,7 @@ static void i40e_service_task(struct work_struct *work)
if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
- i40e_detect_recover_hung(pf);
+ i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
@@ -10447,10 +10422,9 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
/* set up vector assignment tracking */
size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
pf->irq_pile = kzalloc(size, GFP_KERNEL);
- if (!pf->irq_pile) {
- dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+ if (!pf->irq_pile)
return -ENOMEM;
- }
+
pf->irq_pile->num_entries = vectors;
pf->irq_pile->search_hint = 0;
@@ -10768,8 +10742,13 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
/* Determine the RSS size of the VSI */
if (!vsi->rss_size) {
u16 qcount;
-
- qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ /* If the firmware does something weird during VSI init, we
+ * could end up with zero TCs. Check for that to avoid
+ * divide-by-zero. It probably won't pass traffic, but it also
+ * won't panic.
+ */
+ qcount = vsi->num_queue_pairs /
+ (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
}
if (!vsi->rss_size)
@@ -10957,7 +10936,7 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
ret = i40e_aq_update_nvm(&pf->hw,
I40E_SR_NVM_CONTROL_WORD,
0x10, sizeof(nvm_word),
- &nvm_word, true, NULL);
+ &nvm_word, true, 0, NULL);
/* Save off last admin queue command status before releasing
* the NVM
*/
@@ -11098,13 +11077,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.aq.fw_maj_ver >= 6)
pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
- if (pf->hw.func_caps.vmdq) {
+ if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
- if (pf->hw.func_caps.iwarp) {
+ if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
pf->flags |= I40E_FLAG_IWARP_ENABLED;
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
@@ -13581,6 +13560,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+
+ /* Enable FW to write default DCB config on link-up */
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+
#ifdef CONFIG_I40E_DCB
err = i40e_init_pf_dcb(pf);
if (err) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7689c2ee0d46..76a5cb04e4fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -239,8 +239,9 @@ read_nvm_exit:
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
**/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
bool last_command)
{
i40e_status ret_code = I40E_ERR_NVM;
@@ -389,7 +390,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
i40e_status ret_code;
- u16 read_size = *words;
+ u16 read_size;
bool last_cmd = false;
u16 words_read = 0;
u16 i = 0;
@@ -496,7 +497,8 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, &cmd_details);
+ data, last_command, 0,
+ &cmd_details);
return ret_code;
}
@@ -677,6 +679,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -686,6 +691,12 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+ return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+ I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
+
static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
@@ -703,6 +714,7 @@ static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_STATUS",
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
+ "I40E_NVMUPD_GET_AQ_EVENT",
};
/**
@@ -798,9 +810,9 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* the wait info and return before doing anything else
*/
if (cmd->offset == 0xffff) {
- i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ i40e_nvmupd_clear_wait_state(hw);
status = 0;
- goto exit;
+ break;
}
status = I40E_ERR_NOT_READY;
@@ -815,7 +827,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
-exit:
+
mutex_unlock(&hw->aq.arq_mutex);
return status;
}
@@ -944,6 +956,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
break;
+ case I40E_NVMUPD_GET_AQ_EVENT:
+ status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+ break;
+
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
@@ -1118,38 +1134,53 @@ retry:
}
/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
* @hw: pointer to the hardware structure
- * @opcode: the event that just happened
**/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- if (opcode == hw->nvm_wait_opcode) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = false;
- }
- hw->nvm_wait_opcode = 0;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- default:
- break;
- }
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
+{
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+ if (opcode == hw->nvm_wait_opcode) {
+ memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+ i40e_nvmupd_clear_wait_state(hw);
}
}
@@ -1205,6 +1236,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
else if (module == 0)
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
break;
@@ -1267,6 +1301,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u32 aq_data_len;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return 0;
+
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -1302,6 +1339,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
}
}
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
/* and away we go! */
status = i40e_asq_send_command(hw, aq_desc, buff,
buff_size, &cmd_details);
@@ -1311,6 +1351,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
}
/* should we wait for a followup event? */
@@ -1392,6 +1433,40 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
}
/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+
+ return 0;
+}
+
+/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1486,18 +1561,20 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ u8 preservation_flags;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last,
- &cmd_details);
+ preservation_flags, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3bb6659db822..83798b7841b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -214,7 +214,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
@@ -225,6 +225,10 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
@@ -333,7 +337,9 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -343,6 +349,37 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
return i40e_ptype_lookup[ptype];
}
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the AdminQ link_speed converted into the format used when talking
+ * to virtchnl devices. If the link speed cannot be represented properly,
+ * VIRTCHNL_LINK_SPEED_UNKNOWN is reported.
+ **/
+static inline enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
+
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
@@ -400,13 +437,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5f9cac55aa55..afb72e711d43 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -95,6 +95,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4566d66ffc7c..e554aa6cf070 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -27,6 +27,7 @@
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
+#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
@@ -725,6 +726,59 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
return 0;
}
+/**
+ * i40e_detect_recover_hung - detect hung Tx queues and recover them
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * The VSI's netdev owns the Tx queues. Check each of those Tx queues, and if
+ * one appears hung, trigger recovery by issuing a SW interrupt on it.
+ **/
+void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring = NULL;
+ struct net_device *netdev;
+ unsigned int i;
+ int packets;
+
+ if (!vsi)
+ return;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev)
+ return;
+
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ tx_ring = vsi->tx_rings[i];
+ if (tx_ring && tx_ring->desc) {
+ /* If the packet counter has not changed, the queue
+ * is likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt_ctr would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.packets & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ i40e_force_wb(vsi, tx_ring->q_vector);
+ continue;
+ }
+
+ /* Memory barrier between read of packet count and call
+ * to i40e_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt_ctr =
+ i40e_get_tx_pending(tx_ring) ? packets : -1;
+ }
+ }
+}
+
#define WB_STRIDE 4
/**
@@ -902,7 +956,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
val);
} else {
val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
@@ -929,8 +983,7 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
/* allow 00 to be written to the index */
wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx +
- vsi->base_vector - 1), val);
+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
} else {
u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
@@ -1162,6 +1215,7 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
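+ /* A negative prev_pkt_ctr tells the hung-queue detector there is no pending work */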
+ tx_ring->tx_stats.prev_pkt_ctr = -1;
return 0;
err:
@@ -1236,6 +1290,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
i40e_clean_rx_ring(rx_ring);
+ if (rx_ring->vsi->type == I40E_VSI_MAIN)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring->xdp_prog = NULL;
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
@@ -1256,6 +1312,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ int err = -ENOMEM;
int bi_size;
/* warn if we are about to overwrite the pointer */
@@ -1283,13 +1340,21 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index);
+ if (err < 0)
+ goto err;
+ }
+
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
return 0;
err:
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
- return -ENOMEM;
+ return err;
}
/**
@@ -2068,11 +2133,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false, xdp_xmit = false;
+ struct xdp_buff xdp;
+
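+ /* The rxq backpointer is constant for the ring, so set it once here rather than per packet */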
+ xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
- struct xdp_buff xdp;
unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
@@ -2243,7 +2310,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_hw *hw = &vsi->back->hw;
bool rx = false, tx = false;
u32 rxval, txval;
- int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
@@ -2253,8 +2319,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
return;
}
- vector = (q_vector->v_idx + vsi->base_vector);
-
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
@@ -2303,12 +2367,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
- wr32(hw, INTREG(vector - 1), rxval);
+ wr32(hw, INTREG(q_vector->reg_idx), rxval);
}
enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(vector - 1), txval);
+ wr32(hw, INTREG(q_vector->reg_idx), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
@@ -3047,10 +3111,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* A stale fragment larger than I40E_MAX_DATA_PER_TXD was
+ * split over multiple descriptors, and only its smaller
+ * trailing remainder sits on the trailing edge of the
+ * transmit. Strip the data covered by the leading
+ * descriptors here so that only the remainder is
+ * subtracted at the bottom of the loop.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
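+ /* -(x) & (N - 1) is the number of bytes needed to round
+ * x up to the next N-byte boundary (N a power of two).
+ */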
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -3058,7 +3142,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index fbae1182e2ea..701b708628b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,6 +27,8 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
+#include <net/xdp.h>
+
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
@@ -277,7 +279,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
}
/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
@@ -331,6 +333,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ int prev_pkt_ctr;
};
struct i40e_rx_queue_stats {
@@ -428,6 +431,7 @@ struct i40e_ring {
*/
struct i40e_channel *ch;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -498,6 +502,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring);
+void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 0e8568719b4e..cd294e6a8587 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -402,6 +402,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@@ -421,15 +422,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -611,6 +618,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -1502,19 +1510,19 @@ struct i40e_lldp_variables {
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1526,16 +1534,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1545,8 +1553,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1573,11 +1581,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 36cb8e068e85..e9309fb9084b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -81,12 +81,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
- (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
} else {
pfe.event_data.link_event.link_status =
ls->link_info & I40E_AQ_LINK_UP;
pfe.event_data.link_event.link_speed =
- (enum virtchnl_link_speed)ls->link_speed;
+ i40e_virtchnl_link_speed(ls->link_speed);
}
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
@@ -2749,6 +2749,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
ret = i40e_vc_get_vf_resources_msg(vf, msg);
+ i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 8b0d4b255dea..d1aab6b8bfb1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -837,10 +837,15 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
}
asq_send_command_error:
@@ -901,7 +906,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 06b04572c518..815de8d9c3fb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -198,13 +198,14 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@@ -1562,7 +1563,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1573,7 +1574,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1582,8 +1583,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
@@ -2196,8 +2197,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
@@ -2457,6 +2462,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
/* Apply MIB changes (0x0A07)
* uses the generic struc as it contains no data
*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7d70bf69b249..67bf5cebb76f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -284,6 +284,8 @@ const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -1202,7 +1204,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
}
/**
- * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -1212,7 +1214,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -1221,7 +1223,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
@@ -1237,7 +1239,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -1248,16 +1250,16 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
@@ -1330,11 +1332,6 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -1344,7 +1341,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -1363,7 +1360,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -1405,10 +1402,10 @@ i40evf_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index b624b5994075..47c429931a57 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -131,13 +131,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40evf_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7fa7a41915c1..5b222246e08b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -95,6 +95,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 50864f99446d..357d6051281f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -148,6 +148,59 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
+/**
+ * i40evf_detect_recover_hung - detect hung Tx queues and recover them
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * The VSI's netdev owns the Tx queues. Check each of those Tx queues, and if
+ * one appears hung, trigger recovery by issuing a SW interrupt on it.
+ **/
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring = NULL;
+ struct net_device *netdev;
+ unsigned int i;
+ int packets;
+
+ if (!vsi)
+ return;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev)
+ return;
+
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ for (i = 0; i < vsi->back->num_active_queues; i++) {
+ tx_ring = &vsi->back->tx_rings[i];
+ if (tx_ring && tx_ring->desc) {
+ /* If the packet counter has not changed, the queue
+ * is likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt_ctr would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.packets & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ i40evf_force_wb(vsi, tx_ring->q_vector);
+ continue;
+ }
+
+ /* Memory barrier between read of packet count and call
+ * to i40evf_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt_ctr =
+ i40evf_get_tx_pending(tx_ring, false) ? packets : -1;
+ }
+ }
+}
+
#define WB_STRIDE 4
/**
@@ -316,8 +369,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
- vsi->base_vector - 1), val);
+ I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
q_vector->arm_wb_state = true;
}
@@ -336,7 +388,7 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
/* allow 00 to be written to the index */;
wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
+ I40E_VFINT_DYN_CTLN1(q_vector->reg_idx),
val);
}
@@ -469,6 +521,7 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
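+ /* A negative prev_pkt_ctr tells the hung-queue detector there is no pending work */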
+ tx_ring->tx_stats.prev_pkt_ctr = -1;
return 0;
err:
@@ -1444,12 +1497,9 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_hw *hw = &vsi->back->hw;
bool rx = false, tx = false;
u32 rxval, txval;
- int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
- vector = (q_vector->v_idx + vsi->base_vector);
-
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
@@ -1498,12 +1548,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
- wr32(hw, INTREG(vector - 1), rxval);
+ wr32(hw, INTREG(q_vector->reg_idx), rxval);
}
enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(vector - 1), txval);
+ wr32(hw, INTREG(q_vector->reg_idx), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
@@ -2012,10 +2062,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* A stale fragment larger than I40E_MAX_DATA_PER_TXD was
+ * split over multiple descriptors, and only its smaller
+ * trailing remainder sits on the trailing edge of the
+ * transmit. Strip the data covered by the leading
+ * descriptors here so that only the remainder is
+ * subtracted at the bottom of the loop.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
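+ /* -(x) & (N - 1) is the number of bytes needed to round
+ * x up to the next N-byte boundary (N a power of two).
+ */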
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2023,7 +2093,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8d26c85d12e1..7798a6645c3f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -260,7 +260,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
}
/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
@@ -313,6 +313,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ int prev_pkt_ctr;
u64 tx_lost_interrupt;
};
@@ -467,6 +468,7 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 213b773dfad6..54951c84a481 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -361,6 +361,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@@ -380,15 +381,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -561,6 +568,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -1422,19 +1430,19 @@ enum i40e_reset_type {
#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
I40E_FD_INSET_FLEX_WORD57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1446,16 +1454,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1465,8 +1473,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1493,11 +1501,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index de0af521d602..9690c1ea019e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -114,14 +114,14 @@ struct i40e_q_vector {
struct i40evf_adapter *adapter;
struct i40e_vsi *vsi;
struct napi_struct napi;
- unsigned long reg_idx;
struct i40e_ring_container rx;
struct i40e_ring_container tx;
u32 ring_mask;
u8 num_ringpairs; /* total number of ring pairs in vector */
#define ITR_COUNTDOWN_START 100
u8 itr_countdown; /* when 0 or 1 update ITR */
- int v_idx; /* vector index in list */
+ u16 v_idx; /* index in the vsi->q_vector array. */
+ u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
bool arm_wb_state;
cpumask_t affinity_mask;
@@ -187,6 +187,7 @@ enum i40evf_state_t {
enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
__I40EVF_IN_CLIENT_TASK,
+ __I40EVF_IN_REMOVE_TASK, /* device being removed */
};
/* board specific private data structure */
@@ -199,6 +200,9 @@ struct i40evf_adapter {
wait_queue_head_t down_waitqueue;
struct i40e_q_vector *q_vectors;
struct list_head vlan_filter_list;
+ struct list_head mac_filter_list;
+ /* Lock to protect accesses to MAC and VLAN lists */
+ spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
int num_active_queues;
int num_req_queues;
@@ -206,7 +210,6 @@ struct i40evf_adapter {
/* TX */
struct i40e_ring *tx_rings;
u32 tx_timeout_count;
- struct list_head mac_filter_list;
u32 tx_desc_count;
/* RX */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index da006fa3fec1..e2d8aa19d205 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -512,31 +512,31 @@ static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
struct ethtool_coalesce *ec,
int queue)
{
+ struct i40e_ring *rx_ring = &adapter->rx_rings[queue];
+ struct i40e_ring *tx_ring = &adapter->tx_rings[queue];
struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
u16 vector;
- adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
- adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
+ rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
+ tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
- if (ec->use_adaptive_rx_coalesce)
- adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
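+ /* Set the dynamic ITR bit unconditionally, then toggle it back off if adaptive coalescing is disabled */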
+ rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ if (!ec->use_adaptive_rx_coalesce)
+ rx_ring->rx_itr_setting ^= I40E_ITR_DYNAMIC;
- if (ec->use_adaptive_tx_coalesce)
- adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ if (!ec->use_adaptive_tx_coalesce)
+ tx_ring->tx_itr_setting ^= I40E_ITR_DYNAMIC;
- q_vector = adapter->rx_rings[queue].q_vector;
- q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
+ q_vector = rx_ring->q_vector;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
- q_vector = adapter->tx_rings[queue].q_vector;
- q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
+ q_vector = tx_ring->q_vector;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7b2a4eba92e2..16989ad2ca90 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -45,8 +45,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -276,37 +276,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
if (mask & BIT(i - 1)) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
- }
- }
-}
-
-/**
- * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
- * @adapter: board private structure
- * @mask: bitmap of vectors to trigger
- **/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
-{
- struct i40e_hw *hw = &adapter->hw;
- int i;
- u32 dyn_ctl;
-
- if (mask & 1) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
- }
- for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & BIT(i)) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
}
@@ -337,15 +307,10 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
struct net_device *netdev = data;
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
- u32 val;
/* handle non-queue interrupts, these reads clear the registers */
- val = rd32(hw, I40E_VFINT_ICR01);
- val = rd32(hw, I40E_VFINT_ICR0_ENA1);
-
- val = rd32(hw, I40E_VFINT_DYN_CTL01) |
- I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, val);
+ rd32(hw, I40E_VFINT_ICR01);
+ rd32(hw, I40E_VFINT_ICR0_ENA1);
/* schedule work on the private workqueue */
schedule_work(&adapter->adminq_task);
@@ -706,7 +671,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
* @adapter: board private structure
* @vlan: vlan tag
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
@@ -731,14 +697,8 @@ static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f = NULL;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- goto out;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (!f) {
@@ -755,8 +715,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
}
clearout:
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-out:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -768,21 +727,16 @@ out:
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
@@ -824,7 +778,8 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
* @adapter: board private structure
* @macaddr: the MAC address
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
@@ -854,26 +809,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
u8 *macaddr)
{
struct i40evf_mac_filter *f;
- int count = 50;
if (!macaddr)
return NULL;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return NULL;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_filter(adapter, macaddr);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f) {
- clear_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section);
- return NULL;
- }
+ if (!f)
+ goto clearout;
ether_addr_copy(f->macaddr, macaddr);
@@ -884,7 +830,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
f->remove = false;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+clearout:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -911,12 +858,16 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
return -EPERM;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -937,7 +888,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
- int count = 50;
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
@@ -947,16 +897,8 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
i40evf_add_filter(adapter, mca->addr);
}
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0) {
- dev_err(&adapter->pdev->dev,
- "Failed to get lock in %s\n", __func__);
- return;
- }
- }
- /* remove filter if not in netdev list */
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
netdev_for_each_mc_addr(mca, netdev)
if (ether_addr_equal(mca->addr, f->macaddr))
@@ -995,7 +937,7 @@ bottom_of_search_loop:
adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
@@ -1058,6 +1000,8 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
/**
* i40evf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
@@ -1075,6 +1019,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
/**
* i40e_down - Shutdown the connection processing
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
void i40evf_down(struct i40evf_adapter *adapter)
{
@@ -1084,16 +1030,14 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
- usleep_range(500, 1000);
-
netif_carrier_off(netdev);
netif_tx_disable(netdev);
adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -1102,6 +1046,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
f->remove = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
@@ -1115,7 +1062,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
@@ -1441,6 +1387,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
+ q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
i40evf_napi_poll, NAPI_POLL_WEIGHT);
@@ -1770,13 +1717,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
watchdog_done:
- if (adapter->state == __I40EVF_RUNNING) {
- i40evf_irq_enable_queues(adapter, ~0);
- i40evf_fire_sw_int(adapter, 0xFF);
- } else {
- i40evf_fire_sw_int(adapter, 0x1);
- }
-
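+ /* Rather than firing SW interrupts on every watchdog tick, only kick Tx queues that appear hung */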
+ if (adapter->state == __I40EVF_RUNNING)
+ i40evf_detect_recover_hung(&adapter->vsi);
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
@@ -1796,7 +1738,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ if (adapter->state == __I40EVF_RUNNING) {
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
@@ -1808,6 +1754,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
i40evf_free_all_rx_resources(adapter);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* Delete all of the filters, both MAC and VLAN. */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
list_del(&f->list);
@@ -1819,6 +1767,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
kfree(fv);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1854,6 +1804,13 @@ static void i40evf_reset_task(struct work_struct *work)
struct i40evf_mac_filter *f;
u32 reg_val;
int i = 0, err;
+ bool running;
+
+ /* When the device is being removed it doesn't make sense to run the
+ * reset task; just return in that case.
+ */
+ if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
+ return;
while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
@@ -1913,7 +1870,13 @@ static void i40evf_reset_task(struct work_struct *work)
}
continue_reset:
- if (netif_running(netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ running = (adapter->state == __I40EVF_RUNNING);
+
+ if (running) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -1948,6 +1911,8 @@ continue_reset:
adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
@@ -1956,15 +1921,19 @@ continue_reset:
list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
vlf->add = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
- clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
- if (netif_running(adapter->netdev)) {
+ /* We were running when the reset started, so we need to restore some
+ * state here.
+ */
+ if (running) {
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
if (err)
@@ -1993,9 +1962,13 @@ continue_reset:
adapter->state = __I40EVF_DOWN;
wake_up(&adapter->down_waitqueue);
}
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return;
reset_err:
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(netdev);
}
@@ -2239,8 +2212,14 @@ static int i40evf_open(struct net_device *netdev)
return -EIO;
}
- if (adapter->state != __I40EVF_DOWN)
- return -EBUSY;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
+ if (adapter->state != __I40EVF_DOWN) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
@@ -2264,6 +2243,8 @@ static int i40evf_open(struct net_device *netdev)
i40evf_irq_enable(adapter, true);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
return 0;
err_req_irq:
@@ -2273,6 +2254,8 @@ err_setup_rx:
i40evf_free_all_rx_resources(adapter);
err_setup_tx:
i40evf_free_all_tx_resources(adapter);
+err_unlock:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return err;
}
@@ -2296,6 +2279,9 @@ static int i40evf_close(struct net_device *netdev)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return 0;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
@@ -2305,6 +2291,8 @@ static int i40evf_close(struct net_device *netdev)
adapter->state = __I40EVF_DOWN_PENDING;
i40evf_free_traffic_irqs(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
* i40evf_virtchnl_completion() after we get confirmation from the PF
@@ -2357,13 +2345,19 @@ static int i40evf_set_features(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- if (!VLAN_ALLOWED(adapter))
+ /* Don't allow changing the VLAN_RX flag when a VLAN is already
+ * set for the VF; return an error in that case
+ */
+ if (VLAN_ALLOWED(adapter)) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ adapter->aq_required |=
+ I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ else
+ adapter->aq_required |=
+ I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+ } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
return -EINVAL;
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
- else
- adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+ }
return 0;
}
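The rewritten i40evf_set_features() uses the XOR-of-feature-masks idiom to detect whether a given flag is actually being toggled. As a standalone illustration:

	/* a set bit in "changed" means that feature's state differs */
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		/* the caller is trying to toggle Rx VLAN stripping */
	}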
@@ -2943,6 +2937,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
+ spin_lock_init(&adapter->mac_vlan_list_lock);
+
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2985,6 +2981,10 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(netdev);
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
if (netif_running(netdev)) {
rtnl_lock();
i40evf_down(adapter);
@@ -2993,6 +2993,8 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
retval = pci_save_state(pdev);
if (retval)
return retval;
@@ -3066,7 +3068,8 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
int err;
-
+ /* Indicate we are in remove so that the reset task does not run */
+ set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->client_task);
@@ -3101,8 +3104,6 @@ static void i40evf_remove(struct pci_dev *pdev)
if (adapter->watchdog_timer.function)
del_timer_sync(&adapter->watchdog_timer);
- flush_scheduled_work();
-
i40evf_free_rss(adapter);
if (hw->aq.asq.count)
@@ -3118,6 +3119,7 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
kfree(adapter->vf_res);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
/* If we got removed before an up/down sequence, we've got a filter
* hanging out there that we need to get rid of.
*/
@@ -3130,6 +3132,8 @@ static void i40evf_remove(struct pci_dev *pdev)
kfree(f);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 46c8b8a3907c..50ce0d6c09ef 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -433,12 +433,16 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
@@ -456,8 +460,10 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -472,6 +478,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
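The same locking pattern repeats through these virtchnl hunks: mark filter-list entries under mac_vlan_list_lock and release the lock before messaging the PF. A condensed sketch, assuming the adapter fields used above:

	struct i40evf_mac_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry(f, &adapter->mac_filter_list, list)
		f->add = true;
	/* any allocation made while the lock is held must not sleep */
	spin_unlock_bh(&adapter->mac_vlan_list_lock);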
@@ -498,12 +507,16 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
@@ -520,8 +533,10 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -537,6 +552,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
@@ -564,12 +582,15 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
@@ -586,8 +607,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -602,6 +625,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -628,12 +654,15 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
@@ -650,8 +679,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -667,6 +698,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -705,8 +739,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
}
if (!flags) {
- adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
+ I40EVF_FLAG_ALLMULTI_ON);
+ adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
+ I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
@@ -965,23 +1001,34 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode == VIRTCHNL_OP_EVENT) {
struct virtchnl_pf_event *vpe =
(struct virtchnl_pf_event *)msg;
+ bool link_up = vpe->event_data.link_event.link_status;
switch (vpe->event) {
case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed =
vpe->event_data.link_event.link_speed;
- if (adapter->link_up !=
- vpe->event_data.link_event.link_status) {
- adapter->link_up =
- vpe->event_data.link_event.link_status;
- if (adapter->link_up) {
- netif_tx_start_all_queues(netdev);
- netif_carrier_on(netdev);
- } else {
- netif_tx_stop_all_queues(netdev);
- netif_carrier_off(netdev);
- }
- i40evf_print_link_message(adapter);
+
+ /* we've already got the right link status, bail */
+ if (adapter->link_up == link_up)
+ break;
+
+ /* If we get a link up message and start the queues before
+ * they are configured, it will trigger a TX hang. In that
+ * case, just ignore the link status message; we'll get
+ * another one after we enable queues and are actually
+ * prepared to send traffic.
+ */
+ if (link_up && adapter->state != __I40EVF_RUNNING)
+ break;
+
+ adapter->link_up = link_up;
+ if (link_up) {
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
}
+ i40evf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "PF reset warning received\n");
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 92845692087a..1c6b8d9176a8 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -690,6 +690,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index d06a8db514d4..606e6761758f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3338,37 +3338,7 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
static unsigned int igb_max_channels(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
- unsigned int max_combined = 0;
-
- switch (hw->mac.type) {
- case e1000_i211:
- max_combined = IGB_MAX_RX_QUEUES_I211;
- break;
- case e1000_82575:
- case e1000_i210:
- max_combined = IGB_MAX_RX_QUEUES_82575;
- break;
- case e1000_i350:
- if (!!adapter->vfs_allocated_count) {
- max_combined = 1;
- break;
- }
- /* fall through */
- case e1000_82576:
- if (!!adapter->vfs_allocated_count) {
- max_combined = 2;
- break;
- }
- /* fall through */
- case e1000_82580:
- case e1000_i354:
- default:
- max_combined = IGB_MAX_RX_QUEUES;
- break;
- }
-
- return max_combined;
+ return igb_get_max_rss_queues(adapter);
}
static void igb_get_channels(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c208753ff5b7..b88fae785369 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1744,6 +1744,20 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* value = idleSlope * 61034
* ----------------- (E6)
* 1000000
+ *
+ * NOTE: For i210, given the above, we can see that idleslope
+ * is represented in 16.38431 kbps units by the value at
+ * the TQAVCC register (1Gbps / 61034), which reduces
+ * the granularity for idleslope increments.
+ * For instance, if you want to configure a 2576 kbps
+ * idleslope, the value to be written to the register
+ * would have to be 157.23. If rounded down, you end
+ * up with less bandwidth available than originally
+ * required (~2572 kbps). If rounded up, you end up
+ * with a higher bandwidth (~2589 kbps). The approach
+ * we take below is to always round up the calculated
+ * value, so the resulting bandwidth might be slightly
+ * higher for some configurations.
*/
value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
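Working the example from the NOTE above: DIV_ROUND_UP(2576 * 61034, 1000000) = DIV_ROUND_UP(157223584, 1000000) = 158, and 158 units of ~16.38431 kbps come to roughly 2588.7 kbps, matching the ~2589 kbps figure. As a minimal check of the arithmetic:

	/* worked example for the NOTE above: 2576 kbps on i210 */
	u32 value = DIV_ROUND_UP_ULL(2576ULL * 61034ULL, 1000000); /* 158 */
	/* effective bandwidth: 158 * 1000000 / 61034 ~= 2588.7 kbps */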
@@ -3200,8 +3214,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
/* if allocation failed then we do not support SR-IOV */
if (!adapter->vf_data) {
adapter->vfs_allocated_count = 0;
- dev_err(&pdev->dev,
- "Unable to allocate memory for VF Data Storage\n");
err = -ENOMEM;
goto out;
}
@@ -3373,10 +3385,10 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
#endif /* CONFIG_PCI_IOV */
}
-static void igb_init_queue_configuration(struct igb_adapter *adapter)
+unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 max_rss_queues;
+ unsigned int max_rss_queues;
/* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
@@ -3407,6 +3419,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
break;
}
+ return max_rss_queues;
+}
+
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
+{
+ u32 max_rss_queues;
+
+ max_rss_queues = igb_get_max_rss_queues(adapter);
adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
igb_set_flag_queue_pairs(adapter, max_rss_queues);
@@ -3676,7 +3696,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
int igb_close(struct net_device *netdev)
{
- if (netif_device_present(netdev))
+ if (netif_device_present(netdev) || netdev->dismantle)
return __igb_close(netdev, false);
return 0;
}
@@ -8718,7 +8738,8 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
/* Indicate to hardware the Address is Valid. */
if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
- rar_high |= E1000_RAH_AV;
+ if (is_valid_ether_addr(addr))
+ rar_high |= E1000_RAH_AV;
if (hw->mac.type == e1000_82575)
rar_high |= E1000_RAH_POOL_1 *
@@ -8756,17 +8777,36 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+
+ /* Setting the VF MAC to 0 clears the IGB_VF_FLAG_PF_SET_MAC
+ * flag and allows the MAC to be overwritten via the VF netdev.
+ * This gives libvirt a way to restore the original MAC after
+ * unbinding vfio-pci and reloading igbvf after shutting down
+ * a VM.
+ */
+ if (is_zero_ether_addr(mac)) {
+ adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev,
+ "remove administratively set MAC on VF %d\n",
+ vf);
+ } else if (is_valid_ether_addr(mac)) {
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
+ mac, vf);
+ dev_info(&adapter->pdev->dev,
+ "Reload the VF driver to make this change effective.");
+ /* Generate additional warning if PF is down */
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF MAC address has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before attempting to use the VF device.\n");
+ }
+ } else {
return -EINVAL;
- adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
- dev_info(&adapter->pdev->dev,
- "Reload the VF driver to make this change effective.");
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev,
- "The VF MAC address has been set, but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before attempting to use the VF device.\n");
}
return igb_set_vf_mac(adapter, vf, mac);
}
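In practice this lets an administrator release a previously pinned VF MAC from the PF side with something like "ip link set <pf> vf 0 mac 00:00:00:00:00:00"; after the VF driver is reloaded, the VF is free to choose its own address again.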
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 841c2a083349..0746b19ec6d3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -643,6 +643,10 @@ static void igb_ptp_tx_work(struct work_struct *work)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
+ /* Clear the Tx valid bit in the TSYNCTXCTL register to
+ * enable the interrupt
+ */
+ rd32(E1000_TXSTMPH);
dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
return;
}
@@ -717,6 +721,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
*/
void igb_ptp_tx_hang(struct igb_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
IGB_PTP_TX_TIMEOUT);
@@ -736,6 +741,10 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
+ /* Clear the Tx valid bit in the TSYNCTXCTL register to
+ * enable the interrupt
+ */
+ rd32(E1000_TXSTMPH);
dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 35e6fa643c7e..8319465eb38d 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -42,3 +42,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
+ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 468c3555a629..c1e3a0039ea5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -52,7 +52,9 @@
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
+#include "ixgbe_ipsec.h"
+#include <net/xdp.h>
#include <net/busy_poll.h>
/* common prefix used by pr_<> macros */
@@ -170,10 +172,11 @@ enum ixgbe_tx_flags {
IXGBE_TX_FLAGS_CC = 0x08,
IXGBE_TX_FLAGS_IPV4 = 0x10,
IXGBE_TX_FLAGS_CSUM = 0x20,
+ IXGBE_TX_FLAGS_IPSEC = 0x40,
/* software defined flags */
- IXGBE_TX_FLAGS_SW_VLAN = 0x40,
- IXGBE_TX_FLAGS_FCOE = 0x80,
+ IXGBE_TX_FLAGS_SW_VLAN = 0x80,
+ IXGBE_TX_FLAGS_FCOE = 0x100,
};
/* VLAN info */
@@ -332,7 +335,6 @@ struct ixgbe_ring {
struct net_device *netdev; /* netdev ring belongs to */
struct bpf_prog *xdp_prog;
struct device *dev; /* device for DMA mapping */
- struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -371,6 +373,7 @@ struct ixgbe_ring {
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
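Each Rx ring now embeds a struct xdp_rxq_info. In kernels of this vintage the ring is expected to register it against the netdev and queue index before use; roughly, as a sketch assuming the three-argument xdp_rxq_info_reg() of this era:

	err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
			       rx_ring->queue_index);
	if (err)
		goto err_out;	/* hypothetical error label */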
@@ -395,8 +398,7 @@ enum ixgbe_ring_f_enum {
#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
-#define IXGBE_MAX_MACVLANS 31
-#define IXGBE_MAX_DCBMACVLANS 8
+#define IXGBE_MAX_MACVLANS 63
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
@@ -629,15 +631,18 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
+#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
/* Tx fast path data */
int num_tx_queues;
u16 tx_itr_setting;
u16 tx_work_limit;
+ u64 tx_ipsec;
/* Rx fast path data */
int num_rx_queues;
u16 rx_itr_setting;
+ u64 rx_ipsec;
/* Port number used to identify VXLAN traffic */
__be16 vxlan_port;
@@ -674,6 +679,7 @@ struct ixgbe_adapter {
struct ieee_ets *ixgbe_ieee_ets;
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
+ u8 hw_tcs;
u8 dcb_set_bitmap;
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
@@ -721,8 +727,7 @@ struct ixgbe_adapter {
u16 bridge_mode;
- u16 eeprom_verh;
- u16 eeprom_verl;
+ char eeprom_id[NVM_VER_SIZE];
u16 eeprom_cap;
u32 interrupt_event;
@@ -766,7 +771,8 @@ struct ixgbe_adapter {
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
- unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
+ /* Bitmask indicating in use pools */
+ DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
#define IXGBE_MAX_LINK_HANDLE 10
struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
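Switching fwd_bitmask from a bare unsigned long to DECLARE_BITMAP() lets the pool count grow past the width of a single long. A sketch of how such a bitmap is typically searched and claimed, using the standard bitmap API (hypothetical usage):

	unsigned long pool;

	pool = find_first_zero_bit(adapter->fwd_bitmask,
				   IXGBE_MAX_MACVLANS + 1);
	if (pool <= IXGBE_MAX_MACVLANS)
		set_bit(pool, adapter->fwd_bitmask);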
@@ -780,6 +786,10 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
+
+#ifdef CONFIG_XFRM
+ struct ixgbe_ipsec *ipsec;
+#endif /* CONFIG_XFRM */
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -1010,4 +1020,24 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+#ifdef CONFIG_XFRM_OFFLOAD
+void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
+void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
+void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
+void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd);
+#else
+static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb) { };
+static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd) { return 0; };
+#endif /* CONFIG_XFRM_OFFLOAD */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 8a32eb7d47b9..a0ebd9ecf243 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -431,6 +431,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
/**
* ixgbe_start_mac_link_82598 - Configures MAC link settings
* @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
@@ -1054,7 +1055,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
**/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index d602637ccc40..4dfc81dbee4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -221,7 +221,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
/**
* prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
* @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
+ * @autoc: value to write to AUTOC
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous proc_autoc_read_82599.
*
@@ -1310,10 +1310,11 @@ do { \
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
- * @stream: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
*
* This function is almost identical to the function above but contains
- * several optomizations such as unwinding all of the loops, letting the
+ * several optimizations such as unwinding all of the loops, letting the
* compiler work out all of the conditional ifs since the keys are static
* defines, and computing two keys at once since the hashed dword stream
* will be the same for both keys.
@@ -1446,7 +1447,7 @@ do { \
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
- * @atr_input: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
* @input_mask: mask for the input bitstream
*
* This function serves two main purposes. First it applies the input_mask
@@ -2078,6 +2079,7 @@ reset_pipeline_out:
* ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2131,6 +2133,7 @@ release_i2c_access:
* ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bef255f6a18..61188f343955 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1613,6 +1613,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
/**
* ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
* @hw: pointer to hardware structure
+ * @count: number of bits to shift
**/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
@@ -1667,7 +1668,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
/**
* ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
* @hw: pointer to hardware structure
- * @eecd: EECD's current value
+ * @eec: EEC's current value
**/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
@@ -2037,7 +2038,7 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
/**
* ixgbe_set_mta - Set bit-vector in multicast table
* @hw: pointer to hardware structure
- * @hash_value: Multicast address hash value
+ * @mc_addr: Multicast address
*
* Sets the bit-vector in the multicast table.
**/
@@ -3086,6 +3087,8 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
+ * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
+ * vlanid not found
*
* return the VLVF index where this VLAN id should be placed
*
@@ -3476,7 +3479,7 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
* ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for VLAN anti-spoofing
- * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
*
**/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
@@ -4028,6 +4031,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbe_get_orom_version - Return option ROM version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid option ROM version is found, nvm_ver->or_valid is set
+ * to true, else nvm_ver->or_valid is false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+ nvm_ver->or_valid = false;
+ /* Option ROM may or may not be present. Start with pointer */
+ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+ /* make sure offset is valid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+ /* option rom exists and is valid */
+ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+ eeprom_cfg_blkl == NVM_VER_INVALID ||
+ eeprom_cfg_blkh == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->or_valid = true;
+ nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+ nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+ (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+ nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
+/**
+ * ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid OEM product version is found, nvm_ver->oem_valid is set
+ * to true, else nvm_ver->oem_valid is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 rel_num, prod_ver, mod_len, cap, offset;
+
+ nvm_ver->oem_valid = false;
+ hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+ /* Return if offset to OEM Product Version block is invalid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ /* Read product version block */
+ hw->eeprom.ops.read(hw, offset, &mod_len);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+ /* Return if OEM product version block is invalid */
+ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+ (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+ /* Return if version is invalid */
+ if ((rel_num | prod_ver) == 0x0 ||
+ rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+ nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+ nvm_ver->oem_release = rel_num;
+ nvm_ver->oem_valid = true;
+}
+
+/**
+ * ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * Word read errors will return 0xFFFF
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 etk_id_l, etk_id_h;
+
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+ etk_id_l = NVM_VER_INVALID;
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+ etk_id_h = NVM_VER_INVALID;
+
+ /* The word order for the version format is determined by high order
+ * word bit 15.
+ */
+ if ((etk_id_h & NVM_ETK_VALID) == 0) {
+ nvm_ver->etk_id = etk_id_h;
+ nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+ } else {
+ nvm_ver->etk_id = etk_id_l;
+ nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+ }
+}
+
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
u32 rxctrl;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index a01409e2e06c..4d4c02366cb3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 072ef3b5fc61..aaea8282bfd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -39,6 +39,10 @@
* are the smallest unit programmable into the underlying
* hardware. The IEEE 802.1Qaz specification do not use bandwidth
* groups so this is much simplified from the CEE case.
+ * @bw: bandwidth index by traffic class
+ * @refill: refill credits index by traffic class
+ * @max: max credits by traffic class
+ * @max_frame: maximum frame size
*/
static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
@@ -72,8 +76,10 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
/**
* ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
- * @ixgbe_dcb_config: Struct containing DCB settings.
- * @direction: Configuring either Tx or Rx.
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame: Maximum frame size
+ * @direction: Configuring either Tx or Rx
*
* This function calculates the credits allocated to each traffic class.
* It should be called only after the rules are checked by
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index b79e93a5b699..f94c7e82a30b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -34,7 +34,9 @@
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
@@ -91,7 +93,10 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
@@ -137,7 +142,10 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
@@ -184,7 +192,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control for each traffic class.
*/
@@ -269,7 +277,11 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 1011d644978f..1eed6811e914 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -38,6 +38,7 @@
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
@@ -148,6 +149,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
@@ -344,11 +346,12 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
* @refill: refill credits index by traffic class
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
- * @pfc_en: enabled pfc bitmask
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 78c52375acc6..b33f3f87e4b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -571,7 +571,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
return -EINVAL;
- if (max_tc != netdev_get_num_tc(dev)) {
+ if (max_tc != adapter->hw_tcs) {
err = ixgbe_setup_tc(dev, max_tc);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 5e2c1e35e517..ad54080488ee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -249,7 +249,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
- * @pf: the pf that is stopping
+ * @adapter: the adapter that is exiting
**/
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0aad1c2a3667..221f15803480 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -115,6 +115,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
+ {"tx_ipsec", IXGBE_STAT(tx_ipsec)},
+ {"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -1014,16 +1016,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 nvm_track_id;
strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ixgbe_driver_version,
sizeof(drvinfo->version));
- nvm_track_id = (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl;
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- nvm_track_id);
+ strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
@@ -1156,6 +1155,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
+ /* Clear copied XDP RX-queue info */
+ memset(&temp_ring[i].xdp_rxq, 0,
+ sizeof(temp_ring[i].xdp_rxq));
+
temp_ring[i].count = new_rx_count;
err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
if (err) {
@@ -3082,26 +3085,9 @@ static int ixgbe_get_ts_info(struct net_device *dev,
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
- /* fallthrough */
+ break;
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (adapter->ptp_clock)
- info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
-
- info->tx_types =
- BIT(HWTSTAMP_TX_OFF) |
- BIT(HWTSTAMP_TX_ON);
-
info->rx_filters |=
BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
@@ -3110,13 +3096,31 @@ static int ixgbe_get_ts_info(struct net_device *dev,
default:
return ethtool_op_get_ts_info(dev, info);
}
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
return 0;
}
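With the common timestamping fields hoisted below the switch, the X550 family no longer falls through into the X540/82599 branch: it reports HWTSTAMP_FILTER_ALL and then shares the so_timestamping, phc_index and tx_types setup with the older parts.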
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
unsigned int max_combined;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/* We only support one q_vector without MSI-X */
@@ -3173,7 +3177,7 @@ static void ixgbe_get_channels(struct net_device *dev,
return;
/* same thing goes for being DCB enabled */
- if (netdev_get_num_tc(dev) > 1)
+ if (adapter->hw_tcs > 1)
return;
/* if ATR is disabled we can exit */
@@ -3219,7 +3223,7 @@ static int ixgbe_set_channels(struct net_device *dev,
#endif
/* use setup TC to update any traffic class queue mapping */
- return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+ return ixgbe_setup_tc(dev, adapter->hw_tcs);
}
static int ixgbe_get_module_info(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index a23c2b5411a0..7a09a40e4472 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -150,6 +150,7 @@ skip_ddpinv:
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
+ * @target_mode: 1 to setup target mode, 0 to setup initiator mode
*
* Returns : 1 for success and 0 for no ddp
*/
@@ -1034,11 +1035,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
ixgbe_driver_version);
/* Firmware Version */
- snprintf(info->firmware_version,
- sizeof(info->firmware_version),
- "0x%08x",
- (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl);
+ strlcpy(info->firmware_version, adapter->eeprom_id,
+ sizeof(info->firmware_version));
/* Model */
if (hw->mac.type == ixgbe_mac_82599EB) {
@@ -1066,7 +1064,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
/**
* ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
*
* Return : TC that FCoE is mapped to
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 000000000000..93eacddb6704
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,941 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "ixgbe.h"
+#include <net/xfrm.h>
+#include <crypto/aead.h>
+
+/**
+ * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @key: key byte array
+ * @salt: salt bytes
+ **/
+static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
+ u32 key[], u32 salt)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_FLUSH(hw);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
+ reg &= IXGBE_RXTXIDX_IPS_EN;
+ reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_item - set an Rx table item
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @tbl: table selector
+ *
+ * Trigger the device to store into a particular Rx table the
+ * data that has already been loaded into the input register
+ **/
+static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
+ enum ixgbe_ipsec_tbl_sel tbl)
+{
+ u32 reg;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
+ reg &= IXGBE_RXTXIDX_IPS_EN;
+ reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
+ idx << IXGBE_RXTXIDX_IDX_SHIFT |
+ IXGBE_RXTXIDX_WRITE;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @spi: security parameter index
+ * @key: key byte array
+ * @salt: salt bytes
+ * @mode: rx decrypt control bits
+ * @ip_idx: index into IP table for related IP address
+ **/
+static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
+ u32 key[], u32 salt, u32 mode, u32 ip_idx)
+{
+ int i;
+
+ /* store the SPI (in big-endian) and IPidx */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);
+
+ /* store the key, salt, and mode */
+ for (i = 0; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @addr: IP address byte array
+ **/
+static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
+{
+ int i;
+
+ /* store the ip address */
+ for (i = 0; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
+}
+
+/**
+ * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 buf[4] = {0, 0, 0, 0};
+ u16 idx;
+
+ /* disable Rx and Tx SA lookup */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
+
+ /* scrub the tables - split the loops for the max of the IP table */
+ for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
+ ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
+ ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
+ ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
+ }
+ for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
+ ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
+ ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
+ }
+
+ ipsec->num_rx_sa = 0;
+ ipsec->num_tx_sa = 0;
+}
+
+/**
+ * ixgbe_ipsec_stop_data
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link = adapter->link_up;
+ u32 t_rdy, r_rdy;
+ u32 limit;
+ u32 reg;
+
+ /* halt data paths */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ reg |= IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ reg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* If the Tx FIFO doesn't have link but still has data, we
+ * can't clear the Tx sec block, so set MAC loopback before
+ * clearing the block.
+ */
+ if (!link) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg |= IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg |= IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+ mdelay(3);
+ }
+
+ /* wait for the paths to empty */
+ limit = 20;
+ do {
+ mdelay(10);
+ t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
+ IXGBE_SECTXSTAT_SECTX_RDY;
+ r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
+ IXGBE_SECRXSTAT_SECRX_RDY;
+ } while (!t_rdy && !r_rdy && limit--);
+
+ /* undo loopback if we played with it earlier */
+ if (!link) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg &= ~IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg &= ~IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * ixgbe_ipsec_stop_engine
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ ixgbe_ipsec_stop_data(adapter);
+
+ /* disable Rx and Tx SA lookup */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
+
+ /* disable the Rx and Tx engines and full packet store-n-forward */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ reg |= IXGBE_SECTXCTRL_SECTX_DIS;
+ reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ reg |= IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
+
+ /* restore the "tx security buffer almost full threshold" to 0x250 */
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);
+
+ /* Set minimum IFG between packets back to the default 0x1 */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg = (reg & 0xfffffff0) | 0x1;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ /* final set for normal (no ipsec offload) processing */
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_start_engine
+ * @adapter: board private structure
+ *
+ * NOTE: this increases power consumption whether it is being used or not
+ **/
+static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ ixgbe_ipsec_stop_data(adapter);
+
+ /* Set minimum IFG between packets to 3 */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg = (reg & 0xfffffff0) | 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ /* Set "tx security buffer almost full threshold" to 0x15 so that the
+ * almost full indication is generated only after buffer contains at
+ * least an entire jumbo packet.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+ reg = (reg & 0xfffffc00) | 0x15;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);
+
+ /* restart the data paths by clearing the DISABLE bits */
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);
+
+ /* enable Rx and Tx SA lookup */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
+ * @adapter: board private structure
+ **/
+void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
+ return;
+
+ /* clean up and restart the engine */
+ ixgbe_ipsec_stop_engine(adapter);
+ ixgbe_ipsec_clear_hw_tables(adapter);
+ ixgbe_ipsec_start_engine(adapter);
+
+ /* reload the IP addrs */
+ for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
+ struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];
+
+ if (ipsa->used)
+ ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
+ }
+
+ /* reload the Rx and Tx keys */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ struct rx_sa *rsa = &ipsec->rx_tbl[i];
+ struct tx_sa *tsa = &ipsec->tx_tbl[i];
+
+ if (rsa->used)
+ ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
+ rsa->key, rsa->salt,
+ rsa->mode, rsa->iptbl_ind);
+
+ if (tsa->used)
+ ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
+ }
+}
+
+/**
+ * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
+ * @ipsec: pointer to ipsec struct
+ * @rxtable: true if we need to look in the Rx table
+ *
+ * Returns the first unused index in either the Rx or Tx SA table
+ **/
+static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
+{
+ u32 i;
+
+ if (rxtable) {
+ if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search rx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->rx_tbl[i].used)
+ return i;
+ }
+ } else {
+ if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search tx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->tx_tbl[i].used)
+ return i;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/**
+ * ixgbe_ipsec_find_rx_state - find the state that matches
+ * @ipsec: pointer to ipsec struct
+ * @daddr: inbound address to match
+ * @proto: protocol to match
+ * @spi: SPI to match
+ * @ip4: true if using an ipv4 address
+ *
+ * Returns a pointer to the matching SA state information
+ **/
+static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
+ __be32 *daddr, u8 proto,
+ __be32 spi, bool ip4)
+{
+ struct rx_sa *rsa;
+ struct xfrm_state *ret = NULL;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
+ if (spi == rsa->xs->id.spi &&
+ ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
+ (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
+ sizeof(rsa->xs->id.daddr.a6)))) &&
+ proto == rsa->xs->id.proto) {
+ ret = rsa->xs;
+ xfrm_state_hold(ret);
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @xs: pointer to xfrm_state struct
+ * @mykey: pointer to key array to populate
+ * @mysalt: pointer to salt value to populate
+ *
+ * This copies the protocol keys and salt into our own data tables. The
+ * 82599 family supports only the one algorithm, rfc4106(gcm(aes)).
+ **/
+static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
+ u32 *mykey, u32 *mysalt)
+{
+ struct net_device *dev = xs->xso.dev;
+ unsigned char *key_data;
+ char *alg_name = NULL;
+ const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+ int key_len;
+
+ if (xs->aead) {
+ key_data = &xs->aead->alg_key[0];
+ key_len = xs->aead->alg_key_len;
+ alg_name = xs->aead->alg_name;
+ } else {
+ netdev_err(dev, "Unsupported IPsec algorithm\n");
+ return -EINVAL;
+ }
+
+ if (strcmp(alg_name, aes_gcm_name)) {
+ netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
+ aes_gcm_name);
+ return -EINVAL;
+ }
+
+ /* The key bytes come down in a big-endian array of bytes, so
+ * we don't need to do any byteswapping.
+ * A key_len of 160 bits covers a 16-byte key plus a 4-byte salt.
+ */
+ if (key_len == 160) {
+ *mysalt = ((u32 *)key_data)[4];
+ } else if (key_len != 128) {
+ netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
+ return -EINVAL;
+ } else {
+ netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
+ *mysalt = 0;
+ }
+ memcpy(mykey, key_data, 16);
+
+ return 0;
+}
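+/* For reference, an SA that passes the checks above could be created
+ * from userspace with iproute2 along these lines (illustrative only;
+ * the addresses, SPI, device name, and key material are made-up examples):
+ *
+ *   ip xfrm state add src 192.168.0.1 dst 192.168.0.2 \
+ *      proto esp spi 0x00000100 mode transport \
+ *      aead 'rfc4106(gcm(aes))' 0x<40 hex chars: 16B key + 4B salt> 128 \
+ *      offload dev eth0 dir in
+ */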
+
+/**
+ * ixgbe_ipsec_add_sa - program device with a security association
+ * @xs: pointer to transformer state struct
+ **/
+static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int checked, match, first;
+ u16 sa_idx;
+ int ret;
+ int i;
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
+ xs->id.proto);
+ return -EINVAL;
+ }
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ struct rx_sa rsa;
+
+ if (xs->calg) {
+ netdev_err(dev, "Compression offload not supported\n");
+ return -EINVAL;
+ }
+
+ /* find the first unused index */
+ ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Rx table!\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&rsa, 0, sizeof(rsa));
+ rsa.used = true;
+ rsa.xs = xs;
+
+ if (rsa.xs->id.proto == IPPROTO_ESP)
+ rsa.decrypt = xs->ealg || xs->aead;
+
+ /* get the key and salt */
+ ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ return ret;
+ }
+
+ /* get ip for rx sa table */
+ if (xs->props.family == AF_INET6)
+ memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
+ else
+ memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);
+
+ /* The HW does not have a 1:1 mapping from keys to IP addrs, so
+ * check for a matching IP addr entry in the table. If the addr
+ * already exists, use it; else find an unused slot and add the
+ * addr. If one does not exist and there are no unused table
+ * entries, fail the request.
+ */
+
+ /* Find an existing match or first not used, and stop looking
+ * after we've checked all we know we have.
+ */
+ checked = 0;
+ match = -1;
+ first = -1;
+ for (i = 0;
+ i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
+ (checked < ipsec->num_rx_sa || first < 0);
+ i++) {
+ if (ipsec->ip_tbl[i].used) {
+ if (!memcmp(ipsec->ip_tbl[i].ipaddr,
+ rsa.ipaddr, sizeof(rsa.ipaddr))) {
+ match = i;
+ break;
+ }
+ checked++;
+ } else if (first < 0) {
+ first = i; /* track the first empty seen */
+ }
+ }
+
+ if (ipsec->num_rx_sa == 0)
+ first = 0;
+
+ if (match >= 0) {
+ /* addrs are the same, we should use this one */
+ rsa.iptbl_ind = match;
+ ipsec->ip_tbl[match].ref_cnt++;
+
+ } else if (first >= 0) {
+ /* no matches, but here's an empty slot */
+ rsa.iptbl_ind = first;
+
+ memcpy(ipsec->ip_tbl[first].ipaddr,
+ rsa.ipaddr, sizeof(rsa.ipaddr));
+ ipsec->ip_tbl[first].ref_cnt = 1;
+ ipsec->ip_tbl[first].used = true;
+
+ ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);
+
+ } else {
+ /* no match and no empty slot */
+ netdev_err(dev, "No space for SA in Rx IP SA table\n");
+ memset(&rsa, 0, sizeof(rsa));
+ return -ENOSPC;
+ }
+
+ rsa.mode = IXGBE_RXMOD_VALID;
+ if (rsa.xs->id.proto == IPPROTO_ESP)
+ rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
+ if (rsa.decrypt)
+ rsa.mode |= IXGBE_RXMOD_DECRYPT;
+ if (rsa.xs->props.family == AF_INET6)
+ rsa.mode |= IXGBE_RXMOD_IPV6;
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));
+
+ ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
+ rsa.salt, rsa.mode, rsa.iptbl_ind);
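+ /* The offload handle encodes the table index: Rx handles start
+ * at IXGBE_IPSEC_BASE_RX_INDEX (0) and Tx handles at
+ * IXGBE_IPSEC_BASE_TX_INDEX, so the two ranges never overlap.
+ */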
+ xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;
+
+ ipsec->num_rx_sa++;
+
+ /* hash the new entry for faster search in Rx path */
+ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
+ rsa.xs->id.spi);
+ } else {
+ struct tx_sa tsa;
+
+ /* find the first unused index */
+ ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Tx table\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&tsa, 0, sizeof(tsa));
+ tsa.used = true;
+ tsa.xs = xs;
+
+ if (xs->id.proto == IPPROTO_ESP)
+ tsa.encrypt = xs->ealg || xs->aead;
+
+ ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ memset(&tsa, 0, sizeof(tsa));
+ return ret;
+ }
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));
+
+ ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);
+
+ xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;
+
+ ipsec->num_tx_sa++;
+ }
+
+ /* enable the engine if not already warmed up */
+ if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
+ ixgbe_ipsec_start_engine(adapter);
+ adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_ipsec_del_sa - clear out this specific SA
+ * @xs: pointer to transformer state struct
+ **/
+static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 zerobuf[4] = {0, 0, 0, 0};
+ u16 sa_idx;
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ struct rx_sa *rsa;
+ u8 ipi;
+
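+ /* recover the Rx table index encoded in the offload handle */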
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
+ rsa = &ipsec->rx_tbl[sa_idx];
+
+ if (!rsa->used) {
+ netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
+ sa_idx, xs->xso.offload_handle);
+ return;
+ }
+
+ ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
+ hash_del_rcu(&rsa->hlist);
+
+ /* if the IP table entry is referenced by only this SA,
+ * i.e. ref_cnt is only 1, clear the IP table entry as well
+ */
+ ipi = rsa->iptbl_ind;
+ if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
+ ipsec->ip_tbl[ipi].ref_cnt--;
+
+ if (!ipsec->ip_tbl[ipi].ref_cnt) {
+ memset(&ipsec->ip_tbl[ipi], 0,
+ sizeof(struct rx_ip_sa));
+ ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
+ }
+ }
+
+ memset(rsa, 0, sizeof(struct rx_sa));
+ ipsec->num_rx_sa--;
+ } else {
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+
+ if (!ipsec->tx_tbl[sa_idx].used) {
+ netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
+ sa_idx, xs->xso.offload_handle);
+ return;
+ }
+
+ ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
+ memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
+ ipsec->num_tx_sa--;
+ }
+
+ /* if there are no SAs left, stop the engine to save energy */
+ if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
+ adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
+ ixgbe_ipsec_stop_engine(adapter);
+ }
+}
+
+/**
+ * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
+ * @skb: current data packet
+ * @xs: pointer to transformer state struct
+ **/
+static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+ if (xs->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl != 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_ipsec_free - called by the xfrm garbage collection
+ * @xs: pointer to transformer state struct
+ *
+ * We don't have any garbage to collect, so this wouldn't need to be
+ * implemented, except that the XFRM code doesn't check for the
+ * callback's existence before calling it.
+ **/
+static void ixgbe_ipsec_free(struct xfrm_state *xs)
+{
+}
+
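+/* security offload callbacks registered with the stack's xfrm layer
+ * via netdev->xfrmdev_ops in ixgbe_init_ipsec_offload() below
+ */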
+static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
+ .xdo_dev_state_add = ixgbe_ipsec_add_sa,
+ .xdo_dev_state_delete = ixgbe_ipsec_del_sa,
+ .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
+ .xdo_dev_state_free = ixgbe_ipsec_free,
+};
+
+/**
+ * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
+ * @tx_ring: outgoing context
+ * @first: current data packet
+ * @itd: ipsec Tx data for later use in building context descriptor
+ **/
+int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_state *xs;
+ struct tx_sa *tsa;
+
+ if (unlikely(!first->skb->sp->len)) {
+ netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
+ __func__, first->skb->sp->len);
+ return 0;
+ }
+
+ xs = xfrm_input_state(first->skb);
+ if (unlikely(!xs)) {
+ netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
+ __func__, xs);
+ return 0;
+ }
+
+ itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+ if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
+ netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
+ __func__, itd->sa_idx, xs->xso.offload_handle);
+ return 0;
+ }
+
+ tsa = &ipsec->tx_tbl[itd->sa_idx];
+ if (unlikely(!tsa->used)) {
+ netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
+ __func__, itd->sa_idx);
+ return 0;
+ }
+
+ first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;
+
+ itd->flags = 0;
+ if (xs->id.proto == IPPROTO_ESP) {
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ if (first->protocol == htons(ETH_P_IP))
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
+ itd->trailer_len = xs->props.trailer_len;
+ }
+ if (tsa->encrypt)
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
+
+ return 1;
+}
+
+/**
+ * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
+ * @rx_ring: receiving ring
+ * @rx_desc: receive data descriptor
+ * @skb: current data packet
+ *
+ * Determine whether an ipsec encapsulation was noticed, and if so set up
+ * the resulting status for later in the receive stack.
+ **/
+void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
+ IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_offload *xo = NULL;
+ struct xfrm_state *xs = NULL;
+ struct ipv6hdr *ip6 = NULL;
+ struct iphdr *ip4 = NULL;
+ void *daddr;
+ __be32 spi;
+ u8 *c_hdr;
+ u8 proto;
+
+ /* Find the ip and crypto headers in the data.
+ * We can assume no vlan header in the way, b/c the
+ * hw won't recognize the IPsec packet and anyway the
+ * currently vlan device doesn't support xfrm offload.
+ */
+ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
+ ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
+ daddr = &ip4->daddr;
+ c_hdr = (u8 *)ip4 + ip4->ihl * 4;
+ } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
+ ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ daddr = &ip6->daddr;
+ c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
+ } else {
+ return;
+ }
+
+ switch (pkt_info & ipsec_pkt_types) {
+ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
+ spi = ((struct ip_auth_hdr *)c_hdr)->spi;
+ proto = IPPROTO_AH;
+ break;
+ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
+ spi = ((struct ip_esp_hdr *)c_hdr)->spi;
+ proto = IPPROTO_ESP;
+ break;
+ default:
+ return;
+ }
+
+ xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
+ if (unlikely(!xs))
+ return;
+
+ skb->sp = secpath_dup(skb->sp);
+ if (unlikely(!skb->sp))
+ return;
+
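+ /* record the xfrm state and flag the crypto as already done,
+ * so the xfrm input path won't try to decrypt it again
+ */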
+ skb->sp->xvec[skb->sp->len++] = xs;
+ skb->sp->olen++;
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ xo->status = CRYPTO_SUCCESS;
+
+ adapter->rx_ipsec++;
+}
+
+/**
+ * ixgbe_init_ipsec_offload - initialize security registers for IPsec operation
+ * @adapter: board private structure
+ **/
+void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec;
+ size_t size;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return;
+
+ ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
+ if (!ipsec)
+ goto err1;
+ hash_init(ipsec->rx_sa_list);
+
+ size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+ ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->rx_tbl)
+ goto err2;
+
+ size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+ ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->tx_tbl)
+ goto err2;
+
+ size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
+ ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->ip_tbl)
+ goto err2;
+
+ ipsec->num_rx_sa = 0;
+ ipsec->num_tx_sa = 0;
+
+ adapter->ipsec = ipsec;
+ ixgbe_ipsec_stop_engine(adapter);
+ ixgbe_ipsec_clear_hw_tables(adapter);
+
+ adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
+ adapter->netdev->features |= NETIF_F_HW_ESP;
+ adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ return;
+
+err2:
+ kfree(ipsec->ip_tbl);
+ kfree(ipsec->rx_tbl);
+ kfree(ipsec->tx_tbl);
+err1:
+ kfree(ipsec);
+ netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
+}
+
+/**
+ * ixgbe_stop_ipsec_offload - tear down the ipsec offload
+ * @adapter: board private structure
+ **/
+void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+
+ adapter->ipsec = NULL;
+ if (ipsec) {
+ kfree(ipsec->ip_tbl);
+ kfree(ipsec->rx_tbl);
+ kfree(ipsec->tx_tbl);
+ kfree(ipsec);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 000000000000..da3ce7849e85
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,93 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program. If not, see <http://www.gnu.org/licenses/>.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_IPSEC_H_
+#define _IXGBE_IPSEC_H_
+
+#define IXGBE_IPSEC_MAX_SA_COUNT 1024
+#define IXGBE_IPSEC_MAX_RX_IP_COUNT 128
+#define IXGBE_IPSEC_BASE_RX_INDEX 0
+#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT
+
+#define IXGBE_RXTXIDX_IPS_EN 0x00000001
+#define IXGBE_RXIDX_TBL_SHIFT 1
+enum ixgbe_ipsec_tbl_sel {
+ ips_rx_ip_tbl = 0x01,
+ ips_rx_spi_tbl = 0x02,
+ ips_rx_key_tbl = 0x03,
+};
+
+#define IXGBE_RXTXIDX_IDX_SHIFT 3
+#define IXGBE_RXTXIDX_READ 0x40000000
+#define IXGBE_RXTXIDX_WRITE 0x80000000
+
+#define IXGBE_RXMOD_VALID 0x00000001
+#define IXGBE_RXMOD_PROTO_ESP 0x00000004
+#define IXGBE_RXMOD_DECRYPT 0x00000008
+#define IXGBE_RXMOD_IPV6 0x00000010
+
+struct rx_sa {
+ struct hlist_node hlist;
+ struct xfrm_state *xs;
+ __be32 ipaddr[4];
+ u32 key[4];
+ u32 salt;
+ u32 mode;
+ u8 iptbl_ind;
+ bool used;
+ bool decrypt;
+};
+
+struct rx_ip_sa {
+ __be32 ipaddr[4];
+ u32 ref_cnt;
+ bool used;
+};
+
+struct tx_sa {
+ struct xfrm_state *xs;
+ u32 key[4];
+ u32 salt;
+ bool encrypt;
+ bool used;
+};
+
+struct ixgbe_ipsec_tx_data {
+ u32 flags;
+ u16 trailer_len;
+ u16 sa_idx;
+};
+
+struct ixgbe_ipsec {
+ u16 num_rx_sa;
+ u16 num_tx_sa;
+ struct rx_ip_sa *ip_tbl;
+ struct rx_sa *rx_tbl;
+ struct tx_sa *tx_tbl;
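+ /* 2^10-bucket hash of the Rx SAs, keyed by SPI, for fast Rx lookup */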
+ DECLARE_HASHTABLE(rx_sa_list, 10);
+};
+#endif /* _IXGBE_IPSEC_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8e2a957aca18..4242f0213e46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -46,8 +46,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
int i;
- u16 reg_idx;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u16 reg_idx, pool;
+ u8 tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -58,12 +58,16 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
+ pool = 0;
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
- for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+ for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= tcs)
+ if ((reg_idx & ~vmdq->mask) >= tcs) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -92,6 +96,7 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
reg_idx++;
}
@@ -111,9 +116,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
unsigned int *tx, unsigned int *rx)
{
- struct net_device *dev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u8 num_tcs = netdev_get_num_tc(dev);
+ u8 num_tcs = adapter->hw_tcs;
*tx = 0;
*rx = 0;
@@ -168,10 +172,9 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
**/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
- struct net_device *dev = adapter->netdev;
+ u8 num_tcs = adapter->hw_tcs;
unsigned int tx_idx, rx_idx;
int tc, offset, rss_i, i;
- u8 num_tcs = netdev_get_num_tc(dev);
/* verify we have DCB queueing enabled before proceeding */
if (num_tcs <= 1)
@@ -184,6 +187,7 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
adapter->tx_ring[offset + i]->reg_idx = tx_idx;
adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+ adapter->rx_ring[offset + i]->netdev = adapter->netdev;
adapter->tx_ring[offset + i]->dcb_tc = tc;
adapter->rx_ring[offset + i]->dcb_tc = tc;
}
@@ -208,14 +212,15 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
+ u16 reg_idx, pool;
int i;
- u16 reg_idx;
/* only proceed if VMDq is enabled */
if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
+ pool = 0;
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
@@ -224,15 +229,20 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
break;
#endif
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ if ((reg_idx & ~vmdq->mask) >= rss->indices) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
#ifdef IXGBE_FCOE
/* FCoE uses a linear block of queues so just assigning 1:1 */
- for (; i < adapter->num_rx_queues; i++, reg_idx++)
+ for (; i < adapter->num_rx_queues; i++, reg_idx++) {
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
#endif
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -269,8 +279,10 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i, reg_idx;
- for (i = 0; i < adapter->num_rx_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i]->reg_idx = i;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
adapter->tx_ring[i]->reg_idx = reg_idx;
for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
@@ -340,7 +352,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -350,6 +362,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit VMDq instances on the PF by number of Tx queues */
+ vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -437,7 +452,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
int tcs;
/* Map queue offset and counts onto allocated tx queues */
- tcs = netdev_get_num_tc(dev);
+ tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -512,12 +527,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
- bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
/* only proceed if SR-IOV is enabled */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit l2fwd RSS based on total Tx queue limit */
+ rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -525,7 +542,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
+ if (vmdq_i > 32) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
@@ -602,6 +619,10 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
}
#endif
+ /* populate TC0 for use by pool 0 */
+ netdev_set_tc_queue(adapter->netdev, 0,
+ adapter->num_rx_queues_per_pool, 0);
+
return true;
}
@@ -701,7 +722,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_xdp_queues = 0;
- adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_pools = 1;
adapter->num_rx_queues_per_pool = 1;
#ifdef CONFIG_IXGBE_DCB
@@ -834,7 +855,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
ring_count = txr_count + rxr_count + xdp_count;
size = sizeof(struct ixgbe_q_vector) +
@@ -917,11 +938,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
- if (adapter->num_rx_pools > 1)
- ring->queue_index =
- txr_idx % adapter->num_rx_queues_per_pool;
- else
- ring->queue_index = txr_idx;
+ ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
@@ -991,11 +1008,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif /* IXGBE_FCOE */
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
- if (adapter->num_rx_pools > 1)
- ring->queue_index =
- rxr_idx % adapter->num_rx_queues_per_pool;
- else
- ring->queue_index = rxr_idx;
+ ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
@@ -1171,7 +1184,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
*/
/* Disable DCB unless we only have a single traffic class */
- if (netdev_get_num_tc(adapter->netdev) > 1) {
+ if (adapter->hw_tcs > 1) {
e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
netdev_reset_tc(adapter->netdev);
@@ -1183,6 +1196,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.pfc_mode_enable = false;
}
+ adapter->hw_tcs = 0;
adapter->dcb_cfg.num_tcs.pg_tcs = 1;
adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
@@ -1268,7 +1282,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
}
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
- u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+ u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
@@ -1282,7 +1296,7 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+ context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 62a18914f00f..0da5aa2c8aba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -192,6 +192,13 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static const struct net_device_ops ixgbe_netdev_ops;
+
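+/* true if the netdev is the ixgbe PF itself, false for e.g. a ring
+ * that has been handed to an offloaded macvlan device
+ */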
+static bool netif_is_ixgbe(struct net_device *dev)
+{
+ return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
+}
+
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
@@ -1064,24 +1071,12 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
- u32 head, tail;
+ unsigned int head, tail;
- if (ring->l2_accel_priv)
- adapter = ring->l2_accel_priv->real_adapter;
- else
- adapter = netdev_priv(ring->netdev);
+ head = ring->next_to_clean;
+ tail = ring->next_to_use;
- hw = &adapter->hw;
- head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
- tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
-
- if (head != tail)
- return (head < tail) ?
- tail - head : (tail + ring->count - head);
-
- return 0;
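+ /* count pending descriptors, allowing for next_to_use wraparound */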
+ return ((head <= tail) ? tail : tail + ring->count) - head;
}
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
@@ -1133,6 +1128,9 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
/**
* ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set
+ * @maxrate: desired maximum transmit bitrate
**/
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
@@ -1173,7 +1171,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
@@ -1204,6 +1202,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
/* update the statistics for this packet */
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
+ if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
+ total_ipsec++;
/* free the skb */
if (ring_is_xdp(tx_ring))
@@ -1266,6 +1266,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
u64_stats_update_end(&tx_ring->syncp);
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
@@ -1754,9 +1755,18 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
- skb_record_rx_queue(skb, rx_ring->queue_index);
+ if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
+ ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
skb->protocol = eth_type_trans(skb, dev);
+
+ /* record Rx queue, or update MACVLAN statistics */
+ if (netif_is_ixgbe(dev))
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ else
+ macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+ (skb->pkt_type == PACKET_BROADCAST) ||
+ (skb->pkt_type == PACKET_MULTICAST));
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1921,10 +1931,13 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
if (IS_ERR(skb))
return true;
- /* verify that the packet does not have any known errors */
- if (unlikely(ixgbe_test_staterr(rx_desc,
- IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
- !(netdev->features & NETIF_F_RXALL))) {
+ /* Verify netdev is present, and that packet does not have any
+ * errors that would be unacceptable to the netdev.
+ */
+ if (!netdev ||
+ (unlikely(ixgbe_test_staterr(rx_desc,
+ IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL)))) {
dev_kfree_skb_any(skb);
return true;
}
@@ -2021,8 +2034,8 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: size of data in rx_buffer
*
* This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is
@@ -2318,12 +2331,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
bool xdp_xmit = false;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
- struct xdp_buff xdp;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2515,13 +2530,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
-enum latency_range {
- lowest_latency = 0,
- low_latency = 1,
- bulk_latency = 2,
- latency_invalid = 255
-};
-
/**
* ixgbe_update_itr - update the dynamic ITR value based on statistics
* @q_vector: structure containing interrupt and ring information
@@ -2534,8 +2542,6 @@ enum latency_range {
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see ixgbe_param.c)
**/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
@@ -3020,6 +3026,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
+ * @queues: enable irqs for queues
+ * @flush: flush register write
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
bool flush)
@@ -3475,6 +3483,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
/**
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
+ * @adapter: board private structure
*
**/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
@@ -3586,7 +3595,7 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rttdcs, mtqc;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
@@ -3853,16 +3862,20 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
u32 vfreta = 0;
- unsigned int pf_pool = adapter->num_vfs;
/* Write redirection table to HW */
for (i = 0; i < reta_entries; i++) {
+ u16 pool = adapter->num_rx_pools;
+
vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
- if ((i & 3) == 3) {
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+ if ((i & 3) != 3)
+ continue;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
vfreta);
- vfreta = 0;
- }
+ vfreta = 0;
}
}
@@ -3899,13 +3912,17 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- unsigned int pf_pool = adapter->num_vfs;
int i, j;
/* Fill out hash function seeds */
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
- *(adapter->rss_key + i));
+ for (i = 0; i < 10; i++) {
+ u16 pool = adapter->num_rx_pools;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
+ *(adapter->rss_key + i));
+ }
/* Fill out the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
@@ -3933,7 +3950,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if (adapter->ring_feature[RING_F_RSS].mask)
mrqc = IXGBE_MRQC_RSSEN;
} else {
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
if (tcs > 4)
@@ -3971,7 +3988,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
- unsigned int pf_pool = adapter->num_vfs;
+ u16 pool = adapter->num_rx_pools;
/* Enable VF RSS mode */
mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
@@ -3981,7 +3998,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
ixgbe_setup_vfreta(adapter);
vfmrqc = IXGBE_MRQC_RSSEN;
vfmrqc |= rss_field;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFMRQC(VMDQ_P(pool)),
+ vfmrqc);
} else {
ixgbe_setup_reta(adapter);
mrqc |= rss_field;
@@ -3991,8 +4012,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
- * @adapter: address of board private structure
- * @index: index of ring to set
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
**/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
@@ -4112,11 +4133,15 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
rxdctl &= ~0x3FFFFF;
rxdctl |= 0x080420;
#if (PAGE_SIZE < 8192)
- } else {
+ /* RXDCTL.RLPML does not work on 82599 */
+ } else if (hw->mac.type != ixgbe_mac_82599EB) {
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
- /* Limit the maximum frame size so we don't overrun the skb */
+ /* Limit the maximum frame size so we don't overrun the skb.
+ * This can happen in SRIOV mode when the MTU of the VF is
+ * higher than the MTU of the PF.
+ */
if (ring_uses_build_skb(ring) &&
!test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
@@ -4144,7 +4169,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int rss_i = adapter->ring_feature[RING_F_RSS].indices;
- u16 pool;
+ u16 pool = adapter->num_rx_pools;
/* PSRTYPE must be initialized in non 82598 adapters */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -4161,7 +4186,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
else if (rss_i > 1)
psrtype |= 1u << 29;
- for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+ while (pool--)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
@@ -4488,8 +4513,9 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl &= ~IXGBE_RXDCTL_VME;
@@ -4525,8 +4551,9 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
@@ -4846,9 +4873,11 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
return -ENOMEM;
}
+
/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
+ * @vfn: pool to associate with unicast addresses
*
* Writes unicast address list to the RAR table.
* Returns: -ENOMEM on failure/insufficient address space
@@ -5195,7 +5224,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int num_tc = netdev_get_num_tc(adapter->netdev);
+ int num_tc = adapter->hw_tcs;
int i;
if (!num_tc)
@@ -5218,7 +5247,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int hdrm;
- u8 tc = netdev_get_num_tc(adapter->netdev);
+ u8 tc = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -5277,29 +5306,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
- int rss_i = adapter->num_rx_queues_per_pool;
- struct ixgbe_hw *hw = &adapter->hw;
- u16 pool = vadapter->pool;
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_L2HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- if (rss_i > 3)
- psrtype |= 2u << 29;
- else if (rss_i > 1)
- psrtype |= 1u << 29;
-
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
-}
-
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -5352,96 +5358,45 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring->next_to_use = 0;
}
-static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
- struct ixgbe_ring *rx_ring)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
- int index = rx_ring->queue_index + vadapter->rx_base_queue;
-
- /* shutdown specific queue receive and wait for dma to settle */
- ixgbe_disable_rx_queue(adapter, rx_ring);
- usleep_range(10000, 20000);
- ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
- ixgbe_clean_rx_ring(rx_ring);
- rx_ring->l2_accel_priv = NULL;
-}
-
-static int ixgbe_fwd_ring_down(struct net_device *vdev,
- struct ixgbe_fwd_adapter *accel)
-{
- struct ixgbe_adapter *adapter = accel->real_adapter;
- unsigned int rxbase = accel->rx_base_queue;
- unsigned int txbase = accel->tx_base_queue;
- int i;
-
- netif_tx_stop_all_queues(vdev);
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
- adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
- }
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
- adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
- }
-
-
- return 0;
-}
-
static int ixgbe_fwd_ring_up(struct net_device *vdev,
struct ixgbe_fwd_adapter *accel)
{
struct ixgbe_adapter *adapter = accel->real_adapter;
- unsigned int rxbase, txbase, queues;
- int i, baseq, err = 0;
+ int i, baseq, err;
- if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+ if (!test_bit(accel->pool, adapter->fwd_bitmask))
return 0;
baseq = accel->pool * adapter->num_rx_queues_per_pool;
- netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
- baseq, baseq + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
+ baseq, baseq + adapter->num_rx_queues_per_pool);
accel->netdev = vdev;
- accel->rx_base_queue = rxbase = baseq;
- accel->tx_base_queue = txbase = baseq;
+ accel->rx_base_queue = baseq;
+ accel->tx_base_queue = baseq;
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+ adapter->rx_ring[baseq + i]->netdev = vdev;
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->rx_ring[rxbase + i]->netdev = vdev;
- adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
- ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
- }
+ /* Guarantee all rings are updated before we update the
+ * MAC address filter.
+ */
+ wmb();
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->tx_ring[txbase + i]->netdev = vdev;
- adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
+ /* ixgbe_add_mac_filter will return an index if it succeeds, so we
+ * only need to treat it as an error value if it is negative.
+ */
+ err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
+ VMDQ_P(accel->pool));
+ if (err >= 0) {
+ ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ return 0;
}
- queues = min_t(unsigned int,
- adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
- err = netif_set_real_num_tx_queues(vdev, queues);
- if (err)
- goto fwd_queue_err;
-
- err = netif_set_real_num_rx_queues(vdev, queues);
- if (err)
- goto fwd_queue_err;
-
- if (is_valid_ether_addr(vdev->dev_addr))
- ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+ adapter->rx_ring[baseq + i]->netdev = NULL;
- ixgbe_fwd_psrtype(accel);
- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
- return err;
-fwd_queue_err:
- ixgbe_fwd_ring_down(vdev, accel);
return err;
}
@@ -5480,6 +5435,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_set_rx_mode(adapter->netdev);
ixgbe_restore_vlan(adapter);
+ ixgbe_ipsec_restore(adapter);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
@@ -5917,21 +5873,6 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
-static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
-{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv) {
- netif_tx_stop_all_queues(upper);
- netif_carrier_off(upper);
- netif_tx_disable(upper);
- }
- }
-
- return 0;
-}
-
void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -5961,10 +5902,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- /* disable any upper devices */
- netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_disable_macvlan, NULL);
-
ixgbe_irq_disable(adapter);
ixgbe_napi_disable_all(adapter);
@@ -6119,6 +6056,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
+ * @ii: pointer to ixgbe_info for device
*
* ixgbe_sw_init initializes the Adapter private data structure.
* Fields are initialized based on PCI device information and
@@ -6153,6 +6091,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_FDIR].limit = fdir;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
@@ -6302,7 +6241,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
}
/* PF holds first pool slot */
- set_bit(0, &adapter->fwd_bitmask);
+ set_bit(0, adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -6402,6 +6341,7 @@ err_setup_tx:
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: pointer to ixgbe_adapter
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
@@ -6444,6 +6384,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
+ rx_ring->queue_index) < 0)
+ goto err;
+
rx_ring->xdp_prog = adapter->xdp_prog;
return 0;
@@ -6541,6 +6486,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
ixgbe_clean_rx_ring(rx_ring);
rx_ring->xdp_prog = NULL;
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -6646,20 +6592,12 @@ int ixgbe_open(struct net_device *netdev)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- if (adapter->num_rx_pools > 1)
- queues = adapter->num_rx_queues_per_pool;
- else
- queues = adapter->num_tx_queues;
-
+ queues = adapter->num_tx_queues;
err = netif_set_real_num_tx_queues(netdev, queues);
if (err)
goto err_set_queues;
- if (adapter->num_rx_pools > 1 &&
- adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
- queues = IXGBE_MAX_L2A_QUEUES;
- else
- queues = adapter->num_rx_queues;
+ queues = adapter->num_rx_queues;
err = netif_set_real_num_rx_queues(netdev, queues);
if (err)
goto err_set_queues;
@@ -6783,7 +6721,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 ctrl, fctrl;
+ u32 ctrl;
u32 wufc = adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
@@ -6808,18 +6746,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
hw->mac.ops.stop_link_on_d3(hw);
if (wufc) {
+ u32 fctrl;
+
ixgbe_set_rx_mode(netdev);
/* enable the optics for 82599 SFP+ fiber as we can WoL */
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
- /* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & IXGBE_WUFC_MC) {
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_MPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- }
+ /* enable the reception of multicast packets */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
ctrl |= IXGBE_CTRL_GIO_DIS;
@@ -7216,7 +7154,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_update_link - update the link status
* @adapter: pointer to the device adapter structure
- * @link_speed: pointer to a u32 to store the link_speed
**/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
@@ -7273,18 +7210,6 @@ static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
#endif
}
-static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
-{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv)
- netif_tx_wake_all_queues(upper);
- }
-
- return 0;
-}
-
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
@@ -7338,6 +7263,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case IXGBE_LINK_SPEED_10GB_FULL:
speed_str = "10 Gbps";
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ speed_str = "5 Gbps";
+ break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
speed_str = "2.5 Gbps";
break;
@@ -7365,12 +7293,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
/* enable transmits */
netif_tx_wake_all_queues(adapter->netdev);
- /* enable any upper devices */
- rtnl_lock();
- netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_enable_macvlan, NULL);
- rtnl_unlock();
-
/* update the default user priority for VFs */
ixgbe_update_default_up(adapter);
@@ -7655,6 +7577,7 @@ sfp_out:
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 cap_speed;
u32 speed;
bool autoneg = false;
@@ -7667,16 +7590,14 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
- speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
- hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+ hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
- /* setup the highest link when no autoneg */
- if (!autoneg) {
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- speed = IXGBE_LINK_SPEED_10GB_FULL;
- }
- }
+ /* advertise highest capable link speed */
+ if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
+ speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL);
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, speed, true);
@@ -7688,7 +7609,7 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list structure
**/
static void ixgbe_service_timer(struct timer_list *t)
{
@@ -7888,10 +7809,12 @@ static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
}
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first)
+ struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
+ u32 fceof_saidx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -7932,7 +7855,12 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
+ if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) {
+ fceof_saidx |= itd->sa_idx;
+ type_tucmd |= itd->flags | itd->trailer_len;
+ }
+
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
}
#define IXGBE_SET_FLAG(_input, _flag, _result) \
@@ -7975,11 +7903,16 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
IXGBE_TX_FLAGS_CSUM,
IXGBE_ADVTXD_POPTS_TXSM);
- /* enble IPv4 checksum for TSO */
+ /* enable IPv4 checksum for TSO */
olinfo_status |= IXGBE_SET_FLAG(tx_flags,
IXGBE_TX_FLAGS_IPV4,
IXGBE_ADVTXD_POPTS_IXSM);
+ /* enable IPsec */
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_IPSEC,
+ IXGBE_ADVTXD_POPTS_IPSEC);
+
/*
* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
@@ -8332,14 +8265,19 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
-#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter;
- struct ixgbe_ring_feature *f;
int txq;
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *f;
#endif
- if (fwd_adapter)
- return skb->queue_mapping + fwd_adapter->tx_base_queue;
+ if (fwd_adapter) {
+ adapter = netdev_priv(dev);
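+
+ /* spread flows across the macvlan pool's queues via the skb hash */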
+ txq = reciprocal_scale(skb_get_hash(skb),
+ adapter->num_rx_queues_per_pool);
+
+ return txq + fwd_adapter->tx_base_queue;
+ }
#ifdef IXGBE_FCOE
@@ -8438,6 +8376,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
u32 tx_flags = 0;
unsigned short f;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
__be16 protocol = skb->protocol;
u8 hdr_len = 0;
@@ -8542,11 +8481,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
}
#endif /* IXGBE_FCOE */
+
+#ifdef CONFIG_XFRM_OFFLOAD
+ if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+ goto out_drop;
+#endif
tso = ixgbe_tso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
else if (!tso)
- ixgbe_tx_csum(tx_ring, first);
+ ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
/* add the ATR filter if ATR is on */
if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
@@ -8671,7 +8615,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
/**
* ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
* netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
*
* Returns non-zero on failure
**/
@@ -8695,7 +8639,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
/**
* ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
* netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
*
* Returns non-zero on failure
**/
@@ -8862,14 +8806,13 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
- * @netdev: net device to configure
+ * @dev: net device to configure
* @tc: number of traffic classes to enable
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- bool pools;
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
@@ -8878,10 +8821,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
return -EINVAL;
- pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
- if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
- return -EBUSY;
-
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
@@ -8898,6 +8837,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
+ adapter->hw_tcs = tc;
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
@@ -8907,10 +8847,19 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
} else {
netdev_reset_tc(dev);
+ /* To support macvlan offload we have to use num_tc to
+ * restrict the queues that can be used by the device.
+ * By doing this we can avoid reporting a false number of
+ * queues.
+ */
+ if (!tc && adapter->num_rx_pools > 1)
+ netdev_set_num_tc(dev, 1);
+
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->hw_tcs = tc;
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
@@ -9044,6 +8993,7 @@ static int get_macvlan_queue(struct net_device *upper, void *_data)
static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
u8 *queue, u64 *action)
{
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int num_vfs = adapter->num_vfs, vf;
struct upper_walk_data data;
struct net_device *upper;
@@ -9052,11 +9002,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
for (vf = 0; vf < num_vfs; ++vf) {
upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
if (upper->ifindex == ifindex) {
- if (adapter->num_rx_pools > 1)
- *queue = vf * 2;
- else
- *queue = vf * adapter->num_rx_queues_per_pool;
-
+ *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
*action = vf + 1;
*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
return 0;
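The VF base queue is now derived from the VMDq mask itself: __ALIGN_MASK(1, ~vmdq->mask) rounds 1 up to the pool's queue stride, replacing the old hard-coded 2-vs-num_rx_queues_per_pool choice. A worked example with an assumed 4-queue stride:

	/* __ALIGN_MASK(x, m) is ((x + m) & ~m), i.e. round x up to the
	 * alignment whose low bits are set in m. Assume ~vmdq->mask has
	 * low bits 0x3 (4 queues per pool):
	 *
	 *	__ALIGN_MASK(1, 0x3) == (1 + 3) & ~3 == 4
	 */
	u8 stride = __ALIGN_MASK(1, ~vmdq->mask);	/* 4 in this example */
	u8 base   = vf * stride;			/* VF 2 -> queue 8 */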
@@ -9101,9 +9047,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
	/* Redirect to a VF or an offloaded macvlan */
if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
+ struct net_device *dev = tcf_mirred_dev(a);
- err = handle_redirect_action(adapter, ifindex, queue,
+ if (!dev)
+ return -EINVAL;
+ err = handle_redirect_action(adapter, dev->ifindex, queue,
action);
if (err == 0)
return err;
@@ -9362,9 +9310,6 @@ free_jump:
static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls_u32)
{
- if (cls_u32->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
@@ -9386,7 +9331,7 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct ixgbe_adapter *adapter = cb_priv;
- if (!tc_can_offload(adapter->netdev))
+ if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -9444,7 +9389,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
rtnl_lock();
- ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+ ixgbe_setup_tc(netdev, adapter->hw_tcs);
rtnl_unlock();
}
@@ -9520,7 +9465,7 @@ static int ixgbe_set_features(struct net_device *netdev,
/* We cannot enable ATR if SR-IOV is enabled */
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
/* We cannot enable ATR if we have 2 or more tcs */
- (netdev_get_num_tc(netdev) > 1) ||
+ (adapter->hw_tcs > 1) ||
/* We cannot enable ATR if RSS is disabled */
(adapter->ring_feature[RING_F_RSS].limit <= 1) ||
/* A sample rate of 0 indicates ATR disabled */
@@ -9695,8 +9640,8 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/**
* ixgbe_configure_bridge_mode - set various bridge modes
- * @adapter - the private structure
- * @mode - requested bridge mode
+ * @adapter: the private structure
+ * @mode: requested bridge mode
*
 * Configure some settings required for various bridge modes.
**/
@@ -9821,6 +9766,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ int tcs = adapter->hw_tcs ? : 1;
unsigned int limit;
int pool, err;
@@ -9831,24 +9777,8 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
return ERR_PTR(-EINVAL);
-#ifdef CONFIG_RPS
- if (vdev->num_rx_queues != vdev->num_tx_queues) {
- netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
- vdev->name);
- return ERR_PTR(-EINVAL);
- }
-#endif
- /* Check for hardware restriction on number of rx/tx queues */
- if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
- vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
- netdev_info(pdev,
- "%s: Supports RX/TX Queue counts 1,2, and 4\n",
- pdev->name);
- return ERR_PTR(-EINVAL);
- }
-
if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+ adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
(adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
return ERR_PTR(-EBUSY);
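The DCB bound is no longer a fixed constant: with tcs traffic classes each pool needs tcs Tx queues, so the pool budget is MAX_TX_QUEUES divided by the TC count. Illustrative arithmetic for the DCB branch (MAX_TX_QUEUES is 128 on this hardware family, as we read the driver headers):

	/* More TCs means fewer pools fit in the Tx queue space:
	 *	tcs = 1  ->  128 / 1 = 128
	 *	tcs = 4  ->  128 / 4 =  32
	 *	tcs = 8  ->  128 / 8 =  16
	 */
	int tcs = adapter->hw_tcs ? : 1;	/* 0 configured TCs acts as 1 */

	if (adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs))
		return ERR_PTR(-EBUSY);		/* no queue room left */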
@@ -9856,53 +9786,68 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!fwd_adapter)
return ERR_PTR(-ENOMEM);
- pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
- adapter->num_rx_pools++;
- set_bit(pool, &adapter->fwd_bitmask);
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ set_bit(pool, adapter->fwd_bitmask);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
- /* Force reinit of ring allocation with VMDQ enabled */
- err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
- if (err)
- goto fwd_add_err;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
- if (netif_running(pdev)) {
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+
+ if (!err && netif_running(pdev))
err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
- if (err)
- goto fwd_add_err;
- netif_tx_start_all_queues(vdev);
- }
- return fwd_adapter;
-fwd_add_err:
+ if (!err)
+ return fwd_adapter;
+
/* unwind counter and free adapter struct */
netdev_info(pdev,
"%s: dfwd hardware acceleration failed\n", vdev->name);
- clear_bit(pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ clear_bit(pool, adapter->fwd_bitmask);
kfree(fwd_adapter);
return ERR_PTR(err);
}
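fwd_bitmask is now used as a real bitmap sized by num_rx_pools instead of a single unsigned long shadowed by a counter. A condensed sketch of the claim/size/release pattern above (MAX_POOLS is a hypothetical bound for the sketch):

	#include <linux/bitmap.h>

	#define MAX_POOLS 64			/* hypothetical bound */
	static DECLARE_BITMAP(pool_bitmap, MAX_POOLS);

	/* Claim the lowest free pool slot. */
	static unsigned int pool_claim(unsigned int num_pools)
	{
		unsigned int pool = find_first_zero_bit(pool_bitmap, num_pools);

		set_bit(pool, pool_bitmap);
		return pool;
	}

	/* The hardware must cover every live pool, so the VMDq limit
	 * tracks the highest set bit, not the population count: holes
	 * left by freed pools still occupy queue space.
	 */
	static unsigned int pool_limit(unsigned int num_pools)
	{
		return find_last_bit(pool_bitmap, num_pools + 1) + 1;
	}

	static void pool_release(unsigned int pool)
	{
		clear_bit(pool, pool_bitmap);
	}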
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
- struct ixgbe_fwd_adapter *fwd_adapter = priv;
- struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
- unsigned int limit;
+ struct ixgbe_fwd_adapter *accel = priv;
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase = accel->rx_base_queue;
+ unsigned int limit, i;
+
+ /* delete unicast filter associated with offloaded interface */
+ ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
+ VMDQ_P(accel->pool));
+
+ /* disable ability to receive packets for this pool */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
- clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ /* Allow remaining Rx packets to get flushed out of the
+ * Rx FIFO before we drop the netdev for the ring.
+ */
+ usleep_range(10000, 20000);
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
+ struct ixgbe_q_vector *qv = ring->q_vector;
+
+ /* Make sure we aren't processing any packets and clear
+ * netdev to shut down the ring.
+ */
+ if (netif_running(adapter->netdev))
+ napi_synchronize(&qv->napi);
+ ring->netdev = NULL;
+ }
+
+ clear_bit(accel->pool, adapter->fwd_bitmask);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
/* go back to full RSS if we're done with our VMQs */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
@@ -9914,13 +9859,13 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
adapter->ring_feature[RING_F_RSS].limit = rss;
}
- ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
- netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
- fwd_adapter->pool, adapter->num_rx_pools,
- fwd_adapter->rx_base_queue,
- fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
- kfree(fwd_adapter);
+ ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
+ accel->pool, adapter->num_rx_pools,
+ accel->rx_base_queue,
+ accel->rx_base_queue +
+ adapter->num_rx_queues_per_pool);
+ kfree(accel);
}
#define IXGBE_MAX_MAC_HDR_LEN 127
@@ -9954,6 +9899,12 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
features &= ~NETIF_F_TSO;
+#ifdef CONFIG_XFRM_OFFLOAD
+ /* IPsec offload doesn't get along well with others *yet* */
+ if (skb->sp)
+ features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM);
+#endif
+
return features;
}
@@ -9987,7 +9938,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
/* If transitioning XDP modes reconfigure rings */
if (!!prog != !!old_prog) {
- int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+ int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
rcu_assign_pointer(adapter->xdp_prog, old_prog);
@@ -10164,7 +10115,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
* ixgbe_wol_supported - Check whether device supports WoL
* @adapter: the adapter private structure
* @device_id: the device ID
- * @subdev_id: the subsystem device ID
+ * @subdevice_id: the subsystem device ID
*
* This function is used by probe and ethtool to determine
* which devices have WoL support
@@ -10233,6 +10184,41 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version - Set FW version
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version
+ * string to display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ ixgbe_get_orom_version(hw, &nvm_ver);
+
+ if (nvm_ver.or_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
+ nvm_ver.or_build, nvm_ver.or_patch);
+ return;
+ }
+
+ /* Set ETrack ID format */
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x", nvm_ver.etk_id);
+}
+
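For reference, hypothetical example strings the three branches above would produce (values invented):

	/* oem_valid: "1.19.4"              "%x.%x.%x"         */
	/* or_valid:  "0x800004d3, 1.59.0"  "0x%08x, %d.%d.%d" */
	/* fallback:  "0x800004d3"          "0x%08x"           */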
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -10508,6 +10494,7 @@ skip_sriov:
NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
+ ixgbe_init_ipsec_offload(adapter);
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
netdev->hw_features |= NETIF_F_LRO;
@@ -10568,8 +10555,7 @@ skip_sriov:
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* save off EEPROM version number */
- hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
- hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
+ ixgbe_set_fw_version(adapter);
/* pick up the PCI bus settings for reporting later */
if (ixgbe_pcie_from_parent(hw))
@@ -10744,6 +10730,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ ixgbe_stop_ipsec_offload(adapter);
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 654a402f0e9e..91bde90f9265 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -378,7 +378,7 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
/**
* ixgbe_get_phy_type_from_id - Get the phy type
- * @hw: pointer to hardware structure
+ * @phy_id: hardware phy id
*
**/
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
@@ -489,6 +489,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
* the SWFW lock
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
@@ -564,6 +565,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -763,6 +765,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
+ * @autoneg_wait_to_complete: unused
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -861,6 +864,8 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
/**
* ixgbe_check_phy_link_tnx - Determine link and speed status
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @link_up: status of link
*
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
@@ -1667,7 +1672,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs byte read operation to SFP module's SFF-8472 data over I2C
**/
@@ -1714,6 +1719,7 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
* @lock: true if to take and release semaphore
*
@@ -1804,6 +1810,7 @@ fail:
* ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1820,6 +1827,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1836,6 +1844,7 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
* @lock: true if to take and release semaphore
*
@@ -1904,6 +1913,7 @@ fail:
* ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
@@ -1920,6 +1930,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ae312c45696a..f6cc9166082a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -166,7 +166,7 @@
/**
* ixgbe_ptp_setup_sdp_x540
- * @hw: the hardware private structure
+ * @adapter: private adapter structure
*
* this function enables or disables the clock out feature on SDP0 for
 * the X540 device. It will create a 1 second periodic output that can
@@ -299,7 +299,7 @@ static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
* ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp
* @adapter: private adapter structure
* @hwtstamp: stack timestamp structure
- * @systim: unsigned 64bit system time value
+ * @timestamp: unsigned 64bit system time value
*
* We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
* which can be used by the stack's ptp functions.
@@ -1015,7 +1015,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
/**
* ixgbe_ptp_set_ts_config - user entry point for timestamp mode
* @adapter: pointer to adapter struct
- * @ifreq: ioctl data
+ * @ifr: ioctl data
*
* Set hardware to requested mode. If unsupported, return an error with no
* changes. Otherwise, store the mode for future reference.
@@ -1338,7 +1338,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_ptp_suspend - stop PTP work items
- * @ adapter: pointer to adapter struct
+ * @adapter: pointer to adapter struct
*
* this function suspends PTP activity, and prevents more PTP work from being
* generated, but does not destroy the PTP clock device.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 112d24c6c9ce..27a70a52f3c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -78,12 +78,9 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int i;
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
-
/* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
- if (!adapter->ring_feature[RING_F_VMDQ].limit)
- adapter->ring_feature[RING_F_VMDQ].limit = 1;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED;
/* Allocate memory for per VF control structures */
adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
@@ -227,9 +224,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 gpie;
- u32 vmdctl;
int rss;
/* set num VFs to 0 to prevent access to vfinfo */
@@ -271,18 +265,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
pci_disable_sriov(adapter->pdev);
#endif
- /* turn off device IOV mode */
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* set default pool back to 0 */
- vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
- IXGBE_WRITE_FLUSH(hw);
-
/* Disable VMDq flag so device will be set in VM mode */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
@@ -305,10 +287,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
- int err = 0;
- u8 num_tc;
- int i;
int pre_existing_vfs = pci_num_vf(dev);
+ int err = 0, num_rx_pools, i, limit;
+ u8 num_tc;
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
err = ixgbe_disable_sriov(adapter);
@@ -330,23 +311,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* than we have available pools. The PCI bus driver already checks for
* other values out of range.
*/
- num_tc = netdev_get_num_tc(adapter->netdev);
-
- if (num_tc > 4) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
- return -EPERM;
- }
- } else if ((num_tc > 1) && (num_tc <= 4)) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
- return -EPERM;
- }
- } else {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
- return -EPERM;
- }
+ num_tc = adapter->hw_tcs;
+ num_rx_pools = adapter->num_rx_pools;
+ limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
+ (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
+
+ if (num_vfs > (limit - num_rx_pools)) {
+ e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
+ num_tc, num_rx_pools - 1, limit - num_rx_pools);
+ return -EPERM;
}
err = __ixgbe_enable_sriov(adapter, num_vfs);
@@ -378,13 +351,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
int err;
#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+ int prev_num_vf = pci_num_vf(dev);
#endif
err = ixgbe_disable_sriov(adapter);
/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
- if (!err && current_flags != adapter->flags)
+ if (!err && (current_flags != adapter->flags ||
+ prev_num_vf != pci_num_vf(dev)))
ixgbe_sriov_reinit(adapter);
#endif
@@ -738,7 +713,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+ u8 num_tcs = adapter->hw_tcs;
	/* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
@@ -946,7 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
{
u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (adapter->vfinfo[vf].pf_vlan || tcs) {
e_warn(drv,
@@ -1034,7 +1009,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
struct net_device *dev = adapter->netdev;
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int default_tc = 0;
- u8 num_tcs = netdev_get_num_tc(dev);
+ u8 num_tcs = adapter->hw_tcs;
/* verify the PF is supporting the correct APIs */
switch (adapter->vfinfo[vf].vf_api) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ffa0ee5cd0f5..ca45359686d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data {
struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
};
+#define NVM_OROM_OFFSET 0x17
+#define NVM_OROM_BLK_LOW 0x83
+#define NVM_OROM_BLK_HI 0x84
+#define NVM_OROM_PATCH_MASK 0xFF
+#define NVM_OROM_SHIFT 8
+
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETK_VALID 0x8000
+#define NVM_INVALID_PTR 0xFFFF
+#define NVM_VER_SIZE 32 /* version string size */
+
+struct ixgbe_nvm_version {
+ u32 etk_id;
+ u8 nvm_major;
+ u16 nvm_minor;
+ u8 nvm_id;
+
+ bool oem_valid;
+ u8 oem_major;
+ u8 oem_minor;
+ u16 oem_release;
+
+ bool or_valid;
+ u8 or_major;
+ u16 or_build;
+ u8 or_patch;
+};
+
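A hedged sketch of how the ETrack ID is presumably assembled from the two EEPROM words named above; the in-tree helper is ixgbe_get_etk_id(), which is outside this hunk, so take this as an illustration of the define semantics rather than the exact implementation:

	u16 lo, hi;
	u32 etk_id = NVM_VER_INVALID;

	/* 0x2D/0x2E hold the low/high halves of the ETrack ID */
	hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &lo);
	hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &hi);

	if (hi & NVM_ETK_VALID)	/* high word carries the validity bit */
		etk_id = ((u32)hi << NVM_ETK_SHIFT) | lo;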
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
#define IXGBE_EICS 0x00808
@@ -2321,11 +2360,6 @@ enum {
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
/* Multiple Transmit Queue Command Register */
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
@@ -2377,6 +2411,9 @@ enum {
#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000 /* overlap ERR_PE */
+#define IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000 /* overlap ERR_OSE */
+#define IXGBE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000
#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
@@ -2398,6 +2435,7 @@ enum {
#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */
+#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -2464,13 +2502,6 @@ enum {
#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
-/* Security Processing bit Indication */
-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
-
/* Masks to determine if packets should be dropped due to frame errors */
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
IXGBE_RXD_ERR_CE | \
@@ -2484,6 +2515,8 @@ enum {
IXGBE_RXDADV_ERR_LE | \
IXGBE_RXDADV_ERR_PE | \
IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL | \
+ IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH | \
IXGBE_RXDADV_ERR_USE)
/* Multicast bit mask */
@@ -2862,7 +2895,7 @@ union ixgbe_adv_rx_desc {
/* Context descriptors */
struct ixgbe_adv_tx_context_desc {
__le32 vlan_macip_lens;
- __le32 seqnum_seed;
+ __le32 fceof_saidx;
__le32 type_tucmd_mlhl;
__le32 mss_l4len_idx;
};
@@ -2893,6 +2926,7 @@ struct ixgbe_adv_tx_context_desc {
IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2908,7 +2942,6 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */
#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/
-#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cb7da5f9c4da..f470d0204771 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -949,7 +949,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
u16 length, bufsz, i, start;
u16 *local_buffer;
- bufsz = sizeof(buf) / sizeof(buf[0]);
+ bufsz = ARRAY_SIZE(buf);
/* Read a chunk at the pointer location */
if (!buffer) {
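ARRAY_SIZE() computes the same value as the sizeof division it replaces, but adds a compile-time check that its operand really is an array. Quick illustration:

	u16 buf[256];
	size_t n = ARRAY_SIZE(buf);	/* 256 */

	/* With a pointer, sizeof(p)/sizeof(p[0]) silently yields
	 * nonsense; ARRAY_SIZE(p) refuses to compile instead.
	 */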
@@ -1642,10 +1642,12 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
}
/**
- * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
- * @hw: pointer to hardware structure
+ * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait_to_complete: unused
*
- * Configures the extern PHY and the integrated KR PHY for SFP support.
+ * Configures the extern PHY and the integrated KR PHY for SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
@@ -1737,6 +1739,8 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
/**
* ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
*
 * Configure the integrated PHY for native SFP support.
*/
@@ -1784,6 +1788,8 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/**
* ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
*
 * Configure the integrated PHY for SFP support.
*/
@@ -1859,7 +1865,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @autoneg_wait: true when waiting for completion is needed
*
* Setup internal/external PHY link speed based on link speed, then set
* external PHY auto advertised link speed.
@@ -1943,6 +1949,8 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
/**
* ixgbe_setup_sgmii - Set up link for sgmii
* @hw: pointer to hardware structure
+ * @speed: unused
+ * @autoneg_wait_to_complete: unused
*/
static s32
ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
@@ -2014,6 +2022,8 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
/**
* ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
* @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait: true when waiting for completion is needed
*/
static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
@@ -3735,6 +3745,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* ixgbe_read_phy_reg_x550a - Reads specified PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register using the SWFW lock and PHY
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index ff9d05f308ee..4400e49090b4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -75,6 +75,9 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
IXGBEVF_NETDEV_STAT(multicast),
IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+ IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
+ IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+ IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};
#define IXGBEVF_QUEUE_STATS_LEN ( \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 581f44bbd7b3..f6952425c87d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -62,7 +62,12 @@ struct ixgbevf_tx_buffer {
struct ixgbevf_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct ixgbevf_stats {
@@ -79,6 +84,7 @@ struct ixgbevf_tx_queue_stats {
struct ixgbevf_rx_queue_stats {
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
+ u64 alloc_rx_page;
u64 csum_err;
};
@@ -260,6 +266,9 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+#define IXGBEVF_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
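These attrs change the Rx mapping contract: DMA_ATTR_SKIP_CPU_SYNC means map and unmap no longer sync the whole page, so the driver brackets each fragment with explicit partial syncs and must pass the same attrs at unmap time. The four calls this patch pairs up, condensed:

	/* map once per page; attrs must match on unmap */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);

	/* hand a fragment to the device */
	dma_sync_single_range_for_device(rx_ring->dev, dma, offset,
					 IXGBEVF_RX_BUFSZ, DMA_FROM_DEVICE);

	/* before the CPU reads a completed fragment */
	dma_sync_single_range_for_cpu(rx_ring->dev, dma, offset, size,
				      DMA_FROM_DEVICE);

	/* final teardown */
	dma_unmap_page_attrs(rx_ring->dev, dma, PAGE_SIZE,
			     DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);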
/* board specific private data structure */
struct ixgbevf_adapter {
/* this field must be first, see ixgbevf_process_skb_fields */
@@ -287,8 +296,9 @@ struct ixgbevf_adapter {
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
int num_msix_vectors;
- u32 alloc_rx_page_failed;
- u32 alloc_rx_buff_failed;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+ u64 alloc_rx_page;
struct msix_entry *msix_entries;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f4a69134ade..9b3d43d28106 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -206,28 +206,6 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
}
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
- struct ixgbevf_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* tx_buffer must be completely set up in the transmit path */
-}
-
static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
return ring->stats.packets;
@@ -349,7 +327,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
DMA_TO_DEVICE);
/* clear tx_buffer data */
- tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* unmap remaining buffers */
@@ -554,7 +531,6 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
* ixgbevf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -596,8 +572,8 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -605,13 +581,15 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_page(page);
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ bi->pagecnt_bias = 1;
+ rx_ring->rx_stats.alloc_rx_page++;
return true;
}
@@ -640,6 +618,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if pkt_addr didn't change
* because each write-back erases this info.
*/
@@ -654,8 +638,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
i -= rx_ring->count;
}
- /* clear the hdr_addr for the next_to_use descriptor */
- rx_desc->read.hdr_addr = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -742,12 +726,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
new_buff->page = old_buff->page;
new_buff->dma = old_buff->dma;
new_buff->page_offset = old_buff->page_offset;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
- new_buff->page_offset,
- IXGBEVF_RX_BUFSZ,
- DMA_FROM_DEVICE);
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool ixgbevf_page_is_reserved(struct page *page)
@@ -755,6 +734,45 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
+ struct page *page,
+ const unsigned int truesize)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
+ /* avoid re-using remote pages */
+ if (unlikely(ixgbevf_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_ref_count(page) != pagecnt_bias))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+ return false;
+
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
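The pagecnt_bias scheme batches page refcounting: the driver takes a large block of references once and pays them back with a plain local decrement, so the reuse fast path touches no atomics on struct page. The lifecycle, as we read this patch:

	/* alloc:	page_ref == 1, pagecnt_bias == 1
	 * drained:	page_ref_add(page, USHRT_MAX); bias = USHRT_MAX
	 * each reuse:	bias--  (local, no atomic page operation)
	 * final free:	__page_frag_cache_drain(page, bias) returns
	 *		every reference the driver still holds
	 *
	 * Reuse is legal only while the driver owns all references:
	 */
	if (page_ref_count(page) != pagecnt_bias)
		return false;	/* a fragment is still out with the stack */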
/**
* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -772,12 +790,12 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
**/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
+ u16 size,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
@@ -796,7 +814,6 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
return true;
/* this page cannot be reused so discard it */
- put_page(page);
return false;
}
@@ -816,32 +833,7 @@ add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
(unsigned long)va & ~PAGE_MASK, size, truesize);
- /* avoid re-using remote pages */
- if (unlikely(ixgbevf_page_is_reserved(page)))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
-
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
- return false;
-
-#endif
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- page_ref_inc(page);
-
- return true;
+ return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
}
static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
@@ -850,11 +842,19 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
{
struct ixgbevf_rx_buffer *rx_buffer;
struct page *page;
+ u16 size = le16_to_cpu(rx_desc->wb.upper.length);
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
@@ -880,21 +880,18 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
prefetchw(skb->data);
}
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- IXGBEVF_RX_BUFSZ,
- DMA_FROM_DEVICE);
-
/* pull page into skb */
- if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE,
+ IXGBEVF_RX_DMA_ATTR);
+ __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
}
/* clear contents of buffer_info */
@@ -931,7 +928,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+ if (!rx_desc->wb.upper.length)
break;
/* This memory barrier is needed to keep us from reading
@@ -944,8 +941,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
- if (!skb)
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
+ }
cleaned_count++;
@@ -1554,6 +1553,10 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl |= (1u << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct ixgbevf_tx_buffer) * ring->count);
+
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
@@ -1722,6 +1725,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
struct ixgbevf_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
@@ -1750,6 +1754,14 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct ixgbevf_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IXGBEVF_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
/* reset ntu and ntc to place SW in sync with hardwdare */
ring->next_to_clean = 0;
ring->next_to_use = 0;
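Zeroing descriptor 0's length here matters because the clean path in this patch polls the written-back length rather than the DD status bit; a non-zero length is the only signal that hardware finished a write-back. Condensed form of the consumer side:

	/* length == 0 -> still owned by hardware; != 0 -> frame ready */
	rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
	if (!rx_desc->wb.upper.length)
		break;

	/* order the length read before the rest of the descriptor */
	dma_rmb();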
@@ -1896,10 +1908,6 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
unsigned int flags = netdev->flags;
int xcast_mode;
- xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
- (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
- IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
-
/* request the most inclusive mode we need */
if (flags & IFF_PROMISC)
xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
@@ -2108,9 +2116,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- unsigned long size;
- unsigned int i;
+ u16 i = rx_ring->next_to_clean;
/* Free Rx ring sk_buff */
if (rx_ring->skb) {
@@ -2118,29 +2124,39 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
rx_ring->skb = NULL;
}
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buffer_info)
- return;
-
/* Free all the Rx ring pages */
- for (i = 0; i < rx_ring->count; i++) {
+ while (i != rx_ring->next_to_alloc) {
struct ixgbevf_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_buffer->dma)
- dma_unmap_page(dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
- rx_buffer->dma = 0;
- if (rx_buffer->page)
- __free_page(rx_buffer->page);
- rx_buffer->page = NULL;
- }
- size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
+	/* Invalidate cache lines that may have been written to by the
+	 * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev,
+ rx_buffer->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ IXGBEVF_RX_DMA_ATTR);
+
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
}
/**
@@ -2149,23 +2165,57 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
**/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned long size;
- unsigned int i;
+ u16 i = tx_ring->next_to_clean;
+ struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (!tx_ring->tx_buffer_info)
- return;
+ while (i != tx_ring->next_to_use) {
+ union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
}
- size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
+ /* reset next_to_use and next_to_clean */
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
- memset(tx_ring->desc, 0, tx_ring->size);
}
/**
@@ -2717,6 +2767,8 @@ out:
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
int i;
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
@@ -2737,15 +2789,23 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
adapter->stats.vfmprc);
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->hw_csum_rx_error +=
- adapter->rx_ring[i]->hw_csum_rx_error;
- adapter->rx_ring[i]->hw_csum_rx_error = 0;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+ hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
}
+
+ adapter->hw_csum_rx_error = hw_csum_rx_error;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ adapter->alloc_rx_page = alloc_rx_page;
}
/**
* ixgbevf_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list struct
**/
static void ixgbevf_service_timer(struct timer_list *t)
{
@@ -2888,7 +2948,7 @@ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_watchdog_subtask - worker thread to bring link up
- * @work: pointer to work_struct containing our data
+ * @adapter: board private structure
**/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
@@ -2985,7 +3045,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -3045,7 +3105,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
int size;
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
@@ -3487,34 +3547,37 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
const u8 hdr_len)
{
- dma_addr_t dma;
struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
- __le32 cmd_type;
+ __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
- cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+ ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
+ tx_buffer = first;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
@@ -3525,12 +3588,12 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IXGBE_MAX_DATA_PER_TXD;
size -= IXGBE_MAX_DATA_PER_TXD;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
}
if (likely(!data_len))
@@ -3544,23 +3607,15 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
-
- frag++;
}
/* write last descriptor with RS and EOP bits */
@@ -3594,18 +3649,32 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
return;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i-- == 0)
+ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
}
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
tx_ring->next_to_use = i;
}
@@ -4368,6 +4437,7 @@ static void __exit ixgbevf_exit_module(void)
/**
* ixgbevf_get_hw_dev_name - return device name string
* used by hardware layer to print debugging information
+ * @hw: pointer to private hardware struct
**/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c25006ce9af..38d3a327c1bc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -146,6 +146,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
/**
* Hyper-V variant; the VF/PF communication is through the PCI
* config space.
+ * @hw: pointer to private hardware struct
*/
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
@@ -285,7 +286,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
ether_addr_copy(msg_addr, addr);
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -303,7 +304,7 @@ static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
/**
* ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
- * @adapter: pointer to the port handle
+ * @hw: pointer to hardware structure
* @reta: buffer to fill with RETA contents.
* @num_rx_queues: Number of Rx queues configured for this port
*
@@ -455,8 +456,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
ether_addr_copy(msg_addr, addr);
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
-
+ ARRAY_SIZE(msgbuf));
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
@@ -536,6 +536,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @netdev: unused
*/
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
@@ -571,7 +573,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
msgbuf[1] = xcast_mode;
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (err)
return err;
@@ -584,6 +586,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @xcast_mode: unused
*/
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
@@ -609,7 +613,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (err)
goto mbx_err;
@@ -626,6 +630,10 @@ mbx_err:
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @vlan: unused
+ * @vind: unused
+ * @vlan_on: unused
*/
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
@@ -655,7 +663,7 @@ static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @link_up: true if link is up, false otherwise
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @autoneg_wait_to_complete: unused
*
* Reads the link register to determine if the link is up and the current speed
**/
@@ -740,6 +748,10 @@ out:
/**
* Hyper-V variant; there is no mailbox communication.
+ * @hw: pointer to private hardware struct
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: unused
*/
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -813,7 +825,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
msgbuf[1] = max_size;
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (ret_val)
return ret_val;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
@@ -859,8 +871,7 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[1] = api;
msg[2] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg,
- sizeof(msg) / sizeof(u32));
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -911,8 +922,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
msg[0] = IXGBE_VF_GET_QUEUE;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg,
- sizeof(msg) / sizeof(u32));
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index da6fb825afea..ebe5c9148935 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -60,7 +60,7 @@ config MVNETA
depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
select MVMDIO
- select FIXED_PHY
+ select PHYLINK
---help---
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a539263cd79c..25e9a551cc8c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -28,7 +28,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
@@ -189,6 +189,7 @@
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
@@ -204,13 +205,19 @@
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
+#define MVNETA_GMAC_AN_COMPLETE BIT(11)
+#define MVNETA_GMAC_SYNC_OK BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
+#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
+#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
+#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
+#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
@@ -237,6 +244,12 @@
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+#define MVNETA_LPI_CTRL_0 0x2cc0
+#define MVNETA_LPI_CTRL_1 0x2cc4
+#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_2 0x2cc8
+#define MVNETA_LPI_STATUS 0x2ccc
+
#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
@@ -313,6 +326,11 @@
#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
+enum {
+ ETHTOOL_STAT_EEE_WAKEUP,
+ ETHTOOL_MAX_STATS,
+};
+
struct mvneta_statistic {
unsigned short offset;
unsigned short type;
@@ -321,6 +339,7 @@ struct mvneta_statistic {
#define T_REG_32 32
#define T_REG_64 64
+#define T_SW 1
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
@@ -355,6 +374,7 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x304c, T_REG_32, "broadcast_frames_sent", },
{ 0x3054, T_REG_32, "fc_sent", },
{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
+ { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
};
struct mvneta_pcpu_stats {
@@ -407,20 +427,20 @@ struct mvneta_port {
u16 tx_ring_size;
u16 rx_ring_size;
- struct mii_bus *mii_bus;
phy_interface_t phy_interface;
- struct device_node *phy_node;
- unsigned int link;
- unsigned int duplex;
- unsigned int speed;
+ struct device_node *dn;
unsigned int tx_csum_limit;
- unsigned int use_inband_status:1;
+ struct phylink *phylink;
struct mvneta_bm *bm_priv;
struct mvneta_bm_pool *pool_long;
struct mvneta_bm_pool *pool_short;
int bm_win_id;
+ bool eee_enabled;
+ bool eee_active;
+ bool tx_lpi_enabled;
+
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -1214,10 +1234,6 @@ static void mvneta_port_disable(struct mvneta_port *pp)
val &= ~MVNETA_GMAC0_PORT_ENABLE;
mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
- pp->link = 0;
- pp->duplex = -1;
- pp->speed = 0;
-
udelay(200);
}
@@ -1277,44 +1293,6 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
-static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
-{
- u32 val;
-
- if (enable) {
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_AN_FLOW_CTRL_EN);
- val |= MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
- } else {
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN);
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
- }
-}
-
static void mvneta_percpu_unmask_interrupt(void *arg)
{
struct mvneta_port *pp = arg;
@@ -1467,7 +1445,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
val &= ~MVNETA_PHY_POLLING_ENABLE;
mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
- mvneta_set_autoneg(pp, pp->use_inband_status);
mvneta_set_ucast_table(pp, -1);
mvneta_set_special_mcast_table(pp, -1);
mvneta_set_other_mcast_table(pp, -1);
@@ -2692,26 +2669,11 @@ static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mvneta_fixed_link_update(struct mvneta_port *pp,
- struct phy_device *phy)
+static void mvneta_link_change(struct mvneta_port *pp)
{
- struct fixed_phy_status status;
- struct fixed_phy_status changed = {};
u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
- status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
- if (gmac_stat & MVNETA_GMAC_SPEED_1000)
- status.speed = SPEED_1000;
- else if (gmac_stat & MVNETA_GMAC_SPEED_100)
- status.speed = SPEED_100;
- else
- status.speed = SPEED_10;
- status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
- changed.link = 1;
- changed.speed = 1;
- changed.duplex = 1;
- fixed_phy_update_state(phy, &status, &changed);
- return 0;
+ phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}
/* NAPI handler
@@ -2727,7 +2689,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
u32 cause_rx_tx;
int rx_queue;
struct mvneta_port *pp = netdev_priv(napi->dev);
- struct net_device *ndev = pp->dev;
struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
if (!netif_running(pp->dev)) {
@@ -2741,12 +2702,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
- if (pp->use_inband_status && (cause_misc &
- (MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
- mvneta_fixed_link_update(pp, ndev->phydev);
- }
+
+ if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE))
+ mvneta_link_change(pp);
}
/* Release Tx descriptors */
@@ -3060,7 +3019,6 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
static void mvneta_start_dev(struct mvneta_port *pp)
{
int cpu;
- struct net_device *ndev = pp->dev;
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3085,19 +3043,17 @@ static void mvneta_start_dev(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ MVNETA_CAUSE_LINK_CHANGE);
- phy_start(ndev->phydev);
+ phylink_start(pp->phylink);
netif_tx_start_all_queues(pp->dev);
}
static void mvneta_stop_dev(struct mvneta_port *pp)
{
unsigned int cpu;
- struct net_device *ndev = pp->dev;
- phy_stop(ndev->phydev);
+ phylink_stop(pp->phylink);
if (!pp->neta_armada3700) {
for_each_online_cpu(cpu) {
@@ -3251,103 +3207,260 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
return 0;
}
-static void mvneta_adjust_link(struct net_device *ndev)
+static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_QSGMII &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface) &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+
+ /* Asymmetric pause is unsupported */
+ phylink_set(mask, Pause);
+ /* Half-duplex at speeds higher than 100Mbit is unsupported */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ if (!phy_interface_mode_is_8023z(state->interface)) {
+ /* 10M and 100M are only supported in non-802.3z mode */
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
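mvneta_validate() builds a mask of the link modes usable on the requested interface and intersects both the supported and advertising bitmaps with it, so phylink never negotiates a mode the MAC cannot handle. Reduced to plain bit masks (the real code uses the ethtool link-mode bitmap helpers), the pruning step is an intersection:

/* Sketch: prune 'supported' to what the MAC allows for an interface. */
#include <stdio.h>

enum {
	MODE_10_HALF   = 1 << 0,
	MODE_100_FULL  = 1 << 1,
	MODE_1000_FULL = 1 << 2,
};

int main(void)
{
	unsigned long supported = MODE_10_HALF | MODE_100_FULL | MODE_1000_FULL;
	unsigned long mask = MODE_1000_FULL;	/* e.g. an 802.3z interface */

	supported &= mask;			/* bitmap_and() in the driver */
	printf("%#lx\n", supported);		/* 0x4 */
	return 0;
}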
+
+static int mvneta_mac_link_state(struct net_device *ndev,
+ struct phylink_link_state *state)
{
struct mvneta_port *pp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
- int status_change = 0;
+ u32 gmac_stat;
+
+ gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
- if (phydev->link) {
- if ((pp->speed != phydev->speed) ||
- (pp->duplex != phydev->duplex)) {
- u32 val;
+ if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+ state->speed = SPEED_1000;
+ else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+ state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
+ state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+ state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
+
+ state->pause = 0;
+ if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
+ state->pause |= MLO_PAUSE_RX;
+ if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
+ state->pause |= MLO_PAUSE_TX;
+
+ return 1;
+}
- if (phydev->duplex)
- val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+static void mvneta_mac_an_restart(struct net_device *ndev)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- if (phydev->speed == SPEED_1000)
- val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else if (phydev->speed == SPEED_100)
- val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
+}
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+ new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
+ new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
+ MVNETA_GMAC2_PORT_RESET);
+ new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_INBAND_RESTART_AN |
+ MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
+ MVNETA_GMAC_CONFIG_FLOW_CTRL |
+ MVNETA_GMAC_AN_FLOW_CTRL_EN |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+ MVNETA_GMAC_AN_DUPLEX_EN);
- pp->duplex = phydev->duplex;
- pp->speed = phydev->speed;
- }
+ /* Even though it might look weird, when we're configured in
+ * SGMII or QSGMII mode, the RGMII bit needs to be set.
+ */
+ new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
+
+ if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
+
+ if (phylink_test(state->advertising, Pause))
+ new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ if (state->pause & MLO_PAUSE_TXRX_MASK)
+ new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ if (!phylink_autoneg_inband(mode)) {
+ /* Phy or fixed speed */
+ if (state->duplex)
+ new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (state->speed == SPEED_1000)
+ new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+ else if (state->speed == SPEED_100)
+ new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the state from the PHY */
+ new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+ new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z negotiation - only 1000base-X */
+ new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
+ new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ /* The MAC only supports FD mode */
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (state->pause & MLO_PAUSE_AN && state->an_enabled)
+ new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
}
- if (phydev->link != pp->link) {
- if (!phydev->link) {
- pp->duplex = -1;
- pp->speed = 0;
- }
+ /* Armada 370 documentation says we can only change the port mode
+ * and in-band enable when the link is down, so force it down
+ * while making these changes. We also do this for GMAC_CTRL2.
+ */
+ if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
+ (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
+ (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
+ MVNETA_GMAC_FORCE_LINK_DOWN);
+ }
- pp->link = phydev->link;
- status_change = 1;
+ if (new_ctrl0 != gmac_ctrl0)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
+ if (new_ctrl2 != gmac_ctrl2)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+ if (new_clk != gmac_clk)
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
+ if (new_an != gmac_an)
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
+
+ if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
+ while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+ MVNETA_GMAC2_PORT_RESET) != 0)
+ continue;
}
+}
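mvneta_mac_config() derives each GMAC register's new value by read-modify-write, forces the link down when the port mode or in-band AN enable is about to change, and then writes back only the registers whose value actually changed. The write-only-if-changed idiom in isolation, with stand-ins for mvreg_read()/mvreg_write():

/* Sketch: update a register only when its value really changes. */
#include <stdio.h>

static unsigned int regs[4];

static unsigned int reg_read(int r)          { return regs[r]; }
static void reg_write(int r, unsigned int v) { regs[r] = v; }

#define CTRL2     1
#define INBAND_AN (1u << 0)

int main(void)
{
	unsigned int old = reg_read(CTRL2);
	unsigned int new = (old & ~INBAND_AN) | INBAND_AN;  /* set the bit */

	if (new != old)		/* skip the MMIO write when nothing changed */
		reg_write(CTRL2, new);
	printf("%#x\n", reg_read(CTRL2));	/* 0x1 */
	return 0;
}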
- if (status_change) {
- if (phydev->link) {
- if (!pp->use_inband_status) {
- u32 val = mvreg_read(pp,
- MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
- val |= MVNETA_GMAC_FORCE_LINK_PASS;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
- val);
- }
- mvneta_port_up(pp);
- } else {
- if (!pp->use_inband_status) {
- u32 val = mvreg_read(pp,
- MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
- val |= MVNETA_GMAC_FORCE_LINK_DOWN;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
- val);
- }
- mvneta_port_down(pp);
- }
- phy_print_status(phydev);
+static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
+{
+ u32 lpi_ctl1;
+
+ lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ if (enable)
+ lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
+ else
+ lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
+}
+
+static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
+
+ mvneta_port_down(pp);
+
+ if (!phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+ val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
+
+ pp->eee_active = false;
+ mvneta_set_eee(pp, false);
}
-static int mvneta_mdio_probe(struct mvneta_port *pp)
+static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
+ struct phy_device *phy)
{
- struct phy_device *phy_dev;
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
- phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
- pp->phy_interface);
- if (!phy_dev) {
- netdev_err(pp->dev, "could not find the PHY\n");
- return -ENODEV;
+ if (!phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ val |= MVNETA_GMAC_FORCE_LINK_PASS;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
- phy_ethtool_get_wol(phy_dev, &wol);
- device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+ mvneta_port_up(pp);
- phy_dev->supported &= PHY_GBIT_FEATURES;
- phy_dev->advertising = phy_dev->supported;
+ if (phy && pp->eee_enabled) {
+ pp->eee_active = phy_init_eee(phy, 0) >= 0;
+ mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+ }
+}
- pp->link = 0;
- pp->duplex = 0;
- pp->speed = 0;
+static const struct phylink_mac_ops mvneta_phylink_ops = {
+ .validate = mvneta_validate,
+ .mac_link_state = mvneta_mac_link_state,
+ .mac_an_restart = mvneta_mac_an_restart,
+ .mac_config = mvneta_mac_config,
+ .mac_link_down = mvneta_mac_link_down,
+ .mac_link_up = mvneta_mac_link_up,
+};
- return 0;
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
+
+ if (err)
+ netdev_err(pp->dev, "could not attach PHY: %d\n", err);
+
+ phylink_ethtool_get_wol(pp->phylink, &wol);
+ device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+
+ return err;
}
static void mvneta_mdio_remove(struct mvneta_port *pp)
{
- struct net_device *ndev = pp->dev;
-
- phy_disconnect(ndev->phydev);
+ phylink_disconnect_phy(pp->phylink);
}
/* Electing a CPU must be done in an atomic way: it should be done
@@ -3455,8 +3568,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
spin_unlock(&pp->lock);
return 0;
@@ -3497,8 +3609,7 @@ static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
return 0;
}
@@ -3626,10 +3737,9 @@ static int mvneta_stop(struct net_device *dev)
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- if (!dev->phydev)
- return -ENOTSUPP;
+ struct mvneta_port *pp = netdev_priv(dev);
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}
/* Ethtool methods */
@@ -3640,44 +3750,25 @@ mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd)
{
struct mvneta_port *pp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
-
- if (!phydev)
- return -ENODEV;
-
- if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
- u32 val;
-
- mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
-
- if (cmd->base.autoneg == AUTONEG_DISABLE) {
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
-
- if (phydev->duplex)
- val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
- if (phydev->speed == SPEED_1000)
- val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else if (phydev->speed == SPEED_100)
- val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ return phylink_ethtool_ksettings_set(pp->phylink, cmd);
+}
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
- }
+/* Get link ksettings for ethtools */
+static int
+mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
- pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
- netdev_info(pp->dev, "autoneg status set to %i\n",
- pp->use_inband_status);
+ return phylink_ethtool_ksettings_get(pp->phylink, cmd);
+}
- if (netif_running(ndev)) {
- mvneta_port_down(pp);
- mvneta_port_up(pp);
- }
- }
+static int mvneta_ethtool_nway_reset(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
- return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+ return phylink_ethtool_nway_reset(pp->phylink);
}
/* Set interrupt coalescing for ethtools */
@@ -3769,6 +3860,22 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
return 0;
}
+static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(pp->phylink, pause);
+}
+
+static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(pp->phylink, pause);
+}
+
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
@@ -3785,26 +3892,35 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
const struct mvneta_statistic *s;
void __iomem *base = pp->base;
- u32 high, low, val;
- u64 val64;
+ u32 high, low;
+ u64 val;
int i;
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
+ val = 0;
+
switch (s->type) {
case T_REG_32:
val = readl_relaxed(base + s->offset);
- pp->ethtool_stats[i] += val;
break;
case T_REG_64:
/* Docs say to read low 32-bit then high */
low = readl_relaxed(base + s->offset);
high = readl_relaxed(base + s->offset + 4);
- val64 = (u64)high << 32 | low;
- pp->ethtool_stats[i] += val64;
+ val = (u64)high << 32 | low;
+ break;
+ case T_SW:
+ switch (s->offset) {
+ case ETHTOOL_STAT_EEE_WAKEUP:
+ val = phylink_get_eee_err(pp->phylink);
+ break;
+ }
break;
}
+
+ pp->ethtool_stats[i] += val;
}
}
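For T_REG_64 statistics the counter is exposed as two 32-bit registers, read low half first as the docs require, then recombined before being accumulated into ethtool_stats[]. The recombination is a shift-and-or:

/* Sketch: rebuild a 64-bit counter from two 32-bit register reads. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t low = 0xdeadbeef;	/* first readl_relaxed() */
	uint32_t high = 0x00000001;	/* second readl_relaxed() */
	uint64_t val = (uint64_t)high << 32 | low;

	printf("%#llx\n", (unsigned long long)val);	/* 0x1deadbeef */
	return 0;
}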
@@ -3939,28 +4055,81 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
static void mvneta_ethtool_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
- wol->supported = 0;
- wol->wolopts = 0;
+ struct mvneta_port *pp = netdev_priv(dev);
- if (dev->phydev)
- phy_ethtool_get_wol(dev->phydev, wol);
+ phylink_ethtool_get_wol(pp->phylink, wol);
}
static int mvneta_ethtool_set_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
+ struct mvneta_port *pp = netdev_priv(dev);
int ret;
- if (!dev->phydev)
- return -EOPNOTSUPP;
-
- ret = phy_ethtool_set_wol(dev->phydev, wol);
+ ret = phylink_ethtool_set_wol(pp->phylink, wol);
if (!ret)
device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
return ret;
}
+static int mvneta_ethtool_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_get_module_info(pp->phylink, modinfo);
+}
+
+static int mvneta_ethtool_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *buf)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf);
+}
+
+static int mvneta_ethtool_get_eee(struct net_device *dev,
+ struct ethtool_eee *eee)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u32 lpi_ctl0;
+
+ lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+
+ eee->eee_enabled = pp->eee_enabled;
+ eee->eee_active = pp->eee_active;
+ eee->tx_lpi_enabled = pp->tx_lpi_enabled;
+ eee->tx_lpi_timer = lpi_ctl0 >> 8;
+
+ return phylink_ethtool_get_eee(pp->phylink, eee);
+}
+
+static int mvneta_ethtool_set_eee(struct net_device *dev,
+ struct ethtool_eee *eee)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u32 lpi_ctl0;
+
+ /* The Armada 37x documents do not give limits for this other than
+ * it being an 8-bit register.
+ */
+ if (eee->tx_lpi_enabled &&
+ (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
+ return -EINVAL;
+
+ lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+ lpi_ctl0 &= ~(0xff << 8);
+ lpi_ctl0 |= eee->tx_lpi_timer << 8;
+ mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
+
+ pp->eee_enabled = eee->eee_enabled;
+ pp->tx_lpi_enabled = eee->tx_lpi_enabled;
+
+ mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
+
+ return phylink_ethtool_set_eee(pp->phylink, eee);
+}
+
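mvneta_ethtool_set_eee() bounds-checks the LPI timer (the hardware field is eight bits wide) and read-modify-writes it into bits 15:8 of LPI_CTRL_0. Field packing with a mask and a shift, shown standalone with an illustrative register value:

/* Sketch: replace an 8-bit field at bits 15:8 of a register. */
#include <stdio.h>

#define TIMER_SHIFT 8
#define TIMER_MASK  (0xffu << TIMER_SHIFT)

int main(void)
{
	unsigned int reg = 0xab12;	/* pretend mvreg_read() result */
	unsigned int timer = 55;

	if (timer > 255)		/* the field cannot hold more */
		return 1;
	reg = (reg & ~TIMER_MASK) | (timer << TIMER_SHIFT);
	printf("%#x\n", reg);		/* 0x3712: bits 15:8 replaced */
	return 0;
}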
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -3974,13 +4143,15 @@ static const struct net_device_ops mvneta_netdev_ops = {
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
- .nway_reset = phy_ethtool_nway_reset,
+ .nway_reset = mvneta_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvneta_ethtool_set_coalesce,
.get_coalesce = mvneta_ethtool_get_coalesce,
.get_drvinfo = mvneta_ethtool_get_drvinfo,
.get_ringparam = mvneta_ethtool_get_ringparam,
.set_ringparam = mvneta_ethtool_set_ringparam,
+ .get_pauseparam = mvneta_ethtool_get_pauseparam,
+ .set_pauseparam = mvneta_ethtool_set_pauseparam,
.get_strings = mvneta_ethtool_get_strings,
.get_ethtool_stats = mvneta_ethtool_get_stats,
.get_sset_count = mvneta_ethtool_get_sset_count,
@@ -3988,10 +4159,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.get_rxnfc = mvneta_ethtool_get_rxnfc,
.get_rxfh = mvneta_ethtool_get_rxfh,
.set_rxfh = mvneta_ethtool_set_rxfh,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
.get_wol = mvneta_ethtool_get_wol,
.set_wol = mvneta_ethtool_set_wol,
+ .get_module_info = mvneta_ethtool_get_module_info,
+ .get_module_eeprom = mvneta_ethtool_get_module_eeprom,
+ .get_eee = mvneta_ethtool_get_eee,
+ .set_eee = mvneta_ethtool_set_eee,
};
/* Initialize hw */
@@ -4091,42 +4266,16 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
- u32 ctrl;
-
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
- ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- /* Even though it might look weird, when we're configured in
- * SGMII or QSGMII mode, the RGMII bit needs to be set.
- */
- switch(phy_mode) {
- case PHY_INTERFACE_MODE_QSGMII:
+ if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
- ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
- break;
- case PHY_INTERFACE_MODE_SGMII:
+ else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_1000BASEX)
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
- ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- ctrl |= MVNETA_GMAC2_PORT_RGMII;
- break;
- default:
+ else if (!phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
- }
-
- /* Cancel Port Reset */
- ctrl &= ~MVNETA_GMAC2_PORT_RESET;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
-
- while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
- MVNETA_GMAC2_PORT_RESET) != 0)
- continue;
return 0;
}
@@ -4136,14 +4285,13 @@ static int mvneta_probe(struct platform_device *pdev)
{
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
- struct device_node *phy_node;
struct device_node *bm_node;
struct mvneta_port *pp;
struct net_device *dev;
+ struct phylink *phylink;
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
const char *mac_from;
- const char *managed;
int tx_csum_limit;
int phy_mode;
int err;
@@ -4159,31 +4307,18 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_free_netdev;
}
- phy_node = of_parse_phandle(dn, "phy", 0);
- if (!phy_node) {
- if (!of_phy_is_fixed_link(dn)) {
- dev_err(&pdev->dev, "no PHY specified\n");
- err = -ENODEV;
- goto err_free_irq;
- }
-
- err = of_phy_register_fixed_link(dn);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot register fixed PHY\n");
- goto err_free_irq;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- phy_node = of_node_get(dn);
- }
-
phy_mode = of_get_phy_mode(dn);
if (phy_mode < 0) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
err = -EINVAL;
- goto err_put_phy_node;
+ goto err_free_irq;
+ }
+
+ phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
+ &mvneta_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_free_irq;
}
dev->tx_queue_len = MVNETA_MAX_TXD;
@@ -4194,12 +4329,9 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
spin_lock_init(&pp->lock);
- pp->phy_node = phy_node;
+ pp->phylink = phylink;
pp->phy_interface = phy_mode;
-
- err = of_property_read_string(dn, "managed", &managed);
- pp->use_inband_status = (err == 0 &&
- strcmp(managed, "in-band-status") == 0);
+ pp->dn = dn;
pp->rxq_def = rxq_def;
@@ -4221,7 +4353,7 @@ static int mvneta_probe(struct platform_device *pdev)
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
err = PTR_ERR(pp->clk);
- goto err_put_phy_node;
+ goto err_free_phylink;
}
clk_prepare_enable(pp->clk);
@@ -4358,14 +4490,6 @@ static int mvneta_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pp->dev);
- if (pp->use_inband_status) {
- struct phy_device *phy = of_phy_find_device(dn);
-
- mvneta_fixed_link_update(pp, phy);
-
- put_device(&phy->mdio.dev);
- }
-
return 0;
err_netdev:
@@ -4382,10 +4506,9 @@ err_free_ports:
err_clk:
clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk);
-err_put_phy_node:
- of_node_put(phy_node);
- if (of_phy_is_fixed_link(dn))
- of_phy_deregister_fixed_link(dn);
+err_free_phylink:
+ if (pp->phylink)
+ phylink_destroy(pp->phylink);
err_free_irq:
irq_dispose_mapping(dev->irq);
err_free_netdev:
@@ -4397,7 +4520,6 @@ err_free_netdev:
static int mvneta_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct device_node *dn = pdev->dev.of_node;
struct mvneta_port *pp = netdev_priv(dev);
unregister_netdev(dev);
@@ -4405,10 +4527,8 @@ static int mvneta_remove(struct platform_device *pdev)
clk_disable_unprepare(pp->clk);
free_percpu(pp->ports);
free_percpu(pp->stats);
- if (of_phy_is_fixed_link(dn))
- of_phy_deregister_fixed_link(dn);
irq_dispose_mapping(dev->irq);
- of_node_put(pp->phy_node);
+ phylink_destroy(pp->phylink);
free_netdev(dev);
if (pp->bm_priv) {
@@ -4426,8 +4546,10 @@ static int mvneta_suspend(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
struct mvneta_port *pp = netdev_priv(dev);
+ rtnl_lock();
if (netif_running(dev))
mvneta_stop(dev);
+ rtnl_unlock();
netif_device_detach(dev);
clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk);
@@ -4460,14 +4582,13 @@ static int mvneta_resume(struct device *device)
return err;
}
- if (pp->use_inband_status)
- mvneta_fixed_link_update(pp, dev->phydev);
-
netif_device_attach(dev);
+ rtnl_lock();
if (netif_running(dev)) {
mvneta_open(dev);
mvneta_set_rx_mode(dev);
}
+ rtnl_unlock();
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 634b2f41cc9e..a1d7b88cf083 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -10,6 +10,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -454,11 +455,11 @@
/* Various constants */
/* Coalescing */
-#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_TXDONE_COAL_USEC 1000
#define MVPP2_RX_COAL_PKTS 32
-#define MVPP2_RX_COAL_USEC 100
+#define MVPP2_RX_COAL_USEC 64
/* The two-byte Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
@@ -504,10 +505,12 @@
#define MVPP2_DEFAULT_RXQ 4
/* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD 128
+#define MVPP2_MAX_RXD_MAX 1024
+#define MVPP2_MAX_RXD_DFLT 128
/* Max number of Tx descriptors */
-#define MVPP2_MAX_TXD 1024
+#define MVPP2_MAX_TXD_MAX 2048
+#define MVPP2_MAX_TXD_DFLT 1024
/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64
@@ -863,7 +866,7 @@ struct mvpp2 {
/* List of pointers to port structures */
int port_count;
- struct mvpp2_port **port_list;
+ struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
/* Aggregated TXQs */
struct mvpp2_tx_queue *aggr_txqs;
@@ -930,6 +933,9 @@ struct mvpp2_port {
struct mvpp2 *priv;
+ /* Firmware node associated to the port */
+ struct fwnode_handle *fwnode;
+
/* Per-port registers' base address */
void __iomem *base;
void __iomem *stats_base;
@@ -5802,6 +5808,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->reserved_num = 0;
txq_pcpu->txq_put_index = 0;
txq_pcpu->txq_get_index = 0;
+ txq_pcpu->tso_headers = NULL;
txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
@@ -5829,10 +5836,13 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->buffs);
- dma_free_coherent(port->dev->dev.parent,
- txq_pcpu->size * TSO_HEADER_SIZE,
- txq_pcpu->tso_headers,
- txq_pcpu->tso_headers_dma);
+ if (txq_pcpu->tso_headers)
+ dma_free_coherent(port->dev->dev.parent,
+ txq_pcpu->size * TSO_HEADER_SIZE,
+ txq_pcpu->tso_headers,
+ txq_pcpu->tso_headers_dma);
+
+ txq_pcpu->tso_headers = NULL;
}
if (txq->descs)
@@ -6832,13 +6842,13 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
if (ring->rx_pending == 0 || ring->tx_pending == 0)
return -EINVAL;
- if (ring->rx_pending > MVPP2_MAX_RXD)
- new_rx_pending = MVPP2_MAX_RXD;
+ if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
+ new_rx_pending = MVPP2_MAX_RXD_MAX;
else if (!IS_ALIGNED(ring->rx_pending, 16))
new_rx_pending = ALIGN(ring->rx_pending, 16);
- if (ring->tx_pending > MVPP2_MAX_TXD)
- new_tx_pending = MVPP2_MAX_TXD;
+ if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
+ new_tx_pending = MVPP2_MAX_TXD_MAX;
else if (!IS_ALIGNED(ring->tx_pending, 32))
new_tx_pending = ALIGN(ring->tx_pending, 32);
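mvpp2_check_ringparam_valid() now clamps requests to the new maxima and rounds unaligned sizes up to the descriptor granularity (16 for Rx, 32 for Tx). The kernel's ALIGN()/IS_ALIGNED() helpers assume a power-of-two alignment; a userspace equivalent:

/* Userspace equivalents of the kernel helpers; 'a' must be a
 * power of two for the bit trick to hold. */
#include <stdio.h>

#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned int req = 100;

	if (!IS_ALIGNED(req, 16))
		req = ALIGN(req, 16);
	printf("%u\n", req);	/* 112 */
	return 0;
}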
@@ -7318,9 +7328,10 @@ static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
{
struct mvpp2_port *port = netdev_priv(dev);
- c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
- c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
- c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+ c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
+ c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
+ c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+ c->tx_coalesce_usecs = port->tx_time_coal;
return 0;
}
@@ -7340,8 +7351,8 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
{
struct mvpp2_port *port = netdev_priv(dev);
- ring->rx_max_pending = MVPP2_MAX_RXD;
- ring->tx_max_pending = MVPP2_MAX_TXD;
+ ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
+ ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
ring->rx_pending = port->rx_ring_size;
ring->tx_pending = port->tx_ring_size;
}
@@ -7492,7 +7503,10 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
strncpy(irqname, "rx-shared", sizeof(irqname));
}
- v->irq = of_irq_get_byname(port_node, irqname);
+ if (port_node)
+ v->irq = of_irq_get_byname(port_node, irqname);
+ else
+ v->irq = fwnode_irq_get(port->fwnode, i);
if (v->irq <= 0) {
ret = -EINVAL;
goto err;
@@ -7704,17 +7718,16 @@ static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
}
static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
- struct device_node *port_node,
+ struct fwnode_handle *fwnode,
char **mac_from)
{
struct mvpp2_port *port = netdev_priv(dev);
char hw_mac_addr[ETH_ALEN] = {0};
- const char *dt_mac_addr;
+ char fw_mac_addr[ETH_ALEN];
- dt_mac_addr = of_get_mac_address(port_node);
- if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
- *mac_from = "device tree";
- ether_addr_copy(dev->dev_addr, dt_mac_addr);
+ if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
+ *mac_from = "firmware node";
+ ether_addr_copy(dev->dev_addr, fw_mac_addr);
return;
}
@@ -7733,13 +7746,14 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
- struct device_node *port_node,
- struct mvpp2 *priv, int index)
+ struct fwnode_handle *port_fwnode,
+ struct mvpp2 *priv)
{
struct device_node *phy_node;
- struct phy *comphy;
+ struct phy *comphy = NULL;
struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
+ struct device_node *port_node = to_of_node(port_fwnode);
struct net_device *dev;
struct resource *res;
char *mac_from = "";
@@ -7750,7 +7764,12 @@ static int mvpp2_port_probe(struct platform_device *pdev,
int phy_mode;
int err, i, cpu;
- has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+ if (port_node) {
+ has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+ } else {
+ has_tx_irqs = true;
+ queue_mode = MVPP2_QDIST_MULTI_MODE;
+ }
if (!has_tx_irqs)
queue_mode = MVPP2_QDIST_SINGLE_MODE;
@@ -7765,36 +7784,43 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (!dev)
return -ENOMEM;
- phy_node = of_parse_phandle(port_node, "phy", 0);
- phy_mode = of_get_phy_mode(port_node);
+ if (port_node)
+ phy_node = of_parse_phandle(port_node, "phy", 0);
+ else
+ phy_node = NULL;
+
+ phy_mode = fwnode_get_phy_mode(port_fwnode);
if (phy_mode < 0) {
dev_err(&pdev->dev, "incorrect phy mode\n");
err = phy_mode;
goto err_free_netdev;
}
- comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
- if (IS_ERR(comphy)) {
- if (PTR_ERR(comphy) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
- goto err_free_netdev;
+ if (port_node) {
+ comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
+ if (IS_ERR(comphy)) {
+ if (PTR_ERR(comphy) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_free_netdev;
+ }
+ comphy = NULL;
}
- comphy = NULL;
}
- if (of_property_read_u32(port_node, "port-id", &id)) {
+ if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
err = -EINVAL;
dev_err(&pdev->dev, "missing port-id value\n");
goto err_free_netdev;
}
- dev->tx_queue_len = MVPP2_MAX_TXD;
+ dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
dev->watchdog_timeo = 5 * HZ;
dev->netdev_ops = &mvpp2_netdev_ops;
dev->ethtool_ops = &mvpp2_eth_tool_ops;
port = netdev_priv(dev);
port->dev = dev;
+ port->fwnode = port_fwnode;
port->ntxqs = ntxqs;
port->nrxqs = nrxqs;
port->priv = priv;
@@ -7804,7 +7830,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (err)
goto err_free_netdev;
- port->link_irq = of_irq_get_byname(port_node, "link");
+ if (port_node)
+ port->link_irq = of_irq_get_byname(port_node, "link");
+ else
+ port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
if (port->link_irq == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_deinit_qvecs;
@@ -7813,7 +7842,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* the link irq is optional */
port->link_irq = 0;
- if (of_property_read_bool(port_node, "marvell,loopback"))
+ if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
port->flags |= MVPP2_F_LOOPBACK;
port->id = id;
@@ -7838,8 +7867,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
MVPP21_MIB_COUNTERS_OFFSET +
port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
} else {
- if (of_property_read_u32(port_node, "gop-port-id",
- &port->gop_id)) {
+ if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
+ &port->gop_id)) {
err = -EINVAL;
dev_err(&pdev->dev, "missing gop-port-id value\n");
goto err_deinit_qvecs;
@@ -7869,10 +7898,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mutex_init(&port->gather_stats_lock);
INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
- mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from);
+ mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
- port->tx_ring_size = MVPP2_MAX_TXD;
- port->rx_ring_size = MVPP2_MAX_RXD;
+ port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
+ port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
SET_NETDEV_DEV(dev, &pdev->dev);
err = mvpp2_port_init(port);
@@ -7927,7 +7956,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
- priv->port_list[index] = port;
+ priv->port_list[priv->port_count++] = port;
+
return 0;
err_free_port_pcpu:
@@ -8186,8 +8216,9 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
static int mvpp2_probe(struct platform_device *pdev)
{
- struct device_node *dn = pdev->dev.of_node;
- struct device_node *port_node;
+ const struct acpi_device_id *acpi_id;
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
@@ -8198,8 +8229,14 @@ static int mvpp2_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->hw_version =
- (unsigned long)of_device_get_match_data(&pdev->dev);
+ if (has_acpi_companion(&pdev->dev)) {
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ priv->hw_version = (unsigned long)acpi_id->driver_data;
+ } else {
+ priv->hw_version =
+ (unsigned long)of_device_get_match_data(&pdev->dev);
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -8213,10 +8250,23 @@ static int mvpp2_probe(struct platform_device *pdev)
return PTR_ERR(priv->lms_base);
} else {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (has_acpi_companion(&pdev->dev)) {
+ /* In case the MDIO memory region is declared in the ACPI
+ * tables, it can already appear as 'in-use' in the OS. Because
+ * it is overlapped by the second region of the network
+ * controller, make sure it is released before requesting it
+ * again. The mvpp2 driver takes care to avoid concurrent
+ * access to this memory region.
+ */
+ release_resource(res);
+ }
priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->iface_base))
return PTR_ERR(priv->iface_base);
+ }
+ if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
priv->sysctrl_base =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"marvell,system-controller");
@@ -8242,32 +8292,34 @@ static int mvpp2_probe(struct platform_device *pdev)
else
priv->max_port_rxqs = 32;
- priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
- if (IS_ERR(priv->pp_clk))
- return PTR_ERR(priv->pp_clk);
- err = clk_prepare_enable(priv->pp_clk);
- if (err < 0)
- return err;
-
- priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
- if (IS_ERR(priv->gop_clk)) {
- err = PTR_ERR(priv->gop_clk);
- goto err_pp_clk;
- }
- err = clk_prepare_enable(priv->gop_clk);
- if (err < 0)
- goto err_pp_clk;
+ if (dev_of_node(&pdev->dev)) {
+ priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+ if (IS_ERR(priv->pp_clk))
+ return PTR_ERR(priv->pp_clk);
+ err = clk_prepare_enable(priv->pp_clk);
+ if (err < 0)
+ return err;
- if (priv->hw_version == MVPP22) {
- priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
- if (IS_ERR(priv->mg_clk)) {
- err = PTR_ERR(priv->mg_clk);
- goto err_gop_clk;
+ priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+ if (IS_ERR(priv->gop_clk)) {
+ err = PTR_ERR(priv->gop_clk);
+ goto err_pp_clk;
}
-
- err = clk_prepare_enable(priv->mg_clk);
+ err = clk_prepare_enable(priv->gop_clk);
if (err < 0)
- goto err_gop_clk;
+ goto err_pp_clk;
+
+ if (priv->hw_version == MVPP22) {
+ priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+ if (IS_ERR(priv->mg_clk)) {
+ err = PTR_ERR(priv->mg_clk);
+ goto err_gop_clk;
+ }
+
+ err = clk_prepare_enable(priv->mg_clk);
+ if (err < 0)
+ goto err_gop_clk;
+ }
priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
if (IS_ERR(priv->axi_clk)) {
@@ -8280,10 +8332,14 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_gop_clk;
}
- }
- /* Get system's tclk rate */
- priv->tclk = clk_get_rate(priv->pp_clk);
+ /* Get system's tclk rate */
+ priv->tclk = clk_get_rate(priv->pp_clk);
+ } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &priv->tclk)) {
+ dev_err(&pdev->dev, "missing clock-frequency value\n");
+ return -EINVAL;
+ }
if (priv->hw_version == MVPP22) {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
@@ -8306,30 +8362,19 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_mg_clk;
}
- priv->port_count = of_get_available_child_count(dn);
+ /* Initialize ports */
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ err = mvpp2_port_probe(pdev, port_fwnode, priv);
+ if (err < 0)
+ goto err_port_probe;
+ }
+
if (priv->port_count == 0) {
dev_err(&pdev->dev, "no ports enabled\n");
err = -ENODEV;
goto err_mg_clk;
}
- priv->port_list = devm_kcalloc(&pdev->dev, priv->port_count,
- sizeof(*priv->port_list),
- GFP_KERNEL);
- if (!priv->port_list) {
- err = -ENOMEM;
- goto err_mg_clk;
- }
-
- /* Initialize ports */
- i = 0;
- for_each_available_child_of_node(dn, port_node) {
- err = mvpp2_port_probe(pdev, port_node, priv, i);
- if (err < 0)
- goto err_port_probe;
- i++;
- }
-
/* Statistics must be gathered regularly because some of them (like
* packet counters) are 32-bit registers and could overflow quite
* quickly. For instance, a 10Gb link used at full bandwidth with the
@@ -8350,7 +8395,7 @@ static int mvpp2_probe(struct platform_device *pdev)
err_port_probe:
i = 0;
- for_each_available_child_of_node(dn, port_node) {
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i])
mvpp2_port_remove(priv->port_list[i]);
i++;
@@ -8369,14 +8414,14 @@ err_pp_clk:
static int mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
- struct device_node *dn = pdev->dev.of_node;
- struct device_node *port_node;
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
int i = 0;
flush_workqueue(priv->stats_queue);
destroy_workqueue(priv->stats_queue);
- for_each_available_child_of_node(dn, port_node) {
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i]) {
mutex_destroy(&priv->port_list[i]->gather_stats_lock);
mvpp2_port_remove(priv->port_list[i]);
@@ -8399,6 +8444,9 @@ static int mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
+ if (is_acpi_node(port_fwnode))
+ return 0;
+
clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
@@ -8420,12 +8468,19 @@ static const struct of_device_id mvpp2_match[] = {
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
+static const struct acpi_device_id mvpp2_acpi_match[] = {
+ { "MRVL0110", MVPP22 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
+
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
.remove = mvpp2_remove,
.driver = {
.name = MVPP2_DRIVER_NAME,
.of_match_table = mvpp2_match,
+ .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
},
};
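With the ACPI ID table in place the driver binds from either firmware interface: DT probing matches through of_match_table, ACPI through acpi_match_table, and probe() reads hw_version from whichever table matched. A stripped-down sketch of such a dual-firmware platform driver; every name below is illustrative, not mvpp2's:

/* Sketch: one platform driver matched from both DT and ACPI. */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	unsigned long hw_version;

	if (has_acpi_companion(&pdev->dev)) {
		const struct acpi_device_id *id =
			acpi_match_device(pdev->dev.driver->acpi_match_table,
					  &pdev->dev);
		hw_version = (unsigned long)id->driver_data;
	} else {
		hw_version = (unsigned long)of_device_get_match_data(&pdev->dev);
	}
	dev_info(&pdev->dev, "hw version %lu\n", hw_version);
	return 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo", .data = (void *)1 },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static const struct acpi_device_id demo_acpi_match[] = {
	{ "DEMO0001", 1 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, demo_acpi_match);

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo",
		.of_match_table = demo_of_match,
		.acpi_match_table = ACPI_PTR(demo_acpi_match),
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");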
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 9efe1771423c..9fe85300e7b6 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4287,7 +4287,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
return -ETIMEDOUT;
}
- mdelay(1);
+ msleep(1);
}
return 0;
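The sky2 change swaps mdelay() for msleep() in the VPD busy-wait: the loop runs in process context, so it may sleep instead of spinning a CPU for a millisecond per iteration. The polling shape, sketched in userspace with a stand-in for the PCI status read:

/* Sketch: poll a busy flag with a sleeping delay and a timeout. */
#include <stdbool.h>
#include <stdio.h>

static int polls;
static bool read_busy(void) { return ++polls < 5; }	/* fake status read */
static void sleep_ms(unsigned int ms) { (void)ms; }	/* msleep() in-kernel */

int main(void)
{
	int timeout_ms = 100;

	while (read_busy()) {
		if (timeout_ms-- <= 0) {
			fprintf(stderr, "VPD cycle timed out\n");
			return -1;
		}
		sleep_ms(1);	/* yields the CPU, unlike mdelay(1) */
	}
	printf("done after %d polls\n", polls);
	return 0;
}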
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index fc67e35b253e..29826dd15204 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1952,14 +1952,16 @@ static int mtk_hw_init(struct mtk_eth *eth)
}
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
- /* Set GE2 driving and slew rate */
- regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+ if (eth->pctl) {
+ /* Set GE2 driving and slew rate */
+ regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
- /* set GE2 TDSEL */
- regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+ /* set GE2 TDSEL */
+ regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
- /* set GE2 TUNE */
- regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+ /* set GE2 TUNE */
+ regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+ }
/* Set linkdown as the default for each GMAC. Its own MCR would be set
* up with the more appropriate value when mtk_phy_link_adjust call is
@@ -2538,11 +2540,13 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,pctl");
- if (IS_ERR(eth->pctl)) {
- dev_err(&pdev->dev, "no pctl regmap found\n");
- return PTR_ERR(eth->pctl);
+ if (eth->soc->required_pctl) {
+ eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,pctl");
+ if (IS_ERR(eth->pctl)) {
+ dev_err(&pdev->dev, "no pctl regmap found\n");
+ return PTR_ERR(eth->pctl);
+ }
}
for (i = 0; i < 3; i++) {
@@ -2668,17 +2672,20 @@ static int mtk_remove(struct platform_device *pdev)
static const struct mtk_soc_data mt2701_data = {
.caps = MTK_GMAC1_TRGMII,
- .required_clks = MT7623_CLKS_BITMAP
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
};
static const struct mtk_soc_data mt7622_data = {
.caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
- .required_clks = MT7622_CLKS_BITMAP
+ .required_clks = MT7622_CLKS_BITMAP,
+ .required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
.caps = MTK_GMAC1_TRGMII,
- .required_clks = MT7623_CLKS_BITMAP
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
};
const struct of_device_id of_mtk_match[] = {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index a3af4660de81..672b8c353c47 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -573,10 +573,13 @@ struct mtk_rx_ring {
* @caps Flags shown the extra capability for the SoC
* @required_clks Flags shown the bitmap for required clocks on
* the target SoC
+ * @required_pctl A bool value indicating whether the SoC requires
+ * the extra setup for the pins used by the GMAC.
*/
struct mtk_soc_data {
u32 caps;
u32 required_clks;
+ bool required_pctl;
};
/* currently no SoC has more than 2 macs */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 5f41dc92aa68..1a0c3bf86ead 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
}
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
case IEEE_8021QAZ_TSA_STRICT:
break;
case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
/* higher TC means higher priority => lower pg */
for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
+ pg[i] = MLX4_EN_TC_VENDOR;
+ tc_tx_bw[i] = MLX4_EN_BW_MAX;
+ break;
case IEEE_8021QAZ_TSA_STRICT:
pg[i] = num_strict++;
tc_tx_bw[i] = MLX4_EN_BW_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bf1f04164885..ebc1f566a4d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
if (param->rx_jumbo_pending || param->rx_mini_pending)
return -EINVAL;
+ if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
+ en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ MLX4_EN_MIN_RX_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
+ en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
+ __func__, param->tx_pending,
+ MLX4_EN_MIN_TX_SIZE);
+ return -EINVAL;
+ }
+
rx_size = roundup_pow_of_two(param->rx_pending);
- rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
- rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
tx_size = roundup_pow_of_two(param->tx_pending);
- tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
- tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
priv->rx_ring[0]->size) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 99051a294fa6..8fc51bc29003 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
prof->rx_ring_size, priv->stride,
- node))
+ node, i))
goto err;
+
}
#ifdef CONFIG_RFS_ACCEL
@@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
+ u8 prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+ priv->ets.prio_tc[prio] = prio;
+ priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
+ }
+
priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 85e28efcda33..b4d144e67514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride, int node)
+ u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring;
@@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+ goto err_ring;
+
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = vzalloc_node(tmp, node);
@@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->rx_info = vzalloc(tmp);
if (!ring->rx_info) {
err = -ENOMEM;
- goto err_ring;
+ goto err_xdp_info;
}
}
@@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
+err_xdp_info:
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
err_ring:
kfree(ring);
*pring = NULL;
@@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
lockdep_is_held(&mdev->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
return 0;
}
#endif
+
+/* We reach this function only after checking that at least one of
+ * the (IPv4 | IPv6) bits is set in cqe->status.
+ */
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
netdev_features_t dev_features)
{
@@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hdr += sizeof(struct vlan_hdr);
}
- if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
- return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
- return 0;
+ return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
@@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int cq_ring = cq->ring;
bool doorbell_pending;
struct mlx4_cqe *cqe;
+ struct xdp_buff xdp;
int polled = 0;
int index;
@@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
+ xdp.rxq = &ring->xdp_rxq;
doorbell_pending = 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* read bytes but not past the end of the frag.
*/
if (xdp_prog) {
- struct xdp_buff xdp;
dma_addr_t dma;
void *orig_data;
u32 act;
@@ -814,33 +823,33 @@ xdp_drop_no_cnt:
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
- if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
- cqe->checksum == cpu_to_be16(0xffff)) {
- bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
- (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
-
- ip_summed = CHECKSUM_UNNECESSARY;
- hash_type = PKT_HASH_TYPE_L4;
- if (l2_tunnel)
- skb->csum_level = 1;
- ring->csum_ok++;
- } else {
+ bool l2_tunnel;
+
+ if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ cqe->checksum == cpu_to_be16(0xffff)))
goto csum_none;
- }
+
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ ip_summed = CHECKSUM_UNNECESSARY;
+ hash_type = PKT_HASH_TYPE_L4;
+ if (l2_tunnel)
+ skb->csum_level = 1;
+ ring->csum_ok++;
} else {
- if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
- (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPV6))) {
- if (check_csum(cqe, skb, va, dev->features)) {
- goto csum_none;
- } else {
- ip_summed = CHECKSUM_COMPLETE;
- hash_type = PKT_HASH_TYPE_L3;
- ring->csum_complete++;
- }
- } else {
+ if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+#if IS_ENABLED(CONFIG_IPV6)
+ MLX4_CQE_STATUS_IPV6))))
+#else
+ 0))))
+#endif
goto csum_none;
- }
+ if (check_csum(cqe, skb, va, dev->features))
+ goto csum_none;
+ ip_summed = CHECKSUM_COMPLETE;
+ hash_type = PKT_HASH_TYPE_L3;
+ ring->csum_complete++;
}
} else {
csum_none:
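
The mlx4 Rx changes register one xdp_rxq_info per receive ring so each xdp_buff can carry its queue context (xdp.rxq, now set once per NAPI poll above). The API is symmetric: register at ring creation, unregister on every later error path and at teardown. A condensed sketch of that lifecycle under the signatures this patch uses; the ring structure and function names are hypothetical:

#include <linux/slab.h>
#include <net/xdp.h>

struct my_rx_ring {			/* hypothetical ring state */
	struct xdp_rxq_info xdp_rxq;
	void *rx_info;
};

static int my_create_rx_ring(struct net_device *dev,
			     struct my_rx_ring *ring, int queue_index)
{
	int err;

	err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, queue_index);
	if (err < 0)
		return err;

	ring->rx_info = kzalloc(128, GFP_KERNEL);
	if (!ring->rx_info) {
		/* Everything that fails after registration must unregister,
		 * mirroring the err_xdp_info label in the hunk above.
		 */
		xdp_rxq_info_unreg(&ring->xdp_rxq);
		return -ENOMEM;
	}
	return 0;
}

static void my_destroy_rx_ring(struct my_rx_ring *ring)
{
	xdp_rxq_info_unreg(&ring->xdp_rxq);
	kfree(ring->rx_info);
	ring->rx_info = NULL;
}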
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2b72677eccd4..f470ae37d937 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -46,6 +46,7 @@
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/xdp.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -356,6 +357,7 @@ struct mlx4_en_rx_ring {
unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
+ struct xdp_rxq_info xdp_rxq;
};
struct mlx4_en_cq {
@@ -479,6 +481,7 @@ struct mlx4_en_frag_info {
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
+#define MLX4_EN_TC_VENDOR 0
#define MLX4_EN_TC_ETS 7
enum dcb_pfc_type {
@@ -719,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride, int node);
+ u32 size, u16 stride, int node, int queue_index);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c7c0764991c9..2e84f10f59ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
- struct mlx4_cmd_mailbox *mailbox;
- int err;
-
if (!fmr->maps)
return;
- fmr->maps = 0;
+ /* To unmap: it is sufficient to take back ownership from HW */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox)) {
- err = PTR_ERR(mailbox);
- pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
- return;
- }
+ /* Make sure MPT status is visible */
+ wmb();
- err = mlx4_HW2SW_MPT(dev, NULL,
- key_to_hw_index(fmr->mr.key) &
- (dev->caps.num_mpts - 1));
- mlx4_free_cmd_mailbox(dev, mailbox);
- if (err) {
- pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
- return;
- }
- fmr->mr.enabled = MLX4_MPT_EN_SW;
+ fmr->maps = 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
@@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
if (fmr->maps)
return -EBUSY;
+ if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
+		/* If the FMR was enabled and unmapped,
+		 * make sure to give ownership of the MPT back to HW,
+		 * so the HW2SW_MPT command will succeed.
+		 */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+ /* Make sure MPT status is visible before changing MPT fields */
+ wmb();
+ fmr->mpt->length = 0;
+ fmr->mpt->start = 0;
+ /* Make sure MPT data is visible after changing MPT status */
+ wmb();
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
+		/* Make sure MPT status is visible */
+ wmb();
+ }
ret = mlx4_mr_free(dev, &fmr->mr);
if (ret)
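
Both mr.c hunks replace a firmware round-trip (HW2SW_MPT) with direct stores to the MPT, ordered by write barriers: the status byte acts as an ownership flag, and each wmb() ensures the device cannot observe a later store before the one that guards it. A generic sketch of this publish pattern over device-visible memory; the structure and status values are illustrative stand-ins for the mlx4 definitions:

#include <asm/barrier.h>	/* wmb() */
#include <linux/types.h>

#define MY_STATUS_SW 0xf0	/* assumption; mlx4 has MLX4_MPT_STATUS_SW */
#define MY_STATUS_HW 0x00	/* assumption; mlx4 has MLX4_MPT_STATUS_HW */

struct my_shared_entry {	/* hypothetical device-visible record */
	u8  status;
	u64 start;
	u64 length;
};

static void my_take_back_and_republish(struct my_shared_entry *e)
{
	/* 1. Take ownership; flush before touching the guarded fields. */
	e->status = MY_STATUS_SW;
	wmb();

	/* 2. Mutate the fields while software owns the entry. */
	e->start = 0;
	e->length = 0;
	wmb();

	/* 3. Hand ownership back; the updates above are ordered before it. */
	e->status = MY_STATUS_HW;
	wmb();
}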
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 769598f7b6c8..3aaf4bad6c5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
u64 in_param = 0;
int err;
+ if (!cnt)
+ return;
+
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, base_qpn);
set_param_h(&in_param, cnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 19b21b40ab07..c805769d92a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
+ en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 543060c305a0..4c9360b25532 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -44,8 +44,11 @@
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -226,12 +229,6 @@ enum mlx5e_priv_flag {
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
-struct mlx5e_cq_moder {
- u16 usec;
- u16 pkts;
- u8 cq_period_mode;
-};
-
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -242,8 +239,8 @@ struct mlx5e_params {
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
- struct mlx5e_cq_moder rx_cq_moderation;
- struct mlx5e_cq_moder tx_cq_moderation;
+ struct net_dim_cq_moder rx_cq_moderation;
+ struct net_dim_cq_moder tx_cq_moderation;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
@@ -253,7 +250,7 @@ struct mlx5e_params {
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
bool scatter_fcs_en;
- bool rx_am_enabled;
+ bool rx_dim_enabled;
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
@@ -472,32 +469,6 @@ struct mlx5e_mpw_info {
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};
-struct mlx5e_rx_am_stats {
- int ppms; /* packets per msec */
- int bpms; /* bytes per msec */
- int epms; /* events per msec */
-};
-
-struct mlx5e_rx_am_sample {
- ktime_t time;
- u32 pkt_ctr;
- u32 byte_ctr;
- u16 event_ctr;
-};
-
-struct mlx5e_rx_am { /* Adaptive Moderation */
- u8 state;
- struct mlx5e_rx_am_stats prev_stats;
- struct mlx5e_rx_am_sample start_sample;
- struct work_struct work;
- u8 profile_ix;
- u8 mode;
- u8 tune_state;
- u8 steps_right;
- u8 steps_left;
- u8 tired;
-};
-
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
@@ -558,7 +529,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
- struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct net_dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog *xdp_prog;
@@ -571,6 +542,9 @@ struct mlx5e_rq {
u32 rqn;
struct mlx5_core_dev *mdev;
struct mlx5_core_mkey umr_mkey;
+
+ /* XDP read-mostly */
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct mlx5e_channel {
@@ -587,6 +561,7 @@ struct mlx5e_channel {
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
+ struct mlx5e_ch_stats stats;
/* control */
struct mlx5e_priv *priv;
@@ -655,6 +630,7 @@ struct mlx5e_tc_table {
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
};
struct mlx5e_vlan_table {
@@ -722,6 +698,11 @@ enum {
MLX5E_ARFS_FT_LEVEL
};
+enum {
+ MLX5E_TC_FT_LEVEL = 0,
+ MLX5E_TC_TTC_FT_LEVEL,
+};
+
struct mlx5e_ethtool_table {
struct mlx5_flow_table *ft;
int num_rules;
@@ -860,11 +841,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-void mlx5e_rx_am(struct mlx5e_rq *rq);
-void mlx5e_rx_am_work(struct work_struct *work);
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
+void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
@@ -895,7 +872,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_timestamp_set(struct mlx5e_priv *priv);
+void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
bool is_rss;
@@ -1054,11 +1031,26 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+struct ttc_params {
+ struct mlx5_flow_table_attr ft_attr;
+ u32 any_tt_tirn;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table *inner_ttc;
+};
+
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
@@ -1071,6 +1063,8 @@ int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+int mlx5e_bits_invert(unsigned long a, int size);
+
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
@@ -1110,4 +1104,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
+void mlx5e_rx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 9bcf38f4123b..3d46ef48d5b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -922,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
- int i;
struct ieee_ets ets;
+ int err;
+ int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return;
@@ -936,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
ets.prio_tc[i] = i;
}
- /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
- ets.prio_tc[0] = 1;
- ets.prio_tc[1] = 0;
+ if (ets.ets_cap > 1) {
+ /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
+ ets.prio_tc[0] = 1;
+ ets.prio_tc[1] = 0;
+ }
- mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ if (err)
+ netdev_err(priv->netdev,
+ "%s, Failed to init ETS: %d\n", __func__, err);
}
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
new file mode 100644
index 000000000000..602851ab5b14
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/net_dim.h>
+#include "en.h"
+
+void mlx5e_rx_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
+ cur_profile.usec, cur_profile.pkts);
+
+ dim->state = NET_DIM_START_MEASURE;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8f05efa5c829..cc8048f68f11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
return;
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_stats(priv, true);
+ mlx5e_update_stats(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -296,7 +295,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
u32 min_rq_size;
- u32 max_rq_size;
u8 log_rq_size;
u8 log_sq_size;
u32 num_mtts;
@@ -315,8 +313,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_min_log_rq_size(rq_wq_type));
- max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
param->rx_pending);
@@ -326,12 +322,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size);
return -EINVAL;
}
- if (param->rx_pending > max_rq_size) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
- __func__, param->rx_pending,
- max_rq_size);
- return -EINVAL;
- }
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
@@ -347,12 +337,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
- if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
- netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
- __func__, param->tx_pending,
- 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
- return -EINVAL;
- }
log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
@@ -480,7 +464,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
return 0;
}
@@ -534,7 +518,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+ new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -542,7 +526,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+ reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
if (!reset) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index def513484845..f64dda2bed31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -806,25 +806,25 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
return err ? ERR_PTR(err) : rule;
}
-static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
+ struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
struct mlx5_flow_destination dest = {};
- struct mlx5e_ttc_table *ttc;
struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
int tt;
int err;
- ttc = &priv->fs.ttc;
ft = ttc->ft.t;
rules = ttc->rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
if (tt == MLX5E_TT_ANY)
- dest.tir_num = priv->direct_tir[0].tirn;
+ dest.tir_num = params->any_tt_tirn;
else
- dest.tir_num = priv->indir_tir[tt].tirn;
+ dest.tir_num = params->indir_tirn[tt];
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -832,12 +832,12 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
goto del_rules;
}
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
return 0;
rules = ttc->tunnel_rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.inner_ttc.ft.t;
+ dest.ft = params->inner_ttc->ft.t;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_tunnel_rules[tt].etype,
@@ -977,25 +977,25 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
return err ? ERR_PTR(err) : rule;
}
-static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
+ struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rules;
- struct mlx5e_ttc_table *ttc;
struct mlx5_flow_table *ft;
int err;
int tt;
- ttc = &priv->fs.inner_ttc;
ft = ttc->ft.t;
rules = ttc->rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
if (tt == MLX5E_TT_ANY)
- dest.tir_num = priv->direct_tir[0].tirn;
+ dest.tir_num = params->any_tt_tirn;
else
- dest.tir_num = priv->inner_indir_tir[tt].tirn;
+ dest.tir_num = params->indir_tirn[tt];
rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
@@ -1075,21 +1075,42 @@ err:
return err;
}
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
+ struct ttc_params *ttc_params)
+{
+ ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
+ ttc_params->inner_ttc = &priv->fs.inner_ttc;
+}
+
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+ ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
+ ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+ ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
+ ft_attr->level = MLX5E_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
- struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
return 0;
- ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
- ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1100,7 +1121,7 @@ int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
if (err)
goto err;
- err = mlx5e_generate_inner_ttc_table_rules(priv);
+ err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
if (err)
goto err;
@@ -1111,10 +1132,9 @@ err:
return err;
}
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
-
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
return;
@@ -1122,27 +1142,21 @@ void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
mlx5e_destroy_flow_table(&ttc->ft);
}
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-
mlx5e_cleanup_ttc_rules(ttc);
mlx5e_destroy_flow_table(&ttc->ft);
}
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
- struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
- struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
- ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
- ft_attr.level = MLX5E_TTC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1153,7 +1167,7 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
if (err)
goto err;
- err = mlx5e_generate_ttc_table_rules(priv);
+ err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
if (err)
goto err;
@@ -1474,7 +1488,8 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
- int err;
+ struct ttc_params ttc_params = {};
+ int tt, err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -1489,14 +1504,23 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ mlx5e_set_ttc_basic_params(priv, &ttc_params);
+ mlx5e_set_inner_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+ err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ mlx5e_set_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
@@ -1524,9 +1548,9 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
err_destroy_l2_table:
mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
- mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -1537,8 +1561,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
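
The en_fs.c refactor stops the TTC code from reaching into priv for its configuration: callers now describe the table in a struct ttc_params (flow-table attributes, the TIR numbers to steer to, an optional inner table) and name the target mlx5e_ttc_table explicitly, so one create/destroy pair serves both the outer and inner tables. The caller-side pattern, condensed from mlx5e_create_flow_steering above into a hypothetical wrapper:

#include "en.h"		/* mlx5e_priv, ttc_params, the helpers added above */

static int my_setup_outer_ttc(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt;

	/* Catch-all TIR and inner-table pointer, shared by both tables. */
	mlx5e_set_ttc_basic_params(priv, &ttc_params);
	/* Size/level/prio specific to the outer table. */
	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	return mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
}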
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d9d8227f195f..47bab842c5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -173,182 +173,23 @@ unlock:
rtnl_unlock();
}
-static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
+void mlx5e_update_stats(struct mlx5e_priv *priv)
{
- struct mlx5e_sw_stats temp, *s = &temp;
- struct mlx5e_rq_stats *rq_stats;
- struct mlx5e_sq_stats *sq_stats;
- int i, j;
-
- memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->channels.num; i++) {
- struct mlx5e_channel *c = priv->channels.c[i];
-
- rq_stats = &c->rq.stats;
-
- s->rx_packets += rq_stats->packets;
- s->rx_bytes += rq_stats->bytes;
- s->rx_lro_packets += rq_stats->lro_packets;
- s->rx_lro_bytes += rq_stats->lro_bytes;
- s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
- s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_complete += rq_stats->csum_complete;
- s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
- s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
- s->rx_xdp_drop += rq_stats->xdp_drop;
- s->rx_xdp_tx += rq_stats->xdp_tx;
- s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
- s->rx_wqe_err += rq_stats->wqe_err;
- s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
- s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
- s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
- s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
- s->rx_page_reuse += rq_stats->page_reuse;
- s->rx_cache_reuse += rq_stats->cache_reuse;
- s->rx_cache_full += rq_stats->cache_full;
- s->rx_cache_empty += rq_stats->cache_empty;
- s->rx_cache_busy += rq_stats->cache_busy;
- s->rx_cache_waive += rq_stats->cache_waive;
-
- for (j = 0; j < priv->channels.params.num_tc; j++) {
- sq_stats = &c->sq[j].stats;
-
- s->tx_packets += sq_stats->packets;
- s->tx_bytes += sq_stats->bytes;
- s->tx_tso_packets += sq_stats->tso_packets;
- s->tx_tso_bytes += sq_stats->tso_bytes;
- s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
- s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
- s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
- s->tx_queue_stopped += sq_stats->stopped;
- s->tx_queue_wake += sq_stats->wake;
- s->tx_queue_dropped += sq_stats->dropped;
- s->tx_xmit_more += sq_stats->xmit_more;
- s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
- s->tx_csum_none += sq_stats->csum_none;
- s->tx_csum_partial += sq_stats->csum_partial;
- }
- }
-
- s->link_down_events_phy = MLX5_GET(ppcnt_reg,
- priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events);
- memcpy(&priv->stats.sw, s, sizeof(*s));
-}
-
-static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
- struct mlx5_core_dev *mdev = priv->mdev;
-
- MLX5_SET(query_vport_counter_in, in, opcode,
- MLX5_CMD_OP_QUERY_VPORT_COUNTER);
- MLX5_SET(query_vport_counter_in, in, op_mod, 0);
- MLX5_SET(query_vport_counter_in, in, other_vport, 0);
-
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
-{
- struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
- int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- int prio;
- void *out;
-
- MLX5_SET(ppcnt_reg, in, local_port, 1);
-
- out = pstats->IEEE_802_3_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- if (!full)
- return;
-
- out = pstats->RFC_2863_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- out = pstats->RFC_2819_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- out = pstats->phy_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- }
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
- out = pstats->eth_ext_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- }
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- out = pstats->per_prio_counters[prio];
- MLX5_SET(ppcnt_reg, in, prio_tc, prio);
- mlx5_core_access_reg(mdev, in, sz, out, sz,
- MLX5_REG_PPCNT, 0, 0);
- }
-}
-
-static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
-{
- struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
- u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
- int err;
-
- if (!priv->q_counter)
- return;
-
- err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
- if (err)
- return;
-
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
-}
-
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
- int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
- void *out;
-
- if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
- return;
+ int i;
- out = pcie_stats->pcie_perf_counters;
- MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-}
-
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
-{
- if (full) {
- mlx5e_update_pcie_counters(priv);
- mlx5e_ipsec_update_stats(priv);
- }
- mlx5e_update_pport_counters(priv, full);
- mlx5e_update_vport_counters(priv);
- mlx5e_update_q_counter(priv);
- mlx5e_update_sw_counters(priv);
+ for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+ if (mlx5e_stats_grps[i].update_stats)
+ mlx5e_stats_grps[i].update_stats(priv);
}
static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
- mlx5e_update_stats(priv, false);
+ int i;
+
+ for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+ if (mlx5e_stats_grps[i].update_stats_mask &
+ MLX5E_NDO_UPDATE_STATS)
+ mlx5e_stats_grps[i].update_stats(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)
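
The open-coded counter refresh above collapses into a walk over mlx5e_stats_grps: each group carries an optional update_stats callback plus an update_stats_mask saying which callers need it, so the ndo path refreshes only groups flagged MLX5E_NDO_UPDATE_STATS and the old `full` parameter disappears. The reverse iteration appears chosen so that groups listed first (the software counters, which fold in values gathered by other groups, such as link_down_events_phy from the pport group) run last. A sketch of the descriptor this implies; only update_stats and update_stats_mask are visible in this hunk, and the accessor callbacks are assumptions about the companion en_stats.h changes:

#include <linux/types.h>

struct mlx5e_priv;	/* opaque for this sketch */

struct my_stats_grp {
	u16 update_stats_mask;	/* e.g. MLX5E_NDO_UPDATE_STATS */
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};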
@@ -582,6 +423,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+ if (err < 0)
+ goto err_rq_wq_destroy;
+
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = params->rq_headroom;
@@ -674,8 +519,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wqe->data.lkey = rq->mkey_be;
}
- INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = params->rx_cq_moderation.cq_period_mode;
+ INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
+
+ switch (params->rx_cq_moderation.cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -687,6 +541,7 @@ err_destroy_umr_mkey:
err_rq_wq_destroy:
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
@@ -699,6 +554,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_mpwqe_info(rq);
@@ -919,7 +776,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
- if (params->rx_am_enabled)
+ if (params->rx_dim_enabled)
c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
return 0;
@@ -952,7 +809,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->am.work);
+ cancel_work_sync(&rq->dim.work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1565,7 +1422,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
- struct mlx5e_cq_moder moder,
+ struct net_dim_cq_moder moder,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -1747,7 +1604,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
- struct mlx5e_cq_moder icocq_moder = {0, 0};
+ struct net_dim_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
@@ -1999,7 +1856,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -2203,7 +2060,7 @@ static int mlx5e_rx_hash_fn(int hfunc)
MLX5_RX_HASH_FN_INVERTED_XOR8;
}
-static int mlx5e_bits_invert(unsigned long a, int size)
+int mlx5e_bits_invert(unsigned long a, int size)
{
int inv = 0;
int i;
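
This hunk only drops the static qualifier so other translation units can call mlx5e_bits_invert(); the body (unchanged by the patch) reverses the low `size` bits of its argument, which the driver uses to bit-reverse RQT indirection indices when the XOR RSS hash is selected. A standalone copy with a worked example, reproduced here as a sketch for reference:

#include <linux/bitops.h>	/* test_bit() */

/* Reverse the low @size bits of @a.
 * Example: with size = 4, 0b0001 -> 0b1000 (1 becomes 8); applying the
 * function twice returns the original value.
 */
static int my_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}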
@@ -2669,7 +2526,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
-void mlx5e_timestamp_set(struct mlx5e_priv *priv)
+void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2690,7 +2547,6 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- mlx5e_timestamp_set(priv);
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -2766,6 +2622,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
if (err)
return err;
+	/* Mark as unused given that "Drop-RQ" packets never reach XDP */
+ xdp_rxq_info_unused(&rq->xdp_rxq);
+
rq->mdev = mdev;
return 0;
@@ -3085,9 +2944,6 @@ out:
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -3105,7 +2961,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_can_offload(priv->netdev))
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -3219,12 +3075,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return 0;
}
-#define MLX5E_SET_FEATURE(netdev, feature, enable) \
+#define MLX5E_SET_FEATURE(features, feature, enable) \
do { \
if (enable) \
- netdev->features |= feature; \
+ *features |= feature; \
else \
- netdev->features &= ~feature; \
+ *features &= ~feature; \
} while (0)
typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@@ -3347,6 +3203,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
+ netdev_features_t *features,
netdev_features_t wanted_features,
netdev_features_t feature,
mlx5e_feature_handler feature_handler)
@@ -3365,34 +3222,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
return err;
}
- MLX5E_SET_FEATURE(netdev, feature, enable);
+ MLX5E_SET_FEATURE(features, feature, enable);
return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
+ netdev_features_t oper_features = netdev->features;
int err;
- err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
- set_feature_lro);
- err |= mlx5e_handle_feature(netdev, features,
+ err = mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_LRO, set_feature_lro);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
- set_feature_tc_num_filters);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
- set_feature_rx_all);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
- set_feature_rx_fcs);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
- set_feature_rx_vlan);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_HW_TC, set_feature_tc_num_filters);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_RXALL, set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
- set_feature_arfs);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_NTUPLE, set_feature_arfs);
#endif
- return err ? -EINVAL : 0;
+ if (err) {
+ netdev->features = oper_features;
+ return -EINVAL;
+ }
+
+ return 0;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
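
The MLX5E_SET_FEATURE rework fixes a consistency bug: handlers used to flip bits directly on netdev->features while ndo_set_features was still running, so one failing handler could leave the stack's view out of sync with the hardware. The new code accumulates results in a local copy (oper_features) and publishes it only on failure, letting the core apply the full mask itself on success. The pattern in isolation, with a hypothetical single-feature handler:

#include <linux/netdevice.h>

#define MY_SET_FEATURE(features, feature, enable)	\
	do {						\
		if (enable)				\
			*(features) |= (feature);	\
		else					\
			*(features) &= ~(feature);	\
	} while (0)

/* Hypothetical handler: 0 on success, -errno on failure. */
static int my_set_lro(struct net_device *netdev, bool enable)
{
	return 0;
}

static int my_set_features(struct net_device *netdev,
			   netdev_features_t wanted)
{
	netdev_features_t oper = netdev->features;	/* start from reality */
	int err = 0;

	if ((wanted ^ netdev->features) & NETIF_F_LRO) {
		bool enable = !!(wanted & NETIF_F_LRO);

		if (my_set_lro(netdev, enable))
			err = -EINVAL;
		else
			MY_SET_FEATURE(&oper, NETIF_F_LRO, enable);
	}

	if (err) {
		/* Publish only what was actually applied. */
		netdev->features = oper;
		return -EINVAL;
	}
	return 0;	/* on success the core writes netdev->features itself */
}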
@@ -3732,26 +3595,62 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
+static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
+ struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int irqn_not_used, eqn;
+ struct mlx5_eq *eq;
+ u32 eqe_count;
+
+ if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used))
+ return false;
+
+ eq = mlx5_eqn2eq(mdev, eqn);
+ if (IS_ERR(eq))
+ return false;
+
+ netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ eqn, eq->cons_index, eq->irqn);
+
+ eqe_count = mlx5_eq_poll_irq_disabled(eq);
+ if (!eqe_count)
+ return false;
+
+ netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
+ sq->channel->stats.eq_rearm++;
+ return true;
+}
+
static void mlx5e_tx_timeout(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- bool sched_work = false;
+ bool reopen_channels = false;
int i;
netdev_err(dev, "TX timeout detected\n");
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i];
- if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ if (!netif_xmit_stopped(dev_queue))
continue;
- sched_work = true;
- clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
- i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ jiffies_to_usecs(jiffies - dev_queue->trans_start));
+
+		/* If we recovered a lost interrupt, the TX timeout is most
+		 * likely resolved; skip reopening the channels.
+		 */
+ if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ reopen_channels = true;
+ }
}
- if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
schedule_work(&priv->tx_timeout_work);
}
@@ -4038,9 +3937,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
params->rx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
- if (params->rx_am_enabled)
- params->rx_cq_moderation =
- mlx5e_am_get_def_profile(cq_period_mode);
+ if (params->rx_dim_enabled) {
+ switch (cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ }
+ }
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
@@ -4102,7 +4010,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
@@ -4139,6 +4047,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+ mlx5e_timestamp_init(priv);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4307,9 +4217,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_ipsec_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
-
- if (priv->channels.params.xdp_prog)
- bpf_prog_put(priv->channels.params.xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c43606c26b5..363d8dcb7f17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
return 0;
}
+static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_sq *rep_sq, *tmp;
+ struct mlx5e_rep_priv *rpriv;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
+ mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+ list_del(&rep_sq->list);
+ kfree(rep_sq);
+ }
+}
+
+static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num)
+{
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_sq *rep_sq;
+ int err;
+ int i;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return 0;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ for (i = 0; i < sqns_num; i++) {
+ rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
+ if (!rep_sq) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Add re-inject rule to the PF/representor sqs */
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+ rep->vport,
+ sqns_array[i]);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ kfree(rep_sq);
+ goto out_err;
+ }
+ rep_sq->send_to_vport_rule = flow_rule;
+ list_add(&rep_sq->list, &rpriv->vport_sqs_list);
+ }
+ return 0;
+
+out_err:
+ mlx5e_sqs2vport_stop(esw, rep);
+ return err;
+}
+
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
sqs[num_sqs++] = c->sq[tc].sqn;
}
- err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+ err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
kfree(sqs);
out:
@@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- mlx5_eswitch_sqs2vport_stop(esw, rep);
+ mlx5e_sqs2vport_stop(esw, rep);
}
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
@@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
#endif
unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
DELAY_PROBE_TIME);
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
@@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
neigh_update.neigh_stats_work.work);
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_neigh_hash_entry *nhe;
@@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
neigh_update.netevent_nb);
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_neigh_hash_entry *nhe = NULL;
struct mlx5e_neigh m_neigh = {};
@@ -483,7 +540,7 @@ out_err:
static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
unregister_netevent_notifier(&neigh_update->netevent_nb);
@@ -662,9 +719,6 @@ static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -682,7 +736,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_can_offload(priv->netdev))
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -827,7 +881,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
@@ -877,6 +931,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
+
+ mlx5e_timestamp_init(priv);
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
@@ -904,7 +960,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err = PTR_ERR(flow_rule);
goto err_destroy_direct_tirs;
}
- rep->vport_rx_rule = flow_rule;
+ rpriv->vport_rx_rule = flow_rule;
err = mlx5e_tc_init(priv);
if (err)
@@ -913,7 +969,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return 0;
err_del_flow_rule:
- mlx5_del_flow_rules(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
@@ -924,10 +980,9 @@ err_destroy_direct_rqts:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
mlx5e_tc_cleanup(priv);
- mlx5_del_flow_rules(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
}
@@ -967,10 +1022,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
/* e-Switch vport representors */
static int
-mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(rep->netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
int err;
@@ -992,10 +1047,10 @@ err_remove_sqs:
}
static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(rep->netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
@@ -1008,8 +1063,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
}
static int
-mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
+ struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
struct mlx5e_priv *upriv;
@@ -1019,7 +1075,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
if (!rpriv)
return -ENOMEM;
- netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
+ netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
@@ -1027,8 +1083,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
return -EINVAL;
}
- rep->netdev = netdev;
+ rpriv->netdev = netdev;
rpriv->rep = rep;
+ rep->rep_if[REP_ETH].priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
@@ -1044,7 +1102,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
+ upriv = netdev_priv(uplink_rpriv->netdev);
err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
upriv);
if (err)
@@ -1076,16 +1135,19 @@ err_destroy_netdev:
}
static void
-mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct net_device *netdev = rep->netdev;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *uplink_rpriv;
void *ppriv = priv->ppriv;
struct mlx5e_priv *upriv;
- unregister_netdev(rep->netdev);
- upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ unregister_netdev(netdev);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
+ REP_ETH);
+ upriv = netdev_priv(uplink_rpriv->netdev);
tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
upriv);
mlx5e_rep_neigh_cleanup(rpriv);
@@ -1100,18 +1162,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
- u8 mac[ETH_ALEN];
-
- mlx5_query_nic_vport_mac_address(mdev, 0, mac);
for (vport = 1; vport < total_vfs; vport++) {
- struct mlx5_eswitch_rep rep;
+ struct mlx5_eswitch_rep_if rep_if = {};
- rep.load = mlx5e_vport_rep_load;
- rep.unload = mlx5e_vport_rep_unload;
- rep.vport = vport;
- ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+ rep_if.load = mlx5e_vport_rep_load;
+ rep_if.unload = mlx5e_vport_rep_unload;
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}
}
@@ -1123,21 +1180,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
int vport;
for (vport = 1; vport < total_vfs; vport++)
- mlx5_eswitch_unregister_vport_rep(esw, vport);
+ mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}
void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep rep;
+ struct mlx5_eswitch_rep_if rep_if;
+ struct mlx5e_rep_priv *rpriv;
+
+ rpriv = priv->ppriv;
+ rpriv->netdev = priv->netdev;
- mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
- rep.load = mlx5e_nic_rep_load;
- rep.unload = mlx5e_nic_rep_unload;
- rep.vport = FDB_UPLINK_VPORT;
- rep.netdev = priv->netdev;
- mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/
+ rep_if.load = mlx5e_nic_rep_load;
+ rep_if.unload = mlx5e_nic_rep_unload;
+ rep_if.priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */
mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}
@@ -1148,7 +1208,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
- mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
+ mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF */
}
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
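
The change above moves the representor glue off struct mlx5_eswitch_rep itself and onto a per-type interface: load() now receives the core device rather than the eswitch, unload() gets only the rep, and all Ethernet-side state hangs behind an opaque priv keyed by REP_ETH. A minimal sketch of the resulting registration pattern, using only names visible in the hunks above:

/* Sketch of the new per-type registration pattern; this mirrors the
 * rep_if usage in the hunks above, nothing here is additional API.
 */
static void register_eth_rep(struct mlx5_eswitch *esw, int vport)
{
	struct mlx5_eswitch_rep_if rep_if = {};

	rep_if.load   = mlx5e_vport_rep_load;   /* now takes mlx5_core_dev */
	rep_if.unload = mlx5e_vport_rep_unload; /* now takes only the rep */
	/* rep_if.priv is filled by load(); the uplink rep sets it up front */
	mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}
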
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 5659ed9f51e6..b9b481f2833a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
+ struct net_device *netdev;
+ struct mlx5_flow_handle *vport_rx_rule;
+ struct list_head vport_sqs_list;
};
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+ return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
struct mlx5e_neigh {
struct net_device *dev;
union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
int encap_size;
};
+struct mlx5e_rep_sq {
+ struct mlx5_flow_handle *send_to_vport_rule;
+ struct list_head list;
+};
+
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
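
With the netdev and the per-vport SQ bookkeeping moved into mlx5e_rep_priv, mlx5e_rep_to_rep_priv() becomes the single way back from a core rep to the Ethernet private data, and each mlx5e_rep_sq carries one send-to-vport rule on vport_sqs_list. A hedged sketch of the teardown walk this list presumably enables; the deletion helper named here is assumed to be the existing counterpart for these flow handles:

/* Sketch under stated assumptions: mlx5_eswitch_del_send_to_vport_rule()
 * is assumed to be the existing deletion counterpart for these handles.
 */
static void mlx5e_rep_remove_sqs(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;

	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
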
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5b499c7a698f..0d4bb0688faa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -495,8 +495,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
mlx5_cqwq_pop(&cq->wq);
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
- WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
- cqe->op_own);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
return;
}
@@ -506,9 +506,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
}
if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
- WARN_ONCE(true,
- "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- icowi->opcode);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
}
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
@@ -632,7 +631,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (is_last_ethertype_ip(skb, &network_depth)) {
+ if (likely(is_last_ethertype_ip(skb, &network_depth))) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
if (network_depth > ETH_HLEN)
@@ -812,6 +811,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
+ xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(prog, &xdp);
switch (act) {
@@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
+ struct hwtstamp_config *tstamp;
struct net_device *netdev;
+ struct mlx5e_priv *priv;
char *pseudo_header;
u32 qpn;
u8 *dgid;
@@ -1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
return;
}
+ priv = mlx5i_epriv(netdev);
+ tstamp = &priv->tstamp;
+
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
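
Besides the netdev-aware warnings and the likely() hint on the checksum path, the RX changes read hardware timestamps through the device-level hwtstamp_config and point the XDP context at the new per-RQ rxq info. The latter only works if the RQ registered that info at setup time; a minimal sketch, assuming the xdp_rxq_info API of this kernel generation and a hypothetical call site:

/* Sketch: registering the rxq info an XDP program sees via xdp.rxq;
 * the call site (RQ create) is an assumption.
 */
static int mlx5e_rq_xdp_info_init(struct mlx5e_rq *rq,
				  struct net_device *netdev, u32 ix)
{
	int err = xdp_rxq_info_reg(&rq->xdp_rxq, netdev, ix);

	if (err)
		return err;
	/* pair with xdp_rxq_info_unreg(&rq->xdp_rxq) on RQ teardown */
	return 0;
}
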
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
deleted file mode 100644
index e401d9d245f3..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* Adaptive moderation profiles */
-#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
-#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
-#define MLX5E_PARAMS_AM_NUM_PROFILES 5
-
-/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
-#define MLX5_AM_EQE_PROFILES { \
- {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define MLX5_AM_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
-}
-
-static const struct mlx5e_cq_moder
-profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
- MLX5_AM_EQE_PROFILES,
- MLX5_AM_CQE_PROFILES,
-};
-
-static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
-{
- struct mlx5e_cq_moder cq_moder;
-
- cq_moder = profile[cq_period_mode][ix];
- cq_moder.cq_period_mode = cq_period_mode;
- return cq_moder;
-}
-
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
-{
- int default_profile_ix;
-
- if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
- else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
-
- return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
-}
-
-/* Adaptive moderation logic */
-enum {
- MLX5E_AM_START_MEASURE,
- MLX5E_AM_MEASURE_IN_PROGRESS,
- MLX5E_AM_APPLY_NEW_PROFILE,
-};
-
-enum {
- MLX5E_AM_PARKING_ON_TOP,
- MLX5E_AM_PARKING_TIRED,
- MLX5E_AM_GOING_RIGHT,
- MLX5E_AM_GOING_LEFT,
-};
-
-enum {
- MLX5E_AM_STATS_WORSE,
- MLX5E_AM_STATS_SAME,
- MLX5E_AM_STATS_BETTER,
-};
-
-enum {
- MLX5E_AM_STEPPED,
- MLX5E_AM_TOO_TIRED,
- MLX5E_AM_ON_EDGE,
-};
-
-static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- return true;
- case MLX5E_AM_GOING_RIGHT:
- return (am->steps_left > 1) && (am->steps_right == 1);
- default: /* MLX5E_AM_GOING_LEFT */
- return (am->steps_right > 1) && (am->steps_left == 1);
- }
-}
-
-static void mlx5e_am_turn(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- am->tune_state = MLX5E_AM_GOING_LEFT;
- am->steps_left = 0;
- break;
- case MLX5E_AM_GOING_LEFT:
- am->tune_state = MLX5E_AM_GOING_RIGHT;
- am->steps_right = 0;
- break;
- }
-}
-
-static int mlx5e_am_step(struct mlx5e_rx_am *am)
-{
- if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
- return MLX5E_AM_TOO_TIRED;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
- return MLX5E_AM_ON_EDGE;
- am->profile_ix++;
- am->steps_right++;
- break;
- case MLX5E_AM_GOING_LEFT:
- if (am->profile_ix == 0)
- return MLX5E_AM_ON_EDGE;
- am->profile_ix--;
- am->steps_left++;
- break;
- }
-
- am->tired++;
- return MLX5E_AM_STEPPED;
-}
-
-static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tired = 0;
- am->tune_state = MLX5E_AM_PARKING_ON_TOP;
-}
-
-static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tune_state = MLX5E_AM_PARKING_TIRED;
-}
-
-static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
-{
- am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
- MLX5E_AM_GOING_RIGHT;
- mlx5e_am_step(am);
-}
-
-#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
-
-static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
- struct mlx5e_rx_am_stats *prev)
-{
- if (!prev->bpms)
- return curr->bpms ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
- return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
- return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
- return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- return MLX5E_AM_STATS_SAME;
-}
-
-static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
- struct mlx5e_rx_am *am)
-{
- int prev_state = am->tune_state;
- int prev_ix = am->profile_ix;
- int stats_res;
- int step_res;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_SAME)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_PARKING_TIRED:
- am->tired--;
- if (!am->tired)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_GOING_RIGHT:
- case MLX5E_AM_GOING_LEFT:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_BETTER)
- mlx5e_am_turn(am);
-
- if (mlx5e_am_on_top(am)) {
- mlx5e_am_park_on_top(am);
- break;
- }
-
- step_res = mlx5e_am_step(am);
- switch (step_res) {
- case MLX5E_AM_ON_EDGE:
- mlx5e_am_park_on_top(am);
- break;
- case MLX5E_AM_TOO_TIRED:
- mlx5e_am_park_tired(am);
- break;
- }
-
- break;
- }
-
- if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
- (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
- am->prev_stats = *curr_stats;
-
- return am->profile_ix != prev_ix;
-}
-
-static void mlx5e_am_sample(struct mlx5e_rq *rq,
- struct mlx5e_rx_am_sample *s)
-{
- s->time = ktime_get();
- s->pkt_ctr = rq->stats.packets;
- s->byte_ctr = rq->stats.bytes;
- s->event_ctr = rq->cq.event_ctr;
-}
-
-#define MLX5E_AM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
-
-static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
- struct mlx5e_rx_am_sample *end,
- struct mlx5e_rx_am_stats *curr_stats)
-{
- /* u32 holds up to 71 minutes, should be enough */
- u32 delta_us = ktime_us_delta(end->time, start->time);
- u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
- u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
- start->byte_ctr);
-
- if (!delta_us)
- return;
-
- curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
- curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
- curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
- delta_us);
-}
-
-void mlx5e_rx_am_work(struct work_struct *work)
-{
- struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
- work);
- struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
- struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
-
- mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
- cur_profile.usec, cur_profile.pkts);
-
- am->state = MLX5E_AM_START_MEASURE;
-}
-
-void mlx5e_rx_am(struct mlx5e_rq *rq)
-{
- struct mlx5e_rx_am *am = &rq->am;
- struct mlx5e_rx_am_sample end_sample;
- struct mlx5e_rx_am_stats curr_stats;
- u16 nevents;
-
- switch (am->state) {
- case MLX5E_AM_MEASURE_IN_PROGRESS:
- nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
- am->start_sample.event_ctr);
- if (nevents < MLX5E_AM_NEVENTS)
- break;
- mlx5e_am_sample(rq, &end_sample);
- mlx5e_am_calc_stats(&am->start_sample, &end_sample,
- &curr_stats);
- if (mlx5e_am_decision(&curr_stats, am)) {
- am->state = MLX5E_AM_APPLY_NEW_PROFILE;
- schedule_work(&am->work);
- break;
- }
- /* fall through */
- case MLX5E_AM_START_MEASURE:
- mlx5e_am_sample(rq, &am->start_sample);
- am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
- break;
- case MLX5E_AM_APPLY_NEW_PROFILE:
- break;
- }
-}
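
en_rx_am.c is removed wholesale rather than reworked: the EQE/CQE profile tables, the left/right tuning walk and the parking states were promoted into the generic net_dim library so other drivers can share the algorithm. The driver keeps only a thin worker that applies whichever profile the library selected; a sketch of that worker, where the names on the net_dim side are assumptions based on the library of this generation:

/* Sketch only: the mlx5e glue left behind after the move to net_dim.
 * net_dim_get_profile() and NET_DIM_START_MEASURE are assumed names.
 */
void mlx5e_rx_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
	struct net_dim_cq_moder moder =
		net_dim_get_profile(dim->mode, dim->profile_ix);

	mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
				       moder.usec, moder.pkts);
	dim->state = NET_DIM_START_MEASURE;
}
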
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1f1f8af87d4d..5a4608281f38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
int err = 0;
/* Temporarily enable local_lb */
- if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
- mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
- if (!lbtp->local_lb)
- mlx5_nic_vport_update_local_lb(priv->mdev, true);
+ err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
+ if (err)
+ return err;
+
+ if (!lbtp->local_lb) {
+ err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
+ if (err)
+ return err;
}
err = mlx5e_refresh_tirs(priv, true);
if (err)
- return err;
+ goto out;
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
dev_add_pack(&lbtp->pt);
+
+ return 0;
+
+out:
+ if (!lbtp->local_lb)
+ mlx5_nic_vport_update_local_lb(priv->mdev, false);
+
return err;
}
static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
- if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
- if (!lbtp->local_lb)
- mlx5_nic_vport_update_local_lb(priv->mdev, false);
- }
+ if (!lbtp->local_lb)
+ mlx5_nic_vport_update_local_lb(priv->mdev, false);
dev_remove_pack(&lbtp->pt);
mlx5e_refresh_tirs(priv, false);
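
The selftest no longer gates loopback handling on the disable_local_lb capability: it always queries the current state, enables loopback only when it was off, and rolls the change back if the TIR refresh fails, so an error can no longer leave the port with loopback forced on. Condensed, this is the usual save/toggle/restore idiom; a self-contained sketch where do_test() is a hypothetical stand-in for the work between setup and cleanup:

/* Sketch of the save/toggle/restore idiom used above; do_test() is a
 * hypothetical stand-in for the body.
 */
static int run_with_local_lb(struct mlx5_core_dev *mdev)
{
	bool saved;
	int err;

	err = mlx5_nic_vport_query_local_lb(mdev, &saved);
	if (err)
		return err;
	if (!saved) {
		err = mlx5_nic_vport_update_local_lb(mdev, true);
		if (err)
			return err;
	}

	err = do_test(mdev);

	if (!saved)
		mlx5_nic_vport_update_local_lb(mdev, false);
	return err;
}
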
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b74ddc7984bc..5f0f3493d747 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -71,6 +71,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
@@ -99,6 +100,72 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_sw_stats temp, *s = &temp;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ struct mlx5e_ch_stats *ch_stats;
+ int i, j;
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ rq_stats = &c->rq.stats;
+ ch_stats = &c->stats;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_tx += rq_stats->xdp_tx;
+ s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+ s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+ s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+ s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_page_reuse += rq_stats->page_reuse;
+ s->rx_cache_reuse += rq_stats->cache_reuse;
+ s->rx_cache_full += rq_stats->cache_full;
+ s->rx_cache_empty += rq_stats->cache_empty;
+ s->rx_cache_busy += rq_stats->cache_busy;
+ s->rx_cache_waive += rq_stats->cache_waive;
+ s->ch_eq_rearm += ch_stats->eq_rearm;
+
+ for (j = 0; j < priv->channels.params.num_tc; j++) {
+ sq_stats = &c->sq[j].stats;
+
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_xmit_more += sq_stats->xmit_more;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ s->tx_csum_none += sq_stats->csum_none;
+ s->tx_csum_partial += sq_stats->csum_partial;
+ }
+ }
+
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
+ priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events);
+ memcpy(&priv->stats.sw, s, sizeof(*s));
+}
+
static const struct counter_desc q_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};
@@ -128,6 +195,22 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ int err;
+
+ if (!priv->q_counter)
+ return;
+
+ err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
+ if (err)
+ return;
+
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+}
+
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
{ "rx_vport_unicast_packets",
@@ -200,6 +283,19 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
#define PPORT_802_3_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
@@ -252,6 +348,20 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->IEEE_802_3_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_2863_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
@@ -289,6 +399,20 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->RFC_2863_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_2819_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
@@ -336,6 +460,20 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->RFC_2819_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -376,6 +514,27 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->phy_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
+ return;
+
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_ETH_EXT_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -418,6 +577,23 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->eth_ext_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PCIE_PERF_OFF(c) \
MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
@@ -505,6 +681,22 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+ void *out;
+
+ if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+ return;
+
+ out = pcie_stats->pcie_perf_counters;
+ MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+}
+
#define PPORT_PER_PRIO_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -656,6 +848,47 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
return idx;
}
+static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_grp_per_prio_traffic_get_num_stats(priv) +
+ mlx5e_grp_per_prio_pfc_get_num_stats(priv);
+}
+
+static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
+ idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
+ idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
+ return idx;
+}
+
+static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int prio;
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ out = pstats->per_prio_counters[prio];
+ MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+ mlx5_core_access_reg(mdev, in, sz, out, sz,
+ MLX5_REG_PPCNT, 0, 0);
+ }
+}
+
static const struct counter_desc mlx5e_pme_status_desc[] = {
{ "module_unplug", 8 },
};
@@ -723,6 +956,11 @@ static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}
+static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_ipsec_update_stats(priv);
+}
+
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -767,12 +1005,18 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};
+static const struct counter_desc ch_stats_desc[] = {
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
+};
+
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
return (NUM_RQ_STATS * priv->channels.num) +
+ (NUM_CH_STATS * priv->channels.num) +
(NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
}
@@ -785,6 +1029,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_CH_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ch_stats_desc[j].format, i);
+
+ for (i = 0; i < priv->channels.num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
@@ -808,6 +1057,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_CH_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->stats,
+ ch_stats_desc, j);
+
+ for (i = 0; i < channels->num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
@@ -823,61 +1078,71 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+/* The stats groups are listed in the opposite order to their update_stats() calls */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
{
.get_num_stats = mlx5e_grp_sw_get_num_stats,
.fill_strings = mlx5e_grp_sw_fill_strings,
.fill_stats = mlx5e_grp_sw_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_sw_update_stats,
},
{
.get_num_stats = mlx5e_grp_q_get_num_stats,
.fill_strings = mlx5e_grp_q_fill_strings,
.fill_stats = mlx5e_grp_q_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_q_update_stats,
},
{
.get_num_stats = mlx5e_grp_vport_get_num_stats,
.fill_strings = mlx5e_grp_vport_fill_strings,
.fill_stats = mlx5e_grp_vport_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_vport_update_stats,
},
{
.get_num_stats = mlx5e_grp_802_3_get_num_stats,
.fill_strings = mlx5e_grp_802_3_fill_strings,
.fill_stats = mlx5e_grp_802_3_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_802_3_update_stats,
},
{
.get_num_stats = mlx5e_grp_2863_get_num_stats,
.fill_strings = mlx5e_grp_2863_fill_strings,
.fill_stats = mlx5e_grp_2863_fill_stats,
+ .update_stats = mlx5e_grp_2863_update_stats,
},
{
.get_num_stats = mlx5e_grp_2819_get_num_stats,
.fill_strings = mlx5e_grp_2819_fill_strings,
.fill_stats = mlx5e_grp_2819_fill_stats,
+ .update_stats = mlx5e_grp_2819_update_stats,
},
{
.get_num_stats = mlx5e_grp_phy_get_num_stats,
.fill_strings = mlx5e_grp_phy_fill_strings,
.fill_stats = mlx5e_grp_phy_fill_stats,
+ .update_stats = mlx5e_grp_phy_update_stats,
},
{
.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
.fill_strings = mlx5e_grp_eth_ext_fill_strings,
.fill_stats = mlx5e_grp_eth_ext_fill_stats,
+ .update_stats = mlx5e_grp_eth_ext_update_stats,
},
{
.get_num_stats = mlx5e_grp_pcie_get_num_stats,
.fill_strings = mlx5e_grp_pcie_fill_strings,
.fill_stats = mlx5e_grp_pcie_fill_stats,
+ .update_stats = mlx5e_grp_pcie_update_stats,
},
{
- .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
+ .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_fill_stats,
+ .update_stats = mlx5e_grp_per_prio_update_stats,
},
{
.get_num_stats = mlx5e_grp_pme_get_num_stats,
@@ -888,6 +1153,7 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
.fill_strings = mlx5e_grp_ipsec_fill_strings,
.fill_stats = mlx5e_grp_ipsec_fill_stats,
+ .update_stats = mlx5e_grp_ipsec_update_stats,
},
{
.get_num_stats = mlx5e_grp_channels_get_num_stats,
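
Every group can now refresh itself via update_stats(), and update_stats_mask lets the hot ndo_get_stats64 path refresh only the cheap groups tagged MLX5E_NDO_UPDATE_STATS while ethtool refreshes everything. Per the comment above the array, callers walk it in reverse so the software counters are folded last. A sketch of the presumed driver-side loop; the wrapper name is hypothetical and it must sit in en_stats.c, where the array size is known:

/* Sketch: walk the group array backwards, honoring the NDO mask.
 * mlx5e_stats_update_masked() is a hypothetical wrapper name.
 */
static void mlx5e_stats_update_masked(struct mlx5e_priv *priv, u16 mask)
{
	int i;

	for (i = ARRAY_SIZE(mlx5e_stats_grps) - 1; i >= 0; i--) {
		const struct mlx5e_stats_grp *grp = &mlx5e_stats_grps[i];

		if (!grp->update_stats)
			continue;
		if (mask && !(grp->update_stats_mask & mask))
			continue;
		grp->update_stats(priv);
	}
}
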
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index d679e21f686e..0b3320a2b072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,7 @@
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
struct counter_desc {
char format[ETH_GSTRING_LEN];
@@ -88,6 +89,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
+ u64 ch_eq_rearm;
/* Special handling counters */
u64 link_down_events_phy;
@@ -192,6 +194,10 @@ struct mlx5e_sq_stats {
u64 dropped;
};
+struct mlx5e_ch_stats {
+ u64 eq_rearm;
+};
+
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -201,11 +207,17 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
+enum {
+ MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
struct mlx5e_priv;
struct mlx5e_stats_grp {
+ u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*update_stats)(struct mlx5e_priv *priv);
};
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
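
MLX5E_DECLARE_CH_STAT follows the established pattern: the format string carries a %d for the channel index and the offset records where the u64 sits inside mlx5e_ch_stats, so the generic fill code can read any counter without knowing its name. A sketch of the read step; the macro body is an assumption reconstructed from its uses in the en_stats.c hunks above:

/* Assumed shape of the generic 64-bit CPU counter read used above. */
#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)(ptr) + (dsc)[i].offset))

/* e.g. reading ch3_eq_rearm, the only entry in ch_stats_desc: */
u64 v = MLX5E_READ_CTR64_CPU(&channels->c[3]->stats, ch_stats_desc, 0);
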
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 55979ec2e88a..fd98b0dc610f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,17 +51,22 @@
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
+#include "fs_core.h"
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
u32 mod_hdr_id;
+ u32 hairpin_tirn;
+ struct mlx5_flow_table *hairpin_ft;
};
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
MLX5E_TC_FLOW_NIC = BIT(1),
MLX5E_TC_FLOW_OFFLOADED = BIT(2),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(3),
+ MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
};
struct mlx5e_tc_flow {
@@ -71,6 +76,7 @@ struct mlx5e_tc_flow {
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+ struct list_head hairpin; /* flows sharing the same hairpin */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -93,6 +99,32 @@ enum {
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
+struct mlx5e_hairpin {
+ struct mlx5_hairpin *pair;
+
+ struct mlx5_core_dev *func_mdev;
+ struct mlx5e_priv *func_priv;
+ u32 tdn;
+ u32 tirn;
+
+ int num_channels;
+ struct mlx5e_rqt indir_rqt;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table ttc;
+};
+
+struct mlx5e_hairpin_entry {
+ /* a node of a hash table which keeps all the hairpin entries */
+ struct hlist_node hairpin_hlist;
+
+ /* flows sharing the same hairpin */
+ struct list_head flows;
+
+ u16 peer_vhca_id;
+ u8 prio;
+ struct mlx5e_hairpin *hp;
+};
+
struct mod_hdr_key {
int num_actions;
void *actions;
@@ -222,6 +254,417 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
}
}
+static
+struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+
+ netdev = __dev_get_by_index(net, ifindex);
+ priv = netdev_priv(netdev);
+ return priv->mdev;
+}
+
+static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ void *tirc;
+ int err;
+
+ err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
+ if (err)
+ goto alloc_tdn_err;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+ if (err)
+ goto create_tir_err;
+
+ return 0;
+
+create_tir_err:
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+alloc_tdn_err:
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
+{
+ mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+}
+
+static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+{
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
+ struct mlx5e_priv *priv = hp->func_priv;
+ int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
+
+ mlx5e_build_default_indir_rqt(indirection_rqt, sz,
+ hp->num_channels);
+
+ for (i = 0; i < sz; i++) {
+ ix = i;
+ if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, ilog2(sz));
+ ix = indirection_rqt[ix];
+ rqn = hp->pair->rqn[ix];
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
+ }
+}
+
+static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
+{
+ int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
+ struct mlx5e_priv *priv = hp->func_priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqtc;
+ u32 *in;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+
+ err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
+ if (!err)
+ hp->indir_rqt.enabled = true;
+
+ kvfree(in);
+ return err;
+}
+
+static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)];
+ int tt, i, err;
+ void *tirc;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
+ mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in,
+ MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
+ if (err) {
+ mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
+ goto err_destroy_tirs;
+ }
+ }
+ return 0;
+
+err_destroy_tirs:
+ for (i = 0; i < tt; i++)
+ mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
+}
+
+static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+ int tt;
+
+ memset(ttc_params, 0, sizeof(*ttc_params));
+
+ ttc_params->any_tt_tirn = hp->tirn;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
+
+ ft_attr->max_fte = MLX5E_NUM_TT;
+ ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_TC_PRIO;
+}
+
+static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+ struct ttc_params ttc_params;
+ int err;
+
+ err = mlx5e_hairpin_create_indirect_rqt(hp);
+ if (err)
+ return err;
+
+ err = mlx5e_hairpin_create_indirect_tirs(hp);
+ if (err)
+ goto err_create_indirect_tirs;
+
+ mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
+ if (err)
+ goto err_create_ttc_table;
+
+ netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
+ hp->num_channels, hp->ttc.ft.t->id);
+
+ return 0;
+
+err_create_ttc_table:
+ mlx5e_hairpin_destroy_indirect_tirs(hp);
+err_create_indirect_tirs:
+ mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+
+ return err;
+}
+
+static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+
+ mlx5e_destroy_ttc_table(priv, &hp->ttc);
+ mlx5e_hairpin_destroy_indirect_tirs(hp);
+ mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+}
+
+static struct mlx5e_hairpin *
+mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
+ int peer_ifindex)
+{
+ struct mlx5_core_dev *func_mdev, *peer_mdev;
+ struct mlx5e_hairpin *hp;
+ struct mlx5_hairpin *pair;
+ int err;
+
+ hp = kzalloc(sizeof(*hp), GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ func_mdev = priv->mdev;
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+
+ pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
+ if (IS_ERR(pair)) {
+ err = PTR_ERR(pair);
+ goto create_pair_err;
+ }
+ hp->pair = pair;
+ hp->func_mdev = func_mdev;
+ hp->func_priv = priv;
+ hp->num_channels = params->num_channels;
+
+ err = mlx5e_hairpin_create_transport(hp);
+ if (err)
+ goto create_transport_err;
+
+ if (hp->num_channels > 1) {
+ err = mlx5e_hairpin_rss_init(hp);
+ if (err)
+ goto rss_init_err;
+ }
+
+ return hp;
+
+rss_init_err:
+ mlx5e_hairpin_destroy_transport(hp);
+create_transport_err:
+ mlx5_core_hairpin_destroy(hp->pair);
+create_pair_err:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
+{
+ if (hp->num_channels > 1)
+ mlx5e_hairpin_rss_cleanup(hp);
+ mlx5e_hairpin_destroy_transport(hp);
+ mlx5_core_hairpin_destroy(hp->pair);
+ kvfree(hp);
+}
+
+static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
+{
+ return (peer_vhca_id << 16 | prio);
+}
+
+static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
+ u16 peer_vhca_id, u8 prio)
+{
+ struct mlx5e_hairpin_entry *hpe;
+ u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
+
+ hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hairpin_hlist, hash_key) {
+ if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
+ return hpe;
+ }
+
+ return NULL;
+}
+
+#define UNKNOWN_MATCH_PRIO 8
+
+static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec, u8 *match_prio)
+{
+ void *headers_c, *headers_v;
+ u8 prio_val, prio_mask = 0;
+ bool vlan_present;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
+ netdev_warn(priv->netdev,
+ "only PCP trust state supported for hairpin\n");
+ return -EOPNOTSUPP;
+ }
+#endif
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+
+ vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
+ if (vlan_present) {
+ prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+ prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+ }
+
+ if (!vlan_present || !prio_mask) {
+ prio_val = UNKNOWN_MATCH_PRIO;
+ } else if (prio_mask != 0x7) {
+ netdev_warn(priv->netdev,
+ "masked priority match not supported for hairpin\n");
+ return -EOPNOTSUPP;
+ }
+
+ *match_prio = prio_val;
+ return 0;
+}
+
+static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ int peer_ifindex = parse_attr->mirred_ifindex;
+ struct mlx5_hairpin_params params;
+ struct mlx5_core_dev *peer_mdev;
+ struct mlx5e_hairpin_entry *hpe;
+ struct mlx5e_hairpin *hp;
+ u64 link_speed64;
+ u32 link_speed;
+ u8 match_prio;
+ u16 peer_id;
+ int err;
+
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
+ netdev_warn(priv->netdev, "hairpin is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+ err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+ if (err)
+ return err;
+ hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
+ if (hpe)
+ goto attach_flow;
+
+ hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
+ if (!hpe)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hpe->flows);
+ hpe->peer_vhca_id = peer_id;
+ hpe->prio = match_prio;
+
+ params.log_data_size = 15;
+ params.log_data_size = min_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
+ params.log_data_size = max_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
+
+ params.log_num_packets = params.log_data_size -
+ MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
+ params.log_num_packets = min_t(u8, params.log_num_packets,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
+
+ params.q_counter = priv->q_counter;
+ /* set one hairpin pair per 50Gbps share of the link */
+ mlx5e_get_max_linkspeed(priv->mdev, &link_speed);
+ link_speed = max_t(u32, link_speed, 50000);
+ link_speed64 = link_speed;
+ do_div(link_speed64, 50000);
+ params.num_channels = link_speed64;
+
+ hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ if (IS_ERR(hp)) {
+ err = PTR_ERR(hp);
+ goto create_hairpin_err;
+ }
+
+ netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
+ hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
+ hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
+
+ hpe->hp = hp;
+ hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_hairpin_info(peer_id, match_prio));
+
+attach_flow:
+ if (hpe->hp->num_channels > 1) {
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
+ flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
+ } else {
+ flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ }
+ list_add(&flow->hairpin, &hpe->flows);
+
+ return 0;
+
+create_hairpin_err:
+ kfree(hpe);
+ return err;
+}
+
+static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct list_head *next = flow->hairpin.next;
+
+ list_del(&flow->hairpin);
+
+ /* no more hairpin flows for us, release the hairpin pair */
+ if (list_empty(next)) {
+ struct mlx5e_hairpin_entry *hpe;
+
+ hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
+
+ netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ hpe->hp->pair->peer_mdev->priv.name);
+
+ mlx5e_hairpin_destroy(hpe->hp);
+ hash_del(&hpe->hairpin_hlist);
+ kfree(hpe);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -229,7 +672,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flow_tag = attr->flow_tag,
@@ -238,18 +681,37 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
- int err;
+ int err, dest_ix = 0;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.vlan.ft.t;
- } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter))
- return ERR_CAST(counter);
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_add_hairpin_flow;
+ }
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = attr->hairpin_ft;
+ } else {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[dest_ix].tir_num = attr->hairpin_tirn;
+ }
+ dest_ix++;
+ } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ dest_ix++;
+ }
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter = counter;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(counter)) {
+ rule = ERR_CAST(counter);
+ goto err_fc_create;
+ }
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dest_ix].counter = counter;
+ dest_ix++;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -279,7 +741,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_PRIO,
tc_tbl_size,
MLX5E_TC_TABLE_NUM_GROUPS,
- 0, 0);
+ MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
@@ -292,7 +754,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, &dest, 1);
+ &flow_act, dest, dest_ix);
if (IS_ERR(rule))
goto err_add_rule;
@@ -309,7 +771,10 @@ err_create_ft:
mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
-
+err_fc_create:
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
+err_add_hairpin_flow:
return rule;
}
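
The destination for a NIC flow grew from a scalar to a two-entry array because a hairpin flow needs both a forwarding target (TIR or TTC table) and a counter, with dest_ix tracking how many slots were filled. Spelled out for the single-channel hairpin-plus-count case, using only names from the hunk above:

/* Spelled-out two-destination case from the hunk above (hairpin TIR
 * plus counter); all names come from mlx5e_tc_add_nic_flow().
 */
static struct mlx5_flow_handle *
add_hairpin_counted_rule(struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct mlx5_flow_act *flow_act,
			 u32 hairpin_tirn, struct mlx5_fc *counter)
{
	struct mlx5_flow_destination dest[2] = {};

	dest[0].type    = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest[0].tir_num = hairpin_tirn;
	dest[1].type    = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter = counter;

	return mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   flow_act, dest, 2);
}
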
@@ -330,6 +795,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -617,7 +1085,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ struct net_device *up_dev = uplink_rpriv->netdev;
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */
@@ -1421,6 +1890,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return true;
}
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *fmdev, *pmdev;
+ u16 func_id, peer_id;
+
+ fmdev = priv->mdev;
+ pmdev = peer_priv->mdev;
+
+ func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
+ peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+
+ return (func_id == peer_id);
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -1465,6 +1948,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EOPNOTSUPP;
}
+ if (is_tcf_mirred_egress_redirect(a)) {
+ struct net_device *peer_dev = tcf_mirred_dev(a);
+
+ if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
+ same_hw_devs(priv, netdev_priv(peer_dev))) {
+ parse_attr->mirred_ifindex = peer_dev->ifindex;
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
+ peer_dev->name);
+ return -EINVAL;
+ }
+ continue;
+ }
+
if (is_tcf_skbedit_mark(a)) {
u32 mark = tcf_skbedit_mark(a);
@@ -1507,6 +2007,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
int *out_ttl)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *uplink_rpriv;
struct rtable *rt;
struct neighbour *n = NULL;
@@ -1520,9 +2021,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
#else
return -EOPNOTSUPP;
#endif
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
- *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ *out_dev = uplink_rpriv->netdev;
else
*out_dev = rt->dst.dev;
@@ -1547,6 +2049,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct dst_entry *dst;
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
@@ -1557,9 +2060,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
*out_ttl = ip6_dst_hoplimit(dst);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
- *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ *out_dev = uplink_rpriv->netdev;
else
*out_dev = dst->dev;
#else
@@ -1859,7 +2363,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+ REP_ETH);
+ struct net_device *up_dev = uplink_rpriv->netdev;
unsigned short family = ip_tunnel_info_af(tun_info);
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
@@ -1982,11 +2488,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
- out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+ out_dev = tcf_mirred_dev(a);
if (switchdev_port_same_parent_id(priv->netdev,
out_dev)) {
@@ -1996,7 +2501,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
} else if (encap) {
- parse_attr->mirred_ifindex = ifindex;
+ parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
@@ -2182,6 +2687,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
+ hash_init(tc->hairpin_tbl);
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
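
The channel-count arithmetic in mlx5e_hairpin_flow_add above sizes the hairpin by link bandwidth, one channel per 50Gbps share, which then decides between the direct-TIR and RSS paths. A worked example of the max_t()/do_div() sequence:

/* Worked example of the sizing above:
 *   100G port: link_speed = 100000; max_t -> 100000; 100000 / 50000 = 2
 *              channels, so the RSS path (indirect TIRs + TTC table) runs.
 *    25G port: link_speed = 25000;  max_t -> 50000;  50000 / 50000 = 1
 *              channel, so traffic goes to the single direct TIR.
 */
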
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index ab92298eafc3..f292bb346985 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
- if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM))
- mlx5e_rx_am(&c->rq);
+ if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
+ struct net_dim_sample dim_sample;
+ net_dim_sample(c->rq.cq.event_ctr,
+ c->rq.stats.packets,
+ c->rq.stats.bytes,
+ &dim_sample);
+ net_dim(&c->rq.dim, dim_sample);
+ }
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
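
The poll loop now only takes a raw sample and hands it to the library; deciding whether the profile should change, and scheduling dim.work when it should, is net_dim's job. The remaining driver obligation is wiring the work item when the channel opens; a sketch under the assumption that this happens in the RQ/channel setup path:

/* Sketch: assumed wiring at channel open, pairing with the net_dim()
 * call in the poll loop above.
 */
static void mlx5e_rq_dim_init(struct mlx5e_channel *c, u8 cq_period_mode)
{
	INIT_WORK(&c->rq.dim.work, mlx5e_rx_dim_work);
	c->rq.dim.mode = cq_period_mode; /* EQE- vs CQE-based moderation */
}
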
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index e7e7cef2bde4..05c1157bc9f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -417,7 +417,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
mlx5_cq_completion(dev, cqn);
break;
-
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
+ rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
+ mlx5_rsc_event(dev, rsn, eqe->type);
+ break;
case MLX5_EVENT_TYPE_PATH_MIG:
case MLX5_EVENT_TYPE_COMM_EST:
case MLX5_EVENT_TYPE_SQ_DRAINED:
@@ -530,6 +534,24 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
+/* Some architectures don't latch interrupts when they are disabled, so using
+ * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
+ * avoid losing them. It is not recommended to use it, unless this is the last
+ * resort.
+ */
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
+{
+ u32 count_eqe;
+
+ disable_irq(eq->irqn);
+ count_eqe = eq->cons_index;
+ mlx5_eq_int(eq->irqn, eq);
+ count_eqe = eq->cons_index - count_eqe;
+ enable_irq(eq->irqn);
+
+ return count_eqe;
+}
+
static void init_eq_buf(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe;
@@ -715,6 +737,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, fpga))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);
+ if (MLX5_CAP_GEN_MAX(dev, dct))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
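
mlx5_eq_poll_irq_disabled() returns the number of EQEs consumed while the line was masked, so a recovery path can both unstick a lost completion and tell whether anything was actually pending. A hedged sketch of a hypothetical last-resort caller:

/* Sketch: hypothetical last-resort recovery caller. */
static void recover_lost_eqes(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	u32 eqes = mlx5_eq_poll_irq_disabled(eq);

	if (eqes)
		mlx5_core_warn(dev, "recovered %u EQEs by polling EQ 0x%x\n",
			       eqes, eq->eqn);
}
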
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bbb140f517c4..5ecf2cddc16d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "fs_core.h"
#define UPLINK_VPORT 0xFFFF
@@ -867,9 +868,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->vport);
if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+ esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
}
@@ -984,9 +986,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport->vport);
if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
}
@@ -1121,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->ingress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
u8 *smac_v;
@@ -1186,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->ingress.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
@@ -1208,8 +1224,12 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->egress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
@@ -1260,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach egress drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->egress.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
@@ -1290,7 +1319,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &tsar_ctx,
+ tsar_ctx,
&esw->qos.root_tsar_id);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1362,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
if (vport->qos.enabled)
return -EEXIST;
- MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
- MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
- MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
initial_max_rate);
- MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &sched_ctx,
+ sched_ctx,
&vport->qos.esw_tsar_ix);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1421,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
if (!vport->qos.enabled)
return -EIO;
- MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
- MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
- MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
max_rate);
- MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
err = mlx5_modify_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &sched_ctx,
+ sched_ctx,
vport->qos.esw_tsar_ix,
bitmask);
if (err) {
@@ -1455,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
}
}
+static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
+ vport->ingress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->ingress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure ingress drop rule counter failed\n",
+ vport->vport);
+ vport->ingress.drop_counter = NULL;
+ }
+ }
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
+ vport->egress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->egress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure egress drop rule counter failed\n",
+ vport->vport);
+ vport->egress.drop_counter = NULL;
+ }
+ }
+}
+
+static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (vport->ingress.drop_counter)
+ mlx5_fc_destroy(dev, vport->ingress.drop_counter);
+ if (vport->egress.drop_counter)
+ mlx5_fc_destroy(dev, vport->egress.drop_counter);
+}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1481,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
+ /* create steering drop counters for ingress and egress ACLs */
+ if (vport_num && esw->mode == SRIOV_LEGACY)
+ esw_vport_create_drop_counters(vport);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1519,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_destroy_drop_counters(vport);
}
esw->enabled_vports--;
mutex_unlock(&esw->state_lock);
@@ -1644,13 +1713,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
- esw->offloads.vport_reps =
- kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
- GFP_KERNEL);
- if (!esw->offloads.vport_reps) {
- err = -ENOMEM;
+ err = esw_offloads_init_reps(esw);
+ if (err)
goto abort;
- }
hash_init(esw->offloads.encap_tbl);
hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1746,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
+ esw_offloads_cleanup_reps(esw);
kfree(esw->vports);
- kfree(esw->offloads.vport_reps);
kfree(esw);
return err;
}
@@ -1696,7 +1761,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
- kfree(esw->offloads.vport_reps);
+ esw_offloads_cleanup_reps(esw);
kfree(esw->vports);
kfree(esw);
}
@@ -2018,12 +2083,36 @@ unlock:
return err;
}
+static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ int vport_idx,
+ struct mlx5_vport_drop_stats *stats)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ u64 bytes = 0;
+ u16 idx = 0;
+
+ if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+ return;
+
+ if (vport->egress.drop_counter) {
+ idx = vport->egress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
+ }
+
+ if (vport->ingress.drop_counter) {
+ idx = vport->ingress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
+ }
+}
+
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
@@ -2078,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ vf_stats->rx_dropped = stats.rx_dropped;
+ vf_stats->tx_dropped = stats.tx_dropped;
+
free_out:
kvfree(out);
return err;
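[Example, not part of the patch] One subtlety in the drop-stats hunk above: the direction flips because the ACLs sit on the e-switch side of the vport, so e-switch egress is the VF's receive path and e-switch ingress is its transmit path. A minimal sketch of reading one of the counters; the helper name is illustrative:

/* Sketch: egress drops seen at the e-switch vport are reported to the VF
 * as rx_dropped, mirroring mlx5_eswitch_query_vport_drop_stats() above.
 */
static void query_vf_rx_drops(struct mlx5_core_dev *dev,
			      struct mlx5_vport *vport, u64 *rx_dropped)
{
	u64 bytes = 0;

	if (vport->egress.drop_counter)
		mlx5_fc_query(dev, vport->egress.drop_counter->id,
			      rx_dropped, &bytes);
}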
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 565c8b7a399a..2fa037066b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -45,6 +45,11 @@ enum {
SRIOV_OFFLOADS
};
+enum {
+ REP_ETH,
+ NUM_REP_TYPES,
+};
+
#ifdef CONFIG_MLX5_ESWITCH
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -68,6 +73,7 @@ struct vport_ingress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
};
struct vport_egress {
@@ -76,6 +82,12 @@ struct vport_egress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+};
+
+struct mlx5_vport_drop_stats {
+ u64 rx_dropped;
+ u64 tx_dropped;
};
struct mlx5_vport_info {
@@ -133,25 +145,21 @@ struct mlx5_eswitch_fdb {
};
};
-struct mlx5_esw_sq {
- struct mlx5_flow_handle *send_to_vport_rule;
- struct list_head list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+ int (*load)(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch_rep *rep);
+ void *priv;
+ bool valid;
};
struct mlx5_eswitch_rep {
- int (*load)(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
- void (*unload)(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
+ struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
u16 vport;
u8 hw_id[ETH_ALEN];
- struct net_device *netdev;
-
- struct mlx5_flow_handle *vport_rx_rule;
- struct list_head vport_sqs_list;
u16 vlan;
u32 vlan_refcount;
- bool valid;
};
struct mlx5_esw_offload {
@@ -197,6 +205,8 @@ struct mlx5_eswitch {
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +231,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+ u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
@@ -257,12 +271,6 @@ struct mlx5_esw_flow_attr {
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +280,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
- struct mlx5_eswitch_rep *rep);
+ struct mlx5_eswitch_rep_if *rep_if,
+ u8 rep_type);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+ int vport_index,
+ u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr);
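[Example, not part of the patch] The header changes above replace the single load/unload pair embedded in mlx5_eswitch_rep with an array of per-type interfaces, so more than one upper layer (REP_ETH today, others later) can attach to the same representor. A sketch of how a consumer registers; the callback bodies are elided and every name other than the exported API is illustrative:

static int eth_rep_load(struct mlx5_core_dev *dev,
			struct mlx5_eswitch_rep *rep)
{
	/* create the rep netdev, stash private state via rep_if->priv */
	return 0;
}

static void eth_rep_unload(struct mlx5_eswitch_rep *rep)
{
	/* undo eth_rep_load() */
}

static void register_eth_reps(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep_if rep_if = {
		.load	= eth_rep_load,
		.unload	= eth_rep_unload,
		.priv	= NULL,
	};
	int vport;

	for (vport = 0; vport < nvports; vport++)
		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}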
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1143d80119bd..99f583a15cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
- if (!rep->valid)
+ if (!rep->rep_if[REP_ETH].valid)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -302,7 +302,7 @@ out:
return err;
}
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
@@ -339,57 +339,9 @@ out:
return flow_rule;
}
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
-{
- struct mlx5_esw_sq *esw_sq, *tmp;
-
- if (esw->mode != SRIOV_OFFLOADS)
- return;
-
- list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
- mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
- list_del(&esw_sq->list);
- kfree(esw_sq);
- }
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num)
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
- struct mlx5_flow_handle *flow_rule;
- struct mlx5_esw_sq *esw_sq;
- int err;
- int i;
-
- if (esw->mode != SRIOV_OFFLOADS)
- return 0;
-
- for (i = 0; i < sqns_num; i++) {
- esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
- if (!esw_sq) {
- err = -ENOMEM;
- goto out_err;
- }
-
- /* Add re-inject rule to the PF/representor sqs */
- flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- rep->vport,
- sqns_array[i]);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- kfree(esw_sq);
- goto out_err;
- }
- esw_sq->send_to_vport_rule = flow_rule;
- list_add(&esw_sq->list, &rep->vport_sqs_list);
- }
- return 0;
-
-out_err:
- mlx5_eswitch_sqs2vport_stop(esw, rep);
- return err;
+ mlx5_del_flow_rules(rule);
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
return err;
}
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+ kfree(esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+ int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_esw_offload *offloads;
+ struct mlx5_eswitch_rep *rep;
+ u8 hw_id[ETH_ALEN];
+ int vport;
+
+ esw->offloads.vport_reps = kcalloc(total_vfs,
+ sizeof(struct mlx5_eswitch_rep),
+ GFP_KERNEL);
+ if (!esw->offloads.vport_reps)
+ return -ENOMEM;
+
+ offloads = &esw->offloads;
+ mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+
+ for (vport = 0; vport < total_vfs; vport++) {
+ rep = &offloads->vport_reps[vport];
+
+ rep->vport = vport;
+ ether_addr_copy(rep->hw_id, hw_id);
+ }
+
+ offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+
+ return 0;
+}
+
+static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+
+ for (vport = nvports - 1; vport >= 0; vport--) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->rep_if[rep_type].valid)
+ continue;
+
+ rep->rep_if[rep_type].unload(rep);
+ }
+}
+
+static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = NUM_REP_TYPES;
+
+ while (rep_type-- > 0)
+ esw_offloads_unload_reps_type(esw, nvports, rep_type);
+}
+
+static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int vport;
int err;
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->rep_if[rep_type].valid)
+ continue;
+
+ err = rep->rep_if[rep_type].load(esw->dev, rep);
+ if (err)
+ goto err_reps;
+ }
+
+ return 0;
+
+err_reps:
+ esw_offloads_unload_reps_type(esw, vport, rep_type);
+ return err;
+}
+
+static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = 0;
+ int err;
+
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+ if (err)
+ goto err_reps;
+ }
+
+ return err;
+
+err_reps:
+ while (rep_type-- > 0)
+ esw_offloads_unload_reps_type(esw, nvports, rep_type);
+ return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+ int err;
+
/* disable PF RoCE so missed packets don't go through RoCE steering */
mlx5_dev_list_lock();
mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
goto create_fg_err;
- for (vport = 0; vport < nvports; vport++) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
-
- err = rep->load(esw, rep);
- if (err)
- goto err_reps;
- }
+ err = esw_offloads_load_reps(esw, nvports);
+ if (err)
+ goto err_reps;
return 0;
err_reps:
- for (vport--; vport >= 0; vport--) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
- rep->unload(esw, rep);
- }
esw_destroy_vport_rx_group(esw);
create_fg_err:
@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
- struct mlx5_eswitch_rep *rep;
- int vport;
-
- for (vport = nvports - 1; vport >= 0; vport--) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
- rep->unload(esw, rep);
- }
-
+ esw_offloads_unload_reps(esw, nvports);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
- struct mlx5_eswitch_rep *__rep)
+ struct mlx5_eswitch_rep_if *__rep_if,
+ u8 rep_type)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
- struct mlx5_eswitch_rep *rep;
-
- rep = &offloads->vport_reps[vport_index];
+ struct mlx5_eswitch_rep_if *rep_if;
- memset(rep, 0, sizeof(*rep));
+ rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
- rep->load = __rep->load;
- rep->unload = __rep->unload;
- rep->vport = __rep->vport;
- rep->netdev = __rep->netdev;
- ether_addr_copy(rep->hw_id, __rep->hw_id);
+ rep_if->load = __rep_if->load;
+ rep_if->unload = __rep_if->unload;
+ rep_if->priv = __rep_if->priv;
- INIT_LIST_HEAD(&rep->vport_sqs_list);
- rep->valid = true;
+ rep_if->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index)
+ int vport_index, u8 rep_type)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
rep = &offloads->vport_reps[vport_index];
if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
- rep->unload(esw, rep);
+ rep->rep_if[rep_type].unload(rep);
- rep->valid = false;
+ rep->rep_if[rep_type].valid = false;
}
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
rep = &offloads->vport_reps[UPLINK_REP_INDEX];
- return rep->netdev;
+ return rep->rep_if[rep_type].priv;
}
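[Example, not part of the patch] With mlx5_eswitch_sqs2vport_start/stop removed above, the per-SQ list bookkeeping moves out of the e-switch: callers now get a raw flow handle and own its lifetime. A minimal sketch of the new contract; the surrounding function is illustrative:

static int rep_setup_sq_rule(struct mlx5_eswitch *esw, int vport, u32 sqn,
			     struct mlx5_flow_handle **rule_out)
{
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_send_to_vport_rule(esw, vport, sqn);
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	*rule_out = rule;	/* caller stores it, and on teardown calls */
	return 0;		/* mlx5_eswitch_del_send_to_vport_rule(rule) */
}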
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index c4392f741c5f..e6175f8ac0e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -688,7 +688,7 @@ static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
- MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
@@ -727,7 +727,7 @@ static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, next_rcv_psn,
MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
- MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
@@ -888,7 +888,8 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
MLX5_ROCE_VERSION_2,
MLX5_ROCE_L3_TYPE_IPV6,
- remote_ip, remote_mac, true, 0);
+ remote_ip, remote_mac, true, 0,
+ MLX5_FPGA_PORT_NUM);
if (err) {
mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
ret = ERR_PTR(err);
@@ -954,7 +955,7 @@ err_cq:
mlx5_fpga_conn_destroy_cq(conn);
err_gid:
mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
- NULL, false, 0);
+ NULL, false, 0, MLX5_FPGA_PORT_NUM);
err_rsvd_gid:
mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
err:
@@ -982,7 +983,7 @@ void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
mlx5_fpga_conn_destroy_cq(conn);
mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
- NULL, NULL, false, 0);
+ NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM);
mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
kfree(conn);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index dfaad9ecb2b8..c025c98700e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -89,6 +89,9 @@
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+#define KERNEL_NIC_TC_NUM_PRIOS 1
+#define KERNEL_NIC_TC_NUM_LEVELS 2
+
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
@@ -134,7 +137,7 @@ static struct init_tree_node {
ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
ETHTOOL_PRIO_NUM_LEVELS))),
ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+ ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
@@ -2026,16 +2029,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &steering->fdb_root_ns->ns;
else
return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (steering->esw_egress_root_ns)
- return &steering->esw_egress_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (steering->esw_ingress_root_ns)
- return &steering->esw_ingress_root_ns->ns;
- else
- return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@ -2066,6 +2059,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+ return NULL;
+
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (steering->esw_egress_root_ns &&
+ steering->esw_egress_root_ns[vport])
+ return &steering->esw_egress_root_ns[vport]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (steering->esw_ingress_root_ns &&
+ steering->esw_ingress_root_ns[vport])
+ return &steering->esw_ingress_root_ns[vport]->ns;
+ else
+ return NULL;
+ default:
+ return NULL;
+ }
+}
+
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio, int num_levels)
{
@@ -2343,13 +2363,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node);
}
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_egress_root_ns)
+ return;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+ kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_ingress_root_ns)
+ return;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+ kfree(steering->esw_ingress_root_ns);
+}
+
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->esw_egress_root_ns);
- cleanup_root_ns(steering->esw_ingress_root_ns);
+ cleanup_egress_acls_root_ns(dev);
+ cleanup_ingress_acls_root_ns(dev);
cleanup_root_ns(steering->fdb_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2418,34 +2466,86 @@ out_err:
return PTR_ERR(prio);
}
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
struct fs_prio *prio;
- steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
- if (!steering->esw_egress_root_ns)
+ steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+ if (!steering->esw_egress_root_ns[vport])
return -ENOMEM;
/* create 1 prio*/
- prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
- MLX5_TOTAL_VPORTS(steering->dev));
+ prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
return PTR_ERR_OR_ZERO(prio);
}
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
struct fs_prio *prio;
- steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
- if (!steering->esw_ingress_root_ns)
+ steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+ if (!steering->esw_ingress_root_ns[vport])
return -ENOMEM;
/* create 1 prio*/
- prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
- MLX5_TOTAL_VPORTS(steering->dev));
+ prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
return PTR_ERR_OR_ZERO(prio);
}
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+ sizeof(*steering->esw_egress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_egress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ err = init_egress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+ kfree(steering->esw_egress_root_ns);
+ return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+ sizeof(*steering->esw_ingress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_ingress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ err = init_ingress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ kfree(steering->esw_ingress_root_ns);
+ return err;
+}
+
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
@@ -2488,12 +2588,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acl_root_ns(steering);
+ err = init_egress_acls_root_ns(dev);
if (err)
goto err;
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acl_root_ns(steering);
+ err = init_ingress_acls_root_ns(dev);
if (err)
goto err;
}
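[Example, not part of the patch] After the fs_core.c rework above, each vport owns its own ingress/egress ACL root namespace with a single prio, instead of all vports sharing one namespace carrying MLX5_TOTAL_VPORTS prios. The vport-aware lookup is what the e-switch hunks earlier switched to; a minimal usage sketch:

/* Sketch: resolving a vport's egress ACL namespace before building tables. */
static struct mlx5_flow_namespace *
get_vport_egress_ns(struct mlx5_core_dev *dev, int vport)
{
	/* returns NULL if ACL steering is unsupported or vport is out of range */
	return mlx5_get_flow_vport_acl_namespace(dev,
						 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						 vport);
}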
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 397d24a621a4..05262708f14b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
- struct mlx5_flow_root_namespace *esw_egress_root_ns;
- struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+ struct mlx5_flow_root_namespace **esw_egress_root_ns;
+ struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
};
@@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 89d1f8650033..b7ab929d5f8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
}
}
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ return mlx5_cmd_fc_query(dev, id, packets, bytes);
+}
+
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
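[Example, not part of the patch] The new mlx5_fc_query() above is a thin synchronous wrapper over the firmware counter query, in contrast to mlx5_fc_query_cached(), which returns the values maintained by the periodic bulk-stats work. A sketch of the two call styles, assuming counter is a valid struct mlx5_fc *:

static void show_counter(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	u64 pkts, bytes, lastuse;

	/* fresh values straight from firmware (sleeps on the cmd interface) */
	mlx5_fc_query(dev, counter->id, &pkts, &bytes);

	/* last values gathered by the stats work, no firmware round trip */
	mlx5_fc_query_cached(counter, &bytes, &pkts, &lastuse);
}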
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 5ef1b56b6a96..9d11e92fb541 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -195,12 +195,20 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return 0;
}
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
+ int i;
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+
+ if (MLX5_CAP_GEN(dev, sw_owner_id)) {
+ for (i = 0; i < 4; i++)
+ MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i,
+ sw_owner_id[i]);
+ }
+
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 6f338a9219c8..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
.get_drvinfo = mlx5i_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ts_info = mlx5i_get_ts_info,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 8812d7208e8f..f953378bd13d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -41,7 +41,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
@@ -86,6 +85,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
mlx5i_build_nic_params(mdev, &priv->channels.params);
+ mlx5e_timestamp_init(priv);
+
/* netdev init */
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
@@ -187,7 +188,7 @@ int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp
MLX5_QP_ENHANCED_ULP_STATELESS_MODE);
addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
- MLX5_SET(ads, addr_path, port, 1);
+ MLX5_SET(ads, addr_path, vhca_port_num, 1);
MLX5_SET(ads, addr_path, grh, 1);
ret = mlx5_core_create_qp(mdev, qp, in, inlen);
@@ -240,7 +241,8 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
- int err;
+ struct ttc_params ttc_params = {};
+ int tt, err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -255,14 +257,23 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ mlx5e_set_ttc_basic_params(priv, &ttc_params);
+ mlx5e_set_inner_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+ err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ mlx5e_set_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
@@ -272,7 +283,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
return 0;
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -281,8 +292,8 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
}
@@ -396,7 +407,7 @@ int mlx5i_dev_init(struct net_device *dev)
return 0;
}
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -450,7 +461,6 @@ static int mlx5i_open(struct net_device *netdev)
mlx5e_refresh_tirs(epriv, false);
mlx5e_activate_priv_channels(epriv);
- mlx5e_timestamp_set(epriv);
mutex_unlock(&epriv->state_lock);
return 0;
@@ -485,7 +495,7 @@ static int mlx5i_close(struct net_device *netdev)
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mlx5i_uninit_underlay_qp(epriv);
mlx5e_deactivate_priv_channels(epriv);
- mlx5e_close_channels(&epriv->channels);;
+ mlx5e_close_channels(&epriv->channels);
unlock:
mutex_unlock(&epriv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 49008022c306..6d9053bcbe95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
/* Get the net-device corresponding to the given underlay QPN */
struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
-/* Shared ndo functionts */
+/* Shared ndo functions */
int mlx5i_dev_init(struct net_device *dev);
void mlx5i_dev_cleanup(struct net_device *dev);
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
void mlx5i_init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 531b02cc979b..b69e9d847a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev);
static int mlx5i_pkey_dev_init(struct net_device *dev);
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
@@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_init = mlx5i_pkey_dev_init,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
+ .ndo_do_ioctl = mlx5i_pkey_ioctl,
};
/* Child NDOs */
@@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
return mlx5i_dev_init(dev);
}
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return mlx5i_ioctl(dev, ifr, cmd);
+}
+
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
return mlx5i_dev_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index fa8aed62b231..e159243e0fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -31,6 +31,8 @@
*/
#include <linux/clocksource.h>
+#include <linux/highmem.h>
+#include <rdma/mlx5-abi.h>
#include "en.h"
enum {
@@ -71,6 +73,28 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
return mlx5_read_internal_timer(mdev) & cc->mask;
}
+static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 sign;
+
+ if (!clock_info)
+ return;
+
+ sign = smp_load_acquire(&clock_info->sign);
+ smp_store_mb(clock_info->sign,
+ sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
+
+ clock_info->cycles = clock->tc.cycle_last;
+ clock_info->mult = clock->cycles.mult;
+ clock_info->nsec = clock->tc.nsec;
+ clock_info->frac = clock->tc.frac;
+
+ smp_store_release(&clock_info->sign,
+ sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
+}
+
static void mlx5_pps_out(struct work_struct *work)
{
struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
@@ -109,6 +133,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_lock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
@@ -123,6 +148,7 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
write_lock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -152,6 +178,7 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
write_lock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -179,6 +206,7 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -423,9 +451,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
+ ptp_event.index = pin;
+ ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
- ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
+ ptp_event.pps_times.ts_real =
+ ns_to_timespec64(ptp_event.timestamp);
} else {
ptp_event.type = PTP_CLOCK_EXTTS;
}
@@ -470,6 +502,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
+ clock->mdev = mdev;
timecounter_init(&clock->tc, &clock->cycles,
ktime_to_ns(ktime_get_real()));
@@ -482,6 +515,25 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
do_div(ns, NSEC_PER_SEC / 2 / HZ);
clock->overflow_period = ns;
+ mdev->clock_info_page = alloc_page(GFP_KERNEL);
+ if (mdev->clock_info_page) {
+ mdev->clock_info = kmap(mdev->clock_info_page);
+ if (!mdev->clock_info) {
+ __free_page(mdev->clock_info_page);
+ mlx5_core_warn(mdev, "failed to map clock page\n");
+ } else {
+ mdev->clock_info->sign = 0;
+ mdev->clock_info->nsec = clock->tc.nsec;
+ mdev->clock_info->cycles = clock->tc.cycle_last;
+ mdev->clock_info->mask = clock->cycles.mask;
+ mdev->clock_info->mult = clock->nominal_c_mult;
+ mdev->clock_info->shift = clock->cycles.shift;
+ mdev->clock_info->frac = clock->tc.frac;
+ mdev->clock_info->overflow_period =
+ clock->overflow_period;
+ }
+ }
+
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
if (clock->overflow_period)
@@ -521,5 +573,12 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
cancel_work_sync(&clock->pps_info.out_work);
cancel_delayed_work_sync(&clock->overflow_work);
+
+ if (mdev->clock_info) {
+ kunmap(mdev->clock_info_page);
+ __free_page(mdev->clock_info_page);
+ mdev->clock_info = NULL;
+ }
+
kfree(clock->ptp_info.pin_config);
}
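[Example, not part of the patch] mlx5_update_clock_info_page() above implements the writer side of a seqlock-style protocol over the page user space can map: sign goes odd while fields are being updated and lands on an incremented even value when done. A reader-side sketch under that assumption, using the timecounter conversion the fields encode (mask and shift are set once at init and never rewritten, so they may be read outside the retry loop):

/* Sketch: reader side of the clock-info page, e.g. in a user-space library. */
static u64 clock_info_cyc2ns(const struct mlx5_ib_clock_info *ci,
			     u64 cycles_now)
{
	u64 delta, nsec, frac, mult, last;
	u32 sign;

	do {
		sign = READ_ONCE(ci->sign);
		smp_rmb();		/* pairs with smp_store_release() */
		nsec = ci->nsec;
		frac = ci->frac;
		mult = ci->mult;
		last = ci->cycles;
		smp_rmb();
	} while (READ_ONCE(ci->sign) != sign ||
		 (sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING));

	delta = (cycles_now - last) & ci->mask;
	return nsec + ((delta * mult + frac) >> ci->shift);
}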
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index 573f59f46d41..7722a3f9bb68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -121,7 +121,7 @@ EXPORT_SYMBOL_GPL(mlx5_core_reserved_gids_count);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id)
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
@@ -148,6 +148,9 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
memcpy(addr_l3_addr, gid, gidsz);
}
+ if (MLX5_CAP_GEN(dev, num_vhca_ports) > 0)
+ MLX5_SET(set_roce_address_in, in, vhca_port_num, port_num);
+
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8a89c7e8cd63..2ef641c91c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -75,6 +75,8 @@ static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+static u32 sw_owner_id[4];
+
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
@@ -319,6 +321,7 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = &priv->eq_table;
int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
+ int err;
nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
MLX5_EQ_VEC_COMP_BASE;
@@ -328,21 +331,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
if (!priv->irq_info)
- goto err_free_msix;
+ return -ENOMEM;
nvec = pci_alloc_irq_vectors(dev->pdev,
MLX5_EQ_VEC_COMP_BASE + 1, nvec,
PCI_IRQ_MSIX);
- if (nvec < 0)
- return nvec;
+ if (nvec < 0) {
+ err = nvec;
+ goto err_free_irq_info;
+ }
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
-err_free_msix:
+err_free_irq_info:
kfree(priv->irq_info);
- return -ENOMEM;
+ return err;
}
static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@@ -548,6 +553,15 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
cache_line_128byte,
cache_line_size() == 128 ? 1 : 0);
+ if (MLX5_CAP_GEN_MAX(dev, dct))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
+
+ if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
+ MLX5_SET(cmd_hca_cap,
+ set_hca_cap,
+ num_vhca_ports,
+ MLX5_CAP_GEN_MAX(dev, num_vhca_ports));
+
err = set_caps(dev, set_ctx, set_sz,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
@@ -578,8 +592,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
int ret = 0;
/* Disable local_lb by default */
- if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- MLX5_CAP_GEN(dev, disable_local_lb))
+ if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
ret = mlx5_nic_vport_update_local_lb(dev, false);
return ret;
@@ -1105,7 +1118,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
- err = mlx5_cmd_init_hca(dev);
+ err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) {
dev_err(&pdev->dev, "init hca failed\n");
goto err_pagealloc_stop;
@@ -1121,9 +1134,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_poll;
}
- if (boot && mlx5_init_once(dev, priv)) {
- dev_err(&pdev->dev, "sw objs init failed\n");
- goto err_stop_poll;
+ if (boot) {
+ err = mlx5_init_once(dev, priv);
+ if (err) {
+ dev_err(&pdev->dev, "sw objs init failed\n");
+ goto err_stop_poll;
+ }
}
err = mlx5_alloc_irq_vectors(dev);
@@ -1133,8 +1149,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
dev->priv.uar = mlx5_get_uars_page(dev);
- if (!dev->priv.uar) {
+ if (IS_ERR(dev->priv.uar)) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+ err = PTR_ERR(dev->priv.uar);
goto err_disable_msix;
}
@@ -1637,6 +1654,8 @@ static int __init init(void)
{
int err;
+ get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
+
mlx5_core_verify_params();
mlx5_register_debugfs();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index ff4a0b889a6f..394552f36fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -86,7 +86,7 @@ enum {
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
@@ -116,6 +116,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
void mlx5_cq_tasklet_cb(unsigned long data);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
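[Example, not part of the patch] mlx5_core.h now exports mlx5_eq_poll_irq_disabled() so recovery code elsewhere in the core can reap EQEs when an interrupt appears lost. Per the comment added in eq.c it is last-resort only; a hypothetical caller:

/* Sketch: last-resort recovery when a completion seems stuck. */
static void try_recover_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	u32 eqes = mlx5_eq_poll_irq_disabled(eq);

	if (eqes)
		mlx5_core_dbg(dev, "recovered %u EQEs by polling\n", eqes);
}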
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 889130edb715..02d6c5b5d502 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -98,6 +98,11 @@ static u64 sq_allowed_event_types(void)
return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}
+static u64 dct_allowed_event_types(void)
+{
+ return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
+}
+
static bool is_event_type_allowed(int rsc_type, int event_type)
{
switch (rsc_type) {
@@ -107,6 +112,8 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
return BIT(event_type) & rq_allowed_event_types();
case MLX5_EVENT_QUEUE_TYPE_SQ:
return BIT(event_type) & sq_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_DCT:
+ return BIT(event_type) & dct_allowed_event_types();
default:
WARN(1, "Event arrived for unknown resource type");
return false;
@@ -116,6 +123,7 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
+ struct mlx5_core_dct *dct;
struct mlx5_core_qp *qp;
if (!common)
@@ -134,7 +142,11 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
qp = (struct mlx5_core_qp *)common;
qp->event(qp, event_type);
break;
-
+ case MLX5_RES_DCT:
+ dct = (struct mlx5_core_dct *)common;
+ if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
+ complete(&dct->drained);
+ break;
default:
mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
}
@@ -142,9 +154,9 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
mlx5_core_put_rsc(common);
}
-static int create_qprqsq_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- int rsc_type)
+static int create_resource_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp,
+ int rsc_type)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
int err;
@@ -165,8 +177,8 @@ static int create_qprqsq_common(struct mlx5_core_dev *dev,
return 0;
}
-static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp)
+static void destroy_resource_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
unsigned long flags;
@@ -179,6 +191,40 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
wait_for_completion(&qp->common.free);
}
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct,
+ u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
+ u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ init_completion(&dct->drained);
+ MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
+
+ err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (err) {
+ mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
+ return err;
+ }
+
+ qp->qpn = MLX5_GET(create_dct_out, out, dctn);
+ err = create_resource_common(dev, qp, MLX5_RES_DCT);
+ if (err)
+ goto err_cmd;
+
+ return 0;
+err_cmd:
+ MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
+ mlx5_cmd_exec(dev, (void *)&din, sizeof(din),
+ (void *)&dout, sizeof(dout));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in, int inlen)
@@ -197,7 +243,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
- err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
+ err = create_resource_common(dev, qp, MLX5_RES_QP);
if (err)
goto err_cmd;
@@ -220,6 +266,47 @@ err_cmd:
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
+ MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
+ return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+}
+
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ goto destroy;
+ } else {
+ mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
+ return err;
+ }
+ }
+ wait_for_completion(&dct->drained);
+destroy:
+ destroy_resource_common(dev, &dct->mqp);
+ MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
+
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp)
{
@@ -229,7 +316,7 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
mlx5_debug_qp_remove(dev, qp);
- destroy_qprqsq_common(dev, qp);
+ destroy_resource_common(dev, qp);
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
@@ -405,6 +492,20 @@ int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
+ MLX5_SET(query_dct_in, in, dctn, qp->qpn);
+
+ return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)out, outlen);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
+
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
@@ -441,7 +542,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
rq->qpn = rqn;
- err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
+ err = create_resource_common(dev, rq, MLX5_RES_RQ);
if (err)
goto err_destroy_rq;
@@ -457,7 +558,7 @@ EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *rq)
{
- destroy_qprqsq_common(dev, rq);
+ destroy_resource_common(dev, rq);
mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
@@ -473,7 +574,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
sq->qpn = sqn;
- err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
+ err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err)
goto err_destroy_sq;
@@ -489,7 +590,7 @@ EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq)
{
- destroy_qprqsq_common(dev, sq);
+ destroy_resource_common(dev, sq);
mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 5e128d7a9ffd..9e38343a951f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -398,3 +398,217 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
+
+static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(rqc, rqc, hairpin, 1);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+ MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+ return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
+}
+
+static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(sqc, sqc, hairpin, 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+ MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+ return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
+}
+
+static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
+ struct mlx5_hairpin_params *params)
+{
+ int i, j, err;
+
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn[i]);
+ if (err)
+ goto out_err_rq;
+ }
+
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn[i]);
+ if (err)
+ goto out_err_sq;
+ }
+
+ return 0;
+
+out_err_sq:
+ for (j = 0; j < i; j++)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[j]);
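+ /* fall through: every RQ was created, so unwind them all below */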
+ i = hp->num_channels;
+out_err_rq:
+ for (j = 0; j < i; j++)
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[j]);
+ return err;
+}
+
+static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ for (i = 0; i < hp->num_channels; i++) {
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+ }
+}
+
+static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_sq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ void *rqc;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
+ MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ return mlx5_core_modify_rq(func_mdev, rqn,
+ in, MLX5_ST_SZ_BYTES(modify_rq_in));
+}
+
+static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_rq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
+ MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ return mlx5_core_modify_sq(peer_mdev, sqn,
+ in, MLX5_ST_SZ_BYTES(modify_sq_in));
+}
+
+static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
+{
+ int i, j, err;
+
+ /* set peer SQs */
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i],
+ MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn[i]);
+ if (err)
+ goto err_modify_sq;
+ }
+
+ /* set func RQs */
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i],
+ MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn[i]);
+ if (err)
+ goto err_modify_rq;
+ }
+
+ return 0;
+
+err_modify_rq:
+ for (j = 0; j < i; j++)
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[j], MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
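+ /* fall through: every SQ was moved to RDY, so revert them all below */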
+ i = hp->num_channels;
+err_modify_sq:
+ for (j = 0; j < i; j++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[j], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+ return err;
+}
+
+static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ /* unset func RQs */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
+
+ /* unset peer SQs */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params)
+{
+ struct mlx5_hairpin *hp;
+ int size, err;
+
+ size = sizeof(*hp) + params->num_channels * 2 * sizeof(u32);
+ hp = kzalloc(size, GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ hp->func_mdev = func_mdev;
+ hp->peer_mdev = peer_mdev;
+ hp->num_channels = params->num_channels;
+
+ hp->rqn = (void *)hp + sizeof(*hp);
+ hp->sqn = hp->rqn + params->num_channels;
+
+ /* alloc and pair func --> peer hairpin */
+ err = mlx5_hairpin_create_queues(hp, params);
+ if (err)
+ goto err_create_queues;
+
+ err = mlx5_hairpin_pair_queues(hp);
+ if (err)
+ goto err_pair_queues;
+
+ return hp;
+
+err_pair_queues:
+ mlx5_hairpin_destroy_queues(hp);
+err_create_queues:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+{
+ mlx5_hairpin_unpair_queues(hp);
+ mlx5_hairpin_destroy_queues(hp);
+ kfree(hp);
+}
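mlx5_core_hairpin_create() above is the whole lifecycle entry point: the rqn/sqn arrays live in the same allocation as the struct, the queues are created on both devices and then cross-paired, and mlx5_core_hairpin_destroy() reverses the order (unpair, then destroy). A minimal caller-side sketch, with example parameter values that are not validated here:

static struct mlx5_hairpin *my_hairpin_setup(struct mlx5_core_dev *func_mdev,
                                             struct mlx5_core_dev *peer_mdev)
{
        struct mlx5_hairpin_params params = {
                .log_data_size   = 16,  /* example values only */
                .log_num_packets = 8,
                .q_counter       = 0,
                .num_channels    = 4,
        };

        /* returns ERR_PTR() on failure; pair with mlx5_core_hairpin_destroy() */
        return mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
}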
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 222b25908d01..8b97066dd1f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
struct mlx5_uars_page *ret;
mutex_lock(&mdev->priv.bfregs.reg_head.lock);
- if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
- ret = alloc_uars_page(mdev, false);
- if (IS_ERR(ret)) {
- ret = NULL;
- goto out;
- }
- list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
- } else {
+ if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
struct mlx5_uars_page, list);
kref_get(&ret->ref_count);
+ goto out;
}
+ ret = alloc_uars_page(mdev, false);
+ if (IS_ERR(ret))
+ goto out;
+ list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
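The reordering above turns mlx5_get_uars_page() into the usual get-or-create shape: under the list lock, reuse the first cached page via kref_get(), otherwise allocate and insert. Note that the failure value changes from NULL to the ERR_PTR returned by alloc_uars_page(), so callers are expected to check IS_ERR(). A generic sketch of the same shape, all names hypothetical:

static struct my_page *get_or_alloc_page(struct my_cache *c)
{
        struct my_page *p;

        mutex_lock(&c->lock);
        if (!list_empty(&c->pages)) {
                p = list_first_entry(&c->pages, struct my_page, list);
                kref_get(&p->ref);              /* share the cached entry */
                goto out;
        }
        p = alloc_page_obj(c);                  /* hypothetical allocator */
        if (IS_ERR(p))
                goto out;                       /* propagate the ERR_PTR */
        list_add(&p->list, &c->pages);
out:
        mutex_unlock(&c->lock);
        return p;
}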
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d653b0025b13..dfe36cf6fbea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,9 @@
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
+/* Mutex to hold while enabling or disabling RoCE */
+static DEFINE_MUTEX(mlx5_roce_en_lock);
+
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u32 *out, int outlen)
{
@@ -908,23 +911,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
void *in;
int err;
- mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
+ if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
+ !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+ return 0;
+
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_nic_vport_context_in, in,
- field_select.disable_mc_local_lb, 1);
- MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_mc_local_lb, !enable);
-
- MLX5_SET(modify_nic_vport_context_in, in,
- field_select.disable_uc_local_lb, 1);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_uc_local_lb, !enable);
+ if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.disable_mc_local_lb, 1);
+
+ if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.disable_uc_local_lb, 1);
+
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ if (!err)
+ mlx5_core_dbg(mdev, "%s local_lb\n",
+ enable ? "enable" : "disable");
+
kvfree(in);
return err;
}
@@ -988,17 +1001,35 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
- if (atomic_inc_return(&mdev->roce.roce_en) != 1)
- return 0;
- return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
+ int err = 0;
+
+ mutex_lock(&mlx5_roce_en_lock);
+ if (!mdev->roce.roce_en)
+ err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
+
+ if (!err)
+ mdev->roce.roce_en++;
+ mutex_unlock(&mlx5_roce_en_lock);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
- if (atomic_dec_return(&mdev->roce.roce_en) != 0)
- return 0;
- return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
+ int err = 0;
+
+ mutex_lock(&mlx5_roce_en_lock);
+ if (mdev->roce.roce_en) {
+ mdev->roce.roce_en--;
+ if (mdev->roce.roce_en == 0)
+ err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
+
+ if (err)
+ mdev->roce.roce_en++;
+ }
+ mutex_unlock(&mlx5_roce_en_lock);
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
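The rework above trades atomic_inc_return()/atomic_dec_return() for a plain counter under mlx5_roce_en_lock, so the 0<->1 transition and the firmware command it triggers happen as one unit, and a failed command leaves the count unchanged. Reduced to a sketch (hw_set_state() is a hypothetical stand-in for mlx5_nic_vport_update_roce_state()):

static DEFINE_MUTEX(en_lock);
static int en_count;

static int feature_enable(struct mlx5_core_dev *dev)
{
        int err = 0;

        mutex_lock(&en_lock);
        if (!en_count)                          /* first user flips HW state */
                err = hw_set_state(dev, true);  /* hypothetical */
        if (!err)
                en_count++;                     /* count only on success */
        mutex_unlock(&en_lock);
        return err;
}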
@@ -1100,3 +1131,61 @@ ex:
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ err = mlx5_nic_vport_enable_roce(port_mdev);
+ if (err)
+ goto free;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN(master_mdev, vhca_id));
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliation_criteria,
+ MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
+
+ err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ if (err)
+ mlx5_nic_vport_disable_roce(port_mdev);
+
+free:
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
+
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id, 0);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliation_criteria, 0);
+
+ err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ if (!err)
+ mlx5_nic_vport_disable_roce(port_mdev);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f3315bc874ad..3529b545675d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -113,6 +113,7 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
+ bool reload_fail;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
@@ -962,7 +963,28 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
pool_type, p_cur, p_max);
}
+static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
+ int err;
+
+ if (!mlxsw_bus->reset)
+ return -EOPNOTSUPP;
+
+ mlxsw_core_bus_device_unregister(mlxsw_core, true);
+ mlxsw_bus->reset(mlxsw_core->bus_priv);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink);
+ if (err)
+ mlxsw_core->reload_fail = true;
+ return err;
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
+ .reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
@@ -980,23 +1002,26 @@ static const struct devlink_ops mlxsw_devlink_ops = {
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv)
+ void *bus_priv, bool reload,
+ struct devlink *devlink)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
- struct devlink *devlink;
size_t alloc_size;
int err;
mlxsw_driver = mlxsw_core_driver_get(device_kind);
if (!mlxsw_driver)
return -EINVAL;
- alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
- devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
- if (!devlink) {
- err = -ENOMEM;
- goto err_devlink_alloc;
+
+ if (!reload) {
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+ if (!devlink) {
+ err = -ENOMEM;
+ goto err_devlink_alloc;
+ }
}
mlxsw_core = devlink_priv(devlink);
@@ -1012,6 +1037,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_bus_init;
+ if (mlxsw_driver->resources_register && !reload) {
+ err = mlxsw_driver->resources_register(mlxsw_core);
+ if (err)
+ goto err_register_resources;
+ }
+
err = mlxsw_ports_init(mlxsw_core);
if (err)
goto err_ports_init;
@@ -1032,9 +1063,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_emad_init;
- err = devlink_register(devlink, mlxsw_bus_info->dev);
- if (err)
- goto err_devlink_register;
+ if (!reload) {
+ err = devlink_register(devlink, mlxsw_bus_info->dev);
+ if (err)
+ goto err_devlink_register;
+ }
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
@@ -1057,7 +1090,8 @@ err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
@@ -1067,26 +1101,40 @@ err_alloc_lag_mapping:
err_ports_init:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- devlink_free(devlink);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
+err_register_resources:
+ if (!reload)
+ devlink_free(devlink);
err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
+ bool reload)
{
const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ if (mlxsw_core->reload_fail)
+ goto reload_fail;
+
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (reload)
+ return;
+reload_fail:
devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
@@ -1791,6 +1839,22 @@ void mlxsw_core_flush_owq(void)
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct mlxsw_driver *driver = mlxsw_core->driver;
+
+ if (!driver->kvd_sizes_get)
+ return -EINVAL;
+
+ return driver->kvd_sizes_get(mlxsw_core, profile,
+ p_single_size, p_double_size,
+ p_linear_size);
+}
+EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6e966af72fc4..5ddafd74dc00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -66,8 +66,9 @@ void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+ void *bus_priv, bool reload,
+ struct devlink *devlink);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
u8 local_port;
@@ -308,10 +309,20 @@ struct mlxsw_driver {
u32 *p_cur, u32 *p_max);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
+ int (*resources_register)(struct mlxsw_core *mlxsw_core);
+ int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
};
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
@@ -332,6 +343,7 @@ struct mlxsw_bus {
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res);
void (*fini)(void *bus_priv);
+ void (*reset)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 6a979a09ab72..b698fb481b2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -310,9 +310,33 @@ struct mlxsw_afa_block {
struct mlxsw_afa_set *first_set;
struct mlxsw_afa_set *cur_set;
unsigned int cur_act_index; /* In current set. */
- struct list_head fwd_entry_ref_list;
+ struct list_head resource_list; /* List of resources held by actions
+ * in this block.
+ */
};
+struct mlxsw_afa_resource {
+ struct list_head list;
+ void (*destructor)(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource);
+};
+
+static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ list_add(&resource->list, &block->resource_list);
+}
+
+static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_resource *resource, *tmp;
+
+ list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
+ list_del(&resource->list);
+ resource->destructor(block, resource);
+ }
+}
+
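The resource_list introduced here generalizes the old fwd_entry_ref_list: every action that grabs an external resource embeds a struct mlxsw_afa_resource, sets its destructor and links it on the block, and mlxsw_afa_block_destroy() can then release everything without knowing the concrete types. A minimal sketch of hooking a new resource kind into the pattern; the "widget" names and release_widget() are hypothetical:

struct my_widget {
        struct mlxsw_afa_resource resource;     /* embedded, linked on block */
        u32 widget_index;
};

static void my_widget_destructor(struct mlxsw_afa_block *block,
                                 struct mlxsw_afa_resource *resource)
{
        struct my_widget *w = container_of(resource, struct my_widget,
                                           resource);

        release_widget(w->widget_index);        /* hypothetical release hook */
        kfree(w);
}

/* at creation time:
 *      w->resource.destructor = my_widget_destructor;
 *      mlxsw_afa_resource_add(block, &w->resource);
 */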
struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
{
struct mlxsw_afa_block *block;
@@ -320,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return NULL;
- INIT_LIST_HEAD(&block->fwd_entry_ref_list);
+ INIT_LIST_HEAD(&block->resource_list);
block->afa = mlxsw_afa;
/* At least one action set is always present, so just create it here */
@@ -336,8 +360,6 @@ err_first_set_create:
}
EXPORT_SYMBOL(mlxsw_afa_block_create);
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block);
-
void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
{
struct mlxsw_afa_set *set = block->first_set;
@@ -348,7 +370,7 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
mlxsw_afa_set_put(block->afa, set);
set = next_set;
} while (set);
- mlxsw_afa_fwd_entry_refs_destroy(block);
+ mlxsw_afa_resources_destroy(block);
kfree(block);
}
EXPORT_SYMBOL(mlxsw_afa_block_destroy);
@@ -489,10 +511,29 @@ static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
}
struct mlxsw_afa_fwd_entry_ref {
- struct list_head list;
+ struct mlxsw_afa_resource resource;
struct mlxsw_afa_fwd_entry *fwd_entry;
};
+static void
+mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+{
+ mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
+ kfree(fwd_entry_ref);
+}
+
+static void
+mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+
+ fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
+ resource);
+ mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+}
+
static struct mlxsw_afa_fwd_entry_ref *
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
{
@@ -509,7 +550,8 @@ mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
goto err_fwd_entry_get;
}
fwd_entry_ref->fwd_entry = fwd_entry;
- list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list);
+ fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
+ mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
return fwd_entry_ref;
err_fwd_entry_get:
@@ -517,23 +559,51 @@ err_fwd_entry_get:
return ERR_PTR(err);
}
+struct mlxsw_afa_counter {
+ struct mlxsw_afa_resource resource;
+ u32 counter_index;
+};
+
static void
-mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
- struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_counter *counter)
{
- list_del(&fwd_entry_ref->list);
- mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
- kfree(fwd_entry_ref);
+ block->afa->ops->counter_index_put(block->afa->ops_priv,
+ counter->counter_index);
+ kfree(counter);
+}
+
+static void
+mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_counter *counter;
+
+ counter = container_of(resource, struct mlxsw_afa_counter, resource);
+ mlxsw_afa_counter_destroy(block, counter);
}
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block)
+static struct mlxsw_afa_counter *
+mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
{
- struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
- struct mlxsw_afa_fwd_entry_ref *tmp;
+ struct mlxsw_afa_counter *counter;
+ int err;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->counter_index_get(block->afa->ops_priv,
+ &counter->counter_index);
+ if (err)
+ goto err_counter_index_get;
+ counter->resource.destructor = mlxsw_afa_counter_destructor;
+ mlxsw_afa_resource_add(block, &counter->resource);
+ return counter;
- list_for_each_entry_safe(fwd_entry_ref, tmp,
- &block->fwd_entry_ref_list, list)
- mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+err_counter_index_get:
+ kfree(counter);
+ return ERR_PTR(err);
}
#define MLXSW_AFA_ONE_ACTION_LEN 32
@@ -690,6 +760,16 @@ MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
*/
MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);
+/* afa_trapdisc_mirror_agent
+ * Mirror agent.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);
+
+/* afa_trapdisc_mirror_enable
+ * Mirror enable.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);
+
static inline void
mlxsw_afa_trapdisc_pack(char *payload,
enum mlxsw_afa_trapdisc_trap_action trap_action,
@@ -701,6 +781,14 @@ mlxsw_afa_trapdisc_pack(char *payload,
mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
}
+static inline void
+mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
+ u8 mirror_agent)
+{
+ mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
+ mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
+}
+
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
{
char *act = mlxsw_afa_block_append_action(block,
@@ -746,6 +834,104 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
+struct mlxsw_afa_mirror {
+ struct mlxsw_afa_resource resource;
+ int span_id;
+ u8 local_in_port;
+ u8 local_out_port;
+ bool ingress;
+};
+
+static void
+mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_mirror *mirror)
+{
+ block->afa->ops->mirror_del(block->afa->ops_priv,
+ mirror->local_in_port,
+ mirror->local_out_port,
+ mirror->ingress);
+ kfree(mirror);
+}
+
+static void
+mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_mirror *mirror;
+
+ mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
+ mlxsw_afa_mirror_destroy(block, mirror);
+}
+
+static struct mlxsw_afa_mirror *
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port,
+ bool ingress)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
+ if (!mirror)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->mirror_add(block->afa->ops_priv,
+ local_in_port, local_out_port,
+ ingress, &mirror->span_id);
+ if (err)
+ goto err_mirror_add;
+
+ mirror->ingress = ingress;
+ mirror->local_out_port = local_out_port;
+ mirror->local_in_port = local_in_port;
+ mirror->resource.destructor = mlxsw_afa_mirror_destructor;
+ mlxsw_afa_resource_add(block, &mirror->resource);
+ return mirror;
+
+err_mirror_add:
+ kfree(mirror);
+ return ERR_PTR(err);
+}
+
+static int
+mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
+ u8 mirror_agent)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_TRAPDISC_CODE,
+ MLXSW_AFA_TRAPDISC_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
+ mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
+ return 0;
+}
+
+int
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port, bool ingress)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
+ ingress);
+ if (IS_ERR(mirror))
+ return PTR_ERR(mirror);
+
+ err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
+ if (err)
+ goto err_append_allocated_mirror;
+
+ return 0;
+
+err_append_allocated_mirror:
+ mlxsw_afa_mirror_destroy(block, mirror);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);
+
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
@@ -853,11 +1039,10 @@ mlxsw_afa_polcnt_pack(char *payload,
mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}
-int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 counter_index)
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_POLCNT_CODE,
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
MLXSW_AFA_POLCNT_SIZE);
if (!act)
return -ENOBUFS;
@@ -865,6 +1050,32 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
counter_index);
return 0;
}
+EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 *p_counter_index)
+{
+ struct mlxsw_afa_counter *counter;
+ u32 counter_index;
+ int err;
+
+ counter = mlxsw_afa_counter_create(block);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+ counter_index = counter->counter_index;
+
+ err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
+ if (err)
+ goto err_append_allocated_counter;
+
+ if (p_counter_index)
+ *p_counter_index = counter_index;
+ return 0;
+
+err_append_allocated_counter:
+ mlxsw_afa_counter_destroy(block, counter);
+ return err;
+}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
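With the new signature, mlxsw_afa_block_append_counter() allocates the counter index itself and returns it through p_counter_index, while mlxsw_afa_block_append_allocated_counter() keeps the old behaviour for callers that manage indexes on their own. A condensed usage sketch, assuming a block already exists:

u32 counter_index;
int err;

err = mlxsw_afa_block_append_counter(block, &counter_index);
if (err)
        return err;
/* counter_index can now be used to read statistics; the counter itself is
 * released via its destructor when the block is destroyed.
 */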
/* Virtual Router and Forwarding Domain Action
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index a8d3314c3a24..43132293475c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -46,6 +46,12 @@ struct mlxsw_afa_ops {
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
+ int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
+ void (*counter_index_put)(void *priv, unsigned int counter_index);
+ int (*mirror_add)(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress, int *p_span_id);
+ void (*mirror_del)(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress);
};
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -63,12 +69,17 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
+int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port,
+ bool ingress);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et);
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 counter_index);
+ u32 *p_counter_index);
int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index c0dcfa05b077..25f9915ebd82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -539,7 +539,8 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->dev = &client->dev;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
- &mlxsw_i2c_bus, mlxsw_i2c);
+ &mlxsw_i2c_bus, mlxsw_i2c, false,
+ NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
return err;
@@ -557,7 +558,7 @@ static int mlxsw_i2c_remove(struct i2c_client *client)
{
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
- mlxsw_core_bus_device_unregister(mlxsw_i2c->core);
+ mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
mutex_destroy(&mlxsw_i2c->cmd.lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 28427f0758c7..31c886edc791 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -42,7 +42,7 @@
struct mlxsw_item {
unsigned short offset; /* bytes in container */
- unsigned short step; /* step in bytes for indexed items */
+ short step; /* step in bytes for indexed items */
unsigned short in_step_offset; /* offset within one step */
unsigned char shift; /* shift in bits */
unsigned char element_size; /* size of element in bit array */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 23f7d828cf67..85faa87bf42d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -154,6 +154,7 @@ struct mlxsw_pci {
} comp;
} cmd;
struct mlxsw_bus_info bus_info;
+ const struct pci_device_id *id;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -1052,38 +1053,18 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
static int
-mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
{
- u32 single_size, double_size, linear_size;
-
- if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
- !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
- !profile->used_kvd_split_data)
- return -EIO;
-
- linear_size = profile->kvd_linear_size;
+ u64 single_size, double_size, linear_size;
+ int err;
- /* The hash part is what left of the kvd without the
- * linear part. It is split to the single size and
- * double size by the parts ratio from the profile.
- * Both sizes must be a multiplications of the
- * granularity from the profile.
- */
- double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
- double_size *= profile->kvd_hash_double_parts;
- double_size /= profile->kvd_hash_double_parts +
- profile->kvd_hash_single_parts;
- double_size /= profile->kvd_hash_granularity;
- double_size *= profile->kvd_hash_granularity;
- single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
- linear_size;
-
- /* Check results are legal. */
- if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
- double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
- MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
- return -EIO;
+ err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
+ &single_size, &double_size,
+ &linear_size);
+ if (err)
+ return err;
MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
@@ -1184,7 +1165,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mbox, profile->adaptive_routing_group_cap);
}
if (MLXSW_RES_VALID(res, KVD_SIZE)) {
- err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
+ err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
return err;
@@ -1622,16 +1603,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
return err;
}
-static const struct mlxsw_bus mlxsw_pci_bus = {
- .kind = "pci",
- .init = mlxsw_pci_init,
- .fini = mlxsw_pci_fini,
- .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
- .skb_transmit = mlxsw_pci_skb_transmit,
- .cmd_exec = mlxsw_pci_cmd_exec,
- .features = MLXSW_BUS_F_TXRX,
-};
-
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
const struct pci_device_id *id)
{
@@ -1643,7 +1614,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
- wmb(); /* reset needs to be written before we read control register */
+ /* Reset needs to be written before we read the control register, and
+ * we must wait for the HW to become responsive once again.
+ */
+ wmb();
+ msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
@@ -1655,6 +1631,41 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
+static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ pci_free_irq_vectors(mlxsw_pci->pdev);
+}
+
+static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
+ if (err < 0)
+ dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
+ return err;
+}
+
+static void mlxsw_pci_reset(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
+ mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+ mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+ .features = MLXSW_BUS_F_TXRX,
+ .reset = mlxsw_pci_reset,
+};
+
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
@@ -1716,7 +1727,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_sw_reset;
}
- err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
if (err < 0) {
dev_err(&pdev->dev, "MSI-X init failed\n");
goto err_msix_init;
@@ -1725,9 +1736,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
+ mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
- &mlxsw_pci_bus, mlxsw_pci);
+ &mlxsw_pci_bus, mlxsw_pci, false,
+ NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
@@ -1736,7 +1749,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_msix_init:
err_sw_reset:
iounmap(mlxsw_pci->hw_addr);
@@ -1755,8 +1768,8 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
{
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
- mlxsw_core_bus_device_unregister(mlxsw_pci->core);
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a6441208e9d9..fb082ad21b00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6c4e08b8058a..0e08be41c8e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4827,6 +4827,42 @@ static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
mlxsw_reg_ratr_counter_set_type_set(payload, set_type);
}
+/* RDPM - Router DSCP to Priority Mapping
+ * --------------------------------------
+ * Controls the mapping from DSCP field to switch priority on routed packets
+ */
+#define MLXSW_REG_RDPM_ID 0x8009
+#define MLXSW_REG_RDPM_BASE_LEN 0x00
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN 0x01
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_RDPM_LEN 0x40
+#define MLXSW_REG_RDPM_LAST_ENTRY (MLXSW_REG_RDPM_BASE_LEN + \
+ MLXSW_REG_RDPM_LEN - \
+ MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN)
+
+MLXSW_REG_DEFINE(rdpm, MLXSW_REG_RDPM_ID, MLXSW_REG_RDPM_LEN);
+
+/* reg_rdpm_dscp_entry_e
+ * Enable update of the specific entry
+ * Access: Index
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_e, MLXSW_REG_RDPM_LAST_ENTRY, 7, 1,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_rdpm_dscp_entry_prio
+ * Switch Priority
+ * Access: RW
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_prio, MLXSW_REG_RDPM_LAST_ENTRY, 0, 4,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_rdpm_pack(char *payload, unsigned short index,
+ u8 prio)
+{
+ mlxsw_reg_rdpm_dscp_entry_e_set(payload, index, 1);
+ mlxsw_reg_rdpm_dscp_entry_prio_set(payload, index, prio);
+}
+
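These RDPM fields depend on the item.h change earlier in this patch that made the per-item step a signed short: the records are laid out from the end of the register backwards, so the byte offset of record i is the base minus i times the record length. A worked check of the arithmetic, as a hypothetical helper macro:

/* BASE_LEN 0x00, LEN 0x40, REC_LEN 0x01  =>  LAST_ENTRY = 0x3f.
 * Index 0 -> byte 0x3f, index 1 -> byte 0x3e, ..., index 63 -> byte 0x00,
 * which is exactly what a step of -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN encodes.
 */
#define RDPM_ENTRY_OFFSET(index) \
        (MLXSW_REG_RDPM_LAST_ENTRY - \
         (index) * MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN)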
/* RICNT - Router Interface Counter Register
* -----------------------------------------
* The RICNT register retrieves per port performance counters
@@ -7640,6 +7676,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
MLXSW_REG(rrcr),
MLXSW_REG(ralta),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9bd8d28de152..3dcc58d61506 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -76,12 +76,7 @@
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1530
#define MLXSW_FWREV_SUBMINOR 152
-
-static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
- .major = MLXSW_FWREV_MAJOR,
- .minor = MLXSW_FWREV_MINOR,
- .subminor = MLXSW_FWREV_SUBMINOR
-};
+#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
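MLXSW_FWREV_MINOR_TO_BRANCH() reduces a minor version to its firmware branch, so the validation below only insists that the branch matches rather than requiring an exact or newer minor. A few worked values:

/* MLXSW_FWREV_MINOR_TO_BRANCH(1530) == 15   the driver's own branch
 * MLXSW_FWREV_MINOR_TO_BRANCH(1599) == 15   same branch: accepted as-is
 * MLXSW_FWREV_MINOR_TO_BRANCH(1420) == 14   other branch: flash MLXSW_SP_FW_FILENAME
 */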
#define MLXSW_SP_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
@@ -339,28 +334,25 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}
-static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
- const struct mlxsw_fw_rev *b)
-{
- if (a->major != b->major)
- return a->major > b->major;
- if (a->minor != b->minor)
- return a->minor > b->minor;
- return a->subminor >= b->subminor;
-}
-
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
const struct firmware *firmware;
int err;
- if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
+ /* Validate driver & FW are compatible */
+ if (rev->major != MLXSW_FWREV_MAJOR) {
+ WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
+ rev->major, MLXSW_FWREV_MAJOR);
+ return -EINVAL;
+ }
+ if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+ MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
+ dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
- dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
+ dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
MLXSW_SP_FW_FILENAME);
err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
@@ -576,7 +568,7 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
span_entry->used = false;
}
-static struct mlxsw_sp_span_entry *
+struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
int i;
@@ -677,13 +669,28 @@ mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type)
+ enum mlxsw_sp_span_type type,
+ bool bind)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
char mpar_pl[MLXSW_REG_MPAR_LEN];
- char sbib_pl[MLXSW_REG_SBIB_LEN];
int pa_id = span_entry->id;
+
+ /* bind the port to the SPAN entry */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
+
+static int
+mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
int err;
/* if it is an egress SPAN, bind a shared buffer to it */
@@ -699,12 +706,12 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
}
}
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
- if (err)
- goto err_mpar_reg_write;
+ if (bind) {
+ err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ true);
+ if (err)
+ goto err_port_bind;
+ }
inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
if (!inspected_port) {
@@ -717,8 +724,11 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
return 0;
-err_mpar_reg_write:
err_inspected_port_alloc:
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+err_port_bind:
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
@@ -727,25 +737,22 @@ err_inspected_port_alloc:
}
static void
-mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type)
+mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
{
struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
char sbib_pl[MLXSW_REG_SBIB_LEN];
- int pa_id = span_entry->id;
inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
if (!inspected_port)
return;
- /* remove the inspected port */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
/* remove the SBIB buffer if it was egress SPAN */
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
@@ -758,9 +765,9 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
kfree(inspected_port);
}
-static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type)
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type, bool bind)
{
struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
struct mlxsw_sp_span_entry *span_entry;
@@ -773,7 +780,7 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
span_entry->id);
- err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
+ err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
if (err)
goto err_port_bind;
@@ -784,9 +791,8 @@ err_port_bind:
return err;
}
-static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
- u8 destination_port,
- enum mlxsw_sp_span_type type)
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
+ enum mlxsw_sp_span_type type, bool bind)
{
struct mlxsw_sp_span_entry *span_entry;
@@ -799,7 +805,7 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
span_entry->id);
- mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
+ mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1571,14 +1577,11 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
const struct tc_action *a,
bool ingress)
{
- struct net *net = dev_net(mlxsw_sp_port->dev);
enum mlxsw_sp_span_type span_type;
struct mlxsw_sp_port *to_port;
struct net_device *to_dev;
- int ifindex;
- ifindex = tcf_mirred_ifindex(a);
- to_dev = __dev_get_by_index(net, ifindex);
+ to_dev = tcf_mirred_dev(a);
if (!to_dev) {
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
return -EINVAL;
@@ -1593,7 +1596,8 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
mirror->to_local_port = to_port->local_port;
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
+ true);
}
static void
@@ -1604,8 +1608,8 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
span_type = mirror->ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
- span_type);
+ mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
+ span_type, true);
}
static int
@@ -1734,9 +1738,6 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *f,
bool ingress)
{
- if (f->common.chain_index)
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
@@ -1750,72 +1751,187 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_flower_offload *f,
- bool ingress)
+mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
+ struct tc_cls_flower_offload *f)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
+
switch (f->command) {
case TC_CLSFLOWER_REPLACE:
- return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
case TC_CLSFLOWER_DESTROY:
- mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
+ mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
return 0;
case TC_CLSFLOWER_STATS:
- return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv, bool ingress)
+static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
- if (!tc_can_offload(mlxsw_sp_port->dev))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSMATCHALL:
+ if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
+ type_data))
+ return -EOPNOTSUPP;
+
return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
ingress);
case TC_SETUP_CLSFLOWER:
- return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
- ingress);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, true);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, false);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return 0;
+ case TC_SETUP_CLSFLOWER:
+ if (mlxsw_sp_acl_block_disabled(acl_block))
+ return -EOPNOTSUPP;
+
+ return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int
+mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb) {
+ acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+ if (!acl_block)
+ return -ENOMEM;
+ block_cb = __tcf_block_cb_register(block,
+ mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp, acl_block);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ } else {
+ acl_block = tcf_block_cb_priv(block_cb);
+ }
+ tcf_block_cb_incref(block_cb);
+ err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (err)
+ goto err_block_bind;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = acl_block;
+ else
+ mlxsw_sp_port->eg_acl_block = acl_block;
+
+ return 0;
+
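+ /* err_cb_register jumps into the branch below: a block_cb that just
+ * failed to register holds no reference, so only the freshly created
+ * acl_block needs to be destroyed.
+ */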
+err_block_bind:
+ if (!tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+err_cb_register:
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
+ return err;
}
-static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static void
+mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb)
+ return;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = NULL;
+ else
+ mlxsw_sp_port->eg_acl_block = NULL;
+
+ acl_block = tcf_block_cb_priv(block_cb);
+ err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (!err && !tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
}
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_block_offload *f)
{
tc_setup_cb_t *cb;
+ bool ingress;
+ int err;
- if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_ig;
- else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_eg;
- else
+ if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
+ ingress = true;
+ } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
+ ingress = false;
+ } else {
return -EOPNOTSUPP;
+ }
switch (f->command) {
case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port);
+ err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+ mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
+ f->block, ingress);
+ if (err) {
+ tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+ return err;
+ }
+ return 0;
case TC_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
+ f->block, ingress);
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
return 0;
default:
@@ -1833,11 +1949,69 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_RED:
return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_PRIO:
+ return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
}
+
+static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ if (!enable) {
+ if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
+ mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
+ !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+ netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+ } else {
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
+ }
+ return 0;
+}
+
+typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
+
+static int mlxsw_sp_handle_feature(struct net_device *dev,
+ netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlxsw_sp_feature_handler feature_handler)
+{
+ netdev_features_t changes = wanted_features ^ dev->features;
+ bool enable = !!(wanted_features & feature);
+ int err;
+
+ if (!(changes & feature))
+ return 0;
+
+ err = feature_handler(dev, enable);
+ if (err) {
+ netdev_err(dev, "%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
+ return err;
+ }
+
+ if (enable)
+ dev->features |= feature;
+ else
+ dev->features &= ~feature;
+
+ return 0;
+}
+
+static int mlxsw_sp_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+ mlxsw_sp_feature_hw_tc);
+}
+
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
@@ -1852,6 +2026,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
+ .ndo_set_features = mlxsw_sp_set_features,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -3039,6 +3214,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_fids_init;
}
+ err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_qdiscs_init;
+ }
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3067,6 +3249,8 @@ err_register_netdev:
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+err_port_qdiscs_init:
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3102,6 +3286,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -3933,6 +4118,253 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.resource_query_enable = 1,
};
+static bool
+mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
+ u64 size)
+{
+ const struct mlxsw_config_profile *profile;
+
+ profile = &mlxsw_sp_config_profile;
+ if (size % profile->kvd_hash_granularity) {
+ NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
+ return false;
+ }
+ return true;
+}
+
+static int
+mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
+}
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
+ .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
+static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
+
+static void
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+{
+ u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_SINGLE_MIN_SIZE);
+ u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_DOUBLE_MIN_SIZE);
+ u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ u32 linear_size_min = 0;
+
+ /* KVD top resource */
+ mlxsw_sp_kvd_size_params.size_min = kvd_size;
+ mlxsw_sp_kvd_size_params.size_max = kvd_size;
+ mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Linear part init */
+ mlxsw_sp_linear_size_params.size_min = linear_size_min;
+ mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
+ double_size_min;
+ mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash double part init */
+ mlxsw_sp_hash_double_size_params.size_min = double_size_min;
+ mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash single part init */
+ mlxsw_sp_hash_single_size_params.size_min = single_size_min;
+ mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+}
+
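The bounds prepared above all follow one rule: a partition may grow until
only the other partitions' minimum sizes remain. A standalone sketch of the
arithmetic with made-up firmware values (the real ones come from
MLXSW_CORE_RES_GET):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative values only. */
	#define KVD_SIZE	245760
	#define SINGLE_MIN	 65536
	#define DOUBLE_MIN	 32768
	#define LINEAR_MIN	     0

	int main(void)
	{
		uint32_t linear_max = KVD_SIZE - SINGLE_MIN - DOUBLE_MIN;
		uint32_t double_max = KVD_SIZE - SINGLE_MIN - LINEAR_MIN;
		uint32_t single_max = KVD_SIZE - DOUBLE_MIN - LINEAR_MIN;

		printf("linear:      [%u, %u]\n", LINEAR_MIN, linear_max);
		printf("hash double: [%u, %u]\n", DOUBLE_MIN, double_max);
		printf("hash single: [%u, %u]\n", SINGLE_MIN, single_max);
		return 0;
	}
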
+static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 kvd_size, single_size, double_size, linear_size;
+ const struct mlxsw_config_profile *profile;
+ int err;
+
+ profile = &mlxsw_sp_config_profile;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ true, kvd_size,
+ MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &mlxsw_sp_kvd_size_params,
+ &mlxsw_sp_resource_kvd_ops);
+ if (err)
+ return err;
+
+ linear_size = profile->kvd_linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ false, linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_linear_size_params,
+ &mlxsw_sp_resource_kvd_linear_ops);
+ if (err)
+ return err;
+
+ double_size = kvd_size - linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ double_size = rounddown(double_size, profile->kvd_hash_granularity);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ false, double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_double_size_params,
+ &mlxsw_sp_resource_kvd_hash_double_ops);
+ if (err)
+ return err;
+
+ single_size = kvd_size - double_size - linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ false, single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_single_size_params,
+ &mlxsw_sp_resource_kvd_hash_single_ops);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 double_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+	/* The hash part is what is left of the KVD after the
+	 * linear part is taken out. It is split into single and
+	 * double sizes according to the parts ratio from the
+	 * profile, and both sizes must be multiples of the
+	 * profile's granularity. If the user provided the sizes,
+	 * they are obtained via devlink.
+	 */
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
+ if (err)
+ *p_linear_size = profile->kvd_linear_size;
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
+ if (err) {
+ double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ *p_double_size = rounddown(double_size,
+ profile->kvd_hash_granularity);
+ }
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
+ if (err)
+ *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_double_size - *p_linear_size;
+
+ /* Check results are legal. */
+ if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
+ return -EIO;
+
+ return 0;
+}
+
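When devlink carries no user-provided sizes, the fallback above splits the
hash area (total minus linear) by the profile's parts ratio and rounds the
double part down to the granularity; the single part takes whatever is left.
A worked example of that computation (profile numbers are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define KVD_SIZE	245760
	#define LINEAR_SIZE	 98304
	#define DOUBLE_PARTS	     2
	#define SINGLE_PARTS	     1
	#define GRANULARITY	   128

	static uint32_t rounddown_gran(uint32_t x)
	{
		return x - (x % GRANULARITY);
	}

	int main(void)
	{
		uint32_t hash = KVD_SIZE - LINEAR_SIZE;
		uint32_t dbl = rounddown_gran(hash * DOUBLE_PARTS /
					      (DOUBLE_PARTS + SINGLE_PARTS));
		uint32_t single = KVD_SIZE - dbl - LINEAR_SIZE;

		/* Prints: linear=98304 double=98304 single=49152 */
		printf("linear=%u double=%u single=%u\n",
		       LINEAR_SIZE, dbl, single);
		return 0;
	}
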
static struct mlxsw_driver mlxsw_sp_driver = {
.kind = mlxsw_sp_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@@ -3952,6 +4384,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp_resources_register,
+ .kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
};
@@ -4376,7 +4810,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
}
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev)) {
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
NL_SET_ERR_MSG(extack,
"spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
@@ -4504,6 +4941,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
@@ -4520,7 +4958,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
}
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev)) {
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 432ab9b12b7f..bdd8f94a452c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -66,6 +66,18 @@
#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
+#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+
+enum mlxsw_sp_resource_id {
+ MLXSW_SP_RESOURCE_KVD,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+};
+
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
@@ -204,29 +216,6 @@ struct mlxsw_sp_port_vlan {
struct list_head bridge_vlan_node;
};
-enum mlxsw_sp_qdisc_type {
- MLXSW_SP_QDISC_NO_QDISC,
- MLXSW_SP_QDISC_RED,
-};
-
-struct mlxsw_sp_qdisc {
- u32 handle;
- enum mlxsw_sp_qdisc_type type;
- struct red_stats xstats_base;
- union {
- struct {
- u64 tail_drop_base;
- u64 ecn_base;
- u64 wred_drop_base;
- } red;
- } xstats;
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 drops;
- u64 overlimits;
-};
-
/* No internal lock needed; at worst we miss a single periodic iteration. */
struct mlxsw_sp_port_xstats {
u64 ecn;
@@ -269,7 +258,10 @@ struct mlxsw_sp_port {
} periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
- struct mlxsw_sp_qdisc root_qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc;
+ unsigned acl_rule_count;
+ struct mlxsw_sp_acl_block *ing_acl_block;
+ struct mlxsw_sp_acl_block *eg_acl_block;
};
static inline bool
@@ -365,6 +357,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev);
/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -402,6 +396,16 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type,
+ bool bind);
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
+ u8 destination_port,
+ enum mlxsw_sp_span_type type,
+ bool bind);
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -456,13 +460,13 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
unsigned int counter_index;
- bool counter_valid;
};
enum mlxsw_sp_acl_profile {
@@ -475,8 +479,11 @@ struct mlxsw_sp_acl_profile_ops {
void *priv, void *ruleset_priv);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct net_device *dev, bool ingress);
- void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
u16 (*ruleset_group_id)(void *ruleset_priv);
size_t rule_priv_size;
int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
@@ -496,17 +503,34 @@ struct mlxsw_sp_acl_ops {
enum mlxsw_sp_acl_profile profile);
};
+struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net);
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
@@ -530,6 +554,10 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_block *block,
+ struct net_device *out_dev);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev);
@@ -573,16 +601,23 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
/* spectrum_flower.c */
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
/* spectrum_qdisc.c */
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p);
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 93dcd315f7d6..0897a5435cc2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
#include "reg.h"
@@ -70,9 +71,23 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
-struct mlxsw_sp_acl_ruleset_ht_key {
- struct net_device *dev; /* dev this ruleset is bound to */
+struct mlxsw_sp_acl_block_binding {
+ struct list_head list;
+ struct net_device *dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
bool ingress;
+};
+
+struct mlxsw_sp_acl_block {
+ struct list_head binding_list;
+ struct mlxsw_sp_acl_ruleset *ruleset_zero;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int rule_count;
+ unsigned int disable_count;
+};
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+ struct mlxsw_sp_acl_block *block;
u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops;
};
@@ -118,8 +133,185 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp->acl->dummy_fid;
}
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
+{
+ return block->mlxsw_sp;
+}
+
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+{
+ return block ? block->rule_count : 0;
+}
+
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count++;
+}
+
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count--;
+}
+
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+{
+ return block->disable_count;
+}
+
+static int
+mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static void
+mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static int
+mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+
+ return err;
+}
+
+static void
+mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+}
+
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net)
+{
+ struct mlxsw_sp_acl_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ block->mlxsw_sp = mlxsw_sp;
+ return block;
+}
+
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
+{
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct mlxsw_sp_acl_block_binding *
+mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+ binding->ingress == ingress)
+ return binding;
+ return NULL;
+}
+
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
+ return -EEXIST;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->ingress = ingress;
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block)) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+ return err;
+}
+
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block))
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+ kfree(binding);
+ return 0;
+}
+
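An ACL block is effectively a shared container: a list of (port, direction)
bindings plus an optional chain-0 ruleset that must end up bound on every
member, no matter whether the ruleset or the binding arrives first. A minimal
userspace sketch of the bind-side invariant (flattened hypothetical types, no
kernel list API):

	#include <stdbool.h>
	#include <stdio.h>

	struct binding {
		int port;
		bool ingress;
	};

	struct block {
		struct binding bindings[8];
		int n_bindings;
		bool has_ruleset_zero;
	};

	/* Mirrors mlxsw_sp_acl_block_bind(): reject duplicates, then bind
	 * the chain-0 ruleset to the new member if one is installed. */
	static int block_bind(struct block *b, int port, bool ingress)
	{
		for (int i = 0; i < b->n_bindings; i++)
			if (b->bindings[i].port == port &&
			    b->bindings[i].ingress == ingress)
				return -1;	/* -EEXIST */

		if (b->has_ruleset_zero)
			printf("bind ruleset zero to port %d %s\n",
			       port, ingress ? "ingress" : "egress");

		b->bindings[b->n_bindings++] =
			(struct binding){ port, ingress };
		return 0;
	}

	int main(void)
	{
		struct block b = { .has_ruleset_zero = true };

		block_bind(&b, 1, true);
		printf("duplicate bind: %d\n", block_bind(&b, 1, true));
		return 0;
	}
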
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
@@ -132,6 +324,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->ref_count = 1;
+ ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
@@ -142,68 +336,50 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_ops_ruleset_add;
- return ruleset;
-
-err_ops_ruleset_add:
- rhashtable_destroy(&ruleset->rule_ht);
-err_rhashtable_init:
- kfree(ruleset);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-
- ops->ruleset_del(mlxsw_sp, ruleset->priv);
- rhashtable_destroy(&ruleset->rule_ht);
- kfree(ruleset);
-}
-
-static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset,
- struct net_device *dev, bool ingress,
- u32 chain_index)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- int err;
-
- ruleset->ht_key.dev = dev;
- ruleset->ht_key.ingress = ingress;
- ruleset->ht_key.chain_index = chain_index;
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
if (err)
- return err;
- if (!ruleset->ht_key.chain_index) {
+ goto err_ht_insert;
+
+ if (!chain_index) {
		/* We only need the ruleset with chain index 0, the
		 * implicit one, to be directly bound to the device. The
		 * rest of the rulesets are bound by a "goto" action set.
		 */
- err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
if (err)
- goto err_ops_ruleset_bind;
+ goto err_ruleset_bind;
}
- return 0;
-err_ops_ruleset_bind:
+ return ruleset;
+
+err_ruleset_bind:
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
- return err;
+err_ht_insert:
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+err_ops_ruleset_add:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
}
-static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
+static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ u32 chain_index = ruleset->ht_key.chain_index;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- if (!ruleset->ht_key.chain_index)
- ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
+ if (!chain_index)
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset);
}
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
@@ -216,20 +392,18 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
{
if (--ruleset->ref_count)
return;
- mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
static struct mlxsw_sp_acl_ruleset *
-__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
- bool ingress, u32 chain_index,
+__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
- ht_key.dev = dev;
- ht_key.ingress = ingress;
+ ht_key.block = block;
ht_key.chain_index = chain_index;
ht_key.ops = ops;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
@@ -237,8 +411,8 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
@@ -248,45 +422,31 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (!ruleset)
return ERR_PTR(-ENOENT);
return ruleset;
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- int err;
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (ruleset) {
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
return ruleset;
}
- ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
- if (IS_ERR(ruleset))
- return ruleset;
- err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
- ingress, chain_index);
- if (err)
- goto err_ruleset_bind;
- return ruleset;
-
-err_ruleset_bind:
- mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
- return ERR_PTR(err);
+ return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
}
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -302,27 +462,6 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
return ops->ruleset_group_id(ruleset->priv);
}
-static int
-mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- int err;
-
- err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
- if (err)
- return err;
- rulei->counter_valid = true;
- return 0;
-}
-
-static void
-mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- rulei->counter_valid = false;
- mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
-}
-
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
@@ -427,6 +566,34 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
local_port, in_port);
}
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_block *block,
+ struct net_device *out_dev)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_port *out_port;
+ struct mlxsw_sp_port *in_port;
+
+ if (!list_is_singular(&block->binding_list))
+ return -EOPNOTSUPP;
+
+ binding = list_first_entry(&block->binding_list,
+ struct mlxsw_sp_acl_block_binding, list);
+ in_port = binding->mlxsw_sp_port;
+ if (!mlxsw_sp_port_dev_check(out_dev))
+ return -EINVAL;
+
+ out_port = netdev_priv(out_dev);
+ if (out_port->mlxsw_sp != mlxsw_sp)
+ return -EINVAL;
+
+ return mlxsw_afa_block_append_mirror(rulei->act_block,
+ in_port->local_port,
+ out_port->local_port,
+ binding->ingress);
+}
+
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 action, u16 vid, u16 proto, u8 prio)
@@ -459,7 +626,7 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
return mlxsw_afa_block_append_counter(rulei->act_block,
- rulei->counter_index);
+ &rulei->counter_index);
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
@@ -493,13 +660,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
goto err_rulei_create;
}
- err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
- if (err)
- goto err_counter_alloc;
return rule;
-err_counter_alloc:
- mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
kfree(rule);
err_alloc:
@@ -512,7 +674,6 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
- mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
mlxsw_sp_acl_rulei_destroy(rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -535,6 +696,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_insert;
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ ruleset->ht_key.block->rule_count++;
return 0;
err_rhashtable_insert:
@@ -548,6 +710,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 4d3340ed0291..6ca6894125f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -108,11 +108,77 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
+static int
+mlxsw_sp_act_counter_index_get(void *priv, unsigned int *p_counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return mlxsw_sp_flow_counter_alloc(mlxsw_sp, p_counter_index);
+}
+
+static void
+mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
+}
+
+static int
+mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress, int *p_span_id)
+{
+ struct mlxsw_sp_port *in_port, *out_port;
+ struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ enum mlxsw_sp_span_type type;
+ int err;
+
+ type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ out_port = mlxsw_sp->ports[local_out_port];
+ in_port = mlxsw_sp->ports[local_in_port];
+
+ err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false);
+ if (err)
+ return err;
+
+ span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port);
+ if (!span_entry) {
+ err = -ENOENT;
+ goto err_span_entry_find;
+ }
+
+ *p_span_id = span_entry->id;
+ return 0;
+
+err_span_entry_find:
+ mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+ return err;
+}
+
+static void
+mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *in_port;
+ enum mlxsw_sp_span_type type;
+
+ type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ in_port = mlxsw_sp->ports[local_in_port];
+
+ mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+}
+
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
.kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
};
int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7e8284b46968..c6e180c2be1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -154,10 +154,6 @@ struct mlxsw_sp_acl_tcam_group {
struct list_head region_list;
unsigned int region_count;
struct rhashtable chunk_ht;
- struct {
- u16 local_port;
- bool ingress;
- } bound;
struct mlxsw_sp_acl_tcam_group_ops *ops;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
@@ -262,35 +258,29 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
- struct mlxsw_sp_port *mlxsw_sp_port;
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- if (!mlxsw_sp_port_dev_check(dev))
- return -EINVAL;
-
- mlxsw_sp_port = netdev_priv(dev);
- group->bound.local_port = mlxsw_sp_port->local_port;
- group->bound.ingress = ingress;
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
group->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group)
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
group->id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
@@ -1056,21 +1046,25 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
- dev, ingress);
+ mlxsw_sp_port, ingress);
}
static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv)
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+ mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
+ mlxsw_sp_port, ingress);
}
static u16
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 96fdba78acab..f56fa18d6b26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -771,14 +771,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.size_get = mlxsw_sp_dpipe_table_host4_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1
+
static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- &mlxsw_sp_host4_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ return err;
}
static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -829,14 +848,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.size_get = mlxsw_sp_dpipe_table_host6_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2
+
static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- &mlxsw_sp_host6_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ return err;
}
static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1213,14 +1251,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.size_get = mlxsw_sp_dpipe_table_adj_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1
+
static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- &mlxsw_sp_dpipe_table_adj_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ return err;
}
static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 2f0e57857ea4..6ce00e28d4ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
@@ -45,7 +46,7 @@
#include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tcf_exts *exts)
{
@@ -80,8 +81,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset;
u16 group_id;
- ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
- ingress,
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -92,7 +92,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
} else if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlxsw_sp_fid *fid;
u16 fid_index;
@@ -104,14 +103,18 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- out_dev = __dev_get_by_index(dev_net(dev), ifindex);
- if (out_dev == dev)
- out_dev = NULL;
-
+ out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
out_dev);
if (err)
return err;
+ } else if (is_tcf_mirred_egress_mirror(a)) {
+ struct net_device *out_dev = tcf_mirred_dev(a);
+
+ err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
+ block, out_dev);
+ if (err)
+ return err;
} else if (is_tcf_vlan(a)) {
u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
u32 action = tcf_vlan_action(a);
@@ -266,7 +269,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
@@ -384,21 +387,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
- rulei, f->exts);
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
}
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_acl_rule_info *rulei;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -411,7 +412,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
}
rulei = mlxsw_sp_acl_rule_rulei(rule);
- err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
if (err)
goto err_flower_parse;
@@ -435,15 +436,15 @@ err_rule_create:
return err;
}
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
return;
@@ -457,10 +458,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
u64 packets;
@@ -468,8 +469,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
u64 bytes;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 310c38247b5c..55f9d2d70f9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -286,6 +286,32 @@ static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
}
+static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
+{
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (part->info->end_index -
+ part->info->start_index + 1) /
+ part->info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += part->info->alloc_size;
+ return occ;
+}
+
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_kvdl_part *part;
+ u64 occ = 0;
+
+ list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
+ occ += mlxsw_sp_kvdl_part_occ(part);
+
+ return occ;
+}
+
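Occupancy is derived purely from each part's usage bitmap: every set bit
stands for one allocated chunk of alloc_size entries, so the walk just counts
set bits and multiplies. A toy version of the same walk (values made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t usage = 0xb;		/* chunks 0, 1 and 3 in use */
		unsigned int alloc_size = 32;	/* entries per chunk */
		uint64_t occ = 0;

		for (int bit = 0; bit < 32; bit++)
			if (usage & (1u << bit))
				occ += alloc_size;

		/* Prints: occupancy: 96 entries */
		printf("occupancy: %llu entries\n", (unsigned long long)occ);
		return 0;
	}
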
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 34a0b632e5dd..4c7f32d4288d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -243,7 +243,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
if (!afa_block)
return ERR_PTR(-ENOMEM);
- err = mlxsw_afa_block_append_counter(afa_block, counter_index);
+ err = mlxsw_afa_block_append_allocated_counter(afa_block,
+ counter_index);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac5def0..0b7670459051 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -41,12 +41,157 @@
#include "spectrum.h"
#include "reg.h"
+#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
+
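The macro simply inverts the band index: PRIO's band 0 is its
highest-priority band, and mapping it to the highest traffic class number
keeps the hardware's strict-priority ordering consistent with the qdisc's
(that rationale is our reading; the patch itself only defines the mapping).
A quick check of the mapping:

	#include <stdio.h>

	#define IEEE_8021QAZ_MAX_TCS 8
	#define PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)

	int main(void)
	{
		/* band 0 -> tclass 7, ..., band 7 -> tclass 0 */
		for (int band = 0; band < IEEE_8021QAZ_MAX_TCS; band++)
			printf("band %d -> tclass %d\n", band,
			       PRIO_BAND_TO_TCLASS(band));
		return 0;
	}
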
+enum mlxsw_sp_qdisc_type {
+ MLXSW_SP_QDISC_NO_QDISC,
+ MLXSW_SP_QDISC_RED,
+ MLXSW_SP_QDISC_PRIO,
+};
+
+struct mlxsw_sp_qdisc_ops {
+ enum mlxsw_sp_qdisc_type type;
+ int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params);
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr);
+ int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr);
+ void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ /* unoffload - to be used for a qdisc that stops being offloaded without
+ * being destroyed.
+ */
+ void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+};
+
+struct mlxsw_sp_qdisc {
+ u32 handle;
+ u8 tclass_num;
+ union {
+ struct red_stats red;
+ } xstats_base;
+ struct mlxsw_sp_qdisc_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 drops;
+ u64 overlimits;
+ u64 backlog;
+ } stats_base;
+
+ struct mlxsw_sp_qdisc_ops *ops;
+};
+
+static bool
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
+ enum mlxsw_sp_qdisc_type type)
+{
+ return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->type == type &&
+ mlxsw_sp_qdisc->handle == handle;
+}
+
+static int
+mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int err = 0;
+
+ if (!mlxsw_sp_qdisc)
+ return 0;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+ err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ int err;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+		/* If this location already holds a qdisc of the same
+		 * type, the old configuration can simply be overridden.
+		 * Otherwise, the old qdisc must be destroyed before the
+		 * new one is set up.
+		 */
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_bad_param;
+
+ err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_config;
+
+ if (mlxsw_sp_qdisc->handle != handle) {
+ mlxsw_sp_qdisc->ops = ops;
+ if (ops->clean_stats)
+ ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+ }
+
+ mlxsw_sp_qdisc->handle = handle;
+ return 0;
+
+err_bad_param:
+err_config:
+ if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
+ ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_stats)
+ return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ stats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_xstats)
+ return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ xstats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
u32 probability, bool is_ecn)
{
- char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
+ char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+ char cwtp_cmd[MLXSW_REG_CWTP_LEN];
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
@@ -60,10 +205,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
+ mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
static int
@@ -79,80 +224,78 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
}
static void
-mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
+ struct red_stats *red_base;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+ red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
- mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
- switch (mlxsw_sp_qdisc->type) {
- case MLXSW_SP_QDISC_RED:
- xstats_base->prob_mark = xstats->ecn;
- xstats_base->prob_drop = xstats->wred_drop[tclass_num];
- xstats_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->prob_mark = xstats->ecn;
+ red_base->prob_drop = xstats->wred_drop[tclass_num];
+ red_base->pdrop = xstats->tail_drop[tclass_num];
- mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
- xstats_base->prob_mark;
- mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
- xstats_base->pdrop;
- break;
- default:
- break;
- }
+ stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
+ stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+ stats_base->backlog = 0;
}
static int
-mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int err;
-
- if (mlxsw_sp_qdisc->handle != handle)
- return 0;
-
- err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
- mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
-
- return err;
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
+ mlxsw_sp_qdisc->tclass_num);
}
static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_params *p)
+mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u32 min, max;
- u64 prob;
- int err = 0;
+ struct tc_red_qopt_offload_params *p = params;
if (p->min > p->max) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: min %u is bigger then max %u\n", p->min,
p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: max value %u is too big\n", p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->min == 0 || p->max == 0) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: 0 value is illegal for min and max\n");
- goto err_bad_param;
+ return -EINVAL;
}
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct tc_red_qopt_offload_params *p = params;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ u32 min, max;
+ u64 prob;
/* calculate probability in percentage */
prob = p->probability;
@@ -161,116 +304,309 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
prob = DIV_ROUND_UP(prob, 1 << 16);
min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
- err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
- max, prob, p->is_ecn);
- if (err)
- goto err_config;
-
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
- if (mlxsw_sp_qdisc->handle != handle)
- mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
- mlxsw_sp_qdisc,
- tclass_num);
+ return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+ max, prob, p->is_ecn);
+}
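
The replace handler above reduces a fixed-point probability to an integer percentage before programming the hardware. A standalone sketch, assuming the offloaded value is a [0, 1] fraction scaled by 2^32 (tc's max_P encoding); the multiply-by-100 and the second 2^16 division are assumptions not visible in this hunk:

/* Standalone sketch: fixed-point probability to integer percentage.
 * Assumes a Q0.32 fraction; rounding is up, matching DIV_ROUND_UP.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t div_round_up(uint64_t n, uint64_t d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        uint64_t prob = (uint64_t)(0.02 * 4294967296.0); /* 2% as Q0.32 */

        prob *= 100;                            /* fraction -> percent */
        prob = div_round_up(prob, 1ULL << 16);  /* drop low 16 bits... */
        prob = div_round_up(prob, 1ULL << 16);  /* ...then the next 16 */

        printf("%llu%%\n", (unsigned long long)prob);   /* prints 2% */
        return 0;
}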
- mlxsw_sp_qdisc->handle = handle;
- return 0;
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_red_qopt_offload_params *p = params;
+ u64 backlog;
-err_bad_param:
- err = -EINVAL;
-err_config:
- mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
- mlxsw_sp_qdisc, tclass_num);
- return err;
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
}
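
The unoffload hook above converts the remembered backlog from hardware cells back to bytes before handing it to the qdisc layer. A standalone sketch of that conversion; the cell size below is an assumed illustrative value, not necessarily Spectrum's:

/* Standalone sketch of the cell/byte conversion used when reporting
 * backlog: the ASIC accounts buffer usage in fixed-size cells, so
 * user-visible byte counts are cells multiplied by the cell size.
 */
#include <stdio.h>
#include <stdint.h>

#define CELL_SIZE 96            /* bytes per buffer cell (assumed) */

static uint64_t cells_to_bytes(uint64_t cells)
{
        return cells * CELL_SIZE;
}

int main(void)
{
        printf("%llu bytes\n",
               (unsigned long long)cells_to_bytes(10)); /* 960 bytes */
        return 0;
}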
static int
-mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num, struct red_stats *res)
+ void *xstats_ptr)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
-
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
+ struct red_stats *res = xstats_ptr;
+ int early_drops, marks, pdrops;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
- res->prob_mark = xstats->ecn - xstats_base->prob_mark;
- res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->ecn - xstats_base->prob_mark;
+ pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+
+ res->pdrop += pdrops;
+ res->prob_drop += early_drops;
+ res->prob_mark += marks;
+
+ xstats_base->pdrop += pdrops;
+ xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
static int
-mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_stats *res)
+ struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, overlimits, drops;
+ u64 tx_bytes, tx_packets, overlimits, drops, backlog;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
-
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
- tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
- tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
- mlxsw_sp_qdisc->overlimits;
+ stats_base->overlimits;
drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
- mlxsw_sp_qdisc->drops;
-
- _bstats_update(res->bstats, tx_bytes, tx_packets);
- res->qstats->overlimits += overlimits;
- res->qstats->drops += drops;
- res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- xstats->backlog[tclass_num]);
-
- mlxsw_sp_qdisc->drops += drops;
- mlxsw_sp_qdisc->overlimits += overlimits;
- mlxsw_sp_qdisc->tx_bytes += tx_bytes;
- mlxsw_sp_qdisc->tx_packets += tx_packets;
+ stats_base->drops;
+ backlog = xstats->backlog[tclass_num];
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->overlimits += overlimits;
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->overlimits += overlimits;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
return 0;
}
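
The stats handler above reports deltas against a per-qdisc base snapshot, since the hardware counters are cumulative and shared between qdisc instances. A standalone sketch of the snapshot/delta scheme, with illustrative names:

/* Standalone sketch: hardware counters only ever grow, so a "base"
 * snapshot taken at offload time turns them into qdisc-local counters.
 */
#include <stdio.h>
#include <stdint.h>

struct hw_counters { uint64_t tx_packets; };
struct qdisc_stats { uint64_t tx_packets; };    /* base snapshot */

static void clean_stats(struct qdisc_stats *base,
                        const struct hw_counters *hw)
{
        base->tx_packets = hw->tx_packets;      /* start counting here */
}

static uint64_t get_stats(struct qdisc_stats *base,
                          const struct hw_counters *hw)
{
        uint64_t delta = hw->tx_packets - base->tx_packets;

        base->tx_packets += delta;      /* never report a delta twice */
        return delta;
}

int main(void)
{
        struct hw_counters hw = { .tx_packets = 1000 };
        struct qdisc_stats base;

        clean_stats(&base, &hw);
        hw.tx_packets = 1250;
        printf("%llu\n", (unsigned long long)get_stats(&base, &hw)); /* 250 */
        printf("%llu\n", (unsigned long long)get_stats(&base, &hw)); /* 0 */
        return 0;
}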
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
+ .type = MLXSW_SP_QDISC_RED,
+ .check_params = mlxsw_sp_qdisc_red_check_params,
+ .replace = mlxsw_sp_qdisc_red_replace,
+ .unoffload = mlxsw_sp_qdisc_red_unoffload,
+ .destroy = mlxsw_sp_qdisc_red_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_red_stats,
+ .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+};
+
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- int tclass_num;
if (p->parent != TC_H_ROOT)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
- tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (p->command == TC_RED_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_red,
+ &p->set);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_RED))
+ return -EOPNOTSUPP;
switch (p->command) {
- case TC_RED_REPLACE:
- return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->set);
case TC_RED_DESTROY:
- return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num);
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
case TC_RED_XSTATS:
- return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- p->xstats);
+ return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->xstats);
case TC_RED_STATS:
- return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->stats);
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ MLXSW_SP_PORT_DEFAULT_TCLASS);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+
+ if (p->bands > IEEE_8021QAZ_MAX_TCS)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ int tclass, i;
+ int err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
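
The replace handler above maps each prio band to a hardware traffic class via MLXSW_SP_PRIO_BAND_TO_TCLASS, whose body is not shown in this hunk. A standalone sketch assuming it inverts the index, since prio band 0 is the highest-priority band while the switch serves the highest-numbered tclass first:

/* Standalone sketch of the assumed band-to-tclass inversion. */
#include <stdio.h>

#define IEEE_8021QAZ_MAX_TCS 8
#define PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)

int main(void)
{
        int band;

        for (band = 0; band < IEEE_8021QAZ_MAX_TCS; band++)
                printf("band %d -> tclass %d\n", band,
                       PRIO_BAND_TO_TCLASS(band));      /* 0->7 ... 7->0 */
        return 0;
}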
+
+static void
+mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
+}
+
+static int
+mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ drops += xstats->tail_drop[i];
+ backlog += xstats->backlog[i];
+ }
+ drops = drops - stats_base->drops;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
+ return 0;
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
+
+ stats_base->drops = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ stats_base->drops += xstats->tail_drop[i];
+
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
+ .type = MLXSW_SP_QDISC_PRIO,
+ .check_params = mlxsw_sp_qdisc_prio_check_params,
+ .replace = mlxsw_sp_qdisc_prio_replace,
+ .unoffload = mlxsw_sp_qdisc_prio_unoffload,
+ .destroy = mlxsw_sp_qdisc_prio_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ if (p->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+ if (p->command == TC_PRIO_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_prio,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_PRIO))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_PRIO_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_PRIO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
default:
return -EOPNOTSUPP;
}
}
+
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->root_qdisc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+ return 0;
+}
+
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->root_qdisc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index be657b8533f0..f0b25baba09a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -71,6 +71,7 @@
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
+struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;
@@ -84,6 +85,8 @@ struct mlxsw_sp_router {
struct rhashtable nexthop_ht;
struct list_head nexthop_list;
struct {
+ /* One tree for each protocol: IPv4 and IPv6 */
+ struct mlxsw_sp_lpm_tree *proto_trees[2];
struct mlxsw_sp_lpm_tree *trees;
unsigned int tree_count;
} lpm;
@@ -162,6 +165,15 @@ struct mlxsw_sp_rif_ops {
struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};
+static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
+static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree);
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib,
+ u8 tree_id);
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib);
+
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
@@ -349,14 +361,6 @@ mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
-static bool
-mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
-{
- struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
-
- return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
-}
-
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
struct mlxsw_sp_prefix_usage *prefix_usage2)
@@ -398,7 +402,6 @@ enum mlxsw_sp_fib_entry_type {
};
struct mlxsw_sp_nexthop_group;
-struct mlxsw_sp_fib;
struct mlxsw_sp_fib_node {
struct list_head entry_list;
@@ -445,6 +448,7 @@ struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
unsigned int ref_count;
enum mlxsw_sp_l3proto proto;
+ unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
@@ -453,8 +457,6 @@ struct mlxsw_sp_fib {
struct list_head node_list;
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
- unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
- struct mlxsw_sp_prefix_usage prefix_usage;
enum mlxsw_sp_l3proto proto;
};
@@ -469,12 +471,15 @@ struct mlxsw_sp_vr {
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
-static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
return ERR_PTR(-ENOMEM);
@@ -484,17 +489,26 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
INIT_LIST_HEAD(&fib->node_list);
fib->proto = proto;
fib->vr = vr;
+ fib->lpm_tree = lpm_tree;
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
+ if (err)
+ goto err_lpm_tree_bind;
return fib;
+err_lpm_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
kfree(fib);
return ERR_PTR(err);
}
-static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
+static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib *fib)
{
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
WARN_ON(!list_empty(&fib->node_list));
- WARN_ON(fib->lpm_tree);
rhashtable_destroy(&fib->ht);
kfree(fib);
}
@@ -581,6 +595,9 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
sizeof(lpm_tree->prefix_usage));
+ memset(&lpm_tree->prefix_ref_count, 0,
+ sizeof(lpm_tree->prefix_ref_count));
+ lpm_tree->ref_count = 1;
return lpm_tree;
err_left_struct_set:
@@ -607,8 +624,10 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
if (lpm_tree->ref_count != 0 &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
- prefix_usage))
+ prefix_usage)) {
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
return lpm_tree;
+ }
}
return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
@@ -629,9 +648,10 @@ static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
struct mlxsw_sp_lpm_tree *lpm_tree;
u64 max_trees;
- int i;
+ int err, i;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
return -EIO;
@@ -649,11 +669,42 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv4_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
+
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv6_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
+
return 0;
+
+err_ipv6_tree_get:
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_ipv4_tree_get:
+ kfree(mlxsw_sp->router->lpm.trees);
+ return err;
}
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
kfree(mlxsw_sp->router->lpm.trees);
}
@@ -745,10 +796,10 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
}
- vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+ vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr->fib4))
return ERR_CAST(vr->fib4);
- vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
+ vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
if (IS_ERR(vr->fib6)) {
err = PTR_ERR(vr->fib6);
goto err_fib6_create;
@@ -763,21 +814,22 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
return vr;
err_mr_table_create:
- mlxsw_sp_fib_destroy(vr->fib6);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
err_fib6_create:
- mlxsw_sp_fib_destroy(vr->fib4);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
vr->fib4 = NULL;
return ERR_PTR(err);
}
-static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
{
mlxsw_sp_mr_table_destroy(vr->mr4_table);
vr->mr4_table = NULL;
- mlxsw_sp_fib_destroy(vr->fib6);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
- mlxsw_sp_fib_destroy(vr->fib4);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
vr->fib4 = NULL;
}
@@ -793,12 +845,12 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
return vr;
}
-static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
list_empty(&vr->fib6->node_list) &&
mlxsw_sp_mr_table_empty(vr->mr4_table))
- mlxsw_sp_vr_destroy(vr);
+ mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
static bool
@@ -809,7 +861,7 @@ mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
if (!mlxsw_sp_vr_is_used(vr))
return false;
- if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
+ if (fib->lpm_tree->id == tree_id)
return true;
return false;
}
@@ -821,27 +873,31 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
int err;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err)
- return err;
fib->lpm_tree = new_tree;
mlxsw_sp_lpm_tree_hold(new_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+ if (err)
+ goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
return 0;
+
+err_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+ fib->lpm_tree = old_tree;
+ return err;
}
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib,
struct mlxsw_sp_lpm_tree *new_tree)
{
- struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
enum mlxsw_sp_l3proto proto = fib->proto;
+ struct mlxsw_sp_lpm_tree *old_tree;
u8 old_id, new_id = new_tree->id;
struct mlxsw_sp_vr *vr;
int i, err;
- if (!old_tree)
- goto no_replace;
+ old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
old_id = old_tree->id;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
@@ -855,6 +911,11 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
goto err_tree_replace;
}
+ memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
+ sizeof(new_tree->prefix_ref_count));
+ mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
+
return 0;
err_tree_replace:
@@ -866,33 +927,6 @@ err_tree_replace:
old_tree);
}
return err;
-
-no_replace:
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err)
- return err;
- fib->lpm_tree = new_tree;
- mlxsw_sp_lpm_tree_hold(new_tree);
- return 0;
-}
-
-static void
-mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_l3proto proto,
- struct mlxsw_sp_prefix_usage *req_prefix_usage)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
- unsigned char prefix;
-
- if (!mlxsw_sp_vr_is_used(vr))
- continue;
- mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
- mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
- }
}
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -1934,11 +1968,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
dipn = htonl(dip);
dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&arp_tbl, &dipn, dev);
- if (!n) {
- netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
- &dip);
+ if (!n)
return;
- }
netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
neigh_event_send(n, NULL);
@@ -1965,11 +1996,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&nd_tbl, &dip, dev);
- if (!n) {
- netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
- &dip);
+ if (!n)
return;
- }
netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
neigh_event_send(n, NULL);
@@ -2628,7 +2656,8 @@ struct mlxsw_sp_nexthop_group_cmp_arg {
static bool
mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
- const struct in6_addr *gw, int ifindex)
+ const struct in6_addr *gw, int ifindex,
+ int weight)
{
int i;
@@ -2636,7 +2665,7 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
const struct mlxsw_sp_nexthop *nh;
nh = &nh_grp->nexthops[i];
- if (nh->ifindex == ifindex &&
+ if (nh->ifindex == ifindex && nh->nh_weight == weight &&
ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
return true;
}
@@ -2655,11 +2684,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct in6_addr *gw;
- int ifindex;
+ int ifindex, weight;
ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
+ weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
- if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
+ if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
+ weight))
return false;
}
@@ -3228,7 +3259,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
{
if (!removing)
nh->should_offload = 1;
- else if (nh->offloaded)
+ else
nh->should_offload = 0;
nh->update = 1;
}
@@ -4190,68 +4221,66 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
}
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib *fib,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
- /* Since the tree is shared between all virtual routers we must
- * make sure it contains all the required prefix lengths. This
- * can be computed by either adding the new prefix length to the
- * existing prefix usage of a bound tree, or by aggregating the
- * prefix lengths across all virtual routers and adding the new
- * one as well.
- */
- if (fib->lpm_tree)
- mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
- &fib->lpm_tree->prefix_usage);
- else
- mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
+ if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ goto out;
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
fib->proto);
if (IS_ERR(lpm_tree))
return PTR_ERR(lpm_tree);
- if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
- return 0;
-
err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
if (err)
- return err;
+ goto err_lpm_tree_replace;
+out:
+ lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
return 0;
-}
-static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib *fib)
-{
- if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
- return;
- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
- mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
- fib->lpm_tree = NULL;
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+ return err;
}
-static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
+static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
{
- unsigned char prefix_len = fib_node->key.prefix_len;
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
struct mlxsw_sp_fib *fib = fib_node->fib;
+ int err;
- if (fib->prefix_ref_count[prefix_len]++ == 0)
- mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
-}
+ if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ return;
+ /* Try to construct a new LPM tree from the current prefix usage
+ * minus the unused one. If we fail, continue using the old one.
+ */
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
+ fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ fib->proto);
+ if (IS_ERR(lpm_tree))
+ return;
-static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
-{
- unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->fib;
+ err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
+ if (err)
+ goto err_lpm_tree_replace;
+
+ return;
- if (--fib->prefix_ref_count[prefix_len] == 0)
- mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}
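
The link/unlink pair above moves prefix-length accounting from the per-FIB structures into the shared LPM tree. A standalone sketch of the reference-counting idea, with illustrative names:

/* Standalone sketch: a prefix length is added to the tree's usage on
 * the first reference and considered for removal (allowing a smaller
 * tree) only when the last FIB node using it goes away.
 */
#include <stdio.h>

#define PREFIX_COUNT 33         /* IPv4: /0 .. /32 */

static unsigned long prefix_ref_count[PREFIX_COUNT];

static void link_prefix(unsigned char len)
{
        if (prefix_ref_count[len]++ == 0)
                printf("/%u added to tree usage (rebuild tree)\n", len);
}

static void unlink_prefix(unsigned char len)
{
        if (--prefix_ref_count[len] == 0)
                printf("/%u no longer used (try a smaller tree)\n", len);
}

int main(void)
{
        link_prefix(24);        /* rebuild */
        link_prefix(24);        /* just another reference */
        unlink_prefix(24);      /* still referenced, nothing happens */
        unlink_prefix(24);      /* last reference gone, shrink */
        return 0;
}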
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
@@ -4265,12 +4294,10 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
return err;
fib_node->fib = fib;
- err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
+ err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
if (err)
goto err_fib_lpm_tree_link;
- mlxsw_sp_fib_node_prefix_inc(fib_node);
-
return 0;
err_fib_lpm_tree_link:
@@ -4284,8 +4311,7 @@ static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_fib *fib = fib_node->fib;
- mlxsw_sp_fib_node_prefix_dec(fib_node);
- mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
+ mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
fib_node->fib = NULL;
mlxsw_sp_fib_node_remove(fib, fib_node);
}
@@ -4324,7 +4350,7 @@ mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
err_fib_node_init:
mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
return ERR_PTR(err);
}
@@ -4337,7 +4363,7 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static struct mlxsw_sp_fib4_entry *
@@ -4762,7 +4788,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev = rt->dst.dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = 1;
+ nh->nh_weight = rt->rt6i_nh_weight;
memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
@@ -5360,7 +5386,7 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int
@@ -5397,7 +5423,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -6046,7 +6072,7 @@ err_fid_get:
err_rif_alloc:
err_rif_index_alloc:
vr->rif_count--;
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
return ERR_PTR(err);
}
@@ -6069,7 +6095,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_put(fid);
kfree(rif);
vr->rif_count--;
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static void
@@ -6859,7 +6885,7 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
return 0;
err_loopback_op:
- mlxsw_sp_vr_put(ul_vr);
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
return err;
}
@@ -6873,7 +6899,7 @@ static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
--ul_vr->rif_count;
- mlxsw_sp_vr_put(ul_vr);
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
@@ -7008,6 +7034,24 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
}
#endif
+static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rdpm_pl[MLXSW_REG_RDPM_LEN];
+ unsigned int i;
+
+ MLXSW_REG_ZERO(rdpm, rdpm_pl);
+
+ /* The HW determines switch priority from the DSCP bits, but the
+ * kernel still derives it from the full ToS byte. Because of this
+ * mismatch, translate each DSCP entry to the ToS value the kernel
+ * would observe for it, skipping the 2 least-significant ECN bits.
+ */
+ for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
+ mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
+}
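
A standalone sketch of the index-to-ToS relationship the loop above relies on: the ToS byte carries DSCP in its upper six bits and ECN in the lower two, so DSCP table entry i corresponds to ToS i << 2:

/* Standalone sketch of the DSCP-to-ToS translation. */
#include <stdio.h>

int main(void)
{
        unsigned int dscp;

        for (dscp = 0; dscp < 64; dscp += 16)
                printf("DSCP %2u -> ToS 0x%02x\n", dscp, dscp << 2);
        return 0;
}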
+
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -7020,6 +7064,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
return err;
@@ -7095,6 +7140,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_mp_hash_init;
+ err = mlxsw_sp_dscp_init(mlxsw_sp);
+ if (err)
+ goto err_dscp_init;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
@@ -7104,6 +7153,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_register_fib_notifier:
+err_dscp_init:
err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b8548e25ae7..593ad31be749 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
return NULL;
}
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev)
+{
+ return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 2fe96f1f3fe5..bd6e9014bc74 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -28,6 +28,7 @@
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/of_net.h>
#include "ks8851.h"
@@ -407,15 +408,23 @@ static void ks8851_read_mac_addr(struct net_device *dev)
* @ks: The device structure
*
* Get or create the initial mac address for the device and then set that
- * into the station address register. If there is an EEPROM present, then
+ * into the station address register. A mac address supplied in the device
+ * tree takes precedence. Otherwise, if there is an EEPROM present, then
* we try that. If no valid mac address is found we use eth_random_addr()
* to create a new one.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
{
struct net_device *dev = ks->netdev;
+ const u8 *mac_addr;
+
+ mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
+ if (mac_addr) {
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ ks8851_write_mac_addr(dev);
+ return;
+ }
- /* first, try reading what we've got already */
if (ks->rc_ccr & CCR_EEPROM) {
ks8851_read_mac_addr(dev);
if (is_valid_ether_addr(dev->dev_addr))
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a42433fb6949..b922ab5cedea 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -311,7 +311,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
int sr;
- int commslot = 0;
+ bool commslot = macintosh_config->expansion_type == MAC_EXP_PDS_COMM;
if (!MACH_IS_MAC)
return -ENODEV;
@@ -322,10 +322,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
Ethernet (BTW, the Ethernet *is* always at the same
address, and nothing else lives there, at least if Apple's
documentation is to be believed) */
- if (macintosh_config->ident == MAC_MODEL_Q630 ||
- macintosh_config->ident == MAC_MODEL_P588 ||
- macintosh_config->ident == MAC_MODEL_P575 ||
- macintosh_config->ident == MAC_MODEL_C610) {
+ if (commslot || macintosh_config->ident == MAC_MODEL_C610) {
int card_present;
card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
@@ -333,7 +330,6 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
printk("none.\n");
return -ENODEV;
}
- commslot = 1;
}
printk("yes\n");
@@ -428,26 +424,26 @@ static int mac_nubus_sonic_ethernet_addr(struct net_device *dev,
return 0;
}
-static int macsonic_ident(struct nubus_dev *ndev)
+static int macsonic_ident(struct nubus_rsrc *fres)
{
- if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
- ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
+ if (fres->dr_hw == NUBUS_DRHW_ASANTE_LC &&
+ fres->dr_sw == NUBUS_DRSW_SONIC_LC)
return MACSONIC_DAYNALINK;
- if (ndev->dr_hw == NUBUS_DRHW_SONIC &&
- ndev->dr_sw == NUBUS_DRSW_APPLE) {
+ if (fres->dr_hw == NUBUS_DRHW_SONIC &&
+ fres->dr_sw == NUBUS_DRSW_APPLE) {
/* There has to be a better way to do this... */
- if (strstr(ndev->board->name, "DuoDock"))
+ if (strstr(fres->board->name, "DuoDock"))
return MACSONIC_DUODOCK;
else
return MACSONIC_APPLE;
}
- if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
- ndev->dr_sw == NUBUS_DRSW_DAYNA)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 &&
+ fres->dr_sw == NUBUS_DRSW_DAYNA)
return MACSONIC_DAYNA;
- if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
- ndev->dr_sw == 0) { /* huh? */
+ if (fres->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
+ fres->dr_sw == 0) { /* huh? */
return MACSONIC_APPLE16;
}
return -1;
@@ -456,7 +452,7 @@ static int macsonic_ident(struct nubus_dev *ndev)
static int mac_nubus_sonic_probe(struct net_device *dev)
{
static int slots;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
struct sonic_local* lp = netdev_priv(dev);
unsigned long base_addr, prom_addr;
u16 sonic_dcr;
@@ -464,9 +460,11 @@ static int mac_nubus_sonic_probe(struct net_device *dev)
int reg_offset, dma_bitmode;
/* Find the first SONIC that hasn't been initialized already */
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
- NUBUS_TYPE_ETHERNET, ndev)) != NULL)
- {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1<<ndev->board->slot))
continue;
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 24c4408b5734..d5866d708dfa 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -22,6 +22,8 @@ nfp-objs := \
nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
+ nfp_net_ctrl.o \
+ nfp_net_debugdump.o \
nfp_net_ethtool.o \
nfp_net_main.o \
nfp_net_repr.o \
@@ -43,6 +45,7 @@ endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
+ bpf/cmsg.o \
bpf/main.o \
bpf/offload.o \
bpf/verifier.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
new file mode 100644
index 000000000000..80d3aa0fc9d3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/jiffies.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "fw.h"
+#include "main.h"
+
+#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
+
+#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
+
+static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
+{
+ u16 used_tags;
+
+ used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
+
+ return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
+}
+
+static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
+{
+ /* All FW communication for BPF is request-reply. To make sure we
+ * don't reuse a message ID too early after a timeout, limit the
+ * number of requests in flight.
+ */
+ if (nfp_bpf_all_tags_busy(bpf)) {
+ cmsg_warn(bpf, "all FW request contexts busy!\n");
+ return -EAGAIN;
+ }
+
+ WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
+ return bpf->tag_alloc_next++;
+}
+
+static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+ WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
+
+ while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
+ bpf->tag_alloc_last != bpf->tag_alloc_next)
+ bpf->tag_alloc_last++;
+}
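
The tag accounting above leans on unsigned 16-bit wraparound: tag_alloc_next - tag_alloc_last counts in-flight tags even across the 0xffff boundary. A standalone sketch:

/* Standalone sketch of the mod-2^16 tag window check. */
#include <stdio.h>
#include <stdint.h>

#define TAG_ALLOC_SPAN (UINT16_MAX / 4)

int main(void)
{
        uint16_t last = 0xfff0, next = 0x0010;  /* window spans the wrap */
        uint16_t used = next - last;            /* mod-2^16 arithmetic */

        printf("in flight: %u, busy: %s\n", (unsigned)used,
               used > TAG_ALLOC_SPAN ? "yes" : "no");   /* 32, no */
        return 0;
}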
+
+static struct sk_buff *
+nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
+{
+ struct sk_buff *skb;
+
+ skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ skb_put(skb, size);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
+{
+ unsigned int size;
+
+ size = sizeof(struct cmsg_req_map_op);
+ size += sizeof(struct cmsg_key_value_pair) * n;
+
+ return nfp_bpf_cmsg_alloc(bpf, size);
+}
+
+static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
+{
+ struct cmsg_hdr *hdr;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+
+ return be16_to_cpu(hdr->tag);
+}
+
+static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+ unsigned int msg_tag;
+ struct sk_buff *skb;
+
+ skb_queue_walk(&bpf->cmsg_replies, skb) {
+ msg_tag = nfp_bpf_cmsg_get_tag(skb);
+ if (msg_tag == tag) {
+ nfp_bpf_free_tag(bpf, tag);
+ __skb_unlink(skb, &bpf->cmsg_replies);
+ return skb;
+ }
+ }
+
+ return NULL;
+}
+
+static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ skb = __nfp_bpf_reply(bpf, tag);
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ skb = __nfp_bpf_reply(bpf, tag);
+ if (!skb)
+ nfp_bpf_free_tag(bpf, tag);
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
+ int tag)
+{
+ struct sk_buff *skb;
+ int i, err;
+
+ for (i = 0; i < 50; i++) {
+ udelay(4);
+ skb = nfp_bpf_reply(bpf, tag);
+ if (skb)
+ return skb;
+ }
+
+ err = wait_event_interruptible_timeout(bpf->cmsg_wq,
+ skb = nfp_bpf_reply(bpf, tag),
+ msecs_to_jiffies(5000));
+ /* We didn't get a response - try one last time and atomically drop
+ * the tag even if no response is matched.
+ */
+ if (!skb)
+ skb = nfp_bpf_reply_drop_tag(bpf, tag);
+ if (err < 0) {
+ cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
+ err == ERESTARTSYS ? "interrupted" : "error",
+ type, err);
+ return ERR_PTR(err);
+ }
+ if (!skb) {
+ cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
+ type);
+ return ERR_PTR(-ETIMEDOUT);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
+ enum nfp_bpf_cmsg_type type, unsigned int reply_size)
+{
+ struct cmsg_hdr *hdr;
+ int tag;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ tag = nfp_bpf_alloc_tag(bpf);
+ if (tag < 0) {
+ nfp_ctrl_unlock(bpf->app->ctrl);
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(tag);
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = CMSG_MAP_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(tag);
+
+ __nfp_app_ctrl_tx(bpf->app, skb);
+
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
+ if (IS_ERR(skb))
+ return skb;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+ /* 0 reply_size means caller will do the validation */
+ if (reply_size && skb->len != reply_size) {
+ cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
+ skb->len, reply_size);
+ goto err_free;
+ }
+ if (hdr->type != __CMSG_REPLY(type)) {
+ cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+ hdr->type, __CMSG_REPLY(type));
+ goto err_free;
+ }
+
+ return skb;
+err_free:
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(-EIO);
+}
+
+static int
+nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
+ struct cmsg_reply_map_simple *reply)
+{
+ static const int res_table[] = {
+ [CMSG_RC_SUCCESS] = 0,
+ [CMSG_RC_ERR_MAP_FD] = -EBADFD,
+ [CMSG_RC_ERR_MAP_NOENT] = -ENOENT,
+ [CMSG_RC_ERR_MAP_ERR] = -EINVAL,
+ [CMSG_RC_ERR_MAP_PARSE] = -EIO,
+ [CMSG_RC_ERR_MAP_EXIST] = -EEXIST,
+ [CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM,
+ [CMSG_RC_ERR_MAP_E2BIG] = -E2BIG,
+ };
+ u32 rc;
+
+ rc = be32_to_cpu(reply->rc);
+ if (rc >= ARRAY_SIZE(res_table)) {
+ cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
+ return -EIO;
+ }
+
+ return res_table[rc];
+}
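
nfp_bpf_ctrl_rc_to_errno() above is a bounds-checked lookup table from firmware status codes to errno values; out-of-range codes are treated as an I/O error rather than indexing past the table. A standalone sketch of the pattern with a truncated table:

/* Standalone sketch of the bounds-checked status-to-errno table. */
#include <stdio.h>
#include <errno.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int rc_to_errno(unsigned int rc)
{
        static const int res_table[] = {
                [0] = 0,                /* success */
                [1] = -EBADFD,
                [2] = -ENOENT,
        };

        if (rc >= ARRAY_SIZE(res_table))
                return -EIO;            /* unknown firmware status */
        return res_table[rc];
}

int main(void)
{
        printf("%d %d %d\n", rc_to_errno(0), rc_to_errno(2),
               rc_to_errno(99));        /* 0 -2 -5 */
        return 0;
}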
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
+{
+ struct cmsg_reply_map_alloc_tbl *reply;
+ struct cmsg_req_map_alloc_tbl *req;
+ struct sk_buff *skb;
+ u32 tid;
+ int err;
+
+ skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->key_size = cpu_to_be32(map->key_size);
+ req->value_size = cpu_to_be32(map->value_size);
+ req->max_entries = cpu_to_be32(map->max_entries);
+ req->map_type = cpu_to_be32(map->map_type);
+ req->map_flags = 0;
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
+ sizeof(*reply));
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ goto err_free;
+
+ tid = be32_to_cpu(reply->tid);
+ dev_consume_skb_any(skb);
+
+ return tid;
+err_free:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
+{
+ struct cmsg_reply_map_free_tbl *reply;
+ struct cmsg_req_map_free_tbl *req;
+ struct sk_buff *skb;
+ int err;
+
+ skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+ if (!skb) {
+ cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
+ return;
+ }
+
+ req = (void *)skb->data;
+ req->tid = cpu_to_be32(nfp_map->tid);
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
+ sizeof(*reply));
+ if (IS_ERR(skb)) {
+ cmsg_warn(bpf, "leaking map - I/O error\n");
+ return;
+ }
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);
+
+ dev_consume_skb_any(skb);
+}
+
+static int
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
+ enum nfp_bpf_cmsg_type op,
+ u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
+{
+ struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+ struct nfp_app_bpf *bpf = nfp_map->bpf;
+ struct bpf_map *map = &offmap->map;
+ struct cmsg_reply_map_op *reply;
+ struct cmsg_req_map_op *req;
+ struct sk_buff *skb;
+ int err;
+
+ /* FW messages have no space for more than 32 bits of flags */
+ if (flags >> 32)
+ return -EOPNOTSUPP;
+
+ skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->tid = cpu_to_be32(nfp_map->tid);
+ req->count = cpu_to_be32(1);
+ req->flags = cpu_to_be32(flags);
+
+ /* Copy inputs */
+ if (key)
+ memcpy(&req->elem[0].key, key, map->key_size);
+ if (value)
+ memcpy(&req->elem[0].value, value, map->value_size);
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
+ sizeof(*reply) + sizeof(*reply->elem));
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ goto err_free;
+
+ /* Copy outputs */
+ if (out_key)
+ memcpy(out_key, &reply->elem[0].key, map->key_size);
+ if (out_value)
+ memcpy(out_value, &reply->elem[0].value, map->value_size);
+
+ dev_consume_skb_any(skb);
+
+ return 0;
+err_free:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+ key, value, flags, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+ key, NULL, 0, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+ key, NULL, 0, NULL, value);
+}
+
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+ void *next_key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+ NULL, NULL, 0, next_key, NULL);
+}
+
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+ key, NULL, 0, next_key, NULL);
+}
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+ unsigned int tag;
+
+ if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
+ cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
+ goto err_free;
+ }
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+
+ tag = nfp_bpf_cmsg_get_tag(skb);
+ if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
+ cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
+ tag);
+ goto err_unlock;
+ }
+
+ __skb_queue_tail(&bpf->cmsg_replies, skb);
+ wake_up_interruptible_all(&bpf->cmsg_wq);
+
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return;
+err_unlock:
+ nfp_ctrl_unlock(bpf->app->ctrl);
+err_free:
+ dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
new file mode 100644
index 000000000000..cfcc7bcb2c67
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_BPF_FW_H
+#define NFP_BPF_FW_H 1
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+enum bpf_cap_tlv_type {
+ NFP_BPF_CAP_TYPE_FUNC = 1,
+ NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2,
+ NFP_BPF_CAP_TYPE_MAPS = 3,
+};
+
+struct nfp_bpf_cap_tlv_func {
+ __le32 func_id;
+ __le32 func_addr;
+};
+
+struct nfp_bpf_cap_tlv_adjust_head {
+ __le32 flags;
+ __le32 off_min;
+ __le32 off_max;
+ __le32 guaranteed_sub;
+ __le32 guaranteed_add;
+};
+
+#define NFP_BPF_ADJUST_HEAD_NO_META BIT(0)
+
+struct nfp_bpf_cap_tlv_maps {
+ __le32 types;
+ __le32 max_maps;
+ __le32 max_elems;
+ __le32 max_key_sz;
+ __le32 max_val_sz;
+ __le32 max_elem_sz;
+};
+
+/*
+ * Types defined for map related control messages
+ */
+#define CMSG_MAP_ABI_VERSION 1
+
+enum nfp_bpf_cmsg_type {
+ CMSG_TYPE_MAP_ALLOC = 1,
+ CMSG_TYPE_MAP_FREE = 2,
+ CMSG_TYPE_MAP_LOOKUP = 3,
+ CMSG_TYPE_MAP_UPDATE = 4,
+ CMSG_TYPE_MAP_DELETE = 5,
+ CMSG_TYPE_MAP_GETNEXT = 6,
+ CMSG_TYPE_MAP_GETFIRST = 7,
+ __CMSG_TYPE_MAP_MAX,
+};
+
+#define CMSG_TYPE_MAP_REPLY_BIT 7
+#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
+
+#define CMSG_MAP_KEY_LW 16
+#define CMSG_MAP_VALUE_LW 16
+
+enum nfp_bpf_cmsg_status {
+ CMSG_RC_SUCCESS = 0,
+ CMSG_RC_ERR_MAP_FD = 1,
+ CMSG_RC_ERR_MAP_NOENT = 2,
+ CMSG_RC_ERR_MAP_ERR = 3,
+ CMSG_RC_ERR_MAP_PARSE = 4,
+ CMSG_RC_ERR_MAP_EXIST = 5,
+ CMSG_RC_ERR_MAP_NOMEM = 6,
+ CMSG_RC_ERR_MAP_E2BIG = 7,
+};
+
+struct cmsg_hdr {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+};
+
+struct cmsg_reply_map_simple {
+ struct cmsg_hdr hdr;
+ __be32 rc;
+};
+
+struct cmsg_req_map_alloc_tbl {
+ struct cmsg_hdr hdr;
+ __be32 key_size; /* in bytes */
+ __be32 value_size; /* in bytes */
+ __be32 max_entries;
+ __be32 map_type;
+ __be32 map_flags; /* reserved */
+};
+
+struct cmsg_reply_map_alloc_tbl {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 tid;
+};
+
+struct cmsg_req_map_free_tbl {
+ struct cmsg_hdr hdr;
+ __be32 tid;
+};
+
+struct cmsg_reply_map_free_tbl {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 count;
+};
+
+struct cmsg_key_value_pair {
+ __be32 key[CMSG_MAP_KEY_LW];
+ __be32 value[CMSG_MAP_VALUE_LW];
+};
+
+struct cmsg_req_map_op {
+ struct cmsg_hdr hdr;
+ __be32 tid;
+ __be32 count;
+ __be32 flags;
+ struct cmsg_key_value_pair elem[0];
+};
+
+struct cmsg_reply_map_op {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 count;
+ __be32 resv;
+ struct cmsg_key_value_pair elem[0];
+};
+#endif
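
A standalone sketch of the request/reply type convention fw.h defines above: a reply echoes the request type with bit 7 set (the __CMSG_REPLY macro), so CMSG_TYPE_MAP_ALLOC (1) is answered by type 0x81:

/* Standalone sketch of the reply-bit convention; CMSG_REPLY mirrors
 * fw.h's __CMSG_REPLY.
 */
#include <stdio.h>

#define BIT(n) (1UL << (n))
#define CMSG_TYPE_MAP_REPLY_BIT 7
#define CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))

int main(void)
{
        unsigned int req;

        for (req = 1; req <= 7; req++)
                printf("request 0x%02x -> reply 0x%02lx\n", req,
                       CMSG_REPLY(req));
        return 0;
}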
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 995e95410b11..56451edf01c2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -33,6 +33,7 @@
#define pr_fmt(fmt) "NFP net bpf: " fmt
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
@@ -66,12 +67,6 @@
next2 = nfp_meta_next(next))
static bool
-nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return meta->l.next != &nfp_prog->insns;
-}
-
-static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return meta->l.prev != &nfp_prog->insns;
@@ -90,19 +85,25 @@ static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
- return nfp_prog->start_off + nfp_prog->prog_len;
+ return nfp_prog->prog_len;
}
-static unsigned int
-nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
+static bool
+nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
- return offset - nfp_prog->start_off;
+ /* If there is a recorded error we may have dropped instructions;
+ * that doesn't have to be due to a translator bug, and the translation
+ * will fail anyway, so just return OK.
+ */
+ if (nfp_prog->error)
+ return true;
+ return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}
/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
+ u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
{
enum cmd_ctx_swap ctx;
u64 insn;
@@ -120,14 +121,15 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
FIELD_PREP(OP_CMD_CNT, size) |
FIELD_PREP(OP_CMD_SIG, sync) |
FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
+ FIELD_PREP(OP_CMD_INDIR, indir) |
FIELD_PREP(OP_CMD_MODE, mode);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
+emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+ swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
{
struct nfp_insn_re_regs reg;
int err;
@@ -148,7 +150,22 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
return;
}
- __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
+ __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
+ indir);
+}
+
+static void
+emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+ swreg lreg, swreg rreg, u8 size, bool sync)
+{
+ emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
+}
+
+static void
+emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+ swreg lreg, swreg rreg, u8 size, bool sync)
+{
+ emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
}
static void
@@ -172,22 +189,28 @@ __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
nfp_prog_push(nfp_prog, insn);
}
-static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+static void
+emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
+ enum nfp_relo_type relo)
{
- if (defer > 2) {
+ if (mask == BR_UNC && defer > 2) {
pr_err("BUG: branch defer out of bounds %d\n", defer);
nfp_prog->error = -EFAULT;
return;
}
- __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+
+ __emit_br(nfp_prog, mask,
+ mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+ BR_CSS_NONE, addr, defer);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_RELO_TYPE, relo);
}
static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
- __emit_br(nfp_prog, mask,
- mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
- BR_CSS_NONE, addr, defer);
+ emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}
static void
@@ -230,9 +253,11 @@ emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
return;
}
- __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
- invert, shift, reg.wr_both,
- reg.dst_lmextn, reg.src_lmextn);
+ /* Use reg.dst when destination is No-Dest. */
+ __emit_immed(nfp_prog,
+ swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
+ reg.breg, imm >> 8, width, invert, shift,
+ reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}
static void
@@ -458,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
}
}
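+
+/* Emit a 16-bit immediate load tagged with a relocation type so the
+ * per-vNIC relocation pass can adjust the value by the program's load
+ * address.
+ */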
+static void
+wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
+ enum nfp_relo_type relo)
+{
+ if (imm > 0xffff) {
+ pr_err("relocation of a large immediate!\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+ emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_RELO_TYPE, relo);
+}
+
/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
* If the @imm is small enough encode it directly in operand and return
* otherwise load @imm to a spare register and return its encoding.
@@ -490,24 +530,179 @@ static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
emit_nop(nfp_prog);
}
+static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+{
+ emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+}
+
+static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+{
+ wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+}
+
+/* wrp_reg_subpart() - load @field_len bytes from @offset of @src and write
+ * the result to @dst starting from the low end.
+ */
static void
-wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
- enum br_special special)
+wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
+ u8 offset)
{
- emit_br(nfp_prog, mask, 0, 0);
+ enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
+ u8 mask = (1 << field_len) - 1;
- nfp_prog->prog[nfp_prog->prog_len - 1] |=
- FIELD_PREP(OP_BR_SPECIAL, special);
+ emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}
-static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
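+/* addr40_offset() - add @offset to the 40-bit address held in the GPR pair
+ * starting at @src_gpr, propagating the carry into the high word, and
+ * return the operands to use via @rega and @regb.
+ */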
+static void
+addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ swreg *rega, swreg *regb)
{
- emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+ if (offset == reg_imm(0)) {
+ *rega = reg_a(src_gpr);
+ *regb = reg_b(src_gpr + 1);
+ return;
+ }
+
+ emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
+ emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
+ reg_imm(0));
+ *rega = imm_a(nfp_prog);
+ *regb = imm_b(nfp_prog);
}
-static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+/* NFP has a Command Push Pull (CPP) bus which supports bulk memory operations. */
+static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+ bool descending_seq = meta->ldst_gather_len < 0;
+ s16 len = abs(meta->ldst_gather_len);
+ swreg src_base, off;
+ bool src_40bit_addr;
+ unsigned int i;
+ u8 xfer_num;
+
+ off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+ src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
+ src_base = reg_a(meta->insn.src_reg * 2);
+ xfer_num = round_up(len, 4) / 4;
+
+ if (src_40bit_addr)
+ addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+ &off);
+
+ /* Setup PREV_ALU fields to override memory read length. */
+ if (len > 32)
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+
+ /* Memory read from source addr into transfer-in registers. */
+ emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
+ src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
+ src_base, off, xfer_num - 1, true, len > 32);
+
+ /* Move from transfer-in to transfer-out. */
+ for (i = 0; i < xfer_num; i++)
+ wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));
+
+ off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
+
+ if (len <= 8) {
+ /* Use single direct_ref write8. */
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
+ true);
+ } else if (len <= 32 && IS_ALIGNED(len, 4)) {
+ /* Use single direct_ref write32. */
+ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
+ true);
+ } else if (len <= 32) {
+ /* Use single indirect_ref write8. */
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
+ emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off,
+ len - 1, true);
+ } else if (IS_ALIGNED(len, 4)) {
+ /* Use single indirect_ref write32. */
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+ emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off,
+ xfer_num - 1, true);
+ } else if (len <= 40) {
+ /* Use one direct_ref write32 to write the first 32 bytes, then
+ * another direct_ref write8 to write the remaining bytes.
+ */
+ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off, 7,
+ true);
+
+ off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
+ imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
+ reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
+ true);
+ } else {
+ /* Use one indirect_ref write32 to write the 4-byte aligned length,
+ * then another direct_ref write8 to write the remaining bytes.
+ */
+ u8 new_off;
+
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
+ emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off,
+ xfer_num - 2, true);
+ new_off = meta->paired_st->off + (xfer_num - 1) * 4;
+ off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
+ xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
+ (len & 0x3) - 1, true);
+ }
+
+ /* TODO: The following extra load is to make sure the data flow is
+ * identical before and after the memory copy optimization.
+ *
+ * The load destination register is not guaranteed to be dead, so we
+ * need to make sure it is loaded with the same value as before
+ * this transformation.
+ *
+ * These extra loads could be removed once we have accurate register
+ * usage information.
+ */
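+ /* Pick the transfer register holding the value the original
+ * sequence's last load read.
+ */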
+ if (descending_seq)
+ xfer_num = 0;
+ else if (BPF_SIZE(meta->insn.code) != BPF_DW)
+ xfer_num = xfer_num - 1;
+ else
+ xfer_num = xfer_num - 2;
+
+ switch (BPF_SIZE(meta->insn.code)) {
+ case BPF_B:
+ wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(xfer_num), 1,
+ IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
+ break;
+ case BPF_H:
+ wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(xfer_num), 2, (len & 3) ^ 2);
+ break;
+ case BPF_W:
+ wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(0));
+ break;
+ case BPF_DW:
+ wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(xfer_num));
+ wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
+ reg_xfer(xfer_num + 1));
+ break;
+ }
+
+ if (BPF_SIZE(meta->insn.code) != BPF_DW)
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
}
static int
@@ -540,20 +735,20 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
}
static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, int size)
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
+ swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
unsigned int i;
u8 mask, sz;
- /* We load the value from the address indicated in @offset and then
+ /* We load the value from the address indicated in rreg + lreg and then
* mask out the data we don't need. Note: this is little endian!
*/
sz = max(size, 4);
mask = size < 4 ? GENMASK(size - 1, 0) : 0;
- emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
- reg_a(src_gpr), offset, sz / 4 - 1, true);
+ emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
+ lreg, rreg, sz / 4 - 1, true);
i = 0;
if (mask)
@@ -570,6 +765,26 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
}
static int
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, u8 size)
+{
+ return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
+ size, CMD_MODE_32b);
+}
+
+static int
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, u8 size)
+{
+ swreg rega, regb;
+
+ addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
+
+ return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+ size, CMD_MODE_40b_BA);
+}
+
+static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
swreg tmp_reg;
@@ -583,7 +798,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
emit_alu(nfp_prog, reg_none(),
plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
@@ -596,7 +811,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
/* Check packet length */
tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
@@ -975,9 +1190,6 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
{
const struct bpf_insn *insn = &meta->insn;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
insn->src_reg * 2, br_mask, insn->off);
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
@@ -995,9 +1207,6 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 reg = insn->dst_reg * 2;
swreg tmp_reg;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
if (!swap)
emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
@@ -1027,9 +1236,6 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
areg = insn->dst_reg * 2;
breg = insn->src_reg * 2;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (swap) {
areg ^= breg;
breg ^= areg;
@@ -1052,6 +1258,136 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
SHF_SC_R_ROT, 16);
}
+static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
+ struct nfp_bpf_cap_adjust_head *adjust_head;
+ u32 ret_einval, end;
+
+ adjust_head = &nfp_prog->bpf->adjust_head;
+
+ /* Optimized version - 5 vs 14 cycles */
+ if (nfp_prog->adjust_head_location != UINT_MAX) {
+ if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
+ return -EINVAL;
+
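+ /* BPF registers map to GPR pairs at 2 * reg_no, so reg_a(2 * 2)
+ * below reads the low word of r2 (the length delta).
+ */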
+ emit_alu(nfp_prog, pptr_reg(nfp_prog),
+ reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+ emit_alu(nfp_prog, pv_len(nfp_prog),
+ pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+ wrp_immed(nfp_prog, reg_both(0), 0);
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ /* TODO: when adjust head is guaranteed to succeed we can
+ * also eliminate the following if (r0 == 0) branch.
+ */
+
+ return 0;
+ }
+
+ ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
+ end = ret_einval + 2;
+
+ /* We need to use a temp because offset is just a part of the pkt ptr */
+ emit_alu(nfp_prog, tmp,
+ reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));
+
+ /* Validate result will fit within FW datapath constraints */
+ emit_alu(nfp_prog, reg_none(),
+ tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
+ emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+ emit_alu(nfp_prog, reg_none(),
+ reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
+ emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+
+ /* Validate the length is at least ETH_HLEN */
+ emit_alu(nfp_prog, tmp_len,
+ plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+ emit_alu(nfp_prog, reg_none(),
+ tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
+ emit_br(nfp_prog, BR_BMI, ret_einval, 0);
+
+ /* Load the ret code */
+ wrp_immed(nfp_prog, reg_both(0), 0);
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ /* Modify the packet metadata */
+ emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
+
+ /* Skip over the -EINVAL ret code (defer 2) */
+ emit_br(nfp_prog, BR_UNC, end, 2);
+
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+ emit_alu(nfp_prog, pv_len(nfp_prog),
+ pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+ /* return -EINVAL target */
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
+ return -EINVAL;
+
+ wrp_immed(nfp_prog, reg_both(0), -22);
+ wrp_immed(nfp_prog, reg_both(1), ~0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, end))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ struct bpf_offloaded_map *offmap;
+ struct nfp_bpf_map *nfp_map;
+ bool load_lm_ptr;
+ u32 ret_tgt;
+ s64 lm_off;
+ swreg tid;
+
+ offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
+ nfp_map = offmap->dev_priv;
+
+ /* We only have to reload LM0 if the key is not at start of stack */
+ lm_off = nfp_prog->stack_depth;
+ lm_off += meta->arg2.var_off.value + meta->arg2.off;
+ load_lm_ptr = meta->arg2_var_off || lm_off;
+
+ /* Set LM0 to start of key */
+ if (load_lm_ptr)
+ emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
+
+ /* Load the map ID into a register; it should actually fit as an
+ * immediate, but in case it doesn't, deal with it here rather than
+ * in the delay slots.
+ */
+ tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
+
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
+ 2, RELO_BR_HELPER);
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
+
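+ /* The next two instructions fill the branch's two defer slots */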
+ /* Load map ID into A0 */
+ wrp_mov(nfp_prog, reg_a(0), tid);
+
+ /* Load the return address into B0 */
+ wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+ return -EINVAL;
+
+ /* Reset the LM0 pointer */
+ if (!load_lm_ptr)
+ return 0;
+
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
+ wrp_nops(nfp_prog, 3);
+
+ return 0;
+}
+
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1486,14 +1822,29 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
- meta->insn.dst_reg * 2, size);
+ return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+ tmp_reg, meta->insn.dst_reg * 2, size);
+}
+
+static int
+mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg tmp_reg;
+
+ tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+ tmp_reg, meta->insn.dst_reg * 2, size);
}
static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size)
{
+ if (meta->ldst_gather_len)
+ return nfp_cpp_memcpy(nfp_prog, meta);
+
if (meta->ptr.type == PTR_TO_CTX) {
if (nfp_prog->type == BPF_PROG_TYPE_XDP)
return mem_ldx_xdp(nfp_prog, meta, size);
@@ -1508,6 +1859,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return mem_ldx_stack(nfp_prog, meta, size,
meta->ptr.off + meta->ptr.var_off.value);
+ if (meta->ptr.type == PTR_TO_MAP_VALUE)
+ return mem_ldx_emem(nfp_prog, meta, size);
+
return -EOPNOTSUPP;
}
@@ -1630,8 +1984,6 @@ static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- if (meta->insn.off < 0) /* TODO */
- return -EOPNOTSUPP;
emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
return 0;
@@ -1646,9 +1998,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
or1 = reg_a(insn->dst_reg * 2);
or2 = reg_b(insn->dst_reg * 2 + 1);
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (imm & ~0U) {
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
emit_alu(nfp_prog, imm_a(nfp_prog),
@@ -1689,15 +2038,32 @@ static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
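+
+/* Signed compares reuse the cmp helpers with the signed branch masks; the
+ * swap flag reverses the operands, e.g. JSGT becomes BLT on swapped
+ * operands and JSLE becomes BGE on swapped operands.
+ */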
+static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
+}
+
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
swreg tmp_reg;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (!imm) {
meta->skip = true;
return 0;
@@ -1726,9 +2092,6 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u64 imm = insn->imm; /* sign extend */
swreg tmp_reg;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (!imm) {
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
@@ -1753,9 +2116,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
ALU_OP_XOR, reg_b(insn->src_reg * 2));
emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
@@ -1787,6 +2147,26 @@ static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
+static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
+}
+
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -1797,9 +2177,22 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}
+static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ switch (meta->insn.imm) {
+ case BPF_FUNC_xdp_adjust_head:
+ return adjust_head(nfp_prog, meta);
+ case BPF_FUNC_map_lookup_elem:
+ return map_lookup_stack(nfp_prog, meta);
+ default:
+ WARN_ONCE(1, "verifier allowed unsupported function\n");
+ return -EOPNOTSUPP;
+ }
+}
+
static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
return 0;
}
@@ -1860,6 +2253,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
[BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
[BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
+ [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
+ [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
+ [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
+ [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
@@ -1867,99 +2264,64 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
[BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
[BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
+ [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
+ [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
+ [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
+ [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
+ [BPF_JMP | BPF_CALL] = call,
[BPF_JMP | BPF_EXIT] = goto_out,
};
-/* --- Misc code --- */
-static void br_set_offset(u64 *instr, u16 offset)
-{
- u16 addr_lo, addr_hi;
-
- addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
- addr_hi = offset != addr_lo;
- *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
- *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
- *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
-}
-
/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
- struct nfp_insn_meta *meta, *next;
- u32 off, br_idx;
- u32 idx;
+ struct nfp_insn_meta *meta, *jmp_dst;
+ u32 idx, br_idx;
- nfp_for_each_insn_walk2(nfp_prog, meta, next) {
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
if (meta->skip)
continue;
+ if (meta->insn.code == (BPF_JMP | BPF_CALL))
+ continue;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
continue;
- br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
+ if (list_is_last(&meta->l, &nfp_prog->insns))
+ br_idx = nfp_prog->last_bpf_off;
+ else
+ br_idx = list_next_entry(meta, l)->off - 1;
+
if (!nfp_is_br(nfp_prog->prog[br_idx])) {
pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
return -ELOOP;
}
/* Leave special branches for later */
- if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+ if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
+ RELO_BR_REL)
continue;
- /* Find the target offset in assembler realm */
- off = meta->insn.off;
- if (!off) {
- pr_err("Fixup found zero offset!!\n");
+ if (!meta->jmp_dst) {
+ pr_err("Non-exit jump doesn't have destination info recorded!!\n");
return -ELOOP;
}
- while (off && nfp_meta_has_next(nfp_prog, next)) {
- next = nfp_meta_next(next);
- off--;
- }
- if (off) {
- pr_err("Fixup found too large jump!! %d\n", off);
- return -ELOOP;
- }
+ jmp_dst = meta->jmp_dst;
- if (next->skip) {
+ if (jmp_dst->skip) {
pr_err("Branch landing on removed instruction!!\n");
return -ELOOP;
}
- for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
- idx <= br_idx; idx++) {
+ for (idx = meta->off; idx <= br_idx; idx++) {
if (!nfp_is_br(nfp_prog->prog[idx]))
continue;
- br_set_offset(&nfp_prog->prog[idx], next->off);
+ br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
}
}
- /* Fixup 'goto out's separately, they can be scattered around */
- for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
- enum br_special special;
-
- if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
- continue;
-
- special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
- switch (special) {
- case OP_BR_NORMAL:
- break;
- case OP_BR_GO_OUT:
- br_set_offset(&nfp_prog->prog[br_idx],
- nfp_prog->tgt_out);
- break;
- case OP_BR_GO_ABORT:
- br_set_offset(&nfp_prog->prog[br_idx],
- nfp_prog->tgt_abort);
- break;
- }
-
- nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
- }
-
return 0;
}
@@ -1987,7 +2349,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
@@ -2014,7 +2376,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
emit_shf(nfp_prog, reg_b(2),
reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
emit_shf(nfp_prog, reg_b(2),
reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
@@ -2033,7 +2395,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
@@ -2054,7 +2416,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_shf(nfp_prog, reg_b(2),
reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
@@ -2105,6 +2467,8 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
nfp_prog->n_translated++;
}
+ nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
+
nfp_outro(nfp_prog);
if (nfp_prog->error)
return nfp_prog->error;
@@ -2173,6 +2537,9 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
if (next.src_reg || next.dst_reg)
continue;
+ if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
+ continue;
+
meta2->skip = true;
}
}
@@ -2209,40 +2576,294 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
if (next1.imm != 0x20 || next2.imm != 0x20)
continue;
+ if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
+ meta3->flags & FLAG_INSN_IS_JUMP_DST)
+ continue;
+
meta2->skip = true;
meta3->skip = true;
}
}
+/* A load/store pair that forms a memory copy should look like the following:
+ *
+ * ld_width R, [addr_src + offset_src]
+ * st_width [addr_dest + offset_dest], R
+ *
+ * The destination register of the load and the source register of the
+ * store must be the same, and the load and store must operate at the
+ * same width.  If either addr_src or addr_dest is the stack pointer, we
+ * don't do the CPP optimization, as the stack is modelled by registers
+ * on the NFP.
+ */
+static bool
+curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
+ struct nfp_insn_meta *st_meta)
+{
+ struct bpf_insn *ld = &ld_meta->insn;
+ struct bpf_insn *st = &st_meta->insn;
+
+ if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
+ return false;
+
+ if (ld_meta->ptr.type != PTR_TO_PACKET)
+ return false;
+
+ if (st_meta->ptr.type != PTR_TO_PACKET)
+ return false;
+
+ if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
+ return false;
+
+ if (ld->dst_reg != st->src_reg)
+ return false;
+
+ /* There is a jump to the store insn in this pair. */
+ if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
+ return false;
+
+ return true;
+}
+
+/* Currently, we only support chaining load/store pairs if:
+ *
+ * - Their address base registers are the same.
+ * - Their address offsets are in the same order.
+ * - They operate at the same memory width.
+ * - There is no jump into the middle of them.
+ */
+static bool
+curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
+ struct nfp_insn_meta *st_meta,
+ struct bpf_insn *prev_ld,
+ struct bpf_insn *prev_st)
+{
+ u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
+ struct bpf_insn *ld = &ld_meta->insn;
+ struct bpf_insn *st = &st_meta->insn;
+ s16 prev_ld_off, prev_st_off;
+
+ /* This pair is the start pair. */
+ if (!prev_ld)
+ return true;
+
+ prev_size = BPF_LDST_BYTES(prev_ld);
+ curr_size = BPF_LDST_BYTES(ld);
+ prev_ld_base = prev_ld->src_reg;
+ prev_st_base = prev_st->dst_reg;
+ prev_ld_dst = prev_ld->dst_reg;
+ prev_ld_off = prev_ld->off;
+ prev_st_off = prev_st->off;
+
+ if (ld->dst_reg != prev_ld_dst)
+ return false;
+
+ if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
+ return false;
+
+ if (curr_size != prev_size)
+ return false;
+
+ /* There is a jump to the head of this pair. */
+ if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
+ return false;
+
+ /* Both in ascending order. */
+ if (prev_ld_off + prev_size == ld->off &&
+ prev_st_off + prev_size == st->off)
+ return true;
+
+ /* Both in descending order. */
+ if (ld->off + curr_size == prev_ld_off &&
+ st->off + curr_size == prev_st_off)
+ return true;
+
+ return false;
+}
+
+/* Return TRUE if a cross memory access happens.  A cross memory access
+ * means the store area overlaps the load area such that a later load might
+ * read a value written by a previous store; in that case we can't treat
+ * the sequence as a memory copy.
+ */
+static bool
+cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
+ struct nfp_insn_meta *head_st_meta)
+{
+ s16 head_ld_off, head_st_off, ld_off;
+
+ /* Different pointer types do not overlap. */
+ if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
+ return false;
+
+ /* Load and store are both PTR_TO_PACKET, so check the ID info. */
+ if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
+ return true;
+
+ /* Canonicalize the offsets: express all of them relative to the
+ * original base register.
+ */
+ head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
+ head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
+ ld_off = ld->off + head_ld_meta->ptr.off;
+
+ /* Ascending order cross. */
+ if (ld_off > head_ld_off &&
+ head_ld_off < head_st_off && ld_off >= head_st_off)
+ return true;
+
+ /* Descending order cross. */
+ if (ld_off < head_ld_off &&
+ head_ld_off > head_st_off && ld_off <= head_st_off)
+ return true;
+
+ return false;
+}
+
+/* This pass tries to identify the following instruction sequences.
+ *
+ * load R, [regA + offA]
+ * store [regB + offB], R
+ * load R, [regA + offA + const_imm_A]
+ * store [regB + offB + const_imm_A], R
+ * load R, [regA + offA + 2 * const_imm_A]
+ * store [regB + offB + 2 * const_imm_A], R
+ * ...
+ *
+ * The above sequence is typically generated by the compiler when lowering
+ * memcpy.  The NFP prefers using CPP instructions to accelerate it.
+ */
+static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *head_ld_meta = NULL;
+ struct nfp_insn_meta *head_st_meta = NULL;
+ struct nfp_insn_meta *meta1, *meta2;
+ struct bpf_insn *prev_ld = NULL;
+ struct bpf_insn *prev_st = NULL;
+ u8 count = 0;
+
+ nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+ struct bpf_insn *ld = &meta1->insn;
+ struct bpf_insn *st = &meta2->insn;
+
+ /* Reset the record status if any of the following is true:
+ * - The current insn pair is not a load/store pair.
+ * - The load/store pair doesn't chain with the previous one.
+ * - The chained load/store pair crosses the previous pair.
+ * - The chained load/store pair's total memory copy size exceeds
+ *   128 bytes, the maximum length a single NFP CPP command can
+ *   transfer.
+ */
+ if (!curr_pair_is_memcpy(meta1, meta2) ||
+ !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
+ prev_st) ||
+ (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
+ head_st_meta) ||
+ head_ld_meta->ldst_gather_len >= 128))) {
+ if (!count)
+ continue;
+
+ if (count > 1) {
+ s16 prev_ld_off = prev_ld->off;
+ s16 prev_st_off = prev_st->off;
+ s16 head_ld_off = head_ld_meta->insn.off;
+
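+ /* Descending chain: rebase the head on the lowest offsets
+ * and mark the gathered length as negative.
+ */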
+ if (prev_ld_off < head_ld_off) {
+ head_ld_meta->insn.off = prev_ld_off;
+ head_st_meta->insn.off = prev_st_off;
+ head_ld_meta->ldst_gather_len =
+ -head_ld_meta->ldst_gather_len;
+ }
+
+ head_ld_meta->paired_st = &head_st_meta->insn;
+ head_st_meta->skip = true;
+ } else {
+ head_ld_meta->ldst_gather_len = 0;
+ }
+
+ /* If the chain is ended by a load/store pair then this
+ * could serve as the new head of the next chain.
+ */
+ if (curr_pair_is_memcpy(meta1, meta2)) {
+ head_ld_meta = meta1;
+ head_st_meta = meta2;
+ head_ld_meta->ldst_gather_len =
+ BPF_LDST_BYTES(ld);
+ meta1 = nfp_meta_next(meta1);
+ meta2 = nfp_meta_next(meta2);
+ prev_ld = ld;
+ prev_st = st;
+ count = 1;
+ } else {
+ head_ld_meta = NULL;
+ head_st_meta = NULL;
+ prev_ld = NULL;
+ prev_st = NULL;
+ count = 0;
+ }
+
+ continue;
+ }
+
+ if (!head_ld_meta) {
+ head_ld_meta = meta1;
+ head_st_meta = meta2;
+ } else {
+ meta1->skip = true;
+ meta2->skip = true;
+ }
+
+ head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
+ meta1 = nfp_meta_next(meta1);
+ meta2 = nfp_meta_next(meta2);
+ prev_ld = ld;
+ prev_st = st;
+ count++;
+ }
+}
+
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
nfp_bpf_opt_reg_init(nfp_prog);
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
+ nfp_bpf_opt_ldst_gather(nfp_prog);
return 0;
}
-static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
+static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
+ __le64 *ustore = (__force __le64 *)prog;
int i;
- for (i = 0; i < nfp_prog->prog_len; i++) {
+ for (i = 0; i < len; i++) {
int err;
- err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+ err = nfp_ustore_check_valid_no_ecc(prog[i]);
if (err)
return err;
- nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
-
- ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+ ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
}
return 0;
}
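+
+/* Shrink the program buffer to the translated length; if the smaller
+ * allocation fails, just keep the original oversized buffer.
+ */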
+static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
+{
+ void *prog;
+
+ prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
+ if (!prog)
+ return;
+
+ nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
+ memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
+ kvfree(nfp_prog->prog);
+ nfp_prog->prog = prog;
+}
+
int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
int ret;
@@ -2258,5 +2879,102 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
return -EINVAL;
}
- return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
+ nfp_bpf_prog_trim(nfp_prog);
+
+ return ret;
+}
+
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
+{
+ struct nfp_insn_meta *meta;
+
+ /* Another pass to record jump information. */
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ u64 code = meta->insn.code;
+
+ if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
+ BPF_OP(code) != BPF_CALL) {
+ struct nfp_insn_meta *dst_meta;
+ unsigned short dst_indx;
+
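+ /* BPF jump offsets are relative to the next insn */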
+ dst_indx = meta->n + 1 + meta->insn.off;
+ dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
+ cnt);
+
+ meta->jmp_dst = dst_meta;
+ dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
+ }
+ }
+}
+
+bool nfp_bpf_supported_opcode(u8 code)
+{
+ return !!instr_cb[code];
+}
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
+{
+ unsigned int i;
+ u64 *prog;
+ int err;
+
+ prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
+ GFP_KERNEL);
+ if (!prog)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nfp_prog->prog_len; i++) {
+ enum nfp_relo_type special;
+ u32 val;
+
+ special = FIELD_GET(OP_RELO_TYPE, prog[i]);
+ switch (special) {
+ case RELO_NONE:
+ continue;
+ case RELO_BR_REL:
+ br_add_offset(&prog[i], bv->start_off);
+ break;
+ case RELO_BR_GO_OUT:
+ br_set_offset(&prog[i],
+ nfp_prog->tgt_out + bv->start_off);
+ break;
+ case RELO_BR_GO_ABORT:
+ br_set_offset(&prog[i],
+ nfp_prog->tgt_abort + bv->start_off);
+ break;
+ case RELO_BR_NEXT_PKT:
+ br_set_offset(&prog[i], bv->tgt_done);
+ break;
+ case RELO_BR_HELPER:
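+ /* The helper's function ID is encoded in the branch
+ * offset, biased by BR_OFF_RELO.
+ */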
+ val = br_get_offset(prog[i]);
+ val -= BR_OFF_RELO;
+ switch (val) {
+ case BPF_FUNC_map_lookup_elem:
+ val = nfp_prog->bpf->helpers.map_lookup;
+ break;
+ default:
+ pr_err("relocation of unknown helper %d\n",
+ val);
+ err = -EINVAL;
+ goto err_free_prog;
+ }
+ br_set_offset(&prog[i], val);
+ break;
+ case RELO_IMMED_REL:
+ immed_add_value(&prog[i], bv->start_off);
+ break;
+ }
+
+ prog[i] &= ~OP_RELO_TYPE;
+ }
+
+ err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
+ if (err)
+ goto err_free_prog;
+
+ return prog;
+
+err_free_prog:
+ kfree(prog);
+ return ERR_PTR(err);
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 13190aa09faf..322027792fe8 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -34,10 +34,12 @@
#include <net/pkt_cls.h>
#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
+#include "fw.h"
#include "main.h"
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
@@ -52,7 +54,7 @@ static bool nfp_net_ebpf_capable(struct nfp_net *nn)
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+ struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
bool running, xdp_running;
int ret;
@@ -68,10 +70,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
if (prog && running && !xdp_running)
return -EBUSY;
- ret = nfp_net_bpf_offload(nn, prog, running);
+ ret = nfp_net_bpf_offload(nn, prog, running, extack);
/* Stop offload if replace not possible */
if (ret && prog)
- nfp_bpf_xdp_offload(app, nn, NULL);
+ nfp_bpf_xdp_offload(app, nn, NULL, extack);
nn->dp.bpf_offload_xdp = prog && !ret;
return ret;
@@ -85,16 +87,21 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
+ struct nfp_bpf_vnic *bv;
int err;
- nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
- if (!nn->app_priv)
+ bv = kzalloc(sizeof(*bv), GFP_KERNEL);
+ if (!bv)
return -ENOMEM;
+ nn->app_priv = bv;
err = nfp_app_nic_vnic_alloc(app, nn, id);
if (err)
goto err_free_priv;
+ bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
return 0;
err_free_priv:
kfree(nn->app_priv);
@@ -105,8 +112,6 @@ static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
struct nfp_bpf_vnic *bv = nn->app_priv;
- if (nn->dp.bpf_offload_xdp)
- nfp_bpf_xdp_offload(app, nn, NULL);
WARN_ON(bv->tc_prog);
kfree(bv);
}
@@ -120,17 +125,29 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
struct nfp_bpf_vnic *bv;
int err;
- if (type != TC_SETUP_CLSBPF ||
- !tc_can_offload(nn->dp.netdev) ||
- !nfp_net_ebpf_capable(nn) ||
- cls_bpf->common.protocol != htons(ETH_P_ALL) ||
- cls_bpf->common.chain_index)
+ if (type != TC_SETUP_CLSBPF) {
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "only offload of BPF classifiers supported");
+ return -EOPNOTSUPP;
+ }
+ if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
+ return -EOPNOTSUPP;
+ if (!nfp_net_ebpf_capable(nn)) {
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "NFP firmware does not support eBPF offload");
return -EOPNOTSUPP;
+ }
+ if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "only ETH_P_ALL supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
/* Only support TC direct action */
if (!cls_bpf->exts_integrated ||
tcf_exts_has_actions(cls_bpf->exts)) {
- nn_err(nn, "only direct action with no legacy actions supported\n");
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "only direct action with no legacy actions supported");
return -EOPNOTSUPP;
}
@@ -147,7 +164,8 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
return 0;
}
- err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+ err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
+ cls_bpf->common.extack);
if (err)
return err;
@@ -191,23 +209,215 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
{
- return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+ struct nfp_bpf_vnic *bv = nn->app_priv;
+
+ return !!bv->tc_prog;
+}
+
+static int
+nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int max_mtu;
+
+ if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+ return 0;
+
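+ /* NFP_NET_CFG_BPF_INL_MTU is in units of 64 bytes; the 32 bytes
+ * subtracted are presumably reserved prepend/metadata space.
+ */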
+ max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ if (new_mtu > max_mtu) {
+ nn_info(nn, "BPF offload active, MTU over %u not supported\n",
+ max_mtu);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int
+nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
+ struct nfp_cpp *cpp = bpf->app->pf->cpp;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ bpf->adjust_head.flags = readl(&cap->flags);
+ bpf->adjust_head.off_min = readl(&cap->off_min);
+ bpf->adjust_head.off_max = readl(&cap->off_max);
+ bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
+ bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);
+
+ if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
+ nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
+ return -EINVAL;
+ }
+ if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
+ !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
+ nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit as immediates\n");
+ memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
+ return 0;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ struct nfp_bpf_cap_tlv_func __iomem *cap = value;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ switch (readl(&cap->func_id)) {
+ case BPF_FUNC_map_lookup_elem:
+ bpf->helpers.map_lookup = readl(&cap->func_addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ struct nfp_bpf_cap_tlv_maps __iomem *cap = value;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ bpf->maps.types = readl(&cap->types);
+ bpf->maps.max_maps = readl(&cap->max_maps);
+ bpf->maps.max_elems = readl(&cap->max_elems);
+ bpf->maps.max_key_sz = readl(&cap->max_key_sz);
+ bpf->maps.max_val_sz = readl(&cap->max_val_sz);
+ bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);
+
+ return 0;
+}
+
+static int nfp_bpf_parse_capabilities(struct nfp_app *app)
+{
+ struct nfp_cpp *cpp = app->pf->cpp;
+ struct nfp_cpp_area *area;
+ u8 __iomem *mem, *start;
+
+ mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
+ 8, &area);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
+
+ start = mem;
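+ /* Walk the capability TLVs: 4B of type, 4B of length, then @length
+ * bytes of value.
+ */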
+ while (mem - start + 8 < nfp_cpp_area_size(area)) {
+ u8 __iomem *value;
+ u32 type, length;
+
+ type = readl(mem);
+ length = readl(mem + 4);
+ value = mem + 8;
+
+ mem += 8 + length;
+ if (mem - start > nfp_cpp_area_size(area))
+ goto err_release_free;
+
+ switch (type) {
+ case NFP_BPF_CAP_TYPE_FUNC:
+ if (nfp_bpf_parse_cap_func(app->priv, value, length))
+ goto err_release_free;
+ break;
+ case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
+ if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
+ case NFP_BPF_CAP_TYPE_MAPS:
+ if (nfp_bpf_parse_cap_maps(app->priv, value, length))
+ goto err_release_free;
+ break;
+ default:
+ nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
+ break;
+ }
+ }
+ if (mem - start != nfp_cpp_area_size(area)) {
+ nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
+ mem - start, nfp_cpp_area_size(area));
+ goto err_release_free;
+ }
+
+ nfp_cpp_area_release_free(area);
+
+ return 0;
+
+err_release_free:
+ nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
+ nfp_cpp_area_release_free(area);
+ return -EINVAL;
+}
+
+static int nfp_bpf_init(struct nfp_app *app)
+{
+ struct nfp_app_bpf *bpf;
+ int err;
+
+ bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
+ if (!bpf)
+ return -ENOMEM;
+ bpf->app = app;
+ app->priv = bpf;
+
+ skb_queue_head_init(&bpf->cmsg_replies);
+ init_waitqueue_head(&bpf->cmsg_wq);
+ INIT_LIST_HEAD(&bpf->map_list);
+
+ err = nfp_bpf_parse_capabilities(app);
+ if (err)
+ goto err_free_bpf;
+
+ return 0;
+
+err_free_bpf:
+ kfree(bpf);
+ return err;
+}
+
+static void nfp_bpf_clean(struct nfp_app *app)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+ WARN_ON(!list_empty(&bpf->map_list));
+ WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
+ kfree(bpf);
}
const struct nfp_app_type app_bpf = {
.id = NFP_APP_BPF_NIC,
.name = "ebpf",
+ .ctrl_cap_mask = 0,
+
+ .init = nfp_bpf_init,
+ .clean = nfp_bpf_clean,
+
+ .change_mtu = nfp_bpf_change_mtu,
+
.extra_cap = nfp_bpf_extra_cap,
.vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
+ .ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
+
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
+ .bpf = nfp_ndo_bpf,
.xdp_offload = nfp_bpf_xdp_offload,
-
- .bpf_verifier_prep = nfp_bpf_verifier_prep,
- .bpf_translate = nfp_bpf_translate,
- .bpf_destroy = nfp_bpf_destroy,
};
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 57b6043177a3..424fe8338105 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -37,22 +37,40 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/wait.h>
#include "../nfp_asm.h"
+#include "fw.h"
-/* For branch fixup logic use up-most byte of branch instruction as scratch
+/* For relocation logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
*/
-#define OP_BR_SPECIAL 0xff00000000000000ULL
-
-enum br_special {
- OP_BR_NORMAL = 0,
- OP_BR_GO_OUT,
- OP_BR_GO_ABORT,
+#define OP_RELO_TYPE 0xff00000000000000ULL
+
+enum nfp_relo_type {
+ RELO_NONE = 0,
+ /* standard internal jumps */
+ RELO_BR_REL,
+ /* internal jumps to parts of the outro */
+ RELO_BR_GO_OUT,
+ RELO_BR_GO_ABORT,
+ /* external jumps to fixed addresses */
+ RELO_BR_NEXT_PKT,
+ RELO_BR_HELPER,
+ /* immediate relocation against load address */
+ RELO_IMMED_REL,
};
+/* To make absolute relocated branches (branches other than RELO_BR_REL)
+ * distinguishable in user space dumps from normal jumps, add a large offset
+ * to them.
+ */
+#define BR_OFF_RELO 15000
+
enum static_regs {
STATIC_REG_IMM = 21, /* Bank AB */
STATIC_REG_STACK = 22, /* Bank A */
@@ -78,6 +96,89 @@ enum pkt_vec {
#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
+/**
+ * struct nfp_app_bpf - bpf app priv structure
+ * @app: backpointer to the app
+ *
+ * @tag_allocator: bitmap of control message tags in use
+ * @tag_alloc_next: next tag bit to allocate
+ * @tag_alloc_last: next tag bit to be freed
+ *
+ * @cmsg_replies: received cmsg replies waiting to be consumed
+ * @cmsg_wq: work queue for waiting for cmsg replies
+ *
+ * @map_list: list of offloaded maps
+ * @maps_in_use: number of currently offloaded maps
+ * @map_elems_in_use: number of elements allocated to offloaded maps
+ *
+ * @adjust_head: adjust head capability
+ * @flags: extra flags for adjust head
+ * @off_min: minimal packet offset within buffer required
+ * @off_max: maximum packet offset within buffer required
+ * @guaranteed_sub: amount of negative adjustment guaranteed possible
+ * @guaranteed_add: amount of positive adjustment guaranteed possible
+ *
+ * @maps: map capability
+ * @types: supported map types
+ * @max_maps: max number of maps supported
+ * @max_elems: max number of entries in each map
+ * @max_key_sz: max size of map key
+ * @max_val_sz: max size of map value
+ * @max_elem_sz: max size of map entry (key + value)
+ *
+ * @helpers: helper addresses for various calls
+ * @map_lookup: map lookup helper address
+ */
+struct nfp_app_bpf {
+ struct nfp_app *app;
+
+ DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+ u16 tag_alloc_next;
+ u16 tag_alloc_last;
+
+ struct sk_buff_head cmsg_replies;
+ struct wait_queue_head cmsg_wq;
+
+ struct list_head map_list;
+ unsigned int maps_in_use;
+ unsigned int map_elems_in_use;
+
+ struct nfp_bpf_cap_adjust_head {
+ u32 flags;
+ int off_min;
+ int off_max;
+ int guaranteed_sub;
+ int guaranteed_add;
+ } adjust_head;
+
+ struct {
+ u32 types;
+ u32 max_maps;
+ u32 max_elems;
+ u32 max_key_sz;
+ u32 max_val_sz;
+ u32 max_elem_sz;
+ } maps;
+
+ struct {
+ u32 map_lookup;
+ } helpers;
+};
+
+/**
+ * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
+ * @offmap: pointer to the offloaded BPF map
+ * @bpf: back pointer to bpf app private structure
+ * @tid: table id identifying map on datapath
+ * @l: link on the nfp_app_bpf->map_list list
+ */
+struct nfp_bpf_map {
+ struct bpf_offloaded_map *offmap;
+ struct nfp_app_bpf *bpf;
+ u32 tid;
+ struct list_head l;
+};
+
struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
@@ -89,23 +190,47 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
#define nfp_meta_next(meta) list_next_entry(meta, l)
#define nfp_meta_prev(meta) list_prev_entry(meta, l)
+#define FLAG_INSN_IS_JUMP_DST BIT(0)
+
/**
* struct nfp_insn_meta - BPF instruction wrapper
* @insn: BPF instruction
* @ptr: pointer type for memory operations
+ * @ldst_gather_len: memcpy length gathered from load/store sequence
+ * @paired_st: the paired store insn at the head of the sequence
* @ptr_not_const: pointer is not always constant
+ * @jmp_dst: destination info for jump instructions
+ * @func_id: function id for call instructions
+ * @arg1: arg1 for call instructions
+ * @arg2: arg2 for call instructions
+ * @arg2_var_off: arg2 changes stack offset on different paths
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
+ * @flags: eBPF instruction extra optimization flags
* @skip: skip this instruction (optimized out)
* @double_cb: callback for second part of the instruction
* @l: link on nfp_prog->insns list
*/
struct nfp_insn_meta {
struct bpf_insn insn;
- struct bpf_reg_state ptr;
- bool ptr_not_const;
+ union {
+ struct {
+ struct bpf_reg_state ptr;
+ struct bpf_insn *paired_st;
+ s16 ldst_gather_len;
+ bool ptr_not_const;
+ };
+ struct nfp_insn_meta *jmp_dst;
+ struct {
+ u32 func_id;
+ struct bpf_reg_state arg1;
+ struct bpf_reg_state arg2;
+ bool arg2_var_off;
+ };
+ };
unsigned int off;
unsigned short n;
+ unsigned short flags;
bool skip;
instr_cb_t double_cb;
@@ -134,23 +259,36 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
return BPF_MODE(meta->insn.code);
}
+static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
+{
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
+}
+
+static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
+{
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
+}
+
/**
* struct nfp_prog - nfp BPF program
+ * @bpf: backpointer to the bpf app priv structure
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
* @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
- * @start_off: address of the first instruction in the memory
+ * @last_bpf_off: address of the last instruction translated from BPF
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
- * @tgt_done: jump target to get the next packet
* @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier
+ * @adjust_head_location: if the program has a single adjust_head call - its insn no.
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
+ struct nfp_app_bpf *bpf;
+
u64 *prog;
unsigned int prog_len;
unsigned int __prog_alloc_len;
@@ -159,15 +297,15 @@ struct nfp_prog {
enum bpf_prog_type type;
- unsigned int start_off;
+ unsigned int last_bpf_off;
unsigned int tgt_out;
unsigned int tgt_abort;
- unsigned int tgt_done;
unsigned int n_translated;
int error;
unsigned int stack_depth;
+ unsigned int adjust_head_location;
struct list_head insns;
};
@@ -175,26 +313,49 @@ struct nfp_prog {
/**
* struct nfp_bpf_vnic - per-vNIC BPF priv structure
* @tc_prog: currently loaded cls_bpf program
+ * @start_off: address of the first instruction in the memory
+ * @tgt_done: jump target to get the next packet
*/
struct nfp_bpf_vnic {
struct bpf_prog *tc_prog;
+ unsigned int start_off;
+ unsigned int tgt_done;
};
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
+bool nfp_bpf_supported_opcode(u8 code);
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
struct netdev_bpf;
struct nfp_app;
struct nfp_net;
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
- bool old_prog);
-
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf);
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
+ bool old_prog, struct netlink_ext_ack *extack);
+
+struct nfp_insn_meta *
+nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int insn_idx, unsigned int n_insns);
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
+void
+nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+ void *next_key);
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags);
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value);
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key);
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
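
The is_mbpf_load()/is_mbpf_store() helpers added at the top of this header rely on the eBPF opcode encoding: the low three bits of insn.code hold the instruction class (BPF_LDX/BPF_STX), bits 3-4 hold the access size (B/H/W/DW), and the remaining bits hold the mode (BPF_MEM). Masking out the size bits therefore matches a memory load or store of any width in one comparison. A standalone illustration using the UAPI encodings (a sketch; the literal 0x18 is what the driver's BPF_SIZE_MASK works out to given the UAPI size bits, not a kernel API):

    #include <linux/bpf.h>   /* UAPI: BPF_LDX, BPF_STX, BPF_MEM, BPF_W, ... */
    #include <stdio.h>

    int main(void)
    {
            /* r0 = *(u32 *)(r1 + off): class BPF_LDX, mode BPF_MEM, size BPF_W */
            unsigned char code = BPF_LDX | BPF_MEM | BPF_W;

            /* Clearing the two size bits leaves only class and mode, so one
             * comparison covers all four access widths.
             */
            if ((code & ~0x18) == (BPF_LDX | BPF_MEM))
                    printf("memory load of some width\n");
            return 0;
    }
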
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index bc879aeb62d4..0a7732385469 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -36,18 +36,23 @@
* Netronome network device driver: TC offload functions for PF and VF
*/
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include "main.h"
+#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
@@ -55,11 +60,10 @@ static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt)
{
+ struct nfp_insn_meta *meta;
unsigned int i;
for (i = 0; i < cnt; i++) {
- struct nfp_insn_meta *meta;
-
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
return -ENOMEM;
@@ -70,6 +74,8 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
list_add_tail(&meta->l, &nfp_prog->insns);
}
+ nfp_bpf_jit_prepare(nfp_prog, cnt);
+
return 0;
}
@@ -84,8 +90,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog);
}
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static int
+nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
struct bpf_prog *prog = bpf->verifier.prog;
struct nfp_prog *nfp_prog;
@@ -98,6 +105,7 @@ int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = prog->type;
+ nfp_prog->bpf = app->priv;
ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
if (ret)
@@ -114,12 +122,12 @@ err_free:
return ret;
}
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int stack_size;
unsigned int max_instr;
+ int err;
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (prog->aux->stack_depth > stack_size) {
@@ -127,50 +135,179 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
-
- nfp_prog->stack_depth = prog->aux->stack_depth;
- nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
- nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+ nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
- nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
+ nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
if (!nfp_prog->prog)
return -ENOMEM;
- return nfp_bpf_jit(nfp_prog);
+ err = nfp_bpf_jit(nfp_prog);
+ if (err)
+ return err;
+
+ prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
+ prog->aux->offload->jited_image = nfp_prog->prog;
+
+ return 0;
}
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- kfree(nfp_prog->prog);
+ kvfree(nfp_prog->prog);
nfp_prog_free(nfp_prog);
return 0;
}
-static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
+static int
+nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ if (!key)
+ return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
+ return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
+}
+
+static int
+nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+ return -EINVAL;
+ return nfp_bpf_ctrl_del_entry(offmap, key);
+}
+
+static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
+ .map_get_next_key = nfp_bpf_map_get_next_key,
+ .map_lookup_elem = nfp_bpf_ctrl_lookup_entry,
+ .map_update_elem = nfp_bpf_ctrl_update_entry,
+ .map_delete_elem = nfp_bpf_map_delete_elem,
+};
+
+static int
+nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+ struct nfp_bpf_map *nfp_map;
+ long long int res;
+
+ if (!bpf->maps.types)
+ return -EOPNOTSUPP;
+
+ if (offmap->map.map_flags ||
+ offmap->map.numa_node != NUMA_NO_NODE) {
+ pr_info("map flags are not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
+ pr_info("map type not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if (bpf->maps.max_maps == bpf->maps_in_use) {
+ pr_info("too many maps for a device\n");
+ return -ENOMEM;
+ }
+ if (bpf->maps.max_elems - bpf->map_elems_in_use <
+ offmap->map.max_entries) {
+ pr_info("map with too many elements: %u, left: %u\n",
+ offmap->map.max_entries,
+ bpf->maps.max_elems - bpf->map_elems_in_use);
+ return -ENOMEM;
+ }
+ if (offmap->map.key_size > bpf->maps.max_key_sz ||
+ offmap->map.value_size > bpf->maps.max_val_sz ||
+ round_up(offmap->map.key_size, 8) +
+ round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
+ pr_info("elements don't fit in device constraints\n");
+ return -ENOMEM;
+ }
+
+ nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
+ if (!nfp_map)
+ return -ENOMEM;
+
+ offmap->dev_priv = nfp_map;
+ nfp_map->offmap = offmap;
+ nfp_map->bpf = bpf;
+
+ res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
+ if (res < 0) {
+ kfree(nfp_map);
+ return res;
+ }
+
+ nfp_map->tid = res;
+ offmap->dev_ops = &nfp_bpf_map_ops;
+ bpf->maps_in_use++;
+ bpf->map_elems_in_use += offmap->map.max_entries;
+ list_add_tail(&nfp_map->l, &bpf->map_list);
+
+ return 0;
+}
+
+static int
+nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+ struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+
+ nfp_bpf_ctrl_free_map(bpf, nfp_map);
+ list_del_init(&nfp_map->l);
+ bpf->map_elems_in_use -= offmap->map.max_entries;
+ bpf->maps_in_use--;
+ kfree(nfp_map);
+
+ return 0;
+}
+
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case BPF_OFFLOAD_VERIFIER_PREP:
+ return nfp_bpf_verifier_prep(app, nn, bpf);
+ case BPF_OFFLOAD_TRANSLATE:
+ return nfp_bpf_translate(nn, bpf->offload.prog);
+ case BPF_OFFLOAD_DESTROY:
+ return nfp_bpf_destroy(nn, bpf->offload.prog);
+ case BPF_OFFLOAD_MAP_ALLOC:
+ return nfp_bpf_map_alloc(app->priv, bpf->offmap);
+ case BPF_OFFLOAD_MAP_FREE:
+ return nfp_bpf_map_free(app->priv, bpf->offmap);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_mtu;
dma_addr_t dma_addr;
+ void *img;
int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
- nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
+ NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
return -EOPNOTSUPP;
}
- dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
+ img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
+ if (IS_ERR(img))
+ return PTR_ERR(img);
+
+ dma_addr = dma_map_single(nn->dp.dev, img,
nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
- if (dma_mapping_error(nn->dp.dev, dma_addr))
+ if (dma_mapping_error(nn->dp.dev, dma_addr)) {
+ kfree(img);
return -ENOMEM;
+ }
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
@@ -178,15 +315,18 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
/* Load up the JITed code */
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
if (err)
- nn_err(nn, "FW command error while loading BPF: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW command error while loading BPF");
dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
+ kfree(img);
return err;
}
-static void nfp_net_bpf_start(struct nfp_net *nn)
+static void
+nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
int err;
@@ -195,7 +335,8 @@ static void nfp_net_bpf_start(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
- nn_err(nn, "FW command error while enabling BPF: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW command error while enabling BPF");
}
static int nfp_net_bpf_stop(struct nfp_net *nn)
@@ -210,12 +351,12 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
}
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
- bool old_prog)
+ bool old_prog, struct netlink_ext_ack *extack)
{
int err;
if (prog) {
- struct bpf_dev_offload *offload = prog->aux->offload;
+ struct bpf_prog_offload *offload = prog->aux->offload;
if (!offload)
return -EINVAL;
@@ -228,7 +369,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
if (!(cap & NFP_NET_BPF_CAP_RELO)) {
- nn_err(nn, "FW does not support live reload\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW does not support live reload");
return -EBUSY;
}
}
@@ -240,12 +382,12 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
if (old_prog && !prog)
return nfp_net_bpf_stop(nn);
- err = nfp_net_bpf_load(nn, prog);
+ err = nfp_net_bpf_load(nn, prog, extack);
if (err)
return err;
if (!old_prog)
- nfp_net_bpf_start(nn);
+ nfp_net_bpf_start(nn, extack);
return 0;
}
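
Throughout offload.c the nn_err()/nn_info() prints on the offload paths give way to NL_SET_ERR_MSG_MOD(), so failure reasons travel back to the requesting process via the netlink extended ack instead of landing only in the kernel log. The shape of that pattern in isolation (a minimal sketch with an invented callback, not this driver's code):

    #include <linux/bpf.h>
    #include <linux/netdevice.h>
    #include <linux/netlink.h>

    static int example_bpf_offload(struct net_device *dev, struct bpf_prog *prog,
                                   struct netlink_ext_ack *extack)
    {
            if (!prog->aux->offload) {
                    /* NL_SET_ERR_MSG_MOD() prefixes the string with
                     * KBUILD_MODNAME and attaches it to the netlink ack,
                     * so the caller sees the reason directly.
                     */
                    NL_SET_ERR_MSG_MOD(extack, "program is not bound to this device");
                    return -EINVAL;
            }
            return 0;
    }
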
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 8d43491ddd6b..479f602887e9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,16 +31,18 @@
* SOFTWARE.
*/
-#define pr_fmt(fmt) "NFP net bpf: " fmt
-
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
+#include "fw.h"
#include "main.h"
-static struct nfp_insn_meta *
+#define pr_vlog(env, fmt, ...) \
+ bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
+
+struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
{
@@ -68,6 +70,114 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return meta;
}
+static void
+nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
+ struct nfp_insn_meta *meta,
+ const struct bpf_reg_state *reg2)
+{
+ unsigned int location = UINT_MAX;
+ int imm;
+
+ /* The datapath can usually give us guarantees on how much adjust
+ * head can be done without the need for any checks. Optimize the
+ * simple case where there is only one adjust head by a constant.
+ */
+ if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
+ goto exit_set_location;
+ imm = reg2->var_off.value;
+ /* The translator will skip all checks; we need to guarantee min pkt len */
+ if (imm > ETH_ZLEN - ETH_HLEN)
+ goto exit_set_location;
+ if (imm > (int)bpf->adjust_head.guaranteed_add ||
+ imm < -bpf->adjust_head.guaranteed_sub)
+ goto exit_set_location;
+
+ if (nfp_prog->adjust_head_location) {
+ /* Only one call per program allowed */
+ if (nfp_prog->adjust_head_location != meta->n)
+ goto exit_set_location;
+
+ if (meta->arg2.var_off.value != imm)
+ goto exit_set_location;
+ }
+
+ location = meta->n;
+exit_set_location:
+ nfp_prog->adjust_head_location = location;
+}
+
+static int
+nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
+ struct nfp_insn_meta *meta)
+{
+ const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
+ const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
+ struct nfp_app_bpf *bpf = nfp_prog->bpf;
+ u32 func_id = meta->insn.imm;
+ s64 off, old_off;
+
+ switch (func_id) {
+ case BPF_FUNC_xdp_adjust_head:
+ if (!bpf->adjust_head.off_max) {
+ pr_vlog(env, "adjust_head not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
+ pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
+ return -EOPNOTSUPP;
+ }
+
+ nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
+ break;
+
+ case BPF_FUNC_map_lookup_elem:
+ if (!bpf->helpers.map_lookup) {
+ pr_vlog(env, "map_lookup: not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ if (reg2->type != PTR_TO_STACK) {
+ pr_vlog(env,
+ "map_lookup: unsupported key ptr type %d\n",
+ reg2->type);
+ return -EOPNOTSUPP;
+ }
+ if (!tnum_is_const(reg2->var_off)) {
+ pr_vlog(env, "map_lookup: variable key pointer\n");
+ return -EOPNOTSUPP;
+ }
+
+ off = reg2->var_off.value + reg2->off;
+ if (-off % 4) {
+ pr_vlog(env,
+ "map_lookup: unaligned stack pointer %lld\n",
+ -off);
+ return -EOPNOTSUPP;
+ }
+
+ /* The rest of the checks apply only if we re-parse the same insn */
+ if (!meta->func_id)
+ break;
+
+ old_off = meta->arg2.var_off.value + meta->arg2.off;
+ meta->arg2_var_off |= off != old_off;
+
+ if (meta->arg1.map_ptr != reg1->map_ptr) {
+ pr_vlog(env, "map_lookup: called for different map\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ pr_vlog(env, "unsupported function id: %d\n", func_id);
+ return -EOPNOTSUPP;
+ }
+
+ meta->func_id = func_id;
+ meta->arg1 = *reg1;
+ meta->arg2 = *reg2;
+
+ return 0;
+}
+
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
struct bpf_verifier_env *env)
@@ -82,7 +192,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
- pr_info("unsupported exit state: %d, var_off: %s\n",
+ pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
reg0->type, tn_buf);
return -EINVAL;
}
@@ -92,7 +202,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
imm <= TC_ACT_REDIRECT &&
imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
imm != TC_ACT_QUEUED) {
- pr_info("unsupported exit state: %d, imm: %llx\n",
+ pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
reg0->type, imm);
return -EINVAL;
}
@@ -103,12 +213,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
struct nfp_insn_meta *meta,
- const struct bpf_reg_state *reg)
+ const struct bpf_reg_state *reg,
+ struct bpf_verifier_env *env)
{
s32 old_off, new_off;
if (!tnum_is_const(reg->var_off)) {
- pr_info("variable ptr stack access\n");
+ pr_vlog(env, "variable ptr stack access\n");
return -EINVAL;
}
@@ -126,7 +237,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
if (old_off % 4 == new_off % 4)
return 0;
- pr_info("stack access changed location was:%d is:%d\n",
+ pr_vlog(env, "stack access changed location was:%d is:%d\n",
old_off, new_off);
return -EINVAL;
}
@@ -140,19 +251,27 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
if (reg->type != PTR_TO_CTX &&
reg->type != PTR_TO_STACK &&
+ reg->type != PTR_TO_MAP_VALUE &&
reg->type != PTR_TO_PACKET) {
- pr_info("unsupported ptr type: %d\n", reg->type);
+ pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
return -EINVAL;
}
if (reg->type == PTR_TO_STACK) {
- err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
+ err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
if (err)
return err;
}
+ if (reg->type == PTR_TO_MAP_VALUE) {
+ if (is_mbpf_store(meta)) {
+ pr_vlog(env, "map writes not supported\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
- pr_info("ptr type changed for instruction %d -> %d\n",
+ pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
meta->ptr.type, reg->type);
return -EINVAL;
}
@@ -171,25 +290,33 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
nfp_prog->verifier_meta = meta;
+ if (!nfp_bpf_supported_opcode(meta->insn.code)) {
+ pr_vlog(env, "instruction %#02x not supported\n",
+ meta->insn.code);
+ return -EINVAL;
+ }
+
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
- pr_err("program uses extended registers - jit hardening?\n");
+ pr_vlog(env, "program uses extended registers - jit hardening?\n");
return -EINVAL;
}
+ if (meta->insn.code == (BPF_JMP | BPF_CALL))
+ return nfp_bpf_check_call(nfp_prog, env, meta);
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
return nfp_bpf_check_exit(nfp_prog, env);
- if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
+ if (is_mbpf_load(meta))
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.src_reg);
- if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
+ if (is_mbpf_store(meta))
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.dst_reg);
return 0;
}
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
.insn_hook = nfp_verify_insn,
};
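
With this change the driver's verifier hook is exported through struct bpf_prog_offload_ops rather than the old bpf_ext_analyzer_ops; at this point the ops table carries a single per-instruction callback invoked by the core verifier. The assumed shape for an arbitrary offload driver (a sketch, not nfp code):

    #include <linux/bpf.h>
    #include <linux/bpf_verifier.h>

    static int example_verify_insn(struct bpf_verifier_env *env,
                                   int insn_idx, int prev_insn_idx)
    {
            /* Inspect cur_regs(env) or env->prog->insnsi[insn_idx] here and
             * return a negative errno to abort verification for the device.
             */
            return 0;
    }

    const struct bpf_prog_offload_ops example_offload_ops = {
            .insn_hook = example_verify_insn,
    };
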
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c1c595f8bb87..b3567a596fc1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -81,6 +81,9 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
+ return tun_type == NFP_FL_TUNNEL_GENEVE;
+
return false;
}
@@ -93,13 +96,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
size_t act_size = sizeof(struct nfp_fl_output);
struct net_device *out_dev;
u16 tmp_flags;
- int ifindex;
output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- ifindex = tcf_mirred_ifindex(action);
- out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
+ out_dev = tcf_mirred_dev(action);
if (!out_dev)
return -EOPNOTSUPP;
@@ -138,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
return 0;
}
-static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
+ const struct tc_action *action)
{
struct ip_tunnel_info *tun = tcf_tunnel_info(action);
-
- return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+ struct nfp_flower_priv *priv = app->priv;
+
+ switch (tun->key.tp_dst) {
+ case htons(NFP_FL_VXLAN_PORT):
+ return NFP_FL_TUNNEL_VXLAN;
+ case htons(NFP_FL_GENEVE_PORT):
+ if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
+ return NFP_FL_TUNNEL_GENEVE;
+ /* FALLTHROUGH */
+ default:
+ return NFP_FL_TUNNEL_NONE;
+ }
}
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
@@ -167,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
}
static int
-nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
- const struct tc_action *action,
- struct nfp_fl_pre_tunnel *pre_tun)
+nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+ const struct tc_action *action,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type)
{
- struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
- size_t act_size = sizeof(struct nfp_fl_set_vxlan);
- u32 tmp_set_vxlan_type_index = 0;
+ size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
- if (vxlan->options_len) {
- /* Do not support options e.g. vxlan gpe. */
+ if (ip_tun->options_len)
return -EOPNOTSUPP;
- }
- set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
- set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
/* Set tunnel type and pre-tunnel index. */
- tmp_set_vxlan_type_index |=
- FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+ tmp_set_ip_tun_type_index |=
+ FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
- set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
-
- set_vxlan->tun_id = vxlan->key.tun_id;
- set_vxlan->tun_flags = vxlan->key.tun_flags;
- set_vxlan->ipv4_ttl = vxlan->key.ttl;
- set_vxlan->ipv4_tos = vxlan->key.tos;
+ set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
+ set_tun->tun_id = ip_tun->key.tun_id;
/* Complete pre_tunnel action. */
- pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
return 0;
}
@@ -435,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
+ struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
- struct nfp_fl_set_vxlan *s_vxl;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
struct nfp_fl_output *output;
@@ -484,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
- } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+ } else if (is_tcf_tunnel_set(a)) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+ if (*tun_type == NFP_FL_TUNNEL_NONE)
+ return -EOPNOTSUPP;
+
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+ sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
- *tun_type = NFP_FL_TUNNEL_VXLAN;
pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
- s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+ set_tun = (void *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
if (err)
return err;
-
- *a_len += sizeof(struct nfp_fl_set_vxlan);
+ *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
} else if (is_tcf_tunnel_release(a)) {
/* Tunnel decap is handled by default so accept action. */
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index e98bb9cdb6a3..baaea6f1a9d8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -125,6 +125,27 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
return 0;
}
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
+{
+ struct nfp_flower_cmsg_portreify *msg;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
+ GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
+ msg->reserved = 0;
+ msg->info = cpu_to_be16(exists);
+
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ return 0;
+}
+
static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
@@ -161,6 +182,28 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
}
static void
+nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_cmsg_portreify *msg;
+ bool exists;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+
+ rcu_read_lock();
+ exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ rcu_read_unlock();
+ if (!exists) {
+ nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
+ be32_to_cpu(msg->portnum));
+ return;
+ }
+
+ atomic_inc(&priv->reify_replies);
+ wake_up_interruptible(&priv->reify_wait_queue);
+}
+
+static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_cmsg_hdr *cmsg_hdr;
@@ -168,20 +211,14 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
- if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
- nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
- cmsg_hdr->version);
- goto out;
- }
-
type = cmsg_hdr->type;
switch (type) {
+ case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
+ nfp_flower_cmsg_portreify_rx(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
- case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
- nfp_flower_rx_flow_stats(app, skb);
- break;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
nfp_tunnel_request_route(app, skb);
break;
@@ -217,7 +254,23 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work)
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_cmsg_hdr *cmsg_hdr;
+
+ cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
- skb_queue_tail(&priv->cmsg_skbs, skb);
- schedule_work(&priv->cmsg_work);
+ if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
+ nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
+ cmsg_hdr->version);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
+ /* We need to deal with stats updates from HW asap */
+ nfp_flower_rx_flow_stats(app, skb);
+ dev_consume_skb_any(skb);
+ } else {
+ skb_queue_tail(&priv->cmsg_skbs, skb);
+ schedule_work(&priv->cmsg_work);
+ }
}
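
The receive path above now demultiplexes before queuing: FLOW_STATS messages are handled directly in RX context so hardware stats updates apply as soon as possible, while everything else keeps going through the workqueue. The control flow reduced to its skeleton (illustrative names, not driver API):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/workqueue.h>

    struct example_priv {
            struct sk_buff_head rx_queue;   /* deferred control messages */
            struct work_struct rx_work;     /* drains rx_queue */
    };

    /* hypothetical classifier/handler, named for illustration only */
    bool example_is_stats_msg(const struct sk_buff *skb);
    void example_update_stats(struct example_priv *priv, struct sk_buff *skb);

    static void example_ctrl_rx(struct example_priv *priv, struct sk_buff *skb)
    {
            if (example_is_stats_msg(skb)) {
                    example_update_stats(priv, skb); /* handled in RX context */
                    dev_consume_skb_any(skb);        /* consumed, not dropped */
            } else {
                    skb_queue_tail(&priv->rx_queue, skb);
                    schedule_work(&priv->rx_work);   /* process context */
            }
    }
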
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 66070741d55f..adfe474c2cf0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -41,7 +41,7 @@
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
-#define NFP_FLOWER_LAYER_META BIT(0)
+#define NFP_FLOWER_LAYER_EXT_META BIT(0)
#define NFP_FLOWER_LAYER_PORT BIT(1)
#define NFP_FLOWER_LAYER_MAC BIT(2)
#define NFP_FLOWER_LAYER_TP BIT(3)
@@ -50,8 +50,7 @@
#define NFP_FLOWER_LAYER_CT BIT(6)
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
-#define NFP_FLOWER_LAYER_ETHER BIT(3)
-#define NFP_FLOWER_LAYER_ARP BIT(4)
+#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
@@ -108,6 +107,7 @@
enum nfp_flower_tun_type {
NFP_FL_TUNNEL_NONE = 0,
NFP_FL_TUNNEL_VXLAN = 2,
+ NFP_FL_TUNNEL_GENEVE = 4,
};
struct nfp_fl_act_head {
@@ -165,20 +165,6 @@ struct nfp_fl_pop_vlan {
__be16 reserved;
};
-/* Metadata without L2 (1W/4B)
- * ----------------------------------------------------------------
- * 3 2 1
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | key_layers | mask_id | reserved |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
-struct nfp_flower_meta_one {
- u8 nfp_flow_key_layer;
- u8 mask_id;
- u16 reserved;
-};
-
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -187,16 +173,13 @@ struct nfp_fl_pre_tunnel {
__be32 extra[3];
};
-struct nfp_fl_set_vxlan {
+struct nfp_fl_set_ipv4_udp_tun {
struct nfp_fl_act_head head;
__be16 reserved;
- __be64 tun_id;
+ __be64 tun_id __packed;
__be32 tun_type_index;
- __be16 tun_flags;
- u8 ipv4_ttl;
- u8 ipv4_tos;
- __be32 extra[2];
-} __packed;
+ __be32 extra[3];
+};
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
@@ -209,12 +192,24 @@ struct nfp_fl_set_vxlan {
* NOTE: | TCI |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-struct nfp_flower_meta_two {
+struct nfp_flower_meta_tci {
u8 nfp_flow_key_layer;
u8 mask_id;
__be16 tci;
};
+/* Extended metadata for additional key_layers (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | nfp_flow_key_layer2 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ext_meta {
+ __be32 nfp_flow_key_layer2;
+};
+
/* Port details (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -313,7 +308,7 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
-/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
* -----------------------------------------------------------------
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -322,22 +317,17 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_dst |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | tun_flags | tos | ttl |
+ * | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | gpe_flags | Reserved | Next Protocol |
+ * | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | VNI | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-struct nfp_flower_vxlan {
+struct nfp_flower_ipv4_udp_tun {
__be32 ip_src;
__be32 ip_dst;
- __be16 tun_flags;
- u8 tos;
- u8 ttl;
- u8 gpe_flags;
- u8 reserved[2];
- u8 nxt_proto;
+ __be32 reserved[2];
__be32 tun_id;
};
@@ -360,6 +350,7 @@ struct nfp_flower_cmsg_hdr {
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
@@ -396,6 +387,15 @@ struct nfp_flower_cmsg_portmod {
#define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0)
+/* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */
+struct nfp_flower_cmsg_portreify {
+ __be32 portnum;
+ u16 reserved;
+ __be16 info;
+};
+
+#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0)
+
enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
@@ -454,6 +454,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
unsigned int nbi, unsigned int nbi_port,
unsigned int phys_port);
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists);
void nfp_flower_cmsg_process_rx(struct work_struct *work);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
struct sk_buff *
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 8fcc90c0d2d3..742d6f1575b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -32,6 +32,7 @@
*/
#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
@@ -98,7 +99,57 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
if (port >= reprs->num_reprs)
return NULL;
- return reprs->reprs[port];
+ return rcu_dereference(reprs->reprs[port]);
+}
+
+static int
+nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
+ bool exists)
+{
+ struct nfp_reprs *reprs;
+ int i, err, count = 0;
+
+ reprs = rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+ if (!reprs)
+ return 0;
+
+ for (i = 0; i < reprs->num_reprs; i++) {
+ struct net_device *netdev;
+
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ err = nfp_flower_cmsg_portreify(repr, exists);
+ if (err)
+ return err;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ if (!tot_repl)
+ return 0;
+
+ lockdep_assert_held(&app->pf->lock);
+ err = wait_event_interruptible_timeout(priv->reify_wait_queue,
+ atomic_read(replies) >= tot_repl,
+ msecs_to_jiffies(10));
+ if (err <= 0) {
+ nfp_warn(app->cpp, "Not all reprs responded to reify\n");
+ return -EIO;
+ }
+
+ return 0;
}
static int
@@ -110,7 +161,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
if (err)
return err;
- netif_carrier_on(repr->netdev);
netif_tx_wake_all_queues(repr->netdev);
return 0;
@@ -119,7 +169,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
- netif_carrier_off(repr->netdev);
netif_tx_disable(repr->netdev);
return nfp_flower_cmsg_portmod(repr, false);
@@ -140,6 +189,24 @@ nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
netdev_priv(netdev));
}
+static void
+nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_flower_priv *priv = app->priv;
+ atomic_t *replies = &priv->reify_replies;
+ int err;
+
+ atomic_set(replies, 0);
+ err = nfp_flower_cmsg_portreify(repr, false);
+ if (err) {
+ nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
+ return;
+ }
+
+ nfp_flower_wait_repr_reify(app, replies, 1);
+}
+
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -157,10 +224,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
{
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
+ atomic_t *replies = &priv->reify_replies;
enum nfp_port_type port_type;
struct nfp_reprs *reprs;
+ int i, err, reify_cnt;
const u8 queue = 0;
- int i, err;
port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
NFP_PORT_VF_PORT;
@@ -170,19 +238,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
return -ENOMEM;
for (i = 0; i < cnt; i++) {
+ struct net_device *repr;
struct nfp_port *port;
u32 port_id;
- reprs->reprs[i] = nfp_repr_alloc(app);
- if (!reprs->reprs[i]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[i], repr);
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
- port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
+ port = nfp_port_alloc(app, port_type, repr);
if (repr_type == NFP_REPR_TYPE_PF) {
port->pf_id = i;
port->vnic = priv->nn->dp.ctrl_bar;
@@ -193,11 +263,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
}
- eth_hw_addr_random(reprs->reprs[i]);
+ eth_hw_addr_random(repr);
port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
i, queue);
- err = nfp_repr_init(app, reprs->reprs[i],
+ err = nfp_repr_init(app, repr,
port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -206,14 +276,28 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
nfp_info(app->cpp, "%s%d Representor(%s) created\n",
repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
- reprs->reprs[i]->name);
+ repr->name);
}
nfp_app_reprs_set(app, repr_type, reprs);
+ atomic_set(replies, 0);
+ reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
+ if (reify_cnt < 0) {
+ err = reify_cnt;
+ nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+ goto err_reprs_remove;
+ }
+
+ err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+ if (err)
+ goto err_reprs_remove;
+
return 0;
+err_reprs_remove:
+ reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
return err;
}
@@ -233,10 +317,11 @@ static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
+ atomic_t *replies = &priv->reify_replies;
struct sk_buff *ctrl_skb;
struct nfp_reprs *reprs;
+ int err, reify_cnt;
unsigned int i;
- int err;
ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
if (!ctrl_skb)
@@ -250,17 +335,18 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
for (i = 0; i < eth_tbl->count; i++) {
unsigned int phys_port = eth_tbl->ports[i].index;
+ struct net_device *repr;
struct nfp_port *port;
u32 cmsg_port_id;
- reprs->reprs[phys_port] = nfp_repr_alloc(app);
- if (!reprs->reprs[phys_port]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
- port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT,
- reprs->reprs[phys_port]);
+ port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
goto err_reprs_clean;
@@ -271,11 +357,11 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
goto err_reprs_clean;
}
- SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev);
+ SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
nfp_net_get_mac_addr(app->pf, port);
cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
- err = nfp_repr_init(app, reprs->reprs[phys_port],
+ err = nfp_repr_init(app, repr,
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -288,23 +374,37 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
phys_port);
nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
- phys_port, reprs->reprs[phys_port]->name);
+ phys_port, repr->name);
}
nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- /* The MAC_REPR control message should be sent after the MAC
+ /* The REIFY/MAC_REPR control messages should be sent after the MAC
* representors are registered using nfp_app_reprs_set(). This is
* because the firmware may respond with control messages for the
* MAC representors, e.g. to provide the driver with information
* about their state, and without registration the driver will drop
* any such messages.
*/
+ atomic_set(replies, 0);
+ reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
+ if (reify_cnt < 0) {
+ err = reify_cnt;
+ nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+ goto err_reprs_remove;
+ }
+
+ err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+ if (err)
+ goto err_reprs_remove;
+
nfp_ctrl_tx(app->ctrl, ctrl_skb);
return 0;
+err_reprs_remove:
+ reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
kfree_skb(ctrl_skb);
return err;
@@ -381,7 +481,7 @@ static int nfp_flower_init(struct nfp_app *app)
{
const struct nfp_pf *pf = app->pf;
struct nfp_flower_priv *app_priv;
- u64 version;
+ u64 version, features;
int err;
if (!pf->eth_tbl) {
@@ -419,11 +519,20 @@ static int nfp_flower_init(struct nfp_app *app)
app_priv->app = app;
skb_queue_head_init(&app_priv->cmsg_skbs);
INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
+ init_waitqueue_head(&app_priv->reify_wait_queue);
err = nfp_flower_metadata_init(app);
if (err)
goto err_free_app_priv;
+ /* Extract the extra features supported by the firmware. */
+ features = nfp_rtsym_read_le(app->pf->rtbl,
+ "_abi_flower_extra_features", &err);
+ if (err)
+ app_priv->flower_ext_feats = 0;
+ else
+ app_priv->flower_ext_feats = features;
+
return 0;
err_free_app_priv:
@@ -456,6 +565,8 @@ static void nfp_flower_stop(struct nfp_app *app)
const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
+
+ .ctrl_cap_mask = ~0U,
.ctrl_has_meta = true,
.extra_cap = nfp_flower_extra_cap,
@@ -468,6 +579,7 @@ const struct nfp_app_type app_flower = {
.vnic_clean = nfp_flower_vnic_clean,
.repr_init = nfp_flower_repr_netdev_init,
+ .repr_preclean = nfp_flower_repr_netdev_preclean,
.repr_clean = nfp_flower_repr_netdev_clean,
.repr_open = nfp_flower_repr_netdev_open,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e6b26c5ae6e0..332ff0fdc038 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -34,6 +34,8 @@
#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1
+#include "cmsg.h"
+
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/time64.h>
@@ -58,6 +60,10 @@ struct nfp_app;
#define NFP_FL_MASK_ID_LOCATION 1
#define NFP_FL_VXLAN_PORT 4789
+#define NFP_FL_GENEVE_PORT 6081
+
+/* Extra features bitmap. */
+#define NFP_FL_FEATS_GENEVE BIT(0)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -77,6 +83,7 @@ struct nfp_fl_stats_id {
* @nn: Pointer to vNIC
* @mask_id_seed: Seed used for mask hash table
* @flower_version: HW version of flower
+ * @flower_ext_feats: Bitmap of extra features the HW supports
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
@@ -95,12 +102,16 @@ struct nfp_fl_stats_id {
* @nfp_mac_off_count: Number of MACs in address list
* @nfp_tun_mac_nb: Notifier to monitor link state
* @nfp_tun_neigh_nb: Notifier to monitor neighbour state
+ * @reify_replies: atomically stores the number of replies received
+ * from firmware for repr reify
+ * @reify_wait_queue: wait queue for repr reify response counting
*/
struct nfp_flower_priv {
struct nfp_app *app;
struct nfp_net *nn;
u32 mask_id_seed;
u64 flower_version;
+ u64 flower_ext_feats;
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -119,6 +130,8 @@ struct nfp_flower_priv {
int nfp_mac_off_count;
struct notifier_block nfp_tun_mac_nb;
struct notifier_block nfp_tun_neigh_nb;
+ atomic_t reify_replies;
+ wait_queue_head_t reify_wait_queue;
};
struct nfp_fl_key_ls {
@@ -172,7 +185,8 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ enum nfp_flower_tun_type tun_type);
int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 60614d4f0e22..37c2ecae2a7a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -38,7 +38,7 @@
#include "main.h"
static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
struct tc_cls_flower_offload *flow, u8 key_type,
bool mask_version)
{
@@ -46,7 +46,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
struct flow_dissector_key_vlan *flow_vlan;
u16 tmp_tci;
- memset(frame, 0, sizeof(struct nfp_flower_meta_two));
+ memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
/* Populate the metadata frame. */
frame->nfp_flow_key_layer = key_type;
frame->mask_id = ~0;
@@ -68,11 +68,9 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
}
static void
-nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
- frame->nfp_flow_key_layer = key_type;
- frame->mask_id = 0;
- frame->reserved = 0;
+ frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
static int
@@ -224,16 +222,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
}
static void
-nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version, __be32 *tun_dst)
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+ struct flow_dissector_key_ipv4_addrs *tun_ips;
struct flow_dissector_key_keyid *vni;
- /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
- memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+ memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -248,80 +245,68 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- vxlan_ips =
+ tun_ips =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
target);
- frame->ip_src = vxlan_ips->src;
- frame->ip_dst = vxlan_ips->dst;
- *tun_dst = vxlan_ips->dst;
+ frame->ip_src = tun_ips->src;
+ frame->ip_dst = tun_ips->dst;
}
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ enum nfp_flower_tun_type tun_type)
{
- enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
- __be32 tun_dst, tun_dst_mask = 0;
struct nfp_repr *netdev_repr;
int err;
u8 *ext;
u8 *msk;
- if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
- tun_type = NFP_FL_TUNNEL_VXLAN;
-
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
ext = nfp_flow->unmasked_data;
msk = nfp_flow->mask_data;
- if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
- /* Populate Exact Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
- flow, key_ls->key_layer, false);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
- flow, key_ls->key_layer, true);
- ext += sizeof(struct nfp_flower_meta_two);
- msk += sizeof(struct nfp_flower_meta_two);
-
- /* Populate Exact Port data. */
- err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- nfp_repr_get_port_id(netdev),
- false, tun_type);
- if (err)
- return err;
-
- /* Populate Mask Port Data. */
- err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- nfp_repr_get_port_id(netdev),
- true, tun_type);
- if (err)
- return err;
-
- ext += sizeof(struct nfp_flower_in_port);
- msk += sizeof(struct nfp_flower_in_port);
- } else {
- /* Populate Exact Metadata. */
- nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
- key_ls->key_layer);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
- key_ls->key_layer);
- ext += sizeof(struct nfp_flower_meta_one);
- msk += sizeof(struct nfp_flower_meta_one);
- }
- if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
- /* Additional Metadata Fields.
- * Currently unsupported.
- */
- return -EOPNOTSUPP;
+ /* Populate Exact Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
+ flow, key_ls->key_layer, false);
+ /* Populate Mask Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
+ flow, key_ls->key_layer, true);
+ ext += sizeof(struct nfp_flower_meta_tci);
+ msk += sizeof(struct nfp_flower_meta_tci);
+
+ /* Populate Extended Metadata if Required. */
+ if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
+ key_ls->key_layer_two);
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+ key_ls->key_layer_two);
+ ext += sizeof(struct nfp_flower_ext_meta);
+ msk += sizeof(struct nfp_flower_ext_meta);
}
+ /* Populate Exact Port data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
+ nfp_repr_get_port_id(netdev),
+ false, tun_type);
+ if (err)
+ return err;
+
+ /* Populate Mask Port Data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+ nfp_repr_get_port_id(netdev),
+ true, tun_type);
+ if (err)
+ return err;
+
+ ext += sizeof(struct nfp_flower_in_port);
+ msk += sizeof(struct nfp_flower_in_port);
+
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
/* Populate Exact MAC Data. */
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
@@ -366,15 +351,17 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv6);
}
- if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
+ key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+ __be32 tun_dst;
+
/* Populate Exact VXLAN Data. */
- nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
- flow, false, &tun_dst);
+ nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
/* Populate Mask VXLAN Data. */
- nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
- flow, true, &tun_dst_mask);
- ext += sizeof(struct nfp_flower_vxlan);
- msk += sizeof(struct nfp_flower_vxlan);
+ nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
+ tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+ ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
if (nfp_netdev_is_nfp_repr(netdev)) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 553f94f55dce..08c4c6dc5f7f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -130,12 +130,15 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
}
static int
-nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
+nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
- bool egress)
+ bool egress,
+ enum nfp_flower_tun_type *tun_type)
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
+ struct nfp_flower_priv *priv = app->priv;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -150,10 +153,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
return -EOPNOTSUPP;
key_layer_two = 0;
- key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
- key_size = sizeof(struct nfp_flower_meta_one) +
- sizeof(struct nfp_flower_in_port) +
- sizeof(struct nfp_flower_mac_mpls);
+ key_layer = NFP_FLOWER_LAYER_PORT;
+ key_size = sizeof(struct nfp_flower_meta_tci) +
+ sizeof(struct nfp_flower_in_port);
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+ dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ key_layer |= NFP_FLOWER_LAYER_MAC;
+ key_size += sizeof(struct nfp_flower_mac_mpls);
+ }
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -192,12 +200,27 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
FLOW_DISSECTOR_KEY_ENC_PORTS,
flow->key);
- if (mask_enc_ports->dst != cpu_to_be16(~0) ||
- enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+ if (mask_enc_ports->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
- key_layer |= NFP_FLOWER_LAYER_VXLAN;
- key_size += sizeof(struct nfp_flower_vxlan);
+ switch (enc_ports->dst) {
+ case htons(NFP_FL_VXLAN_PORT):
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ break;
+ case htons(NFP_FL_GENEVE_PORT):
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+ return -EOPNOTSUPP;
+ *tun_type = NFP_FL_TUNNEL_GENEVE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
} else if (egress) {
/* Reject non tunnel matches offloaded to egress repr. */
return -EOPNOTSUPP;
@@ -325,6 +348,7 @@ static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
@@ -334,7 +358,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
+ err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+ &tun_type);
if (err)
goto err_free_key_ls;
@@ -344,7 +369,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_key_ls;
}
- err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+ err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
+ tun_type);
if (err)
goto err_destroy_flow;
@@ -457,8 +483,7 @@ static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flower, bool egress)
{
- if (!eth_proto_is_802_3(flower->common.protocol) ||
- flower->common.chain_index)
+ if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
@@ -478,7 +503,7 @@ int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
{
struct nfp_repr *repr = cb_priv;
- if (!tc_can_offload(repr->netdev))
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -495,7 +520,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
{
struct nfp_repr *repr = cb_priv;
- if (!tc_can_offload(repr->netdev))
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
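
The two block callbacks above swap tc_can_offload() for tc_cls_can_offload_and_chain0(), which folds in the chain_index check that nfp_flower_repr_offload() previously did by hand (dropped earlier in this file) and can report the reason through the common extack. Roughly (a sketch of the assumed semantics, not the pkt_cls.h implementation):

    #include <net/pkt_cls.h>

    /* sketch: offloading is attempted only for chain 0 on an offload-capable
     * netdev; the real helper also fills in extack when it returns false
     */
    static bool example_can_offload_and_chain0(const struct net_device *dev,
                                               struct tc_cls_common_offload *common)
    {
            if (common->chain_index)
                    return false;
            return tc_can_offload(dev);
    }
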
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 955a9f44d244..6aedef0ad433 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -32,6 +32,8 @@
*/
#include <linux/bug.h>
+#include <linux/lockdep.h>
+#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -99,13 +101,19 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority)
}
struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
+{
+ return rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+}
+
+struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs)
{
struct nfp_reprs *old;
- old = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ old = nfp_reprs_get_locked(app, type);
rcu_assign_pointer(app->reprs[type], reprs);
return old;
@@ -116,7 +124,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
struct nfp_app *app;
if (id >= ARRAY_SIZE(apps) || !apps[id]) {
- nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
+ nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 0e5e0305ad1c..437964afa8ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -43,6 +43,7 @@
struct bpf_prog;
struct net_device;
struct netdev_bpf;
+struct netlink_ext_ack;
struct pci_dev;
struct sk_buff;
@@ -66,6 +67,9 @@ extern const struct nfp_app_type app_flower;
* struct nfp_app_type - application definition
* @id: application ID
* @name: application name
+ * @ctrl_cap_mask: ctrl vNIC capability mask, allows disabling features like
+ * IRQMOD, which are on by default but counter-productive for
+ * control messages, which are often latency-sensitive
* @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
*
* Callbacks
@@ -77,18 +81,20 @@ extern const struct nfp_app_type app_flower;
* @vnic_init: vNIC netdev was registered
* @vnic_clean: vNIC netdev about to be unregistered
* @repr_init: representor about to be registered
+ * @repr_preclean: representor about to be unregistered, executed before app
+ * reference to it is removed
* @repr_clean: representor about to be unregistered
* @repr_open: representor netdev open callback
* @repr_stop: representor netdev stop callback
+ * @change_mtu: MTU change on a netdev has been requested (veto-only, change
+ * is not guaranteed to be committed)
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
+ * @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
- * @bpf_verifier_prep: verifier prep for dev-specific BPF programs
- * @bpf_translate: translate call for dev-specific BPF programs
- * @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
@@ -98,6 +104,7 @@ struct nfp_app_type {
enum nfp_app_id id;
const char *name;
+ u32 ctrl_cap_mask;
bool ctrl_has_meta;
int (*init)(struct nfp_app *app);
@@ -112,11 +119,15 @@ struct nfp_app_type {
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
+ void (*repr_preclean)(struct nfp_app *app, struct net_device *netdev);
void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
+ int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
+ int new_mtu);
+
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
@@ -125,14 +136,11 @@ struct nfp_app_type {
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
+ int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *xdp);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
- int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf);
- int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
- int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
+ struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
@@ -163,6 +171,7 @@ struct nfp_app {
void *priv;
};
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
static inline int nfp_app_init(struct nfp_app *app)
@@ -226,12 +235,27 @@ nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
}
static inline void
+nfp_app_repr_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+ if (app->type->repr_preclean)
+ app->type->repr_preclean(app, netdev);
+}
+
+static inline void
nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
{
if (app->type->repr_clean)
app->type->repr_clean(app, netdev);
}
+static inline int
+nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+ if (!app || !app->type->change_mtu)
+ return 0;
+ return app->type->change_mtu(app, netdev, new_mtu);
+}
+
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
@@ -293,39 +317,29 @@ static inline int nfp_app_setup_tc(struct nfp_app *app,
return app->type->setup_tc(app, netdev, type, type_data);
}
-static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
- if (!app || !app->type->xdp_offload)
- return -EOPNOTSUPP;
- return app->type->xdp_offload(app, nn, prog);
+ if (!app || !app->type->bpf)
+ return -EINVAL;
+ return app->type->bpf(app, nn, bpf);
}
-static inline int
-nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
- if (!app || !app->type->bpf_verifier_prep)
+ if (!app || !app->type->xdp_offload)
return -EOPNOTSUPP;
- return app->type->bpf_verifier_prep(app, nn, bpf);
+ return app->type->xdp_offload(app, nn, prog, extack);
}
-static inline int
-nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
- if (!app || !app->type->bpf_translate)
- return -EOPNOTSUPP;
- return app->type->bpf_translate(app, nn, prog);
-}
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+ skb->data, skb->len);
-static inline int
-nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
-{
- if (!app || !app->type->bpf_destroy)
- return -EOPNOTSUPP;
- return app->type->bpf_destroy(app, nn, prog);
+ return __nfp_ctrl_tx(app->ctrl, skb);
}
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
@@ -378,6 +392,8 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type);
+struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs);
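
With the per-stage BPF hooks collapsed into a single ->bpf() callback, the dispatch on the netdev_bpf command moves into the app backend. A minimal sketch of such a dispatcher — the nfp_bpf_* helpers are placeholders for whatever the BPF app actually provides:

	static int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
			       struct netdev_bpf *bpf)
	{
		switch (bpf->command) {
		case BPF_OFFLOAD_VERIFIER_PREP:
			return nfp_bpf_verifier_prep(app, nn, bpf);
		case BPF_OFFLOAD_TRANSLATE:
			return nfp_bpf_translate(nn, bpf->offload.prog);
		case BPF_OFFLOAD_DESTROY:
			return nfp_bpf_destroy(nn, bpf->offload.prog);
		default:
			return -EINVAL;
		}
	}

Note that nfp_app_bpf() itself defaults to -EINVAL rather than -EOPNOTSUPP, matching the old default of the removed BPF_OFFLOAD_* cases in nfp_net_xdp().
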
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 830f6de25f47..3f6952b66a49 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -41,6 +41,7 @@
const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_WRITE8_SWAP] = { 0x02, 0x42 },
+ [CMD_TGT_WRITE32_SWAP] = { 0x02, 0x5f },
[CMD_TGT_READ8] = { 0x01, 0x43 },
[CMD_TGT_READ32] = { 0x00, 0x5c },
[CMD_TGT_READ32_LE] = { 0x01, 0x5c },
@@ -49,6 +50,94 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
};
+static bool unreg_is_imm(u16 reg)
+{
+ return (reg & UR_REG_IMM) == UR_REG_IMM;
+}
+
+u16 br_get_offset(u64 instr)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = FIELD_GET(OP_BR_ADDR_LO, instr);
+ addr_hi = FIELD_GET(OP_BR_ADDR_HI, instr);
+
+ return (addr_hi * ((OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)) + 1)) |
+ addr_lo;
+}
+
+void br_set_offset(u64 *instr, u16 offset)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = offset != addr_lo;
+ *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+ *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+ *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+void br_add_offset(u64 *instr, u16 offset)
+{
+ u16 addr;
+
+ addr = br_get_offset(*instr);
+ br_set_offset(instr, addr + offset);
+}
+
+static bool immed_can_modify(u64 instr)
+{
+ if (FIELD_GET(OP_IMMED_INV, instr) ||
+ FIELD_GET(OP_IMMED_SHIFT, instr) ||
+ FIELD_GET(OP_IMMED_WIDTH, instr) != IMMED_WIDTH_ALL) {
+ pr_err("Can't decode/encode immed!\n");
+ return false;
+ }
+ return true;
+}
+
+u16 immed_get_value(u64 instr)
+{
+ u16 reg;
+
+ if (!immed_can_modify(instr))
+ return 0;
+
+ reg = FIELD_GET(OP_IMMED_A_SRC, instr);
+ if (!unreg_is_imm(reg))
+ reg = FIELD_GET(OP_IMMED_B_SRC, instr);
+
+ return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr);
+}
+
+void immed_set_value(u64 *instr, u16 immed)
+{
+ if (!immed_can_modify(*instr))
+ return;
+
+ if (unreg_is_imm(FIELD_GET(OP_IMMED_A_SRC, *instr))) {
+ *instr &= ~FIELD_PREP(OP_IMMED_A_SRC, 0xff);
+ *instr |= FIELD_PREP(OP_IMMED_A_SRC, immed & 0xff);
+ } else {
+ *instr &= ~FIELD_PREP(OP_IMMED_B_SRC, 0xff);
+ *instr |= FIELD_PREP(OP_IMMED_B_SRC, immed & 0xff);
+ }
+
+ *instr &= ~OP_IMMED_IMM;
+ *instr |= FIELD_PREP(OP_IMMED_IMM, immed >> 8);
+}
+
+void immed_add_value(u64 *instr, u16 offset)
+{
+ u16 val;
+
+ if (!immed_can_modify(*instr))
+ return;
+
+ val = immed_get_value(*instr);
+ immed_set_value(instr, val + offset);
+}
+
static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
{
bool lm_id, lm_dec = false;
@@ -120,7 +209,8 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
reg->dst = nfp_swreg_to_unreg(dst, true);
/* Decode source operands */
- if (swreg_type(lreg) == swreg_type(rreg))
+ if (swreg_type(lreg) == swreg_type(rreg) &&
+ swreg_type(lreg) != NN_REG_NONE)
return -EFAULT;
if (swreg_type(lreg) == NN_REG_GPR_B ||
@@ -200,7 +290,8 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
/* Decode source operands */
- if (swreg_type(lreg) == swreg_type(rreg))
+ if (swreg_type(lreg) == swreg_type(rreg) &&
+ swreg_type(lreg) != NN_REG_NONE)
return -EFAULT;
if (swreg_type(lreg) == NN_REG_GPR_B ||
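
The new accessors above exist so later JIT stages can patch already-encoded 64-bit instructions in place, e.g. when relocating code. A short usage sketch (prog, br_idx, imm_idx and delta are hypothetical):

	/* move a branch target by delta instruction slots;
	 * br_set_offset() re-splits it across OP_BR_ADDR_HI/LO
	 */
	br_add_offset(&prog[br_idx], delta);

	/* rewrite the 16-bit constant of an immed instruction: the low
	 * byte lands in whichever A/B source operand encodes the
	 * immediate, the high byte in OP_IMMED_IMM
	 */
	immed_set_value(&prog[imm_idx], 0x1234);
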
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 74d0c11ab2f9..5f9291db98e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -77,9 +77,11 @@
enum br_mask {
BR_BEQ = 0x00,
BR_BNE = 0x01,
+ BR_BMI = 0x02,
BR_BHS = 0x04,
BR_BLO = 0x05,
BR_BGE = 0x08,
+ BR_BLT = 0x09,
BR_UNC = 0x18,
};
@@ -92,6 +94,10 @@ enum br_ctx_signal_state {
BR_CSS_NONE = 2,
};
+u16 br_get_offset(u64 instr);
+void br_set_offset(u64 *instr, u16 offset);
+void br_add_offset(u64 *instr, u16 offset);
+
#define OP_BBYTE_BASE 0x0c800000000ULL
#define OP_BB_A_SRC 0x000000000ffULL
#define OP_BB_BYTE 0x00000000300ULL
@@ -132,6 +138,10 @@ enum immed_shift {
IMMED_SHIFT_2B = 2,
};
+u16 immed_get_value(u64 instr);
+void immed_set_value(u64 *instr, u16 immed);
+void immed_add_value(u64 *instr, u16 offset);
+
#define OP_SHF_BASE 0x08000000000ULL
#define OP_SHF_A_SRC 0x000000000ffULL
#define OP_SHF_SC 0x00000000300ULL
@@ -175,6 +185,7 @@ enum alu_op {
ALU_OP_NONE = 0x00,
ALU_OP_ADD = 0x01,
ALU_OP_NOT = 0x04,
+ ALU_OP_ADD_2B = 0x05,
ALU_OP_AND = 0x08,
ALU_OP_SUB_C = 0x0d,
ALU_OP_ADD_C = 0x11,
@@ -209,6 +220,7 @@ enum alu_dst_ab {
#define OP_CMD_CNT 0x0000e000000ULL
#define OP_CMD_SIG 0x000f0000000ULL
#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_INDIR 0x20000000000ULL
#define OP_CMD_MODE 0x1c0000000000ULL
struct cmd_tgt_act {
@@ -219,6 +231,7 @@ struct cmd_tgt_act {
enum cmd_tgt_map {
CMD_TGT_READ8,
CMD_TGT_WRITE8_SWAP,
+ CMD_TGT_WRITE32_SWAP,
CMD_TGT_READ32,
CMD_TGT_READ32_LE,
CMD_TGT_READ32_SWAP,
@@ -240,6 +253,9 @@ enum cmd_ctx_swap {
CMD_CTX_NO_SWAP = 3,
};
+#define CMD_OVE_LEN BIT(7)
+#define CMD_OV_LEN GENMASK(12, 8)
+
#define OP_LCSR_BASE 0x0fc00000000ULL
#define OP_LCSR_A_SRC 0x000000003ffULL
#define OP_LCSR_B_SRC 0x000000ffc00ULL
@@ -257,6 +273,7 @@ enum lcsr_wr_src {
#define OP_CARB_BASE 0x0e000000000ULL
#define OP_CARB_OR 0x00000010000ULL
+#define NFP_CSR_CTX_PTR 0x20
#define NFP_CSR_ACT_LM_ADDR0 0x64
#define NFP_CSR_ACT_LM_ADDR1 0x6c
#define NFP_CSR_ACT_LM_ADDR2 0x94
@@ -377,4 +394,13 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
int nfp_ustore_check_valid_no_ecc(u64 insn);
u64 nfp_ustore_calc_ecc_insn(u64 insn);
+#define NFP_IND_ME_REFL_WR_SIG_INIT 3
+#define NFP_IND_ME_CTX_PTR_BASE_MASK GENMASK(9, 0)
+#define NFP_IND_NUM_CONTEXTS 8
+
+static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
+{
+ return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
+}
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 6c9f29c2e975..eb0fc614673d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -152,18 +152,8 @@ out:
static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct nfp_pf *pf = devlink_priv(devlink);
- int ret;
-
- mutex_lock(&pf->lock);
- if (!pf->app) {
- ret = -EBUSY;
- goto out;
- }
- ret = nfp_app_eswitch_mode_get(pf->app, mode);
-out:
- mutex_unlock(&pf->lock);
- return ret;
+ return nfp_app_eswitch_mode_get(pf->app, mode);
}
const struct devlink_ops nfp_devlink_ops = {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 35eaccbece36..cc570bb6563c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -45,6 +45,7 @@
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/vermagic.h>
+#include <linux/vmalloc.h>
#include <net/devlink.h>
#include "nfpcore/nfp.h"
@@ -498,17 +499,16 @@ static int nfp_pci_probe(struct pci_dev *pdev,
if (err)
goto err_hwinfo_free;
- err = devlink_register(devlink, &pdev->dev);
- if (err)
- goto err_hwinfo_free;
-
err = nfp_nsp_init(pdev, pf);
if (err)
- goto err_devlink_unreg;
+ goto err_hwinfo_free;
pf->mip = nfp_mip_open(pf->cpp);
pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
+ pf->dump_flag = NFP_DUMP_NSP_DIAG;
+ pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl);
+
err = nfp_pcie_sriov_read_nfd_limit(pf);
if (err)
goto err_fw_unload;
@@ -518,6 +518,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev,
"Error: %d VFs already enabled, but loaded FW can only support %d\n",
pf->num_vfs, pf->limit_vfs);
+ err = -EINVAL;
goto err_fw_unload;
}
@@ -544,8 +545,7 @@ err_fw_unload:
nfp_fw_unload(pf);
kfree(pf->eth_tbl);
kfree(pf->nspi);
-err_devlink_unreg:
- devlink_unregister(devlink);
+ vfree(pf->dumpspec);
err_hwinfo_free:
kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
@@ -566,19 +566,15 @@ err_pci_disable:
static void nfp_pci_remove(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
- struct devlink *devlink;
nfp_hwmon_unregister(pf);
- devlink = priv_to_devlink(pf);
-
- nfp_net_pci_remove(pf);
-
nfp_pcie_sriov_disable(pdev);
pci_sriov_set_totalvfs(pf->pdev, 0);
- devlink_unregister(devlink);
+ nfp_net_pci_remove(pf);
+ vfree(pf->dumpspec);
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
if (pf->fw_loaded)
@@ -592,7 +588,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
kfree(pf->eth_tbl);
kfree(pf->nspi);
mutex_destroy(&pf->lock);
- devlink_free(devlink);
+ devlink_free(priv_to_devlink(pf));
pci_release_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index be0ee59f2eb9..add46e28212b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -39,6 +39,7 @@
#ifndef NFP_MAIN_H
#define NFP_MAIN_H
+#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
@@ -62,6 +63,17 @@ struct nfp_port;
struct nfp_rtsym_table;
/**
+ * struct nfp_dumpspec - NFP FW dump specification structure
+ * @size: Size of the data
+ * @data: Sequence of TLVs, each being an instruction to dump some data
+ * from FW
+ */
+struct nfp_dumpspec {
+ u32 size;
+ u8 data[0];
+};
+
+/**
* struct nfp_pf - NFP PF-specific device structure
* @pdev: Backpointer to PCI device
* @cpp: Pointer to the CPP handle
@@ -83,6 +95,9 @@ struct nfp_rtsym_table;
* @mip: MIP handle
* @rtbl: RTsym table
* @hwinfo: HWInfo table
+ * @dumpspec: Debug dump specification
+ * @dump_flag: Store dump flag between set_dump and get_dump_flag
+ * @dump_len: Store dump length between set_dump and get_dump_flag
* @eth_tbl: NSP ETH table
* @nspi: NSP identification info
* @hwmon_dev: pointer to hwmon device
@@ -124,6 +139,9 @@ struct nfp_pf {
const struct nfp_mip *mip;
struct nfp_rtsym_table *rtbl;
struct nfp_hwinfo *hwinfo;
+ struct nfp_dumpspec *dumpspec;
+ u32 dump_flag;
+ u32 dump_len;
struct nfp_eth_table *eth_tbl;
struct nfp_nsp_identify *nspi;
@@ -157,4 +175,15 @@ void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+enum nfp_dump_diag {
+ NFP_DUMP_NSP_DIAG = 0,
+};
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl);
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ u32 flag);
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ struct ethtool_dump *dump_param, void *dest);
+
#endif /* NFP_MAIN_H */
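
These declarations slot into the standard ethtool dump flow (ETHTOOL_SET_DUMP, ETHTOOL_GET_DUMP_FLAG, ETHTOOL_GET_DUMP_DATA). A hedged sketch of the expected ethtool_ops wiring — the callback names and the way nfp_pf is resolved from the netdev are illustrative:

	static int nfp_port_set_dump(struct net_device *netdev,
				     struct ethtool_dump *val)
	{
		struct nfp_pf *pf = nfp_app_from_netdev(netdev)->pf; /* sketch */
		s64 len;

		len = nfp_net_dump_calculate_size(pf, pf->dumpspec, val->flag);
		if (len < 0)
			return len;

		pf->dump_flag = val->flag;
		pf->dump_len = len;
		return 0;
	}

	static int nfp_port_get_dump_flag(struct net_device *netdev,
					  struct ethtool_dump *dump)
	{
		struct nfp_pf *pf = nfp_app_from_netdev(netdev)->pf; /* sketch */

		dump->flag = pf->dump_flag;
		dump->len = pf->dump_len;
		return 0;
	}
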
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 7f9857c276b1..d88eda9707e6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -47,6 +47,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <net/xdp.h>
#include "nfp_net_ctrl.h"
@@ -350,6 +351,7 @@ struct nfp_net_rx_buf {
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
+ * @xdp_rxq: RX-ring info avail for XDP
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -361,13 +363,14 @@ struct nfp_net_rx_ring {
u32 idx;
int fl_qcidx;
+ unsigned int size;
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
dma_addr_t dma;
- unsigned int size;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned;
/**
@@ -548,6 +551,8 @@ struct nfp_net_dp {
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
+ * @stride_rx: Queue controller RX queue spacing
+ * @stride_tx: Queue controller TX queue spacing
* @r_vecs: Pre-allocated array of ring vectors
* @irq_entries: Pre-allocated array of MSI-X entries
* @lsc_handler: Handler for Link State Change interrupt
@@ -573,6 +578,7 @@ struct nfp_net_dp {
* @qcp_cfg: Pointer to QCP queue used for configuration notification
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
+ * @tlv_caps: Parsed TLV capabilities
* @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
@@ -639,6 +645,8 @@ struct nfp_net {
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
+ struct nfp_net_tlv_caps tlv_caps;
+
struct dentry *debugfs_dir;
struct list_head vnic_list;
@@ -834,6 +842,18 @@ static inline const char *nfp_net_name(struct nfp_net *nn)
return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}
+static inline void nfp_ctrl_lock(struct nfp_net *nn)
+ __acquires(&nn->r_vecs[0].lock)
+{
+ spin_lock_bh(&nn->r_vecs[0].lock);
+}
+
+static inline void nfp_ctrl_unlock(struct nfp_net *nn)
+ __releases(&nn->r_vecs[0].lock)
+{
+ spin_unlock_bh(&nn->r_vecs[0].lock);
+}
+
/* Globals */
extern const char nfp_driver_version[];
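
nfp_ctrl_lock()/nfp_ctrl_unlock() pair with the unlocked __nfp_ctrl_tx() declared in nfp_app.h: a caller that must keep several steps atomic — say, reserving a reply tag and queueing the request — takes r_vecs[0].lock once instead of per skb. The intended pattern, sketched:

	nfp_ctrl_lock(nn);
	/* e.g. allocate a message tag here; nothing can interleave */
	queued = __nfp_ctrl_tx(nn, skb);
	nfp_ctrl_unlock(nn);
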
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1a603fdd9e80..c0fd351c86b1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -293,9 +293,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*/
static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
+ u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
+ if (!nfp_net_has_mbox(&nn->tlv_caps)) {
+ nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
+ return -EIO;
+ }
+
+ nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
if (ret) {
@@ -303,7 +309,7 @@ static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
return ret;
}
- return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
+ return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
/* Interrupt configuration and handling
@@ -568,6 +574,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
return err;
}
nn_writeb(nn, ctrl_offset, entry->entry);
+ nfp_net_irq_unmask(nn, entry->entry);
return 0;
}
@@ -582,6 +589,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
unsigned int vector_idx)
{
nn_writeb(nn, ctrl_offset, 0xff);
+ nn_pci_flush(nn);
free_irq(nn->irq_entries[vector_idx].vector, nn);
}
@@ -1608,11 +1616,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
+ struct xdp_buff xdp;
int idx;
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp.rxq = &rx_ring->xdp_rxq;
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
@@ -1703,7 +1713,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
dp->bpf_offload_xdp) && !meta.portid) {
void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
- struct xdp_buff xdp;
int act;
xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
@@ -1917,6 +1926,13 @@ err_free:
return false;
}
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+ return nfp_ctrl_tx_one(nn, r_vec, skb, false);
+}
+
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
@@ -2252,6 +2268,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ if (dp->netdev)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
@@ -2275,7 +2293,14 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- int sz;
+ int sz, err;
+
+ if (dp->netdev) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+ rx_ring->idx);
+ if (err < 0)
+ return err;
+ }
rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
@@ -2439,7 +2464,7 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
* count.
*/
- factor = nn->me_freq_mhz / 16;
+ factor = nn->tlv_caps.me_freq_mhz / 16;
/* copy RX interrupt coalesce parameters */
value = (nn->rx_coalesce_max_frames << 16) |
@@ -2850,6 +2875,11 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
new_ctrl = nn->dp.ctrl;
+ if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
+
if (netdev->flags & IFF_PROMISC) {
if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
@@ -3034,6 +3064,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_dp *dp;
+ int err;
+
+ err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
+ if (err)
+ return err;
dp = nfp_net_clone_dp(nn);
if (!dp)
@@ -3055,8 +3090,9 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+ ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
}
@@ -3072,8 +3108,9 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+ ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
@@ -3366,7 +3403,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
if (err)
return err;
- err = nfp_app_xdp_offload(nn->app, nn, offload_prog);
+ err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
if (err && flags & XDP_FLAGS_HW_MODE)
return err;
@@ -3392,17 +3429,10 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
if (nn->dp.bpf_offload_xdp)
xdp->prog_attached = XDP_ATTACHED_HW;
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
+ xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
return 0;
- case BPF_OFFLOAD_VERIFIER_PREP:
- return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
- case BPF_OFFLOAD_TRANSLATE:
- return nfp_app_bpf_translate(nn->app, nn,
- xdp->offload.prog);
- case BPF_OFFLOAD_DESTROY:
- return nfp_app_bpf_destroy(nn->app, nn,
- xdp->offload.prog);
default:
- return -EINVAL;
+ return nfp_app_bpf(nn->app, nn, xdp);
}
}
@@ -3561,9 +3591,6 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
*/
void nfp_net_free(struct nfp_net *nn)
{
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
-
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -3729,18 +3756,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
nfp_net_set_ethtool_ops(netdev);
}
-/**
- * nfp_net_init() - Initialise/finalise the nfp_net structure
- * @nn: NFP Net device structure
- *
- * Return: 0 on success or negative errno on error.
- */
-int nfp_net_init(struct nfp_net *nn)
+static int nfp_net_read_caps(struct nfp_net *nn)
{
- int err;
-
- nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
-
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3773,6 +3790,29 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.rx_offset = NFP_NET_RX_OFFSET;
}
+ /* For control vNICs mask out the capabilities the app doesn't want. */
+ if (!nn->dp.netdev)
+ nn->cap &= nn->app->type->ctrl_cap_mask;
+
+ return 0;
+}
+
+/**
+ * nfp_net_init() - Initialise/finalise the nfp_net structure
+ * @nn: NFP Net device structure
+ *
+ * Return: 0 on success or negative errno on error.
+ */
+int nfp_net_init(struct nfp_net *nn)
+{
+ int err;
+
+ nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
+ err = nfp_net_read_caps(nn);
+ if (err)
+ return err;
+
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
nn->dp.mtu = nn->max_mtu;
@@ -3789,8 +3829,6 @@ int nfp_net_init(struct nfp_net *nn)
/* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
- if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
/* Allow IRQ moderation, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
@@ -3798,6 +3836,11 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
+ err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+ &nn->tlv_caps);
+ if (err)
+ return err;
+
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
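
nfp_net_change_mtu() now consults the app before cloning the datapath, per the veto-only contract documented in nfp_app.h. A hypothetical app-side callback (the MTU limit is a made-up example):

	static int nfp_flower_repr_change_mtu(struct nfp_app *app,
					      struct net_device *netdev,
					      int new_mtu)
	{
		/* veto only: returning 0 lets the core commit the change */
		if (new_mtu > NFP_FLOWER_MAX_MTU)	/* hypothetical limit */
			return -EINVAL;
		return 0;
	}
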
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
new file mode 100644
index 000000000000..ffb402746ad4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+
+static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps)
+{
+ memset(caps, 0, sizeof(*caps));
+ caps->me_freq_mhz = 1200;
+ caps->mbox_off = NFP_NET_CFG_MBOX_BASE;
+ caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ;
+}
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+ struct nfp_net_tlv_caps *caps)
+{
+ u8 __iomem *data = ctrl_mem + NFP_NET_CFG_TLV_BASE;
+ u8 __iomem *end = ctrl_mem + NFP_NET_CFG_BAR_SZ;
+ u32 hdr;
+
+ nfp_net_tlv_caps_reset(caps);
+
+ hdr = readl(data);
+ if (!hdr)
+ return 0;
+
+ while (true) {
+ unsigned int length, offset;
+ u32 hdr = readl(data);
+
+ length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);
+ offset = data - ctrl_mem;
+
+ /* Advance past the header */
+ data += 4;
+
+ if (length % NFP_NET_CFG_TLV_LENGTH_INC) {
+ dev_err(dev, "TLV size not multiple of %u len:%u\n",
+ NFP_NET_CFG_TLV_LENGTH_INC, length);
+ return -EINVAL;
+ }
+ if (data + length > end) {
+ dev_err(dev, "oversized TLV offset:%u len:%u\n",
+ offset, length);
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr)) {
+ case NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+ dev_err(dev, "NULL TLV at offset:%u\n", offset);
+ return -EINVAL;
+ case NFP_NET_CFG_TLV_TYPE_RESERVED:
+ break;
+ case NFP_NET_CFG_TLV_TYPE_END:
+ if (!length)
+ return 0;
+
+ dev_err(dev, "END TLV should be empty, has len:%d\n",
+ length);
+ return -EINVAL;
+ case NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+ if (length != 4) {
+ dev_err(dev,
+ "ME FREQ TLV should be 4B, is %dB\n",
+ length);
+ return -EINVAL;
+ }
+
+ caps->me_freq_mhz = readl(data);
+ break;
+ case NFP_NET_CFG_TLV_TYPE_MBOX:
+ if (!length) {
+ caps->mbox_off = 0;
+ caps->mbox_len = 0;
+ } else {
+ caps->mbox_off = data - ctrl_mem;
+ caps->mbox_len = length;
+ }
+ break;
+ default:
+ if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
+ break;
+
+ dev_err(dev, "unknown TLV type:%u offset:%u len:%u\n",
+ FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr),
+ offset, length);
+ return -EINVAL;
+ }
+
+ data += length;
+ if (data + 4 > end) {
+ dev_err(dev, "reached end of BAR without END TLV\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Not reached */
+ return -EINVAL;
+}
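
For reference, the header words parsed above decode with the NFP_NET_CFG_TLV_HEADER_* masks from nfp_net_ctrl.h. Taking the illustrative value 0x00030004:

	u32 hdr = 0x00030004;	/* illustrative */

	/* type 3: NFP_NET_CFG_TLV_TYPE_ME_FREQ */
	unsigned int type = FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr);
	/* length 4: one 32-bit value follows the header */
	unsigned int len = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);
	/* REQUIRED bit clear: an unknown type would be skipped, not fatal */
	bool required = FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr);
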
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 782d452e0fc2..eeecef2caac6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -43,9 +43,7 @@
#ifndef _NFP_NET_CTRL_H_
#define _NFP_NET_CTRL_H_
-/* IMPORTANT: This header file is shared with the FW,
- * no OS specific constructs, please!
- */
+#include <linux/types.h>
/**
* Configuration BAR size.
@@ -91,23 +89,24 @@
#define NFP_NET_RSS_IPV6_EX_UDP 9
/**
- * @NFP_NET_TXR_MAX: Maximum number of TX rings
- * @NFP_NET_RXR_MAX: Maximum number of RX rings
+ * Ring counts
+ * %NFP_NET_TXR_MAX: Maximum number of TX rings
+ * %NFP_NET_RXR_MAX: Maximum number of RX rings
*/
#define NFP_NET_TXR_MAX 64
#define NFP_NET_RXR_MAX 64
/**
* Read/Write config words (0x0000 - 0x002c)
- * @NFP_NET_CFG_CTRL: Global control
- * @NFP_NET_CFG_UPDATE: Indicate which fields are updated
- * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
- * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
- * @NFP_NET_CFG_MTU: Set MTU size
- * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
- * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions
- * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes
- * @NFP_NET_CFG_MACADDR: MAC address
+ * %NFP_NET_CFG_CTRL: Global control
+ * %NFP_NET_CFG_UPDATE: Indicate which fields are updated
+ * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
+ * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
+ * %NFP_NET_CFG_MTU: Set MTU size
+ * %NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
+ * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions
+ * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes
+ * %NFP_NET_CFG_MACADDR: MAC address
*
* TODO:
* - define Error details in UPDATE
@@ -176,14 +175,14 @@
/**
* Read-only words (0x0030 - 0x0050):
- * @NFP_NET_CFG_VERSION: Firmware version number
- * @NFP_NET_CFG_STS: Status
- * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_CFG_MAX_MTU: Maximum support MTU
- * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
- * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
+ * %NFP_NET_CFG_VERSION: Firmware version number
+ * %NFP_NET_CFG_STS: Status
+ * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
+ * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * %NFP_NET_CFG_MAX_MTU: Maximum supported MTU
+ * %NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
+ * %NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
*
* TODO:
* - define more STS bits
@@ -228,31 +227,37 @@
/**
* RSS capabilities
- * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
- * @NFP_NET_CFG_RSS_HFUNC)
+ * %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
+ * %NFP_NET_CFG_RSS_HFUNC)
*/
#define NFP_NET_CFG_RSS_CAP 0x0054
#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
/**
+ * TLV area start
+ * %NFP_NET_CFG_TLV_BASE: start anchor of the TLV area
+ */
+#define NFP_NET_CFG_TLV_BASE 0x0058
+
+/**
* VXLAN/UDP encap configuration
- * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
- * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
+ * %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
+ * %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
*/
#define NFP_NET_CFG_VXLAN_PORT 0x0060
#define NFP_NET_CFG_VXLAN_SZ 0x0008
/**
* BPF section
- * @NFP_NET_CFG_BPF_ABI: BPF ABI version
- * @NFP_NET_CFG_BPF_CAP: BPF capabilities
- * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
- * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
- * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
- * @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
- * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
- * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
- * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
+ * %NFP_NET_CFG_BPF_ABI: BPF ABI version
+ * %NFP_NET_CFG_BPF_CAP: BPF capabilities
+ * %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
+ * %NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
+ * %NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
+ * %NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
+ * %NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
+ * %NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
+ * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
#define NFP_NET_BPF_ABI 2
@@ -278,9 +283,9 @@
/**
* RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled
- * @NFP_NET_CFG_RSS_CFG: RSS configuration word
- * @NFP_NET_CFG_RSS_KEY: RSS "secret" key
- * @NFP_NET_CFG_RSS_ITBL: RSS indirection table
+ * %NFP_NET_CFG_RSS_CFG: RSS configuration word
+ * %NFP_NET_CFG_RSS_KEY: RSS "secret" key
+ * %NFP_NET_CFG_RSS_ITBL: RSS indirection table
*/
#define NFP_NET_CFG_RSS_BASE 0x0100
#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE
@@ -305,13 +310,13 @@
/**
* TX ring configuration (0x200 - 0x800)
- * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
- * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
- * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
- * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
- * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
- * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
+ * %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
+ * %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
+ * %NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
+ * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
*/
#define NFP_NET_CFG_TXR_BASE 0x0200
#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
@@ -325,12 +330,12 @@
/**
* RX ring configuration (0x0800 - 0x0c00)
- * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
- * @NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
- * @NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
- * @NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
- * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
+ * %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
+ * %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
+ * %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
+ * %NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
+ * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
*/
#define NFP_NET_CFG_RXR_BASE 0x0800
#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
@@ -343,7 +348,7 @@
/**
* Interrupt Control/Cause registers (0x0c00 - 0x0d00)
* These registers are only used when MSI-X auto-masking is not
- * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
+ * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is indexed
* by MSI-X entry and are 1B in size. If an entry is zero, the
* corresponding entry is enabled. If the FW generates an interrupt,
* it writes a cause into the corresponding field. This also masks
@@ -393,8 +398,8 @@
/**
* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
- * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
- * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
*/
#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
@@ -408,24 +413,105 @@
* 4B used for update command and 4B return code
* followed by a max of 504B of variable length value
*/
-#define NFP_NET_CFG_MBOX_CMD 0x1800
-#define NFP_NET_CFG_MBOX_RET 0x1804
-#define NFP_NET_CFG_MBOX_VAL 0x1808
+#define NFP_NET_CFG_MBOX_BASE 0x1800
#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
+#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
+#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
+#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
+#define NFP_NET_CFG_MBOX_SIMPLE_LEN 0x12
+
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
/**
* VLAN filtering using general use mailbox
- * @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
- * @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
- * @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
- * @NFP_NET_CFG_VXLAN_SZ: Size of the VLAN filter mailbox in bytes
+ * %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
+ * %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
+ * %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
+ * %NFP_NET_CFG_VLAN_FILTER_SZ: Size of the VLAN filter mailbox in bytes
*/
-#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL
+#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_SIMPLE_VAL
#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER
#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
+/**
+ * TLV capabilities
+ * %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
+ * %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
+ * %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV
+ * %NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments
+ * %NFP_NET_CFG_TLV_VALUE: Offset of value within the TLV
+ *
+ * List of simple TLV structures, first one starts at %NFP_NET_CFG_TLV_BASE.
+ * Last structure must be of type %NFP_NET_CFG_TLV_TYPE_END. Presence of TLVs
+ * is indicated by %NFP_NET_CFG_TLV_BASE being non-zero. TLV structures may
+ * fill the entire remainder of the BAR or be shorter. FW must make sure TLVs
+ * don't conflict with other features which allocate space beyond
+ * %NFP_NET_CFG_TLV_BASE. %NFP_NET_CFG_TLV_TYPE_RESERVED should be used to wrap
+ * space used by such features.
+ * Note that the 4 byte TLV header is not counted in %NFP_NET_CFG_TLV_LENGTH.
+ */
+#define NFP_NET_CFG_TLV_TYPE 0x00
+#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000
+#define NFP_NET_CFG_TLV_LENGTH 0x02
+#define NFP_NET_CFG_TLV_LENGTH_INC 4
+#define NFP_NET_CFG_TLV_VALUE 0x04
+
+#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000
+#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000
+#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff
+
+/**
+ * Capability TLV types
+ *
+ * %NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+ * Special TLV type to catch bugs, should never be encountered. Drivers should
+ * treat encountering this type as an error and refuse to probe.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_RESERVED:
+ * Reserved space, may contain legacy fixed-offset fields, or be used for
+ * padding. The use of this type should be otherwise avoided.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_END:
+ * Empty, end of TLV list. Must be the last TLV. Drivers will stop processing
+ * further TLVs when encountered.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+ * Single word, ME frequency in MHz as used in calculation for
+ * %NFP_NET_CFG_RXR_IRQ_MOD and %NFP_NET_CFG_TXR_IRQ_MOD.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_MBOX:
+ * Variable, mailbox area. Overwrites the default location which is
+ * %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ.
+ */
+#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
+#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
+#define NFP_NET_CFG_TLV_TYPE_END 2
+#define NFP_NET_CFG_TLV_TYPE_ME_FREQ 3
+#define NFP_NET_CFG_TLV_TYPE_MBOX 4
+
+struct device;
+
+/**
+ * struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
+ * @me_freq_mhz: ME clock_freq (MHz)
+ * @mbox_off: vNIC mailbox area offset
+ * @mbox_len: vNIC mailbox area length
+ */
+struct nfp_net_tlv_caps {
+ u32 me_freq_mhz;
+ unsigned int mbox_off;
+ unsigned int mbox_len;
+};
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+ struct nfp_net_tlv_caps *caps);
+
+static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
+{
+ return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
+}
+
#endif /* _NFP_NET_CTRL_H_ */
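
Putting the constants together, a minimal valid TLV area could look like this (illustrative layout; real firmware would wrap legacy fixed-offset fields such as the VXLAN port table at 0x0060 in RESERVED TLVs rather than placing END on top of them):

	/*
	 * 0x0058: 0x00030004	ME_FREQ header (type 3, length 4)
	 * 0x005c: 0x000004b0	value: 1200 MHz
	 * 0x0060: 0x00020000	END header (type 2, length 0)
	 */
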
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
new file mode 100644
index 000000000000..bb8ed460086e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+
+#include "nfp_asm.h"
+#include "nfp_main.h"
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp6000/nfp6000.h"
+
+#define NFP_DUMP_SPEC_RTSYM "_abi_dump_spec"
+
+#define ALIGN8(x) ALIGN(x, 8)
+
+enum nfp_dumpspec_type {
+ NFP_DUMPSPEC_TYPE_CPP_CSR = 0,
+ NFP_DUMPSPEC_TYPE_XPB_CSR = 1,
+ NFP_DUMPSPEC_TYPE_ME_CSR = 2,
+ NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR = 3,
+ NFP_DUMPSPEC_TYPE_RTSYM = 4,
+ NFP_DUMPSPEC_TYPE_HWINFO = 5,
+ NFP_DUMPSPEC_TYPE_FWNAME = 6,
+ NFP_DUMPSPEC_TYPE_HWINFO_FIELD = 7,
+ NFP_DUMPSPEC_TYPE_PROLOG = 10000,
+ NFP_DUMPSPEC_TYPE_ERROR = 10001,
+};
+
+/* The following structs must be carefully aligned so that they can be used to
+ * interpret the binary dumpspec and populate the dump data in a deterministic
+ * way.
+ */
+
+/* generic type plus length */
+struct nfp_dump_tl {
+ __be32 type;
+ __be32 length; /* chunk length to follow, aligned to 8 bytes */
+ char data[0];
+};
+
+/* NFP CPP parameters */
+struct nfp_dumpspec_cpp_isl_id {
+ u8 target;
+ u8 action;
+ u8 token;
+ u8 island;
+};
+
+struct nfp_dump_common_cpp {
+ struct nfp_dumpspec_cpp_isl_id cpp_id;
+ __be32 offset; /* address to start dump */
+ __be32 dump_length; /* total bytes to dump, aligned to reg size */
+};
+
+/* CSR dumpables */
+struct nfp_dumpspec_csr {
+ struct nfp_dump_tl tl;
+ struct nfp_dump_common_cpp cpp;
+ __be32 register_width; /* in bits */
+};
+
+struct nfp_dumpspec_rtsym {
+ struct nfp_dump_tl tl;
+ char rtsym[0];
+};
+
+/* header for register dumpable */
+struct nfp_dump_csr {
+ struct nfp_dump_tl tl;
+ struct nfp_dump_common_cpp cpp;
+ __be32 register_width; /* in bits */
+ __be32 error; /* error code encountered while reading */
+ __be32 error_offset; /* offset being read when error occurred */
+};
+
+struct nfp_dump_rtsym {
+ struct nfp_dump_tl tl;
+ struct nfp_dump_common_cpp cpp;
+ __be32 error; /* error code encountered while reading */
+ u8 padded_name_length; /* pad so data starts at 8 byte boundary */
+ char rtsym[0];
+ /* after padded_name_length, there is dump_length data */
+};
+
+struct nfp_dump_prolog {
+ struct nfp_dump_tl tl;
+ __be32 dump_level;
+};
+
+struct nfp_dump_error {
+ struct nfp_dump_tl tl;
+ __be32 error;
+ char padding[4];
+ char spec[0];
+};
+
+/* to track state through debug size calculation TLV traversal */
+struct nfp_level_size {
+ __be32 requested_level; /* input */
+ u32 total_size; /* output */
+};
+
+/* to track state during debug dump creation TLV traversal */
+struct nfp_dump_state {
+ __be32 requested_level; /* input param */
+ u32 dumped_size; /* adds up to size of dumped data */
+ u32 buf_size; /* size of buffer pointed to by p */
+ void *p; /* current point in dump buffer */
+};
+
+typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl,
+ void *param);
+
+static int
+nfp_traverse_tlvs(struct nfp_pf *pf, void *data, u32 data_length, void *param,
+ nfp_tlv_visit tlv_visit)
+{
+ long long remaining = data_length;
+ struct nfp_dump_tl *tl;
+ u32 total_tlv_size;
+ void *p = data;
+ int err;
+
+ while (remaining >= sizeof(*tl)) {
+ tl = p;
+ if (!tl->type && !tl->length)
+ break;
+
+ if (be32_to_cpu(tl->length) > remaining - sizeof(*tl))
+ return -EINVAL;
+
+ total_tlv_size = sizeof(*tl) + be32_to_cpu(tl->length);
+
+ /* Spec TLVs should be aligned to 4 bytes. */
+ if (total_tlv_size % 4 != 0)
+ return -EINVAL;
+
+ p += total_tlv_size;
+ remaining -= total_tlv_size;
+ err = tlv_visit(pf, tl, param);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static u32 nfp_get_numeric_cpp_id(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+ return NFP_CPP_ISLAND_ID(cpp_id->target, cpp_id->action, cpp_id->token,
+ cpp_id->island);
+}
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl)
+{
+ const struct nfp_rtsym *specsym;
+ struct nfp_dumpspec *dumpspec;
+ int bytes_read;
+ u32 cpp_id;
+
+ specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM);
+ if (!specsym)
+ return NULL;
+
+ /* expected size of this buffer is in the order of tens of kilobytes */
+ dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size);
+ if (!dumpspec)
+ return NULL;
+
+ dumpspec->size = specsym->size;
+
+ cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0,
+ specsym->domain);
+
+ bytes_read = nfp_cpp_read(cpp, cpp_id, specsym->addr, dumpspec->data,
+ specsym->size);
+ if (bytes_read != specsym->size) {
+ vfree(dumpspec);
+ nfp_warn(cpp, "Debug dump specification read failed.\n");
+ return NULL;
+ }
+
+ return dumpspec;
+}
+
+static int nfp_dump_error_tlv_size(struct nfp_dump_tl *spec)
+{
+ return ALIGN8(sizeof(struct nfp_dump_error) + sizeof(*spec) +
+ be32_to_cpu(spec->length));
+}
+
+static int nfp_calc_fwname_tlv_size(struct nfp_pf *pf)
+{
+ u32 fwname_len = strlen(nfp_mip_name(pf->mip));
+
+ return sizeof(struct nfp_dump_tl) + ALIGN8(fwname_len + 1);
+}
+
+static int nfp_calc_hwinfo_field_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+ u32 tl_len, key_len;
+ const char *value;
+
+ tl_len = be32_to_cpu(spec->length);
+ key_len = strnlen(spec->data, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv_size(spec);
+
+ value = nfp_hwinfo_lookup(pf->hwinfo, spec->data);
+ if (!value)
+ return nfp_dump_error_tlv_size(spec);
+
+ return sizeof(struct nfp_dump_tl) + ALIGN8(key_len + strlen(value) + 2);
+}
+
+static bool nfp_csr_spec_valid(struct nfp_dumpspec_csr *spec_csr)
+{
+ u32 required_read_sz = sizeof(*spec_csr) - sizeof(spec_csr->tl);
+ u32 available_sz = be32_to_cpu(spec_csr->tl.length);
+ u32 reg_width;
+
+ if (available_sz < required_read_sz)
+ return false;
+
+ reg_width = be32_to_cpu(spec_csr->register_width);
+
+ return reg_width == 32 || reg_width == 64;
+}
+
+static int
+nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+ struct nfp_rtsym_table *rtbl = pf->rtbl;
+ struct nfp_dumpspec_rtsym *spec_rtsym;
+ const struct nfp_rtsym *sym;
+ u32 tl_len, key_len;
+ u32 size;
+
+ spec_rtsym = (struct nfp_dumpspec_rtsym *)spec;
+ tl_len = be32_to_cpu(spec->length);
+ key_len = strnlen(spec_rtsym->rtsym, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv_size(spec);
+
+ sym = nfp_rtsym_lookup(rtbl, spec_rtsym->rtsym);
+ if (!sym)
+ return nfp_dump_error_tlv_size(spec);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS)
+ size = sizeof(sym->addr);
+ else
+ size = sym->size;
+
+ return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) +
+ ALIGN8(size);
+}
+
+static int
+nfp_add_tlv_size(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+ struct nfp_dumpspec_csr *spec_csr;
+ u32 *size = param;
+ u32 hwinfo_size;
+
+ switch (be32_to_cpu(tl->type)) {
+ case NFP_DUMPSPEC_TYPE_FWNAME:
+ *size += nfp_calc_fwname_tlv_size(pf);
+ break;
+ case NFP_DUMPSPEC_TYPE_CPP_CSR:
+ case NFP_DUMPSPEC_TYPE_XPB_CSR:
+ case NFP_DUMPSPEC_TYPE_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ if (!nfp_csr_spec_valid(spec_csr))
+ *size += nfp_dump_error_tlv_size(tl);
+ else
+ *size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+ ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+ break;
+ case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ if (!nfp_csr_spec_valid(spec_csr))
+ *size += nfp_dump_error_tlv_size(tl);
+ else
+ *size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+ ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length) *
+ NFP_IND_NUM_CONTEXTS);
+ break;
+ case NFP_DUMPSPEC_TYPE_RTSYM:
+ *size += nfp_calc_rtsym_dump_sz(pf, tl);
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO:
+ hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+ *size += sizeof(struct nfp_dump_tl) + ALIGN8(hwinfo_size);
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+ *size += nfp_calc_hwinfo_field_sz(pf, tl);
+ break;
+ default:
+ *size += nfp_dump_error_tlv_size(tl);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nfp_calc_specific_level_size(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+ void *param)
+{
+ struct nfp_level_size *lev_sz = param;
+
+ if (dump_level->type != lev_sz->requested_level)
+ return 0;
+
+ return nfp_traverse_tlvs(pf, dump_level->data,
+ be32_to_cpu(dump_level->length),
+ &lev_sz->total_size, nfp_add_tlv_size);
+}
+
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ u32 flag)
+{
+ struct nfp_level_size lev_sz;
+ int err;
+
+ lev_sz.requested_level = cpu_to_be32(flag);
+ lev_sz.total_size = ALIGN8(sizeof(struct nfp_dump_prolog));
+
+ err = nfp_traverse_tlvs(pf, spec->data, spec->size, &lev_sz,
+ nfp_calc_specific_level_size);
+ if (err)
+ return err;
+
+ return lev_sz.total_size;
+}
+
+static int nfp_add_tlv(u32 type, u32 total_tlv_sz, struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *tl = dump->p;
+
+ if (total_tlv_sz > dump->buf_size)
+ return -ENOSPC;
+
+ if (dump->buf_size - total_tlv_sz < dump->dumped_size)
+ return -ENOSPC;
+
+ tl->type = cpu_to_be32(type);
+ tl->length = cpu_to_be32(total_tlv_sz - sizeof(*tl));
+
+ dump->dumped_size += total_tlv_sz;
+ dump->p += total_tlv_sz;
+
+ return 0;
+}
+
+static int
+nfp_dump_error_tlv(struct nfp_dump_tl *spec, int error,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_error *dump_header = dump->p;
+ u32 total_spec_size, total_size;
+ int err;
+
+ total_spec_size = sizeof(*spec) + be32_to_cpu(spec->length);
+ total_size = ALIGN8(sizeof(*dump_header) + total_spec_size);
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_ERROR, total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->error = cpu_to_be32(error);
+ memcpy(dump_header->spec, spec, total_spec_size);
+
+ return 0;
+}
+
+static int nfp_dump_fwname(struct nfp_pf *pf, struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *dump_header = dump->p;
+ u32 fwname_len, total_size;
+ const char *fwname;
+ int err;
+
+ fwname = nfp_mip_name(pf->mip);
+ fwname_len = strlen(fwname);
+ total_size = sizeof(*dump_header) + ALIGN8(fwname_len + 1);
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_FWNAME, total_size, dump);
+ if (err)
+ return err;
+
+ memcpy(dump_header->data, fwname, fwname_len);
+
+ return 0;
+}
+
+static int
+nfp_dump_hwinfo(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *dump_header = dump->p;
+ u32 hwinfo_size, total_size;
+ char *hwinfo;
+ int err;
+
+ hwinfo = nfp_hwinfo_get_packed_strings(pf->hwinfo);
+ hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+ total_size = sizeof(*dump_header) + ALIGN8(hwinfo_size);
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO, total_size, dump);
+ if (err)
+ return err;
+
+ memcpy(dump_header->data, hwinfo, hwinfo_size);
+
+ return 0;
+}
+
+static int nfp_dump_hwinfo_field(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *dump_header = dump->p;
+ u32 tl_len, key_len, val_len;
+ const char *key, *value;
+ u32 total_size;
+ int err;
+
+ tl_len = be32_to_cpu(spec->length);
+ key_len = strnlen(spec->data, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv(spec, -EINVAL, dump);
+
+ key = spec->data;
+ value = nfp_hwinfo_lookup(pf->hwinfo, key);
+ if (!value)
+ return nfp_dump_error_tlv(spec, -ENOENT, dump);
+
+ val_len = strlen(value);
+ total_size = sizeof(*dump_header) + ALIGN8(key_len + val_len + 2);
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO_FIELD, total_size, dump);
+ if (err)
+ return err;
+
+ memcpy(dump_header->data, key, key_len + 1);
+ memcpy(dump_header->data + key_len + 1, value, val_len + 1);
+
+ return 0;
+}
+
+static bool is_xpb_read(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+ return cpp_id->target == NFP_CPP_TARGET_ISLAND_XPB &&
+ cpp_id->action == 0 && cpp_id->token == 0;
+}
+
+static int
+nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_csr *dump_header = dump->p;
+ u32 reg_sz, header_size, total_size;
+ u32 cpp_rd_addr, max_rd_addr;
+ int bytes_read;
+ void *dest;
+ u32 cpp_id;
+ int err;
+
+ if (!nfp_csr_spec_valid(spec_csr))
+ return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+ reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+ header_size = ALIGN8(sizeof(*dump_header));
+ total_size = header_size +
+ ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+ dest = dump->p + header_size;
+
+ err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->cpp = spec_csr->cpp;
+ dump_header->register_width = spec_csr->register_width;
+
+ cpp_id = nfp_get_numeric_cpp_id(&spec_csr->cpp.cpp_id);
+ cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+ max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+
+ while (cpp_rd_addr < max_rd_addr) {
+ if (is_xpb_read(&spec_csr->cpp.cpp_id)) {
+ err = nfp_xpb_readl(pf->cpp, cpp_rd_addr, (u32 *)dest);
+ } else {
+ bytes_read = nfp_cpp_read(pf->cpp, cpp_id, cpp_rd_addr,
+ dest, reg_sz);
+ err = bytes_read == reg_sz ? 0 : -EIO;
+ }
+ if (err) {
+ dump_header->error = cpu_to_be32(err);
+ dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+ break;
+ }
+ cpp_rd_addr += reg_sz;
+ dest += reg_sz;
+ }
+
+ return 0;
+}
+
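Note that a failed CSR read does not abort the dump: the errno and the offending offset are recorded in the TLV's error and error_offset fields and nfp_dump_csr_range() still returns 0, so one bad register range cannot invalidate the rest of the dump buffer.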
+/* Write context to CSRCtxPtr, then read from it. Then the value can be read
+ * from IndCtxStatus.
+ */
+static int
+nfp_read_indirect_csr(struct nfp_cpp *cpp,
+ struct nfp_dumpspec_cpp_isl_id cpp_params, u32 offset,
+ u32 reg_sz, u32 context, void *dest)
+{
+ u32 csr_ctx_ptr_offs;
+ u32 cpp_id;
+ int result;
+
+ csr_ctx_ptr_offs = nfp_get_ind_csr_ctx_ptr_offs(offset);
+ cpp_id = NFP_CPP_ISLAND_ID(cpp_params.target,
+ NFP_IND_ME_REFL_WR_SIG_INIT,
+ cpp_params.token, cpp_params.island);
+ result = nfp_cpp_writel(cpp, cpp_id, csr_ctx_ptr_offs, context);
+ if (result)
+ return result;
+
+ cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+ result = nfp_cpp_read(cpp, cpp_id, csr_ctx_ptr_offs, dest, reg_sz);
+ if (result != reg_sz)
+ return result < 0 ? result : -EIO;
+
+ result = nfp_cpp_read(cpp, cpp_id, offset, dest, reg_sz);
+ if (result != reg_sz)
+ return result < 0 ? result : -EIO;
+
+ return 0;
+}
+
+static int
+nfp_read_all_indirect_csr_ctx(struct nfp_cpp *cpp,
+ struct nfp_dumpspec_csr *spec_csr, u32 address,
+ u32 reg_sz, void *dest)
+{
+ u32 ctx;
+ int err;
+
+ for (ctx = 0; ctx < NFP_IND_NUM_CONTEXTS; ctx++) {
+ err = nfp_read_indirect_csr(cpp, spec_csr->cpp.cpp_id, address,
+ reg_sz, ctx, dest + ctx * reg_sz);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_indirect_csr_range(struct nfp_pf *pf,
+ struct nfp_dumpspec_csr *spec_csr,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_csr *dump_header = dump->p;
+ u32 reg_sz, header_size, total_size;
+ u32 cpp_rd_addr, max_rd_addr;
+ u32 reg_data_length;
+ void *dest;
+ int err;
+
+ if (!nfp_csr_spec_valid(spec_csr))
+ return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+ reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+ header_size = ALIGN8(sizeof(*dump_header));
+ reg_data_length = be32_to_cpu(spec_csr->cpp.dump_length) *
+ NFP_IND_NUM_CONTEXTS;
+ total_size = header_size + ALIGN8(reg_data_length);
+ dest = dump->p + header_size;
+
+ err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->cpp = spec_csr->cpp;
+ dump_header->register_width = spec_csr->register_width;
+
+ cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+ max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+ while (cpp_rd_addr < max_rd_addr) {
+ err = nfp_read_all_indirect_csr_ctx(pf->cpp, spec_csr,
+ cpp_rd_addr, reg_sz, dest);
+ if (err) {
+ dump_header->error = cpu_to_be32(err);
+ dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+ break;
+ }
+ cpp_rd_addr += reg_sz;
+ dest += reg_sz * NFP_IND_NUM_CONTEXTS;
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_rtsym *dump_header = dump->p;
+ struct nfp_dumpspec_cpp_isl_id cpp_params;
+ struct nfp_rtsym_table *rtbl = pf->rtbl;
+ u32 header_size, total_size, sym_size;
+ const struct nfp_rtsym *sym;
+ u32 tl_len, key_len;
+ int bytes_read;
+ u32 cpp_id;
+ void *dest;
+ int err;
+
+ tl_len = be32_to_cpu(spec->tl.length);
+ key_len = strnlen(spec->rtsym, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump);
+
+ sym = nfp_rtsym_lookup(rtbl, spec->rtsym);
+ if (!sym)
+ return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS)
+ sym_size = sizeof(sym->addr);
+ else
+ sym_size = sym->size;
+
+ header_size =
+ ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1);
+ total_size = header_size + ALIGN8(sym_size);
+ dest = dump->p + header_size;
+
+ err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->padded_name_length =
+ header_size - offsetof(struct nfp_dump_rtsym, rtsym);
+ memcpy(dump_header->rtsym, spec->rtsym, key_len + 1);
+ dump_header->cpp.dump_length = cpu_to_be32(sym_size);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS) {
+ *(u64 *)dest = sym->addr;
+ } else {
+ cpp_params.target = sym->target;
+ cpp_params.action = NFP_CPP_ACTION_RW;
+ cpp_params.token = 0;
+ cpp_params.island = sym->domain;
+ cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+ dump_header->cpp.cpp_id = cpp_params;
+ dump_header->cpp.offset = cpu_to_be32(sym->addr);
+ bytes_read = nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest,
+ sym_size);
+ if (bytes_read != sym_size) {
+ if (bytes_read >= 0)
+ bytes_read = -EIO;
+ dump_header->error = cpu_to_be32(bytes_read);
+ }
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_for_tlv(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+ struct nfp_dumpspec_rtsym *spec_rtsym;
+ struct nfp_dump_state *dump = param;
+ struct nfp_dumpspec_csr *spec_csr;
+ int err;
+
+ switch (be32_to_cpu(tl->type)) {
+ case NFP_DUMPSPEC_TYPE_FWNAME:
+ err = nfp_dump_fwname(pf, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_CPP_CSR:
+ case NFP_DUMPSPEC_TYPE_XPB_CSR:
+ case NFP_DUMPSPEC_TYPE_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ err = nfp_dump_csr_range(pf, spec_csr, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ err = nfp_dump_indirect_csr_range(pf, spec_csr, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_RTSYM:
+ spec_rtsym = (struct nfp_dumpspec_rtsym *)tl;
+ err = nfp_dump_single_rtsym(pf, spec_rtsym, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO:
+ err = nfp_dump_hwinfo(pf, tl, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+ err = nfp_dump_hwinfo_field(pf, tl, dump);
+ if (err)
+ return err;
+ break;
+ default:
+ err = nfp_dump_error_tlv(tl, -EOPNOTSUPP, dump);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_specific_level(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+ void *param)
+{
+ struct nfp_dump_state *dump = param;
+
+ if (dump_level->type != dump->requested_level)
+ return 0;
+
+ return nfp_traverse_tlvs(pf, dump_level->data,
+ be32_to_cpu(dump_level->length), dump,
+ nfp_dump_for_tlv);
+}
+
+static int nfp_dump_populate_prolog(struct nfp_dump_state *dump)
+{
+ struct nfp_dump_prolog *prolog = dump->p;
+ u32 total_size;
+ int err;
+
+ total_size = ALIGN8(sizeof(*prolog));
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_PROLOG, total_size, dump);
+ if (err)
+ return err;
+
+ prolog->dump_level = dump->requested_level;
+
+ return 0;
+}
+
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ struct ethtool_dump *dump_param, void *dest)
+{
+ struct nfp_dump_state dump;
+ int err;
+
+ dump.requested_level = cpu_to_be32(dump_param->flag);
+ dump.dumped_size = 0;
+ dump.p = dest;
+ dump.buf_size = dump_param->len;
+
+ err = nfp_dump_populate_prolog(&dump);
+ if (err)
+ return err;
+
+ err = nfp_traverse_tlvs(pf, spec->data, spec->size, &dump,
+ nfp_dump_specific_level);
+ if (err)
+ return err;
+
+	/* Set the size of the actual dump, to trigger a warning if it
+	 * differs from the calculated size.
+	 */
+ dump_param->len = dump.dumped_size;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2801ecd09eab..e1dae0616f52 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -47,18 +47,16 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
+#include <linux/firmware.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
+#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_port.h"
-enum nfp_dump_diag {
- NFP_DUMP_NSP_DIAG = 0,
-};
-
struct nfp_et_stat {
char name[ETH_GSTRING_LEN];
int off;
@@ -333,7 +331,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
ls >= ARRAY_SIZE(ls_to_ethtool))
return 0;
- cmd->base.speed = ls_to_ethtool[sts];
+ cmd->base.speed = ls_to_ethtool[ls];
cmd->base.duplex = DUPLEX_FULL;
return 0;
@@ -1066,15 +1064,34 @@ exit_release:
return ret;
}
+/* Set the dump flag/level. Calculate the dump length for flag > 0 only (new
+ * TLV-based dumps), since flag 0 (default) calculates the length in
+ * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
+ * without setting the flag first, for backward compatibility.
+ */
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
+ s64 len;
if (!app)
return -EOPNOTSUPP;
- if (val->flag != NFP_DUMP_NSP_DIAG)
- return -EINVAL;
+ if (val->flag == NFP_DUMP_NSP_DIAG) {
+ app->pf->dump_flag = val->flag;
+ return 0;
+ }
+
+ if (!app->pf->dumpspec)
+ return -EOPNOTSUPP;
+
+ len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
+ val->flag);
+ if (len < 0)
+ return len;
+
+ app->pf->dump_flag = val->flag;
+ app->pf->dump_len = len;
return 0;
}
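For reference, the three handlers touched in this file map onto the standard ethtool dump ioctls. A minimal userspace sketch, not part of this patch; "eth0" and level 1 are placeholders and error handling is abbreviated:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_dump req = { .cmd = ETHTOOL_SET_DUMP, .flag = 1 };
            struct ethtool_dump *data;
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&req;
            if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) /* set_dump */
                    return 1;

            req.cmd = ETHTOOL_GET_DUMP_FLAG;        /* get_dump_flag */
            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    return 1;

            data = calloc(1, sizeof(*data) + req.len);
            if (!data)
                    return 1;
            data->cmd = ETHTOOL_GET_DUMP_DATA;      /* get_dump_data */
            data->len = req.len;
            ifr.ifr_data = (void *)data;
            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    return 1;

            printf("dump level %u, %u bytes\n", data->flag, data->len);
            return 0;
    }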
@@ -1082,14 +1099,37 @@ static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
- return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (!app)
+ return -EOPNOTSUPP;
+
+ if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+ return nfp_dump_nsp_diag(app, dump, NULL);
+
+ dump->flag = app->pf->dump_flag;
+ dump->len = app->pf->dump_len;
+
+ return 0;
}
static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
void *buffer)
{
- return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (!app)
+ return -EOPNOTSUPP;
+
+ if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+ return nfp_dump_nsp_diag(app, dump, buffer);
+
+ dump->flag = app->pf->dump_flag;
+ dump->len = app->pf->dump_len;
+
+ return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
+ buffer);
}
static int nfp_net_set_coalesce(struct net_device *netdev,
@@ -1230,6 +1270,57 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
+static int
+nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash)
+{
+ const struct firmware *fw;
+ struct nfp_app *app;
+ struct nfp_nsp *nsp;
+ struct device *dev;
+ int err;
+
+ if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+ return -EOPNOTSUPP;
+
+ app = nfp_app_from_netdev(netdev);
+ if (!app)
+ return -EOPNOTSUPP;
+
+ dev = &app->pdev->dev;
+
+ nsp = nfp_nsp_open(app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ dev_err(dev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ err = request_firmware_direct(&fw, flash->data, dev);
+ if (err)
+ goto exit_close_nsp;
+
+ dev_info(dev, "Please be patient while writing flash image: %s\n",
+ flash->data);
+ dev_hold(netdev);
+ rtnl_unlock();
+
+ err = nfp_nsp_write_flash(nsp, fw);
+ if (err < 0) {
+ dev_err(dev, "Flash write failed: %d\n", err);
+ goto exit_rtnl_lock;
+ }
+ dev_info(dev, "Finished writing flash image\n");
+
+exit_rtnl_lock:
+ rtnl_lock();
+ dev_put(netdev);
+ release_firmware(fw);
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
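Two points worth noting in nfp_net_flash_device(): the flash write can take minutes, so the RTNL lock is dropped for its duration with a reference held on the netdev (dev_hold()) so the device cannot disappear, and the lock is retaken before returning since the ethtool core expects it held. Userspace reaches this handler through the standard flash ioctl, typically ethtool -f <netdev> <firmware-file>.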
static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1240,6 +1331,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_sset_count = nfp_net_get_sset_count,
.get_rxnfc = nfp_net_get_rxnfc,
.set_rxnfc = nfp_net_set_rxnfc,
+ .flash_device = nfp_net_flash_device,
.get_rxfh_indir_size = nfp_net_get_rxfh_indir_size,
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
@@ -1265,6 +1357,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
.get_sset_count = nfp_port_get_sset_count,
+ .flash_device = nfp_net_flash_device,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index c505014121c4..15fa47f622aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -208,12 +208,6 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
int err;
- /* Get ME clock frequency from ctrl BAR
- * XXX for now frequency is hardcoded until we figure out how
- * to get the value from nfp-hwinfo into ctrl bar
- */
- nn->me_freq_mhz = 1200;
-
err = nfp_net_init(nn);
if (err)
return err;
@@ -373,7 +367,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (IS_ERR(pf->app))
return PTR_ERR(pf->app);
+ mutex_lock(&pf->lock);
err = nfp_app_init(pf->app);
+ mutex_unlock(&pf->lock);
if (err)
goto err_free;
@@ -401,7 +397,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
+ mutex_lock(&pf->lock);
nfp_app_clean(pf->app);
+ mutex_unlock(&pf->lock);
err_free:
nfp_app_free(pf->app);
pf->app = NULL;
@@ -414,7 +412,11 @@ static void nfp_net_pf_app_clean(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
+
+ mutex_lock(&pf->lock);
nfp_app_clean(pf->app);
+ mutex_unlock(&pf->lock);
+
nfp_app_free(pf->app);
pf->app = NULL;
}
@@ -570,17 +572,6 @@ err_unmap_ctrl:
return err;
}
-static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
-{
- nfp_net_pf_app_stop(pf);
- /* stop app first, to avoid double free of ctrl vNIC's ddir */
- nfp_net_debugfs_dir_clean(&pf->ddir);
-
- nfp_net_pf_free_irqs(pf);
- nfp_net_pf_app_clean(pf);
- nfp_net_pci_unmap_mem(pf);
-}
-
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
struct nfp_eth_table *eth_table)
@@ -655,9 +646,6 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, nn);
}
- if (list_empty(&pf->vnics))
- nfp_net_pci_remove_finish(pf);
-
return 0;
}
@@ -707,6 +695,7 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
*/
int nfp_net_pci_probe(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
int stride;
@@ -720,16 +709,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
return -EINVAL;
}
- mutex_lock(&pf->lock);
pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
- if ((int)pf->max_data_vnics < 0) {
- err = pf->max_data_vnics;
- goto err_unlock;
- }
+ if ((int)pf->max_data_vnics < 0)
+ return pf->max_data_vnics;
err = nfp_net_pci_map_mem(pf);
if (err)
- goto err_unlock;
+ return err;
ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
qc_bar = nfp_cpp_area_iomem(pf->qc_area);
@@ -768,6 +754,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_unmap;
+ err = devlink_register(devlink, &pf->pdev->dev);
+ if (err)
+ goto err_app_clean;
+
+ mutex_lock(&pf->lock);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -799,32 +790,39 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
+ mutex_unlock(&pf->lock);
+ cancel_work_sync(&pf->port_refresh_work);
+ devlink_unregister(devlink);
+err_app_clean:
nfp_net_pf_app_clean(pf);
err_unmap:
nfp_net_pci_unmap_mem(pf);
-err_unlock:
- mutex_unlock(&pf->lock);
- cancel_work_sync(&pf->port_refresh_work);
return err;
}
void nfp_net_pci_remove(struct nfp_pf *pf)
{
- struct nfp_net *nn;
+ struct nfp_net *nn, *next;
mutex_lock(&pf->lock);
- if (list_empty(&pf->vnics))
- goto out;
-
- list_for_each_entry(nn, &pf->vnics, vnic_list)
- if (nfp_net_is_data_vnic(nn))
- nfp_net_pf_clean_vnic(pf, nn);
+ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
+ if (!nfp_net_is_data_vnic(nn))
+ continue;
+ nfp_net_pf_clean_vnic(pf, nn);
+ nfp_net_pf_free_vnic(pf, nn);
+ }
- nfp_net_pf_free_vnics(pf);
+ nfp_net_pf_app_stop(pf);
+ /* stop app first, to avoid double free of ctrl vNIC's ddir */
+ nfp_net_debugfs_dir_clean(&pf->ddir);
- nfp_net_pci_remove_finish(pf);
-out:
mutex_unlock(&pf->lock);
+ devlink_unregister(priv_to_devlink(pf));
+
+ nfp_net_pf_free_irqs(pf);
+ nfp_net_pf_app_clean(pf);
+ nfp_net_pci_unmap_mem(pf);
+
cancel_work_sync(&pf->port_refresh_work);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 78b36c67c232..f67da6bde9da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -46,6 +46,13 @@
#include "nfp_net_sriov.h"
#include "nfp_port.h"
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
+{
+ return rcu_dereference_protected(set->reprs[id],
+ lockdep_is_held(&app->pf->lock));
+}
+
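nfp_repr_get_locked() is the writer-side accessor for the now RCU-annotated reprs array (see the __rcu change in nfp_net_repr.h below); it asserts pf->lock via lockdep. A hedged kernel-style sketch of what a matching lockless reader would look like; the pointer must only be used inside the read-side section unless a reference is taken:

    static void nfp_repr_peek(struct nfp_reprs *set, unsigned int id)
    {
            struct net_device *netdev;

            rcu_read_lock();
            netdev = rcu_dereference(set->reprs[id]);
            if (netdev)
                    netdev_info(netdev, "repr %u is live\n", id);
            rcu_read_unlock();
    }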
static void
nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
int tx_status)
@@ -186,6 +193,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
return -EINVAL;
}
+static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return nfp_app_change_mtu(repr->app, netdev, new_mtu);
+}
+
static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
@@ -240,6 +254,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_open = nfp_repr_open,
.ndo_stop = nfp_repr_stop,
.ndo_start_xmit = nfp_repr_xmit,
+ .ndo_change_mtu = nfp_repr_change_mtu,
.ndo_get_stats64 = nfp_repr_get_stats64,
.ndo_has_offload_stats = nfp_repr_has_offload_stats,
.ndo_get_offload_stats = nfp_repr_get_offload_stats,
@@ -336,6 +351,8 @@ struct net_device *nfp_repr_alloc(struct nfp_app *app)
if (!netdev)
return NULL;
+ netif_carrier_off(netdev);
+
repr = netdev_priv(netdev);
repr->netdev = netdev;
repr->app = app;
@@ -359,29 +376,45 @@ static void nfp_repr_clean_and_free(struct nfp_repr *repr)
nfp_repr_free(repr);
}
-void nfp_reprs_clean_and_free(struct nfp_reprs *reprs)
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
{
+ struct net_device *netdev;
unsigned int i;
- for (i = 0; i < reprs->num_reprs; i++)
- if (reprs->reprs[i])
- nfp_repr_clean_and_free(netdev_priv(reprs->reprs[i]));
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev)
+ nfp_repr_clean_and_free(netdev_priv(netdev));
+ }
kfree(reprs);
}
void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
- enum nfp_repr_type type)
+nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
{
+ struct net_device *netdev;
struct nfp_reprs *reprs;
+ int i;
- reprs = nfp_app_reprs_set(app, type, NULL);
+ reprs = rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
if (!reprs)
return;
+ /* Preclean must happen before we remove the reprs reference from the
+ * app below.
+ */
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev)
+ nfp_app_repr_preclean(app, netdev);
+ }
+
+ reprs = nfp_app_reprs_set(app, type, NULL);
+
synchronize_rcu();
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
}
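The ordering here matters: the preclean runs while app->reprs[type] is still published, the pointer is then swapped out via nfp_app_reprs_set(), and synchronize_rcu() guarantees that every reader which might still hold the old array has exited its read-side section before the representors are torn down and the array is freed.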
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
@@ -399,47 +432,29 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
int nfp_reprs_resync_phys_ports(struct nfp_app *app)
{
- struct nfp_reprs *reprs, *old_reprs;
+ struct net_device *netdev;
+ struct nfp_reprs *reprs;
struct nfp_repr *repr;
int i;
- old_reprs =
- rcu_dereference_protected(app->reprs[NFP_REPR_TYPE_PHYS_PORT],
- lockdep_is_held(&app->pf->lock));
- if (!old_reprs)
- return 0;
-
- reprs = nfp_reprs_alloc(old_reprs->num_reprs);
+ reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
if (!reprs)
- return -ENOMEM;
-
- for (i = 0; i < old_reprs->num_reprs; i++) {
- if (!old_reprs->reprs[i])
- continue;
-
- repr = netdev_priv(old_reprs->reprs[i]);
- if (repr->port->type == NFP_PORT_INVALID)
- continue;
-
- reprs->reprs[i] = old_reprs->reprs[i];
- }
-
- old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- synchronize_rcu();
+ return 0;
- /* Now we free up removed representors */
- for (i = 0; i < old_reprs->num_reprs; i++) {
- if (!old_reprs->reprs[i])
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (!netdev)
continue;
- repr = netdev_priv(old_reprs->reprs[i]);
+ repr = netdev_priv(netdev);
if (repr->port->type != NFP_PORT_INVALID)
continue;
- nfp_app_repr_stop(app, repr);
+ nfp_app_repr_preclean(app, netdev);
+ rcu_assign_pointer(reprs->reprs[i], NULL);
+ synchronize_rcu();
nfp_repr_clean(repr);
}
- kfree(old_reprs);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 5d4d897bc9c6..a621e8ff528e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -35,6 +35,7 @@
#define NFP_NET_REPR_H
struct metadata_dst;
+struct nfp_app;
struct nfp_net;
struct nfp_port;
@@ -47,7 +48,7 @@ struct nfp_port;
*/
struct nfp_reprs {
unsigned int num_reprs;
- struct net_device *reprs[0];
+ struct net_device __rcu *reprs[0];
};
/**
@@ -89,6 +90,7 @@ struct nfp_repr {
* @NFP_REPR_TYPE_PHYS_PORT: external NIC port
* @NFP_REPR_TYPE_PF: physical function
* @NFP_REPR_TYPE_VF: virtual function
+ * @__NFP_REPR_TYPE_MAX: number of representor types
*/
enum nfp_repr_type {
NFP_REPR_TYPE_PHYS_PORT,
@@ -113,16 +115,18 @@ static inline int nfp_repr_get_port_id(struct net_device *netdev)
return priv->dst->u.port_info.port_id;
}
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set,
+ unsigned int id);
+
void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
struct net_device *nfp_repr_alloc(struct nfp_app *app);
-void
-nfp_reprs_clean_and_free(struct nfp_reprs *reprs);
-void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
- enum nfp_repr_type type);
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs);
+void nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
+ enum nfp_repr_type type);
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
int nfp_reprs_resync_phys_ports(struct nfp_app *app);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index c879626e035b..b802a1d55449 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -277,12 +277,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);
- /* Get ME clock frequency from ctrl BAR
- * XXX for now frequency is hardcoded until we figure out how
- * to get the value from nfp-hwinfo into ctrl bar
- */
- nn->me_freq_mhz = 1200;
-
err = nfp_net_init(nn);
if (err)
goto err_irqs_disable;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index 3ce51f03126f..ced62d112aa2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -49,6 +49,8 @@
struct nfp_hwinfo;
struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo);
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo);
/* Implemented in nfp_nsp.c, low level functions */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 5798adc57cbc..c8f2c064cce3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -242,6 +242,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
void *buffer, size_t length);
int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
const void *buffer, size_t length);
+size_t nfp_cpp_area_size(struct nfp_cpp_area *area);
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 04dd5758ecf5..ef30597aa319 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -372,8 +372,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
* that it can be accessed directly.
*
* NOTE: @address and @size must be 32-bit aligned values.
- *
- * NOTE: The area must also be 'released' when the structure is freed.
+ * The area must also be 'released' when the structure is freed.
*
* Return: NFP CPP Area handle, or NULL
*/
@@ -536,8 +535,7 @@ void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
* Read data from indicated CPP region.
*
* NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
*
* Return: length of io, or -ERRNO
*/
@@ -558,8 +556,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area,
* Write data to indicated CPP region.
*
* NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
*
* Return: length of io, or -ERRNO
*/
@@ -571,6 +568,17 @@ int nfp_cpp_area_write(struct nfp_cpp_area *area,
}
/**
+ * nfp_cpp_area_size() - return size of a CPP area
+ * @cpp_area: CPP area handle
+ *
+ * Return: Size of the area
+ */
+size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->size;
+}
+
+/**
* nfp_cpp_area_name() - return name of a CPP area
* @cpp_area: CPP area handle
*
@@ -666,18 +674,20 @@ void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
* @offset: Offset into area
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_readl(struct nfp_cpp_area *area,
unsigned long offset, u32 *value)
{
u8 tmp[4];
- int err;
+ int n;
- err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
- *value = get_unaligned_le32(tmp);
+ n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le32(tmp);
+ return 0;
}
/**
@@ -686,16 +696,18 @@ int nfp_cpp_area_readl(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_writel(struct nfp_cpp_area *area,
unsigned long offset, u32 value)
{
u8 tmp[4];
+ int n;
put_unaligned_le32(value, tmp);
+ n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
- return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -704,18 +716,20 @@ int nfp_cpp_area_writel(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_readq(struct nfp_cpp_area *area,
unsigned long offset, u64 *value)
{
u8 tmp[8];
- int err;
+ int n;
- err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
- *value = get_unaligned_le64(tmp);
+ n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le64(tmp);
+ return 0;
}
/**
@@ -724,16 +738,18 @@ int nfp_cpp_area_readq(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
unsigned long offset, u64 value)
{
u8 tmp[8];
+ int n;
put_unaligned_le64(value, tmp);
+ n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
- return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
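The same full-transfer-or-error mapping now recurs in every fixed-width accessor here and in nfp_cpplib.c below. A hypothetical helper, not in the patch, expressing the convention:

    /* full transfer -> 0, negative errno passed through, short -> -EIO */
    static inline int nfp_io_len_to_err(int n, size_t want)
    {
            if (n == (int)want)
                    return 0;
            return n < 0 ? n : -EIO;
    }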
/**
@@ -1072,7 +1088,7 @@ static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
* @xpb_addr: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
{
@@ -1087,7 +1103,7 @@ int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
* @xpb_addr: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
{
@@ -1105,7 +1121,7 @@ int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
*
* KERNEL: This operation is safe to call in interrupt or softirq context.
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
u32 mask, u32 value)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index ab86bceb93f2..20bad05e2e92 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -64,18 +64,20 @@
* @address: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 *value)
{
u8 tmp[4];
- int err;
+ int n;
- err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
- *value = get_unaligned_le32(tmp);
+ n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le32(tmp);
+ return 0;
}
/**
@@ -85,15 +87,18 @@ int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 value)
{
u8 tmp[4];
+ int n;
put_unaligned_le32(value, tmp);
- return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+ n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -103,18 +108,20 @@ int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 *value)
{
u8 tmp[8];
- int err;
+ int n;
- err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
- *value = get_unaligned_le64(tmp);
+ n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le64(tmp);
+ return 0;
}
/**
@@ -124,15 +131,18 @@ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 value)
{
u8 tmp[8];
+ int n;
put_unaligned_le64(value, tmp);
- return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+ n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/* NOTE: This code should not use nfp_xpb_* functions,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 4f24aff1e772..063a9a6243d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -302,3 +302,13 @@ const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
return NULL;
}
+
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo)
+{
+ return hwinfo->data;
+}
+
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo)
+{
+ return le32_to_cpu(hwinfo->size) - sizeof(u32);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 14a6d1ba51a9..39abac678b71 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -51,6 +51,9 @@
#include "nfp_cpp.h"
#include "nfp_nsp.h"
+#define NFP_NSP_TIMEOUT_DEFAULT 30
+#define NFP_NSP_TIMEOUT_BOOT 30
+
/* Offsets relative to the CSR base */
#define NSP_STATUS 0x00
#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48)
@@ -93,6 +96,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */
SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */
SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
};
@@ -260,10 +264,10 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
}
static int
-nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
- u32 nsp_cpp, u64 addr, u64 mask, u64 val)
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
+ u64 mask, u64 val, u32 timeout_sec)
{
- const unsigned long wait_until = jiffies + 30 * HZ;
+ const unsigned long wait_until = jiffies + timeout_sec * HZ;
int err;
for (;;) {
@@ -285,12 +289,13 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
}
/**
- * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * __nfp_nsp_command() - Execute a command on the NFP Service Processor
* @state: NFP SP state
* @code: NFP SP Command Code
* @option: NFP SP Command Argument
* @buff_cpp: NFP SP Buffer CPP Address info
* @buff_addr: NFP SP Buffer Host address
+ * @timeout_sec: Timeout value to wait for completion in seconds
*
* Return: 0 for success with no result
*
@@ -300,10 +305,11 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
* -ENODEV if the NSP is not a supported model
* -EBUSY if the NSP is stuck
* -EINTR if interrupted while waiting for completion
- * -ETIMEDOUT if the NSP took longer than 30 seconds to complete
+ * -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete
*/
-static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
- u32 buff_cpp, u64 buff_addr)
+static int
+__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+ u64 buff_addr, u32 timeout_sec)
{
u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
struct nfp_cpp *cpp = state->cpp;
@@ -341,8 +347,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
return err;
/* Wait for NSP_COMMAND_START to go to 0 */
- err = nfp_nsp_wait_reg(cpp, &reg,
- nsp_cpp, nsp_command, NSP_COMMAND_START, 0);
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
+ NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
err, code);
@@ -350,8 +356,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
}
/* Wait for NSP_STATUS_BUSY to go to 0 */
- err = nfp_nsp_wait_reg(cpp, &reg,
- nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0);
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
+ 0, timeout_sec);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
err, code);
@@ -374,9 +380,18 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
return ret_val;
}
-static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
- const void *in_buf, unsigned int in_size,
- void *out_buf, unsigned int out_size)
+static int
+nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+ u64 buff_addr)
+{
+ return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr,
+ NFP_NSP_TIMEOUT_DEFAULT);
+}
+
+static int
+__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+ const void *in_buf, unsigned int in_size, void *out_buf,
+ unsigned int out_size, u32 timeout_sec)
{
struct nfp_cpp *cpp = nsp->cpp;
unsigned int max_size;
@@ -429,7 +444,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
return err;
}
- ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+ ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf,
+ timeout_sec);
if (ret < 0)
return ret;
@@ -442,12 +458,23 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
return ret;
}
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+ const void *in_buf, unsigned int in_size, void *out_buf,
+ unsigned int out_size)
+{
+ return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size,
+ out_buf, out_size,
+ NFP_NSP_TIMEOUT_DEFAULT);
+}
+
int nfp_nsp_wait(struct nfp_nsp *state)
{
- const unsigned long wait_until = jiffies + 30 * HZ;
+ const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ;
int err;
- nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n");
+ nfp_dbg(state->cpp, "Waiting for NSP to respond (%u sec max).\n",
+ NFP_NSP_TIMEOUT_BOOT);
for (;;) {
const unsigned long start_time = jiffies;
@@ -488,6 +515,17 @@ int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
fw->size, NULL, 0);
}
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw)
+{
+	/* The flash write is specified to take at most 70s, so we allow an
+	 * additional safety factor on top of that specified time.
+	 */
+ u32 timeout_sec = 2.5 * 70;
+
+ return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size,
+ fw->data, fw->size, NULL, 0, timeout_sec);
+}
+
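A side note on the initializer above: 2.5 * 70 is a constant expression, so the compiler folds it to the integer 175 at build time and no floating-point arithmetic is emitted into the kernel.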
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
{
return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 650ca1a5bd21..e983c9d7f86c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -48,6 +48,7 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
int nfp_nsp_wait(struct nfp_nsp *state);
int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_mac_reinit(struct nfp_nsp *state);
static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index ecda474ac7c3..46107aefad1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -277,10 +277,6 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
break;
}
- if (err == sym->size)
- err = 0;
- else if (err >= 0)
- err = -EIO;
exit:
if (error)
*error = err;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 481876b5424c..66c665d0b926 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -793,9 +793,9 @@ struct fe_priv {
/* rx specific fields.
* Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
- union ring_type get_rx, put_rx, first_rx, last_rx;
+ union ring_type get_rx, put_rx, last_rx;
struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
- struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
+ struct nv_skb_map *last_rx_ctx;
struct nv_skb_map *rx_skb;
union ring_type rx_ring;
@@ -822,9 +822,9 @@ struct fe_priv {
/*
* tx specific fields.
*/
- union ring_type get_tx, put_tx, first_tx, last_tx;
+ union ring_type get_tx, put_tx, last_tx;
struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
- struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+ struct nv_skb_map *last_tx_ctx;
struct nv_skb_map *tx_skb;
union ring_type tx_ring;
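The first_rx/first_tx ring pointers and their first_*_ctx counterparts removed above always pointed at the base of rx_ring/tx_ring and rx_skb/tx_skb respectively, so the wrap-around tests in the hunks below simply substitute the base pointers. A stand-alone model of the wrap idiom, assuming a plain array-backed ring:

    #include <stdio.h>

    #define RING_SIZE 4

    int main(void)
    {
            int ring[RING_SIZE];
            int *put = ring, *last = &ring[RING_SIZE - 1];

            for (int i = 0; i < 6; i++) {
                    printf("slot %ld\n", (long)(put - ring)); /* 0 1 2 3 0 1 */
                    if (put++ == last)      /* stepped past the end? */
                            put = ring;     /* wrap to the array base */
            }
            return 0;
    }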
@@ -1812,12 +1812,12 @@ static int nv_alloc_rx(struct net_device *dev)
struct ring_desc *less_rx;
less_rx = np->get_rx.orig;
- if (less_rx-- == np->first_rx.orig)
+ if (less_rx-- == np->rx_ring.orig)
less_rx = np->last_rx.orig;
while (np->put_rx.orig != less_rx) {
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
- if (skb) {
+ if (likely(skb)) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
@@ -1833,9 +1833,9 @@ static int nv_alloc_rx(struct net_device *dev)
wmb();
np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
- np->put_rx.orig = np->first_rx.orig;
+ np->put_rx.orig = np->rx_ring.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
- np->put_rx_ctx = np->first_rx_ctx;
+ np->put_rx_ctx = np->rx_skb;
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1853,12 +1853,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
struct ring_desc_ex *less_rx;
less_rx = np->get_rx.ex;
- if (less_rx-- == np->first_rx.ex)
+ if (less_rx-- == np->rx_ring.ex)
less_rx = np->last_rx.ex;
while (np->put_rx.ex != less_rx) {
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
- if (skb) {
+ if (likely(skb)) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
@@ -1875,9 +1875,9 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
wmb();
np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
- np->put_rx.ex = np->first_rx.ex;
+ np->put_rx.ex = np->rx_ring.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
- np->put_rx_ctx = np->first_rx_ctx;
+ np->put_rx_ctx = np->rx_skb;
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1903,13 +1903,15 @@ static void nv_init_rx(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
int i;
- np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+ np->get_rx = np->rx_ring;
+ np->put_rx = np->rx_ring;
if (!nv_optimized(np))
np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
else
np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
- np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
+ np->get_rx_ctx = np->rx_skb;
+ np->put_rx_ctx = np->rx_skb;
np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
for (i = 0; i < np->rx_ring_size; i++) {
@@ -1932,13 +1934,15 @@ static void nv_init_tx(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
int i;
- np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+ np->get_tx = np->tx_ring;
+ np->put_tx = np->tx_ring;
if (!nv_optimized(np))
np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
else
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
- np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+ np->get_tx_ctx = np->tx_skb;
+ np->put_tx_ctx = np->tx_skb;
np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
netdev_reset_queue(np->dev);
np->tx_pkts_in_progress = 0;
@@ -2248,9 +2252,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
offset += bcnt;
size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig))
- put_tx = np->first_tx.orig;
+ put_tx = np->tx_ring.orig;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (size);
/* setup the fragments */
@@ -2276,7 +2280,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
do {
nv_unmap_txskb(np, start_tx_ctx);
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
- tmp_tx_ctx = np->first_tx_ctx;
+ tmp_tx_ctx = np->tx_skb;
} while (tmp_tx_ctx != np->put_tx_ctx);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
@@ -2294,18 +2298,18 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
offset += bcnt;
frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig))
- put_tx = np->first_tx.orig;
+ put_tx = np->tx_ring.orig;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (frag_size);
}
- if (unlikely(put_tx == np->first_tx.orig))
+ if (unlikely(put_tx == np->tx_ring.orig))
prev_tx = np->last_tx.orig;
else
prev_tx = put_tx - 1;
- if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+ if (unlikely(np->put_tx_ctx == np->tx_skb))
prev_tx_ctx = np->last_tx_ctx;
else
prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2406,9 +2410,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
offset += bcnt;
size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.ex))
- put_tx = np->first_tx.ex;
+ put_tx = np->tx_ring.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (size);
/* setup the fragments */
@@ -2434,7 +2438,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
do {
nv_unmap_txskb(np, start_tx_ctx);
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
- tmp_tx_ctx = np->first_tx_ctx;
+ tmp_tx_ctx = np->tx_skb;
} while (tmp_tx_ctx != np->put_tx_ctx);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
@@ -2452,18 +2456,18 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
offset += bcnt;
frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.ex))
- put_tx = np->first_tx.ex;
+ put_tx = np->tx_ring.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (frag_size);
}
- if (unlikely(put_tx == np->first_tx.ex))
+ if (unlikely(put_tx == np->tx_ring.ex))
prev_tx = np->last_tx.ex;
else
prev_tx = put_tx - 1;
- if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+ if (unlikely(np->put_tx_ctx == np->tx_skb))
prev_tx_ctx = np->last_tx_ctx;
else
prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2563,7 +2567,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
- if (flags & NV_TX_ERROR) {
+ if (unlikely(flags & NV_TX_ERROR)) {
if ((flags & NV_TX_RETRYERROR)
&& !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
@@ -2580,7 +2584,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
}
} else {
if (flags & NV_TX2_LASTPACKET) {
- if (flags & NV_TX2_ERROR) {
+ if (unlikely(flags & NV_TX2_ERROR)) {
if ((flags & NV_TX2_RETRYERROR)
&& !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
@@ -2597,9 +2601,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
}
}
if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
- np->get_tx.orig = np->first_tx.orig;
+ np->get_tx.orig = np->tx_ring.orig;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
- np->get_tx_ctx = np->first_tx_ctx;
+ np->get_tx_ctx = np->tx_skb;
}
netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2626,7 +2630,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
nv_unmap_txskb(np, np->get_tx_ctx);
if (flags & NV_TX2_LASTPACKET) {
- if (flags & NV_TX2_ERROR) {
+ if (unlikely(flags & NV_TX2_ERROR)) {
if ((flags & NV_TX2_RETRYERROR)
&& !(flags & NV_TX2_RETRYCOUNT_MASK)) {
if (np->driver_data & DEV_HAS_GEAR_MODE)
@@ -2651,9 +2655,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
}
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
- np->get_tx.ex = np->first_tx.ex;
+ np->get_tx.ex = np->tx_ring.ex;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
- np->get_tx_ctx = np->first_tx_ctx;
+ np->get_tx_ctx = np->tx_skb;
}
netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
@@ -2909,9 +2913,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
- np->get_rx.orig = np->first_rx.orig;
+ np->get_rx.orig = np->rx_ring.orig;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
- np->get_rx_ctx = np->first_rx_ctx;
+ np->get_rx_ctx = np->rx_skb;
rx_work++;
}
@@ -2998,9 +3002,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
next_pkt:
if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
- np->get_rx.ex = np->first_rx.ex;
+ np->get_rx.ex = np->rx_ring.ex;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
- np->get_rx_ctx = np->first_rx_ctx;
+ np->get_rx_ctx = np->rx_skb;
rx_work++;
}
@@ -5507,11 +5511,9 @@ static int nv_open(struct net_device *dev)
/* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq().
*/
- {
- u32 miistat;
- miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
- }
+ readl(base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
/* set linkspeed to invalid value, thus force nv_update_linkspeed
* to init hw */
np->linkspeed = 0;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c9a55b774935..07a2eb3781b1 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -212,9 +212,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
return -ENOENT;
}
- if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
- &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
- != ETH_ALEN) {
+ if (!mac_pton(maddr, addr)) {
dev_warn(&pdev->dev,
"can't parse mac address, not configuring\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 26ddf092e3ec..0ee2490db729 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -85,6 +85,7 @@ config QED
tristate "QLogic QED 25/40/100Gb core driver"
depends on PCI
select ZLIB_INFLATE
+ select CRC8
---help---
This enables the support for ...
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 0a66389c06c2..1cd39c9a0345 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -2502,12 +2502,10 @@ netxen_collect_minidump(struct netxen_adapter *adapter)
{
int ret = 0;
struct netxen_minidump_template_hdr *hdr;
- struct timespec val;
hdr = (struct netxen_minidump_template_hdr *)
adapter->mdump.md_template;
hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
- jiffies_to_timespec(jiffies, &val);
- hdr->driver_timestamp = (u32) val.tv_sec;
+ hdr->driver_timestamp = ktime_get_seconds();
hdr->driver_info_word2 = adapter->fw_version;
hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
ret = netxen_parse_md_template(adapter);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 91003bc6f00b..69488554f4b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -52,10 +52,10 @@
extern const struct qed_common_ops qed_common_ops_pass;
-#define QED_MAJOR_VERSION 8
-#define QED_MINOR_VERSION 10
-#define QED_REVISION_VERSION 11
-#define QED_ENGINEERING_VERSION 21
+#define QED_MAJOR_VERSION 8
+#define QED_MINOR_VERSION 33
+#define QED_REVISION_VERSION 0
+#define QED_ENGINEERING_VERSION 20
#define QED_VERSION \
((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
return sw_fid;
}
-#define PURE_LB_TC 8
-#define PKT_LB_TC 9
+#define PKT_LB_TC 9
+#define MAX_NUM_VOQS_E4 20
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index afd07ad91631..6f546e869d8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -86,22 +86,22 @@
/* connection context union */
union conn_context {
- struct core_conn_context core_ctx;
- struct eth_conn_context eth_ctx;
- struct iscsi_conn_context iscsi_ctx;
- struct fcoe_conn_context fcoe_ctx;
- struct roce_conn_context roce_ctx;
+ struct e4_core_conn_context core_ctx;
+ struct e4_eth_conn_context eth_ctx;
+ struct e4_iscsi_conn_context iscsi_ctx;
+ struct e4_fcoe_conn_context fcoe_ctx;
+ struct e4_roce_conn_context roce_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
- struct iscsi_task_context iscsi_ctx;
- struct fcoe_task_context fcoe_ctx;
+ struct e4_iscsi_task_context iscsi_ctx;
+ struct e4_fcoe_task_context fcoe_ctx;
};
/* TYPE-1 task context - ROCE */
union type1_task_context {
- struct rdma_task_context roce_ctx;
+ struct e4_rdma_task_context roce_ctx;
};
struct src_ent {
@@ -109,8 +109,8 @@ struct src_ent {
u64 next;
};
-#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
-#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_cxt_qm_iids(p_hwfn, &qm_iids);
- total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ total = qed_qm_pf_mem_size(qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
@@ -1055,11 +1055,10 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 size;
size = min_t(u32, sz_left, p_blk->real_size_in_page);
- p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- size, &p_phys, GFP_KERNEL);
+ p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
- memset(p_virt, 0, size);
ilt_shadow[line].p_phys = p_phys;
ilt_shadow[line].p_virt = p_virt;
@@ -1496,20 +1495,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
}
}
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool is_pf_loading)
{
- struct qed_qm_pf_rt_init_params params;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_pf_rt_init_params params;
+ struct qed_mcp_link_state *p_link;
struct qed_qm_iids iids;
memset(&iids, 0, sizeof(iids));
qed_cxt_qm_iids(p_hwfn, &iids);
+ p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
memset(&params, 0, sizeof(params));
params.port_id = p_hwfn->port_id;
params.pf_id = p_hwfn->rel_pf_id;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
- params.is_first_pf = p_hwfn->first_on_engine;
+ params.is_pf_loading = is_pf_loading;
params.num_pf_cids = iids.cids;
params.num_vf_cids = iids.vf_cids;
params.num_tids = iids.tids;
@@ -1520,6 +1523,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
+ params.link_speed = p_link->speed;
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
@@ -1883,7 +1887,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- qed_qm_init_pf(p_hwfn, p_ptt);
+ qed_qm_init_pf(p_hwfn, p_ptt, true);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
qed_cdu_init_pf(p_hwfn);
@@ -2303,14 +2307,13 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
goto out0;
}
- p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_blk->real_size_in_page,
- &p_phys, GFP_KERNEL);
+ p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page, &p_phys,
+ GFP_KERNEL);
if (!p_virt) {
rc = -ENOMEM;
goto out1;
}
- memset(p_virt, 0, p_blk->real_size_in_page);
/* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
* to compensate for a HW bug, but it is configured even if DIF is not
@@ -2326,7 +2329,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
elem = (union type1_task_context *)elem_start;
SET_FIELD(elem->roce_ctx.tdif_context.flags1,
- TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+ TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 17836349a274..a4e95869889f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*
* @param p_hwfn
* @param p_ptt
+ * @param is_pf_loading
*/
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool is_pf_loading);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index fe7c1f230028..449777f21237 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest)
{
struct protocol_dcb_data *p_dcb_data;
- bool update_flag = false;
-
- p_dest->pf_id = p_src->pf_id;
+ u8 update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
p_dest->update_fcoe_dcb_data_mode = update_flag;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 03c3cf77aaff..f2633ec87a6a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -21,25 +21,26 @@ enum mem_groups {
MEM_GROUP_DMAE_MEM,
MEM_GROUP_CM_MEM,
MEM_GROUP_QM_MEM,
- MEM_GROUP_TM_MEM,
+ MEM_GROUP_DORQ_MEM,
MEM_GROUP_BRB_RAM,
MEM_GROUP_BRB_MEM,
MEM_GROUP_PRS_MEM,
- MEM_GROUP_SDM_MEM,
MEM_GROUP_IOR,
- MEM_GROUP_RAM,
MEM_GROUP_BTB_RAM,
- MEM_GROUP_RDIF_CTX,
- MEM_GROUP_TDIF_CTX,
- MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
MEM_GROUP_CAU_MEM,
MEM_GROUP_PXP_ILT,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_SDM_MEM,
MEM_GROUP_PBUF,
+ MEM_GROUP_RAM,
MEM_GROUP_MULD_MEM,
MEM_GROUP_BTB_MEM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_IGU_MEM,
MEM_GROUP_IGU_MSIX,
MEM_GROUP_CAU_SB,
@@ -54,25 +55,26 @@ static const char * const s_mem_group_names[] = {
"DMAE_MEM",
"CM_MEM",
"QM_MEM",
- "TM_MEM",
+ "DORQ_MEM",
"BRB_RAM",
"BRB_MEM",
"PRS_MEM",
- "SDM_MEM",
"IOR",
- "RAM",
"BTB_RAM",
- "RDIF_CTX",
- "TDIF_CTX",
- "CFC_MEM",
"CONN_CFC_MEM",
"TASK_CFC_MEM",
"CAU_PI",
"CAU_MEM",
"PXP_ILT",
+ "TM_MEM",
+ "SDM_MEM",
"PBUF",
+ "RAM",
"MULD_MEM",
"BTB_MEM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CFC_MEM",
"IGU_MEM",
"IGU_MSIX",
"CAU_SB",
@@ -92,11 +94,6 @@ static u32 cond7(const u32 *r, const u32 *imm)
return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}
-static u32 cond14(const u32 *r, const u32 *imm)
-{
- return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
-}
-
static u32 cond6(const u32 *r, const u32 *imm)
{
return (r[0] & imm[0]) != imm[1];
@@ -174,7 +171,6 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond11,
cond12,
cond13,
- cond14,
};
/******************************* Data Types **********************************/
@@ -203,6 +199,8 @@ struct chip_defs {
struct platform_defs {
const char *name;
u32 delay_factor;
+ u32 dmae_thresh;
+ u32 log_thresh;
};
/* Storm constant definitions.
@@ -234,7 +232,7 @@ struct storm_defs {
/* Block constant definitions */
struct block_defs {
const char *name;
- bool has_dbg_bus[MAX_CHIP_IDS];
+ bool exists[MAX_CHIP_IDS];
bool associated_to_storm;
/* Valid only if associated_to_storm is true */
@@ -258,8 +256,8 @@ struct block_defs {
/* Reset register definitions */
struct reset_reg_defs {
u32 addr;
- u32 unreset_val;
bool exists[MAX_CHIP_IDS];
+ u32 unreset_val[MAX_CHIP_IDS];
};
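struct reset_reg_defs now carries one unreset_val per chip alongside exists[], instead of a single value; the qed_grc_unreset_blocks() hunk near the end of this file indexes both by dev_data->chip_id. A small sketch of that lookup, with stand-in names:

/* Sketch of the new per-chip layout: both arrays are indexed by the
 * same chip id (bb/k2/e5 in this patch). Names are stand-ins; the
 * driver uses dev_data->chip_id and s_reset_regs_defs[].
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_CHIP_IDS	3

struct reset_reg_sketch {
	uint32_t addr;
	bool exists[MAX_CHIP_IDS];
	uint32_t unreset_val[MAX_CHIP_IDS];
};

/* Registers that don't exist on this chip contribute nothing. */
static uint32_t pick_unreset_val(const struct reset_reg_sketch *r,
				 unsigned int chip_id)
{
	return r->exists[chip_id] ? r->unreset_val[chip_id] : 0;
}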
struct grc_param_defs {
@@ -276,8 +274,8 @@ struct rss_mem_defs {
const char *mem_name;
const char *type_name;
u32 addr;
+ u32 entry_width;
u32 num_entries[MAX_CHIP_IDS];
- u32 entry_width[MAX_CHIP_IDS];
};
struct vfc_ram_defs {
@@ -294,7 +292,9 @@ struct big_ram_defs {
enum dbg_grc_params grc_param;
u32 addr_reg_addr;
u32 data_reg_addr;
- u32 num_of_blocks[MAX_CHIP_IDS];
+ u32 is_256b_reg_addr;
+ u32 is_256b_bit_offset[MAX_CHIP_IDS];
+ u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};
struct phy_defs {
@@ -358,20 +358,14 @@ struct phy_defs {
(arr)[i] = qed_rd(dev, ptt, addr); \
} while (0)
-#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
-#endif
-#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-#endif
-/* extra lines include a signature line + optional latency events line */
-#ifndef NUM_DBG_LINES
+/* Extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block_desc) \
(1 + ((block_desc)->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) \
((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
-#endif
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
@@ -424,9 +418,6 @@ struct phy_defs {
#define NUM_RSS_MEM_TYPES 5
#define NUM_BIG_RAM_TYPES 3
-#define BIG_RAM_BLOCK_SIZE_BYTES 128
-#define BIG_RAM_BLOCK_SIZE_DWORDS \
- BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
@@ -441,23 +432,17 @@ struct phy_defs {
#define FW_IMG_MAIN 1
-#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
-#endif
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
-#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
-#endif
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
-#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
-#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
@@ -491,6 +476,11 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
{{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
{0, 0, 0},
{0, 0, 0},
+ {0, 0, 0} } },
+ { "reserved",
+ {{0, 0, 0},
+ {0, 0, 0},
+ {0, 0, 0},
{0, 0, 0} } }
};
@@ -498,7 +488,8 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -511,7 +502,8 @@ static struct storm_defs s_storm_defs[] = {
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -524,7 +516,8 @@ static struct storm_defs s_storm_defs[] = {
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -537,7 +530,8 @@ static struct storm_defs s_storm_defs[] = {
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -550,7 +544,8 @@ static struct storm_defs s_storm_defs[] = {
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -563,7 +558,8 @@ static struct storm_defs s_storm_defs[] = {
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -579,8 +575,8 @@ static struct storm_defs s_storm_defs[] = {
static struct block_defs block_grc_defs = {
"grc",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
GRC_REG_DBG_FORCE_FRAME,
@@ -588,30 +584,30 @@ static struct block_defs block_grc_defs = {
};
static struct block_defs block_miscs_defs = {
- "miscs", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "miscs", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_defs = {
- "misc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbu_defs = {
- "dbu", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbu", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_pglue_b_defs = {
"pglue_b",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -620,25 +616,26 @@ static struct block_defs block_pglue_b_defs = {
static struct block_defs block_cnig_defs = {
"cnig",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
- CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
- CNIG_REG_DBG_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCW},
+ CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
+ CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
+ CNIG_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 0
};
static struct block_defs block_cpmu_defs = {
- "cpmu", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "cpmu", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 8
};
static struct block_defs block_ncsi_defs = {
"ncsi",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
NCSI_REG_DBG_FORCE_FRAME,
@@ -646,16 +643,16 @@ static struct block_defs block_ncsi_defs = {
};
static struct block_defs block_opte_defs = {
- "opte", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "opte", {true, true, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 4
};
static struct block_defs block_bmb_defs = {
"bmb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
BMB_REG_DBG_FORCE_FRAME,
@@ -664,27 +661,28 @@ static struct block_defs block_bmb_defs = {
static struct block_defs block_pcie_defs = {
"pcie",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
- PCIE_REG_DBG_COMMON_SHIFT_K2,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+ PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp_defs = {
- "mcp", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "mcp", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp2_defs = {
"mcp2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
MCP2_REG_DBG_FORCE_FRAME,
@@ -693,8 +691,8 @@ static struct block_defs block_mcp2_defs = {
static struct block_defs block_pswhst_defs = {
"pswhst",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
PSWHST_REG_DBG_FORCE_FRAME,
@@ -703,8 +701,8 @@ static struct block_defs block_pswhst_defs = {
static struct block_defs block_pswhst2_defs = {
"pswhst2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
PSWHST2_REG_DBG_FORCE_FRAME,
@@ -713,8 +711,8 @@ static struct block_defs block_pswhst2_defs = {
static struct block_defs block_pswrd_defs = {
"pswrd",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
PSWRD_REG_DBG_FORCE_FRAME,
@@ -723,8 +721,8 @@ static struct block_defs block_pswrd_defs = {
static struct block_defs block_pswrd2_defs = {
"pswrd2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
PSWRD2_REG_DBG_FORCE_FRAME,
@@ -733,8 +731,8 @@ static struct block_defs block_pswrd2_defs = {
static struct block_defs block_pswwr_defs = {
"pswwr",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
PSWWR_REG_DBG_FORCE_FRAME,
@@ -742,16 +740,16 @@ static struct block_defs block_pswwr_defs = {
};
static struct block_defs block_pswwr2_defs = {
- "pswwr2", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "pswwr2", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISC_PL_HV, 3
};
static struct block_defs block_pswrq_defs = {
"pswrq",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
PSWRQ_REG_DBG_FORCE_FRAME,
@@ -760,8 +758,8 @@ static struct block_defs block_pswrq_defs = {
static struct block_defs block_pswrq2_defs = {
"pswrq2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -770,18 +768,19 @@ static struct block_defs block_pswrq2_defs = {
static struct block_defs block_pglcs_defs = {
"pglcs",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
- PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
- PGLCS_REG_DBG_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCH},
+ PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
+ PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
+ PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 2
};
static struct block_defs block_ptu_defs = {
"ptu",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
PTU_REG_DBG_FORCE_FRAME,
@@ -790,8 +789,8 @@ static struct block_defs block_ptu_defs = {
static struct block_defs block_dmae_defs = {
"dmae",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
DMAE_REG_DBG_FORCE_FRAME,
@@ -800,8 +799,8 @@ static struct block_defs block_dmae_defs = {
static struct block_defs block_tcm_defs = {
"tcm",
- {true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
TCM_REG_DBG_FORCE_FRAME,
@@ -810,8 +809,8 @@ static struct block_defs block_tcm_defs = {
static struct block_defs block_mcm_defs = {
"mcm",
- {true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
MCM_REG_DBG_FORCE_FRAME,
@@ -820,8 +819,8 @@ static struct block_defs block_mcm_defs = {
static struct block_defs block_ucm_defs = {
"ucm",
- {true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
UCM_REG_DBG_FORCE_FRAME,
@@ -830,8 +829,8 @@ static struct block_defs block_ucm_defs = {
static struct block_defs block_xcm_defs = {
"xcm",
- {true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
XCM_REG_DBG_FORCE_FRAME,
@@ -840,8 +839,8 @@ static struct block_defs block_xcm_defs = {
static struct block_defs block_ycm_defs = {
"ycm",
- {true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
YCM_REG_DBG_FORCE_FRAME,
@@ -850,8 +849,8 @@ static struct block_defs block_ycm_defs = {
static struct block_defs block_pcm_defs = {
"pcm",
- {true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
PCM_REG_DBG_FORCE_FRAME,
@@ -860,8 +859,8 @@ static struct block_defs block_pcm_defs = {
static struct block_defs block_qm_defs = {
"qm",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
QM_REG_DBG_FORCE_FRAME,
@@ -870,8 +869,8 @@ static struct block_defs block_qm_defs = {
static struct block_defs block_tm_defs = {
"tm",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
TM_REG_DBG_FORCE_FRAME,
@@ -880,8 +879,8 @@ static struct block_defs block_tm_defs = {
static struct block_defs block_dorq_defs = {
"dorq",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
DORQ_REG_DBG_FORCE_FRAME,
@@ -890,8 +889,8 @@ static struct block_defs block_dorq_defs = {
static struct block_defs block_brb_defs = {
"brb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
BRB_REG_DBG_FORCE_FRAME,
@@ -900,8 +899,8 @@ static struct block_defs block_brb_defs = {
static struct block_defs block_src_defs = {
"src",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
SRC_REG_DBG_FORCE_FRAME,
@@ -910,8 +909,8 @@ static struct block_defs block_src_defs = {
static struct block_defs block_prs_defs = {
"prs",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
PRS_REG_DBG_FORCE_FRAME,
@@ -920,8 +919,8 @@ static struct block_defs block_prs_defs = {
static struct block_defs block_tsdm_defs = {
"tsdm",
- {true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
TSDM_REG_DBG_FORCE_FRAME,
@@ -930,8 +929,8 @@ static struct block_defs block_tsdm_defs = {
static struct block_defs block_msdm_defs = {
"msdm",
- {true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
MSDM_REG_DBG_FORCE_FRAME,
@@ -940,8 +939,8 @@ static struct block_defs block_msdm_defs = {
static struct block_defs block_usdm_defs = {
"usdm",
- {true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
USDM_REG_DBG_FORCE_FRAME,
@@ -950,8 +949,8 @@ static struct block_defs block_usdm_defs = {
static struct block_defs block_xsdm_defs = {
"xsdm",
- {true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
XSDM_REG_DBG_FORCE_FRAME,
@@ -960,8 +959,8 @@ static struct block_defs block_xsdm_defs = {
static struct block_defs block_ysdm_defs = {
"ysdm",
- {true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
YSDM_REG_DBG_FORCE_FRAME,
@@ -970,8 +969,8 @@ static struct block_defs block_ysdm_defs = {
static struct block_defs block_psdm_defs = {
"psdm",
- {true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
PSDM_REG_DBG_FORCE_FRAME,
@@ -980,8 +979,8 @@ static struct block_defs block_psdm_defs = {
static struct block_defs block_tsem_defs = {
"tsem",
- {true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
TSEM_REG_DBG_FORCE_FRAME,
@@ -990,8 +989,8 @@ static struct block_defs block_tsem_defs = {
static struct block_defs block_msem_defs = {
"msem",
- {true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
MSEM_REG_DBG_FORCE_FRAME,
@@ -1000,8 +999,8 @@ static struct block_defs block_msem_defs = {
static struct block_defs block_usem_defs = {
"usem",
- {true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
USEM_REG_DBG_FORCE_FRAME,
@@ -1010,8 +1009,8 @@ static struct block_defs block_usem_defs = {
static struct block_defs block_xsem_defs = {
"xsem",
- {true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
XSEM_REG_DBG_FORCE_FRAME,
@@ -1020,8 +1019,8 @@ static struct block_defs block_xsem_defs = {
static struct block_defs block_ysem_defs = {
"ysem",
- {true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
YSEM_REG_DBG_FORCE_FRAME,
@@ -1030,8 +1029,8 @@ static struct block_defs block_ysem_defs = {
static struct block_defs block_psem_defs = {
"psem",
- {true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
PSEM_REG_DBG_FORCE_FRAME,
@@ -1040,8 +1039,8 @@ static struct block_defs block_psem_defs = {
static struct block_defs block_rss_defs = {
"rss",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
RSS_REG_DBG_FORCE_FRAME,
@@ -1050,8 +1049,8 @@ static struct block_defs block_rss_defs = {
static struct block_defs block_tmld_defs = {
"tmld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
TMLD_REG_DBG_FORCE_FRAME,
@@ -1060,8 +1059,8 @@ static struct block_defs block_tmld_defs = {
static struct block_defs block_muld_defs = {
"muld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
MULD_REG_DBG_FORCE_FRAME,
@@ -1070,8 +1069,9 @@ static struct block_defs block_muld_defs = {
static struct block_defs block_yuld_defs = {
"yuld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, false}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ MAX_DBG_BUS_CLIENTS},
YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
YULD_REG_DBG_FORCE_FRAME_BB_K2,
@@ -1081,18 +1081,40 @@ static struct block_defs block_yuld_defs = {
static struct block_defs block_xyld_defs = {
"xyld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
XYLD_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
};
+static struct block_defs block_ptld_defs = {
+ "ptld",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
+ PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
+ PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
+ PTLD_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ 28
+};
+
+static struct block_defs block_ypld_defs = {
+ "ypld",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
+ YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
+ YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
+ YPLD_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ 27
+};
+
static struct block_defs block_prm_defs = {
"prm",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
PRM_REG_DBG_FORCE_FRAME,
@@ -1101,8 +1123,8 @@ static struct block_defs block_prm_defs = {
static struct block_defs block_pbf_pb1_defs = {
"pbf_pb1",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -1112,8 +1134,8 @@ static struct block_defs block_pbf_pb1_defs = {
static struct block_defs block_pbf_pb2_defs = {
"pbf_pb2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -1123,8 +1145,8 @@ static struct block_defs block_pbf_pb2_defs = {
static struct block_defs block_rpb_defs = {
"rpb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
RPB_REG_DBG_FORCE_FRAME,
@@ -1133,8 +1155,8 @@ static struct block_defs block_rpb_defs = {
static struct block_defs block_btb_defs = {
"btb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
BTB_REG_DBG_FORCE_FRAME,
@@ -1143,8 +1165,8 @@ static struct block_defs block_btb_defs = {
static struct block_defs block_pbf_defs = {
"pbf",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
PBF_REG_DBG_FORCE_FRAME,
@@ -1153,8 +1175,8 @@ static struct block_defs block_pbf_defs = {
static struct block_defs block_rdif_defs = {
"rdif",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
RDIF_REG_DBG_FORCE_FRAME,
@@ -1163,8 +1185,8 @@ static struct block_defs block_rdif_defs = {
static struct block_defs block_tdif_defs = {
"tdif",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
TDIF_REG_DBG_FORCE_FRAME,
@@ -1173,8 +1195,8 @@ static struct block_defs block_tdif_defs = {
static struct block_defs block_cdu_defs = {
"cdu",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
CDU_REG_DBG_FORCE_FRAME,
@@ -1183,8 +1205,8 @@ static struct block_defs block_cdu_defs = {
static struct block_defs block_ccfc_defs = {
"ccfc",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
CCFC_REG_DBG_FORCE_FRAME,
@@ -1193,8 +1215,8 @@ static struct block_defs block_ccfc_defs = {
static struct block_defs block_tcfc_defs = {
"tcfc",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
TCFC_REG_DBG_FORCE_FRAME,
@@ -1203,8 +1225,8 @@ static struct block_defs block_tcfc_defs = {
static struct block_defs block_igu_defs = {
"igu",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
IGU_REG_DBG_FORCE_FRAME,
@@ -1213,42 +1235,79 @@ static struct block_defs block_igu_defs = {
static struct block_defs block_cau_defs = {
"cau",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
CAU_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
};
+static struct block_defs block_rgfs_defs = {
+ "rgfs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
+};
+
+static struct block_defs block_rgsrc_defs = {
+ "rgsrc",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
+ RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
+ RGSRC_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 30
+};
+
+static struct block_defs block_tgfs_defs = {
+ "tgfs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
+};
+
+static struct block_defs block_tgsrc_defs = {
+ "tgsrc",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
+ TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
+ TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
+ TGSRC_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 31
+};
+
static struct block_defs block_umac_defs = {
"umac",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
- UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
- UMAC_REG_DBG_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_RBCZ},
+ UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
+ UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
+ UMAC_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 6
};
static struct block_defs block_xmac_defs = {
- "xmac", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "xmac", {true, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbg_defs = {
- "dbg", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbg", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
};
static struct block_defs block_nig_defs = {
"nig",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
NIG_REG_DBG_FORCE_FRAME,
@@ -1257,139 +1316,106 @@ static struct block_defs block_nig_defs = {
static struct block_defs block_wol_defs = {
"wol",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
- WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
- WOL_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
+ WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
+ WOL_REG_DBG_FORCE_FRAME_K2_E5,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
};
static struct block_defs block_bmbn_defs = {
"bmbn",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
- BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
- BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
- BMBN_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCB},
+ BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
+ BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
+ BMBN_REG_DBG_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ipc_defs = {
- "ipc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "ipc", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 8
};
static struct block_defs block_nwm_defs = {
"nwm",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
- NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
- NWM_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+ NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
+ NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
+ NWM_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
};
static struct block_defs block_nws_defs = {
"nws",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
- NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
- NWS_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+ NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
+ NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
+ NWS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
static struct block_defs block_ms_defs = {
"ms",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
- MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
- MS_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
+ MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
+ MS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
static struct block_defs block_phy_pcie_defs = {
"phy_pcie",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
- PCIE_REG_DBG_COMMON_SHIFT_K2,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+ PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_led_defs = {
- "led", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "led", {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 14
};
static struct block_defs block_avs_wrap_defs = {
- "avs_wrap", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "avs_wrap", {false, true, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 11
};
-static struct block_defs block_rgfs_defs = {
- "rgfs", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_rgsrc_defs = {
- "rgsrc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgfs_defs = {
- "tgfs", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgsrc_defs = {
- "tgsrc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ptld_defs = {
- "ptld", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ypld_defs = {
- "ypld", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+static struct block_defs block_pxpreqbus_defs = {
+ "pxpreqbus", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc_aeu", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_bar0_map_defs = {
- "bar0_map", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "bar0_map", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1480,164 +1506,160 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_phy_pcie_defs,
&block_led_defs,
&block_avs_wrap_defs,
+ &block_pxpreqbus_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
static struct platform_defs s_platform_defs[] = {
- {"asic", 1},
- {"reserved", 0},
- {"reserved2", 0},
- {"reserved3", 0}
+ {"asic", 1, 256, 32768},
+ {"reserved", 0, 0, 0},
+ {"reserved2", 0, 0, 0},
+ {"reserved3", 0, 0, 0}
};
static struct grc_param_defs s_grc_param_defs[] = {
/* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PBUF */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_VFC */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_RESERVED */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_IGU */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BMB */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_STATIC */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_UNSTALL */
- {{0, 0}, 0, 1, false, 0, 0},
+ {{0, 0, 0}, 0, 1, false, 0, 0},
/* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
MAX_LCIDS},
/* DBG_GRC_PARAM_NUM_LTIDS */
- {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
MAX_LTIDS},
/* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0}, 0, 1, true, 0, 0},
+ {{0, 0, 0}, 0, 1, true, 0, 0},
/* DBG_GRC_PARAM_CRASH */
- {{0, 0}, 0, 1, true, 0, 0},
+ {{0, 0, 0}, 0, 1, true, 0, 0},
/* DBG_GRC_PARAM_PARITY_SAFE */
- {{0, 0}, 0, 1, false, 1, 0},
+ {{0, 0, 0}, 0, 1, false, 1, 0},
/* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PHY */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_NO_MCP */
- {{0, 0}, 0, 1, false, 0, 0},
+ {{0, 0, 0}, 0, 1, false, 0, 0},
/* DBG_GRC_PARAM_NO_FW_VER */
- {{0, 0}, 0, 1, false, 0, 0}
+ {{0, 0, 0}, 0, 1, false, 0, 0}
};
static struct rss_mem_defs s_rss_mem_defs[] = {
- { "rss_mem_cid", "rss_cid", 0,
- {256, 320},
- {32, 32} },
+ { "rss_mem_cid", "rss_cid", 0, 32,
+ {256, 320, 512} },
- { "rss_mem_key_msb", "rss_key", 1024,
- {128, 208},
- {256, 256} },
+ { "rss_mem_key_msb", "rss_key", 1024, 256,
+ {128, 208, 257} },
- { "rss_mem_key_lsb", "rss_key", 2048,
- {128, 208},
- {64, 64} },
+ { "rss_mem_key_lsb", "rss_key", 2048, 64,
+ {128, 208, 257} },
- { "rss_mem_info", "rss_info", 3072,
- {128, 208},
- {16, 16} },
+ { "rss_mem_info", "rss_info", 3072, 16,
+ {128, 208, 256} },
- { "rss_mem_ind", "rss_ind", 4096,
- {16384, 26624},
- {16, 16} }
+ { "rss_mem_ind", "rss_ind", 4096, 16,
+ {16384, 26624, 32768} }
};
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1650,72 +1672,75 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
static struct big_ram_defs s_big_ram_defs[] = {
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- {4800, 5632} },
+ MISC_REG_BLOCK_256B_EN, {0, 0, 0},
+ {153600, 180224, 282624} },
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- {2880, 3680} },
+ MISC_REG_BLOCK_256B_EN, {0, 1, 1},
+ {92160, 117760, 168960} },
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- {1152, 1152} }
+ MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
+ {36864, 36864, 36864} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
/* DBG_RESET_REG_MISCS_PL_UA */
- { MISCS_REG_RESET_PL_UA, 0x0,
- {true, true} },
+ { MISCS_REG_RESET_PL_UA,
+ {true, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISCS_PL_HV */
- { MISCS_REG_RESET_PL_HV, 0x0,
- {true, true} },
+ { MISCS_REG_RESET_PL_HV,
+ {true, true, true}, {0x0, 0x400, 0x600} },
/* DBG_RESET_REG_MISCS_PL_HV_2 */
- { MISCS_REG_RESET_PL_HV_2_K2, 0x0,
- {false, true} },
+ { MISCS_REG_RESET_PL_HV_2_K2_E5,
+ {false, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISC_PL_UA */
- { MISC_REG_RESET_PL_UA, 0x0,
- {true, true} },
+ { MISC_REG_RESET_PL_UA,
+ {true, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISC_PL_HV */
- { MISC_REG_RESET_PL_HV, 0x0,
- {true, true} },
+ { MISC_REG_RESET_PL_HV,
+ {true, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
- { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true} },
+ { MISC_REG_RESET_PL_PDA_VMAIN_1,
+ {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
- { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true} },
+ { MISC_REG_RESET_PL_PDA_VMAIN_2,
+ {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
- { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true} },
+ { MISC_REG_RESET_PL_PDA_VAUX,
+ {true, true, true}, {0x2, 0x2, 0x2} },
};
static struct phy_defs s_phy_defs[] = {
{"nw_phy", NWS_REG_NWS_CMU_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
- {"sgmii_phy", MS_REG_MS_CMU_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
- {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
- {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
+ {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
};
/**************************** Private Functions ******************************/
@@ -1774,7 +1799,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
/* Initializes the GRC parameters */
qed_dbg_grc_init_params(p_hwfn);
- dev_data->initialized = true;
+ dev_data->use_dmae = true;
+ dev_data->num_regs_read = 0;
+ dev_data->initialized = 1;
return DBG_STATUS_OK;
}
@@ -1807,7 +1834,7 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
* The address is located in the last line of the Storm RAM.
*/
addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
- DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
sizeof(fw_info_location);
dest = (u32 *)&fw_info_location;
@@ -2071,8 +2098,7 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
/* Writes the "last" section (including CRC) to the specified buffer at the
* given offset. Returns the dumped size in dwords.
*/
-static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
- u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
{
u32 start_offset = offset;
@@ -2235,7 +2261,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
case MEM_GROUP_CFC_MEM:
case MEM_GROUP_CONN_CFC_MEM:
case MEM_GROUP_TASK_CFC_MEM:
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
+ qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
case MEM_GROUP_IGU_MEM:
case MEM_GROUP_IGU_MSIX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
@@ -2293,7 +2320,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
struct block_defs *block = s_block_defs[block_id];
- if (block->has_reset_bit && block->unreset)
+ if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
+ block->unreset)
reg_val[block->reset_reg] |=
BIT(block->reset_bit_offset);
}
@@ -2303,7 +2331,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
continue;
- reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+ reg_val[i] |=
+ s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
if (reg_val[i])
qed_wr(p_hwfn,
@@ -2413,6 +2442,18 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
return offset;
}
+/* Reads the specified registers into the specified buffer.
+ * The addr and len arguments are specified in dwords.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
+}
+
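Note that qed_read_regs takes addr and len in dwords while qed_rd works on byte offsets, hence the per-element DWORDS_TO_BYTES conversion. A usage sketch (buffer sizing is the caller's responsibility):

	/* Read 4 consecutive dwords starting at dword address 0x100.
	 * DWORDS_TO_BYTES(x) is ((x) * 4) in this driver.
	 */
	u32 buf[4];

	qed_read_regs(p_hwfn, p_ptt, buf, 0x100, ARRAY_SIZE(buf));
	/* buf[0] now holds the register at byte address 0x400 */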
/* Dumps the GRC registers in the specified address range.
* Returns the dumped size in dwords.
* The addr and len arguments are specified in dwords.
@@ -2422,15 +2463,39 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 addr, u32 len, bool wide_bus)
{
- u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
if (!dump)
return len;
- for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ /* Print log if needed */
+ dev_data->num_regs_read += len;
+ if (dev_data->num_regs_read >=
+ s_platform_defs[dev_data->platform_id].log_thresh) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers...\n",
+ dev_data->num_regs_read);
+ dev_data->num_regs_read = 0;
+ }
- return offset;
+ /* Try reading using DMAE */
+ if (dev_data->use_dmae &&
+ (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
+ wide_bus)) {
+ if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
+ (u64)(uintptr_t)(dump_buf), len, 0))
+ return len;
+ dev_data->use_dmae = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed reading from chip using DMAE, using GRC instead\n");
+ }
+
+ /* Read registers */
+ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+
+ return len;
}
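The reworked qed_grc_dump_addr_range thus layers three behaviors: threshold-based progress logging, a DMAE fast path for long or wide-bus reads, and a permanent fallback to plain GRC reads once DMAE fails. A condensed sketch of that control flow, treating the per-platform thresholds as opaque values:

	if (!dump)
		return len;                       /* size-only pass */

	dev_data->num_regs_read += len;
	if (dev_data->num_regs_read >= log_thresh) {
		/* periodic progress log, then reset the counter */
		dev_data->num_regs_read = 0;
	}

	if (dev_data->use_dmae && (len >= dmae_thresh || wide_bus)) {
		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
				       (u64)(uintptr_t)dump_buf, len, 0))
			return len;               /* DMAE succeeded */
		dev_data->use_dmae = 0;           /* never retry DMAE this session */
	}

	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);  /* GRC fallback */
	return len;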
/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
@@ -2630,9 +2695,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
chip = &s_chip_defs[dev_data->chip_id];
chip_platform = &chip->per_platform[dev_data->platform_id];
- if (dump)
- DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
-
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
const struct dbg_dump_split_hdr *split_hdr;
@@ -2966,22 +3028,12 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "name", buf);
- if (dump)
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Dumping %d registers from %s...\n",
- len, buf);
} else {
/* Dump address */
u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
offset += qed_dump_num_param(dump_buf + offset,
dump, "addr", addr_in_bytes);
- if (dump && len > 64)
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Dumping %d registers from address 0x%x...\n",
- len, addr_in_bytes);
}
/* Dump len */
@@ -3530,17 +3582,16 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
u8 rss_mem_id;
for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
- u32 rss_addr, num_entries, entry_width, total_dwords, i;
+ u32 rss_addr, num_entries, total_dwords;
struct rss_mem_defs *rss_defs;
- u32 addr, size;
+ u32 addr, num_dwords_to_read;
bool packed;
rss_defs = &s_rss_mem_defs[rss_mem_id];
rss_addr = rss_defs->addr;
num_entries = rss_defs->num_entries[dev_data->chip_id];
- entry_width = rss_defs->entry_width[dev_data->chip_id];
- total_dwords = (num_entries * entry_width) / 32;
- packed = (entry_width == 16);
+ total_dwords = (num_entries * rss_defs->entry_width) / 32;
+ packed = (rss_defs->entry_width == 16);
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3548,7 +3599,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
rss_defs->mem_name,
0,
total_dwords,
- entry_width,
+ rss_defs->entry_width,
packed,
rss_defs->type_name, false, 0);
@@ -3559,16 +3610,20 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
}
addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
- size = RSS_REG_RSS_RAM_DATA_SIZE;
- for (i = 0; i < total_dwords; i += size, rss_addr++) {
+ while (total_dwords) {
+ num_dwords_to_read = min_t(u32,
+ RSS_REG_RSS_RAM_DATA_SIZE,
+ total_dwords);
qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
addr,
- size,
+ num_dwords_to_read,
false);
+ total_dwords -= num_dwords_to_read;
+ rss_addr++;
}
}
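The while/min_t form also handles a total size that is not a multiple of RSS_REG_RSS_RAM_DATA_SIZE, which the old fixed-stride for loop did not. The general chunking idiom, as a standalone sketch (read_window is a hypothetical per-window reader):

	/* Process `total_dwords` in chunks of at most `chunk_max`,
	 * advancing a window index each iteration.
	 */
	while (total_dwords) {
		u32 n = min_t(u32, chunk_max, total_dwords);

		read_window(window_idx, n);
		total_dwords -= n;
		window_idx++;
	}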
@@ -3581,14 +3636,18 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump, u8 big_ram_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 total_blocks, ram_size, offset = 0, i;
+ u32 block_size, ram_size, offset = 0, reg_val, i;
char mem_name[12] = "???_BIG_RAM";
char type_name[8] = "???_RAM";
struct big_ram_defs *big_ram;
big_ram = &s_big_ram_defs[big_ram_id];
- total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
- ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+ ram_size = big_ram->ram_size[dev_data->chip_id];
+
+ reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
+ block_size = reg_val &
+ BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
+ : 128;
strncpy(type_name, big_ram->instance_name,
strlen(big_ram->instance_name));
@@ -3602,7 +3661,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
mem_name,
0,
ram_size,
- BIG_RAM_BLOCK_SIZE_BYTES * 8,
+ block_size * 8,
false, type_name, false, 0);
/* Read and dump Big RAM data */
@@ -3610,12 +3669,13 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
return offset + ram_size;
/* Dump Big RAM */
- for (i = 0; i < total_blocks / 2; i++) {
+ for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
+ i++) {
u32 addr, len;
qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
- len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+ len = BRB_REG_BIG_RAM_DATA_SIZE;
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
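Big RAM dumping now derives the block size at run time from a chip-specific "is 256B" register bit instead of a compile-time constant, and sizes the loop with DIV_ROUND_UP so a trailing partial block is still read. A sketch of the two pieces together (variable names are stand-ins for the big_ram fields):

	/* Block size: 256 bytes if the mode bit is set, else 128 */
	u32 reg_val = qed_rd(p_hwfn, p_ptt, is_256b_reg_addr);
	u32 block_size = (reg_val & BIT(is_256b_bit)) ? 256 : 128;

	/* DIV_ROUND_UP(a, b) == (a + b - 1) / b: covers a partial final chunk */
	u32 num_reads = DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);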
@@ -3649,7 +3709,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump,
NULL,
BYTES_TO_DWORDS(MCP_REG_SCRATCH),
- MCP_REG_SCRATCH_SIZE,
+ MCP_REG_SCRATCH_SIZE_BB_K2,
false, 0, false, "MCP", false, 0);
/* Dump MCP cpu_reg_file */
@@ -3710,7 +3770,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
phy_defs->tbus_data_lo_addr;
data_hi_addr = phy_defs->base_addr +
phy_defs->tbus_data_hi_addr;
- bytes_buf = (u8 *)(dump_buf + offset);
if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
phy_defs->phy_name) < 0)
@@ -3730,6 +3789,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
continue;
}
+ bytes_buf = (u8 *)(dump_buf + offset);
for (tbus_hi_offset = 0;
tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
tbus_hi_offset++) {
@@ -3778,19 +3838,17 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 block_id, line_id, offset = 0;
- /* Skip static debug if a debug bus recording is in progress */
- if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+ /* Don't dump static debug if a debug bus recording is in progress */
+ if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
return 0;
if (dump) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG, "Dumping static debug data...\n");
-
/* Disable all blocks debug output */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
struct block_defs *block = s_block_defs[block_id];
- if (block->has_dbg_bus[dev_data->chip_id])
+ if (block->dbg_client_id[dev_data->chip_id] !=
+ MAX_DBG_BUS_CLIENTS)
qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
0);
}
@@ -3811,12 +3869,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
u32 block_dwords, addr, len;
u8 dbg_client_id;
- if (!block->has_dbg_bus[dev_data->chip_id])
+ if (block->dbg_client_id[dev_data->chip_id] ==
+ MAX_DBG_BUS_CLIENTS)
continue;
- block_desc =
- get_dbg_bus_block_desc(p_hwfn,
- (enum block_id)block_id);
+ block_desc = get_dbg_bus_block_desc(p_hwfn,
+ (enum block_id)block_id);
block_dwords = NUM_DBG_LINES(block_desc) *
STATIC_DEBUG_LINE_DWORDS;
@@ -4044,7 +4102,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset, dump);
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
if (dump) {
/* Unstall storms */
@@ -4253,30 +4311,33 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (!check_rule && dump)
continue;
+ if (!dump) {
+ u32 entry_dump_size =
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ 0,
+ NULL);
+
+ offset += num_reg_entries * entry_dump_size;
+ (*num_failing_rules) += num_reg_entries;
+ continue;
+ }
+
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
u32 next_reg_offset = 0;
- if (!dump) {
- offset += qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- false,
- rule->rule_id,
- rule,
- entry_id,
- NULL);
- (*num_failing_rules)++;
- break;
- }
-
/* Read current entry of all condition registers */
for (reg_id = 0; reg_id < rule->num_cond_regs;
reg_id++) {
const struct dbg_idle_chk_cond_reg *reg =
- &cond_regs[reg_id];
+ &cond_regs[reg_id];
u32 padded_entry_size, addr;
bool wide_bus;
@@ -4291,9 +4352,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (reg->num_entries > 1 ||
reg->start_entry > 0) {
padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two(reg->entry_size)
- : 1;
+ reg->entry_size > 1 ?
+ roundup_pow_of_two(reg->entry_size) :
+ 1;
addr += (reg->start_entry + entry_id) *
padded_entry_size;
}
@@ -4329,7 +4390,6 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
entry_id,
cond_reg_values);
(*num_failing_rules)++;
- break;
}
}
}
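This rework exploits the dump API's two-pass convention: every dump routine in this file can be called with dump=false to compute the size it would produce without touching hardware. Since all entries of a rule dump to the same size, the !dump pass now prices one entry and multiplies instead of looping over all of them. The convention itself, sketched with a hypothetical qed_some_dump stand-in:

	/* Two-pass dump pattern used throughout this file (sketch). */
	u32 needed = qed_some_dump(p_hwfn, p_ptt, NULL, false);  /* pass 1: size */
	u32 *buf = vzalloc(needed * sizeof(u32));

	if (buf)
		qed_some_dump(p_hwfn, p_ptt, buf, true);         /* pass 2: data */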
@@ -4402,7 +4462,7 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
dump, "num_rules", num_failing_rules);
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
return offset;
}
@@ -4474,7 +4534,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
(nvram_offset_bytes +
read_offset) |
(bytes_to_copy <<
- DRV_MB_PARAM_NVM_LEN_SHIFT),
+ DRV_MB_PARAM_NVM_LEN_OFFSET),
&ret_mcp_resp, &ret_mcp_param,
&ret_read_size,
(u32 *)((u8 *)ret_buf + read_offset)))
@@ -4701,7 +4761,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
offset += trace_meta_size_dwords;
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4717,7 +4777,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 dwords_read, size_param_offset, offset = 0;
+ u32 dwords_read, size_param_offset, offset = 0, addr, len;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4753,14 +4813,18 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
* buffer size since more entries could be added to the buffer as we are
* emptying it.
*/
+ addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
+ len = REG_FIFO_ELEMENT_DWORDS;
for (dwords_read = 0;
fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
- dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
- REG_FIFO_ELEMENT_DWORDS) {
- if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
- (u64)(uintptr_t)(&dump_buf[offset]),
- REG_FIFO_ELEMENT_DWORDS, 0))
- return DBG_STATUS_DMAE_FAILED;
+ dwords_read += REG_FIFO_ELEMENT_DWORDS) {
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ len,
+ true);
fifo_has_data = qed_rd(p_hwfn, p_ptt,
GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
}
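The loop drains the FIFO element by element, re-checking the valid-data register after every read, but stays bounded by the FIFO depth: as the comment above notes, new entries can arrive while emptying, so an unbounded drain could spin forever. The shape of that idiom (read_one_element is hypothetical):

	/* Bounded drain: stop when the FIFO reports empty OR after reading
	 * one full FIFO depth, whichever comes first.
	 */
	for (read = 0; has_data && read < fifo_depth; read += element_dwords) {
		read_one_element(buf + offset);
		offset += element_dwords;
		has_data = qed_rd(p_hwfn, p_ptt, valid_data_reg) > 0;
	}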
@@ -4769,7 +4833,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
dwords_read);
out:
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4782,7 +4846,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 dwords_read, size_param_offset, offset = 0;
+ u32 dwords_read, size_param_offset, offset = 0, addr, len;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4818,16 +4882,19 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
* buffer size since more entries could be added to the buffer as we are
* emptying it.
*/
+ addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
+ len = IGU_FIFO_ELEMENT_DWORDS;
for (dwords_read = 0;
fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
- dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
- IGU_FIFO_ELEMENT_DWORDS) {
- if (qed_dmae_grc2host(p_hwfn, p_ptt,
- IGU_REG_ERROR_HANDLING_MEMORY,
- (u64)(uintptr_t)(&dump_buf[offset]),
- IGU_FIFO_ELEMENT_DWORDS, 0))
- return DBG_STATUS_DMAE_FAILED;
- fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ len,
+ true);
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
}
@@ -4835,7 +4902,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
dwords_read);
out:
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4849,7 +4916,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
bool dump,
u32 *num_dumped_dwords)
{
- u32 size_param_offset, override_window_dwords, offset = 0;
+ u32 size_param_offset, override_window_dwords, offset = 0, addr;
*num_dumped_dwords = 0;
@@ -4875,20 +4942,21 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
/* Add override window info to buffer */
override_window_dwords =
- qed_rd(p_hwfn, p_ptt,
- GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
- if (qed_dmae_grc2host(p_hwfn, p_ptt,
- GRC_REG_PROTECTION_OVERRIDE_WINDOW,
- (u64)(uintptr_t)(dump_buf + offset),
- override_window_dwords, 0))
- return DBG_STATUS_DMAE_FAILED;
- offset += override_window_dwords;
+ qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ override_window_dwords,
+ true);
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
override_window_dwords);
out:
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4952,9 +5020,9 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
next_list_idx_addr = fw_asserts_section_addr +
DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
- last_list_idx = (next_list_idx > 0
- ? next_list_idx
- : asserts->list_num_elements) - 1;
+ last_list_idx = (next_list_idx > 0 ?
+ next_list_idx :
+ asserts->list_num_elements) - 1;
addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
asserts->list_dword_offset +
last_list_idx * asserts->list_element_dword_size;
@@ -4967,7 +5035,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
}
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
return offset;
}
@@ -5596,10 +5664,6 @@ struct igu_fifo_addr_data {
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
-/********************************* Macros ************************************/
-
-#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-
/***************************** Constant Arrays *******************************/
struct user_dbg_array {
@@ -5698,6 +5762,7 @@ static struct block_info s_block_info_arr[] = {
{"phy_pcie", BLOCK_PHY_PCIE},
{"led", BLOCK_LED},
{"avs_wrap", BLOCK_AVS_WRAP},
+ {"pxpreqbus", BLOCK_PXPREQBUS},
{"misc_aeu", BLOCK_MISC_AEU},
{"bar0_map", BLOCK_BAR0_MAP}
};
@@ -5830,8 +5895,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_MCP_COULD_NOT_RESUME */
"Failed to resume MCP after halt",
- /* DBG_STATUS_DMAE_FAILED */
- "DMAE transaction failed",
+ /* DBG_STATUS_RESERVED2 */
+ "Reserved debug status - shouldn't be returned",
/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
"Failed to empty SEMI sync FIFO",
@@ -6109,6 +6174,7 @@ static u32 qed_read_param(u32 *dump_buf,
if (*(char_buf + offset++)) {
/* String param */
*param_str_val = char_buf + offset;
+ *param_num_val = 0;
offset += strlen(*param_str_val) + 1;
if (offset & 0x3)
offset += (4 - (offset & 0x3));
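String parameters are stored NUL-terminated and then padded so the next item starts on a dword boundary; the `offset & 0x3` test adds one to three bytes when needed. Equivalently, and perhaps more familiarly:

	/* Both forms round `offset` up to the next multiple of 4 (sketch). */
	if (offset & 0x3)
		offset += 4 - (offset & 0x3);

	/* ...is the same as the kernel's ALIGN() helper: */
	offset = ALIGN(offset, 4);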
@@ -6177,8 +6243,7 @@ static u32 qed_print_section_params(u32 *dump_buf,
/* Parses the idle check rules and returns the number of characters printed.
* In case of parsing error, returns 0.
*/
-static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
u32 *dump_buf_end,
u32 num_rules,
bool print_fw_idle_chk,
@@ -6322,8 +6387,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf,
u32 *parsed_results_bytes,
@@ -6375,13 +6439,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"FW_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
- dump_buf_end, num_rules,
+ qed_parse_idle_chk_dump_rules(dump_buf,
+ dump_buf_end,
+ num_rules,
true,
results_buf ?
results_buf +
- results_offset : NULL,
- num_errors, num_warnings);
+ results_offset :
+ NULL,
+ num_errors,
+ num_warnings);
results_offset += rules_print_size;
if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6392,13 +6459,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"\nLSI_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
- dump_buf_end, num_rules,
+ qed_parse_idle_chk_dump_rules(dump_buf,
+ dump_buf_end,
+ num_rules,
false,
results_buf ?
results_buf +
- results_offset : NULL,
- num_errors, num_warnings);
+ results_offset :
+ NULL,
+ num_errors,
+ num_warnings);
results_offset += rules_print_size;
if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6537,7 +6607,6 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
*/
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
- u32 num_dumped_dwords,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -6725,9 +6794,7 @@ free_mem:
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -6834,8 +6901,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
*element, char
*results_buf,
- u32 *results_offset,
- u32 *parsed_results_bytes)
+ u32 *results_offset)
{
const struct igu_fifo_addr_data *found_addr = NULL;
u8 source, err_type, i, is_cleanup;
@@ -6933,9 +6999,9 @@ static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
prod_cons,
update_flag ? "update" : "nop",
- en_dis_int_for_sb
- ? (en_dis_int_for_sb == 1 ? "disable" : "nop")
- : "enable",
+ en_dis_int_for_sb ?
+ (en_dis_int_for_sb == 1 ? "disable" : "nop") :
+ "enable",
segment ? "attn" : "regular",
timer_mask);
}
@@ -6969,9 +7035,7 @@ out:
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -7011,8 +7075,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
for (i = 0; i < num_elements; i++) {
status = qed_parse_igu_fifo_element(&elements[i],
results_buf,
- &results_offset,
- parsed_results_bytes);
+ &results_offset);
if (status != DBG_STATUS_OK)
return status;
}
@@ -7028,9 +7091,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
}
static enum dbg_status
-qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+qed_parse_protection_override_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -7105,9 +7166,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -7209,8 +7268,7 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
{
u32 num_errors, num_warnings;
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
+ return qed_parse_idle_chk_dump(dump_buf,
num_dumped_dwords,
NULL,
results_buf_size,
@@ -7221,12 +7279,12 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf,
- u32 *num_errors, u32 *num_warnings)
+ u32 *num_errors,
+ u32 *num_warnings)
{
u32 parsed_buf_size;
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
+ return qed_parse_idle_chk_dump(dump_buf,
num_dumped_dwords,
results_buf,
&parsed_buf_size,
@@ -7245,9 +7303,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *results_buf_size)
{
return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
@@ -7259,7 +7315,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf,
- num_dumped_dwords,
results_buf, &parsed_buf_size);
}
@@ -7268,10 +7323,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7281,10 +7333,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
}
enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
@@ -7292,10 +7341,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7305,10 +7351,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
}
enum dbg_status
@@ -7317,9 +7360,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
+ return qed_parse_protection_override_dump(dump_buf,
NULL, results_buf_size);
}
@@ -7330,9 +7371,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
+ return qed_parse_protection_override_dump(dump_buf,
results_buf,
&parsed_buf_size);
}
@@ -7342,10 +7381,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_fw_asserts_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
@@ -7355,9 +7391,7 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_fw_asserts_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
+ return qed_parse_fw_asserts_dump(dump_buf,
results_buf, &parsed_buf_size);
}
@@ -7386,30 +7420,30 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
/* Go over registers with a non-zero attention status */
for (i = 0; i < num_regs; i++) {
+ struct dbg_attn_bit_mapping *bit_mapping;
struct dbg_attn_reg_result *reg_result;
- struct dbg_attn_bit_mapping *mapping;
u8 num_reg_attn, bit_idx = 0;
reg_result = &results->reg_results[i];
num_reg_attn = GET_FIELD(reg_result->data,
DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
- mapping = &((struct dbg_attn_bit_mapping *)
- block_attn->ptr)[reg_result->block_attn_offset];
+ bit_mapping = &((struct dbg_attn_bit_mapping *)
+ block_attn->ptr)[reg_result->block_attn_offset];
pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
/* Go over attention status bits */
for (j = 0; j < num_reg_attn; j++) {
- u16 attn_idx_val = GET_FIELD(mapping[j].data,
+ u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
- u32 name_offset, sts_addr;
+ u32 attn_name_offset, sts_addr;
/* Check if bit mask should be advanced (due to unused
* bits).
*/
- if (GET_FIELD(mapping[j].data,
+ if (GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
bit_idx += (u8)attn_idx_val;
continue;
@@ -7422,9 +7456,10 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Find attention name */
- name_offset = block_attn_name_offsets[attn_idx_val];
+ attn_name_offset =
+ block_attn_name_offsets[attn_idx_val];
attn_name = &((const char *)
- pstrings->ptr)[name_offset];
+ pstrings->ptr)[attn_name_offset];
attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
"Interrupt" : "Parity";
masked_str = reg_result->mask_val & BIT(bit_idx) ?
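The bit-mapping array is a compact encoding: each entry either names an attention bit or, when the IS_UNUSED_BIT_CNT flag is set, says "skip N unused bits", letting bit_idx track the real position in the status register. A sketch of walking such an encoding (handle_attn_bit is a hypothetical consumer):

	for (j = 0, bit_idx = 0; j < num_entries; j++) {
		u16 val = GET_FIELD(map[j].data, DBG_ATTN_BIT_MAPPING_VAL);

		if (GET_FIELD(map[j].data,
			      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
			bit_idx += (u8)val;     /* skip a run of unused bits */
			continue;
		}
		handle_attn_bit(bit_idx, val);  /* val indexes the name table */
		bit_idx++;
	}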
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 58a689fb04db..553a6d17260e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* This function reconfigures the QM pf on the fly.
* For this purpose we:
* 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
* 3. send an sdm_qm_cmd through the rbc interface to stop the QM
* 4. activate init tool in QM_PF stage
* 5. send an sdm_qm_cmd through rbc interface to release the QM
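The five steps in the comment map almost one-to-one onto calls in qed_qm_reconf. A condensed sketch of the sequence, with the stop/release mailbox commands folded into hypothetical qm_stop/qm_release stand-ins and error handling elided:

	qed_init_qm_info(p_hwfn);               /* 1. rebuild the QM database */
	qed_qm_init_pf(p_hwfn, p_ptt, false);   /* 2. refresh the runtime array */
	qm_stop(p_hwfn, p_ptt);                 /* 3. sdm_qm_cmd: stop the QM */
	qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF,/* 4. run the QM_PF init stage */
		     p_hwfn->rel_pf_id, hw_mode);
	qm_release(p_hwfn, p_ptt);              /* 5. sdm_qm_cmd: release the QM */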
@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- qed_qm_init_pf(p_hwfn, p_ptt);
+ qed_qm_init_pf(p_hwfn, p_ptt, false);
/* activate init tool on runtime array */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
}
- /* Protocl Configuration */
+ /* Protocol Configuration */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
(p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ /* Sanity check before the PF init sequence that uses DMAE */
+ rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+ if (rc)
+ return rc;
+
/* PF Init sequence */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc)
@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* No need for a case for QED_CMDQS_CQS since
* CNQ/CMDQS are the same resource.
*/
- resc_max_val = NUM_OF_CMDQS_CQS;
+ resc_max_val = NUM_OF_GLOBAL_QUEUES;
break;
case QED_RDMA_STATS_QUEUE:
resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
- *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+ *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index df195c02b711..2dc9b312a795 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
struct fcoe_init_ramrod_params *p_ramrod = NULL;
struct fcoe_init_func_ramrod_data *p_data;
- struct fcoe_conn_context *p_cxt = NULL;
+ struct e4_fcoe_conn_context *p_cxt = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_cxt_info cxt_info;
@@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
}
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
- TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+ E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
fcoe_pf_params->dummy_icid = (u16)dummy_cid;
@@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
- struct fcoe_task_context *p_task_ctx = NULL;
+ struct e4_fcoe_task_context *p_task_ctx = NULL;
int rc;
u32 i;
@@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
if (rc)
continue;
- memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
SET_FIELD(p_task_ctx->timer_context.logical_client_0,
TIMERS_CONTEXT_VALIDLC0, 1);
SET_FIELD(p_task_ctx->timer_context.logical_client_1,
TIMERS_CONTEXT_VALIDLC1, 1);
SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
- TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+ E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
}
}
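The E4_*-prefixed flags are consumed through the driver's mask/shift convention: every field FOO has FOO_MASK and FOO_SHIFT defines, and SET_FIELD/GET_FIELD splice values in and out of the containing word. A sketch of how such macros typically look and are used (the exact qed definitions may differ slightly):

/* Generic accessors over a FOO_MASK / FOO_SHIFT pair (sketch). */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

#define SET_FIELD(value, name, flag)                           \
	do {                                                   \
		(value) &= ~((name##_MASK) << (name##_SHIFT)); \
		(value) |= ((u64)(flag)) << (name##_SHIFT);    \
	} while (0)

	/* e.g. pack a 4-bit BD count, then store little-endian */
	u16 flags = 0;

	SET_FIELD(flags, CORE_TX_BD_DATA_NBDS, num_bds);
	bd->bd_data.as_bitfield = cpu_to_le16(flags);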
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 3427fe7049b5..de873d770575 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -54,7 +54,7 @@
struct qed_hwfn;
struct qed_ptt;
-/* opcodes for the event ring */
+/* Opcodes for the event ring */
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
@@ -82,487 +82,7 @@ enum common_ramrod_cmd_id {
MAX_COMMON_RAMROD_CMD_ID
};
-/* The core storm context for the Ystorm */
-struct ystorm_core_conn_st_ctx {
- __le32 reserved[4];
-};
-
-/* The core storm context for the Pstorm */
-struct pstorm_core_conn_st_ctx {
- __le32 reserved[4];
-};
-
-/* Core Slowpath Connection storm context of Xstorm */
-struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo;
- __le32 spq_base_hi;
- struct regpair consolid_base_addr;
- __le16 spq_cons;
- __le16 consolid_cons;
- __le32 reserved0[55];
-};
-
-struct xstorm_core_conn_ag_ctx {
- u8 reserved0;
- u8 core_state;
- u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
- u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
- u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
- u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
- u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
- u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
- u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
- u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
- u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
- u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
- u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
- u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
- u8 byte2;
- __le16 physical_q0;
- __le16 consolid_prod;
- __le16 reserved16;
- __le16 tx_bd_cons;
- __le16 tx_bd_or_spq_prod;
- __le16 word5;
- __le16 conn_dpi;
- u8 byte3;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le32 reg4;
- __le32 reg5;
- __le32 reg6;
- __le16 word7;
- __le16 word8;
- __le16 word9;
- __le16 word10;
- __le32 reg7;
- __le32 reg8;
- __le32 reg9;
- u8 byte7;
- u8 byte8;
- u8 byte9;
- u8 byte10;
- u8 byte11;
- u8 byte12;
- u8 byte13;
- u8 byte14;
- u8 byte15;
- u8 e5_reserved;
- __le16 word11;
- __le32 reg10;
- __le32 reg11;
- __le32 reg12;
- __le32 reg13;
- __le32 reg14;
- __le32 reg15;
- __le32 reg16;
- __le32 reg17;
- __le32 reg18;
- __le32 reg19;
- __le16 word12;
- __le16 word13;
- __le16 word14;
- __le16 word15;
-};
-
-struct tstorm_core_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
- u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
- u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
- u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
- u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le32 reg4;
- __le32 reg5;
- __le32 reg6;
- __le32 reg7;
- __le32 reg8;
- u8 byte2;
- u8 byte3;
- __le16 word0;
- u8 byte4;
- u8 byte5;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le32 reg9;
- __le32 reg10;
-};
-
-struct ustorm_core_conn_ag_ctx {
- u8 reserved;
- u8 byte1;
- u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
- u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2;
- u8 byte3;
- __le16 word0;
- __le16 word1;
- __le32 rx_producers;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le16 word2;
- __le16 word3;
-};
-
-/* The core storm context for the Mstorm */
-struct mstorm_core_conn_st_ctx {
- __le32 reserved[24];
-};
-
-/* The core storm context for the Ustorm */
-struct ustorm_core_conn_st_ctx {
- __le32 reserved[4];
-};
-
-/* core connection context */
-struct core_conn_context {
- struct ystorm_core_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2];
- struct pstorm_core_conn_st_ctx pstorm_st_context;
- struct regpair pstorm_st_padding[2];
- struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct ustorm_core_conn_ag_ctx ustorm_ag_context;
- struct mstorm_core_conn_st_ctx mstorm_st_context;
- struct ustorm_core_conn_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2];
-};
-
+/* How ll2 should deal with a packet upon errors */
enum core_error_handle {
LL2_DROP_PACKET,
LL2_DO_NOTHING,
@@ -570,21 +90,25 @@ enum core_error_handle {
MAX_CORE_ERROR_HANDLE
};
+/* Opcodes for the event ring */
enum core_event_opcode {
CORE_EVENT_TX_QUEUE_START,
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
+ CORE_EVENT_TX_QUEUE_UPDATE,
MAX_CORE_EVENT_OPCODE
};
+/* The L4 pseudo checksum mode for Core */
enum core_l4_pseudo_checksum_mode {
CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
+/* Light-L2 per-port statistics */
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
struct regpair gsi_invalid_pkt_length;
@@ -592,6 +116,7 @@ struct core_ll2_port_stats {
struct regpair gsi_crcchksm_error;
};
+/* Ethernet TX Per Queue Stats */
struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
@@ -601,6 +126,7 @@ struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_bcast_pkts;
};
+/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_rx_prod {
__le16 bd_prod;
__le16 cqe_prod;
@@ -621,6 +147,7 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
CORE_RAMROD_RX_QUEUE_START,
@@ -628,53 +155,64 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
CORE_RAMROD_RX_QUEUE_FLUSH,
+ CORE_RAMROD_TX_QUEUE_UPDATE,
MAX_CORE_RAMROD_CMD_ID
};
+/* RoCE flavor type for Light L2 */
enum core_roce_flavor_type {
CORE_ROCE,
CORE_RROCE,
MAX_CORE_ROCE_FLAVOR_TYPE
};
+/* Specifies how ll2 should deal with packet errors: packet_too_big and
+ * no_buff.
+ */
struct core_rx_action_on_error {
u8 error_type;
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
-#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
};
+/* Core RX BD for Light L2 */
struct core_rx_bd {
struct regpair addr;
__le16 reserved[4];
};
+/* Core RX CM offload BD for Light L2 */
struct core_rx_bd_with_buff_len {
struct regpair addr;
__le16 buff_length;
__le16 reserved[3];
};
+/* Core RX BD union for Light L2 */
union core_rx_bd_union {
struct core_rx_bd rx_bd;
struct core_rx_bd_with_buff_len rx_bd_with_len;
};
+/* Opaque Data for Light L2 RX CQE */
struct core_rx_cqe_opaque_data {
__le32 data[2];
};
+/* Core RX CQE Type for Light L2 */
enum core_rx_cqe_type {
- CORE_RX_CQE_ILLIGAL_TYPE,
+ CORE_RX_CQE_ILLEGAL_TYPE,
CORE_RX_CQE_TYPE_REGULAR,
CORE_RX_CQE_TYPE_GSI_OFFLOAD,
CORE_RX_CQE_TYPE_SLOW_PATH,
MAX_CORE_RX_CQE_TYPE
};
+/* Core RX CQE for Light L2 */
struct core_rx_fast_path_cqe {
u8 type;
u8 placement_offset;
@@ -687,6 +225,7 @@ struct core_rx_fast_path_cqe {
__le32 reserved1[3];
};
+/* Core Rx CM offload CQE */
struct core_rx_gsi_offload_cqe {
u8 type;
u8 data_length_error;
@@ -696,9 +235,11 @@ struct core_rx_gsi_offload_cqe {
__le32 src_mac_addrhi;
__le16 src_mac_addrlo;
__le16 qp_id;
- __le32 gid_dst[4];
+ __le32 src_qp;
+ __le32 reserved[3];
};
+/* Core RX CQE for Light L2 */
struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
@@ -707,12 +248,14 @@ struct core_rx_slow_path_cqe {
__le32 reserved1[5];
};
+/* Core RX CQE union for Light L2 */
union core_rx_cqe_union {
struct core_rx_fast_path_cqe rx_cqe_fp;
struct core_rx_gsi_offload_cqe rx_cqe_gsi;
struct core_rx_slow_path_cqe rx_cqe_sp;
};
+/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {
struct regpair bd_base;
struct regpair cqe_pbl_addr;
@@ -723,16 +266,18 @@ struct core_rx_start_ramrod_data {
u8 complete_event_flg;
u8 drop_ttl0_flg;
__le16 num_of_pbl_pages;
- u8 inner_vlan_removal_en;
+ u8 inner_vlan_stripping_en;
+ u8 report_outer_vlan;
u8 queue_id;
u8 main_func_queue;
u8 mf_si_bcast_accept_all;
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
- u8 reserved[7];
+ u8 reserved[6];
};
+/* Ramrod data for rx queue stop ramrod */
struct core_rx_stop_ramrod_data {
u8 complete_cqe_flg;
u8 complete_event_flg;
@@ -741,46 +286,51 @@ struct core_rx_stop_ramrod_data {
__le16 reserved2[2];
};
+/* Flags for Core TX BD */
struct core_tx_bd_data {
__le16 as_bitfield;
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
-#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
-#define CORE_TX_BD_DATA_START_BD_MASK 0x1
-#define CORE_TX_BD_DATA_START_BD_SHIFT 2
-#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
-#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
-#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
-#define CORE_TX_BD_DATA_NBDS_MASK 0xF
-#define CORE_TX_BD_DATA_NBDS_SHIFT 8
-#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
-#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
-#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
-#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
-};
-
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
+};
+
+/* Core TX BD for Light L2 */
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
struct core_tx_bd_data bd_data;
__le16 bitfield1;
-#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
-#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-#define CORE_TX_BD_TX_DST_MASK 0x3
-#define CORE_TX_BD_TX_DST_SHIFT 14
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+#define CORE_TX_BD_TX_DST_MASK 0x3
+#define CORE_TX_BD_TX_DST_SHIFT 14
};
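
The MASK/SHIFT pairs above are consumed through the generic GET_FIELD()/SET_FIELD() helpers from include/linux/qed/common_hsi.h. A minimal sketch, assuming a caller-owned struct core_tx_bd *tx_bd and illustrative field values:

	u16 bd_data = 0;
	u8 nbds;

	/* First BD of a two-BD packet, HW IP checksum requested */
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, 2);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, 1);
	tx_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);

	/* Reading a field back inverts the operation */
	nbds = GET_FIELD(le16_to_cpu(tx_bd->bd_data.as_bitfield),
			 CORE_TX_BD_DATA_NBDS);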
+/* Light L2 TX Destination */
enum core_tx_dest {
CORE_TX_DEST_NW,
CORE_TX_DEST_LB,
@@ -789,6 +339,7 @@ enum core_tx_dest {
MAX_CORE_TX_DEST
};
+/* Ramrod data for tx queue start ramrod */
struct core_tx_start_ramrod_data {
struct regpair pbl_base_addr;
__le16 mtu;
@@ -803,10 +354,20 @@ struct core_tx_start_ramrod_data {
u8 resrved[3];
};
+/* Ramrod data for tx queue stop ramrod */
struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
+/* Ramrod data for tx queue update ramrod */
+struct core_tx_update_ramrod_data {
+ u8 update_qm_pq_id_flg;
+ u8 reserved0;
+ __le16 qm_pq_id;
+ __le32 reserved1[1];
+};
+
+/* Enum flag for what type of dcb data to update */
enum dcb_dscp_update_mode {
DONT_UPDATE_DCB_DSCP,
UPDATE_DCB,
@@ -815,6 +376,487 @@ enum dcb_dscp_update_mode {
MAX_DCB_DSCP_UPDATE_MODE
};
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo;
+ __le32 spq_base_hi;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons;
+ __le16 consolid_cons;
+ __le32 reserved0[55];
+};
+
+struct e4_xstorm_core_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 consolid_prod;
+ __le16 reserved16;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_or_spq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
+};
+
+struct e4_tstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct e4_ustorm_core_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 rx_producers;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* core connection context */
+struct e4_core_conn_context {
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -896,6 +938,50 @@ struct eth_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+/* Event Ring VF-PF Channel data */
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr;
+};
+
+/* Event Ring malicious VF data */
+struct malicious_vf_eqe_data {
+ u8 vf_id;
+ u8 err_id;
+ __le16 reserved[3];
+};
+
+/* Event Ring initial cleanup data */
+struct initial_cleanup_eqe_data {
+ u8 vf_id;
+ u8 reserved[7];
+};
+
+/* Event Data Union */
+union event_ring_data {
+ u8 bytes[8];
+ struct vf_pf_channel_eqe_data vf_pf_channel;
+ struct iscsi_eqe_data iscsi_info;
+ struct iscsi_connect_done_results iscsi_conn_done_info;
+ union rdma_eqe_data rdma_data;
+ struct malicious_vf_eqe_data malicious_vf;
+ struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+ u8 protocol_id;
+ u8 opcode;
+ __le16 reserved0;
+ __le16 echo;
+ u8 fw_return_code;
+ u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
+
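A hedged sketch of consuming an event ring entry: the ASYNC flag is read with GET_FIELD(), and the data union is interpreted per protocol/opcode. PROTOCOLID_COMMON comes from common_hsi.h; process_vf_msg() is hypothetical:

	static void handle_eqe(struct event_ring_entry *eqe)
	{
		if (GET_FIELD(eqe->flags, EVENT_RING_ENTRY_ASYNC) &&
		    eqe->protocol_id == PROTOCOLID_COMMON)
			process_vf_msg(le16_to_cpu(eqe->echo),
				       &eqe->data.vf_pf_channel); /* hypothetical */
	}
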
/* Event Ring Next Page Address */
struct event_ring_next_addr {
struct regpair addr;
@@ -908,12 +994,21 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+/* Ports mode */
enum fw_flow_ctrl_mode {
flow_ctrl_pause,
flow_ctrl_pfc,
MAX_FW_FLOW_CTRL_MODE
};
+/* GFT profile type */
+enum gft_profile_type {
+ GFT_PROFILE_TYPE_4_TUPLE,
+ GFT_PROFILE_TYPE_L4_DST_PORT,
+ GFT_PROFILE_TYPE_IP_DST_PORT,
+ MAX_GFT_PROFILE_TYPE
+};
+
/* Major and Minor hsi Versions */
struct hsi_fp_ver_struct {
u8 minor_ver_arr[2];
@@ -921,14 +1016,14 @@ struct hsi_fp_ver_struct {
};
enum iwarp_ll2_tx_queues {
- IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
IWARP_LL2_ALIGNED_TX_QUEUE,
IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
IWARP_LL2_ERROR,
MAX_IWARP_LL2_TX_QUEUES
};
-/* Mstorm non-triggering VF zone */
+/* Malicious VF error ID */
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
@@ -951,9 +1046,11 @@ enum malicious_vf_error_id {
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION,
ETH_ANTI_SPOOFING_ERR,
+ ETH_PACKET_SIZE_TOO_LARGE,
MAX_MALICIOUS_VF_ERROR_ID
};
+/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
@@ -962,7 +1059,21 @@ struct mstorm_non_trigger_vf_zone {
/* Mstorm VF zone */
struct mstorm_vf_zone {
struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* vlan header including TPID and TCI fields */
+struct vlan_header {
+ __le16 tpid;
+ __le16 tci;
+};
+/* outer tag configurations */
+struct outer_tag_config_struct {
+ u8 enable_stag_pri_change;
+ u8 pri_map_valid;
+ u8 reserved[2];
+ struct vlan_header outer_tag;
+ u8 inner_to_outer_pri_map[8];
};
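
A sketch of filling the new outer tag configuration, assuming standard 802.1Q framing; the vlan_id variable and the priority-change setting are illustrative:

	struct outer_tag_config_struct cfg = { 0 };

	cfg.enable_stag_pri_change = 1;
	cfg.outer_tag.tpid = cpu_to_le16(0x8100);	/* 802.1Q TPID */
	cfg.outer_tag.tci = cpu_to_le16(vlan_id & 0xFFF);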
/* personality per PF */
@@ -974,7 +1085,7 @@ enum personality_type {
PERSONALITY_RDMA,
PERSONALITY_CORE,
PERSONALITY_ETH,
- PERSONALITY_RESERVED4,
+ PERSONALITY_RESERVED,
MAX_PERSONALITY_TYPE
};
@@ -997,7 +1108,6 @@ struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
struct regpair consolid_q_pbl_addr;
struct pf_start_tunnel_config tunnel_config;
- __le32 reserved;
__le16 event_ring_sb_id;
u8 base_vf_id;
u8 num_vfs;
@@ -1011,21 +1121,22 @@ struct pf_start_ramrod_data {
u8 mf_mode;
u8 integ_phase;
u8 allow_npar_tx_switching;
- u8 inner_to_outer_pri_map[8];
- u8 pri_map_valid;
- __le32 outer_tag;
+ u8 reserved0;
struct hsi_fp_ver_struct hsi_fp_ver;
+ struct outer_tag_config_struct outer_tag_config;
};
+/* Per-protocol DCB data */
struct protocol_dcb_data {
u8 dcb_enable_flag;
- u8 reserved_a;
+ u8 dscp_enable_flag;
u8 dcb_priority;
u8 dcb_tc;
- u8 reserved_b;
+ u8 dscp_val;
u8 reserved0;
};
+/* Update tunnel configuration */
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
u8 update_rx_def_ucast_clss;
@@ -1042,8 +1153,8 @@ struct pf_update_tunnel_config {
__le16 reserved;
};
+/* Data for pf update ramrod */
struct pf_update_ramrod_data {
- u8 pf_id;
u8 update_eth_dcb_data_mode;
u8 update_fcoe_dcb_data_mode;
u8 update_iscsi_dcb_data_mode;
@@ -1051,6 +1162,7 @@ struct pf_update_ramrod_data {
u8 update_rroce_dcb_data_mode;
u8 update_iwarp_dcb_data_mode;
u8 update_mf_vlan_flag;
+ u8 update_enable_stag_pri_change;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
@@ -1058,7 +1170,8 @@ struct pf_update_ramrod_data {
struct protocol_dcb_data rroce_dcb_data;
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan;
- __le16 reserved;
+ u8 enable_stag_pri_change;
+ u8 reserved;
struct pf_update_tunnel_config tunnel_config;
};
@@ -1079,11 +1192,13 @@ enum protocol_version_array_key {
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
+/* RDMA TX Stats */
struct rdma_sent_stats {
struct regpair sent_bytes;
struct regpair sent_pkts;
};
+/* Pstorm non-triggering VF zone */
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct rdma_sent_stats rdma_stats;
@@ -1103,11 +1218,34 @@ struct ramrod_header {
__le16 echo;
};
+/* RDMA RX Stats */
struct rdma_rcv_stats {
struct regpair rcv_bytes;
struct regpair rcv_pkts;
};
+/* Data for update QCN/DCQCN RL ramrod */
+struct rl_update_ramrod_data {
+ u8 qcn_update_param_flg;
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg;
+ u8 rl_start_flg;
+ u8 rl_stop_flg;
+ u8 rl_id_first;
+ u8 rl_id_last;
+ u8 rl_dc_qcn_flg;
+ __le32 rl_bc_rate;
+ __le16 rl_max_rate;
+ __le16 rl_r_ai;
+ __le16 rl_r_hai;
+ __le16 dcqcn_g;
+ __le32 dcqcn_k_us;
+ __le32 dcqcn_timeuot_us;
+ __le32 qcn_timeuot_us;
+ __le32 reserved[2];
+};
+
+/* Slowpath Element (SPQE) */
struct slow_path_element {
struct ramrod_header hdr;
struct regpair data_ptr;
@@ -1130,11 +1268,12 @@ struct tstorm_per_port_stat {
struct regpair roce_irregular_pkt;
struct regpair iwarp_irregular_pkt;
struct regpair eth_irregular_pkt;
- struct regpair reserved1;
+ struct regpair toe_irregular_pkt;
struct regpair preroce_irregular_pkt;
struct regpair eth_gre_tunn_filter_discard;
struct regpair eth_vxlan_tunn_filter_discard;
struct regpair eth_geneve_tunn_filter_discard;
+ struct regpair eth_gft_drop_pkt;
};
/* Tstorm VF zone */
@@ -1197,6 +1336,7 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
+/* VF zone size mode */
enum vf_zone_size_mode {
VF_ZONE_SIZE_MODE_DEFAULT,
VF_ZONE_SIZE_MODE_DOUBLE,
@@ -1204,6 +1344,7 @@ enum vf_zone_size_mode {
MAX_VF_ZONE_SIZE_MODE
};
+/* Attentions status block */
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -1212,12 +1353,6 @@ struct atten_status_block {
__le32 reserved1;
};
-enum command_type_bit {
- IGU_COMMAND_TYPE_NOP = 0,
- IGU_COMMAND_TYPE_SET = 1,
- MAX_COMMAND_TYPE_BIT
-};
-
/* DMAE command */
struct dmae_cmd {
__le32 opcode;
@@ -1327,74 +1462,74 @@ enum dmae_cmd_src_enum {
MAX_DMAE_CMD_SRC_ENUM
};
-struct mstorm_core_conn_ag_ctx {
+struct e4_mstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct ystorm_core_conn_ag_ctx {
+struct e4_ystorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -1545,22 +1680,22 @@ struct qm_rf_opportunistic_mask {
};
/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
};
/* Completion params for aggregated interrupt completion */
@@ -1643,8 +1778,8 @@ enum block_addr {
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PTLD = 0x590000,
- GRCBASE_YPLD = 0x5b0000,
+ GRCBASE_PTLD = 0x5a0000,
+ GRCBASE_YPLD = 0x5c0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,
@@ -1675,6 +1810,7 @@ enum block_addr {
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_PXPREQBUS = 0x56000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1766,6 +1902,7 @@ enum block_id {
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
+ BLOCK_PXPREQBUS,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1841,7 +1978,7 @@ struct dbg_attn_block_result {
struct dbg_attn_reg_result reg_results[15];
};
-/* mode header */
+/* Mode header */
struct dbg_mode_hdr {
__le16 data;
#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
@@ -1863,80 +2000,83 @@ struct dbg_attn_reg {
__le32 mask_address;
};
-/* attention types */
+/* Attention types */
enum dbg_attn_type {
ATTN_TYPE_INTERRUPT,
ATTN_TYPE_PARITY,
MAX_DBG_ATTN_TYPE
};
+/* Debug Bus block data */
struct dbg_bus_block {
u8 num_of_lines;
u8 has_latency_events;
__le16 lines_offset;
};
+/* Debug Bus block user data */
struct dbg_bus_block_user_data {
u8 num_of_lines;
u8 has_latency_events;
__le16 names_offset;
};
+/* Block Debug line data */
struct dbg_bus_line {
u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK 0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT 4
-#define DBG_BUS_LINE_RESERVED_MASK 0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT 5
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
u8 group_sizes;
};
-/* condition header for registers dump */
+/* Condition header for registers dump */
struct dbg_dump_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
u8 block_id; /* block ID */
u8 data_size; /* size in dwords of the data following this header */
};
-/* memory data for registers dump */
+/* Memory data for registers dump */
struct dbg_dump_mem {
__le32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
__le32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
-#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
-/* register data for registers dump */
+/* Register data for registers dump */
struct dbg_dump_reg {
__le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
-/* split header for registers dump */
+/* Split header for registers dump */
struct dbg_dump_split_hdr {
__le32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
};
-/* condition header for idle check */
+/* Condition header for idle check */
struct dbg_idle_chk_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
__le16 data_size; /* size in dwords of the data following this header */
@@ -1945,12 +2085,12 @@ struct dbg_idle_chk_cond_hdr {
/* Idle Check condition register */
struct dbg_idle_chk_cond_reg {
__le32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
__le16 num_entries;
u8 entry_size;
u8 start_entry;
@@ -1959,12 +2099,12 @@ struct dbg_idle_chk_cond_reg {
/* Idle Check info register */
struct dbg_idle_chk_info_reg {
__le32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
__le16 size; /* register size in dwords */
struct dbg_mode_hdr mode; /* Mode header */
};
@@ -2016,13 +2156,13 @@ struct dbg_idle_chk_rule {
/* Idle Check rule parsing data */
struct dbg_idle_chk_rule_parsing_data {
__le32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
};
-/* idle check severity types */
+/* Idle check severity types */
enum dbg_idle_chk_severity_types {
/* idle check failure should cause an error */
IDLE_CHK_SEVERITY_ERROR,
@@ -2036,14 +2176,14 @@ enum dbg_idle_chk_severity_types {
/* Debug Bus block data */
struct dbg_bus_block_data {
__le16 data;
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
u8 line_num;
u8 hw_id;
};
@@ -2072,6 +2212,7 @@ enum dbg_bus_clients {
MAX_DBG_BUS_CLIENTS
};
+/* Debug Bus constraint operation types */
enum dbg_bus_constraint_ops {
DBG_BUS_CONSTRAINT_OP_EQ,
DBG_BUS_CONSTRAINT_OP_NE,
@@ -2086,12 +2227,13 @@ enum dbg_bus_constraint_ops {
MAX_DBG_BUS_CONSTRAINT_OPS
};
+/* Debug Bus trigger state data */
struct dbg_bus_trigger_state_data {
u8 data;
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
};
/* Debug Bus memory address */
@@ -2165,6 +2307,7 @@ struct dbg_bus_data {
struct dbg_bus_storm_data storms[6];
};
+/* Debug bus filter types */
enum dbg_bus_filter_types {
DBG_BUS_FILTER_TYPE_OFF,
DBG_BUS_FILTER_TYPE_PRE,
@@ -2181,6 +2324,7 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
+/* Debug bus other engine mode */
enum dbg_bus_other_engine_modes {
DBG_BUS_OTHER_ENGINE_MODE_NONE,
DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2190,12 +2334,14 @@ enum dbg_bus_other_engine_modes {
MAX_DBG_BUS_OTHER_ENGINE_MODES
};
+/* Debug bus post-trigger recording types */
enum dbg_bus_post_trigger_types {
DBG_BUS_POST_TRIGGER_RECORD,
DBG_BUS_POST_TRIGGER_DROP,
MAX_DBG_BUS_POST_TRIGGER_TYPES
};
+/* Debug bus pre-trigger recording types */
enum dbg_bus_pre_trigger_types {
DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
@@ -2203,11 +2349,10 @@ enum dbg_bus_pre_trigger_types {
MAX_DBG_BUS_PRE_TRIGGER_TYPES
};
+/* Debug bus SEMI frame modes */
enum dbg_bus_semi_frame_modes {
- DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
- 0,
- DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
- 3,
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
MAX_DBG_BUS_SEMI_FRAME_MODES
};
@@ -2220,6 +2365,7 @@ enum dbg_bus_states {
MAX_DBG_BUS_STATES
};
+/* Debug Bus Storm modes */
enum dbg_bus_storm_modes {
DBG_BUS_STORM_MODE_PRINTF,
DBG_BUS_STORM_MODE_PRAM_ADDR,
@@ -2352,7 +2498,7 @@ enum dbg_status {
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_RESERVED2,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -2396,7 +2542,8 @@ struct dbg_tools_data {
u8 chip_id;
u8 platform_id;
u8 initialized;
- u8 reserved;
+ u8 use_dmae;
+ __le32 num_regs_read;
};
/********************************/
@@ -2406,6 +2553,7 @@ struct dbg_tools_data {
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
+/* BRB RAM init requirements */
struct init_brb_ram_req {
__le32 guranteed_per_tc;
__le32 headroom_per_tc;
@@ -2414,17 +2562,20 @@ struct init_brb_ram_req {
u8 num_active_tcs[MAX_NUM_PORTS];
};
+/* ETS per-TC init requirements */
struct init_ets_tc_req {
u8 use_sp;
u8 use_wfq;
__le16 weight;
};
+/* ETS init requirements */
struct init_ets_req {
__le32 mtu;
struct init_ets_tc_req tc_req[NUM_OF_TCS];
};
+/* NIG LB RL init requirements */
struct init_nig_lb_rl_req {
__le16 lb_mac_rate;
__le16 lb_rate;
@@ -2432,15 +2583,18 @@ struct init_nig_lb_rl_req {
__le16 tc_rate[NUM_OF_PHYS_TCS];
};
+/* NIG TC mapping for each priority */
struct init_nig_pri_tc_map_entry {
u8 tc_id;
u8 valid;
};
+/* NIG priority to TC map init requirements */
struct init_nig_pri_tc_map_req {
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
+/* QM per-port init parameters */
struct init_qm_port_params {
u8 active;
u8 active_phys_tcs;
@@ -2563,7 +2717,7 @@ struct bin_buffer_hdr {
__le32 length;
};
-/* binary init buffer types */
+/* Binary init buffer types */
enum bin_init_buffer_type {
BIN_BUF_INIT_FW_VER_INFO,
BIN_BUF_INIT_CMD,
@@ -2793,6 +2947,7 @@ struct iro {
};
/***************************** Public Functions *******************************/
+
/**
* @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
* arrays.
@@ -2802,6 +2957,18 @@ struct iro {
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
/**
+ * @brief qed_read_regs - Reads registers into a buffer (using GRC).
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for reading the registers.
+ * @param buf - Destination buffer.
+ * @param addr - Source GRC address in dwords.
+ * @param len - Number of registers to read.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
+/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
*
@@ -3119,6 +3286,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
#define MAX_NAME_LEN 16
/***************************** Public Functions *******************************/
+
/**
* @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
* debug arrays.
@@ -3172,6 +3340,18 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
u32 *num_warnings);
/**
+ * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace
+ * meta data.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ *
+ * @param data - pointer to MCP Trace meta data
+ * @param size - size of MCP Trace meta data in dwords
+ */
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size);
+
+/**
* @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
* for MCP Trace results (in bytes).
*
@@ -3607,6 +3787,9 @@ static const u32 dbg_bus_blocks[] = {
0x00000000, /* bar0_map, bb, 0 lines */
0x00000000, /* bar0_map, k2, 0 lines */
0x00000000,
+ 0x00000000, /* bar0_map, bb, 0 lines */
+ 0x00000000, /* bar0_map, k2, 0 lines */
+ 0x00000000,
};
/* Win 2 */
@@ -3645,7 +3828,6 @@ static const u32 dbg_bus_blocks[] = {
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@@ -3654,8 +3836,7 @@ static const u32 dbg_bus_blocks[] = {
*
* @return The required host memory size in 4KB units.
*/
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
@@ -3676,7 +3857,7 @@ struct qed_qm_pf_rt_init_params {
u8 port_id;
u8 pf_id;
u8 max_phys_tcs_per_port;
- bool is_first_pf;
+ bool is_pf_loading;
u32 num_pf_cids;
u32 num_vf_cids;
u32 num_tids;
@@ -3687,6 +3868,7 @@ struct qed_qm_pf_rt_init_params {
u8 num_vports;
u16 pf_wfq;
u32 pf_rl;
+ u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
};
@@ -3744,11 +3926,14 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
* @param p_ptt - ptt window used for writing the registers
* @param vport_id - VPORT ID
* @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
*
* @return 0 on success, -1 on error.
*/
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+ struct qed_ptt *p_ptt,
+ u8 vport_id, u32 vport_rl, u32 link_speed);
+
/**
* @brief qed_send_qm_stop_cmd Sends a stop command to the QM
*
@@ -3759,7 +3944,8 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
* @param start_pq - first PQ ID to stop
* @param num_pqs - Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occured while waiting for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for
+ * QM command done.
*/
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3769,6 +3955,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - vxlan destination udp port.
*/
@@ -3778,6 +3965,7 @@ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param vxlan_enable - vxlan enable flag.
*/
@@ -3787,6 +3975,7 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param eth_gre_enable - eth GRE enable flag.
* @param ip_gre_enable - IP GRE enable flag.
@@ -3798,6 +3987,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - geneve destination udp port.
*/
@@ -3814,612 +4004,921 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable);
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 pf_id);
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 pf_id, bool tcp, bool udp,
- bool ipv4, bool ipv6);
-
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-#define TSTORM_PORT_STAT_OFFSET(port_id) \
+
+/**
+ * @brief qed_gft_disable - Disable GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable GFT.
+ */
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
+
+/**
+ * @brief qed_gft_config - Enable and configure HW for GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to enable GFT.
+ * @param tcp - set profile to match TCP packets.
+ * @param udp - set profile to match UDP packets.
+ * @param ipv4 - set profile to match IPv4 packets.
+ * @param ipv6 - set profile to match IPv6 packets.
+ * @param profile_type - which packet fields to match on (enum gft_profile_type).
+ */
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4, bool ipv6, enum gft_profile_type profile_type);
+
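For illustration, enabling 4-tuple TCP/IPv4 classification on the current PF might look as follows (parameter choices are illustrative):

	qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
		       true,	/* tcp */
		       false,	/* udp */
		       true,	/* ipv4 */
		       false,	/* ipv6 */
		       GFT_PROFILE_TYPE_4_TUPLE);
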
+/**
+ * @brief qed_enable_context_validation - Enable and configure context
+ * validation.
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_calc_session_ctx_validation - Calculate validation byte for
+ * session context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid);
+
+/**
+ * @brief qed_calc_task_ctx_validation - Calculate validation byte for task
+ * context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid);
+
+/**
+ * @brief qed_memset_session_ctx - Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/**
+ * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
(IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+
+/* Tstorm ll2 port statistics */
#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
(IRO[2].base + ((port_id) * IRO[2].m1))
#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
(IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
- (IRO[4].base + (pf_id) * IRO[4].m1)
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-#define USTORM_EQE_CONS_OFFSET(pf_id) \
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+ (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
(IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
(IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+
+/* Tstorm producers */
#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+ (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+
+/* Tstorm LightL2 queue statistics */
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
(IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+
+/* Ustorm LightL2 queue statistics */
#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+
+/* Pstorm LightL2 queue statistics */
#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17]. size)
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
(IRO[19].base + ((queue_id) * IRO[19].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */
#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+ (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
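
The VF producers window is the two-dimensional case: m1 strides per vf_id and m2 per vf_queue_id. Again reading the updated iro_arr below:

/* With IRO[20] = {0x53a8, 0x80, 0x4, 0x0, 0x4}:
 *
 *	MSTORM_ETH_VF_PRODS_OFFSET(2, 3) = 0x53a8 + 2 * 0x80 + 3 * 0x4
 *					 = 0x54b4
 */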
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
-#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\
(IRO[24].base + ((pf_id) * IRO[24].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[26].base + ((pf_id) * IRO[26].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
- (IRO[27].base + ((ethtype) * IRO[27].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+ (IRO[27].base + ((eth_type_id) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
(IRO[29].base + ((pf_id) * IRO[29].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
(IRO[30].base + ((queue_id) * IRO[30].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[31].base + ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[32].base + ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+ (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of a given function ID
+ * and BD queue ID.
+ */
#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+ (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+
+/* Mstorm bdq-external-producer of a given BDQ resource ID and BD queue ID */
#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+ (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+
+/* Tstorm iSCSI RX stats */
#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+
+/* Mstorm iSCSI RX stats */
#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+
+/* Ustorm iSCSI RX stats */
#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+
+/* Xstorm iSCSI TX stats */
#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+
+/* Ystorm iSCSI TX stats */
#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+
+/* Pstorm iSCSI TX stats */
#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[42].base + ((pf_id) * IRO[42].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+ (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+
+/* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[43].base + ((pf_id) * IRO[43].m1))
+ (IRO[43].base + ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
+
+/* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
(IRO[44].base + ((pf_id) * IRO[44].m1))
-static const struct iro iro_arr[49] = {
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+ (IRO[47].base + ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[48].base + ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[49].base + ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[50].base + ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
+
+static const struct iro iro_arr[51] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb0, 0x80, 0x0, 0x0, 0x80},
- {0x6518, 0x20, 0x0, 0x0, 0x20},
+ {0x4cb8, 0x88, 0x0, 0x0, 0x88},
+ {0x6530, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
{0x0, 0x8, 0x0, 0x0, 0x2},
{0x80, 0x8, 0x0, 0x0, 0x4},
{0x84, 0x8, 0x0, 0x0, 0x2},
+ {0x4c48, 0x0, 0x0, 0x0, 0x78},
+ {0x3e18, 0x0, 0x0, 0x0, 0x78},
+ {0x2b58, 0x0, 0x0, 0x0, 0x78},
{0x4c40, 0x0, 0x0, 0x0, 0x78},
- {0x3df0, 0x0, 0x0, 0x0, 0x78},
- {0x29b0, 0x0, 0x0, 0x0, 0x78},
- {0x4c38, 0x0, 0x0, 0x0, 0x78},
- {0x4990, 0x0, 0x0, 0x0, 0x78},
- {0x7f48, 0x0, 0x0, 0x0, 0x78},
+ {0x4998, 0x0, 0x0, 0x0, 0x78},
+ {0x7f50, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x61f8, 0x10, 0x0, 0x0, 0x10},
- {0xbd20, 0x30, 0x0, 0x0, 0x30},
- {0x95b8, 0x30, 0x0, 0x0, 0x30},
- {0x4b60, 0x80, 0x0, 0x0, 0x40},
+ {0x6210, 0x10, 0x0, 0x0, 0x10},
+ {0xb820, 0x30, 0x0, 0x0, 0x30},
+ {0x96c0, 0x30, 0x0, 0x0, 0x30},
+ {0x4b68, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0x53a0, 0x80, 0x4, 0x0, 0x4},
- {0xc7c8, 0x0, 0x0, 0x0, 0x4},
- {0x4ba0, 0x80, 0x0, 0x0, 0x20},
- {0x8150, 0x40, 0x0, 0x0, 0x30},
- {0xec70, 0x60, 0x0, 0x0, 0x60},
- {0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xf1b0, 0x78, 0x0, 0x0, 0x78},
+ {0x53a8, 0x80, 0x4, 0x0, 0x4},
+ {0xc7d0, 0x0, 0x0, 0x0, 0x4},
+ {0x4ba8, 0x80, 0x0, 0x0, 0x20},
+ {0x8158, 0x40, 0x0, 0x0, 0x30},
+ {0xe770, 0x60, 0x0, 0x0, 0x60},
+ {0x2cf0, 0x80, 0x0, 0x0, 0x38},
+ {0xf2b8, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xaef8, 0x0, 0x0, 0x0, 0xf0},
- {0xafe8, 0x8, 0x0, 0x0, 0x8},
+ {0xaf20, 0x0, 0x0, 0x0, 0xf0},
+ {0xb010, 0x8, 0x0, 0x0, 0x8},
{0x1f8, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8},
{0x24f8, 0x8, 0x0, 0x0, 0x8},
{0x0, 0x8, 0x0, 0x0, 0x8},
- {0x200, 0x10, 0x8, 0x0, 0x8},
- {0xb78, 0x10, 0x8, 0x0, 0x2},
- {0xd9a8, 0x38, 0x0, 0x0, 0x24},
- {0x12988, 0x10, 0x0, 0x0, 0x8},
- {0x11fa0, 0x38, 0x0, 0x0, 0x18},
- {0xa580, 0x38, 0x0, 0x0, 0x10},
- {0x86f8, 0x30, 0x0, 0x0, 0x18},
- {0x101f8, 0x10, 0x0, 0x0, 0x10},
- {0xde28, 0x48, 0x0, 0x0, 0x38},
- {0x10660, 0x20, 0x0, 0x0, 0x20},
- {0x2b80, 0x80, 0x0, 0x0, 0x10},
- {0x5020, 0x10, 0x0, 0x0, 0x10},
- {0xc9b0, 0x30, 0x0, 0x0, 0x10},
- {0xeec0, 0x10, 0x0, 0x0, 0x10},
+ {0x400, 0x18, 0x8, 0x0, 0x8},
+ {0xb78, 0x18, 0x8, 0x0, 0x2},
+ {0xd898, 0x50, 0x0, 0x0, 0x3c},
+ {0x12908, 0x18, 0x0, 0x0, 0x10},
+ {0x11aa8, 0x40, 0x0, 0x0, 0x18},
+ {0xa588, 0x50, 0x0, 0x0, 0x20},
+ {0x8700, 0x40, 0x0, 0x0, 0x28},
+ {0x10300, 0x18, 0x0, 0x0, 0x10},
+ {0xde48, 0x48, 0x0, 0x0, 0x38},
+ {0x10768, 0x20, 0x0, 0x0, 0x20},
+ {0x2d28, 0x80, 0x0, 0x0, 0x10},
+ {0x5048, 0x10, 0x0, 0x0, 0x10},
+ {0xc9b8, 0x30, 0x0, 0x0, 0x10},
+ {0xeee0, 0x10, 0x0, 0x0, 0x10},
+ {0xa3a0, 0x10, 0x0, 0x0, 0x10},
+ {0x13108, 0x8, 0x0, 0x0, 0x8},
};
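
For reference, the element layout these initializers assume, a sketch only; the authoritative struct iro definition lives elsewhere in the driver:

struct iro {
	u32 base;	/* RAM-relative start of the window */
	u16 m1;		/* stride for the first index (pf_id, queue_id, ...) */
	u16 m2;		/* stride for the second index, where used */
	u16 m3;		/* third-index stride (zero throughout this table) */
	u16 size;	/* size of one element in the window */
};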
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30855
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30905
-#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31675
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32699
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33211
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
-#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
-
-#define RUNTIME_ARRAY_SIZE 34309
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6527
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 35389
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 35439
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 36209
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 37233
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 37745
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_PTRTBLTX_RT_OFFSET 38257
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
+
+#define RUNTIME_ARRAY_SIZE 43023
+
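The *_RT_OFFSET/*_RT_SIZE constants above name slots in a flat u32 runtime-init array of RUNTIME_ARRAY_SIZE entries that the driver fills and later flushes to the chip. A minimal sketch of that indexing scheme, with hypothetical helper names (not the driver's real API):

/* Illustrative only: *_RT_OFFSET indexes a flat u32 shadow array;
 * array-valued entries occupy *_RT_SIZE consecutive slots.
 */
static u32 ex_rt_shadow[RUNTIME_ARRAY_SIZE];

static void ex_store_rt_reg(u32 rt_offset, u32 val)
{
	ex_rt_shadow[rt_offset] = val;	/* flushed to HW during init */
}

static void ex_fill_func_tag_en(const u32 *vals)
{
	u32 i;

	for (i = 0; i < NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE; i++)
		ex_store_rt_reg(NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET + i,
				vals[i]);
}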
+/* Init Callbacks */
+#define DMAE_READY_CB 0
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -4436,219 +4935,219 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
u8 reserved0;
- u8 eth_state;
+ u8 state;
u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -4681,7 +5180,7 @@ struct xstorm_eth_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 ereserved;
+ u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -4704,37 +5203,37 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset;
u8 byte3;
__le16 word0;
@@ -4748,89 +5247,89 @@ struct ystorm_eth_conn_ag_ctx {
__le32 reg3;
};
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -4852,63 +5351,63 @@ struct tstorm_eth_conn_ag_ctx {
__le32 reg10;
};
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -4932,20 +5431,21 @@ struct mstorm_eth_conn_st_ctx {
};
/* eth connection context */
-struct eth_conn_context {
+struct e4_eth_conn_context {
struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
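Each _MASK/_SHIFT pair above packs a sub-byte field into one of the flagsN bytes. A self-contained sketch of the accessor pattern these pairs are built for; the EX_ helpers are local stand-ins for the equivalent SET_FIELD()/GET_FIELD() macros the qed headers provide:

#define EX_GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))
#define EX_SET_FIELD(value, name, flag)					  \
	do {								  \
		(value) &= ~((name##_MASK) << (name##_SHIFT));		  \
		(value) |= (((flag) & (name##_MASK)) << (name##_SHIFT)); \
	} while (0)

static void ex_enable_rule0(struct e4_ystorm_eth_conn_ag_ctx *cx)
{
	/* RULE0EN lives in flags1: mask 0x1, shift 3 */
	EX_SET_FIELD(cx->flags1, E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN, 1);
}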
+/* Eth error codes */
enum eth_error_code {
ETH_OK = 0x00,
ETH_FILTERS_MAC_ADD_FAIL_FULL,
@@ -4972,6 +5472,7 @@ enum eth_error_code {
MAX_ETH_ERROR_CODE
};
+/* Opcodes for the event ring */
enum eth_event_opcode {
ETH_EVENT_UNUSED,
ETH_EVENT_VPORT_START,
@@ -4983,13 +5484,14 @@ enum eth_event_opcode {
ETH_EVENT_RX_QUEUE_UPDATE,
ETH_EVENT_RX_QUEUE_STOP,
ETH_EVENT_FILTERS_UPDATE,
- ETH_EVENT_RESERVED,
- ETH_EVENT_RESERVED2,
- ETH_EVENT_RESERVED3,
+ ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+ ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+ ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
ETH_EVENT_RX_ADD_UDP_FILTER,
ETH_EVENT_RX_DELETE_UDP_FILTER,
- ETH_EVENT_RESERVED4,
- ETH_EVENT_RESERVED5,
+ ETH_EVENT_RX_CREATE_GFT_ACTION,
+ ETH_EVENT_RX_GFT_UPDATE_FILTER,
+ ETH_EVENT_TX_QUEUE_UPDATE,
MAX_ETH_EVENT_OPCODE
};
@@ -5039,6 +5541,7 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
+/* Eth IPv4 Fragment Type */
enum eth_ipv4_frag_type {
ETH_IPV4_NOT_FRAG,
ETH_IPV4_FIRST_FRAG,
@@ -5046,12 +5549,14 @@ enum eth_ipv4_frag_type {
MAX_ETH_IPV4_FRAG_TYPE
};
+/* Eth IP type (IPv4/IPv6) */
enum eth_ip_type {
ETH_IPV4,
ETH_IPV6,
MAX_ETH_IP_TYPE
};
+/* Ethernet Ramrod Command IDs */
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -5070,10 +5575,11 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_DELETE_UDP_FILTER,
ETH_RAMROD_RX_CREATE_GFT_ACTION,
ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_TX_QUEUE_UPDATE,
MAX_ETH_RAMROD_CMD_ID
};
-/* return code from eth sp ramrods */
+/* Return code from eth sp ramrods */
struct eth_return_code {
u8 value;
#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
@@ -5209,18 +5715,14 @@ struct eth_vport_tx_mode {
__le16 reserved2[3];
};
+/* GFT filter update action type */
enum gft_filter_update_action {
GFT_ADD_FILTER,
GFT_DELETE_FILTER,
MAX_GFT_FILTER_UPDATE_ACTION
};
-enum gft_logic_filter_type {
- GFT_FILTER_TYPE,
- RFS_FILTER_TYPE,
- MAX_GFT_LOGIC_FILTER_TYPE
-};
-
+/* Ramrod data for rx add openflow filter */
struct rx_add_openflow_filter_data {
__le16 action_icid;
u8 priority;
@@ -5244,11 +5746,13 @@ struct rx_add_openflow_filter_data {
__le16 l4_src_port;
};
+/* Ramrod data for rx create gft action */
struct rx_create_gft_action_data {
u8 vport_id;
u8 reserved[7];
};
+/* Ramrod data for rx create openflow action */
struct rx_create_openflow_action_data {
u8 vport_id;
u8 reserved[7];
@@ -5286,7 +5790,7 @@ struct rx_queue_start_ramrod_data {
struct regpair reserved2;
};
-/* Ramrod data for rx queue start ramrod */
+/* Ramrod data for rx queue stop ramrod */
struct rx_queue_stop_ramrod_data {
__le16 rx_queue_id;
u8 complete_cqe_flg;
@@ -5324,14 +5828,22 @@ struct rx_udp_filter_data {
__le32 tenant_id;
};
+/* Add or delete a GFT filter - the filter is the packet header of the
+ * packet type that should be steered to a certain FW flow.
+ */
struct rx_update_gft_filter_data {
struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length;
- __le16 rx_qid_or_action_icid;
- u8 vport_id;
- u8 filter_type;
+ __le16 action_icid;
+ __le16 rx_qid;
+ __le16 flow_id;
+ __le16 vport_id;
+ u8 action_icid_valid;
+ u8 rx_qid_valid;
+ u8 flow_id_valid;
u8 filter_action;
u8 assert_on_error;
+ u8 reserved;
};
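The reworked layout drops the overloaded rx_qid_or_action_icid/filter_type pair in favour of explicit ids, each gated by its own _valid flag. A hedged fill example for an add (kernel context assumed for memset() and cpu_to_le16(); the helper name is hypothetical, and pkt_hdr_addr must still be set to the DMA address of the header buffer):

static void ex_fill_gft_add(struct rx_update_gft_filter_data *p,
			    u16 hdr_len, u16 rx_qid, u16 vport_id)
{
	memset(p, 0, sizeof(*p));
	p->pkt_hdr_length = cpu_to_le16(hdr_len);
	p->rx_qid = cpu_to_le16(rx_qid);
	p->rx_qid_valid = 1;	/* FW only reads ids whose flag is set */
	p->vport_id = cpu_to_le16(vport_id);
	p->filter_action = GFT_ADD_FILTER;
	p->assert_on_error = 1;
}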
/* Ramrod data for rx queue start ramrod */
@@ -5377,6 +5889,14 @@ struct tx_queue_stop_ramrod_data {
__le16 reserved[4];
};
+/* Ramrod data for tx queue update ramrod */
+struct tx_queue_update_ramrod_data {
+ __le16 update_qm_pq_id_flg;
+ __le16 qm_pq_id;
+ __le32 reserved0;
+ struct regpair reserved1[5];
+};
+
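The new struct pairs with the ETH_RAMROD_TX_QUEUE_UPDATE/ETH_EVENT_TX_QUEUE_UPDATE entries added above and carries only a flagged PQ-id move. A minimal, hypothetical fill (kernel context assumed for memset() and cpu_to_le16()):

static void ex_fill_txq_update(struct tx_queue_update_ramrod_data *p,
			       u16 new_pq_id)
{
	memset(p, 0, sizeof(*p));
	p->update_qm_pq_id_flg = cpu_to_le16(1);	/* request the move */
	p->qm_pq_id = cpu_to_le16(new_pq_id);
}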
/* Ramrod data for vport update ramrod */
struct vport_filter_update_ramrod_data {
struct eth_filter_cmd_header filter_cmd_hdr;
@@ -5477,219 +5997,219 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct xstorm_eth_conn_agctxdq_ext_ldpart {
+struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
u8 reserved0;
- u8 eth_state;
+ u8 state;
u8 flags0;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
u8 flags3;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
u8 flags4;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
u8 flags5;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
u8 flags6;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5706,256 +6226,256 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
__le32 reg4;
};
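/*
 * Editor's sketch, not part of the patch: the E4-prefixed MASK/SHIFT
 * pairs above are consumed through the driver's generic bit-field
 * helpers.  Assuming the SET_FIELD()/GET_FIELD() macros from
 * common_hsi.h, packing and reading a 2-bit sub-field of flags6 looks
 * roughly like this (the example function name is hypothetical):
 */
static void example_set_dq_cf(struct xstorm_eth_conn_agctxdq_ext_ldpart *p)
{
	/* SET_FIELD() clears the old MASK << SHIFT bits before OR-ing. */
	SET_FIELD(p->flags6, E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF, 1);

	if (GET_FIELD(p->flags6, E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF))
		return;	/* termination already requested */
}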
-struct mstorm_eth_conn_ag_ctx {
+struct e4_mstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct xstorm_eth_hw_conn_ag_ctx {
+struct e4_xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
- u8 eth_state;
+ u8 state;
u8 flags0;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5963,6 +6483,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi;
};
+/* GFT CAM line struct */
struct gft_cam_line {
__le32 camline;
#define GFT_CAM_LINE_VALID_MASK 0x1
@@ -5975,6 +6496,7 @@ struct gft_cam_line {
#define GFT_CAM_LINE_RESERVED1_SHIFT 29
};
+/* GFT CAM line struct with fields breakout */
struct gft_cam_line_mapped {
__le32 camline;
#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
@@ -6008,28 +6530,31 @@ union gft_cam_line_union {
struct gft_cam_line_mapped cam_line_mapped;
};
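/*
 * Editor's sketch, not part of the patch: the union lets one 32-bit
 * CAM line be written either raw or through the mapped field breakout.
 * Assuming SET_FIELD() from common_hsi.h (helper name hypothetical),
 * marking a line valid could look like:
 */
static void example_validate_cam_line(union gft_cam_line_union *line)
{
	u32 cam = 0;

	/* Build in CPU byte order, convert once on assignment. */
	SET_FIELD(cam, GFT_CAM_LINE_MAPPED_VALID, 1);
	line->cam_line_mapped.camline = cpu_to_le32(cam);
}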
+/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
GFT_PROFILE_IPV4 = 0,
GFT_PROFILE_IPV6 = 1,
MAX_GFT_PROFILE_IP_VERSION
};
+/* Profile key struct for GFT logic in Prs */
struct gft_profile_key {
__le16 profile_key;
-#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
-#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
-#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
-#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
-#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
-#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
-};
-
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+/* Used in gft_profile_key: Indication for tunnel type */
enum gft_profile_tunnel_type {
GFT_PROFILE_NO_TUNNEL = 0,
GFT_PROFILE_VXLAN_TUNNEL = 1,
@@ -6040,6 +6565,7 @@ enum gft_profile_tunnel_type {
MAX_GFT_PROFILE_TUNNEL_TYPE
};
+/* Used in gft_profile_key: Indication for protocol type */
enum gft_profile_upper_protocol_type {
GFT_PROFILE_ROCE_PROTOCOL = 0,
GFT_PROFILE_RROCE_PROTOCOL = 1,
@@ -6060,6 +6586,7 @@ enum gft_profile_upper_protocol_type {
MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
};
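/*
 * Editor's sketch, not part of the patch: a parser profile key is
 * assembled by packing the enums above into the __le16 profile_key.
 * Assuming SET_FIELD() from common_hsi.h (helper name hypothetical):
 */
static __le16 example_build_profile_key(u8 pf_id)
{
	u16 key = 0;

	SET_FIELD(key, GFT_PROFILE_KEY_IP_VERSION, GFT_PROFILE_IPV4);
	SET_FIELD(key, GFT_PROFILE_KEY_TUNNEL_TYPE, GFT_PROFILE_NO_TUNNEL);
	SET_FIELD(key, GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE,
		  GFT_PROFILE_ROCE_PROTOCOL);
	SET_FIELD(key, GFT_PROFILE_KEY_PF_ID, pf_id);

	return cpu_to_le16(key);
}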
+/* GFT RAM line struct */
struct gft_ram_line {
__le32 lo;
#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
@@ -6149,6 +6676,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_RESERVED1_SHIFT 10
};
+/* Used in the first 2 bits of gft_ram_line: Indication for vlan mask */
enum gft_vlan_select {
INNER_PROVIDER_VLAN = 0,
INNER_VLAN = 1,
@@ -6157,10 +6685,205 @@ enum gft_vlan_select {
MAX_GFT_VLAN_SELECT
};
+/* The rdma task context of Ystorm */
+struct ystorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct e4_ystorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 msem_ctx_upd_seq;
+ u8 flags0;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct e4_mstorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+/* The rdma task context of Mstorm */
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
+/* The rdma task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[2];
+};
+
+struct e4_ustorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 reg2;
+ __le32 dif_runt_value;
+ __le32 reg4;
+ __le32 reg5;
+};
+
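/*
 * Editor's sketch, not part of the patch: the Ustorm aggregative
 * context pairs the 4-bit DIF error type in flags3 with the dif_*
 * counters below it.  Assuming GET_FIELD() from common_hsi.h (helper
 * name hypothetical):
 */
static u8 example_dif_error_type(struct e4_ustorm_rdma_task_ag_ctx *ctx)
{
	if (!le32_to_cpu(ctx->dif_err_intervals))
		return 0;	/* no DIF errors recorded */

	return GET_FIELD(ctx->flags3,
			 E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE);
}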
+/* RDMA task context */
+struct e4_rdma_task_context {
+ struct ystorm_rdma_task_st_ctx ystorm_st_context;
+ struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct tdif_task_context tdif_context;
+ struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_st_ctx mstorm_st_context;
+ struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
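/*
 * Editor's sketch, not part of the patch: e4_rdma_task_context lays
 * out the per-storm state-machine ("st") and aggregative ("ag")
 * sections in the order the hardware expects; software zeroes the
 * whole block and then fills the sub-structures it owns (helper name
 * and initialisation policy are hypothetical):
 */
static void example_init_rdma_task(struct e4_rdma_task_context *ctx, u8 key)
{
	memset(ctx, 0, sizeof(*ctx));

	ctx->ystorm_ag_context.key = key;
	ctx->mstorm_ag_context.key = key;
}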
+/* rdma function close ramrod data */
struct rdma_close_func_ramrod_data {
u8 cnq_start_offset;
u8 num_cnqs;
@@ -6169,6 +6892,7 @@ struct rdma_close_func_ramrod_data {
u8 reserved[4];
};
+/* rdma function init CNQ parameters */
struct rdma_cnq_params {
__le16 sb_num;
u8 sb_index;
@@ -6179,6 +6903,7 @@ struct rdma_cnq_params {
u8 reserved1[6];
};
+/* rdma create cq ramrod data */
struct rdma_create_cq_ramrod_data {
struct regpair cq_handle;
struct regpair pbl_addr;
@@ -6193,21 +6918,25 @@ struct rdma_create_cq_ramrod_data {
__le16 reserved1;
};
+/* rdma deregister tid ramrod data */
struct rdma_deregister_tid_ramrod_data {
__le32 itid;
__le32 reserved;
};
+/* rdma destroy cq output params */
struct rdma_destroy_cq_output_params {
__le16 cnq_num;
__le16 reserved0;
__le32 reserved1;
};
+/* rdma destroy cq ramrod data */
struct rdma_destroy_cq_ramrod_data {
struct regpair output_params_addr;
};
+/* RDMA slow path EQ cmd IDs */
enum rdma_event_opcode {
RDMA_EVENT_UNUSED,
RDMA_EVENT_FUNC_INIT,
@@ -6223,6 +6952,7 @@ enum rdma_event_opcode {
MAX_RDMA_EVENT_OPCODE
};
+/* RDMA FW return code for slow path ramrods */
enum rdma_fw_return_code {
RDMA_RETURN_OK = 0,
RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
@@ -6232,20 +6962,24 @@ enum rdma_fw_return_code {
MAX_RDMA_FW_RETURN_CODE
};
+/* rdma function init header */
struct rdma_init_func_hdr {
u8 cnq_start_offset;
u8 num_cnqs;
u8 cq_ring_mode;
u8 vf_id;
u8 vf_valid;
- u8 reserved[3];
+ u8 relaxed_ordering;
+ u8 reserved[2];
};
+/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data {
struct rdma_init_func_hdr params_header;
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
};
+/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id {
RDMA_RAMROD_UNUSED,
RDMA_RAMROD_FUNC_INIT,
@@ -6261,42 +6995,43 @@ enum rdma_ramrod_cmd_id {
MAX_RDMA_RAMROD_CMD_ID
};
+/* rdma register tid ramrod data */
struct rdma_register_tid_ramrod_data {
__le16 flags;
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
u8 flags1;
#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
u8 key;
u8 length_hi;
u8 vf_id;
@@ -6313,19 +7048,21 @@ struct rdma_register_tid_ramrod_data {
__le32 reserved4[2];
};
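/*
 * Editor's sketch, not part of the patch: the register-TID flags word
 * is typically built in CPU byte order and converted once.  Assuming
 * SET_FIELD() from common_hsi.h (helper name hypothetical):
 */
static __le16 example_build_tid_flags(bool remote_read, bool remote_write)
{
	u16 flags = 0;

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  remote_read ? 1 : 0);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  remote_write ? 1 : 0);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, 1);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, 1);

	return cpu_to_le16(flags);
}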
+/* rdma resize cq output params */
struct rdma_resize_cq_output_params {
__le32 old_cq_cons;
__le32 old_cq_prod;
};
+/* rdma resize cq ramrod data */
struct rdma_resize_cq_ramrod_data {
u8 flags;
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
u8 pbl_log_page_size;
__le16 pbl_num_pages;
__le32 max_cqes;
@@ -6333,10 +7070,12 @@ struct rdma_resize_cq_ramrod_data {
struct regpair output_params_addr;
};
+/* rdma SRQ context */
struct rdma_srq_context {
struct regpair temp[8];
};
+/* rdma create srq ramrod data */
struct rdma_srq_create_ramrod_data {
struct regpair pbl_base_addr;
__le16 pages_in_srq_pbl;
@@ -6348,206 +7087,19 @@ struct rdma_srq_create_ramrod_data {
struct regpair producers_addr;
};
+/* rdma destroy srq ramrod data */
struct rdma_srq_destroy_ramrod_data {
struct rdma_srq_id srq_id;
__le32 reserved;
};
+/* rdma modify srq ramrod data */
struct rdma_srq_modify_ramrod_data {
struct rdma_srq_id srq_id;
__le32 wqe_limit;
};
-struct ystorm_rdma_task_st_ctx {
- struct regpair temp[4];
-};
-
-struct ystorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 msem_ctx_upd_seq;
- u8 flags0;
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 key;
- __le32 mw_cnt;
- u8 ref_cnt_seq;
- u8 ctx_upd_seq;
- __le16 dif_flags;
- __le16 tx_ref_count;
- __le16 last_used_ltid;
- __le16 parent_mr_lo;
- __le16 parent_mr_hi;
- __le32 fbo_lo;
- __le32 fbo_hi;
-};
-
-struct mstorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 icid;
- u8 flags0;
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 key;
- __le32 mw_cnt;
- u8 ref_cnt_seq;
- u8 ctx_upd_seq;
- __le16 dif_flags;
- __le16 tx_ref_count;
- __le16 last_used_ltid;
- __le16 parent_mr_lo;
- __le16 parent_mr_hi;
- __le32 fbo_lo;
- __le32 fbo_hi;
-};
-
-struct ustorm_rdma_task_st_ctx {
- struct regpair temp[2];
-};
-
-struct ustorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 icid;
- u8 flags0;
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
- u8 flags1;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
- u8 flags2;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
- u8 flags3;
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
- __le32 dif_err_intervals;
- __le32 dif_error_1st_interval;
- __le32 reg2;
- __le32 dif_runt_value;
- __le32 reg4;
- __le32 reg5;
-};
-
-struct rdma_task_context {
- struct ystorm_rdma_task_st_ctx ystorm_st_context;
- struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
- struct tdif_task_context tdif_context;
- struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
- struct mstorm_rdma_task_st_ctx mstorm_st_context;
- struct rdif_task_context rdif_context;
- struct ustorm_rdma_task_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2];
- struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
-};
-
+/* RDMA TID type enumeration (for register_tid ramrod) */
enum rdma_tid_type {
RDMA_TID_REGISTERED_MR,
RDMA_TID_FMR,
@@ -6556,214 +7108,214 @@ enum rdma_tid_type {
MAX_RDMA_TID_TYPE
};
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -6783,126 +7335,126 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
__le32 reg4;
};
-struct mstorm_rdma_conn_ag_ctx {
+struct e4_mstorm_rdma_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct tstorm_rdma_conn_ag_ctx {
+struct e4_tstorm_rdma_conn_ag_ctx {
u8 reserved0;
u8 byte1;
u8 flags0;
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -6924,73 +7476,73 @@ struct tstorm_rdma_conn_ag_ctx {
__le32 reg10;
};
-struct tstorm_rdma_task_ag_ctx {
+struct e4_tstorm_rdma_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -7003,63 +7555,63 @@ struct tstorm_rdma_task_ag_ctx {
__le32 reg2;
};
-struct ustorm_rdma_conn_ag_ctx {
+struct e4_ustorm_rdma_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 conn_dpi;
@@ -7072,214 +7624,214 @@ struct ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
-struct xstorm_rdma_conn_ag_ctx {
+struct e4_xstorm_rdma_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -7301,37 +7853,37 @@ struct xstorm_rdma_conn_ag_ctx {
__le32 reg6;
};
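Each *_MASK/*_SHIFT pair above names one bitfield packed into the flagsN byte (or a wider __le16/__le32 word) that precedes it. A minimal accessor sketch in the spirit of the qed driver's GET_FIELD/SET_FIELD helpers follows; the in-tree macros live elsewhere in qed_hsi.h and may differ in detail, and DEMO_CF is a hypothetical field invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Accessor sketch; illustrative, not the in-tree definitions. */
    #define GET_FIELD(value, name) \
            (((value) >> name##_SHIFT) & name##_MASK)
    #define SET_FIELD(value, name, val)                                    \
            do {                                                           \
                    (value) &= ~((name##_MASK) << (name##_SHIFT));         \
                    (value) |= ((val) & (name##_MASK)) << (name##_SHIFT);  \
            } while (0)

    /* Hypothetical two-bit completion-flag field at bits 6..7, mirroring
     * the E4_*_CF*_MASK/SHIFT pairs above. */
    #define DEMO_CF_MASK    0x3
    #define DEMO_CF_SHIFT   6

    int main(void)
    {
            uint8_t flags = 0;

            SET_FIELD(flags, DEMO_CF, 2);   /* pack 2 into bits 6..7 */
            printf("flags=0x%02x cf=%u\n", flags,
                   (unsigned int)GET_FIELD(flags, DEMO_CF));
            return 0;
    }

The E4_ rename leaves this access pattern unchanged; only the name token passed to the helpers grows the prefix.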
-struct ystorm_rdma_conn_ag_ctx {
+struct e4_ystorm_rdma_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -7345,62 +7897,70 @@ struct ystorm_rdma_conn_ag_ctx {
__le32 reg3;
};
-struct mstorm_roce_conn_st_ctx {
- struct regpair temp[6];
+/* The roce storm context of Ystorm */
+struct ystorm_roce_conn_st_ctx {
+ struct regpair temp[2];
};
+/* The roce storm context of Pstorm */
struct pstorm_roce_conn_st_ctx {
struct regpair temp[16];
};
-struct ystorm_roce_conn_st_ctx {
- struct regpair temp[2];
-};
-
+/* The roce storm context of Xstorm */
struct xstorm_roce_conn_st_ctx {
struct regpair temp[24];
};
+/* The roce storm context of Tstorm */
struct tstorm_roce_conn_st_ctx {
struct regpair temp[30];
};
+/* The roce storm context of Mstorm */
+struct mstorm_roce_conn_st_ctx {
+ struct regpair temp[6];
+};
+
+/* The roce storm context of Ustorm */
struct ustorm_roce_conn_st_ctx {
struct regpair temp[12];
};
-struct roce_conn_context {
+/* roce connection context */
+struct e4_roce_conn_context {
struct ystorm_roce_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_roce_conn_st_ctx pstorm_st_context;
struct xstorm_roce_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
- struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_roce_conn_st_ctx tstorm_st_context;
struct mstorm_roce_conn_st_ctx mstorm_st_context;
struct ustorm_roce_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
};
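The e4_roce_conn_context above is a byte-exact overlay of the firmware's connection context: each per-storm st/ag member, plus the explicit regpair padding, must keep exactly the size the firmware expects. A compile-time check in that spirit, using the temp[2] layout of ystorm_roce_conn_st_ctx from this hunk and assuming regpair is two little-endian 32-bit words:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed layout: a regpair is two 32-bit words. */
    struct regpair {
            uint32_t lo;
            uint32_t hi;
    };

    /* Mirrors the ystorm_roce_conn_st_ctx definition in this hunk. */
    struct ystorm_roce_conn_st_ctx {
            struct regpair temp[2];
    };

    /* The E4_ rename must not disturb sizes like this one: the context
     * is read and written by firmware at fixed offsets. */
    static_assert(sizeof(struct ystorm_roce_conn_st_ctx) == 16,
                  "ystorm st ctx must stay two regpairs (16 bytes)");

    int main(void)
    {
            return 0;
    }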
+/* roce create qp requester ramrod data */
struct roce_create_qp_req_ramrod_data {
__le16 flags;
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
u8 max_ord;
u8 traffic_class;
u8 hop_limit;
@@ -7431,26 +7991,27 @@ struct roce_create_qp_req_ramrod_data {
__le16 dpi;
};
+/* roce create qp responder ramrod data */
struct roce_create_qp_resp_ramrod_data {
__le16 flags;
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
u8 max_ird;
u8 traffic_class;
u8 hop_limit;
@@ -7482,24 +8043,40 @@ struct roce_create_qp_resp_ramrod_data {
__le16 dpi;
};
+/* roce DCQCN received statistics */
+struct roce_dcqcn_received_stats {
+ struct regpair ecn_pkt_rcv;
+ struct regpair cnp_pkt_rcv;
+};
+
+/* roce DCQCN sent statistics */
+struct roce_dcqcn_sent_stats {
+ struct regpair cnp_pkt_sent;
+};
+
+/* RoCE destroy qp requester output params */
struct roce_destroy_qp_req_output_params {
__le32 num_bound_mw;
__le32 cq_prod;
};
+/* RoCE destroy qp requester ramrod data */
struct roce_destroy_qp_req_ramrod_data {
struct regpair output_params_addr;
};
+/* RoCE destroy qp responder output params */
struct roce_destroy_qp_resp_output_params {
__le32 num_invalidated_mw;
__le32 cq_prod;
};
+/* RoCE destroy qp responder ramrod data */
struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* roce special events statistics */
struct roce_events_stats {
__le16 silent_drops;
__le16 rnr_naks_sent;
@@ -7508,6 +8085,7 @@ struct roce_events_stats {
__le32 reserved;
};
+/* ROCE slow path EQ cmd IDs */
enum roce_event_opcode {
ROCE_EVENT_CREATE_QP = 11,
ROCE_EVENT_MODIFY_QP,
@@ -7518,6 +8096,7 @@ enum roce_event_opcode {
MAX_ROCE_EVENT_OPCODE
};
+/* roce func init parameters */
struct roce_init_func_params {
u8 ll2_queue_id;
u8 cnp_vlan_priority;
@@ -7526,42 +8105,46 @@ struct roce_init_func_params {
__le32 cnp_send_timeout;
};
+/* roce func init ramrod data */
struct roce_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma;
struct roce_init_func_params roce;
};
+/* roce modify qp requester ramrod data */
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
u8 fields;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
u8 max_ord;
u8 traffic_class;
u8 hop_limit;
@@ -7570,66 +8153,76 @@ struct roce_modify_qp_req_ramrod_data {
__le32 ack_timeout_val;
__le16 mtu;
__le16 reserved2;
- __le32 reserved3[3];
+ __le32 reserved3[2];
+ __le16 low_latency_phy_queue;
+ __le16 regular_latency_phy_queue;
__le32 src_gid[4];
__le32 dst_gid[4];
};
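The flags and fields words in these ramrod structures are filled with the same mask/shift helpers before the ramrod is posted. A host-order sketch using two field definitions copied from the flags word above; the real member is __le16, so a driver would convert the final value with cpu_to_le16, and SET_FIELD here is the same illustrative macro as in the earlier sketch:

    #include <stdint.h>

    #define SET_FIELD(value, name, val)                                    \
            do {                                                           \
                    (value) &= ~((name##_MASK) << (name##_SHIFT));         \
                    (value) |= ((val) & (name##_MASK)) << (name##_SHIFT);  \
            } while (0)

    /* Field definitions copied from the flags word above. */
    #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK     0x1
    #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT    1
    #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK                 0x7
    #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT                10

    int main(void)
    {
            uint16_t flags = 0;     /* host-order stand-in for the __le16 member */

            SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, 1);
            SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI, 3);
            /* flags == (1 << 1) | (3 << 10) == 0x0c02 */
            return 0;
    }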
+/* roce modify qp responder ramrod data */
struct roce_modify_qp_resp_ramrod_data {
__le16 flags;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
u8 fields;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
u8 max_ird;
u8 traffic_class;
u8 hop_limit;
__le16 p_key;
__le32 flow_label;
__le16 mtu;
- __le16 reserved2;
+ __le16 low_latency_phy_queue;
+ __le16 regular_latency_phy_queue;
+ u8 reserved2[6];
__le32 src_gid[4];
__le32 dst_gid[4];
};
+/* RoCE query qp requester output params */
struct roce_query_qp_req_output_params {
__le32 psn;
__le32 flags;
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
};
+/* RoCE query qp requester ramrod data */
struct roce_query_qp_req_ramrod_data {
struct regpair output_params_addr;
};
+/* RoCE query qp responder output params */
struct roce_query_qp_resp_output_params {
__le32 psn;
__le32 err_flag;
@@ -7639,10 +8232,12 @@ struct roce_query_qp_resp_output_params {
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
};
+/* RoCE query qp responder ramrod data */
struct roce_query_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* ROCE ramrod command IDs */
enum roce_ramrod_cmd_id {
ROCE_RAMROD_CREATE_QP = 11,
ROCE_RAMROD_MODIFY_QP,
@@ -7653,163 +8248,163 @@ enum roce_ramrod_cmd_id {
MAX_ROCE_RAMROD_CMD_ID
};
-struct mstorm_roce_req_conn_ag_ctx {
+struct e4_mstorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct mstorm_roce_resp_conn_ag_ctx {
+struct e4_mstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct tstorm_roce_req_conn_ag_ctx {
+struct e4_tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
u8 flags1;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
u8 flags3;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
@@ -7825,95 +8420,95 @@ struct tstorm_roce_req_conn_ag_ctx {
u8 byte4;
u8 byte5;
__le16 snd_sq_cons;
- __le16 word2;
+ __le16 conn_dpi;
__le16 word3;
__le32 reg9;
__le32 reg10;
};
-struct tstorm_roce_resp_conn_ag_ctx {
+struct e4_tstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 psn_and_rxmit_id_echo;
__le32 reg1;
__le32 reg2;
@@ -7935,63 +8530,63 @@ struct tstorm_roce_resp_conn_ag_ctx {
__le32 reg10;
};
-struct ustorm_roce_req_conn_ag_ctx {
+struct e4_ustorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8004,63 +8599,63 @@ struct ustorm_roce_req_conn_ag_ctx {
__le16 word3;
};
-struct ustorm_roce_resp_conn_ag_ctx {
+struct e4_ustorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8073,214 +8668,214 @@ struct ustorm_roce_resp_conn_ag_ctx {
__le16 word3;
};
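
Every *_MASK/*_SHIFT pair above describes one sub-byte field packed into the adjacent u8 flags word; the names only change, not the packing. A minimal sketch of how such fields are typically read and written, using the GET_FIELD()/SET_FIELD() helpers from the qed common HSI header (shown here in simplified form; the wrapper function is illustrative, not part of this patch):

	#define GET_FIELD(value, name) \
		(((value) >> (name ## _SHIFT)) & name ## _MASK)

	#define SET_FIELD(value, name, flag)                               \
		do {                                                       \
			(value) &= ~(name ## _MASK << name ## _SHIFT);     \
			(value) |= ((u64)(flag)) << (name ## _SHIFT);      \
		} while (0)

	static void example_cf0(struct e4_ustorm_roce_resp_conn_ag_ctx *ctx)
	{
		u8 cf0;

		/* Read the 2-bit CF0 field (bits 3:2 of flags0). */
		cf0 = GET_FIELD(ctx->flags0,
				E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0);

		/* Set the 1-bit CF0 enable flag (bit 0 of flags2). */
		SET_FIELD(ctx->flags2,
			  E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN, 1);
		(void)cf0;
	}
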
-struct xstorm_roce_req_conn_ag_ctx {
+struct e4_xstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
u8 flags13;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -8302,224 +8897,224 @@ struct xstorm_roce_req_conn_ag_ctx {
__le32 orq_cons;
};
-struct xstorm_roce_resp_conn_ag_ctx {
+struct e4_xstorm_roce_resp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
- __le16 word1;
- __le16 irq_prod;
- __le16 word3;
- __le16 word4;
- __le16 ereserved1;
+ __le16 irq_prod_shadow;
+ __le16 word2;
__le16 irq_cons;
+ __le16 irq_prod;
+ __le16 e5_reserved1;
+ __le16 conn_dpi;
u8 rxmit_opcode;
u8 byte4;
u8 byte5;
@@ -8533,37 +9128,37 @@ struct xstorm_roce_resp_conn_ag_ctx {
__le32 msn_and_syndrome;
};
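
The producer/consumer fields in these contexts are declared __le16/__le32 because the device consumes the context image directly, so host-side code converts on access. A minimal sketch against the e4_xstorm_roce_resp_conn_ag_ctx layout above (the helper itself is hypothetical, not part of this patch):

	/* Hypothetical helper: outstanding entries on the IRQ ring,
	 * computed with u16 wraparound arithmetic after converting
	 * the device-endian fields to host order.
	 */
	static u16 roce_resp_irq_fill(const struct e4_xstorm_roce_resp_conn_ag_ctx *ctx)
	{
		u16 prod = le16_to_cpu(ctx->irq_prod);
		u16 cons = le16_to_cpu(ctx->irq_cons);

		return prod - cons;
	}
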
-struct ystorm_roce_req_conn_ag_ctx {
+struct e4_ystorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8577,37 +9172,37 @@ struct ystorm_roce_req_conn_ag_ctx {
__le32 reg3;
};
-struct ystorm_roce_resp_conn_ag_ctx {
+struct e4_ystorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8621,6 +9216,7 @@ struct ystorm_roce_resp_conn_ag_ctx {
__le32 reg3;
};
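
/* Editorial sketch, not part of the patch: every paired _MASK/_SHIFT macro
 * above is meant to be consumed through the driver's generic bit-field
 * helpers. The definitions below mirror GET_FIELD()/SET_FIELD() as found in
 * drivers/net/ethernet/qlogic/qed/qed.h in this tree; the ag_ctx pointer and
 * the flag value in the example are illustrative assumptions.
 */
#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)
#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~(name ## _MASK << name ## _SHIFT);		\
		(value) |= (((u64)(flag)) << (name ## _SHIFT));		\
	} while (0)

/* Example: enable RULE0 in flags1 of the renamed E4 ystorm context. */
SET_FIELD(ag_ctx->flags1, E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN, 1);
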
+/* Roce doorbell data */
enum roce_flavor {
PLAIN_ROCE,
RROCE_IPV4,
@@ -8628,228 +9224,231 @@ enum roce_flavor {
MAX_ROCE_FLAVOR
};
+/* The iwarp storm context of Ystorm */
struct ystorm_iwarp_conn_st_ctx {
__le32 reserved[4];
};
+/* The iwarp storm context of Pstorm */
struct pstorm_iwarp_conn_st_ctx {
__le32 reserved[36];
};
+/* The iwarp storm context of Xstorm */
struct xstorm_iwarp_conn_st_ctx {
__le32 reserved[44];
};
-struct xstorm_iwarp_conn_ag_ctx {
+struct e4_xstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
u8 flags2;
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -8897,89 +9496,89 @@ struct xstorm_iwarp_conn_ag_ctx {
__le32 reg17;
};
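
/* Editorial sketch, not part of the patch: the 2-bit CF ("completion flag")
 * fields in the e4_xstorm_iwarp context are decoded the same way, via
 * GET_FIELD() from qed.h. The p_ag_ctx pointer name is an illustrative
 * assumption.
 */
u8 timer_stop_all = GET_FIELD(p_ag_ctx->flags2,
			      E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL);
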
-struct tstorm_iwarp_conn_ag_ctx {
+struct e4_tstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 unaligned_nxt_seq;
@@ -9001,51 +9600,56 @@ struct tstorm_iwarp_conn_ag_ctx {
__le32 last_hq_sequence;
};
+/* The iwarp storm context of Tstorm */
struct tstorm_iwarp_conn_st_ctx {
__le32 reserved[60];
};
+/* The iwarp storm context of Mstorm */
struct mstorm_iwarp_conn_st_ctx {
__le32 reserved[32];
};
+/* The iwarp storm context of Ustorm */
struct ustorm_iwarp_conn_st_ctx {
__le32 reserved[24];
};
-struct iwarp_conn_context {
+/* iwarp connection context */
+struct e4_iwarp_conn_context {
struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
- struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
};
+/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
struct iwarp_create_qp_ramrod_data {
u8 flags;
#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
u8 reserved1;
__le16 pd;
__le16 sq_num_pages;
@@ -9061,6 +9665,7 @@ struct iwarp_create_qp_ramrod_data {
u8 reserved2[6];
};
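
/* Editorial sketch, not part of the patch: a CreateQP ramrod is typically
 * populated with SET_FIELD() on the flags byte, in the spirit of
 * qed_iwarp_create_qp() in qed_iwarp.c. The qp->... field sources are
 * illustrative assumptions.
 */
SET_FIELD(p_ramrod->flags,
	  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
	  qp->fmr_and_reserved_lkey);
SET_FIELD(p_ramrod->flags,
	  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, qp->incoming_rdma_read_en);
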
+/* iWARP completion queue types */
enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
@@ -9083,6 +9688,7 @@ struct iwarp_eqe_data_tcp_async_completion {
u8 reserved[5];
};
+/* iWARP completion queue types */
enum iwarp_eqe_sync_opcode {
IWARP_EVENT_TYPE_TCP_OFFLOAD =
11,
@@ -9095,6 +9701,7 @@ enum iwarp_eqe_sync_opcode {
MAX_IWARP_EQE_SYNC_OPCODE
};
+/* iWARP EQE completion status */
enum iwarp_fw_return_code {
IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
IWARP_CONN_ERROR_TCP_CONNECTION_RST,
@@ -9125,54 +9732,60 @@ enum iwarp_fw_return_code {
MAX_IWARP_FW_RETURN_CODE
};
+/* unaligned opaque data received from LL2 */
struct iwarp_init_func_params {
u8 ll2_ooo_q_index;
u8 reserved1[7];
};
+/* iwarp func init ramrod data */
struct iwarp_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma;
struct tcp_init_params tcp;
struct iwarp_init_func_params iwarp;
};
+/* iWARP QP - possible states to transition to */
enum iwarp_modify_qp_new_state_type {
IWARP_MODIFY_QP_STATE_CLOSING = 1,
- IWARP_MODIFY_QP_STATE_ERROR =
- 2,
+ IWARP_MODIFY_QP_STATE_ERROR = 2,
MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
};
+/* iwarp modify qp responder ramrod data */
struct iwarp_modify_qp_ramrod_data {
__le16 transition_to_state;
__le16 flags;
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
-#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
__le32 reserved3[3];
__le32 reserved4[8];
};
+/* MPA params for Enhanced mode */
struct mpa_rq_params {
__le32 ird;
__le32 ord;
};
+/* MPA host Address-Len for private data */
struct mpa_ulp_buffer {
struct regpair addr;
__le16 len;
__le16 reserved[3];
};
+/* iWARP MPA offload params common to Basic and Enhanced modes */
struct mpa_outgoing_params {
u8 crc_needed;
u8 reject;
@@ -9181,6 +9794,9 @@ struct mpa_outgoing_params {
struct mpa_ulp_buffer outgoing_ulp_buffer;
};
+/* iWARP MPA offload params passed by driver to FW in MPA Offload Request
+ * Ramrod.
+ */
struct iwarp_mpa_offload_ramrod_data {
struct mpa_outgoing_params common;
__le32 tcp_cid;
@@ -9188,18 +9804,20 @@ struct iwarp_mpa_offload_ramrod_data {
u8 tcp_connect_side;
u8 rtr_pref;
#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
u8 reserved2;
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
struct regpair shared_queue_addr;
+ __le16 rcv_wnd;
u8 stats_counter_id;
- u8 reserved3[15];
+ u8 reserved3[13];
};
+/* iWARP TCP connection offload params passed by driver to FW */
struct iwarp_offload_params {
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
@@ -9211,22 +9829,24 @@ struct iwarp_offload_params {
u8 reserved[10];
};
+/* iWARP query QP output params */
struct iwarp_query_qp_output_params {
__le32 flags;
#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
u8 reserved1[4];
};
+/* iWARP query QP ramrod data */
struct iwarp_query_qp_ramrod_data {
struct regpair output_params_addr;
};
+/* iWARP Ramrod Command IDs */
enum iwarp_ramrod_cmd_id {
- IWARP_RAMROD_CMD_ID_TCP_OFFLOAD =
- 11,
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9236,22 +9856,28 @@ enum iwarp_ramrod_cmd_id {
MAX_IWARP_RAMROD_CMD_ID
};
+/* Per PF iWARP retransmit path statistics */
struct iwarp_rxmit_stats_drv {
struct regpair tx_go_to_slow_start_event_cnt;
struct regpair tx_fast_retransmit_event_cnt;
};
+/* iWARP and TCP connection offload params passed by driver to FW in iWARP
+ * offload ramrod.
+ */
struct iwarp_tcp_offload_ramrod_data {
struct iwarp_offload_params iwarp;
struct tcp_offload_params_opt2 tcp;
};
+/* iWARP MPA negotiation types */
enum mpa_negotiation_mode {
MPA_NEGOTIATION_TYPE_BASIC = 1,
MPA_NEGOTIATION_TYPE_ENHANCED = 2,
MAX_MPA_NEGOTIATION_MODE
};
+/* iWARP MPA Enhanced mode RTR types */
enum mpa_rtr_type {
MPA_RTR_TYPE_NONE = 0,
MPA_RTR_TYPE_ZERO_SEND = 1,
@@ -9264,113 +9890,114 @@ enum mpa_rtr_type {
MAX_MPA_RTR_TYPE
};
+/* unaligned opaque data received from LL2 */
struct unaligned_opaque_data {
__le16 first_mpa_offset;
u8 tcp_payload_offset;
u8 flags;
#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1
-#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
-#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
-#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
__le32 cid;
};
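
/* Editorial sketch, not part of the patch: the opaque data arrives from LL2
 * in little-endian form, so the multi-byte members need le*_to_cpu() before
 * use, while the single-byte flags field is decoded with GET_FIELD(). The
 * variable names are illustrative assumptions.
 */
u32 cid = le32_to_cpu(opaque->cid);
u16 mpa_off = le16_to_cpu(opaque->first_mpa_offset);
bool closed = GET_FIELD(opaque->flags,
			UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED);
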
-struct mstorm_iwarp_conn_ag_ctx {
+struct e4_mstorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 rcq_cons;
__le16 rcq_cons_th;
__le32 reg0;
__le32 reg1;
};
-struct ustorm_iwarp_conn_ag_ctx {
+struct e4_ustorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9383,37 +10010,37 @@ struct ustorm_iwarp_conn_ag_ctx {
__le16 word3;
};
-struct ystorm_iwarp_conn_ag_ctx {
+struct e4_ystorm_iwarp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9427,6 +10054,7 @@ struct ystorm_iwarp_conn_ag_ctx {
__le32 reg3;
};
+/* The fcoe storm context of Ystorm */
struct ystorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 cos;
@@ -9442,45 +10070,49 @@ struct ystorm_fcoe_conn_st_ctx {
struct regpair reserved;
__le16 min_frame_size;
u8 protection_info_flags;
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2
u8 dst_protection_per_mss;
u8 src_protection_per_mss;
u8 ptu_log_page_size;
u8 flags;
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
u8 fcp_xfer_size;
};
+/* FCoE 16-bits vlan structure */
struct fcoe_vlan_fields {
__le16 fields;
-#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF
-#define FCOE_VLAN_FIELDS_VID_SHIFT 0
-#define FCOE_VLAN_FIELDS_CLI_MASK 0x1
-#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
-#define FCOE_VLAN_FIELDS_PRI_MASK 0x7
-#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI_MASK 0x1
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI_MASK 0x7
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
};
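
/* Editorial sketch, not part of the patch: the three sub-fields pack into a
 * single 16-bit word, so one would assemble a host-order value first and
 * convert it once at the end. The vid/pri inputs are illustrative
 * assumptions.
 */
struct fcoe_vlan_fields vlan;
u16 v = 0;

SET_FIELD(v, FCOE_VLAN_FIELDS_VID, vid);
SET_FIELD(v, FCOE_VLAN_FIELDS_PRI, pri);
vlan.fields = cpu_to_le16(v);
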
+/* FCoE 16-bits vlan union */
union fcoe_vlan_field_union {
struct fcoe_vlan_fields fields;
__le16 val;
};
+/* FCoE 16-bits vlan, vif union */
union fcoe_vlan_vif_field_union {
union fcoe_vlan_field_union vlan;
__le16 vif;
};
+/* Ethernet context section */
struct pstorm_fcoe_eth_context_section {
u8 remote_addr_3;
u8 remote_addr_2;
@@ -9500,6 +10132,7 @@ struct pstorm_fcoe_eth_context_section {
__le16 inner_eth_type;
};
+/* The fcoe storm context of Pstorm */
struct pstorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 cos;
@@ -9513,16 +10146,18 @@ struct pstorm_fcoe_conn_st_ctx {
u8 sid_1;
u8 sid_0;
u8 flags;
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT 4
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x7
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 5
u8 did_2;
u8 did_1;
u8 did_0;
@@ -9532,6 +10167,7 @@ struct pstorm_fcoe_conn_st_ctx {
u8 reserved1;
};
+/* The fcoe storm context of Xstorm */
struct xstorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 src_mac_index;
@@ -9539,16 +10175,16 @@ struct xstorm_fcoe_conn_st_ctx {
u8 cached_wqes_avail;
__le16 stat_ram_addr;
u8 flags;
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5
u8 cached_wqes_offset;
u8 reserved2;
u8 eth_hdr_size;
@@ -9574,18 +10210,18 @@ struct xstorm_fcoe_conn_st_ctx {
u8 fcp_cmd_byte_credit;
u8 fcp_rsp_byte_credit;
__le16 protection_info;
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
__le16 xferq_pbl_next_index;
__le16 page_size;
u8 mid_seq;
@@ -9594,216 +10230,216 @@ struct xstorm_fcoe_conn_st_ctx {
struct fcoe_wqe cached_wqes[16];
};
-struct xstorm_fcoe_conn_ag_ctx {
+struct e4_xstorm_fcoe_conn_ag_ctx {
u8 reserved0;
- u8 fcoe_state;
+ u8 state;
u8 flags0;
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
u8 flags2;
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
u8 flags7;
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
u8 flags12;
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -9831,6 +10467,7 @@ struct xstorm_fcoe_conn_ag_ctx {
__le32 reg8;
};
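Structurally, the 2-bit CF fields in this aggregation context appear paired with 1-bit ...EN enables; a hedged sketch of arming two of the enables defined above (the function itself is hypothetical):

/* Illustrative only: enable the XFERQ decision rule and the
 * FLUSH_Q0 completion flag in the renamed E4 Xstorm agg context.
 */
static void fcoe_xstorm_arm_sketch(struct e4_xstorm_fcoe_conn_ag_ctx *ag)
{
	SET_FIELD(ag->flags11,
		  E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN, 1);
	SET_FIELD(ag->flags10,
		  E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN, 1);
}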
+/* The fcoe storm context of Ustorm */
struct ustorm_fcoe_conn_st_ctx {
struct regpair respq_pbl_addr;
__le16 num_pages_in_pbl;
@@ -9840,150 +10477,150 @@ struct ustorm_fcoe_conn_st_ctx {
u8 reserved[2];
};
-struct tstorm_fcoe_conn_ag_ctx {
+struct e4_tstorm_fcoe_conn_ag_ctx {
u8 reserved0;
- u8 fcoe_state;
+ u8 state;
u8 flags0;
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
u8 flags1;
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
};
-struct ustorm_fcoe_conn_ag_ctx {
+struct e4_ustorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9996,72 +10633,76 @@ struct ustorm_fcoe_conn_ag_ctx {
__le16 word3;
};
+/* The fcoe storm context of Tstorm */
struct tstorm_fcoe_conn_st_ctx {
__le16 stat_ram_addr;
__le16 rx_max_fc_payload_len;
__le16 e_d_tov_val;
u8 flags;
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2
u8 timers_cleanup_invocation_cnt;
__le32 reserved1[2];
- __le32 dst_mac_address_bytes0to3;
- __le16 dst_mac_address_bytes4to5;
+ __le32 dst_mac_address_bytes_0_to_3;
+ __le16 dst_mac_address_bytes_4_to_5;
__le16 ramrod_echo;
u8 flags1;
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
- u8 q_relative_offset;
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
+ u8 cq_relative_offset;
+ u8 cmdq_relative_offset;
u8 bdq_resource_id;
- u8 reserved0[5];
+ u8 reserved0[4];
};
-struct mstorm_fcoe_conn_ag_ctx {
+struct e4_mstorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
+/* Fast-path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
__le16 xfer_prod;
- __le16 reserved1;
+ u8 num_cqs;
+ u8 reserved1;
u8 protection_info;
#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1
#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
@@ -10073,6 +10714,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
u8 reserved2[2];
};
+/* Non-fast-path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
__le16 conn_id;
__le16 stat_ram_addr;
@@ -10088,37 +10730,46 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
struct regpair reserved2[3];
};
+/* The fcoe storm context of Mstorm */
struct mstorm_fcoe_conn_st_ctx {
struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
};
-struct fcoe_conn_context {
+/* fcoe connection context */
+struct e4_fcoe_conn_context {
struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
- struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
struct regpair xstorm_ag_padding[6];
struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+ struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
- struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+ struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
};
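A hedged sketch of preparing the renamed context before the offload ramrod (the real initialization in the driver sets many more fields; this function is hypothetical):

/* Illustrative only: zero the whole E4 connection context, then set
 * two of the Pstorm flags declared above via SET_FIELD().
 */
static void fcoe_ctx_init_sketch(struct e4_fcoe_conn_context *ctx)
{
	memset(ctx, 0, sizeof(*ctx));

	SET_FIELD(ctx->pstorm_st_context.flags,
		  PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV, 1);
	SET_FIELD(ctx->pstorm_st_context.flags,
		  PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG, 1);
}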
+/* FCoE connection offload params passed by driver to FW in FCoE offload
+ * ramrod.
+ */
struct fcoe_conn_offload_ramrod_params {
struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
};
+/* FCoE connection terminate params passed by driver to FW in FCoE terminate
+ * conn ramrod.
+ */
struct fcoe_conn_terminate_ramrod_params {
struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
};
+/* FCoE event type */
enum fcoe_event_type {
FCOE_EVENT_INIT_FUNC,
FCOE_EVENT_DESTROY_FUNC,
@@ -10129,10 +10780,12 @@ enum fcoe_event_type {
MAX_FCOE_EVENT_TYPE
};
+/* FCoE init params passed by driver to FW in FCoE init ramrod */
struct fcoe_init_ramrod_params {
struct fcoe_init_func_ramrod_data init_ramrod_data;
};
+/* FCoE ramrod Command IDs */
enum fcoe_ramrod_cmd_id {
FCOE_RAMROD_CMD_ID_INIT_FUNC,
FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
@@ -10142,41 +10795,44 @@ enum fcoe_ramrod_cmd_id {
MAX_FCOE_RAMROD_CMD_ID
};
+/* FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod.
+ */
struct fcoe_stat_ramrod_params {
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
-struct ystorm_fcoe_conn_ag_ctx {
+struct e4_ystorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10190,230 +10846,233 @@ struct ystorm_fcoe_conn_ag_ctx {
__le32 reg3;
};
+/* The iscsi storm connection context of Ystorm */
struct ystorm_iscsi_conn_st_ctx {
- __le32 reserved[4];
+ __le32 reserved[8];
};
+/* Combined iSCSI and TCP storm connection context of Pstorm */
struct pstorm_iscsi_tcp_conn_st_ctx {
__le32 tcp[32];
__le32 iscsi[4];
};
+/* The combined tcp and iscsi storm context of Xstorm */
struct xstorm_iscsi_tcp_conn_st_ctx {
- __le32 reserved_iscsi[40];
__le32 reserved_tcp[4];
+ __le32 reserved_iscsi[44];
};
-struct xstorm_iscsi_conn_ag_ctx {
+struct e4_xstorm_iscsi_conn_ag_ctx {
u8 cdu_validation;
u8 state;
u8 flags0;
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
u8 flags2;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
u8 flags6;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -10449,7 +11108,7 @@ struct xstorm_iscsi_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 ereserved;
+ u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -10461,89 +11120,89 @@ struct xstorm_iscsi_conn_ag_ctx {
__le32 reg17;
};
-struct tstorm_iscsi_conn_ag_ctx {
+struct e4_tstorm_iscsi_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -10558,63 +11217,63 @@ struct tstorm_iscsi_conn_ag_ctx {
__le16 word0;
};
-struct ustorm_iscsi_conn_ag_ctx {
+struct e4_ustorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10627,113 +11286,117 @@ struct ustorm_iscsi_conn_ag_ctx {
__le16 word3;
};
+/* The iSCSI storm connection context of Tstorm */
struct tstorm_iscsi_conn_st_ctx {
- __le32 reserved[40];
+ __le32 reserved[44];
};
-struct mstorm_iscsi_conn_ag_ctx {
+struct e4_mstorm_iscsi_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
+/* Combined iSCSI and TCP storm connection of Mstorm */
struct mstorm_iscsi_tcp_conn_st_ctx {
__le32 reserved_tcp[20];
- __le32 reserved_iscsi[8];
+ __le32 reserved_iscsi[12];
};
+/* The iSCSI storm context of Ustorm */
struct ustorm_iscsi_conn_st_ctx {
__le32 reserved[52];
};
-struct iscsi_conn_context {
+/* iSCSI connection context */
+struct e4_iscsi_conn_context {
struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2];
struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct pb_context xpb2_context;
struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
- struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
struct pb_context upb_context;
struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
};
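Every flags byte in these AG context structs packs several sub-fields, addressed through the MASK/SHIFT pairs defined above. Below is a minimal standalone sketch of that accessor pattern; the macros are illustrative local copies, while the driver itself uses the SET_FIELD()/GET_FIELD() helpers from its common headers.

#include <stdint.h>
#include <stdio.h>

/* Local illustrative copies; the driver uses SET_FIELD()/GET_FIELD()
 * from its common headers.
 */
#define SET_FIELD(value, name, field_val)				\
	do {								\
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));	\
		(value) |= ((field_val) & (name ## _MASK)) <<		\
			   (name ## _SHIFT);				\
	} while (0)
#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))

/* Field pair copied from the defines above: 2-bit CF0 at bits 6-7 */
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK	0x3
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT	6

int main(void)
{
	uint8_t flags0 = 0;

	SET_FIELD(flags0, E4_TSTORM_ISCSI_CONN_AG_CTX_CF0, 2);
	printf("flags0=0x%02x cf0=%u\n", flags0,
	       GET_FIELD(flags0, E4_TSTORM_ISCSI_CONN_AG_CTX_CF0));
	return 0;
}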
+/* iSCSI init params passed by driver to FW in iSCSI init ramrod */
struct iscsi_init_ramrod_params {
struct iscsi_spe_func_init iscsi_init_spe;
struct tcp_init_params tcp_init;
};
-struct ystorm_iscsi_conn_ag_ctx {
+struct e4_ystorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11613,7 +12276,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a05feb38c6ee..fca2dbd93ad9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const char *phase)
+{
+ u32 size = PAGE_SIZE / 2, val;
+ struct qed_dmae_params params;
+ int rc = 0;
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 *p_tmp;
+
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ 2 * size, &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: failed to allocate memory\n",
+ phase);
+ return -ENOMEM;
+ }
+
+ /* Fill the bottom half of the allocated memory with a known pattern */
+ for (p_tmp = (u32 *)p_virt;
+ p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+ /* Save the address itself as the value */
+ val = (u32)(uintptr_t)p_tmp;
+ *p_tmp = val;
+ }
+
+ /* Zero the top half of the allocated memory */
+ memset((u8 *)p_virt + size, 0, size);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+ phase,
+ (u64)p_phys,
+ p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+ memset(&params, 0, sizeof(params));
+ rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+ size / 4 /* size_in_dwords */, &params);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+ phase, rc);
+ goto out;
+ }
+
+ /* Verify that the top half of the allocated memory has the pattern */
+ for (p_tmp = (u32 *)((u8 *)p_virt + size);
+ p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+ /* The corresponding address in the bottom half */
+ val = (u32)(uintptr_t)p_tmp - size;
+
+ if (*p_tmp != val) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+ phase,
+ (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+ p_tmp, *p_tmp, val);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+ return rc;
+}
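The sanity check above reduces to: seed each source word with its own address, copy the bottom half over the top half, and verify every destination word. A host-only sketch of the same check, with memcpy() standing in for the DMA engine and no qed types assumed:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t size = 2048, i, n = size / sizeof(uint32_t);
	uint32_t *src, *dst;

	src = malloc(size);
	dst = calloc(1, size);
	if (!src || !dst)
		return 1;

	/* Seed each source word with its own (truncated) address */
	for (i = 0; i < n; i++)
		src[i] = (uint32_t)(uintptr_t)&src[i];

	/* memcpy() stands in for qed_dmae_host2host() */
	memcpy(dst, src, size);

	/* Verify: each destination word must equal the address of the
	 * source word it was copied from, as in the DMAE sanity loop.
	 */
	for (i = 0; i < n; i++)
		if (dst[i] != (uint32_t)(uintptr_t)&src[i]) {
			printf("mismatch at word %zu\n", i);
			return 1;
		}
	printf("copy verified\n");
	free(src);
	free(dst);
	return 0;
}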
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index f2505c691c26..8db2839a8ec8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -299,4 +299,8 @@ union qed_qm_pq_params {
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const char *phase);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index b069ad088269..18fb5062a83d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -31,6 +31,7 @@
*/
#include <linux/types.h>
+#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -40,102 +41,197 @@
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#define CDU_VALIDATION_DEFAULT_CFG 61
+
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+ {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
+ {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
+ {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+};
+
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+ {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
+};
+
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID 0xffff
+#define QM_INVALID_PQ_ID 0xffff
+
/* Feature enable */
-#define QM_BYPASS_EN 1
-#define QM_BYTE_CRD_EN 1
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+
/* Other PQ constants */
-#define QM_OTHER_PQS_PER_PF 4
+#define QM_OTHER_PQS_PER_PF 4
+
/* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 62500000
-#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
-#define QM_WFQ_VP_PQ_PF_SHIFT 5
-#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 43750000
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND 62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+
+/* 0x9000 = 4*9*1024 */
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
/* RL constants */
-#define QM_RL_UPPER_BOUND 62500000
-#define QM_RL_PERIOD 5 /* in us */
-#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL 43750000
-#define QM_RL_INC_VAL(rate) max_t(u32, \
- (u32)(((rate ? rate : \
- 1000000) * \
- QM_RL_PERIOD * \
- 101) / (8 * 100)), 1)
+
+/* Period in us */
+#define QM_RL_PERIOD 5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+
+/* RL increment value - rate is specified in mbps */
+#define QM_RL_INC_VAL(rate) ({ \
+ typeof(rate) __rate = (rate); \
+ max_t(u32, \
+ (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
+ (8 * 100)), \
+ 1); })
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND 62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \
+ QM_RL_INC_VAL(speed), \
+ 9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
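To make the rate-limit arithmetic concrete: QM_RL_INC_VAL() converts a rate in Mbps into a credit increment per 5 us period, padded by 1%, and the maximum increment is capped at 0.7 of the upper bound. A quick standalone check of the formulas (the 25 Gbps input is an arbitrary illustrative value):

#include <stdint.h>
#include <stdio.h>

#define QM_RL_PERIOD		5	/* us */
#define QM_PF_RL_UPPER_BOUND	62500000
#define QM_PF_RL_MAX_INC_VAL	((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* Same arithmetic as QM_RL_INC_VAL() without the typeof() wrapper;
 * the kernel macro additionally clamps the result to at least 1.
 */
static uint32_t qm_rl_inc_val(uint32_t rate_mbps)
{
	uint64_t rate = rate_mbps ? rate_mbps : 1000000;

	return (uint32_t)((rate * QM_RL_PERIOD * 101) / (8 * 100));
}

int main(void)
{
	/* 25 Gbps: 25000 * 5 * 101 / 800 = 15781 credit units */
	printf("inc(25000 Mbps) = %u\n", qm_rl_inc_val(25000));
	/* Cap at 0.7 * upper bound = 43750000 */
	printf("max inc = %u\n", (unsigned int)QM_PF_RL_MAX_INC_VAL);
	return 0;
}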
+
/* AFullOprtnstcCrdMask constants */
-#define QM_OPPOR_LINE_VOQ_DEF 1
-#define QM_OPPOR_FW_STOP_DEF 0
-#define QM_OPPOR_PQ_EMPTY_DEF 1
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+
/* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
- (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
- PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
- (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
- PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
-#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
- 4) * \
- 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+ (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+ (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+ ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
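QM_VOQ_LINE_CRD() turns a line count into a credit register value: drop 4 spare lines, double, and set the sign bit. A small check with an illustrative line count; the sign-bit value is assumed here, the real one comes from the qed register definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed for illustration only */
#define QM_LINE_CRD_REG_SIGN_BIT	0x80000000U

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)

int main(void)
{
	/* (1600 - 4) * 2 = 3192 -> 0x80000c78 with the sign bit set */
	printf("crd = 0x%08x\n", QM_VOQ_LINE_CRD(1600u));
	return 0;
}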
+
/* BTB: blocks constants (block size = 256B) */
-#define BTB_JUMBO_PKT_BLOCKS 38
-#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
-#define BTB_PURE_LB_FACTOR 10
-#define BTB_PURE_LB_RATIO 7
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS 38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR 10
+
+/* Pure LB ratio, scaled by BTB_PURE_LB_FACTOR (hence really 0.7) */
+#define BTB_PURE_LB_RATIO 7
+
/* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH 32
-#define QM_STOP_CMD_ADDR 2
-#define QM_STOP_CMD_STRUCT_SIZE 2
-#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
-#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK -1
-#define QM_STOP_CMD_GROUP_ID_OFFSET 1
-#define QM_STOP_CMD_GROUP_ID_SHIFT 16
-#define QM_STOP_CMD_GROUP_ID_MASK 15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
-#define QM_STOP_CMD_PQ_TYPE_MASK 1
-#define QM_STOP_CMD_MAX_POLL_COUNT 100
-#define QM_STOP_CMD_POLL_PERIOD_US 500
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \
- _STRUCT_SIZE
-#define QM_CMD_SET_FIELD(var, cmd, field, \
- value) SET_FIELD(var[cmd ## _ ## field ## \
- _OFFSET], \
- cmd ## _ ## field, \
- value)
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \
- (max_phys_tcs_per_port) + \
- (tc))
-#define LB_VOQ(port) ( \
- MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phy_tcs_pr_port) \
- ((tc) < \
- LB_TC ? PHYS_VOQ(port, \
- tc, \
- max_phy_tcs_pr_port) \
- : LB_VOQ(port))
+#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+ SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
+ cmd ## _ ## field, \
+ value)
+
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+ ext_voq, wrr) \
+ do { \
+ typeof(map) __map; \
+ memset(&__map, 0, sizeof(__map)); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
+ rl_valid); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
+ vp_pq_id); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
+ SET_FIELD(__map.reg, \
+ QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
+ *((u32 *)&__map)); \
+ (map) = __map; \
+ } while (0)
+
+#define WRITE_PQ_INFO_TO_RAM 1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+ (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
+ ((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
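PQ_INFO_ELEMENT() packs six fields into the u32 written to internal RAM; the shifts imply the layout vp:bits 0-11, pf:12-15, tc:16-19, port:20-21, rl_valid:22, rl:24 and up. A small packing check with arbitrary illustrative values:

#include <stdint.h>
#include <stdio.h>

#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	 ((rl_valid) << 22) | ((rl) << 24))

int main(void)
{
	/* vp=5, pf=2, tc=3, port=1, rl_valid=1, rl=7 -> 0x07532005 */
	uint32_t pq_info = PQ_INFO_ELEMENT(5u, 2u, 3u, 1u, 1u, 7u);

	printf("pq_info = 0x%08x\n", pq_info);
	printf("vp=%u pf=%u tc=%u\n",
	       pq_info & 0xfff,		/* bits 0-11  */
	       (pq_info >> 12) & 0xf,	/* bits 12-15 */
	       (pq_info >> 16) & 0xf);	/* bits 16-19 */
	return 0;
}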
+
/******************** INTERNAL IMPLEMENTATION *********************/
+
+/* Returns the external VOQ number */
+static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
+ u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
+{
+ if (tc == PURE_LB_TC)
+ return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
+ else
+ return port_id * max_phys_tcs_per_port + tc;
+}
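The VOQ mapping is plain arithmetic: each physical TC gets VOQ port_id * max_phys_tcs_per_port + tc, and each port's pure-LB TC gets a dedicated VOQ past all physical ones. A standalone version with illustrative constants (the real values come from the qed HSI headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the driver takes these from its HSI headers */
#define NUM_OF_PHYS_TCS		8
#define MAX_NUM_PORTS_BB	2
#define PURE_LB_TC		NUM_OF_PHYS_TCS

static uint8_t get_ext_voq(uint8_t port_id, uint8_t tc,
			   uint8_t max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)	/* per-port LB VOQ past all physical VOQs */
		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
	return port_id * max_phys_tcs_per_port + tc;
}

int main(void)
{
	/* Port 1, TC 3, 4 physical TCs per port -> VOQ 7 */
	printf("phys voq = %u\n", get_ext_voq(1, 3, 4));
	/* Port 1 pure LB -> VOQ 17 (8 * 2 + 1) */
	printf("lb voq   = %u\n", get_ext_voq(1, PURE_LB_TC, 4));
	return 0;
}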
+
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
/* Enable RLs for all VOQs */
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
- (1 << MAX_NUM_VOQS) - 1);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (u32)voq_bit_mask);
+ if (num_ext_voqs >= 32)
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+ (u32)(voq_bit_mask >> 32));
+
/* Write RL period */
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
@@ -147,7 +243,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_PF_RL_UPPER_BOUND);
}
}
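Because the RL-enable bitmap can cover more than 32 VOQs, the function builds a 64-bit mask and writes it as up to two 32-bit runtime registers. The split is shift-and-truncate, sketched here for a hypothetical 36-VOQ device:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* E.g. 36 VOQs -> mask with the 36 low bits set */
	uint8_t num_ext_voqs = 36;
	uint64_t voq_bit_mask = ((uint64_t)1 << num_ext_voqs) - 1;

	/* Low 32 bits go to the base register... */
	printf("lsb = 0x%08x\n", (uint32_t)voq_bit_mask);
	/* ...and the high bits to the _MSB register when num >= 32 */
	if (num_ext_voqs >= 32)
		printf("msb = 0x%08x\n", (uint32_t)(voq_bit_mask >> 32));
	return 0;
}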
@@ -181,7 +277,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_VP_RL_BYPASS_THRESH_SPEED);
}
}
@@ -202,15 +298,15 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
* the specified VOQ.
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
- u8 voq, u16 cmdq_lines)
+ u8 ext_voq, u16 cmdq_lines)
{
- u32 qm_line_crd;
+ u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
- qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
- OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
(u32)cmdq_lines);
- STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
- STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
qm_line_crd);
}
@@ -221,43 +317,52 @@ static void qed_cmdq_lines_rt_init(
u8 max_phys_tcs_per_port,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+
+ /* Clear PBF lines of all VOQs */
+ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
- /* Clear PBF lines for all VOQs */
- for (voq = 0; voq < MAX_NUM_VOQS; voq++)
- STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active) {
- u16 phys_lines, phys_lines_per_tc;
-
- /* find #lines to divide between active phys TCs */
- phys_lines = port_params[port_id].num_pbf_cmd_lines -
- PBF_CMDQ_PURE_LB_LINES;
- /* find #lines per active physical TC */
- num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- num_tcs_in_port++;
- }
+ u16 phys_lines, phys_lines_per_tc;
- phys_lines_per_tc = phys_lines / num_tcs_in_port;
- /* init registers per active TC */
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) != 1)
- continue;
+ if (!port_params[port_id].active)
+ continue;
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
- phys_lines_per_tc);
- }
+ /* Find number of command queue lines to divide between the
+		 * active physical TCs. In E5, 1/8 of the lines are reserved;
+		 * the lines for the pure LB TC are subtracted.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines;
+ phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
- /* init registers for pure LB TC */
- qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
- PBF_CMDQ_PURE_LB_LINES);
+ /* Init registers per active TC */
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ tc, max_phys_tcs_per_port);
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ qed_cmdq_lines_voq_rt_init(p_hwfn,
+ ext_voq,
+ phys_lines_per_tc);
}
+
+ /* Init registers for pure LB TC */
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ PURE_LB_TC, max_phys_tcs_per_port);
+ qed_cmdq_lines_voq_rt_init(p_hwfn,
+ ext_voq, PBF_CMDQ_PURE_LB_LINES);
}
}
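Worked numbers for the division above, using made-up port parameters: with 3350 PBF command lines on a port, 150 reserved for pure LB, and two active physical TCs, each TC gets 1600 lines.

#include <stdint.h>
#include <stdio.h>

#define PBF_CMDQ_PURE_LB_LINES	150

int main(void)
{
	/* Illustrative port parameters, not taken from real hardware */
	uint16_t num_pbf_cmd_lines = 3350;
	uint8_t active_phys_tcs = 0x05;	/* TCs 0 and 2 active */
	uint8_t num_tcs_in_port = 0, tc;
	uint16_t phys_lines, phys_lines_per_tc;

	/* Subtract the pure-LB reservation, then count active TCs the
	 * same way qed_cmdq_lines_rt_init() does.
	 */
	phys_lines = num_pbf_cmd_lines - PBF_CMDQ_PURE_LB_LINES;
	for (tc = 0; tc < 8; tc++)
		if ((active_phys_tcs >> tc) & 0x1)
			num_tcs_in_port++;
	phys_lines_per_tc = phys_lines / num_tcs_in_port;

	printf("%u lines per active TC\n", phys_lines_per_tc);	/* 1600 */
	return 0;
}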
@@ -268,11 +373,9 @@ static void qed_btb_blocks_rt_init(
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- u32 temp;
-
if (!port_params[port_id].active)
continue;
@@ -280,13 +383,14 @@ static void qed_btb_blocks_rt_init(
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
- /* find blocks per physical TC */
+		/* Find blocks per physical TC. Use a factor to avoid
+		 * floating-point arithmetic.
+ */
num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) == 1)
num_tcs_in_port++;
- }
pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
(num_tcs_in_port * BTB_PURE_LB_FACTOR +
@@ -299,47 +403,55 @@ static void qed_btb_blocks_rt_init(
/* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) != 1)
- continue;
-
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
- phys_blocks);
+ tc) & 0x1) == 1) {
+ ext_voq =
+ qed_get_ext_voq(p_hwfn,
+ port_id,
+ tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET
+ (ext_voq), phys_blocks);
+ }
}
/* Init pure LB TC */
- temp = LB_VOQ(port_id);
- STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ PURE_LB_TC, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
pure_lb_blocks);
}
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params,
- u32 base_mem_addr_4kb)
+static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
{
- struct init_qm_vport_params *vport_params = p_params->vport_params;
- u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
- u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
- u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
- QM_PF_QUEUE_GROUP_SIZE;
- u16 i, pq_id, pq_group;
-
- /* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
- u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
- u32 mem_addr_4kb = base_mem_addr_4kb;
+ u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
+ struct init_qm_pq_params *pq_params = p_params->pq_params;
+ u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+ num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+
+ first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ last_pq_group = (p_params->start_pq + num_pqs - 1) /
+ QM_PF_QUEUE_GROUP_SIZE;
+
+ pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+ vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+ mem_addr_4kb = base_mem_addr_4kb;
/* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(p_params->pf_id));
+
/* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_pf_cids));
@@ -348,58 +460,82 @@ static void qed_tx_pq_map_rt_init(
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
- u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
- p_params->max_phys_tcs_per_port);
- bool is_vf_pq = (i >= p_params->num_pf_pqs);
- struct qm_rf_pq_map tx_pq_map;
-
- bool rl_valid = p_params->pq_params[i].rl_valid &&
- (p_params->pq_params[i].vport_id <
- MAX_QM_GLOBAL_RLS);
+ u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
+ u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ struct qm_rf_pq_map_e4 tx_pq_map;
+ bool is_vf_pq, rl_valid;
+ u16 *p_first_tx_pq_id;
+
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ p_params->port_id,
+ tc_id,
+ p_params->max_phys_tcs_per_port);
+ is_vf_pq = (i >= p_params->num_pf_pqs);
+ rl_valid = pq_params[i].rl_valid &&
+ pq_params[i].vport_id < max_qm_global_rls;
/* Update first Tx PQ of VPORT/TC */
- u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
- p_params->start_vport;
- u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
- u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+ vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
+ p_first_tx_pq_id =
+ &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
+ if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
+ u32 map_val =
+ (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
- if (first_tx_pq_id == QM_INVALID_PQ_ID) {
/* Create new VP PQ */
- pq_ids[p_params->pq_params[i].tc_id] = pq_id;
- first_tx_pq_id = pq_id;
+ *p_first_tx_pq_id = pq_id;
/* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET +
- first_tx_pq_id,
- (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
- (p_params->pf_id <<
- QM_WFQ_VP_PQ_PF_SHIFT));
+ *p_first_tx_pq_id,
+ map_val);
}
- if (p_params->pq_params[i].rl_valid && !rl_valid)
+ /* Check RL ID */
+ if (pq_params[i].rl_valid && pq_params[i].vport_id >=
+ max_qm_global_rls)
DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration");
- /* Fill PQ map entry */
- memset(&tx_pq_map, 0, sizeof(tx_pq_map));
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
- SET_FIELD(tx_pq_map.reg,
- QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- rl_valid ?
- p_params->pq_params[i].vport_id : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
- p_params->pq_params[i].wrr_group);
- /* Write PQ map entry to CAM */
- STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
- *((u32 *)&tx_pq_map));
- /* Set base address */
+ "Invalid VPORT ID for rate limiter configuration\n");
+
+ /* Prepare PQ map entry */
+ QM_INIT_TX_PQ_MAP(p_hwfn,
+ tx_pq_map,
+ E4,
+ pq_id,
+ rl_valid ? 1 : 0,
+ *p_first_tx_pq_id,
+ rl_valid ? pq_params[i].vport_id : 0,
+ ext_voq, pq_params[i].wrr_group);
+
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+ /* Clear PQ pointer table entry (64 bit) */
+ if (p_params->is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLTX_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
+ /* Write PQ info to RAM */
+ if (WRITE_PQ_INFO_TO_RAM != 0) {
+ u32 pq_info = 0;
+
+ pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
+ p_params->pf_id,
+ tc_id,
+ p_params->port_id,
+ rl_valid ? 1 : 0,
+ rl_valid ?
+ pq_params[i].vport_id : 0);
+ qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+ pq_info);
+ }
+
/* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
tx_pq_vf_mask[pq_id /
@@ -421,16 +557,16 @@ static void qed_tx_pq_map_rt_init(
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
- u8 port_id,
u8 pf_id,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
- u16 i, pq_id, pq_group;
+ u16 i, j, pq_id, pq_group;
- /* a single other PQ group is used in each PF,
- * where PQ group i is used in PF i.
+ /* A single other PQ group is used in each PF, where PQ group i is used
+ * in PF i.
*/
pq_group = pf_id;
pq_size = num_pf_cids + num_tids;
@@ -440,16 +576,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
+
/* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+
+ /* Clear PQ pointer table entry */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLOTHER_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
mem_addr_4kb += pq_mem_4kb;
}
}
@@ -461,16 +606,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_pf_rt_init_params *p_params)
{
u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
- u32 crd_reg_offset;
- u32 inc_val;
+ struct init_qm_pq_params *pq_params = p_params->pq_params;
+ u32 inc_val, crd_reg_offset;
+ u8 ext_voq;
u16 i;
- if (p_params->pf_id < MAX_NUM_PFS_BB)
- crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
- else
- crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
- crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
-
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
@@ -478,19 +618,26 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
}
for (i = 0; i < num_tx_pqs; i++) {
- u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
- p_params->max_phys_tcs_per_port);
-
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ p_params->port_id,
+ pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+ crd_reg_offset =
+ (p_params->pf_id < MAX_NUM_PFS_BB ?
+ QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ ext_voq * MAX_NUM_PFS_BB +
+ (p_params->pf_id % MAX_NUM_PFS_BB);
OVERWRITE_RT_REG(p_hwfn,
- crd_reg_offset + voq * MAX_NUM_PFS_BB,
- QM_WFQ_CRD_REG_SIGN_BIT);
+ crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
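+		/* The WFQ PF credit registers form a (VOQ x PF) array,
+		 * with a separate MSB block for PF IDs beyond
+		 * MAX_NUM_PFS_BB; the offset above selects the entry
+		 * for this PF and extended VOQ.
+		 */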
}
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+ QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
+
return 0;
}
@@ -501,15 +648,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
- STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
- QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
return 0;
}
@@ -520,12 +671,12 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
+ u16 vport_pq_id;
u32 inc_val;
u8 tc, i;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
-
if (!vport_params[i].vport_wfq)
continue;
@@ -536,17 +687,14 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return -1;
}
- /* each VPORT can have several VPORT PQ IDs for
- * different TCs
- */
+ /* Each VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
-
+ vport_pq_id = vport_params[i].first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPCRD_RT_OFFSET +
vport_pq_id,
- QM_WFQ_CRD_REG_SIGN_BIT);
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPWEIGHT_RT_OFFSET +
vport_pq_id, inc_val);
@@ -557,12 +705,17 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
+ u32 link_speed,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
+ u32 inc_val;
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
@@ -572,22 +725,22 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
-
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+ vport_params[i].vport_rl :
+ link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn,
"Invalid VPORT rate-limit configuration\n");
return -1;
}
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
- QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ QM_VP_RL_UPPER_BOUND(link_speed) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
@@ -599,7 +752,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
{
u32 reg_val, i;
- for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
i++) {
udelay(QM_STOP_CMD_POLL_PERIOD_US);
reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
@@ -632,8 +785,8 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
}
/******************** INTERFACE IMPLEMENTATION *********************/
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
+
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
@@ -642,11 +795,10 @@ u32 qed_qm_pf_mem_size(u8 pf_id,
QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
-int qed_qm_common_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_qm_common_rt_init_params *p_params)
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params)
{
- /* init AFullOprtnstcCrdMask */
+ /* Init AFullOprtnstcCrdMask */
u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
@@ -664,18 +816,31 @@ int qed_qm_common_rt_init(
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+ /* Enable/disable PF RL */
qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+
+ /* Enable/disable PF WFQ */
qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+
+ /* Enable/disable VPORT RL */
qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+
+ /* Enable/disable VPORT WFQ */
qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+
+ /* Init PBF CMDQ line credit */
qed_cmdq_lines_rt_init(p_hwfn,
p_params->max_ports_per_engine,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+
+ /* Init BTB blocks in PBF */
qed_btb_blocks_rt_init(p_hwfn,
p_params->max_ports_per_engine,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+
return 0;
}
@@ -695,24 +860,31 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
/* Map Other PQs (if any) */
- qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
- p_params->num_pf_cids, p_params->num_tids, 0);
+ qed_other_pq_map_rt_init(p_hwfn,
+ p_params->pf_id,
+ p_params->is_pf_loading, p_params->num_pf_cids,
+ p_params->num_tids, 0);
/* Map Tx PQs */
qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+ /* Init PF WFQ */
if (p_params->pf_wfq)
if (qed_pf_wfq_rt_init(p_hwfn, p_params))
return -1;
+ /* Init PF RL */
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
+ /* Set VPORT WFQ */
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
+ /* Set VPORT RL */
if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
- p_params->num_vports, vport_params))
+ p_params->num_vports, p_params->link_speed,
+ vport_params))
return -1;
return 0;
@@ -729,6 +901,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
}
qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
return 0;
}
@@ -737,14 +910,13 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
- qed_wr(p_hwfn, p_ptt,
- QM_REG_RLPFCRD + pf_id * 4,
- QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
return 0;
@@ -767,33 +939,35 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
for (tc = 0; tc < NUM_OF_TCS; tc++) {
vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
- qed_wr(p_hwfn, p_ptt,
- QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
- inc_val);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
}
return 0;
}
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+ struct qed_ptt *p_ptt,
+ u8 vport_id, u32 vport_rl, u32 link_speed)
{
- u32 inc_val = QM_RL_INC_VAL(vport_rl);
+ u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
- if (vport_id >= MAX_QM_GLOBAL_RLS) {
+ if (vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
return -1;
}
- qed_wr(p_hwfn, p_ptt,
- QM_REG_RLGLBLCRD + vport_id * 4,
- QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
return 0;
@@ -805,23 +979,27 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
- u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+ u32 pq_mask = 0, last_pq, pq_id;
+
+ last_pq = start_pq + num_pqs - 1;
/* Set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+ /* Go over requested PQs */
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
/* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
- pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+ pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));
/* If last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
- QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
- PAUSE_MASK, pq_mask);
- QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ QM_CMD_SET_FIELD(cmd_arr,
+ QM_STOP_CMD, PAUSE_MASK, pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr,
+ QM_STOP_CMD,
GROUP_ID,
pq_id / QM_STOP_PQ_MASK_WIDTH);
if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
@@ -834,87 +1012,103 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
-static void
-qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
-{
- if (enable)
- set_bit(bit, var);
- else
- clear_bit(bit, var);
-}
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+ do { \
+ typeof(var) *__p_var = &(var); \
+ typeof(offset) __offset = offset; \
+ *__p_var = (*__p_var & ~BIT(__offset)) | \
+ ((enable) ? BIT(__offset) : 0); \
+ } while (0)
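+
+/* For example, with var = 0x5 and offset = 1 (BIT(1) = 0x2):
+ * enable = true  gives (0x5 & ~0x2) | 0x2 = 0x7, while
+ * enable = false gives (0x5 & ~0x2) | 0x0 = 0x5.
+ * The typeof() locals make var and offset single-evaluation.
+ */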
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
{
+ /* Update PRS register */
qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+ /* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+ /* Update PBF register */
qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable)
{
- unsigned long reg_val = 0;
+ u32 reg_val;
u8 shift;
+ /* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-
if (reg_val)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ /* Update NIG register */
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
- vxlan_enable ? 1 : 0);
+ /* Update DORQ register */
+ qed_wr(p_hwfn,
+ p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
-void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable)
{
- unsigned long reg_val = 0;
+ u32 reg_val;
u8 shift;
+ /* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ /* Update NIG register */
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
- eth_gre_enable ? 1 : 0);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
- ip_gre_enable ? 1 : 0);
+ /* Update DORQ registers */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
+ qed_wr(p_hwfn,
+ p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
{
+ /* Update PRS register */
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+ /* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+ /* Update PBF register */
qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
@@ -922,32 +1116,39 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable)
{
- unsigned long reg_val = 0;
+ u32 reg_val;
u8 shift;
+ /* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ /* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
- /* EDPM with geneve tunnel not supported in BB_B0 */
+	/* EDPM with GENEVE tunnel is not supported in BB */
if (QED_IS_BB_B0(p_hwfn->cdev))
return;
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+ /* Update DORQ registers */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
eth_geneve_enable ? 1 : 0);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
ip_geneve_enable ? 1 : 0);
}
@@ -959,117 +1160,297 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 pf_id)
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
- u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
- pf_id * RAM_LINE_SIZE;
-
- /*stop using gft logic */
+	/* Disable GFT search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+	/* Clean RAM & CAM for next GFT session */
+
+	/* Zero CAM line */
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
- qed_wr(p_hwfn, p_ptt, hw_addr, 0);
- qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
+
+	/* Zero RAM line */
+ qed_wr(p_hwfn,
+ p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+ 0);
}
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 pf_id, bool tcp, bool udp,
- bool ipv4, bool ipv6)
+void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- union gft_cam_line_union camline;
- struct gft_ram_line ramline;
u32 rfs_cm_hdr_event_id;
+	/* Set RFS event ID to be awakened in Tstorm by PRS */
rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+{
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
- "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
if (!tcp && !udp)
DP_NOTICE(p_hwfn,
- "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+			  "gft_config: must accept at least one of - udp or tcp\n");
+ if (profile_type >= MAX_GFT_PROFILE_TYPE)
+ DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
- rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+	/* Set RFS event ID to be awakened in Tstorm by PRS */
+ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
- /* Configure Registers for RFS mode */
- qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+	/* On match, load only the CID in PRS, not the context. */
qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
- camline.cam_line_mapped.camline = 0;
- /* Cam line is now valid!! */
- SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_VALID, 1);
+	/* Do not use the tenant ID exist bit for GFT search */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
- /* filters are per PF!! */
- SET_FIELD(camline.cam_line_mapped.camline,
+	/* Set CAM */
+ cam_line = 0;
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
+
+	/* Filters are per PF */
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_PF_ID_MASK,
GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
- SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
if (!(tcp && udp)) {
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_TCP_PROTOCOL);
else
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_UDP_PROTOCOL);
}
if (!(ipv4 && ipv6)) {
- SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
if (ipv4)
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV4);
else
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV6);
}
/* Write characteristics to cam */
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- camline.cam_line_mapped.camline);
- camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
- PRS_REG_GFT_CAM +
- CAM_LINE_SIZE * pf_id);
+ cam_line);
+ cam_line =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ramline.lo = 0;
- ramline.hi = 0;
- SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
-
- /* Each iteration write to reg */
- qed_wr(p_hwfn, p_ptt,
+ ram_line_lo = 0;
+ ram_line_hi = 0;
+
+ if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ }
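+	/* Resulting match keys per profile:
+	 * 4_TUPLE:     src/dst IP, IP protocol, ethertype, src/dst port;
+	 * L4_DST_PORT: IP protocol, ethertype, dst port;
+	 * IP_DST_PORT: dst IP, ethertype.
+	 */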
+
+ qed_wr(p_hwfn,
+ p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- ramline.lo);
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
- ramline.hi);
+ ram_line_lo);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+ ram_line_hi);
/* Set default profile so that no filter match will happen */
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
- ramline.lo);
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
- ramline.hi);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+
+	/* Enable GFT search */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
+
+DECLARE_CRC8_TABLE(cdu_crc8_table);
+
+/* Calculate and return CDU validation byte per connection type/region/cid */
+static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+ const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+ u8 crc, validation_byte = 0;
+ static u8 crc8_table_valid; /* automatically initialized to 0 */
+ u32 validation_string = 0;
+ u32 data_to_crc;
+
+ if (!crc8_table_valid) {
+ crc8_populate_msb(cdu_crc8_table, 0x07);
+ crc8_table_valid = 1;
+ }
+
+ /* The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [7:4] = Region
+ * [3:0] = Type
+ */
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
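+	/* e.g. cid = 0xABCDE123 yields 0xABC00000 | (0x123 << 8) =
+	 * 0xABC12300, i.e. {CID[31:20], CID[11:0]} in bits [31:8].
+	 */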
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+
+ /* Convert to big-endian and calculate CRC8 */
+ data_to_crc = be32_to_cpu(validation_string);
+
+ crc = crc8(cdu_crc8_table,
+ (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
+
+	/* The validation byte [7:0] is composed as follows:
+	 *
+	 * For type A validation:
+	 * [7]   = active configuration bit
+	 * [6:0] = crc[6:0]
+	 *
+	 * For type B validation:
+	 * [7]   = active configuration bit
+	 * [6:3] = connection_type[3:0]
+	 * [2:0] = crc[2:0]
+	 */
+ validation_byte |=
+ ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+ if ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+ else
+ validation_byte |= crc & 0x7F;
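+	/* e.g. type B with conn_type = 0x3 and crc = 0xA5 gives
+	 * (0x3 << 3) | (0xA5 & 0x7) = 0x1d in bits [6:0].
+	 */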
+
+ return validation_byte;
+}
+
+/* Calculate and set validation bytes for session context */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
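+	/* These offsets locate the X/T/U-storm validation bytes, which
+	 * correspond to connection regions 3, 4 and 5 below.
+	 */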
+
+ memset(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid)
+{
+ u8 *p_ctx, *region1_val_ptr;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ memset(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+ u8 x_val, t_val, u_val;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ x_val = *x_val_ptr;
+ t_val = *t_val_ptr;
+ u_val = *u_val_ptr;
+
+ memset(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = x_val;
+ *t_val_ptr = t_val;
+ *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *p_ctx, *region1_val_ptr;
+ u8 region1_val;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ region1_val = *region1_val_ptr;
+
+ memset(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 ctx_validation;
+
+ /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+ /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+	/* Enable validation for task region 1: TCFC_CTX_VALID0[15:8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index e3f368882f46..3bb76da6baa2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
}
/* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct init_callback_op *p_cmd)
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
{
- DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+ int rc;
+
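+	/* DMAE_READY_CB is presumably signalled once DMAE becomes
+	 * usable; run a sanity transaction before further init commands
+	 * (the "engine_phase" tag names the phase being verified).
+	 */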
+ switch (p_cmd->callback_id) {
+ case DMAE_READY_CB:
+ rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+ p_cmd->callback_id);
+ return -EINVAL;
+ }
+
+ return rc;
}
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
break;
case INIT_OP_CALLBACK:
- qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 719cdbfe1695..d3eabcf9c86c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -59,10 +59,10 @@ struct qed_pi_info {
};
struct qed_sb_sp_info {
- struct qed_sb_info sb_info;
+ struct qed_sb_info sb_info;
/* per protocol index data */
- struct qed_pi_info pi_info_arr[PIS_PER_SB];
+ struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};
enum qed_attention_type {
@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_LENGTH_SHIFT (4)
#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
ATTENTION_LENGTH_SHIFT)
-#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
ATTENTION_PARITY)
@@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
if (IS_VF(p_hwfn->cdev))
return;
- sb_offset = igu_sb_id * PIS_PER_SB;
+ sb_offset = igu_sb_id * PIS_PER_SB_E4;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 5199634ed630..54b4ee0acfd7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
#define QED_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
- ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+ ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
#define QED_SB_INVALID_IDX 0xffff
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 813c77cc857f..c0d4a54a5edb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -62,22 +62,6 @@
#include "qed_sriov.h"
#include "qed_reg_addr.h"
-static int
-qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code,
- u16 echo, union event_ring_data *data, u8 fw_return_code)
-{
- if (p_hwfn->p_iscsi_info->event_cb) {
- struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-
- return p_iscsi->event_cb(p_iscsi->event_context,
- fw_event_code, data);
- } else {
- DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
- return -EINVAL;
- }
-}
-
struct qed_iscsi_conn {
struct list_head list_entry;
bool free_on_delete;
@@ -105,7 +89,7 @@ struct qed_iscsi_conn {
u8 local_mac[6];
u8 remote_mac[6];
u16 vlan_id;
- u8 tcp_flags;
+ u16 tcp_flags;
u8 ip_version;
u32 remote_ip[4];
u32 local_ip[4];
@@ -122,7 +106,6 @@ struct qed_iscsi_conn {
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
- u32 ts_time;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
@@ -144,7 +127,6 @@ struct qed_iscsi_conn {
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
- u32 ts_ticks_per_second;
u16 da_timeout_value;
u8 ack_frequency;
@@ -162,6 +144,22 @@ struct qed_iscsi_conn {
};
static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+ if (p_hwfn->p_iscsi_info->event_cb) {
+ struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+ return p_iscsi->event_cb(p_iscsi->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+ return -EINVAL;
+ }
+}
+
+static int
qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr,
@@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
- p_init->ooo_enable = p_params->ooo_enable;
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
p_params->ll2_ooo_queue_id;
+
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
val = p_params->tx_sws_timer;
p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
- p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
+ p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
@@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
int rc = 0;
u32 dval;
u16 wval;
- u8 i;
u16 *p;
+ u8 i;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
- p_tcp->flags = p_conn->tcp_flags;
+ p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
p_tcp->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) {
dval = p_conn->remote_ip[i];
@@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
- p_tcp2->flags = p_conn->tcp_flags;
+ p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
p_tcp2->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) {
@@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+ p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
+ p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
}
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
}
}
-static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_conn *p_conn)
+static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
{
if (!p_conn->queue_cnts_virt_addr)
goto nomem;
@@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
if (!rc)
- rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+ rc = qed_iscsi_setup_connection(p_conn);
if (rc) {
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
@@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
con->ss_thresh = conn_info->ss_thresh;
con->srtt = conn_info->srtt;
con->rtt_var = conn_info->rtt_var;
- con->ts_time = conn_info->ts_time;
con->ts_recent = conn_info->ts_recent;
con->ts_recent_age = conn_info->ts_recent_age;
con->total_rt = conn_info->total_rt;
@@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
con->mss = conn_info->mss;
con->snd_wnd_scale = conn_info->snd_wnd_scale;
con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
- con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
con->da_timeout_value = conn_info->da_timeout_value;
con->ack_frequency = conn_info->ack_frequency;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 409041eab189..ca4a81dc1ace 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -64,14 +64,21 @@ struct mpa_v2_hdr {
#define QED_IWARP_INVALID_TCP_CID 0xffffffff
#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
-#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff)
#define TIMESTAMP_HEADER_SIZE (12)
+#define QED_IWARP_MAX_FIN_RT_DEFAULT (2)
#define QED_IWARP_TS_EN BIT(0)
#define QED_IWARP_DA_EN BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED (1)
#define QED_IWARP_PARAM_P2P (1)
+#define QED_IWARP_DEF_MAX_RT_TIME (0)
+#define QED_IWARP_DEF_CWND_FACTOR (4)
+#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
+#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
+#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
+
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code, u16 echo,
union event_ring_data *data,
@@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
- struct iwarp_init_func_params *p_ramrod)
+void
+qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+ struct iwarp_init_func_ramrod_data *p_ramrod)
{
- p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
- p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ p_ramrod->iwarp.ll2_ooo_q_index =
+ RESC_START(p_hwfn, QED_LL2_QUEUE) +
+ p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+ p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
}
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
@@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
tcp->ttl = 0x40;
tcp->tos_or_tc = 0;
+ tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
+ tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
+ tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
+ tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
+ tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
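+	/* Per the defines above: a 20 min keepalive timeout, 1 sec
+	 * probe interval, and an initial cwnd of 4 segments.
+	 */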
+
tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
tcp->connect_mode = ep->connect_mode;
@@ -807,6 +826,7 @@ static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+ struct qed_iwarp_info *iwarp_info;
struct qed_sp_init_data init_data;
dma_addr_t async_output_phys;
struct qed_spq_entry *p_ent;
@@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
p_mpa_ramrod->common.reject = 1;
}
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
p_mpa_ramrod->mode = ep->mpa_rev;
SET_FIELD(p_mpa_ramrod->rtr_pref,
IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
@@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+ iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
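+	/* i.e. the window in scaled units, as it would be advertised
+	 * on the wire under TCP window scaling.
+	 */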
iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index c1ecd743305f..b8f612d00241 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -95,6 +95,7 @@ struct qed_iwarp_info {
spinlock_t iw_lock; /* for iwarp resources */
spinlock_t qp_lock; /* for teardown races */
u32 rcv_wnd_scale;
+ u16 rcv_wnd_size;
u16 max_mtu;
u8 mac_addr[ETH_ALEN];
u8 crc_needed;
@@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params);
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
- struct iwarp_init_func_params *p_ramrod);
+ struct iwarp_init_func_ramrod_data *p_ramrod);
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 085338990f49..893ef08a4b39 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -223,10 +223,9 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid;
int rc;
- p_cid = vmalloc(sizeof(*p_cid));
+ p_cid = vzalloc(sizeof(*p_cid));
if (!p_cid)
return NULL;
- memset(p_cid, 0, sizeof(*p_cid));
p_cid->opaque_fid = opaque_fid;
p_cid->cid = cid;
@@ -1969,33 +1968,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
_qed_get_vport_stats(cdev, cdev->reset_stats);
}
-static void
-qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- struct qed_arfs_config_params *p_cfg_params)
+static enum gft_profile_type
+qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
{
- if (p_cfg_params->arfs_enable) {
- qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
- p_cfg_params->tcp, p_cfg_params->udp,
- p_cfg_params->ipv4, p_cfg_params->ipv6);
- DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
+ return GFT_PROFILE_TYPE_4_TUPLE;
+ if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+ return GFT_PROFILE_TYPE_IP_DST_PORT;
+ return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+ qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6,
+ qed_arfs_mode_to_hsi(p_cfg_params->mode));
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s, mode = %08x\n",
p_cfg_params->tcp ? "Enable" : "Disable",
p_cfg_params->udp ? "Enable" : "Disable",
p_cfg_params->ipv4 ? "Enable" : "Disable",
- p_cfg_params->ipv6 ? "Enable" : "Disable");
+ p_cfg_params->ipv6 ? "Enable" : "Disable",
+ (u32)p_cfg_params->mode);
} else {
- qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
+ qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
-
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
- p_cfg_params->arfs_enable ? "Enable" : "Disable");
}
-static int
-qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb,
- dma_addr_t p_addr, u16 length, u16 qid,
- u8 vport_id, bool b_is_add)
+ struct qed_ntuple_filter_params *p_params)
{
struct rx_update_gft_filter_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -2004,13 +2015,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 abs_vport_id = 0;
int rc = -EINVAL;
- rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
return rc;
- rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
- if (rc)
- return rc;
+ if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+ rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
+ if (rc)
+ return rc;
+ }
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -2032,17 +2045,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
p_ramrod = &p_ent->ramrod.rx_update_gft;
- DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
- p_ramrod->pkt_hdr_length = cpu_to_le16(length);
- p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->filter_type = RFS_FILTER_TYPE;
- p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+ p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+
+ if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+ p_ramrod->rx_qid_valid = 1;
+ p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+ }
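+	/* With QED_RFS_NTUPLE_QID_RSS no explicit queue is set, so
+	 * matches presumably fall back to the VPORT's RSS
+	 * classification.
+	 */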
+
+ p_ramrod->flow_id_valid = 0;
+ p_ramrod->flow_id = 0;
+
+ p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+ p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+ : GFT_DELETE_FILTER;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
abs_vport_id, abs_rx_q_id,
- b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+ p_params->b_is_add ? "Adding" : "Removing",
+ (u64)p_params->addr, p_params->length);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -2743,7 +2766,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
}
}
-static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+static int qed_configure_arfs_searcher(struct qed_dev *cdev,
+ enum qed_filter_config_mode mode)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_arfs_config_params arfs_config_params;
@@ -2753,8 +2777,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
arfs_config_params.udp = true;
arfs_config_params.ipv4 = true;
arfs_config_params.ipv6 = true;
- arfs_config_params.arfs_enable = en_searcher;
-
+ arfs_config_params.mode = mode;
qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
&arfs_config_params);
return 0;
@@ -2762,8 +2785,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
- void *cookie, union event_ring_data *data,
- u8 fw_return_code)
+ void *cookie,
+ union event_ring_data *data, u8 fw_return_code)
{
struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
void *dev = p_hwfn->cdev->ops_cookie;
@@ -2771,10 +2794,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
op->arfs_filter_op(dev, cookie, fw_return_code);
}
-static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
- dma_addr_t mapping, u16 length,
- u16 vport_id, u16 rx_queue_id,
- bool add_filter)
+static int
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
+ void *cookie,
+ struct qed_ntuple_filter_params *params)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_spq_comp_cb cb;
@@ -2783,9 +2806,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
cb.function = qed_arfs_sp_response_handler;
cb.cookie = cookie;
- rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
- &cb, mapping, length, rx_queue_id,
- vport_id, add_filter);
+ if (params->b_is_vf) {
+ if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
+ false)) {
+ DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
+ params->vf_id);
+ return rc;
+ }
+
+ params->vport_id = params->vf_id + 1;
+ params->qid = QED_RFS_NTUPLE_QID_RSS;
+ }
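+	/* For a VF the relative VPORT is vf_id + 1 (VPORT 0 presumably
+	 * belongs to the PF), and the queue is left to RSS.
+	 */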
+
+ rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
if (rc)
DP_NOTICE(p_hwfn,
"Failed to issue a-RFS filter configuration\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index cc1f248551c9..c4030e949cce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -190,7 +190,7 @@ struct qed_arfs_config_params {
bool udp;
bool ipv4;
bool ipv6;
- bool arfs_enable;
+ enum qed_filter_config_mode mode;
};
struct qed_sp_vport_update_params {
@@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
+/**
+ * @brief qed_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable RFS mode.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params - aRFS mode configuration parameters.
+ *
+ */
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - qed_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an aRFS HW filter.
+ *
+ * @param p_hwfn
+ * @param p_cb - Used for QED_SPQ_MODE_CB, where the client would initialize
+ *               it with a cookie and a callback function address; if not
+ *               using this mode, the client must pass NULL.
+ * @param p_params
+ */
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+ struct qed_spq_comp_cb *p_cb,
+ struct qed_ntuple_filter_params *p_params);
+
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
#define QED_QUEUE_CID_SELF (0xff)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 047f556ca62e..c4f14fdc4e77 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+ data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
+
+ data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
}
static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
@@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
- p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
+ p_ramrod->inner_vlan_stripping_en =
+ p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
@@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
- p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
- CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+ switch (data->input.tx_dest) {
+ case QED_LL2_TX_DEST_NW:
+ p_ll2_info->tx_dest = CORE_TX_DEST_NW;
+ break;
+ case QED_LL2_TX_DEST_LB:
+ p_ll2_info->tx_dest = CORE_TX_DEST_LB;
+ break;
+ case QED_LL2_TX_DEST_DROP:
+ p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
if (data->input.conn_type == QED_LL2_TYPE_OOO ||
data->input.secondary_queue)
p_ll2_info->main_func_queue = false;
@@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
goto release_terminate;
}
- if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
- cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
rc = qed_ll2_start_ooo(cdev, params);
if (rc) {
@@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
eth_zero_addr(cdev->ll2_mac_address);
- if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
- cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
qed_ll2_stop_ooo(cdev);
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 8b99c7d26f34..6f46cb11f349 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MSG_CODE_NVM_READ_NVRAM,
addr + offset +
(bytes_to_copy <<
- DRV_MB_PARAM_NVM_LEN_SHIFT),
+ DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param,
&read_len,
(u32 *)(p_buf + offset));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c8c4b3940564..5d040b873137 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -358,10 +358,27 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
kfree(p_rdma_info);
}
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
+{
+ qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
+}
+
static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+ qed_rdma_free_reserved_lkey(p_hwfn);
qed_rdma_resc_free(p_hwfn);
}
@@ -553,7 +570,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
qed_iwarp_init_fw_ramrod(p_hwfn,
- &p_ent->ramrod.iwarp_init_func.iwarp);
+ &p_ent->ramrod.iwarp_init_func);
p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
} else {
p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
@@ -615,9 +632,6 @@ static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
- /* The first DPI is reserved for the Kernel */
- __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
-
/* Tid 0 will be used as the key for "reserved MR".
* The driver should allocate memory for it so it can be loaded but no
* ramrod should be passed on it.
@@ -797,17 +811,6 @@ static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
-static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 0cdb4337b3a0..f7122059b6b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -124,6 +124,8 @@
0x1f0434UL
#define PRS_REG_SEARCH_TAG1 \
0x1f0444UL
+#define PRS_REG_SEARCH_TENANT_ID \
+ 0x1f044cUL
#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
0x1f0a0cUL
#define PRS_REG_SEARCH_TCP_FIRST_FRAG \
@@ -200,7 +202,13 @@
0x2e8800UL
#define CCFC_REG_STRONG_ENABLE_VF \
0x2e070cUL
-#define CDU_REG_CID_ADDR_PARAMS \
+#define CDU_REG_CCFC_CTX_VALID0 \
+ 0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 \
+ 0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 \
+ 0x580408UL
+#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
@@ -564,7 +572,7 @@
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
@@ -580,11 +588,11 @@
#define PRS_REG_NGE_PORT 0x1f086cUL
#define NIG_REG_NGE_PORT 0x508b38UL
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
-#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
@@ -595,15 +603,15 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
-#define PGLCS_REG_DBG_SELECT_K2 \
+#define PGLCS_REG_DBG_SELECT_K2_E5 \
0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
0x001d18UL
-#define PGLCS_REG_DBG_SHIFT_K2 \
+#define PGLCS_REG_DBG_SHIFT_K2_E5 \
0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID_K2 \
+#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \
0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \
0x001d24UL
#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
0x008070UL
@@ -615,7 +623,7 @@
0x009050UL
#define MISCS_REG_RESET_PL_HV \
0x009060UL
-#define MISCS_REG_RESET_PL_HV_2_K2 \
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 \
0x009150UL
#define DMAE_REG_DBG_SELECT \
0x00c510UL
@@ -647,15 +655,15 @@
0x0500b0UL
#define GRC_REG_DBG_FORCE_FRAME \
0x0500b4UL
-#define UMAC_REG_DBG_SELECT_K2 \
+#define UMAC_REG_DBG_SELECT_K2_E5 \
0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \
0x051098UL
-#define UMAC_REG_DBG_SHIFT_K2 \
+#define UMAC_REG_DBG_SHIFT_K2_E5 \
0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID_K2 \
+#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \
0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME_K2 \
+#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \
0x0510a4UL
#define MCP2_REG_DBG_SELECT \
0x052400UL
@@ -717,15 +725,15 @@
0x1f0ba0UL
#define PRS_REG_DBG_FORCE_FRAME \
0x1f0ba4UL
-#define CNIG_REG_DBG_SELECT_K2 \
+#define CNIG_REG_DBG_SELECT_K2_E5 \
0x218254UL
-#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \
0x218258UL
-#define CNIG_REG_DBG_SHIFT_K2 \
+#define CNIG_REG_DBG_SHIFT_K2_E5 \
0x21825cUL
-#define CNIG_REG_DBG_FORCE_VALID_K2 \
+#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \
0x218260UL
-#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \
0x218264UL
#define PRM_REG_DBG_SELECT \
0x2306a8UL
@@ -997,35 +1005,35 @@
0x580710UL
#define CDU_REG_DBG_FORCE_FRAME \
0x580714UL
-#define WOL_REG_DBG_SELECT_K2 \
+#define WOL_REG_DBG_SELECT_K2_E5 \
0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE_K2 \
+#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \
0x600144UL
-#define WOL_REG_DBG_SHIFT_K2 \
+#define WOL_REG_DBG_SHIFT_K2_E5 \
0x600148UL
-#define WOL_REG_DBG_FORCE_VALID_K2 \
+#define WOL_REG_DBG_FORCE_VALID_K2_E5 \
0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME_K2 \
+#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \
0x600150UL
-#define BMBN_REG_DBG_SELECT_K2 \
+#define BMBN_REG_DBG_SELECT_K2_E5 \
0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \
0x610144UL
-#define BMBN_REG_DBG_SHIFT_K2 \
+#define BMBN_REG_DBG_SHIFT_K2_E5 \
0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID_K2 \
+#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \
0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME_K2 \
+#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \
0x610150UL
-#define NWM_REG_DBG_SELECT_K2 \
+#define NWM_REG_DBG_SELECT_K2_E5 \
0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE_K2 \
+#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \
0x8000f0UL
-#define NWM_REG_DBG_SHIFT_K2 \
+#define NWM_REG_DBG_SHIFT_K2_E5 \
0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID_K2 \
+#define NWM_REG_DBG_FORCE_VALID_K2_E5 \
0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME_K2\
+#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \
0x8000fcUL
#define PBF_REG_DBG_SELECT \
0xd80060UL
@@ -1247,36 +1255,76 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
-#define NWS_REG_DBG_SELECT_K2 \
+#define NWS_REG_DBG_SELECT_K2_E5 \
0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE_K2 \
+#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \
0x70012cUL
-#define NWS_REG_DBG_SHIFT_K2 \
+#define NWS_REG_DBG_SHIFT_K2_E5 \
0x700130UL
-#define NWS_REG_DBG_FORCE_VALID_K2 \
+#define NWS_REG_DBG_FORCE_VALID_K2_E5 \
0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME_K2 \
+#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \
0x700138UL
-#define MS_REG_DBG_SELECT_K2 \
+#define MS_REG_DBG_SELECT_K2_E5 \
0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE_K2 \
+#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \
0x6a022cUL
-#define MS_REG_DBG_SHIFT_K2 \
+#define MS_REG_DBG_SHIFT_K2_E5 \
0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID_K2 \
+#define MS_REG_DBG_FORCE_VALID_K2_E5 \
0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME_K2 \
+#define MS_REG_DBG_FORCE_FRAME_K2_E5 \
0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT_K2 \
+#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \
0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \
0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \
0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \
0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
0x0543a8UL
+#define PTLD_REG_DBG_SELECT_E5 \
+ 0x5a1600UL
+#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
+ 0x5a1604UL
+#define PTLD_REG_DBG_SHIFT_E5 \
+ 0x5a1608UL
+#define PTLD_REG_DBG_FORCE_VALID_E5 \
+ 0x5a160cUL
+#define PTLD_REG_DBG_FORCE_FRAME_E5 \
+ 0x5a1610UL
+#define YPLD_REG_DBG_SELECT_E5 \
+ 0x5c1600UL
+#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
+ 0x5c1604UL
+#define YPLD_REG_DBG_SHIFT_E5 \
+ 0x5c1608UL
+#define YPLD_REG_DBG_FORCE_VALID_E5 \
+ 0x5c160cUL
+#define YPLD_REG_DBG_FORCE_FRAME_E5 \
+ 0x5c1610UL
+#define RGSRC_REG_DBG_SELECT_E5 \
+ 0x320040UL
+#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
+ 0x320044UL
+#define RGSRC_REG_DBG_SHIFT_E5 \
+ 0x320048UL
+#define RGSRC_REG_DBG_FORCE_VALID_E5 \
+ 0x32004cUL
+#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
+ 0x320050UL
+#define TGSRC_REG_DBG_SELECT_E5 \
+ 0x322040UL
+#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
+ 0x322044UL
+#define TGSRC_REG_DBG_SHIFT_E5 \
+ 0x322048UL
+#define TGSRC_REG_DBG_FORCE_VALID_E5 \
+ 0x32204cUL
+#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
+ 0x322050UL
#define MISC_REG_RESET_PL_UA \
0x008050UL
#define MISC_REG_RESET_PL_HV \
@@ -1415,7 +1463,7 @@
0x1940000UL
#define SEM_FAST_REG_INT_RAM \
0x020000UL
-#define SEM_FAST_REG_INT_RAM_SIZE \
+#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
20480
#define GRC_REG_TRACE_FIFO_VALID_DATA \
0x050064UL
@@ -1433,6 +1481,8 @@
0x340800UL
#define BRB_REG_BIG_RAM_DATA \
0x341500UL
+#define BRB_REG_BIG_RAM_DATA_SIZE \
+ 64
#define SEM_FAST_REG_STALL_0_BB_K2 \
0x000488UL
#define SEM_FAST_REG_STALLED \
@@ -1451,7 +1501,7 @@
0x238c30UL
#define MISCS_REG_BLOCK_256B_EN \
0x009074UL
-#define MCP_REG_SCRATCH_SIZE \
+#define MCP_REG_SCRATCH_SIZE_BB_K2 \
57344
#define MCP_REG_CPU_REG_FILE \
0xe05200UL
@@ -1485,35 +1535,35 @@
0x008c14UL
#define NWS_REG_NWS_CMU_K2 \
0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
0x0006c4UL
-#define MS_REG_MS_CMU_K2 \
+#define MS_REG_MS_CMU_K2_E5 \
0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
0x000214UL
-#define PHY_PCIE_REG_PHY0_K2 \
+#define PHY_PCIE_REG_PHY0_K2_E5 \
0x620000UL
-#define PHY_PCIE_REG_PHY1_K2 \
+#define PHY_PCIE_REG_PHY1_K2_E5 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a1d33f35aad3..5e927b6cac22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
p_ramrod->mf_mode = MF_NPAR;
}
- p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+ p_ramrod->outer_tag_config.outer_tag.tci =
+ cpu_to_le16(p_hwfn->hw_info.ovlan);
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
- sb, sb_index, p_ramrod->outer_tag);
+ "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+ sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index be48d9abd001..1673fc90027f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -97,9 +97,7 @@ static int __qed_spq_block(struct qed_hwfn *p_hwfn,
while (iter_cnt--) {
/* Validate we receive completion update */
- if (READ_ONCE(comp_done->done) == 1) {
- /* Read updated FW return value */
- smp_read_barrier_depends();
+ if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
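
The hunk above replaces READ_ONCE() plus smp_read_barrier_depends() with smp_load_acquire(), which orders the flag load before the subsequent read of fw_return_code. A minimal sketch of the release/acquire pairing this relies on, with hypothetical names (these are not functions in the driver):

/* Hypothetical illustration of release/acquire pairing. */
struct done_flag {
	u8 fw_return_code;
	int done;
};

static void publish(struct done_flag *c, u8 code)
{
	c->fw_return_code = code;        /* write the payload first */
	smp_store_release(&c->done, 1);  /* then publish the flag */
}

static int consume(struct done_flag *c, u8 *code)
{
	if (smp_load_acquire(&c->done) != 1)  /* pairs with the release */
		return -EAGAIN;
	*code = c->fw_return_code;            /* ordered after the flag read */
	return 0;
}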
@@ -215,7 +213,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
- struct core_conn_context *p_cxt;
+ struct e4_core_conn_context *p_cxt;
struct qed_cxt_info cxt_info;
u16 physical_q;
int rc;
@@ -233,11 +231,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
@@ -776,6 +774,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
bool b_ret_ent = true;
+ bool eblock;
if (!p_hwfn)
return -EINVAL;
@@ -794,6 +793,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
if (rc)
goto spq_post_fail;
+ /* Check if entry is in block mode before qed_spq_add_entry,
+ * which might kfree p_ent.
+ */
+ eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
+
/* Add the request to the pending queue */
rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
if (rc)
@@ -811,7 +815,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_spq->lock);
- if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+ if (eblock) {
/* For entries in QED BLOCK mode, the completion code cannot
* perform the necessary cleanup - if it did, we couldn't
* access p_ent here to see whether it's successful or not.
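
The fix above caches p_ent->comp_mode in a local before qed_spq_add_entry(), since that call may kfree p_ent. A generic sketch of the copy-before-handoff pattern (hypothetical types and helpers, not the driver's API):

/* Hypothetical sketch: read fields before an ownership-transferring call. */
static int submit(struct ctx *c, struct entry *e)
{
	bool blocking = (e->mode == MODE_BLOCK);  /* copy while 'e' is ours */
	int rc;

	rc = enqueue(c, e);  /* may free 'e' on its completion path */
	if (rc)
		return rc;

	if (blocking)        /* safe: 'e' itself is no longer touched */
		rc = wait_for_done(c);
	return rc;
}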
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 3f40b1de7957..5acb91b3564c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -153,9 +153,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
- int rel_vf_id,
- bool b_enabled_only, bool b_non_malicious)
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->cdev->chip_num;
pfdev_info->db_size = 0;
- pfdev_info->indices_per_sb = PIS_PER_SB;
+ pfdev_info->indices_per_sb = PIS_PER_SB_E4;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -3582,11 +3582,11 @@ static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
int i, cnt;
/* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS; i++) {
+ for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
u32 prod;
cons[i] = qed_rd(p_hwfn, p_ptt,
@@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
/* Wait for consumers to pass the producers */
i = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS; i++) {
+ for (; i < MAX_NUM_VOQS_E4; i++) {
u32 tmp;
tmp = qed_rd(p_hwfn, p_ptt,
@@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
break;
}
- if (i == MAX_NUM_VOQS)
+ if (i == MAX_NUM_VOQS_E4)
break;
msleep(20);
@@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
+ struct qed_mcp_link_state *p_link;
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
int rc;
@@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+ p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
+ return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+ p_link->speed);
}
static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 3955929ba892..9a8fd79611f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -274,6 +274,23 @@ enum qed_iov_wq_flag {
#ifdef CONFIG_QED_SRIOV
/**
+ * @brief Check whether the given VF ID @vfid is valid
+ * w.r.t. the @b_enabled_only value:
+ * if b_enabled_only = true - only an enabled VF ID is valid;
+ * else any VF ID less than max_vfs is valid.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious);
+
+/**
* @brief - Given a VF index, return index of next [including that] active VF.
*
* @param p_hwfn
@@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
+static inline bool
+qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
+{
+ return false;
+}
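
The #else stub above is the usual pattern for config-gated kernel APIs: a static inline fallback keeps call sites free of #ifdefs. Sketch of the shape, with hypothetical names:

/* Hypothetical config-gated API with an inline fallback stub. */
#ifdef CONFIG_MYFEATURE
bool myfeature_is_valid(struct my_hw *hw, int id);  /* real version in a .c */
#else
static inline bool myfeature_is_valid(struct my_hw *hw, int id)
{
	return false;  /* feature compiled out: nothing is ever valid */
}
#endif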
+
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index a3a70ade411f..9935978c5542 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
+#include <net/xdp.h>
#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
@@ -52,9 +53,9 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 10
-#define QEDE_REVISION_VERSION 10
-#define QEDE_ENGINEERING_VERSION 21
+#define QEDE_MINOR_VERSION 33
+#define QEDE_REVISION_VERSION 0
+#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
@@ -345,6 +346,7 @@ struct qede_rx_queue {
u64 xdp_no_pass;
void *handle;
+ struct xdp_rxq_info xdp_rxq;
};
union db_prod {
@@ -494,6 +496,8 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);
+netdev_features_t qede_fix_features(struct net_device *dev,
+ netdev_features_t features);
int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index dae741270022..4ca3847fffd4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -940,6 +940,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Configuring MTU size of %d\n", new_mtu);
+ if (new_mtu > PAGE_SIZE)
+ ndev->features &= ~NETIF_F_GRO_HW;
+
/* Set the mtu field and re-start the interface if needed */
args.u.mtu = new_mtu;
args.func = &qede_update_mtu;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index c1a0708a7d7c..6687e04d1558 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -98,10 +98,18 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
u16 rxq_id, bool add_fltr)
{
const struct qed_eth_ops *op = edev->ops;
+ struct qed_ntuple_filter_params params;
if (n->used)
return;
+ memset(&params, 0, sizeof(params));
+
+ params.addr = n->mapping;
+ params.length = n->buf_len;
+ params.qid = rxq_id;
+ params.b_is_add = add_fltr;
+
DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
"%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
add_fltr ? "Adding" : "Deleting",
@@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
n->used = true;
n->filter_op = add_fltr;
- op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
- rxq_id, add_fltr);
+ op->ntuple_filter_config(edev->cdev, n, &params);
}
static void
@@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
edev->arfs->filter_count++;
if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
- edev->ops->configure_arfs_searcher(edev->cdev, true);
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
edev->arfs->enable = true;
}
@@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
edev->arfs->filter_count--;
if (!edev->arfs->filter_count && edev->arfs->enable) {
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_DISABLE;
edev->arfs->enable = false;
- edev->ops->configure_arfs_searcher(edev->cdev, false);
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
}
}
@@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
if (!edev->arfs->filter_count) {
if (edev->arfs->enable) {
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_DISABLE;
edev->arfs->enable = false;
- edev->ops->configure_arfs_searcher(edev->cdev, false);
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
}
#ifdef CONFIG_RFS_ACCEL
} else {
@@ -895,19 +911,26 @@ static void qede_set_features_reload(struct qede_dev *edev,
edev->ndev->features = args->u.features;
}
+netdev_features_t qede_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
+ !(features & NETIF_F_GRO))
+ features &= ~NETIF_F_GRO_HW;
+
+ return features;
+}
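
qede_fix_features() above follows the ndo_fix_features contract: it runs under rtnl before the stack commits a feature change and may only mask bits off; it must not touch hardware (ndo_set_features does that later). A sketch of the hook for a hypothetical driver:

/* Sketch of an ndo_fix_features hook (hypothetical driver). */
static netdev_features_t mydrv_fix_features(struct net_device *dev,
					    netdev_features_t features)
{
	/* Veto combinations the hardware cannot honour right now. */
	if (dev->mtu > PAGE_SIZE)
		features &= ~NETIF_F_GRO_HW;

	return features;
}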
+
int qede_set_features(struct net_device *dev, netdev_features_t features)
{
struct qede_dev *edev = netdev_priv(dev);
netdev_features_t changes = features ^ dev->features;
bool need_reload = false;
- /* No action needed if hardware GRO is disabled during driver load */
- if (changes & NETIF_F_GRO) {
- if (dev->features & NETIF_F_GRO)
- need_reload = !edev->gro_disable;
- else
- need_reload = edev->gro_disable;
- }
+ if (changes & NETIF_F_GRO_HW)
+ need_reload = true;
if (need_reload) {
struct qede_reload_args args;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 48ec4c56cddf..dafc079ab6b9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1006,6 +1006,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp.data = xdp.data_hard_start + *data_offset;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
+ xdp.rxq = &rxq->xdp_rxq;
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 8f9b3eb82137..2db70eabddfe 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -545,6 +545,7 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
@@ -572,6 +573,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_change_mtu = qede_change_mtu,
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -589,6 +591,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_change_mtu = qede_change_mtu,
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -676,7 +679,7 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->priv_flags |= IFF_UNICAST_FLT;
/* user-changeable features */
- hw_features = NETIF_F_GRO | NETIF_F_SG |
+ hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
@@ -762,6 +765,12 @@ static void qede_free_fp_array(struct qede_dev *edev)
fp = &edev->fp_array[i];
kfree(fp->sb_info);
+ /* Handle the mem alloc failure case where qede_init_fp
+ * didn't register xdp_rxq_info yet.
+ * Implicitly conditioned on (fp->type & QEDE_FASTPATH_RX).
+ */
+ if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
kfree(fp->rxq);
kfree(fp->xdp_tx);
kfree(fp->txq);
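
The unregister above pairs with the xdp_rxq_info_reg() call added to qede_init_fp() further down; xdp_rxq_info_is_reg() makes teardown safe on partially initialized error paths. A sketch of the pairing for a hypothetical queue type (the three-argument xdp_rxq_info_reg() signature matches this kernel generation):

/* Hypothetical rx-queue init/free pairing for xdp_rxq_info. */
static int myrxq_init(struct my_rxq *rxq, struct net_device *dev, u32 qid)
{
	return xdp_rxq_info_reg(&rxq->xdp_rxq, dev, qid);
}

static void myrxq_free(struct my_rxq *rxq)
{
	/* Covers error paths where registration never happened. */
	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
}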
@@ -1068,10 +1077,6 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
pci_set_drvdata(pdev, NULL);
- /* Release edev's reference to XDP's bpf if such exist */
- if (edev->xdp_prog)
- bpf_prog_put(edev->xdp_prog);
-
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
@@ -1148,7 +1153,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
static int qede_alloc_mem_sb(struct qede_dev *edev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int rc;
@@ -1232,18 +1237,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
dma_addr_t mapping;
int i;
- /* Don't perform FW aggregations in case of XDP */
- if (edev->xdp_prog)
- edev->gro_disable = 1;
-
if (edev->gro_disable)
return 0;
- if (edev->ndev->mtu > PAGE_SIZE) {
- edev->gro_disable = 1;
- return 0;
- }
-
for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
struct sw_rx_data *replace_buf = &tpa_info->buffer;
@@ -1273,6 +1269,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
err:
qede_free_sge_mem(edev, rxq);
edev->gro_disable = 1;
+ edev->ndev->features &= ~NETIF_F_GRO_HW;
return -ENOMEM;
}
@@ -1502,6 +1499,10 @@ static void qede_init_fp(struct qede_dev *edev)
else
fp->rxq->data_direction = DMA_FROM_DEVICE;
fp->rxq->dev = &edev->pdev->dev;
+
+ /* Driver has no error path from here */
+ WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
+ fp->rxq->rxq_id) < 0);
}
if (fp->type & QEDE_FASTPATH_TX) {
@@ -1515,7 +1516,7 @@ static void qede_init_fp(struct qede_dev *edev)
edev->ndev->name, queue_id);
}
- edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
+ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
}
static int qede_set_real_num_queues(struct qede_dev *edev)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f7080d0ab874..46b0372dd032 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
struct list_head *head = &mbx->cmd_q;
struct qlcnic_cmd_args *cmd = NULL;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
while (!list_empty(head)) {
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
@@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
}
static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
@@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
{
struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
list_del(&cmd->list);
mbx->num_cmds--;
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}
@@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
init_completion(&cmd->completion);
cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
list_add_tail(&cmd->list, &mbx->cmd_q);
mbx->num_cmds++;
@@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
*timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
queue_work(mbx->work_q, &mbx->work);
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
return 0;
}
@@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
spin_unlock_irqrestore(&mbx->aen_lock, flags);
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
if (list_empty(head)) {
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
return;
}
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
mbx_ops->encode_cmd(adapter, cmd);
mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
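
The switch to the _bh lock variants in this file matters because the mailbox command queue is also reached from bottom-half context: if a softirq fires on a CPU whose interrupted process-context holder already owns the lock, the CPU deadlocks. Sketch of the rule (hypothetical names):

/* Sketch: process context disables BHs around a lock shared with softirqs. */
static void enqueue_cmd(struct my_mbx *mbx, struct my_cmd *cmd)
{
	spin_lock_bh(&mbx->queue_lock);    /* local BHs off: no softirq preemption */
	list_add_tail(&cmd->list, &mbx->cmd_q);
	spin_unlock_bh(&mbx->queue_lock);  /* BHs back on; pending softirqs run */
}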
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index 5028fb4bec2b..4beedb8faa1e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -114,8 +114,9 @@ struct emac_tpd {
#define TPD_INSTC_SET(tpd, val) BITS_SET((tpd)->word[3], 17, 17, val)
/* High 14-bit buffer address, so the 64-bit address is
* {DESC_CTRL_11_TX_DATA_HIADDR[17:0],(register) BUFFER_ADDR_H, BUFFER_ADDR_L}
+ * Extend TPD_BUFFER_ADDR_H to [31, 18], because we never enable timestamping.
*/
-#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 30, val)
+#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 31, val)
/* Format D. Word offset from the 1st byte of this packet to start to calculate
* the custom checksum.
*/
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 38c924bdd32e..13235baf4766 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -615,8 +615,11 @@ static int emac_probe(struct platform_device *pdev)
u32 reg;
int ret;
- /* The TPD buffer address is limited to 45 bits. */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(45));
+ /* The TPD buffer address is limited to:
+ * 1. PTP: 45 bits (the driver doesn't support PTP yet).
+ * 2. Non-PTP: 46 bits.
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
if (ret) {
dev_err(&pdev->dev, "could not set DMA mask\n");
return ret;
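
dma_set_mask_and_coherent() declares the device's addressing limit once for both streaming and coherent mappings, and fails if the platform cannot satisfy it, letting the driver retry with a narrower mask. Minimal sketch of the call (hypothetical probe helper):

#include <linux/dma-mapping.h>

/* Sketch: advertise the hardware's true addressing width. */
static int my_probe_dma(struct device *dev)
{
	/* 46-bit descriptor addresses while timestamping stays unused. */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(46));
}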
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index df21e900f874..7e7704daf5f1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -143,11 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- int ingress_format = RMNET_INGRESS_FORMAT_DEMUXING |
- RMNET_INGRESS_FORMAT_DEAGGREGATION |
- RMNET_INGRESS_FORMAT_MAP;
- int egress_format = RMNET_EGRESS_FORMAT_MUXING |
- RMNET_EGRESS_FORMAT_MAP;
+ u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -181,13 +177,20 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
if (err)
goto err2;
- netdev_dbg(dev, "data format [ingress 0x%08X] [egress 0x%08X]\n",
- ingress_format, egress_format);
- port->egress_data_format = egress_format;
- port->ingress_data_format = ingress_format;
port->rmnet_mode = mode;
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+ if (data[IFLA_VLAN_FLAGS]) {
+ struct ifla_vlan_flags *flags;
+
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ data_format = flags->flags & flags->mask;
+ }
+
+ netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+ port->data_format = data_format;
+
return 0;
err2:
@@ -317,9 +320,49 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct net_device *real_dev;
+ struct rmnet_endpoint *ep;
+ struct rmnet_port *port;
+ u16 mux_id;
+
+ real_dev = __dev_get_by_index(dev_net(dev),
+ nla_get_u32(tb[IFLA_LINK]));
+
+ if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
+ return -ENODEV;
+
+ port = rmnet_get_port_rtnl(real_dev);
+
+ if (data[IFLA_VLAN_ID]) {
+ mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
+ ep = rmnet_get_endpoint(port, priv->mux_id);
+
+ hlist_del_init_rcu(&ep->hlnode);
+ hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+ ep->mux_id = mux_id;
+ priv->mux_id = mux_id;
+ }
+
+ if (data[IFLA_VLAN_FLAGS]) {
+ struct ifla_vlan_flags *flags;
+
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ port->data_format = flags->flags & flags->mask;
+ }
+
+ return 0;
+}
+
static size_t rmnet_get_size(const struct net_device *dev)
{
- return nla_total_size(2); /* IFLA_VLAN_ID */
+ return nla_total_size(2) /* IFLA_VLAN_ID */ +
+ nla_total_size(sizeof(struct ifla_vlan_flags)); /* IFLA_VLAN_FLAGS */
}
struct rtnl_link_ops rmnet_link_ops __read_mostly = {
@@ -331,6 +374,7 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.newlink = rmnet_newlink,
.dellink = rmnet_dellink,
.get_size = rmnet_get_size,
+ .changelink = rmnet_changelink,
};
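
rmnet reuses the VLAN netlink attribute space (IFLA_VLAN_ID carries the mux ID, IFLA_VLAN_FLAGS the data-format bits), so the new .changelink above can adjust both on a live link. Minimal sketch of a changelink handler for a hypothetical driver:

/* Hypothetical minimal .changelink: update private state under rtnl. */
static int mydrv_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	if (data && data[IFLA_VLAN_FLAGS]) {
		struct ifla_vlan_flags *flags = nla_data(data[IFLA_VLAN_FLAGS]);

		priv->data_format = flags->flags & flags->mask;
	}

	return 0;
}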
/* Needs either rcu_read_lock() or rtnl lock */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index c19259eea99e..00e4634100d3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -32,8 +32,7 @@ struct rmnet_endpoint {
*/
struct rmnet_port {
struct net_device *dev;
- u32 ingress_data_format;
- u32 egress_data_format;
+ u32 data_format;
u8 nr_rmnet_devs;
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 08e4afc0ab39..601edec28c5f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -15,6 +15,8 @@
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
@@ -64,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_endpoint *ep;
+ u16 len, pad;
u8 mux_id;
- u16 len;
if (RMNET_MAP_GET_CD_BIT(skb)) {
- if (port->ingress_data_format
- & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
mux_id = RMNET_MAP_GET_MUX_ID(skb);
- len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
+ pad = RMNET_MAP_GET_PAD(skb);
+ len = RMNET_MAP_GET_LENGTH(skb) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
@@ -89,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
/* Subtract MAP header */
skb_pull(skb, sizeof(struct rmnet_map_header));
- skb_trim(skb, len);
rmnet_set_skb_proto(skb);
+
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+ if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ skb_trim(skb, len);
rmnet_deliver_skb(skb);
return;
@@ -104,8 +112,17 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
{
struct sk_buff *skbn;
- if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
- while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
+ if (skb->dev->type == ARPHRD_ETHER) {
+ if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_push(skb, ETH_HLEN);
+ }
+
+ if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+ while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
__rmnet_map_ingress_handler(skbn, port);
consume_skb(skb);
@@ -124,29 +141,32 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);
+ if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
+ additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+ required_headroom += additional_header_len;
+ }
+
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
goto fail;
}
+ if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+ rmnet_map_checksum_uplink_packet(skb, orig_dev);
+
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
if (!map_header)
goto fail;
- if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
- if (mux_id == 0xff)
- map_header->mux_id = 0;
- else
- map_header->mux_id = mux_id;
- }
+ map_header->mux_id = mux_id;
skb->protocol = htons(ETH_P_MAP);
- return RMNET_MAP_SUCCESS;
+ return 0;
fail:
kfree_skb(skb);
- return RMNET_MAP_CONSUMED;
+ return -ENOMEM;
}
static void
@@ -178,8 +198,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
- if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
- rmnet_map_ingress_handler(skb, port);
+ rmnet_map_ingress_handler(skb, port);
break;
case RMNET_EPMODE_BRIDGE:
rmnet_bridge_handler(skb, port->bridge_ep);
@@ -201,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
struct rmnet_priv *priv;
u8 mux_id;
+ sk_pacing_shift_update(skb->sk, 8);
+
orig_dev = skb->dev;
priv = netdev_priv(orig_dev);
skb->dev = priv->real_dev;
@@ -212,19 +233,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
return;
}
- if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
- switch (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) {
- case RMNET_MAP_CONSUMED:
- return;
-
- case RMNET_MAP_SUCCESS:
- break;
-
- default:
- kfree_skb(skb);
- return;
- }
- }
+ if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+ return;
rmnet_vnd_tx_fixup(skb, orig_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 3af3fe7b5457..6ce31e29136d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -30,15 +30,6 @@ struct rmnet_map_control_command {
};
} __aligned(1);
-enum rmnet_map_results {
- RMNET_MAP_SUCCESS,
- RMNET_MAP_CONSUMED,
- RMNET_MAP_GENERAL_FAILURE,
- RMNET_MAP_NOT_ENABLED,
- RMNET_MAP_FAILED_AGGREGATION,
- RMNET_MAP_FAILED_MUX
-};
-
enum rmnet_map_commands {
RMNET_MAP_COMMAND_NONE,
RMNET_MAP_COMMAND_FLOW_DISABLE,
@@ -56,6 +47,22 @@ struct rmnet_map_header {
u16 pkt_len;
} __aligned(1);
+struct rmnet_map_dl_csum_trailer {
+ u8 reserved1;
+ u8 valid:1;
+ u8 reserved2:7;
+ u16 csum_start_offset;
+ u16 csum_length;
+ __be16 csum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_csum_header {
+ __be16 csum_start_offset;
+ u16 csum_insert_offset:14;
+ u16 udp_ip4_ind:1;
+ u16 csum_enabled:1;
+} __aligned(1);
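
The two headers above rely on C bitfield layout, which is compiler- and endian-dependent; the driver compensates by byte-swapping the second 16-bit word after filling it. For comparison, a mask-based encoding (purely illustrative, not the on-wire MAP format) sidesteps that dependency:

/* Illustrative mask-based encoding of a 16-bit header word. */
#define UL_CSUM_INSERT_MASK	0x3fff	/* bits 13:0 */
#define UL_UDP_IP4_IND		BIT(14)
#define UL_CSUM_ENABLED		BIT(15)

static __be16 ul_word_encode(u16 insert_off, bool udp4, bool enabled)
{
	u16 v = insert_off & UL_CSUM_INSERT_MASK;

	if (udp4)
		v |= UL_UDP_IP4_IND;
	if (enabled)
		v |= UL_CSUM_ENABLED;

	return htons(v);	/* explicit, endian-safe */
}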
+
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
@@ -76,10 +83,13 @@ struct rmnet_map_header {
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
-u8 rmnet_map_demultiplex(struct sk_buff *skb);
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev);
#endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 51e604923ac1..6bc328fb88e1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
}
static void rmnet_map_send_ack(struct sk_buff *skb,
- unsigned char type)
+ unsigned char type,
+ struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
int xmit_status;
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+ if (skb->len < sizeof(struct rmnet_map_header) +
+ RMNET_MAP_GET_LENGTH(skb) +
+ sizeof(struct rmnet_map_dl_csum_trailer)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_trim(skb, skb->len -
+ sizeof(struct rmnet_map_dl_csum_trailer));
+ }
+
skb->protocol = htons(ETH_P_MAP);
cmd = RMNET_MAP_GET_CMD_START(skb);
@@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
- rmnet_map_send_ack(skb, rc);
+ rmnet_map_send_ack(skb, rc, port);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 86b8c758f94e..c74a6c56d315 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -14,6 +14,9 @@
*/
#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
@@ -21,6 +24,233 @@
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+ const void *txporthdr)
+{
+ __sum16 *check = NULL;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ check = &(((struct tcphdr *)txporthdr)->check);
+ break;
+
+ case IPPROTO_UDP:
+ check = &(((struct udphdr *)txporthdr)->check);
+ break;
+
+ default:
+ check = NULL;
+ break;
+ }
+
+ return check;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+ __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+ u16 csum_value, csum_value_final;
+ struct iphdr *ip4h;
+ void *txporthdr;
+ __be16 addend;
+
+ ip4h = (struct iphdr *)(skb->data);
+ if ((ntohs(ip4h->frag_off) & IP_MF) ||
+ ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+ return -EOPNOTSUPP;
+
+ txporthdr = skb->data + ip4h->ihl * 4;
+
+ csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+ if (!csum_field)
+ return -EPROTONOSUPPORT;
+
+ /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+ if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+ return 0;
+
+ csum_value = ~ntohs(csum_trailer->csum_value);
+ hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+ ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+ (__force __be16)hdr_csum);
+
+ pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+ ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+ ip4h->protocol, 0);
+ addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+ pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+ addend = (__force __be16)ntohs((__force __be16)*csum_field);
+ csum_temp = ~csum16_sub(pseudo_csum, addend);
+ csum_value_final = (__force u16)csum_temp;
+
+ if (unlikely(csum_value_final == 0)) {
+ switch (ip4h->protocol) {
+ case IPPROTO_UDP:
+ /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+ csum_value_final = ~csum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ /* DL4 Non-RFC compliant TCP checksum found */
+ if (*csum_field == (__force __sum16)0xFFFF)
+ csum_value_final = ~csum_value_final;
+ break;
+ }
+ }
+
+ if (csum_value_final == ntohs((__force __be16)*csum_field))
+ return 0;
+ else
+ return -EINVAL;
+}
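
The trailer's csum_value covers the whole IP packet, so the function recovers the transport-layer checksum by ones'-complement subtraction of the IP header sum, then folds in the pseudo-header via csum16_add()/csum16_sub(). The arithmetic behind those helpers, as a standalone sketch (illustrative, not the kernel's implementation):

/* Ones'-complement add/subtract with end-around carry. */
static u16 ones_add(u16 a, u16 b)
{
	u32 s = (u32)a + b;

	return (s & 0xffff) + (s >> 16);	/* fold the carry back in */
}

static u16 ones_sub(u16 a, u16 b)
{
	return ones_add(a, (u16)~b);	/* a - b == a + ~b in ones' complement */
}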
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+ __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+ u16 csum_value, csum_value_final;
+ __be16 ip6_hdr_csum, addend;
+ struct ipv6hdr *ip6h;
+ void *txporthdr;
+ u32 length;
+
+ ip6h = (struct ipv6hdr *)(skb->data);
+
+ txporthdr = skb->data + sizeof(struct ipv6hdr);
+ csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+ if (!csum_field)
+ return -EPROTONOSUPPORT;
+
+ csum_value = ~ntohs(csum_trailer->csum_value);
+ ip6_hdr_csum = (__force __be16)
+ ~ntohs((__force __be16)ip_compute_csum(ip6h,
+ (int)(txporthdr - (void *)(skb->data))));
+ ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+ ip6_hdr_csum);
+
+ length = (ip6h->nexthdr == IPPROTO_UDP) ?
+ ntohs(((struct udphdr *)txporthdr)->len) :
+ ntohs(ip6h->payload_len);
+ pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ length, ip6h->nexthdr, 0));
+ addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+ pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+ addend = (__force __be16)ntohs((__force __be16)*csum_field);
+ csum_temp = ~csum16_sub(pseudo_csum, addend);
+ csum_value_final = (__force u16)csum_temp;
+
+ if (unlikely(csum_value_final == 0)) {
+ switch (ip6h->nexthdr) {
+ case IPPROTO_UDP:
+ /* RFC 2460 section 8.1
+ * DL6 One's complement rule for UDP checksum 0
+ */
+ csum_value_final = ~csum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ /* DL6 Non-RFC compliant TCP checksum found */
+ if (*csum_field == (__force __sum16)0xFFFF)
+ csum_value_final = ~csum_value_final;
+ break;
+ }
+ }
+
+ if (csum_value_final == ntohs((__force __be16)*csum_field))
+ return 0;
+ else
+ return -EINVAL;
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = iphdr + ip4h->ihl * 4;
+
+ if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ __be16 *hdr = (__be16 *)ul_header, offset;
+
+ offset = htons((__force u16)(skb_transport_header(skb) -
+ (unsigned char *)iphdr));
+ ul_header->csum_start_offset = offset;
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+ if (ip4h->protocol == IPPROTO_UDP)
+ ul_header->udp_ip4_ind = 1;
+ else
+ ul_header->udp_ip4_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+ *hdr = htons((__force u16)*hdr);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+ if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ __be16 *hdr = (__be16 *)ul_header, offset;
+
+ offset = htons((__force u16)(skb_transport_header(skb) -
+ (unsigned char *)ip6hdr));
+ ul_header->csum_start_offset = offset;
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+ ul_header->udp_ip4_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+ *hdr = htons((__force u16)*hdr);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
u32 padding, map_datalen;
u8 *padbytes;
- if (skb_headroom(skb) < sizeof(struct rmnet_map_header))
- return NULL;
-
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
skb_push(skb, sizeof(struct rmnet_map_header));
@@ -69,7 +296,8 @@ done:
* returned, indicating that there are no more packets to deaggregate. Caller
* is responsible for freeing the original skb.
*/
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port)
{
struct rmnet_map_header *maph;
struct sk_buff *skbn;
@@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
maph = (struct rmnet_map_header *)skb->data;
packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
+ packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
return skbn;
}
+
+/* Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the IP payload +
+ * padding + checksum trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+ struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return -EOPNOTSUPP;
+
+ csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+ if (!csum_trailer->valid)
+ return -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+#if IS_ENABLED(CONFIG_IPV6)
+ return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+#else
+ return -EPROTONOSUPPORT;
+#endif
+
+ return 0;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev)
+{
+ struct rmnet_map_ul_csum_header *ul_header;
+ void *iphdr;
+
+ ul_header = (struct rmnet_map_ul_csum_header *)
+ skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+ if (unlikely(!(orig_dev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+ goto sw_csum;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ iphdr = (char *)ul_header +
+ sizeof(struct rmnet_map_ul_csum_header);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+ return;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+ rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+ return;
+#else
+ goto sw_csum;
+#endif
+ }
+ }
+
+sw_csum:
+ ul_header->csum_start_offset = 0;
+ ul_header->csum_insert_offset = 0;
+ ul_header->csum_enabled = 0;
+ ul_header->udp_ip4_ind = 0;
+}
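
rmnet_map_checksum_uplink_packet() translates the stack's CHECKSUM_PARTIAL request into MAP header fields: the offload engine must sum from the transport header and store the result skb->csum_offset bytes into it. A sketch of reading that geometry (real skb helpers, hypothetical debug function):

/* Sketch: where a CHECKSUM_PARTIAL skb wants its checksum computed/stored. */
static void describe_partial_csum(const struct sk_buff *skb)
{
	unsigned int start = skb_checksum_start_offset(skb);

	pr_debug("sum from byte %u, store result at byte %u\n",
		 start, start + skb->csum_offset);
}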
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index 49102f922b31..de0143eaa05a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -19,14 +19,10 @@
#define RMNET_TX_QUEUE_LEN 1000
/* Constants */
-#define RMNET_EGRESS_FORMAT_MAP BIT(1)
-#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(2)
-#define RMNET_EGRESS_FORMAT_MUXING BIT(3)
-
-#define RMNET_INGRESS_FORMAT_MAP BIT(1)
-#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(2)
-#define RMNET_INGRESS_FORMAT_DEMUXING BIT(3)
-#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(4)
+#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(0)
+#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(1)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(2)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(3)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 9caa5e387450..570a227acdd8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -185,6 +185,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
if (ep->egress_dev)
return -EINVAL;
+ if (rmnet_get_endpoint(port, id))
+ return -EBUSY;
+
+ rmnet_dev->hw_features = NETIF_F_RXCSUM;
+ rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ rmnet_dev->hw_features |= NETIF_F_SG;
+
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e7ab23e87de2..81045dfa1cd8 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -748,8 +748,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss > MSSMask) {
- WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
- mss);
+ netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
+ mss);
goto out_dma_error;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fc0d5fa65ad4..0bf7d1759250 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1395,7 +1395,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
void __iomem *ioaddr = tp->mmio_addr;
- return RTL_R8(IBISR0) & 0x02;
+ return RTL_R8(IBISR0) & 0x20;
}
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
@@ -1403,7 +1403,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
+ rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
}
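
The two fixes above correct both the IBISR0 mask (bit 5, 0x20, not 0x02) and the polling polarity: the bit appears to be an interrupt-status flag that becomes set once the CMAC transmitter has drained, so the driver must wait for it to go high before acknowledging it by writing it back. The generic shape of such a poll (hypothetical helper):

/* Sketch: poll until a status bit asserts, with a bounded timeout. */
static bool wait_bit_high(void __iomem *reg, u8 mask,
			  unsigned int step_ms, unsigned int total_ms)
{
	unsigned int waited;

	for (waited = 0; waited < total_ms; waited += step_ms) {
		if (readb(reg) & mask)
			return true;	/* condition reached */
		msleep(step_ms);
	}

	return false;	/* timed out */
}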
@@ -1675,33 +1675,24 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
}
}
-static void __rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr, bool pm)
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
{
if (tp->link_ok(ioaddr)) {
rtl_link_chg_patch(tp);
/* This is to cancel a scheduled suspend if there's one. */
- if (pm)
- pm_request_resume(&tp->pci_dev->dev);
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
if (net_ratelimit())
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
- if (pm)
- pm_schedule_suspend(&tp->pci_dev->dev, 5000);
+ pm_runtime_idle(&tp->pci_dev->dev);
}
}
-static void rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr)
-{
- __rtl8169_check_link_status(dev, tp, ioaddr, false);
-}
-
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -2244,19 +2235,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
void __iomem *ioaddr = tp->mmio_addr;
dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
- bool ret;
RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+ RTL_R32(CounterAddrHigh);
cmd = (u64)paddr & DMA_BIT_MASK(32);
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | counter_cmd);
- ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
-
- RTL_W32(CounterAddrLow, 0);
- RTL_W32(CounterAddrHigh, 0);
-
- return ret;
+ return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
static bool rtl8169_reset_counters(struct net_device *dev)
@@ -4643,16 +4629,6 @@ static void rtl8169_phy_timer(struct timer_list *t)
rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
-static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
- void __iomem *ioaddr)
-{
- iounmap(ioaddr);
- pci_release_regions(pdev);
- pci_clear_mwi(pdev);
- pci_disable_device(pdev);
- free_netdev(dev);
-}
-
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
return tp->phy_reset_pending(tp);
@@ -4784,14 +4760,6 @@ static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data
return -EOPNOTSUPP;
}
-static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
-{
- if (tp->features & RTL_FEATURE_MSI) {
- pci_disable_msi(pdev);
- tp->features &= ~RTL_FEATURE_MSI;
- }
-}
-
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
struct mdio_ops *ops = &tp->mdio_ops;
@@ -7764,7 +7732,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
rtl8169_pcierr_interrupt(dev);
if (status & LinkChg)
- __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
rtl_irq_enable_all(tp);
}
@@ -7977,7 +7945,7 @@ static int rtl_open(struct net_device *dev)
rtl_unlock_work(tp);
tp->saved_wolopts = 0;
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
rtl8169_check_link_status(dev, tp, ioaddr);
out:
@@ -8121,8 +8089,10 @@ static int rtl8169_runtime_suspend(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray)
+ if (!tp->TxDescArray) {
+ rtl_pll_power_down(tp);
return 0;
+ }
rtl_lock_work(tp);
tp->saved_wolopts = __rtl8169_get_wol(tp);
@@ -8164,9 +8134,11 @@ static int rtl8169_runtime_idle(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
- return tp->TxDescArray ? -EBUSY : 0;
+ if (!netif_running(dev) || !netif_carrier_ok(dev))
+ pm_schedule_suspend(device, 10000);
+
+ return -EBUSY;
}
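
The reworked runtime_idle callback above relies on the PM core's contract: returning 0 lets the core suspend the device immediately, while a negative value vetoes that, leaving the driver free to schedule its own delayed suspend. Sketch of that shape (hypothetical driver and predicate; the 10 s delay mirrors the hunk above):

/* Sketch of a runtime_idle callback that defers suspend on its own terms. */
static int mydrv_runtime_idle(struct device *dev)
{
	if (mydrv_is_quiescent(dev))		/* hypothetical predicate */
		pm_schedule_suspend(dev, 10000);	/* suspend in 10 s */

	return -EBUSY;	/* never suspend synchronously from idle */
}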
static const struct dev_pm_ops rtl8169_pm_ops = {
@@ -8213,9 +8185,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- struct device *d = &pdev->dev;
-
- pm_runtime_get_sync(d);
rtl8169_net_suspend(dev);
@@ -8233,8 +8202,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
pci_wake_from_d3(pdev, true);
pci_set_power_state(pdev, PCI_D3hot);
}
-
- pm_runtime_put_noidle(d);
}
static void rtl_remove_one(struct pci_dev *pdev)
@@ -8256,9 +8223,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
- tp->counters, tp->counters_phys_addr);
-
rtl_release_firmware(tp);
if (pci_dev_run_wake(pdev))
@@ -8266,9 +8230,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
/* restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
-
- rtl_disable_msi(pdev, tp);
- rtl8169_release_board(pdev, dev, tp->mmio_addr);
}
static const struct net_device_ops rtl_netdev_ops = {
@@ -8445,11 +8406,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
MODULENAME, RTL8169_VERSION);
}
- dev = alloc_etherdev(sizeof (*tp));
- if (!dev) {
- rc = -ENOMEM;
- goto out;
- }
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
+ if (!dev)
+ return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &rtl_netdev_ops;
@@ -8472,13 +8431,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
PCIE_LINK_STATE_CLKPM);
/* enable device (incl. PCI PM wakeup and hotplug setup) */
- rc = pci_enable_device(pdev);
+ rc = pcim_enable_device(pdev);
if (rc < 0) {
netif_err(tp, probe, dev, "enable failure\n");
- goto err_out_free_dev_1;
+ return rc;
}
- if (pci_set_mwi(pdev) < 0)
+ if (pcim_set_mwi(pdev) < 0)
netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
/* make sure PCI base addr 1 is MMIO */
@@ -8486,30 +8445,28 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_err(tp, probe, dev,
"region #%d not an MMIO resource, aborting\n",
region);
- rc = -ENODEV;
- goto err_out_mwi_2;
+ return -ENODEV;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
netif_err(tp, probe, dev,
"Invalid PCI region size(s), aborting\n");
- rc = -ENODEV;
- goto err_out_mwi_2;
+ return -ENODEV;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
netif_err(tp, probe, dev, "could not request regions\n");
- goto err_out_mwi_2;
+ return rc;
}
/* ioremap MMIO region */
- ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+ ioaddr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, region),
+ R8169_REGS_SIZE);
if (!ioaddr) {
netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
- rc = -EIO;
- goto err_out_free_res_3;
+ return -EIO;
}
tp->mmio_addr = ioaddr;
@@ -8535,7 +8492,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_unmap_4;
+ return rc;
}
}
@@ -8697,16 +8654,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
- tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
- &tp->counters_phys_addr, GFP_KERNEL);
- if (!tp->counters) {
- rc = -ENOMEM;
- goto err_out_msi_5;
- }
+ tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
+ &tp->counters_phys_addr,
+ GFP_KERNEL);
+ if (!tp->counters)
+ return -ENOMEM;
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_cnt_6;
+ return rc;
pci_set_drvdata(pdev, dev);
@@ -8730,30 +8686,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl8168_driver_start(tp);
}
- if (pci_dev_run_wake(pdev))
- pm_runtime_put_noidle(&pdev->dev);
-
netif_carrier_off(dev);
-out:
- return rc;
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_sync(&pdev->dev);
-err_out_cnt_6:
- dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
- tp->counters_phys_addr);
-err_out_msi_5:
- netif_napi_del(&tp->napi);
- rtl_disable_msi(pdev, tp);
-err_out_unmap_4:
- iounmap(ioaddr);
-err_out_free_res_3:
- pci_release_regions(pdev);
-err_out_mwi_2:
- pci_clear_mwi(pdev);
- pci_disable_device(pdev);
-err_out_free_dev_1:
- free_netdev(dev);
- goto out;
+ return 0;
}
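With every resource devm/pcim-managed, the probe error path above collapses to plain returns and rtl_remove_one() loses its manual teardown. A minimal sketch of the same pattern for a hypothetical PCI netdev driver (the demo_* names and the BAR/size are illustrative, not from the source):

    #include <linux/etherdevice.h>
    #include <linux/io.h>
    #include <linux/pci.h>

    struct demo_priv { void __iomem *regs; };

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            struct net_device *dev;
            void __iomem *regs;
            int rc;

            dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct demo_priv));
            if (!dev)
                    return -ENOMEM;         /* nothing to unwind by hand */
            SET_NETDEV_DEV(dev, &pdev->dev);

            rc = pcim_enable_device(pdev);  /* re-disabled on driver detach */
            if (rc < 0)
                    return rc;

            regs = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1),
                                0x100);     /* illustrative region size */
            if (!regs)
                    return -EIO;            /* unmapped automatically */

            return register_netdev(dev);    /* the one call needing undo */
    }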
static struct pci_driver rtl8169_pci_driver = {
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 009780df664b..c87f57ca4437 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2205,8 +2205,7 @@ out_dma_free:
if (chip_id != RCAR_GEN2)
ravb_ptp_stop(ndev);
out_release:
- if (ndev)
- free_netdev(ndev);
+ free_netdev(ndev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 75323000c364..a197e11f3a56 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
+ [TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
+ [TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@@ -2089,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(CSMR);
if (cd->select_mii)
add_reg(RMII_MII);
- add_reg(ARSTR);
if (cd->tsu) {
+ add_tsu_reg(ARSTR);
add_tsu_reg(TSU_CTRST);
add_tsu_reg(TSU_FWEN0);
add_tsu_reg(TSU_FWEN1);
@@ -3125,7 +3125,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
const struct platform_device_id *id = platform_get_device_id(pdev);
struct sh_eth_private *mdp;
struct net_device *ndev;
- int ret, devno;
+ int ret;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -3137,10 +3137,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- devno = pdev->id;
- if (devno < 0)
- devno = 0;
-
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto out_release;
@@ -3222,25 +3218,43 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
}
- /* ioremap the TSU registers */
if (mdp->cd->tsu) {
+ int port = pdev->id < 0 ? 0 : pdev->id % 2;
struct resource *rtsu;
+
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
- if (IS_ERR(mdp->tsu_addr)) {
- ret = PTR_ERR(mdp->tsu_addr);
+ if (!rtsu) {
+ dev_err(&pdev->dev, "no TSU resource\n");
+ ret = -ENODEV;
+ goto out_release;
+ }
+ /* The two ports sharing this TSU map the same register region, so
+ * only the first port may request it; requesting it again from the
+ * second port's probe would fail.
+ */
+ if (port == 0 &&
+ !devm_request_mem_region(&pdev->dev, rtsu->start,
+ resource_size(rtsu),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "can't request TSU resource.\n");
+ ret = -EBUSY;
+ goto out_release;
+ }
+ /* ioremap the TSU registers */
+ mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
+ resource_size(rtsu));
+ if (!mdp->tsu_addr) {
+ dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
+ ret = -ENOMEM;
goto out_release;
}
- mdp->port = devno % 2;
+ mdp->port = port;
ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
- }
- /* initialize first or needed device */
- if (!devno || pd->needs_init) {
- if (mdp->cd->chip_reset)
- mdp->cd->chip_reset(ndev);
+ /* Need to init only the first port of the two sharing a TSU */
+ if (port == 0) {
+ if (mdp->cd->chip_reset)
+ mdp->cd->chip_reset(ndev);
- if (mdp->cd->tsu) {
/* TSU init (Init only)*/
sh_eth_tsu_init(mdp);
}
@@ -3282,8 +3296,7 @@ out_napi_del:
out_release:
/* net_dev free */
- if (ndev)
- free_netdev(ndev);
+ free_netdev(ndev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
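The devm_ioremap_resource() call had to go because it requests and maps in one step, and the TSU region is legitimately mapped by both ports. A condensed sketch of the new rule (variables as in the probe above, not a drop-in):

    /* Both ports ioremap the shared TSU, but only port 0 claims the
     * region, so the second port's probe no longer fails with -EBUSY.
     */
    int port = pdev->id < 0 ? 0 : pdev->id % 2;

    if (port == 0 &&
        !devm_request_mem_region(&pdev->dev, rtsu->start,
                                 resource_size(rtsu), dev_name(&pdev->dev)))
            return -EBUSY;          /* region owned by someone else */

    mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
                                 resource_size(rtsu));
    if (!mdp->tsu_addr)
            return -ENOMEM;

    if (port == 0)                  /* reset/init the shared TSU once */
            sh_eth_tsu_init(mdp);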
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 6d6fb8cf3e7c..6473cc68c2d5 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -789,7 +789,6 @@ static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
ofdpa_flags_nowait(flags),
ofdpa_cmd_flow_tbl_add,
found, NULL, NULL);
- return 0;
}
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e566dbb3343d..75fbf58e421c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -160,11 +160,31 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}
+/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
+ * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O
+ * BAR; PFs use BAR 0/1 for memory.
+ */
+static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
+{
+ switch (efx->pci_dev->device) {
+ case 0x0b03: /* SFC9250 PF */
+ return 0;
+ default:
+ return 2;
+ }
+}
+
+/* All VFs use BAR 0/1 for memory */
+static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
+{
+ return 0;
+}
+
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
int bar;
- bar = efx->type->mem_bar;
+ bar = efx->type->mem_bar(efx);
return resource_size(&efx->pci_dev->resource[bar]);
}
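mem_bar is now a per-NIC-type callback rather than a compile-time constant, so common code resolves the BAR at runtime; efx_ef10_mem_map_size() above and the efx_init_io()/efx_fini_io() hunks later in this patch all follow the same pattern:

    unsigned int bar = efx->type->mem_bar(efx);   /* 0 on SFC9250, else 2 */
    resource_size_t len = resource_size(&efx->pci_dev->resource[bar]);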
@@ -213,7 +233,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx)
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
@@ -257,9 +277,70 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
return -ENODEV;
}
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
+ u8 vi_window_mode = MCDI_BYTE(outbuf,
+ GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+
+ switch (vi_window_mode) {
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ efx->vi_stride = 8192;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ efx->vi_stride = 16384;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ efx->vi_stride = 65536;
+ break;
+ default:
+ netif_err(efx, probe, efx->net_dev,
+ "Unrecognised VI window mode %d\n",
+ vi_window_mode);
+ return -EIO;
+ }
+ netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
+ efx->vi_stride);
+ } else {
+ /* keep default VI stride */
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware did not report VI window mode, assuming vi_stride = %u\n",
+ efx->vi_stride);
+ }
+
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+ efx->num_mac_stats = MCDI_WORD(outbuf,
+ GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware reports num_mac_stats = %u\n",
+ efx->num_mac_stats);
+ } else {
+ /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware did not report num_mac_stats, assuming %u\n",
+ efx->num_mac_stats);
+ }
+
return 0;
}
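The stride learned here feeds the resource sizing moved later in this patch (max_channels in efx_ef10_probe()). Illustrative arithmetic with a made-up 16 MiB memory BAR:

    unsigned int n_vis = (16u << 20) / efx->vi_stride;
    /* 2048 VIs at the default 8K stride, but only 256 at 64K, so a
     * wider VI window directly shrinks the usable channel count.
     */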
+static void efx_ef10_read_licensed_features(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
+ return;
+
+ nic_data->licensed_features = MCDI_QWORD(outbuf,
+ LICENSING_V3_OUT_LICENSED_FEATURES);
+}
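This caches the LICENSING_V3 feature mask at probe time; errors are deliberately swallowed, so an unlicensed or older NIC simply reports no features. Consumers then test individual bits, as efx_ef10_tx_init() does later in this patch:

    if (!(nic_data->licensed_features &
          (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN)))
            tx_queue->timestamping = false;   /* unlicensed: no HW stamps */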
+
static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
@@ -589,17 +670,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data;
int i, rc;
- /* We can have one VI for each 8K region. However, until we
- * use TX option descriptors we need two TX queues per channel.
- */
- efx->max_channels = min_t(unsigned int,
- EFX_MAX_CHANNELS,
- efx_ef10_mem_map_size(efx) /
- (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
- efx->max_tx_channels = efx->max_channels;
- if (WARN_ON(efx->max_channels == 0))
- return -EIO;
-
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
return -ENOMEM;
@@ -671,6 +741,22 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc < 0)
goto fail5;
+ efx_ef10_read_licensed_features(efx);
+
+ /* We can have one VI for each vi_stride-byte region.
+ * However, until we use TX option descriptors we need two TX queues
+ * per channel.
+ */
+ efx->max_channels = min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ efx_ef10_mem_map_size(efx) /
+ (efx->vi_stride * EFX_TXQ_TYPES));
+ efx->max_tx_channels = efx->max_channels;
+ if (WARN_ON(efx->max_channels == 0)) {
+ rc = -EIO;
+ goto fail5;
+ }
+
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
@@ -695,7 +781,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc && rc != -EPERM)
goto fail5;
- efx_ptp_probe(efx, NULL);
+ efx_ptp_defer_probe_with_channel(efx);
#ifdef CONFIG_SFC_SRIOV
if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
@@ -865,6 +951,11 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
/* Link a buffer to each TX queue */
efx_for_each_channel(channel, efx) {
+ /* Extra channels, even those with TXQs (PTP), do not require
+ * PIO resources.
+ */
+ if (!channel->type->want_pio)
+ continue;
efx_for_each_channel_tx_queue(tx_queue, channel) {
/* We assign the PIO buffers to queues in
* reverse order to allow for the following
@@ -907,7 +998,7 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
} else {
tx_queue->piobuf =
nic_data->pio_write_base +
- index * EFX_VI_PAGE_SIZE + offset;
+ index * efx->vi_stride + offset;
tx_queue->piobuf_offset = offset;
netif_dbg(efx, probe, efx->net_dev,
"linked VI %u to PIO buffer %u offset %x addr %p\n",
@@ -1212,7 +1303,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
void __iomem *membase;
int rc;
- channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ channel_vis = max(efx->n_channels,
+ (efx->n_tx_channels + efx->n_extra_tx_channels) *
+ EFX_TXQ_TYPES);
#ifdef EFX_USE_PIO
/* Try to allocate PIO buffers if wanted and if the full
@@ -1253,19 +1346,19 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* for writing PIO buffers through.
*
* The UC mapping contains (channel_vis - 1) complete VIs and the
- * first half of the next VI. Then the WC mapping begins with
- * the second half of this last VI.
+ * first 4K of the next VI. Then the WC mapping begins with
+ * the remainder of this last VI.
*/
- uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
+ uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
ER_DZ_TX_PIOBUF);
if (nic_data->n_piobufs) {
/* pio_write_vi_base rounds down to give the number of complete
* VIs inside the UC mapping.
*/
- pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+ pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
nic_data->n_piobufs) *
- EFX_VI_PAGE_SIZE) -
+ efx->vi_stride) -
uc_mem_map_size);
max_vis = pio_write_vi_base + nic_data->n_piobufs;
} else {
@@ -1337,7 +1430,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
nic_data->pio_write_vi_base = pio_write_vi_base;
nic_data->pio_write_base =
nic_data->wc_membase +
- (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+ (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
uc_mem_map_size);
rc = efx_ef10_link_piobufs(efx);
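A worked example of the mapping split above, using illustrative values (channel_vis = 32, vi_stride = 8192, 4 KiB pages, and the PIO buffer register assumed 2 KiB into its VI window; none of these numbers come from the source):

    unsigned int uc_mem_map_size = PAGE_ALIGN(31 * 8192 + 2048); /* 258048 */
    unsigned int pio_write_vi_base = uc_mem_map_size / 8192;     /* 31     */
    /* The UC mapping thus ends part-way through the last VI, and the
     * write-combining mapping picks up from that point for PIO writes.
     */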
@@ -1571,6 +1664,29 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
+ EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
+ EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
+ EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
+ EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
+ EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
+ EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
+ EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START),
+ EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
+ EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
+ EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
+ EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
+ EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
+ EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
+ EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
+ EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
+ EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
+ EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
+ EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
+ EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
+ EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
+ EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
+ EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
+ EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
@@ -1646,6 +1762,43 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
(1ULL << EF10_STAT_port_rx_dp_hlb_wait))
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_FEC_STAT_MASK ( \
+ (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
+
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_CTPIO_STAT_MASK ( \
+ (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_success - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_poison - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_erase - 64)))
+
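EF10_STAT_* values index a two-u64 mask, so every stat numbered 64 or above is shifted down by 64 to land in the second word. A hypothetical runtime test of one such bit (raw_mask as in efx_ef10_get_stat_mask() below):

    bool has_fec = raw_mask[1] &
                   (1ULL << (EF10_STAT_fec_corrected_errors - 64));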
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
u64 raw_mask = HUNT_COMMON_STAT_MASK;
@@ -1684,10 +1837,22 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
if (nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
- raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+ raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
} else {
raw_mask[1] = 0;
}
+ /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
+ if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
+ raw_mask[1] |= EF10_FEC_STAT_MASK;
+
+ /* CTPIO stats appear in V3. Only show them on devices that actually
+ * support CTPIO. Although this driver doesn't use CTPIO others might,
+ * and we may be reporting the stats for the underlying port.
+ */
+ if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
+ (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
+ raw_mask[1] |= EF10_CTPIO_STAT_MASK;
#if BITS_PER_LONG == 64
BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
@@ -1791,7 +1956,7 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
dma_stats = efx->stats_buffer.addr;
- generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ generation_end = dma_stats[efx->num_mac_stats - 1];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
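Using the last stats word as the generation marker keeps the protocol intact however many stats the firmware DMAs. A sketch of the consistent-snapshot read it supports (the real driver splits the retry between efx_ef10_try_update_nic_stats_pf() and its caller):

    do {
            generation_end = dma_stats[efx->num_mac_stats - 1];
            if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
                    return 0;   /* MC has not DMA'd any stats yet */
            rmb();              /* read the stats after the end marker */
            /* ... copy out the stats words ... */
            rmb();
            generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
    } while (generation_start != generation_end);   /* torn write: retry */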
@@ -1839,7 +2004,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
__le64 generation_start, generation_end;
u64 *stats = nic_data->stats;
- u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+ u32 dma_len = efx->num_mac_stats * sizeof(u64);
struct efx_buffer stats_buf;
__le64 *dma_stats;
int rc;
@@ -1864,7 +2029,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
}
dma_stats = stats_buf.addr;
- dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
@@ -1883,7 +2048,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
goto out;
}
- generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ generation_end = dma_stats[efx->num_mac_stats - 1];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
WARN_ON_ONCE(1);
goto out;
@@ -1951,8 +2116,9 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
} else {
unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
- EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
- ERF_DZ_TC_TIMER_VAL, ticks);
+ EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, ticks,
+ ERF_FZ_TC_TMR_REL_VAL, ticks);
efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
channel->channel);
}
@@ -2263,12 +2429,25 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
int i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+ /* Only attempt to enable TX timestamping if we have the license for it,
+ * otherwise TXQ init will fail
+ */
+ if (!(nic_data->licensed_features &
+ (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
+ tx_queue->timestamping = false;
+ /* Disable sync events on this channel. */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, false);
+ }
+
/* TSOv2 is a limited resource that can only be configured on a limited
* number of queues. TSO without checksum offload is not really a thing,
* so we only enable it for those queues.
+ * TSOv2 cannot be used with hardware timestamping.
*/
if (csum_offload && (nic_data->datapath_caps2 &
- (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) {
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
+ !tx_queue->timestamping) {
tso_v2 = true;
netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
channel->channel);
@@ -2294,14 +2473,16 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
do {
- MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS,
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
/* This flag was removed from mcdi_pcol.h for
* the non-_EXT version of INIT_TXQ. However,
* firmware still honours it.
*/
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
- INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
+ tx_queue->timestamping);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
NULL, 0, NULL);
@@ -2327,12 +2508,13 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
tx_queue->insert_count = 1;
txd = efx_tx_desc(tx_queue, 0);
- EFX_POPULATE_QWORD_4(*txd,
+ EFX_POPULATE_QWORD_5(*txd,
ESF_DZ_TX_DESC_IS_OPT, true,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
- ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
+ ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
tx_queue->write_count = 1;
if (tso_v2) {
@@ -3233,8 +3415,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
- (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
- rx_l4_class != ESE_DZ_L4_CLASS_UDP))))
+ (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+ rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
netdev_WARN(efx->net_dev,
"invalid class for RX_TCPUDP_CKSUM_ERR: event="
EFX_QWORD_FMT "\n",
@@ -3271,8 +3453,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
EFX_QWORD_VAL(*event));
else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
- (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
- rx_l4_class != ESE_DZ_L4_CLASS_UDP)))
+ (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+ rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
netdev_WARN(efx->net_dev,
"invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
EFX_QWORD_FMT "\n",
@@ -3307,7 +3489,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
- rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+ rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
rx_encap_hdr =
nic_data->datapath_caps &
@@ -3385,8 +3567,8 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
rx_l3_class, rx_l4_class,
event);
} else {
- bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
- rx_l4_class == ESE_DZ_L4_CLASS_UDP;
+ bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
+ rx_l4_class == ESE_FZ_L4_CLASS_UDP;
switch (rx_encap_hdr) {
case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
@@ -3407,7 +3589,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
}
}
- if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+ if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
flags |= EFX_RX_PKT_TCP;
channel->irq_mod_score += 2 * n_packets;
@@ -3427,31 +3609,92 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
return n_packets;
}
-static int
+static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
+{
+ u32 tstamp;
+
+ tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
+ tstamp <<= 16;
+ tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
+
+ return tstamp;
+}
+
+static void
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_tx_queue *tx_queue;
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
- int tx_descs = 0;
+ unsigned int tx_ev_type;
+ u64 ts_part;
if (unlikely(READ_ONCE(efx->reset_pending)))
- return 0;
+ return;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
- return 0;
+ return;
- /* Transmit completion */
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ /* Get the transmit queue */
tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
tx_queue = efx_channel_get_tx_queue(channel,
tx_ev_q_label % EFX_TXQ_TYPES);
- tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
- tx_queue->ptr_mask);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
- return tx_descs;
+ if (!tx_queue->timestamping) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+ return;
+ }
+
+ /* Transmit timestamps are only available on the 8XXX series. They
+ * result in three events per packet. These occur in order, and are:
+ * - the normal completion event
+ * - the low part of the timestamp
+ * - the high part of the timestamp
+ *
+ * Each part of the timestamp is itself split across two 16-bit
+ * fields in the event.
+ */
+ tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
+
+ switch (tx_ev_type) {
+ case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
+ /* In case of Queue flush or FLR, we might have received
+ * the previous TX completion event but not the Timestamp
+ * events.
+ */
+ if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
+ efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
+
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
+ ESF_DZ_TX_DESCR_INDX);
+ tx_queue->completed_desc_ptr =
+ tx_ev_desc_ptr & tx_queue->ptr_mask;
+ break;
+
+ case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
+ ts_part = efx_ef10_extract_event_ts(event);
+ tx_queue->completed_timestamp_minor = ts_part;
+ break;
+
+ case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
+ ts_part = efx_ef10_extract_event_ts(event);
+ tx_queue->completed_timestamp_major = ts_part;
+
+ efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ break;
+
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown tx event type %d (data "
+ EFX_QWORD_FMT ")\n",
+ channel->channel, tx_ev_type,
+ EFX_QWORD_VAL(*event));
+ break;
+ }
}
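Note how tx_queue->ptr_mask doubles as a "no completion pending" sentinel for completed_desc_ptr. A condensed sketch of the resulting flow (desc_ptr and ts_half stand in for the fields decoded above; not a drop-in):

    switch (tx_ev_type) {
    case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
            /* A pending pointer here means the LO/HI events were lost
             * (queue flush or FLR): complete the old descriptor first.
             */
            if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
                    efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
            tx_queue->completed_desc_ptr = desc_ptr & tx_queue->ptr_mask;
            break;
    case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
            tx_queue->completed_timestamp_minor = ts_half;
            break;
    case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
            tx_queue->completed_timestamp_major = ts_half;
            /* Third event: both halves known, release the skb and
             * re-arm the sentinel.
             */
            efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
            tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
            break;
    }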
static void
@@ -3513,7 +3756,6 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
efx_qword_t event, *p_event;
unsigned int read_ptr;
int ev_code;
- int tx_descs = 0;
int spent = 0;
if (quota <= 0)
@@ -3553,13 +3795,7 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
}
break;
case ESE_DZ_EV_CODE_TX_EV:
- tx_descs += efx_ef10_handle_tx_event(channel, &event);
- if (tx_descs > efx->txq_entries) {
- spent = quota;
- goto out;
- } else if (++spent == quota) {
- goto out;
- }
+ efx_ef10_handle_tx_event(channel, &event);
break;
case ESE_DZ_EV_CODE_DRIVER_EV:
efx_ef10_handle_driver_event(channel, &event);
@@ -6034,7 +6270,8 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
efx_ef10_rx_enable_timestamping :
efx_ef10_rx_disable_timestamping;
- efx_for_each_channel(channel, efx) {
+ channel = efx_ptp_channel(efx);
+ if (channel) {
int rc = set(channel, temp);
if (en && rc != 0) {
efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
@@ -6392,7 +6629,7 @@ out_unlock:
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.is_vf = true,
- .mem_bar = EFX_MEM_VF_BAR,
+ .mem_bar = efx_ef10_vf_mem_bar,
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe_vf,
.remove = efx_ef10_remove,
@@ -6500,7 +6737,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
const struct efx_nic_type efx_hunt_a0_nic_type = {
.is_vf = false,
- .mem_bar = EFX_MEM_BAR,
+ .mem_bar = efx_ef10_pf_mem_bar,
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe_pf,
.remove = efx_ef10_remove,
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 2c4bf9476c37..6a56778cf06c 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
- * Copyright 2012-2015 Solarflare Communications Inc.
+ * Copyright 2012-2017 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -79,6 +79,8 @@
#define ER_DZ_EVQ_TMR 0x00000420
#define ER_DZ_EVQ_TMR_STEP 8192
#define ER_DZ_EVQ_TMR_ROWS 2048
+#define ERF_FZ_TC_TMR_REL_VAL_LBN 16
+#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14
#define ERF_DZ_TC_TIMER_MODE_LBN 14
#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
#define ERF_DZ_TC_TIMER_VAL_LBN 0
@@ -159,16 +161,24 @@
#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
-#define ESF_DZ_RX_L4_CLASS_LBN 45
-#define ESF_DZ_RX_L4_CLASS_WIDTH 3
-#define ESE_DZ_L4_CLASS_RSVD7 7
-#define ESE_DZ_L4_CLASS_RSVD6 6
-#define ESE_DZ_L4_CLASS_RSVD5 5
-#define ESE_DZ_L4_CLASS_RSVD4 4
-#define ESE_DZ_L4_CLASS_RSVD3 3
-#define ESE_DZ_L4_CLASS_UDP 2
-#define ESE_DZ_L4_CLASS_TCP 1
-#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DE_RX_L4_CLASS_LBN 45
+#define ESF_DE_RX_L4_CLASS_WIDTH 3
+#define ESE_DE_L4_CLASS_RSVD7 7
+#define ESE_DE_L4_CLASS_RSVD6 6
+#define ESE_DE_L4_CLASS_RSVD5 5
+#define ESE_DE_L4_CLASS_RSVD4 4
+#define ESE_DE_L4_CLASS_RSVD3 3
+#define ESE_DE_L4_CLASS_UDP 2
+#define ESE_DE_L4_CLASS_TCP 1
+#define ESE_DE_L4_CLASS_UNKNOWN 0
+#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47
+#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1
+#define ESF_FZ_RX_L4_CLASS_LBN 45
+#define ESF_FZ_RX_L4_CLASS_WIDTH 2
+#define ESE_FZ_L4_CLASS_RSVD3 3
+#define ESE_FZ_L4_CLASS_UDP 2
+#define ESE_FZ_L4_CLASS_TCP 1
+#define ESE_FZ_L4_CLASS_UNKNOWN 0
#define ESF_DZ_RX_L3_CLASS_LBN 42
#define ESF_DZ_RX_L3_CLASS_WIDTH 3
#define ESE_DZ_L3_CLASS_RSVD7 7
@@ -215,6 +225,8 @@
#define ESF_EZ_RX_ABORT_WIDTH 1
#define ESF_DZ_RX_ECC_ERR_LBN 29
#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_TRUNC_ERR_LBN 29
+#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1
#define ESF_DZ_RX_CRC1_ERR_LBN 28
#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
#define ESF_DZ_RX_CRC0_ERR_LBN 27
@@ -332,6 +344,8 @@
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
@@ -341,7 +355,7 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_TSO_FATSO2A_DESC */
+/* TX_TSO_V2_DESC_A */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -360,8 +374,7 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-
-/* TX_TSO_FATSO2B_DESC */
+/* TX_TSO_V2_DESC_B */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -375,11 +388,10 @@
#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 0
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
-
+#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0
+#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16
/*************************************************************************/
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e3c492fcaff0..16757cfc5b29 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -27,6 +27,7 @@
#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
+#include "io.h"
#include "selftest.h"
#include "sriov.h"
@@ -895,12 +896,20 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
static const struct efx_channel_type efx_default_channel_type = {
.pre_probe = efx_channel_dummy_op_int,
.post_remove = efx_channel_dummy_op_void,
.get_name = efx_get_channel_name,
.copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
.keep_eventq = false,
+ .want_pio = true,
};
int efx_channel_dummy_op_int(struct efx_channel *channel)
@@ -952,31 +961,42 @@ void efx_link_status_changed(struct efx_nic *efx)
netif_info(efx, link, efx->net_dev, "link down\n");
}
-void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising)
{
- efx->link_advertising = advertising;
- if (advertising) {
- if (advertising & ADVERTISED_Pause)
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
- else
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
- if (advertising & ADVERTISED_Asym_Pause)
- efx->wanted_fc ^= EFX_FC_TX;
- }
+ memcpy(efx->link_advertising, advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ efx->link_advertising[0] |= ADVERTISED_Autoneg;
+ if (advertising[0] & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising[0] & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+}
+
+/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+ bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}
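With link_advertising now an ethtool link-mode bitmap, callers build masks with the standard bitmap helpers. A hypothetical caller (the modes chosen are only examples):

    __ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};

    __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, adv);
    __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, adv);
    efx_link_set_advertising(efx, adv);   /* also forces Autoneg on */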
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
efx->wanted_fc = wanted_fc;
- if (efx->link_advertising) {
+ if (efx->link_advertising[0]) {
if (wanted_fc & EFX_FC_RX)
- efx->link_advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
else
- efx->link_advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
if (wanted_fc & EFX_FC_TX)
- efx->link_advertising ^= ADVERTISED_Asym_Pause;
+ efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
}
}
@@ -1248,7 +1268,7 @@ static int efx_init_io(struct efx_nic *efx)
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
- bar = efx->type->mem_bar;
+ bar = efx->type->mem_bar(efx);
rc = pci_enable_device(pci_dev);
if (rc) {
@@ -1323,7 +1343,7 @@ static void efx_fini_io(struct efx_nic *efx)
}
if (efx->membase_phys) {
- bar = efx->type->mem_bar;
+ bar = efx->type->mem_bar(efx);
pci_release_region(efx->pci_dev, bar);
efx->membase_phys = 0;
}
@@ -1489,6 +1509,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* Assign extra channels if possible */
+ efx->n_extra_tx_channels = 0;
j = efx->n_channels;
for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
if (!efx->extra_channel_type[i])
@@ -1500,6 +1521,8 @@ static int efx_probe_interrupts(struct efx_nic *efx)
--j;
efx_get_channel(efx, j)->type =
efx->extra_channel_type[i];
+ if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+ efx->n_extra_tx_channels++;
}
}
@@ -2909,6 +2932,10 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
+ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{0} /* end of list */
};
@@ -2977,6 +3004,9 @@ static int efx_init_struct(struct efx_nic *efx,
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
+ efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+ efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 52c84b782901..0cddc5ad77b1 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,11 +14,6 @@
#include "net_driver.h"
#include "filter.h"
-/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
-/* All VFs use BAR 0/1 for memory */
-#define EFX_MEM_BAR 2
-#define EFX_MEM_VF_BAR 0
-
int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);
@@ -263,7 +258,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
}
void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
static inline void efx_device_detach_sync(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 3747b5644110..4db2dc2bf52f 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -720,7 +720,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
goto out;
}
- if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+ if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
netif_dbg(efx, drv, efx->net_dev,
"Autonegotiation is disabled\n");
rc = -EINVAL;
@@ -732,10 +732,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
(wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
efx->type->prepare_enable_fc_tx(efx);
- old_adv = efx->link_advertising;
+ old_adv = efx->link_advertising[0];
old_fc = efx->wanted_fc;
efx_link_set_wanted_fc(efx, wanted_fc);
- if (efx->link_advertising != old_adv ||
+ if (efx->link_advertising[0] != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx->phy_op->reconfigure(efx);
if (rc) {
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 5334dc83d926..266b9bee1f3a 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -818,17 +818,16 @@ static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
-static int
+static void
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
- int tx_packets = 0;
if (unlikely(READ_ONCE(efx->reset_pending)))
- return 0;
+ return;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
@@ -836,8 +835,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
- tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
- tx_queue->ptr_mask);
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -856,8 +853,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
-
- return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -1090,7 +1085,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
int qid;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
- if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
qid % EFX_TXQ_TYPES);
if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
@@ -1270,7 +1265,6 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
- int tx_packets = 0;
int spent = 0;
if (budget <= 0)
@@ -1304,12 +1298,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
- tx_packets += efx_farch_handle_tx_event(channel,
- &event);
- if (tx_packets > efx->txq_entries) {
- spent = budget;
- goto out;
- }
+ efx_farch_handle_tx_event(channel, &event);
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
efx_farch_handle_generated_event(channel, &event);
@@ -1680,20 +1669,21 @@ void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
*/
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
- unsigned vi_count, buftbl_min;
+ unsigned vi_count, buftbl_min, total_tx_channels;
#ifdef CONFIG_SFC_SRIOV
struct siena_nic_data *nic_data = efx->nic_data;
#endif
+ total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
efx->n_channels * EFX_MAX_EVQ_SIZE)
* sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
if (efx->type->sriov_wanted) {
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index afb94aa2c15e..89563170af52 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -222,18 +222,21 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Page size used as step between per-VI registers */
-#define EFX_VI_PAGE_SIZE 0x2000
+/* default VI stride (step between per-VI registers) is 8K */
+#define EFX_DEFAULT_VI_STRIDE 0x2000
/* Calculate offset to page-mapped register */
-#define EFX_PAGED_REG(page, reg) \
- ((page) * EFX_VI_PAGE_SIZE + (reg))
+static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
+ unsigned int reg)
+{
+ return page * efx->vi_stride + reg;
+}
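A quick check of the new helper against a register defined in this patch, assuming the default 8 KiB stride:

    /* Event-queue timer register of VI 5:
     *   5 * 8192 + ER_DZ_EVQ_TMR (0x420) == 0xa420
     */
    unsigned int addr = efx_paged_reg(efx, 5, ER_DZ_EVQ_TMR);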
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int page)
{
- reg = EFX_PAGED_REG(page, reg);
+ reg = efx_paged_reg(efx, page, reg);
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EFX_OWORD_FMT "\n", reg,
@@ -262,7 +265,7 @@ static inline void
_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg, unsigned int page)
{
- efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
@@ -288,10 +291,10 @@ static inline void _efx_writed_page_locked(struct efx_nic *efx,
if (page == 0) {
spin_lock_irqsave(&efx->biu_lock, flags);
- efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
spin_unlock_irqrestore(&efx->biu_lock, flags);
} else {
- efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
}
#define efx_writed_page_locked(efx, value, reg, page) \
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 154ef41d1927..ebd95972ae7b 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -208,6 +208,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
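MCDI_BYTE mirrors the existing MCDI_WORD/MCDI_DWORD accessors for one-byte fields; the BUILD_BUG_ON_ZERO term makes applying it to a field whose length is not 1 a compile-time error. Its first user is in this same patch:

    u8 vi_window_mode = MCDI_BYTE(outbuf,
                                  GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);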
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 91fb54fd03d9..869d76f8f589 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -114,6 +114,8 @@
#define MCDI_HEADER_XFLAGS_WIDTH 8
/* Request response using event */
#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
/* Maximum number of payload bytes */
#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
@@ -123,7 +125,7 @@
/* The MC can generate events for two reasons:
- * - To complete a shared memory request if XFLAGS_EVREQ was set
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
* - As a notification (link state, i2c event), controlled
* via MC_CMD_LOG_CTRL
*
@@ -279,6 +281,17 @@
/* Returned by MC_CMD_TESTASSERT if the action that should
* have caused an assertion failed to do so. */
#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
#define MC_CMD_ERR_CODE_OFST 0
@@ -360,6 +373,7 @@
/* enum: Fatal. */
#define MCDI_EVENT_LEVEL_FATAL 0x3
#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
@@ -370,6 +384,8 @@
#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
/* enum: 100Mbs */
#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
/* enum: 1Gbs */
@@ -378,6 +394,12 @@
#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
/* enum: 40Gbs */
#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -456,8 +478,63 @@
#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
/* enum: PTP status update */
#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that an invalid flash type was detected */
+#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define MCDI_EVENT_AOE_FC_RUNNING 0x14
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by the driver
+ */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
@@ -480,6 +557,22 @@
#define MCDI_EVENT_MUM_WATCHDOG 0x3
#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DBRET_SEQ_LBN 0
+#define MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define MCDI_EVENT_SUC_WATCHDOG 0x4
+#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -552,73 +645,99 @@
* been processed and it may now resend the command
*/
#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define MCDI_EVENT_CODE_SUC 0x1f
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
#define MCDI_EVENT_CODE_TESTGEN 0xfa
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LEN 4
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
* timestamp
*/
#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LEN 4
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
* timestamp
*/
#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LEN 4
#define MCDI_EVENT_PTP_MAJOR_LBN 0
#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
* of timestamp
*/
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
* timestamp
*/
#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LEN 4
#define MCDI_EVENT_PTP_MINOR_LBN 0
#define MCDI_EVENT_PTP_MINOR_WIDTH 32
/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
*/
#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LEN 4
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LEN 4
#define MCDI_EVENT_RX_ERR_DATA_LBN 0
#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LEN 4
#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
/* For CODE_PTP_TIME events, the major value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
/* For CODE_PTP_TIME events where report sync status is enabled, indicates
* whether the NIC clock has ever been set
*/
@@ -634,10 +753,17 @@
*/
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
/* Zero means that the request has been completed or authorized, and the driver
@@ -646,6 +772,10 @@
*/
#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define MCDI_EVENT_DBRET_DATA_OFST 0
+#define MCDI_EVENT_DBRET_DATA_LEN 4
+#define MCDI_EVENT_DBRET_DATA_LBN 0
+#define MCDI_EVENT_DBRET_DATA_WIDTH 32
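
/* For illustration only (not part of the generated definitions): each
 * LBN/WIDTH pair gives a field's bit offset and size within the 64-bit
 * event. A minimal sketch of the usual extraction, assuming the event has
 * been loaded into a native-endian u64:
 *
 *   static inline u32 mcdi_ev_field(u64 ev, unsigned int lbn,
 *                                   unsigned int width)
 *   {
 *           return (u32)((ev >> lbn) & ((1ULL << width) - 1));
 *   }
 *
 *   u32 rc = mcdi_ev_field(ev, MCDI_EVENT_PROXY_RESPONSE_RC_LBN,
 *                          MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH);
 */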
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -662,6 +792,7 @@
/* enum: Fatal. */
#define FCDI_EVENT_LEVEL_FATAL 0x3
#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
@@ -701,6 +832,7 @@
#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
#define FCDI_EVENT_ASSERT_TYPE_LBN 36
@@ -708,12 +840,15 @@
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
@@ -722,6 +857,7 @@
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
/* Index of MC port being referred to */
@@ -729,9 +865,11 @@
#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
/* FC Port index that matches the MC port index in SRC */
#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
#define FCDI_EVENT_BOOT_RESULT_LBN 0
@@ -748,14 +886,17 @@
#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
/* Seconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
/* Nanoseconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
/* Timestamp records comprising the event */
@@ -783,6 +924,7 @@
/* enum: Fatal. */
#define MUM_EVENT_LEVEL_FATAL 0x3
#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_DATA_LEN 4
#define MUM_EVENT_SENSOR_ID_LBN 0
#define MUM_EVENT_SENSOR_ID_WIDTH 8
/* Enum values, see field(s): */
@@ -820,18 +962,23 @@
/* enum: Link fault has been asserted, or has cleared. */
#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LEN 4
#define MUM_EVENT_SENSOR_DATA_LBN 0
#define MUM_EVENT_SENSOR_DATA_WIDTH 32
#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_TECH_LEN 4
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
@@ -864,7 +1011,9 @@
/* MC_CMD_READ32_IN msgrequest */
#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_ADDR_LEN 4
#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_IN_NUMWORDS_LEN 4
/* MC_CMD_READ32_OUT msgresponse */
#define MC_CMD_READ32_OUT_LENMIN 4
@@ -882,13 +1031,14 @@
*/
#define MC_CMD_WRITE32 0x2
-#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_WRITE32_IN msgrequest */
#define MC_CMD_WRITE32_IN_LENMIN 8
#define MC_CMD_WRITE32_IN_LENMAX 252
#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
@@ -915,6 +1065,7 @@
* is a bitfield, with each bit as documented below.
*/
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
@@ -940,9 +1091,12 @@
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
/* Address of where to jump after copy. */
#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
/* enum: Control should return to the caller rather than jumping */
#define MC_CMD_COPYCODE_JUMP_NONE 0x1
@@ -956,12 +1110,13 @@
*/
#define MC_CMD_SET_FUNC 0x4
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
/* Set function */
#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
/* MC_CMD_SET_FUNC_OUT msgresponse */
#define MC_CMD_SET_FUNC_OUT_LEN 0
@@ -973,7 +1128,7 @@
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
-#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -982,9 +1137,11 @@
#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
/* ?? */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
/* enum: Indicates that the MC wasn't flash booted */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
@@ -1007,11 +1164,13 @@
#define MC_CMD_GET_ASSERTS_IN_LEN 4
/* Set to clear assertion */
#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
/* MC_CMD_GET_ASSERTS_OUT msgresponse */
#define MC_CMD_GET_ASSERTS_OUT_LEN 140
/* Assertion status flag. */
#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
/* enum: No assertions have failed. */
#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
/* enum: A system-level assertion has failed. */
@@ -1024,6 +1183,7 @@
#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
/* Failing PC value */
#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
/* Saved GP regs */
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
@@ -1034,7 +1194,9 @@
#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
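
/* For illustration only: a minimal sketch of checking this response for a
 * failed assertion, assuming 'outbuf' holds the little-endian response and
 * le32_at() is a hypothetical helper reading a 32-bit LE word at an offset:
 *
 *   u32 flags = le32_at(outbuf, MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST);
 *   if (flags != MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
 *           pr_err("MC assert (flags 0x%x) at PC 0x%x\n", flags,
 *                  le32_at(outbuf, MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST));
 */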
/***********************************/
@@ -1050,12 +1212,14 @@
#define MC_CMD_LOG_CTRL_IN_LEN 8
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
/* Legacy argument. Must be zero. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
#define MC_CMD_LOG_CTRL_OUT_LEN 0
@@ -1076,23 +1240,29 @@
#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
/* placeholder, set to 0 */
#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
/* enum: Reserved version number to indicate "any" version. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
/* enum: Bootrom version value for Siena. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
/* enum: Bootrom version value for Huntington. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1104,9 +1274,11 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1136,41 +1308,54 @@
#define MC_CMD_PTP_OP_ENABLE 0x1
/* enum: Disable PTP packet timestamping operation. */
#define MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
#define MC_CMD_PTP_OP_TRANSMIT 0x3
/* enum: Read the current NIC time. */
#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded down to the nearest MHz (e.g. 666000000 for 666666666).
+ */
#define MC_CMD_PTP_OP_STATUS 0x5
/* enum: Adjust the PTP NIC's time. */
#define MC_CMD_PTP_OP_ADJUST 0x6
/* enum: Synchronize host and NIC time. */
#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
/* enum: Reset some of the PTP related statistics */
#define MC_CMD_PTP_OP_RESET_STATS 0xa
/* enum: Debug operations to MC. */
#define MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAWRITE 0xd
/* enum: Apply an offset to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
@@ -1191,7 +1376,7 @@
/* enum: Unsubscribe to stop receiving time events */
#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
*/
#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Set the PTP sync status. Status is used by firmware to report to event
@@ -1204,11 +1389,15 @@
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function-relative queue 0. */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
/* enum: PTP, version 1 */
#define MC_CMD_PTP_MODE_V1 0x0
/* enum: PTP, version 1, with VLAN headers - deprecated */
@@ -1225,16 +1414,21 @@
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Transmit packet length */
#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
/* Transmit packet data */
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
@@ -1244,17 +1438,30 @@
/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_STATUS msgrequest */
#define MC_CMD_PTP_IN_STATUS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_ADJUST msgrequest */
#define MC_CMD_PTP_IN_ADJUST_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
@@ -1262,21 +1469,67 @@
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
/* enum: Number of fractional bits in frequency adjustment */
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
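
/* For illustration only: FREQ is a signed fixed-point drift rate with
 * MC_CMD_PTP_IN_ADJUST_BITS fractional bits (or ADJUST_BITS_FP44 when the
 * FP44_FREQ_ADJ capability is advertised). A sketch of encoding a
 * parts-per-billion adjustment:
 *
 *   static s64 ptp_ppb_to_freq(s64 ppb, unsigned int frac_bits)
 *   {
 *           return div_s64(ppb << frac_bits, 1000000000LL);
 *   }
 *
 * The shift bounds |ppb| to roughly 2^(63 - frac_bits); a production
 * encoder would split the multiply to avoid overflow at large adjustments.
 */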
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of time readings to capture */
#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
/* Host address in which to write "synchronization started" indication (64
* bits)
*/
@@ -1288,42 +1541,59 @@
/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Enable or disable packet testing */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Reset PTP statistics */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_DEBUG msgrequest */
#define MC_CMD_PTP_IN_DEBUG_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Debug operations */
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
@@ -1332,34 +1602,67 @@
/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* Enum values, see field(s): */
+/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of VLAN tags, 0 if not VLAN */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
/* Set of VLAN tags to filter against */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
@@ -1368,9 +1671,12 @@
/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable UUID filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
/* UUID to filter against */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
@@ -1380,18 +1686,25 @@
/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable Domain filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
/* Domain number to filter against */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Set the clock source. */
#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
/* enum: Internal. */
#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
/* enum: External. */
@@ -1400,42 +1713,56 @@
/* MC_CMD_PTP_IN_RST_CLK msgrequest */
#define MC_CMD_PTP_IN_RST_CLK_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Reset value of Timer Reg. */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Enable or disable */
#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
/* enum: Enable */
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queue id to send events back */
+/* Not used. Events are always sent to function-relative queue 0. */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Original field containing queue ID. Now extended to include flags. */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
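
/* For illustration only: with the LBN/WIDTH pairs above, the QUEUE word
 * may be composed as:
 *
 *   u32 queue_word = (queue_id & 0xffff) |
 *           (report_sync ?
 *            1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN :
 *            0);
 */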
@@ -1444,29 +1771,39 @@
/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Unsubscribe options */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
/* enum: Unsubscribe a single queue */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
/* enum: Unsubscribe all queues */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
/* Event queue ID */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable PPS test mode, 0 to disable and return result. */
#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* NIC - Host System Clock Synchronization status */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
/* enum: Host System clock and NIC clock are not in sync */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
/* enum: Host System clock and NIC clock are synchronized */
@@ -1475,8 +1812,11 @@
* no longer in sync.
*/
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1485,12 +1825,16 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
@@ -1502,47 +1846,85 @@
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
/* Frequency of NIC's hardware clock */
#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
/* Number of packets transmitted and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
/* Number of packets received and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
/* Number of packets timestamped by the FPGA */
#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
/* Number of packets filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
/* Number of packets not filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
/* Number of PPS overflows (noise on input?) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
@@ -1555,23 +1937,31 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
/* Host time immediately before NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
/* Number of nanoseconds waited after reading NIC's hardware clock */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
/* enum: Successful test */
#define MC_CMD_PTP_MANF_SUCCESS 0x0
/* enum: FPGA load failed */
@@ -1604,15 +1994,19 @@
#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
/* Number of packets received by FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
@@ -1628,9 +2022,11 @@
/* Time format required/used by this NIC. Applies to all PTP MCDI
* operations that pass times between the host and firmware. If this operation
* is not supported (older firmware) a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it; use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
*/
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
@@ -1646,12 +2042,16 @@
* be assumed.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
/* enum: Major register has units of seconds, minor 2^-27s per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
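
/* For illustration only: a sketch of converting the sub-second (MINOR)
 * value to nanoseconds for each TIME_FORMAT above, using the units stated
 * in the enum comments:
 *
 *   static u32 ptp_minor_to_ns(u32 fmt, u32 minor)
 *   {
 *           switch (fmt) {
 *           case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
 *                   return minor;
 *           case MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS:
 *                   return minor * 8;
 *           case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
 *                   return (u32)(((u64)minor * 1000000000) >> 27);
 *           case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
 *                   return minor / 4;
 *           }
 *           return 0;
 *   }
 */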
/* Minimum acceptable value for a corrected synchronization timeset. When
* comparing host and NIC clock times, the MC returns a set of samples that
* contain the host start and end time, the MC time when the host start was
@@ -1660,46 +2060,66 @@
* end and start times minus the time that the MC waited for host end.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
/* Various PTP capabilities */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
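
/* For illustration only: following the SYNC_WINDOW_MIN description above,
 * a sample from MC_CMD_PTP_OUT_SYNCHRONIZE is typically accepted when
 *
 *   u32 window = hostend - hoststart - waitns;
 *
 * satisfies window >= sync_window_min, where hostend, hoststart and waitns
 * come from one TIMESET entry.
 */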
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
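
/* For illustration only: these corrections are signed offsets in NIC clock
 * units; a driver would typically add the relevant value to each raw
 * timestamp it reads, e.g. (sketch, names local to this example):
 *
 *   rx_minor += (s32)receive_correction;
 */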
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
@@ -1713,14 +2133,17 @@
*/
#define MC_CMD_CSR_READ32 0xc
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
/* Address */
#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
/* MC_CMD_CSR_READ32_OUT msgresponse */
#define MC_CMD_CSR_READ32_OUT_LENMIN 4
@@ -1739,7 +2162,7 @@
*/
#define MC_CMD_CSR_WRITE32 0xd
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_WRITE32_IN msgrequest */
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
@@ -1747,7 +2170,9 @@
#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
/* Address */
#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
@@ -1756,6 +2181,7 @@
/* MC_CMD_CSR_WRITE32_OUT msgresponse */
#define MC_CMD_CSR_WRITE32_OUT_LEN 4
#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -1776,6 +2202,7 @@
* sensors.
*/
#define MC_CMD_HP_IN_SUBCMD_OFST 0
+#define MC_CMD_HP_IN_SUBCMD_LEN 4
/* enum: OCSD (Option Card Sensor Data) sub-command. */
#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
/* enum: Last known valid HP sub-command. */
@@ -1790,10 +2217,12 @@
* NULL.)
*/
#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
/* MC_CMD_HP_OUT msgresponse */
#define MC_CMD_HP_OUT_LEN 4
#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
/* enum: OCSD stopped for this card. */
#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
/* enum: OCSD was successfully started with the address provided. */
@@ -1838,29 +2267,35 @@
* external devices.
*/
#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
/* enum: Internal. */
#define MC_CMD_MDIO_BUS_INTERNAL 0x0
/* enum: External. */
#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
/* Port address */
#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations use clause45 mode. If you
* want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
#define MC_CMD_MDIO_CLAUSE22 0x20
/* Address */
#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
/* MC_CMD_MDIO_READ_OUT msgresponse */
#define MC_CMD_MDIO_READ_OUT_LEN 8
/* Value */
#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
/* Status. The MDIO commands return the raw status bits from the MDIO block. A
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
/* enum: Good. */
#define MC_CMD_MDIO_STATUS_GOOD 0x8
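
/* For illustration only: a "good" transaction is typically checked as:
 *
 *   if (status != MC_CMD_MDIO_STATUS_GOOD)
 *           return -EIO;
 */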
@@ -1879,22 +2314,27 @@
* external devices.
*/
#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
/* enum: Internal. */
/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
/* enum: External. */
/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
/* Port address */
#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations use clause45 mode. If you
* want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
/* MC_CMD_MDIO_CLAUSE22 0x20 */
/* Address */
#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
/* MC_CMD_MDIO_WRITE_OUT msgresponse */
#define MC_CMD_MDIO_WRITE_OUT_LEN 4
@@ -1902,6 +2342,7 @@
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
/* enum: Good. */
/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
@@ -1912,7 +2353,7 @@
*/
#define MC_CMD_DBI_WRITE 0x12
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_WRITE_IN msgrequest */
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
@@ -1932,9 +2373,11 @@
/* MC_CMD_DBIWROP_TYPEDEF structuredef */
#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -1944,6 +2387,7 @@
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
@@ -1959,13 +2403,16 @@
#define MC_CMD_PORT_READ32_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ32_OUT msgresponse */
#define MC_CMD_PORT_READ32_OUT_LEN 8
/* Value */
#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
/* Status */
#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
/***********************************/
@@ -1979,13 +2426,16 @@
#define MC_CMD_PORT_WRITE32_IN_LEN 8
/* Address */
#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
/* MC_CMD_PORT_WRITE32_OUT msgresponse */
#define MC_CMD_PORT_WRITE32_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -1999,6 +2449,7 @@
#define MC_CMD_PORT_READ128_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ128_OUT msgresponse */
#define MC_CMD_PORT_READ128_OUT_LEN 20
@@ -2007,6 +2458,7 @@
#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
/* Status */
#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
/***********************************/
@@ -2020,6 +2472,7 @@
#define MC_CMD_PORT_WRITE128_IN_LEN 20
/* Address */
#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
@@ -2028,6 +2481,7 @@
#define MC_CMD_PORT_WRITE128_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
/* MC_CMD_CAPABILITIES structuredef */
#define MC_CMD_CAPABILITIES_LEN 4
@@ -2072,24 +2526,54 @@
#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-/* See MC_CMD_CAPABILITIES */
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-/* This field contains a 16-bit value for each of the types of NVRAM area. The
- * values are defined in the firmware/mc/platform/.c file for a specific board
- * type, but otherwise have no meaning to the MC; they are used by the driver
- * to manage selection of appropriate firmware updates.
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
*/
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
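
/* For illustration only: a sketch of reading the subtype for one NVRAM
 * type from the little-endian response buffer 'outbuf':
 *
 *   static u16 fw_subtype(const u8 *outbuf, unsigned int nvram_type)
 *   {
 *           const u8 *p = outbuf +
 *                   MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
 *                   nvram_type * MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN;
 *           return (u16)p[0] | ((u16)p[1] << 8);
 *   }
 */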
@@ -2103,7 +2587,7 @@
*/
#define MC_CMD_DBI_READX 0x19
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_READX_IN msgrequest */
#define MC_CMD_DBI_READX_IN_LENMIN 8
@@ -2130,9 +2614,11 @@
/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -2149,7 +2635,7 @@
*/
#define MC_CMD_SET_RAND_SEED 0x1a
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
@@ -2198,14 +2684,17 @@
#define MC_CMD_DRV_ATTACH_IN_LEN 12
/* new state to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
#define MC_CMD_DRV_ATTACH_LBN 0
#define MC_CMD_DRV_ATTACH_WIDTH 1
#define MC_CMD_DRV_PREBOOT_LBN 1
#define MC_CMD_DRV_PREBOOT_WIDTH 1
/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
/* preferred datapath firmware (for Huntington; ignored for Siena) */
#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
/* enum: Prefer to use full featured firmware */
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
@@ -2229,13 +2718,16 @@
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -2260,6 +2752,7 @@
#define MC_CMD_SHMUART_IN_LEN 4
/* ??? */
#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_FLAG_LEN 4
/* MC_CMD_SHMUART_OUT msgresponse */
#define MC_CMD_SHMUART_OUT_LEN 0
@@ -2297,6 +2790,7 @@
* (TBD).
*/
#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -2314,8 +2808,10 @@
#define MC_CMD_PCIE_CREDITS_IN_LEN 8
/* poll period. 0 is disabled */
#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
/* wipe statistics */
#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
@@ -2346,31 +2842,54 @@
/* MC_CMD_RXD_MONITOR_IN msgrequest */
#define MC_CMD_RXD_MONITOR_IN_LEN 12
#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
/* MC_CMD_RXD_MONITOR_OUT msgresponse */
#define MC_CMD_RXD_MONITOR_OUT_LEN 80
#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
/***********************************/
@@ -2379,13 +2898,14 @@
*/
#define MC_CMD_PUTS 0x23
-#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
#define MC_CMD_PUTS_IN_LENMAX 252
#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_DEST_LEN 4
#define MC_CMD_PUTS_IN_UART_LBN 0
#define MC_CMD_PUTS_IN_UART_WIDTH 1
#define MC_CMD_PUTS_IN_PORT_LBN 1
@@ -2417,6 +2937,7 @@
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
@@ -2433,8 +2954,10 @@
#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
#define MC_CMD_PHY_CAP_10HDX_LBN 1
#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
#define MC_CMD_PHY_CAP_10FDX_LBN 2
@@ -2459,17 +2982,39 @@
#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
#define MC_CMD_PHY_CAP_DDM_LBN 12
#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
/* enum: Xaui. */
#define MC_CMD_MEDIA_XAUI 0x1
/* enum: CX4. */
@@ -2485,6 +3030,7 @@
/* enum: QSFP+. */
#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -2515,6 +3061,7 @@
#define MC_CMD_START_BIST_IN_LEN 4
/* Type of test. */
#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
/* enum: Run the PHY's short cable BIST. */
#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
/* enum: Run the PHY's long cable BIST. */
@@ -2556,6 +3103,7 @@
#define MC_CMD_POLL_BIST_OUT_LEN 8
/* result */
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
/* enum: Running. */
#define MC_CMD_POLL_BIST_RUNNING 0x1
/* enum: Passed. */
@@ -2565,19 +3113,26 @@
/* enum: Timed-out. */
#define MC_CMD_POLL_BIST_TIMEOUT 0x4
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
/* Status of each channel A */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
/* enum: Ok. */
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
/* enum: Open. */
@@ -2590,14 +3145,17 @@
#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
/* Status of each channel B */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel C */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel D */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
@@ -2605,9 +3163,11 @@
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
/* enum: Complete. */
#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
/* enum: Bus switch off I2C write. */
@@ -2631,9 +3191,11 @@
#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
/* enum: Test has completed. */
#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
/* enum: RAM test - walk ones. */
@@ -2650,8 +3212,10 @@
#define MC_CMD_POLL_BIST_MEM_ECC 0x6
/* Failure address, only valid if result is POLL_BIST_FAILED */
#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
/* Bus or address space to which the failure address corresponds */
#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
/* enum: MC MIPS bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
/* enum: CSR IREG bus. */
@@ -2672,14 +3236,19 @@
#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
/* Pattern written to RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
/* Actual value read from RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
/* ECC error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
/* ECC parity error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
/* ECC fatal error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
/***********************************/
@@ -2831,6 +3400,143 @@
/* Enum values, see field(s): */
/* 100M */
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/* Enum values, see field(s): */
+/* 100M */
+
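Each V2 mask above is a 64-bit quantity stored as two little-endian dwords, LO at the stated offset and HI at offset + 4. A minimal sketch of reassembling one mask from a raw response buffer (mcdi_read_mask64 is a hypothetical helper; it assumes the buffer bytes are already in host order or the host is little-endian):

#include <stdint.h>
#include <string.h>

/* Sketch: read one 64-bit loopback mask from an MCDI response buffer,
 * given the LO dword offset (HI is always LO + 4).
 */
static uint64_t mcdi_read_mask64(const uint8_t *resp, unsigned int lo_ofst)
{
	uint32_t lo, hi;

	memcpy(&lo, resp + lo_ofst, sizeof(lo));	/* e.g. ..._V2_25G_LO_OFST */
	memcpy(&hi, resp + lo_ofst + 4, sizeof(hi));
	return (uint64_t)hi << 32 | lo;
}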
/***********************************/
/* MC_CMD_GET_LINK
@@ -2848,17 +3554,22 @@
#define MC_CMD_GET_LINK_OUT_LEN 28
/* near-side advertised capabilities */
#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_CAP_LEN 4
/* link-partner advertised capabilities */
#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
/* Autonegotiated speed in mbit/s. The link may still be down even if this
* reads non-zero.
*/
#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
/* Current loopback setting. */
#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
@@ -2873,9 +3584,11 @@
#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
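Flag dwords such as GET_LINK_OUT_FLAGS are decoded via the _LBN (lowest bit number) / _WIDTH pairs defined throughout this header. A small sketch of the extraction; mcdi_field is a hypothetical name (the in-tree driver has its own accessor macros for this):

#include <stdint.h>

/* Sketch: extract an LBN/WIDTH bitfield from a 32-bit MCDI dword. */
static inline uint32_t mcdi_field(uint32_t dword, unsigned int lbn,
				  unsigned int width)
{
	uint32_t mask = width < 32 ? (1u << width) - 1 : 0xffffffffu;

	return (dword >> lbn) & mask;
}

/* Usage, e.g. on the FLAGS dword of a GET_LINK response:
 *	link_up = mcdi_field(flags, MC_CMD_GET_LINK_OUT_LINK_UP_LBN,
 *			     MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH);
 */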
@@ -2899,8 +3612,10 @@
#define MC_CMD_SET_LINK_IN_LEN 16
/* ??? */
#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_CAP_LEN 4
/* Flags */
#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4
#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
@@ -2909,12 +3624,14 @@
#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
/* Loopback mode. */
#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
/* A loopback speed of "0" is supported, and means "choose any available
 * speed".
*/
#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -2932,6 +3649,7 @@
#define MC_CMD_SET_ID_LED_IN_LEN 4
/* Set LED state. */
#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4
#define MC_CMD_LED_OFF 0x0 /* enum */
#define MC_CMD_LED_ON 0x1 /* enum */
#define MC_CMD_LED_DEFAULT 0x2 /* enum */
@@ -2954,17 +3672,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
#define MC_CMD_FCNTL_OFF 0x0
/* enum: Respond to flow control. */
@@ -2978,6 +3700,7 @@
/* enum: Issue flow control. */
#define MC_CMD_FCNTL_GENERATE 0x5
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
@@ -2987,17 +3710,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
@@ -3011,6 +3738,7 @@
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
/* Select which parameters to configure. A parameter will only be modified if
@@ -3019,6 +3747,7 @@
* set).
*/
#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
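Under the CONTROL scheme, a SET_MAC_EXT request can update a single parameter by setting only that parameter's CFG_ bit and leaving the rest clear. A hedged sketch that changes just the MTU, writing raw dwords at the offsets defined above (build_set_mtu_req is hypothetical and assumes a little-endian host):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch: build a SET_MAC_EXT request that updates only the MTU.
 * Fields whose CFG_ control bit is clear are ignored by the MC.
 */
static void build_set_mtu_req(uint8_t *req, size_t req_len, uint32_t mtu)
{
	uint32_t control = 1u << 0;	/* MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN */

	memset(req, 0, req_len);
	memcpy(req + 0, &mtu, 4);	/* ..._EXT_IN_MTU_OFST */
	memcpy(req + 28, &control, 4);	/* ..._EXT_IN_CONTROL_OFST */
}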
@@ -3040,6 +3769,7 @@
* to 0.
*/
#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
/***********************************/
@@ -3144,6 +3874,7 @@
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
@@ -3158,9 +3889,16 @@
#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
/* port id so vadapter stats can be provided */
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
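To make the DMA_LEN rule above concrete, a minimal sketch of the length selection; num_stats stands in for the MAC_STATS_NUM_STATS value from GET_CAPABILITIES_V4, or 0 when the firmware predates that command (mac_stats_dma_len is a hypothetical helper; MC_CMD_MAC_NSTATS is 0x61 per this header):

#include <stdint.h>

static uint32_t mac_stats_dma_len(uint32_t num_stats)
{
	if (num_stats)				/* modern firmware */
		return num_stats * sizeof(uint64_t);
	return MC_CMD_MAC_NSTATS * sizeof(uint64_t);	/* legacy layout */
}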
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -3305,9 +4043,126 @@
#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
#define MC_CMD_GMAC_DMABUF_END 0x5f
-#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define MC_CMD_MAC_GENERATION_END 0x60
#define MC_CMD_MAC_NSTATS 0x61 /* enum */
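GENERATION_START/GENERATION_END exist so the host can detect a firmware stats rewrite that raced with its read: copy the DMA buffer, then confirm the generation word did not change underneath. A simplified sketch (the real driver also orders its reads with memory barriers and checks both generation words; read_stats_snapshot is a hypothetical helper):

#include <stdint.h>
#include <string.h>

/* Sketch: snapshot an n-word stats DMA buffer whose last 64-bit word is
 * GENERATION_END. Returns 0 on a consistent copy, -1 if every attempt
 * raced with a firmware update.
 */
static int read_stats_snapshot(const volatile uint64_t *dma, size_t n,
			       uint64_t *snap)
{
	for (int retry = 0; retry < 100; retry++) {
		uint64_t gen = dma[n - 1];	/* GENERATION_END word */

		memcpy(snap, (const void *)dma, n * sizeof(*dma));
		if (dma[n - 1] == gen)
			return 0;	/* no rewrite during the copy */
	}
	return -1;
}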
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103, which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V2 0x68
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define MC_CMD_MAC_CTPIO_SUCCESS 0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define MC_CMD_MAC_CTPIO_FALLBACK 0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define MC_CMD_MAC_CTPIO_POISON 0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define MC_CMD_MAC_CTPIO_ERASE 0x77
+/* enum: This includes the space at offset 120, which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V3 0x79
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
/***********************************/
/* MC_CMD_SRIOV
@@ -3318,21 +4173,28 @@
/* MC_CMD_SRIOV_IN msgrequest */
#define MC_CMD_SRIOV_IN_LEN 12
#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
/* MC_CMD_SRIOV_OUT msgresponse */
#define MC_CMD_SRIOV_OUT_LEN 8
#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
/* this is only used for the first record */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
@@ -3342,6 +4204,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
@@ -3352,6 +4215,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
@@ -3403,10 +4267,12 @@
/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
/* A type value of 1 is unused. */
#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
/* enum: Magic */
#define MC_CMD_WOL_TYPE_MAGIC 0x0
/* enum: MS Windows Magic */
@@ -3428,7 +4294,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
@@ -3437,9 +4305,13 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
@@ -3448,7 +4320,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
@@ -3461,7 +4335,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
@@ -3476,8 +4352,11 @@
/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
@@ -3486,6 +4365,7 @@
/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -3499,6 +4379,7 @@
/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
@@ -3516,6 +4397,7 @@
/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -3556,6 +4438,7 @@
#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -3612,47 +4495,65 @@
/* MC_CMD_NVRAM_INFO_IN msgrequest */
#define MC_CMD_NVRAM_INFO_IN_LEN 4
#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_INFO_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_OUT_LEN 24
#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
*/
#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
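Because writes must be WRITESIZE-aligned, callers pad the final chunk up to that granularity. A small sketch that does not assume WRITESIZE is a power of two (nvram_pad_write_len is a hypothetical helper):

#include <stdint.h>

static uint32_t nvram_pad_write_len(uint32_t len, uint32_t writesize)
{
	if (writesize <= 1)
		return len;
	return (len + writesize - 1) / writesize * writesize;
}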
/***********************************/
@@ -3670,6 +4571,7 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
@@ -3680,9 +4582,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -3703,20 +4607,26 @@
/* MC_CMD_NVRAM_READ_IN msgrequest */
#define MC_CMD_NVRAM_READ_IN_LEN 12
#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
/* Optional control info. If a partition is stored with an A/B versioning
* scheme (i.e. in more than one physical partition in NVRAM) the host can set
* this to control which underlying physical partition is used to read data
@@ -3726,6 +4636,7 @@
* verifying by reading with MODE=TARGET_BACKUP.
*/
#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
/* enum: Same as omitting MODE: the caller sees data in the current partition
 * unless it holds the write lock, in which case it sees data in the partition
 * it is updating.
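A hedged sketch of filling in the V2 read request at the offsets defined above; build_nvram_read_v2 is a hypothetical helper and the raw copies of host integers assume a little-endian host:

#include <stdint.h>
#include <string.h>

static void build_nvram_read_v2(uint8_t req[16], uint32_t type,
				uint32_t offset, uint32_t length,
				uint32_t mode)
{
	memcpy(req + 0, &type, 4);	/* ..._READ_IN_V2_TYPE_OFST */
	memcpy(req + 4, &offset, 4);	/* ..._READ_IN_V2_OFFSET_OFST */
	memcpy(req + 8, &length, 4);	/* ..._READ_IN_V2_LENGTH_OFST */
	memcpy(req + 12, &mode, 4);	/* ..._READ_IN_V2_MODE_OFST */
}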
@@ -3765,10 +4676,13 @@
#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
@@ -3791,10 +4705,13 @@
/* MC_CMD_NVRAM_ERASE_IN msgrequest */
#define MC_CMD_NVRAM_ERASE_IN_LEN 12
#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
@@ -3815,9 +4732,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
* request with additional flags indicating version of NVRAM_UPDATE commands in
@@ -3826,10 +4745,13 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -3848,16 +4770,19 @@
 * This process takes a few seconds to complete, so it is likely to take more
 * than the MCDI timeout. Hence signature verification is initiated when
* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the
- * MCDI command returns immediately with error code EAGAIN. Subsequent
- * NVRAM_UPDATE_FINISH_V2_IN requests also return EAGAIN if the verification is
- * in progress. Once the verification has completed, this response payload
- * includes the results of the signature verification. Note that the nvram lock
- * in firmware is only released after the verification has completed and the
- * host has read back the result code from firmware.
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
/* Result of nvram update completion processing */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
/* enum: Verify succeeded without any errors. */
#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
/* enum: CMS format verification failed due to an internal error. */
@@ -3884,6 +4809,12 @@
* Trusted approver's list.
*/
#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
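With verification now running to completion inside the firmware before the response is sent, the driver only has to decode RESULT_CODE. A sketch (nvram_finish_result is hypothetical, the raw dword read assumes a little-endian host, and treating RC_UNKNOWN as "no verification performed" is this sketch's policy, not something the header mandates):

#include <stdint.h>
#include <string.h>

/* Sketch: interpret the 4-byte NVRAM_UPDATE_FINISH_V2 response payload.
 * Returns 0 when the image verified (or legacy firmware never filled the
 * field in), -1 on any defined failure code.
 */
static int nvram_finish_result(const uint8_t *resp)
{
	uint32_t rc;

	memcpy(&rc, resp + 0, 4);	/* ..._V2_OUT_RESULT_CODE_OFST */
	if (rc == 0x1)			/* MC_CMD_NVRAM_VERIFY_RC_SUCCESS */
		return 0;
	return rc == 0x0 ? 0 : -1;	/* RC_UNKNOWN: legacy firmware */
}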
/***********************************/
@@ -3911,6 +4842,7 @@
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
/* MC_CMD_REBOOT_OUT msgresponse */
@@ -3947,11 +4879,12 @@
*/
#define MC_CMD_REBOOT_MODE 0x3f
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
/* enum: Normal. */
#define MC_CMD_REBOOT_MODE_NORMAL 0x0
/* enum: Power-on Reset. */
@@ -3966,6 +4899,7 @@
/* MC_CMD_REBOOT_MODE_OUT msgresponse */
#define MC_CMD_REBOOT_MODE_OUT_LEN 4
#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
/***********************************/
@@ -4001,7 +4935,7 @@
*/
#define MC_CMD_SENSOR_INFO 0x41
-#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -4015,12 +4949,14 @@
 * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
*/
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
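The paging scheme (31 sensors per 32-bit mask, with bit 31 reserved as the next-page flag) leads to a simple walk over all pages. A sketch in which read_sensor_page is a hypothetical wrapper that issues SENSOR_INFO_EXT for the given page and returns the MASK dword:

#include <stdint.h>

extern uint32_t read_sensor_page(uint32_t page);	/* hypothetical MCDI call */

/* Sketch: count every sensor advertised across all pages. */
static unsigned int count_sensors(void)
{
	uint32_t page = 0, mask;
	unsigned int count = 0;

	do {
		mask = read_sensor_page(page);
		for (unsigned int bit = 0; bit < 31; bit++)
			if (mask & (1u << bit))
				count++;	/* sensor = page * 32 + bit */
		page++;
	} while (mask & (1u << 31));	/* NEXT_PAGE bit: another page follows */

	return count;
}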
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
/* enum: Controller temperature: degC */
#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
/* enum: Phy common temperature: degC */
@@ -4183,6 +5119,20 @@
#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
/* enum: Board temperature (back): degC */
#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -4196,6 +5146,7 @@
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO_OUT */
#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
@@ -4247,7 +5198,7 @@
*/
#define MC_CMD_READ_SENSORS 0x42
-#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
@@ -4266,6 +5217,7 @@
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
/* MC_CMD_READ_SENSORS_OUT msgresponse */
#define MC_CMD_READ_SENSORS_OUT_LEN 0
@@ -4319,6 +5271,7 @@
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
/* enum: Ok. */
#define MC_CMD_PHY_STATE_OK 0x1
/* enum: Faulty. */
@@ -4355,6 +5308,7 @@
/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -4371,6 +5325,7 @@
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
@@ -4381,13 +5336,16 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
@@ -4398,6 +5356,7 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -4412,7 +5371,9 @@
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
@@ -4451,6 +5412,7 @@
#define MC_CMD_TESTASSERT_V2_IN_LEN 4
/* How to provoke the assertion */
#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
* you're testing firmware, this is what you want.
*/
@@ -4486,6 +5448,7 @@
#define MC_CMD_WORKAROUND_IN_LEN 8
/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_WORKAROUND_BUG17230 0x1
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -4514,6 +5477,7 @@
* the workaround
*/
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
#define MC_CMD_WORKAROUND_OUT_LEN 0
@@ -4523,6 +5487,7 @@
*/
#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
@@ -4543,6 +5508,7 @@
/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -4550,6 +5516,7 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
@@ -4568,12 +5535,14 @@
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_TEST_OUT msgresponse */
#define MC_CMD_NVRAM_TEST_OUT_LEN 4
#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
/* enum: Passed. */
#define MC_CMD_NVRAM_TEST_PASS 0x0
/* enum: Failed. */
@@ -4594,12 +5563,16 @@
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
/* 0-6 low->high de-emph. */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
/* 0-8 0-8 low->high boost */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
@@ -4608,10 +5581,13 @@
#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
/* input bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
/* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
/* direction */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
/* enum: Out. */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
/* enum: In. */
@@ -4626,21 +5602,26 @@
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
@@ -4657,9 +5638,13 @@
/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
/***********************************/
@@ -4680,6 +5665,7 @@
#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
/* total number of partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
/* type ID code for each of NUM_PARTITIONS partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
@@ -4700,6 +5686,7 @@
#define MC_CMD_NVRAM_METADATA_IN_LEN 4
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
@@ -4707,7 +5694,9 @@
#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
@@ -4716,6 +5705,7 @@
#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
/* Subtype ID code for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
/* 1st component of W.X.Y.Z version number for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
@@ -4756,8 +5746,10 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
/* Number of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
/* Spacing of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
/***********************************/
@@ -4772,6 +5764,7 @@
#define MC_CMD_CLP_IN_LEN 4
/* Sub operation */
#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
/* enum: Return to factory default settings */
#define MC_CMD_CLP_OP_DEFAULT 0x1
/* enum: Set MAC address */
@@ -4789,6 +5782,7 @@
/* MC_CMD_CLP_IN_DEFAULT msgrequest */
#define MC_CMD_CLP_IN_DEFAULT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
@@ -4796,6 +5790,7 @@
/* MC_CMD_CLP_IN_SET_MAC msgrequest */
#define MC_CMD_CLP_IN_SET_MAC_LEN 12
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MAC address assigned to port */
#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
@@ -4809,6 +5804,7 @@
/* MC_CMD_CLP_IN_GET_MAC msgrequest */
#define MC_CMD_CLP_IN_GET_MAC_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
@@ -4822,6 +5818,7 @@
/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* Boot flag */
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
@@ -4832,6 +5829,7 @@
/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
@@ -4849,11 +5847,12 @@
*/
#define MC_CMD_MUM 0x57
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_MUM_IN msgrequest */
#define MC_CMD_MUM_IN_LEN 4
#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_OP_LBN 0
#define MC_CMD_MUM_IN_OP_WIDTH 8
/* enum: NULL MCDI command to MUM */
@@ -4893,26 +5892,32 @@
#define MC_CMD_MUM_IN_NULL_LEN 4
/* MUM cmd header */
#define MC_CMD_MUM_IN_CMD_OFST 0
+#define MC_CMD_MUM_IN_CMD_LEN 4
/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_READ msgrequest */
#define MC_CMD_MUM_IN_READ_LEN 16
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of the device (connected to the MUM) whose registers are to be read */
#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE 0x1
/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
/* 32-bit address to read from */
#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
/* Number of words to read. */
#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
/* MC_CMD_MUM_IN_WRITE msgrequest */
#define MC_CMD_MUM_IN_WRITE_LENMIN 16
@@ -4920,12 +5925,15 @@
#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of the device (connected to the MUM) whose registers are to be written */
#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
/* MC_CMD_MUM_DEV_HITTITE 0x1 */
/* 32-bit address to write to */
#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
/* Words to write */
#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
@@ -4938,12 +5946,16 @@
#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MUM I2C cmd code */
#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
/* Number of bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
/* Number of bytes to read */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
/* Bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
@@ -4954,21 +5966,28 @@
#define MC_CMD_MUM_IN_LOG_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_LEN 4
#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
/* Enable/disable debug output to UART */
#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
/* MC_CMD_MUM_IN_GPIO msgrequest */
#define MC_CMD_MUM_IN_GPIO_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
@@ -4981,40 +6000,56 @@
/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
@@ -5027,26 +6062,34 @@
/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
@@ -5054,7 +6097,9 @@
#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
@@ -5064,13 +6109,16 @@
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Bit-mask of clocks to be programmed */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
/* Control flags for clock programming */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
@@ -5082,19 +6130,24 @@
#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Enable/Disable FPGA config from flash */
#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_QSFP msgrequest */
#define MC_CMD_MUM_IN_QSFP_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
@@ -5104,52 +6157,77 @@
#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_OUT msgresponse */
#define MC_CMD_MUM_OUT_LEN 0
@@ -5160,6 +6238,7 @@
/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
@@ -5197,8 +6276,10 @@
#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
/* The first 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
@@ -5207,8 +6288,10 @@
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
/* The first 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
@@ -5216,11 +6299,14 @@
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
@@ -5249,6 +6335,7 @@
/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
@@ -5256,6 +6343,7 @@
/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
@@ -5263,7 +6351,9 @@
/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
@@ -5272,6 +6362,7 @@
/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
@@ -5279,6 +6370,7 @@
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
@@ -5287,11 +6379,14 @@
/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
@@ -5299,12 +6394,14 @@
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
/* Discrete (soldered) DDR resistor strap info */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
/* Number of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
/* Array of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
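/* Editor's sketch, not part of this patch: extracting the VRATIO strap
 * field from the first DISCRETE_DDR_INFO dword using the LBN/WIDTH pair
 * defined above. The helper name is the editor's own, not a driver API.
 */
#include <stdint.h>

static inline unsigned int ddr_info_vratio(uint32_t discrete_ddr_info)
{
	return (discrete_ddr_info >> MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN) &
	       ((1u << MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH) - 1);
}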
@@ -5365,6 +6462,7 @@
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
#define EVB_PORT_ID_PORT_ID_OFST 0
+#define EVB_PORT_ID_PORT_ID_LEN 4
/* enum: An invalid port handle. */
#define EVB_PORT_ID_NULL 0x0
/* enum: The port assigned to this function. */
@@ -5460,6 +6558,10 @@
#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
/* enum: MUM firmware partition */
#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
/* enum: MUM Non-volatile log output partition. */
#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
/* enum: MUM Application table partition. */
@@ -5474,8 +6576,8 @@
#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
/* enum: UEFI expansion ROM if separate from PXE */
#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
-/* enum: Spare partition 0 */
-#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Used by the expansion ROM for logging */
+#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
/* enum: Used for XIP code of shmbooted images */
#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
/* enum: Spare partition 2 */
@@ -5488,6 +6590,27 @@
#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
/* enum: Spare partition 5 */
#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Spare partition 13 */
+#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700
+/* enum: Spare partition 14 */
+#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800
+/* enum: Spare partition 15 */
+#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900
+/* enum: Spare partition 16 */
+#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -5502,6 +6625,7 @@
/* LICENSED_APP_ID structuredef */
#define LICENSED_APP_ID_LEN 4
#define LICENSED_APP_ID_ID_OFST 0
+#define LICENSED_APP_ID_ID_LEN 4
/* enum: OpenOnload */
#define LICENSED_APP_ID_ONLOAD 0x1
/* enum: PTP timestamping */
@@ -5526,6 +6650,14 @@
#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
/* enum: Capture SolarSystem 40G */
#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+/* enum: ScaleOut Onload */
+#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define LICENSED_APP_ID_DSHBRD 0x4000
+/* enum: SolarCapture Trading Analytics */
+#define LICENSED_APP_ID_SCATRD 0x8000
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
@@ -5590,6 +6722,14 @@
#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_DSHBRD_LBN 14
+#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define LICENSED_V3_APPS_SCATRD_LBN 15
+#define LICENSED_V3_APPS_SCATRD_WIDTH 1
#define LICENSED_V3_APPS_MASK_LBN 0
#define LICENSED_V3_APPS_MASK_WIDTH 64
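/* Editor's sketch, not part of this patch: testing one of the new
 * LICENSED_V3_APPS_* feature bits in the 64-bit application mask; the
 * helper is the editor's own, not a driver API.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool licensed_v3_app_enabled(uint64_t apps, unsigned int lbn)
{
	return (apps >> lbn) & 1;
}

/* e.g. licensed_v3_app_enabled(apps, LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN) */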
@@ -5636,6 +6776,18 @@
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
/* enum: This is a TX completion event, not a timestamp */
#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
/* enum: This is the low part of a TX timestamp event */
#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
/* enum: This is the high part of a TX timestamp event */
@@ -5669,6 +6821,19 @@
#define RSS_MODE_HASH_SELECTOR_LBN 0
#define RSS_MODE_HASH_SELECTOR_WIDTH 8
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
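/* Editor's sketch, not part of this patch: packing the new CTPIO_STATS_MAP
 * word from its two 16-bit subfields using the LBN values above. The helper
 * is illustrative; the driver would use its usual MCDI bitfield macros.
 */
#include <stdint.h>

static inline uint32_t ctpio_stats_map_pack(uint16_t vi, uint16_t bucket)
{
	return ((uint32_t)vi << CTPIO_STATS_MAP_VI_LBN) |
	       ((uint32_t)bucket << CTPIO_STATS_MAP_BUCKET_LBN);
}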
/***********************************/
/* MC_CMD_READ_REGS
@@ -5676,7 +6841,7 @@
*/
#define MC_CMD_READ_REGS 0x50
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_REGS_IN msgrequest */
#define MC_CMD_READ_REGS_IN_LEN 0
@@ -5709,17 +6874,22 @@
#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
/* tbd */
#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
@@ -5735,6 +6905,7 @@
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -5745,13 +6916,16 @@
#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -5762,6 +6936,7 @@
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of a 4k-aligned, 4k-byte host memory buffer */
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
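/* Editor's sketch, not part of this patch: sizing a variable-length
 * INIT_EVQ request. Assuming the usual 8-byte event descriptors, a queue
 * of `entries` events spans entries*8 bytes of host memory, i.e. that many
 * 4k buffers, each passed as one 64-bit DMA address in the array above.
 */
#include <stddef.h>

static inline size_t init_evq_req_len(unsigned int entries)
{
	unsigned int n_bufs = (entries * 8 + 4095) / 4096;

	return MC_CMD_INIT_EVQ_IN_LEN(n_bufs);	/* 36 + 8*n_bufs bytes */
}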
@@ -5774,6 +6949,7 @@
#define MC_CMD_INIT_EVQ_OUT_LEN 4
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
@@ -5781,17 +6957,22 @@
#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
/* tbd */
#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
@@ -5828,6 +7009,7 @@
*/
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -5838,13 +7020,16 @@
#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
* MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -5855,6 +7040,7 @@
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of a 4k-aligned, 4k-byte host memory buffer */
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
@@ -5867,8 +7053,10 @@
#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
/* Actual configuration applied on the card */
#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
@@ -5916,17 +7104,22 @@
#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
*/
#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
@@ -5945,8 +7138,10 @@
#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
/* 64-bit address of a 4k-aligned, 4k-byte host memory buffer */
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
@@ -5961,17 +7156,22 @@
#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
*/
#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
@@ -6007,8 +7207,10 @@
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of a 4k-aligned, 4k-byte host memory buffer */
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6017,6 +7219,7 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
/* MC_CMD_INIT_RXQ_OUT msgresponse */
#define MC_CMD_INIT_RXQ_OUT_LEN 0
@@ -6040,18 +7243,23 @@
#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6072,8 +7280,10 @@
#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
/* 64-bit address of a 4k-aligned, 4k-byte host memory buffer */
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
@@ -6088,18 +7298,23 @@
#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6122,10 +7337,14 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of a 4k-aligned, 4k-byte host memory buffer */
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6135,6 +7354,7 @@
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
/* Flags related to Qbb flow control mode. */
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
@@ -6161,6 +7381,7 @@
* passed to INIT_EVQ
*/
#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_EVQ_OUT msgresponse */
#define MC_CMD_FINI_EVQ_OUT_LEN 0
@@ -6178,6 +7399,7 @@
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_RXQ_OUT msgresponse */
#define MC_CMD_FINI_RXQ_OUT_LEN 0
@@ -6195,6 +7417,7 @@
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_TXQ_OUT msgresponse */
#define MC_CMD_FINI_TXQ_OUT_LEN 0
@@ -6212,6 +7435,7 @@
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
/* Bits 0 - 63 of event */
#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
@@ -6237,6 +7461,7 @@
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
@@ -6252,6 +7477,7 @@
#define MC_PROXY_STATUS_BUFFER_LEN 16
/* Handle allocated by the firmware for this proxy transaction */
#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
/* enum: An invalid handle. */
#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
@@ -6282,6 +7508,7 @@
* elevated privilege mask granted to the requesting function.
*/
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
@@ -6298,6 +7525,7 @@
/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6309,6 +7537,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 * of blocks, each of the size REQUEST_BLOCK_SIZE.
*/
@@ -6318,6 +7547,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6328,8 +7558,10 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
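/* Editor's sketch, not part of this patch: the "must be a power of 2"
 * constraints on the three *_BLOCK_SIZE fields above, as a plain check
 * (REPLY_BLOCK_SIZE may additionally be zero when no reply buffer is
 * provided).
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool block_size_valid(uint32_t sz, bool zero_ok)
{
	if (sz == 0)
		return zero_ok;
	return (sz & (sz - 1)) == 0;	/* power of two */
}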
@@ -6337,6 +7569,7 @@
/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6348,6 +7581,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 * of blocks, each of the size REQUEST_BLOCK_SIZE.
*/
@@ -6357,6 +7591,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6367,12 +7602,15 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
@@ -6392,7 +7630,9 @@
/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
* is stored in the REPLY_BUFF.
*/
@@ -6408,6 +7648,7 @@
*/
#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
@@ -6427,17 +7668,22 @@
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
/* Size of buffer table pages to use, in bytes (note that only a few values are
* legal on any specific hardware).
*/
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
/* Buffer table IDs for use in DMA descriptors. */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
/***********************************/
@@ -6453,10 +7699,13 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
/* ID */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Num entries */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* Buffer table entry address */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
@@ -6479,48 +7728,11 @@
/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-/* PORT_CONFIG_ENTRY structuredef */
-#define PORT_CONFIG_ENTRY_LEN 16
-/* External port number (label) */
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
-/* Port core location */
-#define PORT_CONFIG_ENTRY_CORE_OFST 1
-#define PORT_CONFIG_ENTRY_CORE_LEN 1
-#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
-#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
-#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
-#define PORT_CONFIG_ENTRY_CORE_LBN 8
-#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
-/* Internal number (HW resource) relative to the core */
-#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
-#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
-/* Reserved */
-#define PORT_CONFIG_ENTRY_RSVD_OFST 3
-#define PORT_CONFIG_ENTRY_RSVD_LEN 1
-#define PORT_CONFIG_ENTRY_RSVD_LBN 24
-#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
-/* Bitmask of KR lanes used by the port */
-#define PORT_CONFIG_ENTRY_LANES_OFST 4
-#define PORT_CONFIG_ENTRY_LANES_LBN 32
-#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
-/* Port capabilities (MC_CMD_PHY_CAP_*) */
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
-/* Reserved (align to 16 bytes) */
-#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
-#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
-#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -6534,6 +7746,7 @@
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_LEN 4
/* enum: single-recipient filter insert */
#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
/* enum: single-recipient filter remove */
@@ -6554,8 +7767,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
@@ -6586,6 +7801,7 @@
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
/* enum: drop packets */
#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
@@ -6598,8 +7814,10 @@
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -6614,13 +7832,16 @@
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
@@ -6653,8 +7874,10 @@
#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
/* Firmware defined register 1 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
/* source IP address to match (as bytes in network order; set last 12 bytes to
* 0 for IPv4 address)
*/
@@ -6673,6 +7896,7 @@
#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* filter handle (for remove / unsubscribe operations) */
@@ -6683,8 +7907,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
@@ -6743,6 +7969,7 @@
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
/* enum: drop packets */
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
@@ -6755,8 +7982,10 @@
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -6771,13 +8000,16 @@
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
@@ -6810,11 +8042,13 @@
#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
* protocol is GRE) to match (as bytes in network order; set last byte to 0 for
* VXLAN/NVGRE, or 1 for Geneve)
*/
#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
@@ -6880,10 +8114,12 @@
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
* order; set last 12 bytes to 0 for IPv4 address)
*/
@@ -6899,6 +8135,7 @@
#define MC_CMD_FILTER_OP_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6918,6 +8155,7 @@
#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_EXT_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6944,6 +8182,7 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
/* enum: read the list of supported RX filter matches */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
/* enum: read flags indicating restrictions on filter insertion for the calling
@@ -6966,10 +8205,12 @@
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* number of supported match types */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
/* array of supported match types (valid MATCH_FIELDS values for
* MC_CMD_FILTER_OP) sorted in decreasing priority order
*/
@@ -6982,10 +8223,12 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* bitfield of filter insertion restrictions */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
@@ -7005,11 +8248,16 @@
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
/* enum: RX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
/* enum: TX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format) */
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
/* enum: Lookup engine (with requested metadata format) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
@@ -7021,26 +8269,33 @@
#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry */
#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: write a word of DICPU DMEM or a LUE entry */
+/* enum: Write a word of DICPU DMEM or a LUE entry. */
#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). */
#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
/* data memory address (DICPU targets) or LUE index (LUE targets) */
#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
/* selector (for MISC_STATE target) */
#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
/* enum: Port to datapath mapping */
#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
/* value to write (for DMEM writes) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
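The read-modify-write rule quoted in the comments above, new = (old & mask) ^ value, is applied by the device; the sketch below only restates the arithmetic to show how mask/value pairs select set, clear, and toggle operations.

static inline uint32_t dmem_rmw_result(uint32_t old, uint32_t and_mask,
					uint32_t xor_value)
{
	return (old & and_mask) ^ xor_value;
}
/* set bit n:    and_mask = ~(1u << n), xor_value = 1u << n
 * clear bit n:  and_mask = ~(1u << n), xor_value = 0
 * toggle bit n: and_mask = 0xffffffff, xor_value = 1u << n
 */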
/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
/* value to write (for LUE writes) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
@@ -7049,6 +8304,7 @@
#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
/* value read (for DMEM reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
/* value read (for LUE reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
@@ -7093,6 +8349,7 @@
#define MC_CMD_SET_PF_COUNT_IN_LEN 4
/* New number of PFs on the device. */
#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
@@ -7113,6 +8370,7 @@
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
/***********************************/
@@ -7127,6 +8385,7 @@
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
@@ -7144,8 +8403,10 @@
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
/* The maximum number of VIs that would be useful */
#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
* Use extended version in new code.
@@ -7153,21 +8414,26 @@
#define MC_CMD_ALLOC_VIS_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -7201,15 +8467,20 @@
#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
/* Max number of VFs before sriov stride and offset may need to be changed. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
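VF_OFFSET and VF_STRIDE follow the standard SR-IOV routing-ID arithmetic, so a VF's requester ID can be derived as sketched below (hypothetical helper, not driver code):

static inline unsigned int sriov_vf_rid(unsigned int pf_rid,
					unsigned int vf_offset,
					unsigned int vf_stride,
					unsigned int vf_index)
{
	/* first VF sits at pf_rid + vf_offset; later VFs step by vf_stride */
	return pf_rid + vf_offset + vf_index * vf_stride;
}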
/***********************************/
@@ -7224,19 +8495,24 @@
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
/* Max number of VFs before sriov stride and offset may need to be changed. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF, or 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous, 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
@@ -7258,12 +8534,15 @@
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -7278,6 +8557,7 @@
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
@@ -7311,6 +8591,7 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
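Fields like BUFS_BASE above are described by an LBN (low bit number) and WIDTH within their containing dword; a generic extraction sketch, assuming the dword has already been read in host order:

#define MCDI_FIELD_GET(dword, lbn, width)				\
	(((dword) >> (lbn)) &						\
	 ((width) == 32 ? 0xffffffffu : (1u << (width)) - 1u))

/* e.g. bufs_base = MCDI_FIELD_GET(ev_meta, 0, 16); */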
@@ -7392,6 +8673,7 @@
#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
/***********************************/
@@ -7406,6 +8688,7 @@
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
@@ -7423,6 +8706,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
@@ -7445,6 +8729,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
/***********************************/
@@ -7459,6 +8744,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* Transaction processing steering hint 1 for use with the Rx Queue. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
@@ -7478,6 +8764,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
@@ -7494,6 +8781,7 @@
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* enum: MISC. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
/* enum: IDO. */
@@ -7506,10 +8794,12 @@
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
@@ -7557,10 +8847,12 @@
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
@@ -7627,6 +8919,7 @@
* in a command from the host.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
@@ -7636,6 +8929,7 @@
* mc_flash_layout.h.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
@@ -7672,12 +8966,14 @@
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
/* enum: Last chunk, containing checksum rather than data */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
/* enum: Abort download of this item */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
/* Length of this chunk in bytes */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
/* Data for this chunk */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
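Taken together, the CHUNK_ID/CHUNK_LEN/CHUNK_DATA fields imply a simple chunked transfer: data chunks with incrementing IDs, terminated by a CHUNK_ID_LAST chunk carrying the checksum. A hedged sketch, where send_chunk() and CHUNK_BYTES are hypothetical stand-ins for the real MCDI transport and chunk size:

static int satellite_download_item(uint32_t target, const uint8_t *data,
				   size_t len, uint32_t checksum)
{
	uint32_t chunk_id = 0;
	size_t pos, n;
	int rc;

	for (pos = 0; pos < len; pos += n, chunk_id++) {
		n = len - pos < CHUNK_BYTES ? len - pos : CHUNK_BYTES;
		rc = send_chunk(target, chunk_id, data + pos, n);
		if (rc)
			return rc;
	}
	/* the last chunk carries a checksum rather than payload data */
	return send_chunk(target, MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST,
			  (const uint8_t *)&checksum, sizeof(checksum));
}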
@@ -7688,8 +8984,10 @@
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
/* Same as MC_CMD_ERR field, but included as 0 in success cases */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
/* Extra status information */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
/* enum: Code download OK, completed. */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
/* enum: Code download aborted as requested. */
@@ -7726,6 +9024,7 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
@@ -7793,6 +9092,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
/* enum: BIST RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
@@ -7813,6 +9114,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -7822,6 +9125,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
/* enum: BIST TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
@@ -7848,7 +9153,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -7864,6 +9171,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -7888,7 +9197,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -7901,12 +9212,16 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
@@ -7915,6 +9230,7 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
@@ -7982,6 +9298,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
/* enum: BIST RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
@@ -8002,6 +9320,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8011,6 +9331,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
/* enum: BIST TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
@@ -8037,7 +9359,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8053,6 +9377,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8077,7 +9403,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8090,14 +9418,19 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8124,6 +9457,18 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
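Because FLAGS2 is absent on older firmware, its bits can only be tested after checking the response length, as the comment above FLAGS2 instructs. A sketch (assuming a little-endian host plus <stdbool.h> and <string.h>; resp/outlen are the raw MCDI response and its length):

static inline bool caps_v2_has_ctpio(const uint8_t *resp, size_t outlen)
{
	uint32_t flags2;

	if (outlen < MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST +
		     MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN)
		return false;	/* FLAGS2 not present on this firmware */
	memcpy(&flags2, resp + MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST,
	       sizeof(flags2));
	return (flags2 >> MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN) & 1;
}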
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -8181,9 +9526,10 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
@@ -8251,6 +9597,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
/* enum: BIST RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
@@ -8271,6 +9619,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8280,6 +9630,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
/* enum: BIST TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
@@ -8306,7 +9658,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8322,6 +9676,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8346,7 +9702,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8359,14 +9717,19 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8393,6 +9756,18 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -8463,6 +9838,348 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of mapping PFs to ports is in use.
+ * A future driver should look for a new field supporting the new scheme;
+ * a current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
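A small decode sketch for the per-PF assignment byte, following the guidance above that current drivers should treat INCOMPATIBLE_ASSIGNMENT like PF_NOT_ASSIGNED (helper name hypothetical):

static inline int pf_external_port(uint8_t assignment)
{
	switch (assignment) {
	case MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED:
	case MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT:
		return -1;	/* no usable information for this PF */
	case MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED:
	case MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT:
		return -2;	/* present but not (expressibly) assigned */
	default:
		return assignment;	/* external port number */
	}
}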
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
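Since both cache sizes are stored as binary logarithms, recovering the actual entry count is a single shift; a one-line sketch:

static inline unsigned int desc_cache_entries(uint8_t log2_size)
{
	return 1u << log2_size;	/* e.g. log2_size = 6 -> 64 entries */
}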
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
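A sketch mapping the VI_WINDOW_MODE enum to the per-VI address stride it describes (hypothetical helper; unknown modes treated as unsupported):

static inline size_t vi_window_bytes(uint8_t mode)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K:
		return 8 * 1024;
	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K:
		return 16 * 1024;
	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K:
		return 64 * 1024;
	default:
		return 0;	/* unknown mode: treat as unsupported */
	}
}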
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
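The sizing rule in the comment above reduces to MAC_STATS_NUM_STATS 64-bit slots; a minimal sketch of the DMA buffer length calculation:

static inline size_t mac_stats_dma_len(uint16_t num_stats)
{
	/* one 64-bit value per stat, including the GENERATION_END entry */
	return (size_t)num_stats * sizeof(uint64_t);
}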
/***********************************/
@@ -8502,6 +10219,7 @@
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
/***********************************/
@@ -8516,6 +10234,7 @@
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
@@ -8533,17 +10252,22 @@
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
/* the desired maximum fill level */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -8561,10 +10285,13 @@
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
/* bitmask of the priority queues this txq is added to when it is inserted. */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8573,25 +10300,32 @@
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to bucket associated with outer
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to bucket associated with inner
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
/* bitmask of the priority queues this txq is added to when it is inserted. */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8600,18 +10334,23 @@
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to bucket associated with outer
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to bucket associated with inner
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -8629,8 +10368,10 @@
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
@@ -8648,6 +10389,7 @@
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
@@ -8665,8 +10407,10 @@
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of v-switch to create. */
#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
@@ -8679,6 +10423,7 @@
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
@@ -8689,6 +10434,7 @@
* v-ports with this number of tags.
*/
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
@@ -8706,6 +10452,7 @@
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
@@ -8725,6 +10472,7 @@
#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
@@ -8742,8 +10490,10 @@
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of the new v-port. */
#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN (obsolete) */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
/* enum: VEB (obsolete) */
@@ -8764,6 +10514,7 @@
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
/* Flags controlling v-port creation */
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
@@ -8773,8 +10524,10 @@
* v-switch.
*/
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -8784,6 +10537,7 @@
#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
/* The handle of the new v-port */
#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
/***********************************/
@@ -8798,6 +10552,7 @@
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_FREE_OUT msgresponse */
#define MC_CMD_VPORT_FREE_OUT_LEN 0
@@ -8815,18 +10570,23 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Flags controlling v-adaptor creation */
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
/* The number of VLAN tags to transparently insert/remove. */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -8853,6 +10613,7 @@
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
@@ -8870,6 +10631,7 @@
#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* The new MAC address to assign to this v-adaptor */
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
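/*
 * Illustrative only, not part of this patch: a minimal sketch of how a
 * host might encode a VADAPTOR_SET_MAC request as a raw little-endian
 * byte buffer using the _OFST/_LEN values above. mcdi_put_dword() is an
 * assumed helper written for this example, not an API from this header.
 */
#include <stdint.h>
#include <string.h>

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val >> 0);
	buf[ofst + 1] = (uint8_t)(val >> 8);
	buf[ofst + 2] = (uint8_t)(val >> 16);
	buf[ofst + 3] = (uint8_t)(val >> 24);
}

static void build_vadaptor_set_mac(uint8_t req[10], uint32_t port_id,
				   const uint8_t mac[6])
{
	mcdi_put_dword(req, 0, port_id); /* UPSTREAM_PORT_ID at OFST 0 */
	memcpy(req + 4, mac, 6);         /* MACADDR at OFST 4, LEN 6 */
}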
@@ -8890,6 +10652,7 @@
#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
@@ -8910,15 +10673,19 @@
#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
/* The number of VLAN tags that may still be added */
#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -8933,8 +10700,10 @@
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
/* The target function to modify. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
@@ -8955,9 +10724,13 @@
/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
/* Write enable bits 0-3, set to write, clear to read. */
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
@@ -8969,9 +10742,13 @@
*/
#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
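/*
 * Illustrative only: throughout this file an _LBN/_WIDTH pair names a
 * bitfield by its low bit number and width, counted from the start of the
 * message. A sketch, under that reading, of setting one WRITE_MASK bit
 * (LBN 128, WIDTH 4) in the 17-byte RDWR_A64_REGIONS request:
 */
#include <stdint.h>

static void mcdi_set_bit(uint8_t *buf, unsigned int lbn)
{
	buf[lbn / 8] |= (uint8_t)(1u << (lbn % 8));
}

static void set_region_write_enable(uint8_t req[17], unsigned int region)
{
	if (region < 4) /* bits 128..131 select regions 0..3 for write */
		mcdi_set_bit(req, 128 + region);
}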
/***********************************/
@@ -8986,11 +10763,13 @@
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
/* The handle of the new Onload stack */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
/***********************************/
@@ -9005,6 +10784,7 @@
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
@@ -9022,8 +10802,10 @@
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of context to allocate */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
/* enum: Allocate a context for exclusive use. The key and indirection table
* must be explicitly configured.
*/
@@ -9037,6 +10819,7 @@
* in the indirection table will be in the range 0 to NUM_QUEUES-1.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
@@ -9045,6 +10828,7 @@
* handle.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
/* enum: guaranteed invalid RSS context handle value */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
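/*
 * Illustrative only: building an RSS_CONTEXT_ALLOC request and checking
 * the returned handle against the guaranteed-invalid value. The TYPE
 * value 0x0 for an exclusive context is an assumption for this sketch
 * (that enum's value is not visible in this hunk); mcdi_put_dword() is
 * the assumed little-endian helper used throughout these examples.
 */
#include <stdint.h>

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val >> 0);
	buf[ofst + 1] = (uint8_t)(val >> 8);
	buf[ofst + 2] = (uint8_t)(val >> 16);
	buf[ofst + 3] = (uint8_t)(val >> 24);
}

static void build_rss_context_alloc(uint8_t req[12], uint32_t upstream_port,
				    uint32_t num_queues)
{
	mcdi_put_dword(req, 0, upstream_port); /* UPSTREAM_PORT_ID */
	mcdi_put_dword(req, 4, 0x0);           /* TYPE: exclusive (assumed) */
	mcdi_put_dword(req, 8, num_queues);    /* NUM_QUEUES */
}

static int rss_handle_valid(uint32_t handle)
{
	return handle != 0xffffffffu; /* RSS_CONTEXT_ID_INVALID */
}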
@@ -9061,6 +10845,7 @@
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
@@ -9078,6 +10863,7 @@
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
@@ -9098,6 +10884,7 @@
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
@@ -9118,6 +10905,7 @@
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* The 128-byte indirection table (1 byte per entry) */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
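/*
 * Illustrative only: filling the 128-entry indirection table round-robin
 * so hash values spread evenly across num_queues queues. Each entry is
 * one byte and must stay within 0..NUM_QUEUES-1 from the earlier ALLOC
 * call; mcdi_put_dword() is the same assumed little-endian helper.
 */
#include <stdint.h>

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val >> 0);
	buf[ofst + 1] = (uint8_t)(val >> 8);
	buf[ofst + 2] = (uint8_t)(val >> 16);
	buf[ofst + 3] = (uint8_t)(val >> 24);
}

static void build_rss_set_table(uint8_t req[132], uint32_t rss_handle,
				unsigned int num_queues) /* num_queues >= 1 */
{
	unsigned int i;

	mcdi_put_dword(req, 0, rss_handle); /* RSS_CONTEXT_ID at OFST 0 */
	for (i = 0; i < 128; i++)
		req[4 + i] = (uint8_t)(i % num_queues); /* 1 byte per entry */
}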
@@ -9138,6 +10926,7 @@
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
@@ -9158,6 +10947,7 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* Hash control flags. The _EN bits are always supported, but new modes are
* available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
* in this case, the MODE fields may be set to non-zero values, and will take
@@ -9171,6 +10961,7 @@
* particular packet type.)
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
@@ -9210,6 +11001,7 @@
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
@@ -9227,6 +11019,7 @@
* always be used for a SET regardless of old/new driver vs. old/new firmware.
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
@@ -9263,11 +11056,13 @@
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
* offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
* referenced RSS contexts must span no more than this number.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
@@ -9276,6 +11071,7 @@
* handle.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
/* enum: guaranteed invalid .1p mapping handle value */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
@@ -9292,6 +11088,7 @@
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
@@ -9309,6 +11106,7 @@
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
* handle)
*/
@@ -9331,6 +11129,7 @@
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
@@ -9356,10 +11155,13 @@
#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
/* Base absolute interrupt vector number. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
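/*
 * Illustrative only: decoding the fixed-layout 12-byte GET_VECTOR_CFG
 * response at the offsets above; all three fields are little-endian
 * 32-bit words. The struct and helper are assumptions for the sketch.
 */
#include <stdint.h>

static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

struct vector_cfg {
	uint32_t vec_base;    /* base absolute vector number */
	uint32_t vecs_per_pf; /* vectors allocated to this PF */
	uint32_t vecs_per_vf; /* vectors allocated per VF */
};

static void parse_vector_cfg(const uint8_t resp[12], struct vector_cfg *cfg)
{
	cfg->vec_base = mcdi_get_dword(resp, 0);
	cfg->vecs_per_pf = mcdi_get_dword(resp, 4);
	cfg->vecs_per_vf = mcdi_get_dword(resp, 8);
}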
/***********************************/
@@ -9376,10 +11178,13 @@
* let the system find a suitable base.
*/
#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
/* Number of interrupt vectors to allocate to this PF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
@@ -9397,6 +11202,7 @@
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to add */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9417,6 +11223,7 @@
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to delete */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9437,6 +11244,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
@@ -9444,6 +11252,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
/* The number of MAC addresses returned */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
/* Array of MAC addresses */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
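/*
 * Illustrative only: walking the variable-length response, whose total
 * size is 4 + 6*num per the _LEN(num) macro above. The callback shape is
 * an assumption for the sketch.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static void for_each_vport_mac(const uint8_t *resp, size_t resp_len,
			       void (*fn)(const uint8_t *mac))
{
	uint32_t count, i;

	if (resp_len < 4)
		return;
	count = mcdi_get_dword(resp, 0); /* MACADDR_COUNT */
	for (i = 0; i < count; i++) {
		size_t ofst = 4 + (size_t)6 * i; /* 6-byte entries */

		if (ofst + 6 > resp_len)
			break; /* response shorter than COUNT claims */
		fn(resp + ofst);
	}
}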
@@ -9465,8 +11274,10 @@
#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
/* The handle of the v-port */
#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
/* Flags requesting what should be changed. */
#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
@@ -9476,14 +11287,17 @@
* v-switch.
*/
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
/* The number of MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
/* MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
@@ -9492,6 +11306,7 @@
/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
@@ -9508,15 +11323,18 @@
#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
/* The number of VLAN tags that may be used on a v-adaptor connected to this
* EVB port.
*/
#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -9528,14 +11346,16 @@
*/
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Number of buffer table entries to dump. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
@@ -9559,6 +11379,7 @@
/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
@@ -9588,6 +11409,7 @@
/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
@@ -9611,8 +11433,10 @@
#define MC_CMD_GET_CLOCK_OUT_LEN 8
/* System frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
/* DPCPU frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/***********************************/
@@ -9621,36 +11445,43 @@
*/
#define MC_CMD_SET_CLOCK 0xad
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_CLOCK_IN msgrequest */
#define MC_CMD_SET_CLOCK_IN_LEN 28
/* Requested frequency in MHz for system clock domain */
#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
/* enum: Leave the system clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for inter-core clock domain */
#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
/* enum: Leave the inter-core clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for DPCPU clock domain */
#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
/* enum: Leave the DPCPU clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for PCS clock domain */
#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
/* enum: Leave the PCS clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for MC clock domain */
#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
/* enum: Leave the MC clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for rmon clock domain */
#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
/* enum: Leave the rmon clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for vswitch clock domain */
#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
/* enum: Leave the vswitch clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
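/*
 * Illustrative only: since every domain's DONT_CHANGE value is 0x0, a
 * zeroed request leaves all clocks alone. A sketch that then retunes
 * just the system clock domain (mcdi_put_dword() is the assumed helper
 * from the earlier sketches):
 */
#include <stdint.h>
#include <string.h>

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val >> 0);
	buf[ofst + 1] = (uint8_t)(val >> 8);
	buf[ofst + 2] = (uint8_t)(val >> 16);
	buf[ofst + 3] = (uint8_t)(val >> 24);
}

static void build_set_clock_sys_only(uint8_t req[28], uint32_t sys_freq_mhz)
{
	memset(req, 0, 28);                   /* all domains: DONT_CHANGE */
	mcdi_put_dword(req, 0, sys_freq_mhz); /* SYS_FREQ at OFST 0 */
}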
@@ -9658,30 +11489,37 @@
#define MC_CMD_SET_CLOCK_OUT_LEN 28
/* Resulting system frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
/* enum: The system clock domain doesn't exist */
#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
/* Resulting inter-core frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
/* enum: The inter-core clock domain doesn't exist / isn't used */
#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
/* Resulting DPCPU frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/* enum: The dpcpu clock domain doesn't exist */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
/* Resulting PCS frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
/* enum: The PCS clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
/* Resulting MC frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
/* enum: The MC clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
/* Resulting rmon frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
/* enum: The rmon clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
/* Resulting vswitch frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
/* enum: The vswitch clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
@@ -9692,11 +11530,12 @@
*/
#define MC_CMD_DPCPU_RPC 0xae
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
/* enum: RxDPCPU0 */
#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
/* enum: TxDPCPU0 */
@@ -9761,12 +11600,15 @@
#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
/* Register data to write. Only valid in write/write-read. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
/* Register address. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
/* MC_CMD_DPCPU_RPC_OUT msgresponse */
#define MC_CMD_DPCPU_RPC_OUT_LEN 36
#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
/* DATA */
#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
@@ -9777,9 +11619,13 @@
#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
/***********************************/
@@ -9794,6 +11640,7 @@
#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
/* Interrupt level relative to base for function. */
#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
@@ -9811,6 +11658,7 @@
#define MC_CMD_SHMBOOT_OP_IN_LEN 4
/* Identifies the operation to perform */
#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
/* enum: Copy slave_data section to the slave core. (Greenport only) */
#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
@@ -9824,13 +11672,16 @@
*/
#define MC_CMD_CAP_BLK_READ 0xe7
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CAP_BLK_READ_IN msgrequest */
#define MC_CMD_CAP_BLK_READ_IN_LEN 12
#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
@@ -9850,53 +11701,77 @@
*/
#define MC_CMD_DUMP_DO 0xe8
-#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_DO_IN msgrequest */
#define MC_CMD_DUMP_DO_IN_LEN 52
#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
/* enum: The uart port this command was received over (if using a uart
* transport)
*/
#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/* MC_CMD_DUMP_DO_OUT msgresponse */
#define MC_CMD_DUMP_DO_OUT_LEN 4
#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
/***********************************/
@@ -9905,41 +11780,64 @@
*/
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
-#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/***********************************/
@@ -9950,17 +11848,20 @@
*/
#define MC_CMD_SET_PSU 0xea
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_PSU_IN msgrequest */
#define MC_CMD_SET_PSU_IN_LEN 12
#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
/* desired value, e.g. voltage in mV */
#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
/* MC_CMD_SET_PSU_OUT msgresponse */
#define MC_CMD_SET_PSU_OUT_LEN 0
@@ -9980,7 +11881,9 @@
/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
/***********************************/
@@ -10016,12 +11919,16 @@
#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
/* Offset at which to write the data */
#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
@@ -10044,12 +11951,16 @@
#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
/* CRC32 over OFFSET, LENGTH, RESERVED */
#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
/* Offset from which to read the data */
#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
/* MC_CMD_UART_RECV_DATA_IN msgresponse */
#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
@@ -10057,12 +11968,16 @@
#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
@@ -10075,14 +11990,16 @@
*/
#define MC_CMD_READ_FUSES 0xf0
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_FUSES_IN msgrequest */
#define MC_CMD_READ_FUSES_IN_LEN 8
/* Offset in OTP to read */
#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
/* Length of data to read in bytes */
#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
/* MC_CMD_READ_FUSES_OUT msgresponse */
#define MC_CMD_READ_FUSES_OUT_LENMIN 4
@@ -10090,6 +12007,7 @@
#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
/* Length of returned OTP data in bytes */
#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
/* Returned data */
#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
@@ -10197,6 +12115,60 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
/* enum: CTLE EQ Resistor (0-7, Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6-bit signed (-29 to +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6-bit signed (-29 to +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf
+/* enum: DFE Tap2 (Medford2 - 6-bit signed (-20 to +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10
+/* enum: DFE Tap3 (Medford2 - 6-bit signed (-20 to +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11
+/* enum: DFE Tap4 (Medford2 - 6-bit signed (-20 to +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12
+/* enum: DFE Tap5 (Medford2 - 6-bit signed (-24 to +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13
+/* enum: DFE Tap6 (Medford2 - 6-bit signed (-24 to +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14
+/* enum: DFE Tap7 (Medford2 - 6-bit signed (-24 to +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15
+/* enum: DFE Tap8 (Medford2 - 6-bit signed (-24 to +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16
+/* enum: DFE Tap9 (Medford2 - 6-bit signed (-24 to +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17
+/* enum: DFE Tap10 (Medford2 - 6-bit signed (-24 to +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18
+/* enum: DFE Tap11 (Medford2 - 6-bit signed (-24 to +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19
+/* enum: DFE Tap12 (Medford2 - 6-bit signed (-24 to +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a
+/* enum: I/Q clk offset (Medford2 - 4-bit signed (-5 to +5)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6-bit signed (-29 to +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6-bit signed (-29 to +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6-bit signed (-29 to +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6-bit signed (-29 to +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20
+/* enum: CDR integral loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
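/*
 * Illustrative only: each RXEQ parameter word packs an 8-bit PARAM_ID at
 * bit 0 and a 3-bit lane number at bit 8, per the _LBN/_WIDTH values
 * above. A generic extractor for such in-word fields:
 */
#include <stdint.h>

static uint32_t mcdi_field(uint32_t word, unsigned int lbn,
			   unsigned int width)
{
	return (word >> lbn) & ((1u << width) - 1);
}

/* e.g. id = mcdi_field(w, 0, 8); lane = mcdi_field(w, 8, 3); */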
@@ -10268,7 +12240,7 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude (Huntington, Medford) */
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
@@ -10290,9 +12262,9 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
/* enum: TX Amplitude Fine control (Medford) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
-/* enum: Pre-shoot Tap (Medford) */
+/* enum: Pre-shoot Tap (Medford, Medford2) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
-/* enum: De-emphasis Tap (Medford) */
+/* enum: De-emphasis Tap (Medford, Medford2) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
@@ -10361,7 +12333,24 @@
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+/* Scan duration / cycle count */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
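/*
 * Illustrative only: the V2 eye-plot request appends a 32-bit scan
 * duration (BER) word after the lane. The KR_TUNE_OP opcode byte value
 * is firmware-defined and not shown in this hunk, so it is a parameter
 * here; mcdi_put_dword() is the assumed little-endian helper from above.
 */
#include <stdint.h>

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val >> 0);
	buf[ofst + 1] = (uint8_t)(val >> 8);
	buf[ofst + 2] = (uint8_t)(val >> 16);
	buf[ofst + 3] = (uint8_t)(val >> 24);
}

static void build_eye_plot_v2(uint8_t req[12], uint8_t op, uint32_t lane,
			      uint32_t ber)
{
	req[0] = op;                  /* KR_TUNE_OP, 1 byte */
	req[1] = req[2] = req[3] = 0; /* RSVD aligns to 32 bits */
	mcdi_put_dword(req, 4, lane); /* port-relative LANE */
	mcdi_put_dword(req, 8, ber);  /* scan duration / cycle count */
}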
/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10393,10 +12382,12 @@
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
/***********************************/
@@ -10594,6 +12585,7 @@
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10636,6 +12628,7 @@
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
@@ -10647,23 +12640,30 @@
#define MC_CMD_LICENSING_OUT_LEN 28
/* count of application keys which are valid */
#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being blacklisted */
#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being for the wrong node
*/
#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
*/
#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
@@ -10683,6 +12683,7 @@
#define MC_CMD_LICENSING_V3_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
@@ -10696,20 +12697,26 @@
#define MC_CMD_LICENSING_V3_OUT_LEN 88
/* count of keys which are valid */
#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
/* count of keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
/* count of keys which are invalid due to being for the wrong node */
#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
*/
#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
@@ -10750,8 +12757,10 @@
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
/* type of license (e.g. 3) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
/* length of the license ID (in bytes) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
/* the unique license ID of the adapter */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
@@ -10789,11 +12798,13 @@
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
@@ -10824,6 +12835,7 @@
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
@@ -10874,8 +12886,10 @@
#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
/* application ID */
#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
/* enum: validate application */
#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
/* enum: mask application */
@@ -10900,8 +12914,10 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
/* application ID */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
/* validation challenge */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
@@ -10910,6 +12926,7 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
/* feature expiry (time_t) */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
/* validation response */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
@@ -10918,10 +12935,13 @@
#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
/* application ID */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
/* flag */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
@@ -10959,8 +12979,10 @@
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
/* application expiry time */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
/* application expiry units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
/* enum: expiry units are accounting units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
/* enum: expiry units are calendar days */
@@ -10984,7 +13006,7 @@
*/
#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
@@ -10995,6 +13017,7 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
/* whether to turn on or turn off the masked features */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
/* enum: turn the features off */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
/* enum: turn the features back on */
@@ -11014,12 +13037,13 @@
*/
#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
/* operation code */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
/* enum: install a new license, overwriting any existing temporary license.
* This is an asynchronous operation owing to the time taken to validate an
* ECDSA license
@@ -11037,6 +13061,7 @@
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
/* ECDSA license and signature */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
@@ -11044,15 +13069,18 @@
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
/* status code */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
/* enum: finished validating and installing license */
#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
/* enum: license validation and installation in progress */
@@ -11084,14 +13112,17 @@
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -11101,6 +13132,7 @@
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -11123,20 +13155,24 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
/***********************************/
@@ -11153,6 +13189,7 @@
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
/* the type of configuration setting to change */
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
* internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
*/
@@ -11166,6 +13203,7 @@
* on the type of configuration setting being changed
*/
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* new value: the details depend on the type of configuration setting being
* changed
*/
@@ -11190,12 +13228,14 @@
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
/* the type of configuration setting to read */
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
* the type of configuration setting being read
*/
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
@@ -11228,12 +13268,15 @@
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -11243,6 +13286,7 @@
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -11265,18 +13309,22 @@
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
/***********************************/
@@ -11291,16 +13339,22 @@
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
/* The rx queue to get stats for. */
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
/***********************************/
@@ -11309,6 +13363,8 @@
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
@@ -11316,20 +13372,27 @@
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
/* The maximum number of PFs the device can expose */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
/* The maximum number of VFs the device can expose in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
/* The maximum number of MSI-X vectors the device can provide in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each PF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each VF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one PF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one VF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
/***********************************/
@@ -11347,10 +13410,13 @@
#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
/* Default (canonical) board mode */
#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
/* Current board mode */
#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
/***********************************/
@@ -11359,21 +13425,26 @@
*/
#define MC_CMD_READ_ATB 0x100
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_ATB_IN msgrequest */
#define MC_CMD_READ_ATB_IN_LEN 16
#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
/* MC_CMD_READ_ATB_OUT msgresponse */
#define MC_CMD_READ_ATB_OUT_LEN 4
#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
/***********************************/
@@ -11390,7 +13461,9 @@
/* Each workaround is represented by a single bit according to the enums below.
*/
#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -11425,6 +13498,7 @@
* 1,3 = 0x00030001
*/
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
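
The encoding above puts the PF index in bits 0-15 and the VF index in bits 16-31, which is how PF 1 / VF 3 becomes 0x00030001. A minimal sketch of composing the field from the LBN values just defined (the helper name is hypothetical):

	/* Hypothetical helper: pack a PF/VF pair into the FUNCTION field.
	 * mc_priv_mask_function(1, 3) == 0x00030001, matching the comment
	 * above.
	 */
	static inline u32 mc_priv_mask_function(u32 pf, u32 vf)
	{
		return (pf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN) |
		       (vf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN);
	}
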
@@ -11434,6 +13508,7 @@
* set to 1.
*/
#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
@@ -11460,6 +13535,10 @@
* only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
*/
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
@@ -11469,6 +13548,7 @@
#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
/* For an admin function, always all the privileges are reported. */
#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
/***********************************/
@@ -11485,12 +13565,14 @@
* e.g. VF 1,3 = 0x00030001
*/
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
/* New link state mode to be set */
#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
@@ -11501,11 +13583,12 @@
/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
/***********************************/
/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
* parameter to MC_CMD_INIT_RXQ.
*/
#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
@@ -11519,8 +13602,10 @@
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
/* Minimum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
/* Maximum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
/***********************************/
@@ -11529,7 +13614,7 @@
*/
#define MC_CMD_FUSE_DIAGS 0x102
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_FUSE_DIAGS_IN msgrequest */
#define MC_CMD_FUSE_DIAGS_IN_LEN 0
@@ -11538,28 +13623,40 @@
#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
/* Total number of mismatched bits between pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
/***********************************/
@@ -11576,6 +13673,7 @@
#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
/* The groups of functions to have their privilege masks modified. */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
@@ -11584,6 +13682,7 @@
#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
/* For VFS_OF_PF specify the PF, for ONE specify the target function */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
@@ -11592,10 +13691,12 @@
* refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
/* Privileges to be removed from the target functions. For privilege
* definitions refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
@@ -11613,8 +13714,10 @@
#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
@@ -11633,7 +13736,7 @@
*/
#define MC_CMD_XPM_WRITE_BYTES 0x104
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
@@ -11641,8 +13744,10 @@
#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
/* Start address (byte) */
#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
/* Data */
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
@@ -11659,14 +13764,16 @@
*/
#define MC_CMD_XPM_READ_SECTOR 0x105
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
/* Sector index */
#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
/* Sector size */
#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
@@ -11674,9 +13781,11 @@
#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
/* Sector type */
#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
/* Sector data */
#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
@@ -11691,7 +13800,7 @@
*/
#define MC_CMD_XPM_WRITE_SECTOR 0x106
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
@@ -11708,10 +13817,12 @@
#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
/* Sector type */
#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
/* Sector size */
#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
/* Sector data */
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
@@ -11722,6 +13833,7 @@
#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
/* New sector index */
#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
/***********************************/
@@ -11730,12 +13842,13 @@
*/
#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
/* Sector index */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
@@ -11747,14 +13860,16 @@
*/
#define MC_CMD_XPM_BLANK_CHECK 0x108
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
@@ -11762,6 +13877,7 @@
#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
/* Total number of bad (non-blank) locations */
#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
* into MCDI response)
*/
@@ -11777,14 +13893,16 @@
*/
#define MC_CMD_XPM_REPAIR 0x109
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_REPAIR_IN msgrequest */
#define MC_CMD_XPM_REPAIR_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
/* MC_CMD_XPM_REPAIR_OUT msgresponse */
#define MC_CMD_XPM_REPAIR_OUT_LEN 0
@@ -11797,7 +13915,7 @@
*/
#define MC_CMD_XPM_DECODER_TEST 0x10a
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
@@ -11816,7 +13934,7 @@
*/
#define MC_CMD_XPM_WRITE_TEST 0x10b
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
@@ -11842,10 +13960,13 @@
#define MC_CMD_EXEC_SIGNED_IN_LEN 28
/* the length of code to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
/* the length of data to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
/* the XPM sector containing the key to use */
#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
/* the expected CMAC value */
#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
@@ -11868,11 +13989,34 @@
#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
/* the length of data area to clear */
#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
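
The structuredef above packs one table entry into 4 bytes: UDP port in bits 0-15, protocol in bits 16-31. A hedged sketch of composing an entry from these constants (the helper name is hypothetical); the IANA VXLAN entry works out to 0x000012b5:

	/* Hypothetical helper: build one TUNNEL_ENCAP_UDP_PORT_ENTRY word. */
	static inline u32 tunnel_encap_udp_port_entry(u16 udp_port, u16 protocol)
	{
		return ((u32)udp_port << TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN) |
		       ((u32)protocol << TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN);
	}

	/* tunnel_encap_udp_port_entry(
	 *         TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT,
	 *         TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN) == 0x000012b5
	 */
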
/***********************************/
/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
* Configure UDP ports for tunnel encapsulation hardware acceleration. The
@@ -11913,27 +14057,6 @@
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
-/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
-/* UDP port (the standard ports are named below but any port may be used) */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
-/* enum: the IANA allocated UDP port for VXLAN */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
-/* enum: the IANA allocated UDP port for Geneve */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
-/* tunnel encapsulation protocol (only those named below are supported) */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
-/* enum: VXLAN */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
-/* enum: Geneve */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
-
/***********************************/
/* MC_CMD_RX_BALANCING
@@ -11950,12 +14073,16 @@
#define MC_CMD_RX_BALANCING_IN_LEN 16
/* The RX port whose upconverter table will be modified */
#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
/* The VLAN priority associated to the table index and vFIFO */
#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
/* The resulting bit of SRC^DST for indexing the table */
#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
/* The RX engine to which the vFIFO in the table entry will point */
#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
/* MC_CMD_RX_BALANCING_OUT msgresponse */
#define MC_CMD_RX_BALANCING_OUT_LEN 0
@@ -11976,8 +14103,10 @@
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
/* The tag to be appended */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
/* The length of the data */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
/* The data to be contained in the TLV structure */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
@@ -12002,6 +14131,7 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
/* Data type to be checked */
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
@@ -12009,10 +14139,13 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
/* Number of sectors found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
/* Number of bytes found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
/* Length of signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
/* Signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
@@ -12037,12 +14170,16 @@
#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
/* Function-relative queue instance */
#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
/* Requested value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
/* Requested value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
@@ -12052,8 +14189,10 @@
#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
/* Actual value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
/* Actual value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
/***********************************/
@@ -12071,29 +14210,35 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
/* Reserved for future use. */
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
* nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
* load/reload count.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
* allowed for timer load/reload counts.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
* multiple of this step size will be rounded in an implementation defined
* manner.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
* meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
/* Timer durations requested via MCDI that are not a multiple of this step size
* will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
/* For timers updated using the bug35388 workaround, this is the time interval
* (in nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
@@ -12101,17 +14246,20 @@
* is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, this is the maximum value
* allowed for timer load/reload counts. This field is only meaningful if the
* bug35388 workaround is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, timer load/reload counts
* not a multiple of this step size will be rounded in an implementation
* defined manner. This field is only meaningful if the bug35388 workaround is
* enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
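
The fields above reduce to duration = count * ns_per_count, with counts capped at the *_MAX_COUNT value and rounded to the *_STEP value. A sketch of deriving a register count from a requested duration (the helper name and the round-up policy are assumptions; the hardware's rounding of non-multiple counts is implementation defined, as noted above):

	/* Hypothetical helper: choose a timer load/reload count for a
	 * requested duration in ns, using values read from
	 * MC_CMD_GET_EVQ_TMR_PROPERTIES.
	 */
	static unsigned int evq_tmr_count(unsigned int ns,
					  unsigned int ns_per_count,
					  unsigned int step,
					  unsigned int max_count)
	{
		unsigned int count = DIV_ROUND_UP(ns, ns_per_count);

		return min(roundup(count, step), max_count);
	}
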
/***********************************/
@@ -12129,19 +14277,24 @@
* local queue index.
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
/* Will the common pool be used as TX_vFIFO_ULL (1) */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
/* Number of buffers to reserve for the common pool */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
/* TX datapath to which the Common Pool is connected. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
/* enum: Extracts information from function */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
/* Network port or RX Engine to which the common pool connects. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
/* enum: Extracts information from function */
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
@@ -12157,6 +14310,7 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
/* ID of the common pool allocated */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
/***********************************/
@@ -12173,8 +14327,10 @@
/* Common pool previously allocated with which the new vFIFO will be associated
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
/* Port or RX engine to associate the vFIFO egress */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
/* enum: Extracts information from common pool */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
@@ -12187,12 +14343,15 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
/* Minimum number of buffers that the pool must have */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
/* enum: Do not check the space available */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
/* Will the vFIFO be used as TX_vFIFO_ULL */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
/* Network priority of the vFIFO, if applicable */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
/* enum: Search for the lowest unused priority */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
@@ -12200,8 +14359,10 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
/* Short vFIFO ID */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
/* Network priority of the vFIFO */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
/***********************************/
@@ -12217,6 +14378,7 @@
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
/* Short vFIFO ID */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
@@ -12235,6 +14397,7 @@
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
/* Common pool ID given when pool allocated */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
@@ -12256,8 +14419,10 @@
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
/* Available buffers for the ENG to NET vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
#endif /* MCDI_PCOL_H */
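
The bulk of the header changes above add a _LEN define alongside every dword field's _OFST, so each field's width is now explicit rather than implied. As context for how these layouts are consumed, a minimal sketch using the driver's existing MCDI helpers against the GET_PCIE_RESOURCE_INFO response defined earlier (the function name is hypothetical; error handling is trimmed):

	/* Sketch of an MCDI query: issue the zero-length request, check the
	 * response is long enough, then pull one dword out by field name.
	 */
	static int example_get_max_vfs(struct efx_nic *efx, unsigned int *max_vfs)
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN);
		size_t outlen;
		int rc;

		rc = efx_mcdi_rpc(efx, MC_CMD_GET_PCIE_RESOURCE_INFO, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN)
			return -EIO;
		*max_vfs = MCDI_DWORD(outbuf, GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS);
		return 0;
	}
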
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 6e1f282b2976..ce8aabf9091e 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -171,89 +171,108 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
- u32 result = 0;
+ #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+ bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
switch (media) {
case MC_CMD_MEDIA_KX4:
- result |= SUPPORTED_Backplane;
+ SET_BIT(Backplane);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseKX_Full;
+ SET_BIT(1000baseKX_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseKX4_Full;
+ SET_BIT(10000baseKX4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseKR4_Full;
+ SET_BIT(40000baseKR4_Full);
break;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
- result |= SUPPORTED_FIBRE;
+ SET_BIT(FIBRE);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseCR4_Full;
+ SET_BIT(40000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(100000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(25000baseCR_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ SET_BIT(50000baseCR2_Full);
break;
case MC_CMD_MEDIA_BASE_T:
- result |= SUPPORTED_TP;
+ SET_BIT(TP);
if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- result |= SUPPORTED_10baseT_Half;
+ SET_BIT(10baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- result |= SUPPORTED_10baseT_Full;
+ SET_BIT(10baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- result |= SUPPORTED_100baseT_Half;
+ SET_BIT(100baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- result |= SUPPORTED_100baseT_Full;
+ SET_BIT(100baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- result |= SUPPORTED_1000baseT_Half;
+ SET_BIT(1000baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
break;
}
if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- result |= SUPPORTED_Pause;
+ SET_BIT(Pause);
if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- result |= SUPPORTED_Asym_Pause;
+ SET_BIT(Asym_Pause);
if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- result |= SUPPORTED_Autoneg;
+ SET_BIT(Autoneg);
- return result;
+ #undef SET_BIT
}
-static u32 ethtool_to_mcdi_cap(u32 cap)
+static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
u32 result = 0;
- if (cap & SUPPORTED_10baseT_Half)
+ #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ if (TEST_BIT(10baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
- if (cap & SUPPORTED_10baseT_Full)
+ if (TEST_BIT(10baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
- if (cap & SUPPORTED_100baseT_Half)
+ if (TEST_BIT(100baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
- if (cap & SUPPORTED_100baseT_Full)
+ if (TEST_BIT(100baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
- if (cap & SUPPORTED_1000baseT_Half)
+ if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (cap & SUPPORTED_Pause)
+ if (TEST_BIT(100000baseCR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+ if (TEST_BIT(25000baseCR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+ if (TEST_BIT(50000baseCR2_Full))
+ result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+ if (TEST_BIT(Pause))
result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
- if (cap & SUPPORTED_Asym_Pause)
+ if (TEST_BIT(Asym_Pause))
result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
- if (cap & SUPPORTED_Autoneg)
+ if (TEST_BIT(Autoneg))
result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+ #undef TEST_BIT
+
return result;
}
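
The two converters above are inverses over the capability bits they both understand, which the following hedged sketch illustrates (the function name and capability values are illustrative only):

	/* Illustrative round trip: MCDI caps -> ethtool bitmap -> MCDI caps. */
	static u32 example_round_trip(void)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
		u32 caps = (1 << MC_CMD_PHY_CAP_10000FDX_LBN) |
			   (1 << MC_CMD_PHY_CAP_PAUSE_LBN);

		mcdi_to_ethtool_linkset(MC_CMD_MEDIA_SFP_PLUS, caps, modes);
		/* modes now has FIBRE, 10000baseT_Full and Pause set */
		return ethtool_linkset_to_mcdi_cap(modes);	/* == caps */
	}
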
@@ -285,7 +304,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
return flags;
}
-static u32 mcdi_to_ethtool_media(u32 media)
+static u8 mcdi_to_ethtool_media(u32 media)
{
switch (media) {
case MC_CMD_MEDIA_XAUI:
@@ -371,8 +390,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->link_advertising =
- mcdi_to_ethtool_cap(phy_data->media, caps);
+ mcdi_to_ethtool_linkset(phy_data->media, caps,
+ efx->link_advertising);
else
phy_data->forced_cap = caps;
@@ -435,8 +454,8 @@ fail:
int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 caps = (efx->link_advertising ?
- ethtool_to_mcdi_cap(efx->link_advertising) :
+ u32 caps = (efx->link_advertising[0] ?
+ ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
@@ -509,34 +528,28 @@ static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
- u32 supported, advertising, lp_advertising;
- supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
- advertising = efx->link_advertising;
cmd->base.speed = efx->link_state.speed;
cmd->base.duplex = efx->link_state.fd;
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
- cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
cmd->base.mdio_support = (efx->mdio.mode_support &
(MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+ cmd->link_modes.supported);
+ memcpy(cmd->link_modes.advertising, efx->link_advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return;
- lp_advertising =
- mcdi_to_ethtool_cap(phy_cfg->media,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
- lp_advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+ cmd->link_modes.lp_advertising);
}
static int
@@ -546,29 +559,28 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
- u32 advertising;
-
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
if (cmd->base.autoneg) {
- caps = (ethtool_to_mcdi_cap(advertising) |
- 1 << MC_CMD_PHY_CAP_AN_LBN);
+ caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
} else if (cmd->base.duplex) {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
- case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
- case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
+ case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+ case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
+ case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
+ default: return -EINVAL;
}
} else {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
}
}
@@ -578,11 +590,10 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
return rc;
if (cmd->base.autoneg) {
- efx_link_set_advertising(
- efx, advertising | ADVERTISED_Autoneg);
+ efx_link_set_advertising(efx, cmd->link_modes.advertising);
phy_cfg->forced_cap = 0;
} else {
- efx_link_set_advertising(efx, 0);
+ efx_link_clear_advertising(efx);
phy_cfg->forced_cap = caps;
}
return 0;
@@ -985,6 +996,9 @@ static unsigned int efx_mcdi_event_link_speed[] = {
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
};
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
@@ -1087,7 +1101,7 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
int period = action == EFX_STATS_ENABLE ? 1000 : 0;
dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
u32 dma_len = action != EFX_STATS_DISABLE ?
- MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
+ efx->num_mac_stats * sizeof(u64) : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
@@ -1121,7 +1135,7 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
{
__le64 *dma_stats = efx->stats_buffer.addr;
- dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}
@@ -1139,10 +1153,10 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
__le64 *dma_stats = efx->stats_buffer.addr;
int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
- dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
- while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+ while (dma_stats[efx->num_mac_stats - 1] ==
EFX_MC_STATS_GENERATION_INVALID &&
attempts-- != 0)
udelay(EFX_MAC_STATS_WAIT_US);
@@ -1167,7 +1181,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+ efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c0537ea06c9a..d20a8660ee48 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -191,6 +191,7 @@ struct efx_tx_buffer {
* Size of the region is efx_piobuf_size.
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
+ * @timestamping: Is timestamping enabled for this channel?
* @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
* may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
@@ -202,6 +203,10 @@ struct efx_tx_buffer {
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @merge_events: Number of TX merged completion events
+ * @completed_desc_ptr: Most recent completed pointer - only used with
+ * timestamping.
+ * @completed_timestamp_major: Top part of the most recent tx timestamp.
+ * @completed_timestamp_minor: Low part of the most recent tx timestamp.
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@@ -247,6 +252,7 @@ struct efx_tx_queue {
void __iomem *piobuf;
unsigned int piobuf_offset;
bool initialised;
+ bool timestamping;
/* Function pointers used in the fast path. */
int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
@@ -257,6 +263,9 @@ struct efx_tx_queue {
unsigned int merge_events;
unsigned int bytes_compl;
unsigned int pkts_compl;
+ unsigned int completed_desc_ptr;
+ u32 completed_timestamp_major;
+ u32 completed_timestamp_minor;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -522,8 +531,12 @@ struct efx_msi_context {
* @copy: Copy the channel state prior to reallocation. May be %NULL if
* reallocation is not supported.
* @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @want_txqs: Determine whether this channel should have TX queues
+ * created. If %NULL, TX queues are not created.
* @keep_eventq: Flag for whether event queue should be kept initialised
* while the device is stopped
+ * @want_pio: Flag for whether PIO buffers should be linked to this
+ * channel's TX queues.
*/
struct efx_channel_type {
void (*handle_no_channel)(struct efx_nic *);
@@ -532,7 +545,9 @@ struct efx_channel_type {
void (*get_name)(struct efx_channel *, char *buf, size_t len);
struct efx_channel *(*copy)(const struct efx_channel *);
bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
+ bool (*want_txqs)(struct efx_channel *);
bool keep_eventq;
+ bool want_pio;
};
enum efx_led_mode {
@@ -708,6 +723,7 @@ struct vfdi_status;
* @reset_work: Scheduled reset workitem
* @membase_phys: Memory BAR value as physical address
* @membase: Memory BAR value
+ * @vi_stride: step between per-VI registers / memory regions
* @interrupt_mode: Interrupt mode
* @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
@@ -734,6 +750,7 @@ struct vfdi_status;
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
+ * @n_extra_tx_channels: Number of extra channels with TX queues
 * @rx_ip_align: RX DMA address offset to have IP header aligned in
  *	accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
@@ -773,6 +790,8 @@ struct vfdi_status;
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @fixed_features: Features which cannot be turned off
+ * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS
+ * field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_op: PHY interface
@@ -812,6 +831,7 @@ struct vfdi_status;
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
* @ptp_data: PTP state data
+ * @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
@@ -842,6 +862,8 @@ struct efx_nic {
resource_size_t membase_phys;
void __iomem *membase;
+ unsigned int vi_stride;
+
enum efx_int_mode interrupt_mode;
unsigned int timer_quantum_ns;
unsigned int timer_max_ns;
@@ -875,6 +897,7 @@ struct efx_nic {
unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
+ unsigned n_extra_tx_channels;
unsigned int rx_ip_align;
unsigned int rx_dma_len;
unsigned int rx_buffer_order;
@@ -918,6 +941,7 @@ struct efx_nic {
netdev_features_t fixed_features;
+ u16 num_mac_stats;
struct efx_buffer stats_buffer;
u64 rx_nodesc_drops_total;
u64 rx_nodesc_drops_while_down;
@@ -930,7 +954,7 @@ struct efx_nic {
unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
- u32 link_advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
struct efx_link_state link_state;
unsigned int n_link_state_changes;
@@ -965,6 +989,7 @@ struct efx_nic {
#endif
struct efx_ptp_data *ptp_data;
+ bool ptp_warned;
char *vpd_sn;
@@ -1154,7 +1179,7 @@ struct efx_udp_tunnel {
*/
struct efx_nic_type {
bool is_vf;
- unsigned int mem_bar;
+ unsigned int (*mem_bar)(struct efx_nic *efx);
unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
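
Turning mem_bar into a callback lets the register BAR vary with the NIC's configuration rather than being fixed per nic_type. A hypothetical implementation for a board whose registers always sit in BAR 2:

	/* Hypothetical mem_bar() hook: a fixed-BAR board returns a constant;
	 * a board with movable BARs could consult efx state here instead.
	 */
	static unsigned int example_mem_bar(struct efx_nic *efx)
	{
		return 2;
	}
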
@@ -1355,8 +1380,8 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return channel->channel - channel->efx->tx_channel_offset <
- channel->efx->n_tx_channels;
+ return channel->type && channel->type->want_txqs &&
+ channel->type->want_txqs(channel);
}
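
After this change, owning TX queues is a per-channel-type decision rather than a positional test against tx_channel_offset. A hypothetical channel type wiring up the new hook (names are illustrative; handlers not shown are omitted):

	/* Hypothetical channel type using the new want_txqs hook. */
	static bool example_want_txqs(struct efx_channel *channel)
	{
		return true;	/* this channel type always owns TX queues */
	}

	static const struct efx_channel_type example_channel_type = {
		.want_txqs	= example_want_txqs,
		.keep_eventq	= false,
		.want_pio	= false,
	};
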
static inline struct efx_tx_queue *
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7b51b6371724..6549fc685a48 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -325,6 +325,30 @@ enum {
EF10_STAT_tx_bad,
EF10_STAT_tx_bad_bytes,
EF10_STAT_tx_overflow,
+ EF10_STAT_V1_COUNT,
+ EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
+ EF10_STAT_fec_corrected_errors,
+ EF10_STAT_fec_corrected_symbols_lane0,
+ EF10_STAT_fec_corrected_symbols_lane1,
+ EF10_STAT_fec_corrected_symbols_lane2,
+ EF10_STAT_fec_corrected_symbols_lane3,
+ EF10_STAT_ctpio_dmabuf_start,
+ EF10_STAT_ctpio_vi_busy_fallback,
+ EF10_STAT_ctpio_long_write_success,
+ EF10_STAT_ctpio_missing_dbell_fail,
+ EF10_STAT_ctpio_overflow_fail,
+ EF10_STAT_ctpio_underflow_fail,
+ EF10_STAT_ctpio_timeout_fail,
+ EF10_STAT_ctpio_noncontig_wr_fail,
+ EF10_STAT_ctpio_frm_clobber_fail,
+ EF10_STAT_ctpio_invalid_wr_fail,
+ EF10_STAT_ctpio_vi_clobber_fallback,
+ EF10_STAT_ctpio_unqualified_fallback,
+ EF10_STAT_ctpio_runt_fallback,
+ EF10_STAT_ctpio_success,
+ EF10_STAT_ctpio_fallback,
+ EF10_STAT_ctpio_poison,
+ EF10_STAT_ctpio_erase,
EF10_STAT_COUNT
};
@@ -416,6 +440,7 @@ struct efx_ef10_nic_data {
struct efx_udp_tunnel udp_tunnels[16];
bool udp_tunnels_dirty;
struct mutex udp_tunnels_lock;
+ u64 licensed_features;
};
int efx_init_sriov(void);
@@ -424,6 +449,7 @@ void efx_fini_sriov(void);
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
void efx_ptp_remove(struct efx_nic *efx);
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
@@ -447,6 +473,8 @@ static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
}
void efx_ptp_start_datapath(struct efx_nic *efx);
void efx_ptp_stop_datapath(struct efx_nic *efx);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index caa89bf7603e..f21661532ed3 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -149,18 +149,14 @@ enum ptp_packet_state {
/* Maximum parts-per-billion adjustment that is acceptable */
#define MAX_PPB 1000000
-/* Number of bits required to hold the above */
-#define MAX_PPB_BITS 20
-
-/* Number of extra bits allowed when calculating fractional ns.
- * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
- * be less than 63.
- */
-#define PPB_EXTRA_BITS 2
-
/* Precalculate scale word to avoid long long division at runtime */
-#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
- MAX_PPB_BITS)) / 1000000000LL)
+/* This is equivalent to 2^66 / 10^9. */
+#define PPB_SCALE_WORD ((1LL << (57)) / 1953125LL)
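+/* 10^9 factors as 2^9 * 1953125, so 2^66 / 10^9 reduces to (1LL << 57) / 1953125. */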
+
+/* How much to shift down after scaling to convert to FP40 */
+#define PPB_SHIFT_FP40 26
+/* ... and FP44. */
+#define PPB_SHIFT_FP44 22
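+/* Scaling by PPB_SCALE_WORD (2^66 / 10^9) and shifting right by 26 or 22 bits
+ * leaves 66 - 26 = 40 or 66 - 22 = 44 fractional bits, matching the FP40 and
+ * FP44 formats named above.
+ */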
#define PTP_SYNC_ATTEMPTS 4
@@ -218,8 +214,8 @@ struct efx_ptp_timeset {
* @channel: The PTP channel (Siena only)
* @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
* separate events)
- * @rxq: Receive queue (awaiting timestamps)
- * @txq: Transmit queue
+ * @rxq: Receive SKB queue (awaiting timestamps)
+ * @txq: Transmit SKB queue
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
@@ -233,19 +229,36 @@ struct efx_ptp_timeset {
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
- * @time_format: Time format supported by this NIC
* @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
* @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @nic_time.minor_max: Wrap point for NIC minor times
+ * @nic_time.sync_event_diff_min: Minimum acceptable difference between time
+ * in packet prefix and last MCDI time sync event, i.e. how much earlier than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_diff_max: Maximum acceptable difference between time
+ * in packet prefix and last MCDI time sync event, i.e. how much later than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_minor_shift: Shift required to make minor time from
+ * field in MCDI time sync event.
* @min_synchronisation_ns: Minimum acceptable corrected sync window
- * @ts_corrections.tx: Required driver correction of transmit timestamps
- * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @capabilities: Capabilities flags from the NIC
+ * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit
+ * timestamps
+ * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive
+ * timestamps
* @ts_corrections.pps_out: PPS output error (information only)
* @ts_corrections.pps_in: Required driver correction of PPS input timestamps
+ * @ts_corrections.general_tx: Required driver correction of general packet
+ * transmit timestamps
+ * @ts_corrections.general_rx: Required driver correction of general packet
+ * receive timestamps
* @evt_frags: Partly assembled PTP events
* @evt_frag_idx: Current fragment number
* @evt_code: Last event code
* @start: Address at which MC indicates ready for synchronisation
* @host_time_pps: Host time at last PPS
+ * @adjfreq_ppb_shift: Shift required to convert scaled parts-per-billion
+ * frequency adjustment into a fixed point fractional nanosecond format.
* @current_adjfreq: Current ppb adjustment.
* @phc_clock: Pointer to registered phc device (if primary function)
* @phc_clock_info: Registration structure for phc device
@@ -264,6 +277,7 @@ struct efx_ptp_timeset {
* @oversize_sync_windows: Number of corrected sync windows that are too large
* @rx_no_timestamp: Number of packets received without a timestamp.
* @timeset: Last set of synchronisation statistics.
+ * @xmit_skb: Transmit SKB function.
*/
struct efx_ptp_data {
struct efx_nic *efx;
@@ -284,22 +298,31 @@ struct efx_ptp_data {
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
- unsigned int time_format;
void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
s32 correction);
+ struct {
+ u32 minor_max;
+ u32 sync_event_diff_min;
+ u32 sync_event_diff_max;
+ unsigned int sync_event_minor_shift;
+ } nic_time;
unsigned int min_synchronisation_ns;
+ unsigned int capabilities;
struct {
- s32 tx;
- s32 rx;
+ s32 ptp_tx;
+ s32 ptp_rx;
s32 pps_out;
s32 pps_in;
+ s32 general_tx;
+ s32 general_rx;
} ts_corrections;
efx_qword_t evt_frags[MAX_EVENT_FRAGS];
int evt_frag_idx;
int evt_code;
struct efx_buffer start;
struct pps_event_time host_time_pps;
+ unsigned int adjfreq_ppb_shift;
s64 current_adjfreq;
struct ptp_clock *phc_clock;
struct ptp_clock_info phc_clock_info;
@@ -319,6 +342,7 @@ struct efx_ptp_data {
unsigned int rx_no_timestamp;
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+ void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb);
};
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
@@ -329,6 +353,24 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
static int efx_phc_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return ((efx_nic_rev(efx) >= EFX_REV_HUNT_A0) &&
+ (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN)
+ ));
+}
+
+/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
+ * if PTP uses MAC TX timestamps, not if PTP uses the MC directly to transmit.
+ */
+static bool efx_ptp_want_txqs(struct efx_channel *channel)
+{
+ return efx_ptp_use_mac_tx_timestamps(channel->efx);
+}
+
#define PTP_SW_STAT(ext_name, field_name) \
{ #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
#define PTP_MC_STAT(ext_name, mcdi_name) \
@@ -471,6 +513,89 @@ static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
return efx_ptp_s27_to_ktime(nic_major, nic_minor);
}
+/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */
+static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec64 ts = ns_to_timespec64(ns);
+
+ *nic_major = (u32)ts.tv_sec;
+ *nic_minor = ts.tv_nsec * 4;
+}
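+/* e.g. ns = 1500000000 (1.5s) yields nic_major = 1 and nic_minor = 2000000000
+ * quarter-nanoseconds (0.5s at 4e9 qns per second).
+ */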
+
+static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt;
+
+ nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4);
+ correction = DIV_ROUND_CLOSEST(correction, 4);
+
+ kt = ktime_set(nic_major, nic_minor);
+
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
+{
+ return efx->ptp_data ? efx->ptp_data->channel : NULL;
+}
+
+static u32 last_sync_timestamp_major(struct efx_nic *efx)
+{
+ struct efx_channel *channel = efx_ptp_channel(efx);
+ u32 major = 0;
+
+ if (channel)
+ major = channel->sync_timestamp_major;
+ return major;
+}
+
+/* The 8000 series and later can provide the time from the MAC, which is only
+ * 48 bits long and provides meta-information in the top 2 bits.
+ */
+static ktime_t
+efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
+ struct efx_ptp_data *ptp,
+ u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = { 0 };
+
+ if (!(nic_major & 0x80000000)) {
+ WARN_ON_ONCE(nic_major >> 16);
+ /* Use the top bits from the latest sync event. */
+ nic_major &= 0xffff;
+ nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
+
+ kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
+ correction);
+ }
+ return kt;
+}
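+/* Example: if the low 16 bits of the completion timestamp are 0x1234 and the
+ * last sync event reported major time 0x00abcdef, the reconstructed major is
+ * 0x00ab1234. When the top bit (0x80000000) is set, the value carries
+ * meta-information rather than a usable timestamp, and a zero ktime is
+ * returned.
+ */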
+
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ ktime_t kt;
+
+ if (efx_ptp_use_mac_tx_timestamps(efx))
+ kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp,
+ tx_queue->completed_timestamp_major,
+ tx_queue->completed_timestamp_minor,
+ ptp->ts_corrections.general_tx);
+ else
+ kt = ptp->nic_to_kernel_time(
+ tx_queue->completed_timestamp_major,
+ tx_queue->completed_timestamp_minor,
+ ptp->ts_corrections.general_tx);
+ return kt;
+}
+
/* Get PTP attributes and set up time conversions */
static int efx_ptp_get_attributes(struct efx_nic *efx)
{
@@ -502,31 +627,71 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
return rc;
}
- if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+ switch (fmt) {
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
- } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+ ptp->nic_time.minor_max = 1 << 27;
+ ptp->nic_time.sync_event_minor_shift = 19;
+ break;
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
- } else {
+ ptp->nic_time.minor_max = 1000000000;
+ ptp->nic_time.sync_event_minor_shift = 22;
+ break;
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns;
+ ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction;
+ ptp->nic_time.minor_max = 4000000000UL;
+ ptp->nic_time.sync_event_minor_shift = 24;
+ break;
+ default:
return -ERANGE;
}
- ptp->time_format = fmt;
-
- /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
- * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
- * to use for the minimum acceptable corrected synchronization window.
+ /* Precalculate acceptable difference between the minor time in the
+ * packet prefix and the last MCDI time sync event. We expect the
+ * packet prefix timestamp to be after the sync event by up to one
+ * sync event interval (0.25s), but we allow it to exceed this by a
+ * fuzz factor of 0.1s.
+ */
+ ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max
+ - (ptp->nic_time.minor_max / 10);
+ ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4)
+ + (ptp->nic_time.minor_max / 10);
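+ /* For example, with the S27 format minor_max = 2^27 = 134217728, giving
+ * sync_event_diff_max = 2^27 / 4 + 2^27 / 10 = 46976204 and
+ * sync_event_diff_min = 2^27 - 2^27 / 10 = 120795956.
+ */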
+
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an older
+ * operation MC_CMD_PTP_OP_GET_TIME_FORMAT. The command may now return
+ * a value to use for the minimum acceptable corrected synchronization
+ * window and may return further capabilities.
* If we have the extra information store it. For older firmware that
* does not implement the extended command use the default value.
*/
- if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ if (rc == 0 &&
+ out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST)
ptp->min_synchronisation_ns =
MCDI_DWORD(outbuf,
PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
else
ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+ if (rc == 0 &&
+ out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->capabilities = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_CAPABILITIES);
+ else
+ ptp->capabilities = 0;
+
+ /* Set up the shift for conversion between frequency
+ * adjustments in parts-per-billion and the fixed-point
+ * fractional ns format that the adapter uses.
+ */
+ if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN))
+ ptp->adjfreq_ppb_shift = PPB_SHIFT_FP44;
+ else
+ ptp->adjfreq_ppb_shift = PPB_SHIFT_FP40;
+
return 0;
}
@@ -534,8 +699,9 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN);
int rc;
+ size_t out_len;
/* Get the timestamp corrections from the NIC. If this operation is
* not supported (older NICs) then no correction is required.
@@ -545,21 +711,37 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), NULL);
+ outbuf, sizeof(outbuf), &out_len);
if (rc == 0) {
- efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+ efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
- efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+ efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+
+ if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) {
+ efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD(
+ outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX);
+ efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD(
+ outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX);
+ } else {
+ efx->ptp_data->ts_corrections.general_tx =
+ efx->ptp_data->ts_corrections.ptp_tx;
+ efx->ptp_data->ts_corrections.general_rx =
+ efx->ptp_data->ts_corrections.ptp_rx;
+ }
} else if (rc == -EINVAL) {
- efx->ptp_data->ts_corrections.tx = 0;
- efx->ptp_data->ts_corrections.rx = 0;
+ efx->ptp_data->ts_corrections.ptp_tx = 0;
+ efx->ptp_data->ts_corrections.ptp_rx = 0;
efx->ptp_data->ts_corrections.pps_out = 0;
efx->ptp_data->ts_corrections.pps_in = 0;
+ efx->ptp_data->ts_corrections.general_tx = 0;
+ efx->ptp_data->ts_corrections.general_rx = 0;
} else {
efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
sizeof(outbuf), rc);
@@ -873,8 +1055,24 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
return rc;
}
+/* Transmit a PTP packet via the dedicated hardware timestamped queue. */
+static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
+ struct efx_tx_queue *tx_queue;
+ u8 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+
+ tx_queue = &ptp_data->channel->tx_queue[type];
+ if (tx_queue && tx_queue->timestamping) {
+ efx_enqueue_skb(tx_queue, skb);
+ } else {
+ WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
+ dev_kfree_skb_any(skb);
+ }
+}
+
/* Transmit a PTP packet, via the MCDI interface, to the wire. */
-static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
{
struct efx_ptp_data *ptp_data = efx->ptp_data;
struct skb_shared_hwtstamps timestamps;
@@ -910,16 +1108,16 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
- ptp_data->ts_corrections.tx);
+ ptp_data->ts_corrections.ptp_tx);
skb_tstamp_tx(skb, &timestamps);
rc = 0;
fail:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
- return rc;
+ return;
}
static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
@@ -1189,7 +1387,7 @@ static void efx_ptp_worker(struct work_struct *work)
efx_ptp_process_events(efx, &tempq);
while ((skb = skb_dequeue(&ptp_data->txq)))
- efx_ptp_xmit_skb(efx, skb);
+ ptp_data->xmit_skb(efx, skb);
while ((skb = __skb_dequeue(&tempq)))
efx_ptp_process_rx(efx, skb);
@@ -1239,6 +1437,14 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
goto fail2;
}
+ if (efx_ptp_use_mac_tx_timestamps(efx)) {
+ ptp->xmit_skb = efx_ptp_xmit_skb_queue;
+ /* Request sync events on this channel. */
+ channel->sync_events_state = SYNC_EVENTS_QUIESCENT;
+ } else {
+ ptp->xmit_skb = efx_ptp_xmit_skb_mc;
+ }
+
INIT_WORK(&ptp->work, efx_ptp_worker);
ptp->config.flags = 0;
ptp->config.tx_type = HWTSTAMP_TX_OFF;
@@ -1303,11 +1509,21 @@ fail1:
static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ int rc;
channel->irq_moderation_us = 0;
channel->rx_queue.core_index = 0;
- return efx_ptp_probe(efx, channel);
+ rc = efx_ptp_probe(efx, channel);
+ /* Failure to probe PTP is not fatal; this channel will just not be
+ * used for anything.
+ * In the case of EPERM, efx_ptp_probe will print its own message (in
+ * efx_ptp_get_attributes()), so we don't need to.
+ */
+ if (rc && rc != -EPERM)
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to probe PTP, rc=%d\n", rc);
+ return 0;
}
void efx_ptp_remove(struct efx_nic *efx)
@@ -1332,6 +1548,7 @@ void efx_ptp_remove(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->ptp_data->start);
kfree(efx->ptp_data);
+ efx->ptp_data = NULL;
}
static void efx_ptp_remove_channel(struct efx_channel *channel)
@@ -1548,6 +1765,17 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
+ /* Check licensed features. If we don't have the license for TX
+ * timestamps, the NIC will not support them.
+ */
+ if (efx_ptp_use_mac_tx_timestamps(efx)) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!(nic_data->licensed_features &
+ (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN)))
+ ts_info->so_timestamping &=
+ ~SOF_TIMESTAMPING_TX_HARDWARE;
+ }
if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
ts_info->phc_index =
ptp_clock_index(primary->ptp_data->phc_clock);
@@ -1627,7 +1855,7 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
- ptp->ts_corrections.rx);
+ ptp->ts_corrections.ptp_rx);
evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
list_add_tail(&evt->link, &ptp->evt_list);
@@ -1662,9 +1890,11 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
if (!ptp) {
- if (net_ratelimit())
+ if (!efx->ptp_warned) {
netif_warn(efx, drv, efx->net_dev,
"Received PTP event but PTP not set up\n");
+ efx->ptp_warned = true;
+ }
return;
}
@@ -1707,9 +1937,20 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
{
+ struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ /* When extracting the sync timestamp minor value, discard the least
+ * significant two bits; they are not needed to reconstruct full-range
+ * timestamps, and are optionally used to report status depending on
+ * the options supplied when subscribing for sync events.
+ */
channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
channel->sync_timestamp_minor =
- MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+ (MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC)
+ << ptp->nic_time.sync_event_minor_shift;
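+ /* e.g. with the S27 format (shift 19), the six retained bits of the
+ * event field land at bits 21-26, the top of the 2^27 minor range.
+ */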
+
/* if sync events have been disabled then we want to silently ignore
* this event, so throw away result.
*/
@@ -1717,15 +1958,6 @@ void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
SYNC_EVENTS_VALID);
}
-/* make some assumptions about the time representation rather than abstract it,
- * since we currently only support one type of inline timestamping and only on
- * EF10.
- */
-#define MINOR_TICKS_PER_SECOND 0x8000000
-/* Fuzz factor for sync events to be out of order with RX events */
-#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
-#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
-
static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
@@ -1743,31 +1975,33 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
struct sk_buff *skb)
{
struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
u32 pkt_timestamp_major, pkt_timestamp_minor;
u32 diff, carry;
struct skb_shared_hwtstamps *timestamps;
- pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
- skb_mac_header(skb)) +
- (u32) efx->ptp_data->ts_corrections.rx) &
- (MINOR_TICKS_PER_SECOND - 1);
+ if (channel->sync_events_state != SYNC_EVENTS_VALID)
+ return;
+
+ pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb));
/* get the difference between the packet and sync timestamps,
* modulo one second
*/
- diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
- (MINOR_TICKS_PER_SECOND - 1);
+ diff = pkt_timestamp_minor - channel->sync_timestamp_minor;
+ if (pkt_timestamp_minor < channel->sync_timestamp_minor)
+ diff += ptp->nic_time.minor_max;
+
/* do we roll over a second boundary and need to carry the one? */
- carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+ carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ?
1 : 0;
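+ /* e.g. for S27: sync minor = 130000000 and pkt minor = 1000000 gives
+ * diff = 5217728 after wrapping, and since 130000000 >= minor_max - diff
+ * the packet crossed a second boundary, so carry = 1.
+ */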
- if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
- FUZZ) {
+ if (diff <= ptp->nic_time.sync_event_diff_max) {
/* packet is ahead of the sync event by a quarter of a second or
* less (allowing for fuzz)
*/
pkt_timestamp_major = channel->sync_timestamp_major + carry;
- } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+ } else if (diff >= ptp->nic_time.sync_event_diff_min) {
/* packet is behind the sync event but within the fuzz factor.
* This means the RX packet and sync event crossed as they were
* placed on the event queue, which can sometimes happen.
@@ -1789,7 +2023,9 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
/* attach the timestamps to the skb */
timestamps = skb_hwtstamps(skb);
timestamps->hwtstamp =
- efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+ ptp->nic_to_kernel_time(pkt_timestamp_major,
+ pkt_timestamp_minor,
+ ptp->ts_corrections.general_rx);
}
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
@@ -1807,9 +2043,10 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
else if (delta < -MAX_PPB)
delta = -MAX_PPB;
- /* Convert ppb to fixed point ns. */
- adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
- (PPB_EXTRA_BITS + MAX_PPB_BITS));
+ /* Convert ppb to fixed-point ns, taking care to round correctly. */
+ adjustment_ns = ((s64)delta * PPB_SCALE_WORD +
+ (1 << (ptp_data->adjfreq_ppb_shift - 1))) >>
+ ptp_data->adjfreq_ppb_shift;
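+ /* Sanity check: delta = 1000000 ppb scales to roughly 2^40 / 1000 with
+ * the FP40 shift, i.e. a fractional frequency offset of 10^-3 in fixed
+ * point, as expected.
+ */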
MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
@@ -1916,6 +2153,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.get_name = efx_ptp_get_channel_name,
/* no copy operation; there is no need to reallocate this channel */
.receive_skb = efx_ptp_rx,
+ .want_txqs = efx_ptp_want_txqs,
.keep_eventq = false,
};
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a617f657eae3..ae8645ae4492 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -242,6 +242,14 @@ static int siena_dimension_resources(struct efx_nic *efx)
return 0;
}
+/* On all Falcon-architecture NICs, PFs use BAR 0 for I/O space and BAR 2(&3)
+ * for memory.
+ */
+static unsigned int siena_mem_bar(struct efx_nic *efx)
+{
+ return 2;
+}
+
static unsigned int siena_mem_map_size(struct efx_nic *efx)
{
return FR_CZ_MC_TREG_SMEM +
@@ -547,7 +555,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
dma_stats = efx->stats_buffer.addr;
- generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ generation_end = dma_stats[efx->num_mac_stats - 1];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
@@ -950,7 +958,7 @@ fail:
const struct efx_nic_type siena_a0_nic_type = {
.is_vf = false,
- .mem_bar = EFX_MEM_BAR,
+ .mem_bar = siena_mem_bar,
.mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9937a2450e57..cece961f2e82 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -77,9 +77,23 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
if (buffer->flags & EFX_TX_BUF_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
(*pkts_compl)++;
- (*bytes_compl) += buffer->skb->len;
+ (*bytes_compl) += skb->len;
+ if (tx_queue->timestamping &&
+ (tx_queue->completed_timestamp_major ||
+ tx_queue->completed_timestamp_minor)) {
+ struct skb_shared_hwtstamps hwtstamp;
+
+ hwtstamp.hwtstamp =
+ efx_ptp_nic_to_kernel_time(tx_queue);
+ skb_tstamp_tx(skb, &hwtstamp);
+
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+ }
dev_consume_skb_any((struct sk_buff *)buffer->skb);
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
@@ -828,6 +842,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
tx_queue->xmit_more_available = false;
+ tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
+ tx_queue->channel == efx_ptp_channel(efx));
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
/* Set up default function pointers. These may get replaced by
* efx_nic_init_tx() based off NIC/queue capabilities.
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
new file mode 100644
index 000000000000..6bcfe27fc560
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -0,0 +1,34 @@
+config NET_VENDOR_SOCIONEXT
+ bool "Socionext ethernet drivers"
+ default y
+ ---help---
+ Option to select ethernet drivers for Socionext platforms.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Socionext devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_SOCIONEXT
+
+config SNI_AVE
+ tristate "Socionext AVE ethernet support"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ select PHYLIB
+ ---help---
+ Driver for gigabit ethernet MACs, called AVE, in the
+ Socionext UniPhier family.
+
+config SNI_NETSEC
+ tristate "Socionext NETSEC ethernet support"
+ depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF
+ select PHYLIB
+ select MII
+ ---help---
+ Enable to add support for the SocioNext NetSec Gigabit Ethernet
+ controller + PHY, as found on the SynQuacer SC2A11 SoC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called netsec. If unsure, say N.
+
+endif #NET_VENDOR_SOCIONEXT
diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile
new file mode 100644
index 000000000000..7fd837a999fd
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for all ethernet ip drivers on Socionext platforms
+#
+obj-$(CONFIG_SNI_AVE) += sni_ave.o
+obj-$(CONFIG_SNI_NETSEC) += netsec.o
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
new file mode 100644
index 000000000000..f4c0b02ddad8
--- /dev/null
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/of_mdio.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
+
+#define NETSEC_REG_SOFT_RST 0x104
+#define NETSEC_REG_COM_INIT 0x120
+
+#define NETSEC_REG_TOP_STATUS 0x200
+#define NETSEC_IRQ_RX BIT(1)
+#define NETSEC_IRQ_TX BIT(0)
+
+#define NETSEC_REG_TOP_INTEN 0x204
+#define NETSEC_REG_INTEN_SET 0x234
+#define NETSEC_REG_INTEN_CLR 0x238
+
+#define NETSEC_REG_NRM_TX_STATUS 0x400
+#define NETSEC_REG_NRM_TX_INTEN 0x404
+#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
+#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
+#define NRM_TX_ST_NTOWNR BIT(17)
+#define NRM_TX_ST_TR_ERR BIT(16)
+#define NRM_TX_ST_TXDONE BIT(15)
+#define NRM_TX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_NRM_RX_STATUS 0x440
+#define NETSEC_REG_NRM_RX_INTEN 0x444
+#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
+#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
+#define NRM_RX_ST_RC_ERR BIT(16)
+#define NRM_RX_ST_PKTCNT BIT(15)
+#define NRM_RX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_PKT_CMD_BUF 0xd0
+
+#define NETSEC_REG_CLK_EN 0x100
+
+#define NETSEC_REG_PKT_CTRL 0x140
+
+#define NETSEC_REG_DMA_TMR_CTRL 0x20c
+#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
+#define NETSEC_REG_F_TAIKI_VER 0x230
+#define NETSEC_REG_DMA_HM_CTRL 0x214
+#define NETSEC_REG_DMA_MH_CTRL 0x220
+#define NETSEC_REG_ADDR_DIS_CORE 0x218
+#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
+#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c
+
+#define NETSEC_REG_NRM_TX_PKTCNT 0x410
+
+#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
+#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
+
+#define NETSEC_REG_NRM_TX_TMR 0x41c
+
+#define NETSEC_REG_NRM_RX_PKTCNT 0x454
+#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
+#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
+#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
+
+#define NETSEC_REG_NRM_RX_TMR 0x45c
+
+#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
+#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
+#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
+#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448
+
+#define NETSEC_REG_NRM_TX_CONFIG 0x430
+#define NETSEC_REG_NRM_RX_CONFIG 0x470
+
+#define MAC_REG_STATUS 0x1024
+#define MAC_REG_DATA 0x11c0
+#define MAC_REG_CMD 0x11c4
+#define MAC_REG_FLOW_TH 0x11cc
+#define MAC_REG_INTF_SEL 0x11d4
+#define MAC_REG_DESC_INIT 0x11fc
+#define MAC_REG_DESC_SOFT_RST 0x1204
+#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500
+
+#define GMAC_REG_MCR 0x0000
+#define GMAC_REG_MFFR 0x0004
+#define GMAC_REG_GAR 0x0010
+#define GMAC_REG_GDR 0x0014
+#define GMAC_REG_FCR 0x0018
+#define GMAC_REG_BMR 0x1000
+#define GMAC_REG_RDLAR 0x100c
+#define GMAC_REG_TDLAR 0x1010
+#define GMAC_REG_OMR 0x1018
+
+#define MHZ(n) ((n) * 1000 * 1000)
+
+#define NETSEC_TX_SHIFT_OWN_FIELD 31
+#define NETSEC_TX_SHIFT_LD_FIELD 30
+#define NETSEC_TX_SHIFT_DRID_FIELD 24
+#define NETSEC_TX_SHIFT_PT_FIELD 21
+#define NETSEC_TX_SHIFT_TDRID_FIELD 16
+#define NETSEC_TX_SHIFT_CC_FIELD 15
+#define NETSEC_TX_SHIFT_FS_FIELD 9
+#define NETSEC_TX_LAST 8
+#define NETSEC_TX_SHIFT_CO 7
+#define NETSEC_TX_SHIFT_SO 6
+#define NETSEC_TX_SHIFT_TRS_FIELD 4
+
+#define NETSEC_RX_PKT_OWN_FIELD 31
+#define NETSEC_RX_PKT_LD_FIELD 30
+#define NETSEC_RX_PKT_SDRID_FIELD 24
+#define NETSEC_RX_PKT_FR_FIELD 23
+#define NETSEC_RX_PKT_ER_FIELD 21
+#define NETSEC_RX_PKT_ERR_FIELD 16
+#define NETSEC_RX_PKT_TDRID_FIELD 12
+#define NETSEC_RX_PKT_FS_FIELD 9
+#define NETSEC_RX_PKT_LS_FIELD 8
+#define NETSEC_RX_PKT_CO_FIELD 6
+
+#define NETSEC_RX_PKT_ERR_MASK 3
+
+#define NETSEC_MAX_TX_PKT_LEN 1518
+#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018
+
+#define NETSEC_RING_GMAC 15
+#define NETSEC_RING_MAX 2
+
+#define NETSEC_TCP_SEG_LEN_MAX 1460
+#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960
+
+#define NETSEC_RX_CKSUM_NOTAVAIL 0
+#define NETSEC_RX_CKSUM_OK 1
+#define NETSEC_RX_CKSUM_NG 2
+
+#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
+#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)
+
+#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
+#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)
+
+#define NETSEC_INT_PKTCNT_MAX 2047
+
+#define NETSEC_FLOW_START_TH_MAX 95
+#define NETSEC_FLOW_STOP_TH_MAX 95
+#define NETSEC_FLOW_PAUSE_TIME_MIN 5
+
+#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f
+
+#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
+#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
+#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
+#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)
+
+#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
+#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
+#define NETSEC_CLK_EN_REG_DOM_D BIT(0)
+
+#define NETSEC_COM_INIT_REG_DB BIT(2)
+#define NETSEC_COM_INIT_REG_CLS BIT(1)
+#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
+ NETSEC_COM_INIT_REG_DB)
+
+#define NETSEC_SOFT_RST_REG_RESET 0
+#define NETSEC_SOFT_RST_REG_RUN BIT(31)
+
+#define NETSEC_DMA_CTRL_REG_STOP 1
+#define MH_CTRL__MODE_TRANS BIT(20)
+
+#define NETSEC_GMAC_CMD_ST_READ 0
+#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
+#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)
+
+#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
+#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
+#define NETSEC_GMAC_BMR_REG_SWR 0x00000001
+
+#define NETSEC_GMAC_OMR_REG_ST BIT(13)
+#define NETSEC_GMAC_OMR_REG_SR BIT(1)
+
+#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
+#define NETSEC_GMAC_MCR_REG_CST BIT(25)
+#define NETSEC_GMAC_MCR_REG_JE BIT(20)
+#define NETSEC_MCR_PS BIT(15)
+#define NETSEC_GMAC_MCR_REG_FES BIT(14)
+#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
+#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c
+
+#define NETSEC_FCR_RFE BIT(2)
+#define NETSEC_FCR_TFE BIT(1)
+
+#define NETSEC_GMAC_GAR_REG_GW BIT(1)
+#define NETSEC_GMAC_GAR_REG_GB BIT(0)
+
+#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
+#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
+#define GMAC_REG_SHIFT_CR_GAR 2
+
+#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
+#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
+#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
+#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
+#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
+#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
+
+#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
+#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000
+
+#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000
+
+#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
+#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
+#define NETSEC_REG_DESC_TMR_MODE 4
+#define NETSEC_REG_DESC_ENDIAN 0
+
+#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
+#define NETSEC_MAC_DESC_INIT_REG_INIT 1
+
+#define NETSEC_EEPROM_MAC_ADDRESS 0x00
+#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
+#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
+#define NETSEC_EEPROM_HM_ME_SIZE 0x10
+#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
+#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
+#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
+#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
+#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
+
+#define DESC_NUM 128
+#define NAPI_BUDGET (DESC_NUM / 2)
+
+#define DESC_SZ sizeof(struct netsec_de)
+
+#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
+
+enum ring_id {
+ NETSEC_RING_TX = 0,
+ NETSEC_RING_RX
+};
+
+struct netsec_desc {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *addr;
+ u16 len;
+};
+
+struct netsec_desc_ring {
+ dma_addr_t desc_dma;
+ struct netsec_desc *desc;
+ void *vaddr;
+ u16 pkt_cnt;
+ u16 head, tail;
+};
+
+struct netsec_priv {
+ struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
+ struct ethtool_coalesce et_coalesce;
+ spinlock_t reglock; /* protect reg access */
+ struct napi_struct napi;
+ phy_interface_t phy_interface;
+ struct net_device *ndev;
+ struct device_node *phy_np;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ void __iomem *ioaddr;
+ void __iomem *eeprom_base;
+ struct device *dev;
+ struct clk *clk;
+ u32 msg_enable;
+ u32 freq;
+ bool rx_cksum_offload_flag;
+};
+
+struct netsec_de { /* Netsec Descriptor layout */
+ u32 attr;
+ u32 data_buf_addr_up;
+ u32 data_buf_addr_lw;
+ u32 buf_len_info;
+};
+
+struct netsec_tx_pkt_ctrl {
+ u16 tcp_seg_len;
+ bool tcp_seg_offload_flag;
+ bool cksum_offload_flag;
+};
+
+struct netsec_rx_pkt_info {
+ int rx_cksum_result;
+ int err_code;
+ bool err_flag;
+};
+
+static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
+{
+ writel(val, priv->ioaddr + reg_addr);
+}
+
+static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
+{
+ return readl(priv->ioaddr + reg_addr);
+}
+
+/************* MDIO BUS OPS FOLLOW *************/
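+/* GMAC registers sit behind an indirect access window: the register address
+ * and direction are written to MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY is
+ * polled, and the data is transferred through MAC_REG_DATA (see
+ * netsec_mac_read() and netsec_mac_write() below).
+ */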
+
+#define TIMEOUT_SPINS_MAC 1000
+#define TIMEOUT_SECONDARY_MS_MAC 100
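+/* The wait helpers first busy-poll for up to TIMEOUT_SPINS_MAC iterations,
+ * then fall back to sleeping polls for roughly TIMEOUT_SECONDARY_MS_MAC ms.
+ */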
+
+static u32 netsec_clk_type(u32 freq)
+{
+ if (freq < MHZ(35))
+ return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
+ if (freq < MHZ(60))
+ return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
+ if (freq < MHZ(100))
+ return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
+ if (freq < MHZ(150))
+ return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
+ if (freq < MHZ(250))
+ return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
+
+ return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
+}
+
+static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
+{
+ u32 timeout = TIMEOUT_SPINS_MAC;
+
+ while (--timeout && netsec_read(priv, addr) & mask)
+ cpu_relax();
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ while (--timeout && netsec_read(priv, addr) & mask)
+ usleep_range(1000, 2000);
+
+ if (timeout)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
+{
+ netsec_write(priv, MAC_REG_DATA, value);
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
+ return netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+}
+
+static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
+{
+ int ret;
+
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
+ ret = netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+ if (ret)
+ return ret;
+
+ *read = netsec_read(priv, MAC_REG_DATA);
+
+ return 0;
+}
+
+static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
+ u32 addr, u32 mask)
+{
+ u32 timeout = TIMEOUT_SPINS_MAC;
+ int ret, data;
+
+ do {
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ do {
+ usleep_range(1000, 2000);
+
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout && !ret)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+
+ value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
+
+ if (phydev->speed != SPEED_1000)
+ value |= NETSEC_MCR_PS;
+
+ if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
+ phydev->speed == SPEED_100)
+ value |= NETSEC_GMAC_MCR_REG_FES;
+
+ value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface))
+ value |= NETSEC_GMAC_MCR_REG_IBN;
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_phy_write(struct mii_bus *bus,
+ int phy_addr, int reg, u16 val)
+{
+ struct netsec_priv *priv = bus->priv;
+
+ if (netsec_mac_write(priv, GMAC_REG_GDR, val))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_GAR,
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+}
+
+static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
+{
+ struct netsec_priv *priv = bus->priv;
+ u32 data;
+ int ret;
+
+ if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
+ if (ret)
+ return ret;
+
+ return data;
+}
+
+/************* ETHTOOL_OPS FOLLOW *************/
+
+static void netsec_et_get_drvinfo(struct net_device *net_device,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "netsec", sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int netsec_et_get_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ *et_coalesce = priv->et_coalesce;
+
+ return 0;
+}
+
+static int netsec_et_set_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ priv->et_coalesce = *et_coalesce;
+
+ if (priv->et_coalesce.tx_coalesce_usecs < 50)
+ priv->et_coalesce.tx_coalesce_usecs = 50;
+ if (priv->et_coalesce.tx_max_coalesced_frames < 1)
+ priv->et_coalesce.tx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
+ priv->et_coalesce.tx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
+ priv->et_coalesce.tx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
+
+ if (priv->et_coalesce.rx_coalesce_usecs < 50)
+ priv->et_coalesce.rx_coalesce_usecs = 50;
+ if (priv->et_coalesce.rx_max_coalesced_frames < 1)
+ priv->et_coalesce.rx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
+ priv->et_coalesce.rx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
+ priv->et_coalesce.rx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
+
+ return 0;
+}
+
+static u32 netsec_et_get_msglevel(struct net_device *dev)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = datum;
+}
+
+static const struct ethtool_ops netsec_ethtool_ops = {
+ .get_drvinfo = netsec_et_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = netsec_et_get_coalesce,
+ .set_coalesce = netsec_et_set_coalesce,
+ .get_msglevel = netsec_et_get_msglevel,
+ .set_msglevel = netsec_et_set_msglevel,
+};
+
+/************* NETDEV_OPS FOLLOW *************/
+
+static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
+ struct netsec_desc *desc)
+{
+ struct sk_buff *skb;
+
+ if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
+ skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
+ } else {
+ desc->len = L1_CACHE_ALIGN(desc->len);
+ skb = netdev_alloc_skb(priv->ndev, desc->len);
+ }
+ if (!skb)
+ return NULL;
+
+ desc->addr = skb->data;
+ desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, desc->dma_addr)) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ return skb;
+}
+
+static void netsec_set_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring, u16 idx,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
+ u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
+ (1 << NETSEC_RX_PKT_FS_FIELD) |
+ (1 << NETSEC_RX_PKT_LS_FIELD);
+
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx].dma_addr = desc->dma_addr;
+ dring->desc[idx].addr = desc->addr;
+ dring->desc[idx].len = desc->len;
+ dring->desc[idx].skb = skb;
+}
+
+static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ u16 idx,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc, u16 *len)
+{
+ struct netsec_de de = {};
+
+ memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
+
+ *len = de.buf_len_info >> 16;
+
+ rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+ rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
+ rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
+ NETSEC_RX_PKT_ERR_MASK;
+ *desc = dring->desc[idx];
+ return desc->skb;
+}
+
+static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc,
+ u16 *len)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct sk_buff *tmp_skb, *skb = NULL;
+ struct netsec_desc td;
+ int tail;
+
+ *rxpi = (struct netsec_rx_pkt_info){};
+
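+ /* Assumption: the 22 bytes of slack over the MTU cover the 14-byte
+ * Ethernet header plus VLAN tag (4) and FCS (4).
+ */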
+ td.len = priv->ndev->mtu + 22;
+
+ tmp_skb = netsec_alloc_skb(priv, &td);
+
+ dma_rmb();
+
+ tail = dring->tail;
+
+ if (!tmp_skb) {
+ netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
+ dring->desc[tail].skb);
+ } else {
+ skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
+ netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
+ }
+
+ /* move tail ahead */
+ dring->tail = (dring->tail + 1) % DESC_NUM;
+
+ dring->pkt_cnt--;
+
+ return skb;
+}
+
+static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ unsigned int pkts, bytes;
+
+ dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+ if (dring->pkt_cnt < budget)
+ budget = dring->pkt_cnt;
+
+ pkts = 0;
+ bytes = 0;
+
+ while (pkts < budget) {
+ struct netsec_desc *desc;
+ struct netsec_de *entry;
+ int tail, eop;
+
+ tail = dring->tail;
+
+ /* move tail ahead */
+ dring->tail = (tail + 1) % DESC_NUM;
+
+ desc = &dring->desc[tail];
+ entry = dring->vaddr + DESC_SZ * tail;
+
+ eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ DMA_TO_DEVICE);
+ if (eop) {
+ pkts++;
+ bytes += desc->skb->len;
+ dev_kfree_skb(desc->skb);
+ }
+ *desc = (struct netsec_desc){};
+ }
+ dring->pkt_cnt -= budget;
+
+ priv->ndev->stats.tx_packets += budget;
+ priv->ndev->stats.tx_bytes += bytes;
+
+ netdev_completed_queue(priv->ndev, budget, bytes);
+
+ return budget;
+}
+
+static int netsec_process_tx(struct netsec_priv *priv, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ int new, done = 0;
+
+ do {
+ new = netsec_clean_tx_dring(priv, budget);
+ done += new;
+ budget -= new;
+ } while (new);
+
+ if (done && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ return done;
+}
+
+static int netsec_process_rx(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct net_device *ndev = priv->ndev;
+ struct netsec_rx_pkt_info rx_info;
+ int done = 0, rx_num = 0;
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ u16 len;
+
+ while (done < budget) {
+ if (!rx_num) {
+ rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
+ dring->pkt_cnt += rx_num;
+
+ /* advance head by 'rx_num' entries */
+ dring->head = (dring->head + rx_num) % DESC_NUM;
+
+ rx_num = dring->pkt_cnt;
+ if (!rx_num)
+ break;
+ }
+ done++;
+ rx_num--;
+ skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
+ if (unlikely(!skb) || rx_info.err_flag) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: rx fail err(%d)\n",
+ __func__, rx_info.err_code);
+ ndev->stats.rx_dropped++;
+ continue;
+ }
+
+ dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, priv->ndev);
+
+ if (priv->rx_cksum_offload_flag &&
+ rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+ }
+
+ return done;
+}
+
+static int netsec_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct netsec_priv *priv;
+ struct net_device *ndev;
+ int tx, rx, done, todo;
+
+ priv = container_of(napi, struct netsec_priv, napi);
+ ndev = priv->ndev;
+
+ todo = budget;
+ do {
+ if (!todo)
+ break;
+
+ tx = netsec_process_tx(priv, todo);
+ todo -= tx;
+
+ if (!todo)
+ break;
+
+ rx = netsec_process_rx(priv, todo);
+ todo -= rx;
+ } while (rx || tx);
+
+ done = budget - todo;
+
+ if (done < budget && napi_complete_done(napi, done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_SET,
+ NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+ }
+
+ return done;
+}
+
+static void netsec_set_tx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ const struct netsec_tx_pkt_ctrl *tx_ctrl,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ int idx = dring->head;
+ struct netsec_de *de;
+ u32 attr;
+
+ de = dring->vaddr + (DESC_SZ * idx);
+
+ attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
+ (1 << NETSEC_TX_SHIFT_PT_FIELD) |
+ (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
+ (1 << NETSEC_TX_SHIFT_FS_FIELD) |
+ (1 << NETSEC_TX_LAST) |
+ (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
+ (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
+ (1 << NETSEC_TX_SHIFT_TRS_FIELD);
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx] = *desc;
+ dring->desc[idx].skb = skb;
+
+ /* move head ahead */
+ dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ struct netsec_tx_pkt_ctrl tx_ctrl = {};
+ struct netsec_desc tx_desc;
+ u16 tso_seg_len = 0;
+ int filled;
+
+ /* differentiate between a full and an empty ring */
+ if (dring->head >= dring->tail)
+ filled = dring->head - dring->tail;
+ else
+ filled = dring->head + DESC_NUM - dring->tail;
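+ /* e.g. head = 3, tail = 125, DESC_NUM = 128 -> filled = 3 + 128 - 125 = 6 */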
+
+ if (DESC_NUM - filled < 2) { /* if less than 2 available */
+ netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_ctrl.cksum_offload_flag = true;
+
+ if (skb_is_gso(skb))
+ tso_seg_len = skb_shinfo(skb)->gso_size;
+
+ if (tso_seg_len > 0) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = 0;
+ tcp_hdr(skb)->check =
+ ~tcp_v4_check(0, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0);
+ } else {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ tx_ctrl.tcp_seg_offload_flag = true;
+ tx_ctrl.tcp_seg_len = tso_seg_len;
+ }
+
+ tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: DMA mapping failed\n", __func__);
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ tx_desc.addr = skb->data;
+ tx_desc.len = skb_headlen(skb);
+
+ skb_tx_timestamp(skb);
+ netdev_sent_queue(priv->ndev, skb->len);
+
+ netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+ netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit one packet to the TX engine */
+
+ return NETDEV_TX_OK;
+}
+
+static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ struct netsec_desc *desc;
+ u16 idx;
+
+ if (!dring->vaddr || !dring->desc)
+ return;
+
+ for (idx = 0; idx < DESC_NUM; idx++) {
+ desc = &dring->desc[idx];
+ if (!desc->addr)
+ continue;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ dev_kfree_skb(desc->skb);
+ }
+
+ memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
+ memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
+
+ dring->head = 0;
+ dring->tail = 0;
+ dring->pkt_cnt = 0;
+}
+
+static void netsec_free_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+
+ if (dring->vaddr) {
+ dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ dring->vaddr, dring->desc_dma);
+ dring->vaddr = NULL;
+ }
+
+ kfree(dring->desc);
+ dring->desc = NULL;
+}
+
+static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ int ret = 0;
+
+ dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ &dring->desc_dma, GFP_KERNEL);
+ if (!dring->vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL);
+ if (!dring->desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+err:
+ netsec_free_dring(priv, id);
+
+ return ret;
+}
+
+static int netsec_setup_rx_dring(struct netsec_priv *priv)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ int n;
+
+ desc.len = priv->ndev->mtu + 22;
+
+ for (n = 0; n < DESC_NUM; n++) {
+ skb = netsec_alloc_skb(priv, &desc);
+ if (!skb) {
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+ return -ENOMEM;
+ }
+ netsec_set_rx_de(priv, dring, n, &desc, skb);
+ }
+
+ return 0;
+}
+
+static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
+ u32 addr_h, u32 addr_l, u32 size)
+{
+ u64 base = (u64)addr_h << 32 | addr_l;
+ void __iomem *ucode;
+ u32 i;
+
+ ucode = ioremap(base, size * sizeof(u32));
+ if (!ucode)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++)
+ netsec_write(priv, reg, readl(ucode + i * 4));
+
+ iounmap(ucode);
+ return 0;
+}
+
+static int netsec_netdev_load_microcode(struct netsec_priv *priv)
+{
+ u32 addr_h, addr_l, size;
+ int err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = 0;
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int netsec_reset_hardware(struct netsec_priv *priv)
+{
+ u32 value;
+ int err;
+
+ /* stop DMA engines */
+ if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
+ netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+
+ while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+
+ while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+ }
+
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
+ netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
+
+ while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
+ cpu_relax();
+
+ /* set desc_start addr */
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+
+ /* set normal tx dring ring config */
+ netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+ netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+
+ err = netsec_netdev_load_microcode(priv);
+ if (err) {
+ netif_err(priv, probe, priv->ndev,
+ "%s: failed to load microcode (%d)\n", __func__, err);
+ return err;
+ }
+
+ /* start DMA engines */
+ netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
+ netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
+
+ usleep_range(1000, 2000);
+
+ if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
+ netif_err(priv, probe, priv->ndev,
+ "microengine start failed\n");
+ return -ENXIO;
+ }
+ netsec_write(priv, NETSEC_REG_TOP_STATUS,
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
+
+ value = NETSEC_PKT_CTRL_REG_MODE_NRM;
+ if (priv->ndev->mtu > ETH_DATA_LEN)
+ value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
+
+ /* change to normal mode */
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
+ netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
+
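+ /* wait for the mode transition (T2N) to complete */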
+ while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
+ NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
+ cpu_relax();
+
+ /* clear any pending EMPTY/ERR irq status */
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
+
+ /* Disable TX & RX intr */
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+
+ return 0;
+}
+
+static int netsec_start_gmac(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+ int ret;
+
+ if (phydev->speed != SPEED_1000)
+ value = (NETSEC_GMAC_MCR_REG_CST |
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_RESET))
+ return -ETIMEDOUT;
+
+ /* wait for the soft reset to complete */
+ usleep_range(1000, 5000);
+
+ ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
+ if (ret)
+ return ret;
+ if (value & NETSEC_GMAC_BMR_REG_SWR)
+ return -EAGAIN;
+
+ netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
+ return -ETIMEDOUT;
+
+ netsec_write(priv, MAC_REG_DESC_INIT, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
+ return -ETIMEDOUT;
+
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_RDLAR,
+ NETSEC_GMAC_RDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_TDLAR,
+ NETSEC_GMAC_TDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_update_to_phy_state(priv);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+
+ value |= NETSEC_GMAC_OMR_REG_SR;
+ value |= NETSEC_GMAC_OMR_REG_ST;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
+
+ if (netsec_mac_write(priv, GMAC_REG_OMR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_stop_gmac(struct netsec_priv *priv)
+{
+ u32 value;
+ int ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+ value &= ~NETSEC_GMAC_OMR_REG_SR;
+ value &= ~NETSEC_GMAC_OMR_REG_ST;
+
+ /* disable all interrupts */
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ return netsec_mac_write(priv, GMAC_REG_OMR, value);
+}
+
+static void netsec_phy_adjust_link(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ if (ndev->phydev->link)
+ netsec_start_gmac(priv);
+ else
+ netsec_stop_gmac(priv);
+
+ phy_print_status(ndev->phydev);
+}
+
+static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
+{
+ struct netsec_priv *priv = dev_id;
+ u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
+ unsigned long flags;
+
+ /* ack the per-queue status, then mask TX/RX interrupts until NAPI runs */
+ if (status & NETSEC_IRQ_TX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
+ }
+ if (status & NETSEC_IRQ_RX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int netsec_netdev_open(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ pm_runtime_get_sync(priv->dev);
+
+ ret = netsec_setup_rx_dring(priv);
+ if (ret) {
+ netif_err(priv, probe, priv->ndev,
+ "%s: fail setup ring\n", __func__);
+ goto err1;
+ }
+
+ ret = request_irq(priv->ndev->irq, netsec_irq_handler,
+ IRQF_SHARED, "netsec", priv);
+ if (ret) {
+ netif_err(priv, drv, priv->ndev, "request_irq failed\n");
+ goto err2;
+ }
+
+ if (dev_of_node(priv->dev)) {
+ if (!of_phy_connect(priv->ndev, priv->phy_np,
+ netsec_phy_adjust_link, 0,
+ priv->phy_interface)) {
+ netif_err(priv, link, priv->ndev, "missing PHY\n");
+ ret = -ENODEV;
+ goto err3;
+ }
+ } else {
+ ret = phy_connect_direct(priv->ndev, priv->phydev,
+ netsec_phy_adjust_link,
+ priv->phy_interface);
+ if (ret) {
+ netif_err(priv, link, priv->ndev,
+ "phy_connect_direct() failed (%d)\n", ret);
+ goto err3;
+ }
+ }
+
+ phy_start(ndev->phydev);
+
+ netsec_start_gmac(priv);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ /* Enable RX intr. */
+ netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX);
+
+ return 0;
+err3:
+ free_irq(priv->ndev->irq, priv);
+err2:
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+err1:
+ pm_runtime_put_sync(priv->dev);
+ return ret;
+}
+
+static int netsec_netdev_stop(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+
+ napi_disable(&priv->napi);
+
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+ netsec_stop_gmac(priv);
+
+ free_irq(priv->ndev->irq, priv);
+
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+
+ pm_runtime_put_sync(priv->dev);
+
+ return 0;
+}
+
+static int netsec_netdev_init(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
+ if (ret)
+ return ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
+ if (ret)
+ goto err1;
+
+ ret = netsec_reset_hardware(priv);
+ if (ret)
+ goto err2;
+
+ return 0;
+err2:
+ netsec_free_dring(priv, NETSEC_RING_RX);
+err1:
+ netsec_free_dring(priv, NETSEC_RING_TX);
+ return ret;
+}
+
+static void netsec_netdev_uninit(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netsec_free_dring(priv, NETSEC_RING_RX);
+ netsec_free_dring(priv, NETSEC_RING_TX);
+}
+
+static int netsec_netdev_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
+
+ return 0;
+}
+
+static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
+ int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops netsec_netdev_ops = {
+ .ndo_init = netsec_netdev_init,
+ .ndo_uninit = netsec_netdev_uninit,
+ .ndo_open = netsec_netdev_open,
+ .ndo_stop = netsec_netdev_stop,
+ .ndo_start_xmit = netsec_netdev_start_xmit,
+ .ndo_set_features = netsec_netdev_set_features,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = netsec_netdev_ioctl,
+};
+
+static int netsec_of_probe(struct platform_device *pdev,
+ struct netsec_priv *priv)
+{
+ priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_np) {
+ dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+ return -EINVAL;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk not found\n");
+ return PTR_ERR(priv->clk);
+ }
+ priv->freq = clk_get_rate(priv->clk);
+
+ return 0;
+}
+
+static int netsec_acpi_probe(struct platform_device *pdev,
+ struct netsec_priv *priv, u32 *phy_addr)
+{
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ACPI))
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "missing required property 'phy-channel'\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(&pdev->dev,
+ "socionext,phy-clock-frequency",
+ &priv->freq);
+ if (ret)
+ dev_err(&pdev->dev,
+ "missing required property 'socionext,phy-clock-frequency'\n");
+ return ret;
+}
+
+static void netsec_unregister_mdio(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (!dev_of_node(priv->dev) && phydev) {
+ phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
+
+ mdiobus_unregister(priv->mii_bus);
+}
+
+static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
+{
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
+ bus->priv = priv;
+ bus->name = "SNI NETSEC MDIO";
+ bus->read = netsec_phy_read;
+ bus->write = netsec_phy_write;
+ bus->parent = priv->dev;
+ priv->mii_bus = bus;
+
+ if (dev_of_node(priv->dev)) {
+ struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
+
+ mdio_node = of_get_child_by_name(parent, "mdio");
+ if (mdio_node) {
+ parent = mdio_node;
+ } else {
+ /* older firmware doesn't populate the mdio subnode;
+ * fall back to the parent node so the firmware can be
+ * upgraded in due time.
+ */
+ dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
+ }
+
+ ret = of_mdiobus_register(bus, parent);
+ of_node_put(mdio_node);
+
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+ } else {
+ /* Mask out all PHYs from auto probing. */
+ bus->phy_mask = ~0;
+ ret = mdiobus_register(bus);
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+
+ priv->phydev = get_phy_device(bus, phy_addr, false);
+ if (IS_ERR(priv->phydev)) {
+ ret = PTR_ERR(priv->phydev);
+ dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
+ priv->phydev = NULL;
+ return -ENODEV;
+ }
+
+ ret = phy_device_register(priv->phydev);
+ if (ret) {
+ mdiobus_unregister(bus);
+ dev_err(priv->dev,
+ "phy_device_register err(%d)\n", ret);
+ }
+ }
+
+ return ret;
+}
+
+static int netsec_probe(struct platform_device *pdev)
+{
+ struct resource *mmio_res, *eeprom_res, *irq_res;
+ u8 *mac, macbuf[ETH_ALEN];
+ struct netsec_priv *priv;
+ u32 hw_ver, phy_addr = 0;
+ struct net_device *ndev;
+ int ret;
+
+ mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mmio_res) {
+ dev_err(&pdev->dev, "No MMIO resource found.\n");
+ return -ENODEV;
+ }
+
+ eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!eeprom_res) {
+ dev_info(&pdev->dev, "No EEPROM resource found.\n");
+ return -ENODEV;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ dev_err(&pdev->dev, "No IRQ resource found.\n");
+ return -ENODEV;
+ }
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ spin_lock_init(&priv->reglock);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ platform_set_drvdata(pdev, priv);
+ ndev->irq = irq_res->start;
+ priv->dev = &pdev->dev;
+ priv->ndev = ndev;
+
+ priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
+ NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+ priv->phy_interface = device_get_phy_mode(&pdev->dev);
+ if (priv->phy_interface < 0) {
+ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+ priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
+ resource_size(mmio_res));
+ if (!priv->ioaddr) {
+ dev_err(&pdev->dev, "devm_ioremap() failed\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
+ resource_size(eeprom_res));
+ if (!priv->eeprom_base) {
+ dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
+ if (mac)
+ ether_addr_copy(ndev->dev_addr, mac);
+
+ if (priv->eeprom_base &&
+ (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+ void __iomem *macp = priv->eeprom_base +
+ NETSEC_EEPROM_MAC_ADDRESS;
+
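+ /* the EEPROM stores the MAC address byte-swapped within each
+ * 32-bit word; read it back into network order
+ */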
+ ndev->dev_addr[0] = readb(macp + 3);
+ ndev->dev_addr[1] = readb(macp + 2);
+ ndev->dev_addr[2] = readb(macp + 1);
+ ndev->dev_addr[3] = readb(macp + 0);
+ ndev->dev_addr[4] = readb(macp + 7);
+ ndev->dev_addr[5] = readb(macp + 6);
+ }
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev, "No MAC address found, using random\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ if (dev_of_node(&pdev->dev))
+ ret = netsec_of_probe(pdev, priv);
+ else
+ ret = netsec_acpi_probe(pdev, priv, &phy_addr);
+ if (ret)
+ goto free_ndev;
+
+ if (!priv->freq) {
+ dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+ /* default for throughput */
+ priv->et_coalesce.rx_coalesce_usecs = 500;
+ priv->et_coalesce.rx_max_coalesced_frames = 8;
+ priv->et_coalesce.tx_coalesce_usecs = 500;
+ priv->et_coalesce.tx_max_coalesced_frames = 8;
+
+ ret = device_property_read_u32(&pdev->dev, "max-frame-size",
+ &ndev->max_mtu);
+ if (ret < 0)
+ ndev->max_mtu = ETH_DATA_LEN;
+
+ /* hold a runtime PM reference just for probe; open/close take their own */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
+ /* this driver only supports F_TAIKI style NETSEC */
+ if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
+ NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
+ ret = -ENODEV;
+ goto pm_disable;
+ }
+
+ dev_info(&pdev->dev, "hardware revision %d.%d\n",
+ hw_ver >> 16, hw_ver & 0xffff);
+
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+
+ ndev->netdev_ops = &netsec_netdev_ops;
+ ndev->ethtool_ops = &netsec_ethtool_ops;
+
+ ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->hw_features = ndev->features;
+
+ priv->rx_cksum_offload_flag = true;
+
+ ret = netsec_register_mdio(priv, phy_addr);
+ if (ret)
+ goto unreg_napi;
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+ dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ netif_err(priv, probe, ndev, "register_netdev() failed\n");
+ goto unreg_mii;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+
+unreg_mii:
+ netsec_unregister_mdio(priv);
+unreg_napi:
+ netif_napi_del(&priv->napi);
+pm_disable:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+free_ndev:
+ free_netdev(ndev);
+ dev_err(&pdev->dev, "init failed\n");
+
+ return ret;
+}
+
+static int netsec_remove(struct platform_device *pdev)
+{
+ struct netsec_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+
+ netsec_unregister_mdio(priv);
+
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int netsec_runtime_suspend(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, 0);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int netsec_runtime_resume(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ clk_prepare_enable(priv->clk);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
+ NETSEC_CLK_EN_REG_DOM_C |
+ NETSEC_CLK_EN_REG_DOM_G);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops netsec_pm_ops = {
+ SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
+};
+
+static const struct of_device_id netsec_dt_ids[] = {
+ { .compatible = "socionext,synquacer-netsec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, netsec_dt_ids);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id netsec_acpi_ids[] = {
+ { "SCX0001" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
+#endif
+
+static struct platform_driver netsec_driver = {
+ .probe = netsec_probe,
+ .remove = netsec_remove,
+ .driver = {
+ .name = "netsec",
+ .pm = &netsec_pm_ops,
+ .of_match_table = netsec_dt_ids,
+ .acpi_match_table = ACPI_PTR(netsec_acpi_ids),
+ },
+};
+module_platform_driver(netsec_driver);
+
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("NETSEC Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
new file mode 100644
index 000000000000..111e7ca9df56
--- /dev/null
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -0,0 +1,1736 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sni_ave.c - Socionext UniPhier AVE Ethernet driver
+ * Copyright 2014 Panasonic Corporation
+ * Copyright 2015-2017 Socionext Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+
+/* General Register Group */
+#define AVE_IDR 0x000 /* ID */
+#define AVE_VR 0x004 /* Version */
+#define AVE_GRR 0x008 /* Global Reset */
+#define AVE_CFGR 0x00c /* Configuration */
+
+/* Interrupt Register Group */
+#define AVE_GIMR 0x100 /* Global Interrupt Mask */
+#define AVE_GISR 0x104 /* Global Interrupt Status */
+
+/* MAC Register Group */
+#define AVE_TXCR 0x200 /* TX Setup */
+#define AVE_RXCR 0x204 /* RX Setup */
+#define AVE_RXMAC1R 0x208 /* MAC address (lower) */
+#define AVE_RXMAC2R 0x20c /* MAC address (upper) */
+#define AVE_MDIOCTR 0x214 /* MDIO Control */
+#define AVE_MDIOAR 0x218 /* MDIO Address */
+#define AVE_MDIOWDR 0x21c /* MDIO Data */
+#define AVE_MDIOSR 0x220 /* MDIO Status */
+#define AVE_MDIORDR 0x224 /* MDIO Rd Data */
+
+/* Descriptor Control Register Group */
+#define AVE_DESCC 0x300 /* Descriptor Control */
+#define AVE_TXDC 0x304 /* TX Descriptor Configuration */
+#define AVE_RXDC0 0x308 /* RX Descriptor Ring0 Configuration */
+#define AVE_IIRQC 0x34c /* Interval IRQ Control */
+
+/* Packet Filter Register Group */
+#define AVE_PKTF_BASE 0x800 /* PF Base Address */
+#define AVE_PFMBYTE_BASE 0xd00 /* PF Mask Byte Base Address */
+#define AVE_PFMBIT_BASE 0xe00 /* PF Mask Bit Base Address */
+#define AVE_PFSEL_BASE 0xf00 /* PF Selector Base Address */
+#define AVE_PFEN 0xffc /* Packet Filter Enable */
+#define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40)
+#define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8)
+#define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4)
+#define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4)
+
+/* 64-bit descriptor memory */
+#define AVE_DESC_SIZE_64 12 /* Descriptor Size */
+
+#define AVE_TXDM_64 0x1000 /* Tx Descriptor Memory */
+#define AVE_RXDM_64 0x1c00 /* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_64 0x0ba0 /* Tx Descriptor Memory Size 3KB */
+#define AVE_RXDM_SIZE_64 0x6000 /* Rx Descriptor Memory Size 24KB */
+
+/* 32-bit descriptor memory */
+#define AVE_DESC_SIZE_32 8 /* Descriptor Size */
+
+#define AVE_TXDM_32 0x1000 /* Tx Descriptor Memory */
+#define AVE_RXDM_32 0x1800 /* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_32 0x07c0 /* Tx Descriptor Memory Size 2KB */
+#define AVE_RXDM_SIZE_32 0x4000 /* Rx Descriptor Memory Size 16KB */
+
+/* RMII Bridge Register Group */
+#define AVE_RSTCTRL 0x8028 /* Reset control */
+#define AVE_RSTCTRL_RMIIRST BIT(16)
+#define AVE_LINKSEL 0x8034 /* Link speed setting */
+#define AVE_LINKSEL_100M BIT(0)
+
+/* AVE_GRR */
+#define AVE_GRR_RXFFR BIT(5) /* Reset RxFIFO */
+#define AVE_GRR_PHYRST BIT(4) /* Reset external PHY */
+#define AVE_GRR_GRST BIT(0) /* Reset all MAC */
+
+/* AVE_CFGR */
+#define AVE_CFGR_FLE BIT(31) /* Filter Function */
+#define AVE_CFGR_CHE BIT(30) /* Checksum Function */
+#define AVE_CFGR_MII BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */
+#define AVE_CFGR_IPFCEN BIT(24) /* IP fragment sum Enable */
+
+/* AVE_GISR (common with GIMR) */
+#define AVE_GI_PHY BIT(24) /* PHY interrupt */
+#define AVE_GI_TX BIT(16) /* Tx complete */
+#define AVE_GI_RXERR BIT(8) /* Receive frame more than max size */
+#define AVE_GI_RXOVF BIT(7) /* Overflow at the RxFIFO */
+#define AVE_GI_RXDROP BIT(6) /* Drop packet */
+#define AVE_GI_RXIINT BIT(5) /* Interval interrupt */
+
+/* AVE_TXCR */
+#define AVE_TXCR_FLOCTR BIT(18) /* Flow control */
+#define AVE_TXCR_TXSPD_1G BIT(17)
+#define AVE_TXCR_TXSPD_100 BIT(16)
+
+/* AVE_RXCR */
+#define AVE_RXCR_RXEN BIT(30) /* Rx enable */
+#define AVE_RXCR_FDUPEN BIT(22) /* Interface mode */
+#define AVE_RXCR_FLOCTR BIT(21) /* Flow control */
+#define AVE_RXCR_AFEN BIT(19) /* MAC address filter */
+#define AVE_RXCR_DRPEN BIT(18) /* Drop pause frame */
+#define AVE_RXCR_MPSIZ_MASK GENMASK(10, 0)
+
+/* AVE_MDIOCTR */
+#define AVE_MDIOCTR_RREQ BIT(3) /* Read request */
+#define AVE_MDIOCTR_WREQ BIT(2) /* Write request */
+
+/* AVE_MDIOSR */
+#define AVE_MDIOSR_STS BIT(0) /* access status */
+
+/* AVE_DESCC */
+#define AVE_DESCC_STATUS_MASK GENMASK(31, 16)
+#define AVE_DESCC_RD0 BIT(8) /* Enable Rx descriptor Ring0 */
+#define AVE_DESCC_RDSTP BIT(4) /* Pause Rx descriptor */
+#define AVE_DESCC_TD BIT(0) /* Enable Tx descriptor */
+
+/* AVE_TXDC */
+#define AVE_TXDC_SIZE GENMASK(27, 16) /* Size of Tx descriptor */
+#define AVE_TXDC_ADDR GENMASK(11, 0) /* Start address */
+#define AVE_TXDC_ADDR_START 0
+
+/* AVE_RXDC0 */
+#define AVE_RXDC0_SIZE GENMASK(30, 16) /* Size of Rx descriptor */
+#define AVE_RXDC0_ADDR GENMASK(14, 0) /* Start address */
+#define AVE_RXDC0_ADDR_START 0
+
+/* AVE_IIRQC */
+#define AVE_IIRQC_EN0 BIT(27) /* Enable interval interrupt Ring0 */
+#define AVE_IIRQC_BSCK GENMASK(15, 0) /* Interval count unit */
+
+/* Command status for descriptor */
+#define AVE_STS_OWN BIT(31) /* Descriptor ownership */
+#define AVE_STS_INTR BIT(29) /* Request for interrupt */
+#define AVE_STS_OK BIT(27) /* Normal transmit */
+/* TX */
+#define AVE_STS_NOCSUM BIT(28) /* No use HW checksum */
+#define AVE_STS_1ST BIT(26) /* Head of buffer chain */
+#define AVE_STS_LAST BIT(25) /* Tail of buffer chain */
+#define AVE_STS_OWC BIT(21) /* Out of window, late collision */
+#define AVE_STS_EC BIT(20) /* Excess collision occurred */
+#define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0)
+/* RX */
+#define AVE_STS_CSSV BIT(21) /* Checksum check performed */
+#define AVE_STS_CSER BIT(20) /* Checksum error detected */
+#define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0)
+
+/* Packet filter */
+#define AVE_PFMBYTE_MASK0 (GENMASK(31, 8) | GENMASK(5, 0))
+#define AVE_PFMBYTE_MASK1 GENMASK(25, 0)
+#define AVE_PFMBIT_MASK GENMASK(15, 0)
+
+#define AVE_PF_SIZE 17 /* Total number of packet filters */
+#define AVE_PF_MULTICAST_SIZE 7 /* Number of multicast filters */
+
+#define AVE_PFNUM_FILTER 0 /* No.0 */
+#define AVE_PFNUM_UNICAST 1 /* No.1 */
+#define AVE_PFNUM_BROADCAST 2 /* No.2 */
+#define AVE_PFNUM_MULTICAST 11 /* No.11-17 */
+
+/* NETIF Message control */
+#define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* Parameter for descriptor */
+#define AVE_NR_TXDESC 32 /* Tx descriptor */
+#define AVE_NR_RXDESC 64 /* Rx descriptor */
+
+#define AVE_DESC_OFS_CMDSTS 0
+#define AVE_DESC_OFS_ADDRL 4
+#define AVE_DESC_OFS_ADDRU 8
+
+/* Parameter for ethernet frame */
+#define AVE_MAX_ETHFRAME 1518
+
+/* Parameter for interrupt */
+#define AVE_INTM_COUNT 20
+#define AVE_FORCE_TXINTCNT 1
+
+#define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit)
+
+enum desc_id {
+ AVE_DESCID_RX,
+ AVE_DESCID_TX,
+};
+
+enum desc_state {
+ AVE_DESC_RX_PERMIT,
+ AVE_DESC_RX_SUSPEND,
+ AVE_DESC_START,
+ AVE_DESC_STOP,
+};
+
+struct ave_desc {
+ struct sk_buff *skbs;
+ dma_addr_t skbs_dma;
+ size_t skbs_dmalen;
+};
+
+struct ave_desc_info {
+ u32 ndesc; /* number of descriptor */
+ u32 daddr; /* start address of descriptor */
+ u32 proc_idx; /* index of processing packet */
+ u32 done_idx; /* index of processed packet */
+ struct ave_desc *desc; /* skb info for each descriptor */
+};
+
+struct ave_soc_data {
+ bool is_desc_64bit;
+};
+
+struct ave_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+ u64 errors;
+ u64 dropped;
+ u64 collisions;
+ u64 fifo_errors;
+};
+
+struct ave_private {
+ void __iomem *base;
+ int irq;
+ int phy_id;
+ unsigned int desc_size;
+ u32 msg_enable;
+ struct clk *clk;
+ struct reset_control *rst;
+ phy_interface_t phy_mode;
+ struct phy_device *phydev;
+ struct mii_bus *mdio;
+
+ /* stats */
+ struct ave_stats stats_rx;
+ struct ave_stats stats_tx;
+
+ /* NAPI support */
+ struct net_device *ndev;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+
+ /* descriptor */
+ struct ave_desc_info rx;
+ struct ave_desc_info tx;
+
+ /* flow control */
+ int pause_auto;
+ int pause_rx;
+ int pause_tx;
+
+ const struct ave_soc_data *data;
+};
+
+static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
+ int offset)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 addr;
+
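+ /* descriptors live in device register space, starting at daddr */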
+ addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+ + entry * priv->desc_size + offset;
+
+ return readl(priv->base + addr);
+}
+
+static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
+ int entry)
+{
+ return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
+}
+
+static void ave_desc_write(struct net_device *ndev, enum desc_id id,
+ int entry, int offset, u32 val)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 addr;
+
+ addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+ + entry * priv->desc_size + offset;
+
+ writel(val, priv->base + addr);
+}
+
+static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
+ int entry, u32 val)
+{
+ ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
+}
+
+static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
+ int entry, dma_addr_t paddr)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
+ lower_32_bits(paddr));
+ if (IS_DESC_64BIT(priv))
+ ave_desc_write(ndev, id,
+ entry, AVE_DESC_OFS_ADDRU,
+ upper_32_bits(paddr));
+}
+
+static u32 ave_irq_disable_all(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 ret;
+
+ ret = readl(priv->base + AVE_GIMR);
+ writel(0, priv->base + AVE_GIMR);
+
+ return ret;
+}
+
+static void ave_irq_restore(struct net_device *ndev, u32 val)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ writel(val, priv->base + AVE_GIMR);
+}
+
+static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
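+ /* unmask the requested bits, then write them back to GISR to
+ * clear any stale status
+ */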
+ writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
+ writel(bitflag, priv->base + AVE_GISR);
+}
+
+static void ave_hw_write_macaddr(struct net_device *ndev,
+ const unsigned char *mac_addr,
+ int reg1, int reg2)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ writel(mac_addr[0] | mac_addr[1] << 8 |
+ mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
+ writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
+}
+
+static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 major, minor, vr;
+
+ vr = readl(priv->base + AVE_VR);
+ major = (vr & GENMASK(15, 8)) >> 8;
+ minor = (vr & GENMASK(7, 0));
+ snprintf(buf, len, "v%u.%u", major, minor);
+}
+
+static void ave_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct device *dev = ndev->dev.parent;
+
+ strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
+ ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
+}
+
+static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ priv->msg_enable = val;
+}
+
+static void ave_ethtool_get_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (ndev->phydev)
+ phy_ethtool_get_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ if (!ndev->phydev ||
+ (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
+ return -EOPNOTSUPP;
+
+ ret = phy_ethtool_set_wol(ndev->phydev, wol);
+ if (!ret)
+ device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
+
+ return ret;
+}
+
+static void ave_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ pause->autoneg = priv->pause_auto;
+ pause->rx_pause = priv->pause_rx;
+ pause->tx_pause = priv->pause_tx;
+}
+
+static int ave_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -EINVAL;
+
+ priv->pause_auto = pause->autoneg;
+ priv->pause_rx = pause->rx_pause;
+ priv->pause_tx = pause->tx_pause;
+
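+ /* 802.3 pause advertisement: rx only -> Pause|Asym,
+ * rx+tx -> Pause, tx only -> Asym
+ */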
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (pause->rx_pause)
+ phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ if (pause->tx_pause)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+
+ if (pause->autoneg) {
+ if (netif_running(ndev))
+ phy_start_aneg(phydev);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops ave_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_drvinfo = ave_ethtool_get_drvinfo,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ave_ethtool_get_msglevel,
+ .set_msglevel = ave_ethtool_set_msglevel,
+ .get_wol = ave_ethtool_get_wol,
+ .set_wol = ave_ethtool_set_wol,
+ .get_pauseparam = ave_ethtool_get_pauseparam,
+ .set_pauseparam = ave_ethtool_set_pauseparam,
+};
+
+static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
+{
+ struct net_device *ndev = bus->priv;
+ struct ave_private *priv;
+ u32 mdioctl, mdiosr;
+ int ret;
+
+ priv = netdev_priv(ndev);
+
+ /* write address */
+ writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+ /* read request */
+ mdioctl = readl(priv->base + AVE_MDIOCTR);
+ writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
+ priv->base + AVE_MDIOCTR);
+
+ ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+ !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+ if (ret) {
+ netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
+ phyid, regnum);
+ return ret;
+ }
+
+ return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
+}
+
+static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
+ u16 val)
+{
+ struct net_device *ndev = bus->priv;
+ struct ave_private *priv;
+ u32 mdioctl, mdiosr;
+ int ret;
+
+ priv = netdev_priv(ndev);
+
+ /* write address */
+ writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+ /* write data */
+ writel(val, priv->base + AVE_MDIOWDR);
+
+ /* write request */
+ mdioctl = readl(priv->base + AVE_MDIOCTR);
+ writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
+ priv->base + AVE_MDIOCTR);
+
+ ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+ !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+ if (ret)
+ netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
+ phyid, regnum);
+
+ return ret;
+}
+
+static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
+ void *ptr, size_t len, enum dma_data_direction dir,
+ dma_addr_t *paddr)
+{
+ dma_addr_t map_addr;
+
+ map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
+ return -ENOMEM;
+
+ desc->skbs_dma = map_addr;
+ desc->skbs_dmalen = len;
+ *paddr = map_addr;
+
+ return 0;
+}
+
+static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
+ enum dma_data_direction dir)
+{
+ if (!desc->skbs_dma)
+ return;
+
+ dma_unmap_single(ndev->dev.parent,
+ desc->skbs_dma, desc->skbs_dmalen, dir);
+ desc->skbs_dma = 0;
+}
+
+/* Prepare Rx descriptor and memory */
+static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ skb = priv->rx.desc[entry].skbs;
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(ndev,
+ AVE_MAX_ETHFRAME);
+ if (!skb) {
+ netdev_err(ndev, "can't allocate skb for Rx\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* keep the descriptor software-owned (disabled) while it is set up */
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+ AVE_STS_INTR | AVE_STS_OWN);
+
+ /* map the Rx buffer
+ * A buffer attached to an Rx descriptor has two restrictions:
+ * - the buffer address must be 4-byte aligned.
+ * - the buffer begins with a 2-byte headroom; data is written
+ * from (buffer + 2).
+ * To satisfy both, rewind the pointer advanced by NET_IP_ALIGN
+ * in netdev_alloc_skb_ip_align(), and extend the mapping length
+ * by NET_IP_ALIGN.
+ */
+ ret = ave_dma_map(ndev, &priv->rx.desc[entry],
+ skb->data - NET_IP_ALIGN,
+ AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+ DMA_FROM_DEVICE, &paddr);
+ if (ret) {
+ netdev_err(ndev, "can't map skb for Rx\n");
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ priv->rx.desc[entry].skbs = skb;
+
+ /* set buffer pointer */
+ ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
+
+ /* hand the descriptor back to the hardware along with the buffer size */
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+ AVE_STS_INTR | AVE_MAX_ETHFRAME);
+
+ return ret;
+}
+
+/* Switch state of descriptor */
+static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ int ret = 0;
+ u32 val;
+
+ switch (state) {
+ case AVE_DESC_START:
+ writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
+ break;
+
+ case AVE_DESC_STOP:
+ writel(0, priv->base + AVE_DESCC);
+ if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
+ 150, 15000)) {
+ netdev_err(ndev, "can't stop descriptor\n");
+ ret = -EBUSY;
+ }
+ break;
+
+ case AVE_DESC_RX_SUSPEND:
+ val = readl(priv->base + AVE_DESCC);
+ val |= AVE_DESCC_RDSTP;
+ val &= ~AVE_DESCC_STATUS_MASK;
+ writel(val, priv->base + AVE_DESCC);
+ if (readl_poll_timeout(priv->base + AVE_DESCC, val,
+ val & (AVE_DESCC_RDSTP << 16),
+ 150, 150000)) {
+ netdev_err(ndev, "can't suspend descriptor\n");
+ ret = -EBUSY;
+ }
+ break;
+
+ case AVE_DESC_RX_PERMIT:
+ val = readl(priv->base + AVE_DESCC);
+ val &= ~AVE_DESCC_RDSTP;
+ val &= ~AVE_DESCC_STATUS_MASK;
+ writel(val, priv->base + AVE_DESCC);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ave_tx_complete(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 proc_idx, done_idx, ndesc, cmdsts;
+ unsigned int nr_freebuf = 0;
+ unsigned int tx_packets = 0;
+ unsigned int tx_bytes = 0;
+
+ proc_idx = priv->tx.proc_idx;
+ done_idx = priv->tx.done_idx;
+ ndesc = priv->tx.ndesc;
+
+ /* free pre-stored skb from done_idx to proc_idx */
+ while (proc_idx != done_idx) {
+ cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);
+
+ /* do nothing if owner is HW (==1 for Tx) */
+ if (cmdsts & AVE_STS_OWN)
+ break;
+
+ /* check Tx status and update statistics */
+ if (cmdsts & AVE_STS_OK) {
+ tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
+ /* success */
+ if (cmdsts & AVE_STS_LAST)
+ tx_packets++;
+ } else {
+ /* error */
+ if (cmdsts & AVE_STS_LAST) {
+ priv->stats_tx.errors++;
+ if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
+ priv->stats_tx.collisions++;
+ }
+ }
+
+ /* release skb */
+ if (priv->tx.desc[done_idx].skbs) {
+ ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
+ DMA_TO_DEVICE);
+ dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
+ priv->tx.desc[done_idx].skbs = NULL;
+ nr_freebuf++;
+ }
+ done_idx = (done_idx + 1) % ndesc;
+ }
+
+ priv->tx.done_idx = done_idx;
+
+ /* update stats */
+ u64_stats_update_begin(&priv->stats_tx.syncp);
+ priv->stats_tx.packets += tx_packets;
+ priv->stats_tx.bytes += tx_bytes;
+ u64_stats_update_end(&priv->stats_tx.syncp);
+
+ /* wake queue for freeing buffer */
+ if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
+ netif_wake_queue(ndev);
+
+ return nr_freebuf;
+}
+
+static int ave_rx_receive(struct net_device *ndev, int num)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ unsigned int rx_packets = 0;
+ unsigned int rx_bytes = 0;
+ u32 proc_idx, done_idx;
+ struct sk_buff *skb;
+ unsigned int pktlen;
+ int restpkt, npkts;
+ u32 ndesc, cmdsts;
+
+ proc_idx = priv->rx.proc_idx;
+ done_idx = priv->rx.done_idx;
+ ndesc = priv->rx.ndesc;
+ restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc;
+
+ for (npkts = 0; npkts < num; npkts++) {
+ /* no more processable descriptors; break out and refill quickly */
+ if (--restpkt < 0)
+ break;
+
+ cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);
+
+ /* do nothing if owner is HW (==0 for Rx) */
+ if (!(cmdsts & AVE_STS_OWN))
+ break;
+
+ if (!(cmdsts & AVE_STS_OK)) {
+ priv->stats_rx.errors++;
+ proc_idx = (proc_idx + 1) % ndesc;
+ continue;
+ }
+
+ pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;
+
+ /* get skbuff for rx */
+ skb = priv->rx.desc[proc_idx].skbs;
+ priv->rx.desc[proc_idx].skbs = NULL;
+
+ ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);
+
+ skb->dev = ndev;
+ skb_put(skb, pktlen);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rx_packets++;
+ rx_bytes += pktlen;
+
+ netif_receive_skb(skb);
+
+ proc_idx = (proc_idx + 1) % ndesc;
+ }
+
+ priv->rx.proc_idx = proc_idx;
+
+ /* update stats */
+ u64_stats_update_begin(&priv->stats_rx.syncp);
+ priv->stats_rx.packets += rx_packets;
+ priv->stats_rx.bytes += rx_bytes;
+ u64_stats_update_end(&priv->stats_rx.syncp);
+
+ /* refill the Rx buffers */
+ while (proc_idx != done_idx) {
+ if (ave_rxdesc_prepare(ndev, done_idx))
+ break;
+ done_idx = (done_idx + 1) % ndesc;
+ }
+
+ priv->rx.done_idx = done_idx;
+
+ return npkts;
+}
+
+static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct ave_private *priv;
+ struct net_device *ndev;
+ int num;
+
+ priv = container_of(napi, struct ave_private, napi_rx);
+ ndev = priv->ndev;
+
+ num = ave_rx_receive(ndev, budget);
+ if (num < budget) {
+ napi_complete_done(napi, num);
+
+ /* enable Rx interrupt when NAPI finishes */
+ ave_irq_enable(ndev, AVE_GI_RXIINT);
+ }
+
+ return num;
+}
+
+static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct ave_private *priv;
+ struct net_device *ndev;
+ int num;
+
+ priv = container_of(napi, struct ave_private, napi_tx);
+ ndev = priv->ndev;
+
+ num = ave_tx_complete(ndev);
+ napi_complete(napi);
+
+ /* enable Tx interrupt when NAPI finishes */
+ ave_irq_enable(ndev, AVE_GI_TX);
+
+ return num;
+}
+
+static void ave_global_reset(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* set config register */
+ val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
+ if (!phy_interface_mode_is_rgmii(priv->phy_mode))
+ val |= AVE_CFGR_MII;
+ writel(val, priv->base + AVE_CFGR);
+
+ /* reset RMII register */
+ val = readl(priv->base + AVE_RSTCTRL);
+ val &= ~AVE_RSTCTRL_RMIIRST;
+ writel(val, priv->base + AVE_RSTCTRL);
+
+ /* assert reset */
+ writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
+ msleep(20);
+
+ /* 1st, negate PHY reset only */
+ writel(AVE_GRR_GRST, priv->base + AVE_GRR);
+ msleep(40);
+
+ /* negate reset */
+ writel(0, priv->base + AVE_GRR);
+ msleep(40);
+
+ /* negate RMII register */
+ val = readl(priv->base + AVE_RSTCTRL);
+ val |= AVE_RSTCTRL_RMIIRST;
+ writel(val, priv->base + AVE_RSTCTRL);
+
+ ave_irq_disable_all(ndev);
+}
+
+static void ave_rxfifo_reset(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 rxcr_org;
+
+ /* save and disable MAC receive op */
+ rxcr_org = readl(priv->base + AVE_RXCR);
+ writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);
+
+ /* suspend Rx descriptor */
+ ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);
+
+ /* drain any received packets before resetting the FIFO */
+ ave_rx_receive(ndev, priv->rx.ndesc);
+
+ /* assert reset */
+ writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
+ usleep_range(40, 50);
+
+ /* negate reset */
+ writel(0, priv->base + AVE_GRR);
+ usleep_range(10, 20);
+
+ /* negate interrupt status */
+ writel(AVE_GI_RXOVF, priv->base + AVE_GISR);
+
+ /* permit descriptor */
+ ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);
+
+ /* restore MAC receive op */
+ writel(rxcr_org, priv->base + AVE_RXCR);
+}
+
+static irqreturn_t ave_irq_handler(int irq, void *netdev)
+{
+ struct net_device *ndev = (struct net_device *)netdev;
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 gimr_val, gisr_val;
+
+ gimr_val = ave_irq_disable_all(ndev);
+
+ /* get interrupt status */
+ gisr_val = readl(priv->base + AVE_GISR);
+
+ /* PHY */
+ if (gisr_val & AVE_GI_PHY)
+ writel(AVE_GI_PHY, priv->base + AVE_GISR);
+
+ /* check for oversized frames */
+ if (gisr_val & AVE_GI_RXERR) {
+ writel(AVE_GI_RXERR, priv->base + AVE_GISR);
+ netdev_err(ndev, "received a packet exceeding the frame buffer\n");
+ }
+
+ gisr_val &= gimr_val;
+ if (!gisr_val)
+ goto exit_isr;
+
+ /* RxFIFO overflow */
+ if (gisr_val & AVE_GI_RXOVF) {
+ priv->stats_rx.fifo_errors++;
+ ave_rxfifo_reset(ndev);
+ goto exit_isr;
+ }
+
+ /* Rx drop */
+ if (gisr_val & AVE_GI_RXDROP) {
+ priv->stats_rx.dropped++;
+ writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
+ }
+
+ /* Rx interval */
+ if (gisr_val & AVE_GI_RXIINT) {
+ napi_schedule(&priv->napi_rx);
+ /* keep the Rx interrupt masked until NAPI completes */
+ gimr_val &= ~AVE_GI_RXIINT;
+ }
+
+ /* Tx completed */
+ if (gisr_val & AVE_GI_TX) {
+ napi_schedule(&priv->napi_tx);
+ /* keep the Tx interrupt masked until NAPI completes */
+ gimr_val &= ~AVE_GI_TX;
+ }
+
+exit_isr:
+ ave_irq_restore(ndev, gimr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 val;
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return -EINVAL;
+
+ val = readl(priv->base + AVE_PFEN);
+ writel(val | BIT(entry), priv->base + AVE_PFEN);
+
+ return 0;
+}
+
+static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 val;
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return -EINVAL;
+
+ val = readl(priv->base + AVE_PFEN);
+ writel(val & ~BIT(entry), priv->base + AVE_PFEN);
+
+ return 0;
+}
+
+static int ave_pfsel_set_macaddr(struct net_device *ndev,
+ unsigned int entry,
+ const unsigned char *mac_addr,
+ unsigned int set_size)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return -EINVAL;
+ if (WARN_ON(set_size > 6))
+ return -EINVAL;
+
+ ave_pfsel_stop(ndev, entry);
+
+ /* set MAC address for the filter */
+ ave_hw_write_macaddr(ndev, mac_addr,
+ AVE_PKTF(entry), AVE_PKTF(entry) + 4);
+
+ /* set byte mask */
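+ /* a set mask bit excludes that byte from comparison, so only
+ * the first set_size bytes are matched
+ */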
+ writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
+ priv->base + AVE_PFMBYTE(entry));
+ writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+ /* set bit mask filter */
+ writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+ /* set selector to ring 0 */
+ writel(0, priv->base + AVE_PFSEL(entry));
+
+ /* restart filter */
+ ave_pfsel_start(ndev, entry);
+
+ return 0;
+}
+
+static void ave_pfsel_set_promisc(struct net_device *ndev,
+ unsigned int entry, u32 rxring)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return;
+
+ ave_pfsel_stop(ndev, entry);
+
+ /* set byte mask */
+ writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
+ writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+ /* set bit mask filter */
+ writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+ /* set selector to rxring */
+ writel(rxring, priv->base + AVE_PFSEL(entry));
+
+ ave_pfsel_start(ndev, entry);
+}
+
+static void ave_pfsel_init(struct net_device *ndev)
+{
+ unsigned char bcast_mac[ETH_ALEN];
+ int i;
+
+ eth_broadcast_addr(bcast_mac);
+
+ for (i = 0; i < AVE_PF_SIZE; i++)
+ ave_pfsel_stop(ndev, i);
+
+ /* promiscuous entry, select ring 0 */
+ ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
+
+ /* unicast entry */
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+
+ /* broadcast entry */
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
+}
+
+static void ave_phy_adjust_link(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ u32 val, txcr, rxcr, rxcr_org;
+ u16 rmt_adv = 0, lcl_adv = 0;
+ u8 cap;
+
+ /* set RGMII speed */
+ val = readl(priv->base + AVE_TXCR);
+ val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);
+
+ if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
+ val |= AVE_TXCR_TXSPD_1G;
+ else if (phydev->speed == SPEED_100)
+ val |= AVE_TXCR_TXSPD_100;
+
+ writel(val, priv->base + AVE_TXCR);
+
+ /* set RMII speed (100M/10M only) */
+ if (!phy_interface_is_rgmii(phydev)) {
+ val = readl(priv->base + AVE_LINKSEL);
+ if (phydev->speed == SPEED_10)
+ val &= ~AVE_LINKSEL_100M;
+ else
+ val |= AVE_LINKSEL_100M;
+ writel(val, priv->base + AVE_LINKSEL);
+ }
+
+ /* check current RXCR/TXCR */
+ rxcr = readl(priv->base + AVE_RXCR);
+ txcr = readl(priv->base + AVE_TXCR);
+ rxcr_org = rxcr;
+
+ if (phydev->duplex) {
+ rxcr |= AVE_RXCR_FDUPEN;
+
+ if (phydev->pause)
+ rmt_adv |= LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+ if (phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (cap & FLOW_CTRL_TX)
+ txcr |= AVE_TXCR_FLOCTR;
+ else
+ txcr &= ~AVE_TXCR_FLOCTR;
+ if (cap & FLOW_CTRL_RX)
+ rxcr |= AVE_RXCR_FLOCTR;
+ else
+ rxcr &= ~AVE_RXCR_FLOCTR;
+ } else {
+ rxcr &= ~AVE_RXCR_FDUPEN;
+ rxcr &= ~AVE_RXCR_FLOCTR;
+ txcr &= ~AVE_TXCR_FLOCTR;
+ }
+
+ if (rxcr_org != rxcr) {
+ /* disable Rx mac */
+ writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
+ /* change and enable TX/Rx mac */
+ writel(txcr, priv->base + AVE_TXCR);
+ writel(rxcr, priv->base + AVE_RXCR);
+ }
+
+ phy_print_status(phydev);
+}
+
+static void ave_macaddr_init(struct net_device *ndev)
+{
+ ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
+
+ /* pfsel unicast entry */
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+}
+
+static int ave_init(struct net_device *ndev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct ave_private *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct device_node *mdio_np;
+ struct phy_device *phydev;
+ int ret;
+
+ /* enable the clock: the hardware is accessed before ndo_open runs */
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "can't enable clock\n");
+ return ret;
+ }
+ ret = reset_control_deassert(priv->rst);
+ if (ret) {
+ dev_err(dev, "can't deassert reset\n");
+ goto out_clk_disable;
+ }
+
+ ave_global_reset(ndev);
+
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (!mdio_np) {
+ dev_err(dev, "mdio node not found\n");
+ ret = -EINVAL;
+ goto out_reset_assert;
+ }
+ ret = of_mdiobus_register(priv->mdio, mdio_np);
+ of_node_put(mdio_np);
+ if (ret) {
+ dev_err(dev, "failed to register mdiobus\n");
+ goto out_reset_assert;
+ }
+
+ phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
+ if (!phydev) {
+ dev_err(dev, "could not attach to PHY\n");
+ ret = -ENODEV;
+ goto out_mdio_unregister;
+ }
+
+ priv->phydev = phydev;
+
+ phy_ethtool_get_wol(phydev, &wol);
+ device_set_wakeup_capable(&ndev->dev, !!wol.supported);
+
+ if (!phy_interface_is_rgmii(phydev)) {
+ phydev->supported &= ~PHY_GBIT_FEATURES;
+ phydev->supported |= PHY_BASIC_FEATURES;
+ }
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ phy_attached_info(phydev);
+
+ return 0;
+
+out_mdio_unregister:
+ mdiobus_unregister(priv->mdio);
+out_reset_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static void ave_uninit(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ phy_disconnect(priv->phydev);
+ mdiobus_unregister(priv->mdio);
+
+ /* no more hardware access after ndo_stop: assert reset, gate the clock */
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+}
+
+static int ave_open(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ int entry;
+ int ret;
+ u32 val;
+
+ ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
+ ndev);
+ if (ret)
+ return ret;
+
+ priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
+ GFP_KERNEL);
+ if (!priv->tx.desc) {
+ ret = -ENOMEM;
+ goto out_free_irq;
+ }
+
+ priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
+ GFP_KERNEL);
+ if (!priv->rx.desc) {
+ kfree(priv->tx.desc);
+ ret = -ENOMEM;
+ goto out_free_irq;
+ }
+
+ /* initialize Tx work and descriptor */
+ priv->tx.proc_idx = 0;
+ priv->tx.done_idx = 0;
+ for (entry = 0; entry < priv->tx.ndesc; entry++) {
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
+ ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
+ }
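+ /* program the descriptor area start and byte size into AVE_TXDC */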
+ writel(AVE_TXDC_ADDR_START |
+ (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
+ priv->base + AVE_TXDC);
+
+ /* initialize Rx work and descriptor */
+ priv->rx.proc_idx = 0;
+ priv->rx.done_idx = 0;
+ for (entry = 0; entry < priv->rx.ndesc; entry++) {
+ if (ave_rxdesc_prepare(ndev, entry))
+ break;
+ }
+ writel(AVE_RXDC0_ADDR_START |
+ (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
+ priv->base + AVE_RXDC0);
+
+ ave_desc_switch(ndev, AVE_DESC_START);
+
+ ave_pfsel_init(ndev);
+ ave_macaddr_init(ndev);
+
+ /* set Rx configuration */
+ /* full duplex, enable pause drop, enable flow control */
+ val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
+ AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
+ writel(val, priv->base + AVE_RXCR);
+
+ /* set Tx configuration */
+ /* enable flow control, disable loopback */
+ writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);
+
+ /* keep the interval count unit (BSCK), enable the timer, and set the moderation count */
+ val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
+ val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
+ writel(val, priv->base + AVE_IIRQC);
+
+ val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
+ ave_irq_restore(ndev, val);
+
+ napi_enable(&priv->napi_rx);
+ napi_enable(&priv->napi_tx);
+
+ phy_start(ndev->phydev);
+ phy_start_aneg(ndev->phydev);
+ netif_start_queue(ndev);
+
+ return 0;
+
+out_free_irq:
+ disable_irq(priv->irq);
+ free_irq(priv->irq, ndev);
+
+ return ret;
+}
+
+static int ave_stop(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ int entry;
+
+ ave_irq_disable_all(ndev);
+ disable_irq(priv->irq);
+ free_irq(priv->irq, ndev);
+
+ netif_tx_disable(ndev);
+ phy_stop(ndev->phydev);
+ napi_disable(&priv->napi_tx);
+ napi_disable(&priv->napi_rx);
+
+ ave_desc_switch(ndev, AVE_DESC_STOP);
+
+ /* free Tx buffer */
+ for (entry = 0; entry < priv->tx.ndesc; entry++) {
+ if (!priv->tx.desc[entry].skbs)
+ continue;
+
+ ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx.desc[entry].skbs);
+ priv->tx.desc[entry].skbs = NULL;
+ }
+ priv->tx.proc_idx = 0;
+ priv->tx.done_idx = 0;
+
+ /* free Rx buffer */
+ for (entry = 0; entry < priv->rx.ndesc; entry++) {
+ if (!priv->rx.desc[entry].skbs)
+ continue;
+
+ ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx.desc[entry].skbs);
+ priv->rx.desc[entry].skbs = NULL;
+ }
+ priv->rx.proc_idx = 0;
+ priv->rx.done_idx = 0;
+
+ kfree(priv->tx.desc);
+ kfree(priv->rx.desc);
+
+ return 0;
+}
+
+static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 proc_idx, done_idx, ndesc, cmdsts;
+ int ret, freepkt;
+ dma_addr_t paddr;
+
+ proc_idx = priv->tx.proc_idx;
+ done_idx = priv->tx.done_idx;
+ ndesc = priv->tx.ndesc;
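+ /* free Tx slots; one is kept in reserve so that
+ * proc_idx == done_idx means an empty ring
+ */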
+ freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;
+
+ /* stop the queue when no free entries remain */
+ if (unlikely(freepkt < 1)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* add padding for short packet */
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ priv->stats_tx.dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* map the Tx buffer;
+ * unlike Rx, a Tx buffer has no alignment restrictions.
+ */
+ ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
+ skb->data, skb->len, DMA_TO_DEVICE, &paddr);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ priv->stats_tx.dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ priv->tx.desc[proc_idx].skbs = skb;
+
+ ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);
+
+ cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
+ (skb->len & AVE_STS_PKTLEN_TX_MASK);
+
+ /* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
+ if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
+ cmdsts |= AVE_STS_INTR;
+
+ /* skip HW checksum calculation when the skb doesn't need it */
+ if (skb->ip_summed == CHECKSUM_NONE ||
+ skb->ip_summed == CHECKSUM_UNNECESSARY)
+ cmdsts |= AVE_STS_NOCSUM;
+
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);
+
+ priv->tx.proc_idx = (proc_idx + 1) % ndesc;
+
+ return NETDEV_TX_OK;
+}
+
+static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+static void ave_set_rx_mode(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *hw_adr;
+ int count, mc_cnt;
+ u32 val;
+
+ /* disable the MAC address filter for promiscuous mode or when there are no multicast addresses */
+ mc_cnt = netdev_mc_count(ndev);
+ val = readl(priv->base + AVE_RXCR);
+ if (ndev->flags & IFF_PROMISC || !mc_cnt)
+ val &= ~AVE_RXCR_AFEN;
+ else
+ val |= AVE_RXCR_AFEN;
+ writel(val, priv->base + AVE_RXCR);
+
+ /* set all multicast address */
+ if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
+ v4multi_macadr, 1);
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
+ v6multi_macadr, 1);
+ } else {
+ /* stop all multicast filter */
+ for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
+ ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);
+
+ /* set multicast addresses */
+ count = 0;
+ netdev_for_each_mc_addr(hw_adr, ndev) {
+ if (count == mc_cnt)
+ break;
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
+ hw_adr->addr, 6);
+ count++;
+ }
+ }
+}
+
+static void ave_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
+ stats->rx_packets = priv->stats_rx.packets;
+ stats->rx_bytes = priv->stats_rx.bytes;
+ } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
+ stats->tx_packets = priv->stats_tx.packets;
+ stats->tx_bytes = priv->stats_tx.bytes;
+ } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
+
+ stats->rx_errors = priv->stats_rx.errors;
+ stats->tx_errors = priv->stats_tx.errors;
+ stats->rx_dropped = priv->stats_rx.dropped;
+ stats->tx_dropped = priv->stats_tx.dropped;
+ stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
+ stats->collisions = priv->stats_tx.collisions;
+}
+
+static int ave_set_mac_address(struct net_device *ndev, void *p)
+{
+ int ret = eth_mac_addr(ndev, p);
+
+ if (ret)
+ return ret;
+
+ ave_macaddr_init(ndev);
+
+ return 0;
+}
+
+static const struct net_device_ops ave_netdev_ops = {
+ .ndo_init = ave_init,
+ .ndo_uninit = ave_uninit,
+ .ndo_open = ave_open,
+ .ndo_stop = ave_stop,
+ .ndo_start_xmit = ave_start_xmit,
+ .ndo_do_ioctl = ave_ioctl,
+ .ndo_set_rx_mode = ave_set_rx_mode,
+ .ndo_get_stats64 = ave_get_stats64,
+ .ndo_set_mac_address = ave_set_mac_address,
+};
+
+static int ave_probe(struct platform_device *pdev)
+{
+ const struct ave_soc_data *data;
+ struct device *dev = &pdev->dev;
+ char buf[ETHTOOL_FWVERS_LEN];
+ phy_interface_t phy_mode;
+ struct ave_private *priv;
+ struct net_device *ndev;
+ struct device_node *np;
+ struct resource *res;
+ const void *mac_addr;
+ void __iomem *base;
+ u64 dma_mask;
+ int irq, ret;
+ u32 ave_id;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ np = dev->of_node;
+ phy_mode = of_get_phy_mode(np);
+ if (phy_mode < 0) {
+ dev_err(dev, "phy-mode not found\n");
+ return -EINVAL;
+ }
+ if ((!phy_interface_mode_is_rgmii(phy_mode)) &&
+ phy_mode != PHY_INTERFACE_MODE_RMII &&
+ phy_mode != PHY_INTERFACE_MODE_MII) {
+ dev_err(dev, "phy-mode is invalid\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "IRQ not found\n");
+ return irq;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ndev = alloc_etherdev(sizeof(struct ave_private));
+ if (!ndev) {
+ dev_err(dev, "can't allocate ethernet device\n");
+ return -ENOMEM;
+ }
+
+ ndev->netdev_ops = &ave_netdev_ops;
+ ndev->ethtool_ops = &ave_ethtool_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+ ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+
+ ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+
+ /* if the MAC address is invalid, fall back to a random one */
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(dev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
+ }
+
+ priv = netdev_priv(ndev);
+ priv->base = base;
+ priv->irq = irq;
+ priv->ndev = ndev;
+ priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
+ priv->phy_mode = phy_mode;
+ priv->data = data;
+
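+ /* The descriptor format (and hence the usable DMA address width) is
+ * a per-SoC property; see the ave_soc_data entries below.
+ */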
+ if (IS_DESC_64BIT(priv)) {
+ priv->desc_size = AVE_DESC_SIZE_64;
+ priv->tx.daddr = AVE_TXDM_64;
+ priv->rx.daddr = AVE_RXDM_64;
+ dma_mask = DMA_BIT_MASK(64);
+ } else {
+ priv->desc_size = AVE_DESC_SIZE_32;
+ priv->tx.daddr = AVE_TXDM_32;
+ priv->rx.daddr = AVE_RXDM_32;
+ dma_mask = DMA_BIT_MASK(32);
+ }
+ ret = dma_set_mask(dev, dma_mask);
+ if (ret)
+ goto out_free_netdev;
+
+ priv->tx.ndesc = AVE_NR_TXDESC;
+ priv->rx.ndesc = AVE_NR_RXDESC;
+
+ u64_stats_init(&priv->stats_tx.syncp);
+ u64_stats_init(&priv->stats_rx.syncp);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto out_free_netdev;
+ }
+
+ priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ ret = PTR_ERR(priv->rst);
+ goto out_free_netdev;
+ }
+
+ priv->mdio = devm_mdiobus_alloc(dev);
+ if (!priv->mdio) {
+ ret = -ENOMEM;
+ goto out_free_netdev;
+ }
+ priv->mdio->priv = ndev;
+ priv->mdio->parent = dev;
+ priv->mdio->read = ave_mdiobus_read;
+ priv->mdio->write = ave_mdiobus_write;
+ priv->mdio->name = "uniphier-mdio";
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ /* Register as a NAPI-supported driver */
+ netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+ netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
+ priv->tx.ndesc);
+
+ platform_set_drvdata(pdev, ndev);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "failed to register netdevice\n");
+ goto out_del_napi;
+ }
+
+ /* get ID and version */
+ ave_id = readl(priv->base + AVE_IDR);
+ ave_hw_read_version(ndev, buf, sizeof(buf));
+
+ dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
+ (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
+ (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
+ buf, priv->irq, phy_modes(phy_mode));
+
+ return 0;
+
+out_del_napi:
+ netif_napi_del(&priv->napi_rx);
+ netif_napi_del(&priv->napi_tx);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int ave_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ave_private *priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi_rx);
+ netif_napi_del(&priv->napi_tx);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct ave_soc_data ave_pro4_data = {
+ .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_pxs2_data = {
+ .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld11_data = {
+ .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld20_data = {
+ .is_desc_64bit = true,
+};
+
+static const struct of_device_id of_ave_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-ave4",
+ .data = &ave_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-ave4",
+ .data = &ave_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-ave4",
+ .data = &ave_ld11_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-ave4",
+ .data = &ave_ld20_data,
+ },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_ave_match);
+
+static struct platform_driver ave_driver = {
+ .probe = ave_probe,
+ .remove = ave_remove,
+ .driver = {
+ .name = "ave",
+ .of_match_table = of_ave_match,
+ },
+};
+module_platform_driver(ave_driver);
+
+MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ce2ea2d491ac..2ffe76c0ff74 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -474,7 +474,7 @@ struct mac_device_info;
/* Helpers to program the MAC core */
struct stmmac_ops {
/* MAC core initialization */
- void (*core_init)(struct mac_device_info *hw, int mtu);
+ void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
/* Enable the MAC RX/TX */
void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 4404650b32c5..5270d26f0bc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -40,9 +40,7 @@
#define PRG_ETH0_CLK_M250_DIV_SHIFT 7
#define PRG_ETH0_CLK_M250_DIV_WIDTH 3
-/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */
-#define PRG_ETH0_CLK_M25_DIV_SHIFT 10
-#define PRG_ETH0_CLK_M25_DIV_WIDTH 1
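+/* a clk_gate bit index (not a mask like the BIT() definitions below) */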
+#define PRG_ETH0_RGMII_TX_CLK_EN 10
#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
@@ -63,8 +61,11 @@ struct meson8b_dwmac {
struct clk_divider m250_div;
struct clk *m250_div_clk;
- struct clk_divider m25_div;
- struct clk *m25_div_clk;
+ struct clk_fixed_factor fixed_div2;
+ struct clk *fixed_div2_clk;
+
+ struct clk_gate rgmii_tx_en;
+ struct clk *rgmii_tx_en_clk;
u32 tx_delay_ns;
};
@@ -81,7 +82,7 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
writel(data, dwmac->regs + reg);
}
-static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
{
struct clk_init_data init;
int i, ret;
@@ -89,11 +90,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
char clk_name[32];
const char *clk_div_parents[1];
const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
- static const struct clk_div_table clk_25m_div_table[] = {
- { .val = 0, .div = 5 },
- { .val = 1, .div = 10 },
- { /* sentinel */ },
- };
/* get the mux parents from DT */
for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
@@ -116,7 +112,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
init.name = clk_name;
init.ops = &clk_mux_ops;
- init.flags = 0;
+ init.flags = CLK_SET_RATE_PARENT;
init.parent_names = mux_parent_names;
init.num_parents = MUX_CLK_NUM_PARENTS;
@@ -144,31 +140,48 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
dwmac->m250_div.hw.init = &init;
- dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+ dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
return PTR_ERR(dwmac->m250_div_clk);
- /* create the m25_div */
- snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev));
+ /* create the fixed_div2 */
+ snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev));
init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
- init.ops = &clk_divider_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = CLK_SET_RATE_PARENT;
clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk);
init.parent_names = clk_div_parents;
init.num_parents = ARRAY_SIZE(clk_div_parents);
- dwmac->m25_div.reg = dwmac->regs + PRG_ETH0;
- dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT;
- dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH;
- dwmac->m25_div.table = clk_25m_div_table;
- dwmac->m25_div.hw.init = &init;
- dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+ dwmac->fixed_div2.mult = 1;
+ dwmac->fixed_div2.div = 2;
+ dwmac->fixed_div2.hw.init = &init;
- dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw);
- if (WARN_ON(IS_ERR(dwmac->m25_div_clk)))
- return PTR_ERR(dwmac->m25_div_clk);
+ dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw);
+ if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk)))
+ return PTR_ERR(dwmac->fixed_div2_clk);
+
+ /* create the rgmii_tx_en */
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en",
+ dev_name(dev));
+ init.ops = &clk_gate_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
+ dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
+ dwmac->rgmii_tx_en.hw.init = &init;
+
+ dwmac->rgmii_tx_en_clk = devm_clk_register(dev,
+ &dwmac->rgmii_tx_en.hw);
+ if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk)))
+ return PTR_ERR(dwmac->rgmii_tx_en_clk);
return 0;
}
@@ -176,7 +189,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
int ret;
- unsigned long clk_rate;
u8 tx_dly_val = 0;
switch (dwmac->phy_mode) {
@@ -191,9 +203,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- /* Generate a 25MHz clock for the PHY */
- clk_rate = 25 * 1000 * 1000;
-
/* enable RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
PRG_ETH0_RGMII_MODE);
@@ -204,12 +213,28 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
+
+ /* Configure the 125MHz RGMII TX clock; the IP block adjusts the
+ * output automatically (i.e. without us having to program a
+ * register) based on the line speed: 125MHz for Gbit speeds,
+ * 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s.
+ */
+ ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev,
+ "failed to set RGMII TX clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev,
+ "failed to enable the RGMII TX clock\n");
+ return ret;
+ }
break;
case PHY_INTERFACE_MODE_RMII:
- /* Use the rate of the mux clock for the internal RMII PHY */
- clk_rate = clk_get_rate(dwmac->m250_mux_clk);
-
/* disable RGMII mode -> enables RMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
0);
@@ -231,20 +256,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return -EINVAL;
}
- ret = clk_prepare_enable(dwmac->m25_div_clk);
- if (ret) {
- dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n");
- return ret;
- }
-
- ret = clk_set_rate(dwmac->m25_div_clk, clk_rate);
- if (ret) {
- clk_disable_unprepare(dwmac->m25_div_clk);
-
- dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n");
- return ret;
- }
-
/* enable TX_CLK and PHY_REF_CLK generator */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK,
PRG_ETH0_TX_AND_PHY_REF_CLK);
@@ -294,7 +305,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
- ret = meson8b_init_clk(dwmac);
+ ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -311,7 +322,8 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return 0;
err_clk_disable:
- clk_disable_unprepare(dwmac->m25_div_clk);
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+ clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);
@@ -322,7 +334,8 @@ static int meson8b_dwmac_remove(struct platform_device *pdev)
{
struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
- clk_disable_unprepare(dwmac->m25_div_clk);
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+ clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
return stmmac_pltfr_remove(pdev);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 9eb7f65d8000..a3fa65b1ca8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -483,7 +483,8 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu)
+static void sun8i_dwmac_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 v;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 8a86340ff2d3..540d21786a43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -25,18 +25,28 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
+#include <net/dsa.h>
#include <asm/io.h>
#include "stmmac_pcs.h"
#include "dwmac1000.h"
-static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac1000_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
+ int mtu = dev->mtu;
/* Configure GMAC core */
value |= GMAC_CORE_INIT;
+ /* Clear ACS bit because Ethernet switch tagging formats such as
+ * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+ * hardware to truncate packets on reception.
+ */
+ if (netdev_uses_dsa(dev))
+ value &= ~GMAC_CONTROL_ACS;
+
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 8ef517356313..91b23f9db31a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -25,15 +25,26 @@
*******************************************************************************/
#include <linux/crc32.h>
+#include <net/dsa.h>
#include <asm/io.h>
#include "dwmac100.h"
-static void dwmac100_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac100_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + MAC_CONTROL);
- writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+ value |= MAC_CORE_INIT;
+
+ /* Clear ASTP bit because Ethernet switch tagging formats such as
+ * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+ * hardware to truncate packets on reception.
+ */
+ if (netdev_uses_dsa(dev))
+ value &= ~MAC_CONTROL_ASTP;
+
+ writel(value, ioaddr + MAC_CONTROL);
#ifdef STMMAC_VLAN_TAG_USED
writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f3ed8f7853eb..ed222b20fcf1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -17,16 +17,26 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <net/dsa.h>
#include "stmmac_pcs.h"
#include "dwmac4.h"
-static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac4_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
+ int mtu = dev->mtu;
value |= GMAC_CORE_INIT;
+ /* Clear ACS bit because Ethernet switch tagging formats such as
+ * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+ * hardware to truncate packets on reception.
+ */
+ if (netdev_uses_dsa(dev))
+ value &= ~GMAC_CONFIG_ACS;
+
if (mtu > 1500)
value |= GMAC_CONFIG_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7e089bf906b4..c728ffa095de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
if (tx_own)
tdes3 |= TDES3_OWN;
- if (is_fs & tx_own)
+ if (is_fs && tx_own)
/* When the own bit, for the first frame, has to be set, all
* descriptors for the same frame has to be set before, to
* avoid race condition.
@@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
if (tx_own)
tdes3 |= TDES3_OWN;
- if (is_fs & tx_own)
+ if (is_fs && tx_own)
/* When the own bit, for the first frame, has to be set, all
* descriptors for the same frame has to be set before, to
* avoid race condition.
@@ -406,7 +406,7 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
for (i = 0; i < size; i++) {
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(p),
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
le32_to_cpu(p->des2), le32_to_cpu(p->des3));
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2a828a312814..6768a25b6aa0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -341,7 +341,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
if (tx_own)
tdes0 |= ETDES0_OWN;
- if (is_fs & tx_own)
+ if (is_fs && tx_own)
/* When the own bit, for the first frame, has to be set, all
* descriptors for the same frame has to be set before, to
* avoid race condition.
@@ -428,7 +428,7 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
u64 x;
x = *(u64 *)ep;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
(unsigned int)x, (unsigned int)(x >> 32),
ep->basic.des2, ep->basic.des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index db4cee57bb24..ebd9e5e00f16 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -288,7 +288,7 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
u64 x;
x = *(u64 *)p;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+ pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
i, (unsigned int)virt_to_phys(p),
(unsigned int)x, (unsigned int)(x >> 32),
p->des2, p->des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 337d53d12e94..7ad841434ec8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
bool stmmac_eee_init(struct stmmac_priv *priv)
{
struct net_device *ndev = priv->dev;
+ int interface = priv->plat->interface;
unsigned long flags;
bool ret = false;
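+ /* Bail out early unless the interface is MII/GMII/RGMII; presumably
+ * EEE is only usable on those PHY links.
+ */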
+ if ((interface != PHY_INTERFACE_MODE_MII) &&
+ (interface != PHY_INTERFACE_MODE_GMII) &&
+ !phy_interface_mode_is_rgmii(interface))
+ goto out;
+
/* Using PCS we cannot dial with the phy registers at this stage
* so we do not support extra feature like EEE.
*/
@@ -1997,22 +2003,60 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
u32 tx_channel_count = priv->plat->tx_queues_to_use;
- int status;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ u32 channels_to_check = tx_channel_count > rx_channel_count ?
+ tx_channel_count : rx_channel_count;
u32 chan;
+ bool poll_scheduled = false;
+ int status[channels_to_check];
+
+ /* Each DMA channel can be used for rx and tx simultaneously, yet
+ * napi_struct is embedded in struct stmmac_rx_queue rather than in a
+ * stmmac_channel struct.
+ * Because of this, stmmac_poll currently checks (and possibly wakes)
+ * all tx queues rather than just a single tx queue.
+ */
+ for (chan = 0; chan < channels_to_check; chan++)
+ status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
+ &priv->xstats,
+ chan);
- for (chan = 0; chan < tx_channel_count; chan++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ for (chan = 0; chan < rx_channel_count; chan++) {
+ if (likely(status[chan] & handle_rx)) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- status = priv->hw->dma->dma_interrupt(priv->ioaddr,
- &priv->xstats, chan);
- if (likely((status & handle_rx)) || (status & handle_tx)) {
if (likely(napi_schedule_prep(&rx_q->napi))) {
stmmac_disable_dma_irq(priv, chan);
__napi_schedule(&rx_q->napi);
+ poll_scheduled = true;
}
}
+ }
- if (unlikely(status & tx_hard_error_bump_tc)) {
+ /* If we scheduled poll, we already know that tx queues will be checked.
+ * If we didn't schedule poll, see if any DMA channel (used by tx) has a
+ * completed transmission and, if so, call stmmac_poll (once).
+ */
+ if (!poll_scheduled) {
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ if (status[chan] & handle_tx) {
+ /* It doesn't matter what rx queue we choose
+ * here. We use 0 since it always exists.
+ */
+ struct stmmac_rx_queue *rx_q =
+ &priv->rx_queue[0];
+
+ if (likely(napi_schedule_prep(&rx_q->napi))) {
+ stmmac_disable_dma_irq(priv, chan);
+ __napi_schedule(&rx_q->napi);
+ }
+ break;
+ }
+ }
+ }
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
(tc <= 256)) {
@@ -2029,7 +2073,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
chan);
priv->xstats.threshold = tc;
}
- } else if (unlikely(status == tx_hard_error)) {
+ } else if (unlikely(status[chan] == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
}
@@ -2483,7 +2527,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Initialize the MAC Core */
- priv->hw->mac->core_init(priv->hw, dev->mtu);
+ priv->hw->mac->core_init(priv->hw, dev);
/* Initialize MTL*/
if (priv->synopsys_id >= DWMAC_CORE_4_00)
@@ -3398,9 +3442,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
if (netif_msg_rx_status(priv)) {
netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
p, entry, des);
- if (frame_len > ETH_FRAME_LEN)
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
- frame_len, status);
+ netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
+ frame_len, status);
}
/* The zero-copy is always used for all the sizes
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index d655a4261e98..eb1c6b03c329 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -333,9 +333,8 @@ void xlgmac_print_pkt(struct net_device *netdev,
struct sk_buff *skb, bool tx_rx)
{
struct ethhdr *eth = (struct ethhdr *)skb->data;
- unsigned char *buf = skb->data;
unsigned char buffer[128];
- unsigned int i, j;
+ unsigned int i;
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
@@ -346,22 +345,13 @@ void xlgmac_print_pkt(struct net_device *netdev,
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
- for (i = 0, j = 0; i < skb->len;) {
- j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
- buf[i++]);
-
- if ((i % 32) == 0) {
- netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
- j = 0;
- } else if ((i % 16) == 0) {
- buffer[j++] = ' ';
- buffer[j++] = ' ';
- } else if ((i % 4) == 0) {
- buffer[j++] = ' ';
- }
+ for (i = 0; i < skb->len; i += 32) {
+ unsigned int len = min(skb->len - i, 32U);
+
+ hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+ buffer, sizeof(buffer), false);
+ netdev_dbg(netdev, " %#06x: %s\n", i, buffer);
}
- if (i % 32)
- netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a73600dceb8b..3c85a0885f9b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -88,6 +88,7 @@ do { \
#define CPSW_VERSION_4 0x190112
#define HOST_PORT_NUM 0
+#define CPSW_ALE_PORTS_NUM 3
#define SLIVER_SIZE 0x40
#define CPSW1_HOST_PORT_OFFSET 0x028
@@ -352,6 +353,27 @@ struct cpsw_hw_stats {
u32 rxdmaoverruns;
};
+struct cpsw_slave_data {
+ struct device_node *phy_node;
+ char phy_id[MII_BUS_ID_SIZE];
+ int phy_if;
+ u8 mac_addr[ETH_ALEN];
+ u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
+};
+
+struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
+ u32 ss_reg_ofs; /* Subsystem control register offset */
+ u32 channels; /* number of cpdma channels (symmetric) */
+ u32 slaves; /* number of slave cpgmac ports */
+ u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 ale_entries; /* ale table size */
+ u32 bd_ram_size; /* buffer descriptor ram size */
+ u32 mac_control; /* Mac control register */
+ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode */
+ bool dual_emac; /* Enable Dual EMAC mode */
+};
+
struct cpsw_slave {
void __iomem *regs;
struct cpsw_sliver_regs __iomem *sliver;
@@ -365,12 +387,12 @@ struct cpsw_slave {
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
- return __raw_readl(slave->regs + offset);
+ return readl_relaxed(slave->regs + offset);
}
static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
- __raw_writel(val, slave->regs + offset);
+ writel_relaxed(val, slave->regs + offset);
}
struct cpsw_vector {
@@ -660,8 +682,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
- __raw_writel(0xFF, &cpsw->wr_regs->tx_en);
- __raw_writel(0xFF, &cpsw->wr_regs->rx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(cpsw->dma, true);
return;
@@ -669,8 +691,8 @@ static void cpsw_intr_enable(struct cpsw_common *cpsw)
static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
- __raw_writel(0, &cpsw->wr_regs->tx_en);
- __raw_writel(0, &cpsw->wr_regs->rx_en);
+ writel_relaxed(0, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0, &cpsw->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(cpsw->dma, false);
return;
@@ -949,18 +971,14 @@ static inline void soft_reset(const char *module, void __iomem *reg)
{
unsigned long timeout = jiffies + HZ;
- __raw_writel(1, reg);
+ writel_relaxed(1, reg);
do {
cpu_relax();
- } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
+ } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
- WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
+ WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}
-#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
- ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
-
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
struct cpsw_priv *priv)
{
@@ -1015,7 +1033,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
if (mac_control != slave->mac_control) {
phy_print_status(phy);
- __raw_writel(mac_control, &slave->sliver->mac_control);
+ writel_relaxed(mac_control, &slave->sliver->mac_control);
}
slave->mac_control = mac_control;
@@ -1278,7 +1296,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
soft_reset_slave(slave);
/* setup priority mapping */
- __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
+ writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -1304,7 +1322,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
/* setup max packet size, and mac address */
- __raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
+ writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
cpsw_set_slave_mac(slave, priv);
slave->mac_control = 0; /* no link yet */
@@ -1395,9 +1413,9 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
/* setup host port priority mapping */
- __raw_writel(CPDMA_TX_PRIORITY_MAP,
- &cpsw->host_port_regs->cpdma_tx_pri_map);
- __raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+ writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -1514,10 +1532,10 @@ static int cpsw_ndo_open(struct net_device *ndev)
/* initialize shared resources for every ndev */
if (!cpsw->usage_count) {
/* disable priority elevation */
- __raw_writel(0, &cpsw->regs->ptype);
+ writel_relaxed(0, &cpsw->regs->ptype);
/* enable statistics collection only on all ports */
- __raw_writel(0x7, &cpsw->regs->stat_port_en);
+ writel_relaxed(0x7, &cpsw->regs->stat_port_en);
/* Enable internal fifo flow control */
writel(0x7, &cpsw->regs->flow_control);
@@ -1701,7 +1719,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
- __raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2298,7 +2316,6 @@ static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
{
- int (*poll)(struct napi_struct *, int);
struct cpsw_common *cpsw = priv->cpsw;
void (*handler)(void *, int, int);
struct netdev_queue *queue;
@@ -2309,12 +2326,10 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
ch = &cpsw->rx_ch_num;
vec = cpsw->rxv;
handler = cpsw_rx_handler;
- poll = cpsw_rx_poll;
} else {
ch = &cpsw->tx_ch_num;
vec = cpsw->txv;
handler = cpsw_tx_handler;
- poll = cpsw_tx_poll;
}
while (*ch < ch_num) {
@@ -3050,17 +3065,23 @@ static int cpsw_probe(struct platform_device *pdev)
}
cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+ if (IS_ERR(cpsw->txv[0].ch)) {
+ dev_err(priv->dev, "error initializing tx dma channel\n");
+ ret = PTR_ERR(cpsw->txv[0].ch);
+ goto clean_dma_ret;
+ }
+
cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
- if (WARN_ON(!cpsw->rxv[0].ch || !cpsw->txv[0].ch)) {
- dev_err(priv->dev, "error initializing dma channels\n");
- ret = -ENOMEM;
+ if (IS_ERR(cpsw->rxv[0].ch)) {
+ dev_err(priv->dev, "error initializing rx dma channel\n");
+ ret = PTR_ERR(cpsw->rxv[0].ch);
goto clean_dma_ret;
}
ale_params.dev = &pdev->dev;
ale_params.ale_ageout = ale_ageout;
ale_params.ale_entries = data->ale_entries;
- ale_params.ale_ports = data->slaves;
+ ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
cpsw->ale = cpsw_ale_create(&ale_params);
if (!cpsw->ale) {
@@ -3072,14 +3093,14 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
if (IS_ERR(cpsw->cpts)) {
ret = PTR_ERR(cpsw->cpts);
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
ndev->irq = platform_get_irq(pdev, 1);
if (ndev->irq < 0) {
dev_err(priv->dev, "error getting irq resource\n");
ret = ndev->irq;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
@@ -3103,7 +3124,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (ret) {
dev_err(priv->dev, "error registering net device\n");
ret = -ENODEV;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
if (cpsw->data.dual_emac) {
@@ -3126,7 +3147,7 @@ static int cpsw_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 1);
if (irq < 0) {
ret = irq;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
cpsw->irqs_table[0] = irq;
@@ -3134,14 +3155,14 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
/* TX IRQ */
irq = platform_get_irq(pdev, 2);
if (irq < 0) {
ret = irq;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
cpsw->irqs_table[1] = irq;
@@ -3149,7 +3170,7 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
cpsw_notice(priv, probe,
@@ -3162,8 +3183,6 @@ static int cpsw_probe(struct platform_device *pdev)
clean_unregister_netdev_ret:
unregister_netdev(ndev);
-clean_ale_ret:
- cpsw_ale_destroy(cpsw->ale);
clean_dma_ret:
cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
@@ -3193,7 +3212,6 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(ndev);
cpts_release(cpsw->cpts);
- cpsw_ale_destroy(cpsw->ale);
cpdma_ctlr_destroy(cpsw->dma);
cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 6c3037aa2cd3..cf111db3dc27 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -17,26 +17,9 @@
#include <linux/if_ether.h>
#include <linux/phy.h>
-struct cpsw_slave_data {
- struct device_node *phy_node;
- char phy_id[MII_BUS_ID_SIZE];
- int phy_if;
- u8 mac_addr[ETH_ALEN];
- u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
-};
-
-struct cpsw_platform_data {
- struct cpsw_slave_data *slave_data;
- u32 ss_reg_ofs; /* Subsystem control register offset */
- u32 channels; /* number of cpdma channels (symmetric) */
- u32 slaves; /* number of slave cpgmac ports */
- u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
- u32 ale_entries; /* ale table size */
- u32 bd_ram_size; /*buffer descriptor ram size */
- u32 mac_control; /* Mac control register */
- u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
- bool dual_emac; /* Enable Dual EMAC mode */
-};
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index b432a75fb874..93dc05c194d3 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -150,11 +150,11 @@ static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
WARN_ON(idx > ale->params.ale_entries);
- __raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
+ writel_relaxed(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
for (i = 0; i < ALE_ENTRY_WORDS; i++)
- ale_entry[i] = __raw_readl(ale->params.ale_regs +
- ALE_TABLE + 4 * i);
+ ale_entry[i] = readl_relaxed(ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
return idx;
}
@@ -166,11 +166,11 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
WARN_ON(idx > ale->params.ale_entries);
for (i = 0; i < ALE_ENTRY_WORDS; i++)
- __raw_writel(ale_entry[i], ale->params.ale_regs +
- ALE_TABLE + 4 * i);
+ writel_relaxed(ale_entry[i], ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
- __raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
- ALE_TABLE_CONTROL);
+ writel_relaxed(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
+ ALE_TABLE_CONTROL);
return idx;
}
@@ -723,7 +723,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
if (info->port_offset == 0 && info->port_shift == 0)
port = 0; /* global, port is a dont care */
- if (port < 0 || port > ale->params.ale_ports)
+ if (port < 0 || port >= ale->params.ale_ports)
return -EINVAL;
mask = BITMASK(info->bits);
@@ -733,9 +733,9 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
offset = info->offset + (port * info->port_offset);
shift = info->shift + (port * info->port_shift);
- tmp = __raw_readl(ale->params.ale_regs + offset);
+ tmp = readl_relaxed(ale->params.ale_regs + offset);
tmp = (tmp & ~(mask << shift)) | (value << shift);
- __raw_writel(tmp, ale->params.ale_regs + offset);
+ writel_relaxed(tmp, ale->params.ale_regs + offset);
return 0;
}
@@ -754,13 +754,13 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
if (info->port_offset == 0 && info->port_shift == 0)
port = 0; /* global, port is a dont care */
- if (port < 0 || port > ale->params.ale_ports)
+ if (port < 0 || port >= ale->params.ale_ports)
return -EINVAL;
offset = info->offset + (port * info->port_offset);
shift = info->shift + (port * info->port_shift);
- tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
+ tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift;
return tmp & BITMASK(info->bits);
}
EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
@@ -779,9 +779,37 @@ static void cpsw_ale_timer(struct timer_list *t)
void cpsw_ale_start(struct cpsw_ale *ale)
{
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
+ if (ale->ageout) {
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+ }
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_start);
+
+void cpsw_ale_stop(struct cpsw_ale *ale)
+{
+ del_timer_sync(&ale->timer);
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_stop);
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
+{
+ struct cpsw_ale *ale;
u32 rev, ale_entries;
- rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
+ ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
+ if (!ale)
+ return NULL;
+
+ ale->params = *params;
+ ale->ageout = ale->params.ale_ageout * HZ;
+
+ rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
if (!ale->params.major_ver_mask)
ale->params.major_ver_mask = 0xff;
ale->version =
@@ -793,8 +821,8 @@ void cpsw_ale_start(struct cpsw_ale *ale)
if (!ale->params.ale_entries) {
ale_entries =
- __raw_readl(ale->params.ale_regs + ALE_STATUS) &
- ALE_STATUS_SIZE_MASK;
+ readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
+ ALE_STATUS_SIZE_MASK;
/* ALE available on newer NetCP switches has introduced
* a register, ALE_STATUS, to indicate the size of ALE
* table which shows the size as a multiple of 1024 entries.
@@ -816,9 +844,9 @@ void cpsw_ale_start(struct cpsw_ale *ale)
"ALE Table size %ld\n", ale->params.ale_entries);
/* set default bits for existing h/w */
- ale->port_mask_bits = 3;
- ale->port_num_bits = 2;
- ale->vlan_field_bits = 3;
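+ /* a port mask carries one bit per port, while a port number
+ * needs order_base_2(ports) bits
+ */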
+ ale->port_mask_bits = ale->params.ale_ports;
+ ale->port_num_bits = order_base_2(ale->params.ale_ports);
+ ale->vlan_field_bits = ale->params.ale_ports;
/* Set defaults override for ALE on NetCP NU switch and for version
* 1R3
@@ -847,57 +875,12 @@ void cpsw_ale_start(struct cpsw_ale *ale)
ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
- ale->port_mask_bits = ale->params.ale_ports;
- ale->port_num_bits = ale->params.ale_ports - 1;
- ale->vlan_field_bits = ale->params.ale_ports;
- } else if (ale->version == ALE_VERSION_1R3) {
- ale->port_mask_bits = ale->params.ale_ports;
- ale->port_num_bits = 3;
- ale->vlan_field_bits = ale->params.ale_ports;
}
- cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
- cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
-
- timer_setup(&ale->timer, cpsw_ale_timer, 0);
- if (ale->ageout) {
- ale->timer.expires = jiffies + ale->ageout;
- add_timer(&ale->timer);
- }
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_start);
-
-void cpsw_ale_stop(struct cpsw_ale *ale)
-{
- del_timer_sync(&ale->timer);
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_stop);
-
-struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
-{
- struct cpsw_ale *ale;
-
- ale = kzalloc(sizeof(*ale), GFP_KERNEL);
- if (!ale)
- return NULL;
-
- ale->params = *params;
- ale->ageout = ale->params.ale_ageout * HZ;
-
return ale;
}
EXPORT_SYMBOL_GPL(cpsw_ale_create);
-int cpsw_ale_destroy(struct cpsw_ale *ale)
-{
- if (!ale)
- return -EINVAL;
- cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
- kfree(ale);
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
-
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
{
int i;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 25d24e8d0904..d4fe9016429b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -100,7 +100,6 @@ enum cpsw_ale_port_state {
#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
-int cpsw_ale_destroy(struct cpsw_ale *ale);
void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index e4d6edf387b3..6f9173ff9414 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -893,7 +893,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
if (__chan_linear(chan_num) >= ctlr->num_chan)
- return NULL;
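+ /* callers now check IS_ERR(), so return an ERR_PTR rather than NULL */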
+ return ERR_PTR(-EINVAL);
chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
if (!chan)
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4bb561856af5..abceea802ea1 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1385,11 +1385,6 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
return -EOPNOTSUPP;
}
-static int match_first_device(struct device *dev, void *data)
-{
- return !strncmp(dev_name(dev), "davinci_mdio", 12);
-}
-
/**
* emac_dev_open - EMAC device open
* @ndev: The DaVinci EMAC network adapter
@@ -1489,8 +1484,8 @@ static int emac_dev_open(struct net_device *ndev)
/* use the first phy on the bus if pdata did not give us a phy id */
if (!phydev && !priv->phy_id) {
- phy = bus_find_device(&mdio_bus_type, NULL, NULL,
- match_first_device);
+ phy = bus_find_device_by_name(&mdio_bus_type, NULL,
+ "davinci_mdio");
if (phy) {
priv->phy_id = dev_name(phy);
if (!priv->phy_id || !*priv->phy_id)
@@ -1875,10 +1870,17 @@ static int davinci_emac_probe(struct platform_device *pdev)
priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
emac_tx_handler, 0);
+ if (IS_ERR(priv->txchan)) {
+ dev_err(&pdev->dev, "error initializing tx dma channel\n");
+ rc = PTR_ERR(priv->txchan);
+ goto no_cpdma_chan;
+ }
+
priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
emac_rx_handler, 1);
- if (WARN_ON(!priv->txchan || !priv->rxchan)) {
- rc = -ENOMEM;
+ if (IS_ERR(priv->rxchan)) {
+ dev_err(&pdev->dev, "error initializing rx dma channel\n");
+ rc = PTR_ERR(priv->rxchan);
goto no_cpdma_chan;
}
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ed58c746e4af..f5a7eb22d0f5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
/* warning!!!! We are retrieving the virtual ptr in the sw_data
* field as a 32bit value. Will not work on 64bit machines
*/
- page = (struct page *)GET_SW_DATA0(desc);
+ page = (struct page *)GET_SW_DATA0(ndesc);
if (likely(dma_buff && buf_len && page)) {
dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index e831c49713ee..56dbc0b9fedc 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -27,6 +27,7 @@
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
+#include "cpsw.h"
#include "cpsw_ale.h"
#include "netcp.h"
#include "cpts.h"
@@ -2047,10 +2048,6 @@ static const struct ethtool_ops keystone_ethtool_ops = {
.get_ts_info = keystone_get_ts_info,
};
-#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
- ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
-
static void gbe_set_slave_mac(struct gbe_slave *slave,
struct gbe_intf *gbe_intf)
{
@@ -3692,7 +3689,6 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
del_timer_sync(&gbe_dev->timer);
cpts_release(gbe_dev->cpts);
cpsw_ale_stop(gbe_dev->ale);
- cpsw_ale_destroy(gbe_dev->ale);
netcp_txpipe_close(&gbe_dev->tx_pipe);
free_secondary_ports(gbe_dev);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b718a02a6bb6..b919e89a9b93 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);
+ if (skb_dst(skb)) {
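+ /* Reserve room for the outer IPv4 header, the Geneve header plus
+ * options and, presumably, the inner Ethernet header (the literal
+ * 14 being ETH_HLEN); likewise in the IPv6 path further below.
+ */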
+ int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
+ GENEVE_BASE_HLEN - info->options_len - 14;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
+ GENEVE_BASE_HLEN - info->options_len - 14;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -1638,19 +1652,16 @@ static __net_init int geneve_init_net(struct net *net)
return 0;
}
-static void __net_exit geneve_exit_net(struct net *net)
+static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
struct net_device *dev, *aux;
- LIST_HEAD(list);
-
- rtnl_lock();
/* gather any geneve devices that were moved into this ns */
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &geneve_link_ops)
- unregister_netdevice_queue(dev, &list);
+ unregister_netdevice_queue(dev, head);
/* now gather any other geneve devices that were created in this ns */
list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
@@ -1658,18 +1669,29 @@ static void __net_exit geneve_exit_net(struct net *net)
* to the list by the previous loop.
*/
if (!net_eq(dev_net(geneve->dev), net))
- unregister_netdevice_queue(geneve->dev, &list);
+ unregister_netdevice_queue(geneve->dev, head);
}
+ WARN_ON_ONCE(!list_empty(&gn->sock_list));
+}
+
+static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+{
+ struct net *net;
+ LIST_HEAD(list);
+
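+ /* take the rtnl lock once for the whole batch of namespaces
+ * rather than once per exiting netns
+ */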
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list)
+ geneve_destroy_tunnels(net, &list);
+
/* unregister the devices gathered above */
unregister_netdevice_many(&list);
rtnl_unlock();
- WARN_ON_ONCE(!list_empty(&gn->sock_list));
}
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit = geneve_exit_net,
+ .exit_batch = geneve_exit_batch_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
};
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 88ddfb92122b..0db3bd1ea06f 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -146,7 +146,6 @@ struct hv_netvsc_packet {
struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
- int ring_size;
u32 num_chn;
u32 send_sections;
u32 recv_sections;
@@ -188,18 +187,22 @@ struct rndis_message;
struct netvsc_device;
struct net_device_context;
+extern u32 netvsc_ring_bytes;
+extern struct reciprocal_value netvsc_ring_reciprocal;
+
struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *info);
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
void netvsc_device_remove(struct hv_device *device);
-int netvsc_send(struct net_device_context *ndc,
+int netvsc_send(struct net_device *net,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *page_buffer,
struct sk_buff *skb);
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp);
int netvsc_recv_callback(struct net_device *net,
+ struct netvsc_device *nvdev,
struct vmbus_channel *channel,
void *data, u32 len,
const struct ndis_tcp_ip_checksum_info *csum_info,
@@ -220,7 +223,6 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
const u8 *key);
int rndis_filter_receive(struct net_device *ndev,
struct netvsc_device *net_dev,
- struct hv_device *dev,
struct vmbus_channel *channel,
void *data, u32 buflen);
@@ -635,14 +637,27 @@ struct nvsp_message {
#define NETVSC_MTU 65535
#define NETVSC_MTU_MIN ETH_MIN_MTU
-#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
-#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
-#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */
+/* Max buffer sizes allowed by a host */
+#define NETVSC_RECEIVE_BUFFER_SIZE (1024 * 1024 * 31) /* 31MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024 * 1024 * 15) /* 15MB */
+#define NETVSC_RECEIVE_BUFFER_DEFAULT (1024 * 1024 * 16)
+
+#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */
+#define NETVSC_SEND_BUFFER_DEFAULT (1024 * 1024)
+
#define NETVSC_INVALID_INDEX -1
#define NETVSC_SEND_SECTION_SIZE 6144
#define NETVSC_RECV_SECTION_SIZE 1728
+/* Default size of TX buf: 1MB, RX buf: 16MB */
+#define NETVSC_MIN_TX_SECTIONS 10
+#define NETVSC_DEFAULT_TX (NETVSC_SEND_BUFFER_DEFAULT \
+ / NETVSC_SEND_SECTION_SIZE)
+#define NETVSC_MIN_RX_SECTIONS 10
+#define NETVSC_DEFAULT_RX (NETVSC_RECEIVE_BUFFER_DEFAULT \
+ / NETVSC_RECV_SECTION_SIZE)
+
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_SEND_BUFFER_ID 0
@@ -690,6 +705,7 @@ struct netvsc_ethtool_stats {
unsigned long tx_busy;
unsigned long tx_send_full;
unsigned long rx_comp_busy;
+ unsigned long rx_no_memory;
unsigned long stop_queue;
unsigned long wake_queue;
};
@@ -804,13 +820,9 @@ struct netvsc_device {
struct rndis_device *extension;
- int ring_size;
-
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
- atomic_t open_cnt;
-
struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
struct rcu_head rcu;
@@ -1425,32 +1437,6 @@ struct rndis_message {
(sizeof(msg) + (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container)))
-/* get pointer to info buffer with message pointer */
-#define MESSAGE_TO_INFO_BUFFER(msg) \
- (((unsigned char *)(msg)) + msg->info_buf_offset)
-
-/* get pointer to status buffer with message pointer */
-#define MESSAGE_TO_STATUS_BUFFER(msg) \
- (((unsigned char *)(msg)) + msg->status_buf_offset)
-
-/* get pointer to OOBD buffer with message pointer */
-#define MESSAGE_TO_OOBD_BUFFER(msg) \
- (((unsigned char *)(msg)) + msg->oob_data_offset)
-
-/* get pointer to data buffer with message pointer */
-#define MESSAGE_TO_DATA_BUFFER(msg) \
- (((unsigned char *)(msg)) + msg->per_pkt_info_offset)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg) \
- ((void *) &rndis_msg->msg)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg) \
- ((void *) rndis_msg)
-
-
-
#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container))
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index bfc79698b8f4..17e529af79dc 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
+#include <linux/reciprocal_div.h>
#include <asm/sync_bitops.h>
@@ -72,7 +73,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
- atomic_set(&net_device->open_cnt, 0);
+
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -267,6 +268,11 @@ static int netvsc_init_buf(struct hv_device *device,
buf_size = device_info->recv_sections * device_info->recv_section_size;
buf_size = roundup(buf_size, PAGE_SIZE);
+ /* Legacy hosts only allow smaller receive buffer */
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+ buf_size = min_t(unsigned int, buf_size,
+ NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
+
net_device->recv_buf = vzalloc(buf_size);
if (!net_device->recv_buf) {
netdev_err(ndev,
@@ -588,14 +594,11 @@ void netvsc_device_remove(struct hv_device *device)
* Get the percentage of available bytes to write in the ring.
* The return value is in range from 0 to 100.
*/
-static inline u32 hv_ringbuf_avail_percent(
- struct hv_ring_buffer_info *ring_info)
+static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
{
- u32 avail_read, avail_write;
-
- hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
+ u32 avail_write = hv_get_bytes_to_write(ring_info);
- return avail_write * 100 / ring_info->ring_datasize;
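+ /* The ring size is fixed after module init, so dividing by a
+ * precomputed reciprocal avoids a division in this hot path.
+ */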
+ return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
}
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
@@ -698,26 +701,26 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
return NETVSC_INVALID_INDEX;
}
-static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
- unsigned int section_index,
- u32 pend_size,
- struct hv_netvsc_packet *packet,
- struct rndis_message *rndis_msg,
- struct hv_page_buffer *pb,
- struct sk_buff *skb)
+static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ unsigned int section_index,
+ u32 pend_size,
+ struct hv_netvsc_packet *packet,
+ struct rndis_message *rndis_msg,
+ struct hv_page_buffer *pb,
+ bool xmit_more)
{
char *start = net_device->send_buf;
char *dest = start + (section_index * net_device->send_section_size)
+ pend_size;
int i;
- u32 msg_size = 0;
u32 padding = 0;
- u32 remain = packet->total_data_buflen % net_device->pkt_align;
u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
packet->page_buf_cnt;
+ u32 remain;
/* Add padding */
- if (skb->xmit_more && remain && !packet->cp_partial) {
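+ /* pkt_align is presumably a power of two (the RNDIS default
+ * alignment is 8), so the mask is equivalent to "% pkt_align"
+ */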
+ remain = packet->total_data_buflen & (net_device->pkt_align - 1);
+ if (xmit_more && remain) {
padding = net_device->pkt_align - remain;
rndis_msg->msg_len += padding;
packet->total_data_buflen += padding;
@@ -729,16 +732,11 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
u32 len = pb[i].len;
memcpy(dest, (src + offset), len);
- msg_size += len;
dest += len;
}
- if (padding) {
+ if (padding)
memset(dest, 0, padding);
- msg_size += padding;
- }
-
- return msg_size;
}
static inline int netvsc_send_pkt(
@@ -831,12 +829,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
}
/* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *pb,
struct sk_buff *skb)
{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_device *net_device
= rcu_dereference_bh(ndev_ctx->nvdev);
struct hv_device *device = ndev_ctx->device_ctx;
@@ -847,8 +846,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
struct multi_send_data *msdp;
struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
struct sk_buff *msd_skb = NULL;
- bool try_batch;
- bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
+ bool try_batch, xmit_more;
/* If device is rescinded, return error and packet will get dropped. */
if (unlikely(!net_device || net_device->destroy))
@@ -899,10 +897,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
}
}
+ /* Keep aggregating only if the stack says more data is coming,
+ * we are not doing a mixed-mode (partial copy) send, and the
+ * queue is not flow-blocked
+ */
+ xmit_more = skb->xmit_more &&
+ !packet->cp_partial &&
+ !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
if (section_index != NETVSC_INVALID_INDEX) {
netvsc_copy_to_send_buf(net_device,
section_index, msd_len,
- packet, rndis_msg, pb, skb);
+ packet, rndis_msg, pb, xmit_more);
packet->send_buf_index = section_index;
@@ -922,7 +927,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
if (msdp->skb)
dev_consume_skb_any(msdp->skb);
- if (xmit_more && !packet->cp_partial) {
+ if (xmit_more) {
msdp->skb = skb;
msdp->pkt = packet;
msdp->count++;
@@ -1085,7 +1090,7 @@ static int netvsc_receive(struct net_device *ndev,
u32 buflen = vmxferpage_packet->ranges[i].byte_count;
/* Pass it to the upper layer */
- status = rndis_filter_receive(ndev, net_device, device,
+ status = rndis_filter_receive(ndev, net_device,
channel, data, buflen);
}
@@ -1249,7 +1254,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *device_info)
{
int i, ret = 0;
- int ring_size = device_info->ring_size;
struct netvsc_device *net_device;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1261,8 +1265,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
net_device_ctx->tx_table[i] = 0;
- net_device->ring_size = ring_size;
-
/* Because the device uses NAPI, all the interrupt batching and
* control is done via Net softirq, not the channel handling
*/
@@ -1289,10 +1291,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netvsc_poll, NAPI_POLL_WEIGHT);
/* Open the channel */
- ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
- ring_size * PAGE_SIZE, NULL, 0,
- netvsc_channel_cb,
- net_device->chan_table);
+ ret = vmbus_open(device->channel, netvsc_ring_bytes,
+ netvsc_ring_bytes, NULL, 0,
+ netvsc_channel_cb, net_device->chan_table);
if (ret != 0) {
netif_napi_del(&net_device->chan_table[0].napi);
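The hunks above replace a per-packet integer division by the ring size with
reciprocal_divide() on a value computed once, since the module parameter is
fixed after init. A minimal sketch of that pattern, assuming a divisor that
never changes after module load (demo_* names are illustrative, not from
the patch):

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/reciprocal_div.h>

static u32 demo_ring_bytes;                    /* divisor, fixed after init */
static struct reciprocal_value demo_recip;     /* precomputed once */

static int __init demo_init(void)
{
	demo_ring_bytes = 128 * PAGE_SIZE;
	demo_recip = reciprocal_value(demo_ring_bytes);
	return 0;
}
module_init(demo_init);

/* hot path: a multiply and shift instead of a hardware divide */
static u32 demo_avail_percent(u32 avail_write)
{
	return reciprocal_divide(avail_write * 100, demo_recip);
}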
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5129647d420c..c5584c2d440e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
+#include <linux/reciprocal_div.h>
#include <net/arp.h>
#include <net/route.h>
@@ -46,17 +47,15 @@
#include "hyperv_net.h"
#define RING_SIZE_MIN 64
-#define NETVSC_MIN_TX_SECTIONS 10
-#define NETVSC_DEFAULT_TX 192 /* ~1M */
-#define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */
-#define NETVSC_DEFAULT_RX 10485 /* Max ~16M */
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
-static int ring_size = 128;
-module_param(ring_size, int, S_IRUGO);
+static unsigned int ring_size __ro_after_init = 128;
+module_param(ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+unsigned int netvsc_ring_bytes __ro_after_init;
+struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
@@ -174,17 +173,15 @@ out:
return ret;
}
-static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
- int pkt_type)
+static inline void *init_ppi_data(struct rndis_message *msg,
+ u32 ppi_size, u32 pkt_type)
{
- struct rndis_packet *rndis_pkt;
+ struct rndis_packet *rndis_pkt = &msg->msg.pkt;
struct rndis_per_packet_info *ppi;
- rndis_pkt = &msg->msg.pkt;
rndis_pkt->data_offset += ppi_size;
-
- ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
- rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
+ ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
+ + rndis_pkt->per_pkt_info_len;
ppi->size = ppi_size;
ppi->type = pkt_type;
@@ -192,7 +189,7 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
rndis_pkt->per_pkt_info_len += ppi_size;
- return ppi;
+ return ppi + 1;
}
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
@@ -469,10 +466,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
int ret;
unsigned int num_data_pgs;
struct rndis_message *rndis_msg;
- struct rndis_packet *rndis_pkt;
struct net_device *vf_netdev;
u32 rndis_msg_size;
- struct rndis_per_packet_info *ppi;
u32 hash;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
@@ -527,34 +522,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
rndis_msg = (struct rndis_message *)skb->head;
- memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);
-
/* Add the rndis header */
rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
rndis_msg->msg_len = packet->total_data_buflen;
- rndis_pkt = &rndis_msg->msg.pkt;
- rndis_pkt->data_offset = sizeof(struct rndis_packet);
- rndis_pkt->data_len = packet->total_data_buflen;
- rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+
+ rndis_msg->msg.pkt = (struct rndis_packet) {
+ .data_offset = sizeof(struct rndis_packet),
+ .data_len = packet->total_data_buflen,
+ .per_pkt_info_offset = sizeof(struct rndis_packet),
+ };
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
hash = skb_get_hash_raw(skb);
if (hash != 0 && net->real_num_tx_queues > 1) {
+ u32 *hash_info;
+
rndis_msg_size += NDIS_HASH_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
- NBL_HASH_VALUE);
- *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
+ hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
+ NBL_HASH_VALUE);
+ *hash_info = hash;
}
if (skb_vlan_tag_present(skb)) {
struct ndis_pkt_8021q_info *vlan;
rndis_msg_size += NDIS_VLAN_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
- IEEE_8021Q_INFO);
+ vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+ IEEE_8021Q_INFO);
- vlan = (void *)ppi + ppi->ppi_offset;
+ vlan->value = 0;
vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
VLAN_PRIO_SHIFT;
@@ -564,11 +561,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
- TCP_LARGESEND_PKTINFO);
-
- lso_info = (void *)ppi + ppi->ppi_offset;
+ lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+ TCP_LARGESEND_PKTINFO);
+ lso_info->value = 0;
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (skb->protocol == htons(ETH_P_IP)) {
lso_info->lso_v2_transmit.ip_version =
@@ -593,12 +589,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct ndis_tcp_ip_checksum_info *csum_info;
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
- TCPIP_CHKSUM_PKTINFO);
-
- csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
- ppi->ppi_offset);
+ csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+ TCPIP_CHKSUM_PKTINFO);
+ csum_info->value = 0;
csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
@@ -632,7 +626,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* timestamp packet in software */
skb_tx_timestamp(skb);
- ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
+ ret = netvsc_send(net, packet, rndis_msg, pb, skb);
if (likely(ret == 0))
return NETDEV_TX_OK;
@@ -658,22 +652,14 @@ no_memory:
/*
* netvsc_linkstatus_callback - Link up/down notification
*/
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp)
{
struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
- struct net_device *net;
- struct net_device_context *ndev_ctx;
+ struct net_device_context *ndev_ctx = netdev_priv(net);
struct netvsc_reconfig *event;
unsigned long flags;
- net = hv_get_drvdata(device_obj);
-
- if (!net)
- return;
-
- ndev_ctx = netdev_priv(net);
-
/* Update the physical link speed when changing to another vSwitch */
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
u32 speed;
@@ -753,34 +739,26 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* "wire" on the specified device.
*/
int netvsc_recv_callback(struct net_device *net,
+ struct netvsc_device *net_device,
struct vmbus_channel *channel,
void *data, u32 len,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct netvsc_device *net_device;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
- struct netvsc_channel *nvchan;
+ struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
struct sk_buff *skb;
struct netvsc_stats *rx_stats;
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
- rcu_read_lock();
- net_device = rcu_dereference(net_device_ctx->nvdev);
- if (unlikely(!net_device))
- goto drop;
-
- nvchan = &net_device->chan_table[q_idx];
-
/* Allocate a skb - TODO direct I/O to pages? */
skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
csum_info, vlan, data, len);
if (unlikely(!skb)) {
-drop:
- ++net->stats.rx_dropped;
+ ++net_device_ctx->eth_stats.rx_no_memory;
rcu_read_unlock();
return NVSP_STAT_FAIL;
}
@@ -804,8 +782,6 @@ drop:
u64_stats_update_end(&rx_stats->syncp);
napi_gro_receive(&nvchan->napi, skb);
- rcu_read_unlock();
-
return 0;
}
@@ -860,7 +836,6 @@ static int netvsc_set_channels(struct net_device *net,
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = count;
- device_info.ring_size = ring_size;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
@@ -975,7 +950,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
rndis_filter_close(nvdev);
memset(&device_info, 0, sizeof(device_info));
- device_info.ring_size = ring_size;
device_info.num_chn = nvdev->num_chn;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
@@ -1133,12 +1107,13 @@ static const struct {
u16 offset;
} netvsc_stats[] = {
{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
- { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
+ { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
+ { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, vf_stats[] = {
@@ -1539,7 +1514,6 @@ static int netvsc_set_ringparam(struct net_device *ndev,
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn;
- device_info.ring_size = ring_size;
device_info.send_sections = new_tx;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = new_rx;
@@ -1995,7 +1969,6 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
- device_info.ring_size = ring_size;
device_info.num_chn = VRSS_CHANNEL_DEFAULT;
device_info.send_sections = NETVSC_DEFAULT_TX;
device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
@@ -2158,11 +2131,13 @@ static int __init netvsc_drv_init(void)
if (ring_size < RING_SIZE_MIN) {
ring_size = RING_SIZE_MIN;
- pr_info("Increased ring_size to %d (min allowed)\n",
+ pr_info("Increased ring_size to %u (min allowed)\n",
ring_size);
}
- ret = vmbus_driver_register(&netvsc_drv);
+ netvsc_ring_bytes = ring_size * PAGE_SIZE;
+ netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
+ ret = vmbus_driver_register(&netvsc_drv);
if (ret)
return ret;
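init_ppi_data() above now returns ppi + 1 instead of the header pointer, so
each caller receives the per-packet data area directly and the repeated
"(void *)ppi + ppi->ppi_offset" casts disappear. A sketch of the idiom,
with illustrative demo_* names:

struct demo_hdr {
	u32 size;
	u32 type;
};

static void *demo_init_hdr(struct demo_hdr *hdr, u32 size, u32 type)
{
	hdr->size = size;
	hdr->type = type;
	return hdr + 1;		/* first byte after the header */
}

A caller can then write the payload without an offset computation, e.g.
"u32 *hash = demo_init_hdr(hdr, size, type); *hash = value;".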
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 7b637c7dd1e5..c3ca191fea7f 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -134,11 +134,9 @@ static void put_rndis_request(struct rndis_device *dev,
kfree(req);
}
-static void dump_rndis_message(struct hv_device *hv_dev,
+static void dump_rndis_message(struct net_device *netdev,
const struct rndis_message *rndis_msg)
{
- struct net_device *netdev = hv_get_drvdata(hv_dev);
-
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
@@ -217,7 +215,6 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct hv_netvsc_packet *packet;
struct hv_page_buffer page_buf[2];
struct hv_page_buffer *pb = page_buf;
- struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
int ret;
/* Setup the packet to send it */
@@ -245,7 +242,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
}
rcu_read_lock_bh();
- ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
+ ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
rcu_read_unlock_bh();
return ret;
@@ -354,6 +351,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
}
static int rndis_filter_receive_data(struct net_device *ndev,
+ struct netvsc_device *nvdev,
struct rndis_device *dev,
struct rndis_message *msg,
struct vmbus_channel *channel,
@@ -390,14 +388,14 @@ static int rndis_filter_receive_data(struct net_device *ndev,
*/
data = (void *)((unsigned long)data + data_offset);
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
- return netvsc_recv_callback(ndev, channel,
+
+ return netvsc_recv_callback(ndev, nvdev, channel,
data, rndis_pkt->data_len,
csum_info, vlan);
}
int rndis_filter_receive(struct net_device *ndev,
struct netvsc_device *net_dev,
- struct hv_device *dev,
struct vmbus_channel *channel,
void *data, u32 buflen)
{
@@ -419,11 +417,12 @@ int rndis_filter_receive(struct net_device *ndev,
}
if (netif_msg_rx_status(net_device_ctx))
- dump_rndis_message(dev, rndis_msg);
+ dump_rndis_message(ndev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
+ return rndis_filter_receive_data(ndev, net_dev,
+ rndis_dev, rndis_msg,
channel, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
@@ -434,7 +433,7 @@ int rndis_filter_receive(struct net_device *ndev,
case RNDIS_MSG_INDICATE:
/* notification msgs */
- netvsc_linkstatus_callback(dev, rndis_msg);
+ netvsc_linkstatus_callback(ndev, rndis_msg);
break;
default:
netdev_err(ndev,
@@ -1040,8 +1039,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
/* Set the channel before opening.*/
nvchan->channel = new_sc;
- ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
- nvscdev->ring_size * PAGE_SIZE, NULL, 0,
+ ret = vmbus_open(new_sc, netvsc_ring_bytes,
+ netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
if (ret == 0)
napi_enable(&nvchan->napi);
@@ -1222,7 +1221,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
- const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
int i, ret;
@@ -1291,14 +1289,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
if (ret || rsscap.num_recv_que < 2)
goto out;
- /*
- * We will limit the VRSS channels to the number CPUs in the NUMA node
- * the primary channel is currently bound to.
- *
- * This also guarantees that num_possible_rss_qs <= num_online_cpus
- */
- node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
- num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
+ /* This guarantees that num_possible_rss_qs <= num_online_cpus */
+ num_possible_rss_qs = min_t(u32, num_online_cpus(),
rsscap.num_recv_que);
net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
@@ -1362,9 +1354,6 @@ int rndis_filter_open(struct netvsc_device *nvdev)
if (!nvdev)
return -EINVAL;
- if (atomic_inc_return(&nvdev->open_cnt) != 1)
- return 0;
-
return rndis_filter_open_device(nvdev->extension);
}
@@ -1373,13 +1362,12 @@ int rndis_filter_close(struct netvsc_device *nvdev)
if (!nvdev)
return -EINVAL;
- if (atomic_dec_return(&nvdev->open_cnt) != 0)
- return 0;
-
return rndis_filter_close_device(nvdev->extension);
}
bool rndis_filter_opened(const struct netvsc_device *nvdev)
{
- return atomic_read(&nvdev->open_cnt) > 0;
+ const struct rndis_device *dev = nvdev->extension;
+
+ return dev->state == RNDIS_DEV_DATAINITIALIZED;
}
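Several hunks above converge on the same calling convention: pass the
struct net_device and recover the driver context inside the callee via
netdev_priv(), rather than threading a private pointer (or hv_device)
through every caller. A minimal sketch, assuming a driver private struct
(demo_* names are illustrative):

#include <linux/errno.h>
#include <linux/netdevice.h>

struct demo_priv {
	int link_up;
};

static int demo_send(struct net_device *ndev)
{
	struct demo_priv *priv = netdev_priv(ndev);

	return priv->link_up ? 0 : -ENETDOWN;
}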
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 400fdbd3a120..64f1b1e77bc0 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1,7 +1,7 @@
/*
* Analog Devices ADF7242 Low-Power IEEE 802.15.4 Transceiver
*
- * Copyright 2009-2015 Analog Devices Inc.
+ * Copyright 2009-2017 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*
@@ -344,12 +344,18 @@ static int adf7242_wait_status(struct adf7242_local *lp, unsigned int status,
return ret;
}
-static int adf7242_wait_ready(struct adf7242_local *lp, int line)
+static int adf7242_wait_rc_ready(struct adf7242_local *lp, int line)
{
return adf7242_wait_status(lp, STAT_RC_READY | STAT_SPI_READY,
STAT_RC_READY | STAT_SPI_READY, line);
}
+static int adf7242_wait_spi_ready(struct adf7242_local *lp, int line)
+{
+ return adf7242_wait_status(lp, STAT_SPI_READY,
+ STAT_SPI_READY, line);
+}
+
static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len)
{
u8 *buf = lp->buf;
@@ -369,7 +375,7 @@ static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len)
spi_message_add_tail(&xfer_head, &msg);
spi_message_add_tail(&xfer_buf, &msg);
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_spi_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
buf[0] = CMD_SPI_PKT_WR;
@@ -401,7 +407,7 @@ static int adf7242_read_fbuf(struct adf7242_local *lp,
spi_message_add_tail(&xfer_head, &msg);
spi_message_add_tail(&xfer_buf, &msg);
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_spi_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
if (packet_read) {
@@ -432,7 +438,7 @@ static int adf7242_read_reg(struct adf7242_local *lp, u16 addr, u8 *data)
.rx_buf = lp->buf_read_rx,
};
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_spi_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
lp->buf_read_tx[0] = CMD_SPI_MEM_RD(addr);
@@ -462,7 +468,7 @@ static int adf7242_write_reg(struct adf7242_local *lp, u16 addr, u8 data)
{
int status;
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_spi_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
lp->buf_reg_tx[0] = CMD_SPI_MEM_WR(addr);
@@ -484,7 +490,7 @@ static int adf7242_cmd(struct adf7242_local *lp, unsigned int cmd)
dev_vdbg(&lp->spi->dev, "%s : CMD=0x%X\n", __func__, cmd);
if (cmd != CMD_RC_PC_RESET_NO_WAIT)
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_rc_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
lp->buf_cmd = cmd;
@@ -557,6 +563,22 @@ static int adf7242_verify_firmware(struct adf7242_local *lp,
return 0;
}
+static void adf7242_clear_irqstat(struct adf7242_local *lp)
+{
+ adf7242_write_reg(lp, REG_IRQ1_SRC1, IRQ_CCA_COMPLETE | IRQ_SFD_RX |
+ IRQ_SFD_TX | IRQ_RX_PKT_RCVD | IRQ_TX_PKT_SENT |
+ IRQ_FRAME_VALID | IRQ_ADDRESS_VALID | IRQ_CSMA_CA);
+}
+
+static int adf7242_cmd_rx(struct adf7242_local *lp)
+{
+ /* Wait until the ACK is sent */
+ adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
+ adf7242_clear_irqstat(lp);
+
+ return adf7242_cmd(lp, CMD_RC_RX);
+}
+
static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
{
struct adf7242_local *lp = hw->priv;
@@ -660,7 +682,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
struct adf7242_local *lp = hw->priv;
adf7242_cmd(lp, CMD_RC_PHY_RDY);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+ adf7242_clear_irqstat(lp);
enable_irq(lp->spi->irq);
set_bit(FLAG_START, &lp->flags);
@@ -671,10 +693,10 @@ static void adf7242_stop(struct ieee802154_hw *hw)
{
struct adf7242_local *lp = hw->priv;
+ disable_irq(lp->spi->irq);
adf7242_cmd(lp, CMD_RC_IDLE);
clear_bit(FLAG_START, &lp->flags);
- disable_irq(lp->spi->irq);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+ adf7242_clear_irqstat(lp);
}
static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
@@ -789,9 +811,12 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
struct adf7242_local *lp = hw->priv;
int ret;
+ /* ensure existing instances of the IRQ handler have completed */
+ disable_irq(lp->spi->irq);
set_bit(FLAG_XMIT, &lp->flags);
reinit_completion(&lp->tx_complete);
adf7242_cmd(lp, CMD_RC_PHY_RDY);
+ adf7242_clear_irqstat(lp);
ret = adf7242_write_fbuf(lp, skb->data, skb->len);
if (ret)
@@ -800,6 +825,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
ret = adf7242_cmd(lp, CMD_RC_CSMACA);
if (ret)
goto err;
+ enable_irq(lp->spi->irq);
ret = wait_for_completion_interruptible_timeout(&lp->tx_complete,
HZ / 10);
@@ -822,7 +848,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
err:
clear_bit(FLAG_XMIT, &lp->flags);
- adf7242_cmd(lp, CMD_RC_RX);
+ adf7242_cmd_rx(lp);
return ret;
}
@@ -846,7 +872,7 @@ static int adf7242_rx(struct adf7242_local *lp)
skb = dev_alloc_skb(len);
if (!skb) {
- adf7242_cmd(lp, CMD_RC_RX);
+ adf7242_cmd_rx(lp);
return -ENOMEM;
}
@@ -854,14 +880,14 @@ static int adf7242_rx(struct adf7242_local *lp)
ret = adf7242_read_fbuf(lp, data, len, true);
if (ret < 0) {
kfree_skb(skb);
- adf7242_cmd(lp, CMD_RC_RX);
+ adf7242_cmd_rx(lp);
return ret;
}
lqi = data[len - 2];
lp->rssi = data[len - 1];
- adf7242_cmd(lp, CMD_RC_RX);
+ ret = adf7242_cmd_rx(lp);
skb_trim(skb, len - 2); /* Don't put RSSI/LQI or CRC into the frame */
@@ -870,7 +896,7 @@ static int adf7242_rx(struct adf7242_local *lp)
dev_dbg(&lp->spi->dev, "%s: ret=%d len=%d lqi=%d rssi=%d\n",
__func__, ret, (int)len, (int)lqi, lp->rssi);
- return 0;
+ return ret;
}
static const struct ieee802154_ops adf7242_ops = {
@@ -888,7 +914,7 @@ static const struct ieee802154_ops adf7242_ops = {
.set_cca_ed_level = adf7242_set_cca_ed_level,
};
-static void adf7242_debug(u8 irq1)
+static void adf7242_debug(struct adf7242_local *lp, u8 irq1)
{
#ifdef DEBUG
u8 stat;
@@ -906,9 +932,12 @@ static void adf7242_debug(u8 irq1)
irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");
- dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s%s%s%s%s\n",
+ dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n",
__func__, stat,
+ stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY",
+ stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR",
stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
+ stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY",
(stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
(stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
@@ -923,20 +952,20 @@ static irqreturn_t adf7242_isr(int irq, void *data)
unsigned int xmit;
u8 irq1;
- adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
-
adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, irq1);
if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n",
__func__, irq1);
- adf7242_debug(irq1);
+ adf7242_debug(lp, irq1);
xmit = test_bit(FLAG_XMIT, &lp->flags);
if (xmit && (irq1 & IRQ_CSMA_CA)) {
+ adf7242_wait_status(lp, RC_STATUS_PHY_RDY,
+ RC_STATUS_MASK, __LINE__);
+
if (ADF7242_REPORT_CSMA_CA_STAT) {
u8 astat;
@@ -957,6 +986,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
lp->tx_stat = SUCCESS;
}
complete(&lp->tx_complete);
+ adf7242_clear_irqstat(lp);
} else if (!xmit && (irq1 & IRQ_RX_PKT_RCVD) &&
(irq1 & IRQ_FRAME_VALID)) {
adf7242_rx(lp);
@@ -965,16 +995,19 @@ static irqreturn_t adf7242_isr(int irq, void *data)
dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X\n",
__func__, __LINE__, irq1);
adf7242_cmd(lp, CMD_RC_PHY_RDY);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
- adf7242_cmd(lp, CMD_RC_RX);
+ adf7242_cmd_rx(lp);
} else {
/* This can only be a transmit without its IRQ yet, likely an RX
* packet; we should get a TX IRQ shortly - do nothing or let the
* xmit timeout handle this.
*/
+
dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X, xmit %d\n",
__func__, __LINE__, irq1, xmit);
+ adf7242_wait_status(lp, RC_STATUS_PHY_RDY,
+ RC_STATUS_MASK, __LINE__);
complete(&lp->tx_complete);
+ adf7242_clear_irqstat(lp);
}
return IRQ_HANDLED;
@@ -994,7 +1027,7 @@ static int adf7242_soft_reset(struct adf7242_local *lp, int line)
adf7242_set_promiscuous_mode(lp->hw, lp->promiscuous);
adf7242_set_csma_params(lp->hw, lp->min_be, lp->max_be,
lp->max_cca_retries);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+ adf7242_clear_irqstat(lp);
if (test_bit(FLAG_START, &lp->flags)) {
enable_irq(lp->spi->irq);
@@ -1060,7 +1093,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
adf7242_write_reg(lp, REG_IRQ1_EN0, 0);
adf7242_write_reg(lp, REG_IRQ1_EN1, IRQ_RX_PKT_RCVD | IRQ_CSMA_CA);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+ adf7242_clear_irqstat(lp);
adf7242_write_reg(lp, REG_IRQ1_SRC0, 0xFF);
adf7242_cmd(lp, CMD_RC_IDLE);
@@ -1086,8 +1119,11 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");
- seq_printf(file, "STATUS = %X:\n%s\n%s%s%s%s%s\n", stat,
+ seq_printf(file, "STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n", stat,
+ stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY",
+ stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR",
stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
+ stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY",
(stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
(stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
@@ -1257,12 +1293,14 @@ static int adf7242_remove(struct spi_device *spi)
static const struct of_device_id adf7242_of_match[] = {
{ .compatible = "adi,adf7242", },
+ { .compatible = "adi,adf7241", },
{ },
};
MODULE_DEVICE_TABLE(of, adf7242_of_match);
static const struct spi_device_id adf7242_device_id[] = {
{ .name = "adf7242", },
+ { .name = "adf7241", },
{ },
};
MODULE_DEVICE_TABLE(spi, adf7242_device_id);
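The adf7242 changes above split the old catch-all adf7242_wait_ready()
into separate RC-ready and SPI-ready waits, both thin wrappers around one
(value, mask) status poll. A sketch of that masked-poll shape, assuming a
caller-supplied status reader (demo_* names and bit positions are
illustrative, not the chip's real layout):

#include <linux/bits.h>
#include <linux/errno.h>
#include <asm/processor.h>

#define DEMO_STAT_SPI_READY	BIT(7)
#define DEMO_STAT_RC_READY	BIT(5)

static int demo_wait_status(u8 (*read_stat)(void), u8 value, u8 mask)
{
	int retries = 1000;

	while (retries--) {
		if ((read_stat() & mask) == value)
			return 0;
		cpu_relax();
	}
	return -ETIMEDOUT;
}

static int demo_wait_spi_ready(u8 (*read_stat)(void))
{
	return demo_wait_status(read_stat, DEMO_STAT_SPI_READY,
				DEMO_STAT_SPI_READY);
}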
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 7900ed066d8a..e412dfdda7dd 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2638,12 +2638,12 @@ static long ca8210_test_int_ioctl(
*
* Return: set of poll return flags
*/
-static unsigned int ca8210_test_int_poll(
+static __poll_t ca8210_test_int_poll(
struct file *filp,
struct poll_table_struct *ptable
)
{
- unsigned int return_flags = 0;
+ __poll_t return_flags = 0;
struct ca8210_priv *priv = filp->private_data;
poll_wait(filp, &priv->test.readq, ptable);
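The ca8210 hunk above is part of the tree-wide conversion of poll methods
to the sparse-checked __poll_t type. A sketch of a __poll_t-clean poll
method under that convention (demo_* names and the data-ready test are
illustrative, not the ca8210 logic):

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct demo_priv {
	wait_queue_head_t readq;
	bool data_ready;
};

static __poll_t demo_poll(struct file *filp, struct poll_table_struct *pt)
{
	struct demo_priv *priv = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &priv->readq, pt);
	if (priv->data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}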
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 77cc4fbaeace..c1f008fe4e1d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -315,13 +315,13 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
*pskb = skb;
}
- ipvlan_skb_crossing_ns(skb, dev);
if (local) {
skb->pkt_type = PACKET_HOST;
if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
success = true;
} else {
+ skb->dev = dev;
ret = RX_HANDLER_ANOTHER;
success = true;
}
@@ -586,7 +586,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return NET_XMIT_SUCCESS;
}
- ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
+ skb->dev = ipvlan->phy_dev;
return dev_queue_xmit(skb);
}
@@ -664,8 +664,6 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
struct sk_buff *skb = *pskb;
struct ethhdr *eth = eth_hdr(skb);
rx_handler_result_t ret = RX_HANDLER_PASS;
- void *lyr3h;
- int addr_type;
if (is_multicast_ether_addr(eth->h_dest)) {
if (ipvlan_external_frame(skb, port)) {
@@ -683,15 +681,8 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
}
}
} else {
- struct ipvl_addr *addr;
-
- lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
- if (!lyr3h)
- return ret;
-
- addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
- if (addr)
- ret = ipvlan_rcv_frame(addr, pskb, false);
+ /* Perform like l3 mode for non-multicast packet */
+ ret = ipvlan_handle_mode_l3(pskb, port);
}
return ret;
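With the hunk above, ipvlan's l2 receive path keeps only the multicast
special case and hands every unicast frame to the l3 handler, so the
header parse and address lookup live in one place. A sketch of the shape
(demo_* names are illustrative, with the delegated handlers left as
declarations):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static rx_handler_result_t demo_handle_multicast(struct sk_buff **pskb,
						 void *port);
static rx_handler_result_t demo_handle_l3(struct sk_buff **pskb, void *port);

static rx_handler_result_t demo_handle_l2(struct sk_buff **pskb, void *port)
{
	struct ethhdr *eth = eth_hdr(*pskb);

	if (is_multicast_ether_addr(eth->h_dest))
		return demo_handle_multicast(pskb, port);

	/* unicast: identical lookup to l3 mode, so reuse it */
	return demo_handle_l3(pskb, port);
}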
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 30cb803e2fe5..2469df118fbf 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -850,6 +850,19 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
return ipvlan_del_addr(ipvlan, ip6_addr, true);
}
+static bool ipvlan_is_valid_dev(const struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ if (!netif_is_ipvlan(dev))
+ return false;
+
+ if (!ipvlan || !ipvlan->port)
+ return false;
+
+ return true;
+}
+
static int ipvlan_addr6_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -857,10 +870,7 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
struct net_device *dev = (struct net_device *)if6->idev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
- if (!netif_is_ipvlan(dev))
- return NOTIFY_DONE;
-
- if (!ipvlan || !ipvlan->port)
+ if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
@@ -888,10 +898,7 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
if (in_softirq())
return NOTIFY_DONE;
- if (!netif_is_ipvlan(dev))
- return NOTIFY_DONE;
-
- if (!ipvlan || !ipvlan->port)
+ if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
@@ -932,10 +939,7 @@ static int ipvlan_addr4_event(struct notifier_block *unused,
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct in_addr ip4_addr;
- if (!netif_is_ipvlan(dev))
- return NOTIFY_DONE;
-
- if (!ipvlan || !ipvlan->port)
+ if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
@@ -961,10 +965,7 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused,
struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
- if (!netif_is_ipvlan(dev))
- return NOTIFY_DONE;
-
- if (!ipvlan || !ipvlan->port)
+ if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
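The ipvlan_main.c hunks above fold the same two checks, repeated in four
notifiers, into ipvlan_is_valid_dev(). A sketch of the resulting notifier
shape; note this uses a generic netdev notifier for illustration, while
the real handlers extract the device from their address-event payloads:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_addr_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!ipvlan_is_valid_dev(dev))
		return NOTIFY_DONE;

	/* ... handle event ... */
	return NOTIFY_OK;
}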
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 1d025ab9568f..7de88b33d5b9 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -393,7 +393,10 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
-#define DEFAULT_SAK_LEN 16
+#define MACSEC_GCM_AES_128_SAK_LEN 16
+#define MACSEC_GCM_AES_256_SAK_LEN 32
+
+#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
@@ -2362,15 +2365,26 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
+ u64 csid;
if (!secy_nest)
return 1;
+ switch (secy->key_len) {
+ case MACSEC_GCM_AES_128_SAK_LEN:
+ csid = MACSEC_DEFAULT_CIPHER_ID;
+ break;
+ case MACSEC_GCM_AES_256_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ break;
+ default:
+ goto cancel;
+ }
+
if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
MACSEC_SECY_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID,
- MACSEC_SECY_ATTR_PAD) ||
+ csid, MACSEC_SECY_ATTR_PAD) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -3015,8 +3029,8 @@ static void macsec_setup(struct net_device *dev)
eth_zero_addr(dev->broadcast);
}
-static void macsec_changelink_common(struct net_device *dev,
- struct nlattr *data[])
+static int macsec_changelink_common(struct net_device *dev,
+ struct nlattr *data[])
{
struct macsec_secy *secy;
struct macsec_tx_sc *tx_sc;
@@ -3056,6 +3070,22 @@ static void macsec_changelink_common(struct net_device *dev,
if (data[IFLA_MACSEC_VALIDATION])
secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
+
+ if (data[IFLA_MACSEC_CIPHER_SUITE]) {
+ switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_DEFAULT_CIPHER_ID:
+ secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+ break;
+ case MACSEC_CIPHER_ID_GCM_AES_256:
+ secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3071,9 +3101,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
data[IFLA_MACSEC_PORT])
return -EINVAL;
- macsec_changelink_common(dev, data);
-
- return 0;
+ return macsec_changelink_common(dev, data);
}
static void macsec_del_dev(struct macsec_dev *macsec)
@@ -3270,8 +3298,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err)
goto unlink;
- if (data)
- macsec_changelink_common(dev, data);
+ if (data) {
+ err = macsec_changelink_common(dev, data);
+ if (err)
+ goto del_dev;
+ }
err = register_macsec_dev(real_dev, dev);
if (err < 0)
@@ -3320,8 +3351,9 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
}
switch (csid) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_CIPHER_ID_GCM_AES_256:
case MACSEC_DEFAULT_CIPHER_ID:
- case MACSEC_DEFAULT_CIPHER_ALT:
if (icv_len < MACSEC_MIN_ICV_LEN ||
icv_len > MACSEC_STD_ICV_LEN)
return -EINVAL;
@@ -3390,12 +3422,24 @@ static int macsec_fill_info(struct sk_buff *skb,
{
struct macsec_secy *secy = &macsec_priv(dev)->secy;
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ u64 csid;
+
+ switch (secy->key_len) {
+ case MACSEC_GCM_AES_128_SAK_LEN:
+ csid = MACSEC_DEFAULT_CIPHER_ID;
+ break;
+ case MACSEC_GCM_AES_256_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ break;
+ default:
+ goto nla_put_failure;
+ }
if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
+ csid, IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
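The key_len to cipher-suite-ID mapping above now appears in both
nla_put_secy() and macsec_fill_info(). A helper like the following
(illustrative, not part of the patch) would state it once:

static int demo_csid_from_key_len(unsigned int key_len, u64 *csid)
{
	switch (key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		*csid = MACSEC_DEFAULT_CIPHER_ID;
		return 0;
	case MACSEC_GCM_AES_256_SAK_LEN:
		*csid = MACSEC_CIPHER_ID_GCM_AES_256;
		return 0;
	default:
		return -EINVAL;
	}
}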
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a178c5efd33e..a0f2be81d52e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return 0;
unregister_netdev:
+ /* macvlan_uninit would free the macvlan port */
unregister_netdevice(dev);
+ return err;
destroy_macvlan_port:
- if (create)
+ /* The macvlan port may already have been freed by macvlan_uninit
+ * when registration fails, so destroy the port only if it is
+ * still valid.
+ */
+ if (create && macvlan_port_get_rtnl(dev))
macvlan_port_destroy(port->dev);
return err;
}
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
new file mode 100644
index 000000000000..09388c06171d
--- /dev/null
+++ b/drivers/net/netdevsim/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_NETDEVSIM) += netdevsim.o
+
+netdevsim-objs := \
+ netdev.o \
+
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+netdevsim-objs += \
+ bpf.o
+endif
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
new file mode 100644
index 000000000000..de73c1ff0939
--- /dev/null
+++ b/drivers/net/netdevsim/bpf.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+
+#include "netdevsim.h"
+
+#define pr_vlog(env, fmt, ...) \
+ bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
+
+struct nsim_bpf_bound_prog {
+ struct netdevsim *ns;
+ struct bpf_prog *prog;
+ struct dentry *ddir;
+ const char *state;
+ bool is_loaded;
+ struct list_head l;
+};
+
+#define NSIM_BPF_MAX_KEYS 2
+
+struct nsim_bpf_bound_map {
+ struct netdevsim *ns;
+ struct bpf_offloaded_map *map;
+ struct mutex mutex;
+ struct nsim_map_entry {
+ void *key;
+ void *value;
+ } entry[NSIM_BPF_MAX_KEYS];
+ struct list_head l;
+};
+
+static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
+{
+ const char **str = file->private;
+
+ if (*str)
+ seq_printf(file, "%s\n", *str);
+
+ return 0;
+}
+
+static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private);
+}
+
+static const struct file_operations nsim_bpf_string_fops = {
+ .owner = THIS_MODULE,
+ .open = nsim_debugfs_bpf_string_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static int
+nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ state = env->prog->aux->offload->dev_priv;
+ if (state->ns->bpf_bind_verifier_delay && !insn_idx)
+ msleep(state->ns->bpf_bind_verifier_delay);
+
+ if (insn_idx == env->prog->len - 1)
+ pr_vlog(env, "Hello from netdevsim!\n");
+
+ return 0;
+}
+
+static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
+ .insn_hook = nsim_bpf_verify_insn,
+};
+
+static bool nsim_xdp_offload_active(struct netdevsim *ns)
+{
+ return ns->xdp_prog_mode == XDP_ATTACHED_HW;
+}
+
+static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ if (!prog || !prog->aux->offload)
+ return;
+
+ state = prog->aux->offload->dev_priv;
+ state->is_loaded = loaded;
+}
+
+static int
+nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
+{
+ nsim_prog_set_loaded(ns->bpf_offloaded, false);
+
+ WARN(!!ns->bpf_offloaded != oldprog,
+ "bad offload state, expected offload %sto be active",
+ oldprog ? "" : "not ");
+ ns->bpf_offloaded = prog;
+ ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
+ nsim_prog_set_loaded(prog, true);
+
+ return 0;
+}
+
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct tc_cls_bpf_offload *cls_bpf = type_data;
+ struct bpf_prog *prog = cls_bpf->prog;
+ struct netdevsim *ns = cb_priv;
+ struct bpf_prog *oldprog;
+
+ if (type != TC_SETUP_CLSBPF) {
+ NSIM_EA(cls_bpf->common.extack,
+ "only offload of BPF classifiers supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
+ return -EOPNOTSUPP;
+
+ if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
+ NSIM_EA(cls_bpf->common.extack,
+ "only ETH_P_ALL supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ns->bpf_tc_accept) {
+ NSIM_EA(cls_bpf->common.extack,
+ "netdevsim configured to reject BPF TC offload");
+ return -EOPNOTSUPP;
+ }
+ /* Note: progs without skip_sw will probably not be dev bound */
+ if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
+ NSIM_EA(cls_bpf->common.extack,
+ "netdevsim configured to reject unbound programs");
+ return -EOPNOTSUPP;
+ }
+
+ if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
+ return -EOPNOTSUPP;
+
+ oldprog = cls_bpf->oldprog;
+
+ /* Don't remove if oldprog doesn't match driver's state */
+ if (ns->bpf_offloaded != oldprog) {
+ oldprog = NULL;
+ if (!cls_bpf->prog)
+ return 0;
+ if (ns->bpf_offloaded) {
+ NSIM_EA(cls_bpf->common.extack,
+ "driver and netdev offload states mismatch");
+ return -EBUSY;
+ }
+ }
+
+ return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
+}
+
+int nsim_bpf_disable_tc(struct netdevsim *ns)
+{
+ if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
+ return -EBUSY;
+ return 0;
+}
+
+static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ if (!nsim_xdp_offload_active(ns) && !bpf->prog)
+ return 0;
+ if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
+ NSIM_EA(bpf->extack, "TC program is already loaded");
+ return -EBUSY;
+ }
+
+ return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
+}
+
+static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ int err;
+
+ if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) {
+ NSIM_EA(bpf->extack, "program loaded with different flags");
+ return -EBUSY;
+ }
+
+ if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
+ NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
+ return -EOPNOTSUPP;
+ }
+ if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
+ NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
+ return -EOPNOTSUPP;
+ }
+
+ if (bpf->command == XDP_SETUP_PROG_HW) {
+ err = nsim_xdp_offload_prog(ns, bpf);
+ if (err)
+ return err;
+ }
+
+ if (ns->xdp_prog)
+ bpf_prog_put(ns->xdp_prog);
+
+ ns->xdp_prog = bpf->prog;
+ ns->xdp_flags = bpf->flags;
+
+ if (!bpf->prog)
+ ns->xdp_prog_mode = XDP_ATTACHED_NONE;
+ else if (bpf->command == XDP_SETUP_PROG)
+ ns->xdp_prog_mode = XDP_ATTACHED_DRV;
+ else
+ ns->xdp_prog_mode = XDP_ATTACHED_HW;
+
+ return 0;
+}
+
+static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
+{
+ struct nsim_bpf_bound_prog *state;
+ char name[16];
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->ns = ns;
+ state->prog = prog;
+ state->state = "verify";
+
+ /* Program id is not populated yet when we create the state. */
+ sprintf(name, "%u", ns->prog_id_gen++);
+ state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs);
+ if (IS_ERR_OR_NULL(state->ddir)) {
+ kfree(state);
+ return -ENOMEM;
+ }
+
+ debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
+ debugfs_create_file("state", 0400, state->ddir,
+ &state->state, &nsim_bpf_string_fops);
+ debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
+
+ list_add_tail(&state->l, &ns->bpf_bound_progs);
+
+ prog->aux->offload->dev_priv = state;
+
+ return 0;
+}
+
+static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ state = prog->aux->offload->dev_priv;
+ WARN(state->is_loaded,
+ "offload state destroyed while program still bound");
+ debugfs_remove_recursive(state->ddir);
+ list_del(&state->l);
+ kfree(state);
+}
+
+static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ if (bpf->prog && bpf->prog->aux->offload) {
+ NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
+ return -EINVAL;
+ }
+ if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
+ NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
+ return -EINVAL;
+ }
+ if (nsim_xdp_offload_active(ns)) {
+ NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int
+nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ if (!bpf->prog)
+ return 0;
+
+ if (!bpf->prog->aux->offload) {
+ NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
+ return -EINVAL;
+ }
+ if (bpf->prog->aux->offload->netdev != ns->netdev) {
+ NSIM_EA(bpf->extack, "program bound to different dev");
+ return -EINVAL;
+ }
+
+ state = bpf->prog->aux->offload->dev_priv;
+ if (WARN_ON(strcmp(state->state, "xlated"))) {
+ NSIM_EA(bpf->extack, "offloading program in bad state");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static bool
+nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
+{
+ return e->key && !memcmp(key, e->key, map->key_size);
+}
+
+static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
+ if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
+ return i;
+
+ return -ENOENT;
+}
+
+static int
+nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+
+ nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+ if (!nmap->entry[idx].key)
+ return -ENOMEM;
+ nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+ if (!nmap->entry[idx].value) {
+ kfree(nmap->entry[idx].key);
+ nmap->entry[idx].key = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx = -ENOENT;
+
+ mutex_lock(&nmap->mutex);
+
+ if (key)
+ idx = nsim_map_key_find(offmap, key);
+ if (idx == -ENOENT)
+ idx = 0;
+ else
+ idx++;
+
+ for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
+ if (nmap->entry[idx].key) {
+ memcpy(next_key, nmap->entry[idx].key,
+ offmap->map.key_size);
+ break;
+ }
+ }
+
+ mutex_unlock(&nmap->mutex);
+
+ if (idx == ARRAY_SIZE(nmap->entry))
+ return -ENOENT;
+ return 0;
+}
+
+static int
+nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx >= 0)
+ memcpy(value, nmap->entry[idx].value, offmap->map.value_size);
+
+ mutex_unlock(&nmap->mutex);
+
+ return idx < 0 ? idx : 0;
+}
+
+static int
+nsim_map_update_elem(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx, err = 0;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx < 0 && flags == BPF_EXIST) {
+ err = idx;
+ goto exit_unlock;
+ }
+ if (idx >= 0 && flags == BPF_NOEXIST) {
+ err = -EEXIST;
+ goto exit_unlock;
+ }
+
+ if (idx < 0) {
+ for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
+ if (!nmap->entry[idx].key)
+ break;
+ if (idx == ARRAY_SIZE(nmap->entry)) {
+ err = -E2BIG;
+ goto exit_unlock;
+ }
+
+ err = nsim_map_alloc_elem(offmap, idx);
+ if (err)
+ goto exit_unlock;
+ }
+
+ memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
+ memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
+exit_unlock:
+ mutex_unlock(&nmap->mutex);
+
+ return err;
+}
+
+static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx;
+
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+ return -EINVAL;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx >= 0) {
+ kfree(nmap->entry[idx].key);
+ kfree(nmap->entry[idx].value);
+ memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
+ }
+
+ mutex_unlock(&nmap->mutex);
+
+ return idx < 0 ? idx : 0;
+}
+
+static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
+ .map_get_next_key = nsim_map_get_next_key,
+ .map_lookup_elem = nsim_map_lookup_elem,
+ .map_update_elem = nsim_map_update_elem,
+ .map_delete_elem = nsim_map_delete_elem,
+};
+
+static int
+nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
+{
+ struct nsim_bpf_bound_map *nmap;
+ unsigned int i;
+ int err;
+
+ if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
+ offmap->map.map_type != BPF_MAP_TYPE_HASH))
+ return -EINVAL;
+ if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
+ return -ENOMEM;
+ if (offmap->map.map_flags)
+ return -EINVAL;
+
+ nmap = kzalloc(sizeof(*nmap), GFP_USER);
+ if (!nmap)
+ return -ENOMEM;
+
+ offmap->dev_priv = nmap;
+ nmap->ns = ns;
+ nmap->map = offmap;
+ mutex_init(&nmap->mutex);
+
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+ u32 *key;
+
+ err = nsim_map_alloc_elem(offmap, i);
+ if (err)
+ goto err_free;
+ key = nmap->entry[i].key;
+ *key = i;
+ }
+ }
+
+ offmap->dev_ops = &nsim_bpf_map_ops;
+ list_add_tail(&nmap->l, &ns->bpf_bound_maps);
+
+ return 0;
+
+err_free:
+ while (i--) {	/* free every element allocated before the failure */
+ kfree(nmap->entry[i].key);
+ kfree(nmap->entry[i].value);
+ }
+ kfree(nmap);
+ return err;
+}
+
+static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+ kfree(nmap->entry[i].key);
+ kfree(nmap->entry[i].value);
+ }
+ list_del_init(&nmap->l);
+ mutex_destroy(&nmap->mutex);
+ kfree(nmap);
+}
+
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bpf_bound_prog *state;
+ int err;
+
+ ASSERT_RTNL();
+
+ switch (bpf->command) {
+ case BPF_OFFLOAD_VERIFIER_PREP:
+ if (!ns->bpf_bind_accept)
+ return -EOPNOTSUPP;
+
+ err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
+ if (err)
+ return err;
+
+ bpf->verifier.ops = &nsim_bpf_analyzer_ops;
+ return 0;
+ case BPF_OFFLOAD_TRANSLATE:
+ state = bpf->offload.prog->aux->offload->dev_priv;
+
+ state->state = "xlated";
+ return 0;
+ case BPF_OFFLOAD_DESTROY:
+ nsim_bpf_destroy_prog(bpf->offload.prog);
+ return 0;
+ case XDP_QUERY_PROG:
+ bpf->prog_attached = ns->xdp_prog_mode;
+ bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0;
+ bpf->prog_flags = ns->xdp_prog ? ns->xdp_flags : 0;
+ return 0;
+ case XDP_SETUP_PROG:
+ err = nsim_setup_prog_checks(ns, bpf);
+ if (err)
+ return err;
+
+ return nsim_xdp_set_prog(ns, bpf);
+ case XDP_SETUP_PROG_HW:
+ err = nsim_setup_prog_hw_checks(ns, bpf);
+ if (err)
+ return err;
+
+ return nsim_xdp_set_prog(ns, bpf);
+ case BPF_OFFLOAD_MAP_ALLOC:
+ if (!ns->bpf_map_accept)
+ return -EOPNOTSUPP;
+
+ return nsim_bpf_map_alloc(ns, bpf->offmap);
+ case BPF_OFFLOAD_MAP_FREE:
+ nsim_bpf_map_free(bpf->offmap);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int nsim_bpf_init(struct netdevsim *ns)
+{
+ INIT_LIST_HEAD(&ns->bpf_bound_progs);
+ INIT_LIST_HEAD(&ns->bpf_bound_maps);
+
+ debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
+ &ns->bpf_offloaded_id);
+
+ ns->bpf_bind_accept = true;
+ debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
+ &ns->bpf_bind_accept);
+ debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
+ &ns->bpf_bind_verifier_delay);
+ ns->ddir_bpf_bound_progs =
+ debugfs_create_dir("bpf_bound_progs", ns->ddir);
+ if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs))
+ return -ENOMEM;
+
+ ns->bpf_tc_accept = true;
+ debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
+ &ns->bpf_tc_accept);
+ debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir,
+ &ns->bpf_tc_non_bound_accept);
+ ns->bpf_xdpdrv_accept = true;
+ debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir,
+ &ns->bpf_xdpdrv_accept);
+ ns->bpf_xdpoffload_accept = true;
+ debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
+ &ns->bpf_xdpoffload_accept);
+
+ ns->bpf_map_accept = true;
+ debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
+ &ns->bpf_map_accept);
+
+ return 0;
+}
+
+void nsim_bpf_uninit(struct netdevsim *ns)
+{
+ WARN_ON(!list_empty(&ns->bpf_bound_progs));
+ WARN_ON(!list_empty(&ns->bpf_bound_maps));
+ WARN_ON(ns->xdp_prog);
+ WARN_ON(ns->bpf_offloaded);
+}
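nsim_map_update_elem() above implements the standard bpf(2) update-flag
contract: BPF_EXIST demands the key be present, BPF_NOEXIST demands it be
absent, and BPF_ANY accepts either. A condensed sketch of just that check
(demo_check_flags is illustrative):

#include <linux/bpf.h>
#include <linux/errno.h>

static int demo_check_flags(bool key_exists, u64 flags)
{
	if (!key_exists && flags == BPF_EXIST)
		return -ENOENT;
	if (key_exists && flags == BPF_NOEXIST)
		return -EEXIST;
	return 0;	/* BPF_ANY, or the requested condition holds */
}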
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
new file mode 100644
index 000000000000..3fd567928f3d
--- /dev/null
+++ b/drivers/net/netdevsim/netdev.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <net/netlink.h>
+#include <net/pkt_cls.h>
+#include <net/rtnetlink.h>
+
+#include "netdevsim.h"
+
+struct nsim_vf_config {
+ int link_state;
+ u16 min_tx_rate;
+ u16 max_tx_rate;
+ u16 vlan;
+ __be16 vlan_proto;
+ u16 qos;
+ u8 vf_mac[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool trusted;
+ bool rss_query_enabled;
+};
+
+static u32 nsim_dev_id;
+
+static int nsim_num_vf(struct device *dev)
+{
+ struct netdevsim *ns = to_nsim(dev);
+
+ return ns->num_vfs;
+}
+
+static struct bus_type nsim_bus = {
+ .name = DRV_NAME,
+ .dev_name = DRV_NAME,
+ .num_vf = nsim_num_vf,
+};
+
+static int nsim_vfs_enable(struct netdevsim *ns, unsigned int num_vfs)
+{
+ ns->vfconfigs = kcalloc(num_vfs, sizeof(struct nsim_vf_config),
+ GFP_KERNEL);
+ if (!ns->vfconfigs)
+ return -ENOMEM;
+ ns->num_vfs = num_vfs;
+
+ return 0;
+}
+
+static void nsim_vfs_disable(struct netdevsim *ns)
+{
+ kfree(ns->vfconfigs);
+ ns->vfconfigs = NULL;
+ ns->num_vfs = 0;
+}
+
+static ssize_t
+nsim_numvfs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netdevsim *ns = to_nsim(dev);
+ unsigned int num_vfs;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &num_vfs);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ if (ns->num_vfs == num_vfs)
+ goto exit_good;
+ if (ns->num_vfs && num_vfs) {
+ ret = -EBUSY;
+ goto exit_unlock;
+ }
+
+ if (num_vfs) {
+ ret = nsim_vfs_enable(ns, num_vfs);
+ if (ret)
+ goto exit_unlock;
+ } else {
+ nsim_vfs_disable(ns);
+ }
+exit_good:
+ ret = count;
+exit_unlock:
+ rtnl_unlock();
+
+ return ret;
+}
+
+static ssize_t
+nsim_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct netdevsim *ns = to_nsim(dev);
+
+ return sprintf(buf, "%u\n", ns->num_vfs);
+}
+
+static struct device_attribute nsim_numvfs_attr =
+ __ATTR(sriov_numvfs, 0664, nsim_numvfs_show, nsim_numvfs_store);
+
+static struct attribute *nsim_dev_attrs[] = {
+ &nsim_numvfs_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group nsim_dev_attr_group = {
+ .attrs = nsim_dev_attrs,
+};
+
+static const struct attribute_group *nsim_dev_attr_groups[] = {
+ &nsim_dev_attr_group,
+ NULL,
+};
+
+static void nsim_dev_release(struct device *dev)
+{
+ struct netdevsim *ns = to_nsim(dev);
+
+ nsim_vfs_disable(ns);
+ free_netdev(ns->netdev);
+}
+
+static struct device_type nsim_dev_type = {
+ .groups = nsim_dev_attr_groups,
+ .release = nsim_dev_release,
+};
+
+static int nsim_init(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ ns->netdev = dev;
+ ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir);
+ if (IS_ERR_OR_NULL(ns->ddir))
+ return -ENOMEM;
+
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_debugfs_destroy;
+
+ ns->dev.id = nsim_dev_id++;
+ ns->dev.bus = &nsim_bus;
+ ns->dev.type = &nsim_dev_type;
+ err = device_register(&ns->dev);
+ if (err)
+ goto err_bpf_uninit;
+
+ SET_NETDEV_DEV(dev, &ns->dev);
+
+ return 0;
+
+err_bpf_uninit:
+ nsim_bpf_uninit(ns);
+err_debugfs_destroy:
+ debugfs_remove_recursive(ns->ddir);
+ return err;
+}
+
+static void nsim_uninit(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ debugfs_remove_recursive(ns->ddir);
+ nsim_bpf_uninit(ns);
+}
+
+static void nsim_free(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ device_unregister(&ns->dev);
+ /* netdev and vf state will be freed out of device_release() */
+}
+
+static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ u64_stats_update_begin(&ns->syncp);
+ ns->tx_packets++;
+ ns->tx_bytes += skb->len;
+ u64_stats_update_end(&ns->syncp);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void nsim_set_rx_mode(struct net_device *dev)
+{
+}
+
+static int nsim_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->xdp_prog_mode == XDP_ATTACHED_DRV &&
+ new_mtu > NSIM_XDP_MAX_MTU)
+ return -EBUSY;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static void
+nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&ns->syncp);
+ stats->tx_bytes = ns->tx_bytes;
+ stats->tx_packets = ns->tx_packets;
+ } while (u64_stats_fetch_retry(&ns->syncp, start));
+}
+
+static int
+nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv);
+}
+
+static int
+nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb,
+ ns, ns);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ /* Only refuse multicast addresses; the zero address can mean unset/any. */
+ if (vf >= ns->num_vfs || is_multicast_ether_addr(mac))
+ return -EINVAL;
+ memcpy(ns->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
+
+ return 0;
+}
+
+static int nsim_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+
+ ns->vfconfigs[vf].vlan = vlan;
+ ns->vfconfigs[vf].qos = qos;
+ ns->vfconfigs[vf].vlan_proto = vlan_proto;
+
+ return 0;
+}
+
+static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+
+ ns->vfconfigs[vf].min_tx_rate = min;
+ ns->vfconfigs[vf].max_tx_rate = max;
+
+ return 0;
+}
+
+static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+ ns->vfconfigs[vf].spoofchk_enabled = val;
+
+ return 0;
+}
+
+static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+ ns->vfconfigs[vf].rss_query_enabled = val;
+
+ return 0;
+}
+
+static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+ ns->vfconfigs[vf].trusted = val;
+
+ return 0;
+}
+
+static int
+nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+
+ ivi->vf = vf;
+ ivi->linkstate = ns->vfconfigs[vf].link_state;
+ ivi->min_tx_rate = ns->vfconfigs[vf].min_tx_rate;
+ ivi->max_tx_rate = ns->vfconfigs[vf].max_tx_rate;
+ ivi->vlan = ns->vfconfigs[vf].vlan;
+ ivi->vlan_proto = ns->vfconfigs[vf].vlan_proto;
+ ivi->qos = ns->vfconfigs[vf].qos;
+ memcpy(&ivi->mac, ns->vfconfigs[vf].vf_mac, ETH_ALEN);
+ ivi->spoofchk = ns->vfconfigs[vf].spoofchk_enabled;
+ ivi->trusted = ns->vfconfigs[vf].trusted;
+ ivi->rss_query_en = ns->vfconfigs[vf].rss_query_enabled;
+
+ return 0;
+}
+
+static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ case IFLA_VF_LINK_STATE_ENABLE:
+ case IFLA_VF_LINK_STATE_DISABLE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ns->vfconfigs[vf].link_state = state;
+
+ return 0;
+}
+
+static int
+nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nsim_setup_tc_block(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nsim_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC))
+ return nsim_bpf_disable_tc(ns);
+
+ return 0;
+}
+
+static const struct net_device_ops nsim_netdev_ops = {
+ .ndo_init = nsim_init,
+ .ndo_uninit = nsim_uninit,
+ .ndo_start_xmit = nsim_start_xmit,
+ .ndo_set_rx_mode = nsim_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = nsim_change_mtu,
+ .ndo_get_stats64 = nsim_get_stats64,
+ .ndo_set_vf_mac = nsim_set_vf_mac,
+ .ndo_set_vf_vlan = nsim_set_vf_vlan,
+ .ndo_set_vf_rate = nsim_set_vf_rate,
+ .ndo_set_vf_spoofchk = nsim_set_vf_spoofchk,
+ .ndo_set_vf_trust = nsim_set_vf_trust,
+ .ndo_get_vf_config = nsim_get_vf_config,
+ .ndo_set_vf_link_state = nsim_set_vf_link_state,
+ .ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
+ .ndo_setup_tc = nsim_setup_tc,
+ .ndo_set_features = nsim_set_features,
+ .ndo_bpf = nsim_bpf,
+};
+
+static void nsim_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ eth_hw_addr_random(dev);
+
+ dev->netdev_ops = &nsim_netdev_ops;
+ dev->priv_destructor = nsim_free;
+
+ dev->tx_queue_len = 0;
+ dev->flags |= IFF_NOARP;
+ dev->flags &= ~IFF_MULTICAST;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
+ IFF_NO_QUEUE;
+ dev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
+ NETIF_F_FRAGLIST |
+ NETIF_F_HW_CSUM |
+ NETIF_F_TSO;
+ dev->hw_features |= NETIF_F_HW_TC;
+ dev->max_mtu = ETH_MAX_MTU;
+}
+
+static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static struct rtnl_link_ops nsim_link_ops __read_mostly = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct netdevsim),
+ .setup = nsim_setup,
+ .validate = nsim_validate,
+};
+
+struct dentry *nsim_ddir;
+
+static int __init nsim_module_init(void)
+{
+ int err;
+
+ nsim_ddir = debugfs_create_dir(DRV_NAME, NULL);
+ if (IS_ERR_OR_NULL(nsim_ddir))
+ return -ENOMEM;
+
+ err = bus_register(&nsim_bus);
+ if (err)
+ goto err_debugfs_destroy;
+
+ err = rtnl_link_register(&nsim_link_ops);
+ if (err)
+ goto err_unreg_bus;
+
+ return 0;
+
+err_unreg_bus:
+ bus_unregister(&nsim_bus);
+err_debugfs_destroy:
+ debugfs_remove_recursive(nsim_ddir);
+ return err;
+}
+
+static void __exit nsim_module_exit(void)
+{
+ rtnl_link_unregister(&nsim_link_ops);
+ bus_unregister(&nsim_bus);
+ debugfs_remove_recursive(nsim_ddir);
+}
+
+module_init(nsim_module_init);
+module_exit(nsim_module_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
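For context, a minimal userspace sketch exercising the sriov_numvfs attribute registered above. It assumes a netdevsim netdev has already been created through the rtnl link ops, and the sysfs path is inferred from the "netdevsim" bus/device name plus the incrementing device id rather than taken from the patch itself:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumed from the bus name, dev_name and nsim_dev_id above. */
	const char *path = "/sys/bus/netdevsim/devices/netdevsim0/sriov_numvfs";
	char buf[16];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Enable four fake VFs; writing another non-zero count without
	 * first writing 0 makes nsim_numvfs_store() return -EBUSY.
	 */
	if (write(fd, "4", 1) != 1)
		perror("write");

	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("num_vfs: %s", buf);	/* show() appends '\n' */
	}

	close(fd);
	return 0;
}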
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
new file mode 100644
index 000000000000..ea081c10efb8
--- /dev/null
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+
+#define DRV_NAME "netdevsim"
+
+#define NSIM_XDP_MAX_MTU 4000
+
+#define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg)
+
+struct bpf_prog;
+struct dentry;
+struct nsim_vf_config;
+
+struct netdevsim {
+ struct net_device *netdev;
+
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+
+ struct device dev;
+
+ struct dentry *ddir;
+
+ unsigned int num_vfs;
+ struct nsim_vf_config *vfconfigs;
+
+ struct bpf_prog *bpf_offloaded;
+ u32 bpf_offloaded_id;
+
+ u32 xdp_flags;
+ int xdp_prog_mode;
+ struct bpf_prog *xdp_prog;
+
+ u32 prog_id_gen;
+
+ bool bpf_bind_accept;
+ u32 bpf_bind_verifier_delay;
+ struct dentry *ddir_bpf_bound_progs;
+ struct list_head bpf_bound_progs;
+
+ bool bpf_tc_accept;
+ bool bpf_tc_non_bound_accept;
+ bool bpf_xdpdrv_accept;
+ bool bpf_xdpoffload_accept;
+
+ bool bpf_map_accept;
+ struct list_head bpf_bound_maps;
+};
+
+extern struct dentry *nsim_ddir;
+
+#ifdef CONFIG_BPF_SYSCALL
+int nsim_bpf_init(struct netdevsim *ns);
+void nsim_bpf_uninit(struct netdevsim *ns);
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int nsim_bpf_disable_tc(struct netdevsim *ns);
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv);
+#else
+static inline int nsim_bpf_init(struct netdevsim *ns)
+{
+ return 0;
+}
+
+static inline void nsim_bpf_uninit(struct netdevsim *ns)
+{
+}
+
+static inline int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ return bpf->command == XDP_QUERY_PROG ? 0 : -EOPNOTSUPP;
+}
+
+static inline int nsim_bpf_disable_tc(struct netdevsim *ns)
+{
+ return 0;
+}
+
+static inline int
+nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static inline struct netdevsim *to_nsim(struct device *ptr)
+{
+ return container_of(ptr, struct netdevsim, dev);
+}
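to_nsim() above is the usual container_of() upcast from the embedded struct device back to its owner. A self-contained userspace analogue (hypothetical structure names, standard offsetof arithmetic) for readers unfamiliar with the idiom:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_device { int id; };

struct fake_netdevsim {
	unsigned int num_vfs;
	struct fake_device dev;		/* embedded, as in struct netdevsim */
};

int main(void)
{
	struct fake_netdevsim ns = { .num_vfs = 2, .dev = { .id = 7 } };
	struct fake_device *dev = &ns.dev;

	/* Subtracting the member offset recovers the containing object. */
	struct fake_netdevsim *back =
		container_of(dev, struct fake_netdevsim, dev);

	printf("%u\n", back->num_vfs);	/* prints 2 */
	return 0;
}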
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 18141c022b13..6fe5dc9201d0 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -68,8 +68,6 @@ static struct phy_driver am79c_driver[] = { {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = am79c_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
} };
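This and the PHY driver diffs that follow delete explicit genphy_config_aneg/genphy_read_status callbacks: the phylib core now falls back to the generic implementations when a driver leaves those hooks NULL. A simplified restatement of the read_status side of that fallback (an approximation of the include/linux/phy.h helper, not a verbatim copy):

static inline int phy_read_status(struct phy_device *phydev)
{
	if (!phydev->drv)
		return -EIO;

	/* A driver hook wins; otherwise fall back to the generic MII
	 * code, which is why the drivers below can simply drop the
	 * assignment.
	 */
	if (phydev->drv->read_status)
		return phydev->drv->read_status(phydev);

	return genphy_read_status(phydev);
}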
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index e911e4990b20..411cf1072bae 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -71,7 +71,6 @@ MODULE_LICENSE("GPL");
struct at803x_priv {
bool phy_reset:1;
- struct gpio_desc *gpiod_reset;
};
struct at803x_context {
@@ -216,56 +215,33 @@ static int at803x_suspend(struct phy_device *phydev)
int value;
int wol_enabled;
- mutex_lock(&phydev->lock);
-
value = phy_read(phydev, AT803X_INTR_ENABLE);
wol_enabled = value & AT803X_INTR_ENABLE_WOL;
- value = phy_read(phydev, MII_BMCR);
-
if (wol_enabled)
- value |= BMCR_ISOLATE;
+ value = BMCR_ISOLATE;
else
- value |= BMCR_PDOWN;
-
- phy_write(phydev, MII_BMCR, value);
+ value = BMCR_PDOWN;
- mutex_unlock(&phydev->lock);
+ phy_modify(phydev, MII_BMCR, 0, value);
return 0;
}
static int at803x_resume(struct phy_device *phydev)
{
- int value;
-
- value = phy_read(phydev, MII_BMCR);
- value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
- phy_write(phydev, MII_BMCR, value);
-
- return 0;
+ return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
static int at803x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct at803x_priv *priv;
- struct gpio_desc *gpiod_reset;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- if (phydev->drv->phy_id != ATH8030_PHY_ID)
- goto does_not_require_reset_workaround;
-
- gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpiod_reset))
- return PTR_ERR(gpiod_reset);
-
- priv->gpiod_reset = gpiod_reset;
-
-does_not_require_reset_workaround:
phydev->priv = priv;
return 0;
@@ -339,14 +315,14 @@ static void at803x_link_change_notify(struct phy_device *phydev)
* cannot recover from by software.
*/
if (phydev->state == PHY_NOLINK) {
- if (priv->gpiod_reset && !priv->phy_reset) {
+ if (phydev->mdio.reset && !priv->phy_reset) {
struct at803x_context context;
at803x_context_save(phydev, &context);
- gpiod_set_value(priv->gpiod_reset, 1);
+ phy_device_reset(phydev, 1);
msleep(1);
- gpiod_set_value(priv->gpiod_reset, 0);
+ phy_device_reset(phydev, 0);
msleep(1);
at803x_context_restore(phydev, &context);
@@ -404,8 +380,6 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -422,8 +396,6 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -439,8 +411,6 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
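The at803x suspend/resume rewrite above is built on phy_modify(); a condensed sketch of its read-modify-write semantics (simplified: the real helper also has an unlocked __phy_modify() variant and its own error-code conventions):

static int phy_modify_sketch(struct phy_device *phydev, u32 regnum,
			     u16 mask, u16 set)
{
	int ret;

	ret = phy_read(phydev, regnum);
	if (ret < 0)
		return ret;

	/* Clear the mask bits, then OR in the requested bits. */
	return phy_write(phydev, regnum, (ret & ~mask) | set);
}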
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 3fe8cc5c177e..6838129839ca 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -136,8 +136,6 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
.name = "Broadcom Cygnus PHY",
.features = PHY_GBIT_FEATURES,
.config_init = bcm_cygnus_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index b0492ef2cdaa..cf14613745c9 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -69,8 +69,6 @@ static struct phy_driver bcm63xx_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
}, {
@@ -81,8 +79,6 @@ static struct phy_driver bcm63xx_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
} };
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 8b33f688ac8a..421feb8f92fe 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -611,8 +611,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.features = PHY_GBIT_FEATURES, \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_config_init, \
- .config_aneg = genphy_config_aneg, \
- .read_status = genphy_read_status, \
.resume = bcm7xxx_28nm_resume, \
.get_tunable = bcm7xxx_28nm_get_tunable, \
.set_tunable = bcm7xxx_28nm_set_tunable, \
@@ -630,8 +628,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.features = PHY_BASIC_FEATURES, \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_ephy_config_init, \
- .config_aneg = genphy_config_aneg, \
- .read_status = genphy_read_status, \
.resume = bcm7xxx_28nm_ephy_resume, \
.get_sset_count = bcm_phy_get_sset_count, \
.get_strings = bcm_phy_get_strings, \
@@ -647,8 +643,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.features = PHY_BASIC_FEATURES, \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_config_init, \
- .config_aneg = genphy_config_aneg, \
- .read_status = genphy_read_status, \
.suspend = bcm7xxx_suspend, \
.resume = bcm7xxx_config_init, \
}
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index d7ed69deabfb..3bb6b66dc7bf 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -540,6 +540,37 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
return err;
}
+struct bcm53xx_phy_priv {
+ u64 *stats;
+};
+
+static int bcm53xx_phy_probe(struct phy_device *phydev)
+{
+ struct bcm53xx_phy_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ priv->stats = devm_kcalloc(&phydev->mdio.dev,
+ bcm_phy_get_sset_count(phydev), sizeof(u64),
+ GFP_KERNEL);
+ if (!priv->stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bcm53xx_phy_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm53xx_phy_priv *priv = phydev->priv;
+
+ bcm_phy_get_stats(phydev, priv->stats, stats, data);
+}
+
static struct phy_driver broadcom_drivers[] = {
{
.phy_id = PHY_ID_BCM5411,
@@ -548,8 +579,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -559,8 +588,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -570,8 +597,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -581,8 +606,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -592,8 +615,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -603,8 +624,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -614,8 +633,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -626,7 +643,6 @@ static struct phy_driver broadcom_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -637,7 +653,6 @@ static struct phy_driver broadcom_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -647,7 +662,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm5482_config_init,
- .config_aneg = genphy_config_aneg,
.read_status = bcm5482_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -658,8 +672,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -669,8 +681,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -680,8 +690,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -691,8 +699,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
}, {
@@ -702,10 +708,18 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
+}, {
+ .phy_id = PHY_ID_BCM5395,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5395",
+ .flags = PHY_IS_INTERNAL,
+ .features = PHY_GBIT_FEATURES,
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm53xx_phy_get_stats,
+ .probe = bcm53xx_phy_probe,
} };
module_phy_driver(broadcom_drivers);
@@ -726,6 +740,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM57780, 0xfffffff0 },
{ PHY_ID_BCMAC131, 0xfffffff0 },
{ PHY_ID_BCM5241, 0xfffffff0 },
+ { PHY_ID_BCM5395, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index d339c1afea77..c05af00bf4b6 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -110,8 +110,6 @@ static struct phy_driver cis820x_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
}, {
@@ -121,8 +119,6 @@ static struct phy_driver cis820x_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
} };
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index e28913d9ea7e..5ee99b3b428c 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -153,7 +153,6 @@ static struct phy_driver dm91xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
}, {
@@ -164,7 +163,6 @@ static struct phy_driver dm91xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
}, {
@@ -175,7 +173,6 @@ static struct phy_driver dm91xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
}, {
@@ -184,8 +181,6 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
} };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index cbd629822f04..654f42d00092 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1502,8 +1502,6 @@ static struct phy_driver dp83640_driver = {
.probe = dp83640_probe,
.remove = dp83640_remove,
.config_init = dp83640_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dp83640_ack_interrupt,
.config_intr = dp83640_config_intr,
.ts_info = dp83640_ts_info,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 14335d14e9e4..6e8a2a4f3a6e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -325,8 +325,6 @@ static struct phy_driver dp83822_driver[] = {
.set_wol = dp83822_set_wol,
.ack_interrupt = dp83822_ack_interrupt,
.config_intr = dp83822_config_intr,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.suspend = dp83822_suspend,
.resume = dp83822_resume,
},
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 3966d43c5146..cd09c3af2117 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -95,8 +95,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
.config_init = genphy_config_init, \
.suspend = genphy_suspend, \
.resume = genphy_resume, \
- .config_aneg = genphy_config_aneg, \
- .read_status = genphy_read_status, \
\
/* IRQ related */ \
.ack_interrupt = dp83848_ack_interrupt, \
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c1ab976cc800..ab58224f897f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -324,8 +324,6 @@ static struct phy_driver dp83867_driver[] = {
.ack_interrupt = dp83867_ack_interrupt,
.config_intr = dp83867_config_intr,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index eb5167210681..001fe1df7557 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -115,37 +115,6 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-int fixed_phy_update_state(struct phy_device *phydev,
- const struct fixed_phy_status *status,
- const struct fixed_phy_status *changed)
-{
- struct fixed_mdio_bus *fmb = &platform_fmb;
- struct fixed_phy *fp;
-
- if (!phydev || phydev->mdio.bus != fmb->mii_bus)
- return -EINVAL;
-
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phydev->mdio.addr) {
- write_seqcount_begin(&fp->seqcount);
-#define _UPD(x) if (changed->x) \
- fp->status.x = status->x
- _UPD(link);
- _UPD(speed);
- _UPD(duplex);
- _UPD(pause);
- _UPD(asym_pause);
-#undef _UPD
- fixed_phy_update(fp);
- write_seqcount_end(&fp->seqcount);
- return 0;
- }
- }
-
- return -ENOENT;
-}
-EXPORT_SYMBOL(fixed_phy_update_state);
-
int fixed_phy_add(unsigned int irq, int phy_addr,
struct fixed_phy_status *status,
int link_gpio)
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 567280a72241..791587a49215 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -227,8 +227,6 @@ static struct phy_driver icplus_driver[] = {
.phy_id_mask = 0x0ffffff0,
.features = PHY_GBIT_FEATURES,
.config_init = &ip1001_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -239,8 +237,6 @@ static struct phy_driver icplus_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.ack_interrupt = ip101a_g_ack_interrupt,
.config_init = &ip101a_g_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 55f8c52dd2f1..a11f80cb5388 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -243,7 +243,6 @@ static struct phy_driver xway_gphy[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -257,7 +256,6 @@ static struct phy_driver xway_gphy[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -271,7 +269,6 @@ static struct phy_driver xway_gphy[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -285,7 +282,6 @@ static struct phy_driver xway_gphy[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -298,8 +294,6 @@ static struct phy_driver xway_gphy[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -312,8 +306,6 @@ static struct phy_driver xway_gphy[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -326,8 +318,6 @@ static struct phy_driver xway_gphy[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -340,8 +330,6 @@ static struct phy_driver xway_gphy[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 09d215177fff..c14b254b2879 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -259,8 +259,6 @@ static struct phy_driver lxt97x_driver[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = lxt970_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
}, {
@@ -269,8 +267,6 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
}, {
@@ -290,7 +286,6 @@ static struct phy_driver lxt97x_driver[] = {
.flags = 0,
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
- .read_status = genphy_read_status,
} };
module_phy_driver(lxt97x_driver);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 82104edca393..22d9bc9c33a4 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -83,7 +83,7 @@
#define MII_88E1121_PHY_MSCR_REG 21
#define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5)
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
-#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(BIT(5) | BIT(4)))
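+/* raw mask of the delay bits; phy_modify() clears these before setting */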
+#define MII_88E1121_PHY_MSCR_DELAY_MASK (BIT(5) | BIT(4))
#define MII_88E1121_MISC_TEST 0x1a
#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00
@@ -96,6 +96,17 @@
#define MII_88E1510_TEMP_SENSOR 0x1b
#define MII_88E1510_TEMP_SENSOR_MASK 0xff
+#define MII_88E6390_MISC_TEST 0x1b
+#define MII_88E6390_MISC_TEST_SAMPLE_1S 0
+#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14)
+#define MII_88E6390_MISC_TEST_SAMPLE_DISABLE BIT(15)
+#define MII_88E6390_MISC_TEST_SAMPLE_ENABLE 0
+#define MII_88E6390_MISC_TEST_SAMPLE_MASK (0x3 << 14)
+
+#define MII_88E6390_TEMP_SENSOR 0x1c
+#define MII_88E6390_TEMP_SENSOR_MASK 0xff
+#define MII_88E6390_TEMP_SENSOR_SAMPLES 10
+
#define MII_88E1318S_PHY_MSCR1_REG 16
#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
@@ -177,27 +188,19 @@ struct marvell_priv {
struct device *hwmon_dev;
};
-static int marvell_get_page(struct phy_device *phydev)
+static int marvell_read_page(struct phy_device *phydev)
{
- return phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ return __phy_read(phydev, MII_MARVELL_PHY_PAGE);
}
-static int marvell_set_page(struct phy_device *phydev, int page)
+static int marvell_write_page(struct phy_device *phydev, int page)
{
- return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
+ return __phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
}
-static int marvell_get_set_page(struct phy_device *phydev, int page)
+static int marvell_set_page(struct phy_device *phydev, int page)
{
- int oldpage = marvell_get_page(phydev);
-
- if (oldpage < 0)
- return oldpage;
-
- if (page != oldpage)
- return marvell_set_page(phydev, page);
-
- return 0;
+ return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
}
static int marvell_ack_interrupt(struct phy_device *phydev)
@@ -399,7 +402,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev)
static int marvell_of_reg_init(struct phy_device *phydev)
{
const __be32 *paddr;
- int len, i, saved_page, current_page, ret;
+ int len, i, saved_page, current_page, ret = 0;
if (!phydev->mdio.dev.of_node)
return 0;
@@ -409,12 +412,11 @@ static int marvell_of_reg_init(struct phy_device *phydev)
if (!paddr || len < (4 * sizeof(*paddr)))
return 0;
- saved_page = marvell_get_page(phydev);
+ saved_page = phy_save_page(phydev);
if (saved_page < 0)
- return saved_page;
+ goto err;
current_page = saved_page;
- ret = 0;
len /= sizeof(*paddr);
for (i = 0; i < len - 3; i += 4) {
u16 page = be32_to_cpup(paddr + i);
@@ -425,14 +427,14 @@ static int marvell_of_reg_init(struct phy_device *phydev)
if (page != current_page) {
current_page = page;
- ret = marvell_set_page(phydev, page);
+ ret = marvell_write_page(phydev, page);
if (ret < 0)
goto err;
}
val = 0;
if (mask) {
- val = phy_read(phydev, reg);
+ val = __phy_read(phydev, reg);
if (val < 0) {
ret = val;
goto err;
@@ -441,17 +443,12 @@ static int marvell_of_reg_init(struct phy_device *phydev)
}
val |= val_bits;
- ret = phy_write(phydev, reg, val);
+ ret = __phy_write(phydev, reg, val);
if (ret < 0)
goto err;
}
err:
- if (current_page != saved_page) {
- i = marvell_set_page(phydev, saved_page);
- if (ret == 0)
- ret = i;
- }
- return ret;
+ return phy_restore_page(phydev, saved_page, ret);
}
#else
static int marvell_of_reg_init(struct phy_device *phydev)
@@ -462,34 +459,21 @@ static int marvell_of_reg_init(struct phy_device *phydev)
static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
{
- int err, oldpage, mscr;
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
- if (oldpage < 0)
- return oldpage;
-
- mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG);
- if (mscr < 0) {
- err = mscr;
- goto out;
- }
-
- mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK;
+ int mscr;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
- MII_88E1121_PHY_MSCR_TX_DELAY);
+ mscr = MII_88E1121_PHY_MSCR_RX_DELAY |
+ MII_88E1121_PHY_MSCR_TX_DELAY;
else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
+ mscr = MII_88E1121_PHY_MSCR_RX_DELAY;
else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
-
- err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
-
-out:
- marvell_set_page(phydev, oldpage);
+ mscr = MII_88E1121_PHY_MSCR_TX_DELAY;
+ else
+ mscr = 0;
- return err;
+ return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1121_PHY_MSCR_REG,
+ MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
}
static int m88e1121_config_aneg(struct phy_device *phydev)
@@ -498,7 +482,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (phy_interface_is_rgmii(phydev)) {
err = m88e1121_config_aneg_rgmii_delays(phydev);
- if (err)
+ if (err < 0)
return err;
}
@@ -515,20 +499,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
static int m88e1318_config_aneg(struct phy_device *phydev)
{
- int err, oldpage, mscr;
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
- if (oldpage < 0)
- return oldpage;
-
- mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
- mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
-
- err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
- if (err < 0)
- return err;
+ int err;
- err = marvell_set_page(phydev, oldpage);
+ err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1318S_PHY_MSCR1_REG,
+ 0, MII_88E1318S_PHY_MSCR1_PAD_ODD);
if (err < 0)
return err;
@@ -700,19 +675,14 @@ static int m88e1116r_config_init(struct phy_device *phydev)
static int m88e3016_config_init(struct phy_device *phydev)
{
- int reg;
+ int ret;
/* Enable Scrambler and Auto-Crossover */
- reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
- if (reg < 0)
- return reg;
-
- reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
- reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
-
- reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
- if (reg < 0)
- return reg;
+ ret = phy_modify(phydev, MII_88E3016_PHY_SPEC_CTRL,
+ MII_88E3016_DISABLE_SCRAMBLER,
+ MII_88E3016_AUTO_MDIX_CROSSOVER);
+ if (ret < 0)
+ return ret;
return marvell_config_init(phydev);
}
@@ -721,42 +691,33 @@ static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev,
u16 mode,
int fibre_copper_auto)
{
- int temp;
-
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
-
- temp &= ~(MII_M1111_HWCFG_MODE_MASK |
- MII_M1111_HWCFG_FIBER_COPPER_AUTO |
- MII_M1111_HWCFG_FIBER_COPPER_RES);
- temp |= mode;
-
if (fibre_copper_auto)
- temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ mode |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ return phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+ MII_M1111_HWCFG_MODE_MASK |
+ MII_M1111_HWCFG_FIBER_COPPER_AUTO |
+ MII_M1111_HWCFG_FIBER_COPPER_RES,
+ mode);
}
static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev)
{
- int temp;
-
- temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
- if (temp < 0)
- return temp;
+ int delay;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
- temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY);
+ delay = MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY;
} else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- temp &= ~MII_M1111_RGMII_TX_DELAY;
- temp |= MII_M1111_RGMII_RX_DELAY;
+ delay = MII_M1111_RGMII_RX_DELAY;
} else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- temp &= ~MII_M1111_RGMII_RX_DELAY;
- temp |= MII_M1111_RGMII_TX_DELAY;
+ delay = MII_M1111_RGMII_TX_DELAY;
+ } else {
+ delay = 0;
}
- return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY,
+ delay);
}
static int m88e1111_config_init_rgmii(struct phy_device *phydev)
@@ -802,7 +763,7 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev)
int err;
err = m88e1111_config_init_rgmii_delays(phydev);
- if (err)
+ if (err < 0)
return err;
err = m88e1111_config_init_hwcfg_mode(
@@ -829,7 +790,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
if (phy_interface_is_rgmii(phydev)) {
err = m88e1111_config_init_rgmii(phydev);
- if (err)
+ if (err < 0)
return err;
}
@@ -854,20 +815,15 @@ static int m88e1111_config_init(struct phy_device *phydev)
static int m88e1121_config_init(struct phy_device *phydev)
{
- int err, oldpage;
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_LED_PAGE);
- if (oldpage < 0)
- return oldpage;
+ int err;
/* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
- err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
- MII_88E1121_PHY_LED_DEF);
+ err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
+ MII_88E1121_PHY_LED_CTRL,
+ MII_88E1121_PHY_LED_DEF);
if (err < 0)
return err;
- marvell_set_page(phydev, oldpage);
-
/* Set marvell,reg-init configuration from device tree */
return marvell_config_init(phydev);
}
@@ -875,7 +831,6 @@ static int m88e1121_config_init(struct phy_device *phydev)
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
- int temp;
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
@@ -887,16 +842,15 @@ static int m88e1510_config_init(struct phy_device *phydev)
return err;
/* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */
- temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1);
- temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK;
- temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII;
- err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+ err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1,
+ MII_88E1510_GEN_CTRL_REG_1_MODE_MASK,
+ MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII);
if (err < 0)
return err;
/* PHY reset is necessary after changing MODE[2:0] */
- temp |= MII_88E1510_GEN_CTRL_REG_1_RESET;
- err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+ err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, 0,
+ MII_88E1510_GEN_CTRL_REG_1_RESET);
if (err < 0)
return err;
@@ -1002,7 +956,6 @@ static int m88e1149_config_init(struct phy_device *phydev)
static int m88e1145_config_init_rgmii(struct phy_device *phydev)
{
- int temp;
int err;
err = m88e1111_config_init_rgmii_delays(phydev);
@@ -1014,15 +967,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev)
if (err < 0)
return err;
- temp = phy_read(phydev, 0x1e);
- if (temp < 0)
- return temp;
-
- temp &= 0xf03f;
- temp |= 2 << 9; /* 36 ohm */
- temp |= 2 << 6; /* 39 ohm */
-
- err = phy_write(phydev, 0x1e, temp);
+ err = phy_modify(phydev, 0x1e, 0x0fc0,
+ 2 << 9 | /* 36 ohm */
+ 2 << 6); /* 39 ohm */
if (err < 0)
return err;
@@ -1398,100 +1345,98 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
static void m88e1318_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
+ int oldpage, ret = 0;
+
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
- if (marvell_set_page(phydev, MII_MARVELL_WOL_PAGE) < 0)
- return;
+ oldpage = phy_select_page(phydev, MII_MARVELL_WOL_PAGE);
+ if (oldpage < 0)
+ goto error;
- if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
- MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+ ret = __phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
+ if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
wol->wolopts |= WAKE_MAGIC;
- if (marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE) < 0)
- return;
+error:
+ phy_restore_page(phydev, oldpage, ret);
}
static int m88e1318_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- int err, oldpage, temp;
+ int err = 0, oldpage;
- oldpage = marvell_get_page(phydev);
+ oldpage = phy_save_page(phydev);
+ if (oldpage < 0)
+ goto error;
if (wol->wolopts & WAKE_MAGIC) {
/* Explicitly switch to page 0x00, just to be sure */
- err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ err = marvell_write_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
- return err;
+ goto error;
/* Enable the WOL interrupt */
- temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
- temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
- err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp);
+ err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
+ MII_88E1318S_PHY_CSIER_WOL_EIE);
if (err < 0)
- return err;
+ goto error;
- err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
+ err = marvell_write_page(phydev, MII_MARVELL_LED_PAGE);
if (err < 0)
- return err;
+ goto error;
/* Setup LED[2] as interrupt pin (active low) */
- temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR);
- temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT;
- temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE;
- temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW;
- err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp);
+ err = __phy_modify(phydev, MII_88E1318S_PHY_LED_TCR,
+ MII_88E1318S_PHY_LED_TCR_FORCE_INT,
+ MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
+ MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
if (err < 0)
- return err;
+ goto error;
- err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+ err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
if (err < 0)
- return err;
+ goto error;
/* Store the device address for the magic packet */
- err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
+ err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
((phydev->attached_dev->dev_addr[5] << 8) |
phydev->attached_dev->dev_addr[4]));
if (err < 0)
- return err;
- err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
+ goto error;
+ err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
((phydev->attached_dev->dev_addr[3] << 8) |
phydev->attached_dev->dev_addr[2]));
if (err < 0)
- return err;
- err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
+ goto error;
+ err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
((phydev->attached_dev->dev_addr[1] << 8) |
phydev->attached_dev->dev_addr[0]));
if (err < 0)
- return err;
+ goto error;
/* Clear WOL status and enable magic packet matching */
- temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
- temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
- temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
- err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+ err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0,
+ MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS |
+ MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE);
if (err < 0)
- return err;
+ goto error;
} else {
- err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+ err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
if (err < 0)
- return err;
+ goto error;
/* Clear WOL status and disable magic packet matching */
- temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
- temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
- temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
- err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+ err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL,
+ MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE,
+ MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
if (err < 0)
- return err;
+ goto error;
}
- err = marvell_set_page(phydev, oldpage);
- if (err < 0)
- return err;
-
- return 0;
+error:
+ return phy_restore_page(phydev, oldpage, err);
}
static int marvell_get_sset_count(struct phy_device *phydev)
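Most of the conversions in this file reduce to one pattern built on the new paged-access helpers; a condensed sketch, reusing register and page names from this file with the flow simplified:

static int mscr_paged_sketch(struct phy_device *phydev)
{
	int oldpage, ret = 0;

	/* Takes the MDIO bus lock and selects the page. */
	oldpage = phy_select_page(phydev, MII_MARVELL_MSCR_PAGE);
	if (oldpage < 0)
		goto out;

	/* With the lock held, only the unlocked __phy_* accessors
	 * may be used.
	 */
	ret = __phy_modify(phydev, MII_88E1121_PHY_MSCR_REG,
			   MII_88E1121_PHY_MSCR_DELAY_MASK,
			   MII_88E1121_PHY_MSCR_RX_DELAY);
out:
	/* Restores the original page, drops the lock, and returns the
	 * first error seen (or 0), so callers need no extra cleanup.
	 */
	return phy_restore_page(phydev, oldpage, ret);
}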
@@ -1519,14 +1464,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
{
struct marvell_hw_stat stat = marvell_hw_stats[i];
struct marvell_priv *priv = phydev->priv;
- int oldpage, val;
+ int val;
u64 ret;
- oldpage = marvell_get_set_page(phydev, stat.page);
- if (oldpage < 0)
- return UINT64_MAX;
-
- val = phy_read(phydev, stat.reg);
+ val = phy_read_paged(phydev, stat.page, stat.reg);
if (val < 0) {
ret = UINT64_MAX;
} else {
@@ -1535,8 +1476,6 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
ret = priv->stats[i];
}
- marvell_set_page(phydev, oldpage);
-
return ret;
}
@@ -1553,51 +1492,44 @@ static void marvell_get_stats(struct phy_device *phydev,
static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
{
int oldpage;
- int ret;
+ int ret = 0;
int val;
*temp = 0;
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
+ oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0)
+ goto error;
/* Enable temperature sensor */
- ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ ret = __phy_read(phydev, MII_88E1121_MISC_TEST);
if (ret < 0)
goto error;
- ret = phy_write(phydev, MII_88E1121_MISC_TEST,
- ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+ ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+ ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
if (ret < 0)
goto error;
/* Wait for temperature to stabilize */
usleep_range(10000, 12000);
- val = phy_read(phydev, MII_88E1121_MISC_TEST);
+ val = __phy_read(phydev, MII_88E1121_MISC_TEST);
if (val < 0) {
ret = val;
goto error;
}
/* Disable temperature sensor */
- ret = phy_write(phydev, MII_88E1121_MISC_TEST,
- ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+ ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+ ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
if (ret < 0)
goto error;
*temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
-
- return ret;
+ return phy_restore_page(phydev, oldpage, ret);
}
static int m88e1121_hwmon_read(struct device *dev,
@@ -1671,118 +1603,64 @@ static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
{
- int oldpage;
int ret;
*temp = 0;
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
-
- ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
+ ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E1510_TEMP_SENSOR);
if (ret < 0)
- goto error;
+ return ret;
*temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
-error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
-
- return ret;
+ return 0;
}
static int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
{
- int oldpage;
int ret;
*temp = 0;
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
-
- ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E1121_MISC_TEST);
if (ret < 0)
- goto error;
+ return ret;
*temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >>
MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25;
/* convert to mC */
*temp *= 1000;
-error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
-
- return ret;
+ return 0;
}
static int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
{
- int oldpage;
- int ret;
-
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
-
- ret = phy_read(phydev, MII_88E1121_MISC_TEST);
- if (ret < 0)
- goto error;
-
temp = temp / 1000;
temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
- ret = phy_write(phydev, MII_88E1121_MISC_TEST,
- (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) |
- (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
-
-error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
- return ret;
+ return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E1121_MISC_TEST,
+ MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK,
+ temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT);
}
static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
{
- int oldpage;
int ret;
*alarm = false;
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
-
- ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E1121_MISC_TEST);
if (ret < 0)
- goto error;
- *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
+ return ret;
-error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
+ *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
- return ret;
+ return 0;
}
static int m88e1510_hwmon_read(struct device *dev,
@@ -1871,6 +1749,123 @@ static const struct hwmon_chip_info m88e1510_hwmon_chip_info = {
.info = m88e1510_hwmon_info,
};
+static int m88e6390_get_temp(struct phy_device *phydev, long *temp)
+{
+ int sum = 0;
+ int oldpage;
+ int ret = 0;
+ int i;
+
+ *temp = 0;
+
+ oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+ ret |= MII_88E6390_MISC_TEST_SAMPLE_ENABLE |
+ MII_88E6390_MISC_TEST_SAMPLE_1S;
+
+ ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+ /* Reading the temperature sensor is subject to an erratum: read it
+ * several times and take the average.
+ */
+ for (i = 0; i < MII_88E6390_TEMP_SENSOR_SAMPLES; i++) {
+ ret = __phy_read(phydev, MII_88E6390_TEMP_SENSOR);
+ if (ret < 0)
+ goto error;
+ sum += ret & MII_88E6390_TEMP_SENSOR_MASK;
+ }
+
+ sum /= MII_88E6390_TEMP_SENSOR_SAMPLES;
+ *temp = (sum - 75) * 1000;
+
+ /* Disable temperature sensor */
+ ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+ ret |= MII_88E6390_MISC_TEST_SAMPLE_DISABLE;
+
+ ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+
+error:
+ phy_restore_page(phydev, oldpage, ret);
+
+ return ret;
+}
+
+static int m88e6390_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = m88e6390_get_temp(phydev, temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static umode_t m88e6390_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static u32 m88e6390_hwmon_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info m88e6390_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = m88e6390_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e6390_hwmon_info[] = {
+ &m88e1121_hwmon_chip,
+ &m88e6390_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = {
+ .is_visible = m88e6390_hwmon_is_visible,
+ .read = m88e6390_hwmon_read,
+};
+
+static const struct hwmon_chip_info m88e6390_hwmon_chip_info = {
+ .ops = &m88e6390_hwmon_hwmon_ops,
+ .info = m88e6390_hwmon_info,
+};
+
static int marvell_hwmon_name(struct phy_device *phydev)
{
struct marvell_priv *priv = phydev->priv;
@@ -1917,6 +1912,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
{
return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info);
}
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+ return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info);
+}
#else
static int m88e1121_hwmon_probe(struct phy_device *phydev)
{
@@ -1927,6 +1927,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
{
return 0;
}
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
#endif
static int marvell_probe(struct phy_device *phydev)
@@ -1964,6 +1969,17 @@ static int m88e1510_probe(struct phy_device *phydev)
return m88e1510_hwmon_probe(phydev);
}
+static int m88e6390_probe(struct phy_device *phydev)
+{
+ int err;
+
+ err = marvell_probe(phydev);
+ if (err)
+ return err;
+
+ return m88e6390_hwmon_probe(phydev);
+}
+
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,11 +1990,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1101_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -1992,11 +2009,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2015,6 +2033,8 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2028,11 +2048,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &m88e1118_config_init,
.config_aneg = &m88e1118_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2052,6 +2073,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2073,6 +2096,8 @@ static struct phy_driver marvell_drivers[] = {
.set_wol = &m88e1318_set_wol,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2091,6 +2116,8 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2104,11 +2131,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &m88e1149_config_init,
.config_aneg = &m88e1118_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2122,11 +2150,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2139,12 +2168,12 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1116r_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2166,6 +2195,8 @@ static struct phy_driver marvell_drivers[] = {
.set_wol = &m88e1318_set_wol,
.resume = &marvell_resume,
.suspend = &marvell_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2186,6 +2217,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2205,6 +2238,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2216,7 +2251,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_aneg = &genphy_config_aneg,
.config_init = &m88e3016_config_init,
.aneg_done = &marvell_aneg_done,
.read_status = &marvell_read_status,
@@ -2225,6 +2259,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2235,7 +2271,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E6390",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .probe = m88e1510_probe,
+ .probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2244,6 +2280,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 21b3f36e023a..8a0bd98fdec7 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -6,12 +6,18 @@
*
* There appear to be several different data paths through the PHY which
* are automatically managed by the PHY. The following has been determined
- * via observation and experimentation:
+ * via observation and experimentation for a setup using single-lane Serdes:
*
* SGMII PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for <= 1G)
* 10GBASE-KR PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for 10G)
* 10GBASE-KR PHYXS -- BASE-R PCS -- Fiber
*
+ * With XAUI, observation shows:
+ *
+ * XAUI PHYXS -- <appropriate PCS as above>
+ *
+ * and no switching of the host interface mode occurs.
+ *
* If both the fiber and copper ports are connected, the first to gain
* link takes priority and the other port is completely locked out.
*/
@@ -23,19 +29,17 @@ enum {
MV_PCS_BASE_R = 0x1000,
MV_PCS_1000BASEX = 0x2000,
+ MV_PCS_PAIRSWAP = 0x8182,
+ MV_PCS_PAIRSWAP_MASK = 0x0003,
+ MV_PCS_PAIRSWAP_AB = 0x0002,
+ MV_PCS_PAIRSWAP_NONE = 0x0003,
+
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X values when AN is
* restarted, but status registers appear readable from either.
*/
MV_AN_CTRL1000 = 0x8000, /* 1000base-T control register */
MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */
-
- /* This register appears to reflect the copper status */
- MV_AN_RESULT = 0xa016,
- MV_AN_RESULT_SPD_10 = BIT(12),
- MV_AN_RESULT_SPD_100 = BIT(13),
- MV_AN_RESULT_SPD_1000 = BIT(14),
- MV_AN_RESULT_SPD_10000 = BIT(15),
};
static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
@@ -84,7 +88,6 @@ static int mv3310_config_init(struct phy_device *phydev)
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
- phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
phydev->interface != PHY_INTERFACE_MODE_10GKR)
@@ -150,12 +153,18 @@ static int mv3310_config_init(struct phy_device *phydev)
if (val & MDIO_PMA_EXTABLE_1000BKX)
__set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
supported);
- if (val & MDIO_PMA_EXTABLE_100BTX)
+ if (val & MDIO_PMA_EXTABLE_100BTX) {
__set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
supported);
- if (val & MDIO_PMA_EXTABLE_10BT)
+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ supported);
+ }
+ if (val & MDIO_PMA_EXTABLE_10BT) {
__set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
supported);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ supported);
+ }
}
if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
@@ -175,6 +184,9 @@ static int mv3310_config_aneg(struct phy_device *phydev)
u32 advertising;
int ret;
+ /* We don't support manual MDI control */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
if (phydev->autoneg == AUTONEG_DISABLE) {
ret = genphy_c45_pma_setup_forced(phydev);
if (ret < 0)
@@ -233,6 +245,24 @@ static int mv3310_aneg_done(struct phy_device *phydev)
return genphy_c45_aneg_done(phydev);
}
+static void mv3310_update_interface(struct phy_device *phydev)
+{
+ if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+ /* The PHY automatically switches its serdes interface (and
+ * active PHYXS instance) between Cisco SGMII and 10GBase-KR
+ * modes according to the speed. Florian suggests setting
+ * phydev->interface to communicate this to the MAC. Only do
+ * this if we are already in either SGMII or 10GBase-KR mode.
+ */
+ if (phydev->speed == SPEED_10000)
+ phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ else if (phydev->speed >= SPEED_10 &&
+ phydev->speed < SPEED_10000)
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ }
+}
+
/* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. */
static int mv3310_read_10gbr_status(struct phy_device *phydev)
{
@@ -240,8 +270,7 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev)
phydev->speed = SPEED_10000;
phydev->duplex = DUPLEX_FULL;
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ mv3310_update_interface(phydev);
return 0;
}
@@ -264,6 +293,7 @@ static int mv3310_read_status(struct phy_device *phydev)
phydev->link = 0;
phydev->pause = 0;
phydev->asym_pause = 0;
+ phydev->mdix = 0;
val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
if (val < 0)
@@ -294,22 +324,8 @@ static int mv3310_read_status(struct phy_device *phydev)
phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
- if (phydev->autoneg == AUTONEG_ENABLE) {
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_RESULT);
- if (val < 0)
- return val;
-
- if (val & MV_AN_RESULT_SPD_10000)
- phydev->speed = SPEED_10000;
- else if (val & MV_AN_RESULT_SPD_1000)
- phydev->speed = SPEED_1000;
- else if (val & MV_AN_RESULT_SPD_100)
- phydev->speed = SPEED_100;
- else if (val & MV_AN_RESULT_SPD_10)
- phydev->speed = SPEED_10;
-
- phydev->duplex = DUPLEX_FULL;
- }
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ phy_resolve_aneg_linkmode(phydev);
}
if (phydev->autoneg != AUTONEG_ENABLE) {
@@ -318,21 +334,30 @@ static int mv3310_read_status(struct phy_device *phydev)
return val;
}
- if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
- phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
- /* The PHY automatically switches its serdes interface (and
- * active PHYXS instance) between Cisco SGMII and 10GBase-KR
- * modes according to the speed. Florian suggests setting
- * phydev->interface to communicate this to the MAC. Only do
- * this if we are already in either SGMII or 10GBase-KR mode.
- */
- if (phydev->speed == SPEED_10000)
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
- else if (phydev->speed >= SPEED_10 &&
- phydev->speed < SPEED_10000)
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ if (phydev->speed == SPEED_10000) {
+ val = genphy_c45_read_mdix(phydev);
+ if (val < 0)
+ return val;
+ } else {
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PAIRSWAP);
+ if (val < 0)
+ return val;
+
+ switch (val & MV_PCS_PAIRSWAP_MASK) {
+ case MV_PCS_PAIRSWAP_AB:
+ phydev->mdix = ETH_TP_MDI_X;
+ break;
+ case MV_PCS_PAIRSWAP_NONE:
+ phydev->mdix = ETH_TP_MDI;
+ break;
+ default:
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ break;
+ }
}
+ mv3310_update_interface(phydev);
+
return 0;
}
@@ -342,7 +367,9 @@ static struct phy_driver mv3310_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x3310",
.features = SUPPORTED_10baseT_Full |
+ SUPPORTED_10baseT_Half |
SUPPORTED_100baseT_Full |
+ SUPPORTED_100baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP |
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 08e0647b85e2..8d370667fa1b 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -205,6 +205,8 @@ static int unimac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EINVAL;
/* Just ioremap, as this MDIO block is usually integrated into an
* Ethernet MAC controller register range
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 135296508a7e..6425ce04d3f9 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
data->regulator = devm_regulator_get(&pdev->dev, "phy");
if (IS_ERR(data->regulator)) {
- if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_out_free_mdiobus;
+ }
dev_info(&pdev->dev, "no regulator found\n");
data->regulator = NULL;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 54d00a1d2bef..88272b3ac2e2 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -38,6 +38,7 @@
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/gpio/consumer.h>
#include <asm/irq.h>
@@ -46,11 +47,41 @@
#include "mdio-boardinfo.h"
+static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
+{
+ struct gpio_desc *gpiod = NULL;
+
+ /* Deassert the optional reset signal */
+ if (mdiodev->dev.of_node)
+ gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode,
+ "reset-gpios", 0, GPIOD_OUT_LOW,
+ "PHY reset");
+ if (PTR_ERR(gpiod) == -ENOENT)
+ gpiod = NULL;
+ else if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ mdiodev->reset = gpiod;
+
+ /* Assert the reset signal again */
+ mdio_device_reset(mdiodev, 1);
+
+ return 0;
+}
+
int mdiobus_register_device(struct mdio_device *mdiodev)
{
+ int err;
+
if (mdiodev->bus->mdio_map[mdiodev->addr])
return -EBUSY;
+ if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) {
+ err = mdiobus_register_gpiod(mdiodev);
+ if (err)
+ return err;
+ }
+
mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev;
return 0;
@@ -421,6 +452,9 @@ void mdiobus_unregister(struct mii_bus *bus)
if (!mdiodev)
continue;
+ if (mdiodev->reset)
+ gpiod_put(mdiodev->reset);
+
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
}
@@ -494,6 +528,55 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
EXPORT_SYMBOL(mdiobus_scan);
/**
+ * __mdiobus_read - Unlocked version of the mdiobus_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to read
+ *
+ * Read an MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+{
+ int retval;
+
+ WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+ retval = bus->read(bus, addr, regnum);
+
+ trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+
+ return retval;
+}
+EXPORT_SYMBOL(__mdiobus_read);
+
+/**
+ * __mdiobus_write - Unlocked version of the mdiobus_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Write an MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
+{
+ int err;
+
+ WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+ err = bus->write(bus, addr, regnum, val);
+
+ trace_mdio_access(bus, 0, addr, regnum, val, err);
+
+ return err;
+}
+EXPORT_SYMBOL(__mdiobus_write);
+
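The new unlocked accessors let multi-register sequences stay atomic with respect to other bus users. A sketch of the intended pattern; the register number and bit are invented:

static int example_rmw(struct mii_bus *bus, int addr)
{
	int ret;

	mutex_lock(&bus->mdio_lock);

	/* No other bus access can interleave between these two ops */
	ret = __mdiobus_read(bus, addr, 0x1a);
	if (ret >= 0)
		ret = __mdiobus_write(bus, addr, 0x1a, ret | BIT(0));

	mutex_unlock(&bus->mdio_lock);

	return ret < 0 ? ret : 0;
}
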
+/**
* mdiobus_read_nested - Nested version of the mdiobus_read function
* @bus: the mii_bus struct
* @addr: the phy address
@@ -513,11 +596,9 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
BUG_ON(in_interrupt());
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- retval = bus->read(bus, addr, regnum);
+ retval = __mdiobus_read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
- trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
return retval;
}
EXPORT_SYMBOL(mdiobus_read_nested);
@@ -539,11 +620,9 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
BUG_ON(in_interrupt());
mutex_lock(&bus->mdio_lock);
- retval = bus->read(bus, addr, regnum);
+ retval = __mdiobus_read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
- trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
return retval;
}
EXPORT_SYMBOL(mdiobus_read);
@@ -569,11 +648,9 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
BUG_ON(in_interrupt());
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- err = bus->write(bus, addr, regnum, val);
+ err = __mdiobus_write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
- trace_mdio_access(bus, 0, addr, regnum, val, err);
-
return err;
}
EXPORT_SYMBOL(mdiobus_write_nested);
@@ -596,11 +673,9 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
BUG_ON(in_interrupt());
mutex_lock(&bus->mdio_lock);
- err = bus->write(bus, addr, regnum, val);
+ err = __mdiobus_write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
- trace_mdio_access(bus, 0, addr, regnum, val, err);
-
return err;
}
EXPORT_SYMBOL(mdiobus_write);
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index e24f28924af8..c924700cf37b 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -12,6 +12,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -22,6 +24,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/unistd.h>
+#include <linux/delay.h>
void mdio_device_free(struct mdio_device *mdiodev)
{
@@ -114,6 +117,21 @@ void mdio_device_remove(struct mdio_device *mdiodev)
}
EXPORT_SYMBOL(mdio_device_remove);
+void mdio_device_reset(struct mdio_device *mdiodev, int value)
+{
+ unsigned int d;
+
+ if (!mdiodev->reset)
+ return;
+
+ gpiod_set_value(mdiodev->reset, value);
+
+ d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
+ if (d)
+ usleep_range(d, d + max_t(unsigned int, d / 10, 100));
+}
+EXPORT_SYMBOL(mdio_device_reset);
+
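A hypothetical caller, showing how the delay fields interact with the new helper; the values are invented and would normally come from firmware-provided timing properties:

static void example_phy_hard_reset(struct mdio_device *mdiodev)
{
	mdiodev->reset_assert_delay = 1000;	/* microseconds */
	mdiodev->reset_deassert_delay = 2000;	/* microseconds */

	mdio_device_reset(mdiodev, 1);	/* assert, then sleep ~1ms */
	mdio_device_reset(mdiodev, 0);	/* deassert, then sleep ~2ms */
}
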
/**
* mdio_probe - probe an MDIO device
* @dev: device to probe
@@ -128,8 +146,16 @@ static int mdio_probe(struct device *dev)
struct mdio_driver *mdiodrv = to_mdio_driver(drv);
int err = 0;
- if (mdiodrv->probe)
+ if (mdiodrv->probe) {
+ /* Deassert the reset signal */
+ mdio_device_reset(mdiodev, 0);
+
err = mdiodrv->probe(mdiodev);
+ if (err) {
+ /* Assert the reset signal */
+ mdio_device_reset(mdiodev, 1);
+ }
+ }
return err;
}
@@ -140,9 +166,13 @@ static int mdio_remove(struct device *dev)
struct device_driver *drv = mdiodev->dev.driver;
struct mdio_driver *mdiodrv = to_mdio_driver(drv);
- if (mdiodrv->remove)
+ if (mdiodrv->remove) {
mdiodrv->remove(mdiodev);
+ /* Assert the reset signal */
+ mdio_device_reset(mdiodev, 1);
+ }
+
return 0;
}
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 842eb871a6e3..ddc2c5ea3787 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -24,31 +24,129 @@
#include <linux/netdevice.h>
#include <linux/bitfield.h>
-static int meson_gxl_config_init(struct phy_device *phydev)
+#define TSTCNTL 20
+#define TSTCNTL_READ BIT(15)
+#define TSTCNTL_WRITE BIT(14)
+#define TSTCNTL_REG_BANK_SEL GENMASK(12, 11)
+#define TSTCNTL_TEST_MODE BIT(10)
+#define TSTCNTL_READ_ADDRESS GENMASK(9, 5)
+#define TSTCNTL_WRITE_ADDRESS GENMASK(4, 0)
+#define TSTREAD1 21
+#define TSTWRITE 23
+#define INTSRC_FLAG 29
+#define INTSRC_ANEG_PR BIT(1)
+#define INTSRC_PARALLEL_FAULT BIT(2)
+#define INTSRC_ANEG_LP_ACK BIT(3)
+#define INTSRC_LINK_DOWN BIT(4)
+#define INTSRC_REMOTE_FAULT BIT(5)
+#define INTSRC_ANEG_COMPLETE BIT(6)
+#define INTSRC_MASK 30
+
+#define BANK_ANALOG_DSP 0
+#define BANK_WOL 1
+#define BANK_BIST 3
+
+/* WOL Registers */
+#define LPI_STATUS 0xc
+#define LPI_STATUS_RSV12 BIT(12)
+
+/* BIST Registers */
+#define FR_PLL_CONTROL 0x1b
+#define FR_PLL_DIV0 0x1c
+#define FR_PLL_DIV1 0x1d
+
+static int meson_gxl_open_banks(struct phy_device *phydev)
{
- /* Enable Analog and DSP register Bank access by */
- phy_write(phydev, 0x14, 0x0000);
- phy_write(phydev, 0x14, 0x0400);
- phy_write(phydev, 0x14, 0x0000);
- phy_write(phydev, 0x14, 0x0400);
+ int ret;
+
+ /* Enable Analog and DSP register bank access by toggling the
+ * TSTCNTL_TEST_MODE bit in the TSTCNTL register
+ */
+ ret = phy_write(phydev, TSTCNTL, 0);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, TSTCNTL, 0);
+ if (ret)
+ return ret;
+ return phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE);
+}
+
+static void meson_gxl_close_banks(struct phy_device *phydev)
+{
+ phy_write(phydev, TSTCNTL, 0);
+}
+
+static int meson_gxl_read_reg(struct phy_device *phydev,
+ unsigned int bank, unsigned int reg)
+{
+ int ret;
+
+ ret = meson_gxl_open_banks(phydev);
+ if (ret)
+ goto out;
+
+ ret = phy_write(phydev, TSTCNTL, TSTCNTL_READ |
+ FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) |
+ TSTCNTL_TEST_MODE |
+ FIELD_PREP(TSTCNTL_READ_ADDRESS, reg));
+ if (ret)
+ goto out;
+
+ ret = phy_read(phydev, TSTREAD1);
+out:
+ /* Close the bank access on our way out */
+ meson_gxl_close_banks(phydev);
+ return ret;
+}
+
+static int meson_gxl_write_reg(struct phy_device *phydev,
+ unsigned int bank, unsigned int reg,
+ uint16_t value)
+{
+ int ret;
+
+ ret = meson_gxl_open_banks(phydev);
+ if (ret)
+ goto out;
+
+ ret = phy_write(phydev, TSTWRITE, value);
+ if (ret)
+ goto out;
+
+ ret = phy_write(phydev, TSTCNTL, TSTCNTL_WRITE |
+ FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) |
+ TSTCNTL_TEST_MODE |
+ FIELD_PREP(TSTCNTL_WRITE_ADDRESS, reg));
- /* Write Analog register 23 */
- phy_write(phydev, 0x17, 0x8E0D);
- phy_write(phydev, 0x14, 0x4417);
+out:
+ /* Close the bank access on our way out */
+ meson_gxl_close_banks(phydev);
+ return ret;
+}
+
+static int meson_gxl_config_init(struct phy_device *phydev)
+{
+ int ret;
/* Enable fractional PLL */
- phy_write(phydev, 0x17, 0x0005);
- phy_write(phydev, 0x14, 0x5C1B);
+ ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_CONTROL, 0x5);
+ if (ret)
+ return ret;
/* Program fraction FR_PLL_DIV1 */
- phy_write(phydev, 0x17, 0x029A);
- phy_write(phydev, 0x14, 0x5C1D);
+ ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV1, 0x029a);
+ if (ret)
+ return ret;
/* Program fraction FR_PLL_DIV0 */
- phy_write(phydev, 0x17, 0xAAAA);
- phy_write(phydev, 0x14, 0x5C1C);
+ ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV0, 0xaaaa);
+ if (ret)
+ return ret;
- return 0;
+ return genphy_config_init(phydev);
}
/* This function is provided to cope with the possible failures of this phy
@@ -78,27 +176,8 @@ static int meson_gxl_read_status(struct phy_device *phydev)
else if (!ret)
goto read_status_continue;
- /* Need to access WOL bank, make sure the access is open */
- ret = phy_write(phydev, 0x14, 0x0000);
- if (ret)
- return ret;
- ret = phy_write(phydev, 0x14, 0x0400);
- if (ret)
- return ret;
- ret = phy_write(phydev, 0x14, 0x0000);
- if (ret)
- return ret;
- ret = phy_write(phydev, 0x14, 0x0400);
- if (ret)
- return ret;
-
- /* Request LPI_STATUS WOL register */
- ret = phy_write(phydev, 0x14, 0x8D80);
- if (ret)
- return ret;
-
- /* Read LPI_STATUS value */
- wol = phy_read(phydev, 0x15);
+ /* Aneg is done, let's check everything is fine */
+ wol = meson_gxl_read_reg(phydev, BANK_WOL, LPI_STATUS);
if (wol < 0)
return wol;
@@ -110,7 +189,7 @@ static int meson_gxl_read_status(struct phy_device *phydev)
if (exp < 0)
return exp;
- if (!(wol & BIT(12)) ||
+ if (!(wol & LPI_STATUS_RSV12) ||
((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
/* Looks like aneg failed after all */
phydev_dbg(phydev, "LPA corruption - aneg restart\n");
@@ -122,17 +201,43 @@ read_status_continue:
return genphy_read_status(phydev);
}
+static int meson_gxl_ack_interrupt(struct phy_device *phydev)
+{
+ int ret = phy_read(phydev, INTSRC_FLAG);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int meson_gxl_config_intr(struct phy_device *phydev)
+{
+ u16 val;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ val = INTSRC_ANEG_PR
+ | INTSRC_PARALLEL_FAULT
+ | INTSRC_ANEG_LP_ACK
+ | INTSRC_LINK_DOWN
+ | INTSRC_REMOTE_FAULT
+ | INTSRC_ANEG_COMPLETE;
+ } else {
+ val = 0;
+ }
+
+ return phy_write(phydev, INTSRC_MASK, val);
+}
+
static struct phy_driver meson_gxl_phy[] = {
{
.phy_id = 0x01814400,
.phy_id_mask = 0xfffffff0,
.name = "Meson GXL Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_IS_INTERNAL,
+ .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
.config_init = meson_gxl_config_init,
- .config_aneg = genphy_config_aneg,
.aneg_done = genphy_aneg_done,
.read_status = meson_gxl_read_status,
+ .ack_interrupt = meson_gxl_ack_interrupt,
+ .config_intr = meson_gxl_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
@@ -150,4 +255,5 @@ MODULE_DEVICE_TABLE(mdio, meson_gxl_tbl);
MODULE_DESCRIPTION("Amlogic Meson GXL Internal PHY driver");
MODULE_AUTHOR("Baoqi wang");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 422ff6333c52..0f45310300f6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -802,8 +802,6 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
@@ -817,8 +815,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -835,8 +831,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -854,7 +848,6 @@ static struct phy_driver ksphy_driver[] = {
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
.config_aneg = ksz8041_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -871,8 +864,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -889,8 +880,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -907,8 +896,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -925,8 +912,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -941,8 +926,6 @@ static struct phy_driver ksphy_driver[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
@@ -956,8 +939,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9021_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -976,7 +957,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
- .config_aneg = genphy_config_aneg,
.read_status = ksz9031_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -1001,8 +981,6 @@ static struct phy_driver ksphy_driver[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -1022,8 +1000,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Microchip KSZ9477",
.features = PHY_GBIT_FEATURES,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 37ee856c7680..0f293ef28935 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -153,7 +153,6 @@ static struct phy_driver microchip_phy_driver[] = {
.config_init = genphy_config_init,
.config_aneg = lan88xx_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = lan88xx_phy_ack_interrupt,
.config_intr = lan88xx_phy_config_intr,
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2addf1d3f619..2b1e336961f9 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -136,8 +136,6 @@ static struct phy_driver dp83865_driver[] = { {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = ns_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
} };
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index dada819c6b78..a4576859afae 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -233,6 +233,39 @@ int genphy_c45_read_pma(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(genphy_c45_read_pma);
+/**
+ * genphy_c45_read_mdix - read mdix status from PMA
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_read_mdix(struct phy_device *phydev)
+{
+ int val;
+
+ if (phydev->speed == SPEED_10000) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBT_SWAPPOL);
+ if (val < 0)
+ return val;
+
+ switch (val) {
+ case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
+ phydev->mdix = ETH_TP_MDI;
+ break;
+
+ case 0:
+ phydev->mdix = ETH_TP_MDI_X;
+ break;
+
+ default:
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
+
/* The gen10g_* functions are the old Clause 45 stub */
static int gen10g_config_aneg(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 21f75ae244b3..4083f00c97a5 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -189,17 +189,61 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
return count;
}
+/**
+ * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
+ * @phydev: The phy_device struct
+ *
+ * Resolve our own and the link partner's advertisements into the corresponding
+ * speed and duplex. If full duplex was negotiated, extract the pause mode
+ * from the link partner mask.
+ */
+void phy_resolve_aneg_linkmode(struct phy_device *phydev)
+{
+ u32 common = phydev->lp_advertising & phydev->advertising;
+
+ if (common & ADVERTISED_10000baseT_Full) {
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (common & ADVERTISED_1000baseT_Full) {
+ phydev->speed = SPEED_1000;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (common & ADVERTISED_1000baseT_Half) {
+ phydev->speed = SPEED_1000;
+ phydev->duplex = DUPLEX_HALF;
+ } else if (common & ADVERTISED_100baseT_Full) {
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (common & ADVERTISED_100baseT_Half) {
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_HALF;
+ } else if (common & ADVERTISED_10baseT_Full) {
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (common & ADVERTISED_10baseT_Half) {
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_HALF;
+ }
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause);
+ phydev->asym_pause = !!(phydev->lp_advertising &
+ ADVERTISED_Asym_Pause);
+ }
+}
+EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
+
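A sketch of the intended call pattern in a driver's read_status() path, once the link partner's abilities have been folded into phydev->lp_advertising (phydev->advertising is assumed to be populated already):

static int example_read_status(struct phy_device *phydev)
{
	int lpa = phy_read(phydev, MII_LPA);

	if (lpa < 0)
		return lpa;

	phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa);

	if (phydev->autoneg == AUTONEG_ENABLE)
		phy_resolve_aneg_linkmode(phydev);

	return 0;
}
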
static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
u16 regnum)
{
/* Write the desired MMD Devad */
- bus->write(bus, phy_addr, MII_MMD_CTRL, devad);
+ __mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devad);
/* Write the desired MMD register address */
- bus->write(bus, phy_addr, MII_MMD_DATA, regnum);
+ __mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum);
/* Select the Function : DATA with no post increment */
- bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
+ __mdiobus_write(bus, phy_addr, MII_MMD_CTRL,
+ devad | MII_MMD_CTRL_NOINCR);
}
/**
@@ -232,7 +276,7 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
mmd_phy_indirect(bus, phy_addr, devad, regnum);
/* Read the content of the MMD's selected register */
- val = bus->read(bus, phy_addr, MII_MMD_DATA);
+ val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
mutex_unlock(&bus->mdio_lock);
}
return val;
@@ -271,7 +315,7 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
mmd_phy_indirect(bus, phy_addr, devad, regnum);
/* Write the data into MMD's selected register */
- bus->write(bus, phy_addr, MII_MMD_DATA, val);
+ __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
mutex_unlock(&bus->mdio_lock);
ret = 0;
@@ -279,3 +323,207 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
return ret;
}
EXPORT_SYMBOL(phy_write_mmd);
+
+/**
+ * __phy_modify() - Convenience function for modifying a PHY register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Unlocked helper function which allows a PHY register to be modified as
+ * new register value = (old register value & ~mask) | set
+ */
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ ret = __phy_read(phydev, regnum);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, regnum, (ret & ~mask) | set);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(__phy_modify);
+
+/**
+ * phy_modify - Convenience function for modifying a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_modify(phydev, regnum, mask, set);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_modify);
+
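A worked illustration of the mask/set split: forcing 100 Mb/s full duplex means clearing the autoneg, 1000 Mb/s and power-down bits while setting the 100 Mb/s and duplex bits, all in one locked read-modify-write:

static int example_force_100full(struct phy_device *phydev)
{
	return phy_modify(phydev, MII_BMCR,
			  BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_PDOWN,
			  BMCR_SPEED100 | BMCR_FULLDPLX);
}
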
+static int __phy_read_page(struct phy_device *phydev)
+{
+ return phydev->drv->read_page(phydev);
+}
+
+static int __phy_write_page(struct phy_device *phydev, int page)
+{
+ return phydev->drv->write_page(phydev, page);
+}
+
+/**
+ * phy_save_page() - take the bus lock and save the current page
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Take the MDIO bus lock, and return the current page number. On error,
+ * returns a negative errno. phy_restore_page() must always be called
+ * after this, irrespective of success or failure of this call.
+ */
+int phy_save_page(struct phy_device *phydev)
+{
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ return __phy_read_page(phydev);
+}
+EXPORT_SYMBOL_GPL(phy_save_page);
+
+/**
+ * phy_select_page() - take the bus lock, save the current page, and set a page
+ * @phydev: a pointer to a &struct phy_device
+ * @page: desired page
+ *
+ * Take the MDIO bus lock to protect against concurrent access, save the
+ * current PHY page, and set the current page. On error, returns a
+ * negative errno, otherwise returns the previous page number.
+ * phy_restore_page() must always be called after this, irrespective
+ * of success or failure of this call.
+ */
+int phy_select_page(struct phy_device *phydev, int page)
+{
+ int ret, oldpage;
+
+ oldpage = ret = phy_save_page(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (oldpage != page) {
+ ret = __phy_write_page(phydev, page);
+ if (ret < 0)
+ return ret;
+ }
+
+ return oldpage;
+}
+EXPORT_SYMBOL_GPL(phy_select_page);
+
+/**
+ * phy_restore_page() - restore the page register and release the bus lock
+ * @phydev: a pointer to a &struct phy_device
+ * @oldpage: the old page, return value from phy_save_page() or phy_select_page()
+ * @ret: operation's return code
+ *
+ * Release the MDIO bus lock, restoring @oldpage if it is a valid page.
+ * This function propagates the earliest error code from the group of
+ * operations.
+ *
+ * Returns:
+ * @oldpage if it was a negative value, otherwise
+ * @ret if it was a negative errno value, otherwise
+ * the phy_write_page() error code if restoring the page failed, otherwise
+ * @ret.
+ */
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret)
+{
+ int r;
+
+ if (oldpage >= 0) {
+ r = __phy_write_page(phydev, oldpage);
+
+ /* Propagate the operation return code if the page write
+ * was successful.
+ */
+ if (ret >= 0 && r < 0)
+ ret = r;
+ } else {
+ /* Propagate the phy page selection error code */
+ ret = oldpage;
+ }
+
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_restore_page);
+
+/**
+ * phy_read_paged() - Convenience function for reading a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ *
+ * Same rules as for phy_read().
+ */
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum)
+{
+ int ret = 0, oldpage;
+
+ oldpage = phy_select_page(phydev, page);
+ if (oldpage >= 0)
+ ret = __phy_read(phydev, regnum);
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_read_paged);
+
+/**
+ * phy_write_paged() - Convenience function for writing a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @val: value to write
+ *
+ * Same rules as for phy_write().
+ */
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val)
+{
+ int ret = 0, oldpage;
+
+ oldpage = phy_select_page(phydev, page);
+ if (oldpage >= 0)
+ ret = __phy_write(phydev, regnum, val);
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_write_paged);
+
+/**
+ * phy_modify_paged() - Convenience function for modifying a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Same rules as for phy_read() and phy_write().
+ */
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set)
+{
+ int ret = 0, oldpage;
+
+ oldpage = phy_select_page(phydev, page);
+ if (oldpage >= 0)
+ ret = __phy_modify(phydev, regnum, mask, set);
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_modify_paged);
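
Taken together, the paged helpers reduce a driver-side paged access to a single call. An illustration with invented page and register numbers:

/* Set bit 4 of register 16 on page 3; the bus lock is taken and the
 * original page restored internally.
 */
static int example_set_led(struct phy_device *phydev)
{
	return phy_modify_paged(phydev, 3, 16, BIT(4), BIT(4));
}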
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ed10d1fc8f59..f3313a129531 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -493,7 +493,10 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
/* Invalidate LP advertising flags */
phydev->lp_advertising = 0;
- err = phydev->drv->config_aneg(phydev);
+ if (phydev->drv->config_aneg)
+ err = phydev->drv->config_aneg(phydev);
+ else
+ err = genphy_config_aneg(phydev);
if (err < 0)
goto out_unlock;
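
This fallback is what allows the many .config_aneg = genphy_config_aneg lines to be dropped from drivers throughout this series. Assuming the matching read_status fallback from the same series, a minimal driver now looks roughly like this (the ID is invented):

static struct phy_driver example_driver = {
	.phy_id		= 0x12345678,
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example PHY",
	.features	= PHY_BASIC_FEATURES,
	.config_init	= genphy_config_init,
	/* .config_aneg and .read_status default to the genphy versions */
	.suspend	= genphy_suspend,
	.resume		= genphy_resume,
};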
@@ -629,9 +632,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
if (PHY_HALTED == phydev->state)
return IRQ_NONE; /* It can't be ours. */
- disable_irq_nosync(irq);
- atomic_inc(&phydev->irq_disable);
-
phy_change(phydev);
return IRQ_HANDLED;
@@ -689,7 +689,6 @@ phy_err:
*/
int phy_start_interrupts(struct phy_device *phydev)
{
- atomic_set(&phydev->irq_disable, 0);
if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
IRQF_ONESHOT | IRQF_SHARED,
phydev_name(phydev), phydev) < 0) {
@@ -716,13 +715,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
free_irq(phydev->irq, phydev);
- /* If work indeed has been cancelled, disable_irq() will have
- * been left unbalanced from phy_interrupt() and enable_irq()
- * has to be called so that other devices on the line work.
- */
- while (atomic_dec_return(&phydev->irq_disable) >= 0)
- enable_irq(phydev->irq);
-
return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);
@@ -736,10 +728,11 @@ void phy_change(struct phy_device *phydev)
if (phy_interrupt_is_valid(phydev)) {
if (phydev->drv->did_interrupt &&
!phydev->drv->did_interrupt(phydev))
- goto ignore;
+ return;
- if (phy_disable_interrupts(phydev))
- goto phy_err;
+ if (phydev->state == PHY_HALTED)
+ if (phy_disable_interrupts(phydev))
+ goto phy_err;
}
mutex_lock(&phydev->lock);
@@ -747,28 +740,13 @@ void phy_change(struct phy_device *phydev)
phydev->state = PHY_CHANGELINK;
mutex_unlock(&phydev->lock);
- if (phy_interrupt_is_valid(phydev)) {
- atomic_dec(&phydev->irq_disable);
- enable_irq(phydev->irq);
-
- /* Reenable interrupts */
- if (PHY_HALTED != phydev->state &&
- phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
- goto irq_enable_err;
- }
-
/* reschedule state queue work to run as soon as possible */
phy_trigger_machine(phydev, true);
- return;
-ignore:
- atomic_dec(&phydev->irq_disable);
- enable_irq(phydev->irq);
+ if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+ goto phy_err;
return;
-irq_enable_err:
- disable_irq(phydev->irq);
- atomic_inc(&phydev->irq_disable);
phy_err:
phy_error(phydev);
}
@@ -1003,10 +981,6 @@ void phy_state_machine(struct work_struct *work)
phydev->state = PHY_NOLINK;
phy_link_down(phydev, true);
}
-
- if (phy_interrupt_is_valid(phydev))
- err = phy_config_interrupt(phydev,
- PHY_INTERRUPT_ENABLED);
break;
case PHY_HALTED:
if (phydev->link) {
@@ -1083,16 +1057,12 @@ void phy_state_machine(struct work_struct *work)
/**
* phy_mac_interrupt - MAC says the link has changed
* @phydev: phy_device struct with changed link
- * @new_link: Link is Up/Down.
*
- * Description: The MAC layer is able indicate there has been a change
- * in the PHY link status. Set the new link status, and trigger the
- * state machine, work a work queue.
+ * The MAC layer is able to indicate that there has been a change in the PHY
+ * link status. Trigger the state machine and run its work queue.
*/
-void phy_mac_interrupt(struct phy_device *phydev, int new_link)
+void phy_mac_interrupt(struct phy_device *phydev)
{
- phydev->link = new_link;
-
/* Trigger a state machine change */
queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
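
With the new_link argument gone, a MAC driver's link interrupt handler simply kicks the state machine and lets the PHY state be re-read. A sketch, assuming the net_device was passed as the IRQ cookie:

static irqreturn_t example_link_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;

	phy_mac_interrupt(ndev->phydev);

	return IRQ_HANDLED;
}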
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b15b31ca2618..b13eed21c87d 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -634,6 +634,9 @@ int phy_device_register(struct phy_device *phydev)
if (err)
return err;
+ /* Deassert the reset signal */
+ phy_device_reset(phydev, 0);
+
/* Run all of the fixups for this PHY */
err = phy_scan_fixups(phydev);
if (err) {
@@ -652,6 +655,9 @@ int phy_device_register(struct phy_device *phydev)
return 0;
out:
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+
mdiobus_unregister_device(&phydev->mdio);
return err;
}
@@ -668,6 +674,10 @@ EXPORT_SYMBOL(phy_device_register);
void phy_device_remove(struct phy_device *phydev)
{
device_del(&phydev->mdio.dev);
+
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+
mdiobus_unregister_device(&phydev->mdio);
}
EXPORT_SYMBOL(phy_device_remove);
@@ -851,6 +861,9 @@ int phy_init_hw(struct phy_device *phydev)
{
int ret = 0;
+ /* Deassert the reset signal */
+ phy_device_reset(phydev, 0);
+
if (!phydev->drv || !phydev->drv->config_init)
return 0;
@@ -1130,6 +1143,9 @@ void phy_detach(struct phy_device *phydev)
put_device(&phydev->mdio.dev);
if (ndev_owner != bus->owner)
module_put(bus->owner);
+
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
}
EXPORT_SYMBOL(phy_detach);
@@ -1208,6 +1224,30 @@ out:
}
EXPORT_SYMBOL(phy_loopback);
+/**
+ * phy_reset_after_clk_enable - perform a PHY reset if needed
+ * @phydev: target phy_device struct
+ *
+ * Description: Some PHYs are known to need a reset after their refclk is
+ * enabled. This function evaluates the flags and performs the reset if
+ * needed. Returns < 0 on error, 0 if the phy wasn't reset and 1 if the phy
+ * was reset.
+ */
+int phy_reset_after_clk_enable(struct phy_device *phydev)
+{
+ if (!phydev || !phydev->drv)
+ return -ENODEV;
+
+ if (phydev->drv->flags & PHY_RST_AFTER_CLK_EN) {
+ phy_device_reset(phydev, 1);
+ phy_device_reset(phydev, 0);
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_reset_after_clk_enable);
+
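The intended call site is in a MAC driver, right after the PHY's reference clock is (re-)enabled. A sketch; the clk and net_device pointers are assumed to come from the driver's private data:

static int example_mac_resume(struct clk *refclk, struct net_device *ndev)
{
	int ret;

	ret = clk_prepare_enable(refclk);
	if (ret)
		return ret;

	/* Returns 1 if the PHY was reset, 0 if not, < 0 on error */
	ret = phy_reset_after_clk_enable(ndev->phydev);
	if (ret < 0) {
		clk_disable_unprepare(refclk);
		return ret;
	}

	return 0;
}
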
/* Generic PHY support and helper functions */
/**
@@ -1328,9 +1368,8 @@ static int genphy_config_eee_advert(struct phy_device *phydev)
*/
int genphy_setup_forced(struct phy_device *phydev)
{
- int ctl = phy_read(phydev, MII_BMCR);
+ u16 ctl = 0;
- ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -1342,7 +1381,8 @@ int genphy_setup_forced(struct phy_device *phydev)
if (DUPLEX_FULL == phydev->duplex)
ctl |= BMCR_FULLDPLX;
- return phy_write(phydev, MII_BMCR, ctl);
+ return phy_modify(phydev, MII_BMCR,
+ BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl);
}
EXPORT_SYMBOL(genphy_setup_forced);
@@ -1352,17 +1392,9 @@ EXPORT_SYMBOL(genphy_setup_forced);
*/
int genphy_restart_aneg(struct phy_device *phydev)
{
- int ctl = phy_read(phydev, MII_BMCR);
-
- if (ctl < 0)
- return ctl;
-
- ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
-
/* Don't isolate the PHY if we're negotiating */
- ctl &= ~BMCR_ISOLATE;
-
- return phy_write(phydev, MII_BMCR, ctl);
+ return phy_modify(phydev, MII_BMCR, BMCR_ISOLATE,
+ BMCR_ANENABLE | BMCR_ANRESTART);
}
EXPORT_SYMBOL(genphy_restart_aneg);
@@ -1628,44 +1660,20 @@ EXPORT_SYMBOL(genphy_config_init);
int genphy_suspend(struct phy_device *phydev)
{
- int value;
-
- mutex_lock(&phydev->lock);
-
- value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
-
- mutex_unlock(&phydev->lock);
-
- return 0;
+ return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
}
EXPORT_SYMBOL(genphy_suspend);
int genphy_resume(struct phy_device *phydev)
{
- int value;
-
- value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
-
- return 0;
+ return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
}
EXPORT_SYMBOL(genphy_resume);
int genphy_loopback(struct phy_device *phydev, bool enable)
{
- int value;
-
- value = phy_read(phydev, MII_BMCR);
- if (value < 0)
- return value;
-
- if (enable)
- value |= BMCR_LOOPBACK;
- else
- value &= ~BMCR_LOOPBACK;
-
- return phy_write(phydev, MII_BMCR, value);
+ return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ enable ? BMCR_LOOPBACK : 0);
}
EXPORT_SYMBOL(genphy_loopback);
@@ -1813,8 +1821,16 @@ static int phy_probe(struct device *dev)
/* Set the state to READY by default */
phydev->state = PHY_READY;
- if (phydev->drv->probe)
+ if (phydev->drv->probe) {
+ /* Deassert the reset signal */
+ phy_device_reset(phydev, 0);
+
err = phydev->drv->probe(phydev);
+ if (err) {
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+ }
+ }
mutex_unlock(&phydev->lock);
@@ -1831,8 +1847,12 @@ static int phy_remove(struct device *dev)
phydev->state = PHY_DOWN;
mutex_unlock(&phydev->lock);
- if (phydev->drv && phydev->drv->remove)
+ if (phydev->drv && phydev->drv->remove) {
phydev->drv->remove(phydev);
+
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+ }
phydev->drv = NULL;
return 0;
@@ -1909,9 +1929,7 @@ static struct phy_driver genphy_driver = {
.features = PHY_GBIT_FEATURES | SUPPORTED_MII |
SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_BNC,
- .config_aneg = genphy_config_aneg,
.aneg_done = genphy_aneg_done,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
.set_loopback = genphy_loopback,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 827f3f92560e..6ac8b29b2dc3 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -36,7 +36,11 @@ enum {
PHYLINK_DISABLE_LINK,
};
+/**
+ * struct phylink - internal data type for phylink
+ */
struct phylink {
+ /* private: */
struct net_device *netdev;
const struct phylink_mac_ops *ops;
@@ -50,6 +54,8 @@ struct phylink {
/* The link configuration settings */
struct phylink_link_state link_config;
struct gpio_desc *link_gpio;
+ void (*get_fixed_state)(struct net_device *dev,
+ struct phylink_link_state *s);
struct mutex state_mutex;
struct phylink_link_state phy_state;
@@ -87,6 +93,13 @@ static inline bool linkmode_empty(const unsigned long *src)
return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+/**
+ * phylink_set_port_modes() - set the port type modes in the ethtool mask
+ * @mask: ethtool link mode mask
+ *
+ * Sets all the port type modes in the ethtool mask. MAC drivers should
+ * use this in their 'validate' callback.
+ */
void phylink_set_port_modes(unsigned long *mask)
{
phylink_set(mask, TP);
@@ -117,8 +130,7 @@ static const char *phylink_an_mode_str(unsigned int mode)
static const char *modestr[] = {
[MLO_AN_PHY] = "phy",
[MLO_AN_FIXED] = "fixed",
- [MLO_AN_SGMII] = "SGMII",
- [MLO_AN_8023Z] = "802.3z",
+ [MLO_AN_INBAND] = "inband",
};
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
@@ -132,59 +144,64 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
-static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
+static int phylink_parse_fixedlink(struct phylink *pl,
+ struct fwnode_handle *fwnode)
{
- struct device_node *fixed_node;
+ struct fwnode_handle *fixed_node;
const struct phy_setting *s;
struct gpio_desc *desc;
- const __be32 *fixed_prop;
u32 speed;
- int ret, len;
+ int ret;
- fixed_node = of_get_child_by_name(np, "fixed-link");
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
if (fixed_node) {
- ret = of_property_read_u32(fixed_node, "speed", &speed);
+ ret = fwnode_property_read_u32(fixed_node, "speed", &speed);
pl->link_config.speed = speed;
pl->link_config.duplex = DUPLEX_HALF;
- if (of_property_read_bool(fixed_node, "full-duplex"))
+ if (fwnode_property_read_bool(fixed_node, "full-duplex"))
pl->link_config.duplex = DUPLEX_FULL;
/* We treat the "pause" and "asym-pause" terminology as
* defining the link partner's ability. */
- if (of_property_read_bool(fixed_node, "pause"))
+ if (fwnode_property_read_bool(fixed_node, "pause"))
pl->link_config.pause |= MLO_PAUSE_SYM;
- if (of_property_read_bool(fixed_node, "asym-pause"))
+ if (fwnode_property_read_bool(fixed_node, "asym-pause"))
pl->link_config.pause |= MLO_PAUSE_ASYM;
if (ret == 0) {
- desc = fwnode_get_named_gpiod(&fixed_node->fwnode,
- "link-gpios", 0,
- GPIOD_IN, "?");
+ desc = fwnode_get_named_gpiod(fixed_node, "link-gpios",
+ 0, GPIOD_IN, "?");
if (!IS_ERR(desc))
pl->link_gpio = desc;
else if (desc == ERR_PTR(-EPROBE_DEFER))
ret = -EPROBE_DEFER;
}
- of_node_put(fixed_node);
+ fwnode_handle_put(fixed_node);
if (ret)
return ret;
} else {
- fixed_prop = of_get_property(np, "fixed-link", &len);
- if (!fixed_prop) {
+ u32 prop[5];
+
+ ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+ NULL, 0);
+ if (ret != ARRAY_SIZE(prop)) {
netdev_err(pl->netdev, "broken fixed-link?\n");
return -EINVAL;
}
- if (len == 5 * sizeof(*fixed_prop)) {
- pl->link_config.duplex = be32_to_cpu(fixed_prop[1]) ?
+
+ ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+ prop, ARRAY_SIZE(prop));
+ if (!ret) {
+ pl->link_config.duplex = prop[1] ?
DUPLEX_FULL : DUPLEX_HALF;
- pl->link_config.speed = be32_to_cpu(fixed_prop[2]);
- if (be32_to_cpu(fixed_prop[3]))
+ pl->link_config.speed = prop[2];
+ if (prop[3])
pl->link_config.pause |= MLO_PAUSE_SYM;
- if (be32_to_cpu(fixed_prop[4]))
+ if (prop[4])
pl->link_config.pause |= MLO_PAUSE_ASYM;
}
}
@@ -220,17 +237,17 @@ static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
return 0;
}
-static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
+static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
{
- struct device_node *dn;
+ struct fwnode_handle *dn;
const char *managed;
- dn = of_get_child_by_name(np, "fixed-link");
- if (dn || of_find_property(np, "fixed-link", NULL))
+ dn = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (dn || fwnode_property_present(fwnode, "fixed-link"))
pl->link_an_mode = MLO_AN_FIXED;
- of_node_put(dn);
+ fwnode_handle_put(dn);
- if (of_property_read_string(np, "managed", &managed) == 0 &&
+ if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
strcmp(managed, "in-band-status") == 0) {
if (pl->link_an_mode == MLO_AN_FIXED) {
netdev_err(pl->netdev,
@@ -244,6 +261,7 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
phylink_set(pl->supported, Asym_Pause);
phylink_set(pl->supported, Pause);
pl->link_config.an_enabled = true;
+ pl->link_an_mode = MLO_AN_INBAND;
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -253,17 +271,14 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
phylink_set(pl->supported, 100baseT_Full);
phylink_set(pl->supported, 1000baseT_Half);
phylink_set(pl->supported, 1000baseT_Full);
- pl->link_an_mode = MLO_AN_SGMII;
break;
case PHY_INTERFACE_MODE_1000BASEX:
phylink_set(pl->supported, 1000baseX_Full);
- pl->link_an_mode = MLO_AN_8023Z;
break;
case PHY_INTERFACE_MODE_2500BASEX:
phylink_set(pl->supported, 2500baseX_Full);
- pl->link_an_mode = MLO_AN_8023Z;
break;
case PHY_INTERFACE_MODE_10GKR:
@@ -280,7 +295,6 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
phylink_set(pl->supported, 10000baseLR_Full);
phylink_set(pl->supported, 10000baseLRM_Full);
phylink_set(pl->supported, 10000baseER_Full);
- pl->link_an_mode = MLO_AN_SGMII;
break;
default:
@@ -320,8 +334,7 @@ static void phylink_mac_config(struct phylink *pl,
static void phylink_mac_an_restart(struct phylink *pl)
{
if (pl->link_config.an_enabled &&
- (pl->link_config.interface == PHY_INTERFACE_MODE_1000BASEX ||
- pl->link_config.interface == PHY_INTERFACE_MODE_2500BASEX))
+ phy_interface_mode_is_8023z(pl->link_config.interface))
pl->ops->mac_an_restart(pl->netdev);
}
@@ -339,12 +352,14 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
}
/* The fixed state is... fixed except for the link state,
- * which may be determined by a GPIO.
+ * which may be determined by a GPIO or a callback.
*/
static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state)
{
*state = pl->link_config;
- if (pl->link_gpio)
+ if (pl->get_fixed_state)
+ pl->get_fixed_state(pl->netdev, state);
+ else if (pl->link_gpio)
state->link = !!gpiod_get_value(pl->link_gpio);
}
@@ -423,7 +438,7 @@ static void phylink_resolve(struct work_struct *w)
phylink_mac_config(pl, &link_state);
break;
- case MLO_AN_SGMII:
+ case MLO_AN_INBAND:
phylink_get_mac_state(pl, &link_state);
if (pl->phydev) {
bool changed = false;
@@ -449,10 +464,6 @@ static void phylink_resolve(struct work_struct *w)
}
}
break;
-
- case MLO_AN_8023Z:
- phylink_get_mac_state(pl, &link_state);
- break;
}
}
@@ -489,15 +500,27 @@ static void phylink_run_resolve(struct phylink *pl)
static const struct sfp_upstream_ops sfp_phylink_ops;
-static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
+static int phylink_register_sfp(struct phylink *pl,
+ struct fwnode_handle *fwnode)
{
- struct device_node *sfp_np;
+ struct fwnode_reference_args ref;
+ int ret;
- sfp_np = of_parse_phandle(np, "sfp", 0);
- if (!sfp_np)
+ if (!fwnode)
return 0;
- pl->sfp_bus = sfp_register_upstream(sfp_np, pl->netdev, pl,
+ ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
+ 0, 0, &ref);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ return 0;
+
+ netdev_err(pl->netdev, "unable to parse \"sfp\" node: %d\n",
+ ret);
+ return ret;
+ }
+
+ pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl,
&sfp_phylink_ops);
if (!pl->sfp_bus)
return -ENOMEM;
@@ -505,7 +528,22 @@ static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
return 0;
}
-struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
+/**
+ * phylink_create() - create a phylink instance
+ * @ndev: a pointer to the &struct net_device
+ * @fwnode: a pointer to a &struct fwnode_handle describing the network
+ * interface
+ * @iface: the desired link mode defined by &typedef phy_interface_t
+ * @ops: a pointer to a &struct phylink_mac_ops for the MAC.
+ *
+ * Create a new phylink instance, and parse the link parameters found in
+ * @fwnode. This will parse in-band modes, fixed-link or SFP configuration.
+ *
+ * Returns a pointer to a &struct phylink, or an error-pointer value. Users
+ * must use IS_ERR() to check for errors from this function.
+ */
+struct phylink *phylink_create(struct net_device *ndev,
+ struct fwnode_handle *fwnode,
phy_interface_t iface,
const struct phylink_mac_ops *ops)
{
@@ -521,7 +559,10 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
pl->netdev = ndev;
pl->phy_state.interface = iface;
pl->link_interface = iface;
- pl->link_port = PORT_MII;
+ if (iface == PHY_INTERFACE_MODE_MOCA)
+ pl->link_port = PORT_BNC;
+ else
+ pl->link_port = PORT_MII;
pl->link_config.interface = iface;
pl->link_config.pause = MLO_PAUSE_AN;
pl->link_config.speed = SPEED_UNKNOWN;
@@ -534,21 +575,21 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
linkmode_copy(pl->link_config.advertising, pl->supported);
phylink_validate(pl, pl->supported, &pl->link_config);
- ret = phylink_parse_mode(pl, np);
+ ret = phylink_parse_mode(pl, fwnode);
if (ret < 0) {
kfree(pl);
return ERR_PTR(ret);
}
if (pl->link_an_mode == MLO_AN_FIXED) {
- ret = phylink_parse_fixedlink(pl, np);
+ ret = phylink_parse_fixedlink(pl, fwnode);
if (ret < 0) {
kfree(pl);
return ERR_PTR(ret);
}
}
- ret = phylink_register_sfp(pl, np);
+ ret = phylink_register_sfp(pl, fwnode);
if (ret < 0) {
kfree(pl);
return ERR_PTR(ret);
@@ -558,6 +599,13 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
}
EXPORT_SYMBOL_GPL(phylink_create);
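
[Illustration] A MAC driver converted to the new signature might create its instance as below — a minimal sketch: struct foo_priv, foo_mac_ops and foo_phylink_setup() are hypothetical, and the fwnode is taken from the struct device, as the sfp code later in this series does:

static const struct phylink_mac_ops foo_mac_ops; /* .validate etc. elided */

struct foo_priv {
	struct net_device *ndev;
	struct phylink *phylink;
};

static int foo_phylink_setup(struct foo_priv *priv, struct device *dev)
{
	struct phylink *pl;

	/* phylink_create() returns an ERR_PTR() value on failure. */
	pl = phylink_create(priv->ndev, dev->fwnode,
			    PHY_INTERFACE_MODE_SGMII, &foo_mac_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	priv->phylink = pl;
	return 0;
}
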
+/**
+ * phylink_destroy() - cleanup and destroy the phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Destroy a phylink instance. Any PHY that has been attached must have been
+ * cleaned up via phylink_disconnect_phy() prior to calling this function.
+ */
void phylink_destroy(struct phylink *pl)
{
if (pl->sfp_bus)
@@ -654,10 +702,39 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
return 0;
}
+/**
+ * phylink_connect_phy() - connect a PHY to the phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @phy: a pointer to a &struct phy_device.
+ *
+ * Connect @phy to the phylink instance specified by @pl by calling
+ * phy_attach_direct(). Configure the @phy according to the MAC driver's
+ * capabilities, start the PHYLIB state machine and enable any interrupts
+ * that the PHY supports.
+ *
+ * This updates the phylink's ethtool supported and advertising link mode
+ * masks.
+ *
+ * Returns 0 on success or a negative errno.
+ */
int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
{
int ret;
+ if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+ (pl->link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(pl->link_interface))))
+ return -EINVAL;
+
+ if (pl->phydev)
+ return -EBUSY;
+
+ /* Use PHY device/driver interface */
+ if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
+ pl->link_interface = phy->interface;
+ pl->link_config.interface = pl->link_interface;
+ }
+
ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
if (ret)
return ret;
@@ -670,14 +747,29 @@ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
}
EXPORT_SYMBOL_GPL(phylink_connect_phy);
-int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
+/**
+ * phylink_of_phy_connect() - connect the PHY specified in the DT node.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @dn: a pointer to a &struct device_node.
+ * @flags: PHY-specific flags to communicate to the PHY device driver
+ *
+ * Connect the phy specified in the device node @dn to the phylink instance
+ * specified by @pl. Actions specified in phylink_connect_phy() will be
+ * performed.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
+ u32 flags)
{
struct device_node *phy_node;
struct phy_device *phy_dev;
int ret;
- /* Fixed links are handled without needing a PHY */
- if (pl->link_an_mode == MLO_AN_FIXED)
+ /* Fixed links and 802.3z are handled without needing a PHY */
+ if (pl->link_an_mode == MLO_AN_FIXED ||
+ (pl->link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(pl->link_interface)))
return 0;
phy_node = of_parse_phandle(dn, "phy-handle", 0);
@@ -687,14 +779,13 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
phy_node = of_parse_phandle(dn, "phy-device", 0);
if (!phy_node) {
- if (pl->link_an_mode == MLO_AN_PHY) {
- netdev_err(pl->netdev, "unable to find PHY node\n");
+ if (pl->link_an_mode == MLO_AN_PHY)
return -ENODEV;
- }
return 0;
}
- phy_dev = of_phy_attach(pl->netdev, phy_node, 0, pl->link_interface);
+ phy_dev = of_phy_attach(pl->netdev, phy_node, flags,
+ pl->link_interface);
/* We're done with the phy_node handle */
of_node_put(phy_node);
@@ -709,11 +800,18 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
}
EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
+/**
+ * phylink_disconnect_phy() - disconnect any PHY attached to the phylink
+ * instance.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Disconnect any current PHY from the phylink instance described by @pl.
+ */
void phylink_disconnect_phy(struct phylink *pl)
{
struct phy_device *phy;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
phy = pl->phydev;
if (phy) {
@@ -730,6 +828,40 @@ void phylink_disconnect_phy(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
+/**
+ * phylink_fixed_state_cb() - allow setting a fixed link callback
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @cb: callback to execute to determine the fixed link state.
+ *
+ * The MAC driver should call this function when the state of its link
+ * can be determined through, e.g., an out-of-band MMIO register.
+ */
+int phylink_fixed_state_cb(struct phylink *pl,
+ void (*cb)(struct net_device *dev,
+ struct phylink_link_state *state))
+{
+ /* It does not make sense to let the link be overridden unless we use
+ * MLO_AN_FIXED
+ */
+ if (pl->link_an_mode != MLO_AN_FIXED)
+ return -EINVAL;
+
+ mutex_lock(&pl->state_mutex);
+ pl->get_fixed_state = cb;
+ mutex_unlock(&pl->state_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phylink_fixed_state_cb);
+
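
[Illustration] As a usage sketch (hypothetical names throughout: foo_priv, foo_rd(), FOO_LINK_STAT), a MAC whose fixed-link status is visible in an out-of-band MMIO register could register:

static void foo_fixed_state(struct net_device *ndev,
			    struct phylink_link_state *state)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* Only the link member needs updating; speed/duplex/pause keep
	 * the values parsed from the fixed-link description, since the
	 * callback receives a copy of the fixed configuration.
	 */
	state->link = !!(foo_rd(priv, FOO_LINK_STAT) & BIT(0));
}

/* ...in probe, after phylink_create() has selected MLO_AN_FIXED: */
	err = phylink_fixed_state_cb(priv->phylink, foo_fixed_state);
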
+/**
+ * phylink_mac_change() - notify phylink of a change in MAC state
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @up: indicates whether the link is currently up.
+ *
+ * The MAC driver should call this function when the state of its link
+ * changes (e.g., link failure, new negotiation results, etc.)
+ */
void phylink_mac_change(struct phylink *pl, bool up)
{
if (!up)
@@ -739,9 +871,17 @@ void phylink_mac_change(struct phylink *pl, bool up)
}
EXPORT_SYMBOL_GPL(phylink_mac_change);
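
[Illustration] A sketch of the intended caller — the IRQ handler and foo_link_is_up() helper are hypothetical. phylink defers the actual resolution to its workqueue, so calling this from the handler is generally fine:

static irqreturn_t foo_link_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	/* Report the new state; resolution happens in the worker. */
	phylink_mac_change(priv->phylink, foo_link_is_up(priv));

	return IRQ_HANDLED;
}
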
+/**
+ * phylink_start() - start a phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Start the phylink instance specified by @pl, configuring the MAC for the
+ * desired link mode(s) and negotiation style. This should be called from the
+ * network device driver's &struct net_device_ops ndo_open() method.
+ */
void phylink_start(struct phylink *pl)
{
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
phylink_an_mode_str(pl->link_an_mode),
@@ -754,6 +894,12 @@ void phylink_start(struct phylink *pl)
phylink_resolve_flow(pl, &pl->link_config);
phylink_mac_config(pl, &pl->link_config);
+ /* Restart autonegotiation if using 802.3z to ensure that the link
+ * parameters are properly negotiated. This is necessary for DSA
+ * switches using 802.3z negotiation to ensure they see our modes.
+ */
+ phylink_mac_an_restart(pl);
+
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
@@ -764,9 +910,18 @@ void phylink_start(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_start);
+/**
+ * phylink_stop() - stop a phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Stop the phylink instance specified by @pl. This should be called from the
+ * network device driver's &struct net_device_ops ndo_stop() method. The
+ * network device's carrier state should not be changed prior to calling this
+ * function.
+ */
void phylink_stop(struct phylink *pl)
{
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
phy_stop(pl->phydev);
@@ -779,9 +934,18 @@ void phylink_stop(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_stop);
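
[Illustration] Wired into a driver's net_device_ops, the pair looks like this (a minimal sketch; a real ndo_open/ndo_stop would also manage queues, interrupts and the hardware itself):

static int foo_open(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	phylink_start(priv->phylink);

	return 0;
}

static int foo_stop(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* Do not touch the carrier here; phylink_stop() handles it. */
	phylink_stop(priv->phylink);

	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
};
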
+/**
+ * phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @wol: a pointer to &struct ethtool_wolinfo to hold the read parameters
+ *
+ * Read the wake on lan parameters from the PHY attached to the phylink
+ * instance specified by @pl. If no PHY is currently attached, report no
+ * support for wake on lan.
+ */
void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
{
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
wol->supported = 0;
wol->wolopts = 0;
@@ -791,11 +955,22 @@ void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
}
EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol);
+/**
+ * phylink_ethtool_set_wol() - set wake on lan parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @wol: a pointer to &struct ethtool_wolinfo for the desired parameters
+ *
+ * Set the wake on lan parameters for the PHY attached to the phylink
+ * instance specified by @pl. If no PHY is attached, returns an
+ * %EOPNOTSUPP error.
+ *
+ * Returns zero on success or negative errno code.
+ */
int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
{
int ret = -EOPNOTSUPP;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_ethtool_set_wol(pl->phydev, wol);
@@ -826,12 +1001,21 @@ static void phylink_get_ksettings(const struct phylink_link_state *state,
AUTONEG_DISABLE;
}
+/**
+ * phylink_ethtool_ksettings_get() - get the current link settings
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @kset: a pointer to a &struct ethtool_link_ksettings to hold link settings
+ *
+ * Read the current link settings for the phylink instance specified by @pl.
+ * These will be the settings read from the MAC, the PHY or the fixed-link
+ * configuration, depending on the current negotiation mode.
+ */
int phylink_ethtool_ksettings_get(struct phylink *pl,
struct ethtool_link_ksettings *kset)
{
struct phylink_link_state link_state;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev) {
phy_ethtool_ksettings_get(pl->phydev, kset);
@@ -851,14 +1035,13 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
phylink_get_ksettings(&link_state, kset);
break;
- case MLO_AN_SGMII:
+ case MLO_AN_INBAND:
/* If there is a phy attached, then use the reported
* settings from the phy with no modification.
*/
if (pl->phydev)
break;
- case MLO_AN_8023Z:
phylink_get_mac_state(pl, &link_state);
/* The MAC is reporting the link results from its own PCS
@@ -873,6 +1056,11 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
+/**
+ * phylink_ethtool_ksettings_set() - set the link settings
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @kset: a pointer to a &struct ethtool_link_ksettings for the desired modes
+ */
int phylink_ethtool_ksettings_set(struct phylink *pl,
const struct ethtool_link_ksettings *kset)
{
@@ -880,7 +1068,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
struct phylink_link_state config;
int ret;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (kset->base.autoneg != AUTONEG_DISABLE &&
kset->base.autoneg != AUTONEG_ENABLE)
@@ -967,11 +1155,22 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set);
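
[Illustration] These two calls are designed to back a driver's ethtool_ops directly; a minimal sketch of the pass-through wiring (foo_* names hypothetical):

static int foo_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *kset)
{
	struct foo_priv *priv = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(priv->phylink, kset);
}

static int foo_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *kset)
{
	struct foo_priv *priv = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(priv->phylink, kset);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link_ksettings	= foo_get_link_ksettings,
	.set_link_ksettings	= foo_set_link_ksettings,
};
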
+/**
+ * phylink_ethtool_nway_reset() - restart negotiation
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Restart negotiation for the phylink instance specified by @pl. This will
+ * cause any attached phy to restart negotiation with the link partner, and
+ * if the MAC is in a BaseX mode, the MAC will also be requested to restart
+ * negotiation.
+ *
+ * Returns zero on success, or negative error code.
+ */
int phylink_ethtool_nway_reset(struct phylink *pl)
{
int ret = 0;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_restart_aneg(pl->phydev);
@@ -981,10 +1180,15 @@ int phylink_ethtool_nway_reset(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_ethtool_nway_reset);
+/**
+ * phylink_ethtool_get_pauseparam() - get the current pause parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pause: a pointer to a &struct ethtool_pauseparam
+ */
void phylink_ethtool_get_pauseparam(struct phylink *pl,
struct ethtool_pauseparam *pause)
{
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN);
pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX);
@@ -992,12 +1196,17 @@ void phylink_ethtool_get_pauseparam(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_get_pauseparam);
+/**
+ * phylink_ethtool_set_pauseparam() - set the current pause parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pause: a pointer to a &struct ethtool_pauseparam
+ */
int phylink_ethtool_set_pauseparam(struct phylink *pl,
struct ethtool_pauseparam *pause)
{
struct phylink_link_state *config = &pl->link_config;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (!phylink_test(pl->supported, Pause) &&
!phylink_test(pl->supported, Asym_Pause))
@@ -1030,8 +1239,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
phylink_mac_config(pl, config);
break;
- case MLO_AN_SGMII:
- case MLO_AN_8023Z:
+ case MLO_AN_INBAND:
phylink_mac_config(pl, config);
phylink_mac_an_restart(pl);
break;
@@ -1070,24 +1278,21 @@ int phylink_ethtool_get_module_eeprom(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom);
-int phylink_init_eee(struct phylink *pl, bool clk_stop_enable)
-{
- int ret = -EPROTONOSUPPORT;
-
- WARN_ON(!lockdep_rtnl_is_held());
-
- if (pl->phydev)
- ret = phy_init_eee(pl->phydev, clk_stop_enable);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(phylink_init_eee);
-
+/**
+ * phylink_ethtool_get_eee_err() - read the energy efficient ethernet error
+ * counter
+ * @pl: a pointer to a &struct phylink returned from phylink_create().
+ *
+ * Read the Energy Efficient Ethernet error counter from the PHY associated
+ * with the phylink instance specified by @pl.
+ *
+ * Returns the positive error counter value, or a negative error code.
+ */
int phylink_get_eee_err(struct phylink *pl)
{
int ret = 0;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_get_eee_err(pl->phydev);
@@ -1096,11 +1301,16 @@ int phylink_get_eee_err(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_get_eee_err);
+/**
+ * phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @eee: a pointer to a &struct ethtool_eee for the read parameters
+ */
int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
{
int ret = -EOPNOTSUPP;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_ethtool_get_eee(pl->phydev, eee);
@@ -1109,11 +1319,16 @@ int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
}
EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
+/**
+ * phylink_ethtool_set_eee() - set the energy efficient ethernet parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @eee: a pointer to a &struct ethtool_eee for the desired parameters
+ */
int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee)
{
int ret = -EOPNOTSUPP;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_ethtool_set_eee(pl->phydev, eee);
@@ -1248,9 +1463,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
case MLO_AN_PHY:
return -EOPNOTSUPP;
- case MLO_AN_SGMII:
- /* No phy, fall through to 8023z method */
- case MLO_AN_8023Z:
+ case MLO_AN_INBAND:
if (phy_id == 0) {
val = phylink_get_mac_state(pl, &state);
if (val < 0)
@@ -1275,27 +1488,44 @@ static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
case MLO_AN_PHY:
return -EOPNOTSUPP;
- case MLO_AN_SGMII:
- /* No phy, fall through to 8023z method */
- case MLO_AN_8023Z:
+ case MLO_AN_INBAND:
break;
}
return 0;
}
+/**
+ * phylink_mii_ioctl() - generic mii ioctl interface
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @ifr: a pointer to a &struct ifreq for socket ioctls
+ * @cmd: ioctl cmd to execute
+ *
+ * Perform the specified MII ioctl on the PHY attached to the phylink instance
+ * specified by @pl. If no PHY is attached, emulate the presence of the PHY.
+ *
+ * Returns: zero on success or negative error code.
+ *
+ * %SIOCGMIIPHY:
+ * read register from the current PHY.
+ * %SIOCGMIIREG:
+ * read register from the specified PHY.
+ * %SIOCSMIIREG:
+ * set a register on the specified PHY.
+ */
int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mii = if_mii(ifr);
int ret;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev) {
- /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */
+ /* PHYs only exist for MLO_AN_PHY and SGMII */
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = pl->phydev->mdio.addr;
+ /* fall through */
case SIOCGMIIREG:
ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1318,6 +1548,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = 0;
+ /* fall through */
case SIOCGMIIREG:
ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@@ -1349,7 +1580,7 @@ static int phylink_sfp_module_insert(void *upstream,
__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
struct phylink_link_state config;
phy_interface_t iface;
- int mode, ret = 0;
+ int ret = 0;
bool changed;
u8 port;
@@ -1357,14 +1588,13 @@ static int phylink_sfp_module_insert(void *upstream,
port = sfp_parse_port(pl->sfp_bus, id, support);
iface = sfp_parse_interface(pl->sfp_bus, id);
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
- mode = MLO_AN_SGMII;
- break;
case PHY_INTERFACE_MODE_1000BASEX:
- mode = MLO_AN_8023Z;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GKR:
break;
default:
return -EINVAL;
@@ -1382,16 +1612,18 @@ static int phylink_sfp_module_insert(void *upstream,
ret = phylink_validate(pl, support, &config);
if (ret) {
netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
- phylink_an_mode_str(mode), phy_modes(config.interface),
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
return ret;
}
netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(mode), phy_modes(config.interface),
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
- if (mode == MLO_AN_8023Z && pl->phydev)
+ if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
changed = !bitmap_equal(pl->supported, support,
@@ -1401,15 +1633,15 @@ static int phylink_sfp_module_insert(void *upstream,
linkmode_copy(pl->link_config.advertising, config.advertising);
}
- if (pl->link_an_mode != mode ||
+ if (pl->link_an_mode != MLO_AN_INBAND ||
pl->link_config.interface != config.interface) {
pl->link_config.interface = config.interface;
- pl->link_an_mode = mode;
+ pl->link_an_mode = MLO_AN_INBAND;
changed = true;
netdev_info(pl->netdev, "switched to %s/%s link mode\n",
- phylink_an_mode_str(mode),
+ phylink_an_mode_str(MLO_AN_INBAND),
phy_modes(config.interface));
}
@@ -1426,19 +1658,18 @@ static void phylink_sfp_link_down(void *upstream)
{
struct phylink *pl = upstream;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
+ queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
-
- netif_carrier_off(pl->netdev);
}
static void phylink_sfp_link_up(void *upstream)
{
struct phylink *pl = upstream;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
phylink_run_resolve(pl);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index dbef8002bc28..889a4dce1648 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -118,8 +118,6 @@ static struct phy_driver qs6612_driver[] = { {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = qs6612_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
} };
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index eda0a6e86918..ee3ca4a2f12b 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -13,29 +13,44 @@
* option) any later version.
*
*/
+#include <linux/bitops.h>
#include <linux/phy.h>
#include <linux/module.h>
-#define RTL821x_PHYSR 0x11
-#define RTL821x_PHYSR_DUPLEX 0x2000
-#define RTL821x_PHYSR_SPEED 0xc000
-#define RTL821x_INER 0x12
-#define RTL821x_INER_INIT 0x6400
-#define RTL821x_INSR 0x13
-#define RTL821x_PAGE_SELECT 0x1f
-#define RTL8211E_INER_LINK_STATUS 0x400
+#define RTL821x_PHYSR 0x11
+#define RTL821x_PHYSR_DUPLEX BIT(13)
+#define RTL821x_PHYSR_SPEED GENMASK(15, 14)
-#define RTL8211F_INER_LINK_STATUS 0x0010
-#define RTL8211F_INSR 0x1d
-#define RTL8211F_TX_DELAY 0x100
+#define RTL821x_INER 0x12
+#define RTL8211B_INER_INIT 0x6400
+#define RTL8211E_INER_LINK_STATUS BIT(10)
+#define RTL8211F_INER_LINK_STATUS BIT(4)
-#define RTL8201F_ISR 0x1e
-#define RTL8201F_IER 0x13
+#define RTL821x_INSR 0x13
+
+#define RTL821x_PAGE_SELECT 0x1f
+
+#define RTL8211F_INSR 0x1d
+
+#define RTL8211F_TX_DELAY BIT(8)
+
+#define RTL8201F_ISR 0x1e
+#define RTL8201F_IER 0x13
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
+static int rtl821x_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, RTL821x_PAGE_SELECT);
+}
+
+static int rtl821x_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
+}
+
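
[Illustration] With read_page/write_page in place, the phylib paged helpers (phy_read_paged(), phy_write_paged(), phy_modify_paged()) replace the open-coded select/access/restore sequences below. Conceptually they behave like this sketch — a simplification, since the real core helpers also take the MDIO bus lock and handle errors while restoring the page:

static int sketch_phy_read_paged(struct phy_device *phydev,
				 int page, u32 regnum)
{
	int oldpage, ret;

	oldpage = phydev->drv->read_page(phydev);	/* save current page */
	phydev->drv->write_page(phydev, page);		/* select target page */
	ret = __phy_read(phydev, regnum);		/* unlocked register read */
	phydev->drv->write_page(phydev, oldpage);	/* restore previous page */

	return ret;
}
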
static int rtl8201_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -58,31 +73,21 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
{
int err;
- phy_write(phydev, RTL821x_PAGE_SELECT, 0xa43);
- err = phy_read(phydev, RTL8211F_INSR);
- /* restore to default page 0 */
- phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+ err = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
return (err < 0) ? err : 0;
}
static int rtl8201_config_intr(struct phy_device *phydev)
{
- int err;
-
- /* switch to page 7 */
- phy_write(phydev, RTL821x_PAGE_SELECT, 0x7);
+ u16 val;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, RTL8201F_IER,
- BIT(13) | BIT(12) | BIT(11));
+ val = BIT(13) | BIT(12) | BIT(11);
else
- err = phy_write(phydev, RTL8201F_IER, 0);
-
- /* restore to default page 0 */
- phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+ val = 0;
- return err;
+ return phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
}
static int rtl8211b_config_intr(struct phy_device *phydev)
@@ -91,7 +96,7 @@ static int rtl8211b_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, RTL821x_INER,
- RTL821x_INER_INIT);
+ RTL8211B_INER_INIT);
else
err = phy_write(phydev, RTL821x_INER, 0);
@@ -113,43 +118,31 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
static int rtl8211f_config_intr(struct phy_device *phydev)
{
- int err;
+ u16 val;
- phy_write(phydev, RTL821x_PAGE_SELECT, 0xa42);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, RTL821x_INER,
- RTL8211F_INER_LINK_STATUS);
+ val = RTL8211F_INER_LINK_STATUS;
else
- err = phy_write(phydev, RTL821x_INER, 0);
- phy_write(phydev, RTL821x_PAGE_SELECT, 0);
+ val = 0;
- return err;
+ return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
}
static int rtl8211f_config_init(struct phy_device *phydev)
{
int ret;
- u16 reg;
+ u16 val = 0;
ret = genphy_config_init(phydev);
if (ret < 0)
return ret;
- phy_write(phydev, RTL821x_PAGE_SELECT, 0xd08);
- reg = phy_read(phydev, 0x11);
-
/* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- reg |= RTL8211F_TX_DELAY;
- else
- reg &= ~RTL8211F_TX_DELAY;
-
- phy_write(phydev, 0x11, reg);
- /* restore to default page 0 */
- phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+ val = RTL8211F_TX_DELAY;
- return 0;
+ return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
}
static struct phy_driver realtek_drvs[] = {
@@ -159,28 +152,24 @@ static struct phy_driver realtek_drvs[] = {
.phy_id_mask = 0x0000ffff,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
}, {
.phy_id = 0x001cc816,
.name = "RTL8201F 10/100Mbps Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
.phy_id = 0x001cc912,
.name = "RTL8211B Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
}, {
@@ -189,8 +178,6 @@ static struct phy_driver realtek_drvs[] = {
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
.suspend = genphy_suspend,
@@ -201,8 +188,6 @@ static struct phy_driver realtek_drvs[] = {
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
.suspend = genphy_suspend,
@@ -213,13 +198,13 @@ static struct phy_driver realtek_drvs[] = {
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
.config_init = &rtl8211f_config_init,
- .read_status = &genphy_read_status,
.ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
},
};
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index c092af137056..f1da70b9b55f 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -213,7 +213,6 @@ static struct phy_driver rockchip_phy_driver[] = {
.soft_reset = genphy_soft_reset,
.config_init = rockchip_integrated_phy_config_init,
.config_aneg = rockchip_config_aneg,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = rockchip_phy_resume,
},
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 8a1b1f4c1b7c..8961209ee949 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -8,10 +8,14 @@
#include "sfp.h"
+/**
+ * struct sfp_bus - internal representation of an SFP bus
+ */
struct sfp_bus {
+ /* private: */
struct kref kref;
struct list_head node;
- struct device_node *device_node;
+ struct fwnode_handle *fwnode;
const struct sfp_socket_ops *socket_ops;
struct device *sfp_dev;
@@ -26,6 +30,20 @@ struct sfp_bus {
bool started;
};
+/**
+ * sfp_parse_port() - Parse the EEPROM base ID, setting the port type
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @support: optional pointer to an array of unsigned long for the
+ * ethtool support mask
+ *
+ * Parse the EEPROM identification given in @id, and return one of
+ * %PORT_TP, %PORT_DA, %PORT_FIBRE or %PORT_OTHER. If @support is
+ * non-%NULL, also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT
+ * corresponding to the connector type.
+ *
+ * If the port type is not known, returns %PORT_OTHER.
+ */
int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support)
{
@@ -39,21 +57,19 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFP_CONNECTOR_MT_RJ:
case SFP_CONNECTOR_MU:
case SFP_CONNECTOR_OPTICAL_PIGTAIL:
- if (support)
- phylink_set(support, FIBRE);
port = PORT_FIBRE;
break;
case SFP_CONNECTOR_RJ45:
- if (support)
- phylink_set(support, TP);
port = PORT_TP;
break;
+ case SFP_CONNECTOR_COPPER_PIGTAIL:
+ port = PORT_DA;
+ break;
+
case SFP_CONNECTOR_UNSPEC:
if (id->base.e1000_base_t) {
- if (support)
- phylink_set(support, TP);
port = PORT_TP;
break;
}
@@ -62,7 +78,6 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFP_CONNECTOR_MPO_1X12:
case SFP_CONNECTOR_MPO_2X16:
case SFP_CONNECTOR_HSSDC_II:
- case SFP_CONNECTOR_COPPER_PIGTAIL:
case SFP_CONNECTOR_NOSEPARATE:
case SFP_CONNECTOR_MXC_2X16:
port = PORT_OTHER;
@@ -74,10 +89,40 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
break;
}
+ if (support) {
+ switch (port) {
+ case PORT_FIBRE:
+ phylink_set(support, FIBRE);
+ break;
+
+ case PORT_TP:
+ phylink_set(support, TP);
+ break;
+ }
+ }
+
return port;
}
EXPORT_SYMBOL_GPL(sfp_parse_port);
+/**
+ * sfp_parse_interface() - Parse the phy_interface_t
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ *
+ * Derive the phy_interface_t mode for the information found in the
+ * module's identifying EEPROM. There is no standard or defined way
+ * to derive this information, so we use some heuristics.
+ *
+ * If the encoding is 64b66b, then the module must be >= 10G, so
+ * return %PHY_INTERFACE_MODE_10GKR.
+ *
+ * If it's 8b10b, then it's 1G or slower. If it's definitely a fibre
+ * module, return %PHY_INTERFACE_MODE_1000BASEX mode, otherwise return
+ * %PHY_INTERFACE_MODE_SGMII mode.
+ *
+ * If the encoding is not known, return %PHY_INTERFACE_MODE_NA.
+ */
phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
const struct sfp_eeprom_id *id)
{
@@ -107,6 +152,11 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
break;
default:
+ if (id->base.e1000_base_cx) {
+ iface = PHY_INTERFACE_MODE_1000BASEX;
+ break;
+ }
+
iface = PHY_INTERFACE_MODE_NA;
dev_err(bus->sfp_dev,
"SFP module encoding does not support 8b10b nor 64b66b\n");
@@ -117,13 +167,38 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
}
EXPORT_SYMBOL_GPL(sfp_parse_interface);
+/**
+ * sfp_parse_support() - Parse the eeprom id for supported link modes
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @support: pointer to an array of unsigned long for the ethtool support mask
+ *
+ * Parse the EEPROM identification information and derive the supported
+ * ethtool link modes for the module.
+ */
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support)
{
+ unsigned int br_min, br_nom, br_max;
+
phylink_set(support, Autoneg);
phylink_set(support, Pause);
phylink_set(support, Asym_Pause);
+ /* Decode the bitrate information to MBd */
+ br_min = br_nom = br_max = 0;
+ if (id->base.br_nominal) {
+ if (id->base.br_nominal != 255) {
+ br_nom = id->base.br_nominal * 100;
+ br_min = br_nom - id->base.br_nominal * id->ext.br_min;
+ br_max = br_nom + id->base.br_nominal * id->ext.br_max;
+ } else if (id->ext.br_max) {
+ br_nom = 250 * id->ext.br_max;
+ br_max = br_nom + br_nom * id->ext.br_min / 100;
+ br_min = br_nom - br_nom * id->ext.br_min / 100;
+ }
+ }
+
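
[Illustration] To make the decode arithmetic concrete, two illustrative (invented) EEPROMs:

	/* Plain encoding: br_nominal = 103 (units of 100 MBd),
	 *                 ext.br_min = 6, ext.br_max = 4 (percent):
	 *   br_nom = 103 * 100        = 10300 MBd
	 *   br_min = 10300 - 103 * 6  =  9682 MBd
	 *   br_max = 10300 + 103 * 4  = 10712 MBd
	 *
	 * Escape encoding: br_nominal = 255, ext.br_max = 103 (units of
	 * 250 MBd), ext.br_min = 10 (percent range around nominal):
	 *   br_nom = 250 * 103              = 25750 MBd
	 *   br_max = 25750 + 25750*10/100   = 28325 MBd
	 *   br_min = 25750 - 25750*10/100   = 23175 MBd
	 */
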
/* Set ethtool support from the compliance fields. */
if (id->base.e10g_base_sr)
phylink_set(support, 10000baseSR_Full);
@@ -142,6 +217,34 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
phylink_set(support, 1000baseT_Full);
}
+ /* 1000Base-PX or 1000Base-BX10 */
+ if ((id->base.e_base_px || id->base.e_base_bx10) &&
+ br_min <= 1300 && br_max >= 1200)
+ phylink_set(support, 1000baseX_Full);
+
+ /* For active or passive cables, select the link modes
+ * based on the bit rates and the cable compliance bytes.
+ */
+ if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
+ /* This may look odd, but some manufacturers use 12000MBd */
+ if (br_min <= 12000 && br_max >= 10300)
+ phylink_set(support, 10000baseCR_Full);
+ if (br_min <= 3200 && br_max >= 3100)
+ phylink_set(support, 2500baseX_Full);
+ if (br_min <= 1300 && br_max >= 1200)
+ phylink_set(support, 1000baseX_Full);
+ }
+ if (id->base.sfp_ct_passive) {
+ if (id->base.passive.sff8431_app_e)
+ phylink_set(support, 10000baseCR_Full);
+ }
+ if (id->base.sfp_ct_active) {
+ if (id->base.active.sff8431_app_e ||
+ id->base.active.sff8431_lim) {
+ phylink_set(support, 10000baseCR_Full);
+ }
+ }
+
switch (id->base.extended_cc) {
case 0x00: /* Unspecified */
break;
@@ -175,35 +278,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
if (id->base.br_nominal >= 12)
phylink_set(support, 1000baseX_Full);
}
-
- switch (id->base.connector) {
- case SFP_CONNECTOR_SC:
- case SFP_CONNECTOR_FIBERJACK:
- case SFP_CONNECTOR_LC:
- case SFP_CONNECTOR_MT_RJ:
- case SFP_CONNECTOR_MU:
- case SFP_CONNECTOR_OPTICAL_PIGTAIL:
- break;
-
- case SFP_CONNECTOR_UNSPEC:
- if (id->base.e1000_base_t)
- break;
-
- case SFP_CONNECTOR_SG: /* guess */
- case SFP_CONNECTOR_MPO_1X12:
- case SFP_CONNECTOR_MPO_2X16:
- case SFP_CONNECTOR_HSSDC_II:
- case SFP_CONNECTOR_COPPER_PIGTAIL:
- case SFP_CONNECTOR_NOSEPARATE:
- case SFP_CONNECTOR_MXC_2X16:
- default:
- /* a guess at the supported link modes */
- dev_warn(bus->sfp_dev,
- "Guessing link modes, please report...\n");
- phylink_set(support, 1000baseT_Half);
- phylink_set(support, 1000baseT_Full);
- break;
- }
}
EXPORT_SYMBOL_GPL(sfp_parse_support);
@@ -215,7 +289,7 @@ static const struct sfp_upstream_ops *sfp_get_upstream_ops(struct sfp_bus *bus)
return bus->registered ? bus->upstream_ops : NULL;
}
-static struct sfp_bus *sfp_bus_get(struct device_node *np)
+static struct sfp_bus *sfp_bus_get(struct fwnode_handle *fwnode)
{
struct sfp_bus *sfp, *new, *found = NULL;
@@ -224,7 +298,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
mutex_lock(&sfp_mutex);
list_for_each_entry(sfp, &sfp_buses, node) {
- if (sfp->device_node == np) {
+ if (sfp->fwnode == fwnode) {
kref_get(&sfp->kref);
found = sfp;
break;
@@ -233,7 +307,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
if (!found && new) {
kref_init(&new->kref);
- new->device_node = np;
+ new->fwnode = fwnode;
list_add(&new->node, &sfp_buses);
found = new;
new = NULL;
@@ -246,7 +320,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
return found;
}
-static void sfp_bus_release(struct kref *kref) __releases(sfp_mutex)
+static void sfp_bus_release(struct kref *kref)
{
struct sfp_bus *bus = container_of(kref, struct sfp_bus, kref);
@@ -293,6 +367,16 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
bus->registered = false;
}
+/**
+ * sfp_get_module_info() - Get the ethtool_modinfo for a SFP module
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @modinfo: a &struct ethtool_modinfo
+ *
+ * Fill in the type and eeprom_len parameters in @modinfo for a module on
+ * the sfp bus specified by @bus.
+ *
+ * Returns 0 on success or a negative errno number.
+ */
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
{
if (!bus->registered)
@@ -301,6 +385,17 @@ int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
}
EXPORT_SYMBOL_GPL(sfp_get_module_info);
+/**
+ * sfp_get_module_eeprom() - Read the SFP module EEPROM
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @ee: a &struct ethtool_eeprom
+ * @data: buffer to contain the EEPROM data (must be at least @ee->len bytes)
+ *
+ * Read the EEPROM as specified by the supplied @ee. See the documentation
+ * for &struct ethtool_eeprom for the region to be read.
+ *
+ * Returns 0 on success or a negative errno number.
+ */
int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
u8 *data)
{
@@ -310,6 +405,15 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
}
EXPORT_SYMBOL_GPL(sfp_get_module_eeprom);
+/**
+ * sfp_upstream_start() - Inform the SFP that the network device is up
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Inform the SFP socket that the network device is now up, so that the
+ * module can be enabled by allowing TX_DISABLE to be deasserted. This
+ * should be called from the network device driver's &struct net_device_ops
+ * ndo_open() method.
+ */
void sfp_upstream_start(struct sfp_bus *bus)
{
if (bus->registered)
@@ -318,6 +422,15 @@ void sfp_upstream_start(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_upstream_start);
+/**
+ * sfp_upstream_stop() - Inform the SFP that the network device is down
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Inform the SFP socket that the network device is now down, so that the
+ * module can be disabled by asserting TX_DISABLE, disabling the laser
+ * in optical modules. This should be called from the network device
+ * driver's &struct net_device_ops ndo_stop() method.
+ */
void sfp_upstream_stop(struct sfp_bus *bus)
{
if (bus->registered)
@@ -326,11 +439,24 @@ void sfp_upstream_stop(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_upstream_stop);
-struct sfp_bus *sfp_register_upstream(struct device_node *np,
+/**
+ * sfp_register_upstream() - Register the neighbouring device
+ * @fwnode: firmware node for the SFP bus
+ * @ndev: network device associated with the interface
+ * @upstream: the upstream private data
+ * @ops: the upstream's &struct sfp_upstream_ops
+ *
+ * Register the upstream device (e.g., PHY) with the SFP bus. MAC drivers
+ * should use phylink, which will call this function for them. Returns
+ * a pointer to the allocated &struct sfp_bus.
+ *
+ * On error, returns %NULL.
+ */
+struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
struct net_device *ndev, void *upstream,
const struct sfp_upstream_ops *ops)
{
- struct sfp_bus *bus = sfp_bus_get(np);
+ struct sfp_bus *bus = sfp_bus_get(fwnode);
int ret = 0;
if (bus) {
@@ -353,10 +479,18 @@ struct sfp_bus *sfp_register_upstream(struct device_node *np,
}
EXPORT_SYMBOL_GPL(sfp_register_upstream);
+/**
+ * sfp_unregister_upstream() - Unregister sfp bus
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Unregister a previously registered upstream connection for the SFP
+ * module. @bus is returned from sfp_register_upstream().
+ */
void sfp_unregister_upstream(struct sfp_bus *bus)
{
rtnl_lock();
- sfp_unregister_bus(bus);
+ if (bus->sfp)
+ sfp_unregister_bus(bus);
bus->upstream = NULL;
bus->netdev = NULL;
rtnl_unlock();
@@ -433,7 +567,7 @@ EXPORT_SYMBOL_GPL(sfp_module_remove);
struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
const struct sfp_socket_ops *ops)
{
- struct sfp_bus *bus = sfp_bus_get(dev->of_node);
+ struct sfp_bus *bus = sfp_bus_get(dev->fwnode);
int ret = 0;
if (bus) {
@@ -459,7 +593,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
void sfp_unregister_socket(struct sfp_bus *bus)
{
rtnl_lock();
- sfp_unregister_bus(bus);
+ if (bus->netdev)
+ sfp_unregister_bus(bus);
bus->sfp_dev = NULL;
bus->sfp = NULL;
bus->socket_ops = NULL;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 9dfc1c4c954f..6c7d9289078d 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -98,12 +98,18 @@ static const enum gpiod_flags gpio_flags[] = {
static DEFINE_MUTEX(sfp_mutex);
+struct sff_data {
+ unsigned int gpios;
+ bool (*module_supported)(const struct sfp_eeprom_id *id);
+};
+
struct sfp {
struct device *dev;
struct i2c_adapter *i2c;
struct mii_bus *i2c_mii;
struct sfp_bus *sfp_bus;
struct phy_device *mod_phy;
+ const struct sff_data *type;
unsigned int (*get_state)(struct sfp *);
void (*set_state)(struct sfp *, unsigned int);
@@ -123,6 +129,36 @@ struct sfp {
struct sfp_eeprom_id id;
};
+static bool sff_module_supported(const struct sfp_eeprom_id *id)
+{
+ return id->base.phys_id == SFP_PHYS_ID_SFF &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+}
+
+static const struct sff_data sff_data = {
+ .gpios = SFP_F_LOS | SFP_F_TX_FAULT | SFP_F_TX_DISABLE,
+ .module_supported = sff_module_supported,
+};
+
+static bool sfp_module_supported(const struct sfp_eeprom_id *id)
+{
+ return id->base.phys_id == SFP_PHYS_ID_SFP &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+}
+
+static const struct sff_data sfp_data = {
+ .gpios = SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT |
+ SFP_F_TX_DISABLE | SFP_F_RATE_SELECT,
+ .module_supported = sfp_module_supported,
+};
+
+static const struct of_device_id sfp_of_match[] = {
+ { .compatible = "sff,sff", .data = &sff_data, },
+ { .compatible = "sff,sfp", .data = &sfp_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sfp_of_match);
+
static unsigned long poll_jiffies;
static unsigned int sfp_gpio_get_state(struct sfp *sfp)
@@ -141,6 +177,11 @@ static unsigned int sfp_gpio_get_state(struct sfp *sfp)
return state;
}
+static unsigned int sff_gpio_get_state(struct sfp *sfp)
+{
+ return sfp_gpio_get_state(sfp) | SFP_F_PRESENT;
+}
+
static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state)
{
if (state & SFP_F_PRESENT) {
@@ -315,12 +356,12 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
msleep(T_PHY_RESET_MS);
phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
- if (IS_ERR(phy)) {
- dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
+ if (phy == ERR_PTR(-ENODEV)) {
+ dev_info(sfp->dev, "no PHY detected\n");
return;
}
- if (!phy) {
- dev_info(sfp->dev, "no PHY detected\n");
+ if (IS_ERR(phy)) {
+ dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
return;
}
@@ -425,11 +466,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
{
/* SFP module inserted - read I2C data */
struct sfp_eeprom_id id;
- char vendor[17];
- char part[17];
- char sn[17];
- char date[9];
- char rev[5];
u8 check;
int err;
@@ -465,24 +501,17 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
sfp->id = id;
- memcpy(vendor, sfp->id.base.vendor_name, 16);
- vendor[16] = '\0';
- memcpy(part, sfp->id.base.vendor_pn, 16);
- part[16] = '\0';
- memcpy(rev, sfp->id.base.vendor_rev, 4);
- rev[4] = '\0';
- memcpy(sn, sfp->id.ext.vendor_sn, 16);
- sn[16] = '\0';
- memcpy(date, sfp->id.ext.datecode, 8);
- date[8] = '\0';
-
- dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n",
- vendor, part, rev, sn, date);
-
- /* We only support SFP modules, not the legacy GBIC modules. */
- if (sfp->id.base.phys_id != SFP_PHYS_ID_SFP ||
- sfp->id.base.phys_ext_id != SFP_PHYS_EXT_ID_SFP) {
- dev_err(sfp->dev, "module is not SFP - phys id 0x%02x 0x%02x\n",
+ dev_info(sfp->dev, "module %.*s %.*s rev %.*s sn %.*s dc %.*s\n",
+ (int)sizeof(id.base.vendor_name), id.base.vendor_name,
+ (int)sizeof(id.base.vendor_pn), id.base.vendor_pn,
+ (int)sizeof(id.base.vendor_rev), id.base.vendor_rev,
+ (int)sizeof(id.ext.vendor_sn), id.ext.vendor_sn,
+ (int)sizeof(id.ext.datecode), id.ext.datecode);
+
+ /* Check whether we support this module */
+ if (!sfp->type->module_supported(&sfp->id)) {
+ dev_err(sfp->dev,
+ "module is not supported - phys id 0x%02x 0x%02x\n",
sfp->id.base.phys_id, sfp->id.base.phys_ext_id);
return -EINVAL;
}
@@ -683,20 +712,19 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN);
len -= first;
- ret = sfp->read(sfp, false, first, data, len);
+ ret = sfp_read(sfp, false, first, data, len);
if (ret < 0)
return ret;
first += len;
data += len;
}
- if (first >= ETH_MODULE_SFF_8079_LEN &&
- first < ETH_MODULE_SFF_8472_LEN) {
+ if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) {
len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN);
len -= first;
first -= ETH_MODULE_SFF_8079_LEN;
- ret = sfp->read(sfp, true, first, data, len);
+ ret = sfp_read(sfp, true, first, data, len);
if (ret < 0)
return ret;
}
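
[Illustration] A worked pass through the bounds logic above, for an invented request of ee->offset = 200, ee->len = 100 (so first = 200, last = 300):

	/* Block 1: first (200) < 256, so len = min(300, 256) - 200 = 56;
	 *          read 56 bytes from the first EEPROM region at offset
	 *          200, after which first advances to 256.
	 * Block 2: first (256) < 512 and last (300) > 256, so
	 *          len = min(300, 512) - 256 = 44; read 44 bytes from the
	 *          second region (selected by the 'true' argument to
	 *          sfp_read()) at offset 256 - 256 = 0.
	 */
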
@@ -801,6 +829,7 @@ static void sfp_cleanup(void *data)
static int sfp_probe(struct platform_device *pdev)
{
+ const struct sff_data *sff;
struct sfp *sfp;
bool poll = false;
int irq, err, i;
@@ -815,10 +844,19 @@ static int sfp_probe(struct platform_device *pdev)
if (err < 0)
return err;
+ sff = sfp->type = &sfp_data;
+
if (pdev->dev.of_node) {
struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *id;
struct device_node *np;
+ id = of_match_node(sfp_of_match, node);
+ if (WARN_ON(!id))
+ return -EINVAL;
+
+ sff = sfp->type = id->data;
+
np = of_parse_phandle(node, "i2c-bus", 0);
if (np) {
struct i2c_adapter *i2c;
@@ -834,17 +872,22 @@ static int sfp_probe(struct platform_device *pdev)
return err;
}
}
+ }
- for (i = 0; i < GPIO_MAX; i++) {
+ for (i = 0; i < GPIO_MAX; i++)
+ if (sff->gpios & BIT(i)) {
sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev,
gpio_of_names[i], gpio_flags[i]);
if (IS_ERR(sfp->gpio[i]))
return PTR_ERR(sfp->gpio[i]);
}
- sfp->get_state = sfp_gpio_get_state;
- sfp->set_state = sfp_gpio_set_state;
- }
+ sfp->get_state = sfp_gpio_get_state;
+ sfp->set_state = sfp_gpio_set_state;
+
+ /* Modules that have no detect signal are always present */
+ if (!sfp->gpio[GPIO_MODDEF0])
+ sfp->get_state = sff_gpio_get_state;
sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
if (!sfp->sfp_bus)
@@ -899,12 +942,6 @@ static int sfp_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id sfp_of_match[] = {
- { .compatible = "sff,sfp", },
- { },
-};
-MODULE_DEVICE_TABLE(of, sfp_of_match);
-
static struct platform_driver sfp_driver = {
.probe = sfp_probe,
.remove = sfp_remove,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2306bfae057f..be399d645224 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -227,8 +227,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
@@ -249,8 +247,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
@@ -276,7 +272,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
@@ -303,8 +298,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.config_init = lan911x_config_init,
/* IRQ related */
@@ -319,12 +312,11 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8710/LAN8720",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
@@ -351,7 +343,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index d00cfb64529e..fbd548a1ad84 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -89,8 +89,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
.flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
.suspend = genphy_suspend,
@@ -102,8 +100,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
.flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 96b33475ea5e..55f48ee3595a 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -95,7 +95,6 @@ static struct phy_driver upd60620_driver[1] = { {
.features = PHY_BASIC_FEATURES,
.flags = 0,
.config_init = upd60620_config_init,
- .config_aneg = genphy_config_aneg,
.read_status = upd60620_read_status,
} };
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index f78ff0279648..d9dd8fbfffc7 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -267,7 +267,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -278,7 +277,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -289,7 +287,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -300,7 +297,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -311,7 +307,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -321,8 +316,6 @@ static struct phy_driver vsc82xx_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8601_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -333,7 +326,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -344,8 +336,6 @@ static struct phy_driver vsc82xx_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -356,8 +346,6 @@ static struct phy_driver vsc82xx_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
} };
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 1b28e6e702f5..bdc4d23627c5 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -334,7 +334,7 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
}
/* No kernel lock - fine */
-static unsigned int
+static __poll_t
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
return 0;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d8e5747ff4e3..ef6b2126b23a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -531,10 +531,10 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
}
/* No kernel lock - fine */
-static unsigned int ppp_poll(struct file *file, poll_table *wait)
+static __poll_t ppp_poll(struct file *file, poll_table *wait)
{
struct ppp_file *pf = file->private_data;
- unsigned int mask;
+ __poll_t mask;
if (!pf)
return 0;
@@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
if (!ifname_is_set)
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+ mutex_unlock(&pn->all_ppp_mutex);
+
ret = register_netdevice(ppp->dev);
if (ret < 0)
goto err_unit;
atomic_inc(&ppp_unit_count);
- mutex_unlock(&pn->all_ppp_mutex);
-
return 0;
err_unit:
+ mutex_lock(&pn->all_ppp_mutex);
unit_put(&pn->units_idr, ppp->file.index);
err:
mutex_unlock(&pn->all_ppp_mutex);
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 7196f00f0991..047f6c68a441 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -327,7 +327,7 @@ ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
}
/* No kernel lock - fine */
-static unsigned int
+static __poll_t
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
return 0;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 4e1da1645b15..5aa59f41bf8c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
struct pppoe_hdr *ph;
struct net_device *dev;
char *start;
+ int hlen;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
if (total_len > (dev->mtu + dev->hard_header_len))
goto end;
-
- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
- 0, GFP_KERNEL);
+ hlen = LL_RESERVED_SPACE(dev);
+ skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
+ dev->needed_tailroom, 0, GFP_KERNEL);
if (!skb) {
error = -ENOMEM;
goto end;
}
/* Reserve space for headers. */
- skb_reserve(skb, dev->hard_header_len);
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
/* Copy the data if there is no space for the header or if it's
* read-only.
*/
- if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
+ if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
goto abort;
__skb_push(skb, sizeof(*ph));
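Both pppoe hunks replace bare dev->hard_header_len with LL_RESERVED_SPACE(dev), which also accounts for dev->needed_headroom (extra space requested by stacked or underlying devices) and pads the sum for alignment. A paraphrase of the mainline macro, for reference (HH_DATA_MOD is 16; the authoritative expression is in include/linux/netdevice.h):

/* Paraphrase of LL_RESERVED_SPACE(): link-layer header length plus any
 * device-requested headroom, padded up to a multiple of HH_DATA_MOD.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)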
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cc63102ca96e..8940417c30e5 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -731,7 +731,7 @@ static void sl_sync(void)
/* Find a free SLIP channel, and link in this `tty' line. */
-static struct slip *sl_alloc(dev_t line)
+static struct slip *sl_alloc(void)
{
int i;
char name[IFNAMSIZ];
@@ -809,7 +809,7 @@ static int slip_open(struct tty_struct *tty)
/* OK. Find a free SLIP channel to use. */
err = -ENFILE;
- sl = sl_alloc(tty_devnum(tty));
+ sl = sl_alloc();
if (sl == NULL)
goto err_exit;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 0a886fda0129..0a5ed004781c 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -330,9 +330,6 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
if (!q)
return RX_HANDLER_PASS;
- if (__skb_array_full(&q->skb_array))
- goto drop;
-
skb_push(skb, ETH_HLEN);
/* Apply the forward feature mask so that we perform segmentation
@@ -348,7 +345,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
goto drop;
if (!segs) {
- if (skb_array_produce(&q->skb_array, skb))
+ if (ptr_ring_produce(&q->ring, skb))
goto drop;
goto wake_up;
}
@@ -358,7 +355,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct sk_buff *nskb = segs->next;
segs->next = NULL;
- if (skb_array_produce(&q->skb_array, segs)) {
+ if (ptr_ring_produce(&q->ring, segs)) {
kfree_skb(segs);
kfree_skb_list(nskb);
break;
@@ -375,7 +372,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
!(features & NETIF_F_CSUM_MASK) &&
skb_checksum_help(skb))
goto drop;
- if (skb_array_produce(&q->skb_array, skb))
+ if (ptr_ring_produce(&q->ring, skb))
goto drop;
}
@@ -497,7 +494,7 @@ static void tap_sock_destruct(struct sock *sk)
{
struct tap_queue *q = container_of(sk, struct tap_queue, sk);
- skb_array_cleanup(&q->skb_array);
+ ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}
static int tap_open(struct inode *inode, struct file *file)
@@ -517,7 +514,7 @@ static int tap_open(struct inode *inode, struct file *file)
&tap_proto, 0);
if (!q)
goto err;
- if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+ if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
sk_free(&q->sk);
goto err;
}
@@ -546,7 +543,7 @@ static int tap_open(struct inode *inode, struct file *file)
err = tap_set_queue(tap, file, q);
if (err) {
- /* tap_sock_destruct() will take care of freeing skb_array */
+ /* tap_sock_destruct() will take care of freeing ptr_ring */
goto err_put;
}
@@ -572,10 +569,10 @@ static int tap_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int tap_poll(struct file *file, poll_table *wait)
+static __poll_t tap_poll(struct file *file, poll_table *wait)
{
struct tap_queue *q = file->private_data;
- unsigned int mask = POLLERR;
+ __poll_t mask = POLLERR;
if (!q)
goto out;
@@ -583,7 +580,7 @@ static unsigned int tap_poll(struct file *file, poll_table *wait)
mask = 0;
poll_wait(file, &q->wq.wait, wait);
- if (!skb_array_empty(&q->skb_array))
+ if (!ptr_ring_empty(&q->ring))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(&q->sk) ||
@@ -844,7 +841,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
TASK_INTERRUPTIBLE);
/* Read frames from the queue */
- skb = skb_array_consume(&q->skb_array);
+ skb = ptr_ring_consume(&q->ring);
if (skb)
break;
if (noblock) {
@@ -1176,7 +1173,7 @@ static int tap_peek_len(struct socket *sock)
{
struct tap_queue *q = container_of(sock, struct tap_queue,
sock);
- return skb_array_peek_len(&q->skb_array);
+ return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}
/* Ops structure to mimic raw sockets with tun */
@@ -1202,7 +1199,7 @@ struct socket *tap_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tap_get_socket);
-struct skb_array *tap_get_skb_array(struct file *file)
+struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
struct tap_queue *q;
@@ -1211,29 +1208,30 @@ struct skb_array *tap_get_skb_array(struct file *file)
q = file->private_data;
if (!q)
return ERR_PTR(-EBADFD);
- return &q->skb_array;
+ return &q->ring;
}
-EXPORT_SYMBOL_GPL(tap_get_skb_array);
+EXPORT_SYMBOL_GPL(tap_get_ptr_ring);
int tap_queue_resize(struct tap_dev *tap)
{
struct net_device *dev = tap->dev;
struct tap_queue *q;
- struct skb_array **arrays;
+ struct ptr_ring **rings;
int n = tap->numqueues;
int ret, i = 0;
- arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
- if (!arrays)
+ rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
return -ENOMEM;
list_for_each_entry(q, &tap->queue_list, next)
- arrays[i++] = &q->skb_array;
+ rings[i++] = &q->ring;
- ret = skb_array_resize_multiple(arrays, n,
- dev->tx_queue_len, GFP_KERNEL);
+ ret = ptr_ring_resize_multiple(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);
- kfree(arrays);
+ kfree(rings);
return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);
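All of the tap.c changes above are a mechanical skb_array -> ptr_ring conversion: skb_array is a thin typed wrapper around ptr_ring, and dropping the wrapper lets the same ring later carry tagged XDP pointers alongside sk_buffs (see the tun.c hunks below). The price is naming an explicit destructor wherever queued entries can be dropped. A minimal usage sketch of the API as it appears in this patch:

/* Minimal ptr_ring usage sketch (assumes <linux/ptr_ring.h> and
 * <linux/skb_array.h> for __skb_array_destroy_skb).
 */
static int ring_demo(struct ptr_ring *ring, struct sk_buff *skb)
{
	int err = ptr_ring_init(ring, 256, GFP_KERNEL);	/* bounded FIFO */
	if (err)
		return err;

	if (ptr_ring_produce(ring, skb))	/* non-zero when full */
		kfree_skb(skb);

	skb = ptr_ring_consume(ring);		/* NULL when empty */
	kfree_skb(skb);

	/* the destructor runs on anything still queued */
	ptr_ring_cleanup(ring, __skb_array_destroy_skb);
	return 0;
}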
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4f4a842a1c9c..0dc66e4fbb2c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -179,7 +179,8 @@ struct tun_file {
struct mutex napi_mutex; /* Protects access to the above napi */
struct list_head next;
struct tun_struct *detached;
- struct skb_array tx_array;
+ struct ptr_ring tx_ring;
+ struct xdp_rxq_info xdp_rxq;
};
struct tun_flow_entry {
@@ -195,6 +196,11 @@ struct tun_flow_entry {
#define TUN_NUM_FLOW_ENTRIES 1024
+struct tun_prog {
+ struct rcu_head rcu;
+ struct bpf_prog *prog;
+};
+
/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
@@ -232,8 +238,33 @@ struct tun_struct {
u32 rx_batched;
struct tun_pcpu_stats __percpu *pcpu_stats;
struct bpf_prog __rcu *xdp_prog;
+ struct tun_prog __rcu *steering_prog;
+ struct tun_prog __rcu *filter_prog;
};
+struct veth {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+};
+
+bool tun_is_xdp_buff(void *ptr)
+{
+ return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+EXPORT_SYMBOL(tun_is_xdp_buff);
+
+void *tun_xdp_to_ptr(void *ptr)
+{
+ return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_xdp_to_ptr);
+
+void *tun_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_ptr_to_xdp);
+
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
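The tun_is_xdp_buff()/tun_xdp_to_ptr()/tun_ptr_to_xdp() helpers above are classic low-bit pointer tagging: TUN_XDP_FLAG (bit 0, defined in the matching include/linux/if_tun.h change) is free to use because both sk_buffs and XDP buffers are at least word-aligned. Round-trip sketch:

/* Tagged-pointer round trip (assumes TUN_XDP_FLAG == 0x1UL and at
 * least 2-byte-aligned allocations, which kmalloc/page memory gives).
 */
static void tag_demo(struct xdp_buff *xdp)
{
	void *entry = tun_xdp_to_ptr(xdp);	/* set the tag bit  */

	if (tun_is_xdp_buff(entry)) {		/* test the tag bit */
		struct xdp_buff *x = tun_ptr_to_xdp(entry);
		put_page(virt_to_head_page(x->data));	/* XDP free path */
	}
}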
@@ -537,15 +568,12 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
* different rxq no. here. If we could not get rxhash, then we would
* hope the rxq no. may help here.
*/
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
- struct tun_struct *tun = netdev_priv(dev);
struct tun_flow_entry *e;
u32 txq = 0;
u32 numqueues = 0;
- rcu_read_lock();
numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
@@ -563,10 +591,37 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
txq -= numqueues;
}
- rcu_read_unlock();
return txq;
}
+static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
+{
+ struct tun_prog *prog;
+ u16 ret = 0;
+
+ prog = rcu_dereference(tun->steering_prog);
+ if (prog)
+ ret = bpf_prog_run_clear_cb(prog->prog, skb);
+
+ return ret % tun->numqueues;
+}
+
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ u16 ret;
+
+ rcu_read_lock();
+ if (rcu_dereference(tun->steering_prog))
+ ret = tun_ebpf_select_queue(tun, skb);
+ else
+ ret = tun_automq_select_queue(tun, skb);
+ rcu_read_unlock();
+
+ return ret;
+}
+
static inline bool tun_not_capable(struct tun_struct *tun)
{
const struct cred *cred = current_cred();
@@ -600,17 +655,39 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
return tun;
}
+static void tun_ptr_free(void *ptr)
+{
+ if (!ptr)
+ return;
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ put_page(virt_to_head_page(xdp->data));
+ } else {
+ __skb_array_destroy_skb(ptr);
+ }
+}
+
static void tun_queue_purge(struct tun_file *tfile)
{
- struct sk_buff *skb;
+ void *ptr;
- while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
- kfree_skb(skb);
+ while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
+ tun_ptr_free(ptr);
skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
+static void tun_cleanup_tx_ring(struct tun_file *tfile)
+{
+ if (tfile->tx_ring.queue) {
+ ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
+ memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+ }
+}
+
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ -657,8 +734,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
- if (tun)
- skb_array_cleanup(&tfile->tx_array);
+ tun_cleanup_tx_ring(tfile);
sock_put(&tfile->sk);
}
}
@@ -673,7 +749,6 @@ static void tun_detach(struct tun_file *tfile, bool clean)
static void tun_detach_all(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
- struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog);
struct tun_file *tfile, *tmp;
int i, n = tun->numqueues;
@@ -700,17 +775,16 @@ static void tun_detach_all(struct net_device *dev)
/* Drop read queue */
tun_queue_purge(tfile);
sock_put(&tfile->sk);
+ tun_cleanup_tx_ring(tfile);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile);
tun_queue_purge(tfile);
sock_put(&tfile->sk);
+ tun_cleanup_tx_ring(tfile);
}
BUG_ON(tun->numdisabled != 0);
- if (xdp_prog)
- bpf_prog_put(xdp_prog);
-
if (tun->flags & IFF_PERSIST)
module_put(THIS_MODULE);
}
@@ -751,13 +825,29 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
}
if (!tfile->detached &&
- skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
+ ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
err = -ENOMEM;
goto out;
}
tfile->queue_index = tun->numqueues;
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
+
+ if (tfile->detached) {
+ /* Re-attach detached tfile, updating XDP queue_index */
+ WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
+
+ if (tfile->xdp_rxq.queue_index != tfile->queue_index)
+ tfile->xdp_rxq.queue_index = tfile->queue_index;
+ } else {
+ /* Set up XDP RX-queue info for the new tfile being attached */
+ err = xdp_rxq_info_reg(&tfile->xdp_rxq,
+ tun->dev, tfile->queue_index);
+ if (err < 0)
+ goto out;
+ err = 0;
+ }
+
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
@@ -937,23 +1027,10 @@ static int tun_net_close(struct net_device *dev)
}
/* Net device start xmit */
-static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
- struct tun_struct *tun = netdev_priv(dev);
- int txq = skb->queue_mapping;
- struct tun_file *tfile;
- u32 numqueues = 0;
-
- rcu_read_lock();
- tfile = rcu_dereference(tun->tfiles[txq]);
- numqueues = READ_ONCE(tun->numqueues);
-
- /* Drop packet if interface is not attached */
- if (txq >= numqueues)
- goto drop;
-
#ifdef CONFIG_RPS
- if (numqueues == 1 && static_key_false(&rps_needed)) {
+ if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
@@ -969,6 +1046,37 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#endif
+}
+
+static unsigned int run_ebpf_filter(struct tun_struct *tun,
+ struct sk_buff *skb,
+ int len)
+{
+ struct tun_prog *prog = rcu_dereference(tun->filter_prog);
+
+ if (prog)
+ len = bpf_prog_run_clear_cb(prog->prog, skb);
+
+ return len;
+}
+
+/* Net device start xmit */
+static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ int txq = skb->queue_mapping;
+ struct tun_file *tfile;
+ int len = skb->len;
+
+ rcu_read_lock();
+ tfile = rcu_dereference(tun->tfiles[txq]);
+
+ /* Drop packet if interface is not attached */
+ if (txq >= tun->numqueues)
+ goto drop;
+
+ if (!rcu_dereference(tun->steering_prog))
+ tun_automq_xmit(tun, skb);
tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
@@ -984,6 +1092,15 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
sk_filter(tfile->socket.sk, skb))
goto drop;
+ len = run_ebpf_filter(tun, skb, len);
+
+ /* Trim extra bytes since we may insert vlan proto & TCI
+ * in tun_put_user().
+ */
+ len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
+ if (len <= 0 || pskb_trim(skb, len))
+ goto drop;
+
if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
goto drop;
@@ -996,7 +1113,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset(skb);
- if (skb_array_produce(&tfile->tx_array, skb))
+ if (ptr_ring_produce(&tfile->tx_ring, skb))
goto drop;
/* Notify and wake up reader process */
@@ -1169,6 +1286,67 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_get_stats64 = tun_net_get_stats64,
};
+static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct xdp_buff *buff = xdp->data_hard_start;
+ int headroom = xdp->data - xdp->data_hard_start;
+ struct tun_file *tfile;
+ u32 numqueues;
+ int ret = 0;
+
+ /* Ensure headroom is available and buff is properly aligned */
+ if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
+ return -ENOSPC;
+
+ *buff = *xdp;
+
+ rcu_read_lock();
+
+ numqueues = READ_ONCE(tun->numqueues);
+ if (!numqueues) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+ numqueues]);
+ /* Encode the XDP flag into the lowest bit so the consumer can
+ * distinguish an XDP buffer from an sk_buff.
+ */
+ if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
+ this_cpu_inc(tun->pcpu_stats->tx_dropped);
+ ret = -ENOSPC;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static void tun_xdp_flush(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_file *tfile;
+ u32 numqueues;
+
+ rcu_read_lock();
+
+ numqueues = READ_ONCE(tun->numqueues);
+ if (!numqueues)
+ goto out;
+
+ tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+ numqueues]);
+ /* Notify and wake up reader process */
+ if (tfile->flags & TUN_FASYNC)
+ kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+
+out:
+ rcu_read_unlock();
+}
+
static const struct net_device_ops tap_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
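One subtlety in tun_xdp_xmit() above: *buff = *xdp copies the xdp_buff descriptor into the packet's own headroom (buff aliases xdp->data_hard_start), so a single tagged pointer in the ring is enough to recover both descriptor and data later; that is also why headroom smaller than sizeof(*xdp) is rejected with -ENOSPC. Layout sketch:

/*
 * Buffer layout after *buff = *xdp in tun_xdp_xmit() (sketch):
 *
 *   data_hard_start                      data            data_end
 *   |<- struct xdp_buff copy ->| ...pad |<--- packet --->|
 *            ^
 *            ring entry = tun_xdp_to_ptr(data_hard_start)
 *
 * The reader side (tun_put_user_xdp()/tun_ptr_free()) recovers the
 * descriptor from the head of the page and releases it via put_page().
 */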
@@ -1186,6 +1364,8 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
.ndo_bpf = tun_xdp,
+ .ndo_xdp_xmit = tun_xdp_xmit,
+ .ndo_xdp_flush = tun_xdp_flush,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1248,12 +1428,12 @@ static void tun_net_init(struct net_device *dev)
/* Character device part */
/* Poll */
-static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
+static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = tun_get(tfile);
struct sock *sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!tun)
return POLLERR;
@@ -1264,7 +1444,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
poll_wait(file, sk_sleep(sk), wait);
- if (!skb_array_empty(&tfile->tx_array))
+ if (!ptr_ring_empty(&tfile->tx_ring))
mask |= POLLIN | POLLRDNORM;
if (tun->dev->flags & IFF_UP &&
@@ -1477,6 +1657,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
xdp.data = buf + pad;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &tfile->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -1551,7 +1732,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int copylen;
bool zerocopy = false;
int err;
- u32 rxhash;
+ u32 rxhash = 0;
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tun);
@@ -1739,7 +1920,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rcu_read_unlock();
}
- rxhash = __skb_get_hash_symmetric(skb);
+ rcu_read_lock();
+ if (!rcu_dereference(tun->steering_prog))
+ rxhash = __skb_get_hash_symmetric(skb);
+ rcu_read_unlock();
if (frags) {
/* Exercise flow dissector code path. */
@@ -1783,7 +1967,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(stats);
- tun_flow_update(tun, rxhash, tfile);
+ if (rxhash)
+ tun_flow_update(tun, rxhash, tfile);
+
return total_len;
}
@@ -1804,6 +1990,40 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
return result;
}
+static ssize_t tun_put_user_xdp(struct tun_struct *tun,
+ struct tun_file *tfile,
+ struct xdp_buff *xdp,
+ struct iov_iter *iter)
+{
+ int vnet_hdr_sz = 0;
+ size_t size = xdp->data_end - xdp->data;
+ struct tun_pcpu_stats *stats;
+ size_t ret;
+
+ if (tun->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr gso = { 0 };
+
+ vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+ if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
+ return -EINVAL;
+ if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
+ sizeof(gso)))
+ return -EFAULT;
+ iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ }
+
+ ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
+
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += ret;
+ u64_stats_update_end(&stats->syncp);
+ put_cpu_ptr(tun->pcpu_stats);
+
+ return ret;
+}
+
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
struct tun_file *tfile,
@@ -1868,10 +2088,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (vlan_hlen) {
int ret;
- struct {
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
- } veth;
+ struct veth veth;
veth.h_vlan_proto = skb->vlan_proto;
veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
@@ -1901,15 +2118,14 @@ done:
return total;
}
-static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
- int *err)
+static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
DECLARE_WAITQUEUE(wait, current);
- struct sk_buff *skb = NULL;
+ void *ptr = NULL;
int error = 0;
- skb = skb_array_consume(&tfile->tx_array);
- if (skb)
+ ptr = ptr_ring_consume(&tfile->tx_ring);
+ if (ptr)
goto out;
if (noblock) {
error = -EAGAIN;
@@ -1920,8 +2136,8 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
current->state = TASK_INTERRUPTIBLE;
while (1) {
- skb = skb_array_consume(&tfile->tx_array);
- if (skb)
+ ptr = ptr_ring_consume(&tfile->tx_ring);
+ if (ptr)
break;
if (signal_pending(current)) {
error = -ERESTARTSYS;
@@ -1940,12 +2156,12 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
out:
*err = error;
- return skb;
+ return ptr;
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct iov_iter *to,
- int noblock, struct sk_buff *skb)
+ int noblock, void *ptr)
{
ssize_t ret;
int err;
@@ -1953,23 +2169,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
tun_debug(KERN_INFO, tun, "tun_do_read\n");
if (!iov_iter_count(to)) {
- if (skb)
- kfree_skb(skb);
+ tun_ptr_free(ptr);
return 0;
}
- if (!skb) {
+ if (!ptr) {
/* Read frames from ring */
- skb = tun_ring_recv(tfile, noblock, &err);
- if (!skb)
+ ptr = tun_ring_recv(tfile, noblock, &err);
+ if (!ptr)
return err;
}
- ret = tun_put_user(tun, tfile, skb, to);
- if (unlikely(ret < 0))
- kfree_skb(skb);
- else
- consume_skb(skb);
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ ret = tun_put_user_xdp(tun, tfile, xdp, to);
+ put_page(virt_to_head_page(xdp->data));
+ } else {
+ struct sk_buff *skb = ptr;
+
+ ret = tun_put_user(tun, tfile, skb, to);
+ if (unlikely(ret < 0))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
+ }
return ret;
}
@@ -1991,6 +2215,39 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
return ret;
}
+static void tun_prog_free(struct rcu_head *rcu)
+{
+ struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
+
+ bpf_prog_destroy(prog->prog);
+ kfree(prog);
+}
+
+static int __tun_set_ebpf(struct tun_struct *tun,
+ struct tun_prog __rcu **prog_p,
+ struct bpf_prog *prog)
+{
+ struct tun_prog *old, *new = NULL;
+
+ if (prog) {
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->prog = prog;
+ }
+
+ spin_lock_bh(&tun->lock);
+ old = rcu_dereference_protected(*prog_p,
+ lockdep_is_held(&tun->lock));
+ rcu_assign_pointer(*prog_p, new);
+ spin_unlock_bh(&tun->lock);
+
+ if (old)
+ call_rcu(&old->rcu, tun_prog_free);
+
+ return 0;
+}
+
static void tun_free_netdev(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
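__tun_set_ebpf() above is the standard RCU publish/retire pattern: the new program is published with rcu_assign_pointer() under tun->lock, and the old wrapper is retired with call_rcu() so in-flight readers (tun_ebpf_select_queue(), run_ebpf_filter()) finish before bpf_prog_destroy() runs. The kmalloc'd tun_prog wrapper exists to give each attached program its own rcu_head. A matching reader-side sketch:

/* Reader side matching __tun_set_ebpf()'s publish/retire scheme: */
static u32 reader_demo(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(tun->filter_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);
	rcu_read_unlock();

	return ret;
}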
@@ -1999,6 +2256,8 @@ static void tun_free_netdev(struct net_device *dev)
free_percpu(tun->pcpu_stats);
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
+ __tun_set_ebpf(tun, &tun->steering_prog, NULL);
+ __tun_set_ebpf(tun, &tun->filter_prog, NULL);
}
static void tun_setup(struct net_device *dev)
@@ -2072,12 +2331,12 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
struct tun_struct *tun = tun_get(tfile);
- struct sk_buff *skb = m->msg_control;
+ void *ptr = m->msg_control;
int ret;
if (!tun) {
ret = -EBADFD;
- goto out_free_skb;
+ goto out_free;
}
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
@@ -2089,7 +2348,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
+ ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2100,12 +2359,25 @@ out:
out_put_tun:
tun_put(tun);
-out_free_skb:
- if (skb)
- kfree_skb(skb);
+out_free:
+ tun_ptr_free(ptr);
return ret;
}
+static int tun_ptr_peek_len(void *ptr)
+{
+ if (likely(ptr)) {
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ return xdp->data_end - xdp->data;
+ }
+ return __skb_array_len_with_tag(ptr);
+ } else {
+ return 0;
+ }
+}
+
static int tun_peek_len(struct socket *sock)
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -2116,7 +2388,7 @@ static int tun_peek_len(struct socket *sock)
if (!tun)
return 0;
- ret = skb_array_peek_len(&tfile->tx_array);
+ ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
tun_put(tun);
return ret;
@@ -2287,6 +2559,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->filter_attached = false;
tun->sndbuf = tfile->socket.sk->sk_sndbuf;
tun->rx_batched = 0;
+ RCU_INIT_POINTER(tun->steering_prog, NULL);
tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
if (!tun->pcpu_stats) {
@@ -2479,6 +2752,26 @@ unlock:
return ret;
}
+static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
+ void __user *data)
+{
+ struct bpf_prog *prog;
+ int fd;
+
+ if (copy_from_user(&fd, data, sizeof(fd)))
+ return -EFAULT;
+
+ if (fd == -1) {
+ prog = NULL;
+ } else {
+ prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+
+ return __tun_set_ebpf(tun, prog_p, prog);
+}
+
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
@@ -2755,6 +3048,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = 0;
break;
+ case TUNSETSTEERINGEBPF:
+ ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
+ break;
+
+ case TUNSETFILTEREBPF:
+ ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
+ break;
+
default:
ret = -EINVAL;
break;
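Both new ioctls take a pointer to an int holding a BPF program fd loaded as BPF_PROG_TYPE_SOCKET_FILTER, with -1 meaning detach (matching the copy_from_user of an int in tun_set_ebpf() above). A userspace sketch of attaching a steering program:

/* Userspace sketch: attach/detach a steering program on a tun fd.
 * prog_fd comes from bpf(BPF_PROG_LOAD, ...) with type
 * BPF_PROG_TYPE_SOCKET_FILTER; pass -1 to detach.
 */
#include <sys/ioctl.h>
#include <errno.h>
#include <linux/if_tun.h>

static int set_steering(int tun_fd, int prog_fd)
{
	if (ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd) < 0)
		return -errno;
	return 0;
}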
@@ -2851,6 +3152,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+ memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+
return 0;
}
@@ -2998,25 +3301,26 @@ static int tun_queue_resize(struct tun_struct *tun)
{
struct net_device *dev = tun->dev;
struct tun_file *tfile;
- struct skb_array **arrays;
+ struct ptr_ring **rings;
int n = tun->numqueues + tun->numdisabled;
int ret, i;
- arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
- if (!arrays)
+ rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
return -ENOMEM;
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- arrays[i] = &tfile->tx_array;
+ rings[i] = &tfile->tx_ring;
}
list_for_each_entry(tfile, &tun->disabled, next)
- arrays[i++] = &tfile->tx_array;
+ rings[i++] = &tfile->tx_ring;
- ret = skb_array_resize_multiple(arrays, n,
- dev->tx_queue_len, GFP_KERNEL);
+ ret = ptr_ring_resize_multiple(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ tun_ptr_free);
- kfree(arrays);
+ kfree(rings);
return ret;
}
@@ -3102,7 +3406,7 @@ struct socket *tun_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tun_get_socket);
-struct skb_array *tun_get_skb_array(struct file *file)
+struct ptr_ring *tun_get_tx_ring(struct file *file)
{
struct tun_file *tfile;
@@ -3111,9 +3415,9 @@ struct skb_array *tun_get_skb_array(struct file *file)
tfile = file->private_data;
if (!tfile)
return ERR_PTR(-EBADFD);
- return &tfile->tx_array;
+ return &tfile->tx_ring;
}
-EXPORT_SYMBOL_GPL(tun_get_skb_array);
+EXPORT_SYMBOL_GPL(tun_get_tx_ring);
module_init(tun_init);
module_exit(tun_cleanup);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 94c7804903c4..ec56ff29aac4 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
dev->rx_qlen = 4;
+ dev->tx_qlen = 4;
}
ret = lan78xx_write_reg(dev, BURST_CAP, buf);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3000ddd1c7e2..76ac48095c29 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -826,7 +826,7 @@ err:
static const struct driver_info qmi_wwan_info = {
.description = "WWAN/QMI device",
- .flags = FLAG_WWAN,
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
@@ -835,7 +835,7 @@ static const struct driver_info qmi_wwan_info = {
static const struct driver_info qmi_wwan_info_quirk_dtr = {
.description = "WWAN/QMI device",
- .flags = FLAG_WWAN,
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
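FLAG_SEND_ZLP matters here because QMI datagrams carry no in-band length delimiter on the wire: a USB bulk transfer terminates only on a short packet, so a datagram whose length is an exact multiple of the endpoint's wMaxPacketSize must be followed by a zero-length packet or the device keeps waiting for more data. The framing rule, as a sketch:

/* USB bulk framing rule behind FLAG_SEND_ZLP (sketch): */
static bool needs_zlp(size_t len, size_t wMaxPacketSize)
{
	/* no short packet ends the transfer, so append a 0-byte one */
	return len != 0 && (len % wMaxPacketSize) == 0;
}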
@@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
@@ -1244,6 +1245,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d51d9abf7986..0657203ffb91 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -606,6 +606,7 @@ enum rtl8152_flags {
PHY_RESET,
SCHEDULE_NAPI,
GREEN_ETHERNET,
+ DELL_TB_RX_AGG_BUG,
};
/* Define these values to match your device */
@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
dev_kfree_skb_any(skb);
remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
+
+ if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+ break;
}
if (!skb_queue_empty(&skb_head)) {
@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp)
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+ if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+ ocp_data |= RX_AGG_DISABLE;
+
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
rtl_tally_reset(tp);
@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
+ if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
+ udev->serial && !strcmp(udev->serial, "000001000000")) {
+ dev_info(&udev->dev, "Dell TB16 Dock, disabling RX aggregation");
+ set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
+ }
+
netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index d56fe32bf48d..8a22ff67b026 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent)) {
- if (net_ratelimit())
- netdev_err(dev->net, "kevent %d may have been dropped\n", work);
- } else {
+ if (!schedule_work (&dev->kevent))
+ netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+ else
netdev_dbg(dev->net, "kevent %d scheduled\n", work);
- }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f5438d0978ca..a69ad39ee57e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -410,6 +410,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (ifmp && (dev->ifindex != 0))
peer->ifindex = ifmp->ifi_index;
+ peer->gso_max_size = dev->gso_max_size;
+ peer->gso_max_segs = dev->gso_max_segs;
+
err = register_netdevice(peer);
put_net(net);
net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 559b215c0169..626c27352ae2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -31,6 +31,7 @@
#include <linux/average.h>
#include <linux/filter.h>
#include <net/route.h>
+#include <net/xdp.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -65,16 +66,39 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_UFO
};
-struct virtnet_stats {
- struct u64_stats_sync tx_syncp;
- struct u64_stats_sync rx_syncp;
- u64 tx_bytes;
- u64 tx_packets;
+struct virtnet_stat_desc {
+ char desc[ETH_GSTRING_LEN];
+ size_t offset;
+};
+
+struct virtnet_sq_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
+
+struct virtnet_rq_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
- u64 rx_bytes;
- u64 rx_packets;
+#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
+#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
+
+static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
+ { "packets", VIRTNET_SQ_STAT(packets) },
+ { "bytes", VIRTNET_SQ_STAT(bytes) },
+};
+
+static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
+ { "packets", VIRTNET_RQ_STAT(packets) },
+ { "bytes", VIRTNET_RQ_STAT(bytes) },
};
+#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
+#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
+
/* Internal representation of a send virtqueue */
struct send_queue {
/* Virtqueue associated with this send _queue */
@@ -86,6 +110,8 @@ struct send_queue {
/* Name of the send queue: output.$index */
char name[40];
+ struct virtnet_sq_stats stats;
+
struct napi_struct napi;
};
@@ -98,6 +124,8 @@ struct receive_queue {
struct bpf_prog __rcu *xdp_prog;
+ struct virtnet_rq_stats stats;
+
/* Chain pages by the private ptr. */
struct page *pages;
@@ -115,6 +143,8 @@ struct receive_queue {
/* Name of this receive queue: input.$index */
char name[40];
+
+ struct xdp_rxq_info xdp_rxq;
};
struct virtnet_info {
@@ -149,9 +179,6 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;
- /* Active statistics */
- struct virtnet_stats __percpu *stats;
-
/* Work struct for refilling if we run low on memory. */
struct delayed_work refill;
@@ -261,9 +288,12 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
int opaque;
opaque = virtqueue_enable_cb_prepare(vq);
- if (napi_complete_done(napi, processed) &&
- unlikely(virtqueue_poll(vq, opaque)))
- virtqueue_napi_schedule(napi, vq);
+ if (napi_complete_done(napi, processed)) {
+ if (unlikely(virtqueue_poll(vq, opaque)))
+ virtqueue_napi_schedule(napi, vq);
+ } else {
+ virtqueue_disable_cb(vq);
+ }
}
static void skb_xmit_done(struct virtqueue *vq)
@@ -556,6 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data = xdp.data_hard_start + xdp_headroom;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -689,6 +720,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.data = data + vi->hdr_len;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + (len - vi->hdr_len);
+ xdp.rxq = &rq->xdp_rxq;
+
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act != XDP_PASS)
@@ -1118,7 +1151,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
struct virtnet_info *vi = rq->vq->vdev->priv;
unsigned int len, received = 0, bytes = 0;
void *buf;
- struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
if (!vi->big_packets || vi->mergeable_rx_bufs) {
void *ctx;
@@ -1141,10 +1173,10 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
schedule_delayed_work(&vi->refill, 0);
}
- u64_stats_update_begin(&stats->rx_syncp);
- stats->rx_bytes += bytes;
- stats->rx_packets += received;
- u64_stats_update_end(&stats->rx_syncp);
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.bytes += bytes;
+ rq->stats.packets += received;
+ u64_stats_update_end(&rq->stats.syncp);
return received;
}
@@ -1153,8 +1185,6 @@ static void free_old_xmit_skbs(struct send_queue *sq)
{
struct sk_buff *skb;
unsigned int len;
- struct virtnet_info *vi = sq->vq->vdev->priv;
- struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
unsigned int packets = 0;
unsigned int bytes = 0;
@@ -1173,10 +1203,10 @@ static void free_old_xmit_skbs(struct send_queue *sq)
if (!packets)
return;
- u64_stats_update_begin(&stats->tx_syncp);
- stats->tx_bytes += bytes;
- stats->tx_packets += packets;
- u64_stats_update_end(&stats->tx_syncp);
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.bytes += bytes;
+ sq->stats.packets += packets;
+ u64_stats_update_end(&sq->stats.syncp);
}
static void virtnet_poll_cleantx(struct receive_queue *rq)
@@ -1222,13 +1252,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- int i;
+ int i, err;
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
+
+ err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
+ if (err < 0)
+ return err;
+
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
}
@@ -1460,24 +1495,25 @@ static void virtnet_stats(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
struct virtnet_info *vi = netdev_priv(dev);
- int cpu;
unsigned int start;
+ int i;
- for_each_possible_cpu(cpu) {
- struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
u64 tpackets, tbytes, rpackets, rbytes;
+ struct receive_queue *rq = &vi->rq[i];
+ struct send_queue *sq = &vi->sq[i];
do {
- start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
- tpackets = stats->tx_packets;
- tbytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
+ start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
+ tpackets = sq->stats.packets;
+ tbytes = sq->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
- rpackets = stats->rx_packets;
- rbytes = stats->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
+ start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
+ rpackets = rq->stats.packets;
+ rbytes = rq->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
@@ -1557,6 +1593,7 @@ static int virtnet_close(struct net_device *dev)
cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++) {
+ xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
napi_disable(&vi->rq[i].napi);
virtnet_napi_tx_disable(&vi->sq[i].napi);
}
@@ -1814,6 +1851,83 @@ static int virtnet_set_channels(struct net_device *dev,
return err;
}
+static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ char *p = (char *)data;
+ unsigned int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
+ i, virtnet_rq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
+ i, virtnet_sq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ }
+}
+
+static int virtnet_get_sset_count(struct net_device *dev, int sset)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
+ VIRTNET_SQ_STATS_LEN);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void virtnet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ unsigned int idx = 0, start, i, j;
+ const u8 *stats_base;
+ size_t offset;
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ struct receive_queue *rq = &vi->rq[i];
+
+ stats_base = (u8 *)&rq->stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
+ for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
+ offset = virtnet_rq_stats_desc[j].offset;
+ data[idx + j] = *(u64 *)(stats_base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
+ idx += VIRTNET_RQ_STATS_LEN;
+ }
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ struct send_queue *sq = &vi->sq[i];
+
+ stats_base = (u8 *)&sq->stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
+ for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
+ offset = virtnet_sq_stats_desc[j].offset;
+ data[idx + j] = *(u64 *)(stats_base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
+ idx += VIRTNET_SQ_STATS_LEN;
+ }
+}
+
static void virtnet_get_channels(struct net_device *dev,
struct ethtool_channels *channels)
{
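The strings/sset_count/stats trio above is table-driven: each counter is described once as a name plus an offsetof() into the per-queue stats struct, and the callbacks walk the table, so adding a counter is a two-line change. An illustrative extension (the xdp_drops field is hypothetical and not part of this patch):

/* Hypothetical extension of the per-rq stats table; "xdp_drops" is
 * illustrative only and does not exist in this patch.
 */
struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_drops;				/* hypothetical */
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",   VIRTNET_RQ_STAT(packets) },
	{ "bytes",     VIRTNET_RQ_STAT(bytes) },
	{ "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },	/* hypothetical */
};

A counter added this way would then appear automatically in ethtool -S output as rx_queue_<n>_xdp_drops.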
@@ -1891,10 +2005,31 @@ static void virtnet_init_settings(struct net_device *dev)
vi->duplex = DUPLEX_UNKNOWN;
}
+static void virtnet_update_settings(struct virtnet_info *vi)
+{
+ u32 speed;
+ u8 duplex;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
+ return;
+
+ speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
+ speed));
+ if (ethtool_validate_speed(speed))
+ vi->speed = speed;
+ duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
+ duplex));
+ if (ethtool_validate_duplex(duplex))
+ vi->duplex = duplex;
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .get_strings = virtnet_get_strings,
+ .get_sset_count = virtnet_get_sset_count,
+ .get_ethtool_stats = virtnet_get_ethtool_stats,
.set_channels = virtnet_set_channels,
.get_channels = virtnet_get_channels,
.get_ts_info = ethtool_op_get_ts_info,
@@ -2144,6 +2279,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
vi->status = v;
if (vi->status & VIRTIO_NET_S_LINK_UP) {
+ virtnet_update_settings(vi);
netif_carrier_on(vi->dev);
netif_tx_wake_all_queues(vi->dev);
} else {
@@ -2386,6 +2522,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
+
+ u64_stats_init(&vi->rq[i].stats.syncp);
+ u64_stats_init(&vi->sq[i].stats.syncp);
}
return 0;
@@ -2510,7 +2649,7 @@ static int virtnet_validate(struct virtio_device *vdev)
static int virtnet_probe(struct virtio_device *vdev)
{
- int i, err;
+ int i, err = -ENOMEM;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
@@ -2587,17 +2726,6 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->dev = dev;
vi->vdev = vdev;
vdev->priv = vi;
- vi->stats = alloc_percpu(struct virtnet_stats);
- err = -ENOMEM;
- if (vi->stats == NULL)
- goto free;
-
- for_each_possible_cpu(i) {
- struct virtnet_stats *virtnet_stats;
- virtnet_stats = per_cpu_ptr(vi->stats, i);
- u64_stats_init(&virtnet_stats->tx_syncp);
- u64_stats_init(&virtnet_stats->rx_syncp);
- }
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -2634,7 +2762,7 @@ static int virtnet_probe(struct virtio_device *vdev)
*/
dev_err(&vdev->dev, "device MTU appears to have changed, "
"it is now %d < %d", mtu, dev->min_mtu);
- goto free_stats;
+ goto free;
}
dev->mtu = mtu;
@@ -2658,7 +2786,7 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_stats;
+ goto free;
#ifdef CONFIG_SYSFS
if (vi->mergeable_rx_bufs)
@@ -2692,6 +2820,7 @@ static int virtnet_probe(struct virtio_device *vdev)
schedule_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
+ virtnet_update_settings(vi);
netif_carrier_on(dev);
}
@@ -2712,8 +2841,6 @@ free_vqs:
cancel_delayed_work_sync(&vi->refill);
free_receive_page_frags(vi);
virtnet_del_vqs(vi);
-free_stats:
- free_percpu(vi->stats);
free:
free_netdev(dev);
return err;
@@ -2746,7 +2873,6 @@ static void virtnet_remove(struct virtio_device *vdev)
remove_vq_common(vi);
- free_percpu(vi->stats);
free_netdev(vi->dev);
}
@@ -2793,7 +2919,8 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
VIRTIO_NET_F_CTRL_MAC_ADDR, \
- VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
+ VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
+ VIRTIO_NET_F_SPEED_DUPLEX
static unsigned int features[] = {
VIRTNET_FEATURES,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d1c7029ded7c..cf95290b160c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL;
}
- rq->buf_info[i] = NULL;
}
if (rq->data_ring.base) {
@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
(rq->rx_ring[0].size + rq->rx_ring[1].size);
dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
rq->buf_info_pa);
+ rq->buf_info[0] = rq->buf_info[1] = NULL;
}
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 9c51b8be0038..5ba222920e80 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.a.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.11.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040a00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040b00
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -416,8 +416,8 @@ struct vmxnet3_adapter {
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
-#define VMXNET3_DEF_RX_RING_SIZE 256
-#define VMXNET3_DEF_RX_RING2_SIZE 128
+#define VMXNET3_DEF_RX_RING_SIZE 1024
+#define VMXNET3_DEF_RX_RING2_SIZE 256
#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index feb1b2e15c2e..139c61c8244a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -673,8 +673,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
struct sock *sk,
struct sk_buff *skb)
{
- /* don't divert multicast */
- if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ /* don't divert multicast or local broadcast */
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
+ ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
if (qdisc_tx_is_default(vrf_dev))
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 31f4b7911ef8..fab7a4db249e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2158,8 +2158,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (skb_dst(skb)) {
int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
- skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
}
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
@@ -2200,8 +2199,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (skb_dst(skb)) {
int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
- skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
}
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
@@ -3711,18 +3709,16 @@ static __net_init int vxlan_init_net(struct net *net)
return 0;
}
-static void __net_exit vxlan_exit_net(struct net *net)
+static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
struct net_device *dev, *aux;
unsigned int h;
- LIST_HEAD(list);
- rtnl_lock();
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &vxlan_link_ops)
- unregister_netdevice_queue(dev, &list);
+ unregister_netdevice_queue(dev, head);
list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
/* If vxlan->dev is in the same netns, it has already been added
@@ -3730,20 +3726,30 @@ static void __net_exit vxlan_exit_net(struct net *net)
*/
if (!net_eq(dev_net(vxlan->dev), net)) {
gro_cells_destroy(&vxlan->gro_cells);
- unregister_netdevice_queue(vxlan->dev, &list);
+ unregister_netdevice_queue(vxlan->dev, head);
}
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
-
for (h = 0; h < PORT_HASH_SIZE; ++h)
WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
+static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+{
+ struct net *net;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list)
+ vxlan_destroy_tunnels(net, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit = vxlan_exit_net,
+ .exit_batch = vxlan_exit_batch_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 6ea16260ec76..f6b000ddcd15 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -924,7 +924,7 @@ static int chrdev_tx_done(struct channel_data *chan, int size)
return 1;
}
-static unsigned int cosa_poll(struct file *file, poll_table *poll)
+static __poll_t cosa_poll(struct file *file, poll_table *poll)
{
pr_info("cosa_poll is here\n");
return 0;
diff --git a/drivers/net/wimax/i2400m/sysfs.c b/drivers/net/wimax/i2400m/sysfs.c
index 1237109f251a..8c67df11105c 100644
--- a/drivers/net/wimax/i2400m/sysfs.c
+++ b/drivers/net/wimax/i2400m/sysfs.c
@@ -65,8 +65,7 @@ error_bad_value:
}
static
-DEVICE_ATTR(i2400m_idle_timeout, S_IWUSR,
- NULL, i2400m_idle_timeout_store);
+DEVICE_ATTR_WO(i2400m_idle_timeout);
static
struct attribute *i2400m_dev_attrs[] = {
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 87f56d0e17a6..deb5ae21a559 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -47,12 +47,19 @@ config ATH10K_DEBUG
config ATH10K_DEBUGFS
bool "Atheros ath10k debugfs support"
depends on ATH10K && DEBUG_FS
- select RELAY
---help---
Enable debugfs support
If unsure, say Y to make it easier to debug problems.
+config ATH10K_SPECTRAL
+ bool "Atheros ath10k spectral scan support"
+ depends on ATH10K_DEBUGFS
+ select RELAY
+ default n
+ ---help---
+ Say Y to enable access to the FFT/spectral data via debugfs.
+
config ATH10K_TRACING
bool "Atheros ath10k tracing support"
depends on ATH10K
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 9492177e9063..6739ac26fd29 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -15,12 +15,13 @@ ath10k_core-y += mac.o \
p2p.o \
swap.o
-ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
+ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
+ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index ff6815e95684..35d10490f6c3 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 2d3a2f31123d..af4978d6a14b 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 9c0839b2ca8f..9a396817aa55 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a8afd690290f..b9def7bace2f 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -327,12 +327,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* Guts of ath10k_ce_send.
* The caller takes responsibility for any needed locking.
*/
-int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
- void *per_transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -384,6 +384,87 @@ exit:
return ret;
}
+static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ struct ce_desc_64 *desc, sdesc;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int write_index = src_ring->write_index;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ __le32 *addr;
+ u32 desc_flags = 0;
+ int ret = 0;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ if (nbytes > ce_state->src_sz_max)
+ ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
+ __func__, nbytes, ce_state->src_sz_max);
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) <= 0)) {
+ ret = -ENOSR;
+ goto exit;
+ }
+
+ desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+ write_index);
+
+ desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+ if (flags & CE_SEND_FLAG_GATHER)
+ desc_flags |= CE_DESC_FLAGS_GATHER;
+
+ if (flags & CE_SEND_FLAG_BYTE_SWAP)
+ desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+ addr = (__le32 *)&sdesc.addr;
+
+ flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+ addr[0] = __cpu_to_le32(buffer);
+ addr[1] = __cpu_to_le32(flags);
+ if (flags & CE_SEND_FLAG_GATHER)
+ addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
+ else
+ addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
+
+ sdesc.nbytes = __cpu_to_le16(nbytes);
+ sdesc.flags = __cpu_to_le16(desc_flags);
+
+ *desc = sdesc;
+
+ src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+ /* Update Source Ring Write Index */
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+ if (!(flags & CE_SEND_FLAG_GATHER))
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+ src_ring->write_index = write_index;
+exit:
+ return ret;
+}
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
+ buffer, nbytes, transfer_id, flags);
+}
+
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
struct ath10k *ar = pipe->ar;
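The _64 variants above exist for targets (WCN3990-class) whose copy-engine descriptors carry addresses wider than 32 bits; rather than branching at every call site, each pipe gets an ops table chosen at init time, and the public ath10k_ce_send_nolock() simply forwards through it. A sketch of the assumed indirection (field names follow the calls in this hunk; the full definition lives in ce.h and is not shown in this excerpt):

/* Sketch of the per-pipe ops table; names follow the calls above,
 * the real definition (in ce.h) is not part of this diff.
 */
struct ath10k_ce_ops {
	int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe,
			      void *per_transfer_context,
			      dma_addr_t buffer, unsigned int nbytes,
			      unsigned int transfer_id,
			      unsigned int flags);
	int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx,
			      dma_addr_t paddr);
	/* ...one hook per 32/64-bit descriptor operation... */
};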
@@ -413,7 +494,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
- u32 buffer,
+ dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
@@ -459,7 +540,8 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_ce *ce = ath10k_ce_priv(ar);
@@ -488,6 +570,39 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
return 0;
}
+static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
+ void *ctx,
+ dma_addr_t paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, write_index);
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ return -ENOSPC;
+
+ desc->addr = __cpu_to_le64(paddr);
+ desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
+
+ desc->nbytes = 0;
+
+ dest_ring->per_transfer_context[write_index] = ctx;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+
+ return 0;
+}
+
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
struct ath10k *ar = pipe->ar;
@@ -508,14 +623,15 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
dest_ring->write_index = write_index;
}
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret;
spin_lock_bh(&ce->ce_lock);
- ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
+ ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
spin_unlock_bh(&ce->ce_lock);
return ret;
@@ -525,9 +641,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
-int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp,
- unsigned int *nbytesp)
+static int
+ _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
@@ -574,6 +691,64 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
+static int
+_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
+{
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, sw_index);
+ struct ce_desc_64 sdesc;
+ u16 nbytes;
+
+ /* Copy in one go for performance reasons */
+ sdesc = *desc;
+
+ nbytes = __le16_to_cpu(sdesc.nbytes);
+ if (nbytes == 0) {
+ /* This closes a relatively unusual race where the Host
+ * sees the updated DRRI before the update to the
+ * corresponding descriptor has completed. We treat this
+ * as a descriptor that is not yet done.
+ */
+ return -EIO;
+ }
+
+ desc->nbytes = 0;
+
+ /* Return data from completed destination descriptor */
+ *nbytesp = nbytes;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+ * So update the transfer context for all CEs except CE5.
+ */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_ctx,
+ unsigned int *nbytesp)
+{
+ return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
+ per_transfer_ctx,
+ nbytesp);
+}
+
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
unsigned int *nbytesp)
@@ -583,17 +758,18 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
int ret;
spin_lock_bh(&ce->ce_lock);
- ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+ ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
per_transfer_contextp,
nbytesp);
+
spin_unlock_bh(&ce->ce_lock);
return ret;
}
-int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp,
- u32 *bufferp)
+static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
{
struct ath10k_ce_ring *dest_ring;
unsigned int nentries_mask;
@@ -644,6 +820,69 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
return ret;
}
+static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ struct ath10k_ce_ring *dest_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_ce *ce;
+
+ dest_ring = ce_state->dest_ring;
+
+ if (!dest_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ce = ath10k_ce_priv(ar);
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = dest_ring->nentries_mask;
+ sw_index = dest_ring->sw_index;
+ write_index = dest_ring->write_index;
+ if (write_index != sw_index) {
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, sw_index);
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le64_to_cpu(desc->addr);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ return ce_state->ops->ce_revoke_recv_next(ce_state,
+ per_transfer_contextp,
+ bufferp);
+}
+
/*
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
@@ -698,10 +937,45 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
+static void ath10k_ce_extract_desc_data(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index,
+ dma_addr_t *bufferp,
+ u32 *nbytesp,
+ u32 *transfer_idp)
+{
+ struct ce_desc *base = src_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+}
+
+static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index,
+ dma_addr_t *bufferp,
+ u32 *nbytesp,
+ u32 *transfer_idp)
+{
+ struct ce_desc_64 *base = src_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_SRC_RING_TO_DESC_64(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le64_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+}
+
/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
+ dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
@@ -728,14 +1002,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
write_index = src_ring->write_index;
if (write_index != sw_index) {
- struct ce_desc *base = src_ring->base_addr_owner_space;
- struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
-
- /* Return data from completed source descriptor */
- *bufferp = __le32_to_cpu(desc->addr);
- *nbytesp = __le16_to_cpu(desc->nbytes);
- *transfer_idp = MS(__le16_to_cpu(desc->flags),
- CE_DESC_FLAGS_META_DATA);
+ ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
+ bufferp, nbytesp,
+ transfer_idp);
if (per_transfer_contextp)
*per_transfer_contextp =
@@ -897,8 +1166,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->src_nentries);
- memset(src_ring->base_addr_owner_space, 0,
- nentries * sizeof(struct ce_desc));
+ if (ar->hw_params.target_64bit)
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc_64));
+ else
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@@ -934,8 +1207,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->dest_nentries);
- memset(dest_ring->base_addr_owner_space, 0,
- nentries * sizeof(struct ce_desc));
+ if (ar->hw_params.target_64bit)
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc_64));
+ else
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
@@ -993,12 +1270,57 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
src_ring->base_addr_ce_space_unaligned = base_addr;
- src_ring->base_addr_owner_space = PTR_ALIGN(
- src_ring->base_addr_owner_space_unaligned,
- CE_DESC_RING_ALIGN);
- src_ring->base_addr_ce_space = ALIGN(
- src_ring->base_addr_ce_space_unaligned,
- CE_DESC_RING_ALIGN);
+ src_ring->base_addr_owner_space =
+ PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space =
+ ALIGN(src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(sizeof(*src_ring) +
+ (nentries *
+ sizeof(*src_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (!src_ring)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ src_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->base_addr_ce_space_unaligned = base_addr;
+
+ src_ring->base_addr_owner_space =
+ PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space =
+ ALIGN(src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
return src_ring;
}
@@ -1039,12 +1361,63 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
dest_ring->base_addr_ce_space_unaligned = base_addr;
- dest_ring->base_addr_owner_space = PTR_ALIGN(
- dest_ring->base_addr_owner_space_unaligned,
- CE_DESC_RING_ALIGN);
- dest_ring->base_addr_ce_space = ALIGN(
- dest_ring->base_addr_ce_space_unaligned,
- CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_owner_space =
+ PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space =
+ ALIGN(dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return dest_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *dest_ring;
+ u32 nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ dest_ring = kzalloc(sizeof(*dest_ring) +
+ (nentries *
+ sizeof(*dest_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (!dest_ring)
+ return ERR_PTR(-ENOMEM);
+
+ dest_ring->nentries = nentries;
+ dest_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ dest_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+ /* Initialize the memory to 0 to prevent garbage
+ * data from crashing the system when downloading firmware
+ */
+ memset(dest_ring->base_addr_owner_space_unaligned, 0,
+ nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
+
+ dest_ring->base_addr_owner_space =
+ PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space =
+ ALIGN(dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
return dest_ring;
}
@@ -1107,65 +1480,36 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
ath10k_ce_deinit_dest_ring(ar, ce_id);
}
-int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
- const struct ce_attr *attr)
+static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
- int ret;
-
- /*
- * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
- * additional TX locking checks.
- *
- * For the lack of a better place do the check here.
- */
- BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
- if (attr->src_nentries)
- ce_state->send_cb = attr->send_cb;
-
- if (attr->dest_nentries)
- ce_state->recv_cb = attr->recv_cb;
-
- if (attr->src_nentries) {
- ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
- if (IS_ERR(ce_state->src_ring)) {
- ret = PTR_ERR(ce_state->src_ring);
- ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
- ce_id, ret);
- ce_state->src_ring = NULL;
- return ret;
- }
+ if (ce_state->src_ring) {
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
}
- if (attr->dest_nentries) {
- ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
- attr);
- if (IS_ERR(ce_state->dest_ring)) {
- ret = PTR_ERR(ce_state->dest_ring);
- ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
- ce_id, ret);
- ce_state->dest_ring = NULL;
- return ret;
- }
+ if (ce_state->dest_ring) {
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
+ kfree(ce_state->dest_ring);
}
- return 0;
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
}
-void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
@@ -1173,7 +1517,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
if (ce_state->src_ring) {
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
- sizeof(struct ce_desc) +
+ sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
ce_state->src_ring->base_addr_owner_space,
ce_state->src_ring->base_addr_ce_space);
@@ -1183,7 +1527,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
if (ce_state->dest_ring) {
dma_free_coherent(ar->dev,
(ce_state->dest_ring->nentries *
- sizeof(struct ce_desc) +
+ sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
ce_state->dest_ring->base_addr_owner_space,
ce_state->dest_ring->base_addr_ce_space);
@@ -1194,6 +1538,14 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
ce_state->dest_ring = NULL;
}
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+ ce_state->ops->ce_free_pipe(ar, ce_id);
+}
+
void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
{
@@ -1232,3 +1584,99 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
spin_unlock_bh(&ce->ce_lock);
}
+
+static const struct ath10k_ce_ops ce_ops = {
+ .ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
+ .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
+ .ce_rx_post_buf = __ath10k_ce_rx_post_buf,
+ .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
+ .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
+ .ce_extract_desc_data = ath10k_ce_extract_desc_data,
+ .ce_free_pipe = _ath10k_ce_free_pipe,
+ .ce_send_nolock = _ath10k_ce_send_nolock,
+};
+
+static const struct ath10k_ce_ops ce_64_ops = {
+ .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
+ .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
+ .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
+ .ce_completed_recv_next_nolock =
+ _ath10k_ce_completed_recv_next_nolock_64,
+ .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
+ .ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
+ .ce_free_pipe = _ath10k_ce_free_pipe_64,
+ .ce_send_nolock = _ath10k_ce_send_nolock_64,
+};
+
+static void ath10k_ce_set_ops(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state)
+{
+ switch (ar->hw_rev) {
+ case ATH10K_HW_WCN3990:
+ ce_state->ops = &ce_64_ops;
+ break;
+ default:
+ ce_state->ops = &ce_ops;
+ break;
+ }
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ int ret;
+
+ ath10k_ce_set_ops(ar, ce_state);
+ /* Make sure there are enough CE ringbuffer entries for HTT TX to avoid
+ * additional TX locking checks.
+ *
+ * For lack of a better place, do the check here.
+ */
+ BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+
+ if (attr->src_nentries)
+ ce_state->send_cb = attr->send_cb;
+
+ if (attr->dest_nentries)
+ ce_state->recv_cb = attr->recv_cb;
+
+ if (attr->src_nentries) {
+ ce_state->src_ring =
+ ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+ ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
+ ce_id,
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+ ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index bdec794704d9..2c3c8f5e90ea 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -36,6 +36,10 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
+#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
+
+#define CE_DESC_FLAGS_GET_MASK GENMASK(4, 0)
+#define CE_DESC_37BIT_ADDR_MASK GENMASK_ULL(37, 0)
/* Following desc flags are used in QCA99X0 */
#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
@@ -50,6 +54,16 @@ struct ce_desc {
__le16 flags; /* %CE_DESC_FLAGS_ */
};
+struct ce_desc_64 {
+ __le64 addr;
+ __le16 nbytes; /* length in register map */
+ __le16 flags; /* fw_metadata_high */
+ __le32 toeplitz_hash_result;
+};
+
+#define CE_DESC_SIZE sizeof(struct ce_desc)
+#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64)
+
struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
@@ -117,6 +131,7 @@ struct ath10k_ce_pipe {
unsigned int src_sz_max;
struct ath10k_ce_ring *src_ring;
struct ath10k_ce_ring *dest_ring;
+ const struct ath10k_ce_ops *ops;
};
/* Copy Engine settable attributes */
@@ -160,7 +175,7 @@ struct ath10k_ce {
*/
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
- u32 buffer,
+ dma_addr_t buffer,
unsigned int nbytes,
/* 14 bits */
unsigned int transfer_id,
@@ -168,7 +183,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
- u32 buffer,
+ dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
@@ -180,8 +195,8 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr);
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
/* recv flags */
@@ -222,7 +237,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
*/
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp);
+ dma_addr_t *bufferp);
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
@@ -235,7 +250,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
*/
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
+ dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
@@ -281,6 +296,32 @@ struct ce_attr {
void (*recv_cb)(struct ath10k_ce_pipe *);
};
+struct ath10k_ce_ops {
+ struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar,
+ u32 ce_id,
+ const struct ce_attr *attr);
+ struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar,
+ u32 ce_id,
+ const struct ce_attr *attr);
+ int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr);
+ int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *nbytesp);
+ int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *nbytesp);
+ void (*ce_extract_desc_data)(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index, dma_addr_t *bufferp,
+ u32 *nbytesp, u32 *transfer_idp);
+ void (*ce_free_pipe)(struct ath10k *ar, int ce_id);
+ int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe,
+ void *per_transfer_context,
+ dma_addr_t buffer, u32 nbytes,
+ u32 transfer_id, u32 flags);
+};
+
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
@@ -292,6 +333,12 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
#define CE_DEST_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
+#define CE_SRC_RING_TO_DESC_64(baddr, idx) \
+ (&(((struct ce_desc_64 *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC_64(baddr, idx) \
+ (&(((struct ce_desc_64 *)baddr)[idx]))
+
/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
(((int)(toidx) - (int)(fromidx)) & (nentries_mask))
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b29fdbd21ead..b0fdc1023619 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -32,6 +32,7 @@
#include "htt.h"
#include "testmode.h"
#include "wmi-ops.h"
+#include "coredump.h"
unsigned int ath10k_debug_mask;
static unsigned int ath10k_cryptmode_param;
@@ -39,17 +40,25 @@ static bool uart_print;
static bool skip_otp;
static bool rawmode;
+/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
+ * by default.
+ */
+unsigned long ath10k_coredump_mask = 0x3;
+
+/* FIXME: most of these should be readonly */
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
module_param(rawmode, bool, 0644);
+module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@@ -75,6 +84,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -99,6 +113,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -122,6 +141,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -145,6 +169,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -168,6 +197,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -194,6 +228,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -223,6 +262,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -257,6 +301,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 1560,
.vht160_mcs_tx_highest = 1560,
.n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -290,6 +339,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 780,
.vht160_mcs_tx_highest = 780,
.n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -313,6 +367,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -338,6 +397,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -368,6 +432,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ },
+ {
+ .id = WCN3990_HW_1_0_DEV_VERSION,
+ .dev_id = 0,
+ .name = "wcn3990 hw1.0",
+ .continuous_frag_desc = true,
+ .tx_chain_mask = 0x7,
+ .rx_chain_mask = 0x7,
+ .max_spatial_stream = 4,
+ .fw = {
+ .dir = WCN3990_HW_1_0_FW_DIR,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &wcn3990_ops,
+ .decap_align_bytes = 1,
+ .num_peers = TARGET_HL_10_TLV_NUM_PEERS,
+ .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
+ .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
+ .target_64bit = true,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
},
};
@@ -390,6 +479,8 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
[ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
[ATH10K_FW_FEATURE_NO_PS] = "no-ps",
+ [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
+ [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -860,6 +951,28 @@ static int ath10k_core_check_smbios(struct ath10k *ar)
return 0;
}
+static int ath10k_core_check_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ const char *variant = NULL;
+
+ node = ar->dev->of_node;
+ if (!node)
+ return -ENOENT;
+
+ of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ &variant);
+ if (!variant)
+ return -ENODATA;
+
+ if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ variant);
+
+ return 0;
+}
+
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
@@ -1163,7 +1276,10 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
len -= sizeof(*hdr);
data = hdr->data;
- if (len < ALIGN(ie_len, 4)) {
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ if (len < ie_len) {
ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
ie_id, ie_len, len);
ret = -EINVAL;
@@ -1202,9 +1318,6 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
goto out;
}
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
len -= ie_len;
data += ie_len;
}
@@ -1231,19 +1344,19 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
+ if (ar->id.bdf_ext[0] != '\0')
+ scnprintf(variant, sizeof(variant), ",variant=%s",
+ ar->id.bdf_ext);
+
if (ar->id.bmi_ids_valid) {
scnprintf(name, name_len,
- "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
+ "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
ath10k_bus_str(ar->hif.bus),
ar->id.bmi_chip_id,
- ar->id.bmi_board_id);
+ ar->id.bmi_board_id, variant);
goto out;
}
- if (ar->id.bdf_ext[0] != '\0')
- scnprintf(variant, sizeof(variant), ",variant=%s",
- ar->id.bdf_ext);
-
scnprintf(name, name_len,
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
ath10k_bus_str(ar->hif.bus),
@@ -1335,6 +1448,9 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
len -= sizeof(*hdr);
data += sizeof(*hdr);
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
if (len < ie_len) {
ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
@@ -1440,15 +1556,12 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
break;
}
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
len -= ie_len;
data += ie_len;
}
- if (!fw_file->firmware_data ||
- !fw_file->firmware_len) {
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) &&
+ (!fw_file->firmware_data || !fw_file->firmware_len)) {
ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
@@ -1474,6 +1587,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
break;
case ATH10K_BUS_PCI:
case ATH10K_BUS_AHB:
+ case ATH10K_BUS_SNOC:
scnprintf(fw_name, fw_name_len, "%s-%d.bin",
ATH10K_FW_FILE_BASE, fw_api);
break;
@@ -1759,7 +1873,7 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
- ret = ath10k_debug_fw_devcoredump(ar);
+ ret = ath10k_coredump_submit(ar);
if (ret)
ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
ret);
@@ -2001,43 +2115,47 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ar->running_fw = fw;
- ath10k_bmi_start(ar);
-
- if (ath10k_init_configure_target(ar)) {
- status = -EINVAL;
- goto err;
- }
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->running_fw->fw_file.fw_features)) {
+ ath10k_bmi_start(ar);
- status = ath10k_download_cal_data(ar);
- if (status)
- goto err;
+ if (ath10k_init_configure_target(ar)) {
+ status = -EINVAL;
+ goto err;
+ }
- /* Some of of qca988x solutions are having global reset issue
- * during target initialization. Bypassing PLL setting before
- * downloading firmware and letting the SoC run on REF_CLK is
- * fixing the problem. Corresponding firmware change is also needed
- * to set the clock source once the target is initialized.
- */
- if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
- ar->running_fw->fw_file.fw_features)) {
- status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
- if (status) {
- ath10k_err(ar, "could not write to skip_clock_init: %d\n",
- status);
+ status = ath10k_download_cal_data(ar);
+ if (status)
goto err;
+
+ /* Some qca988x solutions have a global reset issue
+ * during target initialization. Bypassing the PLL setting
+ * before downloading firmware and letting the SoC run on
+ * REF_CLK fixes the problem. A corresponding firmware
+ * change is also needed to set the clock source once the
+ * target is initialized.
+ */
+ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+ if (status) {
+ ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+ status);
+ goto err;
+ }
}
- }
- status = ath10k_download_fw(ar);
- if (status)
- goto err;
+ status = ath10k_download_fw(ar);
+ if (status)
+ goto err;
- status = ath10k_init_uart(ar);
- if (status)
- goto err;
+ status = ath10k_init_uart(ar);
+ if (status)
+ goto err;
- if (ar->hif.bus == ATH10K_BUS_SDIO)
- ath10k_init_sdio(ar);
+ if (ar->hif.bus == ATH10K_BUS_SDIO)
+ ath10k_init_sdio(ar);
+ }
ar->htc.htc_ops.target_send_suspend_complete =
ath10k_send_suspend_complete;
@@ -2048,9 +2166,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err;
}
- status = ath10k_bmi_done(ar);
- if (status)
- goto err;
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_bmi_done(ar);
+ if (status)
+ goto err;
+ }
status = ath10k_wmi_attach(ar);
if (status) {
@@ -2293,19 +2414,35 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
- memset(&target_info, 0, sizeof(target_info));
- if (ar->hif.bus == ATH10K_BUS_SDIO)
+ switch (ar->hif.bus) {
+ case ATH10K_BUS_SDIO:
+ memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
- else
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ case ATH10K_BUS_PCI:
+ case ATH10K_BUS_AHB:
+ case ATH10K_BUS_USB:
+ memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info(ar, &target_info);
- if (ret) {
- ath10k_err(ar, "could not get target info (%d)\n", ret);
- goto err_power_down;
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ case ATH10K_BUS_SNOC:
+ break;
+ default:
+ ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
}
- ar->target_version = target_info.version;
- ar->hw->wiphy->hw_version = target_info.version;
-
ret = ath10k_init_hw_params(ar);
if (ret) {
ath10k_err(ar, "could not get hw params (%d)\n", ret);
@@ -2325,33 +2462,40 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ath10k_debug_print_hwfw_info(ar);
- ret = ath10k_core_pre_cal_download(ar);
- if (ret) {
- /* pre calibration data download is not necessary
- * for all the chipsets. Ignore failures and continue.
- */
- ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "could not load pre cal data: %d\n", ret);
- }
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ /* Pre-calibration data download is not necessary
+ * for all chipsets. Ignore failures and continue.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "could not load pre cal data: %d\n", ret);
+ }
- ret = ath10k_core_get_board_id_from_otp(ar);
- if (ret && ret != -EOPNOTSUPP) {
- ath10k_err(ar, "failed to get board id from otp: %d\n",
- ret);
- goto err_free_firmware_files;
- }
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_err(ar, "failed to get board id from otp: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
- ret = ath10k_core_check_smbios(ar);
- if (ret)
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n");
+ ret = ath10k_core_check_smbios(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
- ret = ath10k_core_fetch_board_file(ar);
- if (ret) {
- ath10k_err(ar, "failed to fetch board file: %d\n", ret);
- goto err_free_firmware_files;
- }
+ ret = ath10k_core_check_dt(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
+
+ ret = ath10k_core_fetch_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+ goto err_free_firmware_files;
+ }
- ath10k_debug_print_board_info(ar);
+ ath10k_debug_print_board_info(ar);
+ }
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
@@ -2360,11 +2504,15 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
- ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
- if (ret) {
- ath10k_err(ar, "failed to initialize code swap segment: %d\n",
- ret);
- goto err_free_firmware_files;
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->normal_mode_fw.fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
}
mutex_lock(&ar->conf_mutex);
@@ -2416,10 +2564,16 @@ static void ath10k_core_register_work(struct work_struct *work)
goto err_release_fw;
}
+ status = ath10k_coredump_register(ar);
+ if (status) {
+ ath10k_err(ar, "unable to register coredump\n");
+ goto err_unregister_mac;
+ }
+
status = ath10k_debug_register(ar);
if (status) {
ath10k_err(ar, "unable to initialize debugfs\n");
- goto err_unregister_mac;
+ goto err_unregister_coredump;
}
status = ath10k_spectral_create(ar);
@@ -2442,6 +2596,8 @@ err_spectral_destroy:
ath10k_spectral_destroy(ar);
err_debug_destroy:
ath10k_debug_destroy(ar);
+err_unregister_coredump:
+ ath10k_coredump_unregister(ar);
err_unregister_mac:
ath10k_mac_unregister(ar);
err_release_fw:
@@ -2596,12 +2752,19 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_dummy_netdev(&ar->napi_dev);
- ret = ath10k_debug_create(ar);
+ ret = ath10k_coredump_create(ar);
if (ret)
goto err_free_aux_wq;
+ ret = ath10k_debug_create(ar);
+ if (ret)
+ goto err_free_coredump;
+
return ar;
+err_free_coredump:
+ ath10k_coredump_destroy(ar);
+
err_free_aux_wq:
destroy_workqueue(ar->workqueue_aux);
err_free_wq:
@@ -2623,6 +2786,7 @@ void ath10k_core_destroy(struct ath10k *ar)
destroy_workqueue(ar->workqueue_aux);
ath10k_debug_destroy(ar);
+ ath10k_coredump_destroy(ar);
ath10k_htt_tx_destroy(&ar->htt);
ath10k_wmi_free_host_mem(ar);
ath10k_mac_destroy(ar);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 643041ef3271..fe6b30356d3b 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -67,7 +67,6 @@
/* NAPI poll budget */
#define ATH10K_NAPI_BUDGET 64
-#define ATH10K_NAPI_QUOTA_LIMIT 60
/* SMBIOS type containing Board Data File Name Extension */
#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
@@ -93,6 +92,7 @@ enum ath10k_bus {
ATH10K_BUS_AHB,
ATH10K_BUS_SDIO,
ATH10K_BUS_USB,
+ ATH10K_BUS_SNOC,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -106,6 +106,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus)
return "sdio";
case ATH10K_BUS_USB:
return "usb";
+ case ATH10K_BUS_SNOC:
+ return "snoc";
}
return "unknown";
@@ -364,11 +366,11 @@ struct ath10k_sta {
struct rate_info txrate;
struct work_struct update_wk;
+ u64 rx_duration;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
bool aggr_mode;
- u64 rx_duration;
#endif
};
@@ -458,14 +460,17 @@ struct ath10k_ce_crash_hdr {
struct ath10k_ce_crash_data entries[];
};
+#define MAX_MEM_DUMP_TYPE 5
+
/* used for crash-dump storage, protected by data-lock */
struct ath10k_fw_crash_data {
- bool crashed_since_read;
-
guid_t guid;
- struct timespec timestamp;
+ struct timespec64 timestamp;
__le32 registers[REG_DUMP_COUNT_QCA988X];
struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
+
+ u8 *ramdump_buf;
+ size_t ramdump_buf_len;
};
struct ath10k_debug {
@@ -488,12 +493,9 @@ struct ath10k_debug {
/* protected by conf_mutex */
u64 fw_dbglog_mask;
u32 fw_dbglog_level;
- u32 pktlog_filter;
u32 reg_addr;
u32 nf_cal_period;
void *cal_data;
-
- struct ath10k_fw_crash_data *fw_crash_data;
};
enum ath10k_state {
@@ -615,6 +617,12 @@ enum ath10k_fw_features {
/* Firmware does not support power save in station mode. */
ATH10K_FW_FEATURE_NO_PS = 17,
+ /* Firmware allows management tx by reference instead of by value. */
+ ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
+
+ /* Firmware load is done externally, not by BMI */
+ ATH10K_FW_FEATURE_NON_BMI = 19,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -963,6 +971,14 @@ struct ath10k {
} spectral;
#endif
+ u32 pktlog_filter;
+
+#ifdef CONFIG_DEV_COREDUMP
+ struct {
+ struct ath10k_fw_crash_data *fw_crash_data;
+ } coredump;
+#endif
+
struct {
/* protected by conf_mutex */
struct ath10k_fw_components utf_mode_fw;
@@ -1016,6 +1032,8 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
return false;
}
+extern unsigned long ath10k_coredump_mask;
+
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
new file mode 100644
index 000000000000..4dde126dab17
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -0,0 +1,993 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "coredump.h"
+
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+
+#include "debug.h"
+#include "hw.h"
+
+static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A064},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A074},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* RTC_SOC_BASE_ADDRESS */
+ .start = 0x0,
+
+ /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */
+ .len = 0x800 - 0x0,
+
+ .name = "REG_PART1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* STEREO_BASE_ADDRESS */
+ .start = 0x27000,
+
+ /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */
+ .len = 0x60000 - 0x27000,
+
+ .name = "REG_PART2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw21_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw21_register_sections),
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x90000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw30_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw30_register_sections),
+ },
+ },
+
+ /* IRAM dump must be put last */
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM1,
+ .start = 0x00980000,
+ .len = 0x00080000,
+ .name = "IRAM1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM2,
+ .start = 0x00a00000,
+ .len = 0x00040000,
+ .name = "IRAM2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x50000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x4000,
+ .len = 0x2000,
+ .name = "REG_PART1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x8000,
+ .len = 0x58000,
+ .name = "REG_PART2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
+ {
+ .hw_id = QCA6174_HW_1_0_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_1_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_3_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_2_1_VERSION,
+ .region_table = {
+ .regions = qca6174_hw21_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw21_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_0_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_2_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA9377_HW_1_1_DEV_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA988X_HW_2_0_VERSION,
+ .region_table = {
+ .regions = qca988x_hw20_mem_regions,
+ .size = ARRAY_SIZE(qca988x_hw20_mem_regions),
+ },
+ },
+};
+
+static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
+{
+ const struct ath10k_hw_mem_layout *hw;
+ const struct ath10k_mem_region *mem_region;
+ size_t size = 0;
+ int i;
+
+ hw = ath10k_coredump_get_mem_layout(ar);
+
+ if (!hw)
+ return 0;
+
+ mem_region = &hw->region_table.regions[0];
+
+ for (i = 0; i < hw->region_table.size; i++) {
+ size += mem_region->len;
+ mem_region++;
+ }
+
+ /* reserve space for the headers */
+ size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr);
+
+ /* make sure it is aligned to 16 bytes for debug message printout */
+ size = ALIGN(size, 16);
+
+ return size;
+}
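
A quick worked check of this computation, as a sketch outside the patch: for the qca988x_hw20 layout above, ath10k_coredump_get_ramdump_size() sums 0x50000 + 0x2000 + 0x58000 = 0xAA000 bytes of regions, adds three 12-byte headers (struct ath10k_dump_ram_data_hdr in coredump.h below is three __le32 fields) for 0xAA024, and ALIGN(0xAA024, 16) rounds that up to 0xAA030.
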
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ int i;
+
+ if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ return NULL;
+
+ if (WARN_ON(ar->target_version == 0))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
+ if (ar->target_version == hw_mem_layouts[i].hw_id)
+ return &hw_mem_layouts[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
+
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return NULL;
+
+ guid_gen(&crash_data->guid);
+ ktime_get_real_ts64(&crash_data->timestamp);
+
+ return crash_data;
+}
+EXPORT_SYMBOL(ath10k_coredump_new);
+
+static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+ struct ath10k_ce_crash_hdr *ce_hdr;
+ struct ath10k_dump_file_data *dump_data;
+ struct ath10k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*dump_data);
+ size_t len, sofar = 0;
+ unsigned char *buf;
+
+ len = hdr_len;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+
+ sofar += hdr_len;
+
+ /* This can get big now that it includes the FW RAM dump, so use
+ * vmalloc.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return NULL;
+
+ spin_lock_bh(&ar->data_lock);
+
+ dump_data = (struct ath10k_dump_file_data *)(buf);
+ strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+ sizeof(dump_data->df_magic));
+ dump_data->len = cpu_to_le32(len);
+
+ dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+ guid_copy(&dump_data->guid, &crash_data->guid);
+ dump_data->chip_id = cpu_to_le32(ar->chip_id);
+ dump_data->bus_type = cpu_to_le32(0);
+ dump_data->target_version = cpu_to_le32(ar->target_version);
+ dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+ dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+ dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+ dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+ dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+ dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+ dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+ dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+ dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+ dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+ strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+ sizeof(dump_data->fw_ver));
+
+ dump_data->kernel_ver_code = 0;
+ strlcpy(dump_data->kernel_ver, init_utsname()->release,
+ sizeof(dump_data->kernel_ver));
+
+ dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+ dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+ memcpy(dump_tlv->tlv_data, &crash_data->registers,
+ sizeof(crash_data->registers));
+ sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+ }
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+ ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+ memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+ memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+ }
+
+ /* Gather ram dump */
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
+ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+ crash_data->ramdump_buf_len);
+ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return dump_data;
+}
+
+int ath10k_coredump_submit(struct ath10k *ar)
+{
+ struct ath10k_dump_file_data *dump;
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ dump = ath10k_coredump_build(ar);
+ if (!dump) {
+ ath10k_warn(ar, "no crash dump data found for devcoredump");
+ return -ENODATA;
+ }
+
+ dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL);
+
+ return 0;
+}
+
+int ath10k_coredump_create(struct ath10k *ar)
+{
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data));
+ if (!ar->coredump.fw_crash_data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int ath10k_coredump_register(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+
+ crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
+ if (!crash_data->ramdump_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ath10k_coredump_unregister(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ vfree(crash_data->ramdump_buf);
+}
+
+void ath10k_coredump_destroy(struct ath10k *ar)
+{
+ if (ar->coredump.fw_crash_data->ramdump_buf) {
+ vfree(ar->coredump.fw_crash_data->ramdump_buf);
+ ar->coredump.fw_crash_data->ramdump_buf = NULL;
+ ar->coredump.fw_crash_data->ramdump_buf_len = 0;
+ }
+
+ vfree(ar->coredump.fw_crash_data);
+ ar->coredump.fw_crash_data = NULL;
+}
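
The new file splits the old debugfs dump flow into explicit lifecycle calls. A minimal sketch of the expected call order, assuming the call sites live in core code not shown in this hunk:

	ath10k_coredump_create(ar);	/* at driver allocation */
	ath10k_coredump_register(ar);	/* once target_version is known;
					 * sizes and allocates ramdump_buf */
	...
	/* on firmware crash, under ar->data_lock: */
	ath10k_coredump_new(ar);
	...
	ath10k_coredump_submit(ar);	/* builds the dump file and hands
					 * it to dev_coredumpv() */
	...
	ath10k_coredump_unregister(ar);
	ath10k_coredump_destroy(ar);
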
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
new file mode 100644
index 000000000000..bfee13038e59
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "core.h"
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_CE_DATA: copy engine state in binary format
+ * @ATH10K_FW_CRASH_DUMP_RAM_DATA: firmware RAM dump; contains multiple
+ *	struct ath10k_dump_ram_data_hdr entries
+ * @ATH10K_FW_CRASH_DUMP_MAX: number of dump types, keep last
+ */
+enum ath10k_fw_crash_dump_type {
+ ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+ ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
+
+ /* contains multiple struct ath10k_dump_ram_data_hdr */
+ ATH10K_FW_CRASH_DUMP_RAM_DATA = 2,
+
+ ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+ /* see ath10k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+ /* dump file information */
+
+ /* "ATH10K-FW-DUMP" */
+ char df_magic[16];
+
+ __le32 len;
+
+ /* file dump version */
+ __le32 version;
+
+ /* some info we can get from ath10k struct that might help */
+
+ guid_t guid;
+
+ __le32 chip_id;
+
+ /* 0 for now, in place for later hardware */
+ __le32 bus_type;
+
+ __le32 target_version;
+ __le32 fw_version_major;
+ __le32 fw_version_minor;
+ __le32 fw_version_release;
+ __le32 fw_version_build;
+ __le32 phy_capability;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 num_rf_chains;
+
+ /* firmware version string */
+ char fw_ver[ETHTOOL_FWVERS_LEN];
+
+ /* Kernel related information */
+
+ /* time-of-day stamp */
+ __le64 tv_sec;
+
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+
+ /* LINUX_VERSION_CODE */
+ __le32 kernel_ver_code;
+
+ /* VERMAGIC_STRING */
+ char kernel_ver[64];
+
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+
+ /* struct ath10k_tlv_dump_data + more */
+ u8 data[0];
+} __packed;
+
+struct ath10k_dump_ram_data_hdr {
+ /* enum ath10k_mem_region_type */
+ __le32 region_type;
+
+ __le32 start;
+
+ /* length of payload data, not including this header */
+ __le32 length;
+
+ u8 data[0];
+};
+
+/* magic number to fill the holes not copied due to sections in regions */
+#define ATH10K_MAGIC_NOT_COPIED 0xAA
+
+/* part of user space ABI */
+enum ath10k_mem_region_type {
+ ATH10K_MEM_REGION_TYPE_REG = 1,
+ ATH10K_MEM_REGION_TYPE_DRAM = 2,
+ ATH10K_MEM_REGION_TYPE_AXI = 3,
+ ATH10K_MEM_REGION_TYPE_IRAM1 = 4,
+ ATH10K_MEM_REGION_TYPE_IRAM2 = 5,
+};
+
+/* Define a section of a region which should be copied. Not all parts
+ * of the memory are safe to copy (some registers, for example), so
+ * sections are used to define exactly what is safe to copy.
+ *
+ * To minimize the size of the array, the list must obey the format:
+ * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
+ * also obey 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
+ * we may encounter errors in the dump processing.
+ */
+struct ath10k_mem_section {
+ u32 start;
+ u32 end;
+};
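
The ordering rule above is mechanical to check; a minimal sketch of a validator, assuming a hypothetical helper name (nothing like it is in this patch):

	static bool ath10k_mem_sections_sorted(const struct ath10k_mem_section *s,
					       u32 n)
	{
		u32 i;

		for (i = 0; i < n; i++) {
			/* each section must be non-empty */
			if (s[i].start >= s[i].end)
				return false;
			/* sections must be strictly ascending and disjoint */
			if (i + 1 < n && s[i].end >= s[i + 1].start)
				return false;
		}

		return true;
	}
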
+
+/* One region of a memory layout. If the sections field is NULL, the
+ * entire region is copied. If sections is non-NULL, only the areas
+ * specified in sections are copied and the rest of the areas are
+ * filled with ATH10K_MAGIC_NOT_COPIED.
+ */
+struct ath10k_mem_region {
+ enum ath10k_mem_region_type type;
+ u32 start;
+ u32 len;
+
+ const char *name;
+
+ struct {
+ const struct ath10k_mem_section *sections;
+ u32 size;
+ } section_table;
+};
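
As a sketch of what that implies for a dumper, assuming a hypothetical read_mem() accessor (the real copy loop lives in the bus code, outside this hunk):

	static void sketch_dump_region(const struct ath10k_mem_region *r, u8 *out,
				       void (*read_mem)(u32 addr, u8 *dst, u32 len))
	{
		u32 i;

		/* pre-fill so the uncopied holes keep the magic pattern */
		memset(out, ATH10K_MAGIC_NOT_COPIED, r->len);

		if (!r->section_table.sections) {
			read_mem(r->start, out, r->len);
			return;
		}

		for (i = 0; i < r->section_table.size; i++) {
			const struct ath10k_mem_section *s =
				&r->section_table.sections[i];

			read_mem(s->start, out + (s->start - r->start),
				 s->end - s->start);
		}
	}
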
+
+/* Contains the memory layout of a hardware version identified with the
+ * hardware id, split into regions.
+ */
+struct ath10k_hw_mem_layout {
+ u32 hw_id;
+
+ struct {
+ const struct ath10k_mem_region *regions;
+ int size;
+ } region_table;
+};
+
+/* FIXME: where to put this? */
+extern unsigned long ath10k_coredump_mask;
+
+#ifdef CONFIG_DEV_COREDUMP
+
+int ath10k_coredump_submit(struct ath10k *ar);
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar);
+int ath10k_coredump_create(struct ath10k *ar);
+int ath10k_coredump_register(struct ath10k *ar);
+void ath10k_coredump_unregister(struct ath10k *ar);
+void ath10k_coredump_destroy(struct ath10k *ar);
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline int ath10k_coredump_submit(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ return NULL;
+}
+
+static inline int ath10k_coredump_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_coredump_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_coredump_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_coredump_destroy(struct ath10k *ar)
+{
+}
+
+static inline const struct ath10k_hw_mem_layout *
+ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
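
Since these structures are user space ABI, a dump pulled from /sys/class/devcoredump can be parsed outside the kernel. A minimal sketch in plain C, assuming a little-endian host so the __le32 fields read as native integers (the struct and function names here are illustrative only):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct tlv {
		uint32_t type;	/* enum ath10k_fw_crash_dump_type */
		uint32_t len;	/* payload bytes, header excluded */
		uint8_t data[];
	};

	/* hdr_len is sizeof(struct ath10k_dump_file_data), the fixed header
	 * that precedes the first TLV; file_len comes from the header's
	 * 'len' field. */
	static void walk_tlvs(const uint8_t *buf, size_t file_len, size_t hdr_len)
	{
		size_t off = hdr_len;

		while (off + sizeof(struct tlv) <= file_len) {
			const struct tlv *t = (const void *)(buf + off);

			if (t->len > file_len - off - sizeof(*t))
				break;	/* truncated or corrupt TLV */

			printf("tlv type %u len %u\n", t->type, t->len);
			off += sizeof(*t) + t->len;
		}
	}
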
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index df514507d3f1..6d836a26272f 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,10 +18,8 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
-#include <linux/utsname.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
-#include <linux/devcoredump.h>
#include "core.h"
#include "debug.h"
@@ -33,86 +31,6 @@
#define ATH10K_DEBUG_CAL_DATA_LEN 12064
-#define ATH10K_FW_CRASH_DUMP_VERSION 1
-
-/**
- * enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
- */
-enum ath10k_fw_crash_dump_type {
- ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
- ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
-
- ATH10K_FW_CRASH_DUMP_MAX,
-};
-
-struct ath10k_tlv_dump_data {
- /* see ath10k_fw_crash_dump_type above */
- __le32 type;
-
- /* in bytes */
- __le32 tlv_len;
-
- /* pad to 32-bit boundaries as needed */
- u8 tlv_data[];
-} __packed;
-
-struct ath10k_dump_file_data {
- /* dump file information */
-
- /* "ATH10K-FW-DUMP" */
- char df_magic[16];
-
- __le32 len;
-
- /* file dump version */
- __le32 version;
-
- /* some info we can get from ath10k struct that might help */
-
- guid_t guid;
-
- __le32 chip_id;
-
- /* 0 for now, in place for later hardware */
- __le32 bus_type;
-
- __le32 target_version;
- __le32 fw_version_major;
- __le32 fw_version_minor;
- __le32 fw_version_release;
- __le32 fw_version_build;
- __le32 phy_capability;
- __le32 hw_min_tx_power;
- __le32 hw_max_tx_power;
- __le32 ht_cap_info;
- __le32 vht_cap_info;
- __le32 num_rf_chains;
-
- /* firmware version string */
- char fw_ver[ETHTOOL_FWVERS_LEN];
-
- /* Kernel related information */
-
- /* time-of-day stamp */
- __le64 tv_sec;
-
- /* time-of-day stamp, nano-seconds */
- __le64 tv_nsec;
-
- /* LINUX_VERSION_CODE */
- __le32 kernel_ver_code;
-
- /* VERMAGIC_STRING */
- char kernel_ver[64];
-
- /* room for growth w/out changing binary format */
- u8 unused[128];
-
- /* struct ath10k_tlv_dump_data + more */
- u8 data[0];
-} __packed;
-
void ath10k_info(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
@@ -711,189 +629,6 @@ static const struct file_operations fops_chip_id = {
.llseek = default_llseek,
};
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
- struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
-
- lockdep_assert_held(&ar->data_lock);
-
- crash_data->crashed_since_read = true;
- guid_gen(&crash_data->guid);
- getnstimeofday(&crash_data->timestamp);
-
- return crash_data;
-}
-EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
-
-static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar,
- bool mark_read)
-{
- struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
- struct ath10k_ce_crash_hdr *ce_hdr;
- struct ath10k_dump_file_data *dump_data;
- struct ath10k_tlv_dump_data *dump_tlv;
- size_t hdr_len = sizeof(*dump_data);
- size_t len, sofar = 0;
- unsigned char *buf;
-
- len = hdr_len;
- len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
- len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
- CE_COUNT * sizeof(ce_hdr->entries[0]);
-
- sofar += hdr_len;
-
- /* This is going to get big when we start dumping FW RAM and such,
- * so go ahead and use vmalloc.
- */
- buf = vzalloc(len);
- if (!buf)
- return NULL;
-
- spin_lock_bh(&ar->data_lock);
-
- if (!crash_data->crashed_since_read) {
- spin_unlock_bh(&ar->data_lock);
- vfree(buf);
- return NULL;
- }
-
- dump_data = (struct ath10k_dump_file_data *)(buf);
- strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
- sizeof(dump_data->df_magic));
- dump_data->len = cpu_to_le32(len);
-
- dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
-
- guid_copy(&dump_data->guid, &crash_data->guid);
- dump_data->chip_id = cpu_to_le32(ar->chip_id);
- dump_data->bus_type = cpu_to_le32(0);
- dump_data->target_version = cpu_to_le32(ar->target_version);
- dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
- dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
- dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
- dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
- dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
- dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
- dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
- dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
- dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
- dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
-
- strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
- sizeof(dump_data->fw_ver));
-
- dump_data->kernel_ver_code = 0;
- strlcpy(dump_data->kernel_ver, init_utsname()->release,
- sizeof(dump_data->kernel_ver));
-
- dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
- dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
-
- /* Gather crash-dump */
- dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
- dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
- dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
- memcpy(dump_tlv->tlv_data, &crash_data->registers,
- sizeof(crash_data->registers));
- sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-
- dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
- dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
- dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
- CE_COUNT * sizeof(ce_hdr->entries[0]));
- ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
- ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
- memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
- memcpy(ce_hdr->entries, crash_data->ce_crash_data,
- CE_COUNT * sizeof(ce_hdr->entries[0]));
- sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
- CE_COUNT * sizeof(ce_hdr->entries[0]);
-
- ar->debug.fw_crash_data->crashed_since_read = !mark_read;
-
- spin_unlock_bh(&ar->data_lock);
-
- return dump_data;
-}
-
-int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
- struct ath10k_dump_file_data *dump;
- void *dump_ptr;
- u32 dump_len;
-
- /* To keep the dump file available also for debugfs don't mark the
- * file read, only debugfs should do that.
- */
- dump = ath10k_build_dump_file(ar, false);
- if (!dump) {
- ath10k_warn(ar, "no crash dump data found for devcoredump");
- return -ENODATA;
- }
-
- /* Make a copy of the dump file for dev_coredumpv() as during the
- * transition period we need to own the original file. Once
- * fw_crash_dump debugfs file is removed no need to have a copy
- * anymore.
- */
- dump_len = le32_to_cpu(dump->len);
- dump_ptr = vzalloc(dump_len);
-
- if (!dump_ptr)
- return -ENOMEM;
-
- memcpy(dump_ptr, dump, dump_len);
-
- dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL);
-
- return 0;
-}
-
-static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
-{
- struct ath10k *ar = inode->i_private;
- struct ath10k_dump_file_data *dump;
-
- ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead.");
-
- dump = ath10k_build_dump_file(ar, true);
- if (!dump)
- return -ENODATA;
-
- file->private_data = dump;
-
- return 0;
-}
-
-static ssize_t ath10k_fw_crash_dump_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath10k_dump_file_data *dump_file = file->private_data;
-
- return simple_read_from_buffer(user_buf, count, ppos,
- dump_file,
- le32_to_cpu(dump_file->len));
-}
-
-static int ath10k_fw_crash_dump_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
-
- return 0;
-}
-
-static const struct file_operations fops_fw_crash_dump = {
- .open = ath10k_fw_crash_dump_open,
- .read = ath10k_fw_crash_dump_read,
- .release = ath10k_fw_crash_dump_release,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
static ssize_t ath10k_reg_addr_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1950,14 +1685,14 @@ int ath10k_debug_start(struct ath10k *ar)
ret);
}
- if (ar->debug.pktlog_filter) {
+ if (ar->pktlog_filter) {
ret = ath10k_wmi_pdev_pktlog_enable(ar,
- ar->debug.pktlog_filter);
+ ar->pktlog_filter);
if (ret)
/* not serious */
ath10k_warn(ar,
"failed to enable pktlog filter %x: %d\n",
- ar->debug.pktlog_filter, ret);
+ ar->pktlog_filter, ret);
} else {
ret = ath10k_wmi_pdev_pktlog_disable(ar);
if (ret)
@@ -2097,12 +1832,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
- ar->debug.pktlog_filter = filter;
+ ar->pktlog_filter = filter;
ret = count;
goto out;
}
- if (filter == ar->debug.pktlog_filter) {
+ if (filter == ar->pktlog_filter) {
ret = count;
goto out;
}
@@ -2111,7 +1846,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
if (ret) {
ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
- ar->debug.pktlog_filter, ret);
+ ar->pktlog_filter, ret);
goto out;
}
} else {
@@ -2122,7 +1857,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
}
}
- ar->debug.pktlog_filter = filter;
+ ar->pktlog_filter = filter;
ret = count;
out:
@@ -2139,7 +1874,7 @@ static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
- ar->debug.pktlog_filter);
+ ar->pktlog_filter);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
@@ -2402,10 +2137,6 @@ static const struct file_operations fops_fw_checksums = {
int ath10k_debug_create(struct ath10k *ar)
{
- ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
- if (!ar->debug.fw_crash_data)
- return -ENOMEM;
-
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
if (!ar->debug.cal_data)
return -ENOMEM;
@@ -2420,9 +2151,6 @@ int ath10k_debug_create(struct ath10k *ar)
void ath10k_debug_destroy(struct ath10k *ar)
{
- vfree(ar->debug.fw_crash_data);
- ar->debug.fw_crash_data = NULL;
-
vfree(ar->debug.cal_data);
ar->debug.cal_data = NULL;
@@ -2460,9 +2188,6 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
&fops_simulate_fw_crash);
- debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar,
- &fops_fw_crash_dump);
-
debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
&fops_reg_addr);
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 548ad5483a4a..e54308889e59 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -42,6 +42,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_SDIO_DUMP = 0x00020000,
ATH10K_DBG_USB = 0x00040000,
ATH10K_DBG_USB_BULK = 0x00080000,
+ ATH10K_DBG_SNOC = 0x00100000,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -51,7 +52,8 @@ enum ath10k_pktlog_filter {
ATH10K_PKTLOG_RCFIND = 0x000000004,
ATH10K_PKTLOG_RCUPDATE = 0x000000008,
ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
- ATH10K_PKTLOG_ANY = 0x00000001f,
+ ATH10K_PKTLOG_PEER_STATS = 0x000000040,
+ ATH10K_PKTLOG_ANY = 0x00000005f,
};
enum ath10k_dbg_aggr_mode {
@@ -60,6 +62,21 @@ enum ath10k_dbg_aggr_mode {
ATH10K_DBG_AGGR_MODE_MAX,
};
+/* Types of packet log events */
+enum ath_pktlog_type {
+ ATH_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH_PKTLOG_TYPE_TX_STAT,
+};
+
+struct ath10k_pktlog_hdr {
+ __le16 flags;
+ __le16 missed_cnt;
+ __le16 log_type; /* Type of log information following this header */
+ __le16 size; /* Size of variable-length log information in bytes */
+ __le32 timestamp;
+ u8 payload[0];
+} __packed;
+
/* FIXME: How to calculate the buffer size sanely? */
#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
@@ -84,13 +101,8 @@ void ath10k_debug_unregister(struct ath10k *ar);
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
void ath10k_debug_tpc_stats_process(struct ath10k *ar,
struct ath10k_tpc_stats *tpc_stats);
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
-
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
-int ath10k_debug_fw_devcoredump(struct ath10k *ar);
-
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -157,12 +169,6 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
{
}
-static inline struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
- return NULL;
-}
-
static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
{
return 0;
@@ -173,11 +179,6 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return 0;
}
-static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
- return 0;
-}
-
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
@@ -190,9 +191,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
void ath10k_sta_update_rx_duration(struct ath10k *ar,
struct ath10k_fw_stats *stats);
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct station_info *sinfo);
#else
static inline
void ath10k_sta_update_rx_duration(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index d59ac6b83340..b260b09dd4d3 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -65,33 +65,6 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar,
ath10k_sta_update_stats_rx_duration(ar, stats);
}
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct station_info *sinfo)
-{
- struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
- struct ath10k *ar = arsta->arvif->ar;
-
- if (!ath10k_peer_stats_enabled(ar))
- return;
-
- sinfo->rx_duration = arsta->rx_duration;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
-
- if (!arsta->txrate.legacy && !arsta->txrate.nss)
- return;
-
- if (arsta->txrate.legacy) {
- sinfo->txrate.legacy = arsta->txrate.legacy;
- } else {
- sinfo->txrate.mcs = arsta->txrate.mcs;
- sinfo->txrate.nss = arsta->txrate.nss;
- sinfo->txrate.bw = arsta->txrate.bw;
- }
- sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
-}
-
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 6679dd9cfd12..6da4e3369c5a 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index e5c80f582ff5..492dc5b4bbf2 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 24663b07eeac..a2f8814b3e53 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index cd160b16db1e..625198dea18b 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -207,6 +207,9 @@ int ath10k_htt_init(struct ath10k *ar)
WARN_ON(1);
return -EINVAL;
}
+ ath10k_htt_set_tx_ops(htt);
+ ath10k_htt_set_rx_ops(htt);
+
return 0;
}
@@ -254,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
return status;
}
- status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+ status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
if (status)
return status;
- status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+ status = htt->tx_ops->htt_send_rx_ring_cfg(htt);
if (status) {
ath10k_warn(ar, "failed to setup rx ring: %d\n",
status);
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 6305308422c4..8cc2a8b278e4 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -107,6 +107,14 @@ struct htt_msdu_ext_desc {
struct htt_data_tx_desc_frag frags[6];
};
+struct htt_msdu_ext_desc_64 {
+ __le32 tso_flag[5];
+ __le16 ip_identification;
+ u8 flags;
+ u8 reserved;
+ struct htt_data_tx_desc_frag frags[6];
+};
+
#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
@@ -179,6 +187,22 @@ struct htt_data_tx_desc {
u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
+struct htt_data_tx_desc_64 {
+ u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+ __le16 len;
+ __le16 id;
+ __le64 frags_paddr;
+ union {
+ __le32 peerid;
+ struct {
+ __le16 peerid;
+ __le16 freq;
+ } __packed offchan_tx;
+ } __packed;
+ u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
+
enum htt_rx_ring_flags {
HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
@@ -200,8 +224,11 @@ enum htt_rx_ring_flags {
#define HTT_RX_RING_SIZE_MIN 128
#define HTT_RX_RING_SIZE_MAX 2048
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
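
With HTT_RX_RING_SIZE pinned to HTT_RX_RING_SIZE_MAX = 2048 entries, these evaluate to a fill level of (2048 / 2) - 1 = 1023 for the normal case and 2048 - 1 = 2047 for the dual-MAC case.
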
-struct htt_rx_ring_setup_ring {
+struct htt_rx_ring_setup_ring32 {
__le32 fw_idx_shadow_reg_paddr;
__le32 rx_ring_base_paddr;
__le16 rx_ring_len; /* in 4-byte words */
@@ -222,14 +249,40 @@ struct htt_rx_ring_setup_ring {
__le16 frag_info_offset;
} __packed;
+struct htt_rx_ring_setup_ring64 {
+ __le64 fw_idx_shadow_reg_paddr;
+ __le64 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ /* the following offsets are in 4-byte units */
+ __le16 mac80211_hdr_offset;
+ __le16 msdu_payload_offset;
+ __le16 ppdu_start_offset;
+ __le16 ppdu_end_offset;
+ __le16 mpdu_start_offset;
+ __le16 mpdu_end_offset;
+ __le16 msdu_start_offset;
+ __le16 msdu_end_offset;
+ __le16 rx_attention_offset;
+ __le16 frag_info_offset;
+} __packed;
+
struct htt_rx_ring_setup_hdr {
u8 num_rings; /* supported values: 1, 2 */
__le16 rsvd0;
} __packed;
-struct htt_rx_ring_setup {
+struct htt_rx_ring_setup_32 {
struct htt_rx_ring_setup_hdr hdr;
- struct htt_rx_ring_setup_ring rings[0];
+ struct htt_rx_ring_setup_ring32 rings[0];
+} __packed;
+
+struct htt_rx_ring_setup_64 {
+ struct htt_rx_ring_setup_hdr hdr;
+ struct htt_rx_ring_setup_ring64 rings[0];
} __packed;
/*
@@ -855,13 +908,23 @@ struct htt_rx_in_ord_msdu_desc {
u8 reserved;
} __packed;
+struct htt_rx_in_ord_msdu_desc_ext {
+ __le64 msdu_paddr;
+ __le16 msdu_len;
+ u8 fw_desc;
+ u8 reserved;
+} __packed;
+
struct htt_rx_in_ord_ind {
u8 info;
__le16 peer_id;
u8 vdev_id;
u8 reserved;
__le16 msdu_count;
- struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+ union {
+ struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
+ struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
+ } __packed;
} __packed;
#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
@@ -1351,7 +1414,7 @@ struct htt_q_state_conf {
u8 pad[2];
} __packed;
-struct htt_frag_desc_bank_cfg {
+struct htt_frag_desc_bank_cfg32 {
u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
u8 num_banks;
u8 desc_size;
@@ -1360,6 +1423,15 @@ struct htt_frag_desc_bank_cfg {
struct htt_q_state_conf q_state;
} __packed;
+struct htt_frag_desc_bank_cfg64 {
+ u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+ u8 num_banks;
+ u8 desc_size;
+ __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_q_state_conf q_state;
+} __packed;
+
#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128
#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f
#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0
@@ -1497,6 +1569,23 @@ struct htt_peer_tx_stats {
u8 payload[0];
} __packed;
+#define ATH10K_10_2_TX_STATS_OFFSET 136
+#define PEER_STATS_FOR_NO_OF_PPDUS 4
+
+struct ath10k_10_2_peer_tx_stats {
+ u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le32 tx_duration;
+ u8 tx_ppdu_cnt;
+ u8 peer_id;
+} __packed;
+
union htt_rx_pn_t {
/* WEP: 24-bit PN */
u32 pn24;
@@ -1514,11 +1603,13 @@ struct htt_cmd {
struct htt_ver_req ver_req;
struct htt_mgmt_tx_desc mgmt_tx;
struct htt_data_tx_desc data_tx;
- struct htt_rx_ring_setup rx_setup;
+ struct htt_rx_ring_setup_32 rx_setup_32;
+ struct htt_rx_ring_setup_64 rx_setup_64;
struct htt_stats_req stats_req;
struct htt_oob_sync_req oob_sync_req;
struct htt_aggr_conf aggr_conf;
- struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+ struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
+ struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
struct htt_tx_fetch_resp tx_fetch_resp;
};
} __packed;
@@ -1576,13 +1667,20 @@ struct htt_peer_unmap_event {
u16 peer_id;
};
-struct ath10k_htt_txbuf {
+struct ath10k_htt_txbuf_32 {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
struct htt_cmd_hdr cmd_hdr;
struct htt_data_tx_desc cmd_tx;
} __packed;
+struct ath10k_htt_txbuf_64 {
+ struct htt_data_tx_desc_frag frags[2];
+ struct ath10k_htc_hdr htc_hdr;
+ struct htt_cmd_hdr cmd_hdr;
+ struct htt_data_tx_desc_64 cmd_tx;
+} __packed;
+
struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
@@ -1627,7 +1725,10 @@ struct ath10k_htt {
* rx buffers the host SW provides for the MAC HW to
* fill.
*/
- __le32 *paddrs_ring;
+ union {
+ __le64 *paddrs_ring_64;
+ __le32 *paddrs_ring_32;
+ };
/*
* Base address of ring, as a "physical" device address
@@ -1695,7 +1796,7 @@ struct ath10k_htt {
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls
*/
- struct sk_buff_head rx_compl_q;
+ struct sk_buff_head rx_msdus_q;
struct sk_buff_head rx_in_ord_compl_q;
struct sk_buff_head tx_fetch_ind_q;
@@ -1704,12 +1805,20 @@ struct ath10k_htt {
struct {
dma_addr_t paddr;
- struct htt_msdu_ext_desc *vaddr;
+ union {
+ struct htt_msdu_ext_desc *vaddr_desc_32;
+ struct htt_msdu_ext_desc_64 *vaddr_desc_64;
+ };
+ size_t size;
} frag_desc;
struct {
dma_addr_t paddr;
- struct ath10k_htt_txbuf *vaddr;
+ union {
+ struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
+ struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
+ };
+ size_t size;
} txbuf;
struct {
@@ -1724,6 +1833,28 @@ struct ath10k_htt {
} tx_q_state;
bool tx_mem_allocated;
+ const struct ath10k_htt_tx_ops *tx_ops;
+ const struct ath10k_htt_rx_ops *rx_ops;
+};
+
+struct ath10k_htt_tx_ops {
+ int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
+ int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
+ int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
+ void (*htt_free_frag_desc)(struct ath10k_htt *htt);
+ int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu);
+ int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
+ void (*htt_free_txbuff)(struct ath10k_htt *htt);
+};
+
+struct ath10k_htt_rx_ops {
+ size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
+ void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
+ void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
+ int idx);
+ void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
+ void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1803,8 +1934,6 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
@@ -1829,11 +1958,9 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
-int ath10k_htt_tx(struct ath10k_htt *htt,
- enum ath10k_hw_txrx_mode txmode,
- struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
-
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 7d295ee71534..6d96f9560950 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -25,9 +25,6 @@
#include <linux/log2.h>
-#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
-#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
-
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
@@ -36,7 +33,7 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static struct sk_buff *
-ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
struct ath10k_skb_rxcb *rxcb;
@@ -84,6 +81,60 @@ static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
+static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
+{
+ return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
+}
+
+static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
+{
+ return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
+}
+
+static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ htt->rx_ring.paddrs_ring_32 = vaddr;
+}
+
+static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ htt->rx_ring.paddrs_ring_64 = vaddr;
+}
+
+static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
+ dma_addr_t paddr, int idx)
+{
+ htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
+}
+
+static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
+ dma_addr_t paddr, int idx)
+{
+ htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
+}
+
+static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
+{
+ htt->rx_ring.paddrs_ring_32[idx] = 0;
+}
+
+static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
+{
+ htt->rx_ring.paddrs_ring_64[idx] = 0;
+}
+
+static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
+{
+ return (void *)htt->rx_ring.paddrs_ring_32;
+}
+
+static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
+{
+ return (void *)htt->rx_ring.paddrs_ring_64;
+}
+
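
These per-width helpers are meant to be reached through the ath10k_htt_rx_ops table declared in htt.h and selected by ath10k_htt_set_rx_ops(). A sketch of the wiring, with table names assumed (the actual definitions are outside this hunk):

	static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
		.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
		.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
		.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
		.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
		.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
	};

	static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
		.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
		.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
		.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
		.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
		.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
	};

	void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
	{
		if (htt->ar->hw_params.target_64bit)
			htt->rx_ops = &htt_rx_ops_64;
		else
			htt->rx_ops = &htt_rx_ops_32;
	}
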
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
struct htt_rx_desc *rx_desc;
@@ -129,13 +180,13 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
rxcb = ATH10K_SKB_RXCB(skb);
rxcb->paddr = paddr;
htt->rx_ring.netbufs_ring[idx] = skb;
- htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+ htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
htt->rx_ring.fill_cnt++;
if (htt->rx_ring.in_ord_rx) {
hash_add(htt->rx_ring.skb_table,
&ATH10K_SKB_RXCB(skb)->hlist,
- (u32)paddr);
+ paddr);
}
num--;
@@ -227,16 +278,15 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
- skb_queue_purge(&htt->rx_compl_q);
+ skb_queue_purge(&htt->rx_msdus_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
ath10k_htt_rx_ring_free(htt);
dma_free_coherent(htt->ar->dev,
- (htt->rx_ring.size *
- sizeof(htt->rx_ring.paddrs_ring)),
- htt->rx_ring.paddrs_ring,
+ htt->rx_ops->htt_get_rx_ring_size(htt),
+ htt->rx_ops->htt_get_vaddr_ring(htt),
htt->rx_ring.base_paddr);
dma_free_coherent(htt->ar->dev,
@@ -263,7 +313,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
htt->rx_ring.netbufs_ring[idx] = NULL;
- htt->rx_ring.paddrs_ring[idx] = 0;
+ htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
idx++;
idx &= htt->rx_ring.size_mask;
@@ -383,7 +433,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
}
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
- u32 paddr)
+ u64 paddr)
{
struct ath10k *ar = htt->ar;
struct ath10k_skb_rxcb *rxcb;
@@ -408,12 +458,12 @@ static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
return msdu;
}
-static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
- struct htt_rx_in_ord_ind *ev,
- struct sk_buff_head *list)
+static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
- struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
+ struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
struct htt_rx_desc *rxd;
struct sk_buff *msdu;
int msdu_count;
@@ -458,11 +508,60 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
return 0;
}
+static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
+ struct htt_rx_desc *rxd;
+ struct sk_buff *msdu;
+ int msdu_count;
+ bool is_offload;
+ u64 paddr;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu_count = __le16_to_cpu(ev->msdu_count);
+ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+ while (msdu_count--) {
+ paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
+ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!msdu) {
+ __skb_queue_purge(list);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(list, msdu);
+
+ if (!is_offload) {
+ rxd = (void *)msdu->data;
+
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+ skb_put(msdu, sizeof(*rxd));
+ skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+ if (!(__le32_to_cpu(rxd->attention.flags) &
+ RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+ return -EIO;
+ }
+ }
+
+ msdu_desc++;
+ }
+
+ return 0;
+}
+
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
dma_addr_t paddr;
- void *vaddr;
+ void *vaddr, *vaddr_ring;
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
@@ -473,7 +572,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
*/
htt->rx_ring.size = HTT_RX_RING_SIZE;
htt->rx_ring.size_mask = htt->rx_ring.size - 1;
- htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+ htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
if (!is_power_of_2(htt->rx_ring.size)) {
ath10k_warn(ar, "htt rx ring size is not power of 2\n");
@@ -486,13 +585,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
if (!htt->rx_ring.netbufs_ring)
goto err_netbuf;
- size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
+ size = htt->rx_ops->htt_get_rx_ring_size(htt);
- vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
- if (!vaddr)
+ vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
+ if (!vaddr_ring)
goto err_dma_ring;
- htt->rx_ring.paddrs_ring = vaddr;
+ htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
htt->rx_ring.base_paddr = paddr;
vaddr = dma_alloc_coherent(htt->ar->dev,
@@ -515,7 +614,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = 0;
hash_init(htt->rx_ring.skb_table);
- skb_queue_head_init(&htt->rx_compl_q);
+ skb_queue_head_init(&htt->rx_msdus_q);
skb_queue_head_init(&htt->rx_in_ord_compl_q);
skb_queue_head_init(&htt->tx_fetch_ind_q);
atomic_set(&htt->num_mpdus_ready, 0);
@@ -526,9 +625,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
err_dma_idx:
dma_free_coherent(htt->ar->dev,
- (htt->rx_ring.size *
- sizeof(htt->rx_ring.paddrs_ring)),
- htt->rx_ring.paddrs_ring,
+ htt->rx_ops->htt_get_rx_ring_size(htt),
+ vaddr_ring,
htt->rx_ring.base_paddr);
err_dma_ring:
kfree(htt->rx_ring.netbufs_ring);
@@ -974,16 +1072,25 @@ static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
return out;
}
-static void ath10k_process_rx(struct ath10k *ar,
- struct ieee80211_rx_status *rx_status,
- struct sk_buff *skb)
+static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ *status = *rx_status;
+
+ __skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+}
+
+static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_rx_status *status;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
char tid[32];
status = IEEE80211_SKB_RXCB(skb);
- *status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
@@ -1517,7 +1624,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
}
}
-static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status)
{
@@ -1540,7 +1647,7 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
status->flag |= RX_FLAG_ALLOW_SAME_PN;
}
- ath10k_process_rx(ar, status, msdu);
+ ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
}
}
@@ -1652,7 +1759,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct sk_buff_head amsdu;
- int ret, num_msdus;
+ int ret;
__skb_queue_head_init(&amsdu);
@@ -1674,7 +1781,6 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return ret;
}
- num_msdus = skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
/* only for ret = 1 indicates chained msdus */
@@ -1683,9 +1789,9 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
- ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
- return num_msdus;
+ return 0;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@@ -1893,15 +1999,14 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
RX_FLAG_MMIC_STRIPPED;
}
-static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
- struct sk_buff_head *list)
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+ struct sk_buff_head *list)
{
struct ath10k_htt *htt = &ar->htt;
struct ieee80211_rx_status *status = &htt->rx_status;
struct htt_rx_offload_msdu *rx;
struct sk_buff *msdu;
size_t offset;
- int num_msdu = 0;
while ((msdu = __skb_dequeue(list))) {
/* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1940,10 +2045,8 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
- ath10k_process_rx(ar, status, msdu);
- num_msdu++;
+ ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
}
- return num_msdu;
}
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
@@ -1959,7 +2062,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
u8 tid;
bool offload;
bool frag;
- int ret, num_msdus = 0;
+ int ret;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -1981,7 +2084,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
"htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
vdev_id, peer_id, tid, offload, frag, msdu_count);
- if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
+ if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
ath10k_warn(ar, "dropping invalid in order rx indication\n");
return -EINVAL;
}
@@ -1990,7 +2093,13 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* extracted and processed.
*/
__skb_queue_head_init(&list);
- ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
+ if (ar->hw_params.target_64bit)
+ ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
+ &list);
+ else
+ ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
+ &list);
+
if (ret < 0) {
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
htt->rx_confused = true;
@@ -2001,7 +2110,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* separately.
*/
if (offload)
- num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
+ ath10k_htt_rx_h_rx_offload(ar, &list);
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
@@ -2014,11 +2123,10 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
- num_msdus += skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
- ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+ ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
/* fall through */
@@ -2030,7 +2138,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
return -EIO;
}
}
- return num_msdus;
+ return ret;
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2449,6 +2557,62 @@ out:
rcu_read_unlock();
}
+static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
+{
+ struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
+ struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+ struct ath10k_10_2_peer_tx_stats *tx_stats;
+ struct ieee80211_sta *sta;
+ struct ath10k_peer *peer;
+ u16 log_type = __le16_to_cpu(hdr->log_type);
+ u32 peer_id = 0, i;
+
+ if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
+ return;
+
+ tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
+ ATH10K_10_2_TX_STATS_OFFSET);
+
+ if (!tx_stats->tx_ppdu_cnt)
+ return;
+
+ peer_id = tx_stats->peer_id;
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
+ peer_id);
+ goto out;
+ }
+
+ sta = peer->sta;
+ for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
+ p_tx_stats->succ_bytes =
+ __le16_to_cpu(tx_stats->success_bytes[i]);
+ p_tx_stats->retry_bytes =
+ __le16_to_cpu(tx_stats->retry_bytes[i]);
+ p_tx_stats->failed_bytes =
+ __le16_to_cpu(tx_stats->failed_bytes[i]);
+ p_tx_stats->ratecode = tx_stats->ratecode[i];
+ p_tx_stats->flags = tx_stats->flags[i];
+ p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
+ p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
+ p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
+
+ ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+
+ return;
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -2566,6 +2730,10 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb->len -
offsetof(struct htt_resp,
pktlog_msg.payload));
+
+ if (ath10k_peer_stats_enabled(ar))
+ ath10k_fetch_10_2_tx_stats(ar,
+ resp->pktlog_msg.payload);
break;
}
case HTT_T2H_MSG_TYPE_RX_FLUSH: {
@@ -2631,6 +2799,24 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
+static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
+{
+ struct sk_buff *skb;
+
+ while (quota < budget) {
+ if (skb_queue_empty(&ar->htt.rx_msdus_q))
+ break;
+
+ skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+ if (!skb)
+ break;
+ ath10k_process_rx(ar, skb);
+ quota++;
+ }
+
+ return quota;
+}
+
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
struct ath10k_htt *htt = &ar->htt;
@@ -2638,63 +2824,44 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
struct sk_buff_head tx_ind_q;
struct sk_buff *skb;
unsigned long flags;
- int quota = 0, done, num_rx_msdus;
+ int quota = 0, done, ret;
bool resched_napi = false;
__skb_queue_head_init(&tx_ind_q);
- /* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
- * process it first to utilize full available quota.
+ /* Process pending frames before dequeuing more data
+ * from hardware.
*/
- while (quota < budget) {
- if (skb_queue_empty(&htt->rx_in_ord_compl_q))
- break;
-
- skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
- if (!skb) {
- resched_napi = true;
- goto exit;
- }
+ quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+ if (quota == budget) {
+ resched_napi = true;
+ goto exit;
+ }
+ while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
spin_lock_bh(&htt->rx_ring.lock);
- num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+ ret = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
- if (num_rx_msdus < 0) {
- resched_napi = true;
- goto exit;
- }
dev_kfree_skb_any(skb);
- if (num_rx_msdus > 0)
- quota += num_rx_msdus;
-
- if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
- !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+ if (ret == -EIO) {
resched_napi = true;
goto exit;
}
}
- while (quota < budget) {
- /* no more data to receive */
- if (!atomic_read(&htt->num_mpdus_ready))
- break;
-
- num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
- if (num_rx_msdus < 0) {
+ while (atomic_read(&htt->num_mpdus_ready)) {
+ ret = ath10k_htt_rx_handle_amsdu(htt);
+ if (ret == -EIO) {
resched_napi = true;
goto exit;
}
-
- quota += num_rx_msdus;
atomic_dec(&htt->num_mpdus_ready);
- if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
- atomic_read(&htt->num_mpdus_ready)) {
- resched_napi = true;
- goto exit;
- }
}
+ /* Deliver received data after processing data from hardware */
+ quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+
/* From NAPI documentation:
* The napi poll() function may also process TX completions, in which
* case if it processes the entire TX ring then it should count that
@@ -2732,3 +2899,29 @@ exit:
return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
+ .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
+ .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
+ .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
+ .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
+ .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
+};
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
+ .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
+ .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
+ .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
+ .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
+ .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
+};
+
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ if (ar->hw_params.target_64bit)
+ htt->rx_ops = &htt_rx_ops_64;
+ else
+ htt->rx_ops = &htt_rx_ops_32;
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 685faac1368f..d334b7be1fea 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -229,50 +229,91 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
idr_remove(&htt->pending_tx, msdu_id);
}
-static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
- if (!htt->txbuf.vaddr)
+ if (!htt->txbuf.vaddr_txbuff_32)
return;
- size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
- dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
- htt->txbuf.vaddr = NULL;
+ size = htt->txbuf.size;
+ dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
+ htt->txbuf.paddr);
+ htt->txbuf.vaddr_txbuff_32 = NULL;
}
-static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
- size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
- htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
- GFP_KERNEL);
- if (!htt->txbuf.vaddr)
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf_32);
+
+ htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
+ &htt->txbuf.paddr,
+ GFP_KERNEL);
+ if (!htt->txbuf.vaddr_txbuff_32)
return -ENOMEM;
+ htt->txbuf.size = size;
+
return 0;
}
-static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
size_t size;
- if (!htt->frag_desc.vaddr)
+ if (!htt->txbuf.vaddr_txbuff_64)
return;
- size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+ size = htt->txbuf.size;
+ dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
+ htt->txbuf.paddr);
+ htt->txbuf.vaddr_txbuff_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf_64);
+
+ htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
+ &htt->txbuf.paddr,
+ GFP_KERNEL);
+ if (!htt->txbuf.vaddr_txbuff_64)
+ return -ENOMEM;
+
+ htt->txbuf.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr_desc_32)
+ return;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
dma_free_coherent(htt->ar->dev,
size,
- htt->frag_desc.vaddr,
+ htt->frag_desc.vaddr_desc_32,
htt->frag_desc.paddr);
- htt->frag_desc.vaddr = NULL;
+
+ htt->frag_desc.vaddr_desc_32 = NULL;
}
-static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
@@ -280,12 +321,57 @@ static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
if (!ar->hw_params.continuous_frag_desc)
return 0;
- size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
- htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
- &htt->frag_desc.paddr,
- GFP_KERNEL);
- if (!htt->frag_desc.vaddr)
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
+ htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr_desc_32) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
return -ENOMEM;
+ }
+ htt->frag_desc.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr_desc_64)
+ return;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc_64);
+
+ dma_free_coherent(htt->ar->dev,
+ size,
+ htt->frag_desc.vaddr_desc_64,
+ htt->frag_desc.paddr);
+
+ htt->frag_desc.vaddr_desc_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc_64);
+
+ htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr_desc_64) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
+ return -ENOMEM;
+ }
+ htt->frag_desc.size = size;
return 0;
}
@@ -357,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
int ret;
- ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
+ ret = htt->tx_ops->htt_alloc_txbuff(htt);
if (ret) {
ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
return ret;
}
- ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
+ ret = htt->tx_ops->htt_alloc_frag_desc(htt);
if (ret) {
ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
goto free_txbuf;
@@ -387,10 +473,10 @@ free_txq:
ath10k_htt_tx_free_txq(htt);
free_frag_desc:
- ath10k_htt_tx_free_cont_frag_desc(htt);
+ htt->tx_ops->htt_free_frag_desc(htt);
free_txbuf:
- ath10k_htt_tx_free_cont_txbuf(htt);
+ htt->tx_ops->htt_free_txbuff(htt);
return ret;
}
@@ -444,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
if (!htt->tx_mem_allocated)
return;
- ath10k_htt_tx_free_cont_txbuf(htt);
+ htt->tx_ops->htt_free_txbuff(htt);
ath10k_htt_tx_free_txq(htt);
- ath10k_htt_tx_free_cont_frag_desc(htt);
+ htt->tx_ops->htt_free_frag_desc(htt);
ath10k_htt_tx_free_txdone_fifo(htt);
htt->tx_mem_allocated = false;
}
@@ -545,12 +631,12 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
return 0;
}
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
- struct htt_frag_desc_bank_cfg *cfg;
+ struct htt_frag_desc_bank_cfg32 *cfg;
int ret, size;
u8 info;
@@ -562,7 +648,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
return -EINVAL;
}
- size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
skb = ath10k_htc_alloc_skb(ar, size);
if (!skb)
return -ENOMEM;
@@ -579,7 +665,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
ar->running_fw->fw_file.fw_features))
info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
- cfg = &cmd->frag_desc_bank_cfg;
+ cfg = &cmd->frag_desc_bank_cfg32;
cfg->info = info;
cfg->num_banks = 1;
cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
@@ -607,12 +693,112 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
- struct htt_rx_ring_setup_ring *ring;
+ struct htt_frag_desc_bank_cfg64 *cfg;
+ int ret, size;
+ u8 info;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ if (!htt->frag_desc.paddr) {
+ ath10k_warn(ar, "invalid frag desc memory\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
+ skb = ath10k_htc_alloc_skb(ar, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+
+ info = 0;
+ info |= SM(htt->tx_q_state.type,
+ HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+ cfg = &cmd->frag_desc_bank_cfg64;
+ cfg->info = info;
+ cfg->num_banks = 1;
+ cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
+ cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
+ cfg->bank_id[0].bank_min_id = 0;
+ cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+ 1);
+
+ cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+ cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+ cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+ cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+ cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+{
+ struct htt_rx_ring_setup_ring32 *ring =
+ (struct htt_rx_ring_setup_ring32 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+ ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+{
+ struct htt_rx_ring_setup_ring64 *ring =
+ (struct htt_rx_ring_setup_ring64 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+ ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring32 *ring;
const int num_rx_ring = 1;
u16 flags;
u32 fw_idx;
@@ -626,7 +812,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
- len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
@@ -635,10 +821,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
- ring = &cmd->rx_setup.rings[0];
+ ring = &cmd->rx_setup_32.rings[0];
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
- cmd->rx_setup.hdr.num_rings = 1;
+ cmd->rx_setup_32.hdr.num_rings = 1;
/* FIXME: do we need all of this? */
flags = 0;
@@ -669,21 +855,76 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+ ath10k_htt_fill_rx_desc_offset_32(ring);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+ return 0;
+}
-#undef desc_offset
+static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring64 *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ u32 fw_idx;
+ int len;
+ int ret;
+
+ /* HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup_64.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup_64.hdr.num_rings = 1;
+
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_PPDU_START;
+ flags |= HTT_RX_RING_FLAGS_PPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MPDU_START;
+ flags |= HTT_RX_RING_FLAGS_MPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MSDU_START;
+ flags |= HTT_RX_RING_FLAGS_MSDU_END;
+ flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+ flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+ flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+ flags |= HTT_RX_RING_FLAGS_NULL_RX;
+ flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+ fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
+ ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
+ ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+ ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+ ath10k_htt_fill_rx_desc_offset_64(ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -895,8 +1136,9 @@ err:
return res;
}
-int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
- struct sk_buff *msdu)
+static int ath10k_htt_tx_32(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
@@ -904,7 +1146,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
- struct ath10k_htt_txbuf *txbuf;
+ struct ath10k_htt_txbuf_32 *txbuf;
struct htt_data_tx_desc_frag *frags;
bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
@@ -917,6 +1159,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
u32 frags_paddr = 0;
u32 txbuf_paddr;
struct htt_msdu_ext_desc *ext_desc = NULL;
+ struct htt_msdu_ext_desc *ext_desc_t = NULL;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
@@ -929,9 +1172,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
- txbuf = &htt->txbuf.vaddr[msdu_id];
+ txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
txbuf_paddr = htt->txbuf.paddr +
- (sizeof(struct ath10k_htt_txbuf) * msdu_id);
+ (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
@@ -962,11 +1205,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
/* pass through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
- memset(&htt->frag_desc.vaddr[msdu_id], 0,
+ ext_desc_t = htt->frag_desc.vaddr_desc_32;
+ memset(&ext_desc_t[msdu_id], 0,
sizeof(struct htt_msdu_ext_desc));
frags = (struct htt_data_tx_desc_frag *)
- &htt->frag_desc.vaddr[msdu_id].frags;
- ext_desc = &htt->frag_desc.vaddr[msdu_id];
+ &ext_desc_t[msdu_id].frags;
+ ext_desc = &ext_desc_t[msdu_id];
frags[0].tword_addr.paddr_lo =
__cpu_to_le32(skb_cb->paddr);
frags[0].tword_addr.paddr_hi = 0;
@@ -1055,9 +1299,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
- flags0, flags1, msdu->len, msdu_id, frags_paddr,
- (u32)skb_cb->paddr, vdev_id, tid, freq);
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+ flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+ &skb_cb->paddr, vdev_id, tid, freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
@@ -1093,3 +1337,239 @@ err_free_msdu_id:
err:
return res;
}
+
+static int ath10k_htt_tx_64(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_hif_sg_item sg_items[2];
+ struct ath10k_htt_txbuf_64 *txbuf;
+ struct htt_data_tx_desc_frag *frags;
+ bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+ int prefetch_len;
+ int res;
+ u8 flags0 = 0;
+ u16 msdu_id, flags1 = 0;
+ u16 freq = 0;
+ dma_addr_t frags_paddr = 0;
+ u32 txbuf_paddr;
+ struct htt_msdu_ext_desc_64 *ext_desc = NULL;
+ struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
+
+ spin_lock_bh(&htt->tx_lock);
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ spin_unlock_bh(&htt->tx_lock);
+ if (res < 0)
+ goto err;
+
+ msdu_id = res;
+
+ prefetch_len = min(htt->prefetch_len, msdu->len);
+ prefetch_len = roundup(prefetch_len, 4);
+
+ txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
+ txbuf_paddr = htt->txbuf.paddr +
+ (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res) {
+ res = -EIO;
+ goto err_free_msdu_id;
+ }
+
+ if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ /* pass through */
+ case ATH10K_HW_TXRX_ETHERNET:
+ if (ar->hw_params.continuous_frag_desc) {
+ ext_desc_t = htt->frag_desc.vaddr_desc_64;
+ memset(&ext_desc_t[msdu_id], 0,
+ sizeof(struct htt_msdu_ext_desc_64));
+ frags = (struct htt_data_tx_desc_frag *)
+ &ext_desc_t[msdu_id].frags;
+ ext_desc = &ext_desc_t[msdu_id];
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi =
+ __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+ frags_paddr = htt->frag_desc.paddr +
+ (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
+ } else {
+ frags = txbuf->frags;
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi =
+ __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+ frags[1].tword_addr.paddr_lo = 0;
+ frags[1].tword_addr.paddr_hi = 0;
+ frags[1].tword_addr.len_16 = 0;
+ }
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ frags_paddr = skb_cb->paddr;
+ break;
+ }
+
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why the HTC tx completion handler itself is ignored
+ * by setting transfer_context to NULL for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through HTC tx path
+ * as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance.
+ */
+
+ txbuf->htc_hdr.eid = htt->eid;
+ txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx) +
+ prefetch_len);
+ txbuf->htc_hdr.flags = 0;
+
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc)
+ ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+ }
+
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection so force
+ * it to simply rely on a regular tx completion with discard status.
+ */
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+ txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ txbuf->cmd_tx.flags0 = flags0;
+ txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+
+ /* fill fragment descriptor */
+ txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
+ if (ath10k_mac_tx_frm_has_freq(ar)) {
+ txbuf->cmd_tx.offchan_tx.peerid =
+ __cpu_to_le16(HTT_INVALID_PEERID);
+ txbuf->cmd_tx.offchan_tx.freq =
+ __cpu_to_le16(freq);
+ } else {
+ txbuf->cmd_tx.peerid =
+ __cpu_to_le32(HTT_INVALID_PEERID);
+ }
+
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+ flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+ &skb_cb->paddr, vdev_id, tid, freq);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &txbuf->htc_hdr;
+ sg_items[0].paddr = txbuf_paddr +
+ sizeof(txbuf->frags);
+ sg_items[0].len = sizeof(txbuf->htc_hdr) +
+ sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+err:
+ return res;
+}
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+ .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
+ .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
+ .htt_tx = ath10k_htt_tx_32,
+ .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
+ .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
+};
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
+ .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
+ .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
+ .htt_tx = ath10k_htt_tx_64,
+ .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
+ .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
+};
+
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ if (ar->hw_params.target_64bit)
+ htt->tx_ops = &htt_tx_ops_64;
+ else
+ htt->tx_ops = &htt_tx_ops_32;
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 88955bbe20bd..497ac33e0fbf 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -931,3 +931,5 @@ const struct ath10k_hw_ops qca6174_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
};
+
+const struct ath10k_hw_ops wcn3990_ops = {};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 05f26e5858ad..6203bc65799b 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -128,6 +128,10 @@ enum qca9377_chip_id_rev {
#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* WCN3990 1.0 definitions */
+#define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990
+#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw3.0"
+
#define ATH10K_FW_FILE_BASE "firmware"
#define ATH10K_FW_API_MAX 6
#define ATH10K_FW_API_MIN 2
@@ -553,6 +557,16 @@ struct ath10k_hw_params {
/* Number of ciphers supported (i.e First N) in cipher_suites array */
int n_cipher_suites;
+
+ u32 num_peers;
+ u32 ast_skid_limit;
+ u32 num_wds_entries;
+
+ /* Targets supporting physical addressing above 32 bits */
+ bool target_64bit;
+
+ /* Target rx ring fill level */
+ u32 rx_ring_fill_level;
};
struct htt_rx_desc;
@@ -567,6 +581,7 @@ struct ath10k_hw_ops {
extern const struct ath10k_hw_ops qca988x_ops;
extern const struct ath10k_hw_ops qca99x0_ops;
extern const struct ath10k_hw_ops qca6174_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
extern const struct ath10k_hw_clk_params qca6174_clk[];
@@ -663,6 +678,11 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
+/* Target specific defines for WMI-HL-1.0 firmware */
+#define TARGET_HL_10_TLV_NUM_PEERS 14
+#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6
+#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2
+
/* Diagnostic Window */
#define CE_DIAG_PIPE 7
@@ -868,6 +888,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address
#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010
+#define FW_RAM_CONFIG_ADDRESS 0x0018
#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0a947eef348d..ebb3f1b046f3 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -2563,7 +2563,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
}
break;
case WMI_VDEV_TYPE_STA:
- if (vif->bss_conf.qos)
+ if (sta->wme)
arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
break;
case WMI_VDEV_TYPE_IBSS:
@@ -3574,7 +3574,9 @@ ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
return ATH10K_MAC_TX_HTT;
case ATH10K_HW_TXRX_MGMT:
if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->running_fw->fw_file.fw_features))
+ ar->running_fw->fw_file.fw_features) ||
+ test_bit(WMI_SERVICE_MGMT_TX_WMI,
+ ar->wmi.svc_map))
return ATH10K_MAC_TX_WMI_MGMT;
else if (ar->htt.target_version_major >= 3)
return ATH10K_MAC_TX_HTT;
@@ -3595,7 +3597,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
switch (txpath) {
case ATH10K_MAC_TX_HTT:
- ret = ath10k_htt_tx(htt, txmode, skb);
+ ret = htt->tx_ops->htt_tx(htt, txmode, skb);
break;
case ATH10K_MAC_TX_HTT_MGMT:
ret = ath10k_htt_mgmt_tx(htt, skb);
@@ -6201,6 +6203,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
+ if (sta->tdls) {
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
+ sta,
+ WMI_TDLS_PEER_STATE_TEARDOWN);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
+ sta->addr,
+ WMI_TDLS_PEER_STATE_TEARDOWN, ret);
+ }
+
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
@@ -7536,6 +7548,16 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
+ if (ath10k_peer_stats_enabled(ar)) {
+ ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
+ ret = ath10k_wmi_pdev_pktlog_enable(ar,
+ ar->pktlog_filter);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
+ goto err_stop;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -7620,6 +7642,34 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
peer->removed = true;
}
+static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+
+ if (!ath10k_peer_stats_enabled(ar))
+ return;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+
+ if (!arsta->txrate.legacy && !arsta->txrate.nss)
+ return;
+
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_mac_op_tx,
.wake_tx_queue = ath10k_mac_op_wake_tx_queue,
@@ -7661,6 +7711,7 @@ static const struct ieee80211_ops ath10k_ops = {
.unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
.switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
.sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
+ .sta_statistics = ath10k_sta_statistics,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
@@ -7671,7 +7722,6 @@ static const struct ieee80211_ops ath10k_ops = {
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
- .sta_statistics = ath10k_sta_statistics,
#endif
};
@@ -8244,7 +8294,8 @@ int ath10k_mac_register(struct ath10k *ar)
if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
- ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
+ if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
+ ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
}
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -8329,15 +8380,6 @@ int ath10k_mac_register(struct ath10k *ar)
ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
}
- /* Current wake_tx_queue implementation imposes a significant
- * performance penalty in some setups. The tx scheduling code needs
- * more work anyway so disable the wake_tx_queue unless firmware
- * supports the pull-push mechanism.
- */
- if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
- ar->running_fw->fw_file.fw_features))
- ar->ops->wake_tx_queue = NULL;
-
ret = ath10k_mac_init_rd(ar);
if (ret) {
ath10k_err(ar, "failed to derive regdom: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 553747bc19ed..81f8d6c0af35 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ffea348b2190..355db6a0fcf3 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -23,6 +23,7 @@
#include "core.h"
#include "debug.h"
+#include "coredump.h"
#include "targaddrs.h"
#include "bmi.h"
@@ -51,6 +52,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+/* Maximum number of bytes that can be handled atomically by
+ * diag read and write.
+ */
+#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
+
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -785,7 +791,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
ATH10K_SKB_RXCB(skb)->paddr = paddr;
spin_lock_bh(&ce->ce_lock);
- ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+ ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
spin_unlock_bh(&ce->ce_lock);
if (ret) {
dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
@@ -923,7 +929,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
+ ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
if (ret != 0)
goto done;
@@ -1089,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
- ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
+ ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
goto done;
@@ -1461,6 +1467,215 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
crash_data->registers[i] = reg_dump_values[i];
}
+static int ath10k_pci_dump_memory_section(struct ath10k *ar,
+ const struct ath10k_mem_region *mem_region,
+ u8 *buf, size_t buf_len)
+{
+ const struct ath10k_mem_section *cur_section, *next_section;
+ unsigned int count, section_size, skip_size;
+ int ret, i, j;
+
+ if (!mem_region || !buf)
+ return 0;
+
+ cur_section = &mem_region->section_table.sections[0];
+
+ if (mem_region->start > cur_section->start) {
+ ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+ mem_region->start, cur_section->start);
+ return 0;
+ }
+
+ skip_size = cur_section->start - mem_region->start;
+
+ /* fill the gap between the start of the memory region and the
+ * first section's start address
+ */
+ for (i = 0; i < skip_size; i++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count = 0;
+
+ for (i = 0; cur_section != NULL; i++) {
+ section_size = cur_section->end - cur_section->start;
+
+ if (section_size <= 0) {
+ ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+ cur_section->start,
+ cur_section->end);
+ break;
+ }
+
+ if ((i + 1) == mem_region->section_table.size) {
+ /* last section */
+ next_section = NULL;
+ skip_size = 0;
+ } else {
+ next_section = cur_section + 1;
+
+ if (cur_section->end > next_section->start) {
+ ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+ next_section->start,
+ cur_section->end);
+ break;
+ }
+
+ skip_size = next_section->start - cur_section->end;
+ }
+
+ if (buf_len < (skip_size + section_size)) {
+ ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+ break;
+ }
+
+ buf_len -= skip_size + section_size;
+
+ /* read section to dest memory */
+ ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
+ buf, section_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+ cur_section->start, ret);
+ break;
+ }
+
+ buf += section_size;
+ count += section_size;
+
+ /* fill in the gap between this section and the next */
+ for (j = 0; j < skip_size; j++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count += skip_size;
+
+ if (!next_section)
+ /* this was the last section */
+ break;
+
+ cur_section = next_section;
+ }
+
+ return count;
+}
+
+static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
+{
+ u32 val;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS, config);
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS);
+ if (val != config) {
+ ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
+ val, config);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ u32 count, shift;
+ size_t buf_len;
+ int ret, i;
+ u8 *buf;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!crash_data)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+
+ memset(buf, 0, buf_len);
+
+ for (i = 0; i < mem_layout->region_table.size; i++) {
+ count = 0;
+
+ if (current_region->len > buf_len) {
+ ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
+ current_region->name,
+ current_region->len,
+ buf_len);
+ break;
+ }
+
+ /* To get IRAM dump, the host driver needs to switch target
+ * ram config from DRAM to IRAM.
+ */
+ if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
+ current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
+ shift = current_region->start >> 20;
+
+ ret = ath10k_pci_set_ram_config(ar, shift);
+ if (ret) {
+ ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
+ current_region->name, ret);
+ break;
+ }
+ }
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ if (current_region->section_table.size > 0) {
+ /* Copy each section individually. */
+ count = ath10k_pci_dump_memory_section(ar,
+ current_region,
+ buf,
+ current_region->len);
+ } else {
+ /* No individual memory sections defined so we can
+ * copy the entire memory region.
+ */
+ ret = ath10k_pci_diag_read_mem(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+ current_region->name, ret);
+ break;
+ }
+
+ count = current_region->len;
+ }
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32(current_region->start);
+ hdr->length = cpu_to_le32(count);
+
+ if (count == 0)
+ /* Note: the header remains, just with zero length. */
+ break;
+
+ buf += count;
+ buf_len -= count;
+
+ current_region++;
+ }
+}
+
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data;
@@ -1470,7 +1685,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ar->stats.fw_crash_counter++;
- crash_data = ath10k_debug_get_new_fw_crash_data(ar);
+ crash_data = ath10k_coredump_new(ar);
if (crash_data)
scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
@@ -1481,6 +1696,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ath10k_print_driver_info(ar);
ath10k_pci_dump_registers(ar, crash_data);
ath10k_ce_dump_registers(ar, crash_data);
+ ath10k_pci_dump_memory(ar, crash_data);
spin_unlock_bh(&ar->data_lock);
@@ -1858,7 +2074,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
if (ret) {
- u32 unused_buffer;
+ dma_addr_t unused_buffer;
unsigned int unused_nbytes;
unsigned int unused_id;
@@ -1871,7 +2087,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
err_resp:
if (resp) {
- u32 unused_buffer;
+ dma_addr_t unused_buffer;
ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
dma_unmap_single(ar->dev, resp_paddr,
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 08704fbc11e3..e52fd83156b6 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 28da14398951..545deb6d7af1 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -210,6 +210,10 @@ struct rx_frag_info {
u8 ring1_more_count;
u8 ring2_more_count;
u8 ring3_more_count;
+ u8 ring4_more_count;
+ u8 ring5_more_count;
+ u8 ring6_more_count;
+ u8 ring7_more_count;
} __packed;
/*
@@ -471,10 +475,16 @@ struct rx_msdu_start_qca99x0 {
__le32 info2; /* %RX_MSDU_START_INFO2_ */
} __packed;
+struct rx_msdu_start_wcn3990 {
+ __le32 info2; /* %RX_MSDU_START_INFO2_ */
+ __le32 info3; /* %RX_MSDU_START_INFO3_ */
+} __packed;
+
struct rx_msdu_start {
struct rx_msdu_start_common common;
union {
struct rx_msdu_start_qca99x0 qca99x0;
+ struct rx_msdu_start_wcn3990 wcn3990;
} __packed;
} __packed;
@@ -595,10 +605,23 @@ struct rx_msdu_end_qca99x0 {
__le32 info2;
} __packed;
+struct rx_msdu_end_wcn3990 {
+ __le32 ipv6_crc;
+ __le32 tcp_seq_no;
+ __le32 tcp_ack_no;
+ __le32 info1;
+ __le32 info2;
+ __le32 rule_indication_0;
+ __le32 rule_indication_1;
+ __le32 rule_indication_2;
+ __le32 rule_indication_3;
+} __packed;
+
struct rx_msdu_end {
struct rx_msdu_end_common common;
union {
struct rx_msdu_end_qca99x0 qca99x0;
+ struct rx_msdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
@@ -963,6 +986,12 @@ struct rx_pkt_end {
__le32 phy_timestamp_2;
} __packed;
+struct rx_pkt_end_wcn3990 {
+ __le32 info0; /* %RX_PKT_END_INFO0_ */
+ __le64 phy_timestamp_1;
+ __le64 phy_timestamp_2;
+} __packed;
+
#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff
#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0
#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000
@@ -998,6 +1027,12 @@ struct rx_location_info {
__le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
} __packed;
+struct rx_location_info_wcn3990 {
+ __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+ __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+ __le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */
+} __packed;
+
enum rx_phy_ppdu_end_info0 {
RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2),
RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3),
@@ -1086,6 +1121,20 @@ struct rx_ppdu_end_qca9984 {
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
+struct rx_ppdu_end_wcn3990 {
+ struct rx_pkt_end_wcn3990 rx_pkt_end;
+ struct rx_location_info_wcn3990 rx_location_info;
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset;
+ __le32 reserved_info_0;
+ __le32 reserved_info_1;
+ __le32 rx_antenna_info;
+ __le32 rx_coex_info;
+ __le32 rx_mpdu_cnt_info;
+ __le64 phy_timestamp_tx;
+ __le32 rx_bb_length;
+} __packed;
+
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
@@ -1093,6 +1142,7 @@ struct rx_ppdu_end {
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
struct rx_ppdu_end_qca9984 qca9984;
+ struct rx_ppdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 2048b1e5262b..af6995de7e00 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index 89b0ad769d4f..13276f4dc12c 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -44,7 +44,7 @@ enum ath10k_spectral_mode {
SPECTRAL_MANUAL,
};
-#ifdef CONFIG_ATH10K_DEBUGFS
+#ifdef CONFIG_ATH10K_SPECTRAL
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_phyerr_ev_arg *phyerr,
@@ -85,6 +85,6 @@ static inline void ath10k_spectral_destroy(struct ath10k *ar)
{
}
-#endif /* CONFIG_ATH10K_DEBUGFS */
+#endif /* CONFIG_ATH10K_SPECTRAL */
#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index adf4592374b4..e7f57efadae1 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index f5dc0476493e..fa602f15fa93 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 8bded5da9d0d..c2b5bad0459b 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 9d3eb258ac2f..568810b41657 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
index 191a8f34c8ea..6514d1a14242 100644
--- a/drivers/net/wireless/ath/ath10k/testmode_i.h
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index ef717b631ff8..aa8978a8d751 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index 3abb97f63b1e..65e2419543f9 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index e0d00cef0bd8..e40edced1d82 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index d4986f626c35..5b3b021526ab 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index e7ea1ae1c438..2bf401e436d3 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 2fc3f24ff1ca..14093cfdc505 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -377,6 +377,7 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct sk_buff *skb;
int ret;
+ u32 mgmt_tx_cmdid;
if (!ar->wmi.ops->gen_mgmt_tx)
return -EOPNOTSUPP;
@@ -385,7 +386,13 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
if (IS_ERR(skb))
return PTR_ERR(skb);
- ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features))
+ mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid;
+ else
+ mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid;
+
+ ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid);
if (ret)
return ret;
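Editor's note: the dispatch above keys off the ATH10K_FW_FEATURE_MGMT_TX_BY_REF firmware feature bit to pick between the legacy copy-based command and the new by-reference one. A hedged user-space sketch of the test_bit()-style selection (the bit index below is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define FW_FEATURE_MGMT_TX_BY_REF 18	/* illustrative bit index */

static int feature_test_bit(const uint32_t *bmap, unsigned int bit)
{
	return (bmap[bit / 32] >> (bit % 32)) & 1;
}

int main(void)
{
	uint32_t fw_features[2] = { 0 };

	/* pretend the firmware advertised by-reference management TX */
	fw_features[FW_FEATURE_MGMT_TX_BY_REF / 32] |=
		1u << (FW_FEATURE_MGMT_TX_BY_REF % 32);

	printf("use %s\n",
	       feature_test_bit(fw_features, FW_FEATURE_MGMT_TX_BY_REF) ?
	       "mgmt_tx_send_cmdid" : "mgmt_tx_cmdid");
	return 0;
}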
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 7616c1c4bbd3..ae77a007ae07 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -917,33 +917,69 @@ ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
return -ENOMEM;
}
+struct wmi_tlv_svc_rdy_parse {
+ const struct hal_reg_capabilities *reg;
+ const struct wmi_tlv_svc_rdy_ev *ev;
+ const __le32 *svc_bmap;
+ const struct wlan_host_mem_req *mem_reqs;
+ bool svc_bmap_done;
+ bool dbs_hw_mode_done;
+};
+
+static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
+ svc_rdy->ev = ptr;
+ break;
+ case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
+ svc_rdy->reg = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_STRUCT:
+ svc_rdy->mem_reqs = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_UINT32:
+ if (!svc_rdy->svc_bmap_done) {
+ svc_rdy->svc_bmap_done = true;
+ svc_rdy->svc_bmap = ptr;
+ } else if (!svc_rdy->dbs_hw_mode_done) {
+ svc_rdy->dbs_hw_mode_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_svc_rdy_ev_arg *arg)
{
- const void **tb;
const struct hal_reg_capabilities *reg;
const struct wmi_tlv_svc_rdy_ev *ev;
const __le32 *svc_bmap;
const struct wlan_host_mem_req *mem_reqs;
+ struct wmi_tlv_svc_rdy_parse svc_rdy = { };
int ret;
- tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
+ if (ret) {
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
- ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
- reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
- svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
- mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+ ev = svc_rdy.ev;
+ reg = svc_rdy.reg;
+ svc_bmap = svc_rdy.svc_bmap;
+ mem_reqs = svc_rdy.mem_reqs;
- if (!ev || !reg || !svc_bmap || !mem_reqs) {
- kfree(tb);
+ if (!ev || !reg || !svc_bmap || !mem_reqs)
return -EPROTO;
- }
/* This is an internal ABI compatibility check for WMI TLV so check it
* here instead of the generic WMI code.
@@ -961,7 +997,6 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
__le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
__le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
__le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
- kfree(tb);
return -ENOTSUPP;
}
@@ -982,12 +1017,10 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
ath10k_wmi_tlv_parse_mem_reqs, arg);
if (ret) {
- kfree(tb);
ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
return ret;
}
- kfree(tb);
return 0;
}
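Editor's note: the rewrite above trades the allocated tag table (and its kfree(tb) on every exit path) for a single streaming pass with a callback, which is why all the kfree() calls disappear and duplicate tags like the two WMI_TLV_TAG_ARRAY_UINT32 arrays can now be told apart. A self-contained sketch of that walk, assuming a simple 16-bit tag / 16-bit length header (field order and widths in the real struct wmi_tlv may differ):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct tlv_hdr { uint16_t tag; uint16_t len; };

static int tlv_iter(const uint8_t *buf, size_t len,
		    int (*cb)(uint16_t, uint16_t, const void *, void *),
		    void *data)
{
	struct tlv_hdr hdr;
	int ret;

	while (len >= sizeof(hdr)) {
		memcpy(&hdr, buf, sizeof(hdr));
		buf += sizeof(hdr);
		len -= sizeof(hdr);
		if (hdr.len > len)
			return -1;	/* truncated element */
		ret = cb(hdr.tag, hdr.len, buf, data);
		if (ret)
			return ret;
		buf += hdr.len;
		len -= hdr.len;
	}
	return 0;
}

static int print_cb(uint16_t tag, uint16_t len, const void *ptr, void *data)
{
	(void)ptr; (void)data;
	printf("tag %u, len %u\n", tag, len);
	return 0;
}

int main(void)
{
	/* one element: tag 1, len 2, payload aa bb (little-endian host) */
	uint8_t buf[] = { 0x01, 0x00, 0x02, 0x00, 0xaa, 0xbb };

	return tlv_iter(buf, sizeof(buf), print_cb, NULL);
}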
@@ -1406,7 +1439,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
- cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+
+ cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+ cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
+ cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -1418,7 +1454,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_peer_keys = __cpu_to_le32(2);
cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
- cfg->ast_skid_limit = __cpu_to_le32(0x10);
cfg->tx_chain_mask = __cpu_to_le32(0x7);
cfg->rx_chain_mask = __cpu_to_le32(0x7);
cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
@@ -1434,7 +1469,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_mcast_table_elems = __cpu_to_le32(0);
cfg->mcast2ucast_mode = __cpu_to_le32(0);
cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
- cfg->num_wds_entries = __cpu_to_le32(0x20);
cfg->dma_burst_size = __cpu_to_le32(0);
cfg->mac_aggr_delim = __cpu_to_le32(0);
cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
@@ -2450,6 +2484,80 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
}
static struct sk_buff *
+ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct wmi_tlv_mgmt_tx_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ void *ptr;
+ int len;
+ u32 buf_len = msdu->len;
+ struct ath10k_vif *arvif;
+ dma_addr_t mgmt_frame_dma;
+ u32 vdev_id;
+
+ if (!cb->vif)
+ return ERR_PTR(-EINVAL);
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ arvif = (void *)cb->vif->drv_priv;
+ vdev_id = arvif->vdev_id;
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*cmd) + 2 * sizeof(*tlv);
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
+ buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
+ buf_len = round_up(buf_len, 4);
+
+ len += buf_len;
+ len = round_up(len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->desc_id = 0;
+ cmd->chanfreq = 0;
+ cmd->buf_len = __cpu_to_le32(buf_len);
+ cmd->frame_len = __cpu_to_le32(msdu->len);
+ mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
+ msdu->len, DMA_TO_DEVICE);
+ if (!mgmt_frame_dma)
+ return ERR_PTR(-ENOMEM);
+
+ cmd->paddr = __cpu_to_le64(mgmt_frame_dma);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(buf_len);
+
+ ptr += sizeof(*tlv);
+ memcpy(ptr, msdu->data, buf_len);
+
+ return skb;
+}
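Editor's note: one caveat worth flagging in the generator above — validating a dma_map_single() result with a plain NULL test is a simplification, since a DMA address of zero can be legitimate on some platforms. The conventional kernel idiom is dma_mapping_error(); a hedged sketch of that variant (not part of this patch):

	mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
					msdu->len, DMA_TO_DEVICE);
	if (dma_mapping_error(arvif->ar->dev, mgmt_frame_dma)) {
		dev_kfree_skb(skb);	/* drop the WMI skb built above */
		return ERR_PTR(-ENOMEM);
	}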
+
+static struct sk_buff *
ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type,
u32 delay_ms)
@@ -3258,6 +3366,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+ .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
@@ -3592,6 +3701,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
/* .gen_mgmt_tx = not implemented; HTT is used */
+ .gen_mgmt_tx = ath10k_wmi_tlv_op_gen_mgmt_tx,
.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 22cf011e839a..da89128e8dd6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +22,7 @@
#define WMI_TLV_CMD_UNSUPPORTED 0
#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64
enum wmi_tlv_grp_id {
WMI_TLV_GRP_START = 0x3,
@@ -132,6 +133,7 @@ enum wmi_tlv_cmd_id {
WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
WMI_TLV_MGMT_TX_CMDID,
WMI_TLV_PRB_TMPL_CMDID,
+ WMI_TLV_MGMT_TX_SEND_CMD,
WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
WMI_TLV_ADDBA_SEND_CMDID,
WMI_TLV_ADDBA_STATUS_CMDID,
@@ -890,6 +892,63 @@ enum wmi_tlv_tag {
WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_SET_UTC_TIME_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_GET_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TLV_TAG_STRUCT_DCC_GET_STATS_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_CLEAR_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_CHANNEL,
+ WMI_TLV_TAG_STRUCT_OCB_SCHEDULE_ELEMENT,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_CHAN,
+ WMI_TLV_TAG_STRUCT_QOS_PARAMETER,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_FILTER_FIXED_PARAM,
+ WMI_TLV_TAG_STRUCT_PASSPOINT_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PASSPOINT_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_TSF_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_GET_FW_MEM_DUMP,
+ WMI_TLV_TAG_STRUCT_UPDATE_FW_MEM_DUMP,
+ WMI_TLV_TAG_STRUCT_FW_MEM_DUMP_PARAMS,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TLV_TAG_STRUCT_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TLV_TAG_STRUCT_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_IE_CMD,
+ WMI_TLV_TAG_STRUCT_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TLV_TAG_STRUCT_RSSI_BREACH_EVENT,
+ WMI_TLV_TAG_STRUCT_EVENT_INITIAL_WAKEUP,
+ WMI_TLV_TAG_STRUCT_SOC_SET_PCL_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_TXRX_STREAMS,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_IOAC_SOCK_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_CONFIG,
+ WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_SUPPORTED_EVENT,
+ WMI_TLV_TAG_STRUCT_PACKET_FILTER_CONFIG,
+ WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE,
+ WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_CMD,
WMI_TLV_TAG_MAX
};
@@ -965,6 +1024,50 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
WMI_TLV_SERVICE_MDNS_OFFLOAD,
WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+ WMI_TLV_SERVICE_OCB,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD,
+ WMI_TLV_SERVICE_MGMT_TX_HTT,
+ WMI_TLV_SERVICE_MGMT_TX_WMI,
+ WMI_TLV_SERVICE_EXT_MSG,
+ WMI_TLV_SERVICE_MAWC,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF,
+ WMI_TLV_SERVICE_EGAP,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA,
+ WMI_TLV_SERVICE_ATF,
+ WMI_TLV_SERVICE_COEX_GPIO,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_TLV_SERVICE_BPF_OFFLOAD,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES,
+ WMI_TLV_SERVICE_NAN_DATA,
+ WMI_TLV_SERVICE_NAN_RTT,
+ WMI_TLV_SERVICE_11AX,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_TLV_SERVICE_MESH_11S,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
};
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
@@ -1121,6 +1224,8 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
WMI_SERVICE_MDNS_OFFLOAD, len);
SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
+ WMI_SERVICE_MGMT_TX_WMI, len);
}
#undef SVCMAP
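Editor's note: SVCMAP itself is a small translation step — it tests one bit in the firmware's advertised service bitmap and, if set, sets the corresponding bit in the driver's generic service map. A runnable approximation of that mapping (the macro body here is paraphrased, not copied from the header):

#include <stdio.h>
#include <stdint.h>

static int bmap_test(const uint32_t *in, unsigned int bit, size_t words)
{
	return bit / 32 < words && ((in[bit / 32] >> (bit % 32)) & 1);
}

static void bmap_set(uint32_t *out, unsigned int bit)
{
	out[bit / 32] |= 1u << (bit % 32);
}

#define SVCMAP(in_bit, out_bit) \
	do { \
		if (bmap_test(in, (in_bit), words)) \
			bmap_set(out, (out_bit)); \
	} while (0)

int main(void)
{
	uint32_t in[4] = { 0 }, out[4] = { 0 };
	size_t words = 4;

	bmap_set(in, 70);	/* pretend firmware service bit 70 is set */
	SVCMAP(70, 5);		/* map it to generic service bit 5 */

	printf("out bit 5 = %d\n", bmap_test(out, 5, words));
	return 0;
}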
@@ -1643,4 +1748,12 @@ struct wmi_tlv_tx_pause_ev {
void ath10k_wmi_tlv_attach(struct ath10k *ar);
+struct wmi_tlv_mgmt_tx_cmd {
+ __le32 vdev_id;
+ __le32 desc_id;
+ __le32 chanfreq;
+ __le64 paddr;
+ __le32 frame_len;
+ __le32 buf_len;
+} __packed;
#endif
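Editor's note: the length handling in ath10k_wmi_tlv_op_gen_mgmt_tx() clamps the inline copy to WMI_TLV_MGMT_TX_FRAME_MAX_LEN and rounds both lengths up to 4-byte alignment. The arithmetic in isolation, using the kernel's power-of-two round_up() formulation:

#include <stdio.h>

#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)	/* y: power of two */
#define min_t(t, a, b)	((t)(a) < (t)(b) ? (t)(a) : (t)(b))

int main(void)
{
	unsigned int frame_len = 73;
	unsigned int max_len = 64;	/* WMI_TLV_MGMT_TX_FRAME_MAX_LEN */
	unsigned int buf_len;

	buf_len = min_t(unsigned int, frame_len, max_len);
	buf_len = round_up(buf_len, 4);

	/* 73-byte frame -> 64-byte inline copy; the full frame still
	 * reaches firmware by DMA reference via cmd->paddr */
	printf("buf_len = %u\n", buf_len);
	return 0;
}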
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cad2e42dcef6..58dc2189ba49 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -29,6 +29,7 @@
#include "p2p.h"
#include "hw.h"
#include "hif.h"
+#include "txrx.h"
#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
@@ -4456,6 +4457,74 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->rate_max));
}
+static void
+ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tdls_peer_event *ev;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ int vdev_id;
+ int peer_status;
+ int peer_reason;
+ u8 reason;
+
+ if (skb->len < sizeof(*ev)) {
+ ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
+ skb->len);
+ return;
+ }
+
+ ev = (struct wmi_tdls_peer_event *)skb->data;
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+ peer_status = __le32_to_cpu(ev->peer_status);
+ peer_reason = __le32_to_cpu(ev->peer_reason);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer) {
+ ath10k_warn(ar, "failed to find peer entry for %pM\n",
+ ev->peer_macaddr.addr);
+ return;
+ }
+
+ switch (peer_status) {
+ case WMI_TDLS_SHOULD_TEARDOWN:
+ switch (peer_reason) {
+ case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+ case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
+ case WMI_TDLS_TEARDOWN_REASON_RSSI:
+ reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
+ break;
+ default:
+ reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+ break;
+ }
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
+ vdev_id);
+ return;
+ }
+
+ ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
+ NL80211_TDLS_TEARDOWN, reason,
+ GFP_ATOMIC);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received tdls teardown event for peer %pM reason %u\n",
+ ev->peer_macaddr.addr, peer_reason);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received unknown tdls peer event %u\n",
+ peer_status);
+ break;
+ }
+}
+
void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
@@ -5477,6 +5546,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
ath10k_wmi_event_pdev_tpc_config(ar, skb);
break;
+ case WMI_10_4_TDLS_PEER_EVENTID:
+ ath10k_wmi_handle_tdls_peer_event(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index c02b21cff38d..c7b30ed9015d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -195,6 +195,8 @@ enum wmi_service {
WMI_SERVICE_SMART_LOGGING_SUPPORT,
WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_SERVICE_MGMT_TX_WMI,
+ WMI_SERVICE_TDLS_WIDER_BANDWIDTH,
/* keep last */
WMI_SERVICE_MAX,
@@ -336,6 +338,7 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
};
static inline char *wmi_service_name(int service_id)
@@ -444,6 +447,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
+ SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH);
default:
return NULL;
}
@@ -740,6 +744,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len);
SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
+ WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len);
}
#undef SVCMAP
@@ -797,6 +803,7 @@ struct wmi_cmd_map {
u32 bcn_filter_rx_cmdid;
u32 prb_req_filter_rx_cmdid;
u32 mgmt_tx_cmdid;
+ u32 mgmt_tx_send_cmdid;
u32 prb_tmpl_cmdid;
u32 addba_clear_resp_cmdid;
u32 addba_send_cmdid;
@@ -2922,7 +2929,7 @@ struct wmi_ext_resource_config_10_4_cmd {
__le32 max_tdls_concurrent_buffer_sta;
};
-/* strucutre describing host memory chunk. */
+/* structure describing host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
__le32 req_id;
@@ -5236,7 +5243,8 @@ enum wmi_10_4_vdev_param {
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
-#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7)
#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 0d46d6dc7578..c4cbccb29b31 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
index 9745b9ddc7f5..6e810105b775 100644
--- a/drivers/net/wireless/ath/ath10k/wow.h
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b53eb2b85f02..2ba8cf3f38af 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2766,7 +2766,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_mgmt *mgmt;
bool hidden = false;
u8 *ies;
- int ies_len;
struct wmi_connect_cmd p;
int res;
int i, ret;
@@ -2804,7 +2803,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
ies = mgmt->u.beacon.variable;
if (ies > info->beacon.head + info->beacon.head_len)
return -EINVAL;
- ies_len = info->beacon.head + info->beacon.head_len - ies;
if (info->ssid == NULL)
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 1379906bf849..8da9506f8c2b 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1001,7 +1001,7 @@ static void aggr_slice_amsdu(struct aggr_info *p_aggr,
while (amsdu_len > mac_hdr_len) {
hdr = (struct ethhdr *) framep;
- payload_8023_len = ntohs(hdr->h_proto);
+ payload_8023_len = be16_to_cpu(hdr->h_proto);
if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 783a38f1a626..1f3523019509 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -61,13 +61,12 @@ config ATH9K_DEBUGFS
depends on ATH9K && DEBUG_FS
select MAC80211_DEBUGFS
select ATH9K_COMMON_DEBUG
- select RELAY
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
- Also required for changing debug message flags at run time.
- As well as access to the FFT/spectral data and TX99.
+ Also required for changing debug message flags at run time and for
+ TX99.
config ATH9K_STATION_STATISTICS
bool "Detailed station statistics"
@@ -177,7 +176,6 @@ config ATH9K_HTC_DEBUGFS
bool "Atheros ath9k_htc debugging"
depends on ATH9K_HTC && DEBUG_FS
select ATH9K_COMMON_DEBUG
- select RELAY
---help---
Say Y, if you need access to ath9k_htc's statistics.
As well as access to the FFT/spectral data.
@@ -192,3 +190,11 @@ config ATH9K_HWRNG
Say Y, feeds the entropy directly from the WiFi driver to the input
pool.
+
+config ATH9K_COMMON_SPECTRAL
+ bool "Atheros ath9k/ath9k_htc spectral scan support"
+ depends on ATH9K_DEBUGFS || ATH9K_HTC_DEBUGFS
+ select RELAY
+ default n
+ ---help---
+ Say Y to enable access to the FFT/spectral data via debugfs.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index d804ce7391a0..f71b2ad8275c 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -62,8 +62,8 @@ ath9k_common-y:= common.o \
common-init.o \
common-beacon.o \
-ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \
- common-spectral.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_SPECTRAL) += common-spectral.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index c2e210c0a770..f019a20e5a1f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3310,6 +3310,12 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
+ cptr = AR9300_BASE_ADDR_4K;
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
cptr = AR9300_BASE_ADDR_512;
ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
cptr);
@@ -3430,6 +3436,60 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
return len;
}
+static u32 ar9003_dump_cal_data(struct ath_hw *ah, char *buf, u32 len, u32 size,
+ bool is_2g)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase;
+ struct ar9300_cal_data_per_freq_op_loop *cal_pier;
+ int cal_pier_nr;
+ int freq;
+ int i, j;
+
+ pBase = &eep->baseEepHeader;
+
+ if (is_2g)
+ cal_pier_nr = AR9300_NUM_2G_CAL_PIERS;
+ else
+ cal_pier_nr = AR9300_NUM_5G_CAL_PIERS;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!((pBase->txrxMask >> i) & 1))
+ continue;
+
+ len += snprintf(buf + len, size - len, "Chain %d\n", i);
+
+ len += snprintf(buf + len, size - len,
+ "Freq\t ref\tvolt\ttemp\tnf_cal\tnf_pow\trx_temp\n");
+
+ for (j = 0; j < cal_pier_nr; j++) {
+ if (is_2g) {
+ cal_pier = &eep->calPierData2G[i][j];
+ freq = 2300 + eep->calFreqPier2G[j];
+ } else {
+ cal_pier = &eep->calPierData5G[i][j];
+ freq = 4800 + eep->calFreqPier5G[j] * 5;
+ }
+
+ len += snprintf(buf + len, size - len,
+ "%d\t", freq);
+
+ len += snprintf(buf + len, size - len,
+ "%d\t%d\t%d\t%d\t%d\t%d\n",
+ cal_pier->refPower,
+ cal_pier->voltMeas,
+ cal_pier->tempMeas,
+ cal_pier->rxTempMeas ?
+ N2DBM(cal_pier->rxNoisefloorCal) : 0,
+ cal_pier->rxTempMeas ?
+ N2DBM(cal_pier->rxNoisefloorPower) : 0,
+ cal_pier->rxTempMeas);
+ }
+ }
+
+ return len;
+}
+
static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
u8 *buf, u32 len, u32 size)
{
@@ -3441,10 +3501,18 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
"%20s :\n", "2GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader2G);
- len += scnprintf(buf + len, size - len,
+
+ len += scnprintf(buf + len, size - len, "Calibration data\n");
+ len = ar9003_dump_cal_data(ah, buf, len, size, true);
+
+ len += snprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
+
+ len += snprintf(buf + len, size - len, "Calibration data\n");
+ len = ar9003_dump_cal_data(ah, buf, len, size, false);
+
goto out;
}
@@ -4683,7 +4751,8 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
int ichain,
int *pfrequency,
int *pcorrection,
- int *ptemperature, int *pvoltage)
+ int *ptemperature, int *pvoltage,
+ int *pnf_cal, int *pnf_power)
{
u8 *pCalPier;
struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
@@ -4725,6 +4794,10 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
*pcorrection = pCalPierStruct->refPower;
*ptemperature = pCalPierStruct->tempMeas;
*pvoltage = pCalPierStruct->voltMeas;
+ *pnf_cal = pCalPierStruct->rxTempMeas ?
+ N2DBM(pCalPierStruct->rxNoisefloorCal) : 0;
+ *pnf_power = pCalPierStruct->rxTempMeas ?
+ N2DBM(pCalPierStruct->rxNoisefloorPower) : 0;
return 0;
}
@@ -4889,14 +4962,18 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
int mode;
int lfrequency[AR9300_MAX_CHAINS],
lcorrection[AR9300_MAX_CHAINS],
- ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
+ ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS],
+ lnf_cal[AR9300_MAX_CHAINS], lnf_pwr[AR9300_MAX_CHAINS];
int hfrequency[AR9300_MAX_CHAINS],
hcorrection[AR9300_MAX_CHAINS],
- htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
+ htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS],
+ hnf_cal[AR9300_MAX_CHAINS], hnf_pwr[AR9300_MAX_CHAINS];
int fdiff;
int correction[AR9300_MAX_CHAINS],
- voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
- int pfrequency, pcorrection, ptemperature, pvoltage;
+ voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS],
+ nf_cal[AR9300_MAX_CHAINS], nf_pwr[AR9300_MAX_CHAINS];
+ int pfrequency, pcorrection, ptemperature, pvoltage,
+ pnf_cal, pnf_pwr;
struct ath_common *common = ath9k_hw_common(ah);
mode = (frequency >= 4000);
@@ -4914,7 +4991,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
for (ipier = 0; ipier < npier; ipier++) {
if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
&pfrequency, &pcorrection,
- &ptemperature, &pvoltage)) {
+ &ptemperature, &pvoltage,
+ &pnf_cal, &pnf_pwr)) {
fdiff = frequency - pfrequency;
/*
@@ -4936,6 +5014,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
htemperature[ichain] =
ptemperature;
hvoltage[ichain] = pvoltage;
+ hnf_cal[ichain] = pnf_cal;
+ hnf_pwr[ichain] = pnf_pwr;
}
}
if (fdiff >= 0) {
@@ -4952,6 +5032,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
ltemperature[ichain] =
ptemperature;
lvoltage[ichain] = pvoltage;
+ lnf_cal[ichain] = pnf_cal;
+ lnf_pwr[ichain] = pnf_pwr;
}
}
}
@@ -4960,15 +5042,20 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* interpolate */
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
- ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n",
+ ath_dbg(common, EEPROM,
+ "ch=%d f=%d low=%d %d h=%d %d n=%d %d p=%d %d\n",
ichain, frequency, lfrequency[ichain],
lcorrection[ichain], hfrequency[ichain],
- hcorrection[ichain]);
+ hcorrection[ichain], lnf_cal[ichain],
+ hnf_cal[ichain], lnf_pwr[ichain],
+ hnf_pwr[ichain]);
/* they're the same, so just pick one */
if (hfrequency[ichain] == lfrequency[ichain]) {
correction[ichain] = lcorrection[ichain];
voltage[ichain] = lvoltage[ichain];
temperature[ichain] = ltemperature[ichain];
+ nf_cal[ichain] = lnf_cal[ichain];
+ nf_pwr[ichain] = lnf_pwr[ichain];
}
/* the low frequency is good */
else if (frequency - lfrequency[ichain] < 1000) {
@@ -4992,12 +5079,26 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
hfrequency[ichain],
lvoltage[ichain],
hvoltage[ichain]);
+
+ nf_cal[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ lnf_cal[ichain],
+ hnf_cal[ichain]);
+
+ nf_pwr[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ lnf_pwr[ichain],
+ hnf_pwr[ichain]);
}
/* only low is good, use it */
else {
correction[ichain] = lcorrection[ichain];
temperature[ichain] = ltemperature[ichain];
voltage[ichain] = lvoltage[ichain];
+ nf_cal[ichain] = lnf_cal[ichain];
+ nf_pwr[ichain] = lnf_pwr[ichain];
}
}
/* only high is good, use it */
@@ -5005,10 +5106,14 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
correction[ichain] = hcorrection[ichain];
temperature[ichain] = htemperature[ichain];
voltage[ichain] = hvoltage[ichain];
+ nf_cal[ichain] = hnf_cal[ichain];
+ nf_pwr[ichain] = hnf_pwr[ichain];
} else { /* nothing is good, presume 0???? */
correction[ichain] = 0;
temperature[ichain] = 0;
voltage[ichain] = 0;
+ nf_cal[ichain] = 0;
+ nf_pwr[ichain] = 0;
}
}
@@ -5019,6 +5124,16 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
"for frequency=%d, calibration correction = %d %d %d\n",
frequency, correction[0], correction[1], correction[2]);
+ /* Store calibrated noise floor values */
+ for (ichain = 0; ichain < AR5416_MAX_CHAINS; ichain++)
+ if (mode) {
+ ah->nf_5g.cal[ichain] = nf_cal[ichain];
+ ah->nf_5g.pwr[ichain] = nf_pwr[ichain];
+ } else {
+ ah->nf_2g.cal[ichain] = nf_cal[ichain];
+ ah->nf_2g.pwr[ichain] = nf_pwr[ichain];
+ }
+
return 0;
}
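Editor's note: the new nf_cal/nf_pwr values ride the same pier interpolation as the existing correction, voltage and temperature values. A standalone sketch of rounded linear interpolation between two calibration piers (the driver's interpolate(), called above, is equivalent in spirit; this body is a simplified stand-in):

#include <stdio.h>

/* rounded linear interpolation between (xa, ya) and (xb, yb) */
static int interpolate(int x, int xa, int xb, int ya, int yb)
{
	return ya + ((yb - ya) * (x - xa) + (xb - xa) / 2) / (xb - xa);
}

int main(void)
{
	/* noise floor at 5200 MHz between piers at 5180 and 5240 MHz */
	printf("nf = %d\n", interpolate(5200, 5180, 5240, -98, -92));
	return 0;
}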
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index bd2269c7de6b..e8fda54acfe3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -62,6 +62,16 @@
*/
#define AR9300_PWR_TABLE_OFFSET 0
+/* Noise power data definitions
+ * units are: 4 x dBm - NOISE_PWR_DATA_OFFSET
+ * (e.g. -25 = (-25/4 - 90) = -96.25 dBm)
+ * range (for 6 signed bits) is (-32 to 31) + offset => -122dBm to -59dBm
+ * resolution (2 bits) is 0.25dBm
+ */
+#define NOISE_PWR_DATA_OFFSET -90
+#define NOISE_PWR_DBM_2_INT(_p) ((((_p) + 3) >> 2) + NOISE_PWR_DATA_OFFSET)
+#define N2DBM(_p) NOISE_PWR_DBM_2_INT(_p)
+
/* byte addressable */
#define AR9300_EEPROM_SIZE (16*1024)
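Editor's note: working the N2DBM() conversion above through once — the stored value is in quarter-dB units, the "+ 3" turns the right shift into a ceiling division for negative inputs (on the arithmetic-shift platforms the kernel assumes), and the -90 dBm offset is then applied:

#include <stdio.h>

#define NOISE_PWR_DATA_OFFSET	-90
#define N2DBM(_p)		((((_p) + 3) >> 2) + NOISE_PWR_DATA_OFFSET)

int main(void)
{
	/* -25 quarter-dB: ceil(-25 / 4) = -6, then -6 + (-90) = -96 dBm,
	 * the integer form of the -96.25 dBm worked in the comment above */
	printf("N2DBM(-25) = %d dBm\n", N2DBM(-25));
	return 0;
}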
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 13ab6bc46775..3d9447e21025 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -58,19 +58,25 @@ static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
}
static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
- struct ath9k_channel *chan)
+ struct ath9k_channel *chan,
+ int chain)
{
- return ath9k_hw_get_nf_limits(ah, chan)->nominal;
+ s16 calib_nf = ath9k_hw_get_nf_limits(ah, chan)->cal[chain];
+
+ if (calib_nf)
+ return calib_nf;
+ else
+ return ath9k_hw_get_nf_limits(ah, chan)->nominal;
}
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
s16 nf)
{
- s8 noise = ATH_DEFAULT_NOISE_FLOOR;
+ s8 noise = ath9k_hw_get_default_nf(ah, chan, 0);
if (nf) {
s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
- ath9k_hw_get_default_nf(ah, chan);
+ ath9k_hw_get_default_nf(ah, chan, 0);
if (delta > 0)
noise += delta;
}
@@ -240,7 +246,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
unsigned i, j;
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
- s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
+ s16 default_nf = ath9k_hw_get_nf_limits(ah, chan)->nominal;
u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL);
if (ah->caldata)
@@ -258,8 +264,13 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
nfval = ah->nf_override;
else if (h)
nfval = h[i].privNF;
- else
- nfval = default_nf;
+ else {
+ /* Try to get calibrated noise floor value */
+ nfval =
+ ath9k_hw_get_nf_limits(ah, chan)->cal[i];
+ if (nfval > -60 || nfval < -127)
+ nfval = default_nf;
+ }
REG_RMW(ah, ah->nf_regs[i],
(((u32) nfval << 1) & 0x1ff), 0x1ff);
@@ -429,20 +440,19 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath9k_nfcal_hist *h;
- s16 default_nf;
- int i, j;
+ int i, j, k = 0;
ah->caldata->channel = chan->channel;
ah->caldata->channelFlags = chan->channelFlags;
h = ah->caldata->nfCalHist;
- default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
h[i].currIndex = 0;
- h[i].privNF = default_nf;
+ h[i].privNF = ath9k_hw_get_default_nf(ah, chan, k);
h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
- for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
- h[i].nfCalBuffer[j] = default_nf;
- }
+ for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++)
+ h[i].nfCalBuffer[j] = h[i].privNF;
+ if (++k >= AR5416_MAX_CHAINS)
+ k = 0;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 5d1a51d83aa6..303ab470ce34 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -151,7 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
return bins[0] & 0x3f;
}
-#ifdef CONFIG_ATH9K_COMMON_DEBUG
+#ifdef CONFIG_ATH9K_COMMON_SPECTRAL
void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
@@ -183,6 +183,6 @@ static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv,
{
return 0;
}
-#endif /* CONFIG_ATH9K_COMMON_DEBUG */
+#endif /* CONFIG_ATH9K_COMMON_SPECTRAL */
#endif /* SPECTRAL_H */
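Editor's note: the CONFIG_ATH9K_COMMON_DEBUG to CONFIG_ATH9K_COMMON_SPECTRAL rename above keeps the usual header pattern — real declarations under the config symbol, static inline no-op stubs otherwise — so call sites compile unchanged either way. In miniature:

#include <stdio.h>

#define CONFIG_MY_SPECTRAL 1	/* comment out to compile the stubs */

#ifdef CONFIG_MY_SPECTRAL
static int spectral_init(void)
{
	puts("spectral support built in");
	return 0;
}
#else
static inline int spectral_init(void)
{
	return 0;	/* compiled out: callers need no #ifdefs */
}
#endif

int main(void)
{
	return spectral_init();
}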
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 40a397fd0e0e..6fee9a464cce 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -123,11 +123,9 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
fft = (struct ath9k_dfs_fft_40 *) (data + 2);
ath_dbg(common, DFS, "fixing datalen by 2\n");
}
- if (IS_CHAN_HT40MINUS(ah->curchan)) {
- int temp = is_ctl;
- is_ctl = is_ext;
- is_ext = temp;
- }
+ if (IS_CHAN_HT40MINUS(ah->curchan))
+ swap(is_ctl, is_ext);
+
for (i = 0; i < FFT_NUM_SAMPLES; i++)
max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
is_ext);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f808e5833d7e..a82ad739ab80 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1683,6 +1683,10 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ if (tid >= ATH9K_HTC_MAX_TID) {
+ ret = -EINVAL;
+ break;
+ }
ista = (struct ath9k_htc_sta *) sta->drv_priv;
spin_lock_bh(&priv->tx.tx_lock);
ista->tid_state[tid] = AGGR_OPERATIONAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8c5c2dd8fa7f..cd0f023ccf77 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -922,6 +922,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXERR |
AR_IMR_RXORN |
AR_IMR_BCNMISC;
+ u32 msi_cfg = 0;
if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
AR_SREV_9561(ah))
@@ -929,22 +930,30 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
if (AR_SREV_9300_20_OR_LATER(ah)) {
imr_reg |= AR_IMR_RXOK_HP;
- if (ah->config.rx_intr_mitigation)
+ if (ah->config.rx_intr_mitigation) {
imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
- else
+ msi_cfg |= AR_INTCFG_MSI_RXINTM | AR_INTCFG_MSI_RXMINTR;
+ } else {
imr_reg |= AR_IMR_RXOK_LP;
-
+ msi_cfg |= AR_INTCFG_MSI_RXOK;
+ }
} else {
- if (ah->config.rx_intr_mitigation)
+ if (ah->config.rx_intr_mitigation) {
imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
- else
+ msi_cfg |= AR_INTCFG_MSI_RXINTM | AR_INTCFG_MSI_RXMINTR;
+ } else {
imr_reg |= AR_IMR_RXOK;
+ msi_cfg |= AR_INTCFG_MSI_RXOK;
+ }
}
- if (ah->config.tx_intr_mitigation)
+ if (ah->config.tx_intr_mitigation) {
imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
- else
+ msi_cfg |= AR_INTCFG_MSI_TXINTM | AR_INTCFG_MSI_TXMINTR;
+ } else {
imr_reg |= AR_IMR_TXOK;
+ msi_cfg |= AR_INTCFG_MSI_TXOK;
+ }
ENABLE_REGWRITE_BUFFER(ah);
@@ -952,6 +961,16 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
ah->imrs2_reg |= AR_IMR_S2_GTT;
REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+ if (ah->msi_enabled) {
+ ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ ah->msi_reg |= AR_PCIE_MSI_HW_DBI_WR_EN;
+ ah->msi_reg &= AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
+ REG_WRITE(ah, AR_INTCFG, msi_cfg);
+ ath_dbg(ath9k_hw_common(ah), ANY,
+ "value of AR_INTCFG=0x%X, msi_cfg=0x%X\n",
+ REG_READ(ah, AR_INTCFG), msi_cfg);
+ }
+
if (!AR_SREV_9100(ah)) {
REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 4ac70827d142..9804a24a2dc0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -754,6 +754,8 @@ struct ath_nf_limits {
s16 max;
s16 min;
s16 nominal;
+ s16 cal[AR5416_MAX_CHAINS];
+ s16 pwr[AR5416_MAX_CHAINS];
};
enum ath_cal_list {
@@ -977,6 +979,9 @@ struct ath_hw {
bool tpc_enabled;
u8 tx_power[Ar5416RateSize];
u8 tx_power_stbc[Ar5416RateSize];
+ bool msi_enabled;
+ u32 msi_mask;
+ u32 msi_reg;
};
struct ath_bus_ops {
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index fa58a32227f5..e479fae5aab9 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/relay.h>
+#include <linux/dmi.h>
#include <net/ieee80211_radiotap.h>
#include "ath9k.h"
@@ -75,6 +76,10 @@ MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");
#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
+int ath9k_use_msi;
+module_param_named(use_msi, ath9k_use_msi, int, 0444);
+MODULE_PARM_DESC(use_msi, "Use MSI instead of INTx if possible");
+
bool is_ath9k_unloaded;
#ifdef CONFIG_MAC80211_LEDS
@@ -92,6 +97,56 @@ static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
};
#endif
+static int __init set_use_msi(const struct dmi_system_id *dmi)
+{
+ ath9k_use_msi = 1;
+ return 1;
+}
+
+static const struct dmi_system_id ath9k_quirks[] __initconst = {
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Inspiron 24-3460",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 24-3460"),
+ },
+ },
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Vostro 3262",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3262"),
+ },
+ },
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Inspiron 3472",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3472"),
+ },
+ },
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Vostro 15-3572",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15-3572"),
+ },
+ },
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Inspiron 14-3473",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14-3473"),
+ },
+ },
+ {}
+};
+
static void ath9k_deinit_softc(struct ath_softc *sc);
static void ath9k_op_ps_wakeup(struct ath_common *common)
@@ -1100,6 +1155,8 @@ static int __init ath9k_init(void)
goto err_pci_exit;
}
+ dmi_check_system(ath9k_quirks);
+
return 0;
err_pci_exit:
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 77c94f9e7b61..58d02c19b6d0 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -832,6 +832,43 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
}
ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+
+ if (ah->msi_enabled) {
+ u32 _msi_reg = 0;
+ u32 i = 0;
+ u32 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
+
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+ "Enabling MSI, msi_mask=0x%X\n", ah->msi_mask);
+
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, ah->msi_mask);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, ah->msi_mask);
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+ "AR_INTR_PRIO_ASYNC_ENABLE=0x%X, AR_INTR_PRIO_ASYNC_MASK=0x%X\n",
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE),
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK));
+
+ if (ah->msi_reg == 0)
+ ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+ "AR_PCIE_MSI=0x%X, ah->msi_reg = 0x%X\n",
+ AR_PCIE_MSI, ah->msi_reg);
+
+ i = 0;
+ do {
+ REG_WRITE(ah, AR_PCIE_MSI,
+ (ah->msi_reg | AR_PCIE_MSI_ENABLE)
+ & msi_pend_addr_mask);
+ _msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ i++;
+ } while ((_msi_reg & AR_PCIE_MSI_ENABLE) == 0 && i < 200);
+
+ if (i >= 200)
+ ath_err(ath9k_hw_common(ah),
+ "%s: _msi_reg = 0x%X\n",
+ __func__, _msi_reg);
+ }
}
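Editor's note: the do/while above is a bounded write-and-verify loop — presumably AR_PCIE_MSI_ENABLE may not latch on the first write, so the write repeats until readback confirms the bit or the 200-try budget runs out. The shape of the pattern, reduced to a self-contained demo with a fake register:

#include <stdio.h>
#include <stdint.h>

#define REG_ENABLE	0x1u

static uint32_t fake_reg;	/* stands in for a device register */

static void reg_write(uint32_t val) { fake_reg = val; }
static uint32_t reg_read(void)      { return fake_reg; }

int main(void)
{
	uint32_t val;
	int i = 0;

	do {
		reg_write(REG_ENABLE);
		val = reg_read();
		i++;
	} while (!(val & REG_ENABLE) && i < 200);

	printf("%s after %d write(s)\n",
	       (val & REG_ENABLE) ? "latched" : "gave up", i);
	return 0;
}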
void ath9k_hw_resume_interrupts(struct ath_hw *ah)
@@ -878,12 +915,21 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
if (!(ints & ATH9K_INT_GLOBAL))
ath9k_hw_disable_interrupts(ah);
+ if (ah->msi_enabled) {
+ ath_dbg(common, INTERRUPT, "Clearing AR_INTR_PRIO_ASYNC_ENABLE\n");
+
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE);
+ }
+
ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
mask = ints & ATH9K_INT_COMMON;
mask2 = 0;
+ ah->msi_mask = 0;
if (ints & ATH9K_INT_TX) {
+ ah->msi_mask |= AR_INTR_PRIO_TX;
if (ah->config.tx_intr_mitigation)
mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
else {
@@ -898,6 +944,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
mask |= AR_IMR_TXEOL;
}
if (ints & ATH9K_INT_RX) {
+ ah->msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
if (AR_SREV_9300_20_OR_LATER(ah)) {
mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
if (ah->config.rx_intr_mitigation) {
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 223606311261..645f0fbd9179 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -22,6 +22,8 @@
#include <linux/module.h>
#include "ath9k.h"
+extern int ath9k_use_msi;
+
static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
@@ -889,6 +891,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
u32 val;
int ret = 0;
char hw_name[64];
+ int msi_enabled = 0;
if (pcim_enable_device(pdev))
return -EIO;
@@ -960,7 +963,20 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->mem = pcim_iomap_table(pdev)[0];
sc->driver_data = id->driver_data;
- ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ if (ath9k_use_msi) {
+ if (pci_enable_msi(pdev) == 0) {
+ msi_enabled = 1;
+ dev_err(&pdev->dev, "Using MSI\n");
+ } else {
+ dev_err(&pdev->dev, "Using INTx\n");
+ }
+ }
+
+ if (!msi_enabled)
+ ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ else
+ ret = request_irq(pdev->irq, ath_isr, 0, "ath9k", sc);
+
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
goto err_irq;
@@ -974,6 +990,9 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init;
}
+ sc->sc_ah->msi_enabled = msi_enabled;
+ sc->sc_ah->msi_reg = 0;
+
ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)sc->mem, pdev->irq);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 2197aee2bb72..a8ac42c96d71 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -826,9 +826,9 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
sc->rx.discard_next = false;
/*
- * Discard zero-length packets.
+ * Discard zero-length packets and packets smaller than an ACK
*/
- if (!rx_stats->rs_datalen) {
+ if (rx_stats->rs_datalen < 10) {
RX_STAT_INC(rx_len_err);
goto corrupt;
}
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 80ff69f99229..653e79611830 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -146,6 +146,14 @@
#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
#define AR_MACMISC_MISC_OBS_BUS_1 1
+#define AR_INTCFG 0x005C
+#define AR_INTCFG_MSI_RXOK 0x00000000
+#define AR_INTCFG_MSI_RXINTM 0x00000004
+#define AR_INTCFG_MSI_RXMINTR 0x00000006
+#define AR_INTCFG_MSI_TXOK 0x00000000
+#define AR_INTCFG_MSI_TXINTM 0x00000010
+#define AR_INTCFG_MSI_TXMINTR 0x00000018
+
#define AR_DATABUF_SIZE 0x0060
#define AR_DATABUF_SIZE_MASK 0x00000FFF
@@ -1256,6 +1264,13 @@ enum {
#define AR_PCIE_MSI (AR_SREV_9340(ah) ? 0x40d8 : \
(AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094))
#define AR_PCIE_MSI_ENABLE 0x00000001
+#define AR_PCIE_MSI_HW_DBI_WR_EN 0x02000000
+#define AR_PCIE_MSI_HW_INT_PENDING_ADDR 0xFFA0C1FF /* bits 8..11: value must be 0x5060 */
+#define AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64 0xFFA0C9FF /* bits 8..11: value must be 0x5064 */
+
+#define AR_INTR_PRIO_TX 0x00000001
+#define AR_INTR_PRIO_RXLP 0x00000002
+#define AR_INTR_PRIO_RXHP 0x00000004
#define AR_INTR_PRIO_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4088 : 0x40c4)
#define AR_INTR_PRIO_ASYNC_MASK (AR_SREV_9340(ah) ? 0x408c : 0x40c8)
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index d5c810a8cc52..a3f1f7d042a4 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -236,6 +236,14 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
return 0;
}
+static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
+{
+ size_t size;
+
+ size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+ dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
+}
+
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
struct wcn36xx_dxe_mem_pool *pool)
{
@@ -722,7 +730,11 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for TX LOW channel */
/***************************************/
- wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+ ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+ if (ret) {
+ dev_err(wcn->dev, "Error allocating descriptor\n");
+ return ret;
+ }
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
/* Write channel head to a NEXT register */
@@ -740,7 +752,12 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for TX HIGH channel */
/***************************************/
- wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+ ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+ if (ret) {
+ dev_err(wcn->dev, "Error allocating descriptor\n");
+ goto out_err_txh_ch;
+ }
+
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
/* Write channel head to a NEXT register */
@@ -760,7 +777,12 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for RX LOW channel */
/***************************************/
- wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+ ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+ if (ret) {
+ dev_err(wcn->dev, "Error allocating descriptor\n");
+ goto out_err_rxl_ch;
+ }
+
+ /* For RX we need to preallocate buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@@ -790,7 +812,11 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for RX HIGH channel */
/***************************************/
- wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
+ ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
+ if (ret) {
+ dev_err(wcn->dev, "Error allocating descriptor\n");
+ goto out_err_rxh_ch;
+ }
/* For RX we need to preallocate buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
@@ -819,11 +845,19 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
ret = wcn36xx_dxe_request_irqs(wcn);
if (ret < 0)
- goto out_err;
+ goto out_err_irq;
return 0;
-out_err:
+out_err_irq:
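+ /* unwind: free descriptor rings in reverse order of allocation */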
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
+out_err_rxh_ch:
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+out_err_rxl_ch:
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+out_err_txh_ch:
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+
return ret;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index b765c647319d..182963522941 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -348,6 +348,13 @@ enum wcn36xx_hal_host_msg_type {
WCN36XX_HAL_DHCP_START_IND = 189,
WCN36XX_HAL_DHCP_STOP_IND = 190,
+ /* Scan Offload(hw) APIs */
+ WCN36XX_HAL_START_SCAN_OFFLOAD_REQ = 204,
+ WCN36XX_HAL_START_SCAN_OFFLOAD_RSP = 205,
+ WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ = 206,
+ WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP = 207,
+ WCN36XX_HAL_SCAN_OFFLOAD_IND = 210,
+
WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
WCN36XX_HAL_PRINT_REG_INFO_IND = 259,
@@ -1115,6 +1122,101 @@ struct wcn36xx_hal_finish_scan_rsp_msg {
} __packed;
+enum wcn36xx_hal_scan_type {
+ WCN36XX_HAL_SCAN_TYPE_PASSIVE = 0x00,
+ WCN36XX_HAL_SCAN_TYPE_ACTIVE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+struct wcn36xx_hal_mac_ssid {
+ u8 length;
+ u8 ssid[32];
+} __packed;
+
+struct wcn36xx_hal_start_scan_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSSIDs hot list */
+ u8 num_bssid;
+ u8 bssids[4][ETH_ALEN];
+
+ /* Directed probe-requests will be sent for listed SSIDs (max 10) */
+ u8 num_ssid;
+ struct wcn36xx_hal_mac_ssid ssids[10];
+
+ /* Report AP with hidden ssid */
+ u8 scan_hidden;
+
+ /* Self MAC address */
+ u8 mac[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Scan type */
+ enum wcn36xx_hal_scan_type scan_type;
+
+ /* Minimum scanning time on each channel (ms) */
+ u32 min_ch_time;
+
+ /* Maximum scanning time on each channel */
+ u32 max_ch_time;
+
+ /* Is a p2p search */
+ u8 p2p_search;
+
+ /* Channels to scan */
+ u8 num_channel;
+ u8 channels[80];
+
+ /* IE field */
+ u16 ie_len;
+ u8 ie[0];
+} __packed;
+
+struct wcn36xx_hal_start_scan_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+enum wcn36xx_hal_scan_offload_ind_type {
+ /* Scan has been started */
+ WCN36XX_HAL_SCAN_IND_STARTED = 0x01,
+ /* Scan has been completed */
+ WCN36XX_HAL_SCAN_IND_COMPLETED = 0x02,
+ /* Moved to foreign channel */
+ WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL = 0x08,
+ /* scan request has been dequeued */
+ WCN36XX_HAL_SCAN_IND_DEQUEUED = 0x10,
+ /* preempted by other high priority scan */
+ WCN36XX_HAL_SCAN_IND_PREEMPTED = 0x20,
+ /* scan start failed */
+ WCN36XX_HAL_SCAN_IND_FAILED = 0x40,
+ /* scan restarted */
+ WCN36XX_HAL_SCAN_IND_RESTARTED = 0x80,
+ WCN36XX_HAL_SCAN_IND_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+struct wcn36xx_hal_scan_offload_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 type;
+ u32 channel_mhz;
+ u32 scan_id;
+} __packed;
+
+struct wcn36xx_hal_stop_scan_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+} __packed;
+
+struct wcn36xx_hal_stop_scan_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
enum wcn36xx_hal_rate_index {
HW_RATE_INDEX_1MBPS = 0x82,
HW_RATE_INDEX_2MBPS = 0x84,
@@ -1507,11 +1609,6 @@ struct wcn36xx_hal_edca_param_record {
u16 txop_limit;
} __packed;
-struct wcn36xx_hal_mac_ssid {
- u8 length;
- u8 ssid[32];
-} __packed;
-
/* Concurrency role. These are generic IDs that identify the various roles
* in the software system. */
enum wcn36xx_hal_con_mode {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index f7d228b5ba93..ab5be6d2c691 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = wcn36xx_priv_to_vif(tmp);
+ if (hw->conf.flags & IEEE80211_CONF_PS) {
+ if (vif->bss_conf.ps) /* ps allowed? */
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+ }
+
mutex_unlock(&wcn->conf_mutex);
return 0;
@@ -629,7 +641,6 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_scan_request *hw_req)
{
struct wcn36xx *wcn = hw->priv;
-
mutex_lock(&wcn->scan_lock);
if (wcn->scan_req) {
mutex_unlock(&wcn->scan_lock);
@@ -638,11 +649,16 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
wcn->scan_aborted = false;
wcn->scan_req = &hw_req->req;
+
mutex_unlock(&wcn->scan_lock);
- schedule_work(&wcn->scan_work);
+ if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ /* legacy manual/sw scan */
+ schedule_work(&wcn->scan_work);
+ return 0;
+ }
- return 0;
+ return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
}
static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
@@ -650,6 +666,12 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
{
struct wcn36xx *wcn = hw->priv;
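+ /* if a hardware-offloaded scan is running, stop it and report it aborted */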
+ if (!wcn36xx_smd_stop_hw_scan(wcn)) {
+ struct cfg80211_scan_info scan_info = { .aborted = true };
+
+ ieee80211_scan_completed(wcn->hw, &scan_info);
+ }
+
mutex_lock(&wcn->scan_lock);
wcn->scan_aborted = true;
mutex_unlock(&wcn->scan_lock);
@@ -747,17 +769,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
vif_priv->dtim_period = bss_conf->dtim_period;
}
- if (changed & BSS_CHANGED_PS) {
- wcn36xx_dbg(WCN36XX_DBG_MAC,
- "mac bss PS set %d\n",
- bss_conf->ps);
- if (bss_conf->ps) {
- wcn36xx_pmc_enter_bmps_state(wcn, vif);
- } else {
- wcn36xx_pmc_exit_bmps_state(wcn, vif);
- }
- }
-
if (changed & BSS_CHANGED_BSSID) {
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
bss_conf->bssid);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 589fe5f70971..1976b80c235f 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (WCN36XX_BMPS != vif_priv->pw_state) {
- wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
- return -EINVAL;
+ /* Unbalanced call or last BMPS enter failed */
+ wcn36xx_dbg(WCN36XX_DBG_PMC,
+ "Not in BMPS mode, no need to exit\n");
+ return -EALREADY;
}
wcn36xx_smd_exit_bmps(wcn, vif);
vif_priv->pw_state = WCN36XX_FULL_POWER;
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 9c6590d5348a..2a4871ca9c72 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -73,6 +73,8 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
+ WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000),
+ WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
};
@@ -613,6 +615,85 @@ out:
return ret;
}
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct wcn36xx_hal_start_scan_offload_req_msg msg_body;
+ int ret, i;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
+
+ msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
+ msg_body.min_ch_time = 30;
+ msg_body.max_ch_time = 100;
+ msg_body.scan_hidden = 1;
+ memcpy(msg_body.mac, vif->addr, ETH_ALEN);
+ msg_body.p2p_search = vif->p2p;
+
+ msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids));
+ for (i = 0; i < msg_body.num_ssid; i++) {
+ msg_body.ssids[i].length = min_t(u8, req->ssids[i].ssid_len,
+ sizeof(msg_body.ssids[i].ssid));
+ memcpy(msg_body.ssids[i].ssid, req->ssids[i].ssid,
+ msg_body.ssids[i].length);
+ }
+
+ msg_body.num_channel = min_t(u8, req->n_channels,
+ sizeof(msg_body.channels));
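+ /* channels[] is an array of u8, so sizeof() equals its element count */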
+ for (i = 0; i < msg_body.num_channel; i++)
+ msg_body.channels[i] = req->channels[i]->hw_value;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal start hw-scan (channels: %u; ssids: %u; p2p: %s)\n",
+ msg_body.num_channel, msg_body.num_ssid,
+ msg_body.p2p_search ? "yes" : "no");
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start_scan_offload failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start_scan_offload response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_stop_scan_offload_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ);
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal stop hw-scan\n");
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_stop_scan_offload failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_stop_scan_offload response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
@@ -2039,6 +2120,40 @@ static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
return 0;
}
+static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_scan_offload_ind *rsp = buf;
+ struct cfg80211_scan_info scan_info = {};
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted delete scan indication\n");
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)\n", rsp->type);
+
+ switch (rsp->type) {
+ case WCN36XX_HAL_SCAN_IND_FAILED:
+ scan_info.aborted = true;
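+ /* fall through: a failed scan is reported as an aborted completion */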
+ case WCN36XX_HAL_SCAN_IND_COMPLETED:
+ mutex_lock(&wcn->scan_lock);
+ wcn->scan_req = NULL;
+ mutex_unlock(&wcn->scan_lock);
+ ieee80211_scan_completed(wcn->hw, &scan_info);
+ break;
+ case WCN36XX_HAL_SCAN_IND_STARTED:
+ case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL:
+ case WCN36XX_HAL_SCAN_IND_DEQUEUED:
+ case WCN36XX_HAL_SCAN_IND_PREEMPTED:
+ case WCN36XX_HAL_SCAN_IND_RESTARTED:
+ break;
+ default:
+ wcn36xx_warn("Unknown scan indication type %x\n", rsp->type);
+ }
+
+ return 0;
+}
+
static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
void *buf,
size_t len)
@@ -2250,6 +2365,8 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_CH_SWITCH_RSP:
case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
+ case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP:
+ case WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
@@ -2262,6 +2379,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
case WCN36XX_HAL_PRINT_REG_INFO_IND:
+ case WCN36XX_HAL_SCAN_OFFLOAD_IND:
msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
if (!msg_ind) {
wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
@@ -2298,6 +2416,8 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
struct wcn36xx_hal_ind_msg,
list);
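+ /* dequeue under the lock, then process the message without holding it */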
+ list_del(wcn->hal_ind_queue.next);
+ spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
@@ -2326,12 +2446,14 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
hal_ind_msg->msg,
hal_ind_msg->msg_len);
break;
+ case WCN36XX_HAL_SCAN_OFFLOAD_IND:
+ wcn36xx_smd_hw_scan_ind(wcn, hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
default:
wcn36xx_err("SMD_EVENT (%d) not supported\n",
msg_header->msg_type);
}
- list_del(wcn->hal_ind_queue.next);
- spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
kfree(hal_ind_msg);
}
int wcn36xx_smd_open(struct wcn36xx *wcn)
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 013fc9546f56..8076edf40ac8 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -65,6 +65,9 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel);
int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
enum wcn36xx_hal_sys_mode mode);
int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn);
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
index c131b5e1292f..d32c1f4e533a 100644
--- a/drivers/net/wireless/ath/wil6210/boot_loader.h
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -39,7 +40,8 @@ struct bl_dedicated_registers_v1 {
/* valid only for version 2 and above */
__le32 bl_assert_code; /* 0x880A58 BL Assert code */
__le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */
- __le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */
+ __le32 bl_shutdown_handshake; /* 0x880A60 BL cleaner shutdown */
+ __le32 bl_reserved[21]; /* 0x880A64 - 0x880AB4 */
__le32 bl_magic_number; /* 0x880AB8 BL Magic number */
} __packed;
@@ -58,4 +60,9 @@ struct bl_dedicated_registers_v0 {
u8 mac_address[6]; /* 0x880A4c BL mac address */
} __packed;
+/* bits for bl_shutdown_handshake */
+#define BL_SHUTDOWN_HS_GRTD BIT(0)
+#define BL_SHUTDOWN_HS_RTD BIT(1)
+#define BL_SHUTDOWN_HS_PROT_VER(x) WIL_GET_BITS(x, 28, 31)
+
#endif /* BOOT_LOADER_EXPORT_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 85d5c04618eb..768f63f38341 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -901,7 +901,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
u64 *cookie)
{
const u8 *buf = params->buf;
- size_t len = params->len;
+ size_t len = params->len, total;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
bool tx_status = false;
@@ -926,7 +926,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (len < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
- cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ total = sizeof(*cmd) + len;
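+ /* guard against size_t wrap-around in sizeof(*cmd) + len */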
+ if (total < len)
+ return -EINVAL;
+
+ cmd = kmalloc(total, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
@@ -936,7 +940,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cmd->len = cpu_to_le16(len);
memcpy(cmd->payload, buf, len);
- rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+ rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (rc == 0)
tx_status = !evt.evt.status;
@@ -952,9 +956,8 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = wil_to_wdev(wil);
- wdev->preset_chandef = *chandef;
+ wil->monitor_chandef = *chandef;
return 0;
}
@@ -1727,9 +1730,12 @@ static int wil_cfg80211_suspend(struct wiphy *wiphy,
wil_dbg_pm(wil, "suspending\n");
- wil_p2p_stop_discovery(wil);
-
+ mutex_lock(&wil->mutex);
+ mutex_lock(&wil->p2p_wdev_mutex);
+ wil_p2p_stop_radio_operations(wil);
wil_abort_scan(wil, true);
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ mutex_unlock(&wil->mutex);
out:
return rc;
@@ -1744,6 +1750,69 @@ static int wil_cfg80211_resume(struct wiphy *wiphy)
return 0;
}
+static int
+wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int i, rc;
+
+ wil_dbg_misc(wil,
+ "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
+ request->n_ssids, request->ie_len, request->flags);
+ for (i = 0; i < request->n_ssids; i++) {
+ wil_dbg_misc(wil, "SSID[%d]:", i);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ssids[i].ssid,
+ request->ssids[i].ssid_len, true);
+ }
+ wil_dbg_misc(wil, "channels:");
+ for (i = 0; i < request->n_channels; i++)
+ wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
+ i == request->n_channels - 1 ? "\n" : "");
+ wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
+ request->n_match_sets, request->min_rssi_thold,
+ request->delay);
+ for (i = 0; i < request->n_match_sets; i++) {
+ struct cfg80211_match_set *ms = &request->match_sets[i];
+
+ wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
+ i, ms->rssi_thold);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ ms->ssid.ssid,
+ ms->ssid.ssid_len, true);
+ }
+ wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
+ for (i = 0; i < request->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
+
+ wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
+ i, sp->interval, sp->iterations);
+ }
+
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+ if (rc)
+ return rc;
+ return wmi_start_sched_scan(wil, request);
+}
+
+static int
+wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
+ u64 reqid)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ rc = wmi_stop_sched_scan(wil);
+ /* the device returns an error if it thinks PNO is already stopped;
+ * ignore the return code so user space and the driver get back in sync
+ */
+ wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
+
+ return 0;
+}
+
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1777,6 +1846,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
.suspend = wil_cfg80211_suspend,
.resume = wil_cfg80211_resume,
+ .sched_scan_start = wil_cfg80211_sched_scan_start,
+ .sched_scan_stop = wil_cfg80211_sched_scan_stop,
};
static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index e58dc6dc1f9c..4a4888246e8c 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -242,12 +242,19 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx));
wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx));
+ wil_pm_runtime_put(wil);
+
return 0;
}
@@ -265,15 +272,37 @@ static const struct file_operations fops_mbox = {
static int wil_debugfs_iomem_x32_set(void *data, u64 val)
{
- writel(val, (void __iomem *)data);
+ struct wil_debugfs_iomem_data *d =
+ (struct wil_debugfs_iomem_data *)data;
+ struct wil6210_priv *wil = d->wil;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ writel(val, (void __iomem *)d->offset);
wmb(); /* make sure write propagated to HW */
+ wil_pm_runtime_put(wil);
+
return 0;
}
static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
{
- *val = readl((void __iomem *)data);
+ struct wil_debugfs_iomem_data *d =
+ (struct wil_debugfs_iomem_data *)data;
+ struct wil6210_priv *wil = d->wil;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ *val = readl((void __iomem *)d->offset);
+
+ wil_pm_runtime_put(wil);
return 0;
}
@@ -284,10 +313,21 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
umode_t mode,
struct dentry *parent,
- void *value)
+ void *value,
+ struct wil6210_priv *wil)
{
- return debugfs_create_file(name, mode, parent, value,
- &fops_iomem_x32);
+ struct dentry *file;
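+ /* take the next preallocated slot; dbg_off_count bounds the total */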
+ struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
+ wil->dbg_data.iomem_data_count];
+
+ data->wil = wil;
+ data->offset = value;
+
+ file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
+ if (!IS_ERR_OR_NULL(file))
+ wil->dbg_data.iomem_data_count++;
+
+ return file;
}
static int wil_debugfs_ulong_set(void *data, u64 val)
@@ -346,7 +386,8 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
case doff_io32:
f = wil_debugfs_create_iomem_x32(tbl[i].name,
tbl[i].mode, dbg,
- base + tbl[i].off);
+ base + tbl[i].off,
+ wil);
break;
case doff_u8:
f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
@@ -475,13 +516,22 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
static int wil_memread_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+ void __iomem *a;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ a = wmi_buffer(wil, cpu_to_le32(mem_addr));
if (a)
seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
else
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+ wil_pm_runtime_put(wil);
+
return 0;
}
@@ -502,10 +552,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
{
enum { max_count = 4096 };
struct wil_blob_wrapper *wil_blob = file->private_data;
+ struct wil6210_priv *wil = wil_blob->wil;
loff_t pos = *ppos;
size_t available = wil_blob->blob.size;
void *buf;
size_t ret;
+ int rc;
if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
test_bit(wil_status_suspended, wil_blob->wil->status))
@@ -526,10 +578,19 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
+ rc = wil_pm_runtime_get(wil);
+ if (rc < 0) {
+ kfree(buf);
+ return rc;
+ }
+
wil_memcpy_fromio_32(buf, (const void __iomem *)
wil_blob->blob.data + pos, count);
ret = copy_to_user(user_buf, buf, count);
+
+ wil_pm_runtime_put(wil);
+
kfree(buf);
if (ret == count)
return -EFAULT;
@@ -808,7 +869,6 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
params.buf = frame;
params.len = len;
- params.chan = wdev->preset_chandef.chan;
rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
@@ -1571,8 +1631,6 @@ static ssize_t wil_write_suspend_stats(struct file *file,
struct wil6210_priv *wil = file->private_data;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
- wil->suspend_stats.min_suspend_time = ULONG_MAX;
- wil->suspend_stats.collection_start = ktime_get();
return len;
}
@@ -1582,33 +1640,41 @@ static ssize_t wil_read_suspend_stats(struct file *file,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
- static char text[400];
- int n;
- unsigned long long stats_collection_time =
- ktime_to_us(ktime_sub(ktime_get(),
- wil->suspend_stats.collection_start));
+ char *text;
+ int n, ret, text_size = 500;
- n = snprintf(text, sizeof(text),
- "Suspend statistics:\n"
+ text = kmalloc(text_size, GFP_KERNEL);
+ if (!text)
+ return -ENOMEM;
+
+ n = snprintf(text, text_size,
+ "Radio on suspend statistics:\n"
+ "successful suspends:%ld failed suspends:%ld\n"
+ "successful resumes:%ld failed resumes:%ld\n"
+ "rejected by device:%ld\n"
+ "Radio off suspend statistics:\n"
"successful suspends:%ld failed suspends:%ld\n"
"successful resumes:%ld failed resumes:%ld\n"
- "rejected by host:%ld rejected by device:%ld\n"
- "total suspend time:%lld min suspend time:%lld\n"
- "max suspend time:%lld stats collection time: %lld\n",
- wil->suspend_stats.successful_suspends,
- wil->suspend_stats.failed_suspends,
- wil->suspend_stats.successful_resumes,
- wil->suspend_stats.failed_resumes,
- wil->suspend_stats.rejected_by_host,
+ "General statistics:\n"
+ "rejected by host:%ld\n",
+ wil->suspend_stats.r_on.successful_suspends,
+ wil->suspend_stats.r_on.failed_suspends,
+ wil->suspend_stats.r_on.successful_resumes,
+ wil->suspend_stats.r_on.failed_resumes,
wil->suspend_stats.rejected_by_device,
- wil->suspend_stats.total_suspend_time,
- wil->suspend_stats.min_suspend_time,
- wil->suspend_stats.max_suspend_time,
- stats_collection_time);
+ wil->suspend_stats.r_off.successful_suspends,
+ wil->suspend_stats.r_off.failed_suspends,
+ wil->suspend_stats.r_off.successful_resumes,
+ wil->suspend_stats.r_off.failed_resumes,
+ wil->suspend_stats.rejected_by_host);
+
+ n = min_t(int, n, text_size);
- n = min_t(int, n, sizeof(text));
+ ret = simple_read_from_buffer(user_buf, count, ppos, text, n);
- return simple_read_from_buffer(user_buf, count, ppos, text, n);
+ kfree(text);
+
+ return ret;
}
static const struct file_operations fops_suspend_stats = {
@@ -1736,14 +1802,31 @@ static const struct dbg_off dbg_statics[] = {
{},
};
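+/* total number of iomem debugfs entries that may be created; the isr_off
+ * table is counted four times, once per ISR register set exposed below
+ */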
+static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) +
+ ARRAY_SIZE(dbg_wil_regs) - 1 +
+ ARRAY_SIZE(pseudo_isr_off) - 1 +
+ ARRAY_SIZE(lgc_itr_cnt_off) - 1 +
+ ARRAY_SIZE(tx_itr_cnt_off) - 1 +
+ ARRAY_SIZE(rx_itr_cnt_off) - 1;
+
int wil6210_debugfs_init(struct wil6210_priv *wil)
{
struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
wil_to_wiphy(wil)->debugfsdir);
-
if (IS_ERR_OR_NULL(dbg))
return -ENODEV;
+ wil->dbg_data.data_arr = kcalloc(dbg_off_count,
+ sizeof(struct wil_debugfs_iomem_data),
+ GFP_KERNEL);
+ if (!wil->dbg_data.data_arr) {
+ debugfs_remove_recursive(dbg);
+ wil->debug = NULL;
+ return -ENOMEM;
+ }
+
+ wil->dbg_data.iomem_data_count = 0;
+
wil_pmc_init(wil);
wil6210_debugfs_init_files(wil, dbg);
@@ -1758,8 +1841,6 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
wil6210_debugfs_create_ITR_CNT(wil, dbg);
- wil->suspend_stats.collection_start = ktime_get();
-
return 0;
}
@@ -1768,6 +1849,8 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
+ kfree(wil->dbg_data.data_arr);
+
/* free pmc memory without sending command to fw, as it will
* be reset on the way down anyway
*/
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index adcfef4dabf7..66200f616a37 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -47,9 +47,14 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
struct wil6210_priv *wil = ndev_to_wil(ndev);
u32 tx_itr_en, tx_itr_val = 0;
u32 rx_itr_en, rx_itr_val = 0;
+ int ret;
wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
@@ -58,6 +63,8 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
+ wil_pm_runtime_put(wil);
+
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
return 0;
@@ -67,6 +74,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *cp)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
+ int ret;
wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
@@ -86,8 +94,15 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
wil_configure_interrupt_moderation(wil);
+ wil_pm_runtime_put(wil);
+
return 0;
out_bad:
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index 2f2b910501ba..2c7b24f61587 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -58,15 +59,30 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */
u8 data[0]; /* free-form data [data_size], see above */
} __packed;
+/* Comment header - common for all comment record types */
+struct wil_fw_record_comment_hdr {
+ __le32 magic;
+};
+
/* FW capabilities encoded inside a comment record */
#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
/* identifies capabilities record */
- __le32 magic;
+ struct wil_fw_record_comment_hdr hdr;
/* capabilities (variable size), see enum wmi_fw_capability */
u8 capabilities[0];
};
+/* brd file info encoded inside a comment record */
+#define WIL_BRD_FILE_MAGIC (0xabcddcbb)
+struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */
+ /* identifies brd file record */
+ struct wil_fw_record_comment_hdr hdr;
+ __le32 version;
+ __le32 base_addr;
+ __le32 max_size_bytes;
+} __packed;
+
/* perform action
* data_size = @head.size - offsetof(struct wil_fw_record_action, data)
*/
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index e01acac88825..914c0106e94b 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -26,14 +27,17 @@
prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#define FW_ADDR_CHECK(ioaddr, val, msg) do { \
- ioaddr = wmi_buffer(wil, val); \
- if (!ioaddr) { \
- wil_err_fw(wil, "bad " msg ": 0x%08x\n", \
- le32_to_cpu(val)); \
- return -EINVAL; \
- } \
- } while (0)
+static bool wil_fw_addr_check(struct wil6210_priv *wil,
+ void __iomem **ioaddr, __le32 val,
+ u32 size, const char *msg)
+{
+ *ioaddr = wmi_buffer_block(wil, val, size);
+ if (!(*ioaddr)) {
+ wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
+ return false;
+ }
+ return true;
+}
/**
* wil_fw_verify - verify firmware file validity
@@ -124,14 +128,6 @@ static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
return 0;
}
-static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
- size_t size)
-{
- wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true);
-
- return 0;
-}
-
static int
fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
size_t size)
@@ -139,9 +135,11 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
const struct wil_fw_record_capabilities *rec = data;
size_t capa_size;
- if (size < sizeof(*rec) ||
- le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+ if (size < sizeof(*rec)) {
+ wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+ data, size, true);
return 0;
+ }
capa_size = size - offsetof(struct wil_fw_record_capabilities,
capabilities);
@@ -153,8 +151,56 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
return 0;
}
-static int fw_handle_data(struct wil6210_priv *wil, const void *data,
- size_t size)
+static int
+fw_handle_brd_file(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_brd_file *rec = data;
+
+ if (size < sizeof(*rec)) {
+ wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+ data, size, true);
+ return 0;
+ }
+
+ wil->brd_file_addr = le32_to_cpu(rec->base_addr);
+ wil->brd_file_max_size = le32_to_cpu(rec->max_size_bytes);
+
+ wil_dbg_fw(wil, "brd_file_addr 0x%x, brd_file_max_size %d\n",
+ wil->brd_file_addr, wil->brd_file_max_size);
+
+ return 0;
+}
+
+static int
+fw_handle_comment(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_comment_hdr *hdr = data;
+ u32 magic;
+ int rc = 0;
+
+ if (size < sizeof(*hdr))
+ return 0;
+
+ magic = le32_to_cpu(hdr->magic);
+
+ switch (magic) {
+ case WIL_FW_CAPABILITIES_MAGIC:
+ wil_dbg_fw(wil, "magic is WIL_FW_CAPABILITIES_MAGIC\n");
+ rc = fw_handle_capabilities(wil, data, size);
+ break;
+ case WIL_BRD_FILE_MAGIC:
+ wil_dbg_fw(wil, "magic is WIL_BRD_FILE_MAGIC\n");
+ rc = fw_handle_brd_file(wil, data, size);
+ break;
+ }
+
+ return rc;
+}
+
+static int __fw_handle_data(struct wil6210_priv *wil, const void *data,
+ size_t size, __le32 addr)
{
const struct wil_fw_record_data *d = data;
void __iomem *dst;
@@ -165,15 +211,23 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data,
return -EINVAL;
}
- FW_ADDR_CHECK(dst, d->addr, "address");
- wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
- s);
+ if (!wil_fw_addr_check(wil, &dst, addr, s, "address"))
+ return -EINVAL;
+ wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(addr), s);
wil_memcpy_toio_32(dst, d->data, s);
wmb(); /* finish before processing next record */
return 0;
}
+static int fw_handle_data(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_data *d = data;
+
+ return __fw_handle_data(wil, data, size, d->addr);
+}
+
static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
size_t size)
{
@@ -197,7 +251,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
return -EINVAL;
}
- FW_ADDR_CHECK(dst, d->addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+ return -EINVAL;
v = le32_to_cpu(d->value);
wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
@@ -253,7 +308,8 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
u32 v = le32_to_cpu(block[i].value);
u32 x, y;
- FW_ADDR_CHECK(dst, block[i].addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
+ return -EINVAL;
x = readl(dst);
y = (x & m) | (v & ~m);
@@ -319,10 +375,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
- FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr");
- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr") ||
+ !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
+ "gateway_value_addr") ||
+ !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
" cmd 0x%08x ctl 0x%08x\n",
@@ -378,12 +439,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr"))
+ return -EINVAL;
for (k = 0; k < ARRAY_SIZE(block->value); k++)
- FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k],
- "gateway_value_addr");
- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+ if (!wil_fw_addr_check(wil, &gwa_val[k],
+ d->gateway_value_addr[k],
+ 0, "gateway_value_addr"))
+ return -EINVAL;
+ if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
le32_to_cpu(d->gateway_addr_addr),
@@ -422,7 +490,7 @@ static const struct {
int (*parse_handler)(struct wil6210_priv *wil, const void *data,
size_t size);
} wil_fw_handlers[] = {
- {wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+ {wil_fw_type_comment, fw_handle_comment, fw_handle_comment},
{wil_fw_type_data, fw_handle_data, fw_ignore_section},
{wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
/* wil_fw_type_action */
@@ -517,7 +585,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
rc = request_firmware(&fw, name, wil_to_dev(wil));
if (rc) {
- wil_err_fw(wil, "Failed to load firmware %s\n", name);
+ wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc);
return rc;
}
wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
@@ -539,6 +607,100 @@ out:
}
/**
+ * wil_brd_process - process section from BRD file
+ *
+ * Return error code
+ */
+static int wil_brd_process(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ int rc = 0;
+ const struct wil_fw_record_head *hdr = data;
+ size_t s, hdr_sz;
+ u16 type;
+
+ /* Assuming the board file includes only one header record and one data
+ * record. Each record starts with wil_fw_record_head.
+ */
+ if (size < sizeof(*hdr))
+ return -EINVAL;
+ s = sizeof(*hdr) + le32_to_cpu(hdr->size);
+ if (s > size)
+ return -EINVAL;
+
+ /* Skip the header record and handle the data record */
+ hdr = (const void *)hdr + s;
+ size -= s;
+ if (size < sizeof(*hdr))
+ return -EINVAL;
+ hdr_sz = le32_to_cpu(hdr->size);
+
+ if (wil->brd_file_max_size && hdr_sz > wil->brd_file_max_size)
+ return -EINVAL;
+ if (sizeof(*hdr) + hdr_sz > size)
+ return -EINVAL;
+ if (hdr_sz % 4) {
+ wil_err_fw(wil, "unaligned record size: %zu\n",
+ hdr_sz);
+ return -EINVAL;
+ }
+ type = le16_to_cpu(hdr->type);
+ if (type != wil_fw_type_data) {
+ wil_err_fw(wil, "invalid record type for board file: %d\n",
+ type);
+ return -EINVAL;
+ }
+ if (hdr_sz < sizeof(struct wil_fw_record_data)) {
+ wil_err_fw(wil, "data record too short: %zu\n", hdr_sz);
+ return -EINVAL;
+ }
+
+ wil_dbg_fw(wil, "using addr from fw file: [0x%08x]\n",
+ wil->brd_file_addr);
+
+ rc = __fw_handle_data(wil, &hdr[1], hdr_sz,
+ cpu_to_le32(wil->brd_file_addr));
+
+ return rc;
+}
+
+/**
+ * wil_request_board - Request board file
+ *
+ * Request the board image from a file.
+ * The board file address and max size are read from the FW file
+ * during initialization.
+ * The brd file must include one header and one data section.
+ *
+ * Return error code
+ */
+int wil_request_board(struct wil6210_priv *wil, const char *name)
+{
+ int rc, dlen;
+ const struct firmware *brd;
+
+ rc = request_firmware(&brd, name, wil_to_dev(wil));
+ if (rc) {
+ wil_err_fw(wil, "Failed to load brd %s\n", name);
+ return rc;
+ }
+ wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, brd->size);
+
+ /* Verify the header */
+ dlen = wil_fw_verify(wil, brd->data, brd->size);
+ if (dlen < 0) {
+ rc = dlen;
+ goto out;
+ }
+ /* Process the data record */
+ rc = wil_brd_process(wil, brd->data, dlen);
+
+out:
+ release_firmware(brd);
+ return rc;
+}
+
+/**
* wil_fw_verify_file_exists - checks if firmware file exist
*
* @wil: driver context
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 59def4f3fcf3..1835187ea075 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -358,6 +359,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil)
wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
}
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+ size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+
+ if (wil->mbox_ctl.rx.entry_size < min_size) {
+ wil_err(wil, "rx mbox entry too small (%d)\n",
+ wil->mbox_ctl.rx.entry_size);
+ return false;
+ }
+ if (wil->mbox_ctl.tx.entry_size < min_size) {
+ wil_err(wil, "tx mbox entry too small (%d)\n",
+ wil->mbox_ctl.tx.entry_size);
+ return false;
+ }
+
+ return true;
+}
+
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -376,8 +396,9 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
wil6210_mask_irq_misc(wil, false);
if (isr & ISR_MISC_FW_ERROR) {
- u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
- u32 ucode_assert_code = wil_r(wil, RGF_UCODE_ASSERT_CODE);
+ u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
+ u32 ucode_assert_code =
+ wil_r(wil, wil->rgf_ucode_assert_code_addr);
wil_err(wil,
"Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
@@ -393,7 +414,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
if (isr & ISR_MISC_FW_READY) {
wil_dbg_irq(wil, "IRQ: FW ready\n");
wil_cache_mbox_regs(wil);
- set_bit(wil_status_mbox_ready, wil->status);
+ if (wil_validate_mbox_regs(wil))
+ set_bit(wil_status_mbox_ready, wil->status);
/**
* Actual FW ready indicated by the
* WMI_FW_READY_EVENTID
@@ -545,7 +567,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
return IRQ_NONE;
- /* FIXME: IRQ mask debug */
+ /* IRQ mask debug */
if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
return IRQ_NONE;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 885924abf61c..0c61a6c13991 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -579,7 +580,6 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
WMI_WAKEUP_TRIGGER_BCAST;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
- wil->suspend_stats.min_suspend_time = ULONG_MAX;
wil->vring_idle_trsh = 16;
return 0;
@@ -638,6 +638,98 @@ void wil_priv_deinit(struct wil6210_priv *wil)
destroy_workqueue(wil->wmi_wq);
}
+static void wil_shutdown_bl(struct wil6210_priv *wil)
+{
+ u32 val;
+
+ wil_s(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake), BL_SHUTDOWN_HS_GRTD);
+
+ usleep_range(100, 150);
+
+ val = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake));
+ if (val & BL_SHUTDOWN_HS_RTD) {
+ wil_dbg_misc(wil, "BL is ready for halt\n");
+ return;
+ }
+
+ wil_err(wil, "BL did not report ready for halt\n");
+}
+
+/* this format is used by ARC embedded CPU for instruction memory */
+static inline u32 ARC_me_imm32(u32 d)
+{
+ return ((d & 0xffff0000) >> 16) | ((d & 0x0000ffff) << 16);
+}
+
+/* defines access to interrupt vectors for wil_freeze_bl */
+#define ARC_IRQ_VECTOR_OFFSET(N) ((N) * 8)
+/* ARC long jump instruction */
+#define ARC_JAL_INST (0x20200f80)
+
+static void wil_freeze_bl(struct wil6210_priv *wil)
+{
+ u32 jal, upc, saved;
+ u32 ivt3 = ARC_IRQ_VECTOR_OFFSET(3);
+
+ jal = wil_r(wil, wil->iccm_base + ivt3);
+ if (jal != ARC_me_imm32(ARC_JAL_INST)) {
+ wil_dbg_misc(wil, "invalid IVT entry found, skipping\n");
+ return;
+ }
+
+ /* prevent the target from entering deep sleep
+ * and disabling memory access
+ */
+ saved = wil_r(wil, RGF_USER_USAGE_8);
+ wil_w(wil, RGF_USER_USAGE_8, saved | BIT_USER_PREVENT_DEEP_SLEEP);
+ usleep_range(20, 25); /* let the BL process the bit */
+
+ /* redirect to endless loop in the INT_L1 context and let it trap */
+ wil_w(wil, wil->iccm_base + ivt3 + 4, ARC_me_imm32(ivt3));
+ usleep_range(20, 25); /* let the BL get into the trap */
+
+ /* verify the BL is frozen */
+ upc = wil_r(wil, RGF_USER_CPU_PC);
+ if (upc < ivt3 || (upc > (ivt3 + 8)))
+ wil_dbg_misc(wil, "BL freeze failed, PC=0x%08X\n", upc);
+
+ wil_w(wil, RGF_USER_USAGE_8, saved);
+}
+
+static void wil_bl_prepare_halt(struct wil6210_priv *wil)
+{
+ u32 tmp, ver;
+
+ /* before halting the device CPU, the driver must make sure the BL is
+ * not accessing host memory. This is done differently depending on the
+ * BL version:
+ * 1. For very old BL versions the procedure is skipped
+ * (not supported).
+ * 2. For old BL versions we use a special trick to freeze the BL.
+ * 3. For new BL versions we shut down the BL using a handshake
+ * procedure.
+ */
+ tmp = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_struct_version));
+ if (!tmp) {
+ wil_dbg_misc(wil, "old BL, skipping halt preperation\n");
+ return;
+ }
+
+ tmp = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake));
+ ver = BL_SHUTDOWN_HS_PROT_VER(tmp);
+
+ if (ver > 0)
+ wil_shutdown_bl(wil);
+ else
+ wil_freeze_bl(wil);
+}
+
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
@@ -671,7 +763,7 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
}
}
-static int wil_target_reset(struct wil6210_priv *wil)
+static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
{
int delay = 0;
u32 x, x1 = 0;
@@ -685,9 +777,16 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_halt_cpu(wil);
- /* clear all boot loader "ready" bits */
- wil_w(wil, RGF_USER_BL +
- offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
+ if (!no_flash) {
+ /* clear all boot loader "ready" bits */
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready), 0);
+ /* this should be safe to write even with old BLs */
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake), 0);
+ }
/* Clear Fw Download notification */
wil_c(wil, RGF_USER_USAGE_6, BIT(0));
@@ -728,21 +827,33 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
/* wait until device ready. typical time is 20..80 msec */
- do {
- msleep(RST_DELAY);
- x = wil_r(wil, RGF_USER_BL +
- offsetof(struct bl_dedicated_registers_v0,
- boot_loader_ready));
- if (x1 != x) {
- wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
- x1 = x;
- }
- if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
- x);
- return -ETIME;
- }
- } while (x != BL_READY);
+ if (no_flash)
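+ /* no-flash devices have no boot loader "ready" register;
+ * poll the PMU device-ready bit instead
+ */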
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, USER_EXT_USER_PMU_3);
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while ((x & BIT_PMU_DEVICE_RDY) == 0);
+ else
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready));
+ if (x1 != x) {
+ wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
+ x1, x);
+ x1 = x;
+ }
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while (x != BL_READY);
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -750,6 +861,21 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+ if (no_flash) {
+ /* Reset OTP HW vectors to fit 40MHz */
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME3, 0x1);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME4, 0x20027);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME5, 0x30003);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME6, 0x20002);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME7, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME8, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME9, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME10, 0x60001);
+ wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
+ }
+
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
}
@@ -760,6 +886,8 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
u8 retry_short;
int rc;
+ wil_refresh_fw_capabilities(wil);
+
rc = wmi_get_mgmt_retry(wil, &retry_short);
if (!rc) {
wiphy->retry_short = retry_short;
@@ -767,6 +895,43 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
}
}
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
+{
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ int features;
+
+ wil->keep_radio_on_during_sleep =
+ test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
+ wil->platform_capa) &&
+ test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+ wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+ wil->keep_radio_on_during_sleep);
+
+ if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ else
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+ if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
+ wiphy->max_sched_scan_reqs = 1;
+ wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
+ wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
+ wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
+ wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
+ }
+
+ if (wil->platform_ops.set_features) {
+ features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
+ wil->fw_capabilities) &&
+ test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
+ wil->platform_capa)) ?
+ BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+
+ wil->platform_ops.set_features(wil->platform_handle, features);
+ }
+}
+
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
{
le32_to_cpus(&r->base);
@@ -868,6 +1033,27 @@ static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
}
}
+static int wil_get_otp_info(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ u8 mac[8];
+
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC),
+ sizeof(mac));
+ if (!is_valid_ether_addr(mac)) {
+ wil_err(wil, "Invalid MAC %pM\n", mac);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(ndev->perm_addr, mac);
+ ether_addr_copy(wiphy->perm_addr, mac);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ ether_addr_copy(ndev->dev_addr, mac);
+
+ return 0;
+}
+
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
ulong to = msecs_to_jiffies(1000);
@@ -960,6 +1146,8 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
int wil_reset(struct wil6210_priv *wil, bool load_fw)
{
int rc;
+ unsigned long status_flags = BIT(wil_status_resetting);
+ int no_flash;
wil_dbg_misc(wil, "reset\n");
@@ -980,6 +1168,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
+ if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
+ wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
+ wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
+ }
+
+ if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
+ wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
+ wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
+ }
+
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_PRE_RESET);
@@ -989,6 +1187,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
set_bit(wil_status_resetting, wil->status);
+ if (test_bit(wil_status_collecting_dumps, wil->status)) {
+ /* device is collecting a crash dump; cancel the reset.
+ * the reset will take place once crash dump collection completes.
+ */
+ wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
+ rc = -EBUSY;
+ goto out;
+ }
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
@@ -1003,7 +1209,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
- bitmap_zero(wil->status, wil_status_last);
+ if (test_bit(wil_status_suspending, wil->status))
+ status_flags |= BIT(wil_status_suspending);
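+ /* clear all status bits except resetting (and suspending, if set) */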
+ bitmap_and(wil->status, wil->status, &status_flags,
+ wil_status_last);
+ wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
mutex_unlock(&wil->wmi_mutex);
wil_mask_irq(wil);
@@ -1013,37 +1223,53 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
flush_workqueue(wil->wq_service);
flush_workqueue(wil->wmi_wq);
- wil_bl_crash_info(wil, false);
+ no_flash = test_bit(hw_capa_no_flash, wil->hw_capa);
+ if (!no_flash)
+ wil_bl_crash_info(wil, false);
wil_disable_irq(wil);
- rc = wil_target_reset(wil);
+ rc = wil_target_reset(wil, no_flash);
wil6210_clear_irq(wil);
wil_enable_irq(wil);
wil_rx_fini(wil);
if (rc) {
- wil_bl_crash_info(wil, true);
- return rc;
+ if (!no_flash)
+ wil_bl_crash_info(wil, true);
+ goto out;
}
- rc = wil_get_bl_info(wil);
- if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
- rc = 0;
+ if (no_flash) {
+ rc = wil_get_otp_info(wil);
+ } else {
+ rc = wil_get_bl_info(wil);
+ if (rc == -EAGAIN && !load_fw)
+ /* ignore RF error if not going up */
+ rc = 0;
+ }
if (rc)
- return rc;
+ goto out;
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
wil_info(wil, "Use firmware <%s> + board <%s>\n",
wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+ if (!no_flash)
+ wil_bl_prepare_halt(wil);
+
wil_halt_cpu(wil);
memset(wil->fw_version, 0, sizeof(wil->fw_version));
/* Loading f/w from the file */
rc = wil_request_firmware(wil, wil->wil_fw_name, true);
if (rc)
- return rc;
- rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
+ goto out;
+ if (wil->brd_file_addr)
+ rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+ else
+ rc = wil_request_firmware(wil,
+ WIL_BOARD_FILE_NAME,
+ true);
if (rc)
- return rc;
+ goto out;
wil_pre_fw_config(wil);
wil_release_cpu(wil);
@@ -1055,6 +1281,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
reinit_completion(&wil->wmi_call);
reinit_completion(&wil->halp.comp);
+ clear_bit(wil_status_resetting, wil->status);
+
if (load_fw) {
wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
@@ -1071,11 +1299,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
+ wil_collect_fw_info(wil);
+
if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
wil_ps_update(wil, wil->ps_profile);
- wil_collect_fw_info(wil);
-
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
@@ -1088,6 +1316,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
return rc;
+
+out:
+ clear_bit(wil_status_resetting, wil->status);
+ return rc;
}
void wil_fw_error_recovery(struct wil6210_priv *wil)
@@ -1193,9 +1425,7 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
- wil_reset(wil, false);
-
- return 0;
+ return wil_reset(wil, false);
}
int wil_down(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 4a6ab2d0fdf1..7ba4e0af8f57 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -21,6 +21,7 @@
static int wil_open(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
+ int rc;
wil_dbg_misc(wil, "open\n");
@@ -30,16 +31,29 @@ static int wil_open(struct net_device *ndev)
return -EINVAL;
}
- return wil_up(wil);
+ rc = wil_pm_runtime_get(wil);
+ if (rc < 0)
+ return rc;
+
+ rc = wil_up(wil);
+ if (rc)
+ wil_pm_runtime_put(wil);
+
+ return rc;
}
static int wil_stop(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
+ int rc;
wil_dbg_misc(wil, "stop\n");
- return wil_down(wil);
+ rc = wil_down(wil);
+ if (!rc)
+ wil_pm_runtime_put(wil);
+
+ return rc;
}
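
wil_open()/wil_stop() above keep exactly one runtime-PM reference while
the interface is up: open takes it (and drops it again if wil_up()
fails), stop drops it only once wil_down() succeeded. The same balanced
pattern around a single HW access, as a sketch (touch_hw() is a
hypothetical accessor):

static int do_io(struct device *dev)
{
	int rc;

	rc = pm_runtime_get_sync(dev);	/* resume device if suspended */
	if (rc < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return rc;
	}

	rc = touch_hw(dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* re-arm autosuspend timer */
	return rc;
}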
static const struct net_device_ops wil_netdev_ops = {
@@ -136,7 +150,7 @@ void *wil_if_alloc(struct device *dev)
wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
/* default monitor channel */
ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
- cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+ cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
if (!ndev) {
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 6a3ab4bf916d..809092a49192 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -21,6 +22,7 @@
#include <linux/suspend.h>
#include "wil6210.h"
#include <linux/rtnetlink.h>
+#include <linux/pm_runtime.h>
static bool use_msi = true;
module_param(use_msi, bool, 0444);
@@ -30,29 +32,30 @@ static bool ftm_mode;
module_param(ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
static int wil6210_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused);
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
static
-void wil_set_capabilities(struct wil6210_priv *wil)
+int wil_set_capabilities(struct wil6210_priv *wil)
{
const char *wil_fw_name;
u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
RGF_USER_REVISION_ID_MASK);
+ int platform_capa;
+ struct fw_map *iccm_section, *sct;
- bitmap_zero(wil->hw_capabilities, hw_capability_last);
+ bitmap_zero(wil->hw_capa, hw_capa_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
WIL_FW_NAME_DEFAULT;
wil->chip_revision = chip_revision;
switch (jtag_id) {
case JTAG_DEV_ID_SPARROW:
+ memcpy(fw_mapping, sparrow_fw_mapping,
+ sizeof(sparrow_fw_mapping));
switch (chip_revision) {
case REVISION_ID_SPARROW_D0:
wil->hw_name = "Sparrow D0";
@@ -62,6 +65,12 @@ void wil_set_capabilities(struct wil6210_priv *wil)
if (wil_fw_verify_file_exists(wil, wil_fw_name))
wil->wil_fw_name = wil_fw_name;
+ sct = wil_find_fw_mapping("mac_rgf_ext");
+ if (!sct) {
+ wil_err(wil, "mac_rgf_ext section not found in fw_mapping\n");
+ return -EINVAL;
+ }
+ memcpy(sct, &sparrow_d0_mac_rgf_ext, sizeof(*sct));
break;
case REVISION_ID_SPARROW_B0:
wil->hw_name = "Sparrow B0";
@@ -72,21 +81,50 @@ void wil_set_capabilities(struct wil6210_priv *wil)
wil->hw_version = HW_VER_UNKNOWN;
break;
}
+ wil->rgf_fw_assert_code_addr = SPARROW_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
+ break;
+ case JTAG_DEV_ID_TALYN:
+ wil->hw_name = "Talyn";
+ wil->hw_version = HW_VER_TALYN;
+ memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
+ wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+ if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) &
+ BIT_NO_FLASH_INDICATION)
+ set_bit(hw_capa_no_flash, wil->hw_capa);
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
jtag_id, chip_revision);
wil->hw_name = "Unknown";
wil->hw_version = HW_VER_UNKNOWN;
+ return -EINVAL;
+ }
+
+ iccm_section = wil_find_fw_mapping("fw_code");
+ if (!iccm_section) {
+ wil_err(wil, "fw_code section not found in fw_mapping\n");
+ return -EINVAL;
}
+ wil->iccm_base = iccm_section->host;
- wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+ wil_info(wil, "Board hardware is %s, flash %sexist\n", wil->hw_name,
+ test_bit(hw_capa_no_flash, wil->hw_capa) ? "doesn't " : "");
+
+ /* Get platform capabilities */
+ if (wil->platform_ops.get_capa) {
+ platform_capa =
+ wil->platform_ops.get_capa(wil->platform_handle);
+ memcpy(wil->platform_capa, &platform_capa,
+ min(sizeof(wil->platform_capa), sizeof(platform_capa)));
+ }
/* extract FW capabilities from file without loading the FW */
wil_request_firmware(wil, wil->wil_fw_name, false);
+ wil_refresh_fw_capabilities(wil);
- if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
- wil_to_wiphy(wil)->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ return 0;
}
void wil_disable_irq(struct wil6210_priv *wil)
@@ -209,6 +247,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
+ int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
+ int i;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
@@ -244,21 +284,23 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* rollback to err_plat */
- /* device supports 48bit addresses */
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (rc) {
- dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ /* device supports >32bit addresses */
+ for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
+ rc = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
- dev_err(dev,
- "dma_set_mask_and_coherent(32) failed: %d\n",
- rc);
- goto err_plat;
+ dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+ dma_addr_size[i], rc);
+ continue;
}
- } else {
- wil->use_extended_dma_addr = 1;
+ dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+ wil->dma_addr_size = dma_addr_size[i];
+ break;
}
+ if (wil->dma_addr_size == 0)
+ goto err_plat;
+
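
The probe loop above walks the DMA mask widths in descending order and
keeps the first one the platform accepts. Reduced to its essentials
(sketch; set_widest_dma_mask() is not a driver function):

static int set_widest_dma_mask(struct device *dev, u32 *bits)
{
	static const int widths[] = {48, 40, 32};	/* descending */
	int i;

	for (i = 0; i < ARRAY_SIZE(widths); i++) {
		if (!dma_set_mask_and_coherent(dev,
					       DMA_BIT_MASK(widths[i]))) {
			*bits = widths[i];	/* widest supported mask */
			return 0;
		}
	}
	return -ENODEV;		/* not even 32-bit DMA available */
}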
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
@@ -293,18 +335,13 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* rollback to err_iounmap */
wil_info(wil, "CSR at %pR -> 0x%p\n", &pdev->resource[0], wil->csr);
- wil_set_capabilities(wil);
+ rc = wil_set_capabilities(wil);
+ if (rc) {
+ wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
+ goto err_iounmap;
+ }
wil6210_clear_irq(wil);
- wil->keep_radio_on_during_sleep =
- wil->platform_ops.keep_radio_on_during_sleep &&
- wil->platform_ops.keep_radio_on_during_sleep(
- wil->platform_handle) &&
- test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
-
- wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
- wil->keep_radio_on_during_sleep);
-
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {
@@ -319,20 +356,19 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto bus_disable;
}
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
- wil->pm_notify.notifier_call = wil6210_pm_notify;
+ if (IS_ENABLED(CONFIG_PM))
+ wil->pm_notify.notifier_call = wil6210_pm_notify;
+
rc = register_pm_notifier(&wil->pm_notify);
if (rc)
/* Do not fail the driver initialization, as suspend can
* be prevented in a later phase if needed
*/
wil_err(wil, "register_pm_notifier failed: %d\n", rc);
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
wil6210_debugfs_init(wil);
+ wil_pm_runtime_allow(wil);
return 0;
@@ -359,11 +395,9 @@ static void wil_pcie_remove(struct pci_dev *pdev)
wil_dbg_misc(wil, "pcie_remove\n");
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&wil->pm_notify);
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
+
+ wil_pm_runtime_forbid(wil);
wil6210_debugfs_remove(wil);
rtnl_lock();
@@ -381,18 +415,19 @@ static void wil_pcie_remove(struct pci_dev *pdev)
static const struct pci_device_id wil6210_pcie_ids[] = {
{ PCI_DEVICE(0x1ae9, 0x0310) },
{ PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
+ { PCI_DEVICE(0x17cb, 0x1201) }, /* Talyn */
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
-
static int wil6210_suspend(struct device *dev, bool is_runtime)
{
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
@@ -400,16 +435,18 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
if (rc)
goto out;
- rc = wil_suspend(wil, is_runtime);
+ rc = wil_suspend(wil, is_runtime, keep_radio_on);
if (!rc) {
- wil->suspend_stats.successful_suspends++;
-
- /* If platform device supports keep_radio_on_during_sleep
- * it will control PCIe master
+ /* If the radio stays on, the platform device will control the
+ * PCIe master
+ */
- if (!wil->keep_radio_on_during_sleep)
+ if (!keep_radio_on) {
/* disable bus mastering */
pci_clear_master(pdev);
+ wil->suspend_stats.r_off.successful_suspends++;
+ } else {
+ wil->suspend_stats.r_on.successful_suspends++;
+ }
}
out:
return rc;
@@ -420,23 +457,32 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
- /* If platform device supports keep_radio_on_during_sleep it will
- * control PCIe master
+ /* If the radio stays on, the platform device will control the
+ * PCIe master
+ */
- if (!wil->keep_radio_on_during_sleep)
+ if (!keep_radio_on)
/* allow master */
pci_set_master(pdev);
- rc = wil_resume(wil, is_runtime);
+ rc = wil_resume(wil, is_runtime, keep_radio_on);
if (rc) {
wil_err(wil, "device failed to resume (%d)\n", rc);
- wil->suspend_stats.failed_resumes++;
- if (!wil->keep_radio_on_during_sleep)
+ if (!keep_radio_on) {
pci_clear_master(pdev);
+ wil->suspend_stats.r_off.failed_resumes++;
+ } else {
+ wil->suspend_stats.r_on.failed_resumes++;
+ }
} else {
- wil->suspend_stats.successful_resumes++;
+ if (keep_radio_on)
+ wil->suspend_stats.r_on.successful_resumes++;
+ else
+ wil->suspend_stats.r_off.successful_resumes++;
}
return rc;
@@ -481,21 +527,49 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
return rc;
}
-static int wil6210_pm_suspend(struct device *dev)
+static int __maybe_unused wil6210_pm_suspend(struct device *dev)
{
return wil6210_suspend(dev, false);
}
-static int wil6210_pm_resume(struct device *dev)
+static int __maybe_unused wil6210_pm_resume(struct device *dev)
{
return wil6210_resume(dev, false);
}
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
+static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil_dbg_pm(wil, "Runtime idle\n");
+
+ return wil_can_suspend(wil, true);
+}
+
+static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
+{
+ return wil6210_resume(dev, true);
+}
+
+static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 1;
+ }
+
+ return wil6210_suspend(dev, true);
+}
static const struct dev_pm_ops wil6210_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+ SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
+ wil6210_pm_runtime_resume,
+ wil6210_pm_runtime_idle)
};
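
Marking the callbacks __maybe_unused is what lets the old nested
CONFIG_PM/CONFIG_PM_SLEEP #ifdefs go away: with CONFIG_PM disabled the
SET_*_PM_OPS macros expand to nothing, and without the attribute the
now-unreferenced callbacks would trip -Wunused-function. The shape in
miniature (the example_* names are illustrative):

static int __maybe_unused example_suspend(struct device *dev)
{
	return 0;	/* nothing to quiesce in this sketch */
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, NULL)
};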
static struct pci_driver wil6210_driver = {
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 8f5d1b447aaa..0a96518a566f 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -16,15 +16,30 @@
#include "wil6210.h"
#include <linux/jiffies.h>
+#include <linux/pm_runtime.h>
+
+#define WIL6210_AUTOSUSPEND_DELAY_MS (1000)
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct wireless_dev *wdev = wil->wdev;
struct net_device *ndev = wil_to_ndev(wil);
+ bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+ wil->fw_capabilities);
wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
+ if (wmi_only || debug_fw) {
+ wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
+ wmi_only ? "wmi_only" : "debug_fw");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (is_runtime && !wil->platform_ops.suspend) {
+ rc = -EBUSY;
+ goto out;
+ }
if (!(ndev->flags & IFF_UP)) {
/* can always sleep when down */
wil_dbg_pm(wil, "Interface is down\n");
@@ -44,6 +59,10 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
/* interface is running */
switch (wdev->iftype) {
case NL80211_IFTYPE_MONITOR:
+ wil_dbg_pm(wil, "Sniffer\n");
+ rc = -EBUSY;
+ goto out;
+ /* for STA-like interface, don't runtime suspend */
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
if (test_bit(wil_status_fwconnecting, wil->status)) {
@@ -51,6 +70,12 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
rc = -EBUSY;
goto out;
}
+ /* Runtime pm not supported in case the interface is up */
+ if (is_runtime) {
+ wil_dbg_pm(wil, "STA-like interface\n");
+ rc = -EBUSY;
+ goto out;
+ }
break;
/* AP-like interface - can't suspend */
default:
@@ -120,6 +145,13 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
/* Prevent handling of new tx and wmi commands */
set_bit(wil_status_suspending, wil->status);
+ if (test_bit(wil_status_collecting_dumps, wil->status)) {
+ /* Device collects crash dump, cancel the suspend */
+ wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+ clear_bit(wil_status_suspending, wil->status);
+ wil->suspend_stats.rejected_by_host++;
+ return -EBUSY;
+ }
wil_update_net_queues_bh(wil, NULL, true);
if (!wil_is_tx_idle(wil)) {
@@ -158,7 +190,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");
- wil->suspend_stats.failed_suspends++;
+ wil->suspend_stats.r_on.failed_suspends++;
goto resume_after_fail;
}
wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
@@ -174,7 +206,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
*/
if (!wil_is_wmi_idle(wil)) {
wil_err(wil, "suspend failed due to pending WMI events\n");
- wil->suspend_stats.failed_suspends++;
+ wil->suspend_stats.r_on.failed_suspends++;
goto resume_after_fail;
}
@@ -188,7 +220,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
if (rc) {
wil_err(wil, "platform device failed to suspend (%d)\n",
rc);
- wil->suspend_stats.failed_suspends++;
+ wil->suspend_stats.r_on.failed_suspends++;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
wil_unmask_irq(wil);
goto resume_after_fail;
@@ -230,11 +262,21 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
wil_dbg_pm(wil, "suspend radio off\n");
+ set_bit(wil_status_suspending, wil->status);
+ if (test_bit(wil_status_collecting_dumps, wil->status)) {
+ /* Device collects crash dump, cancel the suspend */
+ wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+ clear_bit(wil_status_suspending, wil->status);
+ wil->suspend_stats.rejected_by_host++;
+ return -EBUSY;
+ }
+
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
rc = wil_down(wil);
if (rc) {
wil_err(wil, "wil_down : %d\n", rc);
+ wil->suspend_stats.r_off.failed_suspends++;
goto out;
}
}
@@ -247,6 +289,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
rc = wil->platform_ops.suspend(wil->platform_handle, false);
if (rc) {
wil_enable_irq(wil);
+ wil->suspend_stats.r_off.failed_suspends++;
goto out;
}
}
@@ -254,6 +297,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
set_bit(wil_status_suspended, wil->status);
out:
+ clear_bit(wil_status_suspending, wil->status);
wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
return rc;
@@ -279,12 +323,9 @@ static int wil_resume_radio_off(struct wil6210_priv *wil)
return rc;
}
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
{
int rc = 0;
- struct net_device *ndev = wil_to_ndev(wil);
- bool keep_radio_on = ndev->flags & IFF_UP &&
- wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
@@ -301,19 +342,12 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
- if (!rc)
- wil->suspend_stats.suspend_start_time = ktime_get();
-
return rc;
}
-int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
{
int rc = 0;
- struct net_device *ndev = wil_to_ndev(wil);
- bool keep_radio_on = ndev->flags & IFF_UP &&
- wil->keep_radio_on_during_sleep;
- unsigned long long suspend_time_usec = 0;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
@@ -331,20 +365,49 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
else
rc = wil_resume_radio_off(wil);
- if (rc)
- goto out;
-
- suspend_time_usec =
- ktime_to_us(ktime_sub(ktime_get(),
- wil->suspend_stats.suspend_start_time));
- wil->suspend_stats.total_suspend_time += suspend_time_usec;
- if (suspend_time_usec < wil->suspend_stats.min_suspend_time)
- wil->suspend_stats.min_suspend_time = suspend_time_usec;
- if (suspend_time_usec > wil->suspend_stats.max_suspend_time)
- wil->suspend_stats.max_suspend_time = suspend_time_usec;
-
out:
- wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n",
- is_runtime ? "runtime" : "system", rc, suspend_time_usec);
+ wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system",
+ rc);
return rc;
}
+
+void wil_pm_runtime_allow(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+}
+
+void wil_pm_runtime_forbid(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ pm_runtime_forbid(dev);
+ pm_runtime_get_noresume(dev);
+}
+
+int wil_pm_runtime_get(struct wil6210_priv *wil)
+{
+ int rc;
+ struct device *dev = wil_to_dev(wil);
+
+ rc = pm_runtime_get_sync(dev);
+ if (rc < 0) {
+ wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
+ pm_runtime_put_noidle(dev);
+ return rc;
+ }
+
+ return 0;
+}
+
+void wil_pm_runtime_put(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
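
wil_pm_runtime_allow()/wil_pm_runtime_forbid() bracket the driver's
lifetime: probe drops the initial usage count and enables autosuspend,
remove pins the device again before tear-down. The pairing in
isolation, as a sketch:

static void on_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* ms */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);	/* drop the initial reference */
	pm_runtime_allow(dev);		/* opt in to runtime PM */
}

static void on_remove(struct device *dev)
{
	pm_runtime_forbid(dev);		/* resumes and blocks suspend */
	pm_runtime_get_noresume(dev);	/* balance probe's put_noidle */
}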
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 2e301b6b32a9..4ea27b0bd278 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -111,14 +111,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
*
* The HW has a limitation that all vring addresses must share the same
* upper 16 bits of the 48-bit address. To work around that,
- * if we are using 48 bit addresses switch to 32 bit allocation
- * before allocating vring memory.
+ * if we are using more than 32-bit addresses, switch to a 32-bit
+ * allocation before allocating vring memory.
*
* The return value of dma_set_mask_and_coherent is not checked, since
* we assume that if setting the mask succeeded during initialization
* on this system, setting it again will not fail
*/
- if (wil->use_extended_dma_addr)
+ if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
pmc->pring_va = dma_alloc_coherent(dev,
@@ -126,8 +126,9 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
&pmc->pring_pa,
GFP_KERNEL);
- if (wil->use_extended_dma_addr)
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(wil->dma_addr_size));
wil_dbg_misc(wil,
"pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 389c718cd257..16b8a4e5201f 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -178,14 +178,14 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
*
* The HW has a limitation that all vring addresses must share the same
* upper 16 bits of the 48-bit address. To work around that,
- * if we are using 48 bit addresses switch to 32 bit allocation
- * before allocating vring memory.
+ * if we are using more than 32-bit addresses, switch to a 32-bit
+ * allocation before allocating vring memory.
*
* The return value of dma_set_mask_and_coherent is not checked, since
* we assume that if setting the mask succeeded during initialization
* on this system, setting it again will not fail
*/
- if (wil->use_extended_dma_addr)
+ if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -195,8 +195,9 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
return -ENOMEM;
}
- if (wil->use_extended_dma_addr)
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(wil->dma_addr_size));
/* initially, all descriptors are SW owned
* For Tx and Rx, ownership bit is at the same location, thus
@@ -347,7 +348,6 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
struct sk_buff *skb)
{
- struct wireless_dev *wdev = wil->wdev;
struct wil6210_rtap {
struct ieee80211_radiotap_header rthdr;
/* fields should be in the order of bits in rthdr.it_present */
@@ -374,7 +374,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
int rtap_len = sizeof(struct wil6210_rtap);
int phy_length = 0; /* phy info header size, bytes */
static char phy_data[128];
- struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+ struct ieee80211_channel *ch = wil->monitor_chandef.chan;
if (rtap_include_phy_info) {
rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 1e340d04bd70..0df2aada6659 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -82,18 +83,18 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
*/
#define WIL_MAX_MPDU_OVERHEAD (62)
-struct wil_suspend_stats {
+struct wil_suspend_count_stats {
unsigned long successful_suspends;
- unsigned long failed_suspends;
unsigned long successful_resumes;
+ unsigned long failed_suspends;
unsigned long failed_resumes;
- unsigned long rejected_by_device;
+};
+
+struct wil_suspend_stats {
+ struct wil_suspend_count_stats r_off;
+ struct wil_suspend_count_stats r_on;
+ unsigned long rejected_by_device; /* only radio on */
unsigned long rejected_by_host;
- unsigned long long total_suspend_time;
- unsigned long long min_suspend_time;
- unsigned long long max_suspend_time;
- ktime_t collection_start;
- ktime_t suspend_start_time;
};
/* Calculate MAC buffer size for the firmware. It includes all overhead,
@@ -161,10 +162,15 @@ struct RGF_ICR {
#define RGF_USER_USAGE_6 (0x880018)
#define BIT_USER_OOB_MODE BIT(31)
#define BIT_USER_OOB_R2_MODE BIT(30)
+#define RGF_USER_USAGE_8 (0x880020)
+ #define BIT_USER_PREVENT_DEEP_SLEEP BIT(0)
+ #define BIT_USER_SUPPORT_T_POWER_ON_0 BIT(1)
+ #define BIT_USER_EXT_CLK BIT(2)
#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
#define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
#define BIT_USER_USER_CPU_MAN_RST BIT(1) /* user_cpu_man_rst */
+#define RGF_USER_CPU_PC (0x8801e8)
#define RGF_USER_MAC_CPU_0 (0x8801fc)
#define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */
#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
@@ -190,6 +196,19 @@ struct RGF_ICR {
#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c)
#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
#define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
+#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0)
+ #define BIT_NO_FLASH_INDICATION BIT(8)
+#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec)
+#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0)
+#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4)
+#define RGF_USER_XPM_IFC_RD_TIME4 (0x880cf8)
+#define RGF_USER_XPM_IFC_RD_TIME5 (0x880cfc)
+#define RGF_USER_XPM_IFC_RD_TIME6 (0x880d00)
+#define RGF_USER_XPM_IFC_RD_TIME7 (0x880d04)
+#define RGF_USER_XPM_IFC_RD_TIME8 (0x880d08)
+#define RGF_USER_XPM_IFC_RD_TIME9 (0x880d0c)
+#define RGF_USER_XPM_IFC_RD_TIME10 (0x880d10)
+#define RGF_USER_XPM_RD_DOUT_SAMPLE_TIME (0x880d64)
#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
#define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
@@ -280,22 +299,33 @@ struct RGF_ICR {
#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
+#define USER_EXT_USER_PMU_3 (0x88d00c)
+ #define BIT_PMU_DEVICE_RDY BIT(0)
+
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
#define JTAG_DEV_ID_SPARROW (0x2632072f)
+ #define JTAG_DEV_ID_TALYN (0x7e0e1)
#define RGF_USER_REVISION_ID (0x88afe4)
#define RGF_USER_REVISION_ID_MASK (3)
#define REVISION_ID_SPARROW_B0 (0x0)
#define REVISION_ID_SPARROW_D0 (0x3)
+#define RGF_OTP_MAC (0x8a0620)
+
/* crash codes for FW/Ucode stored here */
-#define RGF_FW_ASSERT_CODE (0x91f020)
-#define RGF_UCODE_ASSERT_CODE (0x91f028)
+
+/* ASSERT RGFs */
+#define SPARROW_RGF_FW_ASSERT_CODE (0x91f020)
+#define SPARROW_RGF_UCODE_ASSERT_CODE (0x91f028)
+#define TALYN_RGF_FW_ASSERT_CODE (0xa37020)
+#define TALYN_RGF_UCODE_ASSERT_CODE (0xa37028)
enum {
HW_VER_UNKNOWN,
HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
+ HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */
};
/* popular locations */
@@ -311,6 +341,10 @@ enum {
#define WIL_DATA_COMPLETION_TO_MS 200
/* Hardware definitions end */
+#define SPARROW_FW_MAPPING_TABLE_SIZE 10
+#define TALYN_FW_MAPPING_TABLE_SIZE 13
+#define MAX_FW_MAPPING_TABLE_SIZE 13
+
struct fw_map {
u32 from; /* linker address - from, inclusive */
u32 to; /* linker address - to, exclusive */
@@ -320,7 +354,10 @@ struct fw_map {
};
/* array size should be in sync with the actual definition in wmi.c */
-extern const struct fw_map fw_mapping[10];
+extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map sparrow_d0_mac_rgf_ext;
+extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
+extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
/**
* mk_cidxtid - construct @cidxtid field
@@ -435,12 +472,13 @@ enum { /* for wil6210_priv.status */
wil_status_fwconnected,
wil_status_dontscan,
wil_status_mbox_ready, /* MBOX structures ready */
- wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+ wil_status_irqen, /* interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
wil_status_resuming, /* resume in progress */
+ wil_status_collecting_dumps, /* crashdump collection in progress */
wil_status_last /* keep last */
};
@@ -565,7 +603,8 @@ enum {
};
enum {
- hw_capability_last
+ hw_capa_no_flash,
+ hw_capa_last
};
struct wil_probe_client_req {
@@ -616,6 +655,16 @@ struct blink_on_off_time {
u32 off_ms;
};
+struct wil_debugfs_iomem_data {
+ void *offset;
+ struct wil6210_priv *wil;
+};
+
+struct wil_debugfs_data {
+ struct wil_debugfs_iomem_data *data_arr;
+ int iomem_data_count;
+};
+
extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
extern u8 led_id;
extern u8 led_polarity;
@@ -631,14 +680,19 @@ struct wil6210_priv {
u8 chip_revision;
const char *hw_name;
const char *wil_fw_name;
- DECLARE_BITMAP(hw_capabilities, hw_capability_last);
+ char *board_file;
+ u32 brd_file_addr;
+ u32 brd_file_max_size;
+ DECLARE_BITMAP(hw_capa, hw_capa_last);
DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
u32 recovery_count; /* num of FW recovery attempts in a short time */
u32 recovery_state; /* FW recovery state machine */
unsigned long last_fw_recovery; /* jiffies of last fw recovery */
wait_queue_head_t wq; /* for all wait_event() use */
/* profile */
+ struct cfg80211_chan_def monitor_chandef;
u32 monitor_flags;
u32 privacy; /* secure connection? */
u8 hidden_ssid; /* relevant in AP mode */
@@ -694,7 +748,7 @@ struct wil6210_priv {
struct wil_sta_info sta[WIL6210_MAX_CID];
int bcast_vring;
u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */
- bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
+ u32 dma_addr_size; /* indicates dma addr size */
/* scan */
struct cfg80211_scan_request *scan_request;
@@ -703,11 +757,12 @@ struct wil6210_priv {
atomic_t isr_count_rx, isr_count_tx;
/* debugfs */
struct dentry *debug;
- struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+ struct wil_blob_wrapper blobs[MAX_FW_MAPPING_TABLE_SIZE];
u8 discovery_mode;
u8 abft_len;
u8 wakeup_trigger;
struct wil_suspend_stats suspend_stats;
+ struct wil_debugfs_data dbg_data;
void *platform_handle;
struct wil_platform_ops platform_ops;
@@ -731,16 +786,16 @@ struct wil6210_priv {
int fw_calib_result;
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notify;
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
bool suspend_resp_rcvd;
bool suspend_resp_comp;
u32 bus_request_kbps;
u32 bus_request_kbps_pre_suspend;
+
+ u32 rgf_fw_assert_code_addr;
+ u32 rgf_ucode_assert_code_addr;
+ u32 iccm_base;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -861,10 +916,13 @@ int wil_up(struct wil6210_priv *wil);
int __wil_up(struct wil6210_priv *wil);
int wil_down(struct wil6210_priv *wil);
int __wil_down(struct wil6210_priv *wil);
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil);
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
void wil_set_ethtoolops(struct net_device *ndev);
+struct fw_map *wil_find_fw_mapping(const char *section);
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
@@ -997,11 +1055,17 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load);
+int wil_request_board(struct wil6210_priv *wil, const char *name);
bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
+void wil_pm_runtime_allow(struct wil6210_priv *wil);
+void wil_pm_runtime_forbid(struct wil6210_priv *wil);
+int wil_pm_runtime_get(struct wil6210_priv *wil);
+void wil_pm_runtime_put(struct wil6210_priv *wil);
+
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
bool wil_is_wmi_idle(struct wil6210_priv *wil);
int wmi_resume(struct wil6210_priv *wil);
int wmi_suspend(struct wil6210_priv *wil);
@@ -1016,4 +1080,8 @@ void wil_halp_unvote(struct wil6210_priv *wil);
void wil6210_set_halp(struct wil6210_priv *wil);
void wil6210_clear_halp(struct wil6210_priv *wil);
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+ struct cfg80211_sched_scan_request *request);
+int wmi_stop_sched_scan(struct wil6210_priv *wil);
+
#endif /* __WIL6210_H__ */
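
hw_capa, fw_capabilities and platform_capa are all DECLARE_BITMAP()s,
so every feature test in the driver is a plain set_bit()/test_bit() on
an unsigned-long array. The idiom in isolation (capa_foo/capa_bar are
made-up capabilities):

enum { capa_foo, capa_bar, capa_last };

static void capa_example(void)
{
	DECLARE_BITMAP(capa, capa_last);

	bitmap_zero(capa, capa_last);	/* start with no capabilities */
	set_bit(capa_foo, capa);	/* discovered at probe time */

	if (test_bit(capa_foo, capa))
		pr_info("foo supported\n");
}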
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index e53cf0cf7031..1ed330674d9b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -72,6 +72,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
+ set_bit(wil_status_collecting_dumps, wil->status);
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resetting, wil->status)) {
+ wil_err(wil, "cannot collect fw dump during suspend/reset\n");
+ clear_bit(wil_status_collecting_dumps, wil->status);
+ return -EINVAL;
+ }
+
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
@@ -91,6 +100,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
(const void __iomem * __force)data, len);
}
+ clear_bit(wil_status_collecting_dumps, wil->status);
+
return 0;
}
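
Note the symmetric ordering with the suspend paths in pm.c: each side
sets its own status bit before testing the other's, the intent being
that when a dump and a suspend race, at least one of them backs off.
Sketch of the two sides (condensed from the code above):

static int start_dump(struct wil6210_priv *wil)
{
	set_bit(wil_status_collecting_dumps, wil->status);
	if (test_bit(wil_status_suspending, wil->status)) {
		clear_bit(wil_status_collecting_dumps, wil->status);
		return -EINVAL;	/* suspend won the race */
	}
	return 0;
}

static int start_suspend(struct wil6210_priv *wil)
{
	set_bit(wil_status_suspending, wil->status);
	if (test_bit(wil_status_collecting_dumps, wil->status)) {
		clear_bit(wil_status_suspending, wil->status);
		return -EBUSY;	/* dump won the race */
	}
	return 0;
}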
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 5d9e4bfcb045..177026e5323b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -27,6 +27,18 @@ enum wil_platform_event {
WIL_PLATFORM_EVT_POST_SUSPEND = 4,
};
+enum wil_platform_features {
+ WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+ WIL_PLATFORM_FEATURE_MAX,
+};
+
+enum wil_platform_capa {
+ WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0,
+ WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
+ WIL_PLATFORM_CAPA_EXT_CLK = 2,
+ WIL_PLATFORM_CAPA_MAX,
+};
+
/**
* struct wil_platform_ops - wil platform module calls from this
* driver to platform driver
@@ -37,7 +49,8 @@ struct wil_platform_ops {
int (*resume)(void *handle, bool device_powered_on);
void (*uninit)(void *handle);
int (*notify)(void *handle, enum wil_platform_event evt);
- bool (*keep_radio_on_during_sleep)(void *handle);
+ int (*get_capa)(void *handle);
+ void (*set_features)(void *handle, int features);
};
/**
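
A platform back-end reports its capabilities as a bitmask indexed by
enum wil_platform_capa; wil_set_capabilities() copies the returned int
into the platform_capa bitmap. A hypothetical implementation:

static int my_platform_get_capa(void *handle)
{
	/* advertise radio-on-in-suspend plus external clock support */
	return BIT(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND) |
	       BIT(WIL_PLATFORM_CAPA_EXT_CLK);
}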
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ffdd2fa401b1..b31e2514f8c2 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -38,6 +39,7 @@ MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
/**
* WMI event receiving - theory of operations
@@ -69,23 +71,23 @@ MODULE_PARM_DESC(led_id,
* On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
* AHB addresses starting from 0x880000
*
- * Internally, firmware uses addresses that allows faster access but
+ * Internally, firmware uses addresses that allow faster access but
* are invisible from the host. To read from these addresses, an
* alternative AHB address must be used.
- *
- * Memory mapping
- * Linker address PCI/Host address
- * 0x880000 .. 0xa80000 2Mb BAR0
- * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM
- * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH
*/
/**
- * @fw_mapping provides memory remapping table
+ * @sparrow_fw_mapping provides memory remapping table for sparrow
*
+ * array size should be in sync with the declaration in wil6210.h
+ *
+ * Sparrow memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xa80000 2Mb BAR0
+ * 0x800000 .. 0x808000 0x900000 .. 0x908000 32k DCCM
+ * 0x840000 .. 0x860000 0x908000 .. 0x928000 128k PERIPH
*/
-const struct fw_map fw_mapping[] = {
+const struct fw_map sparrow_fw_mapping[] = {
/* FW code RAM 256k */
{0x000000, 0x040000, 0x8c0000, "fw_code", true},
/* FW data RAM 32k */
@@ -111,6 +113,59 @@ const struct fw_map fw_mapping[] = {
{0x800000, 0x804000, 0x940000, "uc_data", false},
};
+/**
+ * @sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0;
+ * it is a bit larger to support extra features
+ */
+const struct fw_map sparrow_d0_mac_rgf_ext = {
+ 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true
+};
+
+/**
+ * @talyn_fw_mapping provides memory remapping table for Talyn
+ *
+ * array size should be in sync with the declaration in wil6210.h
+ *
+ * Talyn memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xc80000 4Mb BAR0
+ * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
+ * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
+ */
+const struct fw_map talyn_fw_mapping[] = {
+ /* FW code RAM 1M */
+ {0x000000, 0x100000, 0x900000, "fw_code", true},
+ /* FW data RAM 128k */
+ {0x800000, 0x820000, 0xa00000, "fw_data", true},
+ /* periph. data RAM 96k */
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ /* mac_ext_rgf 1344b */
+ {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true},
+ /* ext USER RGF 4k */
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true},
+ /* OTP 4k */
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true},
+ /* DMA EXT RGF 64k */
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true},
+ /* upper area 1536k */
+ {0x900000, 0xa80000, 0x900000, "upper", true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+ */
+ /* ucode code RAM 256k */
+ {0x000000, 0x040000, 0xa38000, "uc_code", false},
+ /* ucode data RAM 32k */
+ {0x800000, 0x808000, 0xa78000, "uc_data", false},
+};
+
+struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
+
struct blink_on_off_time led_blink_time[] = {
{WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS},
{WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS},
@@ -138,15 +193,35 @@ static u32 wmi_addr_remap(u32 x)
}
/**
+ * find fw_mapping entry by section name
+ * @section - section name
+ *
+ * Return a pointer to the section entry, or NULL if not found
+ */
+struct fw_map *wil_find_fw_mapping(const char *section)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++)
+ if (fw_mapping[i].name &&
+ !strcmp(section, fw_mapping[i].name))
+ return &fw_mapping[i];
+
+ return NULL;
+}
+
+/**
* Check address validity for WMI buffer; remap if needed
* @ptr - internal (linker) fw/ucode address
+ * @size - if non-zero, validate that the block does not
+ * exceed the device memory (BAR)
*
* Valid buffer should be DWORD aligned
*
* return address for accessing buffer from the host;
* if buffer is not valid, return NULL.
*/
-void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
{
u32 off;
u32 ptr = le32_to_cpu(ptr_);
@@ -161,10 +236,17 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
off = HOSTADDR(ptr);
if (off > wil->bar_size - 4)
return NULL;
+ if (size && ((off + size > wil->bar_size) || (off + size < off)))
+ return NULL;
return wil->csr + off;
}
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ return wmi_buffer_block(wil, ptr_, 0);
+}
+
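
With the size parameter, callers can validate a whole block - including
the off + size wraparound - before touching it, instead of only the
first dword. A usage sketch (copy_fw_block() is illustrative, not a
driver function):

static int copy_fw_block(struct wil6210_priv *wil, void *dst,
			 __le32 ptr, u32 len)
{
	void __iomem *src = wmi_buffer_block(wil, ptr, len);

	if (!src)
		return -EINVAL;	/* outside BAR, or off + len wrapped */

	memcpy_fromio(dst, src, len);
	return 0;
}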
/**
* Check address validity
*/
@@ -198,6 +280,242 @@ int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
return 0;
}
+static const char *cmdid2name(u16 cmdid)
+{
+ switch (cmdid) {
+ case WMI_NOTIFY_REQ_CMDID:
+ return "WMI_NOTIFY_REQ_CMD";
+ case WMI_START_SCAN_CMDID:
+ return "WMI_START_SCAN_CMD";
+ case WMI_CONNECT_CMDID:
+ return "WMI_CONNECT_CMD";
+ case WMI_DISCONNECT_CMDID:
+ return "WMI_DISCONNECT_CMD";
+ case WMI_SW_TX_REQ_CMDID:
+ return "WMI_SW_TX_REQ_CMD";
+ case WMI_GET_RF_SECTOR_PARAMS_CMDID:
+ return "WMI_GET_RF_SECTOR_PARAMS_CMD";
+ case WMI_SET_RF_SECTOR_PARAMS_CMDID:
+ return "WMI_SET_RF_SECTOR_PARAMS_CMD";
+ case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID:
+ return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD";
+ case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID:
+ return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD";
+ case WMI_BRP_SET_ANT_LIMIT_CMDID:
+ return "WMI_BRP_SET_ANT_LIMIT_CMD";
+ case WMI_TOF_SESSION_START_CMDID:
+ return "WMI_TOF_SESSION_START_CMD";
+ case WMI_AOA_MEAS_CMDID:
+ return "WMI_AOA_MEAS_CMD";
+ case WMI_PMC_CMDID:
+ return "WMI_PMC_CMD";
+ case WMI_TOF_GET_TX_RX_OFFSET_CMDID:
+ return "WMI_TOF_GET_TX_RX_OFFSET_CMD";
+ case WMI_TOF_SET_TX_RX_OFFSET_CMDID:
+ return "WMI_TOF_SET_TX_RX_OFFSET_CMD";
+ case WMI_VRING_CFG_CMDID:
+ return "WMI_VRING_CFG_CMD";
+ case WMI_BCAST_VRING_CFG_CMDID:
+ return "WMI_BCAST_VRING_CFG_CMD";
+ case WMI_TRAFFIC_SUSPEND_CMDID:
+ return "WMI_TRAFFIC_SUSPEND_CMD";
+ case WMI_TRAFFIC_RESUME_CMDID:
+ return "WMI_TRAFFIC_RESUME_CMD";
+ case WMI_ECHO_CMDID:
+ return "WMI_ECHO_CMD";
+ case WMI_SET_MAC_ADDRESS_CMDID:
+ return "WMI_SET_MAC_ADDRESS_CMD";
+ case WMI_LED_CFG_CMDID:
+ return "WMI_LED_CFG_CMD";
+ case WMI_PCP_START_CMDID:
+ return "WMI_PCP_START_CMD";
+ case WMI_PCP_STOP_CMDID:
+ return "WMI_PCP_STOP_CMD";
+ case WMI_SET_SSID_CMDID:
+ return "WMI_SET_SSID_CMD";
+ case WMI_GET_SSID_CMDID:
+ return "WMI_GET_SSID_CMD";
+ case WMI_SET_PCP_CHANNEL_CMDID:
+ return "WMI_SET_PCP_CHANNEL_CMD";
+ case WMI_GET_PCP_CHANNEL_CMDID:
+ return "WMI_GET_PCP_CHANNEL_CMD";
+ case WMI_P2P_CFG_CMDID:
+ return "WMI_P2P_CFG_CMD";
+ case WMI_START_LISTEN_CMDID:
+ return "WMI_START_LISTEN_CMD";
+ case WMI_START_SEARCH_CMDID:
+ return "WMI_START_SEARCH_CMD";
+ case WMI_DISCOVERY_STOP_CMDID:
+ return "WMI_DISCOVERY_STOP_CMD";
+ case WMI_DELETE_CIPHER_KEY_CMDID:
+ return "WMI_DELETE_CIPHER_KEY_CMD";
+ case WMI_ADD_CIPHER_KEY_CMDID:
+ return "WMI_ADD_CIPHER_KEY_CMD";
+ case WMI_SET_APPIE_CMDID:
+ return "WMI_SET_APPIE_CMD";
+ case WMI_CFG_RX_CHAIN_CMDID:
+ return "WMI_CFG_RX_CHAIN_CMD";
+ case WMI_TEMP_SENSE_CMDID:
+ return "WMI_TEMP_SENSE_CMD";
+ case WMI_DEL_STA_CMDID:
+ return "WMI_DEL_STA_CMD";
+ case WMI_DISCONNECT_STA_CMDID:
+ return "WMI_DISCONNECT_STA_CMD";
+ case WMI_VRING_BA_EN_CMDID:
+ return "WMI_VRING_BA_EN_CMD";
+ case WMI_VRING_BA_DIS_CMDID:
+ return "WMI_VRING_BA_DIS_CMD";
+ case WMI_RCP_DELBA_CMDID:
+ return "WMI_RCP_DELBA_CMD";
+ case WMI_RCP_ADDBA_RESP_CMDID:
+ return "WMI_RCP_ADDBA_RESP_CMD";
+ case WMI_PS_DEV_PROFILE_CFG_CMDID:
+ return "WMI_PS_DEV_PROFILE_CFG_CMD";
+ case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
+ return "WMI_SET_MGMT_RETRY_LIMIT_CMD";
+ case WMI_GET_MGMT_RETRY_LIMIT_CMDID:
+ return "WMI_GET_MGMT_RETRY_LIMIT_CMD";
+ case WMI_ABORT_SCAN_CMDID:
+ return "WMI_ABORT_SCAN_CMD";
+ case WMI_NEW_STA_CMDID:
+ return "WMI_NEW_STA_CMD";
+ case WMI_SET_THERMAL_THROTTLING_CFG_CMDID:
+ return "WMI_SET_THERMAL_THROTTLING_CFG_CMD";
+ case WMI_GET_THERMAL_THROTTLING_CFG_CMDID:
+ return "WMI_GET_THERMAL_THROTTLING_CFG_CMD";
+ case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID:
+ return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
+ case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
+ return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+ case WMI_START_SCHED_SCAN_CMDID:
+ return "WMI_START_SCHED_SCAN_CMD";
+ case WMI_STOP_SCHED_SCAN_CMDID:
+ return "WMI_STOP_SCHED_SCAN_CMD";
+ default:
+ return "Untracked CMD";
+ }
+}
+
+static const char *eventid2name(u16 eventid)
+{
+ switch (eventid) {
+ case WMI_NOTIFY_REQ_DONE_EVENTID:
+ return "WMI_NOTIFY_REQ_DONE_EVENT";
+ case WMI_DISCONNECT_EVENTID:
+ return "WMI_DISCONNECT_EVENT";
+ case WMI_SW_TX_COMPLETE_EVENTID:
+ return "WMI_SW_TX_COMPLETE_EVENT";
+ case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID:
+ return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT";
+ case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID:
+ return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT";
+ case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+ return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+ case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+ return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+ case WMI_BRP_SET_ANT_LIMIT_EVENTID:
+ return "WMI_BRP_SET_ANT_LIMIT_EVENT";
+ case WMI_FW_READY_EVENTID:
+ return "WMI_FW_READY_EVENT";
+ case WMI_TRAFFIC_RESUME_EVENTID:
+ return "WMI_TRAFFIC_RESUME_EVENT";
+ case WMI_TOF_GET_TX_RX_OFFSET_EVENTID:
+ return "WMI_TOF_GET_TX_RX_OFFSET_EVENT";
+ case WMI_TOF_SET_TX_RX_OFFSET_EVENTID:
+ return "WMI_TOF_SET_TX_RX_OFFSET_EVENT";
+ case WMI_VRING_CFG_DONE_EVENTID:
+ return "WMI_VRING_CFG_DONE_EVENT";
+ case WMI_READY_EVENTID:
+ return "WMI_READY_EVENT";
+ case WMI_RX_MGMT_PACKET_EVENTID:
+ return "WMI_RX_MGMT_PACKET_EVENT";
+ case WMI_TX_MGMT_PACKET_EVENTID:
+ return "WMI_TX_MGMT_PACKET_EVENT";
+ case WMI_SCAN_COMPLETE_EVENTID:
+ return "WMI_SCAN_COMPLETE_EVENT";
+ case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID:
+ return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT";
+ case WMI_CONNECT_EVENTID:
+ return "WMI_CONNECT_EVENT";
+ case WMI_EAPOL_RX_EVENTID:
+ return "WMI_EAPOL_RX_EVENT";
+ case WMI_BA_STATUS_EVENTID:
+ return "WMI_BA_STATUS_EVENT";
+ case WMI_RCP_ADDBA_REQ_EVENTID:
+ return "WMI_RCP_ADDBA_REQ_EVENT";
+ case WMI_DELBA_EVENTID:
+ return "WMI_DELBA_EVENT";
+ case WMI_VRING_EN_EVENTID:
+ return "WMI_VRING_EN_EVENT";
+ case WMI_DATA_PORT_OPEN_EVENTID:
+ return "WMI_DATA_PORT_OPEN_EVENT";
+ case WMI_AOA_MEAS_EVENTID:
+ return "WMI_AOA_MEAS_EVENT";
+ case WMI_TOF_SESSION_END_EVENTID:
+ return "WMI_TOF_SESSION_END_EVENT";
+ case WMI_TOF_GET_CAPABILITIES_EVENTID:
+ return "WMI_TOF_GET_CAPABILITIES_EVENT";
+ case WMI_TOF_SET_LCR_EVENTID:
+ return "WMI_TOF_SET_LCR_EVENT";
+ case WMI_TOF_SET_LCI_EVENTID:
+ return "WMI_TOF_SET_LCI_EVENT";
+ case WMI_TOF_FTM_PER_DEST_RES_EVENTID:
+ return "WMI_TOF_FTM_PER_DEST_RES_EVENT";
+ case WMI_TOF_CHANNEL_INFO_EVENTID:
+ return "WMI_TOF_CHANNEL_INFO_EVENT";
+ case WMI_TRAFFIC_SUSPEND_EVENTID:
+ return "WMI_TRAFFIC_SUSPEND_EVENT";
+ case WMI_ECHO_RSP_EVENTID:
+ return "WMI_ECHO_RSP_EVENT";
+ case WMI_LED_CFG_DONE_EVENTID:
+ return "WMI_LED_CFG_DONE_EVENT";
+ case WMI_PCP_STARTED_EVENTID:
+ return "WMI_PCP_STARTED_EVENT";
+ case WMI_PCP_STOPPED_EVENTID:
+ return "WMI_PCP_STOPPED_EVENT";
+ case WMI_GET_SSID_EVENTID:
+ return "WMI_GET_SSID_EVENT";
+ case WMI_GET_PCP_CHANNEL_EVENTID:
+ return "WMI_GET_PCP_CHANNEL_EVENT";
+ case WMI_P2P_CFG_DONE_EVENTID:
+ return "WMI_P2P_CFG_DONE_EVENT";
+ case WMI_LISTEN_STARTED_EVENTID:
+ return "WMI_LISTEN_STARTED_EVENT";
+ case WMI_SEARCH_STARTED_EVENTID:
+ return "WMI_SEARCH_STARTED_EVENT";
+ case WMI_DISCOVERY_STOPPED_EVENTID:
+ return "WMI_DISCOVERY_STOPPED_EVENT";
+ case WMI_CFG_RX_CHAIN_DONE_EVENTID:
+ return "WMI_CFG_RX_CHAIN_DONE_EVENT";
+ case WMI_TEMP_SENSE_DONE_EVENTID:
+ return "WMI_TEMP_SENSE_DONE_EVENT";
+ case WMI_RCP_ADDBA_RESP_SENT_EVENTID:
+ return "WMI_RCP_ADDBA_RESP_SENT_EVENT";
+ case WMI_PS_DEV_PROFILE_CFG_EVENTID:
+ return "WMI_PS_DEV_PROFILE_CFG_EVENT";
+ case WMI_SET_MGMT_RETRY_LIMIT_EVENTID:
+ return "WMI_SET_MGMT_RETRY_LIMIT_EVENT";
+ case WMI_GET_MGMT_RETRY_LIMIT_EVENTID:
+ return "WMI_GET_MGMT_RETRY_LIMIT_EVENT";
+ case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID:
+ return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT";
+ case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID:
+ return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT";
+ case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID:
+ return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
+ case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
+ return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+ case WMI_START_SCHED_SCAN_EVENTID:
+ return "WMI_START_SCHED_SCAN_EVENT";
+ case WMI_STOP_SCHED_SCAN_EVENTID:
+ return "WMI_STOP_SCHED_SCAN_EVENT";
+ case WMI_SCHED_SCAN_RESULT_EVENTID:
+ return "WMI_SCHED_SCAN_RESULT_EVENT";
+ default:
+ return "Untracked EVENT";
+ }
+}
+
static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
{
struct {
@@ -222,7 +540,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
uint retry;
int rc = 0;
- if (sizeof(cmd) + len > r->entry_size) {
+ if (len > r->entry_size - sizeof(cmd)) {
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
(int)(sizeof(cmd) + len), r->entry_size);
return -ERANGE;
@@ -294,7 +612,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
/* set command */
- wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+ wil_dbg_wmi(wil, "sending %s (0x%04x) [%d]\n",
+ cmdid2name(cmdid), cmdid, len);
wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
sizeof(cmd), true);
wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
@@ -566,8 +885,6 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
}
}
- /* FIXME FW can transmit only ucast frames to peer */
- /* FIXME real ring_id instead of hard coded 0 */
ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
wil->sta[evt->cid].status = wil_sta_conn_pending;
@@ -830,6 +1147,75 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
spin_unlock_bh(&sta->tid_rx_lock);
}
+static void
+wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_sched_scan_result_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
+ int ch_no;
+ u32 freq;
+ struct ieee80211_channel *channel;
+ s32 signal;
+ __le16 fc;
+ u32 d_len;
+ struct cfg80211_bss *bss;
+
+ if (flen < 0) {
+ wil_err(wil, "sched scan result event too short, len %d\n",
+ len);
+ return;
+ }
+
+ d_len = le32_to_cpu(data->info.len);
+ if (d_len != flen) {
+ wil_err(wil,
+ "sched scan result length mismatch, d_len %d should be %d\n",
+ d_len, flen);
+ return;
+ }
+
+ fc = rx_mgmt_frame->frame_control;
+ if (!ieee80211_is_probe_resp(fc)) {
+ wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
+ fc);
+ return;
+ }
+
+ ch_no = data->info.channel + 1;
+ freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
+ channel = ieee80211_get_channel(wiphy, freq);
+ if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+ signal = 100 * data->info.rssi;
+ else
+ signal = data->info.sqi;
+
+ wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
+ data->info.channel, data->info.mcs, data->info.rssi);
+ wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
+ d_len, data->info.qid, data->info.mid, data->info.cid);
+ wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+ d_len, true);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
+ bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
+ d_len, signal, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+ }
+
+ cfg80211_sched_scan_results(wiphy, 0);
+}
+
/**
* Some events are ignored for purpose; and need not be interpreted as
* "unhandled events"
@@ -857,6 +1243,7 @@ static const struct {
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
+ {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
};
/*
@@ -963,8 +1350,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
- wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
- id, wmi->mid, tstamp);
+ wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n",
+ eventid2name(id), id, wmi->mid, tstamp);
trace_wil6210_wmi_event(wmi, &wmi[1],
len - sizeof(*wmi));
}
@@ -1380,8 +1767,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
};
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
- struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ struct wmi_set_appie_cmd *cmd;
+
+ if (len < ie_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+ cmd = kzalloc(len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
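
The new len < ie_len test catches u16 wraparound: if
sizeof(struct wmi_set_appie_cmd) + ie_len overflows 16 bits, the sum
comes out smaller than ie_len and the undersized kzalloc() is refused.
The idiom in miniature (sketch):

/* for same-width unsigned a and b: if a + b wrapped, then
 * a + b < a and a + b < b
 */
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;

if (len < ie_len)	/* sum wrapped past U16_MAX */
	return -EINVAL;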
@@ -1461,7 +1854,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
int rc;
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
- struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+ struct ieee80211_channel *ch = wil->monitor_chandef.chan;
cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
if (ch)
@@ -1801,6 +2194,16 @@ void wmi_event_flush(struct wil6210_priv *wil)
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
}
+static const char *suspend_status2name(u8 status)
+{
+ switch (status) {
+ case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
+ return "LINK_NOT_IDLE";
+ default:
+ return "Untracked status";
+ }
+}
+
int wmi_suspend(struct wil6210_priv *wil)
{
int rc;
@@ -1816,7 +2219,7 @@ int wmi_suspend(struct wil6210_priv *wil)
wil->suspend_resp_rcvd = false;
wil->suspend_resp_comp = false;
- reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+ reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE;
rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
@@ -1848,8 +2251,9 @@ int wmi_suspend(struct wil6210_priv *wil)
}
wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
- if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
- wil_dbg_pm(wil, "device rejected the suspend\n");
+ if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) {
+ wil_dbg_pm(wil, "device rejected the suspend, %s\n",
+ suspend_status2name(reply.evt.status));
wil->suspend_stats.rejected_by_device++;
}
rc = reply.evt.status;
@@ -1861,21 +2265,50 @@ out:
return rc;
}
+static void resume_triggers2string(u32 triggers, char *string, int str_size)
+{
+ string[0] = '\0';
+
+ if (!triggers) {
+ strlcat(string, " UNKNOWN", str_size);
+ return;
+ }
+
+ if (triggers & WMI_RESUME_TRIGGER_HOST)
+ strlcat(string, " HOST", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_UCAST_RX)
+ strlcat(string, " UCAST_RX", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_BCAST_RX)
+ strlcat(string, " BCAST_RX", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
+ strlcat(string, " WMI_EVT", str_size);
+}
+
int wmi_resume(struct wil6210_priv *wil)
{
int rc;
+ char string[100];
struct {
struct wmi_cmd_hdr wmi;
struct wmi_traffic_resume_event evt;
} __packed reply;
reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+ reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN;
rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
if (rc)
return rc;
+ resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string,
+ sizeof(string));
+ wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n",
+ reply.evt.status ? "failed" : "passed", string,
+ le32_to_cpu(reply.evt.resume_triggers));
return reply.evt.status;
}
@@ -1906,8 +2339,8 @@ static void wmi_event_handle(struct wil6210_priv *wil,
void *evt_data = (void *)(&wmi[1]);
u16 id = le16_to_cpu(wmi->command_id);
- wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
- id, wil->reply_id);
+ wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x)\n",
+ eventid2name(id), id, wil->reply_id);
/* check if someone waits for this event */
if (wil->reply_id && wil->reply_id == id) {
WARN_ON(wil->reply_buf);
@@ -2002,3 +2435,159 @@ out:
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return rc;
}
+
+static void
+wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ struct cfg80211_ssid *ssids, int n_ssids,
+ struct cfg80211_match_set *match_sets,
+ int n_match_sets)
+{
+ int i;
+
+ if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
+ wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
+ n_match_sets, WMI_MAX_PNO_SSID_NUM);
+ n_match_sets = WMI_MAX_PNO_SSID_NUM;
+ }
+ cmd->num_of_ssids = n_match_sets;
+
+ for (i = 0; i < n_match_sets; i++) {
+ struct wmi_sched_scan_ssid_match *wmi_match =
+ &cmd->ssid_for_match[i];
+ struct cfg80211_match_set *cfg_match = &match_sets[i];
+ int j;
+
+ wmi_match->ssid_len = cfg_match->ssid.ssid_len;
+ memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
+ min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN));
+ wmi_match->rssi_threshold = S8_MIN;
+ if (cfg_match->rssi_thold >= S8_MIN &&
+ cfg_match->rssi_thold <= S8_MAX)
+ wmi_match->rssi_threshold = cfg_match->rssi_thold;
+
+ for (j = 0; j < n_ssids; j++)
+ if (wmi_match->ssid_len == ssids[j].ssid_len &&
+ memcmp(wmi_match->ssid, ssids[j].ssid,
+ wmi_match->ssid_len) == 0)
+ wmi_match->add_ssid_to_probe = true;
+ }
+}
+
+static void
+wmi_sched_scan_set_channels(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ u32 n_channels,
+ struct ieee80211_channel **channels)
+{
+ int i;
+
+ if (n_channels > WMI_MAX_CHANNEL_NUM) {
+ wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
+ n_channels, WMI_MAX_CHANNEL_NUM);
+ n_channels = WMI_MAX_CHANNEL_NUM;
+ }
+ cmd->num_of_channels = n_channels;
+
+ for (i = 0; i < n_channels; i++) {
+ struct ieee80211_channel *cfg_chan = channels[i];
+
+ cmd->channel_list[i] = cfg_chan->hw_value - 1;
+ }
+}
+
+static void
+wmi_sched_scan_set_plans(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ struct cfg80211_sched_scan_plan *scan_plans,
+ int n_scan_plans)
+{
+ int i;
+
+ if (n_scan_plans > WMI_MAX_PLANS_NUM) {
+ wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
+ n_scan_plans, WMI_MAX_PLANS_NUM);
+ n_scan_plans = WMI_MAX_PLANS_NUM;
+ }
+
+ for (i = 0; i < n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
+
+ cmd->scan_plans[i].interval_sec =
+ cpu_to_le16(cfg_plan->interval);
+ cmd->scan_plans[i].num_of_iterations =
+ cpu_to_le16(cfg_plan->iterations);
+ }
+}
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+ struct cfg80211_sched_scan_request *request)
+{
+ int rc;
+ struct wmi_start_sched_scan_cmd cmd = {
+ .min_rssi_threshold = S8_MIN,
+ .initial_delay_sec = cpu_to_le16(request->delay),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_start_sched_scan_event evt;
+ } __packed reply;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ if (request->min_rssi_thold >= S8_MIN &&
+ request->min_rssi_thold <= S8_MAX)
+ cmd.min_rssi_threshold = request->min_rssi_thold;
+
+ wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
+ request->match_sets, request->n_match_sets);
+ wmi_sched_scan_set_channels(wil, &cmd,
+ request->n_channels, request->channels);
+ wmi_sched_scan_set_plans(wil, &cmd,
+ request->scan_plans, request->n_scan_plans);
+
+ reply.evt.result = WMI_PNO_REJECT;
+
+ rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd),
+ WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.result != WMI_PNO_SUCCESS) {
+ wil_err(wil, "start sched scan failed, result %d\n",
+ reply.evt.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wmi_stop_sched_scan(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_stop_sched_scan_event evt;
+ } __packed reply;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ reply.evt.result = WMI_PNO_REJECT;
+
+ rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0,
+ WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.result != WMI_PNO_SUCCESS) {
+ wil_err(wil, "stop sched scan failed, result %d\n",
+ reply.evt.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
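
The three wmi_sched_scan_set_*() helpers above share one pattern: clamp the
cfg80211 request at the firmware limit, log the truncation, and copy only the
first N entries. A standalone sketch of that pattern (FW_MAX_CHANNELS mirrors
WMI_MAX_CHANNEL_NUM; printf stands in for wil_dbg_wmi):

#include <stdio.h>

#define FW_MAX_CHANNELS 6

static int clamp_channels(const int *req, int n_req, unsigned char *out)
{
	int i;

	if (n_req > FW_MAX_CHANNELS) {
		printf("too many channels (%d), use first %d\n",
		       n_req, FW_MAX_CHANNELS);
		n_req = FW_MAX_CHANNELS;
	}
	for (i = 0; i < n_req; i++)
		out[i] = (unsigned char)(req[i] - 1);	/* hw_value is 1-based */
	return n_req;
}

int main(void)
{
	int req[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned char ch[FW_MAX_CHANNELS];
	int n = clamp_channels(req, 8, ch);

	printf("sending %d channels, first entry %u\n", n, ch[0]);
	return 0;
}
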
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 5263ee717a4f..d3e75f0ff245 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -71,6 +71,8 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_RSSI_REPORTING = 12,
WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13,
WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14,
+ WMI_FW_CAPABILITY_PNO = 15,
+ WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_MAX,
};
@@ -87,6 +89,8 @@ enum wmi_command_id {
WMI_CONNECT_CMDID = 0x01,
WMI_DISCONNECT_CMDID = 0x03,
WMI_DISCONNECT_STA_CMDID = 0x04,
+ WMI_START_SCHED_SCAN_CMDID = 0x05,
+ WMI_STOP_SCHED_SCAN_CMDID = 0x06,
WMI_START_SCAN_CMDID = 0x07,
WMI_SET_BSS_FILTER_CMDID = 0x09,
WMI_SET_PROBED_SSID_CMDID = 0x0A,
@@ -385,6 +389,38 @@ struct wmi_start_scan_cmd {
} channel_list[0];
} __packed;
+#define WMI_MAX_PNO_SSID_NUM (16)
+#define WMI_MAX_CHANNEL_NUM (6)
+#define WMI_MAX_PLANS_NUM (2)
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_ssid_match {
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ s8 rssi_threshold;
+ /* boolean */
+ u8 add_ssid_to_probe;
+ u8 reserved;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_plan {
+ __le16 interval_sec;
+ __le16 num_of_iterations;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_start_sched_scan_cmd {
+ struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM];
+ u8 num_of_ssids;
+ s8 min_rssi_threshold;
+ u8 channel_list[WMI_MAX_CHANNEL_NUM];
+ u8 num_of_channels;
+ u8 reserved;
+ __le16 initial_delay_sec;
+ struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
+} __packed;
+
/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX (3)
@@ -1238,6 +1274,9 @@ enum wmi_event_id {
WMI_READY_EVENTID = 0x1001,
WMI_CONNECT_EVENTID = 0x1002,
WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_START_SCHED_SCAN_EVENTID = 0x1005,
+ WMI_STOP_SCHED_SCAN_EVENTID = 0x1006,
+ WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007,
WMI_SCAN_COMPLETE_EVENTID = 0x100A,
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
@@ -1600,6 +1639,49 @@ struct wmi_scan_complete_event {
__le32 status;
} __packed;
+/* wmi_rx_mgmt_info */
+struct wmi_rx_mgmt_info {
+ u8 mcs;
+ s8 rssi;
+ u8 range;
+ u8 sqi;
+ __le16 stype;
+ __le16 status;
+ __le32 len;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
+ u8 qid;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
+ u8 mid;
+ u8 cid;
+ /* From Radio MNGR */
+ u8 channel;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_EVENTID */
+enum wmi_pno_result {
+ WMI_PNO_SUCCESS = 0x00,
+ WMI_PNO_REJECT = 0x01,
+ WMI_PNO_INVALID_PARAMETERS = 0x02,
+ WMI_PNO_NOT_ENABLED = 0x03,
+};
+
+struct wmi_start_sched_scan_event {
+ /* pno_result */
+ u8 result;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_stop_sched_scan_event {
+ /* pno_result */
+ u8 result;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_sched_scan_result_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[0];
+} __packed;
+
/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
enum wmi_acs_info_bitmask {
WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01,
@@ -1814,24 +1896,6 @@ struct wmi_get_ssid_event {
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
-/* wmi_rx_mgmt_info */
-struct wmi_rx_mgmt_info {
- u8 mcs;
- s8 rssi;
- u8 range;
- u8 sqi;
- __le16 stype;
- __le16 status;
- __le32 len;
- /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
- u8 qid;
- /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
- u8 mid;
- u8 cid;
- /* From Radio MNGR */
- u8 channel;
-} __packed;
-
/* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */
struct wmi_rf_xpm_read_result_event {
/* enum wmi_fw_status_e - success=0 or fail=1 */
@@ -2267,8 +2331,8 @@ struct wmi_link_maintain_cfg_read_done_event {
} __packed;
enum wmi_traffic_suspend_status {
- WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
- WMI_TRAFFIC_SUSPEND_REJECTED = 0x1,
+ WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
+ WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE = 0x1,
};
/* WMI_TRAFFIC_SUSPEND_EVENTID */
@@ -2282,10 +2346,21 @@ enum wmi_traffic_resume_status {
WMI_TRAFFIC_RESUME_FAILED = 0x1,
};
+enum wmi_resume_trigger {
+ WMI_RESUME_TRIGGER_UNKNOWN = 0x0,
+ WMI_RESUME_TRIGGER_HOST = 0x1,
+ WMI_RESUME_TRIGGER_UCAST_RX = 0x2,
+ WMI_RESUME_TRIGGER_BCAST_RX = 0x4,
+ WMI_RESUME_TRIGGER_WMI_EVT = 0x8,
+};
+
/* WMI_TRAFFIC_RESUME_EVENTID */
struct wmi_traffic_resume_event {
- /* enum wmi_traffic_resume_status_e */
+ /* enum wmi_traffic_resume_status */
u8 status;
+ u8 reserved[3];
+ /* enum wmi_resume_trigger bitmap */
+ __le32 resume_triggers;
} __packed;
/* Power Save command completion status codes */
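
A userspace model of the reworked wmi_traffic_resume_event: the three reserved
bytes pad the __le32 bitmap to a 4-byte offset, and reading it byte-wise keeps
the decode correct on any host endianness (field names here are illustrative):

#include <stdint.h>
#include <stdio.h>

struct traffic_resume_event {
	uint8_t status;
	uint8_t reserved[3];
	uint8_t resume_triggers_le[4];	/* __le32 on the wire */
} __attribute__((packed));

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* status 0 (resumed), triggers 0x9 = HOST | WMI_EVT on the wire */
	struct traffic_resume_event evt = { 0, { 0, 0, 0 }, { 0x09, 0, 0, 0 } };

	printf("sizeof(evt)=%zu triggers=0x%x\n",
	       sizeof(evt), get_le32(evt.resume_triggers_le));
	return 0;
}
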
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index a5557d70689f..f2a2f41e3c96 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -1031,7 +1031,7 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
- mdelay(2);
+ usleep_range(2000, 3000);
b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
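
The b43 change swaps a 2 ms busy-wait for a ranged sleep: mdelay() spins the
CPU, while usleep_range() lets the scheduler put the task to sleep and
coalesce wakeups, the documented preference for delays of this length when
the caller may sleep. A kernel-context sketch, assuming a non-atomic caller:

#include <linux/delay.h>

static void radio_settle(void)
{
	/* was: mdelay(2); -- burns the CPU for 2 ms */
	usleep_range(2000, 3000);	/* sleep 2-3 ms, wakeup may be coalesced */
}
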
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 9f2d0b0cf6e5..2d3a5dd07a3f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -165,7 +165,7 @@ static int brcmf_proto_bcdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
static int
brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
+ void *buf, uint len, int *fwerr)
{
struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
@@ -175,6 +175,7 @@ brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+ *fwerr = 0;
ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false);
if (ret < 0) {
brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n",
@@ -211,25 +212,27 @@ retry:
memcpy(buf, info, len);
}
+ ret = 0;
+
/* Check the ERROR flag */
if (flags & BCDC_DCMD_ERROR)
- ret = le32_to_cpu(msg->status);
-
+ *fwerr = le32_to_cpu(msg->status);
done:
return ret;
}
static int
brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
+ void *buf, uint len, int *fwerr)
{
struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
- int ret = 0;
+ int ret;
u32 flags, id;
brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+ *fwerr = 0;
ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true);
if (ret < 0)
goto done;
@@ -249,9 +252,11 @@ brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
goto done;
}
+ ret = 0;
+
/* Check the ERROR flag */
if (flags & BCDC_DCMD_ERROR)
- ret = le32_to_cpu(msg->status);
+ *fwerr = le32_to_cpu(msg->status);
done:
return ret;
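
The bcdc change above splits the two error domains: the function's return
value now carries only transport status, while the firmware's own status is
reported through the new *fwerr out-parameter. A standalone sketch of the
pattern (do_cmd and the -23 status are invented for illustration):

#include <stdio.h>

static int do_cmd(int simulate_fw_status, int *fwerr)
{
	*fwerr = 0;

	/* ... transport I/O happens here; return -EIO etc. on bus failure */

	if (simulate_fw_status)
		*fwerr = -23;	/* firmware status, not a transport error */
	return 0;		/* the transport itself succeeded */
}

int main(void)
{
	int fwerr;
	int err = do_cmd(1, &fwerr);

	if (err)
		printf("transport failed: %d\n", err);
	else if (fwerr)
		printf("firmware rejected command: %d\n", fwerr);
	return 0;
}
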
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index cd587325e286..0b68240ec7b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -118,7 +118,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
pdata->oob_irq_flags, "brcmf_oob_intr",
- &sdiodev->func[1]->dev);
+ &sdiodev->func1->dev);
if (ret != 0) {
brcmf_err("request_irq failed %d\n", ret);
return ret;
@@ -132,39 +132,40 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
}
sdiodev->irq_wake = true;
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
/* assign GPIO to SDIO core */
addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
- gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
+ gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
gpiocontrol |= 0x2;
- brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);
+ brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);
- brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
- &ret);
- brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
- brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
+ 0xf, &ret);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
}
/* must configure SDIO_CCCR_IENx to enable irq */
- data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
- data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+ data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
+ data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
+ SDIO_CCCR_IEN_FUNC0;
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);
/* redirect, configure and enable io for interrupt signal */
- data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+ data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
- data |= SDIO_SEPINT_ACT_HI;
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
-
- sdio_release_host(sdiodev->func[1]);
+ data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
+ data, &ret);
+ sdio_release_host(sdiodev->func1);
} else {
brcmf_dbg(SDIO, "Entering\n");
- sdio_claim_host(sdiodev->func[1]);
- sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
- sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
- sdio_release_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
+ sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
+ sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
+ sdio_release_host(sdiodev->func1);
sdiodev->sd_irq_requested = true;
}
@@ -182,26 +183,26 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
struct brcmfmac_sdio_pd *pdata;
pdata = &sdiodev->settings->bus.sdio;
- sdio_claim_host(sdiodev->func[1]);
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
- sdio_release_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+ sdio_release_host(sdiodev->func1);
sdiodev->oob_irq_requested = false;
if (sdiodev->irq_wake) {
disable_irq_wake(pdata->oob_irq_nr);
sdiodev->irq_wake = false;
}
- free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
+ free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
sdiodev->irq_en = false;
sdiodev->oob_irq_requested = false;
}
if (sdiodev->sd_irq_requested) {
- sdio_claim_host(sdiodev->func[1]);
- sdio_release_irq(sdiodev->func[2]);
- sdio_release_irq(sdiodev->func[1]);
- sdio_release_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
+ sdio_release_irq(sdiodev->func2);
+ sdio_release_irq(sdiodev->func1);
+ sdio_release_host(sdiodev->func1);
sdiodev->sd_irq_requested = false;
}
}
@@ -230,271 +231,121 @@ void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
sdiodev->state = state;
}
-static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
- uint regaddr, u8 byte)
-{
- int err_ret;
-
- /*
- * Can only directly write to some F0 registers.
- * Handle CCCR_IENx and CCCR_ABORT command
- * as a special case.
- */
- if ((regaddr == SDIO_CCCR_ABORT) ||
- (regaddr == SDIO_CCCR_IENx))
- sdio_writeb(func, byte, regaddr, &err_ret);
- else
- sdio_f0_writeb(func, byte, regaddr, &err_ret);
-
- return err_ret;
-}
-
-static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
- u32 addr, u8 regsz, void *data, bool write)
-{
- struct sdio_func *func;
- int ret = -EINVAL;
-
- brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
- write, fn, addr, regsz);
-
- /* only allow byte access on F0 */
- if (WARN_ON(regsz > 1 && !fn))
- return -EINVAL;
- func = sdiodev->func[fn];
-
- switch (regsz) {
- case sizeof(u8):
- if (write) {
- if (fn)
- sdio_writeb(func, *(u8 *)data, addr, &ret);
- else
- ret = brcmf_sdiod_f0_writeb(func, addr,
- *(u8 *)data);
- } else {
- if (fn)
- *(u8 *)data = sdio_readb(func, addr, &ret);
- else
- *(u8 *)data = sdio_f0_readb(func, addr, &ret);
- }
- break;
- case sizeof(u16):
- if (write)
- sdio_writew(func, *(u16 *)data, addr, &ret);
- else
- *(u16 *)data = sdio_readw(func, addr, &ret);
- break;
- case sizeof(u32):
- if (write)
- sdio_writel(func, *(u32 *)data, addr, &ret);
- else
- *(u32 *)data = sdio_readl(func, addr, &ret);
- break;
- default:
- brcmf_err("invalid size: %d\n", regsz);
- break;
- }
-
- if (ret)
- brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", fn, addr, ret);
-
- return ret;
-}
-
-static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u8 regsz, void *data, bool write)
-{
- u8 func;
- s32 retry = 0;
- int ret;
-
- if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
- return -ENOMEDIUM;
-
- /*
- * figure out how to read the register based on address range
- * 0x00 ~ 0x7FF: function 0 CCCR and FBR
- * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
- * The rest: function 1 silicon backplane core registers
- */
- if ((addr & ~REG_F0_REG_MASK) == 0)
- func = SDIO_FUNC_0;
- else
- func = SDIO_FUNC_1;
-
- do {
- if (!write)
- memset(data, 0, regsz);
- /* for retry wait for 1 ms till bus get settled down */
- if (retry)
- usleep_range(1000, 2000);
- ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
- data, write);
- } while (ret != 0 && ret != -ENOMEDIUM &&
- retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
- if (ret == -ENOMEDIUM)
- brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
- else if (ret != 0) {
- /*
- * SleepCSR register access can fail when
- * waking up the device so reduce this noise
- * in the logs.
- */
- if (addr != SBSDIO_FUNC1_SLEEPCSR)
- brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", func, addr, ret);
- else
- brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", func, addr, ret);
- }
- return ret;
-}
-
-static int
-brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
+ u32 addr)
{
+ u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
int err = 0, i;
- u8 addr[3];
-
- if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
- return -ENOMEDIUM;
-
- addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
- addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
- addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
-
- for (i = 0; i < 3; i++) {
- err = brcmf_sdiod_regrw_helper(sdiodev,
- SBSDIO_FUNC1_SBADDRLOW + i,
- sizeof(u8), &addr[i], true);
- if (err) {
- brcmf_err("failed at addr: 0x%0x\n",
- SBSDIO_FUNC1_SBADDRLOW + i);
- break;
- }
- }
- return err;
-}
+ if (bar0 == sdiodev->sbwad)
+ return 0;
-static int
-brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
-{
- uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- int err = 0;
+ v = bar0 >> 8;
- if (bar0 != sdiodev->sbwad) {
- err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
- if (err)
- return err;
+ for (i = 0; i < 3 && !err; i++, v >>= 8)
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
+ v & 0xff, &err);
+ if (!err)
sdiodev->sbwad = bar0;
- }
-
- *addr &= SBSDIO_SB_OFT_ADDR_MASK;
- if (width == 4)
- *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-
- return 0;
+ return err;
}
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
- u8 data;
+ u32 data = 0;
int retval;
- brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
- retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
- false);
- brcmf_dbg(SDIO, "data:0x%02x\n", data);
+ retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+ if (retval)
+ goto out;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ data = sdio_readl(sdiodev->func1, addr, &retval);
+out:
if (ret)
*ret = retval;
return data;
}
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u32 data, int *ret)
{
- u32 data = 0;
int retval;
- brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
- retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+ retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (retval)
- goto done;
- retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
- false);
- brcmf_dbg(SDIO, "data:0x%08x\n", data);
-
-done:
- if (ret)
- *ret = retval;
+ goto out;
- return data;
-}
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u8 data, int *ret)
-{
- int retval;
+ sdio_writel(sdiodev->func1, data, addr, &retval);
- brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
- retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
- true);
+out:
if (ret)
*ret = retval;
}
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u32 data, int *ret)
+static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
+ struct sdio_func *func, u32 addr,
+ struct sk_buff *skb)
{
- int retval;
+ unsigned int req_sz;
+ int err;
- brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
- retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
- if (retval)
- goto done;
- retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
- true);
+ /* Single skb use the standard mmc interface */
+ req_sz = skb->len + 3;
+ req_sz &= (uint)~3;
-done:
- if (ret)
- *ret = retval;
+ switch (func->num) {
+ case 1:
+ err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
+ req_sz);
+ break;
+ case 2:
+ err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
+ break;
+ default:
+ /* bail out as things are really fishy here */
+ WARN(1, "invalid sdio function number: %d\n", func->num);
+ err = -ENOMEDIUM;
+ }
+
+ if (err == -ENOMEDIUM)
+ brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
+ return err;
}
-static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
- bool write, u32 addr, struct sk_buff *pkt)
+static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
+ struct sdio_func *func, u32 addr,
+ struct sk_buff *skb)
{
unsigned int req_sz;
int err;
/* Single skb use the standard mmc interface */
- req_sz = pkt->len + 3;
+ req_sz = skb->len + 3;
req_sz &= (uint)~3;
- if (write)
- err = sdio_memcpy_toio(sdiodev->func[fn], addr,
- ((u8 *)(pkt->data)), req_sz);
- else if (fn == 1)
- err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
- addr, req_sz);
- else
- /* function 2 read is FIFO operation */
- err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
- req_sz);
+ err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);
+
if (err == -ENOMEDIUM)
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
return err;
}
/**
* brcmf_sdiod_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
- * @fn: SDIO function number
+ * @func: SDIO function
* @write: direction flag
* @addr: dongle memory address as source/destination
* @pkt: skb pointer
@@ -503,7 +354,8 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
* stack for block data access. It assumes that the skb passed down by the
* caller has already been padded and aligned.
*/
-static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
+ struct sdio_func *func,
bool write, u32 addr,
struct sk_buff_head *pktlist)
{
@@ -529,7 +381,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
req_sz = 0;
skb_queue_walk(pktlist, pkt_next)
req_sz += pkt_next->len;
- req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
+ req_sz = ALIGN(req_sz, func->cur_blksize);
while (req_sz > PAGE_SIZE) {
pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
if (pkt_next == NULL) {
@@ -548,7 +400,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
target_list = &local_list;
}
- func_blk_sz = sdiodev->func[fn]->cur_blksize;
+ func_blk_sz = func->cur_blksize;
max_req_sz = sdiodev->max_request_size;
max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
target_list->qlen);
@@ -565,10 +417,10 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
mmc_cmd.opcode = SD_IO_RW_EXTENDED;
mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
- mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
- mmc_cmd.arg |= 1<<27; /* block mode */
+ mmc_cmd.arg |= (func->num & 0x7) << 28; /* SDIO func num */
+ mmc_cmd.arg |= 1 << 27; /* block mode */
/* for function 1 the addr will be incremented */
- mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+ mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
mmc_req.cmd = &mmc_cmd;
mmc_req.data = &mmc_dat;
@@ -614,11 +466,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
/* incrementing addr for function 1 */
- if (fn == 1)
+ if (func->num == 1)
addr += req_sz;
- mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
- mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
+ mmc_set_data_timeout(&mmc_dat, func->card);
+ mmc_wait_for_req(func->card->host, &mmc_req);
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret == -ENOMEDIUM) {
@@ -686,16 +538,19 @@ int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
- u32 addr = sdiodev->sbwad;
+ u32 addr = sdiodev->cc_core->base;
int err = 0;
brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
- err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+ err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (err)
goto done;
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);
done:
return err;
@@ -706,25 +561,28 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
{
struct sk_buff *glom_skb = NULL;
struct sk_buff *skb;
- u32 addr = sdiodev->sbwad;
+ u32 addr = sdiodev->cc_core->base;
int err = 0;
brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
addr, pktq->qlen);
- err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+ err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (err)
goto done;
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
if (pktq->qlen == 1)
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
- pktq->next);
+ err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
+ pktq->next);
else if (!sdiodev->sg_support) {
glom_skb = brcmu_pkt_buf_get_skb(totlen);
if (!glom_skb)
return -ENOMEM;
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
- glom_skb);
+ err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
+ glom_skb);
if (err)
goto done;
@@ -733,8 +591,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
skb_pull(glom_skb, skb->len);
}
} else
- err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
- pktq);
+ err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
+ addr, pktq);
done:
brcmu_pkt_buf_free_skb(glom_skb);
@@ -744,10 +602,11 @@ done:
int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
- u32 addr = sdiodev->sbwad;
+ u32 addr = sdiodev->cc_core->base;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
+
if (!mypkt) {
brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
nbytes);
@@ -756,40 +615,49 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
memcpy(mypkt->data, buf, nbytes);
- err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+ err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+ if (err) {
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
if (!err)
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
- mypkt);
+ err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr,
+ mypkt);
brcmu_pkt_buf_free_skb(mypkt);
- return err;
+ return err;
}
int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktq)
{
struct sk_buff *skb;
- u32 addr = sdiodev->sbwad;
+ u32 addr = sdiodev->cc_core->base;
int err;
brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
- err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+ err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (err)
return err;
- if (pktq->qlen == 1 || !sdiodev->sg_support)
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ if (pktq->qlen == 1 || !sdiodev->sg_support) {
skb_queue_walk(pktq, skb) {
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
- addr, skb);
+ err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
+ addr, skb);
if (err)
break;
}
- else
- err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
- pktq);
+ } else {
+ err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
+ addr, pktq);
+ }
return err;
}
@@ -798,7 +666,7 @@ int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
u8 *data, uint size)
{
- int bcmerror = 0;
+ int err = 0;
struct sk_buff *pkt;
u32 sdaddr;
uint dsize;
@@ -818,13 +686,13 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
else
dsize = size;
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
/* Do the transfer(s) */
while (size) {
/* Set the backplane window to include the start address */
- bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
- if (bcmerror)
+ err = brcmf_sdiod_set_backplane_window(sdiodev, address);
+ if (err)
break;
brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
@@ -835,11 +703,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
skb_put(pkt, dsize);
- if (write)
+
+ if (write) {
memcpy(pkt->data, data, dsize);
- bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
- sdaddr, pkt);
- if (bcmerror) {
+ err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
+ sdaddr, pkt);
+ } else {
+ err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
+ sdaddr, pkt);
+ }
+
+ if (err) {
brcmf_err("membytes transfer failed\n");
break;
}
@@ -859,24 +733,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
dev_kfree_skb(pkt);
- /* Return the window to backplane enumeration space for core access */
- if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
- brcmf_err("FAILED to set window back to 0x%x\n",
- sdiodev->sbwad);
+ sdio_release_host(sdiodev->func1);
- sdio_release_host(sdiodev->func[1]);
-
- return bcmerror;
+ return err;
}
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
{
- char t_func = (char)fn;
brcmf_dbg(SDIO, "Enter\n");
- /* issue abort cmd52 command through F0 */
- brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
- sizeof(t_func), &t_func, true);
+ /* Issue abort cmd52 command through F0 */
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);
brcmf_dbg(SDIO, "Exit\n");
return 0;
@@ -890,7 +757,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
uint nents;
int err;
- func = sdiodev->func[2];
+ func = sdiodev->func2;
host = func->card->host;
sdiodev->sg_support = host->max_segs > 1;
max_blocks = min_t(uint, host->max_blk_count, 511u);
@@ -951,17 +818,17 @@ static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
brcmf_sdio_trigger_dpc(sdiodev->bus);
wait_event(sdiodev->freezer->thread_freeze,
atomic_read(expect) == sdiodev->freezer->frozen_count);
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
res = brcmf_sdio_sleep(sdiodev->bus, true);
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
return res;
}
static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
brcmf_sdio_sleep(sdiodev->bus, false);
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
atomic_set(&sdiodev->freezer->freezing, 0);
complete_all(&sdiodev->freezer->resumed);
}
@@ -1011,19 +878,19 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
brcmf_sdiod_freezer_detach(sdiodev);
/* Disable Function 2 */
- sdio_claim_host(sdiodev->func[2]);
- sdio_disable_func(sdiodev->func[2]);
- sdio_release_host(sdiodev->func[2]);
+ sdio_claim_host(sdiodev->func2);
+ sdio_disable_func(sdiodev->func2);
+ sdio_release_host(sdiodev->func2);
/* Disable Function 1 */
- sdio_claim_host(sdiodev->func[1]);
- sdio_disable_func(sdiodev->func[1]);
- sdio_release_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
+ sdio_disable_func(sdiodev->func1);
+ sdio_release_host(sdiodev->func1);
sg_free_table(&sdiodev->sgtable);
sdiodev->sbwad = 0;
- pm_runtime_allow(sdiodev->func[1]->card->host->parent);
+ pm_runtime_allow(sdiodev->func1->card->host->parent);
return 0;
}
@@ -1039,29 +906,27 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
- sdiodev->num_funcs = 2;
+ sdio_claim_host(sdiodev->func1);
- sdio_claim_host(sdiodev->func[1]);
-
- ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+ ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
if (ret) {
brcmf_err("Failed to set F1 blocksize\n");
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
goto out;
}
- ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+ ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
goto out;
}
/* increase F2 timeout */
- sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;
+ sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;
/* Enable Function 1 */
- ret = sdio_enable_func(sdiodev->func[1]);
- sdio_release_host(sdiodev->func[1]);
+ ret = sdio_enable_func(sdiodev->func1);
+ sdio_release_host(sdiodev->func1);
if (ret) {
brcmf_err("Failed to enable F1: err=%d\n", ret);
goto out;
@@ -1077,7 +942,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
ret = -ENODEV;
goto out;
}
- brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
+ brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
out:
if (ret)
brcmf_sdiod_remove(sdiodev);
@@ -1138,6 +1003,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(SDIO, "Function#: %d\n", func->num);
dev = &func->dev;
+
+ /* Set MMC_QUIRK_LENIENT_FN0 for this card */
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
/* prohibit ACPI power management for this device */
brcmf_sdiod_acpi_set_power_manageable(dev, 0);
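
Setting MMC_QUIRK_LENIENT_FN0 is what allows the brcmf_sdiod_f0_writeb()
special-casing to be dropped earlier in this patch: without the quirk, the MMC
core's sdio_f0_writeb() only accepts vendor CCCR addresses, so CCCR_ABORT and
CCCR_IENx had to be routed through sdio_writeb(). A kernel-context sketch of
the resulting direct-write pattern (f0_write is an illustrative helper):

#include <linux/types.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>

static void f0_write(struct sdio_func *func, unsigned int addr, u8 val,
		     int *err)
{
	/* with MMC_QUIRK_LENIENT_FN0 set on func->card, any F0 register
	 * can be written directly */
	sdio_f0_writeb(func, val, addr, err);
}
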
@@ -1161,17 +1030,15 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
/* store refs to functions used. mmc_card does
* not hold the F0 function pointer.
*/
- sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
- sdiodev->func[0]->num = 0;
- sdiodev->func[1] = func->card->sdio_func[0];
- sdiodev->func[2] = func;
+ sdiodev->func1 = func->card->sdio_func[0];
+ sdiodev->func2 = func;
sdiodev->bus_if = bus_if;
bus_if->bus_priv.sdio = sdiodev;
bus_if->proto_type = BRCMF_PROTO_BCDC;
dev_set_drvdata(&func->dev, bus_if);
- dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
- sdiodev->dev = &sdiodev->func[1]->dev;
+ dev_set_drvdata(&sdiodev->func1->dev, bus_if);
+ sdiodev->dev = &sdiodev->func1->dev;
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
@@ -1187,8 +1054,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
fail:
dev_set_drvdata(&func->dev, NULL);
- dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
- kfree(sdiodev->func[0]);
+ dev_set_drvdata(&sdiodev->func1->dev, NULL);
kfree(sdiodev);
kfree(bus_if);
return err;
@@ -1217,11 +1083,10 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
/* only proceed with rest of cleanup if func 1 */
brcmf_sdiod_remove(sdiodev);
- dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
- dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+ dev_set_drvdata(&sdiodev->func1->dev, NULL);
+ dev_set_drvdata(&sdiodev->func2->dev, NULL);
kfree(bus_if);
- kfree(sdiodev->func[0]);
kfree(sdiodev);
}
@@ -1247,7 +1112,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
- if (func->num != SDIO_FUNC_1)
+ if (func->num != 1)
return 0;
@@ -1264,7 +1129,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
else
sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
}
- if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+ if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
return 0;
}
@@ -1276,7 +1141,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
- if (func->num != SDIO_FUNC_2)
+ if (func->num != 2)
return 0;
brcmf_sdiod_freezer_off(sdiodev);
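
brcmf_sdiod_set_backplane_window() above replaces the addrprep/sbaddr pair: a
32-bit backplane address is split into a 32 KB window base, programmed as
three byte writes starting at SBADDRLOW, plus an in-window offset carrying the
4-byte-access flag. A userspace model (mask values mirror the driver's sdio.h):

#include <stdint.h>
#include <stdio.h>

#define SB_OFT_ADDR_MASK	0x07fff		/* offset inside the window */
#define SB_ACCESS_2_4B_FLAG	0x08000		/* request 4-byte access */
#define SBWINDOW_MASK		(~(uint32_t)SB_OFT_ADDR_MASK)

int main(void)
{
	uint32_t addr = 0x18004004;	/* example backplane register */
	uint32_t bar0 = addr & SBWINDOW_MASK;
	uint32_t v = bar0 >> 8;
	int i;

	for (i = 0; i < 3; i++, v >>= 8)
		printf("SBADDRLOW+%d <- 0x%02x\n", i, v & 0xff);

	addr &= SB_OFT_ADDR_MASK;
	addr |= SB_ACCESS_2_4B_FLAG;
	printf("in-window access address: 0x%05x\n", addr);
	return 0;
}
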
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index c5d1a1cbf601..f7b30ce2300d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -1338,6 +1338,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
switch (pub->chip) {
case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID:
+ case BRCM_CC_4345_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
/* fall-through */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 6a59d0609d30..9be0b051066a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
err = request_firmware(&clm, clm_name, dev);
if (err) {
- if (err == -ENOENT) {
- brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n");
- return 0;
- }
- brcmf_err("request CLM blob file failed (%d)\n", err);
- return err;
+ brcmf_info("no clm_blob available(err=%d), device may have limited channels available\n",
+ err);
+ return 0;
}
chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 53ae30259989..47de35a33853 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -130,13 +130,19 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
}
}
+#define MAX_CAPS_BUFFER_SIZE 512
static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
{
- char caps[256];
+ char caps[MAX_CAPS_BUFFER_SIZE];
enum brcmf_feat_id id;
- int i;
+ int i, err;
+
+ err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
+ if (err) {
+ brcmf_err("could not get firmware cap (%d)\n", err);
+ return;
+ }
- brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
brcmf_dbg(INFO, "[ %s]\n", caps);
for (i = 0; i < ARRAY_SIZE(brcmf_fwcap_map); i++) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index f6a2df94dba7..f2cfdd3b2bf1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -107,7 +107,7 @@ static s32
brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
{
struct brcmf_pub *drvr = ifp->drvr;
- s32 err;
+ s32 err, fwerr;
if (drvr->bus_if->state != BRCMF_BUS_UP) {
brcmf_err("bus is down. we have nothing to do.\n");
@@ -117,16 +117,20 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
if (data != NULL)
len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
if (set)
- err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, data, len);
+ err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd,
+ data, len, &fwerr);
else
- err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);
-
- if (err >= 0)
- return 0;
-
- brcmf_dbg(FIL, "Failed: %s (%d)\n",
- brcmf_fil_get_errstr((u32)(-err)), err);
-
+ err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd,
+ data, len, &fwerr);
+
+ if (err) {
+ brcmf_dbg(FIL, "Failed: %s (%d)\n",
+ brcmf_fil_get_errstr((u32)(-err)), err);
+ } else if (fwerr < 0) {
+ brcmf_dbg(FIL, "Firmware error: %s (%d)\n",
+ brcmf_fil_get_errstr((u32)(-fwerr)), fwerr);
+ err = -EBADE;
+ }
return err;
}
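
On the consumer side, brcmf_fil_cmd_data() above keeps the two domains from
leaking into callers: transport errors pass through unchanged, while a
negative firmware status is collapsed to -EBADE so existing callers still see
a single errno. A sketch of that policy (EBADE taken from <errno.h>):

#include <errno.h>
#include <stdio.h>

static int fil_cmd(int err, int fwerr)
{
	if (err)
		return err;		/* bus/transport failure wins */
	if (fwerr < 0)
		return -EBADE;		/* firmware rejected the command */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", fil_cmd(-5, -1), fil_cmd(0, -23), fil_cmd(0, 0));
	return 0;
}
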
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index d2c834c3b2fc..e212a791a072 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -477,7 +477,7 @@ static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len)
+ uint cmd, void *buf, uint len, int *fwerr)
{
struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
struct sk_buff *skb = NULL;
@@ -485,6 +485,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
int err;
brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
+ *fwerr = 0;
msgbuf->ctl_completed = false;
err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
if (err)
@@ -508,14 +509,15 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
}
brcmu_pkt_buf_free_skb(skb);
- return msgbuf->ioctl_resp_status;
+ *fwerr = msgbuf->ioctl_resp_status;
+ return 0;
}
static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len)
+ uint cmd, void *buf, uint len, int *fwerr)
{
- return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
+ return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 3c87157f5b85..8752707557bf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1251,14 +1251,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
u64 address;
u32 addr;
- devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
- &devinfo->shared.scratch_dmahandle, GFP_KERNEL);
+ devinfo->shared.scratch =
+ dma_zalloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+ &devinfo->shared.scratch_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.scratch)
goto fail;
- memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
-
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
address = (u64)devinfo->shared.scratch_dmahandle;
@@ -1268,14 +1268,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
- devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
- &devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
+ devinfo->shared.ringupd =
+ dma_zalloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+ &devinfo->shared.ringupd_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.ringupd)
goto fail;
- memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
-
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
address = (u64)devinfo->shared.ringupd_dmahandle;
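
Both pcie.c hunks above fold a dma_alloc_coherent() + memset() pair into
dma_zalloc_coherent(), which returns already-zeroed coherent memory. A
kernel-context sketch of the replacement (alloc_scratch is an illustrative
wrapper; the helper exists in kernels of this era):

#include <linux/dma-mapping.h>

static void *alloc_scratch(struct device *dev, size_t len,
			   dma_addr_t *handle)
{
	return dma_zalloc_coherent(dev, len, handle, GFP_KERNEL);
}
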
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index 2404f8a7c31c..8a8e08f09ea0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -30,9 +30,9 @@ struct brcmf_proto {
int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
struct sk_buff *skb, struct brcmf_if **ifp);
int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len);
+ void *buf, uint len, int *fwerr);
int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
- uint len);
+ uint len, int *fwerr);
int (*tx_queue_data)(struct brcmf_pub *drvr, int ifidx,
struct sk_buff *skb);
int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
@@ -71,14 +71,16 @@ static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return drvr->proto->hdrpull(drvr, do_fws, skb, ifp);
}
static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len)
+ uint cmd, void *buf, uint len,
+ int *fwerr)
{
- return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len);
+ return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}
static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len)
+ uint cmd, void *buf, uint len,
+ int *fwerr)
{
- return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
+ return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}
static inline int brcmf_proto_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index cdf9e4161592..08686147b59d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -159,8 +159,8 @@ struct rte_console {
/* manfid tuple length, include tuple, link bytes */
#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
+#define SD_REG(field) \
+ (offsetof(struct sdpcmd_regs, field))
/* SDIO function 1 register CHIPCLKCSR */
/* Force ALP request to backplane */
@@ -436,6 +436,7 @@ struct brcmf_sdio_count {
struct brcmf_sdio {
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
struct brcmf_chip *ci; /* Chip info struct */
+ struct brcmf_core *sdio_core; /* sdio core info struct */
u32 hostintmask; /* Copy of Host Interrupt Mask */
atomic_t intstatus; /* Intstatus bits (events) pending */
@@ -659,32 +660,6 @@ static bool data_ok(struct brcmf_sdio *bus)
((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
}
-/*
- * Reads a register in the SDIO hardware block. This block occupies a series of
- * adresses on the 32 bit backplane bus.
- */
-static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
-{
- struct brcmf_core *core;
- int ret;
-
- core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
- *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
-
- return ret;
-}
-
-static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
-{
- struct brcmf_core *core;
- int ret;
-
- core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
- brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
-
- return ret;
-}
-
static int
brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
{
@@ -697,8 +672,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
/* 1st KSO write goes to AOS wake up core if device is asleep */
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- wr_val, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
if (on) {
/* device WAKEUP through KSO:
@@ -724,7 +698,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
* just one write attempt may fail,
* read it back until it matches written value
*/
- rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ rd_val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
&err);
if (!err) {
if ((rd_val & bmask) == cmp_val)
@@ -734,9 +708,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
/* bail out upon subsequent access errors */
if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
break;
+
udelay(KSO_WAIT_US);
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- wr_val, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val,
+ &err);
+
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
if (try_cnt > 2)
@@ -772,15 +748,15 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
if (err) {
brcmf_err("HT Avail request error: %d\n", err);
return -EBADE;
}
/* Check current status */
- clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ clkctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
brcmf_err("HT Avail read error: %d\n", err);
@@ -790,35 +766,34 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
/* Go to pending and await interrupt if appropriate */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
/* Allow only clock-available interrupt */
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ devctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
if (err) {
- brcmf_err("Devctl error setting CA: %d\n",
- err);
+ brcmf_err("Devctl error setting CA: %d\n", err);
return -EBADE;
}
devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
bus->clkstate = CLK_PENDING;
return 0;
} else if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ devctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
/* Otherwise, wait here (polling) for HT Avail */
timeout = jiffies +
msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ clkctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR,
&err);
if (time_after(jiffies, timeout))
@@ -852,16 +827,16 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ devctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
bus->clkstate = CLK_SDONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
if (err) {
brcmf_err("Failed access turning clock off: %d\n",
@@ -951,14 +926,14 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
/* Going to sleep */
if (sleep) {
- clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
+ clkcsr = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR,
&err);
if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
brcmf_dbg(SDIO, "no clock, set ALP\n");
- brcmf_sdiod_regwb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_ALP_AVAIL_REQ, &err);
+ brcmf_sdiod_writeb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_ALP_AVAIL_REQ, &err);
}
err = brcmf_sdio_kso_control(bus, false);
} else {
@@ -1004,7 +979,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
struct sdpcm_shared_le sh_le;
__le32 addr_le;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_bus_sleep(bus, false, false);
/*
@@ -1038,7 +1013,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
if (rv < 0)
goto fail;
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
/* Endianness */
sh->flags = le32_to_cpu(sh_le.flags);
@@ -1060,7 +1035,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
fail:
brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
rv, addr);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
return rv;
}
@@ -1079,6 +1054,8 @@ static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
+ struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+ struct brcmf_core *core = bus->sdio_core;
u32 intstatus = 0;
u32 hmb_data;
u8 fcbits;
@@ -1087,12 +1064,14 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
brcmf_dbg(SDIO, "Enter\n");
/* Read mailbox data and ack that we did so */
- ret = r_sdreg32(bus, &hmb_data,
- offsetof(struct sdpcmd_regs, tohostmailboxdata));
+ hmb_data = brcmf_sdiod_readl(sdiod,
+ core->base + SD_REG(tohostmailboxdata),
+ &ret);
+
+ if (!ret)
+ brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
+ SMB_INT_ACK, &ret);
- if (ret == 0)
- w_sdreg32(bus, SMB_INT_ACK,
- offsetof(struct sdpcmd_regs, tosbmailbox));
bus->sdcnt.f1regdata += 2;
/* dongle indicates the firmware has halted/crashed */
@@ -1166,6 +1145,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
+ struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+ struct brcmf_core *core = bus->sdio_core;
uint retries = 0;
u16 lastrbc;
u8 hi, lo;
@@ -1176,18 +1157,18 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
rtx ? ", send NAK" : "");
if (abort)
- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_abort(bus->sdiodev, bus->sdiodev->func2);
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_RF_TERM, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
+ &err);
bus->sdcnt.f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
- hi = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCHI, &err);
- lo = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCLO, &err);
+ hi = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCHI,
+ &err);
+ lo = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO,
+ &err);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
@@ -1207,8 +1188,8 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
if (rtx) {
bus->sdcnt.rxrtx++;
- err = w_sdreg32(bus, SMB_NAK,
- offsetof(struct sdpcmd_regs, tosbmailbox));
+ brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
+ SMB_NAK, &err);
bus->sdcnt.f1regdata++;
if (err == 0)
@@ -1228,13 +1209,13 @@ static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
brcmf_err("sdio error, abort command and terminate frame\n");
bus->sdcnt.tx_sderrs++;
- brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
+ brcmf_sdiod_abort(sdiodev, sdiodev->func2);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
- hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ hi = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -1584,10 +1565,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
* read directly into the chained packet, or allocate a large
 * packet and copy into the chain.
*/
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
&bus->glom, dlen);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
bus->sdcnt.f2rxdata++;
/* On failure, kill the superframe */
@@ -1595,11 +1576,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_err("glom read of %d bytes failed: %d\n",
dlen, errcode);
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
brcmf_sdio_free_glom(bus);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
return 0;
}
@@ -1609,10 +1590,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.seq_num = rxseq;
rd_new.len = dlen;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
BRCMF_SDIO_FT_SUPER);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
bus->cur_read.len = rd_new.len_nxtfrm << 4;
/* Remove superframe header, remember offset */
@@ -1628,10 +1609,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.len = pnext->len;
rd_new.seq_num = rxseq++;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
BRCMF_SDIO_FT_SUB);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
pnext->data, 32, "subframe:\n");
@@ -1640,11 +1621,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (errcode) {
/* Terminate frame on error */
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
brcmf_sdio_free_glom(bus);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
bus->cur_read.len = 0;
return 0;
}
@@ -1852,7 +1833,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len_left = rd->len;
/* read header first for unknown frame length */
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (!rd->len) {
ret = brcmf_sdiod_recv_buf(bus->sdiodev,
bus->rxhdr, BRCMF_FIRSTREAD);
@@ -1862,7 +1843,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
ret);
bus->sdcnt.rx_hdrfail++;
brcmf_sdio_rxfail(bus, true, true);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
continue;
}
@@ -1872,7 +1853,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
BRCMF_SDIO_FT_NORMAL)) {
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
if (!bus->rxpending)
break;
else
@@ -1888,7 +1869,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len_nxtfrm = 0;
/* treat all packets as events if we don't know */
rd->channel = SDPCM_EVENT_CHANNEL;
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
continue;
}
rd->len_left = rd->len > BRCMF_FIRSTREAD ?
@@ -1905,7 +1886,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_err("brcmu_pkt_buf_get_skb failed\n");
brcmf_sdio_rxfail(bus, false,
RETRYCHAN(rd->channel));
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
continue;
}
skb_pull(pkt, head_read);
@@ -1913,16 +1894,16 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
bus->sdcnt.f2rxdata++;
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
if (ret < 0) {
brcmf_err("read %d bytes from channel %d failed: %d\n",
rd->len, rd->channel, ret);
brcmu_pkt_buf_free_skb(pkt);
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, true,
RETRYCHAN(rd->channel));
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
continue;
}
@@ -1933,7 +1914,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
} else {
memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
rd_new.seq_num = rd->seq_num;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
@@ -1946,11 +1927,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
roundup(rd_new.len, 16) >> 4);
rd->len = 0;
brcmf_sdio_rxfail(bus, true, true);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
rd->len_nxtfrm = rd_new.len_nxtfrm;
rd->channel = rd_new.channel;
rd->dat_offset = rd_new.dat_offset;
@@ -1966,9 +1947,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd_new.seq_num);
/* Force retry w/normal header read */
rd->len = 0;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, false, true);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
@@ -1991,9 +1972,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
} else {
brcmf_err("%s: glom superframe w/o "
"descriptor!\n", __func__);
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, false, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
@@ -2091,7 +2072,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
int ntail, ret;
sdiodev = bus->sdiodev;
- blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ blksize = sdiodev->func2->cur_blksize;
/* sg entry alignment should be a divisor of block size */
WARN_ON(blksize % bus->sgentry_align);
@@ -2270,14 +2251,14 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
if (ret)
goto done;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
bus->sdcnt.f2txdata++;
if (ret < 0)
brcmf_sdio_txfail(bus);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
done:
brcmf_sdio_txpkt_postp(bus, pktq);
@@ -2295,6 +2276,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
struct sk_buff_head pktq;
+ u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
u32 intstatus = 0;
int ret = 0, prec_out, i;
uint cnt = 0;
@@ -2332,11 +2314,11 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* In poll mode, need to check for other events */
if (!bus->intr) {
/* Check device status, signal pending interrupt */
- sdio_claim_host(bus->sdiodev->func[1]);
- ret = r_sdreg32(bus, &intstatus,
- offsetof(struct sdpcmd_regs,
- intstatus));
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
+ intstatus = brcmf_sdiod_readl(bus->sdiodev,
+ intstat_addr, &ret);
+ sdio_release_host(bus->sdiodev->func1);
+
bus->sdcnt.f2txdata++;
if (ret != 0)
break;
@@ -2419,12 +2401,13 @@ static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
static void brcmf_sdio_bus_stop(struct device *dev)
{
- u32 local_hostintmask;
- u8 saveclk;
- int err;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ struct brcmf_core *core = bus->sdio_core;
+ u32 local_hostintmask;
+ u8 saveclk;
+ int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -2435,35 +2418,37 @@ static void brcmf_sdio_bus_stop(struct device *dev)
}
if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
/* Enable clock for device interrupts */
brcmf_sdio_bus_sleep(bus, false, false);
/* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
+ brcmf_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask),
+ 0, NULL);
+
local_hostintmask = bus->hostintmask;
bus->hostintmask = 0;
/* Force backplane clocks to assure F2 interrupt propagates */
- saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
&err);
if (!err)
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
if (err)
brcmf_err("Failed to force clock for F2: err %d\n",
err);
/* Turn off the bus (F2), free any pending packets */
brcmf_dbg(INTR, "disable SDIO interrupts\n");
- sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+ sdio_disable_func(sdiodev->func2);
/* Clear any pending interrupts now that F2 is disabled */
- w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus));
+ brcmf_sdiod_writel(sdiodev, core->base + SD_REG(intstatus),
+ local_hostintmask, NULL);
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
}
/* Clear the data packet queues */
brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
@@ -2501,15 +2486,14 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
- struct brcmf_core *buscore;
+ struct brcmf_core *core = bus->sdio_core;
u32 addr;
unsigned long val;
int ret;
- buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
- addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
+ addr = core->base + SD_REG(intstatus);
- val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
+ val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
if (ret != 0)
return ret;
@@ -2519,7 +2503,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
/* Clear interrupts */
if (val) {
- brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
+ brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret);
bus->sdcnt.f1regdata++;
atomic_or(val, &bus->intstatus);
}
@@ -2529,7 +2513,9 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
+ struct brcmf_sdio_dev *sdiod = bus->sdiodev;
u32 newstatus = 0;
+ u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
unsigned long intstatus;
uint txlimit = bus->txbound; /* Tx frames to send before resched */
uint framecnt; /* Temporary counter of tx/rx frames */
@@ -2537,7 +2523,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
/* If waiting for HTAVAIL, check status */
if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
@@ -2545,23 +2531,23 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
#ifdef DEBUG
/* Check for inconsistent device control */
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ &err);
#endif /* DEBUG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
- clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ clkctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
devctl, clkctl);
if (SBSDIO_HTAV(clkctl)) {
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ devctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_writeb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, devctl, &err);
bus->clkstate = CLK_AVAIL;
}
}
@@ -2584,11 +2570,10 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
*/
if (intstatus & I_HMB_FC_CHANGE) {
intstatus &= ~I_HMB_FC_CHANGE;
- err = w_sdreg32(bus, I_HMB_FC_CHANGE,
- offsetof(struct sdpcmd_regs, intstatus));
+ brcmf_sdiod_writel(sdiod, intstat_addr, I_HMB_FC_CHANGE, &err);
+
+ newstatus = brcmf_sdiod_readl(sdiod, intstat_addr, &err);
- err = r_sdreg32(bus, &newstatus,
- offsetof(struct sdpcmd_regs, intstatus));
bus->sdcnt.f1regdata += 2;
atomic_set(&bus->fcstate,
!!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
@@ -2601,7 +2586,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
intstatus |= brcmf_sdio_hostmail(bus);
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
/* Generally don't ask for these, can get CRC errors... */
if (intstatus & I_WR_OOSYNC) {
@@ -2644,7 +2629,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
data_ok(bus)) {
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (bus->ctrl_frame_stat) {
err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
bus->ctrl_frame_len);
@@ -2652,7 +2637,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
wmb();
bus->ctrl_frame_stat = false;
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
brcmf_sdio_wait_event_wakeup(bus);
}
/* Send queued frames (limit 1 if rx may still be pending) */
@@ -2668,14 +2653,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_err("failed backplane access over SDIO, halting operation\n");
atomic_set(&bus->intstatus, 0);
if (bus->ctrl_frame_stat) {
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (bus->ctrl_frame_stat) {
bus->ctrl_frame_err = -ENODEV;
wmb();
bus->ctrl_frame_stat = false;
brcmf_sdio_wait_event_wakeup(bus);
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
} else if (atomic_read(&bus->intstatus) ||
atomic_read(&bus->ipend) > 0 ||
@@ -2890,13 +2875,13 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
CTL_DONE_TIMEOUT);
ret = 0;
if (bus->ctrl_frame_stat) {
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (bus->ctrl_frame_stat) {
brcmf_dbg(SDIO, "ctrl_frame timeout\n");
bus->ctrl_frame_stat = false;
ret = -ETIMEDOUT;
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
if (!ret) {
brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
@@ -3020,7 +3005,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
return 0;
}
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (sh->assert_file_addr != 0) {
error = brcmf_sdiod_ramrw(bus->sdiodev, false,
sh->assert_file_addr, (u8 *)file, 80);
@@ -3033,7 +3018,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
if (error < 0)
return error;
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
file, sh->assert_line, expr);
@@ -3307,7 +3292,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
int bcmerror;
u32 rstvec;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
rstvec = get_unaligned_le32(fw->data);
@@ -3336,7 +3321,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
err:
brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
return bcmerror;
}
@@ -3347,31 +3332,31 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
- val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
+ val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
if (err) {
brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
return;
}
val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
return;
}
/* Add CMD14 Support */
- brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
- (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
- SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
- &err);
+ brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+ (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+ SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+ &err);
if (err) {
brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
return;
}
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HT, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HT, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
return;
@@ -3385,16 +3370,17 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
/* enable KSO bit */
static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
{
+ struct brcmf_core *core = bus->sdio_core;
u8 val;
int err = 0;
brcmf_dbg(TRACE, "Enter\n");
/* KSO bit added in SDIO core rev 12 */
- if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
+ if (core->rev < 12)
return 0;
- val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
+ val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
if (err) {
brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
return err;
@@ -3403,8 +3389,8 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- val, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ val, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
return err;
@@ -3420,6 +3406,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ struct brcmf_core *core = bus->sdio_core;
uint pad_size;
u32 value;
int err;
@@ -3428,7 +3415,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
* a device perspective, i.e. bus:txglom affects the
* bus transfers from device to host.
*/
- if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
+ if (core->rev < 12) {
/* for sdio core rev < 12, disable txgloming */
value = 0;
err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3449,7 +3436,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
if (sdiodev->sg_support) {
bus->txglom = false;
value = 1;
- pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
+ pad_size = bus->sdiodev->func2->cur_blksize << 1;
err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
&value, sizeof(u32));
if (err < 0) {
@@ -3491,7 +3478,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
address = bus->ci->rambase;
offset = err = 0;
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
while (offset < mem_size) {
len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
mem_size - offset;
@@ -3507,7 +3494,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
}
done:
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
return err;
}
@@ -3564,11 +3551,10 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
if (!bus->dpc_triggered) {
u8 devpend;
- sdio_claim_host(bus->sdiodev->func[1]);
- devpend = brcmf_sdiod_regrb(bus->sdiodev,
- SDIO_CCCR_INTx,
- NULL);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
+ devpend = brcmf_sdiod_func0_rb(bus->sdiodev,
+ SDIO_CCCR_INTx, NULL);
+ sdio_release_host(bus->sdiodev->func1);
intstatus = devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
}
@@ -3594,13 +3580,13 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
bus->console.count += jiffies_to_msecs(BRCMF_WD_POLL);
if (bus->console.count >= bus->console_interval) {
bus->console.count -= bus->console_interval;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
/* Make sure backplane clock is on */
brcmf_sdio_bus_sleep(bus, false, false);
if (brcmf_sdio_readconsole(bus) < 0)
/* stop on error */
bus->console_interval = 0;
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
}
#endif /* DEBUG */
@@ -3613,11 +3599,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
bus->idlecount++;
if (bus->idlecount > bus->idletime) {
brcmf_dbg(SDIO, "idle\n");
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_wd_timer(bus, false);
bus->idlecount = 0;
brcmf_sdio_bus_sleep(bus, true, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
} else {
bus->idlecount = 0;
@@ -3705,12 +3691,12 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
}
}
addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
- brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
- cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ brcmf_sdiod_writel(sdiodev, addr, 1, NULL);
+ cc_data_temp = brcmf_sdiod_readl(sdiodev, addr, NULL);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
- brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+ brcmf_sdiod_writel(sdiodev, addr, cc_data_temp, NULL);
brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
str_tab[i].strength, drivestrength, cc_data_temp);
@@ -3725,7 +3711,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (err) {
brcmf_err("error writing for HT off\n");
return err;
@@ -3733,8 +3719,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
- clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -3742,10 +3727,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
return -EACCES;
}
- SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
+ SPINWAIT(((clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+
if (!SBSDIO_ALPAV(clkval)) {
brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
clkval);
@@ -3753,11 +3739,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
udelay(65);
/* Also, disable the extra SDIO pull-ups */
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
return 0;
}
@@ -3766,13 +3752,12 @@ static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
u32 rstvec)
{
struct brcmf_sdio_dev *sdiodev = ctx;
- struct brcmf_core *core;
+ struct brcmf_core *core = sdiodev->bus->sdio_core;
u32 reg_addr;
/* clear all interrupts */
- core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
- reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+ reg_addr = core->base + SD_REG(intstatus);
+ brcmf_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
if (rstvec)
/* Write reset vector to address 0 */
@@ -3785,16 +3770,25 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
struct brcmf_sdio_dev *sdiodev = ctx;
u32 val, rev;
- val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
- if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
- sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
- addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+ val = brcmf_sdiod_readl(sdiodev, addr, NULL);
+
+ /*
+ * Special handling when reading the chipcommon chipid register: the
+ * 4339 is a next-generation 4335. It uses the same SDIO device id as
+ * the 4335, and the chipid register reports 4335 as well. The 4339
+ * can be identified by its chip revision, so the value is corrected
+ * here to give the chip.c module the right info.
+ */
+ if (addr == CORE_CC_REG(SI_ENUM_BASE, chipid) &&
+ (sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4339 ||
+ sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) {
rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
if (rev >= 2) {
val &= ~CID_ID_MASK;
val |= BRCM_CC_4339_CHIP_ID;
}
}
+
return val;
}
@@ -3802,7 +3796,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
{
struct brcmf_sdio_dev *sdiodev = ctx;
- brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+ brcmf_sdiod_writel(sdiodev, addr, val, NULL);
}
static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
@@ -3823,21 +3817,21 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
u32 drivestrength;
sdiodev = bus->sdiodev;
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
pr_debug("F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdiod_regrl(sdiodev, SI_ENUM_BASE, NULL));
+ brcmf_sdiod_readl(sdiodev, SI_ENUM_BASE, NULL));
/*
* Force PLL off until brcmf_chip_attach()
* programs PLL control regs
*/
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- BRCMF_INIT_CLKCTL1, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, BRCMF_INIT_CLKCTL1,
+ &err);
if (!err)
- clkctl = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
@@ -3851,6 +3845,17 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
bus->ci = NULL;
goto fail;
}
+
+ /* Pick up the SDIO core info struct from chip.c */
+ bus->sdio_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ if (!bus->sdio_core)
+ goto fail;
+
+ /* Pick up the CHIPCOMMON core info struct, for bulk IO in bcmsdh.c */
+ sdiodev->cc_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_CHIPCOMMON);
+ if (!sdiodev->cc_core)
+ goto fail;
+
sdiodev->settings = brcmf_get_module_param(sdiodev->dev,
BRCMF_BUSTYPE_SDIO,
bus->ci->chip,
@@ -3879,8 +3884,8 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
/* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
* is true or when platform data OOB irq is true).
*/
- if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
- ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
+ if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
+ ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) ||
(sdiodev->settings->bus.sdio.oob_irq_supported)))
sdiodev->bus_if->wowl_supported = true;
#endif
@@ -3897,29 +3902,29 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength);
/* Set card control so an SDIO card reset does a WLAN backplane reset */
- reg_val = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
+ reg_val = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
if (err)
goto fail;
reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
if (err)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol);
- reg_val = brcmf_sdiod_regrl(sdiodev, reg_addr, &err);
+ reg_val = brcmf_sdiod_readl(sdiodev, reg_addr, &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- brcmf_sdiod_regwl(sdiodev, reg_addr, reg_val, &err);
+ brcmf_sdiod_writel(sdiodev, reg_addr, reg_val, &err);
if (err)
goto fail;
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -3940,7 +3945,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
return true;
fail:
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
return false;
}
@@ -4020,22 +4025,21 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
const struct firmware *code,
void *nvram, u32 nvram_len)
{
- struct brcmf_bus *bus_if;
- struct brcmf_sdio_dev *sdiodev;
- struct brcmf_sdio *bus;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+ struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+ struct brcmf_core *core = bus->sdio_core;
u8 saveclk;
brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
- bus_if = dev_get_drvdata(dev);
- sdiodev = bus_if->bus_priv.sdio;
+
if (err)
goto fail;
if (!bus_if->drvr)
return;
- bus = sdiodev->bus;
-
/* try to download image and nvram to the dongle */
bus->alp_only = true;
err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4047,7 +4051,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
bus->sdcnt.tickcnt = 0;
brcmf_sdio_wd_timer(bus, true);
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
/* Make sure backplane clock is on, needed to generate F2 interrupt */
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
@@ -4055,10 +4059,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
goto release;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
brcmf_err("Failed to force clock for F2: err %d\n", err);
@@ -4066,10 +4070,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
}
/* Enable function 2 (frame transfers) */
- w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
- offsetof(struct sdpcmd_regs, tosbmailboxdata));
- err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
+ brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata),
+ SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL);
+ err = sdio_enable_func(sdiodev->func2);
brcmf_dbg(INFO, "enable F2: err=%d\n", err);
@@ -4077,13 +4081,14 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
if (!err) {
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
- w_sdreg32(bus, bus->hostintmask,
- offsetof(struct sdpcmd_regs, hostintmask));
+ brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask),
+ bus->hostintmask, NULL);
+
- brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err);
} else {
/* Disable F2 again */
- sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+ sdio_disable_func(sdiodev->func2);
goto release;
}
@@ -4091,8 +4096,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
brcmf_sdio_sr_init(bus);
} else {
/* Restore previous clock setting */
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- saveclk, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ saveclk, &err);
}
if (err == 0) {
@@ -4108,7 +4113,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
if (err != 0)
brcmf_sdio_clkctl(bus, CLK_NONE, false);
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
err = brcmf_bus_started(dev);
if (err != 0) {
@@ -4118,10 +4123,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
return;
release:
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
fail:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
- device_release_driver(&sdiodev->func[2]->dev);
+ device_release_driver(&sdiodev->func2->dev);
device_release_driver(dev);
}
@@ -4148,7 +4153,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
/* single-threaded workqueue */
wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
- dev_name(&sdiodev->func[1]->dev));
+ dev_name(&sdiodev->func1->dev));
if (!wq) {
brcmf_err("insufficient memory to create txworkqueue\n");
goto fail;
@@ -4174,7 +4179,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
bus, "brcmf_wdog/%s",
- dev_name(&sdiodev->func[1]->dev));
+ dev_name(&sdiodev->func1->dev));
if (IS_ERR(bus->watchdog_tsk)) {
pr_warn("brcmf_watchdog thread failed to start\n");
bus->watchdog_tsk = NULL;
@@ -4200,7 +4205,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
}
/* Query the F2 block size, set roundup accordingly */
- bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->blocksize = bus->sdiodev->func2->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize);
/* Allocate buffers */
@@ -4216,17 +4221,17 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
}
}
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
/* Disable F2 to clear any intermediate frame state on the dongle */
- sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+ sdio_disable_func(bus->sdiodev->func2);
bus->rxflow = false;
/* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
/* ...and initialize clock/power states */
bus->clkstate = CLK_SDONLY;
@@ -4278,7 +4283,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
if (bus->ci) {
if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_wd_timer(bus, false);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Leave the device in state where it is
@@ -4288,7 +4293,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
msleep(20);
brcmf_chip_set_passive(bus->ci);
brcmf_sdio_clkctl(bus, CLK_NONE, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
brcmf_chip_detach(bus->ci);
}
@@ -4335,9 +4340,9 @@ int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
{
int ret;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
ret = brcmf_sdio_bus_sleep(bus, sleep, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
return ret;
}
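
The sdio.c conversion above repeats a single pattern: claim the func1 host, perform the access through the new width-named accessor (with an optional error pointer), then release the host. A minimal sketch of that pattern, using only calls introduced or used in this patch; the helper name brcmf_sdio_read_intstatus() is hypothetical and not part of the patch:

/* Sketch only: brcmf_sdio_read_intstatus() is a hypothetical helper. */
static int brcmf_sdio_read_intstatus(struct brcmf_sdio *bus, u32 *status)
{
	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
	u32 addr = bus->sdio_core->base + SD_REG(intstatus);
	int err = 0;

	sdio_claim_host(sdiod->func1);	/* serialize with the MMC core */
	*status = brcmf_sdiod_readl(sdiod, addr, &err);
	sdio_release_host(sdiod->func1);

	return err;
}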
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index f3da32fc6360..7faed831f07d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -21,10 +21,6 @@
#include <linux/firmware.h>
#include "firmware.h"
-#define SDIO_FUNC_0 0
-#define SDIO_FUNC_1 1
-#define SDIO_FUNC_2 2
-
#define SDIOD_FBR_SIZE 0x100
/* io_en */
@@ -39,28 +35,29 @@
#define INTR_STATUS_FUNC1 0x2
#define INTR_STATUS_FUNC2 0x4
-/* Maximum number of I/O funcs */
-#define SDIOD_MAX_IOFUNCS 7
-
/* mask of register map */
#define REG_F0_REG_MASK 0x7FF
#define REG_F1_MISC_MASK 0x1FFFF
-/* as of sdiod rev 0, supports 3 functions */
-#define SBSDIO_NUM_FUNCTION 3
-
/* function 0 vendor specific CCCR registers */
+
#define SDIO_CCCR_BRCM_CARDCAP 0xf0
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04
-#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08
-#define SDIO_CCCR_BRCM_CARDCTRL 0xf1
-#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET 0x02
-#define SDIO_CCCR_BRCM_SEPINT 0xf2
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT BIT(1)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT BIT(2)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC BIT(3)
+
+/* Interrupt enable bits for each function */
+#define SDIO_CCCR_IEN_FUNC0 BIT(0)
+#define SDIO_CCCR_IEN_FUNC1 BIT(1)
+#define SDIO_CCCR_IEN_FUNC2 BIT(2)
+
+#define SDIO_CCCR_BRCM_CARDCTRL 0xf1
+#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET BIT(1)
-#define SDIO_SEPINT_MASK 0x01
-#define SDIO_SEPINT_OE 0x02
-#define SDIO_SEPINT_ACT_HI 0x04
+#define SDIO_CCCR_BRCM_SEPINT 0xf2
+#define SDIO_CCCR_BRCM_SEPINT_MASK BIT(0)
+#define SDIO_CCCR_BRCM_SEPINT_OE BIT(1)
+#define SDIO_CCCR_BRCM_SEPINT_ACT_HI BIT(2)
/* function 1 miscellaneous registers */
@@ -131,11 +128,6 @@
/* with b15, maps to 32-bit SB access */
#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000
-/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
-
-#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
-#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
-#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
/* Address bits from SBADDR regs */
#define SBSDIO_SBWINDOW_MASK 0xffff8000
@@ -178,9 +170,10 @@ struct brcmf_sdio;
struct brcmf_sdiod_freezer;
struct brcmf_sdio_dev {
- struct sdio_func *func[SDIO_MAX_FUNCS];
- u8 num_funcs; /* Supported funcs on client */
+ struct sdio_func *func1;
+ struct sdio_func *func2;
u32 sbwad; /* Save backplane window address */
+ struct brcmf_core *cc_core; /* chipcommon core info struct */
struct brcmf_sdio *bus;
struct device *dev;
struct brcmf_bus *bus_if;
@@ -296,13 +289,24 @@ struct sdpcmd_regs {
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
-/* sdio device register access interface */
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
- int *ret);
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
- int *ret);
+/* SDIO device register access interface */
+/* Accessors for SDIO Function 0 */
+#define brcmf_sdiod_func0_rb(sdiodev, addr, r) \
+ sdio_f0_readb((sdiodev)->func1, (addr), (r))
+
+#define brcmf_sdiod_func0_wb(sdiodev, addr, v, ret) \
+ sdio_f0_writeb((sdiodev)->func1, (v), (addr), (ret))
+
+/* Accessors for SDIO Function 1 */
+#define brcmf_sdiod_readb(sdiodev, addr, r) \
+ sdio_readb((sdiodev)->func1, (addr), (r))
+
+#define brcmf_sdiod_writeb(sdiodev, addr, v, ret) \
+ sdio_writeb((sdiodev)->func1, (v), (addr), (ret))
+
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+ int *ret);
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
@@ -342,7 +346,8 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
u8 *data, uint size);
/* Issue an abort to the specified function */
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func);
+
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state);
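
The new accessor layer in sdio.h is deliberately thin; what a call site expands to, as a reading aid rather than part of the patch:

/*
 *   val = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
 *     -> sdio_readb(sdiodev->func1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
 *
 *   cap = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCAP, &err);
 *     -> sdio_f0_readb(sdiodev->func1, SDIO_CCCR_BRCM_CARDCAP, &err);
 *
 * sdio_f0_readb()/sdio_f0_writeb() address function 0; func1 is passed
 * only to identify the claimed card to the MMC core. SD_REG(), used for
 * backplane addresses such as core->base + SD_REG(intstatus), is defined
 * elsewhere in this patch (not shown here), presumably as an offsetof()
 * wrapper over struct sdpcmd_regs, matching the offsetof() calls it
 * replaces.
 */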
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 763e8ba6b178..7e01981bc5c8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -16049,8 +16049,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_UPDATEGAINU,
rfseq_updategainu_events,
rfseq_updategainu_dlys,
- sizeof(rfseq_updategainu_events) /
- sizeof(rfseq_updategainu_events[0]));
+ ARRAY_SIZE(rfseq_updategainu_events));
mod_phy_reg(pi, 0x153, (0xff << 8), (90 << 8));
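
The ARRAY_SIZE() conversions in phy_n.c above and phytbl_n.c below are behavior-preserving. The macro comes from <linux/kernel.h> (which phytbl_n.c now includes) and is approximately:

/* Approximate kernel definition (sketch): */
#define ARRAY_SIZE(arr) \
	(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

Unlike the open-coded sizeof division it replaces, __must_be_array() turns accidentally passing a pointer instead of an array into a compile-time error.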
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
index dbf50ef6cd75..533bd4b0277e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
@@ -14,6 +14,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/kernel.h>
#include <types.h>
#include "phytbl_n.h"
@@ -4437,109 +4438,39 @@ static const u16 loft_lut_core1_rev0[] = {
};
const struct phytbl_info mimophytbl_info_rev0_volatile[] = {
- {&bdi_tbl_rev0, sizeof(bdi_tbl_rev0) / sizeof(bdi_tbl_rev0[0]), 21, 0,
- 16}
- ,
- {&pltlut_tbl_rev0, sizeof(pltlut_tbl_rev0) / sizeof(pltlut_tbl_rev0[0]),
- 20, 0, 32}
- ,
- {&gainctrl_lut_core0_rev0,
- sizeof(gainctrl_lut_core0_rev0) / sizeof(gainctrl_lut_core0_rev0[0]),
- 26, 192, 32}
- ,
- {&gainctrl_lut_core1_rev0,
- sizeof(gainctrl_lut_core1_rev0) / sizeof(gainctrl_lut_core1_rev0[0]),
- 27, 192, 32}
- ,
-
- {&est_pwr_lut_core0_rev0,
- sizeof(est_pwr_lut_core0_rev0) / sizeof(est_pwr_lut_core0_rev0[0]), 26,
- 0, 8}
- ,
- {&est_pwr_lut_core1_rev0,
- sizeof(est_pwr_lut_core1_rev0) / sizeof(est_pwr_lut_core1_rev0[0]), 27,
- 0, 8}
- ,
- {&adj_pwr_lut_core0_rev0,
- sizeof(adj_pwr_lut_core0_rev0) / sizeof(adj_pwr_lut_core0_rev0[0]), 26,
- 64, 8}
- ,
- {&adj_pwr_lut_core1_rev0,
- sizeof(adj_pwr_lut_core1_rev0) / sizeof(adj_pwr_lut_core1_rev0[0]), 27,
- 64, 8}
- ,
- {&iq_lut_core0_rev0,
- sizeof(iq_lut_core0_rev0) / sizeof(iq_lut_core0_rev0[0]), 26, 320, 32}
- ,
- {&iq_lut_core1_rev0,
- sizeof(iq_lut_core1_rev0) / sizeof(iq_lut_core1_rev0[0]), 27, 320, 32}
- ,
- {&loft_lut_core0_rev0,
- sizeof(loft_lut_core0_rev0) / sizeof(loft_lut_core0_rev0[0]), 26, 448,
- 16}
- ,
- {&loft_lut_core1_rev0,
- sizeof(loft_lut_core1_rev0) / sizeof(loft_lut_core1_rev0[0]), 27, 448,
- 16}
- ,
+ {&bdi_tbl_rev0, ARRAY_SIZE(bdi_tbl_rev0), 21, 0, 16},
+ {&pltlut_tbl_rev0, ARRAY_SIZE(pltlut_tbl_rev0), 20, 0, 32},
+ {&gainctrl_lut_core0_rev0, ARRAY_SIZE(gainctrl_lut_core0_rev0), 26, 192, 32},
+ {&gainctrl_lut_core1_rev0, ARRAY_SIZE(gainctrl_lut_core1_rev0), 27, 192, 32},
+ {&est_pwr_lut_core0_rev0, ARRAY_SIZE(est_pwr_lut_core0_rev0), 26, 0, 8},
+ {&est_pwr_lut_core1_rev0, ARRAY_SIZE(est_pwr_lut_core1_rev0), 27, 0, 8},
+ {&adj_pwr_lut_core0_rev0, ARRAY_SIZE(adj_pwr_lut_core0_rev0), 26, 64, 8},
+ {&adj_pwr_lut_core1_rev0, ARRAY_SIZE(adj_pwr_lut_core1_rev0), 27, 64, 8},
+ {&iq_lut_core0_rev0, ARRAY_SIZE(iq_lut_core0_rev0), 26, 320, 32},
+ {&iq_lut_core1_rev0, ARRAY_SIZE(iq_lut_core1_rev0), 27, 320, 32},
+ {&loft_lut_core0_rev0, ARRAY_SIZE(loft_lut_core0_rev0), 26, 448, 16},
+ {&loft_lut_core1_rev0, ARRAY_SIZE(loft_lut_core1_rev0), 27, 448, 16},
};
const struct phytbl_info mimophytbl_info_rev0[] = {
- {&frame_struct_rev0,
- sizeof(frame_struct_rev0) / sizeof(frame_struct_rev0[0]), 10, 0, 32}
- ,
- {&frame_lut_rev0, sizeof(frame_lut_rev0) / sizeof(frame_lut_rev0[0]),
- 24, 0, 8}
- ,
- {&tmap_tbl_rev0, sizeof(tmap_tbl_rev0) / sizeof(tmap_tbl_rev0[0]), 12,
- 0, 32}
- ,
- {&tdtrn_tbl_rev0, sizeof(tdtrn_tbl_rev0) / sizeof(tdtrn_tbl_rev0[0]),
- 14, 0, 32}
- ,
- {&intlv_tbl_rev0, sizeof(intlv_tbl_rev0) / sizeof(intlv_tbl_rev0[0]),
- 13, 0, 32}
- ,
- {&pilot_tbl_rev0, sizeof(pilot_tbl_rev0) / sizeof(pilot_tbl_rev0[0]),
- 11, 0, 16}
- ,
- {&tdi_tbl20_ant0_rev0,
- sizeof(tdi_tbl20_ant0_rev0) / sizeof(tdi_tbl20_ant0_rev0[0]), 19, 128,
- 32}
- ,
- {&tdi_tbl20_ant1_rev0,
- sizeof(tdi_tbl20_ant1_rev0) / sizeof(tdi_tbl20_ant1_rev0[0]), 19, 256,
- 32}
- ,
- {&tdi_tbl40_ant0_rev0,
- sizeof(tdi_tbl40_ant0_rev0) / sizeof(tdi_tbl40_ant0_rev0[0]), 19, 640,
- 32}
- ,
- {&tdi_tbl40_ant1_rev0,
- sizeof(tdi_tbl40_ant1_rev0) / sizeof(tdi_tbl40_ant1_rev0[0]), 19, 768,
- 32}
- ,
- {&chanest_tbl_rev0,
- sizeof(chanest_tbl_rev0) / sizeof(chanest_tbl_rev0[0]), 22, 0, 32}
- ,
- {&mcs_tbl_rev0, sizeof(mcs_tbl_rev0) / sizeof(mcs_tbl_rev0[0]), 18, 0,
- 8}
- ,
- {&noise_var_tbl0_rev0,
- sizeof(noise_var_tbl0_rev0) / sizeof(noise_var_tbl0_rev0[0]), 16, 0,
- 32}
- ,
- {&noise_var_tbl1_rev0,
- sizeof(noise_var_tbl1_rev0) / sizeof(noise_var_tbl1_rev0[0]), 16, 128,
- 32}
- ,
+ {&frame_struct_rev0, ARRAY_SIZE(frame_struct_rev0), 10, 0, 32},
+ {&frame_lut_rev0, ARRAY_SIZE(frame_lut_rev0), 24, 0, 8},
+ {&tmap_tbl_rev0, ARRAY_SIZE(tmap_tbl_rev0), 12, 0, 32},
+ {&tdtrn_tbl_rev0, ARRAY_SIZE(tdtrn_tbl_rev0), 14, 0, 32},
+ {&intlv_tbl_rev0, ARRAY_SIZE(intlv_tbl_rev0), 13, 0, 32},
+ {&pilot_tbl_rev0, ARRAY_SIZE(pilot_tbl_rev0), 11, 0, 16},
+ {&tdi_tbl20_ant0_rev0, ARRAY_SIZE(tdi_tbl20_ant0_rev0), 19, 128, 32},
+ {&tdi_tbl20_ant1_rev0, ARRAY_SIZE(tdi_tbl20_ant1_rev0), 19, 256, 32},
+ {&tdi_tbl40_ant0_rev0, ARRAY_SIZE(tdi_tbl40_ant0_rev0), 19, 640, 32},
+ {&tdi_tbl40_ant1_rev0, ARRAY_SIZE(tdi_tbl40_ant1_rev0), 19, 768, 32},
+ {&chanest_tbl_rev0, ARRAY_SIZE(chanest_tbl_rev0), 22, 0, 32},
+ {&mcs_tbl_rev0, ARRAY_SIZE(mcs_tbl_rev0), 18, 0, 8},
+ {&noise_var_tbl0_rev0, ARRAY_SIZE(noise_var_tbl0_rev0), 16, 0, 32},
+ {&noise_var_tbl1_rev0, ARRAY_SIZE(noise_var_tbl1_rev0), 16, 128, 32},
};
-const u32 mimophytbl_info_sz_rev0 =
- sizeof(mimophytbl_info_rev0) / sizeof(mimophytbl_info_rev0[0]);
-const u32 mimophytbl_info_sz_rev0_volatile =
- sizeof(mimophytbl_info_rev0_volatile) /
- sizeof(mimophytbl_info_rev0_volatile[0]);
+const u32 mimophytbl_info_sz_rev0 = ARRAY_SIZE(mimophytbl_info_rev0);
+const u32 mimophytbl_info_sz_rev0_volatile = ARRAY_SIZE(mimophytbl_info_rev0_volatile);
static const u16 ant_swctrl_tbl_rev3[] = {
0x0082,
@@ -9363,132 +9294,53 @@ static const u32 papd_cal_scalars_tbl_core1_rev3[] = {
};
const struct phytbl_info mimophytbl_info_rev3_volatile[] = {
- {&ant_swctrl_tbl_rev3,
- sizeof(ant_swctrl_tbl_rev3) / sizeof(ant_swctrl_tbl_rev3[0]), 9, 0, 16}
- ,
+ {&ant_swctrl_tbl_rev3, ARRAY_SIZE(ant_swctrl_tbl_rev3), 9, 0, 16},
};
const struct phytbl_info mimophytbl_info_rev3_volatile1[] = {
- {&ant_swctrl_tbl_rev3_1,
- sizeof(ant_swctrl_tbl_rev3_1) / sizeof(ant_swctrl_tbl_rev3_1[0]), 9, 0,
- 16}
- ,
+ {&ant_swctrl_tbl_rev3_1, ARRAY_SIZE(ant_swctrl_tbl_rev3_1), 9, 0, 16},
};
const struct phytbl_info mimophytbl_info_rev3_volatile2[] = {
- {&ant_swctrl_tbl_rev3_2,
- sizeof(ant_swctrl_tbl_rev3_2) / sizeof(ant_swctrl_tbl_rev3_2[0]), 9, 0,
- 16}
- ,
+ {&ant_swctrl_tbl_rev3_2, ARRAY_SIZE(ant_swctrl_tbl_rev3_2), 9, 0, 16},
};
const struct phytbl_info mimophytbl_info_rev3_volatile3[] = {
- {&ant_swctrl_tbl_rev3_3,
- sizeof(ant_swctrl_tbl_rev3_3) / sizeof(ant_swctrl_tbl_rev3_3[0]), 9, 0,
- 16}
- ,
+ {&ant_swctrl_tbl_rev3_3, ARRAY_SIZE(ant_swctrl_tbl_rev3_3), 9, 0, 16},
};
const struct phytbl_info mimophytbl_info_rev3[] = {
- {&frame_struct_rev3,
- sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
- ,
- {&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
- 11, 0, 16}
- ,
- {&tmap_tbl_rev3, sizeof(tmap_tbl_rev3) / sizeof(tmap_tbl_rev3[0]), 12,
- 0, 32}
- ,
- {&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
- 13, 0, 32}
- ,
- {&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
- 14, 0, 32}
- ,
- {&noise_var_tbl_rev3,
- sizeof(noise_var_tbl_rev3) / sizeof(noise_var_tbl_rev3[0]), 16, 0, 32}
- ,
- {&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
- 16}
- ,
- {&tdi_tbl20_ant0_rev3,
- sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
- 32}
- ,
- {&tdi_tbl20_ant1_rev3,
- sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
- 32}
- ,
- {&tdi_tbl40_ant0_rev3,
- sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
- 32}
- ,
- {&tdi_tbl40_ant1_rev3,
- sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
- 32}
- ,
- {&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
- 20, 0, 32}
- ,
- {&chanest_tbl_rev3,
- sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
- ,
- {&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
- 24, 0, 8}
- ,
- {&est_pwr_lut_core0_rev3,
- sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
- 0, 8}
- ,
- {&est_pwr_lut_core1_rev3,
- sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
- 0, 8}
- ,
- {&adj_pwr_lut_core0_rev3,
- sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
- 64, 8}
- ,
- {&adj_pwr_lut_core1_rev3,
- sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
- 64, 8}
- ,
- {&gainctrl_lut_core0_rev3,
- sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
- 26, 192, 32}
- ,
- {&gainctrl_lut_core1_rev3,
- sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
- 27, 192, 32}
- ,
- {&iq_lut_core0_rev3,
- sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
- ,
- {&iq_lut_core1_rev3,
- sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
- ,
- {&loft_lut_core0_rev3,
- sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
- 16}
- ,
- {&loft_lut_core1_rev3,
- sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
- 16}
+ {&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+ {&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+ {&tmap_tbl_rev3, ARRAY_SIZE(tmap_tbl_rev3), 12, 0, 32},
+ {&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+ {&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+ {&noise_var_tbl_rev3, ARRAY_SIZE(noise_var_tbl_rev3), 16, 0, 32},
+ {&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+ {&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+ {&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+ {&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+ {&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+ {&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+ {&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+ {&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+ {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+ {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+ {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+ {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+ {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+ {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+ {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+ {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+ {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+ {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16}
};
-const u32 mimophytbl_info_sz_rev3 =
- sizeof(mimophytbl_info_rev3) / sizeof(mimophytbl_info_rev3[0]);
-const u32 mimophytbl_info_sz_rev3_volatile =
- sizeof(mimophytbl_info_rev3_volatile) /
- sizeof(mimophytbl_info_rev3_volatile[0]);
-const u32 mimophytbl_info_sz_rev3_volatile1 =
- sizeof(mimophytbl_info_rev3_volatile1) /
- sizeof(mimophytbl_info_rev3_volatile1[0]);
-const u32 mimophytbl_info_sz_rev3_volatile2 =
- sizeof(mimophytbl_info_rev3_volatile2) /
- sizeof(mimophytbl_info_rev3_volatile2[0]);
-const u32 mimophytbl_info_sz_rev3_volatile3 =
- sizeof(mimophytbl_info_rev3_volatile3) /
- sizeof(mimophytbl_info_rev3_volatile3[0]);
+const u32 mimophytbl_info_sz_rev3 = ARRAY_SIZE(mimophytbl_info_rev3);
+const u32 mimophytbl_info_sz_rev3_volatile = ARRAY_SIZE(mimophytbl_info_rev3_volatile);
+const u32 mimophytbl_info_sz_rev3_volatile1 = ARRAY_SIZE(mimophytbl_info_rev3_volatile1);
+const u32 mimophytbl_info_sz_rev3_volatile2 = ARRAY_SIZE(mimophytbl_info_rev3_volatile2);
+const u32 mimophytbl_info_sz_rev3_volatile3 = ARRAY_SIZE(mimophytbl_info_rev3_volatile3);
static const u32 tmap_tbl_rev7[] = {
0x8a88aa80,
@@ -10469,162 +10321,58 @@ static const u32 papd_cal_scalars_tbl_core1_rev7[] = {
};
const struct phytbl_info mimophytbl_info_rev7[] = {
- {&frame_struct_rev3,
- sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
- ,
- {&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
- 11, 0, 16}
- ,
- {&tmap_tbl_rev7, sizeof(tmap_tbl_rev7) / sizeof(tmap_tbl_rev7[0]), 12,
- 0, 32}
- ,
- {&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
- 13, 0, 32}
- ,
- {&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
- 14, 0, 32}
- ,
- {&noise_var_tbl_rev7,
- sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
- ,
- {&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
- 16}
- ,
- {&tdi_tbl20_ant0_rev3,
- sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
- 32}
- ,
- {&tdi_tbl20_ant1_rev3,
- sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
- 32}
- ,
- {&tdi_tbl40_ant0_rev3,
- sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
- 32}
- ,
- {&tdi_tbl40_ant1_rev3,
- sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
- 32}
- ,
- {&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
- 20, 0, 32}
- ,
- {&chanest_tbl_rev3,
- sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
- ,
- {&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
- 24, 0, 8}
- ,
- {&est_pwr_lut_core0_rev3,
- sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
- 0, 8}
- ,
- {&est_pwr_lut_core1_rev3,
- sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
- 0, 8}
- ,
- {&adj_pwr_lut_core0_rev3,
- sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
- 64, 8}
- ,
- {&adj_pwr_lut_core1_rev3,
- sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
- 64, 8}
- ,
- {&gainctrl_lut_core0_rev3,
- sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
- 26, 192, 32}
- ,
- {&gainctrl_lut_core1_rev3,
- sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
- 27, 192, 32}
- ,
- {&iq_lut_core0_rev3,
- sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
- ,
- {&iq_lut_core1_rev3,
- sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
- ,
- {&loft_lut_core0_rev3,
- sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
- 16}
- ,
- {&loft_lut_core1_rev3,
- sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
- 16}
- ,
+ {&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+ {&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+ {&tmap_tbl_rev7, ARRAY_SIZE(tmap_tbl_rev7), 12, 0, 32},
+ {&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+ {&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+ {&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+ {&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+ {&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+ {&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+ {&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+ {&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+ {&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+ {&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+ {&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+ {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+ {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+ {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+ {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+ {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+ {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+ {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+ {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+ {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+ {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
{&papd_comp_rfpwr_tbl_core0_rev3,
- sizeof(papd_comp_rfpwr_tbl_core0_rev3) /
- sizeof(papd_comp_rfpwr_tbl_core0_rev3[0]), 26, 576, 16}
- ,
+ ARRAY_SIZE(papd_comp_rfpwr_tbl_core0_rev3), 26, 576, 16},
{&papd_comp_rfpwr_tbl_core1_rev3,
- sizeof(papd_comp_rfpwr_tbl_core1_rev3) /
- sizeof(papd_comp_rfpwr_tbl_core1_rev3[0]), 27, 576, 16}
- ,
+ ARRAY_SIZE(papd_comp_rfpwr_tbl_core1_rev3), 27, 576, 16},
{&papd_comp_epsilon_tbl_core0_rev7,
- sizeof(papd_comp_epsilon_tbl_core0_rev7) /
- sizeof(papd_comp_epsilon_tbl_core0_rev7[0]), 31, 0, 32}
- ,
+ ARRAY_SIZE(papd_comp_epsilon_tbl_core0_rev7), 31, 0, 32},
{&papd_cal_scalars_tbl_core0_rev7,
- sizeof(papd_cal_scalars_tbl_core0_rev7) /
- sizeof(papd_cal_scalars_tbl_core0_rev7[0]), 32, 0, 32}
- ,
+ ARRAY_SIZE(papd_cal_scalars_tbl_core0_rev7), 32, 0, 32},
{&papd_comp_epsilon_tbl_core1_rev7,
- sizeof(papd_comp_epsilon_tbl_core1_rev7) /
- sizeof(papd_comp_epsilon_tbl_core1_rev7[0]), 33, 0, 32}
- ,
+ ARRAY_SIZE(papd_comp_epsilon_tbl_core1_rev7), 33, 0, 32},
{&papd_cal_scalars_tbl_core1_rev7,
- sizeof(papd_cal_scalars_tbl_core1_rev7) /
- sizeof(papd_cal_scalars_tbl_core1_rev7[0]), 34, 0, 32}
- ,
+ ARRAY_SIZE(papd_cal_scalars_tbl_core1_rev7), 34, 0, 32},
};
-const u32 mimophytbl_info_sz_rev7 =
- sizeof(mimophytbl_info_rev7) / sizeof(mimophytbl_info_rev7[0]);
+const u32 mimophytbl_info_sz_rev7 = ARRAY_SIZE(mimophytbl_info_rev7);
const struct phytbl_info mimophytbl_info_rev16[] = {
- {&noise_var_tbl_rev7,
- sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
- ,
- {&est_pwr_lut_core0_rev3,
- sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
- 0, 8}
- ,
- {&est_pwr_lut_core1_rev3,
- sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
- 0, 8}
- ,
- {&adj_pwr_lut_core0_rev3,
- sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
- 64, 8}
- ,
- {&adj_pwr_lut_core1_rev3,
- sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
- 64, 8}
- ,
- {&gainctrl_lut_core0_rev3,
- sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
- 26, 192, 32}
- ,
- {&gainctrl_lut_core1_rev3,
- sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
- 27, 192, 32}
- ,
- {&iq_lut_core0_rev3,
- sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
- ,
- {&iq_lut_core1_rev3,
- sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
- ,
- {&loft_lut_core0_rev3,
- sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
- 16}
- ,
- {&loft_lut_core1_rev3,
- sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
- 16}
- ,
+ {&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+ {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+ {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+ {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+ {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+ {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+ {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+ {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+ {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+ {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+ {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
};
-const u32 mimophytbl_info_sz_rev16 =
- sizeof(mimophytbl_info_rev16) / sizeof(mimophytbl_info_rev16[0]);
+const u32 mimophytbl_info_sz_rev16 = ARRAY_SIZE(mimophytbl_info_rev16);
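
The brcmsmac conversion above is mechanical: every open-coded sizeof(tbl) / sizeof(tbl[0]) element count becomes ARRAY_SIZE(tbl). The two are equivalent at runtime, but the macro (defined in include/linux/kernel.h, shown here for reference) adds a compile-time check that its argument really is an array:

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

__must_be_array() evaluates to 0 for a true array and breaks the build for a pointer, so a table accidentally passed by pointer can no longer yield a bogus size of sizeof(ptr) / sizeof(elem).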
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index ff136f299a0a..e6205eae51fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -9,12 +9,13 @@ iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
+iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
new file mode 100644
index 000000000000..48f6f80eb24b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -0,0 +1,216 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL_22000_UCODE_API_MAX 36
+
+/* Lowest firmware API version supported */
+#define IWL_22000_UCODE_API_MIN 24
+
+/* NVM versions */
+#define IWL_22000_NVM_VERSION 0x0a1d
+#define IWL_22000_TX_POWER_VERSION 0xffff /* meaningless */
+
+/* Memory offsets and lengths */
+#define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */
+#define IWL_22000_DCCM_LEN 0x10000 /* LMAC1 */
+#define IWL_22000_DCCM2_OFFSET 0x880000
+#define IWL_22000_DCCM2_LEN 0x8000
+#define IWL_22000_SMEM_OFFSET 0x400000
+#define IWL_22000_SMEM_LEN 0xD0000
+
+#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+
+#define IWL_22000_HR_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_MODULE_FIRMWARE(api) \
+ IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_22000 10
+
+static const struct iwl_base_params iwl_22000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+ .num_of_queues = 512,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl_22000_ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_22000 \
+ .ucode_api_max = IWL_22000_UCODE_API_MAX, \
+ .ucode_api_min = IWL_22000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_22000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl_22000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
+ .non_shared_ant = ANT_A, \
+ .dccm_offset = IWL_22000_DCCM_OFFSET, \
+ .dccm_len = IWL_22000_DCCM_LEN, \
+ .dccm2_offset = IWL_22000_DCCM2_OFFSET, \
+ .dccm2_len = IWL_22000_DCCM2_LEN, \
+ .smem_offset = IWL_22000_SMEM_OFFSET, \
+ .smem_len = IWL_22000_SMEM_LEN, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .apmg_not_supported = true, \
+ .mq_rx_supported = true, \
+ .vht_mu_mimo_supported = true, \
+ .mac_addr_from_csr = true, \
+ .use_tfh = true, \
+ .rf_id = true, \
+ .gen2 = true, \
+ .nvm_type = IWL_NVM_EXT, \
+ .dbgc_supported = true, \
+ .tx_cmd_queue_size = 32, \
+ .min_umac_error_event_table = 0x400000
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr = {
+ .name = "Intel(R) Dual Band Wireless AC 22000",
+ .fw_name_pre = IWL_22000_HR_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
+ .name = "Intel(R) Dual Band Wireless AC 22000",
+ .fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .cdb = true,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_jf = {
+ .name = "Intel(R) Dual Band Wireless AC 22000",
+ .fw_name_pre = IWL_22000_JF_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_hr = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_F0_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_JF_B0_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_A0_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
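
A note on the firmware-name macros in the new file: they rely on C string-literal concatenation, with __stringify() (from linux/stringify.h) turning the API number into a literal. A sketch of the expansion, assuming the API maximum stays at 36:

	static const char fw_name[] =
		IWL_22000_HR_FW_PRE __stringify(36) ".ucode";
	/* == "iwlwifi-Qu-a0-hr-a0-36.ucode" */

so each MODULE_FIRMWARE() line advertises the exact file name the driver will later pass to request_firmware().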
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 9bb7c19d48eb..3f4d9bac9f73 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 34
-#define IWL8265_UCODE_API_MAX 34
+#define IWL8000_UCODE_API_MAX 36
+#define IWL8265_UCODE_API_MAX 36
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 22
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e7e75b458005..90a1d14cf7d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 34
+#define IWL9000_UCODE_API_MAX 36
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
deleted file mode 100644
index 705f83b02e13..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include "iwl-config.h"
-#include "iwl-agn-hw.h"
-
-/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 34
-
-/* Lowest firmware API version supported */
-#define IWL_A000_UCODE_API_MIN 24
-
-/* NVM versions */
-#define IWL_A000_NVM_VERSION 0x0a1d
-#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */
-
-/* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_A000_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_A000_DCCM2_OFFSET 0x880000
-#define IWL_A000_DCCM2_LEN 0x8000
-#define IWL_A000_SMEM_OFFSET 0x400000
-#define IWL_A000_SMEM_LEN 0xD0000
-
-#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
-#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_A000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_A000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
-
-#define IWL_A000_HR_MODULE_FIRMWARE(api) \
- IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_MODULE_FIRMWARE(api) \
- IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
-
-#define NVM_HW_SECTION_NUM_FAMILY_A000 10
-
-static const struct iwl_base_params iwl_a000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
- .num_of_queues = 512,
- .shadow_ram_support = true,
- .led_compensation = 57,
- .wd_timeout = IWL_LONG_WD_TIMEOUT,
- .max_event_log_size = 512,
- .shadow_reg_enable = true,
- .pcie_l1_allowed = true,
-};
-
-static const struct iwl_ht_params iwl_a000_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
-#define IWL_DEVICE_A000 \
- .ucode_api_max = IWL_A000_UCODE_API_MAX, \
- .ucode_api_min = IWL_A000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_A000, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .base_params = &iwl_a000_base_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_A000, \
- .non_shared_ant = ANT_A, \
- .dccm_offset = IWL_A000_DCCM_OFFSET, \
- .dccm_len = IWL_A000_DCCM_LEN, \
- .dccm2_offset = IWL_A000_DCCM2_OFFSET, \
- .dccm2_len = IWL_A000_DCCM2_LEN, \
- .smem_offset = IWL_A000_SMEM_OFFSET, \
- .smem_len = IWL_A000_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .apmg_not_supported = true, \
- .mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = true, \
- .use_tfh = true, \
- .rf_id = true, \
- .gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .tx_cmd_queue_size = 32, \
- .min_umac_error_event_table = 0x400000
-
-const struct iwl_cfg iwla000_2ac_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_HR_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
- .name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .cdb = true,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_jf = {
- .name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_JF_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_HR_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0 = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_HR_F0_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0 = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_JF_B0_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0 = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_HR_A0_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 3684a3e180e5..007bfe7656a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -95,8 +95,8 @@ enum {
#define IWL_ALIVE_FLG_RFKILL BIT(0)
struct iwl_lmac_alive {
- __le32 ucode_minor;
__le32 ucode_major;
+ __le32 ucode_minor;
u8 ver_subtype;
u8 ver_type;
u8 mac;
@@ -113,8 +113,8 @@ struct iwl_lmac_alive {
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
struct iwl_umac_alive {
- __le32 umac_minor; /* UMAC version: minor */
__le32 umac_major; /* UMAC version: major */
+ __le32 umac_minor; /* UMAC version: minor */
__le32 error_info_addr; /* SRAM address for UMAC error log */
__le32 dbg_print_buff_addr;
} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index d09555afe2c5..87c1ddea75ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -188,11 +188,6 @@ enum iwl_bt_mxbox_dw3 {
BT_MBOX(3, UPDATE_REQUEST, 21, 1),
};
-enum iwl_bt_mxbox_dw4 {
- BT_MBOX(4, ATS_BT_INTERVAL, 0, 7),
- BT_MBOX(4, ATS_BT_ACTIVE_MAX_TH, 7, 7),
-};
-
#define BT_MBOX_MSG(_notif, _num, _field) \
((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
>> BT_MBOX##_num##_##_field##_POS)
@@ -232,31 +227,6 @@ enum iwl_bt_ci_compliance {
* @reserved: reserved
*/
struct iwl_bt_coex_profile_notif {
- __le32 mbox_msg[8];
- __le32 msg_idx;
- __le32 bt_ci_compliance;
-
- __le32 primary_ch_lut;
- __le32 secondary_ch_lut;
- __le32 bt_activity_grading;
- u8 ttc_status;
- u8 rrc_status;
- __le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_5 */
-
-/**
- * struct iwl_bt_coex_profile_notif - notification about BT coex
- * @mbox_msg: message from BT to WiFi
- * @msg_idx: the index of the message
- * @bt_ci_compliance: enum %iwl_bt_ci_compliance
- * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type
- * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type
- * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
- * @ttc_status: is TTC enabled - one bit per PHY
- * @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
- */
-struct iwl_bt_coex_profile_notif_v4 {
__le32 mbox_msg[4];
__le32 msg_idx;
__le32 bt_ci_compliance;
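
The BT_MBOX_MSG() accessor retained in the coex.h hunk works by token-pasting the dword number and field name into the corresponding mask and shift constants. A minimal usage sketch (notif is assumed to be a struct iwl_bt_coex_profile_notif pointer):

	/* read the UPDATE_REQUEST field from dword 3 of the notification */
	u32 update_req = BT_MBOX_MSG(notif, 3, UPDATE_REQUEST);
	/* expands to:
	 * (le32_to_cpu(notif->mbox_msg[3]) & BT_MBOX3_UPDATE_REQUEST)
	 *	>> BT_MBOX3_UPDATE_REQUEST_POS
	 */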
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 7ebbf097488b..f285bacc8726 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -215,7 +215,7 @@ enum iwl_legacy_cmds {
/**
* @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
* &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
- * for newer (A000) hardware.
+ * for newer (22000) hardware.
*/
SCD_QUEUE_CFG = 0x1d,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index aa76dcc148bd..a57c7223df0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -83,6 +83,21 @@ enum iwl_data_path_subcmd_ids {
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
/**
+ * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+ */
+ TLC_MNG_CONFIG_CMD = 0xF,
+
+ /**
+ * @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd
+ */
+ TLC_MNG_NOTIF_REQ_CMD = 0x10,
+
+ /**
+ * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
+ */
+ TLC_MNG_UPDATE_NOTIF = 0xF7,
+
+ /**
* @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
*/
STA_PM_NOTIF = 0xFD,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 0a81fb1b6ed4..106782341544 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -250,10 +250,12 @@ struct iwl_mfu_assert_dump_notif {
 * The ids for the different types of markers to insert into the usniffer logs
*
* @MARKER_ID_TX_FRAME_LATENCY: TX latency marker
+ * @MARKER_ID_SYNC_CLOCK: sync FW time and systime
*/
enum iwl_mvm_marker_id {
MARKER_ID_TX_FRAME_LATENCY = 1,
-}; /* MARKER_ID_API_E_VER_1 */
+ MARKER_ID_SYNC_CLOCK = 2,
+}; /* MARKER_ID_API_E_VER_2 */
/**
* struct iwl_mvm_marker - mark info into the usniffer logs
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index ec42c84e5df2..17c7ef1662a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -68,6 +68,10 @@
*/
enum iwl_mac_conf_subcmd_ids {
/**
+ * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd
+ */
+ LOW_LATENCY_CMD = 0x3,
+ /**
* @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
*/
CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
@@ -82,4 +86,19 @@ struct iwl_channel_switch_noa_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+/**
+ * struct iwl_mac_low_latency_cmd - set/clear a MAC's low-latency mode
+ *
+ * @mac_id: MAC ID to which to apply the low-latency configurations
+ * @low_latency_rx: 1/0 to set/clear Rx low latency direction
+ * @low_latency_tx: 1/0 to set/clear Tx low latency direction
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_mac_low_latency_cmd {
+ __le32 mac_id;
+ u8 low_latency_rx;
+ u8 low_latency_tx;
+ __le16 reserved;
+} __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
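
As a hedged sketch of how a caller might fill in the new low-latency command (illustrative only; mac_id and the sending path are assumptions, not part of this patch):

	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id),	/* MAC to (re)configure */
		.low_latency_rx = 1,		/* set Rx low-latency */
		.low_latency_tx = 1,		/* set Tx low-latency */
	};
	/* sent as LOW_LATENCY_CMD in the MAC_CONF_GROUP command group */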
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index a13fd8a1be62..e49a6f7be613 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -62,6 +62,267 @@
#include "mac.h"
+/**
+ * enum iwl_tlc_mng_cfg_flags - options for TLC config flags
+ * @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support
+ * @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD
+ * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
+ * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER
+ * @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM
+ */
+enum iwl_tlc_mng_cfg_flags {
+ IWL_TLC_MNG_CFG_FLAGS_CCK_MSK = BIT(0),
+ IWL_TLC_MNG_CFG_FLAGS_DD_MSK = BIT(1),
+ IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(2),
+ IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(3),
+ IWL_TLC_MNG_CFG_FLAGS_BF_MSK = BIT(4),
+ IWL_TLC_MNG_CFG_FLAGS_DCM_MSK = BIT(5),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_cw - channel width options
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value
+ */
+enum iwl_tlc_mng_cfg_cw {
+ IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ,
+ IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ,
+ IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
+ IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+ IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_chains - possible chains
+ * @IWL_TLC_MNG_CHAIN_A_MSK: chain A
+ * @IWL_TLC_MNG_CHAIN_B_MSK: chain B
+ * @IWL_TLC_MNG_CHAIN_C_MSK: chain C
+ */
+enum iwl_tlc_mng_cfg_chains {
+ IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
+ IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
+ IWL_TLC_MNG_CHAIN_C_MSK = BIT(2),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_gi - guard interval options
+ * @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ
+ * @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ
+ * @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ
+ * @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ
+ */
+enum iwl_tlc_mng_cfg_gi {
+ IWL_TLC_MNG_SGI_20MHZ_MSK = BIT(0),
+ IWL_TLC_MNG_SGI_40MHZ_MSK = BIT(1),
+ IWL_TLC_MNG_SGI_80MHZ_MSK = BIT(2),
+ IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_mode - supported modes
+ * @IWL_TLC_MNG_MODE_CCK: enable CCK
+ * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
+ * @IWL_TLC_MNG_MODE_NON_HT: enable non HT
+ * @IWL_TLC_MNG_MODE_HT: enable HT
+ * @IWL_TLC_MNG_MODE_VHT: enable VHT
+ * @IWL_TLC_MNG_MODE_HE: enable HE
+ * @IWL_TLC_MNG_MODE_INVALID: invalid value
+ * @IWL_TLC_MNG_MODE_NUM: a count of possible modes
+ */
+enum iwl_tlc_mng_cfg_mode {
+ IWL_TLC_MNG_MODE_CCK = 0,
+ IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
+ IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
+ IWL_TLC_MNG_MODE_HT,
+ IWL_TLC_MNG_MODE_VHT,
+ IWL_TLC_MNG_MODE_HE,
+ IWL_TLC_MNG_MODE_INVALID,
+ IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
+};
+
+/**
+ * enum iwl_tlc_mng_vht_he_types - VHT HE types
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT/HE single user
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT/HE single user extended
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT/HE multiple users
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types
+ */
+enum iwl_tlc_mng_vht_he_types {
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0,
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT,
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU,
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM =
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+};
+
+/**
+ * enum iwl_tlc_mng_ht_rates - HT/VHT rates
+ * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
+ * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
+ * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
+ * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3
+ * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4
+ * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5
+ * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6
+ * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7
+ * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8
+ * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
+ */
+enum iwl_tlc_mng_ht_rates {
+ IWL_TLC_MNG_HT_RATE_MCS0 = 0,
+ IWL_TLC_MNG_HT_RATE_MCS1,
+ IWL_TLC_MNG_HT_RATE_MCS2,
+ IWL_TLC_MNG_HT_RATE_MCS3,
+ IWL_TLC_MNG_HT_RATE_MCS4,
+ IWL_TLC_MNG_HT_RATE_MCS5,
+ IWL_TLC_MNG_HT_RATE_MCS6,
+ IWL_TLC_MNG_HT_RATE_MCS7,
+ IWL_TLC_MNG_HT_RATE_MCS8,
+ IWL_TLC_MNG_HT_RATE_MCS9,
+ IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9,
+};
+
+/* Maximum supported tx antennas number */
+#define MAX_RS_ANT_NUM 3
+
+/**
+ * struct iwl_tlc_config_cmd - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_supp_ch_width: channel width
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @max_supp_ss: valid values are 0-3; 0 means spatial streams are not supported
+ * @valid_vht_he_types: bitmap of &enum iwl_tlc_mng_vht_he_types
+ * @non_ht_supp_rates: bitmap of supported legacy rates
+ * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @reserved2: reserved
+ * @he_supp_rates: bitmap of supported HE rates
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * @he_gi_support: 11ax HE guard interval
+ * @max_ampdu_cnt: max AMPDU size (frames count)
+ */
+struct iwl_tlc_config_cmd {
+ u8 sta_id;
+ u8 reserved1[3];
+ u8 max_supp_ch_width;
+ u8 chains;
+ u8 max_supp_ss;
+ u8 valid_vht_he_types;
+ __le16 flags;
+ __le16 non_ht_supp_rates;
+ __le16 ht_supp_rates[MAX_RS_ANT_NUM];
+ u8 mode;
+ u8 reserved2;
+ __le16 he_supp_rates;
+ u8 sgi_ch_width_supp;
+ u8 he_gi_support;
+ __le32 max_ampdu_cnt;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */
+
+#define IWL_TLC_NOTIF_INIT_RATE_POS 0
+#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS)
+#define IWL_TLC_NOTIF_REQ_INTERVAL (500)
+
+/**
+ * struct iwl_tlc_notif_req_config_cmd - request notif on specific changes
+ * @sta_id: relevant station
+ * @reserved1: reserved
+ * @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\*
+ * @interval: minimum time between notifications from TLC to the driver (msec)
+ * @reserved2: reserved
+ */
+struct iwl_tlc_notif_req_config_cmd {
+ u8 sta_id;
+ u8 reserved1;
+ __le16 flags;
+ __le16 interval;
+ __le16 reserved2;
+} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tlc_update_notif - TLC notification from FW
+ * @sta_id: station id
+ * @reserved: reserved
+ * @flags: bitmap of notifications reported
+ * @values: field per flag in struct iwl_tlc_notif_req_config_cmd
+ */
+struct iwl_tlc_update_notif {
+ u8 sta_id;
+ u8 reserved;
+ __le16 flags;
+ __le32 values[16];
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */
+
+/**
+ * enum iwl_tlc_debug_flags - debug options
+ * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
+ * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in
+ * frames
+ * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the
+ * driver, in msec
+ * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session
+ * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frames with a try-count greater than this
+ *	threshold should not start an aggregation session
+ * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation
+ * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA
+ * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session
+ * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling
+ */
+enum iwl_tlc_debug_flags {
+ IWL_TLC_DEBUG_FIXED_RATE,
+ IWL_TLC_DEBUG_STATS_TH,
+ IWL_TLC_DEBUG_STATS_TIME_TH,
+ IWL_TLC_DEBUG_AGG_TIME_LIM,
+ IWL_TLC_DEBUG_AGG_DIS_START_TH,
+ IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
+ IWL_TLC_DEBUG_RENEW_ADDBA_DELAY,
+ IWL_TLC_DEBUG_START_AC_RATE_IDX,
+ IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK,
+}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */
+
+/**
+ * struct iwl_dhc_tlc_cmd - fixed debug config
+ * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
+ * @reserved1: reserved
+ * @flags: bitmap of %IWL_TLC_DEBUG_\*
+ * @fixed_rate: rate value
+ * @stats_threshold: if number of tx-ed frames is greater, send statistics
+ * @time_threshold: statistics threshold in usec
+ * @agg_time_lim: max agg time
+ * @agg_dis_start_threshold: frames with a try-count greater than this will
+ * not be aggregated
+ * @agg_frame_count_lim: agg size
+ * @addba_retry_delay: delay between retries of ADD BA
+ * @start_ac_rate_idx: frames per second to start a BA session
+ * @no_far_range_tweak: disable BW scaling
+ * @reserved2: reserved
+ */
+struct iwl_dhc_tlc_cmd {
+ u8 sta_id;
+ u8 reserved1[3];
+ __le32 flags;
+ __le32 fixed_rate;
+ __le16 stats_threshold;
+ __le16 time_threshold;
+ __le16 agg_time_lim;
+ __le16 agg_dis_start_threshold;
+ __le16 agg_frame_count_lim;
+ __le16 addba_retry_delay;
+ u8 start_ac_rate_idx[IEEE80211_NUM_ACS];
+ u8 no_far_range_tweak;
+ u8 reserved2[3];
+} __packed;
+
/*
* These serve as indexes into
* struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
@@ -253,7 +514,6 @@ enum {
#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \
RATE_MCS_ANT_C_MSK)
#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK
-#define RATE_MCS_ANT_NUM 3
/* Bit 17: (0) SS, (1) SS*2 */
#define RATE_MCS_STBC_POS 17
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index f5d5ba7e37ec..a2a40b515a3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -121,7 +121,7 @@ enum iwl_tx_flags {
}; /* TX_FLAGS_BITS_API_S_VER_1 */
/**
- * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000
* @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
* @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
* to a secured STA
@@ -301,7 +301,7 @@ struct iwl_dram_sec_info {
} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
/**
- * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 8106fd4be996..67aefc8fc9ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -964,7 +964,20 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
if (trigger)
delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
- if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW,
+ /*
+ * If the loading of the FW completed successfully, the next step is to
+	 * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is
+	 * non-zero, the FW was already loaded successfully. If the state is "NO_FW"
+ * in such a case - WARN and exit, since FW may be dead. Otherwise, we
+ * can try to collect the data, since FW might just not be fully
+ * loaded (no "ALIVE" yet), and the debug data is accessible.
+ *
+ * Corner case: got the FW alive but crashed before getting the SMEM
+ * config. In such a case, due to HW access problems, we might
+ * collect garbage.
+ */
+ if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
+ fwrt->smem_cfg.num_lmacs,
"Can't collect dbg data when FW isn't alive\n"))
return -EIO;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
new file mode 100644
index 000000000000..e2ded29a145d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -0,0 +1,195 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "api/commands.h"
+#include "debugfs.h"
+
+#define FWRT_DEBUGFS_READ_FILE_OPS(name) \
+static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \
+ char *buf, size_t count, \
+ loff_t *ppos); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
+static ssize_t iwl_dbgfs_##name##_write(struct iwl_fw_runtime *fwrt, \
+ char *buf, size_t count, \
+ loff_t *ppos); \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct iwl_fw_runtime *fwrt = file->private_data; \
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return iwl_dbgfs_##name##_write(fwrt, buf, buf_size, ppos); \
+}
+
+#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, fwrt, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
+ FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_mvm_marker marker = {
+ .dw_len = sizeof(struct iwl_mvm_marker) / 4,
+ .marker_id = MARKER_ID_SYNC_CLOCK,
+
+ /* the real timestamp is taken from the ftrace clock
+ * this is for finding the match between fw and kernel logs
+ */
+ .timestamp = cpu_to_le64(fwrt->timestamp.seq++),
+ };
+
+ struct iwl_host_cmd hcmd = {
+ .id = MARKER_CMD,
+ .flags = CMD_ASYNC,
+ .data[0] = &marker,
+ .len[0] = sizeof(marker),
+ };
+
+ return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+static void iwl_fw_timestamp_marker_wk(struct work_struct *work)
+{
+ int ret;
+ struct iwl_fw_runtime *fwrt =
+ container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
+ unsigned long delay = fwrt->timestamp.delay;
+
+ ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
+ if (!ret && delay)
+ schedule_delayed_work(&fwrt->timestamp.wk,
+ round_jiffies_relative(delay));
+ else
+ IWL_INFO(fwrt,
+ "stopping timestamp_marker, ret: %d, delay: %u\n",
+ ret, jiffies_to_msecs(delay) / 1000);
+}
+
+static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int ret;
+ u32 delay;
+
+ ret = kstrtou32(buf, 10, &delay);
+ if (ret < 0)
+ return ret;
+
+ IWL_INFO(fwrt,
+ "starting timestamp_marker trigger with delay: %us\n",
+ delay);
+
+ iwl_fw_cancel_timestamp(fwrt);
+
+ fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
+
+ schedule_delayed_work(&fwrt->timestamp.wk,
+ round_jiffies_relative(fwrt->timestamp.delay));
+ return count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10);
+
+int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir)
+{
+ INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
+ FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, S_IWUSR);
+ return 0;
+err:
+ IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
+ return -ENOMEM;
+}
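
The FWRT_DEBUGFS_* macros in the new debugfs.c generate all of the file_operations glue from a single name. For instance, FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10) above expands to roughly:

	static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
							char *buf, size_t count,
							loff_t *ppos);
	static ssize_t _iwl_dbgfs_timestamp_marker_write(struct file *file,
							 const char __user *user_buf,
							 size_t count, loff_t *ppos)
	{
		struct iwl_fw_runtime *fwrt = file->private_data;
		char buf[10] = {};	/* 9 characters of input plus the NUL */
		size_t buf_size = min(count, sizeof(buf) - 1);

		if (copy_from_user(buf, user_buf, buf_size))
			return -EFAULT;

		return iwl_dbgfs_timestamp_marker_write(fwrt, buf, buf_size, ppos);
	}
	static const struct file_operations iwl_dbgfs_timestamp_marker_ops = {
		.write = _iwl_dbgfs_timestamp_marker_write,
		.open = simple_open,
		.llseek = generic_file_llseek,
	};

Writing a decimal number of seconds to the resulting debugfs file arms the periodic MARKER_CMD; writing 0 sends one marker and then stops, since the zero delay makes the work handler take its "stopping" branch.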
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
new file mode 100644
index 000000000000..e57ff92a68ae
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -0,0 +1,87 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "runtime.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir);
+
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ fwrt->timestamp.delay = 0;
+ cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+#else
+static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir)
+{
+ return 0;
+}
+
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 37a5c5b4eda6..1a05d506ac9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -246,10 +246,10 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
 * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
* @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
* @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
- * @IWL_UCODE_TLV_API_ATS_COEX_EXTERNAL: the coex notification is enlared to
- * include information about ACL time sharing.
* @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
* indicating low latency direction.
+ * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is
+ * deprecated.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -267,8 +267,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
- IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,
IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -313,6 +313,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
* @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
+ * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
+ * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota-related logic
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -367,6 +369,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41,
+ IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
@@ -541,7 +545,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
} __packed;
/**
- * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
+ * struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data
*
* @version: version of the TLV - currently 0
* @monitor_mode: &enum iwl_fw_dbg_monitor_mode
@@ -556,7 +560,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
*
* This parses IWL_UCODE_TLV_FW_DBG_DEST
*/
-struct iwl_fw_dbg_dest_tlv {
+struct iwl_fw_dbg_dest_tlv_v1 {
u8 version;
u8 monitor_mode;
u8 size_power;
@@ -570,6 +574,26 @@ struct iwl_fw_dbg_dest_tlv {
struct iwl_fw_dbg_reg_op reg_ops[0];
} __packed;
+/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
+#define IWL_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000
+/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */
+#define IWL_LDBG_M2S_BUF_BA_MSK 0x00000fff
+/* The smem buffer chunks are in units of 256 bits */
+#define IWL_M2S_UNIT_SIZE 0x100
+
+struct iwl_fw_dbg_dest_tlv {
+ u8 version;
+ u8 monitor_mode;
+ u8 size_power;
+ u8 reserved;
+ __le32 cfg_reg;
+ __le32 write_ptr_reg;
+ __le32 wrap_count;
+ u8 base_shift;
+ u8 size_shift;
+ struct iwl_fw_dbg_reg_op reg_ops[0];
+} __packed;
+
struct iwl_fw_dbg_conf_hcmd {
u8 id;
u8 reserved;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 985496cc01d0..b23ffe12ad84 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -284,7 +284,7 @@ struct iwl_fw {
struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
- struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index bfe5316bbb6a..c39fe84bb4c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -58,10 +58,12 @@
#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
+#include "debugfs.h"
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct iwl_fw_runtime_ops *ops, void *ops_ctx)
+ const struct iwl_fw *fw,
+ const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+ struct dentry *dbgfs_dir)
{
memset(fwrt, 0, sizeof(*fwrt));
fwrt->trans = trans;
@@ -71,5 +73,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->ops = ops;
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
+ iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
+
+void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt)
+{
+ iwl_fw_cancel_timestamp(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 50cfb6d795a5..e25c049f980f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -134,11 +134,21 @@ struct iwl_fw_runtime {
/* ts of the beginning of a non-collect fw dbg data period */
unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
} dump;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct {
+ struct delayed_work wk;
+ u32 delay;
+ u64 seq;
+ } timestamp;
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct iwl_fw_runtime_ops *ops, void *ops_ctx);
+ const struct iwl_fw *fw,
+ const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+ struct dentry *dbgfs_dir);
+
+void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
enum iwl_ucode_type cur_fw_img)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index 76675736ba4f..fb4b6442b4d7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -63,8 +63,8 @@
#include "runtime.h"
#include "fw/api/commands.h"
-static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_packet *pkt)
+static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
+ struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
int i, lmac;
@@ -143,8 +143,8 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
return;
pkt = cmd.resp_pkt;
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
- iwl_parse_shared_mem_a000(fwrt, pkt);
+ if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ iwl_parse_shared_mem_22000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index e21e46cf6f9a..258d439bb0a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -89,7 +89,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_7000,
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
- IWL_DEVICE_FAMILY_A000,
+ IWL_DEVICE_FAMILY_22000,
};
/*
@@ -266,7 +266,7 @@ struct iwl_tt_params {
#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
#define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000
-#define OTP_LOW_IMAGE_SIZE_FAMILY_A000 OTP_LOW_IMAGE_SIZE_FAMILY_9000
+#define OTP_LOW_IMAGE_SIZE_FAMILY_22000 OTP_LOW_IMAGE_SIZE_FAMILY_9000
struct iwl_eeprom_params {
const u8 regulatory_bands[7];
@@ -330,7 +330,7 @@ struct iwl_pwr_tx_backoff {
* @vht_mu_mimo_supported: VHT MU-MIMO support
* @rf_id: need to read rf_id to determine the firmware image
* @integrated: discrete or integrated
- * @gen2: a000 and on transport operation
+ * @gen2: 22000 and on transport operation
* @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
* @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
@@ -477,13 +477,13 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
-extern const struct iwl_cfg iwla000_2ac_cfg_jf;
-extern const struct iwl_cfg iwla000_2ax_cfg_hr;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
+extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index 7f16dcce0995..9518a82f44c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -95,7 +95,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len),
TP_STRUCT__entry(
DEV_ENTRY
-
+ __field(void *, skbaddr)
__field(size_t, framelen)
__dynamic_array(u8, tfd, tfdlen)
@@ -110,6 +110,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
),
TP_fast_assign(
DEV_ASSIGN;
+ __entry->skbaddr = skb;
__entry->framelen = buf0_len;
if (hdr_len > 0)
__entry->framelen += skb->len - hdr_len;
@@ -120,9 +121,9 @@ TRACE_EVENT(iwlwifi_dev_tx,
__get_dynamic_array(buf1),
skb->len - hdr_len);
),
- TP_printk("[%s] TX %.2x (%zu bytes)",
+ TP_printk("[%s] TX %.2x (%zu bytes) skbaddr=%p",
__get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
- __entry->framelen)
+ __entry->framelen, __entry->skbaddr)
);
TRACE_EVENT(iwlwifi_dev_ucode_error,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4b224d7d967c..9c4a7f648a44 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -296,7 +296,12 @@ struct iwl_firmware_pieces {
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
/* FW debug data parsed for driver usage */
- struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ bool dbg_dest_tlv_init;
+ u8 *dbg_dest_ver;
+ union {
+ struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+ };
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
@@ -919,27 +924,60 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
minor = le32_to_cpup(ptr++);
local_comp = le32_to_cpup(ptr);
- snprintf(drv->fw.fw_version,
- sizeof(drv->fw.fw_version), "%u.%u.%u",
- major, minor, local_comp);
+ if (major >= 35)
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%08x.%u", major, minor, local_comp);
+ else
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%u.%u", major, minor, local_comp);
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
- struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
+ struct iwl_fw_dbg_dest_tlv *dest = NULL;
+ struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+ u8 mon_mode;
+
+ pieces->dbg_dest_ver = (u8 *)tlv_data;
+ if (*pieces->dbg_dest_ver == 1) {
+ dest = (void *)tlv_data;
+ } else if (*pieces->dbg_dest_ver == 0) {
+ dest_v1 = (void *)tlv_data;
+ } else {
+ IWL_ERR(drv,
+ "The version is %d, and it is invalid\n",
+ *pieces->dbg_dest_ver);
+ break;
+ }
- if (pieces->dbg_dest_tlv) {
+ if (pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
"dbg destination ignored, already exists\n");
break;
}
- pieces->dbg_dest_tlv = dest;
+ pieces->dbg_dest_tlv_init = true;
+
+ if (dest_v1) {
+ pieces->dbg_dest_tlv_v1 = dest_v1;
+ mon_mode = dest_v1->monitor_mode;
+ } else {
+ pieces->dbg_dest_tlv = dest;
+ mon_mode = dest->monitor_mode;
+ }
+
IWL_INFO(drv, "Found debug destination: %s\n",
- get_fw_dbg_mode_string(dest->monitor_mode));
+ get_fw_dbg_mode_string(mon_mode));
+
+ drv->fw.dbg_dest_reg_num = (dest_v1) ?
+ tlv_len -
+ offsetof(struct iwl_fw_dbg_dest_tlv_v1,
+ reg_ops) :
+ tlv_len -
+ offsetof(struct iwl_fw_dbg_dest_tlv,
+ reg_ops);
- drv->fw.dbg_dest_reg_num =
- tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv,
- reg_ops);
drv->fw.dbg_dest_reg_num /=
sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
@@ -948,7 +986,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_FW_DBG_CONF: {
struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
- if (!pieces->dbg_dest_tlv) {
+ if (!pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
"Ignore dbg config %d - no destination configured\n",
conf->id);
@@ -1335,15 +1373,51 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (iwl_alloc_ucode(drv, pieces, i))
goto out_free_fw;
- if (pieces->dbg_dest_tlv) {
- drv->fw.dbg_dest_tlv =
- kmemdup(pieces->dbg_dest_tlv,
- sizeof(*pieces->dbg_dest_tlv) +
- sizeof(pieces->dbg_dest_tlv->reg_ops[0]) *
- drv->fw.dbg_dest_reg_num, GFP_KERNEL);
+ if (pieces->dbg_dest_tlv_init) {
+ size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
+ sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
+ drv->fw.dbg_dest_reg_num;
+
+ drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
if (!drv->fw.dbg_dest_tlv)
goto out_free_fw;
+
+ if (*pieces->dbg_dest_ver == 0) {
+ memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
+ dbg_dest_size);
+ } else {
+ struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
+ drv->fw.dbg_dest_tlv;
+
+ dest_tlv->version = pieces->dbg_dest_tlv->version;
+ dest_tlv->monitor_mode =
+ pieces->dbg_dest_tlv->monitor_mode;
+ dest_tlv->size_power =
+ pieces->dbg_dest_tlv->size_power;
+ dest_tlv->wrap_count =
+ pieces->dbg_dest_tlv->wrap_count;
+ dest_tlv->write_ptr_reg =
+ pieces->dbg_dest_tlv->write_ptr_reg;
+ dest_tlv->base_shift =
+ pieces->dbg_dest_tlv->base_shift;
+ memcpy(dest_tlv->reg_ops,
+ pieces->dbg_dest_tlv->reg_ops,
+ sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
+ drv->fw.dbg_dest_reg_num);
+
+ /* In version 1 of the destination TLV, which is
+ * used exclusively for the internal buffer, the
+ * base address is given combined with the length
+ * of the buffer, and a size shift is given instead
+ * of an end shift. We store these values in base_reg
+ * and end_shift, and when dumping the data we derive
+ * both the length and the base address from them.
+ */
+ dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg;
+ dest_tlv->end_shift =
+ pieces->dbg_dest_tlv->size_shift;
+ }
}
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
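
The register-op count above is plain TLV-length arithmetic: strip the
fixed header with offsetof(), then divide by the size of one op. Spelled
out as a standalone sketch (variable names are illustrative):

	/* Illustrative restatement of the dbg_dest_reg_num computation. */
	size_t hdr_len = dest_v1 ?
		offsetof(struct iwl_fw_dbg_dest_tlv_v1, reg_ops) :
		offsetof(struct iwl_fw_dbg_dest_tlv, reg_ops);
	size_t n_reg_ops = (tlv_len - hdr_len) /
			   sizeof(struct iwl_fw_dbg_reg_op);
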
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 66e5db41e559..11789ffb6512 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -121,7 +121,7 @@
#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
-/* a000 TFD table address, 64 bit */
+/* 22000 TFD table address, 64 bit */
#define TFH_TFDQ_CBB_TABLE (0x1C00)
/* Find TFD CB base pointer for given queue */
@@ -140,7 +140,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
}
-/* a000 configuration registers */
+/* 22000 configuration registers */
/*
* TFH Configuration register.
@@ -697,8 +697,8 @@ struct iwl_tfh_tb {
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
* contiguous 256 TFDs.
- * For pre a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
- * For a000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
+ * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes
*
* Driver must indicate the physical address of the base of each
* circular buffer via the FH_MEM_CBBC_QUEUE registers.
@@ -750,10 +750,10 @@ struct iwl_tfh_tfd {
/**
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
- * For devices up to a000:
+ * For devices up to 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
- * For a000 and on:
+ * For 22000 and on:
* @tfd_offset 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
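
Per the layout comment, a 22000-family byte-count entry keeps the byte
count in the low 12 bits and the 64-byte-chunk count in bits 12-13. A
hedged packing sketch; the helper and masks are inferred from the
comment only, and what exactly the chunk field counts is firmware
defined:

	/* Inferred from the layout comment above; not part of this patch. */
	static inline __le16 iwl_22000_bc_entry(u16 byte_cnt, u16 num_chunks)
	{
		return cpu_to_le16((byte_cnt & 0xfff) |
				   ((num_chunks & 0x3) << 12));
	}
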
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 921cab9e2d73..c25ed1a0bbb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -551,7 +551,7 @@ struct iwl_trans_ops {
unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
- /* a000 functions */
+ /* 22000 functions */
int (*txq_alloc)(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
int cmd_id,
@@ -579,6 +579,7 @@ struct iwl_trans_ops {
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
+ void (*sw_reset)(struct iwl_trans *trans);
bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
void (*release_nic_access)(struct iwl_trans *trans,
unsigned long *flags);
@@ -744,7 +745,7 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
- const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num;
@@ -1124,6 +1125,12 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
trans->ops->set_pmi(trans, state);
}
+static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
+{
+ if (trans->ops->sw_reset)
+ trans->ops->sw_reset(trans);
+}
+
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index a47635c32c11..9ffd21918b5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
-iwlmvm-y += scan.o time-event.o rs.o
+iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 79c80f181f7d..890dbfff3a06 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -7,7 +7,6 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +33,6 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -514,36 +512,17 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
- if (!iwl_mvm_has_new_ats_coex_api(mvm)) {
- struct iwl_bt_coex_profile_notif_v4 *v4 = (void *)pkt->data;
-
- mvm->last_bt_notif.mbox_msg[0] = v4->mbox_msg[0];
- mvm->last_bt_notif.mbox_msg[1] = v4->mbox_msg[1];
- mvm->last_bt_notif.mbox_msg[2] = v4->mbox_msg[2];
- mvm->last_bt_notif.mbox_msg[3] = v4->mbox_msg[3];
- mvm->last_bt_notif.msg_idx = v4->msg_idx;
- mvm->last_bt_notif.bt_ci_compliance = v4->bt_ci_compliance;
- mvm->last_bt_notif.primary_ch_lut = v4->primary_ch_lut;
- mvm->last_bt_notif.secondary_ch_lut = v4->secondary_ch_lut;
- mvm->last_bt_notif.bt_activity_grading =
- v4->bt_activity_grading;
- mvm->last_bt_notif.ttc_status = v4->ttc_status;
- mvm->last_bt_notif.rrc_status = v4->rrc_status;
- } else {
- /* save this notification for future use: rssi fluctuations */
- memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
- }
-
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
- IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n",
- mvm->last_bt_notif.bt_ci_compliance);
+ IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
- le32_to_cpu(mvm->last_bt_notif.primary_ch_lut));
+ le32_to_cpu(notif->primary_ch_lut));
IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
- le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut));
+ le32_to_cpu(notif->secondary_ch_lut));
IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading));
+ le32_to_cpu(notif->bt_activity_grading));
+ /* remember this notification for future use: rssi fluctuations */
+ memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
iwl_mvm_bt_coex_notif_handle(mvm);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b1f73dcabd31..0e6cf39285f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -429,231 +429,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return err;
}
-enum iwl_mvm_tcp_packet_type {
- MVM_TCP_TX_SYN,
- MVM_TCP_RX_SYNACK,
- MVM_TCP_TX_DATA,
- MVM_TCP_RX_ACK,
- MVM_TCP_RX_WAKE,
- MVM_TCP_TX_FIN,
-};
-
-static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
-{
- __sum16 check = tcp_v4_check(len, saddr, daddr, 0);
- return cpu_to_le16(be16_to_cpu((__force __be16)check));
-}
-
-static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
- struct cfg80211_wowlan_tcp *tcp,
- void *_pkt, u8 *mask,
- __le16 *pseudo_hdr_csum,
- enum iwl_mvm_tcp_packet_type ptype)
-{
- struct {
- struct ethhdr eth;
- struct iphdr ip;
- struct tcphdr tcp;
- u8 data[];
- } __packed *pkt = _pkt;
- u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
- int i;
-
- pkt->eth.h_proto = cpu_to_be16(ETH_P_IP),
- pkt->ip.version = 4;
- pkt->ip.ihl = 5;
- pkt->ip.protocol = IPPROTO_TCP;
-
- switch (ptype) {
- case MVM_TCP_TX_SYN:
- case MVM_TCP_TX_DATA:
- case MVM_TCP_TX_FIN:
- memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
- memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
- pkt->ip.ttl = 128;
- pkt->ip.saddr = tcp->src;
- pkt->ip.daddr = tcp->dst;
- pkt->tcp.source = cpu_to_be16(tcp->src_port);
- pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
- /* overwritten for TX SYN later */
- pkt->tcp.doff = sizeof(struct tcphdr) / 4;
- pkt->tcp.window = cpu_to_be16(65000);
- break;
- case MVM_TCP_RX_SYNACK:
- case MVM_TCP_RX_ACK:
- case MVM_TCP_RX_WAKE:
- memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
- memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
- pkt->ip.saddr = tcp->dst;
- pkt->ip.daddr = tcp->src;
- pkt->tcp.source = cpu_to_be16(tcp->dst_port);
- pkt->tcp.dest = cpu_to_be16(tcp->src_port);
- break;
- default:
- WARN_ON(1);
- return;
- }
-
- switch (ptype) {
- case MVM_TCP_TX_SYN:
- /* firmware assumes 8 option bytes - 8 NOPs for now */
- memset(pkt->data, 0x01, 8);
- ip_tot_len += 8;
- pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
- pkt->tcp.syn = 1;
- break;
- case MVM_TCP_TX_DATA:
- ip_tot_len += tcp->payload_len;
- memcpy(pkt->data, tcp->payload, tcp->payload_len);
- pkt->tcp.psh = 1;
- pkt->tcp.ack = 1;
- break;
- case MVM_TCP_TX_FIN:
- pkt->tcp.fin = 1;
- pkt->tcp.ack = 1;
- break;
- case MVM_TCP_RX_SYNACK:
- pkt->tcp.syn = 1;
- pkt->tcp.ack = 1;
- break;
- case MVM_TCP_RX_ACK:
- pkt->tcp.ack = 1;
- break;
- case MVM_TCP_RX_WAKE:
- ip_tot_len += tcp->wake_len;
- pkt->tcp.psh = 1;
- pkt->tcp.ack = 1;
- memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
- break;
- }
-
- switch (ptype) {
- case MVM_TCP_TX_SYN:
- case MVM_TCP_TX_DATA:
- case MVM_TCP_TX_FIN:
- pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
- pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
- break;
- case MVM_TCP_RX_WAKE:
- for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
- u8 tmp = tcp->wake_mask[i];
- mask[i + 6] |= tmp << 6;
- if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
- mask[i + 7] = tmp >> 2;
- }
- /* fall through for ethernet/IP/TCP headers mask */
- case MVM_TCP_RX_SYNACK:
- case MVM_TCP_RX_ACK:
- mask[0] = 0xff; /* match ethernet */
- /*
- * match ethernet, ip.version, ip.ihl
- * the ip.ihl half byte is really masked out by firmware
- */
- mask[1] = 0x7f;
- mask[2] = 0x80; /* match ip.protocol */
- mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
- mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
- mask[5] = 0x80; /* match tcp flags */
- /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
- break;
- };
-
- *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
- pkt->ip.saddr, pkt->ip.daddr);
-}
-
-static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_wowlan_tcp *tcp)
-{
- struct iwl_wowlan_remote_wake_config *cfg;
- struct iwl_host_cmd cmd = {
- .id = REMOTE_WAKE_CONFIG_CMD,
- .len = { sizeof(*cfg), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- int ret;
-
- if (!tcp)
- return 0;
-
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
- cmd.data[0] = cfg;
-
- cfg->max_syn_retries = 10;
- cfg->max_data_retries = 10;
- cfg->tcp_syn_ack_timeout = 1; /* seconds */
- cfg->tcp_ack_timeout = 1; /* seconds */
-
- /* SYN (TX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->syn_tx.data, NULL,
- &cfg->syn_tx.info.tcp_pseudo_header_checksum,
- MVM_TCP_TX_SYN);
- cfg->syn_tx.info.tcp_payload_length = 0;
-
- /* SYN/ACK (RX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
- &cfg->synack_rx.info.tcp_pseudo_header_checksum,
- MVM_TCP_RX_SYNACK);
- cfg->synack_rx.info.tcp_payload_length = 0;
-
- /* KEEPALIVE/ACK (TX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->keepalive_tx.data, NULL,
- &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
- MVM_TCP_TX_DATA);
- cfg->keepalive_tx.info.tcp_payload_length =
- cpu_to_le16(tcp->payload_len);
- cfg->sequence_number_offset = tcp->payload_seq.offset;
- /* length must be 0..4, the field is little endian */
- cfg->sequence_number_length = tcp->payload_seq.len;
- cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
- cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
- if (tcp->payload_tok.len) {
- cfg->token_offset = tcp->payload_tok.offset;
- cfg->token_length = tcp->payload_tok.len;
- cfg->num_tokens =
- cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
- memcpy(cfg->tokens, tcp->payload_tok.token_stream,
- tcp->tokens_size);
- } else {
- /* set tokens to max value to almost never run out */
- cfg->num_tokens = cpu_to_le16(65535);
- }
-
- /* ACK (RX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->keepalive_ack_rx.data,
- cfg->keepalive_ack_rx.rx_mask,
- &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
- MVM_TCP_RX_ACK);
- cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
-
- /* WAKEUP (RX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
- &cfg->wake_rx.info.tcp_pseudo_header_checksum,
- MVM_TCP_RX_WAKE);
- cfg->wake_rx.info.tcp_payload_length =
- cpu_to_le16(tcp->wake_len);
-
- /* FIN */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->fin_tx.data, NULL,
- &cfg->fin_tx.info.tcp_pseudo_header_checksum,
- MVM_TCP_TX_FIN);
- cfg->fin_tx.info.tcp_payload_length = 0;
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- kfree(cfg);
-
- return ret;
-}
-
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *ap_sta)
{
@@ -1082,12 +857,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (ret)
return ret;
- ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
- if (ret)
- return ret;
-
- ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
- return ret;
+ return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
}
static int
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 2ff594f11259..a7892c1254a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -425,6 +425,50 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+ struct iwl_mvm *mvm = lq_sta->pers.drv;
+ static const size_t bufsz = 2048;
+ char *buff;
+ int desc = 0;
+ ssize_t ret;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n",
+ lq_sta->pers.sta_id);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "fixed rate 0x%X\n",
+ lq_sta->pers.dbg_fixed_rate);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "A-MPDU size limit %d\n",
+ lq_sta->pers.dbg_agg_frame_count_lim);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "valid_tx_ant %s%s%s\n",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "last tx rate=0x%X ",
+ lq_sta->last_rate_n_flags);
+
+ desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
+ lq_sta->last_rate_n_flags);
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -470,8 +514,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
}
static
-int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
- struct iwl_bt_coex_profile_notif *notif, char *buf,
+int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
int pos, int bufsz)
{
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
@@ -525,12 +568,7 @@ int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
BT_MBOX_PRINT(3, INBAND_P, false);
BT_MBOX_PRINT(3, MSG_TYPE_2, false);
BT_MBOX_PRINT(3, SSN_2, false);
- BT_MBOX_PRINT(3, UPDATE_REQUEST, !iwl_mvm_has_new_ats_coex_api(mvm));
-
- if (iwl_mvm_has_new_ats_coex_api(mvm)) {
- BT_MBOX_PRINT(4, ATS_BT_INTERVAL, false);
- BT_MBOX_PRINT(4, ATS_BT_ACTIVE_MAX_TH, true);
- }
+ BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
return pos;
}
@@ -549,7 +587,7 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- pos += iwl_mvm_coex_dump_mbox(mvm, notif, buf, pos, bufsz);
+ pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
notif->bt_ci_compliance);
@@ -721,6 +759,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
mutex_lock(&mvm->mutex);
+ if (iwl_mvm_firmware_running(mvm))
+ iwl_mvm_request_statistics(mvm, false);
+
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - OFDM");
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
@@ -936,7 +977,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
continue;
pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
(int)(ARRAY_SIZE(stats->last_rates) - i));
- pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
+ pos += rs_pretty_print_rate(pos, endpos - pos,
+ stats->last_rates[idx]);
}
spin_unlock_bh(&mvm->drv_stats_lock);
@@ -1179,7 +1221,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
loff_t *ppos)
{
struct iwl_trans *trans = mvm->trans;
- const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
struct iwl_continuous_record_cmd cont_rec = {};
int ret, rec_mode;
@@ -1603,6 +1645,19 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+
+#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, sta, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
+
static ssize_t
iwl_dbgfs_prph_reg_read(struct file *file,
char __user *user_buf,
@@ -1687,6 +1742,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_READ_FILE_OPS(rs_data);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
@@ -1851,6 +1907,21 @@ static const struct file_operations iwl_dbgfs_mem_ops = {
.llseek = default_llseek,
};
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (iwl_mvm_has_tlc_offload(mvm))
+ MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, S_IRUSR);
+
+ return;
+err:
+ IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
+}
+
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
struct dentry *bcast_dir __maybe_unused;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index c0de7bb86cf7..0920be637b57 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -297,7 +297,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -923,11 +923,11 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
ret = iwl_run_init_mvm_ucode(mvm, false);
- if (iwlmvm_mod_params.init_dbg)
- return 0;
-
if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+
+ if (iwlmvm_mod_params.init_dbg)
+ return 0;
return ret;
}
@@ -998,9 +998,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Init RSS configuration */
- /* TODO - remove a000 disablement when we have RXQ config API */
+ /* TODO - remove 22000 disablement when we have RXQ config API */
if (iwl_mvm_has_new_rx_api(mvm) &&
- mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) {
+ mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1111,7 +1111,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
- if (!iwlmvm_mod_params.init_dbg)
+ if (!iwlmvm_mod_params.init_dbg || !ret)
iwl_mvm_stop_device(mvm);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3e92a117c0b8..8aed40a8bc38 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -114,29 +114,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
},
};
-#ifdef CONFIG_PM_SLEEP
-static const struct nl80211_wowlan_tcp_data_token_feature
-iwl_mvm_wowlan_tcp_token_feature = {
- .min_len = 0,
- .max_len = 255,
- .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
-};
-
-static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
- .tok = &iwl_mvm_wowlan_tcp_token_feature,
- .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
- sizeof(struct ethhdr) -
- sizeof(struct iphdr) -
- sizeof(struct tcphdr),
- .data_interval_max = 65535, /* __le16 in API */
- .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
- sizeof(struct ethhdr) -
- sizeof(struct iphdr) -
- sizeof(struct tcphdr),
- .seq = true,
-};
-#endif
-
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
/*
* Use the reserved field to indicate magic values.
@@ -443,6 +420,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ }
+
if (iwl_mvm_has_new_rx_api(mvm))
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -477,7 +460,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
/* this is the case for CCK frames, it's better (only 8) for OFDM */
hw->radiotap_timestamp.accuracy = 22;
- hw->rate_control_algorithm = "iwl-mvm-rs";
+ if (!iwl_mvm_has_tlc_offload(mvm))
+ hw->rate_control_algorithm = RS_NAME;
+
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -702,7 +687,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
- mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
hw->wiphy->wowlan = &mvm->wowlan;
}
#endif
@@ -3216,6 +3200,10 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
duration, type);
+ /*
+ * Flush the done work, just in case it's still pending, so that
+ * the work it does can complete and we can accept new frames.
+ */
flush_work(&mvm->roc_done_wk);
mutex_lock(&mvm->mutex);
@@ -3813,7 +3801,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
mvm->noa_duration = noa_duration;
mvm->noa_vif = vif;
- return iwl_mvm_update_quotas(mvm, false, NULL);
+ return iwl_mvm_update_quotas(mvm, true, NULL);
case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
/* must be associated client vif - ignore authorized */
if (!vif || vif->type != NL80211_IFTYPE_STATION ||
@@ -4301,7 +4289,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
mvm->trans->num_rx_queues);
/* TODO - remove this when we have RXQ config API */
- if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
+ if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
qmask = BIT(0);
if (notif->sync)
atomic_set(&mvm->queue_sync_counter, 1);
@@ -4414,4 +4402,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
#endif
.get_survey = iwl_mvm_mac_get_survey,
.sta_statistics = iwl_mvm_mac_sta_statistics,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .sta_add_debugfs = iwl_mvm_sta_add_debugfs,
+#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 55ab5349dd40..2d28e0804218 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1248,7 +1248,7 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
{
/* TODO - better define this */
- return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000;
+ return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
}
static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
@@ -1272,16 +1272,16 @@ static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_NEW_RX_STATS);
}
-static inline bool iwl_mvm_has_new_ats_coex_api(struct iwl_mvm *mvm)
+static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL);
+ IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
}
-static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
+static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
{
- return fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
}
static inline struct agg_tx_status *
@@ -1600,9 +1600,9 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
-int rs_pretty_print_rate(char *buf, const u32 rate);
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
+ struct iwl_mvm_sta *mvmsta,
struct ieee80211_rx_status *rx_status);
/* power management */
@@ -1882,5 +1882,11 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir);
+#endif
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 45470b6b351a..5d525a0023dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -127,11 +127,8 @@ static int __init iwl_mvm_init(void)
}
ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
-
- if (ret) {
+ if (ret)
pr_err("Unable to register MVM op_mode: %d\n", ret);
- iwl_mvm_rate_control_unregister();
- }
return ret;
}
@@ -605,7 +602,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw = fw;
mvm->hw = hw;
- iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm);
+ iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
+ dbgfs_dir);
mvm->init_status = 0;
@@ -750,7 +748,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
- if (!iwlmvm_mod_params.init_dbg)
+ if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
mutex_unlock(&mvm->mutex);
@@ -804,6 +802,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
out_free:
+ iwl_fw_runtime_exit(&mvm->fwrt);
iwl_fw_flush_dump(&mvm->fwrt);
if (iwlmvm_mod_params.init_dbg)
@@ -844,7 +843,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
#endif
-
+ iwl_fw_runtime_exit(&mvm->fwrt);
iwl_trans_op_mode_leave(mvm->trans);
iwl_phy_db_free(mvm->phy_db);
@@ -1021,6 +1020,8 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+ else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF))
+ iwl_mvm_tlc_update_notif(mvm, pkt);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index b4a0264329b7..690559bdf421 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -202,6 +202,10 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
+ return 0;
+
/* update all upon completion */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
new file mode 100644
index 000000000000..55d1274c6092
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -0,0 +1,314 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
+{
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ;
+ case IEEE80211_STA_RX_BW_80:
+ return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ;
+ case IEEE80211_STA_RX_BW_40:
+ return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ;
+ }
+}
+
+static u8 rs_fw_set_active_chains(u8 chains)
+{
+ u8 fw_chains = 0;
+
+ if (chains & ANT_A)
+ fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+ if (chains & ANT_B)
+ fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+ if (chains & ANT_C)
+ fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK;
+
+ return fw_chains;
+}
+
+static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ u8 supp = 0;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ supp |= IWL_TLC_MNG_SGI_20MHZ_MSK;
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ supp |= IWL_TLC_MNG_SGI_40MHZ_MSK;
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ supp |= IWL_TLC_MNG_SGI_80MHZ_MSK;
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ supp |= IWL_TLC_MNG_SGI_160MHZ_MSK;
+
+ return supp;
+}
+
+static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ bool vht_ena = vht_cap && vht_cap->vht_supported;
+ u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK |
+ IWL_TLC_MNG_CFG_FLAGS_DCM_MSK |
+ IWL_TLC_MNG_CFG_FLAGS_DD_MSK;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+ (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+
+ if (mvm->cfg->ht_params->ldpc &&
+ ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+ (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK;
+
+ return flags;
+}
+
+static
+int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ switch (rx_mcs) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ return IWL_TLC_MNG_HT_RATE_MCS7;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ return IWL_TLC_MNG_HT_RATE_MCS8;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ return IWL_TLC_MNG_HT_RATE_MCS9;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
+
+static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta,
+ struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ u16 supp;
+ int i, highest_mcs;
+
+ for (i = 0; i < sta->rx_nss; i++) {
+ if (i == MAX_RS_ANT_NUM)
+ break;
+
+ highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+ if (!highest_mcs)
+ continue;
+
+ supp = BIT(highest_mcs + 1) - 1;
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+ cmd->ht_supp_rates[i] = cpu_to_le16(supp);
+ }
+}
+
+static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
+ struct ieee80211_supported_band *sband,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ int i;
+ unsigned long tmp;
+ unsigned long supp; /* must be unsigned long for for_each_set_bit */
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+ /* non HT rates */
+ supp = 0;
+ tmp = sta->supp_rates[sband->band];
+ for_each_set_bit(i, &tmp, BITS_PER_LONG)
+ supp |= BIT(sband->bitrates[i].hw_value);
+
+ cmd->non_ht_supp_rates = cpu_to_le16(supp);
+ cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+ /* HT/VHT rates */
+ if (vht_cap && vht_cap->vht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_VHT;
+ rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
+ } else if (ht_cap && ht_cap->ht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_HT;
+ cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+ cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ }
+}
+
+static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id)
+{
+ u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0);
+ struct iwl_tlc_notif_req_config_cmd cfg_cmd = {
+ .sta_id = sta_id,
+ .flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK),
+ .interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret);
+}
+
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+{
+ struct iwl_tlc_update_notif *notif;
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_lq_sta_rs_fw *lq_sta;
+
+ notif = (void *)pkt->data;
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id);
+
+ if (!mvmsta) {
+ IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+ notif->sta_id);
+ return;
+ }
+
+ lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) {
+ lq_sta->last_rate_n_flags =
+ le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]);
+ IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
+ lq_sta->last_rate_n_flags);
+ }
+}
+
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+ u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+ struct ieee80211_supported_band *sband;
+ struct iwl_tlc_config_cmd cfg_cmd = {
+ .sta_id = mvmsta->sta_id,
+ .max_supp_ch_width = rs_fw_bw_from_sta_bw(sta),
+ .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+ .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
+ .max_supp_ss = sta->rx_nss,
+ .max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize),
+ .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+ };
+ int ret;
+
+ memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_reset_frame_stats(mvm);
+#endif
+ sband = hw->wiphy->bands[band];
+ rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
+
+ rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id);
+}
+
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
+{
+ /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
+ IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
+ return 0;
+}
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta)
+{
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+ lq_sta->pers.drv = mvm;
+ lq_sta->pers.sta_id = mvmsta->sta_id;
+ lq_sta->pers.chains = 0;
+ memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+ lq_sta->pers.last_rssi = S8_MIN;
+ lq_sta->last_rate_n_flags = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->pers.dbg_fixed_rate = 0;
+#endif
+}
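
rs_fw_vht_highest_rx_mcs_index() relies on the VHT rx_mcs_map packing
two bits per spatial stream, and rs_fw_vht_set_enabled_rates() turns the
highest index into a contiguous bitmap with BIT(highest_mcs + 1) - 1. A
worked example of that arithmetic (the map value is illustrative):

	/* rx_mcs_map = 0xfff6: NSS1 field = 2 (MCS 0-9), NSS2 field = 1
	 * (MCS 0-8), all higher streams marked not supported (3). */
	u16 map = 0xfff6;
	u16 nss1 = (map >> 0) & 0x3;		/* 2 -> SUPPORT_0_9 */
	u16 nss2 = (map >> 2) & 0x3;		/* 1 -> SUPPORT_0_8 */
	u16 supp_nss1 = BIT(9 + 1) - 1;		/* 0x3ff: MCS 0-9 enabled */
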
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index c69515ed72df..60abb0084ee5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -42,8 +42,6 @@
#include "mvm.h"
#include "debugfs.h"
-#define RS_NAME "iwl-mvm-rs"
-
#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
/* Calculations of success ratio are done in fixed point where 12800 is 100%.
@@ -809,7 +807,7 @@ static int rs_collect_tlc_data(struct iwl_mvm *mvm,
return -EINVAL;
if (tbl->column != RS_COLUMN_INVALID) {
- struct lq_sta_pers *pers = &mvmsta->lq_sta.pers;
+ struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers;
pers->tx_stats[tbl->column][scale_index].total += attempts;
pers->tx_stats[tbl->column][scale_index].success += successes;
@@ -1206,7 +1204,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1416,13 +1414,13 @@ done:
/*
* mac80211 sends us Tx status
*/
-static void rs_mac80211_tx_status(void *mvm_r,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
+static void rs_drv_mac80211_tx_status(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
+ struct iwl_op_mode *op_mode = mvm_r;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1877,12 +1875,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
struct rs_rate *rate = &search_tbl->rate;
const struct rs_tx_column *column = &rs_tx_columns[col_id];
const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
unsigned long rate_mask = 0;
u32 rate_idx = 0;
- memcpy(search_tbl, tbl, sz);
+ memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
rate->sgi = column->sgi;
rate->ant = column->ant;
@@ -2787,9 +2783,10 @@ out:
/* Save info about RSSI of last Rx */
void rs_update_last_rssi(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
+ struct iwl_mvm_sta *mvmsta,
struct ieee80211_rx_status *rx_status)
{
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
int i;
lq_sta->pers.chains = rx_status->chains;
@@ -2858,15 +2855,15 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
}
-static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
- struct ieee80211_tx_rate_control *txrc)
+static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+ void *mvm_sta,
+ struct ieee80211_tx_rate_control *txrc)
{
- struct sk_buff *skb = txrc->skb;
- struct iwl_op_mode *op_mode __maybe_unused =
- (struct iwl_op_mode *)mvm_r;
+ struct iwl_op_mode *op_mode = mvm_r;
struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+ struct sk_buff *skb = txrc->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl_lq_sta *lq_sta = mvm_sta;
+ struct iwl_lq_sta *lq_sta;
struct rs_rate *optimal_rate;
u32 last_ucode_rate;
@@ -2878,18 +2875,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
mvm_sta = NULL;
}
- /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (lq_sta && !lq_sta->pers.drv) {
- IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
- mvm_sta = NULL;
- }
-
/* Send management frames and NO_ACK data using lowest rate. */
if (rate_control_send_low(sta, mvm_sta, txrc))
return;
+ if (!mvm_sta)
+ return;
+
+ lq_sta = mvm_sta;
iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
info->band, &info->control.rates[0]);
info->control.rates[0].count = 1;
@@ -2906,13 +2899,13 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
}
}
-static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
- gfp_t gfp)
+static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+ gfp_t gfp)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
@@ -2926,7 +2919,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
lq_sta->pers.last_rssi = S8_MIN;
- return &mvmsta->lq_sta;
+ return lq_sta;
}
static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
@@ -3043,7 +3036,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
-static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
{
spin_lock_bh(&mvm->drv_stats_lock);
memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
@@ -3111,15 +3104,15 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
/*
* Called after adding a new station to initialize rate scaling
*/
-void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum nl80211_band band, bool init)
+static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool init)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
struct ieee80211_supported_band *sband;
unsigned long supp; /* must be unsigned long for for_each_set_bit */
@@ -3194,16 +3187,15 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
rs_initialize_lq(mvm, sta, lq_sta, band, init);
}
-static void rs_rate_update(void *mvm_r,
- struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
+static void rs_drv_rate_update(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta,
+ void *priv_sta, u32 changed)
{
+ struct iwl_op_mode *op_mode = mvm_r;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
u8 tid;
- struct iwl_op_mode *op_mode =
- (struct iwl_op_mode *)mvm_r;
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
if (!iwl_mvm_sta_from_mac80211(sta)->vif)
return;
@@ -3385,7 +3377,7 @@ static void rs_bfer_active_iter(void *_data,
{
struct rs_bfer_active_iter_data *data = _data;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
+ struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq;
u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
if (sta == data->exclude_sta)
@@ -3497,7 +3489,8 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
/* Disallow BFER on another STA if active and we're a higher priority */
if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
- struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
+ struct iwl_lq_cmd *bfersta_lq_cmd =
+ &bfer_mvmsta->lq_sta.rs_drv.lq;
u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
@@ -3569,14 +3562,14 @@ static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
return hw->priv;
}
+
/* rate scale requires free function to be implemented */
static void rs_free(void *mvm_rate)
{
return;
}
-static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
- void *mvm_sta)
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
{
struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
@@ -3586,7 +3579,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-int rs_pretty_print_rate(char *buf, const u32 rate)
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type, *bw;
@@ -3597,10 +3590,10 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
!(rate & RATE_MCS_VHT_MSK)) {
int index = iwl_hwrate_to_plcp_idx(rate);
- return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
- rs_pretty_ant(ant),
- index == IWL_RATE_INVALID ? "BAD" :
- iwl_rate_mcs[index].mbps);
+ return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
+ rs_pretty_ant(ant),
+ index == IWL_RATE_INVALID ? "BAD" :
+ iwl_rate_mcs[index].mbps);
}
if (rate & RATE_MCS_VHT_MSK) {
@@ -3634,12 +3627,13 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
bw = "BAD BW";
}
- return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
- type, rs_pretty_ant(ant), bw, mcs, nss,
- (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
- (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
- (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
- (rate & RATE_MCS_BF_MSK) ? "BF " : "");
+ return scnprintf(buf, bufsz,
+ "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+ type, rs_pretty_ant(ant), bw, mcs, nss,
+ (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
/**
@@ -3696,65 +3690,70 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
int desc = 0;
int i = 0;
ssize_t ret;
+ static const size_t bufsz = 2048;
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_mvm_sta *mvmsta =
- container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+ container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
struct iwl_mvm *mvm;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct rs_rate *rate = &tbl->rate;
u32 ss_params;
mvm = lq_sta->pers.drv;
- buff = kmalloc(2048, GFP_KERNEL);
+ buff = kmalloc(bufsz, GFP_KERNEL);
if (!buff)
return -ENOMEM;
- desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
- desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
- lq_sta->total_failed, lq_sta->total_success,
- lq_sta->active_legacy_rate);
- desc += sprintf(buff+desc, "fixed rate 0x%X\n",
- lq_sta->pers.dbg_fixed_rate);
- desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "sta_id %d\n", lq_sta->lq.sta_id);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "failed=%d success=%d rate=0%lX\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n",
+ lq_sta->pers.dbg_fixed_rate);
+ desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
(iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
(iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
(iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
- desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(rate)) ? "legacy" :
- is_vht(rate) ? "VHT" : "HT");
+ desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n",
+ (is_legacy(rate)) ? "legacy" :
+ is_vht(rate) ? "VHT" : "HT");
if (!is_legacy(rate)) {
- desc += sprintf(buff + desc, " %s",
+ desc += scnprintf(buff + desc, bufsz - desc, " %s",
(is_siso(rate)) ? "SISO" : "MIMO2");
- desc += sprintf(buff + desc, " %s",
+ desc += scnprintf(buff + desc, bufsz - desc, " %s",
(is_ht20(rate)) ? "20MHz" :
(is_ht40(rate)) ? "40MHz" :
(is_ht80(rate)) ? "80MHz" :
(is_ht160(rate)) ? "160MHz" : "BAD BW");
- desc += sprintf(buff + desc, " %s %s %s %s\n",
+ desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n",
(rate->sgi) ? "SGI" : "NGI",
(rate->ldpc) ? "LDPC" : "BCC",
(lq_sta->is_agg) ? "AGG on" : "",
(mvmsta->tlc_amsdu) ? "AMSDU on" : "");
}
- desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+ desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
- desc += sprintf(buff+desc,
+ desc += scnprintf(buff + desc, bufsz - desc,
"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
lq_sta->lq.flags,
lq_sta->lq.mimo_delim,
lq_sta->lq.single_stream_ant_msk,
lq_sta->lq.dual_stream_ant_msk);
- desc += sprintf(buff+desc,
+ desc += scnprintf(buff + desc, bufsz - desc,
"agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
le16_to_cpu(lq_sta->lq.agg_time_limit),
lq_sta->lq.agg_disable_start_th,
lq_sta->lq.agg_frame_cnt_limit);
- desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
+ desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n",
+ lq_sta->lq.reduced_tpc);
ss_params = le32_to_cpu(lq_sta->lq.ss_params);
- desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "single stream params: %s%s%s%s\n",
(ss_params & LQ_SS_PARAMS_VALID) ?
"VALID" : "INVALID",
(ss_params & LQ_SS_BFER_ALLOWED) ?
@@ -3763,7 +3762,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
", STBC" : "",
(ss_params & LQ_SS_FORCE) ?
", FORCE" : "");
- desc += sprintf(buff+desc,
+ desc += scnprintf(buff + desc, bufsz - desc,
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
lq_sta->lq.initial_rate_index[0],
lq_sta->lq.initial_rate_index[1],
@@ -3773,8 +3772,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
- desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
- desc += rs_pretty_print_rate(buff+desc, r);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ " rate[%d] 0x%X ", i, r);
+ desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
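
The hunk above replaces every unbounded sprintf() in the debugfs dump with scnprintf(buff + desc, bufsz - desc, ...), so each append is limited to the space that remains in the buffer. A minimal userspace sketch of the same idiom follows; my_scnprintf is a hypothetical stand-in for the kernel's scnprintf(), which returns the bytes actually written rather than the would-be length that plain snprintf() reports:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's scnprintf(): returns the number of bytes
 * actually written (excluding the NUL), never more than size - 1, so
 * the accumulated offset can never run past the end of the buffer. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        if (size == 0)
                return 0;
        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (i < 0)
                return 0;
        return i >= (int)size ? (int)size - 1 : i;
}

int main(void)
{
        static const size_t bufsz = 64;        /* deliberately small */
        char *buff = malloc(bufsz);
        int desc = 0;

        if (!buff)
                return 1;
        desc += my_scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", 3);
        desc += my_scnprintf(buff + desc, bufsz - desc,
                             "failed=%d success=%d\n", 7, 1024);
        fwrite(buff, 1, desc, stdout);
        free(buff);
        return 0;
}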
@@ -3987,12 +3987,13 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
-static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
+static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
+ struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = priv_sta;
struct iwl_mvm_sta *mvmsta;
- mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+ mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
if (!mvmsta->vif)
return;
@@ -4014,7 +4015,7 @@ err:
IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
}
-static void rs_remove_debugfs(void *mvm, void *mvm_sta)
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
{
}
#endif
@@ -4024,50 +4025,53 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
* the station is added. Since mac80211 calls this function before a
* station is added we ignore it.
*/
-static void rs_rate_init_stub(void *mvm_r,
- struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *mvm_sta)
+static void rs_rate_init_ops(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *mvm_sta)
{
}
-static const struct rate_control_ops rs_mvm_ops = {
+/* ops for rate scaling implemented in the driver */
+static const struct rate_control_ops rs_mvm_ops_drv = {
.name = RS_NAME,
- .tx_status = rs_mac80211_tx_status,
- .get_rate = rs_get_rate,
- .rate_init = rs_rate_init_stub,
+ .tx_status = rs_drv_mac80211_tx_status,
+ .get_rate = rs_drv_get_rate,
+ .rate_init = rs_rate_init_ops,
.alloc = rs_alloc,
.free = rs_free,
- .alloc_sta = rs_alloc_sta,
+ .alloc_sta = rs_drv_alloc_sta,
.free_sta = rs_free_sta,
- .rate_update = rs_rate_update,
+ .rate_update = rs_drv_rate_update,
#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = rs_add_debugfs,
- .remove_sta_debugfs = rs_remove_debugfs,
+ .add_sta_debugfs = rs_drv_add_sta_debugfs,
+ .remove_sta_debugfs = rs_remove_sta_debugfs,
#endif
};
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool init)
+{
+ if (iwl_mvm_has_tlc_offload(mvm))
+ rs_fw_rate_init(mvm, sta, band);
+ else
+ rs_drv_rate_init(mvm, sta, band, init);
+}
+
int iwl_mvm_rate_control_register(void)
{
- return ieee80211_rate_control_register(&rs_mvm_ops);
+ return ieee80211_rate_control_register(&rs_mvm_ops_drv);
}
void iwl_mvm_rate_control_unregister(void)
{
- ieee80211_rate_control_unregister(&rs_mvm_ops);
+ ieee80211_rate_control_unregister(&rs_mvm_ops_drv);
}
-/**
- * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
- * Tx protection, according to this request and previous requests,
- * and send the LQ command.
- * @mvmsta: The station
- * @enable: Enable Tx protection?
- */
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
- bool enable)
+static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
{
- struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+ struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq;
lockdep_assert_held(&mvm->mutex);
@@ -4083,3 +4087,17 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
return iwl_mvm_send_lq_cmd(mvm, lq, false);
}
+
+/**
+ * iwl_mvm_tx_protection - ask FW to enable RTS/CTS protection
+ * @mvm: The mvm component
+ * @mvmsta: The station
+ * @enable: Enable Tx protection?
+ */
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
+{
+ if (iwl_mvm_has_tlc_offload(mvm))
+ return rs_fw_tx_protection(mvm, mvmsta, enable);
+ else
+ return rs_drv_tx_protection(mvm, mvmsta, enable);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 32b4d66debea..fb18cb8c233d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -36,6 +36,8 @@
#include "fw-api.h"
#include "iwl-trans.h"
+#define RS_NAME "iwl-mvm-rs"
+
struct iwl_rs_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
@@ -218,6 +220,38 @@ struct iwl_rate_mcs_info {
};
/**
+ * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
+ * @last_rate_n_flags: last rate reported by FW
+ * @sta_id: the id of the station
+#ifdef CONFIG_MAC80211_DEBUGFS
+ * @dbg_fixed_rate: for debug, use fixed rate if not 0
+ * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU
+#endif
+ * @chains: bitmask of chains reported in %chain_signal
+ * @chain_signal: per chain signal strength
+ * @last_rssi: last rssi reported
+ * @drv: pointer back to the driver data
+ */
+struct iwl_lq_sta_rs_fw {
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+
+ /* persistent fields - initialized only once - keep last! */
+ struct lq_sta_pers_rs_fw {
+ u32 sta_id;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ u32 dbg_fixed_rate;
+ u16 dbg_agg_frame_count_lim;
+#endif
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 last_rssi;
+ struct iwl_mvm *drv;
+ } pers;
+};
+
+/**
* struct iwl_rate_scale_data -- tx success history for one rate
*/
struct iwl_rate_scale_data {
@@ -407,4 +441,18 @@ struct iwl_mvm_sta;
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
+#endif
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band);
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable);
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt);
#endif /* __rs__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index d1a40688d5e1..d26833c5ce1f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -222,7 +222,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
/* Don't drop the frame and decrypt it in SW */
- if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+ !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
*crypt_len = IEEE80211_TKIP_IV_LEN;
/* fall through if TTAK OK */
@@ -383,7 +385,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
false);
}
- rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+ rs_update_last_rssi(mvm, mvmsta, rx_status);
if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
ieee80211_is_beacon(hdr->frame_control)) {
@@ -439,7 +441,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->bw = RATE_INFO_BW_160;
break;
}
- if (rate_n_flags & RATE_MCS_SGI_MSK)
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+ rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 3b8d44361380..a3f7c1bf3cc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -261,7 +261,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
return 0;
case IWL_RX_MPDU_STATUS_SEC_TKIP:
/* Don't drop the frame and decrypt it in SW */
- if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+ !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
*crypt_len = IEEE80211_TKIP_IV_LEN;
@@ -943,7 +945,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
false);
}
- rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+ rs_update_last_rssi(mvm, mvmsta, rx_status);
if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
ieee80211_is_beacon(hdr->frame_control)) {
@@ -1020,7 +1022,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->bw = RATE_INFO_BW_160;
break;
}
- if (rate_n_flags & RATE_MCS_SGI_MSK)
+
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+ rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index e4fd476e9ccb..356b16f40e78 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -664,6 +664,22 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
return newpos;
}
+#define WFA_TPC_IE_LEN 9
+
+static void iwl_mvm_add_tpc_report_ie(u8 *pos)
+{
+ pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+ pos[1] = WFA_TPC_IE_LEN - 2;
+ pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+ pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+ pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+ pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+ pos[6] = 0;
+ /* pos[7] - tx power will be inserted by the FW */
+ pos[7] = 0;
+ pos[8] = 0;
+}
+
static void
iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_ies *ies,
@@ -716,7 +732,16 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
memcpy(pos, ies->common_ies, ies->common_ie_len);
params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
- params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ !fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
+ iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+ WFA_TPC_IE_LEN);
+ } else {
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+ }
}
static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
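
iwl_mvm_add_tpc_report_ie() above emits a fixed nine-byte vendor-specific element. For illustration, a standalone sketch that lays out the same bytes; the literal values assume WLAN_EID_VENDOR_SPECIFIC == 221 (0xdd), WLAN_OUI_MICROSOFT == 0x0050f2 and WLAN_OUI_TYPE_MICROSOFT_TPC == 8, as defined in the kernel's ieee80211.h of this era:

#include <stdio.h>

#define WFA_TPC_IE_LEN 9

int main(void)
{
        unsigned char pos[WFA_TPC_IE_LEN] = {
                0xdd,                   /* WLAN_EID_VENDOR_SPECIFIC (221) */
                WFA_TPC_IE_LEN - 2,     /* length of the body */
                0x00, 0x50, 0xf2,       /* Microsoft/WFA OUI */
                8,                      /* OUI type: TPC report */
                0,                      /* OUI subtype */
                0,                      /* tx power - inserted by the FW */
                0,                      /* link margin */
        };
        int i;

        for (i = 0; i < WFA_TPC_IE_LEN; i++)
                printf("%02x ", pos[i]);
        printf("\n");
        return 0;
}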
@@ -781,7 +806,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
- if (iwl_mvm_rrm_scan_needed(mvm))
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
if (params->pass_all)
@@ -1183,7 +1210,9 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
}
- if (iwl_mvm_rrm_scan_needed(mvm))
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
if (params->pass_all)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 1add5615fc3a..6b2674e02606 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1439,6 +1439,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
goto err;
}
+ /*
+ * If rs is registered with mac80211, "add station" will be handled via
+ * the corresponding ops; otherwise we need to notify rate scaling here.
+ */
+ if (iwl_mvm_has_tlc_offload(mvm))
+ iwl_mvm_rs_add_sta(mvm, mvm_sta);
+
update_fw:
ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
if (ret)
@@ -1762,7 +1769,7 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
}
/*
- * For a000 firmware and on we cannot add queue to a station unknown
+ * For 22000 firmware and on we cannot add queue to a station unknown
* to firmware so enable queue here - after the station was added
*/
if (iwl_mvm_has_new_tx_api(mvm))
@@ -1885,7 +1892,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
/*
- * For a000 firmware and on we cannot add queue to a station unknown
+ * For 22000 firmware and on we cannot add queue to a station unknown
* to firmware so enable queue here - after the station was added
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -2064,7 +2071,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/*
* Enable cab queue after the ADD_STA command is sent.
- * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
+ * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
* command with unknown station id, and for FW that doesn't support
* station API since the cab queue is not included in the
* tfd_queue_mask.
@@ -2530,7 +2537,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->next_reclaimed);
/*
- * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+ * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
* to align the wrap around of ssn so we compare relevant values.
*/
normalized_ssn = tid_data->ssn;
@@ -2575,6 +2582,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
.aggregate = true,
};
+ /*
+ * When the FW supports TLC_OFFLOAD, it also implements the Tx
+ * aggregation manager, so this function should never be called
+ * in that case.
+ */
+ if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
+ return -EINVAL;
+
BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
!= IWL_MAX_TID_COUNT);
@@ -2672,12 +2686,12 @@ out:
*/
mvmsta->max_agg_bufsize =
min(mvmsta->max_agg_bufsize, buf_size);
- mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+ mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
sta->addr, tid);
- return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
@@ -3615,7 +3629,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
/*
- * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+ * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
* to align the wrap around of ssn so we compare relevant values.
*/
if (mvm->trans->cfg->gen2)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index aedabe101cf0..5ffd6adbc383 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -383,6 +383,8 @@ struct iwl_mvm_rxq_dup_data {
* and from Tx response flow, it needs a spinlock.
* @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
* @tid_to_baid: a simple map of TID to baid
+ * @lq_sta: holds rate scaling data, either for the case when RS is done in
+ * the driver - %rs_drv or in the FW - %rs_fw.
* @reserved_queue: the queue reserved for this STA for DQA purposes
* Every STA has is given one reserved queue to allow it to operate. If no
* such queue can be guaranteed, the STA addition will fail.
@@ -417,7 +419,10 @@ struct iwl_mvm_sta {
spinlock_t lock;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
u8 tid_to_baid[IWL_MAX_TID_COUNT];
- struct iwl_lq_sta lq_sta;
+ union {
+ struct iwl_lq_sta_rs_fw rs_fw;
+ struct iwl_lq_sta rs_drv;
+ } lq_sta;
struct ieee80211_vif *vif;
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
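
The lq_sta member becomes a union because a station is only ever tracked by one of the two rate-scaling implementations: the FW-offload state (rs_fw) or the driver state (rs_drv). A compact sketch of that shape, using hypothetical types rather than the driver's real API:

#include <stdbool.h>
#include <stdio.h>

struct rs_fw_state  { unsigned last_rate; };
struct rs_drv_state { unsigned last_rate; unsigned table[4]; };

struct sta {
        union {
                struct rs_fw_state  rs_fw;
                struct rs_drv_state rs_drv;
        } lq_sta;
};

static bool has_tlc_offload(void)
{
        return true;    /* would query a firmware capability bit */
}

static void rate_init(struct sta *sta)
{
        if (has_tlc_offload())
                sta->lq_sta.rs_fw.last_rate = 0;   /* FW owns the rest */
        else
                sta->lq_sta.rs_drv = (struct rs_drv_state){ 0 };
}

int main(void)
{
        struct sta sta = { 0 };

        rate_init(&sta);
        printf("rate scaling initialized, offload=%d\n", has_tlc_offload());
        return 0;
}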
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index e25cda9fbf6c..200ab50ec86b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -101,7 +101,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
- u32 queues = 0;
/*
* Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
@@ -110,14 +109,10 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
- queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
- }
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
- queues |= BIT(mvm->aux_queue);
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
- }
synchronize_net();
@@ -777,12 +772,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
}
- /*
- * Flush the done work, just in case it's still pending, so that
- * the work it does can complete and we can accept new frames.
- */
- flush_work(&mvm->roc_done_wk);
-
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
time_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 333bcb75b8af..dda77b327c98 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -888,10 +888,9 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
/*
* The first deferred frame should've stopped the MAC queues, so we
* should never get a second deferred frame for the RA/TID.
+ * In case of GSO the first packet may have been split, so don't warn.
*/
- if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
- "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
- skb_queue_len(deferred_tx_frames))) {
+ if (skb_queue_len(deferred_tx_frames) == 1) {
iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
schedule_work(&mvm->add_stream_wk);
}
@@ -1132,7 +1131,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
}
/*
- * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+ * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
* to align the wrap around of ssn so we compare relevant values.
*/
normalized_ssn = tid_data->ssn;
@@ -1624,7 +1623,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
int freed;
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
- tid >= IWL_MAX_TID_COUNT,
+ tid > IWL_MAX_TID_COUNT,
"sta_id %d tid %d", sta_id, tid))
return;
@@ -1679,7 +1678,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
@@ -1719,8 +1718,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
ba_info->band = chanctx_conf->def.chan->band;
iwl_mvm_hwrate_to_tx_status(rate, ba_info);
- IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
- iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+ if (!iwl_mvm_has_tlc_offload(mvm)) {
+ IWL_DEBUG_TX_REPLY(mvm,
+ "No reclaim. Update rs directly\n");
+ iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+ }
}
out:
@@ -1771,8 +1773,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_mvm_compressed_ba_tfd *ba_tfd =
&ba_res->tfd[i];
+ tid = ba_tfd->tid;
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+
mvmsta->tid_data[i].lq_color = lq_color;
- iwl_mvm_tx_reclaim(mvm, sta_id, ba_tfd->tid,
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid,
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
&ba_info,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 03ffd84786ca..d65e1db7c097 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -278,8 +278,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
u8 ind = last_idx;
int i;
- for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
- ind = (ind + 1) % RATE_MCS_ANT_NUM;
+ for (i = 0; i < MAX_RS_ANT_NUM; i++) {
+ ind = (ind + 1) % MAX_RS_ANT_NUM;
if (valid & BIT(ind))
return ind;
}
@@ -516,8 +516,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
IWL_ERR(trans, "HW error, resetting before reading\n");
/* reset the device */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- usleep_range(5000, 6000);
+ iwl_trans_sw_reset(trans);
/* set INIT_DONE flag */
iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -595,6 +594,12 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
+ if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+ IWL_ERR(mvm,
+ "DEVICE_ENABLED bit is not set. Aborting dump.\n");
+ return;
+ }
+
iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
if (mvm->error_event_table[1])
@@ -906,7 +911,8 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
.data = { lq, },
};
- if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
+ if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
+ iwl_mvm_has_tlc_offload(mvm)))
return -EINVAL;
return iwl_mvm_send_cmd(mvm, &cmd);
@@ -1025,12 +1031,34 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int res;
+ bool low_latency;
lockdep_assert_held(&mvm->mutex);
- if (iwl_mvm_vif_low_latency(mvmvif) == prev)
+ low_latency = iwl_mvm_vif_low_latency(mvmvif);
+
+ if (low_latency == prev)
return 0;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+ struct iwl_mac_low_latency_cmd cmd = {
+ .mac_id = cpu_to_le32(mvmvif->id)
+ };
+
+ if (low_latency) {
+ /* currently we don't care about the direction */
+ cmd.low_latency_rx = 1;
+ cmd.low_latency_tx = 1;
+ }
+ res = iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(LOW_LATENCY_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (res)
+ IWL_ERR(mvm, "Failed to send low latency command\n");
+ }
+
res = iwl_mvm_update_quotas(mvm, false, NULL);
if (res)
return res;
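
The low-latency hunk above shows a common iwlwifi pattern: a new host command is only sent when the firmware advertises the matching capability TLV, while older firmware silently stays on the legacy quota path. A stripped-down sketch of the same gating, with made-up names:

#include <stdbool.h>
#include <stdio.h>

#define CAPA_DYNAMIC_QUOTA (1u << 0)

struct fw { unsigned capa; };

static int send_low_latency_cmd(bool rx, bool tx)
{
        printf("LOW_LATENCY_CMD rx=%d tx=%d\n", rx, tx);
        return 0;
}

static int update_low_latency(struct fw *fw, bool low_latency)
{
        if (fw->capa & CAPA_DYNAMIC_QUOTA)
                return send_low_latency_cmd(low_latency, low_latency);
        /* older firmware: recompute quotas in the driver instead */
        return 0;
}

int main(void)
{
        struct fw fw = { .capa = CAPA_DYNAMIC_QUOTA };

        return update_low_latency(&fw, true);
}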
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ccd7c33c4c28..56fc28750a41 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -652,20 +652,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
-/* a000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwla000_2ac_cfg_jf)},
- {IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
+/* 22000 Series */
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+ {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
#endif /* CONFIG_IWLMVM */
@@ -707,7 +707,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->cfg = cfg_7265d;
}
- if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb &&
+ if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb &&
iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) {
u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id);
u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF);
@@ -715,14 +715,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rf_id_chp == jf_chp_id) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
- cfg = &iwla000_2ax_cfg_qnj_jf_b0;
+ cfg = &iwl22000_2ax_cfg_qnj_jf_b0;
else
- cfg = &iwla000_2ac_cfg_jf;
+ cfg = &iwl22000_2ac_cfg_jf;
} else if (rf_id_chp == hr_chp_id) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
- cfg = &iwla000_2ax_cfg_qnj_hr_a0;
+ cfg = &iwl22000_2ax_cfg_qnj_hr_a0;
else
- cfg = &iwla000_2ac_cfg_hr;
+ cfg = &iwl22000_2ac_cfg_hr;
}
iwl_trans->cfg = cfg;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d749abeca3ae..ca3b64ff4dad 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -658,23 +658,20 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
-static inline void iwl_pcie_sw_reset(struct iwl_trans *trans)
-{
- /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- usleep_range(5000, 6000);
-}
-
static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
-static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
struct iwl_txq *txq, int idx)
{
- return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq,
- idx);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans->cfg->use_tfh)
+ idx = iwl_pcie_get_cmd_index(txq, idx);
+
+ return txq->tfds + trans_pcie->tfd_size * idx;
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
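
iwl_pcie_get_tfd() indexes the TFD array manually (txq->tfds + tfd_size * idx) because the element size differs between hardware families and is only known at runtime. A small standalone illustration of that indexing style:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *get_elem(void *base, size_t elem_size, size_t idx)
{
        /* byte-pointer arithmetic, since elem_size is a runtime value */
        return (char *)base + elem_size * idx;
}

int main(void)
{
        size_t elem_size = 32;  /* in the driver this depends on the HW family */
        size_t n = 4;
        unsigned char *tfds = calloc(n, elem_size);

        if (!tfds)
                return 1;
        memset(get_elem(tfds, elem_size, 2), 0xab, elem_size);
        printf("elem 2, first byte: 0x%02x\n",
               *(unsigned char *)get_elem(tfds, elem_size, 2));
        free(tfds);
        return 0;
}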
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index ac05fd1e74c4..cb4012541f45 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -137,7 +137,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_sw_reset(trans);
/*
* Clear "initialization complete" bit to move adapter from
@@ -192,7 +192,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_sw_reset(trans);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 4541c86881d6..b406b536c850 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -176,6 +176,13 @@ out:
kfree(buf);
}
+static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
+{
+ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ usleep_range(5000, 6000);
+}
+
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -446,7 +453,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
/*
* Set "initialization complete" bit to move adapter from
@@ -487,7 +494,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
apmg_xtal_cfg_reg |
SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
/* Enable LP XTAL by indirect access through CSR */
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@@ -580,7 +587,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
return;
}
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
/*
* Clear "initialization complete" bit to move adapter from
@@ -915,14 +922,9 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
int i;
- if (dest->version)
- IWL_ERR(trans,
- "DBG DEST version is %d - expect issues\n",
- dest->version);
-
IWL_INFO(trans, "Applying debug destination %s\n",
get_fw_dbg_mode_string(dest->monitor_mode));
@@ -1270,7 +1272,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
@@ -1744,7 +1746,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
return err;
}
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
err = iwl_pcie_apm_init(trans);
if (err)
@@ -2816,8 +2818,17 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
* Update pointers to reflect actual values after
* shifting
*/
- base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
+ if (trans->dbg_dest_tlv->version) {
+ base = (iwl_read_prph(trans, base) &
+ IWL_LDBG_M2S_BUF_BA_MSK) <<
+ trans->dbg_dest_tlv->base_shift;
+ base *= IWL_M2S_UNIT_SIZE;
+ base += trans->cfg->smem_offset;
+ } else {
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ }
+
iwl_trans_read_mem(trans, base, fw_mon_data->data,
monitor_len / sizeof(u32));
} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
@@ -2865,21 +2876,36 @@ static struct iwl_trans_dump_data
trans_pcie->fw_mon_size;
monitor_len = trans_pcie->fw_mon_size;
} else if (trans->dbg_dest_tlv) {
- u32 base, end;
+ u32 base, end, cfg_reg;
- base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
- end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+ if (trans->dbg_dest_tlv->version == 1) {
+ cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ cfg_reg = iwl_read_prph(trans, cfg_reg);
+ base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
+ trans->dbg_dest_tlv->base_shift;
+ base *= IWL_M2S_UNIT_SIZE;
+ base += trans->cfg->smem_offset;
- base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
- end = iwl_read_prph(trans, end) <<
- trans->dbg_dest_tlv->end_shift;
+ monitor_len =
+ (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
+ trans->dbg_dest_tlv->end_shift;
+ monitor_len *= IWL_M2S_UNIT_SIZE;
+ } else {
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
- /* Make "end" point to the actual end */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 ||
- trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
- end += (1 << trans->dbg_dest_tlv->end_shift);
- monitor_len = end - base;
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ end = iwl_read_prph(trans, end) <<
+ trans->dbg_dest_tlv->end_shift;
+
+ /* Make "end" point to the actual end */
+ if (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000 ||
+ trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
+ end += (1 << trans->dbg_dest_tlv->end_shift);
+ monitor_len = end - base;
+ }
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
monitor_len;
} else {
@@ -3025,6 +3051,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.write_mem = iwl_trans_pcie_write_mem, \
.configure = iwl_trans_pcie_configure, \
.set_pmi = iwl_trans_pcie_set_pmi, \
+ .sw_reset = iwl_trans_pcie_sw_reset, \
.grab_nic_access = iwl_trans_pcie_grab_nic_access, \
.release_nic_access = iwl_trans_pcie_release_nic_access, \
.set_bits_mask = iwl_trans_pcie_set_bits_mask, \
@@ -3250,9 +3277,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
if (hw_status & UMAG_GEN_HW_IS_FPGA)
- trans->cfg = &iwla000_2ax_cfg_qnj_hr_f0;
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
else
- trans->cfg = &iwla000_2ac_cfg_hr;
+ trans->cfg = &iwl22000_2ac_cfg_hr;
}
#endif
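
With .sw_reset wired into the transport ops, common code such as iwl_mvm_dump_lmac_error_log() can call iwl_trans_sw_reset() instead of open-coding the CSR write. A minimal sketch of that ops-table indirection, with hypothetical names rather than the real iwl_trans layout:

#include <stdio.h>

struct trans;

struct trans_ops {
        void (*sw_reset)(struct trans *trans);
};

struct trans {
        const struct trans_ops *ops;
};

static void pcie_sw_reset(struct trans *trans)
{
        (void)trans;
        /* stands in for the CSR_RESET_REG_FLAG_SW_RESET write + delay */
        printf("controller reset\n");
}

static const struct trans_ops pcie_ops = {
        .sw_reset = pcie_sw_reset,
};

static void trans_sw_reset(struct trans *trans)
{
        if (trans->ops->sw_reset)
                trans->ops->sw_reset(trans);
}

int main(void)
{
        struct trans t = { .ops = &pcie_ops };

        trans_sw_reset(&t);
        return 0;
}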
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 16b345f54ff0..6d0a907d5ba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
*/
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
lockdep_assert_held(&txq->lock);
iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_pcie_get_tfd(trans_pcie, txq, idx));
+ iwl_pcie_get_tfd(trans, txq, idx));
/* free SKB */
if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd =
- iwl_pcie_get_tfd(trans_pcie, txq, idx);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
dma_addr_t tb_phys;
bool amsdu;
int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
u8 group_id = iwl_cmd_groupid(cmd->id);
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- struct iwl_tfh_tfd *tfd =
- iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fed6d842a5e1..3f85713c41dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i, num_tbs;
- void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
+ void *tfd = iwl_pcie_get_tfd(trans, txq, index);
/* Sanity check on number of chunks */
num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
- tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+ tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
iwl_pcie_tfd_get_num_tbs(trans, tfd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e8189c07b41f..1cf22e62e3dd 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -32,6 +32,7 @@
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <linux/rhashtable.h>
#include "mac80211_hwsim.h"
#define WARN_QUEUE 100
@@ -489,6 +490,8 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
static spinlock_t hwsim_radio_lock;
static LIST_HEAD(hwsim_radios);
+static struct workqueue_struct *hwsim_wq;
+static struct rhashtable hwsim_radios_rht;
static int hwsim_radio_idx;
static struct platform_driver mac80211_hwsim_driver = {
@@ -499,6 +502,7 @@ static struct platform_driver mac80211_hwsim_driver = {
struct mac80211_hwsim_data {
struct list_head list;
+ struct rhash_head rht;
struct ieee80211_hw *hw;
struct device *dev;
struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
@@ -573,6 +577,13 @@ struct mac80211_hwsim_data {
u64 tx_failed;
};
+static const struct rhashtable_params hwsim_rht_params = {
+ .nelem_hint = 2,
+ .automatic_shrinking = true,
+ .key_len = ETH_ALEN,
+ .key_offset = offsetof(struct mac80211_hwsim_data, addresses[1]),
+ .head_offset = offsetof(struct mac80211_hwsim_data, rht),
+};
struct hwsim_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
@@ -728,16 +739,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
val != PS_MANUAL_POLL)
return -EINVAL;
- old_ps = data->ps;
- data->ps = val;
-
- local_bh_disable();
if (val == PS_MANUAL_POLL) {
+ if (data->ps != PS_ENABLED)
+ return -EINVAL;
+ local_bh_disable();
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_ps_poll, data);
- data->ps_poll_pending = true;
- } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+ local_bh_enable();
+ return 0;
+ }
+ old_ps = data->ps;
+ data->ps = val;
+
+ local_bh_disable();
+ if (old_ps == PS_DISABLED && val != PS_DISABLED) {
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_nullfunc_ps, data);
@@ -1003,6 +1019,36 @@ static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data,
return res;
}
+static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
+{
+ u16 result = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ result |= MAC80211_HWSIM_TX_RC_USE_RTS_CTS;
+ if (rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+ result |= MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT;
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ result |= MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE;
+ if (rate->flags & IEEE80211_TX_RC_MCS)
+ result |= MAC80211_HWSIM_TX_RC_MCS;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ result |= MAC80211_HWSIM_TX_RC_GREEN_FIELD;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ result |= MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH;
+ if (rate->flags & IEEE80211_TX_RC_DUP_DATA)
+ result |= MAC80211_HWSIM_TX_RC_DUP_DATA;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ result |= MAC80211_HWSIM_TX_RC_SHORT_GI;
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS)
+ result |= MAC80211_HWSIM_TX_RC_VHT_MCS;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ result |= MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH;
+ if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ result |= MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH;
+
+ return result;
+}
+
static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
struct sk_buff *my_skb,
int dst_portid)
@@ -1015,6 +1061,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
unsigned int hwsim_flags = 0;
int i;
struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
+ struct hwsim_tx_rate_flag tx_attempts_flags[IEEE80211_TX_MAX_RATES];
uintptr_t cookie;
if (data->ps != PS_DISABLED)
@@ -1066,7 +1113,11 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
tx_attempts[i].idx = info->status.rates[i].idx;
+ tx_attempts_flags[i].idx = info->status.rates[i].idx;
tx_attempts[i].count = info->status.rates[i].count;
+ tx_attempts_flags[i].flags =
+ trans_tx_rate_flags_ieee2hwsim(
+ &info->status.rates[i]);
}
if (nla_put(skb, HWSIM_ATTR_TX_INFO,
@@ -1074,6 +1125,11 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
tx_attempts))
goto nla_put_failure;
+ if (nla_put(skb, HWSIM_ATTR_TX_INFO_FLAGS,
+ sizeof(struct hwsim_tx_rate_flag) * IEEE80211_TX_MAX_RATES,
+ tx_attempts_flags))
+ goto nla_put_failure;
+
/* We create a cookie to identify this skb */
data->pending_cookie++;
cookie = data->pending_cookie;
@@ -2726,6 +2782,15 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
spin_lock_bh(&hwsim_radio_lock);
+ err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht,
+ hwsim_rht_params);
+ if (err < 0) {
+ pr_debug("mac80211_hwsim: radio index %d already present\n",
+ idx);
+ spin_unlock_bh(&hwsim_radio_lock);
+ goto failed_final_insert;
+ }
+
list_add_tail(&data->list, &hwsim_radios);
spin_unlock_bh(&hwsim_radio_lock);
@@ -2734,6 +2799,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
return idx;
+failed_final_insert:
+ debugfs_remove_recursive(data->debugfs);
+ ieee80211_unregister_hw(data->hw);
failed_hw:
device_release_driver(data->dev);
failed_bind:
@@ -2869,22 +2937,9 @@ static void hwsim_mon_setup(struct net_device *dev)
static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
{
- struct mac80211_hwsim_data *data;
- bool _found = false;
-
- spin_lock_bh(&hwsim_radio_lock);
- list_for_each_entry(data, &hwsim_radios, list) {
- if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
- _found = true;
- break;
- }
- }
- spin_unlock_bh(&hwsim_radio_lock);
-
- if (!_found)
- return NULL;
-
- return data;
+ return rhashtable_lookup_fast(&hwsim_radios_rht,
+ addr,
+ hwsim_rht_params);
}
static void hwsim_register_wmediumd(struct net *net, u32 portid)
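
The rhashtable conversion above replaces the O(n) list walk keyed on the radio's second MAC address with an O(1) hash lookup. A condensed kernel-style sketch of the same params/insert/lookup pattern, with hypothetical struct names:

#include <linux/rhashtable.h>
#include <linux/if_ether.h>

struct radio {
        u8 addr[ETH_ALEN];              /* the lookup key */
        struct rhash_head rht;          /* hash table linkage */
};

static const struct rhashtable_params radio_rht_params = {
        .nelem_hint = 2,
        .automatic_shrinking = true,
        .key_len = ETH_ALEN,
        .key_offset = offsetof(struct radio, addr),
        .head_offset = offsetof(struct radio, rht),
};

static struct rhashtable radio_rht;

static int radio_table_init(void)
{
        return rhashtable_init(&radio_rht, &radio_rht_params);
}

static int radio_add(struct radio *r)
{
        return rhashtable_insert_fast(&radio_rht, &r->rht, radio_rht_params);
}

static struct radio *radio_find(const u8 *addr)
{
        /* rhashtable_lookup_fast() takes the RCU read lock itself */
        return rhashtable_lookup_fast(&radio_rht, addr, radio_rht_params);
}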
@@ -2969,7 +3024,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
txi->status.rates[i].idx = tx_attempts[i].idx;
txi->status.rates[i].count = tx_attempts[i].count;
- /*txi->status.rates[i].flags = 0;*/
}
txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
@@ -3120,6 +3174,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_CHANNELS])
param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+ if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
+ GENL_SET_ERR_MSG(info, "too many channels specified");
+ return -EINVAL;
+ }
+
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
@@ -3144,8 +3203,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
- if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
+ if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
+ kfree(hwname);
return -EINVAL;
+ }
param.regd = hwsim_world_regdom_custom[idx];
}
@@ -3186,6 +3247,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
continue;
list_del(&data->list);
+ rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
+ hwsim_rht_params);
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
info);
@@ -3341,8 +3404,10 @@ static void remove_user_radios(u32 portid)
list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
if (entry->destroy_on_close && entry->portid == portid) {
list_del(&entry->list);
+ rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
+ hwsim_rht_params);
INIT_WORK(&entry->destroy_work, destroy_radio);
- schedule_work(&entry->destroy_work);
+ queue_work(hwsim_wq, &entry->destroy_work);
}
}
spin_unlock_bh(&hwsim_radio_lock);
@@ -3416,8 +3481,10 @@ static void __net_exit hwsim_exit_net(struct net *net)
continue;
list_del(&data->list);
+ rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
+ hwsim_rht_params);
INIT_WORK(&data->destroy_work, destroy_radio);
- schedule_work(&data->destroy_work);
+ queue_work(hwsim_wq, &data->destroy_work);
}
spin_unlock_bh(&hwsim_radio_lock);
}
@@ -3449,6 +3516,11 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
+ hwsim_wq = alloc_workqueue("hwsim_wq", WQ_MEM_RECLAIM, 0);
+ if (!hwsim_wq)
+ return -ENOMEM;
+ rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
+
err = register_pernet_device(&hwsim_net_ops);
if (err)
return err;
@@ -3587,8 +3659,12 @@ static void __exit exit_mac80211_hwsim(void)
hwsim_exit_netlink();
mac80211_hwsim_free();
+ flush_workqueue(hwsim_wq);
+
+ rhashtable_destroy(&hwsim_radios_rht);
unregister_netdev(hwsim_mon);
platform_driver_unregister(&mac80211_hwsim_driver);
unregister_pernet_device(&hwsim_net_ops);
+ destroy_workqueue(hwsim_wq);
}
module_exit(exit_mac80211_hwsim);
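
The hwsim changes route the deferred destroy_radio work through a dedicated WQ_MEM_RECLAIM workqueue and flush it before teardown, so no destroy work is left running at module exit. A minimal sketch of that lifecycle, with hypothetical names rather than hwsim's real symbols:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_destroy_work;

static void my_destroy_fn(struct work_struct *work)
{
        /* deferred teardown runs here, off the hot path */
}

static int my_init(void)
{
        my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
        if (!my_wq)
                return -ENOMEM;
        INIT_WORK(&my_destroy_work, my_destroy_fn);
        queue_work(my_wq, &my_destroy_work);
        return 0;
}

static void my_exit(void)
{
        /* make sure no queued work is still running, then tear down */
        flush_workqueue(my_wq);
        destroy_workqueue(my_wq);
}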
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 3f5eda591dba..a96a79c1eff5 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -64,7 +64,8 @@ enum hwsim_tx_control_flags {
* @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to
* kernel, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
- * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_TX_INFO_FLAGS,
+ * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
* @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters,
* returns the radio ID (>= 0) or negative on errors, if successful
* then multicast the result
@@ -123,6 +124,8 @@ enum {
* @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666
* @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio.
* @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received.
+ * @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding
+ * rates of %HWSIM_ATTR_TX_INFO
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -149,6 +152,7 @@ enum {
HWSIM_ATTR_NO_VIF,
HWSIM_ATTR_FREQ,
HWSIM_ATTR_PAD,
+ HWSIM_ATTR_TX_INFO_FLAGS,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
@@ -171,4 +175,66 @@ struct hwsim_tx_rate {
u8 count;
} __packed;
+/**
+ * enum hwsim_tx_rate_flags - per-rate flags set by the rate control algorithm.
+ * Inspired by &enum mac80211_rate_control_flags. New flags may be
+ * appended, but existing flags must not be deleted, to keep
+ * compatibility with userspace.
+ *
+ * These flags are set by the rate control algorithm for each rate during tx,
+ * in the @flags member of struct ieee80211_tx_rate.
+ *
+ * @MAC80211_HWSIM_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate.
+ * @MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required.
+ * This is set if the current BSS requires ERP protection.
+ * @MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE: Use short preamble.
+ * @MAC80211_HWSIM_TX_RC_MCS: HT rate.
+ * @MAC80211_HWSIM_TX_RC_VHT_MCS: VHT MCS rate, in this case the idx field is
+ * split into a higher 4 bits (Nss) and lower 4 bits (MCS number)
+ * @MAC80211_HWSIM_TX_RC_GREEN_FIELD: Indicates whether this rate should be used
+ * in Greenfield mode.
+ * @MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be
+ * 40 MHz.
+ * @MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission
+ * @MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission
+ * (80+80 isn't supported yet)
+ * @MAC80211_HWSIM_TX_RC_DUP_DATA: The frame should be transmitted on both of
+ * the adjacent 20 MHz channels, if the current channel type is
+ * NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS.
+ * @MAC80211_HWSIM_TX_RC_SHORT_GI: Short Guard interval should be used for this
+ * rate.
+ */
+enum hwsim_tx_rate_flags {
+ MAC80211_HWSIM_TX_RC_USE_RTS_CTS = BIT(0),
+ MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT = BIT(1),
+ MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE = BIT(2),
+
+ /* rate index is an HT/VHT MCS instead of an index */
+ MAC80211_HWSIM_TX_RC_MCS = BIT(3),
+ MAC80211_HWSIM_TX_RC_GREEN_FIELD = BIT(4),
+ MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH = BIT(5),
+ MAC80211_HWSIM_TX_RC_DUP_DATA = BIT(6),
+ MAC80211_HWSIM_TX_RC_SHORT_GI = BIT(7),
+ MAC80211_HWSIM_TX_RC_VHT_MCS = BIT(8),
+ MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH = BIT(9),
+ MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH = BIT(10),
+};
+
+/**
+ * struct hwsim_tx_rate_flag - rate flags for transmit status reporting
+ *
+ * @idx: rate index the flags apply to
+ * @flags: the per-rate flags, from &enum hwsim_tx_rate_flags
+ *
+ * A value of -1 for @idx indicates an invalid rate and, if used
+ * in an array of retry rates, that no more rates should be tried.
+ *
+ * When used for transmit status reporting, the driver should
+ * always report the rate and flags used.
+ */
+struct hwsim_tx_rate_flag {
+ s8 idx;
+ u16 flags;
+} __packed;
#endif /* __MAC80211_HWSIM_H */
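
From userspace, HWSIM_ATTR_TX_INFO_FLAGS arrives as a packed array of IEEE80211_TX_MAX_RATES (4) struct hwsim_tx_rate_flag entries. A hypothetical decoder for that payload; the netlink attribute parsing (libnl or similar) is assumed and elided:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IEEE80211_TX_MAX_RATES 4

struct hwsim_tx_rate_flag {
        int8_t idx;
        uint16_t flags;
} __attribute__((packed));

static void dump_tx_rate_flags(const void *payload, size_t len)
{
        struct hwsim_tx_rate_flag rates[IEEE80211_TX_MAX_RATES];
        int i;

        if (len < sizeof(rates))
                return;         /* short attribute, ignore */
        memcpy(rates, payload, sizeof(rates));
        for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
                if (rates[i].idx < 0)
                        break;  /* -1 ends the retry chain */
                printf("rate %d: idx=%d flags=0x%04x\n",
                       i, rates[i].idx, rates[i].flags);
        }
}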
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 6e0d9a9c5cfb..ce4432c535f0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1116,6 +1116,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype;
+ if (priv->scan_request) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "change virtual interface: scan in process\n");
+ return -EBUSY;
+ }
+
switch (curr_iftype) {
case NL80211_IFTYPE_ADHOC:
switch (type) {
@@ -1180,7 +1186,6 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
switch (type) {
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
type, params);
break;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index dcc529e9c0ef..874660052055 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -290,13 +290,16 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
get_unaligned_le16((u8 *)host_cmd + S_DS_GEN);
+ /* Set up the timer after transmitting the command, except for
+ * commands that have no command response.
+ */
+ if (cmd_code != HostCmd_CMD_FW_DUMP_EVENT)
+ mod_timer(&adapter->cmd_timer,
+ jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+
/* Clear BSS_NO_BITS from HostCmd */
cmd_code &= HostCmd_CMD_ID_MASK;
- /* Setup the timer after transmit command */
- mod_timer(&adapter->cmd_timer,
- jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
-
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 6f4239be609d..db2872daae97 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -168,10 +168,15 @@ mwifiex_device_dump_read(struct file *file, char __user *ubuf,
{
struct mwifiex_private *priv = file->private_data;
- if (!priv->adapter->if_ops.device_dump)
- return -EIO;
-
- priv->adapter->if_ops.device_dump(priv->adapter);
+ /* For command timeouts, USB firmware will automatically emit
+ * firmware dump events, so we don't implement device_dump().
+ * For user-initiated dumps, we trigger it ourselves.
+ */
+ if (priv->adapter->iface_type == MWIFIEX_USB)
+ mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
+ else
+ priv->adapter->if_ops.device_dump(priv->adapter);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 13cd58e963b3..9c2cdef54074 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -56,6 +56,15 @@ struct mwifiex_fw_data {
u8 data[1];
} __packed;
+struct mwifiex_fw_dump_header {
+ __le16 seq_num;
+ __le16 reserved;
+ __le16 type;
+ __le16 len;
+} __packed;
+
+#define FW_DUMP_INFO_ENDED 0x0002
+
#define MWIFIEX_FW_DNLD_CMD_1 0x1
#define MWIFIEX_FW_DNLD_CMD_5 0x5
#define MWIFIEX_FW_DNLD_CMD_6 0x6
@@ -400,6 +409,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_TDLS_CONFIG 0x0100
#define HostCmd_CMD_MC_POLICY 0x0121
#define HostCmd_CMD_TDLS_OPER 0x0122
+#define HostCmd_CMD_FW_DUMP_EVENT 0x0125
#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
#define HostCmd_CMD_CHAN_REGION_CFG 0x0242
#define HostCmd_CMD_PACKET_AGGR_CTRL 0x0251
@@ -570,6 +580,7 @@ enum mwifiex_channel_flags {
#define EVENT_BG_SCAN_STOPPED 0x00000065
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_MULTI_CHAN_INFO 0x0000006a
+#define EVENT_FW_DUMP_INFO 0x00000073
#define EVENT_TX_STATUS_REPORT 0x00000074
#define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index e1aa86042469..d239e9248c05 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -64,6 +64,13 @@ static void wakeup_timer_fn(struct timer_list *t)
adapter->if_ops.card_reset(adapter);
}
+static void fw_dump_timer_fn(struct timer_list *t)
+{
+ struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer);
+
+ mwifiex_upload_device_dump(adapter);
+}
+
/*
* This function initializes the private structure and sets default
* values to the members.
@@ -314,6 +321,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
adapter->active_scan_triggered = false;
timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0);
+ adapter->devdump_len = 0;
+ timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0);
}
/*
@@ -396,6 +405,7 @@ static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
del_timer(&adapter->wakeup_timer);
+ del_timer_sync(&adapter->devdump_timer);
mwifiex_cancel_all_pending_cmd(adapter);
wake_up_interruptible(&adapter->cmd_wait_q.wait);
wake_up_interruptible(&adapter->hs_activate_wait_q);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index a96bd7e653bf..12e739950332 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1051,9 +1051,30 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter)
}
EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync);
-int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
{
- void *p;
+ /* Dump all the memory data into a single file; a userspace script
+ * will be used to split it into multiple files.
+ */
+ mwifiex_dbg(adapter, MSG,
+ "== mwifiex dump information to /sys/class/devcoredump start\n");
+ dev_coredumpv(adapter->dev, adapter->devdump_data, adapter->devdump_len,
+ GFP_KERNEL);
+ mwifiex_dbg(adapter, MSG,
+ "== mwifiex dump information to /sys/class/devcoredump end\n");
+
+ /* The device dump data will be freed by the device coredump release
+ * function after 5 minutes. Reset adapter->devdump_data and
+ * ->devdump_len here so they are not accidentally reused.
+ */
+ adapter->devdump_data = NULL;
+ adapter->devdump_len = 0;
+}
+EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
+
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
+{
+ char *p;
char drv_version[64];
struct usb_card_rec *cardp;
struct sdio_mmc_card *sdio_card;
@@ -1061,17 +1082,12 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
int i, idx;
struct netdev_queue *txq;
struct mwifiex_debug_info *debug_info;
- void *drv_info_dump;
mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
- /* memory allocate here should be free in mwifiex_upload_device_dump*/
- drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
-
- if (!drv_info_dump)
- return 0;
-
- p = (char *)(drv_info_dump);
+ p = adapter->devdump_data;
+ strcpy(p, "========Start dump driverinfo========\n");
+ p += strlen("========Start dump driverinfo========\n");
p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
mwifiex_drv_get_driver_version(adapter, drv_version,
@@ -1155,21 +1171,18 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
kfree(debug_info);
}
+ strcpy(p, "\n========End dump========\n");
+ p += strlen("\n========End dump========\n");
mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
- *drv_info = drv_info_dump;
- return p - drv_info_dump;
+ adapter->devdump_len = p - (char *)adapter->devdump_data;
}
EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
- int drv_info_size)
+void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter)
{
- u8 idx, *dump_data, *fw_dump_ptr;
- u32 dump_len;
-
- dump_len = (strlen("========Start dump driverinfo========\n") +
- drv_info_size +
- strlen("\n========End dump========\n"));
+ u8 idx;
+ char *fw_dump_ptr;
+ u32 dump_len = 0;
for (idx = 0; idx < adapter->num_mem_types; idx++) {
struct memory_type_mapping *entry =
@@ -1184,24 +1197,24 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
}
}
- dump_data = vzalloc(dump_len + 1);
- if (!dump_data)
- goto done;
-
- fw_dump_ptr = dump_data;
+ if (dump_len + 1 + adapter->devdump_len > MWIFIEX_FW_DUMP_SIZE) {
+ /* The preallocated buffer is too small; reallocate */
+ fw_dump_ptr = vzalloc(dump_len + 1 + adapter->devdump_len);
+ mwifiex_dbg(adapter, MSG, "Realloc device dump data.\n");
+ if (!fw_dump_ptr) {
+ vfree(adapter->devdump_data);
+ mwifiex_dbg(adapter, ERROR,
+ "vzalloc devdump data failure!\n");
+ return;
+ }
- /* Dump all the memory data into single file, a userspace script will
- * be used to split all the memory data to multiple files
- */
- mwifiex_dbg(adapter, MSG,
- "== mwifiex dump information to /sys/class/devcoredump start");
+ memmove(fw_dump_ptr, adapter->devdump_data,
+ adapter->devdump_len);
+ vfree(adapter->devdump_data);
+ adapter->devdump_data = fw_dump_ptr;
+ }
- strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
- fw_dump_ptr += strlen("========Start dump driverinfo========\n");
- memcpy(fw_dump_ptr, drv_info, drv_info_size);
- fw_dump_ptr += drv_info_size;
- strcpy(fw_dump_ptr, "\n========End dump========\n");
- fw_dump_ptr += strlen("\n========End dump========\n");
+ fw_dump_ptr = (char *)adapter->devdump_data + adapter->devdump_len;
for (idx = 0; idx < adapter->num_mem_types; idx++) {
struct memory_type_mapping *entry =
@@ -1225,14 +1238,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
}
}
- /* device dump data will be free in device coredump release function
- * after 5 min
- */
- dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
- mwifiex_dbg(adapter, MSG,
- "== mwifiex dump information to /sys/class/devcoredump end");
+ adapter->devdump_len = fw_dump_ptr - (char *)adapter->devdump_data;
-done:
for (idx = 0; idx < adapter->num_mem_types; idx++) {
struct memory_type_mapping *entry =
&adapter->mem_type_mapping_tbl[idx];
@@ -1241,10 +1248,8 @@ done:
entry->mem_ptr = NULL;
entry->mem_size = 0;
}
-
- vfree(drv_info);
}
-EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
+EXPORT_SYMBOL_GPL(mwifiex_prepare_fw_dump_info);
/*
* CFG802.11 network device handler for statistics retrieval.
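A minimal sketch of the devcoredump ownership contract the code above relies
on: dev_coredumpv() takes ownership of a vmalloc'd buffer and frees it with
vfree() when the dump is released, so the caller must drop its pointer
afterwards. The function name and device argument here are placeholders:

#include <linux/devcoredump.h>
#include <linux/vmalloc.h>

static void example_emit_dump(struct device *dev, size_t len)
{
	void *buf = vzalloc(len);

	if (!buf)
		return;

	/* ... fill buf with dump data ... */

	/* devcoredump now owns buf and will vfree() it on release */
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
	buf = NULL;
}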
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 154c0796c0c5..6b5539b1f4d8 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -94,6 +94,8 @@ enum {
#define MAX_EVENT_SIZE 2048
+#define MWIFIEX_FW_DUMP_SIZE (2 * 1024 * 1024)
+
#define ARP_FILTER_MAX_BUF_SIZE 68
#define MWIFIEX_KEY_BUFFER_SIZE 16
@@ -1032,6 +1034,10 @@ struct mwifiex_adapter {
bool wake_by_wifi;
/* Aggregation parameters*/
struct bus_aggr_params bus_aggr;
+ /* Device dump data/length */
+ void *devdump_data;
+ int devdump_len;
+ struct timer_list devdump_timer;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1656,9 +1662,9 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv,
u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
u8 rx_rate, u8 ht_info);
-int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info);
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
- int drv_info_size);
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
+void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
@@ -1677,6 +1683,7 @@ void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter);
int mwifiex_set_mac_address(struct mwifiex_private *priv,
struct net_device *dev);
+void mwifiex_devdump_tmo_func(unsigned long function_context);
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index cd314946452c..97a6199692ab 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2769,19 +2769,27 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
{
- int drv_info_size;
- void *drv_info;
+ adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+ if (!adapter->devdump_data) {
+ mwifiex_dbg(adapter, ERROR,
+ "vzalloc devdump data failure!\n");
+ return;
+ }
- drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
+ mwifiex_drv_info_dump(adapter);
mwifiex_pcie_fw_dump(adapter);
- mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
+ mwifiex_prepare_fw_dump_info(adapter);
+ mwifiex_upload_device_dump(adapter);
}
static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- pci_reset_function(card->dev);
+ /* We can't afford to wait here; remove() might be waiting on us. If we
+ * can't grab the device lock, maybe we'll get another chance later.
+ */
+ pci_try_reset_function(card->dev);
}
static void mwifiex_pcie_work(struct work_struct *work)
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index fd5183c10c4e..a82880132af4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -2505,15 +2505,21 @@ done:
static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
- int drv_info_size;
- void *drv_info;
- drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
+ adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+ if (!adapter->devdump_data) {
+ mwifiex_dbg(adapter, ERROR,
+ "vzalloc devdump data failure!\n");
+ return;
+ }
+
+ mwifiex_drv_info_dump(adapter);
if (card->fw_dump_enh)
mwifiex_sdio_generic_fw_dump(adapter);
else
mwifiex_sdio_fw_dump(adapter);
- mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
+ mwifiex_prepare_fw_dump_info(adapter);
+ mwifiex_upload_device_dump(adapter);
}
static void mwifiex_sdio_work(struct work_struct *work)
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index fb090144a6d8..211e47d8b318 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -2206,6 +2206,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_CHAN_REGION_CFG:
ret = mwifiex_cmd_chan_region_cfg(priv, cmd_ptr, cmd_action);
break;
+ case HostCmd_CMD_FW_DUMP_EVENT:
+ cmd_ptr->command = cpu_to_le16(cmd_no);
+ cmd_ptr->size = cpu_to_le16(S_DS_GEN);
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index d8db412b76c6..93dfb76cd8a6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -584,6 +584,62 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
adapter->coex_rx_win_size);
}
+static void
+mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_fw_dump_header *fw_dump_hdr =
+ (void *)adapter->event_body;
+
+ if (adapter->iface_type != MWIFIEX_USB) {
+ mwifiex_dbg(adapter, MSG,
+ "event is not on usb interface, ignore it\n");
+ return;
+ }
+
+ if (!adapter->devdump_data) {
+ /* On receiving the first event, allocate the device
+ * dump buffer and dump the driver info into it.
+ */
+ adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+ if (!adapter->devdump_data) {
+ mwifiex_dbg(adapter, ERROR,
+ "vzalloc devdump data failure!\n");
+ return;
+ }
+
+ mwifiex_drv_info_dump(adapter);
+
+ /* If no subsequent event arrives within 10s, upload the
+ * device dump data anyway; this is useful if the
+ * end-of-transmission event gets lost. In that corner
+ * case the user still gets a partial dump.
+ */
+ mod_timer(&adapter->devdump_timer,
+ jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+ }
+
+ /* Overflow check */
+ if (adapter->devdump_len + event_skb->len >= MWIFIEX_FW_DUMP_SIZE)
+ goto upload_dump;
+
+ memmove(adapter->devdump_data + adapter->devdump_len,
+ adapter->event_skb->data, event_skb->len);
+ adapter->devdump_len += event_skb->len;
+
+ if (le16_to_cpu(fw_dump_hdr->type) == FW_DUMP_INFO_ENDED) {
+ mwifiex_dbg(adapter, MSG,
+ "receive end of transmission flag event!\n");
+ goto upload_dump;
+ }
+ return;
+
+upload_dump:
+ del_timer_sync(&adapter->devdump_timer);
+ mwifiex_upload_device_dump(adapter);
+}
+
/*
* This function handles events generated by firmware.
*
@@ -636,6 +692,7 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
* - EVENT_DELBA
* - EVENT_BA_STREAM_TIEMOUT
* - EVENT_AMSDU_AGGR_CTRL
+ * - EVENT_FW_DUMP_INFO
*/
int mwifiex_process_sta_event(struct mwifiex_private *priv)
{
@@ -1007,6 +1064,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->event_skb->len -
sizeof(eventcause));
break;
+ case EVENT_FW_DUMP_INFO:
+ mwifiex_dbg(adapter, EVENT, "event: firmware debug info\n");
+ mwifiex_fw_dump_info_event(priv, adapter->event_skb);
+ break;
/* Debugging event; not used, but let's not print an ERROR for it. */
case EVENT_UNKNOWN_DEBUG:
mwifiex_dbg(adapter, EVENT, "event: debug\n");
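The end-of-dump check above originally applied le16_to_cpu() to the whole
comparison; a sketch of the corrected endianness handling (kernel context and
the FW_DUMP_INFO_ENDED definition from fw.h assumed):

#include <linux/types.h>

/* le16_to_cpu() must convert the field, not the result of the
 * comparison: in le16_to_cpu(x == C) the compare runs on the raw
 * little-endian value (wrong on big-endian hosts), and the conversion
 * is then applied to the boolean outcome instead.
 */
static bool fw_dump_ended(__le16 type)
{
	return le16_to_cpu(type) == FW_DUMP_INFO_ENDED;
}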
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index e813b2ca740c..8e4e9b6919e0 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -199,7 +199,7 @@ struct mwl8k_priv {
struct ieee80211_channel channels_24[14];
struct ieee80211_rate rates_24[13];
struct ieee80211_supported_band band_50;
- struct ieee80211_channel channels_50[4];
+ struct ieee80211_channel channels_50[9];
struct ieee80211_rate rates_50[8];
u32 ap_macids_supported;
u32 sta_macids_supported;
@@ -383,6 +383,11 @@ static const struct ieee80211_channel mwl8k_channels_50[] = {
{ .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
{ .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
{ .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5745, .hw_value = 149, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5765, .hw_value = 153, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5785, .hw_value = 157, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5805, .hw_value = 161, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5825, .hw_value = 165, },
};
static const struct ieee80211_rate mwl8k_rates_50[] = {
diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig
index 28843fed750a..92ce4062f307 100644
--- a/drivers/net/wireless/mediatek/Kconfig
+++ b/drivers/net/wireless/mediatek/Kconfig
@@ -11,4 +11,5 @@ config WLAN_VENDOR_MEDIATEK
if WLAN_VENDOR_MEDIATEK
source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/Kconfig"
endif # WLAN_VENDOR_MEDIATEK
diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile
index 9d5f182fd7fd..00f945f59b38 100644
--- a/drivers/net/wireless/mediatek/Makefile
+++ b/drivers/net/wireless/mediatek/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_MT7601U) += mt7601u/
+obj-$(CONFIG_MT76_CORE) += mt76/
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
new file mode 100644
index 000000000000..fc05d79c80d0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -0,0 +1,10 @@
+config MT76_CORE
+ tristate
+
+config MT76x2E
+ tristate "MediaTek MT76x2E (PCIe) support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on PCI
+ ---help---
+ This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
new file mode 100644
index 000000000000..a0156bc01dea
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+
+mt76-y := \
+ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
+
+CFLAGS_trace.o := -I$(src)
+
+mt76x2e-y := \
+ mt76x2_pci.o mt76x2_dma.o \
+ mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
+ mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+ mt76x2_dfs.o mt76x2_trace.o
+
+CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
new file mode 100644
index 000000000000..8027bb7c03c2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "mt76.h"
+
+#define REORDER_TIMEOUT (HZ / 10)
+
+static void
+mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
+{
+ struct sk_buff *skb;
+
+ tid->head = ieee80211_sn_inc(tid->head);
+
+ skb = tid->reorder_buf[idx];
+ if (!skb)
+ return;
+
+ tid->reorder_buf[idx] = NULL;
+ tid->nframes--;
+ __skb_queue_tail(frames, skb);
+}
+
+static void
+mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, struct sk_buff_head *frames,
+ u16 head)
+{
+ int idx;
+
+ while (ieee80211_sn_less(tid->head, head)) {
+ idx = tid->head % tid->size;
+ mt76_aggr_release(tid, frames, idx);
+ }
+}
+
+static void
+mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+ int idx = tid->head % tid->size;
+
+ while (tid->reorder_buf[idx]) {
+ mt76_aggr_release(tid, frames, idx);
+ idx = tid->head % tid->size;
+ }
+}
+
+static void
+mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+ struct mt76_rx_status *status;
+ struct sk_buff *skb;
+ int start, idx, nframes;
+
+ if (!tid->nframes)
+ return;
+
+ mt76_rx_aggr_release_head(tid, frames);
+
+ start = tid->head % tid->size;
+ nframes = tid->nframes;
+
+ for (idx = (tid->head + 1) % tid->size;
+ idx != start && nframes;
+ idx = (idx + 1) % tid->size) {
+
+ skb = tid->reorder_buf[idx];
+ if (!skb)
+ continue;
+
+ nframes--;
+ status = (struct mt76_rx_status *) skb->cb;
+ if (!time_after(jiffies, status->reorder_time +
+ REORDER_TIMEOUT))
+ continue;
+
+ mt76_rx_aggr_release_frames(tid, frames, status->seqno);
+ }
+
+ mt76_rx_aggr_release_head(tid, frames);
+}
+
+static void
+mt76_rx_aggr_reorder_work(struct work_struct *work)
+{
+ struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
+ reorder_work.work);
+ struct mt76_dev *dev = tid->dev;
+ struct sk_buff_head frames;
+
+ __skb_queue_head_init(&frames);
+
+ local_bh_disable();
+
+ spin_lock(&tid->lock);
+ mt76_rx_aggr_check_release(tid, &frames);
+ spin_unlock(&tid->lock);
+
+ ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT);
+ mt76_rx_complete(dev, &frames, -1);
+
+ local_bh_enable();
+}
+
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_sta *sta;
+ struct mt76_rx_tid *tid;
+ bool sn_less;
+ u16 seqno, head, size;
+ u8 idx;
+
+ __skb_queue_tail(frames, skb);
+
+ sta = wcid_to_sta(wcid);
+ if (!sta || !status->aggr)
+ return;
+
+ tid = rcu_dereference(wcid->aggr[status->tid]);
+ if (!tid)
+ return;
+
+ spin_lock_bh(&tid->lock);
+
+ if (tid->stopped)
+ goto out;
+
+ head = tid->head;
+ seqno = status->seqno;
+ size = tid->size;
+ sn_less = ieee80211_sn_less(seqno, head);
+
+ if (!tid->started) {
+ if (sn_less)
+ goto out;
+
+ tid->started = true;
+ }
+
+ if (sn_less) {
+ __skb_unlink(skb, frames);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ if (seqno == head) {
+ tid->head = ieee80211_sn_inc(head);
+ if (tid->nframes)
+ mt76_rx_aggr_release_head(tid, frames);
+ goto out;
+ }
+
+ __skb_unlink(skb, frames);
+
+ /*
+ * Frame sequence number exceeds the buffering window; free up some
+ * space by releasing previous frames.
+ */
+ if (!ieee80211_sn_less(seqno, head + size)) {
+ head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
+ mt76_rx_aggr_release_frames(tid, frames, head);
+ }
+
+ idx = seqno % size;
+
+ /* Discard if the current slot is already in use */
+ if (tid->reorder_buf[idx]) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ status->reorder_time = jiffies;
+ tid->reorder_buf[idx] = skb;
+ tid->nframes++;
+ mt76_rx_aggr_release_head(tid, frames);
+
+ ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT);
+
+out:
+ spin_unlock_bh(&tid->lock);
+}
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
+ u16 ssn, u8 size)
+{
+ struct mt76_rx_tid *tid;
+
+ mt76_rx_aggr_stop(dev, wcid, tidno);
+
+ tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]),
+ GFP_KERNEL);
+ if (!tid)
+ return -ENOMEM;
+
+ tid->dev = dev;
+ tid->head = ssn;
+ tid->size = size;
+ INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
+ spin_lock_init(&tid->lock);
+
+ rcu_assign_pointer(wcid->aggr[tidno], tid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
+
+static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
+{
+ u8 size = tid->size;
+ int i;
+
+ spin_lock_bh(&tid->lock);
+
+ tid->stopped = true;
+ for (i = 0; tid->nframes && i < size; i++) {
+ struct sk_buff *skb = tid->reorder_buf[i];
+
+ if (!skb)
+ continue;
+
+ tid->nframes--;
+ dev_kfree_skb(skb);
+ }
+
+ spin_unlock_bh(&tid->lock);
+
+ cancel_delayed_work_sync(&tid->reorder_work);
+}
+
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
+{
+ struct mt76_rx_tid *tid;
+
+ rcu_read_lock();
+
+ tid = rcu_dereference(wcid->aggr[tidno]);
+ if (tid) {
+ rcu_assign_pointer(wcid->aggr[tidno], NULL);
+ mt76_rx_aggr_shutdown(dev, tid);
+ kfree_rcu(tid, rcu_head);
+ }
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
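The reorder window above leans on mac80211's modulo-4096 sequence-number
arithmetic. A self-contained sketch of the same arithmetic, paraphrasing the
ieee80211_sn_*() helpers from linux/ieee80211.h:

#include <stdbool.h>
#include <stdint.h>

#define SN_MAX		4095	/* 12-bit 802.11 sequence numbers */
#define SN_MODULO	(SN_MAX + 1)

static bool sn_less(uint16_t sn1, uint16_t sn2)
{
	/* sn1 precedes sn2 if it trails by less than half the window */
	return ((sn1 - sn2) & SN_MAX) > (SN_MODULO >> 1);
}

static uint16_t sn_inc(uint16_t sn)
{
	return (sn + 1) & SN_MAX;
}

static uint16_t sn_sub(uint16_t sn1, uint16_t sn2)
{
	return (sn1 - sn2) & SN_MAX;
}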
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
new file mode 100644
index 000000000000..c121b502a462
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "mt76.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt76_dev *dev = data;
+
+ dev->bus->wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt76_dev *dev = data;
+
+ *val = dev->bus->rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
+ "0x%08llx\n");
+
+static int
+mt76_queues_read(struct seq_file *s, void *data)
+{
+ struct mt76_dev *dev = dev_get_drvdata(s->private);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
+ struct mt76_queue *q = &dev->q_tx[i];
+
+ if (!q->ndesc)
+ continue;
+
+ seq_printf(s,
+ "%d: queued=%d head=%d tail=%d swq_queued=%d\n",
+ i, q->queued, q->head, q->tail, q->swq_queued);
+ }
+
+ return 0;
+}
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir);
+ if (!dir)
+ return NULL;
+
+ debugfs_create_u8("led_pin", S_IRUSR | S_IWUSR, dir, &dev->led_pin);
+ debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+ debugfs_create_file_unsafe("regval", S_IRUSR | S_IWUSR, dir, dev,
+ &fops_regval);
+ debugfs_create_blob("eeprom", S_IRUSR, dir, &dev->eeprom);
+ if (dev->otp.data)
+ debugfs_create_blob("otp", S_IRUSR, dir, &dev->otp);
+ debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
+
+ return dir;
+}
+EXPORT_SYMBOL_GPL(mt76_register_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
new file mode 100644
index 000000000000..3518703524e7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "mt76.h"
+#include "dma.h"
+
+#define DMA_DUMMY_TXWI ((void *) ~0)
+
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ int size;
+ int i;
+
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->swq);
+
+ size = q->ndesc * sizeof(struct mt76_desc);
+ q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ size = q->ndesc * sizeof(*q->entry);
+ q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ /* clear descriptors */
+ for (i = 0; i < q->ndesc; i++)
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+ iowrite32(q->desc_dma, &q->regs->desc_base);
+ iowrite32(0, &q->regs->cpu_idx);
+ iowrite32(0, &q->regs->dma_idx);
+ iowrite32(q->ndesc, &q->regs->ring_size);
+
+ return 0;
+}
+
+static int
+mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_buf *buf, int nbufs, u32 info,
+ struct sk_buff *skb, void *txwi)
+{
+ struct mt76_desc *desc;
+ u32 ctrl;
+ int i, idx = -1;
+
+ if (txwi)
+ q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+
+ for (i = 0; i < nbufs; i += 2, buf += 2) {
+ u32 buf0 = buf[0].addr, buf1 = 0;
+
+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+ if (i < nbufs - 1) {
+ buf1 = buf[1].addr;
+ ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+ }
+
+ if (i == nbufs - 1)
+ ctrl |= MT_DMA_CTL_LAST_SEC0;
+ else if (i == nbufs - 2)
+ ctrl |= MT_DMA_CTL_LAST_SEC1;
+
+ idx = q->head;
+ q->head = (q->head + 1) % q->ndesc;
+
+ desc = &q->desc[idx];
+
+ WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
+ WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+ WRITE_ONCE(desc->info, cpu_to_le32(info));
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+ q->queued++;
+ }
+
+ q->entry[idx].txwi = txwi;
+ q->entry[idx].skb = skb;
+
+ return idx;
+}
+
+static void
+mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ struct mt76_queue_entry *prev_e)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+ __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
+ u32 ctrl = le32_to_cpu(__ctrl);
+
+ if (!e->txwi || !e->skb) {
+ __le32 addr = READ_ONCE(q->desc[idx].buf0);
+ u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
+
+ dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+ DMA_TO_DEVICE);
+ }
+
+ if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
+ __le32 addr = READ_ONCE(q->desc[idx].buf1);
+ u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
+
+ dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+ DMA_TO_DEVICE);
+ }
+
+ if (e->txwi == DMA_DUMMY_TXWI)
+ e->txwi = NULL;
+
+ *prev_e = *e;
+ memset(e, 0, sizeof(*e));
+}
+
+static void
+mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ q->head = ioread32(&q->regs->dma_idx);
+ q->tail = q->head;
+ iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static void
+mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
+{
+ struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue_entry entry;
+ bool wake = false;
+ int last;
+
+ if (!q->ndesc)
+ return;
+
+ spin_lock_bh(&q->lock);
+ if (flush)
+ last = -1;
+ else
+ last = ioread32(&q->regs->dma_idx);
+
+ while (q->queued && q->tail != last) {
+ mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
+ if (entry.schedule)
+ q->swq_queued--;
+
+ if (entry.skb)
+ dev->drv->tx_complete_skb(dev, q, &entry, flush);
+
+ if (entry.txwi) {
+ mt76_put_txwi(dev, entry.txwi);
+ wake = true;
+ }
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ if (!flush && q->tail == last)
+ last = ioread32(&q->regs->dma_idx);
+ }
+
+ if (!flush)
+ mt76_txq_schedule(dev, q);
+ else
+ mt76_dma_sync_idx(dev, q);
+
+ wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+ spin_unlock_bh(&q->lock);
+
+ if (wake)
+ ieee80211_wake_queue(dev->hw, qid);
+}
+
+static void *
+mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ int *len, u32 *info, bool *more)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+ struct mt76_desc *desc = &q->desc[idx];
+ dma_addr_t buf_addr;
+ void *buf = e->buf;
+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+ if (len) {
+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
+ *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
+ *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
+ }
+
+ if (info)
+ *info = le32_to_cpu(desc->info);
+
+ dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+ e->buf = NULL;
+
+ return buf;
+}
+
+static void *
+mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+ int *len, u32 *info, bool *more)
+{
+ int idx = q->tail;
+
+ *more = false;
+ if (!q->queued)
+ return NULL;
+
+ if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ return NULL;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ return mt76_dma_get_buf(dev, q, idx, len, info, more);
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static int
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
+{
+ dma_addr_t addr;
+ void *buf;
+ int frames = 0;
+ int len = SKB_WITH_OVERHEAD(q->buf_size);
+ int offset = q->buf_offset;
+ int idx;
+ void *(*alloc)(unsigned int fragsz);
+
+ if (napi)
+ alloc = napi_alloc_frag;
+ else
+ alloc = netdev_alloc_frag;
+
+ spin_lock_bh(&q->lock);
+
+ while (q->queued < q->ndesc - 1) {
+ struct mt76_queue_buf qbuf;
+
+ buf = alloc(q->buf_size);
+ if (!buf)
+ break;
+
+ addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev->dev, addr)) {
+ skb_free_frag(buf);
+ break;
+ }
+
+ qbuf.addr = addr + offset;
+ qbuf.len = len - offset;
+ idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+ frames++;
+ }
+
+ if (frames)
+ mt76_dma_kick_queue(dev, q);
+
+ spin_unlock_bh(&q->lock);
+
+ return frames;
+}
+
+static void
+mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ void *buf;
+ bool more;
+
+ spin_lock_bh(&q->lock);
+ do {
+ buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
+ if (!buf)
+ break;
+
+ skb_free_frag(buf);
+ } while (1);
+ spin_unlock_bh(&q->lock);
+}
+
+static void
+mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+ struct mt76_queue *q = &dev->q_rx[qid];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+ mt76_dma_rx_cleanup(dev, q);
+ mt76_dma_sync_idx(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+}
+
+static void
+mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+ int len, bool more)
+{
+ struct page *page = virt_to_head_page(data);
+ int offset = data - page_address(page);
+ struct sk_buff *skb = q->rx_head;
+
+ offset += q->buf_offset;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
+ q->buf_size);
+
+ if (more)
+ return;
+
+ q->rx_head = NULL;
+ dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+}
+
+static int
+mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+{
+ struct sk_buff *skb;
+ unsigned char *data;
+ int len;
+ int done = 0;
+ bool more;
+
+ while (done < budget) {
+ u32 info;
+
+ data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
+ if (!data)
+ break;
+
+ if (q->rx_head) {
+ mt76_add_fragment(dev, q, data, len, more);
+ continue;
+ }
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb) {
+ skb_free_frag(data);
+ continue;
+ }
+
+ skb_reserve(skb, q->buf_offset);
+ if (skb->tail + len > skb->end) {
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ if (q == &dev->q_rx[MT_RXQ_MCU]) {
+ u32 *rxfce = (u32 *) skb->cb;
+ *rxfce = info;
+ }
+
+ __skb_put(skb, len);
+ done++;
+
+ if (more) {
+ q->rx_head = skb;
+ continue;
+ }
+
+ dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+ }
+
+ mt76_dma_rx_fill(dev, q, true);
+ return done;
+}
+
+static int
+mt76_dma_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct mt76_dev *dev;
+ int qid, done = 0, cur;
+
+ dev = container_of(napi->dev, struct mt76_dev, napi_dev);
+ qid = napi - dev->napi;
+
+ rcu_read_lock();
+
+ do {
+ cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
+ mt76_rx_poll_complete(dev, qid);
+ done += cur;
+ } while (cur && done < budget);
+
+ rcu_read_unlock();
+
+ if (done < budget) {
+ napi_complete(napi);
+ dev->drv->rx_poll_complete(dev, qid);
+ }
+
+ return done;
+}
+
+static int
+mt76_dma_init(struct mt76_dev *dev)
+{
+ int i;
+
+ init_dummy_netdev(&dev->napi_dev);
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+ netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
+ 64);
+ mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+ skb_queue_head_init(&dev->rx_skb[i]);
+ napi_enable(&dev->napi[i]);
+ }
+
+ return 0;
+}
+
+static const struct mt76_queue_ops mt76_dma_ops = {
+ .init = mt76_dma_init,
+ .alloc = mt76_dma_alloc_queue,
+ .add_buf = mt76_dma_add_buf,
+ .tx_cleanup = mt76_dma_tx_cleanup,
+ .rx_reset = mt76_dma_rx_reset,
+ .kick = mt76_dma_kick_queue,
+};
+
+int mt76_dma_attach(struct mt76_dev *dev)
+{
+ dev->queue_ops = &mt76_dma_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_attach);
+
+void mt76_dma_cleanup(struct mt76_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
+ mt76_dma_tx_cleanup(dev, i, true);
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+ netif_napi_del(&dev->napi[i]);
+ mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
new file mode 100644
index 000000000000..1dad39697929
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __MT76_DMA_H
+#define __MT76_DMA_H
+
+#define MT_RING_SIZE 0x10
+
+#define MT_DMA_CTL_SD_LEN1 GENMASK(13, 0)
+#define MT_DMA_CTL_LAST_SEC1 BIT(14)
+#define MT_DMA_CTL_BURST BIT(15)
+#define MT_DMA_CTL_SD_LEN0 GENMASK(29, 16)
+#define MT_DMA_CTL_LAST_SEC0 BIT(30)
+#define MT_DMA_CTL_DMA_DONE BIT(31)
+
+struct mt76_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+} __packed __aligned(4);
+
+int mt76_dma_attach(struct mt76_dev *dev);
+void mt76_dma_cleanup(struct mt76_dev *dev);
+
+#endif
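A short sketch of how the MT_DMA_CTL_* masks above are meant to be used with
the bitfield helpers already employed in dma.c; the example_ names are
hypothetical:

#include <linux/bitfield.h>
#include <linux/types.h>
#include "dma.h"

/* Pack two buffer lengths into a descriptor control word */
static u32 example_pack_ctrl(u16 len0, u16 len1, bool last)
{
	u32 ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, len0) |
		   FIELD_PREP(MT_DMA_CTL_SD_LEN1, len1);

	if (last)
		ctrl |= MT_DMA_CTL_LAST_SEC0;

	return ctrl;
}

/* Decoding uses FIELD_GET, as in mt76_dma_tx_cleanup_idx() */
static u16 example_ctrl_len0(u32 ctrl)
{
	return FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
}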
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
new file mode 100644
index 000000000000..530e5593765c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include "mt76.h"
+
+static int
+mt76_get_of_eeprom(struct mt76_dev *dev, int len)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_MTD)
+ struct device_node *np = dev->dev->of_node;
+ struct mtd_info *mtd;
+ const __be32 *list;
+ const char *part;
+ phandle phandle;
+ int offset = 0;
+ int size;
+ size_t retlen;
+ int ret;
+
+ if (!np)
+ return -ENOENT;
+
+ list = of_get_property(np, "mediatek,mtd-eeprom", &size);
+ if (!list)
+ return -ENOENT;
+
+ phandle = be32_to_cpup(list++);
+ if (!phandle)
+ return -ENOENT;
+
+ np = of_find_node_by_phandle(phandle);
+ if (!np)
+ return -EINVAL;
+
+ part = of_get_property(np, "label", NULL);
+ if (!part)
+ part = np->name;
+
+ mtd = get_mtd_device_nm(part);
+ if (IS_ERR(mtd))
+ return PTR_ERR(mtd);
+
+ if (size <= sizeof(*list))
+ return -EINVAL;
+
+ offset = be32_to_cpup(list);
+ ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
+ put_mtd_device(mtd);
+ if (ret)
+ return ret;
+
+ if (retlen < len)
+ return -EINVAL;
+
+ return 0;
+#else
+ return -ENOENT;
+#endif
+}
+
+void
+mt76_eeprom_override(struct mt76_dev *dev)
+{
+#ifdef CONFIG_OF
+ struct device_node *np = dev->dev->of_node;
+ const u8 *mac;
+
+ if (!np)
+ return;
+
+ mac = of_get_mac_address(np);
+ if (mac)
+ memcpy(dev->macaddr, mac, ETH_ALEN);
+#endif
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_override);
+
+int
+mt76_eeprom_init(struct mt76_dev *dev, int len)
+{
+ dev->eeprom.size = len;
+ dev->eeprom.data = devm_kzalloc(dev->dev, len, GFP_KERNEL);
+ if (!dev->eeprom.data)
+ return -ENOMEM;
+
+ return !mt76_get_of_eeprom(dev, len);
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
new file mode 100644
index 000000000000..5fcb2deb89a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include "mt76.h"
+
+#define CHAN2G(_idx, _freq) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_idx, _freq) { \
+ .band = NL80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+ CHAN5G(36, 5180),
+ CHAN5G(40, 5200),
+ CHAN5G(44, 5220),
+ CHAN5G(48, 5240),
+
+ CHAN5G(52, 5260),
+ CHAN5G(56, 5280),
+ CHAN5G(60, 5300),
+ CHAN5G(64, 5320),
+
+ CHAN5G(100, 5500),
+ CHAN5G(104, 5520),
+ CHAN5G(108, 5540),
+ CHAN5G(112, 5560),
+ CHAN5G(116, 5580),
+ CHAN5G(120, 5600),
+ CHAN5G(124, 5620),
+ CHAN5G(128, 5640),
+ CHAN5G(132, 5660),
+ CHAN5G(136, 5680),
+ CHAN5G(140, 5700),
+
+ CHAN5G(149, 5745),
+ CHAN5G(153, 5765),
+ CHAN5G(157, 5785),
+ CHAN5G(161, 5805),
+ CHAN5G(165, 5825),
+};
+
+static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 5 * 1024, .blink_time = 220 },
+ { .throughput = 10 * 1024, .blink_time = 190 },
+ { .throughput = 20 * 1024, .blink_time = 170 },
+ { .throughput = 50 * 1024, .blink_time = 150 },
+ { .throughput = 70 * 1024, .blink_time = 130 },
+ { .throughput = 100 * 1024, .blink_time = 110 },
+ { .throughput = 200 * 1024, .blink_time = 80 },
+ { .throughput = 300 * 1024, .blink_time = 50 },
+};
+
+static int mt76_led_init(struct mt76_dev *dev)
+{
+ struct device_node *np = dev->dev->of_node;
+ struct ieee80211_hw *hw = dev->hw;
+ int led_pin;
+
+ if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ return 0;
+
+ snprintf(dev->led_name, sizeof(dev->led_name),
+ "mt76-%s", wiphy_name(hw->wiphy));
+
+ dev->led_cdev.name = dev->led_name;
+ dev->led_cdev.default_trigger =
+ ieee80211_create_tpt_led_trigger(hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO,
+ mt76_tpt_blink,
+ ARRAY_SIZE(mt76_tpt_blink));
+
+ np = of_get_child_by_name(np, "led");
+ if (np) {
+ if (!of_property_read_u32(np, "led-sources", &led_pin))
+ dev->led_pin = led_pin;
+ dev->led_al = of_property_read_bool(np, "led-active-low");
+ }
+
+ return devm_led_classdev_register(dev->dev, &dev->led_cdev);
+}
+
+static int
+mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates, bool vht)
+{
+ struct ieee80211_supported_band *sband = &msband->sband;
+ struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ void *chanlist;
+ u16 mcs_map;
+ int size;
+
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ msband->chan = devm_kzalloc(dev->dev, n_chan * sizeof(*msband->chan),
+ GFP_KERNEL);
+ if (!msband->chan)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+ dev->chandef.chan = &sband->channels[0];
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_TX_STBC |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ ht_cap->mcs.rx_mask[0] = 0xff;
+ ht_cap->mcs.rx_mask[1] = 0xff;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+ if (!vht)
+ return 0;
+
+ vht_cap = &sband->vht_cap;
+ vht_cap->vht_supported = true;
+
+ mcs_map = (IEEE80211_VHT_MCS_SUPPORT_0_9 << (0 * 2)) |
+ (IEEE80211_VHT_MCS_SUPPORT_0_9 << (1 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (2 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (3 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (4 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (5 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (6 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (7 * 2));
+
+ vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_SHORT_GI_80;
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+ int n_rates)
+{
+ dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;
+
+ return mt76_init_sband(dev, &dev->sband_2g,
+ mt76_channels_2ghz,
+ ARRAY_SIZE(mt76_channels_2ghz),
+ rates, n_rates, false);
+}
+
+static int
+mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+ int n_rates, bool vht)
+{
+ dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;
+
+ return mt76_init_sband(dev, &dev->sband_5g,
+ mt76_channels_5ghz,
+ ARRAY_SIZE(mt76_channels_5ghz),
+ rates, n_rates, vht);
+}
+
+static void
+mt76_check_sband(struct mt76_dev *dev, int band)
+{
+ struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
+ bool found = false;
+ int i;
+
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ found = true;
+ break;
+ }
+
+ if (found)
+ return;
+
+ sband->n_channels = 0;
+ dev->hw->wiphy->bands[band] = NULL;
+}
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_hw *hw = dev->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ dev_set_drvdata(dev->dev, dev);
+
+ spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->cc_lock);
+ INIT_LIST_HEAD(&dev->txwi_cache);
+
+ SET_IEEE80211_DEV(hw, dev->dev);
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
+ hw->txq_data_size = sizeof(struct mt76_txq);
+ hw->max_tx_fragments = 16;
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ if (dev->cap.has_2ghz) {
+ ret = mt76_init_sband_2g(dev, rates, n_rates);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->cap.has_5ghz) {
+ ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
+ if (ret)
+ return ret;
+ }
+
+ wiphy_read_of_freq_limits(dev->hw->wiphy);
+ mt76_check_sband(dev, NL80211_BAND_2GHZ);
+ mt76_check_sband(dev, NL80211_BAND_5GHZ);
+
+ ret = mt76_led_init(dev);
+ if (ret)
+ return ret;
+
+ return ieee80211_register_hw(hw);
+}
+EXPORT_SYMBOL_GPL(mt76_register_device);
+
+void mt76_unregister_device(struct mt76_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->hw;
+
+ ieee80211_unregister_hw(hw);
+ mt76_tx_free(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_device);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
+{
+ if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ __skb_queue_tail(&dev->rx_skb[q], skb);
+}
+EXPORT_SYMBOL_GPL(mt76_rx);
+
+void mt76_set_channel(struct mt76_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+ struct mt76_channel_state *state;
+ bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+ if (dev->drv->update_survey)
+ dev->drv->update_survey(dev);
+
+ dev->chandef = *chandef;
+
+ if (!offchannel)
+ dev->main_chan = chandef->chan;
+
+ if (chandef->chan != dev->main_chan) {
+ state = mt76_channel_state(dev, chandef->chan);
+ memset(state, 0, sizeof(*state));
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct mt76_dev *dev = hw->priv;
+ struct mt76_sband *sband;
+ struct ieee80211_channel *chan;
+ struct mt76_channel_state *state;
+ int ret = 0;
+
+ if (idx == 0 && dev->drv->update_survey)
+ dev->drv->update_survey(dev);
+
+ sband = &dev->sband_2g;
+ if (idx >= sband->sband.n_channels) {
+ idx -= sband->sband.n_channels;
+ sband = &dev->sband_5g;
+ }
+
+ if (idx >= sband->sband.n_channels)
+ return -ENOENT;
+
+ chan = &sband->sband.channels[idx];
+ state = mt76_channel_state(dev, chan);
+
+ memset(survey, 0, sizeof(*survey));
+ survey->channel = chan;
+ survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
+ if (chan == dev->main_chan)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+ spin_lock_bh(&dev->cc_lock);
+ survey->time = div_u64(state->cc_active, 1000);
+ survey->time_busy = div_u64(state->cc_busy, 1000);
+ spin_unlock_bh(&dev->cc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_get_survey);
+
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key)
+{
+ struct ieee80211_key_seq seq;
+ int i;
+
+ wcid->rx_check_pn = false;
+
+ if (!key)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ wcid->rx_check_pn = true;
+
+ for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
+ }
+}
+EXPORT_SYMBOL(mt76_wcid_key_setup);
+
+static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt76_rx_status mstat;
+
+ mstat = *((struct mt76_rx_status *) skb->cb);
+ memset(status, 0, sizeof(*status));
+
+ status->flag = mstat.flag;
+ status->freq = mstat.freq;
+ status->enc_flags = mstat.enc_flags;
+ status->encoding = mstat.encoding;
+ status->bw = mstat.bw;
+ status->rate_idx = mstat.rate_idx;
+ status->nss = mstat.nss;
+ status->band = mstat.band;
+ status->signal = mstat.signal;
+ status->chains = mstat.chains;
+
+ BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
+ BUILD_BUG_ON(sizeof(status->chain_signal) != sizeof(mstat.chain_signal));
+ memcpy(status->chain_signal, mstat.chain_signal, sizeof(mstat.chain_signal));
+
+ return wcid_to_sta(mstat.wcid);
+}
+
+static int
+mt76_check_ccmp_pn(struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_hdr *hdr;
+ int ret;
+
+ if (!(status->flag & RX_FLAG_DECRYPTED))
+ return 0;
+
+ if (!wcid || !wcid->rx_check_pn)
+ return 0;
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ /*
+ * Validate the first fragment both here and in mac80211
+ * All further fragments will be validated by mac80211 only.
+ */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ if (ieee80211_is_frag(hdr) &&
+ !ieee80211_is_first_frag(hdr->frame_control))
+ return 0;
+ }
+
+ BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
+ ret = memcmp(status->iv, wcid->rx_key_pn[status->tid],
+ sizeof(status->iv));
+ if (ret <= 0)
+ return -EINVAL; /* replay */
+
+ memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv));
+
+ if (status->flag & RX_FLAG_IV_STRIPPED)
+ status->flag |= RX_FLAG_PN_VALIDATED;
+
+ return 0;
+}
+
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+ int queue)
+{
+ struct napi_struct *napi = NULL;
+ struct ieee80211_sta *sta;
+ struct sk_buff *skb;
+
+ if (queue >= 0)
+ napi = &dev->napi[queue];
+
+ while ((skb = __skb_dequeue(frames)) != NULL) {
+ if (mt76_check_ccmp_pn(skb)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ sta = mt76_rx_convert(skb);
+ ieee80211_rx_napi(dev->hw, sta, skb, napi);
+ }
+}
+
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+{
+ struct sk_buff_head frames;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&frames);
+
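+ /*
+ * Run pending frames through the A-MPDU reorder buffer, then
+ * deliver whatever it releases in order.
+ */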
+ while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL)
+ mt76_rx_aggr_reorder(skb, &frames);
+
+ mt76_rx_complete(dev, &frames, q);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
new file mode 100644
index 000000000000..09a14dead6e3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "trace.h"
+
+static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
+{
+ u32 val;
+
+ val = ioread32(dev->regs + offset);
+ trace_reg_rr(dev, offset, val);
+
+ return val;
+}
+
+static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ trace_reg_wr(dev, offset, val);
+ iowrite32(val, dev->regs + offset);
+}
+
+static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
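+ /* keep the bits outside @mask, overlay the new value */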
+ val |= mt76_mmio_rr(dev, offset) & ~mask;
+ mt76_mmio_wr(dev, offset, val);
+ return val;
+}
+
+static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
+ int len)
+{
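+ /* __iowrite32_copy() takes a count of 32-bit words, hence len >> 2 */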
+ __iowrite32_copy(dev->regs + offset, data, len >> 2);
+}
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
+{
+ static const struct mt76_bus_ops mt76_mmio_ops = {
+ .rr = mt76_mmio_rr,
+ .rmw = mt76_mmio_rmw,
+ .wr = mt76_mmio_wr,
+ .copy = mt76_mmio_copy,
+ };
+
+ dev->bus = &mt76_mmio_ops;
+ dev->regs = regs;
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
new file mode 100644
index 000000000000..129015c9d116
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76_H
+#define __MT76_H
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/leds.h>
+#include <net/mac80211.h>
+#include "util.h"
+
+#define MT_TX_RING_SIZE 256
+#define MT_MCU_RING_SIZE 32
+#define MT_RX_BUF_SIZE 2048
+
+struct mt76_dev;
+
+struct mt76_bus_ops {
+ u32 (*rr)(struct mt76_dev *dev, u32 offset);
+ void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
+ u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
+ void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
+ int len);
+};
+
+enum mt76_txq_id {
+ MT_TXQ_VO = IEEE80211_AC_VO,
+ MT_TXQ_VI = IEEE80211_AC_VI,
+ MT_TXQ_BE = IEEE80211_AC_BE,
+ MT_TXQ_BK = IEEE80211_AC_BK,
+ MT_TXQ_PSD,
+ MT_TXQ_MCU,
+ MT_TXQ_BEACON,
+ MT_TXQ_CAB,
+ __MT_TXQ_MAX
+};
+
+enum mt76_rxq_id {
+ MT_RXQ_MAIN,
+ MT_RXQ_MCU,
+ __MT_RXQ_MAX
+};
+
+struct mt76_queue_buf {
+ dma_addr_t addr;
+ int len;
+};
+
+struct mt76_queue_entry {
+ union {
+ void *buf;
+ struct sk_buff *skb;
+ };
+ struct mt76_txwi_cache *txwi;
+ bool schedule;
+};
+
+struct mt76_queue_regs {
+ u32 desc_base;
+ u32 ring_size;
+ u32 cpu_idx;
+ u32 dma_idx;
+} __packed __aligned(4);
+
+struct mt76_queue {
+ struct mt76_queue_regs __iomem *regs;
+
+ spinlock_t lock;
+ struct mt76_queue_entry *entry;
+ struct mt76_desc *desc;
+
+ struct list_head swq;
+ int swq_queued;
+
+ u16 head;
+ u16 tail;
+ int ndesc;
+ int queued;
+ int buf_size;
+
+ u8 buf_offset;
+ u8 hw_idx;
+
+ dma_addr_t desc_dma;
+ struct sk_buff *rx_head;
+};
+
+struct mt76_queue_ops {
+ int (*init)(struct mt76_dev *dev);
+
+ int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
+
+ int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_buf *buf, int nbufs, u32 info,
+ struct sk_buff *skb, void *txwi);
+
+ void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+ int *len, u32 *info, bool *more);
+
+ void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
+
+ void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ bool flush);
+
+ void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
+};
+
+struct mt76_wcid {
+ struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
+
+ struct work_struct aggr_work;
+
+ u8 idx;
+ u8 hw_key_idx;
+
+ u8 sta:1;
+
+ u8 rx_check_pn;
+ u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
+
+ __le16 tx_rate;
+ bool tx_rate_set;
+ u8 tx_rate_nss;
+ s8 max_txpwr_adj;
+ bool sw_iv;
+};
+
+struct mt76_txq {
+ struct list_head list;
+ struct mt76_queue *hwq;
+ struct mt76_wcid *wcid;
+
+ struct sk_buff_head retry_q;
+
+ u16 agg_ssn;
+ bool send_bar;
+ bool aggr;
+};
+
+struct mt76_txwi_cache {
+ u32 txwi[8];
+ dma_addr_t dma_addr;
+ struct list_head list;
+};
+
+struct mt76_rx_tid {
+ struct rcu_head rcu_head;
+
+ struct mt76_dev *dev;
+
+ spinlock_t lock;
+ struct delayed_work reorder_work;
+
+ u16 head;
+ u8 size;
+ u8 nframes;
+
+ u8 started:1, stopped:1, timer_pending:1;
+
+ struct sk_buff *reorder_buf[];
+};
+
+enum {
+ MT76_STATE_INITIALIZED,
+ MT76_STATE_RUNNING,
+ MT76_SCANNING,
+ MT76_RESET,
+};
+
+struct mt76_hw_cap {
+ bool has_2ghz;
+ bool has_5ghz;
+};
+
+struct mt76_driver_ops {
+ u16 txwi_size;
+
+ void (*update_survey)(struct mt76_dev *dev);
+
+ int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, u32 *tx_info);
+
+ void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+
+ void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+
+ void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+};
+
+struct mt76_channel_state {
+ u64 cc_active;
+ u64 cc_busy;
+};
+
+struct mt76_sband {
+ struct ieee80211_supported_band sband;
+ struct mt76_channel_state *chan;
+};
+
+struct mt76_dev {
+ struct ieee80211_hw *hw;
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_channel *main_chan;
+
+ spinlock_t lock;
+ spinlock_t cc_lock;
+ const struct mt76_bus_ops *bus;
+ const struct mt76_driver_ops *drv;
+ void __iomem *regs;
+ struct device *dev;
+
+ struct net_device napi_dev;
+ struct napi_struct napi[__MT_RXQ_MAX];
+ struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+
+ struct list_head txwi_cache;
+ struct mt76_queue q_tx[__MT_TXQ_MAX];
+ struct mt76_queue q_rx[__MT_RXQ_MAX];
+ const struct mt76_queue_ops *queue_ops;
+
+ u8 macaddr[ETH_ALEN];
+ u32 rev;
+ unsigned long state;
+
+ struct mt76_sband sband_2g;
+ struct mt76_sband sband_5g;
+ struct debugfs_blob_wrapper eeprom;
+ struct debugfs_blob_wrapper otp;
+ struct mt76_hw_cap cap;
+
+ u32 debugfs_reg;
+
+ struct led_classdev led_cdev;
+ char led_name[32];
+ bool led_al;
+ u8 led_pin;
+};
+
+enum mt76_phy_type {
+ MT_PHY_TYPE_CCK,
+ MT_PHY_TYPE_OFDM,
+ MT_PHY_TYPE_HT,
+ MT_PHY_TYPE_HT_GF,
+ MT_PHY_TYPE_VHT,
+};
+
+struct mt76_rate_power {
+ union {
+ struct {
+ s8 cck[4];
+ s8 ofdm[8];
+ s8 ht[16];
+ s8 vht[10];
+ };
+ s8 all[38];
+ };
+};
+
+struct mt76_rx_status {
+ struct mt76_wcid *wcid;
+
+ unsigned long reorder_time;
+
+ u8 iv[6];
+
+ u8 aggr:1;
+ u8 tid;
+ u16 seqno;
+
+ u16 freq;
+ u32 flag;
+ u8 enc_flags;
+ u8 encoding:2, bw:3;
+ u8 rate_idx;
+ u8 nss;
+ u8 band;
+ u8 signal;
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+};
+
+#define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
+
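+/* rmw shorthands: mt76_set() ORs bits in, mt76_clear() masks them out */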
+#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
+#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
+
+#define mt76_get_field(_dev, _reg, _field) \
+ FIELD_GET(_field, mt76_rr(_dev, _reg))
+
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+#define mt76_hw(dev) (dev)->mt76.hw
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+
+#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+
+#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+
+static inline u16 mt76_chip(struct mt76_dev *dev)
+{
+ return dev->rev >> 16;
+}
+
+static inline u16 mt76_rev(struct mt76_dev *dev)
+{
+ return dev->rev & 0xffff;
+}
+
+#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
+#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
+
+#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
+#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+
+static inline struct mt76_channel_state *
+mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+{
+ struct mt76_sband *msband;
+ int idx;
+
+ if (c->band == NL80211_BAND_2GHZ)
+ msband = &dev->sband_2g;
+ else
+ msband = &dev->sband_5g;
+
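+ /*
+ * Per-channel state is allocated parallel to the channel array,
+ * so the pointer difference gives the index.
+ */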
+ idx = c - &msband->sband.channels[0];
+ return &msband->chan[idx];
+}
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+ struct ieee80211_rate *rates, int n_rates);
+void mt76_unregister_device(struct mt76_dev *dev);
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+
+int mt76_eeprom_init(struct mt76_dev *dev, int len);
+void mt76_eeprom_override(struct mt76_dev *dev);
+
+static inline struct ieee80211_txq *
+mtxq_to_txq(struct mt76_txq *mtxq)
+{
+ void *ptr = mtxq;
+
+ return container_of(ptr, struct ieee80211_txq, drv_priv);
+}
+
+static inline struct ieee80211_sta *
+wcid_to_sta(struct mt76_wcid *wcid)
+{
+ void *ptr = wcid;
+
+ if (!wcid || !wcid->sta)
+ return NULL;
+
+ return container_of(ptr, struct ieee80211_sta, drv_priv);
+}
+
+int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
+void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ struct mt76_wcid *wcid, struct sk_buff *skb);
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ bool send_bar);
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
+void mt76_txq_schedule_all(struct mt76_dev *dev);
+void mt76_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+void mt76_set_channel(struct mt76_dev *dev);
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey);
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
+ u16 ssn, u8 size);
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
+
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key);
+
+/* internal */
+void mt76_tx_free(struct mt76_dev *dev);
+void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+ int queue);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
new file mode 100644
index 000000000000..17df17afd9bf
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/kfifo.h>
+
+#define MT7662_FIRMWARE "mt7662.bin"
+#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE 512
+
+#define MT76x2_RX_RING_SIZE 256
+#define MT_RX_HEADROOM 32
+
+#define MT_MAX_CHAINS 2
+
+#define MT_CALIBRATE_INTERVAL HZ
+
+#include "mt76.h"
+#include "mt76x2_regs.h"
+#include "mt76x2_mac.h"
+#include "mt76x2_dfs.h"
+
+struct mt76x2_mcu {
+ struct mutex mutex;
+
+ wait_queue_head_t wait;
+ struct sk_buff_head res_q;
+
+ u32 msg_seq;
+};
+
+struct mt76x2_rx_freq_cal {
+ s8 high_gain[MT_MAX_CHAINS];
+ s8 rssi_offset[MT_MAX_CHAINS];
+ s8 lna_gain;
+ u32 mcu_gain;
+};
+
+struct mt76x2_calibration {
+ struct mt76x2_rx_freq_cal rx;
+
+ u8 agc_gain_init[MT_MAX_CHAINS];
+ u8 agc_gain_cur[MT_MAX_CHAINS];
+
+ int avg_rssi[MT_MAX_CHAINS];
+ int avg_rssi_all;
+
+ s8 agc_gain_adjust;
+ s8 low_gain;
+
+ u8 temp;
+
+ bool init_cal_done;
+ bool tssi_cal_done;
+ bool tssi_comp_pending;
+ bool dpd_cal_done;
+ bool channel_cal_done;
+};
+
+struct mt76x2_dev {
+ struct mt76_dev mt76; /* must be first */
+
+ struct mac_address macaddr_list[8];
+
+ struct mutex mutex;
+
+ const u16 *beacon_offsets;
+ unsigned long wcid_mask[128 / BITS_PER_LONG];
+
+ int txpower_conf;
+ int txpower_cur;
+
+ u8 txdone_seq;
+ DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status);
+
+ struct mt76x2_mcu mcu;
+ struct sk_buff *rx_head;
+
+ struct tasklet_struct tx_tasklet;
+ struct tasklet_struct pre_tbtt_tasklet;
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ u32 aggr_stats[32];
+
+ struct mt76_wcid global_wcid;
+ struct mt76_wcid __rcu *wcid[128];
+
+ spinlock_t irq_lock;
+ u32 irqmask;
+
+ struct sk_buff *beacons[8];
+ u8 beacon_mask;
+ u8 beacon_data_mask;
+
+ u32 rev;
+ u32 rxfilter;
+
+ u16 chainmask;
+
+ struct mt76x2_calibration cal;
+
+ s8 target_power;
+ s8 target_power_delta[2];
+ struct mt76_rate_power rate_power;
+ bool enable_tpc;
+
+ u8 coverage_class;
+ u8 slottime;
+
+ struct mt76x2_dfs_pattern_detector dfs_pd;
+};
+
+struct mt76x2_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76x2_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt76x2_vif *vif;
+ struct mt76x2_tx_status status;
+ int n_frames;
+};
+
+static inline bool is_mt7612(struct mt76x2_dev *dev)
+{
+ return (dev->rev >> 16) == 0x7612;
+}
+
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
+
+static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
+{
+ mt76x2_set_irq_mask(dev, 0, mask);
+}
+
+static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
+{
+ mt76x2_set_irq_mask(dev, mask, 0);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
+int mt76x2_register_device(struct mt76x2_dev *dev);
+void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
+void mt76x2_phy_power_on(struct mt76x2_dev *dev);
+int mt76x2_init_hardware(struct mt76x2_dev *dev);
+void mt76x2_stop_hardware(struct mt76x2_dev *dev);
+int mt76x2_eeprom_init(struct mt76x2_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel);
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev);
+
+int mt76x2_phy_start(struct mt76x2_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef);
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x2_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan);
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+ u8 channel);
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_dma_init(struct mt76x2_dev *dev);
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
+
+void mt76x2_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, int cmd, int seq);
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb);
+int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg);
+
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+
+void mt76x2_update_channel(struct mt76_dev *mdev);
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate);
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
new file mode 100644
index 000000000000..2629779e8d3e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ dev->irqmask &= ~clear;
+ dev->irqmask |= set;
+ mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask);
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+}
+
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
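+ /* NAPI polling is done; re-arm the RX done interrupt for this queue */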
+ mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
+{
+ struct mt76x2_dev *dev = dev_instance;
+ u32 intr;
+
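+ /* read and acknowledge all pending interrupt sources up front */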
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(dev, intr, dev->irqmask);
+
+ intr &= dev->irqmask;
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ tasklet_schedule(&dev->tx_tasklet);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt76x2_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt76x2_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ if (intr & MT_INT_PRE_TBTT)
+ tasklet_schedule(&dev->pre_tbtt_tasklet);
+
+ /* send buffered multicast frames now */
+ if (intr & MT_INT_TBTT)
+ mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+
+ if (intr & MT_INT_TX_STAT) {
+ mt76x2_mac_poll_tx_status(dev, true);
+ tasklet_schedule(&dev->tx_tasklet);
+ }
+
+ if (intr & MT_INT_GPTIMER) {
+ mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+ tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
new file mode 100644
index 000000000000..612feb593d7d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "mt76x2.h"
+
+static int
+mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt76x2_dev *dev = file->private;
+ int i, j;
+
+ for (i = 0; i < 4; i++) {
+ seq_puts(file, "Length: ");
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%8d | ", i * 8 + j + 1);
+ seq_puts(file, "\n");
+ seq_puts(file, "Count: ");
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
+ seq_puts(file, "\n");
+ seq_puts(file, "--------");
+ for (j = 0; j < 8; j++)
+ seq_puts(file, "-----------");
+ seq_puts(file, "\n");
+ }
+
+ return 0;
+}
+
+static int
+mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
+}
+
+static void
+seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len)
+{
+ int i;
+
+ seq_printf(file, "%10s:", str);
+ for (i = 0; i < len; i++)
+ seq_printf(file, " %2d", val[i]);
+ seq_puts(file, "\n");
+}
+
+static int read_txpower(struct seq_file *file, void *data)
+{
+ struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+
+ seq_printf(file, "Target power: %d\n", dev->target_power);
+
+ seq_puts_array(file, "Delta", dev->target_power_delta,
+ ARRAY_SIZE(dev->target_power_delta));
+ seq_puts_array(file, "CCK", dev->rate_power.cck,
+ ARRAY_SIZE(dev->rate_power.cck));
+ seq_puts_array(file, "OFDM", dev->rate_power.ofdm,
+ ARRAY_SIZE(dev->rate_power.ofdm));
+ seq_puts_array(file, "HT", dev->rate_power.ht,
+ ARRAY_SIZE(dev->rate_power.ht));
+ seq_puts_array(file, "VHT", dev->rate_power.vht,
+ ARRAY_SIZE(dev->rate_power.vht));
+ return 0;
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt76x2_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
+mt76x2_dfs_stat_read(struct seq_file *file, void *data)
+{
+ int i;
+ struct mt76x2_dev *dev = file->private;
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+ seq_printf(file, "engine: %d\n", i);
+ seq_printf(file, " hw pattern detected:\t%d\n",
+ dfs_pd->stats[i].hw_pattern);
+ seq_printf(file, " hw pulse discarded:\t%d\n",
+ dfs_pd->stats[i].hw_pulse_discarded);
+ }
+
+ return 0;
+}
+
+static int
+mt76x2_dfs_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x2_dfs_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_dfs_stat = {
+ .open = mt76x2_dfs_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mt76x2_init_debugfs(struct mt76x2_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return;
+
+ debugfs_create_u8("temperature", S_IRUSR, dir, &dev->cal.temp);
+ debugfs_create_bool("tpc", S_IRUSR | S_IWUSR, dir, &dev->enable_tpc);
+
+ debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("dfs_stats", S_IRUSR, dir, dev, &fops_dfs_stat);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
+ read_txpower);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
new file mode 100644
index 000000000000..f936dc9a5476
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+#define RADAR_SPEC(m, len, el, eh, wl, wh, \
+ w_tolerance, tl, th, t_tolerance, \
+ bl, bh, event_exp, power_jmp) \
+{ \
+ .mode = m, \
+ .avg_len = len, \
+ .e_low = el, \
+ .e_high = eh, \
+ .w_low = wl, \
+ .w_high = wh, \
+ .w_margin = w_tolerance, \
+ .t_low = tl, \
+ .t_high = th, \
+ .t_margin = t_tolerance, \
+ .b_low = bl, \
+ .b_high = bh, \
+ .event_expiration = event_exp, \
+ .pwr_jmp = power_jmp \
+}
+
+static const struct mt76x2_radar_specs etsi_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+ 0x7fffffff, 0x155cc0, 0x19dd),
+ RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+ 0x7fffffff, 0x2191c0, 0x15cc),
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+ 0x7fffffff, 0x155cc0, 0x19dd),
+ RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+ 0x7fffffff, 0x2191c0, 0x15cc),
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+ 0x7fffffff, 0x155cc0, 0x19dd),
+ RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+ 0x7fffffff, 0x2191c0, 0x15cc)
+};
+
+static const struct mt76x2_radar_specs fcc_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0xfe808, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0xfe808, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0xfe808, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0xfe808, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0xfe808, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0xfe808, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
+ 0x7fffffff, 0xfe808, 0x16cc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0xfe808, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0xfe808, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0x14c080, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0x14c080, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0x14c080, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0x14c080, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0x14c080, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0x14c080, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0x14c080, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0x14c080, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 }
+};
+
+static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
+ u8 enable)
+{
+ u32 data;
+
+ data = (1 << 1) | enable;
+ mt76_wr(dev, MT_BBP(DFS, 36), data);
+}
+
+static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
+{
+ bool ret = false;
+ u32 current_ts, delta_ts;
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
+ delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
+ dfs_pd->chirp_pulse_ts = current_ts;
+
+ /* ~12 s window (the timer ticks at roughly 1 << 20 Hz) */
+ if (delta_ts <= (12 * (1 << 20))) {
+ if (++dfs_pd->chirp_pulse_cnt > 8)
+ ret = true;
+ } else {
+ dfs_pd->chirp_pulse_cnt = 1;
+ }
+
+ return ret;
+}
+
+static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_hw_pulse *pulse)
+{
+ u32 data;
+
+ /* select the detection engine */
+ data = (MT_DFS_CH_EN << 16) | pulse->engine;
+ mt76_wr(dev, MT_BBP(DFS, 0), data);
+
+ /* reported period */
+ pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
+
+ /* reported width */
+ pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
+ pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
+
+ /* reported burst number */
+ pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
+}
+
+static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_hw_pulse *pulse)
+{
+ bool ret = false;
+
+ if (!pulse->period || !pulse->w1)
+ return false;
+
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ if (pulse->engine > 3)
+ break;
+
+ if (pulse->engine == 3) {
+ ret = mt76x2_dfs_check_chirp(dev);
+ break;
+ }
+
+ /* check short pulse */
+ if (pulse->w1 < 120)
+ ret = (pulse->period >= 2900 &&
+ (pulse->period <= 4700 ||
+ pulse->period >= 6400) &&
+ (pulse->period <= 6800 ||
+ pulse->period >= 10200) &&
+ pulse->period <= 61600);
+ else if (pulse->w1 < 130) /* 120 - 130 */
+ ret = (pulse->period >= 2900 &&
+ pulse->period <= 61600);
+ else
+ ret = (pulse->period >= 3500 &&
+ pulse->period <= 10100);
+ break;
+ case NL80211_DFS_ETSI:
+ if (pulse->engine >= 3)
+ break;
+
+ ret = (pulse->period >= 4900 &&
+ (pulse->period <= 10200 ||
+ pulse->period >= 12400) &&
+ pulse->period <= 100100);
+ break;
+ case NL80211_DFS_JP:
+ if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+ dev->mt76.chandef.chan->center_freq <= 5350) {
+ /* JPW53 */
+ if (pulse->w1 <= 130)
+ ret = (pulse->period >= 28360 &&
+ (pulse->period <= 28700 ||
+ pulse->period >= 76900) &&
+ pulse->period <= 76940);
+ break;
+ }
+
+ if (pulse->engine > 3)
+ break;
+
+ if (pulse->engine == 3) {
+ ret = mt76x2_dfs_check_chirp(dev);
+ break;
+ }
+
+ /* check short pulse */
+ if (pulse->w1 < 120)
+ ret = (pulse->period >= 2900 &&
+ (pulse->period <= 4700 ||
+ pulse->period >= 6400) &&
+ (pulse->period <= 6800 ||
+ pulse->period >= 27560) &&
+ (pulse->period <= 27960 ||
+ pulse->period >= 28360) &&
+ (pulse->period <= 28700 ||
+ pulse->period >= 79900) &&
+ pulse->period <= 80100);
+ else if (pulse->w1 < 130) /* 120 - 130 */
+ ret = (pulse->period >= 2900 &&
+ (pulse->period <= 10100 ||
+ pulse->period >= 27560) &&
+ (pulse->period <= 27960 ||
+ pulse->period >= 28360) &&
+ (pulse->period <= 28700 ||
+ pulse->period >= 79900) &&
+ pulse->period <= 80100);
+ else
+ ret = (pulse->period >= 3900 &&
+ pulse->period <= 10100);
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return false;
+ }
+
+ return ret;
+}
+
+static void mt76x2_dfs_tasklet(unsigned long arg)
+{
+ struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ u32 engine_mask;
+ int i;
+
+ if (test_bit(MT76_SCANNING, &dev->mt76.state))
+ goto out;
+
+ engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
+ if (!(engine_mask & 0xf))
+ goto out;
+
+ for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+ struct mt76x2_dfs_hw_pulse pulse;
+
+ if (!(engine_mask & (1 << i)))
+ continue;
+
+ pulse.engine = i;
+ mt76x2_dfs_get_hw_pulse(dev, &pulse);
+
+ if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) {
+ dfs_pd->stats[i].hw_pulse_discarded++;
+ continue;
+ }
+
+ /* the hw detector reported a radar pattern */
+ dfs_pd->stats[i].hw_pattern++;
+ ieee80211_radar_detected(dev->mt76.hw);
+
+ /* reset hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+ return;
+ }
+
+ /* reset hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+out:
+ mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+}
+
+static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
+{
+ u32 data;
+ u8 i, shift;
+ const struct mt76x2_radar_specs *radar_specs;
+
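+ /*
+ * Each radar spec table holds MT_DFS_NUM_ENGINES entries per
+ * bandwidth (20/40/80 MHz); pick the block matching the current
+ * channel width.
+ */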
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ shift = MT_DFS_NUM_ENGINES;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ shift = 2 * MT_DFS_NUM_ENGINES;
+ break;
+ default:
+ shift = 0;
+ break;
+ }
+
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs[shift];
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs[shift];
+ break;
+ case NL80211_DFS_JP:
+ if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+ dev->mt76.chandef.chan->center_freq <= 5350)
+ radar_specs = &jp_w53_radar_specs[shift];
+ else
+ radar_specs = &jp_w56_radar_specs[shift];
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return;
+ }
+
+ data = (MT_DFS_VGA_MASK << 16) |
+ (MT_DFS_PWR_GAIN_OFFSET << 12) |
+ (MT_DFS_PWR_DOWN_TIME << 8) |
+ (MT_DFS_SYM_ROUND << 4) |
+ (MT_DFS_DELTA_DELAY & 0xf);
+ mt76_wr(dev, MT_BBP(DFS, 2), data);
+
+ data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
+ mt76_wr(dev, MT_BBP(DFS, 3), data);
+
+ for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+ /* configure engine */
+ mt76_wr(dev, MT_BBP(DFS, 0), i);
+
+ /* detection mode + avg_len */
+ data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
+ (radar_specs[i].mode & 0xf);
+ mt76_wr(dev, MT_BBP(DFS, 4), data);
+
+ /* dfs energy */
+ data = ((radar_specs[i].e_high & 0x0fff) << 16) |
+ (radar_specs[i].e_low & 0x0fff);
+ mt76_wr(dev, MT_BBP(DFS, 5), data);
+
+ /* dfs period */
+ mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
+ mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
+
+ /* dfs burst */
+ mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
+ mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
+
+ /* dfs width */
+ data = ((radar_specs[i].w_high & 0x0fff) << 16) |
+ (radar_specs[i].w_low & 0x0fff);
+ mt76_wr(dev, MT_BBP(DFS, 14), data);
+
+ /* dfs margins */
+ data = (radar_specs[i].w_margin << 16) |
+ radar_specs[i].t_margin;
+ mt76_wr(dev, MT_BBP(DFS, 15), data);
+
+ /* dfs event expiration */
+ mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
+
+ /* dfs pwr adj */
+ mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
+ }
+
+ /* reset status */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+ mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
+
+ /* enable detection */
+ mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+ mt76_wr(dev, 0x212c, 0x0c350001);
+}
+
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
+{
+ u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
+
+ agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
+ agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
+
+ val_r8 = (agc_r8 & 0x00007e00) >> 9;
+ val_r4 = agc_r4 & ~0x1f000000;
+ val_r4 += (((val_r8 + 1) >> 1) << 24);
+ mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
+
+ dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
+ dfs_r31 += val_r8;
+ dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
+ dfs_r31 = (dfs_r31 << 16) | 0x00000307;
+ mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
+
+ mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+}
+
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
+{
+ struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+
+ if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ dev->dfs_pd.region != NL80211_DFS_UNSET) {
+ mt76x2_dfs_set_bbp_params(dev);
+ /* enable debug mode */
+ mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+
+ mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+ mt76_rmw_field(dev, MT_INT_TIMER_EN,
+ MT_INT_TIMER_EN_GP_TIMER_EN, 1);
+ } else {
+ /* disable hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 0), 0);
+ /* clear detector status */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+ mt76_wr(dev, 0x212c, 0);
+
+ mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+ mt76_rmw_field(dev, MT_INT_TIMER_EN,
+ MT_INT_TIMER_EN_GP_TIMER_EN, 0);
+ }
+}
+
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ dfs_pd->region = NL80211_DFS_UNSET;
+ tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
+ (unsigned long)dev);
+}
+
+void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
+ enum nl80211_dfs_regions region)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ if (dfs_pd->region != region) {
+ tasklet_disable(&dfs_pd->dfs_tasklet);
+ dfs_pd->region = region;
+ mt76x2_dfs_init_params(dev);
+ tasklet_enable(&dfs_pd->dfs_tasklet);
+ }
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
new file mode 100644
index 000000000000..8dbc783cc6bc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DFS_H
+#define __MT76x2_DFS_H
+
+#include <linux/types.h>
+#include <linux/nl80211.h>
+
+#define MT_DFS_GP_INTERVAL (10 << 4) /* 64 us unit */
+#define MT_DFS_NUM_ENGINES 4
+
+/* bbp params */
+#define MT_DFS_SYM_ROUND 0
+#define MT_DFS_DELTA_DELAY 2
+#define MT_DFS_VGA_MASK 0
+#define MT_DFS_PWR_GAIN_OFFSET 3
+#define MT_DFS_PWR_DOWN_TIME 0xf
+#define MT_DFS_RX_PE_MASK 0xff
+#define MT_DFS_PKT_END_MASK 0
+#define MT_DFS_CH_EN 0xf
+
+struct mt76x2_radar_specs {
+ u8 mode;
+ u16 avg_len;
+ u16 e_low;
+ u16 e_high;
+ u16 w_low;
+ u16 w_high;
+ u16 w_margin;
+ u32 t_low;
+ u32 t_high;
+ u16 t_margin;
+ u32 b_low;
+ u32 b_high;
+ u32 event_expiration;
+ u16 pwr_jmp;
+};
+
+struct mt76x2_dfs_hw_pulse {
+ u8 engine;
+ u32 period;
+ u32 w1;
+ u32 w2;
+ u32 burst;
+};
+
+struct mt76x2_dfs_engine_stats {
+ u32 hw_pattern;
+ u32 hw_pulse_discarded;
+};
+
+struct mt76x2_dfs_pattern_detector {
+ enum nl80211_dfs_regions region;
+
+ u8 chirp_pulse_cnt;
+ u32 chirp_pulse_ts;
+
+ struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+ struct tasklet_struct dfs_tasklet;
+};
+
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev);
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev);
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev);
+void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
+ enum nl80211_dfs_regions region);
+
+#endif /* __MT76x2_DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
new file mode 100644
index 000000000000..fd1ec4743e0b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+int
+mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, int cmd, int seq)
+{
+ struct mt76_queue *q = &dev->mt76.q_tx[qid];
+ struct mt76_queue_buf buf;
+ dma_addr_t addr;
+ u32 tx_info;
+
+ tx_info = MT_MCU_MSG_TYPE_CMD |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+ addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->mt76.dev, addr))
+ return -ENOMEM;
+
+ buf.addr = addr;
+ buf.len = skb->len;
+ spin_lock_bh(&q->lock);
+ mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+ mt76_queue_kick(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ return 0;
+}
+
+static int
+mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc)
+{
+ int ret;
+
+ q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->hw_idx = idx;
+
+ ret = mt76_queue_alloc(dev, q);
+ if (ret)
+ return ret;
+
+ mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+ return 0;
+}
+
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ void *rxwi = skb->data;
+
+ if (q == MT_RXQ_MCU) {
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
+ return;
+ }
+
+ skb_pull(skb, sizeof(struct mt76x2_rxwi));
+ if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ mt76_rx(&dev->mt76, q, skb);
+}
+
+static int
+mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize)
+{
+ int ret;
+
+ q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+
+ ret = mt76_queue_alloc(dev, q);
+ if (ret)
+ return ret;
+
+ mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+ return 0;
+}
+
+static void
+mt76x2_tx_tasklet(unsigned long data)
+{
+ struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
+ int i;
+
+ mt76x2_mac_process_tx_status_fifo(dev);
+
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ mt76x2_mac_poll_tx_status(dev, false);
+ mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt76x2_dma_init(struct mt76x2_dev *dev)
+{
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+ int ret;
+ int i;
+ struct mt76_txwi_cache __maybe_unused *t;
+ struct mt76_queue *q;
+
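+ /*
+ * Compile-time checks: the generic txwi cache and RX headroom must
+ * be large enough for the mt76x2-specific descriptors.
+ */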
+ BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi));
+ BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM);
+
+ mt76_dma_attach(&dev->mt76);
+
+ init_waitqueue_head(&dev->mcu.wait);
+ skb_queue_head_init(&dev->mcu.res_q);
+
+ tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev);
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i],
+ wmm_queue_map[i], MT_TX_RING_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+ MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+ q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi);
+ ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE,
+ MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ return mt76_init_queues(dev);
+}
+
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
+{
+ tasklet_kill(&dev->tx_tasklet);
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
new file mode 100644
index 000000000000..47f79d83fcb4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DMA_H
+#define __MT76x2_DMA_H
+
+#include "dma.h"
+
+#define MT_TXD_INFO_LEN GENMASK(13, 0)
+#define MT_TXD_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_INFO_TX_BURST BIT(17)
+#define MT_TXD_INFO_80211 BIT(19)
+#define MT_TXD_INFO_TSO BIT(20)
+#define MT_TXD_INFO_CSO BIT(21)
+#define MT_TXD_INFO_WIV BIT(24)
+#define MT_TXD_INFO_QSEL GENMASK(26, 25)
+#define MT_TXD_INFO_TCO BIT(29)
+#define MT_TXD_INFO_UCO BIT(30)
+#define MT_TXD_INFO_ICO BIT(31)
+
+#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
+#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
+#define MT_MCU_MSG_PORT GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD BIT(30)
+
+enum mt76x2_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+enum dma_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
new file mode 100644
index 000000000000..9c9bf3e785ba
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
+
+static int
+mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field,
+ void *dest, int len)
+{
+ if (field + len > dev->mt76.eeprom.size)
+ return -1;
+
+ memcpy(dest, dev->mt76.eeprom.data + field, len);
+ return 0;
+}
+
+static int
+mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
+{
+ void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
+
+ memcpy(dev->mt76.macaddr, src, ETH_ALEN);
+ return 0;
+}
+
+static void
+mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+{
+ u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+ switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
+ case BOARD_TYPE_5GHZ:
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ case BOARD_TYPE_2GHZ:
+ dev->mt76.cap.has_2ghz = true;
+ break;
+ default:
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ }
+}
+
+static int
+mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+ val |= MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ udelay(2);
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int
+mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len)
+{
+ int ret, i;
+
+ for (i = 0; i + 16 <= len; i += 16) {
+ ret = mt76x2_efuse_read(dev, i, buf + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+ u16 *efuse_w = (u16 *) efuse;
+
+ if (efuse_w[MT_EE_NIC_CONF_0] != 0)
+ return false;
+
+ if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
+ return false;
+
+ return true;
+}
+
+static void
+mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+#define GROUP_5G(_id) \
+ MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
+ MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
+ MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
+ MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
+
+ static const u8 cal_free_bytes[] = {
+ MT_EE_XTAL_TRIM_1,
+ MT_EE_TX_POWER_EXT_PA_5G + 1,
+ MT_EE_TX_POWER_0_START_2G,
+ MT_EE_TX_POWER_0_START_2G + 1,
+ MT_EE_TX_POWER_1_START_2G,
+ MT_EE_TX_POWER_1_START_2G + 1,
+ GROUP_5G(0),
+ GROUP_5G(1),
+ GROUP_5G(2),
+ GROUP_5G(3),
+ GROUP_5G(4),
+ GROUP_5G(5),
+ MT_EE_RF_2G_TSSI_OFF_TXPOWER,
+ MT_EE_RF_2G_RX_HIGH_GAIN + 1,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
+ };
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 prev_grp0[4] = {
+ eeprom[MT_EE_TX_POWER_0_START_5G],
+ eeprom[MT_EE_TX_POWER_0_START_5G + 1],
+ eeprom[MT_EE_TX_POWER_1_START_5G],
+ eeprom[MT_EE_TX_POWER_1_START_5G + 1]
+ };
+ u16 val;
+ int i;
+
+ if (!mt76x2_has_cal_free_data(dev, efuse))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
+ int offset = cal_free_bytes[i];
+
+ eeprom[offset] = efuse[offset];
+ }
+
+ if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
+ efuse[MT_EE_TX_POWER_0_START_5G + 1]))
+ memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
+ if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
+ efuse[MT_EE_TX_POWER_1_START_5G + 1]))
+ memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
+
+ val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
+ if (val != 0xffff)
+ eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
+
+ val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
+ if (val != 0xffff)
+ eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
+
+ val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
+ if (val != 0xffff)
+ eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
+}
+
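+/*
+ * Accept EEPROM data that identifies as MT7662 or MT7612, either by
+ * chip ID or by the PCI ID fallback.
+ */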
+static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
+{
+ u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
+
+ if (!val)
+ val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
+
+ switch (val) {
+ case 0x7662:
+ case 0x7612:
+ return 0;
+ default:
+ dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
+ return -EINVAL;
+ }
+}
+
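+/*
+ * Load calibration data: a positive return from mt76_eeprom_init()
+ * appears to indicate that external EEPROM data was found (e.g. via
+ * devicetree); if none is usable, the raw efuse contents are used
+ * instead.
+ */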
+static int
+mt76x2_eeprom_load(struct mt76x2_dev *dev)
+{
+ void *efuse;
+ int len = MT7662_EEPROM_SIZE;
+ bool found;
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, len);
+ if (ret < 0)
+ return ret;
+
+ found = ret;
+ if (found)
+ found = !mt76x2_check_eeprom(dev);
+
+ dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+ dev->mt76.otp.size = len;
+ if (!dev->mt76.otp.data)
+ return -ENOMEM;
+
+ efuse = dev->mt76.otp.data;
+
+ if (mt76x2_get_efuse_data(dev, efuse, len))
+ goto out;
+
+ if (found) {
+ mt76x2_apply_cal_free_data(dev, efuse);
+ } else {
+ /* FIXME: check if efuse data is complete */
+ found = true;
+ memcpy(dev->mt76.eeprom.data, efuse, len);
+ }
+
+out:
+ if (!found)
+ return -ENOENT;
+
+ return 0;
+}
+
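+/*
+ * Decode the sign-magnitude encoding used by the calibration fields.
+ * Note that in this convention a set sign bit denotes a positive
+ * value, e.g. for size = 5: 0x1f -> 15, 0x0f -> -15.
+ */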
+static inline int
+mt76x2_sign_extend(u32 val, unsigned int size)
+{
+ bool sign = val & BIT(size - 1);
+
+ val &= BIT(size - 1) - 1;
+
+ return sign ? val : -val;
+}
+
+static inline int
+mt76x2_sign_extend_optional(u32 val, unsigned int size)
+{
+ bool enable = val & BIT(size);
+
+ return enable ? mt76x2_sign_extend(val, size) : 0;
+}
+
+static bool
+field_valid(u8 val)
+{
+ return val != 0 && val != 0xff;
+}
+
+static void
+mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
+{
+ s8 *dest = dev->cal.rx.high_gain;
+
+ if (!field_valid(val)) {
+ dest[0] = 0;
+ dest[1] = 0;
+ return;
+ }
+
+ dest[0] = mt76x2_sign_extend(val, 4);
+ dest[1] = mt76x2_sign_extend(val >> 4, 4);
+}
+
+static void
+mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val)
+{
+ s8 *dest = dev->cal.rx.rssi_offset;
+
+ if (!field_valid(val)) {
+ dest[chain] = 0;
+ return;
+ }
+
+ dest[chain] = mt76x2_sign_extend_optional(val, 7);
+}
+
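+/*
+ * Map a channel number to its calibration group: the Japanese 4.9 GHz
+ * channels (184-196) come first, followed by the UNII sub-bands in
+ * ascending channel order.
+ */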
+static enum mt76x2_cal_channel_group
+mt76x2_get_cal_channel_group(int channel)
+{
+ if (channel >= 184 && channel <= 196)
+ return MT_CH_5G_JAPAN;
+ if (channel <= 48)
+ return MT_CH_5G_UNII_1;
+ if (channel <= 64)
+ return MT_CH_5G_UNII_2;
+ if (channel <= 114)
+ return MT_CH_5G_UNII_2E_1;
+ if (channel <= 144)
+ return MT_CH_5G_UNII_2E_2;
+ return MT_CH_5G_UNII_3;
+}
+
+static u8
+mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel)
+{
+ enum mt76x2_cal_channel_group group;
+
+ group = mt76x2_get_cal_channel_group(channel);
+ switch (group) {
+ case MT_CH_5G_JAPAN:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
+ case MT_CH_5G_UNII_1:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
+ case MT_CH_5G_UNII_2:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
+ case MT_CH_5G_UNII_2E_1:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
+ case MT_CH_5G_UNII_2E_2:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
+ default:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
+ }
+}
+
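+/*
+ * Load the RX gain calibration for the current channel: high gain and
+ * RSSI offsets for the band, plus the per-group LNA gains.  The 5 GHz
+ * LNA gains for groups 1 and 2 live in the high bytes of the RSSI
+ * offset words; entries that are not programmed fall back to the
+ * group 0 value.
+ */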
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ int channel = chan->hw_value;
+ s8 lna_5g[3], lna_2g;
+ u8 lna;
+ u16 val;
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+ else
+ val = mt76x2_get_5g_rx_gain(dev, channel);
+
+ mt76x2_set_rx_gain_group(dev, val);
+
+ if (chan->band == NL80211_BAND_2GHZ) {
+ val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
+ mt76x2_set_rssi_offset(dev, 0, val);
+ mt76x2_set_rssi_offset(dev, 1, val >> 8);
+ } else {
+ val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
+ mt76x2_set_rssi_offset(dev, 0, val);
+ mt76x2_set_rssi_offset(dev, 1, val >> 8);
+ }
+
+ val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN);
+ lna_2g = val & 0xff;
+ lna_5g[0] = val >> 8;
+
+ val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
+ lna_5g[1] = val >> 8;
+
+ val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
+ lna_5g[2] = val >> 8;
+
+ if (!field_valid(lna_5g[1]))
+ lna_5g[1] = lna_5g[0];
+
+ if (!field_valid(lna_5g[2]))
+ lna_5g[2] = lna_5g[0];
+
+ dev->cal.rx.mcu_gain = (lna_2g & 0xff);
+ dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
+ dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+ dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+
+ val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
+ lna_2g = 0;
+ if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
+ memset(lna_5g, 0, sizeof(lna_5g));
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ lna = lna_2g;
+ else if (channel <= 64)
+ lna = lna_5g[0];
+ else if (channel <= 128)
+ lna = lna_5g[1];
+ else
+ lna = lna_5g[2];
+
+ if (lna == 0xff)
+ lna = 0;
+
+ dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
+}
+
+static s8
+mt76x2_rate_power_val(u8 val)
+{
+ if (!field_valid(val))
+ return 0;
+
+ return mt76x2_sign_extend_optional(val, 7);
+}
+
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+ struct ieee80211_channel *chan)
+{
+ bool is_5ghz;
+ u16 val;
+
+ is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ memset(t, 0, sizeof(*t));
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK);
+ t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val);
+ t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8);
+
+ if (is_5ghz)
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
+ else
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
+ t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val);
+ t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8);
+
+ if (is_5ghz)
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
+ else
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
+ t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val);
+ t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
+ t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val);
+ t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
+ t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val);
+ t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
+ t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val);
+ t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
+ t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val);
+ t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
+ t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val);
+ t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
+ t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val);
+ t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
+ if (!is_5ghz)
+ val >>= 8;
+ t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
+}
+
+int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
+{
+ int i;
+ s8 ret = 0;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ ret = max(ret, r->all[i]);
+
+ return ret;
+}
+
+static void
+mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan, int chain, int offset)
+{
+ int channel = chan->hw_value;
+ int delta_idx;
+ u8 data[6];
+ u16 val;
+
+ if (channel < 6)
+ delta_idx = 3;
+ else if (channel < 11)
+ delta_idx = 4;
+ else
+ delta_idx = 5;
+
+ mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+ t->chain[chain].tssi_slope = data[0];
+ t->chain[chain].tssi_offset = data[1];
+ t->chain[chain].target_power = data[2];
+ t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+ t->target_power = val >> 8;
+}
+
+static void
+mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan, int chain, int offset)
+{
+ int channel = chan->hw_value;
+ enum mt76x2_cal_channel_group group;
+ int delta_idx;
+ u16 val;
+ u8 data[5];
+
+ group = mt76x2_get_cal_channel_group(channel);
+ offset += group * MT_TX_POWER_GROUP_SIZE_5G;
+
+ if (channel >= 192)
+ delta_idx = 4;
+ else if (channel >= 184)
+ delta_idx = 3;
+ else if (channel < 44)
+ delta_idx = 3;
+ else if (channel < 52)
+ delta_idx = 4;
+ else if (channel < 58)
+ delta_idx = 3;
+ else if (channel < 98)
+ delta_idx = 4;
+ else if (channel < 106)
+ delta_idx = 3;
+ else if (channel < 116)
+ delta_idx = 4;
+ else if (channel < 130)
+ delta_idx = 3;
+ else if (channel < 149)
+ delta_idx = 4;
+ else if (channel < 157)
+ delta_idx = 3;
+ else
+ delta_idx = 4;
+
+ mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+ t->chain[chain].tssi_slope = data[0];
+ t->chain[chain].tssi_offset = data[1];
+ t->chain[chain].target_power = data[2];
+ t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
+ t->target_power = val & 0xff;
+}
+
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan)
+{
+ u16 bw40, bw80;
+
+ memset(t, 0, sizeof(*t));
+
+ bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+ bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
+
+ if (chan->band == NL80211_BAND_5GHZ) {
+ bw40 >>= 8;
+ mt76x2_get_power_info_5g(dev, t, chan, 0,
+ MT_EE_TX_POWER_0_START_5G);
+ mt76x2_get_power_info_5g(dev, t, chan, 1,
+ MT_EE_TX_POWER_1_START_5G);
+ } else {
+ mt76x2_get_power_info_2g(dev, t, chan, 0,
+ MT_EE_TX_POWER_0_START_2G);
+ mt76x2_get_power_info_2g(dev, t, chan, 1,
+ MT_EE_TX_POWER_1_START_2G);
+ }
+
+ if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power))
+ t->target_power = t->chain[0].target_power;
+
+ t->delta_bw40 = mt76x2_rate_power_val(bw40);
+ t->delta_bw80 = mt76x2_rate_power_val(bw80);
+}
+
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
+{
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+ u16 val, slope;
+ u8 bounds;
+
+ memset(t, 0, sizeof(*t));
+
+ val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC))
+ return -EINVAL;
+
+ if (!mt76x2_ext_pa_enabled(dev, band))
+ return -EINVAL;
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+ if (!(val & BIT(7)))
+ return -EINVAL;
+
+ t->temp_25_ref = val & 0x7f;
+ if (band == NL80211_BAND_5GHZ) {
+ slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+ bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+ } else {
+ slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+ bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8;
+ }
+
+ t->high_slope = slope & 0xff;
+ t->low_slope = slope >> 8;
+ t->lower_bound = 0 - (bounds & 0xf);
+ t->upper_bound = (bounds >> 4) & 0xf;
+
+ return 0;
+}
+
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+ u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+ if (band == NL80211_BAND_5GHZ)
+ return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
+ else
+ return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
+}
+
+int mt76x2_eeprom_init(struct mt76x2_dev *dev)
+{
+ int ret;
+
+ ret = mt76x2_eeprom_load(dev);
+ if (ret)
+ return ret;
+
+ mt76x2_eeprom_parse_hw_cap(dev);
+ mt76x2_eeprom_get_macaddr(dev);
+ mt76_eeprom_override(&dev->mt76);
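+	/* Clear the locally administered bit; the secondary interface
+	 * addresses derived in mt76x2_register_device() set it again.
+	 */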
+ dev->mt76.macaddr[0] &= ~BIT(1);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
new file mode 100644
index 000000000000..d79122728dca
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_EEPROM_H
+#define __MT76x2_EEPROM_H
+
+#include "mt76x2.h"
+
+enum mt76x2_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_PCI_ID = 0x00A,
+ MT_EE_NIC_CONF_0 = 0x034,
+ MT_EE_NIC_CONF_1 = 0x036,
+ MT_EE_NIC_CONF_2 = 0x042,
+
+ MT_EE_XTAL_TRIM_1 = 0x03a,
+ MT_EE_XTAL_TRIM_2 = 0x09e,
+
+ MT_EE_LNA_GAIN = 0x044,
+ MT_EE_RSSI_OFFSET_2G_0 = 0x046,
+ MT_EE_RSSI_OFFSET_2G_1 = 0x048,
+ MT_EE_RSSI_OFFSET_5G_0 = 0x04a,
+ MT_EE_RSSI_OFFSET_5G_1 = 0x04c,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x050,
+ MT_EE_TX_POWER_DELTA_BW80 = 0x052,
+
+ MT_EE_TX_POWER_EXT_PA_5G = 0x054,
+
+ MT_EE_TX_POWER_0_START_2G = 0x056,
+ MT_EE_TX_POWER_1_START_2G = 0x05c,
+
+ /* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G 5
+#define MT_TX_POWER_GROUPS_5G 6
+ MT_EE_TX_POWER_0_START_5G = 0x062,
+
+ MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074,
+ MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076,
+
+ MT_EE_TX_POWER_1_START_5G = 0x080,
+
+ MT_EE_TX_POWER_CCK = 0x0a0,
+ MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2,
+ MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4,
+ MT_EE_TX_POWER_OFDM_5G_6M = 0x0b2,
+ MT_EE_TX_POWER_OFDM_5G_24M = 0x0b4,
+ MT_EE_TX_POWER_HT_MCS0 = 0x0a6,
+ MT_EE_TX_POWER_HT_MCS4 = 0x0a8,
+ MT_EE_TX_POWER_HT_MCS8 = 0x0aa,
+ MT_EE_TX_POWER_HT_MCS12 = 0x0ac,
+ MT_EE_TX_POWER_VHT_MCS0 = 0x0ba,
+ MT_EE_TX_POWER_VHT_MCS4 = 0x0bc,
+ MT_EE_TX_POWER_VHT_MCS8 = 0x0be,
+
+ MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2,
+ MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4,
+
+ MT_EE_RF_2G_TSSI_OFF_TXPOWER = 0x0f6,
+ MT_EE_RF_2G_RX_HIGH_GAIN = 0x0f8,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN = 0x0fa,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN = 0x0fc,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN = 0x0fe,
+
+ MT_EE_BT_RCAL_RESULT = 0x138,
+ MT_EE_BT_VCDL_CALIBRATION = 0x13c,
+ MT_EE_BT_PMUCFG = 0x13e,
+
+ __MT_EE_MAX
+};
+
+#define MT_EE_NIC_CONF_0_PA_INT_2G BIT(8)
+#define MT_EE_NIC_CONF_0_PA_INT_5G BIT(9)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
+
+enum mt76x2_board_type {
+ BOARD_TYPE_2GHZ = 1,
+ BOARD_TYPE_5GHZ = 2,
+};
+
+enum mt76x2_cal_channel_group {
+ MT_CH_5G_JAPAN,
+ MT_CH_5G_UNII_1,
+ MT_CH_5G_UNII_2,
+ MT_CH_5G_UNII_2E_1,
+ MT_CH_5G_UNII_2E_2,
+ MT_CH_5G_UNII_3,
+ __MT_CH_MAX
+};
+
+struct mt76x2_tx_power_info {
+ u8 target_power;
+
+ s8 delta_bw40;
+ s8 delta_bw80;
+
+ struct {
+ s8 tssi_slope;
+ s8 tssi_offset;
+ s8 target_power;
+ s8 delta;
+ } chain[MT_MAX_CHAINS];
+};
+
+struct mt76x2_temp_comp {
+ u8 temp_25_ref;
+ int lower_bound; /* J */
+ int upper_bound; /* J */
+ unsigned int high_slope; /* J / dB */
+ unsigned int low_slope; /* J / dB */
+};
+
+static inline int
+mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field)
+{
+ if ((field & 1) || field >= __MT_EE_MAX)
+ return -1;
+
+ return get_unaligned_le16(dev->mt76.eeprom.data + field);
+}
+
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+ struct ieee80211_channel *chan);
+int mt76x2_get_max_rate_power(struct mt76_rate_power *r);
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan);
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
+{
+ return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x2_dev *dev)
+{
+ return !mt76x2_temp_tx_alc_enabled(dev) &&
+ (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static inline bool
+mt76x2_has_ext_lna(struct mt76x2_dev *dev)
+{
+ u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
+ else
+ return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
new file mode 100644
index 000000000000..1b00ae4465a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_mcu.h"
+
+struct mt76x2_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
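+/*
+ * The MAC is considered ready once MT_MAC_CSR0 reads back as something
+ * other than 0 or ~0, which presumably means the bus is up and the
+ * ASIC is out of reset.
+ */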
+static bool
+mt76x2_wait_for_mac(struct mt76x2_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 500; i++) {
+ switch (mt76_rr(dev, MT_MAC_CSR0)) {
+ case 0:
+ case ~0:
+ break;
+ default:
+ return true;
+ }
+ usleep_range(5000, 10000);
+ }
+
+ return false;
+}
+
+static bool
+wait_for_wpdma(struct mt76x2_dev *dev)
+{
+ return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000);
+}
+
+static void
+mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ val = MT_PBF_SYS_CTRL_MCU_RESET |
+ MT_PBF_SYS_CTRL_DMA_RESET |
+ MT_PBF_SYS_CTRL_MAC_RESET |
+ MT_PBF_SYS_CTRL_PBF_RESET |
+ MT_PBF_SYS_CTRL_ASY_RESET;
+
+ mt76_set(dev, MT_PBF_SYS_CTRL, val);
+ mt76_clear(dev, MT_PBF_SYS_CTRL, val);
+
+ mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+ mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+}
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+ const struct mt76x2_reg_pair *data, int len)
+{
+ while (len > 0) {
+ mt76_wr(dev, data->reg, data->value);
+ len--;
+ data++;
+ }
+}
+
+static void
+mt76_write_mac_initvals(struct mt76x2_dev *dev)
+{
+#define DEFAULT_PROT_CFG \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+ static const struct mt76x2_reg_pair vals[] = {
+ /* Copied from MediaTek reference source */
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x1efebcff },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_MAC_SYS_CTRL, 0x0000000c },
+ { MT_MAX_LEN_CFG, 0x003e3f00 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
+ { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
+ { MT_XIFS_TIME_CFG, 0x33a40d0a },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TBTT_SYNC_CFG, 0x00422010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+ { 0x1238, 0x001700c8 },
+ { MT_TX_SW_CFG0, 0x00101001 },
+ { MT_TX_SW_CFG1, 0x00010000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_TXOP_CTRL_CFG, 0x0400583f },
+ { MT_TX_RTS_CFG, 0x00100020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2290 },
+ { MT_TX_RETRY_CFG, 0x47f01f0f },
+ { MT_EXP_ACK_TIME, 0x002c00dc },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_PIFS_TX_CFG, 0x00060fff },
+ { MT_RX_FILTR_CFG, 0x00015f97 },
+ { MT_LEGACY_BASIC_RATE, 0x0000017f },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_AUX_CLK_CFG, 0x00000000 },
+ { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
+ { MT_TX_ALC_CFG_4, 0x00000000 },
+ { MT_TX_ALC_VGA3, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_8, 0x0000003a },
+ { MT_TX_PWR_CFG_9, 0x0000003a },
+ { MT_EFUSE_CTRL, 0x0000d000 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
+ { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
+ { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
+ { MT_TX_SW_CFG3, 0x00000004 },
+ { MT_HT_FBK_TO_LEGACY, 0x00001818 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+ { MT_PROT_AUTO_TX_CFG, 0x00830083 },
+ { MT_HT_CTRL_CFG, 0x000001ff },
+ };
+ struct mt76x2_reg_pair prot_vals[] = {
+ { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG },
+ { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG },
+ { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ };
+
+ mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+ mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+
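+/*
+ * Program the crystal trim: the signed offset and the trim value come
+ * from XTAL_TRIM_2, with XTAL_TRIM_1 and finally a default of 0x14 as
+ * fallbacks when unprogrammed, plus an XO control preset selected by
+ * the XTAL option field in NIC_CONF_2.
+ */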
+static void
+mt76x2_fixup_xtal(struct mt76x2_dev *dev)
+{
+ u16 eep_val;
+ s8 offset = 0;
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
+ offset = eep_val & 0x7f;
+ if ((eep_val & 0xff) == 0xff)
+ offset = 0;
+ else if (eep_val & 0x80)
+ offset = 0 - offset;
+
+ eep_val >>= 8;
+ if (eep_val == 0x00 || eep_val == 0xff) {
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val &= 0xff;
+
+ if (eep_val == 0x00 || eep_val == 0xff)
+ eep_val = 0x14;
+ }
+
+ eep_val &= 0x7f;
+ mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
+ mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+ case 0:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+ break;
+ case 1:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt76x2_init_beacon_offsets(struct mt76x2_dev *dev)
+{
+ u16 base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ u16 addr = dev->beacon_offsets[i];
+
+ regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
+{
+ static const u8 null_addr[ETH_ALEN] = {};
+ const u8 *macaddr = dev->mt76.macaddr;
+ u32 val;
+ int i, k;
+
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+ val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
+ val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+ mt76x2_mac_pbf_init(dev);
+ mt76_write_mac_initvals(dev);
+ mt76x2_fixup_xtal(dev);
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ if (is_mt7612(dev))
+ mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+ mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
+ mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+ mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
+ mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
+ usleep_range(5000, 10000);
+ mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
+
+ mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
+
+ mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
+ mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
+ FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
+ MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+
+ /* Fire a pre-TBTT interrupt 8 ms before TBTT */
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+ 8 << 4);
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+ MT_DFS_GP_INTERVAL);
+ mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ if (!hard)
+ return 0;
+
+ for (i = 0; i < 256 / 32; i++)
+ mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
+
+ for (i = 0; i < 256; i++)
+ mt76x2_mac_wcid_setup(dev, i, 0, NULL);
+
+ for (i = 0; i < 16; i++)
+ for (k = 0; k < 4; k++)
+ mt76x2_mac_shared_key_setup(dev, i, k, NULL);
+
+ for (i = 0; i < 8; i++) {
+ mt76x2_mac_set_bssid(dev, i, null_addr);
+ mt76x2_mac_set_beacon(dev, i, NULL);
+ }
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ mt76_wr(dev, MT_CH_TIME_CFG,
+ MT_CH_TIME_CFG_TIMER_EN |
+ MT_CH_TIME_CFG_TX_AS_BUSY |
+ MT_CH_TIME_CFG_RX_AS_BUSY |
+ MT_CH_TIME_CFG_NAV_AS_BUSY |
+ MT_CH_TIME_CFG_EIFS_AS_BUSY |
+ FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+ mt76x2_init_beacon_offsets(dev);
+
+ mt76x2_set_tx_ackto(dev);
+
+ return 0;
+}
+
+int mt76x2_mac_start(struct mt76x2_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ wait_for_wpdma(dev);
+ usleep_range(50, 100);
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ mt76x2_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_TX_STAT);
+
+ return 0;
+}
+
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
+{
+ bool stopped = false;
+ u32 rts_cfg;
+ int i;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 300; i++) {
+ if (mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX))
+ continue;
+
+ if (mt76_rr(dev, MT_BBP(IBI, 12)))
+ continue;
+
+ stopped = true;
+ break;
+ }
+
+ if (force && !stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+
+void mt76x2_mac_resume(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x2_power_on_rf_patch(struct mt76x2_dev *dev)
+{
+ mt76_set(dev, 0x10130, BIT(0) | BIT(16));
+ udelay(1);
+
+ mt76_clear(dev, 0x1001c, 0xff);
+ mt76_set(dev, 0x1001c, 0x30);
+
+ mt76_wr(dev, 0x10014, 0x484f);
+ udelay(1);
+
+ mt76_set(dev, 0x10130, BIT(17));
+ udelay(125);
+
+ mt76_clear(dev, 0x10130, BIT(16));
+ udelay(50);
+
+ mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
+}
+
+static void
+mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit)
+{
+ int shift = unit ? 8 : 0;
+
+ /* Enable RF BG */
+ mt76_set(dev, 0x10130, BIT(0) << shift);
+ udelay(10);
+
+ /* Enable RFDIG LDO/AFE/ABB/ADDA */
+ mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift);
+ udelay(10);
+
+ /* Switch RFDIG power to internal LDO */
+ mt76_clear(dev, 0x10130, BIT(2) << shift);
+ udelay(10);
+
+ mt76x2_power_on_rf_patch(dev);
+
+ mt76_set(dev, 0x530, 0xf);
+}
+
+static void
+mt76x2_power_on(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ /* Turn on WL MTCMOS */
+ mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+ val = MT_WLAN_MTC_CTRL_STATE_UP |
+ MT_WLAN_MTC_CTRL_PWR_ACK |
+ MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+ mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000);
+
+ mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16);
+ udelay(10);
+
+ mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+ udelay(10);
+
+ mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+ mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff);
+
+	/* Release AD/DA power down */
+ mt76_clear(dev, 0x11204, BIT(3));
+
+ /* WLAN function enable */
+ mt76_set(dev, 0x10080, BIT(0));
+
+ /* Release BBP software reset */
+ mt76_clear(dev, 0x10064, BIT(18));
+
+ mt76x2_power_on_rf(dev, 0);
+ mt76x2_power_on_rf(dev, 1);
+}
+
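+/*
+ * The ACK timeout is one SIFS plus a slot time, with the slot time
+ * stretched by 3 us per coverage class step.
+ */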
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
+{
+ u8 ackto, sifs, slottime = dev->slottime;
+
+ slottime += 3 * dev->coverage_class;
+
+ sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
+ MT_XIFS_TIME_CFG_OFDM_SIFS);
+
+ ackto = slottime + sifs;
+ mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
+ MT_TX_TIMEOUT_CFG_ACKTO, ackto);
+}
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+}
+
+static void
+mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x2_set_wlan_state(dev, enable);
+}
+
+int mt76x2_init_hardware(struct mt76x2_dev *dev)
+{
+ static const u16 beacon_offsets[16] = {
+		/* 1024 bytes per beacon */
+ 0xc000,
+ 0xc400,
+ 0xc800,
+ 0xcc00,
+ 0xd000,
+ 0xd400,
+ 0xd800,
+ 0xdc00,
+
+ /* BSS idx 8-15 not used for beacons */
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ };
+ u32 val;
+ int ret;
+
+ dev->beacon_offsets = beacon_offsets;
+ tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
+ (unsigned long) dev);
+
+ dev->chainmask = 0x202;
+ dev->global_wcid.idx = 255;
+ dev->global_wcid.hw_key_idx = -1;
+ dev->slottime = 9;
+
+ val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+ val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+ MT_WPDMA_GLO_CFG_BIG_ENDIAN |
+ MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
+ val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+ mt76x2_reset_wlan(dev, true);
+ mt76x2_power_on(dev);
+
+ ret = mt76x2_eeprom_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76x2_mac_reset(dev, true);
+ if (ret)
+ return ret;
+
+ dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+ ret = mt76x2_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ ret = mt76x2_mac_start(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76x2_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ mt76x2_mac_stop(dev, false);
+
+ return 0;
+}
+
+void mt76x2_stop_hardware(struct mt76x2_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt76x2_mcu_set_radio_state(dev, false);
+ mt76x2_mac_stop(dev, false);
+}
+
+void mt76x2_cleanup(struct mt76x2_dev *dev)
+{
+ tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ mt76x2_stop_hardware(dev);
+ mt76x2_dma_cleanup(dev);
+ mt76x2_mcu_cleanup(dev);
+}
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = sizeof(struct mt76x2_txwi),
+ .update_survey = mt76x2_update_channel,
+ .tx_prepare_skb = mt76x2_tx_prepare_skb,
+ .tx_complete_skb = mt76x2_tx_complete_skb,
+ .rx_skb = mt76x2_queue_rx_skb,
+ .rx_poll_complete = mt76x2_rx_poll_complete,
+ };
+ struct ieee80211_hw *hw;
+ struct mt76x2_dev *dev;
+
+ hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x2_ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->mt76.dev = pdev;
+ dev->mt76.hw = hw;
+ dev->mt76.drv = &drv_ops;
+ mutex_init(&dev->mutex);
+ spin_lock_init(&dev->irq_lock);
+
+ return dev;
+}
+
+static void mt76x2_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt76x2_dev *dev = hw->priv;
+
+ mt76x2_dfs_set_domain(dev, request->dfs_region);
+}
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+static struct ieee80211_rate mt76x2_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ }
+};
+
+static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+ u8 delay_off)
+{
+ struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev,
+ mt76);
+ u32 val;
+
+ val = MT_LED_STATUS_DURATION(0xff) |
+ MT_LED_STATUS_OFF(delay_off) |
+ MT_LED_STATUS_ON(delay_on);
+
+ mt76_wr(dev, MT_LED_S0(mt76->led_pin), val);
+ mt76_wr(dev, MT_LED_S1(mt76->led_pin), val);
+
+ val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+ MT_LED_CTRL_KICK(mt76->led_pin);
+ if (mt76->led_al)
+ val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int mt76x2_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt76x2_led_set_config(mt76, delta_on, delta_off);
+ return 0;
+}
+
+static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+
+ if (!brightness)
+ mt76x2_led_set_config(mt76, 0, 0xff);
+ else
+ mt76x2_led_set_config(mt76, 0xff, 0);
+}
+
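+/*
+ * Derive each channel's regulatory max_power from the per-chain target
+ * power and the per-rate power table.  The EEPROM values appear to be
+ * in 0.5 dB steps, hence the division by two, before 3 dB are added
+ * for the combined output of the two chains.
+ */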
+static void
+mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76_rate_power t = {};
+ int target_power;
+ int i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ target_power = max_t(int, (txp.chain[0].target_power +
+ txp.chain[0].delta),
+ (txp.chain[1].target_power +
+ txp.chain[1].delta));
+
+ mt76x2_get_rate_power(dev, &t, chan);
+
+ chan->max_power = mt76x2_get_max_rate_power(&t) +
+ target_power;
+ chan->max_power /= 2;
+
+ /* convert to combined output power on 2x2 devices */
+ chan->max_power += 3;
+ }
+}
+
+int mt76x2_register_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ void *status_fifo;
+ int fifo_size;
+ int i, ret;
+
+ fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status));
+ status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+ if (!status_fifo)
+ return -ENOMEM;
+
+ kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+ ret = mt76x2_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ hw->queues = 4;
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+ hw->extra_tx_headroom = 2;
+
+ hw->sta_data_size = sizeof(struct mt76x2_sta);
+ hw->vif_data_size = sizeof(struct mt76x2_vif);
+
+ for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+ u8 *addr = dev->macaddr_list[i].addr;
+
+ memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
+
+ if (!i)
+ continue;
+
+ addr[0] |= BIT(1);
+ addr[0] ^= ((i - 1) << 2);
+ }
+ wiphy->addresses = dev->macaddr_list;
+ wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+ wiphy->reg_notifier = mt76x2_regd_notifier;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+ dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ mt76x2_dfs_init_detector(dev);
+
+ /* init led callbacks */
+ dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+
+ ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+ ARRAY_SIZE(mt76x2_rates));
+ if (ret)
+ goto fail;
+
+ mt76x2_init_debugfs(dev);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+ return 0;
+
+fail:
+ mt76x2_stop_hardware(dev);
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
new file mode 100644
index 000000000000..6c30b5eaa9ca
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -0,0 +1,839 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_trace.h"
+
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
+{
+ idx &= 7;
+ mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
+ mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
+ get_unaligned_le16(addr + 4));
+}
+
+static int
+mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
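+/*
+ * Build the hardware rate field for the TXWI: PHY type, rate/MCS index
+ * (for VHT the NSS is folded into the index), bandwidth and the
+ * short-GI flag.
+ */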
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
+
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 bit = MT_WCID_DROP_MASK(idx);
+
+ /* prevent unnecessary writes */
+ if ((val & bit) != (bit * drop))
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ spin_lock_bh(&dev->mt76.lock);
+ wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_bh(&dev->mt76.lock);
+}
+
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u16 txwi_flags = 0;
+ u8 nss;
+ s8 txpwr_adj, max_txpwr_adj;
+ u8 ccmp_pn[8];
+
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (wcid)
+ txwi->wcid = wcid->idx;
+ else
+ txwi->wcid = 0xff;
+
+ txwi->pktid = 1;
+
+ if (wcid && wcid->sw_iv && key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+ ccmp_pn[0] = pn;
+ ccmp_pn[1] = pn >> 8;
+ ccmp_pn[2] = 0;
+ ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+ ccmp_pn[4] = pn >> 16;
+ ccmp_pn[5] = pn >> 24;
+ ccmp_pn[6] = pn >> 32;
+ ccmp_pn[7] = pn >> 40;
+		txwi->iv = *((u32 *) &ccmp_pn[0]);
+		/* the EIV is the second dword of the CCMP header (PN2..PN5) */
+		txwi->eiv = *((u32 *) &ccmp_pn[4]);
+ }
+
+ spin_lock_bh(&dev->mt76.lock);
+ if (wcid && (rate->idx < 0 || !rate->count)) {
+ txwi->rate = wcid->tx_rate;
+ max_txpwr_adj = wcid->max_txpwr_adj;
+ nss = wcid->tx_rate_nss;
+ } else {
+ txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+ max_txpwr_adj);
+ txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+ txwi->txstream = 0x13;
+ else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+ !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+ txwi->txstream = 0x93;
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->pktid |= MT_TXWI_PKTID_PROBE;
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control))
+ txwi_flags |= MT_TXWI_FLAGS_TS;
+
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(skb->len);
+}
+
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+ int hdrlen;
+
+ if (!len)
+ return;
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + len, skb->data, hdrlen);
+ skb_pull(skb, len);
+}
+
+static struct mt76_wcid *
+mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast)
+{
+ struct mt76x2_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ sta = container_of(wcid, struct mt76x2_sta, wcid);
+ return &sta->vif->group_wcid;
+}
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *rxi)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76x2_rxwi *rxwi = rxi;
+ u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+ bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+ int pad_len = 0;
+ u8 pn_len;
+ u8 wcid;
+ int len;
+
+ if (rxinfo & MT_RXINFO_L2PAD)
+ pad_len += 2;
+
+ if (rxinfo & MT_RXINFO_DECRYPT) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+ status->flag |= RX_FLAG_MIC_STRIPPED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+ status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast);
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+ if (pn_len) {
+ int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+ u8 *data = skb->data + offset;
+
+ status->iv[0] = data[7];
+ status->iv[1] = data[6];
+ status->iv[2] = data[5];
+ status->iv[3] = data[4];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ /*
+ * Driver CCMP validation can't deal with fragments.
+ * Let mac80211 take care of it.
+ */
+ if (rxinfo & MT_RXINFO_FRAG) {
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+ } else {
+ pad_len += pn_len << 2;
+ len -= pn_len << 2;
+ }
+ }
+
+ mt76x2_remove_hdr_pad(skb, pad_len);
+
+ if (rxinfo & MT_RXINFO_BA)
+ status->aggr = true;
+
+ if (WARN_ON_ONCE(len > skb->len))
+ return -EINVAL;
+
+ pskb_trim(skb, len);
+ status->chains = BIT(0) | BIT(1);
+ status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
+ status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
+ status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+ status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+ return mt76x2_mac_process_rate(status, rate);
+}
+
+static int
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ return 0;
+}
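+/*
+ * The status FIFO only reports the final rate and the retry count, so
+ * the intermediate rate table entries are reconstructed by stepping
+ * the rate index down by one per retry.
+ */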
+
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+ struct ieee80211_tx_info *info,
+ struct mt76x2_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + st->retry;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+
+ if (last_rate > 0)
+ rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+static void
+mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
+ u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76x2_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ void *priv;
+
+ priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+
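+/*
+ * Drain the hardware TX status FIFO.  In IRQ context the entries are
+ * buffered in a software kfifo for later processing; otherwise they
+ * are reported to mac80211 directly.
+ */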
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
+{
+ struct mt76x2_tx_status stat = {};
+ unsigned long flags;
+ u8 update = 1;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return;
+
+ trace_mac_txstat_poll(dev);
+
+ while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+ u32 stat1, stat2;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+ if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ stat.valid = 1;
+ stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+ stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+ trace_mac_txstat_fetch(dev, &stat);
+
+ if (!irq) {
+ mt76x2_send_tx_status(dev, &stat, &update);
+ continue;
+ }
+
+ kfifo_put(&dev->txstatus_fifo, stat);
+ }
+}
+
+static void
+mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *txwi_ptr)
+{
+ struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
+ struct mt76x2_txwi *txwi = txwi_ptr;
+
+ mt76x2_mac_poll_tx_status(dev, false);
+
+ txi->tries = 0;
+ txi->jiffies = jiffies;
+ txi->wcid = txwi->wcid;
+ txi->pktid = txwi->pktid;
+ trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+ mt76x2_tx_complete(dev, skb);
+}
+
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
+{
+ struct mt76x2_tx_status stat;
+ u8 update = 1;
+
+ while (kfifo_get(&dev->txstatus_fifo, &stat))
+ mt76x2_send_tx_status(dev, &stat, &update);
+}
+
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+ if (e->txwi)
+ mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
+ else
+ dev_kfree_skb_any(e->skb);
+}
+
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ struct mt76_wcid_addr addr = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+ if (idx >= 128)
+ return;
+
+ if (mac)
+ memcpy(addr.macaddr, mac, ETH_ALEN);
+
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP)
+ iv_data[3] |= 0x20;
+ }
+
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ return 0;
+}
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+ sizeof(key_data));
+
+ return 0;
+}
+
+static int
+mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
+{
+ int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+ struct mt76x2_txwi txwi;
+
+ if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
+ return -ENOSPC;
+
+ mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+
+ mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+ offset += sizeof(txwi);
+
+ mt76_wr_copy(dev, offset, skb->data, skb->len);
+ return 0;
+}
+
+static int
+__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
+{
+ int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+ int beacon_addr = dev->beacon_offsets[bcn_idx];
+ int ret = 0;
+ int i;
+
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+ if (skb) {
+ ret = mt76_write_beacon(dev, beacon_addr, skb);
+ if (!ret)
+ dev->beacon_data_mask |= BIT(bcn_idx) &
+ dev->beacon_mask;
+ } else {
+ dev->beacon_data_mask &= ~BIT(bcn_idx);
+ for (i = 0; i < beacon_len; i += 4)
+ mt76_wr(dev, beacon_addr + i, 0);
+ }
+
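+ /* re-enable transmission only for slots that hold valid beacon data */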
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+ return ret;
+}
+
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+ struct sk_buff *skb)
+{
+ bool force_update = false;
+ int bcn_idx = 0;
+ int i;
+
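+ /* hardware beacon slots are packed: each enabled interface gets the
+ * next free slot, so toggling one beacon may shift the slots used by
+ * the others
+ */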
+ for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (vif_idx == i) {
+ force_update = !!dev->beacons[i] ^ !!skb;
+
+ if (dev->beacons[i])
+ dev_kfree_skb(dev->beacons[i]);
+
+ dev->beacons[i] = skb;
+ __mt76x2_mac_set_beacon(dev, bcn_idx, skb);
+ } else if (force_update && dev->beacons[i]) {
+ __mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
+ }
+
+ bcn_idx += !!dev->beacons[i];
+ }
+
+ for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (!(dev->beacon_data_mask & BIT(i)))
+ break;
+
+ __mt76x2_mac_set_beacon(dev, i, NULL);
+ }
+
+ mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+ bcn_idx - 1);
+ return 0;
+}
+
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
+{
+ u8 old_mask = dev->beacon_mask;
+ bool en;
+ u32 reg;
+
+ if (val) {
+ dev->beacon_mask |= BIT(vif_idx);
+ } else {
+ dev->beacon_mask &= ~BIT(vif_idx);
+ mt76x2_mac_set_beacon(dev, vif_idx, NULL);
+ }
+
+ if (!!old_mask == !!dev->beacon_mask)
+ return;
+
+ en = dev->beacon_mask;
+
+ mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+ reg = MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_TIMER_EN;
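+ /* reg * en sets all three bits when enabling and clears them when
+ * disabling
+ */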
+ mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+
+ if (en)
+ mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+ else
+ mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
+void mt76x2_update_channel(struct mt76_dev *mdev)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76_channel_state *state;
+ u32 active, busy;
+
+ state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+
+ busy = mt76_rr(dev, MT_CH_BUSY);
+ active = busy + mt76_rr(dev, MT_CH_IDLE);
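+ /* both counters clear on read, so each poll accumulates one interval */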
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ state->cc_busy += busy;
+ state->cc_active += active;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
+void mt76x2_mac_work(struct work_struct *work)
+{
+ struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
+ mac_work.work);
+ int i, idx;
+
+ mt76x2_update_channel(&dev->mt76);
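+ /* each MT_TX_AGG_CNT register packs two 16-bit aggregation counters */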
+ for (i = 0, idx = 0; i < 16; i++) {
+ u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ dev->aggr_stats[idx++] += val & 0xffff;
+ dev->aggr_stats[idx++] += val >> 16;
+ }
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
new file mode 100644
index 000000000000..8a8a25e32d5f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
+#include "mt76.h"
+
+struct mt76x2_dev;
+struct mt76x2_sta;
+struct mt76x2_vif;
+struct mt76x2_txwi;
+
+struct mt76x2_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+struct mt76x2_tx_info {
+ unsigned long jiffies;
+ u8 tries;
+
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+};
+
+struct mt76x2_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 tid_sn;
+ __le16 rate;
+
+ u8 rssi[4];
+
+ __le32 bbp_rxinfo[4];
+};
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_UNICAST BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_FTYPE0 BIT(22)
+#define MT_RXINFO_SW_FTYPE1 BIT(23)
+#define MT_RXINFO_PROBE_RESP BIT(24)
+#define MT_RXINFO_BEACON BIT(25)
+#define MT_RXINFO_DISASSOC BIT(26)
+#define MT_RXINFO_DEAUTH BIT(27)
+#define MT_RXINFO_ACTION BIT(28)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(29, 16)
+#define MT_RXWI_CTL_EOF BIT(31)
+
+#define MT_RXWI_TID GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC BIT(6)
+#define MT_RXWI_RATE_BW GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI BIT(9)
+#define MT_RXWI_RATE_STBC BIT(10)
+#define MT_RXWI_RATE_LDPC_EXSYM BIT(11)
+#define MT_RXWI_RATE_PHY GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
+
+#define MT_TX_PWR_ADJ GENMASK(3, 0)
+
+enum mt76x2_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+};
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_NDPS BIT(10)
+#define MT_TXWI_FLAGS_RTSBWSIG BIT(11)
+#define MT_TXWI_FLAGS_NDP_BW GENMASK(13, 12)
+#define MT_TXWI_FLAGS_SOUND BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+#define MT_TXWI_PKTID_PROBE BIT(7)
+
+struct mt76x2_txwi {
+ __le16 flags;
+ __le16 rate;
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+ __le32 iv;
+ __le32 eiv;
+ u8 aid;
+ u8 txstream;
+ u8 ctl2;
+ u8 pktid;
+} __packed __aligned(4);
+
+static inline struct mt76x2_tx_info *
+mt76x2_skb_tx_info(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ return (void *) info->status.status_driver_data;
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard);
+int mt76x2_mac_start(struct mt76x2_dev *dev);
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
+void mt76x2_mac_resume(struct mt76x2_dev *dev);
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr);
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *rxi);
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop);
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key);
+
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+ struct sk_buff *skb);
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val);
+
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq);
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev);
+
+void mt76x2_mac_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
new file mode 100644
index 000000000000..bf26284b9989
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+static int
+mt76x2_start(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x2_mac_start(dev);
+ if (ret)
+ goto out;
+
+ ret = mt76x2_phy_start(dev);
+ if (ret)
+ goto out;
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void
+mt76x2_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76x2_stop_hardware(dev);
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return;
+
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+ if (txq->sta) {
+ struct mt76x2_sta *sta;
+
+ sta = (struct mt76x2_sta *) txq->sta->drv_priv;
+ mtxq->wcid = &sta->wcid;
+ } else {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+ mtxq->wcid = &mvif->group_wcid;
+ }
+
+ mt76_txq_init(&dev->mt76, txq);
+}
+
+static int
+mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ unsigned int idx = 0;
+
+ if (vif->addr[0] & BIT(1))
+ idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+
+ /*
+ * Client mode typically only has one configurable BSSID register,
+ * which is used for bssidx=0. This is linked to the MAC address.
+ * Since mac80211 allows changing interface types, and we cannot
+ * force the use of the primary MAC address for a station mode
+ * interface, we need some other way of configuring a per-interface
+ * remote BSSID.
+ * The hardware provides an AP-Client feature, where bssidx 0-7 are
+ * used for AP mode and bssidx 8-15 for client mode.
+ * We shift the station interface bss index by 8 to force the
+ * hardware to recognize the BSSID.
+ * The resulting bssidx mismatch for unicast frames is ignored by the
+ * hardware.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ idx += 8;
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = 254 - idx;
+ mvif->group_wcid.hw_key_idx = -1;
+ mt76x2_txq_init(dev, vif->txq);
+
+ return 0;
+}
+
+static void
+mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mt76_txq_remove(&dev->mt76, vif->txq);
+}
+
+static int
+mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ mt76_set_channel(&dev->mt76);
+
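+ /* keep the pre-TBTT and DFS tasklets quiet while the MAC is stopped
+ * for the switch
+ */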
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+ cancel_delayed_work_sync(&dev->cal_work);
+
+ mt76x2_mac_stop(dev, true);
+ ret = mt76x2_phy_set_channel(dev, chandef);
+
+ /* channel cycle counters read-and-clear */
+ mt76_rr(dev, MT_CH_IDLE);
+ mt76_rr(dev, MT_CH_BUSY);
+
+ mt76x2_dfs_init_params(dev);
+
+ mt76x2_mac_resume(dev);
+ tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+
+ return ret;
+}
+
+static int
+mt76x2_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
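+ /* mac80211 power is in dBm; the hardware works in .5 dB steps */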
+ dev->txpower_conf = hw->conf.power_level * 2;
+
+ /* convert to per-chain power for 2x2 devices */
+ dev->txpower_conf -= 6;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+ mt76x2_phy_set_txpower(dev);
+ mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt76x2_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u32 flags = 0;
+
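+/* set the hardware drop bits for every frame class that mac80211 did not
+ * ask to pass up
+ */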
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_BSSID)
+ mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
+
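+ /* the hardware interval field is in 1/16 TU units, hence the shift */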
+ if (changed & BSS_CHANGED_BEACON_INT)
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL,
+ info->beacon_int << 4);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ mt76x2_mac_set_beacon_enable(dev, mvif->idx,
+ info->enable_beacon);
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ dev->slottime = slottime;
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+ }
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+ int i;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76x2_mac_wcid_set_drop(dev, idx, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76x2_txq_init(dev, sta->txq[i]);
+
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static int
+mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76_txq_remove(&dev->mt76, sta->txq[i]);
+ mt76x2_mac_wcid_set_drop(dev, idx, true);
+ mt76_wcid_free(dev->wcid_mask, idx);
+ mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static void
+mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct mt76x2_dev *dev = hw->priv;
+ int idx = msta->wcid.idx;
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ mt76x2_mac_wcid_set_drop(dev, idx, true);
+ mt76_stop_tx_queues(&dev->mt76, sta, true);
+ break;
+ case STA_NOTIFY_AWAKE:
+ mt76x2_mac_wcid_set_drop(dev, idx, false);
+ break;
+ }
+}
+
+static int
+mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x2_sta *msta;
+ struct mt76_wcid *wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ /*
+ * The hardware does not support per-STA RX GTK; fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+ wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ wcid->sw_iv = true;
+ }
+ } else {
+ if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ wcid->sw_iv = true;
+ }
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int
+mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, qid;
+ u32 val;
+
+ qid = dev->mt76.q_tx[queue].hw_idx;
+
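+ /* the hardware stores contention windows as exponents (cw = 2^n - 1),
+ * so program fls()
+ */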
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+ FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
+
+static void
+mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+ mt76_txq_schedule_all(&dev->mt76);
+}
+
+static void
+mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+}
+
+static int
+mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ *dbm = dev->txpower_cur / 2;
+
+ /* convert from per-chain power to combined output on 2x2 devices */
+ *dbm += 3;
+
+ return 0;
+}
+
+static int
+mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+ struct ieee80211_tx_rate rate = {};
+
+ if (!rates)
+ return;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+ msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+
+static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
+ s16 coverage_class)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ dev->coverage_class = coverage_class;
+ mt76x2_set_tx_ackto(dev);
+ mutex_unlock(&dev->mutex);
+}
+
+const struct ieee80211_ops mt76x2_ops = {
+ .tx = mt76x2_tx,
+ .start = mt76x2_start,
+ .stop = mt76x2_stop,
+ .add_interface = mt76x2_add_interface,
+ .remove_interface = mt76x2_remove_interface,
+ .config = mt76x2_config,
+ .configure_filter = mt76x2_configure_filter,
+ .bss_info_changed = mt76x2_bss_info_changed,
+ .sta_add = mt76x2_sta_add,
+ .sta_remove = mt76x2_sta_remove,
+ .sta_notify = mt76x2_sta_notify,
+ .set_key = mt76x2_set_key,
+ .conf_tx = mt76x2_conf_tx,
+ .sw_scan_start = mt76x2_sw_scan,
+ .sw_scan_complete = mt76x2_sw_scan_complete,
+ .flush = mt76x2_flush,
+ .ampdu_action = mt76x2_ampdu_action,
+ .get_txpower = mt76x2_get_txpower,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .set_coverage_class = mt76x2_set_coverage_class,
+ .get_survey = mt76_get_survey,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
new file mode 100644
index 000000000000..15820b11f9db
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_eeprom.h"
+
+struct mt76x2_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76x2_patch_header {
+ char build_time[16];
+ char platform[4];
+ char hw_version[4];
+ char patch_version[4];
+ u8 pad[2];
+};
+
+static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+ memcpy(skb_put(skb, len), data, len);
+
+ return skb;
+}
+
+static struct sk_buff *
+mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
+{
+ unsigned long timeout;
+
+ if (!time_is_after_jiffies(expires))
+ return NULL;
+
+ timeout = expires - jiffies;
+ wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
+ timeout);
+ return skb_dequeue(&dev->mcu.res_q);
+}
+
+static int
+mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd)
+{
+ unsigned long expires = jiffies + HZ;
+ int ret;
+ u8 seq;
+
+ if (!skb)
+ return -EINVAL;
+
+ mutex_lock(&dev->mcu.mutex);
+
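+ /* message sequence numbers cycle through 1..15, skipping 0 */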
+ seq = ++dev->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
+ if (ret)
+ goto out;
+
+ while (1) {
+ u32 *rxfce;
+ bool check_seq = false;
+
+ skb = mt76x2_mcu_get_response(dev, expires);
+ if (!skb) {
+ dev_err(dev->mt76.dev,
+ "MCU message %d (seq %d) timed out\n", cmd,
+ seq);
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ rxfce = (u32 *) skb->cb;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+ check_seq = true;
+
+ dev_kfree_skb(skb);
+ if (check_seq)
+ break;
+ }
+
+out:
+ mutex_unlock(&dev->mcu.mutex);
+
+ return ret;
+}
+
+static int
+mt76pci_load_rom_patch(struct mt76x2_dev *dev)
+{
+ const struct firmware *fw = NULL;
+ struct mt76x2_patch_header *hdr;
+ bool rom_protect = !is_mt7612(dev);
+ int len, ret = 0;
+ __le32 *cur;
+ u32 patch_mask, patch_reg;
+
+ if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "Could not get hardware semaphore for ROM PATCH\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+ patch_mask = BIT(0);
+ patch_reg = MT_MCU_CLOCK_CTL;
+ } else {
+ patch_mask = BIT(1);
+ patch_reg = MT_MCU_COM_REG0;
+ }
+
+ if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+ dev_info(dev->mt76.dev, "ROM patch already applied\n");
+ goto out;
+ }
+
+ ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+ ret = -EIO;
+ dev_err(dev->mt76.dev, "Invalid ROM patch image\n");
+ goto out;
+ }
+
+ hdr = (struct mt76x2_patch_header *) fw->data;
+ dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
+
+ cur = (__le32 *) (fw->data + sizeof(*hdr));
+ len = fw->size - sizeof(*hdr);
+ mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+ /* Trigger ROM patch execution */
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
+
+ if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
+ dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ /* release semaphore */
+ if (rom_protect)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+ release_firmware(fw);
+ return ret;
+}
+
+static int
+mt76pci_load_firmware(struct mt76x2_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76x2_fw_header *hdr;
+ int i, len, ret;
+ __le32 *cur;
+ u32 offset, val;
+
+ ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto error;
+
+ hdr = (const struct mt76x2_fw_header *) fw->data;
+
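+ /* firmware image layout: header, then ILM, then DLM; the sizes must
+ * add up exactly
+ */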
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto error;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->build_ver);
+ dev_info(dev->mt76.dev, "Build: %x\n", val);
+ dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+ cur = (__le32 *) (fw->data + sizeof(*hdr));
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
+ mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
+
+ cur += len / sizeof(*cur);
+ len = le32_to_cpu(hdr->dlm_len);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ offset = MT_MCU_DLM_ADDR_E3;
+ else
+ offset = MT_MCU_DLM_ADDR;
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+ mt76_wr_copy(dev, offset, cur, len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
+ mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
+
+ /* trigger firmware */
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
+ for (i = 200; i > 0; i--) {
+ val = mt76_rr(dev, MT_MCU_COM_REG0);
+
+ if (val & 1)
+ break;
+
+ msleep(10);
+ }
+
+ if (!i) {
+ dev_err(dev->mt76.dev, "Firmware failed to start\n");
+ release_firmware(fw);
+ return -ETIMEDOUT;
+ }
+
+ dev_info(dev->mt76.dev, "Firmware running!\n");
+
+ release_firmware(fw);
+
+ return ret;
+
+error:
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+static int
+mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+ u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
+}
+
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+ u8 channel)
+{
+ struct sk_buff *skb;
+ struct {
+ u8 cr_mode;
+ u8 temp;
+ u8 ch;
+ u8 _pad0;
+
+ __le32 cfg;
+ } __packed __aligned(4) msg = {
+ .cr_mode = type,
+ .temp = temp_level,
+ .ch = channel,
+ };
+ u32 val;
+
+ val = BIT(31);
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ msg.cfg = cpu_to_le32(val);
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
+}
+
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan)
+{
+ struct sk_buff *skb;
+ struct {
+ u8 idx;
+ u8 scan;
+ u8 bw;
+ u8 _pad0;
+
+ __le16 chainmask;
+ u8 ext_chan;
+ u8 _pad1;
+
+ } __packed __aligned(4) msg = {
+ .idx = channel,
+ .scan = scan,
+ .bw = bw,
+ .chainmask = cpu_to_le16(dev->chainmask),
+ };
+
+ /* first set the channel without the extension channel info */
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+
+ usleep_range(5000, 10000);
+
+ msg.ext_chan = 0xe0 + bw_index;
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+}
+
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 mode;
+ __le32 level;
+ } __packed __aligned(4) msg = {
+ .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+ .level = cpu_to_le32(0),
+ };
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
+}
+
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 param)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(type),
+ .value = cpu_to_le32(param),
+ };
+ int ret;
+
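+ /* the MCU signals completion by setting bit 31 of MT_MCU_COM_REG0 */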
+ mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+ BIT(31), BIT(31), 100)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ struct mt76x2_tssi_comp data;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+ .data = *tssi_data,
+ };
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+}
+
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 channel;
+ __le32 gain_val;
+ } __packed __aligned(4) msg = {
+ .channel = cpu_to_le32(channel),
+ .gain_val = cpu_to_le32(gain),
+ };
+
+ if (force)
+ msg.channel |= cpu_to_le32(BIT(31));
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
+}
+
+int mt76x2_mcu_init(struct mt76x2_dev *dev)
+{
+ int ret;
+
+ mutex_init(&dev->mcu.mutex);
+
+ ret = mt76pci_load_rom_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76pci_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76x2_mcu_function_select(dev, Q_SELECT, 1);
+ return 0;
+}
+
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
+{
+ struct sk_buff *skb;
+
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
+ usleep_range(20000, 30000);
+
+ while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
new file mode 100644
index 000000000000..d7a7e83262ce
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MCU_H
+#define __MT76x2_MCU_H
+
+/* Register definitions */
+#define MT_MCU_CPU_CTL 0x0704
+#define MT_MCU_CLOCK_CTL 0x0708
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+#define MT_MCU_PCIE_REMAP_BASE1 0x0740
+#define MT_MCU_PCIE_REMAP_BASE2 0x0744
+#define MT_MCU_PCIE_REMAP_BASE3 0x0748
+#define MT_MCU_PCIE_REMAP_BASE4 0x074C
+
+#define MT_LED_CTRL 0x0770
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0 0x0774
+#define MT_LED_TX_BLINK_1 0x0778
+
+#define MT_LED_S0_BASE 0x077C
+#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE 0x0780
+#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
+ MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
+ MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8)
+#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
+ MT_LED_STATUS_DURATION_MASK)
+
+#define MT_MCU_SEMAPHORE_00 0x07B0
+#define MT_MCU_SEMAPHORE_01 0x07B4
+#define MT_MCU_SEMAPHORE_02 0x07B8
+#define MT_MCU_SEMAPHORE_03 0x07BC
+
+#define MT_MCU_ROM_PATCH_OFFSET 0x80000
+#define MT_MCU_ROM_PATCH_ADDR 0x90000
+
+#define MT_MCU_ILM_OFFSET 0x80000
+#define MT_MCU_ILM_ADDR 0x80000
+
+#define MT_MCU_DLM_OFFSET 0x100000
+#define MT_MCU_DLM_ADDR 0x90000
+#define MT_MCU_DLM_ADDR_E3 0x90800
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ BW_SETTING = 2,
+ USB2_SW_DISCONNECT = 2,
+ USB3_SW_DISCONNECT = 3,
+ LOG_FW_DEBUG_MSG = 4,
+ GET_FW_VERSION = 5,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibration {
+ MCU_CAL_R = 1,
+ MCU_CAL_TEMP_SENSOR,
+ MCU_CAL_RXDCOC,
+ MCU_CAL_RC,
+ MCU_CAL_SX_LOGEN,
+ MCU_CAL_LC,
+ MCU_CAL_TX_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_TSSI,
+ MCU_CAL_TSSI_COMP,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQC_FI,
+ MCU_CAL_RXIQC_FD,
+ MCU_CAL_PWRON,
+ MCU_CAL_TX_SHAPING,
+};
+
+enum mt76x2_mcu_cr_mode {
+ MT_RF_CR,
+ MT_BBP_CR,
+ MT_RF_BBP_CR,
+ MT_HL_TEMP_CR_UPDATE,
+};
+
+struct mt76x2_tssi_comp {
+ u8 pa_mode;
+ u8 cal_mode;
+ u16 pad;
+
+ u8 slope0;
+ u8 slope1;
+ u8 offset0;
+ u8 offset1;
+} __packed __aligned(4);
+
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 param);
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
new file mode 100644
index 000000000000..e66f047ea448
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+static const struct pci_device_id mt76pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7662) },
+ { PCI_DEVICE(0x14c3, 0x7612) },
+ { PCI_DEVICE(0x14c3, 0x7602) },
+ { },
+};
+
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mt76x2_dev *dev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dev = mt76x2_alloc_device(&pdev->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+ dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+ ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt76x2_register_device(dev);
+ if (ret)
+ goto error;
+
+ /* Fix up ASPM configuration */
+
+ /* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
+ mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
+
+ /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
+ mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+
+ /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
+ mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+
+ return 0;
+
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
+
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+ mt76_unregister_device(mdev);
+ mt76x2_cleanup(dev);
+ ieee80211_free_hw(mdev->hw);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76pci_device_table,
+ .probe = mt76pci_probe,
+ .remove = mt76pci_remove,
+};
+
+module_pci_driver(mt76pci_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
new file mode 100644
index 000000000000..5b742749d5de
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -0,0 +1,740 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain -= offset / 2;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain += offset;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+static void
+mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+ s8 *gain_adj = dev->cal.rx.high_gain;
+
+ mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+ mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+ mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+ mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+
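+/* pack four 6-bit power values into the four byte lanes of one TX power
+ * register
+ */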
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ u32 val = 0;
+
+ val |= (v1 & (BIT(6) - 1)) << 0;
+ val |= (v2 & (BIT(6) - 1)) << 8;
+ val |= (v3 & (BIT(6) - 1)) << 16;
+ val |= (v4 & (BIT(6) - 1)) << 24;
+ return val;
+}
+
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+ struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+ rssi += cal->rssi_offset[chain];
+ rssi -= cal->lna_gain;
+
+ return rssi;
+}
+
+static u8
+mt76x2_txpower_check(int value)
+{
+ if (value < 0)
+ return 0;
+ if (value > 0x2f)
+ return 0x2f;
+ return value;
+}
+
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ r->all[i] += offset;
+}
+
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ if (r->all[i] > limit)
+ r->all[i] = limit;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+ enum nl80211_chan_width width = dev->mt76.chandef.width;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ int txp_0, txp_1, delta = 0;
+ struct mt76_rate_power t = {};
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (width == NL80211_CHAN_WIDTH_40)
+ delta = txp.delta_bw40;
+ else if (width == NL80211_CHAN_WIDTH_80)
+ delta = txp.delta_bw80;
+
+ if (txp.target_power > dev->txpower_conf)
+ delta -= txp.target_power - dev->txpower_conf;
+
+ mt76x2_get_rate_power(dev, &t, chan);
+ mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power +
+ txp.chain[0].delta);
+ mt76x2_limit_rate_power(&t, dev->txpower_conf);
+ dev->txpower_cur = mt76x2_get_max_rate_power(&t);
+ mt76x2_add_rate_power_offset(&t, -(txp.chain[0].target_power +
+ txp.chain[0].delta + delta));
+ dev->target_power = txp.chain[0].target_power;
+ dev->target_power_delta[0] = txp.chain[0].delta + delta;
+ dev->target_power_delta[1] = txp.chain[1].delta + delta;
+ dev->rate_power = t;
+
+ txp_0 = mt76x2_txpower_check(txp.chain[0].target_power +
+ txp.chain[0].delta + delta);
+
+ txp_1 = mt76x2_txpower_check(txp.chain[1].target_power +
+ txp.chain[1].delta + delta);
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+
+static bool
+mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+ return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+static bool
+mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u32 flag = 0;
+
+ if (!mt76x2_tssi_enabled(dev))
+ return false;
+
+ if (mt76x2_channel_silent(dev))
+ return false;
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ flag |= BIT(0);
+
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ flag |= BIT(8);
+
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ dev->cal.tssi_cal_done = true;
+ return true;
+}
+
+static void
+mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ if (dev->cal.channel_cal_done)
+ return;
+
+ if (mt76x2_channel_silent(dev))
+ return;
+
+ if (!dev->cal.tssi_cal_done)
+ mt76x2_phy_tssi_init_cal(dev);
+
+ if (!mac_stopped)
+ mt76x2_mac_stop(dev, false);
+
+ if (is_5ghz)
+ mt76x2_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+
+ if (!mac_stopped)
+ mt76x2_mac_resume(dev);
+
+ mt76x2_apply_gain_adj(dev);
+
+ dev->cal.channel_cal_done = true;
+}
+
+static void
+mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+ u32 pa_mode[2];
+ u32 pa_mode_adj;
+
+ if (band == NL80211_BAND_2GHZ) {
+ pa_mode[0] = 0x010055ff;
+ pa_mode[1] = 0x00550055;
+
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+ } else {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+ }
+ } else {
+ pa_mode[0] = 0x0000ffff;
+ pa_mode[1] = 0x00ff00ff;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+ } else {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+ }
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+
+ if (mt76x2_ext_pa_enabled(dev, band))
+ pa_mode_adj = 0x04000000;
+ else
+ pa_mode_adj = 0;
+
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+ }
+
+ mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ u32 val;
+
+ if (band == NL80211_BAND_2GHZ)
+ val = 0x3c3c023c;
+ else
+ val = 0x363c023c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+ } else {
+ if (band == NL80211_BAND_2GHZ) {
+ u32 val = 0x0f3c3c3c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+ }
+ }
+}
+
+static void
+mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
+{
+ u32 cfg0, cfg1;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ cfg0 = bw ? 0x000b0c01 : 0x00101101;
+ cfg1 = 0x00011414;
+ } else {
+ cfg0 = bw ? 0x000b0b01 : 0x00101001;
+ cfg1 = 0x00021414;
+ }
+ mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+ mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+
+static void
+mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+static void
+mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+
+static void
+mt76x2_set_rx_chains(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~(BIT(3) | BIT(4));
+
+ if (dev->chainmask & BIT(1))
+ val |= BIT(3);
+
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+static void
+mt76x2_set_tx_dac(struct mt76x2_dev *dev)
+{
+ if (dev->chainmask & BIT(1))
+ mt76_set(dev, MT_BBP(TXBE, 5), 3);
+ else
+ mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+static void
+mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest)
+{
+ dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
+ dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
+}
+
+static int
+mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -62;
+ case NL80211_CHAN_WIDTH_40:
+ return -65;
+ default:
+ return -68;
+ }
+}
+
+static int
+mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -76;
+ case NL80211_CHAN_WIDTH_40:
+ return -79;
+ default:
+ return -82;
+ }
+}
+
+static void
+mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
+{
+ u32 val;
+ u8 gain_val[2];
+
+ gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+ gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+ if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+ val = 0x1e42 << 16;
+ else
+ val = 0x1836 << 16;
+
+ val |= 0xf8;
+
+ mt76_wr(dev, MT_BBP(AGC, 8),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
+ mt76_wr(dev, MT_BBP(AGC, 9),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
+
+ if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+ mt76x2_dfs_adjust_agc(dev);
+}
+
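+/* walk the VGA gain up or down based on the false CCA rate */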
+static void
+mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
+{
+ u32 false_cca;
+ u8 limit = dev->cal.low_gain > 1 ? 4 : 16;
+
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
+ if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
+ dev->cal.agc_gain_adjust += 2;
+ else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0)
+ dev->cal.agc_gain_adjust -= 2;
+ else
+ return;
+
+ mt76x2_phy_set_gain_val(dev);
+}
+
+static void
+mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_BBP(AGC, 20));
+ int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val);
+ int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val);
+ u8 *gain = dev->cal.agc_gain_init;
+ u8 gain_delta;
+ int low_gain;
+
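+ /* running RSSI average in 8.8 fixed point:
+ * avg = avg * 15/16 + (rssi << 8)
+ */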
+ dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + (rssi0 << 8) / 16;
+ dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + (rssi1 << 8) / 16;
+ dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
+ dev->cal.avg_rssi[1]) / 512;
+
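+ /* 0: signal below both thresholds, 1: between them, 2: strong signal */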
+ low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+
+ if (dev->cal.low_gain == low_gain) {
+ mt76x2_phy_adjust_vga_gain(dev);
+ return;
+ }
+
+ dev->cal.low_gain = low_gain;
+
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
+ else
+ mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+
+ if (low_gain) {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+ mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
+ if (mt76x2_has_ext_lna(dev))
+ gain_delta = 10;
+ else
+ gain_delta = 14;
+ } else {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
+ else
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
+ mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
+ gain_delta = 0;
+ }
+
+ dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+ dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+ dev->cal.agc_gain_adjust = 0;
+ mt76x2_phy_set_gain_val(dev);
+}
+
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *chan = chandef->chan;
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ enum nl80211_band band = chan->band;
+ u8 channel;
+
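+ /* One entry per position of the primary 20 MHz channel within the
+ * group: each remaps the four CCA engines and selects the matching
+ * one in the mask, presumably so CCA tracking follows the primary
+ * channel.
+ */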
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ int ch_group_index;
+ u8 bw, bw_index;
+ int freq, freq1;
+ int ret;
+
+ dev->cal.channel_cal_done = false;
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+ channel = chan->hw_value;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = 1;
+ if (freq1 > freq) {
+ bw_index = 1;
+ ch_group_index = 0;
+ } else {
+ bw_index = 3;
+ ch_group_index = 1;
+ }
+ channel += 2 - ch_group_index * 4;
+ break;
+ case NL80211_CHAN_WIDTH_80:
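+ /* Map the primary channel offset from the 80 MHz center frequency
+ * (-30/-10/+10/+30 MHz) to a 20 MHz sub-channel index in 0..3.
+ */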
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ bw = 2;
+ bw_index = ch_group_index;
+ channel += 6 - ch_group_index * 4;
+ break;
+ default:
+ bw = 0;
+ bw_index = 0;
+ ch_group_index = 0;
+ break;
+ }
+
+ mt76x2_read_rx_gain(dev);
+ mt76x2_phy_set_txpower_regs(dev, band);
+ mt76x2_configure_tx_delay(dev, band, bw);
+ mt76x2_phy_set_txpower(dev);
+
+ mt76x2_set_rx_chains(dev);
+ mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x2_set_tx_dac(dev);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+ if (ret)
+ return ret;
+
+ mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+ /* Enable LDPC Rx */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+ if (!dev->cal.init_cal_done) {
+ u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+ if (val != 0xff)
+ mt76x2_mcu_calibrate(dev, MCU_CAL_R, 0);
+ }
+
+ mt76x2_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+ /* Rx LPF calibration */
+ if (!dev->cal.init_cal_done)
+ mt76x2_mcu_calibrate(dev, MCU_CAL_RC, 0);
+
+ dev->cal.init_cal_done = true;
+
+ mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
+ mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+ mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
+
+ if (scan)
+ return 0;
+
+ dev->cal.low_gain = -1;
+ mt76x2_phy_channel_calibrate(dev, true);
+ mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
+ memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+ sizeof(dev->cal.agc_gain_cur));
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+
+ return 0;
+}
+
+static void
+mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76x2_tssi_comp t = {};
+
+ if (!dev->cal.tssi_cal_done)
+ return;
+
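+ /* Compensation runs in two passes: trigger a TSSI measurement first,
+ * then read it back and program the result on the next invocation.
+ */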
+ if (!dev->cal.tssi_comp_pending) {
+ /* TSSI trigger */
+ t.cal_mode = BIT(0);
+ mt76x2_mcu_tssi_comp(dev, &t);
+ dev->cal.tssi_comp_pending = true;
+ } else {
+ if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+ return;
+
+ dev->cal.tssi_comp_pending = false;
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ t.pa_mode = 1;
+
+ t.cal_mode = BIT(1);
+ t.slope0 = txp.chain[0].tssi_slope;
+ t.offset0 = txp.chain[0].tssi_offset;
+ t.slope1 = txp.chain[1].tssi_slope;
+ t.offset1 = txp.chain[1].tssi_offset;
+ mt76x2_mcu_tssi_comp(dev, &t);
+
+ if (t.pa_mode || dev->cal.dpd_cal_done)
+ return;
+
+ usleep_range(10000, 20000);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+ dev->cal.dpd_cal_done = true;
+ }
+}
+
+static void
+mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
+{
+ struct mt76x2_temp_comp t;
+ int temp, db_diff;
+
+ if (mt76x2_get_temp_comp(dev, &t))
+ return;
+
+ temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
+ temp -= t.temp_25_ref;
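+ /* One sensor step appears to correspond to ~1.789 degrees C, relative
+ * to the 25 degree reference value from the EEPROM.
+ */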
+ temp = (temp * 1789) / 1000 + 25;
+ dev->cal.temp = temp;
+
+ if (temp > 25)
+ db_diff = (temp - 25) / t.high_slope;
+ else
+ db_diff = (25 - temp) / t.low_slope;
+
+ db_diff = min(db_diff, t.upper_bound);
+ db_diff = max(db_diff, t.lower_bound);
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ db_diff * 2);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ db_diff * 2);
+}
+
+void mt76x2_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x2_dev *dev;
+
+ dev = container_of(work, struct mt76x2_dev, cal_work.work);
+ mt76x2_phy_channel_calibrate(dev, false);
+ mt76x2_phy_tssi_compensate(dev);
+ mt76x2_phy_temp_compensate(dev);
+ mt76x2_phy_update_channel_gain(dev);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2_phy_start(struct mt76x2_dev *dev)
+{
+ int ret;
+
+ ret = mt76x2_mcu_set_radio_state(dev, true);
+ if (ret)
+ return ret;
+
+ mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
new file mode 100644
index 000000000000..ce3ab85c8b0f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -0,0 +1,587 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_REGS_H
+#define __MT76x2_REGS_H
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_MAC_CSR0 0x1000
+
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_CH_TIME_CFG 0x110c
+#define MT_CH_TIME_CFG_TIMER_EN BIT(0)
+#define MT_CH_TIME_CFG_TX_AS_BUSY BIT(1)
+#define MT_CH_TIME_CFG_RX_AS_BUSY BIT(2)
+#define MT_CH_TIME_CFG_NAV_AS_BUSY BIT(3)
+#define MT_CH_TIME_CFG_EIFS_AS_BUSY BIT(4)
+#define MT_CH_TIME_CFG_MDRDY_CNT_EN BIT(5)
+#define MT_CH_TIME_CFG_CH_TIMER_CLR GENMASK(9, 8)
+#define MT_CH_TIME_CFG_MDRDY_CLR GENMASK(11, 10)
+
+#define MT_PBF_LIFE_TIMER 0x1110
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TBTT_TIMER_CFG 0x1124
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_CH_IDLE 0x1130
+#define MT_CH_BUSY 0x1134
+#define MT_EXT_CH_BUSY 0x1138
+#define MT_ED_CCA_TIMER 0x1140
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8)
+
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_VHT_HT_FBK_CFG1 0x1358
+
+#define MT_PROT_CFG_RATE GENMASK(15, 0)
+#define MT_PROT_CFG_CTRL GENMASK(17, 16)
+#define MT_PROT_CFG_NAV GENMASK(19, 18)
+#define MT_PROT_CFG_TXOP_ALLOW GENMASK(25, 20)
+#define MT_PROT_CFG_RTS_THRESH BIT(26)
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_3 0x13ac
+#define MT_TX_ALC_CFG_4 0x13c0
+#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN BIT(31)
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+
+#define MT_HT_CTRL_CFG 0x1410
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+#define MT_PROT_AUTO_TX_CFG_PROT_PADJ GENMASK(11, 8)
+#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ GENMASK(27, 24)
+
+#define MT_RX_STAT_0 0x1700
+#define MT_RX_STAT_0_CRC_ERRORS GENMASK(15, 0)
+#define MT_RX_STAT_0_PHY_ERRORS GENMASK(31, 16)
+
+#define MT_RX_STAT_1 0x1704
+#define MT_RX_STAT_1_CCA_ERRORS GENMASK(15, 0)
+#define MT_RX_STAT_1_PLCP_ERRORS GENMASK(31, 16)
+
+#define MT_RX_STAT_2 0x1708
+#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0)
+#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
+
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8)
+
+#define MT_WCID_TX_RATE_BASE 0x1c00
+#define MT_WCID_TX_RATE(_i) (MT_WCID_TX_RATE_BASE + ((_i) << 3))
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_HIGH_GAIN GENMASK(21, 16)
+#define MT_BBP_AGC_LNA_MID_GAIN GENMASK(13, 8)
+#define MT_BBP_AGC_LNA_LOW_GAIN GENMASK(5, 0)
+
+/* AGC, R6/R7 */
+#define MT_BBP_AGC_LNA_ULOW_GAIN GENMASK(5, 0)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_LNA_GAIN_MODE GENMASK(7, 6)
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
+#define MT_SKEY_0(_bss, _idx) (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
+#define MT_SKEY_1(_bss, _idx) (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
+#define MT_SKEY(_bss, _idx) ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+struct mt76_wcid_addr {
+ u8 macaddr[6];
+ __le16 ba_mask;
+} __packed __aligned(4);
+
+struct mt76_wcid_key {
+ u8 key[16];
+ u8 tx_mic[8];
+ u8 rx_mic[8];
+} __packed __aligned(4);
+
+enum mt76x2_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
new file mode 100644
index 000000000000..a09f117848d6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt76x2_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
new file mode 100644
index 000000000000..4cd424148d4b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x2_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x2.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x2
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT " [%d:%d]"
+#define TXID_PR_ARG __entry->wcid, __entry->pktid
+
+DECLARE_EVENT_CLASS(dev_evt,
+ TP_PROTO(struct mt76x2_dev *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ ),
+ TP_printk(DEV_PR_FMT, DEV_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+ TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TXID_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ TXID_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TXID_PR_FMT,
+ DEV_PR_ARG, TXID_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+ TP_PROTO(struct mt76x2_dev *dev),
+ TP_ARGS(dev)
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
+ TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid)
+);
+
+TRACE_EVENT(mac_txstat_fetch,
+ TP_PROTO(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat),
+
+ TP_ARGS(dev, stat),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TXID_ENTRY
+ __field(bool, success)
+ __field(bool, aggr)
+ __field(bool, ack_req)
+ __field(u16, rate)
+ __field(u8, retry)
+ ),
+
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->success = stat->success;
+ __entry->aggr = stat->aggr;
+ __entry->ack_req = stat->ack_req;
+ __entry->wcid = stat->wcid;
+ __entry->pktid = stat->pktid;
+ __entry->rate = stat->rate;
+ __entry->retry = stat->retry;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT TXID_PR_FMT
+ " success:%d aggr:%d ack_req:%d"
+ " rate:%04x retry:%d",
+ DEV_PR_ARG, TXID_PR_ARG,
+ __entry->success, __entry->aggr, __entry->ack_req,
+ __entry->rate, __entry->retry
+ )
+);
+
+
+TRACE_EVENT(dev_irq,
+ TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask),
+
+ TP_ARGS(dev, val, mask),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, val)
+ __field(u32, mask)
+ ),
+
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->val = val;
+ __entry->mask = mask;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT " %08x & %08x",
+ DEV_PR_ARG, __entry->val, __entry->mask
+ )
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt76x2_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
new file mode 100644
index 000000000000..534e4bf9a34c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+struct beacon_bc_data {
+ struct mt76x2_dev *dev;
+ struct sk_buff_head q;
+ struct sk_buff *tail[8];
+};
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x2_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->global_wcid;
+
+ if (control->sta) {
+ struct mt76x2_sta *msta;
+
+ msta = (struct mt76x2_sta *) control->sta->drv_priv;
+ wcid = &msta->wcid;
+ }
+
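+ /* Frames sent without a hardware key through a wcid that has one
+ * programmed fall back to the keyless per-vif group wcid.
+ */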
+ if (vif && !info->control.hw_key && wcid->hw_key_idx != 0xff) {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *) vif->drv_priv;
+ wcid = &mvif->group_wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ieee80211_free_txskb(mt76_hw(dev), skb);
+ } else {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(mt76_hw(dev), skb);
+ }
+}
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+ if (mcs == 8 || mcs == 9) {
+ max_txpwr = dev->rate_power.vht[8];
+ } else {
+ u8 nss, idx;
+
+ nss = ieee80211_rate_get_vht_nss(rate);
+ idx = ((nss - 1) << 3) + mcs;
+ max_txpwr = dev->rate_power.ht[idx & 0xf];
+ }
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+ } else {
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+ if (band == NL80211_BAND_2GHZ) {
+ const struct ieee80211_rate *r;
+ struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+ struct mt76_rate_power *rp = &dev->rate_power;
+
+ r = &wiphy->bands[band]->bitrates[rate->idx];
+ if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+ max_txpwr = rp->cck[r->hw_value & 0x3];
+ else
+ max_txpwr = rp->ofdm[r->hw_value & 0x7];
+ } else {
+ max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+ }
+ }
+
+ return max_txpwr;
+}
+
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
+ txpwr -= (dev->target_power + dev->target_power_delta[0]);
+ txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+ if (!dev->enable_tpc)
+ return 0;
+ else if (txpwr >= 0)
+ return min_t(s8, txpwr, 7);
+ else
+ return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
+{
+ s8 txpwr_adj;
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
+ dev->rate_power.ofdm[4]);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+
+static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return 0;
+
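+ /* The hardware expects a 4-byte aligned 802.11 header: grow the frame
+ * by two bytes and shift the header forward, leaving a zeroed pad
+ * between header and payload.
+ */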
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 2;
+}
+
+int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int qsel = MT_QSEL_EDCA;
+ int ret;
+
+ if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
+ mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
+
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
+
+ ret = mt76x2_insert_hdr_pad(skb);
+ if (ret < 0)
+ return ret;
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ qsel = MT_QSEL_MGMT;
+
+ *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ *tx_info |= MT_TXD_INFO_WIV;
+
+ return 0;
+}
+
+static void
+mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct sk_buff *skb = NULL;
+
+ if (!(dev->beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ mt76x2_mac_set_beacon(dev, mvif->idx, skb);
+}
+
+static void
+mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct beacon_bc_data *data = priv;
+ struct mt76x2_dev *dev = data->dev;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ if (!(dev->beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ mt76_skb_set_moredata(skb, true);
+ __skb_queue_tail(&data->q, skb);
+ data->tail[mvif->idx] = skb;
+}
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg)
+{
+ struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
+ struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
+ struct beacon_bc_data data = {};
+ struct sk_buff *skb;
+ int i, nframes;
+
+ data.dev = dev;
+ __skb_queue_head_init(&data.q);
+
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x2_update_beacon_iter, dev);
+
+ do {
+ nframes = skb_queue_len(&data.q);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x2_add_buffered_bc, &data);
+ } while (nframes != skb_queue_len(&data.q));
+
+ if (!nframes)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ if (!data.tail[i])
+ continue;
+
+ mt76_skb_set_moredata(data.tail[i], false);
+ }
+
+ spin_lock_bh(&q->lock);
+ while ((skb = __skb_dequeue(&data.q)) != NULL) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+
+ mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
+ }
+ spin_unlock_bh(&q->lock);
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c
new file mode 100644
index 000000000000..ea4ab8729ae4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h
new file mode 100644
index 000000000000..ea30895933c5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT " %04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_rr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_wr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
new file mode 100644
index 000000000000..4eef69bd8a9e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+ dma_addr_t addr;
+ int size;
+
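+ /* Round up to a full cache line, presumably to keep the DMA-mapped
+ * txwi from sharing a line with unrelated data.
+ */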
+ size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
+ t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ if (!t)
+ return NULL;
+
+ addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ t->dma_addr = addr;
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = NULL;
+
+ spin_lock_bh(&dev->lock);
+ if (!list_empty(&dev->txwi_cache)) {
+ t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+ list);
+ list_del(&t->list);
+ }
+ spin_unlock_bh(&dev->lock);
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+ if (t)
+ return t;
+
+ return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ if (!t)
+ return;
+
+ spin_lock_bh(&dev->lock);
+ list_add(&t->list, &dev->txwi_cache);
+ spin_unlock_bh(&dev->lock);
+}
+
+void mt76_tx_free(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+
+ while ((t = __mt76_get_txwi(dev)) != NULL)
+ dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+}
+
+static int
+mt76_txq_get_qid(struct ieee80211_txq *txq)
+{
+ if (!txq->sta)
+ return MT_TXQ_BE;
+
+ return txq->ac;
+}
+
+int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_queue_entry e;
+ struct mt76_txwi_cache *t;
+ struct mt76_queue_buf buf[32];
+ struct sk_buff *iter;
+ dma_addr_t addr;
+ int len;
+ u32 tx_info = 0;
+ int n, ret;
+
+ t = mt76_get_txwi(dev);
+ if (!t) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return -ENOMEM;
+ }
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+ &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto free;
+
+ len = skb->len - skb->data_len;
+ addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr)) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ n = 0;
+ buf[n].addr = t->dma_addr;
+ buf[n++].len = dev->drv->txwi_size;
+ buf[n].addr = addr;
+ buf[n++].len = len;
+
+ skb_walk_frags(skb, iter) {
+ if (n == ARRAY_SIZE(buf))
+ goto unmap;
+
+ addr = dma_map_single(dev->dev, iter->data, iter->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr))
+ goto unmap;
+
+ buf[n].addr = addr;
+ buf[n++].len = iter->len;
+ }
+
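+ /* Each descriptor holds two buffers; one descriptor is kept unused,
+ * presumably to tell a full ring apart from an empty one.
+ */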
+ if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ goto unmap;
+
+ return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+
+unmap:
+ ret = -ENOMEM;
+ for (n--; n > 0; n--)
+ dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
+ DMA_TO_DEVICE);
+
+free:
+ e.skb = skb;
+ e.txwi = t;
+ dev->drv->tx_complete_skb(dev, q, &e, true);
+ mt76_put_txwi(dev, t);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
+
+void
+mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ struct mt76_wcid *wcid, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_queue *q;
+ int qid = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ q = &dev->q_tx[qid];
+
+ spin_lock_bh(&q->lock);
+ mt76_tx_queue_skb(dev, q, skb, wcid, sta);
+ dev->queue_ops->kick(dev, q);
+
+ if (q->queued > q->ndesc - 8)
+ ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+ spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx);
+
+static struct sk_buff *
+mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+{
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&mtxq->retry_q);
+ if (skb) {
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+ if (ps && skb_queue_empty(&mtxq->retry_q))
+ ieee80211_sta_set_buffered(txq->sta, tid, false);
+
+ return skb;
+ }
+
+ return ieee80211_tx_dequeue(dev->hw, txq);
+}
+
+static void
+mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+}
+
+static void
+mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ struct sk_buff *skb, bool last)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+
+ info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
+ if (last)
+ info->flags |= IEEE80211_TX_STATUS_EOSP;
+
+ mt76_skb_set_moredata(skb, !last);
+ mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
+}
+
+void
+mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct mt76_dev *dev = hw->priv;
+ struct sk_buff *last_skb = NULL;
+ struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+ int i;
+
+ spin_lock_bh(&hwq->lock);
+ for (i = 0; tids && nframes; i++, tids >>= 1) {
+ struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+ struct sk_buff *skb;
+
+ if (!(tids & 1))
+ continue;
+
+ do {
+ skb = mt76_txq_dequeue(dev, mtxq, true);
+ if (!skb)
+ break;
+
+ if (mtxq->aggr)
+ mt76_check_agg_ssn(mtxq, skb);
+
+ nframes--;
+ if (last_skb)
+ mt76_queue_ps_skb(dev, sta, last_skb, false);
+
+ last_skb = skb;
+ } while (nframes);
+ }
+
+ if (last_skb) {
+ mt76_queue_ps_skb(dev, sta, last_skb, true);
+ dev->queue_ops->kick(dev, hwq);
+ }
+ spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
+
+static int
+mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+ struct mt76_txq *mtxq, bool *empty)
+{
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct ieee80211_tx_info *info;
+ struct mt76_wcid *wcid = mtxq->wcid;
+ struct sk_buff *skb;
+ int n_frames = 1, limit;
+ struct ieee80211_tx_rate tx_rate;
+ bool ampdu;
+ bool probe;
+ int idx;
+
+ skb = mt76_txq_dequeue(dev, mtxq, false);
+ if (!skb) {
+ *empty = true;
+ return 0;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+ info->control.rates, 1);
+ tx_rate = info->control.rates[0];
+
+ probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+ ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
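+ /* Allow a longer burst when the frames can be aggregated into an
+ * A-MPDU.
+ */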
+ limit = ampdu ? 16 : 3;
+
+ if (ampdu)
+ mt76_check_agg_ssn(mtxq, skb);
+
+ idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+
+ if (idx < 0)
+ return idx;
+
+ do {
+ bool cur_ampdu;
+
+ if (probe)
+ break;
+
+ if (test_bit(MT76_SCANNING, &dev->state) ||
+ test_bit(MT76_RESET, &dev->state))
+ return -EBUSY;
+
+ skb = mt76_txq_dequeue(dev, mtxq, false);
+ if (!skb) {
+ *empty = true;
+ break;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+
+ if (ampdu != cur_ampdu ||
+ (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+ skb_queue_tail(&mtxq->retry_q, skb);
+ break;
+ }
+
+ info->control.rates[0] = tx_rate;
+
+ if (cur_ampdu)
+ mt76_check_agg_ssn(mtxq, skb);
+
+ idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ if (idx < 0)
+ return idx;
+
+ n_frames++;
+ } while (n_frames < limit);
+
+ if (!probe) {
+ hwq->swq_queued++;
+ hwq->entry[idx].schedule = true;
+ }
+
+ dev->queue_ops->kick(dev, hwq);
+
+ return n_frames;
+}
+
+static int
+mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
+{
+ struct mt76_txq *mtxq, *mtxq_last;
+ int len = 0;
+
+restart:
+ mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
+ while (!list_empty(&hwq->swq)) {
+ bool empty = false;
+ int cur;
+
+ mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
+ if (mtxq->send_bar && mtxq->aggr) {
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct ieee80211_sta *sta = txq->sta;
+ struct ieee80211_vif *vif = txq->vif;
+ u16 agg_ssn = mtxq->agg_ssn;
+ u8 tid = txq->tid;
+
+ mtxq->send_bar = false;
+ spin_unlock_bh(&hwq->lock);
+ ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
+ spin_lock_bh(&hwq->lock);
+ goto restart;
+ }
+
+ list_del_init(&mtxq->list);
+
+ cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
+ if (!empty)
+ list_add_tail(&mtxq->list, &hwq->swq);
+
+ if (cur < 0)
+ return cur;
+
+ len += cur;
+
+ if (mtxq == mtxq_last)
+ break;
+ }
+
+ return len;
+}
+
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+{
+ int len;
+
+ do {
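+ /* Stop once four bursts are pending on this hardware queue */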
+ if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
+ break;
+
+ len = mt76_txq_schedule_list(dev, hwq);
+ } while (len > 0);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule);
+
+void mt76_txq_schedule_all(struct mt76_dev *dev)
+{
+ int i;
+
+ for (i = 0; i <= MT_TXQ_BK; i++) {
+ struct mt76_queue *q = &dev->q_tx[i];
+
+ spin_lock_bh(&q->lock);
+ mt76_txq_schedule(dev, q);
+ spin_unlock_bh(&q->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
+
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ bool send_bar)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+
+ spin_lock_bh(&mtxq->hwq->lock);
+ mtxq->send_bar = mtxq->aggr && send_bar;
+ if (!list_empty(&mtxq->list))
+ list_del_init(&mtxq->list);
+ spin_unlock_bh(&mtxq->hwq->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
+
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ struct mt76_dev *dev = hw->priv;
+ struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+ struct mt76_queue *hwq = mtxq->hwq;
+
+ spin_lock_bh(&hwq->lock);
+ if (list_empty(&mtxq->list))
+ list_add_tail(&mtxq->list, &hwq->swq);
+ mt76_txq_schedule(dev, hwq);
+ spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
+
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq;
+ struct mt76_queue *hwq;
+ struct sk_buff *skb;
+
+ if (!txq)
+ return;
+
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+ hwq = mtxq->hwq;
+
+ spin_lock_bh(&hwq->lock);
+ if (!list_empty(&mtxq->list))
+ list_del(&mtxq->list);
+ spin_unlock_bh(&hwq->lock);
+
+ while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
+ ieee80211_free_txskb(dev->hw, skb);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_remove);
+
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+
+ INIT_LIST_HEAD(&mtxq->list);
+ skb_queue_head_init(&mtxq->retry_q);
+
+ mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
+}
+EXPORT_SYMBOL_GPL(mt76_txq_init);
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
new file mode 100644
index 000000000000..0c35b8db58cd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "mt76.h"
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ cur = dev->bus->rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ udelay(10);
+ } while (timeout-- > 0);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll);
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ cur = dev->bus->rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ usleep_range(10000, 20000);
+ } while (timeout-- > 0);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+
+int mt76_wcid_alloc(unsigned long *mask, int size)
+{
+ int i, idx = 0, cur;
+
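+ /* Scan the bitmap one long at a time for the first free index */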
+ for (i = 0; i < size / BITS_PER_LONG; i++) {
+ idx = ffs(~mask[i]);
+ if (!idx)
+ continue;
+
+ idx--;
+ cur = i * BITS_PER_LONG + idx;
+ if (cur >= size)
+ break;
+
+ mask[i] |= BIT(idx);
+ return cur;
+ }
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
new file mode 100644
index 000000000000..018d475504a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define MT76_INCR(_var, _size) \
+ _var = (((_var) + 1) % (_size))
+
+int mt76_wcid_alloc(unsigned long *mask, int size);
+
+static inline void
+mt76_wcid_free(unsigned long *mask, int idx)
+{
+ mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+}
+
+static inline void
+mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (enable)
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ else
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+#endif
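MT76_INCR is the driver's modular ring-index increment: the index wraps back to zero and never reaches _size, which is exactly what a hardware queue head/tail pointer needs. A minimal compile-and-run check of the wraparound (the _size argument is parenthesized here for macro hygiene; the driver version passes a plain identifier, so the behavior is the same):

    #include <assert.h>
    #include <stdio.h>

    #define MT76_INCR(_var, _size) \
        ((_var) = (((_var) + 1) % (_size)))

    int main(void)
    {
        unsigned int head = 0;
        const unsigned int ring_size = 4;
        unsigned int i;

        /* 0 -> 1 -> 2 -> 3 -> 0 -> 1: never reaches ring_size */
        for (i = 0; i < 5; i++)
            MT76_INCR(head, ring_size);

        assert(head == 1);
        printf("head wrapped to %u\n", head);
        return 0;
    }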
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 6711e7fb6926..0398bece5782 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -81,6 +81,41 @@ qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = {
};
static int
+qtnf_validate_iface_combinations(struct wiphy *wiphy,
+ struct qtnf_vif *change_vif,
+ enum nl80211_iftype new_type)
+{
+ struct qtnf_wmac *mac;
+ struct qtnf_vif *vif;
+ int i;
+ int ret = 0;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
+
+ mac = wiphy_priv(wiphy);
+ if (!mac)
+ return -EFAULT;
+
+ for (i = 0; i < QTNF_MAX_INTF; i++) {
+ vif = &mac->iflist[i];
+ if (vif->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
+ params.iftype_num[vif->wdev.iftype]++;
+ }
+
+ if (change_vif) {
+ params.iftype_num[new_type]++;
+ params.iftype_num[change_vif->wdev.iftype]--;
+ } else {
+ params.iftype_num[new_type]++;
+ }
+
+ ret = cfg80211_check_combinations(wiphy, &params);
+
+ return ret;
+}
+
+static int
qtnf_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
enum nl80211_iftype type,
@@ -90,6 +125,13 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
u8 *mac_addr;
int ret;
+ ret = qtnf_validate_iface_combinations(wiphy, vif, type);
+ if (ret) {
+ pr_err("VIF%u.%u combination check: failed to set type %d\n",
+ vif->mac->macid, vif->vifid, type);
+ return ret;
+ }
+
if (params)
mac_addr = params->macaddr;
else
@@ -120,10 +162,6 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
qtnf_scan_done(vif->mac, true);
- if (qtnf_cmd_send_del_intf(vif))
- pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
- vif->vifid);
-
/* Stop data */
netif_tx_stop_all_queues(netdev);
if (netif_carrier_ok(netdev))
@@ -132,6 +170,10 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdevice(netdev);
+ if (qtnf_cmd_send_del_intf(vif))
+ pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
+ vif->vifid);
+
vif->netdev->ieee80211_ptr = NULL;
vif->netdev = NULL;
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
@@ -150,12 +192,20 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
struct qtnf_wmac *mac;
struct qtnf_vif *vif;
u8 *mac_addr = NULL;
+ int ret;
mac = wiphy_priv(wiphy);
if (!mac)
return ERR_PTR(-EFAULT);
+ ret = qtnf_validate_iface_combinations(wiphy, NULL, type);
+ if (ret) {
+ pr_err("MAC%u invalid combination: failed to add type %d\n",
+ mac->macid, type);
+ return ERR_PTR(ret);
+ }
+
switch (type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
@@ -190,7 +240,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
goto err_mac;
}
- if (qtnf_core_net_attach(mac, vif, name, name_assign_t, type)) {
+ if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) {
pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
vif->vifid);
goto err_net;
@@ -381,6 +431,7 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
u32 short_cookie = prandom_u32();
u16 flags = 0;
+ u16 freq;
*cookie = short_cookie;
@@ -393,13 +444,21 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (params->dont_wait_for_ack)
flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT;
+ /* If the channel is not specified, pass "freq = 0" to tell the
+ * device firmware to use the current channel.
+ */
+ if (params->chan)
+ freq = params->chan->center_freq;
+ else
+ freq = 0;
+
pr_debug("%s freq:%u; FC:%.4X; DA:%pM; len:%zu; C:%.8X; FL:%.4X\n",
- wdev->netdev->name, params->chan->center_freq,
+ wdev->netdev->name, freq,
le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
params->len, short_cookie, flags);
return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags,
- params->chan->center_freq,
+ freq,
params->buf, params->len);
}
@@ -409,6 +468,7 @@ qtnf_get_station(struct wiphy *wiphy, struct net_device *dev,
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ sinfo->generation = vif->generation;
return qtnf_cmd_get_sta_info(vif, mac, sinfo);
}
@@ -430,11 +490,13 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo);
if (unlikely(ret == -ENOENT)) {
- qtnf_sta_list_del(&vif->sta_list, mac);
+ qtnf_sta_list_del(vif, mac);
cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
sinfo->filled = 0;
}
+ sinfo->generation = vif->generation;
+
return ret;
}
@@ -533,19 +595,13 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static void qtnf_scan_timeout(struct timer_list *t)
-{
- struct qtnf_wmac *mac = from_timer(mac, t, scan_timeout);
-
- pr_warn("mac%d scan timed out\n", mac->macid);
- qtnf_scan_done(mac, true);
-}
-
static int
qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ cancel_delayed_work_sync(&mac->scan_timeout);
+
mac->scan_req = request;
if (qtnf_cmd_send_scan(mac)) {
@@ -554,9 +610,8 @@ qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
return -EFAULT;
}
- mac->scan_timeout.function = qtnf_scan_timeout;
- mod_timer(&mac->scan_timeout,
- jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ);
+ queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout,
+ QTNF_SCAN_TIMEOUT_SEC * HZ);
return 0;
}
@@ -617,7 +672,6 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
- vif->sta_state = QTNF_STA_DISCONNECTED;
return 0;
}
@@ -717,7 +771,8 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
}
if (!cfg80211_chandef_valid(chandef)) {
- pr_err("%s: bad chan freq1=%u freq2=%u bw=%u\n", ndev->name,
+ pr_err("%s: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+ ndev->name, chandef->chan->center_freq,
chandef->center_freq1, chandef->center_freq2,
chandef->width);
ret = -ENODATA;
@@ -750,6 +805,35 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static int qtnf_start_radar_detection(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_chan_def *chandef,
+ u32 cac_time_ms)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ int ret;
+
+ ret = qtnf_cmd_start_cac(vif, chandef, cac_time_ms);
+ if (ret)
+ pr_err("%s: failed to start CAC ret=%d\n", ndev->name, ret);
+
+ return ret;
+}
+
+static int qtnf_set_mac_acl(struct wiphy *wiphy,
+ struct net_device *dev,
+ const struct cfg80211_acl_data *params)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_set_mac_acl(vif, params);
+ if (ret)
+ pr_err("%s: failed to set mac ACL ret=%d\n", dev->name, ret);
+
+ return ret;
+}
+
static struct cfg80211_ops qtn_cfg80211_ops = {
.add_virtual_intf = qtnf_add_virtual_intf,
.change_virtual_intf = qtnf_change_virtual_intf,
@@ -773,7 +857,9 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.disconnect = qtnf_disconnect,
.dump_survey = qtnf_dump_survey,
.get_channel = qtnf_get_channel,
- .channel_switch = qtnf_channel_switch
+ .channel_switch = qtnf_channel_switch,
+ .start_radar_detection = qtnf_start_radar_detection,
+ .set_mac_acl = qtnf_set_mac_acl,
};
static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -802,6 +888,9 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
continue;
mac = bus->mac[mac_idx];
+ if (!mac)
+ continue;
+
wiphy = priv_to_wiphy(mac);
for (band = 0; band < NUM_NL80211_BANDS; ++band) {
@@ -829,29 +918,29 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
return wiphy;
}
-static int qtnf_wiphy_setup_if_comb(struct wiphy *wiphy,
- struct ieee80211_iface_combination *if_comb,
- const struct qtnf_mac_info *mac_info)
+static int
+qtnf_wiphy_setup_if_comb(struct wiphy *wiphy, struct qtnf_mac_info *mac_info)
{
- size_t max_interfaces = 0;
+ struct ieee80211_iface_combination *if_comb;
+ size_t n_if_comb;
u16 interface_modes = 0;
- size_t i;
+ size_t i, j;
+
+ if_comb = mac_info->if_comb;
+ n_if_comb = mac_info->n_if_comb;
- if (unlikely(!mac_info->limits || !mac_info->n_limits))
+ if (!if_comb || !n_if_comb)
return -ENOENT;
- if_comb->limits = mac_info->limits;
- if_comb->n_limits = mac_info->n_limits;
+ for (i = 0; i < n_if_comb; i++) {
+ if_comb[i].radar_detect_widths = mac_info->radar_detect_widths;
- for (i = 0; i < mac_info->n_limits; i++) {
- max_interfaces += mac_info->limits[i].max;
- interface_modes |= mac_info->limits[i].types;
+ for (j = 0; j < if_comb[i].n_limits; j++)
+ interface_modes |= if_comb[i].limits[j].types;
}
- if_comb->num_different_channels = 1;
- if_comb->beacon_int_infra_match = true;
- if_comb->max_interfaces = max_interfaces;
- if_comb->radar_detect_widths = mac_info->radar_detect_widths;
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = n_if_comb;
wiphy->interface_modes = interface_modes;
return 0;
@@ -860,7 +949,6 @@ static int qtnf_wiphy_setup_if_comb(struct wiphy *wiphy,
int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
- struct ieee80211_iface_combination *iface_comb = NULL;
int ret;
if (!wiphy) {
@@ -868,14 +956,6 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
return -EFAULT;
}
- iface_comb = kzalloc(sizeof(*iface_comb), GFP_KERNEL);
- if (!iface_comb)
- return -ENOMEM;
-
- ret = qtnf_wiphy_setup_if_comb(wiphy, iface_comb, &mac->macinfo);
- if (ret)
- goto out;
-
wiphy->frag_threshold = mac->macinfo.frag_thr;
wiphy->rts_threshold = mac->macinfo.rts_thr;
wiphy->retry_short = mac->macinfo.sretry_limit;
@@ -886,11 +966,13 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
-
- wiphy->iface_combinations = iface_comb;
- wiphy->n_iface_combinations = 1;
+ wiphy->max_acl_mac_addrs = mac->macinfo.max_acl_mac_addrs;
wiphy->max_num_csa_counters = 2;
+ ret = qtnf_wiphy_setup_if_comb(wiphy, &mac->macinfo);
+ if (ret)
+ goto out;
+
/* Initialize cipher suits */
wiphy->cipher_suites = qtnf_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(qtnf_cipher_suites);
@@ -924,6 +1006,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
}
+ strlcpy(wiphy->fw_version, hw_info->fw_version,
+ sizeof(wiphy->fw_version));
+ wiphy->hw_version = hw_info->hw_version;
+
ret = wiphy_register(wiphy);
if (ret < 0)
goto out;
@@ -935,12 +1021,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
ret = regulatory_hint(wiphy, hw_info->rd->alpha2);
out:
- if (ret) {
- kfree(iface_comb);
- return ret;
- }
-
- return 0;
+ return ret;
}
void qtnf_netdev_updown(struct net_device *ndev, bool up)
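The new qtnf_validate_iface_combinations() above builds a per-iftype census of all active VIFs and hands it to cfg80211_check_combinations(); on an interface *change* it decrements the old type after adding the new one, so the VIF being changed is not counted twice. A hypothetical standalone model of just that accounting step (enum names and helper invented for illustration; the real code fills struct iface_combination_params.iftype_num):

    #include <stdio.h>

    enum iftype { IFTYPE_UNSPECIFIED, IFTYPE_STATION, IFTYPE_AP,
                  NUM_IFTYPES };

    /* Count every active VIF, then either add the new type (interface
     * add, change_idx < 0) or swap old for new (interface change). */
    static void account(int num[NUM_IFTYPES], const enum iftype *vifs,
                        int n, int change_idx, enum iftype new_type)
    {
        int i;

        for (i = 0; i < n; i++)
            if (vifs[i] != IFTYPE_UNSPECIFIED)
                num[vifs[i]]++;

        num[new_type]++;
        if (change_idx >= 0)
            num[vifs[change_idx]]--;    /* drop the old role */
    }

    int main(void)
    {
        enum iftype vifs[] = { IFTYPE_AP, IFTYPE_STATION };
        int num[NUM_IFTYPES] = { 0 };

        account(num, vifs, 2, 1, IFTYPE_AP);    /* STA -> AP change */
        printf("AP=%d STA=%d\n", num[IFTYPE_AP], num[IFTYPE_STATION]);
        return 0;    /* prints AP=2 STA=0, not AP=2 STA=1 */
    }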
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
index 66db26613b1f..b73425122a10 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -28,23 +28,4 @@ void qtnf_band_init_rates(struct ieee80211_supported_band *band);
void qtnf_band_setup_htvht_caps(struct qtnf_mac_info *macinfo,
struct ieee80211_supported_band *band);
-static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
-{
- struct cfg80211_scan_info info = {
- .aborted = aborted,
- };
-
- if (timer_pending(&mac->scan_timeout))
- del_timer_sync(&mac->scan_timeout);
-
- mutex_lock(&mac->mac_lock);
-
- if (mac->scan_req) {
- cfg80211_scan_done(mac->scan_req, &info);
- mac->scan_req = NULL;
- }
-
- mutex_unlock(&mac->mac_lock);
-}
-
#endif /* _QTN_FMAC_CFG80211_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 8bc8dd637315..deca0060eb27 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -162,6 +162,14 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
memcpy(tlv->ie_data, buf, len);
}
+static inline size_t qtnf_cmd_acl_data_size(const struct cfg80211_acl_data *acl)
+{
+ size_t size = sizeof(struct qlink_acl_data) +
+ acl->n_acl_entries * sizeof(struct qlink_mac_address);
+
+ return size;
+}
+
static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
const struct cfg80211_ap_settings *s)
{
@@ -178,6 +186,10 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
if (cfg80211_chandef_valid(&s->chandef))
len += sizeof(struct qlink_tlv_chandef);
+ if (s->acl)
+ len += sizeof(struct qlink_tlv_hdr) +
+ qtnf_cmd_acl_data_size(s->acl);
+
if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
pr_err("VIF%u.%u: can not fit AP settings: %u\n",
vif->mac->macid, vif->vifid, len);
@@ -203,7 +215,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_START_AP,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
cmd = (struct qlink_cmd_start_ap *)cmd_skb->data;
@@ -247,7 +259,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
chtlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANDEF);
chtlv->hdr.len = cpu_to_le16(sizeof(*chtlv) -
sizeof(chtlv->hdr));
- qlink_chandef_cfg2q(&s->chandef, &chtlv->chan);
+ qlink_chandef_cfg2q(&s->chandef, &chtlv->chdef);
}
qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_BEACON_HEAD,
@@ -283,6 +295,16 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
memcpy(tlv->val, s->vht_cap, sizeof(*s->vht_cap));
}
+ if (s->acl) {
+ size_t acl_size = qtnf_cmd_acl_data_size(s->acl);
+ struct qlink_tlv_hdr *tlv =
+ skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+
+ tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
+ tlv->len = cpu_to_le16(acl_size);
+ qlink_acl_data_cfg2q(s->acl, (struct qlink_acl_data *)tlv->val);
+ }
+
qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
@@ -313,7 +335,7 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_STOP_AP,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -347,7 +369,7 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_REGISTER_MGMT,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -390,7 +412,7 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_SEND_MGMT_FRAME,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -436,7 +458,7 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_MGMT_SET_APPIE,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len);
@@ -461,30 +483,8 @@ out:
}
static void
-qtnf_sta_info_parse_basic_counters(struct station_info *sinfo,
- const struct qlink_sta_stat_basic_counters *counters)
-{
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES) |
- BIT(NL80211_STA_INFO_TX_BYTES);
- sinfo->rx_bytes = get_unaligned_le64(&counters->rx_bytes);
- sinfo->tx_bytes = get_unaligned_le64(&counters->tx_bytes);
-
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_BEACON_RX);
- sinfo->rx_packets = get_unaligned_le32(&counters->rx_packets);
- sinfo->tx_packets = get_unaligned_le32(&counters->tx_packets);
- sinfo->rx_beacon = get_unaligned_le64(&counters->rx_beacons);
-
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_TX_FAILED);
- sinfo->rx_dropped_misc = get_unaligned_le32(&counters->rx_dropped);
- sinfo->tx_failed = get_unaligned_le32(&counters->tx_failed);
-}
-
-static void
qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
- const struct qlink_sta_info_rate *rate_src)
+ const struct qlink_sta_info_rate *rate_src)
{
rate_dst->legacy = get_unaligned_le16(&rate_src->rate) * 10;
@@ -493,22 +493,23 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
rate_dst->flags = 0;
switch (rate_src->bw) {
- case QLINK_STA_INFO_RATE_BW_5:
+ case QLINK_CHAN_WIDTH_5:
rate_dst->bw = RATE_INFO_BW_5;
break;
- case QLINK_STA_INFO_RATE_BW_10:
+ case QLINK_CHAN_WIDTH_10:
rate_dst->bw = RATE_INFO_BW_10;
break;
- case QLINK_STA_INFO_RATE_BW_20:
+ case QLINK_CHAN_WIDTH_20:
+ case QLINK_CHAN_WIDTH_20_NOHT:
rate_dst->bw = RATE_INFO_BW_20;
break;
- case QLINK_STA_INFO_RATE_BW_40:
+ case QLINK_CHAN_WIDTH_40:
rate_dst->bw = RATE_INFO_BW_40;
break;
- case QLINK_STA_INFO_RATE_BW_80:
+ case QLINK_CHAN_WIDTH_80:
rate_dst->bw = RATE_INFO_BW_80;
break;
- case QLINK_STA_INFO_RATE_BW_160:
+ case QLINK_CHAN_WIDTH_160:
rate_dst->bw = RATE_INFO_BW_160;
break;
default:
@@ -578,87 +579,125 @@ qtnf_sta_info_parse_flags(struct nl80211_sta_flag_update *dst,
}
static void
-qtnf_sta_info_parse_generic_info(struct station_info *sinfo,
- const struct qlink_sta_info_generic *info)
+qtnf_cmd_sta_info_parse(struct station_info *sinfo,
+ const struct qlink_tlv_hdr *tlv,
+ size_t resp_size)
{
- sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME) |
- BIT(NL80211_STA_INFO_INACTIVE_TIME);
- sinfo->connected_time = get_unaligned_le32(&info->connected_time);
- sinfo->inactive_time = get_unaligned_le32(&info->inactive_time);
+ const struct qlink_sta_stats *stats = NULL;
+ const u8 *map = NULL;
+ unsigned int map_len = 0;
+ unsigned int stats_len = 0;
+ u16 tlv_len;
+
+#define qtnf_sta_stat_avail(stat_name, bitn) \
+ (qtnf_utils_is_bit_set(map, bitn, map_len) && \
+ (offsetofend(struct qlink_sta_stats, stat_name) <= stats_len))
+
+ while (resp_size >= sizeof(*tlv)) {
+ tlv_len = le16_to_cpu(tlv->len);
+
+ switch (le16_to_cpu(tlv->type)) {
+ case QTN_TLV_ID_STA_STATS_MAP:
+ map_len = tlv_len;
+ map = tlv->val;
+ break;
+ case QTN_TLV_ID_STA_STATS:
+ stats_len = tlv_len;
+ stats = (const struct qlink_sta_stats *)tlv->val;
+ break;
+ default:
+ break;
+ }
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
- BIT(NL80211_STA_INFO_SIGNAL_AVG);
- sinfo->signal = info->rssi - 120;
- sinfo->signal_avg = info->rssi_avg - QLINK_RSSI_OFFSET;
+ resp_size -= tlv_len + sizeof(*tlv);
+ tlv = (const struct qlink_tlv_hdr *)(tlv->val + tlv_len);
+ }
- if (info->rx_rate.rate) {
+ if (!map || !stats)
+ return;
+
+ if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->inactive_time = le32_to_cpu(stats->inactive_time);
+ }
+
+ if (qtnf_sta_stat_avail(connected_time,
+ QLINK_STA_INFO_CONNECTED_TIME)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+ sinfo->connected_time = le32_to_cpu(stats->connected_time);
+ }
+
+ if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = stats->signal - QLINK_RSSI_OFFSET;
+ }
+
+ if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET;
+ }
+
+ if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) {
sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
- qtnf_sta_info_parse_rate(&sinfo->rxrate, &info->rx_rate);
+ qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate);
}
- if (info->tx_rate.rate) {
+ if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) {
sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
- qtnf_sta_info_parse_rate(&sinfo->txrate, &info->tx_rate);
+ qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate);
}
- sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
- qtnf_sta_info_parse_flags(&sinfo->sta_flags, &info->state);
-}
+ if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+ qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags);
+ }
-static int qtnf_cmd_sta_info_parse(struct station_info *sinfo,
- const u8 *payload, size_t payload_size)
-{
- const struct qlink_sta_stat_basic_counters *counters;
- const struct qlink_sta_info_generic *sta_info;
- u16 tlv_type;
- u16 tlv_value_len;
- size_t tlv_full_len;
- const struct qlink_tlv_hdr *tlv;
+ if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+ sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
+ }
- sinfo->filled = 0;
+ if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+ sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
+ }
- tlv = (const struct qlink_tlv_hdr *)payload;
- while (payload_size >= sizeof(struct qlink_tlv_hdr)) {
- tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > payload_size) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
- switch (tlv_type) {
- case QTN_TLV_ID_STA_BASIC_COUNTERS:
- if (unlikely(tlv_value_len < sizeof(*counters))) {
- pr_err("invalid TLV size %.4X: %u\n",
- tlv_type, tlv_value_len);
- break;
- }
+ if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
+ }
- counters = (void *)tlv->val;
- qtnf_sta_info_parse_basic_counters(sinfo, counters);
- break;
- case QTN_TLV_ID_STA_GENERIC_INFO:
- if (unlikely(tlv_value_len < sizeof(*sta_info)))
- break;
+ if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
+ }
- sta_info = (void *)tlv->val;
- qtnf_sta_info_parse_generic_info(sinfo, sta_info);
- break;
- default:
- pr_warn("unexpected TLV type: %.4X\n", tlv_type);
- break;
- }
- payload_size -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+ if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->rx_packets = le32_to_cpu(stats->rx_packets);
}
- if (payload_size) {
- pr_warn("malformed TLV buf; bytes left: %zu\n", payload_size);
- return -EINVAL;
+ if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->tx_packets = le32_to_cpu(stats->tx_packets);
}
- return 0;
+ if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+ sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon);
+ }
+
+ if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC);
+ sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc);
+ }
+
+ if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->tx_failed = le32_to_cpu(stats->tx_failed);
+ }
+
+#undef qtnf_sta_stat_avail
}
int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
@@ -674,8 +713,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_GET_STA_INFO,
sizeof(*cmd));
-
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -715,7 +753,9 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
goto out;
}
- ret = qtnf_cmd_sta_info_parse(sinfo, resp->info, var_resp_len);
+ qtnf_cmd_sta_info_parse(sinfo,
+ (const struct qlink_tlv_hdr *)resp->info,
+ var_resp_len);
out:
qtnf_bus_unlock(vif->mac->bus);
@@ -738,7 +778,7 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
cmd_type,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -811,7 +851,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_DEL_INTF,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -908,6 +948,16 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
struct qtnf_hw_info *hwinfo = &bus->hw_info;
const struct qlink_tlv_hdr *tlv;
const struct qlink_tlv_reg_rule *tlv_rule;
+ const char *bld_name = NULL;
+ const char *bld_rev = NULL;
+ const char *bld_type = NULL;
+ const char *bld_label = NULL;
+ u32 bld_tmstamp = 0;
+ u32 plat_id = 0;
+ const char *hw_id = NULL;
+ const char *calibration_ver = NULL;
+ const char *uboot_ver = NULL;
+ u32 hw_ver = 0;
struct ieee80211_reg_rule *rule;
u16 tlv_type;
u16 tlv_value_len;
@@ -934,6 +984,10 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
hwinfo->rd->alpha2[0] = resp->alpha2[0];
hwinfo->rd->alpha2[1] = resp->alpha2[1];
+ bld_tmstamp = le32_to_cpu(resp->bld_tmstamp);
+ plat_id = le32_to_cpu(resp->plat_id);
+ hw_ver = le32_to_cpu(resp->hw_ver);
+
switch (resp->dfs_region) {
case QLINK_DFS_FCC:
hwinfo->rd->dfs_region = NL80211_DFS_FCC;
@@ -994,6 +1048,27 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
rule->flags = qtnf_cmd_resp_reg_rule_flags_parse(
le32_to_cpu(tlv_rule->flags));
break;
+ case QTN_TLV_ID_BUILD_NAME:
+ bld_name = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_BUILD_REV:
+ bld_rev = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_BUILD_TYPE:
+ bld_type = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_BUILD_LABEL:
+ bld_label = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_HW_ID:
+ hw_id = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_CALIBRATION_VER:
+ calibration_ver = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_UBOOT_VER:
+ uboot_ver = (const void *)tlv->val;
+ break;
default:
break;
}
@@ -1016,21 +1091,46 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
hwinfo->total_tx_chain, hwinfo->total_rx_chain,
hwinfo->hw_capab);
+ pr_info("\nBuild name: %s" \
+ "\nBuild revision: %s" \
+ "\nBuild type: %s" \
+ "\nBuild label: %s" \
+ "\nBuild timestamp: %lu" \
+ "\nPlatform ID: %lu" \
+ "\nHardware ID: %s" \
+ "\nCalibration version: %s" \
+ "\nU-Boot version: %s" \
+ "\nHardware version: 0x%08x",
+ bld_name, bld_rev, bld_type, bld_label,
+ (unsigned long)bld_tmstamp,
+ (unsigned long)plat_id,
+ hw_id, calibration_ver, uboot_ver, hw_ver);
+
+ strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
+ hwinfo->hw_version = hw_ver;
+
return 0;
}
static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const u8 *tlv_buf, size_t tlv_buf_size)
{
- struct ieee80211_iface_limit *limits = NULL;
- const struct qlink_iface_limit *limit_record;
- size_t record_count = 0, rec = 0;
- u16 tlv_type, tlv_value_len;
- struct qlink_iface_comb_num *comb;
+ struct ieee80211_iface_combination *comb = NULL;
+ size_t n_comb = 0;
+ struct ieee80211_iface_limit *limits;
+ const struct qlink_iface_comb_num *comb_num;
+ const struct qlink_iface_limit_record *rec;
+ const struct qlink_iface_limit *lim;
+ u16 rec_len;
+ u16 tlv_type;
+ u16 tlv_value_len;
size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
-
- mac->macinfo.n_limits = 0;
+ u8 *ext_capa = NULL;
+ u8 *ext_capa_mask = NULL;
+ u8 ext_capa_len = 0;
+ u8 ext_capa_mask_len = 0;
+ int i = 0;
tlv = (const struct qlink_tlv_hdr *)tlv_buf;
while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
@@ -1045,52 +1145,89 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
switch (tlv_type) {
case QTN_TLV_ID_NUM_IFACE_COMB:
- if (unlikely(tlv_value_len != sizeof(*comb)))
+ if (tlv_value_len != sizeof(*comb_num))
return -EINVAL;
- comb = (void *)tlv->val;
- record_count = le16_to_cpu(comb->iface_comb_num);
+ comb_num = (void *)tlv->val;
+
+ /* free earlier iface comb memory */
+ qtnf_mac_iface_comb_free(mac);
- mac->macinfo.n_limits = record_count;
- /* free earlier iface limits memory */
- kfree(mac->macinfo.limits);
- mac->macinfo.limits =
- kzalloc(sizeof(*mac->macinfo.limits) *
- record_count, GFP_KERNEL);
+ mac->macinfo.n_if_comb =
+ le32_to_cpu(comb_num->iface_comb_num);
- if (unlikely(!mac->macinfo.limits))
+ mac->macinfo.if_comb =
+ kcalloc(mac->macinfo.n_if_comb,
+ sizeof(*mac->macinfo.if_comb),
+ GFP_KERNEL);
+
+ if (!mac->macinfo.if_comb)
return -ENOMEM;
- limits = mac->macinfo.limits;
+ comb = mac->macinfo.if_comb;
+
+ pr_debug("MAC%u: %zu iface combinations\n",
+ mac->macid, mac->macinfo.n_if_comb);
+
break;
case QTN_TLV_ID_IFACE_LIMIT:
- if (unlikely(!limits)) {
- pr_warn("MAC%u: limits are not inited\n",
+ if (unlikely(!comb)) {
+ pr_warn("MAC%u: no combinations advertised\n",
mac->macid);
return -EINVAL;
}
- if (unlikely(tlv_value_len != sizeof(*limit_record))) {
- pr_warn("MAC%u: record size mismatch\n",
+ if (n_comb >= mac->macinfo.n_if_comb) {
+ pr_warn("MAC%u: combinations count exceeded\n",
mac->macid);
- return -EINVAL;
+ n_comb++;
+ break;
}
- limit_record = (void *)tlv->val;
- limits[rec].max = le16_to_cpu(limit_record->max_num);
- limits[rec].types = qlink_iface_type_to_nl_mask(
- le16_to_cpu(limit_record->type));
+ rec = (void *)tlv->val;
+ rec_len = sizeof(*rec) + rec->n_limits * sizeof(*lim);
+
+ if (unlikely(tlv_value_len != rec_len)) {
+ pr_warn("MAC%u: record %zu size mismatch\n",
+ mac->macid, n_comb);
+ return -EINVAL;
+ }
- /* supported modes: STA, AP */
- limits[rec].types &= BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_AP_VLAN) |
- BIT(NL80211_IFTYPE_STATION);
+ limits = kzalloc(sizeof(*limits) * rec->n_limits,
+ GFP_KERNEL);
+ if (!limits)
+ return -ENOMEM;
- pr_debug("MAC%u: MAX: %u; TYPES: %.4X\n", mac->macid,
- limits[rec].max, limits[rec].types);
+ comb[n_comb].num_different_channels =
+ rec->num_different_channels;
+ comb[n_comb].max_interfaces =
+ le16_to_cpu(rec->max_interfaces);
+ comb[n_comb].n_limits = rec->n_limits;
+ comb[n_comb].limits = limits;
+
+ for (i = 0; i < rec->n_limits; i++) {
+ lim = &rec->limits[i];
+ limits[i].max = le16_to_cpu(lim->max_num);
+ limits[i].types =
+ qlink_iface_type_to_nl_mask(le16_to_cpu(lim->type));
+ pr_debug("MAC%u: comb[%zu]: MAX:%u TYPES:%.4X\n",
+ mac->macid, n_comb,
+ limits[i].max, limits[i].types);
+ }
- if (limits[rec].types)
- rec++;
+ n_comb++;
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ if (unlikely(tlv_value_len > U8_MAX))
+ return -EINVAL;
+ ext_capa = (u8 *)tlv->val;
+ ext_capa_len = tlv_value_len;
+ break;
+ case QTN_TLV_ID_EXT_CAPABILITY_MASK:
+ if (unlikely(tlv_value_len > U8_MAX))
+ return -EINVAL;
+ ext_capa_mask = (u8 *)tlv->val;
+ ext_capa_mask_len = tlv_value_len;
break;
default:
break;
@@ -1106,12 +1243,40 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
return -EINVAL;
}
- if (mac->macinfo.n_limits != rec) {
+ if (mac->macinfo.n_if_comb != n_comb) {
pr_err("MAC%u: combination mismatch: reported=%zu parsed=%zu\n",
- mac->macid, mac->macinfo.n_limits, rec);
+ mac->macid, mac->macinfo.n_if_comb, n_comb);
+ return -EINVAL;
+ }
+
+ if (ext_capa_len != ext_capa_mask_len) {
+ pr_err("MAC%u: ext_capa/_mask lengths mismatch: %u != %u\n",
+ mac->macid, ext_capa_len, ext_capa_mask_len);
return -EINVAL;
}
+ if (ext_capa_len > 0) {
+ ext_capa = kmemdup(ext_capa, ext_capa_len, GFP_KERNEL);
+ if (!ext_capa)
+ return -ENOMEM;
+
+ ext_capa_mask =
+ kmemdup(ext_capa_mask, ext_capa_mask_len, GFP_KERNEL);
+ if (!ext_capa_mask) {
+ kfree(ext_capa);
+ return -ENOMEM;
+ }
+ } else {
+ ext_capa = NULL;
+ ext_capa_mask = NULL;
+ }
+
+ kfree(mac->macinfo.extended_capabilities);
+ kfree(mac->macinfo.extended_capabilities_mask);
+ mac->macinfo.extended_capabilities = ext_capa;
+ mac->macinfo.extended_capabilities_mask = ext_capa_mask;
+ mac->macinfo.extended_capabilities_len = ext_capa_len;
+
return 0;
}
@@ -1143,6 +1308,7 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
mac_info->radar_detect_widths =
qlink_chan_width_mask_to_nl(le16_to_cpu(
resp_info->radar_detect_widths));
+ mac_info->max_acl_mac_addrs = le32_to_cpu(resp_info->max_acl_mac_addrs);
memcpy(&mac_info->ht_cap_mod_mask, &resp_info->ht_cap_mod_mask,
sizeof(mac_info->ht_cap_mod_mask));
@@ -1186,7 +1352,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
size_t tlv_len;
size_t tlv_dlen;
const struct qlink_tlv_hdr *tlv;
- const struct qlink_tlv_channel *qchan;
+ const struct qlink_channel *qchan;
struct ieee80211_channel *chan;
unsigned int chidx = 0;
u32 qflags;
@@ -1232,7 +1398,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
switch (tlv_type) {
case QTN_TLV_ID_CHANNEL:
- if (unlikely(tlv_len != sizeof(*qchan))) {
+ if (unlikely(tlv_dlen != sizeof(*qchan))) {
pr_err("invalid channel TLV len %zu\n",
tlv_len);
goto error_ret;
@@ -1243,7 +1409,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
goto error_ret;
}
- qchan = (const struct qlink_tlv_channel *)tlv;
+ qchan = (const struct qlink_channel *)tlv->val;
chan = &band->channels[chidx++];
qflags = le32_to_cpu(qchan->flags);
@@ -1490,7 +1656,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_MAC_INFO,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
@@ -1528,7 +1694,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
QLINK_CMD_GET_HW_INFO,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(bus);
@@ -1708,7 +1874,7 @@ int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
QLINK_CMD_FW_INIT,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(bus);
@@ -1757,7 +1923,7 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_ADD_KEY,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -1810,7 +1976,7 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_DEL_KEY,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -1851,7 +2017,7 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_SET_DEFAULT_KEY,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -1886,7 +2052,7 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_SET_DEFAULT_MGMT_KEY,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -1941,28 +2107,24 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_CHANGE_STA,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
cmd = (struct qlink_cmd_change_sta *)cmd_skb->data;
ether_addr_copy(cmd->sta_addr, mac);
+ cmd->flag_update.mask =
+ cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_mask));
+ cmd->flag_update.value =
+ cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_set));
switch (vif->wdev.iftype) {
case NL80211_IFTYPE_AP:
cmd->if_type = cpu_to_le16(QLINK_IFTYPE_AP);
- cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags(
- params->sta_flags_mask));
- cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags(
- params->sta_flags_set));
break;
case NL80211_IFTYPE_STATION:
cmd->if_type = cpu_to_le16(QLINK_IFTYPE_STATION);
- cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags(
- params->sta_flags_mask));
- cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags(
- params->sta_flags_set));
break;
default:
pr_err("unsupported iftype %d\n", vif->wdev.iftype);
@@ -1997,7 +2159,7 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_DEL_STA,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -2037,8 +2199,8 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr));
- qchan->center_freq = cpu_to_le16(sc->center_freq);
- qchan->hw_value = cpu_to_le16(sc->hw_value);
+ qchan->chan.center_freq = cpu_to_le16(sc->center_freq);
+ qchan->chan.hw_value = cpu_to_le16(sc->hw_value);
if (sc->flags & IEEE80211_CHAN_NO_IR)
flags |= QLINK_CHAN_NO_IR;
@@ -2046,7 +2208,7 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
if (sc->flags & IEEE80211_CHAN_RADAR)
flags |= QLINK_CHAN_RADAR;
- qchan->flags = cpu_to_le32(flags);
+ qchan->chan.flags = cpu_to_le32(flags);
}
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
@@ -2067,7 +2229,7 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_SCAN,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
@@ -2137,7 +2299,7 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_CONNECT,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
cmd = (struct qlink_cmd_connect *)cmd_skb->data;
@@ -2239,7 +2401,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_DISCONNECT,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -2273,7 +2435,7 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_UPDOWN_INTF,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
cmd = (struct qlink_cmd_updown *)cmd_skb->data;
@@ -2434,8 +2596,7 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
QLINK_CMD_CHAN_SWITCH,
sizeof(*cmd));
-
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
@@ -2487,7 +2648,7 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_CHAN_GET,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(bus);
@@ -2512,3 +2673,83 @@ out:
consume_skb(resp_skb);
return ret;
}
+
+int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
+ const struct cfg80211_chan_def *chdef,
+ u32 cac_time_ms)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_start_cac *cmd;
+ int ret;
+ u16 res_code;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_START_CAC,
+ sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_start_cac *)cmd_skb->data;
+ cmd->cac_time_ms = cpu_to_le32(cac_time_ms);
+ qlink_chandef_cfg2q(chdef, &cmd->chan);
+
+ qtnf_bus_lock(bus);
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+ qtnf_bus_unlock(bus);
+
+ if (ret)
+ return ret;
+
+ switch (res_code) {
+ case QLINK_CMD_RESULT_OK:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
+ const struct cfg80211_acl_data *params)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ struct qlink_tlv_hdr *tlv;
+ size_t acl_size = qtnf_cmd_acl_data_size(params);
+ u16 res_code;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_SET_MAC_ACL,
+ sizeof(struct qlink_cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ tlv = skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+ tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
+ tlv->len = cpu_to_le16(acl_size);
+ qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val);
+
+ qtnf_bus_lock(bus);
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+ qtnf_bus_unlock(bus);
+
+ if (unlikely(ret))
+ return ret;
+
+ switch (res_code) {
+ case QLINK_CMD_RESULT_OK:
+ break;
+ case QLINK_CMD_RESULT_INVALID:
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
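The rewritten qtnf_cmd_sta_info_parse() above only trusts a statistic when two independent conditions hold: the firmware set the matching bit in the STA_STATS_MAP TLV, and the STA_STATS TLV is long enough to actually contain the field (the offsetofend() check). That double guard keeps driver and firmware safe against each other's older, shorter struct layouts. A simplified userspace model of the availability check (struct layout and bit numbers invented; qtnf_utils_is_bit_set() approximated by a local helper):

    #include <stdio.h>
    #include <stddef.h>

    #ifndef offsetofend
    #define offsetofend(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))
    #endif

    /* Trimmed stand-in for struct qlink_sta_stats */
    struct sta_stats {
        unsigned int connected_time;
        unsigned int inactive_time;
        unsigned long long rx_bytes;
    };

    static int bit_set(const unsigned char *map, unsigned int bit,
                       size_t len)
    {
        return bit / 8 < len && (map[bit / 8] & (1 << (bit % 8)));
    }

    /* A stat is usable only if (a) the firmware set its bit in the map
     * and (b) the stats TLV is long enough to contain the field. */
    #define stat_avail(stats_len, map, map_len, field, bit) \
        (bit_set(map, bit, map_len) && \
         offsetofend(struct sta_stats, field) <= (stats_len))

    int main(void)
    {
        unsigned char map[1] = { 0x05 };    /* bits 0 and 2 set */
        size_t stats_len = offsetof(struct sta_stats, rx_bytes);

        printf("connected_time: %d\n",
               stat_avail(stats_len, map, sizeof(map),
                          connected_time, 0));
        printf("rx_bytes: %d\n",    /* bit set, but TLV truncated */
               stat_avail(stats_len, map, sizeof(map), rx_bytes, 2));
        return 0;    /* prints 1, then 0 */
    }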
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index d981a76e5835..69a7d56f7e58 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -76,5 +76,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct cfg80211_csa_settings *params);
int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef);
+int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
+ const struct cfg80211_chan_def *chdef,
+ u32 cac_time_ms);
+int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
+ const struct cfg80211_acl_data *params);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 3423dc51198b..cf26c15a84f8 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -119,9 +119,38 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Netdev handler for getting stats.
*/
-static struct net_device_stats *qtnf_netdev_get_stats(struct net_device *dev)
+static void qtnf_netdev_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
{
- return &dev->stats;
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ unsigned int start;
+ int cpu;
+
+ netdev_stats_to_stats64(stats, &ndev->stats);
+
+ if (!vif->stats64)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct pcpu_sw_netstats *stats64;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+
+ stats64 = per_cpu_ptr(vif->stats64, cpu);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats64->syncp);
+ rx_packets = stats64->rx_packets;
+ rx_bytes = stats64->rx_bytes;
+ tx_packets = stats64->tx_packets;
+ tx_bytes = stats64->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
}
/* Netdev handler for transmission timeout.
@@ -156,7 +185,7 @@ const struct net_device_ops qtnf_netdev_ops = {
.ndo_stop = qtnf_netdev_close,
.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
- .ndo_get_stats = qtnf_netdev_get_stats,
+ .ndo_get_stats64 = qtnf_netdev_get_stats64,
};
static int qtnf_mac_init_single_band(struct wiphy *wiphy,
@@ -233,6 +262,23 @@ struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac)
return vif;
}
+void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac)
+{
+ struct ieee80211_iface_combination *comb;
+ int i;
+
+ if (mac->macinfo.if_comb) {
+ for (i = 0; i < mac->macinfo.n_if_comb; i++) {
+ comb = &mac->macinfo.if_comb[i];
+ kfree(comb->limits);
+ comb->limits = NULL;
+ }
+
+ kfree(mac->macinfo.if_comb);
+ mac->macinfo.if_comb = NULL;
+ }
+}
+
static void qtnf_vif_reset_handler(struct work_struct *work)
{
struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);
@@ -258,13 +304,44 @@ static void qtnf_mac_init_primary_intf(struct qtnf_wmac *mac)
{
struct qtnf_vif *vif = &mac->iflist[QTNF_PRIMARY_VIF_IDX];
- vif->wdev.iftype = NL80211_IFTYPE_AP;
+ vif->wdev.iftype = NL80211_IFTYPE_STATION;
vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
vif->wdev.wiphy = priv_to_wiphy(mac);
INIT_WORK(&vif->reset_work, qtnf_vif_reset_handler);
vif->cons_tx_timeout_cnt = 0;
}
+static void qtnf_mac_scan_finish(struct qtnf_wmac *mac, bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
+ mutex_lock(&mac->mac_lock);
+
+ if (mac->scan_req) {
+ cfg80211_scan_done(mac->scan_req, &info);
+ mac->scan_req = NULL;
+ }
+
+ mutex_unlock(&mac->mac_lock);
+}
+
+void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
+{
+ cancel_delayed_work_sync(&mac->scan_timeout);
+ qtnf_mac_scan_finish(mac, aborted);
+}
+
+static void qtnf_mac_scan_timeout(struct work_struct *work)
+{
+ struct qtnf_wmac *mac =
+ container_of(work, struct qtnf_wmac, scan_timeout.work);
+
+ pr_warn("MAC%d: scan timed out\n", mac->macid);
+ qtnf_mac_scan_finish(mac, true);
+}
+
static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
unsigned int macid)
{
@@ -288,7 +365,12 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
mac->iflist[i].vifid = i;
qtnf_sta_list_init(&mac->iflist[i].sta_list);
mutex_init(&mac->mac_lock);
- timer_setup(&mac->scan_timeout, NULL, 0);
+ INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
+ mac->iflist[i].stats64 =
+ netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!mac->iflist[i].stats64)
+ pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
+ macid, i);
}
qtnf_mac_init_primary_intf(mac);
@@ -297,9 +379,12 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
return mac;
}
+static const struct ethtool_ops qtnf_ethtool_ops = {
+ .get_drvinfo = cfg80211_get_drvinfo,
+};
+
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
- const char *name, unsigned char name_assign_type,
- enum nl80211_iftype iftype)
+ const char *name, unsigned char name_assign_type)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
struct net_device *dev;
@@ -320,12 +405,12 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev->needs_free_netdev = true;
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = &vif->wdev;
- dev->ieee80211_ptr->iftype = iftype;
ether_addr_copy(dev->dev_addr, vif->mac_addr);
SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT;
dev->tx_queue_len = 100;
+ dev->ethtool_ops = &qtnf_ethtool_ops;
qdev_vif = netdev_priv(dev);
*((void **)qdev_vif) = vif;
@@ -366,6 +451,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
}
rtnl_unlock();
qtnf_sta_list_free(&vif->sta_list);
+ free_percpu(vif->stats64);
}
if (mac->wiphy_registered)
@@ -382,8 +468,9 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
wiphy->bands[band] = NULL;
}
- kfree(mac->macinfo.limits);
- kfree(wiphy->iface_combinations);
+ qtnf_mac_iface_comb_free(mac);
+ kfree(mac->macinfo.extended_capabilities);
+ kfree(mac->macinfo.extended_capabilities_mask);
wiphy_free(wiphy);
bus->mac[macid] = NULL;
}
@@ -418,7 +505,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error;
}
- ret = qtnf_cmd_send_add_intf(vif, NL80211_IFTYPE_AP, vif->mac_addr);
+ ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, vif->mac_addr);
if (ret) {
pr_err("MAC%u: failed to add VIF\n", macid);
goto error;
@@ -446,8 +533,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
rtnl_lock();
- ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM,
- NL80211_IFTYPE_AP);
+ ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM);
rtnl_unlock();
if (ret) {
@@ -644,6 +730,46 @@ void qtnf_wake_all_queues(struct net_device *ndev)
}
EXPORT_SYMBOL_GPL(qtnf_wake_all_queues);
+void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct pcpu_sw_netstats *stats64;
+
+ if (unlikely(!vif || !vif->stats64)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ return;
+ }
+
+ stats64 = this_cpu_ptr(vif->stats64);
+
+ u64_stats_update_begin(&stats64->syncp);
+ stats64->rx_packets++;
+ stats64->rx_bytes += skb->len;
+ u64_stats_update_end(&stats64->syncp);
+}
+EXPORT_SYMBOL_GPL(qtnf_update_rx_stats);
+
+void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct pcpu_sw_netstats *stats64;
+
+ if (unlikely(!vif || !vif->stats64)) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ return;
+ }
+
+ stats64 = this_cpu_ptr(vif->stats64);
+
+ u64_stats_update_begin(&stats64->syncp);
+ stats64->tx_packets++;
+ stats64->tx_bytes += skb->len;
+ u64_stats_update_end(&stats64->syncp);
+}
+EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
+
MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver.");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 1b7bc0318f3e..3b884c80b6ab 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -88,6 +88,9 @@ struct qtnf_vif {
struct work_struct reset_work;
struct qtnf_sta_list sta_list;
unsigned long cons_tx_timeout_cnt;
+ int generation;
+
+ struct pcpu_sw_netstats __percpu *stats64;
};
struct qtnf_mac_info {
@@ -102,10 +105,14 @@ struct qtnf_mac_info {
u8 sretry_limit;
u8 coverage_class;
u8 radar_detect_widths;
+ u32 max_acl_mac_addrs;
struct ieee80211_ht_cap ht_cap_mod_mask;
struct ieee80211_vht_cap vht_cap_mod_mask;
- struct ieee80211_iface_limit *limits;
- size_t n_limits;
+ struct ieee80211_iface_combination *if_comb;
+ size_t n_if_comb;
+ u8 *extended_capabilities;
+ u8 *extended_capabilities_mask;
+ u8 extended_capabilities_len;
};
struct qtnf_chan_stats {
@@ -126,7 +133,7 @@ struct qtnf_wmac {
struct qtnf_vif iflist[QTNF_MAX_INTF];
struct cfg80211_scan_request *scan_req;
struct mutex mac_lock; /* lock during wmac specific ops */
- struct timer_list scan_timeout;
+ struct delayed_work scan_timeout;
};
struct qtnf_hw_info {
@@ -138,14 +145,16 @@ struct qtnf_hw_info {
struct ieee80211_regdomain *rd;
u8 total_tx_chain;
u8 total_rx_chain;
+ char fw_version[ETHTOOL_FWVERS_LEN];
+ u32 hw_version;
};
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
+void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
- const char *name, unsigned char name_assign_type,
- enum nl80211_iftype iftype);
+ const char *name, unsigned char name_assign_type);
void qtnf_main_work_queue(struct work_struct *work);
int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
@@ -153,9 +162,13 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
void qtnf_wake_all_queues(struct net_device *ndev);
+void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb);
+void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb);
+
void qtnf_virtual_intf_cleanup(struct net_device *ndev);
void qtnf_netdev_updown(struct net_device *ndev, bool up);
+void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 4abc6d9ed560..bcd415f96412 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -59,10 +59,11 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
pr_debug("VIF%u.%u: MAC:%pM FC:%x\n", mac->macid, vif->vifid, sta_addr,
frame_control);
- qtnf_sta_list_add(&vif->sta_list, sta_addr);
+ qtnf_sta_list_add(vif, sta_addr);
sinfo.assoc_req_ies = NULL;
sinfo.assoc_req_ies_len = 0;
+ sinfo.generation = vif->generation;
payload_len = len - sizeof(*sta_assoc);
tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies;
@@ -132,7 +133,7 @@ qtnf_event_handle_sta_deauth(struct qtnf_wmac *mac, struct qtnf_vif *vif,
pr_debug("VIF%u.%u: MAC:%pM reason:%x\n", mac->macid, vif->vifid,
sta_addr, reason);
- if (qtnf_sta_list_del(&vif->sta_list, sta_addr))
+ if (qtnf_sta_list_del(vif, sta_addr))
cfg80211_del_sta(vif->netdev, sta_deauth->sta_addr,
GFP_KERNEL);
@@ -237,9 +238,8 @@ qtnf_event_handle_mgmt_received(struct qtnf_vif *vif,
pr_debug("%s LEN:%u FC:%.4X SA:%pM\n", vif->netdev->name, frame_len,
le16_to_cpu(frame->frame_control), frame->addr2);
- cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq),
- le32_to_cpu(rxmgmt->sig_dbm), rxmgmt->frame_data,
- frame_len, flags);
+ cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq), rxmgmt->sig_dbm,
+ rxmgmt->frame_data, frame_len, flags);
return 0;
}
@@ -324,7 +324,7 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
sr->bssid, get_unaligned_le64(&sr->tsf),
le16_to_cpu(sr->capab),
le16_to_cpu(sr->bintval), ies, ies_len,
- sr->signal, GFP_KERNEL);
+ DBM_TO_MBM(sr->sig_dbm), GFP_KERNEL);
if (!bss)
return -ENOMEM;
@@ -369,7 +369,8 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
qlink_chandef_q2cfg(wiphy, &data->chan, &chandef);
if (!cfg80211_chandef_valid(&chandef)) {
- pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n", mac->macid,
+ pr_err("MAC%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+ mac->macid, chandef.chan->center_freq,
chandef.center_freq1, chandef.center_freq2,
chandef.width);
return -EINVAL;
@@ -394,6 +395,63 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
return 0;
}
+static int qtnf_event_handle_radar(struct qtnf_vif *vif,
+ const struct qlink_event_radar *ev,
+ u16 len)
+{
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ struct cfg80211_chan_def chandef;
+
+ if (len < sizeof(*ev)) {
+ pr_err("MAC%u: payload is too short\n", vif->mac->macid);
+ return -EINVAL;
+ }
+
+ if (!wiphy->registered || !vif->netdev)
+ return 0;
+
+ qlink_chandef_q2cfg(wiphy, &ev->chan, &chandef);
+
+ if (!cfg80211_chandef_valid(&chandef)) {
+ pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n",
+ vif->mac->macid,
+ chandef.center_freq1, chandef.center_freq2,
+ chandef.width);
+ return -EINVAL;
+ }
+
+ pr_info("%s: radar event=%u f1=%u f2=%u bw=%u\n",
+ vif->netdev->name, ev->event,
+ chandef.center_freq1, chandef.center_freq2,
+ chandef.width);
+
+ switch (ev->event) {
+ case QLINK_RADAR_DETECTED:
+ cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL);
+ break;
+ case QLINK_RADAR_CAC_FINISHED:
+ if (!vif->wdev.cac_started)
+ break;
+
+ cfg80211_cac_event(vif->netdev, &chandef,
+ NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
+ break;
+ case QLINK_RADAR_CAC_ABORTED:
+ if (!vif->wdev.cac_started)
+ break;
+
+ cfg80211_cac_event(vif->netdev, &chandef,
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ break;
+ default:
+ pr_warn("%s: unhandled radar event %u\n",
+ vif->netdev->name, ev->event);
+ break;
+ }
+
+ return 0;
+}
+
static int qtnf_event_parse(struct qtnf_wmac *mac,
const struct sk_buff *event_skb)
{
@@ -448,6 +506,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
ret = qtnf_event_handle_freq_change(mac, (const void *)event,
event_len);
break;
+ case QLINK_EVENT_RADAR:
+ ret = qtnf_event_handle_radar(vif, (const void *)event,
+ event_len);
+ break;
default:
pr_warn("unknown event type: %x\n", event_id);
break;
@@ -479,9 +541,9 @@ static int qtnf_event_process_skb(struct qtnf_bus *bus,
if (unlikely(!mac))
return -ENXIO;
- qtnf_bus_lock(bus);
+ rtnl_lock();
res = qtnf_event_parse(mac, skb);
- qtnf_bus_unlock(bus);
+ rtnl_unlock();
return res;
}
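qtnf_event_handle_radar() above forwards firmware radar reports into cfg80211, but CAC-finished/aborted notifications are delivered only while wdev.cac_started is set, since cfg80211 does not expect CAC events for a CAC it never started. (Note also that qtnf_event_process_skb() now parses events under rtnl_lock rather than the bus lock, since the handlers call into cfg80211.) A tiny standalone model of the dispatch guard (names invented for illustration):

    #include <stdio.h>

    enum radar_event { RADAR_DETECTED, RADAR_CAC_FINISHED,
                       RADAR_CAC_ABORTED };

    /* CAC completion/abort is meaningful only while a CAC is actually
     * running; radar detection itself is always reported. */
    static const char *dispatch(enum radar_event ev, int cac_started)
    {
        switch (ev) {
        case RADAR_DETECTED:
            return "report radar";
        case RADAR_CAC_FINISHED:
            return cac_started ? "CAC finished" : "ignored";
        case RADAR_CAC_ABORTED:
            return cac_started ? "CAC aborted" : "ignored";
        }
        return "unknown";
    }

    int main(void)
    {
        printf("%s\n", dispatch(RADAR_CAC_FINISHED, 0)); /* ignored */
        printf("%s\n", dispatch(RADAR_CAC_FINISHED, 1)); /* finished */
        return 0;
    }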
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index 7e487622d87d..6f6190964320 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -615,8 +615,7 @@ static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
PCI_DMA_TODEVICE);
if (skb->dev) {
- skb->dev->stats.tx_packets++;
- skb->dev->stats.tx_bytes += skb->len;
+ qtnf_update_tx_stats(skb->dev, skb);
if (unlikely(priv->tx_stopped)) {
qtnf_wake_all_queues(skb->dev);
priv->tx_stopped = 0;
@@ -855,9 +854,7 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget)
skb_put(skb, psize);
ndev = qtnf_classify_skb(bus, skb);
if (likely(ndev)) {
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
-
+ qtnf_update_rx_stats(ndev, skb);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(napi, skb);
} else {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index a432fb001c41..9bf3ae4d1b3b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -19,7 +19,7 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 6
+#define QLINK_PROTO_VER 11
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -122,17 +122,49 @@ enum qlink_channel_width {
};
/**
+ * struct qlink_channel - qlink control channel definition
+ *
+ * @hw_value: hardware-specific value for the channel
+ * @center_freq: center frequency in MHz
+ * @flags: channel flags from &enum qlink_channel_flags
+ * @band: band this channel belongs to
+ * @max_antenna_gain: maximum antenna gain in dBi
+ * @max_power: maximum transmission power (in dBm)
+ * @max_reg_power: maximum regulatory transmission power (in dBm)
+ * @dfs_cac_ms: DFS CAC time in milliseconds.
+ * @dfs_state: current state of this channel.
+ * Only relevant if radar is required on this channel.
+ * @beacon_found: helper to regulatory code to indicate when a beacon
+ * has been found on this channel. Use regulatory_hint_found_beacon()
+ * to enable this; it is useful only on the 5 GHz band.
+ */
+struct qlink_channel {
+ __le16 hw_value;
+ __le16 center_freq;
+ __le32 flags;
+ u8 band;
+ u8 max_antenna_gain;
+ u8 max_power;
+ u8 max_reg_power;
+ __le32 dfs_cac_ms;
+ u8 dfs_state;
+ u8 beacon_found;
+ u8 rsvd[2];
+} __packed;
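struct qlink_channel mirrors the host's struct ieee80211_channel using fixed little-endian wire types, so a consumer has to byte-swap the multi-byte members when unpacking. A minimal sketch, assuming a hypothetical helper name:

static void qlink_channel_to_host(const struct qlink_channel *qch,
				  struct ieee80211_channel *chan)
{
	chan->hw_value = le16_to_cpu(qch->hw_value);
	chan->center_freq = le16_to_cpu(qch->center_freq);
	chan->flags = le32_to_cpu(qch->flags);
	chan->band = qch->band;
	chan->max_antenna_gain = qch->max_antenna_gain;
	chan->max_power = qch->max_power;
	chan->max_reg_power = qch->max_reg_power;
	chan->dfs_cac_ms = le32_to_cpu(qch->dfs_cac_ms);
	chan->dfs_state = qch->dfs_state;
	chan->beacon_found = qch->beacon_found;
}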
+
+/**
* struct qlink_chandef - qlink channel definition
*
+ * @chan: primary channel definition
* @center_freq1: center frequency of first segment
* @center_freq2: center frequency of second segment (80+80 only)
* @width: channel width, one of &enum qlink_channel_width
*/
struct qlink_chandef {
+ struct qlink_channel chan;
__le16 center_freq1;
__le16 center_freq2;
u8 width;
- u8 rsvd[3];
+ u8 rsvd;
} __packed;
#define QLINK_MAX_NR_CIPHER_SUITES 5
@@ -153,6 +185,17 @@ struct qlink_auth_encr {
u8 rsvd[2];
} __packed;
+/**
+ * struct qlink_sta_info_state - station flags mask/value
+ *
+ * @mask: STA flags mask, bitmap of &enum qlink_sta_flags
+ * @value: STA flags values, bitmap of &enum qlink_sta_flags
+ */
+struct qlink_sta_info_state {
+ __le32 mask;
+ __le32 value;
+} __packed;
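The mask/value pair follows the same convention as cfg80211's sta_flags_mask/sta_flags_set: a bit in @mask marks the corresponding bit of @value as meaningful. A minimal sketch of the test, with a hypothetical helper name:

static bool qlink_sta_flag_set(const struct qlink_sta_info_state *state,
			       u32 flag)
{
	return (le32_to_cpu(state->mask) & flag) &&
	       (le32_to_cpu(state->value) & flag);
}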
+
/* QLINK Command messages related definitions
*/
@@ -173,6 +216,7 @@ struct qlink_auth_encr {
* @QLINK_CMD_REG_NOTIFY: notify device about regulatory domain change. This
* command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE
* capability.
+ * @QLINK_CMD_START_CAC: start radar detection procedure on a specified channel.
+ * @QLINK_CMD_SET_MAC_ACL: set MAC address access control list.
*/
enum qlink_cmd_type {
QLINK_CMD_FW_INIT = 0x0001,
@@ -192,8 +236,10 @@ enum qlink_cmd_type {
QLINK_CMD_BAND_INFO_GET = 0x001A,
QLINK_CMD_CHAN_SWITCH = 0x001B,
QLINK_CMD_CHAN_GET = 0x001C,
+ QLINK_CMD_START_CAC = 0x001D,
QLINK_CMD_START_AP = 0x0021,
QLINK_CMD_STOP_AP = 0x0022,
+ QLINK_CMD_SET_MAC_ACL = 0x0023,
QLINK_CMD_GET_STA_INFO = 0x0030,
QLINK_CMD_ADD_KEY = 0x0040,
QLINK_CMD_DEL_KEY = 0x0041,
@@ -368,16 +414,14 @@ struct qlink_cmd_set_def_mgmt_key {
/**
* struct qlink_cmd_change_sta - data for QLINK_CMD_CHANGE_STA command
*
- * @sta_flags_mask: STA flags mask, bitmap of &enum qlink_sta_flags
- * @sta_flags_set: STA flags values, bitmap of &enum qlink_sta_flags
+ * @flag_update: STA flags to update
* @if_type: Mode of interface operation, one of &enum qlink_iface_type
* @vlanid: VLAN ID to assign to specific STA
* @sta_addr: address of the STA for which parameters are set.
*/
struct qlink_cmd_change_sta {
struct qlink_cmd chdr;
- __le32 sta_flags_mask;
- __le32 sta_flags_set;
+ struct qlink_sta_info_state flag_update;
__le16 if_type;
__le16 vlanid;
u8 sta_addr[ETH_ALEN];
@@ -585,6 +629,40 @@ struct qlink_cmd_start_ap {
u8 info[0];
} __packed;
+/**
+ * struct qlink_cmd_start_cac - data for QLINK_CMD_START_CAC command
+ *
+ * @chan: a channel to start a radar detection procedure on.
+ * @cac_time_ms: CAC time in milliseconds.
+ */
+struct qlink_cmd_start_cac {
+ struct qlink_cmd chdr;
+ struct qlink_chandef chan;
+ __le32 cac_time_ms;
+} __packed;
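Serializing this command pairs naturally with the qlink_chandef_cfg2q() converter reworked later in this patch. A minimal sketch under that assumption (function name hypothetical):

static void qtnf_fill_start_cac(struct qlink_cmd_start_cac *cmd,
				const struct cfg80211_chan_def *chandef,
				u32 cac_time_ms)
{
	qlink_chandef_cfg2q(chandef, &cmd->chan);
	cmd->cac_time_ms = cpu_to_le32(cac_time_ms);
}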
+
+enum qlink_acl_policy {
+ QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED,
+ QLINK_ACL_POLICY_DENY_UNLESS_LISTED,
+};
+
+struct qlink_mac_address {
+ u8 addr[ETH_ALEN];
+} __packed;
+
+/**
+ * struct qlink_acl_data - ACL data
+ *
+ * @policy: filter policy, one of &enum qlink_acl_policy.
+ * @num_entries: number of MAC addresses in array.
+ * @mac_address: MAC addresses array.
+ */
+struct qlink_acl_data {
+ __le32 policy;
+ __le32 num_entries;
+ struct qlink_mac_address mac_addrs[0];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -646,6 +724,7 @@ struct qlink_resp_get_mac_info {
struct ieee80211_ht_cap ht_cap_mod_mask;
__le16 max_ap_assoc_sta;
__le16 radar_detect_widths;
+ __le32 max_acl_mac_addrs;
u8 bands_cap;
u8 rsvd[1];
u8 var_info[0];
@@ -685,6 +764,9 @@ struct qlink_resp_get_hw_info {
struct qlink_resp rhdr;
__le32 fw_ver;
__le32 hw_capab;
+ __le32 bld_tmstamp;
+ __le32 plat_id;
+ __le32 hw_ver;
__le16 ql_proto_ver;
u8 num_mac;
u8 mac_bitmap;
@@ -709,17 +791,27 @@ struct qlink_resp_manage_intf {
struct qlink_intf_info intf_info;
} __packed;
+enum qlink_sta_info_rate_flags {
+ QLINK_STA_INFO_RATE_FLAG_HT_MCS = BIT(0),
+ QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1),
+ QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2),
+ QLINK_STA_INFO_RATE_FLAG_60G = BIT(3),
+};
+
/**
* struct qlink_resp_get_sta_info - response for QLINK_CMD_GET_STA_INFO command
*
* Response data containing statistics for specified STA.
*
* @sta_addr: MAC address of STA the response carries statistic for.
- * @info: statistics for specified STA.
+ * @info: variable statistics for specified STA.
*/
struct qlink_resp_get_sta_info {
struct qlink_resp rhdr;
u8 sta_addr[ETH_ALEN];
+ u8 rsvd[2];
u8 info[0];
} __packed;
@@ -782,6 +874,7 @@ enum qlink_event_type {
QLINK_EVENT_BSS_JOIN = 0x0026,
QLINK_EVENT_BSS_LEAVE = 0x0027,
QLINK_EVENT_FREQ_CHANGE = 0x0028,
+ QLINK_EVENT_RADAR = 0x0029,
};
/**
@@ -869,15 +962,16 @@ enum qlink_rxmgmt_flags {
* struct qlink_event_rxmgmt - data for QLINK_EVENT_MGMT_RECEIVED event
*
* @freq: Frequency on which the frame was received in MHz.
- * @sig_dbm: signal strength in dBm.
* @flags: bitmap of &enum qlink_rxmgmt_flags.
+ * @sig_dbm: signal strength in dBm.
* @frame_data: data of Rx'd frame itself.
*/
struct qlink_event_rxmgmt {
struct qlink_event ehdr;
__le32 freq;
- __le32 sig_dbm;
__le32 flags;
+ s8 sig_dbm;
+ u8 rsvd[3];
u8 frame_data[0];
} __packed;
@@ -889,7 +983,7 @@ struct qlink_event_rxmgmt {
* event was generated was discovered.
* @capab: capabilities field.
* @bintval: beacon interval announced by discovered BSS.
- * @signal: signal strength.
+ * @sig_dbm: signal strength in dBm.
* @bssid: BSSID announced by discovered BSS.
* @ssid_len: length of SSID announced by BSS.
* @ssid: SSID announced by discovered BSS.
@@ -901,7 +995,7 @@ struct qlink_event_scan_result {
__le16 freq;
__le16 capab;
__le16 bintval;
- s8 signal;
+ s8 sig_dbm;
u8 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 bssid[ETH_ALEN];
@@ -931,9 +1025,39 @@ struct qlink_event_scan_complete {
__le32 flags;
} __packed;
+enum qlink_radar_event {
+ QLINK_RADAR_DETECTED,
+ QLINK_RADAR_CAC_FINISHED,
+ QLINK_RADAR_CAC_ABORTED,
+ QLINK_RADAR_NOP_FINISHED,
+ QLINK_RADAR_PRE_CAC_EXPIRED,
+};
+
+/**
+ * struct qlink_event_radar - data for QLINK_EVENT_RADAR event
+ *
+ * @chan: channel on which radar event happened.
+ * @event: radar event type, one of &enum qlink_radar_event.
+ */
+struct qlink_event_radar {
+ struct qlink_event ehdr;
+ struct qlink_chandef chan;
+ u8 event;
+ u8 rsvd[3];
+} __packed;
+
/* QLINK TLVs (Type-Length Values) definitions
*/
+/**
+ * enum qlink_tlv_id - list of TLVs that Qlink messages can carry
+ *
+ * @QTN_TLV_ID_STA_STATS_MAP: a bitmap of &enum qlink_sta_info, used to
+ * indicate which of the statistics carried in QTN_TLV_ID_STA_STATS are valid.
+ * @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
+ * &struct qlink_sta_stats. Valid values are marked as such in a bitmap
+ * carried by QTN_TLV_ID_STA_STATS_MAP.
+ */
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
QTN_TLV_ID_RTS_THRESH = 0x0202,
@@ -942,15 +1066,24 @@ enum qlink_tlv_id {
QTN_TLV_ID_REG_RULE = 0x0207,
QTN_TLV_ID_CHANNEL = 0x020F,
QTN_TLV_ID_CHANDEF = 0x0210,
+ QTN_TLV_ID_STA_STATS_MAP = 0x0211,
+ QTN_TLV_ID_STA_STATS = 0x0212,
QTN_TLV_ID_COVERAGE_CLASS = 0x0213,
QTN_TLV_ID_IFACE_LIMIT = 0x0214,
QTN_TLV_ID_NUM_IFACE_COMB = 0x0215,
QTN_TLV_ID_CHANNEL_STATS = 0x0216,
- QTN_TLV_ID_STA_BASIC_COUNTERS = 0x0300,
- QTN_TLV_ID_STA_GENERIC_INFO = 0x0301,
QTN_TLV_ID_KEY = 0x0302,
QTN_TLV_ID_SEQ = 0x0303,
QTN_TLV_ID_IE_SET = 0x0305,
+ QTN_TLV_ID_EXT_CAPABILITY_MASK = 0x0306,
+ QTN_TLV_ID_ACL_DATA = 0x0307,
+ QTN_TLV_ID_BUILD_NAME = 0x0401,
+ QTN_TLV_ID_BUILD_REV = 0x0402,
+ QTN_TLV_ID_BUILD_TYPE = 0x0403,
+ QTN_TLV_ID_BUILD_LABEL = 0x0404,
+ QTN_TLV_ID_HW_ID = 0x0405,
+ QTN_TLV_ID_CALIBRATION_VER = 0x0406,
+ QTN_TLV_ID_UBOOT_VER = 0x0407,
};
struct qlink_tlv_hdr {
@@ -959,76 +1092,24 @@ struct qlink_tlv_hdr {
u8 val[0];
} __packed;
-struct qlink_iface_limit {
- __le16 max_num;
- __le16 type;
-} __packed;
-
struct qlink_iface_comb_num {
- __le16 iface_comb_num;
+ __le32 iface_comb_num;
} __packed;
-struct qlink_sta_stat_basic_counters {
- __le64 rx_bytes;
- __le64 tx_bytes;
- __le64 rx_beacons;
- __le32 rx_packets;
- __le32 tx_packets;
- __le32 rx_dropped;
- __le32 tx_failed;
-} __packed;
-
-enum qlink_sta_info_rate_flags {
- QLINK_STA_INFO_RATE_FLAG_INVALID = 0,
- QLINK_STA_INFO_RATE_FLAG_HT_MCS = BIT(0),
- QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1),
- QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2),
- QLINK_STA_INFO_RATE_FLAG_60G = BIT(3),
-};
-
-enum qlink_sta_info_rate_bw {
- QLINK_STA_INFO_RATE_BW_5 = 0,
- QLINK_STA_INFO_RATE_BW_10 = 1,
- QLINK_STA_INFO_RATE_BW_20 = 2,
- QLINK_STA_INFO_RATE_BW_40 = 3,
- QLINK_STA_INFO_RATE_BW_80 = 4,
- QLINK_STA_INFO_RATE_BW_160 = 5,
-};
-
-/**
- * struct qlink_sta_info_rate - STA rate statistics
- *
- * @rate: data rate in Mbps.
- * @flags: bitmap of &enum qlink_sta_flags.
- * @mcs: 802.11-defined MCS index.
- * nss: Number of Spatial Streams.
- * @bw: bandwidth, one of &enum qlink_sta_info_rate_bw.
- */
-struct qlink_sta_info_rate {
- __le16 rate;
- u8 flags;
- u8 mcs;
- u8 nss;
- u8 bw;
+struct qlink_iface_limit {
+ __le16 max_num;
+ __le16 type;
} __packed;
-struct qlink_sta_info_state {
- __le32 mask;
- __le32 value;
+struct qlink_iface_limit_record {
+ __le16 max_interfaces;
+ u8 num_different_channels;
+ u8 n_limits;
+ struct qlink_iface_limit limits[0];
} __packed;
#define QLINK_RSSI_OFFSET 120
-struct qlink_sta_info_generic {
- struct qlink_sta_info_state state;
- __le32 connected_time;
- __le32 inactive_time;
- struct qlink_sta_info_rate rx_rate;
- struct qlink_sta_info_rate tx_rate;
- u8 rssi;
- u8 rssi_avg;
-} __packed;
-
struct qlink_tlv_frag_rts_thr {
struct qlink_tlv_hdr hdr;
__le16 thr;
@@ -1113,19 +1194,16 @@ enum qlink_dfs_state {
QLINK_DFS_AVAILABLE,
};
+/**
+ * struct qlink_tlv_channel - data for QTN_TLV_ID_CHANNEL TLV
+ *
+ * Channel settings.
+ *
+ * @chan: ieee80211 channel settings.
+ */
struct qlink_tlv_channel {
struct qlink_tlv_hdr hdr;
- __le16 hw_value;
- __le16 center_freq;
- __le32 flags;
- u8 band;
- u8 max_antenna_gain;
- u8 max_power;
- u8 max_reg_power;
- __le32 dfs_cac_ms;
- u8 dfs_state;
- u8 beacon_found;
- u8 rsvd[2];
+ struct qlink_channel chan;
} __packed;
/**
@@ -1137,7 +1215,7 @@ struct qlink_tlv_channel {
*/
struct qlink_tlv_chandef {
struct qlink_tlv_hdr hdr;
- struct qlink_chandef chan;
+ struct qlink_chandef chdef;
} __packed;
enum qlink_ie_set_type {
@@ -1176,4 +1254,105 @@ struct qlink_chan_stats {
s8 chan_noise;
} __packed;
+/**
+ * enum qlink_sta_info - station information bitmap
+ *
+ * Used to indicate which statistics values in &struct qlink_sta_stats
+ * are valid. Individual values are used to fill a bitmap carried in a
+ * payload of QTN_TLV_ID_STA_STATS_MAP.
+ *
+ * @QLINK_STA_INFO_CONNECTED_TIME: connected_time value is valid.
+ * @QLINK_STA_INFO_INACTIVE_TIME: inactive_time value is valid.
+ * @QLINK_STA_INFO_RX_BYTES: lower 32 bits of rx_bytes value are valid.
+ * @QLINK_STA_INFO_TX_BYTES: lower 32 bits of tx_bytes value are valid.
+ * @QLINK_STA_INFO_RX_BYTES64: rx_bytes value is valid.
+ * @QLINK_STA_INFO_TX_BYTES64: tx_bytes value is valid.
+ * @QLINK_STA_INFO_RX_DROP_MISC: rx_dropped_misc value is valid.
+ * @QLINK_STA_INFO_BEACON_RX: rx_beacon value is valid.
+ * @QLINK_STA_INFO_SIGNAL: signal value is valid.
+ * @QLINK_STA_INFO_SIGNAL_AVG: signal_avg value is valid.
+ * @QLINK_STA_INFO_RX_BITRATE: rxrate value is valid.
+ * @QLINK_STA_INFO_TX_BITRATE: txrate value is valid.
+ * @QLINK_STA_INFO_RX_PACKETS: rx_packets value is valid.
+ * @QLINK_STA_INFO_TX_PACKETS: tx_packets value is valid.
+ * @QLINK_STA_INFO_TX_RETRIES: tx_retries value is valid.
+ * @QLINK_STA_INFO_TX_FAILED: tx_failed value is valid.
+ * @QLINK_STA_INFO_STA_FLAGS: sta_flags value is valid.
+ */
+enum qlink_sta_info {
+ QLINK_STA_INFO_CONNECTED_TIME,
+ QLINK_STA_INFO_INACTIVE_TIME,
+ QLINK_STA_INFO_RX_BYTES,
+ QLINK_STA_INFO_TX_BYTES,
+ QLINK_STA_INFO_RX_BYTES64,
+ QLINK_STA_INFO_TX_BYTES64,
+ QLINK_STA_INFO_RX_DROP_MISC,
+ QLINK_STA_INFO_BEACON_RX,
+ QLINK_STA_INFO_SIGNAL,
+ QLINK_STA_INFO_SIGNAL_AVG,
+ QLINK_STA_INFO_RX_BITRATE,
+ QLINK_STA_INFO_TX_BITRATE,
+ QLINK_STA_INFO_RX_PACKETS,
+ QLINK_STA_INFO_TX_PACKETS,
+ QLINK_STA_INFO_TX_RETRIES,
+ QLINK_STA_INFO_TX_FAILED,
+ QLINK_STA_INFO_STA_FLAGS,
+ QLINK_STA_INFO_NUM,
+};
+
+/**
+ * struct qlink_sta_info_rate - STA rate statistics
+ *
+ * @rate: data rate in Mbps.
+ * @flags: bitmap of &enum qlink_sta_info_rate_flags.
+ * @mcs: 802.11-defined MCS index.
+ * @nss: Number of Spatial Streams.
+ * @bw: bandwidth, one of &enum qlink_channel_width.
+ */
+struct qlink_sta_info_rate {
+ __le16 rate;
+ u8 flags;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+} __packed;
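When the host fills cfg80211's struct rate_info from this wire format, the data rate has to be rescaled, since rate_info.legacy is expressed in units of 100 kbit/s while @rate is in Mbps. A minimal sketch with a hypothetical helper name (bw mapping omitted; @ri is assumed zero-initialized):

static void qtnf_fill_rate_info(struct rate_info *ri,
				const struct qlink_sta_info_rate *qr)
{
	ri->legacy = le16_to_cpu(qr->rate) * 10;	/* Mbps -> 100 kbit/s */
	ri->mcs = qr->mcs;
	ri->nss = qr->nss;
	if (qr->flags & QLINK_STA_INFO_RATE_FLAG_HT_MCS)
		ri->flags |= RATE_INFO_FLAGS_MCS;
	if (qr->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS)
		ri->flags |= RATE_INFO_FLAGS_VHT_MCS;
	if (qr->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI)
		ri->flags |= RATE_INFO_FLAGS_SHORT_GI;
}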
+
+/**
+ * struct qlink_sta_stats - data for QTN_TLV_ID_STA_STATS
+ *
+ * Carries statistics of a STA. Not all fields may be filled with
+ * valid values. Valid fields should be indicated as such using a bitmap of
+ * &enum qlink_sta_info. The bitmap is carried separately in a payload of
+ * QTN_TLV_ID_STA_STATS_MAP.
+ */
+struct qlink_sta_stats {
+ __le64 rx_bytes;
+ __le64 tx_bytes;
+ __le64 rx_beacon;
+ __le64 rx_duration;
+ __le64 t_offset;
+ __le32 connected_time;
+ __le32 inactive_time;
+ __le32 rx_packets;
+ __le32 tx_packets;
+ __le32 tx_retries;
+ __le32 tx_failed;
+ __le32 rx_dropped_misc;
+ __le32 beacon_loss_count;
+ __le32 expected_throughput;
+ struct qlink_sta_info_state sta_flags;
+ struct qlink_sta_info_rate txrate;
+ struct qlink_sta_info_rate rxrate;
+ __le16 llid;
+ __le16 plid;
+ u8 local_pm;
+ u8 peer_pm;
+ u8 nonpeer_pm;
+ u8 rx_beacon_signal_avg;
+ u8 plink_state;
+ u8 signal;
+ u8 signal_avg;
+ u8 rsvd[1];
+} __packed;
+
#endif /* _QTN_QLINK_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index 61d999affb09..aeeda81b09ea 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -100,34 +100,6 @@ static enum nl80211_chan_width qlink_chanwidth_to_nl(u8 qlw)
}
}
-void qlink_chandef_q2cfg(struct wiphy *wiphy,
- const struct qlink_chandef *qch,
- struct cfg80211_chan_def *chdef)
-{
- chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
- chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
- chdef->width = qlink_chanwidth_to_nl(qch->width);
-
- switch (chdef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- case NL80211_CHAN_WIDTH_5:
- case NL80211_CHAN_WIDTH_10:
- chdef->chan = ieee80211_get_channel(wiphy, chdef->center_freq1);
- break;
- case NL80211_CHAN_WIDTH_40:
- case NL80211_CHAN_WIDTH_80:
- case NL80211_CHAN_WIDTH_80P80:
- case NL80211_CHAN_WIDTH_160:
- chdef->chan = ieee80211_get_channel(wiphy,
- chdef->center_freq1 - 10);
- break;
- default:
- chdef->chan = NULL;
- break;
- }
-}
-
static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth)
{
switch (nlwidth) {
@@ -152,9 +124,29 @@ static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth)
}
}
+void qlink_chandef_q2cfg(struct wiphy *wiphy,
+ const struct qlink_chandef *qch,
+ struct cfg80211_chan_def *chdef)
+{
+ struct ieee80211_channel *chan;
+
+ chan = ieee80211_get_channel(wiphy, le16_to_cpu(qch->chan.center_freq));
+
+ chdef->chan = chan;
+ chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
+ chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
+ chdef->width = qlink_chanwidth_to_nl(qch->width);
+}
+
void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
struct qlink_chandef *qch)
{
+ struct ieee80211_channel *chan = chdef->chan;
+
+ qch->chan.hw_value = cpu_to_le16(chan->hw_value);
+ qch->chan.center_freq = cpu_to_le16(chan->center_freq);
+ qch->chan.flags = cpu_to_le32(chan->flags);
+
qch->center_freq1 = cpu_to_le16(chdef->center_freq1);
qch->center_freq2 = cpu_to_le16(chdef->center_freq2);
qch->width = qlink_chanwidth_nl_to_qlink(chdef->width);
@@ -172,3 +164,33 @@ enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val)
return QLINK_HIDDEN_SSID_NOT_IN_USE;
}
}
+
+bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
+ unsigned int arr_max_len)
+{
+ unsigned int idx = bit / BITS_PER_BYTE;
+ u8 mask = 1 << (bit - (idx * BITS_PER_BYTE));
+
+ if (idx >= arr_max_len)
+ return false;
+
+ return arr[idx] & mask;
+}
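This helper lets the STA-stats parser probe the QTN_TLV_ID_STA_STATS_MAP bitmap against &enum qlink_sta_info without reading past a short TLV. A minimal usage sketch (the wrapper is hypothetical):

static void qtnf_sta_stats_mark_signal(struct station_info *sinfo,
				       const u8 *map, size_t map_len)
{
	if (qtnf_utils_is_bit_set(map, QLINK_STA_INFO_SIGNAL, map_len))
		sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
}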
+
+void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
+ struct qlink_acl_data *qacl)
+{
+ switch (acl->acl_policy) {
+ case NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED:
+ qacl->policy =
+ cpu_to_le32(QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED);
+ break;
+ case NL80211_ACL_POLICY_DENY_UNLESS_LISTED:
+ qacl->policy = cpu_to_le32(QLINK_ACL_POLICY_DENY_UNLESS_LISTED);
+ break;
+ }
+
+ qacl->num_entries = cpu_to_le32(acl->n_acl_entries);
+ memcpy(qacl->mac_addrs, acl->mac_addrs,
+ acl->n_acl_entries * sizeof(*qacl->mac_addrs));
+}
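Because struct qlink_acl_data ends in a flexible array, callers must size the destination buffer as header plus n entries before invoking this converter. A minimal sketch of that computation (helper name hypothetical):

static size_t qtnf_acl_data_size(const struct cfg80211_acl_data *acl)
{
	return sizeof(struct qlink_acl_data) +
	       acl->n_acl_entries * sizeof(struct qlink_mac_address);
}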
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index 260383d6d109..54caeb38917c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -69,5 +69,9 @@ void qlink_chandef_q2cfg(struct wiphy *wiphy,
void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
struct qlink_chandef *qch);
enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val);
+bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
+ unsigned int arr_max_len);
+void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
+ struct qlink_acl_data *qacl);
#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c
index ed38e87471bf..e745733ba417 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.c
@@ -57,9 +57,10 @@ struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
return NULL;
}
-struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif,
const u8 *mac)
{
+ struct qtnf_sta_list *list = &vif->sta_list;
struct qtnf_sta_node *node;
if (unlikely(!mac))
@@ -77,13 +78,15 @@ struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
ether_addr_copy(node->mac_addr, mac);
list_add_tail(&node->list, &list->head);
atomic_inc(&list->size);
+ ++vif->generation;
done:
return node;
}
-bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac)
+bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac)
{
+ struct qtnf_sta_list *list = &vif->sta_list;
struct qtnf_sta_node *node;
bool ret = false;
@@ -93,6 +96,7 @@ bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac)
list_del(&node->list);
atomic_dec(&list->size);
kfree(node);
+ ++vif->generation;
ret = true;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h
index 0359eae8c24b..0d4d92b11540 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.h
@@ -26,9 +26,9 @@ struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list,
const u8 *mac);
struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
size_t index);
-struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif,
const u8 *mac);
-bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac);
+bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac);
void qtnf_sta_list_free(struct qtnf_sta_list *list);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index d2c289446c00..429d07b651dd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -4903,7 +4903,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
min_sleep = 2000;
break;
default:
- WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration",
+ WARN_ONCE(1, "Not supported RF chipset %x for VCO recalibration",
rt2x00dev->chip.rf);
return;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index f4fdad2ed319..72c55d1f8903 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -301,7 +301,7 @@ exit:
return status;
}
-static unsigned int rt2x00debug_poll_queue_dump(struct file *file,
+static __poll_t rt2x00debug_poll_queue_dump(struct file *file,
poll_table *wait)
{
struct rt2x00debug_intf *intf = file->private_data;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index ecc96312a370..a971bc7a6b63 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -142,32 +142,28 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
if (!rt2x00dev->ops->hw->set_rts_threshold &&
(tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
IEEE80211_TX_RC_USE_CTS_PROTECT))) {
- if (rt2x00queue_available(queue) <= 1)
- goto exit_fail;
+ if (rt2x00queue_available(queue) <= 1) {
+ /*
+ * Recheck for full queue under lock to avoid race
+ * conditions with rt2x00lib_txdone().
+ */
+ spin_lock(&queue->tx_lock);
+ if (rt2x00queue_threshold(queue))
+ rt2x00queue_pause_queue(queue);
+ spin_unlock(&queue->tx_lock);
+
+ goto exit_free_skb;
+ }
if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
- goto exit_fail;
+ goto exit_free_skb;
}
if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
- goto exit_fail;
-
- /*
- * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
- * we should not use spin_lock_bh variant as bottom halve was already
- * disabled before ieee80211_xmit() call.
- */
- spin_lock(&queue->tx_lock);
- if (rt2x00queue_threshold(queue))
- rt2x00queue_pause_queue(queue);
- spin_unlock(&queue->tx_lock);
+ goto exit_free_skb;
return;
- exit_fail:
- spin_lock(&queue->tx_lock);
- rt2x00queue_pause_queue(queue);
- spin_unlock(&queue->tx_lock);
exit_free_skb:
ieee80211_free_txskb(hw, skb);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index a2c1ca5c76d1..a6884e73d2ab 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -715,6 +715,14 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rt2x00queue_kick_tx_queue(queue, &txdesc);
out:
+ /*
+ * Pausing queue has to be serialized with rt2x00lib_txdone(), so we
+	 * do this under queue->tx_lock. The bottom half was already disabled
+	 * before the ieee80211_xmit() call.
+ */
+ if (rt2x00queue_threshold(queue))
+ rt2x00queue_pause_queue(queue);
+
spin_unlock(&queue->tx_lock);
return ret;
}
@@ -1244,10 +1252,8 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
- if (!queue) {
- rt2x00_err(rt2x00dev, "Queue allocation failed\n");
+ if (!queue)
return -ENOMEM;
- }
/*
* Initialize pointers
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
index e6668ffb77e6..ff0971f1e2c8 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
@@ -688,10 +688,7 @@ static void rtl8225z2_b_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
cck_power = priv->channels[channel - 1].hw_value & 0xF;
ofdm_power = priv->channels[channel - 1].hw_value >> 4;
- if (cck_power > 15)
- cck_power = (priv->hw_rev == RTL8187BvB) ? 15 : 22;
- else
- cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
+ cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
cck_power += priv->txpwr_base & 0xF;
cck_power = min(cck_power, (u8)35);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 95e3993d8a33..8828baf26e7b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1172,7 +1172,7 @@ struct rtl8723bu_c2h {
u8 basic_rate:1;
u8 bt_has_reset:1;
- u8 dummy4_1:1;;
+ u8 dummy4_1:1;
u8 ignore_wlan:1;
u8 auto_report:1;
u8 dummy4_2:3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index cad2272ae21b..d6c03bd5cc65 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -244,7 +244,8 @@ static void _rtl_init_hw_vht_capab(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE ||
+ rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
u16 mcs_map;
vht_cap->vht_supported = true;
@@ -395,6 +396,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
/* swlps or hwlps has been set in diff chip in init_sw_vars */
if (rtlpriv->psc.swctrl_lps) {
@@ -551,7 +553,8 @@ int rtl_init_core(struct ieee80211_hw *hw)
/* <4> locks */
mutex_init(&rtlpriv->locks.conf_mutex);
- spin_lock_init(&rtlpriv->locks.ips_lock);
+ mutex_init(&rtlpriv->locks.ips_mutex);
+ mutex_init(&rtlpriv->locks.lps_mutex);
spin_lock_init(&rtlpriv->locks.irq_th_lock);
spin_lock_init(&rtlpriv->locks.h2c_lock);
spin_lock_init(&rtlpriv->locks.rf_ps_lock);
@@ -561,9 +564,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
spin_lock_init(&rtlpriv->locks.c2hcmd_lock);
spin_lock_init(&rtlpriv->locks.scan_list_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
- spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
spin_lock_init(&rtlpriv->locks.fw_ps_lock);
- spin_lock_init(&rtlpriv->locks.lps_lock);
spin_lock_init(&rtlpriv->locks.iqk_lock);
/* <5> init list */
INIT_LIST_HEAD(&rtlpriv->entry_list);
@@ -697,14 +698,94 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
}
}
+u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index,
+ enum wireless_mode wirelessmode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 ret = 0;
+
+ switch (rate_index) {
+ case RATR_INX_WIRELESS_NGB:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_BGN_40M_1SS;
+ else
+ ret = RATEID_IDX_BGN_40M_2SS;
+		break;
+ case RATR_INX_WIRELESS_N:
+ case RATR_INX_WIRELESS_NG:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_GN_N1SS;
+ else
+ ret = RATEID_IDX_GN_N2SS;
+		break;
+ case RATR_INX_WIRELESS_NB:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_BGN_20M_1SS_BN;
+ else
+ ret = RATEID_IDX_BGN_20M_2SS_BN;
+		break;
+ case RATR_INX_WIRELESS_GB:
+ ret = RATEID_IDX_BG;
+ break;
+ case RATR_INX_WIRELESS_G:
+ ret = RATEID_IDX_G;
+ break;
+ case RATR_INX_WIRELESS_B:
+ ret = RATEID_IDX_B;
+ break;
+ case RATR_INX_WIRELESS_MC:
+ if (wirelessmode == WIRELESS_MODE_B ||
+ wirelessmode == WIRELESS_MODE_G ||
+ wirelessmode == WIRELESS_MODE_N_24G ||
+ wirelessmode == WIRELESS_MODE_AC_24G)
+ ret = RATEID_IDX_BG;
+ else
+ ret = RATEID_IDX_G;
+ break;
+ case RATR_INX_WIRELESS_AC_5N:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_VHT_1SS;
+ else
+ ret = RATEID_IDX_VHT_2SS;
+ break;
+ case RATR_INX_WIRELESS_AC_24N:
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_VHT_1SS;
+ else
+ ret = RATEID_IDX_VHT_2SS;
+ } else {
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_MIX1;
+ else
+ ret = RATEID_IDX_MIX2;
+ }
+ break;
+ default:
+ ret = RATEID_IDX_BGN_40M_2SS;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rtl_mrate_idx_to_arfr_id);
+
static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct rtl_tcb_desc *tcb_desc)
{
+#define SET_RATE_ID(rate_id) \
+ ({typeof(rate_id) _id = rate_id; \
+ ((rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID) ? \
+ rtl_mrate_idx_to_arfr_id(hw, _id, \
+ (sta_entry ? sta_entry->wireless_mode : \
+ WIRELESS_MODE_G)) : \
+ _id); })
+
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_sta_info *sta_entry = NULL;
- u8 ratr_index = 7;
+ u8 ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC);
if (sta) {
sta_entry = (struct rtl_sta_info *) sta->drv_priv;
@@ -719,7 +800,8 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
tcb_desc->hw_rate =
rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
tcb_desc->use_driver_rate = 1;
- tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_MC);
} else {
tcb_desc->ratr_index = ratr_index;
}
@@ -735,22 +817,30 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
tcb_desc->mac_id = 0;
- if (mac->mode == WIRELESS_MODE_AC_5G)
+ if (sta &&
+ (rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID))
+ ; /* use sta_entry->ratr_index */
+ else if (mac->mode == WIRELESS_MODE_AC_5G)
tcb_desc->ratr_index =
- RATR_INX_WIRELESS_AC_5N;
+ SET_RATE_ID(RATR_INX_WIRELESS_AC_5N);
else if (mac->mode == WIRELESS_MODE_AC_24G)
tcb_desc->ratr_index =
- RATR_INX_WIRELESS_AC_24N;
+ SET_RATE_ID(RATR_INX_WIRELESS_AC_24N);
else if (mac->mode == WIRELESS_MODE_N_24G)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_NGB);
else if (mac->mode == WIRELESS_MODE_N_5G)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_NG;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_NG);
else if (mac->mode & WIRELESS_MODE_G)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_GB);
else if (mac->mode & WIRELESS_MODE_B)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_B);
else if (mac->mode & WIRELESS_MODE_A)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_G;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_G);
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
@@ -764,6 +854,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
}
}
}
+#undef SET_RATE_ID
}
static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
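SET_RATE_ID() above relies on the GNU statement-expression extension: the ({ ... }) block evaluates its argument once into a local and yields the last expression as the macro's value, so rate_id is never expanded twice. A standalone, purely illustrative example of the pattern:

#define CLAMP_U8(x)				\
	({ int _x = (x);			\
	   _x > 255 ? 255 : _x; })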
@@ -859,8 +950,8 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 hw_rate;
- if ((get_rf_type(rtlphy) == RF_2T2R) &&
- (sta->ht_cap.mcs.rx_mask[1] != 0))
+ if (get_rf_type(rtlphy) == RF_2T2R &&
+ sta->ht_cap.mcs.rx_mask[1] != 0)
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
else
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
@@ -1140,9 +1231,19 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
{
+#define SET_RATE_ID(rate_id) \
+ ({typeof(rate_id) _id = rate_id; \
+ ((rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID) ? \
+ rtl_mrate_idx_to_arfr_id(hw, _id, \
+ (sta_entry ? sta_entry->wireless_mode : \
+ WIRELESS_MODE_G)) : \
+ _id); })
+
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ struct rtl_sta_info *sta_entry =
+ (sta ? (struct rtl_sta_info *)sta->drv_priv : NULL);
__le16 fc = rtl_get_fc(skb);
@@ -1165,7 +1266,8 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
if (info->control.rates[0].idx == 0 ||
ieee80211_is_nullfunc(fc)) {
tcb_desc->use_driver_rate = true;
- tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_MC);
tcb_desc->disable_ratefallback = 1;
} else {
@@ -1180,7 +1282,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
tcb_desc->hw_rate =
_rtl_get_vht_highest_n_rate(hw, sta);
} else {
- if (sta && (sta->ht_cap.ht_supported)) {
+ if (sta && sta->ht_cap.ht_supported) {
tcb_desc->hw_rate =
_rtl_get_highest_n_rate(hw, sta);
} else {
@@ -1207,11 +1309,12 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
_rtl_query_protection_mode(hw, tcb_desc, info);
} else {
tcb_desc->use_driver_rate = true;
- tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ tcb_desc->ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC);
tcb_desc->disable_ratefallback = 1;
tcb_desc->mac_id = 0;
tcb_desc->packet_bw = false;
}
+#undef SET_RATE_ID
}
EXPORT_SYMBOL(rtl_get_tcb_desc);
@@ -1229,7 +1332,6 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
}
if (ieee80211_is_auth(fc)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
- rtl_ips_nic_on(hw);
mac->link_state = MAC80211_LINKING;
/* Dul mac */
@@ -1321,6 +1423,10 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
le16_to_cpu(mgmt->u.action.u.addba_req.capab);
tid = (capab &
IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+ if (tid >= MAX_TID_COUNT) {
+ rcu_read_unlock();
+ return true;
+ }
tid_data = &sta_entry->tids[tid];
if (tid_data->agg.rx_agg_state ==
RTL_RX_AGG_START)
@@ -1726,7 +1832,7 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
{
struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
- u8 reject_agg, ctrl_agg_size = 0, agg_size;
+ u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0;
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
@@ -1972,9 +2078,9 @@ void rtl_watchdog_wq_callback(void *data)
rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv))
goto label_lps_done;
- if (((rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2))
+ if (rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod > 8 ||
+ rtlpriv->link_info.num_rx_inperiod > 2)
rtl_lps_leave(hw);
else
rtl_lps_enter(hw);
@@ -2225,9 +2331,7 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
case IEEE80211_SMPS_AUTOMATIC:/* 0 */
case IEEE80211_SMPS_NUM_MODES:/* 4 */
WARN_ON(1);
- /* Here will get a 'MISSING_BREAK' in Coverity Test, just ignore it.
- * According to Kernel Code, here is right.
- */
+ /* fall through */
case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
@@ -2528,6 +2632,9 @@ static int __init rtl_core_module_init(void)
if (rtl_rate_control_register())
pr_err("rtl: Unable to register rtl_rc, use default RC !!\n");
+ /* add debugfs */
+ rtl_debugfs_add_topdir();
+
/* init some global vars */
INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
spin_lock_init(&rtl_global_var.glb_list_lock);
@@ -2539,6 +2646,9 @@ static void __exit rtl_core_module_exit(void)
{
/*RC*/
rtl_rate_control_unregister();
+
+ /* remove debugfs */
+ rtl_debugfs_remove_topdir();
}
module_init(rtl_core_module_init);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index 26735319b38f..acc924635818 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -162,6 +162,8 @@ void rtl_c2hcmd_wq_callback(void *data);
void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec);
void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val);
+u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index,
+ enum wireless_mode wirelessmode);
void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 44c25724529e..8fce371749d3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -2704,11 +2704,11 @@ void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
btc8192e2ant_init_coex_dm(btcoexist);
}
-void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
u16 u16tmp[4];
u32 u32tmp[4];
@@ -2719,75 +2719,64 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
u8 wifi_dot11_chnl, wifi_hs_chnl;
u32 fw_ver = 0, bt_patch_ver = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ seq_puts(m, "\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ===========[Under Manual Control]===========");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
- }
-
- if (!board_info->bt_exist) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
- return;
+ seq_puts(m, "\n ===========[Under Manual Control]===========");
+ seq_puts(m, "\n ==========================================");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+ seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
board_info->pg_ant_num, board_info->btdm_ant_num);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
- "BT stack/ hci ext ver",
+ seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
((stack_info->profile_notified) ? "Yes" : "No"),
stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
+ seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsMode(HsChnl)",
- wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsMode(HsChnl)",
+ wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+ seq_printf(m, "\n %-35s = %3ph ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan", link, roam, scan);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
- "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ seq_printf(m, "\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
(((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
((!wifi_busy) ? "idle" :
((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
"uplink" : "downlink")));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
- "BT [status/ rssi/ retryCnt]",
+ seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
((btcoexist->bt_info.bt_disabled) ? ("disabled") :
((coex_sta->c2h_bt_inquiry_page) ?
("inquiry/page scan") :
@@ -2797,131 +2786,129 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
coex_dm->bt_status) ? "connected-idle" : "busy")))),
coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
- "SCO/HID/PAN/A2DP", stack_info->sco_exist,
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP", stack_info->sco_exist,
stack_info->hid_exist, stack_info->pan_exist,
stack_info->a2dp_exist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
+ seq_printf(m, "\n %-35s = %s",
+ "BT Info A2DP rate",
(bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8192e_2ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8192e_2ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
- "PS state, IPS/LPS",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type",
- coex_dm->cur_ss_type);
+ seq_printf(m, "\n %-35s = 0x%x ", "SS Type",
+ coex_dm->cur_ss_type);
/* Sw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Sw mechanism]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
- coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
- "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
- coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
- coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "Rate Mask",
- btcoexist->bt_info.ra_mask);
+ seq_printf(m, "\n %-35s",
+ "============[Sw mechanism]============");
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+
+ seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask",
+ btcoexist->bt_info.ra_mask);
/* Fw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
ps_tdma_case = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
- "PS TDMA", coex_dm->ps_tdma_para,
- ps_tdma_case, coex_dm->auto_tdma_adjust);
+ seq_printf(m,
+ "\n %-35s = %5ph case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para,
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "DecBtPwr/ IgnWlanAct",
- coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct",
+ coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
/* Hw setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Hw setting]============");
+ seq_printf(m, "\n %-35s",
+ "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
- "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ seq_printf(m, "\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
- coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
- coex_dm->backup_ampdu_maxtime);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
+ coex_dm->backup_ampdu_maxtime);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "0x430/0x434/0x42a/0x456",
- u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0x778",
- u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x", "0x778", u8tmp[0]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0xc50(dig)",
- u32tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)",
+ u32tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
- u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770(hp rx[31:16]/tx[15:0])",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x774(lp rx[31:16]/tx[15:0])",
- coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ seq_printf(m,
+ "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x770(hp rx[31:16]/tx[15:0])",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x774(lp rx[31:16]/tx[15:0])",
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
if (btcoexist->auto_report_2ant)
btc8192e2ant_monitor_bt_ctr(btcoexist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
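With the RT_TRACE-to-seq_printf conversion, these dump routines print into a struct seq_file supplied by the new rtlwifi debugfs layer. A minimal sketch of how a seq_file-backed debugfs entry is typically wired up (names hypothetical, not the driver's actual registration path):

static int coex_info_show(struct seq_file *m, void *v)
{
	struct btc_coexist *btcoexist = m->private;

	ex_btc8192e2ant_display_coex_info(btcoexist, m);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(coex_info);

/* registered via:
 * debugfs_create_file("coex_info", 0444, parent, btcoexist, &coex_info_fops);
 */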
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
index 65502acee52c..b8c95c7afc06 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
@@ -180,4 +180,5 @@ void ex_btc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
u8 type);
void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist);
void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 5f726f6d3567..fd3b1fb35dff 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -2474,12 +2474,12 @@ void ex_btc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
halbtc8723b1ant_query_bt_info(btcoexist);
}
-void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u8tmp[4], i, bt_info_ext, pstdmacase = 0;
u16 u16tmp[4];
u32 u32tmp[4];
@@ -2491,62 +2491,56 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
u8 wifi_dot11_chnl, wifi_hs_chnl;
u32 fw_ver = 0, bt_patch_ver = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ seq_puts(m, "\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Under Manual Control]==========");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ seq_puts(m, "\n ============[Under Manual Control]==========");
+ seq_puts(m, "\n ==========================================");
}
if (btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Coex is STOPPED]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ seq_puts(m, "\n ============[Coex is STOPPED]============");
+ seq_puts(m, "\n ==========================================");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
- "Ant PG Num/ Ant Mech/ Ant Pos:",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d",
+ "Ant PG Num/ Ant Mech/ Ant Pos:",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
- "BT stack/ hci ext ver",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ seq_printf(m, "\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
+ seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(HsMode)",
- wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info",
- coex_dm->wifi_chnl_info);
+ seq_printf(m, "\n %-35s = %3ph ",
+ "H2C Wifi inform bt chnl Info",
+ coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan", link, roam, scan);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
btcoexist->btc_get(btcoexist , BTC_GET_BL_WIFI_UNDER_5G,
&wifi_under_5g);
@@ -2555,110 +2549,106 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
- "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
- ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
- ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
- ((!wifi_busy) ? "idle" :
- ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
- "uplink" : "downlink")));
+ seq_printf(m, "\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+ ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
+ ((!wifi_busy) ? "idle" :
+ ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+ "uplink" : "downlink")));
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
&wifi_link_status);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d/ %d/ %d",
- "sta/vwifi/hs/p2pGo/p2pGc",
- ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
- "BT [status/ rssi/ retryCnt]",
- ((coex_sta->bt_disabled) ? ("disabled") :
- ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
- ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
- coex_dm->bt_status) ?
- "non-connected idle" :
- ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
- coex_dm->bt_status) ?
- "connected-idle" : "busy")))),
- coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d / %d / %d",
- "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
- bt_link_info->hid_exist, bt_link_info->pan_exist,
- bt_link_info->a2dp_exist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d",
+ "sta/vwifi/hs/p2pGo/p2pGc",
+ ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+
+ seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
+ ((coex_sta->bt_disabled) ? ("disabled") :
+ ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
+ ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) ?
+ "non-connected idle" :
+ ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) ?
+ "connected-idle" : "busy")))),
+ coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
+ bt_link_info->hid_exist, bt_link_info->pan_exist,
+ bt_link_info->a2dp_exist);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
- (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ seq_printf(m, "\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8723b_1ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8723b_1ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s/%s, (0x%x/0x%x)",
- "PS state, IPS/LPS, (lps/rpwm)",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
- btcoexist->bt_info.lps_val,
- btcoexist->bt_info.rpwm_val);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)",
+ "PS state, IPS/LPS, (lps/rpwm)",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+ btcoexist->bt_info.lps_val,
+ btcoexist->bt_info.rpwm_val);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
if (!btcoexist->manual_control) {
/* Sw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Sw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Sw mechanism]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/",
- "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
+ seq_printf(m, "\n %-35s = %d/",
+ "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/ %s/ %d ",
- "DelBA/ BtCtrlAgg/ AggSize",
+ seq_printf(m, "\n %-35s = %s/ %s/ %d ",
+ "DelBA/ BtCtrlAgg/ AggSize",
(btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
(btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
btcoexist->bt_info.agg_buf_size);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
- "Rate Mask", btcoexist->bt_info.ra_mask);
+ seq_printf(m, "\n %-35s = 0x%x ",
+ "Rate Mask", btcoexist->bt_info.ra_mask);
/* Fw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
pstdmacase = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
+ seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
"PS TDMA", coex_dm->ps_tdma_para,
pstdmacase, coex_dm->auto_tdma_adjust);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ",
- "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %d ",
+ "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
- "Latest error condition(should be 0)",
+ seq_printf(m, "\n %-35s = 0x%x ",
+ "Latest error condition(should be 0)",
coex_dm->error_condition);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d",
- "Coex Table Type", coex_sta->coex_table_type);
+ seq_printf(m, "\n %-35s = %d",
+ "Coex Table Type", coex_sta->coex_table_type);
/* Hw setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Hw setting]============");
+ seq_printf(m, "\n %-35s",
+ "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
coex_dm->backup_ampdu_max_time);
@@ -2666,50 +2656,49 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "0x430/0x434/0x42a/0x456",
- u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
- (u32tmp[1] & 0x3e000000) >> 25);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
+ (u32tmp[1] & 0x3e000000) >> 25);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x948/ 0x67[5] / 0x765",
- u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
- u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
- ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
- ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+ ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -2727,26 +2716,26 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
(u32tmp[3] & 0xffff);
fa_cck = (u8tmp[0] << 8) + u8tmp[1];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "OFDM-CCA/OFDM-FA/CCK-FA",
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA",
u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8(coexTable)",
- u32tmp[0], u32tmp[1], u32tmp[2]);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
- coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
- coex_sta->low_priority_tx);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2]);
+
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
+ coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
if (btcoexist->auto_report_1ant)
halbtc8723b1ant_monitor_bt_ctr(btcoexist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
index 8d4fde235e11..934f27893c16 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
@@ -220,5 +220,6 @@ void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist);
void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
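The reason each display helper now takes a struct seq_file *m is that the coex report is rendered on demand into a seq_file instead of being logged through RT_TRACE. A minimal sketch of the kind of debugfs hook that can drive these helpers follows; the rtl_btcoex_* names and the file name are illustrative, not part of this patch:

	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>

	/* Hypothetical show routine; m->private carries the coexist context. */
	static int rtl_btcoex_show(struct seq_file *m, void *v)
	{
		struct btc_coexist *btcoexist = m->private;

		ex_btc8723b1ant_display_coex_info(btcoexist, m);
		return 0;
	}

	static int rtl_btcoex_open(struct inode *inode, struct file *file)
	{
		return single_open(file, rtl_btcoex_show, inode->i_private);
	}

	static const struct file_operations rtl_btcoex_fops = {
		.owner		= THIS_MODULE,
		.open		= rtl_btcoex_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* Registration from driver init (path name illustrative):
	 * debugfs_create_file("btcoex_info", 0444, parent, btcoexist,
	 *		       &rtl_btcoex_fops);
	 */

single_open() stashes the btc_coexist pointer in m->private, so every read of the file regenerates the full report.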
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index e8f07573aed9..4907c2ffadfe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -3780,12 +3780,12 @@ void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
btc8723b2ant_init_coex_dm(btcoexist);
}
-void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
u32 u32tmp[4];
bool roam = false, scan = false;
@@ -3797,173 +3797,161 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
u32 fw_ver = 0, bt_patch_ver = 0;
u8 ap_num = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ seq_puts(m, "\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========[Under Manual Control]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
- }
-
- if (!board_info->bt_exist) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
- return;
+ seq_puts(m, "\n ==========[Under Manual Control]============");
+ seq_puts(m, "\n ==========================================");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "Ant PG number/ Ant mechanism:",
- board_info->pg_ant_num, board_info->btdm_ant_num);
+ seq_printf(m, "\n %-35s = %d/ %d ",
+ "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
- "BT stack/ hci ext ver",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ seq_printf(m, "\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
+ seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(HsMode)",
- wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+ seq_printf(m, "\n %-35s = %3ph ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
- "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d",
+ "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan", link, roam, scan);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
- "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ seq_printf(m, "\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
(((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
((!wifi_busy) ? "idle" :
((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
"uplink" : "downlink")));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
- "SCO/HID/PAN/A2DP",
- bt_link_info->sco_exist, bt_link_info->hid_exist,
- bt_link_info->pan_exist, bt_link_info->a2dp_exist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist, bt_link_info->hid_exist,
+ bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
- (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ seq_printf(m, "\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8723b_2ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8723b_2ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
- "PS state, IPS/LPS",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
/* Sw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Sw mechanism]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
- coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
- "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
- coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
- coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ seq_printf(m,
+ "\n %-35s", "============[Sw mechanism]============");
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
/* Fw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
ps_tdma_case = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
- "PS TDMA", coex_dm->ps_tdma_para,
- ps_tdma_case, coex_dm->auto_tdma_adjust);
+ seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para,
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
- coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
+ coex_dm->cur_ignore_wlan_act);
/* Hw setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Hw setting]============");
+ seq_printf(m, "\n %-35s",
+ "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
- "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ seq_printf(m, "\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x778/0x880[29:25]", u8tmp[0],
- (u32tmp[0] & 0x3e000000) >> 25);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x778/0x880[29:25]", u8tmp[0],
+ (u32tmp[0] & 0x3e000000) >> 25);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x948/ 0x67[5] / 0x765",
- u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
- u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
- ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
- ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+ ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -3981,29 +3969,27 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
(u32tmp[3] & 0xffff);
fa_cck = (u8tmp[0] << 8) + u8tmp[1];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "OFDM-CCA/OFDM-FA/CCK-FA",
- u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA",
+ u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
- u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770(high-pri rx/tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
- coex_sta->low_priority_tx);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
if (btcoexist->auto_report_2ant)
btc8723b2ant_monitor_bt_ctr(btcoexist);
- btcoexist->btc_disp_dbg_msg(btcoexist,
- BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
index bc1e3042e271..aa24da402fb9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -195,7 +195,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
u8 *tmpbuf, u8 length);
void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
void ex_btc8723b2ant_pre_load_firmware(struct btc_coexist *btcoexist);
void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist);
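The %3ph/%5ph/%7ph conversions preserved from the old format strings are the kernel's pointer extension for hex dumps: %<N>ph prints the first N bytes behind the pointer as space-separated hex, which is why the callers can hand seq_printf() a raw byte array instead of formatting each byte themselves. A standalone illustration with a hypothetical buffer:

	#include <linux/seq_file.h>

	static void demo_dump_tdma(struct seq_file *m)
	{
		/* Hypothetical 5-byte PS-TDMA parameter set. */
		const u8 ps_tdma_para[5] = { 0x61, 0x35, 0x03, 0x11, 0x11 };

		/* Emits: " PS TDMA ... = 61 35 03 11 11 case-2" */
		seq_printf(m, "\n %-35s = %5ph case-%d",
			   "PS TDMA", ps_tdma_para, 2);
	}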
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 4efac5fe9982..0b26419881c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -2172,12 +2172,12 @@ void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
btc8821a1ant_query_bt_info(btcoexist);
}
-void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u1_tmp[4], i, bt_info_ext, ps_tdma_case = 0;
u16 u2_tmp[4];
u32 u4_tmp[4];
@@ -2188,49 +2188,36 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
u8 wifi_dot11_chnl, wifi_hs_chnl;
u32 fw_ver = 0, bt_patch_ver = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ seq_puts(m, "\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Under Manual Control]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ seq_puts(m, "\n ============[Under Manual Control]============");
+ seq_puts(m, "\n ==========================================");
}
if (btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Coex is STOPPED]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
- }
-
- if (!board_info->bt_exist) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
- return;
+ seq_puts(m, "\n ============[Coex is STOPPED]============");
+ seq_puts(m, "\n ==========================================");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d",
- "Ant PG Num/ Ant Mech/ Ant Pos:",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d",
+ "Ant PG Num/ Ant Mech/ Ant Pos:",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8821a_1ant,
- glcoex_ver_8821a_1ant,
- fw_ver, bt_patch_ver,
- bt_patch_ver);
+ seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8821a_1ant,
+ glcoex_ver_8821a_1ant,
+ fw_ver, bt_patch_ver,
+ bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION,
&bt_hs_on);
@@ -2238,28 +2225,24 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL,
&wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(HsMode)",
- wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info",
- coex_dm->wifi_chnl_info);
+ seq_printf(m, "\n %-35s = %3ph ",
+ "H2C Wifi inform bt chnl Info",
+ coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
- (int)wifi_rssi, (int)bt_hs_rssi);
+ seq_printf(m, "\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
+ (int)wifi_rssi, (int)bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
- link, roam, scan);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+ link, roam, scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
&wifi_under_5g);
@@ -2269,16 +2252,15 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
&wifi_busy);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %s/ %s ", "Wifi status",
- (wifi_under_5g ? "5G" : "2.4G"),
- ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
- (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
- ((!wifi_busy) ? "idle" :
- ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
- "uplink" : "downlink")));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
+ seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status",
+ (wifi_under_5g ? "5G" : "2.4G"),
+ ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+ (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
+ ((!wifi_busy) ? "idle" :
+ ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+ "uplink" : "downlink")));
+ seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
((coex_sta->bt_disabled) ? ("disabled") :
((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
((BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
@@ -2289,166 +2271,143 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
"connected-idle" : "busy")))),
coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
- bt_link_info->sco_exist,
- bt_link_info->hid_exist,
- bt_link_info->pan_exist,
- bt_link_info->a2dp_exist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist,
+ bt_link_info->hid_exist,
+ bt_link_info->pan_exist,
+ bt_link_info->a2dp_exist);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s",
- "BT Info A2DP rate",
- (bt_info_ext & BIT0) ?
- "Basic rate" : "EDR rate");
+ seq_printf(m, "\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ?
+ "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8821a_1ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8821a_1ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s/%s, (0x%x/0x%x)",
- "PS state, IPS/LPS, (lps/rpwm)",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
- btcoexist->bt_info.lps_val,
- btcoexist->bt_info.rpwm_val);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)",
+ "PS state, IPS/LPS, (lps/rpwm)",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+ btcoexist->bt_info.lps_val,
+ btcoexist->bt_info.rpwm_val);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
if (!btcoexist->manual_control) {
/* Sw mechanism*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s",
- "============[Sw mechanism]============");
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d", "SM[LowPenaltyRA]",
- coex_dm->cur_low_penalty_ra);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s/ %s/ %d ",
- "DelBA/ BtCtrlAgg/ AggSize",
- (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
- (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
- btcoexist->bt_info.agg_buf_size);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x ", "Rate Mask",
- btcoexist->bt_info.ra_mask);
+ seq_printf(m, "\n %-35s",
+ "============[Sw mechanism]============");
+
+ seq_printf(m, "\n %-35s = %d", "SM[LowPenaltyRA]",
+ coex_dm->cur_low_penalty_ra);
+
+ seq_printf(m, "\n %-35s = %s/ %s/ %d ",
+ "DelBA/ BtCtrlAgg/ AggSize",
+ (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
+ (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
+ btcoexist->bt_info.agg_buf_size);
+ seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask",
+ btcoexist->bt_info.ra_mask);
/* Fw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
ps_tdma_case = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
- "PS TDMA",
- coex_dm->ps_tdma_para,
- ps_tdma_case,
- coex_dm->auto_tdma_adjust);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x ",
- "Latest error condition(should be 0)",
+ seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
+ "PS TDMA",
+ coex_dm->ps_tdma_para,
+ ps_tdma_case,
+ coex_dm->auto_tdma_adjust);
+
+ seq_printf(m, "\n %-35s = 0x%x ",
+ "Latest error condition(should be 0)",
coex_dm->error_condition);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d ", "IgnWlanAct",
- coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %d ", "IgnWlanAct",
+ coex_dm->cur_ignore_wlan_act);
}
/* Hw setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Hw setting]============");
+ seq_printf(m, "\n %-35s", "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "backup ARFR1/ARFR2/RL/AMaxTime",
- coex_dm->backup_arfr_cnt1,
- coex_dm->backup_arfr_cnt2,
- coex_dm->backup_retry_limit,
- coex_dm->backup_ampdu_max_time);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime",
+ coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2,
+ coex_dm->backup_retry_limit,
+ coex_dm->backup_ampdu_max_time);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
u2_tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "0x430/0x434/0x42a/0x456",
- u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc58);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
- u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
+ u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x", "0x8db[6:5]",
- ((u1_tmp[0] & 0x60) >> 5));
+ seq_printf(m, "\n %-35s = 0x%x", "0x8db[6:5]",
+ ((u1_tmp[0] & 0x60) >> 5));
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x975);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
- (u4_tmp[0] & 0x30000000) >> 28,
- u4_tmp[0] & 0xff,
- u1_tmp[0] & 0x3);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
+ (u4_tmp[0] & 0x30000000) >> 28,
+ u4_tmp[0] & 0xff,
+ u1_tmp[0] & 0x3);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x64);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x40/0x4c[24:23]/0x64[0]",
- u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23), u1_tmp[1] & 0x1);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x40/0x4c[24:23]/0x64[0]",
+ u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23),
+ u1_tmp[1] & 0x1);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
- u4_tmp[0], u1_tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
+ u4_tmp[0], u1_tmp[0]);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x", "0xc50(dig)",
- u4_tmp[0] & 0xff);
+ seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)",
+ u4_tmp[0] & 0xff);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5d);
u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
- u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
+ u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
- coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
if (btcoexist->auto_report_1ant)
btc8821a1ant_monitor_bt_ctr(btcoexist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
index b0a6626fbb66..a498ff5b1b9c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
@@ -186,7 +186,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist);
void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
void ex_btc8821a1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code,
u8 op_len, u8 *data);
void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 41943c34edff..d5f282cb9d24 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -3657,11 +3657,11 @@ void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
btc8821a2ant_init_coex_dm(btcoexist);
}
-void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u1tmp[4], i, bt_info_ext, ps_tdma_case = 0;
u32 u4tmp[4];
bool roam = false, scan = false, link = false, wifi_under_5g = false;
@@ -3671,32 +3671,22 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
u8 wifi_dot_11_chnl, wifi_hs_chnl;
u32 fw_ver = 0, bt_patch_ver = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
-
- if (!board_info->bt_exist) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
- return;
- }
+ seq_puts(m, "\n ============[BT Coexist info]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
- board_info->pg_ant_num, board_info->btdm_ant_num);
+ seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "[Action Manual control]!!");
+ seq_printf(m, "\n %-35s", "[Action Manual control]!!");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
+ seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
((stack_info->profile_notified) ? "Yes" : "No"),
stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+ seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
"CoexVer/ FwVer/ PatchVer",
glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
fw_ver, bt_patch_ver, bt_patch_ver);
@@ -3707,27 +3697,23 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot_11_chnl);
btcoexist->btc_get(btcoexist,
BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d(%d)",
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
"Dot11 channel / HsMode(HsChnl)",
wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %3ph ",
+ seq_printf(m, "\n %-35s = %3ph ",
"H2C Wifi inform bt chnl Info",
coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
+ seq_printf(m, "\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
wifi_rssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
link, roam, scan);
btcoexist->btc_get(btcoexist,
@@ -3738,8 +3724,7 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
BTC_GET_BL_WIFI_BUSY, &wifi_busy);
btcoexist->btc_get(btcoexist,
BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %s/ %s ", "Wifi status",
+ seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status",
(wifi_under_5g ? "5G" : "2.4G"),
((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
(((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
@@ -3748,134 +3733,128 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
"uplink" : "downlink")));
if (stack_info->profile_notified) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP",
stack_info->sco_exist, stack_info->hid_exist,
stack_info->pan_exist, stack_info->a2dp_exist);
btcoexist->btc_disp_dbg_msg(btcoexist,
- BTC_DBG_DISP_BT_LINK_INFO);
+ BTC_DBG_DISP_BT_LINK_INFO,
+ m);
}
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
+ seq_printf(m, "\n %-35s = %s", "BT Info A2DP rate",
(bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8821a_2ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8821a_2ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
- "PS state, IPS/LPS",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
/* Sw mechanism*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Sw mechanism]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d(0x%x) ",
- "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
- coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
- coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ seq_printf(m, "\n %-35s",
+ "============[Sw mechanism]============");
+ seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
/* Fw mechanism*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
if (!btcoexist->manual_control) {
ps_tdma_case = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d",
- "PS TDMA",
- coex_dm->ps_tdma_para, ps_tdma_case);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
- coex_dm->cur_dec_bt_pwr_lvl,
- coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %5ph case-%d",
+ "PS TDMA",
+ coex_dm->ps_tdma_para, ps_tdma_case);
+
+ seq_printf(m, "\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
+ coex_dm->cur_dec_bt_pwr_lvl,
+ coex_dm->cur_ignore_wlan_act);
}
/* Hw setting*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Hw setting]============");
+ seq_printf(m, "\n %-35s", "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal",
- coex_dm->bt_rf0x1e_backup);
+ seq_printf(m, "\n %-35s = 0x%x", "RF-A, 0x1e initVal",
+ coex_dm->bt_rf0x1e_backup);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x ",
- "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
- u1tmp[0], u1tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x ",
+ "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
+ u1tmp[0], u1tmp[1]);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xc5b);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x8db(ADC)/0xc5b[29:25](DAC)",
- ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x8db(ADC)/0xc5b[29:25](DAC)",
+ ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
- u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
+ u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x974);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x40/ 0x4c[24:23]/ 0x974",
- u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x40/ 0x4c[24:23]/ 0x974",
+ u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522",
- u4tmp[0], u1tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522",
+ u4tmp[0], u1tmp[0]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa0a);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xc50(DIG)/0xa0a(CCK-TH)",
- u4tmp[0], u1tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0xc50(DIG)/0xa0a(CCK-TH)",
+ u4tmp[0], u1tmp[0]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "OFDM-FA/ CCK-FA",
- u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "OFDM-FA/ CCK-FA",
+ u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u4tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8",
- u4tmp[0], u4tmp[1], u4tmp[2]);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770 (hi-pri Rx/Tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8",
+ u4tmp[0], u4tmp[1], u4tmp[2]);
+
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x770 (hi-pri Rx/Tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d",
"0x774(low-pri Rx/Tx)",
coex_sta->low_priority_rx, coex_sta->low_priority_tx);
/* Tx mgnt queue hang or not, 0x41b should = 0xf, ex: 0xd ==>hang*/
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x41b);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
- "0x41b (mgntQ hang chk == 0xf)",
- u1tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x",
+ "0x41b (mgntQ hang chk == 0xf)",
+ u1tmp[0]);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
index a839d5574422..ce3e58c4e660 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
@@ -226,7 +226,8 @@ ex_btc8821a2ant_periodical(
);
void
ex_btc8821a2ant_display_coex_info(
- struct btc_coexist *btcoexist
+ struct btc_coexist *btcoexist,
+ struct seq_file *m
);
void ex_btc8821a2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
void ex_btc8821a2ant_pre_load_firmware(struct btc_coexist *btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index b5e9877d935c..1404729441a2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -25,19 +25,11 @@
#include "halbt_precomp.h"
-/***********************************************
- * Global variables
- ***********************************************/
-
-struct btc_coexist gl_bt_coexist;
-
-u32 btc_dbg_type[BTC_MSG_MAX];
-
/***************************************************
* Debug related function
***************************************************/
-const char *const gl_btc_wifi_bw_string[] = {
+static const char *const gl_btc_wifi_bw_string[] = {
"11bg",
"HT20",
"HT40",
@@ -45,7 +37,7 @@ const char *const gl_btc_wifi_bw_string[] = {
"HT160"
};
-const char *const gl_btc_wifi_freq_string[] = {
+static const char *const gl_btc_wifi_freq_string[] = {
"2.4G",
"5G"
};
@@ -103,21 +95,6 @@ static bool is_any_client_connect_to_ap(struct btc_coexist *btcoexist)
return false;
}
-static bool halbtc_is_bt40(struct rtl_priv *adapter)
-{
- struct rtl_priv *rtlpriv = adapter;
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- bool is_ht40 = true;
- enum ht_channel_width bw = rtlphy->current_chan_bw;
-
- if (bw == HT_CHANNEL_WIDTH_20)
- is_ht40 = false;
- else if (bw == HT_CHANNEL_WIDTH_20_40)
- is_ht40 = true;
-
- return is_ht40;
-}
-
static bool halbtc_legacy(struct rtl_priv *adapter)
{
struct rtl_priv *rtlpriv = adapter;
@@ -143,18 +120,26 @@ bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv =
- (struct rtl_priv *)btcoexist->adapter;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 wifi_bw = BTC_WIFI_BW_HT20;
- if (halbtc_is_bt40(rtlpriv)) {
- wifi_bw = BTC_WIFI_BW_HT40;
+ if (halbtc_legacy(rtlpriv)) {
+ wifi_bw = BTC_WIFI_BW_LEGACY;
} else {
- if (halbtc_legacy(rtlpriv))
- wifi_bw = BTC_WIFI_BW_LEGACY;
- else
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
wifi_bw = BTC_WIFI_BW_HT20;
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ wifi_bw = BTC_WIFI_BW_HT40;
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ wifi_bw = BTC_WIFI_BW_HT80;
+ break;
+ }
}
+
return wifi_bw;
}
@@ -171,7 +156,7 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
return chnl;
}
-u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
{
struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
@@ -186,12 +171,12 @@ u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
return rtlpriv->btcoexist.btc_info.single_ant_path;
}
-u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
{
return rtlpriv->btcoexist.btc_info.bt_type;
}
-u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
{
struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
u8 num;
@@ -208,13 +193,117 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
return num;
}
-u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
{
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
return rtlhal->package_type;
}
+static
+u8 rtl_get_hwpg_rfe_type(struct rtl_priv *rtlpriv)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ return rtlhal->rfe_type;
+}
+
+static
+bool halbtc_is_hw_mailbox_exist(struct btc_coexist *btcoexist)
+{
+ if (IS_HARDWARE_TYPE_8812(btcoexist->adapter))
+ return false;
+ else
+ return true;
+}
+
+static
+bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code,
+ u8 *cmd, u32 len, unsigned long wait_ms)
+{
+ struct rtl_priv *rtlpriv;
+ const u8 oper_ver = 0;
+ u8 req_num;
+
+ if (!halbtc_is_hw_mailbox_exist(btcoexist))
+ return false;
+
+ if (wait_ms) /* before h2c to avoid race condition */
+ reinit_completion(&btcoexist->bt_mp_comp);
+
+ rtlpriv = btcoexist->adapter;
+
+ /* fill req_num by op_code; rtl_btc_btmpinfo_notify() uses it
+ * to know the message type
+ */
+ switch (op_code) {
+ case BT_OP_GET_BT_VERSION:
+ req_num = BT_SEQ_GET_BT_VERSION;
+ break;
+ case BT_OP_GET_AFH_MAP_L:
+ req_num = BT_SEQ_GET_AFH_MAP_L;
+ break;
+ case BT_OP_GET_AFH_MAP_M:
+ req_num = BT_SEQ_GET_AFH_MAP_M;
+ break;
+ case BT_OP_GET_AFH_MAP_H:
+ req_num = BT_SEQ_GET_AFH_MAP_H;
+ break;
+ case BT_OP_GET_BT_COEX_SUPPORTED_FEATURE:
+ req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE;
+ break;
+ case BT_OP_GET_BT_COEX_SUPPORTED_VERSION:
+ req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION;
+ break;
+ case BT_OP_GET_BT_ANT_DET_VAL:
+ req_num = BT_SEQ_GET_BT_ANT_DET_VAL;
+ break;
+ case BT_OP_GET_BT_BLE_SCAN_PARA:
+ req_num = BT_SEQ_GET_BT_BLE_SCAN_PARA;
+ break;
+ case BT_OP_GET_BT_BLE_SCAN_TYPE:
+ req_num = BT_SEQ_GET_BT_BLE_SCAN_TYPE;
+ break;
+ case BT_OP_GET_BT_DEVICE_INFO:
+ req_num = BT_SEQ_GET_BT_DEVICE_INFO;
+ break;
+ case BT_OP_GET_BT_FORBIDDEN_SLOT_VAL:
+ req_num = BT_SEQ_GET_BT_FORB_SLOT_VAL;
+ break;
+ case BT_OP_WRITE_REG_ADDR:
+ case BT_OP_WRITE_REG_VALUE:
+ case BT_OP_READ_REG:
+ default:
+ req_num = BT_SEQ_DONT_CARE;
+ break;
+ }
+
+ cmd[0] |= (oper_ver & 0x0f); /* Set OperVer */
+ cmd[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
+ cmd[1] = op_code;
+ rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, len, cmd);
+
+ /* done if the caller does not wait for a response */
+ if (!wait_ms)
+ return true;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms);
+
+ if (in_interrupt())
+ return false;
+
+ if (wait_for_completion_timeout(&btcoexist->bt_mp_comp,
+ msecs_to_jiffies(wait_ms)) == 0) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "btmpinfo wait (req_num=%d) timeout\n", req_num);
+
+ return false; /* timeout */
+ }
+
+ return true;
+}
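
The new halbtc_send_bt_mp_operation() centralizes the BT MP H2C framing that the old call sites open-coded: byte 0 packs the operation version into the low nibble and the 4-bit req_num into the high nibble, byte 1 carries the op code, and the request is sent as H2C element 0x67. The reply arrives asynchronously through rtl_btc_btmpinfo_notify(), which completes bt_mp_comp; that is why the completion is re-armed before the H2C goes out. A minimal sketch of the framing (helper name is hypothetical; offsets are taken from the code above):

	/* Sketch, not part of the patch: BT MP request framing as
	 * implemented by halbtc_send_bt_mp_operation().
	 */
	static void btmp_frame_request(u8 *cmd, u8 oper_ver, u8 req_num,
				       u8 op_code)
	{
		cmd[0] = (oper_ver & 0x0f) | ((req_num << 4) & 0xf0);
		cmd[1] = op_code;	/* e.g. BT_OP_GET_BT_VERSION */
		/* cmd[2..] carry op-specific payload, if any */
	}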
+
static void halbtc_leave_lps(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv;
@@ -342,25 +431,80 @@ static void halbtc_aggregation_check(struct btc_coexist *btcoexist)
static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 cmd_buffer[4] = {0};
- u8 oper_ver = 0;
- u8 req_num = 0x0E;
if (btcoexist->bt_info.bt_real_fw_ver)
goto label_done;
- cmd_buffer[0] |= (oper_ver & 0x0f); /* Set OperVer */
- cmd_buffer[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
- cmd_buffer[1] = 0; /* BT_OP_GET_BT_VERSION = 0 */
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
- &cmd_buffer[0]);
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_VERSION,
+ cmd_buffer, 4, 200);
label_done:
return btcoexist->bt_info.bt_real_fw_ver;
}
-u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
+static u32 halbtc_get_bt_coex_supported_feature(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+ if (btcoexist->bt_info.bt_supported_feature)
+ goto label_done;
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist,
+ BT_OP_GET_BT_COEX_SUPPORTED_FEATURE,
+ cmd_buffer, 4, 200);
+
+label_done:
+ return btcoexist->bt_info.bt_supported_feature;
+}
+
+static u32 halbtc_get_bt_coex_supported_version(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+ if (btcoexist->bt_info.bt_supported_version)
+ goto label_done;
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist,
+ BT_OP_GET_BT_COEX_SUPPORTED_VERSION,
+ cmd_buffer, 4, 200);
+
+label_done:
+ return btcoexist->bt_info.bt_supported_version;
+}
+
+static u32 halbtc_get_bt_device_info(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist,
+ BT_OP_GET_BT_DEVICE_INFO,
+ cmd_buffer, 4, 200);
+
+ return btcoexist->bt_info.bt_device_info;
+}
+
+static u32 halbtc_get_bt_forbidden_slot_val(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist,
+ BT_OP_GET_BT_FORBIDDEN_SLOT_VAL,
+ cmd_buffer, 4, 200);
+
+ return btcoexist->bt_info.bt_forb_slot_val;
+}
+
+static u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
{
/* return value:
* [31:16] => connected port number
@@ -521,6 +665,18 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
case BTC_GET_U4_VENDOR:
*u32_tmp = BTC_VENDOR_OTHER;
break;
+ case BTC_GET_U4_SUPPORTED_VERSION:
+ *u32_tmp = halbtc_get_bt_coex_supported_version(btcoexist);
+ break;
+ case BTC_GET_U4_SUPPORTED_FEATURE:
+ *u32_tmp = halbtc_get_bt_coex_supported_feature(btcoexist);
+ break;
+ case BTC_GET_U4_BT_DEVICE_INFO:
+ *u32_tmp = halbtc_get_bt_device_info(btcoexist);
+ break;
+ case BTC_GET_U4_BT_FORBIDDEN_SLOT_VAL:
+ *u32_tmp = halbtc_get_bt_forbidden_slot_val(btcoexist);
+ break;
case BTC_GET_U1_WIFI_DOT11_CHNL:
*u8_tmp = rtlphy->current_channel;
break;
@@ -653,6 +809,104 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
return ret;
}
+static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist,
+ struct seq_file *m)
+{
+}
+
+static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
+{
+}
+
+static void halbtc_display_wifi_status(struct btc_coexist *btcoexist,
+ struct seq_file *m)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ s32 wifi_rssi = 0, bt_hs_rssi = 0;
+ bool scan = false, link = false, roam = false, wifi_busy = false;
+ bool wifi_under_b_mode = false;
+ bool wifi_under_5g = false;
+ u32 wifi_bw = BTC_WIFI_BW_HT20;
+ u32 wifi_traffic_dir = BTC_WIFI_TRAFFIC_TX;
+ u32 wifi_freq = BTC_FREQ_2_4G;
+ u32 wifi_link_status = 0x0;
+ bool bt_hs_on = false, under_ips = false, under_lps = false;
+ bool low_power = false, dc_mode = false;
+ u8 wifi_chnl = 0, wifi_hs_chnl = 0;
+ u8 ap_num = 0;
+
+ wifi_link_status = halbtc_get_wifi_link_status(btcoexist);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d",
+ "STA/vWifi/HS/p2pGo/p2pGc",
+ ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(High Speed)",
+ wifi_chnl, wifi_hs_chnl, bt_hs_on);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi",
+ wifi_rssi - 100, bt_hs_rssi - 100);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan",
+ link, roam, scan);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+ wifi_freq = (wifi_under_5g ? BTC_FREQ_5G : BTC_FREQ_2_4G);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
+ &wifi_under_b_mode);
+
+ seq_printf(m, "\n %-35s = %s / %s/ %s/ AP=%d ",
+ "Wifi freq/ bw/ traffic",
+ gl_btc_wifi_freq_string[wifi_freq],
+ ((wifi_under_b_mode) ? "11b" :
+ gl_btc_wifi_bw_string[wifi_bw]),
+ ((!wifi_busy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX ==
+ wifi_traffic_dir) ? "uplink" :
+ "downlink")),
+ ap_num);
+
+ /* power status */
+ dc_mode = true; /*TODO*/
+ under_ips = rtlpriv->psc.inactive_pwrstate == ERFOFF;
+ under_lps = rtlpriv->psc.dot11_psmode != EACTIVE;
+ low_power = false; /*TODO*/
+ seq_printf(m, "\n %-35s = %s%s%s%s",
+ "Power Status",
+ (dc_mode ? "DC mode" : "AC mode"),
+ (under_ips ? ", IPS ON" : ""),
+ (under_lps ? ", LPS ON" : ""),
+ (low_power ? ", 32k" : ""));
+
+ seq_printf(m,
+ "\n %-35s = %02x %02x %02x %02x %02x %02x (0x%x/0x%x)",
+ "Power mode cmd(lps/rpwm)",
+ btcoexist->pwr_mode_val[0], btcoexist->pwr_mode_val[1],
+ btcoexist->pwr_mode_val[2], btcoexist->pwr_mode_val[3],
+ btcoexist->pwr_mode_val[4], btcoexist->pwr_mode_val[5],
+ btcoexist->bt_info.lps_val,
+ btcoexist->bt_info.rpwm_val);
+}
+
/************************************************************
* IO related function
************************************************************/
@@ -726,7 +980,8 @@ static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
rtl_write_dword(rtlpriv, reg_addr, data);
}
-void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, u8 data)
+static void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr,
+ u8 data)
{
struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -739,22 +994,6 @@ void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, u8 data)
rtl_write_byte(rtlpriv, reg_addr, data);
}
-void halbtc_set_macreg(void *btc_context, u32 reg_addr, u32 bit_mask, u32 data)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
-}
-
-u32 halbtc_get_macreg(void *btc_context, u32 reg_addr, u32 bit_mask)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
-}
-
static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask,
u32 data)
{
@@ -800,38 +1039,47 @@ static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
cmd_len, cmd_buf);
}
+static
void halbtc_set_bt_reg(void *btc_context, u8 reg_type, u32 offset, u32 set_val)
{
struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 cmd_buffer1[4] = {0};
u8 cmd_buffer2[4] = {0};
- u8 *addr_to_set = (u8 *)&offset;
- u8 *value_to_set = (u8 *)&set_val;
- u8 oper_ver = 0;
- u8 req_num = 0;
- if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
- cmd_buffer1[0] |= (oper_ver & 0x0f); /* Set OperVer */
- cmd_buffer1[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
- cmd_buffer1[1] = 0x0d; /* OpCode: BT_LO_OP_WRITE_REG_VALUE */
- cmd_buffer1[2] = value_to_set[0]; /* Set WriteRegValue */
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
- &cmd_buffer1[0]);
-
- msleep(200);
- req_num++;
-
- cmd_buffer2[0] |= (oper_ver & 0x0f); /* Set OperVer */
- cmd_buffer2[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
- cmd_buffer2[1] = 0x0c; /* OpCode: BT_LO_OP_WRITE_REG_ADDR */
- cmd_buffer2[3] = addr_to_set[0]; /* Set WriteRegAddr */
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
- &cmd_buffer2[0]);
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ *((__le16 *)&cmd_buffer1[2]) = cpu_to_le16((u16)set_val);
+ if (!halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_VALUE,
+ cmd_buffer1, 4, 200))
+ return;
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ cmd_buffer2[2] = reg_type;
+ *((u8 *)&cmd_buffer2[3]) = (u8)offset;
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_ADDR,
+ cmd_buffer2, 4, 200);
+}
+
+static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type,
+ struct seq_file *m)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+
+ switch (disp_type) {
+ case BTC_DBG_DISP_COEX_STATISTICS:
+ halbtc_display_coex_statistics(btcoexist, m);
+ break;
+ case BTC_DBG_DISP_BT_LINK_INFO:
+ halbtc_display_bt_link_info(btcoexist, m);
+ break;
+ case BTC_DBG_DISP_WIFI_STATUS:
+ halbtc_display_wifi_status(btcoexist, m);
+ break;
+ default:
+ break;
}
}
-bool halbtc_under_ips(struct btc_coexist *btcoexist)
+static bool halbtc_under_ips(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
@@ -849,12 +1097,95 @@ bool halbtc_under_ips(struct btc_coexist *btcoexist)
return false;
}
+static u8 halbtc_get_ant_det_val_from_bt(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_ANT_DET_VAL,
+ cmd_buffer, 4, 200);
+
+ /* need to wait for completion to return the correct value */
+
+ return btcoexist->bt_info.bt_ant_det_val;
+}
+
+static u8 halbtc_get_ble_scan_type_from_bt(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_TYPE,
+ cmd_buffer, 4, 200);
+
+ /* need to wait for completion to return the correct value */
+
+ return btcoexist->bt_info.bt_ble_scan_type;
+}
+
+static u32 halbtc_get_ble_scan_para_from_bt(void *btc_context, u8 scan_type)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_PARA,
+ cmd_buffer, 4, 200);
+
+ /* need to wait for completion to return the correct value */
+
+ return btcoexist->bt_info.bt_ble_scan_para;
+}
+
+static bool halbtc_get_bt_afh_map_from_bt(void *btc_context, u8 map_type,
+ u8 *afh_map)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[2] = {0};
+ bool ret;
+ u32 *afh_map_l = (u32 *)afh_map;
+ u32 *afh_map_m = (u32 *)(afh_map + 4);
+ u16 *afh_map_h = (u16 *)(afh_map + 8);
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_L,
+ cmd_buffer, 2, 200);
+ if (!ret)
+ goto exit;
+
+ *afh_map_l = btcoexist->bt_info.afh_map_l;
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_M,
+ cmd_buffer, 2, 200);
+ if (!ret)
+ goto exit;
+
+ *afh_map_m = btcoexist->bt_info.afh_map_m;
+
+ /* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_H,
+ cmd_buffer, 2, 200);
+ if (!ret)
+ goto exit;
+
+ *afh_map_h = btcoexist->bt_info.afh_map_h;
+
+exit:
+ return ret;
+}
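
halbtc_get_bt_afh_map_from_bt() assembles the AFH map from three MP reads: 4 low bytes, 4 middle bytes and 2 high bytes, 10 bytes total, which matches the size of a Bluetooth BR/EDR AFH channel map. Assuming the usual one-bit-per-channel layout (an assumption; the patch does not spell it out), a caller could test a channel like this:

	/* Sketch, illustration only: interprets the 10-byte map as one
	 * bit per channel, which the patch itself does not state.
	 */
	static bool afh_channel_is_used(const u8 *afh_map, u8 channel)
	{
		if (channel >= 79)	/* BR/EDR defines 79 AFH channels */
			return false;
		return afh_map[channel / 8] & BIT(channel % 8);
	}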
+
/*****************************************************************
* Extern functions called by other module
*****************************************************************/
-bool exhalbtc_initlize_variables(void)
+bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return false;
halbtc_dbg_init();
@@ -874,24 +1205,76 @@ bool exhalbtc_initlize_variables(void)
btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+ btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
btcoexist->btc_get = halbtc_get;
btcoexist->btc_set = halbtc_set;
btcoexist->btc_set_bt_reg = halbtc_set_bt_reg;
-
btcoexist->bt_info.bt_ctrl_buf_size = false;
btcoexist->bt_info.agg_buf_size = 5;
btcoexist->bt_info.increase_scan_dev_num = false;
+
+ btcoexist->btc_get_bt_coex_supported_feature =
+ halbtc_get_bt_coex_supported_feature;
+ btcoexist->btc_get_bt_coex_supported_version =
+ halbtc_get_bt_coex_supported_version;
+ btcoexist->btc_get_ant_det_val_from_bt = halbtc_get_ant_det_val_from_bt;
+ btcoexist->btc_get_ble_scan_type_from_bt =
+ halbtc_get_ble_scan_type_from_bt;
+ btcoexist->btc_get_ble_scan_para_from_bt =
+ halbtc_get_ble_scan_para_from_bt;
+ btcoexist->btc_get_bt_afh_map_from_bt =
+ halbtc_get_bt_afh_map_from_bt;
+
+ init_completion(&btcoexist->bt_mp_comp);
+
+ return true;
+}
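
With the context now allocated per adapter (see rtl_btc_alloc_variable() in rtl_btc.c further down) instead of living in the removed gl_bt_coexist global, every exported entry point follows the same fetch-and-NULL-check pattern. A hypothetical caller, for illustration:

	/* Sketch; example_notify() is hypothetical. rtl_btc_coexist() is
	 * the accessor macro added in halbtcoutsrc.h below.
	 */
	static void example_notify(struct rtl_priv *rtlpriv, u8 type)
	{
		struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);

		if (!btcoexist)	/* wifi-only parts allocate no context */
			return;
		exhalbtc_scan_notify(btcoexist, type);
	}

Binding the context to rtl_priv is what makes two coexisting adapters possible, which the single global never could.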
+
+bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv)
+{
+ struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+ struct wifi_only_haldata *wifionly_haldata;
+
+ if (!wifionly_cfg)
+ return false;
+
+ wifionly_cfg->adapter = rtlpriv;
+
+ switch (rtlpriv->rtlhal.interface) {
+ case INTF_PCI:
+ wifionly_cfg->chip_interface = BTC_INTF_PCI;
+ break;
+ case INTF_USB:
+ wifionly_cfg->chip_interface = BTC_INTF_USB;
+ break;
+ default:
+ wifionly_cfg->chip_interface = BTC_INTF_UNKNOWN;
+ break;
+ }
+
+ wifionly_haldata = &wifionly_cfg->haldata_info;
+
+ wifionly_haldata->customer_id = CUSTOMER_NORMAL;
+ wifionly_haldata->efuse_pg_antnum = rtl_get_hwpg_ant_num(rtlpriv);
+ wifionly_haldata->efuse_pg_antpath =
+ rtl_get_hwpg_single_ant_path(rtlpriv);
+ wifionly_haldata->rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
+ wifionly_haldata->ant_div_cfg = 0;
+
return true;
}
bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
struct rtl_priv *rtlpriv = adapter;
- u8 ant_num = 2, chip_type;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+ u8 ant_num = 2, chip_type, single_ant_path = 0;
+
+ if (!btcoexist)
+ return false;
if (btcoexist->binded)
return false;
@@ -922,10 +1305,16 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
btcoexist->bt_info.miracast_plus_bt = false;
chip_type = rtl_get_hwpg_bt_type(rtlpriv);
- exhalbtc_set_chip_type(chip_type);
+ exhalbtc_set_chip_type(btcoexist, chip_type);
ant_num = rtl_get_hwpg_ant_num(rtlpriv);
exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
+ /* set default antenna position to main port */
+ btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
+
+ single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv);
+ exhalbtc_set_single_ant_path(btcoexist, single_ant_path);
+
if (rtl_get_hwpg_package_type(rtlpriv) == 0)
btcoexist->board_info.tfbga_package = false;
else if (rtl_get_hwpg_package_type(rtlpriv) == 1)
@@ -940,6 +1329,9 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Package Type = Non-TFBGA\n");
+ btcoexist->board_info.rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
+ btcoexist->board_info.ant_div_cfg = 0;
+
return true;
}
@@ -996,6 +1388,10 @@ void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only)
}
}
+void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg)
+{
+}
+
void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
{
if (!halbtc_is_bt_coexist_available(btcoexist))
@@ -1122,6 +1518,11 @@ void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
halbtc_normal_low_power(btcoexist);
}
+void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+ u8 is_5g)
+{
+}
+
void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
{
u8 asso_type;
@@ -1430,30 +1831,25 @@ void exhalbtc_stack_update_profile_info(void)
{
}
-void exhalbtc_update_min_bt_rssi(s8 bt_rssi)
+void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
-
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
btcoexist->stack_info.min_bt_rssi = bt_rssi;
}
-void exhalbtc_set_hci_version(u16 hci_version)
+void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
-
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
btcoexist->stack_info.hci_version = hci_version;
}
-void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
+void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist,
+ u16 bt_hci_version, u16 bt_patch_version)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
-
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
@@ -1461,7 +1857,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
btcoexist->bt_info.bt_hci_ver = bt_hci_version;
}
-void exhalbtc_set_chip_type(u8 chip_type)
+void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type)
{
switch (chip_type) {
default:
@@ -1469,51 +1865,58 @@ void exhalbtc_set_chip_type(u8 chip_type)
case BT_ISSC_3WIRE:
case BT_ACCEL:
case BT_RTL8756:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_UNDEF;
break;
case BT_CSR_BC4:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
break;
case BT_CSR_BC8:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
break;
case BT_RTL8723A:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723A;
break;
case BT_RTL8821A:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8821;
break;
case BT_RTL8723B:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723B;
break;
}
}
void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
if (BT_COEX_ANT_TYPE_PG == type) {
- gl_bt_coexist.board_info.pg_ant_num = ant_num;
- gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ btcoexist->board_info.pg_ant_num = ant_num;
+ btcoexist->board_info.btdm_ant_num = ant_num;
} else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
- gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ btcoexist->board_info.btdm_ant_num = ant_num;
} else if (type == BT_COEX_ANT_TYPE_DETECTED) {
- gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ btcoexist->board_info.btdm_ant_num = ant_num;
if (rtlpriv->cfg->mod_params->ant_sel == 1)
- gl_bt_coexist.board_info.btdm_ant_pos =
+ btcoexist->board_info.btdm_ant_pos =
BTC_ANTENNA_AT_AUX_PORT;
else
- gl_bt_coexist.board_info.btdm_ant_pos =
+ btcoexist->board_info.btdm_ant_pos =
BTC_ANTENNA_AT_MAIN_PORT;
}
}
/* Currently used by 8723b only, S0 or S1 */
-void exhalbtc_set_single_ant_path(u8 single_ant_path)
+void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist,
+ u8 single_ant_path)
{
- gl_bt_coexist.board_info.single_ant_path = single_ant_path;
+ btcoexist->board_info.single_ant_path = single_ant_path;
}
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
@@ -1522,18 +1925,36 @@ void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8821a2ant_display_coex_info(btcoexist);
+ ex_btc8821a2ant_display_coex_info(btcoexist, m);
else if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8821a1ant_display_coex_info(btcoexist);
+ ex_btc8821a1ant_display_coex_info(btcoexist, m);
} else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8723b2ant_display_coex_info(btcoexist);
+ ex_btc8723b2ant_display_coex_info(btcoexist, m);
else if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8723b1ant_display_coex_info(btcoexist);
+ ex_btc8723b1ant_display_coex_info(btcoexist, m);
} else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8192e2ant_display_coex_info(btcoexist);
+ ex_btc8192e2ant_display_coex_info(btcoexist, m);
}
halbtc_normal_low_power(btcoexist);
}
+
+void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (btcoexist->manual_control)
+ return;
+
+ halbtc_leave_low_power(btcoexist);
+
+ halbtc_normal_low_power(btcoexist);
+}
+
+void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+ u8 is_5g)
+{
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index f9b87c12db09..8ed217656539 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -103,8 +103,6 @@ enum btc_msg_type {
BTC_MSG_MAX
};
-extern u32 btc_dbg_type[];
-
/* following is for BTC_MSG_INTERFACE */
#define INTF_INIT BIT0
#define INTF_NOTIFY BIT2
@@ -152,8 +150,10 @@ struct btc_board_info {
u8 btdm_ant_num; /* ant number for btdm */
u8 btdm_ant_pos;
u8 single_ant_path; /* current used for 8723b only, 1=>s0, 0=>s1 */
- bool bt_exist;
bool tfbga_package;
+
+ u8 rfe_type;
+ u8 ant_div_cfg;
};
enum btc_dbg_opcode {
@@ -181,10 +181,17 @@ enum btc_wifi_role {
BTC_ROLE_MAX
};
+enum btc_wireless_freq {
+ BTC_FREQ_2_4G = 0x0,
+ BTC_FREQ_5G = 0x1,
+ BTC_FREQ_MAX
+};
+
enum btc_wifi_bw_mode {
BTC_WIFI_BW_LEGACY = 0x0,
BTC_WIFI_BW_HT20 = 0x1,
BTC_WIFI_BW_HT40 = 0x2,
+ BTC_WIFI_BW_HT80 = 0x3,
BTC_WIFI_BW_MAX
};
@@ -275,6 +282,8 @@ enum btc_get_type {
BTC_GET_U4_VENDOR,
BTC_GET_U4_SUPPORTED_VERSION,
BTC_GET_U4_SUPPORTED_FEATURE,
+ BTC_GET_U4_BT_DEVICE_INFO,
+ BTC_GET_U4_BT_FORBIDDEN_SLOT_VAL,
BTC_GET_U4_WIFI_IQK_TOTAL,
BTC_GET_U4_WIFI_IQK_OK,
BTC_GET_U4_WIFI_IQK_FAIL,
@@ -355,6 +364,7 @@ enum btc_dbg_disp_type {
BTC_DBG_DISP_BT_LINK_INFO = 0x1,
BTC_DBG_DISP_BT_FW_VER = 0x2,
BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+ BTC_DBG_DISP_WIFI_STATUS = 0x04,
BTC_DBG_DISP_MAX
};
@@ -376,6 +386,14 @@ enum btc_notify_type_scan {
BTC_SCAN_MAX
};
+enum btc_notify_type_switchband {
+ BTC_NOT_SWITCH = 0x0,
+ BTC_SWITCH_TO_24G = 0x1,
+ BTC_SWITCH_TO_5G = 0x2,
+ BTC_SWITCH_TO_24G_NOFORSCAN = 0x3,
+ BTC_SWITCH_MAX
+};
+
enum btc_notify_type_associate {
BTC_ASSOCIATE_FINISH = 0x0,
BTC_ASSOCIATE_START = 0x1,
@@ -417,49 +435,6 @@ enum btc_notify_type_stack_operation {
BTC_STACK_OP_MAX
};
-typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
-
-typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
-
-typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
-
-typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u32 data);
-
-typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
- u32 bit_mask, u8 data1b);
-
-typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
-
-typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
-
-typedef void (*bfp_btc_local_reg_w1)(void *btc_context, u32 reg_addr, u8 data);
-typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
- u8 bit_mask, u8 data);
-
-typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
- u32 bit_mask, u32 data);
-
-typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
- u32 bit_mask);
-
-typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
- u32 bit_mask, u32 data);
-
-typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
- u32 reg_addr, u32 bit_mask);
-
-typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
- u32 cmd_len, u8 *cmd_buffer);
-
-typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
-
-typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
-
-typedef void (*bfp_btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
- u32 value);
-
-typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
-
struct btc_bt_info {
bool bt_disabled;
u8 rssi_adjust_for_agc_table_on;
@@ -491,6 +466,17 @@ struct btc_bt_info {
u8 lps_val;
u8 rpwm_val;
u32 ra_mask;
+
+ u32 afh_map_l;
+ u32 afh_map_m;
+ u16 afh_map_h;
+ u32 bt_supported_feature;
+ u32 bt_supported_version;
+ u32 bt_device_info;
+ u32 bt_forb_slot_val;
+ u8 bt_ant_det_val;
+ u8 bt_ble_scan_type;
+ u32 bt_ble_scan_para;
};
struct btc_stack_info {
@@ -546,6 +532,40 @@ enum btc_antenna_pos {
BTC_ANTENNA_AT_AUX_PORT = 0x2,
};
+enum btc_mp_h2c_op_code {
+ BT_OP_GET_BT_VERSION = 0,
+ BT_OP_WRITE_REG_ADDR = 12,
+ BT_OP_WRITE_REG_VALUE = 13,
+ BT_OP_READ_REG = 17,
+ BT_OP_GET_AFH_MAP_L = 30,
+ BT_OP_GET_AFH_MAP_M = 31,
+ BT_OP_GET_AFH_MAP_H = 32,
+ BT_OP_GET_BT_COEX_SUPPORTED_FEATURE = 42,
+ BT_OP_GET_BT_COEX_SUPPORTED_VERSION = 43,
+ BT_OP_GET_BT_ANT_DET_VAL = 44,
+ BT_OP_GET_BT_BLE_SCAN_PARA = 45,
+ BT_OP_GET_BT_BLE_SCAN_TYPE = 46,
+ BT_OP_GET_BT_DEVICE_INFO = 48,
+ BT_OP_GET_BT_FORBIDDEN_SLOT_VAL = 49,
+ BT_OP_MAX
+};
+
+enum btc_mp_h2c_req_num {
+ /* 4 bits only */
+ BT_SEQ_DONT_CARE = 0,
+ BT_SEQ_GET_BT_VERSION = 0xE,
+ BT_SEQ_GET_AFH_MAP_L = 0x5,
+ BT_SEQ_GET_AFH_MAP_M = 0x6,
+ BT_SEQ_GET_AFH_MAP_H = 0x9,
+ BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE = 0x7,
+ BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION = 0x8,
+ BT_SEQ_GET_BT_ANT_DET_VAL = 0x2,
+ BT_SEQ_GET_BT_BLE_SCAN_PARA = 0x3,
+ BT_SEQ_GET_BT_BLE_SCAN_TYPE = 0x4,
+ BT_SEQ_GET_BT_DEVICE_INFO = 0xA,
+ BT_SEQ_GET_BT_FORB_SLOT_VAL = 0xB,
+};
+
struct btc_coexist {
/* make sure only one adapter can bind the data context */
bool binded;
@@ -563,55 +583,86 @@ struct btc_coexist {
*/
bool auto_report_1ant;
bool auto_report_2ant;
+ bool dbg_mode_1ant;
+ bool dbg_mode_2ant;
bool initilized;
bool stop_coex_dm;
bool manual_control;
struct btc_statistics statistics;
u8 pwr_mode_val[10];
- /* function pointers - io related */
- bfp_btc_r1 btc_read_1byte;
- bfp_btc_w1 btc_write_1byte;
- bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
- bfp_btc_r2 btc_read_2byte;
- bfp_btc_w2 btc_write_2byte;
- bfp_btc_r4 btc_read_4byte;
- bfp_btc_w4 btc_write_4byte;
- bfp_btc_local_reg_w1 btc_write_local_reg_1byte;
-
- bfp_btc_set_bb_reg btc_set_bb_reg;
- bfp_btc_get_bb_reg btc_get_bb_reg;
+ struct completion bt_mp_comp;
- bfp_btc_set_rf_reg btc_set_rf_reg;
- bfp_btc_get_rf_reg btc_get_rf_reg;
-
- bfp_btc_fill_h2c btc_fill_h2c;
-
- bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
-
- bfp_btc_get btc_get;
- bfp_btc_set btc_set;
-
- bfp_btc_set_bt_reg btc_set_bt_reg;
+ /* function pointers - io related */
+ u8 (*btc_read_1byte)(void *btc_context, u32 reg_addr);
+ void (*btc_write_1byte)(void *btc_context, u32 reg_addr, u32 data);
+ void (*btc_write_1byte_bitmask)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u8 data1b);
+ u16 (*btc_read_2byte)(void *btc_context, u32 reg_addr);
+ void (*btc_write_2byte)(void *btc_context, u32 reg_addr, u16 data);
+ u32 (*btc_read_4byte)(void *btc_context, u32 reg_addr);
+ void (*btc_write_4byte)(void *btc_context, u32 reg_addr, u32 data);
+
+ void (*btc_write_local_reg_1byte)(void *btc_context, u32 reg_addr,
+ u8 data);
+ void (*btc_set_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u32 data);
+ u32 (*btc_get_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask);
+ void (*btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data);
+ u32 (*btc_get_rf_reg)(void *btc_context, u8 rf_path,
+ u32 reg_addr, u32 bit_mask);
+
+ void (*btc_fill_h2c)(void *btc_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buffer);
+
+ void (*btc_disp_dbg_msg)(void *btcoexist, u8 disp_type,
+ struct seq_file *m);
+
+ bool (*btc_get)(void *btcoexist, u8 get_type, void *out_buf);
+ bool (*btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+
+ void (*btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
+ u32 value);
+ u32 (*btc_get_bt_coex_supported_feature)(void *btcoexist);
+ u32 (*btc_get_bt_coex_supported_version)(void *btcoexist);
+ u8 (*btc_get_ant_det_val_from_bt)(void *btcoexist);
+ u8 (*btc_get_ble_scan_type_from_bt)(void *btcoexist);
+ u32 (*btc_get_ble_scan_para_from_bt)(void *btcoexist, u8 scan_type);
+ bool (*btc_get_bt_afh_map_from_bt)(void *btcoexist, u8 map_type,
+ u8 *afh_map);
};
bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
-extern struct btc_coexist gl_bt_coexist;
+#define rtl_btc_coexist(rtlpriv) \
+ ((struct btc_coexist *)((rtlpriv)->btcoexist.btc_context))
+#define rtl_btc_wifi_only(rtlpriv) \
+ ((struct wifi_only_cfg *)((rtlpriv)->btcoexist.wifi_only_context))
-bool exhalbtc_initlize_variables(void);
+struct wifi_only_cfg;
+
+bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv);
+bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv);
bool exhalbtc_bind_bt_coex_withadapter(void *adapter);
+void exhalbtc_power_on_setting(struct btc_coexist *btcoexist);
+void exhalbtc_pre_load_firmware(struct btc_coexist *btcoexist);
void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only);
+void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg);
void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+ u8 is_5g);
void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
enum rt_media_status media_status);
void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
u8 length);
+void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
@@ -619,18 +670,63 @@ void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist);
void exhalbtc_periodical(struct btc_coexist *btcoexist);
void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
u8 *data);
+void exhalbtc_antenna_detection(struct btc_coexist *btcoexist, u32 cent_freq,
+ u32 offset, u32 span, u32 seconds);
void exhalbtc_stack_update_profile_info(void);
-void exhalbtc_set_hci_version(u16 hci_version);
-void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
-void exhalbtc_update_min_bt_rssi(s8 bt_rssi);
-void exhalbtc_set_bt_exist(bool bt_exist);
-void exhalbtc_set_chip_type(u8 chip_type);
+void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version);
+void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist,
+ u16 bt_hci_version, u16 bt_patch_version);
+void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi);
+void exhalbtc_set_bt_exist(struct btc_coexist *btcoexist, bool bt_exist);
+void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type);
void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
+void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+ u8 is_5g);
void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
u8 *rssi_wifi, u8 *rssi_bt);
void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
-void exhalbtc_set_single_ant_path(u8 single_ant_path);
+void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist,
+ u8 single_ant_path);
+
+/* The following are used by wifi_only case */
+enum wifionly_chip_interface {
+ WIFIONLY_INTF_UNKNOWN = 0,
+ WIFIONLY_INTF_PCI = 1,
+ WIFIONLY_INTF_USB = 2,
+ WIFIONLY_INTF_SDIO = 3,
+ WIFIONLY_INTF_MAX
+};
+
+enum wifionly_customer_id {
+ CUSTOMER_NORMAL = 0,
+ CUSTOMER_HP_1 = 1,
+};
+
+struct wifi_only_haldata {
+ u16 customer_id;
+ u8 efuse_pg_antnum;
+ u8 efuse_pg_antpath;
+ u8 rfe_type;
+ u8 ant_div_cfg;
+};
+
+struct wifi_only_cfg {
+ void *adapter;
+ struct wifi_only_haldata haldata_info;
+ enum wifionly_chip_interface chip_interface;
+};
+
+static inline
+void halwifionly_phy_set_bb_reg(struct wifi_only_cfg *wifionly_cfg,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = (struct rtl_priv *)wifionly_cfg->adapter;
+
+ rtl_set_bbreg(rtlpriv->hw, regaddr, bitmask, data);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index 7d296a401b6f..cce4a37ea408 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -31,11 +31,16 @@
static struct rtl_btc_ops rtl_btc_operation = {
.btc_init_variables = rtl_btc_init_variables,
+ .btc_init_variables_wifi_only = rtl_btc_init_variables_wifi_only,
+ .btc_deinit_variables = rtl_btc_deinit_variables,
.btc_init_hal_vars = rtl_btc_init_hal_vars,
+ .btc_power_on_setting = rtl_btc_power_on_setting,
.btc_init_hw_config = rtl_btc_init_hw_config,
+ .btc_init_hw_config_wifi_only = rtl_btc_init_hw_config_wifi_only,
.btc_ips_notify = rtl_btc_ips_notify,
.btc_lps_notify = rtl_btc_lps_notify,
.btc_scan_notify = rtl_btc_scan_notify,
+ .btc_scan_notify_wifi_only = rtl_btc_scan_notify_wifi_only,
.btc_connect_notify = rtl_btc_connect_notify,
.btc_mediastatus_notify = rtl_btc_mediastatus_notify,
.btc_periodical = rtl_btc_periodical,
@@ -46,63 +51,143 @@ static struct rtl_btc_ops rtl_btc_operation = {
.btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
.btc_is_bt_disabled = rtl_btc_is_bt_disabled,
.btc_special_packet_notify = rtl_btc_special_packet_notify,
+ .btc_switch_band_notify = rtl_btc_switch_band_notify,
+ .btc_switch_band_notify_wifi_only = rtl_btc_switch_band_notify_wifionly,
.btc_record_pwr_mode = rtl_btc_record_pwr_mode,
.btc_get_lps_val = rtl_btc_get_lps_val,
.btc_get_rpwm_val = rtl_btc_get_rpwm_val,
.btc_is_bt_ctrl_lps = rtl_btc_is_bt_ctrl_lps,
.btc_is_bt_lps_on = rtl_btc_is_bt_lps_on,
.btc_get_ampdu_cfg = rtl_btc_get_ampdu_cfg,
+ .btc_display_bt_coex_info = rtl_btc_display_bt_coex_info,
};
+void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m)
+{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist) {
+ seq_puts(m, "btc_coexist context is NULL!\n");
+ return;
+ }
+
+ exhalbtc_display_bt_coex_info(btcoexist, m);
+}
+
void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
u8 safe_len;
- safe_len = sizeof(gl_bt_coexist.pwr_mode_val);
+ if (!btcoexist)
+ return;
+
+ safe_len = sizeof(btcoexist->pwr_mode_val);
if (safe_len > len)
safe_len = len;
- memcpy(gl_bt_coexist.pwr_mode_val, buf, safe_len);
+ memcpy(btcoexist->pwr_mode_val, buf, safe_len);
}
u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.lps_val;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return 0;
+
+ return btcoexist->bt_info.lps_val;
}
u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.rpwm_val;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return 0;
+
+ return btcoexist->bt_info.rpwm_val;
}
bool rtl_btc_is_bt_ctrl_lps(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.bt_ctrl_lps;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return false;
+
+ return btcoexist->bt_info.bt_ctrl_lps;
}
bool rtl_btc_is_bt_lps_on(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.bt_lps_on;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return false;
+
+ return btcoexist->bt_info.bt_lps_on;
}
void rtl_btc_get_ampdu_cfg(struct rtl_priv *rtlpriv, u8 *reject_agg,
u8 *ctrl_agg_size, u8 *agg_size)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist) {
+ if (reject_agg)
+ *reject_agg = false;
+ if (ctrl_agg_size)
+ *ctrl_agg_size = false;
+ return;
+ }
+
if (reject_agg)
- *reject_agg = gl_bt_coexist.bt_info.reject_agg_pkt;
+ *reject_agg = btcoexist->bt_info.reject_agg_pkt;
if (ctrl_agg_size)
- *ctrl_agg_size = gl_bt_coexist.bt_info.bt_ctrl_agg_buf_size;
+ *ctrl_agg_size = btcoexist->bt_info.bt_ctrl_agg_buf_size;
if (agg_size)
- *agg_size = gl_bt_coexist.bt_info.agg_buf_size;
+ *agg_size = btcoexist->bt_info.agg_buf_size;
+}
+
+static void rtl_btc_alloc_variable(struct rtl_priv *rtlpriv, bool wifi_only)
+{
+ if (wifi_only)
+ rtlpriv->btcoexist.wifi_only_context =
+ kzalloc(sizeof(struct wifi_only_cfg), GFP_KERNEL);
+ else
+ rtlpriv->btcoexist.btc_context =
+ kzalloc(sizeof(struct btc_coexist), GFP_KERNEL);
+}
+
+static void rtl_btc_free_variable(struct rtl_priv *rtlpriv)
+{
+ kfree(rtlpriv->btcoexist.btc_context);
+ rtlpriv->btcoexist.btc_context = NULL;
+
+ kfree(rtlpriv->btcoexist.wifi_only_context);
+ rtlpriv->btcoexist.wifi_only_context = NULL;
}
void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
{
- exhalbtc_initlize_variables();
+ rtl_btc_alloc_variable(rtlpriv, false);
+
+ exhalbtc_initlize_variables(rtlpriv);
exhalbtc_bind_bt_coex_withadapter(rtlpriv);
}
+void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv)
+{
+ rtl_btc_alloc_variable(rtlpriv, true);
+
+ exhalbtc_initlize_variables_wifi_only(rtlpriv);
+}
+
+void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv)
+{
+ rtl_btc_free_variable(rtlpriv);
+}
+
void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
{
/* move ant_num, bt_type and single_ant_path to
@@ -110,67 +195,155 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
*/
}
+void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv)
+{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_power_on_setting(btcoexist);
+}
+
void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
u8 bt_exist;
bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"%s, bt_exist is %d\n", __func__, bt_exist);
- exhalbtc_init_hw_config(&gl_bt_coexist, !bt_exist);
- exhalbtc_init_coex_dm(&gl_bt_coexist);
+ if (!btcoexist)
+ return;
+
+ exhalbtc_init_hw_config(btcoexist, !bt_exist);
+ exhalbtc_init_coex_dm(btcoexist);
+}
+
+void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv)
+{
+ struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+
+ if (!wifionly_cfg)
+ return;
+
+ exhalbtc_init_hw_config_wifi_only(wifionly_cfg);
}
void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
{
- exhalbtc_ips_notify(&gl_bt_coexist, type);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_ips_notify(btcoexist, type);
+
+ if (type == ERFON) {
+ /* In some situations the driver doesn't scan after leaving IPS,
+ * which leaves btcoex in the wrong state.
+ */
+ exhalbtc_scan_notify(btcoexist, 1);
+ exhalbtc_scan_notify(btcoexist, 0);
+ }
}
void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type)
{
- exhalbtc_lps_notify(&gl_bt_coexist, type);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_lps_notify(btcoexist, type);
}
void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
{
- exhalbtc_scan_notify(&gl_bt_coexist, scantype);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_scan_notify(btcoexist, scantype);
+}
+
+void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+ u8 is_5g = (rtlhal->current_bandtype == BAND_ON_5G);
+
+ if (!wifionly_cfg)
+ return;
+
+ exhalbtc_scan_notify_wifi_only(wifionly_cfg, is_5g);
}
void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
{
- exhalbtc_connect_notify(&gl_bt_coexist, action);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_connect_notify(btcoexist, action);
}
void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
enum rt_media_status mstatus)
{
- exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_mediastatus_notify(btcoexist, mstatus);
}
void rtl_btc_periodical(struct rtl_priv *rtlpriv)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
/*rtl_bt_dm_monitor();*/
- exhalbtc_periodical(&gl_bt_coexist);
+ exhalbtc_periodical(btcoexist);
}
-void rtl_btc_halt_notify(void)
+void rtl_btc_halt_notify(struct rtl_priv *rtlpriv)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
exhalbtc_halt_notify(btcoexist);
}
void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
{
- exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_bt_info_notify(btcoexist, tmp_buf, length);
}
void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
u8 extid, seq, len;
u16 bt_real_fw_ver;
u8 bt_fw_ver;
+ u8 *data;
+
+ if (!btcoexist)
+ return;
if ((length < 4) || (!tmp_buf))
return;
@@ -182,20 +355,70 @@ void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
len = tmp_buf[1] >> 4;
seq = tmp_buf[2] >> 4;
+ data = &tmp_buf[3];
/* BT Firmware version response */
- if (seq == 0x0E) {
+ switch (seq) {
+ case BT_SEQ_GET_BT_VERSION:
bt_real_fw_ver = tmp_buf[3] | (tmp_buf[4] << 8);
bt_fw_ver = tmp_buf[5];
- gl_bt_coexist.bt_info.bt_real_fw_ver = bt_real_fw_ver;
- gl_bt_coexist.bt_info.bt_fw_ver = bt_fw_ver;
+ btcoexist->bt_info.bt_real_fw_ver = bt_real_fw_ver;
+ btcoexist->bt_info.bt_fw_ver = bt_fw_ver;
+ break;
+ case BT_SEQ_GET_AFH_MAP_L:
+ btcoexist->bt_info.afh_map_l = le32_to_cpu(*(__le32 *)data);
+ break;
+ case BT_SEQ_GET_AFH_MAP_M:
+ btcoexist->bt_info.afh_map_m = le32_to_cpu(*(__le32 *)data);
+ break;
+ case BT_SEQ_GET_AFH_MAP_H:
+ btcoexist->bt_info.afh_map_h = le16_to_cpu(*(__le16 *)data);
+ break;
+ case BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE:
+ btcoexist->bt_info.bt_supported_feature = tmp_buf[3] |
+ (tmp_buf[4] << 8);
+ break;
+ case BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION:
+ btcoexist->bt_info.bt_supported_version = tmp_buf[3] |
+ (tmp_buf[4] << 8);
+ break;
+ case BT_SEQ_GET_BT_ANT_DET_VAL:
+ btcoexist->bt_info.bt_ant_det_val = tmp_buf[3];
+ break;
+ case BT_SEQ_GET_BT_BLE_SCAN_PARA:
+ btcoexist->bt_info.bt_ble_scan_para = tmp_buf[3] |
+ (tmp_buf[4] << 8) |
+ (tmp_buf[5] << 16) |
+ (tmp_buf[6] << 24);
+ break;
+ case BT_SEQ_GET_BT_BLE_SCAN_TYPE:
+ btcoexist->bt_info.bt_ble_scan_type = tmp_buf[3];
+ break;
+ case BT_SEQ_GET_BT_DEVICE_INFO:
+ btcoexist->bt_info.bt_device_info =
+ le32_to_cpu(*(__le32 *)data);
+ break;
+ case BT_SEQ_GET_BT_FORB_SLOT_VAL:
+ btcoexist->bt_info.bt_forb_slot_val =
+ le32_to_cpu(*(__le32 *)data);
+ break;
}
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "btmpinfo complete req_num=%d\n", seq);
+
+ complete(&btcoexist->bt_mp_comp);
}
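
For reference, the C2H reply layout decoded above, with offsets taken from the code (field names are descriptive, not from the patch):

	/* tmp_buf[0]        extension id
	 * tmp_buf[1] >> 4   payload length
	 * tmp_buf[2] >> 4   req_num echoed back from the H2C request
	 * tmp_buf[3]...     little-endian payload, e.g. an AFH fragment
	 */

The 4-bit req_num is the only correlation between request and reply, so every getter that sends an MP operation with a non-zero wait blocks on bt_mp_comp until this handler runs and completes it.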
bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.limited_dig;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return false;
+
+ return btcoexist->bt_info.limited_dig;
}
bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
@@ -227,8 +450,13 @@ bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return true;
+
/* It seems 'bt_disabled' is never initialized or set. */
- if (gl_bt_coexist.bt_info.bt_disabled)
+ if (btcoexist->bt_info.bt_disabled)
return true;
else
return false;
@@ -236,7 +464,50 @@ bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type)
{
- return exhalbtc_special_packet_notify(&gl_bt_coexist, pkt_type);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ return exhalbtc_special_packet_notify(btcoexist, pkt_type);
+}
+
+void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type,
+ bool scanning)
+{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+ u8 type = BTC_NOT_SWITCH;
+
+ if (!btcoexist)
+ return;
+
+ switch (band_type) {
+ case BAND_ON_2_4G:
+ if (scanning)
+ type = BTC_SWITCH_TO_24G;
+ else
+ type = BTC_SWITCH_TO_24G_NOFORSCAN;
+ break;
+
+ case BAND_ON_5G:
+ type = BTC_SWITCH_TO_5G;
+ break;
+ }
+
+ if (type != BTC_NOT_SWITCH)
+ exhalbtc_switch_band_notify(btcoexist, type);
+}
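
The mapping implemented above, as a sketch table:

	band_type     scanning   type passed to exhalbtc_switch_band_notify()
	BAND_ON_2_4G  true       BTC_SWITCH_TO_24G
	BAND_ON_2_4G  false      BTC_SWITCH_TO_24G_NOFORSCAN
	BAND_ON_5G    either     BTC_SWITCH_TO_5G
	other         either     BTC_NOT_SWITCH (no notification sent)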
+
+void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type,
+ bool scanning)
+{
+ struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+ u8 is_5g = (band_type == BAND_ON_5G);
+
+ if (!wifionly_cfg)
+ return;
+
+ exhalbtc_switch_band_notify_wifi_only(wifionly_cfg, is_5g);
}
struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
index ac1253c46f44..8c996055de71 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
@@ -28,22 +28,32 @@
#include "halbt_precomp.h"
void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
+void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv);
+void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv);
void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
+void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv);
void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv);
void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type);
void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
+void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype);
void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
enum rt_media_status mstatus);
void rtl_btc_periodical(struct rtl_priv *rtlpriv);
-void rtl_btc_halt_notify(void);
+void rtl_btc_halt_notify(struct rtl_priv *rtlpriv);
void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length);
void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length);
bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type);
+void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type,
+ bool scanning);
+void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type,
+ bool scanning);
+void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m);
void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv);
u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv);
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 3cb88825473e..cfea57efa7f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -345,9 +345,9 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtlpriv->locks.conf_mutex);
/* Free beacon resources */
- if ((vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
if (mac->beacon_enabled == 1) {
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -858,8 +858,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
* here just used for linked scanning, & linked
* and nolink check bssid is set in set network_type
*/
- if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
- (mac->link_state >= MAC80211_LINKED)) {
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC &&
+ mac->link_state >= MAC80211_LINKED) {
if (mac->opmode != NL80211_IFTYPE_AP &&
mac->opmode != NL80211_IFTYPE_MESH_POINT) {
if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
@@ -1044,10 +1044,10 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
mutex_lock(&rtlpriv->locks.conf_mutex);
- if ((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
- if ((changed & BSS_CHANGED_BEACON) ||
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ if (changed & BSS_CHANGED_BEACON ||
(changed & BSS_CHANGED_BEACON_ENABLED &&
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
@@ -1162,6 +1162,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
"BSS_CHANGED_ASSOC\n");
} else {
+ struct cfg80211_bss *bss = NULL;
+
mstatus = RT_MEDIA_DISCONNECT;
if (mac->link_state == MAC80211_LINKED)
@@ -1169,6 +1171,22 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
+
+ bss = cfg80211_get_bss(hw->wiphy, NULL,
+ (u8 *)mac->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_OFF);
+
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid = %pMF\n", mac->bssid);
+
+ if (bss) {
+ cfg80211_unlink_bss(hw->wiphy, bss);
+ cfg80211_put_bss(hw->wiphy, bss);
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "cfg80211_unlink !!\n");
+ }
+
eth_zero_addr(mac->bssid);
mac->vendor = PEER_UNKNOWN;
mac->mode = 0;
@@ -1435,6 +1453,9 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 1);
+ else if (rtlpriv->btcoexist.btc_ops)
+ rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv,
+ 1);
if (rtlpriv->dm.supp_phymode_switch) {
if (rtlpriv->cfg->ops->chk_switch_dmdp)
@@ -1490,6 +1511,9 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 0);
+ else if (rtlpriv->btcoexist.btc_ops)
+ rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv,
+ 0);
}
static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -1513,9 +1537,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -ENOSPC; /*User disabled HW-crypto */
}
/* To support IBSS, use sw-crypto for GTK */
- if (((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -ENOSPC;
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"%s hardware based encryption for keyidx: %d, mac: %pM\n",
@@ -1588,7 +1612,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->cfg->ops->enable_hw_sec(hw);
}
} else {
- if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) ||
+ if (!group_key || vif->type == NL80211_IFTYPE_ADHOC ||
rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
if (rtlpriv->sec.pairwise_enc_algorithm ==
NO_ENCRYPTION &&
@@ -1775,7 +1799,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
break;
case PWR_CMD_WRITE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
+ "%s(): PWR_CMD_WRITE\n", __func__);
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
/*Read the value from system register*/
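Two details of the core.c changes above are worth a note. First, %pM prints a MAC address colon-separated, and the F suffix used in the new trace selects dash separators instead. Second, the disconnect path pairs three cfg80211 calls: a lookup that takes a reference, an unlink that removes the now-stale entry from the scan list, and a put that drops the lookup's reference. The same pattern in isolation (a sketch; wiphy and bssid here are placeholder variables):

struct cfg80211_bss *bss;

bss = cfg80211_get_bss(wiphy, NULL, bssid, NULL, 0,
		       IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_OFF);
if (bss) {
	cfg80211_unlink_bss(wiphy, bss);	/* drop from scan results */
	cfg80211_put_bss(wiphy, bss);		/* release lookup reference */
}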
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 38fef6dbb44b..d70385be9976 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -23,15 +23,17 @@
*****************************************************************************/
#include "wifi.h"
+#include "cam.h"
#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_RTLWIFI_DEBUG
void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- (level <= rtlpriv->cfg->mod_params->debug_level))) {
+ level <= rtlpriv->cfg->mod_params->debug_level)) {
struct va_format vaf;
va_list args;
@@ -51,7 +53,7 @@ void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- (level <= rtlpriv->cfg->mod_params->debug_level))) {
+ level <= rtlpriv->cfg->mod_params->debug_level)) {
struct va_format vaf;
va_list args;
@@ -81,4 +83,481 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
}
EXPORT_SYMBOL_GPL(_rtl_dbg_print_data);
+struct rtl_debugfs_priv {
+ struct rtl_priv *rtlpriv;
+ int (*cb_read)(struct seq_file *m, void *v);
+ ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *loff);
+ u32 cb_data;
+};
+
+static struct dentry *debugfs_topdir;
+
+static int rtl_debug_get_common(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+
+ return debugfs_priv->cb_read(m, v);
+}
+
+static int dl_debug_open_common(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_debug_get_common, inode->i_private);
+}
+
+static const struct file_operations file_ops_common = {
+ .open = dl_debug_open_common,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_debug_get_mac_page(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ u32 page = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0xff;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = { \
+ .cb_read = rtl_debug_get_mac_page, \
+ .cb_data = addr, \
+}
+
+RTL_DEBUG_IMPL_MAC_SERIES(0, 0x0000);
+RTL_DEBUG_IMPL_MAC_SERIES(1, 0x0100);
+RTL_DEBUG_IMPL_MAC_SERIES(2, 0x0200);
+RTL_DEBUG_IMPL_MAC_SERIES(3, 0x0300);
+RTL_DEBUG_IMPL_MAC_SERIES(4, 0x0400);
+RTL_DEBUG_IMPL_MAC_SERIES(5, 0x0500);
+RTL_DEBUG_IMPL_MAC_SERIES(6, 0x0600);
+RTL_DEBUG_IMPL_MAC_SERIES(7, 0x0700);
+RTL_DEBUG_IMPL_MAC_SERIES(10, 0x1000);
+RTL_DEBUG_IMPL_MAC_SERIES(11, 0x1100);
+RTL_DEBUG_IMPL_MAC_SERIES(12, 0x1200);
+RTL_DEBUG_IMPL_MAC_SERIES(13, 0x1300);
+RTL_DEBUG_IMPL_MAC_SERIES(14, 0x1400);
+RTL_DEBUG_IMPL_MAC_SERIES(15, 0x1500);
+RTL_DEBUG_IMPL_MAC_SERIES(16, 0x1600);
+RTL_DEBUG_IMPL_MAC_SERIES(17, 0x1700);
+
+static int rtl_debug_get_bb_page(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ struct ieee80211_hw *hw = rtlpriv->hw;
+ u32 page = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0xff;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+#define RTL_DEBUG_IMPL_BB_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = { \
+ .cb_read = rtl_debug_get_bb_page, \
+ .cb_data = addr, \
+}
+
+RTL_DEBUG_IMPL_BB_SERIES(8, 0x0800);
+RTL_DEBUG_IMPL_BB_SERIES(9, 0x0900);
+RTL_DEBUG_IMPL_BB_SERIES(a, 0x0a00);
+RTL_DEBUG_IMPL_BB_SERIES(b, 0x0b00);
+RTL_DEBUG_IMPL_BB_SERIES(c, 0x0c00);
+RTL_DEBUG_IMPL_BB_SERIES(d, 0x0d00);
+RTL_DEBUG_IMPL_BB_SERIES(e, 0x0e00);
+RTL_DEBUG_IMPL_BB_SERIES(f, 0x0f00);
+RTL_DEBUG_IMPL_BB_SERIES(18, 0x1800);
+RTL_DEBUG_IMPL_BB_SERIES(19, 0x1900);
+RTL_DEBUG_IMPL_BB_SERIES(1a, 0x1a00);
+RTL_DEBUG_IMPL_BB_SERIES(1b, 0x1b00);
+RTL_DEBUG_IMPL_BB_SERIES(1c, 0x1c00);
+RTL_DEBUG_IMPL_BB_SERIES(1d, 0x1d00);
+RTL_DEBUG_IMPL_BB_SERIES(1e, 0x1e00);
+RTL_DEBUG_IMPL_BB_SERIES(1f, 0x1f00);
+
+static int rtl_debug_get_reg_rf(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ struct ieee80211_hw *hw = rtlpriv->hw;
+ enum radio_path rfpath = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0x40;
+
+ if (IS_HARDWARE_TYPE_8822B(rtlpriv))
+ max = 0xff;
+
+ seq_printf(m, "\nPATH(%d)", rfpath);
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n);
+ for (i = 0; i < 4 && n <= max; n += 1, i++)
+ seq_printf(m, "%8.8x ",
+ rtl_get_rfreg(hw, rfpath, n, 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+#define RTL_DEBUG_IMPL_RF_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = { \
+ .cb_read = rtl_debug_get_reg_rf, \
+ .cb_data = addr, \
+}
+
+RTL_DEBUG_IMPL_RF_SERIES(a, RF90_PATH_A);
+RTL_DEBUG_IMPL_RF_SERIES(b, RF90_PATH_B);
+
+static int rtl_debug_get_cam_register(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ int start = debugfs_priv->cb_data;
+ u32 target_cmd = 0;
+ u32 target_val = 0;
+ u8 entry_i = 0;
+ u32 ulstatus;
+ int i = 100, j = 0;
+ int end = (start + 11 > TOTAL_CAM_ENTRY ? TOTAL_CAM_ENTRY : start + 11);
+
+ /* This dumps the current register page */
+ seq_printf(m,
+ "\n#################### SECURITY CAM (%d-%d) ##################\n",
+ start, end - 1);
+
+ for (j = start; j < end; j++) {
+ seq_printf(m, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ /* polling bit, no write-enable, and address */
+ target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+ target_cmd = target_cmd | BIT(31);
+
+ /* Wait for the polling bit to clear */
+ while ((i--) >= 0) {
+ ulstatus =
+ rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RWCAM]);
+ if (ulstatus & BIT(31))
+ continue;
+ else
+ break;
+ }
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_cmd);
+ target_val = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RCAMO]);
+ seq_printf(m, "%8.8x ", target_val);
+ }
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = { \
+ .cb_read = rtl_debug_get_cam_register, \
+ .cb_data = addr, \
+}
+
+RTL_DEBUG_IMPL_CAM_SERIES(1, 0);
+RTL_DEBUG_IMPL_CAM_SERIES(2, 11);
+RTL_DEBUG_IMPL_CAM_SERIES(3, 22);
+
+static int rtl_debug_get_btcoex(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_display_bt_coex_info(rtlpriv,
+ m);
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_btcoex = {
+ .cb_read = rtl_debug_get_btcoex,
+ .cb_data = 0,
+};
+
+static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ char tmp[32 + 1];
+ int tmp_len;
+ u32 addr, val, len;
+ int num;
+
+ if (count < 3)
+ return -EFAULT;
+
+ tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+ if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+ return count;
+
+ tmp[tmp_len] = '\0';
+
+ /* write BB/MAC register */
+ num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+
+ if (num != 3)
+ return count;
+
+ switch (len) {
+ case 1:
+ rtl_write_byte(rtlpriv, addr, (u8)val);
+ break;
+ case 2:
+ rtl_write_word(rtlpriv, addr, (u16)val);
+ break;
+ case 4:
+ rtl_write_dword(rtlpriv, addr, val);
+ break;
+ default:
+ /*printk("error write length=%d", len);*/
+ break;
+ }
+
+ return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_reg = {
+ .cb_write = rtl_debugfs_set_write_reg,
+};
+
+static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ struct ieee80211_hw *hw = rtlpriv->hw;
+ char tmp[32 + 1];
+ int tmp_len;
+ u8 h2c_len, h2c_data_packed[8];
+ int h2c_data[8]; /* idx 0: cmd */
+ int i;
+
+ if (count < 3)
+ return -EFAULT;
+
+ tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+ if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+ return count;
+
+ tmp[tmp_len] = '\0';
+
+ h2c_len = sscanf(tmp, "%X %X %X %X %X %X %X %X",
+ &h2c_data[0], &h2c_data[1],
+ &h2c_data[2], &h2c_data[3],
+ &h2c_data[4], &h2c_data[5],
+ &h2c_data[6], &h2c_data[7]);
+
+ if (h2c_len <= 0)
+ return count;
+
+ for (i = 0; i < h2c_len; i++)
+ h2c_data_packed[i] = (u8)h2c_data[i];
+
+ rtlpriv->cfg->ops->fill_h2c_cmd(hw, h2c_data_packed[0],
+ h2c_len - 1,
+ &h2c_data_packed[1]);
+
+ return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_h2c = {
+ .cb_write = rtl_debugfs_set_write_h2c,
+};
+
+static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ struct ieee80211_hw *hw = rtlpriv->hw;
+ char tmp[32 + 1];
+ int tmp_len;
+ int num;
+ int path;
+ u32 addr, bitmask, data;
+
+ if (count < 3)
+ return -EFAULT;
+
+ tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+ if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+ return count;
+
+ tmp[tmp_len] = '\0';
+
+ num = sscanf(tmp, "%X %X %X %X",
+ &path, &addr, &bitmask, &data);
+
+ if (num != 4) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
+ "Format is <path> <addr> <mask> <data>\n");
+ return count;
+ }
+
+ rtl_set_rfreg(hw, path, addr, bitmask, data);
+
+ return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_rfreg = {
+ .cb_write = rtl_debugfs_set_write_rfreg,
+};
+
+static int rtl_debugfs_close(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static ssize_t rtl_debugfs_common_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+
+ return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static const struct file_operations file_ops_common_write = {
+ .owner = THIS_MODULE,
+ .write = rtl_debugfs_common_write,
+ .open = simple_open,
+ .release = rtl_debugfs_close,
+};
+
+#define RTL_DEBUGFS_ADD_CORE(name, mode, fopname) \
+ do { \
+ rtl_debug_priv_ ##name.rtlpriv = rtlpriv; \
+ if (!debugfs_create_file(#name, mode, \
+ parent, &rtl_debug_priv_ ##name, \
+ &file_ops_ ##fopname)) \
+ pr_err("Unable to initialize debugfs:%s/%s\n", \
+ rtlpriv->dbg.debugfs_name, \
+ #name); \
+ } while (0)
+
+#define RTL_DEBUGFS_ADD(name) \
+ RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0444, common)
+#define RTL_DEBUGFS_ADD_W(name) \
+ RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0222, common_write)
+
+void rtl_debug_add_one(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct dentry *parent;
+
+ snprintf(rtlpriv->dbg.debugfs_name, 18, "%pMF", rtlefuse->dev_addr);
+
+ rtlpriv->dbg.debugfs_dir =
+ debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir);
+ if (!rtlpriv->dbg.debugfs_dir) {
+ pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name,
+ rtlpriv->dbg.debugfs_name);
+ return;
+ }
+
+ parent = rtlpriv->dbg.debugfs_dir;
+
+ RTL_DEBUGFS_ADD(mac_0);
+ RTL_DEBUGFS_ADD(mac_1);
+ RTL_DEBUGFS_ADD(mac_2);
+ RTL_DEBUGFS_ADD(mac_3);
+ RTL_DEBUGFS_ADD(mac_4);
+ RTL_DEBUGFS_ADD(mac_5);
+ RTL_DEBUGFS_ADD(mac_6);
+ RTL_DEBUGFS_ADD(mac_7);
+ RTL_DEBUGFS_ADD(bb_8);
+ RTL_DEBUGFS_ADD(bb_9);
+ RTL_DEBUGFS_ADD(bb_a);
+ RTL_DEBUGFS_ADD(bb_b);
+ RTL_DEBUGFS_ADD(bb_c);
+ RTL_DEBUGFS_ADD(bb_d);
+ RTL_DEBUGFS_ADD(bb_e);
+ RTL_DEBUGFS_ADD(bb_f);
+ RTL_DEBUGFS_ADD(mac_10);
+ RTL_DEBUGFS_ADD(mac_11);
+ RTL_DEBUGFS_ADD(mac_12);
+ RTL_DEBUGFS_ADD(mac_13);
+ RTL_DEBUGFS_ADD(mac_14);
+ RTL_DEBUGFS_ADD(mac_15);
+ RTL_DEBUGFS_ADD(mac_16);
+ RTL_DEBUGFS_ADD(mac_17);
+ RTL_DEBUGFS_ADD(bb_18);
+ RTL_DEBUGFS_ADD(bb_19);
+ RTL_DEBUGFS_ADD(bb_1a);
+ RTL_DEBUGFS_ADD(bb_1b);
+ RTL_DEBUGFS_ADD(bb_1c);
+ RTL_DEBUGFS_ADD(bb_1d);
+ RTL_DEBUGFS_ADD(bb_1e);
+ RTL_DEBUGFS_ADD(bb_1f);
+ RTL_DEBUGFS_ADD(rf_a);
+ RTL_DEBUGFS_ADD(rf_b);
+
+ RTL_DEBUGFS_ADD(cam_1);
+ RTL_DEBUGFS_ADD(cam_2);
+ RTL_DEBUGFS_ADD(cam_3);
+
+ RTL_DEBUGFS_ADD(btcoex);
+
+ RTL_DEBUGFS_ADD_W(write_reg);
+ RTL_DEBUGFS_ADD_W(write_h2c);
+ RTL_DEBUGFS_ADD_W(write_rfreg);
+}
+EXPORT_SYMBOL_GPL(rtl_debug_add_one);
+
+void rtl_debug_remove_one(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ debugfs_remove_recursive(rtlpriv->dbg.debugfs_dir);
+ rtlpriv->dbg.debugfs_dir = NULL;
+}
+EXPORT_SYMBOL_GPL(rtl_debug_remove_one);
+
+void rtl_debugfs_add_topdir(void)
+{
+ debugfs_topdir = debugfs_create_dir("rtlwifi", NULL);
+}
+
+void rtl_debugfs_remove_topdir(void)
+{
+ debugfs_remove_recursive(debugfs_topdir);
+}
+
#endif
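A minimal user-space sketch of driving the new write_reg file, assuming CONFIG_RTLWIFI_DEBUG is enabled and debugfs is mounted at /sys/kernel/debug. The MAC-address directory name and the register 0x4c4 are hypothetical; the three hex fields follow the "%x %x %x" sscanf above (<addr> <value> <len>, with len being 1, 2 or 4). Note the 18-byte name buffer in rtl_debug_add_one() exactly fits a dash-separated %pMF MAC (17 characters) plus the terminator.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* directory name is the device MAC printed with %pMF (dashes) */
	const char *path =
		"/sys/kernel/debug/rtlwifi/00-e0-4c-00-00-01/write_reg";
	const char *cmd = "0x4c4 0x5 1\n";	/* write one byte to 0x4c4 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}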
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index 947718001457..ad6834af618b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -219,4 +219,16 @@ static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv,
}
#endif
+
+#ifdef CONFIG_RTLWIFI_DEBUG
+void rtl_debug_add_one(struct ieee80211_hw *hw);
+void rtl_debug_remove_one(struct ieee80211_hw *hw);
+void rtl_debugfs_add_topdir(void);
+void rtl_debugfs_remove_topdir(void);
+#else
+#define rtl_debug_add_one(hw)
+#define rtl_debug_remove_one(hw)
+#define rtl_debugfs_add_topdir()
+#define rtl_debugfs_remove_topdir()
+#endif
#endif
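The read-only files declared in debug.c all follow the seq_file single_open() pattern: each file's show callback receives the dentry's i_private back as m->private, which is how rtl_debug_get_common() recovers its struct rtl_debugfs_priv. A self-contained sketch of the same pattern (module and file names hypothetical); worth noting that single_open() is conventionally paired with single_release(), which frees the seq_operations that single_open() allocates on the fly:

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from debugfs\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* i_private comes back to demo_show() as m->private */
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.open = demo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct dentry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("state", 0444, demo_dir, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");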
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index ef9acd466cca..35b50be633f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -257,11 +257,11 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
sizeof(u8), GFP_ATOMIC);
if (!efuse_tbl)
return;
- efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
+ efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC);
if (!efuse_word)
goto out;
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- efuse_word[i] = kzalloc(efuse_max_section * sizeof(u16),
+ efuse_word[i] = kcalloc(efuse_max_section, sizeof(u16),
GFP_ATOMIC);
if (!efuse_word[i])
goto done;
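The kzalloc(n * size) to kcalloc(n, size) conversions above are more than style: kcalloc() checks the n * size multiplication for overflow and returns NULL when it would wrap, instead of allocating a truncated buffer that the subsequent loop would overrun. A sketch of the idiom:

/* zeroed, overflow-checked array allocation */
u16 **words = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(*words), GFP_ATOMIC);

if (!words)
	goto out;	/* failure and n * size overflow both return NULL */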
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index c2575b0b9440..01ccf8884831 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -59,6 +59,7 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
__le16 fc = rtl_get_fc(skb);
u8 queue_index = skb_get_queue_mapping(skb);
+ struct ieee80211_hdr *hdr;
if (unlikely(ieee80211_is_beacon(fc)))
return BEACON_QUEUE;
@@ -67,6 +68,13 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
if (ieee80211_is_nullfunc(fc))
return HIGH_QUEUE;
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
+ hdr = rtl_get_hdr(skb);
+
+ if (is_multicast_ether_addr(hdr->addr1) ||
+ is_broadcast_ether_addr(hdr->addr1))
+ return HIGH_QUEUE;
+ }
return ac_to_hwq[queue_index];
}
@@ -557,13 +565,6 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
else
entry = (u8 *)(&ring->desc[ring->idx]);
- if (rtlpriv->cfg->ops->get_available_desc &&
- rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
- "no available desc!\n");
- return;
- }
-
if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
return;
ring->idx = (ring->idx + 1) % ring->entries;
@@ -747,7 +748,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
u8 tmp_one;
bool unicast = false;
u8 hw_queue = 0;
- unsigned int rx_remained_cnt;
+ unsigned int rx_remained_cnt = 0;
struct rtl_stats stats = {
.signal = 0,
.rate = 0,
@@ -768,7 +769,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
struct sk_buff *new_skb;
if (rtlpriv->use_new_trx_flow) {
- rx_remained_cnt =
+ if (rx_remained_cnt == 0)
+ rx_remained_cnt =
rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
hw_queue);
if (rx_remained_cnt == 0)
@@ -924,10 +926,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
unsigned long flags;
- u32 inta = 0;
- u32 intb = 0;
- u32 intc = 0;
- u32 intd = 0;
+ struct rtl_int intvec = {0};
+
irqreturn_t ret = IRQ_HANDLED;
if (rtlpci->irq_enabled == 0)
@@ -937,47 +937,47 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->cfg->ops->disable_interrupt(hw);
/* read ISR: 4/8 bytes */
- rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb, &intc, &intd);
+ rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec);
/*Shared IRQ or HW disappeared */
- if (!inta || inta == 0xffff)
+ if (!intvec.inta || intvec.inta == 0xffff)
goto done;
/*<1> beacon related */
- if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"beacon ok interrupt!\n");
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
+ if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"beacon err interrupt!\n");
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"prepare beacon for interrupt!\n");
tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
}
/*<2> Tx related */
- if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
+ if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
- if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"Manage ok interrupt!\n");
_rtl_pci_tx_isr(hw, MGNT_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"HIGH_QUEUE ok interrupt!\n");
_rtl_pci_tx_isr(hw, HIGH_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -985,7 +985,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
_rtl_pci_tx_isr(hw, BK_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -993,7 +993,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
_rtl_pci_tx_isr(hw, BE_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1001,7 +1001,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
_rtl_pci_tx_isr(hw, VI_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1010,7 +1010,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
- if (intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
+ if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1020,7 +1020,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
- if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1030,25 +1030,25 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
}
/*<3> Rx related */
- if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
_rtl_pci_rx_interrupt(hw);
}
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
+ if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"rx descriptor unavailable!\n");
_rtl_pci_rx_interrupt(hw);
}
- if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
+ if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
_rtl_pci_rx_interrupt(hw);
}
/*<4> fw related*/
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
- if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"firmware interrupt!\n");
queue_delayed_work(rtlpriv->works.rtl_wq,
@@ -1064,7 +1064,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
*/
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
+ if (unlikely(intvec.inta &
+ rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"hsisr interrupt!\n");
_rtl_pci_hs_interrupt(hw);
@@ -1250,7 +1251,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
rtlpci->tx_ring[prio].cur_tx_rp = 0;
rtlpci->tx_ring[prio].cur_tx_wp = 0;
- rtlpci->tx_ring[prio].avl_desc = entries;
}
/* alloc dma for this ring */
@@ -1555,7 +1555,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
dev_kfree_skb_irq(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
+
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpci->tx_ring[i].cur_tx_rp = 0;
+ rtlpci->tx_ring[i].cur_tx_wp = 0;
+ }
+
ring->idx = 0;
+ ring->entries = rtlpci->txringcount[i];
}
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
@@ -1787,6 +1794,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
int err;
@@ -1796,9 +1804,12 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
if (rtlpriv->cfg->ops->get_btc_status &&
rtlpriv->cfg->ops->get_btc_status()) {
rtlpriv->btcoexist.btc_info.ap_num = 36;
- rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
- rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
+ btc_ops->btc_init_variables(rtlpriv);
+ btc_ops->btc_init_hal_vars(rtlpriv);
+ } else if (btc_ops) {
+ btc_ops->btc_init_variables_wifi_only(rtlpriv);
}
+
err = rtlpriv->cfg->ops->hw_init(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
@@ -1834,7 +1845,10 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
u8 rf_timeout = 0;
if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_halt_notify();
+ rtlpriv->btcoexist.btc_ops->btc_halt_notify(rtlpriv);
+
+ if (rtlpriv->btcoexist.btc_ops)
+ rtlpriv->btcoexist.btc_ops->btc_deinit_variables(rtlpriv);
/* should be done before disabling the interrupt and adapter,
 * and will do it immediately.
@@ -2302,6 +2316,9 @@ int rtl_pci_probe(struct pci_dev *pdev,
}
rtlpriv->mac80211.mac80211_registered = 1;
+ /* add debugfs entries */
+ rtl_debug_add_one(hw);
+
/*init rfkill */
rtl_init_rfkill(hw); /* Init PCI sw */
@@ -2350,6 +2367,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
wait_for_completion(&rtlpriv->firmware_loading_complete);
clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ /* remove debugfs entries */
+ rtl_debug_remove_one(hw);
+
/*ieee80211_unregister_hw will call ops_stop */
if (rtlmac->mac80211_registered == 1) {
ieee80211_unregister_hw(hw);
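The interrupt-vector refactor threaded through pci.c and the per-chip hw.c files below replaces four u32 out-parameters with a single struct. Its definition lives in wifi.h, which is outside this excerpt; the shape below is reconstructed from the fields the ISR uses (inta, intb, intd), so treat it as a sketch rather than the actual definition:

/* sketch only: reconstructed from usage, not copied from wifi.h */
struct rtl_int {
	u32 inta;
	u32 intb;
	u32 intc;
	u32 intd;
};

With this, `struct rtl_int intvec = {0};` zero-initializes all four vectors at once, and chips that only deliver inta/intb simply leave the rest at zero.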
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index e7d070e8da2d..3fb56c845a61 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -173,7 +173,6 @@ struct rtl8192_tx_ring {
/*add for new trx flow*/
struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
dma_addr_t buffer_desc_dma; /* tx buffer desc dma memory */
- u16 avl_desc; /* available_desc_to_write */
u16 cur_tx_wp; /* current_tx_write_point */
u16 cur_tx_rp; /* current_tx_read_point */
};
@@ -320,10 +319,10 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
writel(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
}
-static inline u16 calc_fifo_space(u16 rp, u16 wp)
+static inline u16 calc_fifo_space(u16 rp, u16 wp, u16 size)
{
if (rp <= wp)
- return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
+ return size - 1 + rp - wp;
return rp - wp - 1;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 24c87fae5382..71af24e2e051 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -51,6 +51,11 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
&rtlmac->retry_long);
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ rtlpriv->cfg->ops->switch_channel(hw);
+ rtlpriv->cfg->ops->set_channel_access(hw);
+ rtlpriv->cfg->ops->set_bw_mode(hw,
+ cfg80211_get_chandef_type(&hw->conf.chandef));
+
/*<3> Enable Interrupt */
rtlpriv->cfg->ops->enable_interrupt(hw);
@@ -289,7 +294,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
- spin_lock(&rtlpriv->locks.ips_lock);
+ mutex_lock(&rtlpriv->locks.ips_mutex);
if (ppsc->inactiveps) {
rtstate = ppsc->rfpwr_state;
@@ -306,7 +311,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
ppsc->inactive_pwrstate);
}
}
- spin_unlock(&rtlpriv->locks.ips_lock);
+ mutex_unlock(&rtlpriv->locks.ips_mutex);
}
EXPORT_SYMBOL_GPL(rtl_ips_nic_on);
@@ -415,7 +420,6 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
- unsigned long flag;
if (!ppsc->fwctrl_lps)
return;
@@ -436,7 +440,7 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ mutex_lock(&rtlpriv->locks.lps_mutex);
/* Don't need to check (ppsc->dot11_psmode == EACTIVE), because
* bt_ccoexist may ask to enter lps.
@@ -446,7 +450,7 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
"Enter 802.11 power save mode...\n");
rtl_lps_set_psmode(hw, EAUTOPS);
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ mutex_unlock(&rtlpriv->locks.lps_mutex);
}
/* Interrupt safe routine to leave the leisure power save mode.*/
@@ -455,9 +459,8 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- unsigned long flag;
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ mutex_lock(&rtlpriv->locks.lps_mutex);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -478,7 +481,7 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ mutex_unlock(&rtlpriv->locks.lps_mutex);
}
/* For sw LPS*/
@@ -568,7 +571,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- unsigned long flag;
if (!rtlpriv->psc.swctrl_lps)
return;
@@ -581,9 +583,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ mutex_lock(&rtlpriv->locks.lps_mutex);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ mutex_unlock(&rtlpriv->locks.lps_mutex);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -600,7 +602,6 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- unsigned long flag;
u8 sleep_intv;
if (!rtlpriv->psc.sw_ps_enabled)
@@ -624,9 +625,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
}
spin_unlock(&rtlpriv->locks.rf_ps_lock);
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ mutex_lock(&rtlpriv->locks.lps_mutex);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ mutex_unlock(&rtlpriv->locks.lps_mutex);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
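The lock conversions above (the ips_lock/lps_lock spinlocks become ips_mutex/lps_mutex) matter because the power-save paths can sleep, for example while waiting on synchronous firmware commands; sleeping is forbidden inside a spinlock's critical section but fine under a mutex. A minimal sketch of the distinction (lock names hypothetical):

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(lps_mutex);
static DEFINE_SPINLOCK(rf_ps_lock);

static void lps_enter_sketch(void)
{
	mutex_lock(&lps_mutex);
	/* may call functions that sleep, e.g. blocking fw commands */
	mutex_unlock(&lps_mutex);

	spin_lock(&rf_ps_lock);
	/* must not sleep: no blocking allocs, no waiting on fw */
	spin_unlock(&rf_ps_lock);
}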
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 02811eda57cd..d1cb7d405618 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -123,7 +123,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sta && (sta->ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (sta && (sta->vht_cap.vht_supported))
+ if (sta && sta->vht_cap.vht_supported)
rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
} else {
if (mac->bw_40)
@@ -135,8 +135,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sgi_20 || sgi_40 || sgi_80)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
if (sta && sta->ht_cap.ht_supported &&
- ((wireless_mode == WIRELESS_MODE_N_5G) ||
- (wireless_mode == WIRELESS_MODE_N_24G)))
+ (wireless_mode == WIRELESS_MODE_N_5G ||
+ wireless_mode == WIRELESS_MODE_N_24G))
rate->flags |= IEEE80211_TX_RC_MCS;
if (sta && sta->vht_cap.vht_supported &&
(wireless_mode == WIRELESS_MODE_AC_5G ||
@@ -216,8 +216,8 @@ static void rtl_tx_status(void *ppriv,
if (sta) {
/* Check if aggregation has to be enabled for this tid */
- sta_entry = (struct rtl_sta_info *) sta->drv_priv;
- if ((sta->ht_cap.ht_supported) &&
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ if (sta->ht_cap.ht_supported &&
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 tid = rtl_get_tid(skb);
@@ -265,11 +265,9 @@ static void *rtl_rate_alloc_sta(void *ppriv,
struct rtl_priv *rtlpriv = ppriv;
struct rtl_rate_priv *rate_priv;
- rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
- if (!rate_priv) {
- pr_err("Unable to allocate private rc structure\n");
+ rate_priv = kzalloc(sizeof(*rate_priv), gfp);
+ if (!rate_priv)
return NULL;
- }
rtlpriv->rate_priv = rate_priv;
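Two checkpatch-driven idioms appear in the rc.c hunk above: kzalloc(sizeof(*ptr), ...) sizes the allocation from the pointee so the type is spelled only once, and the failure printk is dropped because the page allocator already warns (with a stack trace) on failure unless __GFP_NOWARN is passed. A sketch:

struct rtl_rate_priv *rate_priv;

rate_priv = kzalloc(sizeof(*rate_priv), gfp);	/* type spelled once */
if (!rate_priv)
	return NULL;	/* allocator already logged the failure */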
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index a2eca669873b..63874512598b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -141,6 +141,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
return 1;
pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
+ rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
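The two lines added above read the version out of the firmware header. The header is stored little-endian on disk, so the 16-bit field must pass through le16_to_cpu() to be correct on big-endian hosts, while the 8-bit subversion needs no swap. A sketch of the pattern, with a hypothetical trimmed-down header type:

#include <linux/kernel.h>
#include <linux/types.h>

struct fw_header_sketch {	/* hypothetical subset of the real header */
	__le16 version;
	u8 subversion;
};

static void read_fw_version(const struct fw_header_sketch *hdr,
			    u16 *version, u8 *subversion)
{
	*version = le16_to_cpu(hdr->version);	/* swaps on BE hosts only */
	*subversion = hdr->subversion;		/* single byte: no swap */
}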
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index e30a18e64ff5..988d5ac57d02 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1472,17 +1472,16 @@ void rtl88ee_card_disable(struct ieee80211_hw *hw)
}
void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
index cdf49de1e6ed..214cd2a32018 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
@@ -29,8 +29,7 @@
void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw);
void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl88ee_hw_init(struct ieee80211_hw *hw);
void rtl88ee_card_disable(struct ieee80211_hw *hw);
void rtl88ee_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 0f4c86a28716..4a81e0ef4b8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1375,19 +1375,13 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw)
}
void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
-
- /*
- * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
- */
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
}
void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
index b5c8e2fc1ba2..6711ea1a75d9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
@@ -42,8 +42,7 @@ static inline u8 rtl92c_get_chnl_group(u8 chnl)
void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl92ce_hw_init(struct ieee80211_hw *hw);
void rtl92ce_card_disable(struct ieee80211_hw *hw);
void rtl92ce_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 0da6c0136857..80123fd97221 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1356,19 +1356,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
}
void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
-
- /*
- * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
- */
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
}
void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
index 9236aa91273d..e6c702e69ecf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
@@ -29,8 +29,7 @@
void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl92de_hw_init(struct ieee80211_hw *hw);
void rtl92de_card_disable(struct ieee80211_hw *hw);
void rtl92de_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index fe5da637e77a..fd7928fdbd1a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -1694,17 +1694,16 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw)
}
void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
}
void rtl92ee_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
index cd6d3322f033..3a63bec9b0cc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
@@ -29,8 +29,7 @@
void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl92ee_hw_init(struct ieee80211_hw *hw);
void rtl92ee_card_disable(struct ieee80211_hw *hw);
void rtl92ee_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 12255682e890..4f7444331b07 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -498,7 +498,8 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
if (!start_rx)
return 0;
- remind_cnt = calc_fifo_space(read_point, write_point);
+ remind_cnt = calc_fifo_space(read_point, write_point,
+ RTL_PCI_MAX_RX_COUNT);
if (remind_cnt == 0)
return 0;
@@ -548,7 +549,6 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
u16 point_diff = 0;
u16 current_tx_read_point = 0, current_tx_write_point = 0;
@@ -560,9 +560,9 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
point_diff = calc_fifo_space(current_tx_read_point,
- current_tx_write_point);
+ current_tx_write_point,
+ TX_DESC_NUM_92E);
- rtlpci->tx_ring[q_idx].avl_desc = point_diff;
return point_diff;
}
@@ -907,10 +907,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u16 cur_tx_rp = 0;
- u16 cur_tx_wp = 0;
- static bool over_run;
- u32 tmp = 0;
u8 q_idx = *val;
bool dma64 = rtlpriv->cfg->mod_params->dma64;
@@ -931,38 +927,12 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
return;
}
+ /* caller must make sure a tx desc is available */
ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc);
- if (over_run) {
- ring->cur_tx_wp = 0;
- over_run = false;
- }
- if (ring->avl_desc > 1) {
- ring->avl_desc--;
-
- rtl_write_word(rtlpriv,
- get_desc_addr_fr_q_idx(q_idx),
- ring->cur_tx_wp);
- }
-
- if (ring->avl_desc < (max_tx_desc - 15)) {
- u16 point_diff = 0;
-
- tmp =
- rtl_read_dword(rtlpriv,
- get_desc_addr_fr_q_idx(q_idx));
- cur_tx_rp = (u16)((tmp >> 16) & 0x0fff);
- cur_tx_wp = (u16)(tmp & 0x0fff);
-
- ring->cur_tx_wp = cur_tx_wp;
- ring->cur_tx_rp = cur_tx_rp;
- point_diff = ((cur_tx_rp > cur_tx_wp) ?
- (cur_tx_rp - cur_tx_wp) :
- (TX_DESC_NUM_92E - 1 -
- cur_tx_wp + cur_tx_rp));
-
- ring->avl_desc = point_diff;
- }
+ rtl_write_word(rtlpriv,
+ get_desc_addr_fr_q_idx(q_idx),
+ ring->cur_tx_wp);
}
break;
}
@@ -1044,13 +1014,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u16 read_point, write_point, available_desc_num;
+ u16 read_point, write_point;
bool ret = false;
static u8 stop_report_cnt;
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
{
- u16 point_diff = 0;
u16 cur_tx_rp, cur_tx_wp;
u32 tmpu32 = 0;
@@ -1060,18 +1029,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff);
cur_tx_wp = (u16)(tmpu32 & 0x0fff);
- ring->cur_tx_wp = cur_tx_wp;
+ /* don't need to update ring->cur_tx_wp */
ring->cur_tx_rp = cur_tx_rp;
- point_diff = ((cur_tx_rp > cur_tx_wp) ?
- (cur_tx_rp - cur_tx_wp) :
- (TX_DESC_NUM_92E - cur_tx_wp + cur_tx_rp));
-
- ring->avl_desc = point_diff;
}
read_point = ring->cur_tx_rp;
write_point = ring->cur_tx_wp;
- available_desc_num = ring->avl_desc;
if (write_point > read_point) {
if (index < write_point && index >= read_point)
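The trx.c changes above remove the software avl_desc mirror and the over_run special case: afterwards the hardware read/write pointers are the single source of truth, re-read from the queue's pointer register whenever availability is needed. That register packs both pointers into one dword; a sketch of the unpacking the code uses:

u32 tmp = rtl_read_dword(rtlpriv, get_desc_addr_fr_q_idx(q_idx));
u16 cur_tx_rp = (u16)((tmp >> 16) & 0x0fff);	/* read pointer, bits 27..16 */
u16 cur_tx_wp = (u16)(tmp & 0x0fff);		/* write pointer, bits 11..0 */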
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 76bf089cced4..30dea7b9bc17 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1558,17 +1558,17 @@ void rtl92se_card_disable(struct ieee80211_hw *hw)
udelay(100);
}
-void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta,
- u32 *p_intb, u32 *p_intc, u32 *p_intd)
+void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, ISR + 4, intvec->intb);
}
void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
index 607056010974..fa836ceefc8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
@@ -42,8 +42,7 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw,
u8 variable, u8 *val);
void rtl92se_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl92se_hw_init(struct ieee80211_hw *hw);
void rtl92se_card_disable(struct ieee80211_hw *hw);
void rtl92se_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index c3f98d58124c..545115db507e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1340,14 +1340,13 @@ void rtl8723e_card_disable(struct ieee80211_hw *hw)
}
void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, 0x3a0, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, 0x3a0, intvec->inta);
}
void rtl8723e_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
index 19e467a37c72..c76e453f4f43 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
@@ -34,8 +34,7 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw);
void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl8723e_hw_init(struct ieee80211_hw *hw);
void rtl8723e_card_disable(struct ieee80211_hw *hw);
void rtl8723e_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 7cd1ffa7d4a7..f9ccd13c79f9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -43,6 +43,7 @@
#include "../pwrseqcmd.h"
#include "pwrseq.h"
#include "../btcoexist/rtl_btc.h"
+#include <linux/kernel.h>
#define LLT_CONFIG 5
@@ -1682,18 +1683,17 @@ void rtl8723be_card_disable(struct ieee80211_hw *hw)
}
void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
- rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) &
+ rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
}
void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw)
@@ -2127,28 +2127,28 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
if (rtlhal->oem_id == RT_CID_DEFAULT) {
/* Does this one have a Toshiba SMID from group 1? */
- for (i = 0; i < sizeof(toshiba_smid1) / sizeof(u16); i++) {
+ for (i = 0; i < ARRAY_SIZE(toshiba_smid1); i++) {
if (rtlefuse->eeprom_smid == toshiba_smid1[i]) {
is_toshiba_smid1 = true;
break;
}
}
/* Does this one have a Toshiba SMID from group 2? */
- for (i = 0; i < sizeof(toshiba_smid2) / sizeof(u16); i++) {
+ for (i = 0; i < ARRAY_SIZE(toshiba_smid2); i++) {
if (rtlefuse->eeprom_smid == toshiba_smid2[i]) {
is_toshiba_smid2 = true;
break;
}
}
/* Does this one have a Samsung SMID? */
- for (i = 0; i < sizeof(samsung_smid) / sizeof(u16); i++) {
+ for (i = 0; i < ARRAY_SIZE(samsung_smid); i++) {
if (rtlefuse->eeprom_smid == samsung_smid[i]) {
is_samsung_smid = true;
break;
}
}
/* Does this one have a Lenovo SMID? */
- for (i = 0; i < sizeof(lenovo_smid) / sizeof(u16); i++) {
+ for (i = 0; i < ARRAY_SIZE(lenovo_smid); i++) {
if (rtlefuse->eeprom_smid == lenovo_smid[i]) {
is_lenovo_smid = true;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
index 2215a792f6bf..ae856a19e81a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
@@ -30,8 +30,7 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);
void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl8723be_hw_init(struct ieee80211_hw *hw);
void rtl8723be_card_disable(struct ieee80211_hw *hw);
void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 9606641519e7..1263b12db5dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -35,6 +35,7 @@
#include "../rtl8723com/dm_common.h"
#include "table.h"
#include "trx.h"
+#include <linux/kernel.h>
static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw);
static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
@@ -1143,14 +1144,13 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
DESC92C_RATEMCS2, DESC92C_RATEMCS3,
DESC92C_RATEMCS4, DESC92C_RATEMCS5,
DESC92C_RATEMCS6, DESC92C_RATEMCS7};
- u8 i, size;
+ u8 i;
u8 power_index;
if (!rtlefuse->txpwr_fromeprom)
return;
- size = sizeof(cck_rates) / sizeof(u8);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < ARRAY_SIZE(cck_rates); i++) {
power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
cck_rates[i],
rtl_priv(hw)->phy.current_chan_bw,
@@ -1158,8 +1158,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
_rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
cck_rates[i]);
}
- size = sizeof(ofdm_rates) / sizeof(u8);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < ARRAY_SIZE(ofdm_rates); i++) {
power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
ofdm_rates[i],
rtl_priv(hw)->phy.current_chan_bw,
@@ -1167,8 +1166,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
_rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
ofdm_rates[i]);
}
- size = sizeof(ht_rates_1t) / sizeof(u8);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < ARRAY_SIZE(ht_rates_1t); i++) {
power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
ht_rates_1t[i],
rtl_priv(hw)->phy.current_chan_bw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
index 381c16b9b3a9..160fee8333ae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
@@ -25,6 +25,7 @@
*
*****************************************************************************/
+#include <linux/kernel.h>
#include "table.h"
u32 RTL8723BEPHY_REG_1TARRAY[] = {
@@ -224,8 +225,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = {
};
-u32 RTL8723BEPHY_REG_1TARRAYLEN =
- sizeof(RTL8723BEPHY_REG_1TARRAY) / sizeof(u32);
+u32 RTL8723BEPHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8723BEPHY_REG_1TARRAY);
u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800,
@@ -236,8 +236,7 @@ u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
};
-u32 RTL8723BEPHY_REG_ARRAY_PGLEN =
- sizeof(RTL8723BEPHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8723BEPHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8723BEPHY_REG_ARRAY_PG);
u32 RTL8723BE_RADIOA_1TARRAY[] = {
0x000, 0x00010000,
@@ -373,8 +372,7 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = {
};
-u32 RTL8723BE_RADIOA_1TARRAYLEN =
- sizeof(RTL8723BE_RADIOA_1TARRAY) / sizeof(u32);
+u32 RTL8723BE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8723BE_RADIOA_1TARRAY);
u32 RTL8723BEMAC_1T_ARRAY[] = {
0x02F, 0x00000030,
@@ -483,7 +481,7 @@ u32 RTL8723BEMAC_1T_ARRAY[] = {
};
-u32 RTL8723BEMAC_1T_ARRAYLEN = sizeof(RTL8723BEMAC_1T_ARRAY) / sizeof(u32);
+u32 RTL8723BEMAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8723BEMAC_1T_ARRAY);
u32 RTL8723BEAGCTAB_1TARRAY[] = {
0xC78, 0xFD000001,
@@ -620,4 +618,4 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = {
};
-u32 RTL8723BEAGCTAB_1TARRAYLEN = sizeof(RTL8723BEAGCTAB_1TARRAY) / sizeof(u32);
+u32 RTL8723BEAGCTAB_1TARRAYLEN = ARRAY_SIZE(RTL8723BEAGCTAB_1TARRAY);
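ARRAY_SIZE(), from <linux/kernel.h>, replaces the hand-written sizeof division throughout these table files. Beyond brevity it is safer in two ways: the element size is taken from the array itself, so changing u32 to u64 cannot silently halve the computed count, and the kernel's definition refuses to compile when handed a pointer instead of an array. A sketch with a made-up table:

#include <linux/kernel.h>

static u32 demo_tab[] = { 0x02F, 0x00000030, 0x035, 0x00000040 };
static const u32 demo_tab_len = ARRAY_SIZE(demo_tab);	/* 4 entries */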
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index efa7e1262461..521039c5d4ce 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -196,10 +196,12 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
enum version_8723e version = rtlhal->version;
int max_page;
- if (!rtlhal->pfirmware)
+ if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
return 1;
pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
+ rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 43e18c4c1e68..f20e77b4bb65 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2483,17 +2483,16 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
}
void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
}
void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
index 284d259fe557..e2ab783a2ad9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
@@ -30,8 +30,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw);
void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl8821ae_hw_init(struct ieee80211_hw *hw);
void rtl8821ae_card_disable(struct ieee80211_hw *hw);
void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
index 408c4611e5de..f87f9d03b9fa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
@@ -24,7 +24,7 @@
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
-
+#include <linux/kernel.h>
#include "table.h"
u32 RTL8812AE_PHY_REG_ARRAY[] = {
0x800, 0x8020D010,
@@ -258,8 +258,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0xEB8, 0x00508242,
};
-u32 RTL8812AE_PHY_REG_1TARRAYLEN =
- sizeof(RTL8812AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY);
u32 RTL8821AE_PHY_REG_ARRAY[] = {
0x800, 0x0020D090,
@@ -436,8 +435,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
0xCB8, 0x00508240,
};
-u32 RTL8821AE_PHY_REG_1TARRAYLEN =
- sizeof(RTL8821AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY);
u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
@@ -488,8 +486,7 @@ u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
};
-u32 RTL8812AE_PHY_REG_ARRAY_PGLEN =
- sizeof(RTL8812AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY_PG);
u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
@@ -509,8 +506,7 @@ u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
};
-u32 RTL8821AE_PHY_REG_ARRAY_PGLEN =
- sizeof(RTL8821AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY_PG);
u32 RTL8812AE_RADIOA_ARRAY[] = {
0x000, 0x00010000,
@@ -927,7 +923,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x018, 0x0001712A,
};
-u32 RTL8812AE_RADIOA_1TARRAYLEN = sizeof(RTL8812AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOA_ARRAY);
u32 RTL8812AE_RADIOB_ARRAY[] = {
0x056, 0x00051CF2,
@@ -1335,7 +1331,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x008, 0x00008400,
};
-u32 RTL8812AE_RADIOB_1TARRAYLEN = sizeof(RTL8812AE_RADIOB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOB_ARRAY);
u32 RTL8821AE_RADIOA_ARRAY[] = {
0x018, 0x0001712A,
@@ -1929,7 +1925,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
};
-u32 RTL8821AE_RADIOA_1TARRAYLEN = sizeof(RTL8821AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8821AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_RADIOA_ARRAY);
u32 RTL8812AE_MAC_REG_ARRAY[] = {
0x010, 0x0000000C,
@@ -2041,7 +2037,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
0x718, 0x00000040,
};
-u32 RTL8812AE_MAC_1T_ARRAYLEN = sizeof(RTL8812AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
u32 RTL8821AE_MAC_REG_ARRAY[] = {
0x428, 0x0000000A,
@@ -2143,7 +2139,7 @@ u32 RTL8821AE_MAC_REG_ARRAY[] = {
0x718, 0x00000040,
};
-u32 RTL8821AE_MAC_1T_ARRAYLEN = sizeof(RTL8821AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8821AE_MAC_REG_ARRAY);
u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0x80000001, 0x00000000, 0x40000000, 0x00000000,
@@ -2479,8 +2475,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0xE50, 0x00000020,
};
-u32 RTL8812AE_AGC_TAB_1TARRAYLEN =
- sizeof(RTL8812AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_AGC_TAB_ARRAY);
u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0xBF000001,
@@ -2676,8 +2671,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0xC50, 0x00000020,
};
-u32 RTL8821AE_AGC_TAB_1TARRAYLEN =
- sizeof(RTL8821AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY);
/******************************************************************************
* TXPWR_LMT.TXT
@@ -3250,7 +3244,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
"MKK", "5G", "80M", "VHT", "2T", "155", "63"
};
-u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8812AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT);
u8 *RTL8821AE_TXPWR_LMT[] = {
"FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
@@ -3819,4 +3813,4 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
"MKK", "5G", "80M", "VHT", "2T", "155", "63"
};
-u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8821AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8821AE_TXPWR_LMT);
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 39b033b3b53a..ce3103bb8ebb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -962,7 +962,6 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
- rtl_ips_nic_on(hw);
}
if (rtlpriv->psc.sw_ps_enabled) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 92d4859ec906..a7aacbc3984e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -873,6 +873,24 @@ enum ratr_table_mode {
RATR_INX_WIRELESS_AC_24N = 9,
};
+enum ratr_table_mode_new {
+ RATEID_IDX_BGN_40M_2SS = 0,
+ RATEID_IDX_BGN_40M_1SS = 1,
+ RATEID_IDX_BGN_20M_2SS_BN = 2,
+ RATEID_IDX_BGN_20M_1SS_BN = 3,
+ RATEID_IDX_GN_N2SS = 4,
+ RATEID_IDX_GN_N1SS = 5,
+ RATEID_IDX_BG = 6,
+ RATEID_IDX_G = 7,
+ RATEID_IDX_B = 8,
+ RATEID_IDX_VHT_2SS = 9,
+ RATEID_IDX_VHT_1SS = 10,
+ RATEID_IDX_MIX1 = 11,
+ RATEID_IDX_MIX2 = 12,
+ RATEID_IDX_VHT_3SS = 13,
+ RATEID_IDX_BGN_3SS = 14,
+};
+
enum rtl_link_state {
MAC80211_NOLINK = 0,
MAC80211_LINKING = 1,
@@ -931,6 +949,10 @@ enum package_type {
PACKAGE_TFBGA79
};
+enum rtl_spec_ver {
+ RTL_SPEC_NEW_RATEID = BIT(0), /* use ratr_table_mode_new */
+};
+
struct octet_string {
u8 *octet;
u16 length;
@@ -2093,14 +2115,21 @@ struct rtl_wow_pattern {
u32 mask[4];
};
+/* struct to store contents of interrupt vectors */
+struct rtl_int {
+ u32 inta;
+ u32 intb;
+ u32 intc;
+ u32 intd;
+};
+
struct rtl_hal_ops {
int (*init_sw_vars) (struct ieee80211_hw *hw);
void (*deinit_sw_vars) (struct ieee80211_hw *hw);
void (*read_chip_version)(struct ieee80211_hw *hw);
void (*read_eeprom_info) (struct ieee80211_hw *hw);
void (*interrupt_recognized) (struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *intvec);
int (*hw_init) (struct ieee80211_hw *hw);
void (*hw_disable) (struct ieee80211_hw *hw);
void (*hw_suspend) (struct ieee80211_hw *hw);
@@ -2308,6 +2337,7 @@ struct rtl_hal_cfg {
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
+ enum rtl_spec_ver spec_ver;
/*this map used for some registers or vars
defined int HAL but used in MAIN */
@@ -2318,17 +2348,14 @@ struct rtl_hal_cfg {
struct rtl_locks {
/* mutex */
struct mutex conf_mutex;
- struct mutex ps_mutex;
+ struct mutex ips_mutex; /* mutex for enter/leave IPS */
+ struct mutex lps_mutex; /* mutex for enter/leave LPS */
/*spin lock */
- spinlock_t ips_lock;
spinlock_t irq_th_lock;
- spinlock_t irq_pci_lock;
- spinlock_t tx_lock;
spinlock_t h2c_lock;
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
- spinlock_t lps_lock;
spinlock_t waitq_lock;
spinlock_t entry_list_lock;
spinlock_t usb_lock;
@@ -2341,9 +2368,6 @@ struct rtl_locks {
/*Dual mac*/
spinlock_t cck_and_rw_pagea_lock;
- /*Easy concurrent*/
- spinlock_t check_sendpkt_lock;
-
spinlock_t iqk_lock;
};
@@ -2374,6 +2398,12 @@ struct rtl_works {
struct work_struct fill_h2c_cmd;
};
+struct rtl_debug {
+ /* add for debug */
+ struct dentry *debugfs_dir;
+ char debugfs_name[20];
+};
+
#define MIMO_PS_STATIC 0
#define MIMO_PS_DYNAMIC 1
#define MIMO_PS_NOLIMIT 3
@@ -2493,6 +2523,9 @@ struct rtl_btc_info {
struct bt_coexist_info {
struct rtl_btc_ops *btc_ops;
struct rtl_btc_info btc_info;
+ /* btc context */
+ void *btc_context;
+ void *wifi_only_context;
/* EEPROM BT info. */
u8 eeprom_bt_coexist;
u8 eeprom_bt_type;
@@ -2549,16 +2582,22 @@ struct bt_coexist_info {
struct rtl_btc_ops {
void (*btc_init_variables) (struct rtl_priv *rtlpriv);
+ void (*btc_init_variables_wifi_only)(struct rtl_priv *rtlpriv);
+ void (*btc_deinit_variables)(struct rtl_priv *rtlpriv);
void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+ void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hw_config_wifi_only)(struct rtl_priv *rtlpriv);
void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
+ void (*btc_scan_notify_wifi_only)(struct rtl_priv *rtlpriv,
+ u8 scantype);
void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
enum rt_media_status mstatus);
void (*btc_periodical) (struct rtl_priv *rtlpriv);
- void (*btc_halt_notify) (void);
+ void (*btc_halt_notify)(struct rtl_priv *rtlpriv);
void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
u8 *tmp_buf, u8 length);
void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv,
@@ -2568,6 +2607,12 @@ struct rtl_btc_ops {
bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv,
u8 pkt_type);
+ void (*btc_switch_band_notify)(struct rtl_priv *rtlpriv, u8 type,
+ bool scanning);
+ void (*btc_switch_band_notify_wifi_only)(struct rtl_priv *rtlpriv,
+ u8 type, bool scanning);
+ void (*btc_display_bt_coex_info)(struct rtl_priv *rtlpriv,
+ struct seq_file *m);
void (*btc_record_pwr_mode)(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
u8 (*btc_get_lps_val)(struct rtl_priv *rtlpriv);
u8 (*btc_get_rpwm_val)(struct rtl_priv *rtlpriv);
@@ -2642,6 +2687,7 @@ struct rtl_priv {
/* c2hcmd list for kthread level access */
struct list_head c2hcmd_list;
+ struct rtl_debug dbg;
int max_fw_size;
/*
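[Editor's note] Collapsing the four u32 * out-parameters of interrupt_recognized() into one struct rtl_int shortens every implementation and leaves room for intc/intd on chips that use them. A hedged sketch of a call site after the change (variable names are illustrative; the real caller lives in pci.c, which is not part of this hunk):

	struct rtl_int intvec = { 0 };

	rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec);
	if (intvec.inta & rtlpci->irq_mask[0]) {
		/* dispatch the INTA sources */
	}
	/* intc/intd simply stay zero on chips that never fill them */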
diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index 1d799bffaa9f..e7d77acdf18c 100644
--- a/drivers/net/wireless/ti/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
@@ -310,10 +310,8 @@ static int wl1251_hw_init_data_path_config(struct wl1251 *wl)
/* asking for the data path parameters */
wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp),
GFP_KERNEL);
- if (!wl->data_path) {
- wl1251_error("Couldnt allocate data path parameters");
+ if (!wl->data_path)
return -ENOMEM;
- }
ret = wl1251_acx_data_path_params(wl, wl->data_path);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 6d02c660b4ab..037defd10b91 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
- wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+ ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index a4859993db3c..3ca9167d6146 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -146,7 +146,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG,
feature, sizeof(*feature));
if (ret < 0) {
- wl1271_error("Couldnt set HW encryption");
+ wl1271_error("Couldn't set HW encryption");
goto out;
}
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index f46d7fdf9a00..7011c5d9599f 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1129,10 +1129,8 @@ int wl12xx_acx_config_hangover(struct wl1271 *wl);
int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
s8 *avg_rssi);
-#ifdef CONFIG_PM
int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
enum rx_filter_action action);
int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
struct wl12xx_rx_filter *filter);
-#endif /* CONFIG_PM */
#endif /* __WL1271_ACX_H__ */
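[Editor's note] Dropping the #ifdef CONFIG_PM guards here pairs with the __maybe_unused annotations added in main.c below: the suspend/resume handlers are now always compiled, so bitrot shows up on every build, and the attribute keeps the unused-function warning quiet when nothing references them. The usual shape of the pattern, with invented names:

	static int __maybe_unused foo_op_suspend(struct ieee80211_hw *hw,
						 struct cfg80211_wowlan *wow)
	{
		/* always compiled and type-checked */
		return 0;
	}

	static const struct ieee80211_ops foo_ops = {
	#ifdef CONFIG_PM
		.suspend = foo_op_suspend,	/* referenced only when PM is on */
	#endif
		/* remaining mandatory ops elided from this sketch */
	};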
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index d47921a84509..09714034dbf1 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -42,6 +42,7 @@
#include "sysfs.h"
#define WL1271_BOOT_RETRIES 3
+#define WL1271_SUSPEND_SLEEP 100
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
@@ -388,7 +389,6 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
struct wl12xx_vif *wlvif;
- struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
int i;
@@ -485,8 +485,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
}
/* update the host-chipset time offset */
- getnstimeofday(&ts);
- wl->time_offset = (timespec_to_ns(&ts) >> 10) -
+ wl->time_offset = (ktime_get_boot_ns() >> 10) -
(s64)(status->fw_localtime);
wl->fw_fast_lnk_map = status->link_fast_bitmap;
@@ -979,6 +978,24 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
+static int wlcore_fw_sleep(struct wl1271 *wl)
+{
+ int ret;
+
+ mutex_lock(&wl->mutex);
+ ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto out;
+ }
+ set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+out:
+ mutex_unlock(&wl->mutex);
+ mdelay(WL1271_SUSPEND_SLEEP);
+
+ return 0;
+}
+
static int wl1271_setup(struct wl1271 *wl)
{
wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
@@ -1326,7 +1343,6 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
}
-#ifdef CONFIG_PM
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
@@ -1698,8 +1714,8 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
}
}
-static int wl1271_op_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wow)
+static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wow)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
@@ -1749,7 +1765,6 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
mutex_unlock(&wl->mutex);
if (ret < 0) {
@@ -1782,10 +1797,19 @@ out_sleep:
*/
cancel_delayed_work(&wl->tx_watchdog_work);
+ /*
+ * Use an immediate call to allow the firmware to go into power
+ * save during suspend.
+ * Using a workqueue for this last write meant it only happened on
+ * resume, leaving the firmware with power save disabled during
+ * suspend and consuming full power throughout wowlan suspend.
+ */
+ wlcore_fw_sleep(wl);
+
return 0;
}
-static int wl1271_op_resume(struct ieee80211_hw *hw)
+static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
@@ -1869,7 +1893,6 @@ out:
return 0;
}
-#endif
static int wl1271_op_start(struct ieee80211_hw *hw)
{
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index a3f5e9ca492a..00e9b4624dcf 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -264,7 +264,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, u32 extra,
struct ieee80211_tx_info *control, u8 hlid)
{
- struct timespec ts;
struct wl1271_tx_hw_descr *desc;
int ac, rate_idx;
s64 hosttime;
@@ -287,8 +286,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
/* configure packet life time */
- getnstimeofday(&ts);
- hosttime = (timespec_to_ns(&ts) >> 10);
+ hosttime = (ktime_get_boot_ns() >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
is_dummy = wl12xx_is_dummy_packet(wl, skb);
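[Editor's note] Both timestamp sites now derive host time from the boot clock and shift right by 10, i.e. divide by 1024 as a cheap stand-in for nanoseconds-to-microseconds, matching the units of the firmware's fw_localtime. A condensed sketch of the offset arithmetic, assuming these helper and variable names (fw_localtime stands in for the value read from the firmware status block):

	#include <linux/ktime.h>

	/* ~1 us ticks: ns / 1024 via a shift, as in wlcore_fw_status() */
	static inline s64 wl_host_ticks(void)
	{
		return ktime_get_boot_ns() >> 10;
	}

	/* captured once per firmware status report */
	s64 time_offset = wl_host_ticks() - (s64)fw_localtime;

	/* applied per packet in wl1271_tx_fill_hdr() */
	u32 start_time = (u32)(wl_host_ticks() - time_offset);

Using the boot clock instead of getnstimeofday() also makes the offset immune to wall-clock jumps from settimeofday() or NTP steps.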
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c5a34671abda..9bd7ddeeb6a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
+ xenbus_switch_state(dev, XenbusStateInitialising);
return netdev;
exit:
diff --git a/drivers/nubus/Makefile b/drivers/nubus/Makefile
index 21bda2031e7e..6d063cde39d1 100644
--- a/drivers/nubus/Makefile
+++ b/drivers/nubus/Makefile
@@ -2,6 +2,6 @@
# Makefile for the nubus specific drivers.
#
-obj-y := nubus.o
+obj-y := nubus.o bus.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
new file mode 100644
index 000000000000..d306c348c857
--- /dev/null
+++ b/drivers/nubus/bus.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Bus implementation for the NuBus subsystem.
+//
+// Copyright (C) 2017 Finn Thain
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/nubus.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#define to_nubus_board(d) container_of(d, struct nubus_board, dev)
+#define to_nubus_driver(d) container_of(d, struct nubus_driver, driver)
+
+static int nubus_bus_match(struct device *dev, struct device_driver *driver)
+{
+ return 1;
+}
+
+static int nubus_device_probe(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (ndrv->probe)
+ err = ndrv->probe(to_nubus_board(dev));
+ return err;
+}
+
+static int nubus_device_remove(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (dev->driver && ndrv->remove)
+ err = ndrv->remove(to_nubus_board(dev));
+ return err;
+}
+
+struct bus_type nubus_bus_type = {
+ .name = "nubus",
+ .match = nubus_bus_match,
+ .probe = nubus_device_probe,
+ .remove = nubus_device_remove,
+};
+EXPORT_SYMBOL(nubus_bus_type);
+
+int nubus_driver_register(struct nubus_driver *ndrv)
+{
+ ndrv->driver.bus = &nubus_bus_type;
+ return driver_register(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_register);
+
+void nubus_driver_unregister(struct nubus_driver *ndrv)
+{
+ driver_unregister(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_unregister);
+
+static struct device nubus_parent = {
+ .init_name = "nubus",
+};
+
+int __init nubus_bus_register(void)
+{
+ int err;
+
+ err = device_register(&nubus_parent);
+ if (err)
+ return err;
+
+ err = bus_register(&nubus_bus_type);
+ if (!err)
+ return 0;
+
+ device_unregister(&nubus_parent);
+ return err;
+}
+
+static void nubus_device_release(struct device *dev)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct nubus_rsrc *fres, *tmp;
+
+ list_for_each_entry_safe(fres, tmp, &nubus_func_rsrcs, list)
+ if (fres->board == board) {
+ list_del(&fres->list);
+ kfree(fres);
+ }
+ kfree(board);
+}
+
+int nubus_device_register(struct nubus_board *board)
+{
+ board->dev.parent = &nubus_parent;
+ board->dev.release = nubus_device_release;
+ board->dev.bus = &nubus_bus_type;
+ dev_set_name(&board->dev, "slot.%X", board->slot);
+ return device_register(&board->dev);
+}
+
+static int nubus_print_device_name_fn(struct device *dev, void *data)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct seq_file *m = data;
+
+ seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ return 0;
+}
+
+int nubus_proc_show(struct seq_file *m, void *data)
+{
+ return bus_for_each_dev(&nubus_bus_type, NULL, m,
+ nubus_print_device_name_fn);
+}
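[Editor's note] With the bus type registered, a board driver can attach through nubus_driver_register(). A hypothetical skeleton (driver name and probe body are invented; note that nubus_bus_match() above accepts every board, so a real probe must check what it was handed):

	#include <linux/module.h>
	#include <linux/nubus.h>

	static int example_probe(struct nubus_board *board)
	{
		dev_info(&board->dev, "found %s in slot %X\n",
			 board->name, board->slot);
		return 0;
	}

	static struct nubus_driver example_driver = {
		.probe	= example_probe,
		.driver	= {
			.name	= "nubus_example",
			.owner	= THIS_MODULE,
		},
	};

	module_driver(example_driver, nubus_driver_register,
		      nubus_driver_unregister);

	MODULE_LICENSE("GPL");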
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index b793727cd4f7..4621ff98138c 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -31,8 +32,7 @@
/* Globals */
-struct nubus_dev *nubus_devices;
-struct nubus_board *nubus_boards;
+LIST_HEAD(nubus_func_rsrcs);
/* Meaning of "bytelanes":
@@ -146,7 +146,7 @@ static inline void *nubus_rom_addr(int slot)
return (void *)(0xF1000000 + (slot << 24));
}
-static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
{
unsigned char *p = nd->base;
@@ -161,7 +161,7 @@ static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
pointed to with offsets) out of the card ROM. */
void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
- int len)
+ unsigned int len)
{
unsigned char *t = (unsigned char *)dest;
unsigned char *p = nubus_dirptr(dirent);
@@ -173,21 +173,49 @@ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
}
EXPORT_SYMBOL(nubus_get_rsrc_mem);
-void nubus_get_rsrc_str(void *dest, const struct nubus_dirent *dirent,
- int len)
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len)
{
- unsigned char *t = (unsigned char *)dest;
+ char *t = dest;
unsigned char *p = nubus_dirptr(dirent);
- while (len) {
- *t = nubus_get_rom(&p, 1, dirent->mask);
- if (!*t++)
+ while (len > 1) {
+ unsigned char c = nubus_get_rom(&p, 1, dirent->mask);
+
+ if (!c)
break;
+ *t++ = c;
len--;
}
+ if (len > 0)
+ *t = '\0';
+ return t - dest;
}
EXPORT_SYMBOL(nubus_get_rsrc_str);
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len)
+{
+ unsigned long buf[32];
+ unsigned int buf_size = sizeof(buf);
+ unsigned char *p = nubus_dirptr(dirent);
+
+ /* If possible, write out full buffers */
+ while (len >= buf_size) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ buf[i] = nubus_get_rom(&p, sizeof(buf[0]),
+ dirent->mask);
+ seq_write(m, buf, buf_size);
+ len -= buf_size;
+ }
+ /* If not, write out individual bytes */
+ while (len--)
+ seq_putc(m, nubus_get_rom(&p, 1, dirent->mask));
+}
+
int nubus_get_root_dir(const struct nubus_board *board,
struct nubus_dir *dir)
{
@@ -199,12 +227,11 @@ int nubus_get_root_dir(const struct nubus_board *board,
EXPORT_SYMBOL(nubus_get_root_dir);
/* This is a slyly renamed version of the above */
-int nubus_get_func_dir(const struct nubus_dev *dev,
- struct nubus_dir *dir)
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir)
{
- dir->ptr = dir->base = dev->directory;
+ dir->ptr = dir->base = fres->directory;
dir->done = 0;
- dir->mask = dev->board->lanes;
+ dir->mask = fres->board->lanes;
return 0;
}
EXPORT_SYMBOL(nubus_get_func_dir);
@@ -277,51 +304,20 @@ EXPORT_SYMBOL(nubus_rewinddir);
/* Driver interface functions, more or less like in pci.c */
-struct nubus_dev*
-nubus_find_device(unsigned short category, unsigned short type,
- unsigned short dr_hw, unsigned short dr_sw,
- const struct nubus_dev *from)
-{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type &&
- itor->dr_hw == dr_hw && itor->dr_sw == dr_sw)
- return itor;
- itor = itor->next;
- }
- return NULL;
-}
-EXPORT_SYMBOL(nubus_find_device);
-
-struct nubus_dev*
-nubus_find_type(unsigned short category, unsigned short type,
- const struct nubus_dev *from)
+struct nubus_rsrc *nubus_first_rsrc_or_null(void)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ return list_first_entry_or_null(&nubus_func_rsrcs, struct nubus_rsrc,
+ list);
}
-EXPORT_SYMBOL(nubus_find_type);
+EXPORT_SYMBOL(nubus_first_rsrc_or_null);
-struct nubus_dev*
-nubus_find_slot(unsigned int slot, const struct nubus_dev *from)
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->board->slot == slot)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ if (list_is_last(&from->list, &nubus_func_rsrcs))
+ return NULL;
+ return list_next_entry(from, list);
}
-EXPORT_SYMBOL(nubus_find_slot);
+EXPORT_SYMBOL(nubus_next_rsrc_or_null);
int
nubus_find_rsrc(struct nubus_dir *dir, unsigned char rsrc_type,
@@ -339,31 +335,83 @@ EXPORT_SYMBOL(nubus_find_rsrc);
looking at, and print out lots and lots of information from the
resource blocks. */
-/* FIXME: A lot of this stuff will eventually be useful after
- initialization, for intelligently probing Ethernet and video chips,
- among other things. The rest of it should go in the /proc code.
- For now, we just use it to give verbose boot logs. */
+static int __init nubus_get_block_rsrc_dir(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
-static int __init nubus_show_display_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+ while (nubus_readdir(&dir, &ent) != -1) {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type, size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_vidmode(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
+
+ while (nubus_readdir(&dir, &ent) != -1) {
+ switch (ent.type) {
+ case 1: /* mVidParams */
+ case 2: /* mTable */
+ {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type,
+ size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ break;
+ }
+ default:
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
+ }
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_GAMMADIR:
- pr_info(" gamma directory offset: 0x%06x\n", ent->data);
+ pr_debug(" gamma directory offset: 0x%06x\n", ent->data);
+ nubus_get_block_rsrc_dir(fres->board, procdir, ent);
break;
case 0x0080 ... 0x0085:
- pr_info(" mode %02X info offset: 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" mode 0x%02x info offset: 0x%06x\n",
+ ent->type, ent->data);
+ nubus_get_display_vidmode(fres->board, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_network_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_network_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MAC_ADDRESS:
@@ -371,18 +419,21 @@ static int __init nubus_show_network_resource(struct nubus_dev *dev,
char addr[6];
nubus_get_rsrc_mem(addr, ent, 6);
- pr_info(" MAC address: %pM\n", addr);
+ pr_debug(" MAC address: %pM\n", addr);
+ nubus_proc_add_rsrc_mem(procdir, ent, 6);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_cpu_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MEMINFO:
@@ -390,8 +441,9 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long meminfo[2];
nubus_get_rsrc_mem(&meminfo, ent, 8);
- pr_info(" memory: [ 0x%08lx 0x%08lx ]\n",
- meminfo[0], meminfo[1]);
+ pr_debug(" memory: [ 0x%08lx 0x%08lx ]\n",
+ meminfo[0], meminfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
case NUBUS_RESID_ROMINFO:
@@ -399,57 +451,60 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long rominfo[2];
nubus_get_rsrc_mem(&rominfo, ent, 8);
- pr_info(" ROM: [ 0x%08lx 0x%08lx ]\n",
- rominfo[0], rominfo[1]);
+ pr_debug(" ROM: [ 0x%08lx 0x%08lx ]\n",
+ rominfo[0], rominfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_private_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_private_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
- switch (dev->category) {
+ switch (fres->category) {
case NUBUS_CAT_DISPLAY:
- nubus_show_display_resource(dev, ent);
+ nubus_get_display_resource(fres, procdir, ent);
break;
case NUBUS_CAT_NETWORK:
- nubus_show_network_resource(dev, ent);
+ nubus_get_network_resource(fres, procdir, ent);
break;
case NUBUS_CAT_CPU:
- nubus_show_cpu_resource(dev, ent);
+ nubus_get_cpu_resource(fres, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static struct nubus_dev * __init
+static struct nubus_rsrc * __init
nubus_get_functional_resource(struct nubus_board *board, int slot,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
struct nubus_dirent ent;
- struct nubus_dev *dev;
+ struct nubus_rsrc *fres;
- pr_info(" Function 0x%02x:\n", parent->type);
+ pr_debug(" Functional resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
-
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
/* Actually we should probably panic if this fails */
- if ((dev = kzalloc(sizeof(*dev), GFP_ATOMIC)) == NULL)
+ fres = kzalloc(sizeof(*fres), GFP_ATOMIC);
+ if (!fres)
return NULL;
- dev->resid = parent->type;
- dev->directory = dir.base;
- dev->board = board;
+ fres->resid = parent->type;
+ fres->directory = dir.base;
+ fres->board = board;
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -458,130 +513,96 @@ nubus_get_functional_resource(struct nubus_board *board, int slot,
unsigned short nbtdata[4];
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- dev->category = nbtdata[0];
- dev->type = nbtdata[1];
- dev->dr_sw = nbtdata[2];
- dev->dr_hw = nbtdata[3];
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ fres->category = nbtdata[0];
+ fres->type = nbtdata[1];
+ fres->dr_sw = nbtdata[2];
+ fres->dr_hw = nbtdata[3];
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
{
- nubus_get_rsrc_str(dev->name, &ent, 64);
- pr_info(" name: %s\n", dev->name);
+ char name[64];
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ pr_debug(" name: %s\n", name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
}
case NUBUS_RESID_DRVRDIR:
{
/* MacOS driver. If we were NetBSD we might
use this :-) */
- struct nubus_dir drvr_dir;
- struct nubus_dirent drvr_ent;
-
- nubus_get_subdir(&ent, &drvr_dir);
- nubus_readdir(&drvr_dir, &drvr_ent);
- dev->driver = nubus_dirptr(&drvr_ent);
- pr_info(" driver at: 0x%p\n", dev->driver);
+ pr_debug(" driver directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
}
case NUBUS_RESID_MINOR_BASEOS:
+ {
/* We will need this in order to support
multiple framebuffers. It might be handy
for Ethernet as well */
- nubus_get_rsrc_mem(&dev->iobase, &ent, 4);
- pr_info(" memory offset: 0x%08lx\n", dev->iobase);
+ u32 base_offset;
+
+ nubus_get_rsrc_mem(&base_offset, &ent, 4);
+ pr_debug(" memory offset: 0x%08x\n", base_offset);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_MINOR_LENGTH:
+ {
/* Ditto */
- nubus_get_rsrc_mem(&dev->iosize, &ent, 4);
- pr_info(" memory length: 0x%08lx\n", dev->iosize);
+ u32 length;
+
+ nubus_get_rsrc_mem(&length, &ent, 4);
+ pr_debug(" memory length: 0x%08x\n", length);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_FLAGS:
- dev->flags = ent.data;
- pr_info(" flags: 0x%06x\n", dev->flags);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- dev->hwdevid = ent.data;
- pr_info(" hwdevid: 0x%06x\n", dev->hwdevid);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
/* Local/Private resources have their own
function */
- nubus_show_private_resource(dev, &ent);
+ nubus_get_private_resource(fres, dir.procdir, &ent);
}
}
- return dev;
-}
-
-/* This is cool. */
-static int __init nubus_get_vidnames(struct nubus_board *board,
- const struct nubus_dirent *parent)
-{
- struct nubus_dir dir;
- struct nubus_dirent ent;
-
- /* FIXME: obviously we want to put this in a header file soon */
- struct vidmode {
- u32 size;
- /* Don't know what this is yet */
- u16 id;
- /* Longest one I've seen so far is 26 characters */
- char name[32];
- };
-
- pr_info(" video modes supported:\n");
- nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
-
- while (nubus_readdir(&dir, &ent) != -1) {
- struct vidmode mode;
- u32 size;
-
- /* First get the length */
- nubus_get_rsrc_mem(&size, &ent, 4);
-
- /* Now clobber the whole thing */
- if (size > sizeof(mode) - 1)
- size = sizeof(mode) - 1;
- memset(&mode, 0, sizeof(mode));
- nubus_get_rsrc_mem(&mode, &ent, size);
- pr_info(" %02X: (%02X) %s\n", ent.type,
- mode.id, mode.name);
- }
- return 0;
+ return fres;
}
/* This is *really* cool. */
static int __init nubus_get_icon(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *ent)
{
/* Should be 32x32 if my memory serves me correctly */
- unsigned char icon[128];
- int x, y;
+ u32 icon[32];
+ int i;
nubus_get_rsrc_mem(&icon, ent, 128);
- pr_info(" icon:\n");
-
- /* We should actually plot these somewhere in the framebuffer
- init. This is just to demonstrate that they do, in fact,
- exist */
- for (y = 0; y < 32; y++) {
- pr_info(" ");
- for (x = 0; x < 32; x++) {
- if (icon[y * 4 + x / 8] & (0x80 >> (x % 8)))
- pr_cont("*");
- else
- pr_cont(" ");
- }
- pr_cont("\n");
- }
+ pr_debug(" icon:\n");
+ for (i = 0; i < 8; i++)
+ pr_debug(" %08x %08x %08x %08x\n",
+ icon[i * 4 + 0], icon[i * 4 + 1],
+ icon[i * 4 + 2], icon[i * 4 + 3]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 128);
+
return 0;
}
static int __init nubus_get_vendorinfo(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
@@ -589,19 +610,20 @@ static int __init nubus_get_vendorinfo(struct nubus_board *board,
static char *vendor_fields[6] = { "ID", "serial", "revision",
"part", "date", "unknown field" };
- pr_info(" vendor info:\n");
+ pr_debug(" vendor info:\n");
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
char name[64];
+ unsigned int len;
/* These are all strings, we think */
- nubus_get_rsrc_str(name, &ent, 64);
- if (ent.type > 5)
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ if (ent.type < 1 || ent.type > 5)
ent.type = 5;
- pr_info(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ pr_debug(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
}
return 0;
}
@@ -612,9 +634,9 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
struct nubus_dir dir;
struct nubus_dirent ent;
+ pr_debug(" Board resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -625,64 +647,81 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
useful except insofar as it tells us that
we really are looking at a board resource. */
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
if (nbtdata[0] != 1 || nbtdata[1] != 0 ||
nbtdata[2] != 0 || nbtdata[3] != 0)
- pr_err("this sResource is not a board resource!\n");
+ pr_err("Slot %X: sResource is not a board resource!\n",
+ slot);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
- nubus_get_rsrc_str(board->name, &ent, 64);
- pr_info(" name: %s\n", board->name);
+ {
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(board->name, &ent,
+ sizeof(board->name));
+ pr_debug(" name: %s\n", board->name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
+ }
case NUBUS_RESID_ICON:
- nubus_get_icon(board, &ent);
+ nubus_get_icon(board, dir.procdir, &ent);
break;
case NUBUS_RESID_BOARDID:
- pr_info(" board id: 0x%x\n", ent.data);
+ pr_debug(" board id: 0x%x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_PRIMARYINIT:
- pr_info(" primary init offset: 0x%06x\n", ent.data);
+ pr_debug(" primary init offset: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_VENDORINFO:
- nubus_get_vendorinfo(board, &ent);
+ nubus_get_vendorinfo(board, dir.procdir, &ent);
break;
case NUBUS_RESID_FLAGS:
- pr_info(" flags: 0x%06x\n", ent.data);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- pr_info(" hwdevid: 0x%06x\n", ent.data);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_SECONDINIT:
- pr_info(" secondary init offset: 0x%06x\n", ent.data);
+ pr_debug(" secondary init offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
/* WTF isn't this in the functional resources? */
case NUBUS_RESID_VIDNAMES:
- nubus_get_vidnames(board, &ent);
+ pr_debug(" vidnames directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
/* Same goes for this */
case NUBUS_RESID_VIDMODES:
- pr_info(" video mode parameter directory offset: 0x%06x\n",
- ent.data);
+ pr_debug(" video mode parameter directory offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent.type, ent.data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
}
}
return 0;
}
-/* Add a board (might be many devices) to the list */
-static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
+static void __init nubus_add_board(int slot, int bytelanes)
{
struct nubus_board *board;
- struct nubus_board **boardp;
unsigned char *rp;
unsigned long dpat;
struct nubus_dir dir;
struct nubus_dirent ent;
+ int prev_resid = -1;
/* Move to the start of the format block */
rp = nubus_rom_addr(slot);
@@ -690,19 +729,19 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Actually we should probably panic if this fails */
if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL)
- return NULL;
+ return;
board->fblock = rp;
/* Dump the format block for debugging purposes */
pr_debug("Slot %X, format block at 0x%p:\n", slot, rp);
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
rp = board->fblock;
board->slot = slot;
@@ -722,10 +761,10 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Directory offset should be small and negative... */
if (!(board->doffset & 0x00FF0000))
- pr_warn("Dodgy doffset!\n");
+ pr_warn("Slot %X: Dodgy doffset!\n", slot);
dpat = nubus_get_rom(&rp, 4, bytelanes);
if (dpat != NUBUS_TEST_PATTERN)
- pr_warn("Wrong test pattern %08lx!\n", dpat);
+ pr_warn("Slot %X: Wrong test pattern %08lx!\n", slot, dpat);
/*
* I wonder how the CRC is meant to work -
@@ -742,53 +781,52 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
nubus_get_root_dir(board, &dir);
/* We're ready to rock */
- pr_info("Slot %X:\n", slot);
+ pr_debug("Slot %X resources:\n", slot);
/* Each slot should have one board resource and any number of
- functional resources. So we'll fill in some fields in the
- struct nubus_board from the board resource, then walk down
- the list of functional resources, spinning out a nubus_dev
- for each of them. */
+ * functional resources. So we'll fill in some fields in the
+ * struct nubus_board from the board resource, then walk down
+ * the list of functional resources, spinning out a nubus_rsrc
+ * for each of them.
+ */
if (nubus_readdir(&dir, &ent) == -1) {
/* We can't have this! */
- pr_err("Board resource not found!\n");
- return NULL;
- } else {
- pr_info(" Board resource:\n");
- nubus_get_board_resource(board, slot, &ent);
+ pr_err("Slot %X: Board resource not found!\n", slot);
+ kfree(board);
+ return;
}
+ if (ent.type < 1 || ent.type > 127)
+ pr_warn("Slot %X: Board resource ID is invalid!\n", slot);
+
+ board->procdir = nubus_proc_add_board(board);
+
+ nubus_get_board_resource(board, slot, &ent);
+
while (nubus_readdir(&dir, &ent) != -1) {
- struct nubus_dev *dev;
- struct nubus_dev **devp;
+ struct nubus_rsrc *fres;
- dev = nubus_get_functional_resource(board, slot, &ent);
- if (dev == NULL)
+ fres = nubus_get_functional_resource(board, slot, &ent);
+ if (fres == NULL)
continue;
- /* We zeroed this out above */
- if (board->first_dev == NULL)
- board->first_dev = dev;
+ /* Resources should appear in ascending ID order. This sanity
+ * check prevents duplicate resource IDs.
+ */
+ if (fres->resid <= prev_resid) {
+ kfree(fres);
+ continue;
+ }
+ prev_resid = fres->resid;
- /* Put it on the global NuBus device chain. Keep entries in order. */
- for (devp = &nubus_devices; *devp != NULL;
- devp = &((*devp)->next))
- /* spin */;
- *devp = dev;
- dev->next = NULL;
+ list_add_tail(&fres->list, &nubus_func_rsrcs);
}
- /* Put it on the global NuBus board chain. Keep entries in order. */
- for (boardp = &nubus_boards; *boardp != NULL;
- boardp = &((*boardp)->next))
- /* spin */;
- *boardp = board;
- board->next = NULL;
-
- return board;
+ if (nubus_device_register(board))
+ put_device(&board->dev);
}
-void __init nubus_probe_slot(int slot)
+static void __init nubus_probe_slot(int slot)
{
unsigned char dp;
unsigned char *rp;
@@ -796,11 +834,8 @@ void __init nubus_probe_slot(int slot)
rp = nubus_rom_addr(slot);
for (i = 4; i; i--) {
- int card_present;
-
rp--;
- card_present = hwreg_present(rp);
- if (!card_present)
+ if (!hwreg_present(rp))
continue;
dp = *rp;
@@ -822,10 +857,11 @@ void __init nubus_probe_slot(int slot)
}
}
-void __init nubus_scan_bus(void)
+static void __init nubus_scan_bus(void)
{
int slot;
+ pr_info("NuBus: Scanning NuBus slots.\n");
for (slot = 9; slot < 15; slot++) {
nubus_probe_slot(slot);
}
@@ -833,14 +869,16 @@ void __init nubus_scan_bus(void)
static int __init nubus_init(void)
{
+ int err;
+
if (!MACH_IS_MAC)
return 0;
- pr_info("NuBus: Scanning NuBus slots.\n");
- nubus_devices = NULL;
- nubus_boards = NULL;
- nubus_scan_bus();
nubus_proc_init();
+ err = nubus_bus_register();
+ if (err)
+ return err;
+ nubus_scan_bus();
return 0;
}
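[Editor's note] The old nubus_find_device/type/slot walkers are gone; consumers now iterate functional resources directly. The proc.c changes below use a for_each_func_rsrc() helper, presumably built on the two accessors exported here, along these lines (the macro shape is assumed and the filter is illustrative):

	#define for_each_func_rsrc(f) \
		for (f = nubus_first_rsrc_or_null(); f; \
		     f = nubus_next_rsrc_or_null(f))

	struct nubus_rsrc *fres;

	for_each_func_rsrc(fres) {
		if (fres->category != NUBUS_CAT_NETWORK)
			continue;
		pr_info("network function in slot %X\n", fres->board->slot);
	}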
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 004a122ac0ff..c2e5a7e6bd3e 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -11,39 +11,37 @@
structure in /proc analogous to the structure of the NuBus ROM
resources.
- Therefore each NuBus device is in fact a directory, which may in
- turn contain subdirectories. The "files" correspond to NuBus
- resource records. For those types of records which we know how to
- convert to formats that are meaningful to userspace (mostly just
- icons) these files will provide "cooked" data. Otherwise they will
- simply provide raw access (read-only of course) to the ROM. */
+ Therefore each board function gets a directory, which may in turn
+ contain subdirectories. Each slot resource is a file. Unrecognized
+ resources are empty files, since every resource ID requires a special
+ case (e.g. if the resource ID implies a directory or block, then its
+ value has to be interpreted as a slot ROM pointer etc.).
+ */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/nubus.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
-
#include <linux/uaccess.h>
#include <asm/byteorder.h>
+/*
+ * /proc/bus/nubus/devices stuff
+ */
+
static int
nubus_devices_proc_show(struct seq_file *m, void *v)
{
- struct nubus_dev *dev = nubus_devices;
-
- while (dev) {
- seq_printf(m, "%x\t%04x %04x %04x %04x",
- dev->board->slot,
- dev->category,
- dev->type,
- dev->dr_sw,
- dev->dr_hw);
- seq_printf(m, "\t%08lx\n", dev->board->slot_addr);
- dev = dev->next;
- }
+ struct nubus_rsrc *fres;
+
+ for_each_func_rsrc(fres)
+ seq_printf(m, "%x\t%04x %04x %04x %04x\t%08lx\n",
+ fres->board->slot, fres->category, fres->type,
+ fres->dr_sw, fres->dr_hw, fres->board->slot_addr);
return 0;
}
@@ -61,174 +59,163 @@ static const struct file_operations nubus_devices_proc_fops = {
static struct proc_dir_entry *proc_bus_nubus_dir;
-static const struct file_operations nubus_proc_subdir_fops = {
-#warning Need to set some I/O handlers here
-};
+/*
+ * /proc/bus/nubus/x/ stuff
+ */
-static void nubus_proc_subdir(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* dir)
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* Some of these are directories, others aren't */
- while (nubus_readdir(dir, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
-
- sprintf(name, "%x", ent.type);
- e = proc_create(name, S_IFREG | S_IRUGO | S_IWUSR, parent,
- &nubus_proc_subdir_fops);
- if (!e)
- return;
- }
+ char name[2];
+
+ if (!proc_bus_nubus_dir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", board->slot);
+ return proc_mkdir(name, proc_bus_nubus_dir);
}
-/* Can't do this recursively since the root directory is structured
- somewhat differently from the subdirectories */
-static void nubus_proc_populate(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* root)
+/* The PDE private data for any directory under /proc/bus/nubus/x/
+ * is the bytelanes value for the board in slot x.
+ */
+
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* We know these are all directories (board resource + one or
- more functional resources) */
- while (nubus_readdir(root, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
- struct nubus_dir dir;
-
- sprintf(name, "%x", ent.type);
- e = proc_mkdir(name, parent);
- if (!e) return;
-
- /* And descend */
- if (nubus_get_subdir(&ent, &dir) == -1) {
- /* This shouldn't happen */
- printk(KERN_ERR "NuBus root directory node %x:%x has no subdir!\n",
- dev->board->slot, ent.type);
- continue;
- } else {
- nubus_proc_subdir(dev, e, &dir);
- }
- }
+ char name[9];
+ int lanes = board->lanes;
+
+ if (!procdir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", ent->type);
+ return proc_mkdir_data(name, 0555, procdir, (void *)lanes);
}
-int nubus_proc_attach_device(struct nubus_dev *dev)
+/* The PDE private data for a file under /proc/bus/nubus/x/ is a pointer to
+ * an instance of the following structure, which gives the location and size
+ * of the resource data in the slot ROM. For slot resources which hold only a
+ * small integer, this integer value is stored directly and size is set to 0.
+ * A NULL private data pointer indicates an unrecognized resource.
+ */
+
+struct nubus_proc_pde_data {
+ unsigned char *res_ptr;
+ unsigned int res_size;
+};
+
+static struct nubus_proc_pde_data *
+nubus_proc_alloc_pde_data(unsigned char *ptr, unsigned int size)
{
- struct proc_dir_entry *e;
- struct nubus_dir root;
- char name[8];
-
- if (dev == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- return -1;
- }
-
- if (dev->board == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- printk("dev = %p, dev->board = %p\n", dev, dev->board);
- return -1;
- }
-
- /* Create a directory */
- sprintf(name, "%x", dev->board->slot);
- e = dev->procdir = proc_mkdir(name, proc_bus_nubus_dir);
- if (!e)
- return -ENOMEM;
+ struct nubus_proc_pde_data *pde_data;
- /* Now recursively populate it with files */
- nubus_get_root_dir(dev->board, &root);
- nubus_proc_populate(dev, e, &root);
+ pde_data = kmalloc(sizeof(*pde_data), GFP_KERNEL);
+ if (!pde_data)
+ return NULL;
- return 0;
+ pde_data->res_ptr = ptr;
+ pde_data->res_size = size;
+ return pde_data;
}
-EXPORT_SYMBOL(nubus_proc_attach_device);
-/*
- * /proc/nubus stuff
- */
-static int nubus_proc_show(struct seq_file *m, void *v)
+static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
{
- const struct nubus_board *board = v;
+ struct inode *inode = m->private;
+ struct nubus_proc_pde_data *pde_data;
- /* Display header on line 1 */
- if (v == SEQ_START_TOKEN)
- seq_puts(m, "Nubus devices found:\n");
- else
- seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ pde_data = PDE_DATA(inode);
+ if (!pde_data)
+ return 0;
+
+ if (pde_data->res_size > m->size)
+ return -EFBIG;
+
+ if (pde_data->res_size) {
+ int lanes = (int)proc_get_parent_data(inode);
+ struct nubus_dirent ent;
+
+ if (!lanes)
+ return 0;
+
+ ent.mask = lanes;
+ ent.base = pde_data->res_ptr;
+ ent.data = 0;
+ nubus_seq_write_rsrc_mem(m, &ent, pde_data->res_size);
+ } else {
+ unsigned int data = (unsigned int)pde_data->res_ptr;
+
+ seq_putc(m, data >> 16);
+ seq_putc(m, data >> 8);
+ seq_putc(m, data >> 0);
+ }
return 0;
}
-static void *nubus_proc_start(struct seq_file *m, loff_t *_pos)
+static int nubus_proc_rsrc_open(struct inode *inode, struct file *file)
{
- struct nubus_board *board;
- unsigned pos;
-
- if (*_pos > LONG_MAX)
- return NULL;
- pos = *_pos;
- if (pos == 0)
- return SEQ_START_TOKEN;
- for (board = nubus_boards; board; board = board->next)
- if (--pos == 0)
- break;
- return board;
+ return single_open(file, nubus_proc_rsrc_show, inode);
}
-static void *nubus_proc_next(struct seq_file *p, void *v, loff_t *_pos)
+static const struct file_operations nubus_proc_rsrc_fops = {
+ .open = nubus_proc_rsrc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size)
{
- /* Walk the list of NuBus boards */
- struct nubus_board *board = v;
-
- ++*_pos;
- if (v == SEQ_START_TOKEN)
- board = nubus_boards;
- else if (board)
- board = board->next;
- return board;
+ char name[9];
+ struct nubus_proc_pde_data *pde_data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ if (size)
+ pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+ else
+ pde_data = NULL;
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops, pde_data);
}
-static void nubus_proc_stop(struct seq_file *p, void *v)
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
+ char name[9];
+ unsigned char *data = (unsigned char *)ent->data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops,
+ nubus_proc_alloc_pde_data(data, 0));
}
-static const struct seq_operations nubus_proc_seqops = {
- .start = nubus_proc_start,
- .next = nubus_proc_next,
- .stop = nubus_proc_stop,
- .show = nubus_proc_show,
-};
+/*
+ * /proc/nubus stuff
+ */
static int nubus_proc_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &nubus_proc_seqops);
+ return single_open(file, nubus_proc_show, NULL);
}
static const struct file_operations nubus_proc_fops = {
.open = nubus_proc_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
-void __init proc_bus_nubus_add_devices(void)
-{
- struct nubus_dev *dev;
-
- for(dev = nubus_devices; dev; dev = dev->next)
- nubus_proc_attach_device(dev);
-}
-
void __init nubus_proc_init(void)
{
proc_create("nubus", 0, NULL, &nubus_proc_fops);
- if (!MACH_IS_MAC)
- return;
proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL);
+ if (!proc_bus_nubus_dir)
+ return;
proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops);
- proc_bus_nubus_add_devices();
}
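[Editor's note] One subtlety in the proc rewrite: resources that carry only a 24-bit immediate are stored by stashing ent->data in the res_ptr field with res_size == 0, and the show routine recovers the integer from the pointer and emits it most significant byte first. Condensed into a standalone helper for clarity (the helper name is mine, not the kernel's):

	#include <linux/seq_file.h>

	/* Emit a 24-bit immediate that was smuggled through a pointer
	 * field, MSB first, as nubus_proc_rsrc_show() does when
	 * res_size == 0.
	 */
	static void nubus_emit_immediate(struct seq_file *m,
					 const void *res_ptr)
	{
		unsigned int data = (unsigned long)res_ptr;

		seq_putc(m, data >> 16);
		seq_putc(m, data >> 8);
		seq_putc(m, data);
	}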
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a25fd43650ad..441e67e3a9d7 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
@@ -6,6 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
+nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
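[Editor's note] The new ccflags-y += -I$(src) exists for the tracepoint machinery: define_trace.h re-includes the event header by name, so a header living outside include/trace/events must add its own directory to the search path. The canonical layout, with an event whose fields are illustrative rather than the actual nvme trace format:

	/* trace.h */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM nvme

	#if !defined(_TRACE_NVME_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_NVME_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(nvme_example_event,
		TP_PROTO(unsigned int tag, int status),
		TP_ARGS(tag, status),
		TP_STRUCT__entry(
			__field(unsigned int, tag)
			__field(int, status)
		),
		TP_fast_assign(
			__entry->tag = tag;
			__entry->status = status;
		),
		TP_printk("tag=%u status=%d", __entry->tag, __entry->status)
	);

	#endif /* _TRACE_NVME_H */

	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH .
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE trace

	/* This part must be outside protection */
	#include <trace/define_trace.h>

Exactly one translation unit, core.c in the hunk below, defines CREATE_TRACE_POINTS before including the header, which turns the declarations into definitions.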
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1e46e60b8f10..f431c32774f3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -29,6 +29,9 @@
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
#include "nvme.h"
#include "fabrics.h"
@@ -65,9 +68,26 @@ static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
+/*
+ * nvme_wq - hosts nvme related works that are not reset or delete
+ * nvme_reset_wq - hosts nvme reset works
+ * nvme_delete_wq - hosts nvme delete works
+ *
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
+ * runs reset works which also flush works hosted on nvme_wq for
+ * serialization purposes. nvme_delete_wq hosts controller deletion
+ * works which flush reset works for serialization.
+ */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);
+struct workqueue_struct *nvme_reset_wq;
+EXPORT_SYMBOL_GPL(nvme_reset_wq);
+
+struct workqueue_struct *nvme_delete_wq;
+EXPORT_SYMBOL_GPL(nvme_delete_wq);
+
static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
@@ -89,13 +109,13 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->reset_work))
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
return -EBUSY;
return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -104,6 +124,7 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
flush_work(&ctrl->reset_work);
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_delete_ctrl_work(struct work_struct *work)
{
@@ -122,7 +143,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->delete_work))
+ if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
return -EBUSY;
return 0;
}
@@ -157,13 +178,20 @@ static blk_status_t nvme_error_status(struct request *req)
return BLK_STS_OK;
case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC;
+ case NVME_SC_LBA_RANGE:
+ return BLK_STS_TARGET;
+ case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED:
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
return BLK_STS_NOTSUPP;
case NVME_SC_WRITE_FAULT:
case NVME_SC_READ_ERROR:
case NVME_SC_UNWRITTEN_BLOCK:
case NVME_SC_ACCESS_DENIED:
case NVME_SC_READ_ONLY:
+ case NVME_SC_COMPARE_FAILED:
return BLK_STS_MEDIUM;
case NVME_SC_GUARD_CHECK:
case NVME_SC_APPTAG_CHECK:
@@ -190,8 +218,12 @@ static inline bool nvme_req_needs_retry(struct request *req)
void nvme_complete_rq(struct request *req)
{
- if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
- if (nvme_req_needs_failover(req)) {
+ blk_status_t status = nvme_error_status(req);
+
+ trace_nvme_complete_rq(req);
+
+ if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
+ if (nvme_req_needs_failover(req, status)) {
nvme_failover_req(req);
return;
}
@@ -202,8 +234,7 @@ void nvme_complete_rq(struct request *req)
return;
}
}
-
- blk_mq_end_request(req, nvme_error_status(req));
+ blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -232,6 +263,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
old_state = ctrl->state;
switch (new_state) {
+ case NVME_CTRL_ADMIN_ONLY:
+ switch (old_state) {
+ case NVME_CTRL_RECONNECTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
case NVME_CTRL_LIVE:
switch (old_state) {
case NVME_CTRL_NEW:
@@ -247,6 +287,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
changed = true;
/* FALLTHRU */
default:
@@ -266,6 +307,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_DELETING:
switch (old_state) {
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
case NVME_CTRL_RECONNECTING:
changed = true;
@@ -591,6 +633,10 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
}
cmd->common.command_id = req->tag;
+ if (ns)
+ trace_nvme_setup_nvm_cmd(req->q->id, cmd);
+ else
+ trace_nvme_setup_admin_cmd(cmd);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -1217,16 +1263,27 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
#ifdef CONFIG_NVME_MULTIPATH
/* should never be called due to GENHD_FL_HIDDEN */
if (WARN_ON_ONCE(ns->head->disk))
- return -ENXIO;
+ goto fail;
#endif
if (!kref_get_unless_zero(&ns->kref))
- return -ENXIO;
+ goto fail;
+ if (!try_module_get(ns->ctrl->ops->module))
+ goto fail_put_ns;
+
return 0;
+
+fail_put_ns:
+ nvme_put_ns(ns);
+fail:
+ return -ENXIO;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
- nvme_put_ns(disk->private_data);
+ struct nvme_ns *ns = disk->private_data;
+
+ module_put(ns->ctrl->ops->module);
+ nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
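The open/release pairing above keeps the transport module loaded while a
namespace block device is open; a condensed view of the invariant (illustrative
comment only):

/*
 * nvme_open():    kref_get(ns) + try_module_get(ctrl->ops->module)
 * nvme_release(): module_put(ctrl->ops->module) + nvme_put_ns(ns)
 * Unwinding in reverse order on the failure paths keeps both
 * references balanced.
 */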
@@ -1335,6 +1392,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+ unsigned short bs = 1 << ns->lba_shift;
unsigned stream_alignment = 0;
if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1401,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_mq_freeze_queue(disk->queue);
blk_integrity_unregister(disk);
- blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift);
+ blk_queue_logical_block_size(disk->queue, bs);
+ blk_queue_physical_block_size(disk->queue, bs);
+ blk_queue_io_min(disk->queue, bs);
+
if (ns->ms && !ns->ext &&
(ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -2048,6 +2109,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+{
+ int count = 0;
+ struct nvme_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DEAD)
+ count++;
+ }
+ mutex_unlock(&subsys->lock);
+
+ return count;
+}
+
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
struct nvme_subsystem *subsys, *found;
@@ -2086,7 +2163,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
* Verify that the subsystem actually supports multiple
* controllers, else bail out.
*/
- if (!(id->cmic & (1 << 1))) {
+ if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
dev_err(ctrl->device,
"ignoring ctrl due to duplicate subnqn (%s).\n",
found->subnqn);
@@ -2253,7 +2330,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
shutdown_timeout, 60);
if (ctrl->shutdown_timeout != shutdown_timeout)
- dev_warn(ctrl->device,
+ dev_info(ctrl->device,
"Shutdown timeout set to %u seconds\n",
ctrl->shutdown_timeout);
} else
@@ -2337,8 +2414,14 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
struct nvme_ctrl *ctrl =
container_of(inode->i_cdev, struct nvme_ctrl, cdev);
- if (ctrl->state != NVME_CTRL_LIVE)
+ switch (ctrl->state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
+ break;
+ default:
return -EWOULDBLOCK;
+ }
+
file->private_data = ctrl;
return 0;
}
@@ -2471,14 +2554,14 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
serial_len, subsys->serial, model_len, subsys->model,
head->ns_id);
}
-static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
+static DEVICE_ATTR_RO(wwid);
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
-static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);
+static DEVICE_ATTR_RO(nguid);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2495,21 +2578,21 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
}
return sprintf(buf, "%pU\n", &ids->uuid);
}
-static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
+static DEVICE_ATTR_RO(uuid);
static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
-static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
+static DEVICE_ATTR_RO(eui);
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
-static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
+static DEVICE_ATTR_RO(nsid);
static struct attribute *nvme_ns_id_attrs[] = {
&dev_attr_wwid.attr,
@@ -2602,6 +2685,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
+ [NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_RECONNECTING]= "reconnecting",
[NVME_CTRL_DELETING] = "deleting",
@@ -2987,6 +3071,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_unlock(&ns->ctrl->namespaces_mutex);
synchronize_srcu(&ns->head->srcu);
+ nvme_mpath_check_last_path(ns);
nvme_put_ns(ns);
}
@@ -3074,6 +3159,8 @@ static void nvme_scan_work(struct work_struct *work)
if (ctrl->state != NVME_CTRL_LIVE)
return;
+ WARN_ON_ONCE(!ctrl->tagset);
+
if (nvme_identify_ctrl(ctrl, &id))
return;
@@ -3094,8 +3181,7 @@ static void nvme_scan_work(struct work_struct *work)
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
/*
- * Do not queue new scan work when a controller is reset during
- * removal.
+ * Only queue new scan work when admin and IO queues are both alive.
*/
if (ctrl->state == NVME_CTRL_LIVE)
queue_work(nvme_wq, &ctrl->scan_work);
@@ -3472,16 +3558,26 @@ EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
int __init nvme_core_init(void)
{
- int result;
+ int result = -ENOMEM;
nvme_wq = alloc_workqueue("nvme-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_wq)
- return -ENOMEM;
+ goto out;
+
+ nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_reset_wq)
+ goto destroy_wq;
+
+ nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_delete_wq)
+ goto destroy_reset_wq;
result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
if (result < 0)
- goto destroy_wq;
+ goto destroy_delete_wq;
nvme_class = class_create(THIS_MODULE, "nvme");
if (IS_ERR(nvme_class)) {
@@ -3500,8 +3596,13 @@ destroy_class:
class_destroy(nvme_class);
unregister_chrdev:
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+destroy_delete_wq:
+ destroy_workqueue(nvme_delete_wq);
+destroy_reset_wq:
+ destroy_workqueue(nvme_reset_wq);
destroy_wq:
destroy_workqueue(nvme_wq);
+out:
return result;
}
@@ -3511,6 +3612,8 @@ void nvme_core_exit(void)
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+ destroy_workqueue(nvme_delete_wq);
+ destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 76b4fe6816a0..5dd4ceefed8f 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void)
return NULL;
kref_init(&host->ref);
+ uuid_gen(&host->id);
snprintf(host->nqn, NVMF_NQN_SIZE,
"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
@@ -492,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
*/
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
- if (!ops->create_ctrl)
+ if (!ops->create_ctrl || !ops->module)
return -EINVAL;
down_write(&nvmf_transports_rwsem);
@@ -738,11 +739,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
- if (uuid_parse(p, &hostid)) {
+ ret = uuid_parse(p, &hostid);
+ if (ret) {
pr_err("Invalid hostid %s\n", p);
ret = -EINVAL;
+ kfree(p);
goto out;
}
+ kfree(p);
break;
case NVMF_OPT_DUP_CONNECT:
opts->duplicate_connect = true;
@@ -868,32 +872,41 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
goto out_unlock;
}
+ if (!try_module_get(ops->module)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
ret = nvmf_check_required_opts(opts, ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
ops->allowed_opts | ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ctrl = ops->create_ctrl(dev, opts);
if (IS_ERR(ctrl)) {
ret = PTR_ERR(ctrl);
- goto out_unlock;
+ goto out_module_put;
}
if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
dev_warn(ctrl->device,
"controller returned incorrect NQN: \"%s\".\n",
ctrl->subsys->subnqn);
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
nvme_delete_ctrl_sync(ctrl);
return ERR_PTR(-EINVAL);
}
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
return ctrl;
+out_module_put:
+ module_put(ops->module);
out_unlock:
up_read(&nvmf_transports_rwsem);
out_free_opts:
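The transport module reference is held only across the create_ctrl() window;
once the controller exists (or creation fails), the transport is released
again. A reduced sketch of the pattern, assuming ops and opts as above:

if (!try_module_get(ops->module))
        return ERR_PTR(-EBUSY);         /* transport is being unloaded */
ctrl = ops->create_ctrl(dev, opts);
module_put(ops->module);                /* dropped on success and failure */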
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 9ba614953607..25b19f722f5b 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -108,6 +108,7 @@ struct nvmf_ctrl_options {
* fabric implementation of NVMe fabrics.
* @entry: Used by the fabrics library to add the new
* registration entry to its linked-list internal tree.
+ * @module: Transport module reference
* @name: Name of the NVMe fabric driver implementation.
* @required_opts: sysfs command-line options that must be specified
* when adding a new NVMe controller.
@@ -126,6 +127,7 @@ struct nvmf_ctrl_options {
*/
struct nvmf_transport_ops {
struct list_head entry;
+ struct module *module;
const char *name;
int required_opts;
int allowed_opts;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 794e66e4aa20..99bf51c7e513 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2921,6 +2921,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
nvme_fc_free_queue(&ctrl->queues[0]);
+ /* re-enable the admin_q so anything new will fail fast */
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
nvme_fc_ctlr_inactive_on_rport(ctrl);
}
@@ -2935,6 +2938,9 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
+
+ /* resume the io queues so that things will fail fast */
+ nvme_start_queues(nctrl);
}
static void
@@ -3380,6 +3386,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
static struct nvmf_transport_ops nvme_fc_transport = {
.name = "fc",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
.allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
.create_ctrl = nvme_fc_create_ctrl,
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index ba3d7f3349e5..50ef71ee3d86 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -31,27 +31,10 @@
enum nvme_nvm_admin_opcode {
nvme_nvm_admin_identity = 0xe2,
- nvme_nvm_admin_get_l2p_tbl = 0xea,
nvme_nvm_admin_get_bb_tbl = 0xf2,
nvme_nvm_admin_set_bb_tbl = 0xf1,
};
-struct nvme_nvm_hb_rw {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2;
- __le64 metadata;
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __le16 length;
- __le16 control;
- __le32 dsmgmt;
- __le64 slba;
-};
-
struct nvme_nvm_ph_rw {
__u8 opcode;
__u8 flags;
@@ -80,19 +63,6 @@ struct nvme_nvm_identity {
__u32 rsvd11[5];
};
-struct nvme_nvm_l2ptbl {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __le32 cdw2[4];
- __le64 prp1;
- __le64 prp2;
- __le64 slba;
- __le32 nlb;
- __le16 cdw14[6];
-};
-
struct nvme_nvm_getbbtbl {
__u8 opcode;
__u8 flags;
@@ -139,9 +109,7 @@ struct nvme_nvm_command {
union {
struct nvme_common_command common;
struct nvme_nvm_identity identity;
- struct nvme_nvm_hb_rw hb_rw;
struct nvme_nvm_ph_rw ph_rw;
- struct nvme_nvm_l2ptbl l2p;
struct nvme_nvm_getbbtbl get_bb;
struct nvme_nvm_setbbtbl set_bb;
struct nvme_nvm_erase_blk erase;
@@ -167,7 +135,7 @@ struct nvme_nvm_id_group {
__u8 num_lun;
__u8 num_pln;
__u8 rsvd1;
- __le16 num_blk;
+ __le16 num_chk;
__le16 num_pg;
__le16 fpg_sz;
__le16 csecs;
@@ -234,11 +202,9 @@ struct nvme_nvm_bb_tbl {
static inline void _nvme_nvm_check_size(void)
{
BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
@@ -249,51 +215,58 @@ static inline void _nvme_nvm_check_size(void)
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
struct nvme_nvm_id_group *src;
- struct nvm_id_group *dst;
+ struct nvm_id_group *grp;
+ int sec_per_pg, sec_per_pl, pg_per_blk;
if (nvme_nvm_id->cgrps != 1)
return -EINVAL;
src = &nvme_nvm_id->groups[0];
- dst = &nvm_id->grp;
-
- dst->mtype = src->mtype;
- dst->fmtype = src->fmtype;
- dst->num_ch = src->num_ch;
- dst->num_lun = src->num_lun;
- dst->num_pln = src->num_pln;
-
- dst->num_pg = le16_to_cpu(src->num_pg);
- dst->num_blk = le16_to_cpu(src->num_blk);
- dst->fpg_sz = le16_to_cpu(src->fpg_sz);
- dst->csecs = le16_to_cpu(src->csecs);
- dst->sos = le16_to_cpu(src->sos);
-
- dst->trdt = le32_to_cpu(src->trdt);
- dst->trdm = le32_to_cpu(src->trdm);
- dst->tprt = le32_to_cpu(src->tprt);
- dst->tprm = le32_to_cpu(src->tprm);
- dst->tbet = le32_to_cpu(src->tbet);
- dst->tbem = le32_to_cpu(src->tbem);
- dst->mpos = le32_to_cpu(src->mpos);
- dst->mccap = le32_to_cpu(src->mccap);
-
- dst->cpar = le16_to_cpu(src->cpar);
-
- if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
- memcpy(dst->lptbl.id, src->lptbl.id, 8);
- dst->lptbl.mlc.num_pairs =
- le16_to_cpu(src->lptbl.mlc.num_pairs);
-
- if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
- pr_err("nvm: number of MLC pairs not supported\n");
- return -EINVAL;
- }
+ grp = &nvm_id->grp;
+
+ grp->mtype = src->mtype;
+ grp->fmtype = src->fmtype;
+
+ grp->num_ch = src->num_ch;
+ grp->num_lun = src->num_lun;
+
+ grp->num_chk = le16_to_cpu(src->num_chk);
+ grp->csecs = le16_to_cpu(src->csecs);
+ grp->sos = le16_to_cpu(src->sos);
+
+ pg_per_blk = le16_to_cpu(src->num_pg);
+ sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
+ sec_per_pl = sec_per_pg * src->num_pln;
+ grp->clba = sec_per_pl * pg_per_blk;
+ grp->ws_per_chk = pg_per_blk;
- memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
- dst->lptbl.mlc.num_pairs);
+ grp->mpos = le32_to_cpu(src->mpos);
+ grp->cpar = le16_to_cpu(src->cpar);
+ grp->mccap = le32_to_cpu(src->mccap);
+
+ grp->ws_opt = grp->ws_min = sec_per_pg;
+ grp->ws_seq = NVM_IO_SNGL_ACCESS;
+
+ if (grp->mpos & 0x020202) {
+ grp->ws_seq = NVM_IO_DUAL_ACCESS;
+ grp->ws_opt <<= 1;
+ } else if (grp->mpos & 0x040404) {
+ grp->ws_seq = NVM_IO_QUAD_ACCESS;
+ grp->ws_opt <<= 2;
}
+ grp->trdt = le32_to_cpu(src->trdt);
+ grp->trdm = le32_to_cpu(src->trdm);
+ grp->tprt = le32_to_cpu(src->tprt);
+ grp->tprm = le32_to_cpu(src->tprm);
+ grp->tbet = le32_to_cpu(src->tbet);
+ grp->tbem = le32_to_cpu(src->tbem);
+
+ /* 1.2 compatibility */
+ grp->num_pln = src->num_pln;
+ grp->num_pg = le16_to_cpu(src->num_pg);
+ grp->fpg_sz = le16_to_cpu(src->fpg_sz);
+
return 0;
}
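For reference, plugging plausible 1.2 geometry values into the computation
above (illustrative numbers, not from the spec):

/*
 * csecs = 4096, fpg_sz = 16384   ->  sec_per_pg = 4
 * num_pln = 4                    ->  sec_per_pl = 16
 * num_pg = 512                   ->  clba = 16 * 512 = 8192 sectors/chunk
 * mpos & 0x020202 set            ->  ws_seq = NVM_IO_DUAL_ACCESS, ws_opt = 8
 */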
@@ -332,62 +305,6 @@ out:
return ret;
}
-static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_command c = {};
- u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
- u32 nlb_pr_rq = len / sizeof(u64);
- u64 cmd_slba = slba;
- void *entries;
- int ret = 0;
-
- c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
- c.l2p.nsid = cpu_to_le32(ns->head->ns_id);
- entries = kmalloc(len, GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- while (nlb) {
- u32 cmd_nlb = min(nlb_pr_rq, nlb);
- u64 elba = slba + cmd_nlb;
-
- c.l2p.slba = cpu_to_le64(cmd_slba);
- c.l2p.nlb = cpu_to_le32(cmd_nlb);
-
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
- (struct nvme_command *)&c, entries, len);
- if (ret) {
- dev_err(ns->ctrl->device,
- "L2P table transfer failed (%d)\n", ret);
- ret = -EIO;
- goto out;
- }
-
- if (unlikely(elba > nvmdev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Transform physical address to target address space */
- nvm_part_to_tgt(nvmdev, entries, cmd_nlb);
-
- if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
- ret = -EINTR;
- goto out;
- }
-
- cmd_slba += cmd_nlb;
- nlb -= cmd_nlb;
- }
-
-out:
- kfree(entries);
- return ret;
-}
-
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
u8 *blks)
{
@@ -397,7 +314,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- int nr_blks = geo->blks_per_lun * geo->plane_mode;
+ int nr_blks = geo->nr_chks * geo->plane_mode;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
@@ -438,7 +355,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
+ memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
kfree(bb_tbl);
return ret;
@@ -474,10 +391,6 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
c->ph_rw.control = cpu_to_le16(rqd->flags);
c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
-
- if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
- c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
- rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
@@ -597,8 +510,6 @@ static void nvme_nvm_dev_dma_free(void *pool, void *addr,
static struct nvm_dev_ops nvme_nvm_dev_ops = {
.identity = nvme_nvm_identity,
- .get_l2p_tbl = nvme_nvm_get_l2p_tbl,
-
.get_bb_tbl = nvme_nvm_get_bb_tbl,
.set_bb_tbl = nvme_nvm_set_bb_tbl,
@@ -883,7 +794,7 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
} else if (strcmp(attr->name, "num_planes") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
} else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
} else if (strcmp(attr->name, "num_pages") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
} else if (strcmp(attr->name, "page_size") == 0) {
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1218a9fca846..3b211d9e58b8 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -33,51 +33,11 @@ void nvme_failover_req(struct request *req)
kblockd_schedule_work(&ns->head->requeue_work);
}
-bool nvme_req_needs_failover(struct request *req)
+bool nvme_req_needs_failover(struct request *req, blk_status_t error)
{
if (!(req->cmd_flags & REQ_NVME_MPATH))
return false;
-
- switch (nvme_req(req)->status & 0x7ff) {
- /*
- * Generic command status:
- */
- case NVME_SC_INVALID_OPCODE:
- case NVME_SC_INVALID_FIELD:
- case NVME_SC_INVALID_NS:
- case NVME_SC_LBA_RANGE:
- case NVME_SC_CAP_EXCEEDED:
- case NVME_SC_RESERVATION_CONFLICT:
- return false;
-
- /*
- * I/O command set specific error. Unfortunately these values are
- * reused for fabrics commands, but those should never get here.
- */
- case NVME_SC_BAD_ATTRIBUTES:
- case NVME_SC_INVALID_PI:
- case NVME_SC_READ_ONLY:
- case NVME_SC_ONCS_NOT_SUPPORTED:
- WARN_ON_ONCE(nvme_req(req)->cmd->common.opcode ==
- nvme_fabrics_command);
- return false;
-
- /*
- * Media and Data Integrity Errors:
- */
- case NVME_SC_WRITE_FAULT:
- case NVME_SC_READ_ERROR:
- case NVME_SC_GUARD_CHECK:
- case NVME_SC_APPTAG_CHECK:
- case NVME_SC_REFTAG_CHECK:
- case NVME_SC_COMPARE_FAILED:
- case NVME_SC_ACCESS_DENIED:
- case NVME_SC_UNWRITTEN_BLOCK:
- return false;
- }
-
- /* Everything else could be a path failure, so should be retried */
- return true;
+ return blk_path_error(error);
}
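blk_path_error() centralizes the same classification in the block layer; its
4.16-era shape is approximately the following (quoted from memory, see
include/linux/blk_types.h):

static inline bool blk_path_error(blk_status_t error)
{
        switch (error) {
        case BLK_STS_NOTSUPP:
        case BLK_STS_NOSPC:
        case BLK_STS_TARGET:
        case BLK_STS_NEXUS:
        case BLK_STS_MEDIUM:
        case BLK_STS_PROTECTION:
                /* Real target-side errors: another path won't help. */
                return false;
        default:
                /* Anything else may be a path failure, worth a retry. */
                return true;
        }
}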
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ea1aa5283e8e..8e4550fa08f8 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -32,6 +32,8 @@ extern unsigned int admin_timeout;
#define NVME_KATO_GRACE 10
extern struct workqueue_struct *nvme_wq;
+extern struct workqueue_struct *nvme_reset_wq;
+extern struct workqueue_struct *nvme_delete_wq;
enum {
NVME_NS_LBA = 0,
@@ -119,6 +121,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
+ NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
NVME_CTRL_RECONNECTING,
NVME_CTRL_DELETING,
@@ -393,6 +396,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
@@ -401,7 +405,7 @@ extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
void nvme_failover_req(struct request *req);
-bool nvme_req_needs_failover(struct request *req);
+bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
@@ -417,11 +421,21 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+
+ if (head->disk && list_empty(&head->list))
+ kblockd_schedule_work(&head->requeue_work);
+}
+
#else
static inline void nvme_failover_req(struct request *req)
{
}
-static inline bool nvme_req_needs_failover(struct request *req)
+static inline bool nvme_req_needs_failover(struct request *req,
+ blk_status_t error)
{
return false;
}
@@ -448,6 +462,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f5800c3c9082..6fe7af00a1f4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -75,7 +75,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
- struct nvme_queue **queues;
+ struct nvme_queue *queues;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
@@ -365,7 +365,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
WARN_ON(hctx_idx != 0);
WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -387,7 +387,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+ struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
if (!nvmeq->tags)
nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -403,7 +403,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
- struct nvme_queue *nvmeq = dev->queues[queue_idx];
+ struct nvme_queue *nvmeq = &dev->queues[queue_idx];
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
@@ -448,12 +448,34 @@ static void **nvme_pci_iod_list(struct request *req)
return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ int nseg = blk_rq_nr_phys_segments(req);
+ unsigned int avg_seg_size;
+
+ if (nseg == 0)
+ return false;
+
+ avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
+
+ if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+ return false;
+ if (!iod->nvmeq->qid)
+ return false;
+ if (!sgl_threshold || avg_seg_size < sgl_threshold)
+ return false;
+ return true;
+}
+
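Rule of thumb encoded above, assuming the default sgl_threshold of 32 KiB:

/*
 * 128 KiB request in 4 segments   ->  avg_seg_size = 32 KiB  ->  SGL
 * 128 KiB request in 16 segments  ->  avg_seg_size =  8 KiB  ->  PRP
 * Admin-queue commands (qid == 0) always use PRPs.
 */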
static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = blk_rq_nr_phys_segments(rq);
unsigned int size = blk_rq_payload_bytes(rq);
+ iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
iod->use_sgl);
@@ -604,8 +626,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_addr_t prp_dma;
int nprps, i;
- iod->use_sgl = false;
-
length -= (page_size - offset);
if (length <= 0) {
iod->first_dma = 0;
@@ -705,22 +725,19 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmd)
+ struct request *req, struct nvme_rw_command *cmd, int entries)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- int length = blk_rq_payload_bytes(req);
struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
struct scatterlist *sg = iod->sg;
- int entries = iod->nents, i = 0;
dma_addr_t sgl_dma;
-
- iod->use_sgl = true;
+ int i = 0;
/* setting the transfer type as SGL */
cmd->flags = NVME_CMD_SGL_METABUF;
- if (length == sg_dma_len(sg)) {
+ if (entries == 1) {
nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
return BLK_STS_OK;
}
@@ -760,33 +777,12 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
}
nvme_pci_sgl_set_data(&sg_list[i++], sg);
-
- length -= sg_dma_len(sg);
sg = sg_next(sg);
- entries--;
- } while (length > 0);
+ } while (--entries > 0);
- WARN_ON(entries > 0);
return BLK_STS_OK;
}
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int avg_seg_size;
-
- avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
- blk_rq_nr_phys_segments(req));
-
- if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
- return false;
- if (!iod->nvmeq->qid)
- return false;
- if (!sgl_threshold || avg_seg_size < sgl_threshold)
- return false;
- return true;
-}
-
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
@@ -795,6 +791,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
enum dma_data_direction dma_dir = rq_data_dir(req) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
blk_status_t ret = BLK_STS_IOERR;
+ int nr_mapped;
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(q, req, iod->sg);
@@ -802,12 +799,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out;
ret = BLK_STS_RESOURCE;
- if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
- DMA_ATTR_NO_WARN))
+ nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
+ DMA_ATTR_NO_WARN);
+ if (!nr_mapped)
goto out;
- if (nvme_pci_use_sgls(dev, req))
- ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+ if (iod->use_sgl)
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
@@ -1046,7 +1044,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
struct nvme_command c;
memset(&c, 0, sizeof(c));
@@ -1140,9 +1138,14 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
*/
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
- /* If there is a reset ongoing, we shouldn't reset again. */
- if (dev->ctrl.state == NVME_CTRL_RESETTING)
+ /* If there is a reset/reinit ongoing, we shouldn't reset again. */
+ switch (dev->ctrl.state) {
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
return false;
+ default:
+ break;
+ }
/* We shouldn't reset unless the controller is on fatal error state
* _or_ if we lost the communication with it.
@@ -1282,7 +1285,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
if (nvmeq->sq_cmds)
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1290,10 +1292,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
int i;
for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
dev->ctrl.queue_count--;
- dev->queues[i] = NULL;
- nvme_free_queue(nvmeq);
+ nvme_free_queue(&dev->queues[i]);
}
}
@@ -1325,12 +1325,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
- struct nvme_queue *nvmeq = dev->queues[0];
-
- if (!nvmeq)
- return;
- if (nvme_suspend_queue(nvmeq))
- return;
+ struct nvme_queue *nvmeq = &dev->queues[0];
if (shutdown)
nvme_shutdown_ctrl(&dev->ctrl);
@@ -1369,7 +1364,7 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+ if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
@@ -1384,13 +1379,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
return 0;
}
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ int depth, int node)
{
- struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
- node);
- if (!nvmeq)
- return NULL;
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+
+ if (dev->ctrl.queue_count > qid)
+ return 0;
nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
&nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1409,17 +1404,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_depth = depth;
nvmeq->qid = qid;
nvmeq->cq_vector = -1;
- dev->queues[qid] = nvmeq;
dev->ctrl.queue_count++;
- return nvmeq;
+ return 0;
free_cqdma:
dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
free_nvmeq:
- kfree(nvmeq);
- return NULL;
+ return -ENOMEM;
}
static int queue_request_irq(struct nvme_queue *nvmeq)
@@ -1592,14 +1585,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
if (result < 0)
return result;
- nvmeq = dev->queues[0];
- if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
- dev_to_node(dev->dev));
- if (!nvmeq)
- return -ENOMEM;
- }
+ result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+ dev_to_node(dev->dev));
+ if (result)
+ return result;
+ nvmeq = &dev->queues[0];
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -1629,7 +1620,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
/* vector == qid - 1, match nvme_create_queue */
- if (!nvme_alloc_queue(dev, i, dev->q_depth,
+ if (nvme_alloc_queue(dev, i, dev->q_depth,
pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
ret = -ENOMEM;
break;
@@ -1638,15 +1629,15 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
max = min(dev->max_qid, dev->ctrl.queue_count - 1);
for (i = dev->online_queues; i <= max; i++) {
- ret = nvme_create_queue(dev->queues[i], i);
+ ret = nvme_create_queue(&dev->queues[i], i);
if (ret)
break;
}
/*
* Ignore failing Create SQ/CQ commands, we can continue with less
- * than the desired aount of queues, and even a controller without
- * I/O queues an still be used to issue admin commands. This might
+ * than the desired amount of queues, and even a controller without
+ * I/O queues can still be used to issue admin commands. This might
* be useful to upgrade a buggy firmware for example.
*/
return ret >= 0 ? 0 : ret;
@@ -1663,30 +1654,40 @@ static ssize_t nvme_cmb_show(struct device *dev,
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
-static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
+{
+ u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
+
+ return 1ULL << (12 + 4 * szu);
+}
+
+static u32 nvme_cmb_size(struct nvme_dev *dev)
{
- u64 szu, size, offset;
+ return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
+}
+
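Decoding example for the CMBSZ fields used by the two helpers above (values
illustrative):

/*
 * SZU = 2  ->  unit = 1ULL << (12 + 4 * 2) = 1 MiB
 * SZ  = 16 ->  CMB size = 16 * 1 MiB = 16 MiB
 * The CMBLOC offset (OFST) is scaled by the same unit.
 */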
+static void nvme_map_cmb(struct nvme_dev *dev)
+{
+ u64 size, offset;
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- void __iomem *cmb;
int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
- if (!(NVME_CMB_SZ(dev->cmbsz)))
- return NULL;
+ if (!dev->cmbsz)
+ return;
dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
if (!use_cmb_sqes)
- return NULL;
+ return;
- szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
- size = szu * NVME_CMB_SZ(dev->cmbsz);
- offset = szu * NVME_CMB_OFST(dev->cmbloc);
+ size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
+ offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
bar = NVME_CMB_BIR(dev->cmbloc);
bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
- return NULL;
+ return;
/*
* Controllers may support a CMB size larger than their BAR,
@@ -1696,13 +1697,16 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
- if (!cmb)
- return NULL;
-
+ dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
+ if (!dev->cmb)
+ return;
dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
- return cmb;
+
+ if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL))
+ dev_warn(dev->ctrl.device,
+ "failed to add sysfs attribute for CMB\n");
}
static inline void nvme_release_cmb(struct nvme_dev *dev)
@@ -1770,7 +1774,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
dma_addr_t descs_dma;
int i = 0;
void **bufs;
- u64 size = 0, tmp;
+ u64 size, tmp;
tmp = (preferred + chunk_size - 1);
do_div(tmp, chunk_size);
@@ -1853,7 +1857,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
u64 preferred = (u64)dev->ctrl.hmpre * 4096;
u64 min = (u64)dev->ctrl.hmmin * 4096;
u32 enable_bits = NVME_HOST_MEM_ENABLE;
- int ret = 0;
+ int ret;
preferred = min(preferred, max);
if (min > max) {
@@ -1894,7 +1898,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- struct nvme_queue *adminq = dev->queues[0];
+ struct nvme_queue *adminq = &dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, nr_io_queues;
unsigned long size;
@@ -1907,7 +1911,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+ if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
if (result > 0)
@@ -2007,9 +2011,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
+static void nvme_disable_io_queues(struct nvme_dev *dev)
{
- int pass;
+ int pass, queues = dev->online_queues - 1;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
@@ -2020,7 +2024,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
retry:
timeout = ADMIN_TIMEOUT;
for (; i > 0; i--, sent++)
- if (nvme_delete_queue(dev->queues[i], opcode))
+ if (nvme_delete_queue(&dev->queues[i], opcode))
break;
while (sent--) {
@@ -2035,13 +2039,12 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
}
/*
- * Return: error value if an error occurred setting up the queues or calling
- * Identify Device. 0 if these succeeded, even if adding some of the
- * namespaces failed. At the moment, these failures are silent. TBD which
- * failures should be reported.
+ * Return an error value only when tagset allocation fails.
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ int ret;
+
if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2057,8 +2060,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
- if (blk_mq_alloc_tag_set(&dev->tagset))
- return 0;
+ ret = blk_mq_alloc_tag_set(&dev->tagset);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return ret;
+ }
dev->ctrl.tagset = &dev->tagset;
nvme_dbbuf_set(dev);
@@ -2124,22 +2131,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
"set queue depth=%u\n", dev->q_depth);
}
- /*
- * CMBs can currently only exist on >=1.2 PCIe devices. We only
- * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
- * has no name we can pass NULL as final argument to
- * sysfs_add_file_to_group.
- */
-
- if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
- dev->cmb = nvme_map_cmb(dev);
- if (dev->cmb) {
- if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL))
- dev_warn(dev->ctrl.device,
- "failed to add sysfs attribute for CMB\n");
- }
- }
+ nvme_map_cmb(dev);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -2172,7 +2164,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i, queues;
+ int i;
bool dead = true;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -2207,21 +2199,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
}
nvme_stop_queues(&dev->ctrl);
- queues = dev->online_queues - 1;
- for (i = dev->ctrl.queue_count - 1; i > 0; i--)
- nvme_suspend_queue(dev->queues[i]);
-
- if (dead) {
- /* A device might become IO incapable very soon during
- * probe, before the admin queue is configured. Thus,
- * queue_count can be 0 here.
- */
- if (dev->ctrl.queue_count)
- nvme_suspend_queue(dev->queues[0]);
- } else {
- nvme_disable_io_queues(dev, queues);
+ if (!dead) {
+ nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
+ for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
+ nvme_suspend_queue(&dev->queues[i]);
+
nvme_pci_disable(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2291,6 +2275,7 @@ static void nvme_reset_work(struct work_struct *work)
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result = -ENODEV;
+ enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
goto out;
@@ -2302,6 +2287,16 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ /*
+ * Reuse the RECONNECTING state from the nvme-fc/rdma transports to mark
+ * the initializing procedure here.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller RECONNECTING\n");
+ goto out;
+ }
+
result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -2354,15 +2349,23 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
+ new_state = NVME_CTRL_ADMIN_ONLY;
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+ /* hit this only when tagset allocation fails */
+ if (nvme_dev_add(dev))
+ new_state = NVME_CTRL_ADMIN_ONLY;
nvme_unfreeze(&dev->ctrl);
}
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
- dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+ /*
+ * If only the admin queue is live, keep it for further investigation
+ * or recovery.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller state %d\n", new_state);
goto out;
}
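The reset path now ends in one of two states; sketched transitions, as implied
by the code above:

/*
 * RESETTING -> RECONNECTING -> LIVE        (I/O queues usable)
 * RESETTING -> RECONNECTING -> ADMIN_ONLY  (no I/O queues created, or
 *                                           tagset allocation failed)
 */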
@@ -2470,8 +2473,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return -ENOMEM;
- dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
- GFP_KERNEL, node);
+
+ dev->queues = kcalloc_node(num_possible_cpus() + 1,
+ sizeof(struct nvme_queue), GFP_KERNEL, node);
if (!dev->queues)
goto free;
@@ -2498,10 +2502,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release_pools;
- nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
- queue_work(nvme_wq, &dev->ctrl.reset_work);
+ nvme_reset_ctrl(&dev->ctrl);
+
return 0;
release_pools:
@@ -2525,7 +2529,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
static void nvme_reset_done(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_reset_ctrl(&dev->ctrl);
+ nvme_reset_ctrl_sync(&dev->ctrl);
}
static void nvme_shutdown(struct pci_dev *pdev)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37af56596be6..2bc059f7d73c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -66,7 +66,6 @@ struct nvme_rdma_request {
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
int nents;
- bool inline_data;
struct ib_reg_wr reg_wr;
struct ib_cqe reg_cqe;
struct nvme_rdma_queue *queue;
@@ -974,12 +973,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_start_queues(&ctrl->ctrl);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ /* state change failure should never happen */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
nvme_rdma_reconnect_or_remove(ctrl);
}
static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
{
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return;
queue_work(nvme_wq, &ctrl->err_work);
@@ -1086,7 +1091,6 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
- req->inline_data = true;
req->num_sge++;
return 0;
}
@@ -1158,7 +1162,6 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
int count, ret;
req->num_sge = 1;
- req->inline_data = false;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -1753,6 +1756,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
nvme_stop_ctrl(&ctrl->ctrl);
nvme_rdma_shutdown_ctrl(ctrl, false);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ /* state change failure should never happen */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
ret = nvme_rdma_configure_admin_queue(ctrl, false);
if (ret)
goto out_fail;
@@ -2006,6 +2015,7 @@ out_free_ctrl:
static struct nvmf_transport_ops nvme_rdma_transport = {
.name = "rdma",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR,
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
@@ -2028,7 +2038,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
}
mutex_unlock(&nvme_rdma_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
static struct ib_client nvme_rdma_ib_client = {
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
new file mode 100644
index 000000000000..41944bbef835
--- /dev/null
+++ b/drivers/nvme/host/trace.c
@@ -0,0 +1,130 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <asm/unaligned.h>
+#include "trace.h"
+
+static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 sqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 sq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 cqid = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u",
+ sqid, qsize, sq_flags, cqid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 cqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 cq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 irq_vector = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u",
+ cqid, qsize, cq_flags, irq_vector);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 cns = cdw10[0];
+ u16 ctrlid = get_unaligned_le16(cdw10 + 2);
+
+ trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u16 length = get_unaligned_le16(cdw10 + 8);
+ u16 control = get_unaligned_le16(cdw10 + 10);
+ u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+ u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+ trace_seq_printf(p,
+ "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+ slba, length, control, dsmgmt, reftag);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "nr=%u, attributes=%u",
+ get_unaligned_le32(cdw10),
+ get_unaligned_le32(cdw10 + 4));
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_admin_create_sq:
+ return nvme_trace_create_sq(p, cdw10);
+ case nvme_admin_create_cq:
+ return nvme_trace_create_cq(p, cdw10);
+ case nvme_admin_identify:
+ return nvme_trace_admin_identify(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ return nvme_trace_read_write(p, cdw10);
+ case nvme_cmd_dsm:
+ return nvme_trace_dsm(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
new file mode 100644
index 000000000000..ea91fccd1bc0
--- /dev/null
+++ b/drivers/nvme/host/trace.h
@@ -0,0 +1,165 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvme
+
+#if !defined(_TRACE_NVME_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVME_H
+
+#include <linux/nvme.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "nvme.h"
+
+#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
+#define show_admin_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_admin_opcode_name(nvme_admin_delete_sq), \
+ nvme_admin_opcode_name(nvme_admin_create_sq), \
+ nvme_admin_opcode_name(nvme_admin_get_log_page), \
+ nvme_admin_opcode_name(nvme_admin_delete_cq), \
+ nvme_admin_opcode_name(nvme_admin_create_cq), \
+ nvme_admin_opcode_name(nvme_admin_identify), \
+ nvme_admin_opcode_name(nvme_admin_abort_cmd), \
+ nvme_admin_opcode_name(nvme_admin_set_features), \
+ nvme_admin_opcode_name(nvme_admin_get_features), \
+ nvme_admin_opcode_name(nvme_admin_async_event), \
+ nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_activate_fw), \
+ nvme_admin_opcode_name(nvme_admin_download_fw), \
+ nvme_admin_opcode_name(nvme_admin_ns_attach), \
+ nvme_admin_opcode_name(nvme_admin_keep_alive), \
+ nvme_admin_opcode_name(nvme_admin_directive_send), \
+ nvme_admin_opcode_name(nvme_admin_directive_recv), \
+ nvme_admin_opcode_name(nvme_admin_dbbuf), \
+ nvme_admin_opcode_name(nvme_admin_format_nvm), \
+ nvme_admin_opcode_name(nvme_admin_security_send), \
+ nvme_admin_opcode_name(nvme_admin_security_recv), \
+ nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_admin_cmd(opcode, cdw10) \
+ nvme_trace_parse_admin_cmd(p, opcode, cdw10)
+
+#define nvme_opcode_name(opcode) { opcode, #opcode }
+#define show_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_opcode_name(nvme_cmd_flush), \
+ nvme_opcode_name(nvme_cmd_write), \
+ nvme_opcode_name(nvme_cmd_read), \
+ nvme_opcode_name(nvme_cmd_write_uncor), \
+ nvme_opcode_name(nvme_cmd_compare), \
+ nvme_opcode_name(nvme_cmd_write_zeroes), \
+ nvme_opcode_name(nvme_cmd_dsm), \
+ nvme_opcode_name(nvme_cmd_resv_register), \
+ nvme_opcode_name(nvme_cmd_resv_report), \
+ nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_resv_release))
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_cmd(opcode, cdw10) \
+ nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
+
+TRACE_EVENT(nvme_setup_admin_cmd,
+ TP_PROTO(struct nvme_command *cmd),
+ TP_ARGS(cmd),
+ TP_STRUCT__entry(
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->cid, __entry->flags, __entry->metadata,
+ show_admin_opcode_name(__entry->opcode),
+ __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_setup_nvm_cmd,
+ TP_PROTO(int qid, struct nvme_command *cmd),
+ TP_ARGS(qid, cmd),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u32, nsid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->qid = qid;
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->qid, __entry->nsid, __entry->cid,
+ __entry->flags, __entry->metadata,
+ show_opcode_name(__entry->opcode),
+ __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_complete_rq,
+ TP_PROTO(struct request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u8, retries)
+ __field(u8, flags)
+ __field(u16, status)
+ ),
+ TP_fast_assign(
+ __entry->qid = req->q->id;
+ __entry->cid = req->tag;
+ __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+ __entry->retries = nvme_req(req)->retries;
+ __entry->flags = nvme_req(req)->flags;
+ __entry->status = nvme_req(req)->status;
+ ),
+ TP_printk("cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
+ __entry->cid, __entry->qid, __entry->result,
+ __entry->retries, __entry->flags, __entry->status)
+);
+
+#endif /* _TRACE_NVME_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
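With CONFIG_TRACING enabled, these events appear under the standard tracefs
hierarchy; typical usage (paths per the usual tracefs layout):

echo 1 > /sys/kernel/debug/tracing/events/nvme/nvme_setup_nvm_cmd/enable
echo 1 > /sys/kernel/debug/tracing/events/nvme/nvme_complete_rq/enable
cat /sys/kernel/debug/tracing/trace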
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 03e4ab65fe77..5f4f8b16685f 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -29,6 +29,7 @@ config NVME_TARGET_RDMA
tristate "NVMe over Fabrics RDMA target support"
depends on INFINIBAND
depends on NVME_TARGET
+ select SGL_ALLOC
help
This enables the NVMe RDMA target support, which allows exporting NVMe
devices over RDMA.
@@ -39,6 +40,7 @@ config NVME_TARGET_FC
tristate "NVMe over Fabrics FC target driver"
depends on NVME_TARGET
depends on HAS_DMA
+ select SGL_ALLOC
help
This enables the NVMe FC target support, which allows exporting NVMe
devices over FC.
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b54748ad5f48..0bd737117a80 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -512,6 +512,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sg_cnt = 0;
req->transfer_len = 0;
req->rsp->status = 0;
+ req->ns = NULL;
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
@@ -557,6 +558,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
percpu_ref_put(&req->sq->ref);
+ if (req->ns)
+ nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
@@ -830,7 +833,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
/* Don't accept keep-alive timeout for discovery controllers */
if (kato) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_free_sqs;
+ goto out_remove_ida;
}
/*
@@ -860,6 +863,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
*ctrlp = ctrl;
return 0;
+out_remove_ida:
+ ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
kfree(ctrl->sqs);
out_free_cqs:
@@ -877,21 +882,22 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
struct nvmet_subsys *subsys = ctrl->subsys;
- nvmet_stop_keep_alive_timer(ctrl);
-
mutex_lock(&subsys->lock);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
+ nvmet_stop_keep_alive_timer(ctrl);
+
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
ida_simple_remove(&cntlid_ida, ctrl->cntlid);
- nvmet_subsys_put(subsys);
kfree(ctrl->sqs);
kfree(ctrl->cqs);
kfree(ctrl);
+
+ nvmet_subsys_put(subsys);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
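The new out_remove_ida label fixes a leak: the discovery keep-alive check used to jump to out_free_sqs, skipping the ida_simple_remove() for the cntlid that had already been allocated. The idiom being restored is unwinding in strict reverse order of acquisition, roughly (the example_* names are hypothetical):

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	/* hypothetical second setup step; assume it can fail */
	static int example_step_two(void) { return -EINVAL; }

	static int example_alloc(void)
	{
		int id, ret;

		id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
		if (id < 0)
			return id;

		ret = example_step_two();
		if (ret)
			goto out_remove_ida;	/* undo only what succeeded */
		return 0;

	out_remove_ida:
		ida_simple_remove(&example_ida, id);
		return ret;
	}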
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index db3bf6b8bf9e..19e9e42ae943 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -225,7 +225,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out_ctrl_put;
}
- pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
out:
kfree(d);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 5fd86039e353..9b39a6cb1935 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1697,31 +1697,12 @@ static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
struct scatterlist *sg;
- struct page *page;
unsigned int nent;
- u32 page_len, length;
- int i = 0;
- length = fod->req.transfer_len;
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+ sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
if (!sg)
goto out;
- sg_init_table(sg, nent);
-
- while (length) {
- page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
-
fod->data_sg = sg;
fod->data_sg_cnt = nent;
fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
@@ -1731,14 +1712,6 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
return 0;
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- fod->data_sg = NULL;
- fod->data_sg_cnt = 0;
out:
return NVME_SC_INTERNAL;
}
@@ -1746,18 +1719,13 @@ out:
static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
- struct scatterlist *sg;
- int count;
-
if (!fod->data_sg || !fod->data_sg_cnt)
return;
fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
- for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
- __free_page(sg_page(sg));
- kfree(fod->data_sg);
+ sgl_free(fod->data_sg);
fod->data_sg = NULL;
fod->data_sg_cnt = 0;
}
@@ -2522,14 +2490,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
if ((tgtport->fc_target_port.node_name == traddr.nn) &&
(tgtport->fc_target_port.port_name == traddr.pn)) {
- /* a FC port can only be 1 nvmet port id */
- if (!tgtport->port) {
- tgtport->port = port;
- port->priv = tgtport;
- nvmet_fc_tgtport_get(tgtport);
- ret = 0;
- } else
- ret = -EALREADY;
+ tgtport->port = port;
+ ret = 0;
break;
}
}
@@ -2540,19 +2502,7 @@ nvmet_fc_add_port(struct nvmet_port *port)
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
- struct nvmet_fc_tgtport *tgtport = port->priv;
- unsigned long flags;
- bool matched = false;
-
- spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- if (tgtport->port == port) {
- matched = true;
- tgtport->port = NULL;
- }
- spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
-
- if (matched)
- nvmet_fc_tgtport_put(tgtport);
+ /* nothing to do */
}
static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 7b75d9de55ab..34712def81b1 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -204,6 +204,10 @@ struct fcloop_lport {
struct completion unreg_done;
};
+struct fcloop_lport_priv {
+ struct fcloop_lport *lport;
+};
+
struct fcloop_rport {
struct nvme_fc_remote_port *remoteport;
struct nvmet_fc_target_port *targetport;
@@ -238,21 +242,32 @@ struct fcloop_lsreq {
int status;
};
+enum {
+ INI_IO_START = 0,
+ INI_IO_ACTIVE = 1,
+ INI_IO_ABORTED = 2,
+ INI_IO_COMPLETED = 3,
+};
+
struct fcloop_fcpreq {
struct fcloop_tport *tport;
struct nvmefc_fcp_req *fcpreq;
spinlock_t reqlock;
u16 status;
+ u32 inistate;
bool active;
bool aborted;
- struct work_struct work;
+ struct kref ref;
+ struct work_struct fcp_rcv_work;
+ struct work_struct abort_rcv_work;
+ struct work_struct tio_done_work;
struct nvmefc_tgt_fcp_req tgt_fcp_req;
};
struct fcloop_ini_fcpreq {
struct nvmefc_fcp_req *fcpreq;
struct fcloop_fcpreq *tfcp_req;
- struct work_struct iniwork;
+ spinlock_t inilock;
};
static inline struct fcloop_lsreq *
@@ -343,17 +358,122 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
return 0;
}
-/*
- * FCP IO operation done by initiator abort.
- * call back up initiator "done" flows.
- */
static void
-fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+fcloop_tfcp_req_free(struct kref *ref)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(ref, struct fcloop_fcpreq, ref);
+
+ kfree(tfcp_req);
+}
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+ kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+ return kref_get_unless_zero(&tfcp_req->ref);
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+ struct fcloop_fcpreq *tfcp_req, int status)
+{
+ struct fcloop_ini_fcpreq *inireq = NULL;
+
+ if (fcpreq) {
+ inireq = fcpreq->private;
+ spin_lock(&inireq->inilock);
+ inireq->tfcp_req = NULL;
+ spin_unlock(&inireq->inilock);
+
+ fcpreq->status = status;
+ fcpreq->done(fcpreq);
+ }
+
+ /* release original io reference on tgt struct */
+ fcloop_tfcp_req_put(tfcp_req);
+}
+
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
{
- struct fcloop_ini_fcpreq *inireq =
- container_of(work, struct fcloop_ini_fcpreq, iniwork);
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ int ret = 0;
+ bool aborted = false;
- inireq->fcpreq->done(inireq->fcpreq);
+ spin_lock(&tfcp_req->reqlock);
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ tfcp_req->inistate = INI_IO_ACTIVE;
+ break;
+ case INI_IO_ABORTED:
+ aborted = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(aborted))
+ ret = -ECANCELED;
+ else
+ ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req,
+ fcpreq->cmdaddr, fcpreq->cmdlen);
+ if (ret)
+ fcloop_call_host_done(fcpreq, tfcp_req, ret);
+
+ return;
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+ struct nvmefc_fcp_req *fcpreq;
+ bool completed = false;
+
+ spin_lock(&tfcp_req->reqlock);
+ fcpreq = tfcp_req->fcpreq;
+ switch (tfcp_req->inistate) {
+ case INI_IO_ABORTED:
+ break;
+ case INI_IO_COMPLETED:
+ completed = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(completed)) {
+ /* remove reference taken in original abort downcall */
+ fcloop_tfcp_req_put(tfcp_req);
+ return;
+ }
+
+ if (tfcp_req->tport->targetport)
+ nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req);
+
+ spin_lock(&tfcp_req->reqlock);
+ tfcp_req->fcpreq = NULL;
+ spin_unlock(&tfcp_req->reqlock);
+
+ fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ /* call_host_done releases reference for abort downcall */
}
/*
@@ -364,20 +484,15 @@ static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
struct fcloop_fcpreq *tfcp_req =
- container_of(work, struct fcloop_fcpreq, work);
- struct fcloop_tport *tport = tfcp_req->tport;
+ container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
spin_lock(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
+ tfcp_req->inistate = INI_IO_COMPLETED;
spin_unlock(&tfcp_req->reqlock);
- if (tport->remoteport && fcpreq) {
- fcpreq->status = tfcp_req->status;
- fcpreq->done(fcpreq);
- }
-
- kfree(tfcp_req);
+ fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -390,7 +505,6 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
struct fcloop_fcpreq *tfcp_req;
- int ret = 0;
if (!rport->targetport)
return -ECONNREFUSED;
@@ -401,16 +515,20 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
inireq->fcpreq = fcpreq;
inireq->tfcp_req = tfcp_req;
- INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
+ spin_lock_init(&inireq->inilock);
+
tfcp_req->fcpreq = fcpreq;
tfcp_req->tport = rport->targetport->private;
+ tfcp_req->inistate = INI_IO_START;
spin_lock_init(&tfcp_req->reqlock);
- INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+ INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+ INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+ INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+ kref_init(&tfcp_req->ref);
- ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
- fcpreq->cmdaddr, fcpreq->cmdlen);
+ schedule_work(&tfcp_req->fcp_rcv_work);
- return ret;
+ return 0;
}
static void
@@ -589,7 +707,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
- schedule_work(&tfcp_req->work);
+ schedule_work(&tfcp_req->tio_done_work);
}
static void
@@ -605,27 +723,47 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
void *hw_queue_handle,
struct nvmefc_fcp_req *fcpreq)
{
- struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
- struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
+ struct fcloop_fcpreq *tfcp_req;
+ bool abortio = true;
+
+ spin_lock(&inireq->inilock);
+ tfcp_req = inireq->tfcp_req;
+ if (tfcp_req)
+ fcloop_tfcp_req_get(tfcp_req);
+ spin_unlock(&inireq->inilock);
if (!tfcp_req)
/* abort has already been called */
return;
- if (rport->targetport)
- nvmet_fc_rcv_fcp_abort(rport->targetport,
- &tfcp_req->tgt_fcp_req);
-
/* break initiator/target relationship for io */
spin_lock(&tfcp_req->reqlock);
- inireq->tfcp_req = NULL;
- tfcp_req->fcpreq = NULL;
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ case INI_IO_ACTIVE:
+ tfcp_req->inistate = INI_IO_ABORTED;
+ break;
+ case INI_IO_COMPLETED:
+ abortio = false;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
spin_unlock(&tfcp_req->reqlock);
- /* post the aborted io completion */
- fcpreq->status = -ECANCELED;
- schedule_work(&inireq->iniwork);
+ if (abortio)
+ /* leave the reference while the work item is scheduled */
+ WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+ else {
+ /*
+ * the io has already had its done callback made, so there is
+ * nothing more to do; just release the reference taken above.
+ */
+ fcloop_tfcp_req_put(tfcp_req);
+ }
}
static void
@@ -657,7 +795,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
- struct fcloop_lport *lport = localport->private;
+ struct fcloop_lport_priv *lport_priv = localport->private;
+ struct fcloop_lport *lport = lport_priv->lport;
/* release any threads waiting for the unreg to complete */
complete(&lport->unreg_done);
@@ -697,7 +836,7 @@ static struct nvme_fc_port_template fctemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* sizes of additional private data for data structures */
- .local_priv_sz = sizeof(struct fcloop_lport),
+ .local_priv_sz = sizeof(struct fcloop_lport_priv),
.remote_priv_sz = sizeof(struct fcloop_rport),
.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
@@ -714,8 +853,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* optional features */
- .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
- NVMET_FCTGTFEAT_OPDONE_IN_ISR,
+ .target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
};
@@ -728,11 +866,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
struct fcloop_ctrl_options *opts;
struct nvme_fc_local_port *localport;
struct fcloop_lport *lport;
- int ret;
+ struct fcloop_lport_priv *lport_priv;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+ if (!lport)
+ return -ENOMEM;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
- return -ENOMEM;
+ goto out_free_lport;
ret = fcloop_parse_options(opts, buf);
if (ret)
@@ -752,23 +896,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
if (!ret) {
- unsigned long flags;
-
/* success */
- lport = localport->private;
+ lport_priv = localport->private;
+ lport_priv->lport = lport;
+
lport->localport = localport;
INIT_LIST_HEAD(&lport->lport_list);
spin_lock_irqsave(&fcloop_lock, flags);
list_add_tail(&lport->lport_list, &fcloop_lports);
spin_unlock_irqrestore(&fcloop_lock, flags);
-
- /* mark all of the input buffer consumed */
- ret = count;
}
out_free_opts:
kfree(opts);
+out_free_lport:
+ /* free only if we're going to fail */
+ if (ret)
+ kfree(lport);
+
return ret ? ret : count;
}
@@ -790,6 +936,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
wait_for_completion(&lport->unreg_done);
+ kfree(lport);
+
return ret;
}
@@ -1085,7 +1233,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fcloop_nport *nport = NULL, *tmpport;
- struct fcloop_tport *tport;
+ struct fcloop_tport *tport = NULL;
u64 nodename, portname;
unsigned long flags;
int ret;
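The fcloop rework above replaces the single iniwork item with a kref-managed request lifetime plus the INI_IO_* state machine, so the abort and completion paths can race without a use-after-free. Reduced to a generic sketch, the reference-counting idiom is (struct example_req is hypothetical):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct example_req {
		struct kref ref;
		/* ... payload ... */
	};

	static void example_req_free(struct kref *ref)
	{
		kfree(container_of(ref, struct example_req, ref));
	}

	/* the original I/O holds the initial reference from kref_init() */
	static struct example_req *example_req_alloc(void)
	{
		struct example_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

		if (req)
			kref_init(&req->ref);
		return req;
	}

	/* an abort path may only pin the object while it is still live */
	static bool example_req_tryget(struct example_req *req)
	{
		return kref_get_unless_zero(&req->ref);
	}

	static void example_req_put(struct example_req *req)
	{
		kref_put(&req->ref, example_req_free);
	}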
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 1e21b286f299..7991ec3a17db 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -686,6 +686,7 @@ static struct nvmet_fabrics_ops nvme_loop_ops = {
static struct nvmf_transport_ops nvme_loop_transport = {
.name = "loop",
+ .module = THIS_MODULE,
.create_ctrl = nvme_loop_create_ctrl,
};
@@ -716,7 +717,7 @@ static void __exit nvme_loop_cleanup_module(void)
nvme_delete_ctrl(&ctrl->ctrl);
mutex_unlock(&nvme_loop_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
module_init(nvme_loop_init_module);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 49912909c298..978e169c11bf 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -185,59 +185,6 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
-static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
-{
- struct scatterlist *sg;
- int count;
-
- if (!sgl || !nents)
- return;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
- kfree(sgl);
-}
-
-static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
- u32 length)
-{
- struct scatterlist *sg;
- struct page *page;
- unsigned int nent;
- int i = 0;
-
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- goto out;
-
- sg_init_table(sg, nent);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
-out:
- return NVME_SC_INTERNAL;
-}
-
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *c, bool admin)
{
@@ -484,7 +431,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
}
if (rsp->req.sg != &rsp->cmd->inline_sg)
- nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
+ sgl_free(rsp->req.sg);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -621,16 +568,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
u32 len = get_unaligned_le24(sgl->length);
u32 key = get_unaligned_le32(sgl->key);
int ret;
- u16 status;
/* no data command? */
if (!len)
return 0;
- status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
- len);
- if (status)
- return status;
+ rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
+ if (!rsp->req.sg)
+ return NVME_SC_INTERNAL;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
@@ -976,7 +921,7 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
- pr_info("freeing queue %d\n", queue->idx);
+ pr_debug("freeing queue %d\n", queue->idx);
nvmet_sq_destroy(&queue->nvme_sq);
@@ -1558,25 +1503,9 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
- struct nvmet_rdma_queue *queue;
-
nvmet_unregister_transport(&nvmet_rdma_ops);
-
- flush_scheduled_work();
-
- mutex_lock(&nvmet_rdma_queue_mutex);
- while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
- struct nvmet_rdma_queue, queue_list))) {
- list_del_init(&queue->queue_list);
-
- mutex_unlock(&nvmet_rdma_queue_mutex);
- __nvmet_rdma_queue_disconnect(queue);
- mutex_lock(&nvmet_rdma_queue_mutex);
- }
- mutex_unlock(&nvmet_rdma_queue_mutex);
-
- flush_scheduled_work();
ib_unregister_client(&nvmet_rdma_ib_client);
+ WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
ida_destroy(&nvmet_rdma_queue_ida);
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 26618ba8f92a..a9d6fe86585b 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -316,6 +316,32 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
EXPORT_SYMBOL(of_get_cpu_node);
/**
+ * of_cpu_node_to_id: Get the logical CPU number for a given device_node
+ *
+ * @cpu_node: Pointer to the device_node for CPU.
+ *
+ * Returns the logical CPU number of the given CPU device_node.
+ * Returns -ENODEV if the CPU is not found.
+ */
+int of_cpu_node_to_id(struct device_node *cpu_node)
+{
+ int cpu;
+ bool found = false;
+ struct device_node *np;
+
+ for_each_possible_cpu(cpu) {
+ np = of_cpu_device_node_get(cpu);
+ found = (cpu_node == np);
+ of_node_put(np);
+ if (found)
+ return cpu;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(of_cpu_node_to_id);
+
+/**
* __of_device_is_compatible() - Check if the node matches given constraints
* @device: pointer to node
* @compat: required compatible string, NULL or "" for any match
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 3481e69738b5..8c0c92712fc9 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -77,6 +77,11 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
if (of_property_read_bool(child, "broken-turn-around"))
mdio->phy_ignore_ta_mask |= 1 << addr;
+ of_property_read_u32(child, "reset-assert-us",
+ &phy->mdio.reset_assert_delay);
+ of_property_read_u32(child, "reset-deassert-us",
+ &phy->mdio.reset_deassert_delay);
+
/* Associate the OF node with the device structure so it
* can be looked up later */
of_node_get(child);
@@ -231,7 +236,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
rc = of_mdiobus_register_phy(mdio, child, addr);
else
rc = of_mdiobus_register_device(mdio, child, addr);
- if (rc)
+
+ if (rc == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ else if (rc)
goto unregister;
}
@@ -255,7 +265,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
if (of_mdiobus_child_is_phy(child)) {
rc = of_mdiobus_register_phy(mdio, child, addr);
- if (rc)
+ if (rc && rc != -ENODEV)
goto unregister;
}
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 8ad33a44a7b8..f25d36358187 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -981,10 +981,18 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
+static void *
+of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ const struct device *dev)
+{
+ return (void *)of_device_get_match_data(dev);
+}
+
const struct fwnode_operations of_fwnode_ops = {
.get = of_fwnode_get,
.put = of_fwnode_put,
.device_is_available = of_fwnode_device_is_available,
+ .device_get_match_data = of_fwnode_device_get_match_data,
.property_present = of_fwnode_property_present,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
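With the .device_get_match_data op wired up, a bus-agnostic driver can call device_get_match_data() and have it resolve through of_device_get_match_data() on DT systems. A hedged usage sketch (struct example_cfg and example_probe are hypothetical):

	#include <linux/platform_device.h>
	#include <linux/property.h>

	struct example_cfg {
		int quirks;
	};

	static int example_probe(struct platform_device *pdev)
	{
		/* dispatches to of_fwnode_device_get_match_data() under DT */
		const struct example_cfg *cfg = device_get_match_data(&pdev->dev);

		if (!cfg)
			return -ENODEV;
		return 0;
	}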
diff --git a/drivers/opp/Makefile b/drivers/opp/Makefile
index e70ceb406fe9..6ce6aefacc81 100644
--- a/drivers/opp/Makefile
+++ b/drivers/opp/Makefile
@@ -2,3 +2,4 @@ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-opp-supply.o
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
new file mode 100644
index 000000000000..370eff3acd8a
--- /dev/null
+++ b/drivers/opp/ti-opp-supply.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon <nm@ti.com>
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * TI OPP supply driver that provides override into the regulator control
+ * for generic opp core to handle devices with ABB regulator and/or
+ * SmartReflex Class0.
+ */
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * struct ti_opp_supply_optimum_voltage_table - optimized voltage table
+ * @reference_uv: reference voltage (usually Nominal voltage)
+ * @optimized_uv: Optimized voltage from efuse
+ */
+struct ti_opp_supply_optimum_voltage_table {
+ unsigned int reference_uv;
+ unsigned int optimized_uv;
+};
+
+/**
+ * struct ti_opp_supply_data - OMAP specific opp supply data
+ * @vdd_table: Optimized voltage mapping table
+ * @num_vdd_table: number of entries in vdd_table
+ * @vdd_absolute_max_voltage_uv: absolute maximum voltage in UV for the supply
+ */
+struct ti_opp_supply_data {
+ struct ti_opp_supply_optimum_voltage_table *vdd_table;
+ u32 num_vdd_table;
+ u32 vdd_absolute_max_voltage_uv;
+};
+
+static struct ti_opp_supply_data opp_data;
+
+/**
+ * struct ti_opp_supply_of_data - device tree match data
+ * @flags: specific type of opp supply
+ * @efuse_voltage_mask: mask required for efuse register representing voltage
+ * @efuse_voltage_uv: Are the efuse entries in micro-volts? If not, assume
+ * milli-volts.
+ */
+struct ti_opp_supply_of_data {
+#define OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE BIT(1)
+#define OPPDM_HAS_NO_ABB BIT(2)
+ const u8 flags;
+ const u32 efuse_voltage_mask;
+ const bool efuse_voltage_uv;
+};
+
+/**
+ * _store_optimized_voltages() - store optimized voltages
+ * @dev: ti opp supply device for which we need to store info
+ * @data: data specific to the device
+ *
+ * Picks up the efuse-based optimized voltages for VDD, unique per device,
+ * and stores them in an internal data structure for use during transition
+ * requests.
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int _store_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ void __iomem *base;
+ struct property *prop;
+ struct resource *res;
+ const __be32 *val;
+ int proplen, i;
+ int ret = 0;
+ struct ti_opp_supply_optimum_voltage_table *table;
+ const struct ti_opp_supply_of_data *of_data = dev_get_drvdata(dev);
+
+ /* pick up Efuse based voltages */
+ res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Unable to get IO resource\n");
+ ret = -ENODEV;
+ goto out_map;
+ }
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ dev_err(dev, "Unable to map Efuse registers\n");
+ ret = -ENOMEM;
+ goto out_map;
+ }
+
+ /* Fetch efuse-settings. */
+ prop = of_find_property(dev->of_node, "ti,efuse-settings", NULL);
+ if (!prop) {
+ dev_err(dev, "No 'ti,efuse-settings' property found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ proplen = prop->length / sizeof(int);
+ data->num_vdd_table = proplen / 2;
+ /* Verify for corrupted OPP entries in dt */
+ if (data->num_vdd_table * 2 * sizeof(int) != prop->length) {
+ dev_err(dev, "Invalid 'ti,efuse-settings'\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "ti,absolute-max-voltage-uv",
+ &data->vdd_absolute_max_voltage_uv);
+ if (ret) {
+ dev_err(dev, "ti,absolute-max-voltage-uv is missing\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ table = kzalloc(sizeof(*data->vdd_table) *
+ data->num_vdd_table, GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ data->vdd_table = table;
+
+ val = prop->value;
+ for (i = 0; i < data->num_vdd_table; i++, table++) {
+ u32 efuse_offset;
+ u32 tmp;
+
+ table->reference_uv = be32_to_cpup(val++);
+ efuse_offset = be32_to_cpup(val++);
+
+ tmp = readl(base + efuse_offset);
+ tmp &= of_data->efuse_voltage_mask;
+ tmp >>= __ffs(of_data->efuse_voltage_mask);
+
+ table->optimized_uv = of_data->efuse_voltage_uv ? tmp :
+ tmp * 1000;
+
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d vset=%d\n",
+ i, efuse_offset, table->reference_uv,
+ table->optimized_uv);
+
+ /*
+ * Some older samples might not have an optimized efuse value.
+ * Use the reference voltage for those - just add a debug message
+ * for them.
+ */
+ if (!table->optimized_uv) {
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d:vset0\n",
+ i, efuse_offset, table->reference_uv);
+ table->optimized_uv = table->reference_uv;
+ }
+ }
+out:
+ iounmap(base);
+out_map:
+ return ret;
+}
+
+/**
+ * _free_optimized_voltages() - free resources for optvoltages
+ * @dev: device for which we need to free info
+ * @data: data specific to the device
+ */
+static void _free_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ kfree(data->vdd_table);
+ data->vdd_table = NULL;
+ data->num_vdd_table = 0;
+}
+
+/**
+ * _get_optimal_vdd_voltage() - Finds optimal voltage for the supply
+ * @dev: device for which we need to find info
+ * @data: data specific to the device
+ * @reference_uv: reference voltage (OPP voltage) for which we need value
+ *
+ * Return: if a match is found, return the optimized voltage; else return
+ * reference_uv. reference_uv is also returned if no optimization is needed.
+ */
+static int _get_optimal_vdd_voltage(struct device *dev,
+ struct ti_opp_supply_data *data,
+ int reference_uv)
+{
+ int i;
+ struct ti_opp_supply_optimum_voltage_table *table;
+
+ if (!data->num_vdd_table)
+ return reference_uv;
+
+ table = data->vdd_table;
+ if (!table)
+ return -EINVAL;
+
+ /* Find an exact match - this list is usually very small */
+ for (i = 0; i < data->num_vdd_table; i++, table++)
+ if (table->reference_uv == reference_uv)
+ return table->optimized_uv;
+
+ /* If things are screwed up, we'd make a mess on the console - ratelimit */
+ dev_err_ratelimited(dev, "%s: Failed optimized voltage match for %d\n",
+ __func__, reference_uv);
+ return reference_uv;
+}
+
+static int _opp_set_voltage(struct device *dev,
+ struct dev_pm_opp_supply *supply,
+ int new_target_uv, struct regulator *reg,
+ char *reg_name)
+{
+ int ret;
+ unsigned long vdd_uv, uv_max;
+
+ if (new_target_uv)
+ vdd_uv = new_target_uv;
+ else
+ vdd_uv = supply->u_volt;
+
+ /*
+ * If we do have an absolute max voltage specified, then we should
+ * use that voltage instead to allow for cases where the voltage rails
+ * are ganged (for example, if we set the max for an OPP as 1.12v and
+ * the absolute max is 1.5v, another rail cannot be raised to 1.25v
+ * while the regulator is constrained to a max of 1.12v, even though
+ * it can function at 1.25v).
+ */
+ if (opp_data.vdd_absolute_max_voltage_uv)
+ uv_max = opp_data.vdd_absolute_max_voltage_uv;
+ else
+ uv_max = supply->u_volt_max;
+
+ if (vdd_uv > uv_max ||
+ vdd_uv < supply->u_volt_min ||
+ supply->u_volt_min > uv_max) {
+ dev_warn(dev,
+ "Invalid range voltages [Min:%lu target:%lu Max:%lu]\n",
+ supply->u_volt_min, vdd_uv, uv_max);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s scaling to %luuV[min %luuV max %luuV]\n", reg_name,
+ vdd_uv, supply->u_volt_min,
+ uv_max);
+
+ ret = regulator_set_voltage_triplet(reg,
+ supply->u_volt_min,
+ vdd_uv,
+ uv_max);
+ if (ret) {
+ dev_err(dev, "%s failed for %luuV[min %luuV max %luuV]\n",
+ reg_name, vdd_uv, supply->u_volt_min,
+ uv_max);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ti_opp_supply_set_opp() - do the opp supply transition
+ * @data: information on regulators and new and old opps provided by
+ * opp core to use in transition
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+{
+ struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
+ struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
+ struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
+ struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
+ struct device *dev = data->dev;
+ unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+ struct clk *clk = data->clk;
+ struct regulator *vdd_reg = data->regulators[0];
+ struct regulator *vbb_reg = data->regulators[1];
+ int vdd_uv;
+ int ret;
+
+ vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
+ new_supply_vdd->u_volt);
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_voltage;
+
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+ __func__, old_freq, freq);
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ goto restore_voltage;
+ }
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_freq;
+
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ ret = clk_set_rate(clk, old_freq);
+ if (ret)
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (old_supply_vdd->u_volt) {
+ ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ return ret;
+
+ ret = _opp_set_voltage(dev, old_supply_vdd, 0, vdd_reg,
+ "vdd");
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct ti_opp_supply_of_data omap_generic_of_data = {
+};
+
+static const struct ti_opp_supply_of_data omap_omap5_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct ti_opp_supply_of_data omap_omap5core_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE | OPPDM_HAS_NO_ABB,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct of_device_id ti_opp_supply_of_match[] = {
+ {.compatible = "ti,omap-opp-supply", .data = &omap_generic_of_data},
+ {.compatible = "ti,omap5-opp-supply", .data = &omap_omap5_of_data},
+ {.compatible = "ti,omap5-core-opp-supply",
+ .data = &omap_omap5core_of_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_opp_supply_of_match);
+
+static int ti_opp_supply_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev = get_cpu_device(0);
+ const struct of_device_id *match;
+ const struct ti_opp_supply_of_data *of_data;
+ int ret = 0;
+
+ match = of_match_device(ti_opp_supply_of_match, dev);
+ if (!match) {
+ /* We do not expect this to happen */
+ dev_err(dev, "%s: Unable to match device\n", __func__);
+ return -ENODEV;
+ }
+ if (!match->data) {
+ /* Again, unlikely... but mistakes do happen */
+ dev_err(dev, "%s: Bad data in match\n", __func__);
+ return -EINVAL;
+ }
+ of_data = match->data;
+
+ dev_set_drvdata(dev, (void *)of_data);
+
+ /* If we need optimized voltage */
+ if (of_data->flags & OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE) {
+ ret = _store_optimized_voltages(dev, &opp_data);
+ if (ret)
+ return ret;
+ }
+
+ ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
+ ti_opp_supply_set_opp));
+ if (ret)
+ _free_optimized_voltages(dev, &opp_data);
+
+ return ret;
+}
+
+static struct platform_driver ti_opp_supply_driver = {
+ .probe = ti_opp_supply_probe,
+ .driver = {
+ .name = "ti_opp_supply",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ti_opp_supply_of_match),
+ },
+};
+module_platform_driver(ti_opp_supply_driver);
+
+MODULE_DESCRIPTION("Texas Instruments OMAP OPP Supply driver");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
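As a worked example of the efuse decode in _store_optimized_voltages() above, using the omap5 match data (mask 0xFFF, millivolt entries): a hypothetical efuse word of 0x4b0 masks to 1200, shifts by __ffs(0xFFF) = 0, and scales to 1200000 uV:

	#include <linux/bitops.h>

	static unsigned int example_decode_efuse(u32 reg_val)
	{
		u32 tmp = reg_val & 0xFFF;	/* efuse_voltage_mask */

		tmp >>= __ffs(0xFFF);		/* mask starts at bit 0: no-op */
		return tmp * 1000;		/* mV -> uV (efuse_voltage_uv == false) */
	}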
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 0b3fb99d9b89..7390fb8ca9d1 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -303,7 +303,7 @@ static void dino_mask_irq(struct irq_data *d)
struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+ DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
/* Clear the matching bit in the IMR register */
dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
@@ -316,7 +316,7 @@ static void dino_unmask_irq(struct irq_data *d)
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+ DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
/*
** clear pending IRQ bits
@@ -396,7 +396,7 @@ ilr_again:
if (mask) {
if (--ilr_loop > 0)
goto ilr_again;
- printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n",
+ printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
dino_dev->hba.base_addr, mask);
return IRQ_NONE;
}
@@ -553,7 +553,7 @@ dino_fixup_bus(struct pci_bus *bus)
struct pci_dev *dev;
struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
- DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
+ DBG(KERN_WARNING "%s(0x%px) bus %d platform_data 0x%px\n",
__func__, bus, bus->busn_res.start,
bus->bridge->platform_data);
@@ -854,7 +854,7 @@ static int __init dino_common_init(struct parisc_device *dev,
res->flags = IORESOURCE_IO; /* do not mark it busy ! */
if (request_resource(&ioport_resource, res) < 0) {
printk(KERN_ERR "%s: request I/O Port region failed "
- "0x%lx/%lx (hpa 0x%p)\n",
+ "0x%lx/%lx (hpa 0x%px)\n",
name, (unsigned long)res->start, (unsigned long)res->end,
dino_dev->hba.base_addr);
return 1;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 4dd9b1308128..99a80da6fd2e 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -106,7 +106,7 @@ static int __init eisa_eeprom_init(void)
return retval;
}
- printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr);
+ printk(KERN_INFO "EISA EEPROM at 0x%px\n", eisa_eeprom_addr);
return 0;
}
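The %p -> %px conversions in dino.c and eisa_eeprom.c track the printk hardening that made plain %p print a hashed value; %px deliberately prints the raw address where the address itself is the debugging information. In short:

	#include <linux/printk.h>

	static void example_print(void *ptr)
	{
		/* %p prints a per-boot hashed value; %px the raw address */
		pr_info("hashed=%p raw=%px\n", ptr, ptr);
	}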
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 14fd865a5120..5958c8dda4e3 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -699,7 +699,7 @@ static void pci_pm_complete(struct device *dev)
pm_generic_complete(dev);
/* Resume device if platform firmware has put it in reset-power-on */
- if (dev->power.direct_complete && pm_resume_via_firmware()) {
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
pci_power_t pre_sleep_state = pci_dev->current_state;
pci_update_current_state(pci_dev, pci_dev->current_state);
@@ -783,8 +783,10 @@ static int pci_pm_suspend_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -838,6 +840,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
Fixup:
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * pci_pm_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
+
return 0;
}
@@ -847,6 +859,9 @@ static int pci_pm_resume_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
@@ -953,7 +968,7 @@ static int pci_pm_freeze_late(struct device *dev)
if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- return pm_generic_freeze_late(dev);;
+ return pm_generic_freeze_late(dev);
}
static int pci_pm_freeze_noirq(struct device *dev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4a7c6864fdf4..764ca7b8840d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1458,6 +1458,7 @@ struct pci_devres {
unsigned int pinned:1;
unsigned int orig_intx:1;
unsigned int restore_intx:1;
+ unsigned int mwi:1;
u32 region_mask;
};
@@ -1476,6 +1477,9 @@ static void pcim_release(struct device *gendev, void *res)
if (this->region_mask & (1 << i))
pci_release_region(dev, i);
+ if (this->mwi)
+ pci_clear_mwi(dev);
+
if (this->restore_intx)
pci_intx(dev, this->orig_intx);
@@ -3761,6 +3765,27 @@ int pci_set_mwi(struct pci_dev *dev)
EXPORT_SYMBOL(pci_set_mwi);
/**
+ * pcim_set_mwi - a device-managed pci_set_mwi()
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Managed pci_set_mwi().
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pcim_set_mwi(struct pci_dev *dev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(dev);
+ if (!dr)
+ return -ENOMEM;
+
+ dr->mwi = 1;
+ return pci_set_mwi(dev);
+}
+EXPORT_SYMBOL(pcim_set_mwi);
+
+/**
* pci_try_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
*
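pcim_set_mwi() above follows the devres pattern: it only sets the new mwi bit in pci_devres, so pcim_release() calls pci_clear_mwi() automatically on driver detach. A usage sketch in a managed driver (example_probe is hypothetical):

	#include <linux/pci.h>

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		ret = pcim_enable_device(pdev);	/* managed enable */
		if (ret)
			return ret;

		/* no matching pci_clear_mwi() needed in error/remove paths */
		return pcim_set_mwi(pdev);
	}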
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index ffbf4e723527..fb1c1bb87316 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -150,6 +150,9 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
if (pci_bridge_d3_possible(dev)) {
/*
* Keep the port resumed 100ms to make sure things like
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 730cc897b94d..fab143a17f9c 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -510,11 +510,11 @@ out:
return -EBADMSG;
}
-static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
+static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
struct switchtec_user *stuser = filp->private_data;
struct switchtec_dev *stdev = stuser->stdev;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(filp, &stuser->comp.wait, wait);
poll_wait(filp, &stdev->event_wq, wait);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index b8f44b068fc6..da5724cd89cf 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -17,6 +17,15 @@ config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
+config ARM_DSU_PMU
+ tristate "ARM DynamIQ Shared Unit (DSU) PMU"
+ depends on ARM64
+ help
+ Provides support for the performance monitor unit in the ARM DynamIQ
+ Shared Unit (DSU). The DSU integrates one or more cores with an L3
+ memory system and control logic. The PMU allows counting various
+ events related to the DSU.
+
config HISI_PMU
bool "HiSilicon SoC PMU"
depends on ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 710a0135bd61..c2f27419bdf0 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
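The driver added below programs each counter with half of its maximum count (dsu_pmu_set_event_period()) and recovers deltas modulo the counter width, leaving roughly 2^31 events of headroom on the 32-bit counters before the overflow interrupt must run. The arithmetic, as a standalone sketch:

	#include <linux/bitops.h>

	/* width == 32: the programmed start value is mask >> 1 == 0x7fffffff */
	static u64 example_delta(u64 prev, u64 now, unsigned int width)
	{
		u64 mask = GENMASK_ULL(width - 1, 0);

		return (now - prev) & mask;	/* correct across one wrap */
	}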
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
new file mode 100644
index 000000000000..93c50e377507
--- /dev/null
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -0,0 +1,843 @@
+/*
+ * ARM DynamIQ Shared Unit (DSU) PMU driver
+ *
+ * Copyright (C) ARM Limited, 2017.
+ *
+ * Based on ARM CCI-PMU, ARMv8 PMU-v3 drivers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#define PMUNAME "arm_dsu"
+#define DRVNAME PMUNAME "_pmu"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <asm/arm_dsu_pmu.h>
+#include <asm/local64.h>
+
+/* PMU event codes */
+#define DSU_PMU_EVT_CYCLES 0x11
+#define DSU_PMU_EVT_CHAIN 0x1e
+
+#define DSU_PMU_MAX_COMMON_EVENTS 0x40
+
+#define DSU_PMU_MAX_HW_CNTRS 32
+#define DSU_PMU_HW_COUNTER_MASK (DSU_PMU_MAX_HW_CNTRS - 1)
+
+#define CLUSTERPMCR_E BIT(0)
+#define CLUSTERPMCR_P BIT(1)
+#define CLUSTERPMCR_C BIT(2)
+#define CLUSTERPMCR_N_SHIFT 11
+#define CLUSTERPMCR_N_MASK 0x1f
+#define CLUSTERPMCR_IDCODE_SHIFT 16
+#define CLUSTERPMCR_IDCODE_MASK 0xff
+#define CLUSTERPMCR_IMP_SHIFT 24
+#define CLUSTERPMCR_IMP_MASK 0xff
+#define CLUSTERPMCR_RES_MASK 0x7e8
+#define CLUSTERPMCR_RES_VAL 0x40
+
+#define DSU_ACTIVE_CPU_MASK 0x0
+#define DSU_ASSOCIATED_CPU_MASK 0x1
+
+/*
+ * We use the index of the counters as they appear in the counter
+ * bit maps in the PMU registers (e.g. CLUSTERPMSELR), i.e.:
+ * counter 0 - Bit 0
+ * counter 1 - Bit 1
+ * ...
+ * Cycle counter - Bit 31
+ */
+#define DSU_PMU_IDX_CYCLE_COUNTER 31
+
+/* All event counters are 32bit, with a 64bit Cycle counter */
+#define DSU_PMU_COUNTER_WIDTH(idx) \
+ (((idx) == DSU_PMU_IDX_CYCLE_COUNTER) ? 64 : 32)
+
+#define DSU_PMU_COUNTER_MASK(idx) \
+ GENMASK_ULL((DSU_PMU_COUNTER_WIDTH((idx)) - 1), 0)
+
+#define DSU_EXT_ATTR(_name, _func, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { \
+ .attr = __ATTR(_name, 0444, _func, NULL), \
+ .var = (void *)_config \
+ } \
+ })[0].attr.attr)
+
+#define DSU_EVENT_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_sysfs_event_show, (unsigned long)_config)
+
+#define DSU_FORMAT_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_sysfs_format_show, (char *)_config)
+
+#define DSU_CPUMASK_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_cpumask_show, (unsigned long)_config)
+
+struct dsu_hw_events {
+ DECLARE_BITMAP(used_mask, DSU_PMU_MAX_HW_CNTRS);
+ struct perf_event *events[DSU_PMU_MAX_HW_CNTRS];
+};
+
+/*
+ * struct dsu_pmu - DSU PMU descriptor
+ *
+ * @pmu_lock : Protects accesses to DSU PMU register from normal vs
+ * interrupt handler contexts.
+ * @hw_events : Holds the event counter state.
+ * @associated_cpus : CPUs attached to the DSU.
+ * @active_cpu : CPU to which the PMU is bound for accesses.
+ * @cpuhp_node : Node for CPU hotplug notifier link.
+ * @num_counters : Number of event counters implemented by the PMU,
+ * excluding the cycle counter.
+ * @irq : Interrupt line for counter overflow.
+ * @cpmceid_bitmap : Bitmap for the availability of architected common
+ * events (event_code < 0x40).
+ */
+struct dsu_pmu {
+ struct pmu pmu;
+ struct device *dev;
+ raw_spinlock_t pmu_lock;
+ struct dsu_hw_events hw_events;
+ cpumask_t associated_cpus;
+ cpumask_t active_cpu;
+ struct hlist_node cpuhp_node;
+ s8 num_counters;
+ int irq;
+ DECLARE_BITMAP(cpmceid_bitmap, DSU_PMU_MAX_COMMON_EVENTS);
+};
+
+static unsigned long dsu_pmu_cpuhp_state;
+
+static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu)
+{
+ return container_of(pmu, struct dsu_pmu, pmu);
+}
+
+static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ return snprintf(buf, PAGE_SIZE, "event=0x%lx\n",
+ (unsigned long)eattr->var);
+}
+
+static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t dsu_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ unsigned long mask_id = (unsigned long)eattr->var;
+ const cpumask_t *cpumask;
+
+ switch (mask_id) {
+ case DSU_ACTIVE_CPU_MASK:
+ cpumask = &dsu_pmu->active_cpu;
+ break;
+ case DSU_ASSOCIATED_CPU_MASK:
+ cpumask = &dsu_pmu->associated_cpus;
+ break;
+ default:
+ return 0;
+ }
+ return cpumap_print_to_pagebuf(true, buf, cpumask);
+}
+
+static struct attribute *dsu_pmu_format_attrs[] = {
+ DSU_FORMAT_ATTR(event, "config:0-31"),
+ NULL,
+};
+
+static const struct attribute_group dsu_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = dsu_pmu_format_attrs,
+};
+
+static struct attribute *dsu_pmu_event_attrs[] = {
+ DSU_EVENT_ATTR(cycles, 0x11),
+ DSU_EVENT_ATTR(bus_access, 0x19),
+ DSU_EVENT_ATTR(memory_error, 0x1a),
+ DSU_EVENT_ATTR(bus_cycles, 0x1d),
+ DSU_EVENT_ATTR(l3d_cache_allocate, 0x29),
+ DSU_EVENT_ATTR(l3d_cache_refill, 0x2a),
+ DSU_EVENT_ATTR(l3d_cache, 0x2b),
+ DSU_EVENT_ATTR(l3d_cache_wb, 0x2c),
+ NULL,
+};
+
+static umode_t
+dsu_pmu_event_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int unused)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr.attr);
+ unsigned long evt = (unsigned long)eattr->var;
+
+ return test_bit(evt, dsu_pmu->cpmceid_bitmap) ? attr->mode : 0;
+}
+
+static const struct attribute_group dsu_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = dsu_pmu_event_attrs,
+ .is_visible = dsu_pmu_event_attr_is_visible,
+};
+
+static struct attribute *dsu_pmu_cpumask_attrs[] = {
+ DSU_CPUMASK_ATTR(cpumask, DSU_ACTIVE_CPU_MASK),
+ DSU_CPUMASK_ATTR(associated_cpus, DSU_ASSOCIATED_CPU_MASK),
+ NULL,
+};
+
+static const struct attribute_group dsu_pmu_cpumask_attr_group = {
+ .attrs = dsu_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *dsu_pmu_attr_groups[] = {
+ &dsu_pmu_cpumask_attr_group,
+ &dsu_pmu_events_attr_group,
+ &dsu_pmu_format_attr_group,
+ NULL,
+};
+
+static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
+{
+ struct cpumask online_supported;
+
+ cpumask_and(&online_supported,
+ &dsu_pmu->associated_cpus, cpu_online_mask);
+ return cpumask_any_but(&online_supported, cpu);
+}
+
+static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
+{
+ return (idx < dsu_pmu->num_counters) ||
+ (idx == DSU_PMU_IDX_CYCLE_COUNTER);
+}
+
+static inline u64 dsu_pmu_read_counter(struct perf_event *event)
+{
+ u64 val;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return 0;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "Trying reading invalid counter %d\n", idx);
+ return 0;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
+ val = __dsu_pmu_read_pmccntr();
+ else
+ val = __dsu_pmu_read_counter(idx);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+
+ return val;
+}
+
+static void dsu_pmu_write_counter(struct perf_event *event, u64 val)
+{
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "writing to invalid counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
+ __dsu_pmu_write_pmccntr(val);
+ else
+ __dsu_pmu_write_counter(idx, val);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static int dsu_pmu_get_event_idx(struct dsu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ int idx;
+ unsigned long evtype = event->attr.config;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ unsigned long *used_mask = hw_events->used_mask;
+
+ if (evtype == DSU_PMU_EVT_CYCLES) {
+ if (test_and_set_bit(DSU_PMU_IDX_CYCLE_COUNTER, used_mask))
+ return -EAGAIN;
+ return DSU_PMU_IDX_CYCLE_COUNTER;
+ }
+
+ idx = find_first_zero_bit(used_mask, dsu_pmu->num_counters);
+ if (idx >= dsu_pmu->num_counters)
+ return -EAGAIN;
+ set_bit(idx, hw_events->used_mask);
+ return idx;
+}
+
+static void dsu_pmu_enable_counter(struct dsu_pmu *dsu_pmu, int idx)
+{
+ __dsu_pmu_counter_interrupt_enable(idx);
+ __dsu_pmu_enable_counter(idx);
+}
+
+static void dsu_pmu_disable_counter(struct dsu_pmu *dsu_pmu, int idx)
+{
+ __dsu_pmu_disable_counter(idx);
+ __dsu_pmu_counter_interrupt_disable(idx);
+}
+
+static inline void dsu_pmu_set_event(struct dsu_pmu *dsu_pmu,
+ struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ unsigned long flags;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "Trying to set invalid counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ __dsu_pmu_set_event(idx, event->hw.config_base);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static void dsu_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_count, new_count;
+
+ do {
+ /* We may also be called from the irq handler */
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = dsu_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) !=
+ prev_count);
+ delta = (new_count - prev_count) & DSU_PMU_COUNTER_MASK(hwc->idx);
+ local64_add(delta, &event->count);
+}
+
+static void dsu_pmu_read(struct perf_event *event)
+{
+ dsu_pmu_event_update(event);
+}
+
+static inline u32 dsu_pmu_get_reset_overflow(void)
+{
+ return __dsu_pmu_get_reset_overflow();
+}
+
+/**
+ * dsu_pmu_set_event_period: Set the period for the counter.
+ *
+ * All DSU PMU event counters, except the cycle counter, are 32bit
+ * counters. To handle cases of extreme interrupt latency, we program
+ * each counter with half of its max count.
+ */
+static void dsu_pmu_set_event_period(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ u64 val = DSU_PMU_COUNTER_MASK(idx) >> 1;
+
+ local64_set(&event->hw.prev_count, val);
+ dsu_pmu_write_counter(event, val);
+}
+
+static irqreturn_t dsu_pmu_handle_irq(int irq_num, void *dev)
+{
+ int i;
+ bool handled = false;
+ struct dsu_pmu *dsu_pmu = dev;
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ unsigned long overflow;
+
+ overflow = dsu_pmu_get_reset_overflow();
+ if (!overflow)
+ return IRQ_NONE;
+
+ for_each_set_bit(i, &overflow, DSU_PMU_MAX_HW_CNTRS) {
+ struct perf_event *event = hw_events->events[i];
+
+ if (!event)
+ continue;
+ dsu_pmu_event_update(event);
+ dsu_pmu_set_event_period(event);
+ handled = true;
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static void dsu_pmu_start(struct perf_event *event, int pmu_flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ /* We always reprogram the counter */
+ if (pmu_flags & PERF_EF_RELOAD)
+ WARN_ON(!(event->hw.state & PERF_HES_UPTODATE));
+ dsu_pmu_set_event_period(event);
+ if (event->hw.idx != DSU_PMU_IDX_CYCLE_COUNTER)
+ dsu_pmu_set_event(dsu_pmu, event);
+ event->hw.state = 0;
+ dsu_pmu_enable_counter(dsu_pmu, event->hw.idx);
+}
+
+static void dsu_pmu_stop(struct perf_event *event, int pmu_flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+ dsu_pmu_disable_counter(dsu_pmu, event->hw.idx);
+ dsu_pmu_event_update(event);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int dsu_pmu_add(struct perf_event *event, int flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return -ENOENT;
+
+ idx = dsu_pmu_get_event_idx(hw_events, event);
+ if (idx < 0)
+ return idx;
+
+ hwc->idx = idx;
+ hw_events->events[idx] = event;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START)
+ dsu_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ return 0;
+}
+
+static void dsu_pmu_del(struct perf_event *event, int flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ dsu_pmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
+ perf_event_update_userpage(event);
+}
+
+static void dsu_pmu_enable(struct pmu *pmu)
+{
+ u32 pmcr;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+
+ /* If no counters are added, skip enabling the PMU */
+ if (bitmap_empty(dsu_pmu->hw_events.used_mask, DSU_PMU_MAX_HW_CNTRS))
+ return;
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ pmcr = __dsu_pmu_read_pmcr();
+ pmcr |= CLUSTERPMCR_E;
+ __dsu_pmu_write_pmcr(pmcr);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static void dsu_pmu_disable(struct pmu *pmu)
+{
+ u32 pmcr;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ pmcr = __dsu_pmu_read_pmcr();
+ pmcr &= ~CLUSTERPMCR_E;
+ __dsu_pmu_write_pmcr(pmcr);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static bool dsu_pmu_validate_event(struct pmu *pmu,
+ struct dsu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ if (is_software_event(event))
+ return true;
+ /* Reject groups spanning multiple HW PMUs. */
+ if (event->pmu != pmu)
+ return false;
+ return dsu_pmu_get_event_idx(hw_events, event) >= 0;
+}
+
+/*
+ * Make sure the group of events can be scheduled at once
+ * on the PMU.
+ */
+static bool dsu_pmu_validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct dsu_hw_events fake_hw;
+
+ if (event->group_leader == event)
+ return true;
+
+ memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
+ if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
+ return false;
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
+ return false;
+ }
+ return dsu_pmu_validate_event(event->pmu, &fake_hw, event);
+}
+
+static int dsu_pmu_event_init(struct perf_event *event)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* We don't support sampling */
+ if (is_sampling_event(event)) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* We cannot support task bound events */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n");
+ return -EINVAL;
+ }
+
+ if (has_branch_stack(event) ||
+ event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
+ return -EINVAL;
+ }
+
+ if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
+ dev_dbg(dsu_pmu->pmu.dev,
+ "Requested cpu is not associated with the DSU\n");
+ return -EINVAL;
+ }
+ /*
+ * Choose the currently active CPU to read the events. We don't want
+ * to migrate the event contexts, IRQ handling, etc. to the requested
+ * CPU. As long as the requested CPU is within the same DSU, we
+ * are fine.
+ */
+ event->cpu = cpumask_first(&dsu_pmu->active_cpu);
+ if (event->cpu >= nr_cpu_ids)
+ return -EINVAL;
+ if (!dsu_pmu_validate_group(event))
+ return -EINVAL;
+
+ event->hw.config_base = event->attr.config;
+ return 0;
+}
+
+static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
+{
+ struct dsu_pmu *dsu_pmu;
+
+ dsu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dsu_pmu), GFP_KERNEL);
+ if (!dsu_pmu)
+ return ERR_PTR(-ENOMEM);
+
+ raw_spin_lock_init(&dsu_pmu->pmu_lock);
+ /*
+ * Initialise the number of counters to -1, until we probe
+ * the real number on a connected CPU.
+ */
+ dsu_pmu->num_counters = -1;
+ return dsu_pmu;
+}
+
+/**
+ * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster.
+ */
+static int dsu_pmu_dt_get_cpus(struct device_node *dev, cpumask_t *mask)
+{
+ int i = 0, n, cpu;
+ struct device_node *cpu_node;
+
+ n = of_count_phandle_with_args(dev, "cpus", NULL);
+ if (n <= 0)
+ return -ENODEV;
+ for (; i < n; i++) {
+ cpu_node = of_parse_phandle(dev, "cpus", i);
+ if (!cpu_node)
+ break;
+ cpu = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
+ /*
+ * We have to ignore failures here and continue scanning the
+ * list, to handle cases where nr_cpus has been capped in the
+ * running kernel.
+ */
+ if (cpu < 0)
+ continue;
+ cpumask_set_cpu(cpu, mask);
+ }
+ return 0;
+}
+
+/*
+ * dsu_pmu_probe_pmu: Probe the PMU details on a CPU in the cluster.
+ */
+static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
+{
+ u64 num_counters;
+ u32 cpmceid[2];
+
+ num_counters = (__dsu_pmu_read_pmcr() >> CLUSTERPMCR_N_SHIFT) &
+ CLUSTERPMCR_N_MASK;
+ /* We can only support up to 31 independent counters */
+ if (WARN_ON(num_counters > 31))
+ num_counters = 31;
+ dsu_pmu->num_counters = num_counters;
+ if (!dsu_pmu->num_counters)
+ return;
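+ /*
+ * Cache the two CLUSTERPMCEID registers as a single bitmap of the
+ * common events implemented by this DSU.
+ */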
+ cpmceid[0] = __dsu_pmu_read_pmceid(0);
+ cpmceid[1] = __dsu_pmu_read_pmceid(1);
+ bitmap_from_u32array(dsu_pmu->cpmceid_bitmap,
+ DSU_PMU_MAX_COMMON_EVENTS,
+ cpmceid,
+ ARRAY_SIZE(cpmceid));
+}
+
+static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
+{
+ cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
+ if (irq_set_affinity_hint(dsu_pmu->irq, &dsu_pmu->active_cpu))
+ pr_warn("Failed to set irq affinity to CPU %d\n", cpu);
+}
+
+/*
+ * dsu_pmu_init_pmu: Initialise the DSU PMU configurations if
+ * we haven't done it already.
+ */
+static void dsu_pmu_init_pmu(struct dsu_pmu *dsu_pmu)
+{
+ if (dsu_pmu->num_counters == -1)
+ dsu_pmu_probe_pmu(dsu_pmu);
+ /* Reset the interrupt overflow mask */
+ dsu_pmu_get_reset_overflow();
+}
+
+static int dsu_pmu_device_probe(struct platform_device *pdev)
+{
+ int irq, rc;
+ struct dsu_pmu *dsu_pmu;
+ char *name;
+ static atomic_t pmu_idx = ATOMIC_INIT(-1);
+
+ dsu_pmu = dsu_pmu_alloc(pdev);
+ if (IS_ERR(dsu_pmu))
+ return PTR_ERR(dsu_pmu);
+
+ rc = dsu_pmu_dt_get_cpus(pdev->dev.of_node, &dsu_pmu->associated_cpus);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to parse the CPUs\n");
+ return rc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "Failed to find IRQ\n");
+ return -EINVAL;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
+ PMUNAME, atomic_inc_return(&pmu_idx));
+ if (!name)
+ return -ENOMEM;
+ rc = devm_request_irq(&pdev->dev, irq, dsu_pmu_handle_irq,
+ IRQF_NOBALANCING, name, dsu_pmu);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to request IRQ %d\n", irq);
+ return rc;
+ }
+
+ dsu_pmu->irq = irq;
+ platform_set_drvdata(pdev, dsu_pmu);
+ rc = cpuhp_state_add_instance(dsu_pmu_cpuhp_state,
+ &dsu_pmu->cpuhp_node);
+ if (rc)
+ return rc;
+
+ dsu_pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .module = THIS_MODULE,
+ .pmu_enable = dsu_pmu_enable,
+ .pmu_disable = dsu_pmu_disable,
+ .event_init = dsu_pmu_event_init,
+ .add = dsu_pmu_add,
+ .del = dsu_pmu_del,
+ .start = dsu_pmu_start,
+ .stop = dsu_pmu_stop,
+ .read = dsu_pmu_read,
+
+ .attr_groups = dsu_pmu_attr_groups,
+ };
+
+ rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
+ if (rc) {
+ cpuhp_state_remove_instance(dsu_pmu_cpuhp_state,
+ &dsu_pmu->cpuhp_node);
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+ }
+
+ return rc;
+}
+
+static int dsu_pmu_device_remove(struct platform_device *pdev)
+{
+ struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&dsu_pmu->pmu);
+ cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id dsu_pmu_of_match[] = {
+ { .compatible = "arm,dsu-pmu", },
+ {},
+};
+
+static struct platform_driver dsu_pmu_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = of_match_ptr(dsu_pmu_of_match),
+ },
+ .probe = dsu_pmu_device_probe,
+ .remove = dsu_pmu_device_remove,
+};
+
+static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
+ cpuhp_node);
+
+ if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
+ return 0;
+
+ /* If the PMU is already managed, there is nothing to do */
+ if (!cpumask_empty(&dsu_pmu->active_cpu))
+ return 0;
+
+ dsu_pmu_init_pmu(dsu_pmu);
+ dsu_pmu_set_active_cpu(cpu, dsu_pmu);
+
+ return 0;
+}
+
+static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ int dst;
+ struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
+ cpuhp_node);
+
+ if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
+ return 0;
+
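+ /*
+ * This CPU was managing the PMU; pick another online CPU in the
+ * cluster and hand over the perf context and IRQ affinity to it.
+ */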
+ dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
+ /* If there are no active CPUs in the DSU, leave IRQ disabled */
+ if (dst >= nr_cpu_ids) {
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+ return 0;
+ }
+
+ perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
+ dsu_pmu_set_active_cpu(dst, dsu_pmu);
+
+ return 0;
+}
+
+static int __init dsu_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ DRVNAME,
+ dsu_pmu_cpu_online,
+ dsu_pmu_cpu_teardown);
+ if (ret < 0)
+ return ret;
+ dsu_pmu_cpuhp_state = ret;
+ return platform_driver_register(&dsu_pmu_driver);
+}
+
+static void __exit dsu_pmu_exit(void)
+{
+ platform_driver_unregister(&dsu_pmu_driver);
+ cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
+}
+
+module_init(dsu_pmu_init);
+module_exit(dsu_pmu_exit);
+
+MODULE_DEVICE_TABLE(of, dsu_pmu_of_match);
+MODULE_DESCRIPTION("Perf driver for ARM DynamIQ Shared Unit");
+MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 91b224eced18..46501cc79fd7 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -82,19 +82,10 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
return -EINVAL;
}
- /* Now look up the logical CPU number */
- for_each_possible_cpu(cpu) {
- struct device_node *cpu_dn;
-
- cpu_dn = of_cpu_device_node_get(cpu);
- of_node_put(cpu_dn);
-
- if (dn == cpu_dn)
- break;
- }
-
- if (cpu >= nr_cpu_ids) {
+ cpu = of_cpu_node_to_id(dn);
+ if (cpu < 0) {
pr_warn("failed to find logical CPU for %s\n", dn->name);
+ cpu = nr_cpu_ids;
}
of_node_put(dn);
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 8ce262fc2561..51b40aecb776 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1164,6 +1164,15 @@ static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
struct arm_spe_pmu *spe_pmu;
struct device *dev = &pdev->dev;
+ /*
+ * If kernelspace is unmapped when running at EL0, then the SPE
+ * buffer will fault and prematurely terminate the AUX session.
+ */
+ if (arm64_kernel_unmapped_at_el0()) {
+ dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
+ return -EPERM;
+ }
+
spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
if (!spe_pmu) {
dev_err(dev, "failed to allocate spe_pmu\n");
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 3f953db70288..8708ea3b4d6d 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -150,6 +150,9 @@ enum sata_phy_regs {
TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK = 0x3ff,
TXPMD_TX_FREQ_CTRL_CONTROL3 = 0x84,
TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK = 0x3ff,
+
+ RXPMD_REG_BANK = 0x1c0,
+ RXPMD_RX_FREQ_MON_CONTROL1 = 0x87,
};
enum sata_phy_ctrl_regs {
@@ -505,8 +508,36 @@ static int brcm_sata_phy_init(struct phy *phy)
return rc;
}
+static void brcm_stb_sata_calibrate(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ u32 tmp = BIT(8);
+
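+ /* Set bit 8 of RX_FREQ_MON_CONTROL1, leaving the other bits intact */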
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ ~tmp, tmp);
+}
+
+static int brcm_sata_phy_calibrate(struct phy *phy)
+{
+ struct brcm_sata_port *port = phy_get_drvdata(phy);
+ int rc = -EOPNOTSUPP;
+
+ switch (port->phy_priv->version) {
+ case BRCM_SATA_PHY_STB_28NM:
+ case BRCM_SATA_PHY_STB_40NM:
+ brcm_stb_sata_calibrate(port);
+ rc = 0;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
static const struct phy_ops phy_ops = {
.init = brcm_sata_phy_init,
+ .calibrate = brcm_sata_phy_calibrate,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 1e7ce0b6f299..1b7febc43da9 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -50,6 +50,8 @@
#define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK 0x80000000 /* option */
#define USB_CTRL_EBRIDGE 0x0c
#define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK 0x00020000 /* option */
+#define USB_CTRL_OBRIDGE 0x10
+#define USB_CTRL_OBRIDGE_LS_KEEP_ALIVE_MASK 0x08000000
#define USB_CTRL_MDIO 0x14
#define USB_CTRL_MDIO2 0x18
#define USB_CTRL_UTMI_CTL_1 0x2c
@@ -71,6 +73,7 @@
#define USB_CTRL_USB30_CTL1_USB3_IPP_MASK 0x20000000 /* option */
#define USB_CTRL_USB30_PCTL 0x70
#define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_MASK 0x00000002
+#define USB_CTRL_USB30_PCTL_PHY3_IDDQ_OVERRIDE_MASK 0x00008000
#define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_P1_MASK 0x00020000
#define USB_CTRL_USB_DEVICE_CTL1 0x90
#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003 /* option */
@@ -116,7 +119,6 @@ enum {
USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR,
USB_CTRL_SETUP_OC3_DISABLE_SELECTOR,
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR,
- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_SELECTOR,
USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR,
USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR,
USB_CTRL_USB_PM_USB_PWRDN_SELECTOR,
@@ -203,7 +205,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
USB_CTRL_USB_PM_USB_PWRDN_MASK,
@@ -225,7 +226,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
USB_CTRL_SETUP_OC3_DISABLE_MASK,
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
@@ -247,7 +247,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
USB_CTRL_USB_PM_USB_PWRDN_MASK,
@@ -269,7 +268,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
USB_CTRL_SETUP_OC3_DISABLE_MASK,
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
@@ -291,7 +289,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
USB_CTRL_USB_PM_USB_PWRDN_MASK,
@@ -313,7 +310,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
- 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */
0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
@@ -335,7 +331,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
- 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
USB_CTRL_USB_PM_USB_PWRDN_MASK,
@@ -357,7 +352,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
USB_CTRL_SETUP_OC3_DISABLE_MASK,
USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
- 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */
0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
@@ -379,7 +373,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
USB_CTRL_USB_PM_USB_PWRDN_MASK,
@@ -401,7 +394,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
USB_CTRL_SETUP_OC3_DISABLE_MASK,
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
- USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
USB_CTRL_USB_PM_USB_PWRDN_MASK,
@@ -926,6 +918,7 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
USB_CTRL_UNSET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
break;
default:
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
USB_CTRL_SET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
break;
}
@@ -952,13 +945,17 @@ void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
* Don't enable this so the memory controller doesn't read
* into memory holes. NOTE: This bit is low true on 7366C0.
*/
- USB_CTRL_SET_FAMILY(params, EBRIDGE, ESTOP_SCB_REQ);
+ USB_CTRL_SET(ctrl, EBRIDGE, ESTOP_SCB_REQ);
/* Setup the endian bits */
reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
reg &= ~USB_CTRL_SETUP_ENDIAN_BITS;
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, ENDIAN);
brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+
+ if (params->selected_family == BRCM_FAMILY_7271A0)
+ /* Enable LS keep alive fix for certain keyboards */
+ USB_CTRL_SET(ctrl, OBRIDGE, LS_KEEP_ALIVE);
}
void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
@@ -1003,6 +1000,7 @@ void brcm_usb_uninit_eohci(struct brcm_usb_init_params *params)
void brcm_usb_uninit_xhci(struct brcm_usb_init_params *params)
{
brcmusb_xhci_soft_reset(params, 1);
+ USB_CTRL_SET(params->ctrl_regs, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
}
void brcm_usb_set_family_map(struct brcm_usb_init_params *params)
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 195b98139e5f..d1dab36fa5b7 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -338,9 +338,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
ARRAY_SIZE(brcm_dr_mode_to_name),
mode, &priv->ini.mode);
}
- if (of_property_read_bool(dn, "brcm,has_xhci"))
+ if (of_property_read_bool(dn, "brcm,has-xhci"))
priv->has_xhci = true;
- if (of_property_read_bool(dn, "brcm,has_eohci"))
+ if (of_property_read_bool(dn, "brcm,has-eohci"))
priv->has_eohci = true;
err = brcm_usb_phy_dvr_init(dev, priv, dn);
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 402385f2562a..1e96d0740ef5 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -20,6 +20,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -440,9 +441,9 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy,
u32 index = instance->index;
u32 tmp;
- /* switch to USB function. (system register, force ip into usb mode) */
+ /* switch to USB function, and enable usb pll */
tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~P2C_FORCE_UART_EN;
+ tmp &= ~(P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM);
tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0);
writel(tmp, com + U3P_U2PHYDTM0);
@@ -502,10 +503,8 @@ static void u2_phy_instance_power_on(struct mtk_tphy *tphy,
u32 index = instance->index;
u32 tmp;
- /* (force_suspendm=0) (let suspendm=1, enable usb 480MHz pll) */
tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~(P2C_FORCE_SUSPENDM | P2C_RG_XCVRSEL);
- tmp &= ~(P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
+ tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
writel(tmp, com + U3P_U2PHYDTM0);
/* OTG Enable */
@@ -540,7 +539,6 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy,
tmp = readl(com + U3P_U2PHYDTM0);
tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN);
- tmp |= P2C_FORCE_SUSPENDM;
writel(tmp, com + U3P_U2PHYDTM0);
/* OTG Disable */
@@ -548,18 +546,16 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy,
tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN;
writel(tmp, com + U3P_USBPHYACR6);
- /* let suspendm=0, set utmi into analog power down */
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~P2C_RG_SUSPENDM;
- writel(tmp, com + U3P_U2PHYDTM0);
- udelay(1);
-
tmp = readl(com + U3P_U2PHYDTM1);
tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID);
tmp |= P2C_RG_SESSEND;
writel(tmp, com + U3P_U2PHYDTM1);
if (tphy->pdata->avoid_rx_sen_degradation && index) {
+ tmp = readl(com + U3P_U2PHYDTM0);
+ tmp &= ~(P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
+ writel(tmp, com + U3P_U2PHYDTM0);
+
tmp = readl(com + U3D_U2PHYDCR0);
tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
writel(tmp, com + U3D_U2PHYDCR0);
@@ -1000,7 +996,6 @@ MODULE_DEVICE_TABLE(of, mtk_tphy_id_table);
static int mtk_tphy_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *child_np;
@@ -1010,15 +1005,14 @@ static int mtk_tphy_probe(struct platform_device *pdev)
struct resource res;
int port, retval;
- match = of_match_node(mtk_tphy_id_table, pdev->dev.of_node);
- if (!match)
- return -EINVAL;
-
tphy = devm_kzalloc(dev, sizeof(*tphy), GFP_KERNEL);
if (!tphy)
return -ENOMEM;
- tphy->pdata = match->data;
+ tphy->pdata = of_device_get_match_data(dev);
+ if (!tphy->pdata)
+ return -EINVAL;
+
tphy->nphys = of_get_child_count(np);
tphy->phys = devm_kcalloc(dev, tphy->nphys,
sizeof(*tphy->phys), GFP_KERNEL);
@@ -1028,9 +1022,10 @@ static int mtk_tphy_probe(struct platform_device *pdev)
tphy->dev = dev;
platform_set_drvdata(pdev, tphy);
- if (tphy->pdata->version == MTK_PHY_V1) {
+ sif_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* The V1 SATA PHY doesn't need the SIF bank unless it is shared with PCIe or USB */
+ if (sif_res && tphy->pdata->version == MTK_PHY_V1) {
/* get banks shared by multiple phys */
- sif_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tphy->sif_base = devm_ioremap_resource(dev, sif_res);
if (IS_ERR(tphy->sif_base)) {
dev_err(dev, "failed to remap sif regs\n");
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b4964b067aec..8f6e8e28996d 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -410,6 +410,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
if (ret)
return ERR_PTR(-ENODEV);
+ /* This phy type is handled by the usb-phy subsystem for now */
+ if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
+ return ERR_PTR(-ENODEV);
+
mutex_lock(&phy_provider_mutex);
phy_provider = of_phy_provider_lookup(args.np);
if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 22c68f58b181..b8b226a20014 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -90,7 +90,17 @@
#define PHYCLKRST_COMMONONN BIT(0)
#define EXYNOS5_DRD_PHYREG0 0x14
+#define PHYREG0_SSC_REF_CLK_SEL BIT(21)
+#define PHYREG0_SSC_RANGE BIT(20)
+#define PHYREG0_CR_WRITE BIT(19)
+#define PHYREG0_CR_READ BIT(18)
+#define PHYREG0_CR_DATA_IN(_x) ((_x) << 2)
+#define PHYREG0_CR_CAP_DATA BIT(1)
+#define PHYREG0_CR_CAP_ADDR BIT(0)
+
#define EXYNOS5_DRD_PHYREG1 0x18
+#define PHYREG1_CR_DATA_OUT(_x) ((_x) << 1)
+#define PHYREG1_CR_ACK BIT(0)
#define EXYNOS5_DRD_PHYPARAM0 0x1c
@@ -119,6 +129,25 @@
#define EXYNOS5_DRD_PHYRESUME 0x34
#define EXYNOS5_DRD_LINKPORT 0x44
+/* USB 3.0 DRD PHY SS Function Control Reg; accessed by CR_PORT */
+#define EXYNOS5_DRD_PHYSS_LOSLEVEL_OVRD_IN (0x15)
+#define LOSLEVEL_OVRD_IN_LOS_BIAS_5420 (0x5 << 13)
+#define LOSLEVEL_OVRD_IN_LOS_BIAS_DEFAULT (0x0 << 13)
+#define LOSLEVEL_OVRD_IN_EN (0x1 << 10)
+#define LOSLEVEL_OVRD_IN_LOS_LEVEL_DEFAULT (0x9 << 0)
+
+#define EXYNOS5_DRD_PHYSS_TX_VBOOSTLEVEL_OVRD_IN (0x12)
+#define TX_VBOOSTLEVEL_OVRD_IN_VBOOST_5420 (0x5 << 13)
+#define TX_VBOOSTLEVEL_OVRD_IN_VBOOST_DEFAULT (0x4 << 13)
+
+#define EXYNOS5_DRD_PHYSS_LANE0_TX_DEBUG (0x1010)
+#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_19M2_20M (0x4 << 4)
+#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_24M (0x8 << 4)
+#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_25M_26M (0x8 << 4)
+#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_48M_50M_52M (0x20 << 4)
+#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_62M5 (0x20 << 4)
+#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_96M_100M (0x40 << 4)
+
#define KHZ 1000
#define MHZ (KHZ * KHZ)
@@ -527,6 +556,151 @@ static int exynos5_usbdrd_phy_power_off(struct phy *phy)
return 0;
}
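+/*
+ * CR_PORT accesses use a strobe/ack handshake: assert the command bit
+ * alongside the data and wait for CR_ACK to be raised, then clear the
+ * command and wait for CR_ACK to drop before the next transaction.
+ */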
+static int crport_handshake(struct exynos5_usbdrd_phy *phy_drd,
+ u32 val, u32 cmd)
+{
+ u32 usec = 100;
+ unsigned int result;
+
+ writel(val | cmd, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
+
+ do {
+ result = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1);
+ if (result & PHYREG1_CR_ACK)
+ break;
+
+ udelay(1);
+ } while (usec-- > 0);
+
+ /* Test the ACK bit itself since usec wraps around on timeout */
+ if (!(result & PHYREG1_CR_ACK)) {
+ dev_err(phy_drd->dev,
+ "CRPORT handshake timeout1 (0x%08x)\n", val);
+ return -ETIME;
+ }
+
+ usec = 100;
+
+ writel(val, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
+
+ do {
+ result = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1);
+ if (!(result & PHYREG1_CR_ACK))
+ break;
+
+ udelay(1);
+ } while (usec-- > 0);
+
+ if (result & PHYREG1_CR_ACK) {
+ dev_err(phy_drd->dev,
+ "CRPORT handshake timeout2 (0x%08x)\n", val);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int crport_ctrl_write(struct exynos5_usbdrd_phy *phy_drd,
+ u32 addr, u32 data)
+{
+ int ret;
+
+ /* Write Address */
+ writel(PHYREG0_CR_DATA_IN(addr),
+ phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
+ ret = crport_handshake(phy_drd, PHYREG0_CR_DATA_IN(addr),
+ PHYREG0_CR_CAP_ADDR);
+ if (ret)
+ return ret;
+
+ /* Write Data */
+ writel(PHYREG0_CR_DATA_IN(data),
+ phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
+ ret = crport_handshake(phy_drd, PHYREG0_CR_DATA_IN(data),
+ PHYREG0_CR_CAP_DATA);
+ if (ret)
+ return ret;
+
+ ret = crport_handshake(phy_drd, PHYREG0_CR_DATA_IN(data),
+ PHYREG0_CR_WRITE);
+
+ return ret;
+}
+
+/*
+ * Calibrate a few PHY parameters through the CR_PORT register to meet
+ * SuperSpeed requirements on Exynos5420 and Exynos5800 systems, which
+ * have a 28nm USB 3.0 DRD PHY.
+ */
+static int exynos5420_usbdrd_phy_calibrate(struct exynos5_usbdrd_phy *phy_drd)
+{
+ unsigned int temp;
+ int ret = 0;
+
+ /*
+ * Change los_bias from its default value (0x0) to 0x5 for the
+ * 28nm PHY; los_level keeps its default (0x9), which is also
+ * reflected in the los_level[30:26] bits of the PHYPARAM0 register.
+ */
+ temp = LOSLEVEL_OVRD_IN_LOS_BIAS_5420 |
+ LOSLEVEL_OVRD_IN_EN |
+ LOSLEVEL_OVRD_IN_LOS_LEVEL_DEFAULT;
+ ret = crport_ctrl_write(phy_drd,
+ EXYNOS5_DRD_PHYSS_LOSLEVEL_OVRD_IN,
+ temp);
+ if (ret) {
+ dev_err(phy_drd->dev,
+ "Failed setting Loss-of-Signal level for SuperSpeed\n");
+ return ret;
+ }
+
+ /*
+ * Set tx_vboost_lvl to 0x5 for 28nm PHY tuning, raising the Tx
+ * signal level from its default value of 0x4.
+ */
+ temp = TX_VBOOSTLEVEL_OVRD_IN_VBOOST_5420;
+ ret = crport_ctrl_write(phy_drd,
+ EXYNOS5_DRD_PHYSS_TX_VBOOSTLEVEL_OVRD_IN,
+ temp);
+ if (ret) {
+ dev_err(phy_drd->dev,
+ "Failed setting Tx-Vboost-Level for SuperSpeed\n");
+ return ret;
+ }
+
+ /*
+ * Set the proper wait time for the RxDetect measurement, based on
+ * the PHY's reference clock, by tuning the PHY-internal CR_PORT
+ * register LANE0.TX_DEBUG.
+ * This fixes an issue with a few USB 3.0 devices (e.g. the Samsung
+ * SUM-TSB16S USB 3.0 drive) which otherwise are not detected and do
+ * not even generate interrupts on the bus when inserted.
+ */
+ switch (phy_drd->extrefclk) {
+ case EXYNOS5_FSEL_50MHZ:
+ temp = LANE0_TX_DEBUG_RXDET_MEAS_TIME_48M_50M_52M;
+ break;
+ case EXYNOS5_FSEL_20MHZ:
+ case EXYNOS5_FSEL_19MHZ2:
+ temp = LANE0_TX_DEBUG_RXDET_MEAS_TIME_19M2_20M;
+ break;
+ case EXYNOS5_FSEL_24MHZ:
+ default:
+ temp = LANE0_TX_DEBUG_RXDET_MEAS_TIME_24M;
+ break;
+ }
+
+ ret = crport_ctrl_write(phy_drd,
+ EXYNOS5_DRD_PHYSS_LANE0_TX_DEBUG,
+ temp);
+ if (ret)
+ dev_err(phy_drd->dev,
+ "Fail to set RxDet measurement time for SuperSpeed\n");
+
+ return ret;
+}
+
static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -538,11 +712,20 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
return phy_drd->phys[args->args[0]].phy;
}
+static int exynos5_usbdrd_phy_calibrate(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+
+ return exynos5420_usbdrd_phy_calibrate(phy_drd);
+}
+
static const struct phy_ops exynos5_usbdrd_phy_ops = {
.init = exynos5_usbdrd_phy_init,
.exit = exynos5_usbdrd_phy_exit,
.power_on = exynos5_usbdrd_phy_power_on,
.power_off = exynos5_usbdrd_phy_power_off,
+ .calibrate = exynos5_usbdrd_phy_calibrate,
.owner = THIS_MODULE,
};
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 4571cc098b76..ce126955212c 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -63,6 +63,16 @@ config PINCTRL_AS3722
open drain configuration for the GPIO pins of AS3722 devices. It also
supports the GPIO functionality through gpiolib.
+config PINCTRL_AXP209
+ tristate "X-Powers AXP209 PMIC pinctrl and GPIO Support"
+ depends on MFD_AXP20X
+ help
+ AXP PMICs provide multiple GPIOs that can be muxed to different
+ functions. This driver bundles a pinctrl driver to select the
+ function muxing and a GPIO driver to handle the GPIO when the
+ GPIO function is selected.
+ Say Y to enable pinctrl and GPIO support for the AXP209 PMIC.
+
config PINCTRL_BF54x
def_bool y if BF54x
select PINCTRL_ADI2
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index d0d4844f8022..4777f1595ce2 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
obj-$(CONFIG_PINCTRL_ADI2) += pinctrl-adi2.o
obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
+obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
new file mode 100644
index 000000000000..22d3bb0bf927
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -0,0 +1,476 @@
+/*
+ * AXP20x pinctrl and GPIO driver
+ *
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright (C) 2017 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AXP20X_GPIO_FUNCTIONS 0x7
+#define AXP20X_GPIO_FUNCTION_OUT_LOW 0
+#define AXP20X_GPIO_FUNCTION_OUT_HIGH 1
+#define AXP20X_GPIO_FUNCTION_INPUT 2
+
+#define AXP20X_FUNC_GPIO_OUT 0
+#define AXP20X_FUNC_GPIO_IN 1
+#define AXP20X_FUNC_LDO 2
+#define AXP20X_FUNC_ADC 3
+#define AXP20X_FUNCS_NB 4
+
+#define AXP20X_MUX_GPIO_OUT 0
+#define AXP20X_MUX_GPIO_IN BIT(1)
+#define AXP20X_MUX_ADC BIT(2)
+
+#define AXP813_MUX_ADC (BIT(2) | BIT(0))
+
+struct axp20x_pctrl_desc {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ /* Stores the pins supporting LDO function. Bit offset is pin number. */
+ u8 ldo_mask;
+ /* Stores the pins supporting ADC function. Bit offset is pin number. */
+ u8 adc_mask;
+ u8 gpio_status_offset;
+ u8 adc_mux;
+};
+
+struct axp20x_pinctrl_function {
+ const char *name;
+ unsigned int muxval;
+ const char **groups;
+ unsigned int ngroups;
+};
+
+struct axp20x_pctl {
+ struct gpio_chip chip;
+ struct regmap *regmap;
+ struct pinctrl_dev *pctl_dev;
+ struct device *dev;
+ const struct axp20x_pctrl_desc *desc;
+ struct axp20x_pinctrl_function funcs[AXP20X_FUNCS_NB];
+};
+
+static const struct pinctrl_pin_desc axp209_pins[] = {
+ PINCTRL_PIN(0, "GPIO0"),
+ PINCTRL_PIN(1, "GPIO1"),
+ PINCTRL_PIN(2, "GPIO2"),
+};
+
+static const struct pinctrl_pin_desc axp813_pins[] = {
+ PINCTRL_PIN(0, "GPIO0"),
+ PINCTRL_PIN(1, "GPIO1"),
+};
+
+static const struct axp20x_pctrl_desc axp20x_data = {
+ .pins = axp209_pins,
+ .npins = ARRAY_SIZE(axp209_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .adc_mask = BIT(0) | BIT(1),
+ .gpio_status_offset = 4,
+ .adc_mux = AXP20X_MUX_ADC,
+};
+
+static const struct axp20x_pctrl_desc axp813_data = {
+ .pins = axp813_pins,
+ .npins = ARRAY_SIZE(axp813_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .adc_mask = BIT(0),
+ .gpio_status_offset = 0,
+ .adc_mux = AXP813_MUX_ADC,
+};
+
+static int axp20x_gpio_get_reg(unsigned int offset)
+{
+ switch (offset) {
+ case 0:
+ return AXP20X_GPIO0_CTRL;
+ case 1:
+ return AXP20X_GPIO1_CTRL;
+ case 2:
+ return AXP20X_GPIO2_CTRL;
+ }
+
+ return -EINVAL;
+}
+
+static int axp20x_gpio_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int axp20x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ unsigned int val;
+ int ret;
+
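+ /*
+ * All pins share a single input status register; the bit for this
+ * pin sits at a per-variant position (gpio_status_offset).
+ */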
+ ret = regmap_read(pctl->regmap, AXP20X_GPIO20_SS, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset + pctl->desc->gpio_status_offset));
+}
+
+static int axp20x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ unsigned int val;
+ int reg, ret;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ ret = regmap_read(pctl->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * This shouldn't really happen if the pin is already in use; if
+ * it's not in use yet, the value doesn't matter since we're going
+ * to change it soon anyway. Default to output.
+ */
+ if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
+ return 0;
+
+ /*
+ * The three lowest function values select the GPIO direction:
+ * 2 is input, 0 and 1 are output.
+ */
+ return val & 2;
+}
+
+static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
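+ /*
+ * Selecting OUT_LOW or OUT_HIGH both switches the pin to output
+ * mode and drives the level, so setting the value is sufficient.
+ */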
+ chip->set(chip, offset, value);
+
+ return 0;
+}
+
+static void axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return;
+
+ regmap_update_bits(pctl->regmap, reg,
+ AXP20X_GPIO_FUNCTIONS,
+ value ? AXP20X_GPIO_FUNCTION_OUT_HIGH :
+ AXP20X_GPIO_FUNCTION_OUT_LOW);
+}
+
+static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset,
+ u8 config)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ return regmap_update_bits(pctl->regmap, reg, AXP20X_GPIO_FUNCTIONS,
+ config);
+}
+
+static int axp20x_pmx_func_cnt(struct pinctrl_dev *pctldev)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return ARRAY_SIZE(pctl->funcs);
+}
+
+static const char *axp20x_pmx_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->funcs[selector].name;
+}
+
+static int axp20x_pmx_func_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->funcs[selector].groups;
+ *num_groups = pctl->funcs[selector].ngroups;
+
+ return 0;
+}
+
+static int axp20x_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int mask;
+
+ /* Every pin supports GPIO_OUT and GPIO_IN functions */
+ if (function <= AXP20X_FUNC_GPIO_IN)
+ return axp20x_pmx_set(pctldev, group,
+ pctl->funcs[function].muxval);
+
+ if (function == AXP20X_FUNC_LDO)
+ mask = pctl->desc->ldo_mask;
+ else
+ mask = pctl->desc->adc_mask;
+
+ if (!(BIT(group) & mask))
+ return -EINVAL;
+
+ /*
+ * We let the regulator framework handle the LDO muxing, as the muxing
+ * bits are basically also regulator on/off bits. It's better not to
+ * enforce any regulator state when selecting the LDO mux so that we
+ * don't interfere with the regulator driver.
+ */
+ if (function == AXP20X_FUNC_LDO)
+ return 0;
+
+ return axp20x_pmx_set(pctldev, group, pctl->funcs[function].muxval);
+}
+
+static int axp20x_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ if (input)
+ return axp20x_pmx_set(pctldev, offset,
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].muxval);
+
+ return axp20x_pmx_set(pctldev, offset,
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval);
+}
+
+static const struct pinmux_ops axp20x_pmx_ops = {
+ .get_functions_count = axp20x_pmx_func_cnt,
+ .get_function_name = axp20x_pmx_func_name,
+ .get_function_groups = axp20x_pmx_func_groups,
+ .set_mux = axp20x_pmx_set_mux,
+ .gpio_set_direction = axp20x_pmx_gpio_set_direction,
+ .strict = true,
+};
+
+static int axp20x_groups_cnt(struct pinctrl_dev *pctldev)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->desc->npins;
+}
+
+static int axp20x_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins, unsigned int *num_pins)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = (unsigned int *)&pctl->desc->pins[selector];
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const char *axp20x_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->desc->pins[selector].name;
+}
+
+static const struct pinctrl_ops axp20x_pctrl_ops = {
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .get_groups_count = axp20x_groups_cnt,
+ .get_group_name = axp20x_group_name,
+ .get_group_pins = axp20x_group_pins,
+};
+
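+/*
+ * Build the list of group (i.e. pin) names supporting a function from
+ * a per-variant pin mask, where a set bit N means pin N supports it.
+ */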
+static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+ unsigned int mask_len,
+ struct axp20x_pinctrl_function *func,
+ const struct pinctrl_pin_desc *pins)
+{
+ unsigned long mask_cpy = mask;
+ const char **group;
+ unsigned int ngroups = hweight8(mask);
+ int bit;
+
+ func->ngroups = ngroups;
+ if (func->ngroups > 0) {
+ func->groups = devm_kzalloc(dev, ngroups * sizeof(const char *),
+ GFP_KERNEL);
+ /* On allocation failure, expose the function with no groups */
+ if (!func->groups) {
+ func->ngroups = 0;
+ return;
+ }
+ group = func->groups;
+ for_each_set_bit(bit, &mask_cpy, mask_len) {
+ *group = pins[bit].name;
+ group++;
+ }
+ }
+}
+
+static void axp20x_build_funcs_groups(struct platform_device *pdev)
+{
+ struct axp20x_pctl *pctl = platform_get_drvdata(pdev);
+ int i, pin, npins = pctl->desc->npins;
+
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].name = "gpio_out";
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval = AXP20X_MUX_GPIO_OUT;
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].name = "gpio_in";
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].muxval = AXP20X_MUX_GPIO_IN;
+ pctl->funcs[AXP20X_FUNC_LDO].name = "ldo";
+ /*
+ * The LDO muxval is never used, so it is left unset;
+ * see the comment in axp20x_pmx_set_mux.
+ */
+ pctl->funcs[AXP20X_FUNC_ADC].name = "adc";
+ pctl->funcs[AXP20X_FUNC_ADC].muxval = pctl->desc->adc_mux;
+
+ /* Every pin supports GPIO_OUT and GPIO_IN functions */
+ for (i = 0; i <= AXP20X_FUNC_GPIO_IN; i++) {
+ pctl->funcs[i].ngroups = npins;
+ pctl->funcs[i].groups = devm_kzalloc(&pdev->dev,
+ npins * sizeof(char *),
+ GFP_KERNEL);
+ /* On allocation failure, expose the function with no groups */
+ if (!pctl->funcs[i].groups) {
+ pctl->funcs[i].ngroups = 0;
+ continue;
+ }
+ for (pin = 0; pin < npins; pin++)
+ pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
+ }
+
+ axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_LDO],
+ pctl->desc->pins);
+
+ axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_ADC],
+ pctl->desc->pins);
+}
+
+static const struct of_device_id axp20x_pctl_match[] = {
+ { .compatible = "x-powers,axp209-gpio", .data = &axp20x_data, },
+ { .compatible = "x-powers,axp813-gpio", .data = &axp813_data, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axp20x_pctl_match);
+
+static int axp20x_pctl_probe(struct platform_device *pdev)
+{
+ struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ struct axp20x_pctl *pctl;
+ struct device *dev = &pdev->dev;
+ struct pinctrl_desc *pctrl_desc;
+ int ret;
+
+ if (!of_device_is_available(pdev->dev.of_node))
+ return -ENODEV;
+
+ if (!axp20x) {
+ dev_err(&pdev->dev, "Parent drvdata not set\n");
+ return -EINVAL;
+ }
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ pctl->chip.base = -1;
+ pctl->chip.can_sleep = true;
+ pctl->chip.request = gpiochip_generic_request;
+ pctl->chip.free = gpiochip_generic_free;
+ pctl->chip.parent = &pdev->dev;
+ pctl->chip.label = dev_name(&pdev->dev);
+ pctl->chip.owner = THIS_MODULE;
+ pctl->chip.get = axp20x_gpio_get;
+ pctl->chip.get_direction = axp20x_gpio_get_direction;
+ pctl->chip.set = axp20x_gpio_set;
+ pctl->chip.direction_input = axp20x_gpio_input;
+ pctl->chip.direction_output = axp20x_gpio_output;
+
+ /* desc must be assigned before chip.ngpio dereferences it */
+ pctl->desc = (struct axp20x_pctrl_desc *)of_device_get_match_data(dev);
+ pctl->chip.ngpio = pctl->desc->npins;
+ pctl->regmap = axp20x->regmap;
+ pctl->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, pctl);
+
+ axp20x_build_funcs_groups(pdev);
+
+ pctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctrl_desc), GFP_KERNEL);
+ if (!pctrl_desc)
+ return -ENOMEM;
+
+ pctrl_desc->name = dev_name(&pdev->dev);
+ pctrl_desc->owner = THIS_MODULE;
+ pctrl_desc->pins = pctl->desc->pins;
+ pctrl_desc->npins = pctl->desc->npins;
+ pctrl_desc->pctlops = &axp20x_pctrl_ops;
+ pctrl_desc->pmxops = &axp20x_pmx_ops;
+
+ pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, pctrl_desc, pctl);
+ if (IS_ERR(pctl->pctl_dev)) {
+ dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
+ return PTR_ERR(pctl->pctl_dev);
+ }
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &pctl->chip, pctl);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GPIO chip\n");
+ return ret;
+ }
+
+ ret = gpiochip_add_pin_range(&pctl->chip, dev_name(&pdev->dev),
+ pctl->desc->pins->number,
+ pctl->desc->pins->number,
+ pctl->desc->npins);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add pin range\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "AXP20x pinctrl and GPIO driver loaded\n");
+
+ return 0;
+}
+
+static struct platform_driver axp20x_pctl_driver = {
+ .probe = axp20x_pctl_probe,
+ .driver = {
+ .name = "axp20x-gpio",
+ .of_match_table = axp20x_pctl_match,
+ },
+};
+
+module_platform_driver(axp20x_pctl_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_DESCRIPTION("AXP20x PMIC pinctrl and GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 0ad6e290bbda..e728a96cabfd 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -38,14 +38,8 @@ config CHROMEOS_PSTORE
If you have a supported Chromebook, choose Y or M here.
The module will be called chromeos_pstore.
-config CROS_EC_CHARDEV
- tristate "Chrome OS Embedded Controller userspace device interface"
- depends on MFD_CROS_EC
- ---help---
- This driver adds support to talk with the ChromeOS EC from userspace.
-
- If you have a supported Chromebook, choose Y or M here.
- The module will be called cros_ec_dev.
+config CROS_EC_CTL
+ tristate
config CROS_EC_LPC
tristate "ChromeOS Embedded Controller (LPC)"
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index a077b1f0211d..ff3b369911f0 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -2,10 +2,9 @@
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
-cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
- cros_ec_lightbar.o cros_ec_vbc.o \
- cros_ec_debugfs.o
-obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
+cros_ec_ctl-objs := cros_ec_sysfs.o cros_ec_lightbar.o \
+ cros_ec_vbc.o cros_ec_debugfs.o
+obj-$(CONFIG_CROS_EC_CTL) += cros_ec_ctl.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o
cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 4cc66f405760..5473e602f7e0 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -29,9 +29,6 @@
#include <linux/slab.h>
#include <linux/wait.h>
-#include "cros_ec_dev.h"
-#include "cros_ec_debugfs.h"
-
#define LOG_SHIFT 14
#define LOG_SIZE (1 << LOG_SHIFT)
#define LOG_POLL_SEC 10
@@ -191,11 +188,11 @@ error:
return ret;
}
-static unsigned int cros_ec_console_log_poll(struct file *file,
+static __poll_t cros_ec_console_log_poll(struct file *file,
poll_table *wait)
{
struct cros_ec_debugfs *debug_info = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &debug_info->log_wq, wait);
@@ -390,6 +387,7 @@ remove_debugfs:
debugfs_remove_recursive(debug_info->dir);
return ret;
}
+EXPORT_SYMBOL(cros_ec_debugfs_init);
void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
{
@@ -399,3 +397,4 @@ void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
debugfs_remove_recursive(ec->debug_info->dir);
cros_ec_cleanup_console_log(ec->debug_info);
}
+EXPORT_SYMBOL(cros_ec_debugfs_remove);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.h b/drivers/platform/chrome/cros_ec_debugfs.h
deleted file mode 100644
index 1ff3a50aa1b8..000000000000
--- a/drivers/platform/chrome/cros_ec_debugfs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2015 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _DRV_CROS_EC_DEBUGFS_H_
-#define _DRV_CROS_EC_DEBUGFS_H_
-
-#include "cros_ec_dev.h"
-
-/* debugfs stuff */
-int cros_ec_debugfs_init(struct cros_ec_dev *ec);
-void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
-
-#endif /* _DRV_CROS_EC_DEBUGFS_H_ */
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index fd2b047a2748..6ea79d495aa2 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -33,8 +33,6 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include "cros_ec_dev.h"
-
/* Rate-limit the lightbar interface to prevent DoS. */
static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
@@ -414,6 +412,7 @@ error:
return ret;
}
+EXPORT_SYMBOL(lb_manual_suspend_ctrl);
int lb_suspend(struct cros_ec_dev *ec)
{
@@ -422,6 +421,7 @@ int lb_suspend(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_SUSPEND);
}
+EXPORT_SYMBOL(lb_suspend);
int lb_resume(struct cros_ec_dev *ec)
{
@@ -430,6 +430,7 @@ int lb_resume(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_RESUME);
}
+EXPORT_SYMBOL(lb_resume);
static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -622,3 +623,4 @@ struct attribute_group cros_ec_lightbar_attr_group = {
.attrs = __lb_cmds_attrs,
.is_visible = cros_ec_lightbar_attrs_are_visible,
};
+EXPORT_SYMBOL(cros_ec_lightbar_attr_group);
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f3baf9973989..d6eebe872187 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -34,8 +34,6 @@
#include <linux/types.h>
#include <linux/uaccess.h>
-#include "cros_ec_dev.h"
-
/* Accessor functions */
static ssize_t show_ec_reboot(struct device *dev,
@@ -294,4 +292,7 @@ static struct attribute *__ec_attrs[] = {
struct attribute_group cros_ec_attr_group = {
.attrs = __ec_attrs,
};
+EXPORT_SYMBOL(cros_ec_attr_group);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC control driver");
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 564a0d08c8bf..6d38e6b08334 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -135,3 +135,4 @@ struct attribute_group cros_ec_vbc_attr_group = {
.bin_attrs = cros_ec_vbc_bin_attrs,
.is_bin_visible = cros_ec_vbc_is_visible,
};
+EXPORT_SYMBOL(cros_ec_vbc_attr_group);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 0578d34eec3f..999f1152655a 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -536,10 +536,10 @@ static ssize_t goldfish_pipe_write(struct file *filp,
/* is_write */ 1);
}
-static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
+static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
struct goldfish_pipe *pipe = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int status;
poll_wait(filp, &pipe->wake_queue, wait);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 6bcb750e1865..4f9bc72f0584 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -679,18 +679,12 @@ static int bat_writeable_property(struct power_supply *psy,
/* ============== */
/* Driver Globals */
/* ============== */
-static DEVICE_ATTR(wake_up_pme,
- 0644, wake_up_pme_show, wake_up_pme_store);
-static DEVICE_ATTR(wake_up_modem,
- 0644, wake_up_modem_show, wake_up_modem_store);
-static DEVICE_ATTR(wake_up_lan,
- 0644, wake_up_lan_show, wake_up_lan_store);
-static DEVICE_ATTR(wake_up_wlan,
- 0644, wake_up_wlan_show, wake_up_wlan_store);
-static DEVICE_ATTR(wake_up_key,
- 0644, wake_up_key_show, wake_up_key_store);
-static DEVICE_ATTR(wake_up_mouse,
- 0644, wake_up_mouse_show, wake_up_mouse_store);
+static DEVICE_ATTR_RW(wake_up_pme);
+static DEVICE_ATTR_RW(wake_up_modem);
+static DEVICE_ATTR_RW(wake_up_lan);
+static DEVICE_ATTR_RW(wake_up_wlan);
+static DEVICE_ATTR_RW(wake_up_key);
+static DEVICE_ATTR_RW(wake_up_mouse);
static DEVICE_ATTR(fan1_input, S_IRUGO, fan_show, NULL);
static DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu, NULL);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 935121814c97..a4fabf9d75f3 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4124,7 +4124,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
return ret;
}
-static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
if (kfifo_len(&sonypi_compat.fifo))
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 6505c97705e1..1b491690ce07 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -119,7 +119,7 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
if (key_code == KEY_RESERVED)
return;
if (pressed)
- pm_wakeup_event(&device->dev, 0);
+ pm_wakeup_dev_event(&device->dev, 0, button->suspended);
if (button->suspended)
return;
input_report_key(input, key_code, pressed?1:0);
@@ -185,6 +185,8 @@ static int surface_button_add(struct acpi_device *device)
error = input_register_device(input);
if (error)
goto err_free_input;
+
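+ /* Mark the button wakeup-capable so a press can wake the system */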
+ device_init_wakeup(&device->dev, true);
dev_info(&device->dev,
"%s [%s]\n", name, acpi_device_bid(device));
return 0;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 791449a2370f..daa68acbc900 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void)
class_unregister(&wmi_bus_class);
}
-subsys_initcall(acpi_wmi_init);
+subsys_initcall_sync(acpi_wmi_init);
module_exit(acpi_wmi_exit);
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index e681140b85d8..077f334fdbae 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -581,10 +581,7 @@ static int __init pnpbios_thread_init(void)
init_completion(&unload_sem);
task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- return 0;
+ return PTR_ERR_OR_ZERO(task);
}
/* Start the kernel thread later: */
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index f054cdddfef8..803666ae3635 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/pnp.h>
#include <linux/io.h>
-#include <linux/kallsyms.h>
#include "base.h"
static void quirk_awe32_add_ports(struct pnp_dev *dev,
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 75f63e38a8d1..ed2b109ae8fc 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -76,7 +76,7 @@ struct rockchip_iodomain_supply {
struct rockchip_iodomain {
struct device *dev;
struct regmap *grf;
- struct rockchip_iodomain_soc_data *soc_data;
+ const struct rockchip_iodomain_soc_data *soc_data;
struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
};
@@ -382,43 +382,43 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,rk3188-io-voltage-domain",
- .data = (void *)&soc_data_rk3188
+ .data = &soc_data_rk3188
},
{
.compatible = "rockchip,rk3228-io-voltage-domain",
- .data = (void *)&soc_data_rk3228
+ .data = &soc_data_rk3228
},
{
.compatible = "rockchip,rk3288-io-voltage-domain",
- .data = (void *)&soc_data_rk3288
+ .data = &soc_data_rk3288
},
{
.compatible = "rockchip,rk3328-io-voltage-domain",
- .data = (void *)&soc_data_rk3328
+ .data = &soc_data_rk3328
},
{
.compatible = "rockchip,rk3368-io-voltage-domain",
- .data = (void *)&soc_data_rk3368
+ .data = &soc_data_rk3368
},
{
.compatible = "rockchip,rk3368-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3368_pmu
+ .data = &soc_data_rk3368_pmu
},
{
.compatible = "rockchip,rk3399-io-voltage-domain",
- .data = (void *)&soc_data_rk3399
+ .data = &soc_data_rk3399
},
{
.compatible = "rockchip,rk3399-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3399_pmu
+ .data = &soc_data_rk3399_pmu
},
{
.compatible = "rockchip,rv1108-io-voltage-domain",
- .data = (void *)&soc_data_rv1108
+ .data = &soc_data_rv1108
},
{
.compatible = "rockchip,rv1108-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rv1108_pmu
+ .data = &soc_data_rv1108_pmu
},
{ /* sentinel */ },
};
@@ -443,7 +443,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iod);
match = of_match_node(rockchip_iodomain_match, np);
- iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
+ iod->soc_data = match->data;
parent = pdev->dev.parent;
if (parent && parent->of_node) {
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index ca0de1a78e85..a102e74ab24e 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -98,15 +98,6 @@ config POWER_RESET_HISI
help
Reboot support for Hisilicon boards.
-config POWER_RESET_IMX
- bool "IMX6 power-off driver"
- depends on POWER_RESET && SOC_IMX6
- help
- This driver support power off external PMIC by PMIC_ON_REQ on i.mx6
- boards.If you want to use other pin to control external power,please
- say N here or disable in dts to make sure pm_power_off never be
- overwrote wrongly by this driver.
-
config POWER_RESET_MSM
bool "Qualcomm MSM power-off driver"
depends on ARCH_QCOM
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index aeb65edb17b7..dcc92f5f7a37 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_POWER_RESET_GEMINI_POWEROFF) += gemini-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
-obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 31080c254124..0206cce328b3 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -68,7 +68,7 @@ struct shdwc_config {
};
struct shdwc {
- struct shdwc_config *cfg;
+ const struct shdwc_config *cfg;
void __iomem *at91_shdwc_base;
};
@@ -260,7 +260,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
}
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
- at91_shdwc->cfg = (struct shdwc_config *)(match->data);
+ at91_shdwc->cfg = match->data;
sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sclk))
diff --git a/drivers/power/reset/imx-snvs-poweroff.c b/drivers/power/reset/imx-snvs-poweroff.c
deleted file mode 100644
index ad6ce5020ea7..000000000000
--- a/drivers/power/reset/imx-snvs-poweroff.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Power off driver for i.mx6
- * Copyright (c) 2014, FREESCALE CORPORATION. All rights reserved.
- *
- * based on msm-poweroff.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-static void __iomem *snvs_base;
-
-static void do_imx_poweroff(void)
-{
- u32 value = readl(snvs_base);
-
- /* set TOP and DP_EN bit */
- writel(value | 0x60, snvs_base);
-}
-
-static int imx_poweroff_probe(struct platform_device *pdev)
-{
- snvs_base = of_iomap(pdev->dev.of_node, 0);
- if (!snvs_base) {
- dev_err(&pdev->dev, "failed to get memory\n");
- return -ENODEV;
- }
-
- pm_power_off = do_imx_poweroff;
- return 0;
-}
-
-static const struct of_device_id of_imx_poweroff_match[] = {
- { .compatible = "fsl,sec-v4.0-poweroff", },
- {},
-};
-MODULE_DEVICE_TABLE(of, of_imx_poweroff_match);
-
-static struct platform_driver imx_poweroff_driver = {
- .probe = imx_poweroff_probe,
- .driver = {
- .name = "imx-snvs-poweroff",
- .of_match_table = of_match_ptr(of_imx_poweroff_match),
- },
-};
-
-static int __init imx_poweroff_init(void)
-{
- return platform_driver_register(&imx_poweroff_driver);
-}
-device_initcall(imx_poweroff_init);
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 4702efdfe466..01b8c71697cb 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -23,7 +23,7 @@
#include <linux/pm.h>
static void __iomem *msm_ps_hold;
-static int do_msm_restart(struct notifier_block *nb, unsigned long action,
+static int deassert_pshold(struct notifier_block *nb, unsigned long action,
void *data)
{
writel(0, msm_ps_hold);
@@ -33,14 +33,13 @@ static int do_msm_restart(struct notifier_block *nb, unsigned long action,
}
static struct notifier_block restart_nb = {
- .notifier_call = do_msm_restart,
+ .notifier_call = deassert_pshold,
.priority = 128,
};
static void do_msm_poweroff(void)
{
- /* TODO: Add poweroff capability */
- do_msm_restart(&restart_nb, 0, NULL);
+ deassert_pshold(&restart_nb, 0, NULL);
}
static int msm_restart_probe(struct platform_device *pdev)
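
[Annotation: the rename makes the mechanism explicit. On these SoCs both restart and power-off are implemented by deasserting PS_HOLD, i.e. writing 0 to the same register, so one helper serves the restart notifier and pm_power_off alike. A hedged sketch of how such a restart handler is wired up; the foo_ names are hypothetical and the MMIO base is assumed to have been mapped in probe:]

#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static void __iomem *foo_ps_hold;      /* mapped in probe */

static int foo_restart_handler(struct notifier_block *nb,
                               unsigned long action, void *data)
{
        writel(0, foo_ps_hold);        /* deassert PS_HOLD, PMIC cuts power */
        return NOTIFY_DONE;
}

static struct notifier_block foo_restart_nb = {
        .notifier_call = foo_restart_handler,
        .priority = 128,               /* default restart-handler priority */
};

/* in probe, after ioremap:
 *      register_restart_handler(&foo_restart_nb);
 */
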
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index 7549c7f74a3c..c03e96e6a041 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = {
},
};
module_platform_driver(zx_reboot_driver);
+
+MODULE_DESCRIPTION("ZTE SoCs reset driver");
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 4ebbcce45c48..5a76c6d343de 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3218,11 +3218,13 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
}
/* Enable backup battery charging */
- abx500_mask_and_set_register_interruptible(di->dev,
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_RTC, AB8500_RTC_CTRL_REG,
RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
- if (ret < 0)
+ if (ret < 0) {
dev_err(di->dev, "%s mask and set failed\n", __func__);
+ goto out;
+ }
if (is_ab8540(di->parent)) {
ret = abx500_mask_and_set_register_interruptible(di->dev,
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
index 38f4e87cf24d..0771f951b11f 100644
--- a/drivers/power/supply/axp20x_ac_power.c
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -159,7 +159,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
struct axp20x_ac_power *power;
- struct axp_data *axp_data;
+ const struct axp_data *axp_data;
static const char * const irq_names[] = { "ACIN_PLUGIN", "ACIN_REMOVAL",
NULL };
int i, irq, ret;
@@ -176,7 +176,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
- axp_data = (struct axp_data *)of_device_get_match_data(&pdev->dev);
+ axp_data = of_device_get_match_data(&pdev->dev);
if (axp_data->acin_adc) {
power->acin_v = devm_iio_channel_get(&pdev->dev, "acin_v");
@@ -230,10 +230,10 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
static const struct of_device_id axp20x_ac_power_match[] = {
{
.compatible = "x-powers,axp202-ac-power-supply",
- .data = (void *)&axp20x_data,
+ .data = &axp20x_data,
}, {
.compatible = "x-powers,axp221-ac-power-supply",
- .data = (void *)&axp22x_data,
+ .data = &axp22x_data,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_ac_power_match);
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index d51ebd1da65e..9bfbde15b07d 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -1,6 +1,7 @@
/*
* axp288_charger.c - X-power AXP288 PMIC Charger driver
*
+ * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2014 Intel Corporation
* Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
*
@@ -98,28 +99,10 @@
#define CV_4200MV 4200 /* 4200mV */
#define CV_4350MV 4350 /* 4350mV */
-#define CC_200MA 200 /* 200mA */
-#define CC_600MA 600 /* 600mA */
-#define CC_800MA 800 /* 800mA */
-#define CC_1000MA 1000 /* 1000mA */
-#define CC_1600MA 1600 /* 1600mA */
-#define CC_2000MA 2000 /* 2000mA */
-
-#define ILIM_100MA 100 /* 100mA */
-#define ILIM_500MA 500 /* 500mA */
-#define ILIM_900MA 900 /* 900mA */
-#define ILIM_1500MA 1500 /* 1500mA */
-#define ILIM_2000MA 2000 /* 2000mA */
-#define ILIM_2500MA 2500 /* 2500mA */
-#define ILIM_3000MA 3000 /* 3000mA */
-
#define AXP288_EXTCON_DEV_NAME "axp288_extcon"
#define USB_HOST_EXTCON_HID "INT3496"
#define USB_HOST_EXTCON_NAME "INT3496:00"
-static const unsigned int cable_ids[] =
- { EXTCON_CHG_USB_SDP, EXTCON_CHG_USB_CDP, EXTCON_CHG_USB_DCP };
-
enum {
VBUS_OV_IRQ = 0,
CHARGE_DONE_IRQ,
@@ -139,7 +122,6 @@ struct axp288_chrg_info {
struct regmap_irq_chip_data *regmap_irqc;
int irq[CHRG_INTR_END];
struct power_supply *psy_usb;
- struct mutex lock;
/* OTG/Host mode */
struct {
@@ -152,18 +134,14 @@ struct axp288_chrg_info {
/* SDP/CDP/DCP USB charging cable notifications */
struct {
struct extcon_dev *edev;
- bool connected;
- enum power_supply_type chg_type;
- struct notifier_block nb[ARRAY_SIZE(cable_ids)];
+ struct notifier_block nb;
struct work_struct work;
} cable;
- int inlmt;
int cc;
int cv;
int max_cc;
int max_cv;
- int is_charger_enabled;
};
static inline int axp288_charger_set_cc(struct axp288_chrg_info *info, int cc)
@@ -220,51 +198,63 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv)
return ret;
}
-static inline int axp288_charger_set_vbus_inlmt(struct axp288_chrg_info *info,
- int inlmt)
+static int axp288_charger_get_vbus_inlmt(struct axp288_chrg_info *info)
{
- int ret;
unsigned int val;
- u8 reg_val;
+ int ret;
- /* Read in limit register */
ret = regmap_read(info->regmap, AXP20X_CHRG_BAK_CTRL, &val);
if (ret < 0)
- goto set_inlmt_fail;
-
- if (inlmt <= ILIM_100MA) {
- reg_val = CHRG_VBUS_ILIM_100MA;
- inlmt = ILIM_100MA;
- } else if (inlmt <= ILIM_500MA) {
- reg_val = CHRG_VBUS_ILIM_500MA;
- inlmt = ILIM_500MA;
- } else if (inlmt <= ILIM_900MA) {
- reg_val = CHRG_VBUS_ILIM_900MA;
- inlmt = ILIM_900MA;
- } else if (inlmt <= ILIM_1500MA) {
- reg_val = CHRG_VBUS_ILIM_1500MA;
- inlmt = ILIM_1500MA;
- } else if (inlmt <= ILIM_2000MA) {
- reg_val = CHRG_VBUS_ILIM_2000MA;
- inlmt = ILIM_2000MA;
- } else if (inlmt <= ILIM_2500MA) {
- reg_val = CHRG_VBUS_ILIM_2500MA;
- inlmt = ILIM_2500MA;
- } else {
- reg_val = CHRG_VBUS_ILIM_3000MA;
- inlmt = ILIM_3000MA;
+ return ret;
+
+ val >>= CHRG_VBUS_ILIM_BIT_POS;
+ switch (val) {
+ case CHRG_VBUS_ILIM_100MA:
+ return 100000;
+ case CHRG_VBUS_ILIM_500MA:
+ return 500000;
+ case CHRG_VBUS_ILIM_900MA:
+ return 900000;
+ case CHRG_VBUS_ILIM_1500MA:
+ return 1500000;
+ case CHRG_VBUS_ILIM_2000MA:
+ return 2000000;
+ case CHRG_VBUS_ILIM_2500MA:
+ return 2500000;
+ case CHRG_VBUS_ILIM_3000MA:
+ return 3000000;
+ default:
+ dev_warn(&info->pdev->dev, "Unknown ilim reg val: %d\n", val);
+ return 0;
}
+}
- reg_val = (val & ~CHRG_VBUS_ILIM_MASK)
- | (reg_val << CHRG_VBUS_ILIM_BIT_POS);
- ret = regmap_write(info->regmap, AXP20X_CHRG_BAK_CTRL, reg_val);
- if (ret >= 0)
- info->inlmt = inlmt;
+static inline int axp288_charger_set_vbus_inlmt(struct axp288_chrg_info *info,
+ int inlmt)
+{
+ int ret;
+ u8 reg_val;
+
+ if (inlmt >= 3000000)
+ reg_val = CHRG_VBUS_ILIM_3000MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 2500000)
+ reg_val = CHRG_VBUS_ILIM_2500MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 2000000)
+ reg_val = CHRG_VBUS_ILIM_2000MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 1500000)
+ reg_val = CHRG_VBUS_ILIM_1500MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 900000)
+ reg_val = CHRG_VBUS_ILIM_900MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 500000)
+ reg_val = CHRG_VBUS_ILIM_500MA << CHRG_VBUS_ILIM_BIT_POS;
else
- dev_err(&info->pdev->dev, "charger BAK control %d\n", ret);
+ reg_val = CHRG_VBUS_ILIM_100MA << CHRG_VBUS_ILIM_BIT_POS;
+ ret = regmap_update_bits(info->regmap, AXP20X_CHRG_BAK_CTRL,
+ CHRG_VBUS_ILIM_MASK, reg_val);
+ if (ret < 0)
+ dev_err(&info->pdev->dev, "charger BAK control %d\n", ret);
-set_inlmt_fail:
return ret;
}
@@ -283,7 +273,6 @@ static int axp288_charger_vbus_path_select(struct axp288_chrg_info *info,
if (ret < 0)
dev_err(&info->pdev->dev, "axp288 vbus path select %d\n", ret);
-
return ret;
}
@@ -292,9 +281,6 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
{
int ret;
- if ((int)enable == info->is_charger_enabled)
- return 0;
-
if (enable)
ret = regmap_update_bits(info->regmap, AXP20X_CHRG_CTRL1,
CHRG_CCCV_CHG_EN, CHRG_CCCV_CHG_EN);
@@ -303,8 +289,6 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
CHRG_CCCV_CHG_EN, 0);
if (ret < 0)
dev_err(&info->pdev->dev, "axp288 enable charger %d\n", ret);
- else
- info->is_charger_enabled = enable;
return ret;
}
@@ -376,8 +360,6 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
int ret = 0;
int scaled_val;
- mutex_lock(&info->lock);
-
switch (psp) {
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
scaled_val = min(val->intval, info->max_cc);
@@ -393,11 +375,15 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
if (ret < 0)
dev_warn(&info->pdev->dev, "set charge voltage failed\n");
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = axp288_charger_set_vbus_inlmt(info, val->intval);
+ if (ret < 0)
+ dev_warn(&info->pdev->dev, "set input current limit failed\n");
+ break;
default:
ret = -EINVAL;
}
- mutex_unlock(&info->lock);
return ret;
}
@@ -406,9 +392,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct axp288_chrg_info *info = power_supply_get_drvdata(psy);
- int ret = 0;
-
- mutex_lock(&info->lock);
+ int ret;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
@@ -419,7 +403,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
}
ret = axp288_charger_is_present(info);
if (ret < 0)
- goto psy_get_prop_fail;
+ return ret;
val->intval = ret;
break;
case POWER_SUPPLY_PROP_ONLINE:
@@ -430,7 +414,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
}
ret = axp288_charger_is_online(info);
if (ret < 0)
- goto psy_get_prop_fail;
+ return ret;
val->intval = ret;
break;
case POWER_SUPPLY_PROP_HEALTH:
@@ -448,17 +432,17 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
val->intval = info->max_cv * 1000;
break;
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
- val->intval = info->inlmt * 1000;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = axp288_charger_get_vbus_inlmt(info);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
break;
default:
- ret = -EINVAL;
- goto psy_get_prop_fail;
+ return -EINVAL;
}
-psy_get_prop_fail:
- mutex_unlock(&info->lock);
- return ret;
+ return 0;
}
static int axp288_charger_property_is_writeable(struct power_supply *psy,
@@ -469,6 +453,7 @@ static int axp288_charger_property_is_writeable(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
ret = 1;
break;
default:
@@ -487,7 +472,7 @@ static enum power_supply_property axp288_usb_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
};
static const struct power_supply_desc axp288_charger_desc = {
@@ -565,99 +550,53 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
container_of(work, struct axp288_chrg_info, cable.work);
int ret, current_limit;
struct extcon_dev *edev = info->cable.edev;
- bool old_connected = info->cable.connected;
- enum power_supply_type old_chg_type = info->cable.chg_type;
+ unsigned int val;
+
+ ret = regmap_read(info->regmap, AXP20X_PWR_INPUT_STATUS, &val);
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "Error reading status (%d)\n", ret);
+ return;
+ }
+
+ /* Offline? Disable charging and bail */
+ if (!(val & PS_STAT_VBUS_VALID)) {
+ dev_dbg(&info->pdev->dev, "USB charger disconnected\n");
+ axp288_charger_enable_charger(info, false);
+ power_supply_changed(info->psy_usb);
+ return;
+ }
/* Determine cable/charger type */
if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
- dev_dbg(&info->pdev->dev, "USB SDP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
+ dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n");
+ current_limit = 500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
- dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
+ dev_dbg(&info->pdev->dev, "USB CDP charger is connected\n");
+ current_limit = 1500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_DCP) > 0) {
- dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+ dev_dbg(&info->pdev->dev, "USB DCP charger is connected\n");
+ current_limit = 2000000;
} else {
- if (old_connected)
- dev_dbg(&info->pdev->dev, "USB charger disconnected");
- info->cable.connected = false;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
- }
-
- /* Cable status changed */
- if (old_connected == info->cable.connected &&
- old_chg_type == info->cable.chg_type)
+ /* Charger type detection still in progress, bail. */
return;
-
- mutex_lock(&info->lock);
-
- if (info->cable.connected) {
- axp288_charger_enable_charger(info, false);
-
- switch (info->cable.chg_type) {
- case POWER_SUPPLY_TYPE_USB:
- current_limit = ILIM_500MA;
- break;
- case POWER_SUPPLY_TYPE_USB_CDP:
- current_limit = ILIM_1500MA;
- break;
- case POWER_SUPPLY_TYPE_USB_DCP:
- current_limit = ILIM_2000MA;
- break;
- default:
- /* Unknown */
- current_limit = 0;
- break;
- }
-
- /* Set vbus current limit first, then enable charger */
- ret = axp288_charger_set_vbus_inlmt(info, current_limit);
- if (ret == 0)
- axp288_charger_enable_charger(info, true);
- else
- dev_err(&info->pdev->dev,
- "error setting current limit (%d)", ret);
- } else {
- axp288_charger_enable_charger(info, false);
}
- mutex_unlock(&info->lock);
+ /* Set vbus current limit first, then enable charger */
+ ret = axp288_charger_set_vbus_inlmt(info, current_limit);
+ if (ret == 0)
+ axp288_charger_enable_charger(info, true);
+ else
+ dev_err(&info->pdev->dev,
+ "error setting current limit (%d)\n", ret);
power_supply_changed(info->psy_usb);
}
-/*
- * We need 3 copies of this, because there is no way to find out for which
- * cable id we are being called from the passed in arguments; and we must
- * have a separate nb for each extcon_register_notifier call.
- */
-static int axp288_charger_handle_cable0_evt(struct notifier_block *nb,
- unsigned long event, void *param)
+static int axp288_charger_handle_cable_evt(struct notifier_block *nb,
+ unsigned long event, void *param)
{
struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[0]);
- schedule_work(&info->cable.work);
- return NOTIFY_OK;
-}
-
-static int axp288_charger_handle_cable1_evt(struct notifier_block *nb,
- unsigned long event, void *param)
-{
- struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[1]);
- schedule_work(&info->cable.work);
- return NOTIFY_OK;
-}
-
-static int axp288_charger_handle_cable2_evt(struct notifier_block *nb,
- unsigned long event, void *param)
-{
- struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[2]);
+ container_of(nb, struct axp288_chrg_info, cable.nb);
schedule_work(&info->cable.work);
return NOTIFY_OK;
}
@@ -785,6 +724,14 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
return 0;
}
+static void axp288_charger_cancel_work(void *data)
+{
+ struct axp288_chrg_info *info = data;
+
+ cancel_work_sync(&info->otg.work);
+ cancel_work_sync(&info->cable.work);
+}
+
static int axp288_charger_probe(struct platform_device *pdev)
{
int ret, i, pirq;
@@ -799,8 +746,6 @@ static int axp288_charger_probe(struct platform_device *pdev)
info->pdev = pdev;
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
- info->cable.chg_type = -1;
- info->is_charger_enabled = -1;
info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
if (info->cable.edev == NULL) {
@@ -820,7 +765,6 @@ static int axp288_charger_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- mutex_init(&info->lock);
ret = charger_init_hw_regs(info);
if (ret)
@@ -836,19 +780,19 @@ static int axp288_charger_probe(struct platform_device *pdev)
return ret;
}
+ /* Cancel our work on cleanup, register this before the notifiers */
+ ret = devm_add_action(dev, axp288_charger_cancel_work, info);
+ if (ret)
+ return ret;
+
/* Register for extcon notification */
INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
- info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt;
- info->cable.nb[1].notifier_call = axp288_charger_handle_cable1_evt;
- info->cable.nb[2].notifier_call = axp288_charger_handle_cable2_evt;
- for (i = 0; i < ARRAY_SIZE(cable_ids); i++) {
- ret = devm_extcon_register_notifier(dev, info->cable.edev,
- cable_ids[i], &info->cable.nb[i]);
- if (ret) {
- dev_err(dev, "failed to register extcon notifier for %u: %d\n",
- cable_ids[i], ret);
- return ret;
- }
+ info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
+ ret = devm_extcon_register_notifier_all(dev, info->cable.edev,
+ &info->cable.nb);
+ if (ret) {
+ dev_err(dev, "failed to register cable extcon notifier\n");
+ return ret;
}
schedule_work(&info->cable.work);
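
[Annotation: the ordering the comment above asks for matters because devres actions run in reverse registration order on unbind. Registering the cancel action before the devm extcon notifier means the notifier is torn down first, and only then is queued work cancelled, so a notification arriving during unbind cannot leave the work re-queued. A minimal sketch of the pattern; foo names are hypothetical:]

#include <linux/device.h>
#include <linux/workqueue.h>

struct foo {
        struct work_struct work;
};

static void foo_cancel_work(void *data)
{
        struct foo *foo = data;

        cancel_work_sync(&foo->work);
}

/* in probe: register the cancel action before anything that can queue
 * foo->work, so on unbind it runs after those users are unregistered */
static int foo_probe_tail(struct device *dev, struct foo *foo)
{
        return devm_add_action(dev, foo_cancel_work, foo);
}
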
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index a8dcabc32721..4cc6e038dfdd 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -1,6 +1,7 @@
/*
* axp288_fuel_gauge.c - Xpower AXP288 PMIC Fuel Gauge Driver
*
+ * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2014 Intel Corporation
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -16,6 +17,7 @@
*
*/
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -31,6 +33,12 @@
#include <linux/seq_file.h>
#include <asm/unaligned.h>
+#define PS_STAT_VBUS_TRIGGER (1 << 0)
+#define PS_STAT_BAT_CHRG_DIR (1 << 2)
+#define PS_STAT_VBAT_ABOVE_VHOLD (1 << 3)
+#define PS_STAT_VBUS_VALID (1 << 4)
+#define PS_STAT_VBUS_PRESENT (1 << 5)
+
#define CHRG_STAT_BAT_SAFE_MODE (1 << 3)
#define CHRG_STAT_BAT_VALID (1 << 4)
#define CHRG_STAT_BAT_PRESENT (1 << 5)
@@ -100,11 +108,22 @@ enum {
WL1_IRQ,
};
+enum {
+ BAT_TEMP = 0,
+ PMIC_TEMP,
+ SYSTEM_TEMP,
+ BAT_CHRG_CURR,
+ BAT_D_CURR,
+ BAT_VOLT,
+ IIO_CHANNEL_NUM
+};
+
struct axp288_fg_info {
struct platform_device *pdev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
int irq[AXP288_FG_INTR_NUM];
+ struct iio_channel *iio_channel[IIO_CHANNEL_NUM];
struct power_supply *bat;
struct mutex lock;
int status;
@@ -199,33 +218,6 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
}
-static int pmic_read_adc_val(const char *name, int *raw_val,
- struct axp288_fg_info *info)
-{
- int ret, val = 0;
- struct iio_channel *indio_chan;
-
- indio_chan = iio_channel_get(NULL, name);
- if (IS_ERR_OR_NULL(indio_chan)) {
- ret = PTR_ERR(indio_chan);
- goto exit;
- }
- ret = iio_read_channel_raw(indio_chan, &val);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "IIO channel read error: %x, %x\n", ret, val);
- goto err_exit;
- }
-
- dev_dbg(&info->pdev->dev, "adc raw val=%x\n", val);
- *raw_val = val;
-
-err_exit:
- iio_channel_release(indio_chan);
-exit:
- return ret;
-}
-
#ifdef CONFIG_DEBUG_FS
static int fuel_gauge_debug_show(struct seq_file *s, void *data)
{
@@ -296,22 +288,22 @@ static int fuel_gauge_debug_show(struct seq_file *s, void *data)
AXP288_FG_TUNE5,
fuel_gauge_reg_readb(info, AXP288_FG_TUNE5));
- ret = pmic_read_adc_val("axp288-batt-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-batttemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-pmic-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[PMIC_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-pmictemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-system-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[SYSTEM_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-systtemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-chrg-curr", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-chrgcurr : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-dchrgcur : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-batt-volt", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-battvolt : %d\n", raw_val);
@@ -351,8 +343,7 @@ static inline void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
static void fuel_gauge_get_status(struct axp288_fg_info *info)
{
- int pwr_stat, ret;
- int charge, discharge;
+ int pwr_stat, fg_res;
pwr_stat = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
if (pwr_stat < 0) {
@@ -360,36 +351,32 @@ static void fuel_gauge_get_status(struct axp288_fg_info *info)
"PWR STAT read failed:%d\n", pwr_stat);
return;
}
- ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "ADC charge current read failed:%d\n", ret);
- return;
- }
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "ADC discharge current read failed:%d\n", ret);
- return;
+
+ /* Report full if Vbus is valid and the reported capacity is 100% */
+ if (pwr_stat & PS_STAT_VBUS_VALID) {
+ fg_res = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
+ if (fg_res < 0) {
+ dev_err(&info->pdev->dev,
+ "FG RES read failed: %d\n", fg_res);
+ return;
+ }
+ if (fg_res == (FG_REP_CAP_VALID | 100)) {
+ info->status = POWER_SUPPLY_STATUS_FULL;
+ return;
+ }
}
- if (charge > 0)
+ if (pwr_stat & PS_STAT_BAT_CHRG_DIR)
info->status = POWER_SUPPLY_STATUS_CHARGING;
- else if (discharge > 0)
+ else
info->status = POWER_SUPPLY_STATUS_DISCHARGING;
- else {
- if (pwr_stat & CHRG_STAT_BAT_PRESENT)
- info->status = POWER_SUPPLY_STATUS_FULL;
- else
- info->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- }
}
static int fuel_gauge_get_vbatt(struct axp288_fg_info *info, int *vbatt)
{
int ret = 0, raw_val;
- ret = pmic_read_adc_val("axp288-batt-volt", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
if (ret < 0)
goto vbatt_read_fail;
@@ -400,24 +387,19 @@ vbatt_read_fail:
static int fuel_gauge_get_current(struct axp288_fg_info *info, int *cur)
{
- int ret, value = 0;
- int charge, discharge;
+ int ret, discharge;
- ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info);
- if (ret < 0)
- goto current_read_fail;
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info);
+	/* Check the discharge current first, so that we usually need only one battery read. */
+ ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &discharge);
if (ret < 0)
- goto current_read_fail;
+ return ret;
- if (charge > 0)
- value = charge;
- else if (discharge > 0)
- value = -1 * discharge;
+ if (discharge > 0) {
+ *cur = -1 * discharge;
+ return 0;
+ }
- *cur = value;
-current_read_fail:
- return ret;
+ return iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], cur);
}
static int fuel_gauge_get_vocv(struct axp288_fg_info *info, int *vocv)
@@ -698,12 +680,54 @@ intr_failed:
}
}
+/*
+ * Some devices have no battery (HDMI sticks) and the axp288's battery
+ * detection nevertheless reports that one is present.
+ */
+static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
+ {
+ /* Intel Cherry Trail Compute Stick, Windows version */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
+ },
+ },
+ {
+ /* Intel Cherry Trail Compute Stick, version without an OS */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
+ },
+ },
+ {
+ /* Meegopad T08 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "To be filled by OEM."),
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
+ },
+ },
+ {}
+};
+
static int axp288_fuel_gauge_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int i, ret = 0;
struct axp288_fg_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
+ static const char * const iio_chan_name[] = {
+ [BAT_TEMP] = "axp288-batt-temp",
+ [PMIC_TEMP] = "axp288-pmic-temp",
+ [SYSTEM_TEMP] = "axp288-system-temp",
+ [BAT_CHRG_CURR] = "axp288-chrg-curr",
+ [BAT_D_CURR] = "axp288-chrg-d-curr",
+ [BAT_VOLT] = "axp288-batt-volt",
+ };
+
+ if (dmi_check_system(axp288_fuel_gauge_blacklist))
+ return -ENODEV;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -719,18 +743,39 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
mutex_init(&info->lock);
INIT_DELAYED_WORK(&info->status_monitor, fuel_gauge_status_monitor);
+ for (i = 0; i < IIO_CHANNEL_NUM; i++) {
+ /*
+		 * Note: we cannot use devm_iio_channel_get() because x86
+		 * systems lack the device<->channel maps which
+		 * iio_channel_get() will try to use when passed a non-NULL
+		 * device pointer.
+ */
+ info->iio_channel[i] =
+ iio_channel_get(NULL, iio_chan_name[i]);
+ if (IS_ERR(info->iio_channel[i])) {
+ ret = PTR_ERR(info->iio_channel[i]);
+ dev_dbg(&pdev->dev, "error getting iiochan %s: %d\n",
+ iio_chan_name[i], ret);
+ /* Wait for axp288_adc to load */
+ if (ret == -ENODEV)
+ ret = -EPROBE_DEFER;
+
+ goto out_free_iio_chan;
+ }
+ }
+
ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
if (ret < 0)
- return ret;
+ goto out_free_iio_chan;
if (!(ret & FG_DES_CAP1_VALID)) {
dev_err(&pdev->dev, "axp288 not configured by firmware\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free_iio_chan;
}
ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
if (ret < 0)
- return ret;
+ goto out_free_iio_chan;
switch ((ret & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS) {
case CHRG_CCCV_CV_4100MV:
info->max_volt = 4100;
@@ -751,7 +796,7 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
if (IS_ERR(info->bat)) {
ret = PTR_ERR(info->bat);
dev_err(&pdev->dev, "failed to register battery: %d\n", ret);
- return ret;
+ goto out_free_iio_chan;
}
fuel_gauge_create_debugfs(info);
@@ -759,6 +804,13 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
schedule_delayed_work(&info->status_monitor, STATUS_MON_DELAY_JIFFIES);
return 0;
+
+out_free_iio_chan:
+ for (i = 0; i < IIO_CHANNEL_NUM; i++)
+ if (!IS_ERR_OR_NULL(info->iio_channel[i]))
+ iio_channel_release(info->iio_channel[i]);
+
+ return ret;
}
static const struct platform_device_id axp288_fg_id_table[] = {
@@ -780,6 +832,9 @@ static int axp288_fuel_gauge_remove(struct platform_device *pdev)
if (info->irq[i] >= 0)
free_irq(info->irq[i], info);
+ for (i = 0; i < IIO_CHANNEL_NUM; i++)
+ iio_channel_release(info->iio_channel[i]);
+
return 0;
}
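
[Annotation: two details of the new IIO consumer code are worth spelling out. iio_channel_get() is called with a NULL device because, per the comment in probe, x86 lacks the device<->channel maps a non-NULL lookup would need, and -ENODEV is translated to -EPROBE_DEFER so the probe is retried once the axp288_adc provider has loaded. Roughly, as a sketch with a hypothetical channel name:]

#include <linux/err.h>
#include <linux/iio/consumer.h>

static int foo_get_chan(struct iio_channel **out)
{
        struct iio_channel *chan;

        chan = iio_channel_get(NULL, "example-chan"); /* name-only lookup */
        if (IS_ERR(chan)) {
                int ret = PTR_ERR(chan);

                /* provider not bound yet: ask the core to retry us later */
                return ret == -ENODEV ? -EPROBE_DEFER : ret;
        }
        *out = chan;
        return 0;
}
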
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 35ff406aca48..b58df04d03b3 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/extcon.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
@@ -162,9 +161,6 @@ struct bq24190_dev_info {
struct device *dev;
struct power_supply *charger;
struct power_supply *battery;
- struct extcon_dev *extcon;
- struct notifier_block extcon_nb;
- struct delayed_work extcon_work;
struct delayed_work input_current_limit_work;
char model_name[I2C_NAME_SIZE];
bool initialized;
@@ -686,6 +682,16 @@ static int bq24190_register_reset(struct bq24190_dev_info *bdi)
int ret, limit = 100;
u8 v;
+ /*
+ * This prop. can be passed on device instantiation from platform code:
+ * struct property_entry pe[] =
+ * { PROPERTY_ENTRY_BOOL("disable-reset"), ... };
+ * struct i2c_board_info bi =
+ * { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq };
+ * struct i2c_adapter ad = { ... };
+ * i2c_add_adapter(&ad);
+ * i2c_new_device(&ad, &bi);
+ */
if (device_property_read_bool(bdi->dev, "disable-reset"))
return 0;
@@ -1193,8 +1199,6 @@ static int bq24190_charger_set_property(struct power_supply *psy,
static int bq24190_charger_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- int ret;
-
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
@@ -1202,13 +1206,10 @@ static int bq24190_charger_property_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- ret = 1;
- break;
+ return 1;
default:
- ret = 0;
+ return 0;
}
-
- return ret;
}
static void bq24190_input_current_limit_work(struct work_struct *work)
@@ -1623,75 +1624,6 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void bq24190_extcon_work(struct work_struct *work)
-{
- struct bq24190_dev_info *bdi =
- container_of(work, struct bq24190_dev_info, extcon_work.work);
- int error, iinlim = 0;
- u8 v;
-
- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
- dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- return;
- }
-
- if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_SDP) == 1)
- iinlim = 500000;
- else if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_CDP) == 1 ||
- extcon_get_state(bdi->extcon, EXTCON_CHG_USB_ACA) == 1)
- iinlim = 1500000;
- else if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_DCP) == 1)
- iinlim = 2000000;
-
- if (iinlim) {
- error = bq24190_set_field_val(bdi, BQ24190_REG_ISC,
- BQ24190_REG_ISC_IINLIM_MASK,
- BQ24190_REG_ISC_IINLIM_SHIFT,
- bq24190_isc_iinlim_values,
- ARRAY_SIZE(bq24190_isc_iinlim_values),
- iinlim);
- if (error < 0)
- dev_err(bdi->dev, "Can't set IINLIM: %d\n", error);
- }
-
- /* if no charger found and in USB host mode, set OTG 5V boost, else normal */
- if (!iinlim && extcon_get_state(bdi->extcon, EXTCON_USB_HOST) == 1)
- v = BQ24190_REG_POC_CHG_CONFIG_OTG;
- else
- v = BQ24190_REG_POC_CHG_CONFIG_CHARGE;
-
- error = bq24190_write_mask(bdi, BQ24190_REG_POC,
- BQ24190_REG_POC_CHG_CONFIG_MASK,
- BQ24190_REG_POC_CHG_CONFIG_SHIFT,
- v);
- if (error < 0)
- dev_err(bdi->dev, "Can't set CHG_CONFIG: %d\n", error);
-
- pm_runtime_mark_last_busy(bdi->dev);
- pm_runtime_put_autosuspend(bdi->dev);
-}
-
-static int bq24190_extcon_event(struct notifier_block *nb, unsigned long event,
- void *param)
-{
- struct bq24190_dev_info *bdi =
- container_of(nb, struct bq24190_dev_info, extcon_nb);
-
- /*
- * The Power-Good detection may take up to 220ms, sometimes
- * the external charger detection is quicker, and the bq24190 will
- * reset to iinlim based on its own charger detection (which is not
- * hooked up when using external charger detection) resulting in
- * a too low default 500mA iinlim. Delay applying the extcon value
- * for 300ms to avoid this.
- */
- queue_delayed_work(system_wq, &bdi->extcon_work, msecs_to_jiffies(300));
-
- return NOTIFY_OK;
-}
-
static int bq24190_hw_init(struct bq24190_dev_info *bdi)
{
u8 v;
@@ -1766,7 +1698,6 @@ static int bq24190_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct power_supply_config charger_cfg = {}, battery_cfg = {};
struct bq24190_dev_info *bdi;
- const char *name;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -1796,25 +1727,6 @@ static int bq24190_probe(struct i2c_client *client,
return -EINVAL;
}
- /*
- * Devicetree platforms should get extcon via phandle (not yet supported).
- * On ACPI platforms, extcon clients may invoke us with:
- * struct property_entry pe[] =
- * { PROPERTY_ENTRY_STRING("extcon-name", client_name), ... };
- * struct i2c_board_info bi =
- * { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq };
- * struct i2c_adapter ad = { ... };
- * i2c_add_adapter(&ad);
- * i2c_new_device(&ad, &bi);
- */
- if (device_property_read_string(dev, "extcon-name", &name) == 0) {
- bdi->extcon = extcon_get_extcon_dev(name);
- if (!bdi->extcon)
- return -EPROBE_DEFER;
-
- dev_info(bdi->dev, "using extcon device %s\n", name);
- }
-
pm_runtime_enable(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 600);
@@ -1882,20 +1794,6 @@ static int bq24190_probe(struct i2c_client *client,
if (ret < 0)
goto out_sysfs;
- if (bdi->extcon) {
- INIT_DELAYED_WORK(&bdi->extcon_work, bq24190_extcon_work);
- bdi->extcon_nb.notifier_call = bq24190_extcon_event;
- ret = devm_extcon_register_notifier_all(dev, bdi->extcon,
- &bdi->extcon_nb);
- if (ret) {
- dev_err(dev, "Can't register extcon\n");
- goto out_sysfs;
- }
-
- /* Sync initial cable state */
- queue_delayed_work(system_wq, &bdi->extcon_work, 0);
- }
-
enable_irq_wake(client->irq);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 51f0961ecf3e..d99981542a46 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -323,6 +323,30 @@ static u8
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
BQ27XXX_DM_REG_ROWS,
},
+ bq27521_regs[BQ27XXX_REG_MAX] = {
+ [BQ27XXX_REG_CTRL] = 0x02,
+ [BQ27XXX_REG_TEMP] = 0x0a,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x0c,
+ [BQ27XXX_REG_AI] = 0x0e,
+ [BQ27XXX_REG_FLAGS] = 0x08,
+ [BQ27XXX_REG_TTE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_FCC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CTRL] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CLASS] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
+ },
bq27530_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
@@ -557,6 +581,15 @@ static enum power_supply_property bq27520g4_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq27521_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
static enum power_supply_property bq27530_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -671,6 +704,7 @@ static struct bq27xxx_dm_reg bq27500_dm_regs[] = {
#define bq27520g2_dm_regs 0
#define bq27520g3_dm_regs 0
#define bq27520g4_dm_regs 0
+#define bq27521_dm_regs 0
#define bq27530_dm_regs 0
#define bq27531_dm_regs 0
#define bq27541_dm_regs 0
@@ -717,8 +751,8 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = {
#endif
#define BQ27XXX_O_ZERO 0x00000001
-#define BQ27XXX_O_OTDC 0x00000002
-#define BQ27XXX_O_UTOT 0x00000004
+#define BQ27XXX_O_OTDC 0x00000002 /* has OTC/OTD overtemperature flags */
+#define BQ27XXX_O_UTOT 0x00000004 /* has OT overtemperature flag */
#define BQ27XXX_O_CFGUP 0x00000008
#define BQ27XXX_O_RAM 0x00000010
@@ -751,6 +785,7 @@ static struct {
[BQ27520G2] = BQ27XXX_DATA(bq27520g2, 0 , BQ27XXX_O_OTDC),
[BQ27520G3] = BQ27XXX_DATA(bq27520g3, 0 , BQ27XXX_O_OTDC),
[BQ27520G4] = BQ27XXX_DATA(bq27520g4, 0 , BQ27XXX_O_OTDC),
+ [BQ27521] = BQ27XXX_DATA(bq27521, 0 , 0),
[BQ27530] = BQ27XXX_DATA(bq27530, 0 , BQ27XXX_O_UTOT),
[BQ27531] = BQ27XXX_DATA(bq27531, 0 , BQ27XXX_O_UTOT),
[BQ27541] = BQ27XXX_DATA(bq27541, 0 , BQ27XXX_O_OTDC),
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 0b11ed472f33..6b25e5f2337e 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -239,6 +239,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27520g2", BQ27520G2 },
{ "bq27520g3", BQ27520G3 },
{ "bq27520g4", BQ27520G4 },
+ { "bq27521", BQ27521 },
{ "bq27530", BQ27530 },
{ "bq27531", BQ27531 },
{ "bq27541", BQ27541 },
@@ -269,6 +270,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27520g2" },
{ .compatible = "ti,bq27520g3" },
{ .compatible = "ti,bq27520g4" },
+ { .compatible = "ti,bq27521" },
{ .compatible = "ti,bq27530" },
{ .compatible = "ti,bq27531" },
{ .compatible = "ti,bq27541" },
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 6502fa7c2106..1de4b4493824 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -578,7 +578,7 @@ static int check_charging_duration(struct charger_manager *cm)
} else if (is_ext_pwr_online(cm) && !cm->charger_enabled) {
duration = curr - cm->charging_end_time;
- if (duration > desc->charging_max_duration_ms &&
+ if (duration > desc->discharging_max_duration_ms &&
is_ext_pwr_online(cm)) {
dev_info(cm->dev, "Discharging duration exceed %ums\n",
desc->discharging_max_duration_ms);
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index ee71a2b37b12..98ba07869c3b 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -586,8 +586,8 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
int irq, error;
irq = platform_get_irq_byname(pdev, name);
- if (!irq)
- return -ENODEV;
+ if (irq < 0)
+ return irq;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
cpcap_battery_irq_thread,
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index 08e4fd9ee607..4cfa3f0cd689 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -60,6 +60,7 @@ enum ltc294x_id {
#define LTC294X_REG_CONTROL_PRESCALER_SET(x) \
((x << 3) & LTC294X_REG_CONTROL_PRESCALER_MASK)
#define LTC294X_REG_CONTROL_ALCC_CONFIG_DISABLED 0
+#define LTC294X_REG_CONTROL_ADC_DISABLE(x) ((x) & ~(BIT(7) | BIT(6)))
struct ltc294x_info {
struct i2c_client *client; /* I2C Client pointer */
@@ -523,6 +524,29 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
return 0;
}
+static void ltc294x_i2c_shutdown(struct i2c_client *client)
+{
+ struct ltc294x_info *info = i2c_get_clientdata(client);
+ int ret;
+ u8 value;
+ u8 control;
+
+ /* The LTC2941 does not need any special handling */
+ if (info->id == LTC2941_ID)
+ return;
+
+ /* Read control register */
+ ret = ltc294x_read_regs(info->client, LTC294X_REG_CONTROL, &value, 1);
+ if (ret < 0)
+ return;
+
+ /* Disable continuous ADC conversion as this drains the battery */
+ control = LTC294X_REG_CONTROL_ADC_DISABLE(value);
+ if (control != value)
+ ltc294x_write_regs(info->client, LTC294X_REG_CONTROL,
+ &control, 1);
+}
+
#ifdef CONFIG_PM_SLEEP
static int ltc294x_suspend(struct device *dev)
@@ -589,6 +613,7 @@ static struct i2c_driver ltc294x_driver = {
},
.probe = ltc294x_i2c_probe,
.remove = ltc294x_i2c_remove,
+ .shutdown = ltc294x_i2c_shutdown,
.id_table = ltc294x_i2c_id,
};
module_i2c_driver(ltc294x_driver);
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 5b556a13f517..35dde81b1c9b 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -123,6 +123,8 @@ static int max17042_get_temperature(struct max17042_chip *chip, int *temp)
static int max17042_get_status(struct max17042_chip *chip, int *status)
{
int ret, charge_full, charge_now;
+ int avg_current;
+ u32 data;
ret = power_supply_am_i_supplied(chip->battery);
if (ret < 0) {
@@ -152,10 +154,31 @@ static int max17042_get_status(struct max17042_chip *chip, int *status)
if (ret < 0)
return ret;
- if ((charge_full - charge_now) <= MAX17042_FULL_THRESHOLD)
+ if ((charge_full - charge_now) <= MAX17042_FULL_THRESHOLD) {
*status = POWER_SUPPLY_STATUS_FULL;
- else
+ return 0;
+ }
+
+ /*
+ * Even though we are supplied, we may still be discharging if the
+ * supply is e.g. only delivering 5V 0.5A. Check current if available.
+ */
+ if (!chip->pdata->enable_current_sense) {
*status = POWER_SUPPLY_STATUS_CHARGING;
+ return 0;
+ }
+
+ ret = regmap_read(chip->regmap, MAX17042_AvgCurrent, &data);
+ if (ret < 0)
+ return ret;
+
+ avg_current = sign_extend32(data, 15);
+ avg_current *= 1562500 / chip->pdata->r_sns;
+
+ if (avg_current > 0)
+ *status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ *status = POWER_SUPPLY_STATUS_DISCHARGING;
return 0;
}
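
[Annotation: the charging/discharging decision now comes straight from the power-status bits, with AvgCurrent consulted only when current sensing is enabled. The register is a two's-complement 16-bit value whose LSB, judging by the 1562500 factor used above, is 1.5625uV across the sense resistor, which yields micro-amps when r_sns is given in micro-ohms. A worked sketch of that conversion; the helper name is hypothetical:]

#include <linux/bitops.h>

/* convert a raw MAX17042_AvgCurrent reading to micro-amps;
 * r_sns_uohm is the sense resistor in micro-ohms (e.g. 10000) */
static int foo_avg_current_ua(u32 raw, int r_sns_uohm)
{
        int val = sign_extend32(raw, 15);    /* 16-bit two's complement */

        return val * (1562500 / r_sns_uohm); /* > 0 means charging */
}
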
@@ -863,16 +886,13 @@ static void max17042_init_worker(struct work_struct *work)
#ifdef CONFIG_OF
static struct max17042_platform_data *
-max17042_get_pdata(struct max17042_chip *chip)
+max17042_get_of_pdata(struct max17042_chip *chip)
{
struct device *dev = &chip->client->dev;
struct device_node *np = dev->of_node;
u32 prop;
struct max17042_platform_data *pdata;
- if (!np)
- return dev->platform_data;
-
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
@@ -897,7 +917,8 @@ max17042_get_pdata(struct max17042_chip *chip)
return pdata;
}
-#else
+#endif
+
static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
/*
* Some firmwares do not set FullSOCThr, Enable End-of-Charge Detection
@@ -907,15 +928,12 @@ static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
};
static struct max17042_platform_data *
-max17042_get_pdata(struct max17042_chip *chip)
+max17042_get_default_pdata(struct max17042_chip *chip)
{
struct device *dev = &chip->client->dev;
struct max17042_platform_data *pdata;
int ret, misc_cfg;
- if (dev->platform_data)
- return dev->platform_data;
-
/*
* The MAX17047 gets used on x86 where we might not have pdata, assume
* the firmware will already have initialized the fuel-gauge and provide
@@ -948,7 +966,21 @@ max17042_get_pdata(struct max17042_chip *chip)
return pdata;
}
+
+static struct max17042_platform_data *
+max17042_get_pdata(struct max17042_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+
+#ifdef CONFIG_OF
+ if (dev->of_node)
+ return max17042_get_of_pdata(chip);
#endif
+ if (dev->platform_data)
+ return dev->platform_data;
+
+ return max17042_get_default_pdata(chip);
+}
static const struct regmap_config max17042_regmap_config = {
.reg_bits = 8,
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index ccb4217b9638..cb6e8f66c7a2 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -183,7 +183,7 @@ static int sbsm_select(struct i2c_mux_core *muxc, u32 chan)
return ret;
/* chan goes from 1 ... 4 */
- reg = 1 << BIT(SBSM_SMB_BAT_OFFSET + chan);
+ reg = BIT(SBSM_SMB_BAT_OFFSET + chan);
ret = sbsm_write_word(data->client, SBSM_CMD_BATSYSSTATE, reg);
if (ret)
dev_err(dev, "Failed to select channel %i\n", chan);
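
[Annotation: the sbs-manager fix is a classic double shift. BIT(n) already expands to 1UL << n, so the old '1 << BIT(...)' computed 1 << (1 << n) and set the wrong battery-select bit for every channel. Assuming SBSM_SMB_BAT_OFFSET is 1 (the value is not shown in this hunk, so treat the arithmetic as illustrative):]

#include <linux/bitops.h>
#include <linux/types.h>

#define SBSM_SMB_BAT_OFFSET 1

/* chan = 1: intended mask is bit 2 */
static const u16 good = BIT(SBSM_SMB_BAT_OFFSET + 1);      /* 0x0004 */
static const u16 bad  = 1 << BIT(SBSM_SMB_BAT_OFFSET + 1); /* 1 << 4 = 0x0010 */
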
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index d1694f1def72..35636e1d8a3d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -29,6 +29,7 @@
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
+#include <linux/suspend.h>
#include <asm/iosf_mbi.h>
#include <asm/processor.h>
@@ -155,6 +156,7 @@ struct rapl_power_limit {
int prim_id; /* primitive ID used to enable */
struct rapl_domain *domain;
const char *name;
+ u64 last_power_limit;
};
static const char pl1_name[] = "long_term";
@@ -1209,7 +1211,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
struct rapl_domain *rd;
char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
struct powercap_zone *power_zone = NULL;
- int nr_pl, ret;;
+ int nr_pl, ret;
/* Update the domain data of the new package */
rapl_update_domain_data(rp);
@@ -1533,6 +1535,92 @@ static int rapl_cpu_down_prep(unsigned int cpu)
static enum cpuhp_state pcap_rapl_online;
+static void power_limit_state_save(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, ret, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT1,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ case PL2_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT2,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static void power_limit_state_restore(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT1,
+ rd->rpl[i].last_power_limit);
+ break;
+ case PL2_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT2,
+ rd->rpl[i].last_power_limit);
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static int rapl_pm_callback(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_SUSPEND_PREPARE:
+ power_limit_state_save();
+ break;
+ case PM_POST_SUSPEND:
+ power_limit_state_restore();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_pm_notifier = {
+ .notifier_call = rapl_pm_callback,
+};
+
static int __init rapl_init(void)
{
const struct x86_cpu_id *id;
@@ -1560,8 +1648,16 @@ static int __init rapl_init(void)
/* Don't bail out if PSys is not supported */
rapl_register_psys();
+
+ ret = register_pm_notifier(&rapl_pm_notifier);
+ if (ret)
+ goto err_unreg_all;
+
return 0;
+err_unreg_all:
+ cpuhp_remove_state(pcap_rapl_online);
+
err_unreg:
rapl_unregister_powercap();
return ret;
@@ -1569,6 +1665,7 @@ err_unreg:
static void __exit rapl_exit(void)
{
+ unregister_pm_notifier(&rapl_pm_notifier);
cpuhp_remove_state(pcap_rapl_online);
rapl_unregister_powercap();
}
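
[Annotation: one subtlety of the intel_rapl suspend handling above: PM_POST_SUSPEND is delivered both after a successful resume and after a suspend attempt that failed partway, so the restore path runs in either case. The skeleton of the pattern, reduced to its moving parts (foo names hypothetical):]

#include <linux/notifier.h>
#include <linux/suspend.h>

static int foo_pm_callback(struct notifier_block *nb,
                           unsigned long mode, void *unused)
{
        switch (mode) {
        case PM_SUSPEND_PREPARE:   /* before devices suspend */
                /* save state here */
                break;
        case PM_POST_SUSPEND:      /* after resume, or a failed suspend */
                /* restore state here */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block foo_pm_nb = {
        .notifier_call = foo_pm_callback,
};

/* register_pm_notifier(&foo_pm_nb) in init,
 * unregister_pm_notifier(&foo_pm_nb) in exit */
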
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 5b10b50f8686..64b2b2501a79 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -673,15 +673,13 @@ EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
static int __init powercap_init(void)
{
- int result = 0;
+ int result;
result = seed_constraint_attributes();
if (result)
return result;
- result = class_register(&powercap_class);
-
- return result;
+ return class_register(&powercap_class);
}
device_initcall(powercap_init);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 6eb0db37dd88..1d42385b1aa5 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -49,7 +49,7 @@ static DEFINE_IDR(pps_idr);
* Char device methods
*/
-static unsigned int pps_cdev_poll(struct file *file, poll_table *wait)
+static __poll_t pps_cdev_poll(struct file *file, poll_table *wait)
{
struct pps_device *pps = file->private_data;
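
[Annotation: the pps, ptp, rapidio and mport changes that follow are part of the tree-wide conversion of poll methods from unsigned int to the __poll_t bitwise type, which lets sparse flag poll masks mixed with ordinary integers; only the return type changes, the bodies stay as-is. Shape of a converted handler, with hypothetical foo names:]

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct foo {
        wait_queue_head_t waitq;
        bool ready;
};

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
        struct foo *foo = file->private_data;

        poll_wait(file, &foo->waitq, wait);

        if (foo->ready)
                return EPOLLIN | EPOLLRDNORM;
        return 0;
}
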
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 58a97d420572..a593b4cf47bf 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -280,7 +280,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
return err;
}
-unsigned int ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
+__poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index b86f1bfecd6f..c7c62b782cb9 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -90,7 +90,7 @@ int ptp_open(struct posix_clock *pc, fmode_t fmode);
ssize_t ptp_read(struct posix_clock *pc,
uint flags, char __user *buf, size_t cnt);
-uint ptp_poll(struct posix_clock *pc,
+__poll_t ptp_poll(struct posix_clock *pc,
struct file *fp, poll_table *wait);
/*
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index ec4bc1515f0d..6092b3a5978e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2319,7 +2319,7 @@ static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait)
+static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
{
struct mport_cdev_priv *priv = filp->private_data;
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index ca44e6977cf2..2d9ec378a8bc 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -327,7 +327,7 @@ int cec_add_elem(u64 pfn)
} else {
/* We have reached max count for this page, soft-offline it. */
pr_err("Soft-offlining pfn: 0x%llx\n", pfn);
- memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
+ memory_failure_queue(pfn, MF_SOFT_OFFLINE);
ca->pfns_poisoned++;
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 96cd55f9e3c5..b27417ca188a 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -744,6 +744,13 @@ config REGULATOR_S5M8767
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
supports DVS mode with 8bits of output voltage control.
+config REGULATOR_SC2731
+ tristate "Spreadtrum SC2731 power regulator driver"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ This driver provides support for the voltage regulators on the
+ SC2731 PMIC.
+
config REGULATOR_SKY81452
tristate "Skyworks Solutions SKY81452 voltage regulator"
depends on MFD_SKY81452
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 80ffc57a9ca3..19fea09ba10a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b64b7916507f..42681c10cbe4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -58,8 +58,6 @@ static bool has_full_constraints;
static struct dentry *debugfs_root;
-static struct class regulator_class;
-
/*
* struct regulator_map
*
@@ -112,11 +110,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
const char *supply_name);
static void _regulator_put(struct regulator *regulator);
-static struct regulator_dev *dev_to_rdev(struct device *dev)
-{
- return container_of(dev, struct regulator_dev, dev);
-}
-
static const char *rdev_get_name(struct regulator_dev *rdev)
{
if (rdev->constraints && rdev->constraints->name)
@@ -236,26 +229,35 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
return 0;
}
+/* return 0 if the state is valid */
+static int regulator_check_states(suspend_state_t state)
+{
+ return (state > PM_SUSPEND_MAX || state == PM_SUSPEND_TO_IDLE);
+}
+
/* Make sure we select a voltage that suits the needs of all
* regulator consumers
*/
static int regulator_check_consumers(struct regulator_dev *rdev,
- int *min_uV, int *max_uV)
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
{
struct regulator *regulator;
+ struct regulator_voltage *voltage;
list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
/*
* Assume consumers that didn't say anything are OK
* with anything in the constraint range.
*/
- if (!regulator->min_uV && !regulator->max_uV)
+ if (!voltage->min_uV && !voltage->max_uV)
continue;
- if (*max_uV > regulator->max_uV)
- *max_uV = regulator->max_uV;
- if (*min_uV < regulator->min_uV)
- *min_uV = regulator->min_uV;
+ if (*max_uV > voltage->max_uV)
+ *max_uV = voltage->max_uV;
+ if (*min_uV < voltage->min_uV)
+ *min_uV = voltage->min_uV;
}
if (*min_uV > *max_uV) {
@@ -324,6 +326,24 @@ static int regulator_mode_constrain(struct regulator_dev *rdev,
return -EINVAL;
}
+static inline struct regulator_state *
+regulator_get_suspend_state(struct regulator_dev *rdev, suspend_state_t state)
+{
+ if (rdev->constraints == NULL)
+ return NULL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return &rdev->constraints->state_standby;
+ case PM_SUSPEND_MEM:
+ return &rdev->constraints->state_mem;
+ case PM_SUSPEND_MAX:
+ return &rdev->constraints->state_disk;
+ default:
+ return NULL;
+ }
+}
+
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -731,29 +751,32 @@ static int drms_uA_update(struct regulator_dev *rdev)
}
static int suspend_set_state(struct regulator_dev *rdev,
- struct regulator_state *rstate)
+ suspend_state_t state)
{
int ret = 0;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
/* If we have no suspend mode configuration, don't set anything;
* only warn if the driver implements the set_suspend_voltage or
* set_suspend_mode callback.
*/
- if (!rstate->enabled && !rstate->disabled) {
+ if (rstate->enabled != ENABLE_IN_SUSPEND &&
+ rstate->enabled != DISABLE_IN_SUSPEND) {
if (rdev->desc->ops->set_suspend_voltage ||
rdev->desc->ops->set_suspend_mode)
rdev_warn(rdev, "No configuration\n");
return 0;
}
- if (rstate->enabled && rstate->disabled) {
- rdev_err(rdev, "invalid configuration\n");
- return -EINVAL;
- }
-
- if (rstate->enabled && rdev->desc->ops->set_suspend_enable)
+ if (rstate->enabled == ENABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_enable)
ret = rdev->desc->ops->set_suspend_enable(rdev);
- else if (rstate->disabled && rdev->desc->ops->set_suspend_disable)
+ else if (rstate->enabled == DISABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_disable)
ret = rdev->desc->ops->set_suspend_disable(rdev);
else /* OK if set_suspend_enable or set_suspend_disable is NULL */
ret = 0;
@@ -778,28 +801,8 @@ static int suspend_set_state(struct regulator_dev *rdev,
return ret;
}
}
- return ret;
-}
-
-/* locks held by caller */
-static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
-{
- if (!rdev->constraints)
- return -EINVAL;
- switch (state) {
- case PM_SUSPEND_STANDBY:
- return suspend_set_state(rdev,
- &rdev->constraints->state_standby);
- case PM_SUSPEND_MEM:
- return suspend_set_state(rdev,
- &rdev->constraints->state_mem);
- case PM_SUSPEND_MAX:
- return suspend_set_state(rdev,
- &rdev->constraints->state_disk);
- default:
- return -EINVAL;
- }
+ return ret;
}
static void print_constraints(struct regulator_dev *rdev)
@@ -1068,7 +1071,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* do we need to set up our suspend state? */
if (rdev->constraints->initial_state) {
- ret = suspend_prepare(rdev, rdev->constraints->initial_state);
+ ret = suspend_set_state(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
return ret;
@@ -1356,9 +1359,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
debugfs_create_u32("uA_load", 0444, regulator->debugfs,
&regulator->uA_load);
debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->min_uV);
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->max_uV);
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
debugfs_create_file("constraint_flags", 0444,
regulator->debugfs, regulator,
&constraint_flags_fops);
@@ -1417,20 +1420,6 @@ static void regulator_supply_alias(struct device **dev, const char **supply)
}
}
-static int of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-static struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
-{
- struct device *dev;
-
- dev = class_find_device(&regulator_class, NULL, np, of_node_match);
-
- return dev ? dev_to_rdev(dev) : NULL;
-}
-
static int regulator_match(struct device *dev, const void *data)
{
struct regulator_dev *r = dev_to_rdev(dev);
@@ -2468,10 +2457,9 @@ static int _regulator_is_enabled(struct regulator_dev *rdev)
return rdev->desc->ops->is_enabled(rdev);
}
-static int _regulator_list_voltage(struct regulator *regulator,
- unsigned selector, int lock)
+static int _regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector, int lock)
{
- struct regulator_dev *rdev = regulator->rdev;
const struct regulator_ops *ops = rdev->desc->ops;
int ret;
@@ -2487,7 +2475,8 @@ static int _regulator_list_voltage(struct regulator *regulator,
if (lock)
mutex_unlock(&rdev->mutex);
} else if (rdev->is_switch && rdev->supply) {
- ret = _regulator_list_voltage(rdev->supply, selector, lock);
+ ret = _regulator_list_voltage(rdev->supply->rdev,
+ selector, lock);
} else {
return -EINVAL;
}
@@ -2563,7 +2552,7 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
- return _regulator_list_voltage(regulator, selector, 1);
+ return _regulator_list_voltage(regulator->rdev, selector, 1);
}
EXPORT_SYMBOL_GPL(regulator_list_voltage);
@@ -2605,8 +2594,8 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
return -EOPNOTSUPP;
- *vsel_reg = rdev->desc->vsel_reg;
- *vsel_mask = rdev->desc->vsel_mask;
+ *vsel_reg = rdev->desc->vsel_reg;
+ *vsel_mask = rdev->desc->vsel_mask;
return 0;
}
@@ -2897,10 +2886,38 @@ out:
return ret;
}
+static int _regulator_do_set_suspend_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, suspend_state_t state)
+{
+ struct regulator_state *rstate;
+ int uV, sel;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (min_uV < rstate->min_uV)
+ min_uV = rstate->min_uV;
+ if (max_uV > rstate->max_uV)
+ max_uV = rstate->max_uV;
+
+ sel = regulator_map_voltage(rdev, min_uV, max_uV);
+ if (sel < 0)
+ return sel;
+
+ uV = rdev->desc->ops->list_voltage(rdev, sel);
+ if (uV >= min_uV && uV <= max_uV)
+ rstate->uV = uV;
+
+ return 0;
+}
+
static int regulator_set_voltage_unlocked(struct regulator *regulator,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ suspend_state_t state)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[state];
int ret = 0;
int old_min_uV, old_max_uV;
int current_uV;
@@ -2911,7 +2928,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
* should be a noop (some cpufreq implementations use the same
* voltage for multiple frequencies, for example).
*/
- if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
+ if (voltage->min_uV == min_uV && voltage->max_uV == max_uV)
goto out;
/* If we're trying to set a range that overlaps the current voltage,
@@ -2921,8 +2938,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
current_uV = _regulator_get_voltage(rdev);
if (min_uV <= current_uV && current_uV <= max_uV) {
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
goto out;
}
}
@@ -2940,12 +2957,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out;
/* restore original values in case of error */
- old_min_uV = regulator->min_uV;
- old_max_uV = regulator->max_uV;
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ old_min_uV = voltage->min_uV;
+ old_max_uV = voltage->max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state);
if (ret < 0)
goto out2;
@@ -2963,7 +2980,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out2;
}
- best_supply_uV = _regulator_list_voltage(regulator, selector, 0);
+ best_supply_uV = _regulator_list_voltage(rdev, selector, 0);
if (best_supply_uV < 0) {
ret = best_supply_uV;
goto out2;
@@ -2982,7 +2999,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (supply_change_uV > 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret) {
dev_err(&rdev->dev, "Failed to increase supply voltage: %d\n",
ret);
@@ -2990,13 +3007,17 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
}
}
- ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ if (state == PM_SUSPEND_ON)
+ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ else
+ ret = _regulator_do_set_suspend_voltage(rdev, min_uV,
+ max_uV, state);
if (ret < 0)
goto out2;
if (supply_change_uV < 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret)
dev_warn(&rdev->dev, "Failed to decrease supply voltage: %d\n",
ret);
@@ -3007,8 +3028,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
out:
return ret;
out2:
- regulator->min_uV = old_min_uV;
- regulator->max_uV = old_max_uV;
+ voltage->min_uV = old_min_uV;
+ voltage->max_uV = old_max_uV;
return ret;
}
@@ -3037,7 +3058,8 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
regulator_lock_supply(regulator->rdev);
- ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV);
+ ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV,
+ PM_SUSPEND_ON);
regulator_unlock_supply(regulator->rdev);
@@ -3045,6 +3067,89 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);
+static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
+ suspend_state_t state, bool en)
+{
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (!rstate->changeable)
+ return -EPERM;
+
+ rstate->enabled = en;
+
+ return 0;
+}
+
+int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return regulator_suspend_toggle(rdev, state, true);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_enable);
+
+int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator *regulator;
+ struct regulator_voltage *voltage;
+
+ /*
+ * If any consumer wants this regulator device to stay on in
+ * suspend states, don't mark it as disabled.
+ */
+ list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
+ if (voltage->min_uV || voltage->max_uV)
+ return 0;
+ }
+
+ return regulator_suspend_toggle(rdev, state, false);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_disable);
+
+static int _regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (rstate->min_uV == rstate->max_uV) {
+ rdev_err(rdev, "The suspend voltage can't be changed\n");
+ return -EPERM;
+ }
+
+ return regulator_set_voltage_unlocked(regulator, min_uV, max_uV, state);
+}
+
+int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV,
+ int max_uV, suspend_state_t state)
+{
+ int ret = 0;
+
+ /* PM_SUSPEND_ON is handled by regulator_set_voltage() */
+ if (regulator_check_states(state) || state == PM_SUSPEND_ON)
+ return -EINVAL;
+
+ regulator_lock_supply(regulator->rdev);
+
+ ret = _regulator_set_suspend_voltage(regulator, min_uV,
+ max_uV, state);
+
+ regulator_unlock_supply(regulator->rdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_set_suspend_voltage);
+
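For context, a consumer would drive the new suspend-voltage API roughly as in this sketch (regulator handle and voltages are hypothetical; the call fails with -EPERM if the constraints pin the suspend voltage (min == max), and PM_SUSPEND_ON is rejected with -EINVAL):

    #include <linux/regulator/consumer.h>
    #include <linux/suspend.h>

    /* Hypothetical consumer: request 1.1 V from "vdd" during suspend-to-RAM. */
    static int example_prepare_suspend(struct regulator *vdd)
    {
            return regulator_set_suspend_voltage(vdd, 1100000, 1100000,
                                                 PM_SUSPEND_MEM);
    }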
/**
* regulator_set_voltage_time - get raise/fall time
* @regulator: regulator source
@@ -3138,6 +3243,7 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
int regulator_sync_voltage(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[PM_SUSPEND_ON];
int ret, min_uV, max_uV;
mutex_lock(&rdev->mutex);
@@ -3149,20 +3255,20 @@ int regulator_sync_voltage(struct regulator *regulator)
}
/* This is only going to work if we've had a voltage configured. */
- if (!regulator->min_uV && !regulator->max_uV) {
+ if (!voltage->min_uV && !voltage->max_uV) {
ret = -EINVAL;
goto out;
}
- min_uV = regulator->min_uV;
- max_uV = regulator->max_uV;
+ min_uV = voltage->min_uV;
+ max_uV = voltage->max_uV;
/* This should be a paranoia check... */
ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
if (ret < 0)
goto out;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, PM_SUSPEND_ON);
if (ret < 0)
goto out;
@@ -3918,12 +4024,6 @@ static void regulator_dev_release(struct device *dev)
kfree(rdev);
}
-static struct class regulator_class = {
- .name = "regulator",
- .dev_release = regulator_dev_release,
- .dev_groups = regulator_dev_groups,
-};
-
static void rdev_init_debugfs(struct regulator_dev *rdev)
{
struct device *parent = rdev->dev.parent;
@@ -4174,81 +4274,86 @@ void regulator_unregister(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_unregister);
-static int _regulator_suspend_prepare(struct device *dev, void *data)
+#ifdef CONFIG_SUSPEND
+static int _regulator_suspend_late(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const suspend_state_t *state = data;
+ suspend_state_t *state = data;
int ret;
mutex_lock(&rdev->mutex);
- ret = suspend_prepare(rdev, *state);
+ ret = suspend_set_state(rdev, *state);
mutex_unlock(&rdev->mutex);
return ret;
}
/**
- * regulator_suspend_prepare - prepare regulators for system wide suspend
+ * regulator_suspend_late - prepare regulators for system wide suspend
* @state: system suspend state
*
* Configure each regulator with its suspend operating parameters for the given state.
- * This will usually be called by machine suspend code prior to supending.
*/
-int regulator_suspend_prepare(suspend_state_t state)
+static int regulator_suspend_late(struct device *dev)
{
- /* ON is handled by regulator active state */
- if (state == PM_SUSPEND_ON)
- return -EINVAL;
+ suspend_state_t state = pm_suspend_target_state;
return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_suspend_prepare);
+ _regulator_suspend_late);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
-
-static int _regulator_suspend_finish(struct device *dev, void *data)
+static int _regulator_resume_early(struct device *dev, void *data)
{
+ int ret = 0;
struct regulator_dev *rdev = dev_to_rdev(dev);
- int ret;
+ suspend_state_t *state = data;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, *state);
+ if (rstate == NULL)
+ return -EINVAL;
mutex_lock(&rdev->mutex);
- if (rdev->use_count > 0 || rdev->constraints->always_on) {
- if (!_regulator_is_enabled(rdev)) {
- ret = _regulator_do_enable(rdev);
- if (ret)
- dev_err(dev,
- "Failed to resume regulator %d\n",
- ret);
- }
- } else {
- if (!have_full_constraints())
- goto unlock;
- if (!_regulator_is_enabled(rdev))
- goto unlock;
- ret = _regulator_do_disable(rdev);
- if (ret)
- dev_err(dev, "Failed to suspend regulator %d\n", ret);
- }
-unlock:
+ if (rdev->desc->ops->resume_early &&
+ (rstate->enabled == ENABLE_IN_SUSPEND ||
+ rstate->enabled == DISABLE_IN_SUSPEND))
+ ret = rdev->desc->ops->resume_early(rdev);
+
mutex_unlock(&rdev->mutex);
- /* Keep processing regulators in spite of any errors */
- return 0;
+ return ret;
}
-/**
- * regulator_suspend_finish - resume regulators from system wide suspend
- *
- * Turn on regulators that might be turned off by regulator_suspend_prepare
- * and that should be turned on according to the regulators properties.
- */
-int regulator_suspend_finish(void)
+static int regulator_resume_early(struct device *dev)
{
- return class_for_each_device(&regulator_class, NULL, NULL,
- _regulator_suspend_finish);
+ suspend_state_t state = pm_suspend_target_state;
+
+ return class_for_each_device(&regulator_class, NULL, &state,
+ _regulator_resume_early);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_finish);
+#else /* !CONFIG_SUSPEND */
+
+#define regulator_suspend_late NULL
+#define regulator_resume_early NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops __maybe_unused regulator_pm_ops = {
+ .suspend_late = regulator_suspend_late,
+ .resume_early = regulator_resume_early,
+};
+#endif
+
+struct class regulator_class = {
+ .name = "regulator",
+ .dev_release = regulator_dev_release,
+ .dev_groups = regulator_dev_groups,
+#ifdef CONFIG_PM
+ .pm = &regulator_pm_ops,
+#endif
+};
/**
* regulator_has_full_constraints - the system has fully specified constraints
*
@@ -4424,8 +4529,8 @@ static void regulator_summary_show_subtree(struct seq_file *s,
switch (rdev->desc->type) {
case REGULATOR_VOLTAGE:
seq_printf(s, "%37dmV %5dmV",
- consumer->min_uV / 1000,
- consumer->max_uV / 1000);
+ consumer->voltage[PM_SUSPEND_ON].min_uV / 1000,
+ consumer->voltage[PM_SUSPEND_ON].max_uV / 1000);
break;
case REGULATOR_CURRENT:
break;
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 66a8ea0c8386..abfd56e8c78a 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -16,10 +16,25 @@
#ifndef __REGULATOR_INTERNAL_H
#define __REGULATOR_INTERNAL_H
+#include <linux/suspend.h>
+
+#define REGULATOR_STATES_NUM (PM_SUSPEND_MAX + 1)
+
+struct regulator_voltage {
+ int min_uV;
+ int max_uV;
+};
+
/*
* struct regulator
*
* One for each consumer device.
+ * @voltage - an array of voltage settings, one entry per system state, i.e.:
+ * PM_SUSPEND_ON
+ * PM_SUSPEND_TO_IDLE
+ * PM_SUSPEND_STANDBY
+ * PM_SUSPEND_MEM
+ * PM_SUSPEND_MAX
*/
struct regulator {
struct device *dev;
@@ -27,14 +42,22 @@ struct regulator {
unsigned int always_on:1;
unsigned int bypass:1;
int uA_load;
- int min_uV;
- int max_uV;
+ struct regulator_voltage voltage[REGULATOR_STATES_NUM];
const char *supply_name;
struct device_attribute dev_attr;
struct regulator_dev *rdev;
struct dentry *debugfs;
};
+extern struct class regulator_class;
+
+static inline struct regulator_dev *dev_to_rdev(struct device *dev)
+{
+ return container_of(dev, struct regulator_dev, dev);
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np);
+
#ifdef CONFIG_OF
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 14637a01ba2d..092ed6efb3ec 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -177,14 +177,30 @@ static void of_get_regulation_constraints(struct device_node *np,
if (of_property_read_bool(suspend_np,
"regulator-on-in-suspend"))
- suspend_state->enabled = true;
+ suspend_state->enabled = ENABLE_IN_SUSPEND;
else if (of_property_read_bool(suspend_np,
"regulator-off-in-suspend"))
- suspend_state->disabled = true;
+ suspend_state->enabled = DISABLE_IN_SUSPEND;
+ else
+ suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
+
+ if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
+ &pval))
+ suspend_state->min_uV = pval;
+
+ if (!of_property_read_u32(np, "regulator-suspend-max-microvolt",
+ &pval))
+ suspend_state->max_uV = pval;
if (!of_property_read_u32(suspend_np,
"regulator-suspend-microvolt", &pval))
suspend_state->uV = pval;
+ else /* otherwise use min_uV as default suspend voltage */
+ suspend_state->uV = suspend_state->min_uV;
+
+ if (of_property_read_bool(suspend_np,
+ "regulator-changeable-in-suspend"))
+ suspend_state->changeable = true;
if (i == PM_SUSPEND_MEM)
constraints->initial_state = PM_SUSPEND_MEM;
@@ -376,3 +392,17 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
return init_data;
}
+
+static int of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(&regulator_class, NULL, np, of_node_match);
+
+ return dev ? dev_to_rdev(dev) : NULL;
+}
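For boards that register constraints from platform code instead of DT, the suspend-state properties parsed above land in the same struct regulator_state fields; a minimal sketch with hypothetical values, using the field names this series introduces:

    #include <linux/regulator/machine.h>

    static struct regulation_constraints example_constraints = {
            .min_uV = 1000000,
            .max_uV = 1200000,
            .state_mem = {
                    .enabled    = ENABLE_IN_SUSPEND, /* regulator-on-in-suspend */
                    .uV         = 1100000,           /* regulator-suspend-microvolt */
                    .changeable = true,              /* regulator-changeable-in-suspend */
            },
    };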
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 0241ada47d04..63c7a0c17777 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -486,24 +486,6 @@ static int spmi_vreg_update_bits(struct spmi_regulator *vreg, u16 addr, u8 val,
return regmap_update_bits(vreg->regmap, vreg->base + addr, mask, val);
}
-static int spmi_regulator_common_is_enabled(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- u8 reg;
-
- spmi_vreg_read(vreg, SPMI_COMMON_REG_ENABLE, &reg, 1);
-
- return (reg & SPMI_COMMON_ENABLE_MASK) == SPMI_COMMON_ENABLE;
-}
-
-static int spmi_regulator_common_enable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_ENABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -513,7 +495,7 @@ static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
vreg->vs_enable_time = ktime_get();
}
- return spmi_regulator_common_enable(rdev);
+ return regulator_enable_regmap(rdev);
}
static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
@@ -524,14 +506,6 @@ static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
}
-static int spmi_regulator_common_disable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_DISABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
int min_uV, int max_uV)
{
@@ -1062,9 +1036,9 @@ static irqreturn_t spmi_regulator_vs_ocp_isr(int irq, void *data)
}
static struct regulator_ops spmi_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1077,9 +1051,9 @@ static struct regulator_ops spmi_smps_ops = {
};
static struct regulator_ops spmi_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1094,9 +1068,9 @@ static struct regulator_ops spmi_ldo_ops = {
};
static struct regulator_ops spmi_ln_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1107,8 +1081,8 @@ static struct regulator_ops spmi_ln_ldo_ops = {
static struct regulator_ops spmi_vs_ops = {
.enable = spmi_regulator_vs_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_pull_down = spmi_regulator_common_set_pull_down,
.set_soft_start = spmi_regulator_common_set_soft_start,
.set_over_current_protection = spmi_regulator_vs_ocp,
@@ -1117,9 +1091,9 @@ static struct regulator_ops spmi_vs_ops = {
};
static struct regulator_ops spmi_boost_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1128,9 +1102,9 @@ static struct regulator_ops spmi_boost_ops = {
};
static struct regulator_ops spmi_ftsmps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1143,9 +1117,9 @@ static struct regulator_ops spmi_ftsmps_ops = {
};
static struct regulator_ops spmi_ult_lo_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_ult_lo_smps_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_ult_lo_smps_get_voltage,
@@ -1157,9 +1131,9 @@ static struct regulator_ops spmi_ult_lo_smps_ops = {
};
static struct regulator_ops spmi_ult_ho_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
@@ -1172,9 +1146,9 @@ static struct regulator_ops spmi_ult_ho_smps_ops = {
};
static struct regulator_ops spmi_ult_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1711,6 +1685,9 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
vreg->desc.id = -1;
vreg->desc.owner = THIS_MODULE;
vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.enable_reg = reg->base + SPMI_COMMON_REG_ENABLE;
+ vreg->desc.enable_mask = SPMI_COMMON_ENABLE_MASK;
+ vreg->desc.enable_val = SPMI_COMMON_ENABLE;
vreg->desc.name = name = reg->name;
vreg->desc.supply_name = reg->supply;
vreg->desc.of_match = reg->name;
@@ -1723,6 +1700,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
config.dev = dev;
config.driver_data = vreg;
+ config.regmap = regmap;
rdev = devm_regulator_register(dev, &vreg->desc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n", name);
diff --git a/drivers/regulator/sc2731-regulator.c b/drivers/regulator/sc2731-regulator.c
new file mode 100644
index 000000000000..eb2bdf060b7b
--- /dev/null
+++ b/drivers/regulator/sc2731-regulator.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * SC2731 regulator lock register
+ */
+#define SC2731_PWR_WR_PROT 0xf0c
+#define SC2731_WR_UNLOCK_VALUE 0x6e7f
+
+/*
+ * SC2731 enable register
+ */
+#define SC2731_POWER_PD_SW 0xc28
+#define SC2731_LDO_CAMA0_PD 0xcfc
+#define SC2731_LDO_CAMA1_PD 0xd04
+#define SC2731_LDO_CAMMOT_PD 0xd0c
+#define SC2731_LDO_VLDO_PD 0xd6c
+#define SC2731_LDO_EMMCCORE_PD 0xd2c
+#define SC2731_LDO_SDCORE_PD 0xd74
+#define SC2731_LDO_SDIO_PD 0xd70
+#define SC2731_LDO_WIFIPA_PD 0xd4c
+#define SC2731_LDO_USB33_PD 0xd5c
+#define SC2731_LDO_CAMD0_PD 0xd7c
+#define SC2731_LDO_CAMD1_PD 0xd84
+#define SC2731_LDO_CON_PD 0xd8c
+#define SC2731_LDO_CAMIO_PD 0xd94
+#define SC2731_LDO_SRAM_PD 0xd78
+
+/*
+ * SC2731 enable mask
+ */
+#define SC2731_DCDC_CPU0_PD_MASK BIT(4)
+#define SC2731_DCDC_CPU1_PD_MASK BIT(3)
+#define SC2731_DCDC_RF_PD_MASK BIT(11)
+#define SC2731_LDO_CAMA0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMA1_PD_MASK BIT(0)
+#define SC2731_LDO_CAMMOT_PD_MASK BIT(0)
+#define SC2731_LDO_VLDO_PD_MASK BIT(0)
+#define SC2731_LDO_EMMCCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDIO_PD_MASK BIT(0)
+#define SC2731_LDO_WIFIPA_PD_MASK BIT(0)
+#define SC2731_LDO_USB33_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD1_PD_MASK BIT(0)
+#define SC2731_LDO_CON_PD_MASK BIT(0)
+#define SC2731_LDO_CAMIO_PD_MASK BIT(0)
+#define SC2731_LDO_SRAM_PD_MASK BIT(0)
+
+/*
+ * SC2731 vsel register
+ */
+#define SC2731_DCDC_CPU0_VOL 0xc54
+#define SC2731_DCDC_CPU1_VOL 0xc64
+#define SC2731_DCDC_RF_VOL 0xcb8
+#define SC2731_LDO_CAMA0_VOL 0xd00
+#define SC2731_LDO_CAMA1_VOL 0xd08
+#define SC2731_LDO_CAMMOT_VOL 0xd10
+#define SC2731_LDO_VLDO_VOL 0xd28
+#define SC2731_LDO_EMMCCORE_VOL 0xd30
+#define SC2731_LDO_SDCORE_VOL 0xd38
+#define SC2731_LDO_SDIO_VOL 0xd40
+#define SC2731_LDO_WIFIPA_VOL 0xd50
+#define SC2731_LDO_USB33_VOL 0xd60
+#define SC2731_LDO_CAMD0_VOL 0xd80
+#define SC2731_LDO_CAMD1_VOL 0xd88
+#define SC2731_LDO_CON_VOL 0xd90
+#define SC2731_LDO_CAMIO_VOL 0xd98
+#define SC2731_LDO_SRAM_VOL 0xdb0
+
+/*
+ * SC2731 vsel register mask
+ */
+#define SC2731_DCDC_CPU0_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_CPU1_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_RF_VOL_MASK GENMASK(8, 0)
+#define SC2731_LDO_CAMA0_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMA1_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMMOT_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_VLDO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_EMMCCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDIO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_WIFIPA_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_USB33_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMD0_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMD1_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CON_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMIO_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_SRAM_VOL_MASK GENMASK(6, 0)
+
+enum sc2731_regulator_id {
+ SC2731_BUCK_CPU0,
+ SC2731_BUCK_CPU1,
+ SC2731_BUCK_RF,
+ SC2731_LDO_CAMA0,
+ SC2731_LDO_CAMA1,
+ SC2731_LDO_CAMMOT,
+ SC2731_LDO_VLDO,
+ SC2731_LDO_EMMCCORE,
+ SC2731_LDO_SDCORE,
+ SC2731_LDO_SDIO,
+ SC2731_LDO_WIFIPA,
+ SC2731_LDO_USB33,
+ SC2731_LDO_CAMD0,
+ SC2731_LDO_CAMD1,
+ SC2731_LDO_CON,
+ SC2731_LDO_CAMIO,
+ SC2731_LDO_SRAM,
+};
+
+static const struct regulator_ops sc2731_regu_linear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+#define SC2731_REGU_LINEAR(_id, en_reg, en_mask, vreg, vmask, \
+ vstep, vmin, vmax) { \
+ .name = #_id, \
+ .of_match = of_match_ptr(#_id), \
+ .ops = &sc2731_regu_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = SC2731_##_id, \
+ .owner = THIS_MODULE, \
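regulator_check_states() returns nonzero for any state the constraint tables cannot describe: anything above PM_SUSPEND_MAX, and suspend-to-idle, which has no regulator state of its own. Callers use it as a guard, as regulator_set_suspend_voltage() does below:

    if (regulator_check_states(state) || state == PM_SUSPEND_ON)
            return -EINVAL;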
+ .min_uV = vmin, \
+ .n_voltages = ((vmax) - (vmin)) / (vstep) + 1, \
+ .uV_step = vstep, \
+ .enable_is_inverted = true, \
+ .enable_val = 0, \
+ .enable_reg = en_reg, \
+ .enable_mask = en_mask, \
+ .vsel_reg = vreg, \
+ .vsel_mask = vmask, \
+}
+
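As a quick check of the n_voltages arithmetic in the macro above, take the BUCK_CPU0 entry below: ((1996875 - 400000) / 3125) + 1 = 512 selector values, which exactly fills the 9-bit vsel field GENMASK(8, 0).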
+static struct regulator_desc regulators[] = {
+ SC2731_REGU_LINEAR(BUCK_CPU0, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU0_PD_MASK, SC2731_DCDC_CPU0_VOL,
+ SC2731_DCDC_CPU0_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_CPU1, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU1_PD_MASK, SC2731_DCDC_CPU1_VOL,
+ SC2731_DCDC_CPU1_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_RF, SC2731_POWER_PD_SW, SC2731_DCDC_RF_PD_MASK,
+ SC2731_DCDC_RF_VOL, SC2731_DCDC_RF_VOL_MASK,
+ 3125, 600000, 2196875),
+ SC2731_REGU_LINEAR(LDO_CAMA0, SC2731_LDO_CAMA0_PD,
+ SC2731_LDO_CAMA0_PD_MASK, SC2731_LDO_CAMA0_VOL,
+ SC2731_LDO_CAMA0_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMA1, SC2731_LDO_CAMA1_PD,
+ SC2731_LDO_CAMA1_PD_MASK, SC2731_LDO_CAMA1_VOL,
+ SC2731_LDO_CAMA1_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMMOT, SC2731_LDO_CAMMOT_PD,
+ SC2731_LDO_CAMMOT_PD_MASK, SC2731_LDO_CAMMOT_VOL,
+ SC2731_LDO_CAMMOT_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_VLDO, SC2731_LDO_VLDO_PD,
+ SC2731_LDO_VLDO_PD_MASK, SC2731_LDO_VLDO_VOL,
+ SC2731_LDO_VLDO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_EMMCCORE, SC2731_LDO_EMMCCORE_PD,
+ SC2731_LDO_EMMCCORE_PD_MASK, SC2731_LDO_EMMCCORE_VOL,
+ SC2731_LDO_EMMCCORE_VOL_MASK, 10000, 1200000,
+ 3750000),
+ SC2731_REGU_LINEAR(LDO_SDCORE, SC2731_LDO_SDCORE_PD,
+ SC2731_LDO_SDCORE_PD_MASK, SC2731_LDO_SDCORE_VOL,
+ SC2731_LDO_SDCORE_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_SDIO, SC2731_LDO_SDIO_PD,
+ SC2731_LDO_SDIO_PD_MASK, SC2731_LDO_SDIO_VOL,
+ SC2731_LDO_SDIO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_WIFIPA, SC2731_LDO_WIFIPA_PD,
+ SC2731_LDO_WIFIPA_PD_MASK, SC2731_LDO_WIFIPA_VOL,
+ SC2731_LDO_WIFIPA_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_USB33, SC2731_LDO_USB33_PD,
+ SC2731_LDO_USB33_PD_MASK, SC2731_LDO_USB33_VOL,
+ SC2731_LDO_USB33_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMD0, SC2731_LDO_CAMD0_PD,
+ SC2731_LDO_CAMD0_PD_MASK, SC2731_LDO_CAMD0_VOL,
+ SC2731_LDO_CAMD0_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMD1, SC2731_LDO_CAMD1_PD,
+ SC2731_LDO_CAMD1_PD_MASK, SC2731_LDO_CAMD1_VOL,
+ SC2731_LDO_CAMD1_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CON, SC2731_LDO_CON_PD,
+ SC2731_LDO_CON_PD_MASK, SC2731_LDO_CON_VOL,
+ SC2731_LDO_CON_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMIO, SC2731_LDO_CAMIO_PD,
+ SC2731_LDO_CAMIO_PD_MASK, SC2731_LDO_CAMIO_VOL,
+ SC2731_LDO_CAMIO_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_SRAM, SC2731_LDO_SRAM_PD,
+ SC2731_LDO_SRAM_PD_MASK, SC2731_LDO_SRAM_VOL,
+ SC2731_LDO_SRAM_VOL_MASK, 6250, 1000000, 1793750),
+};
+
+static int sc2731_regulator_unlock(struct regmap *regmap)
+{
+ return regmap_write(regmap, SC2731_PWR_WR_PROT,
+ SC2731_WR_UNLOCK_VALUE);
+}
+
+static int sc2731_regulator_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct regmap *regmap;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "failed to get regmap\n");
+ return -ENODEV;
+ }
+
+ ret = sc2731_regulator_unlock(regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to release regulator lock\n");
+ return ret;
+ }
+
+ config.dev = &pdev->dev;
+ config.regmap = regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulators[i].name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver sc2731_regulator_driver = {
+ .driver = {
+ .name = "sc27xx-regulator",
+ },
+ .probe = sc2731_regulator_probe,
+};
+
+module_platform_driver(sc2731_regulator_driver);
+
+MODULE_AUTHOR("Chen Junhui <erick.chen@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC2731 regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index bc489958fed7..1827185beacc 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -28,9 +28,6 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65218.h>
-enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
- DCDC5, DCDC6, LDO1, LS3 };
-
#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
_em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
{ \
@@ -329,6 +326,8 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
/* Allocate memory for strobes */
tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
TPS65218_NUM_REGULATOR, GFP_KERNEL);
+ if (!tps->strobes)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
rdev = devm_regulator_register(&pdev->dev, &regulators[i],
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index b01774e9fac0..e540ca362d08 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -919,12 +919,12 @@ static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return __qcom_smd_send(qsept->qsch, data, len, false);
}
-static unsigned int qcom_smd_poll(struct rpmsg_endpoint *ept,
+static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
struct qcom_smd_channel *channel = qsept->qsch;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &channel->fblockread_event, wait);
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index e0996fce3963..e622fcda30fa 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -256,10 +256,10 @@ free_kbuf:
return ret < 0 ? ret : len;
}
-static unsigned int rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
+static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
struct rpmsg_eptdev *eptdev = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!eptdev->ept)
return POLLERR;
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index dffa3aab7178..5a081762afcc 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -247,7 +247,7 @@ EXPORT_SYMBOL(rpmsg_trysendto);
*
* Returns mask representing the current state of the endpoint's send buffers
*/
-unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
+__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait)
{
if (WARN_ON(!ept))
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 0cf9c7e2ee83..685aa70e9cbe 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -71,7 +71,7 @@ struct rpmsg_endpoint_ops {
int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len);
- unsigned int (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
+ __poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
};
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 215eac68ae2d..5a7b30d0773b 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -194,7 +194,7 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return ret;
}
-static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
+static __poll_t rtc_dev_poll(struct file *file, poll_table *wait)
{
struct rtc_device *rtc = file->private_data;
unsigned long data;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d4e8dff673cc..a7c15f0085e2 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1393,10 +1393,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
DBF_DEV_EVENT(DBF_ERR, device, "%s",
"device gone, retry");
break;
- case -EIO:
- DBF_DEV_EVENT(DBF_ERR, device, "%s",
- "I/O error, retry");
- break;
case -EINVAL:
/*
* device not valid so no I/O could be running
@@ -1412,10 +1408,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
/* fake rc to success */
rc = 0;
break;
- case -EBUSY:
- DBF_DEV_EVENT(DBF_ERR, device, "%s",
- "device busy, retry later");
- break;
default:
/* internal error 10 - unknown rc*/
snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
@@ -1489,10 +1481,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: device busy, retry later");
break;
- case -ETIMEDOUT:
- DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "start_IO: request timeout, retry later");
- break;
case -EACCES:
/* -EACCES indicates that the request used only a subset of the
* available paths and all these paths are gone. If the lpm of
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c94b606e0df8..ee14d8e45c97 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
}
+
+ /*
+ * For path verification work we need to stick with the path that was
+ * originally chosen so that the per-path configuration data is
+ * assigned correctly.
+ */
+ if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm)
+ erp->lpm = cqr->lpm;
+
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
dev_err(&device->cdev->dev,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a2edf2a7ace9..29397a9dba68 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -5231,7 +5231,7 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
struct dasd_ckd_path_group_entry *entry;
struct dasd_ckd_host_information *info;
char sysplex[9] = "";
- int rc, i, j;
+ int rc, i;
access = kzalloc(sizeof(*access), GFP_NOIO);
if (!access) {
@@ -5251,10 +5251,7 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
entry = (struct dasd_ckd_path_group_entry *)
(info->entry + i * info->entry_size);
/* PGID */
- seq_puts(m, "pgid ");
- for (j = 0; j < 11; j++)
- seq_printf(m, "%02x", entry->pgid[j]);
- seq_putc(m, '\n');
+ seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
/* FLAGS */
seq_printf(m, "status_flags %02x\n", entry->status_flags);
/* SYSPLEX NAME */
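The %*phN extension prints a buffer (up to 64 bytes) as contiguous hex digits with no separators, which is what lets a single seq_printf() replace the removed byte loop; with hypothetical data:

    u8 pgid[11] = { 0x01, 0x02, 0x03 };     /* trailing bytes zero */

    seq_printf(m, "pgid %*phN\n", 11, pgid);
    /* emits: pgid 0102030000000000000000 */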
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index a7917d473774..0c075d100252 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -661,9 +661,9 @@ static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
return effective_count;
}
-static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
+static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
struct eerbuffer *eerb;
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 05ac6ba15a53..614b44e70a28 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp_early_core.o += -march=z900
endif
+CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o sclp_early_core.o
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index bf4ab4efed73..956f662908a6 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -429,7 +429,7 @@ out_copy:
return count;
}
-static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
+static __poll_t mon_poll(struct file *filp, struct poll_table_struct *p)
{
struct mon_private *monpriv = filp->private_data;
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index edeb2597b0b8..17b0c67f3e8d 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -12,8 +12,8 @@
#include "sclp.h"
#include "sclp_rw.h"
-char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(data);
-int sclp_init_state __section(data) = sclp_init_state_uninitialized;
+char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
+int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
void sclp_early_wait_irq(void)
{
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 0f11dce6e224..9263a0fb3858 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -268,7 +268,7 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%01x\n", sch->st);
}
-static DEVICE_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR_RO(type);
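DEVICE_ATTR_RO(type) is shorthand for the line it replaces: it declares dev_attr_type with mode 0444 and binds the show routine by name, expanding (simplified) to:

    static struct device_attribute dev_attr_type =
            __ATTR(type, 0444, type_show, NULL);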
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -278,7 +278,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "css:t%01X\n", sch->st);
}
-static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
+static DEVICE_ATTR_RO(modalias);
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
@@ -315,7 +315,7 @@ static ssize_t chpids_show(struct device *dev,
ret += sprintf(buf + ret, "\n");
return ret;
}
-static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
+static DEVICE_ATTR_RO(chpids);
static ssize_t pimpampom_show(struct device *dev,
struct device_attribute *attr,
@@ -327,7 +327,7 @@ static ssize_t pimpampom_show(struct device *dev,
return sprintf(buf, "%02x %02x %02x\n",
pmcw->pim, pmcw->pam, pmcw->pom);
}
-static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
+static DEVICE_ATTR_RO(pimpampom);
static struct attribute *io_subchannel_type_attrs[] = {
&dev_attr_chpids.attr,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 75a245f38e2e..f50ea035aa9b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -597,13 +597,13 @@ static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%02x\n", sch->vpm);
}
-static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
-static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
-static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
-static DEVICE_ATTR(online, 0644, online_show, online_store);
+static DEVICE_ATTR_RO(devtype);
+static DEVICE_ATTR_RO(cutype);
+static DEVICE_ATTR_RO(modalias);
+static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
-static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
+static DEVICE_ATTR_RO(vpm);
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_logging.attr,
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 97a8cf578116..2c726df210f6 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -57,7 +57,7 @@ static ssize_t ap_functions_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
}
-static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
+static DEVICE_ATTR_RO(ap_functions);
static ssize_t ap_req_count_show(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a782a207ad31..c7e484f70654 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -91,9 +91,6 @@ config QETH_L3
To compile as a module choose M. The module name is qeth_l3.
If unsure, choose Y.
-config QETH_IPV6
- def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
-
config CCWGROUP
tristate
default (LCS || CTCM || QETH)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 92ae84a927fc..0ee8f33efb54 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -756,18 +756,14 @@ lcs_get_lancmd(struct lcs_card *card, int count)
static void
lcs_get_reply(struct lcs_reply *reply)
{
- WARN_ON(atomic_read(&reply->refcnt) <= 0);
- atomic_inc(&reply->refcnt);
+ refcount_inc(&reply->refcnt);
}
static void
lcs_put_reply(struct lcs_reply *reply)
{
- WARN_ON(atomic_read(&reply->refcnt) <= 0);
- if (atomic_dec_and_test(&reply->refcnt)) {
+ if (refcount_dec_and_test(&reply->refcnt))
kfree(reply);
- }
-
}
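Unlike a bare atomic_t, refcount_t saturates rather than wrapping and warns on misuse such as incrementing from zero, so the open-coded WARN_ON checks become redundant; the general pattern, in a minimal sketch:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct example_obj {
            refcount_t refcnt;      /* initialised with refcount_set(&obj->refcnt, 1) */
    };

    static void example_get(struct example_obj *obj)
    {
            refcount_inc(&obj->refcnt);     /* WARNs and saturates on misuse */
    }

    static void example_put(struct example_obj *obj)
    {
            if (refcount_dec_and_test(&obj->refcnt))
                    kfree(obj);
    }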
static struct lcs_reply *
@@ -780,7 +776,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd)
reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
if (!reply)
return NULL;
- atomic_set(&reply->refcnt,1);
+ refcount_set(&reply->refcnt, 1);
reply->sequence_no = cmd->sequence_no;
reply->received = 0;
reply->rc = 0;
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index fbc8b90b1f85..bd52caa3b11b 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -5,6 +5,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/refcount.h>
#include <asm/ccwdev.h>
#define LCS_DBF_TEXT(level, name, text) \
@@ -271,7 +272,7 @@ struct lcs_buffer {
struct lcs_reply {
struct list_head list;
__u16 sequence_no;
- atomic_t refcnt;
+ refcount_t refcnt;
/* Callback for completion notification. */
void (*callback)(struct lcs_card *, struct lcs_cmd *);
wait_queue_head_t wait_q;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index badf42acbf95..db42107bf2f5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -21,6 +21,7 @@
#include <linux/ethtool.h>
#include <linux/hashtable.h>
#include <linux/ip.h>
+#include <linux/refcount.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
@@ -296,8 +297,23 @@ struct qeth_hdr_layer3 {
__u8 ext_flags;
__u16 vlan_id;
__u16 frame_offset;
- __u8 dest_addr[16];
-} __attribute__ ((packed));
+ union {
+ /* TX: */
+ u8 ipv6_addr[16];
+ struct ipv4 {
+ u8 res[12];
+ u32 addr;
+ } ipv4;
+ /* RX: */
+ struct rx {
+ u8 res1[2];
+ u8 src_mac[6];
+ u8 res2[4];
+ u16 vlan_id;
+ u8 res3[2];
+ } rx;
+ } next_hop;
+};
struct qeth_hdr_layer2 {
__u8 id;
@@ -504,12 +520,6 @@ struct qeth_qdio_info {
int default_out_queue;
};
-#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
-#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
-/* tr mc mac is longer, but that will be enough to detect mc frames */
-#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
-#define QETH_TR_MAC_C 0x0300 /* canonical */
-
/**
* buffer stuff for read channel
*/
@@ -632,7 +642,7 @@ struct qeth_reply {
int rc;
void *param;
struct qeth_card *card;
- atomic_t refcnt;
+ refcount_t refcnt;
};
struct qeth_card_blkt {
@@ -846,14 +856,16 @@ static inline int qeth_get_micros(void)
static inline int qeth_get_ip_version(struct sk_buff *skb)
{
- __be16 *p = &((struct ethhdr *)skb->data)->h_proto;
+ struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+ __be16 prot = veth->h_vlan_proto;
+
+ if (prot == htons(ETH_P_8021Q))
+ prot = veth->h_vlan_encapsulated_proto;
- if (be16_to_cpu(*p) == ETH_P_8021Q)
- p += 2;
- switch (be16_to_cpu(*p)) {
- case ETH_P_IPV6:
+ switch (prot) {
+ case htons(ETH_P_IPV6):
return 6;
- case ETH_P_IP:
+ case htons(ETH_P_IP):
return 4;
default:
return 0;
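The case labels are valid because htons() of a compile-time constant folds to an integer constant expression; switching in network byte order also saves a byte swap per packet on little-endian machines. The shape of the test, as a standalone sketch:

    #include <linux/if_ether.h>

    static bool example_is_ipv4(__be16 prot)
    {
            return prot == htons(ETH_P_IP); /* folded at compile time */
    }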
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3614df68830f..6abd3bc285e4 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -36,6 +36,7 @@
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
+#include <asm/cpcmd.h>
#include "qeth_core.h"
@@ -564,7 +565,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
if (reply) {
- atomic_set(&reply->refcnt, 1);
+ refcount_set(&reply->refcnt, 1);
atomic_set(&reply->received, 0);
reply->card = card;
}
@@ -573,14 +574,12 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
static void qeth_get_reply(struct qeth_reply *reply)
{
- WARN_ON(atomic_read(&reply->refcnt) <= 0);
- atomic_inc(&reply->refcnt);
+ refcount_inc(&reply->refcnt);
}
static void qeth_put_reply(struct qeth_reply *reply)
{
- WARN_ON(atomic_read(&reply->refcnt) <= 0);
- if (atomic_dec_and_test(&reply->refcnt))
+ if (refcount_dec_and_test(&reply->refcnt))
kfree(reply);
}
@@ -1717,23 +1716,87 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
(prcd[0x11] == _ascebc['M']));
}
+static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
+{
+ enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+ struct diag26c_vnic_resp *response = NULL;
+ struct diag26c_vnic_req *request = NULL;
+ struct ccw_dev_id id;
+ char userid[80];
+ int rc = 0;
+
+ QETH_DBF_TEXT(SETUP, 2, "vmlayer");
+
+ cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
+ if (rc)
+ goto out;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
+ response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
+ if (!request || !response) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ccw_device_get_id(CARD_RDEV(card), &id);
+ request->resp_buf_len = sizeof(*response);
+ request->resp_version = DIAG26C_VERSION6_VM65918;
+ request->req_format = DIAG26C_VNIC_INFO;
+ ASCEBC(userid, 8);
+ memcpy(&request->sys_name, userid, 8);
+ request->devno = id.devno;
+
+ QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+ rc = diag26c(request, response, DIAG26C_PORT_VNIC);
+ QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+ if (rc)
+ goto out;
+ QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
+
+ if (request->resp_buf_len < sizeof(*response) ||
+ response->version != request->resp_version) {
+ rc = -EIO;
+ goto out;
+ }
+
+ if (response->protocol == VNIC_INFO_PROT_L2)
+ disc = QETH_DISCIPLINE_LAYER2;
+ else if (response->protocol == VNIC_INFO_PROT_L3)
+ disc = QETH_DISCIPLINE_LAYER3;
+
+out:
+ kfree(response);
+ kfree(request);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
+ return disc;
+}
+
/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
+ enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+
if (card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSN) {
+ card->info.type == QETH_CARD_TYPE_OSN)
+ disc = QETH_DISCIPLINE_LAYER2;
+ else if (card->info.guestlan)
+ disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
+ QETH_DISCIPLINE_LAYER3 :
+ qeth_vm_detect_layer(card);
+
+ switch (disc) {
+ case QETH_DISCIPLINE_LAYER2:
QETH_DBF_TEXT(SETUP, 3, "force l2");
- return QETH_DISCIPLINE_LAYER2;
- }
-
- /* virtual HiperSocket is L3 only: */
- if (card->info.guestlan && card->info.type == QETH_CARD_TYPE_IQD) {
+ break;
+ case QETH_DISCIPLINE_LAYER3:
QETH_DBF_TEXT(SETUP, 3, "force l3");
- return QETH_DISCIPLINE_LAYER3;
+ break;
+ default:
+ QETH_DBF_TEXT(SETUP, 3, "force no");
}
- QETH_DBF_TEXT(SETUP, 3, "force no");
- return QETH_DISCIPLINE_UNDETERMINED;
+ return disc;
}
static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
@@ -4218,9 +4281,8 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
cmd = (struct qeth_ipa_cmd *) data;
if (!card->options.layer2 ||
!(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
- memcpy(card->dev->dev_addr,
- &cmd->data.setadapterparms.data.change_addr.addr,
- OSA_ADDR_LEN);
+ ether_addr_copy(card->dev->dev_addr,
+ cmd->data.setadapterparms.data.change_addr.addr);
card->info.mac_bits |= QETH_LAYER2_MAC_READ;
}
qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
@@ -4242,9 +4304,9 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
- cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
- memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
- card->dev->dev_addr, OSA_ADDR_LEN);
+ cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
+ ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
+ card->dev->dev_addr);
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
NULL);
return rc;
@@ -4789,9 +4851,12 @@ int qeth_vm_request_mac(struct qeth_card *card)
request->op_code = DIAG26C_GET_MAC;
request->devno = id.devno;
+ QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
+ QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
if (rc)
goto out;
+ QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
if (request->resp_buf_len < sizeof(*response) ||
response->version != request->resp_version) {
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index ff6877f7b6f8..619f897b4bb0 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -10,6 +10,7 @@
#define __QETH_CORE_MPC_H__
#include <asm/qeth.h>
+#include <uapi/linux/if_ether.h>
#define IPA_PDU_HEADER_SIZE 0x40
#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -25,7 +26,6 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_SEQ_NO_LENGTH 4
#define QETH_MPC_TOKEN_LENGTH 4
#define QETH_MCL_LENGTH 4
-#define OSA_ADDR_LEN 6
#define QETH_TIMEOUT (10 * HZ)
#define QETH_IPA_TIMEOUT (45 * HZ)
@@ -416,12 +416,11 @@ struct qeth_query_cmds_supp {
} __attribute__ ((packed));
struct qeth_change_addr {
- __u32 cmd;
- __u32 addr_size;
- __u32 no_macs;
- __u8 addr[OSA_ADDR_LEN];
-} __attribute__ ((packed));
-
+ u32 cmd;
+ u32 addr_size;
+ u32 no_macs;
+ u8 addr[ETH_ALEN];
+};
struct qeth_snmp_cmd {
__u8 token[16];
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 09b1c4ef3dc9..f2130051ca11 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -22,8 +22,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
struct qeth_mac {
- u8 mac_addr[OSA_ADDR_LEN];
- u8 is_uc:1;
+ u8 mac_addr[ETH_ALEN];
u8 disp_flag:2;
struct hlist_node hnode;
};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 5863ea170ff2..7f236440483f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -109,8 +109,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
- memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+ cmd->data.setdelmac.mac_length = ETH_ALEN;
+ ether_addr_copy(cmd->data.setdelmac.mac, mac);
return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
NULL, NULL));
}
@@ -123,7 +123,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
if (rc == 0) {
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+ ether_addr_copy(card->dev->dev_addr, mac);
dev_info(&card->gdev->dev,
"MAC address %pM successfully registered on device %s\n",
card->dev->dev_addr, card->dev->name);
@@ -156,54 +156,37 @@ static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
return rc;
}
-static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
{
+ enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+ IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
int rc;
- QETH_CARD_TEXT(card, 2, "L2Sgmac");
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
+ QETH_CARD_TEXT(card, 2, "L2Wmac");
+ rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc == -EEXIST)
- QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
- mac, QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
+ mac, QETH_CARD_IFNAME(card));
else if (rc)
- QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
return rc;
}
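
The unified helpers (here and in qeth_l2_remove_mac() just below) pick the IPA command by testing one property of the address. For reference, a self-contained version of the test that is_multicast_ether_addr_64bits() performs (the kernel variant merely reads the address as one 64-bit word for speed):

	#include <stdbool.h>
	#include <stdint.h>

	/* I/G bit: lowest bit of the first octet set means a group
	 * (multicast/broadcast) address, clear means unicast */
	static bool mac_is_multicast(const uint8_t mac[6])
	{
		return mac[0] & 0x01;
	}
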
-static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
{
+ enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+ IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
int rc;
- QETH_CARD_TEXT(card, 2, "L2Dgmac");
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
+ QETH_CARD_TEXT(card, 2, "L2Rmac");
+ rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc)
- QETH_DBF_MESSAGE(2,
- "Could not delete group MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
return rc;
}
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
- if (mac->is_uc) {
- return qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_SETVMAC);
- } else {
- return qeth_l2_send_setgroupmac(card, mac->mac_addr);
- }
-}
-
-static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
- if (mac->is_uc) {
- return qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_DELVMAC);
- } else {
- return qeth_l2_send_delgroupmac(card, mac->mac_addr);
- }
-}
-
static void qeth_l2_del_all_macs(struct qeth_card *card)
{
struct qeth_mac *mac;
@@ -549,7 +532,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
QETH_CARD_TEXT(card, 3, "setmcTYP");
return -EOPNOTSUPP;
}
- QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
+ QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "setmcREC");
return -ERESTARTSYS;
@@ -597,27 +580,23 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
 * only if it is not already stored in the hash table
*
*/
-static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha,
- u8 is_uc)
+static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
{
u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
struct qeth_mac *mac;
hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
- if (is_uc == mac->is_uc &&
- !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
+ if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
return;
}
}
mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
-
if (!mac)
return;
- memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
- mac->is_uc = is_uc;
+ ether_addr_copy(mac->mac_addr, ha->addr);
mac->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->mac_htable, &mac->hnode, mac_hash);
@@ -643,26 +622,29 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
spin_lock_bh(&card->mclock);
netdev_for_each_mc_addr(ha, dev)
- qeth_l2_add_mac(card, ha, 0);
-
+ qeth_l2_add_mac(card, ha);
netdev_for_each_uc_addr(ha, dev)
- qeth_l2_add_mac(card, ha, 1);
+ qeth_l2_add_mac(card, ha);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
- if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
- qeth_l2_remove_mac(card, mac);
+ switch (mac->disp_flag) {
+ case QETH_DISP_ADDR_DELETE:
+ qeth_l2_remove_mac(card, mac->mac_addr);
hash_del(&mac->hnode);
kfree(mac);
-
- } else if (mac->disp_flag == QETH_DISP_ADDR_ADD) {
- rc = qeth_l2_write_mac(card, mac);
+ break;
+ case QETH_DISP_ADDR_ADD:
+ rc = qeth_l2_write_mac(card, mac->mac_addr);
if (rc) {
hash_del(&mac->hnode);
kfree(mac);
- } else
- mac->disp_flag = QETH_DISP_ADDR_DELETE;
- } else
+ break;
+ }
+ /* fall through */
+ default:
+ /* for next call to set_rx_mode(): */
mac->disp_flag = QETH_DISP_ADDR_DELETE;
+ }
}
spin_unlock_bh(&card->mclock);
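
The rework above turns qeth_l2_set_rx_mode() into one pass of a small mark-and-sweep cycle over the cached addresses (the layer 3 variant further below adopts the same pattern). A compact summary of the QETH_DISP_ADDR_* lifecycle:

	/*
	 * on entry: every cached MAC still carries DELETE from the last pass
	 *   - address re-listed by the stack and already known -> DO_NOTHING
	 *   - address newly listed                              -> ADD
	 * sweep:
	 *   - DELETE    -> deregister and drop from the hash table
	 *   - ADD       -> register (dropped again on failure)
	 *   - survivors -> re-marked DELETE for the next pass
	 */
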
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index e5833837b799..bdd45f4dcace 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -29,7 +29,7 @@ struct qeth_ipaddr {
*/
int ref_counter;
enum qeth_prot_versions proto;
- unsigned char mac[OSA_ADDR_LEN];
+ unsigned char mac[ETH_ALEN];
union {
struct {
unsigned int addr;
@@ -69,19 +69,20 @@ struct qeth_ipato_entry {
extern const struct attribute_group *qeth_l3_attr_groups[];
void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
-int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
int qeth_l3_create_device_attributes(struct device *);
void qeth_l3_remove_device_attributes(struct device *);
int qeth_l3_setrouting_v4(struct qeth_card *);
int qeth_l3_setrouting_v6(struct qeth_card *);
int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
-void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
- u8 *, int);
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+ enum qeth_prot_versions proto, u8 *addr,
+ int mask_bits);
int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr);
int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
- const u8 *);
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr);
void qeth_l3_update_ipato(struct qeth_card *card);
struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ef0961e18686..b0c888e86cd4 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -18,15 +18,20 @@
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
+#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
+#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/arp.h>
#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
#include <net/iucv/af_iucv.h>
@@ -37,99 +42,22 @@
static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_stop(struct net_device *);
-static void qeth_l3_set_multicast_list(struct net_device *);
+static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
-static int qeth_l3_isxdigit(char *buf)
-{
- while (*buf) {
- if (!isxdigit(*buf++))
- return 0;
- }
- return 1;
-}
-
static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
{
sprintf(buf, "%pI4", addr);
}
-static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
-{
- int count = 0, rc = 0;
- unsigned int in[4];
- char c;
-
- rc = sscanf(buf, "%u.%u.%u.%u%c",
- &in[0], &in[1], &in[2], &in[3], &c);
- if (rc != 4 && (rc != 5 || c != '\n'))
- return -EINVAL;
- for (count = 0; count < 4; count++) {
- if (in[count] > 255)
- return -EINVAL;
- addr[count] = in[count];
- }
- return 0;
-}
-
static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
{
sprintf(buf, "%pI6", addr);
}
-static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
-{
- const char *end, *end_tmp, *start;
- __u16 *in;
- char num[5];
- int num2, cnt, out, found, save_cnt;
- unsigned short in_tmp[8] = {0, };
-
- cnt = out = found = save_cnt = num2 = 0;
- end = start = buf;
- in = (__u16 *) addr;
- memset(in, 0, 16);
- while (*end) {
- end = strchr(start, ':');
- if (end == NULL) {
- end = buf + strlen(buf);
- end_tmp = strchr(start, '\n');
- if (end_tmp != NULL)
- end = end_tmp;
- out = 1;
- }
- if ((end - start)) {
- memset(num, 0, 5);
- if ((end - start) > 4)
- return -EINVAL;
- memcpy(num, start, end - start);
- if (!qeth_l3_isxdigit(num))
- return -EINVAL;
- sscanf(start, "%x", &num2);
- if (found)
- in_tmp[save_cnt++] = num2;
- else
- in[cnt++] = num2;
- if (out)
- break;
- } else {
- if (found)
- return -EINVAL;
- found = 1;
- }
- start = ++end;
- }
- if (cnt + save_cnt > 8)
- return -EINVAL;
- cnt = 7;
- while (save_cnt)
- in[cnt--] = in_tmp[--save_cnt];
- return 0;
-}
-
void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
char *buf)
{
@@ -139,17 +67,6 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
qeth_l3_ipaddr6_to_string(addr, buf);
}
-int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
- __u8 *addr)
-{
- if (proto == QETH_PROT_IPV4)
- return qeth_l3_string_to_ipaddr4(buf, addr);
- else if (proto == QETH_PROT_IPV6)
- return qeth_l3_string_to_ipaddr6(buf, addr);
- else
- return -EINVAL;
-}
-
static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
int i, j;
@@ -207,8 +124,8 @@ inline int
qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
{
return addr1->proto == addr2->proto &&
- !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
- !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
+ !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
+ ether_addr_equal_64bits(addr1->mac, addr2->mac);
}
static struct qeth_ipaddr *
@@ -446,7 +363,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
+ ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
if (addr->proto == QETH_PROT_IPV6)
memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
sizeof(struct in6_addr));
@@ -582,7 +499,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
int rc = 0;
QETH_CARD_TEXT(card, 3, "setrtg6");
-#ifdef CONFIG_QETH_IPV6
if (!qeth_is_supported(card, IPA_IPV6))
return 0;
@@ -599,7 +515,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
" on %s. Type set to 'no router'.\n", rc,
QETH_CARD_IFNAME(card));
}
-#endif
return rc;
}
@@ -673,10 +588,12 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
return rc;
}
-void qeth_l3_del_ipato_entry(struct qeth_card *card,
- enum qeth_prot_versions proto, u8 *addr, int mask_bits)
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+ enum qeth_prot_versions proto, u8 *addr,
+ int mask_bits)
{
struct qeth_ipato_entry *ipatoe, *tmp;
+ int rc = -ENOENT;
QETH_CARD_TEXT(card, 2, "delipato");
@@ -691,10 +608,12 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
list_del(&ipatoe->entry);
qeth_l3_update_ipato(card);
kfree(ipatoe);
+ rc = 0;
}
}
spin_unlock_bh(&card->ip_lock);
+ return rc;
}
/*
@@ -704,7 +623,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
- int rc = 0;
+ int rc;
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
@@ -728,7 +647,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
- qeth_l3_add_ip(card, ipaddr);
+ rc = qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
@@ -737,10 +656,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
return rc;
}
-void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
- const u8 *addr)
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
+ int rc;
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
@@ -755,13 +675,14 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
}
ipaddr->type = QETH_IP_TYPE_VIPA;
} else
- return;
+ return -ENOMEM;
spin_lock_bh(&card->ip_lock);
- qeth_l3_delete_ip(card, ipaddr);
+ rc = qeth_l3_delete_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
+ return rc;
}
/*
@@ -771,7 +692,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
- int rc = 0;
+ int rc;
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
@@ -796,7 +717,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
- qeth_l3_add_ip(card, ipaddr);
+ rc = qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
@@ -805,10 +726,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
return rc;
}
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
- const u8 *addr)
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
+ int rc;
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
@@ -823,13 +745,14 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
}
ipaddr->type = QETH_IP_TYPE_RXIP;
} else
- return;
+ return -ENOMEM;
spin_lock_bh(&card->ip_lock);
- qeth_l3_delete_ip(card, ipaddr);
+ rc = qeth_l3_delete_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
+ return rc;
}
static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -896,27 +819,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
return rc;
}
-static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
-{
- if (cast_type == RTN_MULTICAST)
- return QETH_CAST_MULTICAST;
- if (cast_type == RTN_BROADCAST)
- return QETH_CAST_BROADCAST;
- return QETH_CAST_UNICAST;
-}
-
-static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
-{
- u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
- if (cast_type == RTN_MULTICAST)
- return ct | QETH_CAST_MULTICAST;
- if (cast_type == RTN_ANYCAST)
- return ct | QETH_CAST_ANYCAST;
- if (cast_type == RTN_BROADCAST)
- return ct | QETH_CAST_BROADCAST;
- return ct | QETH_CAST_UNICAST;
-}
-
static int qeth_l3_setadapter_parms(struct qeth_card *card)
{
int rc = 0;
@@ -933,7 +835,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
return rc;
}
-#ifdef CONFIG_QETH_IPV6
static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
{
@@ -949,7 +850,6 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
qeth_setassparms_cb, NULL);
return rc;
}
-#endif
static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
@@ -1045,7 +945,6 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
return rc;
}
-#ifdef CONFIG_QETH_IPV6
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
int rc;
@@ -1091,12 +990,9 @@ out:
dev_info(&card->gdev->dev, "IPV6 enabled\n");
return 0;
}
-#endif
static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
{
- int rc = 0;
-
QETH_CARD_TEXT(card, 3, "strtipv6");
if (!qeth_is_supported(card, IPA_IPV6)) {
@@ -1104,10 +1000,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
"IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
return 0;
}
-#ifdef CONFIG_QETH_IPV6
- rc = qeth_l3_softsetup_ipv6(card);
-#endif
- return rc ;
+ return qeth_l3_softsetup_ipv6(card);
}
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
@@ -1179,8 +1072,8 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0)
- memcpy(card->dev->dev_addr,
- cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
+ ether_addr_copy(card->dev->dev_addr,
+ cmd->data.create_destroy_addr.unique_id);
else
eth_random_addr(card->dev->dev_addr);
@@ -1328,81 +1221,22 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
}
-static void qeth_l3_get_mac_for_ipm(__be32 ipm, char *mac)
-{
- ip_eth_mc_map(ipm, mac);
-}
-
-static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card)
-{
- struct qeth_ipaddr *addr;
- int i;
-
- hash_for_each(card->ip_mc_htable, i, addr, hnode)
- addr->disp_flag = QETH_DISP_ADDR_DELETE;
-
-}
-
-static void qeth_l3_add_all_new_mc(struct qeth_card *card)
-{
- struct qeth_ipaddr *addr;
- struct hlist_node *tmp;
- int i;
- int rc;
-
- hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
- if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
- rc = qeth_l3_register_addr_entry(card, addr);
- if (!rc || (rc == IPA_RC_LAN_OFFLINE))
- addr->ref_counter = 1;
- else {
- hash_del(&addr->hnode);
- kfree(addr);
- }
- }
- }
-
-}
-
-static void qeth_l3_delete_nonused_mc(struct qeth_card *card)
-{
- struct qeth_ipaddr *addr;
- struct hlist_node *tmp;
- int i;
- int rc;
-
- hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
- if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
- rc = qeth_l3_deregister_addr_entry(card, addr);
- if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) {
- hash_del(&addr->hnode);
- kfree(addr);
- }
- }
- }
-
-}
-
-
static void
qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
{
struct ip_mc_list *im4;
struct qeth_ipaddr *tmp, *ipm;
- char buf[MAX_ADDR_LEN];
QETH_CARD_TEXT(card, 4, "addmc");
tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!tmp)
- return;
+ if (!tmp)
+ return;
for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
im4 = rcu_dereference(im4->next_rcu)) {
- qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
-
+ ip_eth_mc_map(im4->multiaddr, tmp->mac);
tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
- memcpy(tmp->mac, buf, sizeof(tmp->mac));
tmp->is_multicast = 1;
ipm = qeth_l3_ip_from_hash(card, tmp);
@@ -1412,7 +1246,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!ipm)
continue;
- memcpy(ipm->mac, buf, sizeof(tmp->mac));
+ ether_addr_copy(ipm->mac, tmp->mac);
ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
ipm->is_multicast = 1;
ipm->disp_flag = QETH_DISP_ADDR_ADD;
@@ -1466,25 +1300,21 @@ unlock:
rcu_read_unlock();
}
-#ifdef CONFIG_QETH_IPV6
-static void
-qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
+static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
+ struct inet6_dev *in6_dev)
{
struct qeth_ipaddr *ipm;
struct ifmcaddr6 *im6;
struct qeth_ipaddr *tmp;
- char buf[MAX_ADDR_LEN];
QETH_CARD_TEXT(card, 4, "addmc6");
tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (!tmp)
- return;
+ if (!tmp)
+ return;
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
- ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
-
- memcpy(tmp->mac, buf, sizeof(tmp->mac));
+ ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
sizeof(struct in6_addr));
tmp->is_multicast = 1;
@@ -1499,7 +1329,7 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
if (!ipm)
continue;
- memcpy(ipm->mac, buf, OSA_ADDR_LEN);
+ ether_addr_copy(ipm->mac, tmp->mac);
memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
sizeof(struct in6_addr));
ipm->is_multicast = 1;
@@ -1560,7 +1390,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
rcu_read_unlock();
in6_dev_put(in6_dev);
}
-#endif /* CONFIG_QETH_IPV6 */
static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
unsigned short vid)
@@ -1600,9 +1429,8 @@ out:
}
static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
- unsigned short vid)
+ unsigned short vid)
{
-#ifdef CONFIG_QETH_IPV6
struct inet6_dev *in6_dev;
struct inet6_ifaddr *ifa;
struct qeth_ipaddr *addr;
@@ -1637,7 +1465,6 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
kfree(addr);
out:
in6_dev_put(in6_dev);
-#endif /* CONFIG_QETH_IPV6 */
}
static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
@@ -1672,44 +1499,31 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
/* unregister IP addresses of vlan device */
qeth_l3_free_vlan_addresses(card, vid);
clear_bit(vid, card->active_vlans);
- qeth_l3_set_multicast_list(card->dev);
+ qeth_l3_set_rx_mode(dev);
return 0;
}
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
- __u16 prot;
- struct iphdr *ip_hdr;
- unsigned char tg_addr[MAX_ADDR_LEN];
-
if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
- prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
- ETH_P_IP;
+ u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+ ETH_P_IP;
+ unsigned char tg_addr[ETH_ALEN];
+
+ skb_reset_network_header(skb);
switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
case QETH_CAST_MULTICAST:
- switch (prot) {
-#ifdef CONFIG_QETH_IPV6
- case ETH_P_IPV6:
- ndisc_mc_map((struct in6_addr *)
- skb->data + 24,
- tg_addr, card->dev, 0);
- break;
-#endif
- case ETH_P_IP:
- ip_hdr = (struct iphdr *)skb->data;
- ip_eth_mc_map(ip_hdr->daddr, tg_addr);
- break;
- default:
- memcpy(tg_addr, card->dev->broadcast,
- card->dev->addr_len);
- }
+ if (prot == ETH_P_IP)
+ ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+ else
+ ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+
card->stats.multicast++;
skb->pkt_type = PACKET_MULTICAST;
break;
case QETH_CAST_BROADCAST:
- memcpy(tg_addr, card->dev->broadcast,
- card->dev->addr_len);
+ ether_addr_copy(tg_addr, card->dev->broadcast);
card->stats.multicast++;
skb->pkt_type = PACKET_BROADCAST;
break;
@@ -1721,12 +1535,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
skb->pkt_type = PACKET_OTHERHOST;
else
skb->pkt_type = PACKET_HOST;
- memcpy(tg_addr, card->dev->dev_addr,
- card->dev->addr_len);
+ ether_addr_copy(tg_addr, card->dev->dev_addr);
}
if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
card->dev->header_ops->create(skb, card->dev, prot,
- tg_addr, &hdr->hdr.l3.dest_addr[2],
+ tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
card->dev->addr_len);
else
card->dev->header_ops->create(skb, card->dev, prot,
@@ -1741,7 +1554,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
hdr->hdr.l3.vlan_id :
- *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+ hdr->hdr.l3.next_hop.rx.vlan_id;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
}
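
For reference, ip_eth_mc_map() and ipv6_eth_mc_map() used above implement the standard group-address mappings (the IPv6 case analogously uses 33:33 plus the low 32 bits of the group). A standalone sketch of the IPv4 case, per RFC 1112:

	#include <stdint.h>

	/* map an IPv4 multicast group (network byte order) to its
	 * Ethernet MAC: 01:00:5e plus the low 23 bits of the address */
	static void ipv4_mc_to_mac(const uint8_t ip[4], uint8_t mac[6])
	{
		mac[0] = 0x01;
		mac[1] = 0x00;
		mac[2] = 0x5e;
		mac[3] = ip[1] & 0x7f;	/* top bit of the 24 is dropped */
		mac[4] = ip[2];
		mac[5] = ip[3];
	}
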
@@ -1949,26 +1762,46 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
}
}
-static void qeth_l3_set_multicast_list(struct net_device *dev)
+static void qeth_l3_set_rx_mode(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ struct qeth_ipaddr *addr;
+ struct hlist_node *tmp;
+ int i, rc;
QETH_CARD_TEXT(card, 3, "setmulti");
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
if (!card->options.sniffer) {
-
spin_lock_bh(&card->mclock);
- qeth_l3_mark_all_mc_to_be_deleted(card);
-
qeth_l3_add_multicast_ipv4(card);
-#ifdef CONFIG_QETH_IPV6
qeth_l3_add_multicast_ipv6(card);
-#endif
- qeth_l3_delete_nonused_mc(card);
- qeth_l3_add_all_new_mc(card);
+
+ hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+ switch (addr->disp_flag) {
+ case QETH_DISP_ADDR_DELETE:
+ rc = qeth_l3_deregister_addr_entry(card, addr);
+ if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
+ break;
+ case QETH_DISP_ADDR_ADD:
+ rc = qeth_l3_register_addr_entry(card, addr);
+ if (rc && rc != IPA_RC_LAN_OFFLINE) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ break;
+ }
+ addr->ref_counter = 1;
+ /* fall through */
+ default:
+ /* for next call to set_rx_mode(): */
+ addr->disp_flag = QETH_DISP_ADDR_DELETE;
+ }
+ }
spin_unlock_bh(&card->mclock);
@@ -2237,12 +2070,10 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
rc = -EFAULT;
goto free_and_out;
}
-#ifdef CONFIG_QETH_IPV6
if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
/* fails in case of GuestLAN QDIO mode */
qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
}
-#endif
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
QETH_CARD_TEXT(card, 4, "qactf");
rc = -EFAULT;
@@ -2422,9 +2253,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
-static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l3_get_cast_type(struct sk_buff *skb)
{
- int cast_type = RTN_UNSPEC;
struct neighbour *n = NULL;
struct dst_entry *dst;
@@ -2433,48 +2263,34 @@ static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
if (dst)
n = dst_neigh_lookup_skb(dst, skb);
if (n) {
- cast_type = n->type;
+ int cast_type = n->type;
+
rcu_read_unlock();
neigh_release(n);
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
return cast_type;
- else
- return RTN_UNSPEC;
+ return RTN_UNSPEC;
}
rcu_read_unlock();
- /* try something else */
+	/* no neighbour (e.g. AF_PACKET), fall back to the target's IP address ... */
if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
- return (skb_network_header(skb)[24] == 0xff) ?
- RTN_MULTICAST : 0;
+ return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
+ RTN_MULTICAST : RTN_UNSPEC;
else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
- return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
- RTN_MULTICAST : 0;
- /* ... */
- if (!memcmp(skb->data, skb->dev->broadcast, 6))
+ return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
+ RTN_MULTICAST : RTN_UNSPEC;
+
+ /* ... and MAC address */
+ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
return RTN_BROADCAST;
- else {
- u16 hdr_mac;
-
- hdr_mac = *((u16 *)skb->data);
- /* tr multicast? */
- switch (card->info.link_type) {
- case QETH_LINK_TYPE_HSTR:
- case QETH_LINK_TYPE_LANE_TR:
- if ((hdr_mac == QETH_TR_MAC_NC) ||
- (hdr_mac == QETH_TR_MAC_C))
- return RTN_MULTICAST;
- break;
- /* eth or so multicast? */
- default:
- if ((hdr_mac == QETH_ETH_MAC_V4) ||
- (hdr_mac == QETH_ETH_MAC_V6))
- return RTN_MULTICAST;
- }
- }
- return cast_type;
+ if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+ return RTN_MULTICAST;
+
+ /* default to unicast */
+ return RTN_UNSPEC;
}
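
The fallback tests above reduce to the standard address-class checks; equivalent standalone forms (illustrative only — the kernel helpers operate on __be32/struct in6_addr directly):

	#include <stdbool.h>
	#include <stdint.h>

	/* IPv4 multicast is 224.0.0.0/4: first octet 224..239 */
	static bool ipv4_is_mcast(const uint8_t addr[4])
	{
		return (addr[0] & 0xf0) == 0xe0;
	}

	/* IPv6 multicast is ff00::/8 */
	static bool ipv6_is_mcast(const uint8_t addr[16])
	{
		return addr[0] == 0xff;
	}
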
static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
@@ -2494,17 +2310,27 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
daddr[0] = 0xfe;
daddr[1] = 0x80;
memcpy(&daddr[8], iucv_hdr->destUserID, 8);
- memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
+ memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
}
-static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type)
+static u8 qeth_l3_cast_type_to_flag(int cast_type)
{
- struct dst_entry *dst;
+ if (cast_type == RTN_MULTICAST)
+ return QETH_CAST_MULTICAST;
+ if (cast_type == RTN_ANYCAST)
+ return QETH_CAST_ANYCAST;
+ if (cast_type == RTN_BROADCAST)
+ return QETH_CAST_BROADCAST;
+ return QETH_CAST_UNICAST;
+}
+static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb, int ipv, int cast_type,
+ unsigned int data_len)
+{
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
- hdr->hdr.l3.ext_flags = 0;
+ hdr->hdr.l3.length = data_len;
/*
* before we're going to overwrite this location with next hop ip.
@@ -2518,44 +2344,40 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
}
- hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+ /* OSA only: */
+ if (!ipv) {
+ hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
+ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+ skb->dev->broadcast))
+ hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
+ else
+ hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
+ QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
+ return;
+ }
+ hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
rcu_read_lock();
- dst = skb_dst(skb);
if (ipv == 4) {
- struct rtable *rt = (struct rtable *) dst;
- __be32 *pkey = &ip_hdr(skb)->daddr;
-
- if (rt && rt->rt_gateway)
- pkey = &rt->rt_gateway;
+ struct rtable *rt = skb_rtable(skb);
- /* IPv4 */
- hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
- memset(hdr->hdr.l3.dest_addr, 0, 12);
- *((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey;
- } else if (ipv == 6) {
- struct rt6_info *rt = (struct rt6_info *) dst;
- struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
+ *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
+ rt_nexthop(rt, ip_hdr(skb)->daddr) :
+ ip_hdr(skb)->daddr;
+ } else {
+ /* IPv6 */
+ const struct rt6_info *rt = skb_rt6_info(skb);
+ const struct in6_addr *next_hop;
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
- pkey = &rt->rt6i_gateway;
+ next_hop = &rt->rt6i_gateway;
+ else
+ next_hop = &ipv6_hdr(skb)->daddr;
+ memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
- /* IPv6 */
- hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
- if (card->info.type == QETH_CARD_TYPE_IQD)
- hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
- memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
- } else {
- if (!memcmp(skb->data + sizeof(struct qeth_hdr),
- skb->dev->broadcast, 6)) {
- /* broadcast? */
- hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
- QETH_HDR_PASSTHRU;
- } else {
- hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
- QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
- QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
- }
+ hdr->hdr.l3.flags |= QETH_HDR_IPV6;
+ if (card->info.type != QETH_CARD_TYPE_IQD)
+ hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
}
rcu_read_unlock();
}
@@ -2587,7 +2409,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
/*fix header to TSO values ...*/
hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
- hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
/*set values which are fix for the first approach ...*/
hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
hdr->ext.imb_hdr_no = 1;
@@ -2655,7 +2476,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = NULL;
int ipv = qeth_get_ip_version(skb);
- int cast_type = qeth_l3_get_cast_type(card, skb);
+ int cast_type = qeth_l3_get_cast_type(skb);
struct qeth_qdio_out_q *queue =
card->qdio.out_qs[card->qdio.do_prio_queueing
|| (cast_type && card->info.is_multicast_different) ?
@@ -2748,21 +2569,23 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (use_tso) {
hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
- qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
+ qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+ new_skb->len - sizeof(struct qeth_hdr_tso));
qeth_tso_fill_header(card, hdr, new_skb);
hdr_elements++;
} else {
if (data_offset < 0) {
hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
- qeth_l3_fill_header(card, hdr, new_skb, ipv,
- cast_type);
+ qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+ new_skb->len -
+ sizeof(struct qeth_hdr));
} else {
if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
else {
qeth_l3_fill_header(card, hdr, new_skb, ipv,
- cast_type);
- hdr->hdr.l3.length = new_skb->len - data_offset;
+ cast_type,
+ new_skb->len - data_offset);
}
}
@@ -2930,7 +2753,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = qeth_l3_set_multicast_list,
+ .ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_fix_features,
@@ -2947,7 +2770,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_features_check,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = qeth_l3_set_multicast_list,
+ .ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_fix_features,
@@ -3145,7 +2968,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
__qeth_l3_open(card->dev);
else
dev_open(card->dev);
- qeth_l3_set_multicast_list(card->dev);
+ qeth_l3_set_rx_mode(card->dev);
qeth_recover_features(card->dev);
rtnl_unlock();
}
@@ -3371,10 +3194,6 @@ static struct notifier_block qeth_l3_ip_notifier = {
NULL,
};
-#ifdef CONFIG_QETH_IPV6
-/**
- * IPv6 event handler
- */
static int qeth_l3_ip6_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -3419,7 +3238,6 @@ static struct notifier_block qeth_l3_ip6_notifier = {
qeth_l3_ip6_event,
NULL,
};
-#endif
static int qeth_l3_register_notifiers(void)
{
@@ -3429,35 +3247,25 @@ static int qeth_l3_register_notifiers(void)
rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
if (rc)
return rc;
-#ifdef CONFIG_QETH_IPV6
rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
if (rc) {
unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
return rc;
}
-#else
- pr_warn("There is no IPv6 support for the layer 3 discipline\n");
-#endif
return 0;
}
static void qeth_l3_unregister_notifiers(void)
{
-
QETH_DBF_TEXT(SETUP, 5, "unregnot");
WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
-#ifdef CONFIG_QETH_IPV6
WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
-#endif /* QETH_IPV6 */
}
static int __init qeth_l3_init(void)
{
- int rc = 0;
-
pr_info("register layer 3 discipline\n");
- rc = qeth_l3_register_notifiers();
- return rc;
+ return qeth_l3_register_notifiers();
}
static void __exit qeth_l3_exit(void)
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 6ea2b528a64e..a645cfe66ddf 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -10,11 +10,23 @@
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/hashtable.h>
+#include <linux/inet.h>
#include "qeth_l3.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
+static int qeth_l3_string_to_ipaddr(const char *buf,
+ enum qeth_prot_versions proto, u8 *addr)
+{
+ const char *end;
+
+ if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) ||
+ (proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end)))
+ return -EINVAL;
+ return 0;
+}
+
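
The new helper delegates all parsing to in4_pton()/in6_pton(). A user-space analogue built on libc's inet_pton() behaves the same way for well-formed input (sketch, for illustration only):

	#include <arpa/inet.h>
	#include <stdio.h>

	/* 0 on success, -1 on malformed input (cf. -EINVAL above) */
	static int string_to_ipaddr(const char *buf, int af,
				    unsigned char *addr)
	{
		return inet_pton(af, buf, addr) == 1 ? 0 : -1;
	}

	int main(void)
	{
		unsigned char a4[4], a6[16];

		printf("%d\n", string_to_ipaddr("10.0.0.1", AF_INET, a4));  /* 0 */
		printf("%d\n", string_to_ipaddr("fe80::1", AF_INET6, a6));  /* 0 */
		printf("%d\n", string_to_ipaddr("bogus", AF_INET, a4));     /* -1 */
		return 0;
	}
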
static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
struct qeth_routing_info *route, char *buf)
{
@@ -262,7 +274,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_ipaddr *addr;
char *tmp;
- int i;
+ int rc, i;
if (!card)
return -EINVAL;
@@ -331,11 +343,11 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
return -ENOMEM;
spin_lock_bh(&card->ip_lock);
- qeth_l3_add_ip(card, addr);
+ rc = qeth_l3_add_ip(card, addr);
spin_unlock_bh(&card->ip_lock);
kfree(addr);
- return count;
+ return rc ? rc : count;
}
static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
@@ -573,7 +585,7 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (!rc)
- qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+ rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -693,22 +705,25 @@ static const struct attribute_group qeth_device_ipato_group = {
.attrs = qeth_ipato_device_attrs,
};
-static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
- enum qeth_prot_versions proto)
+static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
+ enum qeth_prot_versions proto,
+ enum qeth_ip_types type)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_ipaddr *ipaddr;
char addr_str[40];
int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i;
+ if (!card)
+ return -EINVAL;
+
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
- if (ipaddr->proto != proto)
- continue;
- if (ipaddr->type != QETH_IP_TYPE_VIPA)
+ if (ipaddr->proto != proto || ipaddr->type != type)
continue;
/* String must not be longer than PAGE_SIZE. So we check if
	 * string length gets near PAGE_SIZE. Then we can safely display
@@ -727,14 +742,11 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
- return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
+ return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+ QETH_IP_TYPE_VIPA);
}
static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
@@ -784,7 +796,7 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_vipae(buf, proto, addr);
if (!rc)
- qeth_l3_del_vipa(card, proto, addr);
+ rc = qeth_l3_del_vipa(card, proto, addr);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -804,14 +816,11 @@ static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
qeth_l3_dev_vipa_del4_store);
static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
- return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
+ return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+ QETH_IP_TYPE_VIPA);
}
static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
@@ -856,48 +865,12 @@ static const struct attribute_group qeth_device_vipa_group = {
.attrs = qeth_vipa_device_attrs,
};
-static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
- enum qeth_prot_versions proto)
-{
- struct qeth_ipaddr *ipaddr;
- char addr_str[40];
- int str_len = 0;
- int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i;
-
- entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
- entry_len += 2; /* \n + terminator */
- spin_lock_bh(&card->ip_lock);
- hash_for_each(card->ip_htable, i, ipaddr, hnode) {
- if (ipaddr->proto != proto)
- continue;
- if (ipaddr->type != QETH_IP_TYPE_RXIP)
- continue;
- /* String must not be longer than PAGE_SIZE. So we check if
- * string length gets near PAGE_SIZE. Then we can savely display
- * the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - str_len) <= entry_len)
- break;
- qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
- addr_str);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
- addr_str);
- }
- spin_unlock_bh(&card->ip_lock);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
-
- return str_len;
-}
-
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
- return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
+ return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+ QETH_IP_TYPE_RXIP);
}
static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
@@ -964,7 +937,7 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
if (!rc)
- qeth_l3_del_rxip(card, proto, addr);
+ rc = qeth_l3_del_rxip(card, proto, addr);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -984,14 +957,11 @@ static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
qeth_l3_dev_rxip_del4_store);
static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
- return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
+ return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+ QETH_IP_TYPE_RXIP);
}
static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
diff --git a/drivers/sbus/char/Kconfig b/drivers/sbus/char/Kconfig
index 5ba684f73ab8..a785aa7660c3 100644
--- a/drivers/sbus/char/Kconfig
+++ b/drivers/sbus/char/Kconfig
@@ -70,5 +70,13 @@ config DISPLAY7SEG
another UltraSPARC-IIi-cEngine boardset with a 7-segment display,
you should say N to this option.
+config ORACLE_DAX
+ tristate "Oracle Data Analytics Accelerator"
+ default m if SPARC64
+ help
+ Driver for Oracle Data Analytics Accelerator, which is
+ a coprocessor that performs database operations in hardware.
+	  It is available only on M7- and M8-based systems.
+
endmenu
diff --git a/drivers/sbus/char/Makefile b/drivers/sbus/char/Makefile
index ae478144c551..8c48ed96683f 100644
--- a/drivers/sbus/char/Makefile
+++ b/drivers/sbus/char/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_SUN_OPENPROMIO) += openprom.o
obj-$(CONFIG_TADPOLE_TS102_UCTRL) += uctrl.o
obj-$(CONFIG_SUN_JSFLASH) += jsflash.o
obj-$(CONFIG_BBC_I2C) += bbc.o
+obj-$(CONFIG_ORACLE_DAX) += oradax.o
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
new file mode 100644
index 000000000000..03dc04739225
--- /dev/null
+++ b/drivers/sbus/char/oradax.c
@@ -0,0 +1,1005 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Oracle Data Analytics Accelerator (DAX)
+ *
+ * DAX is a coprocessor which resides on the SPARC M7 (DAX1) and M8
+ * (DAX2) processor chips, and has direct access to the CPU's L3
+ * caches as well as physical memory. It can perform several
+ * operations on data streams with various input and output formats.
+ * The driver provides a transport mechanism only and has limited
+ * knowledge of the various opcodes and data formats. A user space
+ * library provides high level services and translates these into low
+ * level commands which are then passed into the driver and
+ * subsequently the hypervisor and the coprocessor. The library is
+ * the recommended way for applications to use the coprocessor, and
+ * the driver interface is not intended for general use.
+ *
+ * See Documentation/sparc/oradax/oracle_dax.txt for more details.
+ */
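
The submit/poll/read transport described in the comment above maps onto a short user-space flow. A minimal sketch, assuming the uapi header <asm/oradax.h> provides DAX_MMAP_LEN and union ccb_result (both used by this driver), and assuming a hypothetical device node name — the real name is derived from DAX_NAME:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <asm/oradax.h>	/* DAX_MMAP_LEN, union ccb_result */

	int main(void)
	{
		union ccb_result res;
		void *ca;
		int fd;

		fd = open("/dev/oradax1", O_RDWR);	/* hypothetical node name */
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* completion area: read-only, fixed length (see dax_devmap()) */
		ca = mmap(NULL, DAX_MMAP_LEN, PROT_READ, MAP_SHARED, fd, 0);
		if (ca == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* a write() whose length is a multiple of the CCB size would
		 * submit CCBs; building a valid CCB is the user library's
		 * job and is elided here */

		/* after a successful submission, the same thread read()s
		 * the ccb_result back */
		if (read(fd, &res, sizeof(res)) != (ssize_t)sizeof(res))
			perror("read");

		close(fd);
		return 0;
	}
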
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+#include <asm/oradax.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for Oracle Data Analytics Accelerator");
+
+#define DAX_DBG_FLG_BASIC 0x01
+#define DAX_DBG_FLG_STAT 0x02
+#define DAX_DBG_FLG_INFO 0x04
+#define DAX_DBG_FLG_ALL 0xff
+
+#define dax_err(fmt, ...) pr_err("%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define dax_info(fmt, ...) pr_info("%s: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#define dax_dbg(fmt, ...) do { \
+ if (dax_debug & DAX_DBG_FLG_BASIC)\
+ dax_info(fmt, ##__VA_ARGS__); \
+ } while (0)
+#define dax_stat_dbg(fmt, ...) do { \
+ if (dax_debug & DAX_DBG_FLG_STAT) \
+ dax_info(fmt, ##__VA_ARGS__); \
+ } while (0)
+#define dax_info_dbg(fmt, ...) do { \
+ if (dax_debug & DAX_DBG_FLG_INFO) \
+ dax_info(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define DAX1_MINOR 1
+#define DAX1_MAJOR 1
+#define DAX2_MINOR 0
+#define DAX2_MAJOR 2
+
+#define DAX1_STR "ORCL,sun4v-dax"
+#define DAX2_STR "ORCL,sun4v-dax2"
+
+#define DAX_CA_ELEMS (DAX_MMAP_LEN / sizeof(struct dax_cca))
+
+#define DAX_CCB_USEC 100
+#define DAX_CCB_RETRIES 10000
+
+/* stream types */
+enum {
+ OUT,
+ PRI,
+ SEC,
+ TBL,
+ NUM_STREAM_TYPES
+};
+
+/* completion status */
+#define CCA_STAT_NOT_COMPLETED 0
+#define CCA_STAT_COMPLETED 1
+#define CCA_STAT_FAILED 2
+#define CCA_STAT_KILLED 3
+#define CCA_STAT_NOT_RUN 4
+#define CCA_STAT_PIPE_OUT 5
+#define CCA_STAT_PIPE_SRC 6
+#define CCA_STAT_PIPE_DST 7
+
+/* completion err */
+#define CCA_ERR_SUCCESS 0x0 /* no error */
+#define CCA_ERR_OVERFLOW 0x1 /* buffer overflow */
+#define CCA_ERR_DECODE 0x2 /* CCB decode error */
+#define CCA_ERR_PAGE_OVERFLOW 0x3 /* page overflow */
+#define CCA_ERR_KILLED 0x7 /* command was killed */
+#define CCA_ERR_TIMEOUT 0x8 /* Timeout */
+#define CCA_ERR_ADI 0x9 /* ADI error */
+#define CCA_ERR_DATA_FMT 0xA /* data format error */
+#define CCA_ERR_OTHER_NO_RETRY 0xE /* Other error, do not retry */
+#define CCA_ERR_OTHER_RETRY 0xF /* Other error, retry */
+#define CCA_ERR_PARTIAL_SYMBOL 0x80 /* QP partial symbol warning */
+
+/* CCB address types */
+#define DAX_ADDR_TYPE_NONE 0
+#define DAX_ADDR_TYPE_VA_ALT 1 /* secondary context */
+#define DAX_ADDR_TYPE_RA 2 /* real address */
+#define DAX_ADDR_TYPE_VA 3 /* virtual address */
+
+/* dax_header_t opcode */
+#define DAX_OP_SYNC_NOP 0x0
+#define DAX_OP_EXTRACT 0x1
+#define DAX_OP_SCAN_VALUE 0x2
+#define DAX_OP_SCAN_RANGE 0x3
+#define DAX_OP_TRANSLATE 0x4
+#define DAX_OP_SELECT 0x5
+#define DAX_OP_INVERT 0x10 /* OR with translate, scan opcodes */
+
+struct dax_header {
+ u32 ccb_version:4; /* 31:28 CCB Version */
+ /* 27:24 Sync Flags */
+ u32 pipe:1; /* Pipeline */
+ u32 longccb:1; /* Longccb. Set for scan with lu2, lu3, lu4. */
+ u32 cond:1; /* Conditional */
+ u32 serial:1; /* Serial */
+ u32 opcode:8; /* 23:16 Opcode */
+ /* 15:0 Address Type. */
+ u32 reserved:3; /* 15:13 reserved */
+ u32 table_addr_type:2; /* 12:11 Huffman Table Address Type */
+ u32 out_addr_type:3; /* 10:8 Destination Address Type */
+ u32 sec_addr_type:3; /* 7:5 Secondary Source Address Type */
+ u32 pri_addr_type:3; /* 4:2 Primary Source Address Type */
+ u32 cca_addr_type:2; /* 1:0 Completion Address Type */
+};
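
As an illustration of how these bitfields compose, a synchronous no-op header with the completion address given as a virtual address could be filled like so (a sketch using the definitions in this file, not a sanctioned API):

	/* illustrative only: minimal sync-NOP CCB header */
	static void dax_fill_nop_header(struct dax_header *hdr)
	{
		memset(hdr, 0, sizeof(*hdr));	/* version 0, no sync flags */
		hdr->opcode = DAX_OP_SYNC_NOP;
		hdr->cca_addr_type = DAX_ADDR_TYPE_VA;
	}
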
+
+struct dax_control {
+ u32 pri_fmt:4; /* 31:28 Primary Input Format */
+ u32 pri_elem_size:5; /* 27:23 Primary Input Element Size(less1) */
+ u32 pri_offset:3; /* 22:20 Primary Input Starting Offset */
+ u32 sec_encoding:1; /* 19 Secondary Input Encoding */
+ /* (must be 0 for Select) */
+ u32 sec_offset:3; /* 18:16 Secondary Input Starting Offset */
+ u32 sec_elem_size:2; /* 15:14 Secondary Input Element Size */
+ /* (must be 0 for Select) */
+ u32 out_fmt:2; /* 13:12 Output Format */
+ u32 out_elem_size:2; /* 11:10 Output Element Size */
+ u32 misc:10; /* 9:0 Opcode specific info */
+};
+
+struct dax_data_access {
+ u64 flow_ctrl:2; /* 63:62 Flow Control Type */
+ u64 pipe_target:2; /* 61:60 Pipeline Target */
+ u64 out_buf_size:20; /* 59:40 Output Buffer Size */
+ /* (cachelines less 1) */
+ u64 unused1:8; /* 39:32 Reserved, Set to 0 */
+ u64 out_alloc:5; /* 31:27 Output Allocation */
+ u64 unused2:1; /* 26 Reserved */
+ u64 pri_len_fmt:2; /* 25:24 Input Length Format */
+ u64 pri_len:24; /* 23:0 Input Element/Byte/Bit Count */
+ /* (less 1) */
+};
+
+struct dax_ccb {
+ struct dax_header hdr; /* CCB Header */
+ struct dax_control ctrl;/* Control Word */
+ void *ca; /* Completion Address */
+ void *pri; /* Primary Input Address */
+ struct dax_data_access dac; /* Data Access Control */
+ void *sec; /* Secondary Input Address */
+ u64 dword5; /* depends on opcode */
+ void *out; /* Output Address */
+ void *tbl; /* Table Address or bitmap */
+};
+
+struct dax_cca {
+ u8 status; /* user may mwait on this address */
+ u8 err; /* user visible error notification */
+ u8 rsvd[2]; /* reserved */
+ u32 n_remaining; /* for QP partial symbol warning */
+ u32 output_sz; /* output in bytes */
+ u32 rsvd2; /* reserved */
+ u64 run_cycles; /* run time in OCND2 cycles */
+ u64 run_stats; /* nothing reported in version 1.0 */
+ u32 n_processed; /* number input elements */
+ u32 rsvd3[5]; /* reserved */
+ u64 retval; /* command return value */
+ u64 rsvd4[8]; /* reserved */
+};
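
Since user space may poll (or mwait on) the status byte, completion handling reduces to the following sketch, mirroring what dax_ccb_wait() later in this file does on the kernel side:

	/* illustrative only: busy-wait for one completion entry */
	static int dax_poll_cca(volatile struct dax_cca *cca)
	{
		while (cca->status == CCA_STAT_NOT_COMPLETED)
			cpu_relax();	/* user space might mwait instead */
		return (cca->status == CCA_STAT_COMPLETED &&
			cca->err == CCA_ERR_SUCCESS) ? 0 : -1;
	}
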
+
+/* per thread CCB context */
+struct dax_ctx {
+ struct dax_ccb *ccb_buf;
+ u64 ccb_buf_ra; /* cached RA of ccb_buf */
+ struct dax_cca *ca_buf;
+ u64 ca_buf_ra; /* cached RA of ca_buf */
+ struct page *pages[DAX_CA_ELEMS][NUM_STREAM_TYPES];
+ /* array of locked pages */
+ struct task_struct *owner; /* thread that owns ctx */
+ struct task_struct *client; /* requesting thread */
+ union ccb_result result;
+ u32 ccb_count;
+ u32 fail_count;
+};
+
+/* driver public entry points */
+static int dax_open(struct inode *inode, struct file *file);
+static ssize_t dax_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos);
+static ssize_t dax_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos);
+static int dax_devmap(struct file *f, struct vm_area_struct *vma);
+static int dax_close(struct inode *i, struct file *f);
+
+static const struct file_operations dax_fops = {
+ .owner = THIS_MODULE,
+ .open = dax_open,
+ .read = dax_read,
+ .write = dax_write,
+ .mmap = dax_devmap,
+ .release = dax_close,
+};
+
+static int dax_ccb_exec(struct dax_ctx *ctx, const char __user *buf,
+ size_t count, loff_t *ppos);
+static int dax_ccb_info(u64 ca, struct ccb_info_result *info);
+static int dax_ccb_kill(u64 ca, u16 *kill_res);
+
+static struct cdev c_dev;
+static struct class *cl;
+static dev_t first;
+
+static int max_ccb_version;
+static int dax_debug;
+module_param(dax_debug, int, 0644);
+MODULE_PARM_DESC(dax_debug, "Debug flags");
+
+static int __init dax_attach(void)
+{
+ unsigned long dummy, hv_rv, major, minor, minor_requested, max_ccbs;
+ struct mdesc_handle *hp = mdesc_grab();
+ char *prop, *dax_name;
+ bool found = false;
+ int len, ret = 0;
+ u64 pn;
+
+ if (hp == NULL) {
+ dax_err("Unable to grab mdesc");
+ return -ENODEV;
+ }
+
+ mdesc_for_each_node_by_name(hp, pn, "virtual-device") {
+ prop = (char *)mdesc_get_property(hp, pn, "name", &len);
+ if (prop == NULL)
+ continue;
+ if (strncmp(prop, "dax", strlen("dax")))
+ continue;
+ dax_dbg("Found node 0x%llx = %s", pn, prop);
+
+ prop = (char *)mdesc_get_property(hp, pn, "compatible", &len);
+ if (prop == NULL)
+ continue;
+ dax_dbg("Found node 0x%llx = %s", pn, prop);
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ dax_err("No DAX device found");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (strncmp(prop, DAX2_STR, strlen(DAX2_STR)) == 0) {
+ dax_name = DAX_NAME "2";
+ major = DAX2_MAJOR;
+ minor_requested = DAX2_MINOR;
+ max_ccb_version = 1;
+ dax_dbg("MD indicates DAX2 coprocessor");
+ } else if (strncmp(prop, DAX1_STR, strlen(DAX1_STR)) == 0) {
+ dax_name = DAX_NAME "1";
+ major = DAX1_MAJOR;
+ minor_requested = DAX1_MINOR;
+ max_ccb_version = 0;
+ dax_dbg("MD indicates DAX1 coprocessor");
+ } else {
+ dax_err("Unknown dax type: %s", prop);
+ ret = -ENODEV;
+ goto done;
+ }
+
+ minor = minor_requested;
+ dax_dbg("Registering DAX HV api with major %ld minor %ld", major,
+ minor);
+ if (sun4v_hvapi_register(HV_GRP_DAX, major, &minor)) {
+ dax_err("hvapi_register failed");
+ ret = -ENODEV;
+ goto done;
+ } else {
+ dax_dbg("Max minor supported by HV = %ld (major %ld)", minor,
+ major);
+ minor = min(minor, minor_requested);
+ dax_dbg("registered DAX major %ld minor %ld", major, minor);
+ }
+
+ /* submit a zero length ccb array to query coprocessor queue size */
+ hv_rv = sun4v_ccb_submit(0, 0, HV_CCB_QUERY_CMD, 0, &max_ccbs, &dummy);
+ if (hv_rv != 0) {
+ dax_err("get_hwqueue_size failed with status=%ld and max_ccbs=%ld",
+ hv_rv, max_ccbs);
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (max_ccbs != DAX_MAX_CCBS) {
+ dax_err("HV reports unsupported max_ccbs=%ld", max_ccbs);
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (alloc_chrdev_region(&first, 0, 1, DAX_NAME) < 0) {
+ dax_err("alloc_chrdev_region failed");
+ ret = -ENXIO;
+ goto done;
+ }
+
+ cl = class_create(THIS_MODULE, DAX_NAME);
+ if (IS_ERR(cl)) {
+ dax_err("class_create failed");
+ ret = PTR_ERR(cl);
+ goto class_error;
+ }
+
+	if (IS_ERR(device_create(cl, NULL, first, NULL, dax_name))) {
+ dax_err("device_create failed");
+ ret = -ENXIO;
+ goto device_error;
+ }
+
+ cdev_init(&c_dev, &dax_fops);
+	if (cdev_add(&c_dev, first, 1) < 0) {
+ dax_err("cdev_add failed");
+ ret = -ENXIO;
+ goto cdev_error;
+ }
+
+ pr_info("Attached DAX module\n");
+ goto done;
+
+cdev_error:
+ device_destroy(cl, first);
+device_error:
+ class_destroy(cl);
+class_error:
+ unregister_chrdev_region(first, 1);
+done:
+ mdesc_release(hp);
+ return ret;
+}
+module_init(dax_attach);
+
+static void __exit dax_detach(void)
+{
+ pr_info("Cleaning up DAX module\n");
+ cdev_del(&c_dev);
+ device_destroy(cl, first);
+ class_destroy(cl);
+ unregister_chrdev_region(first, 1);
+}
+module_exit(dax_detach);
+
+/* map completion area */
+static int dax_devmap(struct file *f, struct vm_area_struct *vma)
+{
+ struct dax_ctx *ctx = (struct dax_ctx *)f->private_data;
+ size_t len = vma->vm_end - vma->vm_start;
+
+ dax_dbg("len=0x%lx, flags=0x%lx", len, vma->vm_flags);
+
+ if (ctx->owner != current) {
+ dax_dbg("devmap called from wrong thread");
+ return -EINVAL;
+ }
+
+ if (len != DAX_MMAP_LEN) {
+ dax_dbg("len(%lu) != DAX_MMAP_LEN(%d)", len, DAX_MMAP_LEN);
+ return -EINVAL;
+ }
+
+ /* completion area is mapped read-only for user */
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ if (remap_pfn_range(vma, vma->vm_start, ctx->ca_buf_ra >> PAGE_SHIFT,
+ len, vma->vm_page_prot))
+ return -EAGAIN;
+
+ dax_dbg("mmapped completion area at uva 0x%lx", vma->vm_start);
+ return 0;
+}
+
+/* Unlock user pages. Called during dequeue or device close */
+static void dax_unlock_pages(struct dax_ctx *ctx, int ccb_index, int nelem)
+{
+ int i, j;
+
+ for (i = ccb_index; i < ccb_index + nelem; i++) {
+ for (j = 0; j < NUM_STREAM_TYPES; j++) {
+ struct page *p = ctx->pages[i][j];
+
+ if (p) {
+ dax_dbg("freeing page %p", p);
+ if (j == OUT)
+ set_page_dirty(p);
+ put_page(p);
+ ctx->pages[i][j] = NULL;
+ }
+ }
+ }
+}
+
+static int dax_lock_page(void *va, struct page **p)
+{
+ int ret;
+
+ dax_dbg("uva %p", va);
+
+ ret = get_user_pages_fast((unsigned long)va, 1, 1, p);
+ if (ret == 1) {
+ dax_dbg("locked page %p, for VA %p", *p, va);
+ return 0;
+ }
+
+ dax_dbg("get_user_pages failed, va=%p, ret=%d", va, ret);
+ return -1;
+}
+
+static int dax_lock_pages(struct dax_ctx *ctx, int idx,
+ int nelem, u64 *err_va)
+{
+ int i;
+
+ for (i = 0; i < nelem; i++) {
+ struct dax_ccb *ccbp = &ctx->ccb_buf[i];
+
+ /*
+ * For each address in the CCB whose type is virtual,
+ * lock the page and change the type to virtual alternate
+ * context. On error, return the offending address in
+ * err_va.
+ */
+ if (ccbp->hdr.out_addr_type == DAX_ADDR_TYPE_VA) {
+ dax_dbg("output");
+ if (dax_lock_page(ccbp->out,
+ &ctx->pages[i + idx][OUT]) != 0) {
+ *err_va = (u64)ccbp->out;
+ goto error;
+ }
+ ccbp->hdr.out_addr_type = DAX_ADDR_TYPE_VA_ALT;
+ }
+
+ if (ccbp->hdr.pri_addr_type == DAX_ADDR_TYPE_VA) {
+ dax_dbg("input");
+ if (dax_lock_page(ccbp->pri,
+ &ctx->pages[i + idx][PRI]) != 0) {
+ *err_va = (u64)ccbp->pri;
+ goto error;
+ }
+ ccbp->hdr.pri_addr_type = DAX_ADDR_TYPE_VA_ALT;
+ }
+
+ if (ccbp->hdr.sec_addr_type == DAX_ADDR_TYPE_VA) {
+ dax_dbg("sec input");
+ if (dax_lock_page(ccbp->sec,
+ &ctx->pages[i + idx][SEC]) != 0) {
+ *err_va = (u64)ccbp->sec;
+ goto error;
+ }
+ ccbp->hdr.sec_addr_type = DAX_ADDR_TYPE_VA_ALT;
+ }
+
+ if (ccbp->hdr.table_addr_type == DAX_ADDR_TYPE_VA) {
+ dax_dbg("tbl");
+ if (dax_lock_page(ccbp->tbl,
+ &ctx->pages[i + idx][TBL]) != 0) {
+ *err_va = (u64)ccbp->tbl;
+ goto error;
+ }
+ ccbp->hdr.table_addr_type = DAX_ADDR_TYPE_VA_ALT;
+ }
+
+ /* skip over 2nd 64 bytes of long CCB */
+ if (ccbp->hdr.longccb)
+ i++;
+ }
+ return DAX_SUBMIT_OK;
+
+error:
+ dax_unlock_pages(ctx, idx, nelem);
+ return DAX_SUBMIT_ERR_NOACCESS;
+}
+
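+/*
+ * Poll a CCB's completion area status until it completes; if it never
+ * does, ask the hypervisor to kill the CCB.
+ */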
+static void dax_ccb_wait(struct dax_ctx *ctx, int idx)
+{
+ int ret, nretries;
+ u16 kill_res;
+
+ dax_dbg("idx=%d", idx);
+
+ for (nretries = 0; nretries < DAX_CCB_RETRIES; nretries++) {
+ if (ctx->ca_buf[idx].status == CCA_STAT_NOT_COMPLETED)
+ udelay(DAX_CCB_USEC);
+ else
+ return;
+ }
+ dax_dbg("ctx (%p): CCB[%d] timed out, wait usec=%d, retries=%d. Killing ccb",
+ (void *)ctx, idx, DAX_CCB_USEC, DAX_CCB_RETRIES);
+
+ ret = dax_ccb_kill(ctx->ca_buf_ra + idx * sizeof(struct dax_cca),
+ &kill_res);
+ dax_dbg("Kill CCB[%d] %s", idx, ret ? "failed" : "succeeded");
+}
+
+static int dax_close(struct inode *ino, struct file *f)
+{
+ struct dax_ctx *ctx = (struct dax_ctx *)f->private_data;
+ int i;
+
+ f->private_data = NULL;
+
+ for (i = 0; i < DAX_CA_ELEMS; i++) {
+ if (ctx->ca_buf[i].status == CCA_STAT_NOT_COMPLETED) {
+ dax_dbg("CCB[%d] not completed", i);
+ dax_ccb_wait(ctx, i);
+ }
+ dax_unlock_pages(ctx, i, 1);
+ }
+
+ kfree(ctx->ccb_buf);
+ kfree(ctx->ca_buf);
+ dax_stat_dbg("CCBs: %d good, %d bad", ctx->ccb_count, ctx->fail_count);
+ kfree(ctx);
+
+ return 0;
+}
+
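+/*
+ * A read returns the result of the most recent command to the thread
+ * that submitted it, completing the driver's command protocol.
+ */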
+static ssize_t dax_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dax_ctx *ctx = f->private_data;
+
+ if (ctx->client != current)
+ return -EUSERS;
+
+ ctx->client = NULL;
+
+ if (count != sizeof(union ccb_result))
+ return -EINVAL;
+ if (copy_to_user(buf, &ctx->result, sizeof(union ccb_result)))
+ return -EFAULT;
+ return count;
+}
+
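+/*
+ * A write is either an array of CCBs to submit (a multiple of the
+ * 64-byte CCB size) or a single immediate command (kill, info or
+ * dequeue) of exactly sizeof(struct dax_command).
+ */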
+static ssize_t dax_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dax_ctx *ctx = f->private_data;
+ struct dax_command hdr;
+ unsigned long ca;
+ int i, idx, ret;
+
+ if (ctx->client != NULL)
+ return -EINVAL;
+
+ if (count == 0 || count > DAX_MAX_CCBS * sizeof(struct dax_ccb))
+ return -EINVAL;
+
+ if (count % sizeof(struct dax_ccb) == 0)
+ return dax_ccb_exec(ctx, buf, count, ppos); /* CCB EXEC */
+
+ if (count != sizeof(struct dax_command))
+ return -EINVAL;
+
+ /* immediate command */
+ if (ctx->owner != current)
+ return -EUSERS;
+
+ if (copy_from_user(&hdr, buf, sizeof(hdr)))
+ return -EFAULT;
+
+ ca = ctx->ca_buf_ra + hdr.ca_offset;
+
+ switch (hdr.command) {
+ case CCB_KILL:
+ if (hdr.ca_offset >= DAX_MMAP_LEN) {
+ dax_dbg("invalid ca_offset (%d) >= ca_buflen (%d)",
+ hdr.ca_offset, DAX_MMAP_LEN);
+ return -EINVAL;
+ }
+
+ ret = dax_ccb_kill(ca, &ctx->result.kill.action);
+ if (ret != 0) {
+ dax_dbg("dax_ccb_kill failed (ret=%d)", ret);
+ return ret;
+ }
+
+ dax_info_dbg("killed (ca_offset %d)", hdr.ca_offset);
+ idx = hdr.ca_offset / sizeof(struct dax_cca);
+ ctx->ca_buf[idx].status = CCA_STAT_KILLED;
+ ctx->ca_buf[idx].err = CCA_ERR_KILLED;
+ ctx->client = current;
+ return count;
+
+ case CCB_INFO:
+ if (hdr.ca_offset >= DAX_MMAP_LEN) {
+ dax_dbg("invalid ca_offset (%d) >= ca_buflen (%d)",
+ hdr.ca_offset, DAX_MMAP_LEN);
+ return -EINVAL;
+ }
+
+ ret = dax_ccb_info(ca, &ctx->result.info);
+ if (ret != 0) {
+ dax_dbg("dax_ccb_info failed (ret=%d)", ret);
+ return ret;
+ }
+
+ dax_info_dbg("info succeeded on ca_offset %d", hdr.ca_offset);
+ ctx->client = current;
+ return count;
+
+ case CCB_DEQUEUE:
+ for (i = 0; i < DAX_CA_ELEMS; i++) {
+ if (ctx->ca_buf[i].status !=
+ CCA_STAT_NOT_COMPLETED)
+ dax_unlock_pages(ctx, i, 1);
+ }
+ return count;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dax_open(struct inode *inode, struct file *f)
+{
+ struct dax_ctx *ctx = NULL;
+ int i;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+ goto done;
+
+ ctx->ccb_buf = kcalloc(DAX_MAX_CCBS, sizeof(struct dax_ccb),
+ GFP_KERNEL);
+ if (ctx->ccb_buf == NULL)
+ goto done;
+
+ ctx->ccb_buf_ra = virt_to_phys(ctx->ccb_buf);
+ dax_dbg("ctx->ccb_buf=0x%p, ccb_buf_ra=0x%llx",
+ (void *)ctx->ccb_buf, ctx->ccb_buf_ra);
+
+ /* allocate CCB completion area buffer */
+ ctx->ca_buf = kzalloc(DAX_MMAP_LEN, GFP_KERNEL);
+ if (ctx->ca_buf == NULL)
+ goto alloc_error;
+ for (i = 0; i < DAX_CA_ELEMS; i++)
+ ctx->ca_buf[i].status = CCA_STAT_COMPLETED;
+
+ ctx->ca_buf_ra = virt_to_phys(ctx->ca_buf);
+ dax_dbg("ctx=0x%p, ctx->ca_buf=0x%p, ca_buf_ra=0x%llx",
+ (void *)ctx, (void *)ctx->ca_buf, ctx->ca_buf_ra);
+
+ ctx->owner = current;
+ f->private_data = ctx;
+ return 0;
+
+alloc_error:
+ kfree(ctx->ccb_buf);
+done:
+ kfree(ctx);
+ return -ENOMEM;
+}
+
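+/* translate a hypervisor CCB API return code to an errno and a name */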
+static char *dax_hv_errno(unsigned long hv_ret, int *ret)
+{
+ switch (hv_ret) {
+ case HV_EBADALIGN:
+ *ret = -EFAULT;
+ return "HV_EBADALIGN";
+ case HV_ENORADDR:
+ *ret = -EFAULT;
+ return "HV_ENORADDR";
+ case HV_EINVAL:
+ *ret = -EINVAL;
+ return "HV_EINVAL";
+ case HV_EWOULDBLOCK:
+ *ret = -EAGAIN;
+ return "HV_EWOULDBLOCK";
+ case HV_ENOACCESS:
+ *ret = -EPERM;
+ return "HV_ENOACCESS";
+ default:
+ break;
+ }
+
+ *ret = -EIO;
+ return "UNKNOWN";
+}
+
+static int dax_ccb_kill(u64 ca, u16 *kill_res)
+{
+ unsigned long hv_ret;
+ int count, ret = 0;
+ char *err_str;
+
+ for (count = 0; count < DAX_CCB_RETRIES; count++) {
+ dax_dbg("attempting kill on ca_ra 0x%llx", ca);
+ hv_ret = sun4v_ccb_kill(ca, kill_res);
+
+ if (hv_ret == HV_EOK) {
+ dax_info_dbg("HV_EOK (ca_ra 0x%llx): %d", ca,
+ *kill_res);
+ } else {
+ err_str = dax_hv_errno(hv_ret, &ret);
+ dax_dbg("%s (ca_ra 0x%llx)", err_str, ca);
+ }
+
+ if (ret != -EAGAIN)
+ return ret;
+ dax_info_dbg("ccb_kill count = %d", count);
+ udelay(DAX_CCB_USEC);
+ }
+
+ return -EAGAIN;
+}
+
+static int dax_ccb_info(u64 ca, struct ccb_info_result *info)
+{
+ unsigned long hv_ret;
+ char *err_str;
+ int ret = 0;
+
+ dax_dbg("attempting info on ca_ra 0x%llx", ca);
+ hv_ret = sun4v_ccb_info(ca, info);
+
+ if (hv_ret == HV_EOK) {
+ dax_info_dbg("HV_EOK (ca_ra 0x%llx): %d", ca, info->state);
+ if (info->state == DAX_CCB_ENQUEUED) {
+ dax_info_dbg("dax_unit %d, queue_num %d, queue_pos %d",
+ info->inst_num, info->q_num, info->q_pos);
+ }
+ } else {
+ err_str = dax_hv_errno(hv_ret, &ret);
+ dax_dbg("%s (ca_ra 0x%llx)", err_str, ca);
+ }
+
+ return ret;
+}
+
+static void dax_prt_ccbs(struct dax_ccb *ccb, int nelem)
+{
+ int i, j;
+ u64 *ccbp;
+
+ dax_dbg("ccb buffer:");
+ for (i = 0; i < nelem; i++) {
+ ccbp = (u64 *)&ccb[i];
+ dax_dbg(" %sccb[%d]", ccb[i].hdr.longccb ? "long " : "", i);
+ for (j = 0; j < 8; j++)
+ dax_dbg("\tccb[%d].dwords[%d]=0x%llx",
+ i, j, *(ccbp + j));
+ }
+}
+
+/*
+ * Validates user CCB content. Also sets completion address and address types
+ * for all addresses contained in CCB.
+ */
+static int dax_preprocess_usr_ccbs(struct dax_ctx *ctx, int idx, int nelem)
+{
+ int i;
+
+ /*
+ * The user is not allowed to specify real address types in
+ * the CCB header. This must be enforced by the kernel before
+ * submitting the CCBs to HV. The only allowed values for all
+ * address fields are VA or IMM
+ */
+ for (i = 0; i < nelem; i++) {
+ struct dax_ccb *ccbp = &ctx->ccb_buf[i];
+ unsigned long ca_offset;
+
+ if (ccbp->hdr.ccb_version > max_ccb_version)
+ return DAX_SUBMIT_ERR_CCB_INVAL;
+
+ switch (ccbp->hdr.opcode) {
+ case DAX_OP_SYNC_NOP:
+ case DAX_OP_EXTRACT:
+ case DAX_OP_SCAN_VALUE:
+ case DAX_OP_SCAN_RANGE:
+ case DAX_OP_TRANSLATE:
+ case DAX_OP_SCAN_VALUE | DAX_OP_INVERT:
+ case DAX_OP_SCAN_RANGE | DAX_OP_INVERT:
+ case DAX_OP_TRANSLATE | DAX_OP_INVERT:
+ case DAX_OP_SELECT:
+ break;
+ default:
+ return DAX_SUBMIT_ERR_CCB_INVAL;
+ }
+
+ if (ccbp->hdr.out_addr_type != DAX_ADDR_TYPE_VA &&
+ ccbp->hdr.out_addr_type != DAX_ADDR_TYPE_NONE) {
+ dax_dbg("invalid out_addr_type in user CCB[%d]", i);
+ return DAX_SUBMIT_ERR_CCB_INVAL;
+ }
+
+ if (ccbp->hdr.pri_addr_type != DAX_ADDR_TYPE_VA &&
+ ccbp->hdr.pri_addr_type != DAX_ADDR_TYPE_NONE) {
+ dax_dbg("invalid pri_addr_type in user CCB[%d]", i);
+ return DAX_SUBMIT_ERR_CCB_INVAL;
+ }
+
+ if (ccbp->hdr.sec_addr_type != DAX_ADDR_TYPE_VA &&
+ ccbp->hdr.sec_addr_type != DAX_ADDR_TYPE_NONE) {
+ dax_dbg("invalid sec_addr_type in user CCB[%d]", i);
+ return DAX_SUBMIT_ERR_CCB_INVAL;
+ }
+
+ if (ccbp->hdr.table_addr_type != DAX_ADDR_TYPE_VA &&
+ ccbp->hdr.table_addr_type != DAX_ADDR_TYPE_NONE) {
+ dax_dbg("invalid table_addr_type in user CCB[%d]", i);
+ return DAX_SUBMIT_ERR_CCB_INVAL;
+ }
+
+ /* set completion (real) address and address type */
+ ccbp->hdr.cca_addr_type = DAX_ADDR_TYPE_RA;
+ ca_offset = (idx + i) * sizeof(struct dax_cca);
+ ccbp->ca = (void *)ctx->ca_buf_ra + ca_offset;
+ memset(&ctx->ca_buf[idx + i], 0, sizeof(struct dax_cca));
+
+ dax_dbg("ccb[%d]=%p, ca_offset=0x%lx, compl RA=0x%llx",
+ i, ccbp, ca_offset, ctx->ca_buf_ra + ca_offset);
+
+ /* skip over 2nd 64 bytes of long CCB */
+ if (ccbp->hdr.longccb)
+ i++;
+ }
+
+ return DAX_SUBMIT_OK;
+}
+
+static int dax_ccb_exec(struct dax_ctx *ctx, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long accepted_len, hv_rv;
+ int i, idx, nccbs, naccepted;
+
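+ /* the file offset selects the first completion area slot to use */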
+ ctx->client = current;
+ idx = *ppos;
+ nccbs = count / sizeof(struct dax_ccb);
+
+ if (ctx->owner != current) {
+ dax_dbg("wrong thread");
+ ctx->result.exec.status = DAX_SUBMIT_ERR_THR_INIT;
+ return 0;
+ }
+ dax_dbg("args: ccb_buf_len=%ld, idx=%d", count, idx);
+
+ /* for given index and length, verify ca_buf range exists */
+ if (idx + nccbs >= DAX_CA_ELEMS) {
+ ctx->result.exec.status = DAX_SUBMIT_ERR_NO_CA_AVAIL;
+ return 0;
+ }
+
+ /*
+ * Copy CCBs into kernel buffer to prevent modification by the
+ * user in between validation and submission.
+ */
+ if (copy_from_user(ctx->ccb_buf, buf, count)) {
+ dax_dbg("copyin of user CCB buffer failed");
+ ctx->result.exec.status = DAX_SUBMIT_ERR_CCB_ARR_MMU_MISS;
+ return 0;
+ }
+
+ /* check that ca_buf[idx] .. ca_buf[idx + nccbs - 1] are all available */
+ for (i = idx; i < idx + nccbs; i++) {
+ if (ctx->ca_buf[i].status == CCA_STAT_NOT_COMPLETED) {
+ dax_dbg("CA range not available, dequeue needed");
+ ctx->result.exec.status = DAX_SUBMIT_ERR_NO_CA_AVAIL;
+ return 0;
+ }
+ }
+ dax_unlock_pages(ctx, idx, nccbs);
+
+ ctx->result.exec.status = dax_preprocess_usr_ccbs(ctx, idx, nccbs);
+ if (ctx->result.exec.status != DAX_SUBMIT_OK)
+ return 0;
+
+ ctx->result.exec.status = dax_lock_pages(ctx, idx, nccbs,
+ &ctx->result.exec.status_data);
+ if (ctx->result.exec.status != DAX_SUBMIT_OK)
+ return 0;
+
+ if (dax_debug & DAX_DBG_FLG_BASIC)
+ dax_prt_ccbs(ctx->ccb_buf, nccbs);
+
+ hv_rv = sun4v_ccb_submit(ctx->ccb_buf_ra, count,
+ HV_CCB_QUERY_CMD | HV_CCB_VA_SECONDARY, 0,
+ &accepted_len, &ctx->result.exec.status_data);
+
+ switch (hv_rv) {
+ case HV_EOK:
+ /*
+ * Hcall succeeded with no errors but the accepted
+ * length may be less than the requested length. The
+ * only way the driver can resubmit the remainder is
+ * to wait for completion of the submitted CCBs since
+ * there is no way to guarantee the ordering semantics
+ * required by the client applications. Therefore we
+ * let the user library deal with resubmissions.
+ */
+ ctx->result.exec.status = DAX_SUBMIT_OK;
+ break;
+ case HV_EWOULDBLOCK:
+ /*
+ * This is a transient HV API error. The user library
+ * can retry.
+ */
+ dax_dbg("hcall returned HV_EWOULDBLOCK");
+ ctx->result.exec.status = DAX_SUBMIT_ERR_WOULDBLOCK;
+ break;
+ case HV_ENOMAP:
+ /*
+ * HV was unable to translate a VA. The VA it could
+ * not translate is returned in the status_data param.
+ */
+ dax_dbg("hcall returned HV_ENOMAP");
+ ctx->result.exec.status = DAX_SUBMIT_ERR_NOMAP;
+ break;
+ case HV_EINVAL:
+ /*
+ * This is the result of an invalid user CCB as HV is
+ * validating some of the user CCB fields. Pass this
+ * error back to the user. There is no supporting info
+ * to isolate the invalid field.
+ */
+ dax_dbg("hcall returned HV_EINVAL");
+ ctx->result.exec.status = DAX_SUBMIT_ERR_CCB_INVAL;
+ break;
+ case HV_ENOACCESS:
+ /*
+ * HV found a VA that did not have the appropriate
+ * permissions (such as the w bit). The VA in question
+ * is returned in status_data param.
+ */
+ dax_dbg("hcall returned HV_ENOACCESS");
+ ctx->result.exec.status = DAX_SUBMIT_ERR_NOACCESS;
+ break;
+ case HV_EUNAVAILABLE:
+ /*
+ * The requested CCB operation could not be performed
+ * at this time. Return the specific unavailable code
+ * in the status_data field.
+ */
+ dax_dbg("hcall returned HV_EUNAVAILABLE");
+ ctx->result.exec.status = DAX_SUBMIT_ERR_UNAVAIL;
+ break;
+ default:
+ ctx->result.exec.status = DAX_SUBMIT_ERR_INTERNAL;
+ dax_dbg("unknown hcall return value (%ld)", hv_rv);
+ break;
+ }
+
+ /* unlock pages associated with the unaccepted CCBs */
+ naccepted = accepted_len / sizeof(struct dax_ccb);
+ dax_unlock_pages(ctx, idx + naccepted, nccbs - naccepted);
+
+ /* mark unaccepted CCBs as completed so their CA slots can be reused */
+ for (i = idx + naccepted; i < idx + nccbs; i++)
+ ctx->ca_buf[i].status = CCA_STAT_COMPLETED;
+
+ ctx->ccb_count += naccepted;
+ ctx->fail_count += nccbs - naccepted;
+
+ dax_dbg("hcall rv=%ld, accepted_len=%ld, status_data=0x%llx, ret status=%d",
+ hv_rv, accepted_len, ctx->result.exec.status_data,
+ ctx->result.exec.status);
+
+ if (count == accepted_len)
+ ctx->client = NULL; /* no read needed to complete protocol */
+ return accepted_len;
+}
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 00e7968a1d70..b42c9c479d4b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -369,7 +369,6 @@ out:
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -392,8 +391,8 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -473,11 +472,10 @@ out:
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -499,9 +497,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
- schedulertime = cpu_to_le32(schedulertime % 604800);
+ schedulertime = cpu_to_le32(schedulertime);
memcpy(param->data, &schedulertime, sizeof(u32));
@@ -648,8 +645,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
TW_Command_Full *full_command_packet;
TW_Compatibility_Info *tw_compat_info;
TW_Event *event;
- struct timeval current_time;
- u32 current_time_ms;
+ ktime_t current_time;
TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
int retval = TW_IOCTL_ERROR_OS_EFAULT;
void __user *argp = (void __user *)arg;
@@ -840,17 +836,17 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
break;
case TW_IOCTL_GET_LOCK:
tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
- do_gettimeofday(&current_time);
- current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
+ current_time = ktime_get();
- if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
+ if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
+ ktime_after(current_time, tw_dev->ioctl_time)) {
tw_dev->ioctl_sem_lock = 1;
- tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
+ tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
tw_ioctl->driver_command.status = 0;
tw_lock->time_remaining_msec = tw_lock->timeout_msec;
} else {
tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
- tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
+ tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
}
break;
case TW_IOCTL_RELEASE_LOCK:
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index b6c208cc474f..d88cd3499bd5 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -666,7 +666,7 @@ typedef struct TAG_TW_Device_Extension {
unsigned char event_queue_wrapped;
unsigned int error_sequence_id;
int ioctl_sem_lock;
- u32 ioctl_msec;
+ ktime_t ioctl_time;
int chrdev_request_id;
wait_queue_head_t ioctl_wqueue;
struct mutex ioctl_lock;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b150e131b2e7..cf9f2a09b47d 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -221,7 +221,6 @@ out:
static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -240,8 +239,8 @@ static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -408,11 +407,10 @@ out:
static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -434,10 +432,9 @@ static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
- schedulertime = cpu_to_le32(schedulertime % 604800);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
+ schedulertime = cpu_to_le32(schedulertime);
memcpy(param->data, &schedulertime, sizeof(u32));
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index af3e4d3f9735..e7961cbd2c55 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -42,6 +42,8 @@
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -913,8 +915,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
memset(str, ' ', sizeof(*str));
if (sup_adap_info->adapter_type_text[0]) {
- char *cp = sup_adap_info->adapter_type_text;
int c;
+ char *cp;
+ char *cname = kmemdup(sup_adap_info->adapter_type_text,
+ sizeof(sup_adap_info->adapter_type_text),
+ GFP_ATOMIC);
+ if (!cname)
+ return;
+
+ cp = cname;
if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
inqstrcpy("SMC", str->vid);
else {
@@ -923,7 +932,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
++cp;
c = *cp;
*cp = '\0';
- inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
+ inqstrcpy(cname, str->vid);
*cp = c;
while (*cp && *cp != ' ')
++cp;
@@ -931,14 +940,11 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
while (*cp == ' ')
++cp;
/* last six chars reserved for vol type */
- c = 0;
- if (strlen(cp) > sizeof(str->pid)) {
- c = cp[sizeof(str->pid)];
+ if (strlen(cp) > sizeof(str->pid))
cp[sizeof(str->pid)] = '\0';
- }
inqstrcpy (cp, str->pid);
- if (c)
- cp[sizeof(str->pid)] = c;
+
+ kfree(cname);
} else {
struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
@@ -1660,87 +1666,309 @@ static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
(void *) cmd);
}
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
+ struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
{
- struct fib *fibptr;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- struct aac_ciss_identify_pd *identify_resp;
- dma_addr_t addr;
- u32 vbus, vid;
- u16 fibsize, datasize;
- int rcode = -ENOMEM;
-
+ struct fib *fibptr;
+ dma_addr_t addr;
+ int rcode;
+ int fibsize;
+ struct aac_srb *srb;
+ struct aac_srb_reply *srb_reply;
+ struct sgmap64 *sg64;
+ u32 vbus;
+ u32 vid;
+
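+ /* BMIC is only supported on SA firmware; treat as a no-op success otherwise */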
+ if (!dev->sa_firmware)
+ return 0;
+ /* allocate FIB */
fibptr = aac_fib_alloc(dev);
if (!fibptr)
- goto out;
+ return -ENOMEM;
- fibsize = sizeof(struct aac_srb) -
- sizeof(struct sgentry) + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_identify_pd);
+ aac_fib_init(fibptr);
+ fibptr->hw_fib_va->header.XferState &=
+ ~cpu_to_le32(FastResponseCapable);
- identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (!identify_resp)
- goto fib_free_ptr;
+ fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+ sizeof(struct sgentry64);
- vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
- vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
+ /* map the transfer buffer for DMA */
+ addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&dev->pdev->dev, addr)) {
+ rcode = -ENOMEM;
+ goto fib_error;
+ }
- aac_fib_init(fibptr);
+ srb = fib_data(fibptr);
+ memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ vbus = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_bus);
+ vid = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_target);
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = 0x26;
- srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
- srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+ /* set the common request fields */
+ srb->channel = cpu_to_le32(vbus);
+ srb->id = cpu_to_le32(vid);
+ srb->lun = 0;
+ srb->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srb->timeout = 0;
+ srb->retry_limit = 0;
+ srb->cdb_size = cpu_to_le32(16);
+ srb->count = cpu_to_le32(xfer_len);
+
+ sg64 = (struct sgmap64 *)&srb->sg;
+ sg64->count = cpu_to_le32(1);
+ sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
+ sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
+ sg64->sg[0].count = cpu_to_le32(xfer_len);
- sg64 = (struct sgmap64 *)&srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
- sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
- sg64->sg[0].count = cpu_to_le32(datasize);
+ /*
+ * Copy the updated SRB back to the caller for dumping or later use
+ */
+ memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
+
+ /* issue request to the controller */
+ rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
+ 1, 1, NULL, NULL);
+
+ if (rcode == -ERESTARTSYS)
+ rcode = -ERESTART;
+
+ if (unlikely(rcode < 0))
+ goto bmic_error;
+
+ srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
+ memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
+
+bmic_error:
+ dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
+fib_error:
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return rcode;
+}
+
+static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
+{
+
+ struct aac_ciss_identify_pd *identify_resp;
- rcode = aac_fib_send(ScsiPortCommand64,
- fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+ if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
+ return;
+
+ identify_resp = dev->hba_map[bus][target].safw_identify_resp;
+ if (identify_resp == NULL) {
+ dev->hba_map[bus][target].qd_limit = 32;
+ return;
+ }
if (identify_resp->current_queue_depth_limit <= 0 ||
- identify_resp->current_queue_depth_limit > 32)
+ identify_resp->current_queue_depth_limit > 255)
dev->hba_map[bus][target].qd_limit = 32;
else
dev->hba_map[bus][target].qd_limit =
identify_resp->current_queue_depth_limit;
+}
- dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);
+static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
+ struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb_unit srbu;
+ struct aac_srb *srbcmd;
+ struct aac_ciss_identify_pd *identify_reply;
- aac_fib_complete(fibptr);
+ datasize = sizeof(struct aac_ciss_identify_pd);
+ identify_reply = kmalloc(datasize, GFP_KERNEL);
+ if (!identify_reply)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = 0x26;
+ srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
+ srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ *identify_resp = identify_reply;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(identify_reply);
+ goto out;
+}
+
+static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
+{
+ kfree(dev->safw_phys_luns);
+ dev->safw_phys_luns = NULL;
+}
+
+/**
+ * aac_get_safw_ciss_luns() - Get the CISS physical LUN list
+ * @dev: aac_dev structure
+ *
+ * Execute a CISS REPORT PHYSICAL LUNS and cache the response in
+ * dev->safw_phys_luns.
+ */
+static int aac_get_safw_ciss_luns(struct aac_dev *dev)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb *srbcmd;
+ struct aac_srb_unit srbu;
+ struct aac_ciss_phys_luns_resp *phys_luns;
+
+ datasize = sizeof(struct aac_ciss_phys_luns_resp) +
+ (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+ phys_luns = kmalloc(datasize, GFP_KERNEL);
+ if (phys_luns == NULL)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
+ srbcmd->cdb[1] = 2; /* extended reporting */
+ srbcmd->cdb[8] = (u8)(datasize >> 8);
+ srbcmd->cdb[9] = (u8)(datasize);
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ if (phys_luns->resp_flag != 2) {
+ rcode = -ENOMSG;
+ goto mem_free_all;
+ }
+
+ dev->safw_phys_luns = phys_luns;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(phys_luns);
+ goto out;
+}
+
+static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
+{
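+ /* extended-format report entries are 24 bytes each */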
+ return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
+}
+
+static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
+}
+
+static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[0];
+}
+
+static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].bus >> 6;
+}
+
+static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[9];
+}
+
+static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
+{
+ return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
+}
+
+static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[8];
+}
+
+static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
+ int bus, int target)
+{
+ kfree(dev->hba_map[bus][target].safw_identify_resp);
+ dev->hba_map[bus][target].safw_identify_resp = NULL;
+}
+
+static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
+ int lun_count)
+{
+ int luns;
+ int i;
+ u32 bus;
+ u32 target;
+
+ luns = aac_get_safw_phys_lun_count(dev);
+
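+ /* a negative lun_count means free the identify data for all LUNs */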
+ if (luns < lun_count)
+ lun_count = luns;
+ else if (lun_count < 0)
+ lun_count = luns;
+
+ for (i = 0; i < lun_count; i++) {
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ aac_free_safw_identify_resp(dev, bus, target);
+ }
+}
+
+static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
+{
+ int i;
+ int rcode = 0;
+ u32 lun_count;
+ u32 bus;
+ u32 target;
+ struct aac_ciss_identify_pd *identify_resp = NULL;
+
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+ for (i = 0; i < lun_count; ++i) {
+
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ rcode = aac_issue_safw_bmic_identify(dev,
+ &identify_resp, bus, target);
+
+ if (unlikely(rcode < 0))
+ goto free_identify_resp;
+
+ dev->hba_map[bus][target].safw_identify_resp = identify_resp;
+ }
-fib_free_ptr:
- aac_fib_free(fibptr);
out:
return rcode;
+free_identify_resp:
+ aac_free_safw_all_identify_resp(dev, i);
+ goto out;
}
/**
- * aac_update hba_map()- update current hba map with data from FW
+ * aac_set_safw_attr_all_targets- update current hba map with data from FW
* @dev: aac_dev structure
* @phys_luns: FW information from report phys luns
+ * @rescan: Indicates scan type
*
* Update our hba map with the information gathered from the FW
*/
-void aac_update_hba_map(struct aac_dev *dev,
- struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
{
/* ok and extended reporting */
u32 lun_count, nexus;
@@ -1748,24 +1976,21 @@ void aac_update_hba_map(struct aac_dev *dev,
u8 expose_flag, attribs;
u8 devtype;
- lun_count = ((phys_luns->list_length[0] << 24)
- + (phys_luns->list_length[1] << 16)
- + (phys_luns->list_length[2] << 8)
- + (phys_luns->list_length[3])) / 24;
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+ dev->scan_counter++;
for (i = 0; i < lun_count; ++i) {
- bus = phys_luns->lun[i].level2[1] & 0x3f;
- target = phys_luns->lun[i].level2[0];
- expose_flag = phys_luns->lun[i].bus >> 6;
- attribs = phys_luns->lun[i].node_ident[9];
- nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+ expose_flag = aac_get_safw_phys_expose_flag(dev, i);
+ attribs = aac_get_safw_phys_attribs(dev, i);
+ nexus = aac_get_safw_phys_nexus(dev, i);
if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
continue;
- dev->hba_map[bus][target].expose = expose_flag;
-
if (expose_flag != 0) {
devtype = AAC_DEVTYPE_RAID_MEMBER;
goto update_devtype;
@@ -1778,95 +2003,45 @@ void aac_update_hba_map(struct aac_dev *dev,
} else
devtype = AAC_DEVTYPE_ARC_RAW;
- if (devtype != AAC_DEVTYPE_NATIVE_RAW)
- goto update_devtype;
+ dev->hba_map[bus][target].scan_counter = dev->scan_counter;
- if (aac_issue_bmic_identify(dev, bus, target) < 0)
- dev->hba_map[bus][target].qd_limit = 32;
+ aac_set_safw_target_qd(dev, bus, target);
update_devtype:
- if (rescan == AAC_INIT)
- dev->hba_map[bus][target].devtype = devtype;
- else
- dev->hba_map[bus][target].new_devtype = devtype;
+ dev->hba_map[bus][target].devtype = devtype;
}
}
-/**
- * aac_report_phys_luns() Process topology change
- * @dev: aac_dev structure
- * @fibptr: fib pointer
- *
- * Execute a CISS REPORT PHYS LUNS and process the results into
- * the current hba_map.
- */
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+static int aac_setup_safw_targets(struct aac_dev *dev)
{
- int fibsize, datasize;
- struct aac_ciss_phys_luns_resp *phys_luns;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- dma_addr_t addr;
- u32 vbus, vid;
int rcode = 0;
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
- + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_phys_luns_resp)
- + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
-
- phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (phys_luns == NULL) {
- rcode = -ENOMEM;
- goto err_out;
- }
-
- vbus = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_bus);
- vid = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_target);
+ rcode = aac_get_containers(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- aac_fib_init(fibptr);
+ rcode = aac_get_safw_ciss_luns(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ rcode = aac_get_safw_attr_all_targets(dev);
+ if (unlikely(rcode < 0))
+ goto free_ciss_luns;
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
- srbcmd->cdb[1] = 2; /* extended reporting */
- srbcmd->cdb[8] = (u8)(datasize >> 8);
- srbcmd->cdb[9] = (u8)(datasize);
-
- sg64 = (struct sgmap64 *) &srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
- sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
- sg64->sg[0].count = cpu_to_le32(datasize);
-
- rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
- FsaNormal, 1, 1, NULL, NULL);
-
- /* analyse data */
- if (rcode >= 0 && phys_luns->resp_flag == 2) {
- /* ok and extended reporting */
- aac_update_hba_map(dev, phys_luns, rescan);
- }
+ aac_set_safw_attr_all_targets(dev);
- dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr);
-err_out:
+ aac_free_safw_all_identify_resp(dev, -1);
+free_ciss_luns:
+ aac_free_safw_ciss_luns(dev);
+out:
return rcode;
}
+int aac_setup_safw_adapter(struct aac_dev *dev)
+{
+ return aac_setup_safw_targets(dev);
+}
+
int aac_get_adapter_info(struct aac_dev* dev)
{
struct fib* fibptr;
@@ -1969,12 +2144,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
}
- if (!dev->sync_mode && dev->sa_firmware &&
- dev->supplement_adapter_info.virt_device_bus != 0xffff) {
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
- }
-
if (!dev->in_reset) {
char buffer[16];
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -2739,14 +2908,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
} else { /* check for physical non-dasd devices */
bus = aac_logical_to_phys(scmd_channel(scsicmd));
- if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
- (dev->hba_map[bus][cid].expose
- == AAC_HIDE_DISK)){
- if (scsicmd->cmnd[0] == INQUIRY) {
- scsicmd->result = DID_NO_CONNECT << 16;
- goto scsi_done_ret;
- }
- }
if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
dev->hba_map[bus][cid].devtype
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d52265416da2..0095fcbd1c88 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <scsi/scsi_host.h>
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -97,7 +98,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50834
+# define AAC_DRIVER_BUILD 50877
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -117,9 +118,13 @@ enum {
/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
#define AAC_MAX_BUSES 5
#define AAC_MAX_TARGETS 256
+#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define AAC_MAX_NATIVE_SIZE 2048
#define FW_ERROR_BUFFER_SIZE 512
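+/* decode a flat 0..AAC_BUS_TARGET_LOOP-1 index into a (bus, target) pair */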
+#define get_bus_number(x) ((x) / AAC_MAX_TARGETS)
+#define get_target_number(x) ((x) % AAC_MAX_TARGETS)
+
/* Thor AIF events */
#define SA_AIF_HOTPLUG (1<<1)
#define SA_AIF_HARDWARE (1<<2)
@@ -1334,17 +1339,17 @@ struct fib {
#define AAC_DEVTYPE_RAID_MEMBER 1
#define AAC_DEVTYPE_ARC_RAW 2
#define AAC_DEVTYPE_NATIVE_RAW 3
-#define AAC_EXPOSE_DISK 0
-#define AAC_HIDE_DISK 3
+
+#define AAC_SAFW_RESCAN_DELAY (10 * HZ)
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
u8 devtype; /* device type */
- u8 new_devtype;
u8 reset_state; /* 0 - no reset, 1..x - */
/* after xth TM LUN reset */
u16 qd_limit;
- u8 expose; /*checks if to expose or not*/
+ u32 scan_counter;
+ struct aac_ciss_identify_pd *safw_identify_resp;
};
/*
@@ -1560,6 +1565,7 @@ struct aac_dev
spinlock_t fib_lock;
struct mutex ioctl_mutex;
+ struct mutex scan_mutex;
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
@@ -1605,6 +1611,7 @@ struct aac_dev
int maximum_num_channels;
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
+ struct delayed_work safw_rescan_work;
int cardtype;
/*
*This lock will protect the two 32-bit
@@ -1668,9 +1675,11 @@ struct aac_dev
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
atomic_t msix_counter;
+ u32 scan_counter;
struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
+ struct aac_ciss_phys_luns_resp *safw_phys_luns;
u8 adapter_shutdown;
u32 handle_pci_error;
bool init_reset;
@@ -2023,6 +2032,12 @@ struct aac_srb_reply
__le32 sense_data_size;
u8 sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
};
+
+struct aac_srb_unit {
+ struct aac_srb srb;
+ struct aac_srb_reply srb_reply;
+};
+
/*
* SRB Flags
*/
@@ -2627,16 +2642,41 @@ static inline int aac_adapter_check_health(struct aac_dev *dev)
return (dev)->a_ops.adapter_check_health(dev);
}
+
+int aac_scan_host(struct aac_dev *dev);
+
+static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
+{
+ schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+}
+
+static inline void aac_safw_rescan_worker(struct work_struct *work)
+{
+ struct aac_dev *dev = container_of(to_delayed_work(work),
+ struct aac_dev, safw_rescan_work);
+
+ wait_event(dev->scsi_host_ptr->host_wait,
+ !scsi_host_in_recovery(dev->scsi_host_ptr));
+
+ aac_scan_host(dev);
+}
+
+static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+{
+ if (dev->sa_firmware)
+ cancel_delayed_work_sync(&dev->safw_rescan_work);
+}
+
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102
#define AAC_OWNER_ERROR_HANDLER 0x103
#define AAC_OWNER_FIRMWARE 0x106
+void aac_safw_rescan_worker(struct work_struct *work);
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
+int aac_setup_safw_adapter(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9ab0fa959d83..a2b3430072c7 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1052,9 +1052,13 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
return -EFAULT;
+ dev->adapter_shutdown = 1;
+
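+ /* ioctl_mutex is held by aac_do_ioctl(); drop it across the reset */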
+ mutex_unlock(&dev->ioctl_mutex);
retval = aac_reset_adapter(dev, 0, reset.reset_type);
- return retval;
+ mutex_lock(&dev->ioctl_mutex);
+ return retval;
}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1bc623ad3faf..0dc7b5a4fea2 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -42,6 +42,8 @@
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
#include "aacraid.h"
@@ -284,6 +286,38 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
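+/* wait up to 60 seconds for all firmware-owned commands to complete */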
+static void aac_wait_for_io_completion(struct aac_dev *aac)
+{
+ unsigned long flagv = 0;
+ int i = 0;
+
+ for (i = 60; i; --i) {
+ struct scsi_device *dev;
+ struct scsi_cmnd *command;
+ int active = 0;
+
+ __shost_for_each_device(dev, aac->scsi_host_ptr) {
+ spin_lock_irqsave(&dev->list_lock, flagv);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flagv);
+ if (active)
+ break;
+
+ }
+ /*
+ * We can exit if all the commands are complete
+ */
+ if (active == 0)
+ break;
+ ssleep(1);
+ }
+}
+
/**
* aac_send_shutdown - shutdown an adapter
* @dev: Adapter to shutdown
@@ -295,12 +329,10 @@ int aac_send_shutdown(struct aac_dev * dev)
{
struct fib * fibctx;
struct aac_close *cmd;
- int status;
+ int status = 0;
- fibctx = aac_fib_alloc(dev);
- if (!fibctx)
- return -ENOMEM;
- aac_fib_init(fibctx);
+ if (aac_adapter_check_health(dev))
+ return status;
if (!dev->adapter_shutdown) {
mutex_lock(&dev->ioctl_mutex);
@@ -308,6 +340,13 @@ int aac_send_shutdown(struct aac_dev * dev)
mutex_unlock(&dev->ioctl_mutex);
}
+ aac_wait_for_io_completion(dev);
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return -ENOMEM;
+ aac_fib_init(fibctx);
+
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
cmd->cid = cpu_to_le32(0xfffffffe);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 80a8cb26cdea..84858d5c8257 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -1629,28 +1630,28 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
command->scsi_done(command);
}
/*
- * Any Device that was already marked offline needs to be cleaned up
+ * Any device that was already marked offline needs to be marked
+ * running
*/
__shost_for_each_device(dev, host) {
- if (!scsi_device_online(dev)) {
- sdev_printk(KERN_INFO, dev, "Removing offline device\n");
- scsi_remove_device(dev);
- scsi_device_put(dev);
- }
+ if (!scsi_device_online(dev))
+ scsi_device_set_state(dev, SDEV_RUNNING);
}
retval = 0;
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
+
/*
* Issue bus rescan to catch any configuration that might have
* occurred
*/
- if (!retval) {
- dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
- scsi_scan_host(host);
+ if (!retval && !is_kdump_kernel()) {
+ dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
+ aac_schedule_safw_scan_worker(aac);
}
+
if (jafo) {
spin_lock_irq(host->host_lock);
}
@@ -1681,31 +1682,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
*/
host = aac->scsi_host_ptr;
scsi_block_requests(host);
- if (forced < 2) for (retval = 60; retval; --retval) {
- struct scsi_device * dev;
- struct scsi_cmnd * command;
- int active = 0;
-
- __shost_for_each_device(dev, host) {
- spin_lock_irqsave(&dev->list_lock, flagv);
- list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flagv);
- if (active)
- break;
-
- }
- /*
- * We can exit If all the commands are complete
- */
- if (active == 0)
- break;
- ssleep(1);
- }
/* Quiesce build, flush cache, write through mode */
if (forced < 2)
@@ -1874,42 +1850,124 @@ out:
return BlinkLED;
}
+static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
+{
+ return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
+}
+
+static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
+ int bus,
+ int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static void aac_put_safw_scsi_device(struct scsi_device *sdev)
+{
+ if (sdev)
+ scsi_device_put(sdev);
+}
-static void aac_resolve_luns(struct aac_dev *dev)
+static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
- int bus, target, channel;
struct scsi_device *sdev;
- u8 devtype;
- u8 new_devtype;
- for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
- for (target = 0; target < AAC_MAX_TARGETS; target++) {
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ scsi_remove_device(sdev);
+ aac_put_safw_scsi_device(sdev);
+}
- if (bus == CONTAINER_CHANNEL)
- channel = CONTAINER_CHANNEL;
- else
- channel = aac_phys_to_logical(bus);
+static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
+ int bus, int target)
+{
+ return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
+}
- devtype = dev->hba_map[bus][target].devtype;
- new_devtype = dev->hba_map[bus][target].new_devtype;
+static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
+{
+ if (is_safw_raid_volume(dev, bus, target))
+ return dev->fsa_dev[target].valid;
+ else
+ return aac_is_safw_scan_count_equal(dev, bus, target);
+}
- sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
- target, 0);
+static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
+{
+ int is_exposed = 0;
+ struct scsi_device *sdev;
- if (!sdev && new_devtype)
- scsi_add_device(dev->scsi_host_ptr, channel,
- target, 0);
- else if (sdev && new_devtype != devtype)
- scsi_remove_device(sdev);
- else if (sdev && new_devtype == devtype)
- scsi_rescan_device(&sdev->sdev_gendev);
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ if (sdev)
+ is_exposed = 1;
+ aac_put_safw_scsi_device(sdev);
- if (sdev)
- scsi_device_put(sdev);
+ return is_exposed;
+}
- dev->hba_map[bus][target].devtype = new_devtype;
- }
+static int aac_update_safw_host_devices(struct aac_dev *dev)
+{
+ int i;
+ int bus;
+ int target;
+ int is_exposed = 0;
+ int rcode = 0;
+
+ rcode = aac_setup_safw_adapter(dev);
+ if (unlikely(rcode < 0)) {
+ goto out;
}
+
+ for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
+
+ bus = get_bus_number(i);
+ target = get_target_number(i);
+
+ is_exposed = aac_is_safw_device_exposed(dev, bus, target);
+
+ if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
+ aac_add_safw_device(dev, bus, target);
+ else if (!aac_is_safw_target_valid(dev, bus, target) &&
+ is_exposed)
+ aac_remove_safw_device(dev, bus, target);
+ }
+out:
+ return rcode;
+}
+
+static int aac_scan_safw_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ rcode = aac_update_safw_host_devices(dev);
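+ /* if the device update failed, retry after AAC_SAFW_RESCAN_DELAY */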
+ if (rcode)
+ aac_schedule_safw_scan_worker(dev);
+
+ return rcode;
+}
+
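+/*
+ * Serialize topology scans: SA firmware adapters use the driver's own
+ * device update, everything else goes through the midlayer scan.
+ */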
+int aac_scan_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ mutex_lock(&dev->scan_mutex);
+ if (dev->sa_firmware)
+ rcode = aac_scan_safw_host(dev);
+ else
+ scsi_scan_host(dev->scsi_host_ptr);
+ mutex_unlock(&dev->scan_mutex);
+
+ return rcode;
}
/**
@@ -1922,10 +1980,8 @@ static void aac_resolve_luns(struct aac_dev *dev)
*/
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
- int i, bus, target, container, rcode = 0;
+ int i;
u32 events = 0;
- struct fib *fib;
- struct scsi_device *sdev;
if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
events = SA_AIF_HOTPLUG;
@@ -1947,44 +2003,8 @@ static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
case SA_AIF_LDEV_CHANGE:
case SA_AIF_BPCFG_CHANGE:
- fib = aac_fib_alloc(dev);
- if (!fib) {
- pr_err("aac_handle_sa_aif: out of memory\n");
- return;
- }
- for (bus = 0; bus < AAC_MAX_BUSES; bus++)
- for (target = 0; target < AAC_MAX_TARGETS; target++)
- dev->hba_map[bus][target].new_devtype = 0;
-
- rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
-
- if (rcode != -ERESTARTSYS)
- aac_fib_free(fib);
-
- aac_resolve_luns(dev);
-
- if (events == SA_AIF_LDEV_CHANGE ||
- events == SA_AIF_BPCFG_CHANGE) {
- aac_get_containers(dev);
- for (container = 0; container <
- dev->maximum_num_containers; ++container) {
- sdev = scsi_device_lookup(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- if (dev->fsa_dev[container].valid && !sdev) {
- scsi_add_device(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- } else if (!dev->fsa_dev[container].valid &&
- sdev) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- } else if (sdev) {
- scsi_rescan_device(&sdev->sdev_gendev);
- scsi_device_put(sdev);
- }
- }
- }
+ aac_scan_host(dev);
+
break;
case SA_AIF_BPSTAT_CHANGE:
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d55332de08f9..b3b931ab77eb 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -683,6 +683,9 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
u32 bus, cid;
int ret = FAILED;
+ if (aac_adapter_check_health(aac))
+ return ret;
+
bus = aac_logical_to_phys(scmd_channel(cmd));
cid = scmd_id(cmd);
if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
@@ -690,7 +693,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
struct aac_hba_tm_req *tmf;
int status;
u64 address;
- __le32 managed_request_id;
pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
AAC_DRIVERNAME,
@@ -703,8 +705,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
(fib->callback_data == cmd)) {
found = 1;
- managed_request_id = ((struct aac_hba_cmd_req *)
- fib->hw_fib_va)->request_id;
break;
}
}
@@ -1375,18 +1375,15 @@ static ssize_t aac_store_reset_adapter(struct device *device,
const char *buf, size_t count)
{
int retval = -EACCES;
- int bled = 0;
- struct aac_dev *aac;
-
if (!capable(CAP_SYS_ADMIN))
return retval;
- aac = (struct aac_dev *)class_to_shost(device)->hostdata;
- bled = buf[0] == '!' ? 1:0;
- retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
+ retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
+ buf[0] == '!', IOP_HWSOFT_RESET);
if (retval >= 0)
retval = count;
+
return retval;
}
@@ -1689,6 +1686,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
+ mutex_init(&aac->scan_mutex);
+
+ INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
/*
* Map in the registers from the adapter.
*/
@@ -1792,7 +1792,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = scsi_add_host(shost, &pdev->dev);
if (error)
goto out_deinit;
- scsi_scan_host(shost);
+
+ aac_scan_host(aac);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -1877,6 +1878,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_block_requests(shost);
+ aac_cancel_safw_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
@@ -1935,6 +1937,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+ aac_cancel_safw_rescan_worker(aac);
scsi_remove_host(shost);
__aac_shutdown(aac);
@@ -1992,6 +1995,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
aac->handle_pci_error = 1;
scsi_block_requests(aac->scsi_host_ptr);
+ aac_cancel_safw_rescan_worker(aac);
aac_flush_ios(aac);
aac_release_resources(aac);
@@ -2076,7 +2080,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
scsi_unblock_requests(aac->scsi_host_ptr);
- scsi_scan_host(aac->scsi_host_ptr);
+ aac_scan_host(aac);
pci_save_state(pdev);
dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 553922fed524..882f40353b96 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -329,6 +329,22 @@ int aac_sa_init(struct aac_dev *dev)
instance = dev->id;
name = dev->name;
+ /*
+ * Fill in the function dispatch table.
+ */
+
+ dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
+ dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_sa_check_health;
+ dev->a_ops.adapter_restart = aac_sa_restart_adapter;
+ dev->a_ops.adapter_start = aac_sa_start_adapter;
+ dev->a_ops.adapter_intr = aac_sa_intr;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+ dev->a_ops.adapter_ioremap = aac_sa_ioremap;
+
if (aac_sa_ioremap(dev, dev->base_size)) {
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
@@ -363,22 +379,6 @@ int aac_sa_init(struct aac_dev *dev)
}
/*
- * Fill in the function dispatch table.
- */
-
- dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
- dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
- dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
- dev->a_ops.adapter_notify = aac_sa_notify_adapter;
- dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
- dev->a_ops.adapter_check_health = aac_sa_check_health;
- dev->a_ops.adapter_restart = aac_sa_restart_adapter;
- dev->a_ops.adapter_start = aac_sa_start_adapter;
- dev->a_ops.adapter_intr = aac_sa_intr;
- dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
- dev->a_ops.adapter_ioremap = aac_sa_ioremap;
-
- /*
* First clear out all interrupts. Then enable the one's that
* we can handle.
*/
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index a254b32eba39..f375f3557c18 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,52 +45,57 @@
#include <linux/interrupt.h>
struct device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
-#ifdef CONFIG_XEN
- #define ARCMSR_MAX_FREECCB_NUM 160
-#define ARCMSR_MAX_OUTSTANDING_CMD 155
-#else
- #define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_MAX_OUTSTANDING_CMD 255
-#endif
-#define ARCMSR_DRIVER_VERSION "v1.30.00.22-20151126"
-#define ARCMSR_SCSI_INITIATOR_ID 255
-#define ARCMSR_MAX_XFER_SECTORS 512
-#define ARCMSR_MAX_XFER_SECTORS_B 4096
-#define ARCMSR_MAX_XFER_SECTORS_C 304
-#define ARCMSR_MAX_TARGETID 17
-#define ARCMSR_MAX_TARGETLUN 8
-#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
-#define ARCMSR_MAX_QBUFFER 4096
-#define ARCMSR_DEFAULT_SG_ENTRIES 38
-#define ARCMSR_MAX_HBB_POSTQUEUE 264
+#define ARCMSR_MAX_FREECCB_NUM 1024
+#define ARCMSR_MAX_OUTSTANDING_CMD 1024
+#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
+#define ARCMSR_MIN_OUTSTANDING_CMD 32
+#define ARCMSR_DRIVER_VERSION "v1.40.00.04-20171130"
+#define ARCMSR_SCSI_INITIATOR_ID 255
+#define ARCMSR_MAX_XFER_SECTORS 512
+#define ARCMSR_MAX_XFER_SECTORS_B 4096
+#define ARCMSR_MAX_XFER_SECTORS_C 304
+#define ARCMSR_MAX_TARGETID 17
+#define ARCMSR_MAX_TARGETLUN 8
+#define ARCMSR_MAX_CMD_PERLUN 128
+#define ARCMSR_DEFAULT_CMD_PERLUN 32
+#define ARCMSR_MIN_CMD_PERLUN 1
+#define ARCMSR_MAX_QBUFFER 4096
+#define ARCMSR_DEFAULT_SG_ENTRIES 38
+#define ARCMSR_MAX_HBB_POSTQUEUE 264
#define ARCMSR_MAX_ARC1214_POSTQUEUE 256
#define ARCMSR_MAX_ARC1214_DONEQUEUE 257
-#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
-#define ARCMSR_CDB_SG_PAGE_LENGTH 256
+#define ARCMSR_MAX_HBE_DONEQUEUE 512
+#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
+#define ARCMSR_CDB_SG_PAGE_LENGTH 256
#define ARCMST_NUM_MSIX_VECTORS 4
#ifndef PCI_DEVICE_ID_ARECA_1880
-#define PCI_DEVICE_ID_ARECA_1880 0x1880
- #endif
+#define PCI_DEVICE_ID_ARECA_1880 0x1880
+#endif
#ifndef PCI_DEVICE_ID_ARECA_1214
- #define PCI_DEVICE_ID_ARECA_1214 0x1214
+#define PCI_DEVICE_ID_ARECA_1214 0x1214
#endif
#ifndef PCI_DEVICE_ID_ARECA_1203
- #define PCI_DEVICE_ID_ARECA_1203 0x1203
+#define PCI_DEVICE_ID_ARECA_1203 0x1203
#endif
+#ifndef PCI_DEVICE_ID_ARECA_1884
+#define PCI_DEVICE_ID_ARECA_1884 0x1884
+#endif
+#define ARCMSR_HOURS (1000 * 60 * 60 * 4)
+#define ARCMSR_MINUTES (1000 * 60 * 60)
/*
**********************************************************************************
**
**********************************************************************************
*/
-#define ARC_SUCCESS 0
-#define ARC_FAILURE 1
+#define ARC_SUCCESS 0
+#define ARC_FAILURE 1
/*
*******************************************************************************
** split 64bits dma addressing
*******************************************************************************
*/
-#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
-#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
+#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
+#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
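The two-step shift in dma_addr_hi32() is deliberate: shifting a 32-bit dma_addr_t by a full 32 bits in one go would be undefined behavior in C, while two 16-bit shifts stay defined on every configuration. A self-contained sketch (typedef chosen here only for illustration):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* may be only 32 bits on some configs */

#define dma_addr_hi32(addr)	((uint32_t)(((addr) >> 16) >> 16))
#define dma_addr_lo32(addr)	((uint32_t)((addr) & 0xffffffff))

int main(void)
{
	dma_addr_t a = 0x123456789abcdef0ULL;

	/* Splitting into two 16-bit shifts is safe even where
	 * dma_addr_t is 32 bits wide (there, ">> 32" would be UB). */
	printf("hi=%08x lo=%08x\n",
	       (unsigned)dma_addr_hi32(a), (unsigned)dma_addr_lo32(a));
	return 0;
}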
/*
*******************************************************************************
** MESSAGE CONTROL CODE
@@ -130,7 +135,7 @@ struct CMD_MESSAGE_FIELD
#define FUNCTION_SAY_HELLO 0x0807
#define FUNCTION_SAY_GOODBYE 0x0808
#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
-#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
+#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
#define FUNCTION_HARDWARE_RESET 0x080B
/* ARECA IO CONTROL CODE*/
#define ARCMSR_MESSAGE_READ_RQBUFFER \
@@ -161,18 +166,18 @@ struct CMD_MESSAGE_FIELD
** structure for holding DMA address data
*************************************************************
*/
-#define IS_DMA64 (sizeof(dma_addr_t) == 8)
-#define IS_SG64_ADDR 0x01000000 /* bit24 */
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+#define IS_SG64_ADDR 0x01000000 /* bit24 */
struct SG32ENTRY
{
- __le32 length;
- __le32 address;
+ __le32 length;
+ __le32 address;
}__attribute__ ((packed));
struct SG64ENTRY
{
- __le32 length;
- __le32 address;
- __le32 addresshigh;
+ __le32 length;
+ __le32 address;
+ __le32 addresshigh;
}__attribute__ ((packed));
/*
********************************************************************
@@ -191,50 +196,50 @@ struct QBUFFER
*/
struct FIRMWARE_INFO
{
- uint32_t signature; /*0, 00-03*/
- uint32_t request_len; /*1, 04-07*/
- uint32_t numbers_queue; /*2, 08-11*/
- uint32_t sdram_size; /*3, 12-15*/
- uint32_t ide_channels; /*4, 16-19*/
- char vendor[40]; /*5, 20-59*/
- char model[8]; /*15, 60-67*/
- char firmware_ver[16]; /*17, 68-83*/
- char device_map[16]; /*21, 84-99*/
- uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
- uint8_t cfgSerial[16]; /*26,104-119*/
- uint32_t cfgPicStatus; /*30,120-123*/
+ uint32_t signature; /*0, 00-03*/
+ uint32_t request_len; /*1, 04-07*/
+ uint32_t numbers_queue; /*2, 08-11*/
+ uint32_t sdram_size; /*3, 12-15*/
+ uint32_t ide_channels; /*4, 16-19*/
+ char vendor[40]; /*5, 20-59*/
+ char model[8]; /*15, 60-67*/
+ char firmware_ver[16]; /*17, 68-83*/
+ char device_map[16]; /*21, 84-99*/
+ uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
+ uint8_t cfgSerial[16]; /*26,104-119*/
+ uint32_t cfgPicStatus; /*30,120-123*/
};
/* signature of set and get firmware config */
-#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
-#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
+#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
+#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
/* message code of inbound message register */
-#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
-#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
-#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
-#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
-#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
-#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
-#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
-#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
-#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
+#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
+#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
+#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
+#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
+#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
+#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
+#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
+#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
+#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
/* doorbell interrupt generator */
-#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
-#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
-#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
-#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
+#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
+#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
+#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
+#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
/* ccb areca cdb flag */
-#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
-#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
+#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
+#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
/* outbound firmware ok */
-#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
+#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
/* ARC-1680 Bus Reset*/
-#define ARCMSR_ARC1680_BUS_RESET 0x00000003
+#define ARCMSR_ARC1680_BUS_RESET 0x00000003
/* ARC-1880 Bus Reset*/
-#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
-#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
+#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
+#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
/*
************************************************************************
@@ -277,9 +282,10 @@ struct FIRMWARE_INFO
#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008
/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
#define ARCMSR_MESSAGE_START_BGRB 0x00060008
+#define ARCMSR_MESSAGE_SYNC_TIMER 0x00080008
#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
-#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
+#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
/* ioctl transfer */
@@ -288,7 +294,7 @@ struct FIRMWARE_INFO
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
-#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
+#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
/* data tunnel buffer between user space program and its firmware */
/* user space data to iop 128bytes */
@@ -313,12 +319,12 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
/* Host Interrupt Status */
-#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
+#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
/*
** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
*/
-#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
+#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
/*
** Set if Outbound Doorbell register bits 30:1 have a non-zero
** value. This bit clears only when Outbound Doorbell bits
@@ -331,7 +337,7 @@ struct FIRMWARE_INFO
** Register (FIFO) is not empty. It clears when the Outbound
** Post List FIFO is empty.
*/
-#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
+#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
/*
** This bit indicates a SAS interrupt from a source external to
** the PCIe core. This bit is not maskable.
@@ -340,17 +346,17 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
/*inbound message 0 ready*/
-#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
/*more than 12 request completed in a time*/
#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
/*outbound DATA WRITE isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
+#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
/*outbound DATA READ isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
+#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
/*outbound message 0 ready*/
-#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
/*outbound message cmd isr door bell clear*/
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
@@ -407,18 +413,43 @@ struct FIRMWARE_INFO
#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
/*
*******************************************************************************
+** SPEC. for Areca Type E adapter
+*******************************************************************************
+*/
+#define ARCMSR_SIGNATURE_1884 0x188417D3
+
+#define ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK 0x80000000
+
+#define ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR 0x00000001
+#define ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
+#define ARCMSR_HBEMU_ALL_INTMASKENABLE 0x00000009
+
+/* ARC-1884 doorbell sync */
+#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
+#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
+#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080
+/*
+*******************************************************************************
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
*******************************************************************************
*/
struct ARCMSR_CDB
{
- uint8_t Bus;
- uint8_t TargetID;
- uint8_t LUN;
- uint8_t Function;
- uint8_t CdbLength;
- uint8_t sgcount;
- uint8_t Flags;
+ uint8_t Bus;
+ uint8_t TargetID;
+ uint8_t LUN;
+ uint8_t Function;
+ uint8_t CdbLength;
+ uint8_t sgcount;
+ uint8_t Flags;
#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
#define ARCMSR_CDB_FLAG_BIOS 0x02
#define ARCMSR_CDB_FLAG_WRITE 0x04
@@ -426,21 +457,21 @@ struct ARCMSR_CDB
#define ARCMSR_CDB_FLAG_HEADQ 0x08
#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
- uint8_t msgPages;
- uint32_t msgContext;
- uint32_t DataLength;
- uint8_t Cdb[16];
- uint8_t DeviceStatus;
+ uint8_t msgPages;
+ uint32_t msgContext;
+ uint32_t DataLength;
+ uint8_t Cdb[16];
+ uint8_t DeviceStatus;
#define ARCMSR_DEV_CHECK_CONDITION 0x02
#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
#define ARCMSR_DEV_ABORTED 0xF1
#define ARCMSR_DEV_INIT_FAIL 0xF2
- uint8_t SenseData[15];
+ uint8_t SenseData[15];
union
{
- struct SG32ENTRY sg32entry[1];
- struct SG64ENTRY sg64entry[1];
+ struct SG32ENTRY sg32entry[1];
+ struct SG64ENTRY sg64entry[1];
} u;
};
/*
@@ -480,13 +511,13 @@ struct MessageUnit_B
uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
uint32_t postq_index;
uint32_t doneq_index;
- uint32_t __iomem *drv2iop_doorbell;
- uint32_t __iomem *drv2iop_doorbell_mask;
- uint32_t __iomem *iop2drv_doorbell;
- uint32_t __iomem *iop2drv_doorbell_mask;
- uint32_t __iomem *message_rwbuffer;
- uint32_t __iomem *message_wbuffer;
- uint32_t __iomem *message_rbuffer;
+ uint32_t __iomem *drv2iop_doorbell;
+ uint32_t __iomem *drv2iop_doorbell_mask;
+ uint32_t __iomem *iop2drv_doorbell;
+ uint32_t __iomem *iop2drv_doorbell_mask;
+ uint32_t __iomem *message_rwbuffer;
+ uint32_t __iomem *message_wbuffer;
+ uint32_t __iomem *message_rbuffer;
};
/*
*********************************************************************
@@ -506,7 +537,7 @@ struct MessageUnit_C{
uint32_t diagnostic_rw_data; /*0024 0027*/
uint32_t diagnostic_rw_address_low; /*0028 002B*/
uint32_t diagnostic_rw_address_high; /*002C 002F*/
- uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_status; /*0030 0033*/
uint32_t host_int_mask; /*0034 0037*/
uint32_t dcr_data; /*0038 003B*/
uint32_t dcr_address; /*003C 003F*/
@@ -518,12 +549,12 @@ struct MessageUnit_C{
uint32_t iop_int_mask; /*0054 0057*/
uint32_t iop_inbound_queue_port; /*0058 005B*/
uint32_t iop_outbound_queue_port; /*005C 005F*/
- uint32_t inbound_free_list_index; /*0060 0063*/
- uint32_t inbound_post_list_index; /*0064 0067*/
- uint32_t outbound_free_list_index; /*0068 006B*/
- uint32_t outbound_post_list_index; /*006C 006F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t outbound_free_list_index; /*0068 006B*/
+ uint32_t outbound_post_list_index; /*006C 006F*/
uint32_t inbound_doorbell_clear; /*0070 0073*/
- uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
uint32_t last_used_message_source_address_low; /*0078 007B*/
uint32_t last_used_message_source_address_high; /*007C 007F*/
uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
@@ -531,7 +562,7 @@ struct MessageUnit_C{
uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
uint32_t utility_A_int_counter_timer; /*0098 009B*/
uint32_t outbound_doorbell; /*009C 009F*/
- uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
uint32_t message_source_address_index; /*00A4 00A7*/
uint32_t message_done_queue_index; /*00A8 00AB*/
uint32_t reserved0; /*00AC 00AF*/
@@ -553,10 +584,10 @@ struct MessageUnit_C{
uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
- uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
uint32_t write_sequence; /*00FC 00FF*/
uint32_t reserved1[34]; /*0100 0187*/
- uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
uint32_t message_wbuffer[32]; /*2000 207F*/
uint32_t reserved3[32]; /*2080 20FF*/
uint32_t message_rbuffer[32]; /*2100 217F*/
@@ -614,115 +645,208 @@ struct MessageUnit_D {
u32 __iomem *msgcode_rwbuffer; /* 0x2200 */
};
/*
+*********************************************************************
+** Messaging Unit (MU) of Type E processor(LSI)
+*********************************************************************
+*/
+struct MessageUnit_E{
+ uint32_t iobound_doorbell; /*0000 0003*/
+ uint32_t write_sequence_3xxx; /*0004 0007*/
+ uint32_t host_diagnostic_3xxx; /*0008 000B*/
+ uint32_t posted_outbound_doorbell; /*000C 000F*/
+ uint32_t master_error_attribute; /*0010 0013*/
+ uint32_t master_error_address_low; /*0014 0017*/
+ uint32_t master_error_address_high; /*0018 001B*/
+ uint32_t hcb_size; /*001C 001F*/
+ uint32_t inbound_doorbell; /*0020 0023*/
+ uint32_t diagnostic_rw_data; /*0024 0027*/
+ uint32_t diagnostic_rw_address_low; /*0028 002B*/
+ uint32_t diagnostic_rw_address_high; /*002C 002F*/
+ uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_mask; /*0034 0037*/
+ uint32_t dcr_data; /*0038 003B*/
+ uint32_t dcr_address; /*003C 003F*/
+ uint32_t inbound_queueport; /*0040 0043*/
+ uint32_t outbound_queueport; /*0044 0047*/
+ uint32_t hcb_pci_address_low; /*0048 004B*/
+ uint32_t hcb_pci_address_high; /*004C 004F*/
+ uint32_t iop_int_status; /*0050 0053*/
+ uint32_t iop_int_mask; /*0054 0057*/
+ uint32_t iop_inbound_queue_port; /*0058 005B*/
+ uint32_t iop_outbound_queue_port; /*005C 005F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t reply_post_producer_index; /*0068 006B*/
+ uint32_t reply_post_consumer_index; /*006C 006F*/
+ uint32_t inbound_doorbell_clear; /*0070 0073*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t last_used_message_source_address_low; /*0078 007B*/
+ uint32_t last_used_message_source_address_high; /*007C 007F*/
+ uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
+ uint32_t message_dest_address_index; /*0090 0093*/
+ uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
+ uint32_t utility_A_int_counter_timer; /*0098 009B*/
+ uint32_t outbound_doorbell; /*009C 009F*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t message_source_address_index; /*00A4 00A7*/
+ uint32_t message_done_queue_index; /*00A8 00AB*/
+ uint32_t reserved0; /*00AC 00AF*/
+ uint32_t inbound_msgaddr0; /*00B0 00B3*/
+ uint32_t inbound_msgaddr1; /*00B4 00B7*/
+ uint32_t outbound_msgaddr0; /*00B8 00BB*/
+ uint32_t outbound_msgaddr1; /*00BC 00BF*/
+ uint32_t inbound_queueport_low; /*00C0 00C3*/
+ uint32_t inbound_queueport_high; /*00C4 00C7*/
+ uint32_t outbound_queueport_low; /*00C8 00CB*/
+ uint32_t outbound_queueport_high; /*00CC 00CF*/
+ uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
+ uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
+ uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
+ uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
+ uint32_t message_dest_queue_port_low; /*00E0 00E3*/
+ uint32_t message_dest_queue_port_high; /*00E4 00E7*/
+ uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
+ uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
+ uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
+ uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t write_sequence; /*00FC 00FF*/
+ uint32_t reserved1[34]; /*0100 0187*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t message_wbuffer[32]; /*2000 207F*/
+ uint32_t reserved3[32]; /*2080 20FF*/
+ uint32_t message_rbuffer[32]; /*2100 217F*/
+ uint32_t reserved4[32]; /*2180 21FF*/
+ uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
+};
+
+typedef struct deliver_completeQ {
+ uint16_t cmdFlag;
+ uint16_t cmdSMID;
+ uint16_t cmdLMID; // reserved (0)
+ uint16_t cmdFlag2; // reserved (0)
+} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q;
+/*
*******************************************************************************
** Adapter Control Block
*******************************************************************************
*/
struct AdapterControlBlock
{
- uint32_t adapter_type; /* adapter A,B..... */
- #define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
- #define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
- #define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */
- #define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */
- u32 roundup_ccbsize;
- struct pci_dev * pdev;
- struct Scsi_Host * host;
- unsigned long vir2phy_offset;
+ uint32_t adapter_type; /* adapter A,B..... */
+#define ACB_ADAPTER_TYPE_A 0x00000000 /* hba I IOP */
+#define ACB_ADAPTER_TYPE_B 0x00000001 /* hbb M IOP */
+#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
+#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
+#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
+ u32 roundup_ccbsize;
+ struct pci_dev * pdev;
+ struct Scsi_Host * host;
+ unsigned long vir2phy_offset;
/* Offset is used in making arc cdb physical to virtual calculations */
- uint32_t outbound_int_enable;
- uint32_t cdb_phyaddr_hi32;
- uint32_t reg_mu_acc_handle0;
- spinlock_t eh_lock;
- spinlock_t ccblist_lock;
- spinlock_t postq_lock;
- spinlock_t doneq_lock;
- spinlock_t rqbuffer_lock;
- spinlock_t wqbuffer_lock;
+ uint32_t outbound_int_enable;
+ uint32_t cdb_phyaddr_hi32;
+ uint32_t reg_mu_acc_handle0;
+ spinlock_t eh_lock;
+ spinlock_t ccblist_lock;
+ spinlock_t postq_lock;
+ spinlock_t doneq_lock;
+ spinlock_t rqbuffer_lock;
+ spinlock_t wqbuffer_lock;
union {
struct MessageUnit_A __iomem *pmuA;
struct MessageUnit_B *pmuB;
struct MessageUnit_C __iomem *pmuC;
struct MessageUnit_D *pmuD;
+ struct MessageUnit_E __iomem *pmuE;
};
/* message unit ATU inbound base address0 */
- void __iomem *mem_base0;
- void __iomem *mem_base1;
- uint32_t acb_flags;
+ void __iomem *mem_base0;
+ void __iomem *mem_base1;
+ uint32_t acb_flags;
u16 dev_id;
- uint8_t adapter_index;
- #define ACB_F_SCSISTOPADAPTER 0x0001
- #define ACB_F_MSG_STOP_BGRB 0x0002
- /* stop RAID background rebuild */
- #define ACB_F_MSG_START_BGRB 0x0004
- /* stop RAID background rebuild */
- #define ACB_F_IOPDATA_OVERFLOW 0x0008
- /* iop message data rqbuffer overflow */
- #define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
- /* message clear wqbuffer */
- #define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
- /* message clear rqbuffer */
- #define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
- #define ACB_F_BUS_RESET 0x0080
- #define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
+ uint8_t adapter_index;
+#define ACB_F_SCSISTOPADAPTER 0x0001
+#define ACB_F_MSG_STOP_BGRB 0x0002
+/* stop RAID background rebuild */
+#define ACB_F_MSG_START_BGRB 0x0004
+/* stop RAID background rebuild */
+#define ACB_F_IOPDATA_OVERFLOW 0x0008
+/* iop message data rqbuffer overflow */
+#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
+/* message clear wqbuffer */
+#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
+/* message clear rqbuffer */
+#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
+#define ACB_F_BUS_RESET 0x0080
+#define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
- #define ACB_F_IOP_INITED 0x0100
- /* iop init */
- #define ACB_F_ABORT 0x0200
- #define ACB_F_FIRMWARE_TRAP 0x0400
- struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
+#define ACB_F_IOP_INITED 0x0100
+/* iop init */
+#define ACB_F_ABORT 0x0200
+#define ACB_F_FIRMWARE_TRAP 0x0400
+#define ACB_F_MSG_GET_CONFIG 0x1000
+ struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
- struct list_head ccb_free_list;
+ struct list_head ccb_free_list;
/* head of free ccb list */
- atomic_t ccboutstandingcount;
+ atomic_t ccboutstandingcount;
	/*The present number of outstanding commands in the IOP
	that are waiting to be handled by the FW*/
- void * dma_coherent;
+ void * dma_coherent;
/* dma_coherent used for memory free */
- dma_addr_t dma_coherent_handle;
+ dma_addr_t dma_coherent_handle;
/* dma_coherent_handle used for memory free */
- dma_addr_t dma_coherent_handle2;
- void *dma_coherent2;
- unsigned int uncache_size;
- uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
+ dma_addr_t dma_coherent_handle2;
+ void *dma_coherent2;
+ unsigned int uncache_size;
+ uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for read from 80331 */
- int32_t rqbuf_getIndex;
+ int32_t rqbuf_getIndex;
/* first of read buffer */
- int32_t rqbuf_putIndex;
+ int32_t rqbuf_putIndex;
/* last of read buffer */
- uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
+ uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for write to 80331 */
- int32_t wqbuf_getIndex;
+ int32_t wqbuf_getIndex;
/* first of write buffer */
- int32_t wqbuf_putIndex;
+ int32_t wqbuf_putIndex;
/* last of write buffer */
- uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
+ uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
/* id0 ..... id15, lun0...lun7 */
-#define ARECA_RAID_GONE 0x55
-#define ARECA_RAID_GOOD 0xaa
- uint32_t num_resets;
- uint32_t num_aborts;
- uint32_t signature;
- uint32_t firm_request_len;
- uint32_t firm_numbers_queue;
- uint32_t firm_sdram_size;
- uint32_t firm_hd_channels;
- uint32_t firm_cfg_version;
+#define ARECA_RAID_GONE 0x55
+#define ARECA_RAID_GOOD 0xaa
+ uint32_t num_resets;
+ uint32_t num_aborts;
+ uint32_t signature;
+ uint32_t firm_request_len;
+ uint32_t firm_numbers_queue;
+ uint32_t firm_sdram_size;
+ uint32_t firm_hd_channels;
+ uint32_t firm_cfg_version;
char firm_model[12];
char firm_version[20];
char device_map[20]; /*21,84-99*/
- struct work_struct arcmsr_do_message_isr_bh;
- struct timer_list eternal_timer;
+ struct work_struct arcmsr_do_message_isr_bh;
+ struct timer_list eternal_timer;
unsigned short fw_flag;
- #define FW_NORMAL 0x0000
- #define FW_BOG 0x0001
- #define FW_DEADLOCK 0x0010
- atomic_t rq_map_token;
- atomic_t ante_token_value;
- uint32_t maxOutstanding;
- int vector_count;
+#define FW_NORMAL 0x0000
+#define FW_BOG 0x0001
+#define FW_DEADLOCK 0x0010
+ atomic_t rq_map_token;
+ atomic_t ante_token_value;
+ uint32_t maxOutstanding;
+ int vector_count;
+ uint32_t maxFreeCCB;
+ struct timer_list refresh_timer;
+ uint32_t doneq_index;
+ uint32_t ccbsize;
+ uint32_t in_doorbell;
+ uint32_t out_doorbell;
+ uint32_t completionQ_entry;
+ pCompletion_Q pCompletionQ;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
@@ -732,29 +856,30 @@ struct AdapterControlBlock
*/
struct CommandControlBlock{
/*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
- struct list_head list; /*x32: 8byte, x64: 16byte*/
- struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
- struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
- uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
- uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
- uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
- #define CCB_FLAG_READ 0x0000
- #define CCB_FLAG_WRITE 0x0001
- #define CCB_FLAG_ERROR 0x0002
- #define CCB_FLAG_FLUSHCACHE 0x0004
- #define CCB_FLAG_MASTER_ABORTED 0x0008
- uint16_t startdone; /*x32:2byte,x32:2byte*/
- #define ARCMSR_CCB_DONE 0x0000
- #define ARCMSR_CCB_START 0x55AA
- #define ARCMSR_CCB_ABORTED 0xAA55
- #define ARCMSR_CCB_ILLEGAL 0xFFFF
- #if BITS_PER_LONG == 64
+ struct list_head list; /*x32: 8byte, x64: 16byte*/
+ struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
+ struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
+ uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
+ uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
+ uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
+#define CCB_FLAG_READ 0x0000
+#define CCB_FLAG_WRITE 0x0001
+#define CCB_FLAG_ERROR 0x0002
+#define CCB_FLAG_FLUSHCACHE 0x0004
+#define CCB_FLAG_MASTER_ABORTED 0x0008
+	uint16_t        		startdone;	        	/*x32:2byte,x64:2byte*/
+#define ARCMSR_CCB_DONE 0x0000
+#define ARCMSR_CCB_START 0x55AA
+#define ARCMSR_CCB_ABORTED 0xAA55
+#define ARCMSR_CCB_ILLEGAL 0xFFFF
+ uint32_t smid;
+#if BITS_PER_LONG == 64
/* ======================512+64 bytes======================== */
- uint32_t reserved[5]; /*24 byte*/
- #else
+ uint32_t reserved[4]; /*16 byte*/
+#else
/* ======================512+32 bytes======================== */
- uint32_t reserved; /*8 byte*/
- #endif
+ // uint32_t reserved; /*4 byte*/
+#endif
/* ======================================================= */
struct ARCMSR_CDB arcmsr_cdb;
};
@@ -788,13 +913,13 @@ struct SENSE_DATA
** Outbound Interrupt Status Register - OISR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
-#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
-#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
+#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
+#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
(ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
|ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
|ARCMSR_MU_OUTBOUND_DOORBELL_INT \
@@ -805,13 +930,13 @@ struct SENSE_DATA
** Outbound Interrupt Mask Register - OIMR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
-#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
-#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
+#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
+#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 21f6421536a0..75e828bd30e3 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -75,6 +75,26 @@ MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
+static int msix_enable = 1;
+module_param(msix_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");
+
+static int msi_enable = 1;
+module_param(msi_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");
+
+static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+module_param(host_can_queue, int, S_IRUGO);
+MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");
+
+static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+module_param(cmd_per_lun, int, S_IRUGO);
+MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
+
+static int set_date_time = 0;
+module_param(set_date_time, int, S_IRUGO);
+MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
+
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12
@@ -102,19 +122,19 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(struct timer_list *t);
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
+static void arcmsr_set_iop_datetime(struct timer_list *);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -127,15 +147,15 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.name = "Areca SAS/SATA RAID driver",
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
- .eh_abort_handler = arcmsr_abort,
+ .eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
- .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
- .this_id = ARCMSR_SCSI_INITIATOR_ID,
- .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
- .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
- .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
+ .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
+ .this_id = ARCMSR_SCSI_INITIATOR_ID,
+ .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
+ .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
+ .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
.no_write_same = 1,
@@ -184,13 +204,15 @@ static struct pci_device_id arcmsr_device_id_table[] = {
.driver_data = ACB_ADAPTER_TYPE_A},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
.driver_data = ACB_ADAPTER_TYPE_C},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
+ .driver_data = ACB_ADAPTER_TYPE_E},
{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
.name = "arcmsr",
- .id_table = arcmsr_device_id_table,
+ .id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
.suspend = arcmsr_suspend,
@@ -206,7 +228,8 @@ static void arcmsr_free_mu(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
- case ACB_ADAPTER_TYPE_D: {
+ case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E: {
dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
acb->dma_coherent2, acb->dma_coherent_handle2);
break;
@@ -271,6 +294,20 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
acb->mem_base0 = mem_base0;
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ acb->pmuE = ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (!acb->pmuE) {
+ pr_notice("arcmsr%d: memory mapping region fail \n",
+ acb->host->host_no);
+ return false;
+ }
+ writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
+ writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ break;
+ }
}
return true;
}
@@ -295,6 +332,9 @@ static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
iounmap(acb->mem_base0);
break;
+ case ACB_ADAPTER_TYPE_E:
+ iounmap(acb->pmuE);
+ break;
}
}
@@ -408,6 +448,24 @@ static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
return false;
}
+static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+ int i;
+ uint32_t read_doorbell;
+ struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
+
+ for (i = 0; i < 2000; i++) {
+ read_doorbell = readl(&phbcmu->iobound_doorbell);
+ if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(0, &phbcmu->host_int_status); /*clear interrupt*/
+ pACB->in_doorbell = read_doorbell;
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+ return false;
+}
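The Type E message handshake added above is edge-triggered by XOR: the driver keeps a shadow copy of the last doorbell value (pACB->in_doorbell), and a message counts as done when the ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE bit has flipped relative to that shadow. A standalone sketch of the same toggle protocol (simulated register, not driver code):

#include <stdint.h>
#include <stdio.h>

#define MSG_CMD_DONE 0x00000008u

/* Firmware side: acknowledge a message by flipping the bit. */
static uint32_t iop_ack(uint32_t doorbell)
{
	return doorbell ^ MSG_CMD_DONE;
}

/* Driver side: a message completed iff the bit differs from our shadow. */
static int msg_done(uint32_t doorbell, uint32_t *shadow)
{
	if ((doorbell ^ *shadow) & MSG_CMD_DONE) {
		*shadow = doorbell;	/* remember the new level */
		return 1;
	}
	return 0;
}

int main(void)
{
	uint32_t shadow = 0, reg = 0;

	reg = iop_ack(reg);
	printf("first ack seen: %d\n", msg_done(reg, &shadow));	/* 1 */
	printf("no new ack:     %d\n", msg_done(reg, &shadow));	/* 0 */
	reg = iop_ack(reg);
	printf("second ack:     %d\n", msg_done(reg, &shadow));	/* 1 */
	return 0;
}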
+
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -475,6 +533,24 @@ static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
} while (retry_count != 0);
}
+static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
+{
+ int retry_count = 30;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ do {
+ if (arcmsr_hbaE_wait_msgint_ready(pACB))
+ break;
+ retry_count--;
+ pr_notice("arcmsr%d: wait 'flush adapter "
+ "cache' timeout, retry count down = %d\n",
+ pACB->host->host_no, retry_count);
+ } while (retry_count != 0);
+}
+
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -495,6 +571,9 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_flush_cache(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_flush_cache(acb);
+ break;
}
}
@@ -577,6 +656,23 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ uint32_t completeQ_size;
+ completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
+ acb->roundup_ccbsize = roundup(completeQ_size, 32);
+ dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+ &dma_coherent_handle, GFP_KERNEL);
+ if (!dma_coherent){
+ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
+ return false;
+ }
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = dma_coherent;
+ acb->pCompletionQ = dma_coherent;
+ acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
+ acb->doneq_index = 0;
+ }
+ break;
default:
break;
}
@@ -610,7 +706,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->host->max_sectors = max_xfer_len/512;
acb->host->sg_tablesize = max_sg_entrys;
roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
- acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
+ acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
if(!dma_coherent){
printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -619,9 +715,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->dma_coherent = dma_coherent;
acb->dma_coherent_handle = dma_coherent_handle;
memset(dma_coherent, 0, acb->uncache_size);
+ acb->ccbsize = roundup_ccbsize;
ccb_tmp = dma_coherent;
acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
- for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
+ for(i = 0; i < acb->maxFreeCCB; i++){
cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
@@ -630,11 +727,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C:
case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E:
ccb_tmp->cdb_phyaddr = cdb_phyaddr;
break;
}
acb->pccb_pool[i] = ccb_tmp;
ccb_tmp->acb = acb;
+ ccb_tmp->smid = (u32)i << 16;
INIT_LIST_HEAD(&ccb_tmp->list);
list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
@@ -654,6 +753,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
struct scsi_device *psdev;
char diff, temp;
+ acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -683,6 +783,13 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ break;
+ }
}
atomic_inc(&acb->rq_map_token);
if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
@@ -723,17 +830,26 @@ arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
unsigned long flags;
int nvec, i;
+ if (msix_enable == 0)
+ goto msi_int0;
nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
PCI_IRQ_MSIX);
if (nvec > 0) {
pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
flags = 0;
} else {
- nvec = pci_alloc_irq_vectors(pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+msi_int0:
+ if (msi_enable == 1) {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec == 1) {
+ dev_info(&pdev->dev, "msi enabled\n");
+ goto msi_int1;
+ }
+ }
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
if (nvec < 1)
return FAILED;
-
+msi_int1:
flags = IRQF_SHARED;
}
@@ -755,6 +871,24 @@ out_free_irq:
return FAILED;
}
+static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
+{
+ INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+ atomic_set(&pacb->rq_map_token, 16);
+ atomic_set(&pacb->ante_token_value, 16);
+ pacb->fw_flag = FW_NORMAL;
+ timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
+ pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
+ add_timer(&pacb->eternal_timer);
+}
+
+static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
+{
+ timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
+ pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
+ add_timer(&pacb->refresh_timer);
+}
+
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
@@ -789,8 +923,12 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_lun = ARCMSR_MAX_TARGETLUN;
host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
- host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
- host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
+ if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
+ host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+ host->can_queue = host_can_queue; /* max simultaneous cmds */
+ if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
+ cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+ host->cmd_per_lun = cmd_per_lun;
host->this_id = ARCMSR_SCSI_INITIATOR_ID;
host->unique_id = (bus << 8) | dev_fun;
pci_set_drvdata(pdev, host);
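Note the sanitization pattern in the hunk above: an out-of-range host_can_queue or cmd_per_lun is reset to the default, not clamped to the nearest bound. A tiny sketch of that choice, using the constants introduced in arcmsr.h:

#include <stdio.h>

#define ARCMSR_MIN_OUTSTANDING_CMD	32
#define ARCMSR_MAX_OUTSTANDING_CMD	1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD	128

/* Out-of-range module parameters fall back to the default value
 * rather than being clamped to the violated bound. */
static int sanitize(int v)
{
	if (v < ARCMSR_MIN_OUTSTANDING_CMD || v > ARCMSR_MAX_OUTSTANDING_CMD)
		return ARCMSR_DEFAULT_OUTSTANDING_CMD;
	return v;
}

int main(void)
{
	printf("%d %d %d\n", sanitize(8), sanitize(256), sanitize(4096));
	/* -> 128 256 128 */
	return 0;
}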
@@ -833,18 +971,16 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto scsi_host_remove;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
scsi_scan_host(host);
return 0;
out_free_sysfs:
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
del_timer_sync(&acb->eternal_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
@@ -887,6 +1023,8 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
@@ -924,13 +1062,9 @@ static int arcmsr_resume(struct pci_dev *pdev)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
return 0;
controller_stop:
arcmsr_stop_adapter_bgrb(acb);
@@ -998,6 +1132,21 @@ static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
return true;
}
+static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'abort all outstanding "
+ "command' timeout\n", pACB->host->host_no);
+ return false;
+ }
+ return true;
+}
+
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
uint8_t rtnval = 0;
@@ -1020,6 +1169,9 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtnval = arcmsr_hbaD_abort_allcmd(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtnval = arcmsr_hbaE_abort_allcmd(acb);
+ break;
}
return rtnval;
}
@@ -1050,7 +1202,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
struct scsi_cmnd *pcmd = ccb->pcmd;
struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
- pcmd->result = DID_OK << 16;
+ pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
if (sensebuffer) {
int sense_data_length =
sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
@@ -1059,6 +1211,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
+ pcmd->result |= (DRIVER_SENSE << 24);
}
}
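The result-word change above follows the kernel's packing of the SCSI result as (driver << 24) | (host << 16) | (msg << 8) | status, where the status byte is historically stored shifted left by one. A sketch of how the bits combine, with the constant values mirroring the kernel headers of this era:

#include <stdint.h>
#include <stdio.h>

#define DID_OK		0x00	/* host byte: no host-side error */
#define CHECK_CONDITION	0x01	/* status, stored as (val << 1) */
#define DRIVER_SENSE	0x08	/* driver byte: sense data is valid */

int main(void)
{
	/* Base result set in arcmsr_report_sense_info() above. */
	uint32_t result = (DID_OK << 16) | (CHECK_CONDITION << 1);

	/* ORed in only once sense data has been copied to the buffer. */
	result |= (uint32_t)DRIVER_SENSE << 24;

	printf("result=0x%08x\n", (unsigned)result);	/* 0x08000002 */
	return 0;
}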
@@ -1092,6 +1245,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ orig_mask = readl(&reg->host_int_mask);
+ writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
+ readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
+ }
+ break;
}
return orig_mask;
}
@@ -1196,7 +1356,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
/*clear and abort all outbound posted Q*/
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
- && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ && (i++ < acb->maxOutstanding)) {
pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -1226,7 +1386,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C __iomem *reg = acb->pmuC;
- while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
/*need to do*/
flag_ccb = readl(&reg->outbound_queueport_low);
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
@@ -1280,6 +1440,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
pmu->doneq_index = 0x40FF;
}
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_postqueue_isr(acb);
+ break;
}
}
@@ -1293,13 +1456,15 @@ static void arcmsr_remove(struct pci_dev *pdev)
scsi_remove_host(host);
flush_work(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
- for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
+ for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
if (!atomic_read(&acb->ccboutstandingcount))
break;
arcmsr_interrupt(acb);/* FIXME: need spinlock */
@@ -1311,7 +1476,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
arcmsr_abort_allcmd(acb);
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -1335,6 +1500,8 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
flush_work(&acb->arcmsr_do_message_isr_bh);
@@ -1396,6 +1563,13 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
writel(intmask_org | mask, reg->pcief0_int_enable);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
+ writel(intmask_org & mask, &reg->host_int_mask);
+ break;
+ }
}
}
@@ -1527,6 +1701,16 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
spin_unlock_irqrestore(&acb->postq_lock, flags);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *pmu = acb->pmuE;
+ u32 ccb_post_stamp, arc_cdb_size;
+
+ arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
+ ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
+ writel(0, &pmu->inbound_queueport_high);
+ writel(ccb_post_stamp, &pmu->inbound_queueport_low);
+ break;
+ }
}
}
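The Type E post path above encodes two things into one 32-bit queue-port write: the CCB's SMID (pre-shifted into bits 31..16 when the pool was built, ccb->smid = i << 16) and the CDB length in 64-byte units in the low bits. A sketch of the encoding (function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* bits 31..16: SMID of the CCB; low bits: (arc_cdb_size - 1) / 64 */
static uint32_t post_stamp(uint32_t smid_index, uint32_t arc_cdb_size)
{
	uint32_t size = arc_cdb_size > 0x300 ? 0x300 : arc_cdb_size;

	return (smid_index << 16) | ((size - 1) >> 6);
}

int main(void)
{
	printf("0x%08x\n", (unsigned)post_stamp(5, 504));	/* 0x00050007 */
	return 0;
}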
@@ -1580,6 +1764,20 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
"timeout\n", pACB->host->host_no);
}
+static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ "timeout\n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -1599,6 +1797,9 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_stop_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_stop_bgrb(acb);
+ break;
}
}
@@ -1633,6 +1834,12 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1673,6 +1880,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1702,6 +1915,11 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
+ }
+ break;
}
return qbuffer;
}
@@ -1732,6 +1950,11 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
+ }
+ break;
}
return pqbuffer;
}
@@ -1785,7 +2008,7 @@ arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
uint8_t __iomem *iop_data;
uint32_t iop_len;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
iop_data = (uint8_t __iomem *)prbuffer->data;
iop_len = readl(&prbuffer->data_len);
@@ -1871,7 +2094,7 @@ arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
arcmsr_write_ioctldata2iop_in_DWORD(acb);
return;
}
@@ -1968,6 +2191,33 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
+static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t outbound_doorbell, in_doorbell, tmp;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
+ do {
+ writel(0, &reg->host_int_status); /* clear interrupt */
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ arcmsr_iop2drv_data_wrote_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
+ arcmsr_iop2drv_data_read_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ arcmsr_hbaE_message_isr(pACB);
+ }
+ tmp = in_doorbell;
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = tmp ^ in_doorbell;
+ } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
+ | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
+ pACB->in_doorbell = in_doorbell;
+}
+
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t flag_ccb;
@@ -2077,6 +2327,33 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t doneq_index;
+ uint16_t cmdSMID;
+ int error;
+ struct MessageUnit_E __iomem *pmu;
+ struct CommandControlBlock *ccb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ pmu = acb->pmuE;
+ while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ ccb = acb->pccb_pool[cmdSMID];
+ error = (acb->pCompletionQ[doneq_index].cmdFlag
+ & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_drain_donequeue(acb, ccb, error);
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ }
+ acb->doneq_index = doneq_index;
+ writel(doneq_index, &pmu->reply_post_consumer_index);
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
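arcmsr_hbaE_postqueue_isr() above is a classic producer/consumer reply ring: the hardware advances reply_post_producer_index, the driver walks its local doneq_index up to it (wrapping at completionQ_entry), looks up each completed CCB by SMID, then publishes the new consumer index back to the adapter. A self-contained sketch of that drain loop (sizes and names illustrative):

#include <stdint.h>
#include <stdio.h>

#define QDEPTH 512

struct cq_entry {
	uint16_t flag;
	uint16_t smid;
};

/* Walk from the local consumer index up to the hardware producer
 * index, wrapping at the ring size, then publish the new consumer. */
static unsigned drain(struct cq_entry *q, uint32_t *consumer,
		      uint32_t producer)
{
	unsigned handled = 0;
	uint32_t i = *consumer;

	while (i != (producer & 0xFFFF)) {
		printf("complete SMID %u\n", q[i].smid);
		handled++;
		if (++i >= QDEPTH)
			i = 0;
	}
	*consumer = i;	/* would be written to reply_post_consumer_index */
	return handled;
}

int main(void)
{
	struct cq_entry q[QDEPTH] = { {0, 0} };
	uint32_t consumer = 510;

	printf("handled=%u\n", drain(q, &consumer, 3));	/* 5, wrapped */
	return 0;
}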
/*
**********************************************************************************
** Handle a message interrupt
@@ -2090,7 +2367,8 @@ static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_A __iomem *reg = acb->pmuA;
/*clear interrupt and message state*/
writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
{
@@ -2098,7 +2376,8 @@ static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
**********************************************************************************
@@ -2114,7 +2393,8 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_C __iomem *reg = acb->pmuC;
/*clear interrupt and message state*/
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
@@ -2123,7 +2403,17 @@ static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
readl(reg->outbound_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ writel(0, &reg->host_int_status);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
@@ -2229,6 +2519,31 @@ static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
return IRQ_HANDLED;
}
+static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t host_interrupt_status;
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ host_interrupt_status = readl(&pmu->host_int_status) &
+ (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ /* MU ioctl transfer doorbell interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
+ arcmsr_hbaE_doorbell_isr(pACB);
+ }
+ /* MU post queue interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
+ arcmsr_hbaE_postqueue_isr(pACB);
+ }
+ host_interrupt_status = readl(&pmu->host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
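[editor's note: the handler follows the usual shared-interrupt contract: return IRQ_NONE when the masked status shows no cause (the line may belong to another device), otherwise service every cause and re-read the status before returning so an event raised mid-service is not dropped. A compact standalone model of that control flow:]

#include <stdint.h>
#include <stdio.h>

#define SRC_POSTQ 0x1
#define SRC_DBELL 0x2

static uint32_t fake_status = SRC_DBELL;	/* models host_int_status */

static uint32_t read_status(void) { return fake_status; }
static void service(uint32_t src) { fake_status &= ~src; printf("serviced %#x\n", src); }

static int isr(void)
{
	uint32_t status = read_status() & (SRC_POSTQ | SRC_DBELL);

	if (!status)
		return 0;	/* IRQ_NONE: not ours */
	do {
		if (status & SRC_DBELL)
			service(SRC_DBELL);
		if (status & SRC_POSTQ)
			service(SRC_POSTQ);
		status = read_status();	/* re-check before leaving */
	} while (status & (SRC_POSTQ | SRC_DBELL));
	return 1;	/* IRQ_HANDLED */
}

int main(void)
{
	printf("isr -> %d\n", isr());
	printf("isr -> %d\n", isr());	/* nothing pending: IRQ_NONE */
	return 0;
}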
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -2242,6 +2557,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
return arcmsr_hbaC_handle_isr(acb);
case ACB_ADAPTER_TYPE_D:
return arcmsr_hbaD_handle_isr(acb);
+ case ACB_ADAPTER_TYPE_E:
+ return arcmsr_hbaE_handle_isr(acb);
default:
return IRQ_NONE;
}
@@ -2636,74 +2953,66 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(arcmsr_queue_command)
-static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
- char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
- char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
int count;
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
- printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
- miscellaneous data' timeout \n", acb->host->host_no);
- return false;
- }
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
+ uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
+ uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
+ uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
+ uint32_t *firm_model = &rwbuffer[15];
+ uint32_t *firm_version = &rwbuffer[17];
+ uint32_t *device_map = &rwbuffer[21];
+
+ count = 2;
+ while (count) {
+ *acb_firm_model = readl(firm_model);
acb_firm_model++;
- iop_firm_model++;
+ firm_model++;
count--;
}
-
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
+ count = 4;
+ while (count) {
+ *acb_firm_version = readl(firm_version);
acb_firm_version++;
- iop_firm_version++;
+ firm_version++;
count--;
}
-
- count=16;
- while(count){
- *acb_device_map = readb(iop_device_map);
+ count = 4;
+ while (count) {
+ *acb_device_map = readl(device_map);
acb_device_map++;
- iop_device_map++;
+ device_map++;
count--;
}
+ pACB->signature = readl(&rwbuffer[0]);
+ pACB->firm_request_len = readl(&rwbuffer[1]);
+ pACB->firm_numbers_queue = readl(&rwbuffer[2]);
+ pACB->firm_sdram_size = readl(&rwbuffer[3]);
+ pACB->firm_hd_channels = readl(&rwbuffer[4]);
+ pACB->firm_cfg_version = readl(&rwbuffer[25]);
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
- acb->signature = readl(&reg->message_rwbuffer[0]);
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ pACB->host->host_no,
+ pACB->firm_model,
+ pACB->firm_version);
+}
+
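[editor's note: the new arcmsr_get_adapter_config() collapses four nearly identical per-adapter copies into one helper and switches the copy from byte-wide readb() loops (8/16/16 iterations) to dword-wide readl() loops (2/4/4 iterations) over the same rwbuffer layout: model at dword 15, version at 17, device map at 21. A standalone sketch of the dword copy with dummy data, host-endian like the driver:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_dwords(uint32_t *dst, const uint32_t *src, int count)
{
	while (count--)
		*dst++ = *src++;	/* the driver uses readl() here */
}

int main(void)
{
	uint32_t rwbuffer[32] = {0};	/* stands in for message_rwbuffer */
	union { uint32_t w[2]; char s[8]; } model = {{0}};
	union { uint32_t w[4]; char s[16]; } version = {{0}};

	memcpy(&rwbuffer[15], "RAID-X  ", 8);		/* firm_model */
	memcpy(&rwbuffer[17], "V1.51 2018-01-01", 16);	/* firm_version */
	copy_dwords(model.w, &rwbuffer[15], 2);
	copy_dwords(version.w, &rwbuffer[17], 4);
	printf("model=%.8s version=%.16s\n", model.s, version.s);
	return 0;
}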
+static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+
+ arcmsr_wait_firmware_ready(acb);
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout\n",
+ acb->host->host_no);
+ return false;
+ }
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- /*firm_model,15,60-67*/
- char __iomem *iop_firm_version;
- /*firm_version,17,68-83*/
- char __iomem *iop_device_map;
- /*firm_version,21,84-99*/
- int count;
-
- iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
- iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
- iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
arcmsr_wait_firmware_ready(acb);
writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
@@ -2717,127 +3026,43 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
miscellaneous data' timeout \n", acb->host->host_no);
return false;
}
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
-
- count = 16;
- while(count){
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
- }
-
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
-
- acb->signature = readl(&reg->message_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- /*firm_ide_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*firm_ide_channels,4,16-19*/
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
- uint32_t intmask_org, Index, firmware_state = 0;
+ uint32_t intmask_org;
struct MessageUnit_C __iomem *reg = pACB->pmuC;
- char *acb_firm_model = pACB->firm_model;
- char *acb_firm_version = pACB->firm_version;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
- char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
- int count;
+
/* disable all outbound interrupt */
intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
/* wait firmware ready */
- do {
- firmware_state = readl(&reg->outbound_msgaddr1);
- } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
+ arcmsr_wait_firmware_ready(pACB);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
/* wait message ready */
- for (Index = 0; Index < 2000; Index++) {
- if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
- writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
- break;
- }
- udelay(10);
- } /*max 1 seconds*/
- if (Index >= 2000) {
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- pACB->host->host_no,
- pACB->firm_model,
- pACB->firm_version);
- pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
- pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
- pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
- pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
- pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*all interrupt service will be enable at arcmsr_iop_init*/
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- char __iomem *iop_firm_version;
- char __iomem *iop_device_map;
- u32 count;
struct MessageUnit_D *reg = acb->pmuD;
- iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
- iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
- iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
if (readl(acb->pmuD->outbound_doorbell) &
ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
acb->pmuD->outbound_doorbell);/*clear interrupt*/
}
+ arcmsr_wait_firmware_ready(acb);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
/* wait message ready */
@@ -2846,42 +3071,33 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
"miscellaneous data timeout\n", acb->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
+ arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
+ return true;
+}
+
+static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+ uint32_t intmask_org;
+
+ /* disable all outbound interrupt */
+ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
+ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
+ /* wait firmware ready */
+ arcmsr_wait_firmware_ready(pACB);
+ mdelay(20);
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ /* wait message ready */
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait get adapter firmware "
+ "miscellaneous data timeout\n", pACB->host->host_no);
+ return false;
}
- acb->signature = readl(&reg->msgcode_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
- /*firm_hd_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
@@ -2902,14 +3118,20 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_get_config(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_get_config(acb);
+ break;
default:
break;
}
- if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
- acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
+ acb->maxOutstanding = acb->firm_numbers_queue - 1;
+ if (acb->host->can_queue >= acb->firm_numbers_queue)
+ acb->host->can_queue = acb->maxOutstanding;
else
- acb->maxOutstanding = acb->firm_numbers_queue - 1;
- acb->host->can_queue = acb->maxOutstanding;
+ acb->maxOutstanding = acb->host->can_queue;
+ acb->maxFreeCCB = acb->host->can_queue;
+ if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
+ acb->maxFreeCCB += 64;
return rtn;
}
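[editor's note: the queue sizing above replaces the old hard ARCMSR_MAX_OUTSTANDING_CMD clamp: the usable depth is the firmware queue minus one slot, further limited by the host's can_queue, and the CCB pool gets 64 spare entries when it comes out small. A worked model of the arithmetic; the threshold value here is illustrative, the real ARCMSR_MAX_FREECCB_NUM lives in arcmsr.h:]

#include <stdio.h>

#define MAX_FREECCB_NUM 320	/* illustrative stand-in for ARCMSR_MAX_FREECCB_NUM */

static void size_queues(int firm_numbers_queue, int *can_queue,
			int *maxOutstanding, int *maxFreeCCB)
{
	*maxOutstanding = firm_numbers_queue - 1;
	if (*can_queue >= firm_numbers_queue)
		*can_queue = *maxOutstanding;	/* host asked for too much */
	else
		*maxOutstanding = *can_queue;	/* host is the limit */
	*maxFreeCCB = *can_queue;
	if (*maxFreeCCB < MAX_FREECCB_NUM)
		*maxFreeCCB += 64;		/* spare CCBs for small pools */
}

int main(void)
{
	int can_queue = 128, outstanding, free_ccb;

	size_queues(256, &can_queue, &outstanding, &free_ccb);
	/* prints: can_queue=128 maxOutstanding=128 maxFreeCCB=192 */
	printf("can_queue=%d maxOutstanding=%d maxFreeCCB=%d\n",
	       can_queue, outstanding, free_ccb);
	return 0;
}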
@@ -3166,6 +3388,75 @@ polling_hbaD_ccb_retry:
return rtn;
}
+static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ bool error;
+ uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
+ uint16_t cmdSMID;
+ unsigned long flags;
+ int rtn;
+ struct CommandControlBlock *pCCB;
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ polling_hbaE_ccb_retry:
+ poll_count++;
+ while (1) {
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
+ doneq_index) {
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ if (poll_ccb_done) {
+ rtn = SUCCESS;
+ break;
+ } else {
+ msleep(25);
+ if (poll_count > 40) {
+ rtn = FAILED;
+ break;
+ }
+ goto polling_hbaE_ccb_retry;
+ }
+ }
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ acb->doneq_index = doneq_index;
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ pCCB = acb->pccb_pool[cmdSMID];
+ poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+ /* check if command done with no error*/
+ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
+ if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+ pr_notice("arcmsr%d: scsi id = %d "
+ "lun = %d ccb = '0x%p' poll command "
+ "abort successfully\n"
+ , acb->host->host_no
+ , pCCB->pcmd->device->id
+ , (u32)pCCB->pcmd->device->lun
+ , pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
+ continue;
+ }
+ pr_notice("arcmsr%d: polling an illegal "
+ "ccb command done ccb = '0x%p' "
+ "ccboutstandingcount = %d\n"
+ , acb->host->host_no
+ , pCCB
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ error = (acb->pCompletionQ[doneq_index].cmdFlag &
+ ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_report_ccb_state(acb, pCCB, error);
+ }
+ writel(doneq_index, &reg->reply_post_consumer_index);
+ return rtn;
+}
+
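[editor's note: like the other per-type pollers, the Type E variant bounds its wait: each pass that finds the ring empty sleeps 25 ms, and after more than 40 passes (roughly one second of sleeping) it gives up with FAILED. The skeleton reduced to its control flow; the function pointers are stand-ins for the driver's checks:]

#include <stdio.h>
#include <stdbool.h>

static bool poll_bounded(bool (*done)(void), void (*sleep_ms)(int))
{
	int poll_count = 0;

	while (!done()) {
		if (++poll_count > 40)
			return false;	/* rtn = FAILED */
		sleep_ms(25);		/* msleep(25) in the driver */
	}
	return true;			/* rtn = SUCCESS */
}

static int ticks;
static bool fake_done(void) { return ++ticks > 5; }
static void fake_sleep(int ms) { (void)ms; }

int main(void)
{
	printf("%s\n", poll_bounded(fake_done, fake_sleep) ? "SUCCESS" : "FAILED");
	return 0;
}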
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
@@ -3188,10 +3479,95 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
+ break;
}
return rtn;
}
+static void arcmsr_set_iop_datetime(struct timer_list *t)
+{
+ struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
+ unsigned int next_time;
+ struct tm tm;
+
+ union {
+ struct {
+ uint16_t signature;
+ uint8_t year;
+ uint8_t month;
+ uint8_t date;
+ uint8_t hour;
+ uint8_t minute;
+ uint8_t second;
+ } a;
+ struct {
+ uint32_t msg_time[2];
+ } b;
+ } datetime;
+
+ time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);
+
+ datetime.a.signature = 0x55AA;
+ datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
+ datetime.a.month = tm.tm_mon;
+ datetime.a.date = tm.tm_mday;
+ datetime.a.hour = tm.tm_hour;
+ datetime.a.minute = tm.tm_min;
+ datetime.a.second = tm.tm_sec;
+
+ switch (pacb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = pacb->pmuA;
+ writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_B *reg = pacb->pmuB;
+ rwbuffer = reg->message_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = pacb->pmuC;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_D *reg = pacb->pmuD;
+ rwbuffer = reg->msgcode_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = pacb->pmuE;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pacb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ }
+ if (sys_tz.tz_minuteswest)
+ next_time = ARCMSR_HOURS;
+ else
+ next_time = ARCMSR_MINUTES;
+ mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
+}
+
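[editor's note: the datetime sync packs seven byte-sized fields behind a 0x55AA signature into two 32-bit message words via a union, with the year sent as an offset from 2000; the two words are then written into the adapter's rwbuffer. A standalone model of the packing; byte order follows the host, as in the driver:]

#include <stdint.h>
#include <stdio.h>
#include <time.h>

union iop_datetime {
	struct {
		uint16_t signature;
		uint8_t year, month, date, hour, minute, second;
	} a;
	uint32_t msg_time[2];
};

int main(void)
{
	time_t now = time(NULL);
	struct tm tm;
	union iop_datetime dt;

	localtime_r(&now, &tm);
	dt.a.signature = 0x55AA;
	dt.a.year = tm.tm_year - 100;	/* tm_year counts from 1900 */
	dt.a.month = tm.tm_mon;
	dt.a.date = tm.tm_mday;
	dt.a.hour = tm.tm_hour;
	dt.a.minute = tm.tm_min;
	dt.a.second = tm.tm_sec;
	/* the driver writel()s these into message_rwbuffer[0] and [1] */
	printf("msg_time = %#x %#x\n",
	       (unsigned)dt.msg_time[0], (unsigned)dt.msg_time[1]);
	return 0;
}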
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
@@ -3208,6 +3584,10 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
dma_coherent_handle = acb->dma_coherent_handle2;
break;
+ case ACB_ADAPTER_TYPE_E:
+ dma_coherent_handle = acb->dma_coherent_handle +
+ offsetof(struct CommandControlBlock, arcmsr_cdb);
+ break;
default:
dma_coherent_handle = acb->dma_coherent_handle;
break;
@@ -3316,6 +3696,29 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
+ writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
+ writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
+ dma_coherent_handle = acb->dma_coherent_handle2;
+ cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
+ cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
+ writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: 'set command Q window' timeout \n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
}
return 0;
}
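[editor's note: note the (dma_coherent_handle >> 16) >> 16 above: shifting a 32-bit dma_addr_t right by 32 in one step would be undefined behaviour on platforms where the type is only 32 bits wide, so the split is done in two half-word shifts. A standalone illustration:]

#include <stdint.h>
#include <stdio.h>

static void split_dma(uint64_t dma, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(dma & 0xffffffff);
	*hi = (uint32_t)((dma >> 16) >> 16);	/* safe even for 32-bit types */
}

int main(void)
{
	uint32_t lo, hi;

	split_dma(0x12345678abcd0000ULL, &lo, &hi);
	printf("hi=%#x lo=%#x\n", hi, lo);	/* hi=0x12345678 lo=0xabcd0000 */
	return 0;
}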
@@ -3356,83 +3759,22 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
}
break;
- }
-}
-
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_B *reg = acb->pmuB;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_C __iomem *reg = acb->pmuC;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ do {
+ firmware_state = readl(&reg->outbound_msgaddr1);
+ } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ break;
}
- return;
}
-static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_request_device_map(struct timer_list *t)
{
- struct MessageUnit_D *reg = acb->pmuD;
-
+ struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
- ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
- ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+ (acb->acb_flags & ACB_F_BUS_RESET) ||
+ (acb->acb_flags & ACB_F_ABORT)) {
mod_timer(&acb->eternal_timer,
jiffies + msecs_to_jiffies(6 * HZ));
} else {
@@ -3448,32 +3790,40 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
msecs_to_jiffies(6 * HZ));
return;
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
- reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies +
- msecs_to_jiffies(6 * HZ));
- }
-}
-
-static void arcmsr_request_device_map(struct timer_list *t)
-{
- struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
- switch (acb->adapter_type) {
+ switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_hbaA_request_device_map(acb);
- }
- break;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ break;
+ }
case ACB_ADAPTER_TYPE_B: {
- arcmsr_hbaB_request_device_map(acb);
- }
- break;
+ struct MessageUnit_B *reg = acb->pmuB;
+ writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
+ break;
+ }
case ACB_ADAPTER_TYPE_C: {
- arcmsr_hbaC_request_device_map(acb);
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ default:
+ return;
}
- break;
- case ACB_ADAPTER_TYPE_D:
- arcmsr_hbaD_request_device_map(acb);
- break;
+ acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
}
@@ -3524,6 +3874,20 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
}
}
+static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ pACB->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &pmu->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'start adapter "
+ "background rebulid' timeout \n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -3539,6 +3903,9 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_start_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_start_bgrb(acb);
+ break;
}
}
@@ -3558,10 +3925,19 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
- /*clear interrupt and message state*/
- writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ uint32_t outbound_doorbell, i;
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
/* let IOP know data has been read */
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ outbound_doorbell = readl(reg->iop2drv_doorbell);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
+ } else
+ break;
+ }
}
break;
case ACB_ADAPTER_TYPE_C: {
@@ -3607,6 +3983,27 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ uint32_t i, tmp;
+
+ acb->in_doorbell = readl(&reg->iobound_doorbell);
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ tmp = acb->in_doorbell;
+ acb->in_doorbell = readl(&reg->iobound_doorbell);
+ if ((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ } else
+ break;
+ }
+ }
+ break;
}
}
@@ -3658,6 +4055,19 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
writel(0xD, &pmuC->write_sequence);
} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
+ } else if (acb->dev_id == 0x1884) {
+ struct MessageUnit_E __iomem *pmuE = acb->pmuE;
+ do {
+ count++;
+ writel(0x4, &pmuE->write_sequence_3xxx);
+ writel(0xB, &pmuE->write_sequence_3xxx);
+ writel(0x2, &pmuE->write_sequence_3xxx);
+ writel(0x7, &pmuE->write_sequence_3xxx);
+ writel(0xD, &pmuE->write_sequence_3xxx);
+ mdelay(10);
+ } while (((readl(&pmuE->host_diagnostic_3xxx) &
+ ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
+ writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
} else if ((acb->dev_id == 0x1214)) {
writel(0x20, pmuD->reset_request);
} else {
@@ -3671,6 +4081,45 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
msleep(1000);
return;
}
+
+static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
+{
+ bool rtn = true;
+
+ switch(acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ rtn = ((readl(&reg->outbound_msgaddr1) &
+ ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B:{
+ struct MessageUnit_B *reg = acb->pmuB;
+ rtn = ((readl(reg->iop2drv_doorbell) &
+ ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C:{
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D:{
+ struct MessageUnit_D *reg = acb->pmuD;
+ rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
+ true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_E:{
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ rtn = (readl(&reg->host_diagnostic_3xxx) &
+ ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
+ }
+ break;
+ }
+ return rtn;
+}
+
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
uint32_t intmask_org;
@@ -3703,7 +4152,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
rtnval = arcmsr_abort_allcmd(acb);
/* clear all outbound posted Q */
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
scsi_dma_unmap(ccb->pcmd);
@@ -3725,197 +4174,55 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
struct AdapterControlBlock *acb;
- uint32_t intmask_org, outbound_doorbell;
int retry_count = 0;
int rtn = FAILED;
acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
- printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
+ pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
+ " num_aborts = %d \n", acb->num_resets, acb->num_aborts);
acb->num_resets++;
- switch(acb->adapter_type){
- case ACB_ADAPTER_TYPE_A:{
- if (acb->acb_flags & ACB_F_BUS_RESET){
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_A __iomem *reg;
- reg = acb->pmuA;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep_again:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep_again;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- outbound_doorbell = readl(&reg->outbound_doorbell);
- writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
- writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_B:{
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = FAILED;
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_C:{
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_C __iomem *reg;
- reg = acb->pmuC;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- arcmsr_clear_doorbell_queue_buffer(acb);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_D: {
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- pr_notice("arcmsr: there is an bus reset"
- " eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags
- & ACB_F_BUS_RESET) == 0, 220 * HZ);
- if (timeout)
- return SUCCESS;
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_D *reg;
- reg = acb->pmuD;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
- nap:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(reg->sample_at_reset) & 0x80) != 0) {
- pr_err("arcmsr%d: waiting for "
- "hw bus reset return, retry=%d\n",
- acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- pr_err("arcmsr%d: waiting for hw bus"
- " reset return, "
- "RETRY TERMINATED!!\n",
- acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto nap;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- arcmsr_clear_doorbell_queue_buffer(acb);
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- pr_err("arcmsr: scsi bus reset "
- "eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
+ if (acb->acb_flags & ACB_F_BUS_RESET) {
+ long timeout;
+ pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
+ timeout = wait_event_timeout(wait_q, (acb->acb_flags
+ & ACB_F_BUS_RESET) == 0, 220 * HZ);
+ if (timeout)
+ return SUCCESS;
+ }
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+wait_reset_done:
+ ssleep(ARCMSR_SLEEPTIME);
+ if (arcmsr_reset_in_progress(acb)) {
+ if (retry_count > ARCMSR_RETRYCOUNT) {
+ acb->fw_flag = FW_DEADLOCK;
+ pr_notice("arcmsr%d: waiting for hw bus reset"
+ " return, RETRY TERMINATED!!\n",
+ acb->host->host_no);
+ return FAILED;
}
- break;
+ retry_count++;
+ goto wait_reset_done;
}
+ arcmsr_iop_init(acb);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = SUCCESS;
+ pr_notice("arcmsr: scsi bus reset eh returns with success\n");
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ rtn = SUCCESS;
}
return rtn;
}
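[editor's note: with arcmsr_reset_in_progress() hiding the per-chip "still resetting" probe, all five adapter types now share one error-handler path: try the soft IOP reset first, and only on failure hard-reset the chip and poll with a bounded retry. A reduced model of that flow; the retry constant here is illustrative, the real one is ARCMSR_RETRYCOUNT:]

#include <stdio.h>
#include <stdbool.h>

#define RETRYCOUNT 12	/* illustrative stand-in for ARCMSR_RETRYCOUNT */

static int bus_reset(bool (*soft_reset_ok)(void), void (*hard_reset)(void),
		     bool (*reset_in_progress)(void))
{
	int retry = 0;

	if (soft_reset_ok())
		return 0;		/* SUCCESS, chip untouched */
	hard_reset();
	while (reset_in_progress()) {
		if (retry++ > RETRYCOUNT)
			return -1;	/* FAILED: firmware deadlocked */
		/* ssleep(ARCMSR_SLEEPTIME) in the driver */
	}
	return 0;			/* re-init IOP, rearm timers, SUCCESS */
}

static bool soft_fail(void) { return false; }
static void hw_reset(void) { puts("hard reset"); }
static int polls;
static bool still_resetting(void) { return ++polls < 3; }

int main(void)
{
	printf("rtn=%d\n", bus_reset(soft_fail, hw_reset, still_resetting));
	return 0;
}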
@@ -3953,7 +4260,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
}
intmask_org = arcmsr_disable_outbound_ints(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -3999,6 +4306,7 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1680:
case PCI_DEVICE_ID_ARECA_1681:
case PCI_DEVICE_ID_ARECA_1880:
+ case PCI_DEVICE_ID_ARECA_1884:
type = "SAS/SATA";
break;
default:
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index f4775ca70bab..27bda2b05de6 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2011,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* have valid data in the sense buffer that could
* confuse the higher levels.
*/
- memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
/*
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 3e1caec82554..10a63be92544 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1957,7 +1957,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
};
- *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
+ *npciids = ARRAY_SIZE(__pciids);
*pciids = __pciids;
}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index df6760ca0911..9685efc59b16 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -35,10 +35,10 @@
#define BFA_TRC_TS(_trcm) \
({ \
- struct timeval tv; \
+ struct timespec64 ts; \
\
- do_gettimeofday(&tv); \
- (tv.tv_sec*1000000+tv.tv_usec); \
+ ktime_get_ts64(&ts); \
+ (ts.tv_sec*1000000+ts.tv_nsec / 1000); \
})
#ifndef BFA_TRC_TS
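[editor's note: the BFA_TRC_TS rewrite swaps the y2038-unsafe do_gettimeofday()/timeval pair for ktime_get_ts64()/timespec64 while keeping the same microsecond trace timestamp. A userspace equivalent of the conversion, with clock_gettime standing in for the in-kernel call:]

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t trace_ts(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* ~ ktime_get_ts64() */
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	printf("ts=%llu us\n", (unsigned long long)trace_ts());
	return 0;
}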
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index e81707f938cb..3d0c96a5c873 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1455,7 +1455,8 @@ struct bfa_aen_entry_s {
enum bfa_aen_category aen_category;
u32 aen_type;
union bfa_aen_data_u aen_data;
- struct timeval aen_tv;
+ u64 aen_tv_sec;
+ u64 aen_tv_usec;
u32 seq_num;
u32 bfad_num;
};
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b8dadc9cc993..d3b00a475aeb 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1250,8 +1250,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
rspnid->dap = s_id;
- rspnid->spn_len = (u8) strlen((char *)name);
- strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
+ strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
+ rspnid->spn_len = (u8) strlen(rspnid->spn);
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1271,8 +1271,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
rsnn_nn->node_name = node_name;
- rsnn_nn->snn_len = (u8) strlen((char *)name);
- strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
+ strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
+ rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
}
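[editor's note: both hunks above fix the same pattern: the old code took strlen() of the source and strncpy()'d exactly that many bytes, so an oversized name overflowed the fixed-width spn/snn field and left it unterminated. Copying with truncation first and then measuring the destination guarantees the recorded length matches what was stored. A standalone demonstration, with a portable strlcpy stand-in since strlcpy itself is a BSD/kernel routine:]

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';	/* always NUL-terminated */
	}
	return len;
}

int main(void)
{
	char spn[8];
	unsigned char spn_len;

	my_strlcpy(spn, "a-very-long-symbolic-name", sizeof(spn));
	spn_len = (unsigned char)strlen(spn);	/* 7: matches the buffer */
	printf("spn=\"%s\" len=%u\n", spn, spn_len);
	return 0;
}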
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 5f53b3276234..2c85f5b1f9c1 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -468,7 +468,7 @@ bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
}
bfa_status_t
-bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
struct bfa_itnim_s *itnim;
struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
@@ -1478,6 +1478,7 @@ bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
return BFA_STATUS_IOPROFILE_OFF;
itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+ /* unsigned 32-bit time_t overflow here in y2106 */
itnim->ioprofile.io_profile_start_time =
bfa_io_profile_start_time(itnim->bfa);
itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index e93921dec347..ec8f863540ae 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -136,7 +136,7 @@ struct bfa_fcpim_s {
struct bfa_fcpim_del_itn_stats_s del_itn_stats;
bfa_boolean_t ioredirect;
bfa_boolean_t io_profile;
- u32 io_profile_start_time;
+ time64_t io_profile_start_time;
bfa_fcpim_profile_t profile_comp;
bfa_fcpim_profile_t profile_start;
};
@@ -310,7 +310,7 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
struct bfa_itnim_iostats_s *stats, u8 lp_tag);
void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
struct bfa_itnim_iostats_s *itnim_stats);
-bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time);
bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
#define bfa_fcpim_ioredirect_enabled(__bfa) \
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 4aa61e20e82d..932feb0ed4da 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -769,23 +769,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcpy(port_cfg->sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname, driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/*
* Host OS Info :
@@ -793,24 +793,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
* OS name string and instead copy the entire OS info string (64 bytes).
*/
if (driver_info->host_os_patch[0] == '\0') {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_OS_STR_LEN);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
} else {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Append host OS Patch Info */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_patch,
- BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_patch,
+ BFA_SYMNAME_MAXLEN);
}
/* null terminate */
@@ -830,26 +830,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->node_sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcpy(port_cfg->node_sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->node_sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* null terminate */
port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 638c0a2857f7..b4f2c1d8742e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);
- strncpy(hba_attr->driver_version, (char *)driver_info->version,
+ strlcpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
- strncpy(hba_attr->os_name, driver_info->host_os_name,
+ strlcpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
/*
@@ -2653,23 +2653,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
* to the os name along with a separator
*/
if (driver_info->host_os_patch[0] != '\0') {
- strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
- strncat(hba_attr->os_name, driver_info->host_os_patch,
- sizeof(driver_info->host_os_patch));
+ strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(hba_attr->os_name));
+ strlcat(hba_attr->os_name, driver_info->host_os_patch,
+ sizeof(hba_attr->os_name));
}
/* Retrieve the max frame size from the port attr */
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
- strncpy(hba_attr->node_sym_name.symname,
+ strlcpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "QLogic");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
- strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+ strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
}
@@ -2736,20 +2736,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
- strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
+ strlcpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));
/*
* Host name
*/
- strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
+ strlcpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));
port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
- strncpy(port_attr->port_sym_name.symname,
- (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
+ strlcpy(port_attr->port_sym_name.symname,
+ bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
port_attr->scos = pport_attr.cos_supported;
@@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;
/* copy IP Address to fabric */
- strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+ strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
@@ -4667,21 +4667,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/
- strncpy((char *)psymbl,
- (char *) &
- (bfa_fcs_lport_get_psym_name
+ strlcpy(symbl,
+ (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs)))] = 0;
- strncat((char *)psymbl,
- (char *) &(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
+
+ strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
+ sizeof(symbl));
} else {
psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
}
@@ -5173,7 +5165,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
- u8 *psymbl = &symbl[0];
int len;
/* Avoid sending RSPN in the following states. */
@@ -5203,22 +5194,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
- strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
+ strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs)))] = 0;
+ sizeof(symbl));
- strncat((char *)psymbl,
+ strlcat(symbl,
(char *)&(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
}
len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
- bfa_fcs_lport_get_fcid(port), 0, psymbl);
+ bfa_fcs_lport_get_fcid(port), 0, symbl);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 256f4afaccf9..16d3aeb0e572 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1809,13 +1809,12 @@ static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s enable_req;
- struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = cpu_to_be16(ioc->clscode);
- do_gettimeofday(&tv);
- enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -1826,6 +1825,9 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
+ disable_req.clscode = cpu_to_be16(ioc->clscode);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
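[editor's note: these conversions follow the comment in the hunk: ktime_get_real_seconds() returns a 64-bit count, and truncating it into the firmware's unsigned 32-bit field moves the wrap from 2038 (signed timeval) out to 2106. A small illustration of the truncation:]

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	int64_t now = (int64_t)time(NULL);	/* ~ ktime_get_real_seconds() */
	uint32_t wire = (uint32_t)now;		/* 32-bit field sent to firmware */

	printf("64-bit: %lld, 32-bit wire: %u\n", (long long)now, wire);
	printf("unsigned 32-bit wraps at %u, i.e. in 2106\n", UINT32_MAX);
	return 0;
}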
@@ -2803,7 +2805,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index da1721e0d167..079bc77f4102 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -96,14 +96,11 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
port->stats_busy = BFA_FALSE;
if (status == BFA_STATUS_OK) {
- struct timeval tv;
-
memcpy(port->stats, port->stats_dma.kva,
sizeof(union bfa_port_stats_u));
bfa_port_stats_swap(port, port->stats);
- do_gettimeofday(&tv);
- port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
+ port->stats->fc.secs_reset = ktime_get_seconds() - port->stats_reset_time;
}
if (port->stats_cbfn) {
@@ -124,16 +121,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
static void
bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
- struct timeval tv;
-
port->stats_status = status;
port->stats_busy = BFA_FALSE;
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
if (port->stats_cbfn) {
port->stats_cbfn(port->stats_cbarg, status);
@@ -471,8 +465,6 @@ void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod)
{
- struct timeval tv;
-
WARN_ON(!port);
port->dev = dev;
@@ -494,8 +486,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
bfa_trc(port, 0);
}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 26dc1bf14c85..0c3b200243ca 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -36,7 +36,7 @@ struct bfa_port_s {
bfa_port_stats_cbfn_t stats_cbfn;
void *stats_cbarg;
bfa_status_t stats_status;
- u32 stats_reset_time;
+ time64_t stats_reset_time;
union bfa_port_stats_u *stats;
struct bfa_dma_s stats_dma;
bfa_boolean_t endis_pending;
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index e640223bab3c..6fc34fb20f00 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -288,18 +288,6 @@ plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
return 0;
}
-static u64
-bfa_get_log_time(void)
-{
- u64 system_time = 0;
- struct timeval tv;
- do_gettimeofday(&tv);
-
- /* We are interested in seconds only. */
- system_time = tv.tv_sec;
- return system_time;
-}
-
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
@@ -320,7 +308,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
- pl_recp->tv = bfa_get_log_time();
+ pl_recp->tv = ktime_get_real_seconds();
BFA_PL_LOG_REC_INCR(plog->tail);
if (plog->head == plog->tail)
@@ -350,8 +338,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
- strncpy(lp.log_entry.string_log, log_str,
- BFA_PL_STRING_LOG_SZ - 1);
+ strlcpy(lp.log_entry.string_log, log_str,
+ BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
}
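Two properties of strlcpy() do the work in the string hunks of this series:
unlike strncpy() it always NUL-terminates (truncating when the source is too
long), and it never zero-pads, so the explicit terminator write retained
above is now redundant, though harmless. A sketch of the before/after
semantics with hypothetical buffers:

    #include <linux/printk.h>
    #include <linux/string.h>

    static void copy_label(char *dst, size_t dst_sz, const char *src)
    {
            /* Old: strncpy() leaves dst unterminated when src fills it,
             * so a manual terminator was mandatory. */
            strncpy(dst, src, dst_sz - 1);
            dst[dst_sz - 1] = '\0';

            /* New: strlcpy() always terminates; it returns strlen(src),
             * letting callers detect truncation. */
            if (strlcpy(dst, src, dst_sz) >= dst_sz)
                    pr_debug("label truncated\n");
    }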
@@ -3047,7 +3035,6 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
struct bfa_fcport_ln_s *ln = &fcport->ln;
- struct timeval tv;
fcport->bfa = bfa;
ln->fcport = fcport;
@@ -3060,8 +3047,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
fcport->stats_dma_ready = BFA_FALSE;
/*
@@ -3295,9 +3281,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
union bfa_fcport_stats_u *ret;
if (complete) {
- struct timeval tv;
- if (fcport->stats_status == BFA_STATUS_OK)
- do_gettimeofday(&tv);
+ time64_t time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
bfa_q_deq(&fcport->stats_pending_q, &qe);
@@ -3312,7 +3296,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
bfa_fcport_fcoe_stats_swap(&ret->fcoe,
&fcport->stats->fcoe);
ret->fcoe.secs_reset =
- tv.tv_sec - fcport->stats_reset_time;
+ time - fcport->stats_reset_time;
}
}
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
@@ -3373,13 +3357,10 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
struct list_head *qe, *qen;
if (complete) {
- struct timeval tv;
-
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
bfa_q_deq(&fcport->statsclr_pending_q, &qe);
cb = (struct bfa_cb_pending_q_s *)qe;
@@ -6148,13 +6129,13 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
/*
* D-port
*/
-#define bfa_dport_result_start(__dport, __mode) do { \
- (__dport)->result.start_time = bfa_get_log_time(); \
- (__dport)->result.status = DPORT_TEST_ST_INPRG; \
- (__dport)->result.mode = (__mode); \
- (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
- (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
- (__dport)->result.lpcnt = (__dport)->lpcnt; \
+#define bfa_dport_result_start(__dport, __mode) do { \
+ (__dport)->result.start_time = ktime_get_real_seconds(); \
+ (__dport)->result.status = DPORT_TEST_ST_INPRG; \
+ (__dport)->result.mode = (__mode); \
+ (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
+ (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
+ (__dport)->result.lpcnt = (__dport)->lpcnt; \
} while (0)
static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
@@ -6588,7 +6569,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
switch (dport->i2hmsg.scn.state) {
case BFI_DPORT_SCN_TESTCOMP:
- dport->result.end_time = bfa_get_log_time();
+ dport->result.end_time = ktime_get_real_seconds();
bfa_trc(dport->bfa, dport->result.end_time);
dport->result.status = msg->info.testcomp.status;
@@ -6635,7 +6616,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
case BFI_DPORT_SCN_SUBTESTSTART:
subtesttype = msg->info.teststart.type;
dport->result.subtest[subtesttype].start_time =
- bfa_get_log_time();
+ ktime_get_real_seconds();
dport->result.subtest[subtesttype].status =
DPORT_TEST_ST_INPRG;
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index ea2278bc78a8..7e8fb6231d49 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -505,7 +505,7 @@ struct bfa_fcport_s {
struct list_head stats_pending_q;
struct list_head statsclr_pending_q;
bfa_boolean_t stats_qfull;
- u32 stats_reset_time; /* stats reset time stamp */
+ time64_t stats_reset_time; /* stats reset time stamp */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index cf0466686804..bd7e6a6fc1f1 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -610,13 +610,12 @@ bfad_hal_mem_alloc(struct bfad_s *bfad)
/* Iterate through the KVA meminfo queue */
list_for_each(km_qe, &kva_info->qe) {
kva_elem = (struct bfa_mem_kva_s *) km_qe;
- kva_elem->kva = vmalloc(kva_elem->mem_len);
+ kva_elem->kva = vzalloc(kva_elem->mem_len);
if (kva_elem->kva == NULL) {
bfad_hal_mem_release(bfad);
rc = BFA_STATUS_ENOMEM;
goto ext;
}
- memset(kva_elem->kva, 0, kva_elem->mem_len);
}
/* Iterate through the DMA meminfo queue */
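The allocation half of this series follows one rule: when a memset(..., 0,
...) immediately follows the allocation, switch to the zeroing allocator.
vzalloc(n) is vmalloc(n) plus guaranteed zero-fill. A sketch with a
hypothetical helper:

    #include <linux/errno.h>
    #include <linux/vmalloc.h>

    static int kva_alloc(void **out, size_t len)
    {
            void *buf = vzalloc(len);   /* zeroed, virtually contiguous */

            if (!buf)
                    return -ENOMEM;
            *out = buf;                 /* no follow-up memset() needed */
            return 0;
    }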
@@ -981,20 +980,20 @@ bfad_start_ops(struct bfad_s *bfad) {
/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
- strncpy(driver_info.version, BFAD_DRIVER_VERSION,
- sizeof(driver_info.version) - 1);
+ strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
+ sizeof(driver_info.version));
if (host_name)
- strncpy(driver_info.host_machine_name, host_name,
- sizeof(driver_info.host_machine_name) - 1);
+ strlcpy(driver_info.host_machine_name, host_name,
+ sizeof(driver_info.host_machine_name));
if (os_name)
- strncpy(driver_info.host_os_name, os_name,
- sizeof(driver_info.host_os_name) - 1);
+ strlcpy(driver_info.host_os_name, os_name,
+ sizeof(driver_info.host_os_name));
if (os_patch)
- strncpy(driver_info.host_os_patch, os_patch,
- sizeof(driver_info.host_os_patch) - 1);
+ strlcpy(driver_info.host_os_patch, os_patch,
+ sizeof(driver_info.host_os_patch));
- strncpy(driver_info.os_device_name, bfad->pci_name,
- sizeof(driver_info.os_device_name) - 1);
+ strlcpy(driver_info.os_device_name, bfad->pci_name,
+ sizeof(driver_info.os_device_name));
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 13db3b7bc873..d4d276c757ea 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -487,7 +487,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) vport->drv_port.im_port;
struct bfad_s *bfad = im_port->bfad;
- struct bfad_port_s *port;
struct bfa_fcs_vport_s *fcs_vport;
struct Scsi_Host *vshost;
wwn_t pwwn;
@@ -502,8 +501,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
return 0;
}
- port = im_port->port;
-
vshost = vport->drv_port.im_port->shost;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
@@ -843,7 +840,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
- strncpy(symname, port_attr.port_cfg.sym_name.symname,
+ strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index b2fa195adc7a..3976e787ba64 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -127,7 +127,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
- strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);
@@ -315,9 +315,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
- strncpy(iocmd->attr.port_symname.symname,
+ strlcpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
- sizeof(port_attr.port_cfg.sym_name.symname));
+ sizeof(iocmd->attr.port_symname.symname));
iocmd->status = BFA_STATUS_OK;
return 0;
@@ -2094,13 +2094,11 @@ bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_fcpim_profile_s *iocmd =
(struct bfa_bsg_fcpim_profile_s *)cmd;
- struct timeval tv;
unsigned long flags;
- do_gettimeofday(&tv);
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
- iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+ iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 05f523971348..349cfe7d055e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -81,7 +81,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
@@ -89,8 +89,6 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
fw_debug->debug_buffer,
@@ -125,7 +123,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
@@ -133,8 +131,6 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
fw_debug->debug_buffer,
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 06ce4ba2b7bc..af66275570c3 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -141,16 +141,28 @@ struct bfad_im_s {
} while (0)
/* post fc_host vendor event */
-#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
- do_gettimeofday(&(_entry)->aen_tv); \
- (_entry)->bfad_num = (_drv)->inst_no; \
- (_entry)->seq_num = (_cnt); \
- (_entry)->aen_category = (_cat); \
- (_entry)->aen_type = (_evt); \
- if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
- queue_work((_drv)->im->drv_workq, \
- &(_drv)->im->aen_im_notify_work); \
-} while (0)
+static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
+ struct bfad_s *drv, int cnt,
+ enum bfa_aen_category cat,
+ enum bfa_ioc_aen_event evt)
+{
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ /*
+ * 'unsigned long aen_tv_sec' overflows in y2106 on 32-bit
+ * architectures, or in 2038 if user space interprets it
+ * as 'signed'.
+ */
+ entry->aen_tv_sec = ts.tv_sec;
+ entry->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ entry->bfad_num = drv->inst_no;
+ entry->seq_num = cnt;
+ entry->aen_category = cat;
+ entry->aen_type = evt;
+ if (drv->bfad_flags & BFAD_FC4_PROBE_DONE)
+ queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work);
+}
struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
struct bfad_s *);
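Converting the event macro to a static inline buys argument type checking
and a place for the struct timespec64 local; the split into a 64-bit-safe
seconds value plus microseconds is the part worth copying. A sketch against
a hypothetical event record:

    #include <linux/time64.h>
    #include <linux/timekeeping.h>
    #include <linux/types.h>

    struct aen_rec { u32 tv_sec; u32 tv_usec; }; /* hypothetical layout */

    static inline void aen_rec_stamp(struct aen_rec *rec)
    {
            struct timespec64 ts;

            ktime_get_real_ts64(&ts);
            rec->tv_sec  = ts.tv_sec;                  /* wraps in y2106 */
            rec->tv_usec = ts.tv_nsec / NSEC_PER_USEC; /* ns -> us */
    }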
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e6b9de7d41ac..65de1d0578a1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1552,7 +1552,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
rc = bnx2fc_shost_config(lport, parent);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
interface->netdev->name);
goto lp_config_err;
}
@@ -1560,7 +1560,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
/* Initialize the libfc library */
rc = bnx2fc_libfc_config(lport);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ printk(KERN_ERR PFX "Couldn't configure libfc\n");
goto shost_err;
}
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 26de61d65a4d..e8ae4d671d23 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,16 +1857,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
* entries. Hence the limit with one page is 8192 task context
* entries.
*/
- hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_bd_dma,
- GFP_KERNEL);
+ hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
if (!hba->task_ctx_bd_tbl) {
printk(KERN_ERR PFX "unable to allocate task context BDT\n");
rc = -1;
goto out;
}
- memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
/*
* Allocate task_ctx which is an array of pointers pointing to
@@ -1895,16 +1894,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
for (i = 0; i < task_ctx_arr_sz; i++) {
- hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_dma[i],
- GFP_KERNEL);
+ hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
if (!hba->task_ctx[i]) {
printk(KERN_ERR PFX "unable to alloc task context\n");
rc = -1;
goto out3;
}
- memset(hba->task_ctx[i], 0, PAGE_SIZE);
addr = (u64)hba->task_ctx_dma[i];
task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
task_ctx_bdt->lo = cpu_to_le32((u32)addr);
@@ -2033,28 +2031,23 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
}
for (i = 0; i < segment_count; ++i) {
- hba->hash_tbl_segments[i] =
- dma_alloc_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- &dma_segment_array[i],
- GFP_KERNEL);
+ hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_segments[i], 0,
- BNX2FC_HASH_TBL_CHUNK_SIZE);
}
- hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->hash_tbl_pbl_dma,
- GFP_KERNEL);
+ hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
pbl = hba->hash_tbl_pbl;
for (i = 0; i < segment_count; ++i) {
@@ -2111,27 +2104,26 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
- hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_ptr_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
+ mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl_ptr) {
printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
- hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl) {
printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl, 0x00, mem_size);
for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
addr = (unsigned long) hba->t2_hash_tbl_dma +
((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
@@ -2148,16 +2140,14 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
}
- hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->stats_buf_dma,
- GFP_KERNEL);
+ hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
if (!hba->stats_buffer) {
printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->stats_buffer, 0x00, PAGE_SIZE);
return 0;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index a8ae1a019eea..e3d1c7c440c8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,56 +672,52 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
- &tgt->sq_dma, GFP_KERNEL);
+ tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->sq, 0, tgt->sq_mem_size);
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
- &tgt->cq_dma, GFP_KERNEL);
+ tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->cq, 0, tgt->cq_mem_size);
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
- &tgt->rq_dma, GFP_KERNEL);
+ tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->rq, 0, tgt->rq_mem_size);
tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
- &tgt->rq_pbl_dma, GFP_KERNEL);
+ tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@@ -739,44 +735,43 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
- &tgt->xferq_dma, GFP_KERNEL);
+ tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->xferq_mem_size, &tgt->xferq_dma,
+ GFP_KERNEL);
if (!tgt->xferq) {
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->xferq, 0, tgt->xferq_mem_size);
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
- &tgt->confq_dma, GFP_KERNEL);
+ tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_mem_size, &tgt->confq_dma,
+ GFP_KERNEL);
if (!tgt->confq) {
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
- tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->confq_pbl_size,
- &tgt->confq_pbl_dma, GFP_KERNEL);
+ tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@@ -792,15 +787,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map ConnDB */
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
- tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->conn_db_mem_size,
- &tgt->conn_db_dma, GFP_KERNEL);
+ tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
/* Allocate and map LCQ */
@@ -808,15 +802,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
- &tgt->lcq_dma, GFP_KERNEL);
+ tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
tgt->lcq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->lcq, 0, tgt->lcq_mem_size);
tgt->conn_db->rq_prod = 0x8000;
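All of the queue allocations above collapse the alloc-then-memset pair into
dma_zalloc_coherent(), which in this kernel era returned zeroed coherent DMA
memory (later kernels made dma_alloc_coherent() zero unconditionally and
dropped the _z variant). Sketch:

    #include <linux/dma-mapping.h>

    static void *queue_alloc(struct device *dev, size_t size,
                             dma_addr_t *dma)
    {
            /* Coherent and pre-zeroed; no memset() on the return value. */
            return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
    }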
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e0640e0f259f..8f03a869ac98 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -547,12 +547,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
- if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
- u32 tmp = nopout_wqe->lun[0];
- /* 57710 requires LUN field to be swapped */
- nopout_wqe->lun[0] = nopout_wqe->lun[1];
- nopout_wqe->lun[1] = tmp;
- }
+ /* 57710 requires LUN field to be swapped */
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ swap(nopout_wqe->lun[0], nopout_wqe->lun[1]);
nopout_wqe->itt = ((u16)task->itt |
(ISCSI_TASK_TYPE_MPATH <<
@@ -1073,15 +1070,14 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual SQ element */
ep->qp.sq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
- &ep->qp.sq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
if (!ep->qp.sq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
ep->qp.sq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
ep->qp.sq_first_qe = ep->qp.sq_virt;
ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
@@ -1110,14 +1106,13 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual CQ element */
ep->qp.cq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
- &ep->qp.cq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
if (!ep->qp.cq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
ep->qp.cq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
ep->qp.cq_first_qe = ep->qp.cq_virt;
ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
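The LUN fix-up earlier in this file also trades an open-coded
three-assignment exchange for the kernel's swap() macro, which hides the
temporary and cannot mismatch types since it uses typeof(). Sketch:

    #include <linux/kernel.h>   /* swap() */
    #include <linux/types.h>

    static void lun_byteswap_57710(u32 lun[2], bool is_57710)
    {
            /* 57710 requires the two LUN words exchanged. */
            if (is_57710)
                    swap(lun[0], lun[1]);
    }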
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index cb1711a5d7a3..ed2dae657964 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1258,7 +1258,7 @@ module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
-MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 96b31e5af91e..20244254325a 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -48,7 +48,6 @@
#include "csio_hw.h"
#define CSIO_DRV_AUTHOR "Chelsio Communications"
-#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0-ko"
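The license hunk is about tooling rather than behavior: license checkers
generally grep for a string literal inside MODULE_LICENSE(), and a macro
indirection defeats them even though the preprocessor would expand it
identically. The resulting idiom:

    #include <linux/module.h>

    MODULE_LICENSE("Dual BSD/GPL"); /* literal, so scanners can find it */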
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 931b1d8f9f3e..5f4e0a787bd1 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1216,7 +1216,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
/* Queue mbox cmd, if another mbox cmd is active */
if (mbp->mb_cbfn == NULL) {
rv = -EBUSY;
- csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
+ csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
hw->pfn, *((uint8_t *)mbp->mb));
goto error_out;
@@ -1244,14 +1244,14 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
rv = owner ? -EBUSY : -ETIMEDOUT;
csio_dbg(hw,
- "Couldnt own Mailbox %x op:0x%x "
+ "Couldn't own Mailbox %x op:0x%x "
"owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb), owner);
goto error_out;
} else {
if (mbm->mcurrent == NULL) {
csio_err(hw,
- "Couldnt own Mailbox %x "
+ "Couldn't own Mailbox %x "
"op:0x%x owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb),
owner);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index ce1336414e0a..3f3af5e74a07 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1914,7 +1914,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
if (task->sc) {
task->hdr = (struct iscsi_hdr *)tdata->skb->data;
} else {
- task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
+ task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
if (!task->hdr) {
__kfree_skb(tdata->skb);
tdata->skb = NULL;
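The cxgbi change is about allocation context, not size: GFP_KERNEL may
sleep, which is invalid if alloc_pdu can run with a lock held or from
softirq context, whereas GFP_ATOMIC never sleeps at the price of failing
more readily under memory pressure. The general rule, sketched with a
hypothetical helper:

    #include <linux/slab.h>

    static void *hdr_alloc(size_t len, bool may_sleep)
    {
            /* Sleeping allocations only where the caller may block. */
            return kzalloc(len, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
    }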
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 9e39866d473b..7ec3f6b55dde 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
new file mode 100644
index 000000000000..339e42b03c49
--- /dev/null
+++ b/drivers/scsi/cxlflash/backend.h
@@ -0,0 +1,41 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
+
+struct cxlflash_backend_ops {
+ struct module *module;
+ void __iomem * (*psa_map)(void *);
+ void (*psa_unmap)(void __iomem *);
+ int (*process_element)(void *);
+ int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
+ void (*unmap_afu_irq)(void *, int, void *);
+ int (*start_context)(void *);
+ int (*stop_context)(void *);
+ int (*afu_reset)(void *);
+ void (*set_master)(void *);
+ void * (*get_context)(struct pci_dev *, void *);
+ void * (*dev_context_init)(struct pci_dev *, void *);
+ int (*release_context)(void *);
+ void (*perst_reloads_same_image)(void *, bool);
+ ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
+ int (*allocate_afu_irqs)(void *, int);
+ void (*free_afu_irqs)(void *);
+ void * (*create_afu)(struct pci_dev *);
+ struct file * (*get_fd)(void *, struct file_operations *, int *);
+ void * (*fops_get_context)(struct file *);
+ int (*start_work)(void *, u64);
+ int (*fd_mmap)(struct file *, struct vm_area_struct *);
+ int (*fd_release)(struct inode *, struct file *);
+};
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6d95e8e147e0..102fd26ca886 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -25,6 +25,8 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
+#include "backend.h"
+
extern const struct file_operations cxlflash_cxl_fops;
#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
@@ -114,6 +116,7 @@ enum cxlflash_hwq_mode {
struct cxlflash_cfg {
struct afu *afu;
+ const struct cxlflash_backend_ops *ops;
struct pci_dev *dev;
struct pci_device_id *dev_id;
struct Scsi_Host *host;
@@ -129,7 +132,7 @@ struct cxlflash_cfg {
int lr_port;
atomic_t scan_host_needed;
- struct cxl_afu *cxl_afu;
+ void *afu_cookie;
atomic_t recovery_threads;
struct mutex ctx_recovery_mutex;
@@ -203,8 +206,7 @@ struct hwq {
* fields after this point
*/
struct afu *afu;
- struct cxl_context *ctx;
- struct cxl_ioctl_start_work work;
+ void *ctx_cookie;
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
new file mode 100644
index 000000000000..db1cadad5c5d
--- /dev/null
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -0,0 +1,168 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <misc/cxl.h>
+
+#include "backend.h"
+
+/*
+ * The following routines map the cxlflash backend operations to existing CXL
+ * kernel API functions and are largely simple shims that provide an abstraction
+ * for converting generic context and AFU cookies into cxl_context or cxl_afu
+ * pointers.
+ */
+
+static void __iomem *cxlflash_psa_map(void *ctx_cookie)
+{
+ return cxl_psa_map(ctx_cookie);
+}
+
+static void cxlflash_psa_unmap(void __iomem *addr)
+{
+ cxl_psa_unmap(addr);
+}
+
+static int cxlflash_process_element(void *ctx_cookie)
+{
+ return cxl_process_element(ctx_cookie);
+}
+
+static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
+ irq_handler_t handler, void *cookie, char *name)
+{
+ return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
+}
+
+static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
+{
+ cxl_unmap_afu_irq(ctx_cookie, num, cookie);
+}
+
+static int cxlflash_start_context(void *ctx_cookie)
+{
+ return cxl_start_context(ctx_cookie, 0, NULL);
+}
+
+static int cxlflash_stop_context(void *ctx_cookie)
+{
+ return cxl_stop_context(ctx_cookie);
+}
+
+static int cxlflash_afu_reset(void *ctx_cookie)
+{
+ return cxl_afu_reset(ctx_cookie);
+}
+
+static void cxlflash_set_master(void *ctx_cookie)
+{
+ cxl_set_master(ctx_cookie);
+}
+
+static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_get_context(dev);
+}
+
+static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_dev_context_init(dev);
+}
+
+static int cxlflash_release_context(void *ctx_cookie)
+{
+ return cxl_release_context(ctx_cookie);
+}
+
+static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
+{
+ cxl_perst_reloads_same_image(afu_cookie, image);
+}
+
+static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
+ void *buf, size_t count)
+{
+ return cxl_read_adapter_vpd(dev, buf, count);
+}
+
+static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
+{
+ return cxl_allocate_afu_irqs(ctx_cookie, num);
+}
+
+static void cxlflash_free_afu_irqs(void *ctx_cookie)
+{
+ cxl_free_afu_irqs(ctx_cookie);
+}
+
+static void *cxlflash_create_afu(struct pci_dev *dev)
+{
+ return cxl_pci_to_afu(dev);
+}
+
+static struct file *cxlflash_get_fd(void *ctx_cookie,
+ struct file_operations *fops, int *fd)
+{
+ return cxl_get_fd(ctx_cookie, fops, fd);
+}
+
+static void *cxlflash_fops_get_context(struct file *file)
+{
+ return cxl_fops_get_context(file);
+}
+
+static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
+{
+ struct cxl_ioctl_start_work work = { 0 };
+
+ work.num_interrupts = irqs;
+ work.flags = CXL_START_WORK_NUM_IRQS;
+
+ return cxl_start_work(ctx_cookie, &work);
+}
+
+static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
+{
+ return cxl_fd_mmap(file, vm);
+}
+
+static int cxlflash_fd_release(struct inode *inode, struct file *file)
+{
+ return cxl_fd_release(inode, file);
+}
+
+const struct cxlflash_backend_ops cxlflash_cxl_ops = {
+ .module = THIS_MODULE,
+ .psa_map = cxlflash_psa_map,
+ .psa_unmap = cxlflash_psa_unmap,
+ .process_element = cxlflash_process_element,
+ .map_afu_irq = cxlflash_map_afu_irq,
+ .unmap_afu_irq = cxlflash_unmap_afu_irq,
+ .start_context = cxlflash_start_context,
+ .stop_context = cxlflash_stop_context,
+ .afu_reset = cxlflash_afu_reset,
+ .set_master = cxlflash_set_master,
+ .get_context = cxlflash_get_context,
+ .dev_context_init = cxlflash_dev_context_init,
+ .release_context = cxlflash_release_context,
+ .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
+ .read_adapter_vpd = cxlflash_read_adapter_vpd,
+ .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
+ .free_afu_irqs = cxlflash_free_afu_irqs,
+ .create_afu = cxlflash_create_afu,
+ .get_fd = cxlflash_get_fd,
+ .fops_get_context = cxlflash_fops_get_context,
+ .start_work = cxlflash_start_work,
+ .fd_mmap = cxlflash_fd_mmap,
+ .fd_release = cxlflash_fd_release,
+};
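backend.h and cxl_hw.c together form a classic ops-table indirection: the
core driver holds a const table of function pointers plus opaque cookies,
and each transport supplies its own table, so an alternative backend can be
slotted in without touching core code. The shape, reduced to a sketch with
hypothetical names:

    struct backend_ops {
            int  (*start)(void *ctx_cookie);
            void (*stop)(void *ctx_cookie);
    };

    /* One shim per backend; real bodies would wrap the transport calls. */
    static int  cxl_start(void *ctx) { return 0; }
    static void cxl_stop(void *ctx)  { }

    static const struct backend_ops cxl_ops = {
            .start = cxl_start,
            .stop  = cxl_stop,
    };

    struct host_cfg {
            const struct backend_ops *ops;
            void *ctx_cookie;
    };

    static int host_start(struct host_cfg *cfg)
    {
            /* Core code only ever calls through the table. */
            return cfg->ops->start(cfg->ctx_cookie);
    }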
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 38b3a9c84fd1..d8fe7ab870b8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
cmd->parent = afu;
cmd->hwq_index = hwq_index;
+ cmd->sa.ioasc = 0;
cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
@@ -710,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
}
if (likely(afu->afu_map)) {
- cxl_psa_unmap((void __iomem *)afu->afu_map);
+ cfg->ops->psa_unmap(afu->afu_map);
afu->afu_map = NULL;
}
}
@@ -738,7 +739,7 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
@@ -747,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
case UNMAP_THREE:
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
- cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
case UNMAP_TWO:
- cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
case UNMAP_ONE:
- cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
case FREE_IRQ:
- cxl_free_afu_irqs(hwq->ctx);
+ cfg->ops->free_afu_irqs(hwq->ctx_cookie);
/* fall through */
case UNDO_NOOP:
/* No action required */
@@ -782,15 +783,15 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
- WARN_ON(cxl_stop_context(hwq->ctx));
+ WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
if (index != PRIMARY_HWQ)
- WARN_ON(cxl_release_context(hwq->ctx));
- hwq->ctx = NULL;
+ WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
+ hwq->ctx_cookie = NULL;
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
flush_pending_cmds(hwq);
@@ -1598,27 +1599,6 @@ out:
}
/**
- * start_context() - starts the master context
- * @cfg: Internal structure associated with the host.
- * @index: Index of the hardware queue.
- *
- * Return: A success or failure value from CXL services.
- */
-static int start_context(struct cxlflash_cfg *cfg, u32 index)
-{
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(cfg->afu, index);
- int rc = 0;
-
- rc = cxl_start_context(hwq->ctx,
- hwq->work.work_element_descriptor,
- NULL);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
* read_vpd() - obtains the WWPNs from VPD
* @cfg: Internal structure associated with the host.
* @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
@@ -1640,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
/* Get the VPD data from the device */
- vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
+ vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) {
dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size);
@@ -1732,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
struct afu *afu = cfg->afu;
struct sisl_ctrl_map __iomem *ctrl_map;
struct hwq *hwq;
+ void *cookie;
int i;
for (i = 0; i < MAX_CONTEXT; i++) {
@@ -1746,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Copy frequently used fields into hwq */
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
+ cookie = hwq->ctx_cookie;
- hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
+ hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
@@ -1925,13 +1907,13 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
struct hwq *hwq)
{
struct device *dev = &cfg->dev->dev;
- struct cxl_context *ctx = hwq->ctx;
+ void *ctx = hwq->ctx_cookie;
int rc = 0;
enum undo_level level = UNDO_NOOP;
bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
int num_irqs = is_primary_hwq ? 3 : 2;
- rc = cxl_allocate_afu_irqs(ctx, num_irqs);
+ rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) {
dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
__func__, rc);
@@ -1939,16 +1921,16 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
- "SISL_MSI_SYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
+ "SISL_MSI_SYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
level = FREE_IRQ;
goto out;
}
- rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
- "SISL_MSI_RRQ_UPDATED");
+ rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
+ "SISL_MSI_RRQ_UPDATED");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
level = UNMAP_ONE;
@@ -1959,8 +1941,8 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
if (!is_primary_hwq)
goto out;
- rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
- "SISL_MSI_ASYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
+ "SISL_MSI_ASYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
level = UNMAP_TWO;
@@ -1979,7 +1961,7 @@ out:
*/
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
- struct cxl_context *ctx;
+ void *ctx;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
@@ -1990,23 +1972,23 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
INIT_LIST_HEAD(&hwq->pending_cmds);
if (index == PRIMARY_HWQ)
- ctx = cxl_get_context(cfg->dev);
+ ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
else
- ctx = cxl_dev_context_init(cfg->dev);
- if (unlikely(!ctx)) {
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
+ if (IS_ERR_OR_NULL(ctx)) {
rc = -ENOMEM;
goto err1;
}
- WARN_ON(hwq->ctx);
- hwq->ctx = ctx;
+ WARN_ON(hwq->ctx_cookie);
+ hwq->ctx_cookie = ctx;
/* Set it up as a master with the CXL */
- cxl_set_master(ctx);
+ cfg->ops->set_master(ctx);
/* Reset AFU when initializing primary context */
if (index == PRIMARY_HWQ) {
- rc = cxl_afu_reset(ctx);
+ rc = cfg->ops->afu_reset(ctx);
if (unlikely(rc)) {
dev_err(dev, "%s: AFU reset failed rc=%d\n",
__func__, rc);
@@ -2020,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
goto err2;
}
- /* This performs the equivalent of the CXL_IOCTL_START_WORK.
- * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
- * element (pe) that is embedded in the context (ctx)
- */
- rc = start_context(cfg, index);
+ /* Finally, activate the context by starting it */
+ rc = cfg->ops->start_context(hwq->ctx_cookie);
if (unlikely(rc)) {
dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
level = UNMAP_THREE;
@@ -2037,9 +2016,9 @@ out:
err2:
term_intr(cfg, level, index);
if (index != PRIMARY_HWQ)
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
err1:
- hwq->ctx = NULL;
+ hwq->ctx_cookie = NULL;
goto out;
}
@@ -2094,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
struct hwq *hwq;
int i;
- cxl_perst_reloads_same_image(cfg->cxl_afu, true);
+ cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
afu->num_hwqs = afu->desired_hwqs;
for (i = 0; i < afu->num_hwqs; i++) {
@@ -2108,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
/* Map the entire MMIO space of the AFU using the first context */
hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cxl_psa_map(hwq->ctx);
+ afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
if (!afu->afu_map) {
- dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
+ dev_err(dev, "%s: psa_map failed\n", __func__);
rc = -ENOMEM;
goto err1;
}
@@ -3670,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
+ cfg->ops = &cxlflash_cxl_ops;
cfg->cxl_fops = cxlflash_cxl_fops;
/*
@@ -3701,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg);
- cfg->cxl_afu = cxl_pci_to_afu(pdev);
+ cfg->afu_cookie = cfg->ops->create_afu(pdev);
rc = init_pci(cfg);
if (rc) {
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 170fff5aeff6..2fe79df5c73c 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -810,20 +810,22 @@ err:
* init_context() - initializes a previously allocated context
* @ctxi: Previously allocated context
* @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained CXL context reference.
+ * @ctx: Previously obtained context cookie.
* @ctxid: Previously obtained process element associated with CXL context.
* @file: Previously obtained file associated with CXL context.
* @perms: User-specified permissions.
+ * @irqs: User-specified number of interrupts.
*/
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- struct cxl_context *ctx, int ctxid, struct file *file,
- u32 perms)
+ void *ctx, int ctxid, struct file *file, u32 perms,
+ u64 irqs)
{
struct afu *afu = cfg->afu;
ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
+ ctxi->irqs = irqs;
ctxi->pid = task_tgid_nr(current); /* tgid = pid */
ctxi->ctx = ctx;
ctxi->cfg = cfg;
@@ -976,9 +978,9 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
*/
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct dk_cxlflash_detach detach = { { 0 }, 0 };
@@ -986,7 +988,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1014,7 +1016,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
- cxl_fd_release(inode, file);
+ cfg->ops->fd_release(inode, file);
out:
dev_dbg(dev, "%s: returning\n", __func__);
return 0;
@@ -1089,9 +1091,9 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct page *err_page = NULL;
@@ -1099,7 +1101,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
int rc = 0;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1162,16 +1164,16 @@ static const struct vm_operations_struct cxlflash_mmap_vmops = {
*/
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
int rc = 0;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1188,7 +1190,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
- rc = cxl_fd_mmap(file, vma);
+ rc = cfg->ops->fd_mmap(file, vma);
if (likely(!rc)) {
/* Insert ourself in the mmap fault handler path */
ctxi->cxl_mmap_vmops = vma->vm_ops;
@@ -1307,23 +1309,23 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
- struct cxl_ioctl_start_work *work;
struct ctx_info *ctxi = NULL;
struct lun_access *lun_access = NULL;
int rc = 0;
u32 perms;
int ctxid = -1;
+ u64 irqs = attach->num_interrupts;
u64 flags = 0UL;
u64 rctxid = 0UL;
struct file *file = NULL;
- struct cxl_context *ctx = NULL;
+ void *ctx = NULL;
int fd = -1;
- if (attach->num_interrupts > 4) {
+ if (irqs > 4) {
dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
- __func__, attach->num_interrupts);
+ __func__, irqs);
rc = -EINVAL;
goto out;
}
@@ -1394,7 +1396,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1402,25 +1404,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- work = &ctxi->work;
- work->num_interrupts = attach->num_interrupts;
- work->flags = CXL_START_WORK_NUM_IRQS;
-
- rc = cxl_start_work(ctx, work);
+ rc = cfg->ops->start_work(ctx, irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1431,7 +1429,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
perms = SISL_RHT_PERM(attach->hdr.flags + 1);
/* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, file, perms);
+ init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
@@ -1479,8 +1477,8 @@ out:
err:
/* Cleanup CXL context; okay to 'stop' even if it was not started */
if (!IS_ERR_OR_NULL(ctx)) {
- cxl_stop_context(ctx);
- cxl_release_context(ctx);
+ cfg->ops->stop_context(ctx);
+ cfg->ops->release_context(ctx);
ctx = NULL;
}
@@ -1529,10 +1527,10 @@ static int recover_context(struct cxlflash_cfg *cfg,
int fd = -1;
int ctxid = -1;
struct file *file;
- struct cxl_context *ctx;
+ void *ctx;
struct afu *afu = cfg->afu;
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1540,21 +1538,21 @@ static int recover_context(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_start_work(ctx, &ctxi->work);
+ rc = cfg->ops->start_work(ctx, ctxi->irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err1;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err2;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1601,9 +1599,9 @@ err3:
fput(file);
put_unused_fd(fd);
err2:
- cxl_stop_context(ctx);
+ cfg->ops->stop_context(ctx);
err1:
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
goto out;
}
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 0b5976829913..35c3cbf83fb5 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -96,15 +96,15 @@ struct ctx_info {
struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
- struct cxl_ioctl_start_work work;
u64 ctxid;
+ u64 irqs; /* Number of interrupts requested for context */
pid_t pid;
bool initialized;
bool unavail;
bool err_recovery_active;
struct mutex mutex; /* Context protection */
struct kref kref;
- struct cxl_context *ctx;
+ void *ctx;
struct cxlflash_cfg *cfg;
struct list_head luns; /* LUNs attached to this context */
const struct vm_operations_struct *cxl_mmap_vmops;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fd22dc6ab5d9..022e421c2185 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -40,6 +40,7 @@
#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
#define TPGS_SUPPORT_OFFLINE 0x40
#define TPGS_SUPPORT_TRANSITION 0x80
+#define TPGS_SUPPORT_ALL 0xdf
#define RTPG_FMT_MASK 0x70
#define RTPG_FMT_EXT_HDR 0x10
@@ -81,6 +82,7 @@ struct alua_port_group {
int tpgs;
int state;
int pref;
+ int valid_states;
unsigned flags; /* used for optimizing STPG */
unsigned char transition_tmo;
unsigned long expiry;
@@ -243,6 +245,7 @@ static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
pg->group_id = group_id;
pg->tpgs = tpgs;
pg->state = SCSI_ACCESS_STATE_OPTIMAL;
+ pg->valid_states = TPGS_SUPPORT_ALL;
if (optimize_stpg)
pg->flags |= ALUA_OPTIMIZE_STPG;
kref_init(&pg->kref);
@@ -516,7 +519,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
struct scsi_sense_hdr sense_hdr;
struct alua_port_group *tmp_pg;
- int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
+ int len, k, off, bufflen = ALUA_RTPG_SIZE;
unsigned char *desc, *buff;
unsigned err, retval;
unsigned int tpg_desc_tbl_off;
@@ -541,6 +544,22 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
if (retval) {
+ /*
+ * Some (broken) implementations have a habit of returning
+ * an error during events such as a firmware update.
+ * But if the target only supports the active/optimized
+ * state there is nothing useful we could do anyway, as
+ * no other path exists to switch to.
+ * So ignore any errors to avoid spurious failures during
+ * path failover.
+ */
+ if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: ignoring rtpg result %d\n",
+ ALUA_DH_NAME, retval);
+ kfree(buff);
+ return SCSI_DH_OK;
+ }
if (!scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
@@ -652,7 +671,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_unlock();
}
if (tmp_pg == pg)
- valid_states = desc[1];
+ tmp_pg->valid_states = desc[1];
spin_unlock_irqrestore(&tmp_pg->lock, flags);
}
kref_put(&tmp_pg->kref, release_port_group);
@@ -665,13 +684,13 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
pg->pref ? "preferred" : "non-preferred",
- valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
- valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
- valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
- valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
- valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
- valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
- valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+ pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+ pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
switch (pg->state) {
case SCSI_ACCESS_STATE_TRANSITIONING:
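The heart of the alua change is caching valid_states on the port group so
the next RTPG's error path can consult it: when active/optimized is the only
state the target ever reported, a failed RTPG cannot affect path selection
and is safely ignored. The mask test, isolated (constant value as defined in
scsi_dh_alua.c):

    #include <linux/types.h>

    #define TPGS_SUPPORT_OPTIMIZED 0x01

    static bool rtpg_error_ignorable(int valid_states)
    {
            /* True when only active/optimized is supported. */
            return (valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0;
    }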
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 5e3d909cfc53..6d3e1cb4fea6 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -108,24 +108,6 @@ void fnic_debugfs_terminate(void)
}
/*
- * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
- * Or Open fc_trace_enable file for fc_trace
- * @inode: The inode pointer.
- * @file: The file pointer to attach the trace enable/disable flag.
- *
- * Description:
- * This routine opens a debugsfs file trace_enable or fc_trace_enable.
- *
- * Returns:
- * This function returns zero if successful.
- */
-static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-/*
* fnic_trace_ctrl_read -
* Read trace_enable ,fc_trace_enable
* or fc_trace_clear debugfs file
@@ -220,7 +202,7 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
static const struct file_operations fnic_trace_ctrl_fops = {
.owner = THIS_MODULE,
- .open = fnic_trace_ctrl_open,
+ .open = simple_open,
.read = fnic_trace_ctrl_read,
.write = fnic_trace_ctrl_write,
};
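simple_open() from fs/libfs.c does exactly what the deleted helper did, which is what makes it a drop-in replacement here:

    int simple_open(struct inode *inode, struct file *file)
    {
            if (inode->i_private)
                    file->private_data = inode->i_private;
            return 0;
    }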
@@ -632,7 +614,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
- getnstimeofday(&stats->stats_timestamps.last_reset_time);
+ ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time);
}
(*ppos)++;
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 999fc7547560..c7bf316d8e83 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -442,15 +442,13 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
shost_printk(KERN_INFO, fnic->lport->host,
"process_vlan_resp: FIP VLAN %d\n", vid);
- vlan = kmalloc(sizeof(*vlan),
- GFP_ATOMIC);
+ vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan) {
/* retry from timer */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
goto out;
}
- memset(vlan, 0, sizeof(struct fcoe_vlan));
vlan->vid = vid & 0x0fff;
vlan->state = FIP_VLAN_AVAIL;
list_add_tail(&vlan->list, &fnic->vlans);
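kzalloc() is kmalloc() plus zeroing, so the follow-up memset() becomes redundant; in slab terms it is simply:

    static inline void *kzalloc(size_t size, gfp_t flags)
    {
            return kmalloc(size, flags | __GFP_ZERO);
    }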
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 242e2ee494a1..8cbd3c9f0b4c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -906,7 +906,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
- "hdr status = %s tag = 0x%x sc = 0x%p"
+ "hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
fnic_fcpio_status_to_str(hdr_status),
id, sc,
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index e007feedbf72..9daa6ada6fa0 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -18,8 +18,8 @@
#define _FNIC_STATS_H_
struct stats_timestamps {
- struct timespec last_reset_time;
- struct timespec last_read_time;
+ struct timespec64 last_reset_time;
+ struct timespec64 last_read_time;
};
struct io_path_stats {
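The timespec to timespec64 switch is y2038 preparation: on 32-bit architectures struct timespec carries a 32-bit time_t that overflows in January 2038, while timespec64 keeps 64-bit seconds on every architecture:

    struct timespec64 {
            time64_t        tv_sec;         /* seconds, always 64-bit */
            long            tv_nsec;        /* nanoseconds */
    };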
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4826f596cb31..abddde11982b 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -111,7 +111,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
int len = 0;
unsigned long flags;
char str[KSYM_SYMBOL_LEN];
- struct timespec val;
+ struct timespec64 val;
fnic_trace_data_t *tbp;
spin_lock_irqsave(&fnic_trace_lock, flags);
@@ -129,10 +129,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -140,8 +140,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -171,10 +171,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -182,8 +182,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -217,29 +217,29 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
{
int len = 0;
int buf_size = debug->buf_size;
- struct timespec val1, val2;
+ struct timespec64 val1, val2;
- getnstimeofday(&val1);
+ ktime_get_real_ts64(&val1);
len = snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Current time : [%ld:%ld]\n"
- "Last stats reset time: [%ld:%ld]\n"
- "Last stats read time: [%ld:%ld]\n"
- "delta since last reset: [%ld:%ld]\n"
- "delta since last read: [%ld:%ld]\n",
- val1.tv_sec, val1.tv_nsec,
- stats->stats_timestamps.last_reset_time.tv_sec,
+ "Current time : [%lld:%ld]\n"
+ "Last stats reset time: [%lld:%09ld]\n"
+ "Last stats read time: [%lld:%ld]\n"
+ "delta since last reset: [%lld:%ld]\n"
+ "delta since last read: [%lld:%ld]\n",
+ (s64)val1.tv_sec, val1.tv_nsec,
+ (s64)stats->stats_timestamps.last_reset_time.tv_sec,
stats->stats_timestamps.last_reset_time.tv_nsec,
- stats->stats_timestamps.last_read_time.tv_sec,
+ (s64)stats->stats_timestamps.last_read_time.tv_sec,
stats->stats_timestamps.last_read_time.tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
stats->stats_timestamps.last_read_time = val1;
@@ -403,12 +403,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
- jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
- jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+ jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
+ jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Last ISR time: %llu (%8lu.%8lu)\n"
- "Last ACK time: %llu (%8lu.%8lu)\n"
+ "Last ISR time: %llu (%8llu.%09lu)\n"
+ "Last ACK time: %llu (%8llu.%09lu)\n"
"Number of ISRs: %lld\n"
"Maximum CQ Entries: %lld\n"
"Number of ACK index out of range: %lld\n"
@@ -425,9 +425,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of rport not ready: %lld\n"
"Number of receive frame errors: %lld\n",
(u64)stats->misc_stats.last_isr_time,
- val1.tv_sec, val1.tv_nsec,
+ (s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
- val2.tv_sec, val2.tv_nsec,
+ (s64)val2.tv_sec, val2.tv_nsec,
(u64)atomic64_read(&stats->misc_stats.isr_count),
(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
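The (u64)/(s64) casts on tv_sec are what make the printing portable: on 64-bit kernels of this era timespec64 aliases timespec and tv_sec is a long, while on 32-bit kernels tv_sec is a long long, so only an explicit 64-bit cast matches %llu/%lld everywhere. %09ld zero-pads the nanoseconds so that e.g. 5 ns prints as .000000005 rather than a misleading .5. A minimal sketch:

    struct timespec64 ts;

    ktime_get_real_ts64(&ts);
    /* tv_sec may be 'long' or 'long long' depending on the arch */
    pr_info("%lld.%09ld\n", (s64)ts.tv_sec, ts.tv_nsec);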
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 83357b0367d8..e7fd2877c19c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -99,12 +99,43 @@ struct hisi_sas_hw_error {
const struct hisi_sas_hw_error *sub;
};
+struct hisi_sas_rst {
+ struct hisi_hba *hisi_hba;
+ struct completion *completion;
+ struct work_struct work;
+ bool done;
+};
+
+#define HISI_SAS_RST_WORK_INIT(r, c) \
+ { .hisi_hba = hisi_hba, \
+ .completion = &c, \
+ .work = __WORK_INITIALIZER(r.work, \
+ hisi_sas_sync_rst_work_handler), \
+ .done = false, \
+ }
+
+#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
+ DECLARE_COMPLETION_ONSTACK(c); \
+ DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
+ struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
+
+enum hisi_sas_bit_err_type {
+ HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0,
+ HISI_SAS_ERR_MULTI_BIT_ECC = 0x1,
+};
+
+enum hisi_sas_phy_event {
+ HISI_PHYE_PHY_UP = 0U,
+ HISI_PHYE_LINK_RESET,
+ HISI_PHYES_NUM,
+};
+
struct hisi_sas_phy {
+ struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
struct hisi_sas_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
- struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
u64 frame_rcvd_size;
@@ -205,13 +236,16 @@ struct hisi_sas_hw {
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *linkrates);
enum sas_linkrate (*phy_get_max_linkrate)(void);
- void (*free_device)(struct hisi_hba *hisi_hba,
+ void (*clear_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
+ void (*free_device)(struct hisi_sas_device *sas_dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
struct domain_device *device);
int (*soft_reset)(struct hisi_hba *hisi_hba);
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
+ int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data);
int max_command_entries;
int complete_hdr_size;
};
@@ -225,6 +259,7 @@ struct hisi_hba {
struct device *dev;
void __iomem *regs;
+ void __iomem *sgpio_regs;
struct regmap *ctrl;
u32 ctrl_reset_reg;
u32 ctrl_reset_sts_reg;
@@ -409,7 +444,8 @@ extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
-extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction);
+extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
+ int direction);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
@@ -425,5 +461,9 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern void hisi_sas_rst_work_handler(struct work_struct *work);
+extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event);
+extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
#endif
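HISI_SAS_DECLARE_RST_WORK_ON_STACK() bundles an on-stack completion with a struct hisi_sas_rst whose work item runs hisi_sas_sync_rst_work_handler(), turning the asynchronous controller reset into a synchronous call for the few paths that must wait for the result. Typical usage, mirroring the callers added later in this patch:

    HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

    queue_work(hisi_hba->wq, &r.work);
    wait_for_completion(r.completion);      /* handler sets r.done */
    return r.done ? TMF_RESP_FUNC_COMPLETE : TMF_RESP_FUNC_FAILED;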
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5f503cb09508..2d4dbed03ee3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -22,10 +22,12 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
+static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata);
-u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
+u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
- switch (cmd) {
+ switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_RECV:
@@ -77,10 +79,26 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_ZAC_MGMT_OUT:
return HISI_SAS_SATA_PROTOCOL_NONDATA;
default:
+ {
+ if (fis->command == ATA_CMD_SET_MAX) {
+ switch (fis->features) {
+ case ATA_SET_MAX_PASSWD:
+ case ATA_SET_MAX_LOCK:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ case ATA_SET_MAX_PASSWD_DMA:
+ case ATA_SET_MAX_UNLOCK_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+
+ default:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ }
+ }
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
return HISI_SAS_SATA_PROTOCOL_PIO;
}
+ }
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
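Passing the whole FIS instead of only the command byte matters because ATA_CMD_SET_MAX is a single opcode whose sub-commands are selected through the features field, and those sub-commands use different transfer protocols. A sketch of the effect, using the values from the switch above:

    struct host_to_dev_fis fis = {
            .command  = ATA_CMD_SET_MAX,
            .features = ATA_SET_MAX_PASSWD_DMA,
    };

    /* features, not command, picks the protocol for this opcode */
    hisi_sas_get_ata_protocol(&fis, DMA_TO_DEVICE);
    /* -> HISI_SAS_SATA_PROTOCOL_DMA; with ATA_SET_MAX_PASSWD it
     * would be HISI_SAS_SATA_PROTOCOL_PIO instead */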
@@ -192,7 +210,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
if (!sas_protocol_ata(task->task_proto))
if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter, slot->n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
if (sas_dev)
@@ -431,7 +450,8 @@ err_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
if (!sas_protocol_ata(task->task_proto))
if (n_elem)
- dma_unmap_sg(dev, task->scatter, n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
prep_out:
return rc;
@@ -578,6 +598,9 @@ static int hisi_sas_dev_found(struct domain_device *device)
}
}
+ dev_info(dev, "dev[%d:%x] found\n",
+ sas_dev->device_id, sas_dev->dev_type);
+
return 0;
}
@@ -617,7 +640,7 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
static void hisi_sas_phyup_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
- container_of(work, struct hisi_sas_phy, phyup_ws);
+ container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@@ -626,10 +649,37 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
+static void hisi_sas_linkreset_work(struct work_struct *work)
+{
+ struct hisi_sas_phy *phy =
+ container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
+}
+
+static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
+ [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
+ [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
+};
+
+bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event)
+{
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+
+ if (WARN_ON(event >= HISI_PHYES_NUM))
+ return false;
+
+ return queue_work(hisi_hba->wq, &phy->works[event]);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
+
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
phy->hisi_hba = hisi_hba;
phy->port = NULL;
@@ -647,7 +697,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
sas_phy->lldd_phy = phy;
- INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
+ for (i = 0; i < HISI_PHYES_NUM; i++)
+ INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
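The works[] array indexed by enum hisi_sas_phy_event replaces the single phyup_ws member: every event owns one pre-initialised work item, and hisi_sas_notify_phy_event() just queues the matching entry (queue_work() returns false when that work is already pending, so repeated events coalesce for free). Extending it is then a small, local change; a sketch with a hypothetical new event:

    enum hisi_sas_phy_event {
            HISI_PHYE_PHY_UP = 0U,
            HISI_PHYE_LINK_RESET,
            HISI_PHYE_NEW_EVENT,            /* hypothetical */
            HISI_PHYES_NUM,
    };

    static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
            [HISI_PHYE_PHY_UP]      = hisi_sas_phyup_work,
            [HISI_PHYE_LINK_RESET]  = hisi_sas_linkreset_work,
            [HISI_PHYE_NEW_EVENT]   = hisi_sas_new_event_work, /* hypothetical */
    };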
@@ -702,7 +753,7 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
-static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
+void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
struct hisi_sas_device *sas_dev;
struct domain_device *device;
@@ -719,6 +770,7 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
hisi_sas_release_task(hisi_hba, device);
}
}
+EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
struct domain_device *device)
@@ -733,17 +785,21 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- dev_info(dev, "found dev[%d:%x] is gone\n",
+ dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
- hisi_sas_internal_task_abort(hisi_hba, device,
+ if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- hisi_sas_dereg_device(hisi_hba, device);
+ hisi_sas_dereg_device(hisi_hba, device);
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
- device->lldd_dev = NULL;
- memset(sas_dev, 0, sizeof(*sas_dev));
+ hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ device->lldd_dev = NULL;
+ }
+
+ if (hisi_hba->hw->free_device)
+ hisi_hba->hw->free_device(sas_dev);
sas_dev->dev_type = SAS_PHY_UNUSED;
}
@@ -859,12 +915,13 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
- dev_err(dev, "abort tmf: TMF task timeout\n");
+ dev_err(dev, "abort tmf: TMF task timeout and not done\n");
if (slot)
slot->task = NULL;
goto ex_err;
- }
+ } else
+ dev_err(dev, "abort tmf: TMF task timeout\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -985,27 +1042,42 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
sizeof(ssp_task), tmf);
}
-static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
- struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
+static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
- struct hisi_sas_device *sas_dev;
- struct domain_device *device;
+ u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
int i;
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
- sas_dev = &hisi_hba->devices[i];
- device = sas_dev->sas_device;
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
+ struct domain_device *device = sas_dev->sas_device;
+ struct asd_sas_port *sas_port;
+ struct hisi_sas_port *port;
+ struct hisi_sas_phy *phy = NULL;
+ struct asd_sas_phy *sas_phy;
+
if ((sas_dev->dev_type == SAS_PHY_UNUSED)
- || !device || (device->port != sas_port))
+ || !device || !device->port)
continue;
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
+ sas_port = device->port;
+ port = to_hisi_sas_port(sas_port);
+
+ list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
+ if (state & BIT(sas_phy->id)) {
+ phy = sas_phy->lldd_phy;
+ break;
+ }
+
+ if (phy) {
+ port->id = phy->port_id;
- /* Update linkrate of directly attached device. */
- if (!device->parent)
- device->linkrate = linkrate;
+ /* Update linkrate of directly attached device. */
+ if (!device->parent)
+ device->linkrate = phy->sas_phy.linkrate;
- hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ } else
+ port->id = 0xff;
}
}
@@ -1020,21 +1092,17 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
bool do_port_check = !!(_sas_port != sas_port);
if (!sas_phy->phy->enabled)
continue;
/* Report PHY state change to libsas */
- if (state & (1 << phy_no)) {
- if (do_port_check && sas_port) {
+ if (state & BIT(phy_no)) {
+ if (do_port_check && sas_port && sas_port->port_dev) {
struct domain_device *dev = sas_port->port_dev;
_sas_port = sas_port;
- port->id = phy->port_id;
- hisi_sas_refresh_port_id(hisi_hba,
- sas_port, sas_phy->linkrate);
if (DEV_IS_EXPANDER(dev->dev_type))
sas_ha->notify_port_event(sas_phy,
@@ -1045,8 +1113,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
hisi_sas_phy_down(hisi_hba, phy_no, 0);
}
-
- drain_workqueue(hisi_hba->shost->work_q);
}
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
@@ -1063,7 +1129,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
return -1;
- dev_dbg(dev, "controller resetting...\n");
+ dev_info(dev, "controller resetting...\n");
old_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
@@ -1072,6 +1138,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (rc) {
dev_warn(dev, "controller reset failed (%d)\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
goto out;
}
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1083,15 +1150,14 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
- drain_workqueue(hisi_hba->wq);
- drain_workqueue(shost->work_q);
+ hisi_sas_refresh_port_id(hisi_hba);
+ scsi_unblock_requests(shost);
state = hisi_hba->hw->get_phys_state(hisi_hba);
hisi_sas_rescan_topology(hisi_hba, old_state, state);
- dev_dbg(dev, "controller reset complete\n");
+ dev_info(dev, "controller reset complete\n");
out:
- scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
return rc;
@@ -1134,6 +1200,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
+ if (rc2 < 0) {
+ dev_err(dev, "abort task: internal abort (%d)\n", rc2);
+ return TMF_RESP_FUNC_FAILED;
+ }
+
/*
* If the TMF finds that the IO is not in the device and also
* the internal abort does not succeed, then it is safe to
@@ -1151,8 +1222,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task: internal abort failed\n");
+ goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_softreset_ata_disk(device);
}
@@ -1163,7 +1238,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
- if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
+ if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
+ task->lldd_task) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1178,12 +1254,29 @@ out:
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
tmf_task.tmf = TMF_ABORT_TASK_SET;
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_task(hisi_hba, device);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
+
return rc;
}
@@ -1213,20 +1306,25 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- unsigned long flags;
+ struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
if (sas_dev->dev_status != HISI_SAS_DEV_EH)
return TMF_RESP_FUNC_FAILED;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
- hisi_sas_internal_task_abort(hisi_hba, device,
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc == TMF_RESP_FUNC_COMPLETE) {
+ if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_release_task(hisi_hba, device);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1249,8 +1347,10 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
/* Clear internal IO and then hardreset */
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- if (rc == TMF_RESP_FUNC_FAILED)
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
phy = sas_get_local_phy(device);
@@ -1266,6 +1366,14 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
} else {
struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
+ goto out;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
+
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1283,8 +1391,14 @@ out:
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
- return hisi_sas_controller_reset(hisi_hba);
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return TMF_RESP_FUNC_COMPLETE;
+
+ return TMF_RESP_FUNC_FAILED;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1441,8 +1555,14 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int res;
+ /*
+ * If this interface is not implemented, the HW either does not
+ * support internal abort or does not need one. In that case,
+ * return TMF_RESP_FUNC_FAILED and let the remaining steps
+ * proceed as if the internal abort had been executed and its
+ * CQ entry returned.
+ */
if (!hisi_hba->hw->prep_abort)
- return -EOPNOTSUPP;
+ return TMF_RESP_FUNC_FAILED;
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
@@ -1473,9 +1593,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (slot)
slot->task = NULL;
- dev_err(dev, "internal task abort: timeout.\n");
+ dev_err(dev, "internal task abort: timeout and not done.\n");
+ res = -EIO;
goto exit;
- }
+ } else
+ dev_err(dev, "internal task abort: timeout.\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
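After this change hisi_sas_internal_task_abort() has a mixed return convention that all the updated callers in this patch check: a negative errno (e.g. -ENOMEM on allocation failure, -EIO on timeout without completion) means the abort machinery itself failed, anything else is a TMF_RESP_FUNC_* status. The caller-side pattern, as used in abort_task, lu_reset, and the I_T nexus reset above:

    rc = hisi_sas_internal_task_abort(hisi_hba, device,
                                      HISI_SAS_INT_ABT_DEV, 0);
    if (rc < 0) {
            /* internal failure: do not trust device state, bail out */
            dev_err(dev, "internal abort failed (%d)\n", rc);
            return TMF_RESP_FUNC_FAILED;
    }
    /* rc is a TMF_RESP_FUNC_* code; continue with the TMF */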
@@ -1507,6 +1629,22 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}
+static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
+{
+}
+
+static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+ if (!hisi_hba->hw->write_gpio)
+ return -EOPNOTSUPP;
+
+ return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
+ reg_index, reg_count, write_data);
+}
+
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
phy->phy_attached = 0;
@@ -1561,6 +1699,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
+static struct device_attribute *host_attrs[] = {
+ &dev_attr_phy_event_threshold,
+ NULL,
+};
+
static struct scsi_host_template _hisi_sas_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@@ -1580,6 +1723,7 @@ static struct scsi_host_template _hisi_sas_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+ .shost_attrs = host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
@@ -1597,6 +1741,8 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_query_task = hisi_sas_query_task,
.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
+ .lldd_port_deformed = hisi_sas_port_deformed,
+ .lldd_write_gpio = hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -1657,6 +1803,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
cq->hisi_hba = hisi_hba;
/* Delivery queue structure */
+ spin_lock_init(&dq->lock);
dq->id = i;
dq->hisi_hba = hisi_hba;
@@ -1803,6 +1950,17 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
+void hisi_sas_sync_rst_work_handler(struct work_struct *work)
+{
+ struct hisi_sas_rst *rst =
+ container_of(work, struct hisi_sas_rst, work);
+
+ if (!hisi_sas_controller_reset(rst->hisi_hba))
+ rst->done = true;
+ complete(rst->completion);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
+
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@@ -1909,6 +2067,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (IS_ERR(hisi_hba->regs))
goto err_out;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hisi_hba->sgpio_regs))
+ goto err_out;
+ }
+
if (hisi_sas_alloc(hisi_hba, shost)) {
hisi_sas_free(hisi_hba);
goto err_out;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index dc6eca8d6afd..679e76f58a0a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -544,7 +544,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}
-static void free_device_v1_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
@@ -1482,7 +1482,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@@ -1850,7 +1850,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.hw_init = hisi_sas_v1_init,
.setup_itct = setup_itct_v1_hw,
.sl_notify = sl_notify_v1_hw,
- .free_device = free_device_v1_hw,
+ .clear_itct = clear_itct_v1_hw,
.prep_smp = prep_smp_v1_hw,
.prep_ssp = prep_ssp_v1_hw,
.get_free_slot = get_free_slot_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 5d3467fd728d..4ccb61e2ae5c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -240,7 +240,12 @@
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
+#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
+#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
+#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -952,7 +957,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v2_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -963,10 +968,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
sas_dev->completion = &completion;
- /* SoC bug workaround */
- if (dev_is_sata(sas_dev->sas_device))
- clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
-
/* clear the itct interrupt state */
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
@@ -981,6 +982,15 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
}
}
+static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
+{
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+
+ /* SoC bug workaround */
+ if (dev_is_sata(sas_dev->sas_device))
+ clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
+}
+
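Splitting the old ->free_device into ->clear_itct plus an optional ->free_device separates the hardware step (invalidating the device's ITCT entry, which every hw version must perform) from version-specific software cleanup such as the v2 SATA bitmap quirk above. That is why hisi_sas_dev_gone() now guards the call:

    if (hisi_hba->hw->free_device)          /* only v2 provides it */
            hisi_hba->hw->free_device(sas_dev);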
static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
{
int i, reset_val;
@@ -1177,8 +1187,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe);
hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -2356,6 +2366,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -2400,6 +2411,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
>> CMPLT_HDR_ERR_PHASE_OFF;
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
/* Analyse error happens on which phase TX or RX */
if (ERR_ON_TX_PHASE(err_phase))
@@ -2407,6 +2419,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
else if (ERR_ON_RX_PHASE(err_phase))
slot_err_v2_hw(hisi_hba, task, slot, 2);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
+
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -2456,7 +2478,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+ dev_warn(dev, "slot complete: port %d has removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -2517,7 +2539,7 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -2687,7 +2709,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
if (!timer_pending(&hisi_hba->timer))
set_link_timer_quirk(hisi_hba);
}
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -2713,10 +2735,12 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 phy_state, sl_ctrl, txid_auto;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct hisi_sas_port *port = phy->port;
+ struct device *dev = hisi_hba->dev;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
@@ -2813,6 +2837,33 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
}
+static const struct hisi_sas_hw_error port_ecc_axi_error[] = {
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF),
+ .msg = "dmac_tx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF),
+ .msg = "dmac_rx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
+ .msg = "dma_tx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
+ .msg = "dma_tx_axi_rd_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
+ .msg = "dma_rx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
+ .msg = "dma_rx_axi_rd_err",
+ },
+};
+
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
@@ -2829,40 +2880,55 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
while (irq_msk) {
- if (irq_msk & (1 << phy_no)) {
- u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT0);
- u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1);
- u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2);
-
- if (irq_value1) {
- if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
- CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error!\
- (0x%x)",
- dev_name(dev), irq_value1);
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT1, irq_value1);
- }
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1);
+ u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2);
+
+ if ((irq_msk & (1 << phy_no)) && irq_value1) {
+ int i;
- if (irq_value2)
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2, irq_value2);
+ for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) {
+ const struct hisi_sas_hw_error *error =
+ &port_ecc_axi_error[i];
+
+ if (!(irq_value1 & error->irq_msk))
+ continue;
+
+ dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value1);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT1, irq_value1);
+ }
- if (irq_value0) {
- if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
- phy_bcast_v2_hw(phy_no, hisi_hba);
+ if ((irq_msk & (1 << phy_no)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT0, irq_value0
- & (~CHL_INT0_HOTPLUG_TOUT_MSK)
- & (~CHL_INT0_SL_PHY_ENABLE_MSK)
- & (~CHL_INT0_NOT_RDY_MSK));
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
}
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2, irq_value2);
+ }
+
+ if ((irq_msk & (1 << phy_no)) && irq_value0) {
+ if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
+ phy_bcast_v2_hw(phy_no, hisi_hba);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT0, irq_value0
+ & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
}
irq_msk &= ~(1 << phy_no);
phy_no++;
@@ -2906,7 +2972,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
val = hisi_sas_read32(hisi_hba, ecc_error->reg);
val &= ecc_error->msk;
val >>= ecc_error->shift;
- dev_warn(dev, ecc_error->msg, irq_value, val);
+ dev_err(dev, ecc_error->msg, irq_value, val);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
}
@@ -3015,12 +3081,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
for (; sub->msk || sub->msg; sub++) {
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
axi_error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -3206,7 +3272,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.device_type = SAS_SATA_DEV;
phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
@@ -3392,7 +3458,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
udelay(10);
if (cnt++ > 10) {
- dev_info(dev, "wait axi bus state to idle timeout!\n");
+ dev_err(dev, "wait axi bus state to idle timeout!\n");
return -1;
}
}
@@ -3408,6 +3474,44 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
return 0;
}
+static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct device *dev = hisi_hba->dev;
+ int phy_no, count;
+
+ if (!hisi_hba->sgpio_regs)
+ return -EOPNOTSUPP;
+
+ switch (reg_type) {
+ case SAS_GPIO_REG_TX:
+ count = reg_count * 4;
+ count = min(count, hisi_hba->n_phy);
+
+ for (phy_no = 0; phy_no < count; phy_no++) {
+ /*
+ * GPIO_TX[n] register has the highest numbered drive
+ * of the four in the first byte and the lowest
+ * numbered drive in the fourth byte.
+ * See SFF-8485 Rev. 0.7 Table 24.
+ */
+ void __iomem *reg_addr = hisi_hba->sgpio_regs +
+ reg_index * 4 + phy_no;
+ int data_idx = phy_no + 3 - (phy_no % 4) * 2;
+
+ writeb(write_data[data_idx], reg_addr);
+ }
+
+ break;
+ default:
+ dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
+ reg_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
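The data_idx arithmetic implements the SFF-8485 byte order referenced in the comment: within each 4-byte GPIO_TX register the highest-numbered drive occupies the first byte and the lowest-numbered drive the fourth. Working phy_no + 3 - (phy_no % 4) * 2 through the first eight phys:

    phy_no  : 0 1 2 3 4 5 6 7
    data_idx: 3 2 1 0 7 6 5 4

so write_data[3] drives phy 0, write_data[0] drives phy 3, and the pattern repeats for every subsequent register.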
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
.setup_itct = setup_itct_v2_hw,
@@ -3415,6 +3519,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.alloc_dev = alloc_dev_quirk_v2_hw,
.sl_notify = sl_notify_v2_hw,
.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
+ .clear_itct = clear_itct_v2_hw,
.free_device = free_device_v2_hw,
.prep_smp = prep_smp_v2_hw,
.prep_ssp = prep_ssp_v2_hw,
@@ -3434,6 +3539,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
.soft_reset = soft_reset_v2_hw,
.get_phys_state = get_phys_state_v2_hw,
+ .write_gpio = write_gpio_v2_hw,
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 19b1f2ffec17..a1f18689729a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -140,6 +140,7 @@
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
+#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
#define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
@@ -165,6 +166,8 @@
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
+#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -204,6 +207,13 @@
#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
+/* RAS registers need init */
+#define RAS_BASE (0x6000)
+#define SAS_RAS_INTR0 (RAS_BASE)
+#define SAS_RAS_INTR1 (RAS_BASE + 0x04)
+#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
+#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
+
/* HW dma structures */
/* Delivery queue header */
/* dw0 */
@@ -422,7 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
@@ -496,6 +506,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
upper_32_bits(hisi_hba->initial_fis_dma));
+
+ /* RAS registers init */
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
}
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -588,7 +602,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v3_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -1033,7 +1047,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
@@ -1138,7 +1152,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct dev_to_host_fis *fis;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
- dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
sas_phy->oob_mode = SATA_OOB_MODE;
@@ -1181,7 +1195,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->port_id = port_id;
phy->phy_attached = 1;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -1322,7 +1336,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
if (!(irq_value1 & error->irq_msk))
continue;
- dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ dev_err(dev, "%s error (phy%d 0x%x) found!\n",
error->msg, phy_no, irq_value1);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1331,9 +1345,31 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
CHL_INT1, irq_value1);
}
- if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
+ if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+
+ }
+
+ if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
+ u32 reg_value = hisi_sas_phy_read32(hisi_hba,
+ phy_no, STP_LINK_TIMEOUT_STATE);
+
+ dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
+ phy_no, reg_value);
+ if (reg_value & BIT(4))
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+ }
+
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT2, irq_value2);
+ }
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
@@ -1432,12 +1468,12 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1542,6 +1578,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -1583,7 +1620,18 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
/* check for erroneous completion */
if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
+
slot_err_v3_hw(hisi_hba, task, slot);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -1628,7 +1676,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+ dev_warn(dev, "slot complete: port %d has removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -1653,9 +1701,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
- struct hisi_sas_itct *itct;
struct hisi_sas_complete_v3_hdr *complete_queue;
- u32 rd_point = cq->rd_point, wr_point, dev_id;
+ u32 rd_point = cq->rd_point, wr_point;
int queue = cq->id;
struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
@@ -1671,38 +1718,11 @@ static void cq_tasklet_v3_hw(unsigned long val)
complete_hdr = &complete_queue[rd_point];
- /* Check for NCQ completion */
- if (complete_hdr->act) {
- u32 act_tmp = complete_hdr->act;
- int ncq_tag_count = ffs(act_tmp);
-
- dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
- CMPLT_HDR_DEV_ID_OFF;
- itct = &hisi_hba->itct[dev_id];
-
- /* The NCQ tags are held in the itct header */
- while (ncq_tag_count) {
- __le64 *ncq_tag = &itct->qw4_15[0];
-
- ncq_tag_count -= 1;
- iptt = (ncq_tag[ncq_tag_count / 5]
- >> (ncq_tag_count % 5) * 12) & 0xfff;
-
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
-
- act_tmp &= ~(1 << ncq_tag_count);
- ncq_tag_count = ffs(act_tmp);
- }
- } else {
- iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
- }
+ iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
rd_point = 0;
@@ -1951,7 +1971,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
- .free_device = free_device_v3_hw,
+ .clear_itct = clear_itct_v3_hw,
.sl_notify = sl_notify_v3_hw,
.prep_ssp = prep_ssp_v3_hw,
.prep_smp = prep_smp_v3_hw,
@@ -2157,21 +2177,243 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
scsi_host_put(shost);
}
+static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
+ { .irq_msk = BIT(19), .msg = "HILINK_INT" },
+ { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
+ { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
+ { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
+ { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
+};
+
+static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
+ { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
+ { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
+ { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
+ { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
+ { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
+ { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
+ { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
+ { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
+ { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
+ { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
+ { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
+ { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
+ { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
+ { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
+};
+
+static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ const struct hisi_sas_hw_error *ras_error;
+ bool need_reset = false;
+ u32 irq_value;
+ int i;
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
+ ras_error = &sas_ras_intr0_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
+ ras_error = &sas_ras_intr1_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
+
+ return need_reset;
+}
+
+static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+
+ dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (process_non_fatal_error_v3_hw(hisi_hba))
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
+{
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
+
+ dev_info(dev, "PCI error: slot reset callback!!\n");
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
enum {
/* instances of the controller */
hip08,
};
+static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ u32 device_state, status;
+ int rc;
+ u32 reg_val;
+ unsigned long flags;
+
+ if (!pdev->pm_cap) {
+ dev_err(dev, "PCI PM not supported\n");
+ return -ENODEV;
+ }
+
+ set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_block_requests(shost);
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ flush_workqueue(hisi_hba->wq);
+ /* disable DQ/PHY/bus */
+ interrupt_disable_v3_hw(hisi_hba);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
+
+ hisi_sas_stop_phys(hisi_hba);
+
+ reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val |= 0x1;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
+
+ /* wait until bus idle */
+ rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
+ AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
+ if (rc) {
+ dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
+ return rc;
+ }
+
+ hisi_sas_init_mem(hisi_hba);
+
+ device_state = pci_choose_state(pdev, state);
+ dev_warn(dev, "entering operating state [D%d]\n",
+ device_state);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, device_state);
+
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_tasks(hisi_hba);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ sas_suspend_ha(sha);
+ return 0;
+}
+
+static int hisi_sas_v3_resume(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+ u32 device_state = pdev->current_state;
+
+ dev_warn(dev, "resuming from operating state [D%d]\n",
+ device_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(dev, "enable device failed during resume (%d)\n", rc);
+ return rc;
+ }
+
+ pci_set_master(pdev);
+ scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+
+ sas_prep_resume_ha(sha);
+ init_reg_v3_hw(hisi_hba);
+ hisi_hba->hw->phys_init(hisi_hba);
+ sas_resume_ha(sha);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+
+ return 0;
+}
+
static const struct pci_device_id sas_v3_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
{}
};
+
+static const struct pci_error_handlers hisi_sas_err_handler = {
+ .error_detected = hisi_sas_error_detected_v3_hw,
+ .mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
+ .slot_reset = hisi_sas_slot_reset_v3_hw,
+};
+
static struct pci_driver sas_v3_pci_driver = {
.name = DRV_NAME,
.id_table = sas_v3_pci_table,
.probe = hisi_sas_v3_probe,
.remove = hisi_sas_v3_remove,
+ .suspend = hisi_sas_v3_suspend,
+ .resume = hisi_sas_v3_resume,
+ .err_handler = &hisi_sas_err_handler,
};
module_pci_driver(sas_v3_pci_driver);
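
For reference, the new hooks above slot into two generic PCI mechanisms: the pci_error_handlers callbacks drive AER recovery (error_detected decides between disconnect, reset, or plain recovery; slot_reset reports whether reinitialization worked), and the legacy .suspend/.resume pair handles power transitions. A minimal, self-contained sketch of the same wiring, not taken from this patch, with a placeholder device ID and no real hardware handling:

#include <linux/module.h>
#include <linux/pci.h>

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        /* Link is dead for good: tell the core to give up on us. */
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        /* Ask the core to reset the slot, then call .slot_reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
        /* Reinitialize the device here and report the outcome. */
        return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers demo_err_handler = {
        .error_detected = demo_error_detected,
        .slot_reset     = demo_slot_reset,
};

static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
{
        /* Quiesce the hardware first, then let the core power down. */
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int demo_resume(struct pci_dev *pdev)
{
        int rc;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        rc = pci_enable_device(pdev);
        if (rc)
                return rc;
        pci_set_master(pdev);
        /* Reinitialize the hardware here. */
        return 0;
}

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static const struct pci_device_id demo_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* placeholder vendor/device */
        { }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static struct pci_driver demo_driver = {
        .name        = "demo_pci",
        .id_table    = demo_ids,
        .probe       = demo_probe,
        .remove      = demo_remove,
        .suspend     = demo_suspend,
        .resume      = demo_resume,
        .err_handler = &demo_err_handler,
};
module_pci_driver(demo_driver);
MODULE_LICENSE("GPL");
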
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fe3a0da3ec97..57bf43e34863 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev)
scsi_proc_hostdir_rm(shost->hostt);
+ /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ rcu_barrier();
+
if (shost->tmf_work_q)
destroy_workqueue(shost->tmf_work_q);
if (shost->ehandler)
@@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->work_q)
destroy_workqueue(shost->work_q);
+ destroy_rcu_head(&shost->rcu);
+
if (shost->shost_state == SHOST_CREATED) {
/*
* Free the shost_dev device name here if scsi_host_alloc()
@@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
+ init_rcu_head(&shost->rcu);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
if (index < 0)
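
The two hosts.c hunks follow the standard lifecycle for an rcu_head embedded in a long-lived object: init_rcu_head() when the object is created, rcu_barrier() before teardown to wait out any callbacks still queued via call_rcu(&shost->rcu, ...), and destroy_rcu_head() before the memory is released. A minimal sketch of that pattern, with a made-up demo_obj standing in for struct Scsi_Host:

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
        struct rcu_head rcu;
        int payload;
};

static void demo_cb(struct rcu_head *head)
{
        /* Runs after a grace period; all RCU readers are gone. */
        pr_info("payload was %d\n",
                container_of(head, struct demo_obj, rcu)->payload);
}

static struct demo_obj *demo_alloc(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                init_rcu_head(&obj->rcu);  /* register debug object */
        return obj;
}

static void demo_release(struct demo_obj *obj)
{
        call_rcu(&obj->rcu, demo_cb);   /* callback may still be pending... */

        rcu_barrier();                  /* ...so flush all queued callbacks */
        destroy_rcu_head(&obj->rcu);    /* unregister debug object */
        kfree(obj);
}
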
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 287e5eb0723f..5293e6827ce5 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -901,14 +901,14 @@ static ssize_t host_show_legacy_board(struct device *dev,
return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}
-static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
-static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
-static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(lunid);
+static DEVICE_ATTR_RO(unique_id);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
+static DEVICE_ATTR_RO(sas_address);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
host_show_hp_ssd_smart_path_enabled, NULL);
-static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
+static DEVICE_ATTR_RO(path_info);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
host_show_hp_ssd_smart_path_status,
host_store_hp_ssd_smart_path_status);
@@ -3518,7 +3518,7 @@ out:
if (rc != IO_OK)
hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
- "Error, could not get enclosure information\n");
+ "Error, could not get enclosure information");
}
static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
@@ -4619,21 +4619,13 @@ sglist_finished:
return 0;
}
-#define BUFLEN 128
static inline void warn_zero_length_transfer(struct ctlr_info *h,
u8 *cdb, int cdb_len,
const char *func)
{
- char buf[BUFLEN];
- int outlen;
- int i;
-
- outlen = scnprintf(buf, BUFLEN,
- "%s: Blocking zero-length request: CDB:", func);
- for (i = 0; i < cdb_len; i++)
- outlen += scnprintf(buf+outlen, BUFLEN - outlen,
- "%02hhx", cdb[i]);
- dev_warn(&h->pdev->dev, "%s\n", buf);
+ dev_warn(&h->pdev->dev,
+ "%s: Blocking zero-length request: CDB:%*phN\n",
+ func, cdb_len, cdb);
}
#define IO_ACCEL_INELIGIBLE 1
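
The warn_zero_length_transfer() rewrite replaces a hand-rolled scnprintf() loop with the kernel's %*phN printk extension, which hex-dumps up to 64 bytes with no separators; the field width supplies the byte count. A quick illustration with a made-up CDB:

#include <linux/printk.h>
#include <linux/types.h>

static void demo_dump_cdb(void)
{
        u8 cdb[6] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };

        /* Prints: "demo: CDB:120000002400" */
        pr_warn("demo: CDB:%*phN\n", (int)sizeof(cdb), cdb);
}

The related %*ph, %*phC and %*phD variants print the same bytes separated by spaces, colons and dashes respectively.
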
@@ -8223,8 +8215,6 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
if (!device)
continue;
- if (!device->scsi3addr)
- continue;
if (!hpsa_vpd_page_supported(h, device->scsi3addr,
HPSA_VPD_LV_IOACCEL_STATUS))
continue;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 0d2f7eb3acb6..b1b1d3a3b173 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -181,7 +181,7 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
break;
default:
break;
- };
+ }
}
/**
@@ -220,7 +220,7 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
default:
break;
- };
+ }
}
#else
@@ -464,7 +464,7 @@ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
default:
vhost->state = state;
break;
- };
+ }
return rc;
}
@@ -500,7 +500,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
default:
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_TGT_INIT:
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
@@ -515,7 +515,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -526,7 +526,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
}
/**
@@ -1601,7 +1601,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
case IBMVFC_ACTIVE:
result = 0;
break;
- };
+ }
return result;
}
@@ -1856,7 +1856,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
break;
default:
return -ENOTSUPP;
- };
+ }
if (port_id == -1)
return -EINVAL;
@@ -2661,7 +2661,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
- };
+ }
break;
case IBMVFC_AE_LINK_UP:
@@ -2715,7 +2715,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
default:
dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
break;
- };
+ }
}
/**
@@ -3351,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
rsp->status, rsp->error, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3451,7 +3451,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3522,7 +3522,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
default:
tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
break;
- };
+ }
if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
@@ -3626,7 +3626,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3838,7 +3838,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -4236,7 +4236,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_REENABLE:
default:
break;
- };
+ }
return 1;
}
@@ -4464,7 +4464,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
default:
break;
- };
+ }
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
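
Every ibmvfc hunk above removes the same cosmetic defect: a semicolon after the closing brace of a switch block. C parses it as an empty statement, so it compiles, but checkpatch flags it. In miniature:

static void demo_switch(int x)
{
        switch (x) {
        case 0:
                break;
        default:
                break;
        };      /* <- the ';' is a separate, empty statement */

        /* Equivalent, and what the patch converts everything to: */
        switch (x) {
        case 0:
                break;
        default:
                break;
        }
}
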
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 2799a6b08f73..c3a76af9f5fa 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -122,7 +122,7 @@ static bool connection_broken(struct scsi_info *vscsi)
cpu_to_be64(buffer[MSG_HI]),
cpu_to_be64(buffer[MSG_LOW]));
- pr_debug("connection_broken: rc %ld\n", h_return_code);
+ dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
if (h_return_code == H_CLOSED)
rc = true;
@@ -210,7 +210,7 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
}
} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
- pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
+ dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
return rc;
}
@@ -291,9 +291,9 @@ static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
ibmvscsis_delete_client_info(vscsi, false);
}
- pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
return rc;
}
@@ -428,8 +428,8 @@ static void ibmvscsis_disconnect(struct work_struct *work)
vscsi->flags |= DISCONNECT_SCHEDULED;
vscsi->flags &= ~SCHEDULE_DISCONNECT;
- pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/*
* check which state we are in and see if we
@@ -540,13 +540,14 @@ static void ibmvscsis_disconnect(struct work_struct *work)
}
if (wait_idle) {
- pr_debug("disconnect start wait, active %d, sched %d\n",
- (int)list_empty(&vscsi->active_q),
- (int)list_empty(&vscsi->schedule_q));
+ dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
+ (int)list_empty(&vscsi->active_q),
+ (int)list_empty(&vscsi->schedule_q));
if (!list_empty(&vscsi->active_q) ||
!list_empty(&vscsi->schedule_q)) {
vscsi->flags |= WAIT_FOR_IDLE;
- pr_debug("disconnect flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
+ vscsi->flags);
/*
 * This routine cannot be called with the interrupt
* lock held.
@@ -555,7 +556,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
wait_for_completion(&vscsi->wait_idle);
spin_lock_bh(&vscsi->intr_lock);
}
- pr_debug("disconnect stop wait\n");
+ dev_dbg(&vscsi->dev, "disconnect stop wait\n");
ibmvscsis_adapter_idle(vscsi);
}
@@ -597,8 +598,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
vscsi->flags |= flag_bits;
- pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
- new_state, flag_bits, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
+ new_state, flag_bits, vscsi->flags, vscsi->state);
if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
vscsi->flags |= SCHEDULE_DISCONNECT;
@@ -648,8 +649,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
}
}
- pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
- vscsi->flags, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
+ vscsi->flags, vscsi->new_state);
}
/**
@@ -724,7 +725,8 @@ static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
break;
case H_CLOSED:
- pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
rc = 0;
break;
}
@@ -768,7 +770,7 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
long rc = ADAPT_SUCCESS;
- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+ dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
(u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
@@ -776,10 +778,10 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (rc == H_SUCCESS) {
vscsi->client_data.partition_number =
be64_to_cpu(*(u64 *)vscsi->map_buf);
- pr_debug("init_msg, part num %d\n",
- vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
} else {
- pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
rc = ADAPT_SUCCESS;
}
@@ -813,7 +815,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
if (rc == H_SUCCESS)
vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
else if (rc != H_NOT_FOUND)
- pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);
+ dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
+ rc);
vscsi->flags &= PRESERVE_FLAG_FIELDS;
vscsi->rsp_q_timer.timer_pops = 0;
@@ -822,8 +825,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
rc = vio_enable_interrupts(vscsi->dma_dev);
if (rc) {
- pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
+ rc);
return rc;
}
@@ -883,7 +886,7 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
int bytes;
long rc = ADAPT_SUCCESS;
- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
/* don't reset, the client did it for us */
if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
@@ -906,7 +909,8 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
}
if (rc != ADAPT_SUCCESS) {
- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
+ rc);
vscsi->state = ERR_DISCONNECTED;
vscsi->flags |= RESPONSE_Q_DOWN;
@@ -985,14 +989,15 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
/* See if there is a Resume event in the queue */
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
- pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
- vscsi->flags, vscsi->state, (int)crq->valid);
+ dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
+ vscsi->flags, vscsi->state, (int)crq->valid);
if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
0, 0);
if (rc) {
- pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
+ dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
+ rc);
rc = 0;
}
} else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
@@ -1012,7 +1017,7 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
(crq->format != RESUME_FROM_SUSP)))
- pr_err("Invalid element in CRQ after Prepare for Suspend");
+ dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
}
vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
@@ -1036,8 +1041,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
{
long rc = ADAPT_SUCCESS;
- pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
- (int)crq->format, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
+ (int)crq->format, vscsi->flags, vscsi->state);
switch (crq->format) {
case MIGRATED:
@@ -1073,14 +1078,14 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
!list_empty(&vscsi->schedule_q) ||
!list_empty(&vscsi->waiting_rsp) ||
!list_empty(&vscsi->active_q)) {
- pr_debug("debit %d, sched %d, wait %d, active %d\n",
- vscsi->debit,
- (int)list_empty(&vscsi->schedule_q),
- (int)list_empty(&vscsi->waiting_rsp),
- (int)list_empty(&vscsi->active_q));
- pr_warn("connection lost with outstanding work\n");
+ dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
+ vscsi->debit,
+ (int)list_empty(&vscsi->schedule_q),
+ (int)list_empty(&vscsi->waiting_rsp),
+ (int)list_empty(&vscsi->active_q));
+ dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
} else {
- pr_debug("trans_event: SRP Processing, but no outstanding work\n");
+ dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
}
ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
@@ -1097,8 +1102,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
break;
case PREPARE_FOR_SUSPEND:
- pr_debug("Prep for Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
+ (int)crq->status);
switch (vscsi->state) {
case ERR_DISCONNECTED:
case WAIT_CONNECTION:
@@ -1119,15 +1124,15 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
case ERR_DISCONNECT:
case ERR_DISCONNECT_RECONNECT:
case WAIT_IDLE:
- pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
- vscsi->state);
+ dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
+ vscsi->state);
break;
}
break;
case RESUME_FROM_SUSP:
- pr_debug("Resume from Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
+ (int)crq->status);
if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
} else {
@@ -1152,8 +1157,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
rc = vscsi->flags & SCHEDULE_DISCONNECT;
- pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
- vscsi->flags, vscsi->state, rc);
+ dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
+ vscsi->flags, vscsi->state, rc);
return rc;
}
@@ -1175,8 +1180,8 @@ static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
bool ack = true;
volatile u8 valid;
- pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
rc = vscsi->flags & SCHEDULE_DISCONNECT;
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
@@ -1204,7 +1209,7 @@ poll_work:
 * if a transport event has occurred leave
* everything but transport events on the queue
*/
- pr_debug("poll_cmd_q, ignoring\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
/*
* need to decrement the queue index so we can
@@ -1233,7 +1238,7 @@ poll_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("poll_cmd_q, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
@@ -1241,7 +1246,7 @@ poll_work:
goto poll_work;
}
- pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
}
/**
@@ -1258,9 +1263,9 @@ static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
struct ibmvscsis_cmd *cmd, *nxt;
- pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
- (int)list_empty(&vscsi->waiting_rsp),
- vscsi->rsp_q_timer.started);
+ dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
+ (int)list_empty(&vscsi->waiting_rsp),
+ vscsi->rsp_q_timer.started);
list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
list_del(&cmd->list);
@@ -1317,8 +1322,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
int free_qs = false;
long rc = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/* Only need to free qs if we're disconnecting from client */
if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
@@ -1336,7 +1341,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
break;
case ERR_DISCONNECT_RECONNECT:
ibmvscsis_reset_queue(vscsi);
- pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
+ vscsi->flags);
break;
case ERR_DISCONNECT:
@@ -1347,8 +1353,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->state = ERR_DISCONNECTED;
else
vscsi->state = WAIT_ENABLED;
- pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
case WAIT_IDLE:
@@ -1370,15 +1376,15 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->flags &= ~DISCONNECT_SCHEDULED;
}
- pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
ibmvscsis_poll_cmd_q(vscsi);
break;
case ERR_DISCONNECTED:
vscsi->flags &= ~DISCONNECT_SCHEDULED;
- pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
default:
@@ -1419,13 +1425,13 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->phyp_acr_state = 0;
vscsi->phyp_acr_flags = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
- pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->new_state);
}
/**
@@ -1464,8 +1470,8 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
cmd->init_time = mftb();
iue->remote_token = crq->IU_data_ptr;
iue->iu_len = len;
- pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
- be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
+ dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
+ be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
break;
case H_PERMISSION:
if (connection_broken(vscsi))
@@ -1536,10 +1542,10 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
}
- pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
- rc);
- pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
- be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
+ dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
+ rc);
+ dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
+ be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
flag_bits);
goto free_dma;
@@ -1595,7 +1601,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
free_dma:
dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
- pr_debug("Leaving adapter_info, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
return rc;
}
@@ -1629,7 +1635,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
*/
min_len = offsetof(struct capabilities, migration);
if ((olen < min_len) || (olen > PAGE_SIZE)) {
- pr_warn("cap_mad: invalid len %d\n", olen);
+ dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
return 0;
}
@@ -1654,9 +1660,9 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
common = (struct mad_capability_common *)&cap->migration;
while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
- pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
- len, be32_to_cpu(common->cap_type),
- be16_to_cpu(common->length));
+ dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
+ len, be32_to_cpu(common->cap_type),
+ be16_to_cpu(common->length));
cap_len = be16_to_cpu(common->length);
if (cap_len > len) {
@@ -1673,7 +1679,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
switch (common->cap_type) {
default:
- pr_debug("cap_mad: unsupported capability\n");
+ dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
common->server_support = 0;
flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
cap->flags &= ~flag;
@@ -1693,8 +1699,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
be64_to_cpu(mad->buffer));
if (rc != H_SUCCESS) {
- pr_debug("cap_mad: failed to copy to client, rc %ld\n",
- rc);
+ dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
@@ -1702,8 +1708,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
CLIENT_FAILED);
}
- pr_warn("cap_mad: error copying data to client, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
+ rc);
ibmvscsis_post_disconnect(vscsi,
ERR_DISCONNECT_RECONNECT,
flag_bits);
@@ -1712,8 +1718,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
- pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
- rc, vscsi->client_cap);
+ dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
+ rc, vscsi->client_cap);
return rc;
}
@@ -1749,7 +1755,7 @@ static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
vscsi->fast_fail = true;
mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
} else {
- pr_warn("fast fail mad sent after login\n");
+ dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
}
break;
@@ -1809,9 +1815,9 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
*/
if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
(vscsi->state == SRP_PROCESSING)) {
- pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
- vscsi->flags, (int)vscsi->rsp_q_timer.started,
- vscsi->rsp_q_timer.timer_pops);
+ dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
+ vscsi->flags, (int)vscsi->rsp_q_timer.started,
+ vscsi->rsp_q_timer.timer_pops);
/*
* Check if the timer is running; if it
@@ -1947,8 +1953,9 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
be64_to_cpu(msg_hi),
be64_to_cpu(cmd->rsp.tag));
- pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
- cmd, be64_to_cpu(cmd->rsp.tag), rc);
+ dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag),
+ rc);
/* if all ok free up the command
* element resources
@@ -2003,7 +2010,8 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
list_add_tail(&cmd->list, &vscsi->waiting_rsp);
ibmvscsis_send_messages(vscsi);
} else {
- pr_debug("Error sending mad response, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
@@ -2039,8 +2047,8 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
* expecting a response.
*/
case WAIT_CONNECTION:
- pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
- vscsi->flags);
+ dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
+ vscsi->flags);
return ADAPT_SUCCESS;
case SRP_PROCESSING:
@@ -2075,12 +2083,12 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (!rc) {
mad = (struct mad_common *)&vio_iu(iue)->mad;
- pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
+ dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
rc = ibmvscsis_process_mad(vscsi, iue);
- pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
- rc);
+ dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
+ be16_to_cpu(mad->status), rc);
if (!rc)
ibmvscsis_send_mad_resp(vscsi, cmd, crq);
@@ -2088,7 +2096,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving mad, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
return rc;
}
@@ -2211,16 +2219,17 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
{
char *name = tport->tport_name;
struct ibmvscsis_nexus *nexus;
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
int rc;
if (tport->ibmv_nexus) {
- pr_debug("tport->ibmv_nexus already exists\n");
+ dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
return 0;
}
nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
if (!nexus) {
- pr_err("Unable to allocate struct ibmvscsis_nexus\n");
+ dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
return -ENOMEM;
}
@@ -2316,7 +2325,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
cmd->rsp.format = VIOSRP_SRP_FORMAT;
cmd->rsp.tag = req->tag;
- pr_debug("srp_login: reason 0x%x\n", reason);
+ dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
if (reason)
rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
@@ -2333,7 +2342,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving srp_login, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
return rc;
}
@@ -2415,8 +2424,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
case SRP_TSK_MGMT:
tsk = &vio_iu(iue)->srp.tsk_mgmt;
- pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
- tsk->tag);
+ dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
+ tsk->tag, tsk->tag);
cmd->rsp.tag = tsk->tag;
vscsi->debit += 1;
cmd->type = TASK_MANAGEMENT;
@@ -2425,8 +2434,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
break;
case SRP_CMD:
- pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
- srp->tag);
+ dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
+ srp->tag, srp->tag);
cmd->rsp.tag = srp->tag;
vscsi->debit += 1;
cmd->type = SCSI_CDB;
@@ -2603,7 +2612,7 @@ static int read_dma_window(struct scsi_info *vscsi)
"ibm,my-dma-window",
NULL);
if (!dma_window) {
- pr_err("Couldn't find ibm,my-dma-window property\n");
+ dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
return -1;
}
@@ -2613,7 +2622,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-address-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2622,7 +2631,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-size-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2808,8 +2817,8 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
srp_tsk->lun.scsi_lun[0] &= 0x3f;
- pr_debug("calling submit_tmr, func %d\n",
- srp_tsk->tsk_mgmt_func);
+ dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
+ srp_tsk->tsk_mgmt_func);
rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
scsilun_to_int(&srp_tsk->lun), srp_tsk,
tcm_type, GFP_KERNEL, tag_to_abort, 0);
@@ -3113,8 +3122,8 @@ static long srp_build_response(struct scsi_info *vscsi,
if (cmd->type == SCSI_CDB) {
rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
if (rsp->status) {
- pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
- (int)rsp->status);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
+ cmd, (int)rsp->status);
ibmvscsis_determine_resid(se_cmd, rsp);
if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
rsp->sense_data_len =
@@ -3127,7 +3136,8 @@ static long srp_build_response(struct scsi_info *vscsi,
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else if (cmd->flags & CMD_FAST_FAIL) {
- pr_debug("build_resp: cmd %p, fast fail\n", cmd);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
+ cmd);
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else {
@@ -3340,7 +3350,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
spin_lock_bh(&vscsi->intr_lock);
- pr_debug("got interrupt\n");
+ dev_dbg(&vscsi->dev, "got interrupt\n");
/*
* if we are in a path where we are waiting for all pending commands
@@ -3350,8 +3360,8 @@ static void ibmvscsis_handle_crq(unsigned long data)
if (TARGET_STOP(vscsi)) {
vio_enable_interrupts(vscsi->dma_dev);
- pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
return;
}
@@ -3414,20 +3424,20 @@ cmd_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("handle_crq, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
if (valid)
goto cmd_work;
} else {
- pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
}
- pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
- (int)list_empty(&vscsi->schedule_q), vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
+ (int)list_empty(&vscsi->schedule_q), vscsi->flags,
+ vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
}
@@ -3443,7 +3453,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
if (!vscsi) {
rc = -ENOMEM;
- pr_err("probe: allocation of adapter failed\n");
+ dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
return rc;
}
@@ -3456,14 +3466,14 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
dev_name(&vdev->dev));
- pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
+ dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
rc = read_dma_window(vscsi);
if (rc)
goto free_adapter;
- pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
- vscsi->dds.window[LOCAL].liobn,
- vscsi->dds.window[REMOTE].liobn);
+ dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
+ vscsi->dds.window[LOCAL].liobn,
+ vscsi->dds.window[REMOTE].liobn);
strcpy(vscsi->eye, "VSCSI ");
strncat(vscsi->eye, vdev->name, MAX_EYE);
@@ -3541,8 +3551,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
* client can connect" and the client isn't activated yet.
* We'll make the call again when he sends an init msg.
*/
- pr_debug("probe hrc %ld, client partition num %d\n",
- hrc, vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
+ hrc, vscsi->client_data.partition_number);
tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
(unsigned long)vscsi);
@@ -3602,7 +3612,7 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
{
struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
- pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
+ dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
spin_lock_bh(&vscsi->intr_lock);
ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
@@ -3766,14 +3776,16 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
* attempt an srp_transfer_data.
*/
if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
- pr_err("write_pending failed since: %d\n", vscsi->flags);
+ dev_err(&vscsi->dev, "write_pending failed since: %d\n",
+ vscsi->flags);
return -EIO;
}
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
1, 1);
if (rc) {
- pr_err("srp_transfer_data() failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
return -EIO;
}
/*
@@ -3811,7 +3823,7 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
1);
if (rc) {
- pr_err("srp_transfer_data failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
sd = se_cmd->sense_buffer;
se_cmd->scsi_sense_length = 18;
memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
@@ -3834,7 +3846,7 @@ static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
struct scsi_info *vscsi = cmd->adapter;
uint len;
- pr_debug("queue_status %p\n", se_cmd);
+ dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);
srp_build_response(vscsi, cmd, &len);
cmd->rsp.format = SRP_FORMAT;
@@ -3854,8 +3866,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
uint len;
- pr_debug("queue_tm_rsp %p, status %d\n",
- se_cmd, (int)se_cmd->se_tmr_req->response);
+ dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
+ se_cmd, (int)se_cmd->se_tmr_req->response);
if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
@@ -3877,8 +3889,12 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
- pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
- se_cmd, se_cmd->tag);
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+
+ dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
+ se_cmd, se_cmd->tag);
}
static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
@@ -3886,12 +3902,14 @@ static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
const char *name)
{
struct ibmvscsis_tport *tport;
+ struct scsi_info *vscsi;
tport = ibmvscsis_lookup_port(name);
if (tport) {
+ vscsi = container_of(tport, struct scsi_info, tport);
tport->tport_proto_id = SCSI_PROTOCOL_SRP;
- pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
- name, tport, tport->tport_proto_id);
+ dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
+ name, tport, tport->tport_proto_id);
return &tport->tport_wwn;
}
@@ -3903,9 +3921,10 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
struct ibmvscsis_tport *tport = container_of(wwn,
struct ibmvscsis_tport,
tport_wwn);
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
- pr_debug("drop_tport(%s)\n",
- config_item_name(&tport->tport_wwn.wwn_group.cg_item));
+ dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
+ config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
@@ -3990,12 +4009,12 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
rc = kstrtoul(page, 0, &tmp);
if (rc < 0) {
- pr_err("Unable to extract srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- pr_err("Illegal value for srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
return -EINVAL;
}
@@ -4004,8 +4023,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
- pr_err("enable_change_state failed, rc %ld state %d\n",
- lrc, vscsi->state);
+ dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
+ lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
spin_lock_bh(&vscsi->intr_lock);
@@ -4015,7 +4034,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&vscsi->intr_lock);
}
- pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
+ dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
+ vscsi->state);
return count;
}
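
The entire ibmvscsi_tgt diff is a mechanical pr_*() to dev_*() conversion, so every message is tagged with the emitting device instance rather than a bare prefix. A minimal sketch of the difference, assuming a driver that embeds a struct device in its private data the way scsi_info does (demo_info and its fields are illustrative):

#include <linux/device.h>
#include <linux/printk.h>

struct demo_info {
        struct device dev;      /* embedded, as in ibmvscsi_tgt's scsi_info */
        unsigned int flags;
};

static void demo_log(struct demo_info *info)
{
        /* Before: message carries no hint of which adapter spoke. */
        pr_debug("disconnect: flags 0x%x\n", info->flags);

        /* After: automatically prefixed with driver and device name. */
        dev_dbg(&info->dev, "disconnect: flags 0x%x\n", info->flags);
}
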
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cc0187965eee..e07dd990e585 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9653,8 +9653,8 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
if (i == 0) {
entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
ioa_cfg->hrrq[i].min_cmd_id = 0;
- ioa_cfg->hrrq[i].max_cmd_id =
- (entries_each_hrrq - 1);
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
} else {
entries_each_hrrq =
IPR_NUM_BASE_CMD_BLKS/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4d934d6c3e13..6198559abbd8 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -307,6 +307,7 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
/**
* iscsi_sw_tcp_xmit - TCP transmit
+ * @conn: iscsi connection
**/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
@@ -357,6 +358,7 @@ error:
/**
* iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ * @conn: iscsi connection
*/
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 9c50d2d9f27c..15a2fef51e38 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
+ /*
+ * During shutdown, fail cmds when the session state is bad,
+ * so that completion can still happen.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ reason = FAILURE_SESSION_FAILED;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ }
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1979,6 +1988,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
+ * During shutdown, if the session is prematurely disconnected,
+ * recovery won't happen and cmds will hang. Not handling them
+ * would trigger EH, which is also bad in this case. Instead,
+ * handle the cmd, allow completion to happen and let the
+ * upper layer deal with the result.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ sc->result = DID_NO_CONNECT << 16;
+ ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+ /*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
@@ -2082,7 +2104,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ "timer reset" : "shutdown or nh");
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
@@ -2722,8 +2744,10 @@ static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
* @iscsit: iscsi transport template
* @shost: scsi host
 * @cmds_max: max commands the session can queue
+ * @dd_size: private driver data size, added to session allocation size
* @cmd_task_size: LLD task private data size
* @initial_cmdsn: initial CmdSN
+ * @id: target ID to add to this session
*
* This can be used by software iscsi_transports that allocate
* a session per scsi host.
@@ -2951,7 +2975,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_setup);
/**
* iscsi_conn_teardown - teardown iscsi connection
- * cls_conn: iscsi class connection
+ * @cls_conn: iscsi class connection
*
* TODO: we may need to make this into a two step process
* like scsi-mls remove + put host
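
Both libiscsi hunks share one idea: once the global system_state has moved past SYSTEM_RUNNING (halt, power-off, reboot), session recovery will never complete, so commands are finished immediately with DID_NO_CONNECT instead of being retried or left to hang. The guard itself reduces to something like:

#include <linux/kernel.h>       /* system_state, SYSTEM_RUNNING */

static bool demo_system_going_down(void)
{
        /*
         * SYSTEM_RUNNING is the normal state; during halt, reboot or
         * power-off the state moves on, so treat anything else as
         * "fail fast, don't wait for recovery".
         */
        return unlikely(system_state != SYSTEM_RUNNING);
}
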
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 63a1d69ff515..369ef8f23b24 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -798,6 +798,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
/**
* iscsi_tcp_hdr_recv_done - process PDU header
+ * @tcp_conn: iSCSI TCP connection
+ * @segment: the buffer segment being processed
*
* This is the callback invoked when the PDU header has
* been received. If the header is followed by additional
@@ -876,9 +878,10 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
* @conn: iscsi connection
* @skb: network buffer with header and/or data segment
* @offset: offset in skb
- * @offload: bool indicating if transfer was offloaded
+ * @offloaded: bool indicating if transfer was offloaded
+ * @status: iscsi TCP status result
*
- * Will return status of transfer in status. And will return
+ * Will return status of transfer in @status, and will return
* number of bytes copied.
*/
int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
@@ -955,9 +958,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
/**
* iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
- * @conn: iscsi connection
* @task: scsi command task
- * @sc: scsi command
*/
int iscsi_tcp_task_init(struct iscsi_task *task)
{
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 70be4425ae0b..2b3637b40dde 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -730,7 +730,6 @@ int sas_discover_sata(struct domain_device *dev)
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 60de66252fa2..e4fd078e4175 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -212,13 +212,9 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
}
}
-static void sas_probe_devices(struct work_struct *work)
+static void sas_probe_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_PROBE, &port->disc.pending);
/* devices must be domain members before link recovery and probe */
list_for_each_entry(dev, &port->disco_list, disco_list_node) {
@@ -294,7 +290,6 @@ int sas_discover_end_dev(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev);
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
@@ -353,13 +348,9 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
sas_put_device(dev);
}
-static void sas_destruct_devices(struct work_struct *work)
+void sas_destruct_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_DESTRUCT, &port->disc.pending);
list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
list_del_init(&dev->disco_list_node);
@@ -370,6 +361,16 @@ static void sas_destruct_devices(struct work_struct *work)
}
}
+static void sas_destruct_ports(struct asd_sas_port *port)
+{
+ struct sas_port *sas_port, *p;
+
+ list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
+ list_del_init(&sas_port->del_list);
+ sas_port_delete(sas_port);
+ }
+}
+
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
@@ -384,7 +385,6 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
- sas_discover_event(dev->port, DISCE_DESTRUCT);
}
}
@@ -490,6 +490,8 @@ static void sas_discover_domain(struct work_struct *work)
port->port_dev = NULL;
}
+ sas_probe_devices(port);
+
SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
task_pid_nr(current), error);
}
@@ -523,6 +525,10 @@ static void sas_revalidate_domain(struct work_struct *work)
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
+
+ sas_destruct_devices(port);
+ sas_destruct_ports(port);
+ sas_probe_devices(port);
}
/* ---------- Events ---------- */
@@ -534,7 +540,7 @@ static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
* workqueue, or known to be submitted from a context that is
* not racing against draining
*/
- scsi_queue_work(ha->core.shost, &sw->work);
+ queue_work(ha->disco_q, &sw->work);
}
static void sas_chain_event(int event, unsigned long *pending,
@@ -578,10 +584,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
- [DISCE_PROBE] = sas_probe_devices,
[DISCE_SUSPEND] = sas_suspend_devices,
[DISCE_RESUME] = sas_resume_devices,
- [DISCE_DESTRUCT] = sas_destruct_devices,
};
disc->pending = 0;
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 0bb9eefc08c8..ae923eb6de95 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -29,7 +29,8 @@
int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
- int rc = 0;
+ /* work is added to the defer_q while draining, so report success */
+ int rc = 1;
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
return 0;
@@ -39,24 +40,20 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
} else
- rc = scsi_queue_work(ha->core.shost, &sw->work);
+ rc = queue_work(ha->event_q, &sw->work);
return rc;
}
-static int sas_queue_event(int event, unsigned long *pending,
- struct sas_work *work,
+static int sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
- int rc = 0;
+ unsigned long flags;
+ int rc;
- if (!test_and_set_bit(event, pending)) {
- unsigned long flags;
-
- spin_lock_irqsave(&ha->lock, flags);
- rc = sas_queue_work(ha, work);
- spin_unlock_irqrestore(&ha->lock, flags);
- }
+ spin_lock_irqsave(&ha->lock, flags);
+ rc = sas_queue_work(ha, work);
+ spin_unlock_irqrestore(&ha->lock, flags);
return rc;
}
@@ -64,21 +61,25 @@ static int sas_queue_event(int event, unsigned long *pending,
void __sas_drain_work(struct sas_ha_struct *ha)
{
- struct workqueue_struct *wq = ha->core.shost->work_q;
struct sas_work *sw, *_sw;
+ int ret;
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
spin_unlock_irq(&ha->lock);
- drain_workqueue(wq);
+ drain_workqueue(ha->event_q);
+ drain_workqueue(ha->disco_q);
spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
- sas_queue_work(ha, sw);
+ ret = sas_queue_work(ha, sw);
+ if (ret != 1)
+ sas_free_event(to_asd_sas_event(&sw->work));
}
spin_unlock_irq(&ha->lock);
}
@@ -115,33 +116,78 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
struct asd_sas_port *port = ha->sas_port[i];
const int ev = DISCE_REVALIDATE_DOMAIN;
struct sas_discovery *d = &port->disc;
+ struct asd_sas_phy *sas_phy;
if (!test_and_clear_bit(ev, &d->pending))
continue;
- sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
+ if (list_empty(&port->phy_list))
+ continue;
+
+ sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
+ port_phy_el);
+ ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
}
mutex_unlock(&ha->disco_mutex);
}
+
+static void sas_port_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_port_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
+static void sas_phy_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_phy_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PORT_NUM_EVENTS);
- return sas_queue_event(event, &phy->port_events_pending,
- &phy->port_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PHY_NUM_EVENTS);
- return sas_queue_event(event, &phy->phy_events_pending,
- &phy->phy_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_init_events(struct sas_ha_struct *sas_ha)
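
The sas_event.c rework drops the old static, bitmask-deduplicated per-phy events in favor of dynamically allocated ones: each notification allocates an asd_sas_event, queues its work item, and the worker frees the event once it has dispatched it. Stripped of the SAS specifics, the shape is roughly this (all names illustrative):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_event {
        struct work_struct work;
        int event;
};

static void demo_event_worker(struct work_struct *work)
{
        struct demo_event *ev = container_of(work, struct demo_event, work);

        /* ... dispatch ev->event here ... */

        kfree(ev);      /* the worker owns the event once it runs */
}

static int demo_notify_event(struct workqueue_struct *wq, int event)
{
        struct demo_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

        if (!ev)
                return -ENOMEM;

        ev->event = event;
        INIT_WORK(&ev->work, demo_event_worker);

        /*
         * queue_work() returns false only if the item was already
         * queued; with a freshly allocated item it should succeed.
         */
        if (!queue_work(wq, &ev->work)) {
                kfree(ev);
                return -EINVAL;
        }
        return 1;
}
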
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3183d63de4da..6a4f8198b78e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
skip:
if (new_phy)
@@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);
- if (!res)
+ if (res)
goto out;
phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
out:
+ kfree(req);
kfree(resp);
return res;
@@ -1914,7 +1916,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
- sas_port_delete(phy->port);
+ list_add_tail(&phy->port->del_list,
+ &parent->port->sas_port_del_list);
phy->port = NULL;
}
}
@@ -2122,7 +2125,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
struct domain_device *dev = NULL;
res = sas_find_bcast_dev(port_dev, &dev);
- while (res == 0 && dev) {
+ if (res == 0 && dev) {
struct expander_device *ex = &dev->ex_dev;
int i = 0, phy_id;
@@ -2134,9 +2137,6 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
res = sas_rediscover(dev, phy_id);
i = phy_id + 1;
} while (i < ex->num_phys);
-
- dev = NULL;
- res = sas_find_bcast_dev(port_dev, &dev);
}
return res;
}
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 64fa6f53cb8b..c81a63b5dc71 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -39,6 +39,7 @@
#include "../scsi_sas_internal.h"
static struct kmem_cache *sas_task_cache;
+static struct kmem_cache *sas_event_cache;
struct sas_task *sas_alloc_task(gfp_t flags)
{
@@ -109,6 +110,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
+ char name[64];
int error = 0;
mutex_init(&sas_ha->disco_mutex);
@@ -122,6 +124,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
INIT_LIST_HEAD(&sas_ha->defer_q);
INIT_LIST_HEAD(&sas_ha->eh_dev_q);
+ sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;
+
error = sas_register_phys(sas_ha);
if (error) {
printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
@@ -140,10 +144,24 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
goto Undo_ports;
}
+ error = -ENOMEM;
+ snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
+ sas_ha->event_q = create_singlethread_workqueue(name);
+ if (!sas_ha->event_q)
+ goto Undo_ports;
+
+ snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
+ sas_ha->disco_q = create_singlethread_workqueue(name);
+ if (!sas_ha->disco_q)
+ goto Undo_event_q;
+
INIT_LIST_HEAD(&sas_ha->eh_done_q);
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
+
+Undo_event_q:
+ destroy_workqueue(sas_ha->event_q);
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
@@ -174,6 +192,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
+ destroy_workqueue(sas_ha->disco_q);
+ destroy_workqueue(sas_ha->event_q);
+
return 0;
}
@@ -364,8 +385,6 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
struct asd_sas_phy *phy = ha->sas_phy[i];
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- phy->port_events_pending = 0;
- phy->phy_events_pending = 0;
phy->frame_rcvd_size = 0;
}
}
@@ -537,6 +556,37 @@ static struct sas_function_template sft = {
.smp_handler = sas_smp_handler,
};
+static inline ssize_t phy_event_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
+}
+
+static inline ssize_t phy_event_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ sha->event_thres = simple_strtol(buf, NULL, 10);
+
+ /* threshold cannot be set too small */
+ if (sha->event_thres < 32)
+ sha->event_thres = 32;
+
+ return count;
+}
+
+DEVICE_ATTR(phy_event_threshold,
+ S_IRUGO|S_IWUSR,
+ phy_event_threshold_show,
+ phy_event_threshold_store);
+EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);
+
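The new attribute is readable and writable from userspace through the SCSI host class device. A small sketch, assuming a hypothetical host0 path (the actual hostN index depends on probe order):

	#include <stdio.h>

	int main(void)
	{
		const char *p = "/sys/class/scsi_host/host0/phy_event_threshold";
		FILE *f = fopen(p, "r+");
		unsigned int thres;

		if (!f)
			return 1;
		if (fscanf(f, "%u", &thres) == 1)
			printf("threshold = %u\n", thres);	/* current value */
		rewind(f);
		fprintf(f, "%u\n", 64U);	/* values below 32 are clamped to 32 */
		fclose(f);
		return 0;
	}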
struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
@@ -555,20 +605,71 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
+
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+{
+ struct asd_sas_event *event;
+ gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ event = kmem_cache_zalloc(sas_event_cache, flags);
+ if (!event)
+ return NULL;
+
+ atomic_inc(&phy->event_nr);
+
+ if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
+ if (i->dft->lldd_control_phy) {
+ if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
+ sas_printk("The phy%02d bursting events, shut it down.\n",
+ phy->id);
+ sas_notify_phy_event(phy, PHYE_SHUTDOWN);
+ }
+ } else {
+ /* PHY control is not supported; stop allocating events */
+ WARN_ONCE(1, "PHY control not supported.\n");
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+ event = NULL;
+ }
+ }
+
+ return event;
+}
+
+void sas_free_event(struct asd_sas_event *event)
+{
+ struct asd_sas_phy *phy = event->phy;
+
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+}
+
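The cmpxchg() on phy->in_shutdown above is the standard run-once guard: only the caller that transitions the flag 0 -> 1 issues the PHYE_SHUTDOWN notification, while concurrent allocators lose the race and skip it. The idiom in isolation:

	/* First caller wins; true exactly once until the flag is rearmed */
	if (cmpxchg(&phy->in_shutdown, 0, 1) == 0)
		sas_notify_phy_event(phy, PHYE_SHUTDOWN);

The flag is rearmed (reset to 0) by the phy event handlers in sas_phy.c, as the loss-of-signal and oob-done hunks later in this diff show.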
/* ---------- SAS Class register/unregister ---------- */
static int __init sas_class_init(void)
{
sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
if (!sas_task_cache)
- return -ENOMEM;
+ goto out;
+
+ sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
+ if (!sas_event_cache)
+ goto free_task_kmem;
return 0;
+free_task_kmem:
+ kmem_cache_destroy(sas_task_cache);
+out:
+ return -ENOMEM;
}
static void __exit sas_class_exit(void)
{
kmem_cache_destroy(sas_task_cache);
+ kmem_cache_destroy(sas_event_cache);
}
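KMEM_CACHE() is shorthand that names and sizes the cache from the struct itself; the sas_event_cache creation above expands to roughly:

	sas_event_cache = kmem_cache_create("asd_sas_event",
					    sizeof(struct asd_sas_event),
					    __alignof__(struct asd_sas_event),
					    SLAB_HWCACHE_ALIGN, NULL);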
MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index c07e08136491..50e12d662ffe 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -61,6 +61,9 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
int sas_register_phys(struct sas_ha_struct *sas_ha);
void sas_unregister_phys(struct sas_ha_struct *sas_ha);
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
+void sas_free_event(struct asd_sas_event *event);
+
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
@@ -98,6 +101,10 @@ int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
void sas_free_device(struct kref *kref);
+void sas_destruct_devices(struct asd_sas_port *port);
+
+extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
+extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index cdee446c29e1..bf3e1b979ca6 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -35,7 +35,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
sas_deform_port(phy, 1);
}
@@ -45,7 +45,7 @@ static void sas_phye_oob_done(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
}
@@ -58,8 +58,6 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
-
sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
@@ -88,8 +86,6 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
-
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
@@ -99,8 +95,6 @@ static void sas_phye_resume_timeout(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
-
/* phew, lldd got the phy back in the nick of time */
if (!phy->suspended) {
dev_info(&phy->phy->dev, "resume timeout cancelled\n");
@@ -113,45 +107,41 @@ static void sas_phye_resume_timeout(struct work_struct *work)
}
+static void sas_phye_shutdown(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (phy->enabled) {
+ int ret;
+
+ phy->error = 0;
+ phy->enabled = 0;
+ ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
+ if (ret)
+ sas_printk("lldd disable phy%02d returned %d\n",
+ phy->id, ret);
+ } else
+ sas_printk("phy%02d is not enabled, cannot shutdown\n",
+ phy->id);
+}
+
/* ---------- Phy class registration ---------- */
int sas_register_phys(struct sas_ha_struct *sas_ha)
{
int i;
- static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
- [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
- [PHYE_OOB_DONE] = sas_phye_oob_done,
- [PHYE_OOB_ERROR] = sas_phye_oob_error,
- [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
- [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
-
- };
-
- static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
- [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
- [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
- [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
- [PORTE_TIMER_EVENT] = sas_porte_timer_event,
- [PORTE_HARD_RESET] = sas_porte_hard_reset,
- };
-
/* Now register the phys. */
for (i = 0; i < sas_ha->num_phys; i++) {
- int k;
struct asd_sas_phy *phy = sas_ha->sas_phy[i];
phy->error = 0;
+ atomic_set(&phy->event_nr, 0);
INIT_LIST_HEAD(&phy->port_phy_el);
- for (k = 0; k < PORT_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
- phy->port_events[k].phy = phy;
- }
-
- for (k = 0; k < PHY_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
- phy->phy_events[k].phy = phy;
- }
phy->port = NULL;
phy->ha = sas_ha;
@@ -179,3 +169,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
return 0;
}
+
+const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
+ [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
+ [PHYE_OOB_DONE] = sas_phye_oob_done,
+ [PHYE_OOB_ERROR] = sas_phye_oob_error,
+ [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+ [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
+ [PHYE_SHUTDOWN] = sas_phye_shutdown,
+};
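With the handler tables now exported, event dispatch can bind a freshly allocated event to the right work function by index. A plausible sketch of the consumer (the actual sas_notify_phy_event() lives in sas_event.c and is not part of this excerpt, so names and error handling here are illustrative):

	int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
	{
		struct asd_sas_event *ev;

		if (WARN_ON(event >= PHY_NUM_EVENTS))
			return -EINVAL;

		ev = sas_alloc_event(phy);	/* may trigger PHYE_SHUTDOWN */
		if (!ev)
			return -ENOMEM;

		INIT_SAS_WORK(&ev->work, sas_phy_event_fns[event]);
		ev->phy = phy;

		if (!queue_work(phy->ha->event_q, &ev->work.work))
			sas_free_event(ev);
		return 0;
	}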
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index d3c5297c6c89..f07e55d3aa73 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -66,6 +66,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
rc = sas_notify_lldd_dev_found(dev);
if (rc) {
sas_unregister_dev(port, dev);
+ sas_destruct_devices(port);
continue;
}
@@ -192,6 +193,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
si->dft->lldd_port_formed(phy);
sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+ flush_workqueue(sas_ha->disco_q);
}
/**
@@ -219,6 +221,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
if (port->num_phys == 1) {
sas_unregister_domain_devices(port, gone);
+ sas_destruct_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else {
@@ -261,8 +264,6 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
-
sas_form_port(phy);
}
@@ -273,14 +274,15 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
unsigned long flags;
u32 prim;
- clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
-
spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
SAS_DPRINTK("broadcast received: %d\n", prim);
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
+
+ if (phy->port)
+ flush_workqueue(phy->port->ha->disco_q);
}
void sas_porte_link_reset_err(struct work_struct *work)
@@ -288,8 +290,6 @@ void sas_porte_link_reset_err(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -298,8 +298,6 @@ void sas_porte_timer_event(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -308,8 +306,6 @@ void sas_porte_hard_reset(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -323,6 +319,7 @@ static void sas_init_port(struct asd_sas_port *port,
INIT_LIST_HEAD(&port->dev_list);
INIT_LIST_HEAD(&port->disco_list);
INIT_LIST_HEAD(&port->destroy_list);
+ INIT_LIST_HEAD(&port->sas_port_del_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;
@@ -353,3 +350,11 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
sas_deform_port(sas_ha->sas_phy[i], 0);
}
+
+const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
+ [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
+ [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
+ [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
+ [PORTE_TIMER_EVENT] = sas_porte_timer_event,
+ [PORTE_HARD_RESET] = sas_porte_hard_reset,
+};
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 58476b728c57..6de9681ace82 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include "sas_internal.h"
@@ -486,15 +487,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
- int res;
+ int res = TMF_RESP_FUNC_FAILED;
struct sas_task *task = TO_SAS_TASK(cmd);
struct Scsi_Host *host = cmd->device->host;
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i = to_sas_internal(host->transportt);
+ unsigned long flags;
if (!i->dft->lldd_abort_task)
return FAILED;
- res = i->dft->lldd_abort_task(task);
+ spin_lock_irqsave(host->host_lock, flags);
+ /* We cannot do async aborts for SATA devices */
+ if (dev_is_sata(dev) && !host->host_eh_scheduled) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return FAILED;
+ }
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ if (task)
+ res = i->dft->lldd_abort_task(task);
+ else
+ SAS_DPRINTK("no task to abort\n");
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
@@ -946,21 +960,6 @@ void sas_target_destroy(struct scsi_target *starget)
sas_put_device(found_dev);
}
-static void sas_parse_addr(u8 *sas_addr, const char *p)
-{
- int i;
- for (i = 0; i < SAS_ADDR_SIZE; i++) {
- u8 h, l;
- if (!*p)
- break;
- h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- sas_addr[i] = (h<<4) | l;
- }
-}
-
#define SAS_STRING_ADDR_SIZE 16
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
@@ -977,7 +976,9 @@ int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
goto out;
}
- sas_parse_addr(addr, fw->data);
+ res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
+ if (res)
+ goto out;
out:
release_firmware(fw);
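hex2bin() replaces the hand-rolled parser removed above: it decodes count output bytes from 2*count ASCII hex digits and returns nonzero if it meets a non-hex character. For a SAS address the call amounts to (hypothetical firmware contents shown):

	u8 addr[SAS_ADDR_SIZE];			/* 8 binary bytes */
	const char *ascii = "5001438004DA2FE0";	/* stand-in for fw->data */

	if (hex2bin(addr, ascii, SAS_ADDR_SIZE))	/* 8 bytes <- 16 digits */
		return -EINVAL;

The strnlen()/2 in the patched call simply caps the byte count when the firmware blob is shorter than a full address.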
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 231302273257..61fb46da05d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -55,9 +55,10 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
-#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */
+#define LPFC_MAX_NVME_SEG_CNT 256 /* max SGL element cnt per NVME cmnd */
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -705,7 +706,6 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
-#define NVME_XRI_ABORT_EVENT 0x100000
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -945,6 +945,8 @@ struct lpfc_hba {
struct list_head lpfc_nvme_buf_list_get;
struct list_head lpfc_nvme_buf_list_put;
uint32_t total_nvme_bufs;
+ uint32_t get_nvme_bufs;
+ uint32_t put_nvme_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 82f6e219ee34..ac77081e6e9e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -148,6 +148,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
uint64_t data1, data2, data3, tot;
@@ -198,10 +199,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,6 +242,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -271,6 +283,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
+ lport = (struct lpfc_nvme_lport *)localport->private;
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
@@ -347,9 +360,16 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %016x Cmpl %016x\n",
+ "LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&phba->fc4NvmeLsRequests),
- atomic_read(&phba->fc4NvmeLsCmpls));
+ atomic_read(&phba->fc4NvmeLsCmpls),
+ atomic_read(&lport->xmt_ls_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(&phba->fc4NvmeInputRequests);
@@ -360,8 +380,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data1, data2, data3);
len += snprintf(buf+len, PAGE_SIZE-len,
- " Cmpl %016llx Outstanding %016llx\n",
- tot, (data1 + data2 + data3) - tot);
+ " noxri %08x nondlp %08x qdepth %08x "
+ "wqerr %08x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " Cmpl %016llx Outstanding %016llx Abort %08x\n",
+ tot, ((data1 + data2 + data3) - tot),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "FCP CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
return len;
}
@@ -2260,8 +2294,8 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
-static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
-static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
+static DEVICE_ATTR_RO(lpfc_drvr_version);
+static DEVICE_ATTR_RO(lpfc_enable_fip);
static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
lpfc_board_mode_show, lpfc_board_mode_store);
static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
@@ -2272,12 +2306,11 @@ static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
-static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
-static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
-static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
-static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
-static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
- lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR_RO(lpfc_temp_sensor);
+static DEVICE_ATTR_RO(lpfc_fips_level);
+static DEVICE_ATTR_RO(lpfc_fips_rev);
+static DEVICE_ATTR_RO(lpfc_dss);
+static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
NULL);
@@ -2385,8 +2418,7 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
- lpfc_soft_wwn_enable_store);
+static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
/**
* lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
@@ -2485,8 +2517,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
"reinit adapter - %d\n", stat2);
return (stat1 || stat2) ? -EIO : count;
}
-static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
- lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
+static DEVICE_ATTR_RW(lpfc_soft_wwpn);
/**
* lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
@@ -2549,8 +2580,7 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
- lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+static DEVICE_ATTR_RW(lpfc_soft_wwnn);
/**
* lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for
@@ -3068,8 +3098,7 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
" 1 - poll with interrupts enabled"
" 3 - poll and disable FCP ring interrupts");
-static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
- lpfc_poll_show, lpfc_poll_store);
+static DEVICE_ATTR_RW(lpfc_poll);
int lpfc_no_hba_reset_cnt;
unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
@@ -3302,8 +3331,7 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
lpfc_vport_param_store(nodev_tmo)
-static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
- lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
+static DEVICE_ATTR_RW(lpfc_nodev_tmo);
/*
# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
@@ -3352,8 +3380,7 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
}
lpfc_vport_param_store(devloss_tmo)
-static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
- lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
+static DEVICE_ATTR_RW(lpfc_devloss_tmo);
/*
* lpfc_suppress_rsp: Enable suppress rsp feature is firmware supports it
@@ -3366,12 +3393,13 @@ LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
/*
* lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
* lpfc_nvmet_mrq = 1 use a single RQ pair
* lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq,
- 1, 1, 16,
+ LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
"Specify number of RQ pairs for processing NVMET cmds");
/*
@@ -3545,8 +3573,7 @@ lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
return 0;
}
lpfc_vport_param_store(restrict_login);
-static DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR,
- lpfc_restrict_login_show, lpfc_restrict_login_store);
+static DEVICE_ATTR_RW(lpfc_restrict_login);
/*
# Some disk devices have a "select ID" or "select Target" capability.
@@ -3660,8 +3687,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
}
lpfc_param_show(topology)
-static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
- lpfc_topology_show, lpfc_topology_store);
+static DEVICE_ATTR_RW(lpfc_topology);
/**
* lpfc_static_vport_show: Read callback function for
@@ -3691,8 +3717,7 @@ lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
/*
* Sysfs attribute to control the statistical data collection.
*/
-static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
- lpfc_static_vport_show, NULL);
+static DEVICE_ATTR_RO(lpfc_static_vport);
/**
* lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -3919,8 +3944,7 @@ lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
/*
* Sysfs attribute to control the statistical data collection.
*/
-static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
- lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);
+static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
/*
* lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
@@ -4159,8 +4183,7 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
return -EINVAL;
}
-static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
- lpfc_link_speed_show, lpfc_link_speed_store);
+static DEVICE_ATTR_RW(lpfc_link_speed);
/*
# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
@@ -4253,8 +4276,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
return rc;
}
-static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
- lpfc_aer_support_show, lpfc_aer_support_store);
+static DEVICE_ATTR_RW(lpfc_aer_support);
/**
* lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
@@ -4401,8 +4423,7 @@ LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
"Enable PCIe device SR-IOV virtual fn");
lpfc_param_show(sriov_nr_virtfn)
-static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
- lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
/**
* lpfc_request_firmware_store - Request for Linux generic firmware upgrade
@@ -4576,8 +4597,7 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
return 0;
}
-static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
- lpfc_fcp_imax_show, lpfc_fcp_imax_store);
+static DEVICE_ATTR_RW(lpfc_fcp_imax);
/*
* lpfc_auto_imax: Controls Auto-interrupt coalescing values support.
@@ -4737,8 +4757,7 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
return 0;
}
-static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
- lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
@@ -4824,9 +4843,7 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
return 0;
}
lpfc_vport_param_store(max_scsicmpl_time);
-static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
- lpfc_max_scsicmpl_time_show,
- lpfc_max_scsicmpl_time_store);
+static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
/*
# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
@@ -5139,7 +5156,7 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
/*
@@ -6362,6 +6379,9 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
}
+ if (!phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
@@ -6369,10 +6389,13 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
}
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
+
} else {
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvmet_fb_size = 0;
}
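The MRQ sizing now has three regimes: 0 (LPFC_NVMET_MRQ_AUTO) derives the count from the NVME IO channel count, explicit values are honored up to the channel count, and everything is capped at LPFC_NVMET_MRQ_MAX. Condensed into one sketch function (names mirror the driver, but this is not driver code):

	static u32 lpfc_resolve_nvmet_mrq(u32 cfg_mrq, u32 nvme_io_channel)
	{
		if (cfg_mrq == LPFC_NVMET_MRQ_AUTO)	/* 0: auto-size */
			cfg_mrq = nvme_io_channel;
		if (cfg_mrq > nvme_io_channel)		/* avoid WQE exhaustion */
			cfg_mrq = nvme_io_channel;
		return min_t(u32, cfg_mrq, LPFC_NVMET_MRQ_MAX);
	}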
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4e858b38529a..559f9aa0ed08 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -254,6 +254,8 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
struct lpfc_nvmet_ctxbuf *ctxp);
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr);
+void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba);
+void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f77673ab4a84..9d20d2c208c7 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -471,6 +471,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -685,6 +686,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_els_flush_rscn(vport);
goto out;
}
+
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_RSCN_DEFERRED) {
+ vport->fc_flag &= ~FC_RSCN_DEFERRED;
+ spin_unlock_irq(shost->host_lock);
+
+ /*
+ * Skip processing the NS response
+ * Re-issue the NS cmd
+ */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0151 Process Deferred RSCN Data: x%x x%x\n",
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ lpfc_els_handle_rscn(vport);
+
+ goto out;
+ }
+ spin_unlock_irq(shost->host_lock);
+
if (irsp->ulpStatus) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2bf5ad3b1512..17ea3bb04266 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -750,6 +750,8 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
uint64_t tot, data1, data2, data3;
int len = 0;
int cnt;
@@ -775,10 +777,15 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
}
len += snprintf(buf + len, size - len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, size - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf + len, size - len,
@@ -812,6 +819,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf + len, size - len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, size - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -885,8 +898,38 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
data1, data2, data3);
len += snprintf(buf + len, size - len,
- " Cmpl %016llx Outstanding %016llx\n",
+ " Cmpl %016llx Outstanding %016llx\n",
tot, (data1 + data2 + data3) - tot);
+
+ localport = vport->localport;
+ if (!localport)
+ return len;
+ lport = (struct lpfc_nvme_lport *)localport->private;
+ if (!lport)
+ return len;
+
+ len += snprintf(buf + len, size - len,
+ "LS Xmt Err: Abrt %08x Err %08x "
+ "Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_abort),
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Xmt Err: noxri %06x nondlp %06x "
+ "qdepth %06x wqerr %06x Abrt %06x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
+
}
return len;
@@ -3213,7 +3256,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1;
}
- if (eqidx < phba->cfg_nvmet_mrq) {
+ if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
*len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
@@ -3246,7 +3289,7 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
- "bs:x%x proc:x%llx eqd %d]\n",
+ "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4, qp->q_mode);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
@@ -3366,6 +3409,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
+ qp = phba->sli4_hba.hdr_rq;
+ len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
+ "ELS RQpair", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
+
/* Slow-path NVME LS response CQ */
qp = phba->sli4_hba.nvmels_cq;
len = __lpfc_idiag_print_cq(qp, "NVME LS",
@@ -3383,12 +3432,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
- qp = phba->sli4_hba.hdr_rq;
- len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
- "RQpair", pbuffer, len);
- if (len >= max_cnt)
- goto too_big;
-
goto out;
}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f9a566eaef04..5a7547f9d8d8 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -134,6 +134,8 @@ struct lpfc_nodelist {
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
uint32_t upcall_flags;
+#define NLP_WAIT_FOR_UNREG 0x1
+
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
};
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 39d5b146202e..234c7c015982 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -858,6 +858,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);
+ /* If we are pt2pt with another NPort, force NPIV off! */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
@@ -916,28 +919,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
+ }
+ } else {
/* This side will wait for the PLOGI, decrement ndlp reference
* count indicating that ndlp can be released when other
* references to it are done.
*/
lpfc_nlp_put(ndlp);
- /* If we are pt2pt with another NPort, force NPIV off! */
- phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- lpfc_config_link(phba, mbox);
-
- mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto fail;
+ /* Start discovery - this should just do CLEAR_LA */
+ lpfc_disc_start(vport);
}
return 0;
@@ -1030,30 +1034,31 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stop_rr_fcf_flogi:
/* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2858 FLOGI failure Status:x%x/x%x TMO:x%x "
- "Data x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, phba->hba_flag,
- phba->fcf.fcf_flag);
+ if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_LOOP_OPEN_FAILURE)))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2858 FLOGI failure Status:x%x/x%x "
+ "TMO:x%x Data x%x x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, phba->hba_flag,
+ phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- /* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
-
-
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
goto flogifail;
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
+
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1670,6 +1675,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
+ lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -2088,6 +2094,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
+
+ /* Driver supports multiple FC4 types. Counters matter. */
+ vport->fc_prli_sent--;
+ ndlp->fc4_prli_sent--;
spin_unlock_irq(shost->host_lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -2095,9 +2105,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);
- /* Ddriver supports multiple FC4 types. Counters matter. */
- vport->fc_prli_sent--;
-
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
@@ -2111,7 +2118,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
/* Check for retry */
- ndlp->fc4_prli_sent--;
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
goto out;
@@ -2190,6 +2196,15 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
+ /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
+ * fields here before any of them can complete.
+ */
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ ndlp->nvme_fb_size = 0;
+
send_next_prli:
if (local_nlp_type & NLP_FC4_FCP) {
/* Payload is 4 + 16 = 20 x14 bytes. */
@@ -2298,6 +2313,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
+
+ /* The vport counters are used for lpfc_scan_finished, but
+ * the ndlp is used to track outstanding PRLIs for different
+ * FC4 types.
+ */
+ vport->fc_prli_sent++;
+ ndlp->fc4_prli_sent++;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
@@ -2308,12 +2330,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}
- /* The vport counters are used for lpfc_scan_finished, but
- * the ndlp is used to track outstanding PRLIs for different
- * FC4 types.
- */
- vport->fc_prli_sent++;
- ndlp->fc4_prli_sent++;
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
@@ -2951,8 +2967,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
-
- lpfc_nlp_put(ndlp);
+ if (!(vport->fc_flag & FC_PT2PT))
+ lpfc_nlp_put(ndlp);
return 0;
}
@@ -6172,9 +6188,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_DEFERRED;
- spin_unlock_irq(shost->host_lock);
return 0;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6849,7 +6862,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
/* use the command's xri in the response */
@@ -8060,13 +8073,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
-
- /* NVMET accepts NVME PRLI only. Reject FCP PRLI */
- if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- rjt_exp = LSEXP_REQ_UNSUPPORTED;
- break;
- }
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
break;
case ELS_CMD_LIRR:
@@ -8149,9 +8155,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_nlp_put(ndlp);
break;
case ELS_CMD_REC:
- /* receive this due to exchange closed */
- rjt_err = LSRJT_UNABLE_TPC;
- rjt_exp = LSEXP_INVALID_OX_RX;
+ /* receive this due to exchange closed */
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_INVALID_OX_RX;
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2bafde2b7cfe..b159a5c4e388 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -640,8 +640,6 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_rrq_active(phba);
if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
lpfc_sli4_fcp_xri_abort_event_proc(phba);
- if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
- lpfc_sli4_nvme_xri_abort_event_proc(phba);
if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
lpfc_sli4_els_xri_abort_event_proc(phba);
if (phba->hba_flag & ASYNC_EVENT)
@@ -4178,12 +4176,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
vport->phba->nport_event_cnt++;
- if (vport->phba->nvmet_support == 0)
- /* Start devloss */
- lpfc_nvme_unregister_port(vport, ndlp);
- else
+ if (vport->phba->nvmet_support == 0) {
+ /* Start devloss if target. */
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ lpfc_nvme_unregister_port(vport, ndlp);
+ } else {
/* NVMET has no upcall. */
lpfc_nlp_put(ndlp);
+ }
}
}
@@ -4207,11 +4207,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type & NLP_FC4_NVME) {
if (vport->phba->nvmet_support == 0) {
/* Register this rport with the transport.
- * Initiators take the NDLP ref count in
- * the register.
+ * Only NVME Target Rports are registered with
+ * the transport.
*/
- vport->phba->nport_event_cnt++;
- lpfc_nvme_register_port(vport, ndlp);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ }
} else {
/* Just take an NDLP ref count since the
* target does not register rports.
@@ -5838,9 +5840,12 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3185 FIND node filter %p DID "
- "Data: x%p x%x x%x\n",
+ "ndlp %p did x%x flg x%x st x%x "
+ "xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag);
+ ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_xri, ndlp->nlp_type,
+ ndlp->nlp_rpi);
return ndlp;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2b145966c73f..73c2f6971d2b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1122,6 +1122,7 @@ struct cq_context {
#define LPFC_CQ_CNT_256 0x0
#define LPFC_CQ_CNT_512 0x1
#define LPFC_CQ_CNT_1024 0x2
+#define LPFC_CQ_CNT_WORD7 0x3
uint32_t word1;
#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_cq_eq_id_MASK 0x000000FF
@@ -1129,7 +1130,7 @@ struct cq_context {
#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
#define lpfc_cq_eq_id_2_WORD word1
- uint32_t reserved0;
+ uint32_t lpfc_cq_context_count; /* Version 2 Only */
uint32_t reserved1;
};
@@ -1193,6 +1194,9 @@ struct lpfc_mbx_cq_create_set {
#define lpfc_mbx_cq_create_set_arm_SHIFT 31
#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001
#define lpfc_mbx_cq_create_set_arm_WORD word2
+#define lpfc_mbx_cq_create_set_cq_cnt_SHIFT 16
+#define lpfc_mbx_cq_create_set_cq_cnt_MASK 0x00007FFF
+#define lpfc_mbx_cq_create_set_cq_cnt_WORD word2
#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0
#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_num_cq_WORD word2
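These SHIFT/MASK/WORD triplets feed lpfc's bf_set()/bf_get() accessors, so populating the new version-2 CQ count field would look roughly like the following sketch (the cq_set variable and entry-count expression are illustrative):

	bf_set(lpfc_mbx_cq_create_set_cq_cnt, &cq_set->u.request,
	       cq->entry_count);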
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2b7ea7e53e12..f539c554588c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1034,6 +1034,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
LIST_HEAD(nvmet_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
+ int cnt;
lpfc_sli_hbqbuf_free_all(phba);
@@ -1090,11 +1091,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ cnt = 0;
list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
psb->pCmd = NULL;
psb->status = IOSTAT_SUCCESS;
+ cnt++;
}
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ phba->put_nvme_bufs += cnt;
list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
@@ -3339,6 +3343,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_put, list) {
list_del(&lpfc_ncmd->list);
+ phba->put_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3350,6 +3355,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
list_del(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3754,9 +3760,11 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
uint16_t i, lxri, els_xri_cnt;
uint16_t nvme_xri_cnt, nvme_xri_max;
LIST_HEAD(nvme_sgl_list);
- int rc;
+ int rc, cnt;
phba->total_nvme_bufs = 0;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return 0;
@@ -3780,6 +3788,9 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+ cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -3824,6 +3835,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock_irq(&phba->nvme_buf_list_get_lock);
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = cnt;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -5609,8 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize the NVME buffer list used by driver for NVME IO */
spin_lock_init(&phba->nvme_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = 0;
spin_lock_init(&phba->nvme_buf_list_put_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
}
/* Initialize the fabric iocb list */
@@ -5806,6 +5820,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ int extra;
uint64_t wwn;
phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5860,13 +5875,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
/*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
+ /*
* It doesn't matter what family our adapter is in, we are
* limited to 2 Pages, 512 SGEs, for our SGL.
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5922,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) *
+ ((phba->cfg_sg_seg_cnt + extra) *
sizeof(struct sli4_sge));
/* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
* need to post 1 page for the SGL.
*/
}
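Worked example of the new accounting: with NVME enabled, extra = 3, so a configured sg_seg_cnt at the SGL ceiling is clamped to LPFC_MAX_SGL_SEG_CNT - 3 = 509, and the SGL posted per command carries 509 + 3 = 512 entries, which still fits the two-page limit noted above.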
@@ -5947,9 +5970,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list used by worker thread */
@@ -7936,8 +7956,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
phba->cfg_fcp_io_channel = io_channel;
if (phba->cfg_nvme_io_channel > io_channel)
phba->cfg_nvme_io_channel = io_channel;
- if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ if (phba->nvmet_support) {
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ }
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -7958,10 +7982,10 @@ static int
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- int cnt;
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0508 Failed allocate fast-path NVME CQ (%d)\n",
@@ -7970,8 +7994,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
}
phba->sli4_hba.nvme_cq[wqidx] = qdesc;
- cnt = LPFC_NVME_WQSIZE;
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -7987,11 +8011,18 @@ static int
lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create Fast Path FCP CQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the CQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
@@ -8000,9 +8031,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
phba->sli4_hba.fcp_cq[wqidx] = qdesc;
/* Create Fast Path FCP WQs */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the WQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP WQ (%d)\n",
@@ -8173,7 +8210,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create HBA Event Queues (EQs) */
for (idx = 0; idx < io_channel; idx++) {
/* Create EQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8196,8 +8234,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
@@ -8213,7 +8252,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path Mailbox Command Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8223,7 +8263,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = qdesc;
/* Create slow-path ELS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8239,7 +8280,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create Mailbox Command Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8253,7 +8295,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path ELS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8265,7 +8308,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Create NVME LS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8275,7 +8319,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.nvmels_cq = qdesc;
/* Create NVME LS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8291,7 +8336,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create Receive Queue for header */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8301,7 +8347,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.hdr_rq = qdesc;
/* Create Receive Queue for data */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8314,6 +8361,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
/* Create NVMET Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8339,6 +8387,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create NVMET Receive Queue for data */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8437,13 +8486,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release NVME CQ mapping array */
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
- phba->cfg_nvmet_mrq);
+ if (phba->nvmet_support) {
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+ phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
- phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
- phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+ phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+ phba->cfg_nvmet_mrq);
+ }
/* Release mailbox command work queue */
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -8514,6 +8565,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
qidx, (uint32_t)rc);
return rc;
}
+ cq->chann = qidx;
if (qtype != LPFC_MBOX) {
/* Setup nvme_cq_map for fast lookup */
@@ -8533,6 +8585,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
/* no need to tear down cq - caller will do so */
return rc;
}
+ wq->chann = qidx;
/* Bind this CQ/WQ to the NVME ring */
pring = wq->pring;
@@ -8773,6 +8826,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
}
+ phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"6090 NVMET CQ setup: cq-id=%d, "
"parent eq-id=%d\n",
@@ -8994,19 +9049,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
- /* Unset NVMET MRQ queue */
- if (phba->sli4_hba.nvmet_mrq_hdr) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_rq_destroy(phba,
+ if (phba->nvmet_support) {
+ /* Unset NVMET MRQ queue */
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_rq_destroy(
+ phba,
phba->sli4_hba.nvmet_mrq_hdr[qidx],
phba->sli4_hba.nvmet_mrq_data[qidx]);
- }
+ }
- /* Unset NVMET CQ Set complete queue */
- if (phba->sli4_hba.nvmet_cqset) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_cq_destroy(phba,
- phba->sli4_hba.nvmet_cqset[qidx]);
+ /* Unset NVMET CQ Set complete queue */
+ if (phba->sli4_hba.nvmet_cqset) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_cq_destroy(
+ phba, phba->sli4_hba.nvmet_cqset[qidx]);
+ }
}
/* Unset FCP response complete queue */
@@ -9175,11 +9233,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- /* Pending NVME XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- &cqelist);
- }
/* Pending async events */
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
@@ -9421,44 +9474,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
lpfc_sli4_bar0_register_memmap(phba, if_type);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
- /*
- * Map SLI4 if type 0 HBA Control Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
- bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
- phba->sli4_hba.ctrl_regs_memmap_p =
- ioremap(phba->pci_bar1_map, bar1map_len);
- if (!phba->sli4_hba.ctrl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA control registers.\n");
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
+ /*
+ * Map SLI4 if type 0 HBA Control Register base to a
+ * kernel virtual address and setup the registers.
+ */
+ phba->pci_bar1_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR2);
+ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->sli4_hba.ctrl_regs_memmap_p =
+ ioremap(phba->pci_bar1_map,
+ bar1map_len);
+ if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA "
+ "control registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_conf;
+ }
+ phba->pci_bar2_memmap_p =
+ phba->sli4_hba.ctrl_regs_memmap_p;
+ lpfc_sli4_bar1_register_memmap(phba);
+ } else {
+ error = -ENOMEM;
goto out_iounmap_conf;
}
- phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
- lpfc_sli4_bar1_register_memmap(phba);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
- /*
- * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
- bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
- phba->sli4_hba.drbl_regs_memmap_p =
- ioremap(phba->pci_bar2_map, bar2map_len);
- if (!phba->sli4_hba.drbl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA doorbell registers.\n");
- goto out_iounmap_ctrl;
- }
- phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
- error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
- if (error)
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
+ /*
+ * Map SLI4 if type 0 HBA Doorbell Register base to
+ * a kernel virtual address and setup the registers.
+ */
+ phba->pci_bar2_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR4);
+ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->sli4_hba.drbl_regs_memmap_p =
+ ioremap(phba->pci_bar2_map,
+ bar2map_len);
+ if (!phba->sli4_hba.drbl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA"
+ " doorbell registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_ctrl;
+ }
+ phba->pci_bar4_memmap_p =
+ phba->sli4_hba.drbl_regs_memmap_p;
+ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+ if (error)
+ goto out_iounmap_all;
+ } else {
+ error = -ENOMEM;
goto out_iounmap_all;
+ }
}
return 0;
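
Note on the rework above: for an if_type 0 function, BAR2 and BAR4 are now treated as required resources. Previously a missing BAR was silently skipped because it was folded into the same if-condition as the interface-type test; with the nested form, the else branches set -ENOMEM, so probe fails instead of continuing with unmapped control/doorbell registers.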
@@ -10093,6 +10164,16 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ /* Driver just aborted IOs during the hba_unset process. Pause
+ * here to give the HBA time to complete the IO and get entries
+ * into the abts lists.
+ */
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
+
+ /* Wait for NVME pending IO to flush back to transport. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_nvme_wait_for_io_drain(phba);
+
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
fcp_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
@@ -10369,7 +10450,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
!phba->nvme_support) {
phba->nvme_support = 0;
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvme_io_channel = 0;
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
@@ -11616,6 +11697,10 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
+
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -11647,6 +11732,10 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
+
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
}
/**
@@ -12138,10 +12227,10 @@ int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create FOF EQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc)
goto out_error;
@@ -12151,7 +12240,15 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
if (phba->cfg_fof) {
/* Create OAS CQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc)
goto out_error;
@@ -12159,11 +12256,16 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.oas_cq = qdesc;
/* Create OAS WQ */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
- phba->sli4_hba.wq_ecount);
-
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc)
goto out_error;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b6957d944b9a..d841aa42f607 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -390,6 +390,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
break;
}
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -727,6 +732,41 @@ out:
return 0;
}
+static uint32_t
+lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct ls_rjt stat;
+ uint32_t *payload;
+ uint32_t cmd;
+
+ payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ cmd = *payload;
+ if (vport->phba->nvmet_support) {
+ /* Must be a NVME PRLI */
+ if (cmd == ELS_CMD_PRLI)
+ goto out;
+ } else {
+ /* Initiator mode. */
+ if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
+ goto out;
+ }
+ return 1;
+out:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+ "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
+ "state x%x flags x%x\n",
+ cmd, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, NULL);
+ return 0;
+}
+
static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
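
lpfc_rcv_prli_support_check() returns 1 when the PRLI type is acceptable in the current mode and 0 after it has sent an LS_RJT (LSRJT_CMD_UNSUPPORTED / LSEXP_REQ_UNSUPPORTED): a target port rejects FCP PRLIs, and an initiator without nvmei_support rejects NVME PRLIs. The state-machine hunks below gate on it and simply return the current nlp_state when the check fails.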
@@ -742,9 +782,6 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lp = (uint32_t *) pcmd->virt;
npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
if (npr->initiatorFunc) {
@@ -769,8 +806,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* type. Target mode does not issue gft_id so doesn't get
* the fc4 type set until now.
*/
- if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
+ if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ }
+ if (npr->prliType == PRLI_FCP_TYPE)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
}
if (rport) {
/* We need to update the rport role values */
@@ -1373,7 +1414,8 @@ lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1544,6 +1586,9 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
+ return ndlp->nlp_state;
+ }
if (vport->phba->nvmet_support) {
/* NVME Target mode. Handle and respond to the PRLI and
* transition to UNMAPPED provided the RPI has completed
@@ -1552,28 +1597,22 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else {
/* RPI registration has not completed. Reject the PRLI
* to prevent an illegal state transition when the
* rpi registration does complete.
*/
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
- "6115 NVMET ndlp rpi %d state "
- "unknown, state x%x flags x%08x\n",
- ndlp->nlp_rpi, ndlp->nlp_state,
- ndlp->nlp_flag);
memset(&stat, 0, sizeof(struct ls_rjt));
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
ndlp, NULL);
+ return ndlp->nlp_state;
}
} else {
/* Initiator mode. */
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
}
-
return ndlp->nlp_state;
}
@@ -1819,6 +1858,8 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1922,13 +1963,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
}
- /* Check out PRLI rsp */
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-
- /* NVME or FCP first burst must be negotiated for each PRLI. */
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
- ndlp->nvme_fb_size = 0;
if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -1945,8 +1979,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
} else if (nvpr &&
(bf_get_be32(prli_acc_rsp_code, nvpr) ==
PRLI_REQ_EXECUTED) &&
@@ -1991,8 +2023,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
be32_to_cpu(nvpr->word5),
ndlp->nlp_flag, ndlp->nlp_fcp_info,
ndlp->nlp_type);
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
}
if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
(vport->port_type == LPFC_NPIV_PORT) &&
@@ -2016,7 +2046,8 @@ out_err:
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
- else
+ else if (ndlp->nlp_type &
+ (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else
lpfc_printf_vlog(vport,
@@ -2241,6 +2272,9 @@ lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
+
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
@@ -2310,6 +2344,8 @@ lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 517ae570e507..81e3a4f10c3c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -57,11 +57,13 @@
/* NVME initiator-based functions */
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite);
static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+static struct nvme_fc_port_template lpfc_nvme_template;
/**
* lpfc_nvme_create_queue -
@@ -88,6 +90,9 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_qhandle *qhandle;
char *str;
+ if (!pnvme_lport->private)
+ return -ENOMEM;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
@@ -140,6 +145,9 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
+ if (!pnvme_lport->private)
+ return;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
@@ -154,6 +162,10 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
struct lpfc_nvme_lport *lport = localport->private;
+ lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
+ "6173 localport %p delete complete\n",
+ lport);
+
/* release any threads waiting for the unreg to complete */
complete(&lport->lport_unreg_done);
}
@@ -189,16 +201,19 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete complete %p\n",
+ "6146 remoteport delete of remoteport %p\n",
remoteport);
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
+
+ /* Remove original register reference. The host transport
+ * won't reference this rport/remoteport any further.
+ */
lpfc_nlp_put(ndlp);
rport_err:
- /* This call has to execute as long as the rport is valid.
- * Release any threads waiting for the unreg to complete.
- */
- complete(&rport->rport_unreg_done);
+ return;
}
static void
@@ -206,6 +221,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_vport *vport = cmdwqe->vport;
+ struct lpfc_nvme_lport *lport;
uint32_t status;
struct nvmefc_ls_req *pnvme_lsreq;
struct lpfc_dmabuf *buf_ptr;
@@ -215,6 +231,13 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_ls_xb);
+ atomic_inc(&lport->cmpl_ls_err);
+ }
+
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6047 nvme cmpl Enter "
@@ -419,6 +442,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
if (vport->load_flag & FC_UNLOADING)
return -ENODEV;
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -490,6 +516,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
ndlp, 2, 30, 0);
if (ret != WQE_SUCCESS) {
+ atomic_inc(&lport->xmt_ls_err);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6052 EXIT. issue ls wqe failed lport %p, "
"rport %p lsreq%p Status %x DID %x\n",
@@ -534,6 +561,9 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
phba = vport->phba;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
+
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -571,6 +601,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
+ atomic_inc(&lport->xmt_ls_abort);
spin_lock_irq(&phba->hbalock);
list_del_init(&wqe->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, wqe);
@@ -774,8 +805,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct lpfc_nvme_lport *lport;
unsigned long flags;
- uint32_t code;
+ uint32_t code, status;
uint16_t cid, sqhd, data;
uint32_t *ptr;
@@ -790,10 +822,17 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_fcp_xb);
+ atomic_inc(&lport->cmpl_fcp_err);
+ }
lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
- bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
+ status, wcqe->parameter);
/*
* Catch race where our node has transitioned, but the
* transport is still transitioning.
@@ -851,8 +890,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
nCmd->transferred_length = nCmd->payload_length;
} else {
- lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
- LPFC_IOCB_STATUS_MASK);
+ lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
/* For NVME, the only failure path that results in an
@@ -946,10 +984,13 @@ out_err:
freqpriv->nvme_buf = NULL;
/* NVME targets need completion held off until the abort exchange
- * completes.
+ * completes unless the NVME Rport is getting unregistered.
*/
- if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
nCmd->done(nCmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_ncmd->nrport = NULL;
@@ -1149,7 +1190,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
+ if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
@@ -1239,6 +1280,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct nvmefc_fcp_req *pnvme_fcreq)
{
int ret = 0;
+ int expedite = 0;
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -1246,13 +1288,30 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_buf *lpfc_ncmd;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_qhandle *lpfc_queue_info;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
#endif
+ /* Validate pointers. LLDD fault handling with the transport
+ * has timing races.
+ */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ if (unlikely(!lport)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+ "6129 Fail Abort, NULL hw_queue_handle\n");
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
phba = vport->phba;
if (vport->load_flag & FC_UNLOADING) {
@@ -1260,16 +1319,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
- /* Validate pointers. */
- if (!pnvme_lport || !pnvme_rport || !freqpriv) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
- "6117 No Send:IO submit ptrs NULL, lport %p, "
- "rport %p fcreq_priv %p\n",
- pnvme_lport, pnvme_rport, freqpriv);
- ret = -ENODEV;
- goto out_fail;
- }
+ freqpriv = pnvme_fcreq->private;
+ if (unlikely(!freqpriv)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on)
start = ktime_get_ns();
@@ -1293,6 +1353,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6066 Missing node for DID %x\n",
pnvme_rport->port_id);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
@@ -1306,21 +1367,36 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"IO. State x%x, Type x%x\n",
rport, pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
+ /* Currently only NVME Keep alive commands should be expedited
+ * if the driver runs out of a resource. These are only
+ * issued on the admin queue, qidx 0.
+ */
+ if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
+ sqe = &((struct nvme_fc_cmd_iu *)
+ pnvme_fcreq->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_keep_alive)
+ expedite = 1;
+ }
+
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
+ !expedite) {
+ atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
goto out_fail;
}
- lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
+ lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
if (lpfc_ncmd == NULL) {
+ atomic_inc(&lport->xmt_fcp_noxri);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6065 driver's buffer pool is empty, "
"IO failed\n");
@@ -1373,6 +1449,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
+ atomic_inc(&lport->xmt_fcp_wqerr);
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 FCP could not issue WQE err %x "
@@ -1473,19 +1550,36 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct lpfc_nvme_rport *rport;
struct lpfc_nvme_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
union lpfc_wqe *abts_wqe;
unsigned long flags;
int ret_val;
+ /* Validate pointers. LLDD fault handling with the transport
+ * has timing races.
+ */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
- rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ if (unlikely(!lport))
+ return;
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+ "6129 Fail Abort, HW Queue Handle NULL.\n");
+ return;
+ }
+
phba = vport->phba;
+ freqpriv = pnvme_fcreq->private;
+
+ if (unlikely(!freqpriv))
+ return;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
@@ -1552,6 +1646,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
+ atomic_inc(&lport->xmt_fcp_abort);
lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
nvmereq_wqe->sli4_xritag,
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
@@ -1931,6 +2026,8 @@ lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -2067,6 +2164,20 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
return num_posted;
}
+static inline struct lpfc_nvme_buf *
+lpfc_nvme_buf(struct lpfc_hba *phba)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_get, list) {
+ list_del_init(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
+ return lpfc_ncmd;
+ }
+ return NULL;
+}
+
/**
* lpfc_get_nvme_buf - Get an nvme buffer from lpfc_nvme_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
@@ -2079,35 +2190,27 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
* Pointer to lpfc_nvme_buf - Success
**/
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite)
{
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+ struct lpfc_nvme_buf *lpfc_ncmd = NULL;
unsigned long iflag = 0;
- int found = 0;
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
- if (!found) {
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
+ if (!lpfc_ncmd) {
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice(&phba->lpfc_nvme_buf_list_put,
&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs += phba->put_nvme_bufs;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
- if (!found)
- return NULL;
return lpfc_ncmd;
}
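
The rewritten get path holds back a small reserve that only expedited commands may consume (LPFC_NVME_EXPEDITE_XRICNT, defined as 8 in the lpfc_nvme.h hunk below). A sketch of that policy, assuming a bare counter in place of the driver's dual get/put lists:

        #include <stdio.h>

        #define EXPEDITE_RESERVE 8      /* mirrors LPFC_NVME_EXPEDITE_XRICNT */

        static int pool_count = 9;

        /* Returns 1 if a buffer may be taken, 0 otherwise. */
        static int pool_take(int expedite)
        {
                if (pool_count > EXPEDITE_RESERVE ||
                    (expedite && pool_count > 0)) {
                        pool_count--;
                        return 1;
                }
                return 0;
        }

        int main(void)
        {
                printf("%d\n", pool_take(0));   /* 1: above the reserve */
                printf("%d\n", pool_take(0));   /* 0: reserve held back */
                printf("%d\n", pool_take(1));   /* 1: keep-alive dips into it */
                return 0;
        }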
@@ -2145,6 +2248,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs++;
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
}
}
@@ -2221,6 +2325,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport->vport = vport;
vport->nvmei_support = 1;
+ atomic_set(&lport->xmt_fcp_noxri, 0);
+ atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
+ atomic_set(&lport->xmt_fcp_qdepth, 0);
+ atomic_set(&lport->xmt_fcp_wqerr, 0);
+ atomic_set(&lport->xmt_fcp_abort, 0);
+ atomic_set(&lport->xmt_ls_abort, 0);
+ atomic_set(&lport->xmt_ls_err, 0);
+ atomic_set(&lport->cmpl_fcp_xb, 0);
+ atomic_set(&lport->cmpl_fcp_err, 0);
+ atomic_set(&lport->cmpl_ls_xb, 0);
+ atomic_set(&lport->cmpl_ls_err, 0);
+
/* Don't post more new bufs if repost already recovered
* the nvme sgls.
*/
@@ -2234,6 +2350,47 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
return ret;
}
+/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
+ *
+ * The driver has to wait for the host nvme transport to callback
+ * indicating the localport has successfully unregistered all
+ * resources. Since this is an uninterruptible wait, loop every ten
+ * seconds and print a message indicating no progress.
+ *
+ * An uninterruptible wait is used because of the risk of transport-to-
+ * driver state mismatch.
+ */
+void
+lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
+ struct lpfc_nvme_lport *lport)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u32 wait_tmo;
+ int ret;
+
+ /* The host transport has to clean up and confirm, which requires
+ * an indefinite wait. If a 10 second wait expires, print a message
+ * and renew the wait; a timeout here is unexpected.
+ */
+ wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+ while (true) {
+ ret = wait_for_completion_timeout(&lport->lport_unreg_done,
+ wait_tmo);
+ if (unlikely(!ret)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6176 Lport %p Localport %p wait "
+ "timed out. Renewing.\n",
+ lport, vport->localport);
+ continue;
+ }
+ break;
+ }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6177 Lport %p Localport %p Complete Success\n",
+ lport, vport->localport);
+#endif
+}
+
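
wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise, so the loop renews the wait only when a full LPFC_NVME_WAIT_TMO interval (10 seconds, per the lpfc_nvme.h hunk below) elapses without the transport's unregister callback completing.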
/**
* lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
* @pnvme: pointer to lpfc nvme data structure.
@@ -2268,7 +2425,11 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
*/
init_completion(&lport->lport_unreg_done);
ret = nvme_fc_unregister_localport(localport);
- wait_for_completion_timeout(&lport->lport_unreg_done, 5);
+
+ /* Wait for completion. This either blocks
+ * indefinitely or succeeds.
+ */
+ lpfc_nvme_lport_unreg_wait(vport, lport);
/* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the
@@ -2365,6 +2526,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ if (!ndlp->nrport)
+ lpfc_nlp_get(ndlp);
+
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
/* If the ndlp already has an nrport, this is just
@@ -2373,23 +2537,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
rport = remote_port->private;
if (ndlp->nrport) {
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "rport wwpn 0x%llx, "
- "Data: x%x x%x x%x x%06x\n",
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- ndlp->nlp_type,
- ndlp->nlp_DID);
+ if (ndlp->nrport == remote_port->private) {
+ /* Same remoteport. Just reuse. */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NVME_DISC,
+ "6014 Rebinding lport to "
+ "remoteport %p wwpn 0x%llx, "
+ "Data: x%x x%x %p x%x x%06x\n",
+ remote_port,
+ remote_port->port_name,
+ remote_port->port_id,
+ remote_port->port_role,
+ ndlp,
+ ndlp->nlp_type,
+ ndlp->nlp_DID);
+ return 0;
+ }
prev_ndlp = rport->ndlp;
- /* Sever the ndlp<->rport connection before dropping
- * the ndlp ref from register.
+ /* Sever the ndlp<->rport association
+ * before dropping the ndlp ref from
+ * register.
*/
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
rport->ndlp = NULL;
+ rport->remoteport = NULL;
if (prev_ndlp)
lpfc_nlp_put(ndlp);
}
@@ -2397,19 +2571,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Clean bind the rport to the ndlp. */
rport->remoteport = remote_port;
rport->lport = lport;
- rport->ndlp = lpfc_nlp_get(ndlp);
- if (!rport->ndlp)
- return -1;
+ rport->ndlp = ndlp;
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = rport;
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
- "lport %p Rport WWNN 0x%llx, "
+ "lport %p Remoteport %p WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x\n",
- lport,
+ "x%06x Role x%x, ndlp %p\n",
+ lport, remote_port,
rpinfo.node_name, rpinfo.port_name,
- rpinfo.port_id, rpinfo.port_role);
+ rpinfo.port_id, rpinfo.port_role,
+ ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2473,20 +2648,20 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Sanity check ndlp type. Only call for NVME ports. Don't
* clear any rport state until the transport calls back.
*/
- if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
- init_completion(&rport->rport_unreg_done);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
+ ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
+ lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6167 NVME unregister failed %d "
"port_state x%x\n",
ret, remoteport->port_state);
}
-
}
return;
@@ -2545,8 +2720,11 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
* before the abort exchange command fully completes.
* Once completed, it is available via the put list.
*/
- nvme_cmd = lpfc_ncmd->nvmeCmd;
- nvme_cmd->done(nvme_cmd);
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
@@ -2558,3 +2736,45 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
"6312 XRI Aborted xri x%x not found\n", xri);
}
+
+/**
+ * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all wqes in the nvme rings and frees all resources
+ * in the txcmplq. This function does not issue abort wqes for the IO
+ * commands in the txcmplq; they will just be returned with
+ * IOERR_SLI_DOWN. This function is invoked via EEH when the device's
+ * PCI slot has been permanently disabled.
+ **/
+void
+lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+ u32 i, wait_cnt = 0;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return;
+
+ /* Cycle through all NVME rings and make sure all outstanding
+ * WQEs have been removed from the txcmplqs.
+ */
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ pring = phba->sli4_hba.nvme_wq[i]->pring;
+
+ /* Retrieve everything on the txcmplq */
+ while (!list_empty(&pring->txcmplq)) {
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+ wait_cnt++;
+
+ /* The sleep is 10 ms. Every ten seconds,
+ * dump a message. Something is wrong.
+ */
+ if ((wait_cnt % 1000) == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6178 NVME IO not empty, "
+ "cnt %d\n", wait_cnt);
+ }
+ }
+ }
+}
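
wait_cnt advances once per 10 ms sleep, so the wait_cnt % 1000 test emits the 6178 message every 1000 x 10 ms = 10 seconds, matching the comment.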
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index d192bb268f99..e79f8f75758c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -22,10 +22,12 @@
********************************************************************/
#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
-#define LPFC_NVME_WQSIZE 256
#define LPFC_NVME_ERSP_LEN 0x20
+#define LPFC_NVME_WAIT_TMO 10
+#define LPFC_NVME_EXPEDITE_XRICNT 8
+
struct lpfc_nvme_qhandle {
uint32_t index; /* WQ index to use */
uint32_t qidx; /* queue index passed to create */
@@ -36,7 +38,18 @@ struct lpfc_nvme_qhandle {
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
struct completion lport_unreg_done;
- /* Add sttats counters here */
+ /* Add stats counters here */
+ atomic_t xmt_fcp_noxri;
+ atomic_t xmt_fcp_bad_ndlp;
+ atomic_t xmt_fcp_qdepth;
+ atomic_t xmt_fcp_wqerr;
+ atomic_t xmt_fcp_abort;
+ atomic_t xmt_ls_abort;
+ atomic_t xmt_ls_err;
+ atomic_t cmpl_fcp_xb;
+ atomic_t cmpl_fcp_err;
+ atomic_t cmpl_ls_xb;
+ atomic_t cmpl_ls_err;
};
struct lpfc_nvme_rport {
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 84cf1b9079f7..8dbf5c9d51aa 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -38,6 +38,7 @@
#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
@@ -126,10 +127,17 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (status)
- atomic_inc(&tgtp->xmt_ls_rsp_error);
- else
- atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ if (tgtp) {
+ if (status) {
+ atomic_inc(&tgtp->xmt_ls_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_ls_rsp_aborted);
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
+ } else {
+ atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ }
+ }
out:
rsp = &ctxp->ctx.ls_req;
@@ -218,6 +226,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -253,6 +262,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
return;
}
+ /* Processing of FCP command is deferred */
+ if (rc == -EOVERFLOW) {
+ lpfc_nvmeio_data(phba,
+ "NVMET RCV BUSY: xri x%x sz %d "
+ "from %06x\n",
+ oxid, size, sid);
+ /* defer reposting rcv buffer till .defer_rcv callback */
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -519,8 +539,11 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (status) {
rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
rsp->transferred_length = 0;
- if (tgtp)
+ if (tgtp) {
atomic_inc(&tgtp->xmt_fcp_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
+ }
logerr = LOG_NVME_IOERR;
@@ -528,6 +551,8 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
ctxp->flag |= LPFC_NVMET_XBUSY;
logerr |= LOG_NVME_ABTS;
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
} else {
ctxp->flag &= ~LPFC_NVMET_XBUSY;
@@ -635,6 +660,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return -ENODEV;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
@@ -721,6 +749,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
@@ -823,6 +856,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -910,7 +946,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
tgtp = phba->targetport->private;
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ else
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1216,6 +1256,8 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_ls_rsp, 0);
atomic_set(&tgtp->xmt_ls_drop, 0);
atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
@@ -1228,7 +1270,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_fcp_release, 0);
atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+ atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
atomic_set(&tgtp->xmt_fcp_abort, 0);
atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
atomic_set(&tgtp->xmt_abort_unsol, 0);
@@ -1270,6 +1315,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
int rrq_empty = 0;
@@ -1280,6 +1326,12 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
+
+ if (phba->targetport) {
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
+ }
+
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
@@ -1682,6 +1734,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1715,6 +1768,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
/* Process FCP command */
if (rc == 0) {
+ ctxp->rqb_buffer = NULL;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
@@ -1726,10 +1780,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
"NVMET RCV BUSY: xri x%x sz %d from %06x\n",
oxid, size, sid);
/* defer reposting rcv buffer till .defer_rcv callback */
- ctxp->rqb_buffer = nvmebuf;
+ ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
return;
}
+ ctxp->rqb_buffer = nvmebuf;
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1992,7 +2047,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
return NULL;
}
- if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
+ if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 NVMET prep FCP wqe: seg cnt err: "
"NPORT x%x oxid x%x ste %d cnt %d\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 25a65b0bb7f3..5b32c9e4d4ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -25,6 +25,10 @@
#define LPFC_NVMET_RQE_DEF_COUNT 512
#define LPFC_NVMET_SUCCESS_LEN 12
+#define LPFC_NVMET_MRQ_OFF 0xffff
+#define LPFC_NVMET_MRQ_AUTO 0
+#define LPFC_NVMET_MRQ_MAX 16
+
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
@@ -43,6 +47,8 @@ struct lpfc_nvmet_tgtport {
/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
atomic_t xmt_ls_rsp_error;
+ atomic_t xmt_ls_rsp_aborted;
+ atomic_t xmt_ls_rsp_xb_set;
atomic_t xmt_ls_rsp_cmpl;
/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
@@ -60,12 +66,15 @@ struct lpfc_nvmet_tgtport {
atomic_t xmt_fcp_rsp;
/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
+ atomic_t xmt_fcp_rsp_xb_set;
atomic_t xmt_fcp_rsp_cmpl;
atomic_t xmt_fcp_rsp_error;
+ atomic_t xmt_fcp_rsp_aborted;
atomic_t xmt_fcp_rsp_drop;
/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+ atomic_t xmt_fcp_xri_abort_cqe;
atomic_t xmt_fcp_abort;
atomic_t xmt_fcp_abort_cmpl;
atomic_t xmt_abort_sol;
@@ -122,6 +131,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
+#define LPFC_NVMET_DEFER_RCV_REPOST 0x20 /* repost to RQ on defer rcv */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index aecd2399005d..5f5528a12308 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index;
+ int hq_put_index;
+ int dq_put_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
- put_index = hq->host_index;
- temp_hrqe = hq->qe[put_index].rqe;
- temp_drqe = dq->qe[dq->host_index].rqe;
+ hq_put_index = hq->host_index;
+ dq_put_index = dq->host_index;
+ temp_hrqe = hq->qe[hq_put_index].rqe;
+ temp_drqe = dq->qe[dq_put_index].rqe;
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
- if (put_index != dq->host_index)
+ if (hq_put_index != dq_put_index)
return -EINVAL;
/* If the host has not yet processed the next entry then we are done */
- if (((put_index + 1) % hq->entry_count) == hq->hba_index)
+ if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
- hq->host_index = ((put_index + 1) % hq->entry_count);
- dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+ hq->host_index = ((hq_put_index + 1) % hq->entry_count);
+ dq->host_index = ((dq_put_index + 1) % dq->entry_count);
hq->RQ_buf_posted++;
/* Ring The Header Receive Queue Doorbell */
@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
}
writel(doorbell.word0, hq->db_regaddr);
}
- return put_index;
+ return hq_put_index;
}
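
For readers new to the RQ-pair bookkeeping, a minimal user-space sketch (illustrative, not lpfc code) of the circular full test that gates the put above: the pair is full when advancing the host's put index would land on the index the hardware has yet to consume:

        #include <stdio.h>

        /* Full if advancing host_index would collide with hba_index. */
        static int rq_is_full(unsigned int host_index, unsigned int hba_index,
                              unsigned int entry_count)
        {
                return ((host_index + 1) % entry_count) == hba_index;
        }

        int main(void)
        {
                unsigned int hba_index = 5;     /* hardware consumed up to here */

                printf("%d\n", rq_is_full(4, hba_index, 512));  /* 1: full */
                printf("%d\n", rq_is_full(6, hba_index, 512));  /* 0: room */
                return 0;
        }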
/**
@@ -12318,41 +12320,6 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked by the worker thread to process all the pending
- * SLI4 NVME abort XRI events.
- **/
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
-{
- struct lpfc_cq_event *cq_event;
-
- /* First, declare the fcp xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
- /* Now, handle all the fcp xri abort events */
- while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
- list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
- /* Notify aborted XRI for NVME work queue */
- if (phba->nvmet_support) {
- lpfc_sli4_nvmet_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- } else {
- lpfc_sli4_nvme_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- }
- /* Free the event processed back to the free pool */
- lpfc_sli4_cq_event_release(phba, cq_event);
- }
-}
-
-/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -12548,6 +12515,24 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return irspiocbq;
}
+inline struct lpfc_cq_event *
+lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0602 Failed to alloc CQ_EVENT entry\n");
+ return NULL;
+ }
+
+ /* Move the CQE into the event */
+ memcpy(&cq_event->cqe, entry, size);
+ return cq_event;
+}
+
/**
* lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
@@ -12569,16 +12554,9 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
"word2:x%x, word3:x%x\n", mcqe->word0,
mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0394 Failed to allocate CQ_EVENT entry\n");
+ cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
+ if (!cq_event)
return false;
- }
-
- /* Move the CQE into an asynchronous event entry */
- memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
/* Set the async event flag */
@@ -12824,18 +12802,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
struct lpfc_cq_event *cq_event;
unsigned long iflags;
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0602 Failed to allocate CQ_EVENT entry\n");
- return false;
- }
-
- /* Move the CQE into the proper xri abort event list */
- memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
switch (cq->subtype) {
case LPFC_FCP:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
@@ -12844,7 +12816,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -12854,13 +12831,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
workposted = true;
break;
case LPFC_NVME:
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_add_tail(&cq_event->list,
- &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
- /* Set the nvme xri abort event flag */
- phba->hba_flag |= NVME_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- workposted = true;
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support)
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+ else
+ lpfc_sli4_nvme_xri_aborted(phba, wcqe);
+
+ workposted = false;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12868,7 +12845,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
"%08x %08x %08x %08x\n",
cq->subtype, wcqe->word0, wcqe->parameter,
wcqe->word2, wcqe->word3);
- lpfc_sli4_cq_event_release(phba, cq_event);
workposted = false;
break;
}
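
The net effect here: NVME XRI-abort WCQEs are handled synchronously in the CQ handler via lpfc_sli4_nvmet_xri_aborted() or lpfc_sli4_nvme_xri_aborted() rather than queued to the worker thread, which is why lpfc_sli4_nvme_xri_abort_event_proc(), the sp_nvme_xri_aborted_work_queue splice, and the NVME_XRI_ABORT_EVENT flag are deleted elsewhere in this patch. The LPFC_NVME_LS case falls through to LPFC_ELS because NVME LS exchanges use ELS resources.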
@@ -12913,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13316,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
"6126 Receive Frame Truncated!!\n");
/* Drop thru */
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13919,7 +13895,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
@@ -13938,6 +13914,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
/**
* lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
* @phba: The HBA that this queue is being created on.
+ * @page_size: The size of a queue page
* @entry_size: The size of each queue entry for this queue.
* @entry_count: The number of entries that this queue will handle.
*
@@ -13946,8 +13923,8 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
* queue on the HBA.
**/
struct lpfc_queue *
-lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
- uint32_t entry_count)
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
+ uint32_t entry_size, uint32_t entry_count)
{
struct lpfc_queue *queue;
struct lpfc_dmabuf *dmabuf;
@@ -13956,7 +13933,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = page_size;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
@@ -13973,6 +13950,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
+
+ /* Set queue parameters now. If the system cannot provide memory
+ * resources, the free routine needs to know what was allocated.
+ */
+ queue->entry_size = entry_size;
+ queue->entry_count = entry_count;
+ queue->page_size = hw_page_size;
+ queue->phba = phba;
+
for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
@@ -13994,9 +13980,6 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
queue->qe[total_qe_count].address = dma_pointer;
}
}
- queue->entry_size = entry_size;
- queue->entry_count = entry_count;
- queue->phba = phba;
INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
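
Aside: the hunk above assigns entry_size, entry_count, page_size and phba before the page-allocation loop, so a mid-loop failure can hand the half-built queue to the free routine with enough recorded geometry to undo it. A hedged user-space sketch of the same record-then-allocate ordering (all names hypothetical):

#include <stdlib.h>

#define MAX_PAGES 16

struct queue {
    size_t page_size;
    int    page_count;
    void  *pages[MAX_PAGES];
};

static void queue_free(struct queue *q)
{
    /* Safe even after a partial allocation: the geometry was recorded
     * before the first page was allocated, and free(NULL) is a no-op. */
    for (int i = 0; i < q->page_count; i++)
        free(q->pages[i]);
    free(q);
}

static struct queue *queue_alloc(size_t page_size, int page_count)
{
    struct queue *q;

    if (page_count > MAX_PAGES)
        return NULL;
    q = calloc(1, sizeof(*q));
    if (!q)
        return NULL;
    q->page_size  = page_size;             /* record geometry first ... */
    q->page_count = page_count;
    for (int i = 0; i < page_count; i++) {
        q->pages[i] = malloc(page_size);   /* ... then allocate pages   */
        if (!q->pages[i]) {
            queue_free(q);                 /* relies on recorded state  */
            return NULL;
        }
    }
    return q;
}
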
@@ -14299,7 +14282,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (!cq || !eq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = cq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14318,8 +14301,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.cqv);
if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
- /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
- bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+ bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+ (cq->page_size / SLI4_PAGE_SIZE));
bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
eq->queue_id);
} else {
@@ -14327,6 +14310,18 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
eq->queue_id);
}
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ cq_create->u.request.context.lpfc_cq_context_count =
+ cq->entry_count;
+ bf_set(lpfc_cq_context_count,
+ &cq_create->u.request.context,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
@@ -14352,7 +14347,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
- memset(dmabuf->virt, 0, hw_page_size);
+ memset(dmabuf->virt, 0, cq->page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -14433,8 +14428,6 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
numcq = phba->cfg_nvmet_mrq;
if (!cqp || !eqp || !numcq)
return -ENODEV;
- if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14465,6 +14458,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
status = -ENOMEM;
goto out;
}
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = cq->page_size;
switch (idx) {
case 0:
@@ -14482,6 +14477,19 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
bf_set(lpfc_mbx_cq_create_set_num_cq,
&cq_set->u.request, numcq);
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ cq->entry_count);
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3118 Bad CQ count. (%d)\n",
@@ -14578,6 +14586,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cq->host_index = 0;
cq->hba_index = 0;
cq->entry_repost = LPFC_CQ_REPOST;
+ cq->chann = idx;
rc = 0;
list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14872,12 +14881,13 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
void __iomem *bar_memmap_p;
uint32_t db_offset;
uint16_t pci_barset;
+ uint8_t wq_create_version;
/* sanity check on queue memory */
if (!wq || !cq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = wq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14898,7 +14908,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
+ wq_create_version = LPFC_Q_CREATE_VERSION_1;
+ else
+ wq_create_version = LPFC_Q_CREATE_VERSION_0;
+
+ switch (wq_create_version) {
case LPFC_Q_CREATE_VERSION_0:
switch (wq->entry_size) {
default:
@@ -14956,7 +14971,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
}
bf_set(lpfc_mbx_wq_create_page_size,
&wq_create->u.request_1,
- LPFC_WQ_PAGE_SIZE_4096);
+ (wq->page_size / SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
break;
default:
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 13b8f4d4da34..81fb58e59e60 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -161,7 +161,6 @@ struct lpfc_queue {
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
- uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
@@ -169,6 +168,11 @@ struct lpfc_queue {
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
uint32_t q_mode;
+ uint16_t page_count; /* Number of pages allocated for this queue */
+ uint16_t page_size; /* size of page allocated for this queue */
+#define LPFC_EXPANDED_PAGE_SIZE 16384
+#define LPFC_DEFAULT_PAGE_SIZE 4096
+ uint16_t chann; /* IO channel this queue is associated with */
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -366,9 +370,9 @@ struct lpfc_bmbx {
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 1024
+#define LPFC_CQE_EXP_COUNT 4096
#define LPFC_WQE_DEF_COUNT 256
-#define LPFC_WQE128_DEF_COUNT 128
-#define LPFC_WQE128_MAX_COUNT 256
+#define LPFC_WQE_EXP_COUNT 1024
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -668,7 +672,6 @@ struct lpfc_sli4_hba {
struct list_head sp_asynce_work_queue;
struct list_head sp_fcp_xri_aborted_work_queue;
struct list_head sp_els_xri_aborted_work_queue;
- struct list_head sp_nvme_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -769,7 +772,7 @@ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
- uint32_t);
+ uint32_t, uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
@@ -820,7 +823,6 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e0181371af09..c232bf0e8998 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.4.0.4"
+#define LPFC_DRIVER_VERSION "11.4.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index f5a36ccb8606..ba6503f37756 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.703.05.00-rc1"
-#define MEGASAS_RELDATE "October 5, 2017"
+#define MEGASAS_VERSION "07.704.04.00-rc1"
+#define MEGASAS_RELDATE "December 7, 2017"
/*
* Device IDs
@@ -197,6 +197,7 @@ enum MFI_CMD_OP {
MFI_CMD_ABORT = 0x6,
MFI_CMD_SMP = 0x7,
MFI_CMD_STP = 0x8,
+ MFI_CMD_NVME = 0x9,
MFI_CMD_OP_COUNT,
MFI_CMD_INVALID = 0xff
};
@@ -230,7 +231,7 @@ enum MFI_CMD_OP {
/*
* Global functions
*/
-extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id);
/*
@@ -1352,7 +1353,13 @@ struct megasas_ctrl_info {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u16 reserved:8;
+ u16 reserved:2;
+ u16 support_nvme_passthru:1;
+ u16 support_pl_debug_info:1;
+ u16 support_flash_comp_info:1;
+ u16 support_host_info:1;
+ u16 support_dual_fw_update:1;
+ u16 support_ssc_rev3:1;
u16 fw_swaps_bbu_vpd_info:1;
u16 support_pd_map_target_id:1;
u16 support_ses_ctrl_in_multipathcfg:1;
@@ -1377,7 +1384,19 @@ struct megasas_ctrl_info {
* provide the data in little endian order
*/
u16 fw_swaps_bbu_vpd_info:1;
- u16 reserved:8;
+ u16 support_ssc_rev3:1;
+ /* FW supports CacheCade 3.0, only one SSCD creation allowed */
+ u16 support_dual_fw_update:1;
+ /* FW supports dual firmware update feature */
+ u16 support_host_info:1;
+ /* FW supports MR_DCMD_CTRL_HOST_INFO_SET/GET */
+ u16 support_flash_comp_info:1;
+ /* FW supports MR_DCMD_CTRL_FLASH_COMP_INFO_GET */
+ u16 support_pl_debug_info:1;
+ /* FW supports retrieval of PL debug information through apps */
+ u16 support_nvme_passthru:1;
+ /* FW supports NVMe passthru commands */
+ u16 reserved:2;
#endif
} adapter_operations4;
u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
@@ -1630,7 +1649,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:18;
+ u32 reserved:17;
+ u32 support_nvme_passthru:1;
u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
u32 support_qd_throttling:1;
@@ -1660,7 +1680,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_qd_throttling:1;
u32 support_pd_map_target_id:1;
u32 support_64bit_mode:1;
- u32 reserved:18;
+ u32 support_nvme_passthru:1;
+ u32 reserved:17;
#endif
} mfi_capabilities;
__le32 reg;
@@ -2188,7 +2209,6 @@ struct megasas_instance {
struct megasas_evt_detail *evt_detail;
dma_addr_t evt_detail_h;
struct megasas_cmd *aen_cmd;
- struct mutex hba_mutex;
struct semaphore ioctl_sem;
struct Scsi_Host *host;
@@ -2269,6 +2289,7 @@ struct megasas_instance {
u32 nvme_page_size;
u8 adapter_type;
bool consistent_mask_64bit;
+ bool support_nvme_passthru;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index cc54bdb5c712..2791141bd035 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -181,6 +181,7 @@ static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
+static bool support_nvme_encapsulation;
/* define lock for aen poll */
spinlock_t poll_aen_lock;
@@ -1952,7 +1953,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
}
}
- mutex_lock(&instance->hba_mutex);
+ mutex_lock(&instance->reset_mutex);
/* Send DCMD to Firmware and cache the information */
if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
megasas_get_pd_info(instance, sdev);
@@ -1966,7 +1967,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_static_target_properties(sdev, is_target_prop);
- mutex_unlock(&instance->hba_mutex);
+ mutex_unlock(&instance->reset_mutex);
/* This sdev property may change post OCR */
megasas_set_dynamic_target_properties(sdev);
@@ -3122,6 +3123,16 @@ megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
}
+static ssize_t
+megasas_fw_cmds_outstanding_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
+}
+
static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
@@ -3132,6 +3143,8 @@ static DEVICE_ATTR(page_size, S_IRUGO,
megasas_page_size_show, NULL);
static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
megasas_ldio_outstanding_show, NULL);
+static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
+ megasas_fw_cmds_outstanding_show, NULL);
struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
@@ -3139,6 +3152,7 @@ struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_state,
&dev_attr_page_size,
&dev_attr_ldio_outstanding,
+ &dev_attr_fw_cmds_outstanding,
NULL,
};
@@ -3321,6 +3335,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
+ case MFI_CMD_NVME:
megasas_complete_int_cmd(instance, cmd);
break;
@@ -3331,10 +3346,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags);
+ status = cmd->frame->hdr.cmd_status;
instance->map_update_cmd = NULL;
- if (cmd->frame->hdr.cmd_status != 0) {
- if (cmd->frame->hdr.cmd_status !=
- MFI_STAT_NOT_FOUND)
+ if (status != MFI_STAT_OK) {
+ if (status != MFI_STAT_NOT_FOUND)
dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
cmd->frame->hdr.cmd_status);
else {
@@ -3344,8 +3359,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
flags);
break;
}
- } else
- instance->map_id++;
+ }
+
megasas_return_cmd(instance, cmd);
/*
@@ -3353,10 +3368,14 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
* Validate Map will set proper value.
* Meanwhile all IOs will go as LD IO.
*/
- if (MR_ValidateMapInfo(instance))
+ if (status == MFI_STAT_OK &&
+ (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
+ instance->map_id++;
fusion->fast_path_io = 1;
- else
+ } else {
fusion->fast_path_io = 0;
+ }
+
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
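
Aside: the reworked completion above stops bumping map_id unconditionally. The driver now validates the candidate map (map_id + 1) first and commits the increment only when both the DCMD status and the map contents check out, so a bad firmware map can no longer flip the active half of the double-buffered ld_drv_map. A standalone sketch of that validate-then-commit pattern (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define MAX_LDS 64              /* stand-in for MAX_LOGICAL_DRIVES */

struct raid_map { int ld_count; };

struct instance {
    uint64_t map_id;            /* low bit selects the active buffer */
    struct raid_map maps[2];
    bool fast_path_io;
};

static bool validate_map(struct instance *inst, uint64_t map_id)
{
    struct raid_map *m = &inst->maps[map_id & 1];
    return m->ld_count >= 0 && m->ld_count <= MAX_LDS;
}

static void map_sync_done(struct instance *inst, bool dcmd_ok)
{
    /* Validate the candidate buffer before publishing it. */
    if (dcmd_ok && validate_map(inst, inst->map_id + 1)) {
        inst->map_id++;                 /* commit: flip active buffer */
        inst->fast_path_io = true;
    } else {
        inst->fast_path_io = false;     /* keep serving the old map   */
    }
}
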
@@ -4677,10 +4696,12 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
sizeof(struct megasas_ctrl_info));
if ((instance->adapter_type != MFI_SERIES) &&
- !instance->mask_interrupts)
+ !instance->mask_interrupts) {
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
- else
+ } else {
ret = megasas_issue_polled(instance, cmd);
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ }
switch (ret) {
case DCMD_SUCCESS:
@@ -4702,6 +4723,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
ci->adapter_operations4.support_pd_map_target_id;
+ instance->support_nvme_passthru =
+ ci->adapter_operations4.support_nvme_passthru;
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
@@ -4718,6 +4741,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
instance->secure_jbod_support ? "Yes" : "No");
+ dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
+ instance->support_nvme_passthru ? "Yes" : "No");
break;
case DCMD_TIMEOUT:
@@ -5387,7 +5412,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
fusion->stream_detect_by_ld[i] =
- kmalloc(sizeof(struct LD_STREAM_DETECT),
+ kzalloc(sizeof(struct LD_STREAM_DETECT),
GFP_KERNEL);
if (!fusion->stream_detect_by_ld[i]) {
dev_err(&instance->pdev->dev,
@@ -5432,7 +5457,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
struct fusion_context *fusion = instance->ctrl_context;
- if (MR_ValidateMapInfo(instance))
+ if (MR_ValidateMapInfo(instance, instance->map_id))
fusion->fast_path_io = 1;
else
fusion->fast_path_io = 0;
@@ -5581,6 +5606,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
struct megasas_dcmd_frame *dcmd;
struct megasas_evt_log_info *el_info;
dma_addr_t el_info_h = 0;
+ int ret;
cmd = megasas_get_cmd(instance);
@@ -5613,26 +5639,29 @@ megasas_get_seq_num(struct megasas_instance *instance,
megasas_set_dma_settings(instance, dcmd, el_info_h,
sizeof(struct megasas_evt_log_info));
- if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
- DCMD_SUCCESS) {
- /*
- * Copy the data back into callers buffer
- */
- eli->newest_seq_num = el_info->newest_seq_num;
- eli->oldest_seq_num = el_info->oldest_seq_num;
- eli->clear_seq_num = el_info->clear_seq_num;
- eli->shutdown_seq_num = el_info->shutdown_seq_num;
- eli->boot_seq_num = el_info->boot_seq_num;
- } else
- dev_err(&instance->pdev->dev, "DCMD failed "
- "from %s\n", __func__);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+ if (ret != DCMD_SUCCESS) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ goto dcmd_failed;
+ }
+ /*
+ * Copy the data back into the caller's buffer
+ */
+ eli->newest_seq_num = el_info->newest_seq_num;
+ eli->oldest_seq_num = el_info->oldest_seq_num;
+ eli->clear_seq_num = el_info->clear_seq_num;
+ eli->shutdown_seq_num = el_info->shutdown_seq_num;
+ eli->boot_seq_num = el_info->boot_seq_num;
+
+dcmd_failed:
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
megasas_return_cmd(instance, cmd);
- return 0;
+ return ret;
}
/**
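
Aside: megasas_get_seq_num() used to swallow DCMD failures and return 0 regardless; the rework above propagates the status and funnels both paths through one dcmd_failed label so the DMA buffer and the command are always released. The single-exit shape, as a compilable sketch (issue_cmd() is a hypothetical stand-in for the blocked DCMD):

#include <stdlib.h>

static int issue_cmd(int *buf)          /* stand-in; 0 means DCMD_SUCCESS */
{
    *buf = 42;
    return 0;
}

static int get_seq_num(int *out)
{
    int ret;
    int *buf = malloc(sizeof(*buf));

    if (!buf)
        return -1;

    ret = issue_cmd(buf);
    if (ret != 0)
        goto dcmd_failed;               /* skip only the copy-out */

    *out = *buf;                        /* copy data back to the caller */

dcmd_failed:
    free(buf);                          /* released on success and failure */
    return ret;                         /* the failure now propagates up   */
}
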
@@ -6346,7 +6375,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->hba_mutex);
mutex_init(&instance->reset_mutex);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@@ -6704,6 +6732,7 @@ megasas_resume(struct pci_dev *pdev)
*/
atomic_set(&instance->fw_outstanding, 0);
+ atomic_set(&instance->ldio_outstanding, 0);
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
@@ -6822,7 +6851,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
u32 pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
- instance->unload = 1;
host = instance->host;
fusion = instance->ctrl_context;
@@ -6833,6 +6861,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
+ instance->unload = 1;
if (megasas_wait_for_adapter_operational(instance))
goto skip_firing_dcmds;
@@ -7004,9 +7033,9 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
/**
* megasas_mgmt_poll - char node "poll" entry point
* */
-static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
poll_wait(file, &megasas_poll_wait, wait);
@@ -7087,7 +7116,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
return -EINVAL;
}
- if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
+ if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
+ ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
+ !instance->support_nvme_passthru)) {
dev_err(&instance->pdev->dev,
"Received invalid ioctl command 0x%x\n",
ioc->frame.hdr.cmd);
@@ -7301,9 +7332,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
struct megasas_iocpacket *ioc;
struct megasas_instance *instance;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
ioc = memdup_user(user_ioc, sizeof(*ioc));
if (IS_ERR(ioc))
@@ -7315,10 +7343,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- /* Adjust ioctl wait time for VF mode */
- if (instance->requestorId)
- wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
-
/* Block ioctls in VF mode */
if (instance->requestorId && !allow_vf_ioctls) {
error = -ENODEV;
@@ -7341,32 +7365,10 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- break;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting"
- "for controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance)) {
error = -ENODEV;
goto out_up;
}
- spin_unlock_irqrestore(&instance->hba_lock, flags);
error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
@@ -7382,9 +7384,6 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
struct megasas_instance *instance;
struct megasas_aen aen;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
if (file->private_data != file) {
printk(KERN_DEBUG "megasas: fasync_helper was not "
@@ -7408,32 +7407,8 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
return -ENODEV;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock,
- flags);
- break;
- }
-
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting for"
- "controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance))
return -ENODEV;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, aen.seq_num,
@@ -7613,6 +7588,14 @@ static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
}
static DRIVER_ATTR_RW(dbg_lvl);
+static ssize_t
+support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_nvme_encapsulation);
+}
+
+static DRIVER_ATTR_RO(support_nvme_encapsulation);
+
static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@@ -7801,6 +7784,7 @@ static int __init megasas_init(void)
support_poll_for_event = 2;
support_device_change = 1;
+ support_nvme_encapsulation = true;
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
@@ -7850,8 +7834,17 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_support_device_change;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
+ if (rval)
+ goto err_dcf_support_nvme_encapsulation;
+
return rval;
+err_dcf_support_nvme_encapsulation:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+
err_dcf_support_device_change:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
@@ -7884,6 +7877,8 @@ static void __exit megasas_exit(void)
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_release_date);
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
pci_unregister_driver(&megasas_pci_driver);
unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index bfad9bfc313f..59ecbb3b53b5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
/*
* This function will Populate Driver Map using firmware raid map
*/
-void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
@@ -181,7 +181,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
struct MR_DRV_RAID_MAP_ALL *drv_map =
- fusion->ld_drv_map[(instance->map_id & 1)];
+ fusion->ld_drv_map[(map_id & 1)];
struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
void *raid_map_data = NULL;
@@ -190,7 +190,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
if (instance->max_raid_mapsize) {
- fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+ fw_map_dyn = fusion->ld_map[(map_id & 1)];
desc_table =
(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
if (desc_table != fw_map_dyn->raid_map_desc_table)
@@ -255,11 +255,11 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
} else if (instance->supportmax256vd) {
fw_map_ext =
- (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+ (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
- return;
+ return 1;
}
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
@@ -282,9 +282,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
} else {
fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
- fusion->ld_map[(instance->map_id & 1)];
+ fusion->ld_map[(map_id & 1)];
pFwRaidMap = &fw_map_old->raidMap;
ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+ if (ld_count > MAX_LOGICAL_DRIVES) {
+ dev_dbg(&instance->pdev->dev,
+ "LD count exposed in RAID map in not valid\n");
+ return 1;
+ }
+
pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
@@ -300,12 +306,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
sizeof(struct MR_DEV_HANDLE_INFO) *
MAX_RAIDMAP_PHYSICAL_DEVICES);
}
+
+ return 0;
}
/*
* This function will validate Map info data provided by FW
*/
-u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion;
struct MR_DRV_RAID_MAP_ALL *drv_map;
@@ -317,11 +325,11 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
u16 ld;
u32 expected_size;
-
- MR_PopulateDrvRaidMap(instance);
+ if (MR_PopulateDrvRaidMap(instance, map_id))
+ return 0;
fusion = instance->ctrl_context;
- drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ drv_map = fusion->ld_drv_map[(map_id & 1)];
pDrvRaidMap = &drv_map->raidMap;
lbInfo = fusion->load_balance_info;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 65dc4fea6352..073ced07e662 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -983,7 +983,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
unsigned long flags;
- struct timeval tv;
+ ktime_t time;
bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
@@ -1042,13 +1042,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
- do_gettimeofday(&tv);
+ time = ktime_get_real();
/* Convert to milliseconds as per FW requirement */
- IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
- (tv.tv_usec / 1000));
+ IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time));
init_frame = (struct megasas_init_frame *)cmd->frame;
- memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(init_frame, 0, IOC_INIT_FRAME_SIZE);
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
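
Aside: the timestamp hunk above swaps the deprecated do_gettimeofday()/timeval pair for ktime_get_real() plus ktime_to_ms(), which is y2038-safe and drops the manual sec/usec arithmetic. A user-space equivalent of the millisecond value handed to the firmware (clock_gettime stands in for ktime_get_real):

#include <stdint.h>
#include <time.h>

/* Milliseconds since the epoch, mirroring
 * cpu_to_le64(ktime_to_ms(ktime_get_real())) in the driver. */
static uint64_t realtime_ms(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);
    return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}
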
@@ -1080,6 +1079,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+ drv_ops->mfi_capabilities.support_nvme_passthru = 1;
if (instance->consistent_mask_64bit)
drv_ops->mfi_capabilities.support_64bit_mode = 1;
@@ -1320,7 +1320,7 @@ megasas_get_map_info(struct megasas_instance *instance)
fusion->fast_path_io = 0;
if (!megasas_get_ld_map_info(instance)) {
- if (MR_ValidateMapInfo(instance)) {
+ if (MR_ValidateMapInfo(instance, instance->map_id)) {
fusion->fast_path_io = 1;
return 0;
}
@@ -1603,7 +1603,7 @@ static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
if (!cmd) {
dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
@@ -2664,16 +2664,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
praid_context = &io_request->RaidContext;
if (instance->adapter_type == VENTURA_SERIES) {
- spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
- megasas_stream_detect(instance, cmd, &io_info);
- spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
- /* In ventura if stream detected for a read and it is read ahead
- * capable make this IO as LDIO
- */
- if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
- io_info.isRead && io_info.ra_capable)
- fp_possible = false;
-
/* FP for Optimal raid level 1.
* All large RAID-1 writes (> 32 KiB, both WT and WB modes)
* are built by the driver as LD I/Os.
@@ -2699,6 +2689,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
}
}
+ if (!fp_possible ||
+ (io_info.isRead && io_info.ra_capable)) {
+ spin_lock_irqsave(&instance->stream_lock,
+ spinlock_flags);
+ megasas_stream_detect(instance, cmd, &io_info);
+ spin_unlock_irqrestore(&instance->stream_lock,
+ spinlock_flags);
+ /* In ventura if stream detected for a read and it is
+ * read ahead capable make this IO as LDIO
+ */
+ if (is_stream_detected(&io_request->RaidContext.raid_context_g35))
+ fp_possible = false;
+ }
+
/* If raid is NULL, set CPU affinity to default CPU0 */
if (raid)
megasas_set_raidflag_cpu_affinity(praid_context,
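
Aside: after the relocation above, the stream-detect bookkeeping and its stream_lock no longer run for every Ventura I/O; they are consulted only when the request has already lost fast-path eligibility or is a read-ahead-capable read. A reduced sketch of the gate, with a pthread mutex in place of the spinlock (predicate names are illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t stream_lock = PTHREAD_MUTEX_INITIALIZER;
static bool stream_detected;            /* updated by the detector */

static void stream_detect(void)
{
    /* ... update per-LD sequential-stream history ... */
}

static bool decide_fast_path(bool fp_possible, bool is_read, bool ra_capable)
{
    /* Only pay for the lock when the outcome can change. */
    if (!fp_possible || (is_read && ra_capable)) {
        pthread_mutex_lock(&stream_lock);
        stream_detect();
        pthread_mutex_unlock(&stream_lock);
        if (stream_detected)
            fp_possible = false;        /* demote to LDIO for read-ahead */
    }
    return fp_possible;
}
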
@@ -3953,6 +3957,8 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u16 smid;
bool refire_cmd = 0;
+ u8 result;
+ u32 opcode = 0;
fusion = instance->ctrl_context;
@@ -3963,29 +3969,53 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
cmd_fusion = fusion->cmd_list[j];
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
smid = le16_to_cpu(cmd_mfi->context.smid);
+ result = REFIRE_CMD;
if (!smid)
continue;
- /* Do not refire shutdown command */
- if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) ==
- MR_DCMD_CTRL_SHUTDOWN) {
- cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
- megasas_complete_cmd(instance, cmd_mfi, DID_OK);
- continue;
+ req_desc = megasas_get_request_descriptor(instance, smid - 1);
+
+ switch (cmd_mfi->frame->hdr.cmd) {
+ case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode);
+ /* Do not refire shutdown command */
+ if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
+ cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
+ result = COMPLETE_CMD;
+ break;
+ }
+
+ refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO) &&
+ (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
+ !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE));
+
+ if (!refire_cmd)
+ result = RETURN_CMD;
+
+ break;
+ case MFI_CMD_NVME:
+ if (!instance->support_nvme_passthru) {
+ cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
+ result = COMPLETE_CMD;
+ }
+
+ break;
+ default:
+ break;
}
- req_desc = megasas_get_request_descriptor
- (instance, smid - 1);
- refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
- (cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
- && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
- if (refire_cmd)
+ switch (result) {
+ case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
- else
+ break;
+ case RETURN_CMD:
megasas_return_cmd(instance, cmd_mfi);
+ break;
+ case COMPLETE_CMD:
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ break;
+ }
}
}
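
Aside: the refire rewrite above first classifies each pending command into one of three dispositions (refire it, hand it back, or complete it) using the CMD_RET_VALUES enum added to the fusion header further down, and only then acts. A compressed model of the decision table (the command and opcode constants here are stand-ins, not the real MFI values):

#include <stdbool.h>

enum disposition { REFIRE_CMD = 1, COMPLETE_CMD = 2, RETURN_CMD = 3 };

enum { CMD_DCMD, CMD_NVME, CMD_OTHER };
enum { DCMD_SHUTDOWN, DCMD_MAP_GET_INFO, DCMD_MISC };

static enum disposition classify(int cmd, int opcode,
                                 bool skip_refire, bool nvme_ok)
{
    switch (cmd) {
    case CMD_DCMD:
        if (opcode == DCMD_SHUTDOWN)
            return COMPLETE_CMD;        /* never refire shutdown        */
        if (opcode == DCMD_MAP_GET_INFO || skip_refire)
            return RETURN_CMD;          /* driver reissues these itself */
        return REFIRE_CMD;
    case CMD_NVME:
        return nvme_ok ? REFIRE_CMD : COMPLETE_CMD;
    default:
        return REFIRE_CMD;              /* everything else is refired   */
    }
}
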
@@ -4625,8 +4655,6 @@ transition_to_ready:
continue;
}
- megasas_refire_mgmt_cmd(instance);
-
if (megasas_get_ctrl_info(instance)) {
dev_info(&instance->pdev->dev,
"Failed from %s %d\n",
@@ -4635,6 +4663,9 @@ transition_to_ready:
retval = FAILED;
goto out;
}
+
+ megasas_refire_mgmt_cmd(instance);
+
/* Reset load balance info */
if (fusion->load_balance_info)
memset(fusion->load_balance_info, 0,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 1814d79cb98d..8e5ebee6517f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1344,6 +1344,12 @@ union desc_value {
} u;
};
+enum CMD_RET_VALUES {
+ REFIRE_CMD = 1,
+ COMPLETE_CMD = 2,
+ RETURN_CMD = 3,
+};
+
void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8027de465d47..13d6e4ec3022 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -888,6 +888,22 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
return 1;
}
+static struct scsiio_tracker *
+_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct scsi_cmnd *cmd;
+
+ if (WARN_ON(!smid) ||
+ WARN_ON(smid >= ioc->hi_priority_smid))
+ return NULL;
+
+ cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (cmd)
+ return scsi_cmd_priv(cmd);
+
+ return NULL;
+}
+
/**
* _base_get_cb_idx - obtain the callback index
* @ioc: per adapter object
@@ -899,19 +915,25 @@ static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
int i;
- u8 cb_idx;
+ u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
+ u8 cb_idx = 0xFF;
if (smid < ioc->hi_priority_smid) {
- i = smid - 1;
- cb_idx = ioc->scsi_lookup[i].cb_idx;
+ struct scsiio_tracker *st;
+
+ if (smid < ctl_smid) {
+ st = _get_st_from_smid(ioc, smid);
+ if (st)
+ cb_idx = st->cb_idx;
+ } else if (smid == ctl_smid)
+ cb_idx = ioc->ctl_cb_idx;
} else if (smid < ioc->internal_smid) {
i = smid - ioc->hi_priority_smid;
cb_idx = ioc->hpr_lookup[i].cb_idx;
} else if (smid <= ioc->hba_queue_depth) {
i = smid - ioc->internal_smid;
cb_idx = ioc->internal_lookup[i].cb_idx;
- } else
- cb_idx = 0xFF;
+ }
return cb_idx;
}
@@ -1287,14 +1309,16 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
/**
* _base_get_chain_buffer_tracker - obtain chain tracker
* @ioc: per adapter object
- * @smid: smid associated to an IO request
+ * @scmd: SCSI commands of the IO request
*
* Returns chain tracker(from ioc->free_chain_list)
*/
static struct chain_tracker *
-_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
{
struct chain_tracker *chain_req;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1307,8 +1331,7 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
chain_req = list_entry(ioc->free_chain_list.next,
struct chain_tracker, tracker_list);
list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
- &ioc->scsi_lookup[smid - 1].chain_list);
+ list_add_tail(&chain_req->tracker_list, &st->chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return chain_req;
}
@@ -1923,7 +1946,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
/* initializing the chain flags and pointers */
chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -1963,7 +1986,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2066,7 +2089,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
}
/* initializing the pointers */
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2097,7 +2120,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2742,7 +2765,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
+ return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
}
/**
@@ -2755,7 +2778,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
+ return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
}
/**
@@ -2822,26 +2845,15 @@ u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
- unsigned long flags;
- struct scsiio_tracker *request;
+ struct scsiio_tracker *request = scsi_cmd_priv(scmd);
+ unsigned int tag = scmd->request->tag;
u16 smid;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (list_empty(&ioc->free_list)) {
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- pr_err(MPT3SAS_FMT "%s: smid not available\n",
- ioc->name, __func__);
- return 0;
- }
-
- request = list_entry(ioc->free_list.next,
- struct scsiio_tracker, tracker_list);
- request->scmd = scmd;
+ smid = tag + 1;
request->cb_idx = cb_idx;
- smid = request->smid;
request->msix_io = _base_get_msix_index(ioc);
- list_del(&request->tracker_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ request->smid = smid;
+ INIT_LIST_HEAD(&request->chain_list);
return smid;
}
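
Aside: mpt3sas_base_get_smid_scsiio() above no longer walks a driver-private free list; the block layer's request tag already identifies the command uniquely, so the SMID is simply tag + 1 (SMID 0 is invalid on the MPI wire) and the tracker now lives in scsi_cmd_priv(). The two-way mapping, as a sketch:

#include <stdint.h>

/* SMID 0 is reserved, so the zero-based block-layer tag is shifted
 * up by one on the way to the firmware and back down for lookups. */
static inline uint16_t tag_to_smid(unsigned int tag)
{
    return (uint16_t)(tag + 1);
}

static inline unsigned int smid_to_tag(uint16_t smid)
{
    return smid - 1;    /* fed to scsi_host_find_tag() on lookup */
}
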
@@ -2874,6 +2886,35 @@ mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
return smid;
}
+static void
+_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
+{
+ /*
+ * See _wait_for_commands_to_complete() call with regards to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ if (ioc->pending_io_count == 0)
+ wake_up(&ioc->reset_wq);
+ }
+}
+
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st)
+{
+ if (WARN_ON(st->smid == 0))
+ return;
+ st->cb_idx = 0xFF;
+ st->direct_io = 0;
+ if (!list_empty(&st->chain_list)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ list_splice_init(&st->chain_list, &ioc->free_chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ }
+}
+
/**
* mpt3sas_base_free_smid - put smid back on free_list
* @ioc: per adapter object
@@ -2886,37 +2927,22 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
unsigned long flags;
int i;
- struct chain_tracker *chain_req, *next;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (smid < ioc->hi_priority_smid) {
- /* scsiio queue */
- i = smid - 1;
- if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
- list_for_each_entry_safe(chain_req, next,
- &ioc->scsi_lookup[i].chain_list, tracker_list) {
- list_del_init(&chain_req->tracker_list);
- list_add(&chain_req->tracker_list,
- &ioc->free_chain_list);
- }
- }
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ struct scsiio_tracker *st;
- /*
- * See _wait_for_commands_to_complete() call with regards
- * to this code.
- */
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
+ st = _get_st_from_smid(ioc, smid);
+ if (!st) {
+ _base_recovery_check(ioc);
+ return;
}
+ mpt3sas_base_clear_st(ioc, st);
+ _base_recovery_check(ioc);
return;
- } else if (smid < ioc->internal_smid) {
+ }
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->internal_smid) {
/* hi-priority */
i = smid - ioc->hi_priority_smid;
ioc->hpr_lookup[i].cb_idx = 0xFF;
@@ -3789,13 +3815,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
if (ioc->pcie_sgl_dma_pool) {
for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
- pci_pool_free(ioc->pcie_sgl_dma_pool,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
+ dma_pool_free(ioc->pcie_sgl_dma_pool,
+ ioc->pcie_sg_lookup[i].pcie_sgl,
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
}
if (ioc->pcie_sgl_dma_pool)
- pci_pool_destroy(ioc->pcie_sgl_dma_pool);
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
if (ioc->config_page) {
@@ -3806,10 +3831,6 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->config_page, ioc->config_page_dma);
}
- if (ioc->scsi_lookup) {
- free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
- ioc->scsi_lookup = NULL;
- }
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
if (ioc->chain_lookup) {
@@ -4110,16 +4131,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
- ioc->scsi_lookup_pages = get_order(sz);
- ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->scsi_lookup_pages);
- if (!ioc->scsi_lookup) {
- pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
- ioc->name, (int)sz);
- goto out;
- }
-
dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
ioc->name, ioc->request, ioc->scsiio_depth));
@@ -4202,23 +4213,29 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
nvme_blocks_needed++;
+ sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
+ ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
+ if (!ioc->pcie_sg_lookup) {
+ pr_info(MPT3SAS_FMT
+ "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+ goto out;
+ }
sz = nvme_blocks_needed * ioc->page_size;
ioc->pcie_sgl_dma_pool =
- pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
+ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
if (!ioc->pcie_sgl_dma_pool) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_create failed\n",
+ "PCIe SGL pool: dma_pool_create failed\n",
ioc->name);
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
- pci_pool_alloc(ioc->pcie_sgl_dma_pool,
- GFP_KERNEL,
- &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
- if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
+ ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
+ ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_alloc failed\n",
+ "PCIe SGL pool: dma_pool_alloc failed\n",
ioc->name);
goto out;
}
@@ -5766,19 +5783,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
kfree(delayed_event_ack);
}
- /* initialize the scsi lookup free list */
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- INIT_LIST_HEAD(&ioc->free_list);
- smid = 1;
- for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
- INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].smid = smid;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
- &ioc->free_list);
- }
/* hi-priority queue */
INIT_LIST_HEAD(&ioc->hpr_free_list);
@@ -6292,15 +6297,13 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
* _wait_for_commands_to_complete - reset controller
* @ioc: Pointer to MPT_ADAPTER structure
*
- * This function waiting(3s) for all pending commands to complete
+ * This function waits up to 10s for all pending commands to complete
* prior to putting controller in reset.
*/
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
- unsigned long flags;
- u16 i;
ioc->pending_io_count = 0;
@@ -6309,11 +6312,7 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
return;
/* pending command count */
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = 0; i < ioc->scsiio_depth; i++)
- if (ioc->scsi_lookup[i].cb_idx != 0xFF)
- ioc->pending_io_count++;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
if (!ioc->pending_io_count)
return;
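
Aside: with the private scsi_lookup array gone, the pending-I/O census above reads the midlayer's shost->host_busy counter instead of walking scsiio_depth tracker slots under a lock; _base_recovery_check() re-samples the same counter as commands complete and wakes reset_wq once it reaches zero. A condensed user-space model, with a condition variable in place of the kernel wait queue:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  reset_wq = PTHREAD_COND_INITIALIZER;
static int host_busy;               /* maintained by the midlayer */

static void io_done(void)           /* runs per completed command */
{
    pthread_mutex_lock(&lock);
    if (--host_busy == 0)
        pthread_cond_signal(&reset_wq);   /* ~_base_recovery_check() */
    pthread_mutex_unlock(&lock);
}

static void wait_for_commands_to_complete(void)
{
    pthread_mutex_lock(&lock);
    while (host_busy)               /* snapshot, then sleep */
        pthread_cond_wait(&reset_wq, &lock);
    pthread_mutex_unlock(&lock);
}
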
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 60f42ca3954f..789bc421424b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -772,20 +772,17 @@ struct chain_tracker {
/**
* struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
- * @scmd: scsi request pointer
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
- * @tracker_list: list of free request (ioc->free_list)
+ * @chain_list: list of associated firmware chain trackers
* @msix_io: IO's msix
*/
struct scsiio_tracker {
u16 smid;
- struct scsi_cmnd *scmd;
u8 cb_idx;
u8 direct_io;
struct pcie_sg_list pcie_sg_list;
struct list_head chain_list;
- struct list_head tracker_list;
u16 msix_io;
};
@@ -1248,10 +1245,8 @@ struct MPT3SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
- struct scsiio_tracker *scsi_lookup;
- ulong scsi_lookup_pages;
+ struct pcie_sg_list *pcie_sg_lookup;
spinlock_t scsi_lookup_lock;
- struct list_head free_list;
int pending_io_count;
wait_queue_head_t reset_wq;
@@ -1270,6 +1265,7 @@ struct MPT3SAS_ADAPTER {
u16 chains_needed_per_io;
u32 chain_depth;
u16 chain_segment_sz;
+ u16 chains_per_prp_buffer;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -1401,7 +1397,9 @@ void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
/* hi-priority queue */
u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
- struct scsi_cmnd *scmd);
+ struct scsi_cmnd *scmd);
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st);
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1437,16 +1435,16 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
+struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
@@ -1613,14 +1611,9 @@ void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
struct _raid_device *raid_device);
-u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid);
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index b4c374b08e5e..9cddc3074cd1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -534,7 +534,7 @@ _ctl_fasync(int fd, struct file *filep, int mode)
* @wait -
*
*/
-static unsigned int
+static __poll_t
_ctl_poll(struct file *filep, poll_table *wait)
{
struct MPT3SAS_ADAPTER *ioc;
@@ -567,11 +567,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
u8 found = 0;
- u16 i;
+ u16 smid;
u16 handle;
struct scsi_cmnd *scmd;
struct MPT3SAS_DEVICE *priv_data;
- unsigned long flags;
Mpi2SCSITaskManagementReply_t *tm_reply;
u32 sz;
u32 lun;
@@ -587,11 +586,11 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
handle = le16_to_cpu(tm_request->DevHandle);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = ioc->scsiio_depth; i && !found; i--) {
- scmd = ioc->scsi_lookup[i - 1].scmd;
- if (scmd == NULL || scmd->device == NULL ||
- scmd->device->hostdata == NULL)
+ for (smid = ioc->scsiio_depth; smid && !found; smid--) {
+ struct scsiio_tracker *st;
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
continue;
if (lun != scmd->device->lun)
continue;
@@ -600,10 +599,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
continue;
if (priv_data->sas_target->handle != handle)
continue;
- tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ st = scsi_cmd_priv(scmd);
+ tm_request->TaskMID = cpu_to_le16(st->smid);
found = 1;
}
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
if (!found) {
dctlprintk(ioc, pr_info(MPT3SAS_FMT
@@ -724,14 +723,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
goto out;
}
} else {
-
- smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
- if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
- ret = -EAGAIN;
- goto out;
- }
+ /* Use first reserved smid for passthrough ioctls */
+ smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
}
ret = 0;
@@ -1081,8 +1074,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
mpt3sas_scsih_issue_locked_tm(ioc,
- le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
+ le16_to_cpu(mpi_request->FunctionDependent1), 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b258f210120a..74fca184dba9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1445,156 +1445,31 @@ _scsih_is_nvme_device(u32 device_info)
}
/**
- * _scsih_scsi_lookup_get - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-static struct scsi_cmnd *
-_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].scmd;
-}
-
-/**
- * __scsih_scsi_lookup_get_clear - returns scmd entry without
- * holding any lock.
+ * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
*
* Returns the scmd pointer stored for the given smid.
* The caller may then dereference the returned pointer.
*/
-static inline struct scsi_cmnd *
-__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc,
- u16 smid)
+struct scsi_cmnd *
+mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *scmd = NULL;
+ struct scsiio_tracker *st;
- swap(scmd, ioc->scsi_lookup[smid - 1].scmd);
-
- return scmd;
-}
-
-/**
- * _scsih_scsi_lookup_get_clear - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- * Then will derefrence the stored scmd pointer.
- */
-static inline struct scsi_cmnd *
-_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- unsigned long flags;
- struct scsi_cmnd *scmd;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- return scmd;
-}
+ if (smid > 0 &&
+ smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
+ u32 unique_tag = smid - 1;
-/**
- * _scsih_scsi_lookup_find_by_scmd - scmd lookup
- * @ioc: per adapter object
- * @smid: system request message index
- * @scmd: pointer to scsi command object
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a scmd pointer in the scsi_lookup array,
- * returning the revelent smid. A returned value of zero means invalid.
- */
-static u16
-_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
- *scmd)
-{
- u16 smid;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- smid = 0;
- for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd == scmd) {
- smid = ioc->scsi_lookup[i].smid;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return smid;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_target - search for matching channel:id
- * @ioc: per adapter object
- * @id: target id
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
- int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel)) {
- found = 1;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
- * @ioc: per adapter object
- * @id: target id
- * @lun: lun number
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id:lun in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
- unsigned int lun, int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel &&
- ioc->scsi_lookup[i].scmd->device->lun == lun)) {
- found = 1;
- goto out;
+ scmd = scsi_host_find_tag(ioc->shost, unique_tag);
+ if (scmd) {
+ st = scsi_cmd_priv(scmd);
+ if (st->cb_idx == 0xFF)
+ scmd = NULL;
}
}
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
+ return scmd;
}
/**
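
Aside: the five bespoke lookup helpers deleted above collapse into the single mpt3sas_scsih_scsi_lookup_get(), which asks the block layer (scsi_host_find_tag) for the command behind a SMID and treats a tracker whose cb_idx is still 0xFF as a free slot. A standalone model of the sentinel check (find_tag() is a hypothetical stand-in):

#include <stddef.h>
#include <stdint.h>

#define TAG_DEPTH 8

struct tracker { uint8_t cb_idx; };     /* 0xFF means slot not in use */
struct cmnd    { struct tracker priv; };

static struct cmnd table[TAG_DEPTH];

static struct cmnd *find_tag(unsigned int tag)  /* ~scsi_host_find_tag() */
{
    return tag < TAG_DEPTH ? &table[tag] : NULL;
}

static struct cmnd *lookup_get(uint16_t smid)
{
    struct cmnd *cmd = smid ? find_tag(smid - 1) : NULL;

    if (cmd && cmd->priv.cb_idx == 0xFF)
        cmd = NULL;                     /* tag allocated, tracker idle */
    return cmd;
}
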
@@ -2727,32 +2602,30 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* mpt3sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
- * @device_handle: device handle
- * @channel: the channel assigned by the OS
- * @id: the id assigned by the OS
+ * @handle: device handle
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
+ * @msix_task: MSIX table index supplied by the OS
* @timeout: timeout in seconds
* Context: user
*
* A generic API for sending task management requests to firmware.
*
* The callback index is set inside `ioc->tm_cb_idx`.
+ * The caller is responsible for checking for outstanding commands.
*
* Return SUCCESS or FAILED.
*/
int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
- uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
- struct scsiio_tracker *scsi_lookup = NULL;
int rc;
- u16 msix_task = 0;
lockdep_assert_held(&ioc->tm_cmds.mutex);
@@ -2791,9 +2664,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
return FAILED;
}
- if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
- scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
-
dtmprintk(ioc, pr_info(MPT3SAS_FMT
"sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
ioc->name, handle, type, smid_task));
@@ -2809,11 +2679,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
- (scsi_lookup->msix_io < ioc->reply_queue_count))
- msix_task = scsi_lookup->msix_io;
- else
- msix_task = 0;
ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2847,35 +2712,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
}
}
-
- switch (type) {
- case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
- rc = SUCCESS;
- if (scsi_lookup->scmd == NULL)
- break;
- rc = FAILED;
- break;
-
- case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
- if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
- case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
- if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
- rc = SUCCESS;
- break;
- default:
- rc = FAILED;
- break;
- }
+ rc = SUCCESS;
out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
@@ -2884,13 +2721,13 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
- ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
- smid_task, timeout);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
+ msix_task, timeout);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
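
mpt3sas_scsih_issue_locked_tm() is a thin wrapper that takes tm_cmds.mutex around the unlocked worker, which in turn asserts the lock via the lockdep_assert_held() shown earlier. A user-space sketch of the same locked-wrapper idiom using pthreads (names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tm_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Worker: requires tm_mutex to be held by the caller. */
static int issue_tm(int handle)
{
	/* in the kernel this would be lockdep_assert_held(&tm_mutex) */
	printf("issuing TM for handle %d\n", handle);
	return 0;
}

/* Wrapper: acquires the lock, delegates, releases. */
static int issue_locked_tm(int handle)
{
	int ret;

	pthread_mutex_lock(&tm_mutex);
	ret = issue_tm(handle);
	pthread_mutex_unlock(&tm_mutex);
	return ret;
}

int main(void)
{
	return issue_locked_tm(42);
}
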
@@ -2989,7 +2826,7 @@ scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
- u16 smid;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
u16 handle;
int r;
@@ -3007,9 +2844,8 @@ scsih_abort(struct scsi_cmnd *scmd)
goto out;
}
- /* search for the command */
- smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
- if (!smid) {
+ /* check for completed command */
+ if (st == NULL || st->cb_idx == 0xFF) {
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
@@ -3027,10 +2863,12 @@ scsih_abort(struct scsi_cmnd *scmd)
mpt3sas_halt_firmware(ioc);
handle = sas_device_priv_data->sas_target->handle;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, 30);
+ /* Command must be cleared after abort */
+ if (r == SUCCESS && st->cb_idx != 0xFF)
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3086,10 +2924,11 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3148,10 +2987,11 @@ scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&starget->target_busy))
+ r = FAILED;
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
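
Both reset paths above now decide success by re-checking an atomic in-flight counter (device_busy / target_busy) after the task management call instead of scanning a lookup array. A small user-space sketch of that pattern with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int device_busy;	/* incremented per issued command */

static int reset_device(void)
{
	/* ... send the reset and wait for it to complete ... */
	/* the reset only counts as SUCCESS if nothing is still in flight */
	return atomic_load(&device_busy) == 0 ? 0 : -1;
}

int main(void)
{
	atomic_store(&device_busy, 2);	/* two commands outstanding */
	printf("reset: %s\n", reset_device() == 0 ? "SUCCESS" : "FAILED");
	atomic_store(&device_busy, 0);	/* all completed */
	printf("reset: %s\n", reset_device() == 0 ? "SUCCESS" : "FAILED");
	return 0;
}
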
@@ -4600,16 +4440,18 @@ static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 smid;
- u16 count = 0;
+ int count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
count++;
_scsih_set_satl_pending(scmd, false);
- mpt3sas_base_free_smid(ioc, smid);
+ st = scsi_cmd_priv(scmd);
+ mpt3sas_base_clear_st(ioc, st);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
scmd->result = DID_NO_CONNECT << 16;
@@ -4758,19 +4600,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- /*
- * Bug work around for firmware SATL handling. The loop
- * is based on atomic operations and ensures consistency
- * since we're lockless at this point
- */
- do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
- } while (_scsih_set_satl_pending(scmd, true));
-
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4796,6 +4625,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
+ /*
+ * Bug work around for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
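
The relocated workaround claims a single per-device ATA slot atomically: if another SATL command already holds it, the new command completes with SAM_STAT_BUSY, and the error paths added later in this function now release the slot before bailing out. The same claim-or-busy idea in user-space C11 (illustrative names, not the driver's helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag ata_pending = ATOMIC_FLAG_INIT;

/* Returns true if we claimed the SATL slot, false if another command
 * already holds it (the caller should then report busy). */
static bool claim_satl_slot(void)
{
	/* test-and-set returns the previous value: false means we won */
	return !atomic_flag_test_and_set(&ata_pending);
}

static void release_satl_slot(void)
{
	atomic_flag_clear(&ata_pending);
}

int main(void)
{
	printf("first claim:   %s\n", claim_satl_slot() ? "ok" : "busy");
	printf("second claim:  %s\n", claim_satl_slot() ? "ok" : "busy");
	release_satl_slot();
	printf("after release: %s\n", claim_satl_slot() ? "ok" : "busy");
	return 0;
}
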
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -4823,6 +4665,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -4854,6 +4697,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
pcie_device = sas_target_priv_data->pcie_dev;
if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
} else
@@ -4862,7 +4706,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
raid_device = sas_target_priv_data->raid_device;
if (raid_device && raid_device->direct_io_enabled)
mpt3sas_setup_direct_io(ioc, scmd,
- raid_device, mpi_request, smid);
+ raid_device, mpi_request);
if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@ -5330,6 +5174,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
Mpi25SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 ioc_status;
u32 xfer_cnt;
u8 scsi_state;
@@ -5337,16 +5182,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 log_info;
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
- unsigned long flags;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
- if (ioc->broadcast_aen_busy || ioc->pci_error_recovery ||
- ioc->got_task_abort_from_ioctl)
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
- else
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
-
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL)
return 1;
@@ -5371,13 +5210,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* WARPDRIVE: If direct_io is set then it is directIO,
* the failed direct I/O should be redirected to volume
*/
- if (mpt3sas_scsi_direct_io_get(ioc, smid) &&
+ st = scsi_cmd_priv(scmd);
+ if (st->direct_io &&
((ioc_status & MPI2_IOCSTATUS_MASK)
!= MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- ioc->scsi_lookup[smid - 1].scmd = scmd;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- mpt3sas_scsi_direct_io_set(ioc, smid, 0);
+ st->direct_io = 0;
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5555,9 +5392,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
out:
scsi_dma_unmap(scmd);
-
+ mpt3sas_base_free_smid(ioc, smid);
scmd->scsi_done(scmd);
- return 1;
+ return 0;
}
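
With the tracker freed in the completion path itself, the handler's return value flips from 1 to 0; the surrounding change suggests the return code tells the base interrupt handler whether it still owns cleanup of the message frame. A tiny sketch of signalling ownership through a return code (the convention here is assumed from this diff, not quoted from the driver):

#include <stdio.h>
#include <stdlib.h>

/* Convention: return 1 if the caller must free the frame, 0 if we did. */
static int io_done(char *frame)
{
	if (frame == NULL)
		return 1;		/* nothing matched; caller cleans up */
	free(frame);			/* consumed: ownership ends here */
	return 0;
}

int main(void)
{
	char *frame = malloc(32);

	if (io_done(frame) == 1)
		free(frame);		/* only on the "caller frees" path */
	return 0;
}
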
/**
@@ -7211,7 +7048,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* Context: user.
*
*/
-static int
+static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
@@ -7221,7 +7058,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
u8 link_rate, prev_link_rate;
unsigned long flags;
int rc;
- int requeue_event;
Mpi26EventDataPCIeTopologyChangeList_t *event_data =
(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
struct _pcie_device *pcie_device;
@@ -7231,12 +7067,12 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery)
- return 0;
+ return;
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
ioc->name));
- return 0;
+ return;
}
/* handle siblings events */
@@ -7244,10 +7080,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"ignoring switch event\n", ioc->name));
- return 0;
+ return;
}
if (ioc->remove_host || ioc->pci_error_recovery)
- return 0;
+ return;
reason_code = event_data->PortEntry[i].PortStatus;
handle =
le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
@@ -7316,7 +7152,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
}
}
- return requeue_event;
}
/**
@@ -7502,6 +7337,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
{
struct scsi_cmnd *scmd;
struct scsi_device *sdev;
+ struct scsiio_tracker *st;
u16 smid, handle;
u32 lun;
struct MPT3SAS_DEVICE *sas_device_priv_data;
@@ -7543,9 +7379,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
if (ioc->shost_recovery)
goto out;
- scmd = _scsih_scsi_lookup_get(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
+ st = scsi_cmd_priv(scmd);
sdev = scmd->device;
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
@@ -7567,8 +7404,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
+ st->msix_io, 30);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -7607,10 +7445,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery)
goto out_no_lock;
- r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
- sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
- 30);
- if (r == FAILED) {
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
+ st->msix_io, 30);
+ if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
"scmd(%p)\n", scmd);
@@ -10416,6 +10254,7 @@ static struct scsi_host_template mpt2sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 2.0 HBA devices */
@@ -10454,6 +10293,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 3.0 HBA devices */
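
Setting .cmd_size in both host templates asks the SCSI midlayer to allocate sizeof(struct scsiio_tracker) bytes of driver-private storage behind every scsi_cmnd, which scsi_cmd_priv() then returns; this is what lets the driver drop its own scsi_lookup array. A user-space model of a trailing private area (illustrative types):

#include <stdio.h>
#include <stdlib.h>

struct cmd {
	int id;
	/* a private area of cmd_size bytes follows the command */
};

struct tracker {
	unsigned short smid;
	unsigned char cb_idx;
	unsigned char direct_io;
};

static void *cmd_priv(struct cmd *c)
{
	return c + 1;	/* private data lives right after the command */
}

static struct cmd *alloc_cmd(size_t priv_size)
{
	return calloc(1, sizeof(struct cmd) + priv_size);
}

int main(void)
{
	struct cmd *c = alloc_cmd(sizeof(struct tracker));
	struct tracker *st = cmd_priv(c);

	st->smid = 7;
	printf("tracker smid=%u\n", st->smid);
	free(c);
	return 0;
}
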
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index ced7d9f6274c..6bfcee4757e0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -261,33 +261,6 @@ out_error:
}
/**
- * mpt3sas_scsi_direct_io_get - returns direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-inline u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].direct_io;
-}
-
-/**
- * mpt3sas_scsi_direct_io_set - sets direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- * @direct_io: Zero or non-zero value to set in the direct_io flag
- *
- * Returns Nothing.
- */
-inline void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
-{
- ioc->scsi_lookup[smid - 1].direct_io = direct_io;
-}
-
-/**
* mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
* @ioc: per adapter object
* @scmd: pointer to scsi command object
@@ -299,12 +272,12 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid)
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request)
{
sector_t v_lba, p_lba, stripe_off, column, io_size;
u32 stripe_sz, stripe_exp;
u8 num_pds, cmd = scmd->cmnd[0];
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
if (cmd != READ_10 && cmd != WRITE_10 &&
cmd != READ_16 && cmd != WRITE_16)
@@ -340,5 +313,5 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
else
put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
- mpt3sas_scsi_direct_io_set(ioc, smid, 1);
+ st->direct_io = 1;
}
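
mpt3sas_setup_direct_io() translates a volume LBA into a physical-disk LBA from the volume's stripe geometry before marking the tracker for direct I/O. A self-contained sketch of the usual power-of-two stripe mapping (illustrative values, not the driver's exact arithmetic):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t v_lba = 12345;		/* virtual LBA within the volume */
	uint32_t stripe_exp = 6;	/* log2 of stripe size in sectors */
	uint32_t stripe_sz = 1u << stripe_exp;
	uint8_t num_pds = 4;		/* physical disks in the volume */

	uint64_t stripe_off = v_lba & (stripe_sz - 1);	/* offset in stripe */
	uint64_t stripe_no = v_lba >> stripe_exp;	/* global stripe # */
	uint64_t column = stripe_no % num_pds;		/* which disk */
	uint64_t p_lba = ((stripe_no / num_pds) << stripe_exp) | stripe_off;

	printf("v_lba %llu -> disk %llu, p_lba %llu\n",
	       (unsigned long long)v_lba, (unsigned long long)column,
	       (unsigned long long)p_lba);
	return 0;
}
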
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e58be98430b0..201c8de1853d 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5216,7 +5216,7 @@ static unsigned short pmcraid_get_minor(void)
{
int minor;
- minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
+ minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
__set_bit(minor, pmcraid_minor);
return minor;
}
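
The pmcraid fix above corrects a classic bitmap-API slip: find_first_zero_bit() takes the map size in bits, while sizeof() yields bytes, so the old call searched only one eighth of the minor map. A user-space illustration (the 1024 limit is illustrative, not the driver's actual constant):

#include <stdio.h>

#define MAX_ADAPTERS	1024
#define BPL		(8 * sizeof(unsigned long))

static unsigned long bitmap_arr[MAX_ADAPTERS / (8 * sizeof(unsigned long))];

/* Returns the first clear bit, or nbits if none is found. */
static unsigned int first_zero_bit(const unsigned long *map,
				   unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (!(map[i / BPL] & (1UL << (i % BPL))))
			return i;
	return nbits;
}

int main(void)
{
	/* occupy the first 200 minors */
	for (unsigned int i = 0; i < 200; i++)
		bitmap_arr[i / BPL] |= 1UL << (i % BPL);

	/* buggy: sizeof() is bytes, so the search gives up early */
	printf("bytes as limit: %u (== limit, i.e. \"full\")\n",
	       first_zero_bit(bitmap_arr, sizeof(bitmap_arr)));
	/* fixed: pass the size in bits */
	printf("bits as limit:  %u\n",
	       first_zero_bit(bitmap_arr, MAX_ADAPTERS));
	return 0;
}
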
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 7be5823ab036..ee86a0c62dbf 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -724,6 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
cmd->SCp.phase++;
+ /* fall through */
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
@@ -733,6 +734,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (!ppa_send_command(cmd))
return 0;
cmd->SCp.phase++;
+ /* fall through */
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
@@ -746,6 +748,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
}
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.phase++;
+ /* fall through */
case 5: /* Phase 5 - Data transfer stage */
w_ctr(ppb, 0x0c);
@@ -758,6 +761,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (retv == 0)
return 1;
cmd->SCp.phase++;
+ /* fall through */
case 6: /* Phase 6 - Read status/message */
cmd->result = DID_OK << 16;
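
The ppa phase machine intentionally drops from each case into the next; the added /* fall through */ comments document that and satisfy GCC's -Wimplicit-fallthrough comment matching. A minimal compilable example of the idiom:

#include <stdio.h>

int main(void)
{
	int phase = 3;

	switch (phase) {
	case 3:
		puts("phase 3: send command");
		/* fall through */
	case 4:
		puts("phase 4: set up buffers");
		/* fall through */
	case 5:
		puts("phase 5: transfer data");
		break;
	}
	return 0;
}
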
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 7d91e53562f8..a980ef756a67 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -25,15 +25,17 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
u32 task_retry_id,
u8 fcp_cmd_payload[32])
{
- struct fcoe_task_context *ctx = task_params->context;
+ struct e4_fcoe_task_context *ctx = task_params->context;
+ const u8 val_byte = ctx->ystorm_ag_context.byte0;
+ struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
- struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
u32 io_size, val;
bool slow_sgl;
memset(ctx, 0, sizeof(*(ctx)));
+ ctx->ystorm_ag_context.byte0 = val_byte;
slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
sgl_task_params->small_mid_sge);
io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
@@ -43,20 +45,20 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
y_st_ctx = &ctx->ystorm_st_context;
y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
- y_st_ctx->task_type = task_params->task_type;
+ y_st_ctx->task_type = (u8)task_params->task_type;
memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
/* Tstorm ctx */
t_st_ctx = &ctx->tstorm_st_context;
- t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
- FCOE_TASK_DEV_TYPE_TAPE :
- FCOE_TASK_DEV_TYPE_DISK);
+ t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ?
+ FCOE_TASK_DEV_TYPE_TAPE :
+ FCOE_TASK_DEV_TYPE_DISK);
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
val = cpu_to_le32(task_params->cq_rss_number);
t_st_ctx->read_only.glbl_q_num = val;
t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
- t_st_ctx->read_only.task_type = task_params->task_type;
+ t_st_ctx->read_only.task_type = (u8)task_params->task_type;
SET_FIELD(t_st_ctx->read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -88,6 +90,8 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
SET_FIELD(m_st_ctx->flags,
MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
(slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+ m_st_ctx->sgl_params.sgl_num_sges =
+ cpu_to_le16(sgl_task_params->num_sges);
} else {
/* Tstorm ctx */
SET_FIELD(t_st_ctx->read_write.flags,
@@ -101,7 +105,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
sgl_task_params);
}
+ /* Init Sqe */
init_common_sqe(task_params, SEND_FCOE_CMD);
+
return 0;
}
@@ -112,14 +118,16 @@ int init_initiator_midpath_unsolicited_fcoe_task(
struct scsi_sgl_task_params *rx_sgl_task_params,
u8 fw_to_place_fc_header)
{
- struct fcoe_task_context *ctx = task_params->context;
+ struct e4_fcoe_task_context *ctx = task_params->context;
+ const u8 val_byte = ctx->ystorm_ag_context.byte0;
+ struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
- struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
u32 val;
memset(ctx, 0, sizeof(*(ctx)));
+ ctx->ystorm_ag_context.byte0 = val_byte;
/* Init Ystorm */
y_st_ctx = &ctx->ystorm_st_context;
@@ -129,7 +137,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
SET_FIELD(y_st_ctx->sgl_mode,
YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
- y_st_ctx->task_type = task_params->task_type;
+ y_st_ctx->task_type = (u8)task_params->task_type;
memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
@@ -148,7 +156,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
val = cpu_to_le32(task_params->cq_rss_number);
t_st_ctx->read_only.glbl_q_num = val;
- t_st_ctx->read_only.task_type = task_params->task_type;
+ t_st_ctx->read_only.task_type = (u8)task_params->task_type;
SET_FIELD(t_st_ctx->read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -182,9 +190,10 @@ int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
}
int init_initiator_sequence_recovery_fcoe_task(
- struct fcoe_task_params *task_params, u32 off)
+ struct fcoe_task_params *task_params, u32 desired_offset)
{
init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
- task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+ task_params->sqe->additional_info_union.seq_rec_updated_offset =
+ desired_offset;
return 0;
}
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index f9c50faa748e..b5c236efd465 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -13,7 +13,7 @@
struct fcoe_task_params {
/* Output parameter [set/filled by the HSI function] */
- struct fcoe_task_context *context;
+ struct e4_fcoe_task_context *context;
/* Output parameter [set/filled by the HSI function] */
struct fcoe_wqe *sqe;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 9bf7b227e69a..c105a2e48ac1 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -129,7 +129,7 @@ struct qedf_ioreq {
struct delayed_work timeout_work;
struct completion tm_done;
struct completion abts_done;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
@@ -465,7 +465,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+ struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59c18ca4cda9..aa22b11436ba 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -19,7 +19,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
int rc = 0;
uint32_t did, sid;
uint16_t xid;
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index 7faef80c5f7a..503c1ae3ccd0 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -225,19 +225,6 @@ enum fcoe_cqe_type {
MAX_FCOE_CQE_TYPE
};
-
-/*
- * FCoE device type
- */
-enum fcoe_device_type {
- FCOE_TASK_DEV_TYPE_DISK,
- FCOE_TASK_DEV_TYPE_TAPE,
- MAX_FCOE_DEVICE_TYPE
-};
-
-
-
-
/*
* FCoE fast path error codes
*/
@@ -332,31 +319,6 @@ enum fcoe_sp_error_code {
MAX_FCOE_SP_ERROR_CODE
};
-
-/*
- * FCoE SQE request type
- */
-enum fcoe_sqe_request_type {
- SEND_FCOE_CMD,
- SEND_FCOE_MIDPATH,
- SEND_FCOE_ABTS_REQUEST,
- FCOE_EXCHANGE_CLEANUP,
- FCOE_SEQUENCE_RECOVERY,
- SEND_FCOE_XFER_RDY,
- SEND_FCOE_RSP,
- SEND_FCOE_RSP_WITH_SENSE_DATA,
- SEND_FCOE_TARGET_DATA,
- SEND_FCOE_INITIATOR_DATA,
- /*
- * Xfer Continuation (==1) ready to be sent. Previous XFERs data
- * received successfully.
- */
- SEND_FCOE_XFER_CONTINUATION_RDY,
- SEND_FCOE_TARGET_ABTS_RSP,
- MAX_FCOE_SQE_REQUEST_TYPE
-};
-
-
/*
* FCoE task TX state
*/
@@ -389,34 +351,4 @@ enum fcoe_task_tx_state {
MAX_FCOE_TASK_TX_STATE
};
-
-/*
- * FCoE task type
- */
-enum fcoe_task_type {
- FCOE_TASK_TYPE_WRITE_INITIATOR,
- FCOE_TASK_TYPE_READ_INITIATOR,
- FCOE_TASK_TYPE_MIDPATH,
- FCOE_TASK_TYPE_UNSOLICITED,
- FCOE_TASK_TYPE_ABTS,
- FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
- FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
- FCOE_TASK_TYPE_WRITE_TARGET,
- FCOE_TASK_TYPE_READ_TARGET,
- FCOE_TASK_TYPE_RSP,
- FCOE_TASK_TYPE_RSP_SENSE_DATA,
- FCOE_TASK_TYPE_ABTS_TARGET,
- FCOE_TASK_TYPE_ENUM_SIZE,
- MAX_FCOE_TASK_TYPE
-};
-
-struct scsi_glbl_queue_entry {
- /* Start physical address for the RQ (receive queue) PBL. */
- struct regpair rq_pbl_addr;
- /* Start physical address for the CQ (completion queue) PBL. */
- struct regpair cq_pbl_addr;
- /* Start physical address for the CMDQ (command queue) PBL. */
- struct regpair cmdq_pbl_addr;
-};
-
#endif /* __QEDF_HSI__ */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ded386036c27..b15e69586a36 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -579,7 +579,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+ struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
@@ -597,7 +597,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
/* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
- memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
@@ -673,7 +673,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+ struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
@@ -691,7 +691,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
@@ -844,7 +844,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc_cmd->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct fcoe_task_context *task_ctx;
+ struct e4_fcoe_task_context *task_ctx;
u16 xid;
enum fcoe_task_type req_type = 0;
struct fcoe_wqe *sqe;
@@ -1065,7 +1065,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
u16 xid, rval;
- struct fcoe_task_context *task_ctx;
+ struct e4_fcoe_task_context *task_ctx;
struct scsi_cmnd *sc_cmd;
struct fcoe_cqe_rsp_info *fcp_rsp;
struct qedf_rport *fcport;
@@ -1722,7 +1722,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
struct qedf_rport *fcport;
struct qedf_ctx *qedf;
uint16_t xid;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
int tmo = 0;
int rc = SUCCESS;
unsigned long flags;
@@ -1835,7 +1835,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
int rc = 0;
@@ -2005,17 +2005,18 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
struct qedf_io_work *io_work;
u32 bdq_idx;
void *bdq_addr;
+ struct scsi_bd *p_bd_info;
+ p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
- "address.hi=%x address.lo=%x opaque_data.hi=%x "
- "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
- qedf->bdq_prod_idx, pktlen);
-
- bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
+ "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
+ le32_to_cpu(p_bd_info->address.hi),
+ le32_to_cpu(p_bd_info->address.lo),
+ le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
+ le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
+ qedf->bdq_prod_idx, pktlen);
+
+ bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
if (bdq_idx >= QEDF_BDQ_SIZE) {
QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
bdq_idx);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7c0064500cc5..ccd9a08ea030 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1860,7 +1860,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
struct qedf_ctx *qedf = fp->qedf;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx;
/* Get the pointer to the global CQ this completion is on */
@@ -1887,7 +1887,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
{
struct qedf_ctx *qedf = fp->qedf;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
struct global_queue *que;
u16 prod_idx;
struct fcoe_cqe *cqe;
@@ -2352,12 +2352,12 @@ void qedf_fp_io_handler(struct work_struct *work)
static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
- sizeof(struct status_block), &sb_phys, GFP_KERNEL);
+ sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
@@ -2623,9 +2623,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
for (i = 0; i < QEDF_BDQ_SIZE; i++) {
pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
- pbl->opaque.hi = 0;
+ pbl->opaque.fcoe_opaque.hi = 0;
/* Opaque lo data is an index into the BDQ array */
- pbl->opaque.lo = cpu_to_le32(i);
+ pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
pbl++;
}
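
The BDQ setup writes each buffer's array index into the descriptor's opaque field so the unsolicited-completion path (qedf_process_unsol_compl above) can map the opaque value straight back to a buffer, bounds-checking it against QEDF_BDQ_SIZE. A compact sketch of that round-trip (illustrative structures):

#include <stdio.h>
#include <stdint.h>

#define BDQ_SIZE 4

struct bd {
	uint64_t addr;
	uint32_t opaque_lo;
};

static char bufs[BDQ_SIZE][16];
static struct bd pbl[BDQ_SIZE];

int main(void)
{
	for (uint32_t i = 0; i < BDQ_SIZE; i++) {
		pbl[i].addr = (uintptr_t)bufs[i];
		pbl[i].opaque_lo = i;	/* index travels with descriptor */
	}

	/* the completion reports the opaque value back */
	uint32_t idx = pbl[2].opaque_lo;

	if (idx < BDQ_SIZE)	/* always bounds-check firmware data */
		printf("completion -> buffer %u at %p\n", idx,
		       (void *)bufs[idx]);
	return 0;
}
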
@@ -3126,6 +3126,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
if (!qedf->cmd_mgr) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
+ rc = -ENOMEM;
goto err5;
}
@@ -3149,6 +3150,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
create_workqueue(host_buf);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
+ rc = -ENOMEM;
goto err7;
}
@@ -3192,6 +3194,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
+ rc = -ENOMEM;
goto err7;
}
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 397b3b8ee51a..c2478056356a 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -7,9 +7,9 @@
* this source tree.
*/
-#define QEDF_VERSION "8.20.5.0"
+#define QEDF_VERSION "8.33.0.20"
#define QEDF_DRIVER_MAJOR_VER 8
-#define QEDF_DRIVER_MINOR_VER 20
-#define QEDF_DRIVER_REV_VER 5
-#define QEDF_DRIVER_ENG_VER 0
+#define QEDF_DRIVER_MINOR_VER 33
+#define QEDF_DRIVER_REV_VER 0
+#define QEDF_DRIVER_ENG_VER 20
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 39d77818a677..fd8a1eea3163 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
{
struct qedi_fastpath *fp = NULL;
struct qed_sb_info *sb_info = NULL;
- struct status_block *sb = NULL;
+ struct status_block_e4 *sb = NULL;
struct global_queue *que = NULL;
int id;
u16 prod_idx;
@@ -168,7 +168,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
sb_info = fp->sb_info;
sb = sb_info->sb_virt;
prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
- STATUS_BLOCK_PROD_INDEX_MASK);
+ STATUS_BLOCK_E4_PROD_INDEX_MASK);
seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
que = qedi->global_queues[fp->sb_id];
seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index bd302d3cb9af..667d7697ba01 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct iscsi_task_context *task_ctx;
+ struct e4_iscsi_task_context *task_ctx;
struct iscsi_text_rsp *resp_hdr_ptr;
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
@@ -198,7 +198,7 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
qedi_cmd = task->dd_data;
- qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+ qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
if (!qedi_cmd->tmf_resp_buf) {
QEDI_ERR(&qedi->dbg_ctx,
"Failed to allocate resp buf, cid=0x%x\n",
@@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct iscsi_task_context *task_ctx;
+ struct e4_iscsi_task_context *task_ctx;
struct iscsi_login_rsp *resp_hdr_ptr;
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
@@ -326,7 +326,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
(qedi->bdq_prod_idx % qedi->rq_num_entries));
/* Obtain buffer address from rqe_opaque */
- idx = cqe->rqe_opaque.lo;
+ idx = cqe->rqe_opaque;
if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -335,8 +335,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
}
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
- "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
- cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+ "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
@@ -363,7 +362,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
struct scsi_bd *pbl;
/* Obtain buffer address from rqe_opaque */
- idx = cqe->rqe_opaque.lo;
+ idx = cqe->rqe_opaque;
if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -378,8 +377,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
pbl, pbl->address.hi, pbl->address.lo, idx);
- pbl->opaque.hi = 0;
- pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+ pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+ pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
/* Increment producer to let f/w know we've handled the frame */
qedi->bdq_prod_idx += count;
@@ -1017,7 +1018,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
struct scsi_sge *resp_sge = NULL;
@@ -1037,8 +1038,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1119,7 +1121,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct qedi_cmd *qedi_cmd;
@@ -1137,8 +1139,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1467,7 +1470,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
@@ -1490,8 +1493,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1605,7 +1609,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
struct scsi_sge *req_sge = NULL;
@@ -1627,8 +1631,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1705,7 +1710,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
@@ -1725,8 +1730,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -2046,7 +2052,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct iscsi_task_params task_params;
struct iscsi_conn_params conn_params;
struct scsi_initiator_cmd_params cmd_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2069,8 +2075,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
cmd->task_id = tid;
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 7df32a68bd54..a269da1a6c75 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -203,12 +203,15 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
struct data_hdr *pdu_header,
enum iscsi_task_type task_type)
{
- struct iscsi_task_context *context;
- u16 index;
+ struct e4_iscsi_task_context *context;
u32 val;
+ u16 index;
+ u8 val_byte;
context = task_params->context;
+ val_byte = context->mstorm_ag_context.cdu_validation;
memset(context, 0, sizeof(*context));
+ context->mstorm_ag_context.cdu_validation = val_byte;
for (index = 0; index <
ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
@@ -222,7 +225,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
cpu_to_le16(task_params->conn_icid);
SET_FIELD(context->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
context->ustorm_st_context.task_type = task_type;
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -252,10 +255,9 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
- struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
- u32 remaining_recv_len,
- u32 expected_data_transfer_len,
- u8 num_sges, bool tx_dif_conn_err_en)
+ struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ u32 remaining_recv_len, u32 expected_data_transfer_len,
+ u8 num_sges, bool tx_dif_conn_err_en)
{
u32 val;
@@ -265,12 +267,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
ustorm_st_cxt->exp_data_transfer_len = val;
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
SET_FIELD(ustorm_ag_cxt->flags2,
- USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
tx_dif_conn_err_en ? 1 : 0);
}
static
-void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
struct iscsi_conn_params *conn_params,
enum iscsi_task_type task_type,
u32 task_size,
@@ -342,56 +344,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
dif_task_params->crc_seed ? 1 : 0);
- SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type);
SET_FIELD(rdif_context->flags0,
- RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ RDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type);
SET_FIELD(rdif_context->flags0,
- RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+ RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
SET_FIELD(rdif_context->flags0,
- RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_VALIDATEGUARD,
+ RDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_HOSTINTERFACE,
+ RDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDGUARD,
+ RDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDREFTAG,
+ RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_INTERVALSIZE,
+ RDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9);
SET_FIELD(rdif_context->state,
- RDIF_TASK_CONTEXT_REFTAGMASK,
+ RDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask);
- SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag);
}
@@ -399,7 +402,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
tdif_context->app_tag_value =
cpu_to_le16(dif_task_params->application_tag);
- tdif_context->partial_crc_valueB =
+ tdif_context->partial_crc_value_b =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
tdif_context->partial_crc_value_a =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
@@ -407,64 +410,68 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
dif_task_params->crc_seed ? 1 : 0);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+ TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
dif_task_params->tx_dif_conn_err_en ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
val = cpu_to_le32(dif_task_params->initial_ref_tag);
tdif_context->initial_ref_tag = val;
tdif_context->app_tag_mask =
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ TDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+ TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_REFTAGMASK,
+ TDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag ? 1 : 0);
}
}
-static void set_local_completion_context(struct iscsi_task_context *context)
+static void set_local_completion_context(struct e4_iscsi_task_context *context)
{
SET_FIELD(context->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -481,7 +488,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
struct scsi_dif_task_params *dif_task_params)
{
u32 exp_data_transfer_len = conn_params->max_burst_length;
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
bool slow_io = false;
u32 task_size, val;
u8 num_sges = 0;
@@ -494,19 +501,33 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
cxt = task_params->context;
- val = cpu_to_le32(task_size);
- cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
- init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
- cmd_params);
- val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
- cxt->mstorm_st_context.sense_db.lo = val;
- val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
- cxt->mstorm_st_context.sense_db.hi = val;
+ if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
+ set_local_completion_context(cxt);
+ } else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
+ val = cpu_to_le32(task_size +
+ ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
+ cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
+ cxt->mstorm_st_context.expected_itt =
+ cpu_to_le32(pdu_header->itt);
+ } else {
+ val = cpu_to_le32(task_size);
+ cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
+ val;
+ init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+ cmd_params);
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+ cxt->mstorm_st_context.sense_db.lo = val;
+
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+ cxt->mstorm_st_context.sense_db.hi = val;
+ }
if (task_params->tx_io_size) {
init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
dif_task_params);
+ init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
+ dif_task_params);
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
&cxt->ystorm_st_context.state.data_desc,
sgl_task_params);
@@ -595,7 +616,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
@@ -637,7 +658,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
@@ -683,7 +704,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
@@ -738,7 +759,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index b6f24f91849d..c3deb77ac388 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -13,7 +13,7 @@
#include "qedi_fw_scsi.h"
struct iscsi_task_params {
- struct iscsi_task_context *context;
+ struct e4_iscsi_task_context *context;
struct iscsi_wqe *sqe;
u32 tx_io_size;
u32 rx_io_size;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 63d793f46064..f5b5a31999aa 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -52,11 +52,12 @@ void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
- struct async_data *data);
+ struct iscsi_eqe_data *data);
void qedi_start_conn_recovery(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn);
struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+ struct iscsi_eqe_data *data);
void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index a02b34ea5cab..7ec7f6e00fb8 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -539,7 +539,6 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
conn_info->dup_ack_theshold = 3;
conn_info->rcv_wnd = 65535;
- conn_info->cwnd = DEF_MAX_CWND;
conn_info->ss_thresh = 65535;
conn_info->srtt = 300;
@@ -557,8 +556,8 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
(qedi_ep->ip_type == TCP_IPV6),
1, (qedi_ep->vlan_id != 0));
+ conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
conn_info->rcv_wnd_scale = 4;
- conn_info->ts_ticks_per_second = 1000;
conn_info->da_timeout_value = 200;
conn_info->ack_frequency = 2;
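
The qedi offload fix moves the cwnd assignment to after the MSS is known and multiplies by it, which suggests the firmware expects the congestion window in bytes rather than segments. The arithmetic, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int def_max_cwnd = 10;	/* illustrative segment count */
	unsigned int mss = 1460;	/* typical Ethernet TCP MSS */

	/* window expressed in segments vs. in bytes */
	printf("segments: %u, bytes: %u\n",
	       def_max_cwnd, def_max_cwnd * mss);
	return 0;
}
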
@@ -1557,7 +1556,8 @@ char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
return msg;
}
-void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+ struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
struct qedi_ctx *qedi;
@@ -1603,7 +1603,8 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+ struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 3247287cb0e7..ea1315189922 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -182,7 +182,7 @@ struct qedi_cmd {
struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg;
struct qedi_io_bdt io_tbl;
- struct iscsi_task_context request;
+ struct e4_iscsi_task_context request;
unsigned char *sense_buffer;
dma_addr_t sense_buffer_dma;
u16 task_id;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cccc34adc0e0..029e2e69b29f 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -60,7 +60,7 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
struct qedi_ctx *qedi;
struct qedi_endpoint *qedi_ep;
- struct async_data *data;
+ struct iscsi_eqe_data *data;
int rval = 0;
if (!context || !fw_handle) {
@@ -72,18 +72,18 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
- data = (struct async_data *)fw_handle;
+ data = (struct iscsi_eqe_data *)fw_handle;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
- "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
- data->cid, data->itid, data->error_code,
- data->fw_debug_param);
+ "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
+ data->icid, data->conn_id, data->error_code,
+ data->error_pdu_opcode_reserved);
- qedi_ep = qedi->ep_tbl[data->cid];
+ qedi_ep = qedi->ep_tbl[data->icid];
if (!qedi_ep) {
QEDI_WARN(&qedi->dbg_ctx,
"Cannot process event, ep already disconnected, cid=0x%x\n",
- data->cid);
+ data->icid);
WARN_ON(1);
return -ENODEV;
}
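[Note: the event callback now interprets the firmware handle as the E4-generation struct iscsi_eqe_data, whose fields replace the old async_data ones (cid becomes icid, itid becomes conn_id, and the debug parameter carries the opcode of the offending PDU). A condensed sketch of the lookup; field widths and ordering are assumptions for illustration, as the real layout comes from the qed headers:]

	/* Abbreviated sketch; only the members the driver reads are shown,
	 * and their exact types/order in the qed headers may differ.
	 */
	struct iscsi_eqe_data_sketch {
		u32 icid;			/* connection context id */
		u16 conn_id;			/* iSCSI connection id */
		u8  error_code;
		u8  error_pdu_opcode_reserved;	/* opcode of failing PDU */
	};

	static struct qedi_endpoint *event_to_ep(struct qedi_ctx *qedi,
						 void *fw_handle)
	{
		struct iscsi_eqe_data_sketch *data = fw_handle;

		return qedi->ep_tbl[data->icid];	/* NULL if already gone */
	}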
@@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(struct status_block), &sb_phys,
+ sizeof(struct status_block_e4), &sb_phys,
GFP_KERNEL);
if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx,
@@ -858,7 +858,6 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
- qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
err_alloc_mem:
return rval;
@@ -961,7 +960,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
{
struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL;
struct global_queue *que;
u16 prod_idx;
@@ -998,7 +997,9 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
if (ret)
- continue;
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Dropping CQE 0x%x for cid=0x%x.\n",
+ que->cq_cons_idx, cqe->cqe_common.conn_id);
que->cq_cons_idx++;
if (que->cq_cons_idx == QEDI_CQ_SIZE)
@@ -1015,7 +1016,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx;
barrier();
@@ -1262,22 +1263,22 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
pbl, pbl->address.hi, pbl->address.lo, i);
- pbl->opaque.hi = 0;
- pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+ pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+ pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
pbl++;
}
/* Allocate list of PBL pages */
- qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
- PAGE_SIZE,
- &qedi->bdq_pbl_list_dma,
- GFP_KERNEL);
+ qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedi->bdq_pbl_list) {
QEDI_ERR(&qedi->dbg_ctx,
"Could not allocate list of PBL pages.\n");
return -ENOMEM;
}
- memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
/*
* Now populate PBL list with pages that contain pointers to the
@@ -1367,11 +1368,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
(qedi->global_queues[i]->cq_pbl_size +
(QEDI_PAGE_SIZE - 1));
- qedi->global_queues[i]->cq =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_mem_size,
- &qedi->global_queues[i]->cq_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1379,14 +1379,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq, 0,
- qedi->global_queues[i]->cq_mem_size);
-
- qedi->global_queues[i]->cq_pbl =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_pbl_size,
- &qedi->global_queues[i]->cq_pbl_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1394,8 +1390,6 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq_pbl, 0,
- qedi->global_queues[i]->cq_pbl_size);
/* Create PBL */
num_pages = qedi->global_queues[i]->cq_mem_size /
@@ -1456,25 +1450,22 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
- ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
- &ep->sq_dma, GFP_KERNEL);
+ ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
if (!ep->sq) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue.\n");
rval = -ENOMEM;
goto out;
}
- memset(ep->sq, 0, ep->sq_mem_size);
-
- ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
- &ep->sq_pbl_dma, GFP_KERNEL);
+ ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
if (!ep->sq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue PBL.\n");
rval = -ENOMEM;
goto out_free_sq;
}
- memset(ep->sq_pbl, 0, ep->sq_pbl_size);
/* Create PBL */
num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
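[Note: the allocation hunks in this file all follow one conversion: dma_alloc_coherent() plus a manual memset() collapses into dma_zalloc_coherent(), which returns already-zeroed coherent memory. A minimal sketch of the before/after, assuming a generic device pointer; dma_zalloc_coherent() belongs to this kernel era (later kernels zero in dma_alloc_coherent() itself and dropped the helper):]

	#include <linux/dma-mapping.h>

	static void *alloc_zeroed_queue(struct device *dev, size_t size,
					dma_addr_t *dma)
	{
		/* old form, two steps:
		 *	q = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
		 *	if (q)
		 *		memset(q, 0, size);
		 */
		return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
	}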
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index d61e3ac22e67..8a0e523fc089 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
* this source tree.
*/
-#define QEDI_MODULE_VERSION "8.10.4.0"
+#define QEDI_MODULE_VERSION "8.33.0.20"
#define QEDI_DRIVER_MAJOR_VER 8
-#define QEDI_DRIVER_MINOR_VER 10
-#define QEDI_DRIVER_REV_VER 4
-#define QEDI_DRIVER_ENG_VER 0
+#define QEDI_DRIVER_MINOR_VER 33
+#define QEDI_DRIVER_REV_VER 0
+#define QEDI_DRIVER_ENG_VER 20
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9ce28c4f9812..89a4999fa631 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1843,14 +1843,13 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (qla2x00_reset_active(vha))
goto done;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
goto done;
}
- memset(stats, 0, sizeof(*stats));
rval = QLA_FUNCTION_FAILED;
if (IS_FWI2_CAPABLE(ha)) {
@@ -2170,6 +2169,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
+ vfree(vha->scan.l);
+
if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index e3ac7078d2aa..e2d5d3ca0f57 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1435,7 +1435,7 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
ha->optrom_state = QLA_SREADING;
}
- ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ ha->optrom_buffer = vzalloc(ha->optrom_region_size);
if (!ha->optrom_buffer) {
ql_log(ql_log_warn, vha, 0x7059,
"Read: Unable to allocate memory for optrom retrieval "
@@ -1445,7 +1445,6 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
return -ENOMEM;
}
- memset(ha->optrom_buffer, 0, ha->optrom_region_size);
return 0;
}
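[Note: the same simplification applies to virtually-contiguous buffers in the hunks above and below, where vmalloc() plus memset() becomes vzalloc(). Sketch:]

	#include <linux/vmalloc.h>

	static void *alloc_optrom_buf(size_t region_size)
	{
		return vzalloc(region_size);	/* zeroed, no memset needed */
	}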
@@ -2314,16 +2313,14 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
if (!IS_FWI2_CAPABLE(ha))
return -EPERM;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x70e2,
"Failed to allocate memory for stats.\n");
return -ENOMEM;
}
- memset(stats, 0, sizeof(*stats));
-
rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
if (rval == QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 01a9b8971e88..be7d6824581a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -246,8 +246,8 @@
* There is no correspondence between an N-PORT id and an AL_PA. Therefore the
* valid range of an N-PORT id is 0 through 0x7ef.
*/
-#define NPH_LAST_HANDLE 0x7ef
-#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
+#define NPH_LAST_HANDLE 0x7ee
+#define NPH_MGMT_SERVER 0x7ef /* FFFFEF */
#define NPH_SNS 0x7fc /* FFFFFC */
#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
#define NPH_F_PORT 0x7fe /* FFFFFE */
@@ -288,6 +288,8 @@ struct name_list_extended {
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
#define FW_DEF_EXCHANGES_CNT 2048
+#define FW_MAX_EXCHANGES_CNT (32 * 1024)
+#define REDUCE_EXCHANGES_CNT (8 * 1024)
struct req_que;
struct qla_tgt_sess;
@@ -315,6 +317,29 @@ struct srb_cmd {
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+ uint32_t b24 : 24;
+
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } b;
+} port_id_t;
+#define INVALID_PORT_ID 0xFFFFFF
+
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
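[Note: port_id_t moves up in the header so the new ct_arg and srb members can use it. The union gives two aliased views of the same 24-bit N-Port ID: a packed bitfield (b24) for comparisons, and a per-byte view whose member order the #ifdefs flip so domain/area/al_pa keep their meaning on either endianness. A small usage sketch:]

	/* Sketch: construct and compare N-Port IDs via the two views. */
	static port_id_t make_nport_id(u8 domain, u8 area, u8 al_pa)
	{
		port_id_t id = {
			.b.domain = domain,
			.b.area   = area,
			.b.al_pa  = al_pa,	/* rsvd_1 stays zero */
		};
		return id;
	}

	static bool same_nport_id(port_id_t a, port_id_t b)
	{
		return a.b24 == b.b24;		/* one 24-bit compare */
	}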
@@ -338,6 +363,7 @@ struct ct_arg {
u32 rsp_size;
void *req;
void *rsp;
+ port_id_t id;
};
/*
@@ -416,6 +442,7 @@ struct srb_iocb {
struct {
uint32_t cmd_hndl;
__le16 comp_status;
+ __le16 req_que_no;
struct completion comp;
} abt;
struct ct_arg ctarg;
@@ -448,6 +475,10 @@ struct srb_iocb {
uint32_t timeout_sec;
struct list_head entry;
} nvme;
+ struct {
+ u16 cmd;
+ u16 vp_index;
+ } ctrlvp;
} u;
struct timer_list timer;
@@ -476,6 +507,8 @@ struct srb_iocb {
#define SRB_NVME_CMD 19
#define SRB_NVME_LS 20
#define SRB_PRLI_CMD 21
+#define SRB_CTRL_VP 22
+#define SRB_PRLO_CMD 23
enum {
TYPE_SRB,
@@ -499,8 +532,12 @@ typedef struct srb {
const char *name;
int iocbs;
struct qla_qpair *qpair;
+ struct list_head elem;
u32 gen1; /* scratch */
u32 gen2; /* scratch */
+ int rc;
+ int retry_count;
+ struct completion comp;
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
@@ -2164,28 +2201,6 @@ struct imm_ntfy_from_isp {
#define REQUEST_ENTRY_SIZE (sizeof(request_t))
-/*
- * 24 bit port ID type definition.
- */
-typedef union {
- uint32_t b24 : 24;
-
- struct {
-#ifdef __BIG_ENDIAN
- uint8_t domain;
- uint8_t area;
- uint8_t al_pa;
-#elif defined(__LITTLE_ENDIAN)
- uint8_t al_pa;
- uint8_t area;
- uint8_t domain;
-#else
-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
-#endif
- uint8_t rsvd_1;
- } b;
-} port_id_t;
-#define INVALID_PORT_ID 0xFFFFFF
/*
* Switch info gathering structure.
@@ -2257,14 +2272,17 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
+ DSC_GNN_ID,
DSC_GID_PN,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
DSC_GPDB,
+ DSC_GFPN_ID,
DSC_GPSC,
DSC_UPD_FCPORT,
DSC_LOGIN_COMPLETE,
+ DSC_ADISC,
DSC_DELETE_PEND,
};
@@ -2290,7 +2308,9 @@ enum fcport_mgt_event {
FCME_GPDB_DONE,
FCME_GPNID_DONE,
FCME_GFFID_DONE,
- FCME_DELETE_DONE,
+ FCME_ADISC_DONE,
+ FCME_GNNID_DONE,
+ FCME_GFPNID_DONE,
};
enum rscn_addr_format {
@@ -2315,6 +2335,7 @@ typedef struct fc_port {
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
+ unsigned int free_pending:1;
unsigned int local:1;
unsigned int logout_on_delete:1;
unsigned int logo_ack_needed:1;
@@ -2323,6 +2344,7 @@ typedef struct fc_port {
unsigned int login_pause:1;
unsigned int login_succ:1;
unsigned int query:1;
+ unsigned int id_changed:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2434,6 +2456,7 @@ static const char * const port_state_str[] = {
#define FCF_FCP2_DEVICE BIT_2
#define FCF_ASYNC_SENT BIT_3
#define FCF_CONF_COMP_SUPPORTED BIT_4
+#define FCF_ASYNC_ACTIVE BIT_5
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2470,6 +2493,11 @@ static const char * const port_state_str[] = {
#define GA_NXT_REQ_SIZE (16 + 4)
#define GA_NXT_RSP_SIZE (16 + 620)
+#define GPN_FT_CMD 0x172
+#define GPN_FT_REQ_SIZE (16 + 4)
+#define GNN_FT_CMD 0x173
+#define GNN_FT_REQ_SIZE (16 + 4)
+
#define GID_PT_CMD 0x1A1
#define GID_PT_REQ_SIZE (16 + 4)
@@ -2725,6 +2753,13 @@ struct ct_sns_req {
} port_id;
struct {
+ uint8_t reserved;
+ uint8_t domain;
+ uint8_t area;
+ uint8_t port_type;
+ } gpn_ft;
+
+ struct {
uint8_t port_type;
uint8_t domain;
uint8_t area;
@@ -2837,6 +2872,27 @@ struct ct_sns_gid_pt_data {
uint8_t port_id[3];
};
+/* It's the same for both GPN_FT and GNN_FT */
+struct ct_sns_gpnft_rsp {
+ struct {
+ struct ct_cmd_hdr header;
+ uint16_t response;
+ uint16_t residual;
+ uint8_t fragment_id;
+ uint8_t reason_code;
+ uint8_t explanation_code;
+ uint8_t vendor_unique;
+ };
+ /* Assume the largest number of targets for the union */
+ struct ct_sns_gpn_ft_data {
+ u8 control_byte;
+ u8 port_id[3];
+ u32 reserved;
+ u8 port_name[8];
+ } entries[1];
+};
+
+/* CT command response */
struct ct_sns_rsp {
struct ct_rsp_hdr header;
@@ -2912,6 +2968,33 @@ struct ct_sns_pkt {
} p;
};
+struct ct_sns_gpnft_pkt {
+ union {
+ struct ct_sns_req req;
+ struct ct_sns_gpnft_rsp rsp;
+ } p;
+};
+
+enum scan_flags_t {
+ SF_SCANNING = BIT_0,
+ SF_QUEUED = BIT_1,
+};
+
+struct fab_scan_rp {
+ port_id_t id;
+ u8 port_name[8];
+ u8 node_name[8];
+};
+
+struct fab_scan {
+ struct fab_scan_rp *l;
+ u32 size;
+ u16 scan_retry;
+#define MAX_SCAN_RETRIES 5
+ enum scan_flags_t scan_flags;
+ struct delayed_work scan_work;
+};
+
/*
* SNS command structures -- for 2200 compatibility.
*/
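[Note: struct fab_scan is the backing store for the new asynchronous fabric scan. l is an array sized for the HBA's max_fibre_devices, filled from GPN_FT/GNN_FT responses, and scan_work drives the state machine with up to MAX_SCAN_RETRIES attempts. A sketch of how such a table is walked; the real consumer is qla24xx_async_gnnft_done() near the end of this patch:]

	static void walk_scan_table(struct fab_scan *scan, u32 max_devices)
	{
		u32 i;

		for (i = 0; i < max_devices; i++) {
			struct fab_scan_rp *rp = &scan->l[i];

			if (wwn_to_u64(rp->port_name) == 0)
				continue;	/* unused slot */
			/* match rp->id / rp->port_name / rp->node_name
			 * against the fcport list here
			 */
		}
	}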
@@ -3117,7 +3200,7 @@ enum qla_work_type {
QLA_EVT_AENFX,
QLA_EVT_GIDPN,
QLA_EVT_GPNID,
- QLA_EVT_GPNID_DONE,
+ QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
QLA_EVT_PRLI,
@@ -3125,6 +3208,15 @@ enum qla_work_type {
QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
QLA_EVT_NACK,
+ QLA_EVT_RELOGIN,
+ QLA_EVT_ASYNC_PRLO,
+ QLA_EVT_ASYNC_PRLO_DONE,
+ QLA_EVT_GPNFT,
+ QLA_EVT_GPNFT_DONE,
+ QLA_EVT_GNNFT_DONE,
+ QLA_EVT_GNNID,
+ QLA_EVT_GFPNID,
+ QLA_EVT_SP_RETRY,
};
@@ -3166,7 +3258,9 @@ struct qla_work_evt {
struct {
port_id_t id;
u8 port_name[8];
+ u8 node_name[8];
void *pla;
+ u8 fc4_type;
} new_sess;
struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */
fc_port_t *fcport;
@@ -3177,6 +3271,9 @@ struct qla_work_evt {
u8 iocb[IOCB_SIZE];
int type;
} nack;
+ struct {
+ u8 fc4_type;
+ } gpnft;
} u;
};
@@ -3433,10 +3530,6 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
-#define QLA_EARLY_LINKUP(_ha) \
- ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
- _ha->flags.fw_started && !_ha->flags.fw_init_done)
-
/*
* Qlogic host adapter specific data structure.
*/
@@ -3494,8 +3587,10 @@ struct qla_hw_data {
uint32_t detected_lr_sfp:1;
uint32_t using_lr_setting:1;
+ uint32_t rida_fmt2:1;
} flags;
+ uint16_t max_exchg;
uint16_t long_range_distance; /* 32G & above */
#define LR_DISTANCE_5K 1
#define LR_DISTANCE_10K 0
@@ -3713,6 +3808,8 @@ struct qla_hw_data {
(IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
+ IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3795,7 +3892,7 @@ struct qla_hw_data {
int exchoffld_size;
int exchoffld_count;
- void *swl;
+ void *swl;
/* These are used by mailbox operations. */
uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -4107,6 +4204,7 @@ typedef struct scsi_qla_host {
#define LOOP_READY 5
#define LOOP_DEAD 6
+ unsigned long relogin_jif;
unsigned long dpc_flags;
#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
#define RESET_ACTIVE 1
@@ -4139,6 +4237,7 @@ typedef struct scsi_qla_host {
#define SET_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
+#define IOCB_WORK_ACTIVE 31
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4252,6 +4351,8 @@ typedef struct scsi_qla_host {
uint8_t n2n_node_name[WWN_SIZE];
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
+ struct list_head gpnid_list;
+ struct fab_scan scan;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -4511,6 +4612,16 @@ struct sff_8247_a0 {
#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
(IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+#define SAVE_TOPO(_ha) { \
+ if (_ha->current_topology) \
+ _ha->prev_topology = _ha->current_topology; \
+}
+
+#define N2N_TOPO(ha) \
+ ((ha->prev_topology == ISP_CFG_N && !ha->current_topology) || \
+ ha->current_topology == ISP_CFG_N || \
+ !ha->current_topology)
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
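[Note: SAVE_TOPO() latches the last non-zero topology into prev_topology, and N2N_TOPO() then treats "currently N2N", "no topology reported yet", and "was N2N before the link dropped" alike. A hypothetical call site, for illustration only:]

	/* Hypothetical illustration of the macros above. */
	static bool try_n2n_login(struct qla_hw_data *ha)
	{
		SAVE_TOPO(ha);		/* remember the current topology */
		return N2N_TOPO(ha);	/* true: attempt N2N point-to-point */
	}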
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index d231e7156134..0b190082aa8d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -127,21 +127,32 @@ static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
- struct qla_hw_data *ha = vha->hw;
+ uint16_t mb[MAX_IOCB_MB_REG];
+ int rc;
+
+ rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
+ if (rc != QLA_SUCCESS) {
+ seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
+ } else {
+ seq_puts(s, "FW Resource count\n\n");
+ seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
+ seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
+ seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
+ seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
+ seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
+ seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
+ seq_printf(s, "MAX VP count[%d]\n", mb[11]);
+ seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
+ seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
+ mb[20]);
+ seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
+ mb[21]);
+ seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
+ mb[22]);
+ seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
+ mb[23]);
- seq_puts(s, "FW Resource count\n\n");
- seq_printf(s, "Original TGT exchg count[%d]\n",
- ha->orig_fw_tgt_xcb_count);
- seq_printf(s, "current TGT exchg count[%d]\n",
- ha->cur_fw_tgt_xcb_count);
- seq_printf(s, "original Initiator Exchange count[%d]\n",
- ha->orig_fw_xcb_count);
- seq_printf(s, "Current Initiator Exchange count[%d]\n",
- ha->cur_fw_xcb_count);
- seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
- seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
- seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
- seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
+ }
return 0;
}
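[Note: the debugfs handler now issues a mailbox query (qla24xx_res_count_wait(), added by this patch) on every read instead of printing counters cached at init time, so the exchange and IOCB figures reflect the firmware's current state. A condensed sketch of the new shape:]

	static int fw_res_show(struct seq_file *s, struct scsi_qla_host *vha)
	{
		uint16_t mb[MAX_IOCB_MB_REG];
		int rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);

		if (rc != QLA_SUCCESS) {
			seq_printf(s, "Mailbox Command failed %d, mb %#x",
				   rc, mb[0]);
			return 0;
		}
		seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
		/* ... remaining mb[] fields as in the hunk above ... */
		return 0;
	}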
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d5cef0727e72..5d8688e5bc7c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1392,7 +1392,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t port_name[8];
uint8_t node_name[8];
- uint32_t remote_nport_id;
+ uint8_t remote_nport_id[4];
uint32_t reserved_5;
} f2;
} u;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index fa115c7433e5..e9295398050c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -66,6 +66,7 @@ extern void qla84xx_put_chip(struct scsi_qla_host *);
extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
+extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
@@ -104,11 +105,18 @@ int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
-int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
- void *);
+int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
+ void *, u8);
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -148,6 +156,7 @@ extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
+extern int qla2xuseresexchforels;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -203,6 +212,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_abort_cmd(srb_t *);
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
/*
* Global Functions in qla_mid.c source file.
@@ -494,6 +504,7 @@ int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
+int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int);
/*
* Global Function Prototypes in qla_isr.c source file.
@@ -639,14 +650,26 @@ extern void qla2x00_free_fcport(fc_port_t *);
extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
-void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*);
void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gpnft(scsi_qla_host_t *, u8);
+void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
+void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
+int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *);
+int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
+void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
+void qla_scan_work_fn(struct work_struct *);
+
/*
* Global Function Prototypes in qla_attr.c source file.
*/
@@ -864,8 +887,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *,
struct fc_port *, enum qlt_plogi_link_t);
void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *);
-extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool);
-extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *);
+extern void qlt_schedule_sess_for_deletion(struct fc_port *);
extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index bc3db6abc9a0..5bf9a59432f6 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -14,6 +14,10 @@ static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_rft_id(scsi_qla_host_t *);
static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
+static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
+static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
+static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
+static int qla_async_rsnn_nn(scsi_qla_host_t *);
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
@@ -175,6 +179,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
}
break;
+ case CS_TIMEOUT:
+ rval = QLA_FUNCTION_TIMEOUT;
+ /* fall through */
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
@@ -508,6 +515,72 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
return (rval);
}
+static void qla2x00_async_sns_sp_done(void *s, int rc)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct ct_sns_pkt *ct_sns;
+ struct qla_work_evt *e;
+
+ sp->rc = rc;
+ if (rc == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s exiting normally.\n",
+ sp->name);
+ } else if (rc == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s timeout\n", sp->name);
+ } else {
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ sp->retry_count++;
+ if (sp->retry_count > 3)
+ goto err;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s fail rc %x. Retry count %d\n",
+ sp->name, rc, sp->retry_count);
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
+ if (!e)
+ goto err2;
+
+ del_timer(&sp->u.iocb_cmd.timer);
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+ return;
+ }
+
+err:
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
+err2:
+ if (!e) {
+ /* please ignore kernel warning. otherwise, we have mem leak. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return;
+ }
+
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+}
+
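[Note: qla2x00_async_sns_sp_done() gives every SNS registration converted below one shared completion policy. Reduced to its decision logic, as a sketch using the work-event constants this patch introduces:]

	/* Sketch of the completion policy above. */
	static enum qla_work_type sns_next_step(int rc, int retry_count)
	{
		if (rc == QLA_SUCCESS || rc == QLA_FUNCTION_TIMEOUT)
			return QLA_EVT_UNMAP;	/* finished: free DMA bufs */
		if (retry_count > 3)
			return QLA_EVT_UNMAP;	/* give up after 3 retries */
		return QLA_EVT_SP_RETRY;	/* zero rsp, resend same srb */
	}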
/**
* qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
* @ha: HA context
@@ -517,57 +590,87 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
int
qla2x00_rft_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rft_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFT_ID_REQ_SIZE;
- arg.rsp_size = RFT_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rftid(vha, &vha->d_id);
+}
- /* Issue RFT_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rft_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns response.\n",

+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD,
- RFT_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, FC-4 types */
ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
-
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
if (vha->flags.nvme_enabled)
ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2043,
"RFT_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2044,
- "RFT_ID exiting normally.\n");
+ goto done_free_sp;
}
-
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x.\n",
+ sp->name, sp->handle, d_id->b24);
+ return rval;
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -579,12 +682,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
int
qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2046,
@@ -592,47 +690,81 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFF_ID_REQ_SIZE;
- arg.rsp_size = RFF_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
+ FC4_TYPE_FCP_SCSI);
+}
- /* Issue RFF_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 fc4feature, u8 fc4type)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD,
- RFF_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
- /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
- ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
- ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
- ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rff_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- qlt_rff_id(vha, ct_req);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns response.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
- ct_req->req.rff_id.fc4_type = type; /* SCSI - FCP */
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
+ ct_req->req.rff_id.port_id[0] = d_id->b.domain;
+ ct_req->req.rff_id.port_id[1] = d_id->b.area;
+ ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
+ ct_req->req.rff_id.fc4_feature = fc4feature;
+ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2047,
"RFF_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2048,
- "RFF_ID exiting normally.\n");
+ goto done_free_sp;
}
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
+ sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -644,54 +776,85 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
int
qla2x00_rnn_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rnn_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RNN_ID_REQ_SIZE;
- arg.rsp_size = RNN_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
+}
- /* Issue RNN_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 *node_name)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rnid";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns response.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, node_name */
ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
-
memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x204d,
"RNN_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x204e,
- "RNN_ID exiting normally.\n");
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x\n",
+ sp->name, sp->handle, d_id->b24);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
void
@@ -718,12 +881,7 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
int
qla2x00_rsnn_nn(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2050,
@@ -731,22 +889,49 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = 0;
- arg.rsp_size = RSNN_NN_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rsnn_nn(vha);
+}
- /* Issue RSNN_NN */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rsnn_nn";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns response.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD,
- RSNN_NN_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
/* Prepare CT arguments -- node_name, symbolic node_name, size */
memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
@@ -754,32 +939,33 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
/* Prepare the Symbolic Node Name */
qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
sizeof(ct_req->req.rsnn_nn.sym_node_name));
-
- /* Calculate SNN length */
ct_req->req.rsnn_nn.name_len =
(uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
- /* Update MS IOCB request */
- ms_pkt->req_bytecount =
- cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len);
- ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2051,
- "RSNN_NN issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2052,
- "RSNN_NN exiting normally.\n");
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "RSNN_NN issue IOCB failed (%d).\n", rval);
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x.\n",
+ sp->name, sp->handle);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -2790,15 +2976,20 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
fc_port_t *fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0x201d,
- "%s %8phC login state %d\n",
- __func__, fcport->port_name, fcport->fw_login_state);
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->sp->gen2 != fcport->login_gen) {
/* PLOGI/PRLI/LOGO came in while cmd was out.*/
ql_dbg(ql_dbg_disc, vha, 0x201e,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+ "%s %8phC generation changed rscn %d|%d\n",
__func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
+ fcport->rscn_gen);
return;
}
@@ -2811,7 +3002,21 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
/* cable plugged into the same place */
switch (vha->host->active_mode) {
case MODE_TARGET:
- /* NOOP. let the other guy login to us.*/
+ if (fcport->fw_login_state ==
+ DSC_LS_PRLI_COMP) {
+ u16 data[2];
+ /*
+ * Late RSCN was delivered.
+ * Remote port already login'ed.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
+ "%s %d %8phC post adisc\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ }
break;
case MODE_INITIATOR:
case MODE_DUAL:
@@ -2820,24 +3025,29 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
"%s %d %8phC post %s\n", __func__,
__LINE__, fcport->port_name,
(atomic_read(&fcport->state) ==
- FCS_ONLINE) ? "gpdb" : "gnl");
+ FCS_ONLINE) ? "adisc" : "gnl");
if (atomic_read(&fcport->state) ==
- FCS_ONLINE)
- qla24xx_post_gpdb_work(vha,
- fcport, PDO_FORCE_ADISC);
- else
+ FCS_ONLINE) {
+ u16 data[2];
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ } else {
qla24xx_post_gnl_work(vha,
fcport);
+ }
break;
}
} else { /* fcport->d_id.b24 != ea->id.b24 */
fcport->d_id.b24 = ea->id.b24;
- if (fcport->deleted == QLA_SESS_DELETED) {
+ fcport->id_changed = 1;
+ if (fcport->deleted != QLA_SESS_DELETED) {
ql_dbg(ql_dbg_disc, vha, 0x2021,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
}
} else { /* ea->sp->gen1 != fcport->rscn_gen */
@@ -2854,7 +3064,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2042,
"%s %d %8phC post del sess\n", __func__,
__LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x2045,
"%s %d %8phC login\n", __func__, __LINE__,
@@ -2878,7 +3088,7 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
struct event_arg ea;
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
@@ -2889,9 +3099,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GIDPN_DONE;
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s res %x, WWPN %8phC ID %3phC \n",
- sp->name, res, fcport->port_name, id);
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s WWPN %8phC timed out.\n",
+ sp->name, fcport->port_name);
+ qla24xx_post_gidpn_work(sp->vha, fcport);
+ sp->free(sp);
+ return;
+ } else if (res) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s fail res %x, WWPN %8phC\n",
+ sp->name, res, fcport->port_name);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s good WWPN %8phC ID %3phC\n",
+ sp->name, fcport->port_name, id);
+ }
qla2x00_fcport_event_handler(vha, &ea);
@@ -2904,16 +3127,16 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GID_PN;
fcport->scan_state = QLA_FCPORT_SCAN;
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gidpn";
sp->gen1 = fcport->rscn_gen;
@@ -2954,8 +3177,8 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
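[Note: the gidpn/gpsc hunks also tighten FCF_ASYNC_SENT handling: the flag is checked up front to avoid double-issuing a query, taken only after the srb allocation succeeds, and cleared only on the exit path that actually set it. The resulting shape, abridged:]

	/* Abridged pattern from qla24xx_async_gidpn()/qla24xx_async_gpsc(). */
	static int issue_async_ct(scsi_qla_host_t *vha, fc_port_t *fcport)
	{
		int rval = QLA_FUNCTION_FAILED;
		srb_t *sp;

		if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
			return rval;		/* query already in flight */

		sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
		if (!sp)
			goto done;		/* flag was never set */
		fcport->flags |= FCF_ASYNC_SENT;

		rval = qla2x00_start_sp(sp);
		if (rval != QLA_SUCCESS)
			goto done_free_sp;
		return rval;

	done_free_sp:
		sp->free(sp);
		fcport->flags &= ~FCF_ASYNC_SENT;
	done:
		return rval;
	}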
@@ -2974,6 +3197,7 @@ int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -2986,9 +3210,39 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
+ ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
+}
+
static void qla24xx_async_gpsc_sp_done(void *s, int res)
{
struct srb *sp = s;
@@ -3004,7 +3258,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (res == (DID_ERROR << 16)) {
/* entry status error */
@@ -3055,6 +3309,7 @@ done:
ea.event = FCME_GPSC_DONE;
ea.rc = res;
ea.fcport = fcport;
+ ea.sp = sp;
qla2x00_fcport_event_handler(vha, &ea);
sp->free(sp);
@@ -3066,14 +3321,14 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpsc";
sp->gen1 = fcport->rscn_gen;
@@ -3113,8 +3368,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -3133,7 +3388,7 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
return qla2x00_post_work(vha, e);
}
-void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
+void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
@@ -3155,43 +3410,137 @@ void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport;
- unsigned long flags;
+ fc_port_t *fcport, *conflict, *t;
+ u16 data[2];
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d port_id: %06x\n",
+ __func__, __LINE__, ea->id.b24);
- if (fcport) {
- /* cable moved. just plugged in */
- fcport->rscn_gen++;
- fcport->d_id = ea->id;
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
-
- switch (fcport->disc_state) {
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0x2064,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- break;
+ if (ea->rc) {
+ /* cable is disconnected */
+ list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
+ if (fcport->d_id.b24 == ea->id.b24) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->disc_state);
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ }
}
} else {
- /* create new fcport */
- ql_dbg(ql_dbg_disc, vha, 0x2065,
- "%s %d %8phC post new sess\n",
- __func__, __LINE__, ea->port_name);
+ /* cable is connected */
+ fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
+ if (fcport) {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if ((conflict->d_id.b24 == ea->id.b24) &&
+ (fcport != conflict)) {
+ /* 2 fcports with conflict Nport ID or
+ * an existing fcport is having nport ID
+ * conflict with new fcport.
+ */
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
+
+ fcport->rscn_gen++;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ switch (fcport->disc_state) {
+ case DSC_LOGIN_COMPLETE:
+ /* recheck session is still intact. */
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC revalidate session with ADISC\n",
+ __func__, __LINE__, fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n", __func__, __LINE__,
+ fcport->port_name);
+ fcport->d_id = ea->id;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ fcport->d_id = ea->id;
+ break;
+ default:
+ fcport->d_id = ea->id;
+ break;
+ }
+ } else {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if (conflict->d_id.b24 == ea->id.b24) {
+ /* 2 fcports with conflict Nport ID or
+ * an existing fcport is having nport ID
+ * conflict with new fcport.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
- qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
+ /* create new fcport */
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, ea->port_name);
+ qla24xx_post_newsess_work(vha, &ea->id,
+ ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
+ }
}
}
@@ -3205,11 +3554,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
struct event_arg ea;
struct qla_work_evt *e;
+ unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0x2066,
- "Async done-%s res %x ID %3phC. %8phC\n",
- sp->name, res, ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
+ if (res)
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
+ sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
+ else
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
+ sp->name, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
memset(&ea, 0, sizeof(ea));
memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
@@ -3220,9 +3576,26 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GPNID_DONE;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_del(&sp->elem);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (res) {
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+ } else if (sp->gen1) {
+ /* There was another RSCN for this Nport ID */
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+
qla2x00_fcport_event_handler(vha, &ea);
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
if (!e) {
/* please ignore kernel warning. otherwise, we have mem leak. */
if (sp->u.iocb_cmd.u.ctarg.req) {
@@ -3253,8 +3626,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
- srb_t *sp;
+ srb_t *sp, *tsp;
struct ct_sns_pkt *ct_sns;
+ unsigned long flags;
if (!vha->flags.online)
goto done;
@@ -3265,8 +3639,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpnid";
+ sp->u.iocb_cmd.u.ctarg.id = *id;
+ sp->gen1 = 0;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_for_each_entry(tsp, &vha->gpnid_list, elem) {
+ if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
+ tsp->gen1++;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ sp->free(sp);
+ goto done;
+ }
+ }
+ list_add_tail(&sp->elem, &vha->gpnid_list);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
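The gpnid_list logic above coalesces duplicate queries with a generation counter: a second GPN_ID for an Nport ID already in flight is dropped after bumping the pending request's gen1, and the completion handler (the sp->gen1 check earlier in this hunk) re-issues the query once. A distilled userspace sketch of the idiom; the fixed table and all names here are illustrative only:

#include <stdio.h>

struct pending {
	unsigned int id;	/* key (the Nport ID here) */
	unsigned int gen;	/* bumped by duplicate submitters */
	int in_use;
};

static struct pending table[8];

/* Submit: coalesce onto an in-flight entry when the key matches. */
static int submit(unsigned int id)
{
	int i, slot = -1;

	for (i = 0; i < 8; i++) {
		if (table[i].in_use && table[i].id == id) {
			table[i].gen++;		/* remember the repeat */
			return 0;		/* drop the duplicate */
		}
		if (!table[i].in_use && slot < 0)
			slot = i;
	}
	if (slot < 0)
		return -1;
	table[slot] = (struct pending){ id, 0, 1 };
	printf("issue query for %06x\n", id);
	return 0;
}

/* Complete: if anyone piled on while we ran, go around once more. */
static void complete(int i)
{
	unsigned int id = table[i].id, gen = table[i].gen;

	table[i].in_use = 0;
	if (gen) {
		printf("%06x changed %u time(s), re-query\n", id, gen);
		submit(id);
	}
}

int main(void)
{
	submit(0x010203);
	submit(0x010203);	/* coalesced, gen becomes 1 */
	complete(0);		/* re-issues one fresh query */
	return 0;
}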
@@ -3393,7 +3781,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -3441,3 +3829,720 @@ done_free_sp:
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
+
+/* GPN_FT + GNN_FT */
+static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ u64 twwn;
+ int rc = 0;
+
+ if (!ha->num_vhosts)
+ return 0;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ twwn = wwn_to_u64(vp->port_name);
+ if (wwn == twwn) {
+ rc = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return rc;
+}
+
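qla2x00_is_a_vp() above is a small instance of a pattern this patch leans on heavily: convert an 8-byte world-wide name to a u64 once, then compare integers while walking a lock-protected list. A minimal userspace sketch of the same idea, with toy stand-ins for the kernel's list handling and wwn_to_u64() (names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's wwn_to_u64(): pack eight big-endian
 * name bytes into a host integer so comparisons are cheap. */
static uint64_t wwn_to_u64(const uint8_t *wwn)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

/* Toy model of a virtual-port list node. */
struct vport {
	uint8_t port_name[8];
	struct vport *next;
};

/* Return 1 if wwn belongs to any vport, mirroring the walk in
 * qla2x00_is_a_vp() (minus the spinlock, which a real caller
 * would hold around the traversal). */
static int is_a_vp(const struct vport *head, uint64_t wwn)
{
	const struct vport *vp;

	for (vp = head; vp; vp = vp->next)
		if (wwn_to_u64(vp->port_name) == wwn)
			return 1;
	return 0;
}

int main(void)
{
	struct vport a = { { 0x21, 0, 0, 0x24, 0xff, 0, 0, 1 }, NULL };

	printf("%d\n", is_a_vp(&a, wwn_to_u64(a.port_name))); /* 1 */
	return 0;
}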
+void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ fc_port_t *fcport;
+ u32 i, rc;
+ bool found;
+ u8 fc4type = sp->gen2;
+ struct fab_scan_rp *rp;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+
+ if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s scan stop due to chip reset %x/%x\n",
+ sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
+ goto out;
+ }
+
+ rc = sp->rc;
+ if (rc) {
+ vha->scan.scan_retry++;
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Fabric scan failed on all retries.\n");
+ }
+ goto out;
+ }
+ vha->scan.scan_retry = 0;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ u64 wwn;
+
+ rp = &vha->scan.l[i];
+ found = false;
+
+ wwn = wwn_to_u64(rp->port_name);
+ if (wwn == 0)
+ continue;
+
+ if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
+ continue;
+
+ /* Bypass reserved domain fields. */
+ if ((rp->id.b.domain & 0xf0) == 0xf0)
+ continue;
+
+ /* Bypass virtual ports of the same host. */
+ if (qla2x00_is_a_vp(vha, wwn))
+ continue;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
+ continue;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->d_id.b24 = rp->id.b24;
+ found = true;
+ /*
+ * The device was not a fabric device before;
+ * mark it as one now.
+ */
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+ qla2x00_clear_loop_id(fcport);
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ }
+ break;
+ }
+
+ if (!found) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, rp->port_name);
+ qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
+ rp->node_name, NULL, fc4type);
+ }
+ }
+
+ /*
+ * Logout all previous fabric devices marked lost, except FCP2 devices.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+ continue;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ } else
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
+out:
+ qla24xx_sp_unmap(vha, sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
+
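qla24xx_async_gnnft_done() reconciles the driver's fcport list against a fabric snapshot with a mark-and-sweep: every known port is first marked QLA_FCPORT_SCAN, ports present in the scan results are flipped to QLA_FCPORT_FOUND, and whatever is still marked afterwards is logged out. A compact sketch of that reconciliation with simplified types; nothing here is driver API:

#include <stdio.h>
#include <string.h>

enum state { ST_SCAN, ST_FOUND };

struct port {
	char name[8];
	enum state st;
};

/* Mark-and-sweep: mark all known ports, match the fabric report,
 * then sweep (report) whatever was not re-discovered. */
static void reconcile(struct port *known, int nk,
		      const char (*seen)[8], int ns)
{
	int i, j;

	for (i = 0; i < nk; i++)
		known[i].st = ST_SCAN;			/* mark */

	for (j = 0; j < ns; j++)
		for (i = 0; i < nk; i++)
			if (!memcmp(known[i].name, seen[j], 8))
				known[i].st = ST_FOUND;	/* match */

	for (i = 0; i < nk; i++)			/* sweep */
		if (known[i].st == ST_SCAN)
			printf("%.8s: lost, schedule logout\n",
			       known[i].name);
}

int main(void)
{
	struct port known[2] = { { "port-aa", 0 }, { "port-bb", 0 } };
	const char seen[1][8] = { "port-aa" };

	reconcile(known, 2, seen, 1);	/* reports port-bb as lost */
	return 0;
}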
+static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_work_evt *e;
+ struct ct_sns_req *ct_req =
+ (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+ struct ct_sns_gpnft_rsp *ct_rsp =
+ (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+ struct ct_sns_gpn_ft_data *d;
+ struct fab_scan_rp *rp;
+ int i, j, k;
+ u16 cmd = be16_to_cpu(ct_req->command);
+
+ /* The gen2 field holds the fc4_type. */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x FC4Type %x\n",
+ sp->name, res, sp->gen2);
+
+ if (res) {
+ unsigned long flags;
+
+ sp->free(sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s rescan failed on all retries\n",
+ sp->name);
+ }
+ return;
+ }
+
+ if (!res) {
+ port_id_t id;
+ u64 wwn;
+
+ j = 0;
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ d = &ct_rsp->entries[i];
+
+ id.b.rsvd_1 = 0;
+ id.b.domain = d->port_id[0];
+ id.b.area = d->port_id[1];
+ id.b.al_pa = d->port_id[2];
+ wwn = wwn_to_u64(d->port_name);
+
+ if (id.b24 == 0 || wwn == 0)
+ continue;
+
+ if (cmd == GPN_FT_CMD) {
+ rp = &vha->scan.l[j];
+ rp->id = id;
+ memcpy(rp->port_name, d->port_name, 8);
+ j++;
+ } else { /* GNN_FT_CMD */
+ for (k = 0; k < vha->hw->max_fibre_devices;
+ k++) {
+ rp = &vha->scan.l[k];
+ if (id.b24 == rp->id.b24) {
+ memcpy(rp->node_name,
+ d->port_name, 8);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (cmd == GPN_FT_CMD)
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
+ else
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
+ if (!e) {
+ /* Please ignore the kernel warning; otherwise we have a memory leak. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s unable to alloc work element\n",
+ sp->name);
+ sp->free(sp);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ sp->rc = res;
+ e->u.iosb.sp = sp;
+
+ qla2x00_post_work(vha, e);
+}
+
+/*
+ * Get the WWNN list for the given fc4_type.
+ *
+ * The same SRB is assumed to be re-used from the GPN_FT request
+ * to avoid freeing and re-allocating memory.
+ */
+static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
+ u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: req %p rsp %p are not setup\n",
+ __func__, sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.rsp);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ WARN_ON(1);
+ goto done_free_sp;
+ }
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
+ sp->u.iocb_cmd.u.ctarg.rsp_size);
+
+ /* GNN_FT req (reuses the gpn_ft request layout) */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+} /* GNNFT */
+
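As the comment above qla24xx_async_gnnft() says, the GNN_FT pass reuses the SRB and DMA buffers that GPN_FT set up; only the CT preamble and sizes are rewritten in place. A rough, hypothetical shape of that two-phase buffer reuse, with plain malloc standing in for the coherent DMA allocations:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical two-phase exchange reusing one buffer pair. */
struct xchg {
	void *req, *rsp;
	size_t req_size, rsp_size;
};

static int phase(struct xchg *x, const char *cmd, size_t req_size)
{
	/* Rewrite the preamble in place; keep the allocations. */
	memset(x->req, 0, x->req_size);
	memset(x->rsp, 0, x->rsp_size);
	x->req_size = req_size;
	memcpy(x->req, cmd, strlen(cmd) + 1);
	printf("sent %s (req %zu bytes)\n", cmd, x->req_size);
	return 0;
}

int main(void)
{
	struct xchg x = { malloc(64), malloc(256), 64, 256 };

	if (!x.req || !x.rsp)
		return 1;
	phase(&x, "GPN_FT", 64);	/* first pass: port names */
	phase(&x, "GNN_FT", 64);	/* second pass, same buffers */
	free(x.req);
	free(x.rsp);
	return 0;
}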
+void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+ del_timer(&sp->u.iocb_cmd.timer);
+ qla24xx_async_gnnft(vha, sp, sp->gen2);
+}
+
+/* Get the WWPN list for a given fc4_type. */
+int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+ u32 rspsz;
+ unsigned long flags;
+
+ if (!vha->flags.online)
+ return rval;
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags & SF_SCANNING) {
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
+ return rval;
+ }
+ vha->scan.scan_flags |= SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ return rval;
+ }
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ rspsz = sizeof(struct ct_sns_gpnft_rsp) +
+ ((vha->hw->max_fibre_devices - 1) *
+ sizeof(struct ct_sns_gpn_ft_data));
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ memset(vha->scan.l, 0, vha->scan.size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
+
+ /* GPN_FT req */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+}
+
+void qla_scan_work_fn(struct work_struct *work)
+{
+ struct fab_scan *s = container_of(to_delayed_work(work),
+ struct fab_scan, scan_work);
+ struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
+ scan);
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule loop resync\n", __func__);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_QUEUED;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
+
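qla_scan_work_fn() recovers its scsi_qla_host by applying container_of() twice: delayed_work to fab_scan, then fab_scan to scsi_qla_host. The macro is plain pointer arithmetic over offsetof(); a self-contained userspace rendition with toy structures, not the driver's:

#include <stddef.h>
#include <stdio.h>

/* Userspace rendition of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct fab_scan {
	unsigned int scan_flags;
	struct work scan_work;
};

int main(void)
{
	struct fab_scan s = { 0x5f, { 0 } };
	struct work *w = &s.scan_work;	/* all the callback receives */
	struct fab_scan *back = container_of(w, struct fab_scan,
					     scan_work);

	printf("scan_flags=%#x\n", back->scan_flags);	/* 0x5f */
	return 0;
}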
+/* GNN_ID */
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ qla24xx_post_gnl_work(vha, ea->fcport);
+}
+
+static void qla2x00_async_gnnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
+ struct event_arg ea;
+ u64 wwnn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwnn = wwn_to_u64(node_name);
+ if (wwnn)
+ memcpy(fcport->node_name, node_name, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GNNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->node_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GNN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
+ GNN_ID_RSP_SIZE);
+
+ /* GNN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gnnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
+
+int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+/* GFPN_ID */
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_gpsc_work(vha, fcport);
+}
+
+static void qla2x00_async_gfpnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
+ struct event_arg ea;
+ u64 wwn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwn = wwn_to_u64(fpn);
+ if (wwn)
+ memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GFPNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->fabric_port_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GFPN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gfpnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
+ GFPN_ID_RSP_SIZE);
+
+ /* GFPN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gfpnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
+
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
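qla24xx_post_gnnid_work() and qla24xx_post_gfpnid_work() both follow the file's deferral idiom: check loop/unload state, allocate a typed event, stash the fcport, and hand the event to the DPC thread via qla2x00_post_work(). The skeleton of that posting step, with illustrative types (the driver's queue is a locked FIFO, not this LIFO):

#include <stdlib.h>
#include <stdio.h>

enum evt_type { EVT_GNNID, EVT_GFPNID };

struct evt {
	enum evt_type type;
	void *fcport;
	struct evt *next;
};

static struct evt *queue;	/* consumed later by a worker */

/* Post an event, as the qla24xx_post_*_work() helpers do; return
 * nonzero on allocation failure so the caller can fall back. */
static int post_work(enum evt_type type, void *fcport)
{
	struct evt *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	e->type = type;
	e->fcport = fcport;
	e->next = queue;
	queue = e;
	return 0;
}

int main(void)
{
	int dummy_fcport;

	if (post_work(EVT_GNNID, &dummy_fcport))
		return 1;
	printf("queued event type %d\n", queue->type);
	free(queue);
	return 0;
}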
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1bafa043f9f1..aececf664654 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -41,6 +41,7 @@ static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
struct event_arg *);
+static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */
@@ -58,7 +59,8 @@ qla2x00_sp_timeout(struct timer_list *t)
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- sp->free(sp);
+ if (sp->type != SRB_ELS_DCMD)
+ sp->free(sp);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
@@ -102,14 +104,21 @@ qla2x00_async_iocb_timeout(void *data)
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct event_arg ea;
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
- sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+ if (fcport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+ "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ } else {
+ pr_info("Async-%s timeout - hdl=%x.\n",
+ sp->name, sp->handle);
+ }
switch (sp->type) {
case SRB_LOGIN_CMD:
+ if (!fcport)
+ break;
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
@@ -123,6 +132,8 @@ qla2x00_async_iocb_timeout(void *data)
qla24xx_handle_plogi_done_event(fcport->vha, &ea);
break;
case SRB_LOGOUT_CMD:
+ if (!fcport)
+ break;
qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
break;
case SRB_CT_PTHRU_CMD:
@@ -130,6 +141,7 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PLOGI:
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
+ case SRB_CTRL_VP:
sp->done(sp, QLA_FUNCTION_TIMEOUT);
break;
}
@@ -146,7 +158,8 @@ qla2x00_async_login_sp_done(void *ptr, int res)
ql_dbg(ql_dbg_disc, vha, 0x20dd,
"%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
if (!test_bit(UNLOADING, &vha->dpc_flags)) {
memset(&ea, 0, sizeof(ea));
ea.event = FCME_PLOGI_DONE;
@@ -173,11 +186,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (!vha->flags.online)
goto done;
- if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
- (fcport->fw_login_state == DSC_LS_PRLI_PEND))
- goto done;
-
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -185,8 +193,11 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->flags |= FCF_ASYNC_SENT;
fcport->logout_completed = 0;
+ fcport->disc_state = DSC_LOGIN_PEND;
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
@@ -201,7 +212,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -216,8 +226,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -227,7 +237,7 @@ qla2x00_async_logout_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
lio->u.logio.data);
@@ -239,9 +249,11 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
struct srb_iocb *lio;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- rval = QLA_FUNCTION_FAILED;
fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -272,16 +284,126 @@ done:
return rval;
}
+void
+qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ /* Don't re-login in target mode */
+ if (!fcport->tgt_session)
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qlt_logo_completion_handler(fcport, data[0]);
+}
+
+static void
+qla2x00_async_prlo_sp_done(void *s, int res)
+{
+ srb_t *sp = (srb_t *)s;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->vha;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp);
+}
+
+int
+qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_PRLO_CMD;
+ sp->name = "prlo";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_prlo_sp_done;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2070,
+ "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
+
+static
+void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (ea->data[0] != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "%s %8phC: adisc fail: post delete\n",
+ __func__, ea->fcport->port_name);
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ return;
+ }
+
+ if (ea->fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != ea->fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, ea->fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ return;
+ }
+
+ __qla24xx_handle_gpdb_event(vha, ea);
+}
+
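qla24xx_handle_adisc_event() shows the staleness check used by nearly every completion handler in this patch: the SRB snapshots rscn_gen and login_gen at submit time (sp->gen1 and sp->gen2), and the handler discards or re-drives the result if either counter moved while the command was in flight. The essence of that optimistic-concurrency check in standalone form; the names are illustrative:

#include <stdio.h>

struct port { unsigned int rscn_gen, login_gen; };

struct cmd {		/* snapshot taken when the command is issued */
	unsigned int gen1;	/* rscn_gen at submit time */
	unsigned int gen2;	/* login_gen at submit time */
};

/* 0 = result usable, 1 = stale (state changed in flight). */
static int stale(const struct port *p, const struct cmd *c)
{
	if (c->gen2 != p->login_gen)
		return 1;	/* login state moved: drop result */
	if (c->gen1 != p->rscn_gen)
		return 1;	/* RSCN arrived: re-query instead */
	return 0;
}

int main(void)
{
	struct port p = { 3, 7 };
	struct cmd c = { p.rscn_gen, p.login_gen };	/* submit */

	p.rscn_gen++;		/* RSCN lands mid-flight */
	printf("stale=%d\n", stale(&p, &c));		/* 1 */
	return 0;
}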
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
struct scsi_qla_host *vha = sp->vha;
+ struct event_arg ea;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
- lio->u.logio.data);
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s res %x %8phC\n",
+ sp->name, res, sp->fcport->port_name);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_ADISC_DONE;
+ ea.rc = res;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.iop[0] = lio->u.logio.iop[0];
+ ea.iop[1] = lio->u.logio.iop[1];
+ ea.fcport = sp->fcport;
+ ea.sp = sp;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
sp->free(sp);
}
@@ -313,15 +435,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
return rval;
done_free_sp:
sp->free(sp);
done:
fcport->flags &= ~FCF_ASYNC_SENT;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
return rval;
}
@@ -333,9 +455,19 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
u16 i, n, found = 0, loop_id;
port_id_t id;
u64 wwn;
- u8 opt = 0, current_login_state;
+ u16 data[2];
+ u8 current_login_state;
fcport = ea->fcport;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc,
+ fcport->login_gen, fcport->last_login_gen,
+ fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
@@ -356,9 +488,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e0,
- "%s %8phC login gen changed login %d|%d\n",
- __func__, fcport->port_name,
- fcport->last_login_gen, fcport->login_gen);
+ "%s %8phC login gen changed\n",
+ __func__, fcport->port_name);
return;
}
@@ -400,7 +531,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e3,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport, 1);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -430,8 +561,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e4,
"%s %d %8phC post gpdb\n",
__func__, __LINE__, fcport->port_name);
- opt = PDO_FORCE_ADISC;
- qla24xx_post_gpdb_work(vha, fcport, opt);
+
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
case DSC_LS_PORT_UNAVAIL:
default:
@@ -449,36 +586,29 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
if (!found) {
/* fw has no record of this port */
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- qla2x00_find_new_loop_id(vha, fcport);
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
- } else {
- for (i = 0; i < n; i++) {
- e = &vha->gnl.l[i];
- id.b.domain = e->port_id[0];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[2];
- id.b.rsvd_1 = 0;
- loop_id = le16_to_cpu(e->nport_handle);
-
- if (fcport->d_id.b24 == id.b24) {
- conflict_fcport =
- qla2x00_find_fcport_by_wwpn(vha,
- e->port_name, 0);
-
- ql_dbg(ql_dbg_disc, vha, 0x20e6,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict_fcport->port_name);
- qlt_schedule_sess_for_deletion
- (conflict_fcport, 1);
- }
-
- if (fcport->loop_id == loop_id) {
- /* FW already picked this loop id for another fcport */
- qla2x00_find_new_loop_id(vha, fcport);
- }
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport);
}
+
+ /* FW already picked this loop id for another fcport */
+ if (fcport->loop_id == loop_id)
+ fcport->loop_id = FC_NO_LOOP_ID;
}
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -496,6 +626,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
struct get_name_list_extended *e;
u64 wwn;
struct list_head h;
+ bool found = false;
ql_dbg(ql_dbg_disc, vha, 0x20e7,
"Async done-%s res %x mb[1]=%x mb[2]=%x \n",
@@ -539,12 +670,44 @@ qla24xx_async_gnl_sp_done(void *s, int res)
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
+ /* Create a new fcport for each session the firmware knows about but we don't. */
+ for (i = 0; i < n; i++) {
+ port_id_t id;
+ u64 wwnn;
+
+ e = &vha->gnl.l[i];
+ wwn = wwn_to_u64(e->port_name);
+
+ found = false;
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!memcmp((u8 *)&wwn, fcport->port_name,
+ WWN_SIZE)) {
+ found = true;
+ break;
+ }
+ }
+
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
+
+ if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC %06x post new sess\n",
+ __func__, __LINE__, (u8 *)&wwn, id.b24);
+ wwnn = wwn_to_u64(e->node_name);
+ qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
+ (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
+ }
+ }
+
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -558,14 +721,13 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
unsigned long flags;
u16 *mb;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
@@ -573,8 +735,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
if (vha->gnl.sent) {
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- rval = QLA_SUCCESS;
- goto done;
+ return QLA_SUCCESS;
}
vha->gnl.sent = 1;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -582,6 +743,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
@@ -616,8 +779,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -630,6 +793,7 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -639,31 +803,18 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- struct port_database_24xx *pd;
fc_port_t *fcport = sp->fcport;
u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
- int rval = QLA_SUCCESS;
struct event_arg ea;
ql_dbg(ql_dbg_disc, vha, 0x20db,
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- if (res) {
- rval = res;
- goto gpd_error_out;
- }
-
- pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
-
- rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-gpd_error_out:
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPDB_DONE;
- ea.rc = rval;
ea.fcport = fcport;
ea.sp = sp;
@@ -754,7 +905,6 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -783,6 +933,7 @@ int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
e->u.fcport.fcport = fcport;
e->u.fcport.opt = opt;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -796,16 +947,16 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GPDB;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
@@ -851,47 +1002,17 @@ done_free_sp:
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
qla24xx_post_gpdb_work(vha, fcport, opt);
return rval;
}
static
-void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- int rval = ea->rc;
- fc_port_t *fcport = ea->fcport;
unsigned long flags;
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
- fcport->disc_state, fcport->fw_login_state, rval);
-
- if (ea->sp->gen2 != fcport->login_gen) {
- /* target side must have changed it. */
- ql_dbg(ql_dbg_disc, vha, 0x20d3,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
- __func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen,
- fcport->login_gen);
- return;
- } else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
- return;
- }
-
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- return;
- }
-
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
ea->fcport->deleted = 0;
@@ -905,47 +1026,157 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
!vha->hw->flags.gpsc_supported) {
ql_dbg(ql_dbg_disc, vha, 0x20d6,
"%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
+ __func__, __LINE__, ea->fcport->port_name,
vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, fcport);
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20d7,
- "%s %d %8phC post gpsc fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
- vha->fcport_count);
-
- qla24xx_post_gpsc_work(vha, fcport);
+ if (ea->fcport->id_changed) {
+ ea->fcport->id_changed = 0;
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gfpnid fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gfpnid_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gpsc_work(vha, ea->fcport);
+ }
}
+ } else if (ea->fcport->login_succ) {
+ /*
+ * We have an existing session. A late RSCN delivery
+ * must have triggered the session to be re-validated.
+ * Session is still valid.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
+ "%s %d %8phC session revalidate success\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+}
+
+static
+void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+ struct port_database_24xx *pd;
+ struct srb *sp = ea->sp;
+
+ pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
+ fcport->disc_state, pd->current_login_state, ea->rc);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ switch (pd->current_login_state) {
+ case PDS_PRLI_COMPLETE:
+ __qla24xx_parse_gpdb(vha, fcport, pd);
+ break;
+ case PDS_PLOGI_PENDING:
+ case PDS_PLOGI_COMPLETE:
+ case PDS_PRLI_PENDING:
+ case PDS_PRLI2_PENDING:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
+ __func__, __LINE__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ case PDS_LOGO_PENDING:
+ case PDS_PORT_UNAVAILABLE:
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ __qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
-int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
- if (fcport->login_retry == 0)
- return 0;
+ u8 login = 0;
+ int rc;
- if (fcport->scan_state != QLA_FCPORT_FOUND)
- return 0;
+ if (qla_tgt_mode_enabled(vha))
+ return;
+
+ if (qla_dual_mode_enabled(vha)) {
+ if (N2N_TOPO(vha->hw)) {
+ u64 mywwn, wwn;
+
+ mywwn = wwn_to_u64(vha->port_name);
+ wwn = wwn_to_u64(fcport->port_name);
+ if (mywwn > wwn)
+ login = 1;
+ else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ && time_after_eq(jiffies,
+ fcport->plogi_nack_done_deadline))
+ login = 1;
+ } else {
+ login = 1;
+ }
+ } else {
+ /* initiator mode */
+ login = 1;
+ }
+
+ if (login) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ rc = qla2x00_find_new_loop_id(vha, fcport);
+ if (rc) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess - out of loopid\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->scan_state = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ }
+ ql_dbg(ql_dbg_disc, vha, 0x20bf,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ }
+}
+
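For N2N (point-to-point) topology, qla_chk_n2n_b4_login() breaks the login tie by comparing the two ports' WWPNs as integers: only the side with the numerically larger name initiates PLOGI, so both ends agree without negotiating. The arbitration rule in isolation, as a minimal sketch:

#include <stdint.h>
#include <stdio.h>

/* N2N tie-break: the port with the larger WWPN initiates login.
 * Both sides evaluate the same expression and reach opposite,
 * consistent answers. */
static int should_login(uint64_t my_wwpn, uint64_t peer_wwpn)
{
	return my_wwpn > peer_wwpn;
}

int main(void)
{
	uint64_t a = 0x21000024ff000001ULL;
	uint64_t b = 0x21000024ff000002ULL;

	printf("a logs in: %d\n", should_login(a, b));	/* 0 */
	printf("b logs in: %d\n", should_login(b, a));	/* 1 */
	return 0;
}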
+int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ u16 data[2];
+ u64 wwn;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
- fcport->loop_id);
+ fcport->login_gen, fcport->login_retry,
+ fcport->loop_id, fcport->scan_state);
- fcport->login_retry--;
+ if (fcport->login_retry == 0)
+ return 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ return 0;
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return 0;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
+ }
}
/* For pure target mode, login is not initiated. */
@@ -957,19 +1188,23 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
+ fcport->login_retry--;
+
switch (fcport->disc_state) {
case DSC_DELETED:
- if (fcport->loop_id == FC_NO_LOOP_ID) {
+ wwn = wwn_to_u64(fcport->node_name);
+ if (wwn == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post GNNID\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnnid_work(vha, fcport);
+ } else if (fcport->loop_id == FC_NO_LOOP_ID) {
ql_dbg(ql_dbg_disc, vha, 0x20bd,
"%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gnl(vha, fcport);
+ qla24xx_post_gnl_work(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20bf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
+ qla_chk_n2n_b4_login(vha, fcport);
}
break;
@@ -981,40 +1216,26 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
}
- if (fcport->flags & FCF_FCP2_DEVICE) {
- u8 opt = PDO_FORCE_ADISC;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c9,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
-
- fcport->disc_state = DSC_GPDB;
- qla24xx_post_gpdb_work(vha, fcport, opt);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20cf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
- }
-
+ qla_chk_n2n_b4_login(vha, fcport);
break;
case DSC_LOGIN_FAILED:
ql_dbg(ql_dbg_disc, vha, 0x20d0,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gidpn_work(vha, fcport);
+ if (N2N_TOPO(vha->hw))
+ qla_chk_n2n_b4_login(vha, fcport);
+ else
+ qla24xx_post_gidpn_work(vha, fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
ql_dbg(ql_dbg_disc, vha, 0x20d1,
- "%s %d %8phC post gpdb\n",
+ "%s %d %8phC post adisc\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
default:
@@ -1040,16 +1261,15 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
switch (fcport->disc_state) {
case DSC_DELETED:
case DSC_LOGIN_COMPLETE:
- qla24xx_post_gidpn_work(fcport->vha, fcport);
+ qla24xx_post_gpnid_work(fcport->vha, &ea->id);
break;
-
default:
break;
}
}
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
- u8 *port_name, void *pla)
+ u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
struct qla_work_evt *e;
e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
@@ -1058,47 +1278,20 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
e->u.new_sess.id = *id;
e->u.new_sess.pla = pla;
+ e->u.new_sess.fc4_type = fc4_type;
memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+ if (node_name)
+ memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
return qla2x00_post_work(vha, e);
}
static
-int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
- struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- if (test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- switch (vha->host->active_mode) {
- case MODE_INITIATOR:
- case MODE_DUAL:
- if (fcport->scan_state == QLA_FCPORT_FOUND)
- qla24xx_fcport_handle_login(vha, fcport);
- break;
-
- case MODE_TARGET:
- default:
- /* no-op */
- break;
- }
-
- return 0;
-}
-
-static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
struct event_arg *ea)
{
fc_port_t *fcport = ea->fcport;
- if (fcport->scan_state != QLA_FCPORT_FOUND) {
- fcport->login_retry++;
- return;
- }
-
ql_dbg(ql_dbg_disc, vha, 0x2102,
"%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
__func__, fcport->port_name, fcport->disc_state,
@@ -1113,8 +1306,10 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
return;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
+ }
}
if (fcport->flags & FCF_ASYNC_SENT) {
@@ -1132,7 +1327,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gidpn(vha, fcport);
+ qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -1141,16 +1336,16 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport, *f, *tf;
+ fc_port_t *f, *tf;
uint32_t id = 0, mask, rid;
- int rc;
+ unsigned long flags;
switch (ea->event) {
- case FCME_RELOGIN:
case FCME_RSCN:
case FCME_GIDPN_DONE:
case FCME_GPSC_DONE:
case FCME_GPNID_DONE:
+ case FCME_GNNID_DONE:
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
return;
@@ -1171,20 +1366,15 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
- fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
- if (!fcport) {
- /* cable moved */
- rc = qla24xx_post_gpnid_work(vha, &ea->id);
- if (rc) {
- ql_log(ql_log_warn, vha, 0xd044,
- "RSCN GPNID work failed %02x%02x%02x\n",
- ea->id.b.domain, ea->id.b.area,
- ea->id.b.al_pa);
- }
- } else {
- ea->fcport = fcport;
- qla24xx_handle_rscn_event(fcport, ea);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
+ schedule_delayed_work(&vha->scan.scan_work, 5);
}
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
break;
case RSCN_AREA_ADDR:
case RSCN_DOM_ADDR:
@@ -1227,7 +1417,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
qla24xx_handle_gnl_done_event(vha, ea);
break;
case FCME_GPSC_DONE:
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ qla24xx_handle_gpsc_event(vha, ea);
break;
case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
qla24xx_handle_plogi_done_event(vha, ea);
@@ -1244,8 +1434,14 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_GFFID_DONE:
qla24xx_handle_gffid_event(vha, ea);
break;
- case FCME_DELETE_DONE:
- qla24xx_handle_delete_done_event(vha, ea);
+ case FCME_ADISC_DONE:
+ qla24xx_handle_adisc_event(vha, ea);
+ break;
+ case FCME_GNNID_DONE:
+ qla24xx_handle_gnnid_event(vha, ea);
+ break;
+ case FCME_GFPNID_DONE:
+ qla24xx_handle_gfpnid_event(vha, ea);
break;
default:
BUG_ON(1);
@@ -1327,6 +1523,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
done_free_sp:
sp->free(sp);
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
@@ -1368,6 +1565,13 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
sp->name = "abort";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+
+ if (vha->flags.qpairs_available && cmd_sp->qpair)
+ abt_iocb->u.abt.req_que_no =
+ cpu_to_le16(cmd_sp->qpair->req->id);
+ else
+ abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+
sp->done = qla24xx_abort_sp_done;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
@@ -1402,6 +1606,9 @@ qla24xx_async_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -1452,6 +1659,42 @@ static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
port_id_t cid; /* conflict Nport id */
+ u16 lid;
+ struct fc_port *conflict_fcport;
+ unsigned long flags;
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1,
+ ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC Remote is trying to login\n",
+ __func__, __LINE__, fcport->port_name);
+ return;
+ }
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
switch (ea->data[0]) {
case MBS_COMMAND_COMPLETE:
@@ -1467,11 +1710,19 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_prli_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name);
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->loop_id, ea->fcport->d_id.b24);
+
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
ea->fcport->send_els_logo = 0;
+ ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
}
break;
@@ -1513,8 +1764,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
ea->fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(ea->fcport);
- qla24xx_post_gidpn_work(vha, ea->fcport);
+ lid = ea->iop[1] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(ea->fcport->port_name),
+ ea->fcport->d_id, lid, &conflict_fcport);
+
+ if (conflict_fcport) {
+ /*
+ * Another fcport shares the same loop_id/nport id.
+ * The conflicting fcport needs to finish cleanup
+ * before this fcport can proceed to login.
+ */
+ conflict_fcport->conflict = ea->fcport;
+ ea->fcport->login_pause = 1;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(ea->fcport);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+
+ qla2x00_clear_loop_id(ea->fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ ea->fcport->loop_id = lid;
+ ea->fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ }
break;
}
return;
@@ -2540,70 +2821,27 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
return rval;
}
-void
-qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+static void
+qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
int rval;
- uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
- eft_size, fce_size, mq_size;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
- struct rsp_que *rsp = ha->rsp_q_map[0];
- if (ha->fw_dump) {
+ if (ha->eft) {
ql_dbg(ql_dbg_init, vha, 0x00bd,
- "Firmware dump already allocated.\n");
+ "%s: Offload Mem is already allocated.\n",
+ __func__);
return;
}
- ha->fw_dumped = 0;
- ha->fw_dump_cap_flags = 0;
- dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
- req_q_size = rsp_q_size = 0;
-
- if (IS_QLA27XX(ha))
- goto try_fce;
-
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- fixed_size = sizeof(struct qla2100_fw_dump);
- } else if (IS_QLA23XX(ha)) {
- fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
- mem_size = (ha->fw_memory_size - 0x11000 + 1) *
- sizeof(uint16_t);
- } else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
- fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
- else if (IS_QLA81XX(ha))
- fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
- else if (IS_QLA25XX(ha))
- fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
- else
- fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
-
- mem_size = (ha->fw_memory_size - 0x100000 + 1) *
- sizeof(uint32_t);
- if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
- mq_size = sizeof(struct qla2xxx_mq_chain);
- /*
- * Allocate maximum buffer size for all queues.
- * Resizing must be done at end-of-dump processing.
- */
- mq_size += ha->max_req_queues *
- (req->length * sizeof(request_t));
- mq_size += ha->max_rsp_queues *
- (rsp->length * sizeof(response_t));
- }
- if (ha->tgt.atio_ring)
- mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ if (IS_FWI2_CAPABLE(ha)) {
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
!IS_QLA27XX(ha))
goto try_eft;
-try_fce:
if (ha->fce)
dma_free_coherent(&ha->pdev->dev,
FCE_SIZE, ha->fce, ha->fce_dma);
@@ -2631,7 +2869,6 @@ try_fce:
ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
- fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
@@ -2648,7 +2885,7 @@ try_eft:
ql_log(ql_log_warn, vha, 0x00c1,
"Unable to allocate (%d KB) for EFT.\n",
EFT_SIZE / 1024);
- goto cont_alloc;
+ goto eft_err;
}
rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
@@ -2657,17 +2894,76 @@ try_eft:
"Unable to initialize EFT (%d).\n", rval);
dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
tc_dma);
- goto cont_alloc;
+ goto eft_err;
}
ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
- eft_size = EFT_SIZE;
ha->eft_dma = tc_dma;
ha->eft = tc;
}
-cont_alloc:
+eft_err:
+ return;
+}
+
+void
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+{
+ uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+ eft_size, fce_size, mq_size;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct qla2xxx_fw_dump *fw_dump;
+
+ dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ req_q_size = rsp_q_size = 0;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ fixed_size = sizeof(struct qla2100_fw_dump);
+ } else if (IS_QLA23XX(ha)) {
+ fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
+ mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+ sizeof(uint16_t);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+ else if (IS_QLA81XX(ha))
+ fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+ else if (IS_QLA25XX(ha))
+ fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
+ else
+ fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
+ mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+ sizeof(uint32_t);
+ if (ha->mqenable) {
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ mq_size = sizeof(struct qla2xxx_mq_chain);
+ /*
+ * Allocate maximum buffer size for all queues.
+ * Resizing must be done at end-of-dump processing.
+ */
+ mq_size += ha->max_req_queues *
+ (req->length * sizeof(request_t));
+ mq_size += ha->max_rsp_queues *
+ (rsp->length * sizeof(response_t));
+ }
+ if (ha->tgt.atio_ring)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
+ goto try_eft;
+
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+try_eft:
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+ eft_size = EFT_SIZE;
+ }
+
if (IS_QLA27XX(ha)) {
if (!ha->fw_dump_template) {
ql_log(ql_log_warn, vha, 0x00ba,
@@ -2695,51 +2991,44 @@ cont_alloc:
ha->exlogin_size;
allocate:
- ha->fw_dump = vmalloc(dump_size);
- if (!ha->fw_dump) {
- ql_log(ql_log_warn, vha, 0x00c4,
- "Unable to allocate (%d KB) for firmware dump.\n",
- dump_size / 1024);
-
- if (ha->fce) {
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
- ha->fce_dma);
- ha->fce = NULL;
- ha->fce_dma = 0;
- }
-
- if (ha->eft) {
- dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
- ha->eft_dma);
- ha->eft = NULL;
- ha->eft_dma = 0;
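+	/* Reallocate the dump buffer only when the required size has changed. */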
+ if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
+ fw_dump = vmalloc(dump_size);
+ if (!fw_dump) {
+ ql_log(ql_log_warn, vha, 0x00c4,
+ "Unable to allocate (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+ } else {
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ ha->fw_dump = fw_dump;
+
+ ha->fw_dump_len = dump_size;
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
+ "Allocated (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+
+ if (IS_QLA27XX(ha))
+ return;
+
+ ha->fw_dump->signature[0] = 'Q';
+ ha->fw_dump->signature[1] = 'L';
+ ha->fw_dump->signature[2] = 'G';
+ ha->fw_dump->signature[3] = 'C';
+ ha->fw_dump->version = htonl(1);
+
+ ha->fw_dump->fixed_size = htonl(fixed_size);
+ ha->fw_dump->mem_size = htonl(mem_size);
+ ha->fw_dump->req_q_size = htonl(req_q_size);
+ ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+ ha->fw_dump->eft_size = htonl(eft_size);
+ ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
+ ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+
+ ha->fw_dump->header_size =
+ htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
- return;
}
- ha->fw_dump_len = dump_size;
- ql_dbg(ql_dbg_init, vha, 0x00c5,
- "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
-
- if (IS_QLA27XX(ha))
- return;
-
- ha->fw_dump->signature[0] = 'Q';
- ha->fw_dump->signature[1] = 'L';
- ha->fw_dump->signature[2] = 'G';
- ha->fw_dump->signature[3] = 'C';
- ha->fw_dump->version = htonl(1);
-
- ha->fw_dump->fixed_size = htonl(fixed_size);
- ha->fw_dump->mem_size = htonl(mem_size);
- ha->fw_dump->req_q_size = htonl(req_q_size);
- ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
-
- ha->fw_dump->eft_size = htonl(eft_size);
- ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
- ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
-
- ha->fw_dump->header_size =
- htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
static int
@@ -3065,9 +3354,12 @@ enable_82xx_npiv:
if (rval != QLA_SUCCESS)
goto failed;
- if (!fw_major_version && ql2xallocfwdump
- && !(IS_P3P_TYPE(ha)))
+ if (!fw_major_version && !(IS_P3P_TYPE(ha)))
+ qla2x00_alloc_offload_mem(vha);
+
+ if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
+
} else {
goto failed;
}
@@ -3278,6 +3570,12 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] |= BIT_4;
else
ha->fw_options[2] &= ~BIT_4;
+
+	/* Reserve 1/2 of emergency exchanges for ELS. */
+ if (qla2xuseresexchforels)
+ ha->fw_options[2] |= BIT_8;
+ else
+ ha->fw_options[2] &= ~BIT_8;
}
ql_dbg(ql_dbg_init, vha, 0x00e8,
@@ -3671,6 +3969,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
port_id_t id;
+ unsigned long flags;
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -3752,7 +4051,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.area = area;
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
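+	/* Serialize host map updates with the hardware lock. */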
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_host_map(vha, id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
@@ -4293,6 +4594,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
+ if (ha->flags.rida_fmt2) {
+ /* With Rida Format 2, the login is already triggered.
+ * We know who is on the other side of the wire.
+		 * No need to do a login to find out, or to drop into
+ * qla2x00_configure_local_loop().
+ */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ if (qla_tgt_mode_enabled(vha)) {
+ /* allow the other side to start the login */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+ }
} else if (ha->current_topology == ISP_CFG_NL) {
clear_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
@@ -4521,6 +4837,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
(uint8_t *)ha->gid_list,
entries * sizeof(struct gid_list_info));
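+	/* Mark all ports for scan; those still present are set to FOUND below. */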
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
@@ -4531,22 +4851,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
- /*
- * Mark local devices that were present with FCF_DEVICE_LOST for now.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&fcport->state) == FCS_ONLINE &&
- fcport->port_type != FCT_BROADCAST &&
- (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
-
- ql_dbg(ql_dbg_disc, vha, 0x2096,
- "Marking port lost loop_id=0x%04x.\n",
- fcport->loop_id);
-
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
- }
- }
-
 	/* Initiate N2N login. */
if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
rval = qla24xx_n2n_handle_login(vha, new_fcport);
@@ -4589,6 +4893,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
+ new_fcport->scan_state = QLA_FCPORT_FOUND;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
@@ -4620,13 +4925,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport->d_id.b24 = new_fcport->d_id.b24;
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
-
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
-
+ fcport->scan_state = QLA_FCPORT_FOUND;
found++;
break;
}
@@ -4637,11 +4936,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Allocate a new replacement fcport. */
fcport = new_fcport;
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -4662,11 +4956,38 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Base iIDMA settings on HBA port speed. */
fcport->fp_speed = ha->link_data_rate;
- qla2x00_update_fcport(vha, fcport);
-
found_devs++;
}
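+	/* Process scan results: drop sessions that disappeared, log in to found ports. */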
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if (fcport->scan_state == QLA_FCPORT_SCAN) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ }
+
+ if (fcport->scan_state == QLA_FCPORT_FOUND)
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
cleanup_allocation:
kfree(new_fcport);
@@ -4920,9 +5241,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- fcport->scan_state = QLA_FCPORT_SCAN;
- }
/* Mark the time right before querying FW for connected ports.
* This process is long, asynchronous and by the time it's done,
@@ -4932,7 +5250,17 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
* will be newer than discovery_gen. */
qlt_do_generation_tick(vha, &discovery_gen);
- rval = qla2x00_find_all_fabric_devs(vha);
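+		/* Use the async GPN_FT fabric scan when supported; fall back to the inline walk. */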
+ if (USE_ASYNC_SCAN(ha)) {
+ rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
+ if (rval)
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ rval = qla2x00_find_all_fabric_devs(vha);
+ }
if (rval != QLA_SUCCESS)
break;
} while (0);
@@ -5237,9 +5565,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
"%s %d %8phC post del sess\n",
__func__, __LINE__,
fcport->port_name);
-
- qlt_schedule_sess_for_deletion_lock
- (fcport);
+ qlt_schedule_sess_for_deletion(fcport);
continue;
}
}
@@ -5974,6 +6300,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ SAVE_TOPO(ha);
+ ha->flags.rida_fmt2 = 0;
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -8173,9 +8501,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
int ret = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = qpair->hw;
- if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
- goto fail;
-
qpair->delete_in_progress = 1;
while (atomic_read(&qpair->ref_count))
msleep(500);
@@ -8183,6 +8508,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
goto fail;
+
ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
if (ret != QLA_SUCCESS)
goto fail;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 17d2c20f1f75..4d32426393c7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -273,6 +273,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
+ init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d810a447cb4a..1b62e943ec49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2158,7 +2158,9 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
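+		/* With shadow registers the firmware mirrors the out pointer in host memory. */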
+ if (qpair->use_shadow_reg)
+ cnt = *req->out_ptr;
+ else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2392,26 +2394,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
- struct qla_hw_data *ha = vha->hw;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- unsigned long flags = 0;
ql_dbg(ql_dbg_io, vha, 0x3069,
"%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- /* Abort the exchange */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- ql_dbg(ql_dbg_io, vha, 0x3070,
- "mbx abort_command failed.\n");
- } else {
- ql_dbg(ql_dbg_io, vha, 0x3071,
- "mbx abort_command success.\n");
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
complete(&lio->u.els_logo.comp);
}
@@ -2631,7 +2620,7 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
struct scsi_qla_host *vha = sp->vha;
ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
- "%s ELS hdl=%x, portid=%06x done %8pC\n",
+ "%s ELS hdl=%x, portid=%06x done %8phC\n",
sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
complete(&lio->u.els_plogi.comp);
@@ -3286,7 +3275,9 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->handle =
+ cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ aio->u.abt.cmd_hndl));
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->handle_to_abort =
cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
@@ -3294,7 +3285,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
abt_iocb->vp_index = vha->vp_idx;
- abt_iocb->req_que_no = cpu_to_le16(req->id);
+ abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
/* Send the command to the firmware */
wmb();
}
@@ -3381,6 +3372,40 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
return rval;
}
+static void
+qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
+{
+ int map, pos;
+
+ vce->entry_type = VP_CTRL_IOCB_TYPE;
+ vce->handle = sp->handle;
+ vce->entry_count = 1;
+ vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
+ vce->vp_count = cpu_to_le16(1);
+
+ /*
+ * index map in firmware starts with 1; decrement index
+ * this is ok as we never use index 0
+ */
+ map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
+ pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
+ vce->vp_idx_map[map] |= 1 << pos;
+}
+
+static void
+qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags =
+ cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
+
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+ logio->port_id[2] = sp->fcport->d_id.b.domain;
+ logio->vp_index = sp->fcport->vha->vp_idx;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3459,6 +3484,12 @@ qla2x00_start_sp(srb_t *sp)
case SRB_NACK_LOGO:
qla2x00_send_notify_ack_iocb(sp, pkt);
break;
+ case SRB_CTRL_VP:
+ qla25xx_ctrlvp_iocb(sp, pkt);
+ break;
+ case SRB_PRLO_CMD:
+ qla24xx_prlo_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2fd79129bb2a..14109d86c3f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -809,6 +809,7 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
+ SAVE_TOPO(ha);
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -922,7 +923,6 @@ skip_rio:
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
- ha->flags.gpsc_supported = 1;
vha->flags.management_server_logged_in = 0;
break;
@@ -1009,7 +1009,7 @@ skip_rio:
if (qla_ini_mode_enabled(vha)) {
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
fcport->logout_on_delete = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -1059,8 +1059,7 @@ global_port_update:
* Mark all devices as missing so we will login again.
*/
atomic_set(&vha->loop_state, LOOP_UP);
-
- qla2x00_mark_all_devices_lost(vha, 1);
+ vha->scan.scan_retry = 0;
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
@@ -1202,6 +1201,7 @@ global_port_update:
qla2xxx_wake_dpc(vha);
}
}
+ /* fall through */
case MBA_IDC_COMPLETE:
if (ha->notify_lb_portup_comp && !vha->vp_idx)
complete(&ha->lb_portup_comp);
@@ -1574,7 +1574,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* borrowing sts_entry_24xx.comp_status.
same location as ct_entry_24xx.comp_status
*/
- res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+ res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
sp->name);
sp->done(sp, res);
@@ -1769,7 +1769,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* drop through */
+ /* fall through */
default:
data[0] = MBS_COMMAND_ERROR;
break;
@@ -1936,6 +1936,37 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
sp->done(sp, ret);
}
+static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
+ struct vp_ctrl_entry_24xx *vce)
+{
+ const char func[] = "CTRLVP-IOCB";
+ srb_t *sp;
+ int rval = QLA_SUCCESS;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
+ if (!sp)
+ return;
+
+ if (vce->entry_status != 0) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c4,
+ "%s: Failed to complete IOCB -- error status (%x)\n",
+ sp->name, vce->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c5,
+ "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
+ sp->name, le16_to_cpu(vce->comp_status),
+ le16_to_cpu(vce->vp_idx_failed));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_vport, vha, 0x10c6,
+ "Done %s.\n", __func__);
+ }
+
+ sp->rc = rval;
+ sp->done(sp, rval);
+}
+
/**
* qla2x00_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2369,7 +2400,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int res = 0;
uint16_t state_flags = 0;
uint16_t retry_delay = 0;
- uint8_t no_logout = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -2640,7 +2670,6 @@ check_scsi_status:
break;
case CS_PORT_LOGGED_OUT:
- no_logout = 1;
case CS_PORT_CONFIG_CHG:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
@@ -2671,11 +2700,8 @@ check_scsi_status:
port_state_str[atomic_read(&fcport->state)],
comp_status);
- if (no_logout)
- fcport->logout_on_delete = 0;
-
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -2972,9 +2998,9 @@ process_err:
(response_t *)pkt);
break;
} else {
- /* drop through */
qlt_24xx_process_atio_queue(vha, 1);
}
+ /* fall through */
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
@@ -3005,6 +3031,10 @@ process_err:
qla24xx_mbx_iocb_entry(vha, rsp->req,
(struct mbx_24xx_entry *)pkt);
break;
+ case VP_CTRL_IOCB_TYPE:
+ qla_ctrlvp_completed(vha, rsp->req,
+ (struct vp_ctrl_entry_24xx *)pkt);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cb717d47339f..7397aeddd96c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -17,6 +17,7 @@ static struct mb_cmd_name {
{MBC_GET_PORT_DATABASE, "GPDB"},
{MBC_GET_ID_LIST, "GIDList"},
{MBC_GET_LINK_PRIV_STATS, "Stats"},
+ {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
};
static const char *mb_to_str(uint16_t cmd)
@@ -3731,6 +3732,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
unsigned long flags;
int found;
port_id_t id;
+ struct fc_port *fcport;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3753,7 +3755,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
-
+ ha->current_topology = ISP_CFG_NL;
qlt_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
@@ -3797,6 +3799,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
}
+ ha->flags.gpsc_supported = 1;
+ ha->current_topology = ISP_CFG_F;
/* buffer to buffer credit flag */
vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
@@ -3862,6 +3866,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.port_name);
/* N2N. direct connect */
+ ha->current_topology = ISP_CFG_N;
+ ha->flags.rida_fmt2 = 1;
vha->d_id.b.domain = rptid_entry->port_id[2];
vha->d_id.b.area = rptid_entry->port_id[1];
vha->d_id.b.al_pa = rptid_entry->port_id[0];
@@ -3869,6 +3875,40 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, SET_AL_PA);
spin_unlock_irqrestore(&ha->vport_slock, flags);
+
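+		/* N2N: rescan all ports, then log in to the reported remote port or create it. */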
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ rptid_entry->u.f2.port_name, 1);
+
+ if (fcport) {
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ } else {
+ id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
+ id.b.area = rptid_entry->u.f2.remote_nport_id[1];
+ id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
+ qla24xx_post_newsess_work(vha, &id,
+ rptid_entry->u.f2.port_name,
+ rptid_entry->u.f2.node_name,
+ NULL,
+ FC4_TYPE_UNKNOWN);
+ }
}
}
@@ -3945,83 +3985,6 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
}
/*
- * qla24xx_control_vp
- * Enable a virtual port for given host
- *
- * Input:
- * ha = adapter block pointer.
- * vhba = virtual adapter (unused)
- * index = index number for enabled VP
- *
- * Returns:
- * qla2xxx local function return status code.
- *
- * Context:
- * Kernel context.
- */
-int
-qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
-{
- int rval;
- int map, pos;
- struct vp_ctrl_entry_24xx *vce;
- dma_addr_t vce_dma;
- struct qla_hw_data *ha = vha->hw;
- int vp_index = vha->vp_idx;
- struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
- "Entered %s enabling index %d.\n", __func__, vp_index);
-
- if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
- return QLA_PARAMETER_ERROR;
-
- vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
- if (!vce) {
- ql_log(ql_log_warn, vha, 0x10c2,
- "Failed to allocate VP control IOCB.\n");
- return QLA_MEMORY_ALLOC_FAILED;
- }
-
- vce->entry_type = VP_CTRL_IOCB_TYPE;
- vce->entry_count = 1;
- vce->command = cpu_to_le16(cmd);
- vce->vp_count = cpu_to_le16(1);
-
- /* index map in firmware starts with 1; decrement index
- * this is ok as we never use index 0
- */
- map = (vp_index - 1) / 8;
- pos = (vp_index - 1) & 7;
- mutex_lock(&ha->vport_lock);
- vce->vp_idx_map[map] |= 1 << pos;
- mutex_unlock(&ha->vport_lock);
-
- rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c3,
- "Failed to issue VP control IOCB (%x).\n", rval);
- } else if (vce->entry_status != 0) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c4,
- "Failed to complete IOCB -- error status (%x).\n",
- vce->entry_status);
- rval = QLA_FUNCTION_FAILED;
- } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c5,
- "Failed to complete IOCB -- completion status (%x).\n",
- le16_to_cpu(vce->comp_status));
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
- "Done %s.\n", __func__);
- }
-
- dma_pool_free(ha->s_dma_pool, vce, vce_dma);
-
- return rval;
-}
-
-/*
* qla2x00_send_change_request
* Receive or disable RSCN request from fabric controller
*
@@ -6160,8 +6123,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
}
/* Check for logged in state. */
- if (current_login_state != PDS_PRLI_COMPLETE &&
- last_login_state != PDS_PRLI_COMPLETE) {
+ if (current_login_state != PDS_PRLI_COMPLETE) {
ql_dbg(ql_dbg_mbx, vha, 0x119a,
"Unable to verify login-state (%x/%x) for loop_id %x.\n",
current_login_state, last_login_state, fcport->loop_id);
@@ -6350,3 +6312,32 @@ qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
return rval;
}
+
+int qla24xx_res_count_wait(struct scsi_qla_host *vha,
+ uint16_t *out_mb, int out_mb_sz)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: fail\n", __func__);
+ } else {
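+		/* Copy back at most the full IOCB mailbox register set. */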
+ if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
+ memcpy(out_mb, mc.mb, out_mb_sz);
+ else
+ memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: done\n", __func__);
+ }
+done:
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index bd9f14bf7ac2..e965b16f21e3 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -50,10 +50,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_vp_map(vha, SET_VP_IDX);
-
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&ha->vport_lock);
return vp_id;
@@ -158,9 +159,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
- spin_lock_irqsave(&vha->hw->vport_slock, flags);
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
- spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
@@ -264,13 +265,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
case MBA_LIP_RESET:
case MBA_POINT_TO_POINT:
case MBA_CHG_IN_CONNECTION:
- case MBA_PORT_UPDATE:
- case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_async, vha, 0x5024,
"Async_event for VP[%d], mb=0x%x vha=%p.\n",
i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
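+		/* PORT/RSCN updates carry the target VP index in the low byte of mb[3]. */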
+ case MBA_PORT_UPDATE:
+ case MBA_RSCN_UPDATE:
+ if ((mb[3] & 0xff) == vha->vp_idx) {
+ ql_dbg(ql_dbg_async, vha, 0x5024,
+ "Async_event for VP[%d], mb=0x%x vha=%p\n",
+ i, *mb, vha);
+ qla2x00_async_event(vha, rsp, mb);
+ }
+ break;
}
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -319,8 +327,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
- qla2x00_do_work(vha);
-
/* Check if Fw is ready to configure VP first */
if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
@@ -343,15 +349,19 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
"FCPort update end.\n");
}
- if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
- !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
- atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+ atomic_read(&vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, vha, 0x4018,
- "Relogin needed scheduled.\n");
- qla2x00_relogin(vha);
- ql_dbg(ql_dbg_dpc, vha, 0x4019,
- "Relogin needed end.\n");
+ if (!vha->relogin_jif ||
+ time_after_eq(jiffies, vha->relogin_jif)) {
+ vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+ ql_dbg(ql_dbg_dpc, vha, 0x4018,
+ "Relogin needed scheduled.\n");
+ qla24xx_post_relogin_work(vha);
+ }
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -475,7 +485,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
- vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+ vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
vha->dpc_flags = 0L;
@@ -569,14 +579,16 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (req) {
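+	/* Delete the queue only if firmware actually created it. */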
+ if (req && vha->flags.qpairs_req_created) {
req->options |= BIT_0;
ret = qla25xx_init_req_que(vha, req);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_req_que(vha, req);
+ }
return ret;
}
@@ -584,14 +596,16 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (rsp) {
+ if (rsp && vha->flags.qpairs_rsp_created) {
rsp->options |= BIT_0;
ret = qla25xx_init_rsp_que(vha, rsp);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_rsp_que(vha, rsp);
+ }
return ret;
}
@@ -884,3 +898,79 @@ que_failed:
failed:
return 0;
}
+
+static void qla_ctrlvp_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+
+ complete(&sp->comp);
+ /* don't free sp here. Let the caller do the free */
+}
+
+/**
+ * qla24xx_control_vp() - Enable a virtual port for given host
+ * @vha: adapter block pointer
+ * @cmd: command type to be sent for enable virtual port
+ *
+ * Return: qla2xxx local function return status code.
+ */
+int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ int vp_index = vha->vp_idx;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+
+ ql_dbg(ql_dbg_vport, vha, 0x10c1,
+ "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);
+
+ if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
+ return QLA_PARAMETER_ERROR;
+
+ sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CTRL_VP;
+ sp->name = "ctrl_vp";
+ sp->done = qla_ctrlvp_sp_done;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
+ sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "%s: %s Failed submission. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
+ sp->name, sp->handle);
+
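+	/* Wait for the IOCB to complete; the done callback records the result in sp->rc. */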
+ wait_for_completion(&sp->comp);
+ rval = sp->rc;
+ switch (rval) {
+ case QLA_FUNCTION_TIMEOUT:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ __func__, sp->name, rval);
+ break;
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
+ __func__, sp->name);
+ goto done_free_sp;
+ default:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+done:
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 0aa9c38bf347..525ac35a757b 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -11,8 +11,6 @@
#include "qla_def.h"
#include "qla_gbl.h"
-#include <linux/delay.h>
-
#define TIMEOUT_100_MS 100
static const uint32_t qla8044_reg_tbl[] = {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 46f2d0cf7c0d..12ee6e02d146 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -277,6 +277,12 @@ MODULE_PARM_DESC(ql2xenablemsix,
" 1 -- enable MSI-X interrupt mechanism.\n"
" 2 -- enable MSI interrupt mechanism.\n");
+int qla2xuseresexchforels;
+module_param(qla2xuseresexchforels, int, 0444);
+MODULE_PARM_DESC(qla2xuseresexchforels,
+ "Reserve 1/2 of emergency exchanges for ELS.\n"
+ " 0 (default): disabled");
+
/*
* SCSI host template entry points
*/
@@ -294,7 +300,6 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
-static void qla83xx_disable_laser(scsi_qla_host_t *vha);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
@@ -1705,93 +1710,103 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
-void
-qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+static void
+__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
- int que, cnt, status;
+ int cnt, status;
unsigned long flags;
srb_t *sp;
+ scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
uint8_t trace = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < ha->max_req_queues; que++) {
- req = ha->req_q_map[que];
- if (!req)
- continue;
- if (!req->outstanding_cmds)
- continue;
- for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (sp) {
- req->outstanding_cmds[cnt] = NULL;
- if (sp->cmd_type == TYPE_SRB) {
- if (sp->type == SRB_NVME_CMD ||
- sp->type == SRB_NVME_LS) {
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- qla_nvme_abort(ha, sp);
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- } else if (GET_CMD_SP(sp) &&
- !ha->flags.eeh_busy &&
- (!test_bit(ABORT_ISP_ACTIVE,
- &vha->dpc_flags)) &&
- (sp->type == SRB_SCSI_CMD)) {
- /*
- * Don't abort commands in
- * adapter during EEH
- * recovery as it's not
- * accessible/responding.
- *
- * Get a reference to the sp
- * and drop the lock. The
- * reference ensures this
- * sp->done() call and not the
- * call in qla2xxx_eh_abort()
- * ends the SCSI command (with
- * result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- /*
- * Get rid of extra reference
- * if immediate exit from
- * ql2xxx_eh_abort
- */
- if (status == FAILED &&
- (qla2x00_isp_reg_stat(ha)))
- atomic_dec(
- &sp->ref_count);
- }
- sp->done(sp, res);
- } else {
- if (!vha->hw->tgt.tgt_ops || !tgt ||
- qla_ini_mode_enabled(vha)) {
- if (!trace)
- ql_dbg(ql_dbg_tgt_mgt,
- vha, 0xf003,
- "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
- vha->dpc_flags);
- continue;
- }
- cmd = (struct qla_tgt_cmd *)sp;
- qlt_abort_cmd_on_host_reset(cmd->vha,
- cmd);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ req = qp->req;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ req->outstanding_cmds[cnt] = NULL;
+ if (sp->cmd_type == TYPE_SRB) {
+ if (sp->type == SRB_NVME_CMD ||
+ sp->type == SRB_NVME_LS) {
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ qla_nvme_abort(ha, sp);
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ } else if (GET_CMD_SP(sp) &&
+ !ha->flags.eeh_busy &&
+ (!test_bit(ABORT_ISP_ACTIVE,
+ &vha->dpc_flags)) &&
+ (sp->type == SRB_SCSI_CMD)) {
+ /*
+ * Don't abort commands in
+ * adapter during EEH
+ * recovery as it's not
+ * accessible/responding.
+ *
+ * Get a reference to the sp
+ * and drop the lock. The
+ * reference ensures this
+ * sp->done() call and not the
+ * call in qla2xxx_eh_abort()
+ * ends the SCSI command (with
+ * result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ /*
+ * Get rid of extra reference
+ * if immediate exit from
+ * ql2xxx_eh_abort
+ */
+ if (status == FAILED &&
+ (qla2x00_isp_reg_stat(ha)))
+ atomic_dec(
+ &sp->ref_count);
}
+ sp->done(sp, res);
+ } else {
+ if (!vha->hw->tgt.tgt_ops || !tgt ||
+ qla_ini_mode_enabled(vha)) {
+ if (!trace)
+ ql_dbg(ql_dbg_tgt_mgt,
+ vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
+ continue;
+ }
+ cmd = (struct qla_tgt_cmd *)sp;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
}
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+}
+
+void
+qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+{
+ int que;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla2x00_abort_all_cmds(ha->base_qpair, res);
+
+ for (que = 0; que < ha->max_qpairs; que++) {
+ if (!ha->queue_pair_map[que])
+ continue;
+
+ __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
+ }
}
static int
@@ -2689,14 +2704,22 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
{
struct scsi_qla_host *vha = container_of(work,
struct scsi_qla_host, iocb_work);
- int cnt = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int i = 20;
+ unsigned long flags;
- while (!list_empty(&vha->work_list)) {
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
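+	/* Drain at most 20 events per run, then clear the active flag for requeue. */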
+ while (!list_empty(&vha->work_list) && i > 0) {
qla2x00_do_work(vha);
- cnt++;
- if (cnt > 10)
- break;
+ i--;
}
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
}
/*
@@ -2790,6 +2813,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
+ ha->max_exchg = FW_MAX_EXCHANGES_CNT;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3011,9 +3035,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
base_vha = qla2x00_create_host(sht, ha);
if (!base_vha) {
ret = -ENOMEM;
- qla2x00_mem_free(ha);
- qla2x00_free_req_que(ha, req);
- qla2x00_free_rsp_que(ha, rsp);
goto probe_hw_failed;
}
@@ -3023,7 +3044,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
if (IS_QLA2XXX_MIDTYPE(ha))
- base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
+ base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
@@ -3074,7 +3095,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
- goto probe_init_failed;
+ goto probe_hw_failed;
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
@@ -3193,10 +3214,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+
if (ha->mqenable) {
bool mq = false;
bool startit = false;
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
if (QLA_TGT_MODE_ENABLED()) {
mq = true;
@@ -3390,6 +3412,9 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
+ qla2x00_mem_free(ha);
+ qla2x00_free_req_que(ha, req);
+ qla2x00_free_rsp_que(ha, rsp);
qla2x00_clear_drv_active(ha);
iospace_config_failed:
@@ -3448,8 +3473,13 @@ qla2x00_shutdown(struct pci_dev *pdev)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ }
/* Turn adapter off line */
vha->flags.online = 0;
@@ -3609,6 +3639,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+ vfree(base_vha->scan.l);
+
if (IS_QLAFX00(ha))
qlafx00_driver_shutdown(base_vha, 20);
@@ -3628,10 +3660,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla84xx_put_chip(base_vha);
- /* Laser should be disabled only for ISP2031 */
- if (IS_QLA2031(ha))
- qla83xx_disable_laser(base_vha);
-
/* Disable timer */
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
@@ -3692,8 +3720,16 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ if (ha->flags.fw_started) {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ ha->flags.fw_started = 0;
+ }
+ }
vha->flags.online = 0;
@@ -3833,7 +3869,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
@@ -4221,6 +4257,9 @@ qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
u32 temp;
*ret_cnt = FW_DEF_EXCHANGES_CNT;
+ if (max_cnt > vha->hw->max_exchg)
+ max_cnt = vha->hw->max_exchg;
+
if (qla_ini_mode_enabled(vha)) {
if (ql2xiniexchg > max_cnt)
ql2xiniexchg = max_cnt;
@@ -4250,8 +4289,8 @@ int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
int rval;
- u16 size, max_cnt;
- u32 temp;
+ u16 size, max_cnt;
+ u32 actual_cnt, totsz;
struct qla_hw_data *ha = vha->hw;
if (!ha->flags.exchoffld_enabled)
@@ -4268,16 +4307,19 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return rval;
}
- qla2x00_number_of_exch(vha, &temp, max_cnt);
- temp *= size;
+ qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
+ ql_log(ql_log_info, vha, 0xd014,
+ "Actual exchange offload count: %d.\n", actual_cnt);
+
+ totsz = actual_cnt * size;
- if (temp != ha->exchoffld_size) {
+ if (totsz != ha->exchoffld_size) {
qla2x00_free_exchoffld_buffer(ha);
- ha->exchoffld_size = temp;
+ ha->exchoffld_size = totsz;
ql_log(ql_log_info, vha, 0xd016,
- "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
- max_cnt, size, temp);
+ "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
+ max_cnt, actual_cnt, size, totsz);
ql_log(ql_log_info, vha, 0xd017,
"Exchange Buffers requested size = 0x%x\n",
@@ -4288,7 +4330,21 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
if (!ha->exchoffld_buf) {
ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
- "Failed to allocate memory for exchoffld_buf_dma.\n");
+ "Failed to allocate memory for Exchange Offload.\n");
+
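+		/* Trim the exchange count and let the caller retry; disable offload if already minimal. */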
+ if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
+ ha->max_exchg -= REDUCE_EXCHANGES_CNT;
+ } else if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + 512)) {
+ ha->max_exchg -= 512;
+ } else {
+ ha->flags.exchoffld_enabled = 0;
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
+ "Disabling Exchange offload due to lack of memory\n");
+ }
+ ha->exchoffld_size = 0;
+
return -ENOMEM;
}
}
@@ -4514,6 +4570,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
INIT_LIST_HEAD(&vha->nvme_rport_list);
+ INIT_LIST_HEAD(&vha->gpnid_list);
+ INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -4531,6 +4589,19 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
return NULL;
}
+ /* todo: what about ext login? */
+ vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
+ vha->scan.l = vmalloc(vha->scan.size);
+ if (!vha->scan.l) {
+ ql_log(ql_log_fatal, vha, 0xd04a,
+ "Alloc failed for scan database.\n");
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
+ vha->gnl.l, vha->gnl.ldma);
+ scsi_remove_host(vha->host);
+ return NULL;
+ }
+ INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
+
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s",
@@ -4566,15 +4637,18 @@ int
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
unsigned long flags;
+ bool q = false;
spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
+
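+	/* Schedule the iocb work only if it is not already pending. */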
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+
spin_unlock_irqrestore(&vha->work_lock, flags);
- if (QLA_EARLY_LINKUP(vha->hw))
- schedule_work(&vha->iocb_work);
- else
- qla2xxx_wake_dpc(vha);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
return QLA_SUCCESS;
}
@@ -4623,6 +4697,7 @@ int qla2x00_post_async_##name##_work( \
e->u.logio.data[0] = data[0]; \
e->u.logio.data[1] = data[1]; \
} \
+ fcport->flags |= FCF_ASYNC_ACTIVE; \
return qla2x00_post_work(vha, e); \
}
@@ -4631,6 +4706,8 @@ qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
+qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
+qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -4699,6 +4776,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
struct qlt_plogi_ack_t *pla =
(struct qlt_plogi_ack_t *)e->u.new_sess.pla;
uint8_t free_fcport = 0;
+ u64 wwn;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC enter\n",
+ __func__, __LINE__, e->u.new_sess.port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
@@ -4706,6 +4788,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
if (pla) {
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ memcpy(fcport->node_name,
+ pla->iocb.u.isp24.u.plogi.node_name,
+ WWN_SIZE);
qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
/* we took an extra ref_count to prevent PLOGI ACK when
* fcport/sess has not been created.
@@ -4717,9 +4802,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (fcport) {
fcport->d_id = e->u.new_sess.id;
- fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI)
+ fcport->fc4_type = FC4_TYPE_FCP_SCSI;
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
@@ -4734,7 +4820,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- /* search again to make sure one else got ahead */
+ /* search again to make sure no one else got ahead */
tfcp = qla2x00_find_fcport_by_wwpn(vha,
e->u.new_sess.port_name, 1);
if (tfcp) {
@@ -4748,20 +4834,82 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
} else {
list_add_tail(&fcport->list, &vha->vp_fcports);
- if (pla) {
- qlt_plogi_ack_link(vha, pla, fcport,
- QLT_PLOGI_LINK_SAME_WWN);
- pla->ref_count--;
- }
+ }
+ if (pla) {
+ qlt_plogi_ack_link(vha, pla, fcport,
+ QLT_PLOGI_LINK_SAME_WWN);
+ pla->ref_count--;
}
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
if (fcport) {
- if (pla)
+ if (N2N_TOPO(vha->hw))
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+ fcport->id_changed = 1;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
+
+ if (pla) {
+ if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
+ u16 wd3_lo;
+
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ fcport->local = 0;
+ fcport->loop_id =
+ le16_to_cpu(
+ pla->iocb.u.isp24.nport_handle);
+ wd3_lo =
+ le16_to_cpu(
+ pla->iocb.u.isp24.u.prli.wd3_lo);
+
+ if (wd3_lo & BIT_7)
+ fcport->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ }
qlt_plogi_ack_unref(vha, pla);
- else
- qla24xx_async_gffid(vha, fcport);
+ } else {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ tfcp = qla2x00_find_fcport_by_nportid(vha,
+ &e->u.new_sess.id, 1);
+ if (tfcp && (tfcp != fcport)) {
+ /*
+				 * We have a conflicting fcport with the same NportID.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC found conflict b4 add. DS %d LS %d\n",
+ __func__, tfcp->port_name, tfcp->disc_state,
+ tfcp->fw_login_state);
+
+ switch (tfcp->disc_state) {
+ case DSC_DELETED:
+ break;
+ case DSC_DELETE_PEND:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ break;
+ default:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ qlt_schedule_sess_for_deletion(tfcp);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ wwn = wwn_to_u64(fcport->node_name);
+
+ if (!wwn)
+ qla24xx_async_gnnid(vha, fcport);
+ else
+ qla24xx_async_gnl(vha, fcport);
+ }
}
if (free_fcport) {
@@ -4771,6 +4919,20 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
}
+static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ struct srb *sp = e->u.iosb.sp;
+ int rval;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "%s: %s: Re-issue IOCB failed (%d).\n",
+ __func__, sp->name, rval);
+ qla24xx_sp_unmap(vha, sp);
+ }
+}
+
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
@@ -4824,8 +4986,11 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GPNID:
qla24xx_async_gpnid(vha, &e->u.gpnid.id);
break;
- case QLA_EVT_GPNID_DONE:
- qla24xx_async_gpnid_done(vha, e->u.iosb.sp);
+ case QLA_EVT_UNMAP:
+ qla24xx_sp_unmap(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_RELOGIN:
+ qla2x00_relogin(vha);
break;
case QLA_EVT_NEW_SESS:
qla24xx_create_new_sess(vha, e);
@@ -4849,6 +5014,30 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_NACK:
qla24xx_do_nack_work(vha, e);
break;
+ case QLA_EVT_ASYNC_PRLO:
+ qla2x00_async_prlo(vha, e->u.logio.fcport);
+ break;
+ case QLA_EVT_ASYNC_PRLO_DONE:
+ qla2x00_async_prlo_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_GPNFT:
+ qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type);
+ break;
+ case QLA_EVT_GPNFT_DONE:
+ qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNFT_DONE:
+ qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNID:
+ qla24xx_async_gnnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_GFPNID:
+ qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_SP_RETRY:
+ qla_sp_retry(vha, e);
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -4858,6 +5047,20 @@ qla2x00_do_work(struct scsi_qla_host *vha)
}
}
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
+
+ if (!e) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ return qla2x00_post_work(vha, e);
+}
+
/* Relogins all the fcports of a vport
* Context: dpc thread
*/
@@ -4868,14 +5071,14 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- /*
- * If the port is not ONLINE then try to login
- * to it if we haven't run out of retries.
- */
+ /*
+ * If the port is not ONLINE then try to login
+ * to it if we haven't run out of retries.
+ */
if (atomic_read(&fcport->state) != FCS_ONLINE &&
- fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
- fcport->login_retry--;
- if (fcport->flags & FCF_FABRIC_DEVICE) {
+ fcport->login_retry &&
+ !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) {
+ if (vha->hw->current_topology != ISP_CFG_NL) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
"%s %8phC DS %d LS %d\n", __func__,
fcport->port_name, fcport->disc_state,
@@ -4884,7 +5087,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
ea.event = FCME_RELOGIN;
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
- } else {
+ } else if (vha->hw->current_topology == ISP_CFG_NL) {
+ fcport->login_retry--;
status = qla2x00_local_device_login(vha,
fcport);
if (status == QLA_SUCCESS) {
@@ -4912,6 +5116,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
}
+
+ ql_dbg(ql_dbg_disc, vha, 0x400e,
+ "Relogin end.\n");
}
/* Schedule work on any of the dpc-workqueues */
@@ -5687,8 +5894,6 @@ qla2x00_do_dpc(void *data)
if (test_bit(UNLOADING, &base_vha->dpc_flags))
break;
- qla2x00_do_work(base_vha);
-
if (IS_P3P_TYPE(ha)) {
if (IS_QLA8044(ha)) {
if (test_and_clear_bit(ISP_UNRECOVERABLE,
@@ -5867,16 +6072,19 @@ qla2x00_do_dpc(void *data)
}
/* Retry each device up to login retry count */
- if ((test_and_clear_bit(RELOGIN_NEEDED,
- &base_vha->dpc_flags)) &&
+ if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
!test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
- "Relogin scheduled.\n");
- qla2x00_relogin(base_vha);
- ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
- "Relogin end.\n");
+ if (!base_vha->relogin_jif ||
+ time_after_eq(jiffies, base_vha->relogin_jif)) {
+ base_vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
+
+ ql_dbg(ql_dbg_disc, base_vha, 0x400d,
+ "Relogin scheduled.\n");
+ qla24xx_post_relogin_work(base_vha);
+ }
}
loop_resync_check:
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
@@ -6135,8 +6343,17 @@ qla2x00_timer(struct timer_list *t)
}
/* Process any deferred work. */
- if (!list_empty(&vha->work_list))
- start_dpc++;
+ if (!list_empty(&vha->work_list)) {
+ unsigned long flags;
+ bool q = false;
+
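+		/* Requeue the iocb work from the timer if none is pending. */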
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
+ }
/*
* FC-NVME
@@ -6580,37 +6797,16 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
ha->flags.eeh_busy = 0;
}
-static void
-qla83xx_disable_laser(scsi_qla_host_t *vha)
-{
- uint32_t reg, data, fn;
- struct qla_hw_data *ha = vha->hw;
- struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
-
- /* pci func #/port # */
- ql_dbg(ql_dbg_init, vha, 0x004b,
- "Disabling Laser for hba: %p\n", vha);
-
- fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
- (BIT_15|BIT_14|BIT_13|BIT_12));
-
- fn = (fn >> 12);
-
- if (fn & 1)
- reg = PORT_1_2031;
- else
- reg = PORT_0_2031;
-
- data = LASER_OFF_2031;
-
- qla83xx_wr_reg(vha, reg, data);
-}
-
static int qla2xxx_map_queues(struct Scsi_Host *shost)
{
+ int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
- return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ if (USER_CTRL_IRQ(vha->hw))
+ rc = blk_mq_map_queues(&shost->tag_set);
+ else
+ rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ return rc;
}
static const struct pci_error_handlers qla2xxx_err_handler = {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b4336e0cd85f..d2db86ea06b2 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2461,6 +2461,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
sec_mask = 0x1e000;
break;
}
+ /* fall through */
default:
/* Default to 16 kb sector size. */
rest_addr = 0x3fff;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 18069edd4773..fc89af8fe256 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -75,7 +75,8 @@ MODULE_PARM_DESC(ql2xuctrlirq,
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
-static int temp_sam_status = SAM_STAT_BUSY;
+static int qla_sam_status = SAM_STAT_BUSY;
+static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
/*
* From scsi/fc/fc_fcp.h
@@ -208,7 +209,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
host = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!host)
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
"Unable to find host %06x\n", key);
return host;
@@ -309,17 +310,17 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
- ql_dbg(ql_dbg_async, vha, 0x502f,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
"Requeuing unknown ATIO_TYPE7 %p\n", u);
qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
} else if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_async, vha, 0x503a,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
"Freeing unknown %s %p, because tgt is being stopped\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
&u->atio, ha_locked, 0);
} else {
- ql_dbg(ql_dbg_async, vha, 0x503d,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
"Reschedule u %p, vha %p, host %p\n", u, vha, host);
if (!queued) {
queued = 1;
@@ -450,6 +451,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe073,
"qla_target(%d):%s: CRC2 Response pkt\n",
vha->vp_idx, __func__);
+ /* fall through */
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -606,7 +608,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
__func__, __LINE__,
sp->fcport->port_name,
vha->fcport_count);
-
+ sp->fcport->disc_state = DSC_UPD_FCPORT;
qla24xx_post_upd_fcport_work(vha, sp->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20f5,
@@ -665,7 +667,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
-
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_nack_sp_done;
rval = qla2x00_start_sp(sp);
@@ -861,7 +863,10 @@ void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
fcport->loop_id = loop_id;
fcport->d_id = port_id;
- qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ else
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
@@ -890,6 +895,17 @@ qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
pla->ref_count, pla, link);
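+ /* don't link to a conflict session that is already being torn down */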
+ if (link == QLT_PLOGI_LINK_CONFLICT) {
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ pla->ref_count--;
+ return;
+ default:
+ break;
+ }
+ }
+
if (sess->plogi_link[link])
qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
@@ -954,8 +970,9 @@ static void qlt_free_session_done(struct work_struct *work)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
bool logout_started = false;
- struct event_arg ea;
scsi_qla_host_t *base_vha;
+ struct qlt_plogi_ack_t *own =
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
@@ -971,19 +988,35 @@ static void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ sess->send_els_logo = 0;
qlt_send_first_logo(vha, &logo);
}
- if (sess->logout_on_delete) {
+ if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
int rc;
- rc = qla2x00_post_async_logout_work(vha, sess, NULL);
- if (rc != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0xf085,
- "Schedule logo failed sess %p rc %d\n",
- sess, rc);
- else
- logout_started = true;
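+ /* LOGO for PLOGI-created (or unowned) sessions; PRLO for PRLI with RIDA fmt2 */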
+ if (!own ||
+ (own &&
+ (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ rc = qla2x00_post_async_logout_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ } else if (own && (own->iocb.u.isp24.status_subcode ==
+ ELS_PRLI) && ha->flags.rida_fmt2) {
+ rc = qla2x00_post_async_prlo_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule PRLO failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
}
}
@@ -1007,7 +1040,7 @@ static void qlt_free_session_done(struct work_struct *work)
}
ql_dbg(ql_dbg_disc, vha, 0xf087,
- "%s: sess %p logout completed\n",__func__, sess);
+ "%s: sess %p logout completed\n", __func__, sess);
}
if (sess->logo_ack_needed) {
@@ -1033,8 +1066,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->login_succ = 0;
}
- if (sess->chip_reset != ha->base_qpair->chip_reset)
- qla2x00_clear_loop_id(sess);
+ qla2x00_clear_loop_id(sess);
if (sess->conflict) {
sess->conflict->login_pause = 0;
@@ -1044,8 +1076,6 @@ static void qlt_free_session_done(struct work_struct *work)
}
{
- struct qlt_plogi_ack_t *own =
- sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
@@ -1076,6 +1106,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
}
}
+
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
@@ -1089,14 +1120,24 @@ static void qlt_free_session_done(struct work_struct *work)
wake_up_all(&vha->fcport_waitQ);
base_vha = pci_get_drvdata(ha->pdev);
+
+ sess->free_pending = 0;
+
if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
return;
- if (!tgt || !tgt->tgt_stop) {
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_DELETE_DONE;
- ea.fcport = sess;
- qla2x00_fcport_event_handler(vha, &ea);
+ if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+ case MODE_TARGET:
+ default:
+ /* no-op */
+ break;
+ }
}
}
@@ -1104,11 +1145,20 @@ static void qlt_free_session_done(struct work_struct *work)
void qlt_unreg_sess(struct fc_port *sess)
{
struct scsi_qla_host *vha = sess->vha;
+ unsigned long flags;
ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
"%s sess %p for deletion %8phC\n",
__func__, sess, sess->port_name);
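+ /* free_pending guards against freeing the same session twice */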
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->free_pending) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
+ sess->free_pending = 1;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
@@ -1175,10 +1225,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
}
/* ha->tgt.sess_lock supposed to be held on entry */
-void qlt_schedule_sess_for_deletion(struct fc_port *sess,
- bool immediate)
+void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
+ unsigned long flags;
if (sess->disc_state == DSC_DELETE_PEND)
return;
@@ -1194,27 +1244,28 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
return;
}
- sess->disc_state = DSC_DELETE_PEND;
-
if (sess->deleted == QLA_SESS_DELETED)
sess->logout_on_delete = 0;
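+ /* work_lock ensures only one caller marks the deletion in progress */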
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
+ sess->disc_state = DSC_DELETE_PEND;
+
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
"Scheduling sess %p for deletion\n", sess);
- schedule_work(&sess->del_work);
-}
-
-void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
-{
- unsigned long flags;
- struct qla_hw_data *ha = sess->vha->hw;
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- qlt_schedule_sess_for_deletion(sess, 1);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ /* use cancel to push work element through before re-queue */
+ cancel_work_sync(&sess->del_work);
+ INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
+ queue_work(sess->vha->hw->wq, &sess->del_work);
}
/* ha->tgt.sess_lock supposed to be held on entry */
@@ -1225,7 +1276,7 @@ static void qlt_clear_tgt_db(struct qla_tgt *tgt)
list_for_each_entry(sess, &vha->vp_fcports, list) {
if (sess->se_sess)
- qlt_schedule_sess_for_deletion(sess, 1);
+ qlt_schedule_sess_for_deletion(sess);
}
/* At this point tgt could be already dead */
@@ -1400,7 +1451,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
sess->local = 1;
- qlt_schedule_sess_for_deletion(sess, false);
+ qlt_schedule_sess_for_deletion(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -1560,8 +1611,11 @@ static void qlt_release(struct qla_tgt *tgt)
btree_destroy64(&tgt->lun_qpair_map);
- if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
- ha->tgt.tgt_ops->remove_target(vha);
+ if (vha->vp_idx)
+ if (ha->tgt.tgt_ops &&
+ ha->tgt.tgt_ops->remove_target &&
+ vha->vha_tgt.target_lport_ptr)
+ ha->tgt.tgt_ops->remove_target(vha);
vha->vha_tgt.qla_tgt = NULL;
@@ -1976,15 +2030,10 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
"qla_target(%d): task abort for non-existant session\n",
vha->vp_idx);
- rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
- QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
-
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- if (rc != 0) {
- qlt_24xx_send_abts_resp(ha->base_qpair, abts,
- FCP_TMF_REJECTED, false);
- }
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2174,7 +2223,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
"TM response logo %phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
- qlt_schedule_sess_for_deletion_lock(mcmd->sess);
+ qlt_schedule_sess_for_deletion(mcmd->sess);
} else {
qlt_send_notify_ack(vha->hw->base_qpair,
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
@@ -3708,7 +3757,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
term = 1;
if (term)
- qlt_term_ctio_exchange(qpair, ctio, cmd, status);
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
return term;
}
@@ -3869,7 +3918,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
"%s %d %8phC post del sess\n",
__func__, __LINE__, cmd->sess->port_name);
- qlt_schedule_sess_for_deletion_lock(cmd->sess);
+ qlt_schedule_sess_for_deletion(cmd->sess);
}
break;
}
@@ -4204,76 +4253,6 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
return cmd;
}
-static void qlt_create_sess_from_atio(struct work_struct *work)
-{
- struct qla_tgt_sess_op *op = container_of(work,
- struct qla_tgt_sess_op, work);
- scsi_qla_host_t *vha = op->vha;
- struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess;
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
- uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_del(&op->cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-
- if (op->aborted) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
- "sess_op with tag %u is aborted\n",
- op->atio.u.isp24.exchange_addr);
- goto out_term;
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
- "qla_target(%d): Unable to find wwn login"
- " (s_id %x:%x:%x), trying to create it manually\n",
- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-
- if (op->atio.u.raw.entry_count > 1) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
- "Dropping multy entry atio %p\n", &op->atio);
- goto out_term;
- }
-
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has an extra creation ref. */
-
- if (!sess)
- goto out_term;
- /*
- * Now obtain a pre-allocated session tag using the original op->atio
- * packet header, and dispatch into __qlt_do_work() using the existing
- * process context.
- */
- cmd = qlt_get_tag(vha, sess, &op->atio);
- if (!cmd) {
- struct qla_qpair *qpair = ha->base_qpair;
-
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- kfree(op);
- return;
- }
-
- /*
- * __qlt_do_work() will call qlt_put_sess() to release
- * the extra reference taken above by qlt_make_local_sess()
- */
- __qlt_do_work(cmd);
- kfree(op);
- return;
-out_term:
- qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
- kfree(op);
-}
-
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
@@ -4283,31 +4262,23 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct fc_port *sess;
struct qla_tgt_cmd *cmd;
unsigned long flags;
+ port_id_t id;
if (unlikely(tgt->tgt_stop)) {
ql_dbg(ql_dbg_io, vha, 0x3061,
"New command while device %p is shutting down\n", tgt);
- return -EFAULT;
+ return -ENODEV;
}
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
- if (unlikely(!sess)) {
- struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
- GFP_ATOMIC);
- if (!op)
- return -ENOMEM;
-
- memcpy(&op->atio, atio, sizeof(*atio));
- op->vha = vha;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
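+ /* drop commands sourced from switch-reserved fabric addresses */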
+ id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
+ id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
+ id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
+ if (IS_SW_RESV_ADDR(id))
+ return -EBUSY;
- INIT_WORK(&op->work, qlt_create_sess_from_atio);
- queue_work(qla_tgt_wq, &op->work);
- return 0;
- }
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+ if (unlikely(!sess))
+ return -EFAULT;
/* Another WWN used to have our s_id. Our PLOGI scheduled its
* session deletion, but it's still in sess_del_work wq */
@@ -4336,7 +4307,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return -ENOMEM;
+ return -EBUSY;
}
cmd->cmd_in_wq = 1;
@@ -4417,14 +4388,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt;
struct fc_port *sess;
u64 unpacked_lun;
int fn;
unsigned long flags;
- tgt = vha->vha_tgt.qla_tgt;
-
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4435,15 +4403,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
unpacked_lun =
scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
- if (!sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
- "qla_target(%d): task mgmt fn 0x%x for "
- "non-existant session\n", vha->vp_idx, fn);
- return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
- sizeof(struct atio_from_isp));
- }
-
- if (sess->deleted)
+ if (sess == NULL || sess->deleted)
return -EFAULT;
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
@@ -4574,7 +4534,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
* might have cleared it when requested this session
* deletion, so don't touch it
*/
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
} else {
/*
* Another wwn used to have our s_id/loop_id
@@ -4584,11 +4544,10 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
-
other_sess->keep_nport_handle = 1;
- *conflict_sess = other_sess;
- qlt_schedule_sess_for_deletion(other_sess,
- true);
+ if (other_sess->disc_state != DSC_DELETED)
+ *conflict_sess = other_sess;
+ qlt_schedule_sess_for_deletion(other_sess);
}
continue;
}
@@ -4602,7 +4561,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
/* Same loop_id but different s_id
* Ok to kill and logout */
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
}
}
@@ -4652,6 +4611,138 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
return count;
}
+static int qlt_handle_login(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct fc_port *sess = NULL, *conflict_sess = NULL;
+ uint64_t wwn;
+ port_id_t port_id;
+ uint16_t loop_id, wd3_lo;
+ int res = 0;
+ struct qlt_plogi_ack_t *pla;
+ unsigned long flags;
+
+ wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ /* Mark all stale commands sitting in qla_tgt_wq for deletion */
+ abort_cmds_for_s_id(vha, &port_id);
+
+ if (wwn) {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ sess = qlt_find_sess_invalidate_other(vha, wwn,
+ port_id, loop_id, &conflict_sess);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ }
+
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
+ goto out;
+ }
+
+ pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
+ if (!pla) {
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
+ if (conflict_sess) {
+ conflict_sess->login_gen++;
+ qlt_plogi_ack_link(vha, pla, conflict_sess,
+ QLT_PLOGI_LINK_CONFLICT);
+ }
+
+ if (!sess) {
+ pla->ref_count++;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name,
+ iocb->u.isp24.u.plogi.node_name,
+ pla, FC4_TYPE_UNKNOWN);
+ else
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name, NULL,
+ pla, FC4_TYPE_UNKNOWN);
+
+ goto out;
+ }
+
+ qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
+ sess->d_id = port_id;
+ sess->login_gen++;
+
+ if (iocb->u.isp24.status_subcode == ELS_PRLI) {
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
+ sess->local = 0;
+ sess->loop_id = loop_id;
+ wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+ if (wd3_lo & BIT_7)
+ sess->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ sess->port_type = FCT_INITIATOR;
+ else
+ sess->port_type = FCT_TARGET;
+
+ } else
+ sess->fw_login_state = DSC_LS_PLOGI_PEND;
+
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__, sess->port_name, sess->disc_state);
+
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ qlt_plogi_ack_unref(vha, pla);
+ break;
+
+ default:
+ /*
+ * Under normal circumstances we want to release nport handle
+ * during LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting as might be the case here.
+ * Note: there is always a possibility of a race where session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to initiator. Initiator should eventually retry
+ * PLOGI and situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->d_id.b24 == port_id.b24));
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, sess->port_name);
+
+
+ qlt_schedule_sess_for_deletion(sess);
+ break;
+ }
+out:
+ return res;
+}
+
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
@@ -4666,7 +4757,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
uint16_t loop_id;
uint16_t wd3_lo;
int res = 0;
- struct qlt_plogi_ack_t *pla;
unsigned long flags;
wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4690,88 +4780,32 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
*/
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
+ res = qlt_handle_login(vha, iocb);
+ break;
- /* Mark all stale commands in qla_tgt_wq for deletion */
- abort_cmds_for_s_id(vha, &port_id);
-
- if (wwn) {
- spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
- sess = qlt_find_sess_invalidate_other(vha, wwn,
- port_id, loop_id, &conflict_sess);
- spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
- }
-
- if (IS_SW_RESV_ADDR(port_id)) {
- res = 1;
- break;
- }
-
- pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
- if (!pla) {
- qlt_send_term_imm_notif(vha, iocb, 1);
- break;
- }
-
- res = 0;
+ case ELS_PRLI:
+ if (N2N_TOPO(ha)) {
+ sess = qla2x00_find_fcport_by_wwpn(vha,
+ iocb->u.isp24.port_name, 1);
- if (conflict_sess) {
- conflict_sess->login_gen++;
- qlt_plogi_ack_link(vha, pla, conflict_sess,
- QLT_PLOGI_LINK_CONFLICT);
- }
+ if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
+ __func__, __LINE__,
+ iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
- if (!sess) {
- pla->ref_count++;
- qla24xx_post_newsess_work(vha, &port_id,
- iocb->u.isp24.port_name, pla);
- res = 0;
+ res = qlt_handle_login(vha, iocb);
break;
}
- qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
- sess->fw_login_state = DSC_LS_PLOGI_PEND;
- sess->d_id = port_id;
- sess->login_gen++;
-
- switch (sess->disc_state) {
- case DSC_DELETED:
- qlt_plogi_ack_unref(vha, pla);
- break;
-
- default:
- /*
- * Under normal circumstances we want to release nport handle
- * during LOGO process to avoid nport handle leaks inside FW.
- * The exception is when LOGO is done while another PLOGI with
- * the same nport handle is waiting as might be the case here.
- * Note: there is always a possibily of a race where session
- * deletion has already started for other reasons (e.g. ACL
- * removal) and now PLOGI arrives:
- * 1. if PLOGI arrived in FW after nport handle has been freed,
- * FW must have assigned this PLOGI a new/same handle and we
- * can proceed ACK'ing it as usual when session deletion
- * completes.
- * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
- * bit reached it, the handle has now been released. We'll
- * get an error when we ACK this PLOGI. Nothing will be sent
- * back to initiator. Initiator should eventually retry
- * PLOGI and situation will correct itself.
- */
- sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
- (sess->d_id.b24 == port_id.b24));
-
- ql_dbg(ql_dbg_disc, vha, 0x20f9,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, sess->port_name);
-
-
- qlt_schedule_sess_for_deletion_lock(sess);
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
break;
}
- break;
-
- case ELS_PRLI:
wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
if (wwn) {
@@ -4782,17 +4816,51 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (conflict_sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
- "PRLI with conflicting sess %p port %8phC\n",
- conflict_sess, conflict_sess->port_name);
- qlt_send_term_imm_notif(vha, iocb, 1);
- res = 0;
- break;
+ switch (conflict_sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
+ "PRLI with conflicting sess %p port %8phC\n",
+ conflict_sess, conflict_sess->port_name);
+ conflict_sess->fw_login_state =
+ DSC_LS_PORT_UNAVAIL;
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }
}
if (sess != NULL) {
- if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
- sess->fw_login_state != DSC_LS_PLOGI_COMP) {
+ bool delete = false;
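+ /* decide whether the stale session must be deleted before honoring PRLI */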
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
+ switch (sess->fw_login_state) {
+ case DSC_LS_PLOGI_PEND:
+ case DSC_LS_PLOGI_COMP:
+ case DSC_LS_PRLI_COMP:
+ break;
+ default:
+ delete = true;
+ break;
+ }
+
+ switch (sess->disc_state) {
+ case DSC_LOGIN_PEND:
+ case DSC_GPDB:
+ case DSC_GPSC:
+ case DSC_UPD_FCPORT:
+ case DSC_LOGIN_COMPLETE:
+ case DSC_ADISC:
+ delete = false;
+ break;
+ default:
+ break;
+ }
+
+ if (delete) {
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+ flags);
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4803,6 +4871,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess);
qlt_send_term_imm_notif(vha, iocb, 1);
res = 0;
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
+ flags);
break;
}
@@ -4826,6 +4896,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess->port_type = FCT_INITIATOR;
else
sess->port_type = FCT_TARGET;
+
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
}
res = 1; /* send notify ack */
@@ -4863,7 +4935,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = 1;
break;
}
- /* drop through */
+ /* fall through */
case ELS_LOGO:
case ELS_PRLO:
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4892,7 +4964,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
} else {
/* cmd did not go to upper layer. */
if (sess) {
- qlt_schedule_sess_for_deletion_lock(sess);
+ qlt_schedule_sess_for_deletion(sess);
res = 0;
}
/* else logo will be ack */
@@ -4930,6 +5002,10 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
break;
}
+ ql_dbg(ql_dbg_disc, vha, 0xf026,
+ "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
+ vha->vp_idx, iocb->u.isp24.status_subcode, res);
+
return res;
}
@@ -5320,7 +5396,6 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
- uint16_t status;
unsigned long flags;
if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
@@ -5328,8 +5403,7 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- status = temp_sam_status;
- qlt_send_busy(qpair, atio, status);
+ qlt_send_busy(qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5344,7 +5418,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
- unsigned long flags;
+ unsigned long flags = 0;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_tgt, vha, 0x3064,
@@ -5368,8 +5442,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"sending QUEUE_FULL\n", vha->vp_idx);
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_TASK_SET_FULL);
+ qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
@@ -5388,42 +5461,37 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_task_mgmt(vha, atio);
}
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
- if (!ha_locked)
- spin_lock_irqsave(&ha->hardware_lock,
- flags);
-
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_BUSY);
-#else
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(ha->base_qpair, NULL,
atio, 1, 0);
-#endif
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe059,
- "qla_target: Unable to send "
- "command to target for req, "
- "ignoring.\n");
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe05a,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status.\n", vha->vp_idx);
- if (!ha_locked)
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair,
- atio, SAM_STAT_BUSY);
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- }
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ qla_sam_status);
+ break;
}
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
}
break;
@@ -5506,27 +5574,31 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_cmd_for_atio(vha, atio);
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(rsp->qpair, atio, 0);
-#else
- qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
-#endif
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe05f,
- "qla_target: Unable to send "
- "command to target, sending TERM "
- "EXCHANGE for rsp\n");
- qlt_send_term_exchange(rsp->qpair, NULL,
- atio, 1, 0);
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe060,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status\n", vha->vp_idx);
- qlt_send_busy(rsp->qpair, atio, 0);
- }
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
+ qlt_send_term_exchange(rsp->qpair, NULL,
+ atio, 1, 0);
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ qla_sam_status);
+ break;
}
}
}
@@ -5755,7 +5827,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
unsigned long flags;
u8 newfcport = 0;
- fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
"qla_target(%d): Allocation of tmp FC port failed",
@@ -5784,6 +5856,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
tfcp->port_type = fcport->port_type;
tfcp->supported_classes = fcport->supported_classes;
tfcp->flags |= fcport->flags;
+ tfcp->scan_state = QLA_FCPORT_FOUND;
del = fcport;
fcport = tfcp;
@@ -6445,18 +6518,21 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
qlt_add_target(ha, vha);
}
-void
-qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+u8
+qlt_rff_id(struct scsi_qla_host *vha)
{
+ u8 fc4_feature = 0;
/*
* FC-4 Feature bit 0 indicates target functionality to the name server.
*/
if (qla_tgt_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_0;
+ fc4_feature = BIT_0;
} else if (qla_ini_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_1;
+ fc4_feature = BIT_1;
} else if (qla_dual_mode_enabled(vha))
- ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+ fc4_feature = BIT_0 | BIT_1;
+
+ return fc4_feature;
}
/*
@@ -6546,7 +6622,9 @@ void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb;
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6554,19 +6632,28 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
- icb = (struct init_cb_24xx *)ha->init_cb;
-
- if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) {
- struct qla_msix_entry *msix = &ha->msix_entries[2];
-
- icb->msix_atio = cpu_to_le16(msix->entry);
- ql_dbg(ql_dbg_init, vha, 0xf072,
- "Registering ICB vector 0x%x for atio que.\n",
- msix->entry);
- } else if (ql2xenablemsix == 0) {
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- ql_dbg(ql_dbg_init, vha, 0xf07f,
- "Registering INTx vector for ATIO.\n");
+ if (ha->flags.msix_enabled) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA2071(ha)) {
+ /* 4 ports Baker: Enable Interrupt Handshake */
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ } else {
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= ~BIT_26;
+ }
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
+ }
+ } else {
+ /* INTx|MSI */
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "%s: Use INTx for ATIOQ.\n", __func__);
+ }
}
}
@@ -6574,6 +6661,7 @@ void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6625,6 +6713,14 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ if (IS_QLA25XX(ha)) {
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
+ }
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6679,6 +6775,7 @@ void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6729,6 +6826,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
nv->host_p &= cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6991,20 +7094,14 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
- unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
if (!vha->d_id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
} else if (vha->d_id.b24 != id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index aba58d3848a6..bb67b5a284a8 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -993,7 +993,7 @@ struct qla_tgt_prm {
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
- ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+ ((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
#define QLA_TGT_XMIT_DATA 1
#define QLA_TGT_XMIT_STATUS 2
@@ -1072,7 +1072,7 @@ extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
-extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern u8 qlt_rff_id(struct scsi_qla_host *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
extern void qlt_24xx_config_rings(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 733e8dcccf5c..731ca0d8520a 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -526,7 +526,8 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_misc, vha, 0xd20c,
"%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
- if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+ switch (ent->t268.buf_type) {
+ case T268_BUF_TYPE_EXTD_TRACE:
if (vha->hw->eft) {
if (buf) {
ent->t268.buf_size = EFT_SIZE;
@@ -538,10 +539,43 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
"%s: missing eft\n", __func__);
qla27xx_skip_entry(ent, buf);
}
- } else {
- ql_dbg(ql_dbg_misc, vha, 0xd02b,
+ break;
+ case T268_BUF_TYPE_EXCH_BUFOFF:
+ if (vha->hw->exchoffld_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exchoffld_size;
+ ent->t268.start_addr =
+ vha->hw->exchoffld_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exchoffld_buf,
+ vha->hw->exchoffld_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing exch offld\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+ case T268_BUF_TYPE_EXTD_LOGIN:
+ if (vha->hw->exlogin_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exlogin_size;
+ ent->t268.start_addr =
+ vha->hw->exlogin_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exlogin_buf,
+ vha->hw->exlogin_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing ext login\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_async, vha, 0xd02b,
"%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
qla27xx_skip_entry(ent, buf);
+ break;
}
return false;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b6ec02b96d3d..549bef9afddd 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.02-k"
+#define QLA2XXX_VERSION "10.00.00.05-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3f82ea1b72dc..aadfeaac3898 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1635,16 +1635,13 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
return rc;
}
- lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
- 65536);
+ lport->lport_loopid_map = vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
if (!lport->lport_loopid_map) {
pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
btree_destroy32(&lport->lport_fcport_map);
return -ENOMEM;
}
- memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
- * 65536);
pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
return 0;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 5d6d158bbfd6..52b1a0bc93c9 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,15 +153,14 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
goto exit_get_sys_info_no_free;
}
- memset(sys_info, 0, sizeof(*sys_info));
/* Get flash sys info */
if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 1da04f323d38..bda2e64ee5ca 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,15 +625,14 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
goto exit_init_fw_cb_no_free;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
@@ -710,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
__func__);
@@ -720,7 +719,6 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Get Initialize Firmware Control Block. */
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
@@ -1342,16 +1340,15 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- about_fw = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct about_fw_info),
- &about_fw_dma, GFP_KERNEL);
+ about_fw = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
if (!about_fw) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
"for about_fw\n", __func__));
return status;
}
- memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e91abb327745..968bd85610f8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4050,15 +4050,14 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
return status;
}
- memset(sys_info, 0, sizeof(*sys_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2b8a8ce2a431..82e889bbe0ed 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2689,16 +2689,15 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
uint32_t rem = len;
struct nlattr *attr;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (!init_fw_cb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
__func__);
return -ENOMEM;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
@@ -4196,15 +4195,14 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
sizeof(struct shadow_regs) +
MEM_ALIGN_VALUE +
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
- &ha->queues_dma, GFP_KERNEL);
+ ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
+ &ha->queues_dma, GFP_KERNEL);
if (ha->queues == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - queues.\n");
goto mem_alloc_error_exit;
}
- memset(ha->queues, 0, ha->queues_len);
/*
* As per RISC alignment requirements -- the bus-address must be a
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 40bc616cf8ab..90349498f686 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -12,7 +12,7 @@
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
- * encouraged once assigned by ANSI/INCITS T10
+ * encouraged once assigned by ANSI/INCITS T10).
*/
static const char *const scsi_device_types[] = {
"Direct-Access ",
@@ -39,7 +39,7 @@ static const char *const scsi_device_types[] = {
};
/**
- * scsi_device_type - Return 17 char string indicating device type.
+ * scsi_device_type - Return 17-char string indicating device type.
* @type: type number to look up
*/
const char *scsi_device_type(unsigned type)
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(scsi_device_type);
* @scsilun: struct scsi_lun to be converted.
*
* Description:
- * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered
+ * Convert @scsilun from a struct scsi_lun to a four-byte host byte-ordered
* integer, and return the result. The caller must check for
* truncation before using this function.
*
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(scsilun_to_int);
* back into the lun value.
*
* Notes:
- * Given an integer : 0x0b03d204, this function returns a
+ * Given an integer : 0x0b03d204, this function returns a
* struct scsi_lun of: d2 04 0b 03 00 00 00 00
*
*/
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
/**
* scsi_build_sense_buffer - build sense data in a buffer
- * @desc: Sense format (non zero == descriptor format,
+ * @desc: Sense format (non-zero == descriptor format,
* 0 == fixed format)
* @buf: Where to build sense data
* @key: Sense key
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(scsi_build_sense_buffer);
* @info: 64-bit information value to be set
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
**/
int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
{
@@ -305,7 +305,7 @@ EXPORT_SYMBOL(scsi_set_sense_information);
* @cd: command/data bit
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
*/
int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd)
{
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index e4f037f0f38b..a5986dae9020 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6,7 +6,7 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2016 Douglas Gilbert
+ * Copyright (C) 2001 - 2017 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -61,8 +61,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "1.86"
-static const char *sdebug_version_date = "20160430";
+#define SDEBUG_VERSION "0187" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20171202";
#define MY_NAME "scsi_debug"
@@ -93,6 +93,7 @@ static const char *sdebug_version_date = "20160430";
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
+#define WRITE_ERROR_ASC 0xc
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -105,6 +106,7 @@ static const char *sdebug_version_date = "20160430";
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
#define DEF_ATO 1
+#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
@@ -161,12 +163,14 @@ static const char *sdebug_version_date = "20160430";
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
+#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_TRANSPORT_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
- SDEBUG_OPT_SHORT_TRANSFER)
+ SDEBUG_OPT_SHORT_TRANSFER | \
+ SDEBUG_OPT_HOST_BUSY)
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
@@ -232,7 +236,7 @@ static const char *sdebug_version_date = "20160430";
#define F_M_ACCESS 0x800 /* media access */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
-#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
+#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define SDEBUG_MAX_PARTS 4
@@ -263,12 +267,18 @@ struct sdebug_host_info {
#define to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
+enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
+ SDEB_DEFER_WQ = 2};
+
struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
int sqa_idx; /* index of sdebug_queue array */
int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
int issuing_cpu;
+ bool init_hrt;
+ bool init_wq;
+ enum sdeb_defer_type defer_t;
};
struct sdebug_queued_cmd {
@@ -282,6 +292,7 @@ struct sdebug_queued_cmd {
unsigned int inj_dif:1;
unsigned int inj_dix:1;
unsigned int inj_short:1;
+ unsigned int inj_host_busy:1;
};
struct sdebug_queue {
@@ -304,8 +315,8 @@ struct opcode_info_t {
u32 flags; /* OR-ed set of SDEB_F_* */
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
const struct opcode_info_t *arrp; /* num_attached elements or NULL */
- u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
- /* ignore cdb bytes after position 15 */
+ u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
+ /* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
@@ -322,12 +333,12 @@ enum sdeb_opcode_index {
SDEB_I_READ = 9, /* 6, 10, 12, 16 */
SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
SDEB_I_START_STOP = 11,
- SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
- SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
+ SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
+ SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
SDEB_I_MAINT_IN = 14,
SDEB_I_MAINT_OUT = 15,
SDEB_I_VERIFY = 16, /* 10 only */
- SDEB_I_VARIABLE_LEN = 17,
+ SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
SDEB_I_RESERVE = 18, /* 6, 10 */
SDEB_I_RELEASE = 19, /* 6, 10 */
SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
@@ -340,7 +351,7 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_SAME = 27, /* 10, 16 */
SDEB_I_SYNC_CACHE = 28, /* 10 only */
SDEB_I_COMP_WRITE = 29,
- SDEB_I_LAST_ELEMENT = 30, /* keep this last */
+ SDEB_I_LAST_ELEMENT = 30, /* keep this last (previous + 1) */
};
@@ -372,12 +383,12 @@ static const unsigned char opcode_ind_arr[256] = {
0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
+ 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
SDEB_I_MAINT_OUT, 0, 0, 0,
- SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
- 0, 0, 0, 0,
+ SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
+ 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
@@ -396,6 +407,7 @@ static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -409,72 +421,81 @@ static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
-static const struct opcode_info_t msense_iarr[1] = {
+/*
+ * The following are overflow arrays for cdbs that "hit" the same index in
+ * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
+ * should be placed in opcode_info_arr[], the others should be placed here.
+ */
+static const struct opcode_info_t msense_iarr[] = {
{0, 0x1a, 0, F_D_IN, NULL, NULL,
{6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t mselect_iarr[1] = {
+static const struct opcode_info_t mselect_iarr[] = {
{0, 0x15, 0, F_D_OUT, NULL, NULL,
{6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t read_iarr[3] = {
- {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
- {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+static const struct opcode_info_t read_iarr[] = {
+ {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
+ {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
{6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
- {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
+ {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
+ {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t write_iarr[3] = {
- {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
- {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
- 0, 0, 0, 0} },
- {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
- {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
- {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
- 0xc7, 0, 0, 0, 0} },
+static const struct opcode_info_t write_iarr[] = {
+ {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
+ NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+ {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
+ NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0} },
+ {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
+ NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbf, 0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t sa_in_iarr[1] = {
+static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0, 0xc7} },
+ 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
};
-static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
- {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
- NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
+static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
+ {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
+ {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
+ 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
};
-static const struct opcode_info_t maint_in_iarr[2] = {
+static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
{12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
- 0xc7, 0, 0, 0, 0} },
+ 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
{12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+ 0, 0} }, /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
-static const struct opcode_info_t write_same_iarr[1] = {
- {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
+static const struct opcode_info_t write_same_iarr[] = {
+ {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1f, 0xc7} },
+ 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
};
-static const struct opcode_info_t reserve_iarr[1] = {
- {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
+static const struct opcode_info_t reserve_iarr[] = {
+ {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
{6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t release_iarr[1] = {
- {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
+static const struct opcode_info_t release_iarr[] = {
+ {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
{6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
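
For orientation: each initializer above fills a struct shaped roughly as below.
This is a sketch reconstructed from the fields used in this table, not a
verbatim copy of the driver's definition, so treat the field names as
assumptions:

    struct opcode_info_t {
            u8 num_attached;    /* 0 for a leaf entry; 0xff terminates table */
            u8 opcode;          /* cdb byte 0 */
            u16 sa;             /* service action, used with F_SA_LOW/F_SA_HIGH */
            u32 flags;          /* OR-ed set of F_ and FF_ values */
            int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
            const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
            u8 len_mask[16];    /* len_mask[0] is the cdb length; the rest
                                 * mask the significant bits of each cdb byte */
    };

Read read_iarr[0], for instance, as: READ(10), opcode 0x28, data-in, media
I/O, serviced by resp_read_dt0(), 10 byte cdb; the mask change from 0x1f to
0x3f on cdb byte 6 appears to admit the wider group number field.
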
@@ -484,57 +505,67 @@ static const struct opcode_info_t release_iarr[1] = {
* REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 0 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
+ {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
{6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
{12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+ 0, 0} }, /* REPORT LUNS */
{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
{6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
{6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
- {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
- 0} },
- {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
- {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
+/* 5 */
+ {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
+ resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
+ 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
+ resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
+ 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
{10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
0, 0, 0} },
- {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
+ {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
{10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
0, 0} },
- {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
- {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
+ {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
+ resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
- {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
- {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
+ {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
+ resp_write_dt0, write_iarr, /* WRITE(16) */
+ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
{0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
{6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
- {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
- {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
- 0} },
+ {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
+ resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
+ {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
+ {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
+ {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
+ resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
+ maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0, 0xc7, 0, 0, 0, 0} },
+/* 15 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
- {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
- vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
- 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
- {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
+ {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
+ resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
+ {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
+ 0xff, 0xff} },
+ {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
+ NULL, reserve_iarr, /* RESERVE(10) <no response function> */
{10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
- {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
+ {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
+ NULL, release_iarr, /* RELEASE(10) <no response function> */
{10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
/* 20 */
@@ -546,23 +577,25 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
{6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
- {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
- NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} },
+ {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
+ {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+/* 25 */
+ {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
+ NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */
{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* WRITE_BUFFER */
- {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
- write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
- 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
- {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+ {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
+ resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
+ 0, 0, 0, 0, 0} },
+ {0, 0x35, 0, F_DELAY_OVERR | FF_MEDIA_IO, NULL, NULL, /* SYNC_CACHE */
+ {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
+ {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
- 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
+ 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
/* 30 */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
@@ -571,6 +604,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
+static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
@@ -797,6 +831,61 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
/* return -ENOTTY; // correct return but upsets fdisk */
}
+static void config_cdb_len(struct scsi_device *sdev)
+{
+ switch (sdebug_cdb_len) {
+ case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = true;
+ break;
+ case 16:
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ case 32: /* No knobs to suggest this so same as 16 for now */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ default:
+ pr_warn("unexpected cdb_len=%d, force to 10\n",
+ sdebug_cdb_len);
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ sdebug_cdb_len = 10;
+ break;
+ }
+}
+
+static void all_config_cdb_len(void)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ shost = sdbg_host->shost;
+ shost_for_each_device(sdev, shost) {
+ config_cdb_len(sdev);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+}
+
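config_cdb_len() maps the module-level cdb_len suggestion onto the per-device
hint flags that upper layers consult when building cdbs, and
all_config_cdb_len() re-applies it across every device on every scsi_debug
host. A minimal sketch of the consumer side, assuming the usual sd-style
selection logic (illustrative only, not lifted from sd.c):

    /* illustrative: how the use_*_for_rw hints typically steer cdb choice */
    if (sdp->use_16_for_rw)
            ;       /* build READ(16)/WRITE(16) cdbs */
    else if (sdp->use_10_for_rw)
            ;       /* build READ(10)/WRITE(10) cdbs */
    else
            ;       /* READ(6)/WRITE(6), only viable for small LBAs/counts */

Loading with, say, modprobe scsi_debug cdb_len=16 exercises the 16 byte paths
from the start.
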
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
struct sdebug_host_info *sdhp;
@@ -955,7 +1044,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static char sdebug_inq_vendor_id[9] = "Linux ";
static char sdebug_inq_product_id[17] = "scsi_debug ";
-static char sdebug_inq_product_rev[5] = "0186"; /* version less '.' */
+static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
@@ -1411,6 +1500,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
memcpy(&arr[8], sdebug_inq_vendor_id, 8);
memcpy(&arr[16], sdebug_inq_product_id, 16);
memcpy(&arr[32], sdebug_inq_product_rev, 4);
+ /* Use Vendor Specific area to place driver date in ASCII hex */
+ memcpy(&arr[36], sdebug_version_date, 8);
/* version descriptors (2 bytes each) follow */
put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
@@ -1900,7 +1991,7 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{ /* Control mode page for mode_sense */
unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
- 0, 0, 0, 0};
+ 0, 0, 0, 0};
unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
0, 0, 0x2, 0x4b};
@@ -2077,13 +2168,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
len = resp_disconnect_pg(ap, pcontrol, target);
offset += len;
break;
- case 0x3: /* Format device page, direct access */
+ case 0x3: /* Format device page, direct access */
if (is_disk) {
len = resp_format_pg(ap, pcontrol, target);
offset += len;
} else
bad_pcode = true;
- break;
+ break;
case 0x8: /* Caching page, direct access */
if (is_disk) {
len = resp_caching_pg(ap, pcontrol, target);
@@ -2099,7 +2190,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
if ((subpcode > 0x2) && (subpcode < 0xff)) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
len = 0;
if ((0x0 == subpcode) || (0xff == subpcode))
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2136,7 +2227,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
break;
default:
bad_pcode = true;
@@ -2172,8 +2263,8 @@ static int resp_mode_select(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
return check_condition_result;
}
- res = fetch_to_dev_buffer(scp, arr, param_len);
- if (-1 == res)
+ res = fetch_to_dev_buffer(scp, arr, param_len);
+ if (-1 == res)
return DID_ERROR << 16;
else if (sdebug_verbose && (res < param_len))
sdev_printk(KERN_INFO, scp->device,
@@ -2239,8 +2330,8 @@ static int resp_temp_l_pg(unsigned char * arr)
0x0, 0x1, 0x3, 0x2, 0x0, 65,
};
- memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
- return sizeof(temp_l_pg);
+ memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
+ return sizeof(temp_l_pg);
}
static int resp_ie_l_pg(unsigned char * arr)
@@ -2248,18 +2339,18 @@ static int resp_ie_l_pg(unsigned char * arr)
unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
};
- memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
+ memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
if (iec_m_pg[2] & 0x4) { /* TEST bit set */
arr[4] = THRESHOLD_EXCEEDED;
arr[5] = 0xff;
}
- return sizeof(ie_l_pg);
+ return sizeof(ie_l_pg);
}
#define SDEBUG_MAX_LSENSE_SZ 512
-static int resp_log_sense(struct scsi_cmnd * scp,
- struct sdebug_dev_info * devip)
+static int resp_log_sense(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
int ppc, sp, pcode, subpcode, alloc_len, len, n;
unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
@@ -2353,8 +2444,8 @@ static int check_device_access_params(struct scsi_cmnd *scp,
}
/* Returns number of bytes copied or -1 if error. */
-static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
- bool do_write)
+static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
+ u32 num, bool do_write)
{
int ret;
u64 block, rest = 0;
@@ -2380,14 +2471,15 @@ static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep + (block * sdebug_sector_size),
- (num - rest) * sdebug_sector_size, 0, do_write);
+ (num - rest) * sdebug_sector_size, sg_skip, do_write);
if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep, rest * sdebug_sector_size,
- (num - rest) * sdebug_sector_size, do_write);
+ sg_skip + ((num - rest) * sdebug_sector_size),
+ do_write);
}
return ret;
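
do_device_access() gains an sg_skip byte offset so one command's data-out
buffer can be consumed piecewise; the pre-existing callers below pass 0, while
WRITE SCATTERED passes the running offset of each LBA range's payload. The
underlying primitive is lib/scatterlist.c's sg_copy_buffer(); a minimal sketch
of the write-side call, with buf and nbytes as placeholder names:

    /* copy nbytes from the request's sg list, starting sg_skip bytes in,
     * into buf (to_buffer == true) */
    size_t done = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
                                 buf, nbytes, sg_skip, true);
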
@@ -2648,7 +2740,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, false);
+ ret = do_device_access(scp, 0, lba, num, false);
read_unlock_irqrestore(&atomic_rw, iflags);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -2936,7 +3028,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, true);
+ ret = do_device_access(scp, 0, lba, num, true);
if (unlikely(scsi_debug_lbp()))
map_region(lba, num);
write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2970,6 +3062,173 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
+/*
+ * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
+ * No READ GATHERED yet (requires bidi or long cdb holding gather list).
+ */
+static int resp_write_scat(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u8 *lrdp = NULL;
+ u8 *up;
+ u8 wrprotect;
+ u16 lbdof, num_lrd, k;
+ u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
+ u32 lb_size = sdebug_sector_size;
+ u32 ei_lba;
+ u64 lba;
+ unsigned long iflags;
+ int ret, res;
+ bool is_16;
+ static const u32 lrd_size = 32; /* + parameter list header size */
+
+ if (cmd[0] == VARIABLE_LENGTH_CMD) {
+ is_16 = false;
+ wrprotect = (cmd[10] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 12);
+ num_lrd = get_unaligned_be16(cmd + 16);
+ bt_len = get_unaligned_be32(cmd + 28);
+ } else { /* that leaves WRITE SCATTERED(16) */
+ is_16 = true;
+ wrprotect = (cmd[2] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 4);
+ num_lrd = get_unaligned_be16(cmd + 8);
+ bt_len = get_unaligned_be32(cmd + 10);
+ if (unlikely(have_dif_prot)) {
+ if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
+ wrprotect) {
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+ if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+ sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
+ wrprotect == 0)
+ sdev_printk(KERN_ERR, scp->device,
+ "Unprotected WR to DIF device\n");
+ }
+ }
+ if ((num_lrd == 0) || (bt_len == 0))
+ return 0; /* T10 says these do-nothings are not errors */
+ if (lbdof == 0) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LB Data Offset field bad\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lbdof_blen = lbdof * lb_size;
+ if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LBA range descriptors don't fit\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
+ if (lrdp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
+ my_name, __func__, lbdof_blen);
+ res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
+ if (res == -1) {
+ ret = DID_ERROR << 16;
+ goto err_out;
+ }
+
+ write_lock_irqsave(&atomic_rw, iflags);
+ sg_off = lbdof_blen;
+ /* Spec says the Buffer Transfer Length field counts LBs in dout */
+ cum_lb = 0;
+ for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
+ lba = get_unaligned_be64(up + 0);
+ num = get_unaligned_be32(up + 8);
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
+ my_name, __func__, k, lba, num, sg_off);
+ if (num == 0)
+ continue;
+ ret = check_device_access_params(scp, lba, num);
+ if (ret)
+ goto err_out_unlock;
+ num_by = num * lb_size;
+ ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
+
+ if ((cum_lb + num) > bt_len) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: sum of blocks > data provided\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
+ 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+
+ /* DIX + T10 DIF */
+ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
+ int prot_ret = prot_verify_write(scp, lba, num,
+ ei_lba);
+
+ if (prot_ret) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
+ prot_ret);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+
+ ret = do_device_access(scp, sg_off, lba, num, true);
+ if (unlikely(scsi_debug_lbp()))
+ map_region(lba, num);
+ if (unlikely(-1 == ret)) {
+ ret = DID_ERROR << 16;
+ goto err_out_unlock;
+ } else if (unlikely(sdebug_verbose && (ret < num_by)))
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, num_by, ret);
+
+ if (unlikely(sdebug_any_injecting_opt)) {
+ struct sdebug_queued_cmd *sqcp =
+ (struct sdebug_queued_cmd *)scp->host_scribble;
+
+ if (sqcp) {
+ if (sqcp->inj_recovered) {
+ mk_sense_buffer(scp, RECOVERED_ERROR,
+ THRESHOLD_EXCEEDED, 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dif) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dix) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+ }
+ sg_off += num_by;
+ cum_lb += num;
+ }
+ ret = 0;
+err_out_unlock:
+ write_unlock_irqrestore(&atomic_rw, iflags);
+err_out:
+ kfree(lrdp);
+ return ret;
+}
+
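The parsing above implies the following data-out layout for WRITE SCATTERED:
the first lbdof logical blocks hold a 32 byte parameter list header followed
by num_lrd LBA range descriptors of lrd_size (32) bytes each, and the write
payloads then follow back-to-back, which is what sg_off tracks. A sketch of
one descriptor, with hypothetical field names matching the offsets parsed
above:

    struct lba_range_desc {         /* hypothetical name; 32 bytes */
            __be64 lba;             /* bytes 0..7: first logical block */
            __be32 num_blocks;      /* bytes 8..11: blocks in this range */
            __be32 ei_lba;          /* bytes 12..15: consumed only by the
                                     * 32-byte cdb variant */
            u8 reserved[16];        /* bytes 16..31 */
    };
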
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
@@ -3177,7 +3436,7 @@ static int resp_comp_write(struct scsi_cmnd *scp,
* from data-in into arr. Safe (atomic) since write_lock held. */
fake_storep_hold = fake_storep;
fake_storep = arr;
- ret = do_device_access(scp, 0, dnum, true);
+ ret = do_device_access(scp, 0, 0, dnum, true);
fake_storep = fake_storep_hold;
if (ret == -1) {
retval = DID_ERROR << 16;
@@ -3495,6 +3754,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct scsi_cmnd *scp;
struct sdebug_dev_info *devip;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
qc_idx = sd_dp->qc_idx;
sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
@@ -3603,12 +3863,12 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
if (!sdbg_host) {
pr_err("Host info NULL\n");
return NULL;
- }
+ }
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
- (devip->target == sdev->id) &&
- (devip->lun == sdev->lun))
- return devip;
+ (devip->target == sdev->id) &&
+ (devip->lun == sdev->lun))
+ return devip;
else {
if ((!devip->used) && (!open_devip))
open_devip = devip;
@@ -3660,6 +3920,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
blk_queue_max_segment_size(sdp->request_queue, -1U);
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
+ config_cdb_len(sdp);
return 0;
}
@@ -3678,13 +3939,14 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-static void stop_qc_helper(struct sdebug_defer *sd_dp)
+static void stop_qc_helper(struct sdebug_defer *sd_dp,
+ enum sdeb_defer_type defer_t)
{
if (!sd_dp)
return;
- if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
+ if (defer_t == SDEB_DEFER_HRT)
hrtimer_cancel(&sd_dp->hrt);
- else if (sdebug_jdelay < 0)
+ else if (defer_t == SDEB_DEFER_WQ)
cancel_work_sync(&sd_dp->ew.work);
}
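
Previously stop_qc_helper() inferred how a response had been deferred from the
current delay/ndelay settings, which races with those parameters being changed
while commands are in flight (and is why the delay_store()/ndelay_store()
hunks below can drop their free_all_queued() calls). Each sdebug_defer
instance now records what was actually armed. The enum is defined earlier in
the file, outside these hunks; presumably along the lines of:

    enum sdeb_defer_type { SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT, SDEB_DEFER_WQ };
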
@@ -3694,6 +3956,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
unsigned long iflags;
int j, k, qmax, r_qmax;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3717,8 +3980,13 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
return true;
}
@@ -3733,6 +4001,7 @@ static void stop_all_queued(void)
{
unsigned long iflags;
int j, k;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3751,8 +4020,13 @@ static void stop_all_queued(void)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
@@ -3848,8 +4122,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *devip;
- struct scsi_device * sdp;
- struct Scsi_Host * hp;
+ struct scsi_device *sdp;
+ struct Scsi_Host *hp;
int k = 0;
++num_bus_resets;
@@ -3863,7 +4137,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
if (sdbg_host) {
list_for_each_entry(devip,
- &sdbg_host->dev_info_list,
+ &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
@@ -3886,15 +4160,15 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
++num_host_resets;
if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
- spin_lock(&sdebug_host_list_lock);
- list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
}
- }
- spin_unlock(&sdebug_host_list_lock);
+ }
+ spin_unlock(&sdebug_host_list_lock);
stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
@@ -3921,7 +4195,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
sectors_per_part = (num_sectors - sdebug_sectors_per)
/ sdebug_num_parts;
heads_by_sects = sdebug_heads * sdebug_sectors_per;
- starts[0] = sdebug_sectors_per;
+ starts[0] = sdebug_sectors_per;
for (k = 1; k < sdebug_num_parts; ++k)
starts[k] = ((k * sectors_per_part) / heads_by_sects)
* heads_by_sects;
@@ -3995,6 +4269,7 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
+ sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
}
/* Complete the processing of the thread that queued a SCSI command to this
@@ -4003,7 +4278,7 @@ static void setup_inject(struct sdebug_queue *sqp,
* SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
- int scsi_result, int delta_jiff)
+ int scsi_result, int delta_jiff, int ndelay)
{
unsigned long iflags;
int k, num_in_q, qdepth, inject;
@@ -4081,20 +4356,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
setup_inject(sqp, sqcp);
- if (delta_jiff > 0 || sdebug_ndelay > 0) {
+ if (sd_dp == NULL) {
+ sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
+ if (sd_dp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if (delta_jiff > 0 || ndelay > 0) {
ktime_t kt;
if (delta_jiff > 0) {
- struct timespec ts;
-
- jiffies_to_timespec(delta_jiff, &ts);
- kt = ktime_set(ts.tv_sec, ts.tv_nsec);
+ kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
} else
- kt = sdebug_ndelay;
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ kt = ndelay;
+ if (!sd_dp->init_hrt) {
+ sd_dp->init_hrt = true;
sqcp->sd_dp = sd_dp;
hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -4104,12 +4379,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_HRT;
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (!sd_dp->init_wq) {
+ sd_dp->init_wq = true;
sqcp->sd_dp = sd_dp;
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
@@ -4117,6 +4391,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_WQ;
schedule_work(&sd_dp->ew.work);
}
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
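
The delay computation above replaces a jiffies -> timespec -> ktime round trip
with a direct multiplication, since one jiffy is NSEC_PER_SEC / HZ
nanoseconds. As a worked example, with HZ=250:

    /* 1 jiffy = 1,000,000,000 / 250 = 4,000,000 ns, so delta_jiff == 1
     * arms a 4 ms hrtimer */
    kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
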
@@ -4141,6 +4416,7 @@ respond_in_thread: /* call back to mid-layer using invocation thread */
*/
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
+module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
@@ -4198,6 +4474,7 @@ MODULE_VERSION(SDEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
@@ -4210,7 +4487,8 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
-MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"0186\")");
+MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
+ SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
@@ -4360,9 +4638,6 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
@@ -4403,9 +4678,6 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_ndelay = ndelay;
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
@@ -4426,15 +4698,15 @@ static ssize_t opts_show(struct device_driver *ddp, char *buf)
static ssize_t opts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int opts;
+ int opts;
char work[20];
- if (1 == sscanf(buf, "%10s", work)) {
- if (0 == strncasecmp(work,"0x", 2)) {
- if (1 == sscanf(&work[2], "%x", &opts))
+ if (sscanf(buf, "%10s", work) == 1) {
+ if (strncasecmp(work, "0x", 2) == 0) {
+ if (kstrtoint(work + 2, 16, &opts) == 0)
goto opts_done;
} else {
- if (1 == sscanf(work, "%d", &opts))
+ if (kstrtoint(work, 10, &opts) == 0)
goto opts_done;
}
}
@@ -4455,7 +4727,7 @@ static ssize_t ptype_show(struct device_driver *ddp, char *buf)
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_ptype = n;
@@ -4472,7 +4744,7 @@ static ssize_t dsense_show(struct device_driver *ddp, char *buf)
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_dsense = n;
@@ -4489,7 +4761,7 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
n = (n > 0);
@@ -4522,7 +4794,7 @@ static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_no_lun_0 = n;
@@ -4539,7 +4811,7 @@ static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_num_tgts = n;
@@ -4569,7 +4841,7 @@ static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int nth;
+ int nth;
if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
sdebug_every_nth = nth;
@@ -4591,7 +4863,7 @@ static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4678,7 +4950,7 @@ static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4884,6 +5156,24 @@ static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(uuid_ctl);
+static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
+}
+static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int ret, n;
+
+ ret = kstrtoint(buf, 0, &n);
+ if (ret)
+ return ret;
+ sdebug_cdb_len = n;
+ all_config_cdb_len();
+ return count;
+}
+static DRIVER_ATTR_RW(cdb_len);
+
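With this attribute in place the suggested cdb length can be changed at
runtime, and all_config_cdb_len() pushes the new value to every existing
scsi_debug device, e.g. with
echo 16 > /sys/bus/pseudo/drivers/scsi_debug/cdb_len (the directory noted in
the comment just below).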
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -4923,6 +5213,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
&driver_attr_uuid_ctl.attr,
+ &driver_attr_cdb_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -5113,12 +5404,12 @@ static int __init scsi_debug_init(void)
host_to_add = sdebug_add_host;
sdebug_add_host = 0;
- for (k = 0; k < host_to_add; k++) {
- if (sdebug_add_adapter()) {
+ for (k = 0; k < host_to_add; k++) {
+ if (sdebug_add_adapter()) {
pr_err("sdebug_add_adapter failed k=%d\n", k);
- break;
- }
- }
+ break;
+ }
+ }
if (sdebug_verbose)
pr_info("built %d host(s)\n", sdebug_add_host);
@@ -5161,53 +5452,53 @@ module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
sdbg_host = to_sdebug_host(dev);
- kfree(sdbg_host);
+ kfree(sdbg_host);
}
static int sdebug_add_adapter(void)
{
int k, devs_per_host;
- int error = 0;
- struct sdebug_host_info *sdbg_host;
+ int error = 0;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
- sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
- if (NULL == sdbg_host) {
+ sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
+ if (sdbg_host == NULL) {
pr_err("out of memory at line %d\n", __LINE__);
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
- INIT_LIST_HEAD(&sdbg_host->dev_info_list);
+ INIT_LIST_HEAD(&sdbg_host->dev_info_list);
devs_per_host = sdebug_num_tgts * sdebug_max_luns;
- for (k = 0; k < devs_per_host; k++) {
+ for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
if (!sdbg_devinfo) {
pr_err("out of memory at line %d\n", __LINE__);
- error = -ENOMEM;
+ error = -ENOMEM;
goto clean;
- }
- }
+ }
+ }
- spin_lock(&sdebug_host_list_lock);
- list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
- spin_unlock(&sdebug_host_list_lock);
+ spin_lock(&sdebug_host_list_lock);
+ list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
+ spin_unlock(&sdebug_host_list_lock);
- sdbg_host->dev.bus = &pseudo_lld_bus;
- sdbg_host->dev.parent = pseudo_primary;
- sdbg_host->dev.release = &sdebug_release_adapter;
+ sdbg_host->dev.bus = &pseudo_lld_bus;
+ sdbg_host->dev.parent = pseudo_primary;
+ sdbg_host->dev.release = &sdebug_release_adapter;
dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
- error = device_register(&sdbg_host->dev);
+ error = device_register(&sdbg_host->dev);
- if (error)
+ if (error)
goto clean;
++sdebug_add_host;
- return error;
+ return error;
clean:
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
@@ -5217,20 +5508,20 @@ clean:
}
kfree(sdbg_host);
- return error;
+ return error;
}
static void sdebug_remove_adapter(void)
{
- struct sdebug_host_info * sdbg_host = NULL;
+ struct sdebug_host_info *sdbg_host = NULL;
- spin_lock(&sdebug_host_list_lock);
- if (!list_empty(&sdebug_host_list)) {
- sdbg_host = list_entry(sdebug_host_list.prev,
- struct sdebug_host_info, host_list);
+ spin_lock(&sdebug_host_list_lock);
+ if (!list_empty(&sdebug_host_list)) {
+ sdbg_host = list_entry(sdebug_host_list.prev,
+ struct sdebug_host_info, host_list);
list_del(&sdbg_host->host_list);
}
- spin_unlock(&sdebug_host_list_lock);
+ spin_unlock(&sdebug_host_list_lock);
if (!sdbg_host)
return;
@@ -5281,6 +5572,12 @@ static bool fake_timeout(struct scsi_cmnd *scp)
return false;
}
+static bool fake_host_busy(struct scsi_cmnd *scp)
+{
+ return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
+ (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
+}
+
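Together with the inj_host_busy flag set in setup_inject() above, this lets
every sdebug_every_nth-th command be bounced back to the mid-layer as
SCSI_MLQUEUE_HOST_BUSY (see the queuecommand hunk below), exercising the
requeue path; note the modulo assumes sdebug_every_nth is non-zero whenever
SDEBUG_OPT_HOST_BUSY is set.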
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -5323,6 +5620,8 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
b);
}
+ if (fake_host_busy(scp))
+ return SCSI_MLQUEUE_HOST_BUSY;
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
goto err_out;
@@ -5420,12 +5719,15 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
errsts = r_pfp(scp, devip);
fini:
- return schedule_resp(scp, devip, errsts,
- ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
+ if (F_DELAY_OVERR & flags)
+ return schedule_resp(scp, devip, errsts, 0, 0);
+ else
+ return schedule_resp(scp, devip, errsts, sdebug_jdelay,
+ sdebug_ndelay);
check_cond:
- return schedule_resp(scp, devip, check_condition_result, 0);
+ return schedule_resp(scp, devip, check_condition_result, 0, 0);
err_out:
- return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
+ return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0, 0);
}
static struct scsi_host_template sdebug_driver_template = {
@@ -5484,7 +5786,7 @@ static int sdebug_driver_probe(struct device * dev)
if (sdebug_mq_active)
hpnt->nr_hw_queues = submit_queues;
- sdbg_host->shost = hpnt;
+ sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
hpnt->max_id = sdebug_num_tgts + 1;
@@ -5542,12 +5844,12 @@ static int sdebug_driver_probe(struct device * dev)
sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
if (sdebug_every_nth) /* need stats counters for every_nth */
sdebug_statistics = true;
- error = scsi_add_host(hpnt, &sdbg_host->dev);
- if (error) {
+ error = scsi_add_host(hpnt, &sdbg_host->dev);
+ if (error) {
pr_err("scsi_add_host failed\n");
- error = -ENODEV;
+ error = -ENODEV;
scsi_host_put(hpnt);
- } else
+ } else
scsi_scan_host(hpnt);
return error;
@@ -5555,7 +5857,7 @@ static int sdebug_driver_probe(struct device * dev)
static int sdebug_driver_remove(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = to_sdebug_host(dev);
@@ -5565,16 +5867,16 @@ static int sdebug_driver_remove(struct device * dev)
return -ENODEV;
}
- scsi_remove_host(sdbg_host->shost);
+ scsi_remove_host(sdbg_host->shost);
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
- list_del(&sdbg_devinfo->dev_list);
- kfree(sdbg_devinfo);
- }
+ list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo);
+ }
- scsi_host_put(sdbg_host->shost);
- return 0;
+ scsi_host_put(sdbg_host->shost);
+ return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index dfb8da83fa50..f3b117246d47 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -108,8 +108,8 @@ static struct {
* seagate controller, which causes SCSI code to reset bus.
*/
{"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
- {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
- {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
+ {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */
+ {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */
{"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */
{"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */
{"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */
@@ -119,7 +119,7 @@ static struct {
{"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */
{"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
{"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
- {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
+ {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN},
{"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
{"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
@@ -158,8 +158,8 @@ static struct {
{"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
{"DELL", "PV530F", NULL, BLIST_SPARSELUN},
{"DELL", "PERCRAID", NULL, BLIST_FORCELUN},
- {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
- {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
+ {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */
+ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
@@ -181,15 +181,14 @@ static struct {
{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
- {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
+ {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"HP", "C1557A", NULL, BLIST_FORCELUN},
{"HP", "C3323-300", "4269", BLIST_NOTQ},
{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
- {"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "DF400", "*", BLIST_REPORTLUN2},
+ {"HP", "DF500", "*", BLIST_REPORTLUN2},
{"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -255,7 +254,6 @@ static struct {
{"ST650211", "CF", NULL, BLIST_RETRY_HWERROR},
{"SUN", "T300", "*", BLIST_SPARSELUN},
{"SUN", "T4", "*", BLIST_SPARSELUN},
- {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
{"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
{"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
{"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
@@ -353,7 +351,8 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
* Returns: 0 OK, -error on failure.
**/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
- char *strflags, blist_flags_t flags, int key)
+ char *strflags, blist_flags_t flags,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -402,7 +401,7 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
* Returns: pointer to matching entry, or ERR_PTR on failure.
**/
static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
- const char *model, int key)
+ const char *model, enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -485,7 +484,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
*
* Returns: 0 OK, -error on failure.
**/
-int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *found;
@@ -587,20 +587,15 @@ blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key)
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
- int err;
devinfo = scsi_dev_info_list_find(vendor, model, key);
if (!IS_ERR(devinfo))
return devinfo->flags;
- err = PTR_ERR(devinfo);
- if (err != -ENOENT)
- return err;
-
- /* nothing found, return nothing */
+ /* key or device not found: return nothing */
if (key != SCSI_DEVINFO_GLOBAL)
return 0;
@@ -774,7 +769,7 @@ void scsi_exit_devinfo(void)
* Adds the requested list, returns zero on success, -EEXIST if the
* key is already registered to a list, or other error on failure.
*/
-int scsi_dev_info_add_list(int key, const char *name)
+int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name)
{
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
@@ -806,7 +801,7 @@ EXPORT_SYMBOL(scsi_dev_info_add_list);
* frees the list itself. Returns 0 on success or -EINVAL if the key
* can't be found.
*/
-int scsi_dev_info_remove_list(int key)
+int scsi_dev_info_remove_list(enum scsi_devinfo_key key)
{
struct list_head *lh, *lh_next;
struct scsi_dev_info_list_table *devinfo_table =
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 2b785d09d5bd..b88b5dbbc444 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "1815", "rdac", },
{"IBM", "1818", "rdac", },
{"IBM", "3526", "rdac", },
+ {"IBM", "3542", "rdac", },
+ {"IBM", "3552", "rdac", },
{"SGI", "TP9", "rdac", },
{"SGI", "IS", "rdac", },
- {"STK", "OPENstorage D280", "rdac", },
+ {"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
+ {"STK", "BladeCtlr", "rdac", },
{"SUN", "CSM", "rdac", },
{"SUN", "LCSM100", "rdac", },
{"SUN", "STK6580_6780", "rdac", },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 62b56de38ae8..d042915ce895 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,9 +61,10 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static int scsi_try_to_abort_cmd(struct scsi_host_template *,
struct scsi_cmnd *);
-/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
+ lockdep_assert_held(shost->host_lock);
+
if (atomic_read(&shost->host_busy) == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
@@ -220,6 +221,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
}
}
+static void scsi_eh_inc_host_failed(struct rcu_head *head)
+{
+ struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->host_failed++;
+ scsi_eh_wakeup(shost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
@@ -242,9 +254,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
scsi_eh_reset(scmd);
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
- shost->host_failed++;
- scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * Ensure that all tasks observe the host state change before the
+ * host_failed change.
+ */
+ call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
}
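
The call_rcu() above pairs with the rcu_read_lock() added to
scsi_dec_host_busy() in the scsi_lib.c hunks further down. A sketch of the
resulting guarantee, under the usual RCU rule that a call_rcu() callback runs
only after all pre-existing read-side critical sections have ended:

    completion path (scsi_dec_host_busy)    eh path (scsi_eh_scmd_add)
    ------------------------------------    --------------------------
    rcu_read_lock();                        queue scmd on eh_cmd_q;
    atomic_dec(&shost->host_busy);          call_rcu(&shost->rcu,
    if (scsi_host_in_recovery(shost))           scsi_eh_inc_host_failed);
            wake eh if host_failed set;     ...callback takes host_lock,
    rcu_read_unlock();                      increments host_failed, wakes eh

Either the reader finishes first, in which case host_busy is already
decremented when the callback's scsi_eh_wakeup() compares the counters, or the
reader observes the recovery state and performs the wakeup itself; the
missed-wakeup window is closed in both orders.
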
/**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d9ca1dfab154..976c936029cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -164,7 +164,7 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
* for a requeue after completion, which should only occur in this
* file.
*/
-static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue;
@@ -220,7 +220,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
- __scsi_queue_insert(cmd, reason, 1);
+ __scsi_queue_insert(cmd, reason, true);
}
@@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+/*
+ * Decrement the host_busy counter and wake up the error handler if necessary.
+ * To avoid missing a wakeup when shost->host_busy == shost->host_failed,
+ * scsi_eh_scmd_add() uses call_rcu() and this function holds an RCU read
+ * lock, so that this function in its entirety either finishes before
+ * scsi_eh_scmd_add() increases the host_failed counter, or notices the
+ * shost state change made by
+ * scsi_eh_scmd_add().
+ */
+static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
- struct Scsi_Host *shost = sdev->host;
- struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
+ rcu_read_lock();
atomic_dec(&shost->host_busy);
- if (starget->can_queue > 0)
- atomic_dec(&starget->target_busy);
-
- if (unlikely(scsi_host_in_recovery(shost) &&
- (shost->host_failed || shost->host_eh_scheduled))) {
+ if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
- scsi_eh_wakeup(shost);
+ if (shost->host_failed || shost->host_eh_scheduled)
+ scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
}
+ rcu_read_unlock();
+}
+
+void scsi_device_unbusy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = scsi_target(sdev);
+
+ scsi_dec_host_busy(shost);
+
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
atomic_dec(&sdev->device_busy);
}
@@ -998,11 +1015,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
break;
case ACTION_RETRY:
/* Retry the same command immediately */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
break;
case ACTION_DELAYED_RETRY:
/* Retry the same command after a delay */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
break;
}
}
@@ -1128,7 +1145,7 @@ EXPORT_SYMBOL(scsi_init_io);
* Called from inside blk_get_request() for pass-through requests and from
* inside scsi_init_command() for filesystem requests.
*/
-void scsi_initialize_rq(struct request *rq)
+static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -1136,7 +1153,6 @@ void scsi_initialize_rq(struct request *rq)
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
}
-EXPORT_SYMBOL(scsi_initialize_rq);
/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
@@ -1532,7 +1548,7 @@ starved:
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
return 0;
}
@@ -2020,7 +2036,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a5946cd64caa..99f1db5e467e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
/* scsi_devinfo.c */
/* list of keys for the lists */
-enum {
+enum scsi_devinfo_key {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
};
@@ -56,13 +56,15 @@ extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key);
+ enum scsi_devinfo_key key);
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
- blist_flags_t flags, int key);
-extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
-extern int scsi_dev_info_add_list(int key, const char *name);
-extern int scsi_dev_info_remove_list(int key);
+ blist_flags_t flags,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name);
+extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key);
extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
@@ -184,7 +186,6 @@ void scsi_dh_release_device(struct scsi_device *sdev);
static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
-static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
/*
* internal scsi timeout functions: for use by mid-layer and transport
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 26ce17178401..91b90f672d23 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1278,7 +1278,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add device: %d\n", error);
- scsi_dh_remove_device(sdev);
return error;
}
@@ -1287,7 +1286,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add class device: %d\n", error);
- scsi_dh_remove_device(sdev);
device_del(&sdev->sdev_gendev);
return error;
}
@@ -1354,7 +1352,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
bsg_unregister_queue(sdev->request_queue);
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
- scsi_dh_remove_device(sdev);
device_del(dev);
} else
put_device(&sdev->sdev_dev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4664024bd5d3..be3be0f9cb2d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -267,8 +267,8 @@ static const struct {
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
- { FC_PORTSPEED_64BIT, "64 Gbit" },
- { FC_PORTSPEED_128BIT, "128 Gbit" },
+ { FC_PORTSPEED_64GBIT, "64 Gbit" },
+ { FC_PORTSPEED_128GBIT, "128 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 10ebb213ddb3..871ea582029e 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_device.h>
@@ -1009,11 +1010,20 @@ spi_dv_device(struct scsi_device *sdev)
u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ /*
+ * Because this function and the power management code both call
+ * scsi_device_quiesce(), it is not safe to perform domain validation
+ * while suspend or resume is in progress. Hence the
+ * lock/unlock_system_sleep() calls.
+ */
+ lock_system_sleep();
+
if (unlikely(spi_dv_in_progress(starget)))
- return;
+ goto unlock;
if (unlikely(scsi_device_get(sdev)))
- return;
+ goto unlock;
+
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
@@ -1049,6 +1059,8 @@ spi_dv_device(struct scsi_device *sdev)
out_put:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
+unlock:
+ unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a028ab3322a9..ce756d575aff 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2172,7 +2172,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
}
/* Wait 1 second for next try */
msleep(1000);
- printk(".");
+ printk(KERN_CONT ".");
/*
* Wait for USB flash devices with slow firmware.
@@ -2202,9 +2202,9 @@ sd_spinup_disk(struct scsi_disk *sdkp)
if (spintime) {
if (scsi_status_is_good(the_result))
- printk("ready\n");
+ printk(KERN_CONT "ready\n");
else
- printk("not responding...\n");
+ printk(KERN_CONT "not responding...\n");
}
}
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 11826c5c2dd4..62f04c0511cf 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -615,13 +615,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
}
static void ses_match_to_enclosure(struct enclosure_device *edev,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ int refresh)
{
+ struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent);
struct efd efd = {
.addr = 0,
};
- ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+ if (refresh)
+ ses_enclosure_data_process(edev, edev_sdev, 0);
if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
efd.addr = sas_get_address(sdev);
@@ -652,7 +655,7 @@ static int ses_intf_add(struct device *cdev,
struct enclosure_device *prev = NULL;
while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
- ses_match_to_enclosure(edev, sdev);
+ ses_match_to_enclosure(edev, sdev, 1);
prev = edev;
}
return -ENODEV;
@@ -768,7 +771,7 @@ page2_not_supported:
shost_for_each_device(tmp_sdev, sdev->host) {
if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
continue;
- ses_match_to_enclosure(edev, tmp_sdev);
+ ses_match_to_enclosure(edev, tmp_sdev, 0);
}
return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f098877eed4a..0c434453aab3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1140,10 +1140,10 @@ static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned lon
}
#endif
-static unsigned int
+static __poll_t
sg_poll(struct file *filp, poll_table * wait)
{
- unsigned int res = 0;
+ __poll_t res = 0;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
@@ -1174,7 +1174,7 @@ sg_poll(struct file *filp, poll_table * wait)
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
- "sg_poll: res=0x%x\n", (int) res));
+ "sg_poll: res=0x%x\n", (__force u32) res));
return res;
}
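
The sg conversion follows the tree-wide __poll_t annotation: the return type is
bitwise, so sparse rejects implicit integer mixing, and printing the mask needs an
explicit __force cast. A hedged sketch of a conforming poll method, where my_waitq
and data_ready() are illustrative stand-ins:

    #include <linux/fs.h>
    #include <linux/poll.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

    static __poll_t my_poll(struct file *filp, poll_table *wait)
    {
            __poll_t mask = 0;

            poll_wait(filp, &my_waitq, wait);

            if (data_ready())               /* hypothetical readiness test */
                    mask |= POLLIN | POLLRDNORM;

            /* Only a __force cast may turn the bitwise type back into u32. */
            pr_debug("poll: mask=0x%x\n", (__force u32)mask);
            return mask;
    }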
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
index 0f42a225a664..e6b779930230 100644
--- a/drivers/scsi/smartpqi/Makefile
+++ b/drivers/scsi/smartpqi/Makefile
@@ -1,3 +1,3 @@
ccflags-y += -I.
-obj-m += smartpqi.o
+obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index b141d7641a2e..6c399480783d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4712,7 +4712,7 @@ static ssize_t read_byte_cnt_show(struct device *dev,
static DEVICE_ATTR_RO(read_byte_cnt);
/**
- * read_us_show - return read us - overall time spent waiting on reads in ns.
+ * read_ns_show - return read ns - overall time spent waiting on reads in ns.
* @dev: struct device
* @attr: attribute structure
* @buf: buffer to return formatted data in
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3b3d1d050cac..40fc7a590e81 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1834,8 +1834,10 @@ static int storvsc_probe(struct hv_device *device,
fc_host_node_name(host) = stor_device->node_name;
fc_host_port_name(host) = stor_device->port_name;
stor_device->rport = fc_remote_port_add(host, 0, &ids);
- if (!stor_device->rport)
+ if (!stor_device->rport) {
+ ret = -ENOMEM;
goto err_out4;
+ }
}
#endif
return 0;
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 277752b0fc6f..1a1b5d9fe514 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -157,6 +157,8 @@ enum {
#define UTP_TRANSFER_REQ_LIST_READY 0x2
#define UTP_TASK_REQ_LIST_READY 0x4
#define UIC_COMMAND_READY 0x8
+#define HOST_ERROR_INDICATOR 0x10
+#define DEVICE_ERROR_INDICATOR 0x20
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
@@ -185,6 +187,10 @@ enum {
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
+#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
+#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
+#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
@@ -192,10 +198,20 @@ enum {
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR 0x80000000
#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
+#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
/* UECT - Host UIC Error Code Transport Layer 44h */
#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
+#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
+#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
+#define UIC_TRANSPORT_BAD_TC 0x10
+#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
+#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
/* UECDME - Host UIC Error Code DME 48h */
#define UIC_DME_ERROR 0x80000000
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 2a9da2e0ea6b..2ba2b7b47f41 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -803,7 +803,9 @@ static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd)
static int wd719x_board_found(struct Scsi_Host *sh)
{
struct wd719x *wd = shost_priv(sh);
- char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" };
+ static const char * const card_types[] = {
+ "Unknown card", "WD7193", "WD7197", "WD7296"
+ };
int ret;
INIT_LIST_HEAD(&wd->active_scbs);
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index d65345312527..7dcb14d303eb 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -27,6 +27,8 @@
#define DRIVER_NAME "armada_3700_spi"
+#define A3700_SPI_MAX_SPEED_HZ 100000000
+#define A3700_SPI_MAX_PRESCALE 30
#define A3700_SPI_TIMEOUT 10
/* SPI Register Offset */
@@ -184,12 +186,15 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
return 0;
}
-static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi)
+static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi, bool enable)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
- val |= A3700_SPI_FIFO_MODE;
+ if (enable)
+ val |= A3700_SPI_FIFO_MODE;
+ else
+ val &= ~A3700_SPI_FIFO_MODE;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
@@ -297,7 +302,7 @@ static int a3700_spi_init(struct a3700_spi *a3700_spi)
a3700_spi_deactivate_cs(a3700_spi, i);
/* Enable FIFO mode */
- a3700_spi_fifo_mode_set(a3700_spi);
+ a3700_spi_fifo_mode_set(a3700_spi, true);
/* Set SPI mode */
a3700_spi_mode_set(a3700_spi, master->mode_bits);
@@ -416,15 +421,20 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi;
- unsigned int byte_len;
a3700_spi = spi_master_get_devdata(spi->master);
a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
- byte_len = xfer->bits_per_word >> 3;
+ /* Use 4-byte transfers. Each transfer method has its own way to deal
+ * with the remaining bytes of transfers that are not 4-byte aligned.
+ */
+ a3700_spi_bytelen_set(a3700_spi, 4);
- a3700_spi_fifo_thres_set(a3700_spi, byte_len);
+ /* Initialize the working buffers */
+ a3700_spi->tx_buf = xfer->tx_buf;
+ a3700_spi->rx_buf = xfer->rx_buf;
+ a3700_spi->buf_len = xfer->len;
}
static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
@@ -491,7 +501,7 @@ static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
u32 val;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
- val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ val = *(u32 *)a3700_spi->tx_buf;
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
a3700_spi->buf_len -= 4;
a3700_spi->tx_buf += 4;
@@ -514,9 +524,8 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
if (a3700_spi->buf_len >= 4) {
- u32 data = le32_to_cpu(val);
- memcpy(a3700_spi->rx_buf, &data, 4);
+ memcpy(a3700_spi->rx_buf, &val, 4);
a3700_spi->buf_len -= 4;
a3700_spi->rx_buf += 4;
@@ -579,27 +588,26 @@ static int a3700_spi_prepare_message(struct spi_master *master,
if (ret)
return ret;
- a3700_spi_bytelen_set(a3700_spi, 4);
-
a3700_spi_mode_set(a3700_spi, spi->mode);
return 0;
}
-static int a3700_spi_transfer_one(struct spi_master *master,
+static int a3700_spi_transfer_one_fifo(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
int ret = 0, timeout = A3700_SPI_TIMEOUT;
- unsigned int nbits = 0;
+ unsigned int nbits = 0, byte_len;
u32 val;
- a3700_spi_transfer_setup(spi, xfer);
+ /* Make sure we use FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, true);
- a3700_spi->tx_buf = xfer->tx_buf;
- a3700_spi->rx_buf = xfer->rx_buf;
- a3700_spi->buf_len = xfer->len;
+ /* Configure FIFO thresholds */
+ byte_len = xfer->bits_per_word >> 3;
+ a3700_spi_fifo_thres_set(a3700_spi, byte_len);
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
@@ -615,6 +623,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
+ /* Clear the WFIFO, since its last 2 bytes are shifted out during
+ * a read operation.
+ */
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
+
/* Set read data length */
spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
a3700_spi->buf_len);
@@ -729,6 +742,63 @@ out:
return ret;
}
+static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ u32 val;
+
+ /* Disable FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, false);
+
+ while (a3700_spi->buf_len) {
+
+ /* When we have fewer than 4 bytes to transfer, switch to 1-byte
+ * mode. This is reset after each transfer.
+ */
+ if (a3700_spi->buf_len < 4)
+ a3700_spi_bytelen_set(a3700_spi, 1);
+
+ if (a3700_spi->byte_len == 1)
+ val = *a3700_spi->tx_buf;
+ else
+ val = *(u32 *)a3700_spi->tx_buf;
+
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+
+ /* Wait for all the data to be shifted in / out */
+ while (!(spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG) &
+ A3700_SPI_XFER_DONE))
+ cpu_relax();
+
+ val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+
+ memcpy(a3700_spi->rx_buf, &val, a3700_spi->byte_len);
+
+ a3700_spi->buf_len -= a3700_spi->byte_len;
+ a3700_spi->tx_buf += a3700_spi->byte_len;
+ a3700_spi->rx_buf += a3700_spi->byte_len;
+
+ }
+
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+static int a3700_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ a3700_spi_transfer_setup(spi, xfer);
+
+ if (xfer->tx_buf && xfer->rx_buf)
+ return a3700_spi_transfer_one_full_duplex(master, spi, xfer);
+
+ return a3700_spi_transfer_one_fifo(master, spi, xfer);
+}
+
static int a3700_spi_unprepare_message(struct spi_master *master,
struct spi_message *message)
{
@@ -778,7 +848,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->transfer_one = a3700_spi_transfer_one;
master->unprepare_message = a3700_spi_unprepare_message;
master->set_cs = a3700_spi_set_cs;
- master->flags = SPI_MASTER_HALF_DUPLEX;
master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
@@ -818,6 +887,11 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
+ master->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
+ clk_get_rate(spi->clk));
+ master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
+ A3700_SPI_MAX_PRESCALE);
+
ret = a3700_spi_init(spi);
if (ret)
goto error_clk;
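
The probe hunk above derives the advertised speed range from the input clock; the
same arithmetic as a sketch, assuming a prepared struct clk (constants mirror the
patch):

    #include <linux/clk.h>
    #include <linux/kernel.h>
    #include <linux/spi/spi.h>

    #define MAX_SPEED_HZ    100000000       /* controller ceiling */
    #define MAX_PRESCALE    30              /* largest clock divider */

    static void set_speed_range(struct spi_master *master, struct clk *clk)
    {
            unsigned long rate = clk_get_rate(clk);

            /* Fastest: the input clock, capped at the controller limit. */
            master->max_speed_hz = min_t(unsigned long, MAX_SPEED_HZ, rate);
            /* Slowest: input clock over the largest prescaler, rounded up. */
            master->min_speed_hz = DIV_ROUND_UP(rate, MAX_PRESCALE);
    }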
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 669470971023..4a11fc0d4136 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -291,6 +291,10 @@ struct atmel_spi {
struct spi_transfer *current_transfer;
int current_remaining_bytes;
int done_status;
+ dma_addr_t dma_addr_rx_bbuf;
+ dma_addr_t dma_addr_tx_bbuf;
+ void *addr_rx_bbuf;
+ void *addr_tx_bbuf;
struct completion xfer_completion;
@@ -436,6 +440,11 @@ static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
spin_unlock_irqrestore(&as->lock, as->flags);
}
+static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
+{
+ return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
+}
+
static inline bool atmel_spi_use_dma(struct atmel_spi *as,
struct spi_transfer *xfer)
{
@@ -448,7 +457,12 @@ static bool atmel_spi_can_dma(struct spi_master *master,
{
struct atmel_spi *as = spi_master_get_devdata(master);
- return atmel_spi_use_dma(as, xfer);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
+ return atmel_spi_use_dma(as, xfer) &&
+ !atmel_spi_is_vmalloc_xfer(xfer);
+ else
+ return atmel_spi_use_dma(as, xfer);
+
}
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
@@ -594,6 +608,11 @@ static void dma_callback(void *data)
struct spi_master *master = data;
struct atmel_spi *as = spi_master_get_devdata(master);
+ if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
+ as->current_transfer->len);
+ }
complete(&as->xfer_completion);
}
@@ -744,17 +763,41 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
goto err_exit;
/* Send both scatterlists */
- rxdesc = dmaengine_prep_slave_sg(rxchan,
- xfer->rx_sg.sgl, xfer->rx_sg.nents,
- DMA_FROM_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ rxdesc = dmaengine_prep_slave_single(rxchan,
+ as->dma_addr_rx_bbuf,
+ xfer->len,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!rxdesc)
goto err_dma;
- txdesc = dmaengine_prep_slave_sg(txchan,
- xfer->tx_sg.sgl, xfer->tx_sg.nents,
- DMA_TO_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
+ txdesc = dmaengine_prep_slave_single(txchan,
+ as->dma_addr_tx_bbuf,
+ xfer->len, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!txdesc)
goto err_dma;
@@ -1426,27 +1469,7 @@ static void atmel_get_caps(struct atmel_spi *as)
as->caps.is_spi2 = version > 0x121;
as->caps.has_wdrbt = version >= 0x210;
-#ifdef CONFIG_SOC_SAM_V4_V5
- /*
- * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
- * since this later function tries to map buffers with dma_map_sg()
- * even if they have not been allocated inside DMA-safe areas.
- * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
- * those ARM cores, the data cache follows the PIPT model.
- * Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
- * In case of PIPT caches, there cannot be cache aliases.
- * However on ARM9 cores, the data cache follows the VIVT model, hence
- * the cache aliases issue can occur when buffers are allocated from
- * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
- * not taken into account or at least not handled completely (cache
- * lines of aliases are not invalidated).
- * This is not a theorical issue: it was reproduced when trying to mount
- * a UBI file-system on a at91sam9g35ek board.
- */
- as->caps.has_dma_support = false;
-#else
as->caps.has_dma_support = version >= 0x212;
-#endif
as->caps.has_pdc_support = version < 0x212;
}
@@ -1592,6 +1615,30 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_pdc = true;
}
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_rx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_rx_bbuf) {
+ as->use_dma = false;
+ } else {
+ as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_tx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_tx_bbuf) {
+ as->use_dma = false;
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
+ }
+ if (!as->use_dma)
+ dev_info(master->dev.parent,
+ "cannot allocate dma coherent memory\n");
+ }
+
if (as->caps.has_dma_support && !as->use_dma)
dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
@@ -1664,6 +1711,14 @@ static int atmel_spi_remove(struct platform_device *pdev)
if (as->use_dma) {
atmel_spi_stop_dma(master);
atmel_spi_release_dma(master);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_tx_bbuf,
+ as->dma_addr_tx_bbuf);
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
}
spin_lock_irq(&as->lock);
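
The atmel change replaces the blanket DMA opt-out on ARM9 (whose VIVT caches can
alias vmalloc mappings) with bounce buffers from dma_alloc_coherent(). A minimal
sketch of the TX side of that bounce, with hypothetical parameters: bbuf and
bbuf_dma are assumed to come from a dma_alloc_coherent() call at probe time.

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>           /* is_vmalloc_addr() */
    #include <linux/string.h>

    static dma_addr_t tx_dma_addr(struct device *dev, const void *buf,
                                  size_t len, void *bbuf, dma_addr_t bbuf_dma)
    {
            if (is_vmalloc_addr(buf)) {
                    /* vmalloc memory may alias in a VIVT cache: bounce it. */
                    memcpy(bbuf, buf, len);
                    return bbuf_dma;
            }
            /* Lowmem buffer: safe to map directly. */
            return dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
    }

On the RX side the copy runs the other way, in the DMA completion callback, as the
dma_callback() hunk above shows.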
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index 6e409eabe1c9..d02ceb7a29d1 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -27,8 +27,6 @@ struct bcm53xxspi {
struct bcma_device *core;
struct spi_master *master;
void __iomem *mmio_base;
-
- size_t read_offset;
bool bspi; /* Boot SPI mode with memory mapping */
};
@@ -172,8 +170,6 @@ static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf,
if (!cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
-
- b53spi->read_offset = len;
}
static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
@@ -182,10 +178,10 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
u32 tmp;
int i;
- for (i = 0; i < b53spi->read_offset + len; i++) {
+ for (i = 0; i < len; i++) {
tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
B53SPI_CDRAM_PCS_DSCK;
- if (!cont && i == b53spi->read_offset + len - 1)
+ if (!cont && i == len - 1)
tmp &= ~B53SPI_CDRAM_CONT;
tmp &= ~0x1;
/* Command Register File */
@@ -194,8 +190,7 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
/* Set queue pointers */
bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
- bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP,
- b53spi->read_offset + len - 1);
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
if (cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
@@ -214,13 +209,11 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
for (i = 0; i < len; ++i) {
- int offset = b53spi->read_offset + i;
+ u16 reg = B53SPI_MSPI_RXRAM + 4 * (1 + i * 2);
/* Data stored in the transmit register file LSB */
- r_buf[i] = (u8)bcm53xxspi_read(b53spi, B53SPI_MSPI_RXRAM + 4 * (1 + offset * 2));
+ r_buf[i] = (u8)bcm53xxspi_read(b53spi, reg);
}
-
- b53spi->read_offset = 0;
}
static int bcm53xxspi_transfer_one(struct spi_master *master,
@@ -238,7 +231,8 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
left = t->len;
while (left) {
size_t to_write = min_t(size_t, 16, left);
- bool cont = left - to_write > 0;
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_write > 0;
bcm53xxspi_buf_write(b53spi, buf, to_write, cont);
left -= to_write;
@@ -250,9 +244,9 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
buf = (u8 *)t->rx_buf;
left = t->len;
while (left) {
- size_t to_read = min_t(size_t, 16 - b53spi->read_offset,
- left);
- bool cont = left - to_read > 0;
+ size_t to_read = min_t(size_t, 16, left);
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_read > 0;
bcm53xxspi_buf_read(b53spi, buf, to_read, cont);
left -= to_read;
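
Both loops now compute the continuation bit the same way: chip select stays
asserted whenever more chunks of this transfer remain or the message has further
transfers. As a one-line helper built on spi_transfer_is_last():

    #include <linux/spi/spi.h>

    /* Keep CS asserted unless this chunk ends the message's last transfer. */
    static bool keep_cs_asserted(struct spi_master *master,
                                 struct spi_transfer *t, size_t bytes_left)
    {
            return !spi_transfer_is_last(master, t) || bytes_left > 0;
    }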
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 6ddb6ef1fda4..60d59b003aa4 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -945,6 +945,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
+ init_completion(&dspi->done);
+
ret = platform_get_irq(pdev, 0);
if (ret == 0)
ret = -EINVAL;
@@ -1021,8 +1023,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->get_rx = davinci_spi_rx_buf_u8;
dspi->get_tx = davinci_spi_tx_buf_u8;
- init_completion(&dspi->done);
-
/* Reset In/OUT SPI module */
iowrite32(0, dspi->base + SPIGCR0);
udelay(100);
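
The davinci fix is an ordering bug: the IRQ handler may complete() the completion
as soon as the interrupt is requested, so init_completion() must run first. A
hedged sketch of the safe shape, with illustrative names:

    #include <linux/completion.h>
    #include <linux/device.h>
    #include <linux/interrupt.h>

    struct my_priv {
            struct completion done;
    };

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            struct my_priv *p = data;

            complete(&p->done);     /* may fire right after request_irq() */
            return IRQ_HANDLED;
    }

    static int my_probe_irq(struct device *dev, struct my_priv *p, int irq)
    {
            /* Initialize everything the handler touches before it is live. */
            init_completion(&p->done);

            return devm_request_irq(dev, irq, my_irq_handler, 0,
                                    dev_name(dev), p);
    }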
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b217c22ff72f..211cc7d75bf8 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -30,13 +30,11 @@
/* Slave spi_dev related */
struct chip_data {
- u8 cs; /* chip select pin */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
- u8 enable_dma;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index f652f70cb8db..0630962ce442 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -903,10 +903,9 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
- { .compatible = "fsl,vf610-dspi", .data = (void *)&vf610_data, },
- { .compatible = "fsl,ls1021a-v1.0-dspi",
- .data = (void *)&ls1021a_v1_data, },
- { .compatible = "fsl,ls2085a-dspi", .data = (void *)&ls2085a_data, },
+ { .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
+ { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
+ { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
@@ -980,7 +979,7 @@ static int dspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
master->cleanup = dspi_cleanup;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 79ddefe4180d..6f57592a7f95 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1622,6 +1622,11 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
master->dev.of_node = pdev->dev.of_node;
+ ret = spi_bitbang_start(&spi_imx->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+ goto out_clk_put;
+ }
/* Request GPIO CS lines, if any */
if (!spi_imx->slave_mode && master->cs_gpios) {
@@ -1640,12 +1645,6 @@ static int spi_imx_probe(struct platform_device *pdev)
}
}
- ret = spi_bitbang_start(&spi_imx->bitbang);
- if (ret) {
- dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
- goto out_clk_put;
- }
-
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
@@ -1668,12 +1667,23 @@ static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;
spi_bitbang_stop(&spi_imx->bitbang);
+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_unprepare(spi_imx->clk_ipg);
- clk_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
+ clk_disable_unprepare(spi_imx->clk_per);
spi_imx_sdma_exit(spi_imx);
spi_master_put(master);
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index cebfea5faa4b..dafed6280df3 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -198,8 +198,10 @@ static int jcore_spi_probe(struct platform_device *pdev)
/* Register our spi controller */
err = devm_spi_register_master(&pdev->dev, master);
- if (err)
+ if (err) {
+ clk_disable(clk);
goto exit;
+ }
return 0;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 7f8429635502..5c82910e3480 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -599,6 +599,7 @@ static int meson_spicc_remove(struct platform_device *pdev)
static const struct of_device_id meson_spicc_of_match[] = {
{ .compatible = "amlogic,meson-gx-spicc", },
+ { .compatible = "amlogic,meson-axg-spicc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 8974bb340b3a..deca63e82ff6 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -94,6 +94,7 @@ struct orion_spi {
struct spi_master *master;
void __iomem *base;
struct clk *clk;
+ struct clk *axi_clk;
const struct orion_spi_dev *devdata;
struct orion_direct_acc direct_access[ORION_NUM_CHIPSELECTS];
@@ -634,6 +635,16 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status)
goto out;
+ /* The following clock is only used by some SoCs */
+ spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (IS_ERR(spi->axi_clk) &&
+ PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
+ status = -EPROBE_DEFER;
+ goto out_rel_clk;
+ }
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
+
tclk_hz = clk_get_rate(spi->clk);
/*
@@ -658,7 +669,7 @@ static int orion_spi_probe(struct platform_device *pdev)
spi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(spi->base)) {
status = PTR_ERR(spi->base);
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
/* Scan all SPI devices of this controller for direct mapped devices */
@@ -696,7 +707,7 @@ static int orion_spi_probe(struct platform_device *pdev)
PAGE_SIZE);
if (!spi->direct_access[cs].vaddr) {
status = -ENOMEM;
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
spi->direct_access[cs].size = PAGE_SIZE;
@@ -724,6 +735,8 @@ static int orion_spi_probe(struct platform_device *pdev)
out_rel_pm:
pm_runtime_disable(&pdev->dev);
+out_rel_axi_clk:
+ clk_disable_unprepare(spi->axi_clk);
out_rel_clk:
clk_disable_unprepare(spi->clk);
out:
@@ -738,6 +751,7 @@ static int orion_spi_remove(struct platform_device *pdev)
struct orion_spi *spi = spi_master_get_devdata(master);
pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
spi_unregister_master(master);
@@ -754,6 +768,7 @@ static int orion_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
return 0;
}
@@ -763,6 +778,8 @@ static int orion_spi_runtime_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
return clk_prepare_enable(spi->clk);
}
#endif
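
The orion driver treats the "axi" clock as optional: only probe deferral is an
error, absence is tolerated, and the returned handle can be kept as-is because
clk_disable_unprepare() ignores both NULL and ERR_PTR values. A sketch of the
lookup under those assumptions (this predates devm_clk_get_optional()):

    #include <linux/clk.h>
    #include <linux/err.h>

    static int get_optional_clk(struct device *dev, struct clk **out)
    {
            struct clk *clk = devm_clk_get(dev, "axi");

            if (IS_ERR(clk)) {
                    /* The provider may not have probed yet: retry later. */
                    if (PTR_ERR(clk) == -EPROBE_DEFER)
                            return -EPROBE_DEFER;
                    *out = clk;     /* absent: ERR_PTR is safe to keep */
                    return 0;
            }
            *out = clk;
            return clk_prepare_enable(clk);
    }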
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 4cb515a3104c..b0822d1dba29 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1237,7 +1237,7 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
* different chip_info, release previously requested GPIO
*/
if (chip->gpiod_cs) {
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
chip->gpiod_cs = NULL;
}
@@ -1417,7 +1417,7 @@ static void cleanup(struct spi_device *spi)
if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
chip->gpiod_cs)
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
kfree(chip);
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index de7df20f8712..baa3a9fa2638 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1,17 +1,7 @@
-/*
- * Copyright (C) 2009 Samsung Electronics Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2009 Samsung Electronics Co., Ltd.
+// Jaswinder Singh <jassi.brar@samsung.com>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index fcd261f98b9f..c5dcfb434a49 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -19,6 +19,7 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -55,9 +56,14 @@ struct sh_msiof_spi_priv {
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
+ unsigned short unused_ss;
+ bool native_cs_inited;
+ bool native_cs_high;
bool slave_aborted;
};
+#define MAX_SS 3 /* Maximum number of native chip selects */
+
#define TMDR1 0x00 /* Transmit Mode Register 1 */
#define TMDR2 0x04 /* Transmit Mode Register 2 */
#define TMDR3 0x08 /* Transmit Mode Register 3 */
@@ -91,6 +97,8 @@ struct sh_msiof_spi_priv {
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+#define TMDR1_SYNCCH_MASK 0xc000000 /* Synchronization Signal Channel Select */
+#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
@@ -324,7 +332,7 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return val;
}
-static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
+static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
u32 cpol, u32 cpha,
u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
@@ -342,10 +350,13 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
- if (spi_controller_is_slave(p->master))
+ if (spi_controller_is_slave(p->master)) {
sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
- else
- sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ } else {
+ sh_msiof_write(p, TMDR1,
+ tmp | MDR1_TRMD | TMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+ }
if (p->master->flags & SPI_MASTER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
@@ -528,8 +539,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
{
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
-
- pm_runtime_get_sync(&p->pdev->dev);
+ u32 clr, set, tmp;
if (!np) {
/*
@@ -539,19 +549,31 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
spi->cs_gpio = (uintptr_t)spi->controller_data;
}
- /* Configure pins before deasserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ if (gpio_is_valid(spi->cs_gpio)) {
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ return 0;
+ }
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (spi_controller_is_slave(p->master))
+ return 0;
+ if (p->native_cs_inited &&
+ (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
+ return 0;
+ /* Configure native chip select mode/polarity early */
+ clr = MDR1_SYNCMD_MASK;
+ set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI;
+ if (spi->mode & SPI_CS_HIGH)
+ clr |= BIT(MDR1_SYNCAC_SHIFT);
+ else
+ set |= BIT(MDR1_SYNCAC_SHIFT);
+ pm_runtime_get_sync(&p->pdev->dev);
+ tmp = sh_msiof_read(p, TMDR1) & ~clr;
+ sh_msiof_write(p, TMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
-
+ p->native_cs_high = spi->mode & SPI_CS_HIGH;
+ p->native_cs_inited = true;
return 0;
}
@@ -560,13 +582,20 @@ static int sh_msiof_prepare_message(struct spi_master *master,
{
struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
const struct spi_device *spi = msg->spi;
+ u32 ss, cs_high;
/* Configure pins before asserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ss = p->unused_ss;
+ cs_high = p->native_cs_high;
+ } else {
+ ss = spi->chip_select;
+ cs_high = !!(spi->mode & SPI_CS_HIGH);
+ }
+ sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
!!(spi->mode & SPI_CPHA),
!!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ !!(spi->mode & SPI_LSB_FIRST), cs_high);
return 0;
}
@@ -784,11 +813,21 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
goto stop_dma;
}
- /* wait for tx fifo to be emptied / rx fifo to be filled */
+ /* wait for tx/rx DMA completion */
ret = sh_msiof_wait_for_completion(p);
if (ret)
goto stop_reset;
+ if (!rx) {
+ reinit_completion(&p->done);
+ sh_msiof_write(p, IER, IER_TEOFE);
+
+ /* wait for tx fifo to be emptied */
+ ret = sh_msiof_wait_for_completion(p);
+ if (ret)
+ goto stop_reset;
+ }
+
/* clear status bits */
sh_msiof_reset_str(p);
@@ -912,9 +951,8 @@ static int sh_msiof_transfer_one(struct spi_master *master,
ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
if (ret == -EAGAIN) {
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&p->pdev->dev),
- dev_name(&p->pdev->dev));
+ dev_warn_once(&p->pdev->dev,
+ "DMA not available, falling back to PIO\n");
break;
}
if (ret)
@@ -1071,6 +1109,45 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
}
#endif
+static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
+{
+ struct device *dev = &p->pdev->dev;
+ unsigned int used_ss_mask = 0;
+ unsigned int cs_gpios = 0;
+ unsigned int num_cs, i;
+ int ret;
+
+ ret = gpiod_count(dev, "cs");
+ if (ret <= 0)
+ return 0;
+
+ num_cs = max_t(unsigned int, ret, p->master->num_chipselect);
+ for (i = 0; i < num_cs; i++) {
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
+ if (!IS_ERR(gpiod)) {
+ cs_gpios++;
+ continue;
+ }
+
+ if (PTR_ERR(gpiod) != -ENOENT)
+ return PTR_ERR(gpiod);
+
+ if (i >= MAX_SS) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ used_ss_mask |= BIT(i);
+ }
+ p->unused_ss = ffz(used_ss_mask);
+ if (cs_gpios && p->unused_ss >= MAX_SS) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
@@ -1284,13 +1361,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
+ /* Setup GPIO chip selects */
+ master->num_chipselect = p->info->num_chipselect;
+ ret = sh_msiof_get_cs_gpios(p);
+ if (ret)
+ goto err1;
+
/* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
master->flags = chipdata->master_flags;
master->bus_num = pdev->id;
master->dev.of_node = pdev->dev.of_node;
- master->num_chipselect = p->info->num_chipselect;
master->setup = sh_msiof_spi_setup;
master->prepare_message = sh_msiof_prepare_message;
master->slave_abort = sh_msiof_slave_abort;
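
sh_msiof_get_cs_gpios() counts which native chip selects the GPIO table shadows
and keeps one spare native line to drive SYNC during GPIO-CS transfers. The mask
arithmetic, as a self-contained sketch:

    #include <linux/bitops.h>
    #include <linux/errno.h>

    #define MAX_NATIVE_SS   3       /* native chip selects on this IP */

    /* Return a native CS no device claims, or -EINVAL if all are taken. */
    static int pick_unused_native_cs(unsigned long used_ss_mask)
    {
            unsigned long ss = ffz(used_ss_mask);   /* lowest clear bit */

            if (ss >= MAX_NATIVE_SS)
                    return -EINVAL;
            return ss;
    }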
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index bbb1a275f718..f009d76f96b1 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1072,7 +1072,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct sirfsoc_spi *sspi;
struct spi_master *master;
struct resource *mem_res;
- struct sirf_spi_comp_data *spi_comp_data;
+ const struct sirf_spi_comp_data *spi_comp_data;
int irq;
int ret;
const struct of_device_id *match;
@@ -1092,7 +1092,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
sspi->fifo_full_offset = ilog2(sspi->fifo_size);
- spi_comp_data = (struct sirf_spi_comp_data *)match->data;
+ spi_comp_data = match->data;
sspi->regs = spi_comp_data->regs;
sspi->type = spi_comp_data->type;
sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index fb38234249a8..8533f4edd00a 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -541,7 +541,7 @@ err_free_master:
static int sun6i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index e0b9fe1d0e37..63fedc49ae9c 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -381,6 +381,7 @@ static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
}
static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,axi-quad-spi-1.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index d8e4219c2324..ee18428a051f 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -3,10 +3,7 @@ config SSB_POSSIBLE
depends on HAS_IOMEM && HAS_DMA
default y
-menu "Sonics Silicon Backplane"
- depends on SSB_POSSIBLE
-
-config SSB
+menuconfig SSB
tristate "Sonics Silicon Backplane support"
depends on SSB_POSSIBLE
help
@@ -21,6 +18,8 @@ config SSB
If unsure, say N.
+if SSB
+
# Common SPROM support routines
config SSB_SPROM
bool
@@ -32,7 +31,7 @@ config SSB_BLOCKIO
config SSB_PCIHOST_POSSIBLE
bool
- depends on SSB && (PCI = y || PCI = SSB)
+ depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
default y
config SSB_PCIHOST
@@ -185,4 +184,4 @@ config SSB_DRIVER_GPIO
If unsure, say N
-endmenu
+endif # SSB
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 554683912cff..e95ab683331e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -26,6 +26,10 @@ if STAGING
source "drivers/staging/irda/net/Kconfig"
+source "drivers/staging/ipx/Kconfig"
+
+source "drivers/staging/ncpfs/Kconfig"
+
source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/comedi/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 6e536020029a..af8cd6a3a1f6 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -3,6 +3,8 @@
obj-y += media/
obj-y += typec/
+obj-$(CONFIG_IPX) += ipx/
+obj-$(CONFIG_NCP_FS) += ncpfs/
obj-$(CONFIG_IRDA) += irda/net/
obj-$(CONFIG_IRDA) += irda/drivers/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 0f695df14c9d..bbdc53b686dd 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* mm/ashmem.c
*
* Anonymous Shared Memory Subsystem, ashmem
@@ -5,15 +6,6 @@
* Copyright (C) 2008 Google, Inc.
*
* Robert Love <rlove@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "ashmem: " fmt
@@ -765,10 +757,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case ASHMEM_SET_SIZE:
ret = -EINVAL;
+ mutex_lock(&ashmem_mutex);
if (!asma->file) {
ret = 0;
asma->size = (size_t)arg;
}
+ mutex_unlock(&ashmem_mutex);
break;
case ASHMEM_GET_SIZE:
ret = asma->size;
@@ -816,7 +810,23 @@ static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
return ashmem_ioctl(file, cmd, arg);
}
#endif
+#ifdef CONFIG_PROC_FS
+static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
+{
+ struct ashmem_area *asma = file->private_data;
+
+ mutex_lock(&ashmem_mutex);
+
+ if (asma->file)
+ seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);
+
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+ seq_printf(m, "name:\t%s\n",
+ asma->name + ASHMEM_NAME_PREFIX_LEN);
+ mutex_unlock(&ashmem_mutex);
+}
+#endif
static const struct file_operations ashmem_fops = {
.owner = THIS_MODULE,
.open = ashmem_open,
@@ -828,6 +838,9 @@ static const struct file_operations ashmem_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_ashmem_ioctl,
#endif
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = ashmem_show_fdinfo,
+#endif
};
static struct miscdevice ashmem_misc = {
@@ -862,12 +875,18 @@ static int __init ashmem_init(void)
goto out_free2;
}
- register_shrinker(&ashmem_shrinker);
+ ret = register_shrinker(&ashmem_shrinker);
+ if (ret) {
+ pr_err("failed to register shrinker!\n");
+ goto out_demisc;
+ }
pr_info("initialized\n");
return 0;
+out_demisc:
+ misc_deregister(&ashmem_misc);
out_free2:
kmem_cache_destroy(ashmem_range_cachep);
out_free1:
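
register_shrinker() can fail (it allocates per-node counters), so ashmem_init()
now checks it and unwinds in reverse registration order. The ladder, reduced to a
sketch with assumed my_misc/my_shrinker objects:

    #include <linux/miscdevice.h>
    #include <linux/shrinker.h>

    static struct miscdevice my_misc;       /* assumed initialized */
    static struct shrinker my_shrinker;     /* assumed ops filled in */

    static int __init my_init(void)
    {
            int ret;

            ret = misc_register(&my_misc);
            if (ret)
                    return ret;

            ret = register_shrinker(&my_shrinker);
            if (ret)
                    goto out_demisc;        /* undo in reverse order */

            return 0;

    out_demisc:
            misc_deregister(&my_misc);
            return ret;
    }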
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
index 5abcfd7aa706..60d7208f110a 100644
--- a/drivers/staging/android/ashmem.h
+++ b/drivers/staging/android/ashmem.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR Apache-2.0)
/*
* include/linux/ashmem.h
*
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
index c78989351f9c..a8d3cc412fb9 100644
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -1,16 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/kernel.h>
@@ -70,8 +60,10 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EFAULT;
ret = validate_ioctl_arg(cmd, &data);
- if (WARN_ON_ONCE(ret))
+ if (ret) {
+ pr_warn_once("%s: ioctl validate failed\n", __func__);
return ret;
+ }
if (!(dir & _IOC_WRITE))
memset(&data, 0, sizeof(data));
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index f480885e346b..57e0d8035b2e 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1,42 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- *
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
+#include <linux/anon_inodes.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
-#include <linux/anon_inodes.h>
+#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
-#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
-#include <linux/slab.h>
+#include <linux/sched/task.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/dma-buf.h>
-#include <linux/idr.h>
-#include <linux/sched/task.h>
#include "ion.h"
@@ -539,6 +529,7 @@ void ion_device_add_heap(struct ion_heap *heap)
{
struct dentry *debug_file;
struct ion_device *dev = internal_dev;
+ int ret;
if (!heap->ops->allocate || !heap->ops->free)
pr_err("%s: can not add heap with invalid ops struct.\n",
@@ -550,8 +541,11 @@ void ion_device_add_heap(struct ion_heap *heap)
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_init_deferred_free(heap);
- if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
- ion_heap_init_shrinker(heap);
+ if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
+ ret = ion_heap_init_shrinker(heap);
+ if (ret)
+ pr_err("%s: Failed to register shrinker\n", __func__);
+ }
heap->dev = dev;
down_write(&dev->lock);
@@ -567,9 +561,9 @@ void ion_device_add_heap(struct ion_heap *heap)
char debug_name[64];
snprintf(debug_name, 64, "%s_shrink", heap->name);
- debug_file = debugfs_create_file(
- debug_name, 0644, dev->debug_root, heap,
- &debug_shrink_fops);
+ debug_file = debugfs_create_file(debug_name,
+ 0644, dev->debug_root, heap,
+ &debug_shrink_fops);
if (!debug_file) {
char buf[256], *path;
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index f5f9cd63f8e9..a238f23c9116 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/staging/android/ion/ion.h
*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _ION_H
@@ -189,7 +180,8 @@ struct ion_heap {
wait_queue_head_t waitqueue;
struct task_struct *task;
- int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *s,
+ void *unused);
};
/**
@@ -238,7 +230,7 @@ int ion_alloc(size_t len,
* this function will be called to setup a shrinker to shrink the freelists
* and call the heap's shrink op.
*/
-void ion_heap_init_shrinker(struct ion_heap *heap);
+int ion_heap_init_shrinker(struct ion_heap *heap);
/**
* ion_heap_init_deferred_free -- initialize deferred free functionality
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index fee7650d6fbb..e129237a0417 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/staging/android/ion/ion_carveout_heap.c
*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 102c09398317..159d72f5bc42 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/staging/android/ion/ion_chunk_heap.c
*
* Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/dma-mapping.h>
#include <linux/err.h>
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 86196ffd2faf..94e06925c712 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/staging/android/ion/ion_cma_heap.c
*
* Copyright (C) Linaro 2012
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/device.h>
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 91faa7f035b9..772dad65396e 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/staging/android/ion/ion_heap.c
*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/err.h>
@@ -306,11 +297,12 @@ static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
return freed;
}
-void ion_heap_init_shrinker(struct ion_heap *heap)
+int ion_heap_init_shrinker(struct ion_heap *heap)
{
heap->shrinker.count_objects = ion_heap_shrink_count;
heap->shrinker.scan_objects = ion_heap_shrink_scan;
heap->shrinker.seeks = DEFAULT_SEEKS;
heap->shrinker.batch = 0;
- register_shrinker(&heap->shrinker);
+
+ return register_shrinker(&heap->shrinker);
}
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 817849df9de3..b3017f12835f 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/staging/android/ion/ion_mem_pool.c
*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/debugfs.h>
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 4dc5d7a589c2..bc19cdd30637 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/staging/android/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <asm/page.h>
@@ -371,7 +362,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
unsigned long i;
int ret;
- page = alloc_pages(low_order_gfp_flags, order);
+ page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
if (!page)
return -ENOMEM;
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
index 13df42d200b7..5b531af6820e 100644
--- a/drivers/staging/android/uapi/ashmem.h
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR Apache-2.0)
/*
* drivers/staging/android/uapi/ashmem.h
*
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 9e21451149d0..825d3e95ccd3 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/staging/android/uapi/ion.h
*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _UAPI_LINUX_ION_H
diff --git a/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
deleted file mode 100644
index 2ea65175c032..000000000000
--- a/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Arm TrustZone CryptoCell cryptographic accelerators
-
-Required properties:
-- compatible: must be "arm,cryptocell-712-ree".
-- reg: shall contain base register location and length.
- Typically length is 0x10000.
-- interrupts: shall contain the interrupt for the device.
-
-Optional properties:
-- interrupt-parent: can designate the interrupt controller the
- device interrupt is connected to, if needed.
-- clocks: may contain the clock handling the device, if needed.
-- power-domains: may contain a reference to the PM domain, if applicable.
-
-
-Examples:
-
-Zynq FPGA device
-----------------
-
- arm_cc7x: arm_cc7x@80000000 {
- compatible = "arm,cryptocell-712-ree";
- interrupt-parent = <&intc>;
- interrupts = < 0 30 4 >;
- reg = < 0x80000000 0x10000 >;
- };
-
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index 0b3092ba2fcb..c94dfe8adb63 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config CRYPTO_DEV_CCREE
tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile
index ae702f3b5369..bdc27970f95f 100644
--- a/drivers/staging/ccree/Makefile
+++ b/drivers/staging/ccree/Makefile
@@ -1,3 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_cipher.o ssi_hash.o ssi_aead.o ssi_ivgen.o ssi_sram_mgr.o ssi_pm.o
-ccree-$(CONFIG_CRYPTO_FIPS) += ssi_fips.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
+ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
+ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
+ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/staging/ccree/TODO b/drivers/staging/ccree/TODO
index c9f5754d062d..b8e163d98f91 100644
--- a/drivers/staging/ccree/TODO
+++ b/drivers/staging/ccree/TODO
@@ -6,25 +6,5 @@
* *
*************************************************************************
-ccree specific items
-a.k.a stuff fixing for this driver to move out of staging
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1. ???
-1. Move to using Crypto Engine to handle backlog queueing.
-2. Remove synchronous algorithm support leftovers.
-3. Separate platform specific code for FIPS and power management into separate platform modules.
-4. Drop legacy kernel support code.
-5. Move most (all?) #ifdef CONFIG into inline functions.
-6. Remove all unused definitions.
-7. Re-factor to accomediate newer/older HW revisions besides the 712.
-8. Handle the many checkpatch errors.
-9. Implement ahash import/export correctly.
-10. Go through a proper review of DT bindings and sysfs ABI
-11. Sort out FIPS mode: bake tests into testmgr, sort out behaviour on error,
- figure if 3DES weak key check is needed
-
-Kernel infrastructure items
-a.k.a stuff we either neither need to fix in the kernel or understand what we're doing wrong
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-1. ahash import/export context has a PAGE_SIZE/8 size limit. We need more.
-2. Crypto Engine seems to be built for HW with hardware queue depth of 1, we have 600++.
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/cc_aead.c
index ba0954e4d2e5..b58413172231 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/cc_aead.c
@@ -1,41 +1,19 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
-#include <crypto/sha.h>
-#include <crypto/ctr.h>
#include <crypto/authenc.h>
-#include <crypto/aes.h>
#include <crypto/des.h>
#include <linux/rtnetlink.h>
-#include <linux/version.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_aead.h"
-#include "ssi_request_mgr.h"
-#include "ssi_hash.h"
-#include "ssi_sysfs.h"
-#include "ssi_sram_mgr.h"
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_aead.h"
+#include "cc_request_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
#define template_aead template_u.aead
@@ -51,8 +29,8 @@
/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01
-struct ssi_aead_handle {
- ssi_sram_addr_t sram_workspace_addr;
+struct cc_aead_handle {
+ cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
};
@@ -68,8 +46,8 @@ struct cc_xcbc_s {
dma_addr_t xcbc_keys_dma_addr;
};
-struct ssi_aead_ctx {
- struct ssi_drvdata *drvdata;
+struct cc_aead_ctx {
+ struct cc_drvdata *drvdata;
u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
u8 *enckey;
dma_addr_t enckey_dma_addr;
@@ -90,9 +68,9 @@ static inline bool valid_assoclen(struct aead_request *req)
return ((req->assoclen == 16) || (req->assoclen == 20));
}
-static void ssi_aead_exit(struct crypto_aead *tfm)
+static void cc_aead_exit(struct crypto_aead *tfm)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
@@ -100,7 +78,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
/* Unmap enckey buffer */
if (ctx->enckey) {
- dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
+ dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
+ ctx->enckey_dma_addr);
dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
&ctx->enckey_dma_addr);
ctx->enckey_dma_addr = 0;
@@ -143,22 +122,22 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
}
}
-static int ssi_aead_init(struct crypto_aead *tfm)
+static int cc_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct ssi_crypto_alg *ssi_alg =
- container_of(alg, struct ssi_crypto_alg, aead_alg);
- struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg, aead_alg);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
crypto_tfm_alg_name(&tfm->base));
/* Initialize modes in instance */
- ctx->cipher_mode = ssi_alg->cipher_mode;
- ctx->flow_mode = ssi_alg->flow_mode;
- ctx->auth_mode = ssi_alg->auth_mode;
- ctx->drvdata = ssi_alg->drvdata;
+ ctx->cipher_mode = cc_alg->cipher_mode;
+ ctx->flow_mode = cc_alg->flow_mode;
+ ctx->auth_mode = cc_alg->auth_mode;
+ ctx->drvdata = cc_alg->drvdata;
crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
/* Allocate key buffer, cache line aligned */
@@ -221,23 +200,25 @@ static int ssi_aead_init(struct crypto_aead *tfm)
return 0;
init_failed:
- ssi_aead_exit(tfm);
+ cc_aead_exit(tfm);
return -ENOMEM;
}
-static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
- struct aead_request *areq = (struct aead_request *)ssi_req;
+ struct aead_request *areq = (struct aead_request *)cc_req;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- int err = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- ssi_buffer_mgr_unmap_aead_request(dev, areq);
+ cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
areq->iv = areq_ctx->backup_iv;
+ if (err)
+ goto done;
+
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
ctx->authsize) != 0) {
@@ -246,36 +227,43 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
/* In case of payload authentication failure, MUST NOT
 * reveal the decrypted message --> zero its memory.
*/
- ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
+ cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
err = -EBADMSG;
}
} else { /*ENCRYPT*/
- if (unlikely(areq_ctx->is_icv_fragmented))
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
- areq->cryptlen + areq_ctx->dst_offset,
- (areq->cryptlen + areq_ctx->dst_offset +
- ctx->authsize),
- SSI_SG_FROM_BUF);
-
- /* If an IV was generated, copy it back to the user provided buffer. */
+ if (areq_ctx->is_icv_fragmented) {
+ u32 skip = areq->cryptlen + areq_ctx->dst_offset;
+
+ cc_copy_sg_portion(dev, areq_ctx->mac_buf,
+ areq_ctx->dst_sgl, skip,
+ (skip + ctx->authsize),
+ CC_SG_FROM_BUF);
+ }
+
+ /* If an IV was generated, copy it back to the user provided
+ * buffer.
+ */
if (areq_ctx->backup_giv) {
if (ctx->cipher_mode == DRV_CIPHER_CTR)
- memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+ CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_IV_SIZE);
else if (ctx->cipher_mode == DRV_CIPHER_CCM)
- memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+ CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
}
}
-
+done:
aead_request_complete(areq, err);
}
-static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
+static int xcbc_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
/* Load the AES key */
hw_desc_init(&desc[0]);
- /* We are using for the source/user key the same buffer as for the output keys,
- * because after this key loading it is not needed anymore
+	/* We use the same buffer for the source/user key as for the
+	 * output keys, because after this key loading it is not
+	 * needed anymore
*/
set_din_type(&desc[0], DMA_DLLI,
ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
@@ -309,7 +297,7 @@ static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
return 4;
}
-static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
+static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
@@ -328,8 +316,8 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_din_sram(&desc[idx],
- ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->auth_mode),
+ cc_larval_digest_addr(ctx->drvdata,
+ ctx->auth_mode),
digest_size);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
@@ -378,7 +366,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
return idx;
}
-static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
+static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -390,9 +378,9 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
case DRV_HASH_SHA256:
break;
case DRV_HASH_XCBC_MAC:
- if ((ctx->auth_keylen != AES_KEYSIZE_128) &&
- (ctx->auth_keylen != AES_KEYSIZE_192) &&
- (ctx->auth_keylen != AES_KEYSIZE_256))
+ if (ctx->auth_keylen != AES_KEYSIZE_128 &&
+ ctx->auth_keylen != AES_KEYSIZE_192 &&
+ ctx->auth_keylen != AES_KEYSIZE_256)
return -ENOTSUPP;
break;
 case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
@@ -404,16 +392,16 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
return -EINVAL;
}
/* Check cipher key size */
- if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
+ if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
} else { /* Default assumed to be AES ciphers */
- if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
- (ctx->enc_keylen != AES_KEYSIZE_192) &&
- (ctx->enc_keylen != AES_KEYSIZE_256)) {
+ if (ctx->enc_keylen != AES_KEYSIZE_128 &&
+ ctx->enc_keylen != AES_KEYSIZE_192 &&
+ ctx->enc_keylen != AES_KEYSIZE_256) {
dev_err(dev, "Invalid cipher(AES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
@@ -427,14 +415,14 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
 * (copy to internal buffer or hash in case of key longer than block
*/
static int
-ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- u32 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->auth_mode);
- struct ssi_crypto_req ssi_req = {};
+ u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
+ struct cc_crypto_req cc_req = {};
unsigned int blocksize;
unsigned int digestsize;
unsigned int hashmode;
@@ -457,9 +445,10 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
hashmode = DRV_HASH_HW_SHA256;
}
- if (likely(keylen != 0)) {
- key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
+ if (keylen != 0) {
+ key_dma_addr = dma_map_single(dev, (void *)key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -537,22 +526,22 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
idx++;
}
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc != 0))
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc)
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- if (likely(key_dma_addr != 0))
+ if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
return rc;
}
static int
-ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct rtattr *rta = (struct rtattr *)key;
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
int seq_len = 0, rc = -EINVAL;
@@ -586,8 +575,9 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
- CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
+ memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
+ ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_NONCE_SIZE);
/* Set CTR key size */
ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
}
@@ -597,7 +587,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
}
rc = validate_keys_sizes(ctx);
- if (unlikely(rc != 0))
+ if (rc)
goto badkey;
/* STAT_PHASE_1: Copy key to ctx */
@@ -609,8 +599,8 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
- rc = ssi_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
- if (rc != 0)
+ rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ if (rc)
goto badkey;
}
@@ -635,8 +625,8 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
/* STAT_PHASE_3: Submit sequence to HW */
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
- rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
- if (unlikely(rc != 0)) {
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
+ if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto setkey_error;
}
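
Worth noting in the setkey hunks above: the old send_request(..., 0) call
becomes cc_send_sync_request(), while the data path (see cc_proc_aead further
down) uses the callback-based cc_send_request(). A condensed sketch of the two
calling patterns, inferred from the call sites in this patch rather than from
the full driver source:

/* Synchronous submission: setkey runs in a sleepable context and has
 * no crypto_async_request to complete, so it blocks until the HW
 * sequence finishes.
 */
static int submit_key_seq(struct cc_aead_ctx *ctx,
			  struct cc_hw_desc *desc, int seq_len)
{
	struct cc_crypto_req cc_req = {};

	return cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
}

/* Asynchronous submission: -EINPROGRESS means queued, -EBUSY means
 * accepted onto the backlog; both complete later through user_cb.
 * Anything else is a hard failure and the request must be unmapped.
 */
static int submit_data_seq(struct cc_aead_ctx *ctx,
			   struct aead_request *req,
			   struct cc_hw_desc *desc, int seq_len)
{
	struct cc_crypto_req cc_req = {};
	int rc;

	cc_req.user_cb = (void *)cc_aead_complete;
	cc_req.user_arg = (void *)req;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len,
			     &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		cc_unmap_aead_request(drvdata_to_dev(ctx->drvdata), req);

	return rc;
}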
@@ -652,10 +642,10 @@ setkey_error:
return rc;
}
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
if (keylen < 3)
return -EINVAL;
@@ -663,20 +653,18 @@ static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 3;
memcpy(ctx->ctr_nonce, key + keylen, 3);
- return ssi_aead_setkey(tfm, key, keylen);
+ return cc_aead_setkey(tfm, key, keylen);
}
-#endif /*SSI_CC_HAS_AES_CCM*/
-static int ssi_aead_setauthsize(
- struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
/* Unsupported auth. sizes */
- if ((authsize == 0) ||
- (authsize > crypto_aead_maxauthsize(authenc))) {
+ if (authsize == 0 ||
+ authsize > crypto_aead_maxauthsize(authenc)) {
return -ENOTSUPP;
}
@@ -686,9 +674,8 @@ static int ssi_aead_setauthsize(
return 0;
}
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
switch (authsize) {
case 8:
@@ -699,11 +686,11 @@ static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -718,46 +705,41 @@ static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-#endif /*SSI_CC_HAS_AES_CCM*/
-
-static inline void
-ssi_aead_create_assoc_desc(
- struct aead_request *areq,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+
+static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
+ enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
unsigned int idx = *seq_size;
struct device *dev = drvdata_to_dev(ctx->drvdata);
switch (assoc_dma_type) {
- case SSI_DMA_BUF_DLLI:
+ case CC_DMA_BUF_DLLI:
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
- areq->assoclen, NS_BIT); set_flow_mode(&desc[idx],
- flow_mode);
- if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
- (areq_ctx->cryptlen > 0))
+ areq->assoclen, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
set_din_not_last_indication(&desc[idx]);
break;
- case SSI_DMA_BUF_MLLI:
+ case CC_DMA_BUF_MLLI:
dev_dbg(dev, "ASSOC buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
areq_ctx->assoc.mlli_nents, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
- if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
- (areq_ctx->cryptlen > 0))
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
set_din_not_last_indication(&desc[idx]);
break;
- case SSI_DMA_BUF_NULL:
+ case CC_DMA_BUF_NULL:
default:
dev_err(dev, "Invalid ASSOC buffer type\n");
}
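
cc_set_assoc_desc() above dispatches on a buffer type fixed earlier, at
DMA-mapping time: a DLLI descriptor addresses one contiguous mapped region
directly, while an MLLI descriptor points at a link table (built in SRAM, see
cc_mlli_to_sram() below) that describes a fragmented buffer. A hedged sketch of
the selection rule; the helper is illustrative and not part of this patch:

/* Illustrative only: derive the descriptor addressing mode from the
 * shape of the mapped scatterlist.
 */
static enum cc_req_dma_buf_type classify_buf(int mapped_nents)
{
	if (mapped_nents == 0)
		return CC_DMA_BUF_NULL;	/* nothing to transfer */
	if (mapped_nents == 1)
		return CC_DMA_BUF_DLLI;	/* one contiguous run */
	return CC_DMA_BUF_MLLI;		/* needs a link table */
}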
@@ -765,23 +747,20 @@ ssi_aead_create_assoc_desc(
*seq_size = (++idx);
}
-static inline void
-ssi_aead_process_authenc_data_desc(
- struct aead_request *areq,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- unsigned int *seq_size,
- int direct)
+static void cc_proc_authen_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size, int direct)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
unsigned int idx = *seq_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
switch (data_dma_type) {
- case SSI_DMA_BUF_DLLI:
+ case CC_DMA_BUF_DLLI:
{
struct scatterlist *cipher =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
@@ -798,16 +777,16 @@ ssi_aead_process_authenc_data_desc(
set_flow_mode(&desc[idx], flow_mode);
break;
}
- case SSI_DMA_BUF_MLLI:
+ case CC_DMA_BUF_MLLI:
{
/* DOUBLE-PASS flow (as default)
 * assoc. + iv + data compacted into one table;
 * if assoclen is ZERO, only the IV is processed
*/
- ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+ cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
u32 mlli_nents = areq_ctx->assoc.mlli_nents;
- if (likely(areq_ctx->is_single_pass)) {
+ if (areq_ctx->is_single_pass) {
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
mlli_addr = areq_ctx->dst.sram_addr;
mlli_nents = areq_ctx->dst.mlli_nents;
@@ -824,7 +803,7 @@ ssi_aead_process_authenc_data_desc(
set_flow_mode(&desc[idx], flow_mode);
break;
}
- case SSI_DMA_BUF_NULL:
+ case CC_DMA_BUF_NULL:
default:
dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
}
@@ -832,37 +811,36 @@ ssi_aead_process_authenc_data_desc(
*seq_size = (++idx);
}
-static inline void
-ssi_aead_process_cipher_data_desc(
- struct aead_request *areq,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_cipher_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
if (areq_ctx->cryptlen == 0)
return; /*null processing*/
switch (data_dma_type) {
- case SSI_DMA_BUF_DLLI:
+ case CC_DMA_BUF_DLLI:
dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
(sg_dma_address(areq_ctx->src_sgl) +
- areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT);
+ areq_ctx->src_offset), areq_ctx->cryptlen,
+ NS_BIT);
set_dout_dlli(&desc[idx],
(sg_dma_address(areq_ctx->dst_sgl) +
areq_ctx->dst_offset),
areq_ctx->cryptlen, NS_BIT, 0);
set_flow_mode(&desc[idx], flow_mode);
break;
- case SSI_DMA_BUF_MLLI:
+ case CC_DMA_BUF_MLLI:
dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
@@ -871,7 +849,7 @@ ssi_aead_process_cipher_data_desc(
areq_ctx->dst.mlli_nents, NS_BIT, 0);
set_flow_mode(&desc[idx], flow_mode);
break;
- case SSI_DMA_BUF_NULL:
+ case CC_DMA_BUF_NULL:
default:
dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
}
@@ -879,13 +857,12 @@ ssi_aead_process_cipher_data_desc(
*seq_size = (++idx);
}
-static inline void ssi_aead_process_digest_result_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_digest_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -930,13 +907,12 @@ static inline void ssi_aead_process_digest_result_desc(
*seq_size = (++idx);
}
-static inline void ssi_aead_setup_cipher_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_cipher_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = req_ctx->hw_iv_size;
unsigned int idx = *seq_size;
@@ -976,11 +952,8 @@ static inline void ssi_aead_setup_cipher_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_cipher(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size,
- unsigned int data_flow_mode)
+static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size, unsigned int data_flow_mode)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
int direct = req_ctx->gen_ctx.op_type;
@@ -989,8 +962,8 @@ static inline void ssi_aead_process_cipher(
if (req_ctx->cryptlen == 0)
return; /*null processing*/
- ssi_aead_setup_cipher_desc(req, desc, &idx);
- ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
+ cc_set_cipher_desc(req, desc, &idx);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* We must wait for DMA to write all cipher */
hw_desc_init(&desc[idx]);
@@ -1002,13 +975,11 @@ static inline void ssi_aead_process_cipher(
*seq_size = idx;
}
-static inline void ssi_aead_hmac_setup_digest_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -1028,10 +999,8 @@ static inline void ssi_aead_hmac_setup_digest_desc(
/* Load init. digest len (64 bytes) */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
- hash_mode),
- HASH_LEN_SIZE);
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+ HASH_LEN_SIZE);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -1039,13 +1008,11 @@ static inline void ssi_aead_hmac_setup_digest_desc(
*seq_size = idx;
}
-static inline void ssi_aead_xcbc_setup_digest_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int idx = *seq_size;
/* Loading MAC state */
@@ -1101,28 +1068,26 @@ static inline void ssi_aead_xcbc_setup_digest_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_digest_header_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_header_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
/* Hash associated data */
if (req->assoclen > 0)
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
*seq_size = idx;
}
-static inline void ssi_aead_process_digest_scheme_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_scheme_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -1161,9 +1126,7 @@ static inline void ssi_aead_process_digest_scheme_desc(
/* Load init. digest len (64 bytes) */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
- hash_mode),
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
HASH_LEN_SIZE);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
@@ -1180,20 +1143,17 @@ static inline void ssi_aead_process_digest_scheme_desc(
*seq_size = idx;
}
-static inline void ssi_aead_load_mlli_to_sram(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_mlli_to_sram(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (unlikely(
- (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
- (req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
- !req_ctx->is_single_pass)) {
+ if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
+ !req_ctx->is_single_pass) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1210,54 +1170,52 @@ static inline void ssi_aead_load_mlli_to_sram(
}
}
-static inline enum cc_flow_mode ssi_aead_get_data_flow_mode(
- enum drv_crypto_direction direct,
- enum cc_flow_mode setup_flow_mode,
- bool is_single_pass)
+static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
+ enum cc_flow_mode setup_flow_mode,
+ bool is_single_pass)
{
enum cc_flow_mode data_flow_mode;
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
if (setup_flow_mode == S_DIN_to_AES)
- data_flow_mode = likely(is_single_pass) ?
+ data_flow_mode = is_single_pass ?
AES_to_HASH_and_DOUT : DIN_AES_DOUT;
else
- data_flow_mode = likely(is_single_pass) ?
+ data_flow_mode = is_single_pass ?
DES_to_HASH_and_DOUT : DIN_DES_DOUT;
} else { /* Decrypt */
if (setup_flow_mode == S_DIN_to_AES)
- data_flow_mode = likely(is_single_pass) ?
- AES_and_HASH : DIN_AES_DOUT;
+ data_flow_mode = is_single_pass ?
+ AES_and_HASH : DIN_AES_DOUT;
else
- data_flow_mode = likely(is_single_pass) ?
- DES_and_HASH : DIN_DES_DOUT;
+ data_flow_mode = is_single_pass ?
+ DES_and_HASH : DIN_DES_DOUT;
}
return data_flow_mode;
}
-static inline void ssi_aead_hmac_authenc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
int direct = req_ctx->gen_ctx.op_type;
- unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
- direct, ctx->flow_mode, req_ctx->is_single_pass);
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
/**
* Single-pass flow
*/
- ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
- ssi_aead_setup_cipher_desc(req, desc, seq_size);
- ssi_aead_process_digest_header_desc(req, desc, seq_size);
- ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
- ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
return;
}
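
For readability, the single-pass program that cc_hmac_authenc() builds above
can be restated with its steps spelled out; this sketch mirrors the calls in
the hunk and introduces nothing new:

static void hmac_single_pass(struct aead_request *req,
			     struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int flow = cc_get_data_flow(req_ctx->gen_ctx.op_type,
					     ctx->flow_mode,
					     req_ctx->is_single_pass);

	cc_set_hmac_desc(req, desc, seq_size);	  /* load ipad/opad state */
	cc_set_cipher_desc(req, desc, seq_size);  /* load key and IV */
	cc_proc_header_desc(req, desc, seq_size); /* hash assoc. data */
	cc_proc_cipher_desc(req, flow, desc, seq_size); /* one data pass */
	cc_proc_scheme_desc(req, desc, seq_size); /* HMAC finalization */
	cc_proc_digest_desc(req, desc, seq_size); /* read/verify ICV */
}

In the double-pass fallback the same building blocks run twice over the data:
cipher first on encrypt, authentication first on decrypt, with the digest
descriptor always last so the completion bit fires only after the cipher
output is in place.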
@@ -1268,49 +1226,48 @@ static inline void ssi_aead_hmac_authenc(
*/
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* encrypt first.. */
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* authenc after..*/
- ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
- ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
} else { /*DECRYPT*/
/* authenc first..*/
- ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
- ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
/* decrypt after.. */
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* read the digest result with setting the completion bit
* must be after the cipher operation
*/
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
}
}
-static inline void
-ssi_aead_xcbc_authenc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void
+cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
int direct = req_ctx->gen_ctx.op_type;
- unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
- direct, ctx->flow_mode, req_ctx->is_single_pass);
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
/**
* Single-pass flow
*/
- ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
- ssi_aead_setup_cipher_desc(req, desc, seq_size);
- ssi_aead_process_digest_header_desc(req, desc, seq_size);
- ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
return;
}
@@ -1321,25 +1278,25 @@ ssi_aead_xcbc_authenc(
*/
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* encrypt first.. */
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* authenc after.. */
- ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_digest_desc(req, desc, seq_size);
} else { /*DECRYPT*/
/* authenc first.. */
- ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
/* decrypt after..*/
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* read the digest result with setting the completion bit
* must be after the cipher operation
*/
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
}
}
-static int validate_data_size(struct ssi_aead_ctx *ctx,
+static int validate_data_size(struct cc_aead_ctx *ctx,
enum drv_crypto_direction direct,
struct aead_request *req)
{
@@ -1349,16 +1306,16 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
- if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- (req->cryptlen < ctx->authsize)))
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->cryptlen < ctx->authsize)
goto data_size_err;
areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
switch (ctx->flow_mode) {
case S_DIN_to_AES:
- if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
- !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
+ if (ctx->cipher_mode == DRV_CIPHER_CBC &&
+ !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
goto data_size_err;
if (ctx->cipher_mode == DRV_CIPHER_CCM)
break;
@@ -1371,15 +1328,15 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
if (!IS_ALIGNED(assoclen, sizeof(u32)))
areq_ctx->is_single_pass = false;
- if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
+ if (ctx->cipher_mode == DRV_CIPHER_CTR &&
!IS_ALIGNED(cipherlen, sizeof(u32)))
areq_ctx->is_single_pass = false;
break;
case S_DIN_to_DES:
- if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
+ if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
goto data_size_err;
- if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
+ if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
areq_ctx->is_single_pass = false;
break;
default:
@@ -1393,7 +1350,6 @@ data_size_err:
return -EINVAL;
}
-#if SSI_CC_HAS_AES_CCM
static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
{
unsigned int len = 0;
@@ -1438,13 +1394,11 @@ static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
return 0;
}
-static inline int ssi_aead_ccm(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
unsigned int cipher_flow_mode;
@@ -1508,7 +1462,7 @@ static inline int ssi_aead_ccm(
/* process assoc data */
if (req->assoclen > 0) {
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
@@ -1519,8 +1473,8 @@ static inline int ssi_aead_ccm(
}
/* process the cipher */
- if (req_ctx->cryptlen != 0)
- ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
/* Read temporal MAC */
hw_desc_init(&desc[idx]);
@@ -1565,12 +1519,14 @@ static inline int ssi_aead_ccm(
static int config_ccm_adata(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
//unsigned int size_of_a = 0, rem_a_size = 0;
unsigned int lp = req->iv[0];
- /* Note: The code assume that req->iv[0] already contains the value of L' of RFC3610 */
+	/* Note: The code assumes that req->iv[0] already contains the value
+ * of L' of RFC3610
+ */
unsigned int l = lp + 1; /* This is L' of RFC 3610. */
unsigned int m = ctx->authsize; /* This is M' of RFC 3610. */
u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
@@ -1601,7 +1557,7 @@ static int config_ccm_adata(struct aead_request *req)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
- if (rc != 0) {
+ if (rc) {
dev_err(dev, "message len overflow detected");
return rc;
}
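
config_ccm_adata() above lays out the RFC 3610 B0 block: a flags byte, the
nonce, and a big-endian length field occupying the last l bytes. A
reconstructed sketch of what set_msg_len() does, inferred from its call site
set_msg_len(b0 + 16 - l, cryptlen, l) — an assumption about the helper, not a
copy of the driver's code:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Write msglen big-endian into the last csize bytes of the length
 * field, failing if the message does not fit (the "message len
 * overflow detected" case reported above).
 */
static int set_msg_len_sketch(u8 *block, unsigned int msglen,
			      unsigned int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1u << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}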
@@ -1619,33 +1575,35 @@ static int config_ccm_adata(struct aead_request *req)
return 0;
}
-static void ssi_rfc4309_ccm_process(struct aead_request *req)
+static void cc_proc_rfc4309_ccm(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
/* L' */
memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
- areq_ctx->ctr_iv[0] = 3; /* For RFC 4309, always use 4 bytes for message length (at most 2^32-1 bytes). */
+ /* For RFC 4309, always use 4 bytes for message length
+ * (at most 2^32-1 bytes).
+ */
+ areq_ctx->ctr_iv[0] = 3;
- /* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
- memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
- memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, CCM_BLOCK_IV_SIZE);
+	/* In RFC 4309 there is an 11-byte nonce+IV part
+ * that we build here.
+ */
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
+ CCM_BLOCK_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
+ CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
req->assoclen -= CCM_BLOCK_IV_SIZE;
}
-#endif /*SSI_CC_HAS_AES_CCM*/
-
-#if SSI_CC_HAS_AES_GCM
-static inline void ssi_aead_gcm_setup_ghash_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_ghash_desc(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
@@ -1703,7 +1661,9 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
- /* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
+ /* Load GHASH initial STATE (which is 0). (for any hash there is an
+ * initial state)
+ */
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
set_dout_no_dma(&desc[idx], 0, 0, 1);
@@ -1717,13 +1677,11 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
*seq_size = idx;
}
-static inline void ssi_aead_gcm_setup_gctr_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
@@ -1738,7 +1696,7 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
- if ((req_ctx->cryptlen != 0) && (!req_ctx->plaintext_authenticate_only)) {
+ if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
/* load AES/CTR initial CTR value inc by 2*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
@@ -1755,13 +1713,12 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_gcm_result_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_gcm_result(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
dma_addr_t mac_result;
unsigned int idx = *seq_size;
@@ -1821,10 +1778,8 @@ static inline void ssi_aead_process_gcm_result_desc(
*seq_size = idx;
}
-static inline int ssi_aead_gcm(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int cipher_flow_mode;
@@ -1837,77 +1792,33 @@ static inline int ssi_aead_gcm(
//in RFC4543 no data to encrypt. just copy data from src to dest.
if (req_ctx->plaintext_authenticate_only) {
- ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
- ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
+ cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
- ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
- ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
return 0;
}
// for gcm and rfc4106.
- ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+ cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
if (req->assoclen > 0)
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
- ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
- if (req_ctx->cryptlen != 0)
- ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
- ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
return 0;
}
-#ifdef CC_DEBUG
-static inline void ssi_aead_dump_gcm(
- const char *title,
- struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-
- if (ctx->cipher_mode != DRV_CIPHER_GCTR)
- return;
-
- if (title) {
- dev_dbg(dev, "----------------------------------------------------------------------------------");
- dev_dbg(dev, "%s\n", title);
- }
-
- dev_dbg(dev, "cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
- ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
- req->assoclen, req_ctx->cryptlen);
-
- if (ctx->enckey)
- dump_byte_array("mac key", ctx->enckey, 16);
-
- dump_byte_array("req->iv", req->iv, AES_BLOCK_SIZE);
-
- dump_byte_array("gcm_iv_inc1", req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);
-
- dump_byte_array("gcm_iv_inc2", req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);
-
- dump_byte_array("hkey", req_ctx->hkey, AES_BLOCK_SIZE);
-
- dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);
-
- dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE);
-
- if (req->src && req->cryptlen)
- dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
-
- if (req->dst)
- dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
-}
-#endif
-
static int config_gcm_context(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -1938,10 +1849,14 @@ static int config_gcm_context(struct aead_request *req)
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
- } else { //rfc4543=> all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
+ } else {
+		/* rfc4543 => all data (AAD, IV, plaintext) are considered
+		 * additional data, i.e. nothing is encrypted.
+ */
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+ temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
+ cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1950,30 +1865,31 @@ static int config_gcm_context(struct aead_request *req)
return 0;
}
-static void ssi_rfc4_gcm_process(struct aead_request *req)
+static void cc_proc_rfc4_gcm(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
- memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, GCM_BLOCK_RFC4_IV_SIZE);
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
+ ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
+ GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
-#endif /*SSI_CC_HAS_AES_GCM*/
-
-static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
+static int cc_proc_aead(struct aead_request *req,
+ enum drv_crypto_direction direct)
{
int rc = 0;
int seq_len = 0;
struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
@@ -1983,7 +1899,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
/* STAT_PHASE_0: Init and sanity checks */
/* Check data length according to mode */
- if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
+ if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, req->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
@@ -1991,8 +1907,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_aead_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_aead_complete;
+ cc_req.user_arg = (void *)req;
/* Setup request context */
areq_ctx->gen_ctx.op_type = direct;
@@ -2005,7 +1921,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
/* Build CTR IV - Copy nonce from last 4 bytes in
* CTR key to first 4 bytes in CTR IV
*/
- memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
+ CTR_RFC3686_NONCE_SIZE);
if (!areq_ctx->backup_giv) /*User none-generated IV*/
memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
req->iv, CTR_RFC3686_IV_SIZE);
@@ -2020,17 +1937,17 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
(ctx->cipher_mode == DRV_CIPHER_GCTR)) {
areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
if (areq_ctx->ctr_iv != req->iv) {
- memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
+ memcpy(areq_ctx->ctr_iv, req->iv,
+ crypto_aead_ivsize(tfm));
req->iv = areq_ctx->ctr_iv;
}
} else {
areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
}
-#if SSI_CC_HAS_AES_CCM
if (ctx->cipher_mode == DRV_CIPHER_CCM) {
rc = config_ccm_adata(req);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
rc);
goto exit;
@@ -2038,23 +1955,18 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
} else {
areq_ctx->ccm_hdr_size = ccm_header_size_null;
}
-#else
- areq_ctx->ccm_hdr_size = ccm_header_size_null;
-#endif /*SSI_CC_HAS_AES_CCM*/
-#if SSI_CC_HAS_AES_GCM
if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
rc = config_gcm_context(req);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
rc);
goto exit;
}
}
-#endif /*SSI_CC_HAS_AES_GCM*/
- rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
- if (unlikely(rc != 0)) {
+ rc = cc_map_aead_request(ctx->drvdata, req);
+ if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit;
}
@@ -2063,74 +1975,77 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
if (areq_ctx->backup_giv) {
/* set the DMA mapped IV address*/
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
- ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
- ssi_req.ivgen_dma_addr_len = 1;
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr +
+ CTR_RFC3686_NONCE_SIZE;
+ cc_req.ivgen_dma_addr_len = 1;
} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
- /* In ccm, the IV needs to exist both inside B0 and inside the counter.
- * It is also copied to iv_dma_addr for other reasons (like returning
- * it to the user).
+ /* In ccm, the IV needs to exist both inside B0 and
+			 * inside the counter. It is also copied to iv_dma_addr
+ * for other reasons (like returning it to the user).
* So, using 3 (identical) IV outputs.
*/
- ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
- ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
- ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
- ssi_req.ivgen_dma_addr_len = 3;
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr +
+ CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr[1] =
+ sg_dma_address(&areq_ctx->ccm_adata_sg) +
+ CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr[2] =
+ sg_dma_address(&areq_ctx->ccm_adata_sg) +
+ CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr_len = 3;
} else {
- ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr;
- ssi_req.ivgen_dma_addr_len = 1;
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr;
+ cc_req.ivgen_dma_addr_len = 1;
}
/* set the IV size (8/16 B long)*/
- ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
+ cc_req.ivgen_size = crypto_aead_ivsize(tfm);
}
/* STAT_PHASE_2: Create sequence */
/* Load MLLI tables to SRAM if necessary */
- ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
+ cc_mlli_to_sram(req, desc, &seq_len);
/*TODO: move seq len by reference */
switch (ctx->auth_mode) {
case DRV_HASH_SHA1:
case DRV_HASH_SHA256:
- ssi_aead_hmac_authenc(req, desc, &seq_len);
+ cc_hmac_authenc(req, desc, &seq_len);
break;
case DRV_HASH_XCBC_MAC:
- ssi_aead_xcbc_authenc(req, desc, &seq_len);
+ cc_xcbc_authenc(req, desc, &seq_len);
break;
-#if (SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM)
case DRV_HASH_NULL:
-#if SSI_CC_HAS_AES_CCM
if (ctx->cipher_mode == DRV_CIPHER_CCM)
- ssi_aead_ccm(req, desc, &seq_len);
-#endif /*SSI_CC_HAS_AES_CCM*/
-#if SSI_CC_HAS_AES_GCM
+ cc_ccm(req, desc, &seq_len);
if (ctx->cipher_mode == DRV_CIPHER_GCTR)
- ssi_aead_gcm(req, desc, &seq_len);
-#endif /*SSI_CC_HAS_AES_GCM*/
- break;
-#endif
+ cc_gcm(req, desc, &seq_len);
+ break;
default:
dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
- ssi_buffer_mgr_unmap_aead_request(dev, req);
+ cc_unmap_aead_request(dev, req);
rc = -ENOTSUPP;
goto exit;
}
/* STAT_PHASE_3: Lock HW and push sequence */
- rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_aead_request(dev, req);
+ cc_unmap_aead_request(dev, req);
}
exit:
return rc;
}
-static int ssi_aead_encrypt(struct aead_request *req)
+static int cc_aead_encrypt(struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2142,21 +2057,20 @@ static int ssi_aead_encrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
}
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
+static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_encrypt() above. */
+ /* Very similar to cc_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
int rc = -EINVAL;
@@ -2170,17 +2084,16 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
- ssi_rfc4309_ccm_process(req);
+ cc_proc_rfc4309_ccm(req);
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-#endif /* SSI_CC_HAS_AES_CCM */
-static int ssi_aead_decrypt(struct aead_request *req)
+static int cc_aead_decrypt(struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2192,18 +2105,17 @@ static int ssi_aead_decrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
}
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
+static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
@@ -2218,22 +2130,20 @@ static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
- ssi_rfc4309_ccm_process(req);
+ cc_proc_rfc4309_ccm(req);
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-#endif /* SSI_CC_HAS_AES_CCM */
-
-#if SSI_CC_HAS_AES_GCM
-static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
@@ -2244,12 +2154,13 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- return ssi_aead_setkey(tfm, key, keylen);
+ return cc_aead_setkey(tfm, key, keylen);
}
-static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
@@ -2260,11 +2171,11 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- return ssi_aead_setkey(tfm, key, keylen);
+ return cc_aead_setkey(tfm, key, keylen);
}
-static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -2279,13 +2190,13 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "authsize %d\n", authsize);
@@ -2299,13 +2210,13 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "authsize %d\n", authsize);
@@ -2313,15 +2224,15 @@ static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
if (authsize != 16)
return -EINVAL;
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
+static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_encrypt() above. */
+ /* Very similar to cc_aead_encrypt() above. */
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
@@ -2337,19 +2248,19 @@ static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
+static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_encrypt() above. */
+ /* Very similar to cc_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2361,22 +2272,22 @@ static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
}
-static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
+static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_decrypt() above. */
+ /* Very similar to cc_aead_decrypt() above. */
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
@@ -2392,19 +2303,19 @@ static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
+static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_decrypt() above. */
+ /* Very similar to cc_aead_decrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2416,31 +2327,30 @@ static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
}
-#endif /* SSI_CC_HAS_AES_GCM */
/* DX Block aead alg */
-static struct ssi_alg_template aead_algs[] = {
+static struct cc_alg_template aead_algs[] = {
{
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -2454,12 +2364,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -2473,12 +2383,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -2492,12 +2402,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -2511,12 +2421,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2530,12 +2440,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -2549,12 +2459,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -2568,12 +2478,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2581,19 +2491,18 @@ static struct ssi_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_XCBC_MAC,
},
-#if SSI_CC_HAS_AES_CCM
{
.name = "ccm(aes)",
.driver_name = "ccm-aes-dx",
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_ccm_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_ccm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2607,12 +2516,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_rfc4309_ccm_setkey,
- .setauthsize = ssi_rfc4309_ccm_setauthsize,
- .encrypt = ssi_rfc4309_ccm_encrypt,
- .decrypt = ssi_rfc4309_ccm_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_rfc4309_ccm_setkey,
+ .setauthsize = cc_rfc4309_ccm_setauthsize,
+ .encrypt = cc_rfc4309_ccm_encrypt,
+ .decrypt = cc_rfc4309_ccm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CCM_BLOCK_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2620,20 +2529,18 @@ static struct ssi_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
},
-#endif /*SSI_CC_HAS_AES_CCM*/
-#if SSI_CC_HAS_AES_GCM
{
.name = "gcm(aes)",
.driver_name = "gcm-aes-dx",
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_gcm_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_gcm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = 12,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2647,12 +2554,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_rfc4106_gcm_setkey,
- .setauthsize = ssi_rfc4106_gcm_setauthsize,
- .encrypt = ssi_rfc4106_gcm_encrypt,
- .decrypt = ssi_rfc4106_gcm_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_rfc4106_gcm_setkey,
+ .setauthsize = cc_rfc4106_gcm_setauthsize,
+ .encrypt = cc_rfc4106_gcm_encrypt,
+ .decrypt = cc_rfc4106_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2666,12 +2573,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_rfc4543_gcm_setkey,
- .setauthsize = ssi_rfc4543_gcm_setauthsize,
- .encrypt = ssi_rfc4543_gcm_encrypt,
- .decrypt = ssi_rfc4543_gcm_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_rfc4543_gcm_setkey,
+ .setauthsize = cc_rfc4543_gcm_setauthsize,
+ .encrypt = cc_rfc4543_gcm_encrypt,
+ .decrypt = cc_rfc4543_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2679,52 +2586,51 @@ static struct ssi_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
},
-#endif /*SSI_CC_HAS_AES_GCM*/
};
-static struct ssi_crypto_alg *ssi_aead_create_alg(
- struct ssi_alg_template *template,
- struct device *dev)
+static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
+ struct device *dev)
{
- struct ssi_crypto_alg *t_alg;
+ struct cc_crypto_alg *t_alg;
struct aead_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
- alg = &template->template_aead;
+ alg = &tmpl->template_aead;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
+ tmpl->driver_name);
alg->base.cra_module = THIS_MODULE;
- alg->base.cra_priority = SSI_CRA_PRIO;
+ alg->base.cra_priority = CC_CRA_PRIO;
- alg->base.cra_ctxsize = sizeof(struct ssi_aead_ctx);
+ alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- alg->init = ssi_aead_init;
- alg->exit = ssi_aead_exit;
+ tmpl->type;
+ alg->init = cc_aead_init;
+ alg->exit = cc_aead_exit;
t_alg->aead_alg = *alg;
- t_alg->cipher_mode = template->cipher_mode;
- t_alg->flow_mode = template->flow_mode;
- t_alg->auth_mode = template->auth_mode;
+ t_alg->cipher_mode = tmpl->cipher_mode;
+ t_alg->flow_mode = tmpl->flow_mode;
+ t_alg->auth_mode = tmpl->auth_mode;
return t_alg;
}
-int ssi_aead_free(struct ssi_drvdata *drvdata)
+int cc_aead_free(struct cc_drvdata *drvdata)
{
- struct ssi_crypto_alg *t_alg, *n;
- struct ssi_aead_handle *aead_handle =
- (struct ssi_aead_handle *)drvdata->aead_handle;
+ struct cc_crypto_alg *t_alg, *n;
+ struct cc_aead_handle *aead_handle =
+ (struct cc_aead_handle *)drvdata->aead_handle;
if (aead_handle) {
/* Remove registered algs */
- list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
+ entry) {
crypto_unregister_aead(&t_alg->aead_alg);
list_del(&t_alg->entry);
kfree(t_alg);
@@ -2736,10 +2642,10 @@ int ssi_aead_free(struct ssi_drvdata *drvdata)
return 0;
}
-int ssi_aead_alloc(struct ssi_drvdata *drvdata)
+int cc_aead_alloc(struct cc_drvdata *drvdata)
{
- struct ssi_aead_handle *aead_handle;
- struct ssi_crypto_alg *t_alg;
+ struct cc_aead_handle *aead_handle;
+ struct cc_crypto_alg *t_alg;
int rc = -ENOMEM;
int alg;
struct device *dev = drvdata_to_dev(drvdata);
@@ -2753,8 +2659,9 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
INIT_LIST_HEAD(&aead_handle->aead_list);
drvdata->aead_handle = aead_handle;
- aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
- drvdata, MAX_HMAC_DIGEST_SIZE);
+ aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
+ MAX_HMAC_DIGEST_SIZE);
+
if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
@@ -2763,7 +2670,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
/* Linux crypto */
for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
- t_alg = ssi_aead_create_alg(&aead_algs[alg], dev);
+ t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
@@ -2772,7 +2679,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
}
t_alg->drvdata = drvdata;
rc = crypto_register_aead(&t_alg->aead_alg);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->aead_alg.base.cra_driver_name);
goto fail2;
@@ -2788,7 +2695,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
fail2:
kfree(t_alg);
fail1:
- ssi_aead_free(drvdata);
+ cc_aead_free(drvdata);
fail0:
return rc;
}
diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/cc_aead.h
index e85bcd917e7b..5edf3b351fa4 100644
--- a/drivers/staging/ccree/ssi_aead.h
+++ b/drivers/staging/ccree/cc_aead.h
@@ -1,25 +1,12 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-/* \file ssi_aead.h
+/* \file cc_aead.h
* ARM CryptoCell AEAD Crypto API
*/
-#ifndef __SSI_AEAD_H__
-#define __SSI_AEAD_H__
+#ifndef __CC_AEAD_H__
+#define __CC_AEAD_H__
#include <linux/kernel.h>
#include <crypto/algapi.h>
@@ -28,7 +15,7 @@
/* mac_cmp - HW writes 8 B but all bytes hold the same value */
#define ICV_CMP_SIZE 8
#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
-#define MAX_MAC_SIZE MAX(SHA256_DIGEST_SIZE, AES_BLOCK_SIZE)
+#define MAX_MAC_SIZE SHA256_DIGEST_SIZE /* >= AES_BLOCK_SIZE, so MAX() was redundant */
/* defines for AES GCM configuration buffer */
#define GCM_BLOCK_LEN_SIZE 8
@@ -74,32 +61,37 @@ struct aead_req_ctx {
} gcm_len_block;
u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
- unsigned int hw_iv_size ____cacheline_aligned; /*HW actual size input*/
- u8 backup_mac[MAX_MAC_SIZE]; /*used to prevent cache coherence problem*/
+ /* HW actual size input */
+ unsigned int hw_iv_size ____cacheline_aligned;
+ /* used to prevent cache coherence problem */
+ u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /*store iv for generated IV flow*/
u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
- dma_addr_t ccm_iv0_dma_addr; /* buffer for internal ccm configurations */
+ /* buffer for internal ccm configurations */
+ dma_addr_t ccm_iv0_dma_addr;
dma_addr_t icv_dma_addr; /* Phys. address of ICV */
//used in gcm
- dma_addr_t gcm_iv_inc1_dma_addr; /* buffer for internal gcm configurations */
- dma_addr_t gcm_iv_inc2_dma_addr; /* buffer for internal gcm configurations */
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc1_dma_addr;
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc2_dma_addr;
dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
bool is_gcm4543;
u8 *icv_virt_addr; /* Virt. address of ICV */
struct async_gen_req_ctx gen_ctx;
- struct ssi_mlli assoc;
- struct ssi_mlli src;
- struct ssi_mlli dst;
+ struct cc_mlli assoc;
+ struct cc_mlli src;
+ struct cc_mlli dst;
struct scatterlist *src_sgl;
struct scatterlist *dst_sgl;
unsigned int src_offset;
unsigned int dst_offset;
- enum ssi_req_dma_buf_type assoc_buff_type;
- enum ssi_req_dma_buf_type data_buff_type;
+ enum cc_req_dma_buf_type assoc_buff_type;
+ enum cc_req_dma_buf_type data_buff_type;
struct mlli_params mlli_params;
unsigned int cryptlen;
struct scatterlist ccm_adata_sg;
@@ -111,7 +103,7 @@ struct aead_req_ctx {
bool plaintext_authenticate_only; //for gcm_rfc4543
};
-int ssi_aead_alloc(struct ssi_drvdata *drvdata);
-int ssi_aead_free(struct ssi_drvdata *drvdata);
+int cc_aead_alloc(struct cc_drvdata *drvdata);
+int cc_aead_free(struct cc_drvdata *drvdata);
-#endif /*__SSI_AEAD_H__*/
+#endif /*__CC_AEAD_H__*/
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/cc_buffer_mgr.c
index 1f8a225530a8..14b2eabbf70a 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/cc_buffer_mgr.c
@@ -1,42 +1,17 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-#include <linux/crypto.h>
-#include <linux/version.h>
-#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
-#include <crypto/hash.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include "ssi_buffer_mgr.h"
+#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
-#include "ssi_cipher.h"
-#include "ssi_hash.h"
-#include "ssi_aead.h"
-
-#define GET_DMA_BUFFER_TYPE(buff_type) ( \
- ((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
- ((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
- ((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
+#include "cc_cipher.h"
+#include "cc_hash.h"
+#include "cc_aead.h"
enum dma_buffer_type {
DMA_NULL_TYPE = -1,
@@ -64,25 +39,62 @@ struct buffer_array {
u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
+static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
+{
+ switch (type) {
+ case CC_DMA_BUF_NULL:
+ return "BUF_NULL";
+ case CC_DMA_BUF_DLLI:
+ return "BUF_DLLI";
+ case CC_DMA_BUF_MLLI:
+ return "BUF_MLLI";
+ default:
+ return "BUF_INVALID";
+ }
+}
+
/**
- * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
+ * cc_copy_mac() - Copy MAC to temporary location
+ *
+ * @dev: device object
+ * @req: aead request object
+ * @dir: [IN] copy from/to sgl
+ */
+static void cc_copy_mac(struct device *dev, struct aead_request *req,
+ enum cc_sg_cpy_direct dir)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ u32 skip = req->assoclen + req->cryptlen;
+
+ if (areq_ctx->is_gcm4543)
+ skip += crypto_aead_ivsize(tfm);
+
+ cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
+ (skip - areq_ctx->req_authsize), skip, dir);
+}
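The new cc_copy_mac() helper centralizes the skip arithmetic (assoclen + cryptlen, plus the IV size in the GCM4543 case) that the removed hunks further below open-coded at each call site; callers now pass only a copy direction, e.g. (names as in this patch):

	/* save the MAC before in-place HW processing ... */
	cc_copy_mac(dev, req, CC_SG_TO_BUF);
	/* ... and restore it while unmapping on the decrypt path */
	cc_copy_mac(dev, req, CC_SG_FROM_BUF);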
+
+/**
+ * cc_get_sgl_nents() - Get scatterlist number of entries.
*
* @sg_list: SG list
* @nbytes: [IN] Total SGL data bytes.
* @lbytes: [OUT] Returns the amount of bytes at the last entry
*/
-static unsigned int ssi_buffer_mgr_get_sgl_nents(
- struct device *dev, struct scatterlist *sg_list,
- unsigned int nbytes, u32 *lbytes, bool *is_chained)
+static unsigned int cc_get_sgl_nents(struct device *dev,
+ struct scatterlist *sg_list,
+ unsigned int nbytes, u32 *lbytes,
+ bool *is_chained)
{
unsigned int nents = 0;
- while (nbytes != 0) {
- if (sg_list->length != 0) {
+ while (nbytes && sg_list) {
+ if (sg_list->length) {
nents++;
/* get the number of bytes in the last entry */
*lbytes = nbytes;
- nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
sg_list = sg_next(sg_list);
} else {
sg_list = (struct scatterlist *)sg_page(sg_list);
@@ -95,11 +107,11 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
}
/**
- * ssi_buffer_mgr_zero_sgl() - Zero scatter scatter list data.
+ * cc_zero_sgl() - Zero scatterlist data.
*
* @sgl:
*/
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
struct scatterlist *current_sg = sgl;
int sg_index = 0;
@@ -116,7 +128,7 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
}
/**
- * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
+ * cc_copy_sg_portion() - Copy scatter list data,
* from to_skip to end, to dest and vice versa
*
* @dest:
@@ -125,21 +137,19 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
* @end:
* @direct:
*/
-void ssi_buffer_mgr_copy_scatterlist_portion(
- struct device *dev, u8 *dest,
- struct scatterlist *sg, u32 to_skip,
- u32 end, enum ssi_sg_cpy_direct direct)
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
u32 nents, lbytes;
- nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
- (direct == SSI_SG_TO_BUF));
+ (direct == CC_SG_TO_BUF));
}
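Note that sg_copy_buffer() is handed (end - to_skip + 1) bytes, i.e. one more than the half-open difference of the two offsets; that apparently-inclusive convention predates this rename and is left unchanged. An illustrative call mirroring the cc_copy_mac() pattern (backup, authsize and skip are hypothetical locals):

	/* stash the trailing 'authsize' bytes of data ending at 'skip' */
	cc_copy_sg_portion(dev, backup, sg, skip - authsize, skip,
			   CC_SG_TO_BUF);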
-static inline int ssi_buffer_mgr_render_buff_to_mlli(
- struct device *dev, dma_addr_t buff_dma, u32 buff_size,
- u32 *curr_nents, u32 **mlli_entry_pp)
+static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
+ u32 buff_size, u32 *curr_nents,
+ u32 **mlli_entry_pp)
{
u32 *mlli_entry_p = *mlli_entry_pp;
u32 new_nents;
@@ -173,26 +183,25 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
return 0;
}
-static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
- struct device *dev, struct scatterlist *sgl,
- u32 sgl_data_len, u32 sgl_offset, u32 *curr_nents,
- u32 **mlli_entry_pp)
+static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
+ u32 sgl_data_len, u32 sgl_offset,
+ u32 *curr_nents, u32 **mlli_entry_pp)
{
struct scatterlist *curr_sgl = sgl;
u32 *mlli_entry_p = *mlli_entry_pp;
s32 rc = 0;
- for ( ; (curr_sgl) && (sgl_data_len != 0);
+ for ( ; (curr_sgl && sgl_data_len);
curr_sgl = sg_next(curr_sgl)) {
u32 entry_data_len =
(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
sg_dma_len(curr_sgl) - sgl_offset :
sgl_data_len;
sgl_data_len -= entry_data_len;
- rc = ssi_buffer_mgr_render_buff_to_mlli(
- dev, sg_dma_address(curr_sgl) + sgl_offset,
- entry_data_len, curr_nents, &mlli_entry_p);
- if (rc != 0)
+ rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
+ sgl_offset, entry_data_len,
+ curr_nents, &mlli_entry_p);
+ if (rc)
return rc;
sgl_offset = 0;
@@ -201,10 +210,8 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
return 0;
}
-static int ssi_buffer_mgr_generate_mlli(
- struct device *dev,
- struct buffer_array *sg_data,
- struct mlli_params *mlli_params)
+static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
+ struct mlli_params *mlli_params, gfp_t flags)
{
u32 *mlli_p;
u32 total_nents = 0, prev_total_nents = 0;
@@ -213,10 +220,10 @@ static int ssi_buffer_mgr_generate_mlli(
dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
/* Allocate memory from the pointed pool */
- mlli_params->mlli_virt_addr = dma_pool_alloc(
- mlli_params->curr_pool, GFP_KERNEL,
- &mlli_params->mlli_dma_addr);
- if (unlikely(!mlli_params->mlli_virt_addr)) {
+ mlli_params->mlli_virt_addr =
+ dma_pool_alloc(mlli_params->curr_pool, flags,
+ &mlli_params->mlli_dma_addr);
+ if (!mlli_params->mlli_virt_addr) {
dev_err(dev, "dma_pool_alloc() failed\n");
rc = -ENOMEM;
goto build_mlli_exit;
@@ -225,17 +232,19 @@ static int ssi_buffer_mgr_generate_mlli(
mlli_p = (u32 *)mlli_params->mlli_virt_addr;
/* go over all SG's and link it to one MLLI table */
for (i = 0; i < sg_data->num_of_buffers; i++) {
+ union buffer_array_entry *entry = &sg_data->entry[i];
+ u32 tot_len = sg_data->total_data_len[i];
+ u32 offset = sg_data->offset[i];
+
if (sg_data->type[i] == DMA_SGL_TYPE)
- rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
- dev, sg_data->entry[i].sgl,
- sg_data->total_data_len[i], sg_data->offset[i],
- &total_nents, &mlli_p);
+ rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
+ offset, &total_nents,
+ &mlli_p);
else /*DMA_BUFF_TYPE*/
- rc = ssi_buffer_mgr_render_buff_to_mlli(
- dev, sg_data->entry[i].buffer_dma,
- sg_data->total_data_len[i], &total_nents,
- &mlli_p);
- if (rc != 0)
+ rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
+ tot_len, &total_nents,
+ &mlli_p);
+ if (rc)
return rc;
/* set last bit in the current table */
@@ -260,10 +269,10 @@ build_mlli_exit:
return rc;
}
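cc_generate_mlli() now takes its allocation class from the caller instead of hard-coding GFP_KERNEL, so request paths running in atomic context can pass GFP_ATOMIC. A plausible derivation at a call site (a sketch; the exact helper the series uses to derive flags may differ):

	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;

	rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);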
-static inline void ssi_buffer_mgr_add_buffer_entry(
- struct device *dev, struct buffer_array *sgl_data,
- dma_addr_t buffer_dma, unsigned int buffer_len,
- bool is_last_entry, u32 *mlli_nents)
+static void cc_add_buffer_entry(struct device *dev,
+ struct buffer_array *sgl_data,
+ dma_addr_t buffer_dma, unsigned int buffer_len,
+ bool is_last_entry, u32 *mlli_nents)
{
unsigned int index = sgl_data->num_of_buffers;
@@ -281,15 +290,10 @@ static inline void ssi_buffer_mgr_add_buffer_entry(
sgl_data->num_of_buffers++;
}
-static inline void ssi_buffer_mgr_add_scatterlist_entry(
- struct device *dev,
- struct buffer_array *sgl_data,
- unsigned int nents,
- struct scatterlist *sgl,
- unsigned int data_len,
- unsigned int data_offset,
- bool is_last_table,
- u32 *mlli_nents)
+static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
+ unsigned int nents, struct scatterlist *sgl,
+ unsigned int data_len, unsigned int data_offset,
+ bool is_last_table, u32 *mlli_nents)
{
unsigned int index = sgl_data->num_of_buffers;
@@ -307,9 +311,8 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
sgl_data->num_of_buffers++;
}
-static int
-ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
+static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
+ enum dma_data_direction direction)
{
u32 i, j;
struct scatterlist *l_sg = sg;
@@ -317,7 +320,7 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
for (i = 0; i < nents; i++) {
if (!l_sg)
break;
- if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
+ if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
dev_err(dev, "dma_map_page() sg buffer failed\n");
goto err;
}
@@ -336,17 +339,15 @@ err:
return 0;
}
-static int ssi_buffer_mgr_map_scatterlist(
- struct device *dev, struct scatterlist *sg,
- unsigned int nbytes, int direction,
- u32 *nents, u32 max_sg_nents,
- u32 *lbytes, u32 *mapped_nents)
+static int cc_map_sg(struct device *dev, struct scatterlist *sg,
+ unsigned int nbytes, int direction, u32 *nents,
+ u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
bool is_chained = false;
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
- if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
+ if (dma_map_sg(dev, sg, 1, direction) != 1) {
dev_err(dev, "dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
@@ -357,8 +358,8 @@ static int ssi_buffer_mgr_map_scatterlist(
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, nbytes, lbytes,
- &is_chained);
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
+ &is_chained);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -370,7 +371,7 @@ static int ssi_buffer_mgr_map_scatterlist(
* be changed from the original sgl nents
*/
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (unlikely(*mapped_nents == 0)) {
+ if (*mapped_nents == 0) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -379,11 +380,9 @@ static int ssi_buffer_mgr_map_scatterlist(
/*In this case the driver maps entry by entry so it
* must have the same nents before and after map
*/
- *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
- sg,
- *nents,
- direction);
- if (unlikely(*mapped_nents != *nents)) {
+ *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
+ direction);
+ if (*mapped_nents != *nents) {
*nents = *mapped_nents;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -394,18 +393,16 @@ static int ssi_buffer_mgr_map_scatterlist(
return 0;
}
-static inline int
-ssi_aead_handle_config_buf(struct device *dev,
- struct aead_req_ctx *areq_ctx,
- u8 *config_data,
- struct buffer_array *sg_data,
- unsigned int assoclen)
+static int
+cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
+ u8 *config_data, struct buffer_array *sg_data,
+ unsigned int assoclen)
{
dev_dbg(dev, " handle additional data config set to DLLI\n");
/* create sg for the current buffer */
- sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
- if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
- DMA_TO_DEVICE) != 1)) {
+ sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
+ AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
+ if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
dev_err(dev, "dma_map_sg() config buffer failed\n");
return -ENOMEM;
}
@@ -416,25 +413,21 @@ ssi_aead_handle_config_buf(struct device *dev,
areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
/* prepare for case of MLLI */
if (assoclen > 0) {
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1,
- &areq_ctx->ccm_adata_sg,
- (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
- 0, false, NULL);
+ cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
+ (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+ 0, false, NULL);
}
return 0;
}
-static inline int ssi_ahash_handle_curr_buf(struct device *dev,
- struct ahash_req_ctx *areq_ctx,
- u8 *curr_buff,
- u32 curr_buff_cnt,
- struct buffer_array *sg_data)
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+ u8 *curr_buff, u32 curr_buff_cnt,
+ struct buffer_array *sg_data)
{
dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
- if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
- DMA_TO_DEVICE) != 1)) {
+ if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
dev_err(dev, "dma_map_sg() src buffer failed\n");
return -ENOMEM;
}
@@ -442,25 +435,22 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
areq_ctx->buff_sg->length);
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
areq_ctx->curr_sg = areq_ctx->buff_sg;
areq_ctx->in_nents = 0;
/* prepare for case of MLLI */
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1, areq_ctx->buff_sg,
- curr_buff_cnt, 0, false, NULL);
+ cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+ false, NULL);
return 0;
}
-void ssi_buffer_mgr_unmap_blkcipher_request(
- struct device *dev,
- void *ctx,
- unsigned int ivsize,
- struct scatterlist *src,
- struct scatterlist *dst)
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+ unsigned int ivsize, struct scatterlist *src,
+ struct scatterlist *dst)
{
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
- if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
+ if (req_ctx->gen_ctx.iv_dma_addr) {
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
@@ -469,7 +459,8 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
DMA_TO_DEVICE);
}
/* Release pool */
- if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+ req_ctx->mlli_params.mlli_virt_addr) {
dma_pool_free(req_ctx->mlli_params.curr_pool,
req_ctx->mlli_params.mlli_virt_addr,
req_ctx->mlli_params.mlli_dma_addr);
@@ -484,14 +475,10 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
}
}
-int ssi_buffer_mgr_map_blkcipher_request(
- struct ssi_drvdata *drvdata,
- void *ctx,
- unsigned int ivsize,
- unsigned int nbytes,
- void *info,
- struct scatterlist *src,
- struct scatterlist *dst)
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags)
{
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
struct mlli_params *mlli_params = &req_ctx->mlli_params;
@@ -502,20 +489,19 @@ int ssi_buffer_mgr_map_blkcipher_request(
int rc = 0;
u32 mapped_nents = 0;
- req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
+ req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
mlli_params->curr_pool = NULL;
sg_data.num_of_buffers = 0;
/* Map IV buffer */
- if (likely(ivsize != 0)) {
+ if (ivsize) {
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev,
- req_ctx->gen_ctx.iv_dma_addr))) {
+ if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
return -ENOMEM;
@@ -527,121 +513,107 @@ int ssi_buffer_mgr_map_blkcipher_request(
}
/* Map the src SGL */
- rc = ssi_buffer_mgr_map_scatterlist(dev, src,
- nbytes, DMA_BIDIRECTIONAL,
- &req_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents);
- if (unlikely(rc != 0)) {
+ rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ if (rc) {
rc = -ENOMEM;
goto ablkcipher_exit;
}
if (mapped_nents > 1)
- req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
- if (unlikely(src == dst)) {
+ if (src == dst) {
/* Handle inplace operation */
- if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
req_ctx->out_nents = 0;
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- req_ctx->in_nents,
- src, nbytes, 0,
- true,
- &req_ctx->in_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
}
} else {
/* Map the dst sg */
- if (unlikely(ssi_buffer_mgr_map_scatterlist(
- dev, dst, nbytes,
- DMA_BIDIRECTIONAL, &req_ctx->out_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents))){
+ if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
rc = -ENOMEM;
goto ablkcipher_exit;
}
if (mapped_nents > 1)
- req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
-
- if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- req_ctx->in_nents,
- src, nbytes, 0,
- true,
- &req_ctx->in_mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- req_ctx->out_nents,
- dst, nbytes, 0,
- true,
- &req_ctx->out_mlli_nents);
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+ nbytes, 0, true,
+ &req_ctx->out_mlli_nents);
}
}
- if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
- rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
- if (unlikely(rc != 0))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto ablkcipher_exit;
}
dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
+ cc_dma_buf_type(req_ctx->dma_buf_type));
return 0;
ablkcipher_exit:
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
return rc;
}
-void ssi_buffer_mgr_unmap_aead_request(
- struct device *dev, struct aead_request *req)
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
u32 dummy;
bool chained;
u32 size_to_unmap = 0;
- if (areq_ctx->mac_buf_dma_addr != 0) {
+ if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
}
-#if SSI_CC_HAS_AES_GCM
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
- if (areq_ctx->hkey_dma_addr != 0) {
+ if (areq_ctx->hkey_dma_addr) {
dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
}
- if (areq_ctx->gcm_block_len_dma_addr != 0) {
+ if (areq_ctx->gcm_block_len_dma_addr) {
dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
- if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
+ if (areq_ctx->gcm_iv_inc1_dma_addr) {
dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
- if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
+ if (areq_ctx->gcm_iv_inc2_dma_addr) {
dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
}
-#endif
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
- if (areq_ctx->ccm_iv0_dma_addr != 0) {
+ if (areq_ctx->ccm_iv0_dma_addr) {
dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
}
- if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
+ if (areq_ctx->gen_ctx.iv_dma_addr) {
dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, DMA_BIDIRECTIONAL);
}
@@ -668,46 +640,37 @@ void ssi_buffer_mgr_unmap_aead_request(
size_to_unmap += crypto_aead_ivsize(tfm);
dma_unmap_sg(dev, req->src,
- ssi_buffer_mgr_get_sgl_nents(dev, req->src, size_to_unmap,
- &dummy, &chained),
+ cc_get_sgl_nents(dev, req->src, size_to_unmap,
+ &dummy, &chained),
DMA_BIDIRECTIONAL);
- if (unlikely(req->src != req->dst)) {
+ if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
- ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
- size_to_unmap,
- &dummy, &chained),
+ cc_get_sgl_nents(dev, req->dst, size_to_unmap,
+ &dummy, &chained),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
- (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- likely(req->src == req->dst)) {
- u32 size_to_skip = req->assoclen;
-
- if (areq_ctx->is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
-
- /* copy mac to a temporary location to deal with possible
- * data memory overriding that caused by cache coherence problem.
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst) {
+ /* copy the MAC back from its temporary location to deal
+ * with possible data overwriting caused by the cache
+ * coherence problem.
*/
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
+ cc_copy_mac(dev, req, CC_SG_FROM_BUF);
}
}
-static inline int ssi_buffer_mgr_get_aead_icv_nents(
- struct device *dev,
- struct scatterlist *sgl,
- unsigned int sgl_nents,
- unsigned int authsize,
- u32 last_entry_data_size,
- bool *is_icv_fragmented)
+static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
+ unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size,
+ bool *is_icv_fragmented)
{
unsigned int icv_max_size = 0;
- unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
+ unsigned int icv_required_size = authsize > last_entry_data_size ?
+ (authsize - last_entry_data_size) :
+ authsize;
unsigned int nents;
unsigned int i;
@@ -726,10 +689,12 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
icv_max_size = sgl->length;
if (last_entry_data_size > authsize) {
- nents = 0; /* ICV attached to data in last entry (not fragmented!) */
+ /* ICV attached to data in last entry (not fragmented!) */
+ nents = 0;
*is_icv_fragmented = false;
} else if (last_entry_data_size == authsize) {
- nents = 1; /* ICV placed in whole last entry (not fragmented!) */
+ /* ICV placed in whole last entry (not fragmented!) */
+ nents = 1;
*is_icv_fragmented = false;
} else if (icv_max_size > icv_required_size) {
nents = 1;
@@ -748,25 +713,25 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
return nents;
}
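Worked example for the classification above, with authsize = 16: a last entry holding 24 bytes keeps the ICV attached to data inside that entry (nents = 0, not fragmented); exactly 16 bytes means the ICV fills the whole last entry (nents = 1, not fragmented); 10 bytes means only part of the ICV fits in the last entry, the remaining icv_required_size = 6 bytes sit in the preceding entry, and the ICV is treated as fragmented.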
-static inline int ssi_buffer_mgr_aead_chain_iv(
- struct ssi_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- bool is_last, bool do_chain)
+static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
- if (unlikely(!req->iv)) {
+ if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
goto chain_iv_exit;
}
- areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
+ areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
+ hw_iv_size,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
+ if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
rc = -ENOMEM;
@@ -775,28 +740,27 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
- if (do_chain && areq_ctx->plaintext_authenticate_only) { // TODO: what about CTR?? ask Ron
+ // TODO: what about CTR?? ask Ron
+ if (do_chain && areq_ctx->plaintext_authenticate_only) {
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
/* Chain to given list */
- ssi_buffer_mgr_add_buffer_entry(
- dev, sg_data,
- areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
- iv_size_to_authenc, is_last,
- &areq_ctx->assoc.mlli_nents);
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+ cc_add_buffer_entry(dev, sg_data,
+ (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
+ iv_size_to_authenc, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
chain_iv_exit:
return rc;
}
-static inline int ssi_buffer_mgr_aead_chain_assoc(
- struct ssi_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- bool is_last, bool do_chain)
+static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = 0;
@@ -815,12 +779,12 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
goto chain_assoc_exit;
}
- if (unlikely(req->assoclen == 0)) {
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
+ if (req->assoclen == 0) {
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
goto chain_assoc_exit;
}
@@ -828,12 +792,15 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
//iterate over the sgl to see how many entries are for associated data
//it is assumed that if we reach here , the sgl is already mapped
sg_index = current_sg->length;
- if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
+ //the first entry in the scatter list contains all the associated data
+ if (sg_index > size_of_assoc) {
mapped_nents++;
} else {
while (sg_index <= size_of_assoc) {
current_sg = sg_next(current_sg);
- //if have reached the end of the sgl, then this is unexpected
+ /* if have reached the end of the sgl, then this is
+ * unexpected
+ */
if (!current_sg) {
dev_err(dev, "reached end of sg list. unexpected\n");
return -EINVAL;
@@ -842,7 +809,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
mapped_nents++;
}
}
- if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+ if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
return -ENOMEM;
@@ -853,8 +820,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
* ccm header configurations
*/
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
- if (unlikely((mapped_nents + 1) >
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+ if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
(areq_ctx->assoc.nents + 1),
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -863,227 +829,175 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
}
}
- if (likely(mapped_nents == 1) &&
- (areq_ctx->ccm_hdr_size == ccm_header_size_null))
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
+ if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
else
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
- if (unlikely((do_chain) ||
- (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
+ if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
- ssi_buffer_mgr_add_scatterlist_entry(
- dev, sg_data, areq_ctx->assoc.nents,
- req->src, req->assoclen, 0, is_last,
- &areq_ctx->assoc.mlli_nents);
- areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+ cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
+ req->assoclen, 0, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
chain_assoc_exit:
return rc;
}
-static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
- struct aead_request *req,
- u32 *src_last_bytes, u32 *dst_last_bytes)
+static void cc_prepare_aead_data_dlli(struct aead_request *req,
+ u32 *src_last_bytes, u32 *dst_last_bytes)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
areq_ctx->is_icv_fragmented = false;
- if (likely(req->src == req->dst)) {
+ if (req->src == req->dst) {
/*INPLACE*/
- areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->src_sgl) +
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->src_sgl) +
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->src_sgl) +
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->src_sgl) +
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
(*src_last_bytes - authsize);
} else {
/*NON-INPLACE and ENCRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->dst_sgl) +
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
(*dst_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->dst_sgl) +
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
(*dst_last_bytes - authsize);
}
}
-static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
- struct ssi_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- u32 *src_last_bytes, u32 *dst_last_bytes,
- bool is_last_table)
+static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
int rc = 0, icv_nents;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct device *dev = drvdata_to_dev(drvdata);
+ struct scatterlist *sg;
- if (likely(req->src == req->dst)) {
+ if (req->src == req->dst) {
/*INPLACE*/
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->src.nents,
- areq_ctx->src_sgl,
- areq_ctx->cryptlen,
- areq_ctx->src_offset,
- is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
- areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize,
- *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (unlikely(icv_nents < 0)) {
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
- if (unlikely(areq_ctx->is_icv_fragmented)) {
+ if (areq_ctx->is_icv_fragmented) {
/* Backup happens only when the ICV is fragmented; ICV
- * verification is made by CPU compare in order to simplify
- * MAC verification upon request completion
+ * verification is done by a CPU compare in order to
+ * simplify MAC verification upon request completion.
*/
if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
- if (!drvdata->coherent) {
/* On coherent platforms (e.g. ACP) the ICV
* is already copied for any INPLACE-DECRYPT
* operation, hence this code must be skipped.
*/
- u32 skip = req->assoclen;
-
- if (areq_ctx->is_gcm4543)
- skip += crypto_aead_ivsize(tfm);
-
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac,
- req->src,
- (skip + req->cryptlen -
- areq_ctx->req_authsize),
- skip + req->cryptlen,
- SSI_SG_TO_BUF);
- }
+ if (!drvdata->coherent)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
} else {
areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
- areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
+ areq_ctx->icv_dma_addr =
+ areq_ctx->mac_buf_dma_addr;
}
} else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
/* Should handle the case where the sg is not contiguous. */
- areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
(*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
(*src_last_bytes - authsize);
}
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->src.nents,
- areq_ctx->src_sgl,
- areq_ctx->cryptlen,
- areq_ctx->src_offset,
- is_last_table,
- &areq_ctx->src.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->dst.nents,
- areq_ctx->dst_sgl,
- areq_ctx->cryptlen,
- areq_ctx->dst_offset,
- is_last_table,
- &areq_ctx->dst.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
- areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize,
- *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (unlikely(icv_nents < 0)) {
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
- if (unlikely(areq_ctx->is_icv_fragmented)) {
- /* Backup happens only when ICV is fragmented, ICV
- * verification is made by CPU compare in order to simplify
- * MAC verification upon request completion
- */
- u32 size_to_skip = req->assoclen;
-
- if (areq_ctx->is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
-
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
+ /* Backup happens only when the ICV is fragmented; ICV
+ * verification is done by a CPU compare in order to
+ * simplify MAC verification upon request completion.
+ */
+ if (areq_ctx->is_icv_fragmented) {
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+
} else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
/* Should handle the case where the sg is not contiguous. */
- areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
(*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
(*src_last_bytes - authsize);
}
} else {
/*NON-INPLACE and ENCRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->dst.nents,
- areq_ctx->dst_sgl,
- areq_ctx->cryptlen,
- areq_ctx->dst_offset,
- is_last_table,
- &areq_ctx->dst.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->src.nents,
- areq_ctx->src_sgl,
- areq_ctx->cryptlen,
- areq_ctx->src_offset,
- is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
- areq_ctx->dst_sgl,
- areq_ctx->dst.nents,
- authsize,
- *dst_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (unlikely(icv_nents < 0)) {
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
+ areq_ctx->dst.nents,
+ authsize, *dst_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
- if (likely(!areq_ctx->is_icv_fragmented)) {
+ if (!areq_ctx->is_icv_fragmented) {
+ sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
/* Contig. ICV */
- areq_ctx->icv_dma_addr = sg_dma_address(
- &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
(*dst_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(
- &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
(*dst_last_bytes - authsize);
} else {
areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
@@ -1095,11 +1009,10 @@ prepare_data_mlli_exit:
return rc;
}
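Both fragmented-ICV branches above call cc_copy_mac(), which this patch factors out of the open-coded scatterlist copies it deletes. Reconstructed from those deleted call sites (the assoclen skip, the optional GCM4543 IV, and the trailing req_authsize bytes), the helper plausibly reads as follows; this is a sketch inferred from the diff, not the patch's literal definition:

#include <crypto/aead.h>
#include "cc_buffer_mgr.h"

static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 end = req->assoclen + req->cryptlen;

	if (areq_ctx->is_gcm4543)
		end += crypto_aead_ivsize(tfm);

	/* back up (or restore) the trailing req_authsize bytes */
	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   end - areq_ctx->req_authsize, end, dir);
}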
-static inline int ssi_buffer_mgr_aead_chain_data(
- struct ssi_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- bool is_last_table, bool do_chain)
+static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last_table, bool do_chain)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(drvdata);
@@ -1109,7 +1022,8 @@ static inline int ssi_buffer_mgr_aead_chain_data(
int rc = 0;
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
- unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
+ /* non-inplace mode */
+ unsigned int size_for_map = req->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
bool chained = false;
@@ -1130,11 +1044,10 @@ static inline int ssi_buffer_mgr_aead_chain_data(
if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
- src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->src,
- size_for_map,
- &src_last_bytes,
- &chained);
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
+ src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
+ &src_last_bytes, &chained);
sg_index = areq_ctx->src_sgl->length;
// check where the data starts
while (sg_index <= size_to_skip) {
@@ -1148,7 +1061,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
sg_index += areq_ctx->src_sgl->length;
src_mapped_nents--;
}
- if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+ if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
@@ -1160,26 +1073,23 @@ static inline int ssi_buffer_mgr_aead_chain_data(
if (req->src != req->dst) {
size_for_map = req->assoclen + req->cryptlen;
- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
- rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
- DMA_BIDIRECTIONAL,
- &areq_ctx->dst.nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dst_last_bytes,
- &dst_mapped_nents);
- if (unlikely(rc != 0)) {
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->dst.nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+ &dst_mapped_nents);
+ if (rc) {
rc = -ENOMEM;
goto chain_data_exit;
}
}
- dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
- size_for_map,
- &dst_last_bytes,
- &chained);
+ dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
+ &dst_last_bytes, &chained);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
@@ -1195,45 +1105,43 @@ static inline int ssi_buffer_mgr_aead_chain_data(
sg_index += areq_ctx->dst_sgl->length;
dst_mapped_nents--;
}
- if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+ if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->dst.nents = dst_mapped_nents;
areq_ctx->dst_offset = offset;
- if ((src_mapped_nents > 1) ||
- (dst_mapped_nents > 1) ||
+ if (src_mapped_nents > 1 ||
+ dst_mapped_nents > 1 ||
do_chain) {
- areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
- rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
- sg_data,
- &src_last_bytes,
- &dst_last_bytes,
- is_last_table);
+ areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
+ rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes,
+ &dst_last_bytes, is_last_table);
} else {
- areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
- ssi_buffer_mgr_prepare_aead_data_dlli(
- req, &src_last_bytes, &dst_last_bytes);
+ areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
+ cc_prepare_aead_data_dlli(req, &src_last_bytes,
+ &dst_last_bytes);
}
chain_data_exit:
return rc;
}
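The DLLI/MLLI choice at the end of cc_aead_chain_data() follows one rule: DLLI (direct LLI) can describe exactly one contiguous DMA segment per direction, so multiple mapped entries on either side, or a flow that already chained an MLLI table, force the MLLI path. Pulled out as a standalone predicate purely for illustration (cc_needs_mlli() is hypothetical):

#include <linux/types.h>

static bool cc_needs_mlli(u32 src_mapped_nents, u32 dst_mapped_nents,
			  bool do_chain)
{
	/* one DMA entry per side and no prior chaining is the only
	 * case DLLI can describe
	 */
	return src_mapped_nents > 1 || dst_mapped_nents > 1 || do_chain;
}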
-static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
- struct aead_request *req)
+static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
+ struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
u32 curr_mlli_size = 0;
- if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
curr_mlli_size = areq_ctx->assoc.mlli_nents *
LLI_ENTRY_BYTE_SIZE;
}
- if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
+ if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
/* In-place case: dst nents are equal to src nents */
if (req->src == req->dst) {
areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
@@ -1272,8 +1180,7 @@ static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
}
}
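cc_update_aead_mlli_nents() assigns each MLLI table its SRAM address by accumulating a running byte offset (curr_mlli_size) from drvdata->mlli_sram_addr, so the tables sit back to back: assoc, then src, then dst. A sketch of that layout arithmetic, under the assumption that nothing else shares the SRAM window:

/* hypothetical illustration of the packing, not driver code */
static void cc_mlli_sram_layout(struct cc_drvdata *drvdata,
				u32 assoc_nents, u32 src_nents,
				cc_sram_addr_t *assoc,
				cc_sram_addr_t *src, cc_sram_addr_t *dst)
{
	*assoc = drvdata->mlli_sram_addr;
	*src = *assoc + assoc_nents * LLI_ENTRY_BYTE_SIZE;
	*dst = *src + src_nents * LLI_ENTRY_BYTE_SIZE;
}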
-int ssi_buffer_mgr_map_aead_request(
- struct ssi_drvdata *drvdata, struct aead_request *req)
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
@@ -1284,30 +1191,22 @@ int ssi_buffer_mgr_map_aead_request(
int rc = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
bool is_gcm4543 = areq_ctx->is_gcm4543;
-
+ dma_addr_t dma_addr;
u32 mapped_nents = 0;
u32 dummy = 0; /*used for the assoc data fragments */
u32 size_to_map = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
mlli_params->curr_pool = NULL;
sg_data.num_of_buffers = 0;
+ /* Copy the MAC to a temporary location to deal with possible
+ * data memory overwriting caused by a cache coherence problem.
+ */
if (drvdata->coherent &&
- (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
- likely(req->src == req->dst)) {
- u32 size_to_skip = req->assoclen;
-
- if (is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
-
- /* copy mac to a temporary location to deal with possible
- * data memory overriding that caused by cache coherence problem.
- */
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
- }
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
/* calculate the size for the cipher; remove the ICV in decrypt */
areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
@@ -1315,90 +1214,83 @@ int ssi_buffer_mgr_map_aead_request(
req->cryptlen :
(req->cryptlen - authsize);
- areq_ctx->mac_buf_dma_addr = dma_map_single(dev, areq_ctx->mac_buf,
- MAX_MAC_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
+ dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->mac_buf_dma_addr = dma_addr;
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
- areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
- (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
- AES_BLOCK_SIZE,
- DMA_TO_DEVICE);
+ void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
- if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
+ dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE,
- (areq_ctx->ccm_config +
- CCM_CTR_COUNT_0_OFFSET));
+ AES_BLOCK_SIZE, addr);
areq_ctx->ccm_iv0_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
}
- if (ssi_aead_handle_config_buf(dev, areq_ctx,
- areq_ctx->ccm_config, &sg_data,
- req->assoclen) != 0) {
+ areq_ctx->ccm_iv0_dma_addr = dma_addr;
+
+ if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+ &sg_data, req->assoclen)) {
rc = -ENOMEM;
goto aead_map_failure;
}
}
-#if SSI_CC_HAS_AES_GCM
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
- areq_ctx->hkey_dma_addr = dma_map_single(dev,
- areq_ctx->hkey,
- AES_BLOCK_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
+ dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->hkey_dma_addr = dma_addr;
- areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
- &areq_ctx->gcm_len_block,
- AES_BLOCK_SIZE,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
+ dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->gcm_block_len_dma_addr = dma_addr;
- areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
- areq_ctx->gcm_iv_inc1,
- AES_BLOCK_SIZE,
- DMA_TO_DEVICE);
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
- areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
- areq_ctx->gcm_iv_inc2,
- AES_BLOCK_SIZE,
- DMA_TO_DEVICE);
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
}
+ areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
-#endif /*SSI_CC_HAS_AES_GCM*/
size_to_map = req->cryptlen + req->assoclen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
@@ -1406,29 +1298,31 @@ int ssi_buffer_mgr_map_aead_request(
if (is_gcm4543)
size_to_map += crypto_aead_ivsize(tfm);
- rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
- size_to_map, DMA_BIDIRECTIONAL, &areq_ctx->src.nents,
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
- if (unlikely(rc != 0)) {
+ rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->src.nents,
+ (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+ LLI_MAX_NUM_OF_DATA_ENTRIES),
+ &dummy, &mapped_nents);
+ if (rc) {
rc = -ENOMEM;
goto aead_map_failure;
}
- if (likely(areq_ctx->is_single_pass)) {
+ if (areq_ctx->is_single_pass) {
/*
* Create MLLI table for:
* (1) Assoc. data
* (2) Src/Dst SGLs
* Note: IV is a contiguous buffer (not an SGL)
*/
- rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
+ if (rc)
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
+ if (rc)
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
+ if (rc)
goto aead_map_failure;
} else { /* DOUBLE-PASS flow */
/*
@@ -1451,27 +1345,28 @@ int ssi_buffer_mgr_map_aead_request(
* (3) MLLI for src
* (4) MLLI for dst
*/
- rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
+ if (rc)
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
+ if (rc)
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
- if (unlikely(rc != 0))
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
+ if (rc)
goto aead_map_failure;
}
- /* Mlli support -start building the MLLI according to the above results */
- if (unlikely(
- (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
- (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
+ /* MLLI support - start building the MLLI according to the
+ * above results.
+ */
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
- rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
- if (unlikely(rc != 0))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto aead_map_failure;
- ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
+ cc_update_aead_mlli_nents(drvdata, req);
dev_dbg(dev, "assoc params mn %d\n",
areq_ctx->assoc.mlli_nents);
dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
@@ -1480,19 +1375,18 @@ int ssi_buffer_mgr_map_aead_request(
return 0;
aead_map_failure:
- ssi_buffer_mgr_unmap_aead_request(dev, req);
+ cc_unmap_aead_request(dev, req);
return rc;
}
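A refactor repeated throughout cc_map_aead_request() above: every dma_map_single() result now lands in a local dma_addr first and is committed to the request context only after dma_mapping_error() passes, so a failed mapping never leaves a stale address behind for cc_unmap_aead_request() to trip over. The pattern in isolation (map_one() and out are placeholders, not driver names):

#include <linux/dma-mapping.h>

static int map_one(struct device *dev, void *buf, size_t len,
		   dma_addr_t *out)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;	/* *out is untouched on failure */

	*out = dma_addr;
	return 0;
}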
-int ssi_buffer_mgr_map_hash_request_final(
- struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
struct device *dev = drvdata_to_dev(drvdata);
- u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
- areq_ctx->buff0;
- u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
- &areq_ctx->buff0_cnt;
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
@@ -1502,88 +1396,78 @@ int ssi_buffer_mgr_map_hash_request_final(
dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
- if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
+ if (nbytes == 0 && *curr_buff_cnt == 0) {
/* nothing to do */
return 0;
}
/* TODO: copy data in case the buffer is large enough for the operation */
/* map the previous buffer */
- if (*curr_buff_cnt != 0) {
- if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
- *curr_buff_cnt, &sg_data) != 0) {
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
return -ENOMEM;
}
}
- if (src && (nbytes > 0) && do_update) {
- if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy,
- &mapped_nents))){
+ if (src && nbytes > 0 && do_update) {
+ if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
goto unmap_curr_buff;
}
- if (src && (mapped_nents == 1)
- && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+ if (src && mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = nbytes;
areq_ctx->curr_sg = areq_ctx->buff_sg;
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
} else {
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
}
}
/* build MLLI */
- if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- areq_ctx->in_nents,
- src, nbytes, 0, true,
- &areq_ctx->mlli_nents);
- if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+ 0, true, &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
goto fail_unmap_din;
- }
}
/* change the buffer index for the unmap function */
areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
+ cc_dma_buf_type(areq_ctx->data_dma_buf_type));
return 0;
fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
- if (*curr_buff_cnt != 0)
+ if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
return -ENOMEM;
}
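cc_hash_buf()/cc_hash_buf_cnt() (and the cc_next_buf*() pair used further down) replace the open-coded buff_index ternaries this patch removes. Reconstructed from those ternaries, the accessors over the two staging buffers presumably look like this (a sketch of the inferred helpers, assuming they live in a hash header):

static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
{
	return state->buff_index ? state->buff1 : state->buff0;
}

static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
{
	return state->buff_index ? &state->buff1_cnt : &state->buff0_cnt;
}

static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
{
	/* the buffer that becomes current after the index flips */
	return state->buff_index ? state->buff0 : state->buff1;
}

static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
{
	return state->buff_index ? &state->buff0_cnt : &state->buff1_cnt;
}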
-int ssi_buffer_mgr_map_hash_request_update(
- struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
struct device *dev = drvdata_to_dev(drvdata);
- u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
- areq_ctx->buff0;
- u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
- &areq_ctx->buff0_cnt;
- u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
- areq_ctx->buff1;
- u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
- &areq_ctx->buff1_cnt;
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ u8 *next_buff = cc_next_buf(areq_ctx);
+ u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
unsigned int update_data_len;
u32 total_in_len = nbytes + *curr_buff_cnt;
@@ -1596,18 +1480,17 @@ int ssi_buffer_mgr_map_hash_request_update(
dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
areq_ctx->curr_sg = NULL;
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
- if (unlikely(total_in_len < block_size)) {
+ if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
- ssi_buffer_mgr_get_sgl_nents(dev, src, nbytes, &dummy,
- NULL);
+ cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
@@ -1623,20 +1506,20 @@ int ssi_buffer_mgr_map_hash_request_update(
*next_buff_cnt, update_data_len);
/* Copy the new residue to the next buffer */
- if (*next_buff_cnt != 0) {
+ if (*next_buff_cnt) {
dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
next_buff, (update_data_len - *curr_buff_cnt),
*next_buff_cnt);
- ssi_buffer_mgr_copy_scatterlist_portion(dev, next_buff, src,
- (update_data_len - *curr_buff_cnt),
- nbytes, SSI_SG_TO_BUF);
+ cc_copy_sg_portion(dev, next_buff, src,
+ (update_data_len - *curr_buff_cnt),
+ nbytes, CC_SG_TO_BUF);
/* change the buffer index for next operation */
swap_index = 1;
}
- if (*curr_buff_cnt != 0) {
- if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
- *curr_buff_cnt, &sg_data) != 0) {
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
return -ENOMEM;
}
/* change the buffer index for next operation */
@@ -1644,42 +1527,33 @@ int ssi_buffer_mgr_map_hash_request_update(
}
if (update_data_len > *curr_buff_cnt) {
- if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
- (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy,
- &mapped_nents))){
+ if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents)) {
goto unmap_curr_buff;
}
- if ((mapped_nents == 1)
- && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+ if (mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
/* only one entry in the SG and no previous data */
memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = update_data_len;
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
areq_ctx->curr_sg = areq_ctx->buff_sg;
} else {
- areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
}
}
- if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- areq_ctx->in_nents,
- src,
- (update_data_len - *curr_buff_cnt),
- 0,
- true,
- &areq_ctx->mlli_nents);
- if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+ (update_data_len - *curr_buff_cnt), 0, true,
+ &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
goto fail_unmap_din;
- }
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1689,18 +1563,17 @@ fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
- if (*curr_buff_cnt != 0)
+ if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
return -ENOMEM;
}
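The update path hashes only whole blocks and carries the remainder into the next staging buffer. Worked example with a 64-byte block size: 10 bytes already buffered plus 100 new bytes gives total_in_len = 110, update_data_len = 64 (one full block), and 46 bytes of residue for next_buff_cnt. As arithmetic, assuming update_data_len is simply total_in_len rounded down to a block multiple (update_len() is illustrative only):

#include <linux/types.h>

static inline u32 update_len(u32 curr_buff_cnt, u32 nbytes,
			     u32 block_size)
{
	u32 total_in_len = curr_buff_cnt + nbytes;

	/* e.g. (10 + 100) - (110 % 64) = 64 */
	return total_in_len - (total_in_len % block_size);
}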
-void ssi_buffer_mgr_unmap_hash_request(
- struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
- u32 *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
- &areq_ctx->buff1_cnt;
+ u32 *prev_len = cc_next_buf_cnt(areq_ctx);
/* In case a pool was set, a table was
* allocated and should be released
@@ -1714,21 +1587,23 @@ void ssi_buffer_mgr_unmap_hash_request(
areq_ctx->mlli_params.mlli_dma_addr);
}
- if ((src) && likely(areq_ctx->in_nents != 0)) {
+ if (src && areq_ctx->in_nents) {
dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
- if (*prev_len != 0) {
+ if (*prev_len) {
dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
&sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
if (!do_revert) {
- /* clean the previous data length for update operation */
+ /* clean the previous data length for update
+ * operation
+ */
*prev_len = 0;
} else {
areq_ctx->buff_index ^= 1;
@@ -1736,7 +1611,7 @@ void ssi_buffer_mgr_unmap_hash_request(
}
}
-int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
@@ -1747,23 +1622,23 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
drvdata->buff_mgr_handle = buff_mgr_handle;
- buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
- "dx_single_mlli_tables", dev,
+ buff_mgr_handle->mlli_buffs_pool =
+ dma_pool_create("dx_single_mlli_tables", dev,
MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
LLI_ENTRY_BYTE_SIZE,
MLLI_TABLE_MIN_ALIGNMENT, 0);
- if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
+ if (!buff_mgr_handle->mlli_buffs_pool)
goto error;
return 0;
error:
- ssi_buffer_mgr_fini(drvdata);
+ cc_buffer_mgr_fini(drvdata);
return -ENOMEM;
}
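The pool created above hands out fixed-size MLLI tables (MAX_NUM_OF_TOTAL_MLLI_ENTRIES * LLI_ENTRY_BYTE_SIZE bytes, aligned to MLLI_TABLE_MIN_ALIGNMENT). A hedged illustration of how a consumer such as cc_generate_mlli() would draw from it, using only the standard dmapool API:

#include <linux/dmapool.h>

static int alloc_mlli_table(struct dma_pool *pool)
{
	u8 *mlli_va;
	dma_addr_t mlli_dma;

	mlli_va = dma_pool_alloc(pool, GFP_KERNEL, &mlli_dma);
	if (!mlli_va)
		return -ENOMEM;

	/* ... fill the table, then hand mlli_dma to the engine ... */

	dma_pool_free(pool, mlli_va, mlli_dma);
	return 0;
}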
-int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
diff --git a/drivers/staging/ccree/cc_buffer_mgr.h b/drivers/staging/ccree/cc_buffer_mgr.h
new file mode 100644
index 000000000000..99b752aa1077
--- /dev/null
+++ b/drivers/staging/ccree/cc_buffer_mgr.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_buffer_mgr.h
+ * Buffer Manager
+ */
+
+#ifndef __CC_BUFFER_MGR_H__
+#define __CC_BUFFER_MGR_H__
+
+#include <crypto/algapi.h>
+
+#include "cc_driver.h"
+
+enum cc_req_dma_buf_type {
+ CC_DMA_BUF_NULL = 0,
+ CC_DMA_BUF_DLLI,
+ CC_DMA_BUF_MLLI
+};
+
+enum cc_sg_cpy_direct {
+ CC_SG_TO_BUF = 0,
+ CC_SG_FROM_BUF = 1
+};
+
+struct cc_mlli {
+ cc_sram_addr_t sram_addr;
+ unsigned int nents; // sg nents
+ unsigned int mlli_nents; // mlli nents may be different from the above
+};
+
+struct mlli_params {
+ struct dma_pool *curr_pool;
+ u8 *mlli_virt_addr;
+ dma_addr_t mlli_dma_addr;
+ u32 mlli_len;
+};
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
+
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+ unsigned int ivsize,
+ struct scatterlist *src,
+ struct scatterlist *dst);
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags);
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags);
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert);
+
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
+
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
+
+#endif /*__CC_BUFFER_MGR_H__*/
+
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/cc_cipher.c
index ee85cbf7c9ae..d4ac0ff2ffcf 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/cc_cipher.c
@@ -1,46 +1,27 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/semaphore.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/aes.h>
-#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
+#include "cc_driver.h"
#include "cc_lli_defs.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_cipher.h"
-#include "ssi_request_mgr.h"
-#include "ssi_sysfs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
#define MAX_ABLKCIPHER_SEQ_LEN 6
#define template_ablkcipher template_u.ablkcipher
-#define SSI_MIN_AES_XTS_SIZE 0x10
-#define SSI_MAX_AES_XTS_SIZE 0x2000
-struct ssi_blkcipher_handle {
+#define CC_MIN_AES_XTS_SIZE 0x10
+#define CC_MAX_AES_XTS_SIZE 0x2000
+struct cc_cipher_handle {
struct list_head blkcipher_alg_list;
};
@@ -54,8 +35,8 @@ struct cc_hw_key_info {
enum cc_hw_crypto_key key2_slot;
};
-struct ssi_ablkcipher_ctx {
- struct ssi_drvdata *drvdata;
+struct cc_cipher_ctx {
+ struct cc_drvdata *drvdata;
int keylen;
int key_round_number;
int cipher_mode;
@@ -67,61 +48,56 @@ struct ssi_ablkcipher_ctx {
struct crypto_shash *shash_tfm;
};
-static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
-static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size)
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
{
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
switch (size) {
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
- if (likely((ctx_p->cipher_mode != DRV_CIPHER_XTS) &&
- (ctx_p->cipher_mode != DRV_CIPHER_ESSIV) &&
- (ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)))
+ if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+ ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+ ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
return 0;
break;
case CC_AES_256_BIT_KEY_SIZE:
return 0;
case (CC_AES_192_BIT_KEY_SIZE * 2):
case (CC_AES_256_BIT_KEY_SIZE * 2):
- if (likely((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
- (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
- (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)))
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
return 0;
break;
default:
break;
}
case S_DIN_to_DES:
- if (likely(size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE))
+ if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
return 0;
break;
-#if SSI_CC_HAS_MULTI2
- case S_DIN_to_MULTI2:
- if (likely(size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE))
- return 0;
- break;
-#endif
default:
break;
}
return -EINVAL;
}
-static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size)
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+ unsigned int size)
{
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
switch (ctx_p->cipher_mode) {
case DRV_CIPHER_XTS:
- if ((size >= SSI_MIN_AES_XTS_SIZE) &&
- (size <= SSI_MAX_AES_XTS_SIZE) &&
+ if (size >= CC_MIN_AES_XTS_SIZE &&
+ size <= CC_MAX_AES_XTS_SIZE &&
IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
case DRV_CIPHER_CBC_CTS:
- if (likely(size >= AES_BLOCK_SIZE))
+ if (size >= AES_BLOCK_SIZE)
return 0;
break;
case DRV_CIPHER_OFB:
@@ -131,7 +107,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
case DRV_CIPHER_CBC:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
- if (likely(IS_ALIGNED(size, AES_BLOCK_SIZE)))
+ if (IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
default:
@@ -139,23 +115,9 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
}
break;
case S_DIN_to_DES:
- if (likely(IS_ALIGNED(size, DES_BLOCK_SIZE)))
- return 0;
- break;
-#if SSI_CC_HAS_MULTI2
- case S_DIN_to_MULTI2:
- switch (ctx_p->cipher_mode) {
- case DRV_MULTI2_CBC:
- if (likely(IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE)))
- return 0;
- break;
- case DRV_MULTI2_OFB:
+ if (IS_ALIGNED(size, DES_BLOCK_SIZE))
return 0;
- default:
- break;
- }
break;
-#endif /*SSI_CC_HAS_MULTI2*/
default:
break;
}
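For XTS the driver accepts only sizes between CC_MIN_AES_XTS_SIZE and CC_MAX_AES_XTS_SIZE (0x10 to 0x2000 bytes) that are whole AES blocks. The same check as a standalone predicate, for illustration (cc_xts_size_ok() is hypothetical):

#include <linux/kernel.h>
#include <crypto/aes.h>

static bool cc_xts_size_ok(unsigned int size)
{
	/* 16 bytes up to 8 KiB in 16-byte steps: 0x20 passes,
	 * 0x18 fails the alignment test
	 */
	return size >= CC_MIN_AES_XTS_SIZE &&
	       size <= CC_MAX_AES_XTS_SIZE &&
	       IS_ALIGNED(size, AES_BLOCK_SIZE);
}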
@@ -164,36 +126,42 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
static unsigned int get_max_keysize(struct crypto_tfm *tfm)
{
- struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg, crypto_alg);
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_ABLKCIPHER)
- return ssi_alg->crypto_alg.cra_ablkcipher.max_keysize;
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_ABLKCIPHER)
+ return cc_alg->crypto_alg.cra_ablkcipher.max_keysize;
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER)
- return ssi_alg->crypto_alg.cra_blkcipher.max_keysize;
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER)
+ return cc_alg->crypto_alg.cra_blkcipher.max_keysize;
return 0;
}
-static int ssi_blkcipher_init(struct crypto_tfm *tfm)
+static int cc_cipher_init(struct crypto_tfm *tfm)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg;
- struct ssi_crypto_alg *ssi_alg =
- container_of(alg, struct ssi_crypto_alg, crypto_alg);
- struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg, crypto_alg);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
int rc = 0;
unsigned int max_key_buf_size = get_max_keysize(tfm);
+ struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
crypto_tfm_alg_name(tfm));
- ctx_p->cipher_mode = ssi_alg->cipher_mode;
- ctx_p->flow_mode = ssi_alg->flow_mode;
- ctx_p->drvdata = ssi_alg->drvdata;
+ ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
+
+ ctx_p->cipher_mode = cc_alg->cipher_mode;
+ ctx_p->flow_mode = cc_alg->flow_mode;
+ ctx_p->drvdata = cc_alg->drvdata;
/* Allocate key buffer, cache line aligned */
- ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
+ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
if (!ctx_p->user.key)
return -ENOMEM;
@@ -224,9 +192,9 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
return rc;
}
-static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
+static void cc_cipher_exit(struct crypto_tfm *tfm)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
unsigned int max_key_buf_size = get_max_keysize(tfm);
@@ -262,13 +230,15 @@ static const u8 zero_buff[] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
/* The function verifies that TDES keys are not weak. */
-static int ssi_verify_3des_keys(const u8 *key, unsigned int keylen)
+static int cc_verify_3des_keys(const u8 *key, unsigned int keylen)
{
struct tdes_keys *tdes_key = (struct tdes_keys *)key;
/* verify key1 != key2 and key3 != key2*/
- if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
- (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) {
+ if ((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
+ sizeof(tdes_key->key1)) == 0) ||
+ (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
+ sizeof(tdes_key->key3)) == 0)) {
return -ENOEXEC;
}
@@ -290,11 +260,11 @@ static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
return END_OF_KEYS;
}
-static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
- const u8 *key,
- unsigned int keylen)
+static int cc_cipher_setkey(struct crypto_ablkcipher *atfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
u32 tmp[DES_EXPKEY_WORDS];
unsigned int max_key_buf_size = get_max_keysize(tfm);
@@ -305,44 +275,39 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
/* STAT_PHASE_0: Init and sanity checks */
-#if SSI_CC_HAS_MULTI2
- /*last byte of key buffer is round number and should not be a part of key size*/
- if (ctx_p->flow_mode == S_DIN_to_MULTI2)
- keylen -= 1;
-#endif /*SSI_CC_HAS_MULTI2*/
-
- if (unlikely(validate_keys_sizes(ctx_p, keylen) != 0)) {
+ if (validate_keys_sizes(ctx_p, keylen)) {
dev_err(dev, "Unsupported key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- if (ssi_is_hw_key(tfm)) {
+ if (cc_is_hw_key(tfm)) {
/* setting HW key slots */
struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
- if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
+ if (ctx_p->flow_mode != S_DIN_to_AES) {
dev_err(dev, "HW key not supported for non-AES flows\n");
return -EINVAL;
}
ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
- if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key1 number (%d)\n",
hki->hw_key1);
return -EINVAL;
}
- if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
- (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
- (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)) {
- if (unlikely(hki->hw_key1 == hki->hw_key2)) {
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki->hw_key1 == hki->hw_key2) {
dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
hki->hw_key1, hki->hw_key2);
return -EINVAL;
}
- ctx_p->hw.key2_slot = hw_key_to_cc_hw_key(hki->hw_key2);
- if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
+ ctx_p->hw.key2_slot =
+ hw_key_to_cc_hw_key(hki->hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key2 number (%d)\n",
hki->hw_key2);
return -EINVAL;
@@ -350,28 +315,28 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
}
ctx_p->keylen = keylen;
- dev_dbg(dev, "ssi_is_hw_key ret 0");
+ dev_dbg(dev, "cc_is_hw_key ret 0");
return 0;
}
// verify weak keys
if (ctx_p->flow_mode == S_DIN_to_DES) {
- if (unlikely(!des_ekey(tmp, key)) &&
+ if (!des_ekey(tmp, key) &&
(crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
dev_dbg(dev, "weak DES key");
return -EINVAL;
}
}
- if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
- xts_check_key(tfm, key, keylen) != 0) {
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+ xts_check_key(tfm, key, keylen)) {
dev_dbg(dev, "weak XTS key");
return -EINVAL;
}
- if ((ctx_p->flow_mode == S_DIN_to_DES) &&
- (keylen == DES3_EDE_KEY_SIZE) &&
- ssi_verify_3des_keys(key, keylen) != 0) {
+ if (ctx_p->flow_mode == S_DIN_to_DES &&
+ keylen == DES3_EDE_KEY_SIZE &&
+ cc_verify_3des_keys(key, keylen)) {
dev_dbg(dev, "weak 3DES key");
return -EINVAL;
}
@@ -380,34 +345,24 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
max_key_buf_size, DMA_TO_DEVICE);
- if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
-#if SSI_CC_HAS_MULTI2
- memcpy(ctx_p->user.key, key, CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE);
- ctx_p->key_round_number = key[CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE];
- if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS ||
- ctx_p->key_round_number > CC_MULTI2_MAX_NUM_ROUNDS) {
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- dev_dbg(dev, "SSI_CC_HAS_MULTI2 einval");
- return -EINVAL;
-#endif /*SSI_CC_HAS_MULTI2*/
- } else {
- memcpy(ctx_p->user.key, key, keylen);
- if (keylen == 24)
- memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
-
- if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
- /* sha256 for key2 - use sw implementation */
- int key_len = keylen >> 1;
- int err;
- SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
-
- desc->tfm = ctx_p->shash_tfm;
-
- err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
- if (err) {
- dev_err(dev, "Failed to hash ESSIV key.\n");
- return err;
- }
+ memcpy(ctx_p->user.key, key, keylen);
+ if (keylen == 24)
+ memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* sha256 for key2 - use sw implementation */
+ int key_len = keylen >> 1;
+ int err;
+
+ SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
+ desc->tfm = ctx_p->shash_tfm;
+
+ err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
+ ctx_p->user.key + key_len);
+ if (err) {
+ dev_err(dev, "Failed to hash ESSIV key.\n");
+ return err;
}
}
dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
@@ -418,16 +373,13 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
return 0;
}
-static inline void
-ssi_blkcipher_create_setup_desc(
- struct crypto_tfm *tfm,
- struct blkcipher_req_ctx *req_ctx,
- unsigned int ivsize,
- unsigned int nbytes,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+ struct blkcipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
@@ -437,11 +389,14 @@ ssi_blkcipher_create_setup_desc(
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
- struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg, crypto_alg);
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_512)
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+ CRYPTO_ALG_BULK_DU_512)
du_size = 512;
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_4096)
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+ CRYPTO_ALG_BULK_DU_4096)
du_size = 4096;
switch (cipher_mode) {
@@ -456,8 +411,8 @@ ssi_blkcipher_create_setup_desc(
set_cipher_config0(&desc[*seq_size], direction);
set_flow_mode(&desc[*seq_size], flow_mode);
set_cipher_mode(&desc[*seq_size], cipher_mode);
- if ((cipher_mode == DRV_CIPHER_CTR) ||
- (cipher_mode == DRV_CIPHER_OFB)) {
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
} else {
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
@@ -470,7 +425,7 @@ ssi_blkcipher_create_setup_desc(
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
if (flow_mode == S_DIN_to_AES) {
- if (ssi_is_hw_key(tfm)) {
+ if (cc_is_hw_key(tfm)) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key1_slot);
} else {
@@ -497,7 +452,7 @@ ssi_blkcipher_create_setup_desc(
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (ssi_is_hw_key(tfm)) {
+ if (cc_is_hw_key(tfm)) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key1_slot);
} else {
@@ -513,7 +468,7 @@ ssi_blkcipher_create_setup_desc(
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (ssi_is_hw_key(tfm)) {
+ if (cc_is_hw_key(tfm)) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key2_slot);
} else {
@@ -543,62 +498,14 @@ ssi_blkcipher_create_setup_desc(
}
}
-#if SSI_CC_HAS_MULTI2
-static inline void ssi_blkcipher_create_multi2_setup_desc(
- struct crypto_tfm *tfm,
- struct blkcipher_req_ctx *req_ctx,
- unsigned int ivsize,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
-{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-
- int direction = req_ctx->gen_ctx.op_type;
- /* Load system key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- set_din_type(&desc[*seq_size], DMA_DLLI, ctx_p->user.key_dma_addr,
- CC_MULTI2_SYSTEM_KEY_SIZE, NS_BIT);
- set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
-
- /* load data key */
- hw_desc_init(&desc[*seq_size]);
- set_din_type(&desc[*seq_size], DMA_DLLI,
- (ctx_p->user.key_dma_addr + CC_MULTI2_SYSTEM_KEY_SIZE),
- CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
- set_multi2_num_rounds(&desc[*seq_size], ctx_p->key_round_number);
- set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
- set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
- (*seq_size)++;
-
- /* Set state */
- hw_desc_init(&desc[*seq_size]);
- set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
- ivsize, NS_BIT);
- set_cipher_config0(&desc[*seq_size], direction);
- set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
- set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
- (*seq_size)++;
-}
-#endif /*SSI_CC_HAS_MULTI2*/
-
-static inline void
-ssi_blkcipher_create_data_desc(
- struct crypto_tfm *tfm,
- struct blkcipher_req_ctx *req_ctx,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes,
- void *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+ struct blkcipher_req_ctx *req_ctx,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes,
+ void *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
unsigned int flow_mode = ctx_p->flow_mode;
@@ -609,17 +516,12 @@ ssi_blkcipher_create_data_desc(
case S_DIN_to_DES:
flow_mode = DIN_DES_DOUT;
break;
-#if SSI_CC_HAS_MULTI2
- case S_DIN_to_MULTI2:
- flow_mode = DIN_MULTI2_DOUT;
- break;
-#endif /*SSI_CC_HAS_MULTI2*/
default:
dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
return;
}
/* Process */
- if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
dev_dbg(dev, " data params addr %pad length 0x%X\n",
&sg_dma_address(src), nbytes);
dev_dbg(dev, " data params addr %pad length 0x%X\n",
@@ -682,68 +584,63 @@ ssi_blkcipher_create_data_desc(
}
}
-static int ssi_blkcipher_complete(struct device *dev,
- struct ssi_ablkcipher_ctx *ctx_p,
- struct blkcipher_req_ctx *req_ctx,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int ivsize,
- void *areq,
- void __iomem *cc_base)
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
- int completion_error = 0;
+ struct ablkcipher_request *areq = (struct ablkcipher_request *)cc_req;
+ struct scatterlist *dst = areq->dst;
+ struct scatterlist *src = areq->src;
+ struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
kfree(req_ctx->iv);
- if (areq) {
- /*
- * The crypto API expects us to set the req->info to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->info, req_ctx->backup_info, ivsize);
- kfree(req_ctx->backup_info);
- } else {
- scatterwalk_map_and_copy(req->info, req->dst,
- (req->nbytes - ivsize),
- ivsize, 0);
- }
-
- ablkcipher_request_complete(areq, completion_error);
- return 0;
+ /*
+ * The crypto API expects us to set req->info to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->info, req_ctx->backup_info, ivsize);
+ kfree(req_ctx->backup_info);
+ } else if (!err) {
+ scatterwalk_map_and_copy(req->info, req->dst,
+ (req->nbytes - ivsize), ivsize, 0);
}
- return completion_error;
+
+ ablkcipher_request_complete(areq, err);
}
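The req->info convention restated above is what lets callers chain CBC requests: on completion req->info holds the last ciphertext block, ready to serve as the IV of the following request. A usage sketch against the (old) ablkcipher API, assuming req is already allocated and keyed, and with completion waiting elided for brevity:

/* first chunk: the driver writes the last ciphertext block into iv */
ablkcipher_request_set_crypt(req, src1, dst1, len1, iv);
crypto_ablkcipher_encrypt(req);

/* second chunk: the updated iv continues the CBC chain */
ablkcipher_request_set_crypt(req, src2, dst2, len2, iv);
crypto_ablkcipher_encrypt(req);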
-static int ssi_blkcipher_process(
- struct crypto_tfm *tfm,
- struct blkcipher_req_ctx *req_ctx,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes,
- void *info, //req info
- unsigned int ivsize,
- void *areq,
- enum drv_crypto_direction direction)
+static int cc_cipher_process(struct ablkcipher_request *req,
+ enum drv_crypto_direction direction)
{
- struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
+ struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ void *info = req->info;
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
int rc, seq_len = 0, cts_restore_flag = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
- dev_dbg(dev, "%s areq=%p info=%p nbytes=%d\n",
+ dev_dbg(dev, "%s req=%p info=%p nbytes=%d\n",
((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- "Encrypt" : "Decrypt"), areq, info, nbytes);
+ "Encrypt" : "Decrypt"), req, info, nbytes);
/* STAT_PHASE_0: Init and sanity checks */
/* TODO: check data length according to mode */
- if (unlikely(validate_data_size(ctx_p, nbytes))) {
+ if (validate_data_size(ctx_p, nbytes)) {
dev_err(dev, "Unsupported data size %d.\n", nbytes);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
rc = -EINVAL;
@@ -758,7 +655,7 @@ static int ssi_blkcipher_process(
/* The IV we are handed may be allocated from the stack so
* we must copy it to a DMAable buffer before use.
*/
- req_ctx->iv = kmalloc(ivsize, GFP_KERNEL);
+ req_ctx->iv = kmalloc(ivsize, flags);
if (!req_ctx->iv) {
rc = -ENOMEM;
goto exit_process;
@@ -766,17 +663,18 @@ static int ssi_blkcipher_process(
memcpy(req_ctx->iv, info, ivsize);
/* For CTS, in case the data size is aligned to 16, use CBC mode */
- if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
+ if (((nbytes % AES_BLOCK_SIZE) == 0) &&
+ ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
ctx_p->cipher_mode = DRV_CIPHER_CBC;
cts_restore_flag = 1;
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_ablkcipher_complete;
- ssi_req.user_arg = (void *)areq;
+ cc_req.user_cb = (void *)cc_cipher_complete;
+ cc_req.user_arg = (void *)req;
#ifdef ENABLE_CYCLE_COUNT
- ssi_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
#endif
@@ -786,10 +684,9 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_1: Map buffers */
- rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx,
- ivsize, nbytes, req_ctx->iv,
- src, dst);
- if (unlikely(rc != 0)) {
+ rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+ req_ctx->iv, src, dst, flags);
+ if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit_process;
}
@@ -797,50 +694,35 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_2: Create sequence */
/* Setup processing */
-#if SSI_CC_HAS_MULTI2
- if (ctx_p->flow_mode == S_DIN_to_MULTI2)
- ssi_blkcipher_create_multi2_setup_desc(tfm, req_ctx, ivsize,
- desc, &seq_len);
- else
-#endif /*SSI_CC_HAS_MULTI2*/
- ssi_blkcipher_create_setup_desc(tfm, req_ctx, ivsize, nbytes,
- desc, &seq_len);
+ cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Data processing */
- ssi_blkcipher_create_data_desc(tfm, req_ctx, dst, src, nbytes, areq,
- desc, &seq_len);
+ cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
+ &seq_len);
/* do we need to generate IV? */
if (req_ctx->is_giv) {
- ssi_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
- ssi_req.ivgen_dma_addr_len = 1;
+ cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+ cc_req.ivgen_dma_addr_len = 1;
	/* set the IV size (8/16 B long) */
- ssi_req.ivgen_size = ivsize;
+ cc_req.ivgen_size = ivsize;
}
/* STAT_PHASE_3: Lock HW and push sequence */
- rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (!areq) ? 0 : 1);
- if (areq) {
- if (unlikely(rc != -EINPROGRESS)) {
- /* Failed to send the request or request completed synchronously */
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
- }
-
- } else {
- if (rc != 0) {
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
- } else {
- rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
- src, ivsize, NULL,
- ctx_p->drvdata->cc_base);
- }
+ rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+ &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ /* Failed to send the request or request completed
+ * synchronously
+ */
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
}
exit_process:
- if (cts_restore_flag != 0)
+ if (cts_restore_flag)
ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
kfree(req_ctx->backup_info);
kfree(req_ctx->iv);
}
@@ -848,60 +730,28 @@ exit_process:
return rc;
}
-static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
-{
- struct ablkcipher_request *areq = (struct ablkcipher_request *)ssi_req;
- struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
- struct ssi_ablkcipher_ctx *ctx_p = crypto_ablkcipher_ctx(tfm);
- unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
-
- ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src,
- ivsize, areq, cc_base);
-}
-
-/* Async wrap functions */
-
-static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
-{
- struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
-
- ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
-
- return ssi_blkcipher_init(tfm);
-}
-
-static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key,
- unsigned int keylen)
-{
- return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
-}
-
-static int ssi_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int cc_cipher_encrypt(struct ablkcipher_request *req)
{
- struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
req_ctx->is_giv = false;
+ req_ctx->backup_info = NULL;
- return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
}
-static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int cc_cipher_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
/*
* Allocate and save the last IV sized bytes of the source, which will
* be lost in case of in-place decryption and might be needed for CTS.
*/
- req_ctx->backup_info = kmalloc(ivsize, GFP_KERNEL);
+ req_ctx->backup_info = kmalloc(ivsize, flags);
if (!req_ctx->backup_info)
return -ENOMEM;
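+	/*
+	 * Editorial note: for an in-place operation req->src aliases
+	 * req->dst, so the last ciphertext block must be saved here;
+	 * cc_cipher_complete() restores it into req->info afterwards.
+	 */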
@@ -909,22 +759,20 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
(req->nbytes - ivsize), ivsize, 0);
req_ctx->is_giv = false;
- return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
/* DX Block cipher alg */
-static struct ssi_alg_template blkcipher_algs[] = {
-/* Async template */
-#if SSI_CC_HAS_AES_XTS
+static struct cc_alg_template blkcipher_algs[] = {
{
.name = "xts(aes)",
.driver_name = "xts-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -939,9 +787,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -955,9 +803,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -965,17 +813,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_XTS,
.flow_mode = S_DIN_to_AES,
},
-#endif /*SSI_CC_HAS_AES_XTS*/
-#if SSI_CC_HAS_AES_ESSIV
{
.name = "essiv(aes)",
.driver_name = "essiv-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -989,9 +835,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1005,9 +851,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1015,17 +861,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ESSIV,
.flow_mode = S_DIN_to_AES,
},
-#endif /*SSI_CC_HAS_AES_ESSIV*/
-#if SSI_CC_HAS_AES_BITLOCKER
{
.name = "bitlocker(aes)",
.driver_name = "bitlocker-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1039,9 +883,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1055,9 +899,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE * 2,
.max_keysize = AES_MAX_KEY_SIZE * 2,
.ivsize = AES_BLOCK_SIZE,
@@ -1065,16 +909,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_BITLOCKER,
.flow_mode = S_DIN_to_AES,
},
-#endif /*SSI_CC_HAS_AES_BITLOCKER*/
{
.name = "ecb(aes)",
.driver_name = "ecb-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = 0,
@@ -1088,9 +931,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1104,9 +947,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1114,16 +957,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
},
-#if SSI_CC_HAS_AES_CTS
{
.name = "cts1(cbc(aes))",
.driver_name = "cts1-cbc-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1131,16 +973,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC_CTS,
.flow_mode = S_DIN_to_AES,
},
-#endif
{
.name = "ctr(aes)",
.driver_name = "ctr-aes-dx",
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1154,9 +995,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -1170,9 +1011,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = 0,
@@ -1186,9 +1027,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = DES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1202,9 +1043,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
.blocksize = DES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = 0,
@@ -1212,47 +1053,13 @@ static struct ssi_alg_template blkcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_DES,
},
-#if SSI_CC_HAS_MULTI2
- {
- .name = "cbc(multi2)",
- .driver_name = "cbc-multi2-dx",
- .blocksize = CC_MULTI2_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_decrypt,
- .min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
- .max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
- .ivsize = CC_MULTI2_IV_SIZE,
- },
- .cipher_mode = DRV_MULTI2_CBC,
- .flow_mode = S_DIN_to_MULTI2,
- },
- {
- .name = "ofb(multi2)",
- .driver_name = "ofb-multi2-dx",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ssi_ablkcipher_setkey,
- .encrypt = ssi_ablkcipher_encrypt,
- .decrypt = ssi_ablkcipher_encrypt,
- .min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
- .max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
- .ivsize = CC_MULTI2_IV_SIZE,
- },
- .cipher_mode = DRV_MULTI2_OFB,
- .flow_mode = S_DIN_to_MULTI2,
- },
-#endif /*SSI_CC_HAS_MULTI2*/
};
static
-struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
- *template, struct device *dev)
+struct cc_crypto_alg *cc_cipher_create_alg(struct cc_alg_template *template,
+ struct device *dev)
{
- struct ssi_crypto_alg *t_alg;
+ struct cc_crypto_alg *t_alg;
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
@@ -1265,13 +1072,13 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
alg->cra_module = THIS_MODULE;
- alg->cra_priority = SSI_CRA_PRIO;
+ alg->cra_priority = CC_CRA_PRIO;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct ssi_ablkcipher_ctx);
+ alg->cra_ctxsize = sizeof(struct cc_cipher_ctx);
- alg->cra_init = ssi_ablkcipher_init;
- alg->cra_exit = ssi_blkcipher_exit;
+ alg->cra_init = cc_cipher_init;
+ alg->cra_exit = cc_cipher_exit;
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher = template->template_ablkcipher;
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
@@ -1283,11 +1090,11 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
return t_alg;
}
-int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
+int cc_cipher_free(struct cc_drvdata *drvdata)
{
- struct ssi_crypto_alg *t_alg, *n;
- struct ssi_blkcipher_handle *blkcipher_handle =
- drvdata->blkcipher_handle;
+ struct cc_crypto_alg *t_alg, *n;
+ struct cc_cipher_handle *blkcipher_handle = drvdata->blkcipher_handle;
+
if (blkcipher_handle) {
/* Remove registered algs */
list_for_each_entry_safe(t_alg, n,
@@ -1303,10 +1110,10 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
return 0;
}
-int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
{
- struct ssi_blkcipher_handle *ablkcipher_handle;
- struct ssi_crypto_alg *t_alg;
+ struct cc_cipher_handle *ablkcipher_handle;
+ struct cc_crypto_alg *t_alg;
struct device *dev = drvdata_to_dev(drvdata);
int rc = -ENOMEM;
int alg;
@@ -1323,7 +1130,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
ARRAY_SIZE(blkcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
dev_dbg(dev, "creating %s\n", blkcipher_algs[alg].driver_name);
- t_alg = ssi_ablkcipher_create_alg(&blkcipher_algs[alg], dev);
+ t_alg = cc_cipher_create_alg(&blkcipher_algs[alg], dev);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
@@ -1337,7 +1144,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
rc = crypto_register_alg(&t_alg->crypto_alg);
dev_dbg(dev, "%s alg registration rc = %x\n",
t_alg->crypto_alg.cra_driver_name, rc);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
@@ -1352,6 +1159,6 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
return 0;
fail0:
- ssi_ablkcipher_free(drvdata);
+ cc_cipher_free(drvdata);
return rc;
}
diff --git a/drivers/staging/ccree/cc_cipher.h b/drivers/staging/ccree/cc_cipher.h
new file mode 100644
index 000000000000..4c181c721723
--- /dev/null
+++ b/drivers/staging/ccree/cc_cipher.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+/* Crypto cipher flags */
+#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
+
+#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE1 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE2 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE3)
+
+struct blkcipher_req_ctx {
+ struct async_gen_req_ctx gen_ctx;
+ enum cc_req_dma_buf_type dma_buf_type;
+ u32 in_nents;
+ u32 in_mlli_nents;
+ u32 out_nents;
+ u32 out_mlli_nents;
+	u8 *backup_info; /* store last src block for IV restore on decrypt */
+ u8 *iv;
+ bool is_giv;
+ struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
+#ifndef CRYPTO_ALG_BULK_MASK
+
+#define CRYPTO_ALG_BULK_DU_512 0x00002000
+#define CRYPTO_ALG_BULK_DU_4096 0x00004000
+#define CRYPTO_ALG_BULK_MASK (CRYPTO_ALG_BULK_DU_512 |\
+ CRYPTO_ALG_BULK_DU_4096)
+#endif /* CRYPTO_ALG_BULK_MASK */
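+/* Editorial note (assumption): the DU ("data unit") flags advertise XTS
+ * variants that process data in fixed 512 B / 4 KB units, matching the
+ * CRYPTO_ALG_BULK_DU_* template entries in cc_cipher.c; kernels that
+ * already define CRYPTO_ALG_BULK_MASK provide their own values.
+ */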
+
+#ifdef CRYPTO_TFM_REQ_HW_KEY
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+ return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
+}
+
+#else
+
+struct arm_hw_key_info {
+ int hw_key1;
+ int hw_key2;
+};
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+ return false;
+}
+
+#endif /* CRYPTO_TFM_REQ_HW_KEY */
+
+#endif /*__CC_CIPHER_H__*/
diff --git a/drivers/staging/ccree/cc_crypto_ctx.h b/drivers/staging/ccree/cc_crypto_ctx.h
index 591f6fdadc59..eb16842d7db9 100644
--- a/drivers/staging/ccree/cc_crypto_ctx.h
+++ b/drivers/staging/ccree/cc_crypto_ctx.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#ifndef _CC_CRYPTO_CTX_H_
#define _CC_CRYPTO_CTX_H_
@@ -21,7 +8,7 @@
/* context size */
#ifndef CC_CTX_SIZE_LOG2
-#if (CC_SUPPORT_SHA > 256)
+#if (CC_DEV_SHA_MAX > 256)
#define CC_CTX_SIZE_LOG2 8
#else
#define CC_CTX_SIZE_LOG2 7
@@ -72,7 +59,7 @@
#define CC_SHA384_BLOCK_SIZE 128
#define CC_SHA512_BLOCK_SIZE 128
-#if (CC_SUPPORT_SHA > 256)
+#if (CC_DEV_SHA_MAX > 256)
#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
#else /* Only up to SHA256 */
@@ -82,15 +69,6 @@
#define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
-#define CC_MULTI2_SYSTEM_KEY_SIZE 32
-#define CC_MULTI2_DATA_KEY_SIZE 8
-#define CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE \
- (CC_MULTI2_SYSTEM_KEY_SIZE + CC_MULTI2_DATA_KEY_SIZE)
-#define CC_MULTI2_BLOCK_SIZE 8
-#define CC_MULTI2_IV_SIZE 8
-#define CC_MULTI2_MIN_NUM_ROUNDS 8
-#define CC_MULTI2_MAX_NUM_ROUNDS 128
-
#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
enum drv_engine_type {
@@ -168,14 +146,6 @@ enum drv_hash_hw_mode {
DRV_HASH_HW_RESERVE32B = S32_MAX
};
-enum drv_multi2_mode {
- DRV_MULTI2_NULL = -1,
- DRV_MULTI2_ECB = 0,
- DRV_MULTI2_CBC = 1,
- DRV_MULTI2_OFB = 2,
- DRV_MULTI2_RESERVE32B = S32_MAX
-};
-
/* drv_crypto_key_type[1:0] is mapped to cipher_do[1:0] */
/* drv_crypto_key_type[2] is mapped to cipher_config2 */
enum drv_crypto_key_type {
diff --git a/drivers/staging/ccree/cc_debugfs.c b/drivers/staging/ccree/cc_debugfs.c
new file mode 100644
index 000000000000..08f8db489cf0
--- /dev/null
+++ b/drivers/staging/ccree/cc_debugfs.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/stringify.h>
+#include "cc_driver.h"
+#include "cc_crypto_ctx.h"
+#include "cc_debugfs.h"
+
+struct cc_debugfs_ctx {
+ struct dentry *dir;
+};
+
+#define CC_DEBUG_REG(_X) { \
+ .name = __stringify(_X),\
+ .offset = CC_REG(_X) \
+ }
+
+/*
+ * Global dentry of the debugfs ccree/ directory. It is shared by all
+ * instances of ccree rather than tied to a specific one, hence it is
+ * global.
+ */
+static struct dentry *cc_debugfs_dir;
+
+static struct debugfs_reg32 debug_regs[] = {
+ CC_DEBUG_REG(HOST_SIGNATURE),
+ CC_DEBUG_REG(HOST_IRR),
+ CC_DEBUG_REG(HOST_POWER_DOWN_EN),
+ CC_DEBUG_REG(AXIM_MON_ERR),
+ CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT),
+ CC_DEBUG_REG(HOST_IMR),
+ CC_DEBUG_REG(AXIM_CFG),
+ CC_DEBUG_REG(AXIM_CACHE_PARAMS),
+ CC_DEBUG_REG(HOST_VERSION),
+ CC_DEBUG_REG(GPR_HOST),
+ CC_DEBUG_REG(AXIM_MON_COMP),
+};
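+/* Illustration (editorial): CC_DEBUG_REG(HOST_IRR) expands via
+ * __stringify() to { .name = "HOST_IRR", .offset = CC_HOST_IRR_REG_OFFSET },
+ * so each entry exposes one register by name in the "regs" regset below.
+ */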
+
+int __init cc_debugfs_global_init(void)
+{
+ cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+
+ return !cc_debugfs_dir;
+}
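+/* Editorial note: debugfs_create_dir() returns NULL on failure here, so
+ * this helper reports failure as 1 rather than a -E* code; the caller in
+ * ccree_init() only tests for non-zero.
+ */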
+
+void __exit cc_debugfs_global_fini(void)
+{
+ debugfs_remove(cc_debugfs_dir);
+}
+
+int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_debugfs_ctx *ctx;
+ struct debugfs_regset32 *regset;
+ struct dentry *file;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = debug_regs;
+ regset->nregs = ARRAY_SIZE(debug_regs);
+ regset->base = drvdata->cc_base;
+
+ ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
+ if (!ctx->dir)
+ return -ENFILE;
+
+ file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+ if (!file) {
+ debugfs_remove(ctx->dir);
+ return -ENFILE;
+ }
+
+ file = debugfs_create_bool("coherent", 0400, ctx->dir,
+ &drvdata->coherent);
+
+ if (!file) {
+ debugfs_remove_recursive(ctx->dir);
+ return -ENFILE;
+ }
+
+ drvdata->debugfs = ctx;
+
+ return 0;
+}
+
+void cc_debugfs_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
+
+ debugfs_remove_recursive(ctx->dir);
+}
diff --git a/drivers/staging/ccree/cc_debugfs.h b/drivers/staging/ccree/cc_debugfs.h
new file mode 100644
index 000000000000..5b5320eca7d2
--- /dev/null
+++ b/drivers/staging/ccree/cc_debugfs.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_DEBUGFS_H__
+#define __CC_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+int cc_debugfs_global_init(void);
+void cc_debugfs_global_fini(void);
+
+int cc_debugfs_init(struct cc_drvdata *drvdata);
+void cc_debugfs_fini(struct cc_drvdata *drvdata);
+
+#else
+
+static inline int cc_debugfs_global_init(void)
+{
+ return 0;
+}
+
+static inline void cc_debugfs_global_fini(void) {}
+
+static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ return 0;
+}
+
+static inline void cc_debugfs_fini(struct cc_drvdata *drvdata) {}
+
+#endif
+
+#endif /*__CC_DEBUGFS_H__*/
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/cc_driver.c
index 1a3c481fa92a..3a1cb0c98648 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/cc_driver.c
@@ -1,96 +1,56 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/aes.h>
-#include <crypto/sha.h>
-#include <crypto/aead.h>
-#include <crypto/authenc.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/skcipher.h>
-
-#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
-#include <linux/random.h>
-#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/fcntl.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/mutex.h>
-#include <linux/sysctl.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
#include <linux/platform_device.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/pm.h>
-
-/* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
-#include <linux/cache.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/random.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_request_mgr.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_sysfs.h"
-#include "ssi_cipher.h"
-#include "ssi_aead.h"
-#include "ssi_hash.h"
-#include "ssi_ivgen.h"
-#include "ssi_sram_mgr.h"
-#include "ssi_pm.h"
-#include "ssi_fips.h"
-
-#ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const u8 *buf, size_t len)
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_debugfs.h"
+#include "cc_cipher.h"
+#include "cc_aead.h"
+#include "cc_hash.h"
+#include "cc_ivgen.h"
+#include "cc_sram_mgr.h"
+#include "cc_pm.h"
+#include "cc_fips.h"
+
+bool cc_dump_desc;
+module_param_named(dump_desc, cc_dump_desc, bool, 0600);
+MODULE_PARM_DESC(dump_desc, "Dump descriptors to kernel log as debugging aid");
+
+bool cc_dump_bytes;
+module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
+MODULE_PARM_DESC(dump_bytes, "Dump buffers to kernel log as debugging aid");
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
- char prefix[NAME_LEN];
+ char prefix[64];
if (!buf)
return;
- snprintf(prefix, sizeof(prefix), "%s[%lu]: ", name, len);
+ snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);
- print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, len,
- false);
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
+ len, false);
}
-#endif
static irqreturn_t cc_isr(int irq, void *dev_id)
{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
struct device *dev = drvdata_to_dev(drvdata);
u32 irr;
u32 imr;
@@ -100,7 +60,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
dev_dbg(dev, "Got IRR=0x%08X\n", irr);
- if (unlikely(irr == 0)) { /* Probably shared interrupt line */
+ if (irr == 0) { /* Probably shared interrupt line */
dev_err(dev, "Got interrupt with empty IRR\n");
return IRQ_NONE;
}
@@ -111,23 +71,27 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr;
/* Completion interrupt - most probable */
- if (likely((irr & SSI_COMP_IRQ_MASK) != 0)) {
- /* Mask AXI completion interrupt - will be unmasked in Deferred service handler */
- cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_COMP_IRQ_MASK);
- irr &= ~SSI_COMP_IRQ_MASK;
+ if (irr & CC_COMP_IRQ_MASK) {
+ /* Mask AXI completion interrupt - will be unmasked in
+ * Deferred service handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
+ irr &= ~CC_COMP_IRQ_MASK;
complete_request(drvdata);
}
-#ifdef CC_SUPPORT_FIPS
+#ifdef CONFIG_CRYPTO_FIPS
/* TEE FIPS interrupt */
- if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
- /* Mask interrupt - will be unmasked in Deferred service handler */
- cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
- irr &= ~SSI_GPR0_IRQ_MASK;
+ if (irr & CC_GPR0_IRQ_MASK) {
+ /* Mask interrupt - will be unmasked in Deferred service
+ * handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
+ irr &= ~CC_GPR0_IRQ_MASK;
fips_handler(drvdata);
}
#endif
/* AXI error interrupt */
- if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
+ if (irr & CC_AXI_ERR_IRQ_MASK) {
u32 axi_err;
/* Read the AXI error ID */
@@ -135,10 +99,10 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
axi_err);
- irr &= ~SSI_AXI_ERR_IRQ_MASK;
+ irr &= ~CC_AXI_ERR_IRQ_MASK;
}
- if (unlikely(irr != 0)) {
+ if (irr) {
dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
irr);
/* Just warning */
@@ -147,14 +111,14 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
unsigned int val, cache_params;
struct device *dev = drvdata_to_dev(drvdata);
/* Unmask all AXI interrupt sources AXI_CFG1 register */
val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
- cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
dev_dbg(dev, "AXIM_CFG=0x%08X\n",
cc_ioread(drvdata, CC_REG(AXIM_CFG)));
@@ -164,21 +128,10 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
/* Unmask relevant interrupt cause */
- val = (unsigned int)(~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK |
- SSI_GPR0_IRQ_MASK));
+ val = (unsigned int)(~(CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK |
+ CC_GPR0_IRQ_MASK));
cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
-#ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
-#ifdef DX_IRQ_DELAY
- /* Set CC IRQ delay */
- cc_iowrite(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL), DX_IRQ_DELAY);
-#endif
- if (cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)) > 0) {
- dev_dbg(dev, "irq_delay=%d CC cycles\n",
- cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)));
- }
-#endif
-
cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
@@ -199,12 +152,11 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
static int init_cc_resources(struct platform_device *plat_dev)
{
struct resource *req_mem_cc_regs = NULL;
- void __iomem *cc_base = NULL;
- struct ssi_drvdata *new_drvdata;
+ struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
u32 signature_val;
- dma_addr_t dma_mask;
+ u64 dma_mask;
int rc = 0;
new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -222,18 +174,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
- if (IS_ERR(new_drvdata->cc_base)) {
- dev_err(dev, "Failed to ioremap registers");
+ if (IS_ERR(new_drvdata->cc_base))
return PTR_ERR(new_drvdata->cc_base);
- }
dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
req_mem_cc_regs);
dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
&req_mem_cc_regs->start, new_drvdata->cc_base);
- cc_base = new_drvdata->cc_base;
-
/* Then IRQ */
new_drvdata->irq = platform_get_irq(plat_dev, 0);
if (new_drvdata->irq < 0) {
@@ -250,10 +198,12 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+ init_completion(&new_drvdata->hw_queue_avail);
+
if (!plat_dev->dev.dma_mask)
plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
- dma_mask = (dma_addr_t)(DMA_BIT_MASK(DMA_BIT_MASK_LEN));
+ dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
while (dma_mask > 0x7fffffffUL) {
if (dma_supported(&plat_dev->dev, dma_mask)) {
rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
@@ -264,8 +214,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
if (rc) {
- dev_err(dev, "Failed in dma_set_mask, mask=%par\n",
- &dma_mask);
+ dev_err(dev, "Failed in dma_set_mask, mask=%par\n", &dma_mask);
return rc;
}
@@ -277,9 +226,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Verify correct mapping */
signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
- if (signature_val != DX_DEV_SIGNATURE) {
+ if (signature_val != CC_DEV_SIGNATURE) {
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, (u32)DX_DEV_SIGNATURE);
+ signature_val, (u32)CC_DEV_SIGNATURE);
rc = -EINVAL;
goto post_clk_err;
}
@@ -287,84 +236,82 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Display HW versions */
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
- SSI_DEV_NAME_STR,
+ CC_DEV_NAME_STR,
cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
- if (unlikely(rc != 0)) {
+ if (rc) {
dev_err(dev, "init_cc_regs failed\n");
goto post_clk_err;
}
-#ifdef ENABLE_CC_SYSFS
- rc = ssi_sysfs_init(&dev->kobj, new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "init_stat_db failed\n");
+ rc = cc_debugfs_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "Failed registering debugfs interface\n");
goto post_regs_err;
}
-#endif
- rc = ssi_fips_init(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "SSI_FIPS_INIT failed 0x%x\n", rc);
- goto post_sysfs_err;
+ rc = cc_fips_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+ goto post_debugfs_err;
}
- rc = ssi_sram_mgr_init(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_sram_mgr_init failed\n");
+ rc = cc_sram_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_sram_mgr_init failed\n");
goto post_fips_init_err;
}
new_drvdata->mlli_sram_addr =
- ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
- if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
+ cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+ if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
goto post_sram_mgr_err;
}
- rc = request_mgr_init(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "request_mgr_init failed\n");
+ rc = cc_req_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_req_mgr_init failed\n");
goto post_sram_mgr_err;
}
- rc = ssi_buffer_mgr_init(new_drvdata);
- if (unlikely(rc != 0)) {
+ rc = cc_buffer_mgr_init(new_drvdata);
+ if (rc) {
dev_err(dev, "buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
- rc = ssi_power_mgr_init(new_drvdata);
- if (unlikely(rc != 0)) {
+ rc = cc_pm_init(new_drvdata);
+ if (rc) {
dev_err(dev, "ssi_power_mgr_init failed\n");
goto post_buf_mgr_err;
}
- rc = ssi_ivgen_init(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_ivgen_init failed\n");
+ rc = cc_ivgen_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_ivgen_init failed\n");
goto post_power_mgr_err;
}
/* Allocate crypto algs */
- rc = ssi_ablkcipher_alloc(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_ablkcipher_alloc failed\n");
+ rc = cc_cipher_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_cipher_alloc failed\n");
goto post_ivgen_err;
}
/* hash must be allocated before aead since hash exports APIs */
- rc = ssi_hash_alloc(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_hash_alloc failed\n");
+ rc = cc_hash_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_hash_alloc failed\n");
goto post_cipher_err;
}
- rc = ssi_aead_alloc(new_drvdata);
- if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_aead_alloc failed\n");
+ rc = cc_aead_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_aead_alloc failed\n");
goto post_hash_err;
}
@@ -377,25 +324,23 @@ static int init_cc_resources(struct platform_device *plat_dev)
return 0;
post_hash_err:
- ssi_hash_free(new_drvdata);
+ cc_hash_free(new_drvdata);
post_cipher_err:
- ssi_ablkcipher_free(new_drvdata);
+ cc_cipher_free(new_drvdata);
post_ivgen_err:
- ssi_ivgen_fini(new_drvdata);
+ cc_ivgen_fini(new_drvdata);
post_power_mgr_err:
- ssi_power_mgr_fini(new_drvdata);
+ cc_pm_fini(new_drvdata);
post_buf_mgr_err:
- ssi_buffer_mgr_fini(new_drvdata);
+ cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
- request_mgr_fini(new_drvdata);
+ cc_req_mgr_fini(new_drvdata);
post_sram_mgr_err:
- ssi_sram_mgr_fini(new_drvdata);
+ cc_sram_mgr_fini(new_drvdata);
post_fips_init_err:
- ssi_fips_fini(new_drvdata);
-post_sysfs_err:
-#ifdef ENABLE_CC_SYSFS
- ssi_sysfs_fini();
-#endif
+ cc_fips_fini(new_drvdata);
+post_debugfs_err:
+ cc_debugfs_fini(new_drvdata);
post_regs_err:
fini_cc_regs(new_drvdata);
post_clk_err:
@@ -403,7 +348,7 @@ post_clk_err:
return rc;
}
-void fini_cc_regs(struct ssi_drvdata *drvdata)
+void fini_cc_regs(struct cc_drvdata *drvdata)
{
/* Mask all interrupts */
cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
@@ -411,26 +356,24 @@ void fini_cc_regs(struct ssi_drvdata *drvdata)
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
- struct ssi_drvdata *drvdata =
- (struct ssi_drvdata *)platform_get_drvdata(plat_dev);
-
- ssi_aead_free(drvdata);
- ssi_hash_free(drvdata);
- ssi_ablkcipher_free(drvdata);
- ssi_ivgen_fini(drvdata);
- ssi_power_mgr_fini(drvdata);
- ssi_buffer_mgr_fini(drvdata);
- request_mgr_fini(drvdata);
- ssi_sram_mgr_fini(drvdata);
- ssi_fips_fini(drvdata);
-#ifdef ENABLE_CC_SYSFS
- ssi_sysfs_fini();
-#endif
+ struct cc_drvdata *drvdata =
+ (struct cc_drvdata *)platform_get_drvdata(plat_dev);
+
+ cc_aead_free(drvdata);
+ cc_hash_free(drvdata);
+ cc_cipher_free(drvdata);
+ cc_ivgen_fini(drvdata);
+ cc_pm_fini(drvdata);
+ cc_buffer_mgr_fini(drvdata);
+ cc_req_mgr_fini(drvdata);
+ cc_sram_mgr_fini(drvdata);
+ cc_fips_fini(drvdata);
+ cc_debugfs_fini(drvdata);
fini_cc_regs(drvdata);
cc_clk_off(drvdata);
}
-int cc_clk_on(struct ssi_drvdata *drvdata)
+int cc_clk_on(struct cc_drvdata *drvdata)
{
struct clk *clk = drvdata->clk;
int rc;
@@ -446,7 +389,7 @@ int cc_clk_on(struct ssi_drvdata *drvdata)
return 0;
}
-void cc_clk_off(struct ssi_drvdata *drvdata)
+void cc_clk_off(struct cc_drvdata *drvdata)
{
struct clk *clk = drvdata->clk;
@@ -461,23 +404,10 @@ static int cc7x_probe(struct platform_device *plat_dev)
{
int rc;
struct device *dev = &plat_dev->dev;
-#if defined(CONFIG_ARM) && defined(CC_DEBUG)
- u32 ctr, cacheline_size;
-
- asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
- cacheline_size = 4 << ((ctr >> 16) & 0xf);
- dev_dbg(dev, "CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
- cacheline_size, L1_CACHE_BYTES);
-
- asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
- dev_dbg(dev, "Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
- (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
- (ctr >> 20) & 0xF, ctr & 0xF);
-#endif
/* Map registers space */
rc = init_cc_resources(plat_dev);
- if (rc != 0)
+ if (rc)
return rc;
dev_info(dev, "ARM ccree device initialized\n");
@@ -498,38 +428,44 @@ static int cc7x_remove(struct platform_device *plat_dev)
return 0;
}
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-static const struct dev_pm_ops arm_cc7x_driver_pm = {
- SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
-};
-#endif
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-#define DX_DRIVER_RUNTIME_PM (&arm_cc7x_driver_pm)
-#else
-#define DX_DRIVER_RUNTIME_PM NULL
-#endif
-
-#ifdef CONFIG_OF
static const struct of_device_id arm_cc7x_dev_of_match[] = {
{.compatible = "arm,cryptocell-712-ree"},
{}
};
MODULE_DEVICE_TABLE(of, arm_cc7x_dev_of_match);
-#endif
static struct platform_driver cc7x_driver = {
.driver = {
.name = "cc7xree",
-#ifdef CONFIG_OF
.of_match_table = arm_cc7x_dev_of_match,
+#ifdef CONFIG_PM
+ .pm = &ccree_pm,
#endif
- .pm = DX_DRIVER_RUNTIME_PM,
},
.probe = cc7x_probe,
.remove = cc7x_remove,
};
-module_platform_driver(cc7x_driver);
+
+static int __init ccree_init(void)
+{
+ int ret;
+
+ cc_hash_global_init();
+
+ ret = cc_debugfs_global_init();
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&cc7x_driver);
+}
+module_init(ccree_init);
+
+static void __exit ccree_exit(void)
+{
+ platform_driver_unregister(&cc7x_driver);
+ cc_debugfs_global_fini();
+}
+module_exit(ccree_exit);
/* Module description */
MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
diff --git a/drivers/staging/ccree/cc_driver.h b/drivers/staging/ccree/cc_driver.h
new file mode 100644
index 000000000000..773ac591c45c
--- /dev/null
+++ b/drivers/staging/ccree/cc_driver.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_driver.h
+ * ARM CryptoCell Linux Crypto Driver
+ */
+
+#ifndef __CC_DRIVER_H__
+#define __CC_DRIVER_H__
+
+#ifdef COMP_IN_WQ
+#include <linux/workqueue.h>
+#else
+#include <linux/interrupt.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <linux/version.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* Registers definitions from shared/hw/ree_include */
+#include "cc_host_regs.h"
+#define CC_DEV_SHA_MAX 512
+#include "cc_crypto_ctx.h"
+#include "cc_hw_queue_defs.h"
+#include "cc_sram_mgr.h"
+
+extern bool cc_dump_desc;
+extern bool cc_dump_bytes;
+
+#define DRV_MODULE_VERSION "3.0"
+
+#define CC_DEV_NAME_STR "cc715ree"
+#define CC_COHERENT_CACHE_PARAMS 0xEEE
+
+/* Maximum DMA mask supported by IP */
+#define DMA_BIT_MASK_LEN 48
+
+#define CC_DEV_SIGNATURE 0xDCC71200UL
+
+#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))
+
+#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+
+#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
+ CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
+ CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+
+/* Register name mangling macro */
+#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
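+/* Illustration (editorial, not part of the patch): CC_REG(HOST_IRR)
+ * expands to CC_HOST_IRR_REG_OFFSET, which cc_host_regs.h is expected
+ * to define, so register reads become e.g.:
+ *
+ *	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ */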
+
+/* TEE FIPS status interrupt */
+#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)
+
+#define CC_CRA_PRIO 3000
+
+#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
+
+#define MAX_REQUEST_QUEUE_SIZE 4096
+#define MAX_MLLI_BUFF_SIZE 2080
+#define MAX_ICV_NENTS_SUPPORTED 2
+
+/* Definitions for HW descriptors DIN/DOUT fields */
+#define NS_BIT 1
+#define AXI_ID 0
+/* AXI_ID is not the AXI ID of the transaction itself but the value of the
+ * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
+ */
+
+#define CC_MAX_IVGEN_DMA_ADDRESSES 3
+struct cc_crypto_req {
+ void (*user_cb)(struct device *dev, void *req, int err);
+ void *user_arg;
+ dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
+	/* For the first 'ivgen_dma_addr_len' addresses of this array,
+	 * the generated IV is placed there by send_request().
+	 * The same generated IV is used for all of these addresses.
+	 */
+ /* Amount of 'ivgen_dma_addr' elements to be filled. */
+ unsigned int ivgen_dma_addr_len;
+ /* The generated IV size required, 8/16 B allowed. */
+ unsigned int ivgen_size;
+ struct completion seq_compl; /* request completion */
+};
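+/* Wiring sketch (editorial), mirroring cc_cipher_process(): the caller
+ * fills in the completion callback and its argument before submission:
+ *
+ *	struct cc_crypto_req cc_req = {};
+ *
+ *	cc_req.user_cb = (void *)cc_cipher_complete;
+ *	cc_req.user_arg = (void *)req;
+ */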
+
+/**
+ * struct cc_drvdata - driver private data context
+ * @cc_base: virt address of the CC registers
+ * @irq: device IRQ number
+ * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @fw_ver: SeP loaded firmware version
+ */
+struct cc_drvdata {
+ void __iomem *cc_base;
+ int irq;
+ u32 irq_mask;
+ u32 fw_ver;
+ struct completion hw_queue_avail; /* wait for HW queue availability */
+ struct platform_device *plat_dev;
+ cc_sram_addr_t mlli_sram_addr;
+ void *buff_mgr_handle;
+ void *hash_handle;
+ void *aead_handle;
+ void *blkcipher_handle;
+ void *request_mgr_handle;
+ void *fips_handle;
+ void *ivgen_handle;
+ void *sram_mgr_handle;
+ void *debugfs;
+ struct clk *clk;
+ bool coherent;
+};
+
+struct cc_crypto_alg {
+ struct list_head entry;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ struct cc_drvdata *drvdata;
+ struct crypto_alg crypto_alg;
+ struct aead_alg aead_alg;
+};
+
+struct cc_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ } template_u;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ struct cc_drvdata *drvdata;
+};
+
+struct async_gen_req_ctx {
+ dma_addr_t iv_dma_addr;
+ enum drv_crypto_direction op_type;
+};
+
+static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
+{
+ return &drvdata->plat_dev->dev;
+}
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len);
+static inline void dump_byte_array(const char *name, const u8 *the_array,
+ size_t size)
+{
+ if (cc_dump_bytes)
+ __dump_byte_array(name, the_array, size);
+}
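+/* Usage sketch (editorial): with the dump_bytes module parameter set,
+ * the inline wrapper forwards to __dump_byte_array(), e.g.:
+ *
+ *	dump_byte_array("IV", req_ctx->iv, ivsize);
+ *
+ * "IV" and the buffer shown are illustrative only.
+ */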
+
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
+void fini_cc_regs(struct cc_drvdata *drvdata);
+int cc_clk_on(struct cc_drvdata *drvdata);
+void cc_clk_off(struct cc_drvdata *drvdata);
+
+static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+
+static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
+static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
+{
+ return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+}
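+/* Usage sketch (editorial): request-path allocations derive their GFP
+ * flags from the async request base, so atomic callers get GFP_ATOMIC
+ * and sleepable ones get GFP_KERNEL, as in cc_cipher_process():
+ *
+ *	gfp_t flags = cc_gfp_flags(&req->base);
+ *
+ *	req_ctx->iv = kmalloc(ivsize, flags);
+ */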
+
+#endif /*__CC_DRIVER_H__*/
+
diff --git a/drivers/staging/ccree/ssi_fips.c b/drivers/staging/ccree/cc_fips.c
index 4aea99fa129f..de08af976b7f 100644
--- a/drivers/staging/ccree/ssi_fips.c
+++ b/drivers/staging/ccree/cc_fips.c
@@ -1,36 +1,22 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/fips.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_fips.h"
+#include "cc_driver.h"
+#include "cc_fips.h"
static void fips_dsr(unsigned long devarg);
-struct ssi_fips_handle {
+struct cc_fips_handle {
struct tasklet_struct tasklet;
};
/* The function called once at driver entry point to check
* whether TEE FIPS error occurred.
*/
-static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
+static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
{
u32 reg;
@@ -42,7 +28,7 @@ static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
* This function should push the FIPS REE library status towards the TEE library
* by writing the error state to HOST_GPR0 register.
*/
-void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
{
int val = CC_FIPS_SYNC_REE_STATUS;
@@ -51,9 +37,9 @@ void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
}
-void ssi_fips_fini(struct ssi_drvdata *drvdata)
+void cc_fips_fini(struct cc_drvdata *drvdata)
{
- struct ssi_fips_handle *fips_h = drvdata->fips_handle;
+ struct cc_fips_handle *fips_h = drvdata->fips_handle;
if (!fips_h)
return; /* Not allocated */
@@ -65,10 +51,9 @@ void ssi_fips_fini(struct ssi_drvdata *drvdata)
drvdata->fips_handle = NULL;
}
-void fips_handler(struct ssi_drvdata *drvdata)
+void fips_handler(struct cc_drvdata *drvdata)
{
- struct ssi_fips_handle *fips_handle_ptr =
- drvdata->fips_handle;
+ struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle;
tasklet_schedule(&fips_handle_ptr->tasklet);
}
@@ -84,11 +69,11 @@ static inline void tee_fips_error(struct device *dev)
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct device *dev = drvdata_to_dev(drvdata);
u32 irq, state, val;
- irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
+ irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
if (irq) {
state = cc_ioread(drvdata, CC_REG(GPR_HOST));
@@ -105,9 +90,9 @@ static void fips_dsr(unsigned long devarg)
}
/* The function called once at driver entry point .*/
-int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+int cc_fips_init(struct cc_drvdata *p_drvdata)
{
- struct ssi_fips_handle *fips_h;
+ struct cc_fips_handle *fips_h;
struct device *dev = drvdata_to_dev(p_drvdata);
fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
diff --git a/drivers/staging/ccree/cc_fips.h b/drivers/staging/ccree/cc_fips.h
new file mode 100644
index 000000000000..0d520030095b
--- /dev/null
+++ b/drivers/staging/ccree/cc_fips.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_FIPS_H__
+#define __CC_FIPS_H__
+
+#ifdef CONFIG_CRYPTO_FIPS
+
+enum cc_fips_status {
+ CC_FIPS_SYNC_MODULE_OK = 0x0,
+ CC_FIPS_SYNC_MODULE_ERROR = 0x1,
+ CC_FIPS_SYNC_REE_STATUS = 0x4,
+ CC_FIPS_SYNC_TEE_STATUS = 0x8,
+ CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
+};
+
+int cc_fips_init(struct cc_drvdata *p_drvdata);
+void cc_fips_fini(struct cc_drvdata *drvdata);
+void fips_handler(struct cc_drvdata *drvdata);
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+
+#else /* CONFIG_CRYPTO_FIPS */
+
+static inline int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+ return 0;
+}
+
+static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
+ bool ok) {}
+static inline void fips_handler(struct cc_drvdata *drvdata) {}
+
+#endif /* CONFIG_CRYPTO_FIPS */
+
+#endif /*__CC_FIPS_H__*/
+
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/cc_hash.c
index 2035835b62dc..8afc39f10bb3 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/cc_hash.c
@@ -1,44 +1,26 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
-#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/hash.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_request_mgr.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_sysfs.h"
-#include "ssi_hash.h"
-#include "ssi_sram_mgr.h"
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
-#define SSI_MAX_AHASH_SEQ_LEN 12
-#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
+#define CC_MAX_HASH_SEQ_LEN 12
+#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
-struct ssi_hash_handle {
- ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
- ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+struct cc_hash_handle {
+	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
+	cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
struct list_head hash_list;
- struct completion init_comp;
};
static const u32 digest_len_init[] = {
@@ -53,32 +35,31 @@ static const u32 sha224_init[] = {
static const u32 sha256_init[] = {
SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
static const u32 digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static const u64 sha384_init[] = {
+static u64 sha384_init[] = {
SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static const u64 sha512_init[] = {
+static u64 sha512_init[] = {
SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif
-static void ssi_hash_create_xcbc_setup(
- struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size);
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
-static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size);
+static const void *cc_larval_digest(struct device *dev, u32 mode);
-struct ssi_hash_alg {
+struct cc_hash_alg {
struct list_head entry;
int hash_mode;
int hw_mode;
int inter_digestsize;
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
struct ahash_alg ahash_alg;
};
@@ -88,13 +69,13 @@ struct hash_key_req_ctx {
};
/* hash per-session context */
-struct ssi_hash_ctx {
- struct ssi_drvdata *drvdata;
+struct cc_hash_ctx {
+ struct cc_drvdata *drvdata;
/* holds the origin digest; the digest after "setkey" if HMAC,*
* the initial digest if HASH.
*/
- u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
- u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
dma_addr_t digest_buff_dma_addr;
@@ -107,33 +88,27 @@ struct ssi_hash_ctx {
bool is_hmac;
};
-static void ssi_hash_create_data_desc(
- struct ahash_req_ctx *areq_ctx,
- struct ssi_hash_ctx *ctx,
- unsigned int flow_mode, struct cc_hw_desc desc[],
- bool is_not_last_data,
- unsigned int *seq_size);
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
+ unsigned int flow_mode, struct cc_hw_desc desc[],
+ bool is_not_last_data, unsigned int *seq_size);
-static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
+static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
- if (unlikely((mode == DRV_HASH_MD5) ||
- (mode == DRV_HASH_SHA384) ||
- (mode == DRV_HASH_SHA512))) {
+ if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
+ mode == DRV_HASH_SHA512) {
set_bytes_swap(desc, 1);
} else {
set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
}
}
-static int ssi_hash_map_result(struct device *dev,
- struct ahash_req_ctx *state,
- unsigned int digestsize)
+static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize)
{
state->digest_result_dma_addr =
- dma_map_single(dev, (void *)state->digest_result_buff,
- digestsize,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
+ dma_map_single(dev, state->digest_result_buff,
+ digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
digestsize);
return -ENOMEM;
@@ -145,302 +120,331 @@ static int ssi_hash_map_result(struct device *dev,
return 0;
}
-static int ssi_hash_map_request(struct device *dev,
- struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx)
+static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
{
bool is_hmac = ctx->is_hmac;
- ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->hash_mode);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc;
- int rc = -ENOMEM;
-
- state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->buff0)
- goto fail0;
-
- state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->buff1)
- goto fail_buff0;
-
- state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->digest_result_buff)
- goto fail_buff1;
-
- state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
- if (!state->digest_buff)
- goto fail_digest_result_buff;
-
- dev_dbg(dev, "Allocated digest-buffer in context ctx->digest_buff=@%p\n",
- state->digest_buff);
- if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
- state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->digest_bytes_len)
- goto fail1;
- dev_dbg(dev, "Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n",
- state->digest_bytes_len);
- } else {
- state->digest_bytes_len = NULL;
- }
-
- state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
- if (!state->opad_digest_buff)
- goto fail2;
-
- dev_dbg(dev, "Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n",
- state->opad_digest_buff);
-
- state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
- ctx->inter_digestsize, state->digest_buff);
- goto fail3;
- }
- dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
- ctx->inter_digestsize, state->digest_buff,
- &state->digest_buff_dma_addr);
+ memset(state, 0, sizeof(*state));
if (is_hmac) {
- dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
- memset(state->digest_buff, 0, ctx->inter_digestsize);
- } else { /*sha*/
- memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
-#if (DX_DEV_SHA_MAX > 256)
- if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384)))
- memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
+ ctx->hw_mode != DRV_CIPHER_CMAC) {
+ dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+
+ memcpy(state->digest_buff, ctx->digest_buff,
+ ctx->inter_digestsize);
+#if (CC_DEV_SHA_MAX > 256)
+ if (ctx->hash_mode == DRV_HASH_SHA512 ||
+ ctx->hash_mode == DRV_HASH_SHA384)
+ memcpy(state->digest_bytes_len,
+ digest_len_sha512_init, HASH_LEN_SIZE);
else
- memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
+ memcpy(state->digest_bytes_len,
+ digest_len_init, HASH_LEN_SIZE);
#else
- memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
+ memcpy(state->digest_bytes_len, digest_len_init,
+ HASH_LEN_SIZE);
#endif
}
- dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (ctx->hash_mode != DRV_HASH_NULL) {
- dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
+ dma_sync_single_for_cpu(dev,
+ ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ memcpy(state->opad_digest_buff,
+ ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
}
} else { /*hash*/
- /* Copy the initial digests if hash flow. The SRAM contains the
- * initial digests in the expected order for all SHA*
- */
- hw_desc_init(&desc);
- set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
- set_dout_dlli(&desc, state->digest_buff_dma_addr,
- ctx->inter_digestsize, NS_BIT, 0);
- set_flow_mode(&desc, BYPASS);
-
- rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
- if (unlikely(rc != 0)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- goto fail4;
- }
+ /* Copy the initial digests if hash flow. */
+ const void *larval = cc_larval_digest(dev, ctx->hash_mode);
+
+ memcpy(state->digest_buff, larval, ctx->inter_digestsize);
+ }
+}
+
+static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ state->digest_buff_dma_addr =
+ dma_map_single(dev, state->digest_buff,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->digest_buff);
+ return -EINVAL;
}
+ dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->digest_buff,
+ &state->digest_buff_dma_addr);
if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
- state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+ state->digest_bytes_len_dma_addr =
+ dma_map_single(dev, state->digest_bytes_len,
+ HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
HASH_LEN_SIZE, state->digest_bytes_len);
- goto fail4;
+ goto unmap_digest_buf;
}
dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
HASH_LEN_SIZE, state->digest_bytes_len,
&state->digest_bytes_len_dma_addr);
- } else {
- state->digest_bytes_len_dma_addr = 0;
}
if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
- state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ state->opad_digest_dma_addr =
+ dma_map_single(dev, state->opad_digest_buff,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
ctx->inter_digestsize,
state->opad_digest_buff);
- goto fail5;
+ goto unmap_digest_len;
}
dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
ctx->inter_digestsize, state->opad_digest_buff,
&state->opad_digest_dma_addr);
- } else {
- state->opad_digest_dma_addr = 0;
}
- state->buff0_cnt = 0;
- state->buff1_cnt = 0;
- state->buff_index = 0;
- state->mlli_params.curr_pool = NULL;
return 0;
-fail5:
- if (state->digest_bytes_len_dma_addr != 0) {
- dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+unmap_digest_len:
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
state->digest_bytes_len_dma_addr = 0;
}
-fail4:
- if (state->digest_buff_dma_addr != 0) {
- dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+unmap_digest_buf:
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
state->digest_buff_dma_addr = 0;
}
-fail3:
- kfree(state->opad_digest_buff);
-fail2:
- kfree(state->digest_bytes_len);
-fail1:
- kfree(state->digest_buff);
-fail_digest_result_buff:
- kfree(state->digest_result_buff);
- state->digest_result_buff = NULL;
-fail_buff1:
- kfree(state->buff1);
- state->buff1 = NULL;
-fail_buff0:
- kfree(state->buff0);
- state->buff0 = NULL;
-fail0:
- return rc;
+
+ return -EINVAL;
}
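/*
 * For illustration only: a standalone sketch of the goto-unwind idiom
 * that cc_map_req() switches to above. Each acquisition gets a label
 * that undoes it; a failure jumps to the label for the last resource
 * that succeeded, so teardown runs in reverse order of setup. The
 * demo_* names are hypothetical stand-ins, not driver code.
 */
#include <stdlib.h>

static void *demo_acquire(size_t n) { return malloc(n); }
static void demo_release(void *p) { free(p); }

static int demo_setup(void **a, void **b)
{
	*a = demo_acquire(16);
	if (!*a)
		return -1;

	*b = demo_acquire(32);
	if (!*b)
		goto release_a;	/* undo only what already succeeded */

	return 0;

release_a:
	demo_release(*a);
	*a = NULL;
	return -1;
}

int main(void)
{
	void *a, *b;

	if (demo_setup(&a, &b))
		return 1;
	demo_release(b);
	demo_release(a);
	return 0;
}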
-static void ssi_hash_unmap_request(struct device *dev,
- struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx)
+static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
{
- if (state->digest_buff_dma_addr != 0) {
+ if (state->digest_buff_dma_addr) {
dma_unmap_single(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
&state->digest_buff_dma_addr);
state->digest_buff_dma_addr = 0;
}
- if (state->digest_bytes_len_dma_addr != 0) {
+ if (state->digest_bytes_len_dma_addr) {
dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
&state->digest_bytes_len_dma_addr);
state->digest_bytes_len_dma_addr = 0;
}
- if (state->opad_digest_dma_addr != 0) {
+ if (state->opad_digest_dma_addr) {
dma_unmap_single(dev, state->opad_digest_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
&state->opad_digest_dma_addr);
state->opad_digest_dma_addr = 0;
}
-
- kfree(state->opad_digest_buff);
- kfree(state->digest_bytes_len);
- kfree(state->digest_buff);
- kfree(state->digest_result_buff);
- kfree(state->buff1);
- kfree(state->buff0);
}
-static void ssi_hash_unmap_result(struct device *dev,
- struct ahash_req_ctx *state,
- unsigned int digestsize, u8 *result)
+static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize, u8 *result)
{
- if (state->digest_result_dma_addr != 0) {
- dma_unmap_single(dev,
- state->digest_result_dma_addr,
- digestsize,
- DMA_BIDIRECTIONAL);
+ if (state->digest_result_dma_addr) {
+ dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
+ DMA_BIDIRECTIONAL);
dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
state->digest_result_buff,
&state->digest_result_dma_addr, digestsize);
- memcpy(result,
- state->digest_result_buff,
- digestsize);
+ memcpy(result, state->digest_result_buff, digestsize);
}
state->digest_result_dma_addr = 0;
}
-static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
- struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
dev_dbg(dev, "req=%pK\n", req);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
- req->base.complete(&req->base, 0);
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
}
-static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
- struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
dev_dbg(dev, "req=%pK\n", req);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
- ssi_hash_unmap_request(dev, state, ctx);
- req->base.complete(&req->base, 0);
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
}
-static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
- struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
dev_dbg(dev, "req=%pK\n", req);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
- ssi_hash_unmap_request(dev, state, ctx);
- req->base.complete(&req->base, 0);
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+ NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ return idx;
+}
+
+static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* store the hash digest result in the context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
+ NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx],
+ cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
+ HASH_LEN_SIZE);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ return idx;
}
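/*
 * For illustration only: the index-threading convention used by
 * cc_fin_result()/cc_fin_hmac() above. Each helper appends its
 * descriptors at idx and returns the new count, so callers can chain
 * helpers and hand the final count to the submit path. The demo_desc
 * type and demo_emit_* helpers are hypothetical stand-ins.
 */
#include <stdio.h>

#define DEMO_MAX_SEQ_LEN 12

struct demo_desc { int op; };

static int demo_emit_load(struct demo_desc *desc, int idx)
{
	desc[idx].op = 1;	/* e.g. load hash state */
	idx++;
	return idx;
}

static int demo_emit_store(struct demo_desc *desc, int idx)
{
	desc[idx].op = 2;	/* e.g. write back the result */
	idx++;
	return idx;
}

int main(void)
{
	struct demo_desc desc[DEMO_MAX_SEQ_LEN];
	int idx = 0;

	/* Chain helpers the same way cc_hash_digest() below chains
	 * cc_fin_hmac() and cc_fin_result().
	 */
	idx = demo_emit_load(desc, idx);
	idx = demo_emit_store(desc, idx);

	printf("sequence length: %d\n", idx);	/* i.e. submit(desc, idx) */
	return 0;
}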
-static int ssi_hash_digest(struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx,
- unsigned int digestsize,
- struct scatterlist *src,
- unsigned int nbytes, u8 *result,
- void *async_req)
+static int cc_hash_digest(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
- ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->hash_mode);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_digest_addr =
+ cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
int idx = 0;
int rc = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (async_req) {
- /* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_digest_complete;
- ssi_req.user_arg = (void *)async_req;
- }
+ /* Setup DX request structure */
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
- /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
+ /* If HMAC then load hash IPAD xor key, if HASH then load initial
+ * digest
+ */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
if (is_hmac) {
@@ -464,7 +468,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
NS_BIT);
} else {
set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
- if (likely(nbytes != 0))
+ if (nbytes)
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
else
set_cipher_do(&desc[idx], DO_PAD);
@@ -473,7 +477,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
- ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
if (is_hmac) {
/* HW last hash block padding (aka. "DO_PAD") */
@@ -486,98 +490,62 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
set_cipher_do(&desc[idx], DO_PAD);
idx++;
- /* store the hash digest result in the context */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
- digestsize, NS_BIT, 0);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
- /* Loading hash opad xor key state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(
-ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
+ idx = cc_fin_hmac(desc, req, idx);
+ }
- /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
+ idx = cc_fin_result(desc, req, idx);
- /* Perform HASH update */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- digestsize, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
}
+ return rc;
+}
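/*
 * For illustration only: the async submission contract the converted
 * paths above rely on. -EINPROGRESS (or -EBUSY for a backlogged
 * request) means the completion callback now owns the cleanup; any
 * other return means the request was never queued and the caller must
 * unmap everything itself. This is a hypothetical model of that
 * contract, not the driver's real request queue.
 */
#include <stdio.h>
#include <errno.h>

typedef void (*demo_cb)(void *arg, int err);

/* Pretend-submit: queues the request and "completes" it inline. */
static int demo_submit(demo_cb cb, void *arg)
{
	cb(arg, 0);		/* the callback performs the unmapping */
	return -EINPROGRESS;
}

static void demo_complete(void *arg, int err)
{
	printf("complete(%s): err=%d\n", (const char *)arg, err);
}

int main(void)
{
	int rc = demo_submit(demo_complete, "req");

	if (rc != -EINPROGRESS && rc != -EBUSY) {
		/* Submission failed: unwind the mappings here, as
		 * cc_hash_digest() does with its cc_unmap_*() calls.
		 */
		return 1;
	}
	return 0;
}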
- /* Get final MAC result */
+static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
+ struct ahash_req_ctx *state, int idx)
+{
+ /* Restore hash digest */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Restore hash current length */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- /* TODO */
- set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
- NS_BIT, (async_req ? 1 : 0));
- if (async_req)
- set_queue_last_ind(&desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
- if (async_req) {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- ssi_hash_unmap_request(dev, state, ctx);
- }
- } else {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (rc != 0) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- } else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
- }
- ssi_hash_unmap_result(dev, state, digestsize, result);
- ssi_hash_unmap_request(dev, state, ctx);
- }
- return rc;
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ return idx;
}
-static int ssi_hash_update(struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx,
- unsigned int block_size,
- struct scatterlist *src,
- unsigned int nbytes,
- void *async_req)
+static int cc_hash_update(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 idx = 0;
int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
"hmac" : "hash", nbytes);
@@ -587,8 +555,9 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
return 0;
}
- rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
- if (unlikely(rc)) {
+ rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
+ block_size, flags);
+ if (rc) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
nbytes);
@@ -599,30 +568,17 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
return -ENOMEM;
}
- if (async_req) {
- /* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_update_complete;
- ssi_req.user_arg = async_req;
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ return -EINVAL;
}
- /* Restore hash digest */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
- /* Restore hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
+ /* Setup DX request structure */
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
- ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+ idx = cc_restore_hash(desc, ctx, state, idx);
/* store the hash digest result in context */
hw_desc_init(&desc[idx]);
@@ -637,220 +593,124 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
- if (async_req)
- set_queue_last_ind(&desc[idx]);
+ HASH_LEN_SIZE, NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
idx++;
- if (async_req) {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- }
- } else {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (rc != 0) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- } else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
- }
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_hash_finup(struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx,
- unsigned int digestsize,
- struct scatterlist *src,
- unsigned int nbytes,
- u8 *result,
- void *async_req)
+static int cc_hash_finup(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (async_req) {
- /* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_complete;
- ssi_req.user_arg = async_req;
- }
-
- /* Restore hash digest */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Restore hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
-
- if (is_hmac) {
- /* Store the hash digest result in the context */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
- digestsize, NS_BIT, 0);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
- /* Loading hash OPAD xor key state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(
-ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
+ /* Setup DX request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
- /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
+ idx = cc_restore_hash(desc, ctx, state, idx);
- /* Perform HASH update on last digest */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- digestsize, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
- }
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- /* TODO */
- set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
- NS_BIT, (async_req ? 1 : 0));
- if (async_req)
- set_queue_last_ind(&desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- idx++;
+ idx = cc_fin_result(desc, req, idx);
- if (async_req) {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- }
- } else {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (rc != 0) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- } else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- ssi_hash_unmap_request(dev, state, ctx);
- }
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_hash_final(struct ahash_req_ctx *state,
- struct ssi_hash_ctx *ctx,
- unsigned int digestsize,
- struct scatterlist *src,
- unsigned int nbytes,
- u8 *result,
- void *async_req)
+static int cc_hash_final(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+ flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (async_req) {
- /* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_complete;
- ssi_req.user_arg = async_req;
- }
-
- /* Restore hash digest */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Restore hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
+ /* Setup DX request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
- ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+ idx = cc_restore_hash(desc, ctx, state, idx);
/* "DO-PAD" must be enabled only when writing current length to HW */
hw_desc_init(&desc[idx]);
@@ -862,121 +722,56 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
idx++;
- if (is_hmac) {
- /* Store the hash digest result in the context */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
- digestsize, NS_BIT, 0);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- idx++;
-
- /* Loading hash OPAD xor key state */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
- ctx->inter_digestsize, NS_BIT);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- idx++;
-
- /* Load the hash current length */
- hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_sram(&desc[idx],
- ssi_ahash_get_initial_digest_len_sram_addr(
-ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
- set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- set_flow_mode(&desc[idx], S_DIN_to_HASH);
- set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- idx++;
-
- /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
- hw_desc_init(&desc[idx]);
- set_din_no_dma(&desc[idx], 0, 0xfffff0);
- set_dout_no_dma(&desc[idx], 0, 0, 1);
- idx++;
-
- /* Perform HASH update on last digest */
- hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
- digestsize, NS_BIT);
- set_flow_mode(&desc[idx], DIN_HASH);
- idx++;
- }
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
- /* Get final MAC result */
- hw_desc_init(&desc[idx]);
- set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
- NS_BIT, (async_req ? 1 : 0));
- if (async_req)
- set_queue_last_ind(&desc[idx]);
- set_flow_mode(&desc[idx], S_HASH_to_DOUT);
- set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
- idx++;
+ idx = cc_fin_result(desc, req, idx);
- if (async_req) {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- }
- } else {
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (rc != 0) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- } else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
- ssi_hash_unmap_result(dev, state, digestsize, result);
- ssi_hash_unmap_request(dev, state, ctx);
- }
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
+static int cc_hash_init(struct ahash_request *req)
{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- state->xcbc_count = 0;
+ dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
- ssi_hash_map_request(dev, state, ctx);
+ cc_init_req(dev, state, ctx);
return 0;
}
-static int ssi_hash_setkey(void *hash,
- const u8 *key,
- unsigned int keylen,
- bool synchronize)
+static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
- struct ssi_crypto_req ssi_req = {};
- struct ssi_hash_ctx *ctx = NULL;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = NULL;
int blocksize = 0;
int digestsize = 0;
int i, idx = 0, rc = 0;
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
- ssi_sram_addr_t larval_addr;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_addr;
struct device *dev;
- ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
+ ctx = crypto_ahash_ctx(ahash);
dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "start keylen: %d", keylen);
- blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
- digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
+ blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ digestsize = crypto_ahash_digestsize(ahash);
- larval_addr = ssi_ahash_get_larval_digest_sram_addr(
- ctx->drvdata, ctx->hash_mode);
+ larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
/* The keylen value distinguishes HASH in case keylen is ZERO bytes,
* any NON-ZERO value utilizes HMAC flow
@@ -985,12 +780,10 @@ static int ssi_hash_setkey(void *hash,
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
- if (keylen != 0) {
- ctx->key_params.key_dma_addr = dma_map_single(
- dev, (void *)key,
- keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev,
- ctx->key_params.key_dma_addr))) {
+ if (keylen) {
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -1032,14 +825,15 @@ static int ssi_hash_setkey(void *hash,
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
- ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0, (blocksize - digestsize));
set_flow_mode(&desc[idx], BYPASS);
- set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
- digestsize),
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ digestsize),
(blocksize - digestsize), NS_BIT, 0);
idx++;
} else {
@@ -1052,7 +846,7 @@ static int ssi_hash_setkey(void *hash,
keylen, NS_BIT, 0);
idx++;
- if ((blocksize - keylen) != 0) {
+ if (blocksize - keylen) {
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0,
(blocksize - keylen));
@@ -1073,8 +867,8 @@ static int ssi_hash_setkey(void *hash,
idx++;
}
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc != 0)) {
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto out;
}
@@ -1114,7 +908,9 @@ static int ssi_hash_setkey(void *hash,
set_flow_mode(&desc[idx], DIN_HASH);
idx++;
- /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
+ /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
+ * of the first HASH "update" state)
+ */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
if (i > 0) /* Not first iteration */
@@ -1128,11 +924,11 @@ static int ssi_hash_setkey(void *hash,
idx++;
}
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
out:
if (rc)
- crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
if (ctx->key_params.key_dma_addr) {
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
@@ -1143,14 +939,14 @@ out:
return rc;
}
-static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
- const u8 *key, unsigned int keylen)
+static int cc_xcbc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
{
- struct ssi_crypto_req ssi_req = {};
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
int idx = 0, rc = 0;
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
@@ -1165,10 +961,9 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen;
- ctx->key_params.key_dma_addr = dma_map_single(
- dev, (void *)key,
- keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -1191,30 +986,30 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
- set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K1_OFFSET),
- CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
- set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K2_OFFSET),
- CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], DIN_AES_DOUT);
- set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K3_OFFSET),
- CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
- if (rc != 0)
+ if (rc)
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
@@ -1225,11 +1020,10 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
return rc;
}
-#if SSI_CC_HAS_CMAC
-static int ssi_cmac_setkey(struct crypto_ahash *ahash,
- const u8 *key, unsigned int keylen)
+static int cc_cmac_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
{
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
@@ -1253,8 +1047,10 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
keylen, DMA_TO_DEVICE);
memcpy(ctx->opad_tmp_keys_buff, key, keylen);
- if (keylen == 24)
- memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+ if (keylen == 24) {
+ memset(ctx->opad_tmp_keys_buff + 24, 0,
+ CC_AES_KEY_SIZE_MAX - 24);
+ }
dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
@@ -1263,20 +1059,19 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
return 0;
}
-#endif
-static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
+static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (ctx->digest_buff_dma_addr != 0) {
+ if (ctx->digest_buff_dma_addr) {
dma_unmap_single(dev, ctx->digest_buff_dma_addr,
sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
&ctx->digest_buff_dma_addr);
ctx->digest_buff_dma_addr = 0;
}
- if (ctx->opad_tmp_keys_dma_addr != 0) {
+ if (ctx->opad_tmp_keys_dma_addr) {
dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
@@ -1288,13 +1083,15 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
ctx->key_params.keylen = 0;
}
-static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
+static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
struct device *dev = drvdata_to_dev(ctx->drvdata);
ctx->key_params.keylen = 0;
- ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ ctx->digest_buff_dma_addr =
+ dma_map_single(dev, (void *)ctx->digest_buff,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
sizeof(ctx->digest_buff), ctx->digest_buff);
@@ -1304,7 +1101,10 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
sizeof(ctx->digest_buff), ctx->digest_buff,
&ctx->digest_buff_dma_addr);
- ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
+ ctx->opad_tmp_keys_dma_addr =
+ dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
sizeof(ctx->opad_tmp_keys_buff),
@@ -1319,51 +1119,52 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
return 0;
fail:
- ssi_hash_free_ctx(ctx);
+ cc_free_ctx(ctx);
return -ENOMEM;
}
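/*
 * For illustration only: the tolerant-teardown idiom behind
 * cc_alloc_ctx()/cc_free_ctx() above. Because the free routine checks
 * each handle before releasing it (a zero DMA address means "never
 * mapped"), the allocation path can jump to one fail label no matter
 * how far it got. The demo_* names are hypothetical stand-ins.
 */
#include <stdlib.h>

struct demo_ctx {
	void *buf_a;	/* NULL means "not yet allocated" */
	void *buf_b;
};

static void demo_free_ctx(struct demo_ctx *ctx)
{
	/* Safe on partially built contexts: free(NULL) is a no-op. */
	free(ctx->buf_a);
	ctx->buf_a = NULL;
	free(ctx->buf_b);
	ctx->buf_b = NULL;
}

static int demo_alloc_ctx(struct demo_ctx *ctx)
{
	ctx->buf_a = calloc(1, 16);
	if (!ctx->buf_a)
		goto fail;
	ctx->buf_b = calloc(1, 32);
	if (!ctx->buf_b)
		goto fail;
	return 0;

fail:
	demo_free_ctx(ctx);	/* single label, tolerant teardown */
	return -1;
}

int main(void)
{
	struct demo_ctx ctx = { 0 };
	int rc = demo_alloc_ctx(&ctx);

	if (!rc)
		demo_free_ctx(&ctx);
	return rc ? 1 : 0;
}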
-static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
+static int cc_cra_init(struct crypto_tfm *tfm)
{
- struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct hash_alg_common *hash_alg_common =
container_of(tfm->__crt_alg, struct hash_alg_common, base);
struct ahash_alg *ahash_alg =
container_of(hash_alg_common, struct ahash_alg, halg);
- struct ssi_hash_alg *ssi_alg =
- container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
+ struct cc_hash_alg *cc_alg =
+ container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct ahash_req_ctx));
- ctx->hash_mode = ssi_alg->hash_mode;
- ctx->hw_mode = ssi_alg->hw_mode;
- ctx->inter_digestsize = ssi_alg->inter_digestsize;
- ctx->drvdata = ssi_alg->drvdata;
+ ctx->hash_mode = cc_alg->hash_mode;
+ ctx->hw_mode = cc_alg->hw_mode;
+ ctx->inter_digestsize = cc_alg->inter_digestsize;
+ ctx->drvdata = cc_alg->drvdata;
- return ssi_hash_alloc_ctx(ctx);
+ return cc_alloc_ctx(ctx);
}
-static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
+static void cc_cra_exit(struct crypto_tfm *tfm)
{
- struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- dev_dbg(dev, "ssi_hash_cra_exit");
- ssi_hash_free_ctx(ctx);
+ dev_dbg(dev, "cc_cra_exit");
+ cc_free_ctx(ctx);
}
-static int ssi_mac_update(struct ahash_request *req)
+static int cc_mac_update(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int rc;
u32 idx = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
if (req->nbytes == 0) {
/* no real updates required */
@@ -1372,8 +1173,9 @@ static int ssi_mac_update(struct ahash_request *req)
state->xcbc_count++;
- rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
- if (unlikely(rc)) {
+ rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
+ req->nbytes, block_size, flags);
+ if (rc) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
req->nbytes);
@@ -1384,12 +1186,17 @@ static int ssi_mac_update(struct ahash_request *req)
return -ENOMEM;
}
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
- ssi_hash_create_xcbc_setup(req, desc, &idx);
+ cc_setup_xcbc(req, desc, &idx);
else
- ssi_hash_create_cmac_setup(req, desc, &idx);
+ cc_setup_cmac(req, desc, &idx);
- ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
/* store the hash digest result in context */
hw_desc_init(&desc[idx]);
@@ -1402,32 +1209,32 @@ static int ssi_mac_update(struct ahash_request *req)
idx++;
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_update_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_update_complete;
+ cc_req.user_arg = (void *)req;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_mac_final(struct ahash_request *req)
+static int cc_mac_final(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc = 0;
u32 key_size, key_len;
u32 digestsize = crypto_ahash_digestsize(tfm);
-
- u32 rem_cnt = state->buff_index ? state->buff1_cnt :
- state->buff0_cnt;
+ gfp_t flags = cc_gfp_flags(&req->base);
+ u32 rem_cnt = *cc_hash_buf_cnt(state);
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_size = CC_AES_128_BIT_KEY_SIZE;
@@ -1440,34 +1247,45 @@ static int ssi_mac_final(struct ahash_request *req)
dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 0, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_hash_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
- if (state->xcbc_count && (rem_cnt == 0)) {
+ if (state->xcbc_count && rem_cnt == 0) {
/* Load key for ECB decryption */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
set_din_type(&desc[idx], DMA_DLLI,
- (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ key_size, NS_BIT);
set_key_size_aes(&desc[idx], key_len);
set_flow_mode(&desc[idx], S_DIN_to_AES);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
- /* Initiate decryption of block state to previous block_state-XOR-M[n] */
+ /* Initiate decryption of block state to previous
+ * block_state-XOR-M[n]
+ */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
CC_AES_BLOCK_SIZE, NS_BIT);
@@ -1484,9 +1302,9 @@ static int ssi_mac_final(struct ahash_request *req)
}
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
- ssi_hash_create_xcbc_setup(req, desc, &idx);
+ cc_setup_xcbc(req, desc, &idx);
else
- ssi_hash_create_cmac_setup(req, desc, &idx);
+ cc_setup_cmac(req, desc, &idx);
if (state->xcbc_count == 0) {
hw_desc_init(&desc[idx]);
@@ -1496,7 +1314,7 @@ static int ssi_mac_final(struct ahash_request *req)
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
} else if (rem_cnt > 0) {
- ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
} else {
hw_desc_init(&desc[idx]);
set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
@@ -1515,53 +1333,64 @@ static int ssi_mac_final(struct ahash_request *req)
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_mac_finup(struct ahash_request *req)
+static int cc_mac_finup(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc = 0;
u32 key_len = 0;
u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
if (state->xcbc_count > 0 && req->nbytes == 0) {
dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
- return ssi_mac_final(req);
+ return cc_mac_final(req);
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
}
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup CC request structure */
- ssi_req.user_cb = (void *)ssi_hash_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
- ssi_hash_create_xcbc_setup(req, desc, &idx);
+ cc_setup_xcbc(req, desc, &idx);
} else {
key_len = ctx->key_params.keylen;
- ssi_hash_create_cmac_setup(req, desc, &idx);
+ cc_setup_cmac(req, desc, &idx);
}
if (req->nbytes == 0) {
@@ -1572,7 +1401,7 @@ static int ssi_mac_finup(struct ahash_request *req)
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
} else {
- ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
}
/* Get final MAC result */
@@ -1586,54 +1415,61 @@ static int ssi_mac_finup(struct ahash_request *req)
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-static int ssi_mac_digest(struct ahash_request *req)
+static int cc_mac_digest(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 digestsize = crypto_ahash_digestsize(tfm);
- struct ssi_crypto_req ssi_req = {};
- struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 key_len;
int idx = 0;
int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
- if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
/* Setup CC request structure */
- ssi_req.user_cb = (void *)ssi_hash_digest_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_digest_complete;
+ cc_req.user_arg = (void *)req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
- ssi_hash_create_xcbc_setup(req, desc, &idx);
+ cc_setup_xcbc(req, desc, &idx);
} else {
key_len = ctx->key_params.keylen;
- ssi_hash_create_cmac_setup(req, desc, &idx);
+ cc_setup_cmac(req, desc, &idx);
}
if (req->nbytes == 0) {
@@ -1644,7 +1480,7 @@ static int ssi_mac_digest(struct ahash_request *req)
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
} else {
- ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
}
/* Get final MAC result */
@@ -1658,96 +1494,32 @@ static int ssi_mac_digest(struct ahash_request *req)
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
- ssi_hash_unmap_result(dev, state, digestsize, req->result);
- ssi_hash_unmap_request(dev, state, ctx);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
}
return rc;
}
-//ahash wrap functions
-static int ssi_ahash_digest(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
-
- return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
-}
-
-static int ssi_ahash_update(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
-
- return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
-}
-
-static int ssi_ahash_finup(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
-
- return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
-}
-
-static int ssi_ahash_final(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
-
- return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
-}
-
-static int ssi_ahash_init(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
-
- dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
-
- return ssi_hash_init(state, ctx);
-}
-
-static int ssi_ahash_export(struct ahash_request *req, void *out)
+static int cc_hash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct ahash_req_ctx *state = ahash_request_ctx(req);
- u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
- u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
- state->buff0_cnt;
+ u8 *curr_buff = cc_hash_buf(state);
+ u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
const u32 tmp = CC_EXPORT_MAGIC;
memcpy(out, &tmp, sizeof(u32));
out += sizeof(u32);
- dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
- ctx->inter_digestsize, DMA_BIDIRECTIONAL);
memcpy(out, state->digest_buff, ctx->inter_digestsize);
out += ctx->inter_digestsize;
- if (state->digest_bytes_len_dma_addr) {
- dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
- memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
- } else {
- /* Poison the unused exported digest len field. */
- memset(out, 0x5F, HASH_LEN_SIZE);
- }
+ memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
out += HASH_LEN_SIZE;
memcpy(out, &curr_buff_cnt, sizeof(u32));
@@ -1755,80 +1527,43 @@ static int ssi_ahash_export(struct ahash_request *req, void *out)
memcpy(out, curr_buff, curr_buff_cnt);
- /* No sync for device ineeded since we did not change the data,
- * we only copy it
- */
-
return 0;
}
-static int ssi_ahash_import(struct ahash_request *req, const void *in)
+static int cc_hash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx(req);
u32 tmp;
- int rc = 0;
memcpy(&tmp, in, sizeof(u32));
- if (tmp != CC_EXPORT_MAGIC) {
- rc = -EINVAL;
- goto out;
- }
+ if (tmp != CC_EXPORT_MAGIC)
+ return -EINVAL;
in += sizeof(u32);
- /* call init() to allocate bufs if the user hasn't */
- if (!state->digest_buff) {
- rc = ssi_hash_init(state, ctx);
- if (rc)
- goto out;
- }
+ cc_init_req(dev, state, ctx);
- dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
- ctx->inter_digestsize, DMA_BIDIRECTIONAL);
memcpy(state->digest_buff, in, ctx->inter_digestsize);
in += ctx->inter_digestsize;
- if (state->digest_bytes_len_dma_addr) {
- dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
- memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
- }
+ memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
in += HASH_LEN_SIZE;
- dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
- ctx->inter_digestsize, DMA_BIDIRECTIONAL);
-
- if (state->digest_bytes_len_dma_addr)
- dma_sync_single_for_device(dev,
- state->digest_bytes_len_dma_addr,
- HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
-
- state->buff_index = 0;
-
/* Sanity check the data as much as possible */
memcpy(&tmp, in, sizeof(u32));
- if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
- rc = -EINVAL;
- goto out;
- }
+ if (tmp > CC_MAX_HASH_BLCK_SIZE)
+ return -EINVAL;
in += sizeof(u32);
- state->buff0_cnt = tmp;
- memcpy(state->buff0, in, state->buff0_cnt);
-
-out:
- return rc;
-}
+ state->buf_cnt[0] = tmp;
+ memcpy(state->buffers[0], in, tmp);
-static int ssi_ahash_setkey(struct crypto_ahash *ahash,
- const u8 *key, unsigned int keylen)
-{
- return ssi_hash_setkey((void *)ahash, key, keylen, false);
+ return 0;
}
-struct ssi_hash_template {
+struct cc_hash_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
char mac_name[CRYPTO_MAX_ALG_NAME];
@@ -1839,14 +1574,14 @@ struct ssi_hash_template {
int hash_mode;
int hw_mode;
int inter_digestsize;
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
};
#define CC_STATE_SIZE(_x) \
- ((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
+ ((_x) + HASH_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
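
As a worked check of CC_STATE_SIZE(): the two extra u32s are the export magic and the buffer byte count, so the blob covers exactly what cc_hash_export() writes below. A hypothetical compile-time assertion, assuming a CC_DEV_SHA_MAX > 256 build where HASH_LEN_SIZE is 16 and CC_MAX_HASH_BLCK_SIZE is 128:

/* Not driver code: sanity-check the SHA-256 export blob size,
 * 32 (digest) + 16 (running length) + 128 (partial block) + 2 * 4
 * (magic + buffer count) = 184 bytes.
 */
static_assert(CC_STATE_SIZE(SHA256_DIGEST_SIZE) == 184,
	      "unexpected export blob layout");
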
/* hash descriptors */
-static struct ssi_hash_template driver_hash[] = {
+static struct cc_hash_template driver_hash[] = {
//Asynchronous hash template
{
.name = "sha1",
@@ -1856,14 +1591,14 @@ static struct ssi_hash_template driver_hash[] = {
.blocksize = SHA1_BLOCK_SIZE,
.synchronize = false,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
@@ -1880,14 +1615,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-sha256-dx",
.blocksize = SHA256_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
@@ -1904,14 +1639,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-sha224-dx",
.blocksize = SHA224_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
@@ -1921,7 +1656,7 @@ static struct ssi_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
},
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
{
.name = "sha384",
.driver_name = "sha384-dx",
@@ -1929,14 +1664,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-sha384-dx",
.blocksize = SHA384_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
@@ -1953,14 +1688,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-sha512-dx",
.blocksize = SHA512_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
@@ -1978,14 +1713,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "hmac-md5-dx",
.blocksize = MD5_HMAC_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_ahash_update,
- .final = ssi_ahash_final,
- .finup = ssi_ahash_finup,
- .digest = ssi_ahash_digest,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
- .setkey = ssi_ahash_setkey,
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
@@ -2000,14 +1735,14 @@ static struct ssi_hash_template driver_hash[] = {
.mac_driver_name = "xcbc-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_mac_update,
- .final = ssi_mac_final,
- .finup = ssi_mac_finup,
- .digest = ssi_mac_digest,
- .setkey = ssi_xcbc_setkey,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_xcbc_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
.halg = {
.digestsize = AES_BLOCK_SIZE,
.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
@@ -2017,20 +1752,19 @@ static struct ssi_hash_template driver_hash[] = {
.hw_mode = DRV_CIPHER_XCBC_MAC,
.inter_digestsize = AES_BLOCK_SIZE,
},
-#if SSI_CC_HAS_CMAC
{
.mac_name = "cmac(aes)",
.mac_driver_name = "cmac-aes-dx",
.blocksize = AES_BLOCK_SIZE,
.template_ahash = {
- .init = ssi_ahash_init,
- .update = ssi_mac_update,
- .final = ssi_mac_final,
- .finup = ssi_mac_finup,
- .digest = ssi_mac_digest,
- .setkey = ssi_cmac_setkey,
- .export = ssi_ahash_export,
- .import = ssi_ahash_import,
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_cmac_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
.halg = {
.digestsize = AES_BLOCK_SIZE,
.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
@@ -2040,15 +1774,12 @@ static struct ssi_hash_template driver_hash[] = {
.hw_mode = DRV_CIPHER_CMAC,
.inter_digestsize = AES_BLOCK_SIZE,
},
-#endif
-
};
-static struct ssi_hash_alg *
-ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
- bool keyed)
+static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
+ struct device *dev, bool keyed)
{
- struct ssi_hash_alg *t_crypto_alg;
+ struct cc_hash_alg *t_crypto_alg;
struct crypto_alg *alg;
struct ahash_alg *halg;
@@ -2056,7 +1787,6 @@ ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
if (!t_crypto_alg)
return ERR_PTR(-ENOMEM);
-
t_crypto_alg->ahash_alg = template->template_ahash;
halg = &t_crypto_alg->ahash_alg;
alg = &halg->halg.base;
@@ -2074,13 +1804,13 @@ ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
template->driver_name);
}
alg->cra_module = THIS_MODULE;
- alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
- alg->cra_priority = SSI_CRA_PRIO;
+ alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
+ alg->cra_priority = CC_CRA_PRIO;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_exit = ssi_hash_cra_exit;
+ alg->cra_exit = cc_cra_exit;
- alg->cra_init = ssi_ahash_cra_init;
+ alg->cra_init = cc_cra_init;
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->cra_type = &crypto_ahash_type;
@@ -2092,36 +1822,32 @@ ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
return t_crypto_alg;
}
-int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
+int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
- struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
- ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+ cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
unsigned int larval_seq_len = 0;
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
- struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
-#if (DX_DEV_SHA_MAX > 256)
- int i;
-#endif
/* Copy-to-sram digest-len */
- ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_init),
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(digest_len_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(digest_len_init);
larval_seq_len = 0;
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
/* Copy-to-sram digest-len for sha384/512 */
- ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_sha512_init),
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_sha512_init),
+ larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(digest_len_sha512_init);
@@ -2132,88 +1858,89 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
hash_handle->larval_digest_sram_addr = sram_buff_ofs;
/* Copy-to-sram initial SHA* digests */
- ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
- ARRAY_SIZE(md5_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
+ larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(md5_init);
larval_seq_len = 0;
- ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
- ARRAY_SIZE(sha1_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(sha1_init, sram_buff_ofs,
+ ARRAY_SIZE(sha1_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha1_init);
larval_seq_len = 0;
- ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
- ARRAY_SIZE(sha224_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(sha224_init, sram_buff_ofs,
+ ARRAY_SIZE(sha224_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha224_init);
larval_seq_len = 0;
- ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
- ARRAY_SIZE(sha256_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(sha256_init, sram_buff_ofs,
+ ARRAY_SIZE(sha256_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha256_init);
larval_seq_len = 0;
-#if (DX_DEV_SHA_MAX > 256)
- /* We are forced to swap each double-word larval before copying to sram */
- for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
- const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
- const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
-
- ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
- sram_buff_ofs += sizeof(u32);
- ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
- sram_buff_ofs += sizeof(u32);
- }
+#if (CC_DEV_SHA_MAX > 256)
+ cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha384_init) * 2), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0)) {
- dev_err(dev, "send_request() failed (rc = %d)\n", rc);
+ if (rc)
goto init_digest_const_err;
- }
+ sram_buff_ofs += sizeof(sha384_init);
larval_seq_len = 0;
- for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
- const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
- const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
-
- ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
- sram_buff_ofs += sizeof(u32);
- ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
- sram_buff_ofs += sizeof(u32);
- }
+ cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha512_init) * 2), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc != 0)) {
- dev_err(dev, "send_request() failed (rc = %d)\n", rc);
+ if (rc)
goto init_digest_const_err;
- }
#endif
init_digest_const_err:
return rc;
}
-int ssi_hash_alloc(struct ssi_drvdata *drvdata)
+static void __init cc_swap_dwords(u32 *buf, unsigned long size)
{
- struct ssi_hash_handle *hash_handle;
- ssi_sram_addr_t sram_buff;
+ int i;
+ u32 tmp;
+
+ for (i = 0; i < size; i += 2) {
+ tmp = buf[i];
+ buf[i] = buf[i + 1];
+ buf[i + 1] = tmp;
+ }
+}
+
+/*
+ * Due to the way the HW works we need to swap every
+ * double word in the SHA384 and SHA512 larval hashes
+ */
+void __init cc_hash_global_init(void)
+{
+ cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
+ cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
+}
+
+int cc_hash_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle;
+ cc_sram_addr_t sram_buff;
u32 sram_size_to_alloc;
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
@@ -2227,7 +1954,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
drvdata->hash_handle = hash_handle;
sram_size_to_alloc = sizeof(digest_len_init) +
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
sizeof(digest_len_sha512_init) +
sizeof(sha384_init) +
sizeof(sha512_init) +
@@ -2237,7 +1964,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
sizeof(sha224_init) +
sizeof(sha256_init);
- sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
+ sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
@@ -2248,19 +1975,19 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
hash_handle->digest_len_sram_addr = sram_buff;
/* must be set before the alg registration as it is being used there */
- rc = ssi_hash_init_sram_digest_consts(drvdata);
- if (unlikely(rc != 0)) {
+ rc = cc_init_hash_sram(drvdata);
+ if (rc) {
dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
goto fail;
}
/* ahash registration */
for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
- struct ssi_hash_alg *t_alg;
+ struct cc_hash_alg *t_alg;
int hw_mode = driver_hash[alg].hw_mode;
/* register hmac version */
- t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
@@ -2270,22 +1997,21 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
kfree(t_alg);
goto fail;
} else {
- list_add_tail(&t_alg->entry,
- &hash_handle->hash_list);
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
- if ((hw_mode == DRV_CIPHER_XCBC_MAC) ||
- (hw_mode == DRV_CIPHER_CMAC))
+ if (hw_mode == DRV_CIPHER_XCBC_MAC ||
+ hw_mode == DRV_CIPHER_CMAC)
continue;
/* register hash version */
- t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
@@ -2295,7 +2021,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
kfree(t_alg);
@@ -2313,13 +2039,14 @@ fail:
return rc;
}
-int ssi_hash_free(struct ssi_drvdata *drvdata)
+int cc_hash_free(struct cc_drvdata *drvdata)
{
- struct ssi_hash_alg *t_hash_alg, *hash_n;
- struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
+ struct cc_hash_alg *t_hash_alg, *hash_n;
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
if (hash_handle) {
- list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
+ list_for_each_entry_safe(t_hash_alg, hash_n,
+ &hash_handle->hash_list, entry) {
crypto_unregister_ahash(&t_hash_alg->ahash_alg);
list_del(&t_hash_alg->entry);
kfree(t_hash_alg);
@@ -2331,14 +2058,13 @@ int ssi_hash_free(struct ssi_drvdata *drvdata)
return 0;
}
-static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct ahash_req_ctx *state = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
/* Setup XCBC MAC K1 */
hw_desc_init(&desc[idx]);
@@ -2354,8 +2080,8 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
/* Setup XCBC MAC K2 */
hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K2_OFFSET),
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
@@ -2366,8 +2092,8 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
/* Setup XCBC MAC K3 */
hw_desc_init(&desc[idx]);
- set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
- XCBC_MAC_K3_OFFSET),
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
@@ -2389,14 +2115,13 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
*seq_size = idx;
}
-static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct ahash_req_ctx *state = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
/* Setup CMAC Key */
hw_desc_init(&desc[idx]);
@@ -2423,17 +2148,15 @@ static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
*seq_size = idx;
}
-static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
- struct ssi_hash_ctx *ctx,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- bool is_not_last_data,
- unsigned int *seq_size)
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
+ struct cc_hash_ctx *ctx, unsigned int flow_mode,
+ struct cc_hw_desc desc[], bool is_not_last_data,
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
sg_dma_address(areq_ctx->curr_sg),
@@ -2441,7 +2164,7 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
set_flow_mode(&desc[idx], flow_mode);
idx++;
} else {
- if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
dev_dbg(dev, " NULL mode\n");
/* nothing to build */
return;
@@ -2469,6 +2192,29 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
*seq_size = idx;
}
+static const void *cc_larval_digest(struct device *dev, u32 mode)
+{
+ switch (mode) {
+ case DRV_HASH_MD5:
+ return md5_init;
+ case DRV_HASH_SHA1:
+ return sha1_init;
+ case DRV_HASH_SHA224:
+ return sha224_init;
+ case DRV_HASH_SHA256:
+ return sha256_init;
+#if (CC_DEV_SHA_MAX > 256)
+ case DRV_HASH_SHA384:
+ return sha384_init;
+ case DRV_HASH_SHA512:
+ return sha512_init;
+#endif
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ return md5_init;
+ }
+}
+
/*!
* Gets the address of the initial digest in SRAM
* according to the given hash mode
@@ -2476,12 +2222,12 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
* \param drvdata
* \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
*
- * \return u32 The address of the inital digest in SRAM
+ * \return u32 The address of the initial digest in SRAM
*/
-ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
{
- struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
- struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
struct device *dev = drvdata_to_dev(_drvdata);
switch (mode) {
@@ -2501,7 +2247,7 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
sizeof(md5_init) +
sizeof(sha1_init) +
sizeof(sha224_init));
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
return (hash_handle->larval_digest_sram_addr +
sizeof(md5_init) +
@@ -2524,12 +2270,12 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
return hash_handle->larval_digest_sram_addr;
}
-ssi_sram_addr_t
-ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode)
{
- struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
- struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
- ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
switch (mode) {
case DRV_HASH_SHA1:
@@ -2537,7 +2283,7 @@ ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
case DRV_HASH_SHA256:
case DRV_HASH_MD5:
return digest_len_addr;
-#if (DX_DEV_SHA_MAX > 256)
+#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
case DRV_HASH_SHA512:
return digest_len_addr + sizeof(digest_len_init);
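
The offset sums in the switch above simply replay the copy order used by cc_init_hash_sram(); a minimal illustrative helper (not part of the patch) makes the correspondence explicit for one mode:

/* Illustrative only: the SHA-256 larval digest is copied to SRAM right
 * after the md5/sha1/sha224 arrays, so its offset from
 * larval_digest_sram_addr is the sum of the preceding sizes.
 */
static inline u32 cc_sha256_larval_ofs(void)
{
	return sizeof(md5_init) + sizeof(sha1_init) + sizeof(sha224_init);
}
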
diff --git a/drivers/staging/ccree/cc_hash.h b/drivers/staging/ccree/cc_hash.h
new file mode 100644
index 000000000000..aa42b8f4348d
--- /dev/null
+++ b/drivers/staging/ccree/cc_hash.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_hash.h
+ * ARM CryptoCell Hash Crypto API
+ */
+
+#ifndef __CC_HASH_H__
+#define __CC_HASH_H__
+
+#include "cc_buffer_mgr.h"
+
+#define HMAC_IPAD_CONST 0x36363636
+#define HMAC_OPAD_CONST 0x5C5C5C5C
+#if (CC_DEV_SHA_MAX > 256)
+#define HASH_LEN_SIZE 16
+#define CC_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
+#else
+#define HASH_LEN_SIZE 8
+#define CC_MAX_HASH_DIGEST_SIZE SHA256_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA256_BLOCK_SIZE
+#endif
+
+#define XCBC_MAC_K1_OFFSET 0
+#define XCBC_MAC_K2_OFFSET 16
+#define XCBC_MAC_K3_OFFSET 32
+
+#define CC_EXPORT_MAGIC 0xC2EE1070U
+
+/* this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used
+ * for xcbc/cmac statesize
+ */
+struct aeshash_state {
+ u8 state[AES_BLOCK_SIZE];
+ unsigned int count;
+ u8 buffer[AES_BLOCK_SIZE];
+};
+
+/* ahash state */
+struct ahash_req_ctx {
+ u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
+ u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_bytes_len[HASH_LEN_SIZE] ____cacheline_aligned;
+ struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
+ enum cc_req_dma_buf_type data_dma_buf_type;
+ dma_addr_t opad_digest_dma_addr;
+ dma_addr_t digest_buff_dma_addr;
+ dma_addr_t digest_bytes_len_dma_addr;
+ dma_addr_t digest_result_dma_addr;
+ u32 buf_cnt[2];
+ u32 buff_index;
+ u32 xcbc_count; /* count xcbc update operations */
+ struct scatterlist buff_sg[2];
+ struct scatterlist *curr_sg;
+ u32 in_nents;
+ u32 mlli_nents;
+ struct mlli_params mlli_params;
+};
+
+static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index];
+}
+
+static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index];
+}
+
+static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index ^ 1];
+}
+
+static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index ^ 1];
+}
+
+int cc_hash_alloc(struct cc_drvdata *drvdata);
+int cc_init_hash_sram(struct cc_drvdata *drvdata);
+int cc_hash_free(struct cc_drvdata *drvdata);
+
+/*!
+ * Gets the SRAM address of the initial digest length
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest length in SRAM
+ */
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode);
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
+
+void cc_hash_global_init(void);
+
+#endif /*__CC_HASH_H__*/
+
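The buff_index accessors above implement a two-buffer ping-pong for staging partial blocks; a hedged sketch of the flip they rely on (in the driver the index is toggled by the buffer manager, which is outside this hunk):

/* Illustrative only: after the staged bytes are consumed, toggling
 * buff_index swaps the roles of buffers[0]/buffers[1], so what
 * cc_next_buf() returned becomes the new cc_hash_buf().
 */
static inline void cc_flip_hash_buf(struct ahash_req_ctx *state)
{
	state->buff_index ^= 1;
}
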
diff --git a/drivers/staging/ccree/cc_host_regs.h b/drivers/staging/ccree/cc_host_regs.h
new file mode 100644
index 000000000000..69ef2fa0cb9b
--- /dev/null
+++ b/drivers/staging/ccree/cc_host_regs.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_HOST_H__
+#define __CC_HOST_H__
+
+// --------------------------------------
+// BLOCK: HOST_P
+// --------------------------------------
+#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REG_OFFSET 0xA04UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_REG_OFFSET 0xA08UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_SIGNATURE_REG_OFFSET 0xA24UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_BOOT_REG_OFFSET 0xA28UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_VERSION_REG_OFFSET 0xA40UL
+#define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_GPR0_REG_OFFSET 0xA70UL
+#define CC_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
+#define CC_GPR_HOST_REG_OFFSET 0xA74UL
+#define CC_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
+#define CC_GPR_HOST_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
+// --------------------------------------
+// BLOCK: HOST_SRAM
+// --------------------------------------
+#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
+#define CC_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
+#define CC_SRAM_ADDR_REG_OFFSET 0xF04UL
+#define CC_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
+#define CC_SRAM_DATA_READY_REG_OFFSET 0xF08UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL
+
+#endif //__CC_HOST_H__
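
Each field above is published as a _BIT_SHIFT/_BIT_SIZE pair rather than a ready-made mask; a hedged sketch of deriving one for the linux/bitfield.h helpers (the CC_FIELD_MASK() wrapper below is illustrative, not part of the patch):

#include <linux/bitfield.h>

#define CC_FIELD_MASK(reg, name) \
	GENMASK(CC_ ## reg ## _ ## name ## _BIT_SHIFT + \
		CC_ ## reg ## _ ## name ## _BIT_SIZE - 1, \
		CC_ ## reg ## _ ## name ## _BIT_SHIFT)

/* e.g. test the AXI error bit in a previously read IRR value */
static inline bool cc_axi_err_raised(u32 irr)
{
	return FIELD_GET(CC_FIELD_MASK(HOST_IRR, AXI_ERR_INT), irr);
}
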
diff --git a/drivers/staging/ccree/cc_hw_queue_defs.h b/drivers/staging/ccree/cc_hw_queue_defs.h
index 2ae0f655e7a0..a79f28cec5ae 100644
--- a/drivers/staging/ccree/cc_hw_queue_defs.h
+++ b/drivers/staging/ccree/cc_hw_queue_defs.h
@@ -1,25 +1,12 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#ifndef __CC_HW_QUEUE_DEFS_H__
#define __CC_HW_QUEUE_DEFS_H__
#include <linux/types.h>
-#include "dx_crys_kernel.h"
+#include "cc_kernel_regs.h"
#include <linux/bitfield.h>
/******************************************************************************
@@ -30,14 +17,12 @@
/* Define max. available slots in HW queue */
#define HW_QUEUE_SLOTS_MAX 15
-#define CC_REG_NAME(word, name) DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name
-
#define CC_REG_LOW(word, name) \
- (DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
+ (CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
#define CC_REG_HIGH(word, name) \
(CC_REG_LOW(word, name) + \
- DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
+ CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
#define CC_GENMASK(word, name) \
GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
@@ -122,7 +107,6 @@ enum cc_flow_mode {
AES_to_AES_to_HASH_and_DOUT = 13,
AES_to_AES_to_HASH = 14,
AES_to_HASH_and_AES = 15,
- DIN_MULTI2_DOUT = 16,
DIN_AES_AESMAC = 17,
HASH_to_DOUT = 18,
/* setup flows */
@@ -130,7 +114,6 @@ enum cc_flow_mode {
S_DIN_to_AES2 = 33,
S_DIN_to_DES = 34,
S_DIN_to_RC4 = 35,
- S_DIN_to_MULTI2 = 36,
S_DIN_to_HASH = 37,
S_AES_to_DOUT = 38,
S_AES2_to_DOUT = 39,
@@ -203,6 +186,19 @@ enum cc_hw_des_key_size {
END_OF_DES_KEYS = S32_MAX,
};
+enum cc_hash_conf_pad {
+ HASH_PADDING_DISABLED = 0,
+ HASH_PADDING_ENABLED = 1,
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
+ HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
+};
+
+enum cc_hash_cipher_pad {
+ DO_NOT_PAD = 0,
+ DO_PAD = 1,
+ HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
+};
+
/*****************************/
/* Descriptor packing macros */
/*****************************/
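
CC_GENMASK() above turns each WORDn shift/size pair into a GENMASK() usable with FIELD_PREP()/FIELD_GET(); a hedged packing sketch, assuming the DIN_DMA_MODE field that cc_kernel_regs.h defines for word 1 (the wrapper itself is illustrative):

/* Illustrative only: OR a field value into descriptor word 1, the same
 * pattern the set_*() helpers in this header use on struct cc_hw_desc.
 */
#define CC_WORD1_DIN_DMA_MODE	CC_GENMASK(1, DIN_DMA_MODE)

static inline void cc_set_din_dma_mode(struct cc_hw_desc *pdesc, u32 mode)
{
	pdesc->word[1] |= FIELD_PREP(CC_WORD1_DIN_DMA_MODE, mode);
}
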
diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/cc_ivgen.c
index 3f082f41ae8f..c47f419b277b 100644
--- a/drivers/staging/ccree/ssi_ivgen.c
+++ b/drivers/staging/ccree/cc_ivgen.c
@@ -1,38 +1,23 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-#include <linux/platform_device.h>
#include <crypto/ctr.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_ivgen.h"
-#include "ssi_request_mgr.h"
-#include "ssi_sram_mgr.h"
-#include "ssi_buffer_mgr.h"
+#include "cc_driver.h"
+#include "cc_ivgen.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_buffer_mgr.h"
/* The max. size of pool *MUST* be <= SRAM total size */
-#define SSI_IVPOOL_SIZE 1024
+#define CC_IVPOOL_SIZE 1024
/* The first 32B fraction of the pool is dedicated to the
* next encryption "key" & "IV" for pool regeneration
*/
-#define SSI_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
-#define SSI_IVPOOL_GEN_SEQ_LEN 4
+#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
+#define CC_IVPOOL_GEN_SEQ_LEN 4
/**
- * struct ssi_ivgen_ctx -IV pool generation context
+ * struct cc_ivgen_ctx - IV pool generation context
* @pool: the start address of the iv-pool residing in internal RAM
* @ctr_key: address of the pool's encryption key material in internal RAM
* @ctr_iv: address of the pool's counter iv in internal RAM
@@ -40,31 +25,29 @@
* @pool_meta: virt. address of the initial enc. key/IV
* @pool_meta_dma: phys. address of the initial enc. key/IV
*/
-struct ssi_ivgen_ctx {
- ssi_sram_addr_t pool;
- ssi_sram_addr_t ctr_key;
- ssi_sram_addr_t ctr_iv;
+struct cc_ivgen_ctx {
+ cc_sram_addr_t pool;
+ cc_sram_addr_t ctr_key;
+ cc_sram_addr_t ctr_iv;
u32 next_iv_ofs;
u8 *pool_meta;
dma_addr_t pool_meta_dma;
};
/*!
- * Generates SSI_IVPOOL_SIZE of random bytes by
+ * Generates CC_IVPOOL_SIZE random bytes by
* encrypting 0's using AES128-CTR.
*
* \param ivgen iv-pool context
* \param iv_seq IN/OUT array to the descriptors sequence
* \param iv_seq_len IN/OUT pointer to the sequence length
*/
-static int ssi_ivgen_generate_pool(
- struct ssi_ivgen_ctx *ivgen_ctx,
- struct cc_hw_desc iv_seq[],
- unsigned int *iv_seq_len)
+static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
unsigned int idx = *iv_seq_len;
- if ((*iv_seq_len + SSI_IVPOOL_GEN_SEQ_LEN) > SSI_IVPOOL_SEQ_LEN) {
+ if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
/* The sequence will be longer than allowed */
return -EINVAL;
}
@@ -97,15 +80,15 @@ static int ssi_ivgen_generate_pool(
/* Generate IV pool */
hw_desc_init(&iv_seq[idx]);
- set_din_const(&iv_seq[idx], 0, SSI_IVPOOL_SIZE);
- set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, SSI_IVPOOL_SIZE);
+ set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
+ set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
idx++;
*iv_seq_len = idx; /* Update sequence length */
/* queue ordering assures pool readiness */
- ivgen_ctx->next_iv_ofs = SSI_IVPOOL_META_SIZE;
+ ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;
return 0;
}
@@ -118,15 +101,15 @@ static int ssi_ivgen_generate_pool(
*
* \return int Zero for success, negative value otherwise.
*/
-int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
+int cc_init_iv_sram(struct cc_drvdata *drvdata)
{
- struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
- struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
unsigned int iv_seq_len = 0;
int rc;
/* Generate initial enc. key/iv */
- get_random_bytes(ivgen_ctx->pool_meta, SSI_IVPOOL_META_SIZE);
+ get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);
/* The first 32B reserved for the enc. Key/IV */
ivgen_ctx->ctr_key = ivgen_ctx->pool;
@@ -135,15 +118,15 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
/* Copy initial enc. key and IV to SRAM at a single descriptor */
hw_desc_init(&iv_seq[iv_seq_len]);
set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
- SSI_IVPOOL_META_SIZE, NS_BIT);
+ CC_IVPOOL_META_SIZE, NS_BIT);
set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
- SSI_IVPOOL_META_SIZE);
+ CC_IVPOOL_META_SIZE);
set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
iv_seq_len++;
/* Generate initial pool */
- rc = ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, &iv_seq_len);
- if (unlikely(rc != 0))
+ rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
+ if (rc)
return rc;
/* Fire-and-forget */
@@ -155,17 +138,17 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
*
* \param drvdata
*/
-void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
+void cc_ivgen_fini(struct cc_drvdata *drvdata)
{
- struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
struct device *device = &drvdata->plat_dev->dev;
if (!ivgen_ctx)
return;
if (ivgen_ctx->pool_meta) {
- memset(ivgen_ctx->pool_meta, 0, SSI_IVPOOL_META_SIZE);
- dma_free_coherent(device, SSI_IVPOOL_META_SIZE,
+ memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
+ dma_free_coherent(device, CC_IVPOOL_META_SIZE,
ivgen_ctx->pool_meta,
ivgen_ctx->pool_meta_dma);
}
@@ -184,42 +167,41 @@ void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
*
* \return int Zero for success, negative value otherwise.
*/
-int ssi_ivgen_init(struct ssi_drvdata *drvdata)
+int cc_ivgen_init(struct cc_drvdata *drvdata)
{
- struct ssi_ivgen_ctx *ivgen_ctx;
+ struct cc_ivgen_ctx *ivgen_ctx;
struct device *device = &drvdata->plat_dev->dev;
int rc;
/* Allocate "this" context */
- drvdata->ivgen_handle = kzalloc(sizeof(*drvdata->ivgen_handle),
- GFP_KERNEL);
- if (!drvdata->ivgen_handle)
+ ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+ if (!ivgen_ctx)
return -ENOMEM;
- ivgen_ctx = drvdata->ivgen_handle;
+ drvdata->ivgen_handle = ivgen_ctx;
- /* Allocate pool's header for intial enc. key/IV */
- ivgen_ctx->pool_meta = dma_alloc_coherent(device, SSI_IVPOOL_META_SIZE,
+ /* Allocate pool's header for initial enc. key/IV */
+ ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
&ivgen_ctx->pool_meta_dma,
GFP_KERNEL);
if (!ivgen_ctx->pool_meta) {
dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
- SSI_IVPOOL_META_SIZE);
+ CC_IVPOOL_META_SIZE);
rc = -ENOMEM;
goto out;
}
/* Allocate IV pool in SRAM */
- ivgen_ctx->pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
+ ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
dev_err(device, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto out;
}
- return ssi_ivgen_init_sram_pool(drvdata);
+ return cc_init_iv_sram(drvdata);
out:
- ssi_ivgen_fini(drvdata);
+ cc_ivgen_fini(drvdata);
return rc;
}
@@ -228,37 +210,36 @@ out:
*
* \param drvdata Driver private context
* \param iv_out_dma Array of physical IV out addresses
- * \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignore)
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements
+ *                  of iv_out_dma array are ignored)
* \param iv_out_size May be 8 or 16 bytes long
* \param iv_seq IN/OUT array to the descriptors sequence
* \param iv_seq_len IN/OUT pointer to the sequence length
*
* \return int Zero for success, negative value otherwise.
*/
-int ssi_ivgen_getiv(
- struct ssi_drvdata *drvdata,
- dma_addr_t iv_out_dma[],
- unsigned int iv_out_dma_len,
- unsigned int iv_out_size,
- struct cc_hw_desc iv_seq[],
- unsigned int *iv_seq_len)
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len, unsigned int iv_out_size,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
- struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
unsigned int idx = *iv_seq_len;
struct device *dev = drvdata_to_dev(drvdata);
unsigned int t;
- if ((iv_out_size != CC_AES_IV_SIZE) &&
- (iv_out_size != CTR_RFC3686_IV_SIZE)) {
+ if (iv_out_size != CC_AES_IV_SIZE &&
+ iv_out_size != CTR_RFC3686_IV_SIZE) {
return -EINVAL;
}
- if ((iv_out_dma_len + 1) > SSI_IVPOOL_SEQ_LEN) {
+ if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
/* The sequence will be longer than allowed */
return -EINVAL;
}
- //check that number of generated IV is limited to max dma address iv buffer size
- if (iv_out_dma_len > SSI_MAX_IVGEN_DMA_ADDRESSES) {
+ /* check that number of generated IV is limited to max dma address
+ * iv buffer size
+ */
+ if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
/* The sequence will be longer than allowed */
return -EINVAL;
}
@@ -288,10 +269,10 @@ int ssi_ivgen_getiv(
/* Update iv index */
ivgen_ctx->next_iv_ofs += iv_out_size;
- if ((SSI_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
+ if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
/* pool is drained -regenerate it! */
- return ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, iv_seq_len);
+ return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
}
return 0;
diff --git a/drivers/staging/ccree/cc_ivgen.h b/drivers/staging/ccree/cc_ivgen.h
new file mode 100644
index 000000000000..b6ac16903dda
--- /dev/null
+++ b/drivers/staging/ccree/cc_ivgen.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_IVGEN_H__
+#define __CC_IVGEN_H__
+
+#include "cc_hw_queue_defs.h"
+
+#define CC_IVPOOL_SEQ_LEN 8
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_ivgen_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void cc_ivgen_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming the driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_init_iv_sram(struct cc_drvdata *drvdata);
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
+ *                  iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len, unsigned int iv_out_size,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
+
+#endif /*__CC_IVGEN_H__*/
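For illustration only, a minimal caller-side sketch of cc_get_iv(); the wrapper name is invented and the single-IV setup is an assumption, not part of this patch:

/* Hypothetical helper: append descriptors that copy one 16-byte AES IV
 * from the SRAM pool to iv_dma. Mirrors the driver's own usage in
 * cc_do_send_request(); not part of this patch.
 */
static int example_fetch_one_iv(struct cc_drvdata *drvdata, dma_addr_t iv_dma,
				struct cc_hw_desc *seq, unsigned int *seq_len)
{
	dma_addr_t iv_addrs[1] = { iv_dma };

	/* One CC_AES_IV_SIZE (16B) IV; descriptors are appended to seq */
	return cc_get_iv(drvdata, iv_addrs, 1, CC_AES_IV_SIZE,
			 seq, seq_len);
}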
diff --git a/drivers/staging/ccree/cc_kernel_regs.h b/drivers/staging/ccree/cc_kernel_regs.h
new file mode 100644
index 000000000000..fa994406d610
--- /dev/null
+++ b/drivers/staging/ccree/cc_kernel_regs.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_CRYS_KERNEL_H__
+#define __CC_CRYS_KERNEL_H__
+
+// --------------------------------------
+// BLOCK: DSCRPTR
+// --------------------------------------
+#define CC_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
+// --------------------------------------
+// BLOCK: AXI_P
+// --------------------------------------
+#define CC_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_COMP_REG_OFFSET 0xB80UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
+#define CC_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
+#define CC_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
+#define CC_AXIM_CFG_REG_OFFSET 0xBE8UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
+#endif // __CC_CRYS_KERNEL_H__
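Each register field above is described by a SHIFT/SIZE macro pair. As a hedged sketch of how such a pair becomes a mask and a field extraction (the CC_SKETCH_GENMASK helper is invented here for illustration; the driver's real helpers live in cc_driver.h):

#include <linux/bitfield.h>

/* Illustrative only: build a field mask from a SHIFT/SIZE pair. */
#define CC_SKETCH_GENMASK(name) \
	GENMASK((name ## _BIT_SHIFT) + (name ## _BIT_SIZE) - 1, \
		name ## _BIT_SHIFT)

/* Extract the free-slot count from a raw DSCRPTR_QUEUE_CONTENT value. */
static inline u32 sketch_queue_free_slots(u32 reg)
{
	return FIELD_GET(CC_SKETCH_GENMASK(CC_DSCRPTR_QUEUE_CONTENT_VALUE),
			 reg);
}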
diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h
index a9c417b07b04..64b15ac9f1d3 100644
--- a/drivers/staging/ccree/cc_lli_defs.h
+++ b/drivers/staging/ccree/cc_lli_defs.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#ifndef _CC_LLI_DEFS_H_
#define _CC_LLI_DEFS_H_
@@ -20,7 +7,7 @@
#include <linux/types.h>
/* Max DLLI size
- * AKA DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
+ * AKA CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
*/
#define DLLI_SIZE_BIT_SIZE 0x18
diff --git a/drivers/staging/ccree/cc_pm.c b/drivers/staging/ccree/cc_pm.c
new file mode 100644
index 000000000000..d990f472e89f
--- /dev/null
+++ b/drivers/staging/ccree/cc_pm.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_hash.h"
+#include "cc_pm.h"
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+const struct dev_pm_ops ccree_pm = {
+ SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
+};
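These ops are meant to be plugged into the platform driver's .driver.pm field; a sketch of that binding (the struct shown here is illustrative, probe/remove omitted — the real platform_driver lives in cc_driver.c):

#include <linux/platform_device.h>

/* Illustrative binding only; not part of this patch. */
static struct platform_driver ccree_driver_sketch = {
	.driver = {
		.name = "ccree",
		.pm = &ccree_pm,	/* runtime PM suspend/resume ops */
	},
};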
+
+int cc_pm_suspend(struct device *dev)
+{
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ int rc;
+
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
+ rc = cc_suspend_req_queue(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
+ return rc;
+ }
+ fini_cc_regs(drvdata);
+ cc_clk_off(drvdata);
+ return 0;
+}
+
+int cc_pm_resume(struct device *dev)
+{
+ int rc;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+
+ rc = cc_clk_on(drvdata);
+ if (rc) {
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
+ return rc;
+ }
+
+ rc = init_cc_regs(drvdata, false);
+ if (rc) {
+ dev_err(dev, "init_cc_regs (%x)\n", rc);
+ return rc;
+ }
+
+ rc = cc_resume_req_queue(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
+ return rc;
+ }
+
+	/* must run after the queue resume, as it uses the HW queue */
+ cc_init_hash_sram(drvdata);
+
+ cc_init_iv_sram(drvdata);
+ return 0;
+}
+
+int cc_pm_get(struct device *dev)
+{
+ int rc = 0;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (cc_req_queue_suspended(drvdata))
+ rc = pm_runtime_get_sync(dev);
+ else
+ pm_runtime_get_noresume(dev);
+
+ return rc;
+}
+
+int cc_pm_put_suspend(struct device *dev)
+{
+ int rc = 0;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (!cc_req_queue_suspended(drvdata)) {
+ pm_runtime_mark_last_busy(dev);
+ rc = pm_runtime_put_autosuspend(dev);
+ } else {
+		/* Something went wrong */
+		dev_err(dev, "request to suspend an already suspended queue\n");
+ rc = -EBUSY;
+ }
+ return rc;
+}
+
+int cc_pm_init(struct cc_drvdata *drvdata)
+{
+ int rc = 0;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+	/* must come before enabling PM to avoid redundant suspends */
+ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ /* activate the PM module */
+ rc = pm_runtime_set_active(dev);
+ if (rc)
+ return rc;
+	/* enable the PM module */
+ pm_runtime_enable(dev);
+
+ return rc;
+}
+
+void cc_pm_fini(struct cc_drvdata *drvdata)
+{
+ pm_runtime_disable(drvdata_to_dev(drvdata));
+}
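Taken together, the intended usage pattern is a get/put bracket around every HW access, as cc_send_request() in cc_request_mgr.c does; a hedged sketch with an invented function name:

/* Hypothetical request path: keep the HW powered for the duration. */
static int example_powered_hw_access(struct device *dev)
{
	int rc = cc_pm_get(dev);	/* resume HW if runtime-suspended */

	if (rc)
		return rc;

	/* ... program descriptors / touch CryptoCell registers ... */

	return cc_pm_put_suspend(dev);	/* rearm the autosuspend timer */
}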
diff --git a/drivers/staging/ccree/cc_pm.h b/drivers/staging/ccree/cc_pm.h
new file mode 100644
index 000000000000..aac8190fea38
--- /dev/null
+++ b/drivers/staging/ccree/cc_pm.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_pm.h
+ */
+
+#ifndef __CC_POWER_MGR_H__
+#define __CC_POWER_MGR_H__
+
+#include "cc_driver.h"
+
+#define CC_SUSPEND_TIMEOUT 3000
+
+#if defined(CONFIG_PM)
+
+extern const struct dev_pm_ops ccree_pm;
+
+int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_fini(struct cc_drvdata *drvdata);
+int cc_pm_suspend(struct device *dev);
+int cc_pm_resume(struct device *dev);
+int cc_pm_get(struct device *dev);
+int cc_pm_put_suspend(struct device *dev);
+
+#else
+
+static inline int cc_pm_init(struct cc_drvdata *drvdata)
+{
+ return 0;
+}
+
+static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
+
+static inline int cc_pm_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_resume(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_get(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_put_suspend(struct device *dev)
+{
+ return 0;
+}
+
+#endif
+
+#endif /*__CC_POWER_MGR_H__*/
+
diff --git a/drivers/staging/ccree/cc_request_mgr.c b/drivers/staging/ccree/cc_request_mgr.c
new file mode 100644
index 000000000000..8a7f83407410
--- /dev/null
+++ b/drivers/staging/ccree/cc_request_mgr.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_pm.h"
+
+#define CC_MAX_POLL_ITER 10
+/* The highest descriptor count in use */
+#define CC_MAX_DESC_SEQ_LEN 23
+
+struct cc_req_mgr_handle {
+ /* Request manager resources */
+ unsigned int hw_queue_size; /* HW capability */
+ unsigned int min_free_hw_slots;
+ unsigned int max_used_sw_slots;
+ struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
+ u32 req_queue_head;
+ u32 req_queue_tail;
+ u32 axi_completed;
+ u32 q_free_slots;
+	/* This lock protects access to HW registers that must be
+	 * accessed by a single request at a time
+	 */
+ spinlock_t hw_lock;
+ struct cc_hw_desc compl_desc;
+ u8 *dummy_comp_buff;
+ dma_addr_t dummy_comp_buff_dma;
+
+ /* backlog queue */
+ struct list_head backlog;
+ unsigned int bl_len;
+ spinlock_t bl_lock; /* protect backlog queue */
+
+#ifdef COMP_IN_WQ
+ struct workqueue_struct *workq;
+ struct delayed_work compwork;
+#else
+ struct tasklet_struct comptask;
+#endif
+ bool is_runtime_suspended;
+};
+
+struct cc_bl_item {
+ struct cc_crypto_req creq;
+ struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
+ unsigned int len;
+ struct list_head list;
+ bool notif;
+};
+
+static void comp_handler(unsigned long devarg);
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work);
+#endif
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (!req_mgr_h)
+ return; /* Not allocated */
+
+ if (req_mgr_h->dummy_comp_buff_dma) {
+ dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
+ req_mgr_h->dummy_comp_buff_dma);
+ }
+
+ dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
+ req_mgr_h->min_free_hw_slots));
+ dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
+
+#ifdef COMP_IN_WQ
+ flush_workqueue(req_mgr_h->workq);
+ destroy_workqueue(req_mgr_h->workq);
+#else
+ /* Kill tasklet */
+ tasklet_kill(&req_mgr_h->comptask);
+#endif
+ memset(req_mgr_h, 0, sizeof(struct cc_req_mgr_handle));
+ kfree(req_mgr_h);
+ drvdata->request_mgr_handle = NULL;
+}
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+
+ req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
+ if (!req_mgr_h) {
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ drvdata->request_mgr_handle = req_mgr_h;
+
+ spin_lock_init(&req_mgr_h->hw_lock);
+ spin_lock_init(&req_mgr_h->bl_lock);
+ INIT_LIST_HEAD(&req_mgr_h->backlog);
+
+#ifdef COMP_IN_WQ
+ dev_dbg(dev, "Initializing completion workqueue\n");
+ req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
+ if (!req_mgr_h->workq) {
+ dev_err(dev, "Failed creating work queue\n");
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
+#else
+ dev_dbg(dev, "Initializing completion tasklet\n");
+ tasklet_init(&req_mgr_h->comptask, comp_handler,
+ (unsigned long)drvdata);
+#endif
+ req_mgr_h->hw_queue_size = cc_ioread(drvdata,
+ CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
+ dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
+ if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
+ dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
+ req_mgr_h->max_used_sw_slots = 0;
+
+ /* Allocate DMA word for "dummy" completion descriptor use */
+ req_mgr_h->dummy_comp_buff =
+ dma_alloc_coherent(dev, sizeof(u32),
+ &req_mgr_h->dummy_comp_buff_dma,
+ GFP_KERNEL);
+ if (!req_mgr_h->dummy_comp_buff) {
+ dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
+ sizeof(u32));
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ /* Init. "dummy" completion descriptor */
+ hw_desc_init(&req_mgr_h->compl_desc);
+ set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
+ set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
+ sizeof(u32), NS_BIT, 1);
+ set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
+ set_queue_last_ind(&req_mgr_h->compl_desc);
+
+ return 0;
+
+req_mgr_init_err:
+ cc_req_mgr_fini(drvdata);
+ return rc;
+}
+
+static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
+ unsigned int seq_len)
+{
+ int i, w;
+ void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /*
+ * We do indeed write all 6 command words to the same
+ * register. The HW supports this.
+ */
+
+ for (i = 0; i < seq_len; i++) {
+ for (w = 0; w <= 5; w++)
+ writel_relaxed(seq[i].word[w], reg);
+
+ if (cc_dump_desc)
+ dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i, seq[i].word[0], seq[i].word[1],
+ seq[i].word[2], seq[i].word[3],
+ seq[i].word[4], seq[i].word[5]);
+ }
+}
+
+/*!
+ * Completion will take place if and only if user requested completion
+ * by cc_send_sync_request().
+ *
+ * \param dev
+ * \param dx_compl_h The completion event to signal
+ */
+static void request_mgr_complete(struct device *dev, void *dx_compl_h,
+ int dummy)
+{
+ struct completion *this_compl = dx_compl_h;
+
+ complete(this_compl);
+}
+
+static int cc_queues_status(struct cc_drvdata *drvdata,
+ struct cc_req_mgr_handle *req_mgr_h,
+ unsigned int total_seq_len)
+{
+ unsigned long poll_queue;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* SW queue is checked only once as it will not
+	 * be changed during the poll because the spinlock_bh
+ * is held by the thread
+ */
+ if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+ req_mgr_h->req_queue_tail) {
+ dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ return -ENOSPC;
+ }
+
+ if (req_mgr_h->q_free_slots >= total_seq_len)
+ return 0;
+
+ /* Wait for space in HW queue. Poll constant num of iterations. */
+ for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+ if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
+ req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
+
+ if (req_mgr_h->q_free_slots >= total_seq_len) {
+			/* If there is enough room, return */
+ return 0;
+ }
+
+ dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->q_free_slots, total_seq_len);
+ }
+	/* No room in the HW queue; try again later */
+ dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
+ req_mgr_h->q_free_slots, total_seq_len);
+ return -ENOSPC;
+}
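The SW-queue-full test above relies on MAX_REQUEST_QUEUE_SIZE being a power of two, so a mask replaces the modulo. A standalone model of the same ring arithmetic, with an assumed size of 8 (the real size is defined in cc_driver.h):

/* Standalone model of the ring indexing used above (size assumed). */
#define SKETCH_QSIZE 8	/* must be a power of two */

static inline int sketch_ring_full(unsigned int head, unsigned int tail)
{
	/* one slot is kept empty to distinguish full from empty */
	return ((head + 1) & (SKETCH_QSIZE - 1)) == tail;
}

static inline unsigned int sketch_ring_used(unsigned int head,
					    unsigned int tail)
{
	return (head - tail) & (SKETCH_QSIZE - 1);	/* wrap-safe */
}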
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ * Needs to be called with the HW lock held and PM running
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param add_comp If "true": add an artificial dout DMA to mark completion
+ *
+ * \return int Returns -EINPROGRESS or error code
+ */
+static int cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp, bool ivgen)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int used_sw_slots;
+ unsigned int iv_seq_len = 0;
+	unsigned int total_seq_len = len; /* initial sequence length */
+ struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ if (ivgen) {
+ dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
+ cc_req->ivgen_dma_addr_len,
+ &cc_req->ivgen_dma_addr[0],
+ &cc_req->ivgen_dma_addr[1],
+ &cc_req->ivgen_dma_addr[2],
+ cc_req->ivgen_size);
+
+ /* Acquire IV from pool */
+ rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
+ cc_req->ivgen_dma_addr_len,
+ cc_req->ivgen_size, iv_seq, &iv_seq_len);
+
+ if (rc) {
+ dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
+ return rc;
+ }
+
+ total_seq_len += iv_seq_len;
+ }
+
+ used_sw_slots = ((req_mgr_h->req_queue_head -
+ req_mgr_h->req_queue_tail) &
+ (MAX_REQUEST_QUEUE_SIZE - 1));
+ if (used_sw_slots > req_mgr_h->max_used_sw_slots)
+ req_mgr_h->max_used_sw_slots = used_sw_slots;
+
+ /* Enqueue request - must be locked with HW lock*/
+ req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
+ req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
+ (MAX_REQUEST_QUEUE_SIZE - 1);
+ /* TODO: Use circ_buf.h ? */
+
+ dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
+
+ /*
+	 * We are about to push a command to the HW via the command registers
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes
+ */
+ wmb();
+
+ /* STAT_PHASE_4: Push sequence */
+ if (ivgen)
+ enqueue_seq(drvdata, iv_seq, iv_seq_len);
+
+ enqueue_seq(drvdata, desc, len);
+
+ if (add_comp) {
+ enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
+ total_seq_len++;
+ }
+
+ if (req_mgr_h->q_free_slots < total_seq_len) {
+		/* This situation should never occur. It may indicate a
+		 * problem with power resume. Set the free slot count to 0
+		 * and hope
+ * for the best.
+ */
+ dev_err(dev, "HW free slot count mismatch.");
+ req_mgr_h->q_free_slots = 0;
+ } else {
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots -= total_seq_len;
+ }
+
+ /* Operation still in process */
+ return -EINPROGRESS;
+}
+
+static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
+ struct cc_bl_item *bli)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ spin_lock_bh(&mgr->bl_lock);
+ list_add_tail(&bli->list, &mgr->backlog);
+ ++mgr->bl_len;
+ spin_unlock_bh(&mgr->bl_lock);
+ tasklet_schedule(&mgr->comptask);
+}
+
+static void cc_proc_backlog(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct cc_bl_item *bli;
+ struct cc_crypto_req *creq;
+ struct crypto_async_request *req;
+ bool ivgen;
+ unsigned int total_len;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ spin_lock(&mgr->bl_lock);
+
+ while (mgr->bl_len) {
+ bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ spin_unlock(&mgr->bl_lock);
+
+ creq = &bli->creq;
+ req = (struct crypto_async_request *)creq->user_arg;
+
+ /*
+ * Notify the request we're moving out of the backlog
+ * but only if we haven't done so already.
+ */
+ if (!bli->notif) {
+ req->complete(req, -EINPROGRESS);
+ bli->notif = true;
+ }
+
+ ivgen = !!creq->ivgen_dma_addr_len;
+ total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+
+ spin_lock(&mgr->hw_lock);
+
+ rc = cc_queues_status(drvdata, mgr, total_len);
+ if (rc) {
+ /*
+			 * There is still no room in the FIFO for
+			 * this request. Bail out. We'll return here
+			 * on the next completion IRQ.
+ */
+ spin_unlock(&mgr->hw_lock);
+ return;
+ }
+
+ rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
+ bli->len, false, ivgen);
+
+ spin_unlock(&mgr->hw_lock);
+
+ if (rc != -EINPROGRESS) {
+ cc_pm_put_suspend(dev);
+ creq->user_cb(dev, req, rc);
+ }
+
+ /* Remove ourselves from the backlog list */
+ spin_lock(&mgr->bl_lock);
+ list_del(&bli->list);
+ --mgr->bl_len;
+ }
+
+ spin_unlock(&mgr->bl_lock);
+}
+
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req)
+{
+ int rc;
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ bool ivgen = !!cc_req->ivgen_dma_addr_len;
+ unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+ struct device *dev = drvdata_to_dev(drvdata);
+ bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+ gfp_t flags = cc_gfp_flags(req);
+ struct cc_bl_item *bli;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ return rc;
+ }
+
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, total_len);
+
+#ifdef CC_DEBUG_FORCE_BACKLOG
+ if (backlog_ok)
+ rc = -ENOSPC;
+#endif /* CC_DEBUG_FORCE_BACKLOG */
+
+ if (rc == -ENOSPC && backlog_ok) {
+ spin_unlock_bh(&mgr->hw_lock);
+
+ bli = kmalloc(sizeof(*bli), flags);
+ if (!bli) {
+ cc_pm_put_suspend(dev);
+ return -ENOMEM;
+ }
+
+ memcpy(&bli->creq, cc_req, sizeof(*cc_req));
+ memcpy(&bli->desc, desc, len * sizeof(*desc));
+ bli->len = len;
+ bli->notif = false;
+ cc_enqueue_backlog(drvdata, bli);
+ return -EBUSY;
+ }
+
+ if (!rc)
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
+ ivgen);
+
+ spin_unlock_bh(&mgr->hw_lock);
+ return rc;
+}
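The caller-side contract, sketched with a hypothetical wrapper: -EINPROGRESS means the request is queued and user_cb will run on completion; -EBUSY means it was backlogged (the request is notified with -EINPROGRESS once it leaves the backlog); anything else is a pre-queue failure:

/* Hypothetical wrapper showing how callers interpret the result. */
static int example_submit(struct cc_drvdata *drvdata,
			  struct cc_crypto_req *cc_req,
			  struct cc_hw_desc *desc, unsigned int len,
			  struct crypto_async_request *req)
{
	int rc = cc_send_request(drvdata, cc_req, desc, len, req);

	if (rc != -EINPROGRESS && rc != -EBUSY)
		dev_err(drvdata_to_dev(drvdata),
			"request failed before queueing (%d)\n", rc);

	return rc;
}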
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ int rc;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ init_completion(&cc_req->seq_compl);
+ cc_req->user_cb = request_mgr_complete;
+ cc_req->user_arg = &cc_req->seq_compl;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ return rc;
+ }
+
+ while (true) {
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, len + 1);
+
+ if (!rc)
+ break;
+
+ spin_unlock_bh(&mgr->hw_lock);
+ if (rc != -EAGAIN) {
+ cc_pm_put_suspend(dev);
+ return rc;
+ }
+ wait_for_completion_interruptible(&drvdata->hw_queue_avail);
+ reinit_completion(&drvdata->hw_queue_avail);
+ }
+
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
+ spin_unlock_bh(&mgr->hw_lock);
+
+ if (rc != -EINPROGRESS) {
+ cc_pm_put_suspend(dev);
+ return rc;
+ }
+
+ wait_for_completion(&cc_req->seq_compl);
+ return 0;
+}
+
+/*!
+ * Enqueue caller request to crypto hardware during init process.
+ * Assume this function is not called in the middle of a flow,
+ * since we set QUEUE_LAST_IND flag in the last descriptor.
+ *
+ * \param drvdata
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ *
+ * \return int Returns "0" upon success
+ */
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+	unsigned int total_seq_len = len; /* initial sequence length */
+ int rc = 0;
+
+	/* Wait for space in HW and SW FIFO. cc_queues_status() polls for
+	 * at most CC_MAX_POLL_ITER iterations.
+	 */
+ rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
+ if (rc)
+ return rc;
+
+ set_queue_last_ind(&desc[(len - 1)]);
+
+ /*
+ * We are about to push command to the HW via the command registers
+ * that may refernece hsot memory. We need to issue a memory barrier
+ * to make sure there are no outstnading memory writes
+ */
+ wmb();
+ enqueue_seq(drvdata, desc, len);
+
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+
+ return 0;
+}
+
+void complete_request(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ complete(&drvdata->hw_queue_avail);
+#ifdef COMP_IN_WQ
+ queue_delayed_work(request_mgr_handle->workq,
+ &request_mgr_handle->compwork, 0);
+#else
+ tasklet_schedule(&request_mgr_handle->comptask);
+#endif
+}
+
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work)
+{
+ struct cc_drvdata *drvdata =
+ container_of(work, struct cc_drvdata, compwork.work);
+
+ comp_handler((unsigned long)drvdata);
+}
+#endif
+
+static void proc_completions(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_req *cc_req;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+ unsigned int *tail = &request_mgr_handle->req_queue_tail;
+ unsigned int *head = &request_mgr_handle->req_queue_head;
+
+ while (request_mgr_handle->axi_completed) {
+ request_mgr_handle->axi_completed--;
+
+ /* Dequeue request */
+ if (*head == *tail) {
+ /* We are supposed to handle a completion but our
+ * queue is empty. This is not normal. Return and
+ * hope for the best.
+ */
+ dev_err(dev, "Request queue is empty head == tail %u\n",
+ *head);
+ break;
+ }
+
+ cc_req = &request_mgr_handle->req_queue[*tail];
+
+ if (cc_req->user_cb)
+ cc_req->user_cb(dev, cc_req->user_arg, 0);
+ *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
+ dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
+ dev_dbg(dev, "Request completed. axi_completed=%d\n",
+ request_mgr_handle->axi_completed);
+ cc_pm_put_suspend(dev);
+ }
+}
+
+static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
+{
+ return FIELD_GET(AXIM_MON_COMP_VALUE,
+ cc_ioread(drvdata, CC_REG(AXIM_MON_COMP)));
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void comp_handler(unsigned long devarg)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ u32 irq;
+
+ irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+
+ if (irq & CC_COMP_IRQ_MASK) {
+ /* To avoid the interrupt from firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+
+ /* Avoid race with above clear: Test completion counter
+ * once more
+ */
+ request_mgr_handle->axi_completed +=
+ cc_axi_comp_count(drvdata);
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ proc_completions(drvdata);
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
+ request_mgr_handle->axi_completed =
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR),
+ CC_COMP_IRQ_MASK);
+
+ request_mgr_handle->axi_completed +=
+ cc_axi_comp_count(drvdata);
+ }
+ }
+	/* after verifying that there is nothing to do,
+ * unmask AXI completion interrupt
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR),
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+
+ cc_proc_backlog(drvdata);
+}
+
+/*
+ * resume the queue configuration - the caller does not need to take the
+ * lock, as it is taken here under spin lock protection
+ */
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ request_mgr_handle->is_runtime_suspended = false;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0;
+}
+
+/*
+ * suspend the queue configuration. Since it is used for runtime suspend
+ * only, verify that the queue can be suspended.
+ */
+int cc_suspend_req_queue(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ /* lock the send_request */
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ if (request_mgr_handle->req_queue_head !=
+ request_mgr_handle->req_queue_tail) {
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+ return -EBUSY;
+ }
+ request_mgr_handle->is_runtime_suspended = true;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0;
+}
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ return request_mgr_handle->is_runtime_suspended;
+}
+
+#endif
+
diff --git a/drivers/staging/ccree/cc_request_mgr.h b/drivers/staging/ccree/cc_request_mgr.h
new file mode 100644
index 000000000000..573cb97af085
--- /dev/null
+++ b/drivers/staging/ccree/cc_request_mgr.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_request_mgr.h
+ * Request Manager
+ */
+
+#ifndef __REQUEST_MGR_H__
+#define __REQUEST_MGR_H__
+
+#include "cc_hw_queue_defs.h"
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param req Pointer to the async crypto request associated with
+ *            cc_req; used for backlog bookkeeping and completion
+ *            notification
+ *
+ * \return int Returns -EINPROGRESS or error
+ */
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req);
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len);
+
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len);
+
+void complete_request(struct cc_drvdata *drvdata);
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata);
+
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata);
+
+int cc_suspend_req_queue(struct cc_drvdata *drvdata);
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
+#endif
+
+#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/cc_sram_mgr.c
index 07260d168c91..d1f8a9cc1c0f 100644
--- a/drivers/staging/ccree/ssi_sram_mgr.c
+++ b/drivers/staging/ccree/cc_sram_mgr.c
@@ -1,62 +1,47 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-#include "ssi_driver.h"
-#include "ssi_sram_mgr.h"
+#include "cc_driver.h"
+#include "cc_sram_mgr.h"
/**
- * struct ssi_sram_mgr_ctx -Internal RAM context manager
+ * struct cc_sram_ctx - Internal RAM context manager
* @sram_free_offset: the offset to the non-allocated area
*/
-struct ssi_sram_mgr_ctx {
- ssi_sram_addr_t sram_free_offset;
+struct cc_sram_ctx {
+ cc_sram_addr_t sram_free_offset;
};
/**
- * ssi_sram_mgr_fini() - Cleanup SRAM pool.
+ * cc_sram_mgr_fini() - Cleanup SRAM pool.
*
* @drvdata: Associated device driver context
*/
-void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
{
- struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
-
/* Free "this" context */
- if (smgr_ctx) {
- memset(smgr_ctx, 0, sizeof(struct ssi_sram_mgr_ctx));
- kfree(smgr_ctx);
- }
+ kfree(drvdata->sram_mgr_handle);
}
/**
- * ssi_sram_mgr_init() - Initializes SRAM pool.
+ * cc_sram_mgr_init() - Initializes SRAM pool.
* The pool starts right at the beginning of SRAM.
* Returns zero for success, negative value otherwise.
*
* @drvdata: Associated device driver context
*/
-int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
+int cc_sram_mgr_init(struct cc_drvdata *drvdata)
{
+ struct cc_sram_ctx *ctx;
+
/* Allocate "this" context */
- drvdata->sram_mgr_handle = kzalloc(sizeof(struct ssi_sram_mgr_ctx),
- GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!drvdata->sram_mgr_handle)
+ if (!ctx)
return -ENOMEM;
+ drvdata->sram_mgr_handle = ctx;
+
return 0;
}
@@ -69,18 +54,18 @@ int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
* \param drvdata
* \param size The requested bytes to allocate
*/
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
{
- struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
+ struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
- ssi_sram_addr_t p;
+ cc_sram_addr_t p;
- if (unlikely((size & 0x3) != 0)) {
+ if ((size & 0x3)) {
dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
size);
return NULL_SRAM_ADDR;
}
- if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
+ if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;
@@ -93,7 +78,7 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
}
/**
- * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
+ * cc_set_sram_desc() - Create const descriptors sequence to
* set values in given array into SRAM.
* Note: each const value can't exceed word size.
*
@@ -103,10 +88,9 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void ssi_sram_mgr_const2sram_desc(
- const u32 *src, ssi_sram_addr_t dst,
- unsigned int nelement,
- struct cc_hw_desc *seq, unsigned int *seq_len)
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+ unsigned int nelement, struct cc_hw_desc *seq,
+ unsigned int *seq_len)
{
u32 i;
unsigned int idx = *seq_len;
diff --git a/drivers/staging/ccree/cc_sram_mgr.h b/drivers/staging/ccree/cc_sram_mgr.h
new file mode 100644
index 000000000000..d48649fb3323
--- /dev/null
+++ b/drivers/staging/ccree/cc_sram_mgr.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_SRAM_MGR_H__
+#define __CC_SRAM_MGR_H__
+
+#ifndef CC_CC_SRAM_SIZE
+#define CC_CC_SRAM_SIZE 4096
+#endif
+
+struct cc_drvdata;
+
+/**
+ * Address (offset) within CC internal SRAM
+ */
+
+typedef u64 cc_sram_addr_t;
+
+#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
+
+/*!
+ * Initializes SRAM pool.
+ * The first X bytes of SRAM are reserved for ROM usage, hence the pool
+ * starts right after those X bytes.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Uninits SRAM pool.
+ *
+ * \param drvdata
+ */
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Allocates a buffer from the SRAM pool.
+ * Note: the caller is responsible for freeing the LAST allocated buffer.
+ * This function does not handle any fragmentation that may occur
+ * through the order of alloc/free calls.
+ *
+ * \param drvdata
+ * \param size The requested bytes to allocate
+ */
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
+
+/**
+ * cc_set_sram_desc() - Create const descriptors sequence to
+ * set values in given array into SRAM.
+ * Note: each const value can't exceed word size.
+ *
+ * @src: A pointer to array of words to set as consts.
+ * @dst: The target SRAM buffer to set into
+ * @nelement: The number of words in "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+ unsigned int nelement, struct cc_hw_desc *seq,
+ unsigned int *seq_len);
+
+#endif /*__CC_SRAM_MGR_H__*/
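The pool behind cc_sram_alloc() is a plain bump allocator: one monotonic offset, word-multiple sizes only, and no per-buffer free. A standalone model of those semantics (the names are invented; the 4 KB size follows the CC_CC_SRAM_SIZE default above):

/* Standalone model of the bump allocation in cc_sram_alloc(). */
typedef unsigned long long sketch_sram_addr_t;

#define SKETCH_POOL_SIZE	4096ULL
#define SKETCH_NULL_ADDR	((sketch_sram_addr_t)-1)

static sketch_sram_addr_t sketch_free_offset;

static sketch_sram_addr_t sketch_sram_alloc(unsigned int size)
{
	if (size & 0x3)			/* whole words only */
		return SKETCH_NULL_ADDR;
	if (size > SKETCH_POOL_SIZE - sketch_free_offset)
		return SKETCH_NULL_ADDR;	/* pool exhausted */

	sketch_free_offset += size;	/* bump; nothing is ever freed */
	return sketch_free_offset - size;
}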
diff --git a/drivers/staging/ccree/dx_crys_kernel.h b/drivers/staging/ccree/dx_crys_kernel.h
deleted file mode 100644
index 219603030344..000000000000
--- a/drivers/staging/ccree/dx_crys_kernel.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DX_CRYS_KERNEL_H__
-#define __DX_CRYS_KERNEL_H__
-
-// --------------------------------------
-// BLOCK: DSCRPTR
-// --------------------------------------
-#define DX_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
-#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
-#define DX_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
-#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
-#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
-#define DX_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
-#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
-#define DX_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
-#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
-#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
-#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
-#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
-#define DX_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
-#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
-#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
-#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
-#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
-#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
-#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
-#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
-#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
-#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
-#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
-#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
-#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
-#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
-#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
-#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
-#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
-#define DX_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
-#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
-#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
-#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
-#define DX_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
-#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
-#define DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
-#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
-#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
-// --------------------------------------
-// BLOCK: AXI_P
-// --------------------------------------
-#define DX_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
-#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
-#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
-#define DX_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
-#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
-#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
-#define DX_AXIM_MON_COMP_REG_OFFSET 0xB80UL
-#define DX_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
-#define DX_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
-#define DX_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
-#define DX_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
-#define DX_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
-#define DX_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
-#define DX_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
-#define DX_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
-#define DX_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
-#define DX_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
-#define DX_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
-#define DX_AXIM_CFG_REG_OFFSET 0xBE8UL
-#define DX_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
-#define DX_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
-#define DX_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
-#define DX_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
-#define DX_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
-#define DX_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
-#define DX_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
-#define DX_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
-#define DX_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
-#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
-#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
-#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
-#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
-#define DX_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
-#define DX_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
-#define DX_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
-#define DX_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
-#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
-#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
-#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
-#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
-#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
-#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
-#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
-#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
-#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
-#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
-#define DX_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
-#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
-#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
-#endif // __DX_CRYS_KERNEL_H__
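
[Note: the *_REG_OFFSET, *_BIT_SHIFT and *_BIT_SIZE triplets in this header describe memory-mapped registers and their bit fields. A minimal sketch of how a driver would typically read such a field back, assuming an ioremap()ed register base in cc_base; the helper name is illustrative, not part of the driver:

    #include <linux/io.h>
    #include <linux/bitops.h>

    /* Extract a bit field described by a SHIFT/SIZE macro pair. */
    static u32 dx_read_field(void __iomem *cc_base, unsigned long reg_offset,
                             unsigned int shift, unsigned int size)
    {
            u32 val = ioread32(cc_base + reg_offset);

            return (val >> shift) & (BIT(size) - 1);
    }

    /* e.g. the 10-bit descriptor-queue fill level: */
    u32 level = dx_read_field(cc_base, DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET,
                              DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT,
                              DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE);
]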
diff --git a/drivers/staging/ccree/dx_host.h b/drivers/staging/ccree/dx_host.h
deleted file mode 100644
index 863c2670d826..000000000000
--- a/drivers/staging/ccree/dx_host.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DX_HOST_H__
-#define __DX_HOST_H__
-
-// --------------------------------------
-// BLOCK: HOST_P
-// --------------------------------------
-#define DX_HOST_IRR_REG_OFFSET 0xA00UL
-#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
-#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
-#define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
-#define DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
-#define DX_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
-#define DX_HOST_IRR_GPR0_BIT_SIZE 0x1UL
-#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
-#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
-#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
-#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_REG_OFFSET 0xA04UL
-#define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
-#define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
-#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
-#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
-#define DX_HOST_IMR_GPR0_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
-#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
-#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
-#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_REG_OFFSET 0xA08UL
-#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
-#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
-#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
-#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
-#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
-#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
-#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
-#define DX_HOST_SIGNATURE_REG_OFFSET 0xA24UL
-#define DX_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
-#define DX_HOST_BOOT_REG_OFFSET 0xA28UL
-#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
-#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
-#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
-#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
-#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
-#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
-#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
-#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
-#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
-#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
-#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
-#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
-#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
-#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
-#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
-#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
-#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
-#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
-#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
-#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
-#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
-#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
-#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
-#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
-#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
-#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
-#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
-#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
-#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
-#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
-#define DX_HOST_VERSION_REG_OFFSET 0xA40UL
-#define DX_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
-#define DX_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
-#define DX_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
-#define DX_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
-#define DX_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
-#define DX_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
-#define DX_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
-#define DX_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
-#define DX_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
-#define DX_HOST_GPR0_REG_OFFSET 0xA70UL
-#define DX_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
-#define DX_GPR_HOST_REG_OFFSET 0xA74UL
-#define DX_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
-#define DX_GPR_HOST_VALUE_BIT_SIZE 0x20UL
-#define DX_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
-#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
-#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
-// --------------------------------------
-// BLOCK: HOST_SRAM
-// --------------------------------------
-#define DX_SRAM_DATA_REG_OFFSET 0xF00UL
-#define DX_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
-#define DX_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
-#define DX_SRAM_ADDR_REG_OFFSET 0xF04UL
-#define DX_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
-#define DX_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
-#define DX_SRAM_DATA_READY_REG_OFFSET 0xF08UL
-#define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
-#define DX_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL
-
-#endif //__DX_HOST_H__
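
[Note: the DX_HOST_IRR/IMR/ICR shifts above are single-bit interrupt flags sharing one bit layout across the status (IRR), mask (IMR) and clear (ICR) registers. A hedged sketch of testing and acknowledging the AXIM completion interrupt, assuming a mapped cc_base and write-1-to-clear ICR semantics:

    #include <linux/io.h>
    #include <linux/bitops.h>

    u32 irr = ioread32(cc_base + DX_HOST_IRR_REG_OFFSET);

    if (irr & BIT(DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)) {
            /* Acknowledge the interrupt via the matching ICR bit. */
            iowrite32(BIT(DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT),
                      cc_base + DX_HOST_ICR_REG_OFFSET);
    }
]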
diff --git a/drivers/staging/ccree/dx_reg_common.h b/drivers/staging/ccree/dx_reg_common.h
deleted file mode 100644
index d5132ffaf6e6..000000000000
--- a/drivers/staging/ccree/dx_reg_common.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DX_REG_COMMON_H__
-#define __DX_REG_COMMON_H__
-
-#define DX_DEV_SIGNATURE 0xDCC71200UL
-
-#define CC_HW_VERSION 0xef840015UL
-
-#define DX_DEV_SHA_MAX 512
-
-#endif /*__DX_REG_COMMON_H__*/
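
[Note: DX_DEV_SIGNATURE is the value the probe path would expect to read back from the HOST_SIGNATURE register to confirm it is driving the right hardware. A hedged sketch; the surrounding probe context (cc_base, dev) is assumed:

    u32 signature = ioread32(cc_base + DX_HOST_SIGNATURE_REG_OFFSET);

    if (signature != DX_DEV_SIGNATURE) {
            dev_err(dev, "Invalid CC signature: 0x%08X != 0x%08lX\n",
                    signature, DX_DEV_SIGNATURE);
            return -ENODEV;
    }
]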
diff --git a/drivers/staging/ccree/hash_defs.h b/drivers/staging/ccree/hash_defs.h
deleted file mode 100644
index f52656f5a3ea..000000000000
--- a/drivers/staging/ccree/hash_defs.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _HASH_DEFS_H_
-#define _HASH_DEFS_H_
-
-#include "cc_crypto_ctx.h"
-
-enum cc_hash_conf_pad {
- HASH_PADDING_DISABLED = 0,
- HASH_PADDING_ENABLED = 1,
- HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
- HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
-};
-
-enum cc_hash_cipher_pad {
- DO_NOT_PAD = 0,
- DO_PAD = 1,
- HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
-};
-
-#endif /*_HASH_DEFS_H_*/
-
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.h b/drivers/staging/ccree/ssi_buffer_mgr.h
deleted file mode 100644
index 1032f25edcab..000000000000
--- a/drivers/staging/ccree/ssi_buffer_mgr.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_buffer_mgr.h
- * Buffer Manager
- */
-
-#ifndef __SSI_BUFFER_MGR_H__
-#define __SSI_BUFFER_MGR_H__
-
-#include <crypto/algapi.h>
-
-#include "ssi_config.h"
-#include "ssi_driver.h"
-
-enum ssi_req_dma_buf_type {
- SSI_DMA_BUF_NULL = 0,
- SSI_DMA_BUF_DLLI,
- SSI_DMA_BUF_MLLI
-};
-
-enum ssi_sg_cpy_direct {
- SSI_SG_TO_BUF = 0,
- SSI_SG_FROM_BUF = 1
-};
-
-struct ssi_mlli {
- ssi_sram_addr_t sram_addr;
-	unsigned int nents; /* sg nents */
-	unsigned int mlli_nents; /* mlli nents may be different from the above */
-};
-
-struct mlli_params {
- struct dma_pool *curr_pool;
- u8 *mlli_virt_addr;
- dma_addr_t mlli_dma_addr;
- u32 mlli_len;
-};
-
-int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata);
-
-int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata);
-
-int ssi_buffer_mgr_map_blkcipher_request(
- struct ssi_drvdata *drvdata,
- void *ctx,
- unsigned int ivsize,
- unsigned int nbytes,
- void *info,
- struct scatterlist *src,
- struct scatterlist *dst);
-
-void ssi_buffer_mgr_unmap_blkcipher_request(
- struct device *dev,
- void *ctx,
- unsigned int ivsize,
- struct scatterlist *src,
- struct scatterlist *dst);
-
-int ssi_buffer_mgr_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req);
-
-void ssi_buffer_mgr_unmap_aead_request(struct device *dev, struct aead_request *req);
-
-int ssi_buffer_mgr_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update);
-
-int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size);
-
-void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
-
-void ssi_buffer_mgr_copy_scatterlist_portion(struct device *dev, u8 *dest,
- struct scatterlist *sg,
- u32 to_skip, u32 end,
- enum ssi_sg_cpy_direct direct);
-
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len);
-
-#endif /*__SSI_BUFFER_MGR_H__*/
-
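
[Note: the buffer manager distinguishes a direct DMA descriptor (DLLI) from a multi-entry linked-list table (MLLI) held in SRAM. A hedged sketch of the usual selection rule, based on the mapped scatterlist entry count; the helper name is illustrative:

    static enum ssi_req_dma_buf_type ssi_pick_buf_type(unsigned int nents)
    {
            if (nents == 0)
                    return SSI_DMA_BUF_NULL;  /* nothing mapped */
            if (nents == 1)
                    return SSI_DMA_BUF_DLLI;  /* one contiguous DMA region */
            return SSI_DMA_BUF_MLLI;          /* needs an MLLI table in SRAM */
    }
]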
diff --git a/drivers/staging/ccree/ssi_cipher.h b/drivers/staging/ccree/ssi_cipher.h
deleted file mode 100644
index 25e6335c0d94..000000000000
--- a/drivers/staging/ccree/ssi_cipher.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_cipher.h
- * ARM CryptoCell Cipher Crypto API
- */
-
-#ifndef __SSI_CIPHER_H__
-#define __SSI_CIPHER_H__
-
-#include <linux/kernel.h>
-#include <crypto/algapi.h>
-#include "ssi_driver.h"
-#include "ssi_buffer_mgr.h"
-
-/* Crypto cipher flags */
-#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
-#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
-#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
-#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
-#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
-
-#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
-					CC_CRYPTO_CIPHER_KEY_KFDE1 | \
-					CC_CRYPTO_CIPHER_KEY_KFDE2 | \
-					CC_CRYPTO_CIPHER_KEY_KFDE3)
-
-struct blkcipher_req_ctx {
- struct async_gen_req_ctx gen_ctx;
- enum ssi_req_dma_buf_type dma_buf_type;
- u32 in_nents;
- u32 in_mlli_nents;
- u32 out_nents;
- u32 out_mlli_nents;
- u8 *backup_info; /*store iv for generated IV flow*/
- u8 *iv;
- bool is_giv;
- struct mlli_params mlli_params;
-};
-
-int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata);
-
-int ssi_ablkcipher_free(struct ssi_drvdata *drvdata);
-
-#ifndef CRYPTO_ALG_BULK_MASK
-
-#define CRYPTO_ALG_BULK_DU_512 0x00002000
-#define CRYPTO_ALG_BULK_DU_4096 0x00004000
-#define CRYPTO_ALG_BULK_MASK (CRYPTO_ALG_BULK_DU_512 |\
- CRYPTO_ALG_BULK_DU_4096)
-#endif /* CRYPTO_ALG_BULK_MASK */
-
-#ifdef CRYPTO_TFM_REQ_HW_KEY
-
-static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
-{
- return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
-}
-
-#else
-
-struct arm_hw_key_info {
- int hw_key1;
- int hw_key2;
-};
-
-static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
-{
- return false;
-}
-
-#endif /* CRYPTO_TFM_REQ_HW_KEY */
-
-#endif /*__SSI_CIPHER_H__*/
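
[Note: the CC_CRYPTO_CIPHER_KEY_KFDE* flags above select one of four hardware key slots, and CC_CRYPTO_CIPHER_KEY_KFDE_MASK covers them all. A hedged sketch of turning a slot index into its flag and testing a flags word; the names below are illustrative:

    /* KFDE0 is BIT(0), so slot n maps to BIT(n) for n in 0..3. */
    static u32 ssi_kfde_flag(unsigned int slot)
    {
            return CC_CRYPTO_CIPHER_KEY_KFDE0 << slot;
    }

    bool uses_hw_key = (key_flags & CC_CRYPTO_CIPHER_KEY_KFDE_MASK) != 0;
]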
diff --git a/drivers/staging/ccree/ssi_config.h b/drivers/staging/ccree/ssi_config.h
deleted file mode 100644
index ff7597c2d77e..000000000000
--- a/drivers/staging/ccree/ssi_config.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_config.h
- * Definitions for ARM CryptoCell Linux Crypto Driver
- */
-
-#ifndef __SSI_CONFIG_H__
-#define __SSI_CONFIG_H__
-
-#include <linux/version.h>
-
-//#define FLUSH_CACHE_ALL
-//#define COMPLETION_DELAY
-//#define DX_DUMP_DESCS
-// #define DX_DUMP_BYTES
-// #define CC_DEBUG
-#define ENABLE_CC_SYSFS /* Enable sysfs interface for debugging REE driver */
-//#define DX_IRQ_DELAY 100000
-#define DMA_BIT_MASK_LEN 48 /* was 32 bits, enlarged to 48 bits for the Juno platform */
-
-#endif /*__SSI_CONFIG_H__*/
-
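
[Note: DMA_BIT_MASK_LEN feeds the standard DMA-mask setup at probe time. A hedged sketch using the kernel's DMA API; the platform device pointer and rc variable are assumed from the probe context:

    #include <linux/dma-mapping.h>

    /* Constrain DMA addressing to DMA_BIT_MASK_LEN (48) bits. */
    rc = dma_set_mask_and_coherent(&plat_dev->dev,
                                   DMA_BIT_MASK(DMA_BIT_MASK_LEN));
    if (rc)
            dev_err(&plat_dev->dev, "Failed to set 48-bit DMA mask\n");
]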
diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h
deleted file mode 100644
index 94c755cafb47..000000000000
--- a/drivers/staging/ccree/ssi_driver.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_driver.h
- * ARM CryptoCell Linux Crypto Driver
- */
-
-#ifndef __SSI_DRIVER_H__
-#define __SSI_DRIVER_H__
-
-#include "ssi_config.h"
-#ifdef COMP_IN_WQ
-#include <linux/workqueue.h>
-#else
-#include <linux/interrupt.h>
-#endif
-#include <linux/dma-mapping.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/aes.h>
-#include <crypto/sha.h>
-#include <crypto/aead.h>
-#include <crypto/authenc.h>
-#include <crypto/hash.h>
-#include <linux/version.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-/* Registers definitions from shared/hw/ree_include */
-#include "dx_host.h"
-#include "dx_reg_common.h"
-#define CC_SUPPORT_SHA DX_DEV_SHA_MAX
-#include "cc_crypto_ctx.h"
-#include "ssi_sysfs.h"
-#include "hash_defs.h"
-#include "cc_hw_queue_defs.h"
-#include "ssi_sram_mgr.h"
-
-#define DRV_MODULE_VERSION "3.0"
-
-#define SSI_DEV_NAME_STR "cc715ree"
-#define CC_COHERENT_CACHE_PARAMS 0xEEE
-
-#define SSI_CC_HAS_AES_CCM 1
-#define SSI_CC_HAS_AES_GCM 1
-#define SSI_CC_HAS_AES_XTS 1
-#define SSI_CC_HAS_AES_ESSIV 1
-#define SSI_CC_HAS_AES_BITLOCKER 1
-#define SSI_CC_HAS_AES_CTS 1
-#define SSI_CC_HAS_MULTI2 0
-#define SSI_CC_HAS_CMAC 1
-
-#define SSI_AXI_IRQ_MASK ((1 << DX_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
-			  (1 << DX_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
-			  (1 << DX_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
-			  (1 << DX_AXIM_CFG_COMPMASK_BIT_SHIFT))
-
-#define SSI_AXI_ERR_IRQ_MASK BIT(DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
-
-#define SSI_COMP_IRQ_MASK BIT(DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
-
-#define AXIM_MON_COMP_VALUE GENMASK(DX_AXIM_MON_COMP_VALUE_BIT_SIZE + \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT)
-
-/* Register name mangling macro */
-#define CC_REG(reg_name) DX_ ## reg_name ## _REG_OFFSET
-
-/* TEE FIPS status interrupt */
-#define SSI_GPR0_IRQ_MASK BIT(DX_HOST_IRR_GPR0_BIT_SHIFT)
-
-#define SSI_CRA_PRIO 3000
-
-#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
-
-#define MAX_REQUEST_QUEUE_SIZE 4096
-#define MAX_MLLI_BUFF_SIZE 2080
-#define MAX_ICV_NENTS_SUPPORTED 2
-
-/* Definitions for HW descriptors DIN/DOUT fields */
-#define NS_BIT 1
-#define AXI_ID 0
-/* AXI_ID is not actually the AXI ID of the transaction but the value of the
- * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
- */
-
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-
-#define SSI_MAX_IVGEN_DMA_ADDRESSES 3
-struct ssi_crypto_req {
- void (*user_cb)(struct device *dev, void *req, void __iomem *cc_base);
- void *user_arg;
- dma_addr_t ivgen_dma_addr[SSI_MAX_IVGEN_DMA_ADDRESSES];
-	/* For the first 'ivgen_dma_addr_len' addresses of this array,
-	 * the generated IV will be placed there by send_request().
-	 * The same generated IV is used for all addresses!
-	 */
-	unsigned int ivgen_dma_addr_len; /* Number of 'ivgen_dma_addr' elements to be filled. */
- unsigned int ivgen_size; /* The generated IV size required, 8/16 B allowed. */
- struct completion seq_compl; /* request completion */
-};
-
-/**
- * struct ssi_drvdata - driver private data context
- * @cc_base: virt address of the CC registers
- * @irq: device IRQ number
- * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
- * @fw_ver: SeP loaded firmware version
- */
-struct ssi_drvdata {
- void __iomem *cc_base;
- int irq;
- u32 irq_mask;
- u32 fw_ver;
- /* Calibration time of start/stop
- * monitor descriptors
- */
- u32 monitor_null_cycles;
- struct platform_device *plat_dev;
- ssi_sram_addr_t mlli_sram_addr;
- void *buff_mgr_handle;
- void *hash_handle;
- void *aead_handle;
- void *blkcipher_handle;
- void *request_mgr_handle;
- void *fips_handle;
- void *ivgen_handle;
- void *sram_mgr_handle;
- struct clk *clk;
- bool coherent;
-};
-
-struct ssi_crypto_alg {
- struct list_head entry;
- int cipher_mode;
- int flow_mode; /* Note: currently, refers to the cipher mode only. */
- int auth_mode;
- struct ssi_drvdata *drvdata;
- struct crypto_alg crypto_alg;
- struct aead_alg aead_alg;
-};
-
-struct ssi_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- struct aead_alg aead;
- struct blkcipher_alg blkcipher;
- struct cipher_alg cipher;
- struct compress_alg compress;
- } template_u;
- int cipher_mode;
- int flow_mode; /* Note: currently, refers to the cipher mode only. */
- int auth_mode;
- struct ssi_drvdata *drvdata;
-};
-
-struct async_gen_req_ctx {
- dma_addr_t iv_dma_addr;
- enum drv_crypto_direction op_type;
-};
-
-static inline struct device *drvdata_to_dev(struct ssi_drvdata *drvdata)
-{
- return &drvdata->plat_dev->dev;
-}
-
-#ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const u8 *the_array, unsigned long size);
-#else
-static inline void dump_byte_array(const char *name, const u8 *the_array,
- unsigned long size) {};
-#endif
-
-int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe);
-void fini_cc_regs(struct ssi_drvdata *drvdata);
-int cc_clk_on(struct ssi_drvdata *drvdata);
-void cc_clk_off(struct ssi_drvdata *drvdata);
-
-static inline void cc_iowrite(struct ssi_drvdata *drvdata, u32 reg, u32 val)
-{
- iowrite32(val, (drvdata->cc_base + reg));
-}
-
-static inline u32 cc_ioread(struct ssi_drvdata *drvdata, u32 reg)
-{
- return ioread32(drvdata->cc_base + reg);
-}
-
-#endif /*__SSI_DRIVER_H__*/
-
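
[Note: the CC_REG() macro above pastes a short register name into its full DX_*_REG_OFFSET token at compile time, which keeps the cc_ioread()/cc_iowrite() call sites readable. An illustration of the expansion:

    /* CC_REG(HOST_IRR) expands to DX_HOST_IRR_REG_OFFSET (0xA00UL): */
    u32 irr = cc_ioread(drvdata, CC_REG(HOST_IRR));

    /* Apply the shadow mask (1 = masked) to the interrupt mask register: */
    cc_iowrite(drvdata, CC_REG(HOST_IMR), drvdata->irq_mask);
]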
diff --git a/drivers/staging/ccree/ssi_fips.h b/drivers/staging/ccree/ssi_fips.h
deleted file mode 100644
index 63bcca7f3af9..000000000000
--- a/drivers/staging/ccree/ssi_fips.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SSI_FIPS_H__
-#define __SSI_FIPS_H__
-
-#ifdef CONFIG_CRYPTO_FIPS
-
-enum cc_fips_status {
- CC_FIPS_SYNC_MODULE_OK = 0x0,
- CC_FIPS_SYNC_MODULE_ERROR = 0x1,
- CC_FIPS_SYNC_REE_STATUS = 0x4,
- CC_FIPS_SYNC_TEE_STATUS = 0x8,
- CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
-};
-
-int ssi_fips_init(struct ssi_drvdata *p_drvdata);
-void ssi_fips_fini(struct ssi_drvdata *drvdata);
-void fips_handler(struct ssi_drvdata *drvdata);
-void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok);
-
-#else /* CONFIG_CRYPTO_FIPS */
-
-static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
-{
- return 0;
-}
-
-static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
-static inline void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok) {}
-static inline void fips_handler(struct ssi_drvdata *drvdata) {}
-
-#endif /* CONFIG_CRYPTO_FIPS */
-
-#endif /*__SSI_FIPS_H__*/
-
diff --git a/drivers/staging/ccree/ssi_hash.h b/drivers/staging/ccree/ssi_hash.h
deleted file mode 100644
index 2400e389d65a..000000000000
--- a/drivers/staging/ccree/ssi_hash.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_hash.h
- * ARM CryptoCell Hash Crypto API
- */
-
-#ifndef __SSI_HASH_H__
-#define __SSI_HASH_H__
-
-#include "ssi_buffer_mgr.h"
-
-#define HMAC_IPAD_CONST 0x36363636
-#define HMAC_OPAD_CONST 0x5C5C5C5C
-#if (DX_DEV_SHA_MAX > 256)
-#define HASH_LEN_SIZE 16
-#define SSI_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-#define SSI_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
-#else
-#define HASH_LEN_SIZE 8
-#define SSI_MAX_HASH_DIGEST_SIZE SHA256_DIGEST_SIZE
-#define SSI_MAX_HASH_BLCK_SIZE SHA256_BLOCK_SIZE
-#endif
-
-#define XCBC_MAC_K1_OFFSET 0
-#define XCBC_MAC_K2_OFFSET 16
-#define XCBC_MAC_K3_OFFSET 32
-
-#define CC_EXPORT_MAGIC 0xC2EE1070U
-
-/* This struct was taken from drivers/crypto/nx/nx-aes-xcbc.c; it is used for the xcbc/cmac statesize. */
-struct aeshash_state {
- u8 state[AES_BLOCK_SIZE];
- unsigned int count;
- u8 buffer[AES_BLOCK_SIZE];
-};
-
-/* ahash state */
-struct ahash_req_ctx {
- u8 *buff0;
- u8 *buff1;
- u8 *digest_result_buff;
- struct async_gen_req_ctx gen_ctx;
- enum ssi_req_dma_buf_type data_dma_buf_type;
- u8 *digest_buff;
- u8 *opad_digest_buff;
- u8 *digest_bytes_len;
- dma_addr_t opad_digest_dma_addr;
- dma_addr_t digest_buff_dma_addr;
- dma_addr_t digest_bytes_len_dma_addr;
- dma_addr_t digest_result_dma_addr;
- u32 buff0_cnt;
- u32 buff1_cnt;
- u32 buff_index;
-	u32 xcbc_count; /* count xcbc update operations */
- struct scatterlist buff_sg[2];
- struct scatterlist *curr_sg;
- u32 in_nents;
- u32 mlli_nents;
- struct mlli_params mlli_params;
-};
-
-int ssi_hash_alloc(struct ssi_drvdata *drvdata);
-int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata);
-int ssi_hash_free(struct ssi_drvdata *drvdata);
-
-/*!
- * Gets the initial digest length
- *
- * \param drvdata
- * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
- *
- * \return ssi_sram_addr_t The address of the initial digest length in SRAM
- */
-ssi_sram_addr_t
-ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode);
-
-/*!
- * Gets the address of the initial digest in SRAM
- * according to the given hash mode
- *
- * \param drvdata
- * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
- *
- * \return ssi_sram_addr_t The address of the initial digest in SRAM
- */
-ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode);
-
-#endif /*__SSI_HASH_H__*/
-
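
[Note: HMAC_IPAD_CONST and HMAC_OPAD_CONST are the classic HMAC 0x36/0x5C pad bytes replicated across a 32-bit word. A hedged software sketch of deriving the inner and outer key blocks; key and keylen are assumed, with keylen no larger than the block size:

    #include <linux/kernel.h>
    #include <linux/string.h>

    u32 ipad[SSI_MAX_HASH_BLCK_SIZE / sizeof(u32)];
    u32 opad[SSI_MAX_HASH_BLCK_SIZE / sizeof(u32)];
    int i;

    memset(ipad, 0, sizeof(ipad));
    memcpy(ipad, key, keylen);              /* zero-padded key block */
    memcpy(opad, ipad, sizeof(ipad));

    for (i = 0; i < ARRAY_SIZE(ipad); i++) {
            ipad[i] ^= HMAC_IPAD_CONST;     /* 0x36363636 */
            opad[i] ^= HMAC_OPAD_CONST;     /* 0x5C5C5C5C */
    }
]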
diff --git a/drivers/staging/ccree/ssi_ivgen.h b/drivers/staging/ccree/ssi_ivgen.h
deleted file mode 100644
index 961aea411cb3..000000000000
--- a/drivers/staging/ccree/ssi_ivgen.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SSI_IVGEN_H__
-#define __SSI_IVGEN_H__
-
-#include "cc_hw_queue_defs.h"
-
-#define SSI_IVPOOL_SEQ_LEN 8
-
-/*!
- * Allocates iv-pool and maps resources.
- * This function generates the first IV pool.
- *
- * \param drvdata Driver's private context
- *
- * \return int Zero for success, negative value otherwise.
- */
-int ssi_ivgen_init(struct ssi_drvdata *drvdata);
-
-/*!
- * Free iv-pool and ivgen context.
- *
- * \param drvdata
- */
-void ssi_ivgen_fini(struct ssi_drvdata *drvdata);
-
-/*!
- * Generates the initial pool in SRAM.
- * This function should be invoked when resuming the DX driver.
- *
- * \param drvdata
- *
- * \return int Zero for success, negative value otherwise.
- */
-int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata);
-
-/*!
- * Acquires 16 Bytes IV from the iv-pool
- *
- * \param drvdata Driver private context
- * \param iv_out_dma Array of physical IV out addresses
- * \param iv_out_dma_len Length of iv_out_dma array (additional elements are ignored)
- * \param iv_out_size May be 8 or 16 bytes long
- * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length
- *
- * \return int Zero for success, negative value otherwise.
- */
-int ssi_ivgen_getiv(
- struct ssi_drvdata *drvdata,
- dma_addr_t iv_out_dma[],
- unsigned int iv_out_dma_len,
- unsigned int iv_out_size,
- struct cc_hw_desc iv_seq[],
- unsigned int *iv_seq_len);
-
-#endif /*__SSI_IVGEN_H__*/
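
[Note: a hedged caller-side sketch of ssi_ivgen_getiv(), based only on the parameter documentation above; the descriptors it emits must be pushed ahead of the caller's own crypto sequence. iv_dma_addr is an assumed, already-mapped DMA address:

    #include <crypto/aes.h>

    struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
    unsigned int iv_seq_len = 0;
    int rc;

    rc = ssi_ivgen_getiv(drvdata, &iv_dma_addr, 1,
                         AES_BLOCK_SIZE /* 16 B */, iv_seq, &iv_seq_len);
    if (rc)
            return rc;      /* pool empty or descriptor build failed */

    /* iv_seq[0..iv_seq_len-1] now precedes the crypto sequence. */
]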
diff --git a/drivers/staging/ccree/ssi_pm.c b/drivers/staging/ccree/ssi_pm.c
deleted file mode 100644
index 36a498098a70..000000000000
--- a/drivers/staging/ccree/ssi_pm.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "ssi_config.h"
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <crypto/ctr.h>
-#include <linux/pm_runtime.h>
-#include "ssi_driver.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_request_mgr.h"
-#include "ssi_sram_mgr.h"
-#include "ssi_sysfs.h"
-#include "ssi_ivgen.h"
-#include "ssi_hash.h"
-#include "ssi_pm.h"
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-
-#define POWER_DOWN_ENABLE 0x01
-#define POWER_DOWN_DISABLE 0x00
-
-int ssi_power_mgr_runtime_suspend(struct device *dev)
-{
- struct ssi_drvdata *drvdata =
- (struct ssi_drvdata *)dev_get_drvdata(dev);
- int rc;
-
- dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
- rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
- if (rc != 0) {
- dev_err(dev, "ssi_request_mgr_runtime_suspend_queue (%x)\n",
- rc);
- return rc;
- }
- fini_cc_regs(drvdata);
- cc_clk_off(drvdata);
- return 0;
-}
-
-int ssi_power_mgr_runtime_resume(struct device *dev)
-{
- int rc;
- struct ssi_drvdata *drvdata =
- (struct ssi_drvdata *)dev_get_drvdata(dev);
-
- dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
- rc = cc_clk_on(drvdata);
- if (rc) {
- dev_err(dev, "failed getting clock back on. We're toast.\n");
- return rc;
- }
-
- rc = init_cc_regs(drvdata, false);
- if (rc != 0) {
- dev_err(dev, "init_cc_regs (%x)\n", rc);
- return rc;
- }
-
- rc = ssi_request_mgr_runtime_resume_queue(drvdata);
- if (rc != 0) {
- dev_err(dev, "ssi_request_mgr_runtime_resume_queue (%x)\n", rc);
- return rc;
- }
-
-	/* Must be after the queue resume as it uses the HW queue */
- ssi_hash_init_sram_digest_consts(drvdata);
-
- ssi_ivgen_init_sram_pool(drvdata);
- return 0;
-}
-
-int ssi_power_mgr_runtime_get(struct device *dev)
-{
- int rc = 0;
-
- if (ssi_request_mgr_is_queue_runtime_suspend(
- (struct ssi_drvdata *)dev_get_drvdata(dev))) {
- rc = pm_runtime_get_sync(dev);
- } else {
- pm_runtime_get_noresume(dev);
- }
- return rc;
-}
-
-int ssi_power_mgr_runtime_put_suspend(struct device *dev)
-{
- int rc = 0;
-
- if (!ssi_request_mgr_is_queue_runtime_suspend(
- (struct ssi_drvdata *)dev_get_drvdata(dev))) {
- pm_runtime_mark_last_busy(dev);
- rc = pm_runtime_put_autosuspend(dev);
- } else {
-		/* Something went wrong */
-		dev_err(dev, "request to suspend an already suspended queue\n");
- rc = -EBUSY;
- }
- return rc;
-}
-
-#endif
-
-int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
-{
- int rc = 0;
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- struct device *dev = drvdata_to_dev(drvdata);
-
-	/* Must be before enabling PM to avoid redundant suspends */
- pm_runtime_set_autosuspend_delay(dev, SSI_SUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(dev);
- /* activate the PM module */
- rc = pm_runtime_set_active(dev);
- if (rc != 0)
- return rc;
- /* enable the PM module*/
- pm_runtime_enable(dev);
-#endif
- return rc;
-}
-
-void ssi_power_mgr_fini(struct ssi_drvdata *drvdata)
-{
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- pm_runtime_disable(drvdata_to_dev(drvdata));
-#endif
-}
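
[Note: these callbacks would typically be wired into the platform driver through a dev_pm_ops table; a hedged sketch, with the table name and its hookup being illustrative:

    #include <linux/pm.h>

    static const struct dev_pm_ops ssi_pm_ops = {
            SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend,
                               ssi_power_mgr_runtime_resume, NULL)
    };

    /* then referenced as .driver.pm = &ssi_pm_ops in the platform_driver */
]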
diff --git a/drivers/staging/ccree/ssi_pm.h b/drivers/staging/ccree/ssi_pm.h
deleted file mode 100644
index 63673f60d2d8..000000000000
--- a/drivers/staging/ccree/ssi_pm.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_pm.h
- */
-
-#ifndef __SSI_POWER_MGR_H__
-#define __SSI_POWER_MGR_H__
-
-#include "ssi_config.h"
-#include "ssi_driver.h"
-
-#define SSI_SUSPEND_TIMEOUT 3000
-
-int ssi_power_mgr_init(struct ssi_drvdata *drvdata);
-
-void ssi_power_mgr_fini(struct ssi_drvdata *drvdata);
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_power_mgr_runtime_suspend(struct device *dev);
-
-int ssi_power_mgr_runtime_resume(struct device *dev);
-
-int ssi_power_mgr_runtime_get(struct device *dev);
-
-int ssi_power_mgr_runtime_put_suspend(struct device *dev);
-#endif
-
-#endif /*__POWER_MGR_H__*/
-
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
deleted file mode 100644
index a8a7dc672d4c..000000000000
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ /dev/null
@@ -1,610 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "ssi_config.h"
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <crypto/ctr.h>
-#ifdef FLUSH_CACHE_ALL
-#include <asm/cacheflush.h>
-#endif
-#include <linux/pm_runtime.h>
-#include "ssi_driver.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_request_mgr.h"
-#include "ssi_sysfs.h"
-#include "ssi_ivgen.h"
-#include "ssi_pm.h"
-
-#define SSI_MAX_POLL_ITER 10
-
-struct ssi_request_mgr_handle {
- /* Request manager resources */
- unsigned int hw_queue_size; /* HW capability */
- unsigned int min_free_hw_slots;
- unsigned int max_used_sw_slots;
- struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
- u32 req_queue_head;
- u32 req_queue_tail;
- u32 axi_completed;
- u32 q_free_slots;
- spinlock_t hw_lock;
- struct cc_hw_desc compl_desc;
- u8 *dummy_comp_buff;
- dma_addr_t dummy_comp_buff_dma;
- struct cc_hw_desc monitor_desc;
-
-#ifdef COMP_IN_WQ
- struct workqueue_struct *workq;
- struct delayed_work compwork;
-#else
- struct tasklet_struct comptask;
-#endif
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- bool is_runtime_suspended;
-#endif
-};
-
-static void comp_handler(unsigned long devarg);
-#ifdef COMP_IN_WQ
-static void comp_work_handler(struct work_struct *work);
-#endif
-
-void request_mgr_fini(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
- struct device *dev = drvdata_to_dev(drvdata);
-
- if (!req_mgr_h)
- return; /* Not allocated */
-
- if (req_mgr_h->dummy_comp_buff_dma != 0) {
- dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
- req_mgr_h->dummy_comp_buff_dma);
- }
-
- dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
- req_mgr_h->min_free_hw_slots));
- dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
-
-#ifdef COMP_IN_WQ
- flush_workqueue(req_mgr_h->workq);
- destroy_workqueue(req_mgr_h->workq);
-#else
- /* Kill tasklet */
- tasklet_kill(&req_mgr_h->comptask);
-#endif
- memset(req_mgr_h, 0, sizeof(struct ssi_request_mgr_handle));
- kfree(req_mgr_h);
- drvdata->request_mgr_handle = NULL;
-}
-
-int request_mgr_init(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *req_mgr_h;
- struct device *dev = drvdata_to_dev(drvdata);
- int rc = 0;
-
- req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
- if (!req_mgr_h) {
- rc = -ENOMEM;
- goto req_mgr_init_err;
- }
-
- drvdata->request_mgr_handle = req_mgr_h;
-
- spin_lock_init(&req_mgr_h->hw_lock);
-#ifdef COMP_IN_WQ
- dev_dbg(dev, "Initializing completion workqueue\n");
- req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
- if (unlikely(!req_mgr_h->workq)) {
- dev_err(dev, "Failed creating work queue\n");
- rc = -ENOMEM;
- goto req_mgr_init_err;
- }
- INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
-#else
- dev_dbg(dev, "Initializing completion tasklet\n");
- tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata);
-#endif
- req_mgr_h->hw_queue_size = cc_ioread(drvdata,
- CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
- dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
- if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
- dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
- req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
- rc = -ENOMEM;
- goto req_mgr_init_err;
- }
- req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
- req_mgr_h->max_used_sw_slots = 0;
-
- /* Allocate DMA word for "dummy" completion descriptor use */
- req_mgr_h->dummy_comp_buff = dma_alloc_coherent(dev, sizeof(u32),
- &req_mgr_h->dummy_comp_buff_dma,
- GFP_KERNEL);
- if (!req_mgr_h->dummy_comp_buff) {
-		dev_err(dev, "Not enough memory to allocate DMA (%zu) dummy buffer\n",
- sizeof(u32));
- rc = -ENOMEM;
- goto req_mgr_init_err;
- }
-
- /* Init. "dummy" completion descriptor */
- hw_desc_init(&req_mgr_h->compl_desc);
- set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
- set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
- sizeof(u32), NS_BIT, 1);
- set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
- set_queue_last_ind(&req_mgr_h->compl_desc);
-
- return 0;
-
-req_mgr_init_err:
- request_mgr_fini(drvdata);
- return rc;
-}
-
-static inline void enqueue_seq(
- void __iomem *cc_base,
- struct cc_hw_desc seq[], unsigned int seq_len)
-{
- int i;
-
- for (i = 0; i < seq_len; i++) {
- writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
- wmb();
- writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
-#ifdef DX_DUMP_DESCS
-		/* Note: 'dev' is not in scope here, so use pr_debug() */
-		pr_debug("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
-			 i, seq[i].word[0], seq[i].word[1], seq[i].word[2],
-			 seq[i].word[3], seq[i].word[4], seq[i].word[5]);
-#endif
- }
-}
-
-/*!
- * Completion will take place if and only if user requested completion
- * by setting "is_dout = 0" in send_request().
- *
- * \param dev
- * \param dx_compl_h The completion event to signal
- */
-static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
-{
- struct completion *this_compl = dx_compl_h;
-
- complete(this_compl);
-}
-
-static inline int request_mgr_queues_status_check(
- struct ssi_drvdata *drvdata,
- struct ssi_request_mgr_handle *req_mgr_h,
- unsigned int total_seq_len)
-{
- unsigned long poll_queue;
- struct device *dev = drvdata_to_dev(drvdata);
-
-	/* The SW queue is checked only once, as it cannot
-	 * change during the poll because the spinlock_bh
-	 * is held by the thread.
-	 */
- if (unlikely(((req_mgr_h->req_queue_head + 1) &
- (MAX_REQUEST_QUEUE_SIZE - 1)) ==
- req_mgr_h->req_queue_tail)) {
- dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
- req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
- return -EBUSY;
- }
-
- if ((likely(req_mgr_h->q_free_slots >= total_seq_len)))
- return 0;
-
-	/* Wait for space in the HW queue. Poll a constant number of iterations. */
- for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
- req_mgr_h->q_free_slots =
- cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
- if (unlikely(req_mgr_h->q_free_slots <
- req_mgr_h->min_free_hw_slots)) {
- req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
- }
-
- if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
-			/* There is enough room; return */
- return 0;
- }
-
- dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->q_free_slots, total_seq_len);
- }
-	/* No room in the HW queue; try again later */
- dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
- req_mgr_h->q_free_slots, total_seq_len);
- return -EAGAIN;
-}
-
-/*!
- * Enqueue caller request to crypto hardware.
- *
- * \param drvdata
- * \param ssi_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param is_dout If "true": completion is handled by the caller
- * If "false": this function adds a dummy descriptor completion
- * and waits upon completion signal.
- *
- * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
- */
-int send_request(
- struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
- struct cc_hw_desc *desc, unsigned int len, bool is_dout)
-{
- void __iomem *cc_base = drvdata->cc_base;
- struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
- unsigned int used_sw_slots;
- unsigned int iv_seq_len = 0;
- unsigned int total_seq_len = len; /*initial sequence length*/
- struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
- struct device *dev = drvdata_to_dev(drvdata);
- int rc;
- unsigned int max_required_seq_len = (total_seq_len +
- ((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
- SSI_IVPOOL_SEQ_LEN) +
- (!is_dout ? 1 : 0));
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_get(dev);
- if (rc != 0) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
- return rc;
- }
-#endif
-
- do {
- spin_lock_bh(&req_mgr_h->hw_lock);
-
-		/* Check whether there is enough room in the SW/HW queues:
-		 * in case of IV generation, add the maximum IV sequence size;
-		 * in case of no DOUT, add 1 for the internal completion
-		 * descriptor.
-		 */
- rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
- max_required_seq_len);
- if (likely(rc == 0))
-			/* There is enough room in the queue */
- break;
-		/* Something went wrong; release the spinlock */
- spin_unlock_bh(&req_mgr_h->hw_lock);
-
- if (rc != -EAGAIN) {
- /* Any error other than HW queue full
- * (SW queue is full)
- */
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(dev);
-#endif
- return rc;
- }
-
- /* HW queue is full - short sleep */
- msleep(1);
- } while (1);
-
-	/* An additional completion descriptor is needed in case the caller
-	 * did not enable any DLLI/MLLI DOUT bit in the given sequence.
-	 */
- if (!is_dout) {
- init_completion(&ssi_req->seq_compl);
- ssi_req->user_cb = request_mgr_complete;
- ssi_req->user_arg = &ssi_req->seq_compl;
- total_seq_len++;
- }
-
- if (ssi_req->ivgen_dma_addr_len > 0) {
- dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
- ssi_req->ivgen_dma_addr_len,
- &ssi_req->ivgen_dma_addr[0],
- &ssi_req->ivgen_dma_addr[1],
- &ssi_req->ivgen_dma_addr[2],
- ssi_req->ivgen_size);
-
- /* Acquire IV from pool */
- rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr,
- ssi_req->ivgen_dma_addr_len,
- ssi_req->ivgen_size, iv_seq, &iv_seq_len);
-
- if (unlikely(rc != 0)) {
- dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
- spin_unlock_bh(&req_mgr_h->hw_lock);
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(dev);
-#endif
- return rc;
- }
-
- total_seq_len += iv_seq_len;
- }
-
- used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE - 1));
- if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots))
- req_mgr_h->max_used_sw_slots = used_sw_slots;
-
- /* Enqueue request - must be locked with HW lock*/
- req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
- req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
- /* TODO: Use circ_buf.h ? */
-
- dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
-
-#ifdef FLUSH_CACHE_ALL
- flush_cache_all();
-#endif
-
- /* STAT_PHASE_4: Push sequence */
- enqueue_seq(cc_base, iv_seq, iv_seq_len);
- enqueue_seq(cc_base, desc, len);
- enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
-
- if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
-		/* This situation should never occur. It may indicate a problem
-		 * with power resume. Set the free slot count to 0 and hope
-		 * for the best.
-		 */
-		dev_err(dev, "HW free slot count mismatch\n");
- req_mgr_h->q_free_slots = 0;
- } else {
- /* Update the free slots in HW queue */
- req_mgr_h->q_free_slots -= total_seq_len;
- }
-
- spin_unlock_bh(&req_mgr_h->hw_lock);
-
- if (!is_dout) {
-		/* Wait upon sequence completion.
-		 * Return 0: operation completed successfully.
-		 */
- wait_for_completion(&ssi_req->seq_compl);
- return 0;
- }
- /* Operation still in process */
- return -EINPROGRESS;
-}
-
-/*!
- * Enqueue caller request to crypto hardware during the init process.
- * Assumes this function is not called in the middle of a flow,
- * since we set the QUEUE_LAST_IND flag in the last descriptor.
- *
- * \param drvdata
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- *
- * \return int Returns "0" upon success
- */
-int send_request_init(
- struct ssi_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len)
-{
- void __iomem *cc_base = drvdata->cc_base;
- struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
- unsigned int total_seq_len = len; /*initial sequence length*/
- int rc = 0;
-
-	/* Wait for space in the HW and SW FIFOs. Polls for at most SSI_MAX_POLL_ITER iterations. */
- rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
- total_seq_len);
- if (unlikely(rc != 0))
- return rc;
-
- set_queue_last_ind(&desc[(len - 1)]);
-
- enqueue_seq(cc_base, desc, len);
-
- /* Update the free slots in HW queue */
- req_mgr_h->q_free_slots =
- cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
-
- return 0;
-}
-
-void complete_request(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-#ifdef COMP_IN_WQ
- queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0);
-#else
- tasklet_schedule(&request_mgr_handle->comptask);
-#endif
-}
-
-#ifdef COMP_IN_WQ
-static void comp_work_handler(struct work_struct *work)
-{
- struct ssi_drvdata *drvdata =
- container_of(work, struct ssi_drvdata, compwork.work);
-
- comp_handler((unsigned long)drvdata);
-}
-#endif
-
-static void proc_completions(struct ssi_drvdata *drvdata)
-{
- struct ssi_crypto_req *ssi_req;
- struct device *dev = drvdata_to_dev(drvdata);
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- int rc = 0;
-#endif
-
- while (request_mgr_handle->axi_completed) {
- request_mgr_handle->axi_completed--;
-
- /* Dequeue request */
- if (unlikely(request_mgr_handle->req_queue_head == request_mgr_handle->req_queue_tail)) {
- /* We are supposed to handle a completion but our
- * queue is empty. This is not normal. Return and
- * hope for the best.
- */
- dev_err(dev, "Request queue is empty head == tail %u\n",
- request_mgr_handle->req_queue_head);
- break;
- }
-
- ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];
-
-#ifdef FLUSH_CACHE_ALL
- flush_cache_all();
-#endif
-
-#ifdef COMPLETION_DELAY
- /* Delay */
- {
- u32 axi_err;
- int i;
-
- dev_info(dev, "Delay\n");
- for (i = 0; i < 1000000; i++)
- axi_err = cc_ioread(drvdata,
- CC_REG(AXIM_MON_ERR));
- }
-#endif /* COMPLETION_DELAY */
-
- if (likely(ssi_req->user_cb))
- ssi_req->user_cb(dev, ssi_req->user_arg,
- drvdata->cc_base);
- request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
- dev_dbg(dev, "Dequeue request tail=%u\n",
- request_mgr_handle->req_queue_tail);
- dev_dbg(dev, "Request completed. axi_completed=%d\n",
- request_mgr_handle->axi_completed);
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_put_suspend(dev);
- if (rc != 0)
- dev_err(dev, "Failed to set runtime suspension %d\n",
- rc);
-#endif
- }
-}
-
-static inline u32 cc_axi_comp_count(struct ssi_drvdata *drvdata)
-{
- return FIELD_GET(AXIM_MON_COMP_VALUE,
- cc_ioread(drvdata, CC_REG(AXIM_MON_COMP)));
-}
-
-/* Deferred service handler, run as interrupt-fired tasklet */
-static void comp_handler(unsigned long devarg)
-{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- u32 irq;
-
- irq = (drvdata->irq & SSI_COMP_IRQ_MASK);
-
- if (irq & SSI_COMP_IRQ_MASK) {
-		/* To keep the interrupt from firing as we unmask it,
-		 * we clear it now.
-		 */
- cc_iowrite(drvdata, CC_REG(HOST_ICR), SSI_COMP_IRQ_MASK);
-
- /* Avoid race with above clear: Test completion counter
- * once more
- */
- request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
-
- while (request_mgr_handle->axi_completed) {
- do {
- proc_completions(drvdata);
- /* At this point (after proc_completions()),
- * request_mgr_handle->axi_completed is 0.
- */
- request_mgr_handle->axi_completed =
- cc_axi_comp_count(drvdata);
- } while (request_mgr_handle->axi_completed > 0);
-
- cc_iowrite(drvdata, CC_REG(HOST_ICR),
- SSI_COMP_IRQ_MASK);
-
- request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
- }
- }
-	/* After verifying that there is nothing to do,
-	 * unmask the AXI completion interrupt.
-	 */
- cc_iowrite(drvdata, CC_REG(HOST_IMR),
- cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
-}
-
-/*
- * Resume the queue configuration; the suspended flag is cleared under
- * the HW lock protection.
- */
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle;
-
- spin_lock_bh(&request_mgr_handle->hw_lock);
- request_mgr_handle->is_runtime_suspended = false;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-/*
- * suspend the queue configuration. Since it is used for the runtime suspend
- * only verify that the queue can be suspended.
- */
-int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- /* lock the send_request */
- spin_lock_bh(&request_mgr_handle->hw_lock);
- if (request_mgr_handle->req_queue_head !=
- request_mgr_handle->req_queue_tail) {
- spin_unlock_bh(&request_mgr_handle->hw_lock);
- return -EBUSY;
- }
- request_mgr_handle->is_runtime_suspended = true;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
-{
- struct ssi_request_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- return request_mgr_handle->is_runtime_suspended;
-}
-
-#endif
-
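
[Note: both queue indices above wrap with a power-of-two mask (MAX_REQUEST_QUEUE_SIZE is 4096), which is why the fill-level and full-queue tests need no modulo, and one slot is always kept free to distinguish full from empty. A short worked sketch of that index arithmetic:

    /* QSZ must be a power of two for the masking to work. */
    #define QSZ MAX_REQUEST_QUEUE_SIZE                      /* 4096 */

    unsigned int used = (head - tail) & (QSZ - 1);          /* fill level */
    bool full = (((head + 1) & (QSZ - 1)) == tail);         /* one slot spare */

    head = (head + 1) & (QSZ - 1);                          /* enqueue */
    tail = (tail + 1) & (QSZ - 1);                          /* dequeue */
]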
diff --git a/drivers/staging/ccree/ssi_request_mgr.h b/drivers/staging/ccree/ssi_request_mgr.h
deleted file mode 100644
index bdbbf89e5367..000000000000
--- a/drivers/staging/ccree/ssi_request_mgr.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_request_mgr.h
- * Request Manager
- */
-
-#ifndef __REQUEST_MGR_H__
-#define __REQUEST_MGR_H__
-
-#include "cc_hw_queue_defs.h"
-
-int request_mgr_init(struct ssi_drvdata *drvdata);
-
-/*!
- * Enqueue caller request to crypto hardware.
- *
- * \param drvdata
- * \param ssi_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param is_dout If "true": completion is handled by the caller
- * If "false": this function adds a dummy descriptor completion
- * and waits upon completion signal.
- *
- * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
- */
-int send_request(
- struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
- struct cc_hw_desc *desc, unsigned int len, bool is_dout);
-
-int send_request_init(
- struct ssi_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len);
-
-void complete_request(struct ssi_drvdata *drvdata);
-
-void request_mgr_fini(struct ssi_drvdata *drvdata);
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata);
-
-int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata);
-
-bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata);
-#endif
-
-#endif /*__REQUEST_MGR_H__*/
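For context, a minimal sketch of how a caller might have used the request-manager API declared above. The wrapper function and its flow are illustrative assumptions; only send_request() and the types come from the header:

static int example_enqueue(struct ssi_drvdata *drvdata,
			   struct ssi_crypto_req *ssi_req,
			   struct cc_hw_desc *desc, unsigned int len)
{
	/*
	 * is_dout = false: send_request() appends a dummy completion
	 * descriptor and blocks until the hardware signals completion,
	 * returning 0 on success.
	 */
	int rc = send_request(drvdata, ssi_req, desc, len, false);

	/*
	 * is_dout = true would instead return -EINPROGRESS and leave
	 * completion handling to the caller.
	 */
	return rc;
}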
diff --git a/drivers/staging/ccree/ssi_sram_mgr.h b/drivers/staging/ccree/ssi_sram_mgr.h
deleted file mode 100644
index 9ba1d59a0bae..000000000000
--- a/drivers/staging/ccree/ssi_sram_mgr.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __SSI_SRAM_MGR_H__
-#define __SSI_SRAM_MGR_H__
-
-#ifndef SSI_CC_SRAM_SIZE
-#define SSI_CC_SRAM_SIZE 4096
-#endif
-
-struct ssi_drvdata;
-
-/**
- * Address (offset) within CC internal SRAM
- */
-
-typedef u64 ssi_sram_addr_t;
-
-#define NULL_SRAM_ADDR ((ssi_sram_addr_t)-1)
-
-/*!
- * Initializes the SRAM pool.
- * The first X bytes of SRAM are reserved for ROM usage; hence, the pool
- * starts right after those X bytes.
- *
- * \param drvdata
- *
- * \return int Zero for success, negative value otherwise.
- */
-int ssi_sram_mgr_init(struct ssi_drvdata *drvdata);
-
-/*!
- * Uninitializes the SRAM pool.
- *
- * \param drvdata
- */
-void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata);
-
-/*!
- * Allocates a buffer from the SRAM pool.
- * Note: The caller is responsible for freeing the LAST allocated buffer.
- * This function does not take care of any fragmentation that may occur
- * due to the order of calls to alloc/free.
- *
- * \param drvdata
- * \param size The requested bytes to allocate
- */
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size);
-
-/**
- * ssi_sram_mgr_const2sram_desc() - Create a const-descriptor sequence to
- * set the values of the given array into SRAM.
- * Note: each const value can't exceed word size.
- *
- * @src: A pointer to array of words to set as consts.
- * @dst: The target SRAM buffer to set into
- * @nelement: The number of words in the "src" array
- * @seq: A pointer to the given IN/OUT descriptor sequence
- * @seq_len: A pointer to the given IN/OUT sequence length
- */
-void ssi_sram_mgr_const2sram_desc(
- const u32 *src, ssi_sram_addr_t dst,
- unsigned int nelement,
- struct cc_hw_desc *seq, unsigned int *seq_len);
-
-#endif /*__SSI_SRAM_MGR_H__*/
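A short usage sketch of the allocator above, under its stated last-allocated-first-freed constraint. The wrapper name and the -ENOMEM mapping are assumptions for illustration:

static int example_reserve(struct ssi_drvdata *drvdata)
{
	/* Offsets into CC-internal SRAM are returned as ssi_sram_addr_t;
	 * NULL_SRAM_ADDR signals that the pool is exhausted.
	 */
	ssi_sram_addr_t addr = ssi_sram_mgr_alloc(drvdata, 64);

	if (addr == NULL_SRAM_ADDR)
		return -ENOMEM;

	/* Per the note above, only the most recent allocation may be
	 * released, and fragmentation is the caller's problem.
	 */
	return 0;
}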
diff --git a/drivers/staging/ccree/ssi_sysfs.c b/drivers/staging/ccree/ssi_sysfs.c
deleted file mode 100644
index 5d39f15cdb59..000000000000
--- a/drivers/staging/ccree/ssi_sysfs.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "cc_crypto_ctx.h"
-#include "ssi_sysfs.h"
-
-#ifdef ENABLE_CC_SYSFS
-
-static struct ssi_drvdata *sys_get_drvdata(void);
-
-static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct ssi_drvdata *drvdata = sys_get_drvdata();
- u32 register_value;
- int offset = 0;
-
- register_value = cc_ioread(drvdata, CC_REG(HOST_SIGNATURE));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value);
- register_value = cc_ioread(drvdata, CC_REG(HOST_IRR));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value);
- register_value = cc_ioread(drvdata, CC_REG(HOST_POWER_DOWN_EN));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value);
- register_value = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value);
- register_value = cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value);
- return offset;
-}
-
-static ssize_t ssi_sys_help_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- char *help_str[] = {
-		"cat dump_regs ", "Print several CC register values",
- };
- int i = 0, offset = 0;
-
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "Usage:\n");
- for (i = 0; i < ARRAY_SIZE(help_str); i += 2)
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s\t\t%s\n", help_str[i], help_str[i + 1]);
-
- return offset;
-}
-
-/********************************************************
- * SYSFS objects *
- ********************************************************/
-/*
- * Structure used to create a directory
- * and its attributes in sysfs.
- */
-struct sys_dir {
- struct kobject *sys_dir_kobj;
- struct attribute_group sys_dir_attr_group;
- struct attribute **sys_dir_attr_list;
- u32 num_of_attrs;
- struct ssi_drvdata *drvdata; /* Associated driver context */
-};
-
-/* top level directory structures */
-static struct sys_dir sys_top_dir;
-
-/* TOP LEVEL ATTRIBUTES */
-static struct kobj_attribute ssi_sys_top_level_attrs[] = {
- __ATTR(dump_regs, 0444, ssi_sys_regdump_show, NULL),
- __ATTR(help, 0444, ssi_sys_help_show, NULL),
-#if defined CC_CYCLE_COUNT
- __ATTR(stats_host, 0664, ssi_sys_stat_host_db_show, ssi_sys_stats_host_db_clear),
- __ATTR(stats_cc, 0664, ssi_sys_stat_cc_db_show, ssi_sys_stats_cc_db_clear),
-#endif
-
-};
-
-static struct ssi_drvdata *sys_get_drvdata(void)
-{
- /* TODO: supporting multiple SeP devices would require avoiding
- * global "top_dir" and finding associated "top_dir" by traversing
- * up the tree to the kobject which matches one of the top_dir's
- */
- return sys_top_dir.drvdata;
-}
-
-static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
- struct kobject *parent_dir_kobj, const char *dir_name,
- struct kobj_attribute *attrs, u32 num_of_attrs)
-{
- int i;
-
- memset(sys_dir, 0, sizeof(struct sys_dir));
-
- sys_dir->drvdata = drvdata;
-
- /* initialize directory kobject */
- sys_dir->sys_dir_kobj =
- kobject_create_and_add(dir_name, parent_dir_kobj);
-
- if (!(sys_dir->sys_dir_kobj))
- return -ENOMEM;
- /* allocate memory for directory's attributes list */
- sys_dir->sys_dir_attr_list =
- kcalloc(num_of_attrs + 1, sizeof(struct attribute *),
- GFP_KERNEL);
-
- if (!(sys_dir->sys_dir_attr_list)) {
- kobject_put(sys_dir->sys_dir_kobj);
- return -ENOMEM;
- }
-
- sys_dir->num_of_attrs = num_of_attrs;
-
- /* initialize attributes list */
- for (i = 0; i < num_of_attrs; ++i)
- sys_dir->sys_dir_attr_list[i] = &attrs[i].attr;
-
- /* last list entry should be NULL */
- sys_dir->sys_dir_attr_list[num_of_attrs] = NULL;
-
- sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list;
-
- return sysfs_create_group(sys_dir->sys_dir_kobj,
- &sys_dir->sys_dir_attr_group);
-}
-
-static void sys_free_dir(struct sys_dir *sys_dir)
-{
- if (!sys_dir)
- return;
-
- kfree(sys_dir->sys_dir_attr_list);
-
- if (sys_dir->sys_dir_kobj)
- kobject_put(sys_dir->sys_dir_kobj);
-}
-
-int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata)
-{
- int retval;
- struct device *dev = drvdata_to_dev(drvdata);
-
- dev_info(dev, "setup sysfs under %s\n", sys_dev_obj->name);
-
- /* Initialize top directory */
- retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj, "cc_info",
- ssi_sys_top_level_attrs,
- ARRAY_SIZE(ssi_sys_top_level_attrs));
- return retval;
-}
-
-void ssi_sysfs_fini(void)
-{
- sys_free_dir(&sys_top_dir);
-}
-
-#endif /*ENABLE_CC_SYSFS*/
-
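The deleted sys_init_dir() above wraps the stock kobject/attribute-group pattern. A self-contained sketch of that underlying pattern, with illustrative demo_* names, looks like:

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "hello\n");
}

static struct kobj_attribute demo_attr = __ATTR(demo, 0444, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&demo_attr.attr,
	NULL,			/* attribute lists are NULL-terminated */
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
};

static int demo_sysfs_init(struct kobject *parent)
{
	/* Create the directory, then populate it as one atomic group. */
	struct kobject *kobj = kobject_create_and_add("cc_demo", parent);

	if (!kobj)
		return -ENOMEM;
	return sysfs_create_group(kobj, &demo_group);
}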
diff --git a/drivers/staging/ccree/ssi_sysfs.h b/drivers/staging/ccree/ssi_sysfs.h
deleted file mode 100644
index 44ae3d4c40b3..000000000000
--- a/drivers/staging/ccree/ssi_sysfs.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_sysfs.h
- * ARM CryptoCell sysfs APIs
- */
-
-#ifndef __SSI_SYSFS_H__
-#define __SSI_SYSFS_H__
-
-#include <asm/timex.h>
-
-/* forward declaration */
-struct ssi_drvdata;
-
-enum stat_phase {
- STAT_PHASE_0 = 0,
- STAT_PHASE_1,
- STAT_PHASE_2,
- STAT_PHASE_3,
- STAT_PHASE_4,
- STAT_PHASE_5,
- STAT_PHASE_6,
- MAX_STAT_PHASES,
-};
-
-enum stat_op {
- STAT_OP_TYPE_NULL = 0,
- STAT_OP_TYPE_ENCODE,
- STAT_OP_TYPE_DECODE,
- STAT_OP_TYPE_SETKEY,
- STAT_OP_TYPE_GENERIC,
- MAX_STAT_OP_TYPES,
-};
-
-int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata);
-void ssi_sysfs_fini(void);
-void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result);
-void update_cc_stat(unsigned int op_type, unsigned int phase, unsigned int elapsed_cycles);
-void display_all_stat_db(void);
-
-#endif /*__SSI_SYSFS_H__*/
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index bf3fe7c61be5..cae7e6e695b0 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx 'Clocking Wizard' driver
*
* Copyright (C) 2013 - 2014 Xilinx
*
* Sören Brinkmann <soren.brinkmann@xilinx.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/platform_device.h>
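The hunk above is the pattern this patch repeats across drivers/staging: the multi-line GPL notice is replaced by one machine-readable SPDX tag on the file's first line. Sketch of a resulting file top (file name illustrative):

// SPDX-License-Identifier: GPL-2.0
/*
 * example.c - copyright and description comments stay;
 * the license boilerplate paragraph is gone.
 */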
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index a1c1081906c5..c0bc413f7fe0 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.0+
/*
* comedi.h
* header file for COMEDI user API
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDI_H
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index b455ff6714eb..f693c2c0bec3 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_buf.c
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
* Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index f356386d833a..97fb9388bc22 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/comedi_compat32.c
* 32-bit ioctl compatibility for 64-bit comedi kernel module.
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/uaccess.h>
diff --git a/drivers/staging/comedi/comedi_compat32.h b/drivers/staging/comedi/comedi_compat32.h
index 0127c1f98bbf..3980e6e1bd0d 100644
--- a/drivers/staging/comedi/comedi_compat32.h
+++ b/drivers/staging/comedi/comedi_compat32.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/comedi_compat32.h
* 32-bit ioctl compatibility for 64-bit comedi kernel module.
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDI_COMPAT32_H
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index e19e395b0e44..ef733847eebe 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/comedi_fops.c
* comedi kernel module
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -2276,9 +2267,9 @@ done:
return retval;
}
-static unsigned int comedi_poll(struct file *file, poll_table *wait)
+static __poll_t comedi_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct comedi_file *cfp = file->private_data;
struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s, *s_read;
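The hunk above converts comedi_poll() to the __poll_t annotation. A minimal sketch of a poll handler in that style; demo_waitq and data_ready are assumed stand-ins for real driver state:

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static bool data_ready;

static __poll_t demo_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &demo_waitq, wait);
	if (data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;	/* readable */
	return mask;
}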
diff --git a/drivers/staging/comedi/comedi_pci.c b/drivers/staging/comedi/comedi_pci.c
index 51e023a1c0ee..126048b03f43 100644
--- a/drivers/staging/comedi/comedi_pci.c
+++ b/drivers/staging/comedi/comedi_pci.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_pci.c
* Comedi PCI driver specific functions.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/comedi_pci.h b/drivers/staging/comedi/comedi_pci.h
index 7dfd892c74b0..647a72441b8a 100644
--- a/drivers/staging/comedi/comedi_pci.h
+++ b/drivers/staging/comedi/comedi_pci.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_pci.h
* header file for Comedi PCI drivers
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDI_PCI_H
diff --git a/drivers/staging/comedi/comedi_pcmcia.c b/drivers/staging/comedi/comedi_pcmcia.c
index cd4742851c08..e16f35eae343 100644
--- a/drivers/staging/comedi/comedi_pcmcia.c
+++ b/drivers/staging/comedi/comedi_pcmcia.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_pcmcia.c
* Comedi PCMCIA driver specific functions.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/comedi_pcmcia.h b/drivers/staging/comedi/comedi_pcmcia.h
index 9e45c7c93278..c7d37b38e730 100644
--- a/drivers/staging/comedi/comedi_pcmcia.h
+++ b/drivers/staging/comedi/comedi_pcmcia.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_pcmcia.h
* header file for Comedi PCMCIA drivers
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDI_PCMCIA_H
diff --git a/drivers/staging/comedi/comedi_usb.c b/drivers/staging/comedi/comedi_usb.c
index 9c946d40b894..c632c2bae722 100644
--- a/drivers/staging/comedi/comedi_usb.c
+++ b/drivers/staging/comedi/comedi_usb.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_usb.c
* Comedi USB driver specific functions.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/comedi_usb.h b/drivers/staging/comedi/comedi_usb.h
index 132154ec792f..50287de7a239 100644
--- a/drivers/staging/comedi/comedi_usb.h
+++ b/drivers/staging/comedi/comedi_usb.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_usb.h
* header file for USB Comedi drivers
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDI_USB_H
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 1bb9986f865e..f3474a4ba69e 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedidev.h
* header file for kernel-only structures, variables, and constants
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDIDEV_H
@@ -186,23 +177,27 @@ struct comedi_subdevice {
unsigned int *chanlist; /* driver-owned chanlist (not used) */
- int (*insn_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insn_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insn_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insn_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- int (*do_cmd)(struct comedi_device *, struct comedi_subdevice *);
- int (*do_cmdtest)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_cmd *);
- int (*poll)(struct comedi_device *, struct comedi_subdevice *);
- int (*cancel)(struct comedi_device *, struct comedi_subdevice *);
+ int (*insn_read)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_write)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_bits)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_config)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
+
+ int (*do_cmd)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*do_cmdtest)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
+ int (*poll)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*cancel)(struct comedi_device *dev, struct comedi_subdevice *s);
/* called when the buffer changes */
- int (*buf_change)(struct comedi_device *, struct comedi_subdevice *);
+ int (*buf_change)(struct comedi_device *dev,
+ struct comedi_subdevice *s);
void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s,
void *data, unsigned int num_bytes,
@@ -445,9 +440,9 @@ struct comedi_driver {
/* public: */
const char *driver_name;
struct module *module;
- int (*attach)(struct comedi_device *, struct comedi_devconfig *);
- void (*detach)(struct comedi_device *);
- int (*auto_attach)(struct comedi_device *, unsigned long);
+ int (*attach)(struct comedi_device *dev, struct comedi_devconfig *it);
+ void (*detach)(struct comedi_device *dev);
+ int (*auto_attach)(struct comedi_device *dev, unsigned long context);
unsigned int num_names;
const char *const *board_name;
int offset;
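The struct comedi_driver hunk above only names the callback parameters; for orientation, a minimal legacy-attach driver skeleton using those callbacks (assumed example, not part of this patch):

static int demo_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	return 0;	/* subdevice allocation/setup elided */
}

static void demo_detach(struct comedi_device *dev)
{
	/* release resources acquired in demo_attach() */
}

static struct comedi_driver demo_driver = {
	.driver_name	= "comedi_demo",
	.module		= THIS_MODULE,
	.attach		= demo_attach,
	.detach		= demo_detach,
};
module_comedi_driver(demo_driver);

MODULE_LICENSE("GPL");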
diff --git a/drivers/staging/comedi/comedilib.h b/drivers/staging/comedi/comedilib.h
index f9b56396e161..e98cb9752dbc 100644
--- a/drivers/staging/comedi/comedilib.h
+++ b/drivers/staging/comedi/comedilib.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedilib.h
* Header file for kcomedilib
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_COMEDILIB_H
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 0b43db6371c6..e618a87521a3 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* module/drivers.c
* functions for manipulating drivers
@@ -5,16 +6,6 @@
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
* Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
index b79d3764a8a0..3d6105b5a11b 100644
--- a/drivers/staging/comedi/drivers/8255.c
+++ b/drivers/staging/comedi/drivers/8255.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/8255.c
* Driver for 8255
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/8255.h b/drivers/staging/comedi/drivers/8255.h
index 55a67af5152d..6cd1339ab83e 100644
--- a/drivers/staging/comedi/drivers/8255.h
+++ b/drivers/staging/comedi/drivers/8255.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* module/8255.h
* Header file for 8255
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _8255_H
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 38c05d1ec647..9ed05f962fdb 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI driver for generic PCI based 8255 digital i/o boards
* Copyright (C) 2012 H Hartley Sweeten <hsweeten@visionengravers.com>
@@ -9,16 +10,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index ccd1a91290bf..560649be9d13 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_1032.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -10,16 +11,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index 63991c49ff23..45ad4ba92f94 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_1500.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -9,16 +10,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index f1f8b1c422a7..6c8213ee1a74 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_1516.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -10,16 +11,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 9bfb79c2e5c8..10501fe6bb25 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_1564.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -9,16 +10,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index c63133a12a4e..050d0b4b3209 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_16xx.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -10,16 +11,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index 50f9eb25d7cb..a122f3f3f5ec 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_2032.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -10,16 +11,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index 2b382a52d80d..140d1514a106 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_2200.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -10,16 +11,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index 3630d75e36e8..d2810fdd5e63 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_3120.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -9,16 +10,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 1fdc0f8d7e1a..a38267928e5e 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_3501.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -10,16 +11,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index be1f6133ff1c..55784f24e2a7 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* addi_apci_3xxx.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
@@ -10,16 +11,6 @@
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.c b/drivers/staging/comedi/drivers/addi_watchdog.c
index 9d9853fe54a0..69b323fb869f 100644
--- a/drivers/staging/comedi/drivers/addi_watchdog.c
+++ b/drivers/staging/comedi/drivers/addi_watchdog.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI driver for the watchdog subdevice found on some addi-data boards
* Copyright (c) 2013 H Hartley Sweeten <hsweeten@visionengravers.com>
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index ad7e7c4a5232..d39b4eabce8d 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* adl_pci6208.c
* Comedi driver for ADLink 6208 series cards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index b0fc027cf485..d0081897fe47 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI driver for the ADLINK PCI-723x/743x series boards.
* Copyright (C) 2012 H Hartley Sweeten <hsweeten@visionengravers.com>
@@ -9,16 +10,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index da901c8dec0e..3022793b1bc5 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/adl_pci8164.c
*
* Hardware comedi driver for PCI-8164 Adlink card
* Copyright (C) 2004 Michel Lachine <mike@mikelachaine.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index 01d2ee931b28..f4dba6271d0d 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* adl_pci9111.c
* Hardware driver for PCI9111 ADLink cards: PCI-9111HR
* Copyright (C) 2002-2005 Emmanuel Pacaud <emmanuel.pacaud@univ-poitiers.fr>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 1cc9b7ef1ff9..2528ca0ede6d 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* comedi/drivers/adl_pci9118.c
*
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
index 315050454c26..5d431573bcca 100644
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ b/drivers/staging/comedi/drivers/adq12b.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* adq12b.c
* Driver for MicroAxial ADQ12-B data acquisition and control card
@@ -9,16 +10,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 2c1b6de30da8..6a93b04f1fdf 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* adv_pci1710.c
* Comedi driver for Advantech PCI-1710 series boards
diff --git a/drivers/staging/comedi/drivers/adv_pci1720.c b/drivers/staging/comedi/drivers/adv_pci1720.c
index 4830a1c93d15..2fcd7e8e7d85 100644
--- a/drivers/staging/comedi/drivers/adv_pci1720.c
+++ b/drivers/staging/comedi/drivers/adv_pci1720.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI driver for Advantech PCI-1720U
* Copyright (c) 2015 H Hartley Sweeten <hsweeten@visionengravers.com>
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index f82afd947310..771d61f87427 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* adv_pci1723.c
* Comedi driver for the Advantech PCI-1723 card.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c
index bf6a8f10118c..e8ab573c839f 100644
--- a/drivers/staging/comedi/drivers/adv_pci1724.c
+++ b/drivers/staging/comedi/drivers/adv_pci1724.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* adv_pci1724.c
* Comedi driver for the Advantech PCI-1724U card.
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/adv_pci1760.c b/drivers/staging/comedi/drivers/adv_pci1760.c
index 9f525ff7290c..f460f21efb90 100644
--- a/drivers/staging/comedi/drivers/adv_pci1760.c
+++ b/drivers/staging/comedi/drivers/adv_pci1760.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI driver for the Advantech PCI-1760
* Copyright (C) 2015 H Hartley Sweeten <hsweeten@visionengravers.com>
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
@@ -69,7 +60,7 @@
#define PCI1760_CMD_SET_DO 0x01 /* Set output state */
#define PCI1760_CMD_GET_DO 0x02 /* Read output status */
#define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */
-#define PCI1760_CMD_GET_FW_VER 0x0e /* Read firware version */
+#define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */
#define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */
#define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */
#define PCI1760_CMD_SET_PWM_LO(x) (0x11 + (x) * 2) /* Set "lo" period */
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index a8186687ca2c..5fef2aef7e03 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* comedi/drivers/adv_pci_dio.c
*
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index 43a0ce5721d3..f4beda1ed640 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* aio_aio12_8.c
* Driver for Access I/O Products PC-104 AIO12-8 Analog I/O Board
* Copyright (C) 2006 C&C Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/aio_iiro_16.c b/drivers/staging/comedi/drivers/aio_iiro_16.c
index 35b2f98f0de9..41c9c56816ef 100644
--- a/drivers/staging/comedi/drivers/aio_iiro_16.c
+++ b/drivers/staging/comedi/drivers/aio_iiro_16.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* aio_iiro_16.c
* Comedi driver for Access I/O Products 104-IIRO-16 board
* Copyright (C) 2006 C&C Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index f5cfa71a90c6..26e63d64ffc6 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/amplc_dio200.c
*
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.h b/drivers/staging/comedi/drivers/amplc_dio200.h
index 53fb86d59fc3..88c1d1063d5d 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.h
+++ b/drivers/staging/comedi/drivers/amplc_dio200.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/amplc_dio.h
*
@@ -8,16 +9,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef AMPLC_DIO200_H_INCLUDED
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index f6e4e984235d..82bd41d92509 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/amplc_dio200_common.c
*
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_pci.c b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
index 2598e6e7d47d..30d239731e0b 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_pci.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* comedi/drivers/amplc_dio200_pci.c
*
* Driver for Amplicon PCI215, PCI272, PCIe215, PCIe236, PCIe296.
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 875cc19cb969..b7dd15f5ec63 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/amplc_pc236.c
* Driver for Amplicon PC36AT DIO boards.
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: amplc_pc236
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.h b/drivers/staging/comedi/drivers/amplc_pc236.h
index 91d6d9c065b5..4b67090aab54 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.h
+++ b/drivers/staging/comedi/drivers/amplc_pc236.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/amplc_pc236.h
* Header for "amplc_pc236", "amplc_pci236" and "amplc_pc236_common".
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef AMPLC_PC236_H_INCLUDED
diff --git a/drivers/staging/comedi/drivers/amplc_pc236_common.c b/drivers/staging/comedi/drivers/amplc_pc236_common.c
index 0c02d3245679..01b90e4eca8a 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236_common.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/amplc_pc236_common.c
* Common support code for "amplc_pc236" and "amplc_pci236".
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 58b0b6b1a693..84c989f12faf 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Amplicon PC263 relay board.
*
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 4e554944bc71..657b736ef46d 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/amplc_pci224.c
* Driver for Amplicon PCI224 and PCI234 AO boards.
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 48c7890c3007..15fc7f19051a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/amplc_pci230.c
* Driver for Amplicon PCI230 and PCI260 Multifunction I/O boards.
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/amplc_pci236.c b/drivers/staging/comedi/drivers/amplc_pci236.c
index 31cc38b4bcad..86ea876a11be 100644
--- a/drivers/staging/comedi/drivers/amplc_pci236.c
+++ b/drivers/staging/comedi/drivers/amplc_pci236.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/amplc_pci236.c
* Driver for Amplicon PCI236 DIO boards.
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: amplc_pci236
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index 8d4069bc5716..c3efe14020a8 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Amplicon PCI263 relay board.
*
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 8ee732571588..41cc784320a9 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c6xdigio.c
* Hardware driver for Mechatronic Systems Inc. C6x_DIGIO DSP daughter card.
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 Dan Block
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 78d098f9e1f2..a5d171e71c33 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* cb_das16_cs.c
* Driver for Computer Boards PC-CARD DAS16/16.
@@ -5,16 +6,6 @@
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000, 2001, 2002 David A. Schleef <ds@schleef.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* PCMCIA support code for this driver is adapted from the dummy_cs.c
* driver of the Linux PCMCIA Card Services package.
*
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 3cd008acb657..8429d57087fd 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* cb_pcidas.c
* Developed by Ivan Martinez and Frank Mori Hess, with valuable help from
@@ -8,16 +9,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index b761f000c1dc..b657beedd5ff 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/cb_pcidas64.c
* This is a driver for the ComputerBoards/MeasurementComputing PCI-DAS
@@ -18,16 +19,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 987414741605..807a31e75883 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/cb_pcidda.c
* Driver for the ComputerBoards / MeasurementComputing PCI-DDA series.
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 47e38398921e..4e72a0778086 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/cb_pcimdas.c
* Comedi driver for Computer Boards PCIM-DAS1602/16 and PCIe-DAS1602/16
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 84ef45457c60..b33203f6a990 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/cb_pcimdda.c
* Computer Boards PCIM-DDA06-16 Comedi driver
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: cb_pcimdda
diff --git a/drivers/staging/comedi/drivers/comedi_8254.c b/drivers/staging/comedi/drivers/comedi_8254.c
index 0d5d56b61f60..d1d509e9add9 100644
--- a/drivers/staging/comedi/drivers/comedi_8254.c
+++ b/drivers/staging/comedi/drivers/comedi_8254.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_8254.c
* Generic 8254 timer/counter support
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/comedi_8254.h b/drivers/staging/comedi/drivers/comedi_8254.h
index 326bd44b063e..7faa2185282e 100644
--- a/drivers/staging/comedi/drivers/comedi_8254.h
+++ b/drivers/staging/comedi/drivers/comedi_8254.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_8254.h
* Generic 8254 timer/counter support
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDI_8254_H
diff --git a/drivers/staging/comedi/drivers/comedi_8255.c b/drivers/staging/comedi/drivers/comedi_8255.c
index b2441efc61cc..62baa0d79302 100644
--- a/drivers/staging/comedi/drivers/comedi_8255.c
+++ b/drivers/staging/comedi/drivers/comedi_8255.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_8255.c
* Generic 8255 digital I/O support
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 64a5ea3810d4..4392b5927a99 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_bond.c
* A Comedi driver to 'bond' or merge multiple drivers and devices as one.
@@ -5,16 +6,6 @@
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
* Copyright (C) 2005 Calin A. Culianu <calin@ajvar.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c
index 68ef9b1750be..b77dc8d5d3ff 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.c
+++ b/drivers/staging/comedi/drivers/comedi_isadma.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI ISA DMA support functions
* Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.h b/drivers/staging/comedi/drivers/comedi_isadma.h
index a193d3e8d185..ccef7c9c0a4d 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.h
+++ b/drivers/staging/comedi/drivers/comedi_isadma.h
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* COMEDI ISA DMA support functions
* Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDI_ISADMA_H
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
index 1bf8ddc6f07c..efaa57372aeb 100644
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ b/drivers/staging/comedi/drivers/comedi_parport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_parport.c
* Comedi driver for standard parallel port
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index c7e8194984e5..d437af721bd8 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/comedi_test.c
*
@@ -11,16 +12,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 5f848396c2f7..49be795b4971 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/contec_pci_dio.c
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/dac02.c b/drivers/staging/comedi/drivers/dac02.c
index a562df498b01..5ef8114c2c85 100644
--- a/drivers/staging/comedi/drivers/dac02.c
+++ b/drivers/staging/comedi/drivers/dac02.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* dac02.c
* Comedi driver for DAC02 compatible boards
@@ -9,16 +10,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 32dd8a857b6b..03f98b0287c8 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/daqboard2000.c
* hardware driver for IOtech DAQboard/2000
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: daqboard2000
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 3d8fc6ad44df..327ecf9aea30 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/das08.c
* comedi module for common DAS08 support (used by ISA/PCI/PCMCIA drivers)
@@ -6,16 +7,6 @@
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
* Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
* Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/das08.h b/drivers/staging/comedi/drivers/das08.h
index d27044cb7158..235d32f7c817 100644
--- a/drivers/staging/comedi/drivers/das08.h
+++ b/drivers/staging/comedi/drivers/das08.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* das08.h
*
* Header for common DAS08 support (used by ISA/PCI/PCMCIA drivers)
*
* Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DAS08_H
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 317a9b5e4a3b..223479f9ea3c 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for DAS008 PCMCIA boards
*
@@ -5,16 +6,6 @@
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
* Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* PCMCIA support code for this driver is adapted from the dummy_cs.c
* driver of the Linux PCMCIA Card Services package.
*
diff --git a/drivers/staging/comedi/drivers/das08_isa.c b/drivers/staging/comedi/drivers/das08_isa.c
index cdefc99b6db3..b22a45bd21d1 100644
--- a/drivers/staging/comedi/drivers/das08_isa.c
+++ b/drivers/staging/comedi/drivers/das08_isa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* das08_isa.c
* comedi driver for DAS08 ISA/PC-104 boards
@@ -6,16 +7,6 @@
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
* Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
* Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/das08_pci.c b/drivers/staging/comedi/drivers/das08_pci.c
index d8d27fa44491..7856fc13466a 100644
--- a/drivers/staging/comedi/drivers/das08_pci.c
+++ b/drivers/staging/comedi/drivers/das08_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* das08_pci.c
* comedi driver for DAS08 PCI boards
@@ -6,16 +7,6 @@
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
* Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
* Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index ddd4aeab6365..74ff204b585d 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* das16.c
* DAS16 driver
@@ -6,16 +7,6 @@
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
* Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
* Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index bb8d6ec0632e..72f8ed2c5008 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for CIO-DAS16/M1
* Author: Frank Mori Hess, based on code from the das16 driver.
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index e0a34c2687a8..f16aa7e9f4f3 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for Keithley DAS-1700/DAS-1800 series boards
* Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 0fdf5e02182f..f99211ec46de 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* das6402.c
* Comedi driver for DAS6402 compatible boards
@@ -5,16 +6,6 @@
*
* Rewrite of an experimental driver by:
* Copyright (C) 1999 Oystein Svendsen <svendsen@pvv.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index fd4cb4911671..8cf09ef3012f 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/das800.c
* Driver for Keitley das800 series boards and compatibles
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: das800
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 771cceb71069..75693cdde313 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* dmm32at.c
* Diamond Systems Diamond-MM-32-AT Comedi driver
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 30805797a957..a29880981d81 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* comedi/drivers/dt2801.c
* Device Driver for DataTranslation DT2801
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index fcd85475e429..05207a519755 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for Data Translation DT2811
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
@@ -316,7 +307,7 @@ static int dt2811_ai_cmd(struct comedi_device *dev,
static unsigned int dt2811_ns_to_timer(unsigned int *nanosec,
unsigned int flags)
{
- unsigned long long ns = *nanosec;
+ unsigned long long ns;
unsigned int ns_lo = COMEDI_MIN_SPEED;
unsigned int ns_hi = 0;
unsigned int divisor_hi = 0;
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 09984a66dba3..d2c715737361 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/dt2814.c
* Hardware driver for Data Translation DT2814
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: dt2814
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index ce5571971194..83026ba63d1c 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/dt2815.c
* Hardware driver for Data Translation DT2815
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: dt2815
diff --git a/drivers/staging/comedi/drivers/dt2817.c b/drivers/staging/comedi/drivers/dt2817.c
index 39d2566e49bf..9babb2a5196a 100644
--- a/drivers/staging/comedi/drivers/dt2817.c
+++ b/drivers/staging/comedi/drivers/dt2817.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/dt2817.c
* Hardware driver for Data Translation DT2817
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: dt2817
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 217a4b884689..3be927f1d3a9 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* dt282x.c
* Comedi driver for Data Translation DT2821 series
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 19e0b7be8495..2edf3ee91300 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* dt3000.c
* Data Translation DT3000 series driver
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 7ebca862ecaa..75cc9e8e5b94 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/dt9812.c
* COMEDI driver for DataTranslation DT9812 USB module
@@ -5,16 +6,6 @@
* Copyright (C) 2005 Anders Blomdell <anders.blomdell@control.lth.se>
*
* COMEDI - Linux Control and Measurement Device Interface
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index bab7ac9e6237..e50536731d11 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/dyna_pci10xx.c
* Copyright (C) 2011 Prashant Shah, pshah.mumbai@gmail.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index 0f278ffdad76..41c50c7a8f59 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* fl512.c
* Anders Gnistrup <ex18@kalman.iau.dtu.dk>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index e5b948405fd9..4bdf44d82879 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* gsc_hpdi.c
* Comedi driver the General Standards Corporation
@@ -8,16 +9,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index 28cf53e48b8d..b14aaed6b525 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* icp_multi.c
* Comedi driver for Inova ICP_MULTI board
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index 77e1d891f232..3eaf7c59de75 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ii_pci20kc.c
* Driver for Intelligent Instruments PCI-20001C carrier board and modules.
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index cbff3b41bb45..201f4f96c182 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/jr3_pci.c
* hardware driver for JR3/PCI force sensor board
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2007 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: jr3_pci
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 93198abf0ee5..e612cf605700 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ke_counter.c
* Comedi driver for Kolter-Electronic PCI Counter 1 Card
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 15a53204a36a..ee53571a8969 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* me4000.c
* Source code for the Meilhaus ME-4000 board family.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index c0b7a300e428..169742be17b8 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/me_daq.c
* Hardware driver for Meilhaus data acquisition cards:
* ME-2000i, ME-2600i, ME-3000vm1
*
* Copyright (C) 2002 Michael Hillmann <hillmann@syscongroup.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
index fbdf181d8ccc..ea430237efa7 100644
--- a/drivers/staging/comedi/drivers/mf6x4.c
+++ b/drivers/staging/comedi/drivers/mf6x4.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/mf6x4.c
* Driver for Humusoft MF634 and MF624 Data acquisition cards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
* Driver: mf6x4
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 05126ba4ba51..61e03ad84123 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/mite.c
* Hardware driver for NI Mite PCI interface chip
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index 02a627d3969d..d5e27ac25df8 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* module/mite.h
* Hardware driver for NI Mite PCI interface chip
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _MITE_H_
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index 9bda761433c2..bf3a3a08c7ab 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mpc624.c
* Hardware driver for a Micro/sys inc. MPC-624 PC/104 board
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
index b5a26f7b4332..c85c9ab3655f 100644
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ b/drivers/staging/comedi/drivers/multiq3.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* multiq3.c
* Hardware driver for Quanser Consulting MultiQ-3 board
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 84c62e256094..4d1eccb5041d 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ni_6527.c
* Comedi driver for National Instruments PCI-6527
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 07f38e385469..996074e471d3 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ni_65xx.c
* Comedi driver for National Instruments PCI-65xx static dio boards
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 6aa755ad3953..e521ed9d0887 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Hardware driver for NI 660x devices
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 1d3ff60efcb8..4e4ae31c8d0b 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for NI 670x devices
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index c69cd676f357..76e8d047f71e 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for National Instruments AT-A2150 boards
* Copyright (C) 2001, 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index 158fa29374fc..aad0b295ee2b 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ni_at_ao.c
* Driver for NI AT-AO-6/10 boards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000,2002 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index ae6ed96d7874..b9e9ab548c4b 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for NI AT-MIO E series cards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index fb59b0ffbba6..68ad9676f962 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for National Instruments AT-MIO16D board
* Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 8f6396edd21c..b9e525b9beb9 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_daq_700.c
* Driver for DAQCard-700 DIO/AI
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 733d3fbafa4d..44fb65afc218 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for National Instruments PCMCIA DAQ-Card DIO-24
* Copyright (C) 2002 Daniel Vecino Castel <dvecino@able.es>
@@ -7,16 +8,6 @@
* The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 51e5e942b442..c6cf37ccbc92 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_labpc.c
* Driver for National Instruments Lab-PC series boards and compatibles
* Copyright (C) 2001-2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index c2edadc7b3c6..f685047efb67 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Header for ni_labpc ISA/PCMCIA/PCI drivers
*
* Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _NI_LABPC_H
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
index b0dfb8eed16d..7fa2d39562db 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_common.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_labpc_common.c
*
* Common support code for "ni_labpc", "ni_labpc_pci" and "ni_labpc_cs".
*
* Copyright (C) 2001-2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 3d4d0b9ad4e1..4f7e2fe21254 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for National Instruments daqcard-1200 boards
* Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
@@ -7,16 +8,6 @@
* The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.c b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
index 29dbdf5ec25d..5657736a9408 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_isadma.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_labpc_isadma.c
* ISA DMA support for National Instruments Lab-PC series boards and
@@ -5,16 +6,6 @@
*
* Extracted from ni_labpc.c:
* Copyright (C) 2001-2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index cac089193121..d7d5a7973558 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_labpc_pci.c
* Driver for National Instruments Lab-PC PCI-1200
* Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 398347fedc47..5d610af6799f 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Hardware driver for DAQ-STC based boards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
* Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 21f823179356..4f37b4e58f09 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for NI PCMCIA MIO E series cards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index b27345abebe1..6692af5ff79b 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for National Instruments PCI-DIO-32HS
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999,2002 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 3a96913c025e..f9e466d18b3f 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Comedi driver for NI PCI-MIO E series cards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 61138e86a455..cb9d4c3a1926 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Register descriptions for NI DAQ-STC chip
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 15cb4088467b..ef919b21b7d9 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Support for NI general purpose counters
*
* Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 2012033414d3..23221cead8ca 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Header file for NI general purpose counter support code (ni_tio.c)
*
* COMEDI - Linux Control and Measurement Device Interface
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDI_NI_TIO_H
diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h
index 4e024eb5656b..f4d99d78208a 100644
--- a/drivers/staging/comedi/drivers/ni_tio_internal.h
+++ b/drivers/staging/comedi/drivers/ni_tio_internal.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Header file for NI general purpose counter support code (ni_tio.c and
* ni_tiocmd.c)
*
* COMEDI - Linux Control and Measurement Device Interface
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _COMEDI_NI_TIO_INTERNAL_H
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 9007c57544bf..050bee0b9515 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Command support for NI general purpose counters
*
* Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 9a0a96329a55..808ed92ed66f 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/ni_usb6501.c
* Comedi driver for National Instruments USB-6501
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2014 Luca Ellero <luca.ellero@brickedbrain.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index 3774daa9d661..a5937206bf1c 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* pcl711.c
* Comedi driver for PC-LabCard PCL-711 and AdSys ACL-8112 and compatibles
@@ -7,16 +8,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/pcl724.c b/drivers/staging/comedi/drivers/pcl724.c
index 74b07e1744c7..9c174f1f2fcf 100644
--- a/drivers/staging/comedi/drivers/pcl724.c
+++ b/drivers/staging/comedi/drivers/pcl724.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* pcl724.c
* Comedi driver for 8255 based ISA and PC/104 DIO boards
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
index 256850ccb6fa..0963d85873a9 100644
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ b/drivers/staging/comedi/drivers/pcl726.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* pcl726.c
* Comedi driver for 6/12-Channel D/A Output and DIO cards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
index ce958eef2a61..3d1e9150e5b5 100644
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ b/drivers/staging/comedi/drivers/pcl730.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* comedi/drivers/pcl730.c
* Driver for Advantech PCL-730 and clones
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index 9c75065dd26a..aefc1b849cf7 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* comedi/drivers/pcl812.c
*
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index c00a71f538ef..d722079f3327 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* pcl816.c
* Comedi driver for Advantech PCL-816 cards
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 5b5df0596ad9..eebb49751713 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* comedi/drivers/pcl818.c
*
diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c
index 588ae5ecec66..5779e005c0cb 100644
--- a/drivers/staging/comedi/drivers/pcm3724.c
+++ b/drivers/staging/comedi/drivers/pcm3724.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* pcm3724.c
* Comedi driver for Advantech PCM-3724 Digital I/O board
diff --git a/drivers/staging/comedi/drivers/pcmad.c b/drivers/staging/comedi/drivers/pcmad.c
index 12f94fe82f5b..fe5449bb1716 100644
--- a/drivers/staging/comedi/drivers/pcmad.c
+++ b/drivers/staging/comedi/drivers/pcmad.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* pcmad.c
* Hardware driver for Winsystems PCM-A/D12 and PCM-A/D16
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000,2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/pcmda12.c b/drivers/staging/comedi/drivers/pcmda12.c
index d86c5e2cd0c7..33e463b193a1 100644
--- a/drivers/staging/comedi/drivers/pcmda12.c
+++ b/drivers/staging/comedi/drivers/pcmda12.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* pcmda12.c
* Driver for Winsystems PC-104 based PCM-D/A-12 8-channel AO board.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 70ad497dd20b..72af1776f785 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* pcmmio.c
* Driver for Winsystems PC-104 based multifunction IO board.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2007 Calin A. Culianu <calin@ajvar.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 8ad64f2625fe..743fb226e2e4 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* pcmuio.c
* Comedi driver for Winsystems PC-104 based 48/96-channel DIO boards.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/plx9052.h b/drivers/staging/comedi/drivers/plx9052.h
index 2892e6528967..7950d1f57db6 100644
--- a/drivers/staging/comedi/drivers/plx9052.h
+++ b/drivers/staging/comedi/drivers/plx9052.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Definitions for the PLX-9052 PCI interface chip
*
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _PLX9052_H_
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index e23e63a097b5..469a9573acdc 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* plx9080.h
*
@@ -9,11 +10,6 @@
* Written by Krzysztof Halasa <khc@rgstudio.com.pl>
*
* Portions (C) SBE Inc., used by permission.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef __COMEDI_PLX9080_H
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index e00e9c6268ae..bb400e08f0bc 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/rtd520.c
* Comedi driver for Real Time Devices (RTD) PCI4520/DM7520
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2001 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index cd61d2645af4..f7c320c89ee6 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/rti800.c
* Hardware driver for Analog Devices RTI-800/815 board
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
index 6db58fcfd496..c6cf92bfff73 100644
--- a/drivers/staging/comedi/drivers/rti802.c
+++ b/drivers/staging/comedi/drivers/rti802.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rti802.c
* Comedi driver for Analog Devices RTI-802 board
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index e226275972c0..5d567ae78f28 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* s526.c
* Sensoray s526 Comedi driver
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index c906c9a5d944..0b3cfe934e14 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/s626.c
* Sensoray s626 Comedi driver
@@ -7,16 +8,6 @@
*
* Based on Sensoray Model 626 Linux driver Version 0.2
* Copyright (C) 2002-2004 Sensoray Co., Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
@@ -1385,8 +1376,7 @@ static void s626_reset_adc(struct comedi_device *dev, u8 *ppl)
jmp_adrs =
(u32)devpriv->rps_buf.physical_base +
(u32)((unsigned long)rps -
- (unsigned long)devpriv->
- rps_buf.logical_base);
+ (unsigned long)devpriv->rps_buf.logical_base);
for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) {
jmp_adrs += 8; /* Repeat to implement time delay: */
/* Jump to next RPS instruction. */
@@ -1903,9 +1893,9 @@ static int s626_ai_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- arg);
+ err |= comedi_check_trigger_arg_min(
+ &cmd->scan_begin_arg,
+ arg);
}
}
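
Aside from the license tag, the s626.c hunks are checkpatch-style re-wraps: a member access that had been split after "devpriv->" is rejoined so line breaks fall between arguments or operators rather than inside an expression. The rejoined form from the hunk above:

    jmp_adrs = (u32)devpriv->rps_buf.physical_base +
               (u32)((unsigned long)rps -
                     (unsigned long)devpriv->rps_buf.logical_base);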
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
index 4cef45263267..4bdc4fba736f 100644
--- a/drivers/staging/comedi/drivers/s626.h
+++ b/drivers/staging/comedi/drivers/s626.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/drivers/s626.h
* Sensoray s626 Comedi driver, header file
@@ -7,16 +8,6 @@
*
* Based on Sensoray Model 626 Linux driver Version 0.2
* Copyright (C) 2002-2004 Sensoray Co., Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef S626_H_INCLUDED
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index cc18e25103ca..ab69eeb2c1f1 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* serial2002.c
* Comedi driver for serial connected hardware
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2002 Anders Blomdell <anders.blomdell@control.lth.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
@@ -107,6 +98,7 @@ static long serial2002_tty_ioctl(struct file *f, unsigned int op,
static int serial2002_tty_write(struct file *f, unsigned char *buf, int count)
{
loff_t pos = 0;
+
return kernel_write(f, buf, count, &pos);
}
@@ -119,7 +111,7 @@ static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
poll_initwait(&table);
while (1) {
long elapsed;
- int mask;
+ __poll_t mask;
mask = f->f_op->poll(f, &table.pt);
if (mask & (POLLRDNORM | POLLRDBAND | POLLIN |
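
Beyond the license cleanup, serial2002.c switches its poll mask from a plain int to the kernel's __poll_t. __poll_t is a __bitwise type, so sparse can warn when a poll mask is mixed with ordinary integers. A minimal sketch of the pattern, assuming a hypothetical readiness check data_ready():

    #include <linux/types.h>
    #include <linux/poll.h>

    static __poll_t driver_poll(struct file *f, poll_table *wait)
    {
            __poll_t mask = 0;      /* bitwise type; sparse flags int mixing */

            if (data_ready())       /* hypothetical */
                    mask |= EPOLLIN | EPOLLRDNORM;
            return mask;
    }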
diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
index f9f634fd53cf..0628060e42ca 100644
--- a/drivers/staging/comedi/drivers/ssv_dnp.c
+++ b/drivers/staging/comedi/drivers/ssv_dnp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ssv_dnp.c
* generic comedi driver for SSV Embedded Systems' DIL/Net-PCs
@@ -5,16 +6,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index f4f05d29d30d..de177418190f 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* usbdux.c
* Copyright (C) 2003-2014 Bernd Porr, mail@berndporr.me.uk
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
@@ -809,7 +800,6 @@ static int usbdux_ao_insn_write(struct comedi_device *dev,
{
struct usbdux_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int val = s->readback[chan];
__le16 *p = (__le16 *)&devpriv->dux_commands[2];
int ret = -EBUSY;
int i;
@@ -825,7 +815,7 @@ static int usbdux_ao_insn_write(struct comedi_device *dev,
devpriv->dux_commands[4] = chan << 6;
for (i = 0; i < insn->n; i++) {
- val = data[i];
+ unsigned int val = data[i];
/* one 16 bit value */
*p = cpu_to_le16(val);
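
The usbdux.c hunks also remove a dead store: val was initialized from s->readback[chan] but unconditionally overwritten on every loop iteration, so the initial read did nothing. Declaring the variable inside the loop drops the dead read and narrows its scope to where it is used:

    for (i = 0; i < insn->n; i++) {
            unsigned int val = data[i];     /* declared at first use */

            *p = cpu_to_le16(val);          /* one 16-bit sample per pass */
            /* ... */
    }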
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 608403c7586b..e18c0723b760 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 456e9f13becb..af5605a875e2 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* usbduxsigma.c
* Copyright (C) 2011-2015 Bernd Porr, mail@berndporr.me.uk
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index a004aed0147a..6234b649d887 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* vmk80xx.c
* Velleman USB Board Low-Level Driver
@@ -6,16 +7,6 @@
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index 55d43c076b1c..df9bba1b69ed 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* kcomedilib/kcomedilib.c
 * a comedilib interface for kernel modules
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index 2644dd4d6143..50d38938ac6f 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* /proc interface for comedi
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index ce3a58a7a171..89d599877445 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* comedi/range.c
* comedi routines for voltage ranges
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/uaccess.h>
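These SPDX conversions follow the kernel's license-rules convention: a C source file takes the identifier as a C++-style comment on its very first line, while a header keeps a C-style comment so it stays safe for tooling that cannot parse //. A minimal illustration of both forms (the file names are made up):

    // SPDX-License-Identifier: GPL-2.0+
    /* example.c - in C sources the tag is line 1, C++ comment style */

    /* SPDX-License-Identifier: GPL-2.0+ */
    /* example.h - in headers the tag keeps C comment style */
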
diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile
index 40ff0d007695..49633042fcc9 100644
--- a/drivers/staging/dgnc/Makefile
+++ b/drivers/staging/dgnc/Makefile
@@ -1,5 +1,4 @@
obj-$(CONFIG_DGNC) += dgnc.o
dgnc-objs := dgnc_cls.o dgnc_driver.o\
- dgnc_mgmt.o dgnc_neo.o\
- dgnc_tty.o dgnc_utils.o
+ dgnc_tty.o
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 9639035fddd1..7e6cbfe4e4ee 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -397,7 +388,7 @@ static void cls_assert_modem_signals(struct channel_t *ch)
writeb(out, &ch->ch_cls_uart->mcr);
/* Give time for the UART to actually drop the signals */
- udelay(10);
+ usleep_range(10, 20);
}
static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
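The udelay() to usleep_range() change above follows the kernel's timers-howto guidance: udelay() busy-waits and belongs in atomic context, while usleep_range() actually sleeps and gives the timer subsystem a window to coalesce wakeups, so it is preferred for short delays in code that may sleep. A hedged sketch of the pattern (the function name is hypothetical):

    #include <linux/delay.h>

    static void settle_signals_example(void)
    {
            /* non-atomic context: sleep 10-20 us rather than spin */
            usleep_range(10, 20);
    }
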
@@ -1123,30 +1114,6 @@ static void cls_send_immediate_char(struct channel_t *ch, unsigned char c)
writeb(c, &ch->ch_cls_uart->txrx);
}
-static void cls_vpd(struct dgnc_board *brd)
-{
- ulong vpdbase; /* Start of io base of the card */
- u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */
- int i = 0;
-
- vpdbase = pci_resource_start(brd->pdev, 3);
- if (!vpdbase)
- return;
-
- re_map_vpdbase = ioremap(vpdbase, 0x400);
-
- if (!re_map_vpdbase)
- return;
-
- for (i = 0; i < 0x40; i++) {
- brd->vpd[i] = readb(re_map_vpdbase + i);
- pr_info("%x ", brd->vpd[i]);
- }
- pr_info("\n");
-
- iounmap(re_map_vpdbase);
-}
-
struct board_ops dgnc_cls_ops = {
.tasklet = cls_tasklet,
.intr = cls_intr,
@@ -1154,7 +1121,6 @@ struct board_ops dgnc_cls_ops = {
.uart_off = cls_uart_off,
.drain = cls_drain,
.param = cls_param,
- .vpd = cls_vpd,
.assert_modem_signals = cls_assert_modem_signals,
.flush_uart_write = cls_flush_uart_write,
.flush_uart_read = cls_flush_uart_read,
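The removed cls_vpd() was the stock BAR-read sequence: pci_resource_start() for the physical base, ioremap() for a CPU mapping, readb() through the __iomem pointer, iounmap() when done. A condensed sketch of that shape, assuming a valid struct pci_dev and keeping the BAR 3 / 0x400 window from the deleted code:

    #include <linux/io.h>
    #include <linux/pci.h>

    /* hypothetical helper mirroring the dropped cls_vpd() */
    static void read_vpd_example(struct pci_dev *pdev, u8 *buf, int len)
    {
            unsigned long base = pci_resource_start(pdev, 3);
            u8 __iomem *regs;
            int i;

            if (!base)
                    return;

            regs = ioremap(base, 0x400);       /* map a small window */
            if (!regs)
                    return;

            for (i = 0; i < len; i++)
                    buf[i] = readb(regs + i);  /* MMIO byte reads */

            iounmap(regs);                     /* always drop the mapping */
    }
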
diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h
index 9dfa9682a897..d31508542261 100644
--- a/drivers/staging/dgnc/dgnc_cls.h
+++ b/drivers/staging/dgnc/dgnc_cls.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
*/
#ifndef _DGNC_CLS_H
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index d9bf5da1b8e5..5d8c2d995dcc 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -19,37 +10,34 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include "dgnc_driver.h"
-#include "dgnc_pci.h"
-#include "dgnc_mgmt.h"
#include "dgnc_tty.h"
#include "dgnc_cls.h"
-#include "dgnc_neo.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Digi International, http://www.digi.com");
MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
MODULE_SUPPORTED_DEVICE("dgnc");
-static const struct file_operations dgnc_board_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = dgnc_mgmt_ioctl,
- .open = dgnc_mgmt_open,
- .release = dgnc_mgmt_close
-};
-
-uint dgnc_num_boards;
+static unsigned int dgnc_num_boards;
struct dgnc_board *dgnc_board[MAXBOARDS];
-DEFINE_SPINLOCK(dgnc_global_lock);
-DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
-uint dgnc_major;
-int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
-
-static struct class *dgnc_class;
+static DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
+static int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
static ulong dgnc_poll_time; /* Time of next poll */
static uint dgnc_poll_stop; /* Used to tell poller to stop */
static struct timer_list dgnc_poll_timer;
+#define DIGI_VID 0x114F
+#define PCI_DEVICE_CLASSIC_4_DID 0x0028
+#define PCI_DEVICE_CLASSIC_8_DID 0x0029
+#define PCI_DEVICE_CLASSIC_4_422_DID 0x00D0
+#define PCI_DEVICE_CLASSIC_8_422_DID 0x00D1
+
+#define PCI_DEVICE_CLASSIC_4_PCI_NAME "ClassicBoard 4 PCI"
+#define PCI_DEVICE_CLASSIC_8_PCI_NAME "ClassicBoard 8 PCI"
+#define PCI_DEVICE_CLASSIC_4_422_PCI_NAME "ClassicBoard 4 422 PCI"
+#define PCI_DEVICE_CLASSIC_8_422_PCI_NAME "ClassicBoard 8 422 PCI"
+
static const struct pci_device_id dgnc_pci_tbl[] = {
{PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_4_DID), .driver_data = 0},
{PCI_DEVICE(DIGI_VID, PCI_DEVICE_CLASSIC_4_422_DID), .driver_data = 1},
@@ -70,19 +58,6 @@ static const struct board_id dgnc_ids[] = {
{ PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
{ PCI_DEVICE_CLASSIC_8_422_PCI_NAME, 8, 0 },
- { PCI_DEVICE_NEO_4_PCI_NAME, 4, 0 },
- { PCI_DEVICE_NEO_8_PCI_NAME, 8, 0 },
- { PCI_DEVICE_NEO_2DB9_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_2DB9PRI_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_2RJ45_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_1_422_PCI_NAME, 1, 0 },
- { PCI_DEVICE_NEO_1_422_485_PCI_NAME, 1, 0 },
- { PCI_DEVICE_NEO_2_422_485_PCI_NAME, 2, 0 },
- { PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME, 8, 1 },
- { PCI_DEVICE_NEO_EXPRESS_4_PCI_NAME, 4, 1 },
- { PCI_DEVICE_NEO_EXPRESS_4RJ45_PCI_NAME, 4, 1 },
- { PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME, 8, 1 },
{ NULL, 0, 0 }
};
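The surviving dgnc_pci_tbl[] is the standard PCI probe-matching table: each PCI_DEVICE(vendor, device) entry carries a driver_data index into the per-board dgnc_ids[] descriptors. A minimal sketch of the idiom, reusing the Digi vendor ID quoted above but with an otherwise made-up table (drivers normally also export the table via MODULE_DEVICE_TABLE for module autoloading):

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id example_pci_tbl[] = {
            { PCI_DEVICE(0x114F, 0x0028), .driver_data = 0 }, /* index into board descriptors */
            { 0 }                                             /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);
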
@@ -101,7 +76,6 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
{
struct dgnc_board *brd;
unsigned int pci_irq;
- int i = 0;
int rc = 0;
brd = kzalloc(sizeof(*brd), GFP_KERNEL);
@@ -110,16 +84,10 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
/* store the info for the board we've found */
brd->boardnum = dgnc_num_boards;
- brd->vendor = dgnc_pci_tbl[id].vendor;
brd->device = dgnc_pci_tbl[id].device;
brd->pdev = pdev;
- brd->pci_bus = pdev->bus->number;
- brd->pci_slot = PCI_SLOT(pdev->devfn);
brd->name = dgnc_ids[id].name;
brd->maxports = dgnc_ids[id].maxports;
- if (dgnc_ids[i].is_pci_express)
- brd->bd_flags |= BD_IS_PCI_EXPRESS;
- brd->dpastatus = BD_NOFEP;
init_waitqueue_head(&brd->state_wait);
spin_lock_init(&brd->bd_lock);
@@ -127,11 +95,6 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
brd->state = BOARD_FOUND;
- /* store which card & revision we have */
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
-
pci_irq = pdev->irq;
brd->irq = pci_irq;
@@ -140,9 +103,6 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
case PCI_DEVICE_CLASSIC_8_DID:
case PCI_DEVICE_CLASSIC_4_422_DID:
case PCI_DEVICE_CLASSIC_8_422_DID:
-
- brd->dpatype = T_CLASSIC | T_PCIBUS;
-
/*
* For PCI ClassicBoards
* PCI Local Address (i.e. "resource" number) space
@@ -182,9 +142,6 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
if (rc < 0)
goto failed;
- /* Get and store the board VPD, if it exists */
- brd->bd_ops->vpd(brd);
-
/*
* Enable Local Interrupt 1 (0x1),
* Local Interrupt 1 Polarity Active high (0x2),
@@ -194,55 +151,6 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
break;
- case PCI_DEVICE_NEO_4_DID:
- case PCI_DEVICE_NEO_8_DID:
- case PCI_DEVICE_NEO_2DB9_DID:
- case PCI_DEVICE_NEO_2DB9PRI_DID:
- case PCI_DEVICE_NEO_2RJ45_DID:
- case PCI_DEVICE_NEO_2RJ45PRI_DID:
- case PCI_DEVICE_NEO_1_422_DID:
- case PCI_DEVICE_NEO_1_422_485_DID:
- case PCI_DEVICE_NEO_2_422_485_DID:
- case PCI_DEVICE_NEO_EXPRESS_8_DID:
- case PCI_DEVICE_NEO_EXPRESS_4_DID:
- case PCI_DEVICE_NEO_EXPRESS_4RJ45_DID:
- case PCI_DEVICE_NEO_EXPRESS_8RJ45_DID:
-
- /*
- * This chip is set up 100% when we get to it.
- * No need to enable global interrupts or anything.
- */
- if (brd->bd_flags & BD_IS_PCI_EXPRESS)
- brd->dpatype = T_NEO_EXPRESS | T_PCIBUS;
- else
- brd->dpatype = T_NEO | T_PCIBUS;
-
- brd->membase = pci_resource_start(pdev, 0);
- brd->membase_end = pci_resource_end(pdev, 0);
-
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
-
- brd->bd_ops = &dgnc_neo_ops;
-
- brd->bd_uart_offset = 0x200;
- brd->bd_dividend = 921600;
-
- rc = dgnc_do_remap(brd);
-
- if (rc < 0)
- goto failed;
-
- /* Read and store the dvid after remapping */
- brd->dvid = readb(brd->re_map_membase + 0x8D);
-
- /* Get and store the board VPD, if it exists */
- brd->bd_ops->vpd(brd);
-
- break;
-
default:
dev_err(&brd->pdev->dev,
"Didn't find any compatible Neo/Classic PCI boards.\n");
@@ -273,7 +181,6 @@ static int dgnc_request_irq(struct dgnc_board *brd)
dev_err(&brd->pdev->dev,
"Failed to hook IRQ %d\n", brd->irq);
brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
return -ENODEV;
}
}
@@ -364,7 +271,6 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
brd->state = BOARD_READY;
- brd->dpastatus = BD_RUNNING;
dgnc_board[dgnc_num_boards++] = brd;
@@ -388,32 +294,7 @@ static struct pci_driver dgnc_driver = {
static int dgnc_start(void)
{
- int rc = 0;
unsigned long flags;
- struct device *dev;
-
- rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
- return rc;
- }
- dgnc_major = rc;
-
- dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
- if (IS_ERR(dgnc_class)) {
- rc = PTR_ERR(dgnc_class);
- pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
- goto failed_class;
- }
-
- dev = device_create(dgnc_class, NULL,
- MKDEV(dgnc_major, 0),
- NULL, "dgnc_mgmt");
- if (IS_ERR(dev)) {
- rc = PTR_ERR(dev);
- pr_err(DRVSTR ": Can't create device (%d)\n", rc);
- goto failed_device;
- }
/* Start the poller */
spin_lock_irqsave(&dgnc_poll_lock, flags);
@@ -425,13 +306,6 @@ static int dgnc_start(void)
add_timer(&dgnc_poll_timer);
return 0;
-
-failed_device:
- class_destroy(dgnc_class);
-failed_class:
- unregister_chrdev(dgnc_major, "dgnc");
-
- return rc;
}
/* Free all the memory associated with a board */
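The body deleted from dgnc_start() was the classic character-device bring-up with goto-based unwind: register_chrdev() for a dynamic major, class_create() and device_create() for the /dev node, and error labels that tear everything down in reverse order. A hedged sketch of that shape (all names are illustrative, not this driver's API):

    #include <linux/device.h>
    #include <linux/fs.h>

    static int example_chardev_start(const struct file_operations *fops)
    {
            int rc;
            struct class *cls;
            struct device *dev;
            int major = register_chrdev(0, "example", fops);

            if (major < 0)
                    return major;

            cls = class_create(THIS_MODULE, "example");
            if (IS_ERR(cls)) {
                    rc = PTR_ERR(cls);
                    goto failed_class;
            }

            dev = device_create(cls, NULL, MKDEV(major, 0), NULL, "example0");
            if (IS_ERR(dev)) {
                    rc = PTR_ERR(dev);
                    goto failed_device;
            }
            return 0;

    failed_device:
            class_destroy(cls);
    failed_class:
            unregister_chrdev(major, "example");
            return rc;
    }
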
@@ -495,10 +369,6 @@ static void cleanup(void)
/* Turn off poller right away. */
del_timer_sync(&dgnc_poll_timer);
- device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
- class_destroy(dgnc_class);
- unregister_chrdev(dgnc_major, "dgnc");
-
for (i = 0; i < dgnc_num_boards; ++i) {
dgnc_cleanup_tty(dgnc_board[i]);
dgnc_cleanup_board(dgnc_board[i]);
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index efdb11a5e27f..b4d9f714c60a 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
*/
#ifndef _DGNC_DRIVER_H
@@ -82,42 +73,27 @@ struct board_ops {
void (*uart_off)(struct channel_t *ch);
int (*drain)(struct tty_struct *tty, uint seconds);
void (*param)(struct tty_struct *tty);
- void (*vpd)(struct dgnc_board *brd);
void (*assert_modem_signals)(struct channel_t *ch);
void (*flush_uart_write)(struct channel_t *ch);
void (*flush_uart_read)(struct channel_t *ch);
void (*disable_receiver)(struct channel_t *ch);
void (*enable_receiver)(struct channel_t *ch);
- void (*send_break)(struct channel_t *ch, int);
+ void (*send_break)(struct channel_t *ch, int msec);
void (*send_start_character)(struct channel_t *ch);
void (*send_stop_character)(struct channel_t *ch);
void (*copy_data_from_queue_to_uart)(struct channel_t *ch);
uint (*get_uart_bytes_left)(struct channel_t *ch);
- void (*send_immediate_char)(struct channel_t *ch, unsigned char);
+ void (*send_immediate_char)(struct channel_t *ch, unsigned char c);
};
-/* Device flag definitions for bd_flags. */
-
-#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */
-
/**
* struct dgnc_board - Per board information.
* @boardnum: Board number (0 - 32).
*
* @name: Product name.
* @pdev: Pointer to the pci_dev structure.
- * @bd_flags: Board flags.
- * @vendor: PCI vendor ID.
* @device: PCI device ID.
- * @subvendor: PCI subsystem vendor ID.
- * @subdevice: PCI subsystem device ID.
- * @rev: PCI revision ID.
- * @pci_bus: PCI bus value.
- * @pci_slot: PCI slot value.
* @maxports: Maximum ports this board can handle.
- * @dvid: Board specific device ID.
- * @vpd: VPD of this board, if found.
- * @serial_num: Serial number of this board, if found in VPD.
* @bd_lock: Used to protect board.
* @bd_intr_lock: Protect poller tasklet and interrupt routine from each other.
* @state: State of the card.
@@ -135,8 +111,6 @@ struct board_ops {
* @serial_name: Serial driver name.
* @print_driver: Pointer to the print driver.
* @print_name: Print driver name.
- * @dpatype: Board type as defined by DPA.
- * @dpastatus: Board status as defined by DPA.
* @bd_dividend: Board/UART's specific dividend.
* @bd_ops: Pointer to board operations structure.
*/
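struct board_ops is a hand-rolled vtable: one function-pointer table per UART family, so common code can dispatch through brd->bd_ops without caring whether a Classic or (formerly) a Neo chip sits underneath. A stripped-down sketch of the pattern, with hypothetical implementation names:

    struct example_ops {
            void (*uart_init)(struct channel_t *ch);
            void (*send_break)(struct channel_t *ch, int msec);
    };

    static void example_uart_init(struct channel_t *ch) { /* ... */ }
    static void example_send_break(struct channel_t *ch, int msec) { /* ... */ }

    static struct example_ops example_classic_ops = {
            .uart_init  = example_uart_init,
            .send_break = example_send_break,
    };

    /* common code then dispatches indirectly, e.g. ops->uart_init(ch); */
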
@@ -144,18 +118,8 @@ struct dgnc_board {
int boardnum;
char *name;
struct pci_dev *pdev;
- unsigned long bd_flags;
- u16 vendor;
u16 device;
- u16 subvendor;
- u16 subdevice;
- unsigned char rev;
- uint pci_bus;
- uint pci_slot;
uint maxports;
- unsigned char dvid;
- unsigned char vpd[128];
- unsigned char serial_num[20];
/* used to protect the board */
spinlock_t bd_lock;
@@ -189,9 +153,6 @@ struct dgnc_board {
struct tty_driver *print_driver;
char print_name[200];
- u16 dpatype;
- u16 dpastatus;
-
uint bd_dividend;
struct board_ops *bd_ops;
@@ -285,7 +246,6 @@ struct un_t {
* @ch_wopen: Waiting for open process count.
* @ch_mostat: FEP output modem status.
* @ch_mistat: FEP input modem status.
- * @chc_neo_uart: Pointer to the mapped neo UART struct
* @ch_cls_uart: Pointer to the mapped cls UART struct
* @ch_cached_lsr: Cached value of the LSR register.
* @ch_rqueue: Read queue buffer, malloc'ed.
@@ -344,7 +304,6 @@ struct channel_t {
unsigned char ch_mostat;
unsigned char ch_mistat;
- struct neo_uart_struct __iomem *ch_neo_uart;
struct cls_uart_struct __iomem *ch_cls_uart;
unsigned char ch_cached_lsr;
@@ -381,11 +340,6 @@ struct channel_t {
ulong ch_xoff_sends;
};
-extern uint dgnc_major; /* Our driver/mgmt major */
-extern int dgnc_poll_tick; /* Poll interval - 20 ms */
-extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
-extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
-extern uint dgnc_num_boards; /* Total number of boards */
extern struct dgnc_board *dgnc_board[MAXBOARDS];/* Array of boards */
#endif /* _DGNC_DRIVER_H */
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
deleted file mode 100644
index 3ca473b47daf..000000000000
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-/*
- * This file implements the mgmt functionality for the
- * Neo and ClassicBoard based product lines.
- */
-
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/serial_reg.h>
-#include <linux/termios.h>
-#include <linux/uaccess.h>
-
-#include "dgnc_driver.h"
-#include "dgnc_pci.h"
-#include "dgnc_mgmt.h"
-
-/* Our "in use" variables, to enforce 1 open only */
-static int dgnc_mgmt_in_use[MAXMGMTDEVICES];
-
-/**
- * dgnc_mgmt_open() - Open the mgmt/downld/dpa device.
- */
-int dgnc_mgmt_open(struct inode *inode, struct file *file)
-{
- unsigned long flags;
- unsigned int minor = iminor(inode);
- int rc = 0;
-
- spin_lock_irqsave(&dgnc_global_lock, flags);
-
- if (minor >= MAXMGMTDEVICES) {
- rc = -ENXIO;
- goto out;
- }
- /* Only allow 1 open at a time on mgmt device */
- if (dgnc_mgmt_in_use[minor]) {
- rc = -EBUSY;
- goto out;
- }
- dgnc_mgmt_in_use[minor]++;
-
-out:
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
-
- return rc;
-}
-
-/**
- * dgnc_mgmt_close() - Close the mgmt/dpa device
- */
-int dgnc_mgmt_close(struct inode *inode, struct file *file)
-{
- unsigned long flags;
- unsigned int minor = iminor(inode);
- int rc = 0;
-
- spin_lock_irqsave(&dgnc_global_lock, flags);
-
- if (minor >= MAXMGMTDEVICES) {
- rc = -ENXIO;
- goto out;
- }
- dgnc_mgmt_in_use[minor] = 0;
-
-out:
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
-
- return rc;
-}
-
-/**
- * dgnc_mgmt_ioctl() - Ioctl the mgmt/dpa device.
- */
-long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- unsigned long flags;
- void __user *uarg = (void __user *)arg;
-
- switch (cmd) {
- case DIGI_GETDD:
- {
- /*
- * This returns the total number of boards
- * in the system, as well as driver version
- * and has space for a reserved entry
- */
- struct digi_dinfo ddi;
-
- spin_lock_irqsave(&dgnc_global_lock, flags);
-
- memset(&ddi, 0, sizeof(ddi));
- ddi.dinfo_nboards = dgnc_num_boards;
- sprintf(ddi.dinfo_version, "%s", DG_PART);
-
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
-
- if (copy_to_user(uarg, &ddi, sizeof(ddi)))
- return -EFAULT;
-
- break;
- }
-
- case DIGI_GETBD:
- {
- int brd;
-
- struct digi_info di;
-
- if (copy_from_user(&brd, uarg, sizeof(int)))
- return -EFAULT;
-
- if (brd < 0 || brd >= dgnc_num_boards)
- return -ENODEV;
-
- memset(&di, 0, sizeof(di));
-
- di.info_bdnum = brd;
-
- spin_lock_irqsave(&dgnc_board[brd]->bd_lock, flags);
-
- di.info_bdtype = dgnc_board[brd]->dpatype;
- di.info_bdstate = dgnc_board[brd]->dpastatus;
- di.info_ioport = 0;
- di.info_physaddr = (ulong)dgnc_board[brd]->membase;
- di.info_physsize = (ulong)dgnc_board[brd]->membase
- - dgnc_board[brd]->membase_end;
- if (dgnc_board[brd]->state != BOARD_FAILED)
- di.info_nports = dgnc_board[brd]->nasync;
- else
- di.info_nports = 0;
-
- spin_unlock_irqrestore(&dgnc_board[brd]->bd_lock, flags);
-
- if (copy_to_user(uarg, &di, sizeof(di)))
- return -EFAULT;
-
- break;
- }
-
- case DIGI_GET_NI_INFO:
- {
- struct channel_t *ch;
- struct ni_info ni;
- unsigned char mstat = 0;
- uint board = 0;
- uint channel = 0;
-
- if (copy_from_user(&ni, uarg, sizeof(ni)))
- return -EFAULT;
-
- board = ni.board;
- channel = ni.channel;
-
- if (board >= dgnc_num_boards)
- return -ENODEV;
-
- if (channel >= dgnc_board[board]->nasync)
- return -ENODEV;
-
- ch = dgnc_board[board]->channels[channel];
-
- if (!ch)
- return -ENODEV;
-
- memset(&ni, 0, sizeof(ni));
- ni.board = board;
- ni.channel = channel;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- mstat = ch->ch_mostat | ch->ch_mistat;
-
- if (mstat & UART_MCR_DTR) {
- ni.mstat |= TIOCM_DTR;
- ni.dtr = TIOCM_DTR;
- }
- if (mstat & UART_MCR_RTS) {
- ni.mstat |= TIOCM_RTS;
- ni.rts = TIOCM_RTS;
- }
- if (mstat & UART_MSR_CTS) {
- ni.mstat |= TIOCM_CTS;
- ni.cts = TIOCM_CTS;
- }
- if (mstat & UART_MSR_RI) {
- ni.mstat |= TIOCM_RI;
- ni.ri = TIOCM_RI;
- }
- if (mstat & UART_MSR_DCD) {
- ni.mstat |= TIOCM_CD;
- ni.dcd = TIOCM_CD;
- }
- if (mstat & UART_MSR_DSR)
- ni.mstat |= TIOCM_DSR;
-
- ni.iflag = ch->ch_c_iflag;
- ni.oflag = ch->ch_c_oflag;
- ni.cflag = ch->ch_c_cflag;
- ni.lflag = ch->ch_c_lflag;
-
- if (ch->ch_digi.digi_flags & CTSPACE ||
- ch->ch_c_cflag & CRTSCTS)
- ni.hflow = 1;
- else
- ni.hflow = 0;
-
- if ((ch->ch_flags & CH_STOPI) ||
- (ch->ch_flags & CH_FORCED_STOPI))
- ni.recv_stopped = 1;
- else
- ni.recv_stopped = 0;
-
- if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP))
- ni.xmit_stopped = 1;
- else
- ni.xmit_stopped = 0;
-
- ni.curtx = ch->ch_txcount;
- ni.currx = ch->ch_rxcount;
-
- ni.baud = ch->ch_old_baud;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- if (copy_to_user(uarg, &ni, sizeof(ni)))
- return -EFAULT;
-
- break;
- }
- }
- return 0;
-}
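The deleted ioctl handler follows the usual unlocked_ioctl discipline: snapshot driver state into a stack structure while holding a spinlock, drop the lock, and only then call copy_to_user(), since the copy may fault and sleep. A minimal sketch of that ordering with a hypothetical info struct and lock:

    #include <linux/fs.h>
    #include <linux/spinlock.h>
    #include <linux/uaccess.h>

    static DEFINE_SPINLOCK(example_lock);
    static int example_num_boards;

    struct example_info {
            int nboards;
    };

    static long example_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
    {
            struct example_info info;
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            info.nboards = example_num_boards;      /* snapshot under lock */
            spin_unlock_irqrestore(&example_lock, flags);

            if (copy_to_user((void __user *)arg, &info, sizeof(info)))
                    return -EFAULT;                 /* copy after unlocking */
            return 0;
    }
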
diff --git a/drivers/staging/dgnc/dgnc_mgmt.h b/drivers/staging/dgnc/dgnc_mgmt.h
deleted file mode 100644
index a7a5770d7630..000000000000
--- a/drivers/staging/dgnc/dgnc_mgmt.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef _DGNC_MGMT_H
-#define _DGNC_MGMT_H
-
-#define MAXMGMTDEVICES 8
-
-int dgnc_mgmt_open(struct inode *inode, struct file *file);
-int dgnc_mgmt_close(struct inode *inode, struct file *file);
-long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-
-#endif /* _DGNC_MGMT_H */
-
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
deleted file mode 100644
index 0ae229c3aaaa..000000000000
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ /dev/null
@@ -1,1690 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-
-#include "dgnc_driver.h"
-#include "dgnc_neo.h"
-#include "dgnc_tty.h"
-
-static inline void neo_parse_lsr(struct dgnc_board *brd, uint port);
-static inline void neo_parse_isr(struct dgnc_board *brd, uint port);
-static void neo_copy_data_from_uart_to_queue(struct channel_t *ch);
-static inline void neo_clear_break(struct channel_t *ch, int force);
-static inline void neo_set_cts_flow_control(struct channel_t *ch);
-static inline void neo_set_rts_flow_control(struct channel_t *ch);
-static inline void neo_set_ixon_flow_control(struct channel_t *ch);
-static inline void neo_set_ixoff_flow_control(struct channel_t *ch);
-static inline void neo_set_no_output_flow_control(struct channel_t *ch);
-static inline void neo_set_no_input_flow_control(struct channel_t *ch);
-static inline void neo_set_new_start_stop_chars(struct channel_t *ch);
-static void neo_parse_modem(struct channel_t *ch, unsigned char signals);
-static void neo_tasklet(unsigned long data);
-static void neo_vpd(struct dgnc_board *brd);
-static void neo_uart_init(struct channel_t *ch);
-static void neo_uart_off(struct channel_t *ch);
-static int neo_drain(struct tty_struct *tty, uint seconds);
-static void neo_param(struct tty_struct *tty);
-static void neo_assert_modem_signals(struct channel_t *ch);
-static void neo_flush_uart_write(struct channel_t *ch);
-static void neo_flush_uart_read(struct channel_t *ch);
-static void neo_disable_receiver(struct channel_t *ch);
-static void neo_enable_receiver(struct channel_t *ch);
-static void neo_send_break(struct channel_t *ch, int msecs);
-static void neo_send_start_character(struct channel_t *ch);
-static void neo_send_stop_character(struct channel_t *ch);
-static void neo_copy_data_from_queue_to_uart(struct channel_t *ch);
-static uint neo_get_uart_bytes_left(struct channel_t *ch);
-static void neo_send_immediate_char(struct channel_t *ch, unsigned char c);
-static irqreturn_t neo_intr(int irq, void *voidbrd);
-
-struct board_ops dgnc_neo_ops = {
- .tasklet = neo_tasklet,
- .intr = neo_intr,
- .uart_init = neo_uart_init,
- .uart_off = neo_uart_off,
- .drain = neo_drain,
- .param = neo_param,
- .vpd = neo_vpd,
- .assert_modem_signals = neo_assert_modem_signals,
- .flush_uart_write = neo_flush_uart_write,
- .flush_uart_read = neo_flush_uart_read,
- .disable_receiver = neo_disable_receiver,
- .enable_receiver = neo_enable_receiver,
- .send_break = neo_send_break,
- .send_start_character = neo_send_start_character,
- .send_stop_character = neo_send_stop_character,
- .copy_data_from_queue_to_uart = neo_copy_data_from_queue_to_uart,
- .get_uart_bytes_left = neo_get_uart_bytes_left,
- .send_immediate_char = neo_send_immediate_char
-};
-
-/*
- * This function allows calls to ensure that all outstanding
- * PCI writes have been completed, by doing a PCI read against
- * a non-destructive, read-only location on the Neo card.
- *
- * In this case, we are reading the DVID (Read-only Device Identification)
- * value of the Neo card.
- */
-static inline void neo_pci_posting_flush(struct dgnc_board *bd)
-{
- readb(bd->re_map_membase + 0x8D);
-}
-
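Reading back a side-effect-free register, as neo_pci_posting_flush() does above, is the standard way to flush posted PCI writes: the read cannot complete until every earlier write on the path has reached the device. The same idea in isolation (0x8D is the Neo DVID offset quoted above):

    #include <linux/io.h>

    static inline void posting_flush_example(u8 __iomem *membase)
    {
            /* dummy read; the value is discarded, the ordering is the point */
            (void)readb(membase + 0x8D);
    }
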
-static inline void neo_set_cts_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn on auto CTS flow control */
- ier |= UART_17158_IER_CTSDSR;
- efr |= (UART_17158_EFR_ECB | UART_17158_EFR_CTSDSR);
-
- /* Turn off auto Xon flow control */
- efr &= ~UART_17158_EFR_IXON;
-
- /*
- * Why? Because Exar's spec says we have to zero it
- * out before setting it
- */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- /* Turn on table D, with 8 char hi/low watermarks */
- writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
- &ch->ch_neo_uart->fctr);
-
- /* Feed the UART our trigger levels */
- writeb(8, &ch->ch_neo_uart->tfifo);
- ch->ch_t_tlevel = 8;
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-static inline void neo_set_rts_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn on auto RTS flow control */
- ier |= UART_17158_IER_RTSDTR;
- efr |= (UART_17158_EFR_ECB | UART_17158_EFR_RTSDTR);
-
- /* Turn off auto Xoff flow control */
- ier &= ~UART_17158_IER_XOFF;
- efr &= ~UART_17158_EFR_IXOFF;
-
- /*
- * Why? Because Exar's spec says we have to zero it
- * out before setting it
- */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
- &ch->ch_neo_uart->fctr);
- ch->ch_r_watermark = 4;
-
- writeb(32, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 32;
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- /*
- * From the Neo UART spec sheet:
- * The auto RTS/DTR function must be started by asserting
- * RTS/DTR# output pin (MCR bit-0 or 1) to logic 1 after
- * it is enabled.
- */
- ch->ch_mostat |= UART_MCR_RTS;
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-static inline void neo_set_ixon_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn off auto CTS flow control */
- ier &= ~UART_17158_IER_CTSDSR;
- efr &= ~UART_17158_EFR_CTSDSR;
-
- /* Turn on auto Xon flow control */
- efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
-
- /*
- * Why? Because Exar's spec says we have to zero it
- * out before setting it
- */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
- &ch->ch_neo_uart->fctr);
- ch->ch_r_watermark = 4;
-
- writeb(32, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 32;
-
- /* Tell UART what start/stop chars it should be looking for */
- writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
- writeb(0, &ch->ch_neo_uart->xonchar2);
-
- writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
- writeb(0, &ch->ch_neo_uart->xoffchar2);
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn off auto RTS flow control */
- ier &= ~UART_17158_IER_RTSDTR;
- efr &= ~UART_17158_EFR_RTSDTR;
-
- /* Turn on auto Xoff flow control */
- ier |= UART_17158_IER_XOFF;
- efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
-
- /*
- * Why? Because Exar's spec says we have to zero it
- * out before setting it
- */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- /* Turn on table D, with 8 char hi/low watermarks */
- writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
- &ch->ch_neo_uart->fctr);
-
- writeb(8, &ch->ch_neo_uart->tfifo);
- ch->ch_t_tlevel = 8;
-
- /* Tell UART what start/stop chars it should be looking for */
- writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
- writeb(0, &ch->ch_neo_uart->xonchar2);
-
- writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
- writeb(0, &ch->ch_neo_uart->xoffchar2);
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-static inline void neo_set_no_input_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn off auto RTS flow control */
- ier &= ~UART_17158_IER_RTSDTR;
- efr &= ~UART_17158_EFR_RTSDTR;
-
- /* Turn off auto Xoff flow control */
- ier &= ~UART_17158_IER_XOFF;
- if (ch->ch_c_iflag & IXON)
- efr &= ~(UART_17158_EFR_IXOFF);
- else
- efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
-
- /*
- * Why? Because Exar's spec says we have to zero
- * it out before setting it
- */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- /* Turn on table D, with 8 char hi/low watermarks */
- writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
- &ch->ch_neo_uart->fctr);
-
- ch->ch_r_watermark = 0;
-
- writeb(16, &ch->ch_neo_uart->tfifo);
- ch->ch_t_tlevel = 16;
-
- writeb(16, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 16;
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-static inline void neo_set_no_output_flow_control(struct channel_t *ch)
-{
- unsigned char ier = readb(&ch->ch_neo_uart->ier);
- unsigned char efr = readb(&ch->ch_neo_uart->efr);
-
- /* Turn off auto CTS flow control */
- ier &= ~UART_17158_IER_CTSDSR;
- efr &= ~UART_17158_EFR_CTSDSR;
-
- /* Turn off auto Xon flow control */
- if (ch->ch_c_iflag & IXOFF)
- efr &= ~UART_17158_EFR_IXON;
- else
- efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
-
- /*
- * Why? Because Exar's spec says we have to zero it
- * out before setting it
- */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Turn on UART enhanced bits */
- writeb(efr, &ch->ch_neo_uart->efr);
-
- /* Turn on table D, with 8 char hi/low watermarks */
- writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
- &ch->ch_neo_uart->fctr);
-
- ch->ch_r_watermark = 0;
-
- writeb(16, &ch->ch_neo_uart->tfifo);
- ch->ch_t_tlevel = 16;
-
- writeb(16, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 16;
-
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-/* change UARTs start/stop chars */
-static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
-{
- /* if hardware flow control is set, then skip this whole thing */
- if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) ||
- ch->ch_c_cflag & CRTSCTS)
- return;
-
- /* Tell UART what start/stop chars it should be looking for */
- writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
- writeb(0, &ch->ch_neo_uart->xonchar2);
-
- writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
- writeb(0, &ch->ch_neo_uart->xoffchar2);
-
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-/* No locks are assumed to be held when calling this function. */
-static inline void neo_clear_break(struct channel_t *ch, int force)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- if (!ch->ch_stop_sending_break) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return;
- }
-
- if (ch->ch_flags & CH_BREAK_SENDING) {
- if (force ||
- time_after_eq(jiffies, ch->ch_stop_sending_break)) {
- unsigned char temp = readb(&ch->ch_neo_uart->lcr);
-
- writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
- neo_pci_posting_flush(ch->ch_bd);
- ch->ch_flags &= ~(CH_BREAK_SENDING);
- ch->ch_stop_sending_break = 0;
- }
- }
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-/* Parse the ISR register. */
-static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
-{
- struct channel_t *ch;
- unsigned char isr;
- unsigned char cause;
- unsigned long flags;
-
- ch = brd->channels[port];
- if (!ch)
- return;
-
- /* Here we try to figure out what caused the interrupt to happen */
- while (1) {
- isr = readb(&ch->ch_neo_uart->isr_fcr);
-
- if (isr & UART_IIR_NO_INT)
- break;
-
- /*
- * Yank off the upper 2 bits,
- * which just show that the FIFOs are enabled.
- */
- isr &= ~(UART_17158_IIR_FIFO_ENABLED);
-
- if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
- /* Read data from uart -> queue */
- neo_copy_data_from_uart_to_queue(ch);
- /*
- * Call our tty layer to enforce queue
- * flow control if needed.
- */
- spin_lock_irqsave(&ch->ch_lock, flags);
- dgnc_check_queue_flow_control(ch);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- }
-
- if (isr & UART_IIR_THRI) {
- spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- neo_copy_data_from_queue_to_uart(ch);
- }
-
- if (isr & UART_17158_IIR_XONXOFF) {
- cause = readb(&ch->ch_neo_uart->xoffchar1);
-
- /*
- * Since the UART detected either an XON or
- * XOFF match, we need to figure out which
- * one it was, so we can suspend or resume data flow.
- */
- if (cause == UART_17158_XON_DETECT) {
- /* resume output if stopped */
- if (brd->channels[port]->ch_flags & CH_STOP) {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_flags &= ~(CH_STOP);
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- }
- } else if (cause == UART_17158_XOFF_DETECT) {
- if (!(brd->channels[port]->ch_flags &
- CH_STOP)) {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_flags |= CH_STOP;
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- }
- }
- }
-
- if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) {
- /*
- * If we get here, this means the hardware is
- * doing auto flow control. Check to see whether
- * RTS/DTR or CTS/DSR caused this interrupt.
- */
- cause = readb(&ch->ch_neo_uart->mcr);
- /* Which pin is doing auto flow? RTS or DTR? */
- if ((cause & 0x4) == 0) {
- if (cause & UART_MCR_RTS) {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_mostat |= UART_MCR_RTS;
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- } else {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_mostat &= ~(UART_MCR_RTS);
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- }
- } else {
- if (cause & UART_MCR_DTR) {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_mostat |= UART_MCR_DTR;
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- } else {
- spin_lock_irqsave(&ch->ch_lock,
- flags);
- ch->ch_mostat &= ~(UART_MCR_DTR);
- spin_unlock_irqrestore(&ch->ch_lock,
- flags);
- }
- }
- }
-
- neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
- }
-}
-
-static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
-{
- struct channel_t *ch;
- int linestatus;
- unsigned long flags;
-
- if (!brd)
- return;
-
- if (port >= brd->maxports)
- return;
-
- ch = brd->channels[port];
- if (!ch)
- return;
-
- linestatus = readb(&ch->ch_neo_uart->lsr);
-
- ch->ch_cached_lsr |= linestatus;
-
- if (ch->ch_cached_lsr & UART_LSR_DR) {
- neo_copy_data_from_uart_to_queue(ch);
- spin_lock_irqsave(&ch->ch_lock, flags);
- dgnc_check_queue_flow_control(ch);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- }
-
- /*
- * The next 3 tests should *NOT* happen, as the above test
- * should encapsulate all 3... At least, that's what Exar says.
- */
-
- if (linestatus & UART_LSR_PE)
- ch->ch_err_parity++;
-
- if (linestatus & UART_LSR_FE)
- ch->ch_err_frame++;
-
- if (linestatus & UART_LSR_BI)
- ch->ch_err_break++;
-
- if (linestatus & UART_LSR_OE) {
- /*
- * Rx Oruns. Exar says that an orun will NOT corrupt
- * the FIFO. It will just replace the holding register
- * with this new data byte. So basically just ignore this.
- * Probably we should eventually have an orun stat in our
- * driver...
- */
- ch->ch_err_overrun++;
- }
-
- if (linestatus & UART_LSR_THRE) {
- spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- neo_copy_data_from_queue_to_uart(ch);
- } else if (linestatus & UART_17158_TX_AND_FIFO_CLR) {
- spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- neo_copy_data_from_queue_to_uart(ch);
- }
-}
-
-/* Send any/all changes to the line to the UART. */
-static void neo_param(struct tty_struct *tty)
-{
- unsigned char lcr = 0;
- unsigned char uart_lcr = 0;
- unsigned char ier = 0;
- unsigned char uart_ier = 0;
- uint baud = 9600;
- int quot = 0;
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!tty)
- return;
-
- un = (struct un_t *)tty->driver_data;
- if (!un)
- return;
-
- ch = un->un_ch;
- if (!ch)
- return;
-
- bd = ch->ch_bd;
- if (!bd)
- return;
-
- /* If baud rate is zero, flush queues, and set mval to drop DTR. */
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
- ch->ch_r_head = 0;
- ch->ch_r_tail = 0;
- ch->ch_e_head = 0;
- ch->ch_e_tail = 0;
- ch->ch_w_head = 0;
- ch->ch_w_tail = 0;
-
- neo_flush_uart_write(ch);
- neo_flush_uart_read(ch);
-
- /* The baudrate is B0 so all modem lines are to be dropped. */
- ch->ch_flags |= (CH_BAUD0);
- ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
- neo_assert_modem_signals(ch);
- ch->ch_old_baud = 0;
- return;
-
- } else if (ch->ch_custom_speed) {
- baud = ch->ch_custom_speed;
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
-
- /*
- * Bring back up RTS and DTR...
- * Also handle RTS or DTR toggle if set.
- */
- if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
- ch->ch_mostat |= (UART_MCR_RTS);
- if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
- ch->ch_mostat |= (UART_MCR_DTR);
- }
- } else {
- int iindex = 0;
- int jindex = 0;
-
- ulong bauds[4][16] = {
- { /* slowbaud */
- 0, 50, 75, 110,
- 134, 150, 200, 300,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* slowbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud */
- 0, 57600, 76800, 115200,
- 131657, 153600, 230400, 460800,
- 921600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 }
- };
-
- /*
- * Only use the TXPrint baud rate if the terminal unit
- * is NOT open
- */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
- (un->un_type == DGNC_PRINT))
- baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
- else
- baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
- if (ch->ch_c_cflag & CBAUDEX)
- iindex = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FAST)
- iindex += 2;
-
- jindex = baud;
-
- if ((iindex >= 0) && (iindex < 4) &&
- (jindex >= 0) && (jindex < 16))
- baud = bauds[iindex][jindex];
- else
- baud = 0;
-
- if (baud == 0)
- baud = 9600;
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
-
- /*
- * Bring back up RTS and DTR...
- * Also handle RTS or DTR toggle if set.
- */
- if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
- ch->ch_mostat |= (UART_MCR_RTS);
- if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
- ch->ch_mostat |= (UART_MCR_DTR);
- }
- }
-
- if (ch->ch_c_cflag & PARENB)
- lcr |= UART_LCR_PARITY;
-
- if (!(ch->ch_c_cflag & PARODD))
- lcr |= UART_LCR_EPAR;
-
-#ifdef CMSPAR
- if (ch->ch_c_cflag & CMSPAR)
- lcr |= UART_LCR_SPAR;
-#endif
-
- if (ch->ch_c_cflag & CSTOPB)
- lcr |= UART_LCR_STOP;
-
- switch (ch->ch_c_cflag & CSIZE) {
- case CS5:
- lcr |= UART_LCR_WLEN5;
- break;
- case CS6:
- lcr |= UART_LCR_WLEN6;
- break;
- case CS7:
- lcr |= UART_LCR_WLEN7;
- break;
- case CS8:
- default:
- lcr |= UART_LCR_WLEN8;
- break;
- }
-
- uart_ier = readb(&ch->ch_neo_uart->ier);
- ier = uart_ier;
-
- uart_lcr = readb(&ch->ch_neo_uart->lcr);
-
- if (baud == 0)
- baud = 9600;
-
- quot = ch->ch_bd->bd_dividend / baud;
-
- if (quot != 0 && ch->ch_old_baud != baud) {
- ch->ch_old_baud = baud;
- writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr);
- writeb((quot & 0xff), &ch->ch_neo_uart->txrx);
- writeb((quot >> 8), &ch->ch_neo_uart->ier);
- writeb(lcr, &ch->ch_neo_uart->lcr);
- }
-
- if (uart_lcr != lcr)
- writeb(lcr, &ch->ch_neo_uart->lcr);
-
- if (ch->ch_c_cflag & CREAD)
- ier |= (UART_IER_RDI | UART_IER_RLSI);
- else
- ier &= ~(UART_IER_RDI | UART_IER_RLSI);
-
- /*
- * Have the UART interrupt on modem signal changes ONLY when
- * we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set.
- */
- if ((ch->ch_digi.digi_flags & CTSPACE) ||
- (ch->ch_digi.digi_flags & RTSPACE) ||
- (ch->ch_c_cflag & CRTSCTS) ||
- !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
- !(ch->ch_c_cflag & CLOCAL))
- ier |= UART_IER_MSI;
- else
- ier &= ~UART_IER_MSI;
-
- ier |= UART_IER_THRI;
-
- if (ier != uart_ier)
- writeb(ier, &ch->ch_neo_uart->ier);
-
- neo_set_new_start_stop_chars(ch);
-
- if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
- neo_set_cts_flow_control(ch);
- } else if (ch->ch_c_iflag & IXON) {
- if ((ch->ch_startc == _POSIX_VDISABLE) ||
- (ch->ch_stopc == _POSIX_VDISABLE))
- neo_set_no_output_flow_control(ch);
- else
- neo_set_ixon_flow_control(ch);
- } else {
- neo_set_no_output_flow_control(ch);
- }
-
- if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
- neo_set_rts_flow_control(ch);
- } else if (ch->ch_c_iflag & IXOFF) {
- if ((ch->ch_startc == _POSIX_VDISABLE) ||
- (ch->ch_stopc == _POSIX_VDISABLE))
- neo_set_no_input_flow_control(ch);
- else
- neo_set_ixoff_flow_control(ch);
- } else {
- neo_set_no_input_flow_control(ch);
- }
-
- /*
- * Adjust the RX FIFO Trigger level if baud is less than 9600.
- * Not exactly elegant, but this is needed because of the Exar chip's
- * delay on firing off the RX FIFO interrupt on slower baud rates.
- */
- if (baud < 9600) {
- writeb(1, &ch->ch_neo_uart->rfifo);
- ch->ch_r_tlevel = 1;
- }
-
- neo_assert_modem_signals(ch);
-
- neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
-}
-
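The divisor programming in neo_param() above is plain 16550-style: quot = bd_dividend / baud, and setting DLAB in the LCR makes the txrx/ier addresses temporarily alias the divisor latch low and high bytes. A reduced sketch under those assumptions (the register pointers are illustrative):

    #include <linux/io.h>
    #include <linux/serial_reg.h>

    static void load_divisor_example(u8 __iomem *lcr, u8 __iomem *dll,
                                     u8 __iomem *dlm, unsigned int dividend,
                                     unsigned int baud, u8 lcr_bits)
    {
            unsigned int quot = dividend / baud;  /* e.g. 921600 / 9600 */

            writeb(UART_LCR_DLAB, lcr);    /* expose the divisor latches */
            writeb(quot & 0xff, dll);      /* DLL: low byte (aliases txrx) */
            writeb(quot >> 8, dlm);        /* DLM: high byte (aliases ier) */
            writeb(lcr_bits, lcr);         /* clear DLAB, restore line bits */
    }
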
-/* Board poller function. */
-static void neo_tasklet(unsigned long data)
-{
- struct dgnc_board *bd = (struct dgnc_board *)data;
- struct channel_t *ch;
- unsigned long flags;
- int i;
- int state = 0;
- int ports = 0;
-
- if (!bd)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, flags);
- state = bd->state;
- ports = bd->nasync;
- spin_unlock_irqrestore(&bd->bd_lock, flags);
-
- /*
- * Do NOT allow the interrupt routine to read the intr registers
- * Until we release this lock.
- */
- spin_lock_irqsave(&bd->bd_intr_lock, flags);
-
- if ((state == BOARD_READY) && (ports > 0)) {
- for (i = 0; i < ports; i++) {
- ch = bd->channels[i];
- if (!ch)
- continue;
-
- /*
- * NOTE: Remember you CANNOT hold any channel
- * locks when calling the input routine.
- *
- * During input processing, it's possible we
- * will call the Linux ld, which might in turn,
- * do a callback right back into us, resulting
- * in us trying to grab the channel lock twice!
- */
- dgnc_input(ch);
-
- /*
- * Channel lock is grabbed and then released
- * inside both of these routines, but neither
- * call anything else that could call back into us.
- */
- neo_copy_data_from_queue_to_uart(ch);
- dgnc_wakeup_writes(ch);
-
- dgnc_carrier(ch);
-
- /*
- * Check to see if we need to turn off a sending break.
- * The timing check is done inside clear_break()
- */
- if (ch->ch_stop_sending_break)
- neo_clear_break(ch, 0);
- }
- }
-
- spin_unlock_irqrestore(&bd->bd_intr_lock, flags);
-}
-
-/* Neo specific interrupt handler. */
-static irqreturn_t neo_intr(int irq, void *voidbrd)
-{
- struct dgnc_board *brd = voidbrd;
- struct channel_t *ch;
- int port = 0;
- int type;
- u32 uart_poll;
- unsigned long flags;
- unsigned long flags2;
-
- if (!brd)
- return IRQ_NONE;
-
- /* Lock out the slow poller from running on this board. */
- spin_lock_irqsave(&brd->bd_intr_lock, flags);
-
- /*
- * Read in "extended" IRQ information from the 32bit Neo register.
- * Bits 0-7: What port triggered the interrupt.
- * Bits 8-31: Each 3bits indicate what type of interrupt occurred.
- */
- uart_poll = readl(brd->re_map_membase + UART_17158_POLL_ADDR_OFFSET);
-
- /*
- * If 0, no interrupts pending.
- * This can happen if the IRQ is shared among a couple Neo/Classic
- * boards.
- */
- if (!uart_poll) {
- spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
- return IRQ_NONE;
- }
-
- while ((uart_poll & 0xff) != 0) {
- type = uart_poll >> (8 + (port * 3));
- type &= 0x7;
-
- uart_poll &= ~(0x01 << port);
-
- switch (type) {
- case UART_17158_RXRDY_TIMEOUT:
- /*
- * RXRDY Time-out is cleared by reading data in the
- * RX FIFO until it falls below the trigger level.
- */
-
- if (port >= brd->nasync)
- break;
-
- ch = brd->channels[port];
- neo_copy_data_from_uart_to_queue(ch);
-
- /*
- * Call our tty layer to enforce queue flow control if
- * needed.
- */
- spin_lock_irqsave(&ch->ch_lock, flags2);
- dgnc_check_queue_flow_control(ch);
- spin_unlock_irqrestore(&ch->ch_lock, flags2);
-
- break;
-
- case UART_17158_RX_LINE_STATUS:
-
- /* RXRDY and RX LINE Status (logic OR of LSR[4:1]) */
-
- neo_parse_lsr(brd, port);
- break;
-
- case UART_17158_TXRDY:
- /*
- * TXRDY interrupt clears after reading ISR register
- * for the UART channel.
- */
-
- /*
- * Yes, this is odd...
- * Why would I check EVERY possibility of type of
- * interrupt, when we know it's TXRDY?
- * Because for some reason, even though we got triggered for
- * TXRDY, it seems to be occasionally wrong. Instead of
- * TX, which it should be, I was getting things like
- * RXDY too. Weird.
- */
- neo_parse_isr(brd, port);
- break;
-
- case UART_17158_MSR:
- /* MSR or flow control was seen. */
- neo_parse_isr(brd, port);
- break;
-
- default:
- break;
- }
-
- port++;
- }
-
- tasklet_schedule(&brd->helper_tasklet);
-
- spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
-
- return IRQ_HANDLED;
-}
-
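The interrupt loop above decodes the packed 32-bit poll register: bits 0-7 flag which ports are pending, and each port gets a 3-bit cause code starting at bit 8, hence the shift by 8 + port * 3. The extraction on its own, with a hypothetical register value:

    u32 uart_poll = 0x00000203;  /* hypothetical: ports 0 and 1 pending */
    int port = 0;

    while (uart_poll & 0xff) {
            int type = (uart_poll >> (8 + port * 3)) & 0x7; /* 3-bit cause */

            uart_poll &= ~(1u << port);  /* retire this port's pending bit */
            /* ... dispatch on type ... */
            port++;
    }
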
-/*
- * Neo specific way of turning off the receiver.
- * Used as a way to enforce queue flow control when in
- * hardware flow control mode.
- */
-static void neo_disable_receiver(struct channel_t *ch)
-{
- unsigned char tmp = readb(&ch->ch_neo_uart->ier);
-
- tmp &= ~(UART_IER_RDI);
- writeb(tmp, &ch->ch_neo_uart->ier);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-/*
- * Neo specific way of turning on the receiver.
- * Used as a way to un-enforce queue flow control when in
- * hardware flow control mode.
- */
-static void neo_enable_receiver(struct channel_t *ch)
-{
- unsigned char tmp = readb(&ch->ch_neo_uart->ier);
-
- tmp |= (UART_IER_RDI);
- writeb(tmp, &ch->ch_neo_uart->ier);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
-{
- int qleft = 0;
- unsigned char linestatus = 0;
- unsigned char error_mask = 0;
- int n = 0;
- int total = 0;
- ushort head;
- ushort tail;
- unsigned long flags;
-
- if (!ch)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- head = ch->ch_r_head & RQUEUEMASK;
- tail = ch->ch_r_tail & RQUEUEMASK;
-
- linestatus = ch->ch_cached_lsr;
- ch->ch_cached_lsr = 0;
-
- qleft = tail - head - 1;
- if (qleft < 0)
- qleft += RQUEUEMASK + 1;
-
- if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
- /* force the FIFO copy to NOT be run */
- total = 0;
- } else {
- total = readb(&ch->ch_neo_uart->rfifo);
-
- /*
- * EXAR chip bug - RX FIFO COUNT - Fudge factor.
- *
- * This resolves a problem/bug with the Exar chip that sometimes
- * returns a bogus value in the rfifo register.
- * The count can be any where from 0-3 bytes "off".
- * Bizarre, but true.
- */
- if ((ch->ch_bd->dvid & 0xf0) >= UART_XR17E158_DVID)
- total -= 1;
- else
- total -= 3;
- }
-
- total = min(total, qleft);
-
- while (total > 0) {
- linestatus = readb(&ch->ch_neo_uart->lsr);
-
- if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
- break;
-
- /* Make sure we don't go over the end of our queue */
- n = min(((uint)total), (RQUEUESIZE - (uint)head));
-
- /*
- * Cut down n even further if needed, this is to fix
- * a problem with memcpy_fromio() with the Neo on the
- * IBM pSeries platform.
- * 15 bytes max appears to be the magic number.
- */
- n = min_t(uint, n, 12);
-
- /*
- * Since we are grabbing the linestatus register, which
- * will reset some bits after our read, we need to ensure
- * we don't miss our TX FIFO empties.
- */
- if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR))
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-
- linestatus = 0;
-
- memcpy_fromio(ch->ch_rqueue + head,
- &ch->ch_neo_uart->txrxburst, n);
-
- /*
- * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
- * that all the data currently in the FIFO is free of
- * breaks and parity/frame/orun errors.
- */
- memset(ch->ch_equeue + head, 0, n);
-
- head = (head + n) & RQUEUEMASK;
- total -= n;
- qleft -= n;
- ch->ch_rxcount += n;
- }
-
- /*
- * Create a mask to determine whether we should
- * insert the character (if any) into our queue.
- */
- if (ch->ch_c_iflag & IGNBRK)
- error_mask |= UART_LSR_BI;
-
- /*
- * Now cleanup any leftover bytes still in the UART.
- * Also deal with any possible queue overflow here as well.
- */
- while (1) {
- /*
- * It's possible we have a linestatus from the loop above
- * this, so we "OR" on any extra bits.
- */
- linestatus |= readb(&ch->ch_neo_uart->lsr);
-
- /*
- * If the chip tells us there is no more data pending to
- * be read, we can then leave.
- * But before we do, cache the linestatus, just in case.
- */
- if (!(linestatus & UART_LSR_DR)) {
- ch->ch_cached_lsr = linestatus;
- break;
- }
-
- /* No need to store this bit */
- linestatus &= ~UART_LSR_DR;
-
- /*
- * Since we are grabbing the linestatus register, which
- * will reset some bits after our read, we need to ensure
- * we don't miss our TX FIFO empties.
- */
- if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
- linestatus &= ~(UART_LSR_THRE |
- UART_17158_TX_AND_FIFO_CLR);
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- }
-
- if (linestatus & error_mask) {
- unsigned char discard;
-
- linestatus = 0;
- memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1);
- continue;
- }
-
- /*
- * If our queue is full, we have no choice but to drop
- * some data. The assumption is that HWFLOW or SWFLOW
- * should have stopped things long before we got to
- * this point.
- */
- while (qleft < 1) {
- tail = (tail + 1) & RQUEUEMASK;
- ch->ch_r_tail = tail;
- ch->ch_err_overrun++;
- qleft++;
- }
-
- memcpy_fromio(ch->ch_rqueue + head,
- &ch->ch_neo_uart->txrxburst, 1);
- ch->ch_equeue[head] = (unsigned char)linestatus;
-
- linestatus = 0;
-
- head = (head + 1) & RQUEUEMASK;
-
- qleft--;
- ch->ch_rxcount++;
- }
-
- ch->ch_r_head = head & RQUEUEMASK;
- ch->ch_e_head = head & EQUEUEMASK;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
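The qleft arithmetic above is standard power-of-two ring-buffer bookkeeping: with head and tail masked by RQUEUEMASK, free space is tail - head - 1 modulo the queue size, leaving one slot empty so the full and empty states stay distinguishable. The same computation in isolation, with an illustrative mask:

    #define QMASK 0x3ff  /* illustrative: queue size 1024, a power of two */

    static int ring_free_example(unsigned short head, unsigned short tail)
    {
            int qleft = (tail & QMASK) - (head & QMASK) - 1;

            if (qleft < 0)
                    qleft += QMASK + 1;  /* wrapped: add the queue size */
            return qleft;                /* one slot is always sacrificed */
    }
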
-/*
- * This function basically goes to sleep for secs, or until
- * it gets signalled that the port has fully drained.
- */
-static int neo_drain(struct tty_struct *tty, uint seconds)
-{
- unsigned long flags;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!tty)
- return -ENXIO;
-
- un = (struct un_t *)tty->driver_data;
- if (!un)
- return -ENXIO;
-
- ch = un->un_ch;
- if (!ch)
- return -ENXIO;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- un->un_flags |= UN_EMPTY;
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- /* Returns 0 on timeout, >0 once drained, -ERESTARTSYS if signalled */
- return wait_event_interruptible_timeout(un->un_flags_wait,
- ((un->un_flags & UN_EMPTY) == 0),
- msecs_to_jiffies(seconds * 1000));
-}
-
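neo_drain() leans on wait_event_interruptible_timeout(), whose return value is three-way: 0 on timeout, the remaining jiffies (positive) once the condition became true, and -ERESTARTSYS if a signal interrupted the sleep. A small usage sketch with a hypothetical wait queue and flag:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(example_wait);
    static int example_done;

    static int wait_for_drain_example(unsigned int seconds)
    {
            long rc = wait_event_interruptible_timeout(example_wait,
                            example_done,
                            msecs_to_jiffies(seconds * 1000));

            if (rc == 0)
                    return -ETIMEDOUT;  /* condition never became true */
            if (rc < 0)
                    return rc;          /* -ERESTARTSYS: hit by a signal */
            return 0;                   /* drained with time to spare */
    }
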
-/*
- * Flush the WRITE FIFO on the Neo.
- * Channel lock MUST be held before calling this function!
- */
-static void neo_flush_uart_write(struct channel_t *ch)
-{
- unsigned char tmp = 0;
- int i = 0;
-
- if (!ch)
- return;
-
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
- &ch->ch_neo_uart->isr_fcr);
- neo_pci_posting_flush(ch->ch_bd);
-
- for (i = 0; i < 10; i++) {
- /*
- * Check to see if the UART completely flushed the FIFO.
- */
- tmp = readb(&ch->ch_neo_uart->isr_fcr);
- if (tmp & 4)
- udelay(10);
- else
- break;
- }
-
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-}
-
-/*
- * Flush the READ FIFO on the Neo.
- * Channel lock MUST be held before calling this function!
- */
-static void neo_flush_uart_read(struct channel_t *ch)
-{
- unsigned char tmp = 0;
- int i = 0;
-
- if (!ch)
- return;
-
- writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
- &ch->ch_neo_uart->isr_fcr);
- neo_pci_posting_flush(ch->ch_bd);
-
- for (i = 0; i < 10; i++) {
- /*
- * Check to see if the UART feels it completely flushed the
- * FIFO.
- */
- tmp = readb(&ch->ch_neo_uart->isr_fcr);
- if (tmp & 2)
- udelay(10);
- else
- break;
- }
-}
-
-static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
-{
- ushort head;
- ushort tail;
- int n;
- int s;
- int qlen;
- uint len_written = 0;
- unsigned long flags;
-
- if (!ch)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- if (ch->ch_w_tail == ch->ch_w_head)
- goto exit_unlock;
-
- /* If port is "stopped", don't send any data to the UART */
- if ((ch->ch_flags & CH_FORCED_STOP) ||
- (ch->ch_flags & CH_BREAK_SENDING))
- goto exit_unlock;
-
- if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
- /* Send data directly to txrx register */
- unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr);
-
- ch->ch_cached_lsr |= lsrbits;
- if (ch->ch_cached_lsr & UART_LSR_THRE) {
- ch->ch_cached_lsr &= ~(UART_LSR_THRE);
-
- /*
- * If RTS Toggle mode is on, turn on RTS now if not
- * already set, and make sure we get an event when the
- * data transfer has completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_RTS)) {
- ch->ch_mostat |= (UART_MCR_RTS);
- neo_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
- /*
- * If DTR Toggle mode is on, turn on DTR now if not
- * already set, and make sure we get an event when the
- * data transfer has completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_DTR)) {
- ch->ch_mostat |= (UART_MCR_DTR);
- neo_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
-
- writeb(ch->ch_wqueue[ch->ch_w_tail],
- &ch->ch_neo_uart->txrx);
- ch->ch_w_tail++;
- ch->ch_w_tail &= WQUEUEMASK;
- ch->ch_txcount++;
- }
-
- goto exit_unlock;
- }
-
- /* We have to do it this way, because of the EXAR TXFIFO count bug. */
-
- if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) {
- if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
- goto exit_unlock;
-
- len_written = 0;
-
- n = readb(&ch->ch_neo_uart->tfifo);
-
- if ((unsigned int)n > ch->ch_t_tlevel)
- goto exit_unlock;
-
- n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;
- } else {
- n = UART_17158_TX_FIFOSIZE - readb(&ch->ch_neo_uart->tfifo);
- }
-
- head = ch->ch_w_head & WQUEUEMASK;
- tail = ch->ch_w_tail & WQUEUEMASK;
- qlen = (head - tail) & WQUEUEMASK;
-
- n = min(n, qlen);
-
- while (n > 0) {
- s = ((head >= tail) ? head : WQUEUESIZE) - tail;
- s = min(s, n);
-
- if (s <= 0)
- break;
-
- /*
- * If RTS Toggle mode is on, turn on RTS now if not already set,
- * and make sure we get an event when the data transfer has
- * completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_RTS)) {
- ch->ch_mostat |= (UART_MCR_RTS);
- neo_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
-
- /*
- * If DTR Toggle mode is on, turn on DTR now if not already set,
- * and make sure we get an event when the data transfer has
- * completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_DTR)) {
- ch->ch_mostat |= (UART_MCR_DTR);
- neo_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
-
- memcpy_toio(&ch->ch_neo_uart->txrxburst,
- ch->ch_wqueue + tail, s);
-
- tail = (tail + s) & WQUEUEMASK;
- n -= s;
- ch->ch_txcount += s;
- len_written += s;
- }
-
- ch->ch_w_tail = tail & WQUEUEMASK;
-
- if (len_written > 0) {
- neo_pci_posting_flush(ch->ch_bd);
- ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- }
-
-exit_unlock:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
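
The loop above never copies across the ring's wrap point: each pass takes the largest contiguous run, bounded either by the head index or by the physical end of the buffer, then wraps the tail and repeats. The same chunking arithmetic in runnable form, with the queue shrunk to 16 bytes for illustration:

    #include <stdio.h>
    #include <string.h>

    #define WQUEUESIZE 16   /* stand-in; the real queue is much larger */
    #define WQUEUEMASK (WQUEUESIZE - 1)

    int main(void)
    {
            char q[] = "ABCDEFGHIJKLMNOP";      /* ring contents */
            unsigned short head = 3, tail = 12; /* 7 queued bytes, wrapping */
            int n = (head - tail) & WQUEUEMASK;
            char out[WQUEUESIZE + 1] = "";

            while (n > 0) {
                    /* largest contiguous run: up to head, or to buffer end */
                    int s = ((head >= tail) ? head : WQUEUESIZE) - tail;

                    if (s > n)
                            s = n;
                    strncat(out, q + tail, s);
                    tail = (tail + s) & WQUEUEMASK;
                    n -= s;
            }
            printf("drained: %s\n", out);       /* prints MNOPABC */
            return 0;
    }
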
-
-static void neo_parse_modem(struct channel_t *ch, unsigned char signals)
-{
- unsigned char msignals = signals;
-
- if (!ch)
- return;
-
- /*
- * Do altpin switching. Altpin switches DCD and DSR.
- * This probably breaks DSRPACE, so we should be more clever here.
- */
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
- unsigned char mswap = msignals;
-
- if (mswap & UART_MSR_DDCD) {
- msignals &= ~UART_MSR_DDCD;
- msignals |= UART_MSR_DDSR;
- }
- if (mswap & UART_MSR_DDSR) {
- msignals &= ~UART_MSR_DDSR;
- msignals |= UART_MSR_DDCD;
- }
- if (mswap & UART_MSR_DCD) {
- msignals &= ~UART_MSR_DCD;
- msignals |= UART_MSR_DSR;
- }
- if (mswap & UART_MSR_DSR) {
- msignals &= ~UART_MSR_DSR;
- msignals |= UART_MSR_DCD;
- }
- }
-
- /* Scrub off the lower bits. They signify deltas. */
- msignals &= 0xf0;
-
- if (msignals & UART_MSR_DCD)
- ch->ch_mistat |= UART_MSR_DCD;
- else
- ch->ch_mistat &= ~UART_MSR_DCD;
-
- if (msignals & UART_MSR_DSR)
- ch->ch_mistat |= UART_MSR_DSR;
- else
- ch->ch_mistat &= ~UART_MSR_DSR;
-
- if (msignals & UART_MSR_RI)
- ch->ch_mistat |= UART_MSR_RI;
- else
- ch->ch_mistat &= ~UART_MSR_RI;
-
- if (msignals & UART_MSR_CTS)
- ch->ch_mistat |= UART_MSR_CTS;
- else
- ch->ch_mistat &= ~UART_MSR_CTS;
-}
-
-/* Make the UART raise any of the output signals we want up */
-static void neo_assert_modem_signals(struct channel_t *ch)
-{
- unsigned char out;
-
- if (!ch)
- return;
-
- out = ch->ch_mostat;
-
- if (ch->ch_flags & CH_LOOPBACK)
- out |= UART_MCR_LOOP;
-
- writeb(out, &ch->ch_neo_uart->mcr);
- neo_pci_posting_flush(ch->ch_bd);
-
- /* Give time for the UART to actually raise/drop the signals */
- udelay(10);
-}
-
-static void neo_send_start_character(struct channel_t *ch)
-{
- if (!ch)
- return;
-
- if (ch->ch_startc != _POSIX_VDISABLE) {
- ch->ch_xon_sends++;
- writeb(ch->ch_startc, &ch->ch_neo_uart->txrx);
- neo_pci_posting_flush(ch->ch_bd);
- udelay(10);
- }
-}
-
-static void neo_send_stop_character(struct channel_t *ch)
-{
- if (!ch)
- return;
-
- if (ch->ch_stopc != _POSIX_VDISABLE) {
- ch->ch_xoff_sends++;
- writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx);
- neo_pci_posting_flush(ch->ch_bd);
- udelay(10);
- }
-}
-
-/* neo_uart_init */
-
-static void neo_uart_init(struct channel_t *ch)
-{
- writeb(0, &ch->ch_neo_uart->ier);
- writeb(0, &ch->ch_neo_uart->efr);
- writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr);
-
- /* Clear out UART and FIFO */
- readb(&ch->ch_neo_uart->txrx);
- writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
- &ch->ch_neo_uart->isr_fcr);
- readb(&ch->ch_neo_uart->lsr);
- readb(&ch->ch_neo_uart->msr);
-
- ch->ch_flags |= CH_FIFO_ENABLED;
-
- /* Assert any signals we want up */
- writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-/* Make the UART completely turn off. */
-static void neo_uart_off(struct channel_t *ch)
-{
- /* Turn off UART enhanced bits */
- writeb(0, &ch->ch_neo_uart->efr);
-
- /* Stop all interrupts from occurring. */
- writeb(0, &ch->ch_neo_uart->ier);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-static uint neo_get_uart_bytes_left(struct channel_t *ch)
-{
- unsigned char left = 0;
- unsigned char lsr = readb(&ch->ch_neo_uart->lsr);
-
- /* We must cache the LSR as some of the bits get reset once read... */
- ch->ch_cached_lsr |= lsr;
-
- /* Determine whether the Transmitter is empty or not */
- if (!(lsr & UART_LSR_TEMT)) {
- if (ch->ch_flags & CH_TX_FIFO_EMPTY)
- tasklet_schedule(&ch->ch_bd->helper_tasklet);
- left = 1;
- } else {
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- left = 0;
- }
-
- return left;
-}
-
-/* Channel lock MUST be held by the calling function! */
-static void neo_send_break(struct channel_t *ch, int msecs)
-{
- /* If we receive a time of 0, this means turn off the break. */
-
- if (msecs == 0) {
- if (ch->ch_flags & CH_BREAK_SENDING) {
- unsigned char temp = readb(&ch->ch_neo_uart->lcr);
-
- writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
- neo_pci_posting_flush(ch->ch_bd);
- ch->ch_flags &= ~(CH_BREAK_SENDING);
- ch->ch_stop_sending_break = 0;
- }
- return;
- }
-
- /*
- * Set the time we should stop sending the break.
- * If we are already sending a break, toss away the existing
- * time to stop, and use this new value instead.
- */
- ch->ch_stop_sending_break = jiffies + dgnc_jiffies_from_ms(msecs);
-
- /* Tell the UART to start sending the break */
- if (!(ch->ch_flags & CH_BREAK_SENDING)) {
- unsigned char temp = readb(&ch->ch_neo_uart->lcr);
-
- writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr);
- neo_pci_posting_flush(ch->ch_bd);
- ch->ch_flags |= (CH_BREAK_SENDING);
- }
-}
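
The break logic stores an absolute deadline in jiffies and a poller elsewhere compares the current time against it. dgnc_jiffies_from_ms() is the driver's own converter; a sketch using the stock msecs_to_jiffies() and the wraparound-safe time_after() comparison:

    #include <linux/jiffies.h>

    static unsigned long stop_sending_break;

    static void arm_break(int msecs)
    {
            stop_sending_break = jiffies + msecs_to_jiffies(msecs);
    }

    static int break_expired(void)
    {
            /* time_after() stays correct when the jiffies counter wraps */
            return time_after(jiffies, stop_sending_break);
    }
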
-
-/*
- * Sends a specific character as soon as possible to the UART,
- * jumping over any bytes that might be in the write queue.
- *
- * The channel lock MUST be held by the calling function.
- */
-static void neo_send_immediate_char(struct channel_t *ch, unsigned char c)
-{
- if (!ch)
- return;
-
- writeb(c, &ch->ch_neo_uart->txrx);
- neo_pci_posting_flush(ch->ch_bd);
-}
-
-static unsigned int neo_read_eeprom(unsigned char __iomem *base,
- unsigned int address)
-{
- unsigned int enable;
- unsigned int bits;
- unsigned int databit;
- unsigned int val;
-
- /* enable chip select */
- writeb(NEO_EECS, base + NEO_EEREG);
- /* READ */
- enable = address | 0x180;
-
- for (bits = 9; bits--; ) {
- databit = (enable & (1 << bits)) ? NEO_EEDI : 0;
- /* Set read address */
- writeb(databit | NEO_EECS, base + NEO_EEREG);
- writeb(databit | NEO_EECS | NEO_EECK, base + NEO_EEREG);
- }
-
- val = 0;
-
- for (bits = 17; bits--; ) {
- /* clock to EEPROM */
- writeb(NEO_EECS, base + NEO_EEREG);
- writeb(NEO_EECS | NEO_EECK, base + NEO_EEREG);
- val <<= 1;
- /* read EEPROM */
- if (readb(base + NEO_EEREG) & NEO_EEDO)
- val |= 1;
- }
-
- /* clock falling edge */
- writeb(NEO_EECS, base + NEO_EEREG);
-
- /* drop chip select */
- writeb(0x00, base + NEO_EEREG);
-
- return val;
-}
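
neo_read_eeprom() bit-bangs a Microwire-style serial EEPROM: it shifts out a 9-bit frame consisting of a start bit, the two-bit READ opcode, and a 6-bit word address (which is what address | 0x180 packs), then clocks back a leading dummy bit plus 16 data bits MSB-first, hence the 17-cycle read loop. The frame packing on its own, runnable:

    #include <stdio.h>

    int main(void)
    {
            unsigned int address = 0x12;
            /* 0x180 = start bit (0x100) plus READ opcode (0x80) in front
             * of the 6 address bits
             */
            unsigned int frame = address | 0x180;
            int bit;

            for (bit = 8; bit >= 0; bit--)
                    putchar((frame & (1u << bit)) ? '1' : '0');
            putchar('\n');  /* 110010010: start, opcode 10, address 010010 */
            return 0;
    }
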
-
-static void neo_vpd(struct dgnc_board *brd)
-{
- unsigned int i = 0;
- unsigned int a;
-
- if (!brd)
- return;
-
- if (!brd->re_map_membase)
- return;
-
- /* Store the VPD into our buffer */
- for (i = 0; i < NEO_VPD_IMAGESIZE; i++) {
- a = neo_read_eeprom(brd->re_map_membase, i);
- brd->vpd[i * 2] = a & 0xff;
- brd->vpd[(i * 2) + 1] = (a >> 8) & 0xff;
- }
-
- /*
- * brd->vpd carries resource name tags at the following offsets:
- * 0x08 : long resource name tag
- * 0x10 : long resource name tag (PCI-66 files)
- * 0x7F : small resource end tag
- */
- if (((brd->vpd[0x08] != 0x82) &&
- (brd->vpd[0x10] != 0x82)) ||
- (brd->vpd[0x7F] != 0x78)) {
- memset(brd->vpd, '\0', NEO_VPD_IMAGESIZE);
- } else {
- /* Search for the serial number */
- for (i = 0; i < NEO_VPD_IMAGEBYTES - 3; i++)
- if (brd->vpd[i] == 'S' && brd->vpd[i + 1] == 'N')
- strncpy(brd->serial_num, &brd->vpd[i + 3], 9);
- }
-}
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
deleted file mode 100644
index c30a2c2b5eea..000000000000
--- a/drivers/staging/dgnc/dgnc_neo.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef _DGNC_NEO_H
-#define _DGNC_NEO_H
-
-#include "dgnc_driver.h"
-
-/**
- * struct neo_uart_struct - Per channel/port NEO UART structure
- *
- * key - RW = read write
- * - R = read only
- * - U = unused
- *
- * @txrx: (RW) Holding Register.
- * @ier: (RW) Interrupt Enable Register.
- * @isr_fcr: (RW) Interrupt Status Reg/Fifo Control Register.
- * @lcr: (RW) Line Control Register.
- * @mcr: (RW) Modem Control Register.
- * @lsr: (RW) Line Status Register.
- * @msr: (RW) Modem Status Register.
- * @spr: (RW) Scratch Pad Register.
- * @fctr: (RW) Feature Control Register.
- * @efr: (RW) Enhanced Function Register.
- * @tfifo: (RW) Transmit FIFO Register.
- * @rfifo: (RW) Receive FIFO Register.
- * @xoffchar1: (RW) XOff Character 1 Register.
- * @xoffchar2: (RW) XOff Character 2 Register.
- * @xonchar1: (RW) Xon Character 1 Register.
- * @xonchar2: (RW) XOn Character 2 Register.
- * @reserved1: (U) Reserved by Exar.
- * @txrxburst: (RW) 64 bytes of RX/TX FIFO Data.
- * @reserved2: (U) Reserved by Exar.
- * @rxburst_with_errors: (R) 64 bytes of RX FIFO Data + LSR.
- */
-struct neo_uart_struct {
- u8 txrx;
- u8 ier;
- u8 isr_fcr;
-
- u8 lcr;
- u8 mcr;
- u8 lsr;
- u8 msr;
- u8 spr;
- u8 fctr;
- u8 efr;
- u8 tfifo;
- u8 rfifo;
- u8 xoffchar1;
- u8 xoffchar2;
- u8 xonchar1;
- u8 xonchar2;
-
- u8 reserved1[0x2ff - 0x200];
- u8 txrxburst[64];
- u8 reserved2[0x37f - 0x340];
- u8 rxburst_with_errors[64];
-};
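
Structures like this map C fields directly onto hardware register offsets, which only works if the reserved arrays pad every field to exactly the documented address and the compiler inserts no padding of its own. A common defensive idiom is a compile-time offset check, sketched here on a hypothetical two-register layout rather than the Neo map itself:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical map: ctrl at 0x00, a 15-byte gap, data at 0x10. */
    struct demo_regs {
            uint8_t ctrl;
            uint8_t reserved[0x0f];
            uint8_t data;
    };

    /* Fails the build, not the board bring-up, if the layout drifts. */
    static_assert(offsetof(struct demo_regs, data) == 0x10,
                  "register map padding is wrong");

    int main(void)
    {
            return 0;
    }
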
-
-/* Where to read the extended interrupt register (32 bits instead of 8 bits) */
-#define UART_17158_POLL_ADDR_OFFSET 0x80
-
-/* These are the current DVIDs of the Neo boards */
-#define UART_XR17C158_DVID 0x20
-#define UART_XR17D158_DVID 0x20
-#define UART_XR17E158_DVID 0x40
-
-#define NEO_EECK 0x10 /* Clock */
-#define NEO_EECS 0x20 /* Chip Select */
-#define NEO_EEDI 0x40 /* Data In is an Output Pin */
-#define NEO_EEDO 0x80 /* Data Out is an Input Pin */
-#define NEO_EEREG 0x8E /* offset to EEPROM control reg */
-
-#define NEO_VPD_IMAGESIZE 0x40 /* size of image to read from EEPROM in words */
-#define NEO_VPD_IMAGEBYTES (NEO_VPD_IMAGESIZE * 2)
-
-/*
- * These are the redefinitions for the FCTR on the XR17C158, since
- * Exar made them different than their earlier design. (XR16C854)
- */
-
-/* These are only applicable when table D is selected */
-#define UART_17158_FCTR_RTS_NODELAY 0x00
-#define UART_17158_FCTR_RTS_4DELAY 0x01
-#define UART_17158_FCTR_RTS_6DELAY 0x02
-#define UART_17158_FCTR_RTS_8DELAY 0x03
-#define UART_17158_FCTR_RTS_12DELAY 0x12
-#define UART_17158_FCTR_RTS_16DELAY 0x05
-#define UART_17158_FCTR_RTS_20DELAY 0x13
-#define UART_17158_FCTR_RTS_24DELAY 0x06
-#define UART_17158_FCTR_RTS_28DELAY 0x14
-#define UART_17158_FCTR_RTS_32DELAY 0x07
-#define UART_17158_FCTR_RTS_36DELAY 0x16
-#define UART_17158_FCTR_RTS_40DELAY 0x08
-#define UART_17158_FCTR_RTS_44DELAY 0x09
-#define UART_17158_FCTR_RTS_48DELAY 0x10
-#define UART_17158_FCTR_RTS_52DELAY 0x11
-
-#define UART_17158_FCTR_RTS_IRDA 0x10
-#define UART_17158_FCTR_RS485 0x20
-#define UART_17158_FCTR_TRGA 0x00
-#define UART_17158_FCTR_TRGB 0x40
-#define UART_17158_FCTR_TRGC 0x80
-#define UART_17158_FCTR_TRGD 0xC0
-
-/* 17158 trigger table selects.. */
-#define UART_17158_FCTR_BIT6 0x40
-#define UART_17158_FCTR_BIT7 0x80
-
-/* 17158 TX/RX memmapped buffer offsets */
-#define UART_17158_RX_FIFOSIZE 64
-#define UART_17158_TX_FIFOSIZE 64
-
-/* 17158 Extended IIR's */
-#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
-#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */
-#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR
- * state change
- */
-#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */
-
-/*
- * These are the extended interrupts that get sent
- * back to us from the UART's 32bit interrupt register
- */
-#define UART_17158_RX_LINE_STATUS 0x1 /* RX Ready */
-#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */
-#define UART_17158_TXRDY 0x3 /* TX Ready */
-#define UART_17158_MSR 0x4 /* Modem State Change */
-#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding
- * Reg Empty
- */
-#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO
- * Data error
- */
-
-/*
- * These are the EXTENDED definitions for the 17C158's Interrupt
- * Enable Register.
- */
-#define UART_17158_EFR_ECB 0x10 /* Enhanced control bit */
-#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
-#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
-#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
-#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
-
-#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an
- * incoming XOFF char
- */
-#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an
- * incoming XON char
- */
-
-#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */
-#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */
-#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */
-#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */
-
-extern struct board_ops dgnc_neo_ops;
-
-#endif /* _DGNC_NEO_H */
diff --git a/drivers/staging/dgnc/dgnc_pci.h b/drivers/staging/dgnc/dgnc_pci.h
deleted file mode 100644
index 59845916d90c..000000000000
--- a/drivers/staging/dgnc/dgnc_pci.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- */
-
-#ifndef _DGNC_PCI_H
-#define _DGNC_PCI_H
-
-/* Maximum number of PCI boards */
-#define PCIMAX 32
-
-#define DIGI_VID 0x114F
-
-#define PCI_DEVICE_CLASSIC_4_DID 0x0028
-#define PCI_DEVICE_CLASSIC_8_DID 0x0029
-#define PCI_DEVICE_CLASSIC_4_422_DID 0x00D0
-#define PCI_DEVICE_CLASSIC_8_422_DID 0x00D1
-#define PCI_DEVICE_NEO_4_DID 0x00B0
-#define PCI_DEVICE_NEO_8_DID 0x00B1
-#define PCI_DEVICE_NEO_2DB9_DID 0x00C8
-#define PCI_DEVICE_NEO_2DB9PRI_DID 0x00C9
-#define PCI_DEVICE_NEO_2RJ45_DID 0x00CA
-#define PCI_DEVICE_NEO_2RJ45PRI_DID 0x00CB
-#define PCI_DEVICE_NEO_1_422_DID 0x00CC
-#define PCI_DEVICE_NEO_1_422_485_DID 0x00CD
-#define PCI_DEVICE_NEO_2_422_485_DID 0x00CE
-#define PCI_DEVICE_NEO_EXPRESS_8_DID 0x00F0
-#define PCI_DEVICE_NEO_EXPRESS_4_DID 0x00F1
-#define PCI_DEVICE_NEO_EXPRESS_4RJ45_DID 0x00F2
-#define PCI_DEVICE_NEO_EXPRESS_8RJ45_DID 0x00F3
-#define PCI_DEVICE_NEO_EXPRESS_4_IBM_DID 0x00F4
-
-#define PCI_DEVICE_CLASSIC_4_PCI_NAME "ClassicBoard 4 PCI"
-#define PCI_DEVICE_CLASSIC_8_PCI_NAME "ClassicBoard 8 PCI"
-#define PCI_DEVICE_CLASSIC_4_422_PCI_NAME "ClassicBoard 4 422 PCI"
-#define PCI_DEVICE_CLASSIC_8_422_PCI_NAME "ClassicBoard 8 422 PCI"
-#define PCI_DEVICE_NEO_4_PCI_NAME "Neo 4 PCI"
-#define PCI_DEVICE_NEO_8_PCI_NAME "Neo 8 PCI"
-#define PCI_DEVICE_NEO_2DB9_PCI_NAME "Neo 2 - DB9 Universal PCI"
-#define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME "Neo 2 - DB9 Universal PCI - Powered Ring Indicator"
-#define PCI_DEVICE_NEO_2RJ45_PCI_NAME "Neo 2 - RJ45 Universal PCI"
-#define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator"
-#define PCI_DEVICE_NEO_1_422_PCI_NAME "Neo 1 422 PCI"
-#define PCI_DEVICE_NEO_1_422_485_PCI_NAME "Neo 1 422/485 PCI"
-#define PCI_DEVICE_NEO_2_422_485_PCI_NAME "Neo 2 422/485 PCI"
-
-#define PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME "Neo 8 PCI Express"
-#define PCI_DEVICE_NEO_EXPRESS_4_PCI_NAME "Neo 4 PCI Express"
-#define PCI_DEVICE_NEO_EXPRESS_4RJ45_PCI_NAME "Neo 4 PCI Express RJ45"
-#define PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME "Neo 8 PCI Express RJ45"
-#define PCI_DEVICE_NEO_EXPRESS_4_IBM_PCI_NAME "Neo 4 PCI Express IBM"
-
-/* Size of memory and I/O for PCI (4 K) */
-#define PCI_RAM_SIZE 0x1000
-
-/* Size of memory (4K) */
-#define PCI_MEM_SIZE 0x1000
-
-#endif /* _DGNC_PCI_H */
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index d3736daf8cf2..9f9b9a5b4b27 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
*/
/*
@@ -33,9 +24,7 @@
#include <linux/pci.h>
#include "dgnc_driver.h"
#include "dgnc_tty.h"
-#include "dgnc_neo.h"
#include "dgnc_cls.h"
-#include "dgnc_utils.h"
/* Default transparent print information. */
@@ -240,10 +229,7 @@ int dgnc_tty_init(struct dgnc_board *brd)
ch->ch_pun.un_type = DGNC_PRINT;
ch->ch_pun.un_dev = i + 128;
- if (brd->bd_uart_offset == 0x200)
- ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
- else
- ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);
+ ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);
ch->ch_bd = brd;
ch->ch_portnum = i;
@@ -1238,7 +1224,7 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
if (ch->ch_close_delay) {
spin_unlock_irqrestore(&ch->ch_lock,
flags);
- dgnc_ms_sleep(ch->ch_close_delay);
+ msleep_interruptible(ch->ch_close_delay);
spin_lock_irqsave(&ch->ch_lock, flags);
}
}
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index 6c58f1b3461a..00e31035b83d 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
*/
#ifndef _DGNC_TTY_H
diff --git a/drivers/staging/dgnc/dgnc_utils.c b/drivers/staging/dgnc/dgnc_utils.c
deleted file mode 100644
index 620f5741a1ed..000000000000
--- a/drivers/staging/dgnc/dgnc_utils.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/tty.h>
-#include <linux/sched/signal.h>
-#include "dgnc_utils.h"
-
-/**
- * dgnc_ms_sleep - Put the driver to sleep
- * @ms: milliseconds to sleep
- *
- * Return: 0 if the timeout elapsed, non-zero if interrupted by a signal.
- */
-int dgnc_ms_sleep(ulong ms)
-{
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout((ms * HZ) / 1000);
- return signal_pending(current);
-}
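
This helper duplicated what the kernel already provides, and the dgnc_tty.c hunk earlier in this diff switches its one caller to msleep_interruptible(). The return conventions differ: dgnc_ms_sleep() returned signal_pending(), while msleep_interruptible() returns the milliseconds left when a signal cuts the sleep short (0 after a full sleep). A sketch of a wrapper under the new convention:

    #include <linux/delay.h>
    #include <linux/errno.h>

    /* Sketch: 0 after a full sleep, -EINTR if a signal cut it short. */
    static int close_delay(unsigned long ms)
    {
            return msleep_interruptible(ms) ? -EINTR : 0;
    }
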
diff --git a/drivers/staging/dgnc/dgnc_utils.h b/drivers/staging/dgnc/dgnc_utils.h
deleted file mode 100644
index b30527f0889d..000000000000
--- a/drivers/staging/dgnc/dgnc_utils.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DGNC_UTILS_H
-#define _DGNC_UTILS_H
-
-int dgnc_ms_sleep(ulong ms);
-
-#endif /* _DGNC_UTILS_H */
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 46b06b07bbf2..b414ee80db88 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -1,50 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
*/
#ifndef _DIGI_H
#define _DIGI_H
-#ifndef TIOCM_LE
-#define TIOCM_LE 0x01 /* line enable */
-#define TIOCM_DTR 0x02 /* data terminal ready */
-#define TIOCM_RTS 0x04 /* request to send */
-#define TIOCM_ST 0x08 /* secondary transmit */
-#define TIOCM_SR 0x10 /* secondary receive */
-#define TIOCM_CTS 0x20 /* clear to send */
-#define TIOCM_CAR 0x40 /* carrier detect */
-#define TIOCM_RNG 0x80 /* ring indicator */
-#define TIOCM_DSR 0x100 /* data set ready */
-#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
-#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
-#endif
-
-#if !defined(TIOCMSET)
-#define TIOCMSET (('d' << 8) | 252) /* set modem ctrl state */
-#define TIOCMGET (('d' << 8) | 253) /* set modem ctrl state */
-#endif
-
-#if !defined(TIOCMBIC)
-#define TIOCMBIC (('d' << 8) | 254) /* set modem ctrl state */
-#define TIOCMBIS (('d' << 8) | 255) /* set modem ctrl state */
-#endif
-
#define DIGI_GETA (('e' << 8) | 94) /* Read params */
#define DIGI_SETA (('e' << 8) | 95) /* Set params */
#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */
#define DIGI_SETAF (('e' << 8) | 97) /* Drain, flush & set params */
-#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */
#define DIGI_LOOPBACK (('d' << 8) | 252) /* Enable/disable UART
* internal loopback
*/
@@ -88,49 +54,6 @@ struct digi_t {
};
/**
- * struct digi_dinfo - Driver status information.
- * @dinfo_nboards: Number of boards configured.
- * @dinfo_reserved: Not used, for future expansion.
- * @dinfo_version: Driver version.
- */
-struct digi_dinfo {
- unsigned int dinfo_nboards;
- char dinfo_reserved[12];
- char dinfo_version[16];
-};
-
-#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */
-
-/**
- * struct digi_info - Ioctl commands for per board information.
- *
- * Physsize and memsize differ when board has "windowed" memory.
- *
- * @info_bdnum: Board number (0 based).
- * @info_ioport: IO port address.
- * @info_physaddr: Memory address.
- * @info_physsize: Size of host memory window.
- * @info_memsize: Amount of dual-port memory on board.
- * @info_bdtype: Board type.
- * @info_nports: Number of ports.
- * @info_bdstate: Board state.
- * @info_reserved: Not used, for future expansion.
- */
-struct digi_info {
- unsigned int info_bdnum;
- unsigned int info_ioport;
- unsigned int info_physaddr;
- unsigned int info_physsize;
- unsigned int info_memsize;
- unsigned short info_bdtype;
- unsigned short info_nports;
- char info_bdstate;
- char info_reserved[7];
-};
-
-#define DIGI_GETBD (('d' << 8) | 249) /* get board info */
-
-/**
* struct digi_getbuffer - Holds buffer use counts.
*/
struct digi_getbuffer {
@@ -161,10 +84,6 @@ struct digi_getcounter {
unsigned long tbytes;
};
-/* Board State Definitions */
-#define BD_RUNNING 0x0
-#define BD_NOFEP 0x5
-
#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
@@ -204,11 +123,6 @@ struct ni_info {
unsigned int baud;
};
-#define T_CLASSIC 0002
-#define T_PCIBUS 0400
-#define T_NEO_EXPRESS 0001
-#define T_NEO 0000
-
#define TTY_FLIPBUF_SIZE 512
#endif /* _DIGI_H */
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index bb010cb98a1c..7517001fb8f0 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/gadget/emxx_udc.c
* EMXX FCD (Function Controller Driver) for USB.
*
* Copyright (C) 2010 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index 928d531a5115..8337e38c238a 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* EMXX FCD (Function Controller Driver) for USB.
*
* Copyright (C) 2010 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_EMXX_H
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index dba676761d72..84b2e7ebc024 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -135,12 +135,6 @@ config FB_TFT_SSD1306
help
Framebuffer support for SSD1306
-config FB_TFT_SSD1325
- tristate "FB driver for the SSD1325 OLED Controller"
- depends on FB_TFT
- help
- Framebuffer support for SSD1325
-
config FB_TFT_SSD1331
tristate "FB driver for the SSD1331 LCD Controller"
depends on FB_TFT
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 456a8dd65caf..f6f30f5bf15a 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for Two KS0108 LCD controllers in AGM1264K-FL display
*
* Copyright (C) 2014 ololoshka2871
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
index 6010e6cbbd72..a58c514f4721 100644
--- a/drivers/staging/fbtft/fb_bd663474.c
+++ b/drivers/staging/fbtft/fb_bd663474.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the uPD161704 LCD Controller
*
@@ -6,16 +7,6 @@
* Based on fb_ili9325.c by Noralf Tronnes
* Based on ili9325.c by Jeroen Domburg
* Init code from UTFT library by Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_hx8340bn.c b/drivers/staging/fbtft/fb_hx8340bn.c
index fbd5ef525243..d47dcf31fffb 100644
--- a/drivers/staging/fbtft/fb_hx8340bn.c
+++ b/drivers/staging/fbtft/fb_hx8340bn.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the HX8340BN LCD Controller
*
@@ -7,16 +8,6 @@
* This is done by transferring eight 9-bit words in 9 bytes.
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_hx8347d.c b/drivers/staging/fbtft/fb_hx8347d.c
index bbf78f8644a8..0b605303813e 100644
--- a/drivers/staging/fbtft/fb_hx8347d.c
+++ b/drivers/staging/fbtft/fb_hx8347d.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the HX8347D LCD Controller
*
* Copyright (C) 2013 Christian Vogelgsang
*
* Based on driver code found here: https://github.com/watterott/r61505u-Adapter
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_hx8353d.c b/drivers/staging/fbtft/fb_hx8353d.c
index 2c18051a44b3..3e73b69b6a27 100644
--- a/drivers/staging/fbtft/fb_hx8353d.c
+++ b/drivers/staging/fbtft/fb_hx8353d.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the HX8353D LCD Controller
*
* Copyright (c) 2014 Petr Olivka
* Copyright (c) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_hx8357d.c b/drivers/staging/fbtft/fb_hx8357d.c
index 32e6efe1d0a7..94a357e8fdf6 100644
--- a/drivers/staging/fbtft/fb_hx8357d.c
+++ b/drivers/staging/fbtft/fb_hx8357d.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the HX8357D LCD Controller
* Copyright (C) 2015 Adafruit Industries
@@ -6,16 +7,6 @@
* Copyright (C) 2013 Christian Vogelgsang
*
* Based on driver code found here: https://github.com/watterott/r61505u-Adapter
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_hx8357d.h b/drivers/staging/fbtft/fb_hx8357d.h
index e281921d4a97..6180b093f94f 100644
--- a/drivers/staging/fbtft/fb_hx8357d.h
+++ b/drivers/staging/fbtft/fb_hx8357d.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* This is our library for the Adafruit ILI9341 Breakout and Shield
* ----> http://www.adafruit.com/products/1651
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index 045cadc3bc65..fd3dd671509f 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9163 LCD Controller
*
@@ -5,16 +6,6 @@
*
* Based on ili9325.c by Noralf Tronnes and
* .S.U.M.O.T.O.Y. by Max MC Costa (https://github.com/sumotoy/TFT_ILI9163C).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index 20ba86da028b..501eee7dce4c 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9320 LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index 7f9e9b25490e..d6b1d4be9ff4 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9325 LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
*
* Based on ili9325.c by Jeroen Domburg
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
index 0711121c303c..430f21e50f4d 100644
--- a/drivers/staging/fbtft/fb_ili9340.c
+++ b/drivers/staging/fbtft/fb_ili9340.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9340 LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ili9341.c b/drivers/staging/fbtft/fb_ili9341.c
index 21a98e9e1a14..a10e8c9de438 100644
--- a/drivers/staging/fbtft/fb_ili9341.c
+++ b/drivers/staging/fbtft/fb_ili9341.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9341 LCD display controller
*
@@ -8,16 +9,6 @@
*
* Copyright (C) 2013 Christian Vogelgsang
* Based on adafruit22fb.c by Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
index 7f182a1c084c..19eba085ea53 100644
--- a/drivers/staging/fbtft/fb_ili9481.c
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9481 LCD Controller
*
* Copyright (c) 2014 Petr Olivka
* Copyright (c) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
index ddd07a64c48a..66210a7137fc 100644
--- a/drivers/staging/fbtft/fb_ili9486.c
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9486 LCD Controller
*
* Copyright (C) 2014 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_pcd8544.c b/drivers/staging/fbtft/fb_pcd8544.c
index 87f678a314cc..32172f8f79f0 100644
--- a/drivers/staging/fbtft/fb_pcd8544.c
+++ b/drivers/staging/fbtft/fb_pcd8544.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the PCD8544 LCD Controller
*
@@ -5,16 +6,6 @@
* Any pixel value except 0 turns the pixel on.
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index 6d1cad85957b..5d3b76ca74d8 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FBTFT driver for the RA8875 LCD Controller
* Copyright by Pf@nne & NOTRO
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index c12855bb6891..75295760f491 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the S6D02A1 LCD Controller
*
* Based on fb_st7735r.c by Noralf Tronnes
* Init code from UTFT library by Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
index 3b36ed50d491..b90244259d43 100644
--- a/drivers/staging/fbtft/fb_s6d1121.c
+++ b/drivers/staging/fbtft/fb_s6d1121.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the S6D1121 LCD Controller
*
@@ -6,16 +7,6 @@
* Based on fb_ili9325.c by Noralf Tronnes
* Based on ili9325.c by Jeroen Domburg
* Init code from UTFT library by Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
index 89c27a440305..3fc18c0a6f11 100644
--- a/drivers/staging/fbtft/fb_sh1106.c
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the SH1106 OLED Controller
* Based on the SSD1306 driver by Noralf Tronnes
*
* Copyright (C) 2017 Heiner Kallweit
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index 129e175fcd7e..cbf22e1f4b61 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the SSD1289 LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
*
* Init sequence taken from ITDB02_Graph16.cpp - (C)2010-2011 Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ssd1305.c b/drivers/staging/fbtft/fb_ssd1305.c
index 33c03872ca84..3515888d94c9 100644
--- a/drivers/staging/fbtft/fb_ssd1305.c
+++ b/drivers/staging/fbtft/fb_ssd1305.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the SSD1305 OLED Controller
*
* based on SSD1306 driver by Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c
index 96c58de85288..9276be499303 100644
--- a/drivers/staging/fbtft/fb_ssd1306.c
+++ b/drivers/staging/fbtft/fb_ssd1306.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the SSD1306 OLED Controller
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
index b7e40c24f58e..1a469b3c92d4 100644
--- a/drivers/staging/fbtft/fb_ssd1325.c
+++ b/drivers/staging/fbtft/fb_ssd1325.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the SSD1325 OLED Controller
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index e4a759b54ba0..383e197cf56a 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index e62235d4d9e7..b8ef75f5e856 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index d98522a39344..631208bd3a17 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ST7735R LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
index a5d7c87557f8..7d7573a7b615 100644
--- a/drivers/staging/fbtft/fb_st7789v.c
+++ b/drivers/staging/fbtft/fb_st7789v.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ST7789V LCD Controller
*
* Copyright (C) 2015 Dennis Menschel
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/bitops.h>
diff --git a/drivers/staging/fbtft/fb_tinylcd.c b/drivers/staging/fbtft/fb_tinylcd.c
index 097e71cfef62..e463b0ddf16d 100644
--- a/drivers/staging/fbtft/fb_tinylcd.c
+++ b/drivers/staging/fbtft/fb_tinylcd.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Custom FB driver for tinylcd.com display
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_tls8204.c b/drivers/staging/fbtft/fb_tls8204.c
index 4302e822de3b..277b6ed9c725 100644
--- a/drivers/staging/fbtft/fb_tls8204.c
+++ b/drivers/staging/fbtft/fb_tls8204.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the TLS8204 LCD Controller
*
@@ -6,16 +7,6 @@
*
* Copyright (C) 2013 Noralf Tronnes
* Copyright (C) 2014 Michael Hope (adapted for the TLS8204)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 8eb5e7f10fb5..4d65567eefe2 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the UltraChip UC1611 LCD controller
*
* The display is 4-bit grayscale (16 shades) 240x160.
*
* Copyright (C) 2015 Henri Chain
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_uc1701.c b/drivers/staging/fbtft/fb_uc1701.c
index 78899a172c7e..0a3531d6eb39 100644
--- a/drivers/staging/fbtft/fb_uc1701.c
+++ b/drivers/staging/fbtft/fb_uc1701.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the UC1701 LCD Controller
*
@@ -5,16 +6,6 @@
* Any pixel value except 0 turns the pixel on.
*
* Copyright (C) 2014 Juergen Holzmann
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
index 970b8430eccf..acc425fdf34e 100644
--- a/drivers/staging/fbtft/fb_upd161704.c
+++ b/drivers/staging/fbtft/fb_upd161704.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the uPD161704 LCD Controller
*
@@ -6,16 +7,6 @@
* Based on fb_ili9325.c by Noralf Tronnes
* Based on ili9325.c by Jeroen Domburg
* Init code from UTFT library by Henning Karlsen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 180e5be6fa4f..bfd1527f20f7 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the Watterott LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 6d0363deba61..0e36b66ae5f7 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2013 Noralf Tronnes
*
* This driver is inspired by:
* st7735fb.c, Copyright (C) 2011, Matt Porter
* broadsheetfb.c, Copyright (C) 2008, Jaya Kumar
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -1367,20 +1358,18 @@ int fbtft_probe_common(struct fbtft_display *display,
}
/* write register functions */
- if (display->regwidth == 8 && display->buswidth == 8) {
+ if (display->regwidth == 8 && display->buswidth == 8)
par->fbtftops.write_register = fbtft_write_reg8_bus8;
- } else
- if (display->regwidth == 8 && display->buswidth == 9 && par->spi) {
+ else if (display->regwidth == 8 && display->buswidth == 9 && par->spi)
par->fbtftops.write_register = fbtft_write_reg8_bus9;
- } else if (display->regwidth == 16 && display->buswidth == 8) {
+ else if (display->regwidth == 16 && display->buswidth == 8)
par->fbtftops.write_register = fbtft_write_reg16_bus8;
- } else if (display->regwidth == 16 && display->buswidth == 16) {
+ else if (display->regwidth == 16 && display->buswidth == 16)
par->fbtftops.write_register = fbtft_write_reg16_bus16;
- } else {
+ else
dev_warn(dev,
- "no default functions for regwidth=%d and buswidth=%d\n",
- display->regwidth, display->buswidth);
- }
+ "no default functions for regwidth=%d and buswidth=%d\n",
+ display->regwidth, display->buswidth);
/* write_vmem() functions */
if (display->buswidth == 8)
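For reference, the tidied chain above is a dispatch on (regwidth, buswidth), with the 9-bit bus case additionally requiring SPI. A standalone C sketch of the same selection in table form (the handler names are hypothetical stand-ins for the fbtft_write_reg*_bus* helpers):

    #include <stdio.h>

    /* Stand-ins for the fbtft_write_reg*_bus* helpers (hypothetical). */
    typedef void (*write_reg_fn)(void);
    static void reg8_bus8(void)   { puts("8-bit regs, 8-bit bus"); }
    static void reg8_bus9(void)   { puts("8-bit regs, 9-bit SPI bus"); }
    static void reg16_bus8(void)  { puts("16-bit regs, 8-bit bus"); }
    static void reg16_bus16(void) { puts("16-bit regs, 16-bit bus"); }

    static const struct {
        int regwidth, buswidth, needs_spi;
        write_reg_fn fn;
    } dispatch[] = {
        {  8,  8, 0, reg8_bus8   },
        {  8,  9, 1, reg8_bus9   },
        { 16,  8, 0, reg16_bus8  },
        { 16, 16, 0, reg16_bus16 },
    };

    static write_reg_fn pick_write_register(int regwidth, int buswidth,
                                            int has_spi)
    {
        size_t i;

        for (i = 0; i < sizeof(dispatch) / sizeof(dispatch[0]); i++)
            if (dispatch[i].regwidth == regwidth &&
                dispatch[i].buswidth == buswidth &&
                (!dispatch[i].needs_spi || has_spi))
                return dispatch[i].fn;
        return NULL; /* no default; caller warns, as dev_warn() does above */
    }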
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 488ab788138e..e19e64e0d094 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -1,16 +1,5 @@
-/*
- * Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2013 Noralf Tronnes */
#ifndef __LINUX_FBTFT_H
#define __LINUX_FBTFT_H
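The hunk above also shows the kernel's SPDX comment-style convention, which the rest of this series follows: C sources take a C++-style marker on line 1, while headers keep a C-style marker, since headers may be pulled into contexts where // comments are not accepted:

    // SPDX-License-Identifier: GPL-2.0+        (first line of a .c file)
    /* SPDX-License-Identifier: GPL-2.0+ */     (first line of a .h file)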
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 0d974738c1c4..ec8477674b7d 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
*
* Copyright (C) 2013, Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "fbtft_device: " fmt
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index 7134624a16c2..f676c9b853f1 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Generic FB driver for TFT LCD displays
*
* Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/staging/fbtft/internal.h b/drivers/staging/fbtft/internal.h
index 25b9bf6f54bb..ae2ff4a4a472 100644
--- a/drivers/staging/fbtft/internal.h
+++ b/drivers/staging/fbtft/internal.h
@@ -1,17 +1,5 @@
-/*
- * Copyright (C) 2013 Noralf Tronnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2013 Noralf Tronnes */
#ifndef __LINUX_FBTFT_INTERNAL_H
#define __LINUX_FBTFT_INTERNAL_H
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
index 0d8ed002adcb..2817e67df3d5 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -230,7 +230,8 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi)
+ struct napi_struct *napi,
+ u16 queue_id)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd);
@@ -249,7 +250,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
- fas = dpaa2_get_fas(vaddr);
+ fas = dpaa2_get_fas(vaddr, false);
prefetch(fas);
buf_data = vaddr + dpaa2_fd_get_offset(fd);
prefetch(buf_data);
@@ -281,6 +282,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
}
skb->protocol = eth_type_trans(skb, priv->net_dev);
+ skb_record_rx_queue(skb, queue_id);
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
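The new queue_id parameter feeds skb_record_rx_queue() above; together with the Tx-side change further down, it keeps forwarded traffic on the core that received it. A kernel-context sketch (not standalone) of how the two halves pair up:

    /* Rx: tag the skb with the hardware queue it arrived on */
    skb_record_rx_queue(skb, queue_id);

    /* Tx (see dpaa2_eth_tx below): the stack-selected mapping, which for
     * frames forwarded from another DPNI falls back to the recorded Rx
     * queue, keeps the enqueue on the same core that handled Rx.
     */
    queue_mapping = skb_get_queue_mapping(skb);
    fq = &priv->fq[queue_mapping];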
@@ -325,7 +327,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch)
fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
fq->stats.frames++;
- fq->consume(priv, ch, fd, &ch->napi);
+ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
cleaned++;
} while (!is_last);
@@ -348,7 +350,6 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
int num_sg;
int num_dma_bufs;
struct dpaa2_eth_swa *swa;
- struct dpaa2_fas *fas;
/* Create and map scatterlist.
* We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
@@ -379,15 +380,6 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
goto sgt_buf_alloc_failed;
}
sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
-
- /* PTA from egress side is passed as is to the confirmation side so
- * we need to clear some fields here in order to find consistent values
- * on TX confirmation. We are clearing FAS (Frame Annotation Status)
- * field from the hardware annotation area
- */
- fas = dpaa2_get_fas(sgt_buf);
- memset(fas, 0, DPAA2_FAS_SIZE);
-
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
/* Fill in the HW SGT structure.
@@ -424,8 +416,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_len(fd, skb->len);
- dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
- DPAA2_FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_PTA | DPAA2_FD_CTRL_PTV1);
return 0;
@@ -444,22 +435,19 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
struct dpaa2_fd *fd)
{
struct device *dev = priv->net_dev->dev.parent;
- u8 *buffer_start;
- struct dpaa2_fas *fas;
+ u8 *buffer_start, *aligned_start;
struct sk_buff **skbh;
dma_addr_t addr;
- buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
- DPAA2_ETH_TX_BUF_ALIGN,
- DPAA2_ETH_TX_BUF_ALIGN);
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
- /* PTA from egress side is passed as is to the confirmation side so
- * we need to clear some fields here in order to find consistent values
- * on TX confirmation. We are clearing FAS (Frame Annotation Status)
- * field from the hardware annotation area
+ /* If there's enough room to align the FD address, do it.
+ * It will help hardware optimize accesses.
*/
- fas = dpaa2_get_fas(buffer_start);
- memset(fas, 0, DPAA2_FAS_SIZE);
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= skb->head)
+ buffer_start = aligned_start;
/* Store a backpointer to the skb at the beginning of the buffer
* (in the private data area) such that we can release it
@@ -478,8 +466,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_format(fd, dpaa2_fd_single);
- dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
- DPAA2_FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_PTA | DPAA2_FD_CTRL_PTV1);
return 0;
}
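The aligned_start logic above reserves only the software annotation by default and aligns the frame address down opportunistically. A standalone C sketch of the PTR_ALIGN idiom it relies on (TX_BUF_ALIGN stands in for DPAA2_ETH_TX_BUF_ALIGN and is assumed to be a power of two):

    #include <stdint.h>

    #define TX_BUF_ALIGN 64 /* assumption, for illustration */

    /* PTR_ALIGN(p - A, A) rounds up after stepping back one alignment
     * unit, yielding the highest A-aligned address strictly below p.
     * Use it only if it does not underrun the buffer head.
     */
    static uint8_t *maybe_align_down(uint8_t *buffer_start, uint8_t *head)
    {
        uintptr_t p = (uintptr_t)(buffer_start - TX_BUF_ALIGN);
        uint8_t *aligned = (uint8_t *)((p + TX_BUF_ALIGN - 1) &
                                       ~(uintptr_t)(TX_BUF_ALIGN - 1));

        return aligned >= head ? aligned : buffer_start;
    }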
@@ -494,8 +481,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* to be checked if we're on the confirmation path.
*/
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
- const struct dpaa2_fd *fd,
- u32 *status)
+ const struct dpaa2_fd *fd)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr;
@@ -506,11 +492,9 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
int num_sg, num_dma_bufs;
struct dpaa2_eth_swa *swa;
u8 fd_format = dpaa2_fd_get_format(fd);
- struct dpaa2_fas *fas;
fd_addr = dpaa2_fd_get_addr(fd);
skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
- fas = dpaa2_get_fas(skbh);
if (fd_format == dpaa2_fd_single) {
skb = *skbh;
@@ -537,19 +521,10 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
} else {
- /* Unsupported format, mark it as errored and give up */
- if (status)
- *status = ~0;
+ netdev_dbg(priv->net_dev, "Invalid FD format\n");
return;
}
- /* Read the status from the Frame Annotation after we unmap the first
- * buffer but before we free it. The caller function is responsible
- * for checking the status value.
- */
- if (status)
- *status = le32_to_cpu(fas->status);
-
/* Free SGT buffer kmalloc'ed on tx */
if (fd_format != dpaa2_fd_single)
kfree(skbh);
@@ -566,19 +541,22 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
struct dpaa2_eth_drv_stats *percpu_extras;
struct dpaa2_eth_fq *fq;
u16 queue_mapping;
+ unsigned int needed_headroom;
int err, i;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
- if (unlikely(skb_headroom(skb) < dpaa2_eth_needed_headroom(priv))) {
+ needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
+ if (skb_headroom(skb) < needed_headroom) {
struct sk_buff *ns;
- ns = skb_realloc_headroom(skb, dpaa2_eth_needed_headroom(priv));
+ ns = skb_realloc_headroom(skb, needed_headroom);
if (unlikely(!ns)) {
percpu_stats->tx_dropped++;
goto err_alloc_headroom;
}
+ percpu_extras->tx_reallocs++;
dev_kfree_skb(skb);
skb = ns;
}
@@ -612,13 +590,15 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
/* Tracing point */
trace_dpaa2_tx_fd(net_dev, &fd);
- /* TxConf FQ selection primarily based on cpu affinity; this is
- * non-migratable context, so it's safe to call smp_processor_id().
+ /* TxConf FQ selection relies on queue id from the stack.
+ * In case of a forwarded frame from another DPNI interface, we choose
+ * a queue affined to the same core that processed the Rx frame
*/
- queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
+ queue_mapping = skb_get_queue_mapping(skb);
fq = &priv->fq[queue_mapping];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, 0,
fq->tx_qdbin, &fd);
if (err != -EBUSY)
break;
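The enqueue loop above also shows the driver's busy-retry idiom: ndo_start_xmit() cannot sleep, so a portal returning -EBUSY is retried a bounded number of times and the frame is dropped if the portal stays busy. A standalone C sketch of the idiom, with try_enqueue and ctx as hypothetical stand-ins:

    #include <errno.h>

    /* Retry a busy-returning operation at most 'retries' times,
     * mirroring the DPAA2_ETH_ENQUEUE_RETRIES loop above.
     */
    static int enqueue_with_retries(int (*try_enqueue)(void *ctx), void *ctx,
                                    int retries)
    {
        int err = -EBUSY;

        while (retries-- > 0) {
            err = try_enqueue(ctx);
            if (err != -EBUSY)
                break;
        }
        return err;
    }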
@@ -627,7 +607,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- free_tx_fd(priv, &fd, NULL);
+ free_tx_fd(priv, &fd);
} else {
percpu_stats->tx_packets++;
percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
@@ -646,13 +626,12 @@ err_alloc_headroom:
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi __always_unused)
+ struct napi_struct *napi __always_unused,
+ u16 queue_id __always_unused)
{
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
- u32 status = 0;
u32 fd_errors;
- bool has_fas_errors = false;
/* Tracing point */
trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
@@ -663,29 +642,18 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- if (unlikely(fd_errors)) {
- /* We only check error bits in the FAS field if corresponding
- * FAERR bit is set in FD and the FAS field is marked as valid
- */
- has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
- !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
- if (net_ratelimit())
- netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
- fd_errors);
- }
-
- free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
+ free_tx_fd(priv, fd);
if (likely(!fd_errors))
return;
+ if (net_ratelimit())
+ netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
+ fd_errors);
+
percpu_stats = this_cpu_ptr(priv->percpu_stats);
/* Tx-conf logically pertains to the egress path. */
percpu_stats->tx_errors++;
-
- if (has_fas_errors && net_ratelimit())
- netdev_dbg(priv->net_dev, "TX frame FAS error: 0x%08x\n",
- status & DPAA2_FAS_TX_ERR_MASK);
}
static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
@@ -752,7 +720,8 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
-static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
+static int add_bufs(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch, u16 bpid)
{
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
@@ -786,7 +755,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
release_bufs:
/* In case the portal is busy, retry until successful */
- while ((err = dpaa2_io_service_release(NULL, bpid,
+ while ((err = dpaa2_io_service_release(ch->dpio, bpid,
buf_array, i)) == -EBUSY)
cpu_relax();
@@ -827,7 +796,7 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
for (j = 0; j < priv->num_channels; j++) {
for (i = 0; i < DPAA2_ETH_NUM_BUFS;
i += DPAA2_ETH_BUFS_PER_CMD) {
- new_count = add_bufs(priv, bpid);
+ new_count = add_bufs(priv, priv->channel[j], bpid);
priv->channel[j]->buf_count += new_count;
if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
@@ -885,7 +854,7 @@ static int refill_pool(struct dpaa2_eth_priv *priv,
return 0;
do {
- new_count = add_bufs(priv, bpid);
+ new_count = add_bufs(priv, ch, bpid);
if (unlikely(!new_count)) {
/* Out of memory; abort for now, we'll try later on */
break;
@@ -906,7 +875,8 @@ static int pull_channel(struct dpaa2_eth_channel *ch)
/* Retry while portal is busy */
do {
- err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
+ err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
+ ch->store);
dequeues++;
cpu_relax();
} while (err == -EBUSY);
@@ -956,7 +926,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
if (cleaned < budget && napi_complete_done(napi, cleaned)) {
/* Re-enable data available notifications */
do {
- err = dpaa2_io_service_rearm(NULL, &ch->nctx);
+ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
cpu_relax();
} while (err == -EBUSY);
WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
@@ -1564,7 +1534,8 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
nctx->desired_cpu = i;
/* Register the new context */
- err = dpaa2_io_service_register(NULL, nctx);
+ channel->dpio = dpaa2_io_service_select(i);
+ err = dpaa2_io_service_register(channel->dpio, nctx);
if (err) {
dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
/* If no affine DPIO for this core, there's probably
@@ -1604,7 +1575,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
return 0;
err_set_cdan:
- dpaa2_io_service_deregister(NULL, nctx);
+ dpaa2_io_service_deregister(channel->dpio, nctx);
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
@@ -1627,7 +1598,7 @@ static void free_dpio(struct dpaa2_eth_priv *priv)
/* deregister CDAN notifications and free channels */
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- dpaa2_io_service_deregister(NULL, &ch->nctx);
+ dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
free_channel(priv, ch);
}
}
@@ -1653,9 +1624,10 @@ static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
static void set_fq_affinity(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
+ struct cpumask xps_mask;
struct dpaa2_eth_fq *fq;
int rx_cpu, txc_cpu;
- int i;
+ int i, err;
/* For each FQ, pick one channel/CPU to deliver frames to.
* This may well change at runtime, either through irqbalance or
@@ -1674,6 +1646,17 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
break;
case DPAA2_TX_CONF_FQ:
fq->target_cpu = txc_cpu;
+
+ /* Tell the stack to affine to txc_cpu the Tx queue
+ * associated with the confirmation one
+ */
+ cpumask_clear(&xps_mask);
+ cpumask_set_cpu(txc_cpu, &xps_mask);
+ err = netif_set_xps_queue(priv->net_dev, &xps_mask,
+ fq->flowid);
+ if (err)
+ dev_err(dev, "Error setting XPS queue\n");
+
txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
if (txc_cpu >= nr_cpu_ids)
txc_cpu = cpumask_first(&priv->dpio_cpumask);
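netif_set_xps_queue() above registers a one-CPU XPS mask per Tx queue, so the stack transmits each flow on the CPU whose channel also handles that queue's confirmations, keeping Tx and Tx-conf processing cache-local. A kernel-context sketch generalizing the same idea (pin_tx_queues is a hypothetical helper; the cpumask and netdev calls are the real APIs):

    static void pin_tx_queues(struct net_device *dev,
                              const struct cpumask *cpus, int num_queues)
    {
        struct cpumask mask;
        int q, cpu = cpumask_first(cpus);

        for (q = 0; q < num_queues; q++) {
            cpumask_clear(&mask);
            cpumask_set_cpu(cpu, &mask);
            /* XPS is a hint to the stack's Tx queue selection */
            netif_set_xps_queue(dev, &mask, q);

            cpu = cpumask_next(cpu, cpus);
            if (cpu >= nr_cpu_ids)
                cpu = cpumask_first(cpus);
        }
    }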
@@ -1791,10 +1774,8 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
/* tx buffer */
- buf_layout.pass_frame_status = true;
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
- buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
- DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &buf_layout);
if (err) {
@@ -1803,7 +1784,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
}
/* tx-confirm buffer */
- buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ buf_layout.options = 0;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX_CONFIRM, &buf_layout);
if (err) {
@@ -1826,6 +1807,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
priv->tx_data_offset);
/* rx buffer */
+ buf_layout.pass_frame_status = true;
buf_layout.pass_parser_result = true;
buf_layout.data_align = priv->rx_buf_align;
buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
@@ -2273,7 +2255,6 @@ static int netdev_init(struct net_device *net_dev)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- u16 rx_headroom, req_headroom;
u8 bcast_addr[ETH_ALEN];
u8 num_queues;
int err;
@@ -2292,24 +2273,6 @@ static int netdev_init(struct net_device *net_dev)
return err;
}
- /* Reserve enough space to align buffer as per hardware requirement;
- * NOTE: priv->tx_data_offset MUST be initialized at this point.
- */
- net_dev->needed_headroom = dpaa2_eth_needed_headroom(priv);
-
- /* If headroom guaranteed by hardware in the Rx frame buffer is
- * smaller than the Tx headroom required by the stack, issue a
- * one time warning. This will most likely mean skbs forwarded to
- * another DPAA2 network interface will get reallocated, with a
- * significant performance impact.
- */
- req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
- rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE +
- dpaa2_eth_rx_head_room(priv), priv->rx_buf_align);
- if (req_headroom > rx_headroom)
- dev_info_once(dev, "Required headroom (%d) greater than available (%d)\n",
- req_headroom, rx_headroom);
-
/* Set MTU limits */
net_dev->min_mtu = 68;
net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
index 5b3ab9f62d5e..e577410fdf4f 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -134,7 +134,6 @@ struct dpaa2_eth_swa {
DPAA2_FD_CTRL_FAERR)
/* Annotation bits in FD CTRL */
-#define DPAA2_FD_CTRL_ASAL 0x00010000 /* ASAL = 64 */
#define DPAA2_FD_CTRL_PTA 0x00800000
#define DPAA2_FD_CTRL_PTV1 0x00400000
@@ -153,10 +152,15 @@ struct dpaa2_fas {
#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
/* Accessors for the hardware annotation fields that we use */
-#define dpaa2_get_hwa(buf_addr) \
- ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
-#define dpaa2_get_fas(buf_addr) \
- (struct dpaa2_fas *)(dpaa2_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
+static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
+{
+ return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
+}
+
+static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
+}
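These accessors replace the old macros, which unconditionally skipped DPAA2_ETH_SWA_SIZE; the new swa flag selects whether a software annotation area precedes the hardware annotation. A usage sketch (the Rx call matches the hunk earlier in this patch; the Tx-side value of true is an assumption for illustration):

    /* Rx buffers: hardware annotation starts at the buffer base */
    struct dpaa2_fas *rx_fas = dpaa2_get_fas(vaddr, false);

    /* Tx buffers: a software annotation area of DPAA2_ETH_SWA_SIZE
     * bytes comes first, so skip over it before the HW annotation.
     */
    struct dpaa2_fas *tx_fas = dpaa2_get_fas(buf, true);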
/* Error and status bits in the frame annotation status word */
/* Debug frame, otherwise supposed to be discarded */
@@ -203,11 +207,6 @@ struct dpaa2_fas {
DPAA2_FAS_BLE | \
DPAA2_FAS_L3CE | \
DPAA2_FAS_L4CE)
-/* Tx errors */
-#define DPAA2_FAS_TX_ERR_MASK (DPAA2_FAS_KSE | \
- DPAA2_FAS_EOFHE | \
- DPAA2_FAS_MNLE | \
- DPAA2_FAS_TIDE)
/* Time in milliseconds between link state updates */
#define DPAA2_ETH_LINK_STATE_REFRESH 1000
@@ -226,6 +225,7 @@ struct dpaa2_eth_drv_stats {
__u64 tx_conf_bytes;
__u64 tx_sg_frames;
__u64 tx_sg_bytes;
+ __u64 tx_reallocs;
__u64 rx_sg_frames;
__u64 rx_sg_bytes;
/* Enqueues retried due to portal busy */
@@ -276,7 +276,8 @@ struct dpaa2_eth_fq {
void (*consume)(struct dpaa2_eth_priv *,
struct dpaa2_eth_channel *,
const struct dpaa2_fd *,
- struct napi_struct *);
+ struct napi_struct *,
+ u16 queue_id);
struct dpaa2_eth_fq_stats stats;
};
@@ -287,6 +288,7 @@ struct dpaa2_eth_channel {
int ch_id;
int dpio_id;
struct napi_struct napi;
+ struct dpaa2_io *dpio;
struct dpaa2_io_store *store;
struct dpaa2_eth_priv *priv;
int buf_count;
@@ -364,9 +366,13 @@ static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
}
static inline
-unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv)
+unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb)
{
- return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - HH_DATA_MOD;
+ if (skb_is_nonlinear(skb))
+ return 0;
+
+ return DPAA2_ETH_SWA_SIZE;
}
/* Extra headroom space requested to hardware, in order to make sure there's
@@ -374,7 +380,8 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv)
*/
static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
{
- return dpaa2_eth_needed_headroom(priv) - DPAA2_ETH_RX_HWA_SIZE;
+ return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
+ DPAA2_ETH_RX_HWA_SIZE;
}
static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
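After this change the headroom bookkeeping splits cleanly between the two helpers; summarizing the arithmetic from the hunks above:

    /* needed_headroom(skb) = 0                   if skb is nonlinear
     *                      = DPAA2_ETH_SWA_SIZE  otherwise
     *
     * rx_head_room = tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN
     *                - DPAA2_ETH_RX_HWA_SIZE
     *
     * i.e. the Tx path now asks only for the software annotation, while
     * the alignment slack moves into the hardware-guaranteed Rx headroom.
     */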
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
index ebe8fd6ccf2c..070a3f2a0523 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -62,6 +62,7 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] tx conf bytes",
"[drv] tx sg frames",
"[drv] tx sg bytes",
+ "[drv] tx realloc frames",
"[drv] rx sg frames",
"[drv] rx sg bytes",
"[drv] enqueue portal busy",
diff --git a/drivers/staging/fsl-mc/Kconfig b/drivers/staging/fsl-mc/Kconfig
index 32df07b15e09..3002229bec1b 100644
--- a/drivers/staging/fsl-mc/Kconfig
+++ b/drivers/staging/fsl-mc/Kconfig
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
source "drivers/staging/fsl-mc/bus/Kconfig"
diff --git a/drivers/staging/fsl-mc/Makefile b/drivers/staging/fsl-mc/Makefile
index 9c6a00128c65..14683889dabd 100644
--- a/drivers/staging/fsl-mc/Makefile
+++ b/drivers/staging/fsl-mc/Makefile
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
# Freescale Management Complex (MC) bus drivers
obj-$(CONFIG_FSL_MC_BUS) += bus/
diff --git a/drivers/staging/fsl-mc/README.txt b/drivers/staging/fsl-mc/README.txt
deleted file mode 100644
index 524eda1de04f..000000000000
--- a/drivers/staging/fsl-mc/README.txt
+++ /dev/null
@@ -1,386 +0,0 @@
-Copyright (C) 2015 Freescale Semiconductor Inc.
-
-DPAA2 (Data Path Acceleration Architecture Gen2)
-------------------------------------------------
-
-This document provides an overview of the Freescale DPAA2 architecture
-and how it is integrated into the Linux kernel.
-
-Contents summary
- -DPAA2 overview
- -Overview of DPAA2 objects
- -DPAA2 Linux driver architecture overview
- -bus driver
- -DPRC driver
- -allocator
- -DPIO driver
- -Ethernet
- -MAC
-
-DPAA2 Overview
---------------
-
-DPAA2 is a hardware architecture designed for high-speed network
-packet processing. DPAA2 consists of sophisticated mechanisms for
-processing Ethernet packets, queue management, buffer management,
-autonomous L2 switching, virtual Ethernet bridging, and accelerator
-(e.g. crypto) sharing.
-
-A DPAA2 hardware component called the Management Complex (or MC) manages the
-DPAA2 hardware resources. The MC provides an object-based abstraction for
-software drivers to use the DPAA2 hardware.
-
-The MC uses DPAA2 hardware resources such as queues, buffer pools, and
-network ports to create functional objects/devices such as network
-interfaces, an L2 switch, or accelerator instances.
-
-The MC provides memory-mapped I/O command interfaces (MC portals)
-which DPAA2 software drivers use to operate on DPAA2 objects:
-
-The diagram below shows an overview of the DPAA2 resource management
-architecture:
-
- +--------------------------------------+
- | OS |
- | DPAA2 drivers |
- | | |
- +-----------------------------|--------+
- |
- | (create,discover,connect
- | config,use,destroy)
- |
- DPAA2 |
- +------------------------| mc portal |-+
- | | |
- | +- - - - - - - - - - - - -V- - -+ |
- | | | |
- | | Management Complex (MC) | |
- | | | |
- | +- - - - - - - - - - - - - - - -+ |
- | |
- | Hardware Hardware |
- | Resources Objects |
- | --------- ------- |
- | -queues -DPRC |
- | -buffer pools -DPMCP |
- | -Eth MACs/ports -DPIO |
- | -network interface -DPNI |
- | profiles -DPMAC |
- | -queue portals -DPBP |
- | -MC portals ... |
- | ... |
- | |
- +--------------------------------------+
-
-The MC mediates operations such as create, discover,
-connect, configure, and destroy. Fast-path operations
-on data, such as packet transmit/receive, are not mediated by
-the MC and are done directly using memory mapped regions in
-DPIO objects.
-
-Overview of DPAA2 Objects
--------------------------
-This section provides a brief overview of some key DPAA2 objects.
-A simple scenario is described illustrating the objects involved
-in creating a network interface.
-
--DPRC (Datapath Resource Container)
-
- A DPRC is a container object that holds all the other
- types of DPAA2 objects. In the example diagram below there
- are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
- in the container.
-
- +---------------------------------------------------------+
- | DPRC |
- | |
- | +-------+ +-------+ +-------+ +-------+ +-------+ |
- | | DPMCP | | DPIO | | DPBP | | DPNI | | DPMAC | |
- | +-------+ +-------+ +-------+ +---+---+ +---+---+ |
- | | DPMCP | | DPIO | |
- | +-------+ +-------+ |
- | | DPMCP | |
- | +-------+ |
- | |
- +---------------------------------------------------------+
-
- From the point of view of an OS, a DPRC behaves similarly to a plug-and-
- play bus, like PCI. DPRC commands can be used to enumerate the contents
- of the DPRC and discover the hardware objects present (including mappable
- regions and interrupts).
-
- DPRC.1 (bus)
- |
- +--+--------+-------+-------+-------+
- | | | | |
- DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1
- DPMCP.2 DPIO.2
- DPMCP.3
-
- Hardware objects can be created and destroyed dynamically, providing
- the ability to hot plug/unplug objects in and out of the DPRC.
-
- A DPRC has a mappable MMIO region (an MC portal) that can be used
- to send MC commands. It has an interrupt for status events (like
- hotplug).
-
- All objects in a container share the same hardware "isolation context".
- This means that with respect to an IOMMU the isolation granularity
- is at the DPRC (container) level, not at the individual object
- level.
-
- DPRCs can be defined statically and populated with objects
- via a config file passed to the MC when firmware starts
- it.
-
--DPAA2 Objects for an Ethernet Network Interface
-
- A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
- queuing mechanisms, configuration mechanisms, buffer management,
- physical ports, and interrupts. DPAA2 uses a more granular approach
- utilizing multiple hardware objects. Each object provides specialized
- functions. Groups of these objects are used by software to provide
- Ethernet network interface functionality. This approach provides
- efficient use of finite hardware resources, flexibility, and
- performance advantages.
-
- The diagram below shows the objects needed for a simple
- network interface configuration on a system with 2 CPUs.
-
- +---+---+ +---+---+
- CPU0 CPU1
- +---+---+ +---+---+
- | |
- +---+---+ +---+---+
- DPIO DPIO
- +---+---+ +---+---+
- \ /
- \ /
- \ /
- +---+---+
- DPNI --- DPBP,DPMCP
- +---+---+
- |
- |
- +---+---+
- DPMAC
- +---+---+
- |
- port/PHY
-
- The objects are described below. For each object a brief description
- is provided, along with a summary of the operations it supports and
- of its key resources (MMIO regions and IRQs).
-
- -DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
- hardware device that connects to an Ethernet PHY and allows
- physical transmission and reception of Ethernet frames.
- -MMIO regions: none
- -IRQs: DPNI link change
- -commands: set link up/down, link config, get stats,
- IRQ config, enable, reset
-
- -DPNI (Datapath Network Interface): contains TX/RX queues,
- network interface configuration, and RX buffer pool configuration
- mechanisms. The TX/RX queues are in memory and are identified by
- queue number.
- -MMIO regions: none
- -IRQs: link state
- -commands: port config, offload config, queue config,
- parse/classify config, IRQ config, enable, reset
-
- -DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
- packets and do hardware buffer pool management operations. The DPAA2
- architecture separates the mechanism to access queues (the DPIO object)
- from the queues themselves. The DPIO provides an MMIO interface to
- enqueue/dequeue packets. To enqueue something a descriptor is written
- to the DPIO MMIO region, which includes the target queue number.
- There will typically be one DPIO assigned to each CPU. This allows all
- CPUs to simultaneously perform enqueue/dequeue operations. DPIOs are
- expected to be shared by different DPAA2 drivers.
- -MMIO regions: queue operations, buffer management
- -IRQs: data availability, congestion notification, buffer
- pool depletion
- -commands: IRQ config, enable, reset
-
- -DPBP (Datapath Buffer Pool): represents a hardware buffer
- pool.
- -MMIO regions: none
- -IRQs: none
- -commands: enable, reset
-
- -DPMCP (Datapath MC Portal): provides an MC command portal.
- Used by drivers to send commands to the MC to manage
- objects.
- -MMIO regions: MC command portal
- -IRQs: command completion
- -commands: IRQ config, enable, reset
-
- Object Connections
- ------------------
- Some objects have explicit relationships that must
- be configured:
-
- -DPNI <--> DPMAC
- -DPNI <--> DPNI
- -DPNI <--> L2-switch-port
- A DPNI must be connected to something such as a DPMAC,
- another DPNI, or L2 switch port. The DPNI connection
- is made via a DPRC command.
-
- +-------+ +-------+
- | DPNI | | DPMAC |
- +---+---+ +---+---+
- | |
- +==========+
-
- -DPNI <--> DPBP
- A network interface requires a 'buffer pool' (DPBP
- object) which provides a list of pointers to memory
- where received Ethernet data is to be copied. The
- Ethernet driver configures the DPBPs associated with
- the network interface.
-
- Interrupts
- ----------
- All interrupts generated by DPAA2 objects are message
- interrupts. At the hardware level message interrupts
- generated by devices will normally have 3 components--
- 1) a non-spoofable 'device-id' expressed on the hardware
- bus, 2) an address, 3) a data value.
-
- In the case of DPAA2 devices/objects, all objects in the
- same container/DPRC share the same 'device-id'.
- For ARM-based SoCs this is the same as the stream ID.
-
-
-DPAA2 Linux Driver Overview
----------------------------
-
-This section provides an overview of the Linux kernel drivers for
-DPAA2-- 1) the bus driver and associated "DPAA2 infrastructure"
-drivers and 2) functional object drivers (such as Ethernet).
-
-As described previously, a DPRC is a container that holds the other
-types of DPAA2 objects. It is functionally similar to a plug-and-play
-bus controller.
-
-Each object in the DPRC is a Linux "device" and is bound to a driver.
-The diagram below shows the Linux drivers involved in a networking
-scenario and the objects bound to each driver. A brief description
-of each driver follows.
-
- +------------+
- | OS Network |
- | Stack |
- +------------+ +------------+
- | Allocator |. . . . . . . | Ethernet |
- |(DPMCP,DPBP)| | (DPNI) |
- +-.----------+ +---+---+----+
- . . ^ |
- . . <data avail, | |<enqueue,
- . . tx confirm> | | dequeue>
- +-------------+ . | |
- | DPRC driver | . +---+---V----+ +---------+
- | (DPRC) | . . . . . .| DPIO driver| | MAC |
- +----------+--+ | (DPIO) | | (DPMAC) |
- | +------+-----+ +-----+---+
- |<dev add/remove> | |
- | | |
- +----+--------------+ | +--+---+
- | MC-bus driver | | | PHY |
- | | | |driver|
- | /soc/fsl-mc | | +--+---+
- +-------------------+ | |
- | |
- ================================ HARDWARE =========|=================|======
- DPIO |
- | |
- DPNI---DPBP |
- | |
- DPMAC |
- | |
- PHY ---------------+
- ===================================================|========================
-
-A brief description of each driver is provided below.
-
- MC-bus driver
- -------------
- The MC-bus driver is a platform driver and is probed from a
- node in the device tree (compatible "fsl,qoriq-mc") passed in by boot
- firmware. It is responsible for bootstrapping the DPAA2 kernel
- infrastructure.
- Key functions include:
- -registering a new bus type named "fsl-mc" with the kernel,
- and implementing bus call-backs (e.g. match/uevent/dev_groups)
- -implementing APIs for DPAA2 driver registration and for device
- add/remove
- -creating an MSI IRQ domain
- -doing a 'device add' to expose the 'root' DPRC, in turn triggering
- a bind of the root DPRC to the DPRC driver
- The binding for the MC-bus device-tree node can be consulted here:
- Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
-
- DPRC driver
- -----------
- The DPRC driver is bound to DPRC objects and does runtime management
- of a bus instance. It performs the initial bus scan of the DPRC
- and handles interrupts for container events such as hot plug by
- re-scanning the DPRC.
-
- Allocator
- ----------
- Certain objects such as DPMCP and DPBP are generic and fungible,
- and are intended to be used by other drivers. For example,
- the DPAA2 Ethernet driver needs:
- -DPMCPs to send MC commands, to configure network interfaces
- -DPBPs for network buffer pools
-
- The allocator driver registers for these allocatable object types
- and those objects are bound to the allocator when the bus is probed.
- The allocator maintains a pool of objects that are available for
- allocation by other DPAA2 drivers.
-
- DPIO driver
- -----------
- The DPIO driver is bound to DPIO objects and provides services that allow
- other drivers such as the Ethernet driver to enqueue and dequeue data for
- their respective objects.
- Key services include:
- -data availability notifications
- -hardware queuing operations (enqueue and dequeue of data)
- -hardware buffer pool management
-
- To transmit a packet the Ethernet driver puts data on a queue and
- invokes a DPIO API. For receive, the Ethernet driver registers
- a data availability notification callback. To dequeue a packet
- a DPIO API is used.
-
- There is typically one DPIO object per physical CPU for optimum
- performance, allowing different CPUs to simultaneously enqueue
- and dequeue data.
-
- The DPIO driver operates on behalf of all DPAA2 drivers
- active in the kernel-- Ethernet, crypto, compression,
- etc.
-
- Ethernet
- --------
- The Ethernet driver is bound to a DPNI and implements the kernel
- interfaces needed to connect the DPAA2 network interface to
- the network stack.
-
- Each DPNI corresponds to a Linux network interface.
-
- MAC driver
- ----------
- An Ethernet PHY is an off-chip, board-specific component and is managed
- by the appropriate PHY driver via an mdio bus. The MAC driver acts as
- a proxy between the PHY driver and the MC, relaying PHY state to the
- MC via commands to a DPMAC object.
- If the PHY driver signals a link change, the MAC driver notifies
- the MC via a DPMAC command. If a network interface is brought
- up or down, the MC notifies the DPMAC driver via an interrupt and
- the driver can take appropriate action.
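Although the README is removed here, the MC command-portal flow it described is still visible in the wrappers that follow (dpbp.c, dpcon.c): each call prepares a command header, sends it through the portal, and optionally decodes a response. A condensed kernel-context sketch of that shape (DPXX_CMDID_FOO is a hypothetical command id):

    struct mc_command cmd = { 0 };
    int err;

    /* prepare: encode (command id, flags, object token) into the header */
    cmd.header = mc_encode_cmd_header(DPXX_CMDID_FOO, cmd_flags, token);

    /* send: write the command through the MC portal, poll for completion */
    err = mc_send_command(mc_io, &cmd);
    if (err)
        return err;

    /* parse: for query commands, decode response words from cmd.params */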
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
index 504c987447f2..1f9100049176 100644
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ b/drivers/staging/fsl-mc/bus/Kconfig
@@ -1,10 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
#
# DPAA2 fsl-mc bus
#
# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
#
-# This file is released under the GPLv2
-#
config FSL_MC_BUS
bool "QorIQ DPAA2 fsl-mc bus driver"
diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
index 6df407edfade..29059db95ecc 100644
--- a/drivers/staging/fsl-mc/bus/Makefile
+++ b/drivers/staging/fsl-mc/bus/Makefile
@@ -1,10 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Freescale Management Complex (MC) bus drivers
#
# Copyright (C) 2014 Freescale Semiconductor, Inc.
#
-# This file is released under the GPLv2
-#
obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
mc-bus-driver-objs := fsl-mc-bus.o \
diff --git a/drivers/staging/fsl-mc/bus/dpbp-cmd.h b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
index 5904836fd741..0b7f5c041f19 100644
--- a/drivers/staging/fsl-mc/bus/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPBP_CMD_H
#define _FSL_DPBP_CMD_H
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index 363730a80cbb..a4df84668d5b 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include "../include/mc.h"
@@ -77,7 +51,7 @@ int dpbp_open(struct fsl_mc_io *mc_io,
return err;
}
-EXPORT_SYMBOL(dpbp_open);
+EXPORT_SYMBOL_GPL(dpbp_open);
/**
* dpbp_close() - Close the control session of the object
@@ -103,7 +77,7 @@ int dpbp_close(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dpbp_close);
+EXPORT_SYMBOL_GPL(dpbp_close);
/**
* dpbp_enable() - Enable the DPBP.
@@ -126,7 +100,7 @@ int dpbp_enable(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dpbp_enable);
+EXPORT_SYMBOL_GPL(dpbp_enable);
/**
* dpbp_disable() - Disable the DPBP.
@@ -149,7 +123,7 @@ int dpbp_disable(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dpbp_disable);
+EXPORT_SYMBOL_GPL(dpbp_disable);
/**
* dpbp_is_enabled() - Check if the DPBP is enabled.
@@ -183,7 +157,7 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
return 0;
}
-EXPORT_SYMBOL(dpbp_is_enabled);
+EXPORT_SYMBOL_GPL(dpbp_is_enabled);
/**
* dpbp_reset() - Reset the DPBP, returns the object to initial state.
@@ -206,7 +180,7 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dpbp_reset);
+EXPORT_SYMBOL_GPL(dpbp_reset);
/**
* dpbp_get_attributes - Retrieve DPBP attributes.
@@ -243,7 +217,7 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
-EXPORT_SYMBOL(dpbp_get_attributes);
+EXPORT_SYMBOL_GPL(dpbp_get_attributes);
/**
* dpbp_get_api_version - Get Data Path Buffer Pool API version
@@ -276,4 +250,4 @@ int dpbp_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
-EXPORT_SYMBOL(dpbp_get_api_version);
+EXPORT_SYMBOL_GPL(dpbp_get_api_version);
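The EXPORT_SYMBOL -> EXPORT_SYMBOL_GPL conversions above tighten who may link against these MC wrappers; the difference in one example:

    /* Before: usable by any module, regardless of license */
    EXPORT_SYMBOL(dpbp_open);

    /* After: usable only by modules declaring a GPL-compatible
     * MODULE_LICENSE(); proprietary modules fail to resolve the symbol.
     */
    EXPORT_SYMBOL_GPL(dpbp_open);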
diff --git a/drivers/staging/fsl-mc/bus/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
index 2bb66988ecf6..27fa09877970 100644
--- a/drivers/staging/fsl-mc/bus/dpcon-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPCON_CMD_H
#define _FSL_DPCON_CMD_H
@@ -45,13 +19,11 @@
/* Command IDs */
#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
-#define DPCON_CMDID_GET_API_VERSION DPCON_CMD(0xa08)
#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
#define DPCON_CMDID_RESET DPCON_CMD(0x005)
-#define DPCON_CMDID_IS_ENABLED DPCON_CMD(0x006)
#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
@@ -61,10 +33,6 @@ struct dpcon_cmd_open {
#define DPCON_ENABLE 1
-struct dpcon_rsp_is_enabled {
- u8 enabled;
-};
-
struct dpcon_rsp_get_attr {
/* response word 0 */
__le32 id;
diff --git a/drivers/staging/fsl-mc/bus/dpcon.c b/drivers/staging/fsl-mc/bus/dpcon.c
index ca1da85c6dda..8f84d7b5465c 100644
--- a/drivers/staging/fsl-mc/bus/dpcon.c
+++ b/drivers/staging/fsl-mc/bus/dpcon.c
@@ -1,33 +1,7 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include "../include/mc.h"
@@ -78,7 +52,7 @@ int dpcon_open(struct fsl_mc_io *mc_io,
return 0;
}
-EXPORT_SYMBOL(dpcon_open);
+EXPORT_SYMBOL_GPL(dpcon_open);
/**
* dpcon_close() - Close the control session of the object
@@ -105,7 +79,7 @@ int dpcon_close(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dpcon_close);
+EXPORT_SYMBOL_GPL(dpcon_close);
/**
* dpcon_enable() - Enable the DPCON
@@ -129,7 +103,7 @@ int dpcon_enable(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dpcon_enable);
+EXPORT_SYMBOL_GPL(dpcon_enable);
/**
* dpcon_disable() - Disable the DPCON
@@ -153,43 +127,7 @@ int dpcon_disable(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dpcon_disable);
-
-/**
- * dpcon_is_enabled() - Check if the DPCON is enabled.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCON object
- * @en: Returns '1' if object is enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpcon_is_enabled(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *en)
-{
- struct mc_command cmd = { 0 };
- struct dpcon_rsp_is_enabled *dpcon_rsp;
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
- cmd_flags,
- token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
- *en = dpcon_rsp->enabled & DPCON_ENABLE;
-
- return 0;
-}
-EXPORT_SYMBOL(dpcon_is_enabled);
+EXPORT_SYMBOL_GPL(dpcon_disable);
/**
* dpcon_reset() - Reset the DPCON, returns the object to initial state.
@@ -212,7 +150,7 @@ int dpcon_reset(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dpcon_reset);
+EXPORT_SYMBOL_GPL(dpcon_reset);
/**
* dpcon_get_attributes() - Retrieve DPCON attributes.
@@ -250,7 +188,7 @@ int dpcon_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
-EXPORT_SYMBOL(dpcon_get_attributes);
+EXPORT_SYMBOL_GPL(dpcon_get_attributes);
/**
* dpcon_set_notification() - Set DPCON notification destination
@@ -281,37 +219,4 @@ int dpcon_set_notification(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dpcon_set_notification);
-
-/**
- * dpcon_get_api_version - Get Data Path Concentrator API version
- * @mc_io: Pointer to MC portal's DPCON object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver: Major version of DPCON API
- * @minor_ver: Minor version of DPCON API
- *
- * Return: '0' on Success; Error code otherwise
- */
-int dpcon_get_api_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 *major_ver,
- u16 *minor_ver)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
- cmd_flags, 0);
-
- /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
-
- return 0;
-}
-EXPORT_SYMBOL(dpcon_get_api_version);
+EXPORT_SYMBOL_GPL(dpcon_set_notification);
diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/staging/fsl-mc/bus/dpio/Makefile
index 837d3303e11d..53ba84d7b884 100644
--- a/drivers/staging/fsl-mc/bus/dpio/Makefile
+++ b/drivers/staging/fsl-mc/bus/dpio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# QorIQ DPAA2 DPIO driver
#
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
index b2dc6e766f09..ab8f82ee7ee5 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
@@ -1,34 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPIO_CMD_H
#define _FSL_DPIO_CMD_H
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
index e36da20a2796..b8479ef64c71 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
@@ -1,33 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright NXP 2016
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/types.h>
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
index a609ec82daf3..d3c8462d43e8 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
@@ -1,33 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/types.h>
#include "../../include/mc.h"
@@ -43,7 +18,6 @@
#include "qbman-portal.h"
struct dpaa2_io {
- atomic_t refs;
struct dpaa2_io_desc dpio_desc;
struct qbman_swp_desc swp_desc;
struct qbman_swp *swp;
@@ -105,6 +79,23 @@ static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
}
/**
+ * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
+ * @cpu: the cpu id
+ *
+ * Return the affine dpaa2_io service, or NULL if there is no service affined
+ * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
+ * service.
+ */
+struct dpaa2_io *dpaa2_io_service_select(int cpu)
+{
+ if (cpu == DPAA2_IO_ANY_CPU)
+ return service_select(NULL);
+
+ return service_select_by_cpu(NULL, cpu);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+
+/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
*
@@ -126,7 +117,6 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
return NULL;
}
- atomic_set(&obj->refs, 1);
obj->dpio_desc = *desc;
obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
@@ -158,7 +148,6 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
return obj;
}
-EXPORT_SYMBOL(dpaa2_io_create);
/**
* dpaa2_io_down() - release the dpaa2_io object.
@@ -171,11 +160,8 @@ EXPORT_SYMBOL(dpaa2_io_create);
*/
void dpaa2_io_down(struct dpaa2_io *d)
{
- if (!atomic_dec_and_test(&d->refs))
- return;
kfree(d);
}
-EXPORT_SYMBOL(dpaa2_io_down);
#define DPAA_POLL_MAX 32
@@ -222,7 +208,6 @@ done:
qbman_swp_interrupt_set_inhibit(swp, 0);
return IRQ_HANDLED;
}
-EXPORT_SYMBOL(dpaa2_io_irq);
/**
* dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
@@ -265,7 +250,7 @@ int dpaa2_io_service_register(struct dpaa2_io *d,
ctx->qman64);
return 0;
}
-EXPORT_SYMBOL(dpaa2_io_service_register);
+EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
/**
* dpaa2_io_service_deregister - The opposite of 'register'.
@@ -288,7 +273,7 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
list_del(&ctx->node);
spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
-EXPORT_SYMBOL(dpaa2_io_service_deregister);
+EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
/**
* dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
@@ -322,38 +307,7 @@ int dpaa2_io_service_rearm(struct dpaa2_io *d,
return err;
}
-EXPORT_SYMBOL(dpaa2_io_service_rearm);
-
-/**
- * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
- * @d: the given DPIO service.
- * @fqid: the given frame queue id.
- * @s: the dpaa2_io_store object for the result.
- *
- * Return 0 for success, or error code for failure.
- */
-int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
- struct dpaa2_io_store *s)
-{
- struct qbman_pull_desc pd;
- int err;
-
- qbman_pull_desc_clear(&pd);
- qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
- qbman_pull_desc_set_numframes(&pd, (u8)s->max);
- qbman_pull_desc_set_fq(&pd, fqid);
-
- d = service_select(d);
- if (!d)
- return -ENODEV;
- s->swp = d->swp;
- err = qbman_swp_pull(d->swp, &pd);
- if (err)
- s->swp = NULL;
-
- return err;
-}
-EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
/**
* dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
@@ -385,34 +339,7 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
return err;
}
-EXPORT_SYMBOL(dpaa2_io_service_pull_channel);
-
-/**
- * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
- * @d: the given DPIO service.
- * @fqid: the given frame queue id.
- * @fd: the frame descriptor which is enqueued.
- *
- * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
- * or -ENODEV if there is no dpio service.
- */
-int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
- u32 fqid,
- const struct dpaa2_fd *fd)
-{
- struct qbman_eq_desc ed;
-
- d = service_select(d);
- if (!d)
- return -ENODEV;
-
- qbman_eq_desc_clear(&ed);
- qbman_eq_desc_set_no_orp(&ed, 0);
- qbman_eq_desc_set_fq(&ed, fqid);
-
- return qbman_swp_enqueue(d->swp, &ed, fd);
-}
-EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
+EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
@@ -441,7 +368,7 @@ int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
return qbman_swp_enqueue(d->swp, &ed, fd);
}
-EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd);
+EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
/**
* dpaa2_io_service_release() - Release buffers to a buffer pool.
@@ -468,7 +395,7 @@ int dpaa2_io_service_release(struct dpaa2_io *d,
return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
-EXPORT_SYMBOL(dpaa2_io_service_release);
+EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
/**
* dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
@@ -499,7 +426,7 @@ int dpaa2_io_service_acquire(struct dpaa2_io *d,
return err;
}
-EXPORT_SYMBOL(dpaa2_io_service_acquire);
+EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
/*
* 'Stores' are reusable memory blocks for holding dequeue results, and to
@@ -553,7 +480,7 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
return ret;
}
-EXPORT_SYMBOL(dpaa2_io_store_create);
+EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
/**
* dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
@@ -567,7 +494,7 @@ void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
kfree(s->alloced_addr);
kfree(s);
}
-EXPORT_SYMBOL(dpaa2_io_store_destroy);
+EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
/**
* dpaa2_io_store_next() - Determine when the next dequeue result is available.
@@ -615,4 +542,4 @@ struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
return ret;
}
-EXPORT_SYMBOL(dpaa2_io_store_next);
+EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
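Two functional changes in dpio-service.c above are worth calling out: the
new dpaa2_io_service_select() gives drivers a public way to look up the
software-portal service affine to a given CPU, and with the refs counter
gone dpaa2_io_down() now frees the object unconditionally, so each
dpaa2_io is expected to have a single owner. A hedged sketch of how a
caller might use the selector (caller-side code, not part of this patch):

        /* Hypothetical caller: prefer the portal affine to the current
         * CPU, then fall back to any available service. */
        struct dpaa2_io *io;

        io = dpaa2_io_service_select(smp_processor_id());
        if (!io)
                io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
        if (!io)
                return -ENODEV;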
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/staging/fsl-mc/bus/dpio/dpio.c
index 00eb22186f42..20cdeae54a74 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
@@ -1,34 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include "../../include/mc.h"
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.h b/drivers/staging/fsl-mc/bus/dpio/dpio.h
index ced1103d157c..49194c8e45f1 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio.h
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.h
@@ -1,34 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPIO_H
#define __FSL_DPIO_H
diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
index 163bdac6b051..376e9ed0297a 100644
--- a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
@@ -1,33 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm/cacheflush.h>
diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
index 842855971f34..fb8b9d35a3eb 100644
--- a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
@@ -1,33 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
deleted file mode 100644
index 861b2a708af8..000000000000
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _FSL_DPMCP_CMD_H
-#define _FSL_DPMCP_CMD_H
-
-/* Minimal supported DPMCP Version */
-#define DPMCP_MIN_VER_MAJOR 3
-#define DPMCP_MIN_VER_MINOR 0
-
-/* Command versioning */
-#define DPMCP_CMD_BASE_VERSION 1
-#define DPMCP_CMD_ID_OFFSET 4
-
-#define DPMCP_CMD(id) (((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
-
-/* Command IDs */
-#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
-#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
-#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b)
-
-#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
-
-struct dpmcp_cmd_open {
- __le32 dpmcp_id;
-};
-
-#endif /* _FSL_DPMCP_CMD_H */
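For reference, the command-versioning scheme in the deleted header (its
definitions move into fsl-mc-private.h, as the #include change in the next
hunk shows) shifts the command ID left by DPMCP_CMD_ID_OFFSET and ORs in
the base version. A worked example:

        /* DPMCP_CMD(id) = ((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION */
        DPMCP_CMDID_OPEN = DPMCP_CMD(0x80b)
                         = (0x80b << 4) | 1
                         = 0x80b1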
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index eea42f61af86..be07c77520af 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -1,39 +1,12 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include "../include/mc.h"
-#include "dpmcp.h"
-#include "dpmcp-cmd.h"
+#include "fsl-mc-private.h"
/**
* dpmcp_open() - Open a control session for the specified object.
@@ -124,35 +97,3 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
-
-/**
- * dpmcp_get_api_version - Get Data Path Management Command Portal API version
- * @mc_io: Pointer to Mc portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver: Major version of Data Path Management Command Portal API
- * @minor_ver: Minor version of Data Path Management Command Portal API
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 *major_ver,
- u16 *minor_ver)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION,
- cmd_flags, 0);
-
- /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
-
- return 0;
-}
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
deleted file mode 100644
index f616031e3e59..000000000000
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __FSL_DPMCP_H
-#define __FSL_DPMCP_H
-
-/*
- * Data Path Management Command Portal API
- * Contains initialization APIs and runtime control APIs for DPMCP
- */
-
-struct fsl_mc_io;
-
-int dpmcp_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int dpmcp_id,
- u16 *token);
-
-int dpmcp_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 *major_ver,
- u16 *minor_ver);
-
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-#endif /* __FSL_DPMCP_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
deleted file mode 100644
index d1f04ac18b78..000000000000
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * dpmng-cmd.h
- *
- * defines portal commands
- *
- */
-
-#ifndef __FSL_DPMNG_CMD_H
-#define __FSL_DPMNG_CMD_H
-
-/* Command versioning */
-#define DPMNG_CMD_BASE_VERSION 1
-#define DPMNG_CMD_ID_OFFSET 4
-
-#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
-
-/* Command IDs */
-#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
-
-struct dpmng_rsp_get_version {
- __le32 revision;
- __le32 version_major;
- __le32 version_minor;
-};
-
-#endif /* __FSL_DPMNG_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
deleted file mode 100644
index d9b2dcde468e..000000000000
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * dprc-cmd.h
- *
- * defines dprc portal commands
- *
- */
-
-#ifndef _FSL_DPRC_CMD_H
-#define _FSL_DPRC_CMD_H
-
-/* Minimal supported DPRC Version */
-#define DPRC_MIN_VER_MAJOR 6
-#define DPRC_MIN_VER_MINOR 0
-
-/* Command versioning */
-#define DPRC_CMD_BASE_VERSION 1
-#define DPRC_CMD_ID_OFFSET 4
-
-#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
-
-/* Command IDs */
-#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
-#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
-#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
-
-#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
-
-#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
-#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011)
-#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
-#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013)
-#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
-#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015)
-#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
-#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
-
-#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
-#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
-#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
-#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B)
-#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
-#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
-#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160)
-
-struct dprc_cmd_open {
- __le32 container_id;
-};
-
-struct dprc_cmd_create_container {
- /* cmd word 0 */
- __le32 options;
- __le16 icid;
- __le16 pad0;
- /* cmd word 1 */
- __le32 pad1;
- __le32 portal_id;
- /* cmd words 2-3 */
- u8 label[16];
-};
-
-struct dprc_rsp_create_container {
- /* response word 0 */
- __le64 pad0;
- /* response word 1 */
- __le32 child_container_id;
- __le32 pad1;
- /* response word 2 */
- __le64 child_portal_addr;
-};
-
-struct dprc_cmd_destroy_container {
- __le32 child_container_id;
-};
-
-struct dprc_cmd_reset_container {
- __le32 child_container_id;
-};
-
-struct dprc_cmd_set_irq {
- /* cmd word 0 */
- __le32 irq_val;
- u8 irq_index;
- u8 pad[3];
- /* cmd word 1 */
- __le64 irq_addr;
- /* cmd word 2 */
- __le32 irq_num;
-};
-
-struct dprc_cmd_get_irq {
- __le32 pad;
- u8 irq_index;
-};
-
-struct dprc_rsp_get_irq {
- /* response word 0 */
- __le32 irq_val;
- __le32 pad;
- /* response word 1 */
- __le64 irq_addr;
- /* response word 2 */
- __le32 irq_num;
- __le32 type;
-};
-
-#define DPRC_ENABLE 0x1
-
-struct dprc_cmd_set_irq_enable {
- u8 enable;
- u8 pad[3];
- u8 irq_index;
-};
-
-struct dprc_cmd_get_irq_enable {
- __le32 pad;
- u8 irq_index;
-};
-
-struct dprc_rsp_get_irq_enable {
- u8 enabled;
-};
-
-struct dprc_cmd_set_irq_mask {
- __le32 mask;
- u8 irq_index;
-};
-
-struct dprc_cmd_get_irq_mask {
- __le32 pad;
- u8 irq_index;
-};
-
-struct dprc_rsp_get_irq_mask {
- __le32 mask;
-};
-
-struct dprc_cmd_get_irq_status {
- __le32 status;
- u8 irq_index;
-};
-
-struct dprc_rsp_get_irq_status {
- __le32 status;
-};
-
-struct dprc_cmd_clear_irq_status {
- __le32 status;
- u8 irq_index;
-};
-
-struct dprc_rsp_get_attributes {
- /* response word 0 */
- __le32 container_id;
- __le16 icid;
- __le16 pad;
- /* response word 1 */
- __le32 options;
- __le32 portal_id;
-};
-
-struct dprc_cmd_set_res_quota {
- /* cmd word 0 */
- __le32 child_container_id;
- __le16 quota;
- __le16 pad;
- /* cmd words 1-2 */
- u8 type[16];
-};
-
-struct dprc_cmd_get_res_quota {
- /* cmd word 0 */
- __le32 child_container_id;
- __le32 pad;
- /* cmd word 1-2 */
- u8 type[16];
-};
-
-struct dprc_rsp_get_res_quota {
- __le32 pad;
- __le16 quota;
-};
-
-struct dprc_cmd_assign {
- /* cmd word 0 */
- __le32 container_id;
- __le32 options;
- /* cmd word 1 */
- __le32 num;
- __le32 id_base_align;
- /* cmd word 2-3 */
- u8 type[16];
-};
-
-struct dprc_cmd_unassign {
- /* cmd word 0 */
- __le32 child_container_id;
- __le32 options;
- /* cmd word 1 */
- __le32 num;
- __le32 id_base_align;
- /* cmd word 2-3 */
- u8 type[16];
-};
-
-struct dprc_rsp_get_pool_count {
- __le32 pool_count;
-};
-
-struct dprc_cmd_get_pool {
- __le32 pool_index;
-};
-
-struct dprc_rsp_get_pool {
- /* response word 0 */
- __le64 pad;
- /* response word 1-2 */
- u8 type[16];
-};
-
-struct dprc_rsp_get_obj_count {
- __le32 pad;
- __le32 obj_count;
-};
-
-struct dprc_cmd_get_obj {
- __le32 obj_index;
-};
-
-struct dprc_rsp_get_obj {
- /* response word 0 */
- __le32 pad0;
- __le32 id;
- /* response word 1 */
- __le16 vendor;
- u8 irq_count;
- u8 region_count;
- __le32 state;
- /* response word 2 */
- __le16 version_major;
- __le16 version_minor;
- __le16 flags;
- __le16 pad1;
- /* response word 3-4 */
- u8 type[16];
- /* response word 5-6 */
- u8 label[16];
-};
-
-struct dprc_cmd_get_obj_desc {
- /* cmd word 0 */
- __le32 obj_id;
- __le32 pad;
- /* cmd word 1-2 */
- u8 type[16];
-};
-
-struct dprc_rsp_get_obj_desc {
- /* response word 0 */
- __le32 pad0;
- __le32 id;
- /* response word 1 */
- __le16 vendor;
- u8 irq_count;
- u8 region_count;
- __le32 state;
- /* response word 2 */
- __le16 version_major;
- __le16 version_minor;
- __le16 flags;
- __le16 pad1;
- /* response word 3-4 */
- u8 type[16];
- /* response word 5-6 */
- u8 label[16];
-};
-
-struct dprc_cmd_get_res_count {
- /* cmd word 0 */
- __le64 pad;
- /* cmd word 1-2 */
- u8 type[16];
-};
-
-struct dprc_rsp_get_res_count {
- __le32 res_count;
-};
-
-struct dprc_cmd_get_res_ids {
- /* cmd word 0 */
- u8 pad0[5];
- u8 iter_status;
- __le16 pad1;
- /* cmd word 1 */
- __le32 base_id;
- __le32 last_id;
- /* cmd word 2-3 */
- u8 type[16];
-};
-
-struct dprc_rsp_get_res_ids {
- /* response word 0 */
- u8 pad0[5];
- u8 iter_status;
- __le16 pad1;
- /* response word 1 */
- __le32 base_id;
- __le32 last_id;
-};
-
-struct dprc_cmd_get_obj_region {
- /* cmd word 0 */
- __le32 obj_id;
- __le16 pad0;
- u8 region_index;
- u8 pad1;
- /* cmd word 1-2 */
- __le64 pad2[2];
- /* cmd word 3-4 */
- u8 obj_type[16];
-};
-
-struct dprc_rsp_get_obj_region {
- /* response word 0 */
- __le64 pad;
- /* response word 1 */
- __le64 base_addr;
- /* response word 2 */
- __le32 size;
-};
-
-struct dprc_cmd_set_obj_label {
- /* cmd word 0 */
- __le32 obj_id;
- __le32 pad;
- /* cmd word 1-2 */
- u8 label[16];
- /* cmd word 3-4 */
- u8 obj_type[16];
-};
-
-struct dprc_cmd_set_obj_irq {
- /* cmd word 0 */
- __le32 irq_val;
- u8 irq_index;
- u8 pad[3];
- /* cmd word 1 */
- __le64 irq_addr;
- /* cmd word 2 */
- __le32 irq_num;
- __le32 obj_id;
- /* cmd word 3-4 */
- u8 obj_type[16];
-};
-
-struct dprc_cmd_get_obj_irq {
- /* cmd word 0 */
- __le32 obj_id;
- u8 irq_index;
- u8 pad[3];
- /* cmd word 1-2 */
- u8 obj_type[16];
-};
-
-struct dprc_rsp_get_obj_irq {
- /* response word 0 */
- __le32 irq_val;
- __le32 pad;
- /* response word 1 */
- __le64 irq_addr;
- /* response word 2 */
- __le32 irq_num;
- __le32 type;
-};
-
-struct dprc_cmd_connect {
- /* cmd word 0 */
- __le32 ep1_id;
- __le32 ep1_interface_id;
- /* cmd word 1 */
- __le32 ep2_id;
- __le32 ep2_interface_id;
- /* cmd word 2-3 */
- u8 ep1_type[16];
- /* cmd word 4 */
- __le32 max_rate;
- __le32 committed_rate;
- /* cmd word 5-6 */
- u8 ep2_type[16];
-};
-
-struct dprc_cmd_disconnect {
- /* cmd word 0 */
- __le32 id;
- __le32 interface_id;
- /* cmd word 1-2 */
- u8 type[16];
-};
-
-struct dprc_cmd_get_connection {
- /* cmd word 0 */
- __le32 ep1_id;
- __le32 ep1_interface_id;
- /* cmd word 1-2 */
- u8 ep1_type[16];
-};
-
-struct dprc_rsp_get_connection {
- /* response word 0-2 */
- __le64 pad[3];
- /* response word 3 */
- __le32 ep2_id;
- __le32 ep2_interface_id;
- /* response word 4-5 */
- u8 ep2_type[16];
- /* response word 6 */
- __le32 state;
-};
-
-#endif /* _FSL_DPRC_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index 06df528f4cfc..b09075731e62 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Freescale data path resource container (DPRC) driver
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
@@ -15,7 +13,6 @@
#include <linux/msi.h>
#include "../include/mc.h"
-#include "dprc-cmd.h"
#include "fsl-mc-private.h"
#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
@@ -39,8 +36,6 @@ static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
struct fsl_mc_child_objs *objs;
struct fsl_mc_device *mc_dev;
- WARN_ON(!dev);
- WARN_ON(!data);
mc_dev = to_fsl_mc_device(dev);
objs = data;
@@ -60,8 +55,6 @@ static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
static int __fsl_mc_device_remove(struct device *dev, void *data)
{
- WARN_ON(!dev);
- WARN_ON(data);
fsl_mc_device_remove(to_fsl_mc_device(dev));
return 0;
}
@@ -206,7 +199,8 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
* dprc_scan_objects - Discover objects in a DPRC
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
- * @total_irq_count: total number of IRQs needed by objects in the DPRC.
+ * @total_irq_count: if provided, the function populates it with the total
+ * number of IRQs created by objects in the DPRC.
*
* Detects objects added and removed from a DPRC and synchronizes the
* state of the Linux bus driver, MC by adding and removing
@@ -228,6 +222,7 @@ static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
int error;
unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
error = dprc_get_obj_count(mc_bus_dev->mc_io,
0,
@@ -297,7 +292,26 @@ static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
}
}
- *total_irq_count = irq_count;
+ /*
+ * Allocate IRQs before binding the scanned devices to their
+ * respective drivers.
+ */
+ if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+ dev_warn(&mc_bus_dev->dev,
+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ }
+
+ error = fsl_mc_populate_irq_pool(mc_bus,
+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ if (error < 0)
+ return error;
+ }
+
+ if (total_irq_count)
+ *total_irq_count = irq_count;
+
dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
num_child_objects);
@@ -322,7 +336,6 @@ static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
{
int error;
- unsigned int irq_count;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
fsl_mc_init_all_resource_pools(mc_bus_dev);
@@ -331,29 +344,14 @@ static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
* Discover objects in the DPRC:
*/
mutex_lock(&mc_bus->scan_mutex);
- error = dprc_scan_objects(mc_bus_dev, &irq_count);
+ error = dprc_scan_objects(mc_bus_dev, NULL);
mutex_unlock(&mc_bus->scan_mutex);
- if (error < 0)
- goto error;
-
- if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
- if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
- dev_warn(&mc_bus_dev->dev,
- "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
- irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
- }
-
- error = fsl_mc_populate_irq_pool(
- mc_bus,
- FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
- if (error < 0)
- goto error;
+ if (error < 0) {
+ fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
+ return error;
}
return 0;
-error:
- fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
- return error;
}
/**
@@ -386,11 +384,11 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
irq_num, smp_processor_id());
- if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC)))
+ if (!(mc_dev->flags & FSL_MC_IS_DPRC))
return IRQ_HANDLED;
mutex_lock(&mc_bus->scan_mutex);
- if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
+ if (!msi_desc || msi_desc->irq != (u32)irq_num)
goto out;
status = 0;
@@ -453,8 +451,6 @@ static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
int error;
struct fsl_mc_io *mc_io = mc_dev->mc_io;
- WARN_ON(mc_dev->obj_desc.irq_count != 1);
-
/*
* Disable generation of interrupt, while we configure it:
*/
@@ -496,8 +492,6 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
int error;
struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
- WARN_ON(mc_dev->obj_desc.irq_count != 1);
-
/*
* NOTE: devm_request_threaded_irq() invokes the device-specific
* function that programs the MSI physically in the device
@@ -601,20 +595,20 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
bool msi_domain_set = false;
u16 major_ver, minor_ver;
- if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
+ if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
- if (WARN_ON(dev_get_msi_domain(&mc_dev->dev)))
+ if (dev_get_msi_domain(&mc_dev->dev))
return -EINVAL;
if (!mc_dev->mc_io) {
/*
* This is a child DPRC:
*/
- if (WARN_ON(!dev_is_fsl_mc(parent_dev)))
+ if (!dev_is_fsl_mc(parent_dev))
return -EINVAL;
- if (WARN_ON(mc_dev->obj_desc.region_count == 0))
+ if (mc_dev->obj_desc.region_count == 0)
return -EINVAL;
region_size = resource_size(mc_dev->regions);
@@ -642,7 +636,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
*/
struct irq_domain *mc_msi_domain;
- if (WARN_ON(dev_is_fsl_mc(parent_dev)))
+ if (dev_is_fsl_mc(parent_dev))
return -EINVAL;
error = fsl_mc_find_msi_domain(parent_dev,
@@ -753,12 +747,12 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
int error;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
- if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
+ if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
- if (WARN_ON(!mc_dev->mc_io))
+ if (!mc_dev->mc_io)
return -EINVAL;
- if (WARN_ON(!mc_bus->irq_resources))
+ if (!mc_bus->irq_resources)
return -EINVAL;
if (dev_get_msi_domain(&mc_dev->dev))
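Two structural points in the dprc-driver.c hunks above: dprc_scan_objects()
now populates the MSI IRQ pool itself, before the scanned child devices are
bound to their drivers, and its total_irq_count argument is optional, which
lets dprc_scan_container() pass NULL. A sketch of the resulting call
pattern (caller side, under those assumptions):

        unsigned int irq_count;
        int error;

        /* caller that wants the IRQ count back */
        error = dprc_scan_objects(mc_bus_dev, &irq_count);

        /* caller that does not, as dprc_scan_container() now does */
        error = dprc_scan_objects(mc_bus_dev, NULL);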
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index 6f6c65a42166..97f51726fa7e 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -1,39 +1,11 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include "../include/mc.h"
-#include "dprc.h"
-
-#include "dprc-cmd.h"
+#include "fsl-mc-private.h"
/**
* dprc_open() - Open DPRC object for use
@@ -71,7 +43,7 @@ int dprc_open(struct fsl_mc_io *mc_io,
return 0;
}
-EXPORT_SYMBOL(dprc_open);
+EXPORT_SYMBOL_GPL(dprc_open);
/**
* dprc_close() - Close the control session of the object
@@ -97,53 +69,7 @@ int dprc_close(struct fsl_mc_io *mc_io,
/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dprc_close);
-
-/**
- * dprc_get_irq() - Get IRQ information from the DPRC.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @type: Interrupt type: 0 represents message interrupt
- * type (both irq_addr and irq_val are valid)
- * @irq_cfg: IRQ attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
- struct dprc_cmd_get_irq *cmd_params;
- struct dprc_rsp_get_irq *rsp_params;
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ,
- cmd_flags,
- token);
- cmd_params = (struct dprc_cmd_get_irq *)cmd.params;
- cmd_params->irq_index = irq_index;
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- rsp_params = (struct dprc_rsp_get_irq *)cmd.params;
- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
- *type = le32_to_cpu(rsp_params->type);
-
- return 0;
-}
+EXPORT_SYMBOL_GPL(dprc_close);
/**
* dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
@@ -179,45 +105,6 @@ int dprc_set_irq(struct fsl_mc_io *mc_io,
}
/**
- * dprc_get_irq_enable() - Get overall interrupt state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @en: Returned interrupt state - enable = 1, disable = 0
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en)
-{
- struct mc_command cmd = { 0 };
- struct dprc_cmd_get_irq_enable *cmd_params;
- struct dprc_rsp_get_irq_enable *rsp_params;
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE,
- cmd_flags, token);
- cmd_params = (struct dprc_cmd_get_irq_enable *)cmd.params;
- cmd_params->irq_index = irq_index;
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- rsp_params = (struct dprc_rsp_get_irq_enable *)cmd.params;
- *en = rsp_params->enabled & DPRC_ENABLE;
-
- return 0;
-}
-
-/**
* dprc_set_irq_enable() - Set overall interrupt state.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -253,48 +140,6 @@ int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
}
/**
- * dprc_get_irq_mask() - Get interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @irq_index: The interrupt index to configure
- * @mask: Returned event mask to trigger interrupt
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask)
-{
- struct mc_command cmd = { 0 };
- struct dprc_cmd_get_irq_mask *cmd_params;
- struct dprc_rsp_get_irq_mask *rsp_params;
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK,
- cmd_flags, token);
- cmd_params = (struct dprc_cmd_get_irq_mask *)cmd.params;
- cmd_params->irq_index = irq_index;
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- rsp_params = (struct dprc_rsp_get_irq_mask *)cmd.params;
- *mask = le32_to_cpu(rsp_params->mask);
-
- return 0;
-}
-
-/**
* dprc_set_irq_mask() - Set interrupt mask.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -475,7 +320,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
return 0;
}
-EXPORT_SYMBOL(dprc_get_obj_count);
+EXPORT_SYMBOL_GPL(dprc_get_obj_count);
/**
* dprc_get_obj() - Get general information on an object
@@ -531,7 +376,7 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
obj_desc->label[15] = '\0';
return 0;
}
-EXPORT_SYMBOL(dprc_get_obj);
+EXPORT_SYMBOL_GPL(dprc_get_obj);
/**
* dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
@@ -572,104 +417,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
-EXPORT_SYMBOL(dprc_set_obj_irq);
-
-/**
- * dprc_get_obj_irq() - Get IRQ information from object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @obj_type: Type od the object to get its IRQ
- * @obj_id: ID of the object to get its IRQ
- * @irq_index: The interrupt index to configure
- * @type: Interrupt type: 0 represents message interrupt
- * type (both irq_addr and irq_val are valid)
- * @irq_cfg: The returned IRQ attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
- struct dprc_cmd_get_obj_irq *cmd_params;
- struct dprc_rsp_get_obj_irq *rsp_params;
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ,
- cmd_flags,
- token);
- cmd_params = (struct dprc_cmd_get_obj_irq *)cmd.params;
- cmd_params->obj_id = cpu_to_le32(obj_id);
- cmd_params->irq_index = irq_index;
- strncpy(cmd_params->obj_type, obj_type, 16);
- cmd_params->obj_type[15] = '\0';
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- rsp_params = (struct dprc_rsp_get_obj_irq *)cmd.params;
- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
- *type = le32_to_cpu(rsp_params->type);
-
- return 0;
-}
-EXPORT_SYMBOL(dprc_get_obj_irq);
-
-/**
- * dprc_get_res_count() - Obtains the number of free resources that are assigned
- * to this container, by pool type
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRC object
- * @type: pool type
- * @res_count: Returned number of free resources of the given
- * resource type that are assigned to this DPRC
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprc_get_res_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- int *res_count)
-{
- struct mc_command cmd = { 0 };
- struct dprc_cmd_get_res_count *cmd_params;
- struct dprc_rsp_get_res_count *rsp_params;
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
- cmd_flags, token);
- cmd_params = (struct dprc_cmd_get_res_count *)cmd.params;
- strncpy(cmd_params->type, type, 16);
- cmd_params->type[15] = '\0';
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- rsp_params = (struct dprc_rsp_get_res_count *)cmd.params;
- *res_count = le32_to_cpu(rsp_params->res_count);
-
- return 0;
-}
-EXPORT_SYMBOL(dprc_get_res_count);
+EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
/**
* dprc_get_obj_region() - Get region information for a specified object.
@@ -717,7 +465,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
return 0;
}
-EXPORT_SYMBOL(dprc_get_obj_region);
+EXPORT_SYMBOL_GPL(dprc_get_obj_region);
/**
* dprc_get_api_version - Get Data Path Resource Container API version
diff --git a/drivers/staging/fsl-mc/bus/dprc.h b/drivers/staging/fsl-mc/bus/dprc.h
deleted file mode 100644
index 21295e4feb04..000000000000
--- a/drivers/staging/fsl-mc/bus/dprc.h
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _FSL_DPRC_H
-#define _FSL_DPRC_H
-
-/*
- * Data Path Resource Container API
- * Contains DPRC API for managing and querying DPAA resources
- */
-
-struct fsl_mc_io;
-struct fsl_mc_obj_desc;
-
-int dprc_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int container_id,
- u16 *token);
-
-int dprc_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-/* IRQ */
-
-/* IRQ index */
-#define DPRC_IRQ_INDEX 0
-
-/* Number of dprc's IRQs */
-#define DPRC_NUM_OF_IRQS 1
-
-/* DPRC IRQ events */
-
-/* IRQ event - Indicates that a new object was added to the container */
-#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
-/* IRQ event - Indicates that an object was removed from the container */
-#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
-/* IRQ event - Indicates that resources were added to the container */
-#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
-/* IRQ event - Indicates that resources were removed from the container */
-#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
-/*
- * IRQ event - Indicates that one of the descendant containers opened by
- * this container was destroyed
- */
-#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
-
-/*
- * IRQ event - Indicates that one of the container's opened objects is
- * destroyed
- */
-#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
-
-/* IRQ event - Indicates that an object was created in the container */
-#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
-
-/**
- * struct dprc_irq_cfg - IRQ configuration
- * @paddr: Address that must be written to signal a message-based interrupt
- * @val: Value to write into irq_addr address
- * @irq_num: A user defined number associated with this IRQ
- */
-struct dprc_irq_cfg {
- phys_addr_t paddr;
- u32 val;
- int irq_num;
-};
-
-int dprc_set_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en);
-
-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en);
-
-int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask);
-
-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask);
-
-int dprc_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status);
-
-int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status);
-
-/**
- * struct dprc_attributes - Container attributes
- * @container_id: Container's ID
- * @icid: Container's ICID
- * @portal_id: Container's portal ID
- * @options: Container's options as set at container's creation
- */
-struct dprc_attributes {
- int container_id;
- u16 icid;
- int portal_id;
- u64 options;
-};
-
-int dprc_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprc_attributes *attributes);
-
-int dprc_get_obj_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *obj_count);
-
-int dprc_get_obj(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int obj_index,
- struct fsl_mc_obj_desc *obj_desc);
-
-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- struct fsl_mc_obj_desc *obj_desc);
-
-int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 irq_index,
- int *type,
- struct dprc_irq_cfg *irq_cfg);
-
-int dprc_get_res_count(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *type,
- int *res_count);
-
-/**
- * enum dprc_iter_status - Iteration status
- * @DPRC_ITER_STATUS_FIRST: Perform first iteration
- * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed
- * @DPRC_ITER_STATUS_LAST: Indicates last iteration
- */
-enum dprc_iter_status {
- DPRC_ITER_STATUS_FIRST = 0,
- DPRC_ITER_STATUS_MORE = 1,
- DPRC_ITER_STATUS_LAST = 2
-};
-
-/* Region flags */
-/* Cacheable - Indicates that the region should be mapped as cacheable */
-#define DPRC_REGION_CACHEABLE 0x00000001
-
-/**
- * enum dprc_region_type - Region type
- * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
- * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
- */
-enum dprc_region_type {
- DPRC_REGION_TYPE_MC_PORTAL,
- DPRC_REGION_TYPE_QBMAN_PORTAL
-};
-
-/**
- * struct dprc_region_desc - Mappable region descriptor
- * @base_offset: Region offset from region's base address.
- * For DPMCP and DPRC objects, region base is offset from SoC MC portals
- * base address; For DPIO, region base is offset from SoC QMan portals
- * base address
- * @size: Region size (in bytes)
- * @flags: Region attributes
- * @type: Portal region type
- */
-struct dprc_region_desc {
- u32 base_offset;
- u32 size;
- u32 flags;
- enum dprc_region_type type;
-};
-
-int dprc_get_obj_region(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- char *obj_type,
- int obj_id,
- u8 region_index,
- struct dprc_region_desc *region_desc);
-
-int dprc_get_api_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 *major_ver,
- u16 *minor_ver);
-
-int dprc_get_container_id(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int *container_id);
-
-#endif /* _FSL_DPRC_H */
-
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index 8ea3920400a0..8f313a41240b 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fsl-mc object allocator driver
*
* Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
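
The SPDX conversions in this series follow the kernel's license-identifier placement rules: the tag is the first line of the file. In a .c file it takes C++ comment style:

    // SPDX-License-Identifier: GPL-2.0

while headers, as the later hunks in this patch show, use the C-style form:

    /* SPDX-License-Identifier: GPL-2.0 */
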
@@ -14,11 +12,11 @@
#include "fsl-mc-private.h"
-static bool __must_check fsl_mc_is_allocatable(const char *obj_type)
+static bool __must_check fsl_mc_is_allocatable(struct fsl_mc_device *mc_dev)
{
- return strcmp(obj_type, "dpbp") == 0 ||
- strcmp(obj_type, "dpmcp") == 0 ||
- strcmp(obj_type, "dpcon") == 0;
+ return is_fsl_mc_bus_dpbp(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev) ||
+ is_fsl_mc_bus_dpcon(mc_dev);
}
/**
@@ -41,25 +39,25 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
int error = -EINVAL;
- if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
goto out;
- if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
+ if (!fsl_mc_is_allocatable(mc_dev))
goto out;
- if (WARN_ON(mc_dev->resource))
+ if (mc_dev->resource)
goto out;
res_pool = &mc_bus->resource_pools[pool_type];
- if (WARN_ON(res_pool->type != pool_type))
+ if (res_pool->type != pool_type)
goto out;
- if (WARN_ON(res_pool->mc_bus != mc_bus))
+ if (res_pool->mc_bus != mc_bus)
goto out;
mutex_lock(&res_pool->mutex);
- if (WARN_ON(res_pool->max_count < 0))
+ if (res_pool->max_count < 0)
goto out_unlock;
- if (WARN_ON(res_pool->free_count < 0 ||
- res_pool->free_count > res_pool->max_count))
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count > res_pool->max_count)
goto out_unlock;
resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
@@ -105,25 +103,25 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
struct fsl_mc_resource *resource;
int error = -EINVAL;
- if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
+ if (!fsl_mc_is_allocatable(mc_dev))
goto out;
resource = mc_dev->resource;
- if (WARN_ON(!resource || resource->data != mc_dev))
+ if (!resource || resource->data != mc_dev)
goto out;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
mc_bus = to_fsl_mc_bus(mc_bus_dev);
res_pool = resource->parent_pool;
- if (WARN_ON(res_pool != &mc_bus->resource_pools[resource->type]))
+ if (res_pool != &mc_bus->resource_pools[resource->type])
goto out;
mutex_lock(&res_pool->mutex);
- if (WARN_ON(res_pool->max_count <= 0))
+ if (res_pool->max_count <= 0)
goto out_unlock;
- if (WARN_ON(res_pool->free_count <= 0 ||
- res_pool->free_count > res_pool->max_count))
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count)
goto out_unlock;
/*
@@ -187,11 +185,11 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
FSL_MC_NUM_POOL_TYPES);
*new_resource = NULL;
- if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
goto out;
res_pool = &mc_bus->resource_pools[pool_type];
- if (WARN_ON(res_pool->mc_bus != mc_bus))
+ if (res_pool->mc_bus != mc_bus)
goto out;
mutex_lock(&res_pool->mutex);
@@ -199,7 +197,6 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
struct fsl_mc_resource, node);
if (!resource) {
- WARN_ON(res_pool->free_count != 0);
error = -ENXIO;
dev_err(&mc_bus_dev->dev,
"No more resources of type %s left\n",
@@ -207,12 +204,12 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
goto out_unlock;
}
- if (WARN_ON(resource->type != pool_type))
+ if (resource->type != pool_type)
goto out_unlock;
- if (WARN_ON(resource->parent_pool != res_pool))
+ if (resource->parent_pool != res_pool)
goto out_unlock;
- if (WARN_ON(res_pool->free_count <= 0 ||
- res_pool->free_count > res_pool->max_count))
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count)
goto out_unlock;
list_del_init(&resource->node);
@@ -232,15 +229,15 @@ void fsl_mc_resource_free(struct fsl_mc_resource *resource)
struct fsl_mc_resource_pool *res_pool;
res_pool = resource->parent_pool;
- if (WARN_ON(resource->type != res_pool->type))
+ if (resource->type != res_pool->type)
return;
mutex_lock(&res_pool->mutex);
- if (WARN_ON(res_pool->free_count < 0 ||
- res_pool->free_count >= res_pool->max_count))
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count >= res_pool->max_count)
goto out_unlock;
- if (WARN_ON(!list_empty(&resource->node)))
+ if (!list_empty(&resource->node))
goto out_unlock;
list_add_tail(&resource->node, &res_pool->free_list);
@@ -279,13 +276,13 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
struct fsl_mc_resource *resource = NULL;
*new_mc_adev = NULL;
- if (WARN_ON(mc_dev->flags & FSL_MC_IS_DPRC))
+ if (mc_dev->flags & FSL_MC_IS_DPRC)
goto error;
- if (WARN_ON(!dev_is_fsl_mc(mc_dev->dev.parent)))
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
goto error;
- if (WARN_ON(pool_type == FSL_MC_POOL_DPMCP))
+ if (pool_type == FSL_MC_POOL_DPMCP)
goto error;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
@@ -295,7 +292,7 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
goto error;
mc_adev = resource->data;
- if (WARN_ON(!mc_adev))
+ if (!mc_adev)
goto error;
*new_mc_adev = mc_adev;
@@ -318,9 +315,9 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
struct fsl_mc_resource *resource;
resource = mc_adev->resource;
- if (WARN_ON(resource->type == FSL_MC_POOL_DPMCP))
+ if (resource->type == FSL_MC_POOL_DPMCP)
return;
- if (WARN_ON(resource->data != mc_adev))
+ if (resource->data != mc_adev)
return;
fsl_mc_resource_free(resource);
@@ -349,8 +346,8 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
- if (WARN_ON(irq_count == 0 ||
- irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS))
+ if (irq_count == 0 ||
+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
return -EINVAL;
error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
@@ -406,13 +403,13 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
- if (WARN_ON(!mc_bus->irq_resources))
+ if (!mc_bus->irq_resources)
return;
- if (WARN_ON(res_pool->max_count == 0))
+ if (res_pool->max_count == 0)
return;
- if (WARN_ON(res_pool->free_count != res_pool->max_count))
+ if (res_pool->free_count != res_pool->max_count)
return;
INIT_LIST_HEAD(&res_pool->free_list);
@@ -436,19 +433,19 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
struct fsl_mc_bus *mc_bus;
struct fsl_mc_resource_pool *res_pool;
- if (WARN_ON(mc_dev->irqs))
+ if (mc_dev->irqs)
return -EINVAL;
irq_count = mc_dev->obj_desc.irq_count;
- if (WARN_ON(irq_count == 0))
+ if (irq_count == 0)
return -EINVAL;
- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
+ if (is_fsl_mc_bus_dprc(mc_dev))
mc_bus = to_fsl_mc_bus(mc_dev);
else
mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
- if (WARN_ON(!mc_bus->irq_resources))
+ if (!mc_bus->irq_resources)
return -EINVAL;
res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
@@ -474,7 +471,6 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
irqs[i] = to_fsl_mc_irq(resource);
res_allocated_count++;
- WARN_ON(irqs[i]->mc_dev);
irqs[i]->mc_dev = mc_dev;
irqs[i]->dev_irq_index = i;
}
@@ -502,21 +498,20 @@ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
struct fsl_mc_bus *mc_bus;
struct fsl_mc_device_irq **irqs = mc_dev->irqs;
- if (WARN_ON(!irqs))
+ if (!irqs)
return;
irq_count = mc_dev->obj_desc.irq_count;
- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
+ if (is_fsl_mc_bus_dprc(mc_dev))
mc_bus = to_fsl_mc_bus(mc_dev);
else
mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
- if (WARN_ON(!mc_bus->irq_resources))
+ if (!mc_bus->irq_resources)
return;
for (i = 0; i < irq_count; i++) {
- WARN_ON(!irqs[i]->mc_dev);
irqs[i]->mc_dev = NULL;
fsl_mc_resource_free(&irqs[i]->resource);
}
@@ -553,17 +548,10 @@ static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
&mc_bus->resource_pools[pool_type];
int free_count = 0;
- WARN_ON(res_pool->type != pool_type);
- WARN_ON(res_pool->free_count != res_pool->max_count);
-
list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
free_count++;
- WARN_ON(resource->type != res_pool->type);
- WARN_ON(resource->parent_pool != res_pool);
devm_kfree(&mc_bus_dev->dev, resource);
}
-
- WARN_ON(free_count != res_pool->free_count);
}
void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
@@ -585,11 +573,11 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
struct fsl_mc_bus *mc_bus;
int error;
- if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
+ if (!fsl_mc_is_allocatable(mc_dev))
return -EINVAL;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
- if (WARN_ON(!dev_is_fsl_mc(&mc_bus_dev->dev)))
+ if (!dev_is_fsl_mc(&mc_bus_dev->dev))
return -EINVAL;
mc_bus = to_fsl_mc_bus(mc_bus_dev);
@@ -614,7 +602,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
{
int error;
- if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
+ if (!fsl_mc_is_allocatable(mc_dev))
return -EINVAL;
if (mc_dev->resource) {
@@ -658,8 +646,3 @@ int __init fsl_mc_allocator_driver_init(void)
{
return fsl_mc_driver_register(&fsl_mc_allocator_driver);
}
-
-void fsl_mc_allocator_driver_exit(void)
-{
- fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
-}
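
The bulk of this file's changes follow one pattern: WARN_ON()-wrapped sanity checks become plain conditionals, so expected runtime failures no longer dump a kernel backtrace. Schematically (hypothetical condition, mirroring the hunks above):

    /* before: every hit logs a stack trace */
    if (WARN_ON(res_pool->free_count < 0))
            goto out;

    /* after: treated as an ordinary error path */
    if (res_pool->free_count < 0)
            goto out;
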
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
index 409f2b9e70ff..1b333c43aae9 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Freescale Management Complex (MC) bus driver
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#define pr_fmt(fmt) "fsl-mc: " fmt
@@ -22,8 +20,6 @@
#include <linux/dma-mapping.h>
#include "fsl-mc-private.h"
-#include "dprc-cmd.h"
-#include "dpmng-cmd.h"
/**
 * Default DMA mask for devices on an fsl-mc bus
@@ -156,22 +152,80 @@ struct bus_type fsl_mc_bus_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+struct device_type fsl_mc_bus_dprc_type = {
+ .name = "fsl_mc_bus_dprc"
+};
+
+struct device_type fsl_mc_bus_dpni_type = {
+ .name = "fsl_mc_bus_dpni"
+};
+
+struct device_type fsl_mc_bus_dpio_type = {
+ .name = "fsl_mc_bus_dpio"
+};
+
+struct device_type fsl_mc_bus_dpsw_type = {
+ .name = "fsl_mc_bus_dpsw"
+};
+
+struct device_type fsl_mc_bus_dpbp_type = {
+ .name = "fsl_mc_bus_dpbp"
+};
+
+struct device_type fsl_mc_bus_dpcon_type = {
+ .name = "fsl_mc_bus_dpcon"
+};
+
+struct device_type fsl_mc_bus_dpmcp_type = {
+ .name = "fsl_mc_bus_dpmcp"
+};
+
+struct device_type fsl_mc_bus_dpmac_type = {
+ .name = "fsl_mc_bus_dpmac"
+};
+
+struct device_type fsl_mc_bus_dprtc_type = {
+ .name = "fsl_mc_bus_dprtc"
+};
+
+static struct device_type *fsl_mc_get_device_type(const char *type)
+{
+ static const struct {
+ struct device_type *dev_type;
+ const char *type;
+ } dev_types[] = {
+ { &fsl_mc_bus_dprc_type, "dprc" },
+ { &fsl_mc_bus_dpni_type, "dpni" },
+ { &fsl_mc_bus_dpio_type, "dpio" },
+ { &fsl_mc_bus_dpsw_type, "dpsw" },
+ { &fsl_mc_bus_dpbp_type, "dpbp" },
+ { &fsl_mc_bus_dpcon_type, "dpcon" },
+ { &fsl_mc_bus_dpmcp_type, "dpmcp" },
+ { &fsl_mc_bus_dpmac_type, "dpmac" },
+ { &fsl_mc_bus_dprtc_type, "dprtc" },
+ { NULL, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_types[i].dev_type; i++)
+ if (!strcmp(dev_types[i].type, type))
+ return dev_types[i].dev_type;
+
+ return NULL;
+}
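
fsl_mc_get_device_type() centralizes the string-to-type mapping so that, once fsl_mc_device_add() stores the result in mc_dev->dev.type, object-type checks reduce to pointer comparisons through the new is_fsl_mc_bus_*() helpers declared later in mc.h. A condensed sketch of the flow, assuming mc_dev is a freshly created device:

    struct device_type *t = fsl_mc_get_device_type(obj_desc->type);

    if (!t)
            return -ENODEV;         /* unknown object type */
    mc_dev->dev.type = t;

    /* later, anywhere in the bus code: */
    if (is_fsl_mc_bus_dpbp(mc_dev))
            pr_debug("%s is a buffer pool\n", dev_name(&mc_dev->dev));
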
+
static int fsl_mc_driver_probe(struct device *dev)
{
struct fsl_mc_driver *mc_drv;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
int error;
- if (WARN_ON(!dev->driver))
- return -EINVAL;
-
mc_drv = to_fsl_mc_driver(dev->driver);
- if (WARN_ON(!mc_drv->probe))
- return -EINVAL;
error = mc_drv->probe(mc_dev);
if (error < 0) {
- dev_err(dev, "%s failed: %d\n", __func__, error);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "%s failed: %d\n", __func__, error);
return error;
}
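
Suppressing the error message for -EPROBE_DEFER is the right call here because deferred probes are retried later and are not failures. A hedged sketch of the convention from a driver's side (dependency_ready() is a hypothetical check):

    /* in a driver's probe, deferring until a dependency appears: */
    if (!dependency_ready(mc_dev))  /* hypothetical helper */
            return -EPROBE_DEFER;   /* probe is retried, not an error */
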
@@ -184,9 +238,6 @@ static int fsl_mc_driver_remove(struct device *dev)
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
int error;
- if (WARN_ON(!dev->driver))
- return -EINVAL;
-
error = mc_drv->remove(mc_dev);
if (error < 0) {
dev_err(dev, "%s failed: %d\n", __func__, error);
@@ -292,9 +343,9 @@ static int mc_get_version(struct fsl_mc_io *mc_io,
static void fsl_mc_get_root_dprc(struct device *dev,
struct device **root_dprc_dev)
{
- if (WARN_ON(!dev)) {
+ if (!dev) {
*root_dprc_dev = NULL;
- } else if (WARN_ON(!dev_is_fsl_mc(dev))) {
+ } else if (!dev_is_fsl_mc(dev)) {
*root_dprc_dev = NULL;
} else {
*root_dprc_dev = dev;
@@ -352,8 +403,6 @@ static int translate_mc_addr(struct fsl_mc_device *mc_dev,
struct fsl_mc *mc;
fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
- if (WARN_ON(!root_dprc_dev))
- return -EINVAL;
mc = dev_get_drvdata(root_dprc_dev->parent);
if (mc->num_translation_ranges == 0) {
@@ -390,10 +439,10 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
struct device *parent_dev = mc_dev->dev.parent;
enum dprc_region_type mc_region_type;
- if (strcmp(obj_desc->type, "dprc") == 0 ||
- strcmp(obj_desc->type, "dpmcp") == 0) {
+ if (is_fsl_mc_bus_dprc(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev)) {
mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
- } else if (strcmp(obj_desc->type, "dpio") == 0) {
+ } else if (is_fsl_mc_bus_dpio(mc_dev)) {
mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
} else {
/*
@@ -401,7 +450,6 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
* type, as this object type is not supposed to have MMIO
* regions
*/
- WARN_ON(true);
return -EINVAL;
}
@@ -424,7 +472,6 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
goto error_cleanup_regions;
}
- WARN_ON(region_desc.size == 0);
error = translate_mc_addr(mc_dev, mc_region_type,
region_desc.base_offset,
&regions[i].start);
@@ -470,7 +517,7 @@ static void fsl_mc_device_release(struct device *dev)
kfree(mc_dev->regions);
- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
+ if (is_fsl_mc_bus_dprc(mc_dev))
kfree(to_fsl_mc_bus(mc_dev));
else
kfree(mc_dev);
@@ -518,6 +565,12 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
mc_dev->dev.parent = parent_dev;
mc_dev->dev.bus = &fsl_mc_bus_type;
mc_dev->dev.release = fsl_mc_device_release;
+ mc_dev->dev.type = fsl_mc_get_device_type(obj_desc->type);
+ if (!mc_dev->dev.type) {
+ error = -ENODEV;
+ dev_err(parent_dev, "unknown device type %s\n", obj_desc->type);
+ goto error_cleanup_dev;
+ }
dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
if (strcmp(obj_desc->type, "dprc") == 0) {
@@ -544,7 +597,7 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
/*
* device being added is the root DPRC device
*/
- if (WARN_ON(!mc_io)) {
+ if (!mc_io) {
error = -EINVAL;
goto error_cleanup_dev;
}
@@ -826,7 +879,7 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
{
struct fsl_mc *mc = platform_get_drvdata(pdev);
- if (WARN_ON(!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev)))
+ if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
return -EINVAL;
fsl_mc_device_remove(mc->root_mc_bus_dev);
@@ -878,15 +931,8 @@ static int __init fsl_mc_bus_driver_init(void)
if (error < 0)
goto error_cleanup_dprc_driver;
- error = its_fsl_mc_msi_init();
- if (error < 0)
- goto error_cleanup_mc_allocator;
-
return 0;
-error_cleanup_mc_allocator:
- fsl_mc_allocator_driver_exit();
-
error_cleanup_dprc_driver:
dprc_driver_exit();
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index f74a6f1764bb..971ad87c584c 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Freescale Management Complex (MC) bus driver MSI support
*
* Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/of_device.h>
@@ -47,7 +45,7 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
{
struct msi_domain_ops *ops = info->ops;
- if (WARN_ON(!ops))
+ if (!ops)
return;
/*
@@ -73,7 +71,7 @@ static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
return;
- if (WARN_ON(!owner_mc_dev))
+ if (!owner_mc_dev)
return;
irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
@@ -124,7 +122,6 @@ static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
struct fsl_mc_device_irq *mc_dev_irq =
&mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
- WARN_ON(mc_dev_irq->msi_desc != msi_desc);
msi_desc->msg = *msg;
/*
@@ -137,7 +134,7 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
{
struct irq_chip *chip = info->chip;
- if (WARN_ON(!chip))
+ if (!chip)
return;
/*
@@ -239,7 +236,7 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
struct irq_domain *msi_domain;
int error;
- if (WARN_ON(!list_empty(dev_to_msi_list(dev))))
+ if (!list_empty(dev_to_msi_list(dev)))
return -EINVAL;
error = fsl_mc_msi_alloc_descs(dev, irq_count);
@@ -247,7 +244,7 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
return error;
msi_domain = dev_get_msi_domain(dev);
- if (WARN_ON(!msi_domain)) {
+ if (!msi_domain) {
error = -EINVAL;
goto cleanup_msi_descs;
}
@@ -275,12 +272,12 @@ void fsl_mc_msi_domain_free_irqs(struct device *dev)
struct irq_domain *msi_domain;
msi_domain = dev_get_msi_domain(dev);
- if (WARN_ON(!msi_domain))
+ if (!msi_domain)
return;
msi_domain_free_irqs(msi_domain, dev);
- if (WARN_ON(list_empty(dev_to_msi_list(dev))))
+ if (list_empty(dev_to_msi_list(dev)))
return;
fsl_mc_msi_free_descs(dev);
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
index 62d398947605..83b89d6241f2 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
@@ -1,19 +1,384 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Freescale Management Complex (MC) bus private declarations
*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _FSL_MC_PRIVATE_H_
#define _FSL_MC_PRIVATE_H_
#include "../include/mc.h"
-#include "dprc.h"
#include <linux/mutex.h>
+/*
+ * Data Path Management Complex (DPMNG) General API
+ */
+
+/* DPMNG command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
+
+#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* DPMNG command IDs */
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
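
The *_CMD() helpers pack a command id and the command interface version into a single command field; for example DPMNG_CMDID_GET_VERSION expands to (0x831 << 4) | 1 = 0x8311. A compile-time check of that arithmetic (illustrative only, placed inside any function):

    BUILD_BUG_ON(DPMNG_CMDID_GET_VERSION !=
                 ((0x831 << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION));
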
+
+struct dpmng_rsp_get_version {
+ __le32 revision;
+ __le32 version_major;
+ __le32 version_minor;
+};
+
+/*
+ * Data Path Management Command Portal (DPMCP) API
+ */
+
+/* Minimal supported DPMCP Version */
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
+
+/* DPMCP command versioning */
+#define DPMCP_CMD_BASE_VERSION 1
+#define DPMCP_CMD_ID_OFFSET 4
+
+#define DPMCP_CMD(id) (((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
+
+/* DPMCP command IDs */
+#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
+#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
+#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
+struct dpmcp_cmd_open {
+ __le32 dpmcp_id;
+};
+
+/*
+ * Initialization and runtime control APIs for DPMCP
+ */
+int dpmcp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmcp_id,
+ u16 *token);
+
+int dpmcp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpmcp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/*
+ * Data Path Resource Container (DPRC) API
+ */
+
+/* Minimal supported DPRC Version */
+#define DPRC_MIN_VER_MAJOR 6
+#define DPRC_MIN_VER_MINOR 0
+
+/* DPRC command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+
+/* DPRC command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
+
+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+
+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
+
+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+
+struct dprc_cmd_open {
+ __le32 container_id;
+};
+
+struct dprc_cmd_set_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+#define DPRC_ENABLE 0x1
+
+struct dprc_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_attributes {
+ /* response word 0 */
+ __le32 container_id;
+ __le16 icid;
+ __le16 pad;
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+};
+
+struct dprc_rsp_get_obj_count {
+ __le32 pad;
+ __le32 obj_count;
+};
+
+struct dprc_cmd_get_obj {
+ __le32 obj_index;
+};
+
+struct dprc_rsp_get_obj {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_obj_region {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le16 pad0;
+ u8 region_index;
+ u8 pad1;
+ /* cmd word 1-2 */
+ __le64 pad2[2];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+ __le64 pad;
+ /* response word 1 */
+ __le64 base_addr;
+ /* response word 2 */
+ __le32 size;
+};
+
+struct dprc_cmd_set_obj_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 obj_id;
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
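
These command structs describe the raw little-endian wire layout, so callers convert each field explicitly when filling them; dprc_set_obj_irq() earlier in this patch does exactly that. A condensed sketch of the pattern against struct dprc_cmd_set_obj_irq above:

    struct mc_command cmd = { 0 };
    struct dprc_cmd_set_obj_irq *p;

    cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
                                      cmd_flags, token);
    p = (struct dprc_cmd_set_obj_irq *)cmd.params;
    p->irq_val = cpu_to_le32(irq_cfg->val);
    p->irq_index = irq_index;
    p->irq_addr = cpu_to_le64(irq_cfg->paddr);
    p->irq_num = cpu_to_le32(irq_cfg->irq_num);
    p->obj_id = cpu_to_le32(obj_id);
    strncpy(p->obj_type, obj_type, 16);
    p->obj_type[15] = '\0';

    return mc_send_command(mc_io, &cmd);
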
+
+/*
+ * DPRC API for managing and querying DPAA resources
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token);
+
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/* DPRC IRQ events */
+
+/* IRQ event - Indicates that a new object was added to the container */
+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
+/* IRQ event - Indicates that an object was removed from the container */
+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
+/*
+ * IRQ event - Indicates that one of the descendant containers opened by
+ * this container was destroyed
+ */
+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
+
+/*
+ * IRQ event - Indicates that one of the container's opened objects is
+ * destroyed
+ */
+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
+
+/* IRQ event - Indicates that an object was created in the container */
+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
+
+/**
+ * struct dprc_irq_cfg - IRQ configuration
+ * @paddr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dprc_irq_cfg {
+ phys_addr_t paddr;
+ u32 val;
+ int irq_num;
+};
+
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dprc_attributes - Container attributes
+ * @container_id: Container's ID
+ * @icid: Container's ICID
+ * @portal_id: Container's portal ID
+ * @options: Container's options as set at container's creation
+ */
+struct dprc_attributes {
+ int container_id;
+ u16 icid;
+ int portal_id;
+ u64 options;
+};
+
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attributes);
+
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count);
+
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc);
+
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+/* Region flags */
+/* Cacheable - Indicates that the region should be mapped as cacheable */
+#define DPRC_REGION_CACHEABLE 0x00000001
+
+/**
+ * enum dprc_region_type - Region type
+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
+ */
+enum dprc_region_type {
+ DPRC_REGION_TYPE_MC_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_PORTAL
+};
+
+/**
+ * struct dprc_region_desc - Mappable region descriptor
+ * @base_offset: Region offset from region's base address.
+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
+ * base address; For DPIO, region base is offset from SoC QMan portals
+ * base address
+ * @size: Region size (in bytes)
+ * @flags: Region attributes
+ * @type: Portal region type
+ */
+struct dprc_region_desc {
+ u32 base_offset;
+ u32 size;
+ u32 flags;
+ enum dprc_region_type type;
+};
+
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc);
+
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
+
/**
* Maximum number of total IRQs that can be pre-allocated for an MC bus'
* IRQ pool
@@ -73,8 +438,6 @@ void dprc_driver_exit(void);
int __init fsl_mc_allocator_driver_init(void);
-void fsl_mc_allocator_driver_exit(void);
-
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
@@ -91,10 +454,6 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
void fsl_mc_msi_domain_free_irqs(struct device *dev);
-int __init its_fsl_mc_msi_init(void);
-
-void its_fsl_mc_msi_cleanup(void);
-
int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
struct irq_domain **mc_msi_domain);
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 123e4af58408..5064d5ddf581 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Freescale Management Complex (MC) bus driver MSI support
*
* Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/of_device.h>
@@ -15,7 +13,7 @@
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include "fsl-mc-private.h"
+#include "../include/mc.h"
static struct irq_chip its_msi_irq_chip = {
.name = "ITS-fMSI",
@@ -32,11 +30,11 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
struct fsl_mc_device *mc_bus_dev;
struct msi_domain_info *msi_info;
- if (WARN_ON(!dev_is_fsl_mc(dev)))
+ if (!dev_is_fsl_mc(dev))
return -EINVAL;
mc_bus_dev = to_fsl_mc_device(dev);
- if (WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC)))
+ if (!(mc_bus_dev->flags & FSL_MC_IS_DPRC))
return -EINVAL;
/*
@@ -67,7 +65,7 @@ static const struct of_device_id its_device_id[] = {
{},
};
-int __init its_fsl_mc_msi_init(void)
+static int __init its_fsl_mc_msi_init(void)
{
struct device_node *np;
struct irq_domain *parent;
@@ -93,30 +91,10 @@ int __init its_fsl_mc_msi_init(void)
continue;
}
- WARN_ON(mc_msi_domain->host_data !=
- &its_fsl_mc_msi_domain_info);
-
pr_info("fsl-mc MSI: %pOF domain created\n", np);
}
return 0;
}
-void its_fsl_mc_msi_cleanup(void)
-{
- struct device_node *np;
-
- for (np = of_find_matching_node(NULL, its_device_id); np;
- np = of_find_matching_node(np, its_device_id)) {
- struct irq_domain *mc_msi_domain = irq_find_matching_host(
- np,
- DOMAIN_BUS_FSL_MC_MSI);
-
- if (!of_property_read_bool(np, "msi-controller"))
- continue;
-
- if (mc_msi_domain &&
- mc_msi_domain->host_data == &its_fsl_mc_msi_domain_info)
- irq_domain_remove(mc_msi_domain);
- }
-}
+early_initcall(its_fsl_mc_msi_init);
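
Converting its_fsl_mc_msi_init() to an early_initcall() removes the need for the bus driver to invoke it (and the matching cleanup) by hand; the MSI domains are now created once during early boot and persist for the kernel's lifetime. A generic sketch of the mechanism (hypothetical function name):

    #include <linux/init.h>

    static int __init my_msi_setup(void)
    {
            /* runs during early boot, before ordinary device initcalls */
            return 0;
    }
    early_initcall(my_msi_setup);
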
diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c
index f65c23ce83f1..7e6fb360ef12 100644
--- a/drivers/staging/fsl-mc/bus/mc-io.c
+++ b/drivers/staging/fsl-mc/bus/mc-io.c
@@ -1,54 +1,23 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/io.h>
#include "../include/mc.h"
#include "fsl-mc-private.h"
-#include "dpmcp.h"
-#include "dpmcp-cmd.h"
static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
struct fsl_mc_device *dpmcp_dev)
{
int error;
- if (WARN_ON(!dpmcp_dev))
- return -EINVAL;
-
- if (WARN_ON(mc_io->dpmcp_dev))
+ if (mc_io->dpmcp_dev)
return -EINVAL;
- if (WARN_ON(dpmcp_dev->mc_io))
+ if (dpmcp_dev->mc_io)
return -EINVAL;
error = dpmcp_open(mc_io,
@@ -68,12 +37,6 @@ static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
int error;
struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
- if (WARN_ON(!dpmcp_dev))
- return;
-
- if (WARN_ON(dpmcp_dev->mc_io != mc_io))
- return;
-
error = dpmcp_close(mc_io,
0,
dpmcp_dev->mc_handle);
@@ -210,7 +173,7 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (mc_dev->flags & FSL_MC_IS_DPRC) {
mc_bus_dev = mc_dev;
} else {
- if (WARN_ON(!dev_is_fsl_mc(mc_dev->dev.parent)))
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
return error;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
@@ -224,8 +187,6 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
error = -EINVAL;
dpmcp_dev = resource->data;
- if (WARN_ON(!dpmcp_dev))
- goto error_cleanup_resource;
if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
(dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
@@ -238,15 +199,9 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
goto error_cleanup_resource;
}
- if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
- goto error_cleanup_resource;
-
mc_portal_phys_addr = dpmcp_dev->regions[0].start;
mc_portal_size = resource_size(dpmcp_dev->regions);
- if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size))
- goto error_cleanup_resource;
-
error = fsl_create_mc_io(&mc_bus_dev->dev,
mc_portal_phys_addr,
mc_portal_size, dpmcp_dev,
@@ -279,14 +234,12 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
 * to have a DPMCP object associated with it.
*/
dpmcp_dev = mc_io->dpmcp_dev;
- if (WARN_ON(!dpmcp_dev))
- return;
resource = dpmcp_dev->resource;
- if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP))
+ if (!resource || resource->type != FSL_MC_POOL_DPMCP)
return;
- if (WARN_ON(resource->data != dpmcp_dev))
+ if (resource->data != dpmcp_dev)
return;
fsl_destroy_mc_io(mc_io);
@@ -304,9 +257,6 @@ int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
int error;
struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
- if (WARN_ON(!dpmcp_dev))
- return -EINVAL;
-
error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
if (error < 0) {
dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 7ce105bd3977..f09d75d9a976 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -1,35 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* I/O services to send MC commands to the MC hardware
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/delay.h>
@@ -40,7 +14,7 @@
#include <linux/io-64-nonatomic-hi-lo.h>
#include "../include/mc.h"
-#include "dpmcp.h"
+#include "fsl-mc-private.h"
/**
* Timeout in milliseconds to wait for the completion of an MC command
@@ -85,7 +59,7 @@ static int mc_status_to_error(enum mc_cmd_status status)
[MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
};
- if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map)))
+ if ((u32)status >= ARRAY_SIZE(mc_status_to_error_map))
return -EINVAL;
return mc_status_to_error_map[status];
@@ -273,8 +247,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
enum mc_cmd_status status;
unsigned long irq_flags = 0;
- if (WARN_ON(in_irq() &&
- !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)))
+ if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
return -EINVAL;
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
@@ -320,4 +293,4 @@ common_exit:
return error;
}
-EXPORT_SYMBOL(mc_send_command);
+EXPORT_SYMBOL_GPL(mc_send_command);
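
mc_send_command() remains the single funnel that every dprc_*/dpmcp_* wrapper goes through: encode a header, fill cmd.params if needed, send, then decode the response words. A minimal caller following the pattern visible throughout this patch, using the dprc_rsp_get_obj_count layout defined above:

    struct mc_command cmd = { 0 };
    struct dprc_rsp_get_obj_count *rsp;
    int err;

    cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT, 0, token);
    err = mc_send_command(mc_io, &cmd);
    if (err)
            return err;

    rsp = (struct dprc_rsp_get_obj_count *)cmd.params;
    *obj_count = le32_to_cpu(rsp->obj_count);
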
diff --git a/drivers/staging/fsl-mc/include/dpaa2-fd.h b/drivers/staging/fsl-mc/include/dpaa2-fd.h
index cf7857f00a5c..3e022001f0b1 100644
--- a/drivers/staging/fsl-mc/include/dpaa2-fd.h
+++ b/drivers/staging/fsl-mc/include/dpaa2-fd.h
@@ -1,33 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPAA2_FD_H
#define __FSL_DPAA2_FD_H
diff --git a/drivers/staging/fsl-mc/include/dpaa2-global.h b/drivers/staging/fsl-mc/include/dpaa2-global.h
index 0326447fde4e..9bc0713346a8 100644
--- a/drivers/staging/fsl-mc/include/dpaa2-global.h
+++ b/drivers/staging/fsl-mc/include/dpaa2-global.h
@@ -1,33 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPAA2_GLOBAL_H
#define __FSL_DPAA2_GLOBAL_H
diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h
index afc2d060d077..9cb1eec87a9c 100644
--- a/drivers/staging/fsl-mc/include/dpaa2-io.h
+++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
@@ -1,33 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPAA2_IO_H
#define __FSL_DPAA2_IO_H
@@ -88,6 +63,8 @@ void dpaa2_io_down(struct dpaa2_io *d);
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
+struct dpaa2_io *dpaa2_io_service_select(int cpu);
+
/**
* struct dpaa2_io_notification_ctx - The DPIO notification context structure
* @cb: The callback to be invoked when the notification arrives
@@ -120,13 +97,9 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
int dpaa2_io_service_rearm(struct dpaa2_io *service,
struct dpaa2_io_notification_ctx *ctx);
-int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
- struct dpaa2_io_store *s);
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
struct dpaa2_io_store *s);
-int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
- const struct dpaa2_fd *fd);
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
u16 qdbin, const struct dpaa2_fd *fd);
int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
index e9e04ccea82b..4a1809604319 100644
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ b/drivers/staging/fsl-mc/include/dpbp.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPBP_H
#define __FSL_DPBP_H
diff --git a/drivers/staging/fsl-mc/include/dpcon.h b/drivers/staging/fsl-mc/include/dpcon.h
index efa23906b364..062e90ad929b 100644
--- a/drivers/staging/fsl-mc/include/dpcon.h
+++ b/drivers/staging/fsl-mc/include/dpcon.h
@@ -1,33 +1,7 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPCON_H
#define __FSL_DPCON_H
@@ -62,11 +36,6 @@ int dpcon_disable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
-int dpcon_is_enabled(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *en);
-
int dpcon_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
@@ -107,9 +76,4 @@ int dpcon_set_notification(struct fsl_mc_io *mc_io,
u16 token,
struct dpcon_notification_cfg *cfg);
-int dpcon_get_api_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 *major_ver,
- u16 *minor_ver);
-
#endif /* __FSL_DPCON_H */
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index aafe63a21f49..765ba41f5987 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -1,12 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Freescale Management Complex (MC) bus public interface
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _FSL_MC_H_
#define _FSL_MC_H_
@@ -325,7 +323,7 @@ static inline void mc_cmd_read_api_version(struct mc_command *cmd,
struct fsl_mc_io {
struct device *dev;
u16 flags;
- u16 portal_size;
+ u32 portal_size;
phys_addr_t portal_phys_addr;
void __iomem *portal_virt_addr;
struct fsl_mc_device *dpmcp_dev;
@@ -398,4 +396,59 @@ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
extern struct bus_type fsl_mc_bus_type;
+extern struct device_type fsl_mc_bus_dprc_type;
+extern struct device_type fsl_mc_bus_dpni_type;
+extern struct device_type fsl_mc_bus_dpio_type;
+extern struct device_type fsl_mc_bus_dpsw_type;
+extern struct device_type fsl_mc_bus_dpbp_type;
+extern struct device_type fsl_mc_bus_dpcon_type;
+extern struct device_type fsl_mc_bus_dpmcp_type;
+extern struct device_type fsl_mc_bus_dpmac_type;
+extern struct device_type fsl_mc_bus_dprtc_type;
+
+static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dprc_type;
+}
+
+static inline bool is_fsl_mc_bus_dpni(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpni_type;
+}
+
+static inline bool is_fsl_mc_bus_dpio(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpio_type;
+}
+
+static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpsw_type;
+}
+
+static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpbp_type;
+}
+
+static inline bool is_fsl_mc_bus_dpcon(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpcon_type;
+}
+
+static inline bool is_fsl_mc_bus_dpmcp(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpmcp_type;
+}
+
+static inline bool is_fsl_mc_bus_dpmac(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpmac_type;
+}
+
+static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
+}
+
#endif /* _FSL_MC_H_ */
diff --git a/drivers/staging/fsl-mc/overview.rst b/drivers/staging/fsl-mc/overview.rst
new file mode 100644
index 000000000000..79fede4447d6
--- /dev/null
+++ b/drivers/staging/fsl-mc/overview.rst
@@ -0,0 +1,404 @@
+.. include:: <isonum.txt>
+
+DPAA2 (Data Path Acceleration Architecture Gen2) Overview
+=========================================================
+
+:Copyright: |copy| 2015 Freescale Semiconductor Inc.
+:Copyright: |copy| 2018 NXP
+
+This document provides an overview of the Freescale DPAA2 architecture
+and how it is integrated into the Linux kernel.
+
+Introduction
+============
+
+DPAA2 is a hardware architecture designed for high-speed network
+packet processing. DPAA2 consists of sophisticated mechanisms for
+processing Ethernet packets, queue management, buffer management,
+autonomous L2 switching, virtual Ethernet bridging, and accelerator
+(e.g. crypto) sharing.
+
+A DPAA2 hardware component called the Management Complex (or MC) manages the
+DPAA2 hardware resources. The MC provides an object-based abstraction for
+software drivers to use the DPAA2 hardware.
+The MC uses DPAA2 hardware resources such as queues, buffer pools, and
+network ports to create functional objects/devices such as network
+interfaces, an L2 switch, or accelerator instances.
+The MC provides memory-mapped I/O command interfaces (MC portals)
+which DPAA2 software drivers use to operate on DPAA2 objects.
+
+The diagram below shows an overview of the DPAA2 resource management
+architecture::
+
+ +--------------------------------------+
+ | OS |
+ | DPAA2 drivers |
+ | | |
+ +-----------------------------|--------+
+ |
+ | (create,discover,connect
+ | config,use,destroy)
+ |
+ DPAA2 |
+ +------------------------| mc portal |-+
+ | | |
+ | +- - - - - - - - - - - - -V- - -+ |
+ | | | |
+ | | Management Complex (MC) | |
+ | | | |
+ | +- - - - - - - - - - - - - - - -+ |
+ | |
+ | Hardware Hardware |
+ | Resources Objects |
+ | --------- ------- |
+ | -queues -DPRC |
+ | -buffer pools -DPMCP |
+ | -Eth MACs/ports -DPIO |
+ | -network interface -DPNI |
+ | profiles -DPMAC |
+ | -queue portals -DPBP |
+ | -MC portals ... |
+ | ... |
+ | |
+ +--------------------------------------+
+
+
+The MC mediates operations such as create, discover,
+connect, configure, and destroy. Fast-path operations
+on data, such as packet transmit/receive, are not mediated by
+the MC and are done directly using memory-mapped regions in
+DPIO objects.
+
+Overview of DPAA2 Objects
+=========================
+
+This section provides a brief overview of some key DPAA2 objects.
+A simple scenario is described illustrating the objects involved
+in creating a network interface.
+
+DPRC (Datapath Resource Container)
+----------------------------------
+
+A DPRC is a container object that holds all the other
+types of DPAA2 objects. In the example diagram below there
+are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
+in the container.
+
+::
+
+ +---------------------------------------------------------+
+ | DPRC |
+ | |
+ | +-------+ +-------+ +-------+ +-------+ +-------+ |
+ | | DPMCP | | DPIO | | DPBP | | DPNI | | DPMAC | |
+ | +-------+ +-------+ +-------+ +---+---+ +---+---+ |
+ | | DPMCP | | DPIO | |
+ | +-------+ +-------+ |
+ | | DPMCP | |
+ | +-------+ |
+ | |
+ +---------------------------------------------------------+
+
+From the point of view of an OS, a DPRC behaves similarly to a plug and
+play bus, like PCI. DPRC commands can be used to enumerate the contents
+of the DPRC and discover the hardware objects present (including
+mappable regions and interrupts).
+
+::
+
+ DPRC.1 (bus)
+ |
+ +--+--------+-------+-------+-------+
+ | | | | |
+ DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1
+ DPMCP.2 DPIO.2
+ DPMCP.3
+
+Hardware objects can be created and destroyed dynamically, providing
+the ability to hot plug/unplug objects in and out of the DPRC.
+
+A DPRC has a mappable MMIO region (an MC portal) that can be used
+to send MC commands. It has an interrupt for status events (like
+hotplug).
+All objects in a container share the same hardware "isolation context".
+This means that with respect to an IOMMU the isolation granularity
+is at the DPRC (container) level, not at the individual object
+level.
+
+DPRCs can be defined statically and populated with objects
+via a config file passed to the MC when firmware starts it.
+
+DPAA2 Objects for an Ethernet Network Interface
+-----------------------------------------------
+
+A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
+queuing mechanisms, configuration mechanisms, buffer management,
+physical ports, and interrupts. DPAA2 uses a more granular approach
+utilizing multiple hardware objects. Each object provides specialized
+functions. Groups of these objects are used by software to provide
+Ethernet network interface functionality. This approach provides
+efficient use of finite hardware resources, flexibility, and
+performance advantages.
+
+The diagram below shows the objects needed for a simple
+network interface configuration on a system with 2 CPUs.
+
+::
+
+ +---+---+ +---+---+
+ CPU0 CPU1
+ +---+---+ +---+---+
+ | |
+ +---+---+ +---+---+
+ DPIO DPIO
+ +---+---+ +---+---+
+ \ /
+ \ /
+ \ /
+ +---+---+
+ DPNI --- DPBP,DPMCP
+ +---+---+
+ |
+ |
+ +---+---+
+ DPMAC
+ +---+---+
+ |
+ port/PHY
+
+The objects are described below. For each object a brief description
+is provided along with a summary of the kinds of operations the object
+supports and a summary of its key resources (MMIO regions
+and IRQs).
+
+DPMAC (Datapath Ethernet MAC)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Represents an Ethernet MAC, a hardware device that connects to an Ethernet
+PHY and allows physical transmission and reception of Ethernet frames.
+
+- MMIO regions: none
+- IRQs: DPNI link change
+- commands: set link up/down, link config, get stats,
+ IRQ config, enable, reset
+
+DPNI (Datapath Network Interface)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Contains TX/RX queues, network interface configuration, and RX buffer pool
+configuration mechanisms. The TX/RX queues are in memory and are identified
+by queue number.
+
+- MMIO regions: none
+- IRQs: link state
+- commands: port config, offload config, queue config,
+ parse/classify config, IRQ config, enable, reset
+
+DPIO (Datapath I/O)
+~~~~~~~~~~~~~~~~~~~
+Provides interfaces to enqueue and dequeue
+packets and to perform hardware buffer pool management operations. The
+DPAA2 architecture separates the mechanism to access queues (the DPIO
+object) from the queues themselves. The DPIO provides an MMIO interface
+to enqueue/dequeue packets. To enqueue something, a descriptor that
+includes the target queue number is written to the DPIO MMIO region.
+There will typically be one DPIO assigned to each CPU. This allows all
+CPUs to simultaneously perform enqueue/dequeue operations. DPIOs are
+expected to be shared by different DPAA2 drivers.
+
+- MMIO regions: queue operations, buffer management
+- IRQs: data availability, congestion notification, buffer
+ pool depletion
+- commands: IRQ config, enable, reset
+
+DPBP (Datapath Buffer Pool)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Represents a hardware buffer pool.
+
+- MMIO regions: none
+- IRQs: none
+- commands: enable, reset
+
+DPMCP (Datapath MC Portal)
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+Provides an MC command portal.
+Used by drivers to send commands to the MC to manage
+objects.
+
+- MMIO regions: MC command portal
+- IRQs: command completion
+- commands: IRQ config, enable, reset
+
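+The sketch below illustrates this command flow. It is a minimal,
+hypothetical example (the function name and error handling are ours,
+not from an in-tree driver): a portal is allocated, a DPBP object is
+opened to obtain an authentication token, and then enabled::
+
+  static int example_dpbp_setup(struct fsl_mc_device *dpbp_dev)
+  {
+          struct fsl_mc_io *mc_io;
+          u16 token;
+          int err;
+
+          /* Get an MC portal for sending commands about this object */
+          err = fsl_mc_portal_allocate(dpbp_dev, 0, &mc_io);
+          if (err)
+                  return err;
+
+          /* Open the object to obtain an authentication token */
+          err = dpbp_open(mc_io, 0, dpbp_dev->obj_desc.id, &token);
+          if (err)
+                  goto free_portal;
+
+          /* Enable the pool; a real driver would keep the token */
+          err = dpbp_enable(mc_io, 0, token);
+
+          dpbp_close(mc_io, 0, token);
+  free_portal:
+          fsl_mc_portal_free(mc_io);
+          return err;
+  }
+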
+Object Connections
+==================
+Some objects have explicit relationships that must
+be configured:
+
+- DPNI <--> DPMAC
+- DPNI <--> DPNI
+- DPNI <--> L2-switch-port
+
+ A DPNI must be connected to something such as a DPMAC,
+ another DPNI, or an L2 switch port. The DPNI connection
+ is made via a DPRC command.
+
+::
+
+ +-------+ +-------+
+ | DPNI | | DPMAC |
+ +---+---+ +---+---+
+ | |
+ +==========+
+
+- DPNI <--> DPBP
+
+ A network interface requires a 'buffer pool' (DPBP
+ object) which provides a list of pointers to memory
+ where received Ethernet data is to be copied. The
+ Ethernet driver configures the DPBPs associated with
+ the network interface.
+
+Interrupts
+==========
+All interrupts generated by DPAA2 objects are message
+interrupts. At the hardware level, message interrupts
+generated by devices normally have 3 components--
+1) a non-spoofable 'device-id' expressed on the hardware
+bus, 2) an address, 3) a data value.
+
+In the case of DPAA2 devices/objects, all objects in the
+same container/DPRC share the same 'device-id'.
+For ARM-based SoCs this is the same as the stream ID.
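+
+The sketch below shows how a driver might wire up the message
+interrupt of its DPAA2 object. The function name is hypothetical and
+the handler is assumed to be supplied elsewhere; only the allocation
+and request calls are the real bus API::
+
+  static int example_setup_irq(struct fsl_mc_device *mc_dev,
+                               irq_handler_t handler)
+  {
+          struct fsl_mc_device_irq *irq;
+          int err;
+
+          /* Draw MSI(s) for this object from the container's pool */
+          err = fsl_mc_allocate_irqs(mc_dev);
+          if (err)
+                  return err;
+
+          /* The Linux irq number lives in the MSI descriptor */
+          irq = mc_dev->irqs[0];
+          err = devm_request_irq(&mc_dev->dev, irq->msi_desc->irq,
+                                 handler, 0,
+                                 dev_name(&mc_dev->dev), mc_dev);
+          if (err)
+                  fsl_mc_free_irqs(mc_dev);
+          return err;
+  }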
+
+
+DPAA2 Linux Drivers Overview
+============================
+
+This section provides an overview of the Linux kernel drivers for
+DPAA2-- 1) the bus driver and associated "DPAA2 infrastructure"
+drivers and 2) functional object drivers (such as Ethernet).
+
+As described previously, a DPRC is a container that holds the other
+types of DPAA2 objects. It is functionally similar to a plug-and-play
+bus controller.
+Each object in the DPRC is a Linux "device" and is bound to a driver.
+The diagram below shows the Linux drivers involved in a networking
+scenario and the objects bound to each driver. A brief description
+of each driver follows.
+
+::
+
+ +------------+
+ | OS Network |
+ | Stack |
+ +------------+ +------------+
+ | Allocator |. . . . . . . | Ethernet |
+ |(DPMCP,DPBP)| | (DPNI) |
+ +-.----------+ +---+---+----+
+ . . ^ |
+ . . <data avail, | | <enqueue,
+ . . tx confirm> | | dequeue>
+ +-------------+ . | |
+ | DPRC driver | . +---+---V----+ +---------+
+ | (DPRC) | . . . . . .| DPIO driver| | MAC |
+ +----------+--+ | (DPIO) | | (DPMAC) |
+ | +------+-----+ +-----+---+
+ |<dev add/remove> | |
+ | | |
+ +--------+----------+ | +--+---+
+ | MC-bus driver | | | PHY |
+ | | | |driver|
+ | /bus/fsl-mc | | +--+---+
+ +-------------------+ | |
+ | |
+ ========================= HARDWARE =========|=================|======
+ DPIO |
+ | |
+ DPNI---DPBP |
+ | |
+ DPMAC |
+ | |
+ PHY ---------------+
+ ============================================|========================
+
+A brief description of each driver is provided below.
+
+MC-bus driver
+-------------
+The MC-bus driver is a platform driver and is probed from a
+node in the device tree (compatible "fsl,qoriq-mc") passed in by boot
+firmware. It is responsible for bootstrapping the DPAA2 kernel
+infrastructure.
+Key functions include:
+
+- registering a new bus type named "fsl-mc" with the kernel,
+ and implementing bus call-backs (e.g. match/uevent/dev_groups)
+- implementing APIs for DPAA2 driver registration and for device
+ add/remove
+- creating an MSI IRQ domain
+- doing a 'device add' to expose the 'root' DPRC, in turn triggering
+ a bind of the root DPRC to the DPRC driver
+
+The binding for the MC-bus device-tree node can be consulted at
+*Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt*.
+The sysfs bind/unbind interfaces for the MC-bus can be consulted at
+*Documentation/ABI/testing/sysfs-bus-fsl-mc*.
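+
+A minimal sketch of a functional driver registering with the "fsl-mc"
+bus type is shown below. The driver, probe, and remove names are
+hypothetical; the registration structures and the
+module_fsl_mc_driver() helper are the real bus API::
+
+  static int example_probe(struct fsl_mc_device *mc_dev)
+  {
+          /* called when a matching object (here a DPNI) is added */
+          dev_info(&mc_dev->dev, "bound to %s\n", mc_dev->obj_desc.type);
+          return 0;
+  }
+
+  static int example_remove(struct fsl_mc_device *mc_dev)
+  {
+          /* called when the object is unplugged or unbound */
+          return 0;
+  }
+
+  static const struct fsl_mc_device_id example_id_table[] = {
+          { .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpni" },
+          { }
+  };
+
+  static struct fsl_mc_driver example_driver = {
+          .driver = {
+                  .name = "example-dpni",
+          },
+          .probe = example_probe,
+          .remove = example_remove,
+          .match_id_table = example_id_table,
+  };
+
+  module_fsl_mc_driver(example_driver);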
+
+DPRC driver
+-----------
+The DPRC driver is bound to DPRC objects and does runtime management
+of a bus instance. It performs the initial bus scan of the DPRC
+and handles interrupts for container events such as hot plug by
+re-scanning the DPRC.
+
+Allocator
+---------
+Certain objects such as DPMCP and DPBP are generic and fungible,
+and are intended to be used by other drivers. For example,
+the DPAA2 Ethernet driver needs:
+
+- DPMCPs to send MC commands, e.g. to configure network interfaces
+- DPBPs for network buffer pools
+
+The allocator driver registers for these allocatable object types
+and those objects are bound to the allocator when the bus is probed.
+The allocator maintains a pool of objects that are available for
+allocation by other DPAA2 drivers.
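+
+For example, an Ethernet driver might borrow a DPBP from the pool as
+sketched below (the function name is hypothetical; the allocator calls
+are the real API)::
+
+  static int example_get_buffer_pool(struct fsl_mc_device *mc_dev)
+  {
+          struct fsl_mc_device *dpbp_dev;
+          int err;
+
+          /* Borrow a DPBP object from the allocator's pool */
+          err = fsl_mc_object_allocate(mc_dev, FSL_MC_POOL_DPBP,
+                                       &dpbp_dev);
+          if (err)
+                  return err;
+
+          /* ... configure and use dpbp_dev as an RX buffer pool ... */
+
+          fsl_mc_object_free(dpbp_dev);   /* return it to the pool */
+          return 0;
+  }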
+
+DPIO driver
+-----------
+The DPIO driver is bound to DPIO objects and provides services that allow
+other drivers such as the Ethernet driver to enqueue and dequeue data for
+their respective objects.
+Key services include:
+
+- data availability notifications
+- hardware queuing operations (enqueue and dequeue of data)
+- hardware buffer pool management
+
+To transmit a packet, the Ethernet driver puts data on a queue and
+invokes a DPIO API. For receive, the Ethernet driver registers
+a data availability notification callback. To dequeue a packet,
+a DPIO API is used.
+There is typically one DPIO object per physical CPU for optimum
+performance, allowing different CPUs to simultaneously enqueue
+and dequeue data.
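+
+A sketch of the transmit step is shown below: a frame descriptor is
+filled in and enqueued to a TX frame queue. The function name is
+hypothetical, and the queue id and DMA-mapped buffer are assumed to
+come from DPNI setup elsewhere::
+
+  static int example_tx(struct dpaa2_io *io, u32 fqid,
+                        dma_addr_t buf, u32 len)
+  {
+          struct dpaa2_fd fd;
+
+          /* Describe one contiguous frame buffer */
+          memset(&fd, 0, sizeof(fd));
+          dpaa2_fd_set_addr(&fd, buf);
+          dpaa2_fd_set_len(&fd, len);
+          dpaa2_fd_set_format(&fd, dpaa2_fd_single);
+
+          /* Enqueue the descriptor to the target frame queue */
+          return dpaa2_io_service_enqueue_fq(io, fqid, &fd);
+  }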
+
+The DPIO driver operates on behalf of all DPAA2 drivers
+active in the kernel-- Ethernet, crypto, compression,
+etc.
+
+Ethernet driver
+---------------
+The Ethernet driver is bound to a DPNI and implements the kernel
+interfaces needed to connect the DPAA2 network interface to
+the network stack.
+Each DPNI corresponds to a Linux network interface.
+
+MAC driver
+----------
+An Ethernet PHY is an off-chip, board-specific component and is managed
+by the appropriate PHY driver via an MDIO bus. The MAC driver
+acts as a proxy between the PHY driver and the MC,
+using MC commands to a DPMAC object.
+If the PHY driver signals a link change, the MAC driver notifies
+the MC via a DPMAC command. If a network interface is brought
+up or down, the MC notifies the DPMAC driver via an interrupt and
+the driver can take appropriate action.
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
index 8b23a553fd4a..5dcbab6fd622 100644
--- a/drivers/staging/fwserial/dma_fifo.c
+++ b/drivers/staging/fwserial/dma_fifo.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* DMA-able FIFO implementation
*
* Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/fwserial/dma_fifo.h b/drivers/staging/fwserial/dma_fifo.h
index 37a91c6a1709..c46a06336975 100644
--- a/drivers/staging/fwserial/dma_fifo.h
+++ b/drivers/staging/fwserial/dma_fifo.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* DMA-able FIFO interface
*
* Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DMA_FIFO_H_
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index bba7e9c888b3..1993b03a6f2d 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* FireWire Serial driver
*
* Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index b0c66112eb18..cc8d6fc831b4 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -72,14 +72,14 @@ static int coldboot_seq(struct platform_device *pdev)
int ret;
if (apb->init_disabled ||
- apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
+ apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
return 0;
/* Hold APB in reset state */
assert_reset(apb->resetn_gpio);
if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
- gpio_is_valid(apb->spi_en_gpio))
+ gpio_is_valid(apb->spi_en_gpio))
devm_gpio_free(dev, apb->spi_en_gpio);
/* Enable power to APB */
@@ -122,7 +122,7 @@ static int fw_flashing_seq(struct platform_device *pdev)
int ret;
if (apb->init_disabled ||
- apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+ apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
return 0;
ret = regulator_enable(apb->vcore);
@@ -146,7 +146,7 @@ static int fw_flashing_seq(struct platform_device *pdev)
flags = GPIOF_OUT_INIT_LOW;
ret = devm_gpio_request_one(dev, apb->spi_en_gpio,
- flags, "apb_spi_en");
+ flags, "apb_spi_en");
if (ret) {
dev_err(dev, "Failed requesting SPI bus en gpio %d\n",
apb->spi_en_gpio);
@@ -174,11 +174,11 @@ static int standby_boot_seq(struct platform_device *pdev)
* then we do not want to change the state
*/
if (apb->state == ARCHE_PLATFORM_STATE_STANDBY ||
- apb->state == ARCHE_PLATFORM_STATE_OFF)
+ apb->state == ARCHE_PLATFORM_STATE_OFF)
return 0;
if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
- gpio_is_valid(apb->spi_en_gpio))
+ gpio_is_valid(apb->spi_en_gpio))
devm_gpio_free(dev, apb->spi_en_gpio);
/*
@@ -203,7 +203,7 @@ static void poweroff_seq(struct platform_device *pdev)
return;
if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
- gpio_is_valid(apb->spi_en_gpio))
+ gpio_is_valid(apb->spi_en_gpio))
devm_gpio_free(dev, apb->spi_en_gpio);
/* disable the clock */
@@ -251,7 +251,8 @@ void apb_ctrl_poweroff(struct device *dev)
}
static ssize_t state_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
@@ -297,7 +298,7 @@ static ssize_t state_store(struct device *dev,
}
static ssize_t state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
@@ -319,7 +320,7 @@ static ssize_t state_show(struct device *dev,
static DEVICE_ATTR_RW(state);
static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
- struct arche_apb_ctrl_drvdata *apb)
+ struct arche_apb_ctrl_drvdata *apb)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -331,10 +332,10 @@ static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
return apb->resetn_gpio;
}
ret = devm_gpio_request_one(dev, apb->resetn_gpio,
- GPIOF_OUT_INIT_LOW, "apb-reset");
+ GPIOF_OUT_INIT_LOW, "apb-reset");
if (ret) {
dev_err(dev, "Failed requesting reset gpio %d\n",
- apb->resetn_gpio);
+ apb->resetn_gpio);
return ret;
}
@@ -344,10 +345,10 @@ static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
return apb->boot_ret_gpio;
}
ret = devm_gpio_request_one(dev, apb->boot_ret_gpio,
- GPIOF_OUT_INIT_LOW, "boot retention");
+ GPIOF_OUT_INIT_LOW, "boot retention");
if (ret) {
dev_err(dev, "Failed requesting bootret gpio %d\n",
- apb->boot_ret_gpio);
+ apb->boot_ret_gpio);
return ret;
}
@@ -358,10 +359,10 @@ static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
return apb->pwroff_gpio;
}
ret = devm_gpio_request_one(dev, apb->pwroff_gpio,
- GPIOF_IN, "pwroff_n");
+ GPIOF_IN, "pwroff_n");
if (ret) {
dev_err(dev, "Failed requesting pwroff_n gpio %d\n",
- apb->pwroff_gpio);
+ apb->pwroff_gpio);
return ret;
}
@@ -371,10 +372,10 @@ static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
dev_warn(dev, "failed to get clock en gpio\n");
} else if (gpio_is_valid(apb->clk_en_gpio)) {
ret = devm_gpio_request_one(dev, apb->clk_en_gpio,
- GPIOF_OUT_INIT_LOW, "apb_clk_en");
+ GPIOF_OUT_INIT_LOW, "apb_clk_en");
if (ret) {
dev_warn(dev, "Failed requesting APB clock en gpio %d\n",
- apb->clk_en_gpio);
+ apb->clk_en_gpio);
return ret;
}
}
@@ -407,7 +408,7 @@ static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
apb->spi_en_gpio = of_get_named_gpio(np, "spi-en-gpio", 0);
if (apb->spi_en_gpio >= 0) {
if (of_property_read_bool(pdev->dev.of_node,
- "spi-en-active-high"))
+ "spi-en-active-high"))
apb->spi_en_polarity_high = true;
}
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index ace4eb365c0e..83254a72a7bb 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -233,7 +233,7 @@ arche_platform_coldboot_seq(struct arche_platform_drvdata *arche_pdata)
ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
if (ret) {
dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
- ret);
+ ret);
return ret;
}
@@ -269,7 +269,7 @@ arche_platform_fw_flashing_seq(struct arche_platform_drvdata *arche_pdata)
ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
if (ret) {
dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
- ret);
+ ret);
return ret;
}
@@ -312,7 +312,8 @@ arche_platform_poweroff_seq(struct arche_platform_drvdata *arche_pdata)
}
static ssize_t state_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
@@ -376,7 +377,7 @@ exit:
}
static ssize_t state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct arche_platform_drvdata *arche_pdata = dev_get_drvdata(dev);
@@ -443,7 +444,7 @@ static int arche_platform_probe(struct platform_device *pdev)
/* setup svc reset gpio */
arche_pdata->is_reset_act_hi = of_property_read_bool(np,
- "svc,reset-active-high");
+ "svc,reset-active-high");
arche_pdata->svc_reset_gpio = of_get_named_gpio(np,
"svc,reset-gpio",
0);
@@ -457,7 +458,7 @@ static int arche_platform_probe(struct platform_device *pdev)
return ret;
}
ret = gpio_direction_output(arche_pdata->svc_reset_gpio,
- arche_pdata->is_reset_act_hi);
+ arche_pdata->is_reset_act_hi);
if (ret) {
dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
return ret;
@@ -465,7 +466,8 @@ static int arche_platform_probe(struct platform_device *pdev)
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
arche_pdata->svc_sysboot_gpio = of_get_named_gpio(np,
- "svc,sysboot-gpio", 0);
+ "svc,sysboot-gpio",
+ 0);
if (arche_pdata->svc_sysboot_gpio < 0) {
dev_err(dev, "failed to get sysboot gpio\n");
return arche_pdata->svc_sysboot_gpio;
@@ -483,7 +485,8 @@ static int arche_platform_probe(struct platform_device *pdev)
/* setup the clock request gpio first */
arche_pdata->svc_refclk_req = of_get_named_gpio(np,
- "svc,refclk-req-gpio", 0);
+ "svc,refclk-req-gpio",
+ 0);
if (arche_pdata->svc_refclk_req < 0) {
dev_err(dev, "failed to get svc clock-req gpio\n");
return arche_pdata->svc_refclk_req;
@@ -525,7 +528,7 @@ static int arche_platform_probe(struct platform_device *pdev)
"wake detect");
if (ret) {
dev_err(dev, "Failed requesting wake_detect gpio %d\n",
- arche_pdata->wake_detect_gpio);
+ arche_pdata->wake_detect_gpio);
return ret;
}
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index fdb9e83cc34b..35acd55ca5ab 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -47,7 +47,7 @@ static int gbaudio_module_enable_tx(struct gbaudio_codec_info *codec,
int module_state, ret = 0;
u16 data_cport, i2s_port, cportid;
u8 sig_bits, channels;
- uint32_t format, rate;
+ u32 format, rate;
struct gbaudio_data_connection *data;
struct gbaudio_stream_params *params;
@@ -182,7 +182,7 @@ static int gbaudio_module_enable_rx(struct gbaudio_codec_info *codec,
int module_state, ret = 0;
u16 data_cport, i2s_port, cportid;
u8 sig_bits, channels;
- uint32_t format, rate;
+ u32 format, rate;
struct gbaudio_data_connection *data;
struct gbaudio_stream_params *params;
@@ -319,7 +319,7 @@ int gbaudio_module_update(struct gbaudio_codec_info *codec,
char intf_name[NAME_SIZE], dir[NAME_SIZE];
dev_dbg(module->dev, "%s:Module update %s sequence\n", w->name,
- enable ? "Enable":"Disable");
+ enable ? "Enable" : "Disable");
if ((w->id != snd_soc_dapm_aif_in) && (w->id != snd_soc_dapm_aif_out)) {
dev_dbg(codec->dev, "No action required for %s\n", w->name);
@@ -412,7 +412,7 @@ static int gbcodec_hw_params(struct snd_pcm_substream *substream,
{
int ret;
u8 sig_bits, channels;
- uint32_t format, rate;
+ u32 format, rate;
struct gbaudio_module_info *module;
struct gbaudio_data_connection *data;
struct gb_bundle *bundle;
@@ -567,7 +567,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
if (ret) {
mutex_unlock(&codec->lock);
dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n",
- ret);
+ ret);
return ret;
}
@@ -587,9 +587,8 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
struct gbaudio_stream_params *params;
-
dev_dbg(dai->dev, "Mute:%d, Direction:%s\n", mute,
- stream ? "CAPTURE":"PLAYBACK");
+ stream ? "CAPTURE" : "PLAYBACK");
mutex_lock(&codec->lock);
@@ -827,7 +826,7 @@ int gbaudio_register_module(struct gbaudio_module_info *module)
module->num_dapm_widgets);
if (module->controls)
snd_soc_add_codec_controls(codec, module->controls,
- module->num_controls);
+ module->num_controls);
if (module->dapm_routes)
snd_soc_dapm_add_routes(&codec->dapm, module->dapm_routes,
module->num_dapm_routes);
@@ -842,8 +841,8 @@ int gbaudio_register_module(struct gbaudio_module_info *module)
* from codec->jack_list
*/
list_for_each_entry(jack, &codec->jack_list, list) {
- if ((jack == &module->headset_jack)
- || (jack == &module->button_jack))
+ if ((jack == &module->headset_jack) ||
+ (jack == &module->button_jack))
snd_device_register(codec->card->snd_card,
jack->jack);
}
@@ -907,7 +906,6 @@ static void gbaudio_codec_clean_data_rx(struct gbaudio_data_connection *data)
data->state[1] = GBAUDIO_CODEC_SHUTDOWN;
}
-
static void gbaudio_codec_cleanup(struct gbaudio_module_info *module)
{
struct gbaudio_data_connection *data;
@@ -923,7 +921,6 @@ static void gbaudio_codec_cleanup(struct gbaudio_module_info *module)
if (cap_state > GBAUDIO_CODEC_SHUTDOWN)
gbaudio_codec_clean_data_rx(data);
-
}
}
@@ -972,7 +969,7 @@ void gbaudio_unregister_module(struct gbaudio_module_info *module)
dev_dbg(codec->dev, "Removing %d controls\n",
module->num_controls);
snd_soc_remove_codec_controls(codec, module->controls,
- module->num_controls);
+ module->num_controls);
}
if (module->dapm_widgets) {
dev_dbg(codec->dev, "Removing %d widgets\n",
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index 161b37c8ef17..a1d5440552d4 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -53,7 +53,7 @@ enum gbaudio_codec_state {
struct gbaudio_stream_params {
int state;
u8 sig_bits, channels;
- uint32_t format, rate;
+ u32 format, rate;
};
struct gbaudio_codec_dai {
@@ -159,7 +159,7 @@ struct gbaudio_module_info {
};
int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
- struct gb_audio_topology *tplg_data);
+ struct gb_audio_topology *tplg_data);
void gbaudio_tplg_release(struct gbaudio_module_info *module);
int gbaudio_module_update(struct gbaudio_codec_info *codec,
@@ -183,12 +183,12 @@ extern int gb_audio_gb_enable_widget(struct gb_connection *connection,
extern int gb_audio_gb_disable_widget(struct gb_connection *connection,
u8 widget_id);
extern int gb_audio_gb_get_pcm(struct gb_connection *connection,
- u16 data_cport, uint32_t *format,
- uint32_t *rate, u8 *channels,
+ u16 data_cport, u32 *format,
+ u32 *rate, u8 *channels,
u8 *sig_bits);
extern int gb_audio_gb_set_pcm(struct gb_connection *connection,
- u16 data_cport, uint32_t format,
- uint32_t rate, u8 channels,
+ u16 data_cport, u32 format,
+ u32 rate, u8 channels,
u8 sig_bits);
extern int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
u16 data_cport, u16 size);
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
index 16cc65e1472b..a5d7c53df987 100644
--- a/drivers/staging/greybus/authentication.c
+++ b/drivers/staging/greybus/authentication.c
@@ -202,7 +202,7 @@ static int cap_release(struct inode *inode, struct file *file)
}
static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
- void __user *buf)
+ void __user *buf)
{
struct cap_ioc_get_endpoint_uid endpoint_uid;
struct cap_ioc_get_ims_certificate *ims_cert;
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index f13f16b63d7e..07ebfb88db9b 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -918,7 +918,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
/* Retrieve number of streams to configure */
token = strsep(&buf, ";");
- if (token == NULL)
+ if (!token)
return -EINVAL;
ret = kstrtouint(token, 10, &nstreams);
@@ -929,7 +929,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
return -EINVAL;
token = strsep(&buf, ";");
- if (token == NULL)
+ if (!token)
return -EINVAL;
ret = kstrtouint(token, 10, &flags);
@@ -946,7 +946,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
/* width */
token = strsep(&buf, ";");
- if (token == NULL) {
+ if (!token) {
ret = -EINVAL;
goto done;
}
@@ -956,7 +956,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
/* height */
token = strsep(&buf, ";");
- if (token == NULL)
+ if (!token)
goto done;
ret = kstrtouint(token, 10, &stream->height);
@@ -965,7 +965,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
/* Image format code */
token = strsep(&buf, ";");
- if (token == NULL)
+ if (!token)
goto done;
ret = kstrtouint(token, 16, &stream->format);
@@ -1009,7 +1009,7 @@ static ssize_t gb_camera_debugfs_capture(struct gb_camera *gcam,
/* Request id */
token = strsep(&buf, ";");
- if (token == NULL)
+ if (!token)
return -EINVAL;
ret = kstrtouint(token, 10, &request_id);
if (ret < 0)
@@ -1017,7 +1017,7 @@ static ssize_t gb_camera_debugfs_capture(struct gb_camera *gcam,
/* Stream mask */
token = strsep(&buf, ";");
- if (token == NULL)
+ if (!token)
return -EINVAL;
ret = kstrtouint(token, 16, &streams_mask);
if (ret < 0)
@@ -1025,7 +1025,7 @@ static ssize_t gb_camera_debugfs_capture(struct gb_camera *gcam,
/* number of frames */
token = strsep(&buf, ";");
- if (token == NULL)
+ if (!token)
return -EINVAL;
ret = kstrtouint(token, 10, &num_frames);
if (ret < 0)
diff --git a/drivers/staging/gs_fpgaboot/io.h b/drivers/staging/gs_fpgaboot/io.h
index 5e839b1fc884..bc5d99cbda8f 100644
--- a/drivers/staging/gs_fpgaboot/io.h
+++ b/drivers/staging/gs_fpgaboot/io.h
@@ -7,12 +7,7 @@
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define GPDIR 0
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index cadfb96734ed..f01595593ce2 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -271,7 +271,7 @@ static int ad7192_setup(struct ad7192_state *st,
if (pdata->sinc3_en)
st->mode |= AD7192_MODE_SINC3;
- if (pdata->refin2_en && (st->devid != ID_AD7195))
+ if (pdata->refin2_en && st->devid != ID_AD7195)
st->conf |= AD7192_CONF_REFSEL;
if (pdata->chop_en) {
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 61377ca444de..19dc896603a1 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -47,7 +47,7 @@
#define AD7152_STATUS_PWDN BIT(7)
/* Setup Register Bit Designations (AD7152_REG_CHx_SETUP) */
-#define AD7152_SETUP_CAPDIFF (1 << 5)
+#define AD7152_SETUP_CAPDIFF BIT(5)
#define AD7152_SETUP_RANGE_2pF (0 << 6)
#define AD7152_SETUP_RANGE_0_5pF (1 << 6)
#define AD7152_SETUP_RANGE_1pF (2 << 6)
@@ -55,8 +55,8 @@
#define AD7152_SETUP_RANGE(x) ((x) << 6)
/* Config Register Bit Designations (AD7152_REG_CFG) */
-#define AD7152_CONF_CH2EN (1 << 3)
-#define AD7152_CONF_CH1EN (1 << 4)
+#define AD7152_CONF_CH2EN BIT(3)
+#define AD7152_CONF_CH1EN BIT(4)
#define AD7152_CONF_MODE_IDLE (0 << 0)
#define AD7152_CONF_MODE_CONT_CONV (1 << 0)
#define AD7152_CONF_MODE_SINGLE_CONV (2 << 0)
@@ -64,7 +64,7 @@
#define AD7152_CONF_MODE_GAIN_CAL (6 << 0)
/* Capdac Register Bit Designations (AD7152_REG_CAPDAC_XXX) */
-#define AD7152_CAPDAC_DACEN (1 << 7)
+#define AD7152_CAPDAC_DACEN BIT(7)
#define AD7152_CAPDAC_DACP(x) ((x) & 0x1F)
/* CFG2 Register Bit Designations (AD7152_REG_CFG2) */
@@ -118,22 +118,23 @@ static inline ssize_t ad7152_start_calib(struct device *dev,
mutex_lock(&chip->state_lock);
ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval);
- if (ret < 0) {
- mutex_unlock(&chip->state_lock);
- return ret;
- }
+ if (ret < 0)
+ goto unlock;
do {
mdelay(20);
ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG);
- if (ret < 0) {
- mutex_unlock(&chip->state_lock);
- return ret;
- }
+ if (ret < 0)
+ goto unlock;
+
} while ((ret == regval) && timeout--);
mutex_unlock(&chip->state_lock);
return len;
+
+unlock:
+ mutex_unlock(&chip->state_lock);
+ return ret;
}
static ssize_t ad7152_start_offset_calib(struct device *dev,
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index a124853a05f0..c4a864725376 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -302,23 +302,24 @@ static inline ssize_t ad7746_start_calib(struct device *dev,
mutex_lock(&chip->lock);
regval |= chip->config;
ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval);
- if (ret < 0) {
- mutex_unlock(&chip->lock);
- return ret;
- }
+ if (ret < 0)
+ goto unlock;
do {
msleep(20);
ret = i2c_smbus_read_byte_data(chip->client, AD7746_REG_CFG);
- if (ret < 0) {
- mutex_unlock(&chip->lock);
- return ret;
- }
+ if (ret < 0)
+ goto unlock;
+
} while ((ret == regval) && timeout--);
mutex_unlock(&chip->lock);
return len;
+
+unlock:
+ mutex_unlock(&chip->lock);
+ return ret;
}
static ssize_t ad7746_start_offset_calib(struct device *dev,
diff --git a/drivers/staging/iio/light/tsl2x7x.c b/drivers/staging/iio/light/tsl2x7x.c
index 42ed9c015aaf..126e11530ce0 100644
--- a/drivers/staging/iio/light/tsl2x7x.c
+++ b/drivers/staging/iio/light/tsl2x7x.c
@@ -1441,7 +1441,8 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
static struct attribute *tsl2x7x_ALS_device_attrs[] = {
&dev_attr_in_illuminance0_calibscale_available.attr,
- &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
+ &iio_const_attr_in_illuminance0_integration_time_available
+ .dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
@@ -1455,7 +1456,8 @@ static struct attribute *tsl2x7x_PRX_device_attrs[] = {
static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = {
&dev_attr_in_illuminance0_calibscale_available.attr,
- &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
+ &iio_const_attr_in_illuminance0_integration_time_available
+ .dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
@@ -1471,7 +1473,8 @@ static struct attribute *tsl2x7x_PRX2_device_attrs[] = {
static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = {
&dev_attr_in_illuminance0_calibscale_available.attr,
- &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
+ &iio_const_attr_in_illuminance0_integration_time_available
+ .dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index d80dcf82eba9..71f11d7472c0 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -78,9 +78,9 @@ static int iio_bfin_tmr_set_state(struct iio_trigger *trig, bool state)
return 0;
}
-static ssize_t iio_bfin_tmr_frequency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t frequency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
@@ -116,9 +116,9 @@ static ssize_t iio_bfin_tmr_frequency_store(struct device *dev,
return count;
}
-static ssize_t iio_bfin_tmr_frequency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t frequency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
@@ -133,8 +133,7 @@ static ssize_t iio_bfin_tmr_frequency_show(struct device *dev,
return sprintf(buf, "%lu\n", val);
}
-static DEVICE_ATTR(frequency, 0644, iio_bfin_tmr_frequency_show,
- iio_bfin_tmr_frequency_store);
+static DEVICE_ATTR_RW(frequency);
static struct attribute *iio_bfin_tmr_trigger_attrs[] = {
&dev_attr_frequency.attr,
@@ -187,9 +186,9 @@ static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
return -ENOMEM;
st->irq = platform_get_irq(pdev, 0);
- if (!st->irq) {
+ if (st->irq < 0) {
dev_err(&pdev->dev, "No IRQs specified");
- return -ENODEV;
+ return st->irq;
}
ret = iio_bfin_tmr_get_number(st->irq);
diff --git a/net/ipx/Kconfig b/drivers/staging/ipx/Kconfig
index e9ad0062fbb6..cdff083d0ee6 100644
--- a/net/ipx/Kconfig
+++ b/drivers/staging/ipx/Kconfig
@@ -3,6 +3,7 @@
#
config IPX
tristate "The IPX protocol"
+ depends on NET
select LLC
---help---
This is support for the Novell networking protocol, IPX, commonly
diff --git a/net/ipx/Makefile b/drivers/staging/ipx/Makefile
index 440fafa9fd07..440fafa9fd07 100644
--- a/net/ipx/Makefile
+++ b/drivers/staging/ipx/Makefile
diff --git a/drivers/staging/ipx/TODO b/drivers/staging/ipx/TODO
new file mode 100644
index 000000000000..80db5d968264
--- /dev/null
+++ b/drivers/staging/ipx/TODO
@@ -0,0 +1,4 @@
+The ipx code will be removed soon from the kernel tree as it is old and
+obsolete and broken.
+
+Don't worry about fixing up anything here, it's not needed.
diff --git a/net/ipx/af_ipx.c b/drivers/staging/ipx/af_ipx.c
index d21a9d128d3e..d21a9d128d3e 100644
--- a/net/ipx/af_ipx.c
+++ b/drivers/staging/ipx/af_ipx.c
diff --git a/net/ipx/ipx_proc.c b/drivers/staging/ipx/ipx_proc.c
index 38a3d51d9ead..b9232e4e2ed4 100644
--- a/net/ipx/ipx_proc.c
+++ b/drivers/staging/ipx/ipx_proc.c
@@ -260,7 +260,6 @@ static int ipx_seq_socket_open(struct inode *inode, struct file *file)
}
static const struct file_operations ipx_seq_interface_fops = {
- .owner = THIS_MODULE,
.open = ipx_seq_interface_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -268,7 +267,6 @@ static const struct file_operations ipx_seq_interface_fops = {
};
static const struct file_operations ipx_seq_route_fops = {
- .owner = THIS_MODULE,
.open = ipx_seq_route_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -276,7 +274,6 @@ static const struct file_operations ipx_seq_route_fops = {
};
static const struct file_operations ipx_seq_socket_fops = {
- .owner = THIS_MODULE,
.open = ipx_seq_socket_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipx/ipx_route.c b/drivers/staging/ipx/ipx_route.c
index 3cf93aa9f284..3cf93aa9f284 100644
--- a/net/ipx/ipx_route.c
+++ b/drivers/staging/ipx/ipx_route.c
diff --git a/net/ipx/pe2.c b/drivers/staging/ipx/pe2.c
index ba7d4214bbff..ba7d4214bbff 100644
--- a/net/ipx/pe2.c
+++ b/drivers/staging/ipx/pe2.c
diff --git a/net/ipx/sysctl_net_ipx.c b/drivers/staging/ipx/sysctl_net_ipx.c
index c3eef457db88..c3eef457db88 100644
--- a/net/ipx/sysctl_net_ipx.c
+++ b/drivers/staging/ipx/sysctl_net_ipx.c
diff --git a/drivers/staging/irda/drivers/pxaficp_ir.c b/drivers/staging/irda/drivers/pxaficp_ir.c
index 1dba16bc7f8d..2ea00a6531f9 100644
--- a/drivers/staging/irda/drivers/pxaficp_ir.c
+++ b/drivers/staging/irda/drivers/pxaficp_ir.c
@@ -12,7 +12,6 @@
* Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
*
*/
-#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c
index b82a47b9ef0b..f1d128b2dae9 100644
--- a/drivers/staging/irda/net/af_irda.c
+++ b/drivers/staging/irda/net/af_irda.c
@@ -1737,12 +1737,12 @@ static int irda_shutdown(struct socket *sock, int how)
/*
* Function irda_poll (file, sock, wait)
*/
-static unsigned int irda_poll(struct file * file, struct socket *sock,
+static __poll_t irda_poll(struct file * file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/drivers/staging/irda/net/irlmp.c b/drivers/staging/irda/net/irlmp.c
index 34355061ab0b..7af618fb66c0 100644
--- a/drivers/staging/irda/net/irlmp.c
+++ b/drivers/staging/irda/net/irlmp.c
@@ -1668,7 +1668,7 @@ static int irlmp_slsap_inuse(__u8 slsap_sel)
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC,
goto errlsap;);
- if ((self->slsap_sel == slsap_sel)) {
+ if (self->slsap_sel == slsap_sel) {
pr_debug("Source LSAP selector=%02x in use\n",
self->slsap_sel);
goto errlsap;
@@ -1693,7 +1693,7 @@ static int irlmp_slsap_inuse(__u8 slsap_sel)
self = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
while (self != NULL) {
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, goto erruncon;);
- if ((self->slsap_sel == slsap_sel)) {
+ if (self->slsap_sel == slsap_sel) {
pr_debug("Source LSAP selector=%02x in use (unconnected)\n",
self->slsap_sel);
goto erruncon;
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.c b/drivers/staging/irda/net/irnet/irnet_ppp.c
index 7025dcb853d0..75bf9e34311d 100644
--- a/drivers/staging/irda/net/irnet/irnet_ppp.c
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.c
@@ -419,12 +419,12 @@ irnet_ctrl_read(irnet_socket * ap,
* Poll : called when someone do a select on /dev/irnet.
* Just check if there are new events...
*/
-static inline unsigned int
+static inline __poll_t
irnet_ctrl_poll(irnet_socket * ap,
struct file * file,
poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap);
@@ -608,12 +608,12 @@ dev_irnet_read(struct file * file,
/*
* Poll : called when someone do a select on /dev/irnet
*/
-static unsigned int
+static __poll_t
dev_irnet_poll(struct file * file,
poll_table * wait)
{
irnet_socket * ap = file->private_data;
- unsigned int mask;
+ __poll_t mask;
DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n",
file, ap);
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.h b/drivers/staging/irda/net/irnet/irnet_ppp.h
index 32061442cc8e..e6d5aa2a8aac 100644
--- a/drivers/staging/irda/net/irnet/irnet_ppp.h
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.h
@@ -70,7 +70,7 @@ static ssize_t
char __user *,
size_t,
loff_t *);
-static unsigned int
+static __poll_t
dev_irnet_poll(struct file *,
poll_table *);
static long
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 880085e2f24a..e48c55769c94 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -2990,7 +2990,7 @@ int ks_wlan_net_stop(struct net_device *dev)
/**
* is_connect_status() - return true if status is 'connected'
* @status: high bit is used as FORCE_DISCONNECT, low bits used for
- * connect status.
+ * connect status.
*/
bool is_connect_status(u32 status)
{
@@ -3000,7 +3000,7 @@ bool is_connect_status(u32 status)
/**
* is_disconnect_status() - return true if status is 'disconnected'
* @status: high bit is used as FORCE_DISCONNECT, low bits used for
- * disconnect status.
+ * disconnect status.
*/
bool is_disconnect_status(u32 status)
{
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 6ad8867e5451..ca3472cc952f 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -45,7 +45,6 @@
#include <linux/libcfs/libcfs_prim.h>
#include <linux/libcfs/libcfs_time.h>
#include <linux/libcfs/libcfs_string.h>
-#include <linux/libcfs/libcfs_workitem.h>
#include <linux/libcfs/libcfs_hash.h>
#include <linux/libcfs/libcfs_fail.h>
#include <linux/libcfs/curproc.h>
@@ -74,16 +73,6 @@ sigset_t cfs_block_sigsinv(unsigned long sigs);
void cfs_restore_sigs(sigset_t sigset);
void cfs_clear_sigpending(void);
-/*
- * Random number handling
- */
-
-/* returns a random 32-bit integer */
-unsigned int cfs_rand(void);
-/* seed the generator */
-void cfs_srand(unsigned int seed1, unsigned int seed2);
-void cfs_get_random_bytes(void *buf, int size);
-
struct libcfs_ioctl_handler {
struct list_head item;
int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
@@ -126,7 +115,7 @@ extern struct miscdevice libcfs_dev;
*/
extern char lnet_debug_log_upcall[1024];
-extern struct cfs_wi_sched *cfs_sched_rehash;
+extern struct workqueue_struct *cfs_rehash_wq;
struct lnet_debugfs_symlink_def {
char *name;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 6d132f941281..61bce77fddd6 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -79,7 +79,7 @@
/**
* return cpumask of CPU partition \a cpt
*/
-cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
+cpumask_var_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
/**
* print string information of cpt-table
*/
@@ -96,7 +96,7 @@ struct cfs_cpt_table {
u64 ctb_version;
};
-static inline cpumask_t *
+static inline cpumask_var_t *
cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
{
return NULL;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 5a27220cc608..0506f1d45757 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -248,7 +248,7 @@ struct cfs_hash {
/** # of iterators (caller of cfs_hash_for_each_*) */
u32 hs_iterators;
/** rehash workitem */
- struct cfs_workitem hs_rehash_wi;
+ struct work_struct hs_rehash_work;
/** refcount on this hash table */
atomic_t hs_refcount;
/** rehash buckets-table */
@@ -265,7 +265,7 @@ struct cfs_hash {
/** bits when we found the max depth */
unsigned int hs_dep_bits;
/** workitem to output max depth */
- struct cfs_workitem hs_dep_wi;
+ struct work_struct hs_dep_work;
#endif
/** name of htable */
char hs_name[0];
@@ -738,7 +738,7 @@ u64 cfs_hash_size_get(struct cfs_hash *hs);
*/
void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
void cfs_hash_rehash_cancel(struct cfs_hash *hs);
-int cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
+void cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
void *new_key, struct hlist_node *hnode);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 2f4ff595fac9..491d5971d199 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -43,13 +43,6 @@
# define DEBUG_SUBSYSTEM S_UNDEFINED
#endif
-/*
- * When this is on, LASSERT macro includes check for assignment used instead
- * of equality check, but doesn't have unlikely(). Turn this on from time to
- * time to make test-builds. This shouldn't be on for production release.
- */
-#define LASSERT_CHECKED (0)
-
#define LASSERTF(cond, fmt, ...) \
do { \
if (unlikely(!(cond))) { \
@@ -74,8 +67,6 @@ do { \
# define LINVRNT(exp) ((void)sizeof !!(exp))
#endif
-#define KLASSERT(e) LASSERT(e)
-
void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msg);
#define LBUG() \
@@ -84,74 +75,24 @@ do { \
lbug_with_loc(&msgdata); \
} while (0)
-#ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
-#endif
-
-#define LIBCFS_ALLOC_PRE(size, mask) \
- LASSERT(!in_interrupt() || ((size) <= LIBCFS_VMALLOC_SIZE && \
- !gfpflags_allow_blocking(mask)))
-
-#define LIBCFS_ALLOC_POST(ptr, size) \
-do { \
- if (unlikely(!(ptr))) { \
- CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
- #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
- } else { \
- memset((ptr), 0, (size)); \
- } \
-} while (0)
-
-/**
- * allocate memory with GFP flags @mask
- */
-#define LIBCFS_ALLOC_GFP(ptr, size, mask) \
-do { \
- LIBCFS_ALLOC_PRE((size), (mask)); \
- (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
- kmalloc((size), (mask)) : vmalloc(size); \
- LIBCFS_ALLOC_POST((ptr), (size)); \
-} while (0)
-
-/**
- * default allocator
- */
-#define LIBCFS_ALLOC(ptr, size) \
- LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)
-
-/**
- * non-sleeping allocator
+/*
+ * Use #define rather than inline, as lnet_cpt_table() might
+ * not be defined yet
*/
-#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
- LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
+#define kmalloc_cpt(size, flags, cpt) \
+ kmalloc_node(size, flags, cfs_cpt_spread_node(lnet_cpt_table(), cpt))
-/**
- * allocate memory for specified CPU partition
- * \a cptab != NULL, \a cpt is CPU partition id of \a cptab
- * \a cptab == NULL, \a cpt is HW NUMA node id
- */
-#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask) \
-do { \
- LIBCFS_ALLOC_PRE((size), (mask)); \
- (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
- kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) :\
- vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); \
- LIBCFS_ALLOC_POST((ptr), (size)); \
-} while (0)
+#define kzalloc_cpt(size, flags, cpt) \
+ kmalloc_node(size, flags | __GFP_ZERO, \
+ cfs_cpt_spread_node(lnet_cpt_table(), cpt))
-/** default numa allocator */
-#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
- LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)
+#define kvmalloc_cpt(size, flags, cpt) \
+ kvmalloc_node(size, flags, \
+ cfs_cpt_spread_node(lnet_cpt_table(), cpt))
-#define LIBCFS_FREE(ptr, size) \
-do { \
- if (unlikely(!(ptr))) { \
- CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
- "%s:%d\n", (int)(size), __FILE__, __LINE__); \
- break; \
- } \
- kvfree(ptr); \
-} while (0)
+#define kvzalloc_cpt(size, flags, cpt) \
+ kvmalloc_node(size, flags | __GFP_ZERO, \
+ cfs_cpt_spread_node(lnet_cpt_table(), cpt))
/******************************************************************************/
@@ -242,59 +183,18 @@ do { \
#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0)
-#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
-#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
-
-/* max value for numeric network address */
-#define MAX_NUMERIC_VALUE 0xffffffff
-
/* implication */
#define ergo(a, b) (!(a) || (b))
/* logical equivalence */
#define equi(a, b) (!!(a) == !!(b))
-/* --------------------------------------------------------------------
- * Light-weight trace
- * Support for temporary event tracing with minimal Heisenberg effect.
- * --------------------------------------------------------------------
- */
-
-#define MKSTR(ptr) ((ptr)) ? (ptr) : ""
-
-static inline size_t cfs_size_round4(int val)
-{
- return (val + 3) & (~0x3);
-}
-
#ifndef HAVE_CFS_SIZE_ROUND
static inline size_t cfs_size_round(int val)
{
- return (val + 7) & (~0x7);
+ return round_up(val, 8);
}
#define HAVE_CFS_SIZE_ROUND
#endif
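For the power-of-two alignment used here, round_up() is identical to the old open-coded mask; an illustrative check (valid inside any function):

	/* round_up(val, 8) == (val + 7) & ~0x7 for the non-negative sizes used here */
	BUILD_BUG_ON(round_up(1, 8) != 8);
	BUILD_BUG_ON(round_up(8, 8) != 8);	/* already aligned */
	BUILD_BUG_ON(round_up(9, 8) != 16);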
-static inline size_t cfs_size_round16(int val)
-{
- return (val + 0xf) & (~0xf);
-}
-
-static inline size_t cfs_size_round32(int val)
-{
- return (val + 0x1f) & (~0x1f);
-}
-
-static inline size_t cfs_size_round0(int val)
-{
- if (!val)
- return 0;
- return (val + 1 + 7) & (~0x7);
-}
-
-static inline size_t cfs_round_strlen(char *fset)
-{
- return cfs_size_round((int)strlen(fset) + 1);
-}
-
#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index 1191764c431a..66463477074a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -73,7 +73,6 @@ struct cfs_expr_list {
struct list_head el_exprs;
};
-char *cfs_trimwhite(char *str);
int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
int cfs_str2num_check(char *str, int nob, unsigned int *num,
unsigned int min, unsigned int max);
@@ -86,11 +85,11 @@ static inline void
cfs_expr_list_values_free(u32 *values, int num)
{
/*
- * This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed
+ * This array is allocated by kvmalloc_array(), so it shouldn't be freed
* by OBD_FREE() if it's called by module other than libcfs & LNet,
* otherwise we will see fake memory leak
*/
- LIBCFS_FREE(values, num * sizeof(values[0]));
+ kvfree(values);
}
void cfs_expr_list_free(struct cfs_expr_list *expr_list);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
deleted file mode 100644
index fc780f608e57..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/libcfs_workitem.h
- *
- * Author: Isaac Huang <he.h.huang@oracle.com>
- * Liang Zhen <zhen.liang@sun.com>
- *
- * A workitems is deferred work with these semantics:
- * - a workitem always runs in thread context.
- * - a workitem can be concurrent with other workitems but is strictly
- * serialized with respect to itself.
- * - no CPU affinity, a workitem does not necessarily run on the same CPU
- * that schedules it. However, this might change in the future.
- * - if a workitem is scheduled again before it has a chance to run, it
- * runs only once.
- * - if a workitem is scheduled while it runs, it runs again after it
- * completes; this ensures that events occurring while other events are
- * being processed receive due attention. This behavior also allows a
- * workitem to reschedule itself.
- *
- * Usage notes:
- * - a workitem can sleep but it should be aware of how that sleep might
- * affect others.
- * - a workitem runs inside a kernel thread so there's no user space to access.
- * - do not use a workitem if the scheduling latency can't be tolerated.
- *
- * When wi_action returns non-zero, it means the workitem has either been
- * freed or reused and workitem scheduler won't touch it any more.
- */
-
-#ifndef __LIBCFS_WORKITEM_H__
-#define __LIBCFS_WORKITEM_H__
-
-struct cfs_wi_sched;
-
-void cfs_wi_sched_destroy(struct cfs_wi_sched *sched);
-int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
- int nthrs, struct cfs_wi_sched **sched_pp);
-
-struct cfs_workitem;
-
-typedef int (*cfs_wi_action_t) (struct cfs_workitem *);
-struct cfs_workitem {
- /** chain on runq or rerunq */
- struct list_head wi_list;
- /** working function */
- cfs_wi_action_t wi_action;
- /** arg for working function */
- void *wi_data;
- /** in running */
- unsigned short wi_running:1;
- /** scheduled */
- unsigned short wi_scheduled:1;
-};
-
-static inline void
-cfs_wi_init(struct cfs_workitem *wi, void *data, cfs_wi_action_t action)
-{
- INIT_LIST_HEAD(&wi->wi_list);
-
- wi->wi_running = 0;
- wi->wi_scheduled = 0;
- wi->wi_data = data;
- wi->wi_action = action;
-}
-
-void cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
-int cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
-void cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
-
-int cfs_wi_startup(void);
-void cfs_wi_shutdown(void);
-
-/** # workitem scheduler loops before reschedule */
-#define CFS_WI_RESCHED 128
-
-#endif /* __LIBCFS_WORKITEM_H__ */
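With this header gone, the remaining user (the libcfs hash table, below) moves to plain kernel workqueues. A rough sketch of the mapping as this series applies it (illustrative names only, not code from the patch):

	#include <linux/workqueue.h>

	/*
	 *   cfs_wi_init(wi, obj, fn)     ->  INIT_WORK(&obj->work, fn)
	 *   cfs_wi_schedule(sched, wi)   ->  queue_work(wq, &obj->work)
	 *   cfs_wi_deschedule()/_exit()  ->  cancel_work_sync(&obj->work)
	 */
	struct obj {
		struct work_struct work;
	};

	static void obj_worker(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, work);

		/* runs in thread context and is serialized against itself,
		 * matching the old workitem guarantees; do the work on @o here */
	}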
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index 854c84358ab4..6035376f2830 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -49,7 +49,7 @@
/** virtual processing unit */
struct cfs_cpu_partition {
/* CPUs mask for this partition */
- cpumask_t *cpt_cpumask;
+ cpumask_var_t cpt_cpumask;
/* nodes mask for this partition */
nodemask_t *cpt_nodemask;
/* spread rotor for NUMA allocator */
@@ -69,7 +69,7 @@ struct cfs_cpt_table {
/* shadow HW CPU to CPU partition ID */
int *ctb_cpu2cpt;
/* all cpus in this partition table */
- cpumask_t *ctb_cpumask;
+ cpumask_var_t ctb_cpumask;
/* all nodes in this partition table */
nodemask_t *ctb_nodemask;
};
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index c1626726fa05..df4c72507a15 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -181,21 +181,6 @@ lnet_net_lock_current(void)
#define MAX_PORTALS 64
-static inline struct lnet_eq *
-lnet_eq_alloc(void)
-{
- struct lnet_eq *eq;
-
- LIBCFS_ALLOC(eq, sizeof(*eq));
- return eq;
-}
-
-static inline void
-lnet_eq_free(struct lnet_eq *eq)
-{
- LIBCFS_FREE(eq, sizeof(*eq));
-}
-
static inline struct lnet_libmd *
lnet_md_alloc(struct lnet_md *umd)
{
@@ -211,7 +196,7 @@ lnet_md_alloc(struct lnet_md *umd)
size = offsetof(struct lnet_libmd, md_iov.iov[niov]);
}
- LIBCFS_ALLOC(md, size);
+ md = kzalloc(size, GFP_NOFS);
if (md) {
/* Set here in case of early free */
@@ -223,52 +208,6 @@ lnet_md_alloc(struct lnet_md *umd)
return md;
}
-static inline void
-lnet_md_free(struct lnet_libmd *md)
-{
- unsigned int size;
-
- if (md->md_options & LNET_MD_KIOV)
- size = offsetof(struct lnet_libmd, md_iov.kiov[md->md_niov]);
- else
- size = offsetof(struct lnet_libmd, md_iov.iov[md->md_niov]);
-
- LIBCFS_FREE(md, size);
-}
-
-static inline struct lnet_me *
-lnet_me_alloc(void)
-{
- struct lnet_me *me;
-
- LIBCFS_ALLOC(me, sizeof(*me));
- return me;
-}
-
-static inline void
-lnet_me_free(struct lnet_me *me)
-{
- LIBCFS_FREE(me, sizeof(*me));
-}
-
-static inline struct lnet_msg *
-lnet_msg_alloc(void)
-{
- struct lnet_msg *msg;
-
- LIBCFS_ALLOC(msg, sizeof(*msg));
-
- /* no need to zero, LIBCFS_ALLOC does for us */
- return msg;
-}
-
-static inline void
-lnet_msg_free(struct lnet_msg *msg)
-{
- LASSERT(!msg->msg_onactivelist);
- LIBCFS_FREE(msg, sizeof(*msg));
-}
-
struct lnet_libhandle *lnet_res_lh_lookup(struct lnet_res_container *rec,
__u64 cookie);
void lnet_res_lh_initialize(struct lnet_res_container *rec,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 8024843521ab..ec84edfda271 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -325,7 +325,7 @@ int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp,
LASSERT(net);
LASSERT(nid != LNET_NID_ANY);
- LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
+ peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
if (!peer) {
CERROR("Cannot allocate peer\n");
return -ENOMEM;
@@ -367,7 +367,7 @@ void kiblnd_destroy_peer(struct kib_peer *peer)
LASSERT(kiblnd_peer_idle(peer));
LASSERT(list_empty(&peer->ibp_tx_queue));
- LIBCFS_FREE(peer, sizeof(*peer));
+ kfree(peer);
/*
* NB a peer's connections keep a reference on their peer until
@@ -596,7 +596,7 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
{
- cpumask_t *mask;
+ cpumask_var_t *mask;
int vectors;
int off;
int i;
@@ -611,8 +611,8 @@ static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
return 0;
/* hash NID to CPU id in this partition... */
- off = do_div(nid, cpumask_weight(mask));
- for_each_cpu(i, mask) {
+ off = do_div(nid, cpumask_weight(*mask));
+ for_each_cpu(i, *mask) {
if (!off--)
return i % vectors;
}
@@ -656,15 +656,14 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
LASSERT(sched->ibs_nthreads > 0);
- LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
- sizeof(*init_qp_attr));
+ init_qp_attr = kzalloc_cpt(sizeof(*init_qp_attr), GFP_NOFS, cpt);
if (!init_qp_attr) {
CERROR("Can't allocate qp_attr for %s\n",
libcfs_nid2str(peer->ibp_nid));
goto failed_0;
}
- LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
+ conn = kzalloc_cpt(sizeof(*conn), GFP_NOFS, cpt);
if (!conn) {
CERROR("Can't allocate connection for %s\n",
libcfs_nid2str(peer->ibp_nid));
@@ -687,8 +686,7 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
INIT_LIST_HEAD(&conn->ibc_active_txs);
spin_lock_init(&conn->ibc_lock);
- LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
- sizeof(*conn->ibc_connvars));
+ conn->ibc_connvars = kzalloc_cpt(sizeof(*conn->ibc_connvars), GFP_NOFS, cpt);
if (!conn->ibc_connvars) {
CERROR("Can't allocate in-progress connection state\n");
goto failed_2;
@@ -722,8 +720,8 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
write_unlock_irqrestore(glock, flags);
- LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
+ conn->ibc_rxs = kzalloc_cpt(IBLND_RX_MSGS(conn) * sizeof(struct kib_rx),
+ GFP_NOFS, cpt);
if (!conn->ibc_rxs) {
CERROR("Cannot allocate RX buffers\n");
goto failed_2;
@@ -776,7 +774,7 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
goto failed_2;
}
- LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+ kfree(init_qp_attr);
/* 1 ref for caller and each rxmsg */
atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
@@ -826,14 +824,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
return conn;
failed_2:
- kiblnd_destroy_conn(conn, true);
+ kiblnd_destroy_conn(conn);
+ kfree(conn);
failed_1:
- LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+ kfree(init_qp_attr);
failed_0:
return NULL;
}
-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
+void kiblnd_destroy_conn(struct kib_conn *conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
struct kib_peer *peer = conn->ibc_peer;
@@ -877,13 +876,8 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
if (conn->ibc_rx_pages)
kiblnd_unmap_rx_descs(conn);
- if (conn->ibc_rxs) {
- LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
- }
-
- if (conn->ibc_connvars)
- LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+ kfree(conn->ibc_rxs);
+ kfree(conn->ibc_connvars);
if (conn->ibc_hdev)
kiblnd_hdev_decref(conn->ibc_hdev);
@@ -896,8 +890,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
rdma_destroy_id(cmid);
atomic_dec(&net->ibn_nconns);
}
-
- LIBCFS_FREE(conn, sizeof(*conn));
}
int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
@@ -1089,7 +1081,7 @@ static void kiblnd_free_pages(struct kib_pages *p)
__free_page(p->ibp_pages[i]);
}
- LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
+ kfree(p);
}
int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
@@ -1097,14 +1089,13 @@ int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
struct kib_pages *p;
int i;
- LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
- offsetof(struct kib_pages, ibp_pages[npages]));
+ p = kzalloc_cpt(offsetof(struct kib_pages, ibp_pages[npages]),
+ GFP_NOFS, cpt);
if (!p) {
CERROR("Can't allocate descriptor for %d pages\n", npages);
return -ENOMEM;
}
- memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
p->ibp_npages = npages;
for (i = 0; i < npages; i++) {
@@ -1299,7 +1290,7 @@ static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
frd_list) {
list_del(&frd->frd_list);
ib_dereg_mr(frd->frd_mr);
- LIBCFS_FREE(frd, sizeof(*frd));
+ kfree(frd);
i++;
}
if (i < fpo->fast_reg.fpo_pool_size)
@@ -1310,7 +1301,7 @@ static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
if (fpo->fpo_hdev)
kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(*fpo));
+ kfree(fpo);
}
static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
@@ -1376,8 +1367,7 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_po
INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
fpo->fast_reg.fpo_pool_size = 0;
for (i = 0; i < fps->fps_pool_size; i++) {
- LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
- sizeof(*frd));
+ frd = kzalloc_cpt(sizeof(*frd), GFP_NOFS, fps->fps_cpt);
if (!frd) {
CERROR("Failed to allocate a new fast_reg descriptor\n");
rc = -ENOMEM;
@@ -1405,14 +1395,14 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_po
out_middle:
if (frd->frd_mr)
ib_dereg_mr(frd->frd_mr);
- LIBCFS_FREE(frd, sizeof(*frd));
+ kfree(frd);
out:
list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
frd_list) {
list_del(&frd->frd_list);
ib_dereg_mr(frd->frd_mr);
- LIBCFS_FREE(frd, sizeof(*frd));
+ kfree(frd);
}
return rc;
@@ -1426,7 +1416,7 @@ static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
struct kib_fmr_pool *fpo;
int rc;
- LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
+ fpo = kzalloc_cpt(sizeof(*fpo), GFP_NOFS, fps->fps_cpt);
if (!fpo)
return -ENOMEM;
@@ -1464,7 +1454,7 @@ static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
out_fpo:
kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(*fpo));
+ kfree(fpo);
return rc;
}
@@ -1985,33 +1975,17 @@ static void kiblnd_destroy_tx_pool(struct kib_pool *pool)
struct kib_tx *tx = &tpo->tpo_tx_descs[i];
list_del(&tx->tx_list);
- if (tx->tx_pages)
- LIBCFS_FREE(tx->tx_pages,
- LNET_MAX_IOV *
- sizeof(*tx->tx_pages));
- if (tx->tx_frags)
- LIBCFS_FREE(tx->tx_frags,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_frags));
- if (tx->tx_wrq)
- LIBCFS_FREE(tx->tx_wrq,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_wrq));
- if (tx->tx_sge)
- LIBCFS_FREE(tx->tx_sge,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_sge));
- if (tx->tx_rd)
- LIBCFS_FREE(tx->tx_rd,
- offsetof(struct kib_rdma_desc,
- rd_frags[IBLND_MAX_RDMA_FRAGS]));
- }
-
- LIBCFS_FREE(tpo->tpo_tx_descs,
- pool->po_size * sizeof(struct kib_tx));
+ kfree(tx->tx_pages);
+ kfree(tx->tx_frags);
+ kfree(tx->tx_wrq);
+ kfree(tx->tx_sge);
+ kfree(tx->tx_rd);
+ }
+
+ kfree(tpo->tpo_tx_descs);
out:
kiblnd_fini_pool(pool);
- LIBCFS_FREE(tpo, sizeof(*tpo));
+ kfree(tpo);
}
static int kiblnd_tx_pool_size(int ncpts)
@@ -2029,7 +2003,7 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
struct kib_pool *pool;
struct kib_tx_pool *tpo;
- LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
+ tpo = kzalloc_cpt(sizeof(*tpo), GFP_NOFS, ps->ps_cpt);
if (!tpo) {
CERROR("Failed to allocate TX pool\n");
return -ENOMEM;
@@ -2043,12 +2017,12 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
npg = DIV_ROUND_UP(size * IBLND_MSG_SIZE, PAGE_SIZE);
if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) {
CERROR("Can't allocate tx pages: %d\n", npg);
- LIBCFS_FREE(tpo, sizeof(*tpo));
+ kfree(tpo);
return -ENOMEM;
}
- LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
- size * sizeof(struct kib_tx));
+ tpo->tpo_tx_descs = kzalloc_cpt(size * sizeof(struct kib_tx),
+ GFP_NOFS, ps->ps_cpt);
if (!tpo->tpo_tx_descs) {
CERROR("Can't allocate %d tx descriptors\n", size);
ps->ps_pool_destroy(pool);
@@ -2062,36 +2036,35 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
tx->tx_pool = tpo;
if (ps->ps_net->ibn_fmr_ps) {
- LIBCFS_CPT_ALLOC(tx->tx_pages,
- lnet_cpt_table(), ps->ps_cpt,
- LNET_MAX_IOV * sizeof(*tx->tx_pages));
+ tx->tx_pages = kzalloc_cpt(LNET_MAX_IOV * sizeof(*tx->tx_pages),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_pages)
break;
}
- LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_frags));
+ tx->tx_frags = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_frags)
break;
sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
- LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_wrq));
+ tx->tx_wrq = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_wrq),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_wrq)
break;
- LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_sge));
+ tx->tx_sge = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_sge),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_sge)
break;
- LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
- offsetof(struct kib_rdma_desc,
- rd_frags[IBLND_MAX_RDMA_FRAGS]));
+ tx->tx_rd = kzalloc_cpt(offsetof(struct kib_rdma_desc,
+ rd_frags[IBLND_MAX_RDMA_FRAGS]),
+ GFP_NOFS, ps->ps_cpt);
if (!tx->tx_rd)
break;
}
@@ -2263,7 +2236,7 @@ void kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
if (hdev->ibh_cmid)
rdma_destroy_id(hdev->ibh_cmid);
- LIBCFS_FREE(hdev, sizeof(*hdev));
+ kfree(hdev);
}
/* DUMMY */
@@ -2392,7 +2365,7 @@ int kiblnd_dev_failover(struct kib_dev *dev)
goto out;
}
- LIBCFS_ALLOC(hdev, sizeof(*hdev));
+ hdev = kzalloc(sizeof(*hdev), GFP_NOFS);
if (!hdev) {
CERROR("Failed to allocate kib_hca_dev\n");
rdma_destroy_id(cmid);
@@ -2471,7 +2444,7 @@ void kiblnd_destroy_dev(struct kib_dev *dev)
if (dev->ibd_hdev)
kiblnd_hdev_decref(dev->ibd_hdev);
- LIBCFS_FREE(dev, sizeof(*dev));
+ kfree(dev);
}
static struct kib_dev *kiblnd_create_dev(char *ifname)
@@ -2495,7 +2468,7 @@ static struct kib_dev *kiblnd_create_dev(char *ifname)
return NULL;
}
- LIBCFS_ALLOC(dev, sizeof(*dev));
+ dev = kzalloc(sizeof(*dev), GFP_NOFS);
if (!dev)
return NULL;
@@ -2517,7 +2490,7 @@ static struct kib_dev *kiblnd_create_dev(char *ifname)
rc = kiblnd_dev_failover(dev);
if (rc) {
CERROR("Can't initialize device: %d\n", rc);
- LIBCFS_FREE(dev, sizeof(*dev));
+ kfree(dev);
return NULL;
}
@@ -2577,11 +2550,7 @@ static void kiblnd_base_shutdown(void)
break;
}
- if (kiblnd_data.kib_peers) {
- LIBCFS_FREE(kiblnd_data.kib_peers,
- sizeof(struct list_head) *
- kiblnd_data.kib_peer_hash_size);
- }
+ kvfree(kiblnd_data.kib_peers);
if (kiblnd_data.kib_scheds)
cfs_percpt_free(kiblnd_data.kib_scheds);
@@ -2648,7 +2617,7 @@ static void kiblnd_shutdown(struct lnet_ni *ni)
net->ibn_init = IBLND_INIT_NOTHING;
ni->ni_data = NULL;
- LIBCFS_FREE(net, sizeof(*net));
+ kfree(net);
out:
if (list_empty(&kiblnd_data.kib_devs))
@@ -2673,8 +2642,9 @@ static int kiblnd_base_startup(void)
INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
- LIBCFS_ALLOC(kiblnd_data.kib_peers,
- sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
+ kiblnd_data.kib_peers = kvmalloc_array(kiblnd_data.kib_peer_hash_size,
+ sizeof(struct list_head),
+ GFP_KERNEL);
if (!kiblnd_data.kib_peers)
goto failed;
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
@@ -2865,7 +2835,7 @@ static int kiblnd_startup(struct lnet_ni *ni)
return rc;
}
- LIBCFS_ALLOC(net, sizeof(*net));
+ net = kzalloc(sizeof(*net), GFP_NOFS);
ni->ni_data = net;
if (!net)
goto net_failed;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 171eced213f8..b18911d09e9a 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -1016,7 +1016,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
struct rdma_cm_id *cmid,
int state, int version);
-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
+void kiblnd_destroy_conn(struct kib_conn *conn);
void kiblnd_close_conn(struct kib_conn *conn, int error);
void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 40e3af5d8b04..b3e7f28eb978 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -2124,7 +2124,7 @@ kiblnd_connreq_done(struct kib_conn *conn, int status)
(conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
peer->ibp_accepting > 0));
- LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+ kfree(conn->ibc_connvars);
conn->ibc_connvars = NULL;
if (status) {
@@ -3314,11 +3314,13 @@ kiblnd_connd(void *arg)
spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
- kiblnd_destroy_conn(conn, !peer);
+ kiblnd_destroy_conn(conn);
spin_lock_irqsave(lock, flags);
- if (!peer)
+ if (!peer) {
+ kfree(conn);
continue;
+ }
conn->ibc_peer = peer;
if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
@@ -3363,7 +3365,7 @@ kiblnd_connd(void *arg)
reconn += kiblnd_reconnect_peer(conn->ibc_peer);
kiblnd_peer_decref(conn->ibc_peer);
- LIBCFS_FREE(conn, sizeof(*conn));
+ kfree(conn);
spin_lock_irqsave(lock, flags);
}
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index a71b765215ad..b9235400bf1d 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -181,8 +181,8 @@ int kiblnd_tunables_setup(struct lnet_ni *ni)
* defaulted
*/
if (!ni->ni_lnd_tunables) {
- LIBCFS_ALLOC(ni->ni_lnd_tunables,
- sizeof(*ni->ni_lnd_tunables));
+ ni->ni_lnd_tunables = kzalloc(sizeof(*ni->ni_lnd_tunables),
+ GFP_NOFS);
if (!ni->ni_lnd_tunables)
return -ENOMEM;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 8267119ccc8e..ff292216290d 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -66,7 +66,7 @@ ksocknal_create_route(__u32 ipaddr, int port)
{
struct ksock_route *route;
- LIBCFS_ALLOC(route, sizeof(*route));
+ route = kzalloc(sizeof(*route), GFP_NOFS);
if (!route)
return NULL;
@@ -93,7 +93,7 @@ ksocknal_destroy_route(struct ksock_route *route)
if (route->ksnr_peer)
ksocknal_peer_decref(route->ksnr_peer);
- LIBCFS_FREE(route, sizeof(*route));
+ kfree(route);
}
static int
@@ -108,7 +108,7 @@ ksocknal_create_peer(struct ksock_peer **peerp, struct lnet_ni *ni,
LASSERT(id.pid != LNET_PID_ANY);
LASSERT(!in_interrupt());
- LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
+ peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
if (!peer)
return -ENOMEM;
@@ -132,7 +132,7 @@ ksocknal_create_peer(struct ksock_peer **peerp, struct lnet_ni *ni,
if (net->ksnn_shutdown) {
spin_unlock_bh(&net->ksnn_lock);
- LIBCFS_FREE(peer, sizeof(*peer));
+ kfree(peer);
CERROR("Can't create peer: network shutdown\n");
return -ESHUTDOWN;
}
@@ -160,7 +160,7 @@ ksocknal_destroy_peer(struct ksock_peer *peer)
LASSERT(list_empty(&peer->ksnp_tx_queue));
LASSERT(list_empty(&peer->ksnp_zc_req_list));
- LIBCFS_FREE(peer, sizeof(*peer));
+ kfree(peer);
/*
* NB a peer's connections and routes keep a reference on their peer
@@ -985,7 +985,7 @@ ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
LASSERT(!rc); /* we succeeded before */
- LIBCFS_ALLOC(cr, sizeof(*cr));
+ cr = kzalloc(sizeof(*cr), GFP_NOFS);
if (!cr) {
LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
&peer_ip);
@@ -1043,7 +1043,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
LASSERT(active == (type != SOCKLND_CONN_NONE));
- LIBCFS_ALLOC(conn, sizeof(*conn));
+ conn = kzalloc(sizeof(*conn), GFP_NOFS);
if (!conn) {
rc = -ENOMEM;
goto failed_0;
@@ -1070,8 +1070,9 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
conn->ksnc_tx_carrier = NULL;
atomic_set(&conn->ksnc_tx_nob, 0);
- LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
- kshm_ips[LNET_MAX_INTERFACES]));
+ hello = kvzalloc(offsetof(struct ksock_hello_msg,
+ kshm_ips[LNET_MAX_INTERFACES]),
+ GFP_KERNEL);
if (!hello) {
rc = -ENOMEM;
goto failed_1;
@@ -1334,8 +1335,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
}
- LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
- kshm_ips[LNET_MAX_INTERFACES]));
+ kvfree(hello);
/*
* setup the socket AFTER I've received hello (it disables
@@ -1415,11 +1415,9 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
ksocknal_peer_decref(peer);
failed_1:
- if (hello)
- LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
- kshm_ips[LNET_MAX_INTERFACES]));
+ kvfree(hello);
- LIBCFS_FREE(conn, sizeof(*conn));
+ kfree(conn);
failed_0:
sock_release(sock);
@@ -1716,7 +1714,7 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
ksocknal_peer_decref(conn->ksnc_peer);
- LIBCFS_FREE(conn, sizeof(*conn));
+ kfree(conn);
}
int
@@ -2259,19 +2257,12 @@ ksocknal_free_buffers(void)
struct ksock_sched_info *info;
int i;
- cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds) {
- LIBCFS_FREE(info->ksi_scheds,
- info->ksi_nthreads_max *
- sizeof(info->ksi_scheds[0]));
- }
- }
+ cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info)
+ kfree(info->ksi_scheds);
cfs_percpt_free(ksocknal_data.ksnd_sched_info);
}
- LIBCFS_FREE(ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
+ kvfree(ksocknal_data.ksnd_peers);
spin_lock(&ksocknal_data.ksnd_tx_lock);
@@ -2286,7 +2277,7 @@ ksocknal_free_buffers(void)
list_for_each_entry_safe(tx, temp, &zlist, tx_list) {
list_del(&tx->tx_list);
- LIBCFS_FREE(tx, tx->tx_desc_size);
+ kfree(tx);
}
} else {
spin_unlock(&ksocknal_data.ksnd_tx_lock);
@@ -2401,9 +2392,9 @@ ksocknal_base_startup(void)
memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
- LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
+ ksocknal_data.ksnd_peers = kvmalloc_array(ksocknal_data.ksnd_peer_hash_size,
+ sizeof(struct list_head),
+ GFP_KERNEL);
if (!ksocknal_data.ksnd_peers)
return -ENOMEM;
@@ -2456,8 +2447,8 @@ ksocknal_base_startup(void)
info->ksi_nthreads_max = nthrs;
info->ksi_cpt = i;
- LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
- info->ksi_nthreads_max * sizeof(*sched));
+ info->ksi_scheds = kzalloc_cpt(info->ksi_nthreads_max * sizeof(*sched),
+ GFP_NOFS, i);
if (!info->ksi_scheds)
goto failed;
@@ -2622,7 +2613,7 @@ ksocknal_shutdown(struct lnet_ni *ni)
}
list_del(&net->ksnn_list);
- LIBCFS_FREE(net, sizeof(*net));
+ kfree(net);
ksocknal_data.ksnd_nnets--;
if (!ksocknal_data.ksnd_nnets)
@@ -2815,7 +2806,7 @@ ksocknal_startup(struct lnet_ni *ni)
return rc;
}
- LIBCFS_ALLOC(net, sizeof(*net));
+ net = kzalloc(sizeof(*net), GFP_NOFS);
if (!net)
goto fail_0;
@@ -2877,7 +2868,7 @@ ksocknal_startup(struct lnet_ni *ni)
return 0;
fail_1:
- LIBCFS_FREE(net, sizeof(*net));
+ kfree(net);
fail_0:
if (!ksocknal_data.ksnd_nnets)
ksocknal_base_shutdown();
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 27c56d5ae4e5..11fd3a36424f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -46,7 +46,7 @@ ksocknal_alloc_tx(int type, int size)
}
if (!tx)
- LIBCFS_ALLOC(tx, size);
+ tx = kzalloc(size, GFP_NOFS);
if (!tx)
return NULL;
@@ -102,7 +102,7 @@ ksocknal_free_tx(struct ksock_tx *tx)
spin_unlock(&ksocknal_data.ksnd_tx_lock);
} else {
- LIBCFS_FREE(tx, tx->tx_desc_size);
+ kfree(tx);
}
}
@@ -2117,7 +2117,7 @@ ksocknal_connd(void *arg)
ksocknal_create_conn(cr->ksncr_ni, NULL,
cr->ksncr_sock, SOCKLND_CONN_NONE);
lnet_ni_decref(cr->ksncr_ni);
- LIBCFS_FREE(cr, sizeof(*cr));
+ kfree(cr);
spin_lock_bh(connd_lock);
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index d827f770e831..05982dac781c 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -467,7 +467,7 @@ ksocknal_send_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello)
BUILD_BUG_ON(sizeof(struct lnet_magicversion) != offsetof(struct lnet_hdr, src_nid));
- LIBCFS_ALLOC(hdr, sizeof(*hdr));
+ hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
if (!hdr) {
CERROR("Can't allocate struct lnet_hdr\n");
return -ENOMEM;
@@ -526,7 +526,7 @@ ksocknal_send_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello)
&conn->ksnc_ipaddr, conn->ksnc_port);
}
out:
- LIBCFS_FREE(hdr, sizeof(*hdr));
+ kfree(hdr);
return rc;
}
@@ -582,7 +582,7 @@ ksocknal_recv_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello,
int rc;
int i;
- LIBCFS_ALLOC(hdr, sizeof(*hdr));
+ hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
if (!hdr) {
CERROR("Can't allocate struct lnet_hdr\n");
return -ENOMEM;
@@ -644,7 +644,7 @@ ksocknal_recv_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello,
}
}
out:
- LIBCFS_FREE(hdr, sizeof(*hdr));
+ kfree(hdr);
return rc;
}
diff --git a/drivers/staging/lustre/lnet/libcfs/Makefile b/drivers/staging/lustre/lnet/libcfs/Makefile
index 1607570ef8de..730f2c675047 100644
--- a/drivers/staging/lustre/lnet/libcfs/Makefile
+++ b/drivers/staging/lustre/lnet/libcfs/Makefile
@@ -15,7 +15,7 @@ libcfs-linux-objs += linux-mem.o
libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs))
libcfs-all-objs := debug.o fail.o module.o tracefile.o \
- libcfs_string.o hash.o prng.o workitem.o \
+ libcfs_string.o hash.o \
libcfs_cpu.o libcfs_mem.o libcfs_lock.o
libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs)
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index 5d501beeb622..39439b303d65 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -61,7 +61,7 @@ int __cfs_fail_check_set(u32 id, u32 value, int set)
/* Fail 1/cfs_fail_val times */
if (cfs_fail_loc & CFS_FAIL_RAND) {
- if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
+ if (cfs_fail_val < 2 || prandom_u32_max(cfs_fail_val) > 0)
return 0;
}
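prandom_u32_max(n) draws uniformly from [0, n) via the kernel PRNG, so the CFS_FAIL_RAND case keeps its roughly 1/cfs_fail_val failure rate while dropping both the private cfs_rand() generator and the modulo bias of "% n". Illustrative semantics:

	u32 draw = prandom_u32_max(cfs_fail_val);	/* uniform in [0, cfs_fail_val) */
	/* the check above triggers the failure site only when draw == 0 */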
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index f4f67d2b301e..f7b3c9306456 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -114,7 +114,7 @@ module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif
-struct cfs_wi_sched *cfs_sched_rehash;
+struct workqueue_struct *cfs_rehash_wq;
static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
@@ -519,7 +519,7 @@ cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
hs->hs_dep_bits = hs->hs_cur_bits;
spin_unlock(&hs->hs_dep_lock);
- cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
+ queue_work(cfs_rehash_wq, &hs->hs_dep_work);
# endif
}
@@ -864,12 +864,10 @@ cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
{
int i;
- for (i = prev_size; i < size; i++) {
- if (buckets[i])
- LIBCFS_FREE(buckets[i], bkt_size);
- }
+ for (i = prev_size; i < size; i++)
+ kfree(buckets[i]);
- LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
+ kvfree(buckets);
}
/*
@@ -889,7 +887,7 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
if (old_bkts && old_size == new_size)
return old_bkts;
- LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
+ new_bkts = kvmalloc_array(new_size, sizeof(new_bkts[0]), GFP_KERNEL);
if (!new_bkts)
return NULL;
@@ -902,7 +900,7 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
struct hlist_head *hhead;
struct cfs_hash_bd bd;
- LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
+ new_bkts[i] = kzalloc(cfs_hash_bkt_size(hs), GFP_KERNEL);
if (!new_bkts[i]) {
cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
old_size, new_size);
@@ -939,12 +937,12 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
* @flags - CFS_HASH_REHASH enable dynamic hash resizing
* - CFS_HASH_SORT enable chained hash sort
*/
-static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
+static void cfs_hash_rehash_worker(struct work_struct *work);
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(struct cfs_workitem *wi)
+static void cfs_hash_dep_print(struct work_struct *work)
{
- struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
+ struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
int dep;
int bkt;
int off;
@@ -968,21 +966,12 @@ static int cfs_hash_dep_print(struct cfs_workitem *wi)
static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
spin_lock_init(&hs->hs_dep_lock);
- cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
+ INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
}
static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
- if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
- return;
-
- spin_lock(&hs->hs_dep_lock);
- while (hs->hs_dep_bits) {
- spin_unlock(&hs->hs_dep_lock);
- cond_resched();
- spin_lock(&hs->hs_dep_lock);
- }
- spin_unlock(&hs->hs_dep_lock);
+ cancel_work_sync(&hs->hs_dep_work);
}
#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
@@ -1023,7 +1012,7 @@ cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
len = !(flags & CFS_HASH_BIGNAME) ?
CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
- LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
+ hs = kzalloc(offsetof(struct cfs_hash, hs_name[len]), GFP_KERNEL);
if (!hs)
return NULL;
@@ -1044,7 +1033,7 @@ cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
hs->hs_ops = ops;
hs->hs_extra_bytes = extra_bytes;
hs->hs_rehash_bits = 0;
- cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
+ INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
cfs_hash_depth_wi_init(hs);
if (cfs_hash_with_rehash(hs))
@@ -1055,7 +1044,7 @@ cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
if (hs->hs_buckets)
return hs;
- LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
+ kfree(hs);
return NULL;
}
EXPORT_SYMBOL(cfs_hash_create);
@@ -1118,7 +1107,7 @@ cfs_hash_destroy(struct cfs_hash *hs)
0, CFS_HASH_NBKT(hs));
i = cfs_hash_with_bigname(hs) ?
CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
- LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
+ kfree(hs);
}
struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
@@ -1364,6 +1353,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
cfs_hash_lock(hs, 1);
hs->hs_iterators++;
+ cfs_hash_unlock(hs, 1);
/* NB: iteration is mostly called by service thread,
* we tend to cancel pending rehash-request, instead of
@@ -1371,8 +1361,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
* after iteration
*/
if (cfs_hash_is_rehashing(hs))
- cfs_hash_rehash_cancel_locked(hs);
- cfs_hash_unlock(hs, 1);
+ cfs_hash_rehash_cancel(hs);
}
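The unlock is hoisted above the cancel for a reason the diff only implies: cfs_hash_rehash_cancel() now ends in cancel_work_sync(), which sleeps until any running cfs_hash_rehash_worker() completes, and that worker takes cfs_hash_lock(hs, 1) itself. A sketch of the deadlock being avoided (illustrative, not code from the patch):

	cfs_hash_lock(hs, 1);			/* writer lock held ... */
	cancel_work_sync(&hs->hs_rehash_work);	/* ... while the worker blocks on it: deadlock */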
static void
@@ -1774,42 +1763,13 @@ EXPORT_SYMBOL(cfs_hash_for_each_key);
* theta thresholds for @hs are tunable via cfs_hash_set_theta().
*/
void
-cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
-{
- int i;
-
- /* need hold cfs_hash_lock(hs, 1) */
- LASSERT(cfs_hash_with_rehash(hs) &&
- !cfs_hash_with_no_lock(hs));
-
- if (!cfs_hash_is_rehashing(hs))
- return;
-
- if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
- hs->hs_rehash_bits = 0;
- return;
- }
-
- for (i = 2; cfs_hash_is_rehashing(hs); i++) {
- cfs_hash_unlock(hs, 1);
- /* raise console warning while waiting too long */
- CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
- "hash %s is still rehashing, rescheded %d\n",
- hs->hs_name, i - 1);
- cond_resched();
- cfs_hash_lock(hs, 1);
- }
-}
-
-void
cfs_hash_rehash_cancel(struct cfs_hash *hs)
{
- cfs_hash_lock(hs, 1);
- cfs_hash_rehash_cancel_locked(hs);
- cfs_hash_unlock(hs, 1);
+ LASSERT(cfs_hash_with_rehash(hs));
+ cancel_work_sync(&hs->hs_rehash_work);
}
-int
+void
cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
{
int rc;
@@ -1821,21 +1781,21 @@ cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
rc = cfs_hash_rehash_bits(hs);
if (rc <= 0) {
cfs_hash_unlock(hs, 1);
- return rc;
+ return;
}
hs->hs_rehash_bits = rc;
if (!do_rehash) {
/* launch and return */
- cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
+ queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
cfs_hash_unlock(hs, 1);
- return 0;
+ return;
}
/* rehash right now */
cfs_hash_unlock(hs, 1);
- return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
+ cfs_hash_rehash_worker(&hs->hs_rehash_work);
}
static int
@@ -1869,10 +1829,10 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
return c;
}
-static int
-cfs_hash_rehash_worker(struct cfs_workitem *wi)
+static void
+cfs_hash_rehash_worker(struct work_struct *work)
{
- struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
+ struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_rehash_work);
struct cfs_hash_bucket **bkts;
struct cfs_hash_bd bd;
unsigned int old_size;
@@ -1956,8 +1916,6 @@ cfs_hash_rehash_worker(struct cfs_workitem *wi)
hs->hs_cur_bits = hs->hs_rehash_bits;
out:
hs->hs_rehash_bits = 0;
- if (rc == -ESRCH) /* never be scheduled again */
- cfs_wi_exit(cfs_sched_rehash, wi);
bsize = cfs_hash_bkt_size(hs);
cfs_hash_unlock(hs, 1);
/* can't refer to @hs anymore because it could be destroyed */
@@ -1965,8 +1923,6 @@ out:
cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
if (rc)
CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
- /* return 1 only if cfs_wi_exit is called */
- return rc == -ESRCH;
}
/**
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
index e3a4c67a66b5..76291a350406 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
@@ -51,7 +51,7 @@ cfs_cpt_table_alloc(unsigned int ncpt)
return NULL;
}
- LIBCFS_ALLOC(cptab, sizeof(*cptab));
+ cptab = kzalloc(sizeof(*cptab), GFP_NOFS);
if (cptab) {
cptab->ctb_version = CFS_CPU_VERSION_MAGIC;
node_set(0, cptab->ctb_nodemask);
@@ -67,7 +67,7 @@ cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
LASSERT(cptab->ctb_version == CFS_CPU_VERSION_MAGIC);
- LIBCFS_FREE(cptab, sizeof(*cptab));
+ kfree(cptab);
}
EXPORT_SYMBOL(cfs_cpt_table_free);
@@ -113,7 +113,7 @@ cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
{
return &cptab->ctb_nodemask;
}
-EXPORT_SYMBOL(cfs_cpt_cpumask);
+EXPORT_SYMBOL(cfs_cpt_nodemask);
int
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index f6a0040f4ab1..670ad5a34224 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -38,7 +38,7 @@ cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
LASSERT(!pcl->pcl_locked);
cfs_percpt_free(pcl->pcl_locks);
- LIBCFS_FREE(pcl, sizeof(*pcl));
+ kfree(pcl);
}
EXPORT_SYMBOL(cfs_percpt_lock_free);
@@ -58,14 +58,14 @@ cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
int i;
/* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
- LIBCFS_ALLOC(pcl, sizeof(*pcl));
+ pcl = kzalloc(sizeof(*pcl), GFP_NOFS);
if (!pcl)
return NULL;
pcl->pcl_cptab = cptab;
pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock));
if (!pcl->pcl_locks) {
- LIBCFS_FREE(pcl, sizeof(*pcl));
+ kfree(pcl);
return NULL;
}
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index df93d8f77ea2..7faed94994ea 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -49,13 +49,10 @@ cfs_percpt_free(void *vars)
arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
- for (i = 0; i < arr->va_count; i++) {
- if (arr->va_ptrs[i])
- LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
- }
+ for (i = 0; i < arr->va_count; i++)
+ kfree(arr->va_ptrs[i]);
- LIBCFS_FREE(arr, offsetof(struct cfs_var_array,
- va_ptrs[arr->va_count]));
+ kvfree(arr);
}
EXPORT_SYMBOL(cfs_percpt_free);
@@ -79,7 +76,8 @@ cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
count = cfs_cpt_number(cptab);
- LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
+ arr = kvzalloc(offsetof(struct cfs_var_array, va_ptrs[count]),
+ GFP_KERNEL);
if (!arr)
return NULL;
@@ -89,7 +87,8 @@ cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
arr->va_cptab = cptab;
for (i = 0; i < count; i++) {
- LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size);
+ arr->va_ptrs[i] = kzalloc_node(size, GFP_KERNEL,
+ cfs_cpt_spread_node(cptab, i));
if (!arr->va_ptrs[i]) {
cfs_percpt_free((void *)&arr->va_ptrs[0]);
return NULL;
@@ -130,10 +129,9 @@ cfs_array_free(void *vars)
if (!arr->va_ptrs[i])
continue;
- LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
+ kvfree(arr->va_ptrs[i]);
}
- LIBCFS_FREE(arr, offsetof(struct cfs_var_array,
- va_ptrs[arr->va_count]));
+ kvfree(arr);
}
EXPORT_SYMBOL(cfs_array_free);
@@ -148,7 +146,7 @@ cfs_array_alloc(int count, unsigned int size)
struct cfs_var_array *arr;
int i;
- LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
+ arr = kvmalloc(offsetof(struct cfs_var_array, va_ptrs[count]), GFP_KERNEL);
if (!arr)
return NULL;
@@ -156,7 +154,7 @@ cfs_array_alloc(int count, unsigned int size)
arr->va_size = size;
for (i = 0; i < count; i++) {
- LIBCFS_ALLOC(arr->va_ptrs[i], size);
+ arr->va_ptrs[i] = kvzalloc(size, GFP_KERNEL);
if (!arr->va_ptrs[i]) {
cfs_array_free((void *)&arr->va_ptrs[0]);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index bcac5074bf80..442889a3d729 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -137,26 +137,6 @@ out:
}
EXPORT_SYMBOL(cfs_firststr);
-char *
-cfs_trimwhite(char *str)
-{
- char *end;
-
- while (isspace(*str))
- str++;
-
- end = str + strlen(str);
- while (end > str) {
- if (!isspace(end[-1]))
- break;
- end--;
- }
-
- *end = 0;
- return str;
-}
-EXPORT_SYMBOL(cfs_trimwhite);
-
/**
* Extracts tokens from strings.
*
@@ -280,7 +260,7 @@ cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max,
struct cfs_range_expr *re;
struct cfs_lstr tok;
- LIBCFS_ALLOC(re, sizeof(*re));
+ re = kzalloc(sizeof(*re), GFP_NOFS);
if (!re)
return -ENOMEM;
@@ -333,7 +313,7 @@ cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max,
return 0;
failed:
- LIBCFS_FREE(re, sizeof(*re));
+ kfree(re);
return -EINVAL;
}
@@ -457,7 +437,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, u32 **valpp)
return -EINVAL;
}
- LIBCFS_ALLOC(val, sizeof(val[0]) * count);
+ val = kvmalloc_array(count, sizeof(val[0]), GFP_KERNEL | __GFP_ZERO);
if (!val)
return -ENOMEM;
@@ -488,10 +468,10 @@ cfs_expr_list_free(struct cfs_expr_list *expr_list)
expr = list_entry(expr_list->el_exprs.next,
struct cfs_range_expr, re_link);
list_del(&expr->re_link);
- LIBCFS_FREE(expr, sizeof(*expr));
+ kfree(expr);
}
- LIBCFS_FREE(expr_list, sizeof(*expr_list));
+ kfree(expr_list);
}
EXPORT_SYMBOL(cfs_expr_list_free);
@@ -510,7 +490,7 @@ cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
struct cfs_lstr src;
int rc;
- LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
+ expr_list = kzalloc(sizeof(*expr_list), GFP_NOFS);
if (!expr_list)
return -ENOMEM;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 51823ce71773..c07165e0ad95 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -72,7 +72,7 @@ struct cfs_cpt_data {
/* mutex to protect cpt_cpumask */
struct mutex cpt_mutex;
/* scratch buffer for set/unset_node */
- cpumask_t *cpt_cpumask;
+ cpumask_var_t cpt_cpumask;
};
static struct cfs_cpt_data cpt_data;
@@ -93,35 +93,21 @@ cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
int i;
- if (cptab->ctb_cpu2cpt) {
- LIBCFS_FREE(cptab->ctb_cpu2cpt,
- num_possible_cpus() *
- sizeof(cptab->ctb_cpu2cpt[0]));
- }
+ kvfree(cptab->ctb_cpu2cpt);
for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
- if (part->cpt_nodemask) {
- LIBCFS_FREE(part->cpt_nodemask,
- sizeof(*part->cpt_nodemask));
- }
-
- if (part->cpt_cpumask)
- LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
+ kfree(part->cpt_nodemask);
+ free_cpumask_var(part->cpt_cpumask);
}
- if (cptab->ctb_parts) {
- LIBCFS_FREE(cptab->ctb_parts,
- cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
- }
+ kvfree(cptab->ctb_parts);
- if (cptab->ctb_nodemask)
- LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
- if (cptab->ctb_cpumask)
- LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
+ kfree(cptab->ctb_nodemask);
+ free_cpumask_var(cptab->ctb_cpumask);
- LIBCFS_FREE(cptab, sizeof(*cptab));
+ kfree(cptab);
}
EXPORT_SYMBOL(cfs_cpt_table_free);
@@ -131,36 +117,39 @@ cfs_cpt_table_alloc(unsigned int ncpt)
struct cfs_cpt_table *cptab;
int i;
- LIBCFS_ALLOC(cptab, sizeof(*cptab));
+ cptab = kzalloc(sizeof(*cptab), GFP_NOFS);
if (!cptab)
return NULL;
cptab->ctb_nparts = ncpt;
- LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
- LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
-
- if (!cptab->ctb_cpumask || !cptab->ctb_nodemask)
+ cptab->ctb_nodemask = kzalloc(sizeof(*cptab->ctb_nodemask),
+ GFP_NOFS);
+ if (!zalloc_cpumask_var(&cptab->ctb_cpumask, GFP_NOFS) ||
+ !cptab->ctb_nodemask)
goto failed;
- LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
- num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
+ cptab->ctb_cpu2cpt = kvmalloc_array(num_possible_cpus(),
+ sizeof(cptab->ctb_cpu2cpt[0]),
+ GFP_KERNEL);
if (!cptab->ctb_cpu2cpt)
goto failed;
memset(cptab->ctb_cpu2cpt, -1,
num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
- LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
+ cptab->ctb_parts = kvmalloc_array(ncpt, sizeof(cptab->ctb_parts[0]),
+ GFP_KERNEL);
if (!cptab->ctb_parts)
goto failed;
for (i = 0; i < ncpt; i++) {
struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
- LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
- LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
- if (!part->cpt_cpumask || !part->cpt_nodemask)
+ part->cpt_nodemask = kzalloc(sizeof(*part->cpt_nodemask),
+ GFP_NOFS);
+ if (!zalloc_cpumask_var(&part->cpt_cpumask, GFP_NOFS) ||
+ !part->cpt_nodemask)
goto failed;
}
@@ -251,13 +240,13 @@ cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
}
EXPORT_SYMBOL(cfs_cpt_online);
-cpumask_t *
+cpumask_var_t *
cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
{
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
return cpt == CFS_CPT_ANY ?
- cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
+ &cptab->ctb_cpumask : &cptab->ctb_parts[cpt].cpt_cpumask;
}
EXPORT_SYMBOL(cfs_cpt_cpumask);
@@ -405,7 +394,6 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
int
cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
- cpumask_t *mask;
int rc;
if (node < 0 || node >= MAX_NUMNODES) {
@@ -416,10 +404,9 @@ cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
mutex_lock(&cpt_data.cpt_mutex);
- mask = cpt_data.cpt_cpumask;
- cfs_node_to_cpumask(node, mask);
+ cfs_node_to_cpumask(node, cpt_data.cpt_cpumask);
- rc = cfs_cpt_set_cpumask(cptab, cpt, mask);
+ rc = cfs_cpt_set_cpumask(cptab, cpt, cpt_data.cpt_cpumask);
mutex_unlock(&cpt_data.cpt_mutex);
@@ -430,8 +417,6 @@ EXPORT_SYMBOL(cfs_cpt_set_node);
void
cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
- cpumask_t *mask;
-
if (node < 0 || node >= MAX_NUMNODES) {
CDEBUG(D_INFO,
"Invalid NUMA id %d for CPU partition %d\n", node, cpt);
@@ -440,10 +425,9 @@ cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
mutex_lock(&cpt_data.cpt_mutex);
- mask = cpt_data.cpt_cpumask;
- cfs_node_to_cpumask(node, mask);
+ cfs_node_to_cpumask(node, cpt_data.cpt_cpumask);
- cfs_cpt_unset_cpumask(cptab, cpt, mask);
+ cfs_cpt_unset_cpumask(cptab, cpt, cpt_data.cpt_cpumask);
mutex_unlock(&cpt_data.cpt_mutex);
}
@@ -529,19 +513,20 @@ EXPORT_SYMBOL(cfs_cpt_spread_node);
int
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
- int cpu = smp_processor_id();
- int cpt = cptab->ctb_cpu2cpt[cpu];
+ int cpu;
+ int cpt;
- if (cpt < 0) {
- if (!remap)
- return cpt;
+ preempt_disable();
+ cpu = smp_processor_id();
+ cpt = cptab->ctb_cpu2cpt[cpu];
+ if (cpt < 0 && remap) {
/* don't return negative value for safety of upper layer,
* instead we shadow the unknown cpu to a valid partition ID
*/
cpt = cpu % cptab->ctb_nparts;
}
-
+ preempt_enable();
return cpt;
}
EXPORT_SYMBOL(cfs_cpt_current);
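An equivalent spelling uses get_cpu()/put_cpu(), which bundle smp_processor_id() with the same preemption guard (illustrative alternative, not what the patch chose):

	cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */
	cpt = cptab->ctb_cpu2cpt[cpu];
	if (cpt < 0 && remap)
		cpt = cpu % cptab->ctb_nparts;
	put_cpu();		/* preempt_enable() */
	return cpt;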
@@ -558,7 +543,7 @@ EXPORT_SYMBOL(cfs_cpt_of_cpu);
int
cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
{
- cpumask_t *cpumask;
+ cpumask_var_t *cpumask;
nodemask_t *nodemask;
int rc;
int i;
@@ -566,24 +551,24 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
if (cpt == CFS_CPT_ANY) {
- cpumask = cptab->ctb_cpumask;
+ cpumask = &cptab->ctb_cpumask;
nodemask = cptab->ctb_nodemask;
} else {
- cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
+ cpumask = &cptab->ctb_parts[cpt].cpt_cpumask;
nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
}
- if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) {
+ if (cpumask_any_and(*cpumask, cpu_online_mask) >= nr_cpu_ids) {
CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
cpt);
return -EINVAL;
}
for_each_online_cpu(i) {
- if (cpumask_test_cpu(i, cpumask))
+ if (cpumask_test_cpu(i, *cpumask))
continue;
- rc = set_cpus_allowed_ptr(current, cpumask);
+ rc = set_cpus_allowed_ptr(current, *cpumask);
set_mems_allowed(*nodemask);
if (!rc)
schedule(); /* switch to allowed CPU */
@@ -604,8 +589,8 @@ static int
cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
cpumask_t *node, int number)
{
- cpumask_t *socket = NULL;
- cpumask_t *core = NULL;
+ cpumask_var_t socket;
+ cpumask_var_t core;
int rc = 0;
int cpu;
@@ -623,13 +608,17 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
return 0;
}
- /* allocate scratch buffer */
- LIBCFS_ALLOC(socket, cpumask_size());
- LIBCFS_ALLOC(core, cpumask_size());
- if (!socket || !core) {
+ /*
+ * Allocate scratch buffers.
+ * A cpumask_var_t cannot be statically initialized, so allocate
+ * both before either may safely be freed.
+ */
+ if (!zalloc_cpumask_var(&socket, GFP_NOFS))
rc = -ENOMEM;
+ if (!zalloc_cpumask_var(&core, GFP_NOFS))
+ rc = -ENOMEM;
+ if (rc)
goto out;
- }
while (!cpumask_empty(node)) {
cpu = cpumask_first(node);
@@ -667,10 +656,8 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
}
out:
- if (socket)
- LIBCFS_FREE(socket, cpumask_size());
- if (core)
- LIBCFS_FREE(core, cpumask_size());
+ free_cpumask_var(socket);
+ free_cpumask_var(core);
return rc;
}
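The comment above relies on a property of the cpumask API worth spelling out: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a bare pointer, and zalloc_cpumask_var() leaves it NULL on failure, so attempting both allocations up front guarantees both variables are in a state free_cpumask_var() can handle. A sketch of the pattern (hypothetical names):

	cpumask_var_t a, b;	/* bare pointers when CONFIG_CPUMASK_OFFSTACK=y */
	int rc = 0;

	if (!zalloc_cpumask_var(&a, GFP_KERNEL))	/* a == NULL on failure */
		rc = -ENOMEM;
	if (!zalloc_cpumask_var(&b, GFP_KERNEL))	/* attempted regardless, so b is defined */
		rc = -ENOMEM;
	if (rc)
		goto out;
	/* ... use a and b ... */
out:
	free_cpumask_var(a);	/* safe: NULL or a valid allocation */
	free_cpumask_var(b);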
@@ -723,7 +710,7 @@ static struct cfs_cpt_table *
cfs_cpt_table_create(int ncpt)
{
struct cfs_cpt_table *cptab = NULL;
- cpumask_t *mask = NULL;
+ cpumask_var_t mask;
int cpt = 0;
int num;
int rc;
@@ -756,8 +743,7 @@ cfs_cpt_table_create(int ncpt)
goto failed;
}
- LIBCFS_ALLOC(mask, cpumask_size());
- if (!mask) {
+ if (!zalloc_cpumask_var(&mask, GFP_NOFS)) {
CERROR("Failed to allocate scratch cpumask\n");
goto failed;
}
@@ -784,7 +770,7 @@ cfs_cpt_table_create(int ncpt)
rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
if (rc < 0)
- goto failed;
+ goto failed_mask;
LASSERT(num >= cpumask_weight(part->cpt_cpumask));
if (num == cpumask_weight(part->cpt_cpumask))
@@ -797,20 +783,19 @@ cfs_cpt_table_create(int ncpt)
CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
cptab->ctb_nparts, num, cpt,
cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
- goto failed;
+ goto failed_mask;
}
- LIBCFS_FREE(mask, cpumask_size());
+ free_cpumask_var(mask);
return cptab;
+ failed_mask:
+ free_cpumask_var(mask);
failed:
CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
ncpt, num_online_nodes(), num_online_cpus());
- if (mask)
- LIBCFS_FREE(mask, cpumask_size());
-
if (cptab)
cfs_cpt_table_free(cptab);
@@ -830,7 +815,7 @@ cfs_cpt_table_create_pattern(char *pattern)
int c;
int i;
- str = cfs_trimwhite(pattern);
+ str = strim(pattern);
if (*str == 'n' || *str == 'N') {
pattern = str + 1;
if (*pattern != '\0') {
@@ -882,7 +867,7 @@ cfs_cpt_table_create_pattern(char *pattern)
high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
- for (str = cfs_trimwhite(pattern), c = 0;; c++) {
+ for (str = strim(pattern), c = 0;; c++) {
struct cfs_range_expr *range;
struct cfs_expr_list *el;
char *bracket = strchr(str, '[');
@@ -917,7 +902,7 @@ cfs_cpt_table_create_pattern(char *pattern)
goto failed;
}
- str = cfs_trimwhite(str + n);
+ str = strim(str + n);
if (str != bracket) {
CERROR("Invalid pattern %s\n", str);
goto failed;
@@ -957,7 +942,7 @@ cfs_cpt_table_create_pattern(char *pattern)
goto failed;
}
- str = cfs_trimwhite(bracket + 1);
+ str = strim(bracket + 1);
}
return cptab;
@@ -1013,8 +998,7 @@ cfs_cpu_fini(void)
cpuhp_remove_state_nocalls(lustre_cpu_online);
cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
#endif
- if (cpt_data.cpt_cpumask)
- LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
+ free_cpumask_var(cpt_data.cpt_cpumask);
}
int
@@ -1026,8 +1010,7 @@ cfs_cpu_init(void)
memset(&cpt_data, 0, sizeof(cpt_data));
- LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
- if (!cpt_data.cpt_cpumask) {
+ if (!zalloc_cpumask_var(&cpt_data.cpt_cpumask, GFP_NOFS)) {
CERROR("Failed to allocate scratch buffer\n");
return -1;
}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
index 2e5d311d2438..db81ed527452 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
@@ -120,6 +120,7 @@ static struct shash_alg alg = {
.cra_name = "adler32",
.cra_driver_name = "adler32-zlib",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index b5746230ab31..ddf625669bff 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -146,7 +146,7 @@ int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
return -EINVAL;
}
- LIBCFS_ALLOC(*hdr_pp, hdr.ioc_len);
+ *hdr_pp = kvmalloc(hdr.ioc_len, GFP_KERNEL);
if (!*hdr_pp)
return -ENOMEM;
@@ -164,7 +164,7 @@ int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
return 0;
free:
- LIBCFS_FREE(*hdr_pp, hdr.ioc_len);
+ kvfree(*hdr_pp);
return err;
}
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index 4ead55920e79..a03f924f1d7c 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -156,7 +156,7 @@ int libcfs_ioctl(unsigned long cmd, void __user *uparam)
break; }
}
out:
- LIBCFS_FREE(hdr, hdr->ioc_len);
+ kvfree(hdr);
return err;
}
@@ -302,7 +302,7 @@ static int __proc_cpt_table(void *data, int write,
LASSERT(cfs_cpt_table);
while (1) {
- LIBCFS_ALLOC(buf, len);
+ buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -311,7 +311,7 @@ static int __proc_cpt_table(void *data, int write,
break;
if (rc == -EFBIG) {
- LIBCFS_FREE(buf, len);
+ kfree(buf);
len <<= 1;
continue;
}
@@ -325,8 +325,7 @@ static int __proc_cpt_table(void *data, int write,
rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL);
out:
- if (buf)
- LIBCFS_FREE(buf, len);
+ kfree(buf);
return rc;
}
@@ -548,33 +547,23 @@ static int libcfs_init(void)
goto cleanup_cpu;
}
- rc = cfs_wi_startup();
- if (rc) {
- CERROR("initialize workitem: error %d\n", rc);
- goto cleanup_deregister;
- }
-
- /* max to 4 threads, should be enough for rehash */
- rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
- rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
- rc, &cfs_sched_rehash);
- if (rc) {
- CERROR("Startup workitem scheduler: error: %d\n", rc);
+ cfs_rehash_wq = alloc_workqueue("cfs_rh", WQ_SYSFS, 4);
+ if (!cfs_rehash_wq) {
+ CERROR("Failed to start rehash workqueue.\n");
+ rc = -ENOMEM;
goto cleanup_deregister;
}
rc = cfs_crypto_register();
if (rc) {
CERROR("cfs_crypto_register: error %d\n", rc);
- goto cleanup_wi;
+ goto cleanup_deregister;
}
lustre_insert_debugfs(lnet_table, lnet_debugfs_symlinks);
CDEBUG(D_OTHER, "portals setup OK\n");
return 0;
- cleanup_wi:
- cfs_wi_shutdown();
cleanup_deregister:
misc_deregister(&libcfs_dev);
cleanup_cpu:
@@ -590,13 +579,12 @@ static void libcfs_exit(void)
lustre_remove_debugfs();
- if (cfs_sched_rehash) {
- cfs_wi_sched_destroy(cfs_sched_rehash);
- cfs_sched_rehash = NULL;
+ if (cfs_rehash_wq) {
+ destroy_workqueue(cfs_rehash_wq);
+ cfs_rehash_wq = NULL;
}
cfs_crypto_unregister();
- cfs_wi_shutdown();
misc_deregister(&libcfs_dev);
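The libcfs_init()/libcfs_exit() hunks retire the private cfs_wi_* scheduler (deleted wholesale in workitem.c below) in favour of a plain workqueue. A hedged sketch of the replacement idiom with invented names; the conversion of the actual rehash work items lives elsewhere in this series:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_rehash(struct work_struct *work)
{
	/* body of what used to be a cfs_workitem wi_action callback */
}

static DECLARE_WORK(example_work, example_rehash);

static int example_start(void)
{
	/* WQ_SYSFS exposes the queue under sysfs; max_active = 4
	 * mirrors the old "max to 4 threads" rehash limit */
	example_wq = alloc_workqueue("example_rh", WQ_SYSFS, 4);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}

static void example_stop(void)
{
	destroy_workqueue(example_wq);	/* flushes pending work first */
}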
diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
deleted file mode 100644
index f47cf67a92e3..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/prng.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/prng.c
- *
- * concatenation of following two 16-bit multiply with carry generators
- * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16,
- * number and carry packed within the same 32 bit integer.
- * algorithm recommended by Marsaglia
- */
-
-#include <linux/libcfs/libcfs.h>
-
-/*
- * From: George Marsaglia <geo@stat.fsu.edu>
- * Newsgroups: sci.math
- * Subject: Re: A RANDOM NUMBER GENERATOR FOR C
- * Date: Tue, 30 Sep 1997 05:29:35 -0700
- *
- * You may replace the two constants 36969 and 18000 by any
- * pair of distinct constants from this list:
- * 18000 18030 18273 18513 18879 19074 19098 19164 19215 19584
- * 19599 19950 20088 20508 20544 20664 20814 20970 21153 21243
- * 21423 21723 21954 22125 22188 22293 22860 22938 22965 22974
- * 23109 23124 23163 23208 23508 23520 23553 23658 23865 24114
- * 24219 24660 24699 24864 24948 25023 25308 25443 26004 26088
- * 26154 26550 26679 26838 27183 27258 27753 27795 27810 27834
- * 27960 28320 28380 28689 28710 28794 28854 28959 28980 29013
- * 29379 29889 30135 30345 30459 30714 30903 30963 31059 31083
- * (or any other 16-bit constants k for which both k*2^16-1
- * and k*2^15-1 are prime)
- */
-
-#define RANDOM_CONST_A 18030
-#define RANDOM_CONST_B 29013
-
-static unsigned int seed_x = 521288629;
-static unsigned int seed_y = 362436069;
-
-/**
- * cfs_rand - creates new seeds
- *
- * First it creates new seeds from the previous seeds. Then it generates a
- * new pseudo random number for use.
- *
- * Returns a pseudo-random 32-bit integer
- */
-unsigned int cfs_rand(void)
-{
- seed_x = RANDOM_CONST_A * (seed_x & 65535) + (seed_x >> 16);
- seed_y = RANDOM_CONST_B * (seed_y & 65535) + (seed_y >> 16);
-
- return ((seed_x << 16) + (seed_y & 65535));
-}
-EXPORT_SYMBOL(cfs_rand);
-
-/**
- * cfs_srand - sets the initial seed
- * @seed1 : (seed_x) should have the most entropy in the low bits of the word
- * @seed2 : (seed_y) should have the most entropy in the high bits of the word
- *
- * Replaces the original seeds with new values. Used to generate new
- * pseudo-random numbers.
- */
-void cfs_srand(unsigned int seed1, unsigned int seed2)
-{
- if (seed1)
- seed_x = seed1; /* use default seeds if parameter is 0 */
- if (seed2)
- seed_y = seed2;
-}
-EXPORT_SYMBOL(cfs_srand);
-
-/**
- * cfs_get_random_bytes - generate a bunch of random numbers
- * @buf : buffer to fill with random numbers
- * @size: size of passed in buffer
- *
- * Fills a buffer with random bytes
- */
-void cfs_get_random_bytes(void *buf, int size)
-{
- int *p = buf;
- int rem, tmp;
-
- LASSERT(size >= 0);
-
- rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size);
- if (rem) {
- get_random_bytes(&tmp, sizeof(tmp));
- tmp ^= cfs_rand();
- memcpy(buf, &tmp, rem);
- p = buf + rem;
- size -= rem;
- }
-
- while (size >= sizeof(int)) {
- get_random_bytes(&tmp, sizeof(tmp));
- *p = cfs_rand() ^ tmp;
- size -= sizeof(int);
- p++;
- }
- buf = p;
- if (size) {
- get_random_bytes(&tmp, sizeof(tmp));
- tmp ^= cfs_rand();
- memcpy(buf, &tmp, size);
- }
-}
-EXPORT_SYMBOL(cfs_get_random_bytes);
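With prng.c deleted, the surviving callers switch to the kernel's own generators, assuming this kernel generation's <linux/random.h> API: get_random_bytes() where the bytes must be unpredictable, prandom_u32()/prandom_u32_max() where a cheap, non-cryptographic value is enough (see the net_fault.c and router.c hunks below). A short sketch of the mapping:

#include <linux/kernel.h>
#include <linux/random.h>

static void example_random_usage(void)
{
	u8 buf[16];
	u32 r;

	/* replaces cfs_get_random_bytes(buf, sizeof(buf)) */
	get_random_bytes(buf, sizeof(buf));

	/* replaces cfs_rand() */
	r = prandom_u32();

	/* replaces cfs_rand() % 100: scales a PRNG sample into
	 * [0, 100) with a multiply-and-shift instead of a division */
	r = prandom_u32_max(100);

	pr_info("sample: %u\n", r);
}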
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index da2844f37edf..57913aae1d88 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -785,7 +785,7 @@ int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
return -EFAULT;
nob = strnlen(knl_buffer, usr_buffer_nob);
- while (nob-- >= 0) /* strip trailing whitespace */
+ while (--nob >= 0) /* strip trailing whitespace */
if (!isspace(knl_buffer[nob]))
break;
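The tracefile.c change is a genuine bounds fix, not churn: with the post-decrement form the loop body still ran once after nob reached 0, indexing knl_buffer[-1] when the copied string was empty or all whitespace. A standalone illustration of the corrected loop (function name invented):

#include <linux/ctype.h>

/* returns the length of buf[0..len-1] with trailing whitespace stripped */
static int example_strip_trailing(const char *buf, int len)
{
	int nob = len;

	/* pre-decrement keeps every index inside [0, len - 1] */
	while (--nob >= 0)
		if (!isspace(buf[nob]))
			break;

	return nob + 1;
}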
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
deleted file mode 100644
index 6a05d9bab8dc..000000000000
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ /dev/null
@@ -1,466 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/workitem.c
- *
- * Author: Isaac Huang <isaac@clusterfs.com>
- * Liang Zhen <zhen.liang@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-
-#define CFS_WS_NAME_LEN 16
-
-struct cfs_wi_sched {
- /* chain on global list */
- struct list_head ws_list;
- /** serialised workitems */
- spinlock_t ws_lock;
- /** where schedulers sleep */
- wait_queue_head_t ws_waitq;
- /** concurrent workitems */
- struct list_head ws_runq;
- /**
- * rescheduled running-workitems, a workitem can be rescheduled
- * while running in wi_action(), but we don't want to execute it
- * again until it returns from wi_action(), so we put it on ws_rerunq
- * while rescheduling, and move it to runq after it returns
- * from wi_action()
- */
- struct list_head ws_rerunq;
- /** CPT-table for this scheduler */
- struct cfs_cpt_table *ws_cptab;
- /** CPT id for affinity */
- int ws_cpt;
- /** number of scheduled workitems */
- int ws_nscheduled;
- /** started scheduler thread, protected by cfs_wi_data::wi_glock */
- unsigned int ws_nthreads:30;
- /** shutting down, protected by cfs_wi_data::wi_glock */
- unsigned int ws_stopping:1;
- /** serialize starting thread, protected by cfs_wi_data::wi_glock */
- unsigned int ws_starting:1;
- /** scheduler name */
- char ws_name[CFS_WS_NAME_LEN];
-};
-
-static struct cfs_workitem_data {
- /** serialize */
- spinlock_t wi_glock;
- /** list of all schedulers */
- struct list_head wi_scheds;
- /** WI module is initialized */
- int wi_init;
- /** shutting down the whole WI module */
- int wi_stopping;
-} cfs_wi_data;
-
-static inline int
-cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
-{
- spin_lock(&sched->ws_lock);
- if (sched->ws_stopping) {
- spin_unlock(&sched->ws_lock);
- return 0;
- }
-
- if (!list_empty(&sched->ws_runq)) {
- spin_unlock(&sched->ws_lock);
- return 0;
- }
- spin_unlock(&sched->ws_lock);
- return 1;
-}
-
-/* XXX:
- * 0. it only works when called from wi->wi_action.
- * 1. when it returns no one shall try to schedule the workitem.
- */
-void
-cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
-{
- LASSERT(!in_interrupt()); /* because we use plain spinlock */
- LASSERT(!sched->ws_stopping);
-
- spin_lock(&sched->ws_lock);
-
- LASSERT(wi->wi_running);
- if (wi->wi_scheduled) { /* cancel pending schedules */
- LASSERT(!list_empty(&wi->wi_list));
- list_del_init(&wi->wi_list);
-
- LASSERT(sched->ws_nscheduled > 0);
- sched->ws_nscheduled--;
- }
-
- LASSERT(list_empty(&wi->wi_list));
-
- wi->wi_scheduled = 1; /* LBUG future schedule attempts */
- spin_unlock(&sched->ws_lock);
-}
-EXPORT_SYMBOL(cfs_wi_exit);
-
-/**
- * cancel schedule request of workitem \a wi
- */
-int
-cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
-{
- int rc;
-
- LASSERT(!in_interrupt()); /* because we use plain spinlock */
- LASSERT(!sched->ws_stopping);
-
- /*
- * return 0 if it's running already, otherwise return 1, which
- * means the workitem will not be scheduled and will not have
- * any race with wi_action.
- */
- spin_lock(&sched->ws_lock);
-
- rc = !(wi->wi_running);
-
- if (wi->wi_scheduled) { /* cancel pending schedules */
- LASSERT(!list_empty(&wi->wi_list));
- list_del_init(&wi->wi_list);
-
- LASSERT(sched->ws_nscheduled > 0);
- sched->ws_nscheduled--;
-
- wi->wi_scheduled = 0;
- }
-
- LASSERT(list_empty(&wi->wi_list));
-
- spin_unlock(&sched->ws_lock);
- return rc;
-}
-EXPORT_SYMBOL(cfs_wi_deschedule);
-
-/*
- * Workitem scheduled with (serial == 1) is strictly serialised not only with
- * itself, but also with others scheduled this way.
- *
- * Now there's only one static serialised queue, but in the future more might
- * be added, and even dynamic creation of serialised queues might be supported.
- */
-void
-cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
-{
- LASSERT(!in_interrupt()); /* because we use plain spinlock */
- LASSERT(!sched->ws_stopping);
-
- spin_lock(&sched->ws_lock);
-
- if (!wi->wi_scheduled) {
- LASSERT(list_empty(&wi->wi_list));
-
- wi->wi_scheduled = 1;
- sched->ws_nscheduled++;
- if (!wi->wi_running) {
- list_add_tail(&wi->wi_list, &sched->ws_runq);
- wake_up(&sched->ws_waitq);
- } else {
- list_add(&wi->wi_list, &sched->ws_rerunq);
- }
- }
-
- LASSERT(!list_empty(&wi->wi_list));
- spin_unlock(&sched->ws_lock);
-}
-EXPORT_SYMBOL(cfs_wi_schedule);
-
-static int cfs_wi_scheduler(void *arg)
-{
- struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
-
- cfs_block_allsigs();
-
- /* CPT affinity scheduler? */
- if (sched->ws_cptab)
- if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt))
- CWARN("Unable to bind %s on CPU partition %d\n",
- sched->ws_name, sched->ws_cpt);
-
- spin_lock(&cfs_wi_data.wi_glock);
-
- LASSERT(sched->ws_starting == 1);
- sched->ws_starting--;
- sched->ws_nthreads++;
-
- spin_unlock(&cfs_wi_data.wi_glock);
-
- spin_lock(&sched->ws_lock);
-
- while (!sched->ws_stopping) {
- int nloops = 0;
- int rc;
- struct cfs_workitem *wi;
-
- while (!list_empty(&sched->ws_runq) &&
- nloops < CFS_WI_RESCHED) {
- wi = list_entry(sched->ws_runq.next,
- struct cfs_workitem, wi_list);
- LASSERT(wi->wi_scheduled && !wi->wi_running);
-
- list_del_init(&wi->wi_list);
-
- LASSERT(sched->ws_nscheduled > 0);
- sched->ws_nscheduled--;
-
- wi->wi_running = 1;
- wi->wi_scheduled = 0;
-
- spin_unlock(&sched->ws_lock);
- nloops++;
-
- rc = (*wi->wi_action)(wi);
-
- spin_lock(&sched->ws_lock);
- if (rc) /* WI should be dead, even be freed! */
- continue;
-
- wi->wi_running = 0;
- if (list_empty(&wi->wi_list))
- continue;
-
- LASSERT(wi->wi_scheduled);
- /* wi is rescheduled, should be on rerunq now, we
- * move it to runq so it can run action now
- */
- list_move_tail(&wi->wi_list, &sched->ws_runq);
- }
-
- if (!list_empty(&sched->ws_runq)) {
- spin_unlock(&sched->ws_lock);
- /* don't sleep because some workitems still
- * expect me to come back soon
- */
- cond_resched();
- spin_lock(&sched->ws_lock);
- continue;
- }
-
- spin_unlock(&sched->ws_lock);
- rc = wait_event_interruptible_exclusive(sched->ws_waitq,
- !cfs_wi_sched_cansleep(sched));
- spin_lock(&sched->ws_lock);
- }
-
- spin_unlock(&sched->ws_lock);
-
- spin_lock(&cfs_wi_data.wi_glock);
- sched->ws_nthreads--;
- spin_unlock(&cfs_wi_data.wi_glock);
-
- return 0;
-}
-
-void
-cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
-{
- int i;
-
- LASSERT(cfs_wi_data.wi_init);
- LASSERT(!cfs_wi_data.wi_stopping);
-
- spin_lock(&cfs_wi_data.wi_glock);
- if (sched->ws_stopping) {
- CDEBUG(D_INFO, "%s is in progress of stopping\n",
- sched->ws_name);
- spin_unlock(&cfs_wi_data.wi_glock);
- return;
- }
-
- LASSERT(!list_empty(&sched->ws_list));
- sched->ws_stopping = 1;
-
- spin_unlock(&cfs_wi_data.wi_glock);
-
- i = 2;
- wake_up_all(&sched->ws_waitq);
-
- spin_lock(&cfs_wi_data.wi_glock);
- while (sched->ws_nthreads > 0) {
- CDEBUG(is_power_of_2(++i) ? D_WARNING : D_NET,
- "waiting for %d threads of WI sched[%s] to terminate\n",
- sched->ws_nthreads, sched->ws_name);
-
- spin_unlock(&cfs_wi_data.wi_glock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 20);
- spin_lock(&cfs_wi_data.wi_glock);
- }
-
- list_del(&sched->ws_list);
-
- spin_unlock(&cfs_wi_data.wi_glock);
- LASSERT(!sched->ws_nscheduled);
-
- LIBCFS_FREE(sched, sizeof(*sched));
-}
-EXPORT_SYMBOL(cfs_wi_sched_destroy);
-
-int
-cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
- int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
-{
- struct cfs_wi_sched *sched;
- int rc;
-
- LASSERT(cfs_wi_data.wi_init);
- LASSERT(!cfs_wi_data.wi_stopping);
- LASSERT(!cptab || cpt == CFS_CPT_ANY ||
- (cpt >= 0 && cpt < cfs_cpt_number(cptab)));
-
- LIBCFS_ALLOC(sched, sizeof(*sched));
- if (!sched)
- return -ENOMEM;
-
- if (strlen(name) > sizeof(sched->ws_name) - 1) {
- LIBCFS_FREE(sched, sizeof(*sched));
- return -E2BIG;
- }
- strncpy(sched->ws_name, name, sizeof(sched->ws_name));
-
- sched->ws_cptab = cptab;
- sched->ws_cpt = cpt;
-
- spin_lock_init(&sched->ws_lock);
- init_waitqueue_head(&sched->ws_waitq);
- INIT_LIST_HEAD(&sched->ws_runq);
- INIT_LIST_HEAD(&sched->ws_rerunq);
- INIT_LIST_HEAD(&sched->ws_list);
-
- rc = 0;
- while (nthrs > 0) {
- char name[16];
- struct task_struct *task;
-
- spin_lock(&cfs_wi_data.wi_glock);
- while (sched->ws_starting > 0) {
- spin_unlock(&cfs_wi_data.wi_glock);
- schedule();
- spin_lock(&cfs_wi_data.wi_glock);
- }
-
- sched->ws_starting++;
- spin_unlock(&cfs_wi_data.wi_glock);
-
- if (sched->ws_cptab && sched->ws_cpt >= 0) {
- snprintf(name, sizeof(name), "%s_%02d_%02u",
- sched->ws_name, sched->ws_cpt,
- sched->ws_nthreads);
- } else {
- snprintf(name, sizeof(name), "%s_%02u",
- sched->ws_name, sched->ws_nthreads);
- }
-
- task = kthread_run(cfs_wi_scheduler, sched, "%s", name);
- if (!IS_ERR(task)) {
- nthrs--;
- continue;
- }
- rc = PTR_ERR(task);
-
- CERROR("Failed to create thread for WI scheduler %s: %d\n",
- name, rc);
-
- spin_lock(&cfs_wi_data.wi_glock);
-
- /* make up for cfs_wi_sched_destroy */
- list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
- sched->ws_starting--;
-
- spin_unlock(&cfs_wi_data.wi_glock);
-
- cfs_wi_sched_destroy(sched);
- return rc;
- }
- spin_lock(&cfs_wi_data.wi_glock);
- list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
- spin_unlock(&cfs_wi_data.wi_glock);
-
- *sched_pp = sched;
- return 0;
-}
-EXPORT_SYMBOL(cfs_wi_sched_create);
-
-int
-cfs_wi_startup(void)
-{
- memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
-
- spin_lock_init(&cfs_wi_data.wi_glock);
- INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
- cfs_wi_data.wi_init = 1;
-
- return 0;
-}
-
-void
-cfs_wi_shutdown(void)
-{
- struct cfs_wi_sched *sched;
- struct cfs_wi_sched *temp;
-
- spin_lock(&cfs_wi_data.wi_glock);
- cfs_wi_data.wi_stopping = 1;
- spin_unlock(&cfs_wi_data.wi_glock);
-
- /* nobody should contend on this list */
- list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
- sched->ws_stopping = 1;
- wake_up_all(&sched->ws_waitq);
- }
-
- list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
- spin_lock(&cfs_wi_data.wi_glock);
-
- while (sched->ws_nthreads) {
- spin_unlock(&cfs_wi_data.wi_glock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 20);
- spin_lock(&cfs_wi_data.wi_glock);
- }
- spin_unlock(&cfs_wi_data.wi_glock);
- }
- list_for_each_entry_safe(sched, temp, &cfs_wi_data.wi_scheds, ws_list) {
- list_del(&sched->ws_list);
- LIBCFS_FREE(sched, sizeof(*sched));
- }
-
- cfs_wi_data.wi_stopping = 0;
- cfs_wi_data.wi_init = 0;
-}
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 7caff290c146..2c7abad57104 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -108,7 +108,8 @@ lnet_create_remote_nets_table(void)
LASSERT(!the_lnet.ln_remote_nets_hash);
LASSERT(the_lnet.ln_remote_nets_hbits > 0);
- LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
+ hash = kvmalloc_array(LNET_REMOTE_NETS_HASH_SIZE, sizeof(*hash),
+ GFP_KERNEL);
if (!hash) {
CERROR("Failed to create remote nets hash table\n");
return -ENOMEM;
@@ -131,9 +132,7 @@ lnet_destroy_remote_nets_table(void)
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
- LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
- LNET_REMOTE_NETS_HASH_SIZE *
- sizeof(the_lnet.ln_remote_nets_hash[0]));
+ kvfree(the_lnet.ln_remote_nets_hash);
the_lnet.ln_remote_nets_hash = NULL;
}
@@ -384,10 +383,10 @@ lnet_res_container_cleanup(struct lnet_res_container *rec)
list_del_init(e);
if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
- lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
+ kfree(list_entry(e, struct lnet_eq, eq_list));
} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
- lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
+ kfree(list_entry(e, struct lnet_libmd, md_list));
} else { /* NB: Active MEs should be attached on portals */
LBUG();
@@ -405,11 +404,8 @@ lnet_res_container_cleanup(struct lnet_res_container *rec)
count, lnet_res_type2str(rec->rec_type));
}
- if (rec->rec_lh_hash) {
- LIBCFS_FREE(rec->rec_lh_hash,
- LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
- rec->rec_lh_hash = NULL;
- }
+ kfree(rec->rec_lh_hash);
+ rec->rec_lh_hash = NULL;
rec->rec_type = 0; /* mark it as finalized */
}
@@ -427,8 +423,8 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
/* Arbitrary choice of hash table size */
- LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
- LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
+ rec->rec_lh_hash = kvmalloc_cpt(LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]),
+ GFP_KERNEL, cpt);
if (!rec->rec_lh_hash) {
rc = -ENOMEM;
goto out;
@@ -831,7 +827,7 @@ lnet_ping_info_create(int num_ni)
unsigned int infosz;
infosz = offsetof(struct lnet_ping_info, pi_ni[num_ni]);
- LIBCFS_ALLOC(ping_info, infosz);
+ ping_info = kvzalloc(infosz, GFP_KERNEL);
if (!ping_info) {
CERROR("Can't allocate ping info[%d]\n", num_ni);
return NULL;
@@ -864,9 +860,7 @@ lnet_get_ni_count(void)
static inline void
lnet_ping_info_free(struct lnet_ping_info *pinfo)
{
- LIBCFS_FREE(pinfo,
- offsetof(struct lnet_ping_info,
- pi_ni[pinfo->pi_nnis]));
+ kvfree(pinfo);
}
static void
@@ -1280,8 +1274,8 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;
if (lnd_tunables) {
- LIBCFS_ALLOC(ni->ni_lnd_tunables,
- sizeof(*ni->ni_lnd_tunables));
+ ni->ni_lnd_tunables = kzalloc(sizeof(*ni->ni_lnd_tunables),
+ GFP_NOFS);
if (!ni->ni_lnd_tunables) {
mutex_unlock(&the_lnet.ln_lnd_mutex);
rc = -ENOMEM;
@@ -2160,7 +2154,7 @@ static int lnet_ping(struct lnet_process_id id, int timeout_ms,
if (id.pid == LNET_PID_ANY)
id.pid = LNET_PID_LUSTRE;
- LIBCFS_ALLOC(info, infosz);
+ info = kzalloc(infosz, GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -2310,6 +2304,6 @@ static int lnet_ping(struct lnet_process_id id, int timeout_ms,
LASSERT(!rc2);
out_0:
- LIBCFS_FREE(info, infosz);
+ kfree(info);
return rc;
}
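api-ni.c shows the two remaining allocation flavours: kvmalloc_array() for the overflow-checked n * size case, and kvmalloc_cpt()/kzalloc_cpt(), which are libcfs wrappers introduced by this series (their exact definition is an assumption here) that add a CPU-partition hint so memory lands on the matching NUMA node. A sketch of a hash-table allocation in the plain kvmalloc_array() style:

#include <linux/list.h>
#include <linux/mm.h>

static struct list_head *example_alloc_hash(unsigned int nbuckets)
{
	struct list_head *hash;
	unsigned int i;

	/* checks nbuckets * sizeof(*hash) for overflow before allocating */
	hash = kvmalloc_array(nbuckets, sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	for (i = 0; i < nbuckets; i++)
		INIT_LIST_HEAD(&hash[i]);

	return hash;	/* release with kvfree(hash) */
}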
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 0cf0f4f99435..0aea268a4f1c 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -106,19 +106,16 @@ lnet_ni_free(struct lnet_ni *ni)
if (ni->ni_cpts)
cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
- if (ni->ni_lnd_tunables)
- LIBCFS_FREE(ni->ni_lnd_tunables, sizeof(*ni->ni_lnd_tunables));
+ kfree(ni->ni_lnd_tunables);
- for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) {
- LIBCFS_FREE(ni->ni_interfaces[i],
- strlen(ni->ni_interfaces[i]) + 1);
- }
+ for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++)
+ kfree(ni->ni_interfaces[i]);
/* release reference to net namespace */
if (ni->ni_net_ns)
put_net(ni->ni_net_ns);
- LIBCFS_FREE(ni, sizeof(*ni));
+ kfree(ni);
}
struct lnet_ni *
@@ -135,7 +132,7 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
return NULL;
}
- LIBCFS_ALLOC(ni, sizeof(*ni));
+ ni = kzalloc(sizeof(*ni), GFP_NOFS);
if (!ni) {
CERROR("Out of memory creating network %s\n",
libcfs_net2str(net));
@@ -170,7 +167,7 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
LASSERT(rc <= LNET_CPT_NUMBER);
if (rc == LNET_CPT_NUMBER) {
- LIBCFS_FREE(ni->ni_cpts, rc * sizeof(ni->ni_cpts[0]));
+ cfs_expr_list_values_free(ni->ni_cpts, LNET_CPT_NUMBER);
ni->ni_cpts = NULL;
}
@@ -198,7 +195,6 @@ int
lnet_parse_networks(struct list_head *nilist, char *networks)
{
struct cfs_expr_list *el = NULL;
- int tokensize;
char *tokens;
char *str;
char *tmp;
@@ -219,15 +215,12 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
return -EINVAL;
}
- tokensize = strlen(networks) + 1;
-
- LIBCFS_ALLOC(tokens, tokensize);
+ tokens = kstrdup(networks, GFP_KERNEL);
if (!tokens) {
CERROR("Can't allocate net tokens\n");
return -ENOMEM;
}
- memcpy(tokens, networks, tokensize);
tmp = tokens;
str = tokens;
@@ -275,7 +268,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
if (comma)
*comma++ = 0;
- net = libcfs_str2net(cfs_trimwhite(str));
+ net = libcfs_str2net(strim(str));
if (net == LNET_NIDNET(LNET_NID_ANY)) {
LCONSOLE_ERROR_MSG(0x113,
@@ -298,7 +291,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
}
*bracket = 0;
- net = libcfs_str2net(cfs_trimwhite(str));
+ net = libcfs_str2net(strim(str));
if (net == LNET_NIDNET(LNET_NID_ANY)) {
tmp = str;
goto failed_syntax;
@@ -328,7 +321,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
if (comma)
*comma++ = 0;
- iface = cfs_trimwhite(iface);
+ iface = strim(iface);
if (!*iface) {
tmp = iface;
goto failed_syntax;
@@ -349,14 +342,11 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
* The newly allocated ni_interfaces[] can be
* freed when freeing the NI
*/
- LIBCFS_ALLOC(ni->ni_interfaces[niface],
- strlen(iface) + 1);
+ ni->ni_interfaces[niface] = kstrdup(iface, GFP_KERNEL);
if (!ni->ni_interfaces[niface]) {
CERROR("Can't allocate net interface name\n");
goto failed;
}
- strncpy(ni->ni_interfaces[niface], iface,
- strlen(iface));
niface++;
iface = comma;
} while (iface);
@@ -365,7 +355,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
comma = strchr(bracket + 1, ',');
if (comma) {
*comma = 0;
- str = cfs_trimwhite(str);
+ str = strim(str);
if (*str) {
tmp = str;
goto failed_syntax;
@@ -374,7 +364,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
continue;
}
- str = cfs_trimwhite(str);
+ str = strim(str);
if (*str) {
tmp = str;
goto failed_syntax;
@@ -384,7 +374,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
list_for_each(temp_node, nilist)
nnets++;
- LIBCFS_FREE(tokens, tokensize);
+ kfree(tokens);
return nnets;
failed_syntax:
@@ -400,7 +390,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
if (el)
cfs_expr_list_free(el);
- LIBCFS_FREE(tokens, tokensize);
+ kfree(tokens);
return -EINVAL;
}
@@ -424,7 +414,7 @@ lnet_new_text_buf(int str_len)
return NULL;
}
- LIBCFS_ALLOC(ltb, nob);
+ ltb = kzalloc(nob, GFP_KERNEL);
if (!ltb)
return NULL;
@@ -438,7 +428,7 @@ static void
lnet_free_text_buf(struct lnet_text_buf *ltb)
{
lnet_tbnob -= ltb->ltb_size;
- LIBCFS_FREE(ltb, ltb->ltb_size);
+ kfree(ltb);
}
static void
@@ -1156,7 +1146,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp)
if (nif <= 0)
return nif;
- LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs));
+ ipaddrs = kcalloc(nif, sizeof(*ipaddrs), GFP_KERNEL);
if (!ipaddrs) {
CERROR("Can't allocate ipaddrs[%d]\n", nif);
lnet_ipif_free_enumeration(ifnames, nif);
@@ -1189,7 +1179,8 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp)
*ipaddrsp = ipaddrs;
} else {
if (nip > 0) {
- LIBCFS_ALLOC(ipaddrs2, nip * sizeof(*ipaddrs2));
+ ipaddrs2 = kcalloc(nip, sizeof(*ipaddrs2),
+ GFP_KERNEL);
if (!ipaddrs2) {
CERROR("Can't allocate ipaddrs[%d]\n", nip);
nip = -ENOMEM;
@@ -1200,7 +1191,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp)
rc = nip;
}
}
- LIBCFS_FREE(ipaddrs, nip * sizeof(*ipaddrs));
+ kfree(ipaddrs);
}
return nip;
}
@@ -1226,7 +1217,7 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets)
}
rc = lnet_match_networks(networksp, ip2nets, ipaddrs, nip);
- LIBCFS_FREE(ipaddrs, nip * sizeof(*ipaddrs));
+ kfree(ipaddrs);
if (rc < 0) {
LCONSOLE_ERROR_MSG(0x119, "Error %d parsing ip2nets\n", rc);
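Two recurring substitutions in config.c deserve a note: kstrdup() replaces the LIBCFS_ALLOC() + memcpy()/strncpy() pairs (it allocates strlen() + 1 and copies the NUL, dropping the strncpy() that relied on LIBCFS_ALLOC() zero-filling for its terminator), and strim() replaces cfs_trimwhite(). A small sketch of both together, with illustrative names:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_dup_and_trim(const char *src)
{
	char *copy, *trimmed;

	/* kstrdup() allocates strlen(src) + 1 and copies the NUL */
	copy = kstrdup(src, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	/*
	 * strim() truncates trailing whitespace in place and returns
	 * a pointer past any leading whitespace; keep "copy" around,
	 * since that is the address kfree() needs.
	 */
	trimmed = strim(copy);
	pr_info("trimmed: '%s'\n", trimmed);

	kfree(copy);
	return 0;
}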
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index daf744277003..a173b69e2f92 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -90,12 +90,13 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
if (!count && callback == LNET_EQ_HANDLER_NONE)
return -EINVAL;
- eq = lnet_eq_alloc();
+ eq = kzalloc(sizeof(*eq), GFP_NOFS);
if (!eq)
return -ENOMEM;
if (count) {
- LIBCFS_ALLOC(eq->eq_events, count * sizeof(struct lnet_event));
+ eq->eq_events = kvmalloc_array(count, sizeof(struct lnet_event),
+ GFP_KERNEL | __GFP_ZERO);
if (!eq->eq_events)
goto failed;
/*
@@ -132,13 +133,12 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
return 0;
failed:
- if (eq->eq_events)
- LIBCFS_FREE(eq->eq_events, count * sizeof(struct lnet_event));
+ kvfree(eq->eq_events);
if (eq->eq_refs)
cfs_percpt_free(eq->eq_refs);
- lnet_eq_free(eq);
+ kfree(eq);
return -ENOMEM;
}
EXPORT_SYMBOL(LNetEQAlloc);
@@ -197,13 +197,12 @@ LNetEQFree(struct lnet_handle_eq eqh)
lnet_res_lh_invalidate(&eq->eq_lh);
list_del(&eq->eq_list);
- lnet_eq_free(eq);
+ kfree(eq);
out:
lnet_eq_wait_unlock();
lnet_res_unlock(LNET_LOCK_EX);
- if (events)
- LIBCFS_FREE(events, size * sizeof(struct lnet_event));
+ kvfree(events);
if (refs)
cfs_percpt_free(refs);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index ac5b9593d597..8a22514aaf71 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -81,7 +81,7 @@ lnet_md_unlink(struct lnet_libmd *md)
LASSERT(!list_empty(&md->md_list));
list_del_init(&md->md_list);
- lnet_md_free(md);
+ kfree(md);
}
static int
@@ -173,7 +173,7 @@ lnet_md_link(struct lnet_libmd *md, struct lnet_handle_eq eq_handle, int cpt)
/*
* NB we are passed an allocated, but inactive md.
* if we return success, caller may lnet_md_unlink() it.
- * otherwise caller may only lnet_md_free() it.
+ * otherwise caller may only kfree() it.
*/
/*
* This implementation doesn't know how to create START events or
@@ -329,7 +329,7 @@ LNetMDAttach(struct lnet_handle_me meh, struct lnet_md umd,
out_unlock:
lnet_res_unlock(cpt);
out_free:
- lnet_md_free(md);
+ kfree(md);
return rc;
}
EXPORT_SYMBOL(LNetMDAttach);
@@ -390,7 +390,7 @@ LNetMDBind(struct lnet_md umd, enum lnet_unlink unlink,
out_unlock:
lnet_res_unlock(cpt);
out_free:
- lnet_md_free(md);
+ kfree(md);
return rc;
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index dd5d3cf6d3e2..672e37bdd045 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -90,7 +90,7 @@ LNetMEAttach(unsigned int portal,
if (!mtable) /* can't match portal type */
return -EPERM;
- me = lnet_me_alloc();
+ me = kzalloc(sizeof(*me), GFP_NOFS);
if (!me)
return -ENOMEM;
@@ -157,7 +157,7 @@ LNetMEInsert(struct lnet_handle_me current_meh,
if (pos == LNET_INS_LOCAL)
return -EPERM;
- new_me = lnet_me_alloc();
+ new_me = kzalloc(sizeof(*new_me), GFP_NOFS);
if (!new_me)
return -ENOMEM;
@@ -167,7 +167,7 @@ LNetMEInsert(struct lnet_handle_me current_meh,
current_me = lnet_handle2me(&current_meh);
if (!current_me) {
- lnet_me_free(new_me);
+ kfree(new_me);
lnet_res_unlock(cpt);
return -ENOENT;
@@ -178,7 +178,7 @@ LNetMEInsert(struct lnet_handle_me current_meh,
ptl = the_lnet.ln_portals[current_me->me_portal];
if (lnet_ptl_is_unique(ptl)) {
/* makes no sense to insert on a unique portal */
- lnet_me_free(new_me);
+ kfree(new_me);
lnet_res_unlock(cpt);
return -EPERM;
}
@@ -270,5 +270,5 @@ lnet_me_unlink(struct lnet_me *me)
}
lnet_res_lh_invalidate(&me->me_lh);
- lnet_me_free(me);
+ kfree(me);
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 68d16ffec980..c673037dbce4 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -57,7 +57,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold) {
/* Adding a new entry */
- LIBCFS_ALLOC(tp, sizeof(*tp));
+ tp = kzalloc(sizeof(*tp), GFP_NOFS);
if (!tp)
return -ENOMEM;
@@ -90,7 +90,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
list_for_each_entry_safe(tp, temp, &cull, tp_list) {
list_del(&tp->tp_list);
- LIBCFS_FREE(tp, sizeof(*tp));
+ kfree(tp);
}
return 0;
}
@@ -149,7 +149,7 @@ fail_peer(lnet_nid_t nid, int outgoing)
list_for_each_entry_safe(tp, temp, &cull, tp_list) {
list_del(&tp->tp_list);
- LIBCFS_FREE(tp, sizeof(*tp));
+ kfree(tp);
}
return fail;
@@ -1769,7 +1769,7 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
goto drop;
}
- msg = lnet_msg_alloc();
+ msg = kzalloc(sizeof(*msg), GFP_NOFS);
if (!msg) {
CERROR("%s, src %s: Dropping %s (out of memory)\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
@@ -1777,7 +1777,7 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
goto drop;
}
- /* msg zeroed in lnet_msg_alloc;
+ /* msg zeroed by kzalloc()
* i.e. flags all clear, pointers NULL etc
*/
msg->msg_type = type;
@@ -1812,7 +1812,7 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
lnet_msgtyp2str(type), rc);
- lnet_msg_free(msg);
+ kfree(msg);
if (rc == -ESHUTDOWN)
/* We are shutting down. Don't do anything more */
return 0;
@@ -2010,7 +2010,7 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
return -EIO;
}
- msg = lnet_msg_alloc();
+ msg = kzalloc(sizeof(*msg), GFP_NOFS);
if (!msg) {
CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
libcfs_id2str(target));
@@ -2031,7 +2031,7 @@ LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
md->md_me->me_portal);
lnet_res_unlock(cpt);
- lnet_msg_free(msg);
+ kfree(msg);
return -ENOENT;
}
@@ -2086,7 +2086,7 @@ lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
* CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
* lnet_finalize() is called on it, so the LND must call this first
*/
- struct lnet_msg *msg = lnet_msg_alloc();
+ struct lnet_msg *msg = kzalloc(sizeof(*msg), GFP_NOFS);
struct lnet_libmd *getmd = getmsg->msg_md;
struct lnet_process_id peer_id = getmsg->msg_target;
int cpt;
@@ -2146,8 +2146,7 @@ lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
lnet_net_unlock(cpt);
- if (msg)
- lnet_msg_free(msg);
+ kfree(msg);
return NULL;
}
@@ -2215,7 +2214,7 @@ LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
return -EIO;
}
- msg = lnet_msg_alloc();
+ msg = kzalloc(sizeof(*msg), GFP_NOFS);
if (!msg) {
CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
libcfs_id2str(target));
@@ -2236,7 +2235,7 @@ LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
lnet_res_unlock(cpt);
- lnet_msg_free(msg);
+ kfree(msg);
return -ENOENT;
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index c72ef05b2420..0091273c04b9 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -433,7 +433,7 @@ lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
}
lnet_msg_decommit(msg, cpt, status);
- lnet_msg_free(msg);
+ kfree(msg);
return 0;
}
@@ -466,7 +466,7 @@ lnet_finalize(struct lnet_ni *ni, struct lnet_msg *msg, int status)
if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
/* not committed to network yet */
LASSERT(!msg->msg_onactivelist);
- lnet_msg_free(msg);
+ kfree(msg);
return;
}
@@ -546,19 +546,15 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container)
LASSERT(msg->msg_onactivelist);
msg->msg_onactivelist = 0;
list_del(&msg->msg_activelist);
- lnet_msg_free(msg);
+ kfree(msg);
count++;
}
if (count > 0)
CERROR("%d active msg on exit\n", count);
- if (container->msc_finalizers) {
- LIBCFS_FREE(container->msc_finalizers,
- container->msc_nfinalizers *
- sizeof(*container->msc_finalizers));
- container->msc_finalizers = NULL;
- }
+ kvfree(container->msc_finalizers);
+ container->msc_finalizers = NULL;
container->msc_init = 0;
}
@@ -573,9 +569,9 @@ lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
/* number of CPUs */
container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
- LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
- container->msc_nfinalizers *
- sizeof(*container->msc_finalizers));
+ container->msc_finalizers = kvzalloc_cpt(container->msc_nfinalizers *
+ sizeof(*container->msc_finalizers),
+ GFP_KERNEL, cpt);
if (!container->msc_finalizers) {
CERROR("Failed to allocate message finalizers\n");
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 8ae93bf6fd1b..471f2f6c86f4 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -771,11 +771,11 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
struct lnet_me, me_list);
CERROR("Active ME %p on exit\n", me);
list_del(&me->me_list);
- lnet_me_free(me);
+ kfree(me);
}
}
/* the extra entry is for MEs with ignore bits */
- LIBCFS_FREE(mhash, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+ kvfree(mhash);
}
cfs_percpt_free(ptl->ptl_mtables);
@@ -803,8 +803,8 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
spin_lock_init(&ptl->ptl_lock);
cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
/* the extra entry is for MEs with ignore bits */
- LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
- sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+ mhash = kvzalloc_cpt(sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1),
+ GFP_KERNEL, i);
if (!mhash) {
CERROR("Failed to create match hash for portal %d\n",
index);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 7d49d4865298..ce93806eefca 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -170,7 +170,7 @@ lnet_ipif_enumerate(char ***namesp)
nalloc);
}
- LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr));
+ ifr = kzalloc(nalloc * sizeof(*ifr), GFP_KERNEL);
if (!ifr) {
CERROR("ENOMEM enumerating up to %d interfaces\n",
nalloc);
@@ -195,14 +195,14 @@ lnet_ipif_enumerate(char ***namesp)
if (nfound < nalloc || toobig)
break;
- LIBCFS_FREE(ifr, nalloc * sizeof(*ifr));
+ kfree(ifr);
nalloc *= 2;
}
if (!nfound)
goto out1;
- LIBCFS_ALLOC(names, nfound * sizeof(*names));
+ names = kzalloc(nfound * sizeof(*names), GFP_KERNEL);
if (!names) {
rc = -ENOMEM;
goto out1;
@@ -218,7 +218,7 @@ lnet_ipif_enumerate(char ***namesp)
goto out2;
}
- LIBCFS_ALLOC(names[i], IFNAMSIZ);
+ names[i] = kmalloc(IFNAMSIZ, GFP_KERNEL);
if (!names[i]) {
rc = -ENOMEM;
goto out2;
@@ -235,7 +235,7 @@ out2:
if (rc < 0)
lnet_ipif_free_enumeration(names, nfound);
out1:
- LIBCFS_FREE(ifr, nalloc * sizeof(*ifr));
+ kfree(ifr);
out0:
return rc;
}
@@ -249,9 +249,9 @@ lnet_ipif_free_enumeration(char **names, int n)
LASSERT(n > 0);
for (i = 0; i < n && names[i]; i++)
- LIBCFS_FREE(names[i], IFNAMSIZ);
+ kfree(names[i]);
- LIBCFS_FREE(names, n * sizeof(*names));
+ kfree(names);
}
EXPORT_SYMBOL(lnet_ipif_free_enumeration);
@@ -314,19 +314,20 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
unsigned long then;
struct timeval tv;
+ struct kvec iov = {
+ .iov_base = buffer,
+ .iov_len = nob
+ };
+ struct msghdr msg = {
+ .msg_flags = 0
+ };
LASSERT(nob > 0);
LASSERT(jiffies_left > 0);
- for (;;) {
- struct kvec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_flags = 0
- };
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, nob);
+ for (;;) {
/* Set receive timeout to remaining time */
jiffies_to_timeval(jiffies_left, &tv);
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
@@ -338,7 +339,7 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
}
then = jiffies;
- rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
+ rc = sock_recvmsg(sock, &msg, 0);
jiffies_left -= jiffies - then;
if (rc < 0)
@@ -347,10 +348,7 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
if (!rc)
return -ECONNRESET;
- buffer = ((char *)buffer) + rc;
- nob -= rc;
-
- if (!nob)
+ if (!msg_data_left(&msg))
return 0;
if (jiffies_left <= 0)
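lnet_sock_read() moves from kernel_recvmsg(), which had to rebuild the kvec on every pass, to an iov_iter that sock_recvmsg() advances itself, leaving msg_data_left() as the loop condition. A hedged sketch of the idiom using the iov_iter_kvec() signature of this kernel series (the READ | ITER_KVEC direction form); the real function's receive timeouts are omitted:

#include <linux/socket.h>
#include <linux/uio.h>
#include <net/sock.h>

static int example_read_full(struct socket *sock, void *buf, int nob)
{
	struct kvec iov = { .iov_base = buf, .iov_len = nob };
	struct msghdr msg = { .msg_flags = 0 };
	int rc;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, nob);

	while (msg_data_left(&msg)) {
		/* advances msg.msg_iter past whatever was received */
		rc = sock_recvmsg(sock, &msg, 0);
		if (rc < 0)
			return rc;
		if (!rc)
			return -ECONNRESET;	/* peer closed the socket */
	}
	return 0;
}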
diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c
index 5a5d1811ffbe..e3468cef273b 100644
--- a/drivers/staging/lustre/lnet/lnet/net_fault.c
+++ b/drivers/staging/lustre/lnet/lnet/net_fault.c
@@ -161,7 +161,7 @@ lnet_drop_rule_add(struct lnet_fault_attr *attr)
if (lnet_fault_attr_validate(attr))
return -EINVAL;
- CFS_ALLOC_PTR(rule);
+ rule = kzalloc(sizeof(*rule), GFP_NOFS);
if (!rule)
return -ENOMEM;
@@ -170,10 +170,10 @@ lnet_drop_rule_add(struct lnet_fault_attr *attr)
rule->dr_attr = *attr;
if (attr->u.drop.da_interval) {
rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
- rule->dr_drop_time = cfs_time_shift(cfs_rand() %
- attr->u.drop.da_interval);
+ rule->dr_drop_time = cfs_time_shift(
+ prandom_u32_max(attr->u.drop.da_interval));
} else {
- rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+ rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
}
lnet_net_lock(LNET_LOCK_EX);
@@ -223,7 +223,7 @@ lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
rule->dr_attr.u.drop.da_interval);
list_del(&rule->dr_link);
- CFS_FREE_PTR(rule);
+ kfree(rule);
n++;
}
@@ -277,10 +277,10 @@ lnet_drop_rule_reset(void)
memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
if (attr->u.drop.da_rate) {
- rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+ rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
} else {
- rule->dr_drop_time = cfs_time_shift(cfs_rand() %
- attr->u.drop.da_interval);
+ rule->dr_drop_time = cfs_time_shift(
+ prandom_u32_max(attr->u.drop.da_interval));
rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
}
spin_unlock(&rule->dr_lock);
@@ -315,8 +315,8 @@ drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
rule->dr_time_base = now;
rule->dr_drop_time = rule->dr_time_base +
- cfs_time_seconds(cfs_rand() %
- attr->u.drop.da_interval);
+ cfs_time_seconds(
+ prandom_u32_max(attr->u.drop.da_interval));
rule->dr_time_base += cfs_time_seconds(attr->u.drop.da_interval);
CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lu\n",
@@ -330,7 +330,7 @@ drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
if (!do_div(rule->dr_stat.fs_count, attr->u.drop.da_rate)) {
rule->dr_drop_at = rule->dr_stat.fs_count +
- cfs_rand() % attr->u.drop.da_rate;
+ prandom_u32_max(attr->u.drop.da_rate);
CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
libcfs_nid2str(attr->fa_src),
libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
@@ -452,7 +452,7 @@ delay_rule_decref(struct lnet_delay_rule *rule)
LASSERT(list_empty(&rule->dl_msg_list));
LASSERT(list_empty(&rule->dl_link));
- CFS_FREE_PTR(rule);
+ kfree(rule);
}
}
@@ -483,8 +483,9 @@ delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
rule->dl_time_base = now;
rule->dl_delay_time = rule->dl_time_base +
- cfs_time_seconds(cfs_rand() %
- attr->u.delay.la_interval);
+ cfs_time_seconds(
+ prandom_u32_max(
+ attr->u.delay.la_interval));
rule->dl_time_base += cfs_time_seconds(attr->u.delay.la_interval);
CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lu\n",
@@ -498,7 +499,7 @@ delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
/* generate the next random rate sequence */
if (!do_div(rule->dl_stat.fs_count, attr->u.delay.la_rate)) {
rule->dl_delay_at = rule->dl_stat.fs_count +
- cfs_rand() % attr->u.delay.la_rate;
+ prandom_u32_max(attr->u.delay.la_rate);
CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
libcfs_nid2str(attr->fa_src),
libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
@@ -738,7 +739,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
if (lnet_fault_attr_validate(attr))
return -EINVAL;
- CFS_ALLOC_PTR(rule);
+ rule = kzalloc(sizeof(*rule), GFP_NOFS);
if (!rule)
return -ENOMEM;
@@ -771,10 +772,10 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
rule->dl_attr = *attr;
if (attr->u.delay.la_interval) {
rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
- rule->dl_delay_time = cfs_time_shift(cfs_rand() %
- attr->u.delay.la_interval);
+ rule->dl_delay_time = cfs_time_shift(
+ prandom_u32_max(attr->u.delay.la_interval));
} else {
- rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
+ rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
}
rule->dl_msg_send = -1;
@@ -792,7 +793,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
return 0;
failed:
mutex_unlock(&delay_dd.dd_mutex);
- CFS_FREE_PTR(rule);
+ kfree(rule);
return rc;
}
@@ -920,10 +921,11 @@ lnet_delay_rule_reset(void)
memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
if (attr->u.delay.la_rate) {
- rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
+ rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
} else {
- rule->dl_delay_time = cfs_time_shift(cfs_rand() %
- attr->u.delay.la_interval);
+ rule->dl_delay_time =
+ cfs_time_shift(prandom_u32_max(
+ attr->u.delay.la_interval));
rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
}
spin_unlock(&rule->dl_lock);
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index 05b120c2d45a..3aba1421c741 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -166,7 +166,7 @@ parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange)
return 0;
}
- LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
+ addrrange = kzalloc(sizeof(struct addrrange), GFP_NOFS);
if (!addrrange)
return -ENOMEM;
list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
@@ -225,7 +225,7 @@ add_nidrange(const struct cfs_lstr *src,
return nr;
}
- LIBCFS_ALLOC(nr, sizeof(struct nidrange));
+ nr = kzalloc(sizeof(struct nidrange), GFP_NOFS);
if (!nr)
return NULL;
list_add_tail(&nr->nr_link, nidlist);
@@ -286,7 +286,7 @@ free_addrranges(struct list_head *list)
cfs_expr_list_free_list(&ar->ar_numaddr_ranges);
list_del(&ar->ar_link);
- LIBCFS_FREE(ar, sizeof(struct addrrange));
+ kfree(ar);
}
}
@@ -308,7 +308,7 @@ cfs_free_nidlist(struct list_head *list)
nr = list_entry(pos, struct nidrange, nr_link);
free_addrranges(&nr->nr_addrranges);
list_del(pos);
- LIBCFS_FREE(nr, sizeof(struct nidrange));
+ kfree(nr);
}
}
EXPORT_SYMBOL(cfs_free_nidlist);
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index 5e94ad349454..3e157c10fec4 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -56,8 +56,8 @@ lnet_peer_tables_create(void)
cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
INIT_LIST_HEAD(&ptable->pt_deathrow);
- LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
- LNET_PEER_HASH_SIZE * sizeof(*hash));
+ hash = kvmalloc_cpt(LNET_PEER_HASH_SIZE * sizeof(*hash),
+ GFP_KERNEL, i);
if (!hash) {
CERROR("Failed to create peer hash table\n");
lnet_peer_tables_destroy();
@@ -94,7 +94,7 @@ lnet_peer_tables_destroy(void)
for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
LASSERT(list_empty(&hash[j]));
- LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
+ kvfree(hash);
}
cfs_percpt_free(the_lnet.ln_peer_tables);
@@ -212,7 +212,7 @@ lnet_peer_tables_cleanup(struct lnet_ni *ni)
list_for_each_entry_safe(lp, temp, &deathrow, lp_hashlist) {
list_del(&lp->lp_hashlist);
- LIBCFS_FREE(lp, sizeof(*lp));
+ kfree(lp);
}
}
@@ -297,7 +297,7 @@ lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt)
if (lp)
memset(lp, 0, sizeof(*lp));
else
- LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
+ lp = kzalloc_cpt(sizeof(*lp), GFP_NOFS, cpt2);
if (!lp) {
rc = -ENOMEM;
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 88283ca3f860..6504761ca598 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -238,28 +238,25 @@ lnet_find_net_locked(__u32 net)
static void lnet_shuffle_seed(void)
{
static int seeded;
- __u32 lnd_type, seed[2];
- struct timespec64 ts;
struct lnet_ni *ni;
if (seeded)
return;
- cfs_get_random_bytes(seed, sizeof(seed));
-
/*
* Nodes with small feet have little entropy
* the NID for this node gives the most entropy in the low bits
*/
list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+ __u32 lnd_type, seed;
- if (lnd_type != LOLND)
- seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
+ lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+ if (lnd_type != LOLND) {
+ seed = (LNET_NIDADDR(ni->ni_nid) | lnd_type);
+ add_device_randomness(&seed, sizeof(seed));
+ }
}
- ktime_get_ts64(&ts);
- cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
seeded = 1;
}
@@ -277,8 +274,8 @@ lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route)
len++;
}
- /* len+1 positions to add a new entry, also prevents division by 0 */
- offset = cfs_rand() % (len + 1);
+ /* len+1 positions to add a new entry */
+ offset = prandom_u32_max(len + 1);
list_for_each(e, &rnet->lrn_routes) {
if (!offset)
break;
@@ -318,15 +315,13 @@ lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
return -EEXIST;
/* Assume net, route, all new */
- LIBCFS_ALLOC(route, sizeof(*route));
- LIBCFS_ALLOC(rnet, sizeof(*rnet));
+ route = kzalloc(sizeof(*route), GFP_NOFS);
+ rnet = kzalloc(sizeof(*rnet), GFP_NOFS);
if (!route || !rnet) {
CERROR("Out of memory creating route %s %d %s\n",
libcfs_net2str(net), hops, libcfs_nid2str(gateway));
- if (route)
- LIBCFS_FREE(route, sizeof(*route));
- if (rnet)
- LIBCFS_FREE(rnet, sizeof(*rnet));
+ kfree(route);
+ kfree(rnet);
return -ENOMEM;
}
@@ -342,8 +337,8 @@ lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
if (rc) {
lnet_net_unlock(LNET_LOCK_EX);
- LIBCFS_FREE(route, sizeof(*route));
- LIBCFS_FREE(rnet, sizeof(*rnet));
+ kfree(route);
+ kfree(rnet);
if (rc == -EHOSTUNREACH) /* gateway is not on a local net */
return rc; /* ignore the route entry */
@@ -398,11 +393,11 @@ lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
if (!add_route) {
rc = -EEXIST;
- LIBCFS_FREE(route, sizeof(*route));
+ kfree(route);
}
if (rnet != rnet2)
- LIBCFS_FREE(rnet, sizeof(*rnet));
+ kfree(rnet);
/* indicate to startup the router checker if configured */
wake_up(&the_lnet.ln_rc_waitq);
@@ -520,10 +515,8 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid)
lnet_net_unlock(LNET_LOCK_EX);
- LIBCFS_FREE(route, sizeof(*route));
-
- if (rnet)
- LIBCFS_FREE(rnet, sizeof(*rnet));
+ kfree(route);
+ kfree(rnet);
rc = 0;
lnet_net_lock(LNET_LOCK_EX);
@@ -891,10 +884,9 @@ lnet_destroy_rc_data(struct lnet_rc_data *rcd)
lnet_net_unlock(cpt);
}
- if (rcd->rcd_pinginfo)
- LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
+ kfree(rcd->rcd_pinginfo);
- LIBCFS_FREE(rcd, sizeof(*rcd));
+ kfree(rcd);
}
static struct lnet_rc_data *
@@ -908,14 +900,14 @@ lnet_create_rc_data_locked(struct lnet_peer *gateway)
lnet_net_unlock(gateway->lp_cpt);
- LIBCFS_ALLOC(rcd, sizeof(*rcd));
+ rcd = kzalloc(sizeof(*rcd), GFP_NOFS);
if (!rcd)
goto out;
LNetInvalidateMDHandle(&rcd->rcd_mdh);
INIT_LIST_HEAD(&rcd->rcd_list);
- LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
+ pi = kzalloc(LNET_PINGINFO_SIZE, GFP_NOFS);
if (!pi)
goto out;
@@ -1304,12 +1296,10 @@ rescan:
void
lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
{
- int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
-
while (--npages >= 0)
__free_page(rb->rb_kiov[npages].bv_page);
- LIBCFS_FREE(rb, sz);
+ kfree(rb);
}
static struct lnet_rtrbuf *
@@ -1321,7 +1311,7 @@ lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
struct lnet_rtrbuf *rb;
int i;
- LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
+ rb = kzalloc_cpt(sz, GFP_NOFS, cpt);
if (!rb)
return NULL;
@@ -1335,7 +1325,7 @@ lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
while (--i >= 0)
__free_page(rb->rb_kiov[i].bv_page);
- LIBCFS_FREE(rb, sz);
+ kfree(rb);
return NULL;
}
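lnet_shuffle_seed() no longer maintains a private seed pair: rather than folding NID bits and the clock into cfs_srand(), it contributes the NID-derived value to the system pool with add_device_randomness() and lets route shuffling draw from prandom. A sketch of that contribution pattern (the identifier fed in is illustrative):

#include <linux/random.h>

static void example_mix_in_id(u32 node_id)
{
	/*
	 * add_device_randomness() credits no entropy; it only mixes
	 * device-unique data into the input pool, which is what the
	 * old cfs_srand(NID ^ time) arrangement approximated.
	 */
	add_device_randomness(&node_id, sizeof(node_id));
}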
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index d32d653edcb0..1a71ffebc889 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -91,13 +91,13 @@ static int __proc_lnet_stats(void *data, int write,
/* read */
- LIBCFS_ALLOC(ctrs, sizeof(*ctrs));
+ ctrs = kzalloc(sizeof(*ctrs), GFP_NOFS);
if (!ctrs)
return -ENOMEM;
- LIBCFS_ALLOC(tmpstr, tmpsiz);
+ tmpstr = kmalloc(tmpsiz, GFP_KERNEL);
if (!tmpstr) {
- LIBCFS_FREE(ctrs, sizeof(*ctrs));
+ kfree(ctrs);
return -ENOMEM;
}
@@ -118,8 +118,8 @@ static int __proc_lnet_stats(void *data, int write,
rc = cfs_trace_copyout_string(buffer, nob,
tmpstr + pos, "\n");
- LIBCFS_FREE(tmpstr, tmpsiz);
- LIBCFS_FREE(ctrs, sizeof(*ctrs));
+ kfree(tmpstr);
+ kfree(ctrs);
return rc;
}
@@ -151,7 +151,7 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
if (!*lenp)
return 0;
- LIBCFS_ALLOC(tmpstr, tmpsiz);
+ tmpstr = kmalloc(tmpsiz, GFP_KERNEL);
if (!tmpstr)
return -ENOMEM;
@@ -183,7 +183,7 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
if (ver != LNET_PROC_VERSION(the_lnet.ln_remote_nets_version)) {
lnet_net_unlock(0);
- LIBCFS_FREE(tmpstr, tmpsiz);
+ kfree(tmpstr);
return -ESTALE;
}
@@ -248,7 +248,7 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
}
}
- LIBCFS_FREE(tmpstr, tmpsiz);
+ kfree(tmpstr);
if (!rc)
*lenp = len;
@@ -275,7 +275,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
if (!*lenp)
return 0;
- LIBCFS_ALLOC(tmpstr, tmpsiz);
+ tmpstr = kmalloc(tmpsiz, GFP_KERNEL);
if (!tmpstr)
return -ENOMEM;
@@ -303,7 +303,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
if (ver != LNET_PROC_VERSION(the_lnet.ln_routers_version)) {
lnet_net_unlock(0);
- LIBCFS_FREE(tmpstr, tmpsiz);
+ kfree(tmpstr);
return -ESTALE;
}
@@ -385,7 +385,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
}
}
- LIBCFS_FREE(tmpstr, tmpsiz);
+ kfree(tmpstr);
if (!rc)
*lenp = len;
@@ -418,7 +418,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
return 0;
}
- LIBCFS_ALLOC(tmpstr, tmpsiz);
+ tmpstr = kmalloc(tmpsiz, GFP_KERNEL);
if (!tmpstr)
return -ENOMEM;
@@ -448,7 +448,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
if (ver != LNET_PROC_VERSION(ptable->pt_version)) {
lnet_net_unlock(cpt);
- LIBCFS_FREE(tmpstr, tmpsiz);
+ kfree(tmpstr);
return -ESTALE;
}
@@ -556,7 +556,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
*ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
}
- LIBCFS_FREE(tmpstr, tmpsiz);
+ kfree(tmpstr);
if (!rc)
*lenp = len;
@@ -579,7 +579,7 @@ static int __proc_lnet_buffers(void *data, int write,
/* (4 %d) * 4 * LNET_CPT_NUMBER */
tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER;
- LIBCFS_ALLOC(tmpstr, tmpsiz);
+ tmpstr = kvmalloc(tmpsiz, GFP_KERNEL);
if (!tmpstr)
return -ENOMEM;
@@ -618,7 +618,7 @@ static int __proc_lnet_buffers(void *data, int write,
rc = cfs_trace_copyout_string(buffer, nob,
tmpstr + pos, NULL);
- LIBCFS_FREE(tmpstr, tmpsiz);
+ kvfree(tmpstr);
return rc;
}
@@ -643,7 +643,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
if (!*lenp)
return 0;
- LIBCFS_ALLOC(tmpstr, tmpsiz);
+ tmpstr = kvmalloc(tmpsiz, GFP_KERNEL);
if (!tmpstr)
return -ENOMEM;
@@ -744,7 +744,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
*ppos += 1;
}
- LIBCFS_FREE(tmpstr, tmpsiz);
+ kvfree(tmpstr);
if (!rc)
*lenp = len;
@@ -795,7 +795,7 @@ static int __proc_lnet_portal_rotor(void *data, int write,
int rc;
int i;
- LIBCFS_ALLOC(buf, buf_len);
+ buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -829,7 +829,7 @@ static int __proc_lnet_portal_rotor(void *data, int write,
if (rc < 0)
goto out;
- tmp = cfs_trimwhite(buf);
+ tmp = strim(buf);
rc = -EINVAL;
lnet_res_lock(0);
@@ -843,7 +843,7 @@ static int __proc_lnet_portal_rotor(void *data, int write,
}
lnet_res_unlock(0);
out:
- LIBCFS_FREE(buf, buf_len);
+ kfree(buf);
return rc;
}
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 082c0afacf23..34ba440b3c02 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -45,7 +45,7 @@
static int
lst_session_new_ioctl(struct lstio_session_new_args *args)
{
- char *name;
+ char name[LST_NAME_SIZE + 1];
int rc;
if (!args->lstio_ses_idp || /* address for output sid */
@@ -55,13 +55,8 @@ lst_session_new_ioctl(struct lstio_session_new_args *args)
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_ses_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_ses_namep,
args->lstio_ses_nmlen)) {
- LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
return -EFAULT;
}
@@ -74,7 +69,6 @@ lst_session_new_ioctl(struct lstio_session_new_args *args)
args->lstio_ses_force,
args->lstio_ses_idp);
- LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
return rc;
}
@@ -112,7 +106,7 @@ lst_session_info_ioctl(struct lstio_session_info_args *args)
static int
lst_debug_ioctl(struct lstio_debug_args *args)
{
- char *name = NULL;
+ char name[LST_NAME_SIZE + 1];
int client = 1;
int rc;
@@ -128,16 +122,10 @@ lst_debug_ioctl(struct lstio_debug_args *args)
return -EINVAL;
if (args->lstio_dbg_namep) {
- LIBCFS_ALLOC(name, args->lstio_dbg_nmlen + 1);
- if (!name)
- return -ENOMEM;
if (copy_from_user(name, args->lstio_dbg_namep,
- args->lstio_dbg_nmlen)) {
- LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
-
+ args->lstio_dbg_nmlen))
return -EFAULT;
- }
name[args->lstio_dbg_nmlen] = 0;
}
@@ -154,7 +142,7 @@ lst_debug_ioctl(struct lstio_debug_args *args)
client = 0;
/* fall through */
case LST_OPC_BATCHCLI:
- if (!name)
+ if (!args->lstio_dbg_namep)
goto out;
rc = lstcon_batch_debug(args->lstio_dbg_timeout,
@@ -162,7 +150,7 @@ lst_debug_ioctl(struct lstio_debug_args *args)
break;
case LST_OPC_GROUP:
- if (!name)
+ if (!args->lstio_dbg_namep)
goto out;
rc = lstcon_group_debug(args->lstio_dbg_timeout,
@@ -185,16 +173,13 @@ lst_debug_ioctl(struct lstio_debug_args *args)
}
out:
- if (name)
- LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
-
return rc;
}
static int
lst_group_add_ioctl(struct lstio_group_add_args *args)
{
- char *name;
+ char name[LST_NAME_SIZE + 1];
int rc;
if (args->lstio_grp_key != console_session.ses_key)
@@ -205,22 +190,14 @@ lst_group_add_ioctl(struct lstio_group_add_args *args)
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen);
+ args->lstio_grp_nmlen))
return -EFAULT;
- }
name[args->lstio_grp_nmlen] = 0;
rc = lstcon_group_add(name);
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
return rc;
}
@@ -228,7 +205,7 @@ static int
lst_group_del_ioctl(struct lstio_group_del_args *args)
{
int rc;
- char *name;
+ char name[LST_NAME_SIZE + 1];
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
@@ -238,22 +215,14 @@ lst_group_del_ioctl(struct lstio_group_del_args *args)
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+ args->lstio_grp_nmlen))
return -EFAULT;
- }
name[args->lstio_grp_nmlen] = 0;
rc = lstcon_group_del(name);
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
return rc;
}
@@ -261,7 +230,7 @@ static int
lst_group_update_ioctl(struct lstio_group_update_args *args)
{
int rc;
- char *name;
+ char name[LST_NAME_SIZE + 1];
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
@@ -272,15 +241,9 @@ lst_group_update_ioctl(struct lstio_group_update_args *args)
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+ args->lstio_grp_nmlen))
return -EFAULT;
- }
name[args->lstio_grp_nmlen] = 0;
@@ -309,8 +272,6 @@ lst_group_update_ioctl(struct lstio_group_update_args *args)
break;
}
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
return rc;
}
@@ -319,7 +280,7 @@ lst_nodes_add_ioctl(struct lstio_group_nodes_args *args)
{
unsigned int feats;
int rc;
- char *name;
+ char name[LST_NAME_SIZE + 1];
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
@@ -333,16 +294,9 @@ lst_nodes_add_ioctl(struct lstio_group_nodes_args *args)
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
+ args->lstio_grp_nmlen))
return -EFAULT;
- }
name[args->lstio_grp_nmlen] = 0;
@@ -350,7 +304,6 @@ lst_nodes_add_ioctl(struct lstio_group_nodes_args *args)
args->lstio_grp_idsp, &feats,
args->lstio_grp_resultp);
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
if (!rc &&
copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
return -EINVAL;
@@ -379,7 +332,7 @@ lst_group_list_ioctl(struct lstio_group_list_args *args)
static int
lst_group_info_ioctl(struct lstio_group_info_args *args)
{
- char *name;
+ char name[LST_NAME_SIZE + 1];
int ndent;
int index;
int rc;
@@ -411,23 +364,15 @@ lst_group_info_ioctl(struct lstio_group_info_args *args)
return -EINVAL;
}
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+ args->lstio_grp_nmlen))
return -EFAULT;
- }
name[args->lstio_grp_nmlen] = 0;
rc = lstcon_group_info(name, args->lstio_grp_entp,
&index, &ndent, args->lstio_grp_dentsp);
- LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-
if (rc)
return rc;
@@ -443,7 +388,7 @@ static int
lst_batch_add_ioctl(struct lstio_batch_add_args *args)
{
int rc;
- char *name;
+ char name[LST_NAME_SIZE + 1];
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
@@ -453,22 +398,14 @@ lst_batch_add_ioctl(struct lstio_batch_add_args *args)
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ args->lstio_bat_nmlen))
return -EFAULT;
- }
name[args->lstio_bat_nmlen] = 0;
rc = lstcon_batch_add(name);
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
return rc;
}
@@ -476,7 +413,7 @@ static int
lst_batch_run_ioctl(struct lstio_batch_run_args *args)
{
int rc;
- char *name;
+ char name[LST_NAME_SIZE + 1];
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
@@ -486,23 +423,15 @@ lst_batch_run_ioctl(struct lstio_batch_run_args *args)
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ args->lstio_bat_nmlen))
return -EFAULT;
- }
name[args->lstio_bat_nmlen] = 0;
rc = lstcon_batch_run(name, args->lstio_bat_timeout,
args->lstio_bat_resultp);
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
return rc;
}
@@ -510,7 +439,7 @@ static int
lst_batch_stop_ioctl(struct lstio_batch_stop_args *args)
{
int rc;
- char *name;
+ char name[LST_NAME_SIZE + 1];
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
@@ -521,30 +450,22 @@ lst_batch_stop_ioctl(struct lstio_batch_stop_args *args)
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ args->lstio_bat_nmlen))
return -EFAULT;
- }
name[args->lstio_bat_nmlen] = 0;
rc = lstcon_batch_stop(name, args->lstio_bat_force,
args->lstio_bat_resultp);
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
return rc;
}
static int
lst_batch_query_ioctl(struct lstio_batch_query_args *args)
{
- char *name;
+ char name[LST_NAME_SIZE + 1];
int rc;
if (args->lstio_bat_key != console_session.ses_key)
@@ -559,15 +480,9 @@ lst_batch_query_ioctl(struct lstio_batch_query_args *args)
if (args->lstio_bat_testidx < 0)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ args->lstio_bat_nmlen))
return -EFAULT;
- }
name[args->lstio_bat_nmlen] = 0;
@@ -577,8 +492,6 @@ lst_batch_query_ioctl(struct lstio_batch_query_args *args)
args->lstio_bat_timeout,
args->lstio_bat_resultp);
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
return rc;
}
@@ -602,7 +515,7 @@ lst_batch_list_ioctl(struct lstio_batch_list_args *args)
static int
lst_batch_info_ioctl(struct lstio_batch_info_args *args)
{
- char *name;
+ char name[LST_NAME_SIZE + 1];
int rc;
int index;
int ndent;
@@ -634,15 +547,9 @@ lst_batch_info_ioctl(struct lstio_batch_info_args *args)
return -EINVAL;
}
- LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
if (copy_from_user(name, args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ args->lstio_bat_nmlen))
return -EFAULT;
- }
name[args->lstio_bat_nmlen] = 0;
@@ -650,8 +557,6 @@ lst_batch_info_ioctl(struct lstio_batch_info_args *args)
args->lstio_bat_server, args->lstio_bat_testidx,
&index, &ndent, args->lstio_bat_dentsp);
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-
if (rc)
return rc;
@@ -667,7 +572,7 @@ static int
lst_stat_query_ioctl(struct lstio_stat_args *args)
{
int rc;
- char *name = NULL;
+ char name[LST_NAME_SIZE + 1];
/* TODO: not finished */
if (args->lstio_sta_key != console_session.ses_key)
@@ -689,10 +594,6 @@ lst_stat_query_ioctl(struct lstio_stat_args *args)
args->lstio_sta_nmlen > LST_NAME_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1);
- if (!name)
- return -ENOMEM;
-
rc = copy_from_user(name, args->lstio_sta_namep,
args->lstio_sta_nmlen);
if (!rc)
@@ -704,16 +605,14 @@ lst_stat_query_ioctl(struct lstio_stat_args *args)
rc = -EINVAL;
}
- if (name)
- LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
return rc;
}
static int lst_test_add_ioctl(struct lstio_test_args *args)
{
- char *batch_name;
- char *src_name = NULL;
- char *dst_name = NULL;
+ char batch_name[LST_NAME_SIZE + 1];
+ char src_name[LST_NAME_SIZE + 1];
+ char dst_name[LST_NAME_SIZE + 1];
void *param = NULL;
int ret = 0;
int rc = -ENOMEM;
@@ -748,20 +647,8 @@ static int lst_test_add_ioctl(struct lstio_test_args *args)
if (!args->lstio_tes_param && args->lstio_tes_param_len)
return -EINVAL;
- LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
- if (!batch_name)
- return rc;
-
- LIBCFS_ALLOC(src_name, args->lstio_tes_sgrp_nmlen + 1);
- if (!src_name)
- goto out;
-
- LIBCFS_ALLOC(dst_name, args->lstio_tes_dgrp_nmlen + 1);
- if (!dst_name)
- goto out;
-
if (args->lstio_tes_param) {
- LIBCFS_ALLOC(param, args->lstio_tes_param_len);
+ param = kmalloc(args->lstio_tes_param_len, GFP_KERNEL);
if (!param)
goto out;
if (copy_from_user(param, args->lstio_tes_param,
@@ -791,17 +678,7 @@ static int lst_test_add_ioctl(struct lstio_test_args *args)
rc = (copy_to_user(args->lstio_tes_retp, &ret,
sizeof(ret))) ? -EFAULT : 0;
out:
- if (batch_name)
- LIBCFS_FREE(batch_name, args->lstio_tes_bat_nmlen + 1);
-
- if (src_name)
- LIBCFS_FREE(src_name, args->lstio_tes_sgrp_nmlen + 1);
-
- if (dst_name)
- LIBCFS_FREE(dst_name, args->lstio_tes_dgrp_nmlen + 1);
-
- if (param)
- LIBCFS_FREE(param, args->lstio_tes_param_len);
+ kfree(param);
return rc;
}
@@ -824,13 +701,13 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
if (data->ioc_plen1 > PAGE_SIZE)
return -EINVAL;
- LIBCFS_ALLOC(buf, data->ioc_plen1);
+ buf = kmalloc(data->ioc_plen1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* copy in parameter */
if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
- LIBCFS_FREE(buf, data->ioc_plen1);
+ kfree(buf);
return -EFAULT;
}
@@ -920,7 +797,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
out:
mutex_unlock(&console_session.ses_mutex);
- LIBCFS_FREE(buf, data->ioc_plen1);
+ kfree(buf);
return rc;
}
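
Every ioctl handler above repeated the same allocate/copy/free dance for a user-supplied name bounded by LST_NAME_SIZE; converting to a fixed stack buffer removes the -ENOMEM path entirely, which is what lets all the LIBCFS_ALLOC checks disappear. A hedged sketch of the shared pattern; lst_copy_name() is a hypothetical helper, not part of this patch:

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	#define DEMO_NAME_SIZE	32	/* stands in for LST_NAME_SIZE */

	/* buf must have room for DEMO_NAME_SIZE + 1 bytes */
	static int lst_copy_name(char *buf, const char __user *up, int nmlen)
	{
		if (!up || nmlen <= 0 || nmlen > DEMO_NAME_SIZE)
			return -EINVAL;		/* validate before copying */

		if (copy_from_user(buf, up, nmlen))
			return -EFAULT;

		buf[nmlen] = 0;			/* NUL-terminate */
		return 0;
	}

Callers simply declare "char name[DEMO_NAME_SIZE + 1];" on the stack, exactly as the hunks above do with LST_NAME_SIZE.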
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 6a0f770e0e24..7aa515c34594 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -129,7 +129,7 @@ lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned int feats,
spin_unlock(&console_session.ses_rpc_lock);
if (!crpc) {
- LIBCFS_ALLOC(crpc, sizeof(*crpc));
+ crpc = kzalloc(sizeof(*crpc), GFP_NOFS);
if (!crpc)
return -ENOMEM;
}
@@ -140,7 +140,7 @@ lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned int feats,
return 0;
}
- LIBCFS_FREE(crpc, sizeof(*crpc));
+ kfree(crpc);
return rc;
}
@@ -250,7 +250,7 @@ lstcon_rpc_trans_prep(struct list_head *translist, int transop,
}
/* create a trans group */
- LIBCFS_ALLOC(trans, sizeof(*trans));
+ trans = kzalloc(sizeof(*trans), GFP_NOFS);
if (!trans)
return -ENOMEM;
@@ -585,7 +585,7 @@ lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
CDEBUG(D_NET, "Transaction %s destroyed with %d pending RPCs\n",
lstcon_rpc_trans_name(trans->tas_opc), count);
- LIBCFS_FREE(trans, sizeof(*trans));
+ kfree(trans);
}
int
@@ -1369,7 +1369,7 @@ lstcon_rpc_cleanup_wait(void)
list_for_each_entry_safe(crpc, temp, &zlist, crp_link) {
list_del(&crpc->crp_link);
- LIBCFS_FREE(crpc, sizeof(struct lstcon_rpc));
+ kfree(crpc);
}
}
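
Note the allocator choice in conrpc.c: LIBCFS_ALLOC() returned zeroed memory, so the drop-in replacement is kzalloc(), and GFP_NOFS is kept on paths that can be entered while the filesystem is under reclaim. Where a caller fully initializes the object itself, plain kmalloc() would do; the conversion stays conservative. Sketch with an illustrative type:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_rpc {
		struct list_head dr_link;
		int dr_opc;
	};

	static struct demo_rpc *demo_rpc_alloc(void)
	{
		/* zeroed, reclaim-safe allocation, like crpc above */
		return kzalloc(sizeof(struct demo_rpc), GFP_NOFS);
	}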
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index a2662638d599..1acd5cb324b1 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -88,7 +88,7 @@ lstcon_node_find(struct lnet_process_id id, struct lstcon_node **ndpp,
if (!create)
return -ENOENT;
- LIBCFS_ALLOC(*ndpp, sizeof(**ndpp) + sizeof(*ndl));
+ *ndpp = kzalloc(sizeof(**ndpp) + sizeof(*ndl), GFP_KERNEL);
if (!*ndpp)
return -ENOMEM;
@@ -133,7 +133,7 @@ lstcon_node_put(struct lstcon_node *nd)
list_del(&ndl->ndl_link);
list_del(&ndl->ndl_hlink);
- LIBCFS_FREE(nd, sizeof(*nd) + sizeof(*ndl));
+ kfree(nd);
}
static int
@@ -166,7 +166,7 @@ lstcon_ndlink_find(struct list_head *hash, struct lnet_process_id id,
if (rc)
return rc;
- LIBCFS_ALLOC(ndl, sizeof(struct lstcon_ndlink));
+ ndl = kzalloc(sizeof(struct lstcon_ndlink), GFP_NOFS);
if (!ndl) {
lstcon_node_put(nd);
return -ENOMEM;
@@ -190,7 +190,7 @@ lstcon_ndlink_release(struct lstcon_ndlink *ndl)
list_del(&ndl->ndl_hlink); /* delete from hash */
lstcon_node_put(ndl->ndl_node);
- LIBCFS_FREE(ndl, sizeof(*ndl));
+ kfree(ndl);
}
static int
@@ -199,16 +199,16 @@ lstcon_group_alloc(char *name, struct lstcon_group **grpp)
struct lstcon_group *grp;
int i;
- LIBCFS_ALLOC(grp, offsetof(struct lstcon_group,
- grp_ndl_hash[LST_NODE_HASHSIZE]));
+ grp = kmalloc(offsetof(struct lstcon_group,
+ grp_ndl_hash[LST_NODE_HASHSIZE]),
+ GFP_KERNEL);
if (!grp)
return -ENOMEM;
grp->grp_ref = 1;
if (name) {
if (strlen(name) > sizeof(grp->grp_name) - 1) {
- LIBCFS_FREE(grp, offsetof(struct lstcon_group,
- grp_ndl_hash[LST_NODE_HASHSIZE]));
+ kfree(grp);
return -E2BIG;
}
strncpy(grp->grp_name, name, sizeof(grp->grp_name));
@@ -263,8 +263,7 @@ lstcon_group_decref(struct lstcon_group *grp)
for (i = 0; i < LST_NODE_HASHSIZE; i++)
LASSERT(list_empty(&grp->grp_ndl_hash[i]));
- LIBCFS_FREE(grp, offsetof(struct lstcon_group,
- grp_ndl_hash[LST_NODE_HASHSIZE]));
+ kfree(grp);
}
static int
@@ -807,7 +806,7 @@ lstcon_group_info(char *name, struct lstcon_ndlist_ent __user *gents_p,
}
/* non-verbose query */
- LIBCFS_ALLOC(gentp, sizeof(struct lstcon_ndlist_ent));
+ gentp = kzalloc(sizeof(struct lstcon_ndlist_ent), GFP_NOFS);
if (!gentp) {
CERROR("Can't allocate ndlist_ent\n");
lstcon_group_decref(grp);
@@ -821,7 +820,7 @@ lstcon_group_info(char *name, struct lstcon_ndlist_ent __user *gents_p,
rc = copy_to_user(gents_p, gentp,
sizeof(struct lstcon_ndlist_ent)) ? -EFAULT : 0;
- LIBCFS_FREE(gentp, sizeof(struct lstcon_ndlist_ent));
+ kfree(gentp);
lstcon_group_decref(grp);
@@ -856,35 +855,35 @@ lstcon_batch_add(char *name)
return rc;
}
- LIBCFS_ALLOC(bat, sizeof(struct lstcon_batch));
+ bat = kzalloc(sizeof(struct lstcon_batch), GFP_NOFS);
if (!bat) {
CERROR("Can't allocate descriptor for batch %s\n", name);
return -ENOMEM;
}
- LIBCFS_ALLOC(bat->bat_cli_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ bat->bat_cli_hash = kmalloc(sizeof(struct list_head) * LST_NODE_HASHSIZE,
+ GFP_KERNEL);
if (!bat->bat_cli_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
- LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
+ kfree(bat);
return -ENOMEM;
}
- LIBCFS_ALLOC(bat->bat_srv_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ bat->bat_srv_hash = kmalloc(sizeof(struct list_head) * LST_NODE_HASHSIZE,
+ GFP_KERNEL);
if (!bat->bat_srv_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
- LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
+ kfree(bat->bat_cli_hash);
+ kfree(bat);
return -ENOMEM;
}
if (strlen(name) > sizeof(bat->bat_name) - 1) {
- LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
+ kfree(bat->bat_srv_hash);
+ kfree(bat->bat_cli_hash);
+ kfree(bat);
return -E2BIG;
}
strncpy(bat->bat_name, name, sizeof(bat->bat_name));
@@ -971,7 +970,7 @@ lstcon_batch_info(char *name, struct lstcon_test_batch_ent __user *ent_up,
}
/* non-verbose query */
- LIBCFS_ALLOC(entp, sizeof(struct lstcon_test_batch_ent));
+ entp = kzalloc(sizeof(struct lstcon_test_batch_ent), GFP_NOFS);
if (!entp)
return -ENOMEM;
@@ -993,7 +992,7 @@ lstcon_batch_info(char *name, struct lstcon_test_batch_ent __user *ent_up,
rc = copy_to_user(ent_up, entp,
sizeof(struct lstcon_test_batch_ent)) ? -EFAULT : 0;
- LIBCFS_FREE(entp, sizeof(struct lstcon_test_batch_ent));
+ kfree(entp);
return rc;
}
@@ -1107,8 +1106,7 @@ lstcon_batch_destroy(struct lstcon_batch *bat)
lstcon_group_decref(test->tes_src_grp);
lstcon_group_decref(test->tes_dst_grp);
- LIBCFS_FREE(test, offsetof(struct lstcon_test,
- tes_param[test->tes_paramlen]));
+ kfree(test);
}
LASSERT(list_empty(&bat->bat_trans_list));
@@ -1134,11 +1132,9 @@ lstcon_batch_destroy(struct lstcon_batch *bat)
LASSERT(list_empty(&bat->bat_srv_hash[i]));
}
- LIBCFS_FREE(bat->bat_cli_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat->bat_srv_hash,
- sizeof(struct list_head) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
+ kfree(bat->bat_cli_hash);
+ kfree(bat->bat_srv_hash);
+ kfree(bat);
}
static int
@@ -1311,7 +1307,8 @@ lstcon_test_add(char *batch_name, int type, int loop,
if (dst_grp->grp_userland)
*retp = 1;
- LIBCFS_ALLOC(test, offsetof(struct lstcon_test, tes_param[paramlen]));
+ test = kzalloc(offsetof(struct lstcon_test, tes_param[paramlen]),
+ GFP_KERNEL);
if (!test) {
CERROR("Can't allocate test descriptor\n");
rc = -ENOMEM;
@@ -1357,8 +1354,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
/* hold groups so nobody can change them */
return rc;
out:
- if (test)
- LIBCFS_FREE(test, offsetof(struct lstcon_test, tes_param[paramlen]));
+ kfree(test);
if (dst_grp)
lstcon_group_decref(dst_grp);
@@ -1790,7 +1786,7 @@ lstcon_session_info(struct lst_sid __user *sid_up, int __user *key_up,
if (console_session.ses_state != LST_SESSION_ACTIVE)
return -ESRCH;
- LIBCFS_ALLOC(entp, sizeof(*entp));
+ entp = kzalloc(sizeof(*entp), GFP_NOFS);
if (!entp)
return -ENOMEM;
@@ -1807,7 +1803,7 @@ lstcon_session_info(struct lst_sid __user *sid_up, int __user *key_up,
copy_to_user(name_up, console_session.ses_name, len))
rc = -EFAULT;
- LIBCFS_FREE(entp, sizeof(*entp));
+ kfree(entp);
return rc;
}
@@ -2027,8 +2023,8 @@ lstcon_console_init(void)
INIT_LIST_HEAD(&console_session.ses_bat_list);
INIT_LIST_HEAD(&console_session.ses_trans_list);
- LIBCFS_ALLOC(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ console_session.ses_ndl_hash =
+ kmalloc(sizeof(struct list_head) * LST_GLOBAL_HASHSIZE, GFP_KERNEL);
if (!console_session.ses_ndl_hash)
return -ENOMEM;
@@ -2041,8 +2037,7 @@ lstcon_console_init(void)
rc = srpc_add_service(&lstcon_acceptor_service);
LASSERT(rc != -EBUSY);
if (rc) {
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ kfree(console_session.ses_ndl_hash);
return rc;
}
@@ -2064,8 +2059,7 @@ out:
srpc_shutdown_service(&lstcon_acceptor_service);
srpc_remove_service(&lstcon_acceptor_service);
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ kfree(console_session.ses_ndl_hash);
srpc_wait_service_shutdown(&lstcon_acceptor_service);
@@ -2099,8 +2093,7 @@ lstcon_console_fini(void)
for (i = 0; i < LST_NODE_HASHSIZE; i++)
LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
- LIBCFS_FREE(console_session.ses_ndl_hash,
- sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ kfree(console_session.ses_ndl_hash);
srpc_wait_service_shutdown(&lstcon_acceptor_service);
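
The hash-table allocations in console.c keep an open-coded "sizeof(struct list_head) * LST_NODE_HASHSIZE" because both factors are compile-time constants. For a count that comes from outside, kmalloc_array() (or kcalloc() for zeroed memory) would add an overflow check; a hedged sketch of that alternative, not what the patch itself does:

	#include <linux/slab.h>
	#include <linux/list.h>

	static struct list_head *demo_alloc_hash(unsigned int nbuckets)
	{
		struct list_head *hash;
		unsigned int i;

		/* overflow-checked equivalent of kmalloc(n * size) */
		hash = kmalloc_array(nbuckets, sizeof(*hash), GFP_KERNEL);
		if (!hash)
			return NULL;

		for (i = 0; i < nbuckets; i++)
			INIT_LIST_HEAD(&hash[i]);
		return hash;
	}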
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index fe889607ff3f..c7697f66f663 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -143,7 +143,7 @@ sfw_register_test(struct srpc_service *service,
return -EEXIST;
}
- LIBCFS_ALLOC(tsc, sizeof(struct sfw_test_case));
+ tsc = kzalloc(sizeof(struct sfw_test_case), GFP_NOFS);
if (!tsc)
return -ENOMEM;
@@ -344,7 +344,7 @@ sfw_bid2batch(struct lst_bid bid)
if (bat)
return bat;
- LIBCFS_ALLOC(bat, sizeof(struct sfw_batch));
+ bat = kzalloc(sizeof(struct sfw_batch), GFP_NOFS);
if (!bat)
return NULL;
@@ -447,7 +447,7 @@ sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
}
/* brand new or create by force */
- LIBCFS_ALLOC(sn, sizeof(struct sfw_session));
+ sn = kzalloc(sizeof(struct sfw_session), GFP_NOFS);
if (!sn) {
CERROR("dropping RPC mksn under memory pressure\n");
return -ENOMEM;
@@ -632,19 +632,19 @@ sfw_destroy_test_instance(struct sfw_test_instance *tsi)
tsu = list_entry(tsi->tsi_units.next,
struct sfw_test_unit, tsu_list);
list_del(&tsu->tsu_list);
- LIBCFS_FREE(tsu, sizeof(*tsu));
+ kfree(tsu);
}
while (!list_empty(&tsi->tsi_free_rpcs)) {
rpc = list_entry(tsi->tsi_free_rpcs.next,
struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
- LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+ kfree(rpc);
}
clean:
sfw_unload_test(tsi);
- LIBCFS_FREE(tsi, sizeof(*tsi));
+ kfree(tsi);
}
static void
@@ -662,7 +662,7 @@ sfw_destroy_batch(struct sfw_batch *tsb)
sfw_destroy_test_instance(tsi);
}
- LIBCFS_FREE(tsb, sizeof(struct sfw_batch));
+ kfree(tsb);
}
void
@@ -680,7 +680,7 @@ sfw_destroy_session(struct sfw_session *sn)
sfw_destroy_batch(batch);
}
- LIBCFS_FREE(sn, sizeof(*sn));
+ kfree(sn);
atomic_dec(&sfw_data.fw_nzombies);
}
@@ -740,7 +740,7 @@ sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
int i;
int rc;
- LIBCFS_ALLOC(tsi, sizeof(*tsi));
+ tsi = kzalloc(sizeof(*tsi), GFP_NOFS);
if (!tsi) {
CERROR("Can't allocate test instance for batch: %llu\n",
tsb->bat_id.bat_id);
@@ -763,7 +763,7 @@ sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
rc = sfw_load_test(tsi);
if (rc) {
- LIBCFS_FREE(tsi, sizeof(*tsi));
+ kfree(tsi);
return rc;
}
@@ -795,7 +795,7 @@ sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
sfw_unpack_id(id);
for (j = 0; j < tsi->tsi_concur; j++) {
- LIBCFS_ALLOC(tsu, sizeof(struct sfw_test_unit));
+ tsu = kzalloc(sizeof(struct sfw_test_unit), GFP_NOFS);
if (!tsu) {
rc = -ENOMEM;
CERROR("Can't allocate tsu for %d\n",
@@ -941,15 +941,13 @@ sfw_create_test_rpc(struct sfw_test_unit *tsu, struct lnet_process_id peer,
return 0;
}
-static int
+static void
sfw_run_test(struct swi_workitem *wi)
{
- struct sfw_test_unit *tsu = wi->swi_workitem.wi_data;
+ struct sfw_test_unit *tsu = container_of(wi, struct sfw_test_unit, tsu_worker);
struct sfw_test_instance *tsi = tsu->tsu_instance;
struct srpc_client_rpc *rpc = NULL;
- LASSERT(wi == &tsu->tsu_worker);
-
if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc)) {
LASSERT(!rpc);
goto test_done;
@@ -975,7 +973,7 @@ sfw_run_test(struct swi_workitem *wi)
rpc->crpc_timeout = rpc_timeout;
srpc_post_rpc(rpc);
spin_unlock(&rpc->crpc_lock);
- return 0;
+ return;
test_done:
/*
@@ -985,9 +983,7 @@ test_done:
* - my batch is still active; no one can run it again now.
* Cancel pending schedules and prevent future schedule attempts:
*/
- swi_exit_workitem(wi);
sfw_test_unit_done(tsu);
- return 1;
}
static int
@@ -1016,8 +1012,8 @@ sfw_run_batch(struct sfw_batch *tsb)
atomic_inc(&tsi->tsi_nactive);
tsu->tsu_loop = tsi->tsi_loop;
wi = &tsu->tsu_worker;
- swi_init_workitem(wi, tsu, sfw_run_test,
- lst_sched_test[lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
+ swi_init_workitem(wi, sfw_run_test,
+ lst_test_wq[lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
swi_schedule_workitem(wi);
}
}
@@ -1767,7 +1763,7 @@ sfw_shutdown(void)
struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
- LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+ kfree(rpc);
}
for (i = 0; ; i++) {
@@ -1785,6 +1781,6 @@ sfw_shutdown(void)
srpc_wait_service_shutdown(tsc->tsc_srv_service);
list_del(&tsc->tsc_list);
- LIBCFS_FREE(tsc, sizeof(*tsc));
+ kfree(tsc);
}
}
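
The sfw_run_test() change above shows the shape every handler takes after the workitem conversion: the function returns void instead of the old "return 1 means done, 0 means keep me scheduled" convention, and it recovers its object with container_of() instead of a wi_data pointer. An illustrative standalone version:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct demo_unit {
		struct work_struct	du_work;
		int			du_state;
	};

	static void demo_run(struct work_struct *work)
	{
		struct demo_unit *du =
			container_of(work, struct demo_unit, du_work);

		/* advance one step; re-queue or finish, but return void */
		du->du_state++;
	}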
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index 1d44d912f014..7359aa56d9b3 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -47,8 +47,8 @@ enum {
static int lst_init_step = LST_INIT_NONE;
-struct cfs_wi_sched *lst_sched_serial;
-struct cfs_wi_sched **lst_sched_test;
+struct workqueue_struct *lst_serial_wq;
+struct workqueue_struct **lst_test_wq;
static void
lnet_selftest_exit(void)
@@ -68,18 +68,16 @@ lnet_selftest_exit(void)
case LST_INIT_WI_TEST:
for (i = 0;
i < cfs_cpt_number(lnet_cpt_table()); i++) {
- if (!lst_sched_test[i])
+ if (!lst_test_wq[i])
continue;
- cfs_wi_sched_destroy(lst_sched_test[i]);
+ destroy_workqueue(lst_test_wq[i]);
}
- LIBCFS_FREE(lst_sched_test,
- sizeof(lst_sched_test[0]) *
- cfs_cpt_number(lnet_cpt_table()));
- lst_sched_test = NULL;
+ kvfree(lst_test_wq);
+ lst_test_wq = NULL;
/* fall through */
case LST_INIT_WI_SERIAL:
- cfs_wi_sched_destroy(lst_sched_serial);
- lst_sched_serial = NULL;
+ destroy_workqueue(lst_serial_wq);
+ lst_serial_wq = NULL;
case LST_INIT_NONE:
break;
default:
@@ -91,35 +89,45 @@ static int
lnet_selftest_init(void)
{
int nscheds;
- int rc;
+ int rc = -ENOMEM;
int i;
- rc = cfs_wi_sched_create("lst_s", lnet_cpt_table(), CFS_CPT_ANY,
- 1, &lst_sched_serial);
- if (rc) {
+ lst_serial_wq = alloc_ordered_workqueue("lst_s", 0);
+ if (!lst_serial_wq) {
CERROR("Failed to create serial WI scheduler for LST\n");
- return rc;
+ return -ENOMEM;
}
lst_init_step = LST_INIT_WI_SERIAL;
nscheds = cfs_cpt_number(lnet_cpt_table());
- LIBCFS_ALLOC(lst_sched_test, sizeof(lst_sched_test[0]) * nscheds);
- if (!lst_sched_test)
+ lst_test_wq = kvmalloc_array(nscheds, sizeof(lst_test_wq[0]),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!lst_test_wq) {
+ rc = -ENOMEM;
goto error;
+ }
lst_init_step = LST_INIT_WI_TEST;
for (i = 0; i < nscheds; i++) {
int nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
+ struct workqueue_attrs attrs = {0};
+ cpumask_var_t *mask = cfs_cpt_cpumask(lnet_cpt_table(), i);
/* reserve at least one CPU for LND */
nthrs = max(nthrs - 1, 1);
- rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
- nthrs, &lst_sched_test[i]);
- if (rc) {
+ lst_test_wq[i] = alloc_workqueue("lst_t", WQ_UNBOUND, nthrs);
+ if (!lst_test_wq[i]) {
CWARN("Failed to create CPU partition affinity WI scheduler %d for LST\n",
i);
+ rc = -ENOMEM;
goto error;
}
+
+ if (mask && alloc_cpumask_var(&attrs.cpumask, GFP_KERNEL)) {
+ cpumask_copy(attrs.cpumask, *mask);
+ apply_workqueue_attrs(lst_test_wq[i], &attrs);
+ free_cpumask_var(attrs.cpumask);
+ }
}
rc = srpc_startup();
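
module.c now builds its queues directly: alloc_ordered_workqueue() gives the strict one-at-a-time execution the old single-thread serial scheduler provided, and each per-partition test queue is an unbound workqueue pinned to its CPT's cpumask through workqueue_attrs. A sketch of that setup, assuming apply_workqueue_attrs() is reachable from the caller as it is from this module:

	#include <linux/workqueue.h>
	#include <linux/cpumask.h>

	static struct workqueue_struct *
	demo_make_cpt_wq(const struct cpumask *cpus, int max_active)
	{
		struct workqueue_attrs attrs = {0};
		struct workqueue_struct *wq;

		wq = alloc_workqueue("demo_t", WQ_UNBOUND, max_active);
		if (!wq)
			return NULL;

		/* pin the unbound queue's workers to the partition */
		if (alloc_cpumask_var(&attrs.cpumask, GFP_KERNEL)) {
			cpumask_copy(attrs.cpumask, cpus);
			apply_workqueue_attrs(wq, &attrs);
			free_cpumask_var(attrs.cpumask);
		}
		return wq;
	}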
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index ab7e8a8e58b8..f8198ad1046e 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -68,7 +68,7 @@ srpc_serv_portal(int svc_id)
}
/* forward ref's */
-int srpc_handle_rpc(struct swi_workitem *wi);
+void srpc_handle_rpc(struct swi_workitem *wi);
void srpc_get_counters(struct srpc_counters *cnt)
{
@@ -113,7 +113,7 @@ srpc_free_bulk(struct srpc_bulk *bk)
__free_page(pg);
}
- LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
+ kfree(bk);
}
struct srpc_bulk *
@@ -125,8 +125,8 @@ srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg,
LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
- LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
- offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
+ bk = kzalloc_cpt(offsetof(struct srpc_bulk, bk_iovs[bulk_npg]),
+ GFP_KERNEL, cpt);
if (!bk) {
CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
return NULL;
@@ -176,9 +176,9 @@ srpc_init_server_rpc(struct srpc_server_rpc *rpc,
struct srpc_buffer *buffer)
{
memset(rpc, 0, sizeof(*rpc));
- swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc,
+ swi_init_workitem(&rpc->srpc_wi, srpc_handle_rpc,
srpc_serv_is_framework(scd->scd_svc) ?
- lst_sched_serial : lst_sched_test[scd->scd_cpt]);
+ lst_serial_wq : lst_test_wq[scd->scd_cpt]);
rpc->srpc_ev.ev_fired = 1; /* no event expected now */
@@ -214,7 +214,7 @@ srpc_service_fini(struct srpc_service *svc)
buf = list_entry(q->next, struct srpc_buffer,
buf_list);
list_del(&buf->buf_list);
- LIBCFS_FREE(buf, sizeof(*buf));
+ kfree(buf);
}
}
@@ -225,7 +225,7 @@ srpc_service_fini(struct srpc_service *svc)
struct srpc_server_rpc,
srpc_list);
list_del(&rpc->srpc_list);
- LIBCFS_FREE(rpc, sizeof(*rpc));
+ kfree(rpc);
}
}
@@ -242,7 +242,7 @@ srpc_service_nrpcs(struct srpc_service *svc)
max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
}
-int srpc_add_buffer(struct swi_workitem *wi);
+void srpc_add_buffer(struct swi_workitem *wi);
static int
srpc_service_init(struct srpc_service *svc)
@@ -277,11 +277,11 @@ srpc_service_init(struct srpc_service *svc)
scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
/*
- * NB: don't use lst_sched_serial for adding buffer,
+ * NB: don't use lst_serial_wq for adding buffer,
* see details in srpc_service_add_buffers()
*/
- swi_init_workitem(&scd->scd_buf_wi, scd,
- srpc_add_buffer, lst_sched_test[i]);
+ swi_init_workitem(&scd->scd_buf_wi,
+ srpc_add_buffer, lst_test_wq[i]);
if (i && srpc_serv_is_framework(svc)) {
/*
@@ -294,8 +294,7 @@ srpc_service_init(struct srpc_service *svc)
}
for (j = 0; j < nrpcs; j++) {
- LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
- i, sizeof(*rpc));
+ rpc = kzalloc_cpt(sizeof(*rpc), GFP_NOFS, i);
if (!rpc) {
srpc_service_fini(svc);
return -ENOMEM;
@@ -508,16 +507,16 @@ __must_hold(&scd->scd_lock)
list_del(&buf->buf_list);
spin_unlock(&scd->scd_lock);
- LIBCFS_FREE(buf, sizeof(*buf));
+ kfree(buf);
spin_lock(&scd->scd_lock);
return rc;
}
-int
+void
srpc_add_buffer(struct swi_workitem *wi)
{
- struct srpc_service_cd *scd = wi->swi_workitem.wi_data;
+ struct srpc_service_cd *scd = container_of(wi, struct srpc_service_cd, scd_buf_wi);
struct srpc_buffer *buf;
int rc = 0;
@@ -535,7 +534,7 @@ srpc_add_buffer(struct swi_workitem *wi)
spin_unlock(&scd->scd_lock);
- LIBCFS_ALLOC(buf, sizeof(*buf));
+ buf = kzalloc(sizeof(*buf), GFP_NOFS);
if (!buf) {
CERROR("Failed to add new buf to service: %s\n",
scd->scd_svc->sv_name);
@@ -547,7 +546,7 @@ srpc_add_buffer(struct swi_workitem *wi)
spin_lock(&scd->scd_lock);
if (scd->scd_svc->sv_shuttingdown) {
spin_unlock(&scd->scd_lock);
- LIBCFS_FREE(buf, sizeof(*buf));
+ kfree(buf);
spin_lock(&scd->scd_lock);
rc = -ESHUTDOWN;
@@ -573,7 +572,6 @@ srpc_add_buffer(struct swi_workitem *wi)
}
spin_unlock(&scd->scd_lock);
- return 0;
}
int
@@ -605,15 +603,15 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
spin_lock(&scd->scd_lock);
/*
* NB: srpc_service_add_buffers() can be called inside
- * thread context of lst_sched_serial, and we don't normally
+ * thread context of lst_serial_wq, and we don't normally
* allow to sleep inside thread context of WI scheduler
* because it will block current scheduler thread from doing
* anything else, even worse, it could deadlock if it's
* waiting on result from another WI of the same scheduler.
* However, it's safe at here because scd_buf_wi is scheduled
- * by thread in a different WI scheduler (lst_sched_test),
+ * by thread in a different WI scheduler (lst_test_wq),
* so we don't have any risk of deadlock, though this could
- * block all WIs pending on lst_sched_serial for a moment
+ * block all WIs pending on lst_serial_wq for a moment
* which is not good but not fatal.
*/
lst_wait_until(scd->scd_buf_err ||
@@ -660,11 +658,9 @@ srpc_finish_service(struct srpc_service *sv)
LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */
cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+ swi_cancel_workitem(&scd->scd_buf_wi);
+
spin_lock(&scd->scd_lock);
- if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
- spin_unlock(&scd->scd_lock);
- return 0;
- }
if (scd->scd_buf_nposted > 0) {
CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
@@ -680,11 +676,9 @@ srpc_finish_service(struct srpc_service *sv)
rpc = list_entry(scd->scd_rpc_active.next,
struct srpc_server_rpc, srpc_list);
- CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
+ CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s, ev fired %d type %d status %d lnet %d\n",
rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
swi_state2str(rpc->srpc_wi.swi_state),
- rpc->srpc_wi.swi_workitem.wi_scheduled,
- rpc->srpc_wi.swi_workitem.wi_running,
rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
spin_unlock(&scd->scd_lock);
@@ -725,7 +719,7 @@ __must_hold(&scd->scd_lock)
}
spin_unlock(&scd->scd_lock);
- LIBCFS_FREE(buf, sizeof(*buf));
+ kfree(buf);
spin_lock(&scd->scd_lock);
}
@@ -947,7 +941,6 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
* Cancel pending schedules and prevent future schedule attempts:
*/
LASSERT(rpc->srpc_ev.ev_fired);
- swi_exit_workitem(&rpc->srpc_wi);
if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
buffer = list_entry(scd->scd_buf_blocked.next,
@@ -965,10 +958,10 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
}
/* handles an incoming RPC */
-int
+void
srpc_handle_rpc(struct swi_workitem *wi)
{
- struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
+ struct srpc_server_rpc *rpc = container_of(wi, struct srpc_server_rpc, srpc_wi);
struct srpc_service_cd *scd = rpc->srpc_scd;
struct srpc_service *sv = scd->scd_svc;
struct srpc_event *ev = &rpc->srpc_ev;
@@ -987,9 +980,8 @@ srpc_handle_rpc(struct swi_workitem *wi)
if (ev->ev_fired) { /* no more event, OK to finish */
srpc_server_rpc_done(rpc, -ESHUTDOWN);
- return 1;
}
- return 0;
+ return;
}
spin_unlock(&scd->scd_lock);
@@ -1007,7 +999,7 @@ srpc_handle_rpc(struct swi_workitem *wi)
if (!msg->msg_magic) {
/* moaned already in srpc_lnet_ev_handler */
srpc_server_rpc_done(rpc, EBADMSG);
- return 1;
+ return;
}
srpc_unpack_msg_hdr(msg);
@@ -1023,7 +1015,7 @@ srpc_handle_rpc(struct swi_workitem *wi)
LASSERT(!reply->status || !rpc->srpc_bulk);
if (rc) {
srpc_server_rpc_done(rpc, rc);
- return 1;
+ return;
}
}
@@ -1032,7 +1024,7 @@ srpc_handle_rpc(struct swi_workitem *wi)
if (rpc->srpc_bulk) {
rc = srpc_do_bulk(rpc);
if (!rc)
- return 0; /* wait for bulk */
+ return; /* wait for bulk */
LASSERT(ev->ev_fired);
ev->ev_status = rc;
@@ -1050,16 +1042,16 @@ srpc_handle_rpc(struct swi_workitem *wi)
if (rc) {
srpc_server_rpc_done(rpc, rc);
- return 1;
+ return;
}
}
wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
rc = srpc_send_reply(rpc);
if (!rc)
- return 0; /* wait for reply */
+ return; /* wait for reply */
srpc_server_rpc_done(rpc, rc);
- return 1;
+ return;
case SWI_STATE_REPLY_SUBMITTED:
if (!ev->ev_fired) {
@@ -1072,10 +1064,8 @@ srpc_handle_rpc(struct swi_workitem *wi)
wi->swi_state = SWI_STATE_DONE;
srpc_server_rpc_done(rpc, ev->ev_status);
- return 1;
+ return;
}
-
- return 0;
}
static void
@@ -1170,7 +1160,6 @@ srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
* Cancel pending schedules and prevent future schedule attempts:
*/
LASSERT(!srpc_event_pending(rpc));
- swi_exit_workitem(wi);
spin_unlock(&rpc->crpc_lock);
@@ -1178,7 +1167,7 @@ srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
}
/* sends an outgoing RPC */
-int
+void
srpc_send_rpc(struct swi_workitem *wi)
{
int rc = 0;
@@ -1188,7 +1177,7 @@ srpc_send_rpc(struct swi_workitem *wi)
LASSERT(wi);
- rpc = wi->swi_workitem.wi_data;
+ rpc = container_of(wi, struct srpc_client_rpc, crpc_wi);
LASSERT(rpc);
LASSERT(wi == &rpc->crpc_wi);
@@ -1214,7 +1203,7 @@ srpc_send_rpc(struct swi_workitem *wi)
rc = srpc_prepare_reply(rpc);
if (rc) {
srpc_client_rpc_done(rpc, rc);
- return 1;
+ return;
}
rc = srpc_prepare_bulk(rpc);
@@ -1291,7 +1280,7 @@ srpc_send_rpc(struct swi_workitem *wi)
wi->swi_state = SWI_STATE_DONE;
srpc_client_rpc_done(rpc, rc);
- return 1;
+ return;
}
if (rc) {
@@ -1308,10 +1297,9 @@ abort:
if (!srpc_event_pending(rpc)) {
srpc_client_rpc_done(rpc, -EINTR);
- return 1;
+ return;
}
}
- return 0;
}
struct srpc_client_rpc *
@@ -1322,8 +1310,8 @@ srpc_create_client_rpc(struct lnet_process_id peer, int service,
{
struct srpc_client_rpc *rpc;
- LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
- crpc_bulk.bk_iovs[nbulkiov]));
+ rpc = kzalloc(offsetof(struct srpc_client_rpc,
+ crpc_bulk.bk_iovs[nbulkiov]), GFP_KERNEL);
if (!rpc)
return NULL;
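
rpc.c loses both swi_exit_workitem() calls and the deschedule/retry loop in srpc_finish_service() because cancel_work_sync() subsumes them: it removes a pending item and waits out a running handler in one call, so the caller needs no "scheduled/running" bookkeeping afterwards. A minimal illustration:

	#include <linux/workqueue.h>

	struct demo_svc {
		struct work_struct ds_buf_work;
	};

	static void demo_finish(struct demo_svc *ds)
	{
		/*
		 * On return the handler is guaranteed not to be running
		 * and will not run again unless re-queued.
		 */
		cancel_work_sync(&ds->ds_buf_work);
	}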
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 8c10f0f149d5..ad04534f000c 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -169,11 +169,11 @@ struct srpc_buffer {
};
struct swi_workitem;
-typedef int (*swi_action_t) (struct swi_workitem *);
+typedef void (*swi_action_t) (struct swi_workitem *);
struct swi_workitem {
- struct cfs_wi_sched *swi_sched;
- struct cfs_workitem swi_workitem;
+ struct workqueue_struct *swi_wq;
+ struct work_struct swi_work;
swi_action_t swi_action;
int swi_state;
};
@@ -444,7 +444,7 @@ void srpc_free_bulk(struct srpc_bulk *bk);
struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned int off,
unsigned int bulk_npg, unsigned int bulk_len,
int sink);
-int srpc_send_rpc(struct swi_workitem *wi);
+void srpc_send_rpc(struct swi_workitem *wi);
int srpc_send_reply(struct srpc_server_rpc *rpc);
int srpc_add_service(struct srpc_service *sv);
int srpc_remove_service(struct srpc_service *sv);
@@ -456,8 +456,8 @@ void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer);
void srpc_get_counters(struct srpc_counters *cnt);
void srpc_set_counters(const struct srpc_counters *cnt);
-extern struct cfs_wi_sched *lst_sched_serial;
-extern struct cfs_wi_sched **lst_sched_test;
+extern struct workqueue_struct *lst_serial_wq;
+extern struct workqueue_struct **lst_test_wq;
static inline int
srpc_serv_is_framework(struct srpc_service *svc)
@@ -465,42 +465,36 @@ srpc_serv_is_framework(struct srpc_service *svc)
return svc->sv_id < SRPC_FRAMEWORK_SERVICE_MAX_ID;
}
-static inline int
-swi_wi_action(struct cfs_workitem *wi)
+static void
+swi_wi_action(struct work_struct *wi)
{
struct swi_workitem *swi;
- swi = container_of(wi, struct swi_workitem, swi_workitem);
+ swi = container_of(wi, struct swi_workitem, swi_work);
- return swi->swi_action(swi);
+ swi->swi_action(swi);
}
static inline void
-swi_init_workitem(struct swi_workitem *swi, void *data,
- swi_action_t action, struct cfs_wi_sched *sched)
+swi_init_workitem(struct swi_workitem *swi,
+ swi_action_t action, struct workqueue_struct *wq)
{
- swi->swi_sched = sched;
+ swi->swi_wq = wq;
swi->swi_action = action;
swi->swi_state = SWI_STATE_NEWBORN;
- cfs_wi_init(&swi->swi_workitem, data, swi_wi_action);
+ INIT_WORK(&swi->swi_work, swi_wi_action);
}
static inline void
swi_schedule_workitem(struct swi_workitem *wi)
{
- cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem);
-}
-
-static inline void
-swi_exit_workitem(struct swi_workitem *swi)
-{
- cfs_wi_exit(swi->swi_sched, &swi->swi_workitem);
+ queue_work(wi->swi_wq, &wi->swi_work);
}
static inline int
-swi_deschedule_workitem(struct swi_workitem *swi)
+swi_cancel_workitem(struct swi_workitem *swi)
{
- return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
+ return cancel_work_sync(&swi->swi_work);
}
int sfw_startup(void);
@@ -516,7 +510,7 @@ srpc_destroy_client_rpc(struct srpc_client_rpc *rpc)
LASSERT(!atomic_read(&rpc->crpc_refcount));
if (!rpc->crpc_fini)
- LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+ kfree(rpc);
else
(*rpc->crpc_fini)(rpc);
}
@@ -533,8 +527,8 @@ srpc_init_client_rpc(struct srpc_client_rpc *rpc, struct lnet_process_id peer,
crpc_bulk.bk_iovs[nbulkiov]));
INIT_LIST_HEAD(&rpc->crpc_list);
- swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc,
- lst_sched_test[lnet_cpt_of_nid(peer.nid)]);
+ swi_init_workitem(&rpc->crpc_wi, srpc_send_rpc,
+ lst_test_wq[lnet_cpt_of_nid(peer.nid)]);
spin_lock_init(&rpc->crpc_lock);
atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
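
The selftest.h wrapper keeps a typed swi_action_t callback on top of work_struct, so call sites stay readable while swi_wi_action() does the container_of() trampoline once. Usage, assuming this header is in scope; the action body is illustrative:

	static void demo_action(struct swi_workitem *swi)
	{
		/* swi->swi_state drives the caller's state machine */
	}

	static void demo_start(struct swi_workitem *swi,
			       struct workqueue_struct *wq)
	{
		swi_init_workitem(swi, demo_action, wq); /* INIT_WORK inside */
		swi_schedule_workitem(swi);		 /* queue_work inside */
	}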
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 7d6a7106c0a5..ecf8b9e1ed5c 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -213,19 +213,18 @@ static inline void fld_cache_entry_add(struct fld_cache *cache,
*/
static int fld_cache_shrink(struct fld_cache *cache)
{
- struct fld_cache_entry *flde;
- struct list_head *curr;
int num = 0;
if (cache->fci_cache_count < cache->fci_cache_size)
return 0;
- curr = cache->fci_lru.prev;
-
while (cache->fci_cache_count + cache->fci_threshold >
- cache->fci_cache_size && curr != &cache->fci_lru) {
- flde = list_entry(curr, struct fld_cache_entry, fce_lru);
- curr = curr->prev;
+ cache->fci_cache_size &&
+ !list_empty(&cache->fci_lru)) {
+ struct fld_cache_entry *flde =
+ list_last_entry(&cache->fci_lru,
+ struct fld_cache_entry, fce_lru);
+
fld_cache_entry_delete(cache, flde);
num++;
}
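
fld_cache_shrink() is rewritten from a saved-cursor walk to "take the list tail until satisfied", which cannot be invalidated by the delete it performs. The same shape with illustrative types; freeing is left to whatever owns the entries:

	#include <linux/list.h>

	struct demo_entry {
		struct list_head de_lru;
	};

	static int demo_shrink(struct list_head *lru, int want)
	{
		int num = 0;

		while (num < want && !list_empty(lru)) {
			struct demo_entry *de =
				list_last_entry(lru, struct demo_entry,
						de_lru);

			list_del(&de->de_lru);	/* then free/recycle de */
			num++;
		}
		return num;
	}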
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 8f1a22527006..100e993ab00b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -141,9 +141,9 @@ struct lustre_sb_info {
/* obd_mount.c */
int lustre_start_mgc(struct super_block *sb);
-void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
- struct vfsmount *mnt));
-void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb));
+void lustre_register_super_ops(struct module *mod,
+ int (*cfs)(struct super_block *sb),
+ void (*ksc)(struct super_block *sb));
int lustre_common_put_super(struct super_block *sb);
int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type);
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index a40f706a53a1..64b6fd4fed8f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -341,8 +341,8 @@ void sptlrpc_conf_client_adapt(struct obd_device *obd);
#define SPTLRPC_MAX_PAYLOAD (1024)
struct vfs_cred {
- uint32_t vc_uid;
- uint32_t vc_gid;
+ u32 vc_uid;
+ u32 vc_gid;
};
struct ptlrpc_ctx_ops {
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 67c535c5aa98..531e8ddfa9e5 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -40,22 +40,19 @@
#include <lustre_lib.h>
#include <lprocfs_status.h>
-#define OBD_STATFS_NODELAY 0x0001 /* requests should be send without delay
- * and resends for avoid deadlocks
- */
-#define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update
- * obd_osfs_age
- */
-#define OBD_STATFS_FOR_MDT0 0x0004 /* The statfs is only for retrieving
- * information from MDT0.
- */
+/* requests should be sent without delay, with resends, to avoid deadlocks */
+#define OBD_STATFS_NODELAY 0x0001
+/* the statfs callback should not update obd_osfs_age */
+#define OBD_STATFS_FROM_CACHE 0x0002
+/* the statfs is only for retrieving information from MDT0 */
+#define OBD_STATFS_FOR_MDT0 0x0004
/* OBD Device Declarations */
extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
extern rwlock_t obd_dev_lock;
/* OBD Operations Declarations */
-struct obd_device *class_exp2obd(struct obd_export *);
+struct obd_device *class_exp2obd(struct obd_export *exp);
int class_handle_ioctl(unsigned int cmd, unsigned long arg);
int lustre_get_jobid(char *jobid);
@@ -63,10 +60,10 @@ struct lu_device_type;
/* genops.c */
extern struct list_head obd_types;
-struct obd_export *class_conn2export(struct lustre_handle *);
-int class_register_type(struct obd_ops *, struct md_ops *,
- const char *nm, struct lu_device_type *ldt);
-int class_unregister_type(const char *nm);
+struct obd_export *class_conn2export(struct lustre_handle *conn);
+int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
+ const char *name, struct lu_device_type *ldt);
+int class_unregister_type(const char *name);
struct obd_device *class_newdev(const char *type_name, const char *name);
void class_release_dev(struct obd_device *obd);
@@ -137,7 +134,7 @@ int class_config_llog_handler(const struct lu_env *env,
struct llog_rec_hdr *rec, void *data);
int class_add_uuid(const char *uuid, __u64 nid);
-/*obdecho*/
+/* obdecho */
void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars);
#define CFG_F_START 0x01 /* Set when we start updating from a log */
@@ -148,13 +145,13 @@ void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars);
/* Passed as data param to class_config_parse_llog */
struct config_llog_instance {
- char *cfg_obdname;
- void *cfg_instance;
+ char *cfg_obdname;
+ void *cfg_instance;
struct super_block *cfg_sb;
struct obd_uuid cfg_uuid;
llog_cb_t cfg_callback;
- int cfg_last_idx; /* for partial llog processing */
- int cfg_flags;
+ int cfg_last_idx; /* for partial llog processing */
+ int cfg_flags;
};
int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
@@ -172,30 +169,31 @@ enum {
/* list of active configuration logs */
struct config_llog_data {
- struct ldlm_res_id cld_resid;
+ struct ldlm_res_id cld_resid;
struct config_llog_instance cld_cfg;
- struct list_head cld_list_chain;
- atomic_t cld_refcount;
+ struct list_head cld_list_chain;
+ atomic_t cld_refcount;
struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */
struct config_llog_data *cld_params; /* common parameters log */
struct config_llog_data *cld_recover;/* imperative recover log */
- struct obd_export *cld_mgcexp;
+ struct obd_export *cld_mgcexp;
struct mutex cld_lock;
- int cld_type;
- unsigned int cld_stopping:1, /* we were told to stop
- * watching
- */
- cld_lostlock:1; /* lock not requeued */
- char cld_logname[0];
+ int cld_type;
+ unsigned int cld_stopping:1, /*
+ * we were told to stop
+ * watching
+ */
+ cld_lostlock:1; /* lock not requeued */
+ char cld_logname[0];
};
struct lustre_profile {
- struct list_head lp_list;
- char *lp_profile;
- char *lp_dt;
- char *lp_md;
- int lp_refs;
- bool lp_list_deleted;
+ struct list_head lp_list;
+ char *lp_profile;
+ char *lp_dt;
+ char *lp_md;
+ int lp_refs;
+ bool lp_list_deleted;
};
struct lustre_profile *class_get_profile(const char *prof);
@@ -205,9 +203,11 @@ void class_del_profiles(void);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
-void __class_export_add_lock_ref(struct obd_export *, struct ldlm_lock *);
-void __class_export_del_lock_ref(struct obd_export *, struct ldlm_lock *);
-extern void (*class_export_dump_hook)(struct obd_export *);
+void __class_export_add_lock_ref(struct obd_export *exp,
+ struct ldlm_lock *lock);
+void __class_export_del_lock_ref(struct obd_export *exp,
+ struct ldlm_lock *lock);
+extern void (*class_export_dump_hook)(struct obd_export *exp);
#else
@@ -223,8 +223,8 @@ struct obd_export *class_new_export(struct obd_device *obddev,
struct obd_uuid *cluuid);
void class_unlink_export(struct obd_export *exp);
-struct obd_import *class_import_get(struct obd_import *);
-void class_import_put(struct obd_import *);
+struct obd_import *class_import_get(struct obd_import *imp);
+void class_import_put(struct obd_import *imp);
struct obd_import *class_new_import(struct obd_device *obd);
void class_destroy_import(struct obd_import *exp);
@@ -299,7 +299,8 @@ void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj);
#define MDP(dev, op) (dev)->obd_type->typ_md_ops->op
#define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op
-/* Ensure obd_setup: used for cleanup which must be called
+/*
+ * Ensure obd_setup: used for cleanup which must be called
* while obd is stopping
*/
static inline int obd_check_dev(struct obd_device *obd)
@@ -326,108 +327,116 @@ static inline int obd_check_dev_active(struct obd_device *obd)
return rc;
}
-#define OBD_COUNTER_OFFSET(op) \
- ((offsetof(struct obd_ops, op) - \
- offsetof(struct obd_ops, iocontrol)) \
- / sizeof(((struct obd_ops *)(0))->iocontrol))
-
-#define OBD_COUNTER_INCREMENT(obdx, op) \
- if ((obdx)->obd_stats) { \
- unsigned int coffset; \
- coffset = (unsigned int)((obdx)->obd_cntr_base) + \
- OBD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (obdx)->obd_stats->ls_num); \
- lprocfs_counter_incr((obdx)->obd_stats, coffset); \
- }
+#define OBD_COUNTER_OFFSET(op) \
+ ((offsetof(struct obd_ops, op) - \
+ offsetof(struct obd_ops, iocontrol)) \
+ / sizeof(((struct obd_ops *)(0))->iocontrol))
-#define EXP_COUNTER_INCREMENT(export, op) \
- if ((export)->exp_obd->obd_stats) { \
+#define OBD_COUNTER_INCREMENT(obdx, op) \
+do { \
+ if ((obdx)->obd_stats) { \
unsigned int coffset; \
+ coffset = (unsigned int)((obdx)->obd_cntr_base) + \
+ OBD_COUNTER_OFFSET(op); \
+ LASSERT(coffset < (obdx)->obd_stats->ls_num); \
+ lprocfs_counter_incr((obdx)->obd_stats, coffset); \
+ } \
+} while (0)
+
+#define EXP_COUNTER_INCREMENT(export, op) \
+do { \
+ if ((export)->exp_obd->obd_stats) { \
+ unsigned int coffset; \
coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \
- OBD_COUNTER_OFFSET(op); \
+ OBD_COUNTER_OFFSET(op); \
LASSERT(coffset < (export)->exp_obd->obd_stats->ls_num); \
lprocfs_counter_incr((export)->exp_obd->obd_stats, coffset); \
- }
+ } \
+} while (0)
-#define MD_COUNTER_OFFSET(op) \
- ((offsetof(struct md_ops, op) - \
- offsetof(struct md_ops, getstatus)) \
- / sizeof(((struct md_ops *)(0))->getstatus))
+#define MD_COUNTER_OFFSET(op) \
+ ((offsetof(struct md_ops, op) - \
+ offsetof(struct md_ops, getstatus)) \
+ / sizeof(((struct md_ops *)(0))->getstatus))
-#define MD_COUNTER_INCREMENT(obdx, op) \
- if ((obd)->md_stats) { \
- unsigned int coffset; \
+#define MD_COUNTER_INCREMENT(obdx, op) \
+do { \
+ if ((obd)->md_stats) { \
+ unsigned int coffset; \
coffset = (unsigned int)((obdx)->md_cntr_base) + \
- MD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (obdx)->md_stats->ls_num); \
+ MD_COUNTER_OFFSET(op); \
+ LASSERT(coffset < (obdx)->md_stats->ls_num); \
lprocfs_counter_incr((obdx)->md_stats, coffset); \
- }
+ } \
+} while (0)
-#define EXP_MD_COUNTER_INCREMENT(export, op) \
- if ((export)->exp_obd->obd_stats) { \
- unsigned int coffset; \
- coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \
- MD_COUNTER_OFFSET(op); \
- LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \
- lprocfs_counter_incr((export)->exp_obd->md_stats, coffset); \
- if ((export)->exp_md_stats) \
- lprocfs_counter_incr( \
+#define EXP_MD_COUNTER_INCREMENT(export, op) \
+do { \
+ if ((export)->exp_obd->obd_stats) { \
+ unsigned int coffset; \
+ coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \
+ MD_COUNTER_OFFSET(op); \
+ LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \
+ lprocfs_counter_incr((export)->exp_obd->md_stats, coffset); \
+ if ((export)->exp_md_stats) \
+ lprocfs_counter_incr( \
(export)->exp_md_stats, coffset); \
- }
+ } \
+} while (0)
#define EXP_CHECK_MD_OP(exp, op) \
-do { \
- if (!(exp)) { \
- CERROR("obd_" #op ": NULL export\n"); \
- return -ENODEV; \
- } \
- if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \
+do { \
+ if (!(exp)) { \
+ CERROR("obd_" #op ": NULL export\n"); \
+ return -ENODEV; \
+ } \
+ if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \
CERROR("obd_" #op ": cleaned up obd\n"); \
- return -EOPNOTSUPP; \
- } \
- if (!OBT((exp)->exp_obd) || !MDP((exp)->exp_obd, op)) { \
+ return -EOPNOTSUPP; \
+ } \
+ if (!OBT((exp)->exp_obd) || !MDP((exp)->exp_obd, op)) { \
CERROR("obd_" #op ": dev %s/%d no operation\n", \
- (exp)->exp_obd->obd_name, \
- (exp)->exp_obd->obd_minor); \
- return -EOPNOTSUPP; \
- } \
+ (exp)->exp_obd->obd_name, \
+ (exp)->exp_obd->obd_minor); \
+ return -EOPNOTSUPP; \
+ } \
} while (0)
-#define OBD_CHECK_DT_OP(obd, op, err) \
-do { \
- if (!OBT(obd) || !OBP((obd), op)) { \
- if (err) \
- CERROR("obd_" #op ": dev %d no operation\n", \
- obd->obd_minor); \
- return err; \
- } \
+#define OBD_CHECK_DT_OP(obd, op, err) \
+do { \
+ if (!OBT(obd) || !OBP((obd), op)) { \
+ if (err) \
+ CERROR("obd_" #op ": dev %d no operation\n", \
+ obd->obd_minor); \
+ return err; \
+ } \
} while (0)
#define EXP_CHECK_DT_OP(exp, op) \
-do { \
- if (!(exp)) { \
- CERROR("obd_" #op ": NULL export\n"); \
- return -ENODEV; \
- } \
- if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \
+do { \
+ if (!(exp)) { \
+ CERROR("obd_" #op ": NULL export\n"); \
+ return -ENODEV; \
+ } \
+ if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \
CERROR("obd_" #op ": cleaned up obd\n"); \
- return -EOPNOTSUPP; \
- } \
- if (!OBT((exp)->exp_obd) || !OBP((exp)->exp_obd, op)) { \
- CERROR("obd_" #op ": dev %d no operation\n", \
- (exp)->exp_obd->obd_minor); \
- return -EOPNOTSUPP; \
- } \
+ return -EOPNOTSUPP; \
+ } \
+ if (!OBT((exp)->exp_obd) || !OBP((exp)->exp_obd, op)) { \
+ CERROR("obd_" #op ": dev %d no operation\n", \
+ (exp)->exp_obd->obd_minor); \
+ return -EOPNOTSUPP; \
+ } \
} while (0)
-#define CTXT_CHECK_OP(ctxt, op, err) \
-do { \
- if (!OBT(ctxt->loc_obd) || !CTXTP((ctxt), op)) { \
- if (err) \
- CERROR("lop_" #op ": dev %d no operation\n", \
- ctxt->loc_obd->obd_minor); \
- return err; \
- } \
+#define CTXT_CHECK_OP(ctxt, op, err) \
+do { \
+ if (!OBT(ctxt->loc_obd) || !CTXTP((ctxt), op)) { \
+ if (err) \
+ CERROR("lop_" #op ": dev %d no operation\n", \
+ ctxt->loc_obd->obd_minor); \
+ return err; \
+ } \
} while (0)
static inline int class_devno_max(void)
@@ -481,14 +490,11 @@ static inline int obd_set_info_async(const struct lu_env *env,
* obd_precleanup() and obd_cleanup() call both lu_device and obd operations.
*/
-#define DECLARE_LU_VARS(ldt, d) \
- struct lu_device_type *ldt; \
- struct lu_device *d
-
static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
{
int rc;
- DECLARE_LU_VARS(ldt, d);
+ struct lu_device_type *ldt;
+ struct lu_device *d;
ldt = obd->obd_type->typ_lu;
if (ldt) {
@@ -526,7 +532,8 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
static inline int obd_precleanup(struct obd_device *obd)
{
int rc;
- DECLARE_LU_VARS(ldt, d);
+ struct lu_device_type *ldt;
+ struct lu_device *d;
rc = obd_check_dev(obd);
if (rc)
@@ -552,7 +559,8 @@ static inline int obd_precleanup(struct obd_device *obd)
static inline int obd_cleanup(struct obd_device *obd)
{
int rc;
- DECLARE_LU_VARS(ldt, d);
+ struct lu_device_type *ldt;
+ struct lu_device *d;
rc = obd_check_dev(obd);
if (rc)
@@ -579,7 +587,8 @@ static inline int obd_cleanup(struct obd_device *obd)
static inline void obd_cleanup_client_import(struct obd_device *obd)
{
- /* If we set up but never connected, the
+ /*
+ * If we set up but never connected, the
* client import will not have been cleaned.
*/
down_write(&obd->u.cli.cl_sem);
@@ -600,7 +609,8 @@ static inline int
obd_process_config(struct obd_device *obd, int datalen, void *data)
{
int rc;
- DECLARE_LU_VARS(ldt, d);
+ struct lu_device_type *ldt;
+ struct lu_device *d;
rc = obd_check_dev(obd);
if (rc)
@@ -717,7 +727,8 @@ static inline struct obd_uuid *obd_get_uuid(struct obd_export *exp)
return uuid;
}
-/** Create a new /a exp on device /a obd for the uuid /a cluuid
+/*
+ * Create a new \a exp on device \a obd for the uuid \a cluuid
* @param exp New export handle
* @param d Connect data, supported flags are set, flags also understood
* by obd are returned.
@@ -729,7 +740,8 @@ static inline int obd_connect(const struct lu_env *env,
void *localdata)
{
int rc;
- __u64 ocf = data ? data->ocd_connect_flags : 0; /* for post-condition
+ __u64 ocf = data ? data->ocd_connect_flags : 0; /*
+ * for post-condition
* check
*/
@@ -838,7 +850,9 @@ static inline int obd_pool_del(struct obd_device *obd, char *poolname)
return rc;
}
-static inline int obd_pool_add(struct obd_device *obd, char *poolname, char *ostname)
+static inline int obd_pool_add(struct obd_device *obd,
+ char *poolname,
+ char *ostname)
{
int rc;
@@ -849,7 +863,9 @@ static inline int obd_pool_add(struct obd_device *obd, char *poolname, char *ost
return rc;
}
-static inline int obd_pool_rem(struct obd_device *obd, char *poolname, char *ostname)
+static inline int obd_pool_rem(struct obd_device *obd,
+ char *poolname,
+ char *ostname)
{
int rc;
@@ -894,7 +910,8 @@ static inline int obd_destroy_export(struct obd_export *exp)
return 0;
}
-/* @max_age is the oldest time in jiffies that we accept using a cached data.
+/*
+ * @max_age is the oldest time in jiffies that we accept using a cached data.
* If the cache is older than @max_age we will get a new value from the
* target. Use a value of "cfs_time_current() + HZ" to guarantee freshness.
*/
@@ -955,7 +972,8 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
return rc;
}
-/* @max_age is the oldest time in jiffies that we accept using a cached data.
+/*
+ * @max_age is the oldest time in jiffies that we accept using a cached data.
* If the cache is older than @max_age we will get a new value from the
* target. Use a value of "cfs_time_current() + HZ" to guarantee freshness.
*/
@@ -983,7 +1001,8 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
spin_unlock(&obd->obd_osfs_lock);
}
} else {
- CDEBUG(D_SUPER, "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
+ CDEBUG(D_SUPER,
+ "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
obd->obd_name, &obd->obd_osfs,
obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
@@ -1118,7 +1137,8 @@ static inline int obd_quotactl(struct obd_export *exp,
static inline int obd_health_check(const struct lu_env *env,
struct obd_device *obd)
{
- /* returns: 0 on healthy
+ /*
+ * returns: 0 on healthy
* >0 on unhealthy + reason code/flag
* however the only supported reason == 1 right now
* We'll need to define some better reasons
@@ -1491,7 +1511,8 @@ static inline int md_get_fid_from_lsm(struct obd_export *exp,
return rc;
}
-/* Unpack an MD struct from disk to in-memory format.
+/*
+ * Unpack an MD struct from disk to in-memory format.
* Returns +ve size of unpacked MD (0 for free), or -ve error.
*
* If *plsm != NULL and lmm == NULL then *lsm will be freed.
@@ -1523,11 +1544,12 @@ struct lwp_register_item {
struct obd_export **lri_exp;
register_lwp_cb lri_cb_func;
void *lri_cb_data;
- struct list_head lri_list;
+ struct list_head lri_list;
char lri_name[MTI_NAME_MAXLEN];
};
-/* I'm as embarrassed about this as you are.
+/*
+ * I'm as embarrassed about this as you are.
*
* <shaver> // XXX do not look into _superhack with remaining eye
* <shaver> // XXX if this were any uglier, I'd get my own show on MTV
@@ -1562,7 +1584,8 @@ int class_procfs_init(void);
int class_procfs_clean(void);
/* prng.c */
-#define ll_generate_random_uuid(uuid_out) cfs_get_random_bytes(uuid_out, sizeof(class_uuid_t))
+#define ll_generate_random_uuid(uuid_out) \
+ get_random_bytes(uuid_out, sizeof(class_uuid_t))
/* statfs_pack.c */
struct kstatfs;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index fac9d19d50b6..11b11b5f3216 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -64,7 +64,6 @@
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
struct ldlm_resource *res = lock->l_resource;
- struct list_head *tmp;
struct ldlm_lock *lck;
__u64 kms = 0;
@@ -74,8 +73,7 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
*/
ldlm_set_kms_ignore(lock);
- list_for_each(tmp, &res->lr_granted) {
- lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+ list_for_each_entry(lck, &res->lr_granted, l_res_link) {
if (ldlm_is_kms_ignore(lck))
continue;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 7cb61e2e7d3b..7cbc6a06afec 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -886,17 +886,15 @@ static void search_granted_lock(struct list_head *queue,
struct ldlm_lock *req,
struct sl_insert_point *prev)
{
- struct list_head *tmp;
struct ldlm_lock *lock, *mode_end, *policy_end;
- list_for_each(tmp, queue) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ list_for_each_entry(lock, queue, l_res_link) {
mode_end = list_prev_entry(lock, l_sl_mode);
if (lock->l_req_mode != req->l_req_mode) {
/* jump to last lock of mode group */
- tmp = &mode_end->l_res_link;
+ lock = mode_end;
continue;
}
@@ -933,9 +931,7 @@ static void search_granted_lock(struct list_head *queue,
break;
/* go to next policy group within mode group */
- tmp = policy_end->l_res_link.next;
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ lock = list_next_entry(policy_end, l_res_link);
} /* loop over policy groups within the mode group */
/* insert point is last lock of the mode group,
@@ -1687,7 +1683,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
if (list_empty(arg->list))
return -ENOENT;
- lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
+ lock = list_first_entry(arg->list, struct ldlm_lock, l_bl_ast);
/* nobody should touch l_bl_ast */
lock_res_and_lock(lock);
@@ -1723,7 +1719,7 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
if (list_empty(arg->list))
return -ENOENT;
- lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
+ lock = list_first_entry(arg->list, struct ldlm_lock, l_cp_ast);
/* It's possible to receive a completion AST before we've set
* the l_completion_ast pointer: either because the AST arrived
@@ -1769,7 +1765,7 @@ ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
if (list_empty(arg->list))
return -ENOENT;
- lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
+ lock = list_first_entry(arg->list, struct ldlm_lock, l_rk_ast);
list_del_init(&lock->l_rk_ast);
/* the desc just pretend to exclusive */
@@ -1796,7 +1792,7 @@ static int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
if (list_empty(arg->list))
return -ENOENT;
- gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
+ gl_work = list_first_entry(arg->list, struct ldlm_glimpse_work,
gl_list);
list_del_init(&gl_work->gl_list);
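The conversions above replace open-coded list_entry(head->next, ...) with
list_first_entry(), which states the intent directly and pairs naturally
with the list_empty() check that precedes it. A minimal sketch of the
idiom, using an illustrative struct rather than the driver's own types:

    #include <linux/list.h>
    #include <linux/stddef.h>

    struct item {
            struct list_head link;
            int payload;
    };

    /* Pop the oldest entry, or return NULL if the list is empty. */
    static struct item *pop_first(struct list_head *head)
    {
            struct item *it;

            if (list_empty(head))
                    return NULL;

            /* same as list_entry(head->next, struct item, link) */
            it = list_first_entry(head, struct item, link);
            list_del_init(&it->link);
            return it;
    }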
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 2d5a2c932ddc..5f6e7c933b81 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -696,13 +696,13 @@ static int ldlm_bl_get_work(struct ldlm_bl_pool *blp,
/* process a request from the blp_list at least every blp_num_threads */
if (!list_empty(&blp->blp_list) &&
(list_empty(&blp->blp_prio_list) || num_bl == 0))
- blwi = list_entry(blp->blp_list.next,
- struct ldlm_bl_work_item, blwi_entry);
+ blwi = list_first_entry(&blp->blp_list,
+ struct ldlm_bl_work_item, blwi_entry);
else
if (!list_empty(&blp->blp_prio_list))
- blwi = list_entry(blp->blp_prio_list.next,
- struct ldlm_bl_work_item,
- blwi_entry);
+ blwi = list_first_entry(&blp->blp_prio_list,
+ struct ldlm_bl_work_item,
+ blwi_entry);
if (blwi) {
if (++num_bl >= num_th)
@@ -1093,8 +1093,10 @@ static int ldlm_cleanup(void)
kset_unregister(ldlm_ns_kset);
if (ldlm_svc_kset)
kset_unregister(ldlm_svc_kset);
- if (ldlm_kobj)
+ if (ldlm_kobj) {
+ sysfs_remove_group(ldlm_kobj, &ldlm_attr_group);
kobject_put(ldlm_kobj);
+ }
ldlm_debugfs_cleanup();
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index da65d00a7811..8563bd32befa 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -1086,8 +1086,12 @@ int ldlm_pools_init(void)
int rc;
rc = ldlm_pools_thread_start();
- if (rc == 0)
- register_shrinker(&ldlm_pools_cli_shrinker);
+ if (rc)
+ return rc;
+
+ rc = register_shrinker(&ldlm_pools_cli_shrinker);
+ if (rc)
+ ldlm_pools_thread_stop();
return rc;
}
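register_shrinker() allocates per-node counters and can therefore fail,
which is why ldlm_pools_init() now checks its return value and unwinds
the thread it had already started. A sketch of the general shape, with
hypothetical my_* names standing in for the subsystem's own init and
teardown steps:

    #include <linux/shrinker.h>

    /* hypothetical earlier init step and its teardown */
    int my_threads_start(void);
    void my_threads_stop(void);

    static unsigned long my_count(struct shrinker *s,
                                  struct shrink_control *sc)
    {
            return 0;       /* nothing reclaimable in this sketch */
    }

    static unsigned long my_scan(struct shrinker *s,
                                 struct shrink_control *sc)
    {
            return SHRINK_STOP;
    }

    static struct shrinker my_shrinker = {
            .count_objects  = my_count,
            .scan_objects   = my_scan,
            .seeks          = DEFAULT_SEEKS,
    };

    int my_init(void)
    {
            int rc = my_threads_start();

            if (rc)
                    return rc;

            rc = register_shrinker(&my_shrinker);
            if (rc)
                    my_threads_stop();      /* undo step one on failure */
            return rc;
    }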
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 02ea14c9b089..6aa37463db46 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -1656,7 +1656,7 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
*/
while (count > 0) {
LASSERT(!list_empty(cancels));
- lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
+ lock = list_first_entry(cancels, struct ldlm_lock, l_bl_ast);
LASSERT(lock->l_conn_export);
if (exp_connect_cancelset(lock->l_conn_export)) {
@@ -1780,7 +1780,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel_unused);
static int ldlm_resource_foreach(struct ldlm_resource *res,
ldlm_iterator_t iter, void *closure)
{
- struct list_head *tmp, *next;
+ struct ldlm_lock *tmp;
struct ldlm_lock *lock;
int rc = LDLM_ITER_CONTINUE;
@@ -1788,18 +1788,14 @@ static int ldlm_resource_foreach(struct ldlm_resource *res,
return LDLM_ITER_CONTINUE;
lock_res(res);
- list_for_each_safe(tmp, next, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
+ list_for_each_entry_safe(lock, tmp, &res->lr_granted, l_res_link) {
if (iter(lock, closure) == LDLM_ITER_STOP) {
rc = LDLM_ITER_STOP;
goto out;
}
}
- list_for_each_safe(tmp, next, &res->lr_waiting) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
+ list_for_each_entry_safe(lock, tmp, &res->lr_waiting, l_res_link) {
if (iter(lock, closure) == LDLM_ITER_STOP) {
rc = LDLM_ITER_STOP;
goto out;
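ldlm_resource_foreach() above switches to list_for_each_entry_safe(),
which caches the next entry before the loop body runs, so the body
(here, the iterator callback) may remove the current entry without a
separate cursor variable. The idiom in isolation, with illustrative
types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct list_head link;
    };

    static void free_all(struct list_head *head)
    {
            struct item *it, *next;

            /* 'next' is fetched up front, so deleting 'it' is safe */
            list_for_each_entry_safe(it, next, head, link) {
                    list_del(&it->link);
                    kfree(it);
            }
    }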
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 2689ffdf10e3..9958533cc227 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -752,24 +752,22 @@ extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
__u64 flags)
{
- struct list_head *tmp;
int rc = 0;
bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
do {
- struct ldlm_lock *lock = NULL;
+ struct ldlm_lock *lock = NULL, *tmp;
struct lustre_handle lockh;
/* First, we look for non-cleaned-yet lock
* all cleaned locks are marked by CLEANED flag.
*/
lock_res(res);
- list_for_each(tmp, q) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
- if (ldlm_is_cleaned(lock)) {
- lock = NULL;
+ list_for_each_entry(tmp, q, l_res_link) {
+ if (ldlm_is_cleaned(tmp))
continue;
- }
+
+ lock = tmp;
LDLM_LOCK_GET(lock);
ldlm_set_cleaned(lock);
break;
@@ -1283,19 +1281,15 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
*/
void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
{
- struct list_head *tmp;
+ struct ldlm_namespace *ns;
if (!((libcfs_debug | D_ERROR) & level))
return;
mutex_lock(ldlm_namespace_lock(client));
- list_for_each(tmp, ldlm_namespace_list(client)) {
- struct ldlm_namespace *ns;
-
- ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
+ list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain)
ldlm_namespace_dump(level, ns);
- }
mutex_unlock(ldlm_namespace_lock(client));
}
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 5b2e47c246f3..99b0b77c75f5 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -369,8 +369,6 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
}
ctx->pos = pos;
ll_finish_md_op_data(op_data);
- filp->f_version = inode->i_version;
-
out:
if (!rc)
ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
@@ -1339,9 +1337,9 @@ finish_req:
cmd == LL_IOC_MDC_GETINFO)) {
rc = 0;
goto skip_lmm;
- } else {
- goto out_req;
}
+
+ goto out_req;
}
if (cmd == IOC_MDC_GETFILESTRIPE ||
@@ -1678,7 +1676,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
else
fd->lfd_pos = offset;
file->f_pos = offset;
- file->f_version = 0;
}
ret = offset;
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index b133fd00c08c..f68c2e88f12b 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -792,7 +792,7 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
extern struct super_operations lustre_super_operations;
void ll_lli_init(struct ll_inode_info *lli);
-int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
+int ll_fill_super(struct super_block *sb);
void ll_put_super(struct super_block *sb);
void ll_kill_super(struct super_block *sb);
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
@@ -1296,15 +1296,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
spin_lock_nested(&dentry->d_lock,
nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
ll_d2d(dentry)->lld_invalid = 1;
- /*
- * We should be careful about dentries created by d_obtain_alias().
- * These dentries are not put in the dentry tree, instead they are
- * linked to sb->s_anon through dentry->d_hash.
- * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
- * If we unhashed such a dentry, unmount would not be able to find
- * it and busy inodes would be reported.
- */
- if (d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
+ if (d_count(dentry) == 0)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 8666f1e81ade..6735a6f006d2 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -146,8 +146,7 @@ static void ll_free_sbi(struct super_block *sb)
kfree(sbi);
}
-static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
- struct vfsmount *mnt)
+static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
{
struct inode *root = NULL;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -236,7 +235,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
"An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
md);
goto out;
- } else if (err) {
+ }
+
+ if (err) {
CERROR("cannot connect to %s: rc = %d\n", md, err);
goto out;
}
@@ -865,7 +866,7 @@ void ll_lli_init(struct ll_inode_info *lli)
mutex_init(&lli->lli_layout_mutex);
}
-int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
+int ll_fill_super(struct super_block *sb)
{
struct lustre_profile *lprof = NULL;
struct lustre_sb_info *lsi = s2lsi(sb);
@@ -942,7 +943,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
}
/* connections, registrations, sb setup */
- err = client_common_fill_super(sb, md, dt, mnt);
+ err = client_common_fill_super(sb, md, dt);
if (!err)
sbi->ll_client_common_fill_super_succeeded = 1;
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 0bda111a096e..9b0bb3541a84 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -86,8 +86,7 @@ MODULE_ALIAS_FS("lustre");
static int __init lustre_init(void)
{
struct lnet_process_id lnet_id;
- struct timespec64 ts;
- int i, rc, seed[2];
+ int i, rc;
BUILD_BUG_ON(sizeof(LUSTRE_VOLATILE_HDR) !=
LUSTRE_VOLATILE_HDR_LEN + 1);
@@ -126,22 +125,20 @@ static int __init lustre_init(void)
goto out_debugfs;
}
- cfs_get_random_bytes(seed, sizeof(seed));
-
/* Nodes with small feet have little entropy. The NID for this
* node gives the most entropy in the low bits
*/
for (i = 0;; i++) {
+ u32 seed;
+
if (LNetGetId(i, &lnet_id) == -ENOENT)
break;
-
- if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND)
- seed[0] ^= LNET_NIDADDR(lnet_id.nid);
+ if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND) {
+ seed = LNET_NIDADDR(lnet_id.nid);
+ add_device_randomness(&seed, sizeof(seed));
+ }
}
- ktime_get_ts64(&ts);
- cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
-
rc = vvp_global_init();
if (rc != 0)
goto out_sysfs;
@@ -159,8 +156,7 @@ static int __init lustre_init(void)
if (rc != 0)
goto out_inode_fini_env;
- lustre_register_client_fill_super(ll_fill_super);
- lustre_register_kill_super_cb(ll_kill_super);
+ lustre_register_super_ops(THIS_MODULE, ll_fill_super, ll_kill_super);
lustre_register_client_process_config(ll_process_config);
return 0;
@@ -181,8 +177,7 @@ out_cache:
static void __exit lustre_exit(void)
{
- lustre_register_client_fill_super(NULL);
- lustre_register_kill_super_cb(NULL);
+ lustre_register_super_ops(NULL, NULL, NULL);
lustre_register_client_process_config(NULL);
debugfs_remove(llite_root);
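The seeding rework above retires the libcfs private PRNG in favour of
the kernel's shared entropy machinery: identifying data such as the
node's NID is mixed into the pool with add_device_randomness(), which
stirs the pool without crediting entropy, and consumers simply draw
from get_random_bytes(). A sketch, assuming a hypothetical 32-bit node
address:

    #include <linux/random.h>

    static void seed_from_node_id(u32 nid_addr)
    {
            /* stir the pool with device-unique data; no entropy credited */
            add_device_randomness(&nid_addr, sizeof(nid_addr));
    }

    static u64 fresh_handle_base(void)
    {
            u64 base;

            /* draw from the shared pool instead of a private PRNG */
            get_random_bytes(&base, sizeof(base));
            return base;
    }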
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 8ccc8b799c02..987c03b058e6 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -384,7 +384,7 @@ int cl_sb_fini(struct super_block *sb)
struct vvp_pgcache_id {
unsigned int vpi_bucket;
unsigned int vpi_depth;
- uint32_t vpi_index;
+ u32 vpi_index;
unsigned int vpi_curdep;
struct lu_object_header *vpi_obj;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index bfae98e82d6f..e7a4778e02e4 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -699,7 +699,7 @@ static int vvp_io_read_start(const struct lu_env *env,
result = vvp_prep_size(env, obj, io, pos, tot, &exceed);
if (result != 0)
return result;
- else if (exceed != 0)
+ if (exceed != 0)
goto out;
LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index ae28ddf80d9b..a56d71c2dda2 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -115,19 +115,19 @@ static inline const struct lsm_operations *lsm_op_find(int magic)
*/
#if BITS_PER_LONG == 64
# define lov_do_div64(n, base) ({ \
- uint64_t __base = (base); \
- uint64_t __rem; \
- __rem = ((uint64_t)(n)) % __base; \
- (n) = ((uint64_t)(n)) / __base; \
+ u64 __base = (base); \
+ u64 __rem; \
+ __rem = ((u64)(n)) % __base; \
+ (n) = ((u64)(n)) / __base; \
__rem; \
})
#elif BITS_PER_LONG == 32
# define lov_do_div64(n, base) ({ \
- uint64_t __rem; \
+ u64 __rem; \
if ((sizeof(base) > 4) && (((base) & 0xffffffff00000000ULL) != 0)) { \
int __remainder; \
LASSERTF(!((base) & (LOV_MIN_STRIPE_SIZE - 1)), "64 bit lov " \
- "division %llu / %llu\n", (n), (uint64_t)(base)); \
+ "division %llu / %llu\n", (n), (u64)(base)); \
__remainder = (n) & (LOV_MIN_STRIPE_SIZE - 1); \
(n) >>= LOV_MIN_STRIPE_BITS; \
__rem = do_div(n, (base) >> LOV_MIN_STRIPE_BITS); \
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 7ce01026a409..ec70c12e5b40 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -828,11 +828,9 @@ out:
static int lov_cleanup(struct obd_device *obd)
{
struct lov_obd *lov = &obd->u.lov;
- struct list_head *pos, *tmp;
- struct pool_desc *pool;
+ struct pool_desc *pool, *tmp;
- list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
- pool = list_entry(pos, struct pool_desc, pool_list);
+ list_for_each_entry_safe(pool, tmp, &lov->lov_pool_list, pool_list) {
/* free pool structs */
CDEBUG(D_INFO, "delete pool %p\n", pool);
/* In the function below, .hs_keycmp resolves to
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 105b707eed14..897cf2cd4a24 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -1335,7 +1335,7 @@ static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
int rc = 0;
int cur_stripe;
int stripe_count;
- struct fiemap_state fs = { 0 };
+ struct fiemap_state fs = { NULL };
lsm = lov_lsm_addref(cl2lov(obj));
if (!lsm)
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 3bdf48e4edb4..cfa1d7f92b0f 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -49,15 +49,13 @@ static void lov_init_set(struct lov_request_set *set)
static void lov_finish_set(struct lov_request_set *set)
{
- struct list_head *pos, *n;
+ struct lov_request *req;
LASSERT(set);
- list_for_each_safe(pos, n, &set->set_list) {
- struct lov_request *req = list_entry(pos,
- struct lov_request,
- rq_link);
+ while ((req = list_first_entry_or_null(&set->set_list,
+ struct lov_request,
+ rq_link)) != NULL) {
list_del_init(&req->rq_link);
-
kfree(req->rq_oi.oi_osfs);
kfree(req);
}
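lov_finish_set() above becomes a drain loop: list_first_entry_or_null()
folds the emptiness test into the fetch, so entries are popped until it
returns NULL. The same shape with an illustrative type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_req {
            struct list_head link;
    };

    static void drain(struct list_head *head)
    {
            struct my_req *req;

            while ((req = list_first_entry_or_null(head, struct my_req,
                                                   link)) != NULL) {
                    list_del_init(&req->link);
                    kfree(req);
            }
    }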
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 77fa8fea0249..79ff85feab64 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -523,7 +523,7 @@ static void do_requeue(struct config_llog_data *cld)
* in order to not flood the MGS.
*/
#define MGC_TIMEOUT_MIN_SECONDS 5
-#define MGC_TIMEOUT_RAND_CENTISEC 0x1ff /* ~500 */
+#define MGC_TIMEOUT_RAND_CENTISEC 500
static int mgc_requeue_thread(void *data)
{
@@ -537,7 +537,7 @@ static int mgc_requeue_thread(void *data)
while (!(rq_state & RQ_STOP)) {
struct l_wait_info lwi;
struct config_llog_data *cld, *cld_prev;
- int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
+ int rand = prandom_u32_max(MGC_TIMEOUT_RAND_CENTISEC);
int to;
/* Any new or requeued lostlocks will change the state */
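prandom_u32_max(n) returns a value uniformly distributed in [0, n),
which is why the constant above can become an exact 500 rather than the
power-of-two mask 0x1ff that merely approximated it. A sketch of the
resulting delay computation (helper name hypothetical):

    #include <linux/random.h>
    #include <linux/jiffies.h>

    #define TIMEOUT_MIN_SECONDS     5
    #define TIMEOUT_RAND_CENTISEC   500

    /* minimum delay plus up to ~5s of centisecond-granularity jitter */
    static unsigned long requeue_delay(void)
    {
            unsigned int jitter_cs = prandom_u32_max(TIMEOUT_RAND_CENTISEC);

            return msecs_to_jiffies(TIMEOUT_MIN_SECONDS * 1000 +
                                    jitter_cs * 10);
    }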
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index d415f8396038..3b683b774fef 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -79,13 +79,12 @@ EXPORT_SYMBOL(cl_lock_slice_add);
void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock)
{
+ struct cl_lock_slice *slice;
cl_lock_trace(D_DLMTRACE, env, "destroy lock", lock);
- while (!list_empty(&lock->cll_layers)) {
- struct cl_lock_slice *slice;
-
- slice = list_entry(lock->cll_layers.next,
- struct cl_lock_slice, cls_linkage);
+ while ((slice = list_first_entry_or_null(&lock->cll_layers,
+ struct cl_lock_slice,
+ cls_linkage)) != NULL) {
list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index fdd27ce46fda..7b18d775b001 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -510,13 +510,13 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
*/
lu_site_stats_print(&site->cs_lu, m);
cache_stats_print(&site->cs_pages, m, 1);
- seq_printf(m, " [");
+ seq_puts(m, " [");
for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
seq_printf(m, "%s: %u ", pstate[i],
atomic_read(&site->cs_pages_state[i]));
- seq_printf(m, "]\n");
+ seq_puts(m, "]\n");
cache_stats_print(&cl_env_stats, m, 0);
- seq_printf(m, "\n");
+ seq_puts(m, "\n");
return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);
@@ -1017,7 +1017,7 @@ int cl_global_init(void)
{
int result;
- cl_envs = kzalloc(sizeof(*cl_envs) * num_possible_cpus(), GFP_KERNEL);
+ cl_envs = kcalloc(num_possible_cpus(), sizeof(*cl_envs), GFP_KERNEL);
if (!cl_envs) {
result = -ENOMEM;
goto out;
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 7f65439f9b95..d3b25667bc3a 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -202,7 +202,7 @@ struct cl_page *cl_page_find(const struct lu_env *env,
* vmpage lock is used to protect the child/parent
* relationship
*/
- KLASSERT(PageLocked(vmpage));
+ LASSERT(PageLocked(vmpage));
/*
* cl_vmpage_page() can be called here without any locks as
*
@@ -340,7 +340,7 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
struct cl_page *page;
- KLASSERT(PageLocked(vmpage));
+ LASSERT(PageLocked(vmpage));
/*
* NOTE: absence of races and liveness of data are guaranteed by page
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 2985bca4dc4c..3e24b76f6301 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -377,7 +377,8 @@ static int obd_init_checks(void)
char buf[64];
int len, ret = 0;
- CDEBUG(D_INFO, "LPU64=%s, LPD64=%s, LPX64=%s\n", "%llu", "%lld", "%#llx");
+ CDEBUG(D_INFO, "LPU64=%s, LPD64=%s, LPX64=%s\n", "%llu", "%lld",
+ "%#llx");
CDEBUG(D_INFO, "OBD_OBJECT_EOF = %#llx\n", (__u64)OBD_OBJECT_EOF);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index fc59f29a4290..57951237def2 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -501,6 +501,7 @@ int class_procfs_init(void)
rc = debugfs_lustre_root ? PTR_ERR(debugfs_lustre_root)
: -ENOMEM;
debugfs_lustre_root = NULL;
+ sysfs_remove_group(lustre_kobj, &lustre_attr_group);
kobject_put(lustre_kobj);
goto out;
}
@@ -509,6 +510,7 @@ int class_procfs_init(void)
&obd_device_list_fops);
if (IS_ERR_OR_NULL(file)) {
rc = file ? PTR_ERR(file) : -ENOMEM;
+ sysfs_remove_group(lustre_kobj, &lustre_attr_group);
kobject_put(lustre_kobj);
goto out;
}
@@ -522,6 +524,7 @@ int class_procfs_clean(void)
debugfs_lustre_root = NULL;
+ sysfs_remove_group(lustre_kobj, &lustre_attr_group);
kobject_put(lustre_kobj);
return 0;
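The class_procfs_*() fixes above make attribute-group removal explicit:
every path that drops the kobject first removes the group that was
created on it, keeping setup and teardown symmetric on the error and
normal exit paths alike. Reduced to a skeleton (the names and the group
are placeholders):

    #include <linux/kobject.h>
    #include <linux/sysfs.h>
    #include <linux/errno.h>

    static struct kobject *example_kobj;

    int example_setup(struct kobject *parent,
                      const struct attribute_group *grp)
    {
            int rc;

            example_kobj = kobject_create_and_add("example", parent);
            if (!example_kobj)
                    return -ENOMEM;

            rc = sysfs_create_group(example_kobj, grp);
            if (rc)
                    kobject_put(example_kobj);  /* no group to remove yet */
            return rc;
    }

    void example_teardown(const struct attribute_group *grp)
    {
            /* remove what sysfs_create_group() added, then drop the ref */
            sysfs_remove_group(example_kobj, grp);
            kobject_put(example_kobj);
    }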
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 05d71f568837..e1f4ef2bddd4 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -1093,7 +1093,7 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
percpusize = lprocfs_stats_counter_size(stats);
- LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize);
+ stats->ls_percpu[cpuid] = kzalloc(percpusize, GFP_ATOMIC);
if (stats->ls_percpu[cpuid]) {
rc = 0;
if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
@@ -1137,7 +1137,8 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
num_entry = num_possible_cpus();
/* alloc percpu pointers for all possible cpu slots */
- LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
+ stats = kvzalloc(offsetof(typeof(*stats), ls_percpu[num_entry]),
+ GFP_KERNEL);
if (!stats)
return NULL;
@@ -1146,15 +1147,16 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
spin_lock_init(&stats->ls_lock);
/* alloc num of counter headers */
- LIBCFS_ALLOC(stats->ls_cnt_header,
- stats->ls_num * sizeof(struct lprocfs_counter_header));
+ stats->ls_cnt_header = kvmalloc_array(stats->ls_num,
+ sizeof(struct lprocfs_counter_header),
+ GFP_KERNEL | __GFP_ZERO);
if (!stats->ls_cnt_header)
goto fail;
if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) {
/* contains only one set counters */
percpusize = lprocfs_stats_counter_size(stats);
- LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize);
+ stats->ls_percpu[0] = kzalloc(percpusize, GFP_ATOMIC);
if (!stats->ls_percpu[0])
goto fail;
stats->ls_biggest_alloc_num = 1;
@@ -1191,12 +1193,9 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh)
percpusize = lprocfs_stats_counter_size(stats);
for (i = 0; i < num_entry; i++)
- if (stats->ls_percpu[i])
- LIBCFS_FREE(stats->ls_percpu[i], percpusize);
- if (stats->ls_cnt_header)
- LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num *
- sizeof(struct lprocfs_counter_header));
- LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
+ kfree(stats->ls_percpu[i]);
+ kvfree(stats->ls_cnt_header);
+ kvfree(stats);
}
EXPORT_SYMBOL(lprocfs_free_stats);
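The LIBCFS_ALLOC() wrappers above are replaced with the matching
standard allocators: kzalloc(..., GFP_ATOMIC) for the small per-CPU
slots, and the kvmalloc() variants where the size scales with the
number of possible CPUs or counters and may outgrow kmalloc();
kvfree() then frees either kind. For instance:

    #include <linux/slab.h>
    #include <linux/mm.h>

    struct counter {
            u64 val;
    };

    static struct counter *alloc_counters(unsigned int num)
    {
            /* zeroed array; transparently falls back to vmalloc if large */
            return kvmalloc_array(num, sizeof(struct counter),
                                  GFP_KERNEL | __GFP_ZERO);
    }

    static void free_counters(struct counter *c)
    {
            kvfree(c);      /* correct for kmalloc and vmalloc memory alike */
    }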
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index b938a3f9d50a..2719abbff85f 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -1932,8 +1932,10 @@ int lu_global_init(void)
LU_CONTEXT_KEY_INIT(&lu_global_key);
result = lu_context_key_register(&lu_global_key);
- if (result != 0)
+ if (result != 0) {
+ lu_ref_global_fini();
return result;
+ }
/*
* At this level, we don't know what tags are needed, so allocate them
@@ -1943,17 +1945,31 @@ int lu_global_init(void)
down_write(&lu_sites_guard);
result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
up_write(&lu_sites_guard);
- if (result != 0)
+ if (result != 0) {
+ lu_context_key_degister(&lu_global_key);
+ lu_ref_global_fini();
return result;
+ }
/*
* seeks estimation: 3 seeks to read a record from oi, one to read
* inode, one for ea. Unfortunately setting this high value results in
* lu_object/inode cache consuming all the memory.
*/
- register_shrinker(&lu_site_shrinker);
+ result = register_shrinker(&lu_site_shrinker);
+ if (result != 0) {
+ /* Order explained in lu_global_fini(). */
+ lu_context_key_degister(&lu_global_key);
- return result;
+ down_write(&lu_sites_guard);
+ lu_env_fini(&lu_shrink_env);
+ up_write(&lu_sites_guard);
+
+ lu_ref_global_fini();
+ return result;
+ }
+
+ return 0;
}
/**
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
index 71329adc0318..2d6da2431a09 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -181,8 +181,6 @@ EXPORT_SYMBOL(class_handle_free_cb);
int class_handle_init(void)
{
struct handle_bucket *bucket;
- struct timespec64 ts;
- int seed[2];
LASSERT(!handle_hash);
@@ -198,12 +196,7 @@ int class_handle_init(void)
spin_lock_init(&bucket->lock);
}
- /** bug 21430: add randomness to the initial base */
- cfs_get_random_bytes(seed, sizeof(seed));
- ktime_get_ts64(&ts);
- cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
-
- cfs_get_random_bytes(&handle_base, sizeof(handle_base));
+ get_random_bytes(&handle_base, sizeof(handle_base));
LASSERT(handle_base != 0ULL);
return 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index c0e192ae22a9..997c0f9aafb5 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -236,7 +236,7 @@ static int class_attach(struct lustre_cfg *lcfg)
uuid = lustre_cfg_string(lcfg, 2);
CDEBUG(D_IOCTL, "attach type %s name: %s uuid: %s\n",
- MKSTR(typename), MKSTR(name), MKSTR(uuid));
+ typename, name, uuid);
obd = class_newdev(typename, name);
if (IS_ERR(obd)) {
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 2a79a223b98a..acc1ea773c9c 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -49,9 +49,9 @@
#include <lustre_disk.h>
#include <uapi/linux/lustre/lustre_param.h>
-static int (*client_fill_super)(struct super_block *sb,
- struct vfsmount *mnt);
-
+static DEFINE_SPINLOCK(client_lock);
+static struct module *client_mod;
+static int (*client_fill_super)(struct super_block *sb);
static void (*kill_super_cb)(struct super_block *sb);
/**************** config llog ********************/
@@ -1107,20 +1107,14 @@ invalid:
return -EINVAL;
}
-struct lustre_mount_data2 {
- void *lmd2_data;
- struct vfsmount *lmd2_mnt;
-};
-
/** This is the entry point for the mount call into Lustre.
* This is called when a server or client is mounted,
* and this is where we start setting things up.
* @param data Mount options (e.g. -o flock,abort_recov)
*/
-static int lustre_fill_super(struct super_block *sb, void *data, int silent)
+static int lustre_fill_super(struct super_block *sb, void *lmd2_data, int silent)
{
struct lustre_mount_data *lmd;
- struct lustre_mount_data2 *lmd2 = data;
struct lustre_sb_info *lsi;
int rc;
@@ -1143,17 +1137,22 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent)
obd_zombie_barrier();
/* Figure out the lmd from the mount options */
- if (lmd_parse((lmd2->lmd2_data), lmd)) {
+ if (lmd_parse(lmd2_data, lmd)) {
lustre_put_lsi(sb);
rc = -EINVAL;
goto out;
}
if (lmd_is_client(lmd)) {
+ bool have_client = false;
CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile);
if (!client_fill_super)
request_module("lustre");
- if (!client_fill_super) {
+ spin_lock(&client_lock);
+ if (client_fill_super && try_module_get(client_mod))
+ have_client = true;
+ spin_unlock(&client_lock);
+ if (!have_client) {
LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n");
lustre_put_lsi(sb);
rc = -ENODEV;
@@ -1165,8 +1164,10 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent)
}
/* Connect and start */
/* (should always be ll_fill_super) */
- rc = (*client_fill_super)(sb, lmd2->lmd2_mnt);
- /* c_f_s will call lustre_common_put_super on failure */
+ rc = (*client_fill_super)(sb);
+ /* c_f_s will call lustre_common_put_super on failure, otherwise
+ * c_f_s will have taken another reference to the module */
+ module_put(client_mod);
}
} else {
CERROR("This is client-side-only module, cannot handle server mount.\n");
@@ -1192,29 +1193,23 @@ out:
/* We can't call ll_fill_super by name because it lives in a module that
* must be loaded after this one.
*/
-void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
- struct vfsmount *mnt))
+void lustre_register_super_ops(struct module *mod,
+ int (*cfs)(struct super_block *sb),
+ void (*ksc)(struct super_block *sb))
{
+ spin_lock(&client_lock);
+ client_mod = mod;
client_fill_super = cfs;
+ kill_super_cb = ksc;
+ spin_unlock(&client_lock);
}
-EXPORT_SYMBOL(lustre_register_client_fill_super);
-
-void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb))
-{
- kill_super_cb = cfs;
-}
-EXPORT_SYMBOL(lustre_register_kill_super_cb);
+EXPORT_SYMBOL(lustre_register_super_ops);
/***************** FS registration ******************/
static struct dentry *lustre_mount(struct file_system_type *fs_type, int flags,
const char *devname, void *data)
{
- struct lustre_mount_data2 lmd2 = {
- .lmd2_data = data,
- .lmd2_mnt = NULL
- };
-
- return mount_nodev(fs_type, flags, &lmd2, lustre_fill_super);
+ return mount_nodev(fs_type, flags, data, lustre_fill_super);
}
static void lustre_kill_super(struct super_block *sb)
@@ -1230,11 +1225,11 @@ static void lustre_kill_super(struct super_block *sb)
/** Register the "lustre" fs type
*/
static struct file_system_type lustre_fs_type = {
- .owner = THIS_MODULE,
- .name = "lustre",
- .mount = lustre_mount,
- .kill_sb = lustre_kill_super,
- .fs_flags = FS_REQUIRES_DEV | FS_RENAME_DOES_D_MOVE,
+ .owner = THIS_MODULE,
+ .name = "lustre",
+ .mount = lustre_mount,
+ .kill_sb = lustre_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("lustre");
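lustre_register_super_ops() above closes a module-unload race: the
mount path could otherwise call into llite while that module is being
removed. Keeping the owning module next to the callbacks, and calling
through the pointer only after try_module_get() succeeds under the same
lock, pins the provider for the duration of the call. A condensed
sketch of the pattern (all names hypothetical; it assumes the owner
unregisters with NULLs from its own exit path, as the hunk above does):

    #include <linux/module.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(cb_lock);
    static struct module *cb_owner;
    static int (*cb_fn)(void *arg);

    void register_cb(struct module *owner, int (*fn)(void *))
    {
            spin_lock(&cb_lock);
            cb_owner = owner;
            cb_fn = fn;
            spin_unlock(&cb_lock);
    }

    int call_cb(void *arg)
    {
            int (*fn)(void *) = NULL;
            int rc = -ENODEV;

            spin_lock(&cb_lock);
            /* pin the providing module so it cannot unload mid-call */
            if (cb_fn && try_module_get(cb_owner))
                    fn = cb_fn;
            spin_unlock(&cb_lock);

            if (fn) {
                    rc = fn(arg);
                    module_put(cb_owner);
            }
            return rc;
    }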
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index feda61bcdb9b..32db150fd42e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -168,9 +168,9 @@ struct osc_device {
/* Write stats is actually protected by client_obd's lock. */
struct osc_stats {
- uint64_t os_lockless_writes; /* by bytes */
- uint64_t os_lockless_reads; /* by bytes */
- uint64_t os_lockless_truncates; /* by times */
+ u64 os_lockless_writes; /* by bytes */
+ u64 os_lockless_reads; /* by bytes */
+ u64 os_lockless_truncates; /* by times */
} od_stats;
/* configuration item(s) */
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 53eda4c99142..45b1ebf33363 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -2844,7 +2844,9 @@ static int __init osc_init(void)
if (rc)
goto out_kmem;
- register_shrinker(&osc_cache_shrinker);
+ rc = register_shrinker(&osc_cache_shrinker);
+ if (rc)
+ goto out_type;
/* This is obviously too much memory, only prevent overflow here */
if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 2a9f2f2ebaa8..bac4b2304bad 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -3067,7 +3067,7 @@ void ptlrpc_init_xid(void)
spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
- cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
+ get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
ptlrpc_last_xid >>= 2;
ptlrpc_last_xid |= (1ULL << 61);
} else {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 77a3721beaee..134ee727e8b7 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -396,6 +396,8 @@ static struct shrinker pools_shrinker = {
int sptlrpc_enc_pool_init(void)
{
+ int rc;
+
/*
* maximum capacity is 1/8 of total physical memory.
* is the 1/8 a good number?
@@ -432,9 +434,11 @@ int sptlrpc_enc_pool_init(void)
if (!page_pools.epp_pools)
return -ENOMEM;
- register_shrinker(&pools_shrinker);
+ rc = register_shrinker(&pools_shrinker);
+ if (rc)
+ enc_pools_free();
- return 0;
+ return rc;
}
void sptlrpc_enc_pool_fini(void)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
index dd7596d8763d..6657ebbe068a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
@@ -1255,7 +1255,7 @@ static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma)
return videobuf_mmap_mapper(&pipe->outq, vma);
}
-static unsigned int atomisp_poll(struct file *file,
+static __poll_t atomisp_poll(struct file *file,
struct poll_table_struct *pt)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 5d3b0e5a1283..4ffff6f8b809 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2174,11 +2174,11 @@ static int bcm2048_fops_release(struct file *file)
return 0;
}
-static unsigned int bcm2048_fops_poll(struct file *file,
+static __poll_t bcm2048_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct bcm2048_device *bdev = video_drvdata(file);
- int retval = 0;
+ __poll_t retval = 0;
poll_wait(file, &bdev->read_queue, pts);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 155e8c758e4b..588743a6fd8a 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -573,7 +573,7 @@ static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
/*
* vpfe_poll() - It is used for select/poll system call
*/
-static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+static __poll_t vpfe_poll(struct file *file, poll_table *wait)
{
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index bb1d6dafca83..26994b429cf2 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -207,13 +207,9 @@ static irqreturn_t csi_idmac_eof_interrupt(int irq, void *dev_id)
goto unlock;
}
- if (priv->fim) {
- struct timespec cur_ts;
-
- ktime_get_ts(&cur_ts);
+ if (priv->fim)
/* call frame interval monitor */
- imx_media_fim_eof_monitor(priv->fim, &cur_ts);
- }
+ imx_media_fim_eof_monitor(priv->fim, ktime_get());
csi_vb2_buf_done(priv);
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
index 47275ef803f3..6df189135db8 100644
--- a/drivers/staging/media/imx/imx-media-fim.c
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -66,7 +66,7 @@ struct imx_media_fim {
int icap_flags;
int counter;
- struct timespec last_ts;
+ ktime_t last_ts;
unsigned long sum; /* usec */
unsigned long nominal; /* usec */
@@ -147,22 +147,26 @@ static void send_fim_event(struct imx_media_fim *fim, unsigned long error)
* (presumably random) interrupt latency.
*/
static void frame_interval_monitor(struct imx_media_fim *fim,
- struct timespec *ts)
+ ktime_t timestamp)
{
- unsigned long interval, error, error_avg;
+ long long interval, error;
+ unsigned long error_avg;
bool send_event = false;
- struct timespec diff;
if (!fim->enabled || ++fim->counter <= 0)
goto out_update_ts;
- diff = timespec_sub(*ts, fim->last_ts);
- interval = diff.tv_sec * 1000 * 1000 + diff.tv_nsec / 1000;
- error = abs(interval - fim->nominal);
+ /* max error is less than 100µs, so use 32-bit division or fail */
+ interval = ktime_to_ns(ktime_sub(timestamp, fim->last_ts));
+ error = abs(interval - NSEC_PER_USEC * (u64)fim->nominal);
+ if (error > U32_MAX)
+ error = U32_MAX;
+ else
+ error = abs((u32)error / NSEC_PER_USEC);
if (fim->tolerance_max && error >= fim->tolerance_max) {
dev_dbg(fim->sd->dev,
- "FIM: %lu ignored, out of tolerance bounds\n",
+ "FIM: %llu ignored, out of tolerance bounds\n",
error);
fim->counter--;
goto out_update_ts;
@@ -184,7 +188,7 @@ static void frame_interval_monitor(struct imx_media_fim *fim,
}
out_update_ts:
- fim->last_ts = *ts;
+ fim->last_ts = timestamp;
if (send_event)
send_fim_event(fim, error_avg);
}
@@ -195,14 +199,14 @@ out_update_ts:
* to interrupt latency.
*/
static void fim_input_capture_handler(int channel, void *dev_id,
- struct timespec *ts)
+ ktime_t timestamp)
{
struct imx_media_fim *fim = dev_id;
unsigned long flags;
spin_lock_irqsave(&fim->lock, flags);
- frame_interval_monitor(fim, ts);
+ frame_interval_monitor(fim, timestamp);
if (!completion_done(&fim->icap_first_event))
complete(&fim->icap_first_event);
@@ -405,14 +409,14 @@ err_free:
* the frame_interval_monitor() is called by the input capture event
* callback handler in that case.
*/
-void imx_media_fim_eof_monitor(struct imx_media_fim *fim, struct timespec *ts)
+void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp)
{
unsigned long flags;
spin_lock_irqsave(&fim->lock, flags);
if (!icap_enabled(fim))
- frame_interval_monitor(fim, ts);
+ frame_interval_monitor(fim, timestamp);
spin_unlock_irqrestore(&fim->lock, flags);
}
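The frame-interval monitor now stores ktime_t, a 64-bit count of
nanoseconds, so computing the interval and its deviation from nominal
is plain integer arithmetic with no tv_sec/tv_nsec carry handling;
callers capture timestamps with ktime_get(), as csi_idmac_eof_interrupt()
now does. Roughly:

    #include <linux/ktime.h>
    #include <linux/kernel.h>

    /*
     * Deviation, in microseconds, of the measured interval from a
     * nominal interval given in microseconds; clamped on overflow.
     */
    static u32 interval_error_us(ktime_t prev, ktime_t now,
                                 unsigned long nominal_us)
    {
            s64 err_ns = ktime_to_ns(ktime_sub(now, prev)) -
                         (s64)nominal_us * NSEC_PER_USEC;

            if (err_ns < 0)
                    err_ns = -err_ns;
            if (err_ns > (s64)U32_MAX)
                    err_ns = U32_MAX;
            return (u32)err_ns / NSEC_PER_USEC;
    }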
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index d409170632bd..ac3ab115394f 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -280,7 +280,7 @@ int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
/* imx-media-fim.c */
struct imx_media_fim;
-void imx_media_fim_eof_monitor(struct imx_media_fim *fim, struct timespec *ts);
+void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp);
int imx_media_fim_set_stream(struct imx_media_fim *fim,
const struct v4l2_fract *frame_interval,
bool on);
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 6bd0717bf76e..a003603af725 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1197,12 +1197,12 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
}
/* copied from lirc_dev */
-static unsigned int poll(struct file *filep, poll_table *wait)
+static __poll_t poll(struct file *filep, poll_table *wait)
{
struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
struct lirc_buffer *rbuf = ir->l->buf;
- unsigned int ret;
+ __poll_t ret;
dev_dbg(ir->dev, "%s called\n", __func__);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 9e2f0421a01e..a3a83424a926 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -1185,7 +1185,7 @@ static int iss_video_release(struct file *file)
return 0;
}
-static unsigned int iss_video_poll(struct file *file, poll_table *wait)
+static __poll_t iss_video_poll(struct file *file, poll_table *wait)
{
struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
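__poll_t is a sparse-annotated bitwise type, so mixing poll masks with
plain integers now triggers a warning, and handlers build the mask from
the EPOLL* constants. The converted handlers all reduce to roughly this
shape (the device type and its data test are placeholders):

    #include <linux/poll.h>
    #include <linux/wait.h>

    struct example_dev {
            wait_queue_head_t read_queue;
            bool have_data;
    };

    static __poll_t example_poll(struct file *file, poll_table *wait)
    {
            struct example_dev *dev = file->private_data;
            __poll_t mask = 0;

            poll_wait(file, &dev->read_queue, wait);
            if (dev->have_data)
                    mask |= EPOLLIN | EPOLLRDNORM;
            return mask;
    }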
diff --git a/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt b/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt
new file mode 100644
index 000000000000..d8fa841e3742
--- /dev/null
+++ b/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt
@@ -0,0 +1,313 @@
+What: /sys/bus/most/devices/.../description
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Provides information about the interface type and the physical
+ location of the device. Hardware attached via USB, for instance,
+ might return <usb_device 1-1.1:1.0>
+Users:
+
+What: /sys/bus/most/devices/.../interface
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the type of peripheral interface the device uses.
+Users:
+
+What: /sys/bus/most/devices/.../dci
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ If the network interface controller is attached via USB, a dci
+ directory is created that allows applications to read and
+ write the controller's DCI registers.
+Users:
+
+What: /sys/bus/most/devices/.../dci/arb_address
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to set an arbitrary DCI register address an
+ application wants to read from or write to.
+Users:
+
+What: /sys/bus/most/devices/.../dci/arb_value
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to read and write the DCI register whose address
+ is stored in arb_address.
+Users:
+
+What: /sys/bus/most/devices/.../dci/mep_eui48_hi
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MAC address.
+Users:
+
+What: /sys/bus/most/devices/.../dci/mep_eui48_lo
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MAC address.
+Users:
+
+What: /sys/bus/most/devices/.../dci/mep_eui48_mi
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MAC address.
+Users:
+
+What: /sys/bus/most/devices/.../dci/mep_filter
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP filter address.
+Users:
+
+What: /sys/bus/most/devices/.../dci/mep_hash0
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/bus/most/devices/.../dci/mep_hash1
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/bus/most/devices/.../dci/mep_hash2
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/bus/most/devices/.../dci/mep_hash3
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/bus/most/devices/.../dci/ni_state
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the current network interface state.
+Users:
+
+What: /sys/bus/most/devices/.../dci/node_address
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the current node address.
+Users:
+
+What: /sys/bus/most/devices/.../dci/node_position
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the current node position.
+Users:
+
+What: /sys/bus/most/devices/.../dci/packet_bandwidth
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the configured packet bandwidth.
+Users:
+
+What: /sys/bus/most/devices/.../dci/sync_ep
+Date: June 2016
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Triggers the controller's synchronization process for a certain
+ endpoint.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ For every channel of the device a directory is created, whose
+ name is dictated by the HDM. This enables an application to
+ collect information about the channel's capabilities and
+ configure it.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/available_datatypes
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the data types the current channel can transport.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/available_directions
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the directions the current channel is capable of.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/number_of_packet_buffers
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the number of packet buffers the current channel can
+ handle.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/number_of_stream_buffers
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the number of streaming buffers the current channel can
+ handle.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/size_of_packet_buffer
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the size of a packet buffer the current channel can
+ handle.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/size_of_stream_buffer
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the size of a streaming buffer the current channel can
+ handle.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/set_number_of_buffers
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the number of buffers of the current channel.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/set_buffer_size
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the size of a buffer of the current channel.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/set_direction
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the direction of the current channel.
+ The following strings will be accepted:
+ 'dir_tx',
+ 'dir_rx'
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/set_datatype
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the data type of the current channel.
+ The following strings will be accepted:
+ 'control',
+ 'async',
+ 'sync',
+ 'isoc_avp'
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/set_subbuffer_size
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the subbuffer size of the current channel.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/set_packets_per_xact
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is to configure the number of packets per transaction of
+ the current channel. This is only needed if the network
+ interface controller is attached via USB.
+Users:
+
+What: /sys/bus/most/devices/.../<channel>/channel_starving
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates whether the current channel ran out of buffers.
+Users:
+
+What: /sys/bus/most/drivers/mostcore/add_link
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to link a channel to a component of the
+ mostcore. A link created by writing to this file is
+ referred to as a pipe.
+Users:
+
+What: /sys/bus/most/drivers/mostcore/remove_link
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to unlink a channel from a component.
+Users:
+
+What: /sys/bus/most/drivers/mostcore/components
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to retrieve a list of registered components.
+Users:
+
+What: /sys/bus/most/drivers/mostcore/links
+Date: March 2017
+KernelVersion: 4.15
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to retrieve a list of established links.
+Users:
diff --git a/drivers/staging/most/Documentation/driver_usage.txt b/drivers/staging/most/Documentation/driver_usage.txt
index a4dc0c348fbc..bb9b4e870199 100644
--- a/drivers/staging/most/Documentation/driver_usage.txt
+++ b/drivers/staging/most/Documentation/driver_usage.txt
@@ -23,20 +23,29 @@ audio/video streaming. Therefore, the driver perfectly fits to the mission
of Automotive Grade Linux to create open source software solutions for
automotive applications.
-The driver consists basically of three layers. The hardware layer, the
-core layer and the application layer. The core layer consists of the core
-module only. This module handles the communication flow through all three
-layers, the configuration of the driver, the configuration interface
-representation in sysfs, and the buffer management.
-For each of the other two layers a selection of modules is provided. These
-modules can arbitrarily be combined to meet the needs of the desired
-system architecture. A module of the hardware layer is referred to as an
-HDM (hardware dependent module). Each module of this layer handles exactly
-one of the peripheral interfaces of a network interface controller (e.g.
-USB, MediaLB, I2C). A module of the application layer is referred to as an
-AIM (application interfacing module). The modules of this layer give access
-to MOST via one the following ways: character devices, ALSA, Networking or
-V4L2.
+The MOST driver uses module stacking to divide the associated modules into
+three layers. From bottom up these layers are: the adapter layer, the core
+layer and the application layer. The core layer implements the MOST
+subsystem and consists basically of the module core.c and its API. It
+registers the MOST bus with the kernel's device model, handles the data
+routing through all three layers, the configuration of the driver, the
+representation of the configuration interface in sysfs and the buffer
+management.
+
+For each of the other two layers a set of modules is provided. Those can be
+arbitrarily combined with the core to meet the connectivity of the desired
+system architecture.
+
+A module of the adapter layer is basically a device driver for a different
+subsystem. It is registered with the core to connect the MOST subsystem to
+the attached network interface controller hardware. Hence, a given module
+of this layer is designed to handle exactly one of the peripheral
+interfaces (e.g. USB, MediaLB, I2C) the hardware provides.
+
+A module of the application layer is referred to as a core component,
+which extends the core by providing connectivity to user space.
+Applications can then access a MOST network via character devices, an
+ALSA soundcard, a network adapter or a V4L2 capture device.
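+
+For example, to bring up a stack that uses the USB network interface
+controller and exposes its channels as character devices, one module per
+layer is loaded. This is a sketch only: most_core and most_cdev are the
+module names given in the Kconfig help texts, while the name of the USB
+adapter module is assumed here.
+
+	$ modprobe most_core
+	$ modprobe most_usb
+	$ modprobe most_cdev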
To physically access MOST, an Intelligent Network Interface Controller
(INIC) is needed. For more information on available controllers visit:
@@ -44,15 +53,14 @@ www.microchip.com
- Section 1.1 Hardware Layer
+ Section 1.1 Adapter Layer
-The hardware layer contains so called hardware dependent modules (HDM). For each
-peripheral interface the hardware supports the driver has a suitable module
-that handles the interface.
-
-The HDMs encapsulate the peripheral interface specific knowledge of the driver
-and provides an easy way of extending the number of supported interfaces.
-Currently the following HDMs are available:
+The adapter layer contains a pool of device drivers. For each peripheral
+interface the hardware supports, there is one suitable module that handles
+the interface. Adapter drivers encapsulate the peripheral interface
+specific knowledge of the MOST driver stack and provide an easy way of
+extending the number of supported interfaces. Currently the following
+interfaces are available:
1) MediaLB (DIM2)
Host wants to communicate with hardware via MediaLB.
@@ -63,26 +71,34 @@ Currently the following HDMs are available:
3) USB
Host wants to communicate with the hardware via USB.
+Once an adapter driver recognizes a MOST device being attached, it
+registers it with the core, which, in turn, assigns the necessary members
+of the embedded struct device (e.g. the bus this device belongs to and
+attribute groups) and registers it with the kernel's device model.
- Section 1.2 Core Layer
-
-The core layer contains the mostcore module only, which processes the driver
-configuration via sysfs, buffer management and data forwarding.
+ Section 1.2 Core Layer
+This layer implements the MOST subsystem. It contains the core module and
+the header file core.h that exposes the API of the core. When inserted in
+the kernel, it registers the MOST bus_type with the kernel's device model
+and registers itself as a device driver for this bus. Besides these meta
+tasks, the core populates the configuration directory for a registered MOST
+device (represented by struct most_interface) in sysfs and processes the
+configuration of the device's interface. The core layer also handles the
+buffer management and the data/message routing.
- Section 1.2 Application Layer
-The application layer contains so called application interfacing modules (AIM).
-Depending on how the driver should interface to the application, one or more
-suitable modules can be selected.
+ Section 1.3 Application Layer
-The AIMs encapsulate the application interface specific knowledge of the driver
-and provides access to user space or other kernel subsystems.
-Currently the following AIMs are available
+This layer contains a pool of device drivers that are components of the
+core designed to make up the userspace experience of the MOST driver stack.
+Depending on how an application is meant to interface with the driver, one
+or more modules of this pool can be registered with the core. Currently the
+following components are available:
1) Character Device
- Applications can access the driver by means of character devices.
+ Userspace can access the driver by means of character devices.
2) Networking
 Standard networking applications (e.g. iperf) can be used to access
@@ -97,84 +113,86 @@ Currently the following AIMs are available
used to access the driver via the ALSA subsystem.
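+
+All components follow the same registration pattern: they fill a struct
+core_component with their callbacks and hand it to the core. The following
+minimal sketch (an illustration, not part of the driver sources; error
+handling omitted) shows that pattern:
+
+	static int my_probe(struct most_interface *iface, int channel_id,
+			    struct most_channel_config *cfg, char *name)
+	{
+		return 0; /* accept and set up the channel */
+	}
+
+	static int my_disconnect(struct most_interface *iface, int channel_id)
+	{
+		return 0; /* release per-channel resources */
+	}
+
+	static struct core_component my_comp = {
+		.name = "my_comp",
+		.probe_channel = my_probe,
+		.disconnect_channel = my_disconnect,
+		/* .rx_completion and .tx_completion as needed */
+	};
+
+	/* in the module's init and exit routines: */
+	most_register_component(&my_comp);
+	most_deregister_component(&my_comp);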
+ Section 2 Usage of the MOST Driver
- Section 2 Configuration
+ Section 2.1 Configuration
-See ABI/sysfs-class-most.txt
+See ABI/sysfs-bus-most.txt
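+
+For instance, a control rx channel could be configured as follows before it
+is linked. This is a sketch: $(CH_DIR) stands for the channel's directory
+on the MOST bus in sysfs, and the written values are mere examples.
+
+	$ echo 16 >$(CH_DIR)/set_number_of_buffers
+	$ echo 64 >$(CH_DIR)/set_buffer_size
+	$ echo rx >$(CH_DIR)/set_direction
+	$ echo control >$(CH_DIR)/set_datatype
+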
+ Section 2.2 Routing Channels
- Section 3 USB Padding
+To connect a configured channel to a certain core component and make it
+accessible to user space applications, the driver attribute 'add_link' is
+used. The configuration string passed to it has the following format:
-When transceiving synchronous or isochronous data, the number of packets per USB
-transaction and the sub-buffer size need to be configured. These values
-are needed for the driver to process buffer padding, as expected by hardware,
-which is for performance optimization purposes of the USB transmission.
+ "device_name:channel_name:component_name:link_name[.param]"
-When transmitting synchronous data the allocated channel width needs to be
-written to 'set_subbuffer_size'. Additionally, the number of MOST frames that
-should travel to the host within one USB transaction need to be written to
-'packets_per_xact'.
+It is the concatenation of up to four substrings separated by colons. The
+substrings contain the names of the MOST interface, the channel, the
+component driver and a custom name by which the link is going to be
+referenced. Since some components need additional information, the
+link name can be extended with a component-specific parameter (separated by
+a dot). If the link is made to the character device component, the link
+name also appears as a device node in the /dev directory.
-Internally the synchronous threshold is calculated as follows:
+Cdev component example:
+ $ echo "mdev0:ep_81:cdev:my_rx_channel" >$(DRV_DIR)/add_link
- frame_size = set_subbuffer_size * packets_per_xact
-In case 'packets_per_xact' is set to 0xFF the maximum number of packets,
-allocated within one MOST frame, is calculated that fit into _one_ 512 byte
-USB full packet.
+Sound component example:
- frame_size = floor(MTU_USB / bandwidth_sync) * bandwidth_sync
+The sound component needs an additional parameter to determine the audio
+resolution that is going to be used. The following formats are available:
-This frame_size is the number of synchronous data within an USB transaction,
-which renders MTU_USB - frame_size bytes for padding.
+ - "1x8" (Mono)
+ - "2x16" (16-bit stereo)
+ - "2x24" (24-bit stereo)
+ - "2x32" (32-bit stereo)
+ - "6x16" (16-bit surround 5.1)
-When transmitting isochronous AVP data the desired packet size needs to be
-written to 'set_subbuffer_size' and hardware will always expect two isochronous
-packets within one USB transaction. This renders
+ $ echo "mdev0:ep_81:sound:most51_playback.6x16" >$(DRV_DIR)/add_link
- MTU_USB - (2 * set_subbuffer_size)
-bytes for padding.
-
-Note that at least 2 times set_subbuffer_size bytes for isochronous data or
-set_subbuffer_size times packts_per_xact bytes for synchronous data need to be
-put in the transmission buffer and passed to the driver.
-Since HDMs are allowed to change a chosen configuration to best fit its
-constraints, it is recommended to always double check the configuration and read
-back the previously written files.
+ Section 2.3 USB Padding
+When transceiving synchronous or isochronous data, the number of packets
+per USB transaction and the sub-buffer size need to be configured. These
+values are needed for the driver to process buffer padding, as expected by
+the hardware, which serves to optimize the performance of the USB
+transmission.
+When transmitting synchronous data the allocated channel width needs to be
+written to 'set_subbuffer_size'. Additionally, the number of MOST frames
+that should travel to the host within one USB transaction needs to be
+written to 'packets_per_xact'.
- Section 4 Routing Channels
+The driver then calculates the synchronous threshold as follows:
-To connect a channel that has been configured as outlined above to an AIM and
-make it accessible to user space applications, the attribute file 'add_link' is
-used. To actually bind a channel to the AIM a string needs to be written to the
-file that complies with the following syntax:
+ frame_size = set_subbuffer_size * packets_per_xact
- "most_device:channel_name:link_name[.param]"
+In case 'packets_per_xact' is set to 0xFF, the driver calculates the
+maximum number of packets per MOST frame that fit into _one_ 512-byte USB
+full packet.
-The example above links the channel "channel_name" of the device "most_device"
-to the AIM. In case the AIM interfaces the VFS this would also create a device
-node "link_name" in the /dev directory. The parameter "param" is an AIM dependent
-string, which can be omitted in case the used AIM does not make any use of it.
+ frame_size = floor(MTU_USB / bandwidth_sync) * bandwidth_sync
-Cdev AIM example:
- $ echo "mdev0:ep_81:my_rx_channel" >add_link
- $ echo "mdev0:ep_81" >add_link
+This frame_size is the amount of synchronous data within a USB
+transaction, which renders MTU_USB - frame_size bytes for padding.
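+
+For illustration, assume an MTU_USB of 512 bytes and a channel width of 12
+bytes written to 'set_subbuffer_size' (example values; bandwidth_sync is
+taken here to be that channel width). With 'packets_per_xact' set to 0xFF:
+
+	frame_size = floor(512 / 12) * 12 = 504
+
+which renders 512 - 504 = 8 bytes for padding in each USB transaction.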
+When transmitting isochronous AVP data the desired packet size needs to be
+written to 'set_subbuffer_size' and hardware will always expect two
+isochronous packets within one USB transaction. This renders
-Sound/ALSA AIM example:
+ MTU_USB - (2 * set_subbuffer_size)
-The sound/ALSA AIM needs an additional parameter to determine the audio resolution
-that is going to be used. The following strings can be used:
+bytes for padding.
- - "1x8" (Mono)
- - "2x16" (16-bit stereo)
- - "2x24" (24-bit stereo)
- - "2x32" (32-bit stereo)
+Note that at least (2 * set_subbuffer_size) bytes for isochronous data or
+(set_subbuffer_size * packets_per_xact) bytes for synchronous data need to
+be put in the transmission buffer and passed to the driver.
- $ echo "mdev0:ep_81:audio_rx.2x16" >add_link
- $ echo "mdev0:ep_81" >add_link
+Since adapter drivers are allowed to change a chosen configuration to best
+fit their constraints, it is recommended to always double-check the
+configuration and read back the previously written files.
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index 0b9b9b539f70..20047abbe560 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,10 +1,15 @@
menuconfig MOST
- tristate "MOST driver"
+ tristate "MOST support"
depends on HAS_DMA
- select MOSTCORE
default n
---help---
- This option allows you to enable support for MOST Network transceivers.
+ Say Y here if you want to enable MOST support.
+ This driver needs at least one additional component to enable the
+ desired access from userspace (e.g. character devices) and one that
+ matches the network controller's hardware interface (e.g. USB).
+
+ To compile this driver as a module, choose M here: the
+ module will be called most_core.
If in doubt, say N here.
@@ -12,20 +17,18 @@ menuconfig MOST
if MOST
-source "drivers/staging/most/mostcore/Kconfig"
-
-source "drivers/staging/most/aim-cdev/Kconfig"
+source "drivers/staging/most/cdev/Kconfig"
-source "drivers/staging/most/aim-network/Kconfig"
+source "drivers/staging/most/net/Kconfig"
-source "drivers/staging/most/aim-sound/Kconfig"
+source "drivers/staging/most/sound/Kconfig"
-source "drivers/staging/most/aim-v4l2/Kconfig"
+source "drivers/staging/most/video/Kconfig"
-source "drivers/staging/most/hdm-dim2/Kconfig"
+source "drivers/staging/most/dim2/Kconfig"
-source "drivers/staging/most/hdm-i2c/Kconfig"
+source "drivers/staging/most/i2c/Kconfig"
-source "drivers/staging/most/hdm-usb/Kconfig"
+source "drivers/staging/most/usb/Kconfig"
endif
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index f5bbb9deaab5..f8bcf488ecf2 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MOSTCORE) += mostcore/
-obj-$(CONFIG_AIM_CDEV) += aim-cdev/
-obj-$(CONFIG_AIM_NETWORK) += aim-network/
-obj-$(CONFIG_AIM_SOUND) += aim-sound/
-obj-$(CONFIG_AIM_V4L2) += aim-v4l2/
-obj-$(CONFIG_HDM_DIM2) += hdm-dim2/
-obj-$(CONFIG_HDM_I2C) += hdm-i2c/
-obj-$(CONFIG_HDM_USB) += hdm-usb/
+obj-$(CONFIG_MOST) += most_core.o
+most_core-y := core.o
+ccflags-y += -Idrivers/staging/
+
+obj-$(CONFIG_MOST_CDEV) += cdev/
+obj-$(CONFIG_MOST_NET) += net/
+obj-$(CONFIG_MOST_SOUND) += sound/
+obj-$(CONFIG_MOST_VIDEO) += video/
+obj-$(CONFIG_MOST_DIM2) += dim2/
+obj-$(CONFIG_MOST_I2C) += i2c/
+obj-$(CONFIG_MOST_USB) += usb/
diff --git a/drivers/staging/most/aim-cdev/Makefile b/drivers/staging/most/aim-cdev/Makefile
deleted file mode 100644
index 0bcc6c637b75..000000000000
--- a/drivers/staging/most/aim-cdev/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_AIM_CDEV) += aim_cdev.o
-
-aim_cdev-objs := cdev.o
-ccflags-y += -Idrivers/staging/most/mostcore/
\ No newline at end of file
diff --git a/drivers/staging/most/aim-network/Makefile b/drivers/staging/most/aim-network/Makefile
deleted file mode 100644
index 840c1dd94873..000000000000
--- a/drivers/staging/most/aim-network/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_AIM_NETWORK) += aim_network.o
-
-aim_network-objs := networking.o
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-sound/Makefile b/drivers/staging/most/aim-sound/Makefile
deleted file mode 100644
index beba9586fd28..000000000000
--- a/drivers/staging/most/aim-sound/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_AIM_SOUND) += aim_sound.o
-
-aim_sound-objs := sound.o
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-v4l2/Makefile b/drivers/staging/most/aim-v4l2/Makefile
deleted file mode 100644
index 69a7524b466c..000000000000
--- a/drivers/staging/most/aim-v4l2/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_AIM_V4L2) += aim_v4l2.o
-
-aim_v4l2-objs := video.o
-
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-cdev/Kconfig b/drivers/staging/most/cdev/Kconfig
index 3c59f1bac127..2b04e26bcbea 100644
--- a/drivers/staging/most/aim-cdev/Kconfig
+++ b/drivers/staging/most/cdev/Kconfig
@@ -2,11 +2,11 @@
# MOST Cdev configuration
#
-config AIM_CDEV
- tristate "Cdev AIM"
+config MOST_CDEV
+ tristate "Cdev"
---help---
 Say Y here if you want to communicate via character devices.
To compile this driver as a module, choose M here: the
- module will be called aim_cdev.
\ No newline at end of file
+ module will be called most_cdev.
diff --git a/drivers/staging/most/cdev/Makefile b/drivers/staging/most/cdev/Makefile
new file mode 100644
index 000000000000..afb9870eb50f
--- /dev/null
+++ b/drivers/staging/most/cdev/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_CDEV) += most_cdev.o
+
+most_cdev-objs := cdev.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index 1e5cbc893496..c183489c4a1c 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * cdev.c - Application interfacing module for character devices
+ * cdev.c - Character device component for Mostcore
*
* Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -22,15 +16,17 @@
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
-#include "mostcore.h"
+#include "most/core.h"
-static dev_t aim_devno;
-static struct class *aim_class;
-static struct ida minor_id;
-static unsigned int major;
-static struct most_aim cdev_aim;
+static struct cdev_component {
+ dev_t devno;
+ struct ida minor_id;
+ unsigned int major;
+ struct class *class;
+ struct core_component cc;
+} comp;
-struct aim_channel {
+struct comp_channel {
wait_queue_head_t wq;
spinlock_t unlink; /* synchronization lock to unlink channels */
struct cdev cdev;
@@ -46,28 +42,28 @@ struct aim_channel {
struct list_head list;
};
-#define to_channel(d) container_of(d, struct aim_channel, cdev)
+#define to_channel(d) container_of(d, struct comp_channel, cdev)
static struct list_head channel_list;
static spinlock_t ch_list_lock;
-static inline bool ch_has_mbo(struct aim_channel *c)
+static inline bool ch_has_mbo(struct comp_channel *c)
{
- return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
+ return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}
-static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
+static inline bool ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
if (!kfifo_peek(&c->fifo, mbo)) {
- *mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
+ *mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
if (*mbo)
kfifo_in(&c->fifo, mbo, 1);
}
return *mbo;
}
-static struct aim_channel *get_channel(struct most_interface *iface, int id)
+static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
- struct aim_channel *c, *tmp;
+ struct comp_channel *c, *tmp;
unsigned long flags;
int found_channel = 0;
@@ -84,44 +80,44 @@ static struct aim_channel *get_channel(struct most_interface *iface, int id)
return c;
}
-static void stop_channel(struct aim_channel *c)
+static void stop_channel(struct comp_channel *c)
{
struct mbo *mbo;
while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
most_put_mbo(mbo);
- most_stop_channel(c->iface, c->channel_id, &cdev_aim);
+ most_stop_channel(c->iface, c->channel_id, &comp.cc);
}
-static void destroy_cdev(struct aim_channel *c)
+static void destroy_cdev(struct comp_channel *c)
{
unsigned long flags;
- device_destroy(aim_class, c->devno);
+ device_destroy(comp.class, c->devno);
cdev_del(&c->cdev);
spin_lock_irqsave(&ch_list_lock, flags);
list_del(&c->list);
spin_unlock_irqrestore(&ch_list_lock, flags);
}
-static void destroy_channel(struct aim_channel *c)
+static void destroy_channel(struct comp_channel *c)
{
- ida_simple_remove(&minor_id, MINOR(c->devno));
+ ida_simple_remove(&comp.minor_id, MINOR(c->devno));
kfifo_free(&c->fifo);
kfree(c);
}
/**
- * aim_open - implements the syscall to open the device
+ * comp_open - implements the syscall to open the device
* @inode: inode pointer
* @filp: file pointer
*
* This stores the channel pointer in the private data field of
* the file structure and activates the channel within the core.
*/
-static int aim_open(struct inode *inode, struct file *filp)
+static int comp_open(struct inode *inode, struct file *filp)
{
- struct aim_channel *c;
+ struct comp_channel *c;
int ret;
c = to_channel(inode->i_cdev);
@@ -149,7 +145,7 @@ static int aim_open(struct inode *inode, struct file *filp)
}
c->mbo_offs = 0;
- ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
+ ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
if (!ret)
c->access_ref = 1;
mutex_unlock(&c->io_mutex);
@@ -157,15 +153,15 @@ static int aim_open(struct inode *inode, struct file *filp)
}
/**
- * aim_close - implements the syscall to close the device
+ * comp_close - implements the syscall to close the device
* @inode: inode pointer
* @filp: file pointer
*
* This stops the channel within the core.
*/
-static int aim_close(struct inode *inode, struct file *filp)
+static int comp_close(struct inode *inode, struct file *filp)
{
- struct aim_channel *c = to_channel(inode->i_cdev);
+ struct comp_channel *c = to_channel(inode->i_cdev);
mutex_lock(&c->io_mutex);
spin_lock(&c->unlink);
@@ -182,19 +178,19 @@ static int aim_close(struct inode *inode, struct file *filp)
}
/**
- * aim_write - implements the syscall to write to the device
+ * comp_write - implements the syscall to write to the device
* @filp: file pointer
* @buf: pointer to user buffer
* @count: number of bytes to write
* @offset: offset from where to start writing
*/
-static ssize_t aim_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *offset)
+static ssize_t comp_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offset)
{
int ret;
size_t to_copy, left;
struct mbo *mbo = NULL;
- struct aim_channel *c = filp->private_data;
+ struct comp_channel *c = filp->private_data;
mutex_lock(&c->io_mutex);
while (c->dev && !ch_get_mbo(c, &mbo)) {
@@ -236,18 +232,18 @@ unlock:
}
/**
- * aim_read - implements the syscall to read from the device
+ * comp_read - implements the syscall to read from the device
* @filp: file pointer
* @buf: pointer to user buffer
* @count: number of bytes to read
* @offset: offset from where to start reading
*/
static ssize_t
-aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
+comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
size_t to_copy, not_copied, copied;
struct mbo *mbo;
- struct aim_channel *c = filp->private_data;
+ struct comp_channel *c = filp->private_data;
mutex_lock(&c->io_mutex);
while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
@@ -287,10 +283,10 @@ aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
return copied;
}
-static unsigned int aim_poll(struct file *filp, poll_table *wait)
+static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
- struct aim_channel *c = filp->private_data;
- unsigned int mask = 0;
+ struct comp_channel *c = filp->private_data;
+ __poll_t mask = 0;
poll_wait(filp, &c->wq, wait);
@@ -309,24 +305,24 @@ static unsigned int aim_poll(struct file *filp, poll_table *wait)
*/
static const struct file_operations channel_fops = {
.owner = THIS_MODULE,
- .read = aim_read,
- .write = aim_write,
- .open = aim_open,
- .release = aim_close,
- .poll = aim_poll,
+ .read = comp_read,
+ .write = comp_write,
+ .open = comp_open,
+ .release = comp_close,
+ .poll = comp_poll,
};
/**
- * aim_disconnect_channel - disconnect a channel
+ * comp_disconnect_channel - disconnect a channel
* @iface: pointer to interface instance
* @channel_id: channel index
*
* This frees allocated memory and removes the cdev that represents this
* channel in user space.
*/
-static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
+static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
- struct aim_channel *c;
+ struct comp_channel *c;
if (!iface) {
pr_info("Bad interface pointer\n");
@@ -354,15 +350,15 @@ static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
}
/**
- * aim_rx_completion - completion handler for rx channels
+ * comp_rx_completion - completion handler for rx channels
* @mbo: pointer to buffer object that has completed
*
* This searches for the channel linked to this MBO and stores it in the local
* fifo buffer.
*/
-static int aim_rx_completion(struct mbo *mbo)
+static int comp_rx_completion(struct mbo *mbo)
{
- struct aim_channel *c;
+ struct comp_channel *c;
if (!mbo)
return -EINVAL;
@@ -387,15 +383,15 @@ static int aim_rx_completion(struct mbo *mbo)
}
/**
- * aim_tx_completion - completion handler for tx channels
+ * comp_tx_completion - completion handler for tx channels
* @iface: pointer to interface instance
* @channel_id: channel index/ID
*
* This wakes sleeping processes in the wait-queue.
*/
-static int aim_tx_completion(struct most_interface *iface, int channel_id)
+static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
- struct aim_channel *c;
+ struct comp_channel *c;
if (!iface) {
pr_info("Bad interface pointer\n");
@@ -414,35 +410,33 @@ static int aim_tx_completion(struct most_interface *iface, int channel_id)
}
/**
- * aim_probe - probe function of the driver module
+ * comp_probe - probe function of the driver module
* @iface: pointer to interface instance
* @channel_id: channel index/ID
* @cfg: pointer to actual channel configuration
- * @parent: pointer to kobject (needed for sysfs hook-up)
* @name: name of the device to be created
*
 * This allocates a channel object and creates the device node in /dev
*
* Returns 0 on success or error code otherwise.
*/
-static int aim_probe(struct most_interface *iface, int channel_id,
- struct most_channel_config *cfg,
- struct kobject *parent, char *name)
+static int comp_probe(struct most_interface *iface, int channel_id,
+ struct most_channel_config *cfg, char *name)
{
- struct aim_channel *c;
+ struct comp_channel *c;
unsigned long cl_flags;
int retval;
int current_minor;
- if ((!iface) || (!cfg) || (!parent) || (!name)) {
- pr_info("Probing AIM with bad arguments");
+ if ((!iface) || (!cfg) || (!name)) {
+ pr_info("Probing component with bad arguments");
return -EINVAL;
}
c = get_channel(iface, channel_id);
if (c)
return -EEXIST;
- current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
+ current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
if (current_minor < 0)
return current_minor;
@@ -452,7 +446,7 @@ static int aim_probe(struct most_interface *iface, int channel_id,
goto error_alloc_channel;
}
- c->devno = MKDEV(major, current_minor);
+ c->devno = MKDEV(comp.major, current_minor);
cdev_init(&c->cdev, &channel_fops);
c->cdev.owner = THIS_MODULE;
cdev_add(&c->cdev, c->devno, 1);
@@ -472,11 +466,7 @@ static int aim_probe(struct most_interface *iface, int channel_id,
spin_lock_irqsave(&ch_list_lock, cl_flags);
list_add_tail(&c->list, &channel_list);
spin_unlock_irqrestore(&ch_list_lock, cl_flags);
- c->dev = device_create(aim_class,
- NULL,
- c->devno,
- NULL,
- "%s", name);
+ c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);
if (IS_ERR(c->dev)) {
retval = PTR_ERR(c->dev);
@@ -493,16 +483,18 @@ error_alloc_kfifo:
cdev_del(&c->cdev);
kfree(c);
error_alloc_channel:
- ida_simple_remove(&minor_id, current_minor);
+ ida_simple_remove(&comp.minor_id, current_minor);
return retval;
}
-static struct most_aim cdev_aim = {
- .name = "cdev",
- .probe_channel = aim_probe,
- .disconnect_channel = aim_disconnect_channel,
- .rx_completion = aim_rx_completion,
- .tx_completion = aim_tx_completion,
+static struct cdev_component comp = {
+ .cc = {
+ .name = "cdev",
+ .probe_channel = comp_probe,
+ .disconnect_channel = comp_disconnect_channel,
+ .rx_completion = comp_rx_completion,
+ .tx_completion = comp_tx_completion,
+ },
};
static int __init mod_init(void)
@@ -511,54 +503,52 @@ static int __init mod_init(void)
pr_info("init()\n");
+ comp.class = class_create(THIS_MODULE, "most_cdev");
+ if (IS_ERR(comp.class)) {
+ pr_info("No udev support.\n");
+ return PTR_ERR(comp.class);
+ }
+
INIT_LIST_HEAD(&channel_list);
spin_lock_init(&ch_list_lock);
- ida_init(&minor_id);
+ ida_init(&comp.minor_id);
- err = alloc_chrdev_region(&aim_devno, 0, 50, "cdev");
+ err = alloc_chrdev_region(&comp.devno, 0, 50, "cdev");
if (err < 0)
goto dest_ida;
- major = MAJOR(aim_devno);
-
- aim_class = class_create(THIS_MODULE, "most_cdev_aim");
- if (IS_ERR(aim_class)) {
- pr_err("no udev support\n");
- err = PTR_ERR(aim_class);
- goto free_cdev;
- }
- err = most_register_aim(&cdev_aim);
+ comp.major = MAJOR(comp.devno);
+ err = most_register_component(&comp.cc);
if (err)
- goto dest_class;
+ goto free_cdev;
return 0;
-dest_class:
- class_destroy(aim_class);
free_cdev:
- unregister_chrdev_region(aim_devno, 1);
+ unregister_chrdev_region(comp.devno, 1);
dest_ida:
- ida_destroy(&minor_id);
+ ida_destroy(&comp.minor_id);
+ class_destroy(comp.class);
return err;
}
static void __exit mod_exit(void)
{
- struct aim_channel *c, *tmp;
+ struct comp_channel *c, *tmp;
pr_info("exit module\n");
- most_deregister_aim(&cdev_aim);
+ most_deregister_component(&comp.cc);
list_for_each_entry_safe(c, tmp, &channel_list, list) {
destroy_cdev(c);
destroy_channel(c);
}
- class_destroy(aim_class);
- unregister_chrdev_region(aim_devno, 1);
- ida_destroy(&minor_id);
+ unregister_chrdev_region(comp.devno, 1);
+ ida_destroy(&comp.minor_id);
+ class_destroy(comp.class);
}
module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("character device AIM for mostcore");
+MODULE_DESCRIPTION("character device component for mostcore");
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
new file mode 100644
index 000000000000..3dda8d81bf0b
--- /dev/null
+++ b/drivers/staging/most/core.c
@@ -0,0 +1,1603 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * core.c - Implementation of core module of MOST Linux driver stack
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/sysfs.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <most/core.h>
+
+#define MAX_CHANNELS 64
+#define STRING_SIZE 80
+
+static struct ida mdev_id;
+static int dummy_num_buffers;
+
+static struct mostcore {
+ struct device dev;
+ struct device_driver drv;
+ struct bus_type bus;
+ struct list_head comp_list;
+} mc;
+
+#define to_driver(d) container_of(d, struct mostcore, drv)
+
+struct pipe {
+ struct core_component *comp;
+ int refs;
+ int num_buffers;
+};
+
+struct most_channel {
+ struct device dev;
+ struct completion cleanup;
+ atomic_t mbo_ref;
+ atomic_t mbo_nq_level;
+ u16 channel_id;
+ char name[STRING_SIZE];
+ bool is_poisoned;
+ struct mutex start_mutex;
+ struct mutex nq_mutex; /* nq thread synchronization */
+ int is_starving;
+ struct most_interface *iface;
+ struct most_channel_config cfg;
+ bool keep_mbo;
+ bool enqueue_halt;
+ struct list_head fifo;
+ spinlock_t fifo_lock;
+ struct list_head halt_fifo;
+ struct list_head list;
+ struct pipe pipe0;
+ struct pipe pipe1;
+ struct list_head trash_fifo;
+ struct task_struct *hdm_enqueue_task;
+ wait_queue_head_t hdm_fifo_wq;
+
+};
+
+#define to_channel(d) container_of(d, struct most_channel, dev)
+
+struct interface_private {
+ int dev_id;
+ char name[STRING_SIZE];
+ struct most_channel *channel[MAX_CHANNELS];
+ struct list_head channel_list;
+};
+
+static const struct {
+ int most_ch_data_type;
+ const char *name;
+} ch_data_type[] = {
+ { MOST_CH_CONTROL, "control\n" },
+ { MOST_CH_ASYNC, "async\n" },
+ { MOST_CH_SYNC, "sync\n" },
+ { MOST_CH_ISOC, "isoc\n"},
+ { MOST_CH_ISOC, "isoc_avp\n"},
+};
+
+/**
+ * list_pop_mbo - retrieves the first MBO of the list and removes it
+ * @ptr: the list head to grab the MBO from.
+ */
+#define list_pop_mbo(ptr) \
+({ \
+ struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
+ list_del(&_mbo->list); \
+ _mbo; \
+})
+
+/**
+ * most_free_mbo_coherent - free an MBO and its coherent buffer
+ * @mbo: most buffer
+ */
+static void most_free_mbo_coherent(struct mbo *mbo)
+{
+ struct most_channel *c = mbo->context;
+ u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+ dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
+ mbo->bus_address);
+ kfree(mbo);
+ if (atomic_sub_and_test(1, &c->mbo_ref))
+ complete(&c->cleanup);
+}
+
+/**
+ * flush_channel_fifos - clear the channel fifos
+ * @c: pointer to channel object
+ */
+static void flush_channel_fifos(struct most_channel *c)
+{
+ unsigned long flags, hf_flags;
+ struct mbo *mbo, *tmp;
+
+ if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
+ return;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
+ list_del(&mbo->list);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ most_free_mbo_coherent(mbo);
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ }
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+ spin_lock_irqsave(&c->fifo_lock, hf_flags);
+ list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
+ list_del(&mbo->list);
+ spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+ most_free_mbo_coherent(mbo);
+ spin_lock_irqsave(&c->fifo_lock, hf_flags);
+ }
+ spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+
+ if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
+ pr_info("WARN: fifo | trash fifo not empty\n");
+}
+
+/**
+ * flush_trash_fifo - clear the trash fifo
+ * @c: pointer to channel object
+ */
+static int flush_trash_fifo(struct most_channel *c)
+{
+ struct mbo *mbo, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
+ list_del(&mbo->list);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ most_free_mbo_coherent(mbo);
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ }
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ return 0;
+}
+
+static ssize_t available_directions_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+ unsigned int i = c->channel_id;
+
+ strcpy(buf, "");
+ if (c->iface->channel_vector[i].direction & MOST_CH_RX)
+ strcat(buf, "rx ");
+ if (c->iface->channel_vector[i].direction & MOST_CH_TX)
+ strcat(buf, "tx ");
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static ssize_t available_datatypes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+ unsigned int i = c->channel_id;
+
+ strcpy(buf, "");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
+ strcat(buf, "control ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
+ strcat(buf, "async ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
+ strcat(buf, "sync ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
+ strcat(buf, "isoc ");
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static ssize_t number_of_packet_buffers_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].num_buffers_packet);
+}
+
+static ssize_t number_of_stream_buffers_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].num_buffers_streaming);
+}
+
+static ssize_t size_of_packet_buffer_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].buffer_size_packet);
+}
+
+static ssize_t size_of_stream_buffer_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+ unsigned int i = c->channel_id;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ c->iface->channel_vector[i].buffer_size_streaming);
+}
+
+static ssize_t channel_starving_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
+}
+
+static ssize_t set_number_of_buffers_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
+}
+
+static ssize_t set_number_of_buffers_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct most_channel *c = to_channel(dev);
+ int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t set_buffer_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
+}
+
+static ssize_t set_buffer_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct most_channel *c = to_channel(dev);
+ int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t set_direction_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+
+ if (c->cfg.direction & MOST_CH_TX)
+ return snprintf(buf, PAGE_SIZE, "tx\n");
+ else if (c->cfg.direction & MOST_CH_RX)
+ return snprintf(buf, PAGE_SIZE, "rx\n");
+ return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t set_direction_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct most_channel *c = to_channel(dev);
+
+ if (!strcmp(buf, "dir_rx\n")) {
+ c->cfg.direction = MOST_CH_RX;
+ } else if (!strcmp(buf, "rx\n")) {
+ c->cfg.direction = MOST_CH_RX;
+ } else if (!strcmp(buf, "dir_tx\n")) {
+ c->cfg.direction = MOST_CH_TX;
+ } else if (!strcmp(buf, "tx\n")) {
+ c->cfg.direction = MOST_CH_TX;
+ } else {
+ pr_info("WARN: invalid attribute settings\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static ssize_t set_datatype_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+ struct most_channel *c = to_channel(dev);
+
+ for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+ if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
+ return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
+ }
+ return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t set_datatype_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int i;
+ struct most_channel *c = to_channel(dev);
+
+ for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+ if (!strcmp(buf, ch_data_type[i].name)) {
+ c->cfg.data_type = ch_data_type[i].most_ch_data_type;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(ch_data_type)) {
+ pr_info("WARN: invalid attribute settings\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static ssize_t set_subbuffer_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
+}
+
+static ssize_t set_subbuffer_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct most_channel *c = to_channel(dev);
+ int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t set_packets_per_xact_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_channel *c = to_channel(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
+}
+
+static ssize_t set_packets_per_xact_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct most_channel *c = to_channel(dev);
+ int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
+
+ if (ret)
+ return ret;
+ return count;
+}
+
+#define DEV_ATTR(_name) (&dev_attr_##_name.attr)
+
+static DEVICE_ATTR_RO(available_directions);
+static DEVICE_ATTR_RO(available_datatypes);
+static DEVICE_ATTR_RO(number_of_packet_buffers);
+static DEVICE_ATTR_RO(number_of_stream_buffers);
+static DEVICE_ATTR_RO(size_of_stream_buffer);
+static DEVICE_ATTR_RO(size_of_packet_buffer);
+static DEVICE_ATTR_RO(channel_starving);
+static DEVICE_ATTR_RW(set_buffer_size);
+static DEVICE_ATTR_RW(set_number_of_buffers);
+static DEVICE_ATTR_RW(set_direction);
+static DEVICE_ATTR_RW(set_datatype);
+static DEVICE_ATTR_RW(set_subbuffer_size);
+static DEVICE_ATTR_RW(set_packets_per_xact);
+
+static struct attribute *channel_attrs[] = {
+ DEV_ATTR(available_directions),
+ DEV_ATTR(available_datatypes),
+ DEV_ATTR(number_of_packet_buffers),
+ DEV_ATTR(number_of_stream_buffers),
+ DEV_ATTR(size_of_stream_buffer),
+ DEV_ATTR(size_of_packet_buffer),
+ DEV_ATTR(channel_starving),
+ DEV_ATTR(set_buffer_size),
+ DEV_ATTR(set_number_of_buffers),
+ DEV_ATTR(set_direction),
+ DEV_ATTR(set_datatype),
+ DEV_ATTR(set_subbuffer_size),
+ DEV_ATTR(set_packets_per_xact),
+ NULL,
+};
+
+static struct attribute_group channel_attr_group = {
+ .attrs = channel_attrs,
+};
+
+static const struct attribute_group *channel_attr_groups[] = {
+ &channel_attr_group,
+ NULL,
+};
+
+static ssize_t description_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_interface *iface = to_most_interface(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
+}
+
+static ssize_t interface_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct most_interface *iface = to_most_interface(dev);
+
+ switch (iface->interface) {
+ case ITYPE_LOOPBACK:
+ return snprintf(buf, PAGE_SIZE, "loopback\n");
+ case ITYPE_I2C:
+ return snprintf(buf, PAGE_SIZE, "i2c\n");
+ case ITYPE_I2S:
+ return snprintf(buf, PAGE_SIZE, "i2s\n");
+ case ITYPE_TSI:
+ return snprintf(buf, PAGE_SIZE, "tsi\n");
+ case ITYPE_HBI:
+ return snprintf(buf, PAGE_SIZE, "hbi\n");
+ case ITYPE_MEDIALB_DIM:
+ return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
+ case ITYPE_MEDIALB_DIM2:
+ return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
+ case ITYPE_USB:
+ return snprintf(buf, PAGE_SIZE, "usb\n");
+ case ITYPE_PCIE:
+ return snprintf(buf, PAGE_SIZE, "pcie\n");
+ }
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+static DEVICE_ATTR_RO(description);
+static DEVICE_ATTR_RO(interface);
+
+static struct attribute *interface_attrs[] = {
+ DEV_ATTR(description),
+ DEV_ATTR(interface),
+ NULL,
+};
+
+static struct attribute_group interface_attr_group = {
+ .attrs = interface_attrs,
+};
+
+static const struct attribute_group *interface_attr_groups[] = {
+ &interface_attr_group,
+ NULL,
+};
+
+static struct core_component *match_component(char *name)
+{
+ struct core_component *comp;
+
+ list_for_each_entry(comp, &mc.comp_list, list) {
+ if (!strcmp(comp->name, name))
+ return comp;
+ }
+ return NULL;
+}
+
+struct show_links_data {
+ int offs;
+ char *buf;
+};
+
+static int print_links(struct device *dev, void *data)
+{
+ struct show_links_data *d = data;
+ int offs = d->offs;
+ char *buf = d->buf;
+ struct most_channel *c;
+ struct most_interface *iface = to_most_interface(dev);
+
+ list_for_each_entry(c, &iface->p->channel_list, list) {
+ if (c->pipe0.comp) {
+ offs += snprintf(buf + offs,
+ PAGE_SIZE - offs,
+ "%s:%s:%s\n",
+ c->pipe0.comp->name,
+ dev_name(&iface->dev),
+ dev_name(&c->dev));
+ }
+ if (c->pipe1.comp) {
+ offs += snprintf(buf + offs,
+ PAGE_SIZE - offs,
+ "%s:%s:%s\n",
+ c->pipe1.comp->name,
+ dev_name(&iface->dev),
+ dev_name(&c->dev));
+ }
+ }
+ d->offs = offs;
+ return 0;
+}
+
+static ssize_t links_show(struct device_driver *drv, char *buf)
+{
+ struct show_links_data d = { .buf = buf };
+
+ bus_for_each_dev(&mc.bus, NULL, &d, print_links);
+ return d.offs;
+}
+
+static ssize_t components_show(struct device_driver *drv, char *buf)
+{
+ struct core_component *comp;
+ int offs = 0;
+
+ list_for_each_entry(comp, &mc.comp_list, list) {
+ offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
+ comp->name);
+ }
+ return offs;
+}
+/**
+ * split_string - parses buf and extracts ':' separated substrings.
+ *
+ * @buf: complete string from attribute 'add_channel'
+ * @a: storage for 1st substring (=interface name)
+ * @b: storage for 2nd substring (=channel name)
+ * @c: storage for 3rd substring (=component name)
+ * @d: storage for optional 4th substring (=user defined name)
+ *
+ * Examples:
+ *
+ * Input: "mdev0:ch6:cdev:my_channel\n" or
+ * "mdev0:ch6:cdev:my_channel"
+ *
+ * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev" *d -> "my_channel"
+ *
+ * Input: "mdev1:ep81:cdev\n"
+ * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d -> ""
+ *
+ * Input: "mdev1:ep81"
+ * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d == NULL
+ */
+static int split_string(char *buf, char **a, char **b, char **c, char **d)
+{
+ *a = strsep(&buf, ":");
+ if (!*a)
+ return -EIO;
+
+ *b = strsep(&buf, ":\n");
+ if (!*b)
+ return -EIO;
+
+ *c = strsep(&buf, ":\n");
+ if (!*c)
+ return -EIO;
+
+ if (d)
+ *d = strsep(&buf, ":\n");
+
+ return 0;
+}
+
+static int match_bus_dev(struct device *dev, void *data)
+{
+ char *mdev_name = data;
+
+ return !strcmp(dev_name(dev), mdev_name);
+}
+
+/**
+ * get_channel - get pointer to channel
+ * @mdev: name of the device interface
+ * @mdev_ch: name of channel
+ */
+static struct most_channel *get_channel(char *mdev, char *mdev_ch)
+{
+ struct device *dev = NULL;
+ struct most_interface *iface;
+ struct most_channel *c, *tmp;
+
+ dev = bus_find_device(&mc.bus, NULL, mdev, match_bus_dev);
+ if (!dev)
+ return NULL;
+ iface = to_most_interface(dev);
+ list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
+ if (!strcmp(dev_name(&c->dev), mdev_ch))
+ return c;
+ }
+ return NULL;
+}
+
+static
+inline int link_channel_to_component(struct most_channel *c,
+ struct core_component *comp,
+ char *comp_param)
+{
+ int ret;
+ struct core_component **comp_ptr;
+
+ if (!c->pipe0.comp)
+ comp_ptr = &c->pipe0.comp;
+ else if (!c->pipe1.comp)
+ comp_ptr = &c->pipe1.comp;
+ else
+ return -ENOSPC;
+
+ *comp_ptr = comp;
+ ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, comp_param);
+ if (ret) {
+ *comp_ptr = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * add_link_store - store function for add_link attribute
+ * @drv: device driver
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * This parses the string given by buf and splits it into
+ * four substrings. Note: last substring is optional. In case a cdev
+ * component is loaded the optional 4th substring will make up the name of
+ * device node in the /dev directory. If omitted, the device node will
+ * inherit the channel's name within sysfs.
+ *
+ * Searches for (device, channel) pair and probes the component
+ *
+ * Example:
+ * (1) echo "mdev0:ch6:cdev:my_rxchannel" >add_link
+ * (2) echo "mdev1:ep81:cdev" >add_link
+ *
+ * (1) would create the device node /dev/my_rxchannel
+ * (2) would create the device node /dev/mdev1-ep81
+ */
+static ssize_t add_link_store(struct device_driver *drv,
+ const char *buf,
+ size_t len)
+{
+ struct most_channel *c;
+ struct core_component *comp;
+ char buffer[STRING_SIZE];
+ char *mdev;
+ char *mdev_ch;
+ char *comp_name;
+ char *comp_param;
+ char devnod_buf[STRING_SIZE];
+ int ret;
+ size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+ strlcpy(buffer, buf, max_len);
+ ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, &comp_param);
+ if (ret)
+ return ret;
+ comp = match_component(comp_name);
+ if (!comp)
+ return -ENODEV;
+ if (!comp_param || *comp_param == 0) {
+ snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
+ mdev_ch);
+ comp_param = devnod_buf;
+ }
+
+ c = get_channel(mdev, mdev_ch);
+ if (!c)
+ return -ENODEV;
+
+ ret = link_channel_to_component(c, comp, comp_param);
+ if (ret)
+ return ret;
+ return len;
+}
+
+/**
+ * remove_link_store - store function for remove_link attribute
+ * @drv: device driver
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * Example:
+ * echo "mdev0:ep81" >remove_link
+ */
+static ssize_t remove_link_store(struct device_driver *drv,
+ const char *buf,
+ size_t len)
+{
+ struct most_channel *c;
+ struct core_component *comp;
+ char buffer[STRING_SIZE];
+ char *mdev;
+ char *mdev_ch;
+ char *comp_name;
+ int ret;
+ size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+ strlcpy(buffer, buf, max_len);
+ ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
+ if (ret)
+ return ret;
+ comp = match_component(comp_name);
+ if (!comp)
+ return -ENODEV;
+ c = get_channel(mdev, mdev_ch);
+ if (!c)
+ return -ENODEV;
+
+ if (comp->disconnect_channel(c->iface, c->channel_id))
+ return -EIO;
+ if (c->pipe0.comp == comp)
+ c->pipe0.comp = NULL;
+ if (c->pipe1.comp == comp)
+ c->pipe1.comp = NULL;
+ return len;
+}
+
+#define DRV_ATTR(_name) (&driver_attr_##_name.attr)
+
+static DRIVER_ATTR_RO(links);
+static DRIVER_ATTR_RO(components);
+static DRIVER_ATTR_WO(add_link);
+static DRIVER_ATTR_WO(remove_link);
+
+static struct attribute *mc_attrs[] = {
+ DRV_ATTR(links),
+ DRV_ATTR(components),
+ DRV_ATTR(add_link),
+ DRV_ATTR(remove_link),
+ NULL,
+};
+
+static struct attribute_group mc_attr_group = {
+ .attrs = mc_attrs,
+};
+
+static const struct attribute_group *mc_attr_groups[] = {
+ &mc_attr_group,
+ NULL,
+};
+
+static int most_match(struct device *dev, struct device_driver *drv)
+{
+ if (!strcmp(dev_name(dev), "most"))
+ return 0;
+ else
+ return 1;
+}
+
+static inline void trash_mbo(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_channel *c = mbo->context;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_add(&mbo->list, &c->trash_fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+}
+
+static bool hdm_mbo_ready(struct most_channel *c)
+{
+ bool empty;
+
+ if (c->enqueue_halt)
+ return false;
+
+ spin_lock_irq(&c->fifo_lock);
+ empty = list_empty(&c->halt_fifo);
+ spin_unlock_irq(&c->fifo_lock);
+
+ return !empty;
+}
+
+static void nq_hdm_mbo(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_channel *c = mbo->context;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ list_add_tail(&mbo->list, &c->halt_fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ wake_up_interruptible(&c->hdm_fifo_wq);
+}
+
+static int hdm_enqueue_thread(void *data)
+{
+ struct most_channel *c = data;
+ struct mbo *mbo;
+ int ret;
+ typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
+
+ while (likely(!kthread_should_stop())) {
+ wait_event_interruptible(c->hdm_fifo_wq,
+ hdm_mbo_ready(c) ||
+ kthread_should_stop());
+
+ mutex_lock(&c->nq_mutex);
+ spin_lock_irq(&c->fifo_lock);
+ if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
+ spin_unlock_irq(&c->fifo_lock);
+ mutex_unlock(&c->nq_mutex);
+ continue;
+ }
+
+ mbo = list_pop_mbo(&c->halt_fifo);
+ spin_unlock_irq(&c->fifo_lock);
+
+ if (c->cfg.direction == MOST_CH_RX)
+ mbo->buffer_length = c->cfg.buffer_size;
+
+ ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
+ mutex_unlock(&c->nq_mutex);
+
+ if (unlikely(ret)) {
+ pr_err("hdm enqueue failed\n");
+ nq_hdm_mbo(mbo);
+ c->hdm_enqueue_task = NULL;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int run_enqueue_thread(struct most_channel *c, int channel_id)
+{
+ struct task_struct *task =
+ kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
+ channel_id);
+
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ c->hdm_enqueue_task = task;
+ return 0;
+}
+
+/**
+ * arm_mbo - recycle MBO for further usage
+ * @mbo: most buffer
+ *
+ * This puts an MBO back to the list to have it ready for upcoming
+ * tx transactions.
+ *
+ * In case the MBO belongs to a channel that recently has been
+ * poisoned, the MBO is scheduled to be trashed.
+ * Calls the completion handler of an attached component.
+ */
+static void arm_mbo(struct mbo *mbo)
+{
+ unsigned long flags;
+ struct most_channel *c;
+
+ BUG_ON((!mbo) || (!mbo->context));
+ c = mbo->context;
+
+ if (c->is_poisoned) {
+ trash_mbo(mbo);
+ return;
+ }
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ ++*mbo->num_buffers_ptr;
+ list_add_tail(&mbo->list, &c->fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+ if (c->pipe0.refs && c->pipe0.comp->tx_completion)
+ c->pipe0.comp->tx_completion(c->iface, c->channel_id);
+
+ if (c->pipe1.refs && c->pipe1.comp->tx_completion)
+ c->pipe1.comp->tx_completion(c->iface, c->channel_id);
+}
+
+/**
+ * arm_mbo_chain - helper function that arms an MBO chain for the HDM
+ * @c: pointer to interface channel
+ * @dir: direction of the channel
+ * @compl: pointer to completion function
+ *
+ * This allocates buffer objects including the containing DMA coherent
+ * buffer and puts them in the fifo.
+ * Buffers of Rx channels are put in the kthread fifo, hence immediately
+ * submitted to the HDM.
+ *
+ * Returns the number of allocated and enqueued MBOs.
+ */
+static int arm_mbo_chain(struct most_channel *c, int dir,
+ void (*compl)(struct mbo *))
+{
+ unsigned int i;
+ int retval;
+ struct mbo *mbo;
+ u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+ atomic_set(&c->mbo_nq_level, 0);
+
+ for (i = 0; i < c->cfg.num_buffers; i++) {
+ mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
+ if (!mbo) {
+ retval = i;
+ goto _exit;
+ }
+ mbo->context = c;
+ mbo->ifp = c->iface;
+ mbo->hdm_channel_id = c->channel_id;
+ mbo->virt_address = dma_alloc_coherent(NULL,
+ coherent_buf_size,
+ &mbo->bus_address,
+ GFP_KERNEL);
+ if (!mbo->virt_address) {
+ pr_info("WARN: No DMA coherent buffer.\n");
+ retval = i;
+ goto _error1;
+ }
+ mbo->complete = compl;
+ mbo->num_buffers_ptr = &dummy_num_buffers;
+ if (dir == MOST_CH_RX) {
+ nq_hdm_mbo(mbo);
+ atomic_inc(&c->mbo_nq_level);
+ } else {
+ arm_mbo(mbo);
+ }
+ }
+ return i;
+
+_error1:
+ kfree(mbo);
+_exit:
+ return retval;
+}
+
+/**
+ * most_submit_mbo - submits an MBO to fifo
+ * @mbo: most buffer
+ */
+void most_submit_mbo(struct mbo *mbo)
+{
+ if (WARN_ONCE(!mbo || !mbo->context,
+ "bad mbo or missing channel reference\n"))
+ return;
+
+ nq_hdm_mbo(mbo);
+}
+EXPORT_SYMBOL_GPL(most_submit_mbo);
+
+/**
+ * most_write_completion - write completion handler
+ * @mbo: most buffer
+ *
+ * This recycles the MBO for further usage. In case the channel has been
+ * poisoned, the MBO is scheduled to be trashed.
+ */
+static void most_write_completion(struct mbo *mbo)
+{
+ struct most_channel *c;
+
+ BUG_ON((!mbo) || (!mbo->context));
+
+ c = mbo->context;
+ if (mbo->status == MBO_E_INVAL)
+ pr_info("WARN: Tx MBO status: invalid\n");
+ if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
+ trash_mbo(mbo);
+ else
+ arm_mbo(mbo);
+}
+
+int channel_has_mbo(struct most_interface *iface, int id,
+ struct core_component *comp)
+{
+ struct most_channel *c = iface->p->channel[id];
+ unsigned long flags;
+ int empty;
+
+ if (unlikely(!c))
+ return -EINVAL;
+
+ if (c->pipe0.refs && c->pipe1.refs &&
+ ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
+ (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
+ return 0;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ empty = list_empty(&c->fifo);
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ return !empty;
+}
+EXPORT_SYMBOL_GPL(channel_has_mbo);
+
+/**
+ * most_get_mbo - get pointer to an MBO of pool
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ * @comp: driver component
+ *
+ * This attempts to get a free buffer out of the channel fifo.
+ * Returns a pointer to MBO on success or NULL otherwise.
+ */
+struct mbo *most_get_mbo(struct most_interface *iface, int id,
+ struct core_component *comp)
+{
+ struct mbo *mbo;
+ struct most_channel *c;
+ unsigned long flags;
+ int *num_buffers_ptr;
+
+ c = iface->p->channel[id];
+ if (unlikely(!c))
+ return NULL;
+
+ if (c->pipe0.refs && c->pipe1.refs &&
+ ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
+ (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
+ return NULL;
+
+ if (comp == c->pipe0.comp)
+ num_buffers_ptr = &c->pipe0.num_buffers;
+ else if (comp == c->pipe1.comp)
+ num_buffers_ptr = &c->pipe1.num_buffers;
+ else
+ num_buffers_ptr = &dummy_num_buffers;
+
+ spin_lock_irqsave(&c->fifo_lock, flags);
+ if (list_empty(&c->fifo)) {
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+ return NULL;
+ }
+ mbo = list_pop_mbo(&c->fifo);
+ --*num_buffers_ptr;
+ spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+ mbo->num_buffers_ptr = num_buffers_ptr;
+ mbo->buffer_length = c->cfg.buffer_size;
+ return mbo;
+}
+EXPORT_SYMBOL_GPL(most_get_mbo);
+
+/**
+ * most_put_mbo - return buffer to pool
+ * @mbo: most buffer
+ */
+void most_put_mbo(struct mbo *mbo)
+{
+ struct most_channel *c = mbo->context;
+
+ if (c->cfg.direction == MOST_CH_TX) {
+ arm_mbo(mbo);
+ return;
+ }
+ nq_hdm_mbo(mbo);
+ atomic_inc(&c->mbo_nq_level);
+}
+EXPORT_SYMBOL_GPL(most_put_mbo);
+
+/**
+ * most_read_completion - read completion handler
+ * @mbo: most buffer
+ *
+ * This function is called by the HDM when data has been received from the
+ * hardware and copied to the buffer of the MBO.
+ *
+ * In case the channel has been poisoned it puts the buffer in the trash queue.
+ * Otherwise, it passes the buffer to a component for further processing.
+ */
+static void most_read_completion(struct mbo *mbo)
+{
+ struct most_channel *c = mbo->context;
+
+ if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
+ trash_mbo(mbo);
+ return;
+ }
+
+ if (mbo->status == MBO_E_INVAL) {
+ nq_hdm_mbo(mbo);
+ atomic_inc(&c->mbo_nq_level);
+ return;
+ }
+
+ if (atomic_sub_and_test(1, &c->mbo_nq_level))
+ c->is_starving = 1;
+
+ if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
+ c->pipe0.comp->rx_completion(mbo) == 0)
+ return;
+
+ if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
+ c->pipe1.comp->rx_completion(mbo) == 0)
+ return;
+
+ most_put_mbo(mbo);
+}
+
+/**
+ * most_start_channel - prepares a channel for communication
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ * @comp: driver component
+ *
+ * This prepares the channel for usage and cross-checks whether the
+ * channel has been properly configured.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+int most_start_channel(struct most_interface *iface, int id,
+ struct core_component *comp)
+{
+ int num_buffer;
+ int ret;
+ struct most_channel *c = iface->p->channel[id];
+
+ if (unlikely(!c))
+ return -EINVAL;
+
+ mutex_lock(&c->start_mutex);
+ if (c->pipe0.refs + c->pipe1.refs > 0)
+ goto out; /* already started by another component */
+
+ if (!try_module_get(iface->mod)) {
+ pr_info("failed to acquire HDM lock\n");
+ mutex_unlock(&c->start_mutex);
+ return -ENOLCK;
+ }
+
+ c->cfg.extra_len = 0;
+ if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
+ pr_info("channel configuration failed. Go check settings...\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ init_waitqueue_head(&c->hdm_fifo_wq);
+
+ if (c->cfg.direction == MOST_CH_RX)
+ num_buffer = arm_mbo_chain(c, c->cfg.direction,
+ most_read_completion);
+ else
+ num_buffer = arm_mbo_chain(c, c->cfg.direction,
+ most_write_completion);
+ if (unlikely(!num_buffer)) {
+ pr_info("failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = run_enqueue_thread(c, id);
+ if (ret)
+ goto error;
+
+ c->is_starving = 0;
+ c->pipe0.num_buffers = c->cfg.num_buffers / 2;
+ c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
+ atomic_set(&c->mbo_ref, num_buffer);
+
+out:
+ if (comp == c->pipe0.comp)
+ c->pipe0.refs++;
+ if (comp == c->pipe1.comp)
+ c->pipe1.refs++;
+ mutex_unlock(&c->start_mutex);
+ return 0;
+
+error:
+ module_put(iface->mod);
+ mutex_unlock(&c->start_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(most_start_channel);
+
+/**
+ * most_stop_channel - stops a running channel
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ * @comp: driver component
+ */
+int most_stop_channel(struct most_interface *iface, int id,
+ struct core_component *comp)
+{
+ struct most_channel *c;
+
+ if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
+ pr_err("Bad interface or index out of range\n");
+ return -EINVAL;
+ }
+ c = iface->p->channel[id];
+ if (unlikely(!c))
+ return -EINVAL;
+
+ mutex_lock(&c->start_mutex);
+ if (c->pipe0.refs + c->pipe1.refs >= 2)
+ goto out;
+
+ if (c->hdm_enqueue_task)
+ kthread_stop(c->hdm_enqueue_task);
+ c->hdm_enqueue_task = NULL;
+
+ if (iface->mod)
+ module_put(iface->mod);
+
+ c->is_poisoned = true;
+ if (c->iface->poison_channel(c->iface, c->channel_id)) {
+ pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
+ c->iface->description);
+ mutex_unlock(&c->start_mutex);
+ return -EAGAIN;
+ }
+ flush_trash_fifo(c);
+ flush_channel_fifos(c);
+
+#ifdef CMPL_INTERRUPTIBLE
+ if (wait_for_completion_interruptible(&c->cleanup)) {
+ pr_info("Interrupted while clean up ch %d\n", c->channel_id);
+ mutex_unlock(&c->start_mutex);
+ return -EINTR;
+ }
+#else
+ wait_for_completion(&c->cleanup);
+#endif
+ c->is_poisoned = false;
+
+out:
+ if (comp == c->pipe0.comp)
+ c->pipe0.refs--;
+ if (comp == c->pipe1.comp)
+ c->pipe1.refs--;
+ mutex_unlock(&c->start_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_stop_channel);
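
A usage sketch for the start/stop pair: since the core reference-counts per pipe, two components can share one channel and simply bracket their usage (names hypothetical):

    static int my_comp_open(struct most_interface *iface, int id,
                            struct core_component *comp)
    {
            return most_start_channel(iface, id, comp); /* first user arms buffers */
    }

    static int my_comp_release(struct most_interface *iface, int id,
                               struct core_component *comp)
    {
            return most_stop_channel(iface, id, comp);  /* last user poisons channel */
    }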
+
+/**
+ * most_register_component - registers a driver component with the core
+ * @comp: driver component
+ */
+int most_register_component(struct core_component *comp)
+{
+ if (!comp) {
+ pr_err("Bad component\n");
+ return -EINVAL;
+ }
+ list_add_tail(&comp->list, &mc.comp_list);
+ pr_info("registered new core component %s\n", comp->name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_register_component);
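
A registration sketch under the new naming; the callback implementations are hypothetical, but the fields match struct core_component as defined in core.h further down:

    static struct core_component my_comp = {
            .name = "my_comp",
            .probe_channel = my_comp_probe_channel,
            .disconnect_channel = my_comp_disconnect_channel,
            .rx_completion = my_comp_rx_completion,
            .tx_completion = my_comp_tx_completion,
    };

    static int __init my_comp_init(void)
    {
            return most_register_component(&my_comp);
    }

    static void __exit my_comp_exit(void)
    {
            most_deregister_component(&my_comp);
    }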
+
+static int disconnect_channels(struct device *dev, void *data)
+{
+ struct most_interface *iface;
+ struct most_channel *c, *tmp;
+ struct core_component *comp = data;
+
+ iface = to_most_interface(dev);
+ list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
+ if (c->pipe0.comp == comp || c->pipe1.comp == comp)
+ comp->disconnect_channel(c->iface, c->channel_id);
+ if (c->pipe0.comp == comp)
+ c->pipe0.comp = NULL;
+ if (c->pipe1.comp == comp)
+ c->pipe1.comp = NULL;
+ }
+ return 0;
+}
+
+/**
+ * most_deregister_component - deregisters a driver component from the core
+ * @comp: driver component
+ */
+int most_deregister_component(struct core_component *comp)
+{
+ if (!comp) {
+ pr_err("Bad component\n");
+ return -EINVAL;
+ }
+
+ bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
+ list_del(&comp->list);
+ pr_info("deregistering component %s\n", comp->name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(most_deregister_component);
+
+static void release_interface(struct device *dev)
+{
+ pr_info("releasing interface dev %s...\n", dev_name(dev));
+}
+
+static void release_channel(struct device *dev)
+{
+ pr_info("releasing channel dev %s...\n", dev_name(dev));
+}
+
+/**
+ * most_register_interface - registers an interface with the core
+ * @iface: device interface
+ *
+ * Allocates and initializes a new interface instance and all of its channels.
+ * Returns 0 on success or an error code otherwise.
+ */
+int most_register_interface(struct most_interface *iface)
+{
+ unsigned int i;
+ int id;
+ struct most_channel *c;
+
+ if (!iface || !iface->enqueue || !iface->configure ||
+ !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
+ pr_err("Bad interface or channel overflow\n");
+ return -EINVAL;
+ }
+
+ id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ pr_info("Failed to alloc mdev ID\n");
+ return id;
+ }
+
+ iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
+ if (!iface->p) {
+ pr_info("Failed to allocate interface instance\n");
+ ida_simple_remove(&mdev_id, id);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&iface->p->channel_list);
+ iface->p->dev_id = id;
+ snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
+ iface->dev.init_name = iface->p->name;
+ iface->dev.bus = &mc.bus;
+ iface->dev.parent = &mc.dev;
+ iface->dev.groups = interface_attr_groups;
+ iface->dev.release = release_interface;
+ if (device_register(&iface->dev)) {
+ pr_err("registering iface->dev failed\n");
+ kfree(iface->p);
+ ida_simple_remove(&mdev_id, id);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < iface->num_channels; i++) {
+ const char *name_suffix = iface->channel_vector[i].name_suffix;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ goto free_instance;
+ if (!name_suffix)
+ snprintf(c->name, STRING_SIZE, "ch%d", i);
+ else
+ snprintf(c->name, STRING_SIZE, "%s", name_suffix);
+ c->dev.init_name = c->name;
+ c->dev.parent = &iface->dev;
+ c->dev.groups = channel_attr_groups;
+ c->dev.release = release_channel;
+ if (device_register(&c->dev)) {
+ pr_err("registering c->dev failed\n");
+ goto free_instance_nodev;
+ }
+ iface->p->channel[i] = c;
+ c->is_starving = 0;
+ c->iface = iface;
+ c->channel_id = i;
+ c->keep_mbo = false;
+ c->enqueue_halt = false;
+ c->is_poisoned = false;
+ c->cfg.direction = 0;
+ c->cfg.data_type = 0;
+ c->cfg.num_buffers = 0;
+ c->cfg.buffer_size = 0;
+ c->cfg.subbuffer_size = 0;
+ c->cfg.packets_per_xact = 0;
+ spin_lock_init(&c->fifo_lock);
+ INIT_LIST_HEAD(&c->fifo);
+ INIT_LIST_HEAD(&c->trash_fifo);
+ INIT_LIST_HEAD(&c->halt_fifo);
+ init_completion(&c->cleanup);
+ atomic_set(&c->mbo_ref, 0);
+ mutex_init(&c->start_mutex);
+ mutex_init(&c->nq_mutex);
+ list_add_tail(&c->list, &iface->p->channel_list);
+ }
+ pr_info("registered new device mdev%d (%s)\n",
+ id, iface->description);
+ return 0;
+
+free_instance_nodev:
+ kfree(c);
+
+free_instance:
+ while (i > 0) {
+ c = iface->p->channel[--i];
+ device_unregister(&c->dev);
+ kfree(c);
+ }
+ kfree(iface->p);
+ device_unregister(&iface->dev);
+ ida_simple_remove(&mdev_id, id);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(most_register_interface);
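
On the HDM side, registration reduces to filling in the mandatory callbacks checked above (enqueue, configure, poison_channel) and calling most_register_interface(). A hedged sketch; every my_hdm_* symbol and MY_NUM_CHANNELS are hypothetical:

    static int my_hdm_probe(struct platform_device *pdev)
    {
            struct my_hdm *dev;

            dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
            if (!dev)
                    return -ENOMEM;

            dev->most_iface.mod = THIS_MODULE;
            dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
            dev->most_iface.description = "my_hdm";
            dev->most_iface.num_channels = MY_NUM_CHANNELS;
            dev->most_iface.channel_vector = dev->capabilities;
            dev->most_iface.configure = my_hdm_configure;
            dev->most_iface.enqueue = my_hdm_enqueue;
            dev->most_iface.poison_channel = my_hdm_poison_channel;

            return most_register_interface(&dev->most_iface);
    }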
+
+/**
+ * most_deregister_interface - deregisters an interface from the core
+ * @iface: device interface
+ *
+ * Before removing an interface instance from the list, all running
+ * channels are stopped and poisoned.
+ */
+void most_deregister_interface(struct most_interface *iface)
+{
+ int i;
+ struct most_channel *c;
+
+ pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev), iface->description);
+ for (i = 0; i < iface->num_channels; i++) {
+ c = iface->p->channel[i];
+ if (c->pipe0.comp)
+ c->pipe0.comp->disconnect_channel(c->iface,
+ c->channel_id);
+ if (c->pipe1.comp)
+ c->pipe1.comp->disconnect_channel(c->iface,
+ c->channel_id);
+ c->pipe0.comp = NULL;
+ c->pipe1.comp = NULL;
+ list_del(&c->list);
+ device_unregister(&c->dev);
+ kfree(c);
+ }
+
+ ida_simple_remove(&mdev_id, iface->p->dev_id);
+ kfree(iface->p);
+ device_unregister(&iface->dev);
+}
+EXPORT_SYMBOL_GPL(most_deregister_interface);
+
+/**
+ * most_stop_enqueue - prevents core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This is called by an HDM that temporarily cannot attend to its duties
+ * and needs the core to back off. The core will not enqueue any further
+ * packets until the flagging HDM calls most_resume_enqueue().
+ */
+void most_stop_enqueue(struct most_interface *iface, int id)
+{
+ struct most_channel *c = iface->p->channel[id];
+
+ if (!c)
+ return;
+
+ mutex_lock(&c->nq_mutex);
+ c->enqueue_halt = true;
+ mutex_unlock(&c->nq_mutex);
+}
+EXPORT_SYMBOL_GPL(most_stop_enqueue);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * sitting in the wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int id)
+{
+ struct most_channel *c = iface->p->channel[id];
+
+ if (!c)
+ return;
+
+ mutex_lock(&c->nq_mutex);
+ c->enqueue_halt = false;
+ mutex_unlock(&c->nq_mutex);
+
+ wake_up_interruptible(&c->hdm_fifo_wq);
+}
+EXPORT_SYMBOL_GPL(most_resume_enqueue);
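
A flow-control sketch: an HDM might throttle the core when its hardware TX FIFO fills and release it again from the "space available" interrupt path. The interrupt wiring and the fill-level check are hypothetical:

    static irqreturn_t my_hdm_isr(int irq, void *data)
    {
            struct my_hdm *dev = data;

            if (my_hdm_tx_fifo_full(dev))           /* hypothetical HW query */
                    most_stop_enqueue(&dev->most_iface, dev->tx_ch_id);
            else
                    most_resume_enqueue(&dev->most_iface, dev->tx_ch_id);

            return IRQ_HANDLED;
    }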
+
+static void release_most_sub(struct device *dev)
+{
+ pr_info("releasing most_subsystem\n");
+}
+
+static int __init most_init(void)
+{
+ int err;
+
+ pr_info("init()\n");
+ INIT_LIST_HEAD(&mc.comp_list);
+ ida_init(&mdev_id);
+
+ mc.bus.name = "most";
+ mc.bus.match = most_match;
+ mc.drv.name = "most_core";
+ mc.drv.bus = &mc.bus;
+ mc.drv.groups = mc_attr_groups;
+
+ err = bus_register(&mc.bus);
+ if (err) {
+ pr_info("Cannot register most bus\n");
+ return err;
+ }
+ err = driver_register(&mc.drv);
+ if (err) {
+ pr_info("Cannot register core driver\n");
+ goto exit_bus;
+ }
+ mc.dev.init_name = "most_bus";
+ mc.dev.release = release_most_sub;
+ if (device_register(&mc.dev)) {
+ err = -ENOMEM;
+ goto exit_driver;
+ }
+
+ return 0;
+
+exit_driver:
+ driver_unregister(&mc.drv);
+exit_bus:
+ bus_unregister(&mc.bus);
+ return err;
+}
+
+static void __exit most_exit(void)
+{
+ pr_info("exit core module\n");
+ device_unregister(&mc.dev);
+ driver_unregister(&mc.drv);
+ bus_unregister(&mc.bus);
+ ida_destroy(&mdev_id);
+}
+
+module_init(most_init);
+module_exit(most_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/core.h
index 915e5159d1eb..74a29163b68a 100644
--- a/drivers/staging/most/mostcore/mostcore.h
+++ b/drivers/staging/most/core.h
@@ -1,31 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * mostcore.h - Interface between MostCore,
- * Hardware Dependent Module (HDM) and Application Interface Module (AIM).
+ * most.h - API for component and adapter drivers
*
* Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/*
- * Authors:
- * Andrey Shvetsov <andrey.shvetsov@k2l.de>
- * Christian Gromm <christian.gromm@microchip.com>
- * Sebastian Graf
*/
#ifndef __MOST_CORE_H__
#define __MOST_CORE_H__
#include <linux/types.h>
+#include <linux/device.h>
-struct kobject;
struct module;
+struct interface_private;
/**
* Interface type
@@ -79,22 +66,22 @@ enum mbo_status_flags {
* The value is bitwise OR-combination of the values from the
* enumeration most_channel_data_type. Zero is allowed value and means
* "channel may not be used".
- * @num_buffer_packet: Maximum number of buffers supported by this channel
+ * @num_buffers_packet: Maximum number of buffers supported by this channel
* for packet data types (Async,Control,QoS)
* @buffer_size_packet: Maximum buffer size supported by this channel
* for packet data types (Async,Control,QoS)
- * @num_buffer_streaming: Maximum number of buffers supported by this channel
+ * @num_buffers_streaming: Maximum number of buffers supported by this channel
* for streaming data types (Sync,AV Packetized)
* @buffer_size_streaming: Maximum buffer size supported by this channel
* for streaming data types (Sync,AV Packetized)
* @name_suffix: Optional suffix provided by an HDM that is attached to the
* regular channel name.
*
- * Describes the capabilities of a MostCore channel like supported Data Types
+ * Describes the capabilities of a MOST channel like supported Data Types
* and directions. This information is provided by an HDM for the MostCore.
*
* The Core creates read only sysfs attribute files in
- * /sys/devices/virtual/most/mostcore/devices/mdev-#/mdev#-ch#/ with the
+ * /sys/devices/most/mdev#/<channel>/ with the
* following attributes:
* -available_directions
* -available_datatypes
@@ -129,7 +116,7 @@ struct most_channel_capability {
* @packets_per_xact: number of MOST frames that are packet inside one USB
* packet. This is USB specific
*
- * Describes the configuration for a MostCore channel. This information is
+ * Describes the configuration for a MOST channel. This information is
* provided from the MostCore to an HDM (like the Medusa PCIe Interface) as a
* parameter of the "configure" function call.
*/
@@ -153,6 +140,7 @@ struct most_channel_config {
*
* @list: list head for use by the mbo's current owner
* @ifp: (in) associated interface instance
+ * @num_buffers_ptr: pointer to the counter of buffers available in the pool
* @hdm_channel_id: (in) HDM channel instance
* @virt_address: (in) kernel virtual address of the buffer
* @bus_address: (in) bus address of the buffer
@@ -161,15 +149,15 @@ struct most_channel_config {
* @status: (out) transfer status
* @complete: (in) completion routine
*
- * The MostCore allocates and initializes the MBO.
+ * The core allocates and initializes the MBO.
*
- * The HDM receives MBO for transfer from MostCore with the call to enqueue().
+ * The HDM receives MBO for transfer from the core with the call to enqueue().
* The HDM copies the data to- or from the buffer depending on configured
* channel direction, set "processed_length" and "status" and completes
* the transfer procedure by calling the completion routine.
*
- * At the end the MostCore deallocates the MBO or recycles it for further
- * transfers for the same or different HDM.
+ * Finally, the MBO is being deallocated or recycled for further
+ * transfers of the same or a different HDM.
*
* Directions of usage:
* The core driver should never access any MBO fields (even if marked
@@ -202,10 +190,12 @@ struct mbo {
/**
* Interface instance description.
*
- * Describes one instance of an interface like Medusa PCIe or Vantage USB.
+ * Describes an interface of a MOST device the core driver is bound to.
* This structure is allocated and initialized in the HDM. MostCore may not
* modify this structure.
*
+ * @dev: the actual device
+ * @mod: module
* @interface Interface type. \sa most_interface_type.
* @description PRELIMINARY.
* Unique description of the device instance from point of view of the
@@ -238,10 +228,11 @@ struct mbo {
* @priv Private field used by mostcore to store context information.
*/
struct most_interface {
+ struct device dev;
struct module *mod;
enum most_interface_type interface;
const char *description;
- int num_channels;
+ unsigned int num_channels;
struct most_channel_capability *channel_vector;
int (*configure)(struct most_interface *iface, int channel_idx,
struct most_channel_config *channel_config);
@@ -253,27 +244,29 @@ struct most_interface {
unsigned char link_stat,
unsigned char *mac_addr));
void *priv;
+ struct interface_private *p;
};
+#define to_most_interface(d) container_of(d, struct most_interface, dev)
+
/**
- * struct most_aim - identifies MOST device driver to mostcore
- * @name: Driver name
+ * struct core_component - identifies a loadable component for the mostcore
+ * @list: list_head
+ * @name: component name
* @probe_channel: function for core to notify driver about channel connection
* @disconnect_channel: callback function to disconnect a certain channel
* @rx_completion: completion handler for received packets
* @tx_completion: completion handler for transmitted packets
- * @context: context pointer to be used by mostcore
*/
-struct most_aim {
+struct core_component {
+ struct list_head list;
const char *name;
int (*probe_channel)(struct most_interface *iface, int channel_idx,
- struct most_channel_config *cfg,
- struct kobject *parent, char *name);
+ struct most_channel_config *cfg, char *name);
int (*disconnect_channel)(struct most_interface *iface,
int channel_idx);
int (*rx_completion)(struct mbo *mbo);
int (*tx_completion)(struct most_interface *iface, int channel_idx);
- void *context;
};
/**
@@ -285,7 +278,7 @@ struct most_aim {
* Note: HDM has to ensure that any reference held on the kobj is
* released before deregistering the interface.
*/
-struct kobject *most_register_interface(struct most_interface *iface);
+int most_register_interface(struct most_interface *iface);
/**
* Deregisters instance of the interface.
@@ -310,16 +303,16 @@ void most_stop_enqueue(struct most_interface *iface, int channel_idx);
* in wait fifo.
*/
void most_resume_enqueue(struct most_interface *iface, int channel_idx);
-int most_register_aim(struct most_aim *aim);
-int most_deregister_aim(struct most_aim *aim);
+int most_register_component(struct core_component *comp);
+int most_deregister_component(struct core_component *comp);
struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
- struct most_aim *);
+ struct core_component *comp);
void most_put_mbo(struct mbo *mbo);
int channel_has_mbo(struct most_interface *iface, int channel_idx,
- struct most_aim *aim);
+ struct core_component *comp);
int most_start_channel(struct most_interface *iface, int channel_idx,
- struct most_aim *);
+ struct core_component *comp);
int most_stop_channel(struct most_interface *iface, int channel_idx,
- struct most_aim *);
+ struct core_component *comp);
#endif /* MOST_CORE_H_ */
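
The MBO hand-off described in the kernel-doc above boils down to a few lines on the HDM side: after the hardware transfer finishes, the HDM fills in the out-fields and fires the completion routine. MBO_SUCCESS is assumed from enum mbo_status_flags (only the MBO_E_* values appear in this hunk), and my_hdm_xfer_done() is hypothetical:

    static void my_hdm_xfer_done(struct mbo *mbo, u32 done_len, bool ok)
    {
            mbo->processed_length = done_len;
            mbo->status = ok ? MBO_SUCCESS : MBO_E_INVAL;
            mbo->complete(mbo);     /* returns the buffer to the core */
    }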
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/dim2/Kconfig
index 663bfebff674..e39c4e525cac 100644
--- a/drivers/staging/most/hdm-dim2/Kconfig
+++ b/drivers/staging/most/dim2/Kconfig
@@ -2,8 +2,8 @@
# MediaLB configuration
#
-config HDM_DIM2
- tristate "DIM2 HDM"
+config MOST_DIM2
+ tristate "DIM2"
depends on HAS_IOMEM
---help---
@@ -13,4 +13,4 @@ config HDM_DIM2
maintainer of this driver.
To compile this driver as a module, choose M here: the
- module will be called hdm_dim2.
+ module will be called most_dim2.
diff --git a/drivers/staging/most/dim2/Makefile b/drivers/staging/most/dim2/Makefile
new file mode 100644
index 000000000000..66676f5907ee
--- /dev/null
+++ b/drivers/staging/most/dim2/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_DIM2) += most_dim2.o
+
+most_dim2-objs := dim2.o hal.o sysfs.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/dim2/dim2.c
index df7021c522b3..f9bc7dea75b8 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * dim2_hdm.c - MediaLB DIM2 Hardware Dependent Module
+ * dim2.c - MediaLB DIM2 Hardware Dependent Module
*
* Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -25,11 +19,11 @@
#include <linux/sched.h>
#include <linux/kthread.h>
-#include <mostcore.h>
-#include "dim2_hal.h"
-#include "dim2_hdm.h"
-#include "dim2_errors.h"
-#include "dim2_sysfs.h"
+#include "most/core.h"
+#include "hal.h"
+#include "dim2.h"
+#include "errors.h"
+#include "sysfs.h"
#define DMA_CHANNELS (32 - 1) /* channel 0 is a system channel */
@@ -93,6 +87,7 @@ struct hdm_channel {
* @atx_idx: index of async tx channel
*/
struct dim2_hdm {
+ struct device dev;
struct hdm_channel hch[DMA_CHANNELS];
struct most_channel_capability capabilities[DMA_CHANNELS];
struct most_interface most_iface;
@@ -106,8 +101,8 @@ struct dim2_hdm {
unsigned char link_state;
int atx_idx;
struct medialb_bus bus;
- void (*on_netinfo)(struct most_interface *,
- unsigned char, unsigned char *);
+ void (*on_netinfo)(struct most_interface *most_iface,
+ unsigned char link_state, unsigned char *addrs);
};
#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
@@ -156,7 +151,7 @@ void dimcb_io_write(u32 __iomem *ptr32, u32 value)
*/
void dimcb_on_error(u8 error_id, const char *error_message)
{
- pr_err("dimcb_on_error: error_id - %d, error_message - %s\n", error_id,
+ pr_err("%s: error_id - %d, error_message - %s\n", __func__, error_id,
error_message);
}
@@ -744,7 +739,6 @@ static int dim2_probe(struct platform_device *pdev)
struct dim2_hdm *dev;
struct resource *res;
int ret, i;
- struct kobject *kobj;
int irq;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -832,17 +826,20 @@ static int dim2_probe(struct platform_device *pdev)
dev->most_iface.enqueue = enqueue;
dev->most_iface.poison_channel = poison_channel;
dev->most_iface.request_netinfo = request_netinfo;
+ dev->dev.init_name = "dim2_state";
+ dev->dev.parent = &dev->most_iface.dev;
- kobj = most_register_interface(&dev->most_iface);
- if (IS_ERR(kobj)) {
- ret = PTR_ERR(kobj);
+ ret = most_register_interface(&dev->most_iface);
+ if (ret) {
dev_err(&pdev->dev, "failed to register MOST interface\n");
goto err_stop_thread;
}
- ret = dim2_sysfs_probe(&dev->bus, kobj);
- if (ret)
+ ret = dim2_sysfs_probe(&dev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create sysfs attribute\n");
goto err_unreg_iface;
+ }
ret = startup_dim(pdev);
if (ret) {
@@ -853,7 +850,7 @@ static int dim2_probe(struct platform_device *pdev)
return 0;
err_destroy_bus:
- dim2_sysfs_destroy(&dev->bus);
+ dim2_sysfs_destroy(&dev->dev);
err_unreg_iface:
most_deregister_interface(&dev->most_iface);
err_stop_thread:
@@ -881,7 +878,7 @@ static int dim2_remove(struct platform_device *pdev)
if (pdata && pdata->destroy)
pdata->destroy(pdata);
- dim2_sysfs_destroy(&dev->bus);
+ dim2_sysfs_destroy(&dev->dev);
most_deregister_interface(&dev->most_iface);
kthread_stop(dev->netinfo_task);
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.h b/drivers/staging/most/dim2/dim2.h
index 4050e7c764ed..6a9fc51a2eb4 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.h
+++ b/drivers/staging/most/dim2/dim2.h
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * dim2_hdm.h - MediaLB DIM2 HDM Header
+ * dim2.h - MediaLB DIM2 HDM Header
*
* Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#ifndef DIM2_HDM_H
diff --git a/drivers/staging/most/hdm-dim2/dim2_errors.h b/drivers/staging/most/dim2/errors.h
index 66343ba426c1..3487510fbd2f 100644
--- a/drivers/staging/most/hdm-dim2/dim2_errors.h
+++ b/drivers/staging/most/dim2/errors.h
@@ -1,15 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * dim2_errors.h - Definitions of errors for DIM2 HAL API
+ * errors.h - Definitions of errors for DIM2 HAL API
* (MediaLB, Device Interface Macro IP, OS62420)
*
* Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#ifndef _MOST_DIM_ERRORS_H
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/dim2/hal.c
index 91484643d289..17c04e1c5e62 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.c
+++ b/drivers/staging/most/dim2/hal.c
@@ -1,22 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * dim2_hal.c - DIM2 HAL implementation
+ * hal.c - DIM2 HAL implementation
* (MediaLB, Device Interface Macro IP, OS62420)
*
* Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
-#include "dim2_hal.h"
-#include "dim2_errors.h"
-#include "dim2_reg.h"
+#include "hal.h"
+#include "errors.h"
+#include "reg.h"
#include <linux/stddef.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/dim2/hal.h
index 6df6ea5f7da4..e04a5350f134 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ b/drivers/staging/most/dim2/hal.h
@@ -1,22 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * dim2_hal.h - DIM2 HAL interface
+ * hal.h - DIM2 HAL interface
* (MediaLB, Device Interface Macro IP, OS62420)
*
* Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#ifndef _DIM2_HAL_H
#define _DIM2_HAL_H
#include <linux/types.h>
-#include "dim2_reg.h"
+#include "reg.h"
/*
* The values below are specified in the hardware specification.
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/dim2/reg.h
index f7d9fbcd29f2..69cbf78239f1 100644
--- a/drivers/staging/most/hdm-dim2/dim2_reg.h
+++ b/drivers/staging/most/dim2/reg.h
@@ -1,15 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * dim2_reg.h - Definitions for registers of DIM2
+ * reg.h - Definitions for registers of DIM2
* (MediaLB, Device Interface Macro IP, OS62420)
*
* Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#ifndef DIM2_OS62420_H
diff --git a/drivers/staging/most/dim2/sysfs.c b/drivers/staging/most/dim2/sysfs.c
new file mode 100644
index 000000000000..c85b2cdcdca3
--- /dev/null
+++ b/drivers/staging/most/dim2/sysfs.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sysfs.c - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include "sysfs.h"
+#include <linux/device.h>
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ bool state = dim2_sysfs_get_state_cb();
+
+ return sprintf(buf, "%s\n", state ? "locked" : "");
+}
+
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_state.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .attrs = dev_attrs,
+};
+
+static const struct attribute_group *dev_attr_groups[] = {
+ &dev_attr_group,
+ NULL,
+};
+
+int dim2_sysfs_probe(struct device *dev)
+{
+ dev->groups = dev_attr_groups;
+ return device_register(dev);
+}
+
+void dim2_sysfs_destroy(struct device *dev)
+{
+ device_unregister(dev);
+}
diff --git a/drivers/staging/most/dim2/sysfs.h b/drivers/staging/most/dim2/sysfs.h
new file mode 100644
index 000000000000..33756a3bffe2
--- /dev/null
+++ b/drivers/staging/most/dim2/sysfs.h
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sysfs.h - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#ifndef DIM2_SYSFS_H
+#define DIM2_SYSFS_H
+
+#include <linux/kobject.h>
+
+struct medialb_bus {
+ struct kobject kobj_group;
+};
+
+struct device;
+
+int dim2_sysfs_probe(struct device *dev);
+void dim2_sysfs_destroy(struct device *dev);
+
+/*
+ * callback,
+ * must report the MediaLB state: true if locked, false if unlocked
+ */
+bool dim2_sysfs_get_state_cb(void);
+
+#endif /* DIM2_SYSFS_H */
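
The HDM is expected to supply the callback declared above; a one-line sketch with a hypothetical HAL query:

    bool dim2_sysfs_get_state_cb(void)
    {
            return my_dim2_mlb_locked();    /* hypothetical lock-state read */
    }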
diff --git a/drivers/staging/most/hdm-dim2/Makefile b/drivers/staging/most/hdm-dim2/Makefile
deleted file mode 100644
index 6bbee879a8ea..000000000000
--- a/drivers/staging/most/hdm-dim2/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_HDM_DIM2) += hdm_dim2.o
-
-hdm_dim2-objs := dim2_hdm.o dim2_hal.o dim2_sysfs.o
-ccflags-y += -Idrivers/staging/most/mostcore/
-ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
deleted file mode 100644
index d8b22f91d2c3..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * dim2_sysfs.c - MediaLB sysfs information
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include "dim2_sysfs.h"
-
-struct bus_attr {
- struct attribute attr;
- ssize_t (*show)(struct medialb_bus *bus, char *buf);
- ssize_t (*store)(struct medialb_bus *bus, const char *buf,
- size_t count);
-};
-
-static ssize_t state_show(struct medialb_bus *bus, char *buf)
-{
- bool state = dim2_sysfs_get_state_cb();
-
- return sprintf(buf, "%s\n", state ? "locked" : "");
-}
-
-static struct bus_attr state_attr = __ATTR_RO(state);
-
-static struct attribute *bus_default_attrs[] = {
- &state_attr.attr,
- NULL,
-};
-
-static const struct attribute_group bus_attr_group = {
- .attrs = bus_default_attrs,
-};
-
-static void bus_kobj_release(struct kobject *kobj)
-{
-}
-
-static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct medialb_bus *bus =
- container_of(kobj, struct medialb_bus, kobj_group);
- struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
-
- if (!xattr->show)
- return -EIO;
-
- return xattr->show(bus, buf);
-}
-
-static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct medialb_bus *bus =
- container_of(kobj, struct medialb_bus, kobj_group);
- struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
-
- if (!xattr->store)
- return -EIO;
-
- return xattr->store(bus, buf, count);
-}
-
-static struct sysfs_ops const bus_kobj_sysfs_ops = {
- .show = bus_kobj_attr_show,
- .store = bus_kobj_attr_store,
-};
-
-static struct kobj_type bus_ktype = {
- .release = bus_kobj_release,
- .sysfs_ops = &bus_kobj_sysfs_ops,
-};
-
-int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj)
-{
- int err;
-
- kobject_init(&bus->kobj_group, &bus_ktype);
- err = kobject_add(&bus->kobj_group, parent_kobj, "bus");
- if (err) {
- pr_err("kobject_add() failed: %d\n", err);
- goto err_kobject_add;
- }
-
- err = sysfs_create_group(&bus->kobj_group, &bus_attr_group);
- if (err) {
- pr_err("sysfs_create_group() failed: %d\n", err);
- goto err_create_group;
- }
-
- return 0;
-
-err_create_group:
- kobject_put(&bus->kobj_group);
-
-err_kobject_add:
- return err;
-}
-
-void dim2_sysfs_destroy(struct medialb_bus *bus)
-{
- kobject_put(&bus->kobj_group);
-}
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.h b/drivers/staging/most/hdm-dim2/dim2_sysfs.h
deleted file mode 100644
index b71dd027ebc7..000000000000
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * dim2_sysfs.h - MediaLB sysfs information
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
-
-#ifndef DIM2_SYSFS_H
-#define DIM2_SYSFS_H
-
-#include <linux/kobject.h>
-
-struct medialb_bus {
- struct kobject kobj_group;
-};
-
-struct dim2_hdm;
-
-int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj);
-void dim2_sysfs_destroy(struct medialb_bus *bus);
-
-/*
- * callback,
- * must deliver MediaLB state as true if locked or false if unlocked
- */
-bool dim2_sysfs_get_state_cb(void);
-
-#endif /* DIM2_SYSFS_H */
diff --git a/drivers/staging/most/hdm-i2c/Makefile b/drivers/staging/most/hdm-i2c/Makefile
deleted file mode 100644
index 03a4a59b1f9f..000000000000
--- a/drivers/staging/most/hdm-i2c/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_HDM_I2C) += hdm_i2c.o
-
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/hdm-usb/Makefile b/drivers/staging/most/hdm-usb/Makefile
deleted file mode 100644
index 6bbacb41e94b..000000000000
--- a/drivers/staging/most/hdm-usb/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_HDM_USB) += hdm_usb.o
-
-ccflags-y += -Idrivers/staging/most/mostcore/
-ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-i2c/Kconfig b/drivers/staging/most/i2c/Kconfig
index 6fd7983668ad..79d0ff27f56d 100644
--- a/drivers/staging/most/hdm-i2c/Kconfig
+++ b/drivers/staging/most/i2c/Kconfig
@@ -2,11 +2,11 @@
# MOST I2C configuration
#
-config HDM_I2C
- tristate "I2C HDM"
+config MOST_I2C
+ tristate "I2C"
depends on I2C
---help---
Say Y here if you want to connect via I2C to a network transceiver.
To compile this driver as a module, choose M here: the
- module will be called hdm_i2c.
+ module will be called most_i2c.
diff --git a/drivers/staging/most/i2c/Makefile b/drivers/staging/most/i2c/Makefile
new file mode 100644
index 000000000000..a7d094c1e1c2
--- /dev/null
+++ b/drivers/staging/most/i2c/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_I2C) += most_i2c.o
+
+most_i2c-objs := i2c.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/hdm-i2c/hdm_i2c.c b/drivers/staging/most/i2c/i2c.c
index 2b4de404e46a..141239fc9f51 100644
--- a/drivers/staging/most/hdm-i2c/hdm_i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * hdm_i2c.c - Hardware Dependent Module for I2C Interface
+ * i2c.c - Hardware Dependent Module for I2C Interface
*
* Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -21,7 +15,7 @@
#include <linux/interrupt.h>
#include <linux/err.h>
-#include <mostcore.h>
+#include "most/core.h"
enum { CH_RX, CH_TX, NUM_CHANNELS };
@@ -309,7 +303,6 @@ static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct hdm_i2c *dev;
int ret, i;
- struct kobject *kobj;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -347,11 +340,11 @@ static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev->client = client;
i2c_set_clientdata(client, dev);
- kobj = most_register_interface(&dev->most_iface);
- if (IS_ERR(kobj)) {
+ ret = most_register_interface(&dev->most_iface);
+ if (ret) {
pr_err("Failed to register i2c as a MOST interface\n");
kfree(dev);
- return PTR_ERR(kobj);
+ return ret;
}
dev->polling_mode = polling_req || client->irq <= 0;
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
deleted file mode 100644
index 47172546d728..000000000000
--- a/drivers/staging/most/mostcore/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# MOSTCore configuration
-#
-
-config MOSTCORE
- tristate "MOST Core"
- depends on HAS_DMA
-
- ---help---
- Say Y here if you want to enable MOST support.
- This device driver needs at least an additional AIM and HDM to work.
-
- To compile this driver as a module, choose M here: the
- module will be called mostcore.
diff --git a/drivers/staging/most/mostcore/Makefile b/drivers/staging/most/mostcore/Makefile
deleted file mode 100644
index a078f01cf7c2..000000000000
--- a/drivers/staging/most/mostcore/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_MOSTCORE) += mostcore.o
-
-mostcore-objs := core.o
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
deleted file mode 100644
index 069269db394c..000000000000
--- a/drivers/staging/most/mostcore/core.c
+++ /dev/null
@@ -1,1949 +0,0 @@
-/*
- * core.c - Implementation of core module of MOST Linux driver stack
- *
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/kobject.h>
-#include <linux/mutex.h>
-#include <linux/completion.h>
-#include <linux/sysfs.h>
-#include <linux/kthread.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-#include "mostcore.h"
-
-#define MAX_CHANNELS 64
-#define STRING_SIZE 80
-
-static struct class *most_class;
-static struct device *core_dev;
-static struct ida mdev_id;
-static int dummy_num_buffers;
-
-struct most_c_aim_obj {
- struct most_aim *ptr;
- int refs;
- int num_buffers;
-};
-
-struct most_c_obj {
- struct kobject kobj;
- struct completion cleanup;
- atomic_t mbo_ref;
- atomic_t mbo_nq_level;
- u16 channel_id;
- bool is_poisoned;
- struct mutex start_mutex;
- struct mutex nq_mutex; /* nq thread synchronization */
- int is_starving;
- struct most_interface *iface;
- struct most_inst_obj *inst;
- struct most_channel_config cfg;
- bool keep_mbo;
- bool enqueue_halt;
- struct list_head fifo;
- spinlock_t fifo_lock;
- struct list_head halt_fifo;
- struct list_head list;
- struct most_c_aim_obj aim0;
- struct most_c_aim_obj aim1;
- struct list_head trash_fifo;
- struct task_struct *hdm_enqueue_task;
- wait_queue_head_t hdm_fifo_wq;
-};
-
-#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
-
-struct most_inst_obj {
- int dev_id;
- struct most_interface *iface;
- struct list_head channel_list;
- struct most_c_obj *channel[MAX_CHANNELS];
- struct kobject kobj;
- struct list_head list;
-};
-
-static const struct {
- int most_ch_data_type;
- const char *name;
-} ch_data_type[] = {
- { MOST_CH_CONTROL, "control\n" },
- { MOST_CH_ASYNC, "async\n" },
- { MOST_CH_SYNC, "sync\n" },
- { MOST_CH_ISOC, "isoc\n"},
- { MOST_CH_ISOC, "isoc_avp\n"},
-};
-
-#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
-
-/**
- * list_pop_mbo - retrieves the first MBO of the list and removes it
- * @ptr: the list head to grab the MBO from.
- */
-#define list_pop_mbo(ptr) \
-({ \
- struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
- list_del(&_mbo->list); \
- _mbo; \
-})
-
-/* ___ ___
- * ___C H A N N E L___
- */
-
-/**
- * struct most_c_attr - to access the attributes of a channel object
- * @attr: attributes of a channel
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_c_attr {
- struct attribute attr;
- ssize_t (*show)(struct most_c_obj *d,
- struct most_c_attr *attr,
- char *buf);
- ssize_t (*store)(struct most_c_obj *d,
- struct most_c_attr *attr,
- const char *buf,
- size_t count);
-};
-
-#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
-
-/**
- * channel_attr_show - show function of channel object
- * @kobj: pointer to its kobject
- * @attr: pointer to its attributes
- * @buf: buffer
- */
-static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct most_c_attr *channel_attr = to_channel_attr(attr);
- struct most_c_obj *c_obj = to_c_obj(kobj);
-
- if (!channel_attr->show)
- return -EIO;
-
- return channel_attr->show(c_obj, channel_attr, buf);
-}
-
-/**
- * channel_attr_store - store function of channel object
- * @kobj: pointer to its kobject
- * @attr: pointer to its attributes
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t channel_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_c_attr *channel_attr = to_channel_attr(attr);
- struct most_c_obj *c_obj = to_c_obj(kobj);
-
- if (!channel_attr->store)
- return -EIO;
- return channel_attr->store(c_obj, channel_attr, buf, len);
-}
-
-static const struct sysfs_ops most_channel_sysfs_ops = {
- .show = channel_attr_show,
- .store = channel_attr_store,
-};
-
-/**
- * most_free_mbo_coherent - free an MBO and its coherent buffer
- * @mbo: buffer to be released
- *
- */
-static void most_free_mbo_coherent(struct mbo *mbo)
-{
- struct most_c_obj *c = mbo->context;
- u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
-
- dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
- mbo->bus_address);
- kfree(mbo);
- if (atomic_sub_and_test(1, &c->mbo_ref))
- complete(&c->cleanup);
-}
-
-/**
- * flush_channel_fifos - clear the channel fifos
- * @c: pointer to channel object
- */
-static void flush_channel_fifos(struct most_c_obj *c)
-{
- unsigned long flags, hf_flags;
- struct mbo *mbo, *tmp;
-
- if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
- return;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
- list_del(&mbo->list);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- most_free_mbo_coherent(mbo);
- spin_lock_irqsave(&c->fifo_lock, flags);
- }
- spin_unlock_irqrestore(&c->fifo_lock, flags);
-
- spin_lock_irqsave(&c->fifo_lock, hf_flags);
- list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
- list_del(&mbo->list);
- spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
- most_free_mbo_coherent(mbo);
- spin_lock_irqsave(&c->fifo_lock, hf_flags);
- }
- spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
-
- if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
- pr_info("WARN: fifo | trash fifo not empty\n");
-}
-
-/**
- * flush_trash_fifo - clear the trash fifo
- * @c: pointer to channel object
- */
-static int flush_trash_fifo(struct most_c_obj *c)
-{
- struct mbo *mbo, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
- list_del(&mbo->list);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- most_free_mbo_coherent(mbo);
- spin_lock_irqsave(&c->fifo_lock, flags);
- }
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- return 0;
-}
-
-/**
- * most_channel_release - release function of channel object
- * @kobj: pointer to channel's kobject
- */
-static void most_channel_release(struct kobject *kobj)
-{
- struct most_c_obj *c = to_c_obj(kobj);
-
- kfree(c);
-}
-
-static ssize_t available_directions_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- strcpy(buf, "");
- if (c->iface->channel_vector[i].direction & MOST_CH_RX)
- strcat(buf, "rx ");
- if (c->iface->channel_vector[i].direction & MOST_CH_TX)
- strcat(buf, "tx ");
- strcat(buf, "\n");
- return strlen(buf);
-}
-
-static ssize_t available_datatypes_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- strcpy(buf, "");
- if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
- strcat(buf, "control ");
- if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
- strcat(buf, "async ");
- if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
- strcat(buf, "sync ");
- if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
- strcat(buf, "isoc ");
- strcat(buf, "\n");
- return strlen(buf);
-}
-
-static ssize_t number_of_packet_buffers_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- c->iface->channel_vector[i].num_buffers_packet);
-}
-
-static ssize_t number_of_stream_buffers_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- c->iface->channel_vector[i].num_buffers_streaming);
-}
-
-static ssize_t size_of_packet_buffer_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- c->iface->channel_vector[i].buffer_size_packet);
-}
-
-static ssize_t size_of_stream_buffer_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- unsigned int i = c->channel_id;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- c->iface->channel_vector[i].buffer_size_streaming);
-}
-
-static ssize_t channel_starving_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
-}
-
-static ssize_t set_number_of_buffers_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
-}
-
-static ssize_t set_number_of_buffers_store(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
-
- if (ret)
- return ret;
- return count;
-}
-
-static ssize_t set_buffer_size_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
-}
-
-static ssize_t set_buffer_size_store(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
-
- if (ret)
- return ret;
- return count;
-}
-
-static ssize_t set_direction_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- if (c->cfg.direction & MOST_CH_TX)
- return snprintf(buf, PAGE_SIZE, "tx\n");
- else if (c->cfg.direction & MOST_CH_RX)
- return snprintf(buf, PAGE_SIZE, "rx\n");
- return snprintf(buf, PAGE_SIZE, "unconfigured\n");
-}
-
-static ssize_t set_direction_store(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- if (!strcmp(buf, "dir_rx\n")) {
- c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "rx\n")) {
- c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "dir_tx\n")) {
- c->cfg.direction = MOST_CH_TX;
- } else if (!strcmp(buf, "tx\n")) {
- c->cfg.direction = MOST_CH_TX;
- } else {
- pr_info("WARN: invalid attribute settings\n");
- return -EINVAL;
- }
- return count;
-}
-
-static ssize_t set_datatype_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
- if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
- return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
- }
- return snprintf(buf, PAGE_SIZE, "unconfigured\n");
-}
-
-static ssize_t set_datatype_store(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
- if (!strcmp(buf, ch_data_type[i].name)) {
- c->cfg.data_type = ch_data_type[i].most_ch_data_type;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(ch_data_type)) {
- pr_info("WARN: invalid attribute settings\n");
- return -EINVAL;
- }
- return count;
-}
-
-static ssize_t set_subbuffer_size_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
-}
-
-static ssize_t set_subbuffer_size_store(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
-
- if (ret)
- return ret;
- return count;
-}
-
-static ssize_t set_packets_per_xact_show(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
-}
-
-static ssize_t set_packets_per_xact_store(struct most_c_obj *c,
- struct most_c_attr *attr,
- const char *buf,
- size_t count)
-{
- int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
-
- if (ret)
- return ret;
- return count;
-}
-
-static struct most_c_attr most_c_attrs[] = {
- __ATTR_RO(available_directions),
- __ATTR_RO(available_datatypes),
- __ATTR_RO(number_of_packet_buffers),
- __ATTR_RO(number_of_stream_buffers),
- __ATTR_RO(size_of_stream_buffer),
- __ATTR_RO(size_of_packet_buffer),
- __ATTR_RO(channel_starving),
- __ATTR_RW(set_buffer_size),
- __ATTR_RW(set_number_of_buffers),
- __ATTR_RW(set_direction),
- __ATTR_RW(set_datatype),
- __ATTR_RW(set_subbuffer_size),
- __ATTR_RW(set_packets_per_xact),
-};
-
-/**
- * most_channel_def_attrs - array of default attributes of channel object
- */
-static struct attribute *most_channel_def_attrs[] = {
- &most_c_attrs[0].attr,
- &most_c_attrs[1].attr,
- &most_c_attrs[2].attr,
- &most_c_attrs[3].attr,
- &most_c_attrs[4].attr,
- &most_c_attrs[5].attr,
- &most_c_attrs[6].attr,
- &most_c_attrs[7].attr,
- &most_c_attrs[8].attr,
- &most_c_attrs[9].attr,
- &most_c_attrs[10].attr,
- &most_c_attrs[11].attr,
- &most_c_attrs[12].attr,
- NULL,
-};
-
-static struct kobj_type most_channel_ktype = {
- .sysfs_ops = &most_channel_sysfs_ops,
- .release = most_channel_release,
- .default_attrs = most_channel_def_attrs,
-};
-
-static struct kset *most_channel_kset;
-
-/**
- * create_most_c_obj - allocates a channel object
- * @name: name of the channel object
- * @parent: parent kobject
- *
- * This create a channel object and registers it with sysfs.
- * Returns a pointer to the object or NULL when something went wrong.
- */
-static struct most_c_obj *
-create_most_c_obj(const char *name, struct kobject *parent)
-{
- struct most_c_obj *c;
- int retval;
-
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return NULL;
- c->kobj.kset = most_channel_kset;
- retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
- "%s", name);
- if (retval) {
- kobject_put(&c->kobj);
- return NULL;
- }
- kobject_uevent(&c->kobj, KOBJ_ADD);
- return c;
-}
-
-/* ___ ___
- * ___I N S T A N C E___
- */
-
-static struct list_head instance_list;
-
-/**
- * struct most_inst_attribute - to access the attributes of instance object
- * @attr: attributes of an instance
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_inst_attribute {
- struct attribute attr;
- ssize_t (*show)(struct most_inst_obj *d,
- struct most_inst_attribute *attr,
- char *buf);
- ssize_t (*store)(struct most_inst_obj *d,
- struct most_inst_attribute *attr,
- const char *buf,
- size_t count);
-};
-
-#define to_instance_attr(a) \
- container_of(a, struct most_inst_attribute, attr)
-
-/**
- * instance_attr_show - show function for an instance object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- */
-static ssize_t instance_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct most_inst_attribute *instance_attr;
- struct most_inst_obj *instance_obj;
-
- instance_attr = to_instance_attr(attr);
- instance_obj = to_inst_obj(kobj);
-
- if (!instance_attr->show)
- return -EIO;
-
- return instance_attr->show(instance_obj, instance_attr, buf);
-}
-
-/**
- * instance_attr_store - store function for an instance object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t instance_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_inst_attribute *instance_attr;
- struct most_inst_obj *instance_obj;
-
- instance_attr = to_instance_attr(attr);
- instance_obj = to_inst_obj(kobj);
-
- if (!instance_attr->store)
- return -EIO;
-
- return instance_attr->store(instance_obj, instance_attr, buf, len);
-}
-
-static const struct sysfs_ops most_inst_sysfs_ops = {
- .show = instance_attr_show,
- .store = instance_attr_store,
-};
-
-/**
- * most_inst_release - release function for instance object
- * @kobj: pointer to instance's kobject
- *
- * This frees the allocated memory for the instance object
- */
-static void most_inst_release(struct kobject *kobj)
-{
- struct most_inst_obj *inst = to_inst_obj(kobj);
-
- kfree(inst);
-}
-
-static ssize_t description_show(struct most_inst_obj *instance_obj,
- struct most_inst_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n",
- instance_obj->iface->description);
-}
-
-static ssize_t interface_show(struct most_inst_obj *instance_obj,
- struct most_inst_attribute *attr,
- char *buf)
-{
- switch (instance_obj->iface->interface) {
- case ITYPE_LOOPBACK:
- return snprintf(buf, PAGE_SIZE, "loopback\n");
- case ITYPE_I2C:
- return snprintf(buf, PAGE_SIZE, "i2c\n");
- case ITYPE_I2S:
- return snprintf(buf, PAGE_SIZE, "i2s\n");
- case ITYPE_TSI:
- return snprintf(buf, PAGE_SIZE, "tsi\n");
- case ITYPE_HBI:
- return snprintf(buf, PAGE_SIZE, "hbi\n");
- case ITYPE_MEDIALB_DIM:
- return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
- case ITYPE_MEDIALB_DIM2:
- return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
- case ITYPE_USB:
- return snprintf(buf, PAGE_SIZE, "usb\n");
- case ITYPE_PCIE:
- return snprintf(buf, PAGE_SIZE, "pcie\n");
- }
- return snprintf(buf, PAGE_SIZE, "unknown\n");
-}
-
-static struct most_inst_attribute most_inst_attr_description =
- __ATTR_RO(description);
-
-static struct most_inst_attribute most_inst_attr_interface =
- __ATTR_RO(interface);
-
-static struct attribute *most_inst_def_attrs[] = {
- &most_inst_attr_description.attr,
- &most_inst_attr_interface.attr,
- NULL,
-};
-
-static struct kobj_type most_inst_ktype = {
- .sysfs_ops = &most_inst_sysfs_ops,
- .release = most_inst_release,
- .default_attrs = most_inst_def_attrs,
-};
-
-static struct kset *most_inst_kset;
-
-/**
- * create_most_inst_obj - creates an instance object
- * @name: name of the object to be created
- *
- * This allocates memory for an instance structure, assigns the proper kset
- * and registers it with sysfs.
- *
- * Returns a pointer to the instance object or NULL when something went wrong.
- */
-static struct most_inst_obj *create_most_inst_obj(const char *name)
-{
- struct most_inst_obj *inst;
- int retval;
-
- inst = kzalloc(sizeof(*inst), GFP_KERNEL);
- if (!inst)
- return NULL;
- inst->kobj.kset = most_inst_kset;
- retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
- "%s", name);
- if (retval) {
- kobject_put(&inst->kobj);
- return NULL;
- }
- kobject_uevent(&inst->kobj, KOBJ_ADD);
- return inst;
-}
-
-/**
- * destroy_most_inst_obj - MOST instance release function
- * @inst: pointer to the instance object
- *
- * This decrements the reference counter of the instance object.
- * If the reference count turns zero, its release function is called
- */
-static void destroy_most_inst_obj(struct most_inst_obj *inst)
-{
- struct most_c_obj *c, *tmp;
-
- list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
- flush_trash_fifo(c);
- flush_channel_fifos(c);
- kobject_put(&c->kobj);
- }
- kobject_put(&inst->kobj);
-}
-
-/* ___ ___
- * ___A I M___
- */
-struct most_aim_obj {
- struct kobject kobj;
- struct list_head list;
- struct most_aim *driver;
-};
-
-#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
-
-static struct list_head aim_list;
-
-/**
- * struct most_aim_attribute - to access the attributes of AIM object
- * @attr: attributes of an AIM
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_aim_attribute {
- struct attribute attr;
- ssize_t (*show)(struct most_aim_obj *d,
- struct most_aim_attribute *attr,
- char *buf);
- ssize_t (*store)(struct most_aim_obj *d,
- struct most_aim_attribute *attr,
- const char *buf,
- size_t count);
-};
-
-#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
-
-/**
- * aim_attr_show - show function of an AIM object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- */
-static ssize_t aim_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct most_aim_attribute *aim_attr;
- struct most_aim_obj *aim_obj;
-
- aim_attr = to_aim_attr(attr);
- aim_obj = to_aim_obj(kobj);
-
- if (!aim_attr->show)
- return -EIO;
-
- return aim_attr->show(aim_obj, aim_attr, buf);
-}
-
-/**
- * aim_attr_store - store function of an AIM object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t aim_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_aim_attribute *aim_attr;
- struct most_aim_obj *aim_obj;
-
- aim_attr = to_aim_attr(attr);
- aim_obj = to_aim_obj(kobj);
-
- if (!aim_attr->store)
- return -EIO;
- return aim_attr->store(aim_obj, aim_attr, buf, len);
-}
-
-static const struct sysfs_ops most_aim_sysfs_ops = {
- .show = aim_attr_show,
- .store = aim_attr_store,
-};
-
-/**
- * most_aim_release - AIM release function
- * @kobj: pointer to AIM's kobject
- */
-static void most_aim_release(struct kobject *kobj)
-{
- struct most_aim_obj *aim_obj = to_aim_obj(kobj);
-
- kfree(aim_obj);
-}
-
-static ssize_t links_show(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- char *buf)
-{
- struct most_c_obj *c;
- struct most_inst_obj *i;
- int offs = 0;
-
- list_for_each_entry(i, &instance_list, list) {
- list_for_each_entry(c, &i->channel_list, list) {
- if (c->aim0.ptr == aim_obj->driver ||
- c->aim1.ptr == aim_obj->driver) {
- offs += snprintf(buf + offs, PAGE_SIZE - offs,
- "%s:%s\n",
- kobject_name(&i->kobj),
- kobject_name(&c->kobj));
- }
- }
- }
-
- return offs;
-}
-
-/**
- * split_string - parses and modifies the string in the buffer buf and
- * splits it into two mandatory substrings and one optional substring.
- *
- * @buf: complete string from attribute 'add_channel'
- * @a: address of pointer to 1st substring (=instance name)
- * @b: address of pointer to 2nd substring (=channel name)
- * @c: optional address of pointer to 3rd substring (=user defined name)
- *
- * Examples:
- *
- * Input: "mdev0:ch6:my_channel\n" or
- * "mdev0:ch6:my_channel"
- *
- * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
- *
- * Input: "mdev1:ep81\n"
- * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
- *
- * Input: "mdev1:ep81"
- * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
- */
-static int split_string(char *buf, char **a, char **b, char **c)
-{
- *a = strsep(&buf, ":");
- if (!*a)
- return -EIO;
-
- *b = strsep(&buf, ":\n");
- if (!*b)
- return -EIO;
-
- if (c)
- *c = strsep(&buf, ":\n");
-
- return 0;
-}
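To make the tokenizing above concrete, here is a minimal userspace approximation; glibc's strsep() behaves the same way on this input, and the buffer contents are illustrative examples, not strings the core produces.

#include <stdio.h>
#include <string.h>

/* Userspace sketch of the tokenizing split_string() performs. */
int main(void)
{
	char buf[] = "mdev0:ch6:my_channel\n";	/* example attribute input */
	char *rest = buf;
	char *a, *b, *c;

	a = strsep(&rest, ":");		/* instance name -> "mdev0" */
	b = strsep(&rest, ":\n");	/* channel name  -> "ch6"   */
	c = strsep(&rest, ":\n");	/* optional name -> "my_channel" */
	printf("a=%s b=%s c=%s\n", a, b, c ? c : "(null)");
	return 0;
}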
-
-/**
- * get_channel_by_name - get pointer to channel object
- * @mdev: name of the device instance
- * @mdev_ch: name of the respective channel
- *
- * This retrieves the pointer to a channel object.
- */
-static struct
-most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
-{
- struct most_c_obj *c, *tmp;
- struct most_inst_obj *i, *i_tmp;
- int found = 0;
-
- list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
- if (!strcmp(kobject_name(&i->kobj), mdev)) {
- found++;
- break;
- }
- }
- if (unlikely(!found))
- return ERR_PTR(-EIO);
-
- list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
- if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
- found++;
- break;
- }
- }
- if (unlikely(found < 2))
- return ERR_PTR(-EIO);
- return c;
-}
-
-/**
- * add_link_store - store() function for add_link attribute
- * @aim_obj: pointer to AIM object
- * @attr: its attributes
- * @buf: buffer
- * @len: buffer length
- *
- * This parses the string given by buf and splits it into
- * three substrings. Note: the third substring is optional. In case a cdev
- * AIM is loaded, the optional third substring makes up the name of the
- * device node in the /dev directory. If omitted, the device node will
- * inherit the channel's name within sysfs.
- *
- * Searches for a pair of device and channel and probes the AIM
- *
- * Example:
- * (1) echo "mdev0:ch6:my_rxchannel" >add_link
- * (2) echo "mdev1:ep81" >add_link
- *
- * (1) would create the device node /dev/my_rxchannel
- * (2) would create the device node /dev/mdev1-ep81
- */
-static ssize_t add_link_store(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_c_obj *c;
- struct most_aim **aim_ptr;
- char buffer[STRING_SIZE];
- char *mdev;
- char *mdev_ch;
- char *mdev_devnod;
- char devnod_buf[STRING_SIZE];
- int ret;
- size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
-
- strlcpy(buffer, buf, max_len);
-
- ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
- if (ret)
- return ret;
-
- if (!mdev_devnod || *mdev_devnod == 0) {
- snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
- mdev_ch);
- mdev_devnod = devnod_buf;
- }
-
- c = get_channel_by_name(mdev, mdev_ch);
- if (IS_ERR(c))
- return -ENODEV;
-
- if (!c->aim0.ptr)
- aim_ptr = &c->aim0.ptr;
- else if (!c->aim1.ptr)
- aim_ptr = &c->aim1.ptr;
- else
- return -ENOSPC;
-
- *aim_ptr = aim_obj->driver;
- ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
- &c->cfg, &c->kobj, mdev_devnod);
- if (ret) {
- *aim_ptr = NULL;
- return ret;
- }
-
- return len;
-}
-
-/**
- * remove_link_store - store function for remove_link attribute
- * @aim_obj: pointer to AIM object
- * @attr: its attributes
- * @buf: buffer
- * @len: buffer length
- *
- * Example:
- * echo "mdev0:ep81" >remove_link
- */
-static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_c_obj *c;
- char buffer[STRING_SIZE];
- char *mdev;
- char *mdev_ch;
- int ret;
- size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
-
- strlcpy(buffer, buf, max_len);
- ret = split_string(buffer, &mdev, &mdev_ch, NULL);
- if (ret)
- return ret;
-
- c = get_channel_by_name(mdev, mdev_ch);
- if (IS_ERR(c))
- return -ENODEV;
-
- if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
- return -EIO;
- if (c->aim0.ptr == aim_obj->driver)
- c->aim0.ptr = NULL;
- if (c->aim1.ptr == aim_obj->driver)
- c->aim1.ptr = NULL;
- return len;
-}
-
-static struct most_aim_attribute most_aim_attrs[] = {
- __ATTR_RO(links),
- __ATTR_WO(add_link),
- __ATTR_WO(remove_link),
-};
-
-static struct attribute *most_aim_def_attrs[] = {
- &most_aim_attrs[0].attr,
- &most_aim_attrs[1].attr,
- &most_aim_attrs[2].attr,
- NULL,
-};
-
-static struct kobj_type most_aim_ktype = {
- .sysfs_ops = &most_aim_sysfs_ops,
- .release = most_aim_release,
- .default_attrs = most_aim_def_attrs,
-};
-
-static struct kset *most_aim_kset;
-
-/**
- * create_most_aim_obj - creates an AIM object
- * @name: name of the AIM
- *
- * This creates an AIM object, assigns the proper kset and registers
- * it with sysfs.
- * Returns a pointer to the object or NULL if something went wrong.
- */
-static struct most_aim_obj *create_most_aim_obj(const char *name)
-{
- struct most_aim_obj *most_aim;
- int retval;
-
- most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
- if (!most_aim)
- return NULL;
- most_aim->kobj.kset = most_aim_kset;
- retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
- NULL, "%s", name);
- if (retval) {
- kobject_put(&most_aim->kobj);
- return NULL;
- }
- kobject_uevent(&most_aim->kobj, KOBJ_ADD);
- return most_aim;
-}
-
-/**
- * destroy_most_aim_obj - AIM release function
- * @p: pointer to AIM object
- *
- * This decrements the reference counter of the AIM object. If the
- * reference count turns zero, its release function will be called.
- */
-static void destroy_most_aim_obj(struct most_aim_obj *p)
-{
- kobject_put(&p->kobj);
-}
-
-/* ___ ___
- * ___C O R E___
- */
-
-/**
- * Instantiation of the MOST bus
- */
-static struct bus_type most_bus = {
- .name = "most",
-};
-
-/**
- * Instantiation of the core driver
- */
-static struct device_driver mostcore = {
- .name = "mostcore",
- .bus = &most_bus,
-};
-
-static inline void trash_mbo(struct mbo *mbo)
-{
- unsigned long flags;
- struct most_c_obj *c = mbo->context;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- list_add(&mbo->list, &c->trash_fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
-}
-
-static bool hdm_mbo_ready(struct most_c_obj *c)
-{
- bool empty;
-
- if (c->enqueue_halt)
- return false;
-
- spin_lock_irq(&c->fifo_lock);
- empty = list_empty(&c->halt_fifo);
- spin_unlock_irq(&c->fifo_lock);
-
- return !empty;
-}
-
-static void nq_hdm_mbo(struct mbo *mbo)
-{
- unsigned long flags;
- struct most_c_obj *c = mbo->context;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- list_add_tail(&mbo->list, &c->halt_fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- wake_up_interruptible(&c->hdm_fifo_wq);
-}
-
-static int hdm_enqueue_thread(void *data)
-{
- struct most_c_obj *c = data;
- struct mbo *mbo;
- int ret;
- typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
-
- while (likely(!kthread_should_stop())) {
- wait_event_interruptible(c->hdm_fifo_wq,
- hdm_mbo_ready(c) ||
- kthread_should_stop());
-
- mutex_lock(&c->nq_mutex);
- spin_lock_irq(&c->fifo_lock);
- if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
- spin_unlock_irq(&c->fifo_lock);
- mutex_unlock(&c->nq_mutex);
- continue;
- }
-
- mbo = list_pop_mbo(&c->halt_fifo);
- spin_unlock_irq(&c->fifo_lock);
-
- if (c->cfg.direction == MOST_CH_RX)
- mbo->buffer_length = c->cfg.buffer_size;
-
- ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
- mutex_unlock(&c->nq_mutex);
-
- if (unlikely(ret)) {
- pr_err("hdm enqueue failed\n");
- nq_hdm_mbo(mbo);
- c->hdm_enqueue_task = NULL;
- return 0;
- }
- }
-
- return 0;
-}
-
-static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
-{
- struct task_struct *task =
- kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
- channel_id);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- c->hdm_enqueue_task = task;
- return 0;
-}
-
-/**
- * arm_mbo - recycle MBO for further usage
- * @mbo: buffer object
- *
- * This puts an MBO back on the list to have it ready for upcoming
- * tx transactions.
- *
- * In case the MBO belongs to a channel that has recently been
- * poisoned, the MBO is scheduled to be trashed.
- * Calls the completion handler of an attached AIM.
- */
-static void arm_mbo(struct mbo *mbo)
-{
- unsigned long flags;
- struct most_c_obj *c;
-
- BUG_ON((!mbo) || (!mbo->context));
- c = mbo->context;
-
- if (c->is_poisoned) {
- trash_mbo(mbo);
- return;
- }
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- ++*mbo->num_buffers_ptr;
- list_add_tail(&mbo->list, &c->fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
-
- if (c->aim0.refs && c->aim0.ptr->tx_completion)
- c->aim0.ptr->tx_completion(c->iface, c->channel_id);
-
- if (c->aim1.refs && c->aim1.ptr->tx_completion)
- c->aim1.ptr->tx_completion(c->iface, c->channel_id);
-}
-
-/**
- * arm_mbo_chain - helper function that arms an MBO chain for the HDM
- * @c: pointer to interface channel
- * @dir: direction of the channel
- * @compl: pointer to completion function
- *
- * This allocates buffer objects including the containing DMA coherent
- * buffer and puts them in the fifo.
- * Buffers of Rx channels are put in the kthread fifo, hence immediately
- * submitted to the HDM.
- *
- * Returns the number of allocated and enqueued MBOs.
- */
-static int arm_mbo_chain(struct most_c_obj *c, int dir,
- void (*compl)(struct mbo *))
-{
- unsigned int i;
- int retval;
- struct mbo *mbo;
- u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
-
- atomic_set(&c->mbo_nq_level, 0);
-
- for (i = 0; i < c->cfg.num_buffers; i++) {
- mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
- if (!mbo) {
- retval = i;
- goto _exit;
- }
- mbo->context = c;
- mbo->ifp = c->iface;
- mbo->hdm_channel_id = c->channel_id;
- mbo->virt_address = dma_alloc_coherent(NULL,
- coherent_buf_size,
- &mbo->bus_address,
- GFP_KERNEL);
- if (!mbo->virt_address) {
- pr_info("WARN: No DMA coherent buffer.\n");
- retval = i;
- goto _error1;
- }
- mbo->complete = compl;
- mbo->num_buffers_ptr = &dummy_num_buffers;
- if (dir == MOST_CH_RX) {
- nq_hdm_mbo(mbo);
- atomic_inc(&c->mbo_nq_level);
- } else {
- arm_mbo(mbo);
- }
- }
- return i;
-
-_error1:
- kfree(mbo);
-_exit:
- return retval;
-}
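One detail worth flagging in arm_mbo_chain(): dma_alloc_coherent() is passed a NULL struct device, which newer kernels refuse. A minimal sketch of the same allocation done against a real device follows; hdm_dev is a hypothetical parameter, since the channel object in this driver does not carry one.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Sketch only: allocate an MBO payload against the HDM's real device.
 * "hdm_dev" is an assumed parameter, not a field this driver provides.
 */
static void *alloc_mbo_payload(struct device *hdm_dev, size_t size,
			       dma_addr_t *bus_addr)
{
	return dma_alloc_coherent(hdm_dev, size, bus_addr, GFP_KERNEL);
}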
-
-/**
- * most_submit_mbo - submits an MBO to fifo
- * @mbo: pointer to the MBO
- */
-void most_submit_mbo(struct mbo *mbo)
-{
- if (WARN_ONCE(!mbo || !mbo->context,
- "bad mbo or missing channel reference\n"))
- return;
-
- nq_hdm_mbo(mbo);
-}
-EXPORT_SYMBOL_GPL(most_submit_mbo);
-
-/**
- * most_write_completion - write completion handler
- * @mbo: pointer to MBO
- *
- * This recycles the MBO for further usage. In case the channel has been
- * poisoned, the MBO is scheduled to be trashed.
- */
-static void most_write_completion(struct mbo *mbo)
-{
- struct most_c_obj *c;
-
- BUG_ON((!mbo) || (!mbo->context));
-
- c = mbo->context;
- if (mbo->status == MBO_E_INVAL)
- pr_info("WARN: Tx MBO status: invalid\n");
- if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
- trash_mbo(mbo);
- else
- arm_mbo(mbo);
-}
-
-/**
- * get_channel_by_iface - get pointer to channel object
- * @iface: pointer to interface instance
- * @id: channel ID
- *
- * This retrieves a pointer to a channel of the given interface and channel ID.
- */
-static struct
-most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
-{
- struct most_inst_obj *i;
-
- if (unlikely(!iface)) {
- pr_err("Bad interface\n");
- return NULL;
- }
- if (unlikely((id < 0) || (id >= iface->num_channels))) {
- pr_err("Channel index (%d) out of range\n", id);
- return NULL;
- }
- i = iface->priv;
- if (unlikely(!i)) {
- pr_err("interface is not registered\n");
- return NULL;
- }
- return i->channel[id];
-}
-
-int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
-{
- struct most_c_obj *c = get_channel_by_iface(iface, id);
- unsigned long flags;
- int empty;
-
- if (unlikely(!c))
- return -EINVAL;
-
- if (c->aim0.refs && c->aim1.refs &&
- ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
- (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
- return 0;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- empty = list_empty(&c->fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- return !empty;
-}
-EXPORT_SYMBOL_GPL(channel_has_mbo);
-
-/**
- * most_get_mbo - get pointer to an MBO of pool
- * @iface: pointer to interface instance
- * @id: channel ID
- *
- * This attempts to get a free buffer out of the channel fifo.
- * Returns a pointer to MBO on success or NULL otherwise.
- */
-struct mbo *most_get_mbo(struct most_interface *iface, int id,
- struct most_aim *aim)
-{
- struct mbo *mbo;
- struct most_c_obj *c;
- unsigned long flags;
- int *num_buffers_ptr;
-
- c = get_channel_by_iface(iface, id);
- if (unlikely(!c))
- return NULL;
-
- if (c->aim0.refs && c->aim1.refs &&
- ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
- (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
- return NULL;
-
- if (aim == c->aim0.ptr)
- num_buffers_ptr = &c->aim0.num_buffers;
- else if (aim == c->aim1.ptr)
- num_buffers_ptr = &c->aim1.num_buffers;
- else
- num_buffers_ptr = &dummy_num_buffers;
-
- spin_lock_irqsave(&c->fifo_lock, flags);
- if (list_empty(&c->fifo)) {
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- return NULL;
- }
- mbo = list_pop_mbo(&c->fifo);
- --*num_buffers_ptr;
- spin_unlock_irqrestore(&c->fifo_lock, flags);
-
- mbo->num_buffers_ptr = num_buffers_ptr;
- mbo->buffer_length = c->cfg.buffer_size;
- return mbo;
-}
-EXPORT_SYMBOL_GPL(most_get_mbo);
-
-/**
- * most_put_mbo - return buffer to pool
- * @mbo: buffer object
- */
-void most_put_mbo(struct mbo *mbo)
-{
- struct most_c_obj *c = mbo->context;
-
- if (c->cfg.direction == MOST_CH_TX) {
- arm_mbo(mbo);
- return;
- }
- nq_hdm_mbo(mbo);
- atomic_inc(&c->mbo_nq_level);
-}
-EXPORT_SYMBOL_GPL(most_put_mbo);
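Taken together, channel_has_mbo(), most_get_mbo(), most_submit_mbo() and most_put_mbo() form the buffer-handling API an AIM sees. Below is a minimal sketch of a TX path built on them; the function name and payload are assumptions for illustration, not code from this driver.

#include <linux/errno.h>
#include <linux/string.h>
#include "mostcore.h"

/*
 * Sketch of an AIM's TX path using the exports above. "my_aim" is the
 * AIM's own struct most_aim; data/len are an assumed payload.
 */
static int send_one_buffer(struct most_interface *iface, int id,
			   struct most_aim *my_aim,
			   const void *data, size_t len)
{
	struct mbo *mbo = most_get_mbo(iface, id, my_aim);

	if (!mbo)
		return -EAGAIN;		/* pool empty or quota exhausted */
	if (len > mbo->buffer_length) {
		most_put_mbo(mbo);	/* hand the buffer back unused */
		return -EMSGSIZE;
	}
	memcpy(mbo->virt_address, data, len);
	mbo->buffer_length = len;
	most_submit_mbo(mbo);		/* queue it toward the HDM */
	return 0;
}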
-
-/**
- * most_read_completion - read completion handler
- * @mbo: pointer to MBO
- *
- * This function is called by the HDM when data has been received from the
- * hardware and copied to the buffer of the MBO.
- *
- * In case the channel has been poisoned it puts the buffer in the trash queue.
- * Otherwise, it passes the buffer to an AIM for further processing.
- */
-static void most_read_completion(struct mbo *mbo)
-{
- struct most_c_obj *c = mbo->context;
-
- if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
- trash_mbo(mbo);
- return;
- }
-
- if (mbo->status == MBO_E_INVAL) {
- nq_hdm_mbo(mbo);
- atomic_inc(&c->mbo_nq_level);
- return;
- }
-
- if (atomic_sub_and_test(1, &c->mbo_nq_level))
- c->is_starving = 1;
-
- if (c->aim0.refs && c->aim0.ptr->rx_completion &&
- c->aim0.ptr->rx_completion(mbo) == 0)
- return;
-
- if (c->aim1.refs && c->aim1.ptr->rx_completion &&
- c->aim1.ptr->rx_completion(mbo) == 0)
- return;
-
- most_put_mbo(mbo);
-}
-
-/**
- * most_start_channel - prepares a channel for communication
- * @iface: pointer to interface instance
- * @id: channel ID
- *
- * This prepares the channel for usage and cross-checks whether the
- * channel has been properly configured.
- *
- * Returns 0 on success or error code otherwise.
- */
-int most_start_channel(struct most_interface *iface, int id,
- struct most_aim *aim)
-{
- int num_buffer;
- int ret;
- struct most_c_obj *c = get_channel_by_iface(iface, id);
-
- if (unlikely(!c))
- return -EINVAL;
-
- mutex_lock(&c->start_mutex);
- if (c->aim0.refs + c->aim1.refs > 0)
- goto out; /* already started by other aim */
-
- if (!try_module_get(iface->mod)) {
- pr_info("failed to acquire HDM lock\n");
- mutex_unlock(&c->start_mutex);
- return -ENOLCK;
- }
-
- c->cfg.extra_len = 0;
- if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
- pr_info("channel configuration failed. Go check settings...\n");
- ret = -EINVAL;
- goto error;
- }
-
- init_waitqueue_head(&c->hdm_fifo_wq);
-
- if (c->cfg.direction == MOST_CH_RX)
- num_buffer = arm_mbo_chain(c, c->cfg.direction,
- most_read_completion);
- else
- num_buffer = arm_mbo_chain(c, c->cfg.direction,
- most_write_completion);
- if (unlikely(!num_buffer)) {
- pr_info("failed to allocate memory\n");
- ret = -ENOMEM;
- goto error;
- }
-
- ret = run_enqueue_thread(c, id);
- if (ret)
- goto error;
-
- c->is_starving = 0;
- c->aim0.num_buffers = c->cfg.num_buffers / 2;
- c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
- atomic_set(&c->mbo_ref, num_buffer);
-
-out:
- if (aim == c->aim0.ptr)
- c->aim0.refs++;
- if (aim == c->aim1.ptr)
- c->aim1.refs++;
- mutex_unlock(&c->start_mutex);
- return 0;
-
-error:
- module_put(iface->mod);
- mutex_unlock(&c->start_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(most_start_channel);
-
-/**
- * most_stop_channel - stops a running channel
- * @iface: pointer to interface instance
- * @id: channel ID
- */
-int most_stop_channel(struct most_interface *iface, int id,
- struct most_aim *aim)
-{
- struct most_c_obj *c;
-
- if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
- pr_err("Bad interface or index out of range\n");
- return -EINVAL;
- }
- c = get_channel_by_iface(iface, id);
- if (unlikely(!c))
- return -EINVAL;
-
- mutex_lock(&c->start_mutex);
- if (c->aim0.refs + c->aim1.refs >= 2)
- goto out;
-
- if (c->hdm_enqueue_task)
- kthread_stop(c->hdm_enqueue_task);
- c->hdm_enqueue_task = NULL;
-
- if (iface->mod)
- module_put(iface->mod);
-
- c->is_poisoned = true;
- if (c->iface->poison_channel(c->iface, c->channel_id)) {
- pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
- c->iface->description);
- mutex_unlock(&c->start_mutex);
- return -EAGAIN;
- }
- flush_trash_fifo(c);
- flush_channel_fifos(c);
-
-#ifdef CMPL_INTERRUPTIBLE
- if (wait_for_completion_interruptible(&c->cleanup)) {
- pr_info("Interrupted while clean up ch %d\n", c->channel_id);
- mutex_unlock(&c->start_mutex);
- return -EINTR;
- }
-#else
- wait_for_completion(&c->cleanup);
-#endif
- c->is_poisoned = false;
-
-out:
- if (aim == c->aim0.ptr)
- c->aim0.refs--;
- if (aim == c->aim1.ptr)
- c->aim1.refs--;
- mutex_unlock(&c->start_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(most_stop_channel);
-
-/**
- * most_register_aim - registers an AIM (driver) with the core
- * @aim: instance of AIM to be registered
- */
-int most_register_aim(struct most_aim *aim)
-{
- struct most_aim_obj *aim_obj;
-
- if (!aim) {
- pr_err("Bad driver\n");
- return -EINVAL;
- }
- aim_obj = create_most_aim_obj(aim->name);
- if (!aim_obj) {
- pr_info("failed to alloc driver object\n");
- return -ENOMEM;
- }
- aim_obj->driver = aim;
- aim->context = aim_obj;
- pr_info("registered new application interfacing module %s\n",
- aim->name);
- list_add_tail(&aim_obj->list, &aim_list);
- return 0;
-}
-EXPORT_SYMBOL_GPL(most_register_aim);
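For reference, registration expects the AIM to bring a struct most_aim with at least the channel callbacks filled in. A minimal sketch with illustrative names and stub bodies follows; a real AIM typically starts the channel later (e.g. from its open() path) via most_start_channel() and pairs that with most_stop_channel().

#include "mostcore.h"

/* Sketch of a minimal AIM; names and callback bodies are stubs. */
static int my_probe(struct most_interface *iface, int channel_id,
		    struct most_channel_config *cfg,
		    struct kobject *parent, char *name)
{
	/* remember iface/channel_id here; channels are usually started
	 * later via most_start_channel(iface, channel_id, &my_aim)
	 */
	return 0;
}

static int my_disconnect(struct most_interface *iface, int channel_id)
{
	return 0;
}

static struct most_aim my_aim = {
	.name = "my_aim",
	.probe_channel = my_probe,
	.disconnect_channel = my_disconnect,
};

static int __init my_aim_init(void)
{
	return most_register_aim(&my_aim);
}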
-
-/**
- * most_deregister_aim - deregisters an AIM (driver) with the core
- * @aim: AIM to be removed
- */
-int most_deregister_aim(struct most_aim *aim)
-{
- struct most_aim_obj *aim_obj;
- struct most_c_obj *c, *tmp;
- struct most_inst_obj *i, *i_tmp;
-
- if (!aim) {
- pr_err("Bad driver\n");
- return -EINVAL;
- }
-
- aim_obj = aim->context;
- if (!aim_obj) {
- pr_info("driver not registered.\n");
- return -EINVAL;
- }
- list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
- list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
- if (c->aim0.ptr == aim || c->aim1.ptr == aim)
- aim->disconnect_channel(
- c->iface, c->channel_id);
- if (c->aim0.ptr == aim)
- c->aim0.ptr = NULL;
- if (c->aim1.ptr == aim)
- c->aim1.ptr = NULL;
- }
- }
- list_del(&aim_obj->list);
- destroy_most_aim_obj(aim_obj);
- pr_info("deregistering application interfacing module %s\n", aim->name);
- return 0;
-}
-EXPORT_SYMBOL_GPL(most_deregister_aim);
-
-/**
- * most_register_interface - registers an interface with core
- * @iface: pointer to the instance of the interface description.
- *
- * Allocates and initializes a new interface instance and all of its channels.
- * Returns a pointer to kobject or an error pointer.
- */
-struct kobject *most_register_interface(struct most_interface *iface)
-{
- unsigned int i;
- int id;
- char name[STRING_SIZE];
- char channel_name[STRING_SIZE];
- struct most_c_obj *c;
- struct most_inst_obj *inst;
-
- if (!iface || !iface->enqueue || !iface->configure ||
- !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
- pr_err("Bad interface or channel overflow\n");
- return ERR_PTR(-EINVAL);
- }
-
- id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
- if (id < 0) {
- pr_info("Failed to alloc mdev ID\n");
- return ERR_PTR(id);
- }
- snprintf(name, STRING_SIZE, "mdev%d", id);
-
- inst = create_most_inst_obj(name);
- if (!inst) {
- pr_info("Failed to allocate interface instance\n");
- ida_simple_remove(&mdev_id, id);
- return ERR_PTR(-ENOMEM);
- }
-
- iface->priv = inst;
- INIT_LIST_HEAD(&inst->channel_list);
- inst->iface = iface;
- inst->dev_id = id;
- list_add_tail(&inst->list, &instance_list);
-
- for (i = 0; i < iface->num_channels; i++) {
- const char *name_suffix = iface->channel_vector[i].name_suffix;
-
- if (!name_suffix)
- snprintf(channel_name, STRING_SIZE, "ch%d", i);
- else
- snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
-
- /* this increments the reference count of this instance */
- c = create_most_c_obj(channel_name, &inst->kobj);
- if (!c)
- goto free_instance;
- inst->channel[i] = c;
- c->is_starving = 0;
- c->iface = iface;
- c->inst = inst;
- c->channel_id = i;
- c->keep_mbo = false;
- c->enqueue_halt = false;
- c->is_poisoned = false;
- c->cfg.direction = 0;
- c->cfg.data_type = 0;
- c->cfg.num_buffers = 0;
- c->cfg.buffer_size = 0;
- c->cfg.subbuffer_size = 0;
- c->cfg.packets_per_xact = 0;
- spin_lock_init(&c->fifo_lock);
- INIT_LIST_HEAD(&c->fifo);
- INIT_LIST_HEAD(&c->trash_fifo);
- INIT_LIST_HEAD(&c->halt_fifo);
- init_completion(&c->cleanup);
- atomic_set(&c->mbo_ref, 0);
- mutex_init(&c->start_mutex);
- mutex_init(&c->nq_mutex);
- list_add_tail(&c->list, &inst->channel_list);
- }
- pr_info("registered new MOST device mdev%d (%s)\n",
- inst->dev_id, iface->description);
- return &inst->kobj;
-
-free_instance:
- pr_info("Failed allocate channel(s)\n");
- list_del(&inst->list);
- ida_simple_remove(&mdev_id, id);
- destroy_most_inst_obj(inst);
- return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL_GPL(most_register_interface);
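On the other side of the core, an HDM registers a struct most_interface; as the sanity check above shows, enqueue(), configure() and poison_channel() are mandatory and num_channels is capped at MAX_CHANNELS. A minimal sketch with stub callbacks and illustrative names, assuming the mostcore.h header:

#include <linux/err.h>
#include <linux/module.h>
#include "mostcore.h"

/* Sketch of the HDM side of registration; callback bodies are stubs. */
static int my_enqueue(struct most_interface *iface, int ch, struct mbo *mbo)
{
	return 0;	/* hand the MBO to the hardware */
}

static int my_configure(struct most_interface *iface, int ch,
			struct most_channel_config *cfg)
{
	return 0;	/* validate and apply the channel settings */
}

static int my_poison(struct most_interface *iface, int ch)
{
	return 0;	/* abort outstanding I/O, complete all MBOs */
}

static struct most_channel_capability my_caps[1];

static struct most_interface my_iface = {
	.interface = ITYPE_MEDIALB_DIM,
	.description = "example-hdm",
	.num_channels = 1,
	.channel_vector = my_caps,
	.enqueue = my_enqueue,
	.configure = my_configure,
	.poison_channel = my_poison,
	.mod = THIS_MODULE,
};

static int __init my_hdm_init(void)
{
	struct kobject *kobj = most_register_interface(&my_iface);

	return IS_ERR(kobj) ? PTR_ERR(kobj) : 0;
}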
-
-/**
- * most_deregister_interface - deregisters an interface with core
- * @iface: pointer to the interface instance description.
- *
- * Before removing an interface instance from the list, all running
- * channels are stopped and poisoned.
- */
-void most_deregister_interface(struct most_interface *iface)
-{
- struct most_inst_obj *i = iface->priv;
- struct most_c_obj *c;
-
- if (unlikely(!i)) {
- pr_info("Bad Interface\n");
- return;
- }
- pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
- iface->description);
-
- list_for_each_entry(c, &i->channel_list, list) {
- if (c->aim0.ptr)
- c->aim0.ptr->disconnect_channel(c->iface,
- c->channel_id);
- if (c->aim1.ptr)
- c->aim1.ptr->disconnect_channel(c->iface,
- c->channel_id);
- c->aim0.ptr = NULL;
- c->aim1.ptr = NULL;
- }
-
- ida_simple_remove(&mdev_id, i->dev_id);
- list_del(&i->list);
- destroy_most_inst_obj(i);
-}
-EXPORT_SYMBOL_GPL(most_deregister_interface);
-
-/**
- * most_stop_enqueue - prevents core from enqueueing MBOs
- * @iface: pointer to interface
- * @id: channel id
- *
- * This is called by an HDM that _cannot_ attend to its duties and
- * is about to be overrun by the core. The core will not enqueue
- * any further packets unless the flagging HDM calls
- * most_resume_enqueue().
- */
-void most_stop_enqueue(struct most_interface *iface, int id)
-{
- struct most_c_obj *c = get_channel_by_iface(iface, id);
-
- if (!c)
- return;
-
- mutex_lock(&c->nq_mutex);
- c->enqueue_halt = true;
- mutex_unlock(&c->nq_mutex);
-}
-EXPORT_SYMBOL_GPL(most_stop_enqueue);
-
-/**
- * most_resume_enqueue - allow core to enqueue MBOs again
- * @iface: pointer to interface
- * @id: channel id
- *
- * This clears the enqueue halt flag and enqueues all MBOs currently
- * sitting in the wait fifo.
- */
-void most_resume_enqueue(struct most_interface *iface, int id)
-{
- struct most_c_obj *c = get_channel_by_iface(iface, id);
-
- if (!c)
- return;
-
- mutex_lock(&c->nq_mutex);
- c->enqueue_halt = false;
- mutex_unlock(&c->nq_mutex);
-
- wake_up_interruptible(&c->hdm_fifo_wq);
-}
-EXPORT_SYMBOL_GPL(most_resume_enqueue);
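The two calls above give an HDM backpressure control: halt the core when the hardware cannot accept more buffers and resume it once it can. A minimal sketch of that pattern; my_hw_busy() is an assumed helper standing in for a real resource check.

/* Sketch of HDM flow control; my_hw_busy() is an assumed helper. */
static bool my_hw_busy(struct most_interface *iface, int id)
{
	return false;	/* e.g. all URBs for this channel in flight */
}

static void my_hdm_before_io(struct most_interface *iface, int id)
{
	if (my_hw_busy(iface, id))
		most_stop_enqueue(iface, id);	/* core parks new MBOs */
}

static void my_hdm_io_done(struct most_interface *iface, int id)
{
	most_resume_enqueue(iface, id);		/* drain the wait fifo */
}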
-
-static int __init most_init(void)
-{
- int err;
-
- pr_info("init()\n");
- INIT_LIST_HEAD(&instance_list);
- INIT_LIST_HEAD(&aim_list);
- ida_init(&mdev_id);
-
- err = bus_register(&most_bus);
- if (err) {
- pr_info("Cannot register most bus\n");
- return err;
- }
-
- most_class = class_create(THIS_MODULE, "most");
- if (IS_ERR(most_class)) {
- pr_info("No udev support.\n");
- err = PTR_ERR(most_class);
- goto exit_bus;
- }
-
- err = driver_register(&mostcore);
- if (err) {
- pr_info("Cannot register core driver\n");
- goto exit_class;
- }
-
- core_dev = device_create(most_class, NULL, 0, NULL, "mostcore");
- if (IS_ERR(core_dev)) {
- err = PTR_ERR(core_dev);
- goto exit_driver;
- }
-
- most_aim_kset = kset_create_and_add("aims", NULL, &core_dev->kobj);
- if (!most_aim_kset) {
- err = -ENOMEM;
- goto exit_class_container;
- }
-
- most_inst_kset = kset_create_and_add("devices", NULL, &core_dev->kobj);
- if (!most_inst_kset) {
- err = -ENOMEM;
- goto exit_driver_kset;
- }
-
- return 0;
-
-exit_driver_kset:
- kset_unregister(most_aim_kset);
-exit_class_container:
- device_destroy(most_class, 0);
-exit_driver:
- driver_unregister(&mostcore);
-exit_class:
- class_destroy(most_class);
-exit_bus:
- bus_unregister(&most_bus);
- return err;
-}
-
-static void __exit most_exit(void)
-{
- struct most_inst_obj *i, *i_tmp;
- struct most_aim_obj *d, *d_tmp;
-
- pr_info("exit core module\n");
- list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
- destroy_most_aim_obj(d);
- }
-
- list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
- list_del(&i->list);
- destroy_most_inst_obj(i);
- }
- kset_unregister(most_inst_kset);
- kset_unregister(most_aim_kset);
- device_destroy(most_class, 0);
- driver_unregister(&mostcore);
- class_destroy(most_class);
- bus_unregister(&most_bus);
- ida_destroy(&mdev_id);
-}
-
-module_init(most_init);
-module_exit(most_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
-MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
diff --git a/drivers/staging/most/aim-network/Kconfig b/drivers/staging/most/net/Kconfig
index 4c66b24cf73c..795330ba94ef 100644
--- a/drivers/staging/most/aim-network/Kconfig
+++ b/drivers/staging/most/net/Kconfig
@@ -2,12 +2,12 @@
# MOST Networking configuration
#
-config AIM_NETWORK
- tristate "Networking AIM"
+config MOST_NET
+ tristate "Net"
depends on NET
---help---
Say Y here if you want to communicate via a networking device.
To compile this driver as a module, choose M here: the
- module will be called aim_network.
+ module will be called most_net.
diff --git a/drivers/staging/most/net/Makefile b/drivers/staging/most/net/Makefile
new file mode 100644
index 000000000000..54500aa77be8
--- /dev/null
+++ b/drivers/staging/most/net/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_NET) += most_net.o
+
+most_net-objs := net.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/net/net.c
index 936f013c350e..30d816b7e165 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/net/net.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Networking AIM - Networking Application Interface Module for MostCore
+ * net.c - Networking component for Mostcore
*
* Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -21,7 +15,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/kobject.h>
-#include "mostcore.h"
+#include "most/core.h"
#define MEP_HDR_LEN 8
#define MDP_HDR_LEN 16
@@ -52,10 +46,12 @@
((len) > MEP_HDR_LEN && \
EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)
-#define PMS_IS_MAMAC(buf, len) \
- ((len) > MDP_HDR_LEN && \
- EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MDP && \
- EXTRACT_BIT_SET(PMS_TELID, (buf)[14]) == PMS_TELID_UNSEGM_MAMAC)
+static inline bool pms_is_mamac(char *buf, u32 len)
+{
+ return (len > MDP_HDR_LEN &&
+ EXTRACT_BIT_SET(PMS_FIFONO, buf[3]) == PMS_FIFONO_MDP &&
+ EXTRACT_BIT_SET(PMS_TELID, buf[14]) == PMS_TELID_UNSEGM_MAMAC);
+}
struct net_dev_channel {
bool linked;
@@ -74,7 +70,7 @@ struct net_dev_context {
static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
static struct mutex probe_disc_mt; /* ch->linked = true, most_nd_open */
static struct spinlock list_lock; /* list_head, ch->linked = false, dev_hold */
-static struct most_aim aim;
+static struct core_component comp;
static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
{
@@ -184,15 +180,15 @@ static int most_nd_open(struct net_device *dev)
mutex_lock(&probe_disc_mt);
- if (most_start_channel(nd->iface, nd->rx.ch_id, &aim)) {
+ if (most_start_channel(nd->iface, nd->rx.ch_id, &comp)) {
netdev_err(dev, "most_start_channel() failed\n");
ret = -EBUSY;
goto unlock;
}
- if (most_start_channel(nd->iface, nd->tx.ch_id, &aim)) {
+ if (most_start_channel(nd->iface, nd->tx.ch_id, &comp)) {
netdev_err(dev, "most_start_channel() failed\n");
- most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+ most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
ret = -EBUSY;
goto unlock;
}
@@ -218,8 +214,8 @@ static int most_nd_stop(struct net_device *dev)
netif_stop_queue(dev);
if (nd->iface->request_netinfo)
nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, NULL);
- most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
- most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
+ most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
+ most_stop_channel(nd->iface, nd->tx.ch_id, &comp);
return 0;
}
@@ -231,7 +227,7 @@ static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
struct mbo *mbo;
int ret;
- mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &aim);
+ mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &comp);
if (!mbo) {
netif_stop_queue(dev);
@@ -296,9 +292,8 @@ static struct net_dev_context *get_net_dev_hold(struct most_interface *iface)
return nd;
}
-static int aim_probe_channel(struct most_interface *iface, int channel_idx,
- struct most_channel_config *ccfg,
- struct kobject *parent, char *name)
+static int comp_probe_channel(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *ccfg, char *name)
{
struct net_dev_context *nd;
struct net_dev_channel *ch;
@@ -353,8 +348,8 @@ unlock:
return ret;
}
-static int aim_disconnect_channel(struct most_interface *iface,
- int channel_idx)
+static int comp_disconnect_channel(struct most_interface *iface,
+ int channel_idx)
{
struct net_dev_context *nd;
struct net_dev_channel *ch;
@@ -400,8 +395,8 @@ unlock:
return ret;
}
-static int aim_resume_tx_channel(struct most_interface *iface,
- int channel_idx)
+static int comp_resume_tx_channel(struct most_interface *iface,
+ int channel_idx)
{
struct net_dev_context *nd;
@@ -419,7 +414,7 @@ put_nd:
return 0;
}
-static int aim_rx_data(struct mbo *mbo)
+static int comp_rx_data(struct mbo *mbo)
{
const u32 zero = 0;
struct net_dev_context *nd;
@@ -442,7 +437,7 @@ static int aim_rx_data(struct mbo *mbo)
dev = nd->dev;
if (nd->is_mamac) {
- if (!PMS_IS_MAMAC(buf, len)) {
+ if (!pms_is_mamac(buf, len)) {
ret = -EIO;
goto put_nd;
}
@@ -501,24 +496,24 @@ put_nd:
return ret;
}
-static struct most_aim aim = {
- .name = "networking",
- .probe_channel = aim_probe_channel,
- .disconnect_channel = aim_disconnect_channel,
- .tx_completion = aim_resume_tx_channel,
- .rx_completion = aim_rx_data,
+static struct core_component comp = {
+ .name = "net",
+ .probe_channel = comp_probe_channel,
+ .disconnect_channel = comp_disconnect_channel,
+ .tx_completion = comp_resume_tx_channel,
+ .rx_completion = comp_rx_data,
};
static int __init most_net_init(void)
{
spin_lock_init(&list_lock);
mutex_init(&probe_disc_mt);
- return most_register_aim(&aim);
+ return most_register_component(&comp);
}
static void __exit most_net_exit(void)
{
- most_deregister_aim(&aim);
+ most_deregister_component(&comp);
}
/**
@@ -564,4 +559,4 @@ module_init(most_net_init);
module_exit(most_net_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
-MODULE_DESCRIPTION("Networking Application Interface Module for MostCore");
+MODULE_DESCRIPTION("Networking Component Module for Mostcore");
diff --git a/drivers/staging/most/aim-sound/Kconfig b/drivers/staging/most/sound/Kconfig
index 3194c219ff14..115262a58a42 100644
--- a/drivers/staging/most/aim-sound/Kconfig
+++ b/drivers/staging/most/sound/Kconfig
@@ -2,12 +2,12 @@
# MOST ALSA configuration
#
-config AIM_SOUND
- tristate "ALSA AIM"
+config MOST_SOUND
+ tristate "Sound"
depends on SND
select SND_PCM
---help---
Say Y here if you want to communicate via ALSA/sound devices.
To compile this driver as a module, choose M here: the
- module will be called aim_sound.
+ module will be called most_sound.
diff --git a/drivers/staging/most/sound/Makefile b/drivers/staging/most/sound/Makefile
new file mode 100644
index 000000000000..eee8774e38cb
--- /dev/null
+++ b/drivers/staging/most/sound/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_SOUND) += most_sound.o
+
+most_sound-objs := sound.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/sound/sound.c
index ea1366a44008..83cec21c85b8 100644
--- a/drivers/staging/most/aim-sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * sound.c - Audio Application Interface Module for Mostcore
+ * sound.c - Sound component for Mostcore
*
* Copyright (C) 2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -22,12 +16,12 @@
#include <sound/pcm_params.h>
#include <linux/sched.h>
#include <linux/kthread.h>
-#include <mostcore.h>
+#include <most/core.h>
#define DRIVER_NAME "sound"
static struct list_head dev_list;
-static struct most_aim audio_aim;
+static struct core_component comp;
/**
* struct channel - private structure to keep channel specific data
@@ -240,7 +234,7 @@ static int playback_thread(void *data)
kthread_should_stop() ||
(channel->is_stream_running &&
(mbo = most_get_mbo(channel->iface, channel->id,
- &audio_aim))));
+ &comp))));
if (!mbo)
continue;
@@ -283,7 +277,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
}
}
- if (most_start_channel(channel->iface, channel->id, &audio_aim)) {
+ if (most_start_channel(channel->iface, channel->id, &comp)) {
pr_err("most_start_channel() failed!\n");
if (cfg->direction == MOST_CH_TX)
kthread_stop(channel->playback_task);
@@ -310,7 +304,7 @@ static int pcm_close(struct snd_pcm_substream *substream)
if (channel->cfg->direction == MOST_CH_TX)
kthread_stop(channel->playback_task);
- most_stop_channel(channel->iface, channel->id, &audio_aim);
+ most_stop_channel(channel->iface, channel->id, &comp);
return 0;
}
@@ -545,7 +539,6 @@ error:
* @iface: pointer to interface instance
* @channel_id: channel index/ID
* @cfg: pointer to actual channel configuration
- * @parent: pointer to kobject (needed for sysfs hook-up)
* @arg_list: string that provides the name of the device to be created in /dev
* plus the desired audio resolution
*
@@ -555,7 +548,7 @@ error:
*/
static int audio_probe_channel(struct most_interface *iface, int channel_id,
struct most_channel_config *cfg,
- struct kobject *parent, char *arg_list)
+ char *arg_list)
{
struct channel *channel;
struct snd_card *card;
@@ -724,9 +717,9 @@ static int audio_tx_completion(struct most_interface *iface, int channel_id)
}
/**
- * Initialization of the struct most_aim
+ * Initialization of the struct core_component
*/
-static struct most_aim audio_aim = {
+static struct core_component comp = {
.name = DRIVER_NAME,
.probe_channel = audio_probe_channel,
.disconnect_channel = audio_disconnect_channel,
@@ -740,7 +733,7 @@ static int __init audio_init(void)
INIT_LIST_HEAD(&dev_list);
- return most_register_aim(&audio_aim);
+ return most_register_component(&comp);
}
static void __exit audio_exit(void)
@@ -754,7 +747,7 @@ static void __exit audio_exit(void)
snd_card_free(channel->card);
}
- most_deregister_aim(&audio_aim);
+ most_deregister_component(&comp);
}
module_init(audio_init);
@@ -762,4 +755,4 @@ module_exit(audio_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
-MODULE_DESCRIPTION("Audio Application Interface Module for MostCore");
+MODULE_DESCRIPTION("Sound Component Module for Mostcore");
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/usb/Kconfig
index 487f1f34776c..ebbdb573a9a6 100644
--- a/drivers/staging/most/hdm-usb/Kconfig
+++ b/drivers/staging/most/usb/Kconfig
@@ -2,13 +2,12 @@
# MOST USB configuration
#
-config HDM_USB
- tristate "USB HDM"
+config MOST_USB
+ tristate "USB"
depends on USB && NET
-
---help---
Say Y here if you want to connect via USB to a network transceiver.
This device driver depends on the networking AIM.
To compile this driver as a module, choose M here: the
- module will be called hdm_usb.
+ module will be called most_usb.
diff --git a/drivers/staging/most/usb/Makefile b/drivers/staging/most/usb/Makefile
new file mode 100644
index 000000000000..18d28cba4fbf
--- /dev/null
+++ b/drivers/staging/most/usb/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_USB) += most_usb.o
+
+most_usb-objs := usb.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/usb/usb.c
index 667dacac81f0..31f184cfcd69 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/usb/usb.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * hdm_usb.c - Hardware dependent module for USB
+ * usb.c - Hardware dependent module for USB
*
* Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -29,7 +23,7 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
-#include "mostcore.h"
+#include "most/core.h"
#define USB_MTU 512
#define NO_ISOCHRONOUS_URB 0
@@ -70,12 +64,12 @@
* @reg_addr: register address for arbitrary DCI access
*/
struct most_dci_obj {
- struct kobject kobj;
+ struct device dev;
struct usb_device *usb_device;
u16 reg_addr;
};
-#define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
+#define to_dci_obj(p) container_of(p, struct most_dci_obj, dev)
struct most_dev;
@@ -90,7 +84,6 @@ struct clear_hold_work {
/**
* struct most_dev - holds all usb interface specific stuff
- * @parent: parent object in sysfs
* @usb_device: pointer to usb device
* @iface: hardware interface
* @cap: channel capabilities
@@ -108,7 +101,6 @@ struct clear_hold_work {
* @poll_work_obj: work for polling link status
*/
struct most_dev {
- struct kobject *parent;
struct usb_device *usb_device;
struct most_interface iface;
struct most_channel_capability *cap;
@@ -125,8 +117,8 @@ struct most_dev {
struct mutex io_mutex;
struct timer_list link_stat_timer;
struct work_struct poll_work_obj;
- void (*on_netinfo)(struct most_interface *, unsigned char,
- unsigned char *);
+ void (*on_netinfo)(struct most_interface *most_iface,
+ unsigned char link_state, unsigned char *addrs);
};
#define to_mdev(d) container_of(d, struct most_dev, iface)
@@ -817,6 +809,21 @@ static void wq_clear_halt(struct work_struct *wq_obj)
if (usb_clear_halt(mdev->usb_device, pipe))
dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
+ /* If the functional Stall condition has been set on an
+ * asynchronous rx channel, we need to clear the tx channel
+ * too, since the hardware runs its clean-up sequence on both
+ * channels, as they are physically one on the network.
+ *
+ * The USB interface that exposes the asynchronous channels
+ * always contains exactly two endpoints.
+ */
+ if (mdev->conf[channel].data_type == MOST_CH_ASYNC &&
+ mdev->conf[channel].direction == MOST_CH_RX) {
+ int peer = 1 - channel;
+ int snd_pipe = usb_sndbulkpipe(mdev->usb_device,
+ mdev->ep_address[peer]);
+ usb_clear_halt(mdev->usb_device, snd_pipe);
+ }
mdev->is_channel_healthy[channel] = true;
most_resume_enqueue(&mdev->iface, channel);
mutex_unlock(&mdev->io_mutex);
@@ -840,94 +847,6 @@ static const struct usb_device_id usbid[] = {
{ } /* Terminating entry */
};
-#define MOST_DCI_RO_ATTR(_name) \
- struct most_dci_attribute most_dci_attr_##_name = \
- __ATTR(_name, 0444, show_value, NULL)
-
-#define MOST_DCI_ATTR(_name) \
- struct most_dci_attribute most_dci_attr_##_name = \
- __ATTR(_name, 0644, show_value, store_value)
-
-#define MOST_DCI_WO_ATTR(_name) \
- struct most_dci_attribute most_dci_attr_##_name = \
- __ATTR(_name, 0200, NULL, store_value)
-
-/**
- * struct most_dci_attribute - to access the attributes of a dci object
- * @attr: attributes of a dci object
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_dci_attribute {
- struct attribute attr;
- ssize_t (*show)(struct most_dci_obj *d,
- struct most_dci_attribute *attr,
- char *buf);
- ssize_t (*store)(struct most_dci_obj *d,
- struct most_dci_attribute *attr,
- const char *buf,
- size_t count);
-};
-
-#define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
-
-/**
- * dci_attr_show - show function for dci object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- */
-static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct most_dci_attribute *dci_attr = to_dci_attr(attr);
- struct most_dci_obj *dci_obj = to_dci_obj(kobj);
-
- if (!dci_attr->show)
- return -EIO;
-
- return dci_attr->show(dci_obj, dci_attr, buf);
-}
-
-/**
- * dci_attr_store - store function for dci object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t dci_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct most_dci_attribute *dci_attr = to_dci_attr(attr);
- struct most_dci_obj *dci_obj = to_dci_obj(kobj);
-
- if (!dci_attr->store)
- return -EIO;
-
- return dci_attr->store(dci_obj, dci_attr, buf, len);
-}
-
-static const struct sysfs_ops most_dci_sysfs_ops = {
- .show = dci_attr_show,
- .store = dci_attr_store,
-};
-
-/**
- * most_dci_release - release function for dci object
- * @kobj: pointer to kobject
- *
- * This frees the memory allocated for the dci object
- */
-static void most_dci_release(struct kobject *kobj)
-{
- struct most_dci_obj *dci_obj = to_dci_obj(kobj);
-
- kfree(dci_obj);
-}
-
struct regs {
const char *name;
u16 reg;
@@ -968,10 +887,11 @@ static int get_stat_reg_addr(const struct regs *regs, int size,
#define get_static_reg_addr(regs, name, reg_addr) \
get_stat_reg_addr(regs, ARRAY_SIZE(regs), name, reg_addr)
-static ssize_t show_value(struct most_dci_obj *dci_obj,
- struct most_dci_attribute *attr, char *buf)
+static ssize_t value_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
const char *name = attr->attr.name;
+ struct most_dci_obj *dci_obj = to_dci_obj(dev);
u16 val;
u16 reg_addr;
int err;
@@ -992,13 +912,13 @@ static ssize_t show_value(struct most_dci_obj *dci_obj,
return snprintf(buf, PAGE_SIZE, "%04x\n", val);
}
-static ssize_t store_value(struct most_dci_obj *dci_obj,
- struct most_dci_attribute *attr,
+static ssize_t value_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u16 val;
u16 reg_addr;
const char *name = attr->attr.name;
+ struct most_dci_obj *dci_obj = to_dci_obj(dev);
struct usb_device *usb_dev = dci_obj->usb_device;
int err = kstrtou16(buf, 16, &val);
@@ -1025,86 +945,49 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
return count;
}
-static MOST_DCI_RO_ATTR(ni_state);
-static MOST_DCI_RO_ATTR(packet_bandwidth);
-static MOST_DCI_RO_ATTR(node_address);
-static MOST_DCI_RO_ATTR(node_position);
-static MOST_DCI_WO_ATTR(sync_ep);
-static MOST_DCI_ATTR(mep_filter);
-static MOST_DCI_ATTR(mep_hash0);
-static MOST_DCI_ATTR(mep_hash1);
-static MOST_DCI_ATTR(mep_hash2);
-static MOST_DCI_ATTR(mep_hash3);
-static MOST_DCI_ATTR(mep_eui48_hi);
-static MOST_DCI_ATTR(mep_eui48_mi);
-static MOST_DCI_ATTR(mep_eui48_lo);
-static MOST_DCI_ATTR(arb_address);
-static MOST_DCI_ATTR(arb_value);
-
-/**
- * most_dci_def_attrs - array of default attribute files of the dci object
- */
-static struct attribute *most_dci_def_attrs[] = {
- &most_dci_attr_ni_state.attr,
- &most_dci_attr_packet_bandwidth.attr,
- &most_dci_attr_node_address.attr,
- &most_dci_attr_node_position.attr,
- &most_dci_attr_sync_ep.attr,
- &most_dci_attr_mep_filter.attr,
- &most_dci_attr_mep_hash0.attr,
- &most_dci_attr_mep_hash1.attr,
- &most_dci_attr_mep_hash2.attr,
- &most_dci_attr_mep_hash3.attr,
- &most_dci_attr_mep_eui48_hi.attr,
- &most_dci_attr_mep_eui48_mi.attr,
- &most_dci_attr_mep_eui48_lo.attr,
- &most_dci_attr_arb_address.attr,
- &most_dci_attr_arb_value.attr,
+static DEVICE_ATTR(ni_state, 0444, value_show, NULL);
+static DEVICE_ATTR(packet_bandwidth, 0444, value_show, NULL);
+static DEVICE_ATTR(node_address, 0444, value_show, NULL);
+static DEVICE_ATTR(node_position, 0444, value_show, NULL);
+static DEVICE_ATTR(sync_ep, 0200, NULL, value_store);
+static DEVICE_ATTR(mep_filter, 0644, value_show, value_store);
+static DEVICE_ATTR(mep_hash0, 0644, value_show, value_store);
+static DEVICE_ATTR(mep_hash1, 0644, value_show, value_store);
+static DEVICE_ATTR(mep_hash2, 0644, value_show, value_store);
+static DEVICE_ATTR(mep_hash3, 0644, value_show, value_store);
+static DEVICE_ATTR(mep_eui48_hi, 0644, value_show, value_store);
+static DEVICE_ATTR(mep_eui48_mi, 0644, value_show, value_store);
+static DEVICE_ATTR(mep_eui48_lo, 0644, value_show, value_store);
+static DEVICE_ATTR(arb_address, 0644, value_show, value_store);
+static DEVICE_ATTR(arb_value, 0644, value_show, value_store);
+
+static struct attribute *dci_attrs[] = {
+ &dev_attr_ni_state.attr,
+ &dev_attr_packet_bandwidth.attr,
+ &dev_attr_node_address.attr,
+ &dev_attr_node_position.attr,
+ &dev_attr_sync_ep.attr,
+ &dev_attr_mep_filter.attr,
+ &dev_attr_mep_hash0.attr,
+ &dev_attr_mep_hash1.attr,
+ &dev_attr_mep_hash2.attr,
+ &dev_attr_mep_hash3.attr,
+ &dev_attr_mep_eui48_hi.attr,
+ &dev_attr_mep_eui48_mi.attr,
+ &dev_attr_mep_eui48_lo.attr,
+ &dev_attr_arb_address.attr,
+ &dev_attr_arb_value.attr,
NULL,
};
-/**
- * DCI ktype
- */
-static struct kobj_type most_dci_ktype = {
- .sysfs_ops = &most_dci_sysfs_ops,
- .release = most_dci_release,
- .default_attrs = most_dci_def_attrs,
+static struct attribute_group dci_attr_group = {
+ .attrs = dci_attrs,
};
-/**
- * create_most_dci_obj - allocates a dci object
- * @parent: parent kobject
- *
- * This creates a dci object and registers it with sysfs.
- * Returns a pointer to the object or NULL when something went wrong.
- */
-static struct
-most_dci_obj *create_most_dci_obj(struct kobject *parent)
-{
- struct most_dci_obj *most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
- int retval;
-
- if (!most_dci)
- return NULL;
-
- retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
- "dci");
- if (retval) {
- kobject_put(&most_dci->kobj);
- return NULL;
- }
- return most_dci;
-}
-
-/**
- * destroy_most_dci_obj - DCI object release function
- * @p: pointer to dci object
- */
-static void destroy_most_dci_obj(struct most_dci_obj *p)
-{
- kobject_put(&p->kobj);
-}
+static const struct attribute_group *dci_attr_groups[] = {
+ &dci_attr_group,
+ NULL,
+};
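The conversion above replaces the hand-rolled kobject/sysfs_ops machinery with stock device attributes: once dci_attr_groups is hung off dev.groups, device_register() creates all attribute files atomically, before the KOBJ_ADD uevent fires. A minimal sketch of that registration step, mirroring what hdm_probe() does below; error unwinding is elided.

/* Sketch: registering the DCI device with its attribute groups. */
static int register_dci(struct most_dci_obj *dci, struct device *parent)
{
	dci->dev.init_name = "dci";
	dci->dev.parent = parent;
	dci->dev.groups = dci_attr_groups;	/* files created on register */
	return device_register(&dci->dev);
}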
/**
* hdm_probe - probe function of USB device driver
@@ -1168,8 +1051,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
goto exit_free1;
mdev->iface.channel_vector = mdev->cap;
- mdev->iface.priv = NULL;
-
mdev->ep_address =
kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
if (!mdev->ep_address)
@@ -1217,20 +1098,15 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
usb_dev->config->desc.bConfigurationValue,
usb_iface_desc->desc.bInterfaceNumber);
- mdev->parent = most_register_interface(&mdev->iface);
- if (IS_ERR(mdev->parent)) {
- ret = PTR_ERR(mdev->parent);
+ ret = most_register_interface(&mdev->iface);
+ if (ret)
goto exit_free4;
- }
mutex_lock(&mdev->io_mutex);
if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81118 ||
le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81119 ||
le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81210) {
- /* this increments the reference count of the instance
- * object of the core
- */
- mdev->dci = create_most_dci_obj(mdev->parent);
+ mdev->dci = kzalloc(sizeof(*mdev->dci), GFP_KERNEL);
if (!mdev->dci) {
mutex_unlock(&mdev->io_mutex);
most_deregister_interface(&mdev->iface);
@@ -1238,12 +1114,21 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
goto exit_free4;
}
- kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
+ mdev->dci->dev.init_name = "dci";
+ mdev->dci->dev.parent = &mdev->iface.dev;
+ mdev->dci->dev.groups = dci_attr_groups;
+ if (device_register(&mdev->dci->dev)) {
+ mutex_unlock(&mdev->io_mutex);
+ most_deregister_interface(&mdev->iface);
+ ret = -ENOMEM;
+ goto exit_free5;
+ }
mdev->dci->usb_device = mdev->usb_device;
}
mutex_unlock(&mdev->io_mutex);
return 0;
-
+exit_free5:
+ kfree(mdev->dci);
exit_free4:
kfree(mdev->busy_urbs);
exit_free3:
@@ -1283,7 +1168,8 @@ static void hdm_disconnect(struct usb_interface *interface)
del_timer_sync(&mdev->link_stat_timer);
cancel_work_sync(&mdev->poll_work_obj);
- destroy_most_dci_obj(mdev->dci);
+ device_unregister(&mdev->dci->dev);
+ kfree(mdev->dci);
most_deregister_interface(&mdev->iface);
kfree(mdev->busy_urbs);
diff --git a/drivers/staging/most/aim-v4l2/Kconfig b/drivers/staging/most/video/Kconfig
index d70eaaf0936c..ce6af4f951a6 100644
--- a/drivers/staging/most/aim-v4l2/Kconfig
+++ b/drivers/staging/most/video/Kconfig
@@ -2,11 +2,11 @@
# MOST V4L2 configuration
#
-config AIM_V4L2
- tristate "V4L2 AIM"
+config MOST_VIDEO
+ tristate "Video"
depends on VIDEO_V4L2
---help---
Say Y here if you want to communicate via Video 4 Linux.
To compile this driver as a module, choose M here: the
- module will be called aim_v4l2.
\ No newline at end of file
+ module will be called most_video.
diff --git a/drivers/staging/most/video/Makefile b/drivers/staging/most/video/Makefile
new file mode 100644
index 000000000000..1c8e520e02a2
--- /dev/null
+++ b/drivers/staging/most/video/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_VIDEO) += most_video.o
+
+most_video-objs := video.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/aim-v4l2/video.c b/drivers/staging/most/video/video.c
index e0748416aee5..ef23e8524b1e 100644
--- a/drivers/staging/most/aim-v4l2/video.c
+++ b/drivers/staging/most/video/video.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * V4L2 AIM - V4L2 Application Interface Module for MostCore
+ * video.c - V4L2 component for Mostcore
*
* Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -27,11 +21,11 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
-#include "mostcore.h"
+#include "most/core.h"
-#define V4L2_AIM_MAX_INPUT 1
+#define V4L2_CMP_MAX_INPUT 1
-static struct most_aim aim_info;
+static struct core_component comp;
struct most_video_dev {
struct most_interface *iface;
@@ -52,7 +46,7 @@ struct most_video_dev {
wait_queue_head_t wait_data;
};
-struct aim_fh {
+struct comp_fh {
/* must be the first field of this struct! */
struct v4l2_fh fh;
struct most_video_dev *mdev;
@@ -72,14 +66,14 @@ static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
return list_first_entry(&mdev->pending_mbos, struct mbo, list);
}
-static int aim_vdev_open(struct file *filp)
+static int comp_vdev_open(struct file *filp)
{
int ret;
struct video_device *vdev = video_devdata(filp);
struct most_video_dev *mdev = video_drvdata(filp);
- struct aim_fh *fh;
+ struct comp_fh *fh;
- v4l2_info(&mdev->v4l2_dev, "aim_vdev_open()\n");
+ v4l2_info(&mdev->v4l2_dev, "comp_vdev_open()\n");
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
@@ -104,7 +98,7 @@ static int aim_vdev_open(struct file *filp)
v4l2_fh_add(&fh->fh);
- ret = most_start_channel(mdev->iface, mdev->ch_idx, &aim_info);
+ ret = most_start_channel(mdev->iface, mdev->ch_idx, &comp);
if (ret) {
v4l2_err(&mdev->v4l2_dev, "most_start_channel() failed\n");
goto err_rm;
@@ -122,13 +116,13 @@ err_dec:
return ret;
}
-static int aim_vdev_close(struct file *filp)
+static int comp_vdev_close(struct file *filp)
{
- struct aim_fh *fh = filp->private_data;
+ struct comp_fh *fh = filp->private_data;
struct most_video_dev *mdev = fh->mdev;
struct mbo *mbo, *tmp;
- v4l2_info(&mdev->v4l2_dev, "aim_vdev_close()\n");
+ v4l2_info(&mdev->v4l2_dev, "comp_vdev_close()\n");
/*
* We need to put MBOs back before we call most_stop_channel()
@@ -148,7 +142,7 @@ static int aim_vdev_close(struct file *filp)
spin_lock_irq(&mdev->list_lock);
}
spin_unlock_irq(&mdev->list_lock);
- most_stop_channel(mdev->iface, mdev->ch_idx, &aim_info);
+ most_stop_channel(mdev->iface, mdev->ch_idx, &comp);
mdev->mute = false;
v4l2_fh_del(&fh->fh);
@@ -159,10 +153,10 @@ static int aim_vdev_close(struct file *filp)
return 0;
}
-static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t comp_vdev_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
{
- struct aim_fh *fh = filp->private_data;
+ struct comp_fh *fh = filp->private_data;
struct most_video_dev *mdev = fh->mdev;
int ret = 0;
@@ -209,11 +203,11 @@ static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
return ret;
}
-static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
+static __poll_t comp_vdev_poll(struct file *filp, poll_table *wait)
{
- struct aim_fh *fh = filp->private_data;
+ struct comp_fh *fh = filp->private_data;
struct most_video_dev *mdev = fh->mdev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
/* only wait if no data is available */
if (!data_ready(mdev))
@@ -224,7 +218,7 @@ static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
return mask;
}
-static void aim_set_format_struct(struct v4l2_format *f)
+static void comp_set_format_struct(struct v4l2_format *f)
{
f->fmt.pix.width = 8;
f->fmt.pix.height = 8;
@@ -236,8 +230,8 @@ static void aim_set_format_struct(struct v4l2_format *f)
f->fmt.pix.priv = 0;
}
-static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
- struct v4l2_format *format)
+static int comp_set_format(struct most_video_dev *mdev, unsigned int cmd,
+ struct v4l2_format *format)
{
if (format->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG)
return -EINVAL;
@@ -245,7 +239,7 @@ static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
if (cmd == VIDIOC_TRY_FMT)
return 0;
- aim_set_format_struct(format);
+ comp_set_format_struct(format);
return 0;
}
@@ -253,12 +247,12 @@ static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct aim_fh *fh = priv;
+ struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
v4l2_info(&mdev->v4l2_dev, "vidioc_querycap()\n");
- strlcpy(cap->driver, "v4l2_most_aim", sizeof(cap->driver));
+ strlcpy(cap->driver, "v4l2_component", sizeof(cap->driver));
strlcpy(cap->card, "MOST", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"%s", mdev->iface->description);
@@ -273,7 +267,7 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct aim_fh *fh = priv;
+ struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
v4l2_info(&mdev->v4l2_dev, "vidioc_enum_fmt_vid_cap() %d\n", f->index);
@@ -292,36 +286,36 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct aim_fh *fh = priv;
+ struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
v4l2_info(&mdev->v4l2_dev, "vidioc_g_fmt_vid_cap()\n");
- aim_set_format_struct(f);
+ comp_set_format_struct(f);
return 0;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct aim_fh *fh = priv;
+ struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
- return aim_set_format(mdev, VIDIOC_TRY_FMT, f);
+ return comp_set_format(mdev, VIDIOC_TRY_FMT, f);
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct aim_fh *fh = priv;
+ struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
- return aim_set_format(mdev, VIDIOC_S_FMT, f);
+ return comp_set_format(mdev, VIDIOC_S_FMT, f);
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
- struct aim_fh *fh = priv;
+ struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
v4l2_info(&mdev->v4l2_dev, "vidioc_g_std()\n");
@@ -333,10 +327,10 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
- struct aim_fh *fh = priv;
+ struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
- if (input->index >= V4L2_AIM_MAX_INPUT)
+ if (input->index >= V4L2_CMP_MAX_INPUT)
return -EINVAL;
strcpy(input->name, "MOST Video");
@@ -350,7 +344,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct aim_fh *fh = priv;
+ struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
*i = mdev->ctrl_input;
return 0;
@@ -358,23 +352,23 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
{
- struct aim_fh *fh = priv;
+ struct comp_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
v4l2_info(&mdev->v4l2_dev, "vidioc_s_input(%d)\n", index);
- if (index >= V4L2_AIM_MAX_INPUT)
+ if (index >= V4L2_CMP_MAX_INPUT)
return -EINVAL;
mdev->ctrl_input = index;
return 0;
}
-static const struct v4l2_file_operations aim_fops = {
+static const struct v4l2_file_operations comp_fops = {
.owner = THIS_MODULE,
- .open = aim_vdev_open,
- .release = aim_vdev_close,
- .read = aim_vdev_read,
- .poll = aim_vdev_poll,
+ .open = comp_vdev_open,
+ .release = comp_vdev_close,
+ .read = comp_vdev_read,
+ .poll = comp_vdev_poll,
.unlocked_ioctl = video_ioctl2,
};
@@ -390,8 +384,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_s_input = vidioc_s_input,
};
-static const struct video_device aim_videodev_template = {
- .fops = &aim_fops,
+static const struct video_device comp_videodev_template = {
+ .fops = &comp_fops,
.release = video_device_release,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = V4L2_STD_UNKNOWN,
@@ -399,7 +393,7 @@ static const struct video_device aim_videodev_template = {
/**************************************************************************/
-static struct most_video_dev *get_aim_dev(
+static struct most_video_dev *get_comp_dev(
struct most_interface *iface, int channel_idx)
{
struct most_video_dev *mdev;
@@ -416,11 +410,11 @@ static struct most_video_dev *get_aim_dev(
return NULL;
}
-static int aim_rx_data(struct mbo *mbo)
+static int comp_rx_data(struct mbo *mbo)
{
unsigned long flags;
struct most_video_dev *mdev =
- get_aim_dev(mbo->ifp, mbo->hdm_channel_id);
+ get_comp_dev(mbo->ifp, mbo->hdm_channel_id);
if (!mdev)
return -EIO;
@@ -437,11 +431,11 @@ static int aim_rx_data(struct mbo *mbo)
return 0;
}
-static int aim_register_videodev(struct most_video_dev *mdev)
+static int comp_register_videodev(struct most_video_dev *mdev)
{
int ret;
- v4l2_info(&mdev->v4l2_dev, "aim_register_videodev()\n");
+ v4l2_info(&mdev->v4l2_dev, "comp_register_videodev()\n");
init_waitqueue_head(&mdev->wait_data);
@@ -451,7 +445,7 @@ static int aim_register_videodev(struct most_video_dev *mdev)
return -ENOMEM;
/* Fill the video capture device struct */
- *mdev->vdev = aim_videodev_template;
+ *mdev->vdev = comp_videodev_template;
mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
mdev->vdev->lock = &mdev->lock;
snprintf(mdev->vdev->name, sizeof(mdev->vdev->name), "MOST: %s",
@@ -469,14 +463,14 @@ static int aim_register_videodev(struct most_video_dev *mdev)
return ret;
}
-static void aim_unregister_videodev(struct most_video_dev *mdev)
+static void comp_unregister_videodev(struct most_video_dev *mdev)
{
- v4l2_info(&mdev->v4l2_dev, "aim_unregister_videodev()\n");
+ v4l2_info(&mdev->v4l2_dev, "comp_unregister_videodev()\n");
video_unregister_device(mdev->vdev);
}
-static void aim_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+static void comp_v4l2_dev_release(struct v4l2_device *v4l2_dev)
{
struct most_video_dev *mdev =
container_of(v4l2_dev, struct most_video_dev, v4l2_dev);
@@ -485,14 +479,13 @@ static void aim_v4l2_dev_release(struct v4l2_device *v4l2_dev)
kfree(mdev);
}
-static int aim_probe_channel(struct most_interface *iface, int channel_idx,
- struct most_channel_config *ccfg,
- struct kobject *parent, char *name)
+static int comp_probe_channel(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *ccfg, char *name)
{
int ret;
- struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+ struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
- pr_info("aim_probe_channel(%s)\n", name);
+ pr_info("comp_probe_channel(%s)\n", name);
if (mdev) {
pr_err("channel already linked\n");
@@ -520,7 +513,7 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
INIT_LIST_HEAD(&mdev->pending_mbos);
mdev->iface = iface;
mdev->ch_idx = channel_idx;
- mdev->v4l2_dev.release = aim_v4l2_dev_release;
+ mdev->v4l2_dev.release = comp_v4l2_dev_release;
/* Create the v4l2_device */
strlcpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
@@ -531,14 +524,14 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
return ret;
}
- ret = aim_register_videodev(mdev);
+ ret = comp_register_videodev(mdev);
if (ret)
goto err_unreg;
spin_lock_irq(&list_lock);
list_add(&mdev->list, &video_devices);
spin_unlock_irq(&list_lock);
- v4l2_info(&mdev->v4l2_dev, "aim_probe_channel() done\n");
+ v4l2_info(&mdev->v4l2_dev, "comp_probe_channel() done\n");
return 0;
err_unreg:
@@ -547,48 +540,48 @@ err_unreg:
return ret;
}
-static int aim_disconnect_channel(struct most_interface *iface,
- int channel_idx)
+static int comp_disconnect_channel(struct most_interface *iface,
+ int channel_idx)
{
- struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+ struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
if (!mdev) {
pr_err("no such channel is linked\n");
return -ENOENT;
}
- v4l2_info(&mdev->v4l2_dev, "aim_disconnect_channel()\n");
+ v4l2_info(&mdev->v4l2_dev, "comp_disconnect_channel()\n");
spin_lock_irq(&list_lock);
list_del(&mdev->list);
spin_unlock_irq(&list_lock);
- aim_unregister_videodev(mdev);
+ comp_unregister_videodev(mdev);
v4l2_device_disconnect(&mdev->v4l2_dev);
v4l2_device_put(&mdev->v4l2_dev);
return 0;
}
-static struct most_aim aim_info = {
- .name = "v4l",
- .probe_channel = aim_probe_channel,
- .disconnect_channel = aim_disconnect_channel,
- .rx_completion = aim_rx_data,
+static struct core_component comp_info = {
+ .name = "video",
+ .probe_channel = comp_probe_channel,
+ .disconnect_channel = comp_disconnect_channel,
+ .rx_completion = comp_rx_data,
};
-static int __init aim_init(void)
+static int __init comp_init(void)
{
spin_lock_init(&list_lock);
- return most_register_aim(&aim_info);
+ return most_register_component(&comp);
}
-static void __exit aim_exit(void)
+static void __exit comp_exit(void)
{
struct most_video_dev *mdev, *tmp;
/*
* As the mostcore currently doesn't call disconnect_channel()
- * for linked channels while we call most_deregister_aim()
+ * for linked channels while we call most_deregister_component()
* we simulate this call here.
* This must be fixed in core.
*/
@@ -597,20 +590,20 @@ static void __exit aim_exit(void)
list_del(&mdev->list);
spin_unlock_irq(&list_lock);
- aim_unregister_videodev(mdev);
+ comp_unregister_videodev(mdev);
v4l2_device_disconnect(&mdev->v4l2_dev);
v4l2_device_put(&mdev->v4l2_dev);
spin_lock_irq(&list_lock);
}
spin_unlock_irq(&list_lock);
- most_deregister_aim(&aim_info);
+ most_deregister_component(&comp_info);
BUG_ON(!list_empty(&video_devices));
}
-module_init(aim_init);
-module_exit(aim_exit);
+module_init(comp_init);
+module_exit(comp_exit);
-MODULE_DESCRIPTION("V4L2 Application Interface Module for MostCore");
+MODULE_DESCRIPTION("V4L2 Component Module for Mostcore");
MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_LICENSE("GPL");
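Taken together, the renamed symbols form the standard Mostcore component shape. A hedged sketch of a minimal component using only the callbacks and registration calls visible in this diff; every name outside the core API is illustrative:

	/* Minimal Mostcore component sketch; "example" names are hypothetical. */
	static int example_probe(struct most_interface *iface, int channel_idx,
				 struct most_channel_config *ccfg, char *name)
	{
		return 0;	/* accept the channel link */
	}

	static int example_disconnect(struct most_interface *iface,
				      int channel_idx)
	{
		return 0;
	}

	static struct core_component example_comp = {
		.name = "example",
		.probe_channel = example_probe,
		.disconnect_channel = example_disconnect,
	};

	static int __init example_init(void)
	{
		return most_register_component(&example_comp);
	}

	static void __exit example_exit(void)
	{
		most_deregister_component(&example_comp);
	}

	module_init(example_init);
	module_exit(example_exit);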
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 87595c594b12..264ad362d858 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -637,8 +637,7 @@ static int spinand_write_page_hwecc(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
enable_hw_ecc = 1;
- chip->write_buf(mtd, p, eccsize * eccsteps);
- return 0;
+ return nand_prog_page_op(chip, page, 0, p, eccsize * eccsteps);
}
static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
@@ -653,7 +652,7 @@ static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
enable_read_hw_ecc = 1;
- chip->read_buf(mtd, p, eccsize * eccsteps);
+ nand_read_page_op(chip, page, 0, p, eccsize * eccsteps);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
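Both hunks move from raw data-path calls (chip->write_buf()/read_buf()) to the nand_prog_page_op()/nand_read_page_op() helpers, which also drive the page program/read command sequence instead of assuming the command was issued elsewhere. An illustrative read path under that API, assuming the whole ECC-covered main area is transferred; mtd is unused here and kept only to mirror the ecc callback shape:

	static int example_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				     u8 *buf, int page)
	{
		int len = chip->ecc.size * chip->ecc.steps;

		/* issues the READ0 sequence for 'page', then transfers len bytes */
		return nand_read_page_op(chip, page, 0, buf, len);
	}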
diff --git a/fs/ncpfs/Kconfig b/drivers/staging/ncpfs/Kconfig
index c931cf22a1f6..c931cf22a1f6 100644
--- a/fs/ncpfs/Kconfig
+++ b/drivers/staging/ncpfs/Kconfig
diff --git a/fs/ncpfs/Makefile b/drivers/staging/ncpfs/Makefile
index 66fe5f878817..66fe5f878817 100644
--- a/fs/ncpfs/Makefile
+++ b/drivers/staging/ncpfs/Makefile
diff --git a/drivers/staging/ncpfs/TODO b/drivers/staging/ncpfs/TODO
new file mode 100644
index 000000000000..9b6d38b7e248
--- /dev/null
+++ b/drivers/staging/ncpfs/TODO
@@ -0,0 +1,4 @@
+The ncpfs code will soon be removed from the kernel tree, as it is old,
+obsolete, and broken.
+
+Don't worry about fixing up anything here, it's not needed.
diff --git a/fs/ncpfs/dir.c b/drivers/staging/ncpfs/dir.c
index 0c57c5c5d40a..0c57c5c5d40a 100644
--- a/fs/ncpfs/dir.c
+++ b/drivers/staging/ncpfs/dir.c
diff --git a/fs/ncpfs/file.c b/drivers/staging/ncpfs/file.c
index 8f8cc0334ddd..8f8cc0334ddd 100644
--- a/fs/ncpfs/file.c
+++ b/drivers/staging/ncpfs/file.c
diff --git a/fs/ncpfs/getopt.c b/drivers/staging/ncpfs/getopt.c
index 5c941bef14c4..5c941bef14c4 100644
--- a/fs/ncpfs/getopt.c
+++ b/drivers/staging/ncpfs/getopt.c
diff --git a/fs/ncpfs/getopt.h b/drivers/staging/ncpfs/getopt.h
index 30f0da317670..30f0da317670 100644
--- a/fs/ncpfs/getopt.h
+++ b/drivers/staging/ncpfs/getopt.h
diff --git a/fs/ncpfs/inode.c b/drivers/staging/ncpfs/inode.c
index 41de88cdc053..bb411610a071 100644
--- a/fs/ncpfs/inode.c
+++ b/drivers/staging/ncpfs/inode.c
@@ -53,7 +53,8 @@ static struct kmem_cache * ncp_inode_cachep;
static struct inode *ncp_alloc_inode(struct super_block *sb)
{
struct ncp_inode_info *ei;
- ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
+
+ ei = kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
return &ei->vfs_inode;
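Two small cleanups in one hunk: kmem_cache_alloc() returns void *, which converts to any object pointer implicitly in C, so the cast goes; and a blank line now separates the declaration from the first statement, per kernel style. The general pattern, with a hypothetical type:

	/* no cast needed: void * converts implicitly */
	struct foo *f;	/* 'foo' and 'foo_cachep' are illustrative names */

	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
	if (!f)
		return NULL;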
diff --git a/fs/ncpfs/ioctl.c b/drivers/staging/ncpfs/ioctl.c
index d378b98cd7b6..d378b98cd7b6 100644
--- a/fs/ncpfs/ioctl.c
+++ b/drivers/staging/ncpfs/ioctl.c
diff --git a/fs/ncpfs/mmap.c b/drivers/staging/ncpfs/mmap.c
index a5c5cf2ff007..a5c5cf2ff007 100644
--- a/fs/ncpfs/mmap.c
+++ b/drivers/staging/ncpfs/mmap.c
diff --git a/fs/ncpfs/ncp_fs.h b/drivers/staging/ncpfs/ncp_fs.h
index bdd262b6c198..bdd262b6c198 100644
--- a/fs/ncpfs/ncp_fs.h
+++ b/drivers/staging/ncpfs/ncp_fs.h
diff --git a/fs/ncpfs/ncp_fs_i.h b/drivers/staging/ncpfs/ncp_fs_i.h
index 3432bafb53a5..3432bafb53a5 100644
--- a/fs/ncpfs/ncp_fs_i.h
+++ b/drivers/staging/ncpfs/ncp_fs_i.h
diff --git a/fs/ncpfs/ncp_fs_sb.h b/drivers/staging/ncpfs/ncp_fs_sb.h
index f06cde4adf71..f06cde4adf71 100644
--- a/fs/ncpfs/ncp_fs_sb.h
+++ b/drivers/staging/ncpfs/ncp_fs_sb.h
diff --git a/fs/ncpfs/ncplib_kernel.c b/drivers/staging/ncpfs/ncplib_kernel.c
index 804adfebba2f..804adfebba2f 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/drivers/staging/ncpfs/ncplib_kernel.c
diff --git a/fs/ncpfs/ncplib_kernel.h b/drivers/staging/ncpfs/ncplib_kernel.h
index aaae8aa9bf7d..aaae8aa9bf7d 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/drivers/staging/ncpfs/ncplib_kernel.h
diff --git a/fs/ncpfs/ncpsign_kernel.c b/drivers/staging/ncpfs/ncpsign_kernel.c
index 8085b1a3ba47..8085b1a3ba47 100644
--- a/fs/ncpfs/ncpsign_kernel.c
+++ b/drivers/staging/ncpfs/ncpsign_kernel.c
diff --git a/fs/ncpfs/ncpsign_kernel.h b/drivers/staging/ncpfs/ncpsign_kernel.h
index 57ff0a0650b8..57ff0a0650b8 100644
--- a/fs/ncpfs/ncpsign_kernel.h
+++ b/drivers/staging/ncpfs/ncpsign_kernel.h
diff --git a/fs/ncpfs/sock.c b/drivers/staging/ncpfs/sock.c
index efb176b1751a..4c13174d85b7 100644
--- a/fs/ncpfs/sock.c
+++ b/drivers/staging/ncpfs/sock.c
@@ -39,7 +39,8 @@ static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
{
struct msghdr msg = {NULL, };
struct kvec iov = {buf, size};
- return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
+ return sock_recvmsg(sock, &msg, flags);
}
static int _send(struct socket *sock, const void *buff, int len)
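kernel_recvmsg() is replaced by priming the msghdr's iov_iter directly and calling sock_recvmsg(). A self-contained sketch of the same pattern, matching the 4.16-era iov_iter_kvec() signature seen in the hunk (the direction argument still carries the ITER_KVEC tag at this point):

	static int kvec_recv(struct socket *sock, void *buf, int size,
			     unsigned int flags)
	{
		struct msghdr msg = { };
		struct kvec iov = { buf, size };

		/* attach the kvec to the message's iterator, then receive */
		iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
		return sock_recvmsg(sock, &msg, flags);
	}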
diff --git a/fs/ncpfs/symlink.c b/drivers/staging/ncpfs/symlink.c
index b6e16da4837a..b6e16da4837a 100644
--- a/fs/ncpfs/symlink.c
+++ b/drivers/staging/ncpfs/symlink.c
diff --git a/drivers/staging/nvec/nvec-keytable.h b/drivers/staging/nvec/nvec-keytable.h
index 7008c96bdbbe..ac58e87e6a4e 100644
--- a/drivers/staging/nvec/nvec-keytable.h
+++ b/drivers/staging/nvec/nvec-keytable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* drivers/input/keyboard/tegra-nvec.c
*
@@ -5,19 +6,6 @@
* embedded controller
*
* Copyright (c) 2009, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see http://www.gnu.org/licenses
*/
static unsigned short code_tab_102us[] = {
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 4ff8f47385da..52054a528723 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NVEC: NVIDIA compliant embedded controller interface
*
@@ -7,11 +8,6 @@
* Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
* Julian Andres Klode <jak@jak-linux.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index aa7c70ef94f5..25efcdfa4f20 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NVEC: NVIDIA compliant embedded controller interface
*
@@ -7,11 +8,6 @@
* Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
* Julian Andres Klode <jak@jak-linux.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#ifndef __LINUX_MFD_NVEC
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index a01f486621eb..01dbb66f7e9a 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvec_kbd: keyboard driver for a NVIDIA compliant embedded controller
*
@@ -5,11 +6,6 @@
*
* Authors: Pierre-Hugues Husson <phhusson@free.fr>
* Marc Dietrich <marvin24@gmx.de>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/staging/nvec/nvec_paz00.c b/drivers/staging/nvec/nvec_paz00.c
index 51dbeeb3320e..8b4da95081c8 100644
--- a/drivers/staging/nvec/nvec_paz00.c
+++ b/drivers/staging/nvec/nvec_paz00.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvec_paz00: OEM specific driver for Compal PAZ00 based devices
*
* Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
*
* Authors: Ilya Petrov <ilya.muromec@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index 3b144a9ea055..0e861c4bfcbf 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvec_power: power supply driver for a NVIDIA compliant embedded controller
*
@@ -5,11 +6,6 @@
*
* Authors: Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 3b7bce3ffd19..45db29262a9c 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvec_ps2: mouse driver for a NVIDIA compliant embedded controller
*
@@ -6,11 +7,6 @@
* Authors: Pierre-Hugues Husson <phhusson@free.fr>
* Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index 8ca17210d917..3887cf5f1e84 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -1,6 +1,4 @@
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
+# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2005-2009 Cavium Networks
#
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index 07bd2b87f6a0..1e114422993a 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
/*
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 691e4a51ace4..f67f95043887 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index 5ed8483fc24d..e3771d48c49b 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index d6172e4dace5..0d26c4a93ec1 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h
index 62d07c426f89..692dcdb7154d 100644
--- a/drivers/staging/octeon/ethernet-mem.h
+++ b/drivers/staging/octeon/ethernet-mem.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 4e7304210bb9..c15376d33891 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1a44291318ee..5e271245273c 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index 315a63d7094f..096553d8fc99 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <asm/octeon/cvmx-fau.h>
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 7424dc45ad39..a4a8f094e2b4 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/phy.h>
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 063dcd07557b..01efdf2a2c20 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 31f35025d19e..df3441b815bb 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index 84848e4c1664..78936e9b33b0 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index cb5540dc0e9d..31a82873e15c 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <asm/octeon/cvmx-pip.h>
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 429e24adfcf5..9b15c9ed844b 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 9c3f453adaa0..4a07e7f43d12 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
*/
/*
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 82bffd911435..2744c9f0920e 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -393,7 +393,8 @@ static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
}
static ssize_t dcon_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
@@ -401,7 +402,8 @@ static ssize_t dcon_mode_show(struct device *dev,
}
static ssize_t dcon_sleep_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
@@ -409,7 +411,8 @@ static ssize_t dcon_sleep_show(struct device *dev,
}
static ssize_t dcon_freeze_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
@@ -417,7 +420,8 @@ static ssize_t dcon_freeze_show(struct device *dev,
}
static ssize_t dcon_mono_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
@@ -425,13 +429,15 @@ static ssize_t dcon_mono_show(struct device *dev,
}
static ssize_t dcon_resumeline_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", resumeline);
}
static ssize_t dcon_mono_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
unsigned long enable_mono;
int rc;
@@ -446,7 +452,8 @@ static ssize_t dcon_mono_store(struct device *dev,
}
static ssize_t dcon_freeze_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
unsigned long output;
@@ -474,7 +481,8 @@ static ssize_t dcon_freeze_store(struct device *dev,
}
static ssize_t dcon_resumeline_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
unsigned short rl;
int rc;
@@ -490,7 +498,8 @@ static ssize_t dcon_resumeline_store(struct device *dev,
}
static ssize_t dcon_sleep_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
unsigned long output;
int ret;
@@ -641,7 +650,8 @@ static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Add the backlight device for the DCON */
dcon_bl_props.brightness = dcon->bl_val;
dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
- dcon, &dcon_bl_ops, &dcon_bl_props);
+ dcon, &dcon_bl_ops,
+ &dcon_bl_props);
if (IS_ERR(dcon->bl_dev)) {
dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
PTR_ERR(dcon->bl_dev));
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 8fbde5d3b4a6..fa89bb97c7b0 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -10,18 +10,18 @@
#define DCON_REG_ID 0
#define DCON_REG_MODE 1
-#define MODE_PASSTHRU (1<<0)
-#define MODE_SLEEP (1<<1)
-#define MODE_SLEEP_AUTO (1<<2)
-#define MODE_BL_ENABLE (1<<3)
-#define MODE_BLANK (1<<4)
-#define MODE_CSWIZZLE (1<<5)
-#define MODE_COL_AA (1<<6)
-#define MODE_MONO_LUMA (1<<7)
-#define MODE_SCAN_INT (1<<8)
-#define MODE_CLOCKDIV (1<<9)
-#define MODE_DEBUG (1<<14)
-#define MODE_SELFTEST (1<<15)
+#define MODE_PASSTHRU BIT(0)
+#define MODE_SLEEP BIT(1)
+#define MODE_SLEEP_AUTO BIT(2)
+#define MODE_BL_ENABLE BIT(3)
+#define MODE_BLANK BIT(4)
+#define MODE_CSWIZZLE BIT(5)
+#define MODE_COL_AA BIT(6)
+#define MODE_MONO_LUMA BIT(7)
+#define MODE_SCAN_INT BIT(8)
+#define MODE_CLOCKDIV BIT(9)
+#define MODE_DEBUG BIT(14)
+#define MODE_SELFTEST BIT(15)
#define DCON_REG_HRES 0x2
#define DCON_REG_HTOTAL 0x3
@@ -36,11 +36,11 @@
#define DCON_REG_MEM_OPT_B 0x42
/* Load Delay Locked Loop (DLL) settings for clock delay */
-#define MEM_DLL_CLOCK_DELAY (1<<0)
+#define MEM_DLL_CLOCK_DELAY BIT(0)
/* Memory controller power down function */
-#define MEM_POWER_DOWN (1<<8)
+#define MEM_POWER_DOWN BIT(8)
/* Memory controller software reset */
-#define MEM_SOFT_RESET (1<<0)
+#define MEM_SOFT_RESET BIT(0)
/* Status values */
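BIT(n) is the kernel's preferred spelling for single-bit masks: it expands to an unsigned long shift, which avoids signed-shift pitfalls at high bit positions and reads consistently with register documentation. Roughly, from include/linux/bitops.h of this era:

	#define BIT(nr)	(1UL << (nr))

	/* e.g. MODE_SELFTEST == BIT(15) == 0x8000 */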
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 0c5a10c69401..633c58ce24ee 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -69,7 +69,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
gpio_direction_input(OLPC_GPIO_DCON_IRQ);
gpio_direction_input(OLPC_GPIO_DCON_BLANK);
gpio_direction_output(OLPC_GPIO_DCON_LOAD,
- dcon->curr_src == DCON_SOURCE_CPU);
+ dcon->curr_src == DCON_SOURCE_CPU);
/* Set up the interrupt mappings */
diff --git a/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts
index 004b5027a934..61fad96818c2 100644
--- a/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts
+++ b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts
@@ -3,22 +3,22 @@
/plugin/;
/ {
- compatible = "bcm,bcm2835", "bcm,bcm2708", "bcm,bcm2709";
-
- fragment@0 {
- target = <&spi0>;
- __overlay__ {
- status = "okay";
-
- spidev@0{
- status = "disabled";
- };
-
- spidev@1{
- status = "disabled";
- };
- };
- };
+ compatible = "bcm,bcm2835", "bcm,bcm2708", "bcm,bcm2709";
+
+ fragment@0 {
+ target = <&spi0>;
+ __overlay__ {
+ status = "okay";
+
+ spidev@0{
+ status = "disabled";
+ };
+
+ spidev@1{
+ status = "disabled";
+ };
+ };
+ };
fragment@1 {
target = <&gpio>;
diff --git a/drivers/staging/pi433/Documentation/pi433.txt b/drivers/staging/pi433/Documentation/pi433.txt
index 245fef33d688..7d9dc2244848 100644
--- a/drivers/staging/pi433/Documentation/pi433.txt
+++ b/drivers/staging/pi433/Documentation/pi433.txt
@@ -69,8 +69,8 @@ PI433_IOC_WR_TX_CFG - set the transmission parameters
PI433_IOC_RD_RX_CFG - get the receiving parameters from the driver
PI433_IOC_WR_RX_CFG - set the receiving parameters
-The tx configuration is transfered via struct pi433_tx_cfg, the parameterset for transmission.
-It is devided into two sections: rf parameters and packet format.
+The tx configuration is transferred via struct pi433_tx_cfg, the parameter set for transmission.
+It is divided into two sections: rf parameters and packet format.
rf params:
frequency
@@ -110,9 +110,9 @@ rf params:
ramp12 - amp ramps up in 12us
ramp10 - amp ramps up in 10us
tx_start_condition
- fifoLevel - transmission starts, if fifo is filled to
+	fifo_level	- transmission starts if the fifo is filled to
threshold level
- fifoNotEmpty - transmission starts, as soon as there is one
+	fifo_not_empty	- transmission starts as soon as there is one
byte in internal fifo
repetitions
This gives the option to send a telegram multiple times. Default: 1
@@ -200,11 +200,11 @@ rf params:
sets the gain of the low noise amp
automatic - lna gain is determined by an agc
max - lna gain is set to maximum
- maxMinus6 - lna gain is set to 6db below max
- maxMinus12 - lna gain is set to 12db below max
- maxMinus24 - lna gain is set to 24db below max
- maxMinus36 - lna gain is set to 36db below max
- maxMinus48 - lna gain is set to 48db below max
+ max_minus_6 - lna gain is set to 6db below max
+ max_minus_12 - lna gain is set to 12db below max
+ max_minus_24 - lna gain is set to 24db below max
+ max_minus_36 - lna gain is set to 36db below max
+ max_minus_48 - lna gain is set to 48db below max
bw_mantisse
sets the bandwidth of the channel filter - part one: mantisse.
mantisse16 - mantisse is set to 16
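A hedged userspace sketch of driving the tx path through the ioctl described above. Only PI433_IOC_WR_TX_CFG and struct pi433_tx_cfg (with its frequency parameter) come from this document; the device node name, header path, and field values are illustrative:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "pi433_if.h"		/* assumed to export the ioctl and struct */

	int main(void)
	{
		struct pi433_tx_cfg cfg = { 0 };
		int fd = open("/dev/pi433.0", O_RDWR);	/* hypothetical node */

		if (fd < 0)
			return 1;
		cfg.frequency = 433920000;	/* illustrative: 433.92 MHz */
		if (ioctl(fd, PI433_IOC_WR_TX_CFG, &cfg) < 0) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}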
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 2a205c6173dc..edcd7e798f99 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -48,14 +48,13 @@
#include <linux/wait.h>
#include <linux/spi/spi.h>
#ifdef CONFIG_COMPAT
-#include <asm/compat.h>
+#include <linux/compat.h>
#endif
#include "pi433_if.h"
#include "rf69.h"
-
-#define N_PI433_MINORS (1U << MINORBITS) /*32*/ /* ... up to 256 */
+#define N_PI433_MINORS BIT(MINORBITS) /*32*/ /* ... up to 256 */
#define MAX_MSG_SIZE 900 /* min: FIFO_SIZE! */
#define MSG_FIFO_SIZE 65536 /* 65536 = 2^16 */
#define NUM_DIO 2
@@ -79,7 +78,7 @@ struct pi433_device {
struct device *dev;
struct cdev *cdev;
struct spi_device *spi;
- unsigned users;
+ unsigned int users;
/* irq related values */
struct gpio_desc *gpiod[NUM_DIO];
@@ -121,32 +120,20 @@ struct pi433_instance {
/*-------------------------------------------------------------------------*/
-/* macro for checked access of registers of radio module */
-#define SET_CHECKED(retval) \
- if (retval < 0) \
- return retval;
-
-/*-------------------------------------------------------------------------*/
-
/* GPIO interrupt handlers */
static irqreturn_t DIO0_irq_handler(int irq, void *dev_id)
{
struct pi433_device *device = dev_id;
- if (device->irq_state[DIO0] == DIO_PacketSent)
- {
+ if (device->irq_state[DIO0] == DIO_PACKET_SENT) {
device->free_in_fifo = FIFO_SIZE;
dev_dbg(device->dev, "DIO0 irq: Packet sent\n");
wake_up_interruptible(&device->fifo_wait_queue);
- }
- else if (device->irq_state[DIO0] == DIO_Rssi_DIO0)
- {
+ } else if (device->irq_state[DIO0] == DIO_RSSI_DIO0) {
dev_dbg(device->dev, "DIO0 irq: RSSI level over threshold\n");
wake_up_interruptible(&device->rx_wait_queue);
- }
- else if (device->irq_state[DIO0] == DIO_PayloadReady)
- {
- dev_dbg(device->dev, "DIO0 irq: PayloadReady\n");
+ } else if (device->irq_state[DIO0] == DIO_PAYLOAD_READY) {
+ dev_dbg(device->dev, "DIO0 irq: Payload ready\n");
device->free_in_fifo = 0;
wake_up_interruptible(&device->fifo_wait_queue);
}
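The deleted SET_CHECKED() wrapper hid a return statement inside a macro and, worse, named its argument twice, so a wrapped setter could run a second time on the failure path. The rest of the file expands every call site into the explicit pattern; side by side:

	/* before: control flow hidden in a macro, argument evaluated twice
	 * when the call fails
	 */
	SET_CHECKED(rf69_set_frequency(dev->spi, rx_cfg->frequency));

	/* after: explicit, single evaluation, greppable error handling */
	ret = rf69_set_frequency(dev->spi, rx_cfg->frequency);
	if (ret < 0)
		return ret;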
@@ -158,14 +145,13 @@ static irqreturn_t DIO1_irq_handler(int irq, void *dev_id)
{
struct pi433_device *device = dev_id;
- if (device->irq_state[DIO1] == DIO_FifoNotEmpty_DIO1)
- {
+ if (device->irq_state[DIO1] == DIO_FIFO_NOT_EMPTY_DIO1) {
device->free_in_fifo = FIFO_SIZE;
- }
- else if (device->irq_state[DIO1] == DIO_FifoLevel)
- {
- if (device->rx_active) device->free_in_fifo = FIFO_THRESHOLD - 1;
- else device->free_in_fifo = FIFO_SIZE - FIFO_THRESHOLD - 1;
+ } else if (device->irq_state[DIO1] == DIO_FIFO_LEVEL) {
+ if (device->rx_active)
+ device->free_in_fifo = FIFO_THRESHOLD - 1;
+ else
+ device->free_in_fifo = FIFO_SIZE - FIFO_THRESHOLD - 1;
}
dev_dbg(device->dev,
"DIO1 irq: %d bytes free in fifo\n", device->free_in_fifo);
@@ -183,30 +169,56 @@ rf69_set_rx_cfg(struct pi433_device *dev, struct pi433_rx_cfg *rx_cfg)
int payload_length;
/* receiver config */
- SET_CHECKED(rf69_set_frequency (dev->spi, rx_cfg->frequency));
- SET_CHECKED(rf69_set_bit_rate (dev->spi, rx_cfg->bit_rate));
- SET_CHECKED(rf69_set_modulation (dev->spi, rx_cfg->modulation));
- SET_CHECKED(rf69_set_antenna_impedance (dev->spi, rx_cfg->antenna_impedance));
- SET_CHECKED(rf69_set_rssi_threshold (dev->spi, rx_cfg->rssi_threshold));
- SET_CHECKED(rf69_set_ook_threshold_dec (dev->spi, rx_cfg->thresholdDecrement));
- SET_CHECKED(rf69_set_bandwidth (dev->spi, rx_cfg->bw_mantisse, rx_cfg->bw_exponent));
- SET_CHECKED(rf69_set_bandwidth_during_afc(dev->spi, rx_cfg->bw_mantisse, rx_cfg->bw_exponent));
- SET_CHECKED(rf69_set_dagc (dev->spi, rx_cfg->dagc));
+ ret = rf69_set_frequency(dev->spi, rx_cfg->frequency);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_bit_rate(dev->spi, rx_cfg->bit_rate);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_modulation(dev->spi, rx_cfg->modulation);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_antenna_impedance(dev->spi, rx_cfg->antenna_impedance);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_rssi_threshold(dev->spi, rx_cfg->rssi_threshold);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_ook_threshold_dec(dev->spi, rx_cfg->threshold_decrement);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_bandwidth(dev->spi, rx_cfg->bw_mantisse, rx_cfg->bw_exponent);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_bandwidth_during_afc(dev->spi, rx_cfg->bw_mantisse, rx_cfg->bw_exponent);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_dagc(dev->spi, rx_cfg->dagc);
+ if (ret < 0)
+ return ret;
dev->rx_bytes_to_drop = rx_cfg->bytes_to_drop;
/* packet config */
/* enable */
- SET_CHECKED(rf69_set_sync_enable(dev->spi, rx_cfg->enable_sync));
- if (rx_cfg->enable_sync == optionOn)
- {
- SET_CHECKED(rf69_set_fifo_fill_condition(dev->spi, afterSyncInterrupt));
- }
- else
- {
- SET_CHECKED(rf69_set_fifo_fill_condition(dev->spi, always));
+ if (rx_cfg->enable_sync == OPTION_ON) {
+ ret = rf69_enable_sync(dev->spi);
+ if (ret < 0)
+ return ret;
+
+ ret = rf69_set_fifo_fill_condition(dev->spi, afterSyncInterrupt);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = rf69_disable_sync(dev->spi);
+ if (ret < 0)
+ return ret;
+
+ ret = rf69_set_fifo_fill_condition(dev->spi, always);
+ if (ret < 0)
+ return ret;
}
- if (rx_cfg->enable_length_byte == optionOn) {
+ if (rx_cfg->enable_length_byte == OPTION_ON) {
ret = rf69_set_packet_format(dev->spi, packetLengthVar);
if (ret < 0)
return ret;
@@ -215,36 +227,56 @@ rf69_set_rx_cfg(struct pi433_device *dev, struct pi433_rx_cfg *rx_cfg)
if (ret < 0)
return ret;
}
- SET_CHECKED(rf69_set_adressFiltering(dev->spi, rx_cfg->enable_address_filtering));
- SET_CHECKED(rf69_set_crc_enable (dev->spi, rx_cfg->enable_crc));
+ ret = rf69_set_adressFiltering(dev->spi, rx_cfg->enable_address_filtering);
+ if (ret < 0)
+ return ret;
- /* lengths */
- SET_CHECKED(rf69_set_sync_size(dev->spi, rx_cfg->sync_length));
- if (rx_cfg->enable_length_byte == optionOn)
- {
- SET_CHECKED(rf69_set_payload_length(dev->spi, 0xff));
+ if (rx_cfg->enable_crc == OPTION_ON) {
+ ret = rf69_enable_crc(dev->spi);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = rf69_disable_crc(dev->spi);
+ if (ret < 0)
+ return ret;
}
- else if (rx_cfg->fixed_message_length != 0)
- {
+
+ /* lengths */
+ ret = rf69_set_sync_size(dev->spi, rx_cfg->sync_length);
+ if (ret < 0)
+ return ret;
+ if (rx_cfg->enable_length_byte == OPTION_ON) {
+ ret = rf69_set_payload_length(dev->spi, 0xff);
+ if (ret < 0)
+ return ret;
+ } else if (rx_cfg->fixed_message_length != 0) {
payload_length = rx_cfg->fixed_message_length;
- if (rx_cfg->enable_length_byte == optionOn) payload_length++;
- if (rx_cfg->enable_address_filtering != filteringOff) payload_length++;
- SET_CHECKED(rf69_set_payload_length(dev->spi, payload_length));
- }
- else
- {
- SET_CHECKED(rf69_set_payload_length(dev->spi, 0));
+ if (rx_cfg->enable_length_byte == OPTION_ON)
+ payload_length++;
+ if (rx_cfg->enable_address_filtering != filteringOff)
+ payload_length++;
+ ret = rf69_set_payload_length(dev->spi, payload_length);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = rf69_set_payload_length(dev->spi, 0);
+ if (ret < 0)
+ return ret;
}
/* values */
- if (rx_cfg->enable_sync == optionOn)
- {
- SET_CHECKED(rf69_set_sync_values(dev->spi, rx_cfg->sync_pattern));
+ if (rx_cfg->enable_sync == OPTION_ON) {
+ ret = rf69_set_sync_values(dev->spi, rx_cfg->sync_pattern);
+ if (ret < 0)
+ return ret;
}
- if (rx_cfg->enable_address_filtering != filteringOff)
- {
- SET_CHECKED(rf69_set_node_address (dev->spi, rx_cfg->node_address));
- SET_CHECKED(rf69_set_broadcast_address(dev->spi, rx_cfg->broadcast_address));
+ if (rx_cfg->enable_address_filtering != filteringOff) {
+ ret = rf69_set_node_address(dev->spi, rx_cfg->node_address);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_broadcast_address(dev->spi, rx_cfg->broadcast_address);
+ if (ret < 0)
+ return ret;
}
return 0;
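Beyond the macro expansion, the boolean-option setters rf69_set_sync_enable() and rf69_set_crc_enable() are split into enable/disable pairs, moving the flag test to the caller. The shape every converted site follows:

	/* illustrative: pick the enable/disable helper from the config flag */
	if (cfg->enable_crc == OPTION_ON)
		ret = rf69_enable_crc(dev->spi);
	else
		ret = rf69_disable_crc(dev->spi);
	if (ret < 0)
		return ret;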
@@ -255,25 +287,50 @@ rf69_set_tx_cfg(struct pi433_device *dev, struct pi433_tx_cfg *tx_cfg)
{
int ret;
- SET_CHECKED(rf69_set_frequency (dev->spi, tx_cfg->frequency));
- SET_CHECKED(rf69_set_bit_rate (dev->spi, tx_cfg->bit_rate));
- SET_CHECKED(rf69_set_modulation (dev->spi, tx_cfg->modulation));
- SET_CHECKED(rf69_set_deviation (dev->spi, tx_cfg->dev_frequency));
- SET_CHECKED(rf69_set_pa_ramp (dev->spi, tx_cfg->pa_ramp));
- SET_CHECKED(rf69_set_modulation_shaping(dev->spi, tx_cfg->modShaping));
- SET_CHECKED(rf69_set_tx_start_condition(dev->spi, tx_cfg->tx_start_condition));
+ ret = rf69_set_frequency(dev->spi, tx_cfg->frequency);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_bit_rate(dev->spi, tx_cfg->bit_rate);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_modulation(dev->spi, tx_cfg->modulation);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_deviation(dev->spi, tx_cfg->dev_frequency);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_pa_ramp(dev->spi, tx_cfg->pa_ramp);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_modulation_shaping(dev->spi, tx_cfg->mod_shaping);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_tx_start_condition(dev->spi, tx_cfg->tx_start_condition);
+ if (ret < 0)
+ return ret;
/* packet format enable */
- if (tx_cfg->enable_preamble == optionOn)
- {
- SET_CHECKED(rf69_set_preamble_length(dev->spi, tx_cfg->preamble_length));
+ if (tx_cfg->enable_preamble == OPTION_ON) {
+ ret = rf69_set_preamble_length(dev->spi, tx_cfg->preamble_length);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = rf69_set_preamble_length(dev->spi, 0);
+ if (ret < 0)
+ return ret;
}
- else
- {
- SET_CHECKED(rf69_set_preamble_length(dev->spi, 0));
+
+ if (tx_cfg->enable_sync == OPTION_ON) {
+ ret = rf69_enable_sync(dev->spi);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = rf69_disable_sync(dev->spi);
+ if (ret < 0)
+ return ret;
}
- SET_CHECKED(rf69_set_sync_enable (dev->spi, tx_cfg->enable_sync));
- if (tx_cfg->enable_length_byte == optionOn) {
+
+ if (tx_cfg->enable_length_byte == OPTION_ON) {
ret = rf69_set_packet_format(dev->spi, packetLengthVar);
if (ret < 0)
return ret;
@@ -282,12 +339,25 @@ rf69_set_tx_cfg(struct pi433_device *dev, struct pi433_tx_cfg *tx_cfg)
if (ret < 0)
return ret;
}
- SET_CHECKED(rf69_set_crc_enable (dev->spi, tx_cfg->enable_crc));
+
+ if (tx_cfg->enable_crc == OPTION_ON) {
+ ret = rf69_enable_crc(dev->spi);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = rf69_disable_crc(dev->spi);
+ if (ret < 0)
+ return ret;
+ }
/* configure sync, if enabled */
- if (tx_cfg->enable_sync == optionOn) {
- SET_CHECKED(rf69_set_sync_size(dev->spi, tx_cfg->sync_length));
- SET_CHECKED(rf69_set_sync_values(dev->spi, tx_cfg->sync_pattern));
+ if (tx_cfg->enable_sync == OPTION_ON) {
+ ret = rf69_set_sync_size(dev->spi, tx_cfg->sync_length);
+ if (ret < 0)
+ return ret;
+ ret = rf69_set_sync_values(dev->spi, tx_cfg->sync_pattern);
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -306,43 +376,51 @@ pi433_start_rx(struct pi433_device *dev)
/* setup for receiving */
retval = rf69_set_rx_cfg(dev, &dev->rx_cfg);
- if (retval) return retval;
+ if (retval)
+ return retval;
/* setup rssi irq */
- SET_CHECKED(rf69_set_dio_mapping(dev->spi, DIO0, DIO_Rssi_DIO0));
- dev->irq_state[DIO0] = DIO_Rssi_DIO0;
+ retval = rf69_set_dio_mapping(dev->spi, DIO0, DIO_RSSI_DIO0);
+ if (retval < 0)
+ return retval;
+ dev->irq_state[DIO0] = DIO_RSSI_DIO0;
irq_set_irq_type(dev->irq_num[DIO0], IRQ_TYPE_EDGE_RISING);
/* setup fifo level interrupt */
- SET_CHECKED(rf69_set_fifo_threshold(dev->spi, FIFO_SIZE - FIFO_THRESHOLD));
- SET_CHECKED(rf69_set_dio_mapping(dev->spi, DIO1, DIO_FifoLevel));
- dev->irq_state[DIO1] = DIO_FifoLevel;
+ retval = rf69_set_fifo_threshold(dev->spi, FIFO_SIZE - FIFO_THRESHOLD);
+ if (retval < 0)
+ return retval;
+ retval = rf69_set_dio_mapping(dev->spi, DIO1, DIO_FIFO_LEVEL);
+ if (retval < 0)
+ return retval;
+ dev->irq_state[DIO1] = DIO_FIFO_LEVEL;
irq_set_irq_type(dev->irq_num[DIO1], IRQ_TYPE_EDGE_RISING);
/* set module to receiving mode */
- SET_CHECKED(rf69_set_mode(dev->spi, receive));
+ retval = rf69_set_mode(dev->spi, receive);
+ if (retval < 0)
+ return retval;
return 0;
}
-
/*-------------------------------------------------------------------------*/
static int
pi433_receive(void *data)
{
struct pi433_device *dev = data;
- struct spi_device *spi = dev->spi; /* needed for SET_CHECKED */
+ struct spi_device *spi = dev->spi;
int bytes_to_read, bytes_total;
int retval;
dev->interrupt_rx_allowed = false;
/* wait for any tx to finish */
- dev_dbg(dev->dev,"rx: going to wait for any tx to finish");
+ dev_dbg(dev->dev, "rx: going to wait for any tx to finish");
retval = wait_event_interruptible(dev->rx_wait_queue, !dev->tx_active);
- if(retval) /* wait was interrupted */
- {
+ if (retval) {
+ /* wait was interrupted */
dev->interrupt_rx_allowed = true;
wake_up_interruptible(&dev->tx_wait_queue);
return retval;
@@ -359,8 +437,7 @@ pi433_receive(void *data)
return retval;
/* now check RSSI, if low wait for getting high (RSSI interrupt) */
- while ( !rf69_get_flag(dev->spi, rssiExceededThreshold) )
- {
+ while (!rf69_get_flag(dev->spi, rssiExceededThreshold)) {
/* allow tx to interrupt us while waiting for high RSSI */
dev->interrupt_rx_allowed = true;
wake_up_interruptible(&dev->tx_wait_queue);
@@ -368,43 +445,43 @@ pi433_receive(void *data)
/* wait for RSSI level to become high */
dev_dbg(dev->dev, "rx: going to wait for high RSSI level");
retval = wait_event_interruptible(dev->rx_wait_queue,
- rf69_get_flag(dev->spi,
- rssiExceededThreshold));
- if (retval) goto abort; /* wait was interrupted */
+ rf69_get_flag(dev->spi,
+ rssiExceededThreshold));
+ if (retval) /* wait was interrupted */
+ goto abort;
dev->interrupt_rx_allowed = false;
/* cross check for ongoing tx */
- if (!dev->tx_active) break;
+ if (!dev->tx_active)
+ break;
}
/* configure payload ready irq */
- SET_CHECKED(rf69_set_dio_mapping(spi, DIO0, DIO_PayloadReady));
- dev->irq_state[DIO0] = DIO_PayloadReady;
+ retval = rf69_set_dio_mapping(spi, DIO0, DIO_PAYLOAD_READY);
+ if (retval < 0)
+ goto abort;
+ dev->irq_state[DIO0] = DIO_PAYLOAD_READY;
irq_set_irq_type(dev->irq_num[DIO0], IRQ_TYPE_EDGE_RISING);
/* fixed or unlimited length? */
- if (dev->rx_cfg.fixed_message_length != 0)
- {
- if (dev->rx_cfg.fixed_message_length > dev->rx_buffer_size)
- {
+ if (dev->rx_cfg.fixed_message_length != 0) {
+ if (dev->rx_cfg.fixed_message_length > dev->rx_buffer_size) {
retval = -1;
goto abort;
}
bytes_total = dev->rx_cfg.fixed_message_length;
- dev_dbg(dev->dev,"rx: msg len set to %d by fixed length", bytes_total);
- }
- else
- {
+ dev_dbg(dev->dev, "rx: msg len set to %d by fixed length", bytes_total);
+ } else {
bytes_total = dev->rx_buffer_size;
dev_dbg(dev->dev, "rx: msg len set to %d as requested by read", bytes_total);
}
/* length byte enabled? */
- if (dev->rx_cfg.enable_length_byte == optionOn)
- {
+ if (dev->rx_cfg.enable_length_byte == OPTION_ON) {
retval = wait_event_interruptible(dev->fifo_wait_queue,
dev->free_in_fifo < FIFO_SIZE);
- if (retval) goto abort; /* wait was interrupted */
+ if (retval) /* wait was interrupted */
+ goto abort;
rf69_read_fifo(spi, (u8 *)&bytes_total, 1);
if (bytes_total > dev->rx_buffer_size) {
@@ -416,15 +493,15 @@ pi433_receive(void *data)
}
/* address byte enabled? */
- if (dev->rx_cfg.enable_address_filtering != filteringOff)
- {
+ if (dev->rx_cfg.enable_address_filtering != filteringOff) {
u8 dummy;
bytes_total--;
retval = wait_event_interruptible(dev->fifo_wait_queue,
dev->free_in_fifo < FIFO_SIZE);
- if (retval) goto abort; /* wait was interrupted */
+ if (retval) /* wait was interrupted */
+ goto abort;
rf69_read_fifo(spi, &dummy, 1);
dev->free_in_fifo++;
@@ -432,13 +509,12 @@ pi433_receive(void *data)
}
/* get payload */
- while (dev->rx_position < bytes_total)
- {
- if ( !rf69_get_flag(dev->spi, payloadReady) )
- {
+ while (dev->rx_position < bytes_total) {
+ if (!rf69_get_flag(dev->spi, payload_ready)) {
retval = wait_event_interruptible(dev->fifo_wait_queue,
dev->free_in_fifo < FIFO_SIZE);
- if (retval) goto abort; /* wait was interrupted */
+ if (retval) /* wait was interrupted */
+ goto abort;
}
/* need to drop bytes or acquire? */
@@ -447,14 +523,15 @@ pi433_receive(void *data)
else
bytes_to_read = bytes_total - dev->rx_position;
-
/* access the fifo */
if (bytes_to_read > FIFO_SIZE - dev->free_in_fifo)
bytes_to_read = FIFO_SIZE - dev->free_in_fifo;
retval = rf69_read_fifo(spi,
&dev->rx_buffer[dev->rx_position],
bytes_to_read);
- if (retval) goto abort; /* read failed */
+ if (retval) /* read failed */
+ goto abort;
+
dev->free_in_fifo += bytes_to_read;
/* adjust status vars */
@@ -464,11 +541,11 @@ pi433_receive(void *data)
dev->rx_position += bytes_to_read;
}
-
/* rx done, wait was interrupted or error occurred */
abort:
dev->interrupt_rx_allowed = true;
- SET_CHECKED(rf69_set_mode(dev->spi, standby));
+ if (rf69_set_mode(dev->spi, standby))
+ pr_err("rf69_set_mode(): radio module failed to go standby\n");
wake_up_interruptible(&dev->tx_wait_queue);
if (retval)
@@ -481,22 +558,20 @@ static int
pi433_tx_thread(void *data)
{
struct pi433_device *device = data;
- struct spi_device *spi = device->spi; /* needed for SET_CHECKED */
+ struct spi_device *spi = device->spi;
struct pi433_tx_cfg tx_cfg;
- u8 *buffer = device->buffer;
size_t size;
bool rx_interrupted = false;
int position, repetitions;
int retval;
- while (1)
- {
+ while (1) {
/* wait for fifo to be populated or for request to terminate */
dev_dbg(device->dev, "thread: going to wait for new messages");
wait_event_interruptible(device->tx_wait_queue,
- ( !kfifo_is_empty(&device->tx_fifo) ||
- kthread_should_stop() ));
- if ( kthread_should_stop() )
+ (!kfifo_is_empty(&device->tx_fifo) ||
+ kthread_should_stop()));
+ if (kthread_should_stop())
return 0;
/* get data from fifo in the following order:
@@ -508,14 +583,14 @@ pi433_tx_thread(void *data)
retval = kfifo_out(&device->tx_fifo, &tx_cfg, sizeof(tx_cfg));
if (retval != sizeof(tx_cfg)) {
- dev_dbg(device->dev, "reading tx_cfg from fifo failed: got %d byte(s), expected %d", retval, (unsigned int)sizeof(tx_cfg) );
+ dev_dbg(device->dev, "reading tx_cfg from fifo failed: got %d byte(s), expected %d", retval, (unsigned int)sizeof(tx_cfg));
mutex_unlock(&device->tx_fifo_lock);
continue;
}
retval = kfifo_out(&device->tx_fifo, &size, sizeof(size_t));
if (retval != sizeof(size_t)) {
- dev_dbg(device->dev, "reading msg size from fifo failed: got %d, expected %d", retval, (unsigned int)sizeof(size_t) );
+ dev_dbg(device->dev, "reading msg size from fifo failed: got %d, expected %d", retval, (unsigned int)sizeof(size_t));
mutex_unlock(&device->tx_fifo_lock);
continue;
}
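
The tx fifo carries variable-size records, so both ends must agree on the record layout: a struct pi433_tx_cfg, then a size_t, then the payload bytes. Note also that the following hunk's switch from the local `buffer` alias to `device->buffer` is more than style: sizeof on a `u8 *` yields the pointer size, while sizeof(device->buffer) yields the real buffer size (assuming `buffer` is an array member of the struct). A minimal sketch of the pairing, with error paths trimmed:

/* Producer side, from pi433_write(): */
kfifo_in(&device->tx_fifo, &instance->tx_cfg, sizeof(instance->tx_cfg));
kfifo_in(&device->tx_fifo, &count, sizeof(size_t));
kfifo_from_user(&device->tx_fifo, buf, count, &copied);

/* Consumer side, here in pi433_tx_thread(), pops in the same order: */
kfifo_out(&device->tx_fifo, &tx_cfg, sizeof(tx_cfg));
kfifo_out(&device->tx_fifo, &size, sizeof(size_t));
kfifo_out(&device->tx_fifo, &device->buffer[position],
	  sizeof(device->buffer) - position);
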
@@ -525,27 +600,27 @@ pi433_tx_thread(void *data)
size = tx_cfg.fixed_message_length;
/* increase size, if len byte is requested */
- if (tx_cfg.enable_length_byte == optionOn)
+ if (tx_cfg.enable_length_byte == OPTION_ON)
size++;
/* increase size, if adr byte is requested */
- if (tx_cfg.enable_address_byte == optionOn)
+ if (tx_cfg.enable_address_byte == OPTION_ON)
size++;
/* prime buffer */
- memset(buffer, 0, size);
+ memset(device->buffer, 0, size);
position = 0;
/* add length byte, if requested */
- if (tx_cfg.enable_length_byte == optionOn)
- buffer[position++] = size-1; /* according to spec length byte itself must be excluded from the length calculation */
+ if (tx_cfg.enable_length_byte == OPTION_ON)
+ device->buffer[position++] = size - 1; /* according to spec length byte itself must be excluded from the length calculation */
/* add adr byte, if requested */
- if (tx_cfg.enable_address_byte == optionOn)
- buffer[position++] = tx_cfg.address_byte;
+ if (tx_cfg.enable_address_byte == OPTION_ON)
+ device->buffer[position++] = tx_cfg.address_byte;
/* finally get message data from fifo */
- retval = kfifo_out(&device->tx_fifo, &buffer[position], sizeof(buffer)-position );
+ retval = kfifo_out(&device->tx_fifo, &device->buffer[position], sizeof(device->buffer) - position);
dev_dbg(device->dev, "read %d message byte(s) from fifo queue.", retval);
mutex_unlock(&device->tx_fifo_lock);
@@ -557,7 +632,7 @@ pi433_tx_thread(void *data)
*/
wait_event_interruptible(device->tx_wait_queue,
!device->rx_active ||
- device->interrupt_rx_allowed == true);
+ device->interrupt_rx_allowed);
/* prevent race conditions
* irq will be reenabled after tx config is set
@@ -565,91 +640,107 @@ pi433_tx_thread(void *data)
disable_irq(device->irq_num[DIO0]);
device->tx_active = true;
- if (device->rx_active && rx_interrupted == false)
- {
+ if (device->rx_active && !rx_interrupted) {
/* rx is currently waiting for a telegram;
* we need to set the radio module to standby
*/
- SET_CHECKED(rf69_set_mode(device->spi, standby));
+ retval = rf69_set_mode(device->spi, standby);
+ if (retval < 0)
+ return retval;
rx_interrupted = true;
}
/* clear fifo, set fifo threshold, set payload length */
- SET_CHECKED(rf69_set_mode(spi, standby)); /* this clears the fifo */
- SET_CHECKED(rf69_set_fifo_threshold(spi, FIFO_THRESHOLD));
- if (tx_cfg.enable_length_byte == optionOn)
- {
- SET_CHECKED(rf69_set_payload_length(spi, size * tx_cfg.repetitions));
- }
- else
- {
- SET_CHECKED(rf69_set_payload_length(spi, 0));
+ retval = rf69_set_mode(spi, standby); /* this clears the fifo */
+ if (retval < 0)
+ return retval;
+ retval = rf69_set_fifo_threshold(spi, FIFO_THRESHOLD);
+ if (retval < 0)
+ return retval;
+ if (tx_cfg.enable_length_byte == OPTION_ON) {
+ retval = rf69_set_payload_length(spi, size * tx_cfg.repetitions);
+ if (retval < 0)
+ return retval;
+ } else {
+ retval = rf69_set_payload_length(spi, 0);
+ if (retval < 0)
+ return retval;
}
/* configure the rf chip */
- rf69_set_tx_cfg(device, &tx_cfg);
+ retval = rf69_set_tx_cfg(device, &tx_cfg);
+ if (retval < 0)
+ return retval;
/* enable fifo level interrupt */
- SET_CHECKED(rf69_set_dio_mapping(spi, DIO1, DIO_FifoLevel));
- device->irq_state[DIO1] = DIO_FifoLevel;
+ retval = rf69_set_dio_mapping(spi, DIO1, DIO_FIFO_LEVEL);
+ if (retval < 0)
+ return retval;
+ device->irq_state[DIO1] = DIO_FIFO_LEVEL;
irq_set_irq_type(device->irq_num[DIO1], IRQ_TYPE_EDGE_FALLING);
/* enable packet sent interrupt */
- SET_CHECKED(rf69_set_dio_mapping(spi, DIO0, DIO_PacketSent));
- device->irq_state[DIO0] = DIO_PacketSent;
+ retval = rf69_set_dio_mapping(spi, DIO0, DIO_PACKET_SENT);
+ if (retval < 0)
+ return retval;
+ device->irq_state[DIO0] = DIO_PACKET_SENT;
irq_set_irq_type(device->irq_num[DIO0], IRQ_TYPE_EDGE_RISING);
enable_irq(device->irq_num[DIO0]); /* was disabled by rx active check */
/* enable transmission */
- SET_CHECKED(rf69_set_mode(spi, transmit));
+ retval = rf69_set_mode(spi, transmit);
+ if (retval < 0)
+ return retval;
/* transfer this msg (and repetitions) to chip fifo */
device->free_in_fifo = FIFO_SIZE;
position = 0;
repetitions = tx_cfg.repetitions;
- while( (repetitions > 0) && (size > position) )
- {
- if ( (size - position) > device->free_in_fifo)
- { /* msg to big for fifo - take a part */
+ while ((repetitions > 0) && (size > position)) {
+ if ((size - position) > device->free_in_fifo) {
+ /* msg too big for fifo - take a part */
int temp = device->free_in_fifo;
device->free_in_fifo = 0;
rf69_write_fifo(spi,
- &buffer[position],
- temp);
- position +=temp;
- }
- else
- { /* msg fits into fifo - take all */
+ &device->buffer[position],
+ temp);
+ position += temp;
+ } else {
+ /* msg fits into fifo - take all */
device->free_in_fifo -= size;
repetitions--;
rf69_write_fifo(spi,
- &buffer[position],
- (size - position) );
+ &device->buffer[position],
+ (size - position));
position = 0; /* reset for next repetition */
}
retval = wait_event_interruptible(device->fifo_wait_queue,
device->free_in_fifo > 0);
- if (retval) { printk("ABORT\n"); goto abort; }
+ if (retval) {
+ dev_dbg(device->dev, "ABORT\n");
+ goto abort;
+ }
}
/* we are done. Wait for packet to get sent */
dev_dbg(device->dev, "thread: wait for packet to get sent/fifo to be empty");
wait_event_interruptible(device->fifo_wait_queue,
device->free_in_fifo == FIFO_SIZE ||
- kthread_should_stop() );
- if ( kthread_should_stop() ) printk("ABORT\n");
-
+ kthread_should_stop());
+ if (kthread_should_stop())
+ dev_dbg(device->dev, "ABORT\n");
/* STOP_TRANSMISSION */
dev_dbg(device->dev, "thread: Packet sent. Set mode to stby.");
- SET_CHECKED(rf69_set_mode(spi, standby));
+ retval = rf69_set_mode(spi, standby);
+ if (retval < 0)
+ return retval;
/* everything sent? */
if (kfifo_is_empty(&device->tx_fifo)) {
abort:
- if (rx_interrupted)
- {
+ if (rx_interrupted) {
rx_interrupted = false;
pi433_start_rx(device);
}
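
The hunks above repeatedly replace the driver's old SET_CHECKED() wrapper with open-coded retval checks. The removed macro is not shown in this diff; it presumably looked roughly like the reconstruction below, hiding a 'return' inside what reads as a plain statement, which is exactly why review pushed for the explicit form:

/* Reconstruction of the removed helper (not part of this patch): */
#define SET_CHECKED(a)			\
	do {				\
		int __ret = (a);	\
		if (__ret < 0)		\
			return __ret;	\
	} while (0)

/* The open-coded replacement makes the control flow visible: */
retval = rf69_set_mode(spi, standby);
if (retval < 0)
	return retval;
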
@@ -678,16 +769,13 @@ pi433_read(struct file *filp, char __user *buf, size_t size, loff_t *f_pos)
/* just one read request at a time */
mutex_lock(&device->rx_lock);
- if (device->rx_active)
- {
+ if (device->rx_active) {
mutex_unlock(&device->rx_lock);
return -EAGAIN;
}
- else
- {
- device->rx_active = true;
- mutex_unlock(&device->rx_lock);
- }
+
+ device->rx_active = true;
+ mutex_unlock(&device->rx_lock);
/* start receiving */
/* will block until something is received */
@@ -709,14 +797,14 @@ pi433_read(struct file *filp, char __user *buf, size_t size, loff_t *f_pos)
return bytes_received;
}
-
static ssize_t
pi433_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *f_pos)
+ size_t count, loff_t *f_pos)
{
struct pi433_instance *instance;
struct pi433_device *device;
- int copied, retval;
+ int retval;
+ unsigned int copied;
instance = filp->private_data;
device = instance->device;
@@ -732,11 +820,11 @@ pi433_write(struct file *filp, const char __user *buf,
*/
mutex_lock(&device->tx_fifo_lock);
retval = kfifo_in(&device->tx_fifo, &instance->tx_cfg, sizeof(instance->tx_cfg));
- if ( retval != sizeof(instance->tx_cfg) )
+ if (retval != sizeof(instance->tx_cfg))
goto abort;
- retval = kfifo_in (&device->tx_fifo, &count, sizeof(size_t));
- if ( retval != sizeof(size_t) )
+ retval = kfifo_in(&device->tx_fifo, &count, sizeof(size_t));
+ if (retval != sizeof(size_t))
goto abort;
retval = kfifo_from_user(&device->tx_fifo, buf, count, &copied);
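
This is also why the declaration of `copied` was split out above: kfifo_from_user() reports the number of bytes actually copied through an unsigned int, and the function now returns that count, matching write(2) semantics instead of the old hard-coded 0. A sketch of the assumed contract:

unsigned int copied;
int ret = kfifo_from_user(&device->tx_fifo, buf, count, &copied);
if (ret)
	return ret;	/* e.g. -EFAULT on a bad user pointer */
return copied;		/* write(2) expects the byte count, not 0 */
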
@@ -749,7 +837,7 @@ pi433_write(struct file *filp, const char __user *buf,
wake_up_interruptible(&device->tx_wait_queue);
dev_dbg(device->dev, "write: generated new msg with %d bytes.", copied);
- return 0;
+ return copied;
abort:
dev_dbg(device->dev, "write to fifo failed: 0x%x", retval);
@@ -758,7 +846,6 @@ abort:
return -EAGAIN;
}
-
static long
pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
@@ -777,23 +864,23 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
instance = filp->private_data;
device = instance->device;
- if (device == NULL)
+ if (!device)
return -ESHUTDOWN;
switch (cmd) {
case PI433_IOC_RD_TX_CFG:
if (copy_to_user(argp, &instance->tx_cfg,
- sizeof(struct pi433_tx_cfg)))
+ sizeof(struct pi433_tx_cfg)))
return -EFAULT;
break;
case PI433_IOC_WR_TX_CFG:
if (copy_from_user(&instance->tx_cfg, argp,
- sizeof(struct pi433_tx_cfg)))
+ sizeof(struct pi433_tx_cfg)))
return -EFAULT;
break;
case PI433_IOC_RD_RX_CFG:
if (copy_to_user(argp, &device->rx_cfg,
- sizeof(struct pi433_rx_cfg)))
+ sizeof(struct pi433_rx_cfg)))
return -EFAULT;
break;
case PI433_IOC_WR_RX_CFG:
@@ -806,7 +893,7 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
if (copy_from_user(&device->rx_cfg, argp,
- sizeof(struct pi433_rx_cfg))) {
+ sizeof(struct pi433_rx_cfg))) {
mutex_unlock(&device->rx_lock);
return -EFAULT;
}
@@ -847,10 +934,8 @@ static int pi433_open(struct inode *inode, struct file *filp)
if (!device->rx_buffer) {
device->rx_buffer = kmalloc(MAX_MSG_SIZE, GFP_KERNEL);
- if (!device->rx_buffer) {
- dev_dbg(device->dev, "open/ENOMEM\n");
+ if (!device->rx_buffer)
return -ENOMEM;
- }
}
device->users++;
@@ -889,19 +974,18 @@ static int pi433_release(struct inode *inode, struct file *filp)
if (!device->users) {
kfree(device->rx_buffer);
device->rx_buffer = NULL;
- if (device->spi == NULL)
+ if (!device->spi)
kfree(device);
}
return 0;
}
-
/*-------------------------------------------------------------------------*/
static int setup_GPIOs(struct pi433_device *device)
{
- char name[5];
+ char name[5];
int retval;
int i;
const irq_handler_t DIO_irq_handler[NUM_DIO] = {
@@ -909,8 +993,7 @@ static int setup_GPIOs(struct pi433_device *device)
DIO1_irq_handler
};
- for (i=0; i<NUM_DIO; i++)
- {
+ for (i = 0; i < NUM_DIO; i++) {
/* "construct" name and get the gpio descriptor */
snprintf(name, sizeof(name), "DIO%d", i);
device->gpiod[i] = gpiod_get(&device->spi->dev, name, 0 /*GPIOD_IN*/);
@@ -923,24 +1006,21 @@ static int setup_GPIOs(struct pi433_device *device)
if (device->gpiod[i] == ERR_PTR(-EBUSY))
dev_dbg(&device->spi->dev, "%s is busy.", name);
- if ( IS_ERR(device->gpiod[i]) )
- {
+ if (IS_ERR(device->gpiod[i])) {
retval = PTR_ERR(device->gpiod[i]);
/* release already allocated gpios */
- for (i--; i>=0; i--)
- {
+ for (i--; i >= 0; i--) {
free_irq(device->irq_num[i], device);
gpiod_put(device->gpiod[i]);
}
return retval;
}
-
/* configure the pin */
gpiod_unexport(device->gpiod[i]);
retval = gpiod_direction_input(device->gpiod[i]);
- if (retval) return retval;
-
+ if (retval)
+ return retval;
/* configure irq */
device->irq_num[i] = gpiod_to_irq(device->gpiod[i]);
@@ -967,16 +1047,14 @@ static void free_GPIOs(struct pi433_device *device)
{
int i;
- for (i=0; i<NUM_DIO; i++)
- {
+ for (i = 0; i < NUM_DIO; i++) {
/* check if gpiod is valid */
- if ( IS_ERR(device->gpiod[i]) )
+ if (IS_ERR(device->gpiod[i]))
continue;
free_irq(device->irq_num[i], device);
gpiod_put(device->gpiod[i]);
}
- return;
}
static int pi433_get_minor(struct pi433_device *device)
@@ -989,7 +1067,7 @@ static int pi433_get_minor(struct pi433_device *device)
device->minor = retval;
retval = 0;
} else if (retval == -ENOSPC) {
- dev_err(device->dev, "too many pi433 devices\n");
+ dev_err(&device->spi->dev, "too many pi433 devices\n");
retval = -EINVAL;
}
mutex_unlock(&minor_lock);
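
Minor numbers come from an idr protected by minor_lock; idr_alloc() returns the new id (>= 0) or a negative errno, -ENOSPC once the range is exhausted. The allocation call itself sits outside this hunk, so the sketch below is an assumed shape, not quoted from the patch (N_PI433_MINORS is a hypothetical bound):

mutex_lock(&minor_lock);
retval = idr_alloc(&pi433_idr, device, 0, N_PI433_MINORS, GFP_KERNEL);
if (retval >= 0) {
	device->minor = retval;
	retval = 0;
} else if (retval == -ENOSPC) {
	dev_err(&device->spi->dev, "too many pi433 devices\n");
	retval = -EINVAL;
}
mutex_unlock(&minor_lock);
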
@@ -1002,6 +1080,7 @@ static void pi433_free_minor(struct pi433_device *dev)
idr_remove(&pi433_idr, dev->minor);
mutex_unlock(&minor_lock);
}
+
/*-------------------------------------------------------------------------*/
static const struct file_operations pi433_fops = {
@@ -1032,17 +1111,14 @@ static int pi433_probe(struct spi_device *spi)
/* spi->max_speed_hz = 10000000; 1MHz already set by device tree overlay */
retval = spi_setup(spi);
- if (retval)
- {
+ if (retval) {
dev_dbg(&spi->dev, "configuration of SPI interface failed!\n");
return retval;
}
- else
- {
- dev_dbg(&spi->dev,
- "spi interface setup: mode 0x%2x, %d bits per word, %dhz max speed",
- spi->mode, spi->bits_per_word, spi->max_speed_hz);
- }
+
+ dev_dbg(&spi->dev,
+ "spi interface setup: mode 0x%2x, %d bits per word, %dhz max speed",
+ spi->mode, spi->bits_per_word, spi->max_speed_hz);
/* Ping the chip by reading the version register */
retval = spi_w8r8(spi, 0x10);
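
spi_w8r8() writes one byte (the register address) and reads one byte back, so this doubles as a bus sanity check. Register 0x10 is the RF69 version register; per the RFM69 datasheet it reads back a fixed value (0x24 on common modules). The check that presumably follows in the elided lines would look like:

retval = spi_w8r8(spi, 0x10);	/* RegVersion */
if (retval < 0)
	return retval;		/* SPI transfer itself failed */
if (retval != 0x24)		/* expected silicon revision (datasheet) */
	dev_warn(&spi->dev, "unexpected chip version 0x%x\n", retval);
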
@@ -1089,27 +1165,32 @@ static int pi433_probe(struct spi_device *spi)
}
/* setup the radio module */
- SET_CHECKED(rf69_set_mode (spi, standby));
- SET_CHECKED(rf69_set_data_mode (spi, packet));
- SET_CHECKED(rf69_set_amplifier_0 (spi, optionOn));
- SET_CHECKED(rf69_set_amplifier_1 (spi, optionOff));
- SET_CHECKED(rf69_set_amplifier_2 (spi, optionOff));
- SET_CHECKED(rf69_set_output_power_level (spi, 13));
- SET_CHECKED(rf69_set_antenna_impedance (spi, fiftyOhm));
-
- /* start tx thread */
- device->tx_task_struct = kthread_run(pi433_tx_thread,
- device,
- "pi433_tx_task");
- if (IS_ERR(device->tx_task_struct)) {
- dev_dbg(device->dev, "start of send thread failed");
- goto send_thread_failed;
- }
+ retval = rf69_set_mode(spi, standby);
+ if (retval < 0)
+ goto minor_failed;
+ retval = rf69_set_data_mode(spi, DATAMODUL_MODE_PACKET);
+ if (retval < 0)
+ goto minor_failed;
+ retval = rf69_enable_amplifier(spi, MASK_PALEVEL_PA0);
+ if (retval < 0)
+ goto minor_failed;
+ retval = rf69_disable_amplifier(spi, MASK_PALEVEL_PA1);
+ if (retval < 0)
+ goto minor_failed;
+ retval = rf69_disable_amplifier(spi, MASK_PALEVEL_PA2);
+ if (retval < 0)
+ goto minor_failed;
+ retval = rf69_set_output_power_level(spi, 13);
+ if (retval < 0)
+ goto minor_failed;
+ retval = rf69_set_antenna_impedance(spi, fiftyOhm);
+ if (retval < 0)
+ goto minor_failed;
/* determine minor number */
retval = pi433_get_minor(device);
if (retval) {
- dev_dbg(device->dev, "get of minor number failed");
+ dev_dbg(&spi->dev, "get of minor number failed");
goto minor_failed;
}
@@ -1119,19 +1200,29 @@ static int pi433_probe(struct spi_device *spi)
&spi->dev,
device->devt,
device,
- "pi433");
+ "pi433.%d",
+ device->minor);
if (IS_ERR(device->dev)) {
pr_err("pi433: device register failed\n");
retval = PTR_ERR(device->dev);
goto device_create_failed;
- }
- else {
+ } else {
dev_dbg(device->dev,
"created device for major %d, minor %d\n",
MAJOR(pi433_dev),
device->minor);
}
+ /* start tx thread */
+ device->tx_task_struct = kthread_run(pi433_tx_thread,
+ device,
+ "pi433.%d_tx_task",
+ device->minor);
+ if (IS_ERR(device->tx_task_struct)) {
+ dev_dbg(device->dev, "start of send thread failed");
+ goto send_thread_failed;
+ }
+
/* create cdev */
device->cdev = cdev_alloc();
device->cdev->owner = THIS_MODULE;
@@ -1148,12 +1239,12 @@ static int pi433_probe(struct spi_device *spi)
return 0;
cdev_failed:
+ kthread_stop(device->tx_task_struct);
+send_thread_failed:
device_destroy(pi433_class, device->devt);
device_create_failed:
pi433_free_minor(device);
minor_failed:
- kthread_stop(device->tx_task_struct);
-send_thread_failed:
free_GPIOs(device);
GPIO_failed:
kfree(device);
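
The relabelling above restores the invariant that unwinding is the exact mirror of the setup sequence, now that the tx thread is started after device_create(); each label undoes the last step that succeeded before the failure and falls through to the earlier cleanups:

/*   setup step            failure label          fall-through cleanup
 *   ----------            -------------          --------------------
 *   setup_GPIOs()         GPIO_failed            kfree(device)
 *   pi433_get_minor()     minor_failed           free_GPIOs()
 *   device_create()       device_create_failed   pi433_free_minor()
 *   kthread_run()         send_thread_failed     device_destroy()
 *   cdev_alloc()/cdev_add cdev_failed            kthread_stop()
 */
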
@@ -1230,14 +1321,16 @@ static int __init pi433_init(void)
pi433_class = class_create(THIS_MODULE, "pi433");
if (IS_ERR(pi433_class)) {
- unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
+ unregister_chrdev(MAJOR(pi433_dev),
+ pi433_spi_driver.driver.name);
return PTR_ERR(pi433_class);
}
status = spi_register_driver(&pi433_spi_driver);
if (status < 0) {
class_destroy(pi433_class);
- unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
+ unregister_chrdev(MAJOR(pi433_dev),
+ pi433_spi_driver.driver.name);
}
return status;
diff --git a/drivers/staging/pi433/pi433_if.h b/drivers/staging/pi433/pi433_if.h
index e6ed3cd9b2e2..7314f69af198 100644
--- a/drivers/staging/pi433/pi433_if.h
+++ b/drivers/staging/pi433/pi433_if.h
@@ -34,8 +34,10 @@
/*---------------------------------------------------------------------------*/
-
-/*---------------------------------------------------------------------------*/
+enum option_on_off {
+ OPTION_OFF,
+ OPTION_ON
+};
/* IOCTL structs and commands */
@@ -57,14 +59,13 @@
*
* NOTE: struct layout is the same in 64bit and 32bit userspace.
*/
-#define PI433_TX_CFG_IOCTL_NR 0
-struct pi433_tx_cfg
-{
+#define PI433_TX_CFG_IOCTL_NR 0
+struct pi433_tx_cfg {
__u32 frequency;
__u16 bit_rate;
__u32 dev_frequency;
enum modulation modulation;
- enum modShaping modShaping;
+ enum mod_shaping mod_shaping;
enum paRamp pa_ramp;
@@ -72,13 +73,12 @@ struct pi433_tx_cfg
__u16 repetitions;
-
/* packet format */
- enum optionOnOff enable_preamble;
- enum optionOnOff enable_sync;
- enum optionOnOff enable_length_byte;
- enum optionOnOff enable_address_byte;
- enum optionOnOff enable_crc;
+ enum option_on_off enable_preamble;
+ enum option_on_off enable_sync;
+ enum option_on_off enable_length_byte;
+ enum option_on_off enable_address_byte;
+ enum option_on_off enable_crc;
__u16 preamble_length;
__u8 sync_length;
@@ -88,7 +88,6 @@ struct pi433_tx_cfg
__u8 address_byte;
};
-
/**
* struct pi433_rx_config - describes the configuration of the radio module for sending
* @frequency:
@@ -107,7 +106,7 @@ struct pi433_tx_cfg
*
* NOTE: struct layout is the same in 64bit and 32bit userspace.
*/
-#define PI433_RX_CFG_IOCTL_NR 1
+#define PI433_RX_CFG_IOCTL_NR 1
struct pi433_rx_cfg {
__u32 frequency;
__u16 bit_rate;
@@ -116,20 +115,18 @@ struct pi433_rx_cfg {
enum modulation modulation;
__u8 rssi_threshold;
- enum thresholdDecrement thresholdDecrement;
+ enum thresholdDecrement threshold_decrement;
enum antennaImpedance antenna_impedance;
enum lnaGain lna_gain;
enum mantisse bw_mantisse; /* normal: 0x50 */
__u8 bw_exponent; /* during AFC: 0x8b */
enum dagc dagc;
-
-
/* packet format */
- enum optionOnOff enable_sync;
- enum optionOnOff enable_length_byte; /* should be used in combination with sync, only */
+ enum option_on_off enable_sync;
+ enum option_on_off enable_length_byte; /* should be used in combination with sync, only */
enum addressFiltering enable_address_filtering; /* operational with sync, only */
- enum optionOnOff enable_crc; /* only operational, if sync on and fixed length or length byte is used */
+ enum option_on_off enable_crc; /* only operational, if sync on and fixed length or length byte is used */
__u8 sync_length;
__u8 fixed_message_length;
@@ -140,7 +137,6 @@ struct pi433_rx_cfg {
__u8 broadcast_address;
};
-
#define PI433_IOC_MAGIC 'r'
#define PI433_IOC_RD_TX_CFG _IOR(PI433_IOC_MAGIC, PI433_TX_CFG_IOCTL_NR, char[sizeof(struct pi433_tx_cfg)])
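
A minimal user-space sketch of the ABI defined above (hypothetical example, not part of this patch; it assumes the matching PI433_IOC_WR_TX_CFG _IOW definition that follows in this header, and the "pi433.%d" node name introduced earlier in this series):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "pi433_if.h"

int send_one_telegram(void)
{
	struct pi433_tx_cfg cfg;
	int fd = open("/dev/pi433.0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, PI433_IOC_RD_TX_CFG, &cfg) == 0) {
		cfg.repetitions = 1;			/* tweak one field */
		ioctl(fd, PI433_IOC_WR_TX_CFG, &cfg);	/* write it back */
	}
	write(fd, "hello", 5);	/* queue one telegram for the tx thread */
	close(fd);
	return 0;
}
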
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 12c9df9cddde..7ccdff6ae213 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -29,27 +29,98 @@
#include "rf69_registers.h"
#define F_OSC 32000000 /* in Hz */
-#define FIFO_SIZE 66 /* in byte */
+#define FIFO_SIZE 66 /* in byte */
/*-------------------------------------------------------------------------*/
-#define READ_REG(x) rf69_read_reg (spi, x)
-#define WRITE_REG(x, y) rf69_write_reg(spi, x, y)
+static u8 rf69_read_reg(struct spi_device *spi, u8 addr)
+{
+ int retval;
+
+ retval = spi_w8r8(spi, addr);
+
+#ifdef DEBUG_VALUES
+ if (retval < 0)
+ /* should never happen, since we already checked,
+ * that module is connected. Therefore no error
+ * handling, just an optional error message...
+ */
+ dev_dbg(&spi->dev, "read 0x%x FAILED\n", addr);
+ else
+ dev_dbg(&spi->dev, "read 0x%x from reg 0x%x\n", retval, addr);
+#endif
+
+ return retval;
+}
+
+static int rf69_write_reg(struct spi_device *spi, u8 addr, u8 value)
+{
+ int retval;
+ char buffer[2];
+
+ buffer[0] = addr | WRITE_BIT;
+ buffer[1] = value;
+
+ retval = spi_write(spi, &buffer, 2);
+
+#ifdef DEBUG_VALUES
+ if (retval < 0)
+ /* should never happen, since we already checked,
+ * that module is connected. Therefore no error
+ * handling, just an optional error message...
+ */
+ dev_dbg(&spi->dev, "write 0x%x to 0x%x FAILED\n", value, addr);
+ else
+ dev_dbg(&spi->dev, "wrote 0x%x to reg 0x%x\n", value, addr);
+#endif
+
+ return retval;
+}
/*-------------------------------------------------------------------------*/
-int rf69_set_mode(struct spi_device *spi, enum mode mode)
+static int rf69_set_bit(struct spi_device *spi, u8 reg, u8 mask)
+{
+ u8 tmp;
+
+ tmp = rf69_read_reg(spi, reg);
+ tmp = tmp | mask;
+ return rf69_write_reg(spi, reg, tmp);
+}
+
+static int rf69_clear_bit(struct spi_device *spi, u8 reg, u8 mask)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: mode");
- #endif
+ u8 tmp;
+ tmp = rf69_read_reg(spi, reg);
+ tmp = tmp & ~mask;
+ return rf69_write_reg(spi, reg, tmp);
+}
+
+static inline int rf69_read_mod_write(struct spi_device *spi, u8 reg, u8 mask, u8 value)
+{
+ u8 tmp;
+
+ tmp = rf69_read_reg(spi, reg);
+ tmp = (tmp & ~mask) | value;
+ return rf69_write_reg(spi, reg, tmp);
+}
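
These three helpers capture the register access patterns the old READ_REG/WRITE_REG macros were being composed into by hand; the rest of the file then becomes a mechanical rewrite:

/*   old: WRITE_REG(REG, (READ_REG(REG) & ~MASK) | VALUE);
 *   new: rf69_read_mod_write(spi, REG, MASK, VALUE);
 *
 * and for single-bit set/clear:
 *
 *   old: WRITE_REG(REG, READ_REG(REG) | MASK);
 *   new: rf69_set_bit(spi, REG, MASK);
 *
 *   old: WRITE_REG(REG, READ_REG(REG) & ~MASK);
 *   new: rf69_clear_bit(spi, REG, MASK);
 */
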
+
+/*-------------------------------------------------------------------------*/
+
+int rf69_set_mode(struct spi_device *spi, enum mode mode)
+{
switch (mode) {
- case transmit: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_TRANSMIT);
- case receive: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_RECEIVE);
- case synthesizer: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_SYNTHESIZER);
- case standby: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_STANDBY);
- case mode_sleep: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_SLEEP);
+ case transmit:
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_TRANSMIT);
+ case receive:
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_RECEIVE);
+ case synthesizer:
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_SYNTHESIZER);
+ case standby:
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_STANDBY);
+ case mode_sleep:
+ return rf69_read_mod_write(spi, REG_OPMODE, MASK_OPMODE_MODE, OPMODE_MODE_SLEEP);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -58,82 +129,75 @@ int rf69_set_mode(struct spi_device *spi, enum mode mode)
// we are using packet mode, so this check is not really needed
// but waiting for mode ready is necessary when going from sleep because the FIFO may not be immediately available from previous mode
//while (_mode == RF69_MODE_SLEEP && (READ_REG(REG_IRQFLAGS1) & RF_IRQFLAGS1_MODEREADY) == 0x00); // Wait for ModeReady
-
}
-int rf69_set_data_mode(struct spi_device *spi, enum dataMode dataMode)
+int rf69_set_data_mode(struct spi_device *spi, u8 data_mode)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: data mode");
- #endif
-
- switch (dataMode) {
- case packet: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODE) | DATAMODUL_MODE_PACKET);
- case continuous: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODE) | DATAMODUL_MODE_CONTINUOUS);
- case continuousNoSync: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODE) | DATAMODUL_MODE_CONTINUOUS_NOSYNC);
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODE, data_mode);
}
int rf69_set_modulation(struct spi_device *spi, enum modulation modulation)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: modulation");
- #endif
-
switch (modulation) {
- case OOK: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_TYPE) | DATAMODUL_MODULATION_TYPE_OOK);
- case FSK: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_TYPE) | DATAMODUL_MODULATION_TYPE_FSK);
+ case OOK:
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_TYPE, DATAMODUL_MODULATION_TYPE_OOK);
+ case FSK:
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_TYPE, DATAMODUL_MODULATION_TYPE_FSK);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
}
-enum modulation rf69_get_modulation(struct spi_device *spi)
+static enum modulation rf69_get_modulation(struct spi_device *spi)
{
u8 currentValue;
- #ifdef DEBUG
- dev_dbg(&spi->dev, "get: mode");
- #endif
-
- currentValue = READ_REG(REG_DATAMODUL);
+ currentValue = rf69_read_reg(spi, REG_DATAMODUL);
switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
- case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
- case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
- default: return undefined;
+ case DATAMODUL_MODULATION_TYPE_OOK:
+ return OOK;
+ case DATAMODUL_MODULATION_TYPE_FSK:
+ return FSK;
+ default:
+ return UNDEF;
}
}
-int rf69_set_modulation_shaping(struct spi_device *spi, enum modShaping modShaping)
+int rf69_set_modulation_shaping(struct spi_device *spi,
+ enum mod_shaping mod_shaping)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: mod shaping");
- #endif
-
- if (rf69_get_modulation(spi) == FSK) {
- switch (modShaping) {
- case shapingOff: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_NONE);
- case shaping1_0: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_1_0);
- case shaping0_5: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_0_3);
- case shaping0_3: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_0_5);
+ switch (rf69_get_modulation(spi)) {
+ case FSK:
+ switch (mod_shaping) {
+ case SHAPING_OFF:
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_NONE);
+ case SHAPING_1_0:
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_1_0);
+ case SHAPING_0_5:
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_0_5);
+ case SHAPING_0_3:
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_0_3);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
- } else {
- switch (modShaping) {
- case shapingOff: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_NONE);
- case shapingBR: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_BR);
- case shaping2BR: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_2BR);
+ case OOK:
+ switch (mod_shaping) {
+ case SHAPING_OFF:
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_NONE);
+ case SHAPING_BR:
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_BR);
+ case SHAPING_2BR:
+ return rf69_read_mod_write(spi, REG_DATAMODUL, MASK_DATAMODUL_MODULATION_SHAPE, DATAMODUL_MODULATION_SHAPE_2BR);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
+ default:
+ dev_dbg(&spi->dev, "set: modulation undefined");
+ return -EINVAL;
}
}
@@ -145,10 +209,6 @@ int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate)
u8 msb;
u8 lsb;
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: bit rate");
- #endif
-
// check input value
bitRate_min = F_OSC / 8388608; // 8388608 = 2^23;
if (bitRate < bitRate_min) {
@@ -159,15 +219,14 @@ int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate)
// calculate reg settings
bitRate_reg = (F_OSC / bitRate);
- msb = (bitRate_reg&0xff00) >> 8;
- lsb = (bitRate_reg&0xff);
+ msb = (bitRate_reg & 0xff00) >> 8;
+ lsb = (bitRate_reg & 0xff);
// transmit to RF 69
- retval = WRITE_REG(REG_BITRATE_MSB, msb);
+ retval = rf69_write_reg(spi, REG_BITRATE_MSB, msb);
if (retval)
return retval;
-
- retval = WRITE_REG(REG_BITRATE_LSB, lsb);
+ retval = rf69_write_reg(spi, REG_BITRATE_LSB, lsb);
if (retval)
return retval;
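
A worked example of the arithmetic above (values follow directly from the code; F_OSC = 32 MHz):

/* bitRate = 4800 bps:
 * bitRate_reg = 32000000 / 4800 = 6666 = 0x1a0a
 * REG_BITRATE_MSB <- 0x1a, REG_BITRATE_LSB <- 0x0a
 * The lower bound check works out to bitRate_min = 32000000 / 2^23 = 3.
 */
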
@@ -177,18 +236,14 @@ int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate)
int rf69_set_deviation(struct spi_device *spi, u32 deviation)
{
int retval;
-// u32 f_max; TODO: Abhängigkeit von Bitrate beachten!!
u64 f_reg;
u64 f_step;
u8 msb;
u8 lsb;
u64 factor = 1000000; // to improve precision of calculation
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: deviation");
- #endif
-
- if (deviation < 600 || deviation > 500000) { //TODO: Abhängigkeit von Bitrate beachten!!
+ // TODO: Dependency on bit rate
+ if (deviation < 600 || deviation > 500000) {
dev_dbg(&spi->dev, "set_deviation: illegal input param");
return -EINVAL;
}
@@ -201,8 +256,8 @@ int rf69_set_deviation(struct spi_device *spi, u32 deviation)
f_reg = deviation * factor;
do_div(f_reg, f_step);
- msb = (f_reg&0xff00) >> 8;
- lsb = (f_reg&0xff);
+ msb = (f_reg & 0xff00) >> 8;
+ lsb = (f_reg & 0xff);
// check msb
if (msb & ~FDEVMASB_MASK) {
@@ -211,11 +266,10 @@ int rf69_set_deviation(struct spi_device *spi, u32 deviation)
}
// write to chip
- retval = WRITE_REG(REG_FDEV_MSB, msb);
+ retval = rf69_write_reg(spi, REG_FDEV_MSB, msb);
if (retval)
return retval;
-
- retval = WRITE_REG(REG_FDEV_LSB, lsb);
+ retval = rf69_write_reg(spi, REG_FDEV_LSB, lsb);
if (retval)
return retval;
@@ -233,10 +287,6 @@ int rf69_set_frequency(struct spi_device *spi, u32 frequency)
u8 lsb;
u64 factor = 1000000; // to improve precision of calculation
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: frequency");
- #endif
-
// calculate f step
f_step = F_OSC * factor;
do_div(f_step, 524288); // 524288 = 2^19
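
The 'factor' scaling keeps precision in pure integer math; a worked example from the constants above:

/* f_step = 32000000 * 1000000 / 2^19 = 61035156
 *        (i.e. 61.035156 Hz, scaled by factor = 10^6)
 * For frequency = 433920000 Hz:
 * f_reg = 433920000 * 1000000 / 61035156 = 7109345 = 0x6c7ae1
 * -> REG_FRF_MSB = 0x6c, REG_FRF_MID = 0x7a, REG_FRF_LSB = 0xe1
 */
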
@@ -252,78 +302,38 @@ int rf69_set_frequency(struct spi_device *spi, u32 frequency)
f_reg = frequency * factor;
do_div(f_reg, f_step);
- msb = (f_reg&0xff0000) >> 16;
- mid = (f_reg&0xff00) >> 8;
- lsb = (f_reg&0xff);
+ msb = (f_reg & 0xff0000) >> 16;
+ mid = (f_reg & 0xff00) >> 8;
+ lsb = (f_reg & 0xff);
// write to chip
- retval = WRITE_REG(REG_FRF_MSB, msb);
+ retval = rf69_write_reg(spi, REG_FRF_MSB, msb);
if (retval)
return retval;
-
- retval = WRITE_REG(REG_FRF_MID, mid);
+ retval = rf69_write_reg(spi, REG_FRF_MID, mid);
if (retval)
return retval;
-
- retval = WRITE_REG(REG_FRF_LSB, lsb);
+ retval = rf69_write_reg(spi, REG_FRF_LSB, lsb);
if (retval)
return retval;
return 0;
}
-int rf69_set_amplifier_0(struct spi_device *spi, enum optionOnOff optionOnOff)
+int rf69_enable_amplifier(struct spi_device *spi, u8 amplifier_mask)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: amp #0");
- #endif
-
- switch (optionOnOff) {
- case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA0));
- case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA0));
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
-}
-
-int rf69_set_amplifier_1(struct spi_device *spi, enum optionOnOff optionOnOff)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: amp #1");
- #endif
-
- switch (optionOnOff) {
- case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA1));
- case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA1));
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
+ return rf69_set_bit(spi, REG_PALEVEL, amplifier_mask);
}
-int rf69_set_amplifier_2(struct spi_device *spi, enum optionOnOff optionOnOff)
+int rf69_disable_amplifier(struct spi_device *spi, u8 amplifier_mask)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: amp #2");
- #endif
-
- switch (optionOnOff) {
- case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA2));
- case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA2));
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
+ return rf69_clear_bit(spi, REG_PALEVEL, amplifier_mask);
}
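
The three near-identical per-amplifier setters collapse into these two mask-based helpers; callers now pass the PA mask directly, as the probe changes earlier in this patch show:

/*   old: rf69_set_amplifier_0(spi, optionOn);
 *   new: rf69_enable_amplifier(spi, MASK_PALEVEL_PA0);
 *
 *   old: rf69_set_amplifier_1(spi, optionOff);
 *   new: rf69_disable_amplifier(spi, MASK_PALEVEL_PA1);
 */
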
int rf69_set_output_power_level(struct spi_device *spi, u8 powerLevel)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: power level");
- #endif
-
- powerLevel += 18; // TODO Abhängigkeit von PA0,1,2 setting
+ // TODO: Dependency on PA0,1,2 setting
+ powerLevel += 18;
// check input value
if (powerLevel > 0x1f) {
@@ -332,32 +342,44 @@ int rf69_set_output_power_level(struct spi_device *spi, u8 powerLevel)
}
// write value
- return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_OUTPUT_POWER) | powerLevel);
+ return rf69_read_mod_write(spi, REG_PALEVEL, MASK_PALEVEL_OUTPUT_POWER, powerLevel);
}
int rf69_set_pa_ramp(struct spi_device *spi, enum paRamp paRamp)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: pa ramp");
- #endif
-
switch (paRamp) {
- case ramp3400: return WRITE_REG(REG_PARAMP, PARAMP_3400);
- case ramp2000: return WRITE_REG(REG_PARAMP, PARAMP_2000);
- case ramp1000: return WRITE_REG(REG_PARAMP, PARAMP_1000);
- case ramp500: return WRITE_REG(REG_PARAMP, PARAMP_500);
- case ramp250: return WRITE_REG(REG_PARAMP, PARAMP_250);
- case ramp125: return WRITE_REG(REG_PARAMP, PARAMP_125);
- case ramp100: return WRITE_REG(REG_PARAMP, PARAMP_100);
- case ramp62: return WRITE_REG(REG_PARAMP, PARAMP_62);
- case ramp50: return WRITE_REG(REG_PARAMP, PARAMP_50);
- case ramp40: return WRITE_REG(REG_PARAMP, PARAMP_40);
- case ramp31: return WRITE_REG(REG_PARAMP, PARAMP_31);
- case ramp25: return WRITE_REG(REG_PARAMP, PARAMP_25);
- case ramp20: return WRITE_REG(REG_PARAMP, PARAMP_20);
- case ramp15: return WRITE_REG(REG_PARAMP, PARAMP_15);
- case ramp12: return WRITE_REG(REG_PARAMP, PARAMP_12);
- case ramp10: return WRITE_REG(REG_PARAMP, PARAMP_10);
+ case ramp3400:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_3400);
+ case ramp2000:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_2000);
+ case ramp1000:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_1000);
+ case ramp500:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_500);
+ case ramp250:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_250);
+ case ramp125:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_125);
+ case ramp100:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_100);
+ case ramp62:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_62);
+ case ramp50:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_50);
+ case ramp40:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_40);
+ case ramp31:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_31);
+ case ramp25:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_25);
+ case ramp20:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_20);
+ case ramp15:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_15);
+ case ramp12:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_12);
+ case ramp10:
+ return rf69_write_reg(spi, REG_PARAMP, PARAMP_10);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -366,13 +388,11 @@ int rf69_set_pa_ramp(struct spi_device *spi, enum paRamp paRamp)
int rf69_set_antenna_impedance(struct spi_device *spi, enum antennaImpedance antennaImpedance)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: antenna impedance");
- #endif
-
switch (antennaImpedance) {
- case fiftyOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) & ~MASK_LNA_ZIN));
- case twohundretOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) | MASK_LNA_ZIN));
+ case fiftyOhm:
+ return rf69_clear_bit(spi, REG_LNA, MASK_LNA_ZIN);
+ case twohundretOhm:
+ return rf69_set_bit(spi, REG_LNA, MASK_LNA_ZIN);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -381,81 +401,27 @@ int rf69_set_antenna_impedance(struct spi_device *spi, enum antennaImpedance ant
int rf69_set_lna_gain(struct spi_device *spi, enum lnaGain lnaGain)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: lna gain");
- #endif
-
switch (lnaGain) {
- case automatic: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_AUTO));
- case max: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX));
- case maxMinus6: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_6));
- case maxMinus12: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_12));
- case maxMinus24: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_24));
- case maxMinus36: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_36));
- case maxMinus48: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_48));
+ case automatic:
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_AUTO);
+ case max:
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX);
+ case max_minus_6:
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_6);
+ case max_minus_12:
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_12);
+ case max_minus_24:
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_24);
+ case max_minus_36:
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_36);
+ case max_minus_48:
+ return rf69_read_mod_write(spi, REG_LNA, MASK_LNA_GAIN, LNA_GAIN_MAX_MINUS_48);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
}
-enum lnaGain rf69_get_lna_gain(struct spi_device *spi)
-{
- u8 currentValue;
-
- #ifdef DEBUG
- dev_dbg(&spi->dev, "get: lna gain");
- #endif
-
- currentValue = READ_REG(REG_LNA);
-
- switch (currentValue & MASK_LNA_CURRENT_GAIN >> 3) { // improvement: change 3 to define
- case LNA_GAIN_AUTO: return automatic;
- case LNA_GAIN_MAX: return max;
- case LNA_GAIN_MAX_MINUS_6: return maxMinus6;
- case LNA_GAIN_MAX_MINUS_12: return maxMinus12;
- case LNA_GAIN_MAX_MINUS_24: return maxMinus24;
- case LNA_GAIN_MAX_MINUS_36: return maxMinus36;
- case LNA_GAIN_MAX_MINUS_48: return maxMinus48;
- default: return undefined;
- }
-}
-
-int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi, u8 reg, enum dccPercent dccPercent)
-{
- switch (dccPercent) {
- case dcc16Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_16_PERCENT));
- case dcc8Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_8_PERCENT));
- case dcc4Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_4_PERCENT));
- case dcc2Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_2_PERCENT));
- case dcc1Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_1_PERCENT));
- case dcc0_5Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_5_PERCENT));
- case dcc0_25Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_25_PERCENT));
- case dcc0_125Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_125_PERCENT));
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
-}
-
-int rf69_set_dc_cut_off_frequency(struct spi_device *spi, enum dccPercent dccPercent)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: cut off freq");
- #endif
-
- return rf69_set_dc_cut_off_frequency_intern(spi, REG_RXBW, dccPercent);
-}
-
-int rf69_set_dc_cut_off_frequency_during_afc(struct spi_device *spi, enum dccPercent dccPercent)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: cut off freq during afc");
- #endif
-
- return rf69_set_dc_cut_off_frequency_intern(spi, REG_AFCBW, dccPercent);
-}
-
static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg,
enum mantisse mantisse, u8 exponent)
{
@@ -475,7 +441,7 @@ static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg,
}
// read old value
- newValue = READ_REG(reg);
+ newValue = rf69_read_reg(spi, reg);
// "delete" mantisse and exponent = just keep the DCC setting
newValue = newValue & MASK_BW_DCC_FREQ;
@@ -497,79 +463,38 @@ static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg,
newValue = newValue | exponent;
// write back
- return WRITE_REG(reg, newValue);
+ return rf69_write_reg(spi, reg, newValue);
}
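
For background (from the RFM69 datasheet, not part of this patch): the mantissa/exponent pair written here encodes the receiver channel filter bandwidth as

/*   RxBw = F_OSC / (mantissa * 2^(exponent + 2))        (FSK)
 *
 * e.g. mantissa = 16, exponent = 2:
 *   32000000 / (16 * 2^4) = 125000, i.e. 125 kHz.
 */
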
int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse, u8 exponent)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: band width");
- #endif
-
return rf69_set_bandwidth_intern(spi, REG_RXBW, mantisse, exponent);
}
int rf69_set_bandwidth_during_afc(struct spi_device *spi, enum mantisse mantisse, u8 exponent)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: band width during afc");
- #endif
-
return rf69_set_bandwidth_intern(spi, REG_AFCBW, mantisse, exponent);
}
-int rf69_set_ook_threshold_type(struct spi_device *spi, enum thresholdType thresholdType)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: threshold type");
- #endif
-
- switch (thresholdType) {
- case fixed: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_FIXED));
- case peak: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_PEAK));
- case average: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_AVERAGE));
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
-}
-
-int rf69_set_ook_threshold_step(struct spi_device *spi, enum thresholdStep thresholdStep)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: threshold step");
- #endif
-
- switch (thresholdStep) {
- case step_0_5db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_0_5_DB));
- case step_1_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_0_DB));
- case step_1_5db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_5_DB));
- case step_2_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_2_0_DB));
- case step_3_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_3_0_DB));
- case step_4_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_4_0_DB));
- case step_5_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_5_0_DB));
- case step_6_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_6_0_DB));
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
-}
-
int rf69_set_ook_threshold_dec(struct spi_device *spi, enum thresholdDecrement thresholdDecrement)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: threshold decrement");
- #endif
-
switch (thresholdDecrement) {
- case dec_every8th: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_8TH));
- case dec_every4th: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_4TH));
- case dec_every2nd: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_2ND));
- case dec_once: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_ONCE));
- case dec_twice: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_TWICE));
- case dec_4times: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_4_TIMES));
- case dec_8times: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_8_TIMES));
- case dec_16times: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_16_TIMES));
+ case dec_every8th:
+ return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_EVERY_8TH);
+ case dec_every4th:
+ return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_EVERY_4TH);
+ case dec_every2nd:
+ return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_EVERY_2ND);
+ case dec_once:
+ return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_ONCE);
+ case dec_twice:
+ return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_TWICE);
+ case dec_4times:
+ return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_4_TIMES);
+ case dec_8times:
+ return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_8_TIMES);
+ case dec_16times:
+ return rf69_read_mod_write(spi, REG_OOKPEAK, MASK_OOKPEAK_THRESDEC, OOKPEAK_THRESHDEC_16_TIMES);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -583,10 +508,6 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
u8 regaddr;
u8 regValue;
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: DIO mapping");
- #endif
-
switch (DIONumber) {
case 0:
mask = MASK_DIO0; shift = SHIFT_DIO0; regaddr = REG_DIOMAPPING1;
@@ -607,95 +528,66 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
mask = MASK_DIO5; shift = SHIFT_DIO5; regaddr = REG_DIOMAPPING2;
break;
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
// read reg
- regValue = READ_REG(regaddr);
+ regValue = rf69_read_reg(spi, regaddr);
// delete old value
regValue = regValue & ~mask;
// add new value
regValue = regValue | value << shift;
// write back
- return WRITE_REG(regaddr, regValue);
+ return rf69_write_reg(spi, regaddr, regValue);
}
bool rf69_get_flag(struct spi_device *spi, enum flag flag)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "get: flag");
- #endif
-
- switch (flag) {
- case modeSwitchCompleted: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_MODE_READY);
- case readyToReceive: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_RX_READY);
- case readyToSend: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_TX_READY);
- case pllLocked: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_PLL_LOCK);
- case rssiExceededThreshold: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_RSSI);
- case timeout: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_TIMEOUT);
- case automode: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_AUTOMODE);
- case syncAddressMatch: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH);
- case fifoFull: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_FULL);
-/* case fifoNotEmpty: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY); */
- case fifoEmpty: return !(READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY);
- case fifoLevelBelowThreshold: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_LEVEL);
- case fifoOverrun: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_OVERRUN);
- case packetSent: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_PACKET_SENT);
- case payloadReady: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_PAYLOAD_READY);
- case crcOk: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_CRC_OK);
- case batteryLow: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_LOW_BAT);
- default: return false;
- }
-}
-
-int rf69_reset_flag(struct spi_device *spi, enum flag flag)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "reset: flag");
- #endif
-
switch (flag) {
- case rssiExceededThreshold: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_RSSI);
- case syncAddressMatch: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH);
- case fifoOverrun: return WRITE_REG(REG_IRQFLAGS2, MASK_IRQFLAGS2_FIFO_OVERRUN);
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
+ case modeSwitchCompleted:
+ return (rf69_read_reg(spi, REG_IRQFLAGS1) & MASK_IRQFLAGS1_MODE_READY);
+ case readyToReceive:
+ return (rf69_read_reg(spi, REG_IRQFLAGS1) & MASK_IRQFLAGS1_RX_READY);
+ case readyToSend:
+ return (rf69_read_reg(spi, REG_IRQFLAGS1) & MASK_IRQFLAGS1_TX_READY);
+ case pllLocked:
+ return (rf69_read_reg(spi, REG_IRQFLAGS1) & MASK_IRQFLAGS1_PLL_LOCK);
+ case rssiExceededThreshold:
+ return (rf69_read_reg(spi, REG_IRQFLAGS1) & MASK_IRQFLAGS1_RSSI);
+ case timeout:
+ return (rf69_read_reg(spi, REG_IRQFLAGS1) & MASK_IRQFLAGS1_TIMEOUT);
+ case automode:
+ return (rf69_read_reg(spi, REG_IRQFLAGS1) & MASK_IRQFLAGS1_AUTOMODE);
+ case syncAddressMatch:
+ return (rf69_read_reg(spi, REG_IRQFLAGS1) & MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH);
+ case fifo_full:
+ return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_FULL);
+/* case fifo_not_empty:
+ * return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY); */
+ case fifo_empty:
+ return !(rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY);
+ case fifo_level_below_threshold:
+ return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_LEVEL);
+ case fifo_overrun:
+ return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_OVERRUN);
+ case packetSent:
+ return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_PACKET_SENT);
+ case payload_ready:
+ return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_PAYLOAD_READY);
+ case crcOk:
+ return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_CRC_OK);
+ case batteryLow:
+ return (rf69_read_reg(spi, REG_IRQFLAGS2) & MASK_IRQFLAGS2_LOW_BAT);
+ default: return false;
}
}
int rf69_set_rssi_threshold(struct spi_device *spi, u8 threshold)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: rssi threshold");
- #endif
-
/* no value check needed - u8 exactly matches register size */
- return WRITE_REG(REG_RSSITHRESH, threshold);
-}
-
-int rf69_set_rx_start_timeout(struct spi_device *spi, u8 timeout)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: start timeout");
- #endif
-
- /* no value check needed - u8 exactly matches register size */
-
- return WRITE_REG(REG_RXTIMEOUT1, timeout);
-}
-
-int rf69_set_rssi_timeout(struct spi_device *spi, u8 timeout)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: rssi timeout");
- #endif
-
- /* no value check needed - u8 exactly matches register size */
-
- return WRITE_REG(REG_RXTIMEOUT2, timeout);
+ return rf69_write_reg(spi, REG_RSSITHRESH, threshold);
}
int rf69_set_preamble_length(struct spi_device *spi, u16 preambleLength)
@@ -703,47 +595,38 @@ int rf69_set_preamble_length(struct spi_device *spi, u16 preambleLength)
int retval;
u8 msb, lsb;
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: preamble length");
- #endif
-
/* no value check needed - u16 exactly matches register size */
/* calculate reg settings */
- msb = (preambleLength&0xff00) >> 8;
- lsb = (preambleLength&0xff);
+ msb = (preambleLength & 0xff00) >> 8;
+ lsb = (preambleLength & 0xff);
/* transmit to chip */
- retval = WRITE_REG(REG_PREAMBLE_MSB, msb);
+ retval = rf69_write_reg(spi, REG_PREAMBLE_MSB, msb);
if (retval)
return retval;
- return WRITE_REG(REG_PREAMBLE_LSB, lsb);
+ retval = rf69_write_reg(spi, REG_PREAMBLE_LSB, lsb);
+
+ return retval;
}
-int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
+int rf69_enable_sync(struct spi_device *spi)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: sync enable");
- #endif
-
- switch (optionOnOff) {
- case optionOn: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_SYNC_ON));
- case optionOff: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_ON));
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
+ return rf69_set_bit(spi, REG_SYNC_CONFIG, MASK_SYNC_CONFIG_SYNC_ON);
}
-int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifoFillCondition fifoFillCondition)
+int rf69_disable_sync(struct spi_device *spi)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: fifo fill condition");
- #endif
+ return rf69_clear_bit(spi, REG_SYNC_CONFIG, MASK_SYNC_CONFIG_SYNC_ON);
+}
- switch (fifoFillCondition) {
- case always: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_FIFO_FILL_CONDITION));
- case afterSyncInterrupt: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_FIFO_FILL_CONDITION));
+int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifo_fill_condition fifo_fill_condition)
+{
+ switch (fifo_fill_condition) {
+ case always:
+ return rf69_set_bit(spi, REG_SYNC_CONFIG, MASK_SYNC_CONFIG_FIFO_FILL_CONDITION);
+ case afterSyncInterrupt:
+ return rf69_clear_bit(spi, REG_SYNC_CONFIG, MASK_SYNC_CONFIG_FIFO_FILL_CONDITION);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -752,10 +635,6 @@ int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifoFillCondition
int rf69_set_sync_size(struct spi_device *spi, u8 syncSize)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: sync size");
- #endif
-
// check input value
if (syncSize > 0x07) {
dev_dbg(&spi->dev, "set: illegal input param");
@@ -763,136 +642,85 @@ int rf69_set_sync_size(struct spi_device *spi, u8 syncSize)
}
// write value
- return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | (syncSize << 3));
-}
-
-int rf69_set_sync_tolerance(struct spi_device *spi, u8 syncTolerance)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: sync tolerance");
- #endif
-
- // check input value
- if (syncTolerance > 0x07) {
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
-
- // write value
- return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | syncTolerance);
+ return rf69_read_mod_write(spi, REG_SYNC_CONFIG, MASK_SYNC_CONFIG_SYNC_SIZE, (syncSize << 3));
}
int rf69_set_sync_values(struct spi_device *spi, u8 syncValues[8])
{
int retval = 0;
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: sync values");
- #endif
-
- retval += WRITE_REG(REG_SYNCVALUE1, syncValues[0]);
- retval += WRITE_REG(REG_SYNCVALUE2, syncValues[1]);
- retval += WRITE_REG(REG_SYNCVALUE3, syncValues[2]);
- retval += WRITE_REG(REG_SYNCVALUE4, syncValues[3]);
- retval += WRITE_REG(REG_SYNCVALUE5, syncValues[4]);
- retval += WRITE_REG(REG_SYNCVALUE6, syncValues[5]);
- retval += WRITE_REG(REG_SYNCVALUE7, syncValues[6]);
- retval += WRITE_REG(REG_SYNCVALUE8, syncValues[7]);
+ retval += rf69_write_reg(spi, REG_SYNCVALUE1, syncValues[0]);
+ retval += rf69_write_reg(spi, REG_SYNCVALUE2, syncValues[1]);
+ retval += rf69_write_reg(spi, REG_SYNCVALUE3, syncValues[2]);
+ retval += rf69_write_reg(spi, REG_SYNCVALUE4, syncValues[3]);
+ retval += rf69_write_reg(spi, REG_SYNCVALUE5, syncValues[4]);
+ retval += rf69_write_reg(spi, REG_SYNCVALUE6, syncValues[5]);
+ retval += rf69_write_reg(spi, REG_SYNCVALUE7, syncValues[6]);
+ retval += rf69_write_reg(spi, REG_SYNCVALUE8, syncValues[7]);
return retval;
}
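
Accumulating the write results only works because rf69_write_reg() returns 0 or a negative errno, so any failure leaves the sum nonzero. An equivalent, arguably clearer loop form (sketch; assumes REG_SYNCVALUE1..8 are consecutive register addresses, as they are on the RF69):

int i, retval;

for (i = 0; i < 8; i++) {
	retval = rf69_write_reg(spi, REG_SYNCVALUE1 + i, syncValues[i]);
	if (retval < 0)
		return retval;
}
return 0;
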
int rf69_set_packet_format(struct spi_device *spi, enum packetFormat packetFormat)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: packet format");
- #endif
-
switch (packetFormat) {
- case packetLengthVar: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE));
- case packetLengthFix: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE));
+ case packetLengthVar:
+ return rf69_set_bit(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE);
+ case packetLengthFix:
+ return rf69_clear_bit(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
}
-int rf69_set_crc_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
+int rf69_enable_crc(struct spi_device *spi)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: crc enable");
- #endif
+ return rf69_set_bit(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_CRC_ON);
+}
- switch (optionOnOff) {
- case optionOn: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_CRC_ON));
- case optionOff: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_CRC_ON));
- default:
- dev_dbg(&spi->dev, "set: illegal input param");
- return -EINVAL;
- }
+int rf69_disable_crc(struct spi_device *spi)
+{
+ return rf69_clear_bit(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_CRC_ON);
}
int rf69_set_adressFiltering(struct spi_device *spi, enum addressFiltering addressFiltering)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: address filtering");
- #endif
-
switch (addressFiltering) {
- case filteringOff: return WRITE_REG(REG_PACKETCONFIG1, ((READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_OFF));
- case nodeAddress: return WRITE_REG(REG_PACKETCONFIG1, ((READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODE));
- case nodeOrBroadcastAddress: return WRITE_REG(REG_PACKETCONFIG1, ((READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST));
+ case filteringOff:
+ return rf69_read_mod_write(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_ADDRESSFILTERING, PACKETCONFIG1_ADDRESSFILTERING_OFF);
+ case nodeAddress:
+ return rf69_read_mod_write(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_ADDRESSFILTERING, PACKETCONFIG1_ADDRESSFILTERING_NODE);
+ case nodeOrBroadcastAddress:
+ return rf69_read_mod_write(spi, REG_PACKETCONFIG1, MASK_PACKETCONFIG1_ADDRESSFILTERING, PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
}
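
The rf69_read_mod_write() helper used throughout this hunk encapsulates the read-modify-write idiom that the removed READ_REG/WRITE_REG chains spelled out inline. A self-contained sketch of the bit arithmetic, with hypothetical names; callers pre-shift the value into the field (e.g. syncSize << 3 above), and masking 'value' here is a defensive extra the real helper may not perform:

#include <stdint.h>

/* Clear the field selected by 'mask', then OR in the new value. */
static uint8_t read_mod_write_sketch(uint8_t old, uint8_t mask, uint8_t value)
{
	return (old & ~mask) | (value & mask);
}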
-int rf69_set_payload_length(struct spi_device *spi, u8 payloadLength)
+int rf69_set_payload_length(struct spi_device *spi, u8 payload_length)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: payload length");
- #endif
-
- return WRITE_REG(REG_PAYLOAD_LENGTH, payloadLength);
-}
-
-u8 rf69_get_payload_length(struct spi_device *spi)
-{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "get: payload length");
- #endif
-
- return (u8) READ_REG(REG_PAYLOAD_LENGTH);
+ return rf69_write_reg(spi, REG_PAYLOAD_LENGTH, payload_length);
}
int rf69_set_node_address(struct spi_device *spi, u8 nodeAddress)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: node address");
- #endif
-
- return WRITE_REG(REG_NODEADRS, nodeAddress);
+ return rf69_write_reg(spi, REG_NODEADRS, nodeAddress);
}
int rf69_set_broadcast_address(struct spi_device *spi, u8 broadcastAddress)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: broadcast address");
- #endif
-
- return WRITE_REG(REG_BROADCASTADRS, broadcastAddress);
+ return rf69_write_reg(spi, REG_BROADCASTADRS, broadcastAddress);
}
int rf69_set_tx_start_condition(struct spi_device *spi, enum txStartCondition txStartCondition)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: start condition");
- #endif
-
switch (txStartCondition) {
- case fifoLevel: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_TXSTART));
- case fifoNotEmpty: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) | MASK_FIFO_THRESH_TXSTART));
+ case fifo_level:
+ return rf69_clear_bit(spi, REG_FIFO_THRESH, MASK_FIFO_THRESH_TXSTART);
+ case fifo_not_empty:
+ return rf69_set_bit(spi, REG_FIFO_THRESH, MASK_FIFO_THRESH_TXSTART);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -903,35 +731,32 @@ int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold)
{
int retval;
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: fifo threshold");
- #endif
-
- // check input value
+ /* check input value */
if (threshold & 0x80) {
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
- // write value
- retval = WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_VALUE) | threshold);
+ /* write value */
+ retval = rf69_read_mod_write(spi, REG_FIFO_THRESH, MASK_FIFO_THRESH_VALUE, threshold);
if (retval)
return retval;
- // access the fifo to activate new threshold
- return rf69_read_fifo(spi, (u8 *)&retval, 1); // retval used as buffer
+ /* access the fifo to activate new threshold;
+ * retval is (mis)used as a one-byte buffer here
+ */
+ return rf69_read_fifo(spi, (u8 *)&retval, 1);
}
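
The dummy one-byte FIFO read at the end is what makes the new threshold take effect; reusing retval as the buffer works but is easy to misread. A hedged alternative sketch with a dedicated scratch byte, where read_fifo_stub() stands in for rf69_read_fifo():

static int read_fifo_stub(unsigned char *buf, unsigned int n)
{
	(void)buf;
	(void)n;
	return 0;
}

static int latch_threshold_sketch(void)
{
	unsigned char scratch;

	/* one dummy FIFO access activates the freshly written threshold */
	return read_fifo_stub(&scratch, 1);
}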
int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
{
- #ifdef DEBUG
- dev_dbg(&spi->dev, "set: dagc");
- #endif
-
switch (dagc) {
- case normalMode: return WRITE_REG(REG_TESTDAGC, DAGC_NORMAL);
- case improve: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA0);
- case improve4LowModulationIndex: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA1);
+ case normalMode:
+ return rf69_write_reg(spi, REG_TESTDAGC, DAGC_NORMAL);
+ case improve:
+ return rf69_write_reg(spi, REG_TESTDAGC, DAGC_IMPROVED_LOWBETA0);
+ case improve4LowModulationIndex:
+ return rf69_write_reg(spi, REG_TESTDAGC, DAGC_IMPROVED_LOWBETA1);
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -940,19 +765,17 @@ int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
/*-------------------------------------------------------------------------*/
-int rf69_read_fifo (struct spi_device *spi, u8 *buffer, unsigned int size)
+int rf69_read_fifo(struct spi_device *spi, u8 *buffer, unsigned int size)
{
- #ifdef DEBUG_FIFO_ACCESS
- int i;
- #endif
+#ifdef DEBUG_FIFO_ACCESS
+ int i;
+#endif
struct spi_transfer transfer;
u8 local_buffer[FIFO_SIZE + 1];
int retval;
if (size > FIFO_SIZE) {
- #ifdef DEBUG
- dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer \n");
- #endif
+ dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer\n");
return -EMSGSIZE;
}
@@ -961,97 +784,41 @@ int rf69_read_fifo (struct spi_device *spi, u8 *buffer, unsigned int size)
memset(&transfer, 0, sizeof(transfer));
transfer.tx_buf = local_buffer;
transfer.rx_buf = local_buffer;
- transfer.len = size+1;
+ transfer.len = size + 1;
retval = spi_sync_transfer(spi, &transfer, 1);
- #ifdef DEBUG_FIFO_ACCESS
- for (i = 0; i < size; i++)
- dev_dbg(&spi->dev, "%d - 0x%x\n", i, local_buffer[i+1]);
- #endif
+#ifdef DEBUG_FIFO_ACCESS
+ for (i = 0; i < size; i++)
+ dev_dbg(&spi->dev, "%d - 0x%x\n", i, local_buffer[i + 1]);
+#endif
- memcpy(buffer, &local_buffer[1], size); // TODO: would be nicer without memcpy
+ memcpy(buffer, &local_buffer[1], size);
return retval;
}
int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size)
{
- #ifdef DEBUG_FIFO_ACCESS
- int i;
- #endif
+#ifdef DEBUG_FIFO_ACCESS
+ int i;
+#endif
char spi_address = REG_FIFO | WRITE_BIT;
u8 local_buffer[FIFO_SIZE + 1];
if (size > FIFO_SIZE) {
- #ifdef DEBUG
- dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer \n");
- #endif
+ dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer\n");
return -EMSGSIZE;
}
local_buffer[0] = spi_address;
- memcpy(&local_buffer[1], buffer, size); // TODO: would be nicer without memcpy
+ memcpy(&local_buffer[1], buffer, size);
- #ifdef DEBUG_FIFO_ACCESS
- for (i = 0; i < size; i++)
- dev_dbg(&spi->dev, "0x%x\n", buffer[i]);
- #endif
+#ifdef DEBUG_FIFO_ACCESS
+ for (i = 0; i < size; i++)
+ dev_dbg(&spi->dev, "0x%x\n", buffer[i]);
+#endif
- return spi_write (spi, local_buffer, size + 1);
+ return spi_write(spi, local_buffer, size + 1);
}
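
Both FIFO helpers prepend a one-byte SPI header (the FIFO register address, with the write bit set for TX) to the payload, which is why the local buffer holds FIFO_SIZE + 1 bytes and the transfer length is size + 1. A userspace sketch of that framing; the constants are assumptions to be checked against the RFM69 datasheet:

#include <stdint.h>
#include <string.h>

#define SKETCH_FIFO_SIZE 66	/* assumed FIFO depth */
#define SKETCH_REG_FIFO  0x00
#define SKETCH_WRITE_BIT 0x80

/* Build the on-wire frame for a FIFO write: address byte, then payload. */
static int build_fifo_write_frame(uint8_t *frame, const uint8_t *payload,
				  unsigned int size)
{
	if (size > SKETCH_FIFO_SIZE)
		return -1;	/* mirrors the -EMSGSIZE check above */
	frame[0] = SKETCH_REG_FIFO | SKETCH_WRITE_BIT;
	memcpy(&frame[1], payload, size);
	return (int)(size + 1);	/* bytes to clock out over SPI */
}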
-/*-------------------------------------------------------------------------*/
-
-u8 rf69_read_reg(struct spi_device *spi, u8 addr)
-{
- int retval;
-
- retval = spi_w8r8(spi, addr);
-
- #ifdef DEBUG_VALUES
- if (retval < 0)
- /* should never happen, since we already checked,
- * that module is connected. Therefore no error
- * handling, just an optional error message...
- */
- dev_dbg(&spi->dev, "read 0x%x FAILED\n",
- addr);
- else
- dev_dbg(&spi->dev, "read 0x%x from reg 0x%x\n",
- retval,
- addr);
- #endif
-
- return retval;
-}
-
-int rf69_write_reg(struct spi_device *spi, u8 addr, u8 value)
-{
- int retval;
- char buffer[2];
-
- buffer[0] = addr | WRITE_BIT;
- buffer[1] = value;
-
- retval = spi_write(spi, &buffer, 2);
-
- #ifdef DEBUG_VALUES
- if (retval < 0)
- /* should never happen, since we already checked,
- * that module is connected. Therefore no error
- * handling, just an optional error message...
- */
- dev_dbg(&spi->dev, "write 0x%x to 0x%x FAILED\n",
- value,
- addr);
- else
- dev_dbg(&spi->dev, "wrote 0x%x to reg 0x%x\n",
- value,
- addr);
- #endif
-
- return retval;
-}
-
-
diff --git a/drivers/staging/pi433/rf69.h b/drivers/staging/pi433/rf69.h
index 5c0c95628f2f..09d221b8b6df 100644
--- a/drivers/staging/pi433/rf69.h
+++ b/drivers/staging/pi433/rf69.h
@@ -26,57 +26,42 @@
#define FIFO_THRESHOLD 15 /* in bytes */
int rf69_set_mode(struct spi_device *spi, enum mode mode);
-int rf69_set_data_mode(struct spi_device *spi, enum dataMode dataMode);
+int rf69_set_data_mode(struct spi_device *spi, u8 data_mode);
int rf69_set_modulation(struct spi_device *spi, enum modulation modulation);
-enum modulation rf69_get_modulation(struct spi_device *spi);
-int rf69_set_modulation_shaping(struct spi_device *spi, enum modShaping modShaping);
+int rf69_set_modulation_shaping(struct spi_device *spi, enum mod_shaping mod_shaping);
int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate);
int rf69_set_deviation(struct spi_device *spi, u32 deviation);
int rf69_set_frequency(struct spi_device *spi, u32 frequency);
-int rf69_set_amplifier_0(struct spi_device *spi, enum optionOnOff optionOnOff);
-int rf69_set_amplifier_1(struct spi_device *spi, enum optionOnOff optionOnOff);
-int rf69_set_amplifier_2(struct spi_device *spi, enum optionOnOff optionOnOff);
+int rf69_enable_amplifier(struct spi_device *spi, u8 amplifier_mask);
+int rf69_disable_amplifier(struct spi_device *spi, u8 amplifier_mask);
int rf69_set_output_power_level(struct spi_device *spi, u8 powerLevel);
int rf69_set_pa_ramp(struct spi_device *spi, enum paRamp paRamp);
int rf69_set_antenna_impedance(struct spi_device *spi, enum antennaImpedance antennaImpedance);
int rf69_set_lna_gain(struct spi_device *spi, enum lnaGain lnaGain);
-enum lnaGain rf69_get_lna_gain(struct spi_device *spi);
-int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi, u8 reg, enum dccPercent dccPercent);
-int rf69_set_dc_cut_off_frequency(struct spi_device *spi, enum dccPercent dccPercent);
-int rf69_set_dc_cut_off_frequency_during_afc(struct spi_device *spi, enum dccPercent dccPercent);
int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse, u8 exponent);
int rf69_set_bandwidth_during_afc(struct spi_device *spi, enum mantisse mantisse, u8 exponent);
-int rf69_set_ook_threshold_type(struct spi_device *spi, enum thresholdType thresholdType);
-int rf69_set_ook_threshold_step(struct spi_device *spi, enum thresholdStep thresholdStep);
int rf69_set_ook_threshold_dec(struct spi_device *spi, enum thresholdDecrement thresholdDecrement);
int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value);
bool rf69_get_flag(struct spi_device *spi, enum flag flag);
-int rf69_reset_flag(struct spi_device *spi, enum flag flag);
int rf69_set_rssi_threshold(struct spi_device *spi, u8 threshold);
-int rf69_set_rx_start_timeout(struct spi_device *spi, u8 timeout);
-int rf69_set_rssi_timeout(struct spi_device *spi, u8 timeout);
int rf69_set_preamble_length(struct spi_device *spi, u16 preambleLength);
-int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff);
-int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifoFillCondition fifoFillCondition);
+int rf69_enable_sync(struct spi_device *spi);
+int rf69_disable_sync(struct spi_device *spi);
+int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifo_fill_condition fifo_fill_condition);
int rf69_set_sync_size(struct spi_device *spi, u8 sync_size);
-int rf69_set_sync_tolerance(struct spi_device *spi, u8 syncTolerance);
int rf69_set_sync_values(struct spi_device *spi, u8 syncValues[8]);
int rf69_set_packet_format(struct spi_device *spi, enum packetFormat packetFormat);
-int rf69_set_crc_enable(struct spi_device *spi, enum optionOnOff optionOnOff);
+int rf69_enable_crc(struct spi_device *spi);
+int rf69_disable_crc(struct spi_device *spi);
int rf69_set_adressFiltering(struct spi_device *spi, enum addressFiltering addressFiltering);
-int rf69_set_payload_length(struct spi_device *spi, u8 payloadLength);
-u8 rf69_get_payload_length(struct spi_device *spi);
+int rf69_set_payload_length(struct spi_device *spi, u8 payload_length);
int rf69_set_node_address(struct spi_device *spi, u8 nodeAddress);
int rf69_set_broadcast_address(struct spi_device *spi, u8 broadcastAddress);
int rf69_set_tx_start_condition(struct spi_device *spi, enum txStartCondition txStartCondition);
int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold);
int rf69_set_dagc(struct spi_device *spi, enum dagc dagc);
-int rf69_read_fifo (struct spi_device *spi, u8 *buffer, unsigned int size);
+int rf69_read_fifo(struct spi_device *spi, u8 *buffer, unsigned int size);
int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size);
-u8 rf69_read_reg (struct spi_device *spi, u8 addr);
-int rf69_write_reg(struct spi_device *spi, u8 addr, u8 value);
-
-
#endif
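
The header now reflects the API shift from setters taking enum optionOnOff to explicit enable/disable pairs, which drops a switch per call site. A hedged sketch of how a caller migrates, with stub functions standing in for the real SPI helpers:

struct spi_device;	/* opaque in this sketch */

static int rf69_enable_crc_stub(struct spi_device *spi)
{
	(void)spi;
	return 0;
}

static int rf69_disable_crc_stub(struct spi_device *spi)
{
	(void)spi;
	return 0;
}

static int configure_crc(struct spi_device *spi, int want_crc)
{
	/* old API: rf69_set_crc_enable(spi, want_crc ? optionOn : optionOff); */
	return want_crc ? rf69_enable_crc_stub(spi)
			: rf69_disable_crc_stub(spi);
}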
diff --git a/drivers/staging/pi433/rf69_enum.h b/drivers/staging/pi433/rf69_enum.h
index fbfb59bd3f3d..03440cfa957c 100644
--- a/drivers/staging/pi433/rf69_enum.h
+++ b/drivers/staging/pi433/rf69_enum.h
@@ -18,184 +18,126 @@
#ifndef RF69_ENUM_H
#define RF69_ENUM_H
-enum optionOnOff
-{
- optionOff,
- optionOn
+enum mode {
+ mode_sleep,
+ standby,
+ synthesizer,
+ transmit,
+ receive
+};
+
+enum modulation {
+ OOK,
+ FSK,
+ UNDEF
+};
+
+enum mod_shaping {
+ SHAPING_OFF,
+ SHAPING_1_0,
+ SHAPING_0_5,
+ SHAPING_0_3,
+ SHAPING_BR,
+ SHAPING_2BR
+};
+
+enum paRamp {
+ ramp3400,
+ ramp2000,
+ ramp1000,
+ ramp500,
+ ramp250,
+ ramp125,
+ ramp100,
+ ramp62,
+ ramp50,
+ ramp40,
+ ramp31,
+ ramp25,
+ ramp20,
+ ramp15,
+ ramp12,
+ ramp10
+};
+
+enum antennaImpedance {
+ fiftyOhm,
+ twohundretOhm
+};
+
+enum lnaGain {
+ automatic,
+ max,
+ max_minus_6,
+ max_minus_12,
+ max_minus_24,
+ max_minus_36,
+ max_minus_48,
+ undefined
+};
+
+enum mantisse {
+ mantisse16,
+ mantisse20,
+ mantisse24
+};
+
+enum thresholdDecrement {
+ dec_every8th,
+ dec_every4th,
+ dec_every2nd,
+ dec_once,
+ dec_twice,
+ dec_4times,
+ dec_8times,
+ dec_16times
+};
+
+enum flag {
+ modeSwitchCompleted,
+ readyToReceive,
+ readyToSend,
+ pllLocked,
+ rssiExceededThreshold,
+ timeout,
+ automode,
+ syncAddressMatch,
+ fifo_full,
+// fifo_not_empty would collide with enum txStartCondition below; replaced by the following entries...
+ fifo_empty,
+ fifo_level_below_threshold,
+ fifo_overrun,
+ packetSent,
+ payload_ready,
+ crcOk,
+ batteryLow
+};
+
+enum fifo_fill_condition {
+ afterSyncInterrupt,
+ always
+};
+
+enum packetFormat {
+ packetLengthFix,
+ packetLengthVar
+};
+
+enum txStartCondition {
+ fifo_level,
+ fifo_not_empty
+};
+
+enum addressFiltering {
+ filteringOff,
+ nodeAddress,
+ nodeOrBroadcastAddress
+};
+
+enum dagc {
+ normalMode,
+ improve,
+ improve4LowModulationIndex
};
-enum mode
-{
- mode_sleep,
- standby,
- synthesizer,
- transmit,
- receive
-};
-
-enum dataMode
-{
- packet,
- continuous,
- continuousNoSync
-};
-
-enum modulation
-{
- OOK,
- FSK
-};
-
-enum modShaping
-{
- shapingOff,
- shaping1_0,
- shaping0_5,
- shaping0_3,
- shapingBR,
- shaping2BR
-};
-
-enum paRamp
-{
- ramp3400,
- ramp2000,
- ramp1000,
- ramp500,
- ramp250,
- ramp125,
- ramp100,
- ramp62,
- ramp50,
- ramp40,
- ramp31,
- ramp25,
- ramp20,
- ramp15,
- ramp12,
- ramp10
-};
-
-enum antennaImpedance
-{
- fiftyOhm,
- twohundretOhm
-};
-
-enum lnaGain
-{
- automatic,
- max,
- maxMinus6,
- maxMinus12,
- maxMinus24,
- maxMinus36,
- maxMinus48,
- undefined
-};
-
-enum dccPercent
-{
- dcc16Percent,
- dcc8Percent,
- dcc4Percent,
- dcc2Percent,
- dcc1Percent,
- dcc0_5Percent,
- dcc0_25Percent,
- dcc0_125Percent
-};
-
-enum mantisse
-{
- mantisse16,
- mantisse20,
- mantisse24
-};
-
-enum thresholdType
-{
- fixed,
- peak,
- average
-};
-
-enum thresholdStep
-{
- step_0_5db,
- step_1_0db,
- step_1_5db,
- step_2_0db,
- step_3_0db,
- step_4_0db,
- step_5_0db,
- step_6_0db
-};
-
-enum thresholdDecrement
-{
- dec_every8th,
- dec_every4th,
- dec_every2nd,
- dec_once,
- dec_twice,
- dec_4times,
- dec_8times,
- dec_16times
-};
-
-enum flag
-{
- modeSwitchCompleted,
- readyToReceive,
- readyToSend,
- pllLocked,
- rssiExceededThreshold,
- timeout,
- automode,
- syncAddressMatch,
- fifoFull,
-// fifoNotEmpty, collision with next enum; replaced by following enum...
- fifoEmpty,
- fifoLevelBelowThreshold,
- fifoOverrun,
- packetSent,
- payloadReady,
- crcOk,
- batteryLow
-};
-
-enum fifoFillCondition
-{
- afterSyncInterrupt,
- always
-};
-
-enum packetFormat
-{
- packetLengthFix,
- packetLengthVar
-};
-
-enum txStartCondition
-{
- fifoLevel,
- fifoNotEmpty
-};
-
-enum addressFiltering
-{
- filteringOff,
- nodeAddress,
- nodeOrBroadcastAddress
-};
-
-enum dagc
-{
- normalMode,
- improve,
- improve4LowModulationIndex
-};
-
-
#endif
diff --git a/drivers/staging/pi433/rf69_registers.h b/drivers/staging/pi433/rf69_registers.h
index 6335d42142fe..33fd91518bb0 100644
--- a/drivers/staging/pi433/rf69_registers.h
+++ b/drivers/staging/pi433/rf69_registers.h
@@ -188,8 +188,6 @@
#define MASK_PALEVEL_PA2 0x20
#define MASK_PALEVEL_OUTPUT_POWER 0x1F
-
-
// RegPaRamp
#define PARAMP_3400 0x00
#define PARAMP_2000 0x01
@@ -246,7 +244,6 @@
#define LNA_GAIN_MAX_MINUS_36 0x05
#define LNA_GAIN_MAX_MINUS_48 0x06
-
/* RegRxBw (0x19) and RegAfcBw (0x1A) */
#define MASK_BW_DCC_FREQ 0xE0
#define MASK_BW_MANTISSE 0x18
@@ -265,7 +262,6 @@
#define BW_MANT_20 0x08
#define BW_MANT_24 0x10 /* default */
-
/* RegOokPeak (0x1B) */
#define MASK_OOKPEAK_THRESTYPE 0xc0
#define MASK_OOKPEAK_THRESSTEP 0x38
@@ -346,28 +342,28 @@
#define DIO5 5
/* DIO Mapping values (packet mode) */
-#define DIO_ModeReady_DIO4 0x00
-#define DIO_ModeReady_DIO5 0x03
-#define DIO_ClkOut 0x00
-#define DIO_Data 0x01
-#define DIO_TimeOut_DIO1 0x03
-#define DIO_TimeOut_DIO4 0x00
-#define DIO_Rssi_DIO0 0x03
-#define DIO_Rssi_DIO3_4 0x01
-#define DIO_RxReady 0x02
-#define DIO_PLLLock 0x03
-#define DIO_TxReady 0x01
-#define DIO_FifoFull_DIO1 0x01
-#define DIO_FifoFull_DIO3 0x00
-#define DIO_SyncAddress 0x02
-#define DIO_FifoNotEmpty_DIO1 0x02
-#define DIO_FifoNotEmpty_FIO2 0x00
-#define DIO_Automode 0x04
-#define DIO_FifoLevel 0x00
-#define DIO_CrcOk 0x00
-#define DIO_PayloadReady 0x01
-#define DIO_PacketSent 0x00
-#define DIO_Dclk 0x00
+#define DIO_MODE_READY_DIO4 0x00
+#define DIO_MODE_READY_DIO5 0x03
+#define DIO_CLK_OUT 0x00
+#define DIO_DATA 0x01
+#define DIO_TIMEOUT_DIO1 0x03
+#define DIO_TIMEOUT_DIO4 0x00
+#define DIO_RSSI_DIO0 0x03
+#define DIO_RSSI_DIO3_4 0x01
+#define DIO_RX_READY 0x02
+#define DIO_PLL_LOCK 0x03
+#define DIO_TX_READY 0x01
+#define DIO_FIFO_FULL_DIO1 0x01
+#define DIO_FIFO_FULL_DIO3 0x00
+#define DIO_SYNC_ADDRESS 0x02
+#define DIO_FIFO_NOT_EMPTY_DIO1 0x02
+#define DIO_FIFO_NOT_EMPTY_FIO2 0x00
+#define DIO_AUTOMODE 0x04
+#define DIO_FIFO_LEVEL 0x00
+#define DIO_CRC_OK 0x00
+#define DIO_PAYLOAD_READY 0x01
+#define DIO_PACKET_SENT 0x00
+#define DIO_DCLK 0x00
/* RegDioMapping2 CLK_OUT part */
#define MASK_DIOMAPPING2_CLK_OUT 0x07
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index d73e9bdc80cc..bcb6919bb7d5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -5395,14 +5395,14 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
int len_diff = 0;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
ptxBeacon_parm = kmemdup(&(pmlmeinfo->network),
- sizeof(struct wlan_bssid_ex), GFP_KERNEL);
+ sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
if (ptxBeacon_parm == NULL) {
kfree(ph2c);
res = _FAIL;
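
The GFP_KERNEL to GFP_ATOMIC switch matters because GFP_KERNEL allocations may sleep for reclaim, which is illegal if set_tx_beacon_cmd() can be reached from atomic context (a timer, softirq, or spinlocked region); GFP_ATOMIC fails fast instead of sleeping. A kernel-style sketch of the rule, with a hypothetical helper name:

#include <linux/slab.h>

/* GFP_ATOMIC never sleeps but is more likely to fail;
 * GFP_KERNEL may block and belongs in process context only.
 */
static void *alloc_cmd_obj_sketch(size_t size, bool atomic_ctx)
{
	return kzalloc(size, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}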
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index 2867864bbfbe..e6867eea3530 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -22,7 +22,7 @@
u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
struct wl_pwr_cfg pwrseqcmd[])
{
- struct wl_pwr_cfg pwrcfgcmd = {0};
+ struct wl_pwr_cfg pwrcfgcmd;
u8 poll_bit = false;
u32 aryidx = 0;
u8 value = 0;
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 7bc9cb131bcc..30f72d220af1 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1317,7 +1317,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
bool bToOtherSTA = false;
int ret = 0, i = 0;
- hdr = (struct rtllib_hdr_4addr *)skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index c2b9ffba354a..919231fec09c 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1968,7 +1968,7 @@ void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee)
static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
{
- int timeout = ieee->ps_timeout;
+ int timeout;
u8 dtim;
struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl);
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index 64b13a5da3cb..ba284bfb3b6d 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -11,7 +11,7 @@ void Dot11d_Init(struct ieee80211_device *ieee)
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
RESET_CIE_WATCHDOG(ieee);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index cbf8eb4a049d..37a610d05ad2 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -110,12 +110,12 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
- tid ++;
+ tid++;
} else if (IEEE80211_QOS_HAS_SEQ(fc)) {
hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
- tid ++;
+ tid++;
} else {
tid = 0;
}
@@ -177,12 +177,12 @@ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
- tid ++;
+ tid++;
} else if (IEEE80211_QOS_HAS_SEQ(fc)) {
hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
- tid ++;
+ tid++;
} else {
tid = 0;
}
@@ -434,12 +434,12 @@ static int is_duplicate_packet(struct ieee80211_device *ieee,
hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)header;
tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
- tid ++;
+ tid++;
} else if(IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)header;
tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
- tid ++;
+ tid++;
} else { // no QoS
tid = 0;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index f56fdc7a4b61..25c186a8bde3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1725,7 +1725,7 @@ static void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee,
static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
u32 *time_l)
{
- int timeout = ieee->ps_timeout;
+ int timeout;
u8 dtim;
/*if(ieee->ps == IEEE80211_PS_DISABLED ||
ieee->iw_mode != IW_MODE_INFRA ||
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 09f66b386e44..3c300f7b6a62 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -4998,7 +4998,7 @@ fail:
kfree(priv->pFirmware);
priv->pFirmware = NULL;
rtl8192_usb_deleteendpoints(dev);
- mdelay(10);
+ msleep(10);
free_ieee80211(dev);
RT_TRACE(COMP_ERR, "wlan driver load failed\n");
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index de832b0b5eec..2a3f0746ee2c 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -336,7 +336,6 @@ uint rtl8712_hal_init(struct _adapter *padapter)
r8712_read32(padapter, RCR));
val32 = r8712_read32(padapter, RCR);
r8712_write32(padapter, RCR, (val32 | BIT(25))); /* Append PHY status */
- val32 = 0;
val32 = r8712_read32(padapter, 0x10250040);
r8712_write32(padapter, 0x10250040, (val32 & 0x00FFFFFF));
/* for usb rx aggregation */
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index 33e82a9dd462..987270395635 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -169,12 +169,13 @@ int r8712_generate_ie(struct registry_priv *pregistrypriv)
int sz = 0, rate_len;
struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
u8 *ie = pdev_network->IEs;
+ u16 beaconPeriod = (u16)pdev_network->Configuration.BeaconPeriod;
/*timestamp will be inserted by hardware*/
sz += 8;
ie += sz;
/*beacon interval : 2bytes*/
- *(__le16 *)ie = cpu_to_le16((u16)pdev_network->Configuration.BeaconPeriod);
+ *(__le16 *)ie = cpu_to_le16(beaconPeriod);
sz += 2;
ie += 2;
/*capability info*/
@@ -221,7 +222,8 @@ unsigned char *r8712_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
pbuf = r8712_get_ie(pbuf, _WPA_IE_ID_, &len, limit);
if (pbuf) {
/*check if oui matches...*/
- if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)))
+ if (memcmp((pbuf + 2), wpa_oui_type,
+ sizeof(wpa_oui_type)))
goto check_next_ie;
/*check version...*/
memcpy((u8 *)&val16, (pbuf + 6), sizeof(val16));
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 95caf8df9a13..e7df5d7986fc 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -238,10 +238,13 @@ static u32 start_drv_threads(struct _adapter *padapter)
void r8712_stop_drv_threads(struct _adapter *padapter)
{
+ struct completion *completion =
+ &padapter->cmdpriv.terminate_cmdthread_comp;
+
/*Below is to terminate r8712_cmd_thread & event_thread...*/
complete(&padapter->cmdpriv.cmd_queue_comp);
if (padapter->cmdThread)
- wait_for_completion_interruptible(&padapter->cmdpriv.terminate_cmdthread_comp);
+ wait_for_completion_interruptible(completion);
padapter->cmdpriv.cmd_seq = 1;
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 3c88994fdfcd..9c8e0c50a804 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -321,10 +321,13 @@ int r8712_cmd_thread(void *context)
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct _adapter *padapter = context;
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct completion *cmd_queue_comp =
+ &pcmdpriv->cmd_queue_comp;
+ struct mutex *pwctrl_lock = &padapter->pwrctrlpriv.mutex_lock;
allow_signal(SIGTERM);
while (1) {
- if (wait_for_completion_interruptible(&pcmdpriv->cmd_queue_comp))
+ if (wait_for_completion_interruptible(cmd_queue_comp))
break;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
break;
@@ -343,6 +346,7 @@ _next:
if (pcmd) { /* if pcmd != NULL, cmd will be handled by f/w */
struct dvobj_priv *pdvobj = &padapter->dvobjpriv;
u8 blnPending = 0;
+ u16 cmdcode = pcmd->cmdcode;
pcmdpriv->cmd_issued_cnt++;
cmdsz = round_up(pcmd->cmdsz, 8);
@@ -387,20 +391,18 @@ _next:
r8712_write_mem(padapter, RTL8712_DMA_H2CCMD, wr_sz,
(u8 *)pdesc);
pcmdpriv->cmd_seq++;
- if (pcmd->cmdcode == GEN_CMD_CODE(_CreateBss)) {
+ if (cmdcode == GEN_CMD_CODE(_CreateBss)) {
pcmd->res = H2C_SUCCESS;
- pcmd_callback = cmd_callback[pcmd->
- cmdcode].callback;
+ pcmd_callback = cmd_callback[cmdcode].callback;
if (pcmd_callback)
pcmd_callback(padapter, pcmd);
continue;
}
- if (pcmd->cmdcode == GEN_CMD_CODE(_SetPwrMode)) {
+ if (cmdcode == GEN_CMD_CODE(_SetPwrMode)) {
if (padapter->pwrctrlpriv.bSleep) {
- mutex_lock(&padapter->
- pwrctrlpriv.mutex_lock);
+ mutex_lock(pwctrl_lock);
r8712_set_rpwm(padapter, PS_STATE_S2);
- mutex_unlock(&padapter->pwrctrlpriv.mutex_lock);
+ mutex_unlock(pwctrl_lock);
}
}
r8712_free_cmd_obj(pcmd);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 8f555e6e1b3f..4264cd341f03 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -899,6 +899,7 @@ static void process_link_qual(struct _adapter *padapter,
{
u32 last_evm = 0, tmpVal;
struct rx_pkt_attrib *pattrib;
+ struct smooth_rssi_data *sqd = &padapter->recvpriv.signal_qual_data;
if (prframe == NULL || padapter == NULL)
return;
@@ -907,27 +908,18 @@ static void process_link_qual(struct _adapter *padapter,
/*
* 1. Record the general EVM to the sliding window.
*/
- if (padapter->recvpriv.signal_qual_data.total_num++ >=
- PHY_LINKQUALITY_SLID_WIN_MAX) {
- padapter->recvpriv.signal_qual_data.total_num =
- PHY_LINKQUALITY_SLID_WIN_MAX;
- last_evm = padapter->recvpriv.signal_qual_data.elements
- [padapter->recvpriv.signal_qual_data.index];
- padapter->recvpriv.signal_qual_data.total_val -=
- last_evm;
+ if (sqd->total_num++ >= PHY_LINKQUALITY_SLID_WIN_MAX) {
+ sqd->total_num = PHY_LINKQUALITY_SLID_WIN_MAX;
+ last_evm = sqd->elements[sqd->index];
+ sqd->total_val -= last_evm;
}
- padapter->recvpriv.signal_qual_data.total_val +=
- pattrib->signal_qual;
- padapter->recvpriv.signal_qual_data.elements[padapter->
- recvpriv.signal_qual_data.index++] =
- pattrib->signal_qual;
- if (padapter->recvpriv.signal_qual_data.index >=
- PHY_LINKQUALITY_SLID_WIN_MAX)
- padapter->recvpriv.signal_qual_data.index = 0;
+ sqd->total_val += pattrib->signal_qual;
+ sqd->elements[sqd->index++] = pattrib->signal_qual;
+ if (sqd->index >= PHY_LINKQUALITY_SLID_WIN_MAX)
+ sqd->index = 0;
/* <1> Showed on UI for user, in percentage. */
- tmpVal = padapter->recvpriv.signal_qual_data.total_val /
- padapter->recvpriv.signal_qual_data.total_num;
+ tmpVal = sqd->total_val / sqd->total_num;
padapter->recvpriv.signal = (u8)tmpVal;
}
}
@@ -936,25 +928,18 @@ static void process_rssi(struct _adapter *padapter, union recv_frame *prframe)
{
u32 last_rssi, tmp_val;
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct smooth_rssi_data *ssd = &padapter->recvpriv.signal_strength_data;
- if (padapter->recvpriv.signal_strength_data.total_num++ >=
- PHY_RSSI_SLID_WIN_MAX) {
- padapter->recvpriv.signal_strength_data.total_num =
- PHY_RSSI_SLID_WIN_MAX;
- last_rssi = padapter->recvpriv.signal_strength_data.elements
- [padapter->recvpriv.signal_strength_data.index];
- padapter->recvpriv.signal_strength_data.total_val -= last_rssi;
+ if (ssd->total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
+ ssd->total_num = PHY_RSSI_SLID_WIN_MAX;
+ last_rssi = ssd->elements[ssd->index];
+ ssd->total_val -= last_rssi;
}
- padapter->recvpriv.signal_strength_data.total_val +=
- pattrib->signal_strength;
- padapter->recvpriv.signal_strength_data.elements[padapter->recvpriv.
- signal_strength_data.index++] =
- pattrib->signal_strength;
- if (padapter->recvpriv.signal_strength_data.index >=
- PHY_RSSI_SLID_WIN_MAX)
- padapter->recvpriv.signal_strength_data.index = 0;
- tmp_val = padapter->recvpriv.signal_strength_data.total_val /
- padapter->recvpriv.signal_strength_data.total_num;
+ ssd->total_val += pattrib->signal_strength;
+ ssd->elements[ssd->index++] = pattrib->signal_strength;
+ if (ssd->index >= PHY_RSSI_SLID_WIN_MAX)
+ ssd->index = 0;
+ tmp_val = ssd->total_val / ssd->total_num;
padapter->recvpriv.rssi = (s8)translate2dbm(padapter, (u8)tmp_val);
}
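
process_link_qual() and process_rssi() implement the same sliding-window average: a circular buffer of the last N samples plus a running sum, so each packet updates the mean in O(1). A self-contained userspace sketch of the structure the refactor aliases as sqd/ssd; WIN_MAX is a stand-in for PHY_RSSI_SLID_WIN_MAX and its real value may differ:

#include <stdint.h>

#define WIN_MAX 20	/* assumed window size */

struct smooth_data_sketch {
	uint32_t elements[WIN_MAX];	/* circular buffer of samples */
	uint32_t index;			/* next slot to overwrite */
	uint32_t total_num;		/* samples seen, capped at WIN_MAX */
	uint32_t total_val;		/* running sum over the window */
};

/* Push one sample and return the windowed mean in O(1). */
static uint32_t smooth_push(struct smooth_data_sketch *d, uint32_t sample)
{
	if (d->total_num++ >= WIN_MAX) {
		d->total_num = WIN_MAX;
		d->total_val -= d->elements[d->index];	/* drop the oldest */
	}
	d->total_val += sample;
	d->elements[d->index++] = sample;
	if (d->index >= WIN_MAX)
		d->index = 0;
	return d->total_val / d->total_num;
}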
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index 42d014007764..fb64c2891e22 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -573,7 +573,8 @@ static void update_txdesc(struct xmit_frame *pxmitframe, uint *pmem, int sz)
}
} else if (pxmitframe->frame_tag == MGNT_FRAMETAG) {
/* offset 4 */
- ptxdesc->txdw1 |= cpu_to_le32((0x05) & 0x1f);/*CAM_ID(MAC_ID), default=5;*/
+ /* CAM_ID(MAC_ID), default=5; */
+ ptxdesc->txdw1 |= cpu_to_le32((0x05) & 0x1f);
qsel = (uint)(pattrib->qsel & 0x0000001f);
ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
ptxdesc->txdw1 |= cpu_to_le32(BIT(16));/* Non-QoS */
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index a424f447a725..620cee8b8514 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -455,8 +455,8 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
- enum NDIS_802_11_NETWORK_INFRASTRUCTURE ndis_network_mode = pnetwork->
- network.InfrastructureMode;
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE ndis_network_mode =
+ pnetwork->network.InfrastructureMode;
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
@@ -862,22 +862,22 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
pnetwork->Privacy = le32_to_cpu(pnetwork->Privacy);
pnetwork->Rssi = le32_to_cpu(pnetwork->Rssi);
pnetwork->NetworkTypeInUse = le32_to_cpu(pnetwork->NetworkTypeInUse);
- pnetwork->Configuration.ATIMWindow = le32_to_cpu(pnetwork->
- Configuration.ATIMWindow);
- pnetwork->Configuration.DSConfig = le32_to_cpu(pnetwork->
- Configuration.DSConfig);
- pnetwork->Configuration.FHConfig.DwellTime = le32_to_cpu(pnetwork->
- Configuration.FHConfig.DwellTime);
- pnetwork->Configuration.FHConfig.HopPattern = le32_to_cpu(pnetwork->
- Configuration.FHConfig.HopPattern);
- pnetwork->Configuration.FHConfig.HopSet = le32_to_cpu(pnetwork->
- Configuration.FHConfig.HopSet);
- pnetwork->Configuration.FHConfig.Length = le32_to_cpu(pnetwork->
- Configuration.FHConfig.Length);
- pnetwork->Configuration.Length = le32_to_cpu(pnetwork->
- Configuration.Length);
- pnetwork->InfrastructureMode = le32_to_cpu(pnetwork->
- InfrastructureMode);
+ pnetwork->Configuration.ATIMWindow =
+ le32_to_cpu(pnetwork->Configuration.ATIMWindow);
+ pnetwork->Configuration.DSConfig =
+ le32_to_cpu(pnetwork->Configuration.DSConfig);
+ pnetwork->Configuration.FHConfig.DwellTime =
+ le32_to_cpu(pnetwork->Configuration.FHConfig.DwellTime);
+ pnetwork->Configuration.FHConfig.HopPattern =
+ le32_to_cpu(pnetwork->Configuration.FHConfig.HopPattern);
+ pnetwork->Configuration.FHConfig.HopSet =
+ le32_to_cpu(pnetwork->Configuration.FHConfig.HopSet);
+ pnetwork->Configuration.FHConfig.Length =
+ le32_to_cpu(pnetwork->Configuration.FHConfig.Length);
+ pnetwork->Configuration.Length =
+ le32_to_cpu(pnetwork->Configuration.Length);
+ pnetwork->InfrastructureMode =
+ le32_to_cpu(pnetwork->InfrastructureMode);
pnetwork->IELength = le32_to_cpu(pnetwork->IELength);
#endif
spin_lock_irqsave(&pmlmepriv->lock, irqL);
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index e30a5be5f318..c3ff7c3e6681 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -114,23 +114,27 @@ static inline void handle_pairwise_key(struct sta_info *psta,
static inline void handle_group_key(struct ieee_param *param,
struct _adapter *padapter)
{
+ union Keytype *gk = padapter->securitypriv.XGrpKey;
+ union Keytype *gtk = padapter->securitypriv.XGrptxmickey;
+ union Keytype *grk = padapter->securitypriv.XGrprxmickey;
+
if (param->u.crypt.idx > 0 &&
param->u.crypt.idx < 3) {
/* group key idx is 1 or 2 */
- memcpy(padapter->securitypriv.XGrpKey[param->u.crypt.
- idx - 1].skey, param->u.crypt.key,
- (param->u.crypt.key_len > 16 ? 16 :
- param->u.crypt.key_len));
- memcpy(padapter->securitypriv.XGrptxmickey[param->
- u.crypt.idx - 1].skey, &(param->u.crypt.key[16]), 8);
- memcpy(padapter->securitypriv. XGrprxmickey[param->
- u.crypt.idx - 1].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(gk[param->u.crypt.idx - 1].skey,
+ param->u.crypt.key,
+ (param->u.crypt.key_len > 16 ? 16 :
+ param->u.crypt.key_len));
+ memcpy(gtk[param->u.crypt.idx - 1].skey,
+ &param->u.crypt.key[16], 8);
+ memcpy(grk[param->u.crypt.idx - 1].skey,
+ &param->u.crypt.key[24], 8);
padapter->securitypriv.binstallGrpkey = true;
r8712_set_key(padapter, &padapter->securitypriv,
param->u.crypt.idx);
if (padapter->registrypriv.power_mgnt > PS_MODE_ACTIVE) {
- if (padapter->registrypriv.power_mgnt != padapter->
- pwrctrlpriv.pwr_mode)
+ if (padapter->registrypriv.power_mgnt !=
+ padapter->pwrctrlpriv.pwr_mode)
mod_timer(&padapter->mlmepriv.dhcp_timer,
jiffies + msecs_to_jiffies(60000));
}
@@ -216,9 +220,9 @@ static noinline_for_stack char *translate_scan(struct _adapter *padapter,
if (dsconfig >= 1 && dsconfig <= sizeof(
ieee80211_wlan_frequencies) / sizeof(long))
- iwe.u.freq.m = (s32)(ieee80211_wlan_frequencies[
- pnetwork->network.Configuration.
- DSConfig - 1] * 100000);
+ iwe.u.freq.m =
+ (s32)(ieee80211_wlan_frequencies
+ [dsconfig - 1] * 100000);
else
iwe.u.freq.m = 0;
}
@@ -425,9 +429,9 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
ret = -EOPNOTSUPP;
goto exit;
}
- memcpy(&(psecuritypriv->DefKey[wep_key_idx].
- skey[0]), pwep->KeyMaterial,
- pwep->KeyLength);
+ memcpy(&psecuritypriv->DefKey[wep_key_idx].skey[0],
+ pwep->KeyMaterial,
+ pwep->KeyLength);
psecuritypriv->DefKeylen[wep_key_idx] =
pwep->KeyLength;
r8712_set_key(padapter, psecuritypriv, wep_key_idx);
@@ -437,6 +441,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (padapter->securitypriv.AuthAlgrthm == 2) { /* 802_1x */
struct sta_info *psta, *pbcmc_sta;
struct sta_priv *pstapriv = &padapter->stapriv;
+ struct security_priv *spriv = &padapter->securitypriv;
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE |
WIFI_MP_STATE)) { /* sta mode */
@@ -444,12 +449,11 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
get_bssid(pmlmepriv));
if (psta) {
psta->ieee8021x_blocked = false;
- if ((padapter->securitypriv.ndisencryptstatus ==
- Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus ==
- Ndis802_11Encryption3Enabled))
- psta->XPrivacy = padapter->
- securitypriv.PrivacyAlgrthm;
+ if (spriv->ndisencryptstatus ==
+ Ndis802_11Encryption2Enabled ||
+ spriv->ndisencryptstatus ==
+ Ndis802_11Encryption3Enabled)
+ psta->XPrivacy = spriv->PrivacyAlgrthm;
if (param->u.crypt.set_tx == 1)
handle_pairwise_key(psta, param,
padapter);
@@ -459,13 +463,12 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
pbcmc_sta = r8712_get_bcmc_stainfo(padapter);
if (pbcmc_sta) {
pbcmc_sta->ieee8021x_blocked = false;
- if ((padapter->securitypriv.ndisencryptstatus ==
- Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus ==
- Ndis802_11Encryption3Enabled))
+ if (spriv->ndisencryptstatus ==
+ Ndis802_11Encryption2Enabled ||
+ spriv->ndisencryptstatus ==
+ Ndis802_11Encryption3Enabled)
pbcmc_sta->XPrivacy =
- padapter->securitypriv.
- PrivacyAlgrthm;
+ spriv->PrivacyAlgrthm;
}
}
}
@@ -763,6 +766,7 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
struct _adapter *padapter = netdev_priv(dev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct iw_pmksa *pPMK = (struct iw_pmksa *) extra;
+ struct RT_PMKID_LIST *pl = psecuritypriv->PMKIDList;
u8 strZeroMacAddress[ETH_ALEN] = {0x00};
u8 strIssueBssid[ETH_ALEN] = {0x00};
u8 j, blInserted = false;
@@ -787,16 +791,14 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
blInserted = false;
/* overwrite PMKID */
for (j = 0; j < NUM_PMKID_CACHE; j++) {
- if (!memcmp(psecuritypriv->PMKIDList[j].Bssid,
- strIssueBssid, ETH_ALEN)) {
+ if (!memcmp(pl[j].Bssid, strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => rewrite
* with new PMKID.
*/
netdev_info(dev, "r8712u: %s: BSSID exists in the PMKList.\n",
__func__);
- memcpy(psecuritypriv->PMKIDList[j].PMKID,
- pPMK->pmkid, IW_PMKID_LEN);
- psecuritypriv->PMKIDList[j].bUsed = true;
+ memcpy(pl[j].PMKID, pPMK->pmkid, IW_PMKID_LEN);
+ pl[j].bUsed = true;
psecuritypriv->PMKIDIndex = j + 1;
blInserted = true;
break;
@@ -806,12 +808,11 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
/* Find a new entry */
netdev_info(dev, "r8712u: %s: Use the new entry index = %d for this PMKID.\n",
__func__, psecuritypriv->PMKIDIndex);
- memcpy(psecuritypriv->PMKIDList[psecuritypriv->
- PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN);
- memcpy(psecuritypriv->PMKIDList[psecuritypriv->
- PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
- psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].
- bUsed = true;
+ memcpy(pl[psecuritypriv->PMKIDIndex].Bssid,
+ strIssueBssid, ETH_ALEN);
+ memcpy(pl[psecuritypriv->PMKIDIndex].PMKID,
+ pPMK->pmkid, IW_PMKID_LEN);
+ pl[psecuritypriv->PMKIDIndex].bUsed = true;
psecuritypriv->PMKIDIndex++;
if (psecuritypriv->PMKIDIndex == NUM_PMKID_CACHE)
psecuritypriv->PMKIDIndex = 0;
@@ -820,13 +821,12 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
case IW_PMKSA_REMOVE:
intReturn = true;
for (j = 0; j < NUM_PMKID_CACHE; j++) {
- if (!memcmp(psecuritypriv->PMKIDList[j].Bssid,
- strIssueBssid, ETH_ALEN)) {
+ if (!memcmp(pl[j].Bssid, strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => Remove
* this PMKID information and reset it.
*/
- eth_zero_addr(psecuritypriv->PMKIDList[j].Bssid);
- psecuritypriv->PMKIDList[j].bUsed = false;
+ eth_zero_addr(pl[j].Bssid);
+ pl[j].bUsed = false;
break;
}
}
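
Most of the churn in this file is one mechanical transform: hoist a deeply nested member chain into a short local pointer once, then index through it, which keeps lines under 80 columns and avoids repeating the address computation. A tiny sketch with invented types:

#include <string.h>

struct entry_sketch {
	unsigned char bssid[6];
	int used;
};

struct priv_sketch {
	struct entry_sketch list[16];
};

/* Alias the array once instead of repeating the full chain per access. */
static void clear_entry(struct priv_sketch *priv, int j)
{
	struct entry_sketch *pl = priv->list;	/* mirrors 'pl' above */

	memset(pl[j].bssid, 0, sizeof(pl[j].bssid));
	pl[j].used = 0;
}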
@@ -1598,6 +1598,7 @@ static int r8711_wx_get_enc(struct net_device *dev,
struct _adapter *padapter = netdev_priv(dev);
struct iw_point *erq = &(wrqu->encoding);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ union Keytype *dk = padapter->securitypriv.DefKey;
if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -1624,9 +1625,8 @@ static int r8711_wx_get_enc(struct net_device *dev,
case Ndis802_11Encryption1Enabled:
erq->length = padapter->securitypriv.DefKeylen[key];
if (erq->length) {
- memcpy(keybuf, padapter->securitypriv.DefKey[
- key].skey, padapter->securitypriv.
- DefKeylen[key]);
+ memcpy(keybuf, dk[key].skey,
+ padapter->securitypriv.DefKeylen[key]);
erq->flags |= IW_ENCODE_ENABLED;
if (padapter->securitypriv.ndisauthtype ==
Ndis802_11AuthModeOpen)
@@ -1719,12 +1719,12 @@ static int r871x_wx_set_auth(struct net_device *dev,
*/
if (padapter->securitypriv.ndisencryptstatus ==
Ndis802_11Encryption1Enabled) {
- /* it means init value, or using wep,
- * ndisencryptstatus =
- * Ndis802_11Encryption1Enabled,
- * then it needn't reset it;
- */
- break;
+ /* it means init value, or using wep,
+ * ndisencryptstatus =
+ * Ndis802_11Encryption1Enabled,
+ * then it needn't reset it;
+ */
+ break;
}
if (paramval) {
@@ -1853,7 +1853,7 @@ static int dummy(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
- return -ENOSYS;
+ return -EINVAL;
}
static int r8711_drvext_hdl(struct net_device *dev,
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 8a5ced4fa9d3..f4a53df7f2c1 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -55,6 +55,7 @@ static u8 do_join(struct _adapter *padapter)
u8 *pibss = NULL;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct __queue *queue = &(pmlmepriv->scanned_queue);
+ int ret;
phead = &queue->queue;
plist = phead->next;
@@ -74,45 +75,42 @@ static u8 do_join(struct _adapter *padapter)
if (!pmlmepriv->sitesurveyctrl.traffic_busy)
r8712_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid);
return true;
- } else {
- int ret;
+ }
- ret = r8712_select_and_join_from_scan(pmlmepriv);
- if (ret == _SUCCESS) {
- mod_timer(&pmlmepriv->assoc_timer,
- jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
+ ret = r8712_select_and_join_from_scan(pmlmepriv);
+ if (ret == _SUCCESS) {
+ mod_timer(&pmlmepriv->assoc_timer,
+ jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
+ } else {
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
+ /* submit r8712_createbss_cmd to change to an
+ * ADHOC_MASTER pmlmepriv->lock has been
+ * acquired by caller...
+ */
+ struct wlan_bssid_ex *pdev_network =
+ &(padapter->registrypriv.dev_network);
+ pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
+ pibss = padapter->registrypriv.dev_network.MacAddress;
+ memcpy(&pdev_network->Ssid,
+ &pmlmepriv->assoc_ssid,
+ sizeof(struct ndis_802_11_ssid));
+ r8712_update_registrypriv_dev_network(padapter);
+ r8712_generate_random_ibss(pibss);
+ if (r8712_createbss_cmd(padapter) != _SUCCESS)
+ return false;
+ pmlmepriv->to_join = false;
} else {
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- /* submit r8712_createbss_cmd to change to an
- * ADHOC_MASTER pmlmepriv->lock has been
- * acquired by caller...
- */
- struct wlan_bssid_ex *pdev_network =
- &(padapter->registrypriv.dev_network);
- pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
- pibss = padapter->registrypriv.dev_network.
- MacAddress;
- memcpy(&pdev_network->Ssid,
- &pmlmepriv->assoc_ssid,
- sizeof(struct ndis_802_11_ssid));
- r8712_update_registrypriv_dev_network(padapter);
- r8712_generate_random_ibss(pibss);
- if (r8712_createbss_cmd(padapter) != _SUCCESS)
- return false;
- pmlmepriv->to_join = false;
- } else {
- /* can't associate ; reset under-linking */
- if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
- pmlmepriv->fw_state ^=
- _FW_UNDER_LINKING;
- /* when set_ssid/set_bssid for do_join(), but
- * there are no desired bss in scanning queue
- * we try to issue sitesurvey first
- */
- if (!pmlmepriv->sitesurveyctrl.traffic_busy)
- r8712_sitesurvey_cmd(padapter,
- &pmlmepriv->assoc_ssid);
- }
+ /* can't associate ; reset under-linking */
+ if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
+ pmlmepriv->fw_state ^=
+ _FW_UNDER_LINKING;
+ /* when set_ssid/set_bssid for do_join(), but
+ * there are no desired bss in scanning queue
+ * we try to issue sitesurvey first
+ */
+ if (!pmlmepriv->sitesurveyctrl.traffic_busy)
+ r8712_sitesurvey_cmd(padapter,
+ &pmlmepriv->assoc_ssid);
}
}
return true;
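
The do_join() rework is pure de-indentation: once the survey branch ends in return, the trailing else adds nothing but a nesting level. A minimal sketch of the pattern:

static int do_work_stub(void)
{
	return 0;
}

/* Before: else-after-return forces an extra indent level. */
static int join_before(int busy)
{
	if (busy)
		return 1;
	else
		return do_work_stub();
}

/* After: the else is redundant once the taken branch returns. */
static int join_after(int busy)
{
	if (busy)
		return 1;
	return do_work_stub();
}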
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 111c809afc51..78245080e328 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -319,6 +319,7 @@ static void update_network(struct wlan_bssid_ex *dst,
struct _adapter *padapter)
{
u32 last_evm = 0, tmpVal;
+ struct smooth_rssi_data *sqd = &padapter->recvpriv.signal_qual_data;
if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) &&
is_same_network(&(padapter->mlmepriv.cur_network.network), src)) {
@@ -326,17 +327,13 @@ static void update_network(struct wlan_bssid_ex *dst,
PHY_LINKQUALITY_SLID_WIN_MAX) {
padapter->recvpriv.signal_qual_data.total_num =
PHY_LINKQUALITY_SLID_WIN_MAX;
- last_evm = padapter->recvpriv.signal_qual_data.
- elements[padapter->recvpriv.
- signal_qual_data.index];
+ last_evm = sqd->elements[sqd->index];
padapter->recvpriv.signal_qual_data.total_val -=
last_evm;
}
padapter->recvpriv.signal_qual_data.total_val += src->Rssi;
- padapter->recvpriv.signal_qual_data.
- elements[padapter->recvpriv.signal_qual_data.
- index++] = src->Rssi;
+ sqd->elements[sqd->index++] = src->Rssi;
if (padapter->recvpriv.signal_qual_data.index >=
PHY_LINKQUALITY_SLID_WIN_MAX)
padapter->recvpriv.signal_qual_data.index = 0;
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index b21f28140f53..918947f38151 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -55,7 +55,8 @@
* single-tone
*/
#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in cont, tx
- * background due to out of skb
+ * background due
+ * to out of skb
*/
#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx*/
#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in cont, tx with carrier
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index 3c10a2c848c8..ba208a2e1e4e 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -541,7 +541,7 @@ void r8712_SetSingleCarrierTx(struct _adapter *pAdapter, u8 bStart)
void r8712_SetSingleToneTx(struct _adapter *pAdapter, u8 bStart)
{
- u8 rfPath = pAdapter->mppriv.curr_rfpath;
+ u8 rfPath;
switch (pAdapter->mppriv.antenna_tx) {
case ANTENNA_B:
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 56d36f6f9c46..7bc74d7d8a3a 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -165,7 +165,7 @@ void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe)
{ /* exclude ICV */
unsigned char crc[4];
struct arc4context mycontext;
- u32 curfragnum, length, keylength;
+ u32 curfragnum, length, keylength, pki;
u8 *pframe, *payload, *iv; /*,*wepkey*/
u8 wepkey[16];
struct pkt_attrib *pattrib = &((struct xmit_frame *)
@@ -178,8 +178,8 @@ void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe)
pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + TXDESC_OFFSET;
/*start to encrypt each fragment*/
if ((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) {
- keylength = psecuritypriv->DefKeylen[psecuritypriv->
- PrivacyKeyIndex];
+ pki = psecuritypriv->PrivacyKeyIndex;
+ keylength = psecuritypriv->DefKeylen[pki];
for (curfragnum = 0; curfragnum < pattrib->nr_frags;
curfragnum++) {
iv = pframe + pattrib->hdrlen;
@@ -189,9 +189,10 @@ void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe)
keylength);
payload = pframe + pattrib->iv_len + pattrib->hdrlen;
if ((curfragnum + 1) == pattrib->nr_frags) {
- length = pattrib->last_txcmdsz - pattrib->
- hdrlen - pattrib->iv_len -
- pattrib->icv_len;
+ length = pattrib->last_txcmdsz -
+ pattrib->hdrlen -
+ pattrib->iv_len -
+ pattrib->icv_len;
*((__le32 *)crc) = cpu_to_le32(getcrc32(
payload, length));
arcfour_init(&mycontext, wepkey, 3 + keylength);
@@ -606,8 +607,8 @@ u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe)
GET_TKIP_PN(iv, txpn);
pnl = (u16)(txpn.val);
pnh = (u32)(txpn.val >> 16);
- phase1((u16 *)&ttkey[0], prwskey, &pattrib->
- ta[0], pnh);
+ phase1((u16 *)&ttkey[0], prwskey,
+ &pattrib->ta[0], pnh);
phase2(&rc4key[0], prwskey, (u16 *)&ttkey[0],
pnl);
if ((curfragnum + 1) == pattrib->nr_frags) {
@@ -997,8 +998,9 @@ static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, sint a4_exists,
/* Builds the last MIC header block from */
/* header fields. */
/************************************************/
-static void construct_ctr_preload(u8 *ctr_preload, sint a4_exists, sint qc_exists,
- u8 *mpdu, u8 *pn_vector, sint c)
+static void construct_ctr_preload(u8 *ctr_preload,
+ sint a4_exists, sint qc_exists,
+ u8 *mpdu, u8 *pn_vector, sint c)
{
sint i;
@@ -1067,16 +1069,16 @@ static sint aes_cipher(u8 *key, uint hdrlen,
if ((frtype == WIFI_DATA_CFACK) ||
(frtype == WIFI_DATA_CFPOLL) ||
(frtype == WIFI_DATA_CFACKPOLL)) {
- qc_exists = 1;
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
+ qc_exists = 1;
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
} else if ((frsubtype == 0x08) ||
(frsubtype == 0x09) ||
(frsubtype == 0x0a) ||
(frsubtype == 0x0b)) {
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
- qc_exists = 1;
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ qc_exists = 1;
} else {
qc_exists = 0;
}
@@ -1184,15 +1186,15 @@ u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe)
pattrib->hdrlen -
pattrib->iv_len -
pattrib->icv_len;
- aes_cipher(prwskey, pattrib->
- hdrlen, pframe, length);
+ aes_cipher(prwskey, pattrib->hdrlen,
+ pframe, length);
} else {
length = pxmitpriv->frag_len -
pattrib->hdrlen -
pattrib->iv_len -
pattrib->icv_len;
- aes_cipher(prwskey, pattrib->
- hdrlen, pframe, length);
+ aes_cipher(prwskey, pattrib->hdrlen,
+ pframe, length);
pframe += pxmitpriv->frag_len;
pframe = (u8 *)RND4((addr_t)(pframe));
}
@@ -1405,7 +1407,7 @@ u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
void r8712_use_tkipkey_handler(struct timer_list *t)
{
struct _adapter *padapter =
- from_timer(padapter, t, securitypriv.tkip_timer);
+ from_timer(padapter, t, securitypriv.tkip_timer);
padapter->securitypriv.busetkipkey = true;
}
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 441e76b8959d..6d12a96fa65f 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -145,7 +145,7 @@ static unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
break;
}
} else {
- pipe = 0;
+ pipe = 0;
}
return pipe;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 9ac2dea6dff1..af0a9e0a00df 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -846,9 +846,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
if (psecnetwork == NULL) {
- if (pcmd != NULL)
- kfree(pcmd);
-
+ kfree(pcmd);
res = _FAIL;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd :psecnetwork == NULL!!!\n"));
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index c16e147d8adc..a99a863be656 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -1052,7 +1052,7 @@ static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
/* Check failed */
regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
- regEA4 = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord);;
+ regEA4 = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x, 0xeac = 0x%x\n", regEA4, regEAC));
diff --git a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
index f4619768a99f..334d680b9b1c 100644
--- a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
@@ -50,7 +50,7 @@ u8 HalPwrSeqCmdParsing(
WLAN_PWR_CFG PwrSeqCmd[]
)
{
- WLAN_PWR_CFG PwrCfgCmd = {0};
+ WLAN_PWR_CFG PwrCfgCmd;
u8 bPollingBit = false;
u32 AryIdx = 0;
u8 value = 0;
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index e6787c22e00b..93d6cc478706 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -66,8 +66,7 @@ u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 Band, u8 RfPath,
DBG_871X("Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
RateSection, RfPath, TxNum);
break;
-
- };
+ }
} else if (Band == BAND_ON_5G) {
switch (RateSection) {
case OFDM:
@@ -101,7 +100,7 @@ u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 Band, u8 RfPath,
DBG_871X("Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
RateSection, RfPath, TxNum);
break;
- };
+ }
} else
DBG_871X("Invalid Band %d in PHY_GetTxPowerByRateBase()\n", Band);
@@ -161,7 +160,7 @@ phy_SetTxPowerByRateBase(
DBG_871X("Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in phy_SetTxPowerByRateBase()\n",
RateSection, RfPath, TxNum);
break;
- };
+ }
} else if (Band == BAND_ON_5G) {
switch (RateSection) {
case OFDM:
@@ -195,7 +194,7 @@ phy_SetTxPowerByRateBase(
DBG_871X("Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in phy_SetTxPowerByRateBase()\n",
RateSection, RfPath, TxNum);
break;
- };
+ }
} else
DBG_871X("Invalid Band %d in phy_SetTxPowerByRateBase()\n", Band);
}
@@ -336,7 +335,7 @@ u8 PHY_GetRateSectionIndexOfTxPowerByRate(
default:
DBG_871X("Invalid RegAddr 0x3%x in PHY_GetRateSectionIndexOfTxPowerByRate()", RegAddr);
break;
- };
+ }
}
return index;
@@ -726,7 +725,7 @@ PHY_GetRateValuesOfTxPowerByRate(
default:
DBG_871X("Invalid RegAddr 0x%x in %s()\n", RegAddr, __func__);
break;
- };
+ }
}
static void PHY_StoreTxPowerByRateNew(
@@ -1474,8 +1473,7 @@ u8 PHY_GetRateIndexOfTxPowerByRate(u8 Rate)
default:
DBG_871X("Invalid rate 0x%x in %s\n", Rate, __func__);
break;
- };
-
+ }
return index;
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 21ec890fd60c..e34d133075c0 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -153,7 +153,7 @@ static u32 phy_RFSerialRead_8723B(
NewOffset = Offset;
if (eRFPath == RF_PATH_A) {
- tmplong2 = PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2|MaskforPhySet, bMaskDWord);;
+ tmplong2 = PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2|MaskforPhySet, bMaskDWord);
tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset<<23) | bLSSIReadEdge; /* T65 RF */
PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2|MaskforPhySet, bMaskDWord, tmplong2&(~bLSSIReadEdge));
} else {
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index 9a4c24861947..ab2ff53a8e57 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -28,35 +28,35 @@
/* Creadted by Roger, 2011.01.31. */
/* */
static void HalSdioGetCmdAddr8723BSdio(
- struct adapter *padapter,
- u8 DeviceID,
- u32 Addr,
- u32 *pCmdAddr
+ struct adapter *adapter,
+ u8 device_id,
+ u32 addr,
+ u32 *cmdaddr
)
{
- switch (DeviceID) {
+ switch (device_id) {
case SDIO_LOCAL_DEVICE_ID:
- *pCmdAddr = ((SDIO_LOCAL_DEVICE_ID << 13) | (Addr & SDIO_LOCAL_MSK));
+ *cmdaddr = ((SDIO_LOCAL_DEVICE_ID << 13) | (addr & SDIO_LOCAL_MSK));
break;
case WLAN_IOREG_DEVICE_ID:
- *pCmdAddr = ((WLAN_IOREG_DEVICE_ID << 13) | (Addr & WLAN_IOREG_MSK));
+ *cmdaddr = ((WLAN_IOREG_DEVICE_ID << 13) | (addr & WLAN_IOREG_MSK));
break;
case WLAN_TX_HIQ_DEVICE_ID:
- *pCmdAddr = ((WLAN_TX_HIQ_DEVICE_ID << 13) | (Addr & WLAN_FIFO_MSK));
+ *cmdaddr = ((WLAN_TX_HIQ_DEVICE_ID << 13) | (addr & WLAN_FIFO_MSK));
break;
case WLAN_TX_MIQ_DEVICE_ID:
- *pCmdAddr = ((WLAN_TX_MIQ_DEVICE_ID << 13) | (Addr & WLAN_FIFO_MSK));
+ *cmdaddr = ((WLAN_TX_MIQ_DEVICE_ID << 13) | (addr & WLAN_FIFO_MSK));
break;
case WLAN_TX_LOQ_DEVICE_ID:
- *pCmdAddr = ((WLAN_TX_LOQ_DEVICE_ID << 13) | (Addr & WLAN_FIFO_MSK));
+ *cmdaddr = ((WLAN_TX_LOQ_DEVICE_ID << 13) | (addr & WLAN_FIFO_MSK));
break;
case WLAN_RX0FF_DEVICE_ID:
- *pCmdAddr = ((WLAN_RX0FF_DEVICE_ID << 13) | (Addr & WLAN_RX0FF_MSK));
+ *cmdaddr = ((WLAN_RX0FF_DEVICE_ID << 13) | (addr & WLAN_RX0FF_MSK));
break;
default:
@@ -66,64 +66,64 @@ static void HalSdioGetCmdAddr8723BSdio(
static u8 get_deviceid(u32 addr)
{
- u8 devideId;
- u16 pseudoId;
+ u8 devide_id;
+ u16 pseudo_id;
- pseudoId = (u16)(addr >> 16);
- switch (pseudoId) {
+ pseudo_id = (u16)(addr >> 16);
+ switch (pseudo_id) {
case 0x1025:
- devideId = SDIO_LOCAL_DEVICE_ID;
+ devide_id = SDIO_LOCAL_DEVICE_ID;
break;
case 0x1026:
- devideId = WLAN_IOREG_DEVICE_ID;
+ devide_id = WLAN_IOREG_DEVICE_ID;
break;
/* case 0x1027: */
-/* devideId = SDIO_FIRMWARE_FIFO; */
+/* devide_id = SDIO_FIRMWARE_FIFO; */
/* break; */
case 0x1031:
- devideId = WLAN_TX_HIQ_DEVICE_ID;
+ devide_id = WLAN_TX_HIQ_DEVICE_ID;
break;
case 0x1032:
- devideId = WLAN_TX_MIQ_DEVICE_ID;
+ devide_id = WLAN_TX_MIQ_DEVICE_ID;
break;
case 0x1033:
- devideId = WLAN_TX_LOQ_DEVICE_ID;
+ devide_id = WLAN_TX_LOQ_DEVICE_ID;
break;
case 0x1034:
- devideId = WLAN_RX0FF_DEVICE_ID;
+ devide_id = WLAN_RX0FF_DEVICE_ID;
break;
default:
-/* devideId = (u8)((addr >> 13) & 0xF); */
- devideId = WLAN_IOREG_DEVICE_ID;
+/* devide_id = (u8)((addr >> 13) & 0xF); */
+ devide_id = WLAN_IOREG_DEVICE_ID;
break;
}
- return devideId;
+ return devide_id;
}
/*
* Ref:
*HalSdioGetCmdAddr8723BSdio()
*/
-static u32 _cvrt2ftaddr(const u32 addr, u8 *pdeviceId, u16 *poffset)
+static u32 _cvrt2ftaddr(const u32 addr, u8 *pdevice_id, u16 *poffset)
{
- u8 deviceId;
+ u8 device_id;
u16 offset;
u32 ftaddr;
- deviceId = get_deviceid(addr);
+ device_id = get_deviceid(addr);
offset = 0;
- switch (deviceId) {
+ switch (device_id) {
case SDIO_LOCAL_DEVICE_ID:
offset = addr & SDIO_LOCAL_MSK;
break;
@@ -140,47 +140,44 @@ static u32 _cvrt2ftaddr(const u32 addr, u8 *pdeviceId, u16 *poffset)
case WLAN_IOREG_DEVICE_ID:
default:
- deviceId = WLAN_IOREG_DEVICE_ID;
+ device_id = WLAN_IOREG_DEVICE_ID;
offset = addr & WLAN_IOREG_MSK;
break;
}
- ftaddr = (deviceId << 13) | offset;
+ ftaddr = (device_id << 13) | offset;
- if (pdeviceId)
- *pdeviceId = deviceId;
+ if (pdevice_id)
+ *pdevice_id = device_id;
if (poffset)
*poffset = offset;
return ftaddr;
}
-static u8 sdio_read8(struct intf_hdl *pintfhdl, u32 addr)
+static u8 sdio_read8(struct intf_hdl *intfhdl, u32 addr)
{
u32 ftaddr;
- u8 val;
-
ftaddr = _cvrt2ftaddr(addr, NULL, NULL);
- val = sd_read8(pintfhdl, ftaddr, NULL);
- return val;
+
+ return sd_read8(intfhdl, ftaddr, NULL);
}
-static u16 sdio_read16(struct intf_hdl *pintfhdl, u32 addr)
+static u16 sdio_read16(struct intf_hdl *intfhdl, u32 addr)
{
u32 ftaddr;
- u16 val;
__le16 le_tmp;
ftaddr = _cvrt2ftaddr(addr, NULL, NULL);
- sd_cmd52_read(pintfhdl, ftaddr, 2, (u8 *)&le_tmp);
- val = le16_to_cpu(le_tmp);
- return val;
+ sd_cmd52_read(intfhdl, ftaddr, 2, (u8 *)&le_tmp);
+
+ return le16_to_cpu(le_tmp);
}
-static u32 sdio_read32(struct intf_hdl *pintfhdl, u32 addr)
+static u32 sdio_read32(struct intf_hdl *intfhdl, u32 addr)
{
- struct adapter *padapter;
- u8 bMacPwrCtrlOn;
- u8 deviceId;
+ struct adapter *adapter;
+ u8 mac_pwr_ctrl_on;
+ u8 device_id;
u16 offset;
u32 ftaddr;
u8 shift;
@@ -188,21 +185,20 @@ static u32 sdio_read32(struct intf_hdl *pintfhdl, u32 addr)
s32 err;
__le32 le_tmp;
- padapter = pintfhdl->padapter;
- ftaddr = _cvrt2ftaddr(addr, &deviceId, &offset);
+ adapter = intfhdl->padapter;
+ ftaddr = _cvrt2ftaddr(addr, &device_id, &offset);
- rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ rtw_hal_get_hwreg(adapter, HW_VAR_APFM_ON_MAC, &mac_pwr_ctrl_on);
if (
- ((deviceId == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
- (false == bMacPwrCtrlOn) ||
- (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
+ ((device_id == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
+ (!mac_pwr_ctrl_on) ||
+ (adapter_to_pwrctl(adapter)->bFwCurrentInPSMode)
) {
- err = sd_cmd52_read(pintfhdl, ftaddr, 4, (u8 *)&le_tmp);
+ err = sd_cmd52_read(intfhdl, ftaddr, 4, (u8 *)&le_tmp);
#ifdef SDIO_DEBUG_IO
if (!err) {
#endif
- val = le32_to_cpu(le_tmp);
- return val;
+ return le32_to_cpu(le_tmp);
#ifdef SDIO_DEBUG_IO
}
@@ -214,191 +210,184 @@ static u32 sdio_read32(struct intf_hdl *pintfhdl, u32 addr)
/* 4 bytes alignment */
shift = ftaddr & 0x3;
if (shift == 0) {
- val = sd_read32(pintfhdl, ftaddr, NULL);
+ val = sd_read32(intfhdl, ftaddr, NULL);
} else {
- u8 *ptmpbuf;
+ u8 *tmpbuf;
- ptmpbuf = rtw_malloc(8);
- if (NULL == ptmpbuf) {
+ tmpbuf = rtw_malloc(8);
+ if (!tmpbuf) {
DBG_8192C(KERN_ERR "%s: Allocate memory FAIL!(size =8) addr = 0x%x\n", __func__, addr);
return SDIO_ERR_VAL32;
}
ftaddr &= ~(u16)0x3;
- sd_read(pintfhdl, ftaddr, 8, ptmpbuf);
- memcpy(&le_tmp, ptmpbuf+shift, 4);
+ sd_read(intfhdl, ftaddr, 8, tmpbuf);
+ memcpy(&le_tmp, tmpbuf+shift, 4);
val = le32_to_cpu(le_tmp);
- kfree(ptmpbuf);
+ kfree(tmpbuf);
}
return val;
}
-static s32 sdio_readN(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pbuf)
+static s32 sdio_readN(struct intf_hdl *intfhdl, u32 addr, u32 cnt, u8 *buf)
{
- struct adapter *padapter;
- u8 bMacPwrCtrlOn;
- u8 deviceId;
+ struct adapter *adapter;
+ u8 mac_pwr_ctrl_on;
+ u8 device_id;
u16 offset;
u32 ftaddr;
u8 shift;
s32 err;
- padapter = pintfhdl->padapter;
+ adapter = intfhdl->padapter;
err = 0;
- ftaddr = _cvrt2ftaddr(addr, &deviceId, &offset);
+ ftaddr = _cvrt2ftaddr(addr, &device_id, &offset);
- rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ rtw_hal_get_hwreg(adapter, HW_VAR_APFM_ON_MAC, &mac_pwr_ctrl_on);
if (
- ((deviceId == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
- (false == bMacPwrCtrlOn) ||
- (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
- ) {
- err = sd_cmd52_read(pintfhdl, ftaddr, cnt, pbuf);
- return err;
- }
+ ((device_id == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
+ (!mac_pwr_ctrl_on) ||
+ (adapter_to_pwrctl(adapter)->bFwCurrentInPSMode)
+ )
+ return sd_cmd52_read(intfhdl, ftaddr, cnt, buf);
/* 4 bytes alignment */
shift = ftaddr & 0x3;
if (shift == 0) {
- err = sd_read(pintfhdl, ftaddr, cnt, pbuf);
+ err = sd_read(intfhdl, ftaddr, cnt, buf);
} else {
- u8 *ptmpbuf;
+ u8 *tmpbuf;
u32 n;
ftaddr &= ~(u16)0x3;
n = cnt + shift;
- ptmpbuf = rtw_malloc(n);
- if (NULL == ptmpbuf)
+ tmpbuf = rtw_malloc(n);
+ if (!tmpbuf)
return -1;
- err = sd_read(pintfhdl, ftaddr, n, ptmpbuf);
+ err = sd_read(intfhdl, ftaddr, n, tmpbuf);
if (!err)
- memcpy(pbuf, ptmpbuf+shift, cnt);
- kfree(ptmpbuf);
+ memcpy(buf, tmpbuf+shift, cnt);
+ kfree(tmpbuf);
}
return err;
}
-static s32 sdio_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
+static s32 sdio_write8(struct intf_hdl *intfhdl, u32 addr, u8 val)
{
u32 ftaddr;
s32 err;
ftaddr = _cvrt2ftaddr(addr, NULL, NULL);
- sd_write8(pintfhdl, ftaddr, val, &err);
+ sd_write8(intfhdl, ftaddr, val, &err);
return err;
}
-static s32 sdio_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
+static s32 sdio_write16(struct intf_hdl *intfhdl, u32 addr, u16 val)
{
u32 ftaddr;
- s32 err;
__le16 le_tmp;
ftaddr = _cvrt2ftaddr(addr, NULL, NULL);
le_tmp = cpu_to_le16(val);
- err = sd_cmd52_write(pintfhdl, ftaddr, 2, (u8 *)&le_tmp);
-
- return err;
+ return sd_cmd52_write(intfhdl, ftaddr, 2, (u8 *)&le_tmp);
}
-static s32 sdio_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
+static s32 sdio_write32(struct intf_hdl *intfhdl, u32 addr, u32 val)
{
- struct adapter *padapter;
- u8 bMacPwrCtrlOn;
- u8 deviceId;
+ struct adapter *adapter;
+ u8 mac_pwr_ctrl_on;
+ u8 device_id;
u16 offset;
u32 ftaddr;
u8 shift;
s32 err;
__le32 le_tmp;
- padapter = pintfhdl->padapter;
+ adapter = intfhdl->padapter;
err = 0;
- ftaddr = _cvrt2ftaddr(addr, &deviceId, &offset);
+ ftaddr = _cvrt2ftaddr(addr, &device_id, &offset);
- rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ rtw_hal_get_hwreg(adapter, HW_VAR_APFM_ON_MAC, &mac_pwr_ctrl_on);
if (
- ((deviceId == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
- (!bMacPwrCtrlOn) ||
- (adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
+ ((device_id == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
+ (!mac_pwr_ctrl_on) ||
+ (adapter_to_pwrctl(adapter)->bFwCurrentInPSMode)
) {
le_tmp = cpu_to_le32(val);
- err = sd_cmd52_write(pintfhdl, ftaddr, 4, (u8 *)&le_tmp);
- return err;
+
+ return sd_cmd52_write(intfhdl, ftaddr, 4, (u8 *)&le_tmp);
}
/* 4 bytes alignment */
shift = ftaddr & 0x3;
if (shift == 0) {
- sd_write32(pintfhdl, ftaddr, val, &err);
+ sd_write32(intfhdl, ftaddr, val, &err);
} else {
le_tmp = cpu_to_le32(val);
- err = sd_cmd52_write(pintfhdl, ftaddr, 4, (u8 *)&le_tmp);
+ err = sd_cmd52_write(intfhdl, ftaddr, 4, (u8 *)&le_tmp);
}
return err;
}
-static s32 sdio_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pbuf)
+static s32 sdio_writeN(struct intf_hdl *intfhdl, u32 addr, u32 cnt, u8 *buf)
{
- struct adapter *padapter;
- u8 bMacPwrCtrlOn;
- u8 deviceId;
+ struct adapter *adapter;
+ u8 mac_pwr_ctrl_on;
+ u8 device_id;
u16 offset;
u32 ftaddr;
u8 shift;
s32 err;
- padapter = pintfhdl->padapter;
+ adapter = intfhdl->padapter;
err = 0;
- ftaddr = _cvrt2ftaddr(addr, &deviceId, &offset);
+ ftaddr = _cvrt2ftaddr(addr, &device_id, &offset);
- rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ rtw_hal_get_hwreg(adapter, HW_VAR_APFM_ON_MAC, &mac_pwr_ctrl_on);
if (
- ((deviceId == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
- (false == bMacPwrCtrlOn) ||
- (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
- ) {
- err = sd_cmd52_write(pintfhdl, ftaddr, cnt, pbuf);
- return err;
- }
+ ((device_id == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
+ (!mac_pwr_ctrl_on) ||
+ (adapter_to_pwrctl(adapter)->bFwCurrentInPSMode)
+ )
+ return sd_cmd52_write(intfhdl, ftaddr, cnt, buf);
shift = ftaddr & 0x3;
if (shift == 0) {
- err = sd_write(pintfhdl, ftaddr, cnt, pbuf);
+ err = sd_write(intfhdl, ftaddr, cnt, buf);
} else {
- u8 *ptmpbuf;
+ u8 *tmpbuf;
u32 n;
ftaddr &= ~(u16)0x3;
n = cnt + shift;
- ptmpbuf = rtw_malloc(n);
- if (NULL == ptmpbuf)
+ tmpbuf = rtw_malloc(n);
+ if (!tmpbuf)
return -1;
- err = sd_read(pintfhdl, ftaddr, 4, ptmpbuf);
+ err = sd_read(intfhdl, ftaddr, 4, tmpbuf);
if (err) {
- kfree(ptmpbuf);
+ kfree(tmpbuf);
return err;
}
- memcpy(ptmpbuf+shift, pbuf, cnt);
- err = sd_write(pintfhdl, ftaddr, n, ptmpbuf);
- kfree(ptmpbuf);
+ memcpy(tmpbuf+shift, buf, cnt);
+ err = sd_write(intfhdl, ftaddr, n, tmpbuf);
+ kfree(tmpbuf);
}
return err;
}
-static u8 sdio_f0_read8(struct intf_hdl *pintfhdl, u32 addr)
+static u8 sdio_f0_read8(struct intf_hdl *intfhdl, u32 addr)
{
- return sd_f0_read8(pintfhdl, addr, NULL);
+ return sd_f0_read8(intfhdl, addr, NULL);
}
static void sdio_read_mem(
- struct intf_hdl *pintfhdl,
+ struct intf_hdl *intfhdl,
u32 addr,
u32 cnt,
u8 *rmem
@@ -406,18 +395,18 @@ static void sdio_read_mem(
{
s32 err;
- err = sdio_readN(pintfhdl, addr, cnt, rmem);
+ err = sdio_readN(intfhdl, addr, cnt, rmem);
/* TODO: Report error is err not zero */
}
static void sdio_write_mem(
- struct intf_hdl *pintfhdl,
+ struct intf_hdl *intfhdl,
u32 addr,
u32 cnt,
u8 *wmem
)
{
- sdio_writeN(pintfhdl, addr, cnt, wmem);
+ sdio_writeN(intfhdl, addr, cnt, wmem);
}
/*
@@ -427,7 +416,7 @@ static void sdio_write_mem(
*and make sure data transfer will be done in one command.
*
* Parameters:
- *pintfhdl a pointer of intf_hdl
+ *intfhdl a pointer of intf_hdl
*addr port ID
*cnt size to read
*rmem address to put data
@@ -437,15 +426,15 @@ static void sdio_write_mem(
*_FAIL(0) Fail
*/
static u32 sdio_read_port(
- struct intf_hdl *pintfhdl,
+ struct intf_hdl *intfhdl,
u32 addr,
u32 cnt,
u8 *mem
)
{
- struct adapter *padapter;
+ struct adapter *adapter;
PSDIO_DATA psdio;
- struct hal_com_data *phal;
+ struct hal_com_data *hal;
u32 oldcnt;
#ifdef SDIO_DYNAMIC_ALLOC_MEM
u8 *oldmem;
@@ -453,33 +442,18 @@ static u32 sdio_read_port(
s32 err;
- padapter = pintfhdl->padapter;
- psdio = &adapter_to_dvobj(padapter)->intf_data;
- phal = GET_HAL_DATA(padapter);
+ adapter = intfhdl->padapter;
+ psdio = &adapter_to_dvobj(adapter)->intf_data;
+ hal = GET_HAL_DATA(adapter);
- HalSdioGetCmdAddr8723BSdio(padapter, addr, phal->SdioRxFIFOCnt++, &addr);
+ HalSdioGetCmdAddr8723BSdio(adapter, addr, hal->SdioRxFIFOCnt++, &addr);
oldcnt = cnt;
if (cnt > psdio->block_transfer_len)
cnt = _RND(cnt, psdio->block_transfer_len);
/* cnt = sdio_align_size(cnt); */
- if (oldcnt != cnt) {
-#ifdef SDIO_DYNAMIC_ALLOC_MEM
- oldmem = mem;
- mem = rtw_malloc(cnt);
- if (mem == NULL) {
- DBG_8192C(KERN_WARNING "%s: allocate memory %d bytes fail!\n", __func__, cnt);
- mem = oldmem;
- oldmem == NULL;
- }
-#else
- /* in this case, caller should gurante the buffer is big enough */
- /* to receive data after alignment */
-#endif
- }
-
- err = _sd_read(pintfhdl, addr, cnt, mem);
+ err = _sd_read(intfhdl, addr, cnt, mem);
#ifdef SDIO_DYNAMIC_ALLOC_MEM
if ((oldcnt != cnt) && (oldmem)) {
@@ -500,7 +474,7 @@ static u32 sdio_read_port(
*and make sure data could be written in one command.
*
* Parameters:
- *pintfhdl a pointer of intf_hdl
+ *intfhdl a pointer of intf_hdl
*addr port ID
*cnt size to write
*wmem data pointer to write
@@ -510,33 +484,33 @@ static u32 sdio_read_port(
*_FAIL(0) Fail
*/
static u32 sdio_write_port(
- struct intf_hdl *pintfhdl,
+ struct intf_hdl *intfhdl,
u32 addr,
u32 cnt,
u8 *mem
)
{
- struct adapter *padapter;
+ struct adapter *adapter;
PSDIO_DATA psdio;
s32 err;
struct xmit_buf *xmitbuf = (struct xmit_buf *)mem;
- padapter = pintfhdl->padapter;
- psdio = &adapter_to_dvobj(padapter)->intf_data;
+ adapter = intfhdl->padapter;
+ psdio = &adapter_to_dvobj(adapter)->intf_data;
- if (padapter->hw_init_completed == false) {
- DBG_871X("%s [addr = 0x%x cnt =%d] padapter->hw_init_completed == false\n", __func__, addr, cnt);
+ if (!adapter->hw_init_completed) {
+ DBG_871X("%s [addr = 0x%x cnt =%d] adapter->hw_init_completed == false\n", __func__, addr, cnt);
return _FAIL;
}
cnt = _RND4(cnt);
- HalSdioGetCmdAddr8723BSdio(padapter, addr, cnt >> 2, &addr);
+ HalSdioGetCmdAddr8723BSdio(adapter, addr, cnt >> 2, &addr);
if (cnt > psdio->block_transfer_len)
cnt = _RND(cnt, psdio->block_transfer_len);
/* cnt = sdio_align_size(cnt); */
- err = sd_write(pintfhdl, addr, cnt, xmitbuf->pdata);
+ err = sd_write(intfhdl, addr, cnt, xmitbuf->pdata);
rtw_sctx_done_err(
&xmitbuf->sctx,
@@ -548,61 +522,59 @@ static u32 sdio_write_port(
return _SUCCESS;
}
-void sdio_set_intf_ops(struct adapter *padapter, struct _io_ops *pops)
+void sdio_set_intf_ops(struct adapter *adapter, struct _io_ops *ops)
{
- pops->_read8 = &sdio_read8;
- pops->_read16 = &sdio_read16;
- pops->_read32 = &sdio_read32;
- pops->_read_mem = &sdio_read_mem;
- pops->_read_port = &sdio_read_port;
-
- pops->_write8 = &sdio_write8;
- pops->_write16 = &sdio_write16;
- pops->_write32 = &sdio_write32;
- pops->_writeN = &sdio_writeN;
- pops->_write_mem = &sdio_write_mem;
- pops->_write_port = &sdio_write_port;
-
- pops->_sd_f0_read8 = sdio_f0_read8;
+ ops->_read8 = &sdio_read8;
+ ops->_read16 = &sdio_read16;
+ ops->_read32 = &sdio_read32;
+ ops->_read_mem = &sdio_read_mem;
+ ops->_read_port = &sdio_read_port;
+
+ ops->_write8 = &sdio_write8;
+ ops->_write16 = &sdio_write16;
+ ops->_write32 = &sdio_write32;
+ ops->_writeN = &sdio_writeN;
+ ops->_write_mem = &sdio_write_mem;
+ ops->_write_port = &sdio_write_port;
+
+ ops->_sd_f0_read8 = sdio_f0_read8;
}
/*
* Todo: align address to 4 bytes.
*/
static s32 _sdio_local_read(
- struct adapter *padapter,
+ struct adapter *adapter,
u32 addr,
u32 cnt,
- u8 *pbuf
+ u8 *buf
)
{
- struct intf_hdl *pintfhdl;
- u8 bMacPwrCtrlOn;
+ struct intf_hdl *intfhdl;
+ u8 mac_pwr_ctrl_on;
s32 err;
- u8 *ptmpbuf;
+ u8 *tmpbuf;
u32 n;
- pintfhdl = &padapter->iopriv.intf;
+ intfhdl = &adapter->iopriv.intf;
- HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
- rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
- if (false == bMacPwrCtrlOn) {
- err = _sd_cmd52_read(pintfhdl, addr, cnt, pbuf);
- return err;
- }
+ rtw_hal_get_hwreg(adapter, HW_VAR_APFM_ON_MAC, &mac_pwr_ctrl_on);
+ if (!mac_pwr_ctrl_on)
+ return _sd_cmd52_read(intfhdl, addr, cnt, buf);
n = RND4(cnt);
- ptmpbuf = rtw_malloc(n);
- if (!ptmpbuf)
+ tmpbuf = rtw_malloc(n);
+ if (!tmpbuf)
return (-1);
- err = _sd_read(pintfhdl, addr, n, ptmpbuf);
+ err = _sd_read(intfhdl, addr, n, tmpbuf);
if (!err)
- memcpy(pbuf, ptmpbuf, cnt);
+ memcpy(buf, tmpbuf, cnt);
- kfree(ptmpbuf);
+ kfree(tmpbuf);
return err;
}
@@ -611,41 +583,39 @@ static s32 _sdio_local_read(
* Todo: align address to 4 bytes.
*/
s32 sdio_local_read(
- struct adapter *padapter,
+ struct adapter *adapter,
u32 addr,
u32 cnt,
- u8 *pbuf
+ u8 *buf
)
{
- struct intf_hdl *pintfhdl;
- u8 bMacPwrCtrlOn;
+ struct intf_hdl *intfhdl;
+ u8 mac_pwr_ctrl_on;
s32 err;
- u8 *ptmpbuf;
+ u8 *tmpbuf;
u32 n;
- pintfhdl = &padapter->iopriv.intf;
+ intfhdl = &adapter->iopriv.intf;
- HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
- rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ rtw_hal_get_hwreg(adapter, HW_VAR_APFM_ON_MAC, &mac_pwr_ctrl_on);
if (
- (false == bMacPwrCtrlOn) ||
- (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
- ) {
- err = sd_cmd52_read(pintfhdl, addr, cnt, pbuf);
- return err;
- }
+ (!mac_pwr_ctrl_on) ||
+ (adapter_to_pwrctl(adapter)->bFwCurrentInPSMode)
+ )
+ return sd_cmd52_read(intfhdl, addr, cnt, buf);
n = RND4(cnt);
- ptmpbuf = rtw_malloc(n);
- if (!ptmpbuf)
+ tmpbuf = rtw_malloc(n);
+ if (!tmpbuf)
return (-1);
- err = sd_read(pintfhdl, addr, n, ptmpbuf);
+ err = sd_read(intfhdl, addr, n, tmpbuf);
if (!err)
- memcpy(pbuf, ptmpbuf, cnt);
+ memcpy(buf, tmpbuf, cnt);
- kfree(ptmpbuf);
+ kfree(tmpbuf);
return err;
}
@@ -654,16 +624,16 @@ s32 sdio_local_read(
* Todo: align address to 4 bytes.
*/
s32 sdio_local_write(
- struct adapter *padapter,
+ struct adapter *adapter,
u32 addr,
u32 cnt,
- u8 *pbuf
+ u8 *buf
)
{
- struct intf_hdl *pintfhdl;
- u8 bMacPwrCtrlOn;
+ struct intf_hdl *intfhdl;
+ u8 mac_pwr_ctrl_on;
s32 err;
- u8 *ptmpbuf;
+ u8 *tmpbuf;
if (addr & 0x3)
DBG_8192C("%s, address must be 4 bytes alignment\n", __func__);
@@ -671,101 +641,99 @@ s32 sdio_local_write(
if (cnt & 0x3)
DBG_8192C("%s, size must be the multiple of 4\n", __func__);
- pintfhdl = &padapter->iopriv.intf;
+ intfhdl = &adapter->iopriv.intf;
- HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
- rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ rtw_hal_get_hwreg(adapter, HW_VAR_APFM_ON_MAC, &mac_pwr_ctrl_on);
if (
- (false == bMacPwrCtrlOn) ||
- (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
- ) {
- err = sd_cmd52_write(pintfhdl, addr, cnt, pbuf);
- return err;
- }
+ (!mac_pwr_ctrl_on) ||
+ (adapter_to_pwrctl(adapter)->bFwCurrentInPSMode)
+ )
+ return sd_cmd52_write(intfhdl, addr, cnt, buf);
- ptmpbuf = rtw_malloc(cnt);
- if (!ptmpbuf)
+ tmpbuf = rtw_malloc(cnt);
+ if (!tmpbuf)
return (-1);
- memcpy(ptmpbuf, pbuf, cnt);
+ memcpy(tmpbuf, buf, cnt);
- err = sd_write(pintfhdl, addr, cnt, ptmpbuf);
+ err = sd_write(intfhdl, addr, cnt, tmpbuf);
- kfree(ptmpbuf);
+ kfree(tmpbuf);
return err;
}
-u8 SdioLocalCmd52Read1Byte(struct adapter *padapter, u32 addr)
+u8 SdioLocalCmd52Read1Byte(struct adapter *adapter, u32 addr)
{
u8 val = 0;
- struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+ struct intf_hdl *intfhdl = &adapter->iopriv.intf;
- HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
- sd_cmd52_read(pintfhdl, addr, 1, &val);
+ HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ sd_cmd52_read(intfhdl, addr, 1, &val);
return val;
}
-static u16 SdioLocalCmd52Read2Byte(struct adapter *padapter, u32 addr)
+static u16 SdioLocalCmd52Read2Byte(struct adapter *adapter, u32 addr)
{
__le16 val = 0;
- struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+ struct intf_hdl *intfhdl = &adapter->iopriv.intf;
- HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
- sd_cmd52_read(pintfhdl, addr, 2, (u8 *)&val);
+ HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ sd_cmd52_read(intfhdl, addr, 2, (u8 *)&val);
return le16_to_cpu(val);
}
-static u32 SdioLocalCmd53Read4Byte(struct adapter *padapter, u32 addr)
+static u32 SdioLocalCmd53Read4Byte(struct adapter *adapter, u32 addr)
{
- u8 bMacPwrCtrlOn;
+ u8 mac_pwr_ctrl_on;
u32 val = 0;
- struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+ struct intf_hdl *intfhdl = &adapter->iopriv.intf;
__le32 le_tmp;
- HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
- rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
- if (!bMacPwrCtrlOn || adapter_to_pwrctl(padapter)->bFwCurrentInPSMode) {
- sd_cmd52_read(pintfhdl, addr, 4, (u8 *)&le_tmp);
+ HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ rtw_hal_get_hwreg(adapter, HW_VAR_APFM_ON_MAC, &mac_pwr_ctrl_on);
+ if (!mac_pwr_ctrl_on || adapter_to_pwrctl(adapter)->bFwCurrentInPSMode) {
+ sd_cmd52_read(intfhdl, addr, 4, (u8 *)&le_tmp);
val = le32_to_cpu(le_tmp);
} else {
- val = sd_read32(pintfhdl, addr, NULL);
+ val = sd_read32(intfhdl, addr, NULL);
}
return val;
}
-void SdioLocalCmd52Write1Byte(struct adapter *padapter, u32 addr, u8 v)
+void SdioLocalCmd52Write1Byte(struct adapter *adapter, u32 addr, u8 v)
{
- struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+ struct intf_hdl *intfhdl = &adapter->iopriv.intf;
- HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
- sd_cmd52_write(pintfhdl, addr, 1, &v);
+ HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ sd_cmd52_write(intfhdl, addr, 1, &v);
}
-static void SdioLocalCmd52Write4Byte(struct adapter *padapter, u32 addr, u32 v)
+static void SdioLocalCmd52Write4Byte(struct adapter *adapter, u32 addr, u32 v)
{
- struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+ struct intf_hdl *intfhdl = &adapter->iopriv.intf;
__le32 le_tmp;
- HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
le_tmp = cpu_to_le32(v);
- sd_cmd52_write(pintfhdl, addr, 4, (u8 *)&le_tmp);
+ sd_cmd52_write(intfhdl, addr, 4, (u8 *)&le_tmp);
}
-static s32 ReadInterrupt8723BSdio(struct adapter *padapter, u32 *phisr)
+static s32 ReadInterrupt8723BSdio(struct adapter *adapter, u32 *phisr)
{
u32 hisr, himr;
u8 val8, hisr_len;
- if (phisr == NULL)
+ if (!phisr)
return false;
- himr = GET_HAL_DATA(padapter)->sdio_himr;
+ himr = GET_HAL_DATA(adapter)->sdio_himr;
/* decide how many bytes need to be read */
hisr_len = 0;
@@ -777,7 +745,7 @@ static s32 ReadInterrupt8723BSdio(struct adapter *padapter, u32 *phisr)
hisr = 0;
while (hisr_len != 0) {
hisr_len--;
- val8 = SdioLocalCmd52Read1Byte(padapter, SDIO_REG_HISR+hisr_len);
+ val8 = SdioLocalCmd52Read1Byte(adapter, SDIO_REG_HISR+hisr_len);
hisr |= (val8 << (8*hisr_len));
}
@@ -795,13 +763,13 @@ static s32 ReadInterrupt8723BSdio(struct adapter *padapter, u32 *phisr)
/* */
/* Created by Roger, 2011.02.11. */
/* */
-void InitInterrupt8723BSdio(struct adapter *padapter)
+void InitInterrupt8723BSdio(struct adapter *adapter)
{
- struct hal_com_data *pHalData;
+ struct hal_com_data *haldata;
- pHalData = GET_HAL_DATA(padapter);
- pHalData->sdio_himr = (u32)( \
+ haldata = GET_HAL_DATA(adapter);
+ haldata->sdio_himr = (u32)( \
SDIO_HIMR_RX_REQUEST_MSK |
SDIO_HIMR_AVAL_MSK |
/* SDIO_HIMR_TXERR_MSK | */
@@ -829,14 +797,14 @@ void InitInterrupt8723BSdio(struct adapter *padapter)
/* */
/* Created by Roger, 2011.08.03. */
/* */
-void InitSysInterrupt8723BSdio(struct adapter *padapter)
+void InitSysInterrupt8723BSdio(struct adapter *adapter)
{
- struct hal_com_data *pHalData;
+ struct hal_com_data *haldata;
- pHalData = GET_HAL_DATA(padapter);
+ haldata = GET_HAL_DATA(adapter);
- pHalData->SysIntrMask = ( \
+ haldata->SysIntrMask = ( \
/* HSIMR_GPIO12_0_INT_EN | */
/* HSIMR_SPS_OCP_INT_EN | */
/* HSIMR_RON_INT_EN | */
@@ -855,20 +823,19 @@ void InitSysInterrupt8723BSdio(struct adapter *padapter)
/* */
/* Created by Roger, 2011.02.11. */
/* */
-void ClearInterrupt8723BSdio(struct adapter *padapter)
+void ClearInterrupt8723BSdio(struct adapter *adapter)
{
- struct hal_com_data *pHalData;
+ struct hal_com_data *haldata;
u8 *clear;
-
- if (true == padapter->bSurpriseRemoved)
+ if (adapter->bSurpriseRemoved)
return;
- pHalData = GET_HAL_DATA(padapter);
+ haldata = GET_HAL_DATA(adapter);
clear = rtw_zmalloc(4);
/* Clear corresponding HISR Content if needed */
- *(__le32 *)clear = cpu_to_le32(pHalData->sdio_hisr & MASK_SDIO_HISR_CLEAR);
+ *(__le32 *)clear = cpu_to_le32(haldata->sdio_hisr & MASK_SDIO_HISR_CLEAR);
if (*(__le32 *)clear) {
/* Perform write one clear operation */
- sdio_local_write(padapter, SDIO_REG_HISR, 4, clear);
+ sdio_local_write(adapter, SDIO_REG_HISR, 4, clear);
@@ -888,16 +855,16 @@ void ClearInterrupt8723BSdio(struct adapter *padapter)
/* */
/* Created by Roger, 2011.02.11. */
/* */
-void EnableInterrupt8723BSdio(struct adapter *padapter)
+void EnableInterrupt8723BSdio(struct adapter *adapter)
{
- struct hal_com_data *pHalData;
+ struct hal_com_data *haldata;
__le32 himr;
u32 tmp;
- pHalData = GET_HAL_DATA(padapter);
+ haldata = GET_HAL_DATA(adapter);
- himr = cpu_to_le32(pHalData->sdio_himr);
- sdio_local_write(padapter, SDIO_REG_HIMR, 4, (u8 *)&himr);
+ himr = cpu_to_le32(haldata->sdio_himr);
+ sdio_local_write(adapter, SDIO_REG_HIMR, 4, (u8 *)&himr);
RT_TRACE(
_module_hci_ops_c_,
@@ -905,13 +872,13 @@ void EnableInterrupt8723BSdio(struct adapter *padapter)
(
"%s: enable SDIO HIMR = 0x%08X\n",
__func__,
- pHalData->sdio_himr
+ haldata->sdio_himr
)
);
/* Update current system IMR settings */
- tmp = rtw_read32(padapter, REG_HSIMR);
- rtw_write32(padapter, REG_HSIMR, tmp | pHalData->SysIntrMask);
+ tmp = rtw_read32(adapter, REG_HSIMR);
+ rtw_write32(adapter, REG_HSIMR, tmp | haldata->SysIntrMask);
RT_TRACE(
_module_hci_ops_c_,
@@ -919,7 +886,7 @@ void EnableInterrupt8723BSdio(struct adapter *padapter)
(
"%s: enable HSIMR = 0x%08X\n",
__func__,
- pHalData->SysIntrMask
+ haldata->SysIntrMask
)
);
@@ -928,7 +895,7 @@ void EnableInterrupt8723BSdio(struct adapter *padapter)
/* So we need to clear all C2H events that FW has notified, otherwise FW won't schedule any commands anymore. */
/* 2011.10.19. */
/* */
- rtw_write8(padapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+ rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
}
/* */
@@ -940,12 +907,12 @@ void EnableInterrupt8723BSdio(struct adapter *padapter)
/* */
/* Created by Roger, 2011.02.11. */
/* */
-void DisableInterrupt8723BSdio(struct adapter *padapter)
+void DisableInterrupt8723BSdio(struct adapter *adapter)
{
__le32 himr;
himr = cpu_to_le32(SDIO_HIMR_DISABLED);
- sdio_local_write(padapter, SDIO_REG_HIMR, 4, (u8 *)&himr);
+ sdio_local_write(adapter, SDIO_REG_HIMR, 4, (u8 *)&himr);
}
/* */
@@ -957,27 +924,27 @@ void DisableInterrupt8723BSdio(struct adapter *padapter)
/* */
/* Created by Isaac, 2013.09.10. */
/* */
-u8 CheckIPSStatus(struct adapter *padapter)
+u8 CheckIPSStatus(struct adapter *adapter)
{
DBG_871X(
"%s(): Read 0x100 = 0x%02x 0x86 = 0x%02x\n",
__func__,
- rtw_read8(padapter, 0x100),
- rtw_read8(padapter, 0x86)
+ rtw_read8(adapter, 0x100),
+ rtw_read8(adapter, 0x86)
);
- if (rtw_read8(padapter, 0x100) == 0xEA)
+ if (rtw_read8(adapter, 0x100) == 0xEA)
return true;
else
return false;
}
-static struct recv_buf *sd_recv_rxfifo(struct adapter *padapter, u32 size)
+static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
{
u32 readsize, ret;
- u8 *preadbuf;
- struct recv_priv *precvpriv;
- struct recv_buf *precvbuf;
+ u8 *readbuf;
+ struct recv_priv *recv_priv;
+ struct recv_buf *recvbuf;
/* Patch for some SDIO Host 4 bytes issue */
@@ -985,37 +952,37 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *padapter, u32 size)
readsize = RND4(size);
/* 3 1. alloc recvbuf */
- precvpriv = &padapter->recvpriv;
- precvbuf = rtw_dequeue_recvbuf(&precvpriv->free_recv_buf_queue);
- if (precvbuf == NULL) {
+ recv_priv = &adapter->recvpriv;
+ recvbuf = rtw_dequeue_recvbuf(&recv_priv->free_recv_buf_queue);
+ if (!recvbuf) {
DBG_871X_LEVEL(_drv_err_, "%s: alloc recvbuf FAIL!\n", __func__);
return NULL;
}
/* 3 2. alloc skb */
- if (precvbuf->pskb == NULL) {
+ if (!recvbuf->pskb) {
SIZE_PTR tmpaddr = 0;
SIZE_PTR alignment = 0;
- precvbuf->pskb = rtw_skb_alloc(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+ recvbuf->pskb = rtw_skb_alloc(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
- if (precvbuf->pskb) {
- precvbuf->pskb->dev = padapter->pnetdev;
+ if (recvbuf->pskb) {
+ recvbuf->pskb->dev = adapter->pnetdev;
- tmpaddr = (SIZE_PTR)precvbuf->pskb->data;
+ tmpaddr = (SIZE_PTR)recvbuf->pskb->data;
alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
- skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
+ skb_reserve(recvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
}
- if (precvbuf->pskb == NULL) {
+ if (!recvbuf->pskb) {
DBG_871X("%s: alloc_skb fail! read =%d\n", __func__, readsize);
return NULL;
}
}
/* 3 3. read data from rxfifo */
- preadbuf = precvbuf->pskb->data;
- ret = sdio_read_port(&padapter->iopriv.intf, WLAN_RX0FF_DEVICE_ID, readsize, preadbuf);
+ readbuf = recvbuf->pskb->data;
+ ret = sdio_read_port(&adapter->iopriv.intf, WLAN_RX0FF_DEVICE_ID, readsize, readbuf);
if (ret == _FAIL) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("%s: read port FAIL!\n", __func__));
return NULL;
@@ -1023,72 +990,72 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *padapter, u32 size)
/* 3 4. init recvbuf */
- precvbuf->len = size;
- precvbuf->phead = precvbuf->pskb->head;
- precvbuf->pdata = precvbuf->pskb->data;
- skb_set_tail_pointer(precvbuf->pskb, size);
- precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
- precvbuf->pend = skb_end_pointer(precvbuf->pskb);
-
- return precvbuf;
+ recvbuf->len = size;
+ recvbuf->phead = recvbuf->pskb->head;
+ recvbuf->pdata = recvbuf->pskb->data;
+ skb_set_tail_pointer(recvbuf->pskb, size);
+ recvbuf->ptail = skb_tail_pointer(recvbuf->pskb);
+ recvbuf->pend = skb_end_pointer(recvbuf->pskb);
+
+ return recvbuf;
}
-static void sd_rxhandler(struct adapter *padapter, struct recv_buf *precvbuf)
+static void sd_rxhandler(struct adapter *adapter, struct recv_buf *recvbuf)
{
- struct recv_priv *precvpriv;
- struct __queue *ppending_queue;
+ struct recv_priv *recv_priv;
+ struct __queue *pending_queue;
- precvpriv = &padapter->recvpriv;
- ppending_queue = &precvpriv->recv_buf_pending_queue;
+ recv_priv = &adapter->recvpriv;
+ pending_queue = &recv_priv->recv_buf_pending_queue;
/* 3 1. enqueue recvbuf */
- rtw_enqueue_recvbuf(precvbuf, ppending_queue);
+ rtw_enqueue_recvbuf(recvbuf, pending_queue);
/* 3 2. schedule tasklet */
- tasklet_schedule(&precvpriv->recv_tasklet);
+ tasklet_schedule(&recv_priv->recv_tasklet);
}
-void sd_int_dpc(struct adapter *padapter)
+void sd_int_dpc(struct adapter *adapter)
{
- struct hal_com_data *phal;
+ struct hal_com_data *hal;
struct dvobj_priv *dvobj;
- struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+ struct intf_hdl *intfhdl = &adapter->iopriv.intf;
struct pwrctrl_priv *pwrctl;
- phal = GET_HAL_DATA(padapter);
- dvobj = adapter_to_dvobj(padapter);
+ hal = GET_HAL_DATA(adapter);
+ dvobj = adapter_to_dvobj(adapter);
pwrctl = dvobj_to_pwrctl(dvobj);
- if (phal->sdio_hisr & SDIO_HISR_AVAL) {
+ if (hal->sdio_hisr & SDIO_HISR_AVAL) {
u8 freepage[4];
- _sdio_local_read(padapter, SDIO_REG_FREE_TXPG, 4, freepage);
- up(&(padapter->xmitpriv.xmit_sema));
+ _sdio_local_read(adapter, SDIO_REG_FREE_TXPG, 4, freepage);
+ up(&(adapter->xmitpriv.xmit_sema));
}
- if (phal->sdio_hisr & SDIO_HISR_CPWM1) {
+ if (hal->sdio_hisr & SDIO_HISR_CPWM1) {
struct reportpwrstate_parm report;
u8 bcancelled;
_cancel_timer(&(pwrctl->pwr_rpwm_timer), &bcancelled);
- report.state = SdioLocalCmd52Read1Byte(padapter, SDIO_REG_HCPWM1_8723B);
+ report.state = SdioLocalCmd52Read1Byte(adapter, SDIO_REG_HCPWM1_8723B);
- /* cpwm_int_hdl(padapter, &report); */
+ /* cpwm_int_hdl(adapter, &report); */
_set_workitem(&(pwrctl->cpwm_event));
}
- if (phal->sdio_hisr & SDIO_HISR_TXERR) {
+ if (hal->sdio_hisr & SDIO_HISR_TXERR) {
u8 *status;
u32 addr;
status = rtw_malloc(4);
if (status) {
addr = REG_TXDMA_STATUS;
- HalSdioGetCmdAddr8723BSdio(padapter, WLAN_IOREG_DEVICE_ID, addr, &addr);
- _sd_read(pintfhdl, addr, 4, status);
- _sd_write(pintfhdl, addr, 4, status);
+ HalSdioGetCmdAddr8723BSdio(adapter, WLAN_IOREG_DEVICE_ID, addr, &addr);
+ _sd_read(intfhdl, addr, 4, status);
+ _sd_write(intfhdl, addr, 4, status);
DBG_8192C("%s: SDIO_HISR_TXERR (0x%08x)\n", __func__, le32_to_cpu(*(u32 *)status));
kfree(status);
} else {
@@ -1096,71 +1063,71 @@ void sd_int_dpc(struct adapter *padapter)
}
}
- if (phal->sdio_hisr & SDIO_HISR_TXBCNOK) {
+ if (hal->sdio_hisr & SDIO_HISR_TXBCNOK) {
DBG_8192C("%s: SDIO_HISR_TXBCNOK\n", __func__);
}
- if (phal->sdio_hisr & SDIO_HISR_TXBCNERR) {
+ if (hal->sdio_hisr & SDIO_HISR_TXBCNERR) {
DBG_8192C("%s: SDIO_HISR_TXBCNERR\n", __func__);
}
#ifndef CONFIG_C2H_PACKET_EN
- if (phal->sdio_hisr & SDIO_HISR_C2HCMD) {
+ if (hal->sdio_hisr & SDIO_HISR_C2HCMD) {
struct c2h_evt_hdr_88xx *c2h_evt;
DBG_8192C("%s: C2H Command\n", __func__);
c2h_evt = rtw_zmalloc(16);
if (c2h_evt != NULL) {
- if (rtw_hal_c2h_evt_read(padapter, (u8 *)c2h_evt) == _SUCCESS) {
+ if (rtw_hal_c2h_evt_read(adapter, (u8 *)c2h_evt) == _SUCCESS) {
if (c2h_id_filter_ccx_8723b((u8 *)c2h_evt)) {
/* Handle CCX report here */
- rtw_hal_c2h_handler(padapter, (u8 *)c2h_evt);
+ rtw_hal_c2h_handler(adapter, (u8 *)c2h_evt);
kfree((u8 *)c2h_evt);
} else {
- rtw_c2h_wk_cmd(padapter, (u8 *)c2h_evt);
+ rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
}
}
} else {
/* Error handling for malloc fail */
- if (rtw_cbuf_push(padapter->evtpriv.c2h_queue, NULL) != _SUCCESS)
+ if (rtw_cbuf_push(adapter->evtpriv.c2h_queue, NULL) != _SUCCESS)
DBG_871X("%s rtw_cbuf_push fail\n", __func__);
- _set_workitem(&padapter->evtpriv.c2h_wk);
+ _set_workitem(&adapter->evtpriv.c2h_wk);
}
}
#endif
- if (phal->sdio_hisr & SDIO_HISR_RXFOVW) {
+ if (hal->sdio_hisr & SDIO_HISR_RXFOVW) {
DBG_8192C("%s: Rx Overflow\n", __func__);
}
- if (phal->sdio_hisr & SDIO_HISR_RXERR) {
+ if (hal->sdio_hisr & SDIO_HISR_RXERR) {
DBG_8192C("%s: Rx Error\n", __func__);
}
- if (phal->sdio_hisr & SDIO_HISR_RX_REQUEST) {
- struct recv_buf *precvbuf;
+ if (hal->sdio_hisr & SDIO_HISR_RX_REQUEST) {
+ struct recv_buf *recvbuf;
int alloc_fail_time = 0;
u32 hisr;
-/* DBG_8192C("%s: RX Request, size =%d\n", __func__, phal->SdioRxFIFOSize); */
- phal->sdio_hisr ^= SDIO_HISR_RX_REQUEST;
+/* DBG_8192C("%s: RX Request, size =%d\n", __func__, hal->SdioRxFIFOSize); */
+ hal->sdio_hisr ^= SDIO_HISR_RX_REQUEST;
do {
- phal->SdioRxFIFOSize = SdioLocalCmd52Read2Byte(padapter, SDIO_REG_RX0_REQ_LEN);
- if (phal->SdioRxFIFOSize != 0) {
- precvbuf = sd_recv_rxfifo(padapter, phal->SdioRxFIFOSize);
- if (precvbuf)
- sd_rxhandler(padapter, precvbuf);
+ hal->SdioRxFIFOSize = SdioLocalCmd52Read2Byte(adapter, SDIO_REG_RX0_REQ_LEN);
+ if (hal->SdioRxFIFOSize != 0) {
+ recvbuf = sd_recv_rxfifo(adapter, hal->SdioRxFIFOSize);
+ if (recvbuf)
+ sd_rxhandler(adapter, recvbuf);
else {
alloc_fail_time++;
- DBG_871X("precvbuf is Null for %d times because alloc memory failed\n", alloc_fail_time);
+ DBG_871X("recvbuf is Null for %d times because alloc memory failed\n", alloc_fail_time);
if (alloc_fail_time >= 10)
break;
}
- phal->SdioRxFIFOSize = 0;
+ hal->SdioRxFIFOSize = 0;
} else
break;
hisr = 0;
- ReadInterrupt8723BSdio(padapter, &hisr);
+ ReadInterrupt8723BSdio(adapter, &hisr);
hisr &= SDIO_HISR_RX_REQUEST;
if (!hisr)
break;
@@ -1172,38 +1139,37 @@ void sd_int_dpc(struct adapter *padapter)
}
}
-void sd_int_hdl(struct adapter *padapter)
+void sd_int_hdl(struct adapter *adapter)
{
- struct hal_com_data *phal;
+ struct hal_com_data *hal;
if (
- (padapter->bDriverStopped == true) ||
- (padapter->bSurpriseRemoved == true)
+ (adapter->bDriverStopped) || (adapter->bSurpriseRemoved)
)
return;
- phal = GET_HAL_DATA(padapter);
+ hal = GET_HAL_DATA(adapter);
- phal->sdio_hisr = 0;
- ReadInterrupt8723BSdio(padapter, &phal->sdio_hisr);
+ hal->sdio_hisr = 0;
+ ReadInterrupt8723BSdio(adapter, &hal->sdio_hisr);
- if (phal->sdio_hisr & phal->sdio_himr) {
+ if (hal->sdio_hisr & hal->sdio_himr) {
u32 v32;
- phal->sdio_hisr &= phal->sdio_himr;
+ hal->sdio_hisr &= hal->sdio_himr;
/* clear HISR */
- v32 = phal->sdio_hisr & MASK_SDIO_HISR_CLEAR;
+ v32 = hal->sdio_hisr & MASK_SDIO_HISR_CLEAR;
if (v32) {
- SdioLocalCmd52Write4Byte(padapter, SDIO_REG_HISR, v32);
+ SdioLocalCmd52Write4Byte(adapter, SDIO_REG_HISR, v32);
}
- sd_int_dpc(padapter);
+ sd_int_dpc(adapter);
} else {
RT_TRACE(_module_hci_ops_c_, _drv_err_,
("%s: HISR(0x%08x) and HIMR(0x%08x) not match!\n",
- __func__, phal->sdio_hisr, phal->sdio_himr));
+ __func__, hal->sdio_hisr, hal->sdio_himr));
}
}
@@ -1217,27 +1183,27 @@ void sd_int_hdl(struct adapter *padapter)
/* */
/* Created by Roger, 2011.01.28. */
/* */
-u8 HalQueryTxBufferStatus8723BSdio(struct adapter *padapter)
+u8 HalQueryTxBufferStatus8723BSdio(struct adapter *adapter)
{
- struct hal_com_data *phal;
- u32 NumOfFreePage;
- /* _irqL irql; */
+ struct hal_com_data *hal;
+ u32 numof_free_page;
+ /* _irql irql; */
- phal = GET_HAL_DATA(padapter);
+ hal = GET_HAL_DATA(adapter);
- NumOfFreePage = SdioLocalCmd53Read4Byte(padapter, SDIO_REG_FREE_TXPG);
+ numof_free_page = SdioLocalCmd53Read4Byte(adapter, SDIO_REG_FREE_TXPG);
- /* spin_lock_bh(&phal->SdioTxFIFOFreePageLock); */
+ /* spin_lock_bh(&hal->SdioTxFIFOFreePageLock); */
- memcpy(phal->SdioTxFIFOFreePage, &NumOfFreePage, 4);
+ memcpy(hal->SdioTxFIFOFreePage, &numof_free_page, 4);
RT_TRACE(_module_hci_ops_c_, _drv_notice_,
("%s: Free page for HIQ(%#x), MIDQ(%#x), LOWQ(%#x), PUBQ(%#x)\n",
__func__,
- phal->SdioTxFIFOFreePage[HI_QUEUE_IDX],
- phal->SdioTxFIFOFreePage[MID_QUEUE_IDX],
- phal->SdioTxFIFOFreePage[LOW_QUEUE_IDX],
- phal->SdioTxFIFOFreePage[PUBLIC_QUEUE_IDX]));
- /* spin_unlock_bh(&phal->SdioTxFIFOFreePageLock); */
+ hal->SdioTxFIFOFreePage[HI_QUEUE_IDX],
+ hal->SdioTxFIFOFreePage[MID_QUEUE_IDX],
+ hal->SdioTxFIFOFreePage[LOW_QUEUE_IDX],
+ hal->SdioTxFIFOFreePage[PUBLIC_QUEUE_IDX]));
+ /* spin_unlock_bh(&hal->SdioTxFIFOFreePageLock); */
return true;
}
@@ -1246,19 +1212,19 @@ u8 HalQueryTxBufferStatus8723BSdio(struct adapter *padapter)
/* Description: */
/* Query SDIO Local register to get the current number of TX OQT Free Space. */
/* */
-u8 HalQueryTxOQTBufferStatus8723BSdio(struct adapter *padapter)
+u8 HalQueryTxOQTBufferStatus8723BSdio(struct adapter *adapter)
{
- struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct hal_com_data *haldata = GET_HAL_DATA(adapter);
- pHalData->SdioTxOQTFreeSpace = SdioLocalCmd52Read1Byte(padapter, SDIO_REG_OQT_FREE_PG);
+ haldata->SdioTxOQTFreeSpace = SdioLocalCmd52Read1Byte(adapter, SDIO_REG_OQT_FREE_PG);
return true;
}
#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
-u8 RecvOnePkt(struct adapter *padapter, u32 size)
+u8 RecvOnePkt(struct adapter *adapter, u32 size)
{
- struct recv_buf *precvbuf;
- struct dvobj_priv *psddev;
+ struct recv_buf *recvbuf;
+ struct dvobj_priv *sddev;
PSDIO_DATA psdio_data;
struct sdio_func *func;
@@ -1266,22 +1232,22 @@ u8 RecvOnePkt(struct adapter *padapter, u32 size)
DBG_871X("+%s: size: %d+\n", __func__, size);
- if (padapter == NULL) {
- DBG_871X(KERN_ERR "%s: padapter is NULL!\n", __func__);
+ if (!adapter) {
+ DBG_871X(KERN_ERR "%s: adapter is NULL!\n", __func__);
return false;
}
- psddev = adapter_to_dvobj(padapter);
- psdio_data = &psddev->intf_data;
+ sddev = adapter_to_dvobj(adapter);
+ psdio_data = &sddev->intf_data;
func = psdio_data->func;
if (size) {
sdio_claim_host(func);
- precvbuf = sd_recv_rxfifo(padapter, size);
+ recvbuf = sd_recv_rxfifo(adapter, size);
- if (precvbuf) {
+ if (recvbuf) {
/* printk("Completed Recv One Pkt.\n"); */
- sd_rxhandler(padapter, precvbuf);
+ sd_rxhandler(adapter, recvbuf);
res = true;
} else {
res = false;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 3fca0c2d4c8d..cc18d0ad7d7b 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -371,7 +371,7 @@ static char *translate_scan(struct adapter *padapter,
u8 *wpsie_ptr = NULL;
uint wps_ielen = 0;
- u8 *ie_ptr = pnetwork->network.IEs + ie_offset;
+ u8 *ie_ptr;
total_ielen = pnetwork->network.IELength - ie_offset;
if (pnetwork->network.Reserved[0] == 2) { /* Probe Request */
@@ -967,7 +967,7 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
break;
default :
- ret = -EINVAL;;
+ ret = -EINVAL;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("\n Mode: %s is not supported \n", iw_operation_mode[wrqu->mode]));
goto exit;
}
diff --git a/drivers/staging/rtlwifi/Kconfig b/drivers/staging/rtlwifi/Kconfig
index cb3a29ae764b..b6b03950987b 100644
--- a/drivers/staging/rtlwifi/Kconfig
+++ b/drivers/staging/rtlwifi/Kconfig
@@ -6,16 +6,6 @@ config R8822BE
This is the staging driver for Realtek RTL8822BE 802.11ac PCIe
wireless network adapters.
-config RTLHALMAC_ST
- tristate
- depends on R8822BE
- default m
-
-config RTLPHYDM_ST
- tristate
- depends on R8822BE
- default m
-
config RTLWIFI_DEBUG_ST
boolean
depends on R8822BE
diff --git a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c
index 52620b72cfa9..493011a54e64 100644
--- a/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -800,7 +800,7 @@ static void halbtc_display_wifi_status(struct btc_coexist *btcoexist,
u32 wifi_link_status = 0x0;
bool bt_hs_on = false, under_ips = false, under_lps = false;
bool low_power = false, dc_mode = false;
- u8 wifi_chnl = 0, wifi_hs_chnl = 0, fw_ps_state;
+ u8 wifi_chnl = 0, wifi_hs_chnl = 0;
u8 ap_num = 0;
wifi_link_status = halbtc_get_wifi_link_status(btcoexist);
@@ -856,7 +856,6 @@ static void halbtc_display_wifi_status(struct btc_coexist *btcoexist,
dc_mode = true; /*TODO*/
under_ips = rtlpriv->psc.inactive_pwrstate == ERFOFF ? 1 : 0;
under_lps = rtlpriv->psc.dot11_psmode == EACTIVE ? 0 : 1;
- fw_ps_state = 0;
low_power = 0; /*TODO*/
seq_printf(m, "\n %-35s = %s%s%s%s",
"Power Status",
@@ -1644,26 +1643,9 @@ void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
{
- u8 stack_op_type;
-
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
btcoexist->statistics.cnt_stack_operation_notify++;
- if (btcoexist->manual_control)
- return;
-
- if ((type == HCI_BT_OP_INQUIRY_START) ||
- (type == HCI_BT_OP_PAGING_START) ||
- (type == HCI_BT_OP_PAIRING_START)) {
- stack_op_type = BTC_STACK_OP_INQ_PAGE_PAIR_START;
- } else if ((type == HCI_BT_OP_INQUIRY_FINISH) ||
- (type == HCI_BT_OP_PAGING_SUCCESS) ||
- (type == HCI_BT_OP_PAGING_UNSUCCESS) ||
- (type == HCI_BT_OP_PAIRING_FINISH)) {
- stack_op_type = BTC_STACK_OP_INQ_PAGE_PAIR_FINISH;
- } else {
- stack_op_type = BTC_STACK_OP_NONE;
- }
}
void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
diff --git a/drivers/staging/rtlwifi/cam.c b/drivers/staging/rtlwifi/cam.c
index 9c8c907cb48e..ca1c9e36d976 100644
--- a/drivers/staging/rtlwifi/cam.c
+++ b/drivers/staging/rtlwifi/cam.c
@@ -181,7 +181,7 @@ void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
u32 ul_command;
u32 ul_content;
- u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u32 ul_enc_algo;
switch (rtlpriv->sec.pairwise_enc_algorithm) {
case WEP40_ENCRYPTION:
@@ -221,7 +221,7 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
u32 ul_command;
u32 ul_content;
- u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u32 ul_encalgo;
u8 entry_i;
switch (rtlpriv->sec.pairwise_enc_algorithm) {
diff --git a/drivers/staging/rtlwifi/core.c b/drivers/staging/rtlwifi/core.c
index b00e51df984f..3ec039498208 100644
--- a/drivers/staging/rtlwifi/core.c
+++ b/drivers/staging/rtlwifi/core.c
@@ -509,15 +509,13 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct timeval ts;
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
if (WARN_ON(!wow))
return -EINVAL;
/* to resolve s4 can not wake up*/
- do_gettimeofday(&ts);
- rtlhal->last_suspend_sec = ts.tv_sec;
+ rtlhal->last_suspend_sec = ktime_get_real_seconds();
if ((ppsc->wo_wlan_mode & WAKE_ON_PATTERN_MATCH) && wow->n_patterns)
_rtl_add_wowlan_patterns(hw, wow);
@@ -536,7 +534,6 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct timeval ts;
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
rtlhal->driver_is_goingto_unload = false;
@@ -544,8 +541,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
rtlhal->wake_from_pnp_sleep = true;
/* to resovle s4 can not wake up*/
- do_gettimeofday(&ts);
- if (ts.tv_sec - rtlhal->last_suspend_sec < 5)
+ if (ktime_get_real_seconds() - rtlhal->last_suspend_sec < 5)
return -1;
rtl_op_start(hw);
@@ -1820,7 +1816,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
u8 faversion, u8 interface_type,
struct wlan_pwr_cfg pwrcfgcmd[])
{
- struct wlan_pwr_cfg cfg_cmd = {0};
+ struct wlan_pwr_cfg cfg_cmd;
bool polling_bit = false;
u32 ary_idx = 0;
u8 value = 0;
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index f45487122517..483ea85943c3 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -464,6 +464,8 @@ bool rtl8822b_halmac_cb_write_data_rsvd_page(struct rtl_priv *rtlpriv, u8 *buf,
int count;
skb = dev_alloc_skb(size);
+ if (!skb)
+ return false;
memcpy((u8 *)skb_put(skb, size), buf, size);
if (!_rtl8822be_send_bcn_or_cmd_packet(rtlpriv->hw, skb, BEACON_QUEUE))
diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h
index eb91c130b245..ca0243fa2e66 100644
--- a/drivers/staging/rtlwifi/wifi.h
+++ b/drivers/staging/rtlwifi/wifi.h
@@ -1670,7 +1670,7 @@ struct rtl_hal {
bool enter_pnp_sleep;
bool wake_from_pnp_sleep;
bool wow_enabled;
- __kernel_time_t last_suspend_sec;
+ time64_t last_suspend_sec;
u32 wowlan_fwsize;
u8 *wowlan_firmware;
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index 7cdce87f3051..821256b95e22 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -2821,6 +2821,7 @@ BUILD_FAIL:
int reset_ms_card(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
+ int seg_no = ms_card->total_block / 512 - 1;
int retval;
memset(ms_card, 0, sizeof(struct ms_info));
@@ -2863,7 +2864,7 @@ int reset_ms_card(struct rtsx_chip *chip)
/* Build table for the last segment,
* to check if L2P table block exists, erasing it
*/
- retval = ms_build_l2p_tbl(chip, ms_card->total_block / 512 - 1);
+ retval = ms_build_l2p_tbl(chip, seg_no);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
return STATUS_FAIL;
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 89e2cfe7d1cc..70e0b8623110 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -275,23 +275,6 @@ static int rtsx_acquire_irq(struct rtsx_dev *dev)
return 0;
}
-int rtsx_read_pci_cfg_byte(u8 bus, u8 dev, u8 func, u8 offset, u8 *val)
-{
- struct pci_dev *pdev;
- u8 data;
- u8 devfn = (dev << 3) | func;
-
- pdev = pci_get_bus_and_slot(bus, devfn);
- if (!pdev)
- return -1;
-
- pci_read_config_byte(pdev, offset, &data);
- if (val)
- *val = data;
-
- return 0;
-}
-
#ifdef CONFIG_PM
/*
* power management
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
index 575e5734f2a5..62e467c5a6d7 100644
--- a/drivers/staging/rts5208/rtsx.h
+++ b/drivers/staging/rts5208/rtsx.h
@@ -174,8 +174,6 @@ static inline void get_current_time(u8 *timeval_buf, int buf_len)
/* struct scsi_cmnd transfer buffer access utilities */
enum xfer_buf_dir {TO_XFER_BUF, FROM_XFER_BUF};
-int rtsx_read_pci_cfg_byte(u8 bus, u8 dev, u8 func, u8 offset, u8 *val);
-
#define _MSG_TRACE
#include "trace.h"
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
index 55764e16b93a..900be444acf9 100644
--- a/drivers/staging/rts5208/sd.h
+++ b/drivers/staging/rts5208/sd.h
@@ -118,7 +118,7 @@
#define SUPPORT_MAX_POWER_PERMANCE 0x10000000
#define SUPPORT_1V8 0x01000000
-#define SWTICH_NO_ERR 0x00
+#define SWITCH_NO_ERR 0x00
#define CARD_NOT_EXIST 0x01
#define SPEC_NOT_SUPPORT 0x02
#define CHECK_MODE_ERR 0x03
diff --git a/drivers/staging/sm750fb/TODO b/drivers/staging/sm750fb/TODO
index a3a877d90066..f710ab15abfe 100644
--- a/drivers/staging/sm750fb/TODO
+++ b/drivers/staging/sm750fb/TODO
@@ -6,8 +6,8 @@ TODO:
- check on hardware effects of removal of USE_HW_I2C and USE_DVICHIP (these two
are supposed to be sample code which is given here if someone wants to
use those functionalities)
-- move it to drivers/video/fbdev
-- modify the code for drm framework
+- must be ported to the atomic kms framework in the drm subsystem (which will
+ give you a basic fbdev driver for free)
Please send any patches to
Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index c787a74c4f9c..4b34a083f5cf 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -62,8 +62,6 @@ unsigned short sii164GetDeviceID(void)
return deviceID;
}
-
-
/* DVI.C will handle all SiI164 chip stuffs and try it best to make code minimal and useful */
/*
@@ -239,10 +237,6 @@ long sii164InitChip(unsigned char edgeSelect,
return -1;
}
-
-
-
-
/* below sii164 function is not necessary */
#ifdef SII164_FULL_FUNCTIONS
@@ -402,5 +396,3 @@ void sii164ClearInterrupt(void)
#endif
#endif
-
-
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index 2e9a88cd6af3..862e7bf27353 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -12,7 +12,6 @@ enum sii164_hot_plug_mode {
SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
};
-
/* Silicon Image SiI164 chip prototype */
long sii164InitChip(unsigned char edgeSelect,
unsigned char busSelect,
@@ -28,7 +27,6 @@ long sii164InitChip(unsigned char edgeSelect,
unsigned short sii164GetVendorID(void);
unsigned short sii164GetDeviceID(void);
-
#ifdef SII164_FULL_FUNCTIONS
void sii164ResetChip(void);
char *sii164GetChipString(void);
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index ffd114a6d09b..a8c79864ee4b 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -185,29 +185,29 @@ int hw_sm750_output_setMode(struct lynxfb_output *output,
struct fb_fix_screeninfo *fix)
{
int ret;
- disp_output_t dispSet;
+ disp_output_t disp_set;
int channel;
ret = 0;
- dispSet = 0;
+ disp_set = 0;
channel = *output->channel;
if (sm750_get_chip_type() != SM750LE) {
if (channel == sm750_primary) {
pr_info("primary channel\n");
if (output->paths & sm750_panel)
- dispSet |= do_LCD1_PRI;
+ disp_set |= do_LCD1_PRI;
if (output->paths & sm750_crt)
- dispSet |= do_CRT_PRI;
+ disp_set |= do_CRT_PRI;
} else {
pr_info("secondary channel\n");
if (output->paths & sm750_panel)
- dispSet |= do_LCD1_SEC;
+ disp_set |= do_LCD1_SEC;
if (output->paths & sm750_crt)
- dispSet |= do_CRT_SEC;
+ disp_set |= do_CRT_SEC;
}
- ddk750_setLogicalDispOut(dispSet);
+ ddk750_setLogicalDispOut(disp_set);
} else {
/* just open DISPLAY_CONTROL_750LE register bit 3:0 */
u32 reg;
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index 6137fa83c609..461f131644a2 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/console.h>
#include <linux/types.h>
#include <linux/wait.h>
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index 294c74b47224..cd029968462f 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/* fakekey.c
* Functions for simulating keypresses.
*
* Copyright (C) 2010 the Speakup Team
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/slab.h>
diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c
index 4e6e5daba50c..5f1bda37f86d 100644
--- a/drivers/staging/speakup/keyhelp.c
+++ b/drivers/staging/speakup/keyhelp.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/* speakup_keyhelp.c
* help module for speakup
*
*written by David Borowski.
*
* Copyright (C) 2003 David Borowski.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/keyboard.h>
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index ca85476e3ff7..f1f90222186b 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Speakup kobject implementation
*
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index aae868509e13..cf1259059776 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* speakup.c
* review functions for the speakup screen review package.
* originally written by: Kirk Reiser and Andy Berdan.
@@ -6,16 +7,6 @@
*
* Copyright (C) 1998 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 66061b5c3427..0ed1fefee0e9 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -64,13 +64,8 @@ int speakup_set_selection(struct tty_struct *tty)
ps = spk_ys * vc->vc_size_row + (spk_xs << 1);
pe = spk_ye * vc->vc_size_row + (spk_xe << 1);
- if (ps > pe) {
- /* make sel_start <= sel_end */
- int tmp = ps;
-
- ps = pe;
- pe = tmp;
- }
+ if (ps > pe) /* make sel_start <= sel_end */
+ swap(ps, pe);
if (spk_sel_cons != vc_cons[fg_console].d) {
speakup_clear_selection();
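The selection.c hunk replaces the open-coded three-line exchange with the kernel's swap() macro, which hides the temporary behind typeof(). A minimal userspace sketch of the same pattern, assuming the usual typeof-and-temporary definition (the real one lives in the kernel tree):

#include <stdio.h>

/* sketch of the kernel's swap() macro: exchange two lvalues of the
 * same type via a hidden temporary */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	int ps = 9, pe = 3;

	if (ps > pe)	/* make sel_start <= sel_end */
		swap(ps, pe);
	printf("%d %d\n", ps, pe);	/* prints "3 9" */
	return 0;
}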
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 9cfc8142a318..177a2988641c 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/interrupt.h>
#include <linux/ioport.h>
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index a041441766aa..28519754b2f0 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,15 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
* this code is specifically written as a driver for the speakup screen review
* package and is not a general device driver.
* This driver is for the Aicom Accent PC internal synthesizer.
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index 43315849b7b6..3a863dc61286 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* this code is specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index dcf0c3b59fdd..0877b4044c28 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* this code is specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index 45b5721441ba..e6a6a9665d8f 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c
index 402b0fbfb94d..76dfa3f7c058 100644
--- a/drivers/staging/speakup/speakup_bns.c
+++ b/drivers/staging/speakup/speakup_bns.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* this code is specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index 4310c2c276c4..3741c0fcf5bb 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 7a8df7dc1e38..303f393d3f2f 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* This is the DECtalk PC speakup driver
*
@@ -14,16 +15,6 @@
* Copyright (c) 2003 David Borowski <david575@golden.net>
*
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index 5d6a861c9b1e..2ea22a2eb5f9 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index 8999e3eb5c26..f8cb83c9b82e 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* specifically written as a driver for the speakup screen review
* package; it is not a general device driver.
* This driver is for the RC Systems DoubleTalk PC internal synthesizer.
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index ea3b2911cab9..a30d60450bd5 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -7,16 +8,6 @@
* Copyright (C) 2003 David Borowski.
* Copyright (C) 2007 Samuel Thibault.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index d3203f8fc3d0..de76183932e1 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* written by David Borowski
*
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* specifically written as a driver for the speakup screen review
* package; it is not a general device driver.
* This driver is for the Keynote Gold internal synthesizer.
diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c
index 95efaab73813..3c59519a871f 100644
--- a/drivers/staging/speakup/speakup_ltlk.c
+++ b/drivers/staging/speakup/speakup_ltlk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index d99daf69e501..0e74d09e18ea 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/* speakup_soft.c - speakup driver to register and make available
* a user space device for software synthesizers. Written by: Kirk
* Reiser <kirk@braille.uwo.ca>
*
* Copyright (C) 2003 Kirk Reiser.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* this code is specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
@@ -326,10 +316,10 @@ static ssize_t softsynth_write(struct file *fp, const char __user *buf,
return count;
}
-static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait)
+static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
{
unsigned long flags;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(fp, &speakup_event, wait);
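The softsynth_poll() hunk is part of the tree-wide __poll_t conversion: __poll_t is a sparse-checked bitwise type, so a poll handler returning it must build its mask from the EPOLL* constants rather than plain ints. A minimal sketch of the resulting shape, where the queue and flag names are illustrative:

#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_event);	/* illustrative wait queue */
static bool example_data_ready;			/* illustrative readiness flag */

static __poll_t example_poll(struct file *fp, struct poll_table_struct *wait)
{
	__poll_t ret = 0;

	poll_wait(fp, &example_event, wait);
	if (example_data_ready)
		ret = EPOLLIN | EPOLLRDNORM;
	return ret;
}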
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index 1037aa0d085a..6e933bf1de2e 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index e160034e4a68..a7326f226a5e 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
@@ -5,16 +6,6 @@
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 046040ac074c..00430437eb4c 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/* spk_priv.h
* review functions for the speakup screen review package.
* originally written by: Kirk Reiser and Andy Berdan.
@@ -6,16 +7,6 @@
*
* Copyright (C) 1998 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
-
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _SPEAKUP_PRIVATE_H
#define _SPEAKUP_PRIVATE_H
diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h
index c95b68ebd8e7..cc99fcd1bc6e 100644
--- a/drivers/staging/speakup/spk_priv_keyinfo.h
+++ b/drivers/staging/speakup/spk_priv_keyinfo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/* spk_priv.h
* review functions for the speakup screen review package.
* originally written by: Kirk Reiser and Andy Berdan.
@@ -6,16 +7,6 @@
*
* Copyright (C) 1998 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _SPEAKUP_KEYINFO_H
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index 513cebbd161c..5aa3ffa3772d 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index aac29c816d09..c06e6a810999 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/ctype.h> /* for isdigit() and friends */
#include <linux/fs.h>
diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c
index 8c64f1ada6e0..2fc75e60fbac 100644
--- a/drivers/staging/speakup/thread.c
+++ b/drivers/staging/speakup/thread.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kthread.h>
#include <linux/wait.h>
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index d37d24e26641..321405532a8e 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/ctype.h>
#include "spk_types.h"
#include "spk_priv.h"
diff --git a/drivers/staging/typec/tcpci.c b/drivers/staging/typec/tcpci.c
index b6abaf79ef0b..9bd4412356c9 100644
--- a/drivers/staging/typec/tcpci.c
+++ b/drivers/staging/typec/tcpci.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2015-2017 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* USB Type-C Port Controller Interface.
*/
@@ -47,7 +38,7 @@ static inline struct tcpci *tcpc_to_tcpci(struct tcpc_dev *tcpc)
}
static int tcpci_read16(struct tcpci *tcpci, unsigned int reg,
- unsigned int *val)
+ u16 *val)
{
return regmap_raw_read(tcpci->regmap, reg, val, sizeof(u16));
}
@@ -285,15 +276,15 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc,
const struct pd_message *msg)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
- unsigned int reg, cnt, header;
+ u16 header = msg ? le16_to_cpu(msg->header) : 0;
+ unsigned int reg, cnt;
int ret;
- cnt = msg ? pd_header_cnt(msg->header) * 4 : 0;
+ cnt = msg ? pd_header_cnt(header) * 4 : 0;
ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2);
if (ret < 0)
return ret;
- header = msg ? msg->header : 0;
ret = tcpci_write16(tcpci, TCPC_TX_HDR, header);
if (ret < 0)
return ret;
@@ -356,7 +347,7 @@ static int tcpci_init(struct tcpc_dev *tcpc)
static irqreturn_t tcpci_irq(int irq, void *dev_id)
{
struct tcpci *tcpci = dev_id;
- unsigned int status, reg;
+ u16 status;
tcpci_read16(tcpci, TCPC_ALERT, &status);
@@ -372,6 +363,8 @@ static irqreturn_t tcpci_irq(int irq, void *dev_id)
tcpm_cc_change(tcpci->port);
if (status & TCPC_ALERT_POWER_STATUS) {
+ unsigned int reg;
+
regmap_read(tcpci->regmap, TCPC_POWER_STATUS_MASK, &reg);
/*
@@ -387,11 +380,12 @@ static irqreturn_t tcpci_irq(int irq, void *dev_id)
if (status & TCPC_ALERT_RX_STATUS) {
struct pd_message msg;
unsigned int cnt;
+ u16 header;
regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
- tcpci_read16(tcpci, TCPC_RX_HDR, &reg);
- msg.header = reg;
+ tcpci_read16(tcpci, TCPC_RX_HDR, &header);
+ msg.header = cpu_to_le16(header);
if (WARN_ON(cnt > sizeof(msg.payload)))
cnt = sizeof(msg.payload);
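The tcpci type changes all serve the same end: struct pd_message carries its header on the wire as __le16, while the TCPC's 16-bit registers are read into plain u16 variables, so the driver now converts explicitly at the boundary instead of laundering values through unsigned int. A sketch of the two directions, reusing the names from the hunks above:

	u16 header;	/* CPU byte order */

	/* RX: register value (CPU order after tcpci_read16()) stored
	 * back in wire order for struct pd_message */
	tcpci_read16(tcpci, TCPC_RX_HDR, &header);
	msg.header = cpu_to_le16(header);

	/* TX: wire-order header converted to CPU order before the
	 * byte count is derived from it */
	header = msg ? le16_to_cpu(msg->header) : 0;
	cnt = msg ? pd_header_cnt(header) * 4 : 0;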
diff --git a/drivers/staging/typec/tcpci.h b/drivers/staging/typec/tcpci.h
index 10b04c8723da..fdfb06cc3b86 100644
--- a/drivers/staging/typec/tcpci.h
+++ b/drivers/staging/typec/tcpci.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2015-2017 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* USB Type-C Port Controller Interface.
*/
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
index 4f1f5e624604..c27dab3b610f 100644
--- a/drivers/staging/unisys/Kconfig
+++ b/drivers/staging/unisys/Kconfig
@@ -3,15 +3,11 @@
#
menuconfig UNISYSSPAR
bool "Unisys SPAR driver support"
- depends on X86_64 && !UML
- select PCI
- select ACPI
---help---
Support for the Unisys SPAR drivers
if UNISYSSPAR
-source "drivers/staging/unisys/visorbus/Kconfig"
source "drivers/staging/unisys/visornic/Kconfig"
source "drivers/staging/unisys/visorinput/Kconfig"
source "drivers/staging/unisys/visorhba/Kconfig"
diff --git a/drivers/staging/unisys/Makefile b/drivers/staging/unisys/Makefile
index 20eb098538d3..e45f44b64202 100644
--- a/drivers/staging/unisys/Makefile
+++ b/drivers/staging/unisys/Makefile
@@ -1,7 +1,6 @@
#
# Makefile for Unisys SPAR drivers
#
-obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_UNISYS_VISORNIC) += visornic/
obj-$(CONFIG_UNISYS_VISORINPUT) += visorinput/
obj-$(CONFIG_UNISYS_VISORHBA) += visorhba/
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 5cd407ca2251..45c785d80ce4 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2010 - 2016 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
#ifndef __IOCHANNEL_H__
@@ -43,8 +33,7 @@
#include <linux/uuid.h>
#include <linux/skbuff.h>
-
-#include "visorchannel.h"
+#include <linux/visorbus.h>
/*
* Must increment these whenever you insert or delete fields within this channel
diff --git a/drivers/staging/unisys/include/visorchannel.h b/drivers/staging/unisys/include/visorchannel.h
deleted file mode 100644
index 33945749c8b6..000000000000
--- a/drivers/staging/unisys/include/visorchannel.h
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#ifndef __VISORCHANNEL_H__
-#define __VISORCHANNEL_H__
-
-#include <linux/types.h>
-#include <linux/uuid.h>
-
-#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E')
-
-/*
- * enum channel_serverstate
- * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state.
- * @CHANNELSRV_READY: Channel has been initialized by server.
- */
-enum channel_serverstate {
- CHANNELSRV_UNINITIALIZED = 0,
- CHANNELSRV_READY = 1
-};
-
-/*
- * enum channel_clientstate
- * @CHANNELCLI_DETACHED:
- * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it
- * unless given TBD* explicit request
- * (should actually be < DETACHED).
- * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach.
- * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time.
- * @CHANNELCLI_BUSY: Client either wants to use or is using channel.
- * @CHANNELCLI_OWNED: "No worries" state - client can access channel
- * anytime.
- */
-enum channel_clientstate {
- CHANNELCLI_DETACHED = 0,
- CHANNELCLI_DISABLED = 1,
- CHANNELCLI_ATTACHING = 2,
- CHANNELCLI_ATTACHED = 3,
- CHANNELCLI_BUSY = 4,
- CHANNELCLI_OWNED = 5
-};
-
-/*
- * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that
- * a guest can look at the FeatureFlags in the io channel, and configure the
- * driver to use interrupts or not based on this setting. All feature bits for
- * all channels should be defined here. The io channel feature bits are defined
- * below.
- */
-#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1)
-#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3)
-#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4)
-#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5)
-#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
-
-/*
- * struct channel_header - Common Channel Header
- * @signature: Signature.
- * @legacy_state: DEPRECATED - being replaced by.
- * @header_size: sizeof(struct channel_header).
- * @size: Total size of this channel in bytes.
- * @features: Flags to modify behavior.
- * @chtype: Channel type: data, bus, control, etc..
- * @partition_handle: ID of guest partition.
- * @handle: Device number of this channel in client.
- * @ch_space_offset: Offset in bytes to channel specific area.
- * @version_id: Struct channel_header Version ID.
- * @partition_index: Index of guest partition.
- * @zone_uuid: Guid of Channel's zone.
- * @cli_str_offset: Offset from channel header to null-terminated
- * ClientString (0 if ClientString not present).
- * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this
- * channel.
- * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
- * ServerStateUp, ServerStateDown, etc).
- * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel.
- * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>.
- * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
- * ServerStateUp, ServerStateDown, etc).
- * @srv_state: CHANNEL_SERVERSTATE.
- * @cli_error_boot: Bits to indicate err states for boot clients, so err
- * messages can be throttled.
- * @cli_error_os: Bits to indicate err states for OS clients, so err
- * messages can be throttled.
- * @filler: Pad out to 128 byte cacheline.
- * @recover_channel: Please add all new single-byte values below here.
- */
-struct channel_header {
- u64 signature;
- u32 legacy_state;
- /* SrvState, CliStateBoot, and CliStateOS below */
- u32 header_size;
- u64 size;
- u64 features;
- guid_t chtype;
- u64 partition_handle;
- u64 handle;
- u64 ch_space_offset;
- u32 version_id;
- u32 partition_index;
- guid_t zone_guid;
- u32 cli_str_offset;
- u32 cli_state_boot;
- u32 cmd_state_cli;
- u32 cli_state_os;
- u32 ch_characteristic;
- u32 cmd_state_srv;
- u32 srv_state;
- u8 cli_error_boot;
- u8 cli_error_os;
- u8 filler[1];
- u8 recover_channel;
-} __packed;
-
-#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0)
-
-/*
- * struct signal_queue_header - Subheader for the Signal Type variation of the
- * Common Channel.
- * @version: SIGNAL_QUEUE_HEADER Version ID.
- * @chtype: Queue type: storage, network.
- * @size: Total size of this queue in bytes.
- * @sig_base_offset: Offset to signal queue area.
- * @features: Flags to modify behavior.
- * @num_sent: Total # of signals placed in this queue.
- * @num_overflows: Total # of inserts failed due to full queue.
- * @signal_size: Total size of a signal for this queue.
- * @max_slots: Max # of slots in queue, 1 slot is always empty.
- * @max_signals: Max # of signals in queue (MaxSignalSlots-1).
- * @head: Queue head signal #.
- * @num_received: Total # of signals removed from this queue.
- * @tail: Queue tail signal.
- * @reserved1: Reserved field.
- * @reserved2: Reserved field.
- * @client_queue:
- * @num_irq_received: Total # of Interrupts received. This is incremented by the
- * ISR in the guest windows driver.
- * @num_empty: Number of times that visor_signal_remove is called and
- * returned Empty Status.
- * @errorflags: Error bits set during SignalReinit to denote trouble with
- * client's fields.
- * @filler: Pad out to 64 byte cacheline.
- */
-struct signal_queue_header {
- /* 1st cache line */
- u32 version;
- u32 chtype;
- u64 size;
- u64 sig_base_offset;
- u64 features;
- u64 num_sent;
- u64 num_overflows;
- u32 signal_size;
- u32 max_slots;
- u32 max_signals;
- u32 head;
- /* 2nd cache line */
- u64 num_received;
- u32 tail;
- u32 reserved1;
- u64 reserved2;
- u64 client_queue;
- u64 num_irq_received;
- u64 num_empty;
- u32 errorflags;
- u8 filler[12];
-} __packed;
-
-/* VISORCHANNEL Guids */
-/* {414815ed-c58c-11da-95a9-00e08161165f} */
-#define VISOR_VHBA_CHANNEL_GUID \
- GUID_INIT(0x414815ed, 0xc58c, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-#define VISOR_VHBA_CHANNEL_GUID_STR \
- "414815ed-c58c-11da-95a9-00e08161165f"
-#endif
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 0bcd3acb7b0c..167e98f8688e 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2012 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
#include <linux/debugfs.h>
@@ -19,12 +9,12 @@
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/visorbus.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
-#include "visorbus.h"
#include "iochannel.h"
/* The Send and Receive Buffers of the IO Queue may both be full */
diff --git a/drivers/staging/unisys/visorinput/ultrainputreport.h b/drivers/staging/unisys/visorinput/ultrainputreport.h
index 53975a09535f..67dac430ce0c 100644
--- a/drivers/staging/unisys/visorinput/ultrainputreport.h
+++ b/drivers/staging/unisys/visorinput/ultrainputreport.h
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
#ifndef __SPAR_ULTRAINPUTREPORT_H__
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 450f003743c0..d8048e48658f 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
/*
@@ -25,8 +16,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uuid.h>
+#include <linux/visorbus.h>
-#include "visorbus.h"
#include "ultrainputreport.h"
/* Keyboard channel {c73416d0-b0b8-44af-b304-9d2ae99f1b3d} */
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 6d8239163ba5..92dceb557886 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
/* This driver lives in a spar partition, and registers to ethernet io
@@ -25,8 +16,8 @@
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
+#include <linux/visorbus.h>
-#include "visorbus.h"
#include "iochannel.h"
#define VISORNIC_INFINITE_RSP_WAIT 0
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
index 8aed248db6e2..43c39eca4ae1 100644
--- a/drivers/staging/vboxvideo/vbox_fb.c
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -170,7 +170,7 @@ static int vboxfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
sizes->fb_height);
- info->screen_base = bo->kmap.virtual;
+ info->screen_base = (char __iomem *)bo->kmap.virtual;
info->screen_size = size;
#ifdef CONFIG_DRM_KMS_FB_HELPER
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
index 80bd039fa08e..973b3bcc04b1 100644
--- a/drivers/staging/vboxvideo/vbox_main.c
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -61,7 +61,7 @@ void vbox_enable_accel(struct vbox_private *vbox)
if (vbox->vbva_info[i].vbva)
continue;
- vbva = (void *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
+ vbva = (void __force *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
if (!vbva_enable(&vbox->vbva_info[i],
vbox->guest_pool, vbva, i)) {
/* very old host or driver error. */
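Both vboxvideo hunks are sparse address-space fixes: screen_base is declared char __iomem *, so the assignment needs an explicit __iomem cast, and where the driver deliberately treats an __iomem region as an ordinary pointer, __force documents the intent and silences the warning. A minimal sketch under an assumed ioremap()ed region (address and size are illustrative):

#include <linux/io.h>

static void __iomem *vram;

static void example_map(void)
{
	char __iomem *screen_base;
	void *cpu_view;

	vram = ioremap(0xfe000000, 0x1000);

	/* staying inside the __iomem address space: a plain cast */
	screen_base = (char __iomem *)vram;

	/* deliberately crossing address spaces: tell sparse with __force */
	cpu_view = (void __force *)vram;

	(void)screen_base;
	(void)cpu_view;
}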
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
index f484bb055df7..ec468d5719b1 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
@@ -1,16 +1,5 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/init.h>
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index 7e68b3e28246..5f7551fbf5cf 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -1,16 +1,5 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#include <linux/interrupt.h>
#include <linux/slab.h>
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 3c6f1d91d22d..a4a48f31f1a3 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -1,16 +1,5 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#include <linux/device.h>
#include <sound/core.h>
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
index 8f2d508183b2..045d577fe4f8 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
@@ -1,16 +1,5 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#include <linux/platform_device.h>
@@ -443,7 +432,6 @@ static struct platform_driver bcm2835_alsa0_driver = {
#endif
.driver = {
.name = "bcm2835_audio",
- .owner = THIS_MODULE,
.of_match_table = snd_bcm2835_of_match_table,
},
};
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index f1e43e45fd67..dc6ec915f9f5 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -1,16 +1,5 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#ifndef __SOUND_ARM_BCM2835_H
#define __SOUND_ARM_BCM2835_H
diff --git a/drivers/staging/vc04_services/bcm2835-audio/vc_vchi_audioserv_defs.h b/drivers/staging/vc04_services/bcm2835-audio/vc_vchi_audioserv_defs.h
index da96f1bc2516..1a7f0884ac9c 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/vc_vchi_audioserv_defs.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/vc_vchi_audioserv_defs.h
@@ -1,16 +1,5 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2011 Broadcom Corporation. All rights reserved. */
#ifndef _VC_AUDIO_DEFS_H_
#define _VC_AUDIO_DEFS_H_
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index be936b8fe317..d2262275a870 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
@@ -328,11 +325,9 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
pr_debug("Grab another frame");
vchiq_mmal_port_parameter_set(
instance,
- dev->capture.
- camera_port,
+ dev->capture.camera_port,
MMAL_PARAMETER_CAPTURE,
- &dev->capture.
- frame_count,
+ &dev->capture.frame_count,
sizeof(dev->capture.frame_count));
}
} else {
@@ -343,37 +338,17 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
if (dev->capture.frame_count) {
if (dev->capture.vc_start_timestamp != -1 &&
pts != 0) {
- struct timeval timestamp;
+ ktime_t timestamp;
s64 runtime_us = pts -
dev->capture.vc_start_timestamp;
- u32 div = 0;
- u32 rem = 0;
-
- div =
- div_u64_rem(runtime_us, USEC_PER_SEC, &rem);
- timestamp.tv_sec =
- dev->capture.kernel_start_ts.tv_sec + div;
- timestamp.tv_usec =
- dev->capture.kernel_start_ts.tv_usec + rem;
-
- if (timestamp.tv_usec >=
- USEC_PER_SEC) {
- timestamp.tv_sec++;
- timestamp.tv_usec -=
- USEC_PER_SEC;
- }
+ timestamp = ktime_add_us(dev->capture.kernel_start_ts,
+ runtime_us);
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Convert start time %d.%06d and %llu "
- "with offset %llu to %d.%06d\n",
- (int)dev->capture.kernel_start_ts.
- tv_sec,
- (int)dev->capture.kernel_start_ts.
- tv_usec,
+ "Convert start time %llu and %llu with offset %llu to %llu\n",
+ ktime_to_ns(dev->capture.kernel_start_ts),
dev->capture.vc_start_timestamp, pts,
- (int)timestamp.tv_sec,
- (int)timestamp.tv_usec);
- buf->vb.vb2_buf.timestamp = timestamp.tv_sec * 1000000000ULL +
- timestamp.tv_usec * 1000ULL;
+ ktime_to_ns(timestamp));
+ buf->vb.vb2_buf.timestamp = ktime_to_ns(timestamp);
} else {
buf->vb.vb2_buf.timestamp = ktime_get_ns();
}
@@ -387,11 +362,9 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
"Grab another frame as buffer has EOS");
vchiq_mmal_port_parameter_set(
instance,
- dev->capture.
- camera_port,
+ dev->capture.camera_port,
MMAL_PARAMETER_CAPTURE,
- &dev->capture.
- frame_count,
+ &dev->capture.frame_count,
sizeof(dev->capture.frame_count));
}
} else {
@@ -547,7 +520,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
"Start time %lld size %d\n",
dev->capture.vc_start_timestamp, parameter_size);
- v4l2_get_timestamp(&dev->capture.kernel_start_ts);
+ dev->capture.kernel_start_ts = ktime_get();
/* enable the camera port */
dev->capture.port->cb_ctx = dev;
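The timestamp rework in the two hunks above drops the open-coded timeval second/microsecond arithmetic in favour of ktime_t, a single s64 nanosecond count, so the offset-plus-carry logic collapses into ktime_add_us() and ktime_to_ns(). A minimal sketch of the new bookkeeping, where kernel_start_ts comes from ktime_get() at stream start and runtime_us is pts minus vc_start_timestamp, exactly as in the diff:

#include <linux/ktime.h>

/* one s64 nanosecond value replaces the old timeval pair and its
 * manual microsecond carry */
static u64 example_buffer_ns(ktime_t kernel_start_ts, s64 runtime_us)
{
	ktime_t timestamp = ktime_add_us(kernel_start_ts, runtime_us);

	return ktime_to_ns(timestamp);	/* suits vb2_buf.timestamp (u64 ns) */
}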
@@ -555,8 +528,8 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
vchiq_mmal_port_enable(dev->instance, dev->capture.port, buffer_cb);
if (ret) {
v4l2_err(&dev->v4l2_dev,
- "Failed to enable capture port - error %d. "
- "Disabling camera port again\n", ret);
+ "Failed to enable capture port - error %d. Disabling camera port again\n",
+ ret);
vchiq_mmal_port_disable(dev->instance,
dev->capture.camera_port);
@@ -1213,8 +1186,8 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
port->current_buffer.size =
(f->fmt.pix.sizeimage <
(100 << 10))
- ? (100 << 10) : f->fmt.pix.
- sizeimage;
+ ? (100 << 10)
+ : f->fmt.pix.sizeimage;
}
v4l2_dbg(1, bcm2835_v4l2_debug,
&dev->v4l2_dev,
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
index 404037476bc5..2b5679eb5b4a 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
@@ -92,7 +89,7 @@ struct bm2835_mmal_dev {
/* VC start timestamp for streaming */
s64 vc_start_timestamp;
/* Kernel start timestamp for streaming */
- struct timeval kernel_start_ts;
+ ktime_t kernel_start_ts;
struct vchiq_mmal_port *port; /* port being used for capture */
/* camera port being used for capture */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index 77a5d6f4e1eb..0736214e1422 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
index 840fd139e033..800e4e7e5f96 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-encodings.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-encodings.h
index e71d9600b278..129203597f91 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-encodings.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-encodings.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-common.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-common.h
index 66e8a6edf628..ec8455639d49 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-common.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-common.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-format.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-format.h
index 24b002e8df0c..c9d6fbe25fe4 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-format.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-format.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h
index 84a0f4b717ef..dd4b4ce72081 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
index 5a1b2a7d8eb0..d1c57edbe2b8 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
index e7300229842d..1607bc4c0347 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index 6ea7fb0ea50e..a91ef6ea29ce 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
@@ -618,8 +615,8 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
struct mmal_msg_context *msg_context;
u32 handle;
- pr_debug("buffer_to_host_cb: instance:%p msg:%p msg_len:%d\n",
- instance, msg, msg_len);
+ pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
+ __func__, instance, msg, msg_len);
if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
handle = msg->u.buffer_from_host.drvbuf.client_context;
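[The hunk above swaps a hard-coded function name in the format string for __func__. A minimal sketch, with hypothetical names, of why this is preferred:

        static void example_cb(void *instance, void *msg, int msg_len)
        {
                /* __func__ expands to "example_cb" at compile time, so the
                 * log prefix stays correct if the function is ever renamed.
                 */
                pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
                         __func__, instance, msg, msg_len);
        }
]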
@@ -1360,8 +1357,7 @@ static int port_action_handle(struct vchiq_mmal_instance *instance,
ret = -rmsg->u.port_action_reply.status;
- pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)" \
- " connect component:0x%x connect port:%d\n",
+ pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
__func__,
ret, port->component->handle, port->handle,
port_action_type_names[action_type],
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
index db39900c9d91..b1f22b6dca10 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Broadcom BM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
* Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
* Dave Stevenson <dsteve@broadcom.com>
* Simon Mellor <simellor@broadcom.com>
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 315b49c1de3b..b59ef14890aa 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -224,8 +224,7 @@ vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
platform_state = (struct vchiq_2835_state *)state->platform_state;
- if (!platform_state->inited)
- BUG();
+ WARN_ON_ONCE(!platform_state->inited);
return &platform_state->arm_state;
}
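[The hunk above downgrades BUG(), which halts the kernel on the spot, to WARN_ON_ONCE(), which logs a single backtrace the first time the condition is true and then lets execution continue. Since WARN_ON_ONCE() returns the evaluated condition, one common pattern is to gate an early exit on it; a sketch with a hypothetical context struct:

        static struct arm_state *example_get_state(struct example_ctx *ctx)
        {
                /* One backtrace on first failure, then keep running
                 * instead of taking the whole machine down.
                 */
                if (WARN_ON_ONCE(!ctx->inited))
                        return NULL;
                return &ctx->arm_state;
        }
]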
@@ -485,8 +484,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
__func__, actual_pages, num_pages);
/* This is probably due to the process being killed */
- while (actual_pages > 0)
- {
+ while (actual_pages > 0) {
actual_pages--;
put_page(pages[actual_pages]);
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 411539f8ff8c..c2c440009cac 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -682,8 +682,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (user_service->close_pending &&
down_interruptible(&user_service->close_event))
status = VCHIQ_RETRY;
- }
- else
+ } else
ret = -EINVAL;
} break;
@@ -708,8 +707,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (user_service->close_pending &&
down_interruptible(&user_service->close_event))
status = VCHIQ_RETRY;
- }
- else
+ } else
ret = -EINVAL;
} break;
@@ -1171,8 +1169,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
USER_SERVICE_T *user_service =
(USER_SERVICE_T *)service->base.userdata;
close_delivered(user_service);
- }
- else
+ } else
ret = -EINVAL;
} break;
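[The three vchiq_ioctl hunks above are pure coding-style fixes: per Documentation/process/coding-style.rst, else sits on the same line as the closing brace rather than on its own line. The canonical shape, sketched:

        if (user_service) {
                /* ... */
        } else {
                ret = -EINVAL;
        }
]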
@@ -1810,8 +1807,7 @@ vchiq_release(struct inode *inode, struct file *file)
instance->completion_remove &
(MAX_COMPLETIONS - 1)];
service = completion->service_userdata;
- if (completion->reason == VCHIQ_SERVICE_CLOSED)
- {
+ if (completion->reason == VCHIQ_SERVICE_CLOSED) {
USER_SERVICE_T *user_service =
service->base.userdata;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index ecff92bae200..5d28fff46557 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -66,8 +66,7 @@ struct vchiq_openack_payload {
short version;
};
-enum
-{
+enum {
QMFLAGS_IS_BLOCKING = (1 << 0),
QMFLAGS_NO_MUTEX_LOCK = (1 << 1),
QMFLAGS_NO_MUTEX_UNLOCK = (1 << 2)
@@ -212,7 +211,8 @@ find_service_by_port(VCHIQ_STATE_T *state, int localport)
VCHIQ_SERVICE_T *
find_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle) {
+ VCHIQ_SERVICE_HANDLE_T handle)
+{
VCHIQ_SERVICE_T *service;
spin_lock(&service_spinlock);
@@ -235,7 +235,8 @@ find_service_for_instance(VCHIQ_INSTANCE_T instance,
VCHIQ_SERVICE_T *
find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle) {
+ VCHIQ_SERVICE_HANDLE_T handle)
+{
VCHIQ_SERVICE_T *service;
spin_lock(&service_spinlock);
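[Two related style rules are at work in the vchiq_core.c hunks above: the opening brace cuddles with the keyword for enums, structs, and control statements, but the opening brace of a function definition goes on its own line. A compact sketch of both:

        enum {
                EXAMPLE_FLAG = (1 << 0),
        };

        static int example_fn(int x)
        {
                return x;
        }
]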
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
index 34f746db19cd..43c89a08bda9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
@@ -64,11 +64,6 @@ static VCHIQ_STATUS_T
vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
unsigned int size, VCHIQ_BULK_DIR_T dir);
-/****************************************************************************
-*
-* vchiq_initialise
-*
-***************************************************************************/
#define VCHIQ_INIT_RETRIES 10
VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
{
@@ -80,7 +75,9 @@ VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
/* VideoCore may not be ready due to boot up timing.
- It may never be ready if kernel and firmware are mismatched, so don't block forever. */
+ * It may never be ready if kernel and firmware are mismatched, so don't
+ * block forever.
+ */
for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
state = vchiq_get_state();
if (state)
@@ -93,7 +90,8 @@ VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
goto failed;
} else if (i > 0) {
vchiq_log_warning(vchiq_core_log_level,
- "%s: videocore initialized after %d retries\n", __func__, i);
+ "%s: videocore initialized after %d retries\n",
+ __func__, i);
}
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
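[The reflowed comment above records the design choice: the firmware may come up late, or never (kernel/firmware mismatch), so initialisation polls with a fixed retry cap rather than blocking indefinitely. A self-contained sketch of the pattern; the cap, sleep window, and error path are illustrative, not taken from the driver:

        #define INIT_RETRIES 10                 /* illustrative cap */
        VCHIQ_STATE_T *state = NULL;
        int i;

        for (i = 0; i < INIT_RETRIES; i++) {
                state = vchiq_get_state();
                if (state)
                        break;
                usleep_range(500, 600);         /* let the firmware finish booting */
        }
        if (!state)
                return VCHIQ_ERROR;             /* never became ready: give up */
]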
@@ -120,12 +118,6 @@ failed:
}
EXPORT_SYMBOL(vchiq_initialise);
-/****************************************************************************
-*
-* vchiq_shutdown
-*
-***************************************************************************/
-
VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
{
VCHIQ_STATUS_T status;
@@ -168,23 +160,11 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
}
EXPORT_SYMBOL(vchiq_shutdown);
-/****************************************************************************
-*
-* vchiq_is_connected
-*
-***************************************************************************/
-
static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
{
return instance->connected;
}
-/****************************************************************************
-*
-* vchiq_connect
-*
-***************************************************************************/
-
VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
{
VCHIQ_STATUS_T status;
@@ -214,12 +194,6 @@ failed:
}
EXPORT_SYMBOL(vchiq_connect);
-/****************************************************************************
-*
-* vchiq_add_service
-*
-***************************************************************************/
-
VCHIQ_STATUS_T vchiq_add_service(
VCHIQ_INSTANCE_T instance,
const VCHIQ_SERVICE_PARAMS_T *params,
@@ -259,12 +233,6 @@ VCHIQ_STATUS_T vchiq_add_service(
}
EXPORT_SYMBOL(vchiq_add_service);
-/****************************************************************************
-*
-* vchiq_open_service
-*
-***************************************************************************/
-
VCHIQ_STATUS_T vchiq_open_service(
VCHIQ_INSTANCE_T instance,
const VCHIQ_SERVICE_PARAMS_T *params,
@@ -414,8 +382,9 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
if ((bulk->data != data) ||
(bulk->size != size)) {
/* This is not a retry of the previous one.
- ** Cancel the signal when the transfer
- ** completes. */
+ * Cancel the signal when the transfer
+ * completes.
+ */
spin_lock(&bulk_waiter_spinlock);
bulk->userdata = NULL;
spin_unlock(&bulk_waiter_spinlock);
@@ -441,7 +410,8 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
if (bulk) {
/* Cancel the signal when the transfer
- ** completes. */
+ * completes.
+ */
spin_lock(&bulk_waiter_spinlock);
bulk->userdata = NULL;
spin_unlock(&bulk_waiter_spinlock);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index d465e1cf5db9..29984f9795c7 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -800,8 +800,7 @@ int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_ve
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
- if (service)
- {
+ if (service) {
VCHIQ_STATUS_T status;
status = vchiq_get_peer_version(service->handle, peer_version);
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index a3d4610fbdbe..3242dee8246f 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* VMEbus User access driver
*
@@ -7,12 +8,6 @@
* Based on work by:
* Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
- *
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -134,7 +129,7 @@ static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
if (copied < 0)
return (int)copied;
- if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
+ if (copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
return -EFAULT;
return copied;
@@ -146,7 +141,7 @@ static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
if (count > image[minor].size_buf)
count = image[minor].size_buf;
- if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
+ if (copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
return -EFAULT;
return vme_master_write(image[minor].resource, image[minor].kern_buf,
@@ -159,7 +154,7 @@ static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
void *image_ptr;
image_ptr = image[minor].kern_buf + *ppos;
- if (__copy_to_user(buf, image_ptr, (unsigned long)count))
+ if (copy_to_user(buf, image_ptr, (unsigned long)count))
return -EFAULT;
return count;
@@ -171,7 +166,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
void *image_ptr;
image_ptr = image[minor].kern_buf + *ppos;
- if (__copy_from_user(image_ptr, buf, (unsigned long)count))
+ if (copy_from_user(image_ptr, buf, (unsigned long)count))
return -EFAULT;
return count;
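[The vme_user hunks above drop the double-underscore copy helpers. copy_to_user()/copy_from_user() perform the access_ok() range check internally and return the number of bytes left uncopied, so the caller only needs an -EFAULT check; the __copy_to_user() variants skip that check and are only safe when the caller has already validated the pointer. A minimal sketch with hypothetical names:

        static ssize_t example_to_user(char __user *buf, const void *kbuf,
                                       size_t count)
        {
                /* A non-zero return means some bytes could not be copied. */
                if (copy_to_user(buf, kbuf, count))
                        return -EFAULT;
                return count;
        }
]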
@@ -573,7 +568,7 @@ static int vme_user_probe(struct vme_dev *vdev)
* by all windows.
*/
image[i].resource = vme_slave_request(vme_user_bridge,
- VME_A24, VME_SCT);
+ VME_A24, VME_SCT);
if (!image[i].resource) {
dev_warn(&vdev->dev,
"Unable to allocate slave resource\n");
@@ -582,7 +577,8 @@ static int vme_user_probe(struct vme_dev *vdev)
}
image[i].size_buf = PCI_BUF_SIZE;
image[i].kern_buf = vme_alloc_consistent(image[i].resource,
- image[i].size_buf, &image[i].pci_buf);
+ image[i].size_buf,
+ &image[i].pci_buf);
if (!image[i].kern_buf) {
dev_warn(&vdev->dev,
"Unable to allocate memory for buffer\n");
@@ -600,7 +596,8 @@ static int vme_user_probe(struct vme_dev *vdev)
for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
/* XXX Need to properly request attributes */
image[i].resource = vme_master_request(vme_user_bridge,
- VME_A32, VME_SCT, VME_D32);
+ VME_A32, VME_SCT,
+ VME_D32);
if (!image[i].resource) {
dev_warn(&vdev->dev,
"Unable to allocate master resource\n");
@@ -645,7 +642,8 @@ static int vme_user_probe(struct vme_dev *vdev)
num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
image[i].device = device_create(vme_user_sysfs_class, NULL,
- MKDEV(VME_MAJOR, i), NULL, name, num);
+ MKDEV(VME_MAJOR, i), NULL,
+ name, num);
if (IS_ERR(image[i].device)) {
dev_info(&vdev->dev, "Error creating sysfs device\n");
err = PTR_ERR(image[i].device);
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 44dfa5421374..f0b163473426 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: baseband.c
*
* Purpose: Implement functions to access baseband
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index feaf222574ee..b8ee33dcb352 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: baseband.h
*
* Purpose: Implement functions to access baseband
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 14034e342aa6..ea0a4b57852c 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: card.c
* Purpose: Provide functions to setup NIC operation mode
* Functions:
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 1a04dbb57d42..487039a64587 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: card.h
*
* Purpose: Provide functions to setup NIC operation mode
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index ab89956511a0..dec6f0f23b88 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: channel.c
*
*/
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 8fe70760e548..53f623a4af65 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: channel.h
- *
*/
#ifndef _CHANNEL_H_
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 2fee6e759ad8..b4a0037b40c1 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: desc.h
*
* Purpose:The header file of descriptor
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 3ae40d846a09..2f9e9219e8c8 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: device.h
*
* Purpose: MAC Data structure
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index 0298ea923f97..73f904b51b96 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: device_cfg.h
*
* Purpose: Driver configuration header
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1123b4f1e1d6..0dc902022a91 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: device_main.c
*
* Purpose: driver entry for initial, open, close, tx and rx.
@@ -547,7 +538,7 @@ static void device_init_rd0_ring(struct vnt_private *priv)
for (i = 0; i < priv->opts.rx_descs0;
i ++, curr += sizeof(struct vnt_rx_desc)) {
desc = &priv->aRD0Ring[i];
- desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_ATOMIC);
+ desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
if (!device_alloc_rx_buf(priv, desc))
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
@@ -571,7 +562,7 @@ static void device_init_rd1_ring(struct vnt_private *priv)
for (i = 0; i < priv->opts.rx_descs1;
i ++, curr += sizeof(struct vnt_rx_desc)) {
desc = &priv->aRD1Ring[i];
- desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_ATOMIC);
+ desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
if (!device_alloc_rx_buf(priv, desc))
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
@@ -629,7 +620,7 @@ static void device_init_td0_ring(struct vnt_private *priv)
for (i = 0; i < priv->opts.tx_descs[0];
i++, curr += sizeof(struct vnt_tx_desc)) {
desc = &priv->apTD0Rings[i];
- desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_ATOMIC);
+ desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
@@ -654,7 +645,7 @@ static void device_init_td1_ring(struct vnt_private *priv)
for (i = 0; i < priv->opts.tx_descs[1];
i++, curr += sizeof(struct vnt_tx_desc)) {
desc = &priv->apTD1Rings[i];
- desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_ATOMIC);
+ desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;
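[The GFP_ATOMIC to GFP_KERNEL switches above are context fixes: these rings are built from process context during device setup, where sleeping is allowed, so GFP_KERNEL, which may wait and trigger reclaim, is the right flag. GFP_ATOMIC is for code that must not sleep and draws on limited emergency reserves. The rule in two lines:

        info = kzalloc(sizeof(*info), GFP_KERNEL); /* process context: may sleep */
        info = kzalloc(sizeof(*info), GFP_ATOMIC); /* IRQ/spinlock context: must not sleep */
]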
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 9b3fa779258a..088d2d9dbc21 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: dpc.c
*
* Purpose: handle dpc rx functions
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index 6e75fa9c5618..93af4220605f 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: dpc.h
*
* Purpose:
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index d891993b20cf..4d6b48fd119d 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: key.c
*
* Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index a5024611af60..0942d8703f98 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: key.h
*
* Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index f7550b215f72..4750863c1bb7 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: mac.c
*
* Purpose: MAC routines
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index db401e32ae23..b8ab09434773 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: mac.h
*
* Purpose: MAC routines
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 716d2a80f840..d6c581b31569 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: power.c
*
* Purpose: Handles 802.11 power management functions
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index f360c5966523..2ec40045fddb 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: power.h
*
* Purpose: Handles 802.11 power management functions
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index edf7db9d53b3..03b0d56dbe9e 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: rf.c
*
* Purpose: rf function code
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index ba222301d49d..bfce5a89657d 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: rf.h
*
* Purpose:
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 3efe19a1b13f..9aa4d5262aaa 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: rxtx.c
*
* Purpose: handle WMAC/802.3/802.11 rx & tx functions
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 095258923ebd..08db848613f0 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: rxtx.h
*
* Purpose:
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 635f271595f6..df57d120ed30 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: srom.c
*
* Purpose:Implement functions to access eeprom
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index 6e03ab6dfa9d..577f20dc4308 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: srom.h
*
* Purpose: Implement functions to access eeprom
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
index d6a0563ad55c..6795b5d74cfc 100644
--- a/drivers/staging/vt6655/tmacro.h
+++ b/drivers/staging/vt6655/tmacro.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: tmacro.h
*
* Purpose: define basic common types and macros
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index 9806b5989014..61b3e568ff9a 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* File: upc.h
*
* Purpose: Macros to access device
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 882fe54ce41d..b29ba237fa29 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: baseband.c
*
* Purpose: Implement functions to access baseband
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index fe1c25c64cca..a907e3026012 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: baseband.h
*
* Purpose: Implement functions to access baseband
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 4fd9cd64c6e8..501f482b41c4 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: card.c
* Purpose: Provide functions to setup NIC operation mode
* Functions:
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index 7f08cda27e2c..0a91d9ba4688 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: card.h
*
* Purpose: Provide functions to setup NIC operation mode
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index a4299f405d7f..5d57d34577f5 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: channel.c
*
* Purpose: Channel number mapping
diff --git a/drivers/staging/vt6656/channel.h b/drivers/staging/vt6656/channel.h
index 62f18a959098..6d0d2825d992 100644
--- a/drivers/staging/vt6656/channel.h
+++ b/drivers/staging/vt6656/channel.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: channel.h
*
* Purpose: Country Regulation Rules header file
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index 59e3071021bd..ac45ebb71195 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: desc.h
*
* Purpose:The header file of descriptor
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 74715c854856..a2feeb916836 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: device.h
*
* Purpose: MAC Data structure
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 655f0002f880..c3b5b1431048 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: dpc.c
*
* Purpose: handle dpc rx functions
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
index 5d0454f3af0e..ddd0cb710512 100644
--- a/drivers/staging/vt6656/dpc.h
+++ b/drivers/staging/vt6656/dpc.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: dpc.h
*
* Purpose:
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
index 093a6048bd22..38521c338917 100644
--- a/drivers/staging/vt6656/firmware.c
+++ b/drivers/staging/vt6656/firmware.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: baseband.c
*
* Purpose: Implement functions to access baseband
diff --git a/drivers/staging/vt6656/firmware.h b/drivers/staging/vt6656/firmware.h
index f753019c94c9..f30ae90cbb1f 100644
--- a/drivers/staging/vt6656/firmware.h
+++ b/drivers/staging/vt6656/firmware.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: firmware.h
*
* Purpose: Version and Release Information
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index c6ffbe0e2728..504424b19fcf 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: int.c
*
* Purpose: Handle USB interrupt endpoint
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index b5f1b4b02ce4..1e6ff925701a 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: int.h
*
* Purpose:
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index cc18cb141bff..91dede54cc1f 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: key.c
*
* Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
index cfc6c2131536..1306ff441b87 100644
--- a/drivers/staging/vt6656/key.h
+++ b/drivers/staging/vt6656/key.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: key.h
*
* Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 417fdad1f9ae..0b543854ea97 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: mac.c
*
* Purpose: MAC routines
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 29f37a0ff156..94e700fcd0b6 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: mac.h
*
* Purpose: MAC routines
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index cc6d8778fe5b..ccafcc2c87ac 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: main_usb.c
*
* Purpose: driver entry for initial, open, close, tx and rx.
@@ -437,11 +427,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
for (ii = 0; ii < priv->num_rcb; ii++) {
priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL);
- if (!priv->rcb[ii]) {
- dev_err(&priv->usb->dev,
- "failed to allocate rcb no %d\n", ii);
+ if (!priv->rcb[ii])
goto free_rx_tx;
- }
rcb = priv->rcb[ii];
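
    [Note on the vnt_alloc_bufs() hunk above: the per-allocation error message is
    dropped because a failing kzalloc(GFP_KERNEL) already triggers a page-allocator
    warning with a backtrace unless __GFP_NOWARN is passed, so a dev_err() at the
    call site only duplicates information; checkpatch flags these as possible
    unnecessary out-of-memory messages. The resulting pattern, names as in the hunk:

        priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL);
        if (!priv->rcb[ii])
                goto free_rx_tx;        /* allocator already warned */
    ]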
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index c466e0614bc4..7a086c72d5a8 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: power.c
*
* Purpose: Handles 802.11 power management functions
diff --git a/drivers/staging/vt6656/power.h b/drivers/staging/vt6656/power.h
index 859e75fc77ac..d5a3198206da 100644
--- a/drivers/staging/vt6656/power.h
+++ b/drivers/staging/vt6656/power.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: power.h
*
* Purpose: Handles 802.11 power management functions
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 3a9d19a0b842..18f75dcc65d2 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: rf.c
*
* Purpose: rf function code
diff --git a/drivers/staging/vt6656/rf.h b/drivers/staging/vt6656/rf.h
index c907a18047d2..f77866a9c177 100644
--- a/drivers/staging/vt6656/rf.h
+++ b/drivers/staging/vt6656/rf.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: rf.h
*
* Purpose:
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index a44abcce6fb4..26ca3fa29301 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: rxtx.c
*
* Purpose: handle WMAC/802.3/802.11 rx & tx functions
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index 1ba8647bea86..44698f41a234 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: rxtx.h
*
* Purpose:
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 23eaef458556..273176386a51 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: usbpipe.c
*
* Purpose: Handle USB control endpoint
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index 9fc5ac0ef6c1..5d7708fcf557 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: usbpipe.h
*
* Purpose:
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index b2fc17f1381b..3eb2f11a5de1 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: wcmd.c
*
* Purpose: Handles the management command interface functions
diff --git a/drivers/staging/vt6656/wcmd.h b/drivers/staging/vt6656/wcmd.h
index 727ec14380c4..4a96f4de980d 100644
--- a/drivers/staging/vt6656/wcmd.h
+++ b/drivers/staging/vt6656/wcmd.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
* File: wcmd.h
*
* Purpose: Handles the management command interface functions
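
    [Note: every vt6656 hunk above is the same mechanical change: the ten-line GPL
    notice is replaced by a single machine-parseable SPDX tag. GPL-2.0+ is the
    kernel's short identifier for "GPL version 2 or any later version", matching
    the wording being deleted, so the effective license is unchanged. Sketch of the
    resulting file top:

        // SPDX-License-Identifier: GPL-2.0+
        /*
         * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
         * All rights reserved.
         */

    One hedge: the kernel's license-rules documentation asks for the /* ... */
    comment style in headers (the // style is for .c files), so the tags added to
    the .h files here may be restyled by a follow-up.]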
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index 8cf886d32afb..e98fc8e93011 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -201,7 +201,7 @@ static inline u16 get_cap_info(u8 *data)
st = get_sub_type(data);
- if ((st == BEACON) || (st == PROBE_RSP))
+ if (st == BEACON || st == PROBE_RSP)
index += TIME_STAMP_LEN + BEACON_INTERVAL_LEN;
cap_info = data[index];
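
    [Note: the wilc1000 changes that follow are also checkpatch-driven. This hunk
    removes parentheses around individual comparisons: ==, >=, and <= bind tighter
    than && and ||, so both forms parse identically, and checkpatch reports the
    extra parens as "Unnecessary parentheses". For example:

        if ((st == BEACON) || (st == PROBE_RSP))        /* before */
        if (st == BEACON || st == PROBE_RSP)            /* after  */
    ]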
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index d69248a8c7b5..358354b3a2b4 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -202,7 +202,7 @@ struct host_if_msg {
};
struct join_bss_param {
- BSSTYPE_T bss_type;
+ enum bss_types bss_type;
u8 dtim_period;
u16 beacon_period;
u16 cap_info;
@@ -394,7 +394,7 @@ static void handle_set_operation_mode(struct wilc_vif *vif,
ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
- if ((hif_op_mode->mode) == IDLE_MODE)
+ if (hif_op_mode->mode == IDLE_MODE)
complete(&hif_driver_comp);
if (ret)
@@ -760,8 +760,8 @@ static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
hif_drv->usr_scan_req.scan_result = scan_info->result;
hif_drv->usr_scan_req.arg = scan_info->arg;
- if ((hif_drv->hif_state >= HOST_IF_SCANNING) &&
- (hif_drv->hif_state < HOST_IF_CONNECTED)) {
+ if (hif_drv->hif_state >= HOST_IF_SCANNING &&
+ hif_drv->hif_state < HOST_IF_CONNECTED) {
netdev_err(vif->ndev, "Already scan\n");
result = -EBUSY;
goto ERRORHANDLER;
@@ -1025,7 +1025,7 @@ static s32 Handle_Connect(struct wilc_vif *vif,
pu8CurrByte += MAX_SSID_LEN;
*(pu8CurrByte++) = INFRASTRUCTURE;
- if ((pstrHostIFconnectAttr->ch >= 1) && (pstrHostIFconnectAttr->ch <= 14)) {
+ if (pstrHostIFconnectAttr->ch >= 1 && pstrHostIFconnectAttr->ch <= 14) {
*(pu8CurrByte++) = pstrHostIFconnectAttr->ch;
} else {
netdev_err(vif->ndev, "Channel out of range\n");
@@ -1258,8 +1258,8 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif,
if (hif_drv->usr_scan_req.scan_result) {
wilc_parse_network_info(pstrRcvdNetworkInfo->buffer, &pstrNetworkInfo);
- if ((!pstrNetworkInfo) ||
- (!hif_drv->usr_scan_req.scan_result)) {
+ if (!pstrNetworkInfo ||
+ !hif_drv->usr_scan_req.scan_result) {
netdev_err(vif->ndev, "driver is null\n");
result = -EINVAL;
goto done;
@@ -1340,8 +1340,8 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
return -ENODEV;
}
- if ((hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) ||
- (hif_drv->hif_state == HOST_IF_CONNECTED) ||
+ if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP ||
+ hif_drv->hif_state == HOST_IF_CONNECTED ||
hif_drv->usr_scan_req.scan_result) {
if (!pstrRcvdGnrlAsyncInfo->buffer ||
!hif_drv->usr_conn_req.conn_result) {
@@ -1400,8 +1400,8 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
}
}
- if ((u8MacStatus == MAC_CONNECTED) &&
- (strConnectInfo.status != SUCCESSFUL_STATUSCODE)) {
+ if (u8MacStatus == MAC_CONNECTED &&
+ strConnectInfo.status != SUCCESSFUL_STATUSCODE) {
netdev_err(vif->ndev, "Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n");
eth_zero_addr(wilc_connected_ssid);
} else if (u8MacStatus == MAC_DISCONNECTED) {
@@ -1412,8 +1412,8 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
if (hif_drv->usr_conn_req.bssid) {
memcpy(strConnectInfo.bssid, hif_drv->usr_conn_req.bssid, 6);
- if ((u8MacStatus == MAC_CONNECTED) &&
- (strConnectInfo.status == SUCCESSFUL_STATUSCODE)) {
+ if (u8MacStatus == MAC_CONNECTED &&
+ strConnectInfo.status == SUCCESSFUL_STATUSCODE) {
memcpy(hif_drv->assoc_bssid,
hif_drv->usr_conn_req.bssid, ETH_ALEN);
}
@@ -1434,8 +1434,8 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
NULL,
hif_drv->usr_conn_req.arg);
- if ((u8MacStatus == MAC_CONNECTED) &&
- (strConnectInfo.status == SUCCESSFUL_STATUSCODE)) {
+ if (u8MacStatus == MAC_CONNECTED &&
+ strConnectInfo.status == SUCCESSFUL_STATUSCODE) {
wilc_set_power_mgmt(vif, 0, 0);
hif_drv->hif_state = HOST_IF_CONNECTED;
@@ -1864,8 +1864,8 @@ void wilc_resolve_disconnect_aberration(struct wilc_vif *vif)
{
if (!vif->hif_drv)
return;
- if ((vif->hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) ||
- (vif->hif_drv->hif_state == HOST_IF_CONNECTING))
+ if (vif->hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP ||
+ vif->hif_drv->hif_state == HOST_IF_CONNECTING)
wilc_disconnect(vif, 1);
}
@@ -2414,7 +2414,7 @@ static void Handle_SetMulticastFilter(struct wilc_vif *vif,
wid.id = (u16)WID_SETUP_MULTICAST_FILTER;
wid.type = WID_BIN;
- wid.size = sizeof(struct set_multicast) + ((strHostIfSetMulti->cnt) * ETH_ALEN);
+ wid.size = sizeof(struct set_multicast) + (strHostIfSetMulti->cnt * ETH_ALEN);
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
goto ERRORHANDLER;
@@ -3730,8 +3730,8 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
memcpy(add_sta_info, sta_param, sizeof(struct add_sta_param));
if (add_sta_info->rates_len > 0) {
add_sta_info->rates = kmemdup(sta_param->rates,
- add_sta_info->rates_len,
- GFP_KERNEL);
+ add_sta_info->rates_len,
+ GFP_KERNEL);
if (!add_sta_info->rates)
return -ENOMEM;
}
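
    [Note: two other cleanups ride along in host_interface.c. The join_bss_param
    hunk replaces the typedef'd BSSTYPE_T with the plain enum bss_types tag, in
    line with the kernel coding style's general dislike of typedefs for structs
    and enums. The kmemdup() hunk changes only whitespace, re-indenting the
    continuation arguments so they line up with the opening parenthesis:

        add_sta_info->rates = kmemdup(sta_param->rates,
                                      add_sta_info->rates_len,
                                      GFP_KERNEL);
    ]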
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 119f3459b5bb..d9725efe0537 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "wilc_wfi_cfgoperations.h"
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
@@ -694,7 +695,7 @@ static int wlan_initialize_threads(struct net_device *dev)
wilc = vif->wilc;
wilc->txq_thread = kthread_run(linux_wlan_txq_task, (void *)dev,
- "K_TXQ_TASK");
+ "K_TXQ_TASK");
if (IS_ERR(wilc->txq_thread)) {
netdev_err(dev, "couldn't create TXQ thread\n");
wilc->close = 0;
@@ -910,13 +911,13 @@ static void wilc_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
return;
- if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc.count) > WILC_MULTICAST_TABLE_SIZE) {
+ if (dev->flags & IFF_ALLMULTI ||
+ dev->mc.count > WILC_MULTICAST_TABLE_SIZE) {
wilc_setup_multicast_filter(vif, false, 0);
return;
}
- if ((dev->mc.count) == 0) {
+ if (dev->mc.count == 0) {
wilc_setup_multicast_filter(vif, true, 0);
return;
}
@@ -1029,7 +1030,7 @@ static int wilc_mac_close(struct net_device *ndev)
if (!hif_drv)
return 0;
- if ((wl->open_ifcs) > 0)
+ if (wl->open_ifcs > 0)
wl->open_ifcs--;
else
return 0;
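
    [A hedged side note on the multicast hunk earlier in this file: the driver
    still reaches into dev->mc directly. netdevice.h provides netdev_mc_count()
    and netdev_mc_empty() for exactly this, so a possible follow-up (a sketch,
    not part of this series) would be:

        if (dev->flags & IFF_ALLMULTI ||
            netdev_mc_count(dev) > WILC_MULTICAST_TABLE_SIZE) {
                wilc_setup_multicast_filter(vif, false, 0);
                return;
        }

        if (netdev_mc_empty(dev)) {
                wilc_setup_multicast_filter(vif, true, 0);
                return;
        }
    ]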
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index ce54864569c7..0deb61a21b27 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NewportMedia WiFi chipset driver test tools - wilc-debug
* Copyright (c) 2012 NewportMedia Inc.
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index 0189e3edbbbe..bb65b374c1ce 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) Atmel Corporation. All rights reserved.
*
@@ -374,7 +375,7 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
data = cpu_to_le32(data);
- if ((addr >= 0xf0) && (addr <= 0xff)) {
+ if (addr >= 0xf0 && addr <= 0xff) {
struct sdio_cmd52 cmd;
cmd.read_write = 1;
@@ -514,7 +515,7 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
int ret;
- if ((addr >= 0xf0) && (addr <= 0xff)) {
+ if (addr >= 0xf0 && addr <= 0xff) {
struct sdio_cmd52 cmd;
cmd.read_write = 0;
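
    [Context for the two wilc_sdio.c hunks: registers in the 0xf0-0xff window are
    reached with SDIO CMD52 (single-byte I/O) rather than the block-transfer path,
    which is why both the read and write routines special-case that range; the
    patch only strips the redundant parentheses from the range check. A
    hypothetical helper, were one to factor the check out:

        static bool wilc_sdio_use_cmd52(u32 addr)
        {
                return addr >= 0xf0 && addr <= 0xff;    /* per-byte CMD52 window */
        }
    ]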
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 5ef84410e0f2..8f71a6022721 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) Atmel Corporation. All rights reserved.
*
@@ -389,11 +390,11 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
#define NUM_DATA_BYTES (4)
#define NUM_CRC_BYTES (2)
#define NUM_DUMMY_BYTES (3)
- if ((cmd == CMD_RESET) ||
- (cmd == CMD_TERMINATE) ||
- (cmd == CMD_REPEAT)) {
+ if (cmd == CMD_RESET ||
+ cmd == CMD_TERMINATE ||
+ cmd == CMD_REPEAT) {
len2 = len + (NUM_SKIP_BYTES + NUM_RSP_BYTES + NUM_DUMMY_BYTES);
- } else if ((cmd == CMD_INTERNAL_READ) || (cmd == CMD_SINGLE_READ)) {
+ } else if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ) {
if (!g_spi.crc_off) {
len2 = len + (NUM_RSP_BYTES + NUM_DATA_HDR_BYTES + NUM_DATA_BYTES
+ NUM_CRC_BYTES + NUM_DUMMY_BYTES);
@@ -424,9 +425,9 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
/**
* Command/Control response
**/
- if ((cmd == CMD_RESET) ||
- (cmd == CMD_TERMINATE) ||
- (cmd == CMD_REPEAT)) {
+ if (cmd == CMD_RESET ||
+ cmd == CMD_TERMINATE ||
+ cmd == CMD_REPEAT) {
rix++; /* skip 1 byte */
}
@@ -452,8 +453,8 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
return N_FAIL;
}
- if ((cmd == CMD_INTERNAL_READ) || (cmd == CMD_SINGLE_READ) ||
- (cmd == CMD_DMA_READ) || (cmd == CMD_DMA_EXT_READ)) {
+ if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ ||
+ cmd == CMD_DMA_READ || cmd == CMD_DMA_EXT_READ) {
int retry;
/* u16 crc1, crc2; */
u8 crc[2];
@@ -479,7 +480,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
return N_RESET;
}
- if ((cmd == CMD_INTERNAL_READ) || (cmd == CMD_SINGLE_READ)) {
+ if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ) {
/**
* Read bytes
**/
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 028da1dc1b81..621810d70450 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -379,7 +379,7 @@ static void CfgScanResult(enum scan_event scan_event,
struct cfg80211_bss *bss = NULL;
priv = user_void;
- if (priv->bCfgScanning) {
+ if (priv->cfg_scanning) {
if (scan_event == SCAN_EVENT_NETWORK_FOUND) {
wiphy = priv->dev->ieee80211_ptr->wiphy;
@@ -399,8 +399,8 @@ static void CfgScanResult(enum scan_event scan_event,
return;
if (network_info->new_network) {
- if (priv->u32RcvdChCount < MAX_NUM_SCANNED_NETWORKS) {
- priv->u32RcvdChCount++;
+ if (priv->rcvd_ch_cnt < MAX_NUM_SCANNED_NETWORKS) {
+ priv->rcvd_ch_cnt++;
add_network_to_shadow(network_info, priv, join_params);
@@ -422,7 +422,7 @@ static void CfgScanResult(enum scan_event scan_event,
} else {
u32 i;
- for (i = 0; i < priv->u32RcvdChCount; i++) {
+ for (i = 0; i < priv->rcvd_ch_cnt; i++) {
if (memcmp(last_scanned_shadow[i].bssid, network_info->bssid, 6) == 0) {
last_scanned_shadow[i].rssi = network_info->rssi;
last_scanned_shadow[i].time_scan = jiffies;
@@ -436,21 +436,21 @@ static void CfgScanResult(enum scan_event scan_event,
mutex_lock(&priv->scan_req_lock);
- if (priv->pstrScanReq) {
+ if (priv->scan_req) {
struct cfg80211_scan_info info = {
.aborted = false,
};
- cfg80211_scan_done(priv->pstrScanReq, &info);
- priv->u32RcvdChCount = 0;
- priv->bCfgScanning = false;
- priv->pstrScanReq = NULL;
+ cfg80211_scan_done(priv->scan_req, &info);
+ priv->rcvd_ch_cnt = 0;
+ priv->cfg_scanning = false;
+ priv->scan_req = NULL;
}
mutex_unlock(&priv->scan_req_lock);
} else if (scan_event == SCAN_EVENT_ABORTED) {
mutex_lock(&priv->scan_req_lock);
- if (priv->pstrScanReq) {
+ if (priv->scan_req) {
struct cfg80211_scan_info info = {
.aborted = false,
};
@@ -458,9 +458,9 @@ static void CfgScanResult(enum scan_event scan_event,
update_scan_time();
refresh_scan(priv, false);
- cfg80211_scan_done(priv->pstrScanReq, &info);
- priv->bCfgScanning = false;
- priv->pstrScanReq = NULL;
+ cfg80211_scan_done(priv->scan_req, &info);
+ priv->cfg_scanning = false;
+ priv->scan_req = NULL;
}
mutex_unlock(&priv->scan_req_lock);
}
@@ -469,92 +469,92 @@ static void CfgScanResult(enum scan_event scan_event,
int wilc_connecting;
-static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
- struct connect_info *pstrConnectInfo,
- u8 u8MacStatus,
- struct disconnect_info *pstrDisconnectNotifInfo,
- void *pUserVoid)
+static void cfg_connect_result(enum conn_event conn_disconn_evt,
+ struct connect_info *conn_info,
+ u8 mac_status,
+ struct disconnect_info *disconn_info,
+ void *priv_data)
{
struct wilc_priv *priv;
struct net_device *dev;
- struct host_if_drv *pstrWFIDrv;
- u8 NullBssid[ETH_ALEN] = {0};
+ struct host_if_drv *wfi_drv;
+ u8 null_bssid[ETH_ALEN] = {0};
struct wilc *wl;
struct wilc_vif *vif;
wilc_connecting = 0;
- priv = pUserVoid;
+ priv = priv_data;
dev = priv->dev;
vif = netdev_priv(dev);
wl = vif->wilc;
- pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
+ wfi_drv = (struct host_if_drv *)priv->hif_drv;
- if (enuConnDisconnEvent == CONN_DISCONN_EVENT_CONN_RESP) {
- u16 u16ConnectStatus;
+ if (conn_disconn_evt == CONN_DISCONN_EVENT_CONN_RESP) {
+ u16 connect_status;
- u16ConnectStatus = pstrConnectInfo->status;
+ connect_status = conn_info->status;
- if ((u8MacStatus == MAC_DISCONNECTED) &&
- (pstrConnectInfo->status == SUCCESSFUL_STATUSCODE)) {
- u16ConnectStatus = WLAN_STATUS_UNSPECIFIED_FAILURE;
- wilc_wlan_set_bssid(priv->dev, NullBssid,
+ if (mac_status == MAC_DISCONNECTED &&
+ conn_info->status == SUCCESSFUL_STATUSCODE) {
+ connect_status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ wilc_wlan_set_bssid(priv->dev, null_bssid,
STATION_MODE);
eth_zero_addr(wilc_connected_ssid);
- if (!pstrWFIDrv->p2p_connect)
+ if (!wfi_drv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
netdev_err(dev, "Unspecified failure\n");
}
- if (u16ConnectStatus == WLAN_STATUS_SUCCESS) {
- bool bNeedScanRefresh = false;
+ if (connect_status == WLAN_STATUS_SUCCESS) {
+ bool scan_refresh = false;
u32 i;
- memcpy(priv->au8AssociatedBss, pstrConnectInfo->bssid, ETH_ALEN);
+ memcpy(priv->associated_bss, conn_info->bssid, ETH_ALEN);
for (i = 0; i < last_scanned_cnt; i++) {
if (memcmp(last_scanned_shadow[i].bssid,
- pstrConnectInfo->bssid,
+ conn_info->bssid,
ETH_ALEN) == 0) {
unsigned long now = jiffies;
if (time_after(now,
last_scanned_shadow[i].time_scan_cached +
(unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ))))
- bNeedScanRefresh = true;
+ scan_refresh = true;
break;
}
}
- if (bNeedScanRefresh)
+ if (scan_refresh)
refresh_scan(priv, true);
}
- cfg80211_connect_result(dev, pstrConnectInfo->bssid,
- pstrConnectInfo->req_ies, pstrConnectInfo->req_ies_len,
- pstrConnectInfo->resp_ies, pstrConnectInfo->resp_ies_len,
- u16ConnectStatus, GFP_KERNEL);
- } else if (enuConnDisconnEvent == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
+ cfg80211_connect_result(dev, conn_info->bssid,
+ conn_info->req_ies, conn_info->req_ies_len,
+ conn_info->resp_ies, conn_info->resp_ies_len,
+ connect_status, GFP_KERNEL);
+ } else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
wilc_optaining_ip = false;
p2p_local_random = 0x01;
p2p_recv_random = 0x00;
wilc_ie = false;
- eth_zero_addr(priv->au8AssociatedBss);
- wilc_wlan_set_bssid(priv->dev, NullBssid, STATION_MODE);
+ eth_zero_addr(priv->associated_bss);
+ wilc_wlan_set_bssid(priv->dev, null_bssid, STATION_MODE);
eth_zero_addr(wilc_connected_ssid);
- if (!pstrWFIDrv->p2p_connect)
+ if (!wfi_drv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
- if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
- pstrDisconnectNotifInfo->reason = 3;
- else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
- pstrDisconnectNotifInfo->reason = 1;
+ if (wfi_drv->IFC_UP && dev == wl->vif[1]->ndev)
+ disconn_info->reason = 3;
+ else if (!wfi_drv->IFC_UP && dev == wl->vif[1]->ndev)
+ disconn_info->reason = 1;
- cfg80211_disconnected(dev, pstrDisconnectNotifInfo->reason, pstrDisconnectNotifInfo->ie,
- pstrDisconnectNotifInfo->ie_len, false,
+ cfg80211_disconnected(dev, disconn_info->reason, disconn_info->ie,
+ disconn_info->ie_len, false,
GFP_KERNEL);
}
}
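
    [Note: the wilc_wfi_cfgoperations.c hunks are dominated by renames: the
    Hungarian-style prefixes (pstr, u8, u16, s32, b, enu, au8, str) go away in
    favour of plain lower-case names, with no change in behaviour. The recurring
    mappings, all taken from the hunks themselves:

        pstrWFIDrv      -> wfi_drv
        pstrConnectInfo -> conn_info
        u8MacStatus     -> mac_status
        NullBssid       -> null_bssid
        bCfgScanning    -> cfg_scanning
        s32Error        -> ret

    The s32Error -> ret rename keeps the s32 type; plain int would be the more
    idiomatic kernel type for an error return, but that is a separate cleanup.]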
@@ -585,7 +585,7 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct wilc_priv *priv;
u32 i;
- s32 s32Error = 0;
+ s32 ret = 0;
u8 au8ScanChanList[MAX_NUM_SCANNED_NETWORKS];
struct hidden_network strHiddenNetwork;
struct wilc_vif *vif;
@@ -593,13 +593,13 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- priv->pstrScanReq = request;
+ priv->scan_req = request;
- priv->u32RcvdChCount = 0;
+ priv->rcvd_ch_cnt = 0;
reset_shadow_found();
- priv->bCfgScanning = true;
+ priv->cfg_scanning = true;
if (request->n_channels <= MAX_NUM_SCANNED_NETWORKS) {
for (i = 0; i < request->n_channels; i++)
au8ScanChanList[i] = (u8)ieee80211_frequency_to_channel(request->channels[i]->center_freq);
@@ -622,56 +622,56 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
strHiddenNetwork.n_ssids -= 1;
}
}
- s32Error = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN,
- au8ScanChanList,
- request->n_channels,
- (const u8 *)request->ie,
- request->ie_len, CfgScanResult,
- (void *)priv, &strHiddenNetwork);
+ ret = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN,
+ au8ScanChanList,
+ request->n_channels,
+ (const u8 *)request->ie,
+ request->ie_len, CfgScanResult,
+ (void *)priv, &strHiddenNetwork);
} else {
- s32Error = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN,
- au8ScanChanList,
- request->n_channels,
- (const u8 *)request->ie,
- request->ie_len, CfgScanResult,
- (void *)priv, NULL);
+ ret = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN,
+ au8ScanChanList,
+ request->n_channels,
+ (const u8 *)request->ie,
+ request->ie_len, CfgScanResult,
+ (void *)priv, NULL);
}
} else {
netdev_err(priv->dev, "Requested scanned channels over\n");
}
- if (s32Error != 0)
- s32Error = -EBUSY;
+ if (ret != 0)
+ ret = -EBUSY;
- return s32Error;
+ return ret;
}
static int connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
- s32 s32Error = 0;
+ s32 ret = 0;
u32 i;
u32 sel_bssi_idx = UINT_MAX;
u8 u8security = NO_ENCRYPT;
- enum AUTHTYPE tenuAuth_type = ANY;
+ enum AUTHTYPE auth_type = ANY;
struct wilc_priv *priv;
- struct host_if_drv *pstrWFIDrv;
+ struct host_if_drv *wfi_drv;
struct network_info *pstrNetworkInfo = NULL;
struct wilc_vif *vif;
wilc_connecting = 1;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
+ wfi_drv = (struct host_if_drv *)priv->hif_drv;
if (!(strncmp(sme->ssid, "DIRECT-", 7)))
- pstrWFIDrv->p2p_connect = 1;
+ wfi_drv->p2p_connect = 1;
else
- pstrWFIDrv->p2p_connect = 0;
+ wfi_drv->p2p_connect = 0;
for (i = 0; i < last_scanned_cnt; i++) {
- if ((sme->ssid_len == last_scanned_shadow[i].ssid_len) &&
+ if (sme->ssid_len == last_scanned_shadow[i].ssid_len &&
memcmp(last_scanned_shadow[i].ssid,
sme->ssid,
sme->ssid_len) == 0) {
@@ -694,9 +694,9 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
if (sel_bssi_idx < last_scanned_cnt) {
pstrNetworkInfo = &last_scanned_shadow[sel_bssi_idx];
} else {
- s32Error = -ENOENT;
+ ret = -ENOENT;
wilc_connecting = 0;
- return s32Error;
+ return ret;
}
memset(priv->WILC_WFI_wep_key, 0, sizeof(priv->WILC_WFI_wep_key));
@@ -744,10 +744,10 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
else
u8security = ENCRYPT_ENABLED | WPA | AES;
} else {
- s32Error = -ENOTSUPP;
+ ret = -ENOTSUPP;
netdev_err(dev, "Not supported cipher\n");
wilc_connecting = 0;
- return s32Error;
+ return ret;
}
}
@@ -763,11 +763,11 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
switch (sme->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
- tenuAuth_type = OPEN_SYSTEM;
+ auth_type = OPEN_SYSTEM;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
- tenuAuth_type = SHARED_KEY;
+ auth_type = SHARED_KEY;
break;
default:
@@ -777,7 +777,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
if (sme->crypto.n_akm_suites) {
switch (sme->crypto.akm_suites[0]) {
case WLAN_AKM_SUITE_8021X:
- tenuAuth_type = IEEE8021;
+ auth_type = IEEE8021;
break;
default:
@@ -787,35 +787,35 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
curr_channel = pstrNetworkInfo->ch;
- if (!pstrWFIDrv->p2p_connect)
+ if (!wfi_drv->p2p_connect)
wlan_channel = pstrNetworkInfo->ch;
wilc_wlan_set_bssid(dev, pstrNetworkInfo->bssid, STATION_MODE);
- s32Error = wilc_set_join_req(vif, pstrNetworkInfo->bssid, sme->ssid,
- sme->ssid_len, sme->ie, sme->ie_len,
- CfgConnectResult, (void *)priv,
- u8security, tenuAuth_type,
- pstrNetworkInfo->ch,
- pstrNetworkInfo->join_params);
- if (s32Error != 0) {
+ ret = wilc_set_join_req(vif, pstrNetworkInfo->bssid, sme->ssid,
+ sme->ssid_len, sme->ie, sme->ie_len,
+ cfg_connect_result, (void *)priv,
+ u8security, auth_type,
+ pstrNetworkInfo->ch,
+ pstrNetworkInfo->join_params);
+ if (ret != 0) {
netdev_err(dev, "wilc_set_join_req(): Error\n");
- s32Error = -ENOENT;
+ ret = -ENOENT;
wilc_connecting = 0;
- return s32Error;
+ return ret;
}
- return s32Error;
+ return ret;
}
static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code)
{
- s32 s32Error = 0;
+ s32 ret = 0;
struct wilc_priv *priv;
- struct host_if_drv *pstrWFIDrv;
+ struct host_if_drv *wfi_drv;
struct wilc_vif *vif;
struct wilc *wilc;
- u8 NullBssid[ETH_ALEN] = {0};
+ u8 null_bssid[ETH_ALEN] = {0};
wilc_connecting = 0;
priv = wiphy_priv(wiphy);
@@ -831,23 +831,23 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co
return 0;
}
- pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
- if (!pstrWFIDrv->p2p_connect)
+ wfi_drv = (struct host_if_drv *)priv->hif_drv;
+ if (!wfi_drv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
- wilc_wlan_set_bssid(priv->dev, NullBssid, STATION_MODE);
+ wilc_wlan_set_bssid(priv->dev, null_bssid, STATION_MODE);
p2p_local_random = 0x01;
p2p_recv_random = 0x00;
wilc_ie = false;
- pstrWFIDrv->p2p_timeout = 0;
+ wfi_drv->p2p_timeout = 0;
- s32Error = wilc_disconnect(vif, reason_code);
- if (s32Error != 0) {
+ ret = wilc_disconnect(vif, reason_code);
+ if (ret != 0) {
netdev_err(priv->dev, "Error in disconnecting\n");
- s32Error = -EINVAL;
+ ret = -EINVAL;
}
- return s32Error;
+ return ret;
}
static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
@@ -855,14 +855,14 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
const u8 *mac_addr, struct key_params *params)
{
- s32 s32Error = 0, KeyLen = params->key_len;
+ s32 ret = 0, keylen = params->key_len;
struct wilc_priv *priv;
- const u8 *pu8RxMic = NULL;
- const u8 *pu8TxMic = NULL;
+ const u8 *rx_mic = NULL;
+ const u8 *tx_mic = NULL;
u8 u8mode = NO_ENCRYPT;
u8 u8gmode = NO_ENCRYPT;
u8 u8pmode = NO_ENCRYPT;
- enum AUTHTYPE tenuAuth_type = ANY;
+ enum AUTHTYPE auth_type = ANY;
struct wilc *wl;
struct wilc_vif *vif;
@@ -877,7 +877,7 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
- tenuAuth_type = OPEN_SYSTEM;
+ auth_type = OPEN_SYSTEM;
if (params->cipher == WLAN_CIPHER_SUITE_WEP40)
u8mode = ENCRYPT_ENABLED | WEP;
@@ -886,7 +886,7 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
wilc_add_wep_key_bss_ap(vif, params->key,
params->key_len, key_index,
- u8mode, tenuAuth_type);
+ u8mode, auth_type);
break;
}
if (memcmp(params->key, priv->WILC_WFI_wep_key[key_index], params->key_len)) {
@@ -922,9 +922,9 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
priv->wilc_groupkey = u8gmode;
if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- pu8TxMic = params->key + 24;
- pu8RxMic = params->key + 16;
- KeyLen = params->key_len - 16;
+ tx_mic = params->key + 24;
+ rx_mic = params->key + 16;
+ keylen = params->key_len - 16;
}
kfree(priv->wilc_gtk[key_index]->key);
@@ -932,7 +932,7 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
memcpy(priv->wilc_gtk[key_index]->key, params->key, params->key_len);
kfree(priv->wilc_gtk[key_index]->seq);
- if ((params->seq_len) > 0) {
+ if (params->seq_len > 0) {
priv->wilc_gtk[key_index]->seq = kmalloc(params->seq_len, GFP_KERNEL);
memcpy(priv->wilc_gtk[key_index]->seq, params->seq, params->seq_len);
}
@@ -941,10 +941,10 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
priv->wilc_gtk[key_index]->key_len = params->key_len;
priv->wilc_gtk[key_index]->seq_len = params->seq_len;
- wilc_add_rx_gtk(vif, params->key, KeyLen,
+ wilc_add_rx_gtk(vif, params->key, keylen,
key_index, params->seq_len,
- params->seq, pu8RxMic,
- pu8TxMic, AP_MODE, u8gmode);
+ params->seq, rx_mic,
+ tx_mic, AP_MODE, u8gmode);
} else {
if (params->cipher == WLAN_CIPHER_SUITE_TKIP)
@@ -953,9 +953,9 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
u8pmode = priv->wilc_groupkey | AES;
if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- pu8TxMic = params->key + 24;
- pu8RxMic = params->key + 16;
- KeyLen = params->key_len - 16;
+ tx_mic = params->key + 24;
+ rx_mic = params->key + 16;
+ keylen = params->key_len - 16;
}
kfree(priv->wilc_ptk[key_index]->key);
@@ -964,20 +964,20 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
kfree(priv->wilc_ptk[key_index]->seq);
- if ((params->seq_len) > 0)
+ if (params->seq_len > 0)
priv->wilc_ptk[key_index]->seq = kmalloc(params->seq_len, GFP_KERNEL);
memcpy(priv->wilc_ptk[key_index]->key, params->key, params->key_len);
- if ((params->seq_len) > 0)
+ if (params->seq_len > 0)
memcpy(priv->wilc_ptk[key_index]->seq, params->seq, params->seq_len);
priv->wilc_ptk[key_index]->cipher = params->cipher;
priv->wilc_ptk[key_index]->key_len = params->key_len;
priv->wilc_ptk[key_index]->seq_len = params->seq_len;
- wilc_add_ptk(vif, params->key, KeyLen,
- mac_addr, pu8RxMic, pu8TxMic,
+ wilc_add_ptk(vif, params->key, keylen,
+ mac_addr, rx_mic, tx_mic,
AP_MODE, u8pmode, key_index);
}
break;
@@ -987,9 +987,9 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
u8mode = 0;
if (!pairwise) {
if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- pu8RxMic = params->key + 24;
- pu8TxMic = params->key + 16;
- KeyLen = params->key_len - 16;
+ rx_mic = params->key + 24;
+ tx_mic = params->key + 16;
+ keylen = params->key_len - 16;
}
if (!g_gtk_keys_saved && netdev == wl->vif[0]->ndev) {
@@ -1013,16 +1013,16 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
g_gtk_keys_saved = true;
}
- wilc_add_rx_gtk(vif, params->key, KeyLen,
+ wilc_add_rx_gtk(vif, params->key, keylen,
key_index, params->seq_len,
- params->seq, pu8RxMic,
- pu8TxMic, STATION_MODE,
+ params->seq, rx_mic,
+ tx_mic, STATION_MODE,
u8mode);
} else {
if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- pu8RxMic = params->key + 24;
- pu8TxMic = params->key + 16;
- KeyLen = params->key_len - 16;
+ rx_mic = params->key + 24;
+ tx_mic = params->key + 16;
+ keylen = params->key_len - 16;
}
if (!g_ptk_keys_saved && netdev == wl->vif[0]->ndev) {
@@ -1046,8 +1046,8 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
g_ptk_keys_saved = true;
}
- wilc_add_ptk(vif, params->key, KeyLen,
- mac_addr, pu8RxMic, pu8TxMic,
+ wilc_add_ptk(vif, params->key, keylen,
+ mac_addr, rx_mic, tx_mic,
STATION_MODE, u8mode, key_index);
}
}
@@ -1055,10 +1055,10 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
default:
netdev_err(netdev, "Not supported cipher\n");
- s32Error = -ENOTSUPP;
+ ret = -ENOTSUPP;
}
- return s32Error;
+ return ret;
}
static int del_key(struct wiphy *wiphy, struct net_device *netdev,
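
    [A note on the add_key() branches above, since the rename makes the key
    slicing easier to read: for 32-byte TKIP keys the code treats key[0..15] as
    the temporal key (hence keylen = params->key_len - 16) and key[16..31] as the
    two Michael MIC keys. Which 8-byte half is TX and which is RX depends on the
    role, which is why the offsets are mirrored between the branches:

        /* AP mode, group and pairwise keys, from the hunks above */
        tx_mic = params->key + 24;
        rx_mic = params->key + 16;

        /* station mode: the MIC halves are swapped */
        rx_mic = params->key + 24;
        tx_mic = params->key + 16;
    ]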
@@ -1082,7 +1082,7 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
kfree(g_key_wep_params.key);
g_key_wep_params.key = NULL;
- if ((priv->wilc_gtk[key_index]) != NULL) {
+ if (priv->wilc_gtk[key_index] != NULL) {
kfree(priv->wilc_gtk[key_index]->key);
priv->wilc_gtk[key_index]->key = NULL;
kfree(priv->wilc_gtk[key_index]->seq);
@@ -1092,7 +1092,7 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
priv->wilc_gtk[key_index] = NULL;
}
- if ((priv->wilc_ptk[key_index]) != NULL) {
+ if (priv->wilc_ptk[key_index] != NULL) {
kfree(priv->wilc_ptk[key_index]->key);
priv->wilc_ptk[key_index]->key = NULL;
kfree(priv->wilc_ptk[key_index]->seq);
@@ -1182,7 +1182,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
for (i = 0; i < NUM_STA_ASSOCIATED; i++) {
- if (!(memcmp(mac, priv->assoc_stainfo.au8Sta_AssociatedBss[i], ETH_ALEN))) {
+ if (!(memcmp(mac, priv->assoc_stainfo.sta_associated_bss[i], ETH_ALEN))) {
associatedsta = i;
break;
}
@@ -1200,9 +1200,9 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
}
if (vif->iftype == STATION_MODE) {
- struct rf_info strStatistics;
+ struct rf_info stats;
- wilc_get_statistics(vif, &strStatistics);
+ wilc_get_statistics(vif, &stats);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
BIT(NL80211_STA_INFO_RX_PACKETS) |
@@ -1210,16 +1210,16 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
BIT(NL80211_STA_INFO_TX_FAILED) |
BIT(NL80211_STA_INFO_TX_BITRATE);
- sinfo->signal = strStatistics.rssi;
- sinfo->rx_packets = strStatistics.rx_cnt;
- sinfo->tx_packets = strStatistics.tx_cnt + strStatistics.tx_fail_cnt;
- sinfo->tx_failed = strStatistics.tx_fail_cnt;
- sinfo->txrate.legacy = strStatistics.link_speed * 10;
+ sinfo->signal = stats.rssi;
+ sinfo->rx_packets = stats.rx_cnt;
+ sinfo->tx_packets = stats.tx_cnt + stats.tx_fail_cnt;
+ sinfo->tx_failed = stats.tx_fail_cnt;
+ sinfo->txrate.legacy = stats.link_speed * 10;
- if ((strStatistics.link_speed > TCP_ACK_FILTER_LINK_SPEED_THRESH) &&
- (strStatistics.link_speed != DEFAULT_LINK_SPEED))
+ if (stats.link_speed > TCP_ACK_FILTER_LINK_SPEED_THRESH &&
+ stats.link_speed != DEFAULT_LINK_SPEED)
wilc_enable_tcp_ack_filter(true);
- else if (strStatistics.link_speed != DEFAULT_LINK_SPEED)
+ else if (stats.link_speed != DEFAULT_LINK_SPEED)
wilc_enable_tcp_ack_filter(false);
}
return 0;
@@ -1233,46 +1233,46 @@ static int change_bss(struct wiphy *wiphy, struct net_device *dev,
static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- s32 s32Error = 0;
- struct cfg_param_attr pstrCfgParamVal;
+ s32 ret = 0;
+ struct cfg_param_attr cfg_param_val;
struct wilc_priv *priv;
struct wilc_vif *vif;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- pstrCfgParamVal.flag = 0;
+ cfg_param_val.flag = 0;
if (changed & WIPHY_PARAM_RETRY_SHORT) {
- pstrCfgParamVal.flag |= RETRY_SHORT;
- pstrCfgParamVal.short_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_short;
+ cfg_param_val.flag |= RETRY_SHORT;
+ cfg_param_val.short_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_short;
}
if (changed & WIPHY_PARAM_RETRY_LONG) {
- pstrCfgParamVal.flag |= RETRY_LONG;
- pstrCfgParamVal.long_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_long;
+ cfg_param_val.flag |= RETRY_LONG;
+ cfg_param_val.long_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_long;
}
if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
- pstrCfgParamVal.flag |= FRAG_THRESHOLD;
- pstrCfgParamVal.frag_threshold = priv->dev->ieee80211_ptr->wiphy->frag_threshold;
+ cfg_param_val.flag |= FRAG_THRESHOLD;
+ cfg_param_val.frag_threshold = priv->dev->ieee80211_ptr->wiphy->frag_threshold;
}
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- pstrCfgParamVal.flag |= RTS_THRESHOLD;
- pstrCfgParamVal.rts_threshold = priv->dev->ieee80211_ptr->wiphy->rts_threshold;
+ cfg_param_val.flag |= RTS_THRESHOLD;
+ cfg_param_val.rts_threshold = priv->dev->ieee80211_ptr->wiphy->rts_threshold;
}
- s32Error = wilc_hif_set_cfg(vif, &pstrCfgParamVal);
- if (s32Error)
+ ret = wilc_hif_set_cfg(vif, &cfg_param_val);
+ if (ret)
netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
- return s32Error;
+ return ret;
}
static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
u32 i;
- s32 s32Error = 0;
+ s32 ret = 0;
u8 flag = 0;
struct wilc_vif *vif;
struct wilc_priv *priv = wiphy_priv(wiphy);
@@ -1295,20 +1295,20 @@ static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
priv->pmkid_list.numpmkid++;
} else {
netdev_err(netdev, "Invalid PMKID index\n");
- s32Error = -EINVAL;
+ ret = -EINVAL;
}
- if (!s32Error)
- s32Error = wilc_set_pmkid_info(vif, &priv->pmkid_list);
+ if (!ret)
+ ret = wilc_set_pmkid_info(vif, &priv->pmkid_list);
- return s32Error;
+ return ret;
}
static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
u32 i;
- s32 s32Error = 0;
+ s32 ret = 0;
struct wilc_priv *priv = wiphy_priv(wiphy);
@@ -1331,10 +1331,10 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
}
priv->pmkid_list.numpmkid--;
} else {
- s32Error = -EINVAL;
+ ret = -EINVAL;
}
- return s32Error;
+ return ret;
}
static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
@@ -1346,7 +1346,7 @@ static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
return 0;
}
-static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
+static void wilc_wfi_cfg_parse_rx_action(u8 *buf, u32 len)
{
u32 index = 0;
u32 i = 0, j = 0;
@@ -1382,7 +1382,7 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
}
}
-static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftype)
+static void wilc_wfi_cfg_parse_tx_action(u8 *buf, u32 len, bool oper_ch, u8 iftype)
{
u32 index = 0;
u32 i = 0, j = 0;
@@ -1403,7 +1403,7 @@ static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftyp
op_channel_attr_index = index;
index += buf[index + 1] + 3;
}
- if (wlan_channel != INVALID_CHANNEL && bOperChan) {
+ if (wlan_channel != INVALID_CHANNEL && oper_ch) {
if (channel_list_attr_index) {
for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
if (buf[i] == 0x51) {
@@ -1425,12 +1425,12 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
{
struct wilc_priv *priv;
u32 header, pkt_offset;
- struct host_if_drv *pstrWFIDrv;
+ struct host_if_drv *wfi_drv;
u32 i = 0;
s32 s32Freq;
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
- pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
+ wfi_drv = (struct host_if_drv *)priv->hif_drv;
memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
@@ -1438,20 +1438,20 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
if (buff[FRAME_TYPE_ID] == IEEE80211_STYPE_PROBE_RESP) {
- cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, true, GFP_KERNEL);
+ cfg80211_mgmt_tx_status(priv->wdev, priv->tx_cookie, buff, size, true, GFP_KERNEL);
return;
} else {
if (pkt_offset & IS_MGMT_STATUS_SUCCES)
- cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, true, GFP_KERNEL);
+ cfg80211_mgmt_tx_status(priv->wdev, priv->tx_cookie, buff, size, true, GFP_KERNEL);
else
- cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, false, GFP_KERNEL);
+ cfg80211_mgmt_tx_status(priv->wdev, priv->tx_cookie, buff, size, false, GFP_KERNEL);
return;
}
} else {
s32Freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ);
if (ieee80211_is_action(buff[FRAME_TYPE_ID])) {
- if (priv->bCfgScanning && time_after_eq(jiffies, (unsigned long)pstrWFIDrv->p2p_timeout)) {
+ if (priv->cfg_scanning && time_after_eq(jiffies, (unsigned long)wfi_drv->p2p_timeout)) {
netdev_dbg(dev, "Receiving action wrong ch\n");
return;
}
@@ -1481,7 +1481,7 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < size; i++) {
if (buff[i] == P2PELEM_ATTR_ID && !(memcmp(p2p_oui, &buff[i + 2], 4))) {
- WILC_WFI_CfgParseRxAction(&buff[i + 6], size - (i + 6));
+ wilc_wfi_cfg_parse_rx_action(&buff[i + 6], size - (i + 6));
break;
}
}
@@ -1508,7 +1508,7 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
}
}
-static void WILC_WFI_mgmt_tx_complete(void *priv, int status)
+static void wilc_wfi_mgmt_tx_complete(void *priv, int status)
{
struct p2p_mgmt_data *pv_data = priv;
@@ -1516,33 +1516,33 @@ static void WILC_WFI_mgmt_tx_complete(void *priv, int status)
kfree(pv_data);
}
-static void WILC_WFI_RemainOnChannelReady(void *pUserVoid)
+static void wilc_wfi_remain_on_channel_ready(void *priv_data)
{
struct wilc_priv *priv;
- priv = pUserVoid;
+ priv = priv_data;
- priv->bInP2PlistenState = true;
+ priv->p2p_listen_state = true;
cfg80211_ready_on_channel(priv->wdev,
- priv->strRemainOnChanParams.u64ListenCookie,
- priv->strRemainOnChanParams.pstrListenChan,
- priv->strRemainOnChanParams.u32ListenDuration,
+ priv->remain_on_ch_params.listen_cookie,
+ priv->remain_on_ch_params.listen_ch,
+ priv->remain_on_ch_params.listen_duration,
GFP_KERNEL);
}
-static void WILC_WFI_RemainOnChannelExpired(void *pUserVoid, u32 u32SessionID)
+static void wilc_wfi_remain_on_channel_expired(void *data, u32 session_id)
{
struct wilc_priv *priv;
- priv = pUserVoid;
+ priv = data;
- if (u32SessionID == priv->strRemainOnChanParams.u32ListenSessionID) {
- priv->bInP2PlistenState = false;
+ if (session_id == priv->remain_on_ch_params.listen_session_id) {
+ priv->p2p_listen_state = false;
cfg80211_remain_on_channel_expired(priv->wdev,
- priv->strRemainOnChanParams.u64ListenCookie,
- priv->strRemainOnChanParams.pstrListenChan,
+ priv->remain_on_ch_params.listen_cookie,
+ priv->remain_on_ch_params.listen_ch,
GFP_KERNEL);
}
}
@@ -1552,7 +1552,7 @@ static int remain_on_channel(struct wiphy *wiphy,
struct ieee80211_channel *chan,
unsigned int duration, u64 *cookie)
{
- s32 s32Error = 0;
+ s32 ret = 0;
struct wilc_priv *priv;
struct wilc_vif *vif;
@@ -1561,21 +1561,21 @@ static int remain_on_channel(struct wiphy *wiphy,
if (wdev->iftype == NL80211_IFTYPE_AP) {
netdev_dbg(vif->ndev, "Required while in AP mode\n");
- return s32Error;
+ return ret;
}
curr_channel = chan->hw_value;
- priv->strRemainOnChanParams.pstrListenChan = chan;
- priv->strRemainOnChanParams.u64ListenCookie = *cookie;
- priv->strRemainOnChanParams.u32ListenDuration = duration;
- priv->strRemainOnChanParams.u32ListenSessionID++;
+ priv->remain_on_ch_params.listen_ch = chan;
+ priv->remain_on_ch_params.listen_cookie = *cookie;
+ priv->remain_on_ch_params.listen_duration = duration;
+ priv->remain_on_ch_params.listen_session_id++;
return wilc_remain_on_channel(vif,
- priv->strRemainOnChanParams.u32ListenSessionID,
+ priv->remain_on_ch_params.listen_session_id,
duration, chan->hw_value,
- WILC_WFI_RemainOnChannelExpired,
- WILC_WFI_RemainOnChannelReady, (void *)priv);
+ wilc_wfi_remain_on_channel_expired,
+ wilc_wfi_remain_on_channel_ready, (void *)priv);
}
static int cancel_remain_on_channel(struct wiphy *wiphy,
@@ -1589,7 +1589,7 @@ static int cancel_remain_on_channel(struct wiphy *wiphy,
vif = netdev_priv(priv->dev);
return wilc_listen_state_expired(vif,
- priv->strRemainOnChanParams.u32ListenSessionID);
+ priv->remain_on_ch_params.listen_session_id);
}
static int mgmt_tx(struct wiphy *wiphy,
@@ -1604,17 +1604,17 @@ static int mgmt_tx(struct wiphy *wiphy,
const struct ieee80211_mgmt *mgmt;
struct p2p_mgmt_data *mgmt_tx;
struct wilc_priv *priv;
- struct host_if_drv *pstrWFIDrv;
+ struct host_if_drv *wfi_drv;
u32 i;
struct wilc_vif *vif;
u32 buf_len = len + sizeof(p2p_vendor_spec) + sizeof(p2p_local_random);
vif = netdev_priv(wdev->netdev);
priv = wiphy_priv(wiphy);
- pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
+ wfi_drv = (struct host_if_drv *)priv->hif_drv;
*cookie = (unsigned long)buf;
- priv->u64tx_cookie = *cookie;
+ priv->tx_cookie = *cookie;
mgmt = (const struct ieee80211_mgmt *)buf;
if (ieee80211_is_mgmt(mgmt->frame_control)) {
@@ -1665,9 +1665,9 @@ static int mgmt_tx(struct wiphy *wiphy,
for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) {
if (buf[i] == P2PELEM_ATTR_ID && !(memcmp(p2p_oui, &buf[i + 2], 4))) {
if (buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)
- WILC_WFI_CfgParseTxAction(&mgmt_tx->buff[i + 6], len - (i + 6), true, vif->iftype);
+ wilc_wfi_cfg_parse_tx_action(&mgmt_tx->buff[i + 6], len - (i + 6), true, vif->iftype);
else
- WILC_WFI_CfgParseTxAction(&mgmt_tx->buff[i + 6], len - (i + 6), false, vif->iftype);
+ wilc_wfi_cfg_parse_tx_action(&mgmt_tx->buff[i + 6], len - (i + 6), false, vif->iftype);
break;
}
}
@@ -1695,12 +1695,12 @@ static int mgmt_tx(struct wiphy *wiphy,
}
}
- pstrWFIDrv->p2p_timeout = (jiffies + msecs_to_jiffies(wait));
+ wfi_drv->p2p_timeout = (jiffies + msecs_to_jiffies(wait));
}
wilc_wlan_txq_add_mgmt_pkt(wdev->netdev, mgmt_tx,
mgmt_tx->buff, mgmt_tx->size,
- WILC_WFI_mgmt_tx_complete);
+ wilc_wfi_mgmt_tx_complete);
}
return 0;
}
@@ -1710,16 +1710,16 @@ static int mgmt_tx_cancel_wait(struct wiphy *wiphy,
u64 cookie)
{
struct wilc_priv *priv;
- struct host_if_drv *pstrWFIDrv;
+ struct host_if_drv *wfi_drv;
priv = wiphy_priv(wiphy);
- pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
- pstrWFIDrv->p2p_timeout = jiffies;
+ wfi_drv = (struct host_if_drv *)priv->hif_drv;
+ wfi_drv->p2p_timeout = jiffies;
- if (!priv->bInP2PlistenState) {
+ if (!priv->p2p_listen_state) {
cfg80211_remain_on_channel_expired(priv->wdev,
- priv->strRemainOnChanParams.u64ListenCookie,
- priv->strRemainOnChanParams.pstrListenChan,
+ priv->remain_on_ch_params.listen_cookie,
+ priv->remain_on_ch_params.listen_ch,
GFP_KERNEL);
}
@@ -1788,7 +1788,7 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
wilc_get_rssi(vif, &sinfo->signal);
- memcpy(mac, priv->au8AssociatedBss, ETH_ALEN);
+ memcpy(mac, priv->associated_bss, ETH_ALEN);
return 0;
}
@@ -1837,7 +1837,7 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
vif->iftype = STATION_MODE;
wilc_set_operation_mode(vif, STATION_MODE);
- memset(priv->assoc_stainfo.au8Sta_AssociatedBss, 0, MAX_NUM_STA * ETH_ALEN);
+ memset(priv->assoc_stainfo.sta_associated_bss, 0, MAX_NUM_STA * ETH_ALEN);
wilc_enable_ps = true;
wilc_set_power_mgmt(vif, 1, 0);
@@ -1893,9 +1893,9 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
static int start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings)
{
- struct cfg80211_beacon_data *beacon = &(settings->beacon);
+ struct cfg80211_beacon_data *beacon = &settings->beacon;
struct wilc_priv *priv;
- s32 s32Error = 0;
+ s32 ret = 0;
struct wilc *wl;
struct wilc_vif *vif;
@@ -1903,9 +1903,9 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev,
vif = netdev_priv(dev);
wl = vif->wilc;
- s32Error = set_channel(wiphy, &settings->chandef);
+ ret = set_channel(wiphy, &settings->chandef);
- if (s32Error != 0)
+ if (ret != 0)
netdev_err(dev, "Error in setting channel\n");
wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE);
@@ -1933,10 +1933,10 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
{
- s32 s32Error = 0;
+ s32 ret = 0;
struct wilc_priv *priv;
struct wilc_vif *vif;
- u8 NullBssid[ETH_ALEN] = {0};
+ u8 null_bssid[ETH_ALEN] = {0};
if (!wiphy)
return -EFAULT;
@@ -1944,22 +1944,22 @@ static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- wilc_wlan_set_bssid(dev, NullBssid, AP_MODE);
+ wilc_wlan_set_bssid(dev, null_bssid, AP_MODE);
- s32Error = wilc_del_beacon(vif);
+ ret = wilc_del_beacon(vif);
- if (s32Error)
+ if (ret)
netdev_err(dev, "Host delete beacon fail\n");
- return s32Error;
+ return ret;
}
static int add_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
- s32 s32Error = 0;
+ s32 ret = 0;
struct wilc_priv *priv;
- struct add_sta_param strStaParams = { {0} };
+ struct add_sta_param sta_params = { {0} };
struct wilc_vif *vif;
if (!wiphy)
@@ -1969,35 +1969,35 @@ static int add_station(struct wiphy *wiphy, struct net_device *dev,
vif = netdev_priv(dev);
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
- memcpy(strStaParams.bssid, mac, ETH_ALEN);
- memcpy(priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid], mac, ETH_ALEN);
- strStaParams.aid = params->aid;
- strStaParams.rates_len = params->supported_rates_len;
- strStaParams.rates = params->supported_rates;
+ memcpy(sta_params.bssid, mac, ETH_ALEN);
+ memcpy(priv->assoc_stainfo.sta_associated_bss[params->aid], mac, ETH_ALEN);
+ sta_params.aid = params->aid;
+ sta_params.rates_len = params->supported_rates_len;
+ sta_params.rates = params->supported_rates;
if (!params->ht_capa) {
- strStaParams.ht_supported = false;
+ sta_params.ht_supported = false;
} else {
- strStaParams.ht_supported = true;
- strStaParams.ht_capa = *params->ht_capa;
+ sta_params.ht_supported = true;
+ sta_params.ht_capa = *params->ht_capa;
}
- strStaParams.flags_mask = params->sta_flags_mask;
- strStaParams.flags_set = params->sta_flags_set;
+ sta_params.flags_mask = params->sta_flags_mask;
+ sta_params.flags_set = params->sta_flags_set;
- s32Error = wilc_add_station(vif, &strStaParams);
- if (s32Error)
+ ret = wilc_add_station(vif, &sta_params);
+ if (ret)
netdev_err(dev, "Host add station fail\n");
}
- return s32Error;
+ return ret;
}
static int del_station(struct wiphy *wiphy, struct net_device *dev,
struct station_del_parameters *params)
{
const u8 *mac = params->mac;
- s32 s32Error = 0;
+ s32 ret = 0;
struct wilc_priv *priv;
struct wilc_vif *vif;
@@ -2009,23 +2009,23 @@ static int del_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
if (!mac)
- s32Error = wilc_del_allstation(vif,
- priv->assoc_stainfo.au8Sta_AssociatedBss);
+ ret = wilc_del_allstation(vif,
+ priv->assoc_stainfo.sta_associated_bss);
- s32Error = wilc_del_station(vif, mac);
+ ret = wilc_del_station(vif, mac);
- if (s32Error)
+ if (ret)
netdev_err(dev, "Host delete station fail\n");
}
- return s32Error;
+ return ret;
}
static int change_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
- s32 s32Error = 0;
+ s32 ret = 0;
struct wilc_priv *priv;
- struct add_sta_param strStaParams = { {0} };
+ struct add_sta_param sta_params = { {0} };
struct wilc_vif *vif;
if (!wiphy)
@@ -2035,26 +2035,26 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev,
vif = netdev_priv(dev);
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
- memcpy(strStaParams.bssid, mac, ETH_ALEN);
- strStaParams.aid = params->aid;
- strStaParams.rates_len = params->supported_rates_len;
- strStaParams.rates = params->supported_rates;
+ memcpy(sta_params.bssid, mac, ETH_ALEN);
+ sta_params.aid = params->aid;
+ sta_params.rates_len = params->supported_rates_len;
+ sta_params.rates = params->supported_rates;
if (!params->ht_capa) {
- strStaParams.ht_supported = false;
+ sta_params.ht_supported = false;
} else {
- strStaParams.ht_supported = true;
- strStaParams.ht_capa = *params->ht_capa;
+ sta_params.ht_supported = true;
+ sta_params.ht_capa = *params->ht_capa;
}
- strStaParams.flags_mask = params->sta_flags_mask;
- strStaParams.flags_set = params->sta_flags_set;
+ sta_params.flags_mask = params->sta_flags_mask;
+ sta_params.flags_set = params->sta_flags_set;
- s32Error = wilc_edit_station(vif, &strStaParams);
- if (s32Error)
+ ret = wilc_edit_station(vif, &sta_params);
+ if (ret)
netdev_err(dev, "Host edit station fail\n");
}
- return s32Error;
+ return ret;
}
static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
@@ -2198,7 +2198,7 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
};
-static struct wireless_dev *WILC_WFI_CfgAlloc(void)
+static struct wireless_dev *wilc_wfi_cfg_alloc(void)
{
struct wireless_dev *wdev;
@@ -2230,9 +2230,9 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
{
struct wilc_priv *priv;
struct wireless_dev *wdev;
- s32 s32Error = 0;
+ s32 ret = 0;
- wdev = WILC_WFI_CfgAlloc();
+ wdev = wilc_wfi_cfg_alloc();
if (!wdev) {
netdev_err(net, "wiphy new allocate failed\n");
return NULL;
@@ -2262,8 +2262,8 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
set_wiphy_dev(wdev->wiphy, dev);
- s32Error = wiphy_register(wdev->wiphy);
- if (s32Error)
+ ret = wiphy_register(wdev->wiphy);
+ if (ret)
netdev_err(net, "Cannot register wiphy device\n");
priv->dev = net;
@@ -2272,7 +2272,7 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
int wilc_init_host_int(struct net_device *net)
{
- int s32Error = 0;
+ int ret = 0;
struct wilc_priv *priv;
@@ -2283,43 +2283,43 @@ int wilc_init_host_int(struct net_device *net)
}
op_ifcs++;
- priv->gbAutoRateAdjusted = false;
+ priv->auto_rate_adjusted = false;
- priv->bInP2PlistenState = false;
+ priv->p2p_listen_state = false;
mutex_init(&priv->scan_req_lock);
- s32Error = wilc_init(net, &priv->hif_drv);
- if (s32Error)
+ ret = wilc_init(net, &priv->hif_drv);
+ if (ret)
netdev_err(net, "Error while initializing hostinterface\n");
- return s32Error;
+ return ret;
}
int wilc_deinit_host_int(struct net_device *net)
{
- int s32Error = 0;
+ int ret = 0;
struct wilc_vif *vif;
struct wilc_priv *priv;
priv = wdev_priv(net->ieee80211_ptr);
vif = netdev_priv(priv->dev);
- priv->gbAutoRateAdjusted = false;
+ priv->auto_rate_adjusted = false;
- priv->bInP2PlistenState = false;
+ priv->p2p_listen_state = false;
op_ifcs--;
- s32Error = wilc_deinit(vif);
+ ret = wilc_deinit(vif);
clear_shadow_scan();
if (op_ifcs == 0)
del_timer_sync(&wilc_during_ip_timer);
- if (s32Error)
+ if (ret)
netdev_err(net, "Error while deinitializing host interface\n");
- return s32Error;
+ return ret;
}
void wilc_free_wiphy(struct net_device *net)
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index e6f4d84971c3..3337fb26c8e2 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -86,29 +86,28 @@ struct wilc_wfi_wep_key {
};
struct sta_info {
- u8 au8Sta_AssociatedBss[MAX_NUM_STA][ETH_ALEN];
+ u8 sta_associated_bss[MAX_NUM_STA][ETH_ALEN];
};
/* Parameters needed for host interface for remaining on channel */
-struct wilc_wfi_p2pListenParams {
- struct ieee80211_channel *pstrListenChan;
- enum nl80211_channel_type tenuChannelType;
- u32 u32ListenDuration;
- u64 u64ListenCookie;
- u32 u32ListenSessionID;
+struct wilc_wfi_p2p_listen_params {
+ struct ieee80211_channel *listen_ch;
+ u32 listen_duration;
+ u64 listen_cookie;
+ u32 listen_session_id;
};
struct wilc_priv {
struct wireless_dev *wdev;
- struct cfg80211_scan_request *pstrScanReq;
+ struct cfg80211_scan_request *scan_req;
- struct wilc_wfi_p2pListenParams strRemainOnChanParams;
- u64 u64tx_cookie;
+ struct wilc_wfi_p2p_listen_params remain_on_ch_params;
+ u64 tx_cookie;
- bool bCfgScanning;
- u32 u32RcvdChCount;
+ bool cfg_scanning;
+ u32 rcvd_ch_cnt;
- u8 au8AssociatedBss[ETH_ALEN];
+ u8 associated_bss[ETH_ALEN];
struct sta_info assoc_stainfo;
struct net_device_stats stats;
u8 monitor_flag;
@@ -135,9 +134,9 @@ struct wilc_priv {
/* mutexes */
struct mutex scan_req_lock;
/* */
- bool gbAutoRateAdjusted;
+ bool auto_rate_adjusted;
- bool bInP2PlistenState;
+ bool p2p_listen_state;
};
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index f49dfa82f1b8..acaeafc2c350 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -1,19 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/completion.h>
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
#include "wilc_wfi_netdevice.h"
#include "wilc_wlan_cfg.h"
-static CHIP_PS_STATE_T chip_ps_state = CHIP_WAKEDUP;
+static enum chip_ps_states chip_ps_state = CHIP_WAKEDUP;
-static inline void acquire_bus(struct wilc *wilc, BUS_ACQUIRE_T acquire)
+static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire)
{
mutex_lock(&wilc->hif_cs);
if (acquire == ACQUIRE_AND_WAKEUP)
chip_wakeup(wilc);
}
-static inline void release_bus(struct wilc *wilc, BUS_RELEASE_T release)
+static inline void release_bus(struct wilc *wilc, enum bus_release release)
{
if (release == RELEASE_ALLOW_SLEEP)
chip_allow_sleep(wilc);
@@ -692,7 +693,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
i = 0;
do {
tqe = wilc_wlan_txq_remove_from_head(dev);
- if (tqe && (vmm_table[i] != 0)) {
+ if (tqe && vmm_table[i] != 0) {
u32 header, buffer_offset;
vmm_table[i] = cpu_to_le32(vmm_table[i]);
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index 19e4f85fdd27..aeb5417f3587 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -235,7 +235,7 @@ static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str, u32 siz
buf[2] = (u8)size;
buf[3] = (u8)(size >> 8);
- if ((str) && (size != 0))
+ if (str && size != 0)
memcpy(&buf[4], str, size);
return (size + 4);
@@ -256,7 +256,7 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
buf[2] = (u8)size;
buf[3] = (u8)(size >> 8);
- if ((b) && (size != 0)) {
+ if ((b) && size != 0) {
memcpy(&buf[4], b, size);
for (i = 0; i < size; i++)
checksum += buf[i + 4];
@@ -370,7 +370,7 @@ static int wilc_wlan_parse_info_frame(u8 *info, int size)
len = info[2];
- if ((len == 1) && (wid == WID_STATUS)) {
+ if (len == 1 && wid == WID_STATUS) {
pd->mac_status = info[3];
type = WILC_CFG_RSP_STATUS;
}
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index c1693cfc076d..e186509ad334 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -75,7 +75,7 @@ typedef void (*wilc_tx_complete_func_t)(void *, int);
#define MAX_SSID_LEN 33
#define MAX_RATES_SUPPORTED 12
-typedef enum {
+enum {
SUPP_RATES_IE = 1,
EXT_SUPP_RATES_IE = 50,
HT_CAPABILITY_IE = 45,
@@ -83,15 +83,15 @@ typedef enum {
WPA_IE = 221,
WMM_IE = 221,
P2P_IE = 221,
-} BEACON_IE;
+};
-typedef enum {
+enum bss_types {
INFRASTRUCTURE = 0,
INDEPENDENT,
AP,
-} BSSTYPE_T;
+};
-typedef enum {
+enum {
RATE_AUTO = 0,
RATE_1MB = 1,
RATE_2MB = 2,
@@ -105,55 +105,55 @@ typedef enum {
RATE_26MB = 36,
RATE_48MB = 48,
RATE_54MB = 54
-} TX_RATE_T;
+};
-typedef enum {
+enum {
B_ONLY_MODE = 0, /* 1, 2 M, otherwise 5, 11 M */
G_ONLY_MODE, /* 6,12,24 otherwise 9,18,36,48,54 */
G_MIXED_11B_1_MODE, /* 1,2,5.5,11 otherwise all on */
G_MIXED_11B_2_MODE, /* 1,2,5,11,6,12,24 otherwise all on */
-} G_OPERATING_MODE_T;
+};
-typedef enum {
+enum {
G_SHORT_PREAMBLE = 0, /* Short Preamble */
G_LONG_PREAMBLE = 1, /* Long Preamble */
G_AUTO_PREAMBLE = 2, /* Auto Preamble Selection */
-} G_PREAMBLE_T;
+};
#define MAC_CONNECTED 1
#define MAC_DISCONNECTED 0
#define SCAN_DONE TRUE
-typedef enum {
+enum {
PASSIVE_SCAN = 0,
ACTIVE_SCAN = 1,
-} SCANTYPE_T;
+};
-typedef enum {
+enum {
NO_POWERSAVE = 0,
MIN_FAST_PS = 1,
MAX_FAST_PS = 2,
MIN_PSPOLL_PS = 3,
MAX_PSPOLL_PS = 4
-} USER_PS_MODE_T;
+};
-typedef enum {
+enum chip_ps_states {
CHIP_WAKEDUP = 0,
CHIP_SLEEPING_AUTO = 1,
CHIP_SLEEPING_MANUAL = 2
-} CHIP_PS_STATE_T;
+};
-typedef enum {
+enum bus_acquire {
ACQUIRE_ONLY = 0,
ACQUIRE_AND_WAKEUP = 1,
-} BUS_ACQUIRE_T;
+};
-typedef enum {
+enum bus_release {
RELEASE_ONLY = 0,
RELEASE_ALLOW_SLEEP = 1,
-} BUS_RELEASE_T;
+};
-typedef enum {
+enum {
NO_SECURITY = 0,
WEP_40 = 0x3,
WEP_104 = 0x7,
@@ -163,7 +163,7 @@ typedef enum {
WPA2_AES = 0x31,
WPA2_TKIP = 0x51,
WPA2_AES_TKIP = 0x71, /* Aes or Tkip */
-} SECURITY_T;
+};
enum AUTHTYPE {
OPEN_SYSTEM = 1,
@@ -178,88 +178,88 @@ enum SITESURVEY {
SITE_SURVEY_OFF = 2
};
-typedef enum {
+enum {
NORMAL_ACK = 0,
NO_ACK,
-} ACK_POLICY_T;
+};
-typedef enum {
+enum {
DONT_RESET = 0,
DO_RESET = 1,
NO_REQUEST = 2,
-} RESET_REQ_T;
+};
-typedef enum {
+enum {
REKEY_DISABLE = 1,
REKEY_TIME_BASE,
REKEY_PKT_BASE,
REKEY_TIME_PKT_BASE
-} RSNA_REKEY_POLICY_T;
+};
-typedef enum {
+enum {
FILTER_NO = 0x00,
FILTER_AP_ONLY = 0x01,
FILTER_STA_ONLY = 0x02
-} SCAN_CLASS_FITLER_T;
+};
-typedef enum {
+enum {
PRI_HIGH_RSSI = 0x00,
PRI_LOW_RSSI = 0x04,
PRI_DETECT = 0x08
-} SCAN_PRI_T;
+};
-typedef enum {
+enum {
CH_FILTER_OFF = 0x00,
CH_FILTER_ON = 0x10
-} CH_FILTER_T;
+};
-typedef enum {
+enum {
AUTO_PROT = 0, /* Auto */
NO_PROT, /* Do not use any protection */
ERP_PROT, /* Protect all ERP frame exchanges */
HT_PROT, /* Protect all HT frame exchanges */
GF_PROT, /* Protect all GF frame exchanges */
-} N_PROTECTION_MODE_T;
+};
-typedef enum {
+enum {
G_SELF_CTS_PROT,
G_RTS_CTS_PROT,
-} G_PROTECTION_MODE_T;
+};
-typedef enum {
+enum {
HT_MIXED_MODE = 1,
HT_ONLY_20MHZ_MODE,
HT_ONLY_20_40MHZ_MODE,
-} N_OPERATING_MODE_T;
+};
-typedef enum {
+enum {
NO_DETECT = 0,
DETECT_ONLY = 1,
DETECT_PROTECT = 2,
DETECT_PROTECT_REPORT = 3,
-} N_OBSS_DETECTION_T;
+};
-typedef enum {
+enum {
RTS_CTS_NONHT_PROT = 0, /* RTS-CTS at non-HT rate */
FIRST_FRAME_NONHT_PROT, /* First frame at non-HT rate */
LSIG_TXOP_PROT, /* LSIG TXOP Protection */
FIRST_FRAME_MIXED_PROT, /* First frame at Mixed format */
-} N_PROTECTION_TYPE_T;
+};
-typedef enum {
+enum {
STATIC_MODE = 1,
DYNAMIC_MODE = 2,
MIMO_MODE = 3, /* power save disable */
-} N_SMPS_MODE_T;
+};
-typedef enum {
+enum {
DISABLE_SELF_CTS,
ENABLE_SELF_CTS,
DISABLE_TX_ABORT,
ENABLE_TX_ABORT,
HW_TRIGGER_ABORT,
SW_TRIGGER_ABORT,
-} TX_ABORT_OPTION_T;
+};
enum wid_type {
WID_CHAR = 0,
@@ -281,7 +281,7 @@ struct wid {
s8 *val;
};
-typedef enum {
+enum {
WID_NIL = 0xffff,
/*
@@ -889,7 +889,7 @@ typedef enum {
/* Miscellaneous WIDs */
WID_ALL = 0x7FFE,
WID_MAX = 0xFFFF
-} WID_T;
+};
struct wilc;
int wilc_wlan_init(struct net_device *dev);
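For context on the newly named enums: wilc_wlan.c (above) consumes bus_acquire and bus_release as a matched pair around chip access. A minimal illustrative pairing, not part of the patch:

	acquire_bus(wilc, ACQUIRE_AND_WAKEUP);	/* lock hif_cs, wake chip */
	/* ... access chip registers over the bus ... */
	release_bus(wilc, RELEASE_ALLOW_SLEEP);	/* allow sleep, unlock hif_cs */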
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index f5a3a1ce21ce..85c3af00abd2 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* hfa384x.h
*
* Defines the constants and data structures for the hfa384x
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 197f5a914e8f..555711bc12f0 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* src/prism2/driver/hfa384x_usb.c
*
* Functions that talk to the USB variant of the Intersil hfa384x MAC
@@ -2457,7 +2458,8 @@ int hfa384x_drvr_start(struct hfa384x *hw)
* ok
*/
result =
- usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_in, &status);
+ usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_in,
+ &status);
if (result < 0) {
netdev_err(hw->wlandev->netdev, "Cannot get bulk in endpoint status.\n");
goto done;
@@ -2466,7 +2468,8 @@ int hfa384x_drvr_start(struct hfa384x *hw)
netdev_err(hw->wlandev->netdev, "Failed to reset bulk in endpoint.\n");
result =
- usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_out, &status);
+ usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_out,
+ &status);
if (result < 0) {
netdev_err(hw->wlandev->netdev, "Cannot get bulk out endpoint status.\n");
goto done;
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index c1b6d426bcad..855b424f6423 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* src/p80211/p80211conv.c
*
* Ether/802.11 conversions and packet buffer routines
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index 66332b1fb6d5..28459dcea4b1 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* p80211conv.h
*
* Ether/802.11 conversions and packet buffer routines
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 2c44c613a586..133d70c08ecf 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* p80211hdr.h
*
* Macros, types, and functions for handling 802.11 MAC headers
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index ab6067e65050..d8cde1d8870b 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* p80211ioctl.h
*
* Declares constants and types for the p80211 ioctls
diff --git a/drivers/staging/wlan-ng/p80211metadef.h b/drivers/staging/wlan-ng/p80211metadef.h
index ea3d9ce222b9..4ac2f08a520a 100644
--- a/drivers/staging/wlan-ng/p80211metadef.h
+++ b/drivers/staging/wlan-ng/p80211metadef.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* This file is GENERATED AUTOMATICALLY. DO NOT EDIT OR MODIFY.
* --------------------------------------------------------------------
*
diff --git a/drivers/staging/wlan-ng/p80211metastruct.h b/drivers/staging/wlan-ng/p80211metastruct.h
index 850d897fc163..15b7c08e210d 100644
--- a/drivers/staging/wlan-ng/p80211metastruct.h
+++ b/drivers/staging/wlan-ng/p80211metastruct.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* This file is GENERATED AUTOMATICALLY. DO NOT EDIT OR MODIFY.
* --------------------------------------------------------------------
*
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index 653950fd9843..3c12929858cb 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* p80211mgmt.h
*
* Macros, types, and functions to handle 802.11 mgmt frames
diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h
index 40c5cf5997c7..ae119ecd74b0 100644
--- a/drivers/staging/wlan-ng/p80211msg.h
+++ b/drivers/staging/wlan-ng/p80211msg.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* p80211msg.h
*
* Macros, constants, types, and funcs for req and ind messages
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 0f503652740f..ec9cc00ee241 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* src/p80211/p80211knetdev.c
*
* Linux Kernel net device interface
@@ -640,7 +641,8 @@ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
dot11req.msgcode = DIDmsg_dot11req_mibset;
dot11req.msglen = sizeof(dot11req);
memcpy(dot11req.devname,
- ((struct wlandevice *)dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);
+ ((struct wlandevice *)dev->ml_priv)->name,
+ WLAN_DEVNAMELEN_MAX - 1);
/* Set up the mibattribute argument */
mibattr->did = DIDmsg_dot11req_mibset_mibattribute;
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 8e0d08298c8b..cebbe746a52f 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* p80211netdev.h
*
* WLAN net device structure and functions
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index afe847722cf7..c36d01469afc 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* src/p80211/p80211req.c
*
* Request/Indication/MacMgmt interface handling functions
diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h
index 6c72f59993e0..20be2c3af4c1 100644
--- a/drivers/staging/wlan-ng/p80211req.h
+++ b/drivers/staging/wlan-ng/p80211req.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* p80211req.h
*
* Request handling functions
diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h
index 263ef2ddb197..94420562c418 100644
--- a/drivers/staging/wlan-ng/p80211types.h
+++ b/drivers/staging/wlan-ng/p80211types.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/*
* p80211types.h
*
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 6492ffe59085..8bd92bba0ac1 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* src/p80211/p80211wep.c
*
* WEP encode/decode for P80211.
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 344bec8cc31b..5860d0d65841 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* from src/prism2/download/prism2dl.c
*
* utility for downloading prism2 images moved into kernelspace
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 72070593394a..78934e435fcf 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* src/prism2/driver/prism2mgmt.c
*
* Management request handler functions.
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index c062418f1202..564c3f4a3e03 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* prism2mgmt.h
*
* Declares the mgmt command handler functions
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index e41207d97309..edad299ff5ad 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* src/prism2/driver/prism2mib.c
*
* Management request for mibset/mibget
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 99316b9a4e49..fed0b8ceca6f 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/* src/prism2/driver/prism2sta.c
*
* Implements the station functionality for prism2
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index a3af1cbbf8ee..e19a8291cb2a 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -18,19 +18,6 @@ static const struct pci_device_id xgifb_pci_table[] = {
MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
-/* To be included in fb.h */
-#define XGISR (xgifb_info->dev_info.P3c4)
-#define XGICR (xgifb_info->dev_info.P3d4)
-#define XGIDACA (xgifb_info->dev_info.P3c8)
-#define XGIDACD (xgifb_info->dev_info.P3c9)
-#define XGIPART1 (xgifb_info->dev_info.Part1Port)
-#define XGIPART2 (xgifb_info->dev_info.Part2Port)
-#define XGIPART3 (xgifb_info->dev_info.Part3Port)
-#define XGIPART4 (xgifb_info->dev_info.Part4Port)
-#define XGIPART5 (xgifb_info->dev_info.Part5Port)
-#define XGIDAC2A XGIPART5
-#define XGIDAC2D (XGIPART5 + 1)
-
#define IND_XGI_SCRATCH_REG_CR30 0x30 /* CRs */
#define IND_XGI_SCRATCH_REG_CR31 0x31
#define IND_XGI_SCRATCH_REG_CR32 0x32
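The deleted macros above expand to fields of the shared vb_device_info, and the rewrite of XGI_main_26.c below applies that mapping through a local pointer. Summary, taken directly from the removed definitions:

	struct vb_device_info *vb = &xgifb_info->dev_info;
	/* XGISR    -> vb->P3c4       XGICR    -> vb->P3d4
	 * XGIDACA  -> vb->P3c8       XGIDACD  -> vb->P3c9
	 * XGIPART1..XGIPART5 -> vb->Part1Port..vb->Part5Port
	 * XGIDAC2A -> vb->Part5Port  XGIDAC2D -> vb->Part5Port + 1
	 */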
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index b813f1d460ce..10107de0119a 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* XG20, XG21, XG40, XG42 frame buffer device
* for Linux kernels 2.5.x, 2.6.x
@@ -31,18 +32,19 @@ static unsigned int refresh_rate;
#ifdef DEBUG
static void dumpVGAReg(struct xgifb_video_info *xgifb_info)
{
+ struct vb_device_info *vb = &xgifb_info->dev_info;
u8 i, reg;
- xgifb_reg_set(XGISR, 0x05, 0x86);
+ xgifb_reg_set(vb->P3c4, 0x05, 0x86);
for (i = 0; i < 0x4f; i++) {
- reg = xgifb_reg_get(XGISR, i);
+ reg = xgifb_reg_get(vb->P3c4, i);
pr_debug("o 3c4 %x\n", i);
pr_debug("i 3c5 => %x\n", reg);
}
for (i = 0; i < 0xF0; i++) {
- reg = xgifb_reg_get(XGICR, i);
+ reg = xgifb_reg_get(vb->P3d4, i);
pr_debug("o 3d4 %x\n", i);
pr_debug("i 3d5 => %x\n", reg);
}
@@ -644,9 +646,10 @@ static void XGIfb_bpp_to_var(struct xgifb_video_info *xgifb_info,
static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
{
+ struct vb_device_info *vb = &xgifb_info->dev_info;
u8 cr30 = 0, cr31 = 0;
- cr31 = xgifb_reg_get(XGICR, 0x31);
+ cr31 = xgifb_reg_get(vb->P3d4, 0x31);
cr31 &= ~0x60;
switch (xgifb_info->display2) {
@@ -683,14 +686,15 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
cr31 |= (SIS_DRIVER_MODE | SIS_VB_OUTPUT_DISABLE);
}
- xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
- xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31);
- xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33,
+ xgifb_reg_set(vb->P3d4, IND_XGI_SCRATCH_REG_CR30, cr30);
+ xgifb_reg_set(vb->P3d4, IND_XGI_SCRATCH_REG_CR31, cr31);
+ xgifb_reg_set(vb->P3d4, IND_XGI_SCRATCH_REG_CR33,
(xgifb_info->rate_idx & 0x0F));
}
static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
{
+ struct vb_device_info *vb = &xgifb_info->dev_info;
u8 reg;
unsigned char doit = 1;
@@ -713,7 +717,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
/* We can't switch off CRT1 if bridge is in slave mode */
if (xgifb_info->hasVB != HASVB_NONE) {
- reg = xgifb_reg_get(XGIPART1, 0x00);
+ reg = xgifb_reg_get(vb->Part1Port, 0x00);
if ((reg & 0x50) == 0x10)
doit = 0;
@@ -722,18 +726,18 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
XGIfb_crt1off = 0;
}
- reg = xgifb_reg_get(XGICR, 0x17);
+ reg = xgifb_reg_get(vb->P3d4, 0x17);
if ((XGIfb_crt1off) && (doit))
reg &= ~0x80;
else
reg |= 0x80;
- xgifb_reg_set(XGICR, 0x17, reg);
+ xgifb_reg_set(vb->P3d4, 0x17, reg);
- xgifb_reg_and(XGISR, IND_SIS_RAMDAC_CONTROL, ~0x04);
+ xgifb_reg_and(vb->P3c4, IND_SIS_RAMDAC_CONTROL, ~0x04);
if (xgifb_info->display2 == XGIFB_DISP_TV &&
xgifb_info->hasVB == HASVB_301) {
- reg = xgifb_reg_get(XGIPART4, 0x01);
+ reg = xgifb_reg_get(vb->Part4Port, 0x01);
if (reg < 0xB0) { /* Set filter for XGI301 */
int filter_tb;
@@ -760,60 +764,58 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
filter = -1;
break;
}
- xgifb_reg_or(XGIPART1,
- SIS_CRT2_WENABLE_315,
- 0x01);
+ xgifb_reg_or(vb->Part1Port, SIS_CRT2_WENABLE_315, 0x01);
if (xgifb_info->TV_type == TVMODE_NTSC) {
- xgifb_reg_and(XGIPART2, 0x3a, 0x1f);
+ xgifb_reg_and(vb->Part2Port, 0x3a, 0x1f);
if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
- xgifb_reg_and(XGIPART2, 0x30, 0xdf);
+ xgifb_reg_and(vb->Part2Port, 0x30, 0xdf);
} else if (xgifb_info->TV_plug
== TVPLUG_COMPOSITE) {
- xgifb_reg_or(XGIPART2, 0x30, 0x20);
+ xgifb_reg_or(vb->Part2Port, 0x30, 0x20);
switch (xgifb_info->video_width) {
case 640:
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x35,
0xEB);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x36,
0x04);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x37,
0x25);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x38,
0x18);
break;
case 720:
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x35,
0xEE);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x36,
0x0C);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x37,
0x22);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x38,
0x08);
break;
case 800:
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x35,
0xEB);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x36,
0x15);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x37,
0x25);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x38,
0xF6);
break;
@@ -821,55 +823,55 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
}
} else if (xgifb_info->TV_type == TVMODE_PAL) {
- xgifb_reg_and(XGIPART2, 0x3A, 0x1F);
+ xgifb_reg_and(vb->Part2Port, 0x3A, 0x1F);
if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
- xgifb_reg_and(XGIPART2, 0x30, 0xDF);
+ xgifb_reg_and(vb->Part2Port, 0x30, 0xDF);
} else if (xgifb_info->TV_plug
== TVPLUG_COMPOSITE) {
- xgifb_reg_or(XGIPART2, 0x30, 0x20);
+ xgifb_reg_or(vb->Part2Port, 0x30, 0x20);
switch (xgifb_info->video_width) {
case 640:
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x35,
0xF1);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x36,
0xF7);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x37,
0x1F);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x38,
0x32);
break;
case 720:
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x35,
0xF3);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x36,
0x00);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x37,
0x1D);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x38,
0x20);
break;
case 800:
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x35,
0xFC);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x36,
0xFB);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x37,
0x14);
- xgifb_reg_set(XGIPART2,
+ xgifb_reg_set(vb->Part2Port,
0x38,
0x2A);
break;
@@ -882,10 +884,10 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
pr_debug("FilterTable[%d]-%d: %*ph\n",
filter_tb, filter, 4, f);
- xgifb_reg_set(XGIPART2, 0x35, f[0]);
- xgifb_reg_set(XGIPART2, 0x36, f[1]);
- xgifb_reg_set(XGIPART2, 0x37, f[2]);
- xgifb_reg_set(XGIPART2, 0x38, f[3]);
+ xgifb_reg_set(vb->Part2Port, 0x35, f[0]);
+ xgifb_reg_set(vb->Part2Port, 0x36, f[1]);
+ xgifb_reg_set(vb->Part2Port, 0x37, f[2]);
+ xgifb_reg_set(vb->Part2Port, 0x38, f[3]);
}
}
}
@@ -895,6 +897,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
+ struct vb_device_info *vb = &xgifb_info->dev_info;
struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info;
unsigned int htotal = var->left_margin + var->xres + var->right_margin
+ var->hsync_len;
@@ -981,11 +984,10 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
info->fix.line_length = (info->var.xres_virtual
* info->var.bits_per_pixel) >> 6;
- xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
+ xgifb_reg_set(vb->P3c4, IND_SIS_PASSWORD, SIS_PASSWORD);
- xgifb_reg_set(XGICR, 0x13, (info->fix.line_length & 0x00ff));
- xgifb_reg_set(XGISR,
- 0x0E,
+ xgifb_reg_set(vb->P3d4, 0x13, (info->fix.line_length & 0x00ff));
+ xgifb_reg_set(vb->P3c4, 0x0E,
(info->fix.line_length & 0xff00) >> 8);
XGIfb_post_setmode(xgifb_info);
@@ -1013,16 +1015,16 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->XGI310_AccelDepth = 0x00000000;
xgifb_info->video_cmap_len = 256;
#if defined(__BIG_ENDIAN)
- cr_data = xgifb_reg_get(XGICR, 0x4D);
- xgifb_reg_set(XGICR, 0x4D, (cr_data & 0xE0));
+ cr_data = xgifb_reg_get(vb->P3d4, 0x4D);
+ xgifb_reg_set(vb->P3d4, 0x4D, (cr_data & 0xE0));
#endif
break;
case 16:
xgifb_info->DstColor = 0x8000;
xgifb_info->XGI310_AccelDepth = 0x00010000;
#if defined(__BIG_ENDIAN)
- cr_data = xgifb_reg_get(XGICR, 0x4D);
- xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x0B));
+ cr_data = xgifb_reg_get(vb->P3d4, 0x4D);
+ xgifb_reg_set(vb->P3d4, 0x4D, ((cr_data & 0xE0) | 0x0B));
#endif
xgifb_info->video_cmap_len = 16;
break;
@@ -1031,8 +1033,8 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
xgifb_info->XGI310_AccelDepth = 0x00020000;
xgifb_info->video_cmap_len = 16;
#if defined(__BIG_ENDIAN)
- cr_data = xgifb_reg_get(XGICR, 0x4D);
- xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x15));
+ cr_data = xgifb_reg_get(vb->P3d4, 0x4D);
+ xgifb_reg_set(vb->P3d4, 0x4D, ((cr_data & 0xE0) | 0x15));
#endif
break;
default:
@@ -1051,6 +1053,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
+ struct vb_device_info *vb = &xgifb_info->dev_info;
unsigned int base;
base = var->yoffset * info->var.xres_virtual + var->xoffset;
@@ -1068,22 +1071,20 @@ static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
break;
}
- xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
+ xgifb_reg_set(vb->P3c4, IND_SIS_PASSWORD, SIS_PASSWORD);
- xgifb_reg_set(XGICR, 0x0D, base & 0xFF);
- xgifb_reg_set(XGICR, 0x0C, (base >> 8) & 0xFF);
- xgifb_reg_set(XGISR, 0x0D, (base >> 16) & 0xFF);
- xgifb_reg_set(XGISR, 0x37, (base >> 24) & 0x03);
- xgifb_reg_and_or(XGISR, 0x37, 0xDF, (base >> 21) & 0x04);
+ xgifb_reg_set(vb->P3d4, 0x0D, base & 0xFF);
+ xgifb_reg_set(vb->P3d4, 0x0C, (base >> 8) & 0xFF);
+ xgifb_reg_set(vb->P3c4, 0x0D, (base >> 16) & 0xFF);
+ xgifb_reg_set(vb->P3c4, 0x37, (base >> 24) & 0x03);
+ xgifb_reg_and_or(vb->P3c4, 0x37, 0xDF, (base >> 21) & 0x04);
if (xgifb_info->display2 != XGIFB_DISP_NONE) {
- xgifb_reg_or(XGIPART1, SIS_CRT2_WENABLE_315, 0x01);
- xgifb_reg_set(XGIPART1, 0x06, (base & 0xFF));
- xgifb_reg_set(XGIPART1, 0x05, ((base >> 8) & 0xFF));
- xgifb_reg_set(XGIPART1, 0x04, ((base >> 16) & 0xFF));
- xgifb_reg_and_or(XGIPART1,
- 0x02,
- 0x7F,
+ xgifb_reg_or(vb->Part1Port, SIS_CRT2_WENABLE_315, 0x01);
+ xgifb_reg_set(vb->Part1Port, 0x06, (base & 0xFF));
+ xgifb_reg_set(vb->Part1Port, 0x05, ((base >> 8) & 0xFF));
+ xgifb_reg_set(vb->Part1Port, 0x04, ((base >> 16) & 0xFF));
+ xgifb_reg_and_or(vb->Part1Port, 0x02, 0x7F,
((base >> 24) & 0x01) << 7);
}
return 0;
@@ -1110,21 +1111,22 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
unsigned int transp, struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
+ struct vb_device_info *vb = &xgifb_info->dev_info;
if (regno >= XGIfb_get_cmap_len(&info->var))
return 1;
switch (info->var.bits_per_pixel) {
case 8:
- outb(regno, XGIDACA);
- outb((red >> 10), XGIDACD);
- outb((green >> 10), XGIDACD);
- outb((blue >> 10), XGIDACD);
+ outb(regno, vb->P3c8);
+ outb((red >> 10), vb->P3c9);
+ outb((green >> 10), vb->P3c9);
+ outb((blue >> 10), vb->P3c9);
if (xgifb_info->display2 != XGIFB_DISP_NONE) {
- outb(regno, XGIDAC2A);
- outb((red >> 8), XGIDAC2D);
- outb((green >> 8), XGIDAC2D);
- outb((blue >> 8), XGIDAC2D);
+ outb(regno, vb->Part5Port);
+ outb((red >> 8), (vb->Part5Port + 1));
+ outb((green >> 8), (vb->Part5Port + 1));
+ outb((blue >> 8), (vb->Part5Port + 1));
}
break;
case 16:
@@ -1344,18 +1346,19 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
static int XGIfb_blank(int blank, struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
+ struct vb_device_info *vb = &xgifb_info->dev_info;
u8 reg;
- reg = xgifb_reg_get(XGICR, 0x17);
+ reg = xgifb_reg_get(vb->P3d4, 0x17);
if (blank > 0)
reg &= 0x7f;
else
reg |= 0x80;
- xgifb_reg_set(XGICR, 0x17, reg);
- xgifb_reg_set(XGISR, 0x00, 0x01); /* Synchronous Reset */
- xgifb_reg_set(XGISR, 0x00, 0x03); /* End Reset */
+ xgifb_reg_set(vb->P3d4, 0x17, reg);
+ xgifb_reg_set(vb->P3c4, 0x00, 0x01); /* Synchronous Reset */
+ xgifb_reg_set(vb->P3c4, 0x00, 0x03); /* End Reset */
return 0;
}
@@ -1379,14 +1382,15 @@ static struct fb_ops XGIfb_ops = {
static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
{
+ struct vb_device_info *vb = &xgifb_info->dev_info;
u8 ChannelNum, tmp;
u8 reg = 0;
/* xorg driver sets 32MB * 1 channel */
if (xgifb_info->chip == XG27)
- xgifb_reg_set(XGISR, IND_SIS_DRAM_SIZE, 0x51);
+ xgifb_reg_set(vb->P3c4, IND_SIS_DRAM_SIZE, 0x51);
- reg = xgifb_reg_get(XGISR, IND_SIS_DRAM_SIZE);
+ reg = xgifb_reg_get(vb->P3c4, IND_SIS_DRAM_SIZE);
if (!reg)
return -1;
@@ -1457,12 +1461,13 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
{
+ struct vb_device_info *vb = &xgifb_info->dev_info;
u8 cr32, temp = 0;
xgifb_info->TV_plug = 0;
xgifb_info->TV_type = 0;
- cr32 = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR32);
+ cr32 = xgifb_reg_get(vb->P3d4, IND_XGI_SCRATCH_REG_CR32);
if ((cr32 & SIS_CRT1) && !XGIfb_crt1off) {
XGIfb_crt1off = 0;
@@ -1499,7 +1504,7 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
}
if (xgifb_info->TV_type == 0) {
- temp = xgifb_reg_get(XGICR, 0x38);
+ temp = xgifb_reg_get(vb->P3d4, 0x38);
if (temp & 0x10)
xgifb_info->TV_type = TVMODE_PAL;
else
@@ -1519,7 +1524,7 @@ static bool XGIfb_has_VB(struct xgifb_video_info *xgifb_info)
{
u8 vb_chipid;
- vb_chipid = xgifb_reg_get(XGIPART4, 0x00);
+ vb_chipid = xgifb_reg_get(xgifb_info->dev_info.Part4Port, 0x00);
switch (vb_chipid) {
case 0x01:
xgifb_info->hasVB = HASVB_301;
@@ -1539,7 +1544,8 @@ static void XGIfb_get_VB_type(struct xgifb_video_info *xgifb_info)
u8 reg;
if (!XGIfb_has_VB(xgifb_info)) {
- reg = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR37);
+ reg = xgifb_reg_get(xgifb_info->dev_info.P3d4,
+ IND_XGI_SCRATCH_REG_CR37);
switch ((reg & SIS_EXTERNAL_CHIP_MASK) >> 1) {
case SIS_EXTERNAL_CHIP_LVDS:
xgifb_info->hasVB = HASVB_LVDS;
@@ -1617,6 +1623,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
struct fb_info *fb_info;
struct xgifb_video_info *xgifb_info;
+ struct vb_device_info *vb;
struct xgi_hw_device_info *hw_info;
unsigned long video_size_max;
@@ -1625,6 +1632,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
xgifb_info = fb_info->par;
+ vb = &xgifb_info->dev_info;
hw_info = &xgifb_info->hw_info;
xgifb_info->fb_info = fb_info;
xgifb_info->chip_id = pdev->device;
@@ -1658,10 +1666,11 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
xgifb_info->display2_force = true;
}
- XGIRegInit(&xgifb_info->dev_info, xgifb_info->vga_base);
+ XGIRegInit(vb, xgifb_info->vga_base);
- xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
- reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD);
+ xgifb_reg_set(vb->P3c4,
+ IND_SIS_PASSWORD, SIS_PASSWORD);
+ reg1 = xgifb_reg_get(vb->P3c4, IND_SIS_PASSWORD);
if (reg1 != 0xa1) { /* I/O error */
dev_err(&pdev->dev, "I/O error\n");
@@ -1671,8 +1680,10 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (xgifb_info->chip_id) {
case PCI_DEVICE_ID_XGI_20:
- xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
- CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
+ xgifb_reg_or(vb->P3d4,
+ Index_CR_GPIO_Reg3, GPIOG_EN);
+ CR48 = xgifb_reg_get(vb->P3d4,
+ Index_CR_GPIO_Reg1);
if (CR48 & GPIOG_READ)
xgifb_info->chip = XG21;
else
@@ -1703,11 +1714,12 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
- xgifb_reg_or(XGISR,
+ xgifb_reg_or(vb->P3c4,
IND_SIS_PCI_ADDRESS_SET,
(SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
/* Enable 2D accelerator engine */
- xgifb_reg_or(XGISR, IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D);
+ xgifb_reg_or(vb->P3c4,
+ IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D);
hw_info->ulVideoMemorySize = xgifb_info->video_size;
@@ -1760,7 +1772,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(xgifb_info->chip == XG27)) {
xgifb_info->hasVB = HASVB_NONE;
} else if (xgifb_info->chip == XG21) {
- CR38 = xgifb_reg_get(XGICR, 0x38);
+ CR38 = xgifb_reg_get(vb->P3d4, 0x38);
if ((CR38 & 0xE0) == 0xC0)
xgifb_info->display2 = XGIFB_DISP_LCD;
else if ((CR38 & 0xE0) == 0x60)
@@ -1777,7 +1789,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (xgifb_info->hasVB) {
case HASVB_301:
- reg = xgifb_reg_get(XGIPART4, 0x01);
+ reg = xgifb_reg_get(vb->Part4Port, 0x01);
if (reg >= 0xE0) {
hw_info->ujVBChipID = VB_CHIP_302LV;
dev_info(&pdev->dev,
@@ -1794,7 +1806,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
break;
case HASVB_302:
- reg = xgifb_reg_get(XGIPART4, 0x01);
+ reg = xgifb_reg_get(vb->Part4Port, 0x01);
if (reg >= 0xE0) {
hw_info->ujVBChipID = VB_CHIP_302LV;
dev_info(&pdev->dev,
@@ -1806,7 +1818,8 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"XGI302LV bridge detected (revision 0x%02x)\n",
reg);
} else if (reg >= 0xB0) {
- reg1 = xgifb_reg_get(XGIPART4, 0x23);
+ reg1 = xgifb_reg_get(vb->Part4Port,
+ 0x23);
hw_info->ujVBChipID = VB_CHIP_302B;
@@ -1844,7 +1857,8 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (xgifb_info->display2 == XGIFB_DISP_LCD) {
if (!enable_dstn) {
- reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL);
+ reg = xgifb_reg_get(vb->P3d4,
+ IND_XGI_LCD_PANEL);
reg &= 0x0f;
hw_info->ulCRT2LCDType = XGI310paneltype[reg];
}
@@ -1940,11 +1954,11 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
XGIfb_bpp_to_var(xgifb_info, &fb_info->var);
fb_info->var.pixclock = (u32)(1000000000 / XGIfb_mode_rate_to_dclock
- (&xgifb_info->dev_info, hw_info,
+ (vb, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no));
- if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info,
- hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no,
+ if (XGIfb_mode_rate_to_ddata(vb, hw_info,
+ XGIbios_mode[xgifb_info->mode_idx].mode_no,
&fb_info->var.left_margin,
&fb_info->var.right_margin,
&fb_info->var.upper_margin,
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index e9d930f150cb..1fa0dc66406e 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -74,7 +74,8 @@ static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
/* Get SR1,2,3,4 from file */
/* SR1 is with screen off 0x20 */
SRdata = XGI330_StandTable.SR[i];
- xgifb_reg_set(pVBInfo->P3c4, i + 1, SRdata); /* Set SR 1 2 3 4 */
+ /* Set SR 1 2 3 4 */
+ xgifb_reg_set(pVBInfo->P3c4, i + 1, SRdata);
}
}
@@ -628,12 +629,14 @@ static void xgifb_set_lcd(int chip_id,
xgifb_reg_and(pVBInfo->P3c4, 0x35, ~0x80); /* Vsync polarity */
temp = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
- if (temp & 0x4000)
+ if (temp & 0x4000) {
/* Hsync polarity */
xgifb_reg_or(pVBInfo->P3c4, 0x30, 0x20);
- if (temp & 0x8000)
+ }
+ if (temp & 0x8000) {
/* Vsync polarity */
xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
+ }
}
/*
@@ -1225,9 +1228,10 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
return table[i].DATAPTR;
}
-static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+static struct SiS_TVData const *XGI_GetTVPtr(
+ unsigned short ModeIdIndex,
+ unsigned short RefreshRateTableIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short i, tempdx, tempal, modeflag;
@@ -1480,14 +1484,16 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
if (tempcx >= tempax)
tempcx -= tempax;
- xgifb_reg_set(pVBInfo->Part1Port, 0x1b, (unsigned short)(tempbx & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x1c, (unsigned short)(tempcx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1b,
+ (unsigned short)(tempbx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1c,
+ (unsigned short)(tempcx & 0xff));
tempbx = (tempbx >> 8) & 0x07;
tempcx = (tempcx >> 8) & 0x07;
- xgifb_reg_set(pVBInfo->Part1Port, 0x1d, (unsigned short)((tempcx << 3) |
- tempbx));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1d,
+ (unsigned short)((tempcx << 3) | tempbx));
tempax = pVBInfo->VT;
tempbx = LCDPtr1->LCDVRS;
@@ -1501,8 +1507,10 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
if (tempcx >= tempax)
tempcx -= tempax;
- xgifb_reg_set(pVBInfo->Part1Port, 0x18, (unsigned short)(tempbx & 0xff));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f, (unsigned short)(tempcx & 0x0f));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x18,
+ (unsigned short)(tempbx & 0xff));
+ xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f,
+ (unsigned short)(tempcx & 0x0f));
tempax = ((tempbx >> 8) & 0x07) << 3;
@@ -1592,16 +1600,20 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
tempax = ((tempbx >> 8) & 0xff) << 3;
tempax |= (unsigned short)((temp3 >> 8) & 0x07);
- xgifb_reg_set(pVBInfo->Part1Port, 0x20, (unsigned short)(tempax & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x21, (unsigned short)(tempbx & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x20,
+ (unsigned short)(tempax & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x21,
+ (unsigned short)(tempbx & 0xff));
temp3 >>= 16;
if (modeflag & HalfDCLK)
temp3 >>= 1;
- xgifb_reg_set(pVBInfo->Part1Port, 0x22, (unsigned short)((temp3 >> 8) & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x23, (unsigned short)(temp3 & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x22,
+ (unsigned short)((temp3 >> 8) & 0xff));
+ xgifb_reg_set(pVBInfo->Part1Port, 0x23,
+ (unsigned short)(temp3 & 0xff));
}
/*
@@ -1760,9 +1772,10 @@ static void XGI_UpdateModeInfo(struct vb_device_info *pVBInfo)
temp &= 0x05;
- if (!(tempcl & ActiveLCD))
+ if (!(tempcl & ActiveLCD)) {
if (temp == 0x01)
tempcl |= ActiveCRT2;
+ }
if (temp == 0x04)
tempcl |= ActiveLCD;
@@ -1856,7 +1869,8 @@ finish:
pVBInfo->VBType = tempbx;
}
-static void XGI_GetVBInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+static void XGI_GetVBInfo(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempax, push, tempbx, temp, modeflag;
@@ -1981,7 +1995,8 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex, struct vb_device_info *pVB
pVBInfo->VBInfo = tempbx;
}
-static void XGI_GetTVInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+static void XGI_GetTVInfo(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0, resinfo = 0, modeflag, index1;
@@ -1998,10 +2013,11 @@ static void XGI_GetTVInfo(unsigned short ModeIdIndex, struct vb_device_info *pVB
if (tempbx & TVSetPALM)
/* set to NTSC if PAL-M */
tempbx &= ~TVSetPAL;
- } else
+ } else {
tempbx &= (SetCHTVOverScan |
TVSetNTSCJ |
TVSetPAL);
+ }
if (pVBInfo->VBInfo & SetCRT2ToSCART)
tempbx |= TVSetPAL;
@@ -2026,9 +2042,10 @@ static void XGI_GetTVInfo(unsigned short ModeIdIndex, struct vb_device_info *pVB
(!(pVBInfo->VBInfo & SetNotSimuMode)))
tempbx |= TVSimuMode;
- if (!(tempbx & TVSetPAL) && (modeflag > 13) && (resinfo == 8))
+ if (!(tempbx & TVSetPAL) && (modeflag > 13) && (resinfo == 8)) {
/* NTSC 1024x768, */
tempbx |= NTSC1024x768;
+ }
tempbx |= RPLLDIV2XO;
@@ -2327,9 +2344,10 @@ void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
mdelay(xgifb_info->lvds_data.PSC_S3);
}
- if (pVBInfo->IF_DEF_LVDS == 0)
+ if (pVBInfo->IF_DEF_LVDS == 0) {
/* DVO/DVI signal off */
XGI_XG27BLSignalVDD(0x20, 0x00, pVBInfo);
+ }
}
xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x20);
@@ -2688,7 +2706,7 @@ static unsigned short XGI_GetColorDepth(unsigned short ModeIdIndex)
static unsigned short XGI_GetOffset(unsigned short ModeNo,
unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex)
+ unsigned short RefreshRateTableIndex)
{
unsigned short temp, colordepth, modeinfo, index, infoflag,
ColorDepth[] = { 0x01, 0x02, 0x04 };
@@ -3633,7 +3651,8 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_SetLCDRegs(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah,
tempbh, tempch;
@@ -4527,8 +4546,10 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
* 1 : 301B/302B/301LV/302LV
* Description :
*/
-static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
- unsigned char *tempch, struct vb_device_info *pVBInfo)
+static void XGI_GetTVPtrIndex2(unsigned short *tempbx,
+ unsigned char *tempcl,
+ unsigned char *tempch,
+ struct vb_device_info *pVBInfo)
{
*tempbx = 0;
*tempcl = 0;
@@ -4632,13 +4653,14 @@ static void XGI_SetLCDCap_A(unsigned short tempcx,
static void XGI_SetLCDCap_B(unsigned short tempcx,
struct vb_device_info *pVBInfo)
{
- if (tempcx & EnableLCD24bpp) /* 24bits */
+ if (tempcx & EnableLCD24bpp) { /* 24bits */
xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
(unsigned short)(((tempcx & 0x00ff) >> 6) | 0x0c));
- else
+ } else {
xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
(unsigned short)(((tempcx & 0x00ff) >> 6) | 0x18));
/* Enable Dither */
+ }
}
static void XGI_LongWait(struct vb_device_info *pVBInfo)
@@ -5461,8 +5483,9 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
switch (HwDeviceExtension->ujVBChipID) {
case VB_CHIP_301: /* fall through */
case VB_CHIP_302:
+ /* add for CRT2 */
XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
- pVBInfo); /* add for CRT2 */
+ pVBInfo);
break;
default:
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index 0da63e1da32f..42ecf7fe6766 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -2493,7 +2493,7 @@ static const struct XGI301C_Tap4TimingStruct xgifb_ntsc_525_tap4_timing[] = {
0x02, 0x0C, 0x0E, 0x04, 0x01, 0x0B, 0x0E, 0x06, /* ; EA-EF */
0x01, 0x0B, 0x0E, 0x06, 0x00, 0x0A, 0x0F, 0x07, /* ; F0-F7 */
0x00, 0x0A, 0x0F, 0x07, 0x00, 0x09, 0x0F, 0x08 /* ; F8-FF */
- }
+ }
}
};
@@ -2507,7 +2507,7 @@ static const struct XGI301C_Tap4TimingStruct YPbPr750pTap4Timing[] = {
0x7D, 0x0F, 0x16, 0x7E, 0x7D, 0x0E, 0x17, 0x7E, /* ; EA-EF */
0x7D, 0x0C, 0x18, 0x7F, 0x7D, 0x0A, 0x18, 0x01, /* ; F0-F7 */
0x7D, 0x08, 0x19, 0x02, 0x7D, 0x06, 0x19, 0x04 /* F8-FF */
- }
+ }
}
};
#endif
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index e2bc99980f75..4c44d7bed01a 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -5,6 +5,7 @@ menuconfig TARGET_CORE
select CONFIGFS_FS
select CRC_T10DIF
select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
+ select SGL_ALLOC
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 58caacd54a3b..c03a78ee26cd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2300,13 +2300,7 @@ queue_full:
void target_free_sgl(struct scatterlist *sgl, int nents)
{
- struct scatterlist *sg;
- int count;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
-
- kfree(sgl);
+ sgl_free_n_order(sgl, nents, 0);
}
EXPORT_SYMBOL(target_free_sgl);
@@ -2414,42 +2408,10 @@ int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
bool zero_page, bool chainable)
{
- struct scatterlist *sg;
- struct page *page;
- gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
- unsigned int nalloc, nent;
- int i = 0;
-
- nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
- if (chainable)
- nalloc++;
- sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- return -ENOMEM;
+ gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
- sg_init_table(sg, nalloc);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
- page = alloc_page(GFP_KERNEL | zero_flag);
- if (!page)
- goto out;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- return -ENOMEM;
+ *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
+ return *sgl ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);
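The conversion above leans on the lib/scatterlist SGL_ALLOC helpers (hence the "select SGL_ALLOC" added to drivers/target/Kconfig above). A minimal self-contained sketch of the pair, with signatures assumed from include/linux/scatterlist.h at the time of this series:

#include <linux/scatterlist.h>

/* Sketch only: round-trip through the helpers that target_alloc_sgl()
 * and target_free_sgl() now wrap. */
static int sgl_roundtrip(u32 length)
{
	unsigned int nents;
	struct scatterlist *sgl;

	/* Zeroed, order-0 pages; chainable reserves one extra entry. */
	sgl = sgl_alloc_order(length, 0, true /* chainable */,
			      GFP_KERNEL | __GFP_ZERO, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... dma_map_sg() / use the table here ... */

	sgl_free_n_order(sgl, nents, 0);
	return 0;
}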
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index dc63aba092e4..dfd23245f778 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -88,7 +88,6 @@ struct time_in_idle {
* @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
- * @plat_get_static_power: callback to calculate the static power
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -104,7 +103,6 @@ struct cpufreq_cooling_device {
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
- get_static_t plat_get_static_power;
};
static DEFINE_IDA(cpufreq_ida);
@@ -319,60 +317,6 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
}
/**
- * get_static_power() - calculate the static power consumed by the cpus
- * @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev
- * @tz: thermal zone device in which we're operating
- * @freq: frequency in KHz
- * @power: pointer in which to store the calculated static power
- *
- * Calculate the static power consumed by the cpus described by
- * @cpu_actor running at frequency @freq. This function relies on a
- * platform specific function that should have been provided when the
- * actor was registered. If it wasn't, the static power is assumed to
- * be negligible. The calculated static power is stored in @power.
- *
- * Return: 0 on success, -E* on failure.
- */
-static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev,
- struct thermal_zone_device *tz, unsigned long freq,
- u32 *power)
-{
- struct dev_pm_opp *opp;
- unsigned long voltage;
- struct cpufreq_policy *policy = cpufreq_cdev->policy;
- struct cpumask *cpumask = policy->related_cpus;
- unsigned long freq_hz = freq * 1000;
- struct device *dev;
-
- if (!cpufreq_cdev->plat_get_static_power) {
- *power = 0;
- return 0;
- }
-
- dev = get_cpu_device(policy->cpu);
- WARN_ON(!dev);
-
- opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
- if (IS_ERR(opp)) {
- dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
- freq_hz, PTR_ERR(opp));
- return -EINVAL;
- }
-
- voltage = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
-
- if (voltage == 0) {
- dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n",
- freq_hz);
- return -EINVAL;
- }
-
- return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay,
- voltage, power);
-}
-
-/**
* get_dynamic_power() - calculate the dynamic power
* @cpufreq_cdev: &cpufreq_cooling_device for this cdev
* @freq: current frequency
@@ -491,8 +435,8 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
u32 *power)
{
unsigned long freq;
- int i = 0, cpu, ret;
- u32 static_power, dynamic_power, total_load = 0;
+ int i = 0, cpu;
+ u32 total_load = 0;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
u32 *load_cpu = NULL;
@@ -522,22 +466,15 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
cpufreq_cdev->last_load = total_load;
- dynamic_power = get_dynamic_power(cpufreq_cdev, freq);
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret) {
- kfree(load_cpu);
- return ret;
- }
+ *power = get_dynamic_power(cpufreq_cdev, freq);
if (load_cpu) {
trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
- load_cpu, i, dynamic_power,
- static_power);
+ load_cpu, i, *power);
kfree(load_cpu);
}
- *power = static_power + dynamic_power;
return 0;
}
@@ -561,8 +498,6 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
{
unsigned int freq, num_cpus;
- u32 static_power, dynamic_power;
- int ret;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
/* Request state should be less than max_level */
@@ -572,13 +507,9 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
freq = cpufreq_cdev->freq_table[state].frequency;
- dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret)
- return ret;
+ *power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- *power = static_power + dynamic_power;
- return ret;
+ return 0;
}
/**
@@ -606,21 +537,14 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
unsigned int cur_freq, target_freq;
- int ret;
- s32 dyn_power;
- u32 last_load, normalised_power, static_power;
+ u32 last_load, normalised_power;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
cur_freq = cpufreq_quick_get(policy->cpu);
- ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power);
- if (ret)
- return ret;
-
- dyn_power = power - static_power;
- dyn_power = dyn_power > 0 ? dyn_power : 0;
+ power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
- normalised_power = (dyn_power * 100) / last_load;
+ normalised_power = (power * 100) / last_load;
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
*state = get_level(cpufreq_cdev, target_freq);
@@ -671,8 +595,6 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
* @policy: cpufreq policy
* Normally this should be same as cpufreq policy->related_cpus.
* @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -684,8 +606,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
+ struct cpufreq_policy *policy, u32 capacitance)
{
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
@@ -755,8 +676,6 @@ __cpufreq_cooling_register(struct device_node *np,
}
if (capacitance) {
- cpufreq_cdev->plat_get_static_power = plat_static_func;
-
ret = update_freq_table(cpufreq_cdev, capacitance);
if (ret) {
cdev = ERR_PTR(ret);
@@ -813,13 +732,12 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- return __cpufreq_cooling_register(NULL, policy, 0, NULL);
+ return __cpufreq_cooling_register(NULL, policy, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
* of_cpufreq_cooling_register - function to create cpufreq cooling device.
- * @np: a valid struct device_node to the cooling device device tree node
* @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
@@ -827,86 +745,45 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
* cooling devices. Using this API, the cpufreq cooling device will be
* linked to the device tree node provided.
*
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- if (!np)
- return ERR_PTR(-EINVAL);
-
- return __cpufreq_cooling_register(np, policy, 0, NULL);
-}
-EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
-
-/**
- * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this function, the
- * cooling device will implement the power extensions by using a
- * simple cpu power model. The cpus must have registered their OPPs
- * using the OPP library.
- *
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
- *
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
-{
- return __cpufreq_cooling_register(NULL, policy, capacitance,
- plat_static_func);
-}
-EXPORT_SYMBOL(cpufreq_power_cooling_register);
-
-/**
- * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @np: a valid struct device_node to the cooling device device tree node
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this API, the cpufreq
- * cooling device will be linked to the device tree node provided.
* Using this function, the cooling device will implement the power
* extensions by using a simple cpu power model. The cpus must have
* registered their OPPs using the OPP library.
*
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
+ * If the relevant property is present in the policy CPU's device tree
+ * node, the static power consumed by the cpu is also taken into account.
*
* Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
+ * and NULL on failure.
*/
struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- if (!np)
- return ERR_PTR(-EINVAL);
+ struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
+ struct thermal_cooling_device *cdev = NULL;
+ u32 capacitance = 0;
+
+ if (!np) {
+ pr_err("cpu_cooling: OF node not available for cpu%d\n",
+ policy->cpu);
+ return NULL;
+ }
+
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ of_property_read_u32(np, "dynamic-power-coefficient",
+ &capacitance);
- return __cpufreq_cooling_register(np, policy, capacitance,
- plat_static_func);
+ cdev = __cpufreq_cooling_register(np, policy, capacitance);
+ if (IS_ERR(cdev)) {
+ pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
+ policy->cpu, PTR_ERR(cdev));
+ cdev = NULL;
+ }
+ }
+
+ of_node_put(np);
+ return cdev;
}
-EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
+EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
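
The device tree handling now lives entirely inside of_cpufreq_cooling_register(), so a cpufreq driver only needs its policy to hook up a cooling device. A minimal sketch of a driver's ->ready() callback (my_cpufreq_ready() and my_cdev are hypothetical names; the call registers "thermal-cpufreq-%x" only when the CPU node carries #cooling-cells, and returns NULL rather than an ERR_PTR on failure):

	static struct thermal_cooling_device *my_cdev;	/* hypothetical driver state */

	static void my_cpufreq_ready(struct cpufreq_policy *policy)
	{
		/* NULL is a valid result: no #cooling-cells, or registration failed */
		my_cdev = of_cpufreq_cooling_register(policy);
	}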
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index fb80c96d8f73..ba81c9080f6e 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -317,7 +317,7 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
return ret ? ret : count;
}
-static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
+static DEVICE_ATTR_WO(emul_temp);
#endif
static ssize_t
@@ -396,16 +396,15 @@ create_s32_tzp_attr(offset);
 * All the attributes created for tzp (create_s32_tzp_attr) are also always
 * present on the sysfs interface.
*/
-static DEVICE_ATTR(type, 0444, type_show, NULL);
-static DEVICE_ATTR(temp, 0444, temp_show, NULL);
-static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
-static DEVICE_ATTR(available_policies, S_IRUGO, available_policies_show, NULL);
-static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
- sustainable_power_store);
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(temp);
+static DEVICE_ATTR_RW(policy);
+static DEVICE_ATTR_RO(available_policies);
+static DEVICE_ATTR_RW(sustainable_power);
/* These thermal zone device attributes are created based on conditions */
-static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
-static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
+static DEVICE_ATTR_RW(mode);
+static DEVICE_ATTR_RW(passive);
/* These attributes are unconditionally added to a thermal zone */
static struct attribute *thermal_zone_dev_attrs[] = {
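
The conversion works because the _RO/_RW/_WO helpers derive everything from the attribute name: they expect <name>_show()/<name>_store() callbacks and pick the conventional mode bits. Roughly, for the policy attribute (a sketch of the macro's effect, not the literal preprocessor output):

	static struct device_attribute dev_attr_policy = {
		.attr = { .name = "policy", .mode = 0644 },	/* _RW => 0644 */
		.show = policy_show,
		.store = policy_store,
	};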
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index cc2b4d9433ed..b811442c5ce6 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -394,10 +394,14 @@ config GOLDFISH_TTY
depends on GOLDFISH
select SERIAL_CORE
select SERIAL_CORE_CONSOLE
- select SERIAL_EARLYCON
help
Console and system TTY driver for the Goldfish virtual platform.
+config GOLDFISH_TTY_EARLY_CONSOLE
+ bool
+ default y if GOLDFISH_TTY=y
+ select SERIAL_EARLYCON
+
config DA_TTY
bool "DA TTY"
depends on METAG_DA
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 7f657bb5113c..1c1bd0afcd48 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -433,6 +433,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE
static void gf_early_console_putchar(struct uart_port *port, int ch)
{
__raw_writel(ch, port->membase);
@@ -456,6 +457,7 @@ static int __init gf_earlycon_setup(struct earlycon_device *device,
}
OF_EARLYCON_DECLARE(early_gf_tty, "google,goldfish-tty", gf_earlycon_setup);
+#endif
static const struct of_device_id goldfish_tty_of_match[] = {
{ .compatible = "google,goldfish-tty", },
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 015686ff4825..bdd3027ef01b 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -219,13 +219,9 @@ static struct isi_port isi_ports[PORT_COUNT];
static int WaitTillCardIsFree(unsigned long base)
{
unsigned int count = 0;
- unsigned int a = in_atomic(); /* do we run under spinlock? */
while (!(inw(base + 0xe) & 0x1) && count++ < 100)
- if (a)
- mdelay(1);
- else
- msleep(1);
+ mdelay(1);
return !(inw(base + 0xe) & 0x1);
}
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 68cbc03aab4b..250a19f042d7 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -1487,8 +1487,6 @@ static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_term
if (ts->c_iflag & IXANY)
xany = 1;
- /* Clear the features we don't support */
- ts->c_cflag &= ~CMSPAR;
MoxaPortFlowCtrl(ch, rts, cts, txflow, rxflow, xany);
baud = MoxaPortSetTermio(ch, ts, tty_get_baud_rate(tty));
if (baud == -1)
@@ -1781,10 +1779,17 @@ static int MoxaPortSetTermio(struct moxa_port *port, struct ktermios *termio,
mode |= MX_STOP1;
if (termio->c_cflag & PARENB) {
- if (termio->c_cflag & PARODD)
- mode |= MX_PARODD;
- else
- mode |= MX_PAREVEN;
+ if (termio->c_cflag & PARODD) {
+ if (termio->c_cflag & CMSPAR)
+ mode |= MX_PARMARK;
+ else
+ mode |= MX_PARODD;
+ } else {
+ if (termio->c_cflag & CMSPAR)
+ mode |= MX_PARSPACE;
+ else
+ mode |= MX_PAREVEN;
+ }
} else
mode |= MX_PARNONE;
diff --git a/drivers/tty/moxa.h b/drivers/tty/moxa.h
index 8ce89fd36c7b..563d2dce80b3 100644
--- a/drivers/tty/moxa.h
+++ b/drivers/tty/moxa.h
@@ -301,5 +301,7 @@
#define MX_PARNONE 0x00
#define MX_PAREVEN 0x40
#define MX_PARODD 0xC0
+#define MX_PARMARK 0xA0
+#define MX_PARSPACE 0x20
#endif
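
For reference, mark and space parity are requested from userspace through the same termios bits the driver now decodes; a hypothetical snippet (CMSPAR is Linux-specific and needs _GNU_SOURCE):

	struct termios tio;

	tcgetattr(fd, &tio);
	tio.c_cflag |= PARENB | CMSPAR;	/* sticky parity */
	tio.c_cflag |= PARODD;		/* with PARODD: mark; without: space */
	tcsetattr(fd, TCSANOW, &tio);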
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 5131bdc9e765..3b3af7e0ce1c 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1451,6 +1451,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
* in which case an opening port goes back to closed and a closing port
* is simply put into closed state (any further frames from the other
* end will get a DM response)
+ *
+ * A control dlci can stay in ADM mode while the other dlcis keep working
+ * just fine. In that case we simply keep the control dlci open after the
+ * DLCI_OPENING retries time out.
*/
static void gsm_dlci_t1(struct timer_list *t)
@@ -1464,8 +1468,15 @@ static void gsm_dlci_t1(struct timer_list *t)
if (dlci->retries) {
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
- } else
+ } else if (!dlci->addr && gsm->control == (DM | PF)) {
+ if (debug & 8)
+ pr_info("DLCI %d opening in ADM mode.\n",
+ dlci->addr);
+ gsm_dlci_open(dlci);
+ } else {
gsm_dlci_close(dlci);
+ }
+
break;
case DLCI_CLOSING:
dlci->retries--;
@@ -1483,8 +1494,8 @@ static void gsm_dlci_t1(struct timer_list *t)
* @dlci: DLCI to open
*
* Commence opening a DLCI from the Linux side. We issue SABM messages
- * to the modem which should then reply with a UA, at which point we
- * will move into open state. Opening is done asynchronously with retry
+ * to the modem which should then reply with a UA or ADM, at which point
+ * we will move into open state. Opening is done asynchronously with retry
* running off timers and the responses.
*/
@@ -2457,10 +2468,10 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
* Called without the kernel lock held - fine
*/
-static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
+static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct gsm_mux *gsm = tty->disc_data;
poll_wait(file, &tty->read_wait, wait);
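
__poll_t is a sparse bitwise type, so poll masks no longer silently mix with arbitrary integers; static checkers flag any mismatch. The same mechanical change repeats below for n_hdlc, n_r3964 and n_tty. A minimal conforming sketch (my_ldisc_poll() and input_pending() are hypothetical names):

	static __poll_t my_ldisc_poll(struct tty_struct *tty, struct file *file,
				      poll_table *wait)
	{
		__poll_t mask = 0;	/* only ever or-in POLL* constants */

		poll_wait(file, &tty->read_wait, wait);
		if (input_pending(tty))
			mask |= POLLIN | POLLRDNORM;
		return mask;
	}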
@@ -2955,7 +2966,6 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
static void gsmtty_close(struct tty_struct *tty, struct file *filp)
{
struct gsm_dlci *dlci = tty->driver_data;
- struct gsm_mux *gsm;
if (dlci == NULL)
return;
@@ -2964,7 +2974,6 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
mutex_lock(&dlci->mutex);
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
- gsm = dlci->gsm;
if (tty_port_close_start(&dlci->port, tty, filp) == 0)
return;
gsm_dlci_begin_close(dlci);
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index eea7b6cb3cc4..929434ebee50 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -180,7 +180,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr);
static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
-static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
@@ -796,11 +796,11 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
* to caller.
* Returns a bit mask containing info on which ops will not block.
*/
-static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait)
{
struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)n_hdlc_tty_poll() called\n",__FILE__,__LINE__);
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 30bb0900cd2f..e81d3db8ad63 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -135,7 +135,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
static int r3964_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
-static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count);
@@ -1216,14 +1216,14 @@ static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old)
}
/* Called without the kernel lock held - fine */
-static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait)
{
struct r3964_info *pInfo = tty->disc_data;
struct r3964_client_info *pClient;
struct r3964_message *pMsg = NULL;
unsigned long flags;
- int result = POLLOUT;
+ __poll_t result = POLLOUT;
TRACE_L("POLL");
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 539b49adb6af..478a9b40fd03 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2368,10 +2368,10 @@ break_out:
* Called without the kernel lock held - fine
*/
-static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
+static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 1bef39828ca7..f439c72b9e3c 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -19,6 +19,38 @@
static bool is_registered;
static DEFINE_IDA(ctrl_ida);
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
+ return of_device_modalias(dev, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *serdev_device_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(serdev_device);
+
+static int serdev_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int rc;
+
+ /* TODO: platform modalias */
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
+ return of_device_uevent_modalias(dev, env);
+}
+
static void serdev_device_release(struct device *dev)
{
struct serdev_device *serdev = to_serdev_device(dev);
@@ -26,9 +58,16 @@ static void serdev_device_release(struct device *dev)
}
static const struct device_type serdev_device_type = {
+ .groups = serdev_device_groups,
+ .uevent = serdev_device_uevent,
.release = serdev_device_release,
};
+static bool is_serdev_device(const struct device *dev)
+{
+ return dev->type == &serdev_device_type;
+}
+
static void serdev_ctrl_release(struct device *dev)
{
struct serdev_controller *ctrl = to_serdev_controller(dev);
@@ -42,6 +81,9 @@ static const struct device_type serdev_ctrl_type = {
static int serdev_device_match(struct device *dev, struct device_driver *drv)
{
+ if (!is_serdev_device(dev))
+ return 0;
+
/* TODO: platform matching */
if (acpi_driver_match_device(dev, drv))
return 1;
@@ -49,18 +91,6 @@ static int serdev_device_match(struct device *dev, struct device_driver *drv)
return of_driver_match_device(dev, drv);
}
-static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- int rc;
-
- /* TODO: platform modalias */
- rc = acpi_device_uevent_modalias(dev, env);
- if (rc != -ENODEV)
- return rc;
-
- return of_device_uevent_modalias(dev, env);
-}
-
/**
* serdev_device_add() - add a device previously constructed via serdev_device_alloc()
* @serdev: serdev_device to be added
@@ -132,6 +162,33 @@ void serdev_device_close(struct serdev_device *serdev)
}
EXPORT_SYMBOL_GPL(serdev_device_close);
+static void devm_serdev_device_release(struct device *dev, void *dr)
+{
+ serdev_device_close(*(struct serdev_device **)dr);
+}
+
+int devm_serdev_device_open(struct device *dev, struct serdev_device *serdev)
+{
+ struct serdev_device **dr;
+ int ret;
+
+ dr = devres_alloc(devm_serdev_device_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = serdev_device_open(serdev);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = serdev;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_serdev_device_open);
+
void serdev_device_write_wakeup(struct serdev_device *serdev)
{
complete(&serdev->write_comp);
@@ -225,6 +282,18 @@ void serdev_device_set_flow_control(struct serdev_device *serdev, bool enable)
}
EXPORT_SYMBOL_GPL(serdev_device_set_flow_control);
+int serdev_device_set_parity(struct serdev_device *serdev,
+ enum serdev_parity parity)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->set_parity)
+ return -ENOTSUPP;
+
+ return ctrl->ops->set_parity(ctrl, parity);
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_parity);
+
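Taken together with devm_serdev_device_open() above, a serdev client can now leave both teardown and line-parameter setup to the core. A hypothetical probe sketch:

	static int my_serdev_probe(struct serdev_device *serdev)
	{
		int ret;

		/* Closed automatically via devres when the device unbinds */
		ret = devm_serdev_device_open(&serdev->dev, serdev);
		if (ret)
			return ret;

		/* -ENOTSUPP if the controller has no set_parity op */
		return serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
	}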
void serdev_device_wait_until_sent(struct serdev_device *serdev, long timeout)
{
struct serdev_controller *ctrl = serdev->ctrl;
@@ -268,37 +337,16 @@ static int serdev_drv_probe(struct device *dev)
static int serdev_drv_remove(struct device *dev)
{
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
-
- sdrv->remove(to_serdev_device(dev));
+ if (sdrv->remove)
+ sdrv->remove(to_serdev_device(dev));
return 0;
}
-static ssize_t modalias_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int len;
-
- len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
- if (len != -ENODEV)
- return len;
-
- return of_device_modalias(dev, buf, PAGE_SIZE);
-}
-DEVICE_ATTR_RO(modalias);
-
-static struct attribute *serdev_device_attrs[] = {
- &dev_attr_modalias.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(serdev_device);
-
static struct bus_type serdev_bus_type = {
.name = "serial",
.match = serdev_device_match,
.probe = serdev_drv_probe,
.remove = serdev_drv_remove,
- .uevent = serdev_uevent,
- .dev_groups = serdev_device_groups,
};
/**
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 247788a16f0b..fa1672993b4c 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -59,7 +59,8 @@ static void ttyport_write_wakeup(struct tty_port *port)
test_bit(SERPORT_ACTIVE, &serport->flags))
serdev_controller_write_wakeup(ctrl);
- wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+ /* Wake up any tty_wait_until_sent() */
+ wake_up_interruptible(&tty->write_wait);
tty_kref_put(tty);
}
@@ -122,6 +123,8 @@ static int ttyport_open(struct serdev_controller *ctrl)
if (ret)
goto err_close;
+ tty_unlock(serport->tty);
+
/* Bring the UART into a known 8 bits no parity hw fc state */
ktermios = tty->termios;
ktermios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP |
@@ -131,11 +134,12 @@ static int ttyport_open(struct serdev_controller *ctrl)
ktermios.c_cflag &= ~(CSIZE | PARENB);
ktermios.c_cflag |= CS8;
ktermios.c_cflag |= CRTSCTS;
+ /* Hangups are not supported so make sure to ignore carrier detect. */
+ ktermios.c_cflag |= CLOCAL;
tty_set_termios(tty, &ktermios);
set_bit(SERPORT_ACTIVE, &serport->flags);
- tty_unlock(serport->tty);
return 0;
err_close:
@@ -190,6 +194,29 @@ static void ttyport_set_flow_control(struct serdev_controller *ctrl, bool enable
tty_set_termios(tty, &ktermios);
}
+static int ttyport_set_parity(struct serdev_controller *ctrl,
+ enum serdev_parity parity)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+ struct ktermios ktermios = tty->termios;
+
+ ktermios.c_cflag &= ~(PARENB | PARODD | CMSPAR);
+ if (parity != SERDEV_PARITY_NONE) {
+ ktermios.c_cflag |= PARENB;
+ if (parity == SERDEV_PARITY_ODD)
+ ktermios.c_cflag |= PARODD;
+ }
+
+ tty_set_termios(tty, &ktermios);
+
+ if ((tty->termios.c_cflag & (PARENB | PARODD | CMSPAR)) !=
+ (ktermios.c_cflag & (PARENB | PARODD | CMSPAR)))
+ return -EINVAL;
+
+ return 0;
+}
+
static void ttyport_wait_until_sent(struct serdev_controller *ctrl, long timeout)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
@@ -227,6 +254,7 @@ static const struct serdev_controller_ops ctrl_ops = {
.open = ttyport_open,
.close = ttyport_close,
.set_flow_control = ttyport_set_flow_control,
+ .set_parity = ttyport_set_parity,
.set_baudrate = ttyport_set_baudrate,
.wait_until_sent = ttyport_wait_until_sent,
.get_tiocm = ttyport_get_tiocm,
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 5bb0c42c88dd..cd1b94a0f451 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -252,31 +252,25 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
- unsigned int target_rate, min_rate, max_rate;
struct dw8250_data *d = p->private_data;
long rate;
- int i, ret;
+ int ret;
if (IS_ERR(d->clk) || !old)
goto out;
- /* Find a clk rate within +/-1.6% of an integer multiple of baudx16 */
- target_rate = baud * 16;
- min_rate = target_rate - (target_rate >> 6);
- max_rate = target_rate + (target_rate >> 6);
-
- for (i = 1; i <= UART_DIV_MAX; i++) {
- rate = clk_round_rate(d->clk, i * target_rate);
- if (rate >= i * min_rate && rate <= i * max_rate)
- break;
- }
- if (i <= UART_DIV_MAX) {
- clk_disable_unprepare(d->clk);
+ clk_disable_unprepare(d->clk);
+ rate = clk_round_rate(d->clk, baud * 16);
+ if (rate < 0)
+ ret = rate;
+ else if (rate == 0)
+ ret = -ENOENT;
+ else
ret = clk_set_rate(d->clk, rate);
- clk_prepare_enable(d->clk);
- if (!ret)
- p->uartclk = rate;
- }
+ clk_prepare_enable(d->clk);
+
+ if (!ret)
+ p->uartclk = rate;
out:
p->status &= ~UPSTAT_AUTOCTS;
@@ -515,7 +509,8 @@ static int dw8250_probe(struct platform_device *pdev)
/* If no clock rate is defined, fail. */
if (!p->uartclk) {
dev_err(dev, "clock rate not defined\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_clk;
}
data->pclk = devm_clk_get(dev, "apb_pclk");
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index a402878c9f30..38af306ca0e8 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -34,6 +34,7 @@
#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
+#define UART_EXAR_INT0 0x80
#define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */
#define UART_EXAR_FCTR 0x08 /* Feature Control Register */
@@ -121,6 +122,7 @@ struct exar8250_board {
struct exar8250 {
unsigned int nr;
struct exar8250_board *board;
+ void __iomem *virt;
int line[0];
};
@@ -131,12 +133,9 @@ static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev,
const struct exar8250_board *board = priv->board;
unsigned int bar = 0;
- if (!pcim_iomap_table(pcidev)[bar] && !pcim_iomap(pcidev, bar, 0))
- return -ENOMEM;
-
port->port.iotype = UPIO_MEM;
port->port.mapbase = pci_resource_start(pcidev, bar) + offset;
- port->port.membase = pcim_iomap_table(pcidev)[bar] + offset;
+ port->port.membase = priv->virt + offset;
port->port.regshift = board->reg_shift;
return 0;
@@ -420,6 +419,25 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
port->port.private_data = NULL;
}
+/*
+ * These Exar UARTs have an extra interrupt indicator that could fire for a
+ * few interrupts that are not presented/cleared through IIR, one of which is
+ * a wakeup interrupt raised when coming out of sleep. These interrupts are only
+ * cleared by reading global INT0 or INT1 registers as interrupts are
+ * associated with channel 0. The INT[3:0] registers _are_ accessible from each
+ * channel's address space, but for the sake of bus efficiency we register a
+ * dedicated handler at the PCI device level to handle them.
+ */
+static irqreturn_t exar_misc_handler(int irq, void *data)
+{
+ struct exar8250 *priv = data;
+
+ /* Clear all PCI interrupts by reading INT0. No effect on IIR */
+ ioread8(priv->virt + UART_EXAR_INT0);
+
+ return IRQ_HANDLED;
+}
+
static int
exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
@@ -448,6 +466,9 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
return -ENOMEM;
priv->board = board;
+ priv->virt = pcim_iomap(pcidev, bar, 0);
+ if (!priv->virt)
+ return -ENOMEM;
pci_set_master(pcidev);
@@ -461,6 +482,11 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
uart.port.irq = pci_irq_vector(pcidev, 0);
uart.port.dev = &pcidev->dev;
+ rc = devm_request_irq(&pcidev->dev, uart.port.irq, exar_misc_handler,
+ IRQF_SHARED, "exar_uart", priv);
+ if (rc)
+ return rc;
+
for (i = 0; i < nr_ports && i < maxnr; i++) {
rc = board->setup(priv, pcidev, &uart, i);
if (rc) {
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index 6af84900870e..15a8c8dfa92b 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -91,14 +91,22 @@ static int __init ingenic_early_console_setup(struct earlycon_device *dev,
const char *opt)
{
struct uart_port *port = &dev->port;
- unsigned int baud, divisor;
+ unsigned int divisor;
+ int baud = 115200;
if (!dev->port.membase)
return -ENODEV;
+ if (opt) {
+ unsigned int parity, bits, flow; /* unused for now */
+
+ uart_parse_options(opt, &baud, &parity, &bits, &flow);
+ }
+
ingenic_early_console_setup_clock(dev);
- baud = dev->baud ?: 115200;
+ if (dev->baud)
+ baud = dev->baud;
divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * baud);
early_out(port, UART_IER, 0);
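
The divisor computation is plain integer rounding; with hypothetical numbers (a 48 MHz uartclk at the default 115200 baud):

	/*
	 * divisor = DIV_ROUND_CLOSEST(48000000, 16 * 115200)
	 *         = DIV_ROUND_CLOSEST(48000000, 1843200) = 26
	 * actual rate = 48000000 / (16 * 26) ~= 115385 baud,
	 * about 0.16% fast, well within the usual UART tolerance.
	 */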
@@ -125,6 +133,10 @@ EARLYCON_DECLARE(jz4740_uart, ingenic_early_console_setup);
OF_EARLYCON_DECLARE(jz4740_uart, "ingenic,jz4740-uart",
ingenic_early_console_setup);
+EARLYCON_DECLARE(jz4770_uart, ingenic_early_console_setup);
+OF_EARLYCON_DECLARE(jz4770_uart, "ingenic,jz4770-uart",
+ ingenic_early_console_setup);
+
EARLYCON_DECLARE(jz4775_uart, ingenic_early_console_setup);
OF_EARLYCON_DECLARE(jz4775_uart, "ingenic,jz4775-uart",
ingenic_early_console_setup);
@@ -319,6 +331,7 @@ static const struct ingenic_uart_config jz4780_uart_config = {
static const struct of_device_id of_match[] = {
{ .compatible = "ingenic,jz4740-uart", .data = &jz4740_uart_config },
{ .compatible = "ingenic,jz4760-uart", .data = &jz4760_uart_config },
+ { .compatible = "ingenic,jz4770-uart", .data = &jz4760_uart_config },
{ .compatible = "ingenic,jz4775-uart", .data = &jz4760_uart_config },
{ .compatible = "ingenic,jz4780-uart", .data = &jz4780_uart_config },
{ /* sentinel */ }
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 1e67a7e4a5fd..160b8906d9b9 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -136,8 +136,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
}
info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
- if (IS_ERR(info->rst))
+ if (IS_ERR(info->rst)) {
+ ret = PTR_ERR(info->rst);
goto err_dispose;
+ }
+
ret = reset_control_deassert(info->rst);
if (ret)
goto err_dispose;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index bd40ba402410..57f6eba47f44 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -414,7 +414,7 @@ static void omap_8250_set_termios(struct uart_port *port,
/* Up to here it was mostly serial8250_do_set_termios() */
/*
- * We enable TRIG_GRANU for RX and TX and additionaly we set
+ * We enable TRIG_GRANU for RX and TX and additionally we set
* SCR_TX_EMPTY bit. The result is the following:
* - RX_TRIGGER amount of bytes in the FIFO will cause an interrupt.
* - less than RX_TRIGGER number of bytes will also cause an interrupt
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 11434551ac0a..1328c7e70108 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -441,7 +441,6 @@ static void io_serial_out(struct uart_port *p, int offset, int value)
}
static int serial8250_default_handle_irq(struct uart_port *port);
-static int exar_handle_irq(struct uart_port *port);
static void set_io_from_upio(struct uart_port *p)
{
@@ -1884,26 +1883,6 @@ static int serial8250_default_handle_irq(struct uart_port *port)
}
/*
- * These Exar UARTs have an extra interrupt indicator that could
- * fire for a few unimplemented interrupts. One of which is a
- * wakeup event when coming out of sleep. Put this here just
- * to be on the safe side that these interrupts don't go unhandled.
- */
-static int exar_handle_irq(struct uart_port *port)
-{
- unsigned int iir = serial_port_in(port, UART_IIR);
- int ret = 0;
-
- if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) &&
- serial_port_in(port, UART_EXAR_INT0) != 0)
- ret = 1;
-
- ret |= serial8250_handle_irq(port, iir);
-
- return ret;
-}
-
-/*
* Newer 16550 compatible parts such as the SC16C650 & Altera 16550 Soft IP
* have a programmable TX threshold that triggers the THRE interrupt in
* the IIR register. In this case, the THRE interrupt indicates the FIFO
@@ -3067,11 +3046,6 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (port->type == PORT_UNKNOWN)
serial8250_release_std_resource(up);
- /* Fixme: probably not the best place for this */
- if ((port->type == PORT_XR17V35X) ||
- (port->type == PORT_XR17D15X))
- port->handle_irq = exar_handle_irq;
-
register_dev_spec_attr_grp(up);
up->fcr = uart_config[up->port.type].fcr;
}
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 45ef506293ae..28d88ccf5a0c 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -250,12 +250,13 @@ static int uniphier_uart_probe(struct platform_device *pdev)
up.dl_read = uniphier_serial_dl_read;
up.dl_write = uniphier_serial_dl_write;
- priv->line = serial8250_register_8250_port(&up);
- if (priv->line < 0) {
+ ret = serial8250_register_8250_port(&up);
+ if (ret < 0) {
dev_err(dev, "failed to register 8250 port\n");
clk_disable_unprepare(priv->clk);
return ret;
}
+ priv->line = ret;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index a5c0ef1e7695..16b1496e6105 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -5,6 +5,7 @@
config SERIAL_8250
tristate "8250/16550 and compatible serial support"
+ depends on !S390
select SERIAL_CORE
---help---
This selects whether you want to include the driver for the standard
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index b788fee54249..3682fd3e960c 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -761,24 +761,30 @@ config SERIAL_SH_SCI
select SERIAL_MCTRL_GPIO if GPIOLIB
config SERIAL_SH_SCI_NR_UARTS
- int "Maximum number of SCI(F) serial ports"
+ int "Maximum number of SCI(F) serial ports" if EXPERT
depends on SERIAL_SH_SCI
+ default "3" if H8300
+ default "10" if SUPERH
+ default "18" if ARCH_RENESAS
default "2"
config SERIAL_SH_SCI_CONSOLE
- bool "Support for console on SuperH SCI(F)"
+ bool "Support for console on SuperH SCI(F)" if EXPERT
depends on SERIAL_SH_SCI=y
select SERIAL_CORE_CONSOLE
+ default y
config SERIAL_SH_SCI_EARLYCON
- bool "Support for early console on SuperH SCI(F)"
+ bool "Support for early console on SuperH SCI(F)" if EXPERT
depends on SERIAL_SH_SCI=y
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
+ default ARCH_RENESAS || H8300
config SERIAL_SH_SCI_DMA
- bool "DMA support"
+ bool "DMA support" if EXPERT
depends on SERIAL_SH_SCI && DMA_ENGINE
+ default ARCH_RENESAS
config SERIAL_PNX8XXX
bool "Enable PNX8XXX SoCs' UART Support"
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 04af8de8617e..4b40a5b449ee 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -314,10 +314,9 @@ static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
u16 status;
- unsigned int ch, flag, max_count = 256;
- int fifotaken = 0;
+ unsigned int ch, flag, fifotaken;
- while (max_count--) {
+ for (fifotaken = 0; fifotaken != 256; fifotaken++) {
status = pl011_read(uap, REG_FR);
if (status & UART01x_FR_RXFE)
break;
@@ -326,7 +325,6 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
flag = TTY_NORMAL;
uap->port.icount.rx++;
- fifotaken++;
if (unlikely(ch & UART_DR_ERROR)) {
if (ch & UART011_DR_BE) {
@@ -1482,12 +1480,10 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
struct uart_amba_port *uap = dev_id;
unsigned long flags;
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
- u16 imsc;
int handled = 0;
spin_lock_irqsave(&uap->port.lock, flags);
- imsc = pl011_read(uap, REG_IMSC);
- status = pl011_read(uap, REG_RIS) & imsc;
+ status = pl011_read(uap, REG_RIS) & uap->im;
if (status) {
do {
check_apply_cts_event_workaround(uap);
@@ -1511,7 +1507,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
if (pass_counter-- == 0)
break;
- status = pl011_read(uap, REG_RIS) & imsc;
+ status = pl011_read(uap, REG_RIS) & uap->im;
} while (status != 0);
handled = 1;
}
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index efa25611ca0c..df46a9e88c34 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2345,7 +2345,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
- of_get_rs485_mode(pdev->dev.of_node, &port->rs485);
+ uart_get_rs485_mode(&pdev->dev, &port->rs485);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 1c4d3f387138..8cf112f2efc3 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2206,23 +2206,16 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_attach_port;
- of_get_rs485_mode(np, &sport->port.rs485);
+ uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);
- if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX) {
+ if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX)
dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
- return -ENOSYS;
- }
if (sport->port.rs485.delay_rts_before_send ||
- sport->port.rs485.delay_rts_after_send) {
+ sport->port.rs485.delay_rts_after_send)
dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
- return -ENOSYS;
- }
- if (sport->port.rs485.flags & SER_RS485_ENABLED) {
- sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
- writeb(UARTMODEM_TXRTSE, sport->port.membase + UARTMODEM);
- }
+ lpuart_config_rs485(&sport->port, &sport->port.rs485);
sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
if (!sport->dma_tx_chan)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index e4b3d9123a03..1d7ca382bc12 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -703,25 +703,6 @@ out:
return IRQ_HANDLED;
}
-static void imx_disable_rx_int(struct imx_port *sport)
-{
- unsigned long temp;
-
- /* disable the receiver ready and aging timer interrupts */
- temp = readl(sport->port.membase + UCR1);
- temp &= ~(UCR1_RRDYEN);
- writel(temp, sport->port.membase + UCR1);
-
- temp = readl(sport->port.membase + UCR2);
- temp &= ~(UCR2_ATEN);
- writel(temp, sport->port.membase + UCR2);
-
- /* disable the rx errors interrupts */
- temp = readl(sport->port.membase + UCR4);
- temp &= ~UCR4_OREN;
- writel(temp, sport->port.membase + UCR4);
-}
-
static void clear_rx_errors(struct imx_port *sport);
/*
@@ -1252,18 +1233,21 @@ static int imx_startup(struct uart_port *port)
if (sport->dma_is_inited && !sport->dma_is_enabled)
imx_enable_dma(sport);
- temp = readl(sport->port.membase + UCR1);
- temp |= UCR1_RRDYEN | UCR1_UARTEN;
+ temp = readl(sport->port.membase + UCR1) & ~UCR1_RRDYEN;
+ if (!sport->dma_is_enabled)
+ temp |= UCR1_RRDYEN;
+ temp |= UCR1_UARTEN;
if (sport->have_rtscts)
temp |= UCR1_RTSDEN;
writel(temp, sport->port.membase + UCR1);
- temp = readl(sport->port.membase + UCR4);
- temp |= UCR4_OREN;
+ temp = readl(sport->port.membase + UCR4) & ~UCR4_OREN;
+ if (!sport->dma_is_enabled)
+ temp |= UCR4_OREN;
writel(temp, sport->port.membase + UCR4);
- temp = readl(sport->port.membase + UCR2);
+ temp = readl(sport->port.membase + UCR2) & ~UCR2_ATEN;
temp |= (UCR2_RXEN | UCR2_TXEN);
if (!sport->have_rtscts)
temp |= UCR2_IRTS;
@@ -1297,10 +1281,8 @@ static int imx_startup(struct uart_port *port)
* In our iMX53 the average delay for the first reception dropped from
* approximately 35000 microseconds to 1000 microseconds.
*/
- if (sport->dma_is_enabled) {
- imx_disable_rx_int(sport);
+ if (sport->dma_is_enabled)
start_rx_dma(sport);
- }
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -2017,8 +1999,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
if (of_get_property(np, "rts-gpios", NULL))
sport->have_rtsgpio = 1;
- of_get_rs485_mode(np, &sport->port.rs485);
-
return 0;
}
#else
@@ -2080,7 +2060,6 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.fifosize = 32;
sport->port.ops = &imx_pops;
sport->port.rs485_config = imx_rs485_config;
- sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
sport->port.flags = UPF_BOOT_AUTOCONF;
timer_setup(&sport->timer, imx_timeout, 0);
@@ -2111,6 +2090,14 @@ static int serial_imx_probe(struct platform_device *pdev)
return ret;
}
+ uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);
+
+ if (sport->port.rs485.flags & SER_RS485_ENABLED &&
+ (!sport->have_rtscts || !sport->have_rtsgpio))
+ dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
+
+ imx_rs485_config(&sport->port, &sport->port.rs485);
+
/* Disable interrupts before requesting them */
reg = readl_relaxed(sport->port.membase + UCR1);
reg &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN |
@@ -2232,29 +2219,28 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
unsigned int val;
val = readl(sport->port.membase + UCR3);
- if (on)
+ if (on) {
+ writel(USR1_AWAKE, sport->port.membase + USR1);
val |= UCR3_AWAKEN;
+ }
else
val &= ~UCR3_AWAKEN;
writel(val, sport->port.membase + UCR3);
- val = readl(sport->port.membase + UCR1);
- if (on)
- val |= UCR1_RTSDEN;
- else
- val &= ~UCR1_RTSDEN;
- writel(val, sport->port.membase + UCR1);
+ if (sport->have_rtscts) {
+ val = readl(sport->port.membase + UCR1);
+ if (on)
+ val |= UCR1_RTSDEN;
+ else
+ val &= ~UCR1_RTSDEN;
+ writel(val, sport->port.membase + UCR1);
+ }
}
static int imx_serial_port_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct imx_port *sport = platform_get_drvdata(pdev);
- int ret;
-
- ret = clk_enable(sport->clk_ipg);
- if (ret)
- return ret;
serial_imx_save_context(sport);
@@ -2275,8 +2261,6 @@ static int imx_serial_port_resume_noirq(struct device *dev)
serial_imx_restore_context(sport);
- clk_disable(sport->clk_ipg);
-
return 0;
}
@@ -2284,15 +2268,19 @@ static int imx_serial_port_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct imx_port *sport = platform_get_drvdata(pdev);
-
- /* enable wakeup from i.MX UART */
- serial_imx_enable_wakeup(sport, true);
+ int ret;
uart_suspend_port(&imx_reg, &sport->port);
disable_irq(sport->port.irq);
- /* Needed to enable clock in suspend_noirq */
- return clk_prepare(sport->clk_ipg);
+ ret = clk_prepare_enable(sport->clk_ipg);
+ if (ret)
+ return ret;
+
+ /* enable wakeup from i.MX UART */
+ serial_imx_enable_wakeup(sport, true);
+
+ return 0;
}
static int imx_serial_port_resume(struct device *dev)
@@ -2306,7 +2294,7 @@ static int imx_serial_port_resume(struct device *dev)
uart_resume_port(&imx_reg, &sport->port);
enable_irq(sport->port.irq);
- clk_unprepare(sport->clk_ipg);
+ clk_disable_unprepare(sport->clk_ipg);
return 0;
}
@@ -2318,8 +2306,7 @@ static int imx_serial_port_freeze(struct device *dev)
uart_suspend_port(&imx_reg, &sport->port);
- /* Needed to enable clock in suspend_noirq */
- return clk_prepare(sport->clk_ipg);
+ return clk_prepare_enable(sport->clk_ipg);
}
static int imx_serial_port_thaw(struct device *dev)
@@ -2329,7 +2316,7 @@ static int imx_serial_port_thaw(struct device *dev)
uart_resume_port(&imx_reg, &sport->port);
- clk_unprepare(sport->clk_ipg);
+ clk_disable_unprepare(sport->clk_ipg);
return 0;
}
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 4718560b8fdc..bf0e2a4cb0ce 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -282,9 +282,6 @@ static void neo_copy_data_from_uart_to_queue(struct jsm_channel *ch)
u16 head;
u16 tail;
- if (!ch)
- return;
-
/* cache head and tail of queue */
head = ch->ch_r_head & RQUEUEMASK;
tail = ch->ch_r_tail & RQUEUEMASK;
@@ -1175,6 +1172,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
continue;
ch = brd->channels[port];
+ if (!ch)
+ continue;
+
neo_copy_data_from_uart_to_queue(ch);
/* Call our tty layer to enforce queue flow control if needed. */
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 469927d37b41..b6bd6e15e07b 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -523,9 +523,6 @@ void jsm_input(struct jsm_channel *ch)
jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n");
- if (!ch)
- return;
-
port = &ch->uart_port.state->port;
tp = port->tty;
@@ -648,11 +645,8 @@ static void jsm_carrier(struct jsm_channel *ch)
int phys_carrier = 0;
jsm_dbg(CARR, &ch->ch_bd->pci_dev, "start\n");
- if (!ch)
- return;
bd = ch->ch_bd;
-
if (!bd)
return;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index ecb6513a6505..39f635812077 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -233,6 +233,7 @@
/* Misc definitions */
#define MAX310X_FIFO_SIZE (128)
#define MAX310x_REV_MASK (0xf8)
+#define MAX310X_WRITE_BIT 0x80
/* MAX3107 specific */
#define MAX3107_REV_ID (0xa0)
@@ -593,57 +594,118 @@ static int max310x_set_ref_clk(struct max310x_port *s, unsigned long freq,
return (int)bestfreq;
}
-static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
+static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len)
{
- unsigned int sts, ch, flag;
+ u8 header[] = { (port->iobase + MAX310X_THR_REG) | MAX310X_WRITE_BIT };
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = &header,
+ .len = sizeof(header),
+ }, {
+ .tx_buf = txbuf,
+ .len = len,
+ }
+ };
+ spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+}
- if (unlikely(rxlen >= port->fifosize)) {
- dev_warn_ratelimited(port->dev, "Possible RX FIFO overrun\n");
- port->icount.buf_overrun++;
- /* Ensure sanity of RX level */
- rxlen = port->fifosize;
- }
+static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
+{
+ u8 header[] = { port->iobase + MAX310X_RHR_REG };
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = &header,
+ .len = sizeof(header),
+ }, {
+ .rx_buf = rxbuf,
+ .len = len,
+ }
+ };
+ spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+}
- while (rxlen--) {
- ch = max310x_port_read(port, MAX310X_RHR_REG);
- sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
+static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
+{
+ unsigned int sts, ch, flag, i;
+ u8 buf[MAX310X_FIFO_SIZE];
+
+ if (port->read_status_mask == MAX310X_LSR_RXOVR_BIT) {
+ /* We are just reading, happily ignoring any error conditions.
+ * Break condition, parity checking, framing errors -- they
+ * are all ignored. That means that we can do a batch-read.
+ *
+ * There is a small window for a race if the RX FIFO
+ * overruns while we're reading the buffer; the datasheet says
+ * that the LSR register applies to the "current" character.
+ * That's also the reason why we cannot do batched reads when
+ * asked to check the individual statuses.
+ */
- sts &= MAX310X_LSR_RXPAR_BIT | MAX310X_LSR_FRERR_BIT |
- MAX310X_LSR_RXOVR_BIT | MAX310X_LSR_RXBRK_BIT;
+ sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
+ max310x_batch_read(port, buf, rxlen);
- port->icount.rx++;
+ port->icount.rx += rxlen;
flag = TTY_NORMAL;
+ sts &= port->read_status_mask;
- if (unlikely(sts)) {
- if (sts & MAX310X_LSR_RXBRK_BIT) {
- port->icount.brk++;
- if (uart_handle_break(port))
- continue;
- } else if (sts & MAX310X_LSR_RXPAR_BIT)
- port->icount.parity++;
- else if (sts & MAX310X_LSR_FRERR_BIT)
- port->icount.frame++;
- else if (sts & MAX310X_LSR_RXOVR_BIT)
- port->icount.overrun++;
-
- sts &= port->read_status_mask;
- if (sts & MAX310X_LSR_RXBRK_BIT)
- flag = TTY_BREAK;
- else if (sts & MAX310X_LSR_RXPAR_BIT)
- flag = TTY_PARITY;
- else if (sts & MAX310X_LSR_FRERR_BIT)
- flag = TTY_FRAME;
- else if (sts & MAX310X_LSR_RXOVR_BIT)
- flag = TTY_OVERRUN;
+ if (sts & MAX310X_LSR_RXOVR_BIT) {
+ dev_warn_ratelimited(port->dev, "Hardware RX FIFO overrun\n");
+ port->icount.overrun++;
}
- if (uart_handle_sysrq_char(port, ch))
- continue;
+ for (i = 0; i < rxlen; ++i) {
+ uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT, buf[i], flag);
+ }
+
+ } else {
+ if (unlikely(rxlen >= port->fifosize)) {
+ dev_warn_ratelimited(port->dev, "Possible RX FIFO overrun\n");
+ port->icount.buf_overrun++;
+ /* Ensure sanity of RX level */
+ rxlen = port->fifosize;
+ }
+
+ while (rxlen--) {
+ ch = max310x_port_read(port, MAX310X_RHR_REG);
+ sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
+
+ sts &= MAX310X_LSR_RXPAR_BIT | MAX310X_LSR_FRERR_BIT |
+ MAX310X_LSR_RXOVR_BIT | MAX310X_LSR_RXBRK_BIT;
+
+ port->icount.rx++;
+ flag = TTY_NORMAL;
+
+ if (unlikely(sts)) {
+ if (sts & MAX310X_LSR_RXBRK_BIT) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sts & MAX310X_LSR_RXPAR_BIT)
+ port->icount.parity++;
+ else if (sts & MAX310X_LSR_FRERR_BIT)
+ port->icount.frame++;
+ else if (sts & MAX310X_LSR_RXOVR_BIT)
+ port->icount.overrun++;
+
+ sts &= port->read_status_mask;
+ if (sts & MAX310X_LSR_RXBRK_BIT)
+ flag = TTY_BREAK;
+ else if (sts & MAX310X_LSR_RXPAR_BIT)
+ flag = TTY_PARITY;
+ else if (sts & MAX310X_LSR_FRERR_BIT)
+ flag = TTY_FRAME;
+ else if (sts & MAX310X_LSR_RXOVR_BIT)
+ flag = TTY_OVERRUN;
+ }
- if (sts & port->ignore_status_mask)
- continue;
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
- uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT, ch, flag);
+ if (sts & port->ignore_status_mask)
+ continue;
+
+ uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT, ch, flag);
+ }
}
tty_flip_buffer_push(&port->state->port);
@@ -652,7 +714,7 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
static void max310x_handle_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
- unsigned int txlen, to_send;
+ unsigned int txlen, to_send, until_end;
if (unlikely(port->x_char)) {
max310x_port_write(port, MAX310X_THR_REG, port->x_char);
@@ -666,28 +728,43 @@ static void max310x_handle_tx(struct uart_port *port)
/* Get length of data pending in circular buffer */
to_send = uart_circ_chars_pending(xmit);
+ until_end = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (likely(to_send)) {
/* Limit to size of TX FIFO */
txlen = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
txlen = port->fifosize - txlen;
to_send = (to_send > txlen) ? txlen : to_send;
+ if (until_end < to_send) {
+ /* It's a circ buffer -- wrap around.
+ * We could do this in one SPI transaction,
+ * but it is hardly worth the complexity. */
+ max310x_batch_write(port, xmit->buf + xmit->tail, until_end);
+ max310x_batch_write(port, xmit->buf, to_send - until_end);
+ } else {
+ max310x_batch_write(port, xmit->buf + xmit->tail, to_send);
+ }
+
/* Add data to send */
port->icount.tx += to_send;
- while (to_send--) {
- max310x_port_write(port, MAX310X_THR_REG,
- xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- }
+ xmit->tail = (xmit->tail + to_send) & (UART_XMIT_SIZE - 1);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
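
CIRC_CNT_TO_END() caps a batch at the physical end of the ring, which is why at most two SPI writes are ever needed. A worked example with hypothetical indices (UART_XMIT_SIZE = 4096):

	/*
	 * tail = 4090, head = 10:
	 *   to_send   = CIRC_CNT(10, 4090, 4096)        = 16 bytes pending
	 *   until_end = CIRC_CNT_TO_END(10, 4090, 4096) =  6 bytes contiguous
	 * Two writes are issued: 6 bytes from &buf[4090], then 10 bytes from
	 * &buf[0]; tail advances to (4090 + 16) & 4095 = 10 == head.
	 */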
-static void max310x_port_irq(struct max310x_port *s, int portno)
+static void max310x_start_tx(struct uart_port *port)
+{
+ struct max310x_one *one = container_of(port, struct max310x_one, port);
+
+ if (!work_pending(&one->tx_work))
+ schedule_work(&one->tx_work);
+}
+
+static irqreturn_t max310x_port_irq(struct max310x_port *s, int portno)
{
struct uart_port *port = &s->p[portno].port;
+ irqreturn_t res = IRQ_NONE;
do {
unsigned int ists, lsr, rxlen;
@@ -698,6 +775,8 @@ static void max310x_port_irq(struct max310x_port *s, int portno)
if (!ists && !rxlen)
break;
+ res = IRQ_HANDLED;
+
if (ists & MAX310X_IRQ_CTS_BIT) {
lsr = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
uart_handle_cts_change(port,
@@ -705,17 +784,16 @@ static void max310x_port_irq(struct max310x_port *s, int portno)
}
if (rxlen)
max310x_handle_rx(port, rxlen);
- if (ists & MAX310X_IRQ_TXEMPTY_BIT) {
- mutex_lock(&s->mutex);
- max310x_handle_tx(port);
- mutex_unlock(&s->mutex);
- }
+ if (ists & MAX310X_IRQ_TXEMPTY_BIT)
+ max310x_start_tx(port);
} while (1);
+ return res;
}
static irqreturn_t max310x_ist(int irq, void *dev_id)
{
struct max310x_port *s = (struct max310x_port *)dev_id;
+ bool handled = false;
if (s->devtype->nr > 1) {
do {
@@ -726,12 +804,15 @@ static irqreturn_t max310x_ist(int irq, void *dev_id)
val = ((1 << s->devtype->nr) - 1) & ~val;
if (!val)
break;
- max310x_port_irq(s, fls(val) - 1);
+ if (max310x_port_irq(s, fls(val) - 1) == IRQ_HANDLED)
+ handled = true;
} while (1);
- } else
- max310x_port_irq(s, 0);
+ } else {
+ if (max310x_port_irq(s, 0) == IRQ_HANDLED)
+ handled = true;
+ }
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
static void max310x_wq_proc(struct work_struct *ws)
@@ -744,14 +825,6 @@ static void max310x_wq_proc(struct work_struct *ws)
mutex_unlock(&s->mutex);
}
-static void max310x_start_tx(struct uart_port *port)
-{
- struct max310x_one *one = container_of(port, struct max310x_one, port);
-
- if (!work_pending(&one->tx_work))
- schedule_work(&one->tx_work);
-}
-
static unsigned int max310x_tx_empty(struct uart_port *port)
{
unsigned int lvl, sts;
@@ -1086,10 +1159,31 @@ static int max310x_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+
+static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct max310x_port *s = gpiochip_get_data(chip);
+ struct uart_port *port = &s->p[offset / 4].port;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ max310x_port_update(port, MAX310X_GPIOCFG_REG,
+ 1 << ((offset % 4) + 4),
+ 1 << ((offset % 4) + 4));
+ return 0;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ max310x_port_update(port, MAX310X_GPIOCFG_REG,
+ 1 << ((offset % 4) + 4), 0);
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
#endif
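
gpiolib calls the new set_config() hook when a consumer asks for open drain, for instance via a machine lookup flag. A hypothetical board-file sketch ("spi0.0" and the consumer name are made up):

	static struct gpiod_lookup_table my_gpio_lookup = {
		.dev_id = "my-consumer",
		.table = {
			/* GPIO_OPEN_DRAIN ends up as PIN_CONFIG_DRIVE_OPEN_DRAIN */
			GPIO_LOOKUP("spi0.0", 2, "reset",
				    GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN),
			{ },
		},
	};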
static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
- struct regmap *regmap, int irq, unsigned long flags)
+ struct regmap *regmap, int irq)
{
int i, ret, fmin, fmax, freq, uartclk;
struct clk *clk_osc, *clk_xtal;
@@ -1169,23 +1263,6 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
uartclk = max310x_set_ref_clk(s, freq, xtal);
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
-#ifdef CONFIG_GPIOLIB
- /* Setup GPIO cotroller */
- s->gpio.owner = THIS_MODULE;
- s->gpio.parent = dev;
- s->gpio.label = dev_name(dev);
- s->gpio.direction_input = max310x_gpio_direction_input;
- s->gpio.get = max310x_gpio_get;
- s->gpio.direction_output= max310x_gpio_direction_output;
- s->gpio.set = max310x_gpio_set;
- s->gpio.base = -1;
- s->gpio.ngpio = devtype->nr * 4;
- s->gpio.can_sleep = 1;
- ret = devm_gpiochip_add_data(dev, &s->gpio, s);
- if (ret)
- goto out_clk;
-#endif
-
mutex_init(&s->mutex);
for (i = 0; i < devtype->nr; i++) {
@@ -1237,9 +1314,27 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
devtype->power(&s->p[i].port, 0);
}
+#ifdef CONFIG_GPIOLIB
+ /* Setup GPIO controller */
+ s->gpio.owner = THIS_MODULE;
+ s->gpio.parent = dev;
+ s->gpio.label = dev_name(dev);
+ s->gpio.direction_input = max310x_gpio_direction_input;
+ s->gpio.get = max310x_gpio_get;
+ s->gpio.direction_output= max310x_gpio_direction_output;
+ s->gpio.set = max310x_gpio_set;
+ s->gpio.set_config = max310x_gpio_set_config;
+ s->gpio.base = -1;
+ s->gpio.ngpio = devtype->nr * 4;
+ s->gpio.can_sleep = 1;
+ ret = devm_gpiochip_add_data(dev, &s->gpio, s);
+ if (ret)
+ goto out_uart;
+#endif
+
/* Setup interrupt */
ret = devm_request_threaded_irq(dev, irq, NULL, max310x_ist,
- IRQF_ONESHOT | flags, dev_name(dev), s);
+ IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), s);
if (!ret)
return 0;
@@ -1293,7 +1388,7 @@ MODULE_DEVICE_TABLE(of, max310x_dt_ids);
static struct regmap_config regcfg = {
.reg_bits = 8,
.val_bits = 8,
- .write_flag_mask = 0x80,
+ .write_flag_mask = MAX310X_WRITE_BIT,
.cache_type = REGCACHE_RBTREE,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
@@ -1304,7 +1399,6 @@ static struct regmap_config regcfg = {
static int max310x_spi_probe(struct spi_device *spi)
{
struct max310x_devtype *devtype;
- unsigned long flags = 0;
struct regmap *regmap;
int ret;
@@ -1327,11 +1421,10 @@ static int max310x_spi_probe(struct spi_device *spi)
devtype = (struct max310x_devtype *)id_entry->driver_data;
}
- flags = IRQF_TRIGGER_FALLING;
regcfg.max_register = devtype->nr * 0x20 - 1;
regmap = devm_regmap_init_spi(spi, &regcfg);
- return max310x_probe(&spi->dev, devtype, regmap, spi->irq, flags);
+ return max310x_probe(&spi->dev, devtype, regmap, spi->irq);
}
static int max310x_spi_remove(struct spi_device *spi)
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index daafe60175da..8a842591b37c 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -34,9 +34,15 @@
/* AML_UART_CONTROL bits */
#define AML_UART_TX_EN BIT(12)
#define AML_UART_RX_EN BIT(13)
+#define AML_UART_TWO_WIRE_EN BIT(15)
+#define AML_UART_STOP_BIT_LEN_MASK (0x03 << 16)
+#define AML_UART_STOP_BIT_1SB (0x00 << 16)
+#define AML_UART_STOP_BIT_2SB (0x01 << 16)
+#define AML_UART_PARITY_TYPE BIT(18)
+#define AML_UART_PARITY_EN BIT(19)
#define AML_UART_TX_RST BIT(22)
#define AML_UART_RX_RST BIT(23)
-#define AML_UART_CLR_ERR BIT(24)
+#define AML_UART_CLEAR_ERR BIT(24)
#define AML_UART_RX_INT_EN BIT(27)
#define AML_UART_TX_INT_EN BIT(28)
#define AML_UART_DATA_LEN_MASK (0x03 << 20)
@@ -57,15 +63,6 @@
AML_UART_FRAME_ERR | \
AML_UART_TX_FIFO_WERR)
-/* AML_UART_CONTROL bits */
-#define AML_UART_TWO_WIRE_EN BIT(15)
-#define AML_UART_PARITY_TYPE BIT(18)
-#define AML_UART_PARITY_EN BIT(19)
-#define AML_UART_CLEAR_ERR BIT(24)
-#define AML_UART_STOP_BIN_LEN_MASK (0x03 << 16)
-#define AML_UART_STOP_BIN_1SB (0x00 << 16)
-#define AML_UART_STOP_BIN_2SB (0x01 << 16)
-
/* AML_UART_MISC bits */
#define AML_UART_XMIT_IRQ(c) (((c) & 0xff) << 8)
#define AML_UART_RECV_IRQ(c) ((c) & 0xff)
@@ -263,10 +260,10 @@ static void meson_uart_reset(struct uart_port *port)
u32 val;
val = readl(port->membase + AML_UART_CONTROL);
- val |= (AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLR_ERR);
+ val |= (AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLEAR_ERR);
writel(val, port->membase + AML_UART_CONTROL);
- val &= ~(AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLR_ERR);
+ val &= ~(AML_UART_RX_RST | AML_UART_TX_RST | AML_UART_CLEAR_ERR);
writel(val, port->membase + AML_UART_CONTROL);
}
@@ -276,9 +273,9 @@ static int meson_uart_startup(struct uart_port *port)
int ret = 0;
val = readl(port->membase + AML_UART_CONTROL);
- val |= AML_UART_CLR_ERR;
+ val |= AML_UART_CLEAR_ERR;
writel(val, port->membase + AML_UART_CONTROL);
- val &= ~AML_UART_CLR_ERR;
+ val &= ~AML_UART_CLEAR_ERR;
writel(val, port->membase + AML_UART_CONTROL);
val |= (AML_UART_RX_EN | AML_UART_TX_EN);
@@ -354,11 +351,11 @@ static void meson_uart_set_termios(struct uart_port *port,
else
val &= ~AML_UART_PARITY_TYPE;
- val &= ~AML_UART_STOP_BIN_LEN_MASK;
+ val &= ~AML_UART_STOP_BIT_LEN_MASK;
if (cflags & CSTOPB)
- val |= AML_UART_STOP_BIN_2SB;
+ val |= AML_UART_STOP_BIT_2SB;
else
- val |= AML_UART_STOP_BIN_1SB;
+ val |= AML_UART_STOP_BIT_1SB;
if (cflags & CRTSCTS)
val &= ~AML_UART_TWO_WIRE_EN;
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index efb4fd3784ed..079dc47aa142 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -40,7 +40,6 @@
#include <asm/cacheflush.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/irq.h>
@@ -1597,7 +1596,7 @@ static int mxs_auart_init_gpios(struct mxs_auart_port *s, struct device *dev)
for (i = 0; i < UART_GPIO_MAX; i++) {
gpiod = mctrl_gpio_to_gpiod(s->gpios, i);
- if (gpiod && (gpiod_get_direction(gpiod) == GPIOF_DIR_IN))
+ if (gpiod && (gpiod_get_direction(gpiod) == 1))
s->gpio_irq[i] = gpiod_to_irq(gpiod);
else
s->gpio_irq[i] = -EINVAL;
@@ -1721,7 +1720,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
ret = uart_add_one_port(&auart_driver, &s->port);
if (ret)
- goto out_free_gpio_irq;
+ goto out_disable_clks_free_gpio_irq;
/* ASM9260 don't have version reg */
if (is_asm9260_auart(s)) {
@@ -1735,7 +1734,11 @@ static int mxs_auart_probe(struct platform_device *pdev)
return 0;
-out_free_gpio_irq:
+out_disable_clks_free_gpio_irq:
+ if (s->clk)
+ clk_disable_unprepare(s->clk);
+ if (s->clk_ahb)
+ clk_disable_unprepare(s->clk_ahb);
mxs_auart_free_gpio_irq(s);
auart_port[pdev->id] = NULL;
return ret;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 53d59e9b944a..6420ae581a80 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1602,7 +1602,6 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
struct device_node *np)
{
struct serial_rs485 *rs485conf = &up->port.rs485;
- enum of_gpio_flags flags;
int ret;
rs485conf->flags = 0;
@@ -1611,19 +1610,24 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
if (!np)
return 0;
- if (of_property_read_bool(np, "rs485-rts-active-high"))
+ uart_get_rs485_mode(up->dev, rs485conf);
+
+ if (of_property_read_bool(np, "rs485-rts-active-high")) {
rs485conf->flags |= SER_RS485_RTS_ON_SEND;
- else
+ rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
+ } else {
+ rs485conf->flags &= ~SER_RS485_RTS_ON_SEND;
rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
+ }
/* check for tx enable gpio */
- up->rts_gpio = of_get_named_gpio_flags(np, "rts-gpio", 0, &flags);
+ up->rts_gpio = of_get_named_gpio(np, "rts-gpio", 0);
if (gpio_is_valid(up->rts_gpio)) {
ret = devm_gpio_request(up->dev, up->rts_gpio, "omap-serial");
if (ret < 0)
return ret;
- ret = gpio_direction_output(up->rts_gpio,
- flags & SER_RS485_RTS_AFTER_SEND);
+ ret = rs485conf->flags & SER_RS485_RTS_AFTER_SEND ? 1 : 0;
+ ret = gpio_direction_output(up->rts_gpio, ret);
if (ret < 0)
return ret;
} else if (up->rts_gpio == -EPROBE_DEFER) {
@@ -1632,8 +1636,6 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
up->rts_gpio = -EINVAL;
}
- of_get_rs485_mode(np, rs485conf);
-
return 0;
}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 854995e1cae7..c8dde56b532b 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -974,6 +974,8 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
}
} else {
retval = uart_startup(tty, state, 1);
+ if (retval == 0)
+ tty_port_set_initialized(port, true);
if (retval > 0)
retval = 0;
}
@@ -1955,9 +1957,10 @@ EXPORT_SYMBOL_GPL(uart_parse_earlycon);
* eg: 115200n8r
*/
void
-uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow)
+uart_parse_options(const char *options, int *baud, int *parity,
+ int *bits, int *flow)
{
- char *s = options;
+ const char *s = options;
*baud = simple_strtoul(s, NULL, 10);
while (*s >= '0' && *s <= '9')
@@ -3013,19 +3016,20 @@ EXPORT_SYMBOL(uart_add_one_port);
EXPORT_SYMBOL(uart_remove_one_port);
/**
- * of_get_rs485_mode() - Implement parsing rs485 properties
- * @np: uart node
+ * uart_get_rs485_mode() - retrieve rs485 properties for given uart
+ * @dev: uart device
* @rs485conf: output parameter
*
* This function implements the device tree binding described in
* Documentation/devicetree/bindings/serial/rs485.txt.
*/
-void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf)
+void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf)
{
u32 rs485_delay[2];
int ret;
- ret = of_property_read_u32_array(np, "rs485-rts-delay", rs485_delay, 2);
+ ret = device_property_read_u32_array(dev, "rs485-rts-delay",
+ rs485_delay, 2);
if (!ret) {
rs485conf->delay_rts_before_send = rs485_delay[0];
rs485conf->delay_rts_after_send = rs485_delay[1];
@@ -3035,18 +3039,25 @@ void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf)
}
/*
- * clear full-duplex and enabled flags to get to a defined state with
- * the two following properties.
+ * Clear full-duplex and enabled flags, set RTS polarity to active high
+ * to get to a defined state with the following properties:
*/
- rs485conf->flags &= ~(SER_RS485_RX_DURING_TX | SER_RS485_ENABLED);
+ rs485conf->flags &= ~(SER_RS485_RX_DURING_TX | SER_RS485_ENABLED |
+ SER_RS485_RTS_AFTER_SEND);
+ rs485conf->flags |= SER_RS485_RTS_ON_SEND;
- if (of_property_read_bool(np, "rs485-rx-during-tx"))
+ if (device_property_read_bool(dev, "rs485-rx-during-tx"))
rs485conf->flags |= SER_RS485_RX_DURING_TX;
- if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time"))
+ if (device_property_read_bool(dev, "linux,rs485-enabled-at-boot-time"))
rs485conf->flags |= SER_RS485_ENABLED;
+
+ if (device_property_read_bool(dev, "rs485-rts-active-low")) {
+ rs485conf->flags &= ~SER_RS485_RTS_ON_SEND;
+ rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
+ }
}
-EXPORT_SYMBOL_GPL(of_get_rs485_mode);
+EXPORT_SYMBOL_GPL(uart_get_rs485_mode);
MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index d9f399c4e90c..7257c078e155 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1144,7 +1144,7 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(rx_fifo_timeout, 0644, rx_fifo_timeout_show, rx_fifo_timeout_store);
+static DEVICE_ATTR_RW(rx_fifo_timeout);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
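The DEVICE_ATTR_RW() conversion above (and the similar ones in the USB hunks below) relies on a naming convention: the macro expands to dev_attr_<name> with 0644 permissions and expects <name>_show()/<name>_store() handlers. A minimal sketch with a hypothetical attribute "foo":

    #include <linux/device.h>
    #include <linux/kernel.h>

    static int foo_value;

    static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
    			char *buf)
    {
    	return sprintf(buf, "%d\n", foo_value);
    }

    static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
    			 const char *buf, size_t count)
    {
    	if (kstrtoint(buf, 0, &foo_value))
    		return -EINVAL;
    	return count;
    }

    static DEVICE_ATTR_RW(foo);	/* expands to dev_attr_foo, mode 0644 */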
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index 8a5ff54d0f42..2294d0f05872 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -236,7 +236,7 @@ struct stm32_usart_info stm32h7_info = {
#define USART_ICR_CMCF BIT(17) /* F7 */
#define USART_ICR_WUCF BIT(20) /* H7 */
-#define STM32_SERIAL_NAME "ttyS"
+#define STM32_SERIAL_NAME "ttySTM"
#define STM32_MAX_PORTS 8
#define RX_BUF_L 200 /* dma rx buffer length */
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index dc60aeea87d8..6a89835453d3 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -144,7 +144,7 @@ static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
ssize_t redirected_tty_write(struct file *, const char __user *,
size_t, loff_t *);
-static unsigned int tty_poll(struct file *, poll_table *);
+static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
@@ -443,7 +443,7 @@ static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
}
/* No kernel lock held - none needed ;) */
-static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait)
+static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait)
{
return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
}
@@ -1323,6 +1323,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
"%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
__func__, tty->driver->name);
+ retval = tty_ldisc_lock(tty, 5 * HZ);
+ if (retval)
+ goto err_release_lock;
tty->port->itty = tty;
/*
@@ -1333,6 +1336,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
retval = tty_ldisc_setup(tty, tty->link);
if (retval)
goto err_release_tty;
+ tty_ldisc_unlock(tty);
/* Return the tty locked so that it cannot vanish under the caller */
return tty;
@@ -1345,9 +1349,11 @@ err_module_put:
/* call the tty release_tty routine to clean out this slot */
err_release_tty:
- tty_unlock(tty);
+ tty_ldisc_unlock(tty);
tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
retval, idx);
+err_release_lock:
+ tty_unlock(tty);
release_tty(tty, idx);
return ERR_PTR(retval);
}
@@ -1476,6 +1482,8 @@ static void release_tty(struct tty_struct *tty, int idx)
if (tty->link)
tty->link->port->itty = NULL;
tty_buffer_cancel_work(tty->port);
+ if (tty->link)
+ tty_buffer_cancel_work(tty->link->port);
tty_kref_put(tty->link);
tty_kref_put(tty);
@@ -2055,11 +2063,11 @@ retry_open:
* may be re-entered freely by other callers.
*/
-static unsigned int tty_poll(struct file *filp, poll_table *wait)
+static __poll_t tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty = file_tty(filp);
struct tty_ldisc *ld;
- int ret = 0;
+ __poll_t ret = 0;
if (tty_paranoia_check(tty, file_inode(filp), "tty_poll"))
return 0;
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 24ec5c7e6b20..4e7946c0484b 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -337,7 +337,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty)
ldsem_up_write(&tty->ldisc_sem);
}
-static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
{
int ret;
@@ -348,7 +348,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
return 0;
}
-static void tty_ldisc_unlock(struct tty_struct *tty)
+void tty_ldisc_unlock(struct tty_struct *tty)
{
clear_bit(TTY_LDISC_HALTED, &tty->flags);
__tty_ldisc_unlock(tty);
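Exporting tty_ldisc_lock()/tty_ldisc_unlock() lets tty_init_dev() hold the ldisc lock while publishing tty->port->itty, closing the race the tty_io.c hunks above address. A minimal sketch of the resulting ordering, assuming the prototypes are visible via linux/tty.h:

    #include <linux/tty.h>

    static int example_publish_tty(struct tty_struct *tty)
    {
    	int retval;

    	retval = tty_ldisc_lock(tty, 5 * HZ);	/* bounded wait */
    	if (retval)
    		return retval;

    	tty->port->itty = tty;		/* published only under the lock */
    	retval = tty_ldisc_setup(tty, tty->link);

    	tty_ldisc_unlock(tty);
    	return retval;
    }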
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index af4da9507180..7851383fbd6c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -186,11 +186,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
}
if (ps > pe) /* make sel_start <= sel_end */
- {
- int tmp = ps;
- ps = pe;
- pe = tmp;
- }
+ swap(ps, pe);
if (sel_cons != vc_cons[fg_console].d) {
clear_selection();
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 85b6634f518a..3e64ccd0040f 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -559,11 +559,11 @@ unlock_out:
return ret;
}
-static unsigned int
+static __poll_t
vcs_poll(struct file *file, poll_table *wait)
{
struct vcs_poll_data *poll = vcs_poll_data_get(file);
- int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
+ __poll_t ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
if (poll) {
poll_wait(file, &poll->waitq, wait);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ff04b7f8549f..85bc1aaea4a4 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -496,7 +496,7 @@ static int uio_release(struct inode *inode, struct file *filep)
return ret;
}
-static unsigned int uio_poll(struct file *filep, poll_table *wait)
+static __poll_t uio_poll(struct file *filep, poll_table *wait)
{
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
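The uio hunk is one instance of the tree-wide __poll_t conversion in this series: poll handlers return a bitwise type so sparse can catch mixing the mask with plain integers. A minimal sketch of the pattern; struct foo_dev and its fields are hypothetical:

    #include <linux/poll.h>
    #include <linux/wait.h>

    struct foo_dev {
    	wait_queue_head_t waitq;
    	bool data_ready;
    };

    static __poll_t foo_poll(struct file *file, poll_table *wait)
    {
    	struct foo_dev *fd = file->private_data;
    	__poll_t mask = 0;		/* typed mask, not plain int */

    	poll_wait(file, &fd->waitq, wait);
    	if (fd->data_ready)
    		mask |= POLLIN | POLLRDNORM;
    	return mask;
    }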
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 8af797252af2..e57a2be8754a 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -196,18 +196,16 @@ static void cxacru_poll_status(struct work_struct *work);
/* Card info exported through sysfs */
#define CXACRU__ATTR_INIT(_name) \
-static DEVICE_ATTR(_name, S_IRUGO, cxacru_sysfs_show_##_name, NULL)
+static DEVICE_ATTR_RO(_name)
#define CXACRU_CMD_INIT(_name) \
-static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \
- cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name)
+static DEVICE_ATTR_RW(_name)
#define CXACRU_SET_INIT(_name) \
-static DEVICE_ATTR(_name, S_IWUSR, \
- NULL, cxacru_sysfs_store_##_name)
+static DEVICE_ATTR_WO(_name)
#define CXACRU_ATTR_INIT(_value, _type, _name) \
-static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \
+static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct cxacru_data *instance = to_usbatm_driver_data(\
@@ -302,7 +300,7 @@ static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
* MAC_ADDRESS_LOW = 0x33221100
* Where 00-55 are bytes 0-5 of the MAC.
*/
-static ssize_t cxacru_sysfs_show_mac_address(struct device *dev,
+static ssize_t mac_address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxacru_data *instance = to_usbatm_driver_data(
@@ -315,7 +313,7 @@ static ssize_t cxacru_sysfs_show_mac_address(struct device *dev,
instance->usbatm->atm_dev->esi);
}
-static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
+static ssize_t adsl_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
static char *str[] = { "running", "stopped" };
@@ -332,7 +330,7 @@ static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
-static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
+static ssize_t adsl_state_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct cxacru_data *instance = to_usbatm_driver_data(
@@ -435,7 +433,7 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */
-static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
+static ssize_t adsl_config_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct cxacru_data *instance = to_usbatm_driver_data(
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index ab75690044bb..2754b4ce7136 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2280,7 +2280,7 @@ static struct uea_softc *dev_to_uea(struct device *dev)
return usbatm->driver_data;
}
-static ssize_t read_status(struct device *dev, struct device_attribute *attr,
+static ssize_t stat_status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret = -ENODEV;
@@ -2296,7 +2296,7 @@ out:
return ret;
}
-static ssize_t reboot(struct device *dev, struct device_attribute *attr,
+static ssize_t stat_status_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = -ENODEV;
@@ -2313,9 +2313,9 @@ out:
return ret;
}
-static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot);
+static DEVICE_ATTR_RW(stat_status);
-static ssize_t read_human_status(struct device *dev,
+static ssize_t stat_human_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret = -ENODEV;
@@ -2376,9 +2376,9 @@ out:
return ret;
}
-static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL);
+static DEVICE_ATTR_RO(stat_human_status);
-static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
+static ssize_t stat_delin_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret = -ENODEV;
@@ -2408,11 +2408,11 @@ out:
return ret;
}
-static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL);
+static DEVICE_ATTR_RO(stat_delin);
#define UEA_ATTR(name, reset) \
\
-static ssize_t read_##name(struct device *dev, \
+static ssize_t stat_##name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
int ret = -ENODEV; \
@@ -2430,7 +2430,7 @@ out: \
return ret; \
} \
\
-static DEVICE_ATTR(stat_##name, S_IRUGO, read_##name, NULL)
+static DEVICE_ATTR_RO(stat_##name)
UEA_ATTR(mflags, 1);
UEA_ATTR(vidcpe, 0);
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 7b65a1040d2c..7f4d2b6af37a 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -29,7 +29,7 @@ static const struct tegra_udc_soc_info tegra20_udc_soc_info = {
};
static const struct tegra_udc_soc_info tegra30_udc_soc_info = {
- .flags = 0,
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
};
static const struct tegra_udc_soc_info tegra114_udc_soc_info = {
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index dd2dd9391bb7..33ae87fa3ff3 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -835,7 +835,7 @@ static void ci_get_otg_capable(struct ci_hdrc *ci)
}
}
-static ssize_t ci_role_show(struct device *dev, struct device_attribute *attr,
+static ssize_t role_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -846,7 +846,7 @@ static ssize_t ci_role_show(struct device *dev, struct device_attribute *attr,
return 0;
}
-static ssize_t ci_role_store(struct device *dev,
+static ssize_t role_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -877,7 +877,7 @@ static ssize_t ci_role_store(struct device *dev,
return (ret == 0) ? n : ret;
}
-static DEVICE_ATTR(role, 0644, ci_role_show, ci_role_store);
+static DEVICE_ATTR_RW(role);
static struct attribute *ci_attrs[] = {
&dev_attr_role.attr,
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 9e2d300060bc..6ed4b00dba96 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -29,7 +29,7 @@
/* Add for otg: interact with user space app */
static ssize_t
-get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+a_bus_req_show(struct device *dev, struct device_attribute *attr, char *buf)
{
char *next;
unsigned size, t;
@@ -45,7 +45,7 @@ get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-set_a_bus_req(struct device *dev, struct device_attribute *attr,
+a_bus_req_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -75,10 +75,10 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req);
+static DEVICE_ATTR_RW(a_bus_req);
static ssize_t
-get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
+a_bus_drop_show(struct device *dev, struct device_attribute *attr, char *buf)
{
char *next;
unsigned size, t;
@@ -94,7 +94,7 @@ get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+a_bus_drop_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -115,11 +115,10 @@ set_a_bus_drop(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop,
- set_a_bus_drop);
+static DEVICE_ATTR_RW(a_bus_drop);
static ssize_t
-get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+b_bus_req_show(struct device *dev, struct device_attribute *attr, char *buf)
{
char *next;
unsigned size, t;
@@ -135,7 +134,7 @@ get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-set_b_bus_req(struct device *dev, struct device_attribute *attr,
+b_bus_req_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -160,10 +159,10 @@ set_b_bus_req(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR, get_b_bus_req, set_b_bus_req);
+static DEVICE_ATTR_RW(b_bus_req);
static ssize_t
-set_a_clr_err(struct device *dev, struct device_attribute *attr,
+a_clr_err_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -180,7 +179,7 @@ set_a_clr_err(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
+static DEVICE_ATTR_WO(a_clr_err);
static struct attribute *inputs_attrs[] = {
&dev_attr_a_bus_req.attr,
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 8e0636c963a7..06b3b54a0e68 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -235,7 +235,7 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
/*
* attributes exported through sysfs
*/
-static ssize_t show_caps
+static ssize_t bmCapabilities_show
(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -243,9 +243,9 @@ static ssize_t show_caps
return sprintf(buf, "%d", acm->ctrl_caps);
}
-static DEVICE_ATTR(bmCapabilities, S_IRUGO, show_caps, NULL);
+static DEVICE_ATTR_RO(bmCapabilities);
-static ssize_t show_country_codes
+static ssize_t wCountryCodes_show
(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -255,9 +255,9 @@ static ssize_t show_country_codes
return acm->country_code_size;
}
-static DEVICE_ATTR(wCountryCodes, S_IRUGO, show_country_codes, NULL);
+static DEVICE_ATTR_RO(wCountryCodes);
-static ssize_t show_country_rel_date
+static ssize_t iCountryCodeRelDate_show
(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -266,7 +266,7 @@ static ssize_t show_country_rel_date
return sprintf(buf, "%d", acm->country_rel_date);
}
-static DEVICE_ATTR(iCountryCodeRelDate, S_IRUGO, show_country_rel_date, NULL);
+static DEVICE_ATTR_RO(iCountryCodeRelDate);
/*
* Interrupt handlers for various ACM device responses
*/
@@ -425,7 +425,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
res = usb_submit_urb(acm->read_urbs[index], mem_flags);
if (res) {
- if (res != -EPERM) {
+ if (res != -EPERM && res != -ENODEV) {
dev_err(&acm->data->dev,
"urb %d failed submission with %d\n",
index, res);
@@ -1752,6 +1752,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
.driver_info = SINGLE_RX_URB, /* firmware bug */
},
+ { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
+ .driver_info = SINGLE_RX_URB,
+ },
{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 6c181a625daf..9627ea6ec3ae 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -595,11 +595,11 @@ static int wdm_flush(struct file *file, fl_owner_t id)
return usb_translate_errors(desc->werr);
}
-static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
{
struct wdm_device *desc = file->private_data;
unsigned long flags;
- unsigned int mask = 0;
+ __poll_t mask = 0;
spin_lock_irqsave(&desc->iuspin, flags);
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index c454885ef4a0..425247b7f728 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -469,9 +469,9 @@ static int usblp_release(struct inode *inode, struct file *file)
}
/* No kernel lock - fine */
-static unsigned int usblp_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait)
{
- int ret;
+ __poll_t ret;
unsigned long flags;
struct usblp *usblp = file->private_data;
@@ -1066,7 +1066,7 @@ static struct usb_class_driver usblp_class = {
.minor_base = USBLP_MINOR_BASE,
};
-static ssize_t usblp_show_ieee1284_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t ieee1284_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usblp *usblp = usb_get_intfdata(intf);
@@ -1078,7 +1078,7 @@ static ssize_t usblp_show_ieee1284_id(struct device *dev, struct device_attribut
return sprintf(buf, "%s", usblp->device_id_string+2);
}
-static DEVICE_ATTR(ieee1284_id, S_IRUGO, usblp_show_ieee1284_id, NULL);
+static DEVICE_ATTR_RO(ieee1284_id);
static int usblp_probe(struct usb_interface *intf,
const struct usb_device_id *id)
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 0b8b0f3bdd2f..7ea67a55be10 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1257,10 +1257,10 @@ static int usbtmc_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &data->fasync);
}
-static unsigned int usbtmc_poll(struct file *file, poll_table *wait)
+static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
{
struct usbtmc_device_data *data = file->private_data;
- unsigned int mask;
+ __poll_t mask;
mutex_lock(&data->io_mutex);
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index c2cf62b7043a..e2cec448779e 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -622,7 +622,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
}
/* Kernel lock for "lastev" protection */
-static unsigned int usb_device_poll(struct file *file,
+static __poll_t usb_device_poll(struct file *file,
struct poll_table_struct *wait)
{
unsigned int event_count;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index a3fad4ec9870..bf00166cbee0 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -595,7 +595,7 @@ static void async_completed(struct urb *urb)
as->status = urb->status;
signr = as->signr;
if (signr) {
- memset(&sinfo, 0, sizeof(sinfo));
+ clear_siginfo(&sinfo);
sinfo.si_signo = as->signr;
sinfo.si_errno = as->status;
sinfo.si_code = SI_ASYNCIO;
@@ -1681,8 +1681,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
u |= URB_ISO_ASAP;
if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
u |= URB_SHORT_NOT_OK;
- if (uurb->flags & USBDEVFS_URB_NO_FSBR)
- u |= URB_NO_FSBR;
if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
u |= URB_ZERO_PACKET;
if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
@@ -2572,11 +2570,11 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
#endif
/* No kernel lock - fine */
-static unsigned int usbdev_poll(struct file *file,
+static __poll_t usbdev_poll(struct file *file,
struct poll_table_struct *wait)
{
struct usb_dev_state *ps = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &ps->wait, wait);
if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
@@ -2613,7 +2611,7 @@ static void usbdev_remove(struct usb_device *udev)
wake_up_all(&ps->wait);
list_del_init(&ps->list);
if (ps->discsignr) {
- memset(&sinfo, 0, sizeof(sinfo));
+ clear_siginfo(&sinfo);
sinfo.si_signo = ps->discsignr;
sinfo.si_errno = EPIPE;
sinfo.si_code = SI_ASYNCIO;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 64262a9a8829..9792cedfc351 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -342,8 +342,8 @@ static int usb_probe_interface(struct device *dev)
if (driver->disable_hub_initiated_lpm) {
lpm_disable_error = usb_unlocked_disable_lpm(udev);
if (lpm_disable_error) {
- dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
- __func__, driver->name);
+ dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n",
+ __func__, driver->name);
error = lpm_disable_error;
goto err;
}
@@ -537,8 +537,8 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (driver->disable_hub_initiated_lpm) {
lpm_disable_error = usb_unlocked_disable_lpm(udev);
if (lpm_disable_error) {
- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
- __func__, driver->name);
+ dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n",
+ __func__, driver->name);
return -ENOMEM;
}
}
@@ -1070,7 +1070,7 @@ static void usb_rebind_intf(struct usb_interface *intf)
if (!intf->dev.power.is_prepared) {
intf->needs_binding = 0;
rc = device_attach(&intf->dev);
- if (rc < 0)
+ if (rc < 0 && rc != -EPROBE_DEFER)
dev_warn(&intf->dev, "rebind failed: %d\n", rc);
}
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index cf7bbcb9a63c..c5c1f6cf3228 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -38,6 +38,9 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
+#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
+#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
+
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@ -1049,12 +1052,10 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
ret = hcd->driver->update_hub_device(hcd, hdev,
&hub->tt, GFP_NOIO);
if (ret < 0) {
- dev_err(hub->intfdev, "Host not "
- "accepting hub info "
- "update.\n");
- dev_err(hub->intfdev, "LS/FS devices "
- "and hubs may not work "
- "under this hub\n.");
+ dev_err(hub->intfdev,
+ "Host not accepting hub info update\n");
+ dev_err(hub->intfdev,
+ "LS/FS devices and hubs may not work under this hub\n");
}
}
hub_power_on(hub, true);
@@ -1352,6 +1353,20 @@ static int hub_configure(struct usb_hub *hub,
goto fail;
}
+ /*
+ * Accumulate wHubDelay + 40ns for every hub in the tree of devices.
+ * The resulting value will be used for SetIsochDelay() request.
+ */
+ if (hub_is_superspeed(hdev) || hub_is_superspeedplus(hdev)) {
+ u32 delay = __le16_to_cpu(hub->descriptor->u.ss.wHubDelay);
+
+ if (hdev->parent)
+ delay += hdev->parent->hub_delay;
+
+ delay += USB_TP_TRANSMISSION_DELAY;
+ hdev->hub_delay = min_t(u32, delay, USB_TP_TRANSMISSION_DELAY_MAX);
+ }
+
maxchild = hub->descriptor->bNbrPorts;
dev_info(hub_dev, "%d port%s detected\n", maxchild,
(maxchild == 1) ? "" : "s");
@@ -3157,7 +3172,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
usb_set_usb2_hardware_lpm(udev, 0);
if (usb_disable_ltm(udev)) {
- dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
+ dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
status = -ENOMEM;
if (PMSG_IS_AUTO(msg))
goto err_ltm;
@@ -4599,7 +4614,20 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (retval >= 0)
retval = -EMSGSIZE;
} else {
+ u32 delay;
+
retval = 0;
+
+ delay = udev->parent->hub_delay;
+ udev->hub_delay = min_t(u32, delay,
+ USB_TP_TRANSMISSION_DELAY_MAX);
+ retval = usb_set_isoch_delay(udev);
+ if (retval) {
+ dev_dbg(&udev->dev,
+ "Failed set isoch delay, error %d\n",
+ retval);
+ retval = 0;
+ }
break;
}
}
@@ -5484,13 +5512,12 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
*/
ret = usb_unlocked_disable_lpm(udev);
if (ret) {
- dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__);
+ dev_err(&udev->dev, "%s Failed to disable LPM\n", __func__);
goto re_enumerate_no_bos;
}
ret = usb_disable_ltm(udev);
if (ret) {
- dev_err(&udev->dev, "%s Failed to disable LTM\n.",
- __func__);
+ dev_err(&udev->dev, "%s Failed to disable LTM\n", __func__);
goto re_enumerate_no_bos;
}
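The wHubDelay accumulation above composes per-tier latencies into the value later sent with SET_ISOCH_DELAY. A worked sketch with made-up wHubDelay values; tp_delay() is hypothetical and the USB_TP_* constants mirror the hub.c defines:

    #include <linux/kernel.h>

    static u32 tp_delay(u32 w_hub_delay, u32 parent_delay)
    {
    	/* wHubDelay plus 40 ns of TP transmission delay per hub tier */
    	u32 delay = w_hub_delay + parent_delay + USB_TP_TRANSMISSION_DELAY;

    	return min_t(u32, delay, USB_TP_TRANSMISSION_DELAY_MAX);
    }

    /*
     * tp_delay(300, 0)   == 340 ns   first-tier hub
     * tp_delay(400, 340) == 780 ns   second-tier hub; 780 becomes the
     *                                SET_ISOCH_DELAY wValue for a child
     */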
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index 9dbb429cd471..d775ffea20c3 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -137,11 +137,17 @@ static bool usbport_trig_port_observed(struct usbport_trig_data *usbport_data,
if (!led_np)
return false;
- /* Get node of port being added */
- port_np = usb_of_get_child_node(usb_dev->dev.of_node, port1);
+ /*
+ * Get node of port being added
+ *
+ * FIXME: This is really the device node of the connected device
+ */
+ port_np = usb_of_get_device_node(usb_dev, port1);
if (!port_np)
return false;
+ of_node_put(port_np);
+
/* Amount of trigger sources for this LED */
count = of_count_phandle_with_args(led_np, "trigger-sources",
"#trigger-source-cells");
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 77001bcfc504..c64cf6c4a83d 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -18,6 +18,7 @@
#include <linux/usb/cdc.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
+#include <linux/usb/of.h>
#include <asm/byteorder.h>
#include "usb.h"
@@ -776,7 +777,7 @@ static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf)
* deal with strings at all. Set string_langid to -1 in order to
* prevent any string to be retrieved from the device */
if (err < 0) {
- dev_err(&dev->dev, "string descriptor 0 read error: %d\n",
+ dev_info(&dev->dev, "string descriptor 0 read error: %d\n",
err);
dev->string_langid = -1;
return -EPIPE;
@@ -915,6 +916,30 @@ int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
return ret;
}
+/**
+ * usb_set_isoch_delay - informs the device of the packet transmit delay
+ * @dev: the device whose delay is to be informed
+ * Context: !in_interrupt()
+ *
+ * Since this is an optional request, we don't bother if it fails.
+ */
+int usb_set_isoch_delay(struct usb_device *dev)
+{
+ /* skip hub devices */
+ if (dev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ return 0;
+
+ /* skip non-SS/non-SSP devices */
+ if (dev->speed < USB_SPEED_SUPER)
+ return 0;
+
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ USB_REQ_SET_ISOCH_DELAY,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ cpu_to_le16(dev->hub_delay), 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+}
+
/**
* usb_get_status - issues a GET_STATUS call
* @dev: the device whose status is being checked
@@ -1355,7 +1380,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
* so that the xHCI driver can recalculate the U1/U2 timeouts.
*/
if (usb_disable_lpm(dev)) {
- dev_err(&iface->dev, "%s Failed to disable LPM\n.", __func__);
+ dev_err(&iface->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
@@ -1499,7 +1524,7 @@ int usb_reset_configuration(struct usb_device *dev)
* that the xHCI driver can recalculate the U1/U2 timeouts.
*/
if (usb_disable_lpm(dev)) {
- dev_err(&dev->dev, "%s Failed to disable LPM\n.", __func__);
+ dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
@@ -1583,6 +1608,7 @@ static void usb_release_interface(struct device *dev)
kref_put(&intfc->ref, usb_release_interface_cache);
usb_put_dev(interface_to_usbdev(intf));
+ of_node_put(dev->of_node);
kfree(intf);
}
@@ -1846,7 +1872,7 @@ free_interfaces:
* timeouts.
*/
if (dev->actconfig && usb_disable_lpm(dev)) {
- dev_err(&dev->dev, "%s Failed to disable LPM\n.", __func__);
+ dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
ret = -ENOMEM;
goto free_interfaces;
@@ -1868,6 +1894,7 @@ free_interfaces:
struct usb_interface_cache *intfc;
struct usb_interface *intf;
struct usb_host_interface *alt;
+ u8 ifnum;
cp->interface[i] = intf = new_interfaces[i];
intfc = cp->intf_cache[i];
@@ -1886,11 +1913,17 @@ free_interfaces:
if (!alt)
alt = &intf->altsetting[0];
- intf->intf_assoc =
- find_iad(dev, cp, alt->desc.bInterfaceNumber);
+ ifnum = alt->desc.bInterfaceNumber;
+ intf->intf_assoc = find_iad(dev, cp, ifnum);
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
intf->dev.parent = &dev->dev;
+ if (usb_of_has_combined_node(dev)) {
+ device_set_of_node_from_dev(&intf->dev, &dev->dev);
+ } else {
+ intf->dev.of_node = usb_of_get_interface_node(dev,
+ configuration, ifnum);
+ }
intf->dev.driver = NULL;
intf->dev.bus = &usb_bus_type;
intf->dev.type = &usb_if_device_type;
@@ -1905,9 +1938,8 @@ free_interfaces:
intf->minor = -1;
device_initialize(&intf->dev);
pm_runtime_no_callbacks(&intf->dev);
- dev_set_name(&intf->dev, "%d-%s:%d.%d",
- dev->bus->busnum, dev->devpath,
- configuration, alt->desc.bInterfaceNumber);
+ dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum,
+ dev->devpath, configuration, ifnum);
usb_get_dev(dev);
}
kfree(new_interfaces);
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index 2be968353257..fd77442c2d12 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -3,7 +3,8 @@
* of.c The helpers for hcd device tree support
*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Author: Peter Chen <peter.chen@freescale.com>
+ * Author: Peter Chen <peter.chen@freescale.com>
+ * Copyright (C) 2017 Johan Hovold <johan@kernel.org>
*/
#include <linux/of.h>
@@ -11,31 +12,99 @@
#include <linux/usb/of.h>
/**
- * usb_of_get_child_node - Find the device node match port number
- * @parent: the parent device node
- * @portnum: the port number which device is connecting
+ * usb_of_get_device_node() - get a USB device node
+ * @hub: hub to which device is connected
+ * @port1: one-based index of port
*
- * Find the node from device tree according to its port number.
+ * Look up the node of a USB device given its parent hub device and one-based
+ * port number.
*
* Return: A pointer to the node with incremented refcount if found, or
* %NULL otherwise.
*/
-struct device_node *usb_of_get_child_node(struct device_node *parent,
- int portnum)
+struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1)
{
struct device_node *node;
- u32 port;
+ u32 reg;
- for_each_child_of_node(parent, node) {
- if (!of_property_read_u32(node, "reg", &port)) {
- if (port == portnum)
- return node;
+ for_each_child_of_node(hub->dev.of_node, node) {
+ if (of_property_read_u32(node, "reg", &reg))
+ continue;
+
+ if (reg == port1)
+ return node;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_device_node);
+
+/**
+ * usb_of_has_combined_node() - determine whether a device has a combined node
+ * @udev: USB device
+ *
+ * Determine whether a USB device has a so called combined node which is
+ * shared with its sole interface. This is the case if and only if the device
+ * has a node and its descriptors report the following:
+ *
+ * 1) bDeviceClass is 0 or 9, and
+ * 2) bNumConfigurations is 1, and
+ * 3) bNumInterfaces is 1.
+ *
+ * Return: True iff the device has a device node and its descriptors match the
+ * criteria for a combined node.
+ */
+bool usb_of_has_combined_node(struct usb_device *udev)
+{
+ struct usb_device_descriptor *ddesc = &udev->descriptor;
+ struct usb_config_descriptor *cdesc;
+
+ if (!udev->dev.of_node)
+ return false;
+
+ switch (ddesc->bDeviceClass) {
+ case USB_CLASS_PER_INTERFACE:
+ case USB_CLASS_HUB:
+ if (ddesc->bNumConfigurations == 1) {
+ cdesc = &udev->config->desc;
+ if (cdesc->bNumInterfaces == 1)
+ return true;
}
}
+ return false;
+}
+EXPORT_SYMBOL_GPL(usb_of_has_combined_node);
+
+/**
+ * usb_of_get_interface_node() - get a USB interface node
+ * @udev: USB device of interface
+ * @config: configuration value
+ * @ifnum: interface number
+ *
+ * Look up the node of a USB interface given its USB device, configuration
+ * value and interface number.
+ *
+ * Return: A pointer to the node with incremented refcount if found, or
+ * %NULL otherwise.
+ */
+struct device_node *
+usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
+{
+ struct device_node *node;
+ u32 reg[2];
+
+ for_each_child_of_node(udev->dev.of_node, node) {
+ if (of_property_read_u32_array(node, "reg", reg, 2))
+ continue;
+
+ if (reg[0] == ifnum && reg[1] == config)
+ return node;
+ }
+
return NULL;
}
-EXPORT_SYMBOL_GPL(usb_of_get_child_node);
+EXPORT_SYMBOL_GPL(usb_of_get_interface_node);
/**
* usb_of_get_companion_dev - Find the companion device
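With the three helpers above, usb_set_configuration() can attach of_nodes to interfaces: devices with a combined node share the device's node, the others get a child node whose reg property is <interface-number configuration-value>. A minimal sketch of that dispatch; foo_assign_intf_node() is hypothetical:

    #include <linux/device.h>
    #include <linux/usb.h>
    #include <linux/usb/of.h>

    static void foo_assign_intf_node(struct usb_device *udev,
    				 struct usb_interface *intf,
    				 u8 config, u8 ifnum)
    {
    	if (usb_of_has_combined_node(udev))
    		/* sole interface shares the device's node */
    		device_set_of_node_from_dev(&intf->dev, &udev->dev);
    	else
    		intf->dev.of_node = usb_of_get_interface_node(udev, config,
    							      ifnum);
    }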
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 9fdf137c4865..796c9b149728 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -479,9 +479,6 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if (is_out)
allowed |= URB_ZERO_PACKET;
/* FALLTHROUGH */
- case USB_ENDPOINT_XFER_CONTROL:
- allowed |= URB_NO_FSBR; /* only affects UHCI */
- /* FALLTHROUGH */
default: /* all non-iso endpoints */
if (!is_out)
allowed |= URB_SHORT_NOT_OK;
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 845286f08ab0..2f5fbc56a9dd 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -645,8 +645,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
raw_port = usb_hcd_find_raw_port_number(usb_hcd,
port1);
}
- dev->dev.of_node = usb_of_get_child_node(parent->dev.of_node,
- raw_port);
+ dev->dev.of_node = usb_of_get_device_node(parent, raw_port);
/* hub driver sets up TT records */
}
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 2bee08d084ae..149cc7480971 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -40,6 +40,7 @@ extern int usb_remove_device(struct usb_device *udev);
extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
+extern int usb_set_isoch_delay(struct usb_device *dev);
extern int usb_get_bos_descriptor(struct usb_device *dev);
extern void usb_release_bos_descriptor(struct usb_device *dev);
extern char *usb_cache_string(struct usb_device *udev, int index);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 31749c79045f..cd77af3b1565 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -929,6 +929,7 @@ struct dwc2_hsotg {
int irq;
struct clk *clk;
struct reset_control *reset;
+ struct reset_control *reset_ecc;
unsigned int queuing_high_bandwidth:1;
unsigned int srp_success:1;
@@ -971,6 +972,7 @@ struct dwc2_hsotg {
} flags;
struct list_head non_periodic_sched_inactive;
+ struct list_head non_periodic_sched_waiting;
struct list_head non_periodic_sched_active;
struct list_head *non_periodic_qh_ptr;
struct list_head periodic_sched_inactive;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 7b6eb0ad513b..a5d72fcd1603 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -659,6 +659,10 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
qh_list_entry)
dev_dbg(hsotg->dev, " %p\n", qh);
+ dev_dbg(hsotg->dev, " NP waiting sched:\n");
+ list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting,
+ qh_list_entry)
+ dev_dbg(hsotg->dev, " %p\n", qh);
dev_dbg(hsotg->dev, " NP active sched:\n");
list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
qh_list_entry)
@@ -1818,6 +1822,7 @@ static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
{
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
+ dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_waiting);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
@@ -4998,6 +5003,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
/* Free memory for QH/QTD lists */
dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive);
+ dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_waiting);
dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active);
dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive);
dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready);
@@ -5159,6 +5165,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
/* Initialize the non-periodic schedule */
INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
+ INIT_LIST_HEAD(&hsotg->non_periodic_sched_waiting);
INIT_LIST_HEAD(&hsotg->non_periodic_sched_active);
/* Initialize the periodic schedule */
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 78e9e01051b5..ad60e46e66e1 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -314,12 +314,16 @@ struct dwc2_hs_transfer_time {
* descriptor and indicates original XferSize value for the
* descriptor
* @unreserve_timer: Timer for releasing periodic reservation.
+ * @wait_timer: Timer used to wait before re-queuing.
* @dwc2_tt: Pointer to our tt info (or NULL if no tt).
* @ttport: Port number within our tt.
* @tt_buffer_dirty True if clear_tt_buffer_complete is pending
* @unreserve_pending: True if we planned to unreserve but haven't yet.
* @schedule_low_speed: True if we have a low/full speed component (either the
* host is in low/full speed mode or do_split).
+ * @want_wait: We should wait before re-queuing; only matters for non-
+ * periodic transfers and is ignored for periodic ones.
+ * @wait_timer_cancel: Set to true to cancel the wait_timer.
*
* A Queue Head (QH) holds the static characteristics of an endpoint and
* maintains a list of transfers (QTDs) for that endpoint. A QH structure may
@@ -354,11 +358,14 @@ struct dwc2_qh {
u32 desc_list_sz;
u32 *n_bytes;
struct timer_list unreserve_timer;
+ struct timer_list wait_timer;
struct dwc2_tt *dwc_tt;
int ttport;
unsigned tt_buffer_dirty:1;
unsigned unreserve_pending:1;
unsigned schedule_low_speed:1;
+ unsigned want_wait:1;
+ unsigned wait_timer_cancel:1;
};
/**
@@ -389,6 +396,7 @@ struct dwc2_qh {
* @n_desc: Number of DMA descriptors for this QTD
* @isoc_frame_index_last: Last activated frame (packet) index, used in
* descriptor DMA mode only
+ * @num_naks: Number of NAKs received on this QTD.
* @urb: URB for this transfer
* @qh: Queue head for this QTD
* @qtd_list_entry: For linking to the QH's list of QTDs
@@ -419,6 +427,7 @@ struct dwc2_qtd {
u8 error_count;
u8 n_desc;
u16 isoc_frame_index_last;
+ u16 num_naks;
struct dwc2_hcd_urb *urb;
struct dwc2_qh *qh;
struct list_head qtd_list_entry;
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 916d991b96b8..a5dfd9d8bd9a 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -53,6 +53,12 @@
#include "core.h"
#include "hcd.h"
+/*
+ * If we get this many NAKs on a split transaction we'll slow down
+ * retransmission. A 1 here means delay after the first NAK.
+ */
+#define DWC2_NAKS_BEFORE_DELAY 3
+
/* This function is for debug only */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
@@ -1201,11 +1207,25 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
/*
* Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
* interrupt. Re-start the SSPLIT transfer.
+ *
+ * Normally for non-periodic transfers we'll retry right away, but to
+ * avoid interrupt storms we'll wait before retrying if we've got
+ * several NAKs. If we didn't do this we'd retry directly from the
+ * interrupt handler and could end up quickly getting another
+ * interrupt (another NAK), which we'd retry.
+ *
+ * Note that in DMA mode software only gets involved to re-send NAKed
+ * transfers for split transactions, so we only need to apply this
+ * delaying logic when handling splits. In non-DMA mode presumably we
+ * might want a similar delay if someone can demonstrate this problem
+ * affects that code path too.
*/
if (chan->do_split) {
if (chan->complete_split)
qtd->error_count = 0;
qtd->complete_split = 0;
+ qtd->num_naks++;
+ qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY;
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
goto handle_nak_done;
}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index fcd1676c7f0b..e34ad5e65350 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -58,6 +58,9 @@
/* Wait this long before releasing periodic reservation */
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
+/* If we get a NAK, wait this long before retrying */
+#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1))
+
/**
* dwc2_periodic_channel_available() - Checks that a channel is available for a
* periodic transfer
@@ -1441,6 +1444,55 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
}
/**
+ * dwc2_wait_timer_fn() - Timer function to re-queue after waiting
+ *
+ * As per the spec, a NAK indicates that "a function is temporarily unable to
+ * transmit or receive data, but will eventually be able to do so without need
+ * of host intervention".
+ *
+ * That means that when we encounter a NAK we're supposed to retry.
+ *
+ * ...but if we retry right away (from the interrupt handler that saw the NAK)
+ * then we can end up with an interrupt storm (if the other side keeps NAKing
+ * us) because on slow enough CPUs it could take us longer to get out of the
+ * interrupt routine than it takes for the device to send another NAK. That
+ * leads to a constant stream of NAK interrupts and the CPU locks up.
+ *
+ * ...so instead of retrying right away in the case of a NAK we'll set a timer
+ * to retry some time later. This function handles that timer and moves the
+ * qh back to the "inactive" list, then queues transactions.
+ *
+ * @t: Pointer to wait_timer in a qh.
+ */
+static void dwc2_wait_timer_fn(struct timer_list *t)
+{
+ struct dwc2_qh *qh = from_timer(qh, t, wait_timer);
+ struct dwc2_hsotg *hsotg = qh->hsotg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ /*
+ * We'll set wait_timer_cancel to true if we want to cancel this
+ * operation in dwc2_hcd_qh_unlink().
+ */
+ if (!qh->wait_timer_cancel) {
+ enum dwc2_transaction_type tr_type;
+
+ qh->want_wait = false;
+
+ list_move(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_inactive);
+
+ tr_type = dwc2_hcd_select_transactions(hsotg);
+ if (tr_type != DWC2_TRANSACTION_NONE)
+ dwc2_hcd_queue_transactions(hsotg, tr_type);
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+/**
* dwc2_qh_init() - Initializes a QH structure
*
* @hsotg: The HCD state structure for the DWC OTG controller
@@ -1468,6 +1520,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
/* Initialize QH */
qh->hsotg = hsotg;
timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
+ timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0);
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
@@ -1628,6 +1681,16 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
dwc2_do_unreserve(hsotg, qh);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
+
+ /*
+ * We don't have the lock so we can safely wait until the wait timer
+ * finishes. Of course, at this point in time we'd better have set
+ * wait_timer_cancel to true so if this timer was still pending it
+ * won't do anything anyway, but we want it to finish before we free
+ * memory.
+ */
+ del_timer_sync(&qh->wait_timer);
+
dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
if (qh->desc_list)
@@ -1663,9 +1726,16 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
qh->start_active_frame = hsotg->frame_number;
qh->next_active_frame = qh->start_active_frame;
- /* Always start in inactive schedule */
- list_add_tail(&qh->qh_list_entry,
- &hsotg->non_periodic_sched_inactive);
+ if (qh->want_wait) {
+ list_add_tail(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_waiting);
+ qh->wait_timer_cancel = false;
+ mod_timer(&qh->wait_timer,
+ jiffies + DWC2_RETRY_WAIT_DELAY + 1);
+ } else {
+ list_add_tail(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_inactive);
+ }
return 0;
}
@@ -1695,6 +1765,9 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
dev_vdbg(hsotg->dev, "%s()\n", __func__);
+ /* If the wait_timer is pending, this will stop it from acting */
+ qh->wait_timer_cancel = true;
+
if (list_empty(&qh->qh_list_entry))
/* QH is not in a schedule */
return;
@@ -1903,7 +1976,7 @@ void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
if (dwc2_qh_is_non_per(qh)) {
dwc2_hcd_qh_unlink(hsotg, qh);
if (!list_empty(&qh->qtd_list))
- /* Add back to inactive non-periodic schedule */
+ /* Add back to inactive/waiting non-periodic schedule */
dwc2_hcd_qh_add(hsotg, qh);
return;
}
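The wait_timer added above uses the timer_list API that became mandatory in 4.15: the callback receives the timer pointer and recovers its container with from_timer(). A minimal sketch of the idiom; struct foo and its fields are hypothetical:

    #include <linux/jiffies.h>
    #include <linux/printk.h>
    #include <linux/timer.h>

    struct foo {
    	struct timer_list wait_timer;
    	bool cancel;
    };

    static void foo_wait_timer_fn(struct timer_list *t)
    {
    	/* recover the enclosing object from the timer pointer */
    	struct foo *f = from_timer(f, t, wait_timer);

    	if (!f->cancel)
    		pr_info("requeueing after NAK back-off\n");
    }

    static void foo_start_wait(struct foo *f)
    {
    	timer_setup(&f->wait_timer, foo_wait_timer_fn, 0);
    	f->cancel = false;
    	mod_timer(&f->wait_timer, jiffies + msecs_to_jiffies(1) + 1);
    }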
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 3e26550d13dd..4703478f702f 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -221,6 +221,15 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
reset_control_deassert(hsotg->reset);
+ hsotg->reset_ecc = devm_reset_control_get_optional(hsotg->dev, "dwc2-ecc");
+ if (IS_ERR(hsotg->reset_ecc)) {
+ ret = PTR_ERR(hsotg->reset_ecc);
+ dev_err(hsotg->dev, "error getting reset control for ecc %d\n", ret);
+ return ret;
+ }
+
+ reset_control_deassert(hsotg->reset_ecc);
+
/* Set default UTMI width */
hsotg->phyif = GUSBCFG_PHYIF16;
@@ -319,6 +328,7 @@ static int dwc2_driver_remove(struct platform_device *dev)
dwc2_lowlevel_hw_disable(hsotg);
reset_control_assert(hsotg->reset);
+ reset_control_assert(hsotg->reset_ecc);
return 0;
}
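The "dwc2-ecc" reset above uses the optional getter, which returns NULL rather than an error when the firmware node names no such reset, so the subsequent assert/deassert calls degrade to no-ops. A minimal sketch of the idiom; foo_get_ecc_reset() is hypothetical:

    #include <linux/err.h>
    #include <linux/reset.h>

    static int foo_get_ecc_reset(struct device *dev,
    			     struct reset_control **rst)
    {
    	*rst = devm_reset_control_get_optional(dev, "dwc2-ecc");
    	if (IS_ERR(*rst))		/* real error, e.g. -EPROBE_DEFER */
    		return PTR_ERR(*rst);

    	reset_control_deassert(*rst);	/* NULL-safe when reset is absent */
    	return 0;
    }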
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 07832509584f..ade2ab00d37a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -147,6 +147,7 @@ static void __dwc3_set_mode(struct work_struct *work)
otg_set_vbus(dwc->usb2_phy->otg, true);
phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
+ phy_calibrate(dwc->usb2_generic_phy);
}
break;
case DWC3_GCTL_PRTCAP_DEVICE:
@@ -945,6 +946,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dev_err(dev, "failed to initialize host\n");
return ret;
}
+ phy_calibrate(dwc->usb2_generic_phy);
break;
case USB_DR_MODE_OTG:
INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
@@ -1062,6 +1064,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
&dwc->fladj);
+ dwc->dis_metastability_quirk = device_property_read_bool(dev,
+ "snps,dis_metastability_quirk");
+
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
dwc->tx_de_emphasis = tx_de_emphasis;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 4a4a4c98508c..03c7aaaac926 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -796,7 +796,6 @@ struct dwc3_scratchpad_array {
* @usb2_generic_phy: pointer to USB2 PHY
* @usb3_generic_phy: pointer to USB3 PHY
* @ulpi: pointer to ulpi interface
- * @isoch_delay: wValue from Set Isochronous Delay request;
* @u2sel: parameter from Set SEL request.
* @u2pel: parameter from Set SEL request.
* @u1sel: parameter from Set SEL request.
@@ -857,6 +856,7 @@ struct dwc3_scratchpad_array {
* 1 - -3.5dB de-emphasis
* 2 - No de-emphasis
* 3 - Reserved
+ * @dis_metastability_quirk: set to disable metastability quirk.
* @imod_interval: set the interrupt moderation interval in 250ns
* increments or 0 to disable.
*/
@@ -955,7 +955,6 @@ struct dwc3 {
enum dwc3_ep0_state ep0state;
enum dwc3_link_state link_state;
- u16 isoch_delay;
u16 u2sel;
u16 u2pel;
u8 u1sel;
@@ -1010,6 +1009,8 @@ struct dwc3 {
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
+ unsigned dis_metastability_quirk:1;
+
u16 imod_interval;
};
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 368f8e59219a..bfb90c52d8fc 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -247,6 +247,15 @@ static inline void dwc3_decode_set_clear_feature(__u8 t, __u8 b, __u16 v,
case USB_DEVICE_TEST_MODE:
s = "Test Mode";
break;
+ case USB_DEVICE_U1_ENABLE:
+ s = "U1 Enable";
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ s = "U2 Enable";
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ s = "LTM Enable";
+ break;
default:
s = "UNKNOWN";
} s; }),
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index fd3e7ad2eb0e..9c2e4a17918e 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -736,11 +736,7 @@ static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ct
if (wIndex || wLength)
return -EINVAL;
- /*
- * REVISIT It's unclear from Databook what to do with this
- * value. For now, just cache it.
- */
- dwc->isoch_delay = wValue;
+ dwc->gadget.isoch_delay = wValue;
return 0;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 639dd1b163a0..616ef49ccb49 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2005,7 +2005,8 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g,
* STAR#9000525659: Clock Domain Crossing on DCTL in
* USB 2.0 Mode
*/
- if (dwc->revision < DWC3_REVISION_220A) {
+ if (dwc->revision < DWC3_REVISION_220A &&
+ !dwc->dis_metastability_quirk) {
reg |= DWC3_DCFG_SUPERSPEED;
} else {
switch (speed) {
@@ -3224,7 +3225,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
* is less than super speed because we don't have means, yet, to tell
* composite.c that we are USB 2.0 + LPM ECN.
*/
- if (dwc->revision < DWC3_REVISION_220A)
+ if (dwc->revision < DWC3_REVISION_220A &&
+ !dwc->dis_metastability_quirk)
dev_info(dwc->dev, "changing max_speed on rev %08x\n",
dwc->revision);
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index a9dd5c64e6c7..babaee981aa7 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -37,12 +37,12 @@ DECLARE_EVENT_CLASS(dwc3_log_io,
);
DEFINE_EVENT(dwc3_log_io, dwc3_readl,
- TP_PROTO(void *base, u32 offset, u32 value),
+ TP_PROTO(void __iomem *base, u32 offset, u32 value),
TP_ARGS(base, offset, value)
);
DEFINE_EVENT(dwc3_log_io, dwc3_writel,
- TP_PROTO(void *base, u32 offset, u32 value),
+ TP_PROTO(void __iomem *base, u32 offset, u32 value),
TP_ARGS(base, offset, value)
);
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 8a700b45b9a9..e15e896f356c 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -328,7 +328,7 @@ static void xdbc_mem_init(void)
ep_in = (struct xdbc_ep_context *)&ctx->in;
ep_in->ep_info1 = 0;
- ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
+ ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);
/* Set DbC context and info registers: */
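(For reference, the one-line fix above corrects the endpoint type encoded into ep_info2 for the IN endpoint. A sketch of the dword-2 field packing as the xHCI spec defines it; the macro definitions below are restated from the spec for illustration, not taken from this patch:

/* xHCI endpoint context dword 2 layout (illustrative): */
#define EP_TYPE(p)	((p) << 3)		/* bits 5:3: 2 = Bulk OUT, 6 = Bulk IN */
#define MAX_BURST(p)	(((p) & 0xff) << 8)	/* bits 15:8 */
#define MAX_PACKET(p)	(((p) & 0xffff) << 16)	/* bits 31:16 */

/* The IN endpoint context must therefore use the Bulk IN type, as fixed above: */
u32 ep_info2 = EP_TYPE(6 /* BULK_IN_EP */) | MAX_PACKET(1024) | MAX_BURST(max_burst);
)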
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index b6cf5ab5a0a1..67564725e371 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -266,6 +266,7 @@ static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
}
static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+ __releases(&ffs->ev.waitq.lock)
{
struct usb_request *req = ffs->ep0req;
int ret;
@@ -458,6 +459,7 @@ done_spin:
/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
size_t n)
+ __releases(&ffs->ev.waitq.lock)
{
/*
* n cannot be bigger than ffs->ev.count, which cannot be bigger than
@@ -543,6 +545,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
break;
}
+ /* unlocks spinlock */
return __ffs_ep0_read_events(ffs, buf,
min(n, (size_t)ffs->ev.count));
@@ -638,10 +641,10 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
return ret;
}
-static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
+static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
{
struct ffs_data *ffs = file->private_data;
- unsigned int mask = POLLWRNORM;
+ __poll_t mask = POLLWRNORM;
int ret;
poll_wait(file, &ffs->ev.waitq, wait);
@@ -1246,7 +1249,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
desc = epfile->ep->descs[desc_idx];
spin_unlock_irq(&epfile->ffs->eps_lock);
- ret = copy_to_user((void *)value, desc, desc->bLength);
+ ret = copy_to_user((void __user *)value, desc, desc->bLength);
if (ret)
ret = -EFAULT;
return ret;
@@ -2324,7 +2327,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
length, pnl, type);
return -EINVAL;
}
- pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
+ pdl = le32_to_cpu(*(__le32 *)((u8 *)data + 10 + pnl));
if (length != 14 + pnl + pdl) {
pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
length, pnl, pdl, type);
@@ -2878,7 +2881,7 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
- ext_prop->data_len = le32_to_cpu(*(u32 *)
+ ext_prop->data_len = le32_to_cpu(*(__le32 *)
usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
length = ext_prop->name_len + ext_prop->data_len + 14;
@@ -3700,7 +3703,8 @@ static void ffs_closed(struct ffs_data *ffs)
ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
ffs_dev_unlock();
- unregister_gadget_item(ci);
+ if (test_bit(FFS_FL_BOUND, &ffs->flags))
+ unregister_gadget_item(ci);
return;
done:
ffs_dev_unlock();
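(The __releases() annotations added above are sparse lock-context markers, not runtime code: they document that a function entered with the lock held returns with it dropped. A minimal sketch of the convention, using a hypothetical helper rather than code from this driver:

/* sparse verifies the lock held on entry is released before return: */
static int consume_one_event_count(struct ffs_data *ffs)
	__releases(&ffs->ev.waitq.lock)
{
	int count = ffs->ev.count;

	spin_unlock(&ffs->ev.waitq.lock);	/* satisfies the annotation */
	return count;
}
)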
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index daae35318a3a..a73efb1c47d0 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -413,10 +413,10 @@ release_write_pending_unlocked:
return status;
}
-static unsigned int f_hidg_poll(struct file *file, poll_table *wait)
+static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
{
struct f_hidg *hidg = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &hidg->read_queue, wait);
poll_wait(file, &hidg->write_queue, wait);
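(__poll_t is the sparse-checked bitwise type for poll masks; the conversions above keep the POLL* bit arithmetic but let sparse flag mixing with plain integers. A generic sketch with hypothetical device state:

static __poll_t demo_poll(struct file *file, poll_table *wait)
{
	struct demo_dev *dev = file->private_data;	/* hypothetical device */
	__poll_t mask = 0;

	poll_wait(file, &dev->waitq, wait);
	if (dev->rx_ready)
		mask |= POLLIN | POLLRDNORM;
	if (dev->tx_space)
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}
)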
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index c5bce8e22983..5780fba620ab 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -73,9 +73,7 @@ struct f_ncm {
struct sk_buff *skb_tx_ndp;
u16 ndp_dgram_count;
bool timer_force_tx;
- struct tasklet_struct tx_tasklet;
struct hrtimer task_timer;
-
bool timer_stopping;
};
@@ -1104,7 +1102,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
/* Delay the timer. */
hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_SOFT);
/* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
@@ -1148,17 +1146,15 @@ err:
}
/*
- * This transmits the NTB if there are frames waiting.
+ * The transmit should only be run if no skb data has been sent
+ * for a certain duration.
*/
-static void ncm_tx_tasklet(unsigned long data)
+static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
{
- struct f_ncm *ncm = (void *)data;
-
- if (ncm->timer_stopping)
- return;
+ struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
/* Only send if data is available. */
- if (ncm->skb_tx_data) {
+ if (!ncm->timer_stopping && ncm->skb_tx_data) {
ncm->timer_force_tx = true;
/* XXX This allowance of a NULL skb argument to ndo_start_xmit
@@ -1171,16 +1167,6 @@ static void ncm_tx_tasklet(unsigned long data)
ncm->timer_force_tx = false;
}
-}
-
-/*
- * The transmit should only be run if no skb data has been sent
- * for a certain duration.
- */
-static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
-{
- struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
- tasklet_schedule(&ncm->tx_tasklet);
return HRTIMER_NORESTART;
}
@@ -1513,8 +1499,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
- tasklet_init(&ncm->tx_tasklet, ncm_tx_tasklet, (unsigned long) ncm);
- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ncm->task_timer.function = ncm_tx_timeout;
DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
@@ -1623,7 +1608,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ncm unbind\n");
hrtimer_cancel(&ncm->task_timer);
- tasklet_kill(&ncm->tx_tasklet);
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
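(The conversion above can drop the tasklet because timers started with HRTIMER_MODE_REL_SOFT already run their callback in softirq context, so the transmit work can be done directly in the timer function. A condensed sketch of the pattern, with hypothetical names:

static enum hrtimer_restart demo_tx_timeout(struct hrtimer *t)
{
	struct demo *d = container_of(t, struct demo, timer);

	demo_flush_pending(d);	/* runs in softirq context, no tasklet hop */
	return HRTIMER_NORESTART;
}

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	d->timer.function = demo_tx_timeout;
	hrtimer_start(&d->timer, ms_to_ktime(1), HRTIMER_MODE_REL_SOFT);
)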
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index dd607b99eb1d..453578c4af69 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -680,12 +680,12 @@ printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
return 0;
}
-static unsigned int
+static __poll_t
printer_poll(struct file *fd, poll_table *wait)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
- int status = 0;
+ __poll_t status = 0;
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 4d653d2960d4..29436f75bbe0 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/kthread.h>
+#include <linux/kfifo.h>
#include "u_serial.h"
@@ -80,19 +81,11 @@
#define WRITE_BUF_SIZE 8192 /* TX only */
#define GS_CONSOLE_BUF_SIZE 8192
-/* circular buffer */
-struct gs_buf {
- unsigned buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
/* console info */
struct gscons_info {
struct gs_port *port;
struct task_struct *console_thread;
- struct gs_buf con_buf;
+ struct kfifo con_buf;
/* protect the buf and busy flag */
spinlock_t con_lock;
int req_busy;
@@ -122,7 +115,7 @@ struct gs_port {
struct list_head write_pool;
int write_started;
int write_allocated;
- struct gs_buf port_write_buf;
+ struct kfifo port_write_buf;
wait_queue_head_t drain_wait; /* wait while writes drain */
bool write_busy;
wait_queue_head_t close_wait;
@@ -154,144 +147,6 @@ static struct portmaster {
/*-------------------------------------------------------------------------*/
-/* Circular Buffer */
-
-/*
- * gs_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
-{
- gb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (gb->buf_buf == NULL)
- return -ENOMEM;
-
- gb->buf_size = size;
- gb->buf_put = gb->buf_buf;
- gb->buf_get = gb->buf_buf;
-
- return 0;
-}
-
-/*
- * gs_buf_free
- *
- * Free the buffer and all associated memory.
- */
-static void gs_buf_free(struct gs_buf *gb)
-{
- kfree(gb->buf_buf);
- gb->buf_buf = NULL;
-}
-
-/*
- * gs_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-static void gs_buf_clear(struct gs_buf *gb)
-{
- gb->buf_get = gb->buf_put;
- /* equivalent to a get of all data available */
-}
-
-/*
- * gs_buf_data_avail
- *
- * Return the number of bytes of data written into the circular
- * buffer.
- */
-static unsigned gs_buf_data_avail(struct gs_buf *gb)
-{
- return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
-}
-
-/*
- * gs_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-static unsigned gs_buf_space_avail(struct gs_buf *gb)
-{
- return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
-}
-
-/*
- * gs_buf_put
- *
- * Copy data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-static unsigned
-gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
-{
- unsigned len;
-
- len = gs_buf_space_avail(gb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = gb->buf_buf + gb->buf_size - gb->buf_put;
- if (count > len) {
- memcpy(gb->buf_put, buf, len);
- memcpy(gb->buf_buf, buf+len, count - len);
- gb->buf_put = gb->buf_buf + count - len;
- } else {
- memcpy(gb->buf_put, buf, count);
- if (count < len)
- gb->buf_put += count;
- else /* count == len */
- gb->buf_put = gb->buf_buf;
- }
-
- return count;
-}
-
-/*
- * gs_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-static unsigned
-gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
-{
- unsigned len;
-
- len = gs_buf_data_avail(gb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = gb->buf_buf + gb->buf_size - gb->buf_get;
- if (count > len) {
- memcpy(buf, gb->buf_get, len);
- memcpy(buf+len, gb->buf_buf, count - len);
- gb->buf_get = gb->buf_buf + count - len;
- } else {
- memcpy(buf, gb->buf_get, count);
- if (count < len)
- gb->buf_get += count;
- else /* count == len */
- gb->buf_get = gb->buf_buf;
- }
-
- return count;
-}
-
-/*-------------------------------------------------------------------------*/
-
/* I/O glue between TTY (upper) and USB function (lower) driver layers */
/*
@@ -346,11 +201,11 @@ gs_send_packet(struct gs_port *port, char *packet, unsigned size)
{
unsigned len;
- len = gs_buf_data_avail(&port->port_write_buf);
+ len = kfifo_len(&port->port_write_buf);
if (len < size)
size = len;
if (size != 0)
- size = gs_buf_get(&port->port_write_buf, packet, size);
+ size = kfifo_out(&port->port_write_buf, packet, size);
return size;
}
@@ -398,7 +253,7 @@ __acquires(&port->port_lock)
req->length = len;
list_del(&req->list);
- req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
+ req->zero = kfifo_is_empty(&port->port_write_buf);
pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
port->port_num, len, *((u8 *)req->buf),
@@ -787,10 +642,11 @@ static int gs_open(struct tty_struct *tty, struct file *file)
spin_lock_irq(&port->port_lock);
/* allocate circular buffer on first open */
- if (port->port_write_buf.buf_buf == NULL) {
+ if (!kfifo_initialized(&port->port_write_buf)) {
spin_unlock_irq(&port->port_lock);
- status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
+ status = kfifo_alloc(&port->port_write_buf,
+ WRITE_BUF_SIZE, GFP_KERNEL);
spin_lock_irq(&port->port_lock);
if (status) {
@@ -839,7 +695,7 @@ static int gs_writes_finished(struct gs_port *p)
/* return true on disconnect or empty buffer */
spin_lock_irq(&p->port_lock);
- cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
+ cond = (p->port_usb == NULL) || !kfifo_len(&p->port_write_buf);
spin_unlock_irq(&p->port_lock);
return cond;
@@ -875,7 +731,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
/* wait for circular write buffer to drain, disconnect, or at
* most GS_CLOSE_TIMEOUT seconds; then discard the rest
*/
- if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
+ if (kfifo_len(&port->port_write_buf) > 0 && gser) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->drain_wait,
gs_writes_finished(port),
@@ -889,9 +745,9 @@ static void gs_close(struct tty_struct *tty, struct file *file)
* let the push tasklet fire again until we're re-opened.
*/
if (gser == NULL)
- gs_buf_free(&port->port_write_buf);
+ kfifo_free(&port->port_write_buf);
else
- gs_buf_clear(&port->port_write_buf);
+ kfifo_reset(&port->port_write_buf);
port->port.tty = NULL;
@@ -915,7 +771,7 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
spin_lock_irqsave(&port->port_lock, flags);
if (count)
- count = gs_buf_put(&port->port_write_buf, buf, count);
+ count = kfifo_in(&port->port_write_buf, buf, count);
/* treat count == 0 as flush_chars() */
if (port->port_usb)
gs_start_tx(port);
@@ -934,7 +790,7 @@ static int gs_put_char(struct tty_struct *tty, unsigned char ch)
port->port_num, tty, ch, __builtin_return_address(0));
spin_lock_irqsave(&port->port_lock, flags);
- status = gs_buf_put(&port->port_write_buf, &ch, 1);
+ status = kfifo_put(&port->port_write_buf, ch);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
@@ -961,7 +817,7 @@ static int gs_write_room(struct tty_struct *tty)
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb)
- room = gs_buf_space_avail(&port->port_write_buf);
+ room = kfifo_avail(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
@@ -977,7 +833,7 @@ static int gs_chars_in_buffer(struct tty_struct *tty)
int chars = 0;
spin_lock_irqsave(&port->port_lock, flags);
- chars = gs_buf_data_avail(&port->port_write_buf);
+ chars = kfifo_len(&port->port_write_buf);
spin_unlock_irqrestore(&port->port_lock, flags);
pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
@@ -1148,7 +1004,7 @@ static int gs_console_thread(void *data)
ep = port->port_usb->in;
spin_lock_irq(&info->con_lock);
- count = gs_buf_data_avail(&info->con_buf);
+ count = kfifo_len(&info->con_buf);
size = ep->maxpacket;
if (count > 0 && !info->req_busy) {
@@ -1156,7 +1012,7 @@ static int gs_console_thread(void *data)
if (count < size)
size = count;
- xfer = gs_buf_get(&info->con_buf, req->buf, size);
+ xfer = kfifo_out(&info->con_buf, req->buf, size);
req->length = xfer;
spin_unlock(&info->con_lock);
@@ -1192,7 +1048,7 @@ static int gs_console_setup(struct console *co, char *options)
info->req_busy = 0;
spin_lock_init(&info->con_lock);
- status = gs_buf_alloc(&info->con_buf, GS_CONSOLE_BUF_SIZE);
+ status = kfifo_alloc(&info->con_buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
if (status) {
pr_err("%s: allocate console buffer failed\n", __func__);
return status;
@@ -1202,7 +1058,7 @@ static int gs_console_setup(struct console *co, char *options)
co, "gs_console");
if (IS_ERR(info->console_thread)) {
pr_err("%s: cannot create console thread\n", __func__);
- gs_buf_free(&info->con_buf);
+ kfifo_free(&info->con_buf);
return PTR_ERR(info->console_thread);
}
wake_up_process(info->console_thread);
@@ -1217,7 +1073,7 @@ static void gs_console_write(struct console *co,
unsigned long flags;
spin_lock_irqsave(&info->con_lock, flags);
- gs_buf_put(&info->con_buf, buf, count);
+ kfifo_in(&info->con_buf, buf, count);
spin_unlock_irqrestore(&info->con_lock, flags);
wake_up_process(info->console_thread);
@@ -1256,7 +1112,7 @@ static void gserial_console_exit(void)
unregister_console(&gserial_cons);
if (!IS_ERR_OR_NULL(info->console_thread))
kthread_stop(info->console_thread);
- gs_buf_free(&info->con_buf);
+ kfifo_free(&info->con_buf);
}
#else
@@ -1529,7 +1385,7 @@ void gserial_disconnect(struct gserial *gser)
/* finally, free any unused/unusable I/O buffers */
spin_lock_irqsave(&port->port_lock, flags);
if (port->port.count == 0 && !port->openclose)
- gs_buf_free(&port->port_write_buf);
+ kfifo_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool, NULL);
gs_free_requests(gser->out, &port->read_queue, NULL);
gs_free_requests(gser->in, &port->write_pool, NULL);
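(For reference, the kfifo calls adopted above map one-to-one onto the removed gs_buf helpers; note that kfifo_alloc() rounds the requested size up to a power of two. A side-by-side sketch, error handling trimmed:

struct kfifo fifo;

kfifo_alloc(&fifo, WRITE_BUF_SIZE, GFP_KERNEL);	/* gs_buf_alloc()       */
n = kfifo_in(&fifo, buf, count);		/* gs_buf_put()         */
n = kfifo_out(&fifo, packet, size);		/* gs_buf_get()         */
len = kfifo_len(&fifo);				/* gs_buf_data_avail()  */
room = kfifo_avail(&fifo);			/* gs_buf_space_avail() */
kfifo_reset(&fifo);				/* gs_buf_clear()       */
kfifo_free(&fifo);				/* gs_buf_free()        */
)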
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 278d50ff1eea..9e33d5206d54 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -193,7 +193,7 @@ int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
* This function implements video queue polling and is intended to be used by
* the device poll handler.
*/
-unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
+__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
return vb2_poll(&queue->queue, file, wait);
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index 51ee94e5cf2b..f9f65b5c1062 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -72,7 +72,7 @@ int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
int uvcg_dequeue_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *buf, int nonblocking);
-unsigned int uvcg_queue_poll(struct uvc_video_queue *queue,
+__poll_t uvcg_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma);
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index f3069db6f08e..9a9019625496 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -329,7 +329,7 @@ uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return uvcg_queue_mmap(&uvc->video.queue, vma);
}
-static unsigned int
+static __poll_t
uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 9343ec436485..5960e76f4c75 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1209,11 +1209,11 @@ dev_release (struct inode *inode, struct file *fd)
return 0;
}
-static unsigned int
+static __poll_t
ep0_poll (struct file *fd, poll_table *wait)
{
struct dev_data *dev = fd->private_data;
- int mask = 0;
+ __poll_t mask = 0;
if (dev->state <= STATE_DEV_OPENED)
return DEFAULT_POLLMASK;
@@ -1470,7 +1470,6 @@ delegate:
dev->setup_wLength = w_length;
dev->setup_out_ready = 0;
dev->setup_out_error = 0;
- value = 0;
/* read DATA stage for OUT right away */
if (unlikely (!dev->setup_in && w_length)) {
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index fcee1ee0bf66..8465f081e921 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -102,10 +102,8 @@ static int ncm_do_config(struct usb_configuration *c)
}
f_ncm = usb_get_function(f_ncm_inst);
- if (IS_ERR(f_ncm)) {
- status = PTR_ERR(f_ncm);
- return status;
- }
+ if (IS_ERR(f_ncm))
+ return PTR_ERR(f_ncm);
status = usb_add_function(c, f_ncm);
if (status < 0) {
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 29f254793592..465ccd1104de 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2385,10 +2385,8 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
goto out_uninit;
}
if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
- dev_name(dev), udc) < 0) {
- dev_err(dev, "error requesting IRQ #%d\n", irq);
- goto out_uninit;
- }
+ dev_name(dev), udc) < 0)
+ goto report_request_failure;
/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
@@ -2398,10 +2396,8 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
goto out_uninit;
}
if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
- dev_name(dev), &udc->iudma[i]) < 0) {
- dev_err(dev, "error requesting IRQ #%d\n", irq);
- goto out_uninit;
- }
+ dev_name(dev), &udc->iudma[i]) < 0)
+ goto report_request_failure;
}
bcm63xx_udc_init_debugfs(udc);
@@ -2413,6 +2409,10 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
out_uninit:
bcm63xx_uninit_udc_hw(udc);
return rc;
+
+report_request_failure:
+ dev_err(dev, "error requesting IRQ #%d\n", irq);
+ goto out_uninit;
}
/**
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 93eff7dec2f5..859d5b11ba4c 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -912,7 +912,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
return 0;
/* "high bandwidth" works only at high speed */
- if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
+ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1)
return 0;
switch (type) {
@@ -1147,11 +1147,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
udc = kzalloc(sizeof(*udc), GFP_KERNEL);
if (!udc)
- goto err1;
-
- ret = device_add(&gadget->dev);
- if (ret)
- goto err2;
+ goto err_put_gadget;
device_initialize(&udc->dev);
udc->dev.release = usb_udc_release;
@@ -1160,7 +1156,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
udc->dev.parent = parent;
ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
if (ret)
- goto err3;
+ goto err_put_udc;
+
+ ret = device_add(&gadget->dev);
+ if (ret)
+ goto err_put_udc;
udc->gadget = gadget;
gadget->udc = udc;
@@ -1170,7 +1170,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
ret = device_add(&udc->dev);
if (ret)
- goto err4;
+ goto err_unlist_udc;
usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
udc->vbus = true;
@@ -1178,27 +1178,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
/* pick up one of pending gadget drivers */
ret = check_pending_gadget_drivers(udc);
if (ret)
- goto err5;
+ goto err_del_udc;
mutex_unlock(&udc_lock);
return 0;
-err5:
+ err_del_udc:
device_del(&udc->dev);
-err4:
+ err_unlist_udc:
list_del(&udc->list);
mutex_unlock(&udc_lock);
-err3:
- put_device(&udc->dev);
device_del(&gadget->dev);
-err2:
- kfree(udc);
+ err_put_udc:
+ put_device(&udc->dev);
-err1:
+ err_put_gadget:
put_device(&gadget->dev);
return ret;
}
@@ -1419,7 +1417,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
/* ------------------------------------------------------------------------- */
-static ssize_t usb_udc_srp_store(struct device *dev,
+static ssize_t srp_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
@@ -1429,9 +1427,9 @@ static ssize_t usb_udc_srp_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);
+static DEVICE_ATTR_WO(srp);
-static ssize_t usb_udc_softconn_store(struct device *dev,
+static ssize_t soft_connect_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
@@ -1455,7 +1453,7 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
+static DEVICE_ATTR_WO(soft_connect);
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
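(The usb_endpoint_maxp_mult() change above replaces an open-coded bit test. Per the USB 2.0 spec, wMaxPacketSize bits 12:11 hold the number of additional transactions per microframe, so a multiplier greater than 1 marks a high-bandwidth endpoint. A sketch of the equivalent computation, hypothetical helper name:

static int demo_maxp_mult(const struct usb_endpoint_descriptor *epd)
{
	u16 maxp = le16_to_cpu(epd->wMaxPacketSize);

	/* 0..2 additional transactions, i.e. 1..3 per microframe */
	return ((maxp >> 11) & 3) + 1;
}
)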
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index d0128f92ec5a..e744d4b7bfed 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -925,20 +925,8 @@ static void dummy_udc_set_speed(struct usb_gadget *_gadget,
struct dummy *dum;
dum = gadget_dev_to_dummy(&_gadget->dev);
-
- if (mod_data.is_super_speed)
- dum->gadget.speed = min_t(u8, USB_SPEED_SUPER, speed);
- else if (mod_data.is_high_speed)
- dum->gadget.speed = min_t(u8, USB_SPEED_HIGH, speed);
- else
- dum->gadget.speed = USB_SPEED_FULL;
-
+ dum->gadget.speed = speed;
dummy_udc_update_ep0(dum);
-
- if (dum->gadget.speed < speed)
- dev_dbg(udc_dev(dum), "This device can perform faster"
- " if you connect it to a %s port...\n",
- usb_speed_string(speed));
}
static int dummy_udc_start(struct usb_gadget *g,
@@ -2193,8 +2181,6 @@ static int dummy_hub_control(
USB_PORT_STAT_LOW_SPEED;
break;
default:
- dum_hcd->dum->gadget.speed =
- USB_SPEED_FULL;
break;
}
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index d606d4f13098..e5b4ee96c4bf 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1543,7 +1543,7 @@ static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
udc->ep0_state = WAIT_FOR_SETUP;
break;
case WAIT_FOR_SETUP:
- ERR("Unexpect ep0 packets\n");
+ ERR("Unexpected ep0 packets\n");
break;
default:
ep0stall(udc);
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index be2761f1b3f5..fadcf2653c3d 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -979,8 +979,6 @@ static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
max = ep->fifo_size;
do {
- is_short = 0;
-
udccsr = udc_ep_readl(ep, UDCCSR);
if (udccsr & UDCCSR_PC) {
ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
@@ -1134,7 +1132,6 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
if (unlikely(!_ep))
return -EINVAL;
- dev = udc_usb_ep->dev;
ep = udc_usb_ep->pxa_ep;
if (unlikely(!ep))
return -EINVAL;
diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
index f07ddb3f4bb9..2d1f68b5ea76 100644
--- a/drivers/usb/gadget/udc/trace.h
+++ b/drivers/usb/gadget/udc/trace.h
@@ -225,6 +225,7 @@ DECLARE_EVENT_CLASS(udc_log_req,
__field(unsigned, short_not_ok)
__field(int, status)
__field(int, ret)
+ __field(struct usb_request *, req)
),
TP_fast_assign(
snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name);
@@ -238,9 +239,10 @@ DECLARE_EVENT_CLASS(udc_log_req,
__entry->short_not_ok = req->short_not_ok;
__entry->status = req->status;
__entry->ret = ret;
+ __entry->req = req;
),
- TP_printk("%s: length %d/%d sgs %d/%d stream %d %s%s%s status %d --> %d",
- __get_str(name), __entry->actual, __entry->length,
+ TP_printk("%s: req %p length %d/%d sgs %d/%d stream %d %s%s%s status %d --> %d",
+ __get_str(name), __entry->req, __entry->actual, __entry->length,
__entry->num_mapped_sgs, __entry->num_sgs, __entry->stream_id,
__entry->zero ? "Z" : "z",
__entry->short_not_ok ? "S" : "s",
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 7da2b9ce8cb3..6407e433bc78 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -963,10 +963,8 @@ static struct usb_request *xudc_ep_alloc_request(struct usb_ep *_ep,
gfp_t gfp_flags)
{
struct xusb_ep *ep = to_xusb_ep(_ep);
- struct xusb_udc *udc;
struct xusb_req *req;
- udc = ep->udc;
req = kzalloc(sizeof(*req), gfp_flags);
if (!req)
return NULL;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index b80a94e632af..6150bed7cfa8 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -27,6 +27,14 @@ config USB_XHCI_HCD
module will be called xhci-hcd.
if USB_XHCI_HCD
+config USB_XHCI_DBGCAP
+ bool "xHCI support for debug capability"
+ depends on TTY
+ ---help---
+ Say 'Y' to enable support for the xHCI debug capability. Make
+ sure that your xHCI host supports the extended debug capability and
+ you want a TTY serial device based on the xHCI debug capability
+ before enabling this option. If unsure, say 'N'.
config USB_XHCI_PCI
tristate
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 32b036e2ffef..4ede4ce12366 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -14,6 +14,11 @@ fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o
xhci-hcd-y := xhci.o xhci-mem.o
xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
xhci-hcd-y += xhci-trace.o
+
+ifneq ($(CONFIG_USB_XHCI_DBGCAP), )
+ xhci-hcd-y += xhci-dbgcap.o xhci-dbgtty.o
+endif
+
ifneq ($(CONFIG_USB_XHCI_MTK), )
xhci-hcd-y += xhci-mtk-sch.o
endif
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 854b146a457d..8d8bafc70c1f 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -167,7 +167,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
continue;
ret = PTR_ERR(phy);
- dev_err(dev, "Can't get PHY device for port %d: %d\n",
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Can't get PHY for port %d: %d\n",
i, ret);
goto err_phy;
}
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
index 71fb61dd4a87..8f75cb7b197c 100644
--- a/drivers/usb/host/ehci-sysfs.c
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -7,7 +7,7 @@
/* Display the ports dedicated to the companion controller */
-static ssize_t show_companion(struct device *dev,
+static ssize_t companion_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -34,7 +34,7 @@ static ssize_t show_companion(struct device *dev,
* Syntax is "[-]portnum", where a leading '-' sign means
* return control of the port to the EHCI controller.
*/
-static ssize_t store_companion(struct device *dev,
+static ssize_t companion_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -59,13 +59,13 @@ static ssize_t store_companion(struct device *dev,
set_owner(ehci, portnum, new_owner);
return count;
}
-static DEVICE_ATTR(companion, 0644, show_companion, store_companion);
+static DEVICE_ATTR_RW(companion);
/*
* Display / Set uframe_periodic_max
*/
-static ssize_t show_uframe_periodic_max(struct device *dev,
+static ssize_t uframe_periodic_max_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -78,7 +78,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
}
-static ssize_t store_uframe_periodic_max(struct device *dev,
+static ssize_t uframe_periodic_max_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -143,7 +143,7 @@ out_unlock:
spin_unlock_irqrestore (&ehci->lock, flags);
return ret;
}
-static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max);
+static DEVICE_ATTR_RW(uframe_periodic_max);
static inline int create_sysfs_files(struct ehci_hcd *ehci)
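(The DEVICE_ATTR_RW()/DEVICE_ATTR_WO() conversions in these hunks rely on a naming convention: DEVICE_ATTR_RW(name) builds dev_attr_<name> from handlers named <name>_show and <name>_store, which is why the functions are renamed away from the show_/store_ prefixes. A minimal sketch with a hypothetical attribute:

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return count;
}
static DEVICE_ATTR_RW(demo);	/* expands to dev_attr_demo, mode 0644 */
)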
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 62fc955085a1..d8abf401918a 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1865,11 +1865,9 @@ static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
qh = kzalloc(sizeof(*qh), GFP_ATOMIC);
if (!qh)
goto done;
- qh->hw = (struct fotg210_qh_hw *)
- dma_pool_alloc(fotg210->qh_pool, flags, &dma);
+ qh->hw = dma_pool_zalloc(fotg210->qh_pool, flags, &dma);
if (!qh->hw)
goto fail;
- memset(qh->hw, 0, sizeof(*qh->hw));
qh->qh_dma = dma;
INIT_LIST_HEAD(&qh->qtd_list);
@@ -4121,7 +4119,7 @@ static int itd_urb_transaction(struct fotg210_iso_stream *stream,
} else {
alloc_itd:
spin_unlock_irqrestore(&fotg210->lock, flags);
- itd = dma_pool_alloc(fotg210->itd_pool, mem_flags,
+ itd = dma_pool_zalloc(fotg210->itd_pool, mem_flags,
&itd_dma);
spin_lock_irqsave(&fotg210->lock, flags);
if (!itd) {
@@ -4131,7 +4129,6 @@ alloc_itd:
}
}
- memset(itd, 0, sizeof(*itd));
itd->itd_dma = itd_dma;
list_add(&itd->itd_list, &sched->td_list);
}
@@ -4696,7 +4693,7 @@ static void scan_isoc(struct fotg210_hcd *fotg210)
/* Display / Set uframe_periodic_max
*/
-static ssize_t show_uframe_periodic_max(struct device *dev,
+static ssize_t uframe_periodic_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fotg210_hcd *fotg210;
@@ -4708,7 +4705,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
}
-static ssize_t store_uframe_periodic_max(struct device *dev,
+static ssize_t uframe_periodic_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fotg210_hcd *fotg210;
@@ -4775,8 +4772,7 @@ out_unlock:
return ret;
}
-static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max,
- store_uframe_periodic_max);
+static DEVICE_ATTR_RW(uframe_periodic_max);
static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
{
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 0c507a0cfe1f..a55cbba40a5a 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -413,7 +413,7 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
da8xx_ohci = to_da8xx_ohci(hcd);
da8xx_ohci->hcd = hcd;
- da8xx_ohci->usb11_clk = devm_clk_get(&pdev->dev, "usb11");
+ da8xx_ohci->usb11_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(da8xx_ohci->usb11_clk)) {
error = PTR_ERR(da8xx_ohci->usb11_clk);
if (error != -EPROBE_DEFER)
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index f5c90217777a..f9c3947577fc 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -600,7 +600,7 @@ static int uhci_start(struct usb_hcd *hcd)
uhci->dentry = dentry;
#endif
- uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
+ uhci->frame = dma_zalloc_coherent(uhci_dev(uhci),
UHCI_NUMFRAMES * sizeof(*uhci->frame),
&uhci->frame_dma_handle, GFP_KERNEL);
if (!uhci->frame) {
@@ -608,7 +608,6 @@ static int uhci_start(struct usb_hcd *hcd)
"unable to allocate consistent memory for frame list\n");
goto err_alloc_frame;
}
- memset(uhci->frame, 0, UHCI_NUMFRAMES * sizeof(*uhci->frame));
uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
GFP_KERNEL);
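(Both the fotg210 and uhci hunks above fold an allocate-then-memset pair into the zeroing allocator variants available at the time, dma_pool_zalloc() and dma_zalloc_coherent(). A sketch of the transformation on a hypothetical buffer:

/* before: */
frame = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
if (frame)
	memset(frame, 0, size);

/* after: one call, memory already zeroed */
frame = dma_zalloc_coherent(dev, size, &dma, GFP_KERNEL);
)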
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index f1cc47292a59..7f9f33c8c232 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include <linux/usb.h>
+#include <linux/clk.h>
#define usb_packetid(pipe) (usb_pipein(pipe) ? USB_PID_IN : USB_PID_OUT)
#define PIPE_DEVEP_MASK 0x0007ff00
@@ -447,6 +448,8 @@ struct uhci_hcd {
int total_load; /* Sum of array values */
short load[MAX_PHASE]; /* Periodic allocations */
+ struct clk *clk; /* (optional) clock source */
+
/* Reset host controller */
void (*reset_hc) (struct uhci_hcd *uhci);
int (*check_and_reset_hc) (struct uhci_hcd *uhci);
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 6cb16d4b2257..89700e26fb29 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -89,6 +89,8 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
+ uhci = hcd_to_uhci(hcd);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
@@ -98,8 +100,6 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
- uhci = hcd_to_uhci(hcd);
-
uhci->regs = hcd->regs;
/* Grab some things from the device-tree */
@@ -119,13 +119,28 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
"Enabled Aspeed implementation workarounds\n");
}
}
+
+ /* Get and enable clock if any specified */
+ uhci->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(uhci->clk)) {
+ ret = PTR_ERR(uhci->clk);
+ goto err_rmr;
+ }
+ ret = clk_prepare_enable(uhci->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
+ goto err_rmr;
+ }
+
ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
if (ret)
- goto err_rmr;
+ goto err_clk;
device_wakeup_enable(hcd->self.controller);
return 0;
+err_clk:
+ clk_disable_unprepare(uhci->clk);
err_rmr:
usb_put_hcd(hcd);
@@ -135,7 +150,9 @@ err_rmr:
static int uhci_hcd_platform_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ clk_disable_unprepare(uhci->clk);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
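(The probe and remove changes above follow the usual optional-clock pattern: devm_clk_get() with a NULL con_id takes the device's first clock, and every clk_prepare_enable() must be paired with clk_disable_unprepare() on the error path and in remove(). A condensed sketch of the flow, assuming the surrounding probe variables:

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* propagates -EPROBE_DEFER too */

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret) {
		clk_disable_unprepare(clk);
		return ret;
	}
)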
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index d40438238938..35fcb826152c 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -73,8 +73,7 @@ static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = urb->hcpriv;
- if (!(urb->transfer_flags & URB_NO_FSBR))
- urbp->fsbr = 1;
+ urbp->fsbr = 1;
}
static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
index c5ac9efb076a..276fb34c8efd 100644
--- a/drivers/usb/host/whci/asl.c
+++ b/drivers/usb/host/whci/asl.c
@@ -90,9 +90,7 @@ static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
while (qset->ntds) {
struct whc_qtd *td;
- int t;
- t = qset->td_start;
td = &qset->qtd[qset->td_start];
status = le32_to_cpu(td->status);
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 584d7b9a3683..386abf26641d 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -10,267 +10,6 @@
#include "xhci.h"
-#define XHCI_INIT_VALUE 0x0
-
-/* Add verbose debugging later, just print everything for now */
-
-void xhci_dbg_regs(struct xhci_hcd *xhci)
-{
- u32 temp;
-
- xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
- xhci->cap_regs);
- temp = readl(&xhci->cap_regs->hc_capbase);
- xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
- &xhci->cap_regs->hc_capbase, temp);
- xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
- (unsigned int) HC_LENGTH(temp));
- xhci_dbg(xhci, "// HCIVERSION: 0x%x\n",
- (unsigned int) HC_VERSION(temp));
-
- xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
-
- temp = readl(&xhci->cap_regs->run_regs_off);
- xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
- &xhci->cap_regs->run_regs_off,
- (unsigned int) temp & RTSOFF_MASK);
- xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
-
- temp = readl(&xhci->cap_regs->db_off);
- xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
- xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
-}
-
-static void xhci_print_cap_regs(struct xhci_hcd *xhci)
-{
- u32 temp;
- u32 hci_version;
-
- xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
-
- temp = readl(&xhci->cap_regs->hc_capbase);
- hci_version = HC_VERSION(temp);
- xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
- (unsigned int) temp);
- xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
- (unsigned int) HC_LENGTH(temp));
- xhci_dbg(xhci, "HCIVERSION: 0x%x\n", hci_version);
-
- temp = readl(&xhci->cap_regs->hcs_params1);
- xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
- (unsigned int) temp);
- xhci_dbg(xhci, " Max device slots: %u\n",
- (unsigned int) HCS_MAX_SLOTS(temp));
- xhci_dbg(xhci, " Max interrupters: %u\n",
- (unsigned int) HCS_MAX_INTRS(temp));
- xhci_dbg(xhci, " Max ports: %u\n",
- (unsigned int) HCS_MAX_PORTS(temp));
-
- temp = readl(&xhci->cap_regs->hcs_params2);
- xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
- (unsigned int) temp);
- xhci_dbg(xhci, " Isoc scheduling threshold: %u\n",
- (unsigned int) HCS_IST(temp));
- xhci_dbg(xhci, " Maximum allowed segments in event ring: %u\n",
- (unsigned int) HCS_ERST_MAX(temp));
-
- temp = readl(&xhci->cap_regs->hcs_params3);
- xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
- (unsigned int) temp);
- xhci_dbg(xhci, " Worst case U1 device exit latency: %u\n",
- (unsigned int) HCS_U1_LATENCY(temp));
- xhci_dbg(xhci, " Worst case U2 device exit latency: %u\n",
- (unsigned int) HCS_U2_LATENCY(temp));
-
- temp = readl(&xhci->cap_regs->hcc_params);
- xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
- xhci_dbg(xhci, " HC generates %s bit addresses\n",
- HCC_64BIT_ADDR(temp) ? "64" : "32");
- xhci_dbg(xhci, " HC %s Contiguous Frame ID Capability\n",
- HCC_CFC(temp) ? "has" : "hasn't");
- xhci_dbg(xhci, " HC %s generate Stopped - Short Package event\n",
- HCC_SPC(temp) ? "can" : "can't");
- /* FIXME */
- xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
-
- temp = readl(&xhci->cap_regs->run_regs_off);
- xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
-
- /* xhci 1.1 controllers have the HCCPARAMS2 register */
- if (hci_version > 0x100) {
- temp = readl(&xhci->cap_regs->hcc_params2);
- xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
- xhci_dbg(xhci, " HC %s Force save context capability",
- HCC2_FSC(temp) ? "supports" : "doesn't support");
- xhci_dbg(xhci, " HC %s Large ESIT Payload Capability",
- HCC2_LEC(temp) ? "supports" : "doesn't support");
- xhci_dbg(xhci, " HC %s Extended TBC capability",
- HCC2_ETC(temp) ? "supports" : "doesn't support");
- }
-}
-
-static void xhci_print_command_reg(struct xhci_hcd *xhci)
-{
- u32 temp;
-
- temp = readl(&xhci->op_regs->command);
- xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
- xhci_dbg(xhci, " HC is %s\n",
- (temp & CMD_RUN) ? "running" : "being stopped");
- xhci_dbg(xhci, " HC has %sfinished hard reset\n",
- (temp & CMD_RESET) ? "not " : "");
- xhci_dbg(xhci, " Event Interrupts %s\n",
- (temp & CMD_EIE) ? "enabled " : "disabled");
- xhci_dbg(xhci, " Host System Error Interrupts %s\n",
- (temp & CMD_HSEIE) ? "enabled " : "disabled");
- xhci_dbg(xhci, " HC has %sfinished light reset\n",
- (temp & CMD_LRESET) ? "not " : "");
-}
-
-static void xhci_print_status(struct xhci_hcd *xhci)
-{
- u32 temp;
-
- temp = readl(&xhci->op_regs->status);
- xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
- xhci_dbg(xhci, " Event ring is %sempty\n",
- (temp & STS_EINT) ? "not " : "");
- xhci_dbg(xhci, " %sHost System Error\n",
- (temp & STS_FATAL) ? "WARNING: " : "No ");
- xhci_dbg(xhci, " HC is %s\n",
- (temp & STS_HALT) ? "halted" : "running");
-}
-
-static void xhci_print_op_regs(struct xhci_hcd *xhci)
-{
- xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
- xhci_print_command_reg(xhci);
- xhci_print_status(xhci);
-}
-
-static void xhci_print_ports(struct xhci_hcd *xhci)
-{
- __le32 __iomem *addr;
- int i, j;
- int ports;
- char *names[NUM_PORT_REGS] = {
- "status",
- "power",
- "link",
- "reserved",
- };
-
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
- addr = &xhci->op_regs->port_status_base;
- for (i = 0; i < ports; i++) {
- for (j = 0; j < NUM_PORT_REGS; j++) {
- xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
- addr, names[j],
- (unsigned int) readl(addr));
- addr++;
- }
- }
-}
-
-void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
-{
- struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
- void __iomem *addr;
- u32 temp;
- u64 temp_64;
-
- addr = &ir_set->irq_pending;
- temp = readl(addr);
- if (temp == XHCI_INIT_VALUE)
- return;
-
- xhci_dbg(xhci, " %p: ir_set[%i]\n", ir_set, set_num);
-
- xhci_dbg(xhci, " %p: ir_set.pending = 0x%x\n", addr,
- (unsigned int)temp);
-
- addr = &ir_set->irq_control;
- temp = readl(addr);
- xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
- (unsigned int)temp);
-
- addr = &ir_set->erst_size;
- temp = readl(addr);
- xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
- (unsigned int)temp);
-
- addr = &ir_set->rsvd;
- temp = readl(addr);
- if (temp != XHCI_INIT_VALUE)
- xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
- addr, (unsigned int)temp);
-
- addr = &ir_set->erst_base;
- temp_64 = xhci_read_64(xhci, addr);
- xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
- addr, temp_64);
-
- addr = &ir_set->erst_dequeue;
- temp_64 = xhci_read_64(xhci, addr);
- xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
- addr, temp_64);
-}
-
-void xhci_print_run_regs(struct xhci_hcd *xhci)
-{
- u32 temp;
- int i;
-
- xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
- temp = readl(&xhci->run_regs->microframe_index);
- xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
- &xhci->run_regs->microframe_index,
- (unsigned int) temp);
- for (i = 0; i < 7; i++) {
- temp = readl(&xhci->run_regs->rsvd[i]);
- if (temp != XHCI_INIT_VALUE)
- xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
- &xhci->run_regs->rsvd[i],
- i, (unsigned int) temp);
- }
-}
-
-void xhci_print_registers(struct xhci_hcd *xhci)
-{
- xhci_print_cap_regs(xhci);
- xhci_print_op_regs(xhci);
- xhci_print_ports(xhci);
-}
-
-void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
-{
- u64 addr = erst->erst_dma_addr;
- int i;
- struct xhci_erst_entry *entry;
-
- for (i = 0; i < erst->num_entries; i++) {
- entry = &erst->entries[i];
- xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
- addr,
- lower_32_bits(le64_to_cpu(entry->seg_addr)),
- upper_32_bits(le64_to_cpu(entry->seg_addr)),
- le32_to_cpu(entry->seg_size),
- le32_to_cpu(entry->rsvd));
- addr += sizeof(*entry);
- }
-}
-
-void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
-{
- u64 val;
-
- val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
- lower_32_bits(val));
- xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
- upper_32_bits(val));
-}
-
char *xhci_get_slot_state(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
new file mode 100644
index 000000000000..a1ab8acf39ba
--- /dev/null
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -0,0 +1,996 @@
+/**
+ * xhci-dbgcap.c - xHCI debug capability support
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/nls.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+#include "xhci-dbgcap.h"
+
+static inline void *
+dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
+{
+ void *vaddr;
+
+ vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+ size, dma_handle, flags);
+ if (vaddr)
+ memset(vaddr, 0, size);
+ return vaddr;
+}
+
+static inline void
+dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ if (cpu_addr)
+ dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
+ size, cpu_addr, dma_handle);
+}
+
+static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
+{
+ struct usb_string_descriptor *s_desc;
+ u32 string_length;
+
+ /* Serial string: */
+ s_desc = (struct usb_string_descriptor *)strings->serial;
+ utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
+ UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
+ DBC_MAX_STRING_LENGTH);
+
+ s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
+ s_desc->bDescriptorType = USB_DT_STRING;
+ string_length = s_desc->bLength;
+ string_length <<= 8;
+
+ /* Product string: */
+ s_desc = (struct usb_string_descriptor *)strings->product;
+ utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
+ UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
+ DBC_MAX_STRING_LENGTH);
+
+ s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
+ s_desc->bDescriptorType = USB_DT_STRING;
+ string_length += s_desc->bLength;
+ string_length <<= 8;
+
+ /* Manufacturer string: */
+ s_desc = (struct usb_string_descriptor *)strings->manufacturer;
+ utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
+ strlen(DBC_STRING_MANUFACTURER),
+ UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
+ DBC_MAX_STRING_LENGTH);
+
+ s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
+ s_desc->bDescriptorType = USB_DT_STRING;
+ string_length += s_desc->bLength;
+ string_length <<= 8;
+
+ /* String0: */
+ strings->string0[0] = 4;
+ strings->string0[1] = USB_DT_STRING;
+ strings->string0[2] = 0x09;
+ strings->string0[3] = 0x04;
+ string_length += 4;
+
+ return string_length;
+}
+
+static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
+{
+ struct xhci_dbc *dbc;
+ struct dbc_info_context *info;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 dev_info;
+ dma_addr_t deq, dma;
+ unsigned int max_burst;
+
+ dbc = xhci->dbc;
+ if (!dbc)
+ return;
+
+ /* Populate info Context: */
+ info = (struct dbc_info_context *)dbc->ctx->bytes;
+ dma = dbc->string_dma;
+ info->string0 = cpu_to_le64(dma);
+ info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
+ info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
+ info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
+ info->length = cpu_to_le32(string_length);
+
+ /* Populate bulk out endpoint context: */
+ ep_ctx = dbc_bulkout_ctx(dbc);
+ max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+ deq = dbc_bulkout_enq(dbc);
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
+
+ /* Populate bulk in endpoint context: */
+ ep_ctx = dbc_bulkin_ctx(dbc);
+ deq = dbc_bulkin_enq(dbc);
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+
+ /* Set DbC context and info registers: */
+ xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);
+
+ dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
+ writel(dev_info, &dbc->regs->devinfo1);
+
+ dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
+ writel(dev_info, &dbc->regs->devinfo2);
+}
+
+static void xhci_dbc_giveback(struct dbc_request *req, int status)
+ __releases(&dbc->lock)
+ __acquires(&dbc->lock)
+{
+ struct dbc_ep *dep = req->dep;
+ struct xhci_dbc *dbc = dep->dbc;
+ struct xhci_hcd *xhci = dbc->xhci;
+ struct device *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;
+
+ list_del_init(&req->list_pending);
+ req->trb_dma = 0;
+ req->trb = NULL;
+
+ if (req->status == -EINPROGRESS)
+ req->status = status;
+
+ trace_xhci_dbc_giveback_request(req);
+
+ dma_unmap_single(dev,
+ req->dma,
+ req->length,
+ dbc_ep_dma_direction(dep));
+
+ /* Give back the transfer request: */
+ spin_unlock(&dbc->lock);
+ req->complete(xhci, req);
+ spin_lock(&dbc->lock);
+}
+
+static void xhci_dbc_flush_single_request(struct dbc_request *req)
+{
+ union xhci_trb *trb = req->trb;
+
+ trb->generic.field[0] = 0;
+ trb->generic.field[1] = 0;
+ trb->generic.field[2] = 0;
+ trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
+
+ xhci_dbc_giveback(req, -ESHUTDOWN);
+}
+
+static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
+{
+ struct dbc_request *req, *tmp;
+
+ list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
+ xhci_dbc_flush_single_request(req);
+}
+
+static void xhci_dbc_flush_reqests(struct xhci_dbc *dbc)
+{
+ xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
+ xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
+}
+
+struct dbc_request *
+dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
+{
+ struct dbc_request *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->dep = dep;
+ INIT_LIST_HEAD(&req->list_pending);
+ INIT_LIST_HEAD(&req->list_pool);
+ req->direction = dep->direction;
+
+ trace_xhci_dbc_alloc_request(req);
+
+ return req;
+}
+
+void
+dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
+{
+ trace_xhci_dbc_free_request(req);
+
+ kfree(req);
+}
+
+static void
+xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
+ u32 field2, u32 field3, u32 field4)
+{
+ union xhci_trb *trb, *next;
+
+ trb = ring->enqueue;
+ trb->generic.field[0] = cpu_to_le32(field1);
+ trb->generic.field[1] = cpu_to_le32(field2);
+ trb->generic.field[2] = cpu_to_le32(field3);
+ trb->generic.field[3] = cpu_to_le32(field4);
+
+ trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);
+
+ ring->num_trbs_free--;
+ next = ++(ring->enqueue);
+ if (TRB_TYPE_LINK_LE32(next->link.control)) {
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ ring->enqueue = ring->enq_seg->trbs;
+ ring->cycle_state ^= 1;
+ }
+}
+
+static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
+ struct dbc_request *req)
+{
+ u64 addr;
+ union xhci_trb *trb;
+ unsigned int num_trbs;
+ struct xhci_dbc *dbc = dep->dbc;
+ struct xhci_ring *ring = dep->ring;
+ u32 length, control, cycle;
+
+ num_trbs = count_trbs(req->dma, req->length);
+ WARN_ON(num_trbs != 1);
+ if (ring->num_trbs_free < num_trbs)
+ return -EBUSY;
+
+ addr = req->dma;
+ trb = ring->enqueue;
+ cycle = ring->cycle_state;
+ length = TRB_LEN(req->length);
+ control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
+
+ if (cycle)
+ control &= ~TRB_CYCLE;
+ else
+ control |= TRB_CYCLE;
+
+ req->trb = ring->enqueue;
+ req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+ xhci_dbc_queue_trb(ring,
+ lower_32_bits(addr),
+ upper_32_bits(addr),
+ length, control);
+
+ /*
+ * Add a barrier between writes of trb fields and flipping
+ * the cycle bit:
+ */
+ wmb();
+
+ if (cycle)
+ trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
+ else
+ trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+ writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);
+
+ return 0;
+}
+
+static int
+dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
+{
+ int ret;
+ struct device *dev;
+ struct xhci_dbc *dbc = dep->dbc;
+ struct xhci_hcd *xhci = dbc->xhci;
+
+ dev = xhci_to_hcd(xhci)->self.sysdev;
+
+ if (!req->length || !req->buf)
+ return -EINVAL;
+
+ req->actual = 0;
+ req->status = -EINPROGRESS;
+
+ req->dma = dma_map_single(dev,
+ req->buf,
+ req->length,
+ dbc_ep_dma_direction(dep));
+ if (dma_mapping_error(dev, req->dma)) {
+ xhci_err(xhci, "failed to map buffer\n");
+ return -EFAULT;
+ }
+
+ ret = xhci_dbc_queue_bulk_tx(dep, req);
+ if (ret) {
+ xhci_err(xhci, "failed to queue trbs\n");
+ dma_unmap_single(dev,
+ req->dma,
+ req->length,
+ dbc_ep_dma_direction(dep));
+ return -EFAULT;
+ }
+
+ list_add_tail(&req->list_pending, &dep->list_pending);
+
+ return 0;
+}
+
+int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
+ gfp_t gfp_flags)
+{
+ struct xhci_dbc *dbc = dep->dbc;
+ int ret = -ESHUTDOWN;
+
+ spin_lock(&dbc->lock);
+ if (dbc->state == DS_CONFIGURED)
+ ret = dbc_ep_do_queue(dep, req);
+ spin_unlock(&dbc->lock);
+
+ mod_delayed_work(system_wq, &dbc->event_work, 0);
+
+ trace_xhci_dbc_queue_request(req);
+
+ return ret;
+}
+
+static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
+{
+ struct dbc_ep *dep;
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ dep = &dbc->eps[direction];
+ dep->dbc = dbc;
+ dep->direction = direction;
+ dep->ring = direction ? dbc->ring_in : dbc->ring_out;
+
+ INIT_LIST_HEAD(&dep->list_pending);
+}
+
+static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
+{
+ xhci_dbc_do_eps_init(xhci, BULK_OUT);
+ xhci_dbc_do_eps_init(xhci, BULK_IN);
+}
+
+static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
+}
+
+static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+ int ret;
+ dma_addr_t deq;
+ u32 string_length;
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ /* Allocate various rings for events and transfers: */
+ dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
+ if (!dbc->ring_evt)
+ goto evt_fail;
+
+ dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
+ if (!dbc->ring_in)
+ goto in_fail;
+
+ dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
+ if (!dbc->ring_out)
+ goto out_fail;
+
+ /* Allocate and populate ERST: */
+ ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
+ if (ret)
+ goto erst_fail;
+
+ /* Allocate context data structure: */
+ dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
+ if (!dbc->ctx)
+ goto ctx_fail;
+
+ /* Allocate the string table: */
+ dbc->string_size = sizeof(struct dbc_str_descs);
+ dbc->string = dbc_dma_alloc_coherent(xhci,
+ dbc->string_size,
+ &dbc->string_dma,
+ flags);
+ if (!dbc->string)
+ goto string_fail;
+
+ /* Setup ERST register: */
+ writel(dbc->erst.erst_size, &dbc->regs->ersts);
+ xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
+ deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
+ dbc->ring_evt->dequeue);
+ xhci_write_64(xhci, deq, &dbc->regs->erdp);
+
+ /* Setup strings and contexts: */
+ string_length = xhci_dbc_populate_strings(dbc->string);
+ xhci_dbc_init_contexts(xhci, string_length);
+
+ mmiowb();
+
+ xhci_dbc_eps_init(xhci);
+ dbc->state = DS_INITIALIZED;
+
+ return 0;
+
+string_fail:
+ xhci_free_container_ctx(xhci, dbc->ctx);
+ dbc->ctx = NULL;
+ctx_fail:
+ xhci_free_erst(xhci, &dbc->erst);
+erst_fail:
+ xhci_ring_free(xhci, dbc->ring_out);
+ dbc->ring_out = NULL;
+out_fail:
+ xhci_ring_free(xhci, dbc->ring_in);
+ dbc->ring_in = NULL;
+in_fail:
+ xhci_ring_free(xhci, dbc->ring_evt);
+ dbc->ring_evt = NULL;
+evt_fail:
+ return -ENOMEM;
+}
+
+static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ if (!dbc)
+ return;
+
+ xhci_dbc_eps_exit(xhci);
+
+ if (dbc->string) {
+ dbc_dma_free_coherent(xhci,
+ dbc->string_size,
+ dbc->string, dbc->string_dma);
+ dbc->string = NULL;
+ }
+
+ xhci_free_container_ctx(xhci, dbc->ctx);
+ dbc->ctx = NULL;
+
+ xhci_free_erst(xhci, &dbc->erst);
+ xhci_ring_free(xhci, dbc->ring_out);
+ xhci_ring_free(xhci, dbc->ring_in);
+ xhci_ring_free(xhci, dbc->ring_evt);
+ dbc->ring_in = NULL;
+ dbc->ring_out = NULL;
+ dbc->ring_evt = NULL;
+}
+
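+/*
+ * Enable the DbC hardware: clear the control register and wait for the
+ * enable bit to read back as zero, set up DbC memory, then set the
+ * enable bits and wait for the controller to acknowledge. Called with
+ * dbc->lock held.
+ */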
+static int xhci_do_dbc_start(struct xhci_hcd *xhci)
+{
+ int ret;
+ u32 ctrl;
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ if (dbc->state != DS_DISABLED)
+ return -EINVAL;
+
+ writel(0, &dbc->regs->control);
+ ret = xhci_handshake(&dbc->regs->control,
+ DBC_CTRL_DBC_ENABLE,
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ ctrl = readl(&dbc->regs->control);
+ writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
+ &dbc->regs->control);
+ ret = xhci_handshake(&dbc->regs->control,
+ DBC_CTRL_DBC_ENABLE,
+ DBC_CTRL_DBC_ENABLE, 1000);
+ if (ret)
+ return ret;
+
+ dbc->state = DS_ENABLED;
+
+ return 0;
+}
+
+static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ if (dbc->state == DS_DISABLED)
+ return;
+
+ writel(0, &dbc->regs->control);
+ xhci_dbc_mem_cleanup(xhci);
+ dbc->state = DS_DISABLED;
+}
+
+static int xhci_dbc_start(struct xhci_hcd *xhci)
+{
+ int ret;
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ WARN_ON(!dbc);
+
+ pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);
+
+ spin_lock(&dbc->lock);
+ ret = xhci_do_dbc_start(xhci);
+ spin_unlock(&dbc->lock);
+
+ if (ret) {
+ pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
+ return ret;
+ }
+
+ return mod_delayed_work(system_wq, &dbc->event_work, 1);
+}
+
+static void xhci_dbc_stop(struct xhci_hcd *xhci)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+ struct dbc_port *port = &dbc->port;
+
+ WARN_ON(!dbc);
+
+ cancel_delayed_work_sync(&dbc->event_work);
+
+ if (port->registered)
+ xhci_dbc_tty_unregister_device(xhci);
+
+ spin_lock(&dbc->lock);
+ xhci_do_dbc_stop(xhci);
+ spin_unlock(&dbc->lock);
+
+ pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+}
+
+static void
+dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
+{
+ u32 portsc;
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ portsc = readl(&dbc->regs->portsc);
+ if (portsc & DBC_PORTSC_CONN_CHANGE)
+ xhci_info(xhci, "DbC port connect change\n");
+
+ if (portsc & DBC_PORTSC_RESET_CHANGE)
+ xhci_info(xhci, "DbC port reset change\n");
+
+ if (portsc & DBC_PORTSC_LINK_CHANGE)
+ xhci_info(xhci, "DbC port link status change\n");
+
+ if (portsc & DBC_PORTSC_CONFIG_CHANGE)
+ xhci_info(xhci, "DbC config error change\n");
+
+ /* The port reset change bit is cleared elsewhere: */
+ writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
+}
+
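+/*
+ * A transfer event carries the DMA address of the TRB it completes.
+ * Use it to look up the matching pending request, then give the
+ * request back with the number of bytes actually transferred.
+ */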
+static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
+{
+ struct dbc_ep *dep;
+ struct xhci_ring *ring;
+ int ep_id;
+ int status;
+ u32 comp_code;
+ size_t remain_length;
+ struct dbc_request *req = NULL, *r;
+
+ comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
+ remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
+ ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
+ dep = (ep_id == EPID_OUT) ?
+ get_out_ep(xhci) : get_in_ep(xhci);
+ ring = dep->ring;
+
+ switch (comp_code) {
+ case COMP_SUCCESS:
+ remain_length = 0;
+ /* FALLTHROUGH */
+ case COMP_SHORT_PACKET:
+ status = 0;
+ break;
+ case COMP_TRB_ERROR:
+ case COMP_BABBLE_DETECTED_ERROR:
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_STALL_ERROR:
+ xhci_warn(xhci, "tx error %d detected\n", comp_code);
+ status = -comp_code;
+ break;
+ default:
+ xhci_err(xhci, "unknown tx error %d\n", comp_code);
+ status = -comp_code;
+ break;
+ }
+
+ /* Match the pending request: */
+ list_for_each_entry(r, &dep->list_pending, list_pending) {
+ if (r->trb_dma == event->trans_event.buffer) {
+ req = r;
+ break;
+ }
+ }
+
+ if (!req) {
+ xhci_warn(xhci, "no matching request\n");
+ return;
+ }
+
+ trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
+
+ ring->num_trbs_free++;
+ req->actual = req->length - remain_length;
+ xhci_dbc_giveback(req, status);
+}
+
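+/*
+ * Poll-based DbC state machine, run from the event_work delayed work.
+ * It walks the hardware state (enabled -> connected -> configured,
+ * plus unplug, port reset and endpoint stall handling) and then drains
+ * the event ring. The return value tells the caller whether the tty
+ * device needs to be registered or unregistered.
+ */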
+static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+{
+ dma_addr_t deq;
+ struct dbc_ep *dep;
+ union xhci_trb *evt;
+ u32 ctrl, portsc;
+ struct xhci_hcd *xhci = dbc->xhci;
+ bool update_erdp = false;
+
+ /* DbC state machine: */
+ switch (dbc->state) {
+ case DS_DISABLED:
+ case DS_INITIALIZED:
+ return EVT_ERR;
+ case DS_ENABLED:
+ portsc = readl(&dbc->regs->portsc);
+ if (portsc & DBC_PORTSC_CONN_STATUS) {
+ dbc->state = DS_CONNECTED;
+ xhci_info(xhci, "DbC connected\n");
+ }
+
+ return EVT_DONE;
+ case DS_CONNECTED:
+ ctrl = readl(&dbc->regs->control);
+ if (ctrl & DBC_CTRL_DBC_RUN) {
+ dbc->state = DS_CONFIGURED;
+ xhci_info(xhci, "DbC configured\n");
+ portsc = readl(&dbc->regs->portsc);
+ writel(portsc, &dbc->regs->portsc);
+ return EVT_GSER;
+ }
+
+ return EVT_DONE;
+ case DS_CONFIGURED:
+ /* Handle cable unplug event: */
+ portsc = readl(&dbc->regs->portsc);
+ if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
+ !(portsc & DBC_PORTSC_CONN_STATUS)) {
+ xhci_info(xhci, "DbC cable unplugged\n");
+ dbc->state = DS_ENABLED;
+ xhci_dbc_flush_reqests(dbc);
+
+ return EVT_DISC;
+ }
+
+ /* Handle debug port reset event: */
+ if (portsc & DBC_PORTSC_RESET_CHANGE) {
+ xhci_info(xhci, "DbC port reset\n");
+ writel(portsc, &dbc->regs->portsc);
+ dbc->state = DS_ENABLED;
+ xhci_dbc_flush_reqests(dbc);
+
+ return EVT_DISC;
+ }
+
+ /* Handle endpoint stall event: */
+ ctrl = readl(&dbc->regs->control);
+ if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
+ (ctrl & DBC_CTRL_HALT_OUT_TR)) {
+ xhci_info(xhci, "DbC Endpoint stall\n");
+ dbc->state = DS_STALLED;
+
+ if (ctrl & DBC_CTRL_HALT_IN_TR) {
+ dep = get_in_ep(xhci);
+ xhci_dbc_flush_endpoint_requests(dep);
+ }
+
+ if (ctrl & DBC_CTRL_HALT_OUT_TR) {
+ dep = get_out_ep(xhci);
+ xhci_dbc_flush_endpoint_requests(dep);
+ }
+
+ return EVT_DONE;
+ }
+
+ /* Clear DbC run change bit: */
+ if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
+ writel(ctrl, &dbc->regs->control);
+ ctrl = readl(&dbc->regs->control);
+ }
+
+ break;
+ case DS_STALLED:
+ ctrl = readl(&dbc->regs->control);
+ if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
+ !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
+ (ctrl & DBC_CTRL_DBC_RUN)) {
+ dbc->state = DS_CONFIGURED;
+ break;
+ }
+
+ return EVT_DONE;
+ default:
+ xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
+ break;
+ }
+
+ /* Handle the events in the event ring: */
+ evt = dbc->ring_evt->dequeue;
+ while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
+ dbc->ring_evt->cycle_state) {
+ /*
+ * Add a barrier between reading the cycle flag and any
+ * reads of the event's flags/data below:
+ */
+ rmb();
+
+ trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);
+
+ switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
+ case TRB_TYPE(TRB_PORT_STATUS):
+ dbc_handle_port_status(xhci, evt);
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ dbc_handle_xfer_event(xhci, evt);
+ break;
+ default:
+ break;
+ }
+
+ inc_deq(xhci, dbc->ring_evt);
+ evt = dbc->ring_evt->dequeue;
+ update_erdp = true;
+ }
+
+ /* Update event ring dequeue pointer: */
+ if (update_erdp) {
+ deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
+ dbc->ring_evt->dequeue);
+ xhci_write_64(xhci, deq, &dbc->regs->erdp);
+ }
+
+ return EVT_DONE;
+}
+
+static void xhci_dbc_handle_events(struct work_struct *work)
+{
+ int ret;
+ enum evtreturn evtr;
+ struct xhci_dbc *dbc;
+ struct xhci_hcd *xhci;
+
+ dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
+ xhci = dbc->xhci;
+
+ spin_lock(&dbc->lock);
+ evtr = xhci_dbc_do_handle_events(dbc);
+ spin_unlock(&dbc->lock);
+
+ switch (evtr) {
+ case EVT_GSER:
+ ret = xhci_dbc_tty_register_device(xhci);
+ if (ret) {
+ xhci_err(xhci, "failed to register tty device\n");
+ break;
+ }
+
+ xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
+ break;
+ case EVT_DISC:
+ xhci_dbc_tty_unregister_device(xhci);
+ break;
+ case EVT_DONE:
+ break;
+ default:
+ xhci_info(xhci, "stop handling dbc events\n");
+ return;
+ }
+
+ mod_delayed_work(system_wq, &dbc->event_work, 1);
+}
+
+static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ kfree(xhci->dbc);
+ xhci->dbc = NULL;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
+static int xhci_do_dbc_init(struct xhci_hcd *xhci)
+{
+ u32 reg;
+ struct xhci_dbc *dbc;
+ unsigned long flags;
+ void __iomem *base;
+ int dbc_cap_offs;
+
+ base = &xhci->cap_regs->hc_capbase;
+ dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
+ if (!dbc_cap_offs)
+ return -ENODEV;
+
+ dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
+ if (!dbc)
+ return -ENOMEM;
+
+ dbc->regs = base + dbc_cap_offs;
+
+ /* Leave the DbC alone if it is already in use. */
+ reg = readl(&dbc->regs->control);
+ if (reg & DBC_CTRL_DBC_ENABLE) {
+ kfree(dbc);
+ return -EBUSY;
+ }
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci->dbc) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ kfree(dbc);
+ return -EBUSY;
+ }
+ xhci->dbc = dbc;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ dbc->xhci = xhci;
+ INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
+ spin_lock_init(&dbc->lock);
+
+ return 0;
+}
+
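+/*
+ * The "dbc" sysfs attribute reports the current DbC state and accepts
+ * the strings "enable" and "disable", e.g. (the exact path depends on
+ * the bus the host controller sits on):
+ *
+ *   echo enable > /sys/bus/pci/devices/<device>/dbc
+ */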
+static ssize_t dbc_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const char *p;
+ struct xhci_dbc *dbc;
+ struct xhci_hcd *xhci;
+
+ xhci = hcd_to_xhci(dev_get_drvdata(dev));
+ dbc = xhci->dbc;
+
+ switch (dbc->state) {
+ case DS_DISABLED:
+ p = "disabled";
+ break;
+ case DS_INITIALIZED:
+ p = "initialized";
+ break;
+ case DS_ENABLED:
+ p = "enabled";
+ break;
+ case DS_CONNECTED:
+ p = "connected";
+ break;
+ case DS_CONFIGURED:
+ p = "configured";
+ break;
+ case DS_STALLED:
+ p = "stalled";
+ break;
+ default:
+ p = "unknown";
+ }
+
+ return sprintf(buf, "%s\n", p);
+}
+
+static ssize_t dbc_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xhci_dbc *dbc;
+ struct xhci_hcd *xhci;
+
+ xhci = hcd_to_xhci(dev_get_drvdata(dev));
+ dbc = xhci->dbc;
+
+ if (!strncmp(buf, "enable", 6))
+ xhci_dbc_start(xhci);
+ else if (!strncmp(buf, "disable", 7))
+ xhci_dbc_stop(xhci);
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(dbc);
+
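+/* Called from xhci_run() when the host controller starts up. */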
+int xhci_dbc_init(struct xhci_hcd *xhci)
+{
+ int ret;
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+
+ ret = xhci_do_dbc_init(xhci);
+ if (ret)
+ goto init_err3;
+
+ ret = xhci_dbc_tty_register_driver(xhci);
+ if (ret)
+ goto init_err2;
+
+ ret = device_create_file(dev, &dev_attr_dbc);
+ if (ret)
+ goto init_err1;
+
+ return 0;
+
+init_err1:
+ xhci_dbc_tty_unregister_driver();
+init_err2:
+ xhci_do_dbc_exit(xhci);
+init_err3:
+ return ret;
+}
+
+void xhci_dbc_exit(struct xhci_hcd *xhci)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+
+ if (!xhci->dbc)
+ return;
+
+ device_remove_file(dev, &dev_attr_dbc);
+ xhci_dbc_tty_unregister_driver();
+ xhci_dbc_stop(xhci);
+ xhci_do_dbc_exit(xhci);
+}
+
+#ifdef CONFIG_PM
+int xhci_dbc_suspend(struct xhci_hcd *xhci)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ if (!dbc)
+ return 0;
+
+ if (dbc->state == DS_CONFIGURED)
+ dbc->resume_required = 1;
+
+ xhci_dbc_stop(xhci);
+
+ return 0;
+}
+
+int xhci_dbc_resume(struct xhci_hcd *xhci)
+{
+ int ret = 0;
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ if (!dbc)
+ return 0;
+
+ if (dbc->resume_required) {
+ dbc->resume_required = 0;
+ xhci_dbc_start(xhci);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
new file mode 100644
index 000000000000..e66ea0748ba3
--- /dev/null
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -0,0 +1,229 @@
+/**
+ * xhci-dbgcap.h - xHCI debug capability support
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+#ifndef __LINUX_XHCI_DBGCAP_H
+#define __LINUX_XHCI_DBGCAP_H
+
+#include <linux/tty.h>
+#include <linux/kfifo.h>
+
+struct dbc_regs {
+ __le32 capability;
+ __le32 doorbell;
+ __le32 ersts; /* Event Ring Segment Table Size */
+ __le32 __reserved_0; /* 0c~0f reserved bits */
+ __le64 erstba; /* Event Ring Segment Table Base Address */
+ __le64 erdp; /* Event Ring Dequeue Pointer */
+ __le32 control;
+ __le32 status;
+ __le32 portsc; /* Port status and control */
+ __le32 __reserved_1; /* 28~2b reserved bits */
+ __le64 dccp; /* Debug Capability Context Pointer */
+ __le32 devinfo1; /* Device Descriptor Info Register 1 */
+ __le32 devinfo2; /* Device Descriptor Info Register 2 */
+};
+
+struct dbc_info_context {
+ __le64 string0;
+ __le64 manufacturer;
+ __le64 product;
+ __le64 serial;
+ __le32 length;
+ __le32 __reserved_0[7];
+};
+
+#define DBC_CTRL_DBC_RUN BIT(0)
+#define DBC_CTRL_PORT_ENABLE BIT(1)
+#define DBC_CTRL_HALT_OUT_TR BIT(2)
+#define DBC_CTRL_HALT_IN_TR BIT(3)
+#define DBC_CTRL_DBC_RUN_CHANGE BIT(4)
+#define DBC_CTRL_DBC_ENABLE BIT(31)
+#define DBC_CTRL_MAXBURST(p) (((p) >> 16) & 0xff)
+#define DBC_DOOR_BELL_TARGET(p) (((p) & 0xff) << 8)
+
+#define DBC_MAX_PACKET 1024
+#define DBC_MAX_STRING_LENGTH 64
+#define DBC_STRING_MANUFACTURER "Linux Foundation"
+#define DBC_STRING_PRODUCT "Linux USB Debug Target"
+#define DBC_STRING_SERIAL "0001"
+#define DBC_CONTEXT_SIZE 64
+
+/*
+ * Port status:
+ */
+#define DBC_PORTSC_CONN_STATUS BIT(0)
+#define DBC_PORTSC_PORT_ENABLED BIT(1)
+#define DBC_PORTSC_CONN_CHANGE BIT(17)
+#define DBC_PORTSC_RESET_CHANGE BIT(21)
+#define DBC_PORTSC_LINK_CHANGE BIT(22)
+#define DBC_PORTSC_CONFIG_CHANGE BIT(23)
+
+struct dbc_str_descs {
+ char string0[DBC_MAX_STRING_LENGTH];
+ char manufacturer[DBC_MAX_STRING_LENGTH];
+ char product[DBC_MAX_STRING_LENGTH];
+ char serial[DBC_MAX_STRING_LENGTH];
+};
+
+#define DBC_PROTOCOL 1 /* GNU Remote Debug Command */
+#define DBC_VENDOR_ID 0x1d6b /* Linux Foundation 0x1d6b */
+#define DBC_PRODUCT_ID 0x0010 /* device 0010 */
+#define DBC_DEVICE_REV 0x0010 /* 0.10 */
+
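+/*
+ * DbC state machine, driven by xhci_dbc_do_handle_events():
+ * DISABLED -> INITIALIZED (memory set up) -> ENABLED (hardware on) ->
+ * CONNECTED (debug host attached) -> CONFIGURED (run bit set), with
+ * STALLED entered while an endpoint is halted.
+ */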
+enum dbc_state {
+ DS_DISABLED = 0,
+ DS_INITIALIZED,
+ DS_ENABLED,
+ DS_CONNECTED,
+ DS_CONFIGURED,
+ DS_STALLED,
+};
+
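+/*
+ * A dbc_request tracks one bulk transfer: the data buffer and its DMA
+ * mapping, the TRB the transfer was queued on, and the completion
+ * callback that gives the buffer back to its owner.
+ */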
+struct dbc_request {
+ void *buf;
+ unsigned int length;
+ dma_addr_t dma;
+ void (*complete)(struct xhci_hcd *xhci,
+ struct dbc_request *req);
+ struct list_head list_pool;
+ int status;
+ unsigned int actual;
+
+ struct dbc_ep *dep;
+ struct list_head list_pending;
+ dma_addr_t trb_dma;
+ union xhci_trb *trb;
+ unsigned direction:1;
+};
+
+struct dbc_ep {
+ struct xhci_dbc *dbc;
+ struct list_head list_pending;
+ struct xhci_ring *ring;
+ unsigned direction:1;
+};
+
+#define DBC_QUEUE_SIZE 16
+#define DBC_WRITE_BUF_SIZE 8192
+
+/*
+ * Private structure for DbC hardware state:
+ */
+struct dbc_port {
+ struct tty_port port;
+ spinlock_t port_lock; /* port access */
+
+ struct list_head read_pool;
+ struct list_head read_queue;
+ unsigned int n_read;
+ struct tasklet_struct push;
+
+ struct list_head write_pool;
+ struct kfifo write_fifo;
+
+ bool registered;
+ struct dbc_ep *in;
+ struct dbc_ep *out;
+};
+
+struct xhci_dbc {
+ spinlock_t lock; /* device access */
+ struct xhci_hcd *xhci;
+ struct dbc_regs __iomem *regs;
+ struct xhci_ring *ring_evt;
+ struct xhci_ring *ring_in;
+ struct xhci_ring *ring_out;
+ struct xhci_erst erst;
+ struct xhci_container_ctx *ctx;
+
+ struct dbc_str_descs *string;
+ dma_addr_t string_dma;
+ size_t string_size;
+
+ enum dbc_state state;
+ struct delayed_work event_work;
+ unsigned resume_required:1;
+ struct dbc_ep eps[2];
+
+ struct dbc_port port;
+};
+
+#define dbc_bulkout_ctx(d) \
+ ((struct xhci_ep_ctx *)((d)->ctx->bytes + DBC_CONTEXT_SIZE))
+#define dbc_bulkin_ctx(d) \
+ ((struct xhci_ep_ctx *)((d)->ctx->bytes + DBC_CONTEXT_SIZE * 2))
+#define dbc_bulkout_enq(d) \
+ xhci_trb_virt_to_dma((d)->ring_out->enq_seg, (d)->ring_out->enqueue)
+#define dbc_bulkin_enq(d) \
+ xhci_trb_virt_to_dma((d)->ring_in->enq_seg, (d)->ring_in->enqueue)
+#define dbc_epctx_info2(t, p, b) \
+ cpu_to_le32(EP_TYPE(t) | MAX_PACKET(p) | MAX_BURST(b))
+#define dbc_ep_dma_direction(d) \
+ ((d)->direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE)
+
+#define BULK_OUT 0
+#define BULK_IN 1
+#define EPID_OUT 2
+#define EPID_IN 3
+
+enum evtreturn {
+ EVT_ERR = -1,
+ EVT_DONE,
+ EVT_GSER,
+ EVT_DISC,
+};
+
+static inline struct dbc_ep *get_in_ep(struct xhci_hcd *xhci)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ return &dbc->eps[BULK_IN];
+}
+
+static inline struct dbc_ep *get_out_ep(struct xhci_hcd *xhci)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ return &dbc->eps[BULK_OUT];
+}
+
+#ifdef CONFIG_USB_XHCI_DBGCAP
+int xhci_dbc_init(struct xhci_hcd *xhci);
+void xhci_dbc_exit(struct xhci_hcd *xhci);
+int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci);
+void xhci_dbc_tty_unregister_driver(void);
+int xhci_dbc_tty_register_device(struct xhci_hcd *xhci);
+void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci);
+struct dbc_request *dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags);
+void dbc_free_request(struct dbc_ep *dep, struct dbc_request *req);
+int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req, gfp_t gfp_flags);
+#ifdef CONFIG_PM
+int xhci_dbc_suspend(struct xhci_hcd *xhci);
+int xhci_dbc_resume(struct xhci_hcd *xhci);
+#endif /* CONFIG_PM */
+#else
+static inline int xhci_dbc_init(struct xhci_hcd *xhci)
+{
+ return 0;
+}
+
+static inline void xhci_dbc_exit(struct xhci_hcd *xhci)
+{
+}
+
+static inline int xhci_dbc_suspend(struct xhci_hcd *xhci)
+{
+ return 0;
+}
+
+static inline int xhci_dbc_resume(struct xhci_hcd *xhci)
+{
+ return 0;
+}
+#endif /* CONFIG_USB_XHCI_DBGCAP */
+#endif /* __LINUX_XHCI_DBGCAP_H */
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
new file mode 100644
index 000000000000..8d47b6fbf973
--- /dev/null
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -0,0 +1,497 @@
+/**
+ * xhci-dbgtty.c - tty glue for xHCI debug capability
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include "xhci.h"
+#include "xhci-dbgcap.h"
+
+static unsigned int
+dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
+{
+ unsigned int len;
+
+ len = kfifo_len(&port->write_fifo);
+ if (len < size)
+ size = len;
+ if (size != 0)
+ size = kfifo_out(&port->write_fifo, packet, size);
+ return size;
+}
+
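+/*
+ * Pull data from the write fifo into requests taken from the write
+ * pool and queue them; port_lock is dropped around dbc_ep_queue().
+ * Stops at the first queue failure and wakes the tty if any data was
+ * consumed.
+ */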
+static int dbc_start_tx(struct dbc_port *port)
+ __releases(&port->port_lock)
+ __acquires(&port->port_lock)
+{
+ int len;
+ struct dbc_request *req;
+ int status = 0;
+ bool do_tty_wake = false;
+ struct list_head *pool = &port->write_pool;
+
+ while (!list_empty(pool)) {
+ req = list_entry(pool->next, struct dbc_request, list_pool);
+ len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
+ if (len == 0)
+ break;
+ do_tty_wake = true;
+
+ req->length = len;
+ list_del(&req->list_pool);
+
+ spin_unlock(&port->port_lock);
+ status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
+ spin_lock(&port->port_lock);
+
+ if (status) {
+ list_add(&req->list_pool, pool);
+ break;
+ }
+ }
+
+ if (do_tty_wake && port->port.tty)
+ tty_wakeup(port->port.tty);
+
+ return status;
+}
+
+static void dbc_start_rx(struct dbc_port *port)
+ __releases(&port->port_lock)
+ __acquires(&port->port_lock)
+{
+ struct dbc_request *req;
+ int status;
+ struct list_head *pool = &port->read_pool;
+
+ while (!list_empty(pool)) {
+ if (!port->port.tty)
+ break;
+
+ req = list_entry(pool->next, struct dbc_request, list_pool);
+ list_del(&req->list_pool);
+ req->length = DBC_MAX_PACKET;
+
+ spin_unlock(&port->port_lock);
+ status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
+ spin_lock(&port->port_lock);
+
+ if (status) {
+ list_add(&req->list_pool, pool);
+ break;
+ }
+ }
+}
+
+static void
+dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+ struct dbc_port *port = &dbc->port;
+
+ spin_lock(&port->port_lock);
+ list_add_tail(&req->list_pool, &port->read_queue);
+ tasklet_schedule(&port->push);
+ spin_unlock(&port->port_lock);
+}
+
+static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+ struct dbc_port *port = &dbc->port;
+
+ spin_lock(&port->port_lock);
+ list_add(&req->list_pool, &port->write_pool);
+ switch (req->status) {
+ case 0:
+ dbc_start_tx(port);
+ break;
+ case -ESHUTDOWN:
+ break;
+ default:
+ xhci_warn(xhci, "unexpected write complete status %d\n",
+ req->status);
+ break;
+ }
+ spin_unlock(&port->port_lock);
+}
+
+static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
+{
+ kfree(req->buf);
+ dbc_free_request(dep, req);
+}
+
+static int
+xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
+ void (*fn)(struct xhci_hcd *, struct dbc_request *))
+{
+ int i;
+ struct dbc_request *req;
+
+ for (i = 0; i < DBC_QUEUE_SIZE; i++) {
+ req = dbc_alloc_request(dep, GFP_ATOMIC);
+ if (!req)
+ break;
+
+ req->length = DBC_MAX_PACKET;
+ req->buf = kmalloc(req->length, GFP_KERNEL);
+ if (!req->buf) {
+ xhci_dbc_free_req(dep, req);
+ break;
+ }
+
+ req->complete = fn;
+ list_add_tail(&req->list_pool, head);
+ }
+
+ return list_empty(head) ? -ENOMEM : 0;
+}
+
+static void
+xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
+{
+ struct dbc_request *req;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct dbc_request, list_pool);
+ list_del(&req->list_pool);
+ xhci_dbc_free_req(dep, req);
+ }
+}
+
+static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct dbc_port *port = driver->driver_state;
+
+ tty->driver_data = port;
+
+ return tty_port_install(&port->port, driver, tty);
+}
+
+static int dbc_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct dbc_port *port = tty->driver_data;
+
+ return tty_port_open(&port->port, tty, file);
+}
+
+static void dbc_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct dbc_port *port = tty->driver_data;
+
+ tty_port_close(&port->port, tty, file);
+}
+
+static int dbc_tty_write(struct tty_struct *tty,
+ const unsigned char *buf,
+ int count)
+{
+ struct dbc_port *port = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (count)
+ count = kfifo_in(&port->write_fifo, buf, count);
+ dbc_start_tx(port);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return count;
+}
+
+static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct dbc_port *port = tty->driver_data;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ status = kfifo_put(&port->write_fifo, ch);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return status;
+}
+
+static void dbc_tty_flush_chars(struct tty_struct *tty)
+{
+ struct dbc_port *port = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ dbc_start_tx(port);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int dbc_tty_write_room(struct tty_struct *tty)
+{
+ struct dbc_port *port = tty->driver_data;
+ unsigned long flags;
+ int room = 0;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ room = kfifo_avail(&port->write_fifo);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return room;
+}
+
+static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct dbc_port *port = tty->driver_data;
+ unsigned long flags;
+ int chars = 0;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ chars = kfifo_len(&port->write_fifo);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return chars;
+}
+
+static void dbc_tty_unthrottle(struct tty_struct *tty)
+{
+ struct dbc_port *port = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ tasklet_schedule(&port->push);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static const struct tty_operations dbc_tty_ops = {
+ .install = dbc_tty_install,
+ .open = dbc_tty_open,
+ .close = dbc_tty_close,
+ .write = dbc_tty_write,
+ .put_char = dbc_tty_put_char,
+ .flush_chars = dbc_tty_flush_chars,
+ .write_room = dbc_tty_write_room,
+ .chars_in_buffer = dbc_tty_chars_in_buffer,
+ .unthrottle = dbc_tty_unthrottle,
+};
+
+static struct tty_driver *dbc_tty_driver;
+
+int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
+{
+ int status;
+ struct xhci_dbc *dbc = xhci->dbc;
+
+ dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV);
+ if (IS_ERR(dbc_tty_driver)) {
+ status = PTR_ERR(dbc_tty_driver);
+ dbc_tty_driver = NULL;
+ return status;
+ }
+
+ dbc_tty_driver->driver_name = "dbc_serial";
+ dbc_tty_driver->name = "ttyDBC";
+
+ dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ dbc_tty_driver->init_termios = tty_std_termios;
+ dbc_tty_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ dbc_tty_driver->init_termios.c_ispeed = 9600;
+ dbc_tty_driver->init_termios.c_ospeed = 9600;
+ dbc_tty_driver->driver_state = &dbc->port;
+
+ tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
+
+ status = tty_register_driver(dbc_tty_driver);
+ if (status) {
+ xhci_err(xhci,
+ "can't register dbc tty driver, err %d\n", status);
+ put_tty_driver(dbc_tty_driver);
+ dbc_tty_driver = NULL;
+ }
+
+ return status;
+}
+
+void xhci_dbc_tty_unregister_driver(void)
+{
+ tty_unregister_driver(dbc_tty_driver);
+ put_tty_driver(dbc_tty_driver);
+ dbc_tty_driver = NULL;
+}
+
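+/*
+ * Tasklet that pushes completed read requests into the tty flip
+ * buffer. If the tty is throttled or the flip buffer fills up, the
+ * partial progress is remembered in n_read and retried later; fully
+ * consumed requests go back to the read pool.
+ */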
+static void dbc_rx_push(unsigned long _port)
+{
+ struct dbc_request *req;
+ struct tty_struct *tty;
+ bool do_push = false;
+ bool disconnect = false;
+ struct dbc_port *port = (void *)_port;
+ struct list_head *queue = &port->read_queue;
+
+ spin_lock_irq(&port->port_lock);
+ tty = port->port.tty;
+ while (!list_empty(queue)) {
+ req = list_first_entry(queue, struct dbc_request, list_pool);
+
+ if (tty && tty_throttled(tty))
+ break;
+
+ switch (req->status) {
+ case 0:
+ break;
+ case -ESHUTDOWN:
+ disconnect = true;
+ break;
+ default:
+ pr_warn("ttyDBC0: unexpected RX status %d\n",
+ req->status);
+ break;
+ }
+
+ if (req->actual) {
+ char *packet = req->buf;
+ unsigned int n, size = req->actual;
+ int count;
+
+ n = port->n_read;
+ if (n) {
+ packet += n;
+ size -= n;
+ }
+
+ count = tty_insert_flip_string(&port->port, packet,
+ size);
+ if (count)
+ do_push = true;
+ if (count != size) {
+ port->n_read += count;
+ break;
+ }
+ port->n_read = 0;
+ }
+
+ list_move(&req->list_pool, &port->read_pool);
+ }
+
+ if (do_push)
+ tty_flip_buffer_push(&port->port);
+
+ if (!list_empty(queue) && tty) {
+ if (!tty_throttled(tty)) {
+ if (do_push)
+ tasklet_schedule(&port->push);
+ else
+ pr_warn("ttyDBC0: RX not scheduled?\n");
+ }
+ }
+
+ if (!disconnect)
+ dbc_start_rx(port);
+
+ spin_unlock_irq(&port->port_lock);
+}
+
+static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
+{
+ struct dbc_port *port = container_of(_port, struct dbc_port, port);
+
+ spin_lock_irq(&port->port_lock);
+ dbc_start_rx(port);
+ spin_unlock_irq(&port->port_lock);
+
+ return 0;
+}
+
+static const struct tty_port_operations dbc_port_ops = {
+ .activate = dbc_port_activate,
+};
+
+static void
+xhci_dbc_tty_init_port(struct xhci_hcd *xhci, struct dbc_port *port)
+{
+ tty_port_init(&port->port);
+ spin_lock_init(&port->port_lock);
+ tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
+ INIT_LIST_HEAD(&port->read_pool);
+ INIT_LIST_HEAD(&port->read_queue);
+ INIT_LIST_HEAD(&port->write_pool);
+
+ port->in = get_in_ep(xhci);
+ port->out = get_out_ep(xhci);
+ port->port.ops = &dbc_port_ops;
+ port->n_read = 0;
+}
+
+static void
+xhci_dbc_tty_exit_port(struct dbc_port *port)
+{
+ tasklet_kill(&port->push);
+ tty_port_destroy(&port->port);
+}
+
+int xhci_dbc_tty_register_device(struct xhci_hcd *xhci)
+{
+ int ret;
+ struct device *tty_dev;
+ struct xhci_dbc *dbc = xhci->dbc;
+ struct dbc_port *port = &dbc->port;
+
+ xhci_dbc_tty_init_port(xhci, port);
+ tty_dev = tty_port_register_device(&port->port,
+ dbc_tty_driver, 0, NULL);
+ if (IS_ERR(tty_dev)) {
+ ret = PTR_ERR(tty_dev);
+ goto register_fail;
+ }
+
+ ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
+ if (ret)
+ goto buf_alloc_fail;
+
+ ret = xhci_dbc_alloc_requests(port->in, &port->read_pool,
+ dbc_read_complete);
+ if (ret)
+ goto request_fail;
+
+ ret = xhci_dbc_alloc_requests(port->out, &port->write_pool,
+ dbc_write_complete);
+ if (ret)
+ goto request_fail;
+
+ port->registered = true;
+
+ return 0;
+
+request_fail:
+ xhci_dbc_free_requests(port->in, &port->read_pool);
+ xhci_dbc_free_requests(port->out, &port->write_pool);
+ kfifo_free(&port->write_fifo);
+
+buf_alloc_fail:
+ tty_unregister_device(dbc_tty_driver, 0);
+
+register_fail:
+ xhci_dbc_tty_exit_port(port);
+
+ xhci_err(xhci, "can't register tty port, err %d\n", ret);
+
+ return ret;
+}
+
+void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci)
+{
+ struct xhci_dbc *dbc = xhci->dbc;
+ struct dbc_port *port = &dbc->port;
+
+ tty_unregister_device(dbc_tty_driver, 0);
+ xhci_dbc_tty_exit_port(port);
+ port->registered = false;
+
+ kfifo_free(&port->write_fifo);
+ xhci_dbc_free_requests(get_in_ep(xhci), &port->read_pool);
+ xhci_dbc_free_requests(get_in_ep(xhci), &port->read_queue);
+ xhci_dbc_free_requests(get_out_ep(xhci), &port->write_pool);
+}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 2a90229be7a6..46d5e08f05f1 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -388,7 +388,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
trace_xhci_stop_device(virt_dev);
- cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+ cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
if (!cmd)
return -ENOMEM;
@@ -404,8 +404,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING)
continue;
- command = xhci_alloc_command(xhci, false, false,
- GFP_NOWAIT);
+ command = xhci_alloc_command(xhci, false, GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
ret = -ENOMEM;
@@ -1077,6 +1076,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
retval = -ENODEV;
break;
}
+ trace_xhci_get_port_status(wIndex, temp);
status = xhci_get_port_status(hcd, bus_state, port_array,
wIndex, temp, flags);
if (status == 0xffffffff)
@@ -1443,6 +1443,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
retval = -ENODEV;
break;
}
+ trace_xhci_hub_status_data(i, temp);
+
if ((temp & mask) != 0 ||
(bus_state->port_c_suspend & 1 << i) ||
(bus_state->resume_done[i] && time_after_eq(
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3a29b32a3bd0..332420d10be9 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -357,7 +357,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
* Set the end flag and the cycle toggle bit on the last segment.
* See section 4.9.1 and figures 15 and 16.
*/
-static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
unsigned int num_segs, unsigned int cycle_state,
enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
@@ -454,7 +454,7 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
return 0;
}
-static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
struct xhci_container_ctx *ctx;
@@ -479,7 +479,7 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci
return ctx;
}
-static void xhci_free_container_ctx(struct xhci_hcd *xhci,
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (!ctx)
@@ -650,7 +650,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
/* Allocate everything needed to free the stream rings later */
stream_info->free_streams_command =
- xhci_alloc_command(xhci, true, true, mem_flags);
+ xhci_alloc_command_with_ctx(xhci, true, mem_flags);
if (!stream_info->free_streams_command)
goto cleanup_ctx;
@@ -1715,8 +1715,7 @@ static void scratchpad_free(struct xhci_hcd *xhci)
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
- bool allocate_in_ctx, bool allocate_completion,
- gfp_t mem_flags)
+ bool allocate_completion, gfp_t mem_flags)
{
struct xhci_command *command;
@@ -1724,21 +1723,10 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
if (!command)
return NULL;
- if (allocate_in_ctx) {
- command->in_ctx =
- xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
- mem_flags);
- if (!command->in_ctx) {
- kfree(command);
- return NULL;
- }
- }
-
if (allocate_completion) {
command->completion =
kzalloc(sizeof(struct completion), mem_flags);
if (!command->completion) {
- xhci_free_container_ctx(xhci, command->in_ctx);
kfree(command);
return NULL;
}
@@ -1750,6 +1738,25 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
return command;
}
+struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
+ bool allocate_completion, gfp_t mem_flags)
+{
+ struct xhci_command *command;
+
+ command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
+ if (!command)
+ return NULL;
+
+ command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+ mem_flags);
+ if (!command->in_ctx) {
+ kfree(command->completion);
+ kfree(command);
+ return NULL;
+ }
+ return command;
+}
+
void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
kfree(urb_priv);
@@ -1764,21 +1771,58 @@ void xhci_free_command(struct xhci_hcd *xhci,
kfree(command);
}
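+/*
+ * Allocate an event ring segment table (ERST) and point one entry at
+ * each segment of the given event ring.
+ */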
+int xhci_alloc_erst(struct xhci_hcd *xhci,
+ struct xhci_ring *evt_ring,
+ struct xhci_erst *erst,
+ gfp_t flags)
+{
+ size_t size;
+ unsigned int val;
+ struct xhci_segment *seg;
+ struct xhci_erst_entry *entry;
+
+ size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
+ erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+ size, &erst->erst_dma_addr, flags);
+ if (!erst->entries)
+ return -ENOMEM;
+
+ erst->num_entries = evt_ring->num_segs;
+
+ seg = evt_ring->first_seg;
+ for (val = 0; val < evt_ring->num_segs; val++) {
+ entry = &erst->entries[val];
+ entry->seg_addr = cpu_to_le64(seg->dma);
+ entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+ entry->rsvd = 0;
+ seg = seg->next;
+ }
+
+ return 0;
+}
+
+void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+ size_t size;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+ size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
+ if (erst->entries)
+ dma_free_coherent(dev, size,
+ erst->entries,
+ erst->erst_dma_addr);
+ erst->entries = NULL;
+}
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- int size;
int i, j, num_ports;
cancel_delayed_work_sync(&xhci->cmd_timer);
- /* Free the Event Ring Segment Table and the actual Event Ring */
- size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
- if (xhci->erst.entries)
- dma_free_coherent(dev, size,
- xhci->erst.entries, xhci->erst.erst_dma_addr);
- xhci->erst.entries = NULL;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
+ xhci_free_erst(xhci, &xhci->erst);
+
if (xhci->event_ring)
xhci_ring_free(xhci, xhci->event_ring);
xhci->event_ring = NULL;
@@ -2315,9 +2359,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
unsigned int val, val2;
u64 val_64;
- struct xhci_segment *seg;
- u32 page_size, temp;
- int i;
+ u32 page_size, temp;
+ int i, ret;
INIT_LIST_HEAD(&xhci->cmd_list);
@@ -2421,9 +2464,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%016llx", val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
- xhci_dbg_cmd_ptrs(xhci);
- xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
+ xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
if (!xhci->lpm_command)
goto fail;
@@ -2439,8 +2481,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
"// Doorbell array is located at offset 0x%x"
" from cap regs base addr", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
- xhci_dbg_regs(xhci);
- xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
xhci->ir_set = &xhci->run_regs->ir_set[0];
@@ -2456,32 +2496,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (xhci_check_trb_in_td_math(xhci) < 0)
goto fail;
- xhci->erst.entries = dma_alloc_coherent(dev,
- sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
- flags);
- if (!xhci->erst.entries)
+ ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
+ if (ret)
goto fail;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Allocated event ring segment table at 0x%llx",
- (unsigned long long)dma);
-
- memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
- xhci->erst.num_entries = ERST_NUM_SEGS;
- xhci->erst.erst_dma_addr = dma;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
- xhci->erst.num_entries,
- xhci->erst.entries,
- (unsigned long long)xhci->erst.erst_dma_addr);
-
- /* set ring base address and size for each segment table entry */
- for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
- struct xhci_erst_entry *entry = &xhci->erst.entries[val];
- entry->seg_addr = cpu_to_le64(seg->dma);
- entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
- entry->rsvd = 0;
- seg = seg->next;
- }
/* set ERST count with the number of entries in the segment table */
val = readl(&xhci->ir_set->erst_size);
@@ -2507,7 +2524,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_set_hc_event_deq(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wrote ERST address to ir_set 0.");
- xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index b62a1d23244b..b0ab4d5e2751 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -57,26 +57,21 @@
/* u2_phy_pll register */
#define CTRL_U2_FORCE_PLL_STB BIT(28)
-#define PERI_WK_CTRL0 0x400
-#define UWK_CTR0_0P_LS_PE BIT(8) /* posedge */
-#define UWK_CTR0_0P_LS_NE BIT(7) /* negedge for 0p linestate*/
-#define UWK_CTL1_1P_LS_C(x) (((x) & 0xf) << 1)
-#define UWK_CTL1_1P_LS_E BIT(0)
-
-#define PERI_WK_CTRL1 0x404
-#define UWK_CTL1_IS_C(x) (((x) & 0xf) << 26)
-#define UWK_CTL1_IS_E BIT(25)
-#define UWK_CTL1_0P_LS_C(x) (((x) & 0xf) << 21)
-#define UWK_CTL1_0P_LS_E BIT(20)
-#define UWK_CTL1_IDDIG_C(x) (((x) & 0xf) << 11) /* cycle debounce */
-#define UWK_CTL1_IDDIG_E BIT(10) /* enable debounce */
-#define UWK_CTL1_IDDIG_P BIT(9) /* polarity */
-#define UWK_CTL1_0P_LS_P BIT(7)
-#define UWK_CTL1_IS_P BIT(6) /* polarity for ip sleep */
-
-enum ssusb_wakeup_src {
- SSUSB_WK_IP_SLEEP = 1,
- SSUSB_WK_LINE_STATE = 2,
+/* usb remote wakeup registers in syscon */
+/* mt8173 etc */
+#define PERI_WK_CTRL1 0x4
+#define WC1_IS_C(x) (((x) & 0xf) << 26) /* cycle debounce */
+#define WC1_IS_EN BIT(25)
+#define WC1_IS_P BIT(6) /* polarity for ip sleep */
+
+/* mt2712 etc */
+#define PERI_SSUSB_SPM_CTRL 0x0
+#define SSC_IP_SLEEP_EN BIT(4)
+#define SSC_SPM_INT_EN BIT(1)
+
+enum ssusb_uwk_vers {
+ SSUSB_UWK_V1 = 1,
+ SSUSB_UWK_V2,
};
static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
@@ -296,112 +291,58 @@ static void xhci_mtk_clks_disable(struct xhci_hcd_mtk *mtk)
}
/* only clocks can be turned off for ip-sleep wakeup mode */
-static void usb_wakeup_ip_sleep_en(struct xhci_hcd_mtk *mtk)
+static void usb_wakeup_ip_sleep_set(struct xhci_hcd_mtk *mtk, bool enable)
{
- u32 tmp;
- struct regmap *pericfg = mtk->pericfg;
-
- regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
- tmp &= ~UWK_CTL1_IS_P;
- tmp &= ~(UWK_CTL1_IS_C(0xf));
- tmp |= UWK_CTL1_IS_C(0x8);
- regmap_write(pericfg, PERI_WK_CTRL1, tmp);
- regmap_write(pericfg, PERI_WK_CTRL1, tmp | UWK_CTL1_IS_E);
-
- regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
- dev_dbg(mtk->dev, "%s(): WK_CTRL1[P6,E25,C26:29]=%#x\n",
- __func__, tmp);
+ u32 reg, msk, val;
+
+ switch (mtk->uwk_vers) {
+ case SSUSB_UWK_V1:
+ reg = mtk->uwk_reg_base + PERI_WK_CTRL1;
+ msk = WC1_IS_EN | WC1_IS_C(0xf) | WC1_IS_P;
+ val = enable ? (WC1_IS_EN | WC1_IS_C(0x8)) : 0;
+ break;
+ case SSUSB_UWK_V2:
+ reg = mtk->uwk_reg_base + PERI_SSUSB_SPM_CTRL;
+ msk = SSC_IP_SLEEP_EN | SSC_SPM_INT_EN;
+ val = enable ? msk : 0;
+ break;
+ default:
+ return;
+ }
+ regmap_update_bits(mtk->uwk, reg, msk, val);
}
-static void usb_wakeup_ip_sleep_dis(struct xhci_hcd_mtk *mtk)
+static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
+ struct device_node *dn)
{
- u32 tmp;
+ struct of_phandle_args args;
+ int ret;
- regmap_read(mtk->pericfg, PERI_WK_CTRL1, &tmp);
- tmp &= ~UWK_CTL1_IS_E;
- regmap_write(mtk->pericfg, PERI_WK_CTRL1, tmp);
-}
+ /* Wakeup function is optional */
+ mtk->uwk_en = of_property_read_bool(dn, "wakeup-source");
+ if (!mtk->uwk_en)
+ return 0;
-/*
-* for line-state wakeup mode, phy's power should not power-down
-* and only support cable plug in/out
-*/
-static void usb_wakeup_line_state_en(struct xhci_hcd_mtk *mtk)
-{
- u32 tmp;
- struct regmap *pericfg = mtk->pericfg;
-
- /* line-state of u2-port0 */
- regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
- tmp &= ~UWK_CTL1_0P_LS_P;
- tmp &= ~(UWK_CTL1_0P_LS_C(0xf));
- tmp |= UWK_CTL1_0P_LS_C(0x8);
- regmap_write(pericfg, PERI_WK_CTRL1, tmp);
- regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
- regmap_write(pericfg, PERI_WK_CTRL1, tmp | UWK_CTL1_0P_LS_E);
-
- /* line-state of u2-port1 */
- regmap_read(pericfg, PERI_WK_CTRL0, &tmp);
- tmp &= ~(UWK_CTL1_1P_LS_C(0xf));
- tmp |= UWK_CTL1_1P_LS_C(0x8);
- regmap_write(pericfg, PERI_WK_CTRL0, tmp);
- regmap_write(pericfg, PERI_WK_CTRL0, tmp | UWK_CTL1_1P_LS_E);
-}
+ ret = of_parse_phandle_with_fixed_args(dn,
+ "mediatek,syscon-wakeup", 2, 0, &args);
+ if (ret)
+ return ret;
-static void usb_wakeup_line_state_dis(struct xhci_hcd_mtk *mtk)
-{
- u32 tmp;
- struct regmap *pericfg = mtk->pericfg;
-
- /* line-state of u2-port0 */
- regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
- tmp &= ~UWK_CTL1_0P_LS_E;
- regmap_write(pericfg, PERI_WK_CTRL1, tmp);
-
- /* line-state of u2-port1 */
- regmap_read(pericfg, PERI_WK_CTRL0, &tmp);
- tmp &= ~UWK_CTL1_1P_LS_E;
- regmap_write(pericfg, PERI_WK_CTRL0, tmp);
-}
+ mtk->uwk_reg_base = args.args[0];
+ mtk->uwk_vers = args.args[1];
+ mtk->uwk = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ dev_info(mtk->dev, "uwk - reg:0x%x, version:%d\n",
+ mtk->uwk_reg_base, mtk->uwk_vers);
-static void usb_wakeup_enable(struct xhci_hcd_mtk *mtk)
-{
- if (mtk->wakeup_src == SSUSB_WK_IP_SLEEP)
- usb_wakeup_ip_sleep_en(mtk);
- else if (mtk->wakeup_src == SSUSB_WK_LINE_STATE)
- usb_wakeup_line_state_en(mtk);
-}
+ return PTR_ERR_OR_ZERO(mtk->uwk);
-static void usb_wakeup_disable(struct xhci_hcd_mtk *mtk)
-{
- if (mtk->wakeup_src == SSUSB_WK_IP_SLEEP)
- usb_wakeup_ip_sleep_dis(mtk);
- else if (mtk->wakeup_src == SSUSB_WK_LINE_STATE)
- usb_wakeup_line_state_dis(mtk);
}
-static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
- struct device_node *dn)
+static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
{
- struct device *dev = mtk->dev;
-
- /*
- * wakeup function is optional, so it is not an error if this property
- * does not exist, and in such case, no need to get relative
- * properties anymore.
- */
- of_property_read_u32(dn, "mediatek,wakeup-src", &mtk->wakeup_src);
- if (!mtk->wakeup_src)
- return 0;
-
- mtk->pericfg = syscon_regmap_lookup_by_phandle(dn,
- "mediatek,syscon-wakeup");
- if (IS_ERR(mtk->pericfg)) {
- dev_err(dev, "fail to get pericfg regs\n");
- return PTR_ERR(mtk->pericfg);
- }
-
- return 0;
+ if (mtk->uwk_en)
+ usb_wakeup_ip_sleep_set(mtk, enable);
}
static int xhci_mtk_setup(struct usb_hcd *hcd);
@@ -583,8 +524,10 @@ static int xhci_mtk_probe(struct platform_device *pdev)
&mtk->u3p_dis_msk);
ret = usb_wakeup_of_property_parse(mtk, node);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to parse uwk property\n");
return ret;
+ }
mtk->num_phys = of_count_phandle_with_args(node,
"phys", "#phy-cells");
@@ -674,6 +617,15 @@ static int xhci_mtk_probe(struct platform_device *pdev)
xhci = hcd_to_xhci(hcd);
xhci->main_hcd = hcd;
+
+ /*
+ * imod_interval is the interrupt moderation value in nanoseconds.
+ * On MTK controllers the moderation increment interval is 8 times
+ * the value defined in the xHCI spec.
+ */
+ xhci->imod_interval = 5000;
+ device_property_read_u32(dev, "imod-interval-ns", &xhci->imod_interval);
+
xhci->shared_hcd = usb_create_shared_hcd(driver, dev,
dev_name(dev), hcd);
if (!xhci->shared_hcd) {
@@ -768,7 +720,7 @@ static int __maybe_unused xhci_mtk_suspend(struct device *dev)
xhci_mtk_host_disable(mtk);
xhci_mtk_phy_power_off(mtk);
xhci_mtk_clks_disable(mtk);
- usb_wakeup_enable(mtk);
+ usb_wakeup_set(mtk, true);
return 0;
}
@@ -778,7 +730,7 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
struct usb_hcd *hcd = mtk->hcd;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- usb_wakeup_disable(mtk);
+ usb_wakeup_set(mtk, false);
xhci_mtk_clks_enable(mtk);
xhci_mtk_phy_power_on(mtk);
xhci_mtk_host_enable(mtk);
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 6b74ce5b7564..cc59d80b663b 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -122,8 +122,12 @@ struct xhci_hcd_mtk {
struct regmap *pericfg;
struct phy **phys;
int num_phys;
- int wakeup_src;
bool lpm_support;
+ /* usb remote wakeup */
+ bool uwk_en;
+ struct regmap *uwk;
+ u32 uwk_reg_base;
+ u32 uwk_vers;
};
static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1aad89b8aba0..6c79037876db 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -237,6 +237,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (!xhci->sbrn)
pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
+ /* imod_interval is the interrupt moderation value in nanoseconds. */
+ xhci->imod_interval = 40000;
+
retval = xhci_gen_setup(hcd, xhci_pci_quirks);
if (retval)
return retval;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 09f164f8cf8c..6f038306c14d 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -269,6 +269,11 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
xhci->quirks |= XHCI_BROKEN_PORT_PED;
+ /* imod_interval is the interrupt moderation value in nanoseconds. */
+ xhci->imod_interval = 40000;
+ device_property_read_u32(sysdev, "imod-interval-ns",
+ &xhci->imod_interval);
+
hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c5cbc685c691..daa94c3aed80 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -153,7 +153,7 @@ static void next_trb(struct xhci_hcd *xhci,
* See Cycle bit rules. SW is the consumer for the event ring only.
* Don't make a ring full of link TRBs. That would be dumb and this would loop.
*/
-static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
+void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
/* event ring doesn't have link trbs, check for last trb */
if (ring->type == TYPE_EVENT) {
@@ -1141,7 +1141,7 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
struct xhci_command *command;
- command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command)
return;
@@ -1821,7 +1821,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
struct xhci_command *command;
- command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command)
return;
@@ -1878,12 +1878,10 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_ring *ep_ring, int *status)
{
- struct urb_priv *urb_priv;
struct urb *urb = NULL;
/* Clean up the endpoint's TD list */
urb = td->urb;
- urb_priv = urb->hcpriv;
/* if a bounce buffer was used to align this td then unmap it */
xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
@@ -1994,7 +1992,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_virt_device *xdev;
- struct xhci_ring *ep_ring;
unsigned int slot_id;
int ep_index;
struct xhci_ep_ctx *ep_ctx;
@@ -2006,7 +2003,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
- ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
requested = td->urb->transfer_buffer_length;
@@ -2965,7 +2961,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
return 0;
}
-static unsigned int count_trbs(u64 addr, u64 len)
+unsigned int count_trbs(u64 addr, u64 len)
{
unsigned int num_trbs;
@@ -4044,7 +4040,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
}
/* This function gets called from contexts where it cannot sleep */
- cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!cmd)
return;
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index a3b57c781db1..410544ffe78f 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -23,6 +23,7 @@
#include <linux/tracepoint.h>
#include "xhci.h"
+#include "xhci-dbgcap.h"
#define XHCI_MSG_MAX 500
@@ -155,6 +156,21 @@ DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
TP_ARGS(ring, trb)
);
+DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
DECLARE_EVENT_CLASS(xhci_log_virt_dev,
TP_PROTO(struct xhci_virt_device *vdev),
TP_ARGS(vdev),
@@ -478,6 +494,59 @@ DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
TP_ARGS(portnum, portsc)
);
+DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status,
+ TP_PROTO(u32 portnum, u32 portsc),
+ TP_ARGS(portnum, portsc)
+);
+
+DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
+ TP_PROTO(u32 portnum, u32 portsc),
+ TP_ARGS(portnum, portsc)
+);
+
+DECLARE_EVENT_CLASS(xhci_dbc_log_request,
+ TP_PROTO(struct dbc_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(struct dbc_request *, req)
+ __field(bool, dir)
+ __field(unsigned int, actual)
+ __field(unsigned int, length)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->dir = req->direction;
+ __entry->actual = req->actual;
+ __entry->length = req->length;
+ __entry->status = req->status;
+ ),
+ TP_printk("%s: req %p length %u/%u ==> %d",
+ __entry->dir ? "bulk-in" : "bulk-out",
+ __entry->req, __entry->actual,
+ __entry->length, __entry->status
+ )
+);
+
+DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_alloc_request,
+ TP_PROTO(struct dbc_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_free_request,
+ TP_PROTO(struct dbc_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_queue_request,
+ TP_PROTO(struct dbc_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_giveback_request,
+ TP_PROTO(struct dbc_request *req),
+ TP_ARGS(req)
+);
#endif /* __XHCI_TRACE_H */
/* this part must be outside header guard */
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index da6dbe3ebd8b..1eeb3396300f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -21,6 +21,7 @@
#include "xhci-trace.h"
#include "xhci-mtk.h"
#include "xhci-debugfs.h"
+#include "xhci-dbgcap.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -573,10 +574,6 @@ int xhci_run(struct usb_hcd *hcd)
if (ret)
return ret;
- xhci_dbg_cmd_ptrs(xhci);
-
- xhci_dbg(xhci, "ERST memory map follows:\n");
- xhci_dbg_erst(xhci, &xhci->erst);
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -586,11 +583,7 @@ int xhci_run(struct usb_hcd *hcd)
"// Set the interrupt modulation register");
temp = readl(&xhci->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
- /*
- * the increment interval is 8 times as much as that defined
- * in xHCI spec on MTK's controller
- */
- temp |= (u32) ((xhci->quirks & XHCI_MTK_HOST) ? 20 : 160);
+ temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
writel(temp, &xhci->ir_set->irq_control);
/* Set the HCD state before we enable the irqs */
@@ -605,12 +598,11 @@ int xhci_run(struct usb_hcd *hcd)
"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, 0);
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, false, GFP_KERNEL);
if (!command)
return -ENOMEM;
@@ -622,6 +614,8 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
+ xhci_dbc_init(xhci);
+
xhci_debugfs_init(xhci);
return 0;
@@ -654,6 +648,8 @@ static void xhci_stop(struct usb_hcd *hcd)
xhci_debugfs_exit(xhci);
+ xhci_dbc_exit(xhci);
+
spin_lock_irq(&xhci->lock);
xhci->xhc_state |= XHCI_STATE_HALTED;
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
@@ -681,7 +677,6 @@ static void xhci_stop(struct usb_hcd *hcd)
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
temp = readl(&xhci->ir_set->irq_pending);
writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, 0);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
@@ -870,6 +865,8 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
xhci->shared_hcd->state != HC_STATE_SUSPENDED)
return -EINVAL;
+ xhci_dbc_suspend(xhci);
+
/* Clear root port wake on bits if wakeup not allowed. */
if (!do_wakeup)
xhci_disable_port_wake_on_bits(xhci);
@@ -1014,7 +1011,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
temp = readl(&xhci->ir_set->irq_pending);
writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -1065,6 +1061,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
spin_unlock_irq(&xhci->lock);
+ xhci_dbc_resume(xhci);
+
done:
if (retval == 0) {
/* Resume root hubs only when have pending events. */
@@ -1243,7 +1241,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
* changes max packet sizes.
*/
- command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return -ENOMEM;
@@ -1498,7 +1496,7 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* the first cancellation to be handled.
*/
if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
- command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
ret = -ENOMEM;
goto done;
@@ -2683,7 +2681,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
- command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return -ENOMEM;
@@ -2838,12 +2836,10 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
unsigned int stream_id, struct xhci_td *td)
{
struct xhci_dequeue_state deq_state;
- struct xhci_virt_ep *ep;
struct usb_device *udev = td->urb->dev;
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
"Cleaning up stalled endpoint ring");
- ep = &xhci->devs[udev->slot_id]->eps[ep_index];
/* We need to move the HW's dequeue pointer past this TD,
* or it will attempt to resend it on the next doorbell ring.
*/
@@ -3092,7 +3088,7 @@ static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
return -ENOSYS;
}
- config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
+ config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
if (!config_cmd)
return -ENOMEM;
@@ -3363,7 +3359,6 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
unsigned int slot_id;
struct xhci_virt_device *virt_dev;
struct xhci_command *reset_device_cmd;
- int last_freed_endpoint;
struct xhci_slot_ctx *slot_ctx;
int old_active_eps = 0;
@@ -3416,7 +3411,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
* reset as part of error handling, so use GFP_NOIO instead of
* GFP_KERNEL.
*/
- reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+ reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
if (!reset_device_cmd) {
xhci_dbg(xhci, "Couldn't allocate command structure.\n");
return -ENOMEM;
@@ -3478,7 +3473,6 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
}
/* Everything but endpoint 0 is disabled, so free the rings. */
- last_freed_endpoint = 1;
for (i = 1; i < 31; i++) {
struct xhci_virt_ep *ep = &virt_dev->eps[i];
@@ -3493,7 +3487,6 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
if (ep->ring) {
xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_free_endpoint_ring(xhci, virt_dev, i);
- last_freed_endpoint = i;
}
if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
xhci_drop_ep_from_interval_table(xhci,
@@ -3566,7 +3559,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
u32 state;
int ret = 0;
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, false, GFP_KERNEL);
if (!command)
return -ENOMEM;
@@ -3628,7 +3621,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
int ret, slot_id;
struct xhci_command *command;
- command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return 0;
@@ -3761,7 +3754,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
}
}
- command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command) {
ret = -ENOMEM;
goto out;
@@ -4680,7 +4673,7 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
return -EINVAL;
}
- config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
+ config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
if (!config_cmd)
return -ENOMEM;
@@ -4828,7 +4821,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
if (xhci->hci_version > 0x100)
xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
- xhci_print_registers(xhci);
xhci->quirks |= quirks;
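
The interrupt-moderation rework above replaces the hard-coded MTK special case with a per-controller value: the IMODI field counts in 250 ns units, so the old constants 160 and 20 correspond to 40000 ns and 5000 ns respectively, which the platform glue can now set through the new imod_interval field. A minimal sketch of the conversion, assuming imod_interval was initialized elsewhere in this series (the helper name is hypothetical):

	/* Convert a moderation interval in nanoseconds to the IMODI field;
	 * the controller counts in 250 ns ticks, and ER_IRQ_INTERVAL_MASK
	 * keeps only the bits the register defines. */
	static u32 xhci_imodi_from_ns(u32 imod_interval_ns)
	{
		return (imod_interval_ns / 250) & ER_IRQ_INTERVAL_MASK;
	}
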
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 99a014a920d3..96099a245c69 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1717,6 +1717,8 @@ struct xhci_hcd {
u8 max_interrupters;
u8 max_ports;
u8 isoc_threshold;
+ /* imod_interval in ns (I * 250ns) */
+ u32 imod_interval;
int event_ring_max;
/* 4KB min, 128MB max */
int page_size;
@@ -1856,6 +1858,7 @@ struct xhci_hcd {
struct dentry *debugfs_slots;
struct list_head regset_list;
+ void *dbc;
/* platform-specific data -- must come last */
unsigned long priv[0] __aligned(sizeof(s64));
};
@@ -1924,12 +1927,6 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
}
/* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
-void xhci_print_registers(struct xhci_hcd *xhci);
-void xhci_dbg_regs(struct xhci_hcd *xhci);
-void xhci_print_run_regs(struct xhci_hcd *xhci);
-void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
-void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
char *xhci_get_slot_state(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
@@ -1965,9 +1962,17 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ unsigned int num_segs, unsigned int cycle_state,
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
- unsigned int num_trbs, gfp_t flags);
+ unsigned int num_trbs, gfp_t flags);
+int xhci_alloc_erst(struct xhci_hcd *xhci,
+ struct xhci_ring *evt_ring,
+ struct xhci_erst *erst,
+ gfp_t flags);
+void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);
@@ -1992,11 +1997,16 @@ struct xhci_ring *xhci_stream_id_to_ring(
unsigned int ep_index,
unsigned int stream_id);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
- bool allocate_in_ctx, bool allocate_completion,
- gfp_t mem_flags);
+ bool allocate_completion, gfp_t mem_flags);
+struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
+ bool allocate_completion, gfp_t mem_flags);
void xhci_urb_free_priv(struct urb_priv *urb_priv);
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command);
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+ int type, gfp_t flags);
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx);
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
@@ -2070,6 +2080,8 @@ void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);
void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
+void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
+unsigned int count_trbs(u64 addr, u64 len);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
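
The command-allocator split declared above drops the allocate_in_ctx flag from xhci_alloc_command(); call sites that actually need an input context move to xhci_alloc_command_with_ctx(), as the xhci.c hunks earlier show. A hedged sketch of the two paths (error handling trimmed):

	struct xhci_command *cmd;

	/* Plain command, optionally with a completion (stop-endpoint, NEC quirk). */
	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);

	/* Command carrying an input context (configure-endpoint, streams). */
	cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
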
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index b6a1b9331aa0..716cb515523e 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -183,10 +183,10 @@ static int chaoskey_probe(struct usb_interface *interface,
dev->in_ep = in_ep;
if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
- dev->reads_started = 1;
+ dev->reads_started = true;
dev->size = size;
- dev->present = 1;
+ dev->present = true;
init_waitqueue_head(&dev->wait_q);
@@ -239,7 +239,7 @@ static void chaoskey_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
mutex_lock(&dev->lock);
- dev->present = 0;
+ dev->present = false;
usb_poison_urb(dev->urb);
if (!dev->open) {
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index bf4d2778907e..9d780b77314b 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -144,7 +144,7 @@ error:
}
/* attribute callback handler (write) */
-static ssize_t set_port0_handler(struct device *dev,
+static ssize_t port0_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -152,7 +152,7 @@ static ssize_t set_port0_handler(struct device *dev,
}
/* attribute callback handler (write) */
-static ssize_t set_port1_handler(struct device *dev,
+static ssize_t port1_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -178,22 +178,22 @@ static ssize_t read_port(struct device *dev, struct device_attribute *attr,
}
/* attribute callback handler (read) */
-static ssize_t get_port0_handler(struct device *dev,
+static ssize_t port0_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return read_port(dev, attr, buf, 0, CYPRESS_READ_PORT_ID0);
}
/* attribute callback handler (read) */
-static ssize_t get_port1_handler(struct device *dev,
+static ssize_t port1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1);
}
-static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler);
+static DEVICE_ATTR_RW(port0);
-static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler);
+static DEVICE_ATTR_RW(port1);
static int cypress_probe(struct usb_interface *interface,
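
The get_/set_ handler renames in this file (and the ones that follow) are what make the DEVICE_ATTR_RW() conversion possible: the macro pastes the attribute name onto _show and _store and picks the conventional 0644 mode. A minimal sketch of the convention, with foo as a placeholder attribute:

	/* DEVICE_ATTR_RW(foo) expands to dev_attr_foo wired to foo_show()
	 * and foo_store() with mode 0644; the handler names must match
	 * the attribute name exactly. */
	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 0);		/* placeholder value */
	}

	static ssize_t foo_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
	{
		return count;				/* accept and discard */
	}

	static DEVICE_ATTR_RW(foo);
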
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 66be86077292..8b15ab5e1450 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -78,7 +78,7 @@ static int vendor_command(struct usb_device *dev, unsigned char request,
#define BRIGHTNESS 0x2c /* RAM location for brightness value */
#define BRIGHTNESS_SEM 0x2b /* RAM location for brightness semaphore */
-static ssize_t show_brightness(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t brightness_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
@@ -86,7 +86,7 @@ static ssize_t show_brightness(struct device *dev, struct device_attribute *attr
return sprintf(buf, "%i", cytherm->brightness);
}
-static ssize_t set_brightness(struct device *dev, struct device_attribute *attr, const char *buf,
+static ssize_t brightness_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -121,15 +121,13 @@ static ssize_t set_brightness(struct device *dev, struct device_attribute *attr,
return count;
}
-
-static DEVICE_ATTR(brightness, S_IRUGO | S_IWUSR | S_IWGRP,
- show_brightness, set_brightness);
+static DEVICE_ATTR_RW(brightness);
#define TEMP 0x33 /* RAM location for temperature */
#define SIGN 0x34 /* RAM location for temperature sign */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -161,19 +159,12 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char
return sprintf(buf, "%c%i.%i", sign ? '-' : '+', temp >> 1,
5*(temp - ((temp >> 1) << 1)));
}
-
-
-static ssize_t set_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- return count;
-}
-
-static DEVICE_ATTR(temp, S_IRUGO, show_temp, set_temp);
+static DEVICE_ATTR_RO(temp);
#define BUTTON 0x7a
-static ssize_t show_button(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t button_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -200,17 +191,10 @@ static ssize_t show_button(struct device *dev, struct device_attribute *attr, ch
else
return sprintf(buf, "0");
}
+static DEVICE_ATTR_RO(button);
-static ssize_t set_button(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- return count;
-}
-
-static DEVICE_ATTR(button, S_IRUGO, show_button, set_button);
-
-
-static ssize_t show_port0(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t port0_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
@@ -234,7 +218,7 @@ static ssize_t show_port0(struct device *dev, struct device_attribute *attr, cha
}
-static ssize_t set_port0(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t port0_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
@@ -263,10 +247,9 @@ static ssize_t set_port0(struct device *dev, struct device_attribute *attr, cons
return count;
}
+static DEVICE_ATTR_RW(port0);
-static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR | S_IWGRP, show_port0, set_port0);
-
-static ssize_t show_port1(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t port1_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
@@ -290,7 +273,7 @@ static ssize_t show_port1(struct device *dev, struct device_attribute *attr, cha
}
-static ssize_t set_port1(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t port1_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
@@ -319,9 +302,7 @@ static ssize_t set_port1(struct device *dev, struct device_attribute *attr, cons
return count;
}
-
-static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR | S_IWGRP, show_port1, set_port1);
-
+static DEVICE_ATTR_RW(port1);
static int cytherm_probe(struct usb_interface *interface,
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index ad3109490c0f..1fa00b35f4ad 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -677,10 +677,10 @@ static int iowarrior_release(struct inode *inode, struct file *file)
return retval;
}
-static unsigned iowarrior_poll(struct file *file, poll_table * wait)
+static __poll_t iowarrior_poll(struct file *file, poll_table * wait)
{
struct iowarrior *dev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!dev->present)
return POLLERR | POLLHUP;
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 5c1a3b852453..074398c1e410 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -409,10 +409,10 @@ exit:
/**
* ld_usb_poll
*/
-static unsigned int ld_usb_poll(struct file *file, poll_table *wait)
+static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
{
struct ld_usb *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev = file->private_data;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index c5be6e9e24a5..941c45028828 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -224,7 +224,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
static inline void tower_delete (struct lego_usb_tower *dev);
static int tower_open (struct inode *inode, struct file *file);
static int tower_release (struct inode *inode, struct file *file);
-static unsigned int tower_poll (struct file *file, poll_table *wait);
+static __poll_t tower_poll (struct file *file, poll_table *wait);
static loff_t tower_llseek (struct file *file, loff_t off, int whence);
static void tower_abort_transfers (struct lego_usb_tower *dev);
@@ -509,10 +509,10 @@ static void tower_check_for_read_packet (struct lego_usb_tower *dev)
/**
* tower_poll
*/
-static unsigned int tower_poll (struct file *file, poll_table *wait)
+static __poll_t tower_poll (struct file *file, poll_table *wait)
{
struct lego_usb_tower *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev = file->private_data;
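
The poll-handler conversions in the last few files follow the tree-wide switch to the __poll_t sparse annotation: the returned mask is a bitwise type, so declaring both the handler and the local mask as __poll_t lets sparse catch mixups with plain integers. The expected shape, as a sketch with hypothetical device fields:

	static __poll_t example_poll(struct file *file, poll_table *wait)
	{
		struct example_dev *dev = file->private_data;	/* hypothetical */
		__poll_t mask = 0;

		if (!dev->present)			/* device unplugged */
			return POLLERR | POLLHUP;

		poll_wait(file, &dev->wait_q, wait);
		if (dev->data_available)		/* hypothetical flag */
			mask |= POLLIN | POLLRDNORM;

		return mask;
	}
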
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index d83af2a332e4..b3e1f553954a 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -30,7 +30,7 @@ struct trancevibrator {
unsigned int speed;
};
-static ssize_t show_speed(struct device *dev, struct device_attribute *attr,
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -39,7 +39,7 @@ static ssize_t show_speed(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", tv->speed);
}
-static ssize_t set_speed(struct device *dev, struct device_attribute *attr,
+static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -70,7 +70,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed);
+static DEVICE_ATTR_RW(speed);
static int tv_probe(struct usb_interface *interface,
const struct usb_device_id *id)
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 465dbf68b463..f723f7b8c9ac 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -279,6 +279,8 @@ static int usb3503_probe(struct usb3503 *hub)
if (gpio_is_valid(hub->gpio_reset)) {
err = devm_gpio_request_one(dev, hub->gpio_reset,
GPIOF_OUT_INIT_LOW, "usb3503 reset");
+ /* Datasheet defines a hardware reset to be at least 100us */
+ usleep_range(100, 10000);
if (err) {
dev_err(dev,
"unable to request GPIO %d as reset pin (%d)\n",
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 12f7e94695a2..1923d5b6d9c9 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -165,7 +165,7 @@ static void update_display_visual(struct usb_sevsegdev *mydev, gfp_t mf)
}
#define MYDEV_ATTR_SIMPLE_UNSIGNED(name, update_fcn) \
-static ssize_t show_attr_##name(struct device *dev, \
+static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
@@ -174,7 +174,7 @@ static ssize_t show_attr_##name(struct device *dev, \
return sprintf(buf, "%u\n", mydev->name); \
} \
\
-static ssize_t set_attr_##name(struct device *dev, \
+static ssize_t name##_store(struct device *dev, \
struct device_attribute *attr, const char *buf, size_t count) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
@@ -185,9 +185,9 @@ static ssize_t set_attr_##name(struct device *dev, \
\
return count; \
} \
-static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name);
+static DEVICE_ATTR_RW(name);
-static ssize_t show_attr_text(struct device *dev,
+static ssize_t text_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -196,7 +196,7 @@ static ssize_t show_attr_text(struct device *dev,
return snprintf(buf, mydev->textlength, "%s\n", mydev->text);
}
-static ssize_t set_attr_text(struct device *dev,
+static ssize_t text_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -216,9 +216,9 @@ static ssize_t set_attr_text(struct device *dev,
return count;
}
-static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text);
+static DEVICE_ATTR_RW(text);
-static ssize_t show_attr_decimals(struct device *dev,
+static ssize_t decimals_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -240,7 +240,7 @@ static ssize_t show_attr_decimals(struct device *dev,
return sizeof(mydev->decimals) + 1;
}
-static ssize_t set_attr_decimals(struct device *dev,
+static ssize_t decimals_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -265,9 +265,9 @@ static ssize_t set_attr_decimals(struct device *dev,
return count;
}
-static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals);
+static DEVICE_ATTR_RW(decimals);
-static ssize_t show_attr_textmode(struct device *dev,
+static ssize_t textmode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -293,7 +293,7 @@ static ssize_t show_attr_textmode(struct device *dev,
return strlen(buf);
}
-static ssize_t set_attr_textmode(struct device *dev,
+static ssize_t textmode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -309,7 +309,7 @@ static ssize_t set_attr_textmode(struct device *dev,
return count;
}
-static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode);
+static DEVICE_ATTR_RW(textmode);
MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index aedc9a7f149e..90028ef541e3 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1710,6 +1710,35 @@ static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
return 0;
}
+static int test_toggle_sync(struct usbtest_dev *tdev, int ep, struct urb *urb)
+{
+ int retval;
+
+ /* clear initial data toggle to DATA0 */
+ retval = usb_clear_halt(urb->dev, urb->pipe);
+ if (retval < 0) {
+ ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
+ return retval;
+ }
+
+ /* transfer 3 data packets, should be DATA0, DATA1, DATA0 */
+ retval = simple_io(tdev, urb, 1, 0, 0, __func__);
+ if (retval != 0)
+ return -EINVAL;
+
+ /* clear halt resets device side data toggle, host should react to it */
+ retval = usb_clear_halt(urb->dev, urb->pipe);
+ if (retval < 0) {
+ ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
+ return retval;
+ }
+
+ /* host should use DATA0 again after clear halt */
+ retval = simple_io(tdev, urb, 1, 0, 0, __func__);
+
+ return retval;
+}
+
static int halt_simple(struct usbtest_dev *dev)
{
int ep;
@@ -1742,6 +1771,33 @@ done:
return retval;
}
+static int toggle_sync_simple(struct usbtest_dev *dev)
+{
+ int ep;
+ int retval = 0;
+ struct urb *urb;
+ struct usb_device *udev = testdev_to_usbdev(dev);
+ unsigned maxp = get_maxpacket(udev, dev->out_pipe);
+
+ /*
+	 * Create a URB that transfers an odd number of data packets.
+	 * This way the halt clear has an impact on the data toggle sequence.
+	 * Use two maxpacket-length packets and one zero-length packet.
+ */
+ urb = simple_alloc_urb(udev, 0, 2 * maxp, 0);
+ if (urb == NULL)
+ return -ENOMEM;
+
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
+ ep = usb_pipeendpoint(dev->out_pipe);
+ urb->pipe = dev->out_pipe;
+ retval = test_toggle_sync(dev, ep, urb);
+
+ simple_free_urb(urb);
+ return retval;
+}
+
/*-------------------------------------------------------------------------*/
/* Control OUT tests use the vendor control requests from Intel's
@@ -2524,6 +2580,20 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
retval = test_queue(dev, param,
dev->in_pipe, NULL, 0);
break;
+ /* Test data Toggle/seq_nr clear between bulk out transfers */
+ case 29:
+ if (dev->out_pipe == 0)
+ break;
+ retval = 0;
+ dev_info(&intf->dev, "TEST 29: Clear toggle between bulk writes %d times\n",
+ param->iterations);
+ for (i = param->iterations; retval == 0 && i > 0; --i)
+ retval = toggle_sync_simple(dev);
+
+ if (retval)
+ ERROR(dev, "toggle sync failed, iterations left %d\n",
+ i);
+ break;
}
return retval;
}
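
Why the transfer in toggle_sync_simple() above comes out to an odd packet count: 2 * maxp splits into exactly two full DATA packets, and URB_ZERO_PACKET appends a zero-length third, so the toggle runs DATA0/DATA1/DATA0 and the subsequent halt-clear visibly resets it. A small sketch of the arithmetic (the helper name is hypothetical):

	/* Number of bulk OUT packets for a transfer of len bytes; with
	 * URB_ZERO_PACKET a trailing ZLP is added when len is an exact
	 * multiple of maxp. For len = 2 * maxp this gives 3 packets. */
	static unsigned int bulk_out_packets(unsigned int len,
					     unsigned int maxp, bool zlp)
	{
		unsigned int n = DIV_ROUND_UP(len, maxp);

		if (zlp && len % maxp == 0)
			n++;
		return n;
	}
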
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f6ae753ab99b..cc5b296bff3f 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
break;
case MON_IOCQ_RING_SIZE:
+ mutex_lock(&rp->fetch_lock);
ret = rp->b_size;
+ mutex_unlock(&rp->fetch_lock);
break;
case MON_IOCT_RING_SIZE:
@@ -1189,11 +1191,11 @@ static long mon_bin_compat_ioctl(struct file *file,
}
#endif /* CONFIG_COMPAT */
-static unsigned int
+static __poll_t
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
struct mon_reader_bin *rp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (file->f_mode & FMODE_READ)
@@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf)
unsigned long offset, chunk_idx;
struct page *pageptr;
+ mutex_lock(&rp->fetch_lock);
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= rp->b_size)
+ if (offset >= rp->b_size) {
+ mutex_unlock(&rp->fetch_lock);
return VM_FAULT_SIGBUS;
+ }
chunk_idx = offset / CHUNK_SIZE;
pageptr = rp->b_vec[chunk_idx].pg;
get_page(pageptr);
+ mutex_unlock(&rp->fetch_lock);
vmf->page = pageptr;
return 0;
}
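
Both mon_bin hunks close the same race: MON_IOCT_RING_SIZE can free and reallocate b_vec while another thread reads the ring geometry, so every reader of b_size/b_vec now takes rp->fetch_lock. The fault-path pattern in isolation, as a sketch (the helper name is hypothetical):

	/* Look up the page backing a buffer offset; fetch_lock excludes a
	 * concurrent MON_IOCT_RING_SIZE resize of b_vec/b_size. */
	static struct page *mon_bin_page_at(struct mon_reader_bin *rp,
					    unsigned long offset)
	{
		struct page *pg = NULL;

		mutex_lock(&rp->fetch_lock);
		if (offset < rp->b_size) {
			pg = rp->b_vec[offset / CHUNK_SIZE].pg;
			get_page(pg);	/* hold a reference for the caller */
		}
		mutex_unlock(&rp->fetch_lock);

		return pg;
	}
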
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index 3c888d942a9f..2cd00a24afd9 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -229,7 +229,10 @@ struct otg_switch_mtk {
* @u3p_dis_msk: mask of disabling usb3 ports, for example, bit0==1 to
* disable u3port0, bit1==1 to disable u3port1,... etc
* @dbgfs_root: only used when manual dual-role switching via debugfs is supported
- * @wakeup_en: it's true when supports remote wakeup in host mode
+ * @uwk_en: true when remote wakeup is supported in host mode
+ * @uwk: syscon including the USB wakeup glue layer between the SSUSB IP and SPM
+ * @uwk_reg_base: base address of the wakeup glue layer in @uwk
+ * @uwk_vers: version of the wakeup glue layer
*/
struct ssusb_mtk {
struct device *dev;
@@ -253,8 +256,10 @@ struct ssusb_mtk {
int u3p_dis_msk;
struct dentry *dbgfs_root;
/* usb wakeup for host mode */
- bool wakeup_en;
- struct regmap *pericfg;
+ bool uwk_en;
+ struct regmap *uwk;
+ u32 uwk_reg_base;
+ u32 uwk_vers;
};
/**
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
index c179192408ba..50702fdcde28 100644
--- a/drivers/usb/mtu3/mtu3_dr.h
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -18,8 +18,7 @@ int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
struct device_node *dn);
int ssusb_host_enable(struct ssusb_mtk *ssusb);
int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend);
-int ssusb_wakeup_enable(struct ssusb_mtk *ssusb);
-void ssusb_wakeup_disable(struct ssusb_mtk *ssusb);
+void ssusb_wakeup_set(struct ssusb_mtk *ssusb, bool enable);
#else
@@ -49,12 +48,7 @@ static inline int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
return 0;
}
-static inline int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
-{
- return 0;
-}
-
-static inline void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
+static inline void ssusb_wakeup_set(struct ssusb_mtk *ssusb, bool enable)
{}
#endif
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
index d237d7e65c44..c871b94f3e6f 100644
--- a/drivers/usb/mtu3/mtu3_host.c
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -18,66 +18,77 @@
#include "mtu3.h"
#include "mtu3_dr.h"
-#define PERI_WK_CTRL1 0x404
-#define UWK_CTL1_IS_C(x) (((x) & 0xf) << 26)
-#define UWK_CTL1_IS_E BIT(25)
-#define UWK_CTL1_IDDIG_C(x) (((x) & 0xf) << 11) /* cycle debounce */
-#define UWK_CTL1_IDDIG_E BIT(10) /* enable debounce */
-#define UWK_CTL1_IDDIG_P BIT(9) /* polarity */
-#define UWK_CTL1_IS_P BIT(6) /* polarity for ip sleep */
+/* mt8173 etc */
+#define PERI_WK_CTRL1 0x4
+#define WC1_IS_C(x) (((x) & 0xf) << 26) /* cycle debounce */
+#define WC1_IS_EN BIT(25)
+#define WC1_IS_P BIT(6) /* polarity for ip sleep */
+
+/* mt2712 etc */
+#define PERI_SSUSB_SPM_CTRL 0x0
+#define SSC_IP_SLEEP_EN BIT(4)
+#define SSC_SPM_INT_EN BIT(1)
+
+enum ssusb_uwk_vers {
+ SSUSB_UWK_V1 = 1,
+ SSUSB_UWK_V2,
+};
/*
* ip-sleep wakeup mode:
* all clocks can be turned off, but the power domain should be kept on
*/
-static void ssusb_wakeup_ip_sleep_en(struct ssusb_mtk *ssusb)
+static void ssusb_wakeup_ip_sleep_set(struct ssusb_mtk *ssusb, bool enable)
{
- u32 tmp;
- struct regmap *pericfg = ssusb->pericfg;
-
- regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
- tmp &= ~UWK_CTL1_IS_P;
- tmp &= ~(UWK_CTL1_IS_C(0xf));
- tmp |= UWK_CTL1_IS_C(0x8);
- regmap_write(pericfg, PERI_WK_CTRL1, tmp);
- regmap_write(pericfg, PERI_WK_CTRL1, tmp | UWK_CTL1_IS_E);
-
- regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
- dev_dbg(ssusb->dev, "%s(): WK_CTRL1[P6,E25,C26:29]=%#x\n",
- __func__, tmp);
-}
-
-static void ssusb_wakeup_ip_sleep_dis(struct ssusb_mtk *ssusb)
-{
- u32 tmp;
-
- regmap_read(ssusb->pericfg, PERI_WK_CTRL1, &tmp);
- tmp &= ~UWK_CTL1_IS_E;
- regmap_write(ssusb->pericfg, PERI_WK_CTRL1, tmp);
+ u32 reg, msk, val;
+
+ switch (ssusb->uwk_vers) {
+ case SSUSB_UWK_V1:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL1;
+ msk = WC1_IS_EN | WC1_IS_C(0xf) | WC1_IS_P;
+ val = enable ? (WC1_IS_EN | WC1_IS_C(0x8)) : 0;
+ break;
+ case SSUSB_UWK_V2:
+ reg = ssusb->uwk_reg_base + PERI_SSUSB_SPM_CTRL;
+ msk = SSC_IP_SLEEP_EN | SSC_SPM_INT_EN;
+ val = enable ? msk : 0;
+ break;
+ default:
+ return;
+ }
+ regmap_update_bits(ssusb->uwk, reg, msk, val);
}
int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
struct device_node *dn)
{
- struct device *dev = ssusb->dev;
+ struct of_phandle_args args;
+ int ret;
- /*
- * Wakeup function is optional, so it is not an error if this property
- * does not exist, and in such case, no need to get relative
- * properties anymore.
- */
- ssusb->wakeup_en = of_property_read_bool(dn, "mediatek,enable-wakeup");
- if (!ssusb->wakeup_en)
+ /* wakeup function is optional */
+ ssusb->uwk_en = of_property_read_bool(dn, "wakeup-source");
+ if (!ssusb->uwk_en)
return 0;
- ssusb->pericfg = syscon_regmap_lookup_by_phandle(dn,
- "mediatek,syscon-wakeup");
- if (IS_ERR(ssusb->pericfg)) {
- dev_err(dev, "fail to get pericfg regs\n");
- return PTR_ERR(ssusb->pericfg);
- }
+ ret = of_parse_phandle_with_fixed_args(dn,
+ "mediatek,syscon-wakeup", 2, 0, &args);
+ if (ret)
+ return ret;
- return 0;
+ ssusb->uwk_reg_base = args.args[0];
+ ssusb->uwk_vers = args.args[1];
+ ssusb->uwk = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ dev_info(ssusb->dev, "uwk - reg:0x%x, version:%d\n",
+ ssusb->uwk_reg_base, ssusb->uwk_vers);
+
+ return PTR_ERR_OR_ZERO(ssusb->uwk);
+}
+
+void ssusb_wakeup_set(struct ssusb_mtk *ssusb, bool enable)
+{
+ if (ssusb->uwk_en)
+ ssusb_wakeup_ip_sleep_set(ssusb, enable);
}
static void host_ports_num_get(struct ssusb_mtk *ssusb)
@@ -235,17 +246,3 @@ void ssusb_host_exit(struct ssusb_mtk *ssusb)
of_platform_depopulate(ssusb->dev);
ssusb_host_cleanup(ssusb);
}
-
-int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
-{
- if (ssusb->wakeup_en)
- ssusb_wakeup_ip_sleep_en(ssusb);
-
- return 0;
-}
-
-void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
-{
- if (ssusb->wakeup_en)
- ssusb_wakeup_ip_sleep_dis(ssusb);
-}
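
Folding the old enable/disable pair into ssusb_wakeup_ip_sleep_set() works because regmap_update_bits() performs one locked read-modify-write, new = (old & ~msk) | (val & msk), instead of the previous open-coded regmap_read()/regmap_write() sequence. The V1 path in isolation, as a sketch:

	/* Program ip-sleep wakeup on the v1 glue layer (mt8173 class);
	 * msk covers every field touched, so disabling clears them all. */
	static void uwk_v1_set(struct regmap *uwk, u32 reg_base, bool enable)
	{
		u32 msk = WC1_IS_EN | WC1_IS_C(0xf) | WC1_IS_P;
		u32 val = enable ? (WC1_IS_EN | WC1_IS_C(0x8)) : 0;

		regmap_update_bits(uwk, reg_base + PERI_WK_CTRL1, msk, val);
	}
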
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 3650fd11fc49..628d5ce356ca 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -282,8 +282,10 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
/* if host role is supported */
ret = ssusb_wakeup_of_property_parse(ssusb, node);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to parse uwk property\n");
return ret;
+ }
/* optional property, ignore the error if it does not exist */
of_property_read_u32(node, "mediatek,u3p-dis-msk",
@@ -308,7 +310,7 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0);
if (IS_ERR(otg_sx->edev)) {
dev_err(ssusb->dev, "couldn't get extcon device\n");
- return -EPROBE_DEFER;
+ return PTR_ERR(otg_sx->edev);
}
}
@@ -457,7 +459,7 @@ static int __maybe_unused mtu3_suspend(struct device *dev)
ssusb_host_disable(ssusb, true);
ssusb_phy_power_off(ssusb);
ssusb_clks_disable(ssusb);
- ssusb_wakeup_enable(ssusb);
+ ssusb_wakeup_set(ssusb, true);
return 0;
}
@@ -473,7 +475,7 @@ static int __maybe_unused mtu3_resume(struct device *dev)
if (!ssusb->is_host)
return 0;
- ssusb_wakeup_disable(ssusb);
+ ssusb_wakeup_set(ssusb, false);
ret = ssusb_clks_enable(ssusb);
if (ret)
goto clks_err;
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 6c036de63272..b8295ce7c4fe 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -520,7 +520,7 @@ static int da8xx_probe(struct platform_device *pdev)
if (!glue)
return -ENOMEM;
- clk = devm_clk_get(&pdev->dev, "usb20");
+ clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(clk);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ea5013aa69e2..968bf1e8b0fe 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1687,7 +1687,7 @@ EXPORT_SYMBOL_GPL(musb_mailbox);
/*-------------------------------------------------------------------------*/
static ssize_t
-musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
@@ -1701,7 +1701,7 @@ musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-musb_mode_store(struct device *dev, struct device_attribute *attr,
+mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
struct musb *musb = dev_to_musb(dev);
@@ -1721,10 +1721,10 @@ musb_mode_store(struct device *dev, struct device_attribute *attr,
return (status == 0) ? n : status;
}
-static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
+static DEVICE_ATTR_RW(mode);
static ssize_t
-musb_vbus_store(struct device *dev, struct device_attribute *attr,
+vbus_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
struct musb *musb = dev_to_musb(dev);
@@ -1748,7 +1748,7 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
@@ -1773,13 +1773,12 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "Vbus %s, timeout %lu msec\n",
vbus ? "on" : "off", val);
}
-static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
+static DEVICE_ATTR_RW(vbus);
/* Gadget drivers can't know that a host is connected so they might want
* to start SRP, but users can. This allows userspace to trigger SRP.
*/
-static ssize_t
-musb_srp_store(struct device *dev, struct device_attribute *attr,
+static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
struct musb *musb = dev_to_musb(dev);
@@ -1796,7 +1795,7 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
return n;
}
-static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
+static DEVICE_ATTR_WO(srp);
static struct attribute *musb_attributes[] = {
&dev_attr_mode.attr,
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 2627363fb4fe..394b4ac86161 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -195,7 +195,6 @@ static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
- u16 frame;
u32 len;
void __iomem *mbase = musb->mregs;
struct urb *urb = next_urb(qh);
@@ -244,7 +243,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
musb_dbg(musb, "check whether there's still time for periodic Tx");
- frame = musb_readw(mbase, MUSB_FRAME);
/* FIXME this doesn't implement that scheduling policy ...
* or handle framecounter wrapping
*/
@@ -1781,7 +1779,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
struct musb_qh *qh = hw_ep->in_qh;
size_t xfer_len;
void __iomem *mbase = musb->mregs;
- int pipe;
u16 rx_csr, val;
bool iso_err = false;
bool done = false;
@@ -1810,8 +1807,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
return;
}
- pipe = urb->pipe;
-
trace_musb_urb_rx(musb, urb);
/* check for errors, concurrent stall & unlink is not really
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 554b72282276..cfd9add10bf4 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -519,7 +519,7 @@ static irqreturn_t mv_otg_inputs_irq(int irq, void *dev)
}
static ssize_t
-get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+a_bus_req_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n",
@@ -527,7 +527,7 @@ get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-set_a_bus_req(struct device *dev, struct device_attribute *attr,
+a_bus_req_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
@@ -559,11 +559,10 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req,
- set_a_bus_req);
+static DEVICE_ATTR_RW(a_bus_req);
static ssize_t
-set_a_clr_err(struct device *dev, struct device_attribute *attr,
+a_clr_err_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
@@ -587,10 +586,10 @@ set_a_clr_err(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
+static DEVICE_ATTR_WO(a_clr_err);
static ssize_t
-get_a_bus_drop(struct device *dev, struct device_attribute *attr,
+a_bus_drop_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
@@ -599,7 +598,7 @@ get_a_bus_drop(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+a_bus_drop_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct mv_otg *mvotg = dev_get_drvdata(dev);
@@ -630,8 +629,7 @@ set_a_bus_drop(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR,
- get_a_bus_drop, set_a_bus_drop);
+static DEVICE_ATTR_RW(a_bus_drop);
static struct attribute *inputs_attrs[] = {
&dev_attr_a_bus_req.attr,
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index b3ce42edb373..0981abc3d1ad 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -59,13 +59,13 @@ static const unsigned int tahvo_cable[] = {
EXTCON_NONE,
};
-static ssize_t vbus_state_show(struct device *device,
+static ssize_t vbus_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct tahvo_usb *tu = dev_get_drvdata(device);
return sprintf(buf, "%s\n", tu->vbus_state ? "on" : "off");
}
-static DEVICE_ATTR(vbus, 0444, vbus_state_show, NULL);
+static DEVICE_ATTR_RO(vbus);
static void check_vbus_state(struct tahvo_usb *tu)
{
@@ -310,7 +310,7 @@ static ssize_t otg_mode_store(struct device *device,
return r;
}
-static DEVICE_ATTR(otg_mode, 0644, otg_mode_show, otg_mode_store);
+static DEVICE_ATTR_RW(otg_mode);
static struct attribute *tahvo_attributes[] = {
&dev_attr_vbus.attr,
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index e78ed52339e6..183550b63faa 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -168,7 +168,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
return 0;
}
-static ssize_t twl6030_usb_vbus_show(struct device *dev,
+static ssize_t vbus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct twl6030_usb *twl = dev_get_drvdata(dev);
@@ -194,7 +194,7 @@ static ssize_t twl6030_usb_vbus_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(vbus, 0444, twl6030_usb_vbus_show, NULL);
+static DEVICE_ATTR_RO(vbus);
static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
{
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index f97cb47577fc..bceb2c9988dd 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -323,6 +323,14 @@ static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
return *phy == match_data;
}
+static void usb_charger_init(struct usb_phy *usb_phy)
+{
+ usb_phy->chg_type = UNKNOWN_TYPE;
+ usb_phy->chg_state = USB_CHARGER_DEFAULT;
+ usb_phy_set_default_current(usb_phy);
+ INIT_WORK(&usb_phy->chg_work, usb_phy_notify_charger_work);
+}
+
static int usb_add_extcon(struct usb_phy *x)
{
int ret;
@@ -406,10 +414,6 @@ static int usb_add_extcon(struct usb_phy *x)
}
}
- usb_phy_set_default_current(x);
- INIT_WORK(&x->chg_work, usb_phy_notify_charger_work);
- x->chg_type = UNKNOWN_TYPE;
- x->chg_state = USB_CHARGER_DEFAULT;
if (x->type_nb.notifier_call)
__usb_phy_get_charger_type(x);
@@ -704,6 +708,7 @@ int usb_add_phy(struct usb_phy *x, enum usb_phy_type type)
return -EINVAL;
}
+ usb_charger_init(x);
ret = usb_add_extcon(x);
if (ret)
return ret;
@@ -749,6 +754,7 @@ int usb_add_phy_dev(struct usb_phy *x)
return -EINVAL;
}
+ usb_charger_init(x);
ret = usb_add_extcon(x);
if (ret)
return ret;
diff --git a/drivers/usb/renesas_usbhs/Makefile b/drivers/usb/renesas_usbhs/Makefile
index fac147a3ad23..5c5b51bb48ef 100644
--- a/drivers/usb/renesas_usbhs/Makefile
+++ b/drivers/usb/renesas_usbhs/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs.o
-renesas_usbhs-y := common.o mod.o pipe.o fifo.o rcar2.o rcar3.o
+renesas_usbhs-y := common.o mod.o pipe.o fifo.o rcar2.o rcar3.o rza.o
ifneq ($(CONFIG_USB_RENESAS_USBHS_HCD),)
renesas_usbhs-y += mod_host.o
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 56079bb6759a..4310df46639d 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -17,6 +17,7 @@
#include "common.h"
#include "rcar2.h"
#include "rcar3.h"
+#include "rza.h"
/*
* image of renesas_usbhs
@@ -488,6 +489,10 @@ static const struct of_device_id usbhs_of_match[] = {
.compatible = "renesas,rcar-gen3-usbhs",
.data = (void *)USBHS_TYPE_RCAR_GEN3,
},
+ {
+ .compatible = "renesas,rza1-usbhs",
+ .data = (void *)USBHS_TYPE_RZA1,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, usbhs_of_match);
@@ -520,6 +525,11 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
dparam->pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
}
+ if (dparam->type == USBHS_TYPE_RZA1) {
+ dparam->pipe_configs = usbhsc_new_pipe;
+ dparam->pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
+ }
+
return info;
}
@@ -581,6 +591,18 @@ static int usbhs_probe(struct platform_device *pdev)
break;
case USBHS_TYPE_RCAR_GEN3_WITH_PLL:
priv->pfunc = usbhs_rcar3_with_pll_ops;
+ if (!IS_ERR_OR_NULL(priv->edev)) {
+ priv->nb.notifier_call = priv->pfunc.notifier;
+ ret = devm_extcon_register_notifier(&pdev->dev,
+ priv->edev,
+ EXTCON_USB_HOST,
+ &priv->nb);
+ if (ret < 0)
+ dev_err(&pdev->dev, "no notifier registered\n");
+ }
+ break;
+ case USBHS_TYPE_RZA1:
+ priv->pfunc = usbhs_rza1_ops;
break;
default:
if (!info->platform_callback.get_id) {
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 64797784a6df..f619afeae2b8 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -98,6 +98,7 @@ struct usbhs_priv;
#define D2FIFOCTR 0x00F2 /* for R-Car Gen2 */
#define D3FIFOSEL 0x00F4 /* for R-Car Gen2 */
#define D3FIFOCTR 0x00F6 /* for R-Car Gen2 */
+#define SUSPMODE 0x0102 /* for RZ/A */
/* SYSCFG */
#define SCKE (1 << 10) /* USB Module Clock Enable */
@@ -106,6 +107,8 @@ struct usbhs_priv;
#define DRPD (1 << 5) /* D+ Line/D- Line Resistance Control */
#define DPRPU (1 << 4) /* D+ Line Resistance Control */
#define USBE (1 << 0) /* USB Module Operation Enable */
+#define UCKSEL (1 << 2) /* Clock Select for RZ/A1 */
+#define UPLLE (1 << 1) /* USB PLL Enable for RZ/A1 */
/* DVSTCTR */
#define EXTLP (1 << 10) /* Controls the EXTLP pin output state */
@@ -233,6 +236,9 @@ struct usbhs_priv;
#define USBSPD_SPEED_FULL 0x2
#define USBSPD_SPEED_HIGH 0x3
+/* SUSPMODE */
+#define SUSPM (1 << 14) /* SuspendM Control */
+
/*
* struct
*/
@@ -249,6 +255,7 @@ struct usbhs_priv {
struct platform_device *pdev;
struct extcon_dev *edev;
+ struct notifier_block nb;
spinlock_t lock;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 2d24ef3076ef..5925d111bd47 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -94,8 +94,6 @@ static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
}
-static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
- struct usbhs_fifo *fifo);
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo);
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
@@ -124,10 +122,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
chan = usbhsf_dma_chan_get(fifo, pkt);
if (chan) {
dmaengine_terminate_all(chan);
- usbhsf_fifo_clear(pipe, fifo);
usbhsf_dma_unmap(pkt);
}
+ usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+
__usbhsf_pkt_del(pkt);
}
@@ -256,15 +255,9 @@ static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
struct usbhs_fifo *fifo)
{
- int timeout = 1024;
-
- do {
- /* The FIFO port is accessible */
- if (usbhs_read(priv, fifo->ctr) & FRDY)
- return 0;
-
- udelay(10);
- } while (timeout--);
+ /* The FIFO port is accessible */
+ if (usbhs_read(priv, fifo->ctr) & FRDY)
+ return 0;
return -EBUSY;
}
@@ -278,8 +271,8 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
if (!usbhs_pipe_is_dcp(pipe)) {
/*
* This driver checks the pipe condition first to avoid -EBUSY
- * from usbhsf_fifo_barrier() with about 10 msec delay in
- * the interrupt handler if the pipe is RX direction and empty.
+ * from usbhsf_fifo_barrier() if the pipe is RX direction and
+ * empty.
*/
if (usbhs_pipe_is_dir_in(pipe))
ret = usbhs_pipe_is_accessible(pipe);
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 093cd8e87335..9677e0e31475 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -590,10 +590,22 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe)
}
}
-void usbhs_pipe_config_change_bfre(struct usbhs_pipe *pipe, int enable)
+/* Selects the pipe via usbhsp_pipe_select() before touching it */
+void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
+ int needs_bfre, int bfre_enable)
{
int sequence;
+ usbhsp_pipe_select(pipe);
+ sequence = usbhs_pipe_get_data_sequence(pipe);
+ if (needs_bfre)
+ usbhsp_pipe_cfg_set(pipe, BFRE, bfre_enable ? BFRE : 0);
+ usbhs_pipe_clear(pipe);
+ usbhs_pipe_data_sequence(pipe, sequence);
+}
+
+void usbhs_pipe_config_change_bfre(struct usbhs_pipe *pipe, int enable)
+{
if (usbhs_pipe_is_dcp(pipe))
return;
@@ -602,10 +614,7 @@ void usbhs_pipe_config_change_bfre(struct usbhs_pipe *pipe, int enable)
if (!(enable ^ !!(usbhsp_pipe_cfg_get(pipe) & BFRE)))
return;
- sequence = usbhs_pipe_get_data_sequence(pipe);
- usbhsp_pipe_cfg_set(pipe, BFRE, enable ? BFRE : 0);
- usbhs_pipe_clear(pipe);
- usbhs_pipe_data_sequence(pipe, sequence);
+ usbhs_pipe_clear_without_sequence(pipe, 1, enable);
}
static struct usbhs_pipe *usbhsp_get_pipe(struct usbhs_priv *priv, u32 type)
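
The new helper exists because clearing a pipe also resets its DATA0/DATA1 toggle, which would desynchronize host and device mid-stream; the sequence bit is therefore captured before the clear and written back afterwards. Condensed to its core (the helper name is hypothetical):

	/* Clear a pipe while preserving its data toggle; the clear itself
	 * resets the toggle, so save and restore it around the call. */
	static void pipe_clear_keep_toggle(struct usbhs_pipe *pipe)
	{
		int sequence = usbhs_pipe_get_data_sequence(pipe);

		usbhs_pipe_clear(pipe);
		usbhs_pipe_data_sequence(pipe, sequence);
	}
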
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index d3d002244891..3080423e600c 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -80,6 +80,8 @@ void usbhs_pipe_init(struct usbhs_priv *priv,
struct usbhs_pkt *pkt, int map));
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
void usbhs_pipe_clear(struct usbhs_pipe *pipe);
+void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
+ int needs_bfre, int bfre_enable);
int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
void usbhs_pipe_enable(struct usbhs_pipe *pipe);
void usbhs_pipe_disable(struct usbhs_pipe *pipe);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index c929d296c77b..d0ea4ff89622 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -27,6 +27,7 @@
* Remarks: bit[31:11] and bit[9:6] should be 0
*/
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
+#define UGCTRL2_USB0SEL_EHCI 0x00000010
#define UGCTRL2_USB0SEL_HSUSB 0x00000020
#define UGCTRL2_USB0SEL_OTG 0x00000030
#define UGCTRL2_VBUSSEL 0x00000400
@@ -44,13 +45,25 @@ static u32 usbhs_read32(struct usbhs_priv *priv, u32 reg)
return ioread32(priv->base + reg);
}
+static void usbhs_rcar3_set_ugctrl2(struct usbhs_priv *priv, u32 val)
+{
+ usbhs_write32(priv, UGCTRL2, val | UGCTRL2_RESERVED_3);
+}
+
+static void usbhs_rcar3_set_usbsel(struct usbhs_priv *priv, bool ehci)
+{
+ if (ehci)
+ usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_EHCI);
+ else
+ usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_HSUSB);
+}
+
static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
- UGCTRL2_VBUSSEL);
+ usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_OTG | UGCTRL2_VBUSSEL);
if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
@@ -70,11 +83,14 @@ static int usbhs_rcar3_power_and_pll_ctrl(struct platform_device *pdev,
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
u32 val;
int timeout = 1000;
+ bool is_host = false;
if (enable) {
usbhs_write32(priv, UGCTRL, 0); /* release PLLRESET */
- usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 |
- UGCTRL2_USB0SEL_HSUSB);
+ if (priv->edev)
+ is_host = extcon_get_state(priv->edev, EXTCON_USB_HOST);
+
+ usbhs_rcar3_set_usbsel(priv, is_host);
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
do {
@@ -96,6 +112,16 @@ static int usbhs_rcar3_get_id(struct platform_device *pdev)
return USBHS_GADGET;
}
+static int usbhs_rcar3_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct usbhs_priv *priv = container_of(nb, struct usbhs_priv, nb);
+
+ usbhs_rcar3_set_usbsel(priv, !!event);
+
+ return NOTIFY_DONE;
+}
+
const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
.power_ctrl = usbhs_rcar3_power_ctrl,
.get_id = usbhs_rcar3_get_id,
@@ -104,4 +130,5 @@ const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
const struct renesas_usbhs_platform_callback usbhs_rcar3_with_pll_ops = {
.power_ctrl = usbhs_rcar3_power_and_pll_ctrl,
.get_id = usbhs_rcar3_get_id,
+ .notifier = usbhs_rcar3_notifier,
};
diff --git a/drivers/usb/renesas_usbhs/rza.c b/drivers/usb/renesas_usbhs/rza.c
new file mode 100644
index 000000000000..5b287257ec11
--- /dev/null
+++ b/drivers/usb/renesas_usbhs/rza.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas USB driver RZ/A initialization and power control
+ *
+ * Copyright (C) 2018 Chris Brandt
+ * Copyright (C) 2018 Renesas Electronics Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include "common.h"
+#include "rza.h"
+
+static int usbhs_rza1_hardware_init(struct platform_device *pdev)
+{
+ struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
+ struct device_node *usb_x1_clk, *extal_clk;
+ u32 freq_usb = 0, freq_extal = 0;
+
+ /* Input Clock Selection (NOTE: ch0 controls both ch0 and ch1) */
+ usb_x1_clk = of_find_node_by_name(NULL, "usb_x1");
+ extal_clk = of_find_node_by_name(NULL, "extal");
+ of_property_read_u32(usb_x1_clk, "clock-frequency", &freq_usb);
+ of_property_read_u32(extal_clk, "clock-frequency", &freq_extal);
+ if (freq_usb == 0) {
+ if (freq_extal == 12000000) {
+ /* Select 12MHz XTAL */
+ usbhs_bset(priv, SYSCFG, UCKSEL, UCKSEL);
+ } else {
+ dev_err(usbhs_priv_to_dev(priv), "A 48MHz USB clock or 12MHz main clock is required.\n");
+ return -EIO;
+ }
+ }
+
+ /* Enable USB PLL (NOTE: ch0 controls both ch0 and ch1) */
+ usbhs_bset(priv, SYSCFG, UPLLE, UPLLE);
+ udelay(1000);
+ usbhs_bset(priv, SUSPMODE, SUSPM, SUSPM);
+
+ return 0;
+}
+
+static int usbhs_rza_get_id(struct platform_device *pdev)
+{
+ return USBHS_GADGET;
+}
+
+const struct renesas_usbhs_platform_callback usbhs_rza1_ops = {
+ .hardware_init = usbhs_rza1_hardware_init,
+ .get_id = usbhs_rza_get_id,
+};
diff --git a/drivers/usb/renesas_usbhs/rza.h b/drivers/usb/renesas_usbhs/rza.h
new file mode 100644
index 000000000000..ca917ca54f6d
--- /dev/null
+++ b/drivers/usb/renesas_usbhs/rza.h
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "common.h"
+
+extern const struct renesas_usbhs_platform_callback usbhs_rza1_ops;
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index a8d5f2e4878d..a646820f5a78 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE
- Google USB serial devices
- HP4x calculators
- a number of Motorola phones
+ - Motorola Tetra devices
- Novatel Wireless GPS receivers
- Siemens USB/MPI adapter.
- ViVOtech ViVOpay USB device.
@@ -322,84 +323,6 @@ config USB_SERIAL_KEYSPAN
To compile this driver as a module, choose M here: the
module will be called keyspan.
-config USB_SERIAL_KEYSPAN_MPR
- bool "USB Keyspan MPR Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the Keyspan MPR converter.
-
-config USB_SERIAL_KEYSPAN_USA28
- bool "USB Keyspan USA-28 Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-28 converter.
-
-config USB_SERIAL_KEYSPAN_USA28X
- bool "USB Keyspan USA-28X Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-28X converter.
- Be sure you have a USA-28X, there are also 28XA and 28XB
- models, the label underneath has the actual part number.
-
-config USB_SERIAL_KEYSPAN_USA28XA
- bool "USB Keyspan USA-28XA Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-28XA converter.
- Be sure you have a USA-28XA, there are also 28X and 28XB
- models, the label underneath has the actual part number.
-
-config USB_SERIAL_KEYSPAN_USA28XB
- bool "USB Keyspan USA-28XB Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-28XB converter.
- Be sure you have a USA-28XB, there are also 28X and 28XA
- models, the label underneath has the actual part number.
-
-config USB_SERIAL_KEYSPAN_USA19
- bool "USB Keyspan USA-19 Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-19 converter.
-
-config USB_SERIAL_KEYSPAN_USA18X
- bool "USB Keyspan USA-18X Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-18X converter.
-
-config USB_SERIAL_KEYSPAN_USA19W
- bool "USB Keyspan USA-19W Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-19W converter.
-
-config USB_SERIAL_KEYSPAN_USA19QW
- bool "USB Keyspan USA-19QW Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-19QW converter.
-
-config USB_SERIAL_KEYSPAN_USA19QI
- bool "USB Keyspan USA-19QI Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-19QI converter.
-
-config USB_SERIAL_KEYSPAN_USA49W
- bool "USB Keyspan USA-49W Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-49W converter.
-
-config USB_SERIAL_KEYSPAN_USA49WLC
- bool "USB Keyspan USA-49WLC Firmware"
- depends on USB_SERIAL_KEYSPAN && FIRMWARE_IN_KERNEL
- help
- Say Y here to include firmware for the USA-49WLC converter.
-
config USB_SERIAL_KLSI
tristate "USB KL5KUSB105 (Palmconnect) Driver"
---help---
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 3c544782f60b..7796ad8e33c6 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -83,7 +83,10 @@ static int ark3116_write_reg(struct usb_serial *serial,
usb_sndctrlpipe(serial->dev, 0),
0xfe, 0x40, val, reg,
NULL, 0, ARK_TIMEOUT);
- return result;
+ if (result)
+ return result;
+
+ return 0;
}
static int ark3116_read_reg(struct usb_serial *serial,
@@ -105,7 +108,7 @@ static int ark3116_read_reg(struct usb_serial *serial,
return result;
}
- return buf[0];
+ return 0;
}
static inline int calc_divisor(int bps)
@@ -355,13 +358,13 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
/* read modem status */
result = ark3116_read_reg(serial, UART_MSR, buf);
- if (result < 0)
+ if (result)
goto err_close;
priv->msr = *buf;
/* read line status */
result = ark3116_read_reg(serial, UART_LSR, buf);
- if (result < 0)
+ if (result)
goto err_close;
priv->lsr = *buf;
@@ -394,31 +397,35 @@ err_free:
return result;
}
+static int ark3116_get_serial_info(struct usb_serial_port *port,
+ struct serial_struct __user *retinfo)
+{
+ struct serial_struct tmp;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ tmp.type = PORT_16654;
+ tmp.line = port->minor;
+ tmp.port = port->port_number;
+ tmp.baud_base = 460800;
+
+ if (copy_to_user(retinfo, &tmp, sizeof(tmp)))
+ return -EFAULT;
+
+ return 0;
+}
+
static int ark3116_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
- struct serial_struct serstruct;
void __user *user_arg = (void __user *)arg;
switch (cmd) {
case TIOCGSERIAL:
- /* XXX: Some of these values are probably wrong. */
- memset(&serstruct, 0, sizeof(serstruct));
- serstruct.type = PORT_16654;
- serstruct.line = port->minor;
- serstruct.port = port->port_number;
- serstruct.custom_divisor = 0;
- serstruct.baud_base = 460800;
-
- if (copy_to_user(user_arg, &serstruct, sizeof(serstruct)))
- return -EFAULT;
-
- return 0;
- case TIOCSSERIAL:
- if (copy_from_user(&serstruct, user_arg, sizeof(serstruct)))
- return -EFAULT;
- return 0;
+ return ark3116_get_serial_info(port, user_arg);
+ default:
+ break;
}
return -ENOIOCTLCMD;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7c6273bf5beb..06d502b3e913 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -174,6 +175,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index e4573b4c8935..4dfbff20bda4 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -41,6 +41,7 @@
#define F81534_MODEM_CONTROL_REG (0x04 + F81534_UART_BASE_ADDRESS)
#define F81534_LINE_STATUS_REG (0x05 + F81534_UART_BASE_ADDRESS)
#define F81534_MODEM_STATUS_REG (0x06 + F81534_UART_BASE_ADDRESS)
+#define F81534_CLOCK_REG (0x08 + F81534_UART_BASE_ADDRESS)
#define F81534_CONFIG1_REG (0x09 + F81534_UART_BASE_ADDRESS)
#define F81534_DEF_CONF_ADDRESS_START 0x3000
@@ -51,13 +52,14 @@
#define F81534_CUSTOM_NO_CUSTOM_DATA 0xff
#define F81534_CUSTOM_VALID_TOKEN 0xf0
#define F81534_CONF_OFFSET 1
+#define F81534_CONF_GPIO_OFFSET 4
#define F81534_MAX_DATA_BLOCK 64
#define F81534_MAX_BUS_RETRY 20
/* Default URB timeout for USB operations */
#define F81534_USB_MAX_RETRY 10
-#define F81534_USB_TIMEOUT 1000
+#define F81534_USB_TIMEOUT 2000
#define F81534_SET_GET_REGISTER 0xA0
#define F81534_NUM_PORT 4
@@ -96,16 +98,43 @@
#define F81534_CMD_READ 0x03
#define F81534_DEFAULT_BAUD_RATE 9600
-#define F81534_MAX_BAUDRATE 115200
+#define F81534_PORT_CONF_RS232 0
+#define F81534_PORT_CONF_RS485 BIT(0)
+#define F81534_PORT_CONF_RS485_INVERT (BIT(0) | BIT(1))
+#define F81534_PORT_CONF_MODE_MASK GENMASK(1, 0)
#define F81534_PORT_CONF_DISABLE_PORT BIT(3)
#define F81534_PORT_CONF_NOT_EXIST_PORT BIT(7)
#define F81534_PORT_UNAVAILABLE \
(F81534_PORT_CONF_DISABLE_PORT | F81534_PORT_CONF_NOT_EXIST_PORT)
+
#define F81534_1X_RXTRIGGER 0xc3
#define F81534_8X_RXTRIGGER 0xcf
+/*
+ * F81532/534 Clock registers (offset +08h)
+ *
+ * Bit0: UART Enable (always on)
+ * Bit2-1: Clock source selector
+ * 00: 1.846MHz.
+ * 01: 18.46MHz.
+ * 10: 24MHz.
+ * 11: 14.77MHz.
+ * Bit4: Auto direction (RTS) control (RTS pin low when TX)
+ * Bit5: Invert direction (RTS) when Bit4 enabled (RTS pin high when TX)
+ */
+
+#define F81534_UART_EN BIT(0)
+#define F81534_CLK_1_846_MHZ 0
+#define F81534_CLK_18_46_MHZ BIT(1)
+#define F81534_CLK_24_MHZ BIT(2)
+#define F81534_CLK_14_77_MHZ (BIT(1) | BIT(2))
+#define F81534_CLK_MASK GENMASK(2, 1)
+#define F81534_CLK_TX_DELAY_1BIT BIT(3)
+#define F81534_CLK_RS485_MODE BIT(4)
+#define F81534_CLK_RS485_INVERT BIT(5)
+
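(Editorial aside, not part of the patch: a minimal sketch of how the
bitfields above compose a clock-register value. The helper name and the
chosen configuration, 24 MHz source with RS485 auto-direction, are
hypothetical.)

	static u8 f81534_example_clock_value(void)
	{
		u8 clk = F81534_UART_EN;	/* Bit0: UART enable, always on */

		clk &= ~F81534_CLK_MASK;	/* clear Bit2-1, as done for shadow_clk */
		clk |= F81534_CLK_24_MHZ;	/* Bit2-1 = 10: 24 MHz source */
		clk |= F81534_CLK_RS485_MODE;	/* Bit4: RTS driven low during TX */

		return clk;
	}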
static const struct usb_device_id f81534_id_table[] = {
{ USB_DEVICE(FINTEK_VENDOR_ID_1, FINTEK_DEVICE_ID) },
{ USB_DEVICE(FINTEK_VENDOR_ID_2, FINTEK_DEVICE_ID) },
@@ -129,12 +158,35 @@ struct f81534_port_private {
struct usb_serial_port *port;
unsigned long tx_empty;
spinlock_t msr_lock;
+ u32 baud_base;
u8 shadow_mcr;
u8 shadow_lcr;
u8 shadow_msr;
+ u8 shadow_clk;
u8 phy_num;
};
+struct f81534_pin_data {
+ const u16 reg_addr;
+ const u8 reg_mask;
+};
+
+struct f81534_port_out_pin {
+ struct f81534_pin_data pin[3];
+};
+
+/* Pin output value for M2/M1/M0(SD) */
+static const struct f81534_port_out_pin f81534_port_out_pins[] = {
+ { { { 0x2ae8, BIT(7) }, { 0x2a90, BIT(5) }, { 0x2a90, BIT(4) } } },
+ { { { 0x2ae8, BIT(6) }, { 0x2ae8, BIT(0) }, { 0x2ae8, BIT(3) } } },
+ { { { 0x2a90, BIT(0) }, { 0x2ae8, BIT(2) }, { 0x2a80, BIT(6) } } },
+ { { { 0x2a90, BIT(3) }, { 0x2a90, BIT(2) }, { 0x2a90, BIT(1) } } },
+};
+
+static u32 const baudrate_table[] = { 115200, 921600, 1152000, 1500000 };
+static u8 const clock_table[] = { F81534_CLK_1_846_MHZ, F81534_CLK_14_77_MHZ,
+ F81534_CLK_18_46_MHZ, F81534_CLK_24_MHZ };
+
static int f81534_logic_to_phy_port(struct usb_serial *serial,
struct usb_serial_port *port)
{
@@ -240,6 +292,36 @@ end:
return status;
}
+static int f81534_set_mask_register(struct usb_serial *serial, u16 reg,
+ u8 mask, u8 data)
+{
+ int status;
+ u8 tmp;
+
+ status = f81534_get_register(serial, reg, &tmp);
+ if (status)
+ return status;
+
+ tmp &= ~mask;
+ tmp |= (mask & data);
+
+ return f81534_set_register(serial, reg, tmp);
+}
+
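(Editorial usage sketch, assumed rather than taken from the patch: the
read-modify-write helper above lets a caller update only the clock-source
bits of a phy port's clock register, leaving the other bits untouched. The
serial and status locals are assumed to come from an enclosing function.)

	/* Hypothetical call: switch phy port 0 to the 18.46 MHz clock source */
	status = f81534_set_mask_register(serial,
			F81534_CLOCK_REG + F81534_UART_OFFSET * 0,
			F81534_CLK_MASK, F81534_CLK_18_46_MHZ);
	if (status)
		return status;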
+static int f81534_set_phy_port_register(struct usb_serial *serial, int phy,
+ u16 reg, u8 data)
+{
+ return f81534_set_register(serial, reg + F81534_UART_OFFSET * phy,
+ data);
+}
+
+static int f81534_get_phy_port_register(struct usb_serial *serial, int phy,
+ u16 reg, u8 *data)
+{
+ return f81534_get_register(serial, reg + F81534_UART_OFFSET * phy,
+ data);
+}
+
static int f81534_set_port_register(struct usb_serial_port *port, u16 reg,
u8 data)
{
@@ -460,13 +542,52 @@ static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
return DIV_ROUND_CLOSEST(clockrate, baudrate);
}
-static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
- u8 lcr)
+static int f81534_find_clk(u32 baudrate)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(baudrate_table); ++idx) {
+ if (baudrate <= baudrate_table[idx] &&
+ baudrate_table[idx] % baudrate == 0)
+ return idx;
+ }
+
+ return -EINVAL;
+}
+
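(Editorial worked example: a request for 9600 baud matches index 0, since
9600 <= 115200 and 115200 % 9600 == 0; the port then runs from the matching
1.846 MHz clock with baud_base 115200, and f81534_calc_baud_divisor(9600,
115200) yields DIV_ROUND_CLOSEST(115200, 9600) = 12. A rate of 921600 fails
the index 0 test and lands on index 1, selecting the 14.77 MHz clock.)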
+static int f81534_set_port_config(struct usb_serial_port *port,
+ struct tty_struct *tty, u32 baudrate, u32 old_baudrate, u8 lcr)
{
struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
u32 divisor;
int status;
+ int i;
+ int idx;
u8 value;
+ u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE};
+
+ for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
+ /* guard f81534_find_clk() against a zero rate (division by zero) */
+ if (!baud_list[i])
+ continue;
+
+ idx = f81534_find_clk(baud_list[i]);
+ if (idx >= 0) {
+ baudrate = baud_list[i];
+ tty_encode_baud_rate(tty, baudrate, baudrate);
+ break;
+ }
+ }
+
+ if (idx < 0)
+ return -EINVAL;
+
+ port_priv->baud_base = baudrate_table[idx];
+ port_priv->shadow_clk &= ~F81534_CLK_MASK;
+ port_priv->shadow_clk |= clock_table[idx];
+
+ status = f81534_set_port_register(port, F81534_CLOCK_REG,
+ port_priv->shadow_clk);
+ if (status) {
+ dev_err(&port->dev, "CLOCK_REG setting failed\n");
+ return status;
+ }
if (baudrate <= 1200)
value = F81534_1X_RXTRIGGER; /* 128 FIFO & TL: 1x */
@@ -482,7 +603,7 @@ static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
if (baudrate <= 1200)
value = UART_FCR_TRIGGER_1 | UART_FCR_ENABLE_FIFO; /* TL: 1 */
else
- value = UART_FCR_R_TRIG_11 | UART_FCR_ENABLE_FIFO; /* TL: 14 */
+ value = UART_FCR_TRIGGER_8 | UART_FCR_ENABLE_FIFO; /* TL: 8 */
status = f81534_set_port_register(port, F81534_FIFO_CONTROL_REG,
value);
@@ -491,7 +612,7 @@ static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
return status;
}
- divisor = f81534_calc_baud_divisor(baudrate, F81534_MAX_BAUDRATE);
+ divisor = f81534_calc_baud_divisor(baudrate, port_priv->baud_base);
mutex_lock(&port_priv->lcr_mutex);
@@ -627,6 +748,70 @@ static int f81534_find_config_idx(struct usb_serial *serial, u8 *index)
}
/*
+ * The F81532/534 will not report a serial port to the USB serial subsystem
+ * when the H/W DCD/DSR/CTS/RI/RX pins are connected to ground.
+ *
+ * To detect the RX pin status, we enable the MCR internal loopback, then
+ * disable it and delay for 60ms. The pin is connected to ground if the LSR
+ * register reports UART_LSR_BI.
+ */
+static bool f81534_check_port_hw_disabled(struct usb_serial *serial, int phy)
+{
+ int status;
+ u8 old_mcr;
+ u8 msr;
+ u8 lsr;
+ u8 msr_mask;
+
+ msr_mask = UART_MSR_DCD | UART_MSR_RI | UART_MSR_DSR | UART_MSR_CTS;
+
+ status = f81534_get_phy_port_register(serial, phy,
+ F81534_MODEM_STATUS_REG, &msr);
+ if (status)
+ return false;
+
+ if ((msr & msr_mask) != msr_mask)
+ return false;
+
+ status = f81534_set_phy_port_register(serial, phy,
+ F81534_FIFO_CONTROL_REG, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ if (status)
+ return false;
+
+ status = f81534_get_phy_port_register(serial, phy,
+ F81534_MODEM_CONTROL_REG, &old_mcr);
+ if (status)
+ return false;
+
+ status = f81534_set_phy_port_register(serial, phy,
+ F81534_MODEM_CONTROL_REG, UART_MCR_LOOP);
+ if (status)
+ return false;
+
+ status = f81534_set_phy_port_register(serial, phy,
+ F81534_MODEM_CONTROL_REG, 0x0);
+ if (status)
+ return false;
+
+ msleep(60);
+
+ status = f81534_get_phy_port_register(serial, phy,
+ F81534_LINE_STATUS_REG, &lsr);
+ if (status)
+ return false;
+
+ status = f81534_set_phy_port_register(serial, phy,
+ F81534_MODEM_CONTROL_REG, old_mcr);
+ if (status)
+ return false;
+
+ if ((lsr & UART_LSR_BI) == UART_LSR_BI)
+ return true;
+
+ return false;
+}
+
+/*
 * We have 2 generations of the F81532/534 IC. All have internal storage.
*
* 1st is pure USB-to-TTL RS232 IC and designed for 4 ports only, no any
@@ -647,14 +832,14 @@ static int f81534_find_config_idx(struct usb_serial *serial, u8 *index)
static int f81534_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
+ struct f81534_serial_private *serial_priv;
struct device *dev = &serial->interface->dev;
int size_bulk_in = usb_endpoint_maxp(epds->bulk_in[0]);
int size_bulk_out = usb_endpoint_maxp(epds->bulk_out[0]);
- u8 setting[F81534_CUSTOM_DATA_SIZE];
- u8 setting_idx;
u8 num_port = 0;
+ int index = 0;
int status;
- size_t i;
+ int i;
if (size_bulk_out != F81534_WRITE_BUFFER_SIZE ||
size_bulk_in != F81534_MAX_RECEIVE_BLOCK_SIZE) {
@@ -662,8 +847,16 @@ static int f81534_calc_num_ports(struct usb_serial *serial,
return -ENODEV;
}
+ serial_priv = devm_kzalloc(&serial->interface->dev,
+ sizeof(*serial_priv), GFP_KERNEL);
+ if (!serial_priv)
+ return -ENOMEM;
+
+ usb_set_serial_data(serial, serial_priv);
+ mutex_init(&serial_priv->urb_mutex);
+
/* Check had custom setting */
- status = f81534_find_config_idx(serial, &setting_idx);
+ status = f81534_find_config_idx(serial, &serial_priv->setting_idx);
if (status) {
dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
__func__, status);
@@ -674,11 +867,12 @@ static int f81534_calc_num_ports(struct usb_serial *serial,
* We'll read custom data only when data available, otherwise we'll
* read default value instead.
*/
- if (setting_idx != F81534_CUSTOM_NO_CUSTOM_DATA) {
+ if (serial_priv->setting_idx != F81534_CUSTOM_NO_CUSTOM_DATA) {
status = f81534_read_flash(serial,
F81534_CUSTOM_ADDRESS_START +
F81534_CONF_OFFSET,
- sizeof(setting), setting);
+ sizeof(serial_priv->conf_data),
+ serial_priv->conf_data);
if (status) {
dev_err(&serial->interface->dev,
"%s: get custom data failed: %d\n",
@@ -688,13 +882,13 @@ static int f81534_calc_num_ports(struct usb_serial *serial,
dev_dbg(&serial->interface->dev,
"%s: read config from block: %d\n", __func__,
- setting_idx);
+ serial_priv->setting_idx);
} else {
/* Read default board setting */
status = f81534_read_flash(serial,
- F81534_DEF_CONF_ADDRESS_START, F81534_NUM_PORT,
- setting);
-
+ F81534_DEF_CONF_ADDRESS_START,
+ sizeof(serial_priv->conf_data),
+ serial_priv->conf_data);
if (status) {
dev_err(&serial->interface->dev,
"%s: read failed: %d\n", __func__,
@@ -708,7 +902,10 @@ static int f81534_calc_num_ports(struct usb_serial *serial,
/* New style, find all possible ports */
for (i = 0; i < F81534_NUM_PORT; ++i) {
- if (setting[i] & F81534_PORT_UNAVAILABLE)
+ if (f81534_check_port_hw_disabled(serial, i))
+ serial_priv->conf_data[i] |= F81534_PORT_UNAVAILABLE;
+
+ if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
continue;
++num_port;
@@ -720,6 +917,17 @@ static int f81534_calc_num_ports(struct usb_serial *serial,
num_port = 4; /* Nothing found, oldest version IC */
}
+ /* Assign phy-to-logic mapping */
+ for (i = 0; i < F81534_NUM_PORT; ++i) {
+ if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
+ continue;
+
+ serial_priv->tty_idx[i] = index++;
+ dev_dbg(&serial->interface->dev,
+ "%s: phy_num: %d, tty_idx: %d\n", __func__, i,
+ serial_priv->tty_idx[i]);
+ }
+
/*
* Setup bulk-out endpoint multiplexing. All ports share the same
* bulk-out endpoint.
@@ -741,6 +949,7 @@ static void f81534_set_termios(struct tty_struct *tty,
u8 new_lcr = 0;
int status;
u32 baud;
+ u32 old_baud;
if (C_BAUD(tty) == B0)
f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
@@ -780,18 +989,14 @@ static void f81534_set_termios(struct tty_struct *tty,
if (!baud)
return;
- if (baud > F81534_MAX_BAUDRATE) {
- if (old_termios)
- baud = tty_termios_baud_rate(old_termios);
- else
- baud = F81534_DEFAULT_BAUD_RATE;
-
- tty_encode_baud_rate(tty, baud, baud);
- }
+ if (old_termios)
+ old_baud = tty_termios_baud_rate(old_termios);
+ else
+ old_baud = F81534_DEFAULT_BAUD_RATE;
dev_dbg(&port->dev, "%s: baud: %d\n", __func__, baud);
- status = f81534_set_port_config(port, baud, new_lcr);
+ status = f81534_set_port_config(port, tty, baud, old_baud, new_lcr);
if (status < 0) {
dev_err(&port->dev, "%s: set port config failed: %d\n",
__func__, status);
@@ -947,7 +1152,7 @@ static int f81534_get_serial_info(struct usb_serial_port *port,
tmp.type = PORT_16550A;
tmp.port = port->port_number;
tmp.line = port->minor;
- tmp.baud_base = F81534_MAX_BAUDRATE;
+ tmp.baud_base = port_priv->baud_base;
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
@@ -1124,79 +1329,6 @@ static void f81534_write_usb_callback(struct urb *urb)
}
}
-static int f81534_attach(struct usb_serial *serial)
-{
- struct f81534_serial_private *serial_priv;
- int index = 0;
- int status;
- int i;
-
- serial_priv = devm_kzalloc(&serial->interface->dev,
- sizeof(*serial_priv), GFP_KERNEL);
- if (!serial_priv)
- return -ENOMEM;
-
- usb_set_serial_data(serial, serial_priv);
-
- mutex_init(&serial_priv->urb_mutex);
-
- /* Check had custom setting */
- status = f81534_find_config_idx(serial, &serial_priv->setting_idx);
- if (status) {
- dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
- __func__, status);
- return status;
- }
-
- /*
- * We'll read custom data only when data available, otherwise we'll
- * read default value instead.
- */
- if (serial_priv->setting_idx == F81534_CUSTOM_NO_CUSTOM_DATA) {
- /*
- * The default configuration layout:
- * byte 0/1/2/3: uart setting
- */
- status = f81534_read_flash(serial,
- F81534_DEF_CONF_ADDRESS_START,
- F81534_DEF_CONF_SIZE,
- serial_priv->conf_data);
- if (status) {
- dev_err(&serial->interface->dev,
- "%s: read reserve data failed: %d\n",
- __func__, status);
- return status;
- }
- } else {
- /* Only read 8 bytes for mode & GPIO */
- status = f81534_read_flash(serial,
- F81534_CUSTOM_ADDRESS_START +
- F81534_CONF_OFFSET,
- sizeof(serial_priv->conf_data),
- serial_priv->conf_data);
- if (status) {
- dev_err(&serial->interface->dev,
- "%s: idx: %d get data failed: %d\n",
- __func__, serial_priv->setting_idx,
- status);
- return status;
- }
- }
-
- /* Assign phy-to-logic mapping */
- for (i = 0; i < F81534_NUM_PORT; ++i) {
- if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
- continue;
-
- serial_priv->tty_idx[i] = index++;
- dev_dbg(&serial->interface->dev,
- "%s: phy_num: %d, tty_idx: %d\n", __func__, i,
- serial_priv->tty_idx[i]);
- }
-
- return 0;
-}
-
static void f81534_lsr_worker(struct work_struct *work)
{
struct f81534_port_private *port_priv;
@@ -1212,15 +1344,54 @@ static void f81534_lsr_worker(struct work_struct *work)
dev_warn(&port->dev, "read LSR failed: %d\n", status);
}
+static int f81534_set_port_output_pin(struct usb_serial_port *port)
+{
+ struct f81534_serial_private *serial_priv;
+ struct f81534_port_private *port_priv;
+ struct usb_serial *serial;
+ const struct f81534_port_out_pin *pins;
+ int status;
+ int i;
+ u8 value;
+ u8 idx;
+
+ serial = port->serial;
+ serial_priv = usb_get_serial_data(serial);
+ port_priv = usb_get_serial_port_data(port);
+
+ idx = F81534_CONF_GPIO_OFFSET + port_priv->phy_num;
+ value = serial_priv->conf_data[idx];
+ pins = &f81534_port_out_pins[port_priv->phy_num];
+
+ for (i = 0; i < ARRAY_SIZE(pins->pin); ++i) {
+ status = f81534_set_mask_register(serial,
+ pins->pin[i].reg_addr, pins->pin[i].reg_mask,
+ value & BIT(i) ? pins->pin[i].reg_mask : 0);
+ if (status)
+ return status;
+ }
+
+ dev_dbg(&port->dev, "Output pin (M0/M1/M2): %d\n", value);
+ return 0;
+}
+
static int f81534_port_probe(struct usb_serial_port *port)
{
+ struct f81534_serial_private *serial_priv;
struct f81534_port_private *port_priv;
int ret;
+ u8 value;
+ serial_priv = usb_get_serial_data(port->serial);
port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL);
if (!port_priv)
return -ENOMEM;
+ /*
+ * TX frame errors occur at baud rates from 384 to 500 kbps, so we
+ * delay all TX data frames by 1 bit.
+ */
+ port_priv->shadow_clk = F81534_UART_EN | F81534_CLK_TX_DELAY_1BIT;
spin_lock_init(&port_priv->msr_lock);
mutex_init(&port_priv->mcr_mutex);
mutex_init(&port_priv->lcr_mutex);
@@ -1248,7 +1419,25 @@ static int f81534_port_probe(struct usb_serial_port *port)
if (ret)
return ret;
- return 0;
+ value = serial_priv->conf_data[port_priv->phy_num];
+ switch (value & F81534_PORT_CONF_MODE_MASK) {
+ case F81534_PORT_CONF_RS485_INVERT:
+ port_priv->shadow_clk |= F81534_CLK_RS485_MODE |
+ F81534_CLK_RS485_INVERT;
+ dev_dbg(&port->dev, "RS485 invert mode\n");
+ break;
+ case F81534_PORT_CONF_RS485:
+ port_priv->shadow_clk |= F81534_CLK_RS485_MODE;
+ dev_dbg(&port->dev, "RS485 mode\n");
+ break;
+
+ default:
+ case F81534_PORT_CONF_RS232:
+ dev_dbg(&port->dev, "RS232 mode\n");
+ break;
+ }
+
+ return f81534_set_port_output_pin(port);
}
static int f81534_port_remove(struct usb_serial_port *port)
@@ -1387,7 +1576,6 @@ static struct usb_serial_driver f81534_device = {
.write = f81534_write,
.tx_empty = f81534_tx_empty,
.calc_num_ports = f81534_calc_num_ports,
- .attach = f81534_attach,
.port_probe = f81534_port_probe,
.port_remove = f81534_port_remove,
.break_ctl = f81534_break_ctl,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index fc68952c994a..f58c4ff6b387 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1691,7 +1691,7 @@ static DEVICE_ATTR_RW(latency_timer);
/* Write an event character directly to the FTDI register. The ASCII
value is in the low 8 bits, with the enable bit in the 9th bit. */
-static ssize_t store_event_char(struct device *dev,
+static ssize_t event_char_store(struct device *dev,
struct device_attribute *attr, const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
@@ -1718,7 +1718,7 @@ static ssize_t store_event_char(struct device *dev,
return count;
}
-static DEVICE_ATTR(event_char, S_IWUSR, NULL, store_event_char);
+static DEVICE_ATTR_WO(event_char);
static int create_sysfs_attrs(struct usb_serial_port *port)
{
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 219265ce3711..17283f4b4779 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2282,7 +2282,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
/* something went wrong */
dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
__func__, status);
- usb_kill_urb(urb);
usb_free_urb(urb);
atomic_dec(&CmdUrbs);
return status;
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 397a8012ffa3..62c91e360baf 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -472,7 +472,6 @@ static int iuu_clk(struct usb_serial_port *port, int dwFrq)
}
}
P2 = ((P - PO) / 2) - 4;
- DIV = DIV;
PUMP = 0x04;
PBmsb = (P2 >> 8 & 0x03);
PBlsb = P2 & 0xFF;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index f4df3d5bf69c..bd57630e67e2 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -518,7 +518,7 @@ static void parport_mos7715_write_control(struct parport *pp, unsigned char d)
static unsigned char parport_mos7715_read_control(struct parport *pp)
{
- struct mos7715_parport *mos_parport = pp->private_data;
+ struct mos7715_parport *mos_parport;
__u8 dcr;
spin_lock(&release_lock);
@@ -554,7 +554,7 @@ static unsigned char parport_mos7715_frob_control(struct parport *pp,
static unsigned char parport_mos7715_read_status(struct parport *pp)
{
unsigned char status;
- struct mos7715_parport *mos_parport = pp->private_data;
+ struct mos7715_parport *mos_parport;
spin_lock(&release_lock);
mos_parport = pp->private_data;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index b6320e3be429..5db8ed517e0e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -380,6 +380,9 @@ static void option_instat_callback(struct urb *urb);
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
+/* Fujisoft products */
+#define FUJISOFT_PRODUCT_FS040U 0x9b02
+
/* iBall 3.5G connect wireless modem */
#define IBALL_3_5G_CONNECT 0x9605
@@ -1894,6 +1897,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
.driver_info = (kernel_ulong_t)&four_g_w100_blacklist
},
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 57ae832a49ff..46dd09da2434 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -38,6 +38,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index f98fd84890de..fcd72396a7b6 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -12,6 +12,7 @@
#define PL2303_PRODUCT_ID_DCU11 0x1234
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
#define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
+#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8
#define PL2303_PRODUCT_ID_ALDIGA 0x0611
#define PL2303_PRODUCT_ID_MMX 0x0612
#define PL2303_PRODUCT_ID_GPRS 0x0609
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 74172fe158df..4ef79e29cb26 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -77,6 +77,11 @@ DEVICE(vivopay, VIVOPAY_IDS);
{ USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
DEVICE(moto_modem, MOTO_IDS);
+/* Motorola Tetra driver */
+#define MOTOROLA_TETRA_IDS() \
+ { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
+DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
+
/* Novatel Wireless GPS driver */
#define NOVATEL_IDS() \
{ USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
@@ -107,6 +112,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&google_device,
&vivopay_device,
&moto_modem_device,
+ &motorola_tetra_device,
&novatel_gps_device,
&hp4x_device,
&suunto_device,
@@ -122,6 +128,7 @@ static const struct usb_device_id id_table[] = {
GOOGLE_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
+ MOTOROLA_TETRA_IDS(),
NOVATEL_IDS(),
HP4X_IDS(),
SUUNTO_IDS(),
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index e5a4969d15ae..d7f50b7a079e 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -25,9 +25,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#include <linux/device.h>
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index 8833cd4f78b6..6d64f342f587 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -22,9 +22,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#ifndef _DEBUG_H_
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 93a6bcf77806..f8f9ce8dc710 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -18,9 +18,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#include <linux/errno.h>
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index e4cf28efb4a7..2dbf9c7d9749 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -18,9 +18,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#include "usb.h"
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index f3f2a93f52e1..9033e505db7f 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -25,9 +25,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#include <linux/highmem.h>
diff --git a/drivers/usb/storage/protocol.h b/drivers/usb/storage/protocol.h
index 9198396e8c6e..072f1ffda2af 100644
--- a/drivers/usb/storage/protocol.h
+++ b/drivers/usb/storage/protocol.h
@@ -19,9 +19,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#ifndef _PROTOCOL_H_
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 585efd120193..c267f2812a04 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -26,9 +26,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#include <linux/module.h>
diff --git a/drivers/usb/storage/scsiglue.h b/drivers/usb/storage/scsiglue.h
index bf99c6201331..2bc5ea045bf7 100644
--- a/drivers/usb/storage/scsiglue.h
+++ b/drivers/usb/storage/scsiglue.h
@@ -19,9 +19,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#ifndef _SCSIGLUE_H_
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index daf62448483f..6ac60abd2e15 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -90,7 +90,7 @@ static void debug_swoc(const struct device *dev, struct swoc_info *swocInfo)
}
-static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
+static ssize_t truinst_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct swoc_info *swocInfo;
@@ -122,7 +122,7 @@ static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
}
return result;
}
-static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
+static DEVICE_ATTR_RO(truinst);
int sierra_ms_init(struct us_data *us)
{
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index d947957f3635..96cb0409dd89 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -26,9 +26,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#include <linux/sched.h>
diff --git a/drivers/usb/storage/transport.h b/drivers/usb/storage/transport.h
index f559dc575f4f..fb3bb4ee4ccf 100644
--- a/drivers/usb/storage/transport.h
+++ b/drivers/usb/storage/transport.h
@@ -19,9 +19,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#ifndef _TRANSPORT_H_
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 5d04c40ee40a..3b1b9695177a 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1076,20 +1076,19 @@ static int uas_post_reset(struct usb_interface *intf)
return 0;
err = uas_configure_endpoints(devinfo);
- if (err) {
+ if (err && err != -ENODEV)
shost_printk(KERN_ERR, shost,
"%s: alloc streams error %d after reset",
__func__, err);
- return 1;
- }
+ /* we must unblock the host in every case lest we deadlock */
spin_lock_irqsave(shost->host_lock, flags);
scsi_report_bus_reset(shost, 0);
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_unblock_requests(shost);
- return 0;
+ return err ? 1 : 0;
}
static int uas_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f72d045ee9ef..264af199aec8 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -8,9 +8,6 @@
*
* Initial work by:
* (c) 2000 Adam J. Richter (adam@yggdrasil.com), Yggdrasil Computing, Inc.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
/*
@@ -31,10 +28,7 @@
* explanation of the reason for the entry),
* - a copy of /sys/kernel/debug/usb/devices with your device plugged in
* running with this patch.
- * Send your submission to either Phil Dibowitz <phil@ipom.com> or
- * Alan Stern <stern@rowland.harvard.edu>, and don't forget to CC: the
- * USB development list <linux-usb@vger.kernel.org> and the USB storage list
- * <usb-storage@lists.one-eyed-alien.net>
+ * Send your submission to the USB development list <linux-usb@vger.kernel.org>
*/
/*
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index e6127fb21c12..38434d88954a 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -38,20 +38,6 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
-/* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
-UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999,
- "Seagate",
- "Expansion Desk",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
-
-/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
-UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
- "Seagate",
- "Expansion Desk",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
-
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
@@ -59,55 +45,6 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
-/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
-UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
- "Seagate",
- "Expansion Desk",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
-
-/* Reported-by: Bogdan Mihalcea <bogdan.mihalcea@infim.ro> */
-UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
- "Seagate",
- "Backup Plus",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
-
-/* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */
-UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999,
- "Seagate",
- "Backup Plus",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
-
-/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
-UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999,
- "Seagate",
- "Backup Plus Desk",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
-
-/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
-UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
- "Seagate",
- "Backup+ BK",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
-
-/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
-UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
- "Seagate",
- "Backup+ BK",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
-
-/* Reported-by: G. Richard Bellamy <rbellamy@pteradigm.com> */
-UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
- "Seagate",
- "BUP Fast HDD",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
-
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
@@ -143,6 +80,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
+UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
+ "Norelsys",
+ "NS1068X",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
"JMicron",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index a0c07e05a8f1..9a79cd9762f3 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -28,9 +28,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#ifdef CONFIG_USB_STORAGE_DEBUG
@@ -211,8 +208,8 @@ int usb_stor_reset_resume(struct usb_interface *iface)
usb_stor_report_bus_reset(us);
/*
- * FIXME: Notify the subdrivers that they need to reinitialize
- * the device
+ * If any of the subdrivers implemented a reinitialization scheme,
+ * this is where the callback would be invoked.
*/
return 0;
}
@@ -243,8 +240,8 @@ int usb_stor_post_reset(struct usb_interface *iface)
usb_stor_report_bus_reset(us);
/*
- * FIXME: Notify the subdrivers that they need to reinitialize
- * the device
+ * If any of the subdrivers implemented a reinitialization scheme,
+ * this is where the callback would be invoked.
*/
mutex_unlock(&us->dev_mutex);
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 90133e16bec5..85052cd66839 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -22,9 +22,6 @@
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#ifndef _USB_H_
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index 83ad01747eed..cfd12e523678 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -4,9 +4,6 @@
* Usual Tables File for usb-storage and libusual
*
* Copyright (C) 2009 Alan Stern (stern@rowland.harvard.edu)
- *
- * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
- * information about this driver.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
index 72cb060b3fca..9ce4756adad6 100644
--- a/drivers/usb/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/fusb302/fusb302.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/power_supply.h>
@@ -1543,6 +1542,21 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip,
fusb302_log(chip, "PD message header: %x", msg->header);
fusb302_log(chip, "PD message len: %d", len);
+ /*
+ * Check if we've read off a GoodCRC message. If so, indicate to TCPM
+ * that the previous transmission has completed. Otherwise we pass
+ * the received message over to TCPM for processing.
+ *
+ * We make this check here instead of basing the reporting decision on
+ * the IRQ event type, as it's possible for the chip to report the
+ * TX_SUCCESS and GCRCSENT events out of order on occasion, so we need
+ * to check the message type to ensure correct reporting to TCPM.
+ */
+ if ((!len) && (pd_header_type_le(msg->header) == PD_CTRL_GOOD_CRC))
+ tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
+ else
+ tcpm_pd_receive(chip->tcpm_port, msg);
+
return ret;
}
@@ -1650,13 +1664,12 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
if (interrupta & FUSB_REG_INTERRUPTA_TX_SUCCESS) {
fusb302_log(chip, "IRQ: PD tx success");
- /* read out the received good CRC */
ret = fusb302_pd_read_message(chip, &pd_msg);
if (ret < 0) {
- fusb302_log(chip, "cannot read in GCRC, ret=%d", ret);
+ fusb302_log(chip,
+ "cannot read in PD message, ret=%d", ret);
goto done;
}
- tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
}
if (interrupta & FUSB_REG_INTERRUPTA_HARDRESET) {
@@ -1677,7 +1690,6 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
"cannot read in PD message, ret=%d", ret);
goto done;
}
- tcpm_pd_receive(chip->tcpm_port, &pd_msg);
}
done:
mutex_unlock(&chip->lock);
@@ -1731,24 +1743,24 @@ static int init_gpio(struct fusb302_chip *chip)
chip->gpio_int_n = of_get_named_gpio(node, "fcs,int_n", 0);
if (!gpio_is_valid(chip->gpio_int_n)) {
ret = chip->gpio_int_n;
- fusb302_log(chip, "cannot get named GPIO Int_N, ret=%d", ret);
+ dev_err(chip->dev, "cannot get named GPIO Int_N, ret=%d", ret);
return ret;
}
ret = devm_gpio_request(chip->dev, chip->gpio_int_n, "fcs,int_n");
if (ret < 0) {
- fusb302_log(chip, "cannot request GPIO Int_N, ret=%d", ret);
+ dev_err(chip->dev, "cannot request GPIO Int_N, ret=%d", ret);
return ret;
}
ret = gpio_direction_input(chip->gpio_int_n);
if (ret < 0) {
- fusb302_log(chip,
- "cannot set GPIO Int_N to input, ret=%d", ret);
+ dev_err(chip->dev,
+ "cannot set GPIO Int_N to input, ret=%d", ret);
return ret;
}
ret = gpio_to_irq(chip->gpio_int_n);
if (ret < 0) {
- fusb302_log(chip,
- "cannot request IRQ for GPIO Int_N, ret=%d", ret);
+ dev_err(chip->dev,
+ "cannot request IRQ for GPIO Int_N, ret=%d", ret);
return ret;
}
chip->gpio_int_n_irq = ret;
@@ -1845,7 +1857,7 @@ static int fusb302_probe(struct i2c_client *client,
chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);
if (IS_ERR(chip->tcpm_port)) {
ret = PTR_ERR(chip->tcpm_port);
- fusb302_log(chip, "cannot register tcpm port, ret=%d", ret);
+ dev_err(dev, "cannot register tcpm port, ret=%d", ret);
goto destroy_workqueue;
}
@@ -1854,8 +1866,7 @@ static int fusb302_probe(struct i2c_client *client,
IRQF_ONESHOT | IRQF_TRIGGER_LOW,
"fsc_interrupt_int_n", chip);
if (ret < 0) {
- fusb302_log(chip,
- "cannot request IRQ for GPIO Int_N, ret=%d", ret);
+ dev_err(dev, "cannot request IRQ for GPIO Int_N, ret=%d", ret);
goto tcpm_unregister_port;
}
enable_irq_wake(chip->gpio_int_n_irq);
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index c166fc77dfb8..f4d563ee7690 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -252,6 +252,9 @@ struct tcpm_port {
unsigned int nr_src_pdo;
u32 snk_pdo[PDO_MAX_OBJECTS];
unsigned int nr_snk_pdo;
+ unsigned int nr_fixed; /* number of fixed sink PDOs */
+ unsigned int nr_var; /* number of variable sink PDOs */
+ unsigned int nr_batt; /* number of battery sink PDOs */
u32 snk_vdo[VDO_MAX_OBJECTS];
unsigned int nr_snk_vdo;
@@ -1247,6 +1250,100 @@ static void vdm_state_machine_work(struct work_struct *work)
mutex_unlock(&port->lock);
}
+enum pdo_err {
+ PDO_NO_ERR,
+ PDO_ERR_NO_VSAFE5V,
+ PDO_ERR_VSAFE5V_NOT_FIRST,
+ PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
+ PDO_ERR_FIXED_NOT_SORTED,
+ PDO_ERR_VARIABLE_BATT_NOT_SORTED,
+ PDO_ERR_DUPE_PDO,
+};
+
+static const char * const pdo_err_msg[] = {
+ [PDO_ERR_NO_VSAFE5V] =
+ " err: source/sink caps should at least have vSafe5V",
+ [PDO_ERR_VSAFE5V_NOT_FIRST] =
+ " err: vSafe5V Fixed Supply Object Shall always be the first object",
+ [PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
+ " err: PDOs should be in the following order: Fixed; Battery; Variable",
+ [PDO_ERR_FIXED_NOT_SORTED] =
+ " err: Fixed supply pdos should be in increasing order of their fixed voltage",
+ [PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
+ " err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
+ [PDO_ERR_DUPE_PDO] =
+ " err: Variable/Batt supply pdos cannot have same min/max voltage",
+};
+
+static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo)
+{
+ unsigned int i;
+
+ /* Should at least contain vSafe5V */
+ if (nr_pdo < 1)
+ return PDO_ERR_NO_VSAFE5V;
+
+ /* The vSafe5V Fixed Supply Object Shall always be the first object */
+ if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
+ pdo_fixed_voltage(pdo[0]) != VSAFE5V)
+ return PDO_ERR_VSAFE5V_NOT_FIRST;
+
+ for (i = 1; i < nr_pdo; i++) {
+ if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
+ return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
+ } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
+ enum pd_pdo_type type = pdo_type(pdo[i]);
+
+ switch (type) {
+ /*
+ * The remaining Fixed Supply Objects, if
+ * present, shall be sent in voltage order;
+ * lowest to highest.
+ */
+ case PDO_TYPE_FIXED:
+ if (pdo_fixed_voltage(pdo[i]) <=
+ pdo_fixed_voltage(pdo[i - 1]))
+ return PDO_ERR_FIXED_NOT_SORTED;
+ break;
+ /*
+ * The Battery and Variable Supply Objects,
+ * if present, shall be sent in Minimum
+ * Voltage order; lowest to highest.
+ */
+ case PDO_TYPE_VAR:
+ case PDO_TYPE_BATT:
+ if (pdo_min_voltage(pdo[i]) <
+ pdo_min_voltage(pdo[i - 1]))
+ return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
+ else if ((pdo_min_voltage(pdo[i]) ==
+ pdo_min_voltage(pdo[i - 1])) &&
+ (pdo_max_voltage(pdo[i]) ==
+ pdo_max_voltage(pdo[i - 1])))
+ return PDO_ERR_DUPE_PDO;
+ break;
+ default:
+ tcpm_log_force(port, " Unknown pdo type");
+ }
+ }
+ }
+
+ return PDO_NO_ERR;
+}
+
+static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo)
+{
+ enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
+
+ if (err_index != PDO_NO_ERR) {
+ tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
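(For illustration only; this table is an assumption, not taken from the
patch. A sink-capability array that satisfies tcpm_validate_caps(): the
vSafe5V fixed PDO first, fixed PDOs in increasing voltage, then battery and
variable PDOs in increasing minimum-voltage order.)

	static const u32 example_snk_pdo[] = {
		PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM), /* vSafe5V must be first */
		PDO_FIXED(9000, 2000, 0),	/* fixed PDOs sorted by voltage */
		PDO_BATT(4750, 12000, 15000),	/* battery before variable */
		PDO_VAR(4750, 12000, 3000),	/* variable supplies last */
	};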
/*
* PD (data, control) command handling functions
*/
@@ -1269,6 +1366,9 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
tcpm_log_source_caps(port);
+ tcpm_validate_caps(port, port->source_caps,
+ port->nr_source_caps);
+
/*
* This message may be received even if VBUS is not
* present. This is quite unexpected; see USB PD
@@ -1670,39 +1770,90 @@ static int tcpm_pd_check_request(struct tcpm_port *port)
return 0;
}
-static int tcpm_pd_select_pdo(struct tcpm_port *port)
+#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
+#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
+
+static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
+ int *src_pdo)
{
- unsigned int i, max_mw = 0, max_mv = 0;
+ unsigned int i, j, max_mw = 0, max_mv = 0, mw = 0, mv = 0, ma = 0;
int ret = -EINVAL;
/*
- * Select the source PDO providing the most power while staying within
- * the board's voltage limits. Prefer PDO providing exp
+ * Select the source PDO providing the most power that has a
+ * matching sink cap.
*/
for (i = 0; i < port->nr_source_caps; i++) {
u32 pdo = port->source_caps[i];
enum pd_pdo_type type = pdo_type(pdo);
- unsigned int mv, ma, mw;
-
- if (type == PDO_TYPE_FIXED)
- mv = pdo_fixed_voltage(pdo);
- else
- mv = pdo_min_voltage(pdo);
-
- if (type == PDO_TYPE_BATT) {
- mw = pdo_max_power(pdo);
- } else {
- ma = min(pdo_max_current(pdo),
- port->max_snk_ma);
- mw = ma * mv / 1000;
- }
- /* Perfer higher voltages if available */
- if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
- mv <= port->max_snk_mv) {
- ret = i;
- max_mw = mw;
- max_mv = mv;
+ if (type == PDO_TYPE_FIXED) {
+ for (j = 0; j < port->nr_fixed; j++) {
+ if (pdo_fixed_voltage(pdo) ==
+ pdo_fixed_voltage(port->snk_pdo[j])) {
+ ma = min_current(pdo, port->snk_pdo[j]);
+ mv = pdo_fixed_voltage(pdo);
+ mw = ma * mv / 1000;
+ if (mw > max_mw ||
+ (mw == max_mw && mv > max_mv)) {
+ ret = 0;
+ *src_pdo = i;
+ *sink_pdo = j;
+ max_mw = mw;
+ max_mv = mv;
+ }
+ /* There can only be one fixed PDO
+ * at a specific voltage level,
+ * so break here.
+ */
+ break;
+ }
+ }
+ } else if (type == PDO_TYPE_BATT) {
+ for (j = port->nr_fixed;
+ j < port->nr_fixed +
+ port->nr_batt;
+ j++) {
+ if (pdo_min_voltage(pdo) >=
+ pdo_min_voltage(port->snk_pdo[j]) &&
+ pdo_max_voltage(pdo) <=
+ pdo_max_voltage(port->snk_pdo[j])) {
+ mw = min_power(pdo, port->snk_pdo[j]);
+ mv = pdo_min_voltage(pdo);
+ if (mw > max_mw ||
+ (mw == max_mw && mv > max_mv)) {
+ ret = 0;
+ *src_pdo = i;
+ *sink_pdo = j;
+ max_mw = mw;
+ max_mv = mv;
+ }
+ }
+ }
+ } else if (type == PDO_TYPE_VAR) {
+ for (j = port->nr_fixed +
+ port->nr_batt;
+ j < port->nr_fixed +
+ port->nr_batt +
+ port->nr_var;
+ j++) {
+ if (pdo_min_voltage(pdo) >=
+ pdo_min_voltage(port->snk_pdo[j]) &&
+ pdo_max_voltage(pdo) <=
+ pdo_max_voltage(port->snk_pdo[j])) {
+ ma = min_current(pdo, port->snk_pdo[j]);
+ mv = pdo_min_voltage(pdo);
+ mw = ma * mv / 1000;
+ if (mw > max_mw ||
+ (mw == max_mw && mv > max_mv)) {
+ ret = 0;
+ *src_pdo = i;
+ *sink_pdo = j;
+ max_mw = mw;
+ max_mv = mv;
+ }
+ }
+ }
}
}
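(Editorial worked example with assumed numbers: given source caps
{ Fixed 5V/3A, Fixed 9V/3A } and sink caps { Fixed 5V/3A, Fixed 9V/2A },
both fixed pairs match on voltage. The 5 V pair offers min(3000, 3000) mA *
5000 mV / 1000 = 15000 mW, while the 9 V pair offers min(3000, 2000) mA *
9000 mV / 1000 = 18000 mW, so the search settles on the 9 V pair and returns
its indices through *src_pdo and *sink_pdo.)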
@@ -1714,13 +1865,14 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
unsigned int mv, ma, mw, flags;
unsigned int max_ma, max_mw;
enum pd_pdo_type type;
- int index;
- u32 pdo;
+ int src_pdo_index, snk_pdo_index;
+ u32 pdo, matching_snk_pdo;
- index = tcpm_pd_select_pdo(port);
- if (index < 0)
+ if (tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index) < 0)
return -EINVAL;
- pdo = port->source_caps[index];
+
+ pdo = port->source_caps[src_pdo_index];
+ matching_snk_pdo = port->snk_pdo[snk_pdo_index];
type = pdo_type(pdo);
if (type == PDO_TYPE_FIXED)
@@ -1728,26 +1880,28 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
else
mv = pdo_min_voltage(pdo);
- /* Select maximum available current within the board's power limit */
+ /* Select maximum available current within the sink pdo's limit */
if (type == PDO_TYPE_BATT) {
- mw = pdo_max_power(pdo);
- ma = 1000 * min(mw, port->max_snk_mw) / mv;
+ mw = min_power(pdo, matching_snk_pdo);
+ ma = 1000 * mw / mv;
} else {
- ma = min(pdo_max_current(pdo),
- 1000 * port->max_snk_mw / mv);
+ ma = min_current(pdo, matching_snk_pdo);
+ mw = ma * mv / 1000;
}
- ma = min(ma, port->max_snk_ma);
flags = RDO_USB_COMM | RDO_NO_SUSPEND;
/* Set mismatch bit if offered power is less than operating power */
- mw = ma * mv / 1000;
max_ma = ma;
max_mw = mw;
if (mw < port->operating_snk_mw) {
flags |= RDO_CAP_MISMATCH;
- max_mw = port->operating_snk_mw;
- max_ma = max_mw * 1000 / mv;
+ if (type == PDO_TYPE_BATT &&
+ (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
+ max_mw = pdo_max_power(matching_snk_pdo);
+ else if (pdo_max_current(matching_snk_pdo) >
+ pdo_max_current(pdo))
+ max_ma = pdo_max_current(matching_snk_pdo);
}
tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
@@ -1756,16 +1910,16 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
port->polarity);
if (type == PDO_TYPE_BATT) {
- *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
+ *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
- index, mv, mw,
+ src_pdo_index, mv, mw,
flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
} else {
- *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
+ *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
- index, mv, ma,
+ src_pdo_index, mv, ma,
flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
}
@@ -3435,9 +3589,12 @@ static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
return nr_vdo;
}
-void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo)
+int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo)
{
+ if (tcpm_validate_caps(port, pdo, nr_pdo))
+ return -EINVAL;
+
mutex_lock(&port->lock);
port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
switch (port->state) {
@@ -3457,16 +3614,20 @@ void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
break;
}
mutex_unlock(&port->lock);
+ return 0;
}
EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
-void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo,
- unsigned int max_snk_mv,
- unsigned int max_snk_ma,
- unsigned int max_snk_mw,
- unsigned int operating_snk_mw)
+int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo,
+ unsigned int max_snk_mv,
+ unsigned int max_snk_ma,
+ unsigned int max_snk_mw,
+ unsigned int operating_snk_mw)
{
+ if (tcpm_validate_caps(port, pdo, nr_pdo))
+ return -EINVAL;
+
mutex_lock(&port->lock);
port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
port->max_snk_mv = max_snk_mv;
@@ -3485,9 +3646,23 @@ void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
break;
}
mutex_unlock(&port->lock);
+ return 0;
}
EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
+static int nr_type_pdos(const u32 *pdo, unsigned int nr_pdo,
+ enum pd_pdo_type type)
+{
+ int count = 0;
+ int i;
+
+ for (i = 0; i < nr_pdo; i++) {
+ if (pdo_type(pdo[i]) == type)
+ count++;
+ }
+ return count;
+}
+
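(Editorial note: because tcpm_validate_caps() enforces the Fixed; Battery;
Variable ordering, the counts computed here also describe contiguous index
ranges in port->snk_pdo: fixed PDOs occupy [0, nr_fixed), battery PDOs
[nr_fixed, nr_fixed + nr_batt), and variable PDOs the remainder, which is
exactly how tcpm_pd_select_pdo() iterates over them.)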
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
struct tcpm_port *port;
@@ -3520,11 +3695,28 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
init_completion(&port->tx_complete);
init_completion(&port->swap_complete);
+ tcpm_debugfs_init(port);
+ if (tcpm_validate_caps(port, tcpc->config->src_pdo,
+ tcpc->config->nr_src_pdo) ||
+ tcpm_validate_caps(port, tcpc->config->snk_pdo,
+ tcpc->config->nr_snk_pdo)) {
+ err = -EINVAL;
+ goto out_destroy_wq;
+ }
port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
tcpc->config->nr_src_pdo);
port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
tcpc->config->nr_snk_pdo);
+ port->nr_fixed = nr_type_pdos(port->snk_pdo,
+ port->nr_snk_pdo,
+ PDO_TYPE_FIXED);
+ port->nr_var = nr_type_pdos(port->snk_pdo,
+ port->nr_snk_pdo,
+ PDO_TYPE_VAR);
+ port->nr_batt = nr_type_pdos(port->snk_pdo,
+ port->nr_snk_pdo,
+ PDO_TYPE_BATT);
port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
tcpc->config->nr_snk_vdo);
@@ -3575,7 +3767,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
}
}
- tcpm_debugfs_init(port);
mutex_lock(&port->lock);
tcpm_init(port);
mutex_unlock(&port->lock);
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
index a8d479eb221a..2e990e0d917d 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/typec_wcove.c
@@ -556,10 +556,8 @@ static const u32 src_pdo[] = {
};
static const u32 snk_pdo[] = {
- PDO_FIXED(12000, 3000, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP |
+ PDO_FIXED(5000, 500, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP |
PDO_FIXED_USB_COMM),
- PDO_BATT(4750, 12000, 15000),
- PDO_VAR(4750, 12000, 3000),
};
static struct tcpc_config wcove_typec_config = {
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index e31a6f204397..49e552472c3f 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -39,7 +39,7 @@ static DEVICE_ATTR_RO(usbip_status);
* is used to transfer usbip requests by kernel threads. -1 is a magic number
* by which usbip connection is finished.
*/
-static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
+static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct stub_device *sdev = dev_get_drvdata(dev);
@@ -103,7 +103,7 @@ err:
spin_unlock_irq(&sdev->ud.lock);
return -EINVAL;
}
-static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd);
+static DEVICE_ATTR_WO(usbip_sockfd);
static int stub_add_files(struct device *dev)
{
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 6c5a59313999..97b09a42a10c 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -428,9 +428,6 @@ static void masking_bogus_flags(struct urb *urb)
if (is_out)
allowed |= URB_ZERO_PACKET;
/* FALLTHROUGH */
- case USB_ENDPOINT_XFER_CONTROL:
- allowed |= URB_NO_FSBR; /* only affects UHCI */
- /* FALLTHROUGH */
default: /* all non-iso endpoints */
if (!is_out)
allowed |= URB_SHORT_NOT_OK;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 7b219d9109b4..9756752c0681 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -91,7 +91,7 @@ static void usbip_dump_usb_device(struct usb_device *udev)
dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)",
udev->devnum, udev->devpath, usb_speed_string(udev->speed));
- pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport);
+ pr_debug("tt hub ttport %d\n", udev->ttport);
dev_dbg(dev, " ");
for (i = 0; i < 16; i++)
@@ -124,12 +124,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
}
pr_debug("\n");
- dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus);
-
- dev_dbg(dev,
- "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
- &udev->descriptor, udev->config,
- udev->actconfig, udev->rawdescriptors);
+ dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
+ udev->bus->bus_name);
dev_dbg(dev, "have_langid %d, string_langid %d\n",
udev->have_langid, udev->string_langid);
@@ -237,9 +233,6 @@ void usbip_dump_urb(struct urb *urb)
dev = &urb->dev->dev;
- dev_dbg(dev, " urb :%p\n", urb);
- dev_dbg(dev, " dev :%p\n", urb->dev);
-
usbip_dump_usb_device(urb->dev);
dev_dbg(dev, " pipe :%08x ", urb->pipe);
@@ -248,11 +241,9 @@ void usbip_dump_urb(struct urb *urb)
dev_dbg(dev, " status :%d\n", urb->status);
dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags);
- dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer);
dev_dbg(dev, " transfer_buffer_length:%d\n",
urb->transfer_buffer_length);
dev_dbg(dev, " actual_length :%d\n", urb->actual_length);
- dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet);
if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
usbip_dump_usb_ctrlrequest(
@@ -262,8 +253,6 @@ void usbip_dump_urb(struct urb *urb)
dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets);
dev_dbg(dev, " interval :%d\n", urb->interval);
dev_dbg(dev, " error_count :%d\n", urb->error_count);
- dev_dbg(dev, " context :%p\n", urb->context);
- dev_dbg(dev, " complete :%p\n", urb->complete);
}
EXPORT_SYMBOL_GPL(usbip_dump_urb);
@@ -325,7 +314,6 @@ int usbip_recv(struct socket *sock, void *buf, int size)
usbip_dbg_xmit("enter\n");
do {
- msg_data_left(&msg);
sock->sk->sk_allocation = GFP_NOIO;
result = sock_recvmsg(sock, &msg, MSG_WAITALL);
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 112ebb90d8c9..44cd64518925 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -30,7 +30,7 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
/* fall through */
case -ECONNRESET:
dev_dbg(&urb->dev->dev,
- "urb seq# %u was unlinked %ssynchronuously\n",
+ "urb seq# %u was unlinked %ssynchronously\n",
seqnum, status == -ENOENT ? "" : "a");
break;
case -EINPROGRESS:
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index 091f76b7196d..48808388ec33 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -17,10 +17,10 @@
/*
* output example:
- * hub port sta spd dev sockfd local_busid
- * hs 0000 004 000 00000000 3 1-2.3
+ * hub port sta spd dev sockfd local_busid
+ * hs 0000 004 000 00000000 000003 1-2.3
* ................................................
- * ss 0008 004 000 00000000 4 2-3.4
+ * ss 0008 004 000 00000000 000004 2-3.4
* ................................................
*
* Output includes socket fd instead of socket pointer address to avoid
@@ -44,13 +44,13 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
if (vdev->ud.status == VDEV_ST_USED) {
*out += sprintf(*out, "%03u %08x ",
vdev->speed, vdev->devid);
- *out += sprintf(*out, "%u %s",
+ *out += sprintf(*out, "%06u %s",
vdev->ud.sockfd,
dev_name(&vdev->udev->dev));
} else {
*out += sprintf(*out, "000 00000000 ");
- *out += sprintf(*out, "0000000000000000 0-0");
+ *out += sprintf(*out, "000000 0-0");
}
*out += sprintf(*out, "\n");
@@ -148,7 +148,7 @@ static ssize_t status_show(struct device *dev,
int pdev_nr;
out += sprintf(out,
- "hub port sta spd dev socket local_busid\n");
+ "hub port sta spd dev sockfd local_busid\n");
pdev_nr = status_name_to_id(attr->attr.name);
if (pdev_nr < 0)
@@ -218,7 +218,7 @@ static int valid_port(__u32 pdev_nr, __u32 rhport)
return 1;
}
-static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
+static ssize_t detach_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
__u32 port = 0, pdev_nr = 0, rhport = 0;
@@ -256,7 +256,7 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach);
+static DEVICE_ATTR_WO(detach);
static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
{
@@ -292,7 +292,7 @@ static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
*
* write() returns 0 on success, else negative errno.
*/
-static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
+static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct socket *socket;
@@ -387,7 +387,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(attach, S_IWUSR, NULL, store_attach);
+static DEVICE_ATTR_WO(attach);
#define MAX_STATUS_NAME 16
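The store_attach/store_detach/store_sockfd renames in the hunks above are required, not cosmetic: DEVICE_ATTR_WO(name) pastes the token name into both the attribute variable (dev_attr_<name>) and its .store callback (<name>_store), so the handler must follow the naming rule. A minimal sketch of the convention, using a hypothetical write-only attribute called foo:

/* DEVICE_ATTR_WO(foo) expands to roughly:
 *	struct device_attribute dev_attr_foo =
 *		__ATTR(foo, S_IWUSR, NULL, foo_store);
 * so the store handler below must be named foo_store.
 */
static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return count;	/* accept and discard the write */
}
static DEVICE_ATTR_WO(foo);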
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index df1e30989148..1e8a23d92cb4 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -120,6 +120,25 @@ static int v_recv_cmd_submit(struct vudc *udc,
urb_p->new = 1;
urb_p->seqnum = pdu->base.seqnum;
+ if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
+ /* validate packet size and number of packets */
+ unsigned int maxp, packets, bytes;
+
+ maxp = usb_endpoint_maxp(urb_p->ep->desc);
+ maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
+ bytes = pdu->u.cmd_submit.transfer_buffer_length;
+ packets = DIV_ROUND_UP(bytes, maxp);
+
+ if (pdu->u.cmd_submit.number_of_packets < 0 ||
+ pdu->u.cmd_submit.number_of_packets > packets) {
+ dev_err(&udc->gadget.dev,
+ "CMD_SUBMIT: isoc invalid num packets %d\n",
+ pdu->u.cmd_submit.number_of_packets);
+ ret = -EMSGSIZE;
+ goto free_urbp;
+ }
+ }
+
ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
if (ret) {
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
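The bound enforced by the new isoc check above is the smallest packet count able to carry the requested payload: usb_endpoint_maxp() times usb_endpoint_maxp_mult() bytes fit per packet, so DIV_ROUND_UP(bytes, maxp) is the ceiling of bytes/maxp. A worked sketch with hypothetical numbers (the helper name is illustrative):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* maxp = 1024, mult = 3  ->  3072 bytes per packet slot
 * bytes = 6500            ->  DIV_ROUND_UP(6500, 3072) = 3
 * so any number_of_packets outside [0, 3] is rejected (-EMSGSIZE above).
 */
static bool isoc_packets_valid(int number_of_packets,
			       unsigned int bytes, unsigned int maxp)
{
	unsigned int packets = DIV_ROUND_UP(bytes, maxp);

	return number_of_packets >= 0 &&
	       (unsigned int)number_of_packets <= packets;
}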
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 1adc8af292ec..d86f72bbbb91 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -90,7 +90,7 @@ unlock:
}
static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
-static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
+static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr,
const char *in, size_t count)
{
struct vudc *udc = (struct vudc *) dev_get_drvdata(dev);
@@ -180,7 +180,7 @@ unlock:
return ret;
}
-static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd);
+static DEVICE_ATTR_WO(usbip_sockfd);
static ssize_t usbip_status_show(struct device *dev,
struct device_attribute *attr, char *out)
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 1440ae0919ec..3ccb17c3e840 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -85,6 +85,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
+ if (urb->actual_length > 0 && !urb->transfer_buffer) {
+ dev_err(&udc->gadget.dev,
+ "urb: actual_length %d transfer_buffer null\n",
+ urb->actual_length);
+ return -1;
+ }
+
if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
iovnum = 2 + urb->number_of_packets;
else
@@ -100,8 +107,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb_p);
- usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
- pdu_header.base.seqnum, urb);
+ usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ pdu_header.base.seqnum);
usbip_header_correct_endian(&pdu_header, 1);
iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig
index c204094e1bb4..afac2588dab4 100644
--- a/drivers/uwb/Kconfig
+++ b/drivers/uwb/Kconfig
@@ -5,6 +5,7 @@
menuconfig UWB
tristate "Ultra Wideband devices"
default n
+ select GENERIC_NET_UTILS
help
UWB is a high-bandwidth, low-power, point-to-point radio
technology using a wide spectrum (3.1-10.6GHz). It is
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c
index 8739c4f4d015..2833be9cbc2a 100644
--- a/drivers/uwb/address.c
+++ b/drivers/uwb/address.c
@@ -336,23 +336,17 @@ static ssize_t uwb_rc_mac_addr_store(struct device *dev,
struct uwb_mac_addr addr;
ssize_t result;
- result = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx\n",
- &addr.data[0], &addr.data[1], &addr.data[2],
- &addr.data[3], &addr.data[4], &addr.data[5]);
- if (result != 6) {
- result = -EINVAL;
- goto out;
- }
+ if (!mac_pton(buf, addr.data))
+ return -EINVAL;
if (is_multicast_ether_addr(addr.data)) {
dev_err(&rc->uwb_dev.dev, "refusing to set multicast "
"MAC address %s\n", buf);
- result = -EINVAL;
- goto out;
+ return -EINVAL;
}
result = uwb_rc_mac_addr_set(rc, &addr);
if (result == 0)
rc->uwb_dev.mac_addr = addr;
-out:
+
return result < 0 ? result : size;
}
DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store);
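mac_pton() lives in lib/net_utils.c, which is why the Kconfig hunk above makes UWB select GENERIC_NET_UTILS; it returns false on anything that is not a well-formed "xx:xx:xx:xx:xx:xx" string, letting the sscanf() boilerplate collapse into a single check. A hedged usage sketch (parse_addr is hypothetical):

#include <linux/etherdevice.h>	/* is_multicast_ether_addr() */
#include <linux/kernel.h>	/* mac_pton() */

static int parse_addr(const char *buf, u8 mac[ETH_ALEN])
{
	if (!mac_pton(buf, mac))
		return -EINVAL;		/* malformed MAC string */
	if (is_multicast_ether_addr(mac))
		return -EINVAL;		/* refuse multicast, as above */
	return 0;
}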
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c
index b7d4f6b75eef..1d2a939cfcf8 100644
--- a/drivers/uwb/drp-ie.c
+++ b/drivers/uwb/drp-ie.c
@@ -128,9 +128,8 @@ static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
GFP_KERNEL);
- if (drp_ie) {
+ if (drp_ie)
drp_ie->hdr.element_id = UWB_IE_DRP;
- }
return drp_ie;
}
@@ -199,7 +198,7 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv)
rsv->drp_ie = NULL;
return 0;
}
-
+
unsafe = rsv->mas.unsafe ? 1 : 0;
if (rsv->drp_ie == NULL) {
@@ -232,23 +231,23 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv)
uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
if (uwb_rsv_has_two_drp_ies(rsv)) {
- mv = &rsv->mv;
+ mv = &rsv->mv;
if (mv->companion_drp_ie == NULL) {
mv->companion_drp_ie = uwb_drp_ie_alloc();
if (mv->companion_drp_ie == NULL)
return -ENOMEM;
}
drp_ie = mv->companion_drp_ie;
-
+
/* keep all the same configuration of the main drp_ie */
memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));
-
+
/* FIXME: handle properly the unsafe bit */
uwb_ie_drp_set_unsafe(drp_ie, 1);
uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv));
uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv));
-
+
uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
}
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 4797217e5e72..8cc4b48ff127 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -46,7 +46,7 @@ static void virqfd_deactivate(struct virqfd *virqfd)
static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
if (flags & POLLIN) {
/* An event has been signaled, call function */
@@ -113,7 +113,7 @@ int vfio_virqfd_enable(void *opaque,
struct eventfd_ctx *ctx;
struct virqfd *virqfd;
int ret = 0;
- unsigned int events;
+ __poll_t events;
virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
if (!virqfd)
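__poll_t is a sparse-annotated bitwise type, so mixing poll masks with plain integers now triggers warnings; key_to_poll() is the matching accessor for the opaque wait-queue key that the old code cast by hand. A minimal wakeup callback in the new style (a sketch; my_wakeup is hypothetical):

#include <linux/poll.h>
#include <linux/wait.h>

static int my_wakeup(wait_queue_entry_t *wait, unsigned int mode,
		     int sync, void *key)
{
	__poll_t events = key_to_poll(key);	/* not (unsigned long)key */

	if (events & POLLIN)
		return 1;	/* readable: wake the waiter */
	return 0;
}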
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c7bdeb655646..9c3f8160ef24 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -89,7 +89,7 @@ struct vhost_net_ubuf_ref {
#define VHOST_RX_BATCH 64
struct vhost_net_buf {
- struct sk_buff **queue;
+ void **queue;
int tail;
int head;
};
@@ -108,7 +108,7 @@ struct vhost_net_virtqueue {
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
- struct skb_array *rx_array;
+ struct ptr_ring *rx_ring;
struct vhost_net_buf rxq;
};
@@ -158,7 +158,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
struct vhost_net_buf *rxq = &nvq->rxq;
rxq->head = 0;
- rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue,
+ rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
VHOST_RX_BATCH);
return rxq->tail;
}
@@ -167,13 +167,25 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
- if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) {
- skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head,
- vhost_net_buf_get_size(rxq));
+ if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
+ ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
+ vhost_net_buf_get_size(rxq),
+ __skb_array_destroy_skb);
rxq->head = rxq->tail = 0;
}
}
+static int vhost_net_buf_peek_len(void *ptr)
+{
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ return xdp->data_end - xdp->data;
+ }
+
+ return __skb_array_len_with_tag(ptr);
+}
+
static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
@@ -185,7 +197,7 @@ static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
return 0;
out:
- return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
+ return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}
static void vhost_net_buf_init(struct vhost_net_buf *rxq)
@@ -583,7 +595,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
int len = 0;
unsigned long flags;
- if (rvq->rx_array)
+ if (rvq->rx_ring)
return vhost_net_buf_peek(rvq);
spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
@@ -744,7 +756,7 @@ static void handle_rx(struct vhost_net *net)
};
size_t total_len = 0;
int err, mergeable;
- s16 headcount;
+ s16 headcount, nheads = 0;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
struct socket *sock;
@@ -772,7 +784,7 @@ static void handle_rx(struct vhost_net *net)
while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
- headcount = get_rx_bufs(vq, vq->heads, vhost_len,
+ headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
&in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
@@ -790,7 +802,7 @@ static void handle_rx(struct vhost_net *net)
* they refilled. */
goto out;
}
- if (nvq->rx_array)
+ if (nvq->rx_ring)
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
@@ -844,8 +856,12 @@ static void handle_rx(struct vhost_net *net)
vhost_discard_vq_desc(vq, headcount);
goto out;
}
- vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
- headcount);
+ nheads += headcount;
+ if (nheads > VHOST_RX_BATCH) {
+ vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+ nheads);
+ nheads = 0;
+ }
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
@@ -856,6 +872,9 @@ static void handle_rx(struct vhost_net *net)
}
vhost_net_enable_vq(net, vq);
out:
+ if (nheads)
+ vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+ nheads);
mutex_unlock(&vq->mutex);
}
@@ -896,7 +915,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_net *n;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- struct sk_buff **queue;
+ void **queue;
int i;
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
@@ -908,7 +927,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return -ENOMEM;
}
- queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *),
+ queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
GFP_KERNEL);
if (!queue) {
kfree(vqs);
@@ -1046,23 +1065,23 @@ err:
return ERR_PTR(r);
}
-static struct skb_array *get_tap_skb_array(int fd)
+static struct ptr_ring *get_tap_ptr_ring(int fd)
{
- struct skb_array *array;
+ struct ptr_ring *ring;
struct file *file = fget(fd);
if (!file)
return NULL;
- array = tun_get_skb_array(file);
- if (!IS_ERR(array))
+ ring = tun_get_tx_ring(file);
+ if (!IS_ERR(ring))
goto out;
- array = tap_get_skb_array(file);
- if (!IS_ERR(array))
+ ring = tap_get_ptr_ring(file);
+ if (!IS_ERR(ring))
goto out;
- array = NULL;
+ ring = NULL;
out:
fput(file);
- return array;
+ return ring;
}
static struct socket *get_tap_socket(int fd)
@@ -1143,7 +1162,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
vq->private_data = sock;
vhost_net_buf_unproduce(nvq);
if (index == VHOST_NET_VQ_RX)
- nvq->rx_array = get_tap_skb_array(fd);
+ nvq->rx_ring = get_tap_ptr_ring(fd);
r = vhost_vq_init_access(vq);
if (r)
goto err_used;
@@ -1208,6 +1227,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, umem);
vhost_net_vq_reset(n);
done:
@@ -1353,7 +1373,7 @@ static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
return vhost_chr_write_iter(dev, from);
}
-static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
+static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
struct vhost_net *n = file->private_data;
struct vhost_dev *dev = &n->dev;
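The switch from skb_array to the underlying ptr_ring above lets the ring carry either sk_buffs or XDP buffers as tagged opaque pointers (hence vhost_net_buf_peek_len() dispatching on tun_is_xdp_buff()). The batched-consume pattern vhost_net relies on is roughly the following sketch; drain_some and the destructor argument are illustrative:

#include <linux/ptr_ring.h>

#define BATCH 64

static void drain_some(struct ptr_ring *ring, void **queue,
		       void (*destroy)(void *))
{
	int i, n;

	/* grab up to BATCH pointers in one locked operation */
	n = ptr_ring_consume_batched(ring, queue, BATCH);
	for (i = 0; i < n; i++) {
		/* ... inspect queue[i] without freeing it ... */
	}
	/* hand unprocessed entries back; destroy them if that fails */
	ptr_ring_unconsume(ring, queue, n, destroy);
}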
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 33ac2b186b85..8d4374606756 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -170,7 +170,7 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
{
struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
- if (!((unsigned long)key & poll->mask))
+ if (!(key_to_poll(key) & poll->mask))
return 0;
vhost_poll_queue(poll);
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev)
+ __poll_t mask, struct vhost_dev *dev)
{
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
* keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
- unsigned long mask;
+ __poll_t mask;
int ret = 0;
if (poll->wqh)
@@ -211,7 +211,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
mask = file->f_op->poll(file, &poll->table);
if (mask)
- vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+ vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & POLLERR) {
if (poll->wqh)
remove_wait_queue(poll->wqh, &poll->wait);
@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
int i = 0;
for (i = 0; i < d->nvqs; ++i)
- mutex_lock(&d->vqs[i]->mutex);
+ mutex_lock_nested(&d->vqs[i]->mutex, i);
}
static void vhost_dev_unlock_vqs(struct vhost_dev *d)
@@ -1015,6 +1015,10 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
vhost_iotlb_notify_vq(dev, msg);
break;
case VHOST_IOTLB_INVALIDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
vhost_vq_meta_reset(dev);
vhost_del_umem_range(dev->iotlb, msg->iova,
msg->iova + msg->size - 1);
@@ -1057,10 +1061,10 @@ done:
}
EXPORT_SYMBOL(vhost_chr_write_iter);
-unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &dev->wait, wait);
@@ -1877,12 +1881,7 @@ static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
return -1U;
/* Check they're not leading us off end of descriptors. */
- next = vhost16_to_cpu(vq, desc->next);
- /* Make sure compiler knows to grab that: we don't want it changing! */
- /* We will use the result as an index in an array, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
+ next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
return next;
}
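The last hunk above works because READ_ONCE() has absorbed the semantics of read_barrier_depends(): since the Alpha dependent-load rework, a single annotated load both stops compiler re-reads and orders the later array access that depends on the loaded value. A generic sketch of the idiom:

#include <linux/compiler.h>

/* idx_ptr is updated concurrently; READ_ONCE() makes the dependent
 * table[] access safe without an explicit read_barrier_depends().
 */
static int lookup(const int *table, const unsigned int *idx_ptr,
		  unsigned int mask)
{
	unsigned int idx = READ_ONCE(*idx_ptr) & mask;

	return table[idx];
}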
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 79c6e7a60a5e..7876a3d7d1b3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -34,7 +34,7 @@ struct vhost_poll {
wait_queue_head_t *wqh;
wait_queue_entry_t wait;
struct vhost_work work;
- unsigned long mask;
+ __poll_t mask;
struct vhost_dev *dev;
};
@@ -43,7 +43,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev);
+ __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
@@ -217,7 +217,7 @@ void vhost_enqueue_msg(struct vhost_dev *dev,
struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
struct list_head *head);
-unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
int noblock);
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index d84329676689..6a34ab936726 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -143,7 +143,7 @@ static int apple_bl_add(struct acpi_device *dev)
struct pci_dev *host;
int intensity;
- host = pci_get_bus_and_slot(0, 0);
+ host = pci_get_domain_bus_and_slot(0, 0, 0);
if (!host) {
pr_err("unable to find PCI host\n");
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index d7c239ea3d09..f5574060f9c8 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = lcd->buf,
};
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index eab1f842f9c0..e4bd63e9db6b 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi)
spi_message_init(m);
- x->cs_change = 1;
+ x->cs_change = 0;
x->tx_buf = &lcd->buf[0];
spi_message_add_tail(x, m);
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 6a41ea92737a..4dc5ee8debeb 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = buf,
};
diff --git a/drivers/video/fbdev/auo_k190x.c b/drivers/video/fbdev/auo_k190x.c
index 0d06038324e0..1e383c547633 100644
--- a/drivers/video/fbdev/auo_k190x.c
+++ b/drivers/video/fbdev/auo_k190x.c
@@ -708,8 +708,8 @@ static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", temp);
}
-static DEVICE_ATTR(update_mode, 0644, update_mode_show, update_mode_store);
-static DEVICE_ATTR(flash, 0644, flash_show, flash_store);
+static DEVICE_ATTR_RW(update_mode);
+static DEVICE_ATTR_RW(flash);
static DEVICE_ATTR(temp, 0644, temp_show, NULL);
static struct attribute *auok190x_attributes[] = {
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index cda7587cbc86..e707e617bf1c 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -556,7 +556,7 @@ static void __init iounmap_macfb(void)
static int __init macfb_init(void)
{
int video_cmap_len, video_is_nubus = 0;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
char *option = NULL;
int err;
@@ -670,15 +670,17 @@ static int __init macfb_init(void)
* code is really broken :-)
*/
- while ((ndev = nubus_find_type(NUBUS_CAT_DISPLAY,
- NUBUS_TYPE_VIDEO, ndev)))
- {
+ for_each_func_rsrc(ndev) {
unsigned long base = ndev->board->slot_addr;
if (mac_bi_data.videoaddr < base ||
mac_bi_data.videoaddr - base > 0xFFFFFF)
continue;
+ if (ndev->category != NUBUS_CAT_DISPLAY ||
+ ndev->type != NUBUS_TYPE_VIDEO)
+ continue;
+
video_is_nubus = 1;
slot_addr = (unsigned char *)base;
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index d570e19a2864..035ff6e02894 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -110,7 +110,7 @@ static ssize_t flip_store(struct device *dev, struct device_attribute *attr, con
return count;
}
-static DEVICE_ATTR(flip, 0644, flip_show, flip_store);
+static DEVICE_ATTR_RW(flip);
static ssize_t w100fb_reg_read(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
@@ -166,7 +166,7 @@ static ssize_t fastpllclk_store(struct device *dev, struct device_attribute *att
return count;
}
-static DEVICE_ATTR(fastpllclk, 0644, fastpllclk_show, fastpllclk_store);
+static DEVICE_ATTR_RW(fastpllclk);
/*
* Some touchscreens need hsync information from the video driver to
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index d70ad6d38879..b0597bef4555 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -565,11 +565,11 @@ static irqreturn_t fsl_hv_state_change_isr(int irq, void *data)
/*
* Returns a bitmask indicating whether a read will block
*/
-static unsigned int fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
+static __poll_t fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
{
struct doorbell_queue *dbq = filp->private_data;
unsigned long flags;
- unsigned int mask;
+ __poll_t mask;
spin_lock_irqsave(&dbq->lock, flags);
diff --git a/drivers/staging/unisys/visorbus/Kconfig b/drivers/visorbus/Kconfig
index 511388075ffa..1f5812b936d0 100644
--- a/drivers/staging/unisys/visorbus/Kconfig
+++ b/drivers/visorbus/Kconfig
@@ -4,7 +4,7 @@
config UNISYS_VISORBUS
tristate "Unisys visorbus driver"
- depends on UNISYSSPAR
+ depends on X86_64 && ACPI
---help---
The visorbus driver is a virtualized bus for the Unisys s-Par firmware.
Virtualized devices allow Linux guests on a system to share disks and
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/visorbus/Makefile
index 784cdc1f9d6a..e8df59d1301f 100644
--- a/drivers/staging/unisys/visorbus/Makefile
+++ b/drivers/visorbus/Makefile
@@ -8,5 +8,3 @@ obj-$(CONFIG_UNISYS_VISORBUS) += visorbus.o
visorbus-y := visorbus_main.o
visorbus-y += visorchannel.o
visorbus-y += visorchipset.o
-
-ccflags-y += -Idrivers/staging/unisys/include
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/visorbus/controlvmchannel.h
index 9ee9886a9aed..8c57562a070a 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/visorbus/controlvmchannel.h
@@ -1,24 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
#ifndef __CONTROLVMCHANNEL_H__
#define __CONTROLVMCHANNEL_H__
#include <linux/uuid.h>
-
-#include "visorchannel.h"
+#include <linux/visorbus.h>
/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
#define VISOR_CONTROLVM_CHANNEL_GUID \
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/visorbus/vbuschannel.h
index 981b180f3c4b..b1dce26166bf 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/visorbus/vbuschannel.h
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
#ifndef __VBUSCHANNEL_H__
@@ -26,7 +17,7 @@
*/
#include <linux/uuid.h>
-#include "visorchannel.h"
+#include <linux/visorbus.h>
/* {193b331b-c58f-11da-95a9-00e08161165f} */
#define VISOR_VBUS_CHANNEL_GUID \
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/visorbus/visorbus_main.c
index 6cb6eb0673c6..0b2434cc4ecd 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/visorbus/visorbus_main.c
@@ -1,25 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/visorbus.h>
#include <linux/uuid.h>
-#include "visorbus.h"
#include "visorbus_private.h"
static const guid_t visor_vbus_channel_guid = VISOR_VBUS_CHANNEL_GUID;
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/visorbus/visorbus_private.h
index 4a8b12d7cfaa..366380b7f8d9 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/visorbus/visorbus_private.h
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
#ifndef __VISORBUS_PRIVATE_H__
@@ -18,10 +9,10 @@
#include <linux/uuid.h>
#include <linux/utsname.h>
+#include <linux/visorbus.h>
#include "controlvmchannel.h"
#include "vbuschannel.h"
-#include "visorbus.h"
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
struct visor_device *from);
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/visorbus/visorchannel.c
index aae16073ba03..bd890e0f456b 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/visorbus/visorchannel.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
/*
@@ -21,8 +12,8 @@
#include <linux/uuid.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/visorbus.h>
-#include "visorbus.h"
#include "visorbus_private.h"
#include "controlvmchannel.h"
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
index fed554a43151..ca752b8f495f 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/visorbus/visorchipset.c
@@ -1,22 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
#include <linux/acpi.h>
#include <linux/crash_dump.h>
+#include <linux/visorbus.h>
-#include "visorbus.h"
#include "visorbus_private.h"
/* {72120008-4AAB-11DC-8530-444553544200} */
@@ -590,7 +581,8 @@ static void *parser_name_get(struct parser_context *ctx)
struct visor_controlvm_parameters_header *phdr;
phdr = &ctx->data;
- if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
+ if ((unsigned long)phdr->name_offset +
+ (unsigned long)phdr->name_length > ctx->param_bytes)
return NULL;
ctx->curr = (char *)&phdr + phdr->name_offset;
ctx->bytes_remaining = phdr->name_length;
@@ -1317,13 +1309,13 @@ static void parser_done(struct parser_context *ctx)
static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
bool *retry)
{
- int allocbytes;
+ unsigned long allocbytes;
struct parser_context *ctx;
void *mapping;
*retry = false;
/* alloc an extra byte to ensure payload is \0 terminated */
- allocbytes = bytes + 1 + (sizeof(struct parser_context) -
+ allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) -
sizeof(struct visor_controlvm_parameters_header));
if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
MAX_CONTROLVM_PAYLOAD_BYTES) {
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index a90728ceec5a..55e11bf8ebaf 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -13,9 +13,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/w1-gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/delay.h>
@@ -30,11 +29,17 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
pdata->pullup_duration = delay;
} else {
if (pdata->pullup_duration) {
- gpio_direction_output(pdata->pin, 1);
-
+ /*
+ * This will OVERRIDE open drain emulation and force-pull
+ * the line high for some time.
+ */
+ gpiod_set_raw_value(pdata->gpiod, 1);
msleep(pdata->pullup_duration);
-
- gpio_direction_input(pdata->pin);
+ /*
+ * This will simply set the line as input since we are doing
+ * open drain emulation in the GPIO library.
+ */
+ gpiod_set_value(pdata->gpiod, 1);
}
pdata->pullup_duration = 0;
}
@@ -42,28 +47,18 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
return 0;
}
-static void w1_gpio_write_bit_dir(void *data, u8 bit)
-{
- struct w1_gpio_platform_data *pdata = data;
-
- if (bit)
- gpio_direction_input(pdata->pin);
- else
- gpio_direction_output(pdata->pin, 0);
-}
-
-static void w1_gpio_write_bit_val(void *data, u8 bit)
+static void w1_gpio_write_bit(void *data, u8 bit)
{
struct w1_gpio_platform_data *pdata = data;
- gpio_set_value(pdata->pin, bit);
+ gpiod_set_value(pdata->gpiod, bit);
}
static u8 w1_gpio_read_bit(void *data)
{
struct w1_gpio_platform_data *pdata = data;
- return gpio_get_value(pdata->pin) ? 1 : 0;
+ return gpiod_get_value(pdata->gpiod) ? 1 : 0;
}
#if defined(CONFIG_OF)
@@ -74,107 +69,85 @@ static const struct of_device_id w1_gpio_dt_ids[] = {
MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
#endif
-static int w1_gpio_probe_dt(struct platform_device *pdev)
-{
- struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
- int gpio;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- if (of_get_property(np, "linux,open-drain", NULL))
- pdata->is_open_drain = 1;
-
- gpio = of_get_gpio(np, 0);
- if (gpio < 0) {
- if (gpio != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Failed to parse gpio property for data pin (%d)\n",
- gpio);
-
- return gpio;
- }
- pdata->pin = gpio;
-
- gpio = of_get_gpio(np, 1);
- if (gpio == -EPROBE_DEFER)
- return gpio;
- /* ignore other errors as the pullup gpio is optional */
- pdata->ext_pullup_enable_pin = gpio;
-
- pdev->dev.platform_data = pdata;
-
- return 0;
-}
-
static int w1_gpio_probe(struct platform_device *pdev)
{
struct w1_bus_master *master;
struct w1_gpio_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ /* Enforce open drain mode by default */
+ enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
int err;
if (of_have_populated_dt()) {
- err = w1_gpio_probe_dt(pdev);
- if (err < 0)
- return err;
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /*
+ * This property means that something other than the gpiolib has
+ * already set the line into open drain mode, so we should just
+ * drive it high/low as if we were in full control of the line and
+ * open drain will happen transparently.
+ */
+ if (of_get_property(np, "linux,open-drain", NULL))
+ gflags = GPIOD_OUT_LOW;
+
+ pdev->dev.platform_data = pdata;
}
-
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "No configuration data\n");
+ dev_err(dev, "No configuration data\n");
return -ENXIO;
}
- master = devm_kzalloc(&pdev->dev, sizeof(struct w1_bus_master),
+ master = devm_kzalloc(dev, sizeof(struct w1_bus_master),
GFP_KERNEL);
if (!master) {
- dev_err(&pdev->dev, "Out of memory\n");
+ dev_err(dev, "Out of memory\n");
return -ENOMEM;
}
- err = devm_gpio_request(&pdev->dev, pdata->pin, "w1");
- if (err) {
- dev_err(&pdev->dev, "gpio_request (pin) failed\n");
- return err;
+ pdata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags);
+ if (IS_ERR(pdata->gpiod)) {
+ dev_err(dev, "gpio_request (pin) failed\n");
+ return PTR_ERR(pdata->gpiod);
}
- if (gpio_is_valid(pdata->ext_pullup_enable_pin)) {
- err = devm_gpio_request_one(&pdev->dev,
- pdata->ext_pullup_enable_pin, GPIOF_INIT_LOW,
- "w1 pullup");
- if (err < 0) {
- dev_err(&pdev->dev, "gpio_request_one "
- "(ext_pullup_enable_pin) failed\n");
- return err;
- }
+ pdata->pullup_gpiod =
+ devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->pullup_gpiod)) {
+ dev_err(dev, "gpio_request_one "
+ "(ext_pullup_enable_pin) failed\n");
+ return PTR_ERR(pdata->pullup_gpiod);
}
master->data = pdata;
master->read_bit = w1_gpio_read_bit;
-
- if (pdata->is_open_drain) {
- gpio_direction_output(pdata->pin, 1);
- master->write_bit = w1_gpio_write_bit_val;
- } else {
- gpio_direction_input(pdata->pin);
- master->write_bit = w1_gpio_write_bit_dir;
+ gpiod_direction_output(pdata->gpiod, 1);
+ master->write_bit = w1_gpio_write_bit;
+
+ /*
+ * If we are using open drain emulation from the GPIO library,
+ * we need to use this pullup function that hammers the line
+ * high using a raw accessor to provide pull-up for the w1
+ * line.
+ */
+ if (gflags == GPIOD_OUT_LOW_OPEN_DRAIN)
master->set_pullup = w1_gpio_set_pullup;
- }
err = w1_add_master_device(master);
if (err) {
- dev_err(&pdev->dev, "w1_add_master device failed\n");
+ dev_err(dev, "w1_add_master device failed\n");
return err;
}
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(1);
- if (gpio_is_valid(pdata->ext_pullup_enable_pin))
- gpio_set_value(pdata->ext_pullup_enable_pin, 1);
+ if (pdata->pullup_gpiod)
+ gpiod_set_value(pdata->pullup_gpiod, 1);
platform_set_drvdata(pdev, master);
@@ -189,8 +162,8 @@ static int w1_gpio_remove(struct platform_device *pdev)
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(0);
- if (gpio_is_valid(pdata->ext_pullup_enable_pin))
- gpio_set_value(pdata->ext_pullup_enable_pin, 0);
+ if (pdata->pullup_gpiod)
+ gpiod_set_value(pdata->pullup_gpiod, 0);
w1_remove_master_device(master);
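devm_gpiod_get_index_optional() returns NULL, not an error pointer, when the line is simply not described, which is why the probe and remove paths above can test pdata->pullup_gpiod directly where gpio_is_valid() used to be needed. A hedged sketch of that pattern (get_optional_pullup is hypothetical):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int get_optional_pullup(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *g;

	g = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW);
	if (IS_ERR(g))
		return PTR_ERR(g);	/* real failure, e.g. -EPROBE_DEFER */

	*out = g;			/* NULL just means "not wired" */
	if (g)
		gpiod_set_value(g, 1);
	return 0;
}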
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index a36661cd1f05..f876772c0fb4 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -59,7 +59,11 @@ enum w1_netlink_message_types {
* @type: one of enum w1_netlink_message_types
* @status: kernel feedback for success 0 or errno failure value
* @len: length of data following w1_netlink_msg
- * @id: union holding master bus id (msg.id) and slave device id (id[8]).
+ * @id: union holding bus master id (msg.id) and slave device id (id[8]).
+ * @id.id: Slave ID (8 bytes)
+ * @id.mst: bus master identification
+ * @id.mst.id: bus master ID
+ * @id.mst.res: bus master reserved
* @data: start address of any following data
*
* The base message structure for w1 messages over netlink.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ca200d1f310a..5bf613d3b7d6 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -223,6 +223,13 @@ config ZIIRAVE_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called ziirave_wdt.
+config RAVE_SP_WATCHDOG
+ tristate "RAVE SP Watchdog timer"
+ depends on RAVE_SP_CORE
+ select WATCHDOG_CORE
+ help
+ Support for the watchdog on the RAVE SP device.
+
# ALPHA Architecture
# ARM Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 715a21078e0c..135c5e81f25e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -224,3 +224,4 @@ obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o
obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
+obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o
diff --git a/drivers/watchdog/rave-sp-wdt.c b/drivers/watchdog/rave-sp-wdt.c
new file mode 100644
index 000000000000..35db173252f9
--- /dev/null
+++ b/drivers/watchdog/rave-sp-wdt.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Driver for the watchdog aspect of the Zodiac Inflight Innovations RAVE
+ * Supervisory Processor(SP) MCU
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovation
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+enum {
+ RAVE_SP_RESET_BYTE = 1,
+ RAVE_SP_RESET_REASON_NORMAL = 0,
+ RAVE_SP_RESET_DELAY_MS = 500,
+};
+
+/**
+ * struct rave_sp_wdt_variant - RAVE SP watchdog variant
+ *
+ * @max_timeout: Largest possible watchdog timeout setting
+ * @min_timeout: Smallest possible watchdog timeout setting
+ *
+ * @configure: Function to send configuration command
+ * @restart: Function to send "restart" command
+ */
+struct rave_sp_wdt_variant {
+ unsigned int max_timeout;
+ unsigned int min_timeout;
+
+ int (*configure)(struct watchdog_device *, bool);
+ int (*restart)(struct watchdog_device *);
+};
+
+/**
+ * struct rave_sp_wdt - RAVE SP watchdog
+ *
+ * @wdd: Underlying watchdog device
+ * @sp: Pointer to parent RAVE SP device
+ * @variant: Device specific variant information
+ * @reboot_notifier: Reboot notifier implementing machine reset
+ */
+struct rave_sp_wdt {
+ struct watchdog_device wdd;
+ struct rave_sp *sp;
+ const struct rave_sp_wdt_variant *variant;
+ struct notifier_block reboot_notifier;
+};
+
+static struct rave_sp_wdt *to_rave_sp_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct rave_sp_wdt, wdd);
+}
+
+static int rave_sp_wdt_exec(struct watchdog_device *wdd, void *data,
+ size_t data_size)
+{
+ return rave_sp_exec(to_rave_sp_wdt(wdd)->sp,
+ data, data_size, NULL, 0);
+}
+
+static int rave_sp_wdt_legacy_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = 0,
+ [3] = on,
+ [4] = on ? wdd->timeout : 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = on,
+ [3] = (u8)wdd->timeout,
+ [4] = (u8)(wdd->timeout >> 8),
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+/**
+ * rave_sp_wdt_configure - Configure watchdog device
+ *
+ * @wdd: Device to configure
+ * @on: Desired state of the watchdog timer (ON/OFF)
+ *
+ * This function configures two aspects of the watchdog timer:
+ *
+ * - Whether it is ON or OFF
+ * - Its timeout duration
+ *
+ * with the first aspect specified via the function argument and the
+ * second via the value of 'wdd->timeout'.
+ */
+static int rave_sp_wdt_configure(struct watchdog_device *wdd, bool on)
+{
+ return to_rave_sp_wdt(wdd)->variant->configure(wdd, on);
+}
+
+static int rave_sp_wdt_legacy_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE,
+ [3] = RAVE_SP_RESET_REASON_NORMAL
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ /*
+ * The restart handler is called in atomic context, which means we
+ * can't communicate with the SP via UART. Luckily for us, the SP will
+ * wait 500ms before actually resetting us, so we ask it to do
+ * so here and let the rest of the system go on wrapping
+ * things up.
+ */
+ if (action == SYS_DOWN || action == SYS_HALT) {
+ struct rave_sp_wdt *sp_wd =
+ container_of(nb, struct rave_sp_wdt, reboot_notifier);
+
+ const int ret = sp_wd->variant->restart(&sp_wd->wdd);
+
+ if (ret < 0)
+ dev_err(sp_wd->wdd.parent,
+ "Failed to issue restart command (%d)", ret);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int rave_sp_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ /*
+ * The actual work was done by the reboot notifier above. The SP
+ * firmware waits 500 ms before issuing the reset, so let's hang
+ * here for twice that delay and hopefully never reach
+ * the return statement.
+ */
+ mdelay(2 * RAVE_SP_RESET_DELAY_MS);
+
+ return -EIO;
+}
+
+static int rave_sp_wdt_start(struct watchdog_device *wdd)
+{
+ int ret;
+
+ ret = rave_sp_wdt_configure(wdd, true);
+ if (!ret)
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+
+ return ret;
+}
+
+static int rave_sp_wdt_stop(struct watchdog_device *wdd)
+{
+ return rave_sp_wdt_configure(wdd, false);
+}
+
+static int rave_sp_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->timeout = timeout;
+
+ return rave_sp_wdt_configure(wdd, watchdog_active(wdd));
+}
+
+static int rave_sp_wdt_ping(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_PET_WDT,
+ [1] = 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static const struct watchdog_info rave_sp_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "RAVE SP Watchdog",
+};
+
+static const struct watchdog_ops rave_sp_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rave_sp_wdt_start,
+ .stop = rave_sp_wdt_stop,
+ .ping = rave_sp_wdt_ping,
+ .set_timeout = rave_sp_wdt_set_timeout,
+ .restart = rave_sp_wdt_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_legacy = {
+ .max_timeout = 255,
+ .min_timeout = 1,
+ .configure = rave_sp_wdt_legacy_configure,
+ .restart = rave_sp_wdt_legacy_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_rdu = {
+ .max_timeout = 180,
+ .min_timeout = 60,
+ .configure = rave_sp_wdt_rdu_configure,
+ .restart = rave_sp_wdt_rdu_restart,
+};
+
+static const struct of_device_id rave_sp_wdt_of_match[] = {
+ {
+ .compatible = "zii,rave-sp-watchdog-legacy",
+ .data = &rave_sp_wdt_legacy,
+ },
+ {
+ .compatible = "zii,rave-sp-watchdog",
+ .data = &rave_sp_wdt_rdu,
+ },
+ { /* sentinel */ }
+};
+
+static int rave_sp_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdd;
+ struct rave_sp_wdt *sp_wd;
+ struct nvmem_cell *cell;
+ __le16 timeout = 0;
+ int ret;
+
+ sp_wd = devm_kzalloc(dev, sizeof(*sp_wd), GFP_KERNEL);
+ if (!sp_wd)
+ return -ENOMEM;
+
+ sp_wd->variant = of_device_get_match_data(dev);
+ sp_wd->sp = dev_get_drvdata(dev->parent);
+
+ wdd = &sp_wd->wdd;
+ wdd->parent = dev;
+ wdd->info = &rave_sp_wdt_info;
+ wdd->ops = &rave_sp_wdt_ops;
+ wdd->min_timeout = sp_wd->variant->min_timeout;
+ wdd->max_timeout = sp_wd->variant->max_timeout;
+ wdd->status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+ wdd->timeout = 60;
+
+ cell = nvmem_cell_get(dev, "wdt-timeout");
+ if (!IS_ERR(cell)) {
+ size_t len;
+ void *value = nvmem_cell_read(cell, &len);
+
+ if (!IS_ERR(value)) {
+ memcpy(&timeout, value, min(len, sizeof(timeout)));
+ kfree(value);
+ }
+ nvmem_cell_put(cell);
+ }
+ watchdog_init_timeout(wdd, le16_to_cpu(timeout), dev);
+ watchdog_set_restart_priority(wdd, 255);
+ watchdog_stop_on_unregister(wdd);
+
+ sp_wd->reboot_notifier.notifier_call = rave_sp_wdt_reboot_notifier;
+ ret = devm_register_reboot_notifier(dev, &sp_wd->reboot_notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register reboot notifier\n");
+ return ret;
+ }
+
+ /*
+ * We don't know if the watchdog is running now. To be sure, let's
+ * start it and depend on the watchdog core to ping it.
+ */
+ wdd->max_hw_heartbeat_ms = wdd->max_timeout * 1000;
+ ret = rave_sp_wdt_start(wdd);
+ if (ret) {
+ dev_err(dev, "Watchdog didn't start\n");
+ return ret;
+ }
+
+ ret = devm_watchdog_register_device(dev, wdd);
+ if (ret) {
+ dev_err(dev, "Failed to register watchdog device\n");
+ rave_sp_wdt_stop(wdd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver rave_sp_wdt_driver = {
+ .probe = rave_sp_wdt_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = rave_sp_wdt_of_match,
+ },
+};
+
+module_platform_driver(rave_sp_wdt_driver);
+
+MODULE_DEVICE_TABLE(of, rave_sp_wdt_of_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP Watchdog driver");
+MODULE_ALIAS("platform:rave-sp-watchdog");
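Setting WDOG_HW_RUNNING in the start path, and starting the timer before registration as the probe above does, tells the watchdog core that the hardware is already ticking, so the core issues keepalives itself until userspace opens the device. A minimal sketch of that contract (my_hw_enable is a hypothetical hardware hook):

#include <linux/watchdog.h>

static int my_hw_enable(struct watchdog_device *wdd)
{
	return 0;	/* stand-in for the real hardware poke */
}

static int my_wdt_start(struct watchdog_device *wdd)
{
	int ret = my_hw_enable(wdd);

	if (!ret)
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	return ret;
}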
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 9729a64ea1a9..72c0416a01cc 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -621,9 +621,9 @@ static long evtchn_ioctl(struct file *file,
return rc;
}
-static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+static __poll_t evtchn_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
struct per_user_data *u = file->private_data;
poll_wait(file, &u->evtchn_wait, wait);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57efbd3b053b..bd56653b9bbc 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset+range].handle == -1) {
- range--;
+ if (map->unmap_ops[offset+range].handle == -1)
break;
- }
range++;
}
err = __unmap_grant_pages(map, offset, range);
@@ -1073,8 +1071,10 @@ unlock_out:
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod)
+ if (use_ptemod) {
map->vma = NULL;
+ unmap_grant_pages(map, 0, map->count);
+ }
gntdev_put_map(priv, map);
return err;
}
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index 6cc1c15bcd84..9ade533d9e40 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -139,7 +139,7 @@ out:
return err ? err : buf - ubuf;
}
-static unsigned int xen_mce_chrdev_poll(struct file *file, poll_table *wait)
+static __poll_t xen_mce_chrdev_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &xen_mce_chrdev_wait, wait);
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index d1e1d8d2b9d5..78804e71f9a6 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -805,7 +805,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
pvcalls_exit();
return ret;
}
- map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
if (map2 == NULL) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
@@ -878,7 +878,7 @@ received:
return ret;
}
-static unsigned int pvcalls_front_poll_passive(struct file *file,
+static __poll_t pvcalls_front_poll_passive(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
@@ -935,12 +935,12 @@ static unsigned int pvcalls_front_poll_passive(struct file *file,
return 0;
}
-static unsigned int pvcalls_front_poll_active(struct file *file,
+static __poll_t pvcalls_front_poll_active(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int32_t in_error, out_error;
struct pvcalls_data_intf *intf = map->active.ring;
@@ -958,12 +958,12 @@ static unsigned int pvcalls_front_poll_active(struct file *file,
return mask;
}
-unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
+__poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
- int ret;
+ __poll_t ret;
pvcalls_enter();
if (!pvcalls_front_dev) {
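The GFP_KERNEL to GFP_ATOMIC switch in pvcalls_front_accept() above follows the usual atomic-context rule, presumably because that allocation can run where sleeping is forbidden: GFP_KERNEL may sleep to reclaim memory, while GFP_ATOMIC never sleeps at the cost of failing more readily. A generic, hedged sketch (struct foo and alloc_under_lock are illustrative):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int x;
};

static struct foo *alloc_under_lock(spinlock_t *lock)
{
	struct foo *f;

	spin_lock(lock);
	/* GFP_KERNEL could sleep here; GFP_ATOMIC must be used instead */
	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	spin_unlock(lock);
	return f;
}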
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 82fc54f8eb77..5bb72d3f8337 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -36,7 +36,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/bootmem.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index f3b089b7c0b6..e17ec3fce590 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -645,7 +645,7 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
+static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
{
struct xenbus_file_priv *u = file->private_data;
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 6489e1fc1afd..11045d8e356a 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -25,9 +25,6 @@ config 9P_FS_POSIX_ACL
POSIX Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N
endif
diff --git a/fs/Kconfig b/fs/Kconfig
index 7aee6d699fd6..9774588da60e 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -167,17 +167,13 @@ config TMPFS_POSIX_ACL
files for sound to work properly. In short, if you're not sure,
say Y.
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
config TMPFS_XATTR
bool "Tmpfs extended attributes"
depends on TMPFS
default n
help
Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details).
+ the kernel or by users (see the attr(5) manual page for details).
Currently this enables support for the trusted.* and
security.* namespaces.
@@ -298,7 +294,6 @@ config NFS_COMMON
source "net/sunrpc/Kconfig"
source "fs/ceph/Kconfig"
source "fs/cifs/Kconfig"
-source "fs/ncpfs/Kconfig"
source "fs/coda/Kconfig"
source "fs/afs/Kconfig"
source "fs/9p/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index ef772f1eaff8..add789ea270a 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -92,7 +92,6 @@ obj-$(CONFIG_LOCKD) += lockd/
obj-$(CONFIG_NLS) += nls/
obj-$(CONFIG_SYSV_FS) += sysv/
obj-$(CONFIG_CIFS) += cifs/
-obj-$(CONFIG_NCP_FS) += ncpfs/
obj-$(CONFIG_HPFS_FS) += hpfs/
obj-$(CONFIG_NTFS_FS) += ntfs/
obj-$(CONFIG_UFS_FS) += ufs/
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 0f0e6925e97d..14a6c1b90c9f 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -10,6 +10,7 @@
*/
#include <linux/math64.h>
+#include <linux/iversion.h>
#include "affs.h"
/*
@@ -60,7 +61,7 @@ affs_insert_hash(struct inode *dir, struct buffer_head *bh)
affs_brelse(dir_bh);
dir->i_mtime = dir->i_ctime = current_time(dir);
- dir->i_version++;
+ inode_inc_iversion(dir);
mark_inode_dirty(dir);
return 0;
@@ -114,7 +115,7 @@ affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh)
affs_brelse(bh);
dir->i_mtime = dir->i_ctime = current_time(dir);
- dir->i_version++;
+ inode_inc_iversion(dir);
mark_inode_dirty(dir);
return retval;
diff --git a/fs/affs/dir.c b/fs/affs/dir.c
index a105e77df2c1..d180b46453cf 100644
--- a/fs/affs/dir.c
+++ b/fs/affs/dir.c
@@ -14,6 +14,7 @@
*
*/
+#include <linux/iversion.h>
#include "affs.h"
static int affs_readdir(struct file *, struct dir_context *);
@@ -80,7 +81,7 @@ affs_readdir(struct file *file, struct dir_context *ctx)
* we can jump directly to where we left off.
*/
ino = (u32)(long)file->private_data;
- if (ino && file->f_version == inode->i_version) {
+ if (ino && inode_cmp_iversion(inode, file->f_version) == 0) {
pr_debug("readdir() left off=%d\n", ino);
goto inside;
}
@@ -130,7 +131,7 @@ inside:
} while (ino);
}
done:
- file->f_version = inode->i_version;
+ file->f_version = inode_query_iversion(inode);
file->private_data = (void *)(long)ino;
affs_brelse(fh_bh);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 1117e36134cc..e602619aed9d 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -21,6 +21,7 @@
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
+#include <linux/iversion.h>
#include "affs.h"
static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
@@ -102,7 +103,7 @@ static struct inode *affs_alloc_inode(struct super_block *sb)
if (!i)
return NULL;
- i->vfs_inode.i_version = 1;
+ inode_set_iversion(&i->vfs_inode, 1);
i->i_lc = NULL;
i->i_ext_bh = NULL;
i->i_pa_cnt = 0;
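
The three affs hunks above are one instance of the tree-wide move from open-coded i_version arithmetic to the <linux/iversion.h> helpers: set at inode allocation, bumped on directory modification, sampled and compared in readdir. A condensed sketch of the pattern (illustrative function names, not part of this patch):

	#include <linux/iversion.h>

	/* On directory change: was dir->i_version++. */
	static void mark_dir_changed(struct inode *dir)
	{
		inode_inc_iversion(dir);
		mark_inode_dirty(dir);
	}

	/* In readdir: was file->f_version == inode->i_version. */
	static bool readdir_pos_still_valid(struct file *file, struct inode *dir)
	{
		return inode_cmp_iversion(dir, file->f_version) == 0;
	}

The query side stores inode_query_iversion(inode) into file->f_version, which also flags the value as having been queried so that later increments are guaranteed to be visible.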
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ff8d5bf4354f..23c7f395d718 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -895,20 +895,38 @@ error:
* However, if we didn't have a callback promise outstanding, or it was
* outstanding on a different server, then it won't break it either...
*/
-static int afs_dir_remove_link(struct dentry *dentry, struct key *key)
+static int afs_dir_remove_link(struct dentry *dentry, struct key *key,
+ unsigned long d_version_before,
+ unsigned long d_version_after)
{
+ bool dir_valid;
int ret = 0;
+ /* There were no intervening changes on the server if the version
+ * number we got back was incremented by exactly 1.
+ */
+ dir_valid = (d_version_after == d_version_before + 1);
+
if (d_really_is_positive(dentry)) {
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- kdebug("AFS_VNODE_DELETED");
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
-
- ret = afs_validate(vnode, key);
- if (ret == -ESTALE)
+ if (dir_valid) {
+ drop_nlink(&vnode->vfs_inode);
+ if (vnode->vfs_inode.i_nlink == 0) {
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ }
ret = 0;
+ } else {
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+ kdebug("AFS_VNODE_DELETED");
+
+ ret = afs_validate(vnode, key);
+ if (ret == -ESTALE)
+ ret = 0;
+ }
_debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
}
@@ -923,6 +941,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
struct afs_fs_cursor fc;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct key *key;
+ unsigned long d_version = (unsigned long)dentry->d_fsdata;
int ret;
_enter("{%x:%u},{%pd}",
@@ -955,7 +974,9 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
ret = afs_end_vnode_operation(&fc);
if (ret == 0)
- ret = afs_dir_remove_link(dentry, key);
+ ret = afs_dir_remove_link(
+ dentry, key, d_version,
+ (unsigned long)dvnode->status.data_version);
}
error_key:
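
The rewritten afs_dir_remove_link() encodes an optimistic consistency check: dentry->d_fsdata holds the directory data version sampled before the unlink RPC, and if the version returned afterwards is exactly one higher, our unlink was the only change the server saw, so the local link count can be updated without revalidating. The decision, reduced to a standalone sketch (types simplified for illustration):

	#include <stdbool.h>

	/* True if our unlink was the only change the server saw. */
	static bool dir_valid_after_unlink(unsigned long version_before,
					   unsigned long version_after)
	{
		return version_after == version_before + 1;
	}

On the true branch the vnode's nlink is dropped locally; otherwise the callback promise is cleared and the vnode is revalidated against the server as before.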
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index b90ef39ae914..88ec38c2d83c 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/circ_buf.h>
+#include <linux/iversion.h>
#include "internal.h"
#include "afs_fs.h"
@@ -124,7 +125,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
- vnode->vfs_inode.i_version = data_version;
+ inode_set_iversion_raw(&vnode->vfs_inode, data_version);
}
expected_version = status->data_version;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 3415eb7484f6..c7f17c44c7ce 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/iversion.h>
#include "internal.h"
static const struct inode_operations afs_symlink_inode_operations = {
@@ -89,7 +90,7 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
inode->i_generation = vnode->fid.unique;
- inode->i_version = vnode->status.data_version;
+ inode_set_iversion_raw(inode, vnode->status.data_version);
inode->i_mapping->a_ops = &afs_fs_aops;
read_sequnlock_excl(&vnode->cb_lock);
@@ -218,7 +219,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
- inode->i_version = 0;
+ inode_set_iversion_raw(inode, 0);
inode->i_generation = 0;
set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
@@ -377,6 +378,10 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
}
read_sequnlock_excl(&vnode->cb_lock);
+
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+ clear_nlink(&vnode->vfs_inode);
+
if (valid)
goto valid;
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index ea1460b9b71a..e1126659f043 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -885,7 +885,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
{
struct afs_net *net = call->net;
enum afs_call_state state;
- u32 remote_abort;
+ u32 remote_abort = 0;
int ret;
_enter("{%s,%zu},,%zu,%d",
diff --git a/fs/afs/write.c b/fs/afs/write.c
index cb5f8a3df577..9370e2feb999 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -198,7 +198,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
ret = afs_fill_page(vnode, key, pos + copied,
len - copied, page);
if (ret < 0)
- return ret;
+ goto out;
}
SetPageUptodate(page);
}
@@ -206,10 +206,12 @@ int afs_write_end(struct file *file, struct address_space *mapping,
set_page_dirty(page);
if (PageDirty(page))
_debug("dirtied");
+ ret = copied;
+
+out:
unlock_page(page);
put_page(page);
-
- return copied;
+ return ret;
}
/*
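
The afs_write_end() fix is the standard single-exit cleanup pattern: the early return on a failed afs_fill_page() leaked the page lock and reference, so all exits are now funneled through one label. The general shape (a sketch, not the AFS code itself):

	static int example_write_end(struct page *page, int copied, int fill_err)
	{
		int ret = fill_err;

		if (ret < 0)
			goto out;	/* error path still unlocks and releases */

		ret = copied;		/* success: report the bytes copied */
	out:
		unlock_page(page);
		put_page(page);
		return ret;
	}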
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 961a12dc6dc8..a0c57c37fa21 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -442,8 +442,8 @@ int autofs4_wait(struct autofs_sb_info *sbi,
memcpy(&wq->name, &qstr, sizeof(struct qstr));
wq->dev = autofs4_get_dev(sbi);
wq->ino = autofs4_get_ino(sbi);
- wq->uid = current_cred()->uid;
- wq->gid = current_cred()->gid;
+ wq->uid = current_uid();
+ wq->gid = current_gid();
wq->pid = pid;
wq->tgid = tgid;
wq->status = -EINTR; /* Status return if interrupted */
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 2e558227931a..273351ee4c46 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -38,9 +38,6 @@ config BTRFS_FS_POSIX_ACL
POSIX Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N
config BTRFS_FS_CHECK_INTEGRITY
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 6fe881d5cb38..0c4373628eb4 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -19,4 +19,4 @@ btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
tests/extent-buffer-tests.o tests/btrfs-tests.o \
tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
- tests/free-space-tree-tests.o
+ tests/free-space-tree-tests.o tests/extent-map-tests.o
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 7d0dc100a09a..e4054e533f6d 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -216,7 +216,8 @@ static int prelim_ref_compare(struct prelim_ref *ref1,
return 0;
}
-void update_share_count(struct share_check *sc, int oldcount, int newcount)
+static void update_share_count(struct share_check *sc, int oldcount,
+ int newcount)
{
if ((!sc) || (oldcount == 0 && newcount < 1))
return;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 5982c8a71f02..07d049c0c20f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -33,7 +33,6 @@
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
-#include <linux/sort.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
@@ -45,6 +44,21 @@
#include "extent_io.h"
#include "extent_map.h"
+static const char * const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
+
+const char *btrfs_compress_type2str(enum btrfs_compression_type type)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB:
+ case BTRFS_COMPRESS_LZO:
+ case BTRFS_COMPRESS_ZSTD:
+ case BTRFS_COMPRESS_NONE:
+ return btrfs_compress_types[type];
+ }
+
+ return NULL;
+}
+
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
@@ -348,8 +362,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
page->mapping = NULL;
if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
PAGE_SIZE) {
- bio_get(bio);
-
/*
* inc the count before we submit the bio so
* we know the end IO handler won't happen before
@@ -372,8 +384,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_endio(bio);
}
- bio_put(bio);
-
bio = btrfs_bio_alloc(bdev, first_byte);
bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
@@ -389,7 +399,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
first_byte += PAGE_SIZE;
cond_resched();
}
- bio_get(bio);
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
@@ -405,13 +414,12 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_endio(bio);
}
- bio_put(bio);
return 0;
}
static u64 bio_end_offset(struct bio *bio)
{
- struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ struct bio_vec *last = bio_last_bvec_all(bio);
return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
@@ -563,7 +571,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
/* we need the actual starting offset of this extent in the file */
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree,
- page_offset(bio->bi_io_vec->bv_page),
+ page_offset(bio_first_page_all(bio)),
PAGE_SIZE);
read_unlock(&em_tree->lock);
if (!em)
@@ -638,8 +646,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page->mapping = NULL;
if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
PAGE_SIZE) {
- bio_get(comp_bio);
-
ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
@@ -666,8 +672,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_endio(comp_bio);
}
- bio_put(comp_bio);
-
comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
comp_bio->bi_private = cb;
@@ -677,7 +681,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
}
cur_disk_byte += PAGE_SIZE;
}
- bio_get(comp_bio);
ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
@@ -693,7 +696,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_endio(comp_bio);
}
- bio_put(comp_bio);
return 0;
fail2:
@@ -752,6 +754,8 @@ struct heuristic_ws {
u32 sample_size;
/* Buckets store counters for each byte value */
struct bucket_item *bucket;
+ /* Sorting buffer */
+ struct bucket_item *bucket_b;
struct list_head list;
};
@@ -763,6 +767,7 @@ static void free_heuristic_ws(struct list_head *ws)
kvfree(workspace->sample);
kfree(workspace->bucket);
+ kfree(workspace->bucket_b);
kfree(workspace);
}
@@ -782,6 +787,10 @@ static struct list_head *alloc_heuristic_ws(void)
if (!ws->bucket)
goto fail;
+ ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
+ if (!ws->bucket_b)
+ goto fail;
+
INIT_LIST_HEAD(&ws->list);
return &ws->list;
fail:
@@ -1278,13 +1287,103 @@ static u32 shannon_entropy(struct heuristic_ws *ws)
return entropy_sum * 100 / entropy_max;
}
-/* Compare buckets by size, ascending */
-static int bucket_comp_rev(const void *lv, const void *rv)
+#define RADIX_BASE 4U
+#define COUNTERS_SIZE (1U << RADIX_BASE)
+
+static u8 get4bits(u64 num, int shift)
+{
+ u8 low4bits;
+
+ num >>= shift;
+ /* Reverse order */
+ low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
+ return low4bits;
+}
+
+/*
+ * Use 4 bits as radix base
+ * Use 16 u32 counters for calculating new position in buf array
+ *
+ * @array - array that will be sorted
+ * @array_buf - buffer array to store sorting results
+ * must be equal in size to @array
+ * @num - array size
+ */
+static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
+ int num)
{
- const struct bucket_item *l = (const struct bucket_item *)lv;
- const struct bucket_item *r = (const struct bucket_item *)rv;
+ u64 max_num;
+ u64 buf_num;
+ u32 counters[COUNTERS_SIZE];
+ u32 new_addr;
+ u32 addr;
+ int bitlen;
+ int shift;
+ int i;
- return r->count - l->count;
+ /*
+ * Try to avoid useless loop iterations for small numbers stored in big
+ * counters. Example: 48 33 4 ... in a 64bit array
+ */
+ max_num = array[0].count;
+ for (i = 1; i < num; i++) {
+ buf_num = array[i].count;
+ if (buf_num > max_num)
+ max_num = buf_num;
+ }
+
+ buf_num = ilog2(max_num);
+ bitlen = ALIGN(buf_num, RADIX_BASE * 2);
+
+ shift = 0;
+ while (shift < bitlen) {
+ memset(counters, 0, sizeof(counters));
+
+ for (i = 0; i < num; i++) {
+ buf_num = array[i].count;
+ addr = get4bits(buf_num, shift);
+ counters[addr]++;
+ }
+
+ for (i = 1; i < COUNTERS_SIZE; i++)
+ counters[i] += counters[i - 1];
+
+ for (i = num - 1; i >= 0; i--) {
+ buf_num = array[i].count;
+ addr = get4bits(buf_num, shift);
+ counters[addr]--;
+ new_addr = counters[addr];
+ array_buf[new_addr] = array[i];
+ }
+
+ shift += RADIX_BASE;
+
+ /*
+ * A normal radix sort moves data from the temporary array back to
+ * the main one, but that costs some CPU time. Avoid the copy by
+ * doing another sort iteration into the original array instead of
+ * a memcpy().
+ */
+ memset(counters, 0, sizeof(counters));
+
+ for (i = 0; i < num; i++) {
+ buf_num = array_buf[i].count;
+ addr = get4bits(buf_num, shift);
+ counters[addr]++;
+ }
+
+ for (i = 1; i < COUNTERS_SIZE; i++)
+ counters[i] += counters[i - 1];
+
+ for (i = num - 1; i >= 0; i--) {
+ buf_num = array_buf[i].count;
+ addr = get4bits(buf_num, shift);
+ counters[addr]--;
+ new_addr = counters[addr];
+ array[new_addr] = array_buf[i];
+ }
+
+ shift += RADIX_BASE;
+ }
}
/*
@@ -1314,7 +1413,7 @@ static int byte_core_set_size(struct heuristic_ws *ws)
struct bucket_item *bucket = ws->bucket;
/* Sort in reverse order */
- sort(bucket, BUCKET_SIZE, sizeof(*bucket), &bucket_comp_rev, NULL);
+ radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
for (i = 0; i < BYTE_CORE_SET_LOW; i++)
coreset_sum += bucket[i].count;
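
The heuristic sort replacement is worth unpacking: sort() with bucket_comp_rev was a comparison sort, while radix_sort() is a base-16 LSD radix sort over the u64 counters. get4bits() complements each 4-bit digit ((COUNTERS_SIZE - 1) - digit), so counting the complemented digits in ascending bucket order yields a descending sort of the original values, and each while iteration runs two counting passes so the data ping-pongs array -> array_buf -> array with no final memcpy(). A minimal userspace check of the digit trick (hypothetical demo, not kernel code):

	#include <stdio.h>

	#define RADIX_BASE	4U
	#define COUNTERS_SIZE	(1U << RADIX_BASE)

	static unsigned get4bits(unsigned long long num, int shift)
	{
		num >>= shift;
		/* Larger digits land in smaller buckets -> descending order. */
		return (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	}

	int main(void)
	{
		printf("%u %u\n", get4bits(0xF, 0), get4bits(0x0, 0)); /* 0 15 */
		return 0;
	}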
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 0868cc554f14..677fa4aa0bd7 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -75,7 +75,7 @@ struct compressed_bio {
u32 sums;
};
-void btrfs_init_compress(void);
+void __init btrfs_init_compress(void);
void btrfs_exit_compress(void);
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
@@ -137,6 +137,8 @@ extern const struct btrfs_compress_op btrfs_zlib_compress;
extern const struct btrfs_compress_op btrfs_lzo_compress;
extern const struct btrfs_compress_op btrfs_zstd_compress;
+const char *btrfs_compress_type2str(enum btrfs_compression_type type);
+
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
#endif
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 1e74cf826532..b88a79e69ddf 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1807,8 +1807,8 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
* simple bin_search frontend that does the right thing for
* leaves vs nodes
*/
-static int bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
- int level, int *slot)
+int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
+ int level, int *slot)
{
if (level == 0)
return generic_bin_search(eb,
@@ -1824,12 +1824,6 @@ static int bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
slot);
}
-int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
- int level, int *slot)
-{
- return bin_search(eb, key, level, slot);
-}
-
static void root_add_used(struct btrfs_root *root, u32 size)
{
spin_lock(&root->accounting_lock);
@@ -2614,7 +2608,7 @@ static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
int level, int *prev_cmp, int *slot)
{
if (*prev_cmp != 0) {
- *prev_cmp = bin_search(b, key, level, slot);
+ *prev_cmp = btrfs_bin_search(b, key, level, slot);
return *prev_cmp;
}
@@ -2660,17 +2654,29 @@ int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
}
/*
- * look for key in the tree. path is filled in with nodes along the way
- * if key is found, we return zero and you can find the item in the leaf
- * level of the path (level 0)
+ * btrfs_search_slot - look for a key in a tree and perform necessary
+ * modifications to preserve tree invariants.
+ *
+ * @trans: Handle of transaction, used when modifying the tree
+ * @p: Holds all btree nodes along the search path
+ * @root: The root node of the tree
+ * @key: The key we are looking for
+ * @ins_len: Indicates the purpose of the search: 1 for inserts, -1 for
+ * deletions, 0 for plain searches
+ * @cow: whether CoW operations should be performed. Must always be 1
+ * when modifying the tree.
+ *
+ * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
+ * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
*
- * If the key isn't found, the path points to the slot where it should
- * be inserted, and 1 is returned. If there are other errors during the
- * search a negative error number is returned.
+ * If @key is found, 0 is returned and you can find the item in the leaf level
+ * of the path (level 0)
*
- * if ins_len > 0, nodes and leaves will be split as we walk down the
- * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
- * possible)
+ * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
+ * points to the slot where it should be inserted
+ *
+ * If an error is encountered while searching the tree a negative error number
+ * is returned
*/
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *key, struct btrfs_path *p,
@@ -2774,6 +2780,8 @@ again:
* contention with the cow code
*/
if (cow) {
+ bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
+
/*
* if we don't really need to cow this block
* then we don't want to set the path blocking,
@@ -2798,9 +2806,13 @@ again:
}
btrfs_set_path_blocking(p);
- err = btrfs_cow_block(trans, root, b,
- p->nodes[level + 1],
- p->slots[level + 1], &b);
+ if (last_level)
+ err = btrfs_cow_block(trans, root, b, NULL, 0,
+ &b);
+ else
+ err = btrfs_cow_block(trans, root, b,
+ p->nodes[level + 1],
+ p->slots[level + 1], &b);
if (err) {
ret = err;
goto done;
@@ -5175,7 +5187,7 @@ again:
while (1) {
nritems = btrfs_header_nritems(cur);
level = btrfs_header_level(cur);
- sret = bin_search(cur, min_key, level, &slot);
+ sret = btrfs_bin_search(cur, min_key, level, &slot);
/* at the lowest level, we're done, setup the path and exit */
if (level == path->lowest_level) {
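
Since the hunk above turns the btrfs_search_slot() comment into proper interface documentation, a typical read-only call looks like this (a sketch of common usage; error handling trimmed):

	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;		/* caller-supplied */
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	/* trans = NULL, ins_len = 0, cow = 0: plain search, no tree changes. */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0) {
		/* Item found at path->nodes[0], slot path->slots[0]. */
	}
	btrfs_free_path(path);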
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 13c260b525a1..1a462ab85c49 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -679,7 +679,6 @@ enum btrfs_orphan_cleanup_state {
/* used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
struct list_head hash_list;
- wait_queue_head_t wait;
spinlock_t lock;
};
@@ -3060,15 +3059,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 dir,
const char *name, u16 name_len,
int mod);
-int verify_dir_item(struct btrfs_fs_info *fs_info,
- struct extent_buffer *leaf, int slot,
- struct btrfs_dir_item *dir_item);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name,
int name_len);
-bool btrfs_is_name_len_valid(struct extent_buffer *leaf, int slot,
- unsigned long start, u16 name_len);
/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -3197,7 +3191,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
-int btrfs_init_cachep(void);
+int __init btrfs_init_cachep(void);
void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
@@ -3248,7 +3242,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
struct file *dst_file, u64 dst_loff);
/* file.c */
-int btrfs_auto_defrag_init(void);
+int __init btrfs_auto_defrag_init(void);
void btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode);
@@ -3283,7 +3277,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
/* sysfs.c */
-int btrfs_init_sysfs(void);
+int __init btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 5d73f79ded8b..0530f6f2e4ba 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -18,6 +18,7 @@
*/
#include <linux/slab.h>
+#include <linux/iversion.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
@@ -87,6 +88,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
spin_lock(&root->inode_lock);
node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+
if (node) {
if (btrfs_inode->delayed_node) {
refcount_inc(&node->refs); /* can be accessed */
@@ -94,9 +96,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
spin_unlock(&root->inode_lock);
return node;
}
- btrfs_inode->delayed_node = node;
- /* can be accessed and cached in the inode */
- refcount_add(2, &node->refs);
+
+ /*
+ * It's possible that we're racing into the middle of removing
+ * this node from the radix tree. In this case, the refcount
+ * was zero and it should never go back to one. Just return
+ * NULL like it was never in the radix at all; our release
+ * function is in the process of removing it.
+ *
+ * Some implementations of refcount_inc refuse to bump the
+ * refcount once it has hit zero. If we don't do this dance
+ * here, refcount_inc() may decide to just WARN_ONCE() instead
+ * of actually bumping the refcount.
+ *
+ * If this node is properly in the radix, we want to bump the
+ * refcount twice, once for the inode and once for this get
+ * operation.
+ */
+ if (refcount_inc_not_zero(&node->refs)) {
+ refcount_inc(&node->refs);
+ btrfs_inode->delayed_node = node;
+ } else {
+ node = NULL;
+ }
+
spin_unlock(&root->inode_lock);
return node;
}
@@ -254,17 +277,18 @@ static void __btrfs_release_delayed_node(
mutex_unlock(&delayed_node->mutex);
if (refcount_dec_and_test(&delayed_node->refs)) {
- bool free = false;
struct btrfs_root *root = delayed_node->root;
+
spin_lock(&root->inode_lock);
- if (refcount_read(&delayed_node->refs) == 0) {
- radix_tree_delete(&root->delayed_nodes_tree,
- delayed_node->inode_id);
- free = true;
- }
+ /*
+ * Once our refcount goes to zero, nobody is allowed to bump it
+ * back up. We can delete it now.
+ */
+ ASSERT(refcount_read(&delayed_node->refs) == 0);
+ radix_tree_delete(&root->delayed_nodes_tree,
+ delayed_node->inode_id);
spin_unlock(&root->inode_lock);
- if (free)
- kmem_cache_free(delayed_node_cache, delayed_node);
+ kmem_cache_free(delayed_node_cache, delayed_node);
}
}
@@ -1279,40 +1303,42 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
if (!path)
goto out;
-again:
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
- goto free_path;
+ do {
+ if (atomic_read(&delayed_root->items) <
+ BTRFS_DELAYED_BACKGROUND / 2)
+ break;
- delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
- if (!delayed_node)
- goto free_path;
+ delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
+ if (!delayed_node)
+ break;
- path->leave_spinning = 1;
- root = delayed_node->root;
+ path->leave_spinning = 1;
+ root = delayed_node->root;
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- goto release_path;
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ btrfs_release_path(path);
+ btrfs_release_prepared_delayed_node(delayed_node);
+ total_done++;
+ continue;
+ }
- block_rsv = trans->block_rsv;
- trans->block_rsv = &root->fs_info->delayed_block_rsv;
+ block_rsv = trans->block_rsv;
+ trans->block_rsv = &root->fs_info->delayed_block_rsv;
- __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
+ __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
- trans->block_rsv = block_rsv;
- btrfs_end_transaction(trans);
- btrfs_btree_balance_dirty_nodelay(root->fs_info);
+ trans->block_rsv = block_rsv;
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty_nodelay(root->fs_info);
-release_path:
- btrfs_release_path(path);
- total_done++;
+ btrfs_release_path(path);
+ btrfs_release_prepared_delayed_node(delayed_node);
+ total_done++;
- btrfs_release_prepared_delayed_node(delayed_node);
- if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
- total_done < async_work->nr)
- goto again;
+ } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
+ total_done < async_work->nr);
-free_path:
btrfs_free_path(path);
out:
wake_up(&delayed_root->wait);
@@ -1325,10 +1351,6 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
{
struct btrfs_async_delayed_work *async_work;
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
- btrfs_workqueue_normal_congested(fs_info->delayed_workers))
- return 0;
-
async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
if (!async_work)
return -ENOMEM;
@@ -1364,7 +1386,8 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+ if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
+ btrfs_workqueue_normal_congested(fs_info->delayed_workers))
return;
if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
@@ -1610,28 +1633,18 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index)
{
- struct btrfs_delayed_item *curr, *next;
- int ret;
-
- if (list_empty(del_list))
- return 0;
+ struct btrfs_delayed_item *curr;
+ int ret = 0;
- list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+ list_for_each_entry(curr, del_list, readdir_list) {
if (curr->key.offset > index)
break;
-
- list_del(&curr->readdir_list);
- ret = (curr->key.offset == index);
-
- if (refcount_dec_and_test(&curr->refs))
- kfree(curr);
-
- if (ret)
- return 1;
- else
- continue;
+ if (curr->key.offset == index) {
+ ret = 1;
+ break;
+ }
}
- return 0;
+ return ret;
}
/*
@@ -1700,7 +1713,8 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
btrfs_set_stack_inode_generation(inode_item,
BTRFS_I(inode)->generation);
- btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
+ btrfs_set_stack_inode_sequence(inode_item,
+ inode_peek_iversion(inode));
btrfs_set_stack_inode_transid(inode_item, trans->transid);
btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
@@ -1754,7 +1768,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
- inode->i_version = btrfs_stack_inode_sequence(inode_item);
+ inode_set_iversion_queried(inode,
+ btrfs_stack_inode_sequence(inode_item));
inode->i_rdev = 0;
*rdev = btrfs_stack_inode_rdev(inode_item);
BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
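
The refcount change in btrfs_get_delayed_node() is an instance of the standard lookup-vs-final-put race pattern: under the same lock the release side takes, the lookup refuses to resurrect a node whose refcount has already hit zero. Condensed, using the same calls as the hunk above:

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node && !refcount_inc_not_zero(&node->refs))
		node = NULL;	/* release path owns it; treat as a miss */
	spin_unlock(&root->inode_lock);

This pairing is what lets __btrfs_release_delayed_node() assert that the refcount is still zero after reacquiring inode_lock and delete the radix entry unconditionally.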
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 83be8f9fd906..a1a40cf382e3 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -937,7 +937,7 @@ void btrfs_delayed_ref_exit(void)
kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
-int btrfs_delayed_ref_init(void)
+int __init btrfs_delayed_ref_init(void)
{
btrfs_delayed_ref_head_cachep = kmem_cache_create(
"btrfs_delayed_ref_head",
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index a43af432f859..c4f625e5a691 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -203,7 +203,7 @@ extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
-int btrfs_delayed_ref_init(void);
+int __init btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);
static inline struct btrfs_delayed_extent_op *
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 7c655f9a7a50..7efbc4d1128b 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -172,7 +172,8 @@ no_valid_dev_replace_entry_found:
dev_replace->tgtdev->commit_bytes_used =
dev_replace->srcdev->commit_bytes_used;
}
- dev_replace->tgtdev->is_tgtdev_for_dev_replace = 1;
+ set_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+ &dev_replace->tgtdev->dev_state);
btrfs_init_dev_replace_tgtdev_for_resume(fs_info,
dev_replace->tgtdev);
}
@@ -304,6 +305,14 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
dev_replace->cursor_left_last_write_of_item;
}
+static char* btrfs_dev_name(struct btrfs_device *device)
+{
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+ return "<missing disk>";
+ else
+ return rcu_str_deref(device->name);
+}
+
int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
int read_src)
@@ -363,8 +372,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
btrfs_info_in_rcu(fs_info,
"dev_replace from %s (devid %llu) to %s started",
- src_device->missing ? "<missing disk>" :
- rcu_str_deref(src_device->name),
+ btrfs_dev_name(src_device),
src_device->devid,
rcu_str_deref(tgt_device->name));
@@ -538,8 +546,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
} else {
btrfs_err_in_rcu(fs_info,
"btrfs_scrub_dev(%s, %llu, %s) failed %d",
- src_device->missing ? "<missing disk>" :
- rcu_str_deref(src_device->name),
+ btrfs_dev_name(src_device),
src_device->devid,
rcu_str_deref(tgt_device->name), scrub_ret);
btrfs_dev_replace_unlock(dev_replace, 1);
@@ -557,11 +564,10 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
btrfs_info_in_rcu(fs_info,
"dev_replace from %s (devid %llu) to %s finished",
- src_device->missing ? "<missing disk>" :
- rcu_str_deref(src_device->name),
+ btrfs_dev_name(src_device),
src_device->devid,
rcu_str_deref(tgt_device->name));
- tgt_device->is_tgtdev_for_dev_replace = 0;
+ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &tgt_device->dev_state);
tgt_device->devid = src_device->devid;
src_device->devid = BTRFS_DEV_REPLACE_DEVID;
memcpy(uuid_tmp, tgt_device->uuid, sizeof(uuid_tmp));
@@ -814,12 +820,10 @@ static int btrfs_dev_replace_kthread(void *data)
progress = btrfs_dev_replace_progress(fs_info);
progress = div_u64(progress, 10);
btrfs_info_in_rcu(fs_info,
- "continuing dev_replace from %s (devid %llu) to %s @%u%%",
- dev_replace->srcdev->missing ? "<missing disk>"
- : rcu_str_deref(dev_replace->srcdev->name),
+ "continuing dev_replace from %s (devid %llu) to target %s @%u%%",
+ btrfs_dev_name(dev_replace->srcdev),
dev_replace->srcdev->devid,
- dev_replace->tgtdev ? rcu_str_deref(dev_replace->tgtdev->name)
- : "<missing target disk>",
+ btrfs_dev_name(dev_replace->tgtdev),
(unsigned int)progress);
btrfs_dev_replace_continue_on_mount(fs_info);
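
The dev-replace hunks belong to a series converting per-device booleans (missing, writeable, in_fs_metadata, is_tgtdev_for_dev_replace, flush_bio_sent) into bits of a single unsigned long dev_state accessed through the atomic bitops. The mechanical pattern, with the flag names this series introduces:

	/* was: device->is_tgtdev_for_dev_replace = 1; */
	set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	/* was: if (device->missing) continue; */
	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		continue;

	/* was: device->is_tgtdev_for_dev_replace = 0; */
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);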
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 41cb9196eaa8..cbe421605cd5 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -403,8 +403,6 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
btrfs_dir_data_len(leaf, dir_item);
name_ptr = (unsigned long)(dir_item + 1);
- if (verify_dir_item(fs_info, leaf, path->slots[0], dir_item))
- return NULL;
if (btrfs_dir_name_len(leaf, dir_item) == name_len &&
memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
return dir_item;
@@ -450,109 +448,3 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
}
return ret;
}
-
-int verify_dir_item(struct btrfs_fs_info *fs_info,
- struct extent_buffer *leaf,
- int slot,
- struct btrfs_dir_item *dir_item)
-{
- u16 namelen = BTRFS_NAME_LEN;
- int ret;
- u8 type = btrfs_dir_type(leaf, dir_item);
-
- if (type >= BTRFS_FT_MAX) {
- btrfs_crit(fs_info, "invalid dir item type: %d", (int)type);
- return 1;
- }
-
- if (type == BTRFS_FT_XATTR)
- namelen = XATTR_NAME_MAX;
-
- if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
- btrfs_crit(fs_info, "invalid dir item name len: %u",
- (unsigned)btrfs_dir_name_len(leaf, dir_item));
- return 1;
- }
-
- namelen = btrfs_dir_name_len(leaf, dir_item);
- ret = btrfs_is_name_len_valid(leaf, slot,
- (unsigned long)(dir_item + 1), namelen);
- if (!ret)
- return 1;
-
- /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
- if ((btrfs_dir_data_len(leaf, dir_item) +
- btrfs_dir_name_len(leaf, dir_item)) >
- BTRFS_MAX_XATTR_SIZE(fs_info)) {
- btrfs_crit(fs_info, "invalid dir item name + data len: %u + %u",
- (unsigned)btrfs_dir_name_len(leaf, dir_item),
- (unsigned)btrfs_dir_data_len(leaf, dir_item));
- return 1;
- }
-
- return 0;
-}
-
-bool btrfs_is_name_len_valid(struct extent_buffer *leaf, int slot,
- unsigned long start, u16 name_len)
-{
- struct btrfs_fs_info *fs_info = leaf->fs_info;
- struct btrfs_key key;
- u32 read_start;
- u32 read_end;
- u32 item_start;
- u32 item_end;
- u32 size;
- bool ret = true;
-
- ASSERT(start > BTRFS_LEAF_DATA_OFFSET);
-
- read_start = start - BTRFS_LEAF_DATA_OFFSET;
- read_end = read_start + name_len;
- item_start = btrfs_item_offset_nr(leaf, slot);
- item_end = btrfs_item_end_nr(leaf, slot);
-
- btrfs_item_key_to_cpu(leaf, &key, slot);
-
- switch (key.type) {
- case BTRFS_DIR_ITEM_KEY:
- case BTRFS_XATTR_ITEM_KEY:
- case BTRFS_DIR_INDEX_KEY:
- size = sizeof(struct btrfs_dir_item);
- break;
- case BTRFS_INODE_REF_KEY:
- size = sizeof(struct btrfs_inode_ref);
- break;
- case BTRFS_INODE_EXTREF_KEY:
- size = sizeof(struct btrfs_inode_extref);
- break;
- case BTRFS_ROOT_REF_KEY:
- case BTRFS_ROOT_BACKREF_KEY:
- size = sizeof(struct btrfs_root_ref);
- break;
- default:
- ret = false;
- goto out;
- }
-
- if (read_start < item_start) {
- ret = false;
- goto out;
- }
- if (read_end > item_end) {
- ret = false;
- goto out;
- }
-
- /* there shall be item(s) before name */
- if (read_start - item_start < size) {
- ret = false;
- goto out;
- }
-
-out:
- if (!ret)
- btrfs_crit(fs_info, "invalid dir item name len: %u",
- (unsigned int)name_len);
- return ret;
-}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a8ecccfc36de..21f34ad0d411 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -30,6 +30,7 @@
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
+#include <linux/error-injection.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
@@ -61,7 +62,8 @@
BTRFS_HEADER_FLAG_RELOC |\
BTRFS_SUPER_FLAG_ERROR |\
BTRFS_SUPER_FLAG_SEEDING |\
- BTRFS_SUPER_FLAG_METADUMP)
+ BTRFS_SUPER_FLAG_METADUMP |\
+ BTRFS_SUPER_FLAG_METADUMP_V2)
static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
@@ -220,7 +222,7 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
* extents on the btree inode are pretty simple, there's one extent
* that covers the entire device
*/
-static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
+struct extent_map *btree_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
@@ -285,7 +287,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
int verify)
{
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
- char *result = NULL;
+ char result[BTRFS_CSUM_SIZE];
unsigned long len;
unsigned long cur_len;
unsigned long offset = BTRFS_CSUM_SIZE;
@@ -294,7 +296,6 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
unsigned long map_len;
int err;
u32 crc = ~(u32)0;
- unsigned long inline_result;
len = buf->len - offset;
while (len > 0) {
@@ -308,13 +309,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
len -= cur_len;
offset += cur_len;
}
- if (csum_size > sizeof(inline_result)) {
- result = kzalloc(csum_size, GFP_NOFS);
- if (!result)
- return -ENOMEM;
- } else {
- result = (char *)&inline_result;
- }
+ memset(result, 0, BTRFS_CSUM_SIZE);
btrfs_csum_final(crc, result);
@@ -329,15 +324,12 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
"%s checksum verify failed on %llu wanted %X found %X level %d",
fs_info->sb->s_id, buf->start,
val, found, btrfs_header_level(buf));
- if (result != (char *)&inline_result)
- kfree(result);
return -EUCLEAN;
}
} else {
write_extent_buffer(buf, result, 0, csum_size);
}
- if (result != (char *)&inline_result)
- kfree(result);
+
return 0;
}
@@ -391,7 +383,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
clear_extent_buffer_uptodate(eb);
out:
unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
- &cached_state, GFP_NOFS);
+ &cached_state);
if (need_lock)
btrfs_tree_read_unlock_blocking(eb);
return ret;
@@ -455,7 +447,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
while (1) {
ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
- btree_get_extent, mirror_num);
+ mirror_num);
if (!ret) {
if (!verify_parent_transid(io_tree, eb,
parent_transid, 0))
@@ -1012,7 +1004,7 @@ void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
- buf, WAIT_NONE, btree_get_extent, 0);
+ buf, WAIT_NONE, 0);
free_extent_buffer(buf);
}
@@ -1031,7 +1023,7 @@ int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
- btree_get_extent, mirror_num);
+ mirror_num);
if (ret) {
free_extent_buffer(buf);
return ret;
@@ -1243,7 +1235,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root;
struct btrfs_key key;
int ret = 0;
- uuid_le uuid;
+ uuid_le uuid = NULL_UUID_LE;
root = btrfs_alloc_root(fs_info, GFP_KERNEL);
if (!root)
@@ -1284,7 +1276,8 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
btrfs_set_root_used(&root->root_item, leaf->len);
btrfs_set_root_last_snapshot(&root->root_item, 0);
btrfs_set_root_dirid(&root->root_item, 0);
- uuid_le_gen(&uuid);
+ if (is_fstree(objectid))
+ uuid_le_gen(&uuid);
memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
root->root_item.drop_level = 0;
@@ -2875,7 +2868,7 @@ retry_root_backup:
goto fail_sysfs;
}
- if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info)) {
+ if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
btrfs_warn(fs_info,
"writeable mount is not allowed due to too many missing devices");
goto fail_sysfs;
@@ -3123,6 +3116,7 @@ recovery_tree_root:
goto fail_block_groups;
goto retry_root_backup;
}
+ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
@@ -3357,7 +3351,7 @@ static void write_dev_flush(struct btrfs_device *device)
bio->bi_private = &device->flush_wait;
btrfsic_submit_bio(bio);
- device->flush_bio_sent = 1;
+ set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}
/*
@@ -3367,10 +3361,10 @@ static blk_status_t wait_dev_flush(struct btrfs_device *device)
{
struct bio *bio = device->flush_bio;
- if (!device->flush_bio_sent)
+ if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
return BLK_STS_OK;
- device->flush_bio_sent = 0;
+ clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
wait_for_completion_io(&device->flush_wait);
return bio->bi_status;
@@ -3378,7 +3372,7 @@ static blk_status_t wait_dev_flush(struct btrfs_device *device)
static int check_barrier_error(struct btrfs_fs_info *fs_info)
{
- if (!btrfs_check_rw_degradable(fs_info))
+ if (!btrfs_check_rw_degradable(fs_info, NULL))
return -EIO;
return 0;
}
@@ -3394,14 +3388,16 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
int errors_wait = 0;
blk_status_t ret;
+ lockdep_assert_held(&info->fs_devices->device_list_mutex);
/* send down all the barriers */
head = &info->fs_devices->devices;
- list_for_each_entry_rcu(dev, head, dev_list) {
- if (dev->missing)
+ list_for_each_entry(dev, head, dev_list) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
continue;
if (!dev->bdev)
continue;
- if (!dev->in_fs_metadata || !dev->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
continue;
write_dev_flush(dev);
@@ -3409,14 +3405,15 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
}
/* wait for all the barriers */
- list_for_each_entry_rcu(dev, head, dev_list) {
- if (dev->missing)
+ list_for_each_entry(dev, head, dev_list) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
continue;
if (!dev->bdev) {
errors_wait++;
continue;
}
- if (!dev->in_fs_metadata || !dev->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
continue;
ret = wait_dev_flush(dev);
@@ -3508,12 +3505,13 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
}
}
- list_for_each_entry_rcu(dev, head, dev_list) {
+ list_for_each_entry(dev, head, dev_list) {
if (!dev->bdev) {
total_errors++;
continue;
}
- if (!dev->in_fs_metadata || !dev->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
continue;
btrfs_set_stack_device_generation(dev_item, 0);
@@ -3549,10 +3547,11 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
}
total_errors = 0;
- list_for_each_entry_rcu(dev, head, dev_list) {
+ list_for_each_entry(dev, head, dev_list) {
if (!dev->bdev)
continue;
- if (!dev->in_fs_metadata || !dev->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
continue;
ret = wait_dev_supers(dev, max_mirrors);
@@ -3910,9 +3909,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
btrfs_err(fs_info, "no valid FS found");
ret = -EINVAL;
}
- if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
- btrfs_warn(fs_info, "unrecognized super flag: %llu",
+ if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
+ btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
+ ret = -EINVAL;
+ }
if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
btrfs_err(fs_info, "tree_root level too big: %d >= %d",
btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
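
Two of the disk-io.c changes deserve a note. csum_tree_block() previously heap-allocated the result buffer whenever csum_size exceeded a small inline variable; since every supported checksum fits in BTRFS_CSUM_SIZE bytes, a fixed stack array removes the allocation, its failure path, and both kfree() calls:

	/* Sketch of the simplified shape. */
	char result[BTRFS_CSUM_SIZE];

	memset(result, 0, BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, result);	/* always fits; nothing to free */

Separately, the new ALLOW_ERROR_INJECTION(open_ctree, ERRNO) annotation marks open_ctree() as a safe target for the error-injection framework pulled in via <linux/error-injection.h>, so its return value can be overridden with an errno during fault-injection testing.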
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 7f7c35d6347a..301151a50ac1 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -149,6 +149,9 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
u64 objectid);
int btree_lock_page_hook(struct page *page, void *data,
void (*flush_fn)(void *));
+struct extent_map *btree_get_extent(struct btrfs_inode *inode,
+ struct page *page, size_t pg_offset, u64 start, u64 len,
+ int create);
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
int __init btrfs_end_io_wq_init(void);
void btrfs_end_io_wq_exit(void);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 3aeb5770f896..ddaccad469f8 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -283,11 +283,6 @@ static int btrfs_get_name(struct dentry *parent, char *name,
name_len = btrfs_inode_ref_name_len(leaf, iref);
}
- ret = btrfs_is_name_len_valid(leaf, path->slots[0], name_ptr, name_len);
- if (!ret) {
- btrfs_free_path(path);
- return -EIO;
- }
read_extent_buffer(leaf, name, name_ptr, name_len);
btrfs_free_path(path);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2f4328511ac8..05751a677da4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2145,7 +2145,10 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
for (i = 0; i < bbio->num_stripes; i++, stripe++) {
u64 bytes;
- if (!stripe->dev->can_discard)
+ struct request_queue *req_q;
+
+ req_q = bdev_get_queue(stripe->dev->bdev);
+ if (!blk_queue_discard(req_q))
continue;
ret = btrfs_issue_discard(stripe->dev->bdev,
@@ -2894,7 +2897,7 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_block_rsv *global_rsv;
u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
- u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
+ unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
u64 num_bytes, num_dirty_bgs_bytes;
int ret = 0;
@@ -4945,12 +4948,12 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
bytes = 0;
else
bytes -= delayed_rsv->size;
+ spin_unlock(&delayed_rsv->lock);
+
if (percpu_counter_compare(&space_info->total_bytes_pinned,
bytes) < 0) {
- spin_unlock(&delayed_rsv->lock);
return -ENOSPC;
}
- spin_unlock(&delayed_rsv->lock);
commit:
trans = btrfs_join_transaction(fs_info->extent_root);
@@ -5738,8 +5741,8 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
* or return if we already have enough space. This will also handle the reserve
* tracepoint for the reserved amount.
*/
-int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
- enum btrfs_reserve_flush_enum flush)
+static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
+ enum btrfs_reserve_flush_enum flush)
{
struct btrfs_root *root = inode->root;
struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
@@ -5770,7 +5773,7 @@ int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
* This is the same as btrfs_block_rsv_release, except that it handles the
* tracepoint for the reservation.
*/
-void btrfs_inode_rsv_release(struct btrfs_inode *inode)
+static void btrfs_inode_rsv_release(struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
@@ -9690,7 +9693,7 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
* space to fit our block group in.
*/
if (device->total_bytes > device->bytes_used + min_free &&
- !device->is_tgtdev_for_dev_replace) {
+ !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = find_free_dev_extent(trans, device, min_free,
&dev_offset, NULL);
if (!ret)
@@ -10875,7 +10878,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
*trimmed = 0;
/* Not writeable = nothing to do. */
- if (!device->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
return 0;
/* No free space = nothing to do. */
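
Also note the lock-scope fix in may_commit_transaction() above: delayed_rsv->lock only guards the read of delayed_rsv->size, so the unlock now happens once, before percpu_counter_compare(), instead of being duplicated on each branch after the comparison. In outline:

	spin_lock(&delayed_rsv->lock);
	if (delayed_rsv->size > bytes)
		bytes = 0;
	else
		bytes -= delayed_rsv->size;
	spin_unlock(&delayed_rsv->lock);

	if (percpu_counter_compare(&space_info->total_bytes_pinned, bytes) < 0)
		return -ENOSPC;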
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 012d63870b99..dfeb74a0be77 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -21,6 +21,7 @@
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
+#include "disk-io.h"
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
@@ -109,8 +110,6 @@ struct tree_entry {
struct extent_page_data {
struct bio *bio;
struct extent_io_tree *tree;
- get_extent_t *get_extent;
-
/* tells writepage not to lock the state bits for this range
* it still does the unlocking
*/
@@ -139,7 +138,8 @@ static void add_extent_changeset(struct extent_state *state, unsigned bits,
BUG_ON(ret < 0);
}
-static noinline void flush_write_bio(void *data);
+static void flush_write_bio(struct extent_page_data *epd);
+
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
@@ -581,7 +581,7 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
*
* This takes the tree lock, and returns 0 on success and < 0 on error.
*/
-static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int wake, int delete,
struct extent_state **cached_state,
gfp_t mask, struct extent_changeset *changeset)
@@ -1295,10 +1295,10 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int wake, int delete,
- struct extent_state **cached, gfp_t mask)
+ struct extent_state **cached)
{
return __clear_extent_bit(tree, start, end, bits, wake, delete,
- cached, mask, NULL);
+ cached, GFP_NOFS, NULL);
}
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1348,7 +1348,7 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
if (err == -EEXIST) {
if (failed_start > start)
clear_extent_bit(tree, start, failed_start - 1,
- EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
+ EXTENT_LOCKED, 1, 0, NULL);
return 0;
}
return 1;
@@ -1648,7 +1648,7 @@ again:
EXTENT_DELALLOC, 1, cached_state);
if (!ret) {
unlock_extent_cached(tree, delalloc_start, delalloc_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
__unlock_for_delalloc(inode, locked_page,
delalloc_start, delalloc_end);
cond_resched();
@@ -1744,7 +1744,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
unsigned long page_ops)
{
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
- NULL, GFP_NOFS);
+ NULL);
__process_pages_contig(inode->i_mapping, locked_page,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
@@ -2027,7 +2027,8 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
bio->bi_iter.bi_sector = sector;
dev = bbio->stripes[bbio->mirror_num - 1].dev;
btrfs_put_bbio(bbio);
- if (!dev || !dev->bdev || !dev->writeable) {
+ if (!dev || !dev->bdev ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
btrfs_bio_counter_dec(fs_info);
bio_put(bio);
return -EIO;
@@ -2257,7 +2258,7 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
return 0;
}
-bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
struct io_failure_record *failrec, int failed_mirror)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2281,7 +2282,7 @@ bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
* a) deliver good data to the caller
* b) correct the bad sectors on disk
*/
- if (failed_bio->bi_vcnt > 1) {
+ if (failed_bio_pages > 1) {
/*
* to fulfill b), we need to know the exact failing sectors, as
* we don't want to rewrite any more than the failed ones. thus,
@@ -2374,6 +2375,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
int read_mode = 0;
blk_status_t status;
int ret;
+ unsigned failed_bio_pages = bio_pages_all(failed_bio);
BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2381,13 +2383,13 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
if (ret)
return ret;
- if (!btrfs_check_repairable(inode, failed_bio, failrec,
+ if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
failed_mirror)) {
free_io_failure(failure_tree, tree, failrec);
return -EIO;
}
- if (failed_bio->bi_vcnt > 1)
+ if (failed_bio_pages > 1)
read_mode |= REQ_FAILFAST_DEV;
phy_offset >>= inode->i_sb->s_blocksize_bits;
@@ -2492,7 +2494,7 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
if (uptodate && tree->track_uptodate)
set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
- unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
+ unlock_extent_cached_atomic(tree, start, end, &cached);
}
/*
@@ -2724,7 +2726,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
unsigned long bio_flags)
{
blk_status_t ret = 0;
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct bio_vec *bvec = bio_last_bvec_all(bio);
struct page *page = bvec->bv_page;
struct extent_io_tree *tree = bio->bi_private;
u64 start;
@@ -2732,7 +2734,6 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
start = page_offset(page) + bvec->bv_offset;
bio->bi_private = NULL;
- bio_get(bio);
if (tree->ops)
ret = tree->ops->submit_bio_hook(tree->private_data, bio,
@@ -2740,7 +2741,6 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
else
btrfsic_submit_bio(bio);
- bio_put(bio);
return blk_status_to_errno(ret);
}
@@ -2942,8 +2942,7 @@ static int __do_readpage(struct extent_io_tree *tree,
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
unlock_extent_cached(tree, cur,
- cur + iosize - 1,
- &cached, GFP_NOFS);
+ cur + iosize - 1, &cached);
break;
}
em = __get_extent_map(inode, page, pg_offset, cur,
@@ -3036,8 +3035,7 @@ static int __do_readpage(struct extent_io_tree *tree,
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
unlock_extent_cached(tree, cur,
- cur + iosize - 1,
- &cached, GFP_NOFS);
+ cur + iosize - 1, &cached);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -3092,9 +3090,8 @@ out:
static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
struct page *pages[], int nr_pages,
u64 start, u64 end,
- get_extent_t *get_extent,
struct extent_map **em_cached,
- struct bio **bio, int mirror_num,
+ struct bio **bio,
unsigned long *bio_flags,
u64 *prev_em_start)
{
@@ -3115,18 +3112,17 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
}
for (index = 0; index < nr_pages; index++) {
- __do_readpage(tree, pages[index], get_extent, em_cached, bio,
- mirror_num, bio_flags, 0, prev_em_start);
+ __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
+ bio, 0, bio_flags, 0, prev_em_start);
put_page(pages[index]);
}
}
static void __extent_readpages(struct extent_io_tree *tree,
struct page *pages[],
- int nr_pages, get_extent_t *get_extent,
+ int nr_pages,
struct extent_map **em_cached,
- struct bio **bio, int mirror_num,
- unsigned long *bio_flags,
+ struct bio **bio, unsigned long *bio_flags,
u64 *prev_em_start)
{
u64 start = 0;
@@ -3146,8 +3142,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
} else {
__do_contiguous_readpages(tree, &pages[first_index],
index - first_index, start,
- end, get_extent, em_cached,
- bio, mirror_num, bio_flags,
+ end, em_cached,
+ bio, bio_flags,
prev_em_start);
start = page_start;
end = start + PAGE_SIZE - 1;
@@ -3158,9 +3154,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
if (end)
__do_contiguous_readpages(tree, &pages[first_index],
index - first_index, start,
- end, get_extent, em_cached, bio,
- mirror_num, bio_flags,
- prev_em_start);
+ end, em_cached, bio,
+ bio_flags, prev_em_start);
}
static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -3375,7 +3370,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
page_end, NULL, 1);
break;
}
- em = epd->get_extent(BTRFS_I(inode), page, pg_offset, cur,
+ em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
end - cur + 1, 1);
if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
@@ -3458,10 +3453,9 @@ done:
* and the end_io handler clears the writeback ranges
*/
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
- void *data)
+ struct extent_page_data *epd)
{
struct inode *inode = page->mapping->host;
- struct extent_page_data *epd = data;
u64 start = page_offset(page);
u64 page_end = start + PAGE_SIZE - 1;
int ret;
@@ -3895,8 +3889,7 @@ retry:
* write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
* @mapping: address space structure to write
* @wbc: subtract the number of written pages from *@wbc->nr_to_write
- * @writepage: function called for each page
- * @data: data passed to writepage function
+ * @data: data passed to __extent_writepage function
*
* If a page is already under I/O, write_cache_pages() skips it, even
* if it's dirty. This is desirable behaviour for memory-cleaning writeback,
@@ -3908,8 +3901,7 @@ retry:
*/
static int extent_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc,
- writepage_t writepage, void *data,
- void (*flush_fn)(void *))
+ struct extent_page_data *epd)
{
struct inode *inode = mapping->host;
int ret = 0;
@@ -3973,7 +3965,7 @@ retry:
* mapping
*/
if (!trylock_page(page)) {
- flush_fn(data);
+ flush_write_bio(epd);
lock_page(page);
}
@@ -3984,7 +3976,7 @@ retry:
if (wbc->sync_mode != WB_SYNC_NONE) {
if (PageWriteback(page))
- flush_fn(data);
+ flush_write_bio(epd);
wait_on_page_writeback(page);
}
@@ -3994,7 +3986,7 @@ retry:
continue;
}
- ret = (*writepage)(page, wbc, data);
+ ret = __extent_writepage(page, wbc, epd);
if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
unlock_page(page);
@@ -4042,7 +4034,7 @@ retry:
return ret;
}
-static void flush_epd_write_bio(struct extent_page_data *epd)
+static void flush_write_bio(struct extent_page_data *epd)
{
if (epd->bio) {
int ret;
@@ -4053,37 +4045,28 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
}
}
-static noinline void flush_write_bio(void *data)
-{
- struct extent_page_data *epd = data;
- flush_epd_write_bio(epd);
-}
-
-int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
- get_extent_t *get_extent,
- struct writeback_control *wbc)
+int extent_write_full_page(struct page *page, struct writeback_control *wbc)
{
int ret;
struct extent_page_data epd = {
.bio = NULL,
- .tree = tree,
- .get_extent = get_extent,
+ .tree = &BTRFS_I(page->mapping->host)->io_tree,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
ret = __extent_writepage(page, wbc, &epd);
- flush_epd_write_bio(&epd);
+ flush_write_bio(&epd);
return ret;
}
-int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
- u64 start, u64 end, get_extent_t *get_extent,
+int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode)
{
int ret = 0;
struct address_space *mapping = inode->i_mapping;
+ struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *page;
unsigned long nr_pages = (end - start + PAGE_SIZE) >>
PAGE_SHIFT;
@@ -4091,7 +4074,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
struct extent_page_data epd = {
.bio = NULL,
.tree = tree,
- .get_extent = get_extent,
.extent_locked = 1,
.sync_io = mode == WB_SYNC_ALL,
};
@@ -4117,34 +4099,30 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
start += PAGE_SIZE;
}
- flush_epd_write_bio(&epd);
+ flush_write_bio(&epd);
return ret;
}
int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
- get_extent_t *get_extent,
struct writeback_control *wbc)
{
int ret = 0;
struct extent_page_data epd = {
.bio = NULL,
.tree = tree,
- .get_extent = get_extent,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
- ret = extent_write_cache_pages(mapping, wbc, __extent_writepage, &epd,
- flush_write_bio);
- flush_epd_write_bio(&epd);
+ ret = extent_write_cache_pages(mapping, wbc, &epd);
+ flush_write_bio(&epd);
return ret;
}
int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages,
- get_extent_t get_extent)
+ struct list_head *pages, unsigned nr_pages)
{
struct bio *bio = NULL;
unsigned page_idx;
@@ -4170,13 +4148,13 @@ int extent_readpages(struct extent_io_tree *tree,
pagepool[nr++] = page;
if (nr < ARRAY_SIZE(pagepool))
continue;
- __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
- &bio, 0, &bio_flags, &prev_em_start);
+ __extent_readpages(tree, pagepool, nr, &em_cached, &bio,
+ &bio_flags, &prev_em_start);
nr = 0;
}
if (nr)
- __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
- &bio, 0, &bio_flags, &prev_em_start);
+ __extent_readpages(tree, pagepool, nr, &em_cached, &bio,
+ &bio_flags, &prev_em_start);
if (em_cached)
free_extent_map(em_cached);
@@ -4209,7 +4187,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
clear_extent_bit(tree, start, end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING,
- 1, 1, &cached_state, GFP_NOFS);
+ 1, 1, &cached_state);
return 0;
}
@@ -4234,9 +4212,9 @@ static int try_release_extent_state(struct extent_map_tree *map,
* at this point we can safely clear everything except the
* locked bit and the nodatasum bit
*/
- ret = clear_extent_bit(tree, start, end,
+ ret = __clear_extent_bit(tree, start, end,
~(EXTENT_LOCKED | EXTENT_NODATASUM),
- 0, 0, NULL, mask);
+ 0, 0, NULL, mask, NULL);
/* if clear_extent_bit failed for enomem reasons,
* we can't allow the release to continue.
@@ -4302,9 +4280,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
* This maps until we find something past 'last'
*/
static struct extent_map *get_extent_skip_holes(struct inode *inode,
- u64 offset,
- u64 last,
- get_extent_t *get_extent)
+ u64 offset, u64 last)
{
u64 sectorsize = btrfs_inode_sectorsize(inode);
struct extent_map *em;
@@ -4318,15 +4294,14 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
if (len == 0)
break;
len = ALIGN(len, sectorsize);
- em = get_extent(BTRFS_I(inode), NULL, 0, offset, len, 0);
+ em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0, offset,
+ len, 0);
if (IS_ERR_OR_NULL(em))
return em;
/* if this isn't a hole return it */
- if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
- em->block_start != EXTENT_MAP_HOLE) {
+ if (em->block_start != EXTENT_MAP_HOLE)
return em;
- }
/* this is a hole, advance to the next extent */
offset = extent_map_end(em);
@@ -4451,7 +4426,7 @@ static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info,
}
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len, get_extent_t *get_extent)
+ __u64 start, __u64 len)
{
int ret = 0;
u64 off = start;
@@ -4533,8 +4508,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
&cached_state);
- em = get_extent_skip_holes(inode, start, last_for_get_extent,
- get_extent);
+ em = get_extent_skip_holes(inode, start, last_for_get_extent);
if (!em)
goto out;
if (IS_ERR(em)) {
@@ -4622,8 +4596,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
/* now scan forward to see if this is really the last extent. */
- em = get_extent_skip_holes(inode, off, last_for_get_extent,
- get_extent);
+ em = get_extent_skip_holes(inode, off, last_for_get_extent);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out;
@@ -4647,7 +4620,7 @@ out_free:
out:
btrfs_free_path(path);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
- &cached_state, GFP_NOFS);
+ &cached_state);
return ret;
}
@@ -5263,8 +5236,7 @@ int extent_buffer_uptodate(struct extent_buffer *eb)
}
int read_extent_buffer_pages(struct extent_io_tree *tree,
- struct extent_buffer *eb, int wait,
- get_extent_t *get_extent, int mirror_num)
+ struct extent_buffer *eb, int wait, int mirror_num)
{
unsigned long i;
struct page *page;
@@ -5324,7 +5296,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
ClearPageError(page);
err = __extent_read_full_page(tree, page,
- get_extent, &bio,
+ btree_get_extent, &bio,
mirror_num, &bio_flags,
REQ_META);
if (err) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 93dcae0c3183..a7a850abd600 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -300,19 +300,29 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int wake, int delete,
- struct extent_state **cached, gfp_t mask);
+ struct extent_state **cached);
+int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, int wake, int delete,
+ struct extent_state **cached, gfp_t mask,
+ struct extent_changeset *changeset);
static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
- GFP_NOFS);
+ return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}
static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached, gfp_t mask)
+ u64 end, struct extent_state **cached)
+{
+ return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ GFP_NOFS, NULL);
+}
+
+static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
+ u64 start, u64 end, struct extent_state **cached)
{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
- mask);
+ return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ GFP_ATOMIC, NULL);
}
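+
+/*
+ * Hedged note: the _atomic variant above differs from
+ * unlock_extent_cached() only in passing GFP_ATOMIC, for callers that
+ * must not sleep while clearing the lock bit.
+ */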
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
@@ -323,8 +333,7 @@ static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
if (bits & EXTENT_LOCKED)
wake = 1;
- return clear_extent_bit(tree, start, end, bits, wake, 0, NULL,
- GFP_NOFS);
+ return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
@@ -340,10 +349,10 @@ static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
}
static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state, gfp_t mask)
+ u64 end, struct extent_state **cached_state)
{
- return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
- cached_state, mask);
+ return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
+ cached_state, GFP_NOFS, NULL);
}
static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
@@ -358,7 +367,7 @@ static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
{
return clear_extent_bit(tree, start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
+ EXTENT_DO_ACCOUNTING, 0, 0, NULL);
}
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -401,24 +410,19 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset);
-int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
- get_extent_t *get_extent,
- struct writeback_control *wbc);
-int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
- u64 start, u64 end, get_extent_t *get_extent,
+int extent_write_full_page(struct page *page, struct writeback_control *wbc);
+int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode);
int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
- get_extent_t *get_extent,
struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc);
int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages,
- get_extent_t get_extent);
+ struct list_head *pages, unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len, get_extent_t *get_extent);
+ __u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
@@ -437,7 +441,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_PAGE_LOCK 2
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, int wait,
- get_extent_t *get_extent, int mirror_num);
+ int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
static inline unsigned long num_extent_pages(u64 start, u64 len)
@@ -540,7 +544,7 @@ void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
struct io_failure_record **failrec_ret);
-bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
struct io_failure_record *failrec,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2e348fb0b280..d3bd02105d1c 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -454,3 +454,135 @@ void replace_extent_mapping(struct extent_map_tree *tree,
setup_extent_mapping(tree, new, modified);
}
+
+static struct extent_map *next_extent_map(struct extent_map *em)
+{
+ struct rb_node *next;
+
+ next = rb_next(&em->rb_node);
+ if (!next)
+ return NULL;
+ return container_of(next, struct extent_map, rb_node);
+}
+
+static struct extent_map *prev_extent_map(struct extent_map *em)
+{
+ struct rb_node *prev;
+
+ prev = rb_prev(&em->rb_node);
+ if (!prev)
+ return NULL;
+ return container_of(prev, struct extent_map, rb_node);
+}
+
+/* Helper for btrfs_get_extent. Given an existing extent in the tree
+ * (the nearest extent to map_start) and an extent that we want to
+ * insert, deal with overlap and insert the best-fitting new extent
+ * into the tree.
+ */
+static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
+ struct extent_map *existing,
+ struct extent_map *em,
+ u64 map_start)
+{
+ struct extent_map *prev;
+ struct extent_map *next;
+ u64 start;
+ u64 end;
+ u64 start_diff;
+
+ BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
+
+ if (existing->start > map_start) {
+ next = existing;
+ prev = prev_extent_map(next);
+ } else {
+ prev = existing;
+ next = next_extent_map(prev);
+ }
+
+ start = prev ? extent_map_end(prev) : em->start;
+ start = max_t(u64, start, em->start);
+ end = next ? next->start : extent_map_end(em);
+ end = min_t(u64, end, extent_map_end(em));
+ start_diff = start - em->start;
+ em->start = start;
+ em->len = end - start;
+ if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+ !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
+ em->block_start += start_diff;
+ em->block_len = em->len;
+ }
+ return add_extent_mapping(em_tree, em, 0);
+}
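+
+/*
+ * Hedged worked example of the clamping above (numbers are invented,
+ * not taken from this patch): with prev ending at 8K, next starting at
+ * 20K and an incoming em of [4K, 24K):
+ *
+ * start = max(8K, 4K) = 8K
+ * end = min(20K, 24K) = 20K
+ * start_diff = 8K - 4K = 4K
+ *
+ * so the em is trimmed to [8K, 20K) and, for a regular uncompressed
+ * extent, block_start advances by 4K and block_len is set to the new
+ * length before add_extent_mapping() inserts it.
+ */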
+
+/**
+ * btrfs_add_extent_mapping - add extent mapping into em_tree
+ * @em_tree - the extent tree into which we want to insert the extent mapping
+ * @em_in - extent we are inserting
+ * @start - start of the logical range btrfs_get_extent() is requesting
+ * @len - length of the logical range btrfs_get_extent() is requesting
+ *
+ * Note that @em_in's range may be different from [start, start+len),
+ * but they must overlap.
+ *
+ * Insert @em_in into @em_tree. In case there is an overlapping range, handle
+ * the -EEXIST by either:
+ * a) Returning the existing extent in @em_in if @start is within the
+ * existing em.
+ * b) Merging the existing extent with @em_in passed in.
+ *
+ * Return 0 on success, otherwise -EEXIST.
+ */
+int btrfs_add_extent_mapping(struct extent_map_tree *em_tree,
+ struct extent_map **em_in, u64 start, u64 len)
+{
+ int ret;
+ struct extent_map *em = *em_in;
+
+ ret = add_extent_mapping(em_tree, em, 0);
+ /* it is possible that someone inserted the extent into the tree
+ * while we had the lock dropped. It is also possible that
+ * an overlapping map exists in the tree.
+ */
+ if (ret == -EEXIST) {
+ struct extent_map *existing;
+
+ ret = 0;
+
+ existing = search_extent_mapping(em_tree, start, len);
+ /*
+ * existing will always be non-NULL, since there must be
+ * an extent causing the -EEXIST.
+ */
+ if (start >= existing->start &&
+ start < extent_map_end(existing)) {
+ free_extent_map(em);
+ *em_in = existing;
+ ret = 0;
+ } else {
+ u64 orig_start = em->start;
+ u64 orig_len = em->len;
+
+ /*
+ * The existing extent map is the one nearest to
+ * the [start, start + len) range that overlaps it.
+ */
+ ret = merge_extent_mapping(em_tree, existing,
+ em, start);
+ if (ret) {
+ free_extent_map(em);
+ *em_in = NULL;
+ WARN_ONCE(ret,
+"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
+ ret, existing->start, existing->len,
+ orig_start, orig_len);
+ }
+ free_extent_map(existing);
+ }
+ }
+
+ ASSERT(ret == 0 || ret == -EEXIST);
+ return ret;
+}
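+
+/*
+ * Minimal usage sketch (mirroring the btrfs_get_extent() caller updated
+ * later in this patch); the function name is illustrative only. Inserts
+ * are serialized by the tree lock, and on return *em_in may point at
+ * either the caller's em or the pre-existing overlapping one (or NULL
+ * if a merge failed).
+ */
+static int example_add_mapping(struct extent_map_tree *em_tree,
+ struct extent_map *em, u64 start, u64 len)
+{
+ int ret;
+
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
+ write_unlock(&em_tree->lock);
+ return ret;
+}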
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 64365bbc9b16..b29f77bc0732 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -13,7 +13,6 @@
/* bits for the flags field */
#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
#define EXTENT_FLAG_COMPRESSED 1
-#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
#define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
#define EXTENT_FLAG_FILLING 5 /* Filling in a preallocated extent */
@@ -92,4 +91,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em);
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
+int btrfs_add_extent_mapping(struct extent_map_tree *em_tree,
+ struct extent_map **em_in, u64 start, u64 len);
#endif
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index eb1bac7c8553..41ab9073d1d4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
+#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -1504,7 +1505,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
ordered->file_offset + ordered->len > start_pos &&
ordered->file_offset <= last_pos) {
unlock_extent_cached(&inode->io_tree, start_pos,
- last_pos, cached_state, GFP_NOFS);
+ last_pos, cached_state);
for (i = 0; i < num_pages; i++) {
unlock_page(pages[i]);
put_page(pages[i]);
@@ -1519,7 +1520,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
clear_extent_bit(&inode->io_tree, start_pos, last_pos,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, cached_state, GFP_NOFS);
+ 0, 0, cached_state);
*lockstart = start_pos;
*lockend = last_pos;
ret = 1;
@@ -1755,11 +1756,10 @@ again:
if (copied > 0)
ret = btrfs_dirty_pages(inode, pages, dirty_pages,
- pos, copied, NULL);
+ pos, copied, &cached_state);
if (extents_locked)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- lockstart, lockend, &cached_state,
- GFP_NOFS);
+ lockstart, lockend, &cached_state);
btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
if (ret) {
btrfs_drop_pages(pages, num_pages);
@@ -2019,10 +2019,19 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
int ret;
+ struct blk_plug plug;
+ /*
+ * This is only called in fsync, which does synchronous writes, so a
+ * plug can merge adjacent IOs as much as possible. Especially in the
+ * case of multiple disks using a raid profile, a large IO can be
+ * split into several segments of stripe length (currently 64K).
+ */
+ blk_start_plug(&plug);
atomic_inc(&BTRFS_I(inode)->sync_writers);
ret = btrfs_fdatawrite_range(inode, start, end);
atomic_dec(&BTRFS_I(inode)->sync_writers);
+ blk_finish_plug(&plug);
return ret;
}
@@ -2450,6 +2459,46 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
return ret;
}
+static int btrfs_punch_hole_lock_range(struct inode *inode,
+ const u64 lockstart,
+ const u64 lockend,
+ struct extent_state **cached_state)
+{
+ while (1) {
+ struct btrfs_ordered_extent *ordered;
+ int ret;
+
+ truncate_pagecache_range(inode, lockstart, lockend);
+
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state);
+ ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
+
+ /*
+ * We need to make sure we have no ordered extents in this range
+ * and that nobody raced in and read a page in this range; if they
+ * did, we need to try again.
+ */
+ if ((!ordered ||
+ (ordered->file_offset + ordered->len <= lockstart ||
+ ordered->file_offset > lockend)) &&
+ !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+ break;
+ }
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, cached_state);
+ ret = btrfs_wait_ordered_range(inode, lockstart,
+ lockend - lockstart + 1);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
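+
+/*
+ * Hedged usage sketch: both btrfs_punch_hole() and the new
+ * btrfs_zero_range() below follow this lock/modify/unlock pattern (the
+ * function name here is illustrative only).
+ */
+static int example_locked_update(struct inode *inode, u64 lockstart,
+ u64 lockend)
+{
+ struct extent_state *cached_state = NULL;
+ int ret;
+
+ ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+ &cached_state);
+ if (ret)
+ return ret;
+ /* ... modify the now-quiet range ... */
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
+ return 0;
+}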
+
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2566,38 +2615,11 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_only_mutex;
}
- while (1) {
- struct btrfs_ordered_extent *ordered;
-
- truncate_pagecache_range(inode, lockstart, lockend);
-
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
- ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
-
- /*
- * We need to make sure we have no ordered extents in this range
- * and nobody raced in and read a page in this range, if we did
- * we need to try again.
- */
- if ((!ordered ||
- (ordered->file_offset + ordered->len <= lockstart ||
- ordered->file_offset > lockend)) &&
- !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- break;
- }
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state, GFP_NOFS);
- ret = btrfs_wait_ordered_range(inode, lockstart,
- lockend - lockstart + 1);
- if (ret) {
- inode_unlock(inode);
- return ret;
- }
+ ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+ &cached_state);
+ if (ret) {
+ inode_unlock(inode);
+ goto out_only_mutex;
}
path = btrfs_alloc_path();
@@ -2742,7 +2764,7 @@ out_free:
btrfs_free_block_rsv(fs_info, rsv);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
+ &cached_state);
out_only_mutex:
if (!updated_inode && truncated_block && !ret && !err) {
/*
@@ -2806,6 +2828,234 @@ insert:
return 0;
}
+static int btrfs_fallocate_update_isize(struct inode *inode,
+ const u64 end,
+ const int mode)
+{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+ int ret2;
+
+ if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
+ return 0;
+
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ inode->i_ctime = current_time(inode);
+ i_size_write(inode, end);
+ btrfs_ordered_update_i_size(inode, end, NULL);
+ ret = btrfs_update_inode(trans, root, inode);
+ ret2 = btrfs_end_transaction(trans);
+
+ return ret ? ret : ret2;
+}
+
+enum {
+ RANGE_BOUNDARY_WRITTEN_EXTENT = 0,
+ RANGE_BOUNDARY_PREALLOC_EXTENT = 1,
+ RANGE_BOUNDARY_HOLE = 2,
+};
+
+static int btrfs_zero_range_check_range_boundary(struct inode *inode,
+ u64 offset)
+{
+ const u64 sectorsize = btrfs_inode_sectorsize(inode);
+ struct extent_map *em;
+ int ret;
+
+ offset = round_down(offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+
+ if (em->block_start == EXTENT_MAP_HOLE)
+ ret = RANGE_BOUNDARY_HOLE;
+ else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
+ else
+ ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
+
+ free_extent_map(em);
+ return ret;
+}
+
+static int btrfs_zero_range(struct inode *inode,
+ loff_t offset,
+ loff_t len,
+ const int mode)
+{
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct extent_map *em;
+ struct extent_changeset *data_reserved = NULL;
+ int ret;
+ u64 alloc_hint = 0;
+ const u64 sectorsize = btrfs_inode_sectorsize(inode);
+ u64 alloc_start = round_down(offset, sectorsize);
+ u64 alloc_end = round_up(offset + len, sectorsize);
+ u64 bytes_to_reserve = 0;
+ bool space_reserved = false;
+
+ inode_dio_wait(inode);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
+ alloc_start, alloc_end - alloc_start, 0);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+
+ /*
+ * Avoid hole punching and extent allocation for some cases. More cases
+ * could be considered, but these are unlikely common and we keep things
+ * as simple as possible for now. Also, intentionally, if the target
+ * range contains one or more prealloc extents together with regular
+ * extents and holes, we drop all the existing extents and allocate a
+ * new prealloc extent, so that we get a larger contiguous disk extent.
+ */
+ if (em->start <= alloc_start &&
+ test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ const u64 em_end = em->start + em->len;
+
+ if (em_end >= offset + len) {
+ /*
+ * The whole range is already a prealloc extent;
+ * do nothing except updating the inode's i_size if
+ * needed.
+ */
+ free_extent_map(em);
+ ret = btrfs_fallocate_update_isize(inode, offset + len,
+ mode);
+ goto out;
+ }
+ /*
+ * Part of the range is already a prealloc extent, so operate
+ * only on the remaining part of the range.
+ */
+ alloc_start = em_end;
+ ASSERT(IS_ALIGNED(alloc_start, sectorsize));
+ len = offset + len - alloc_start;
+ offset = alloc_start;
+ alloc_hint = em->block_start + em->len;
+ }
+ free_extent_map(em);
+
+ if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
+ BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
+ alloc_start, sectorsize, 0);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ free_extent_map(em);
+ ret = btrfs_fallocate_update_isize(inode, offset + len,
+ mode);
+ goto out;
+ }
+ if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
+ free_extent_map(em);
+ ret = btrfs_truncate_block(inode, offset, len, 0);
+ if (!ret)
+ ret = btrfs_fallocate_update_isize(inode,
+ offset + len,
+ mode);
+ return ret;
+ }
+ free_extent_map(em);
+ alloc_start = round_down(offset, sectorsize);
+ alloc_end = alloc_start + sectorsize;
+ goto reserve_space;
+ }
+
+ alloc_start = round_up(offset, sectorsize);
+ alloc_end = round_down(offset + len, sectorsize);
+
+ /*
+ * For unaligned ranges, check the pages at the boundaries: they might
+ * map to an extent, in which case we need to partially zero them, or
+ * they might map to a hole, in which case we need our allocation range
+ * to cover them.
+ */
+ if (!IS_ALIGNED(offset, sectorsize)) {
+ ret = btrfs_zero_range_check_range_boundary(inode, offset);
+ if (ret < 0)
+ goto out;
+ if (ret == RANGE_BOUNDARY_HOLE) {
+ alloc_start = round_down(offset, sectorsize);
+ ret = 0;
+ } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
+ ret = btrfs_truncate_block(inode, offset, 0, 0);
+ if (ret)
+ goto out;
+ } else {
+ ret = 0;
+ }
+ }
+
+ if (!IS_ALIGNED(offset + len, sectorsize)) {
+ ret = btrfs_zero_range_check_range_boundary(inode,
+ offset + len);
+ if (ret < 0)
+ goto out;
+ if (ret == RANGE_BOUNDARY_HOLE) {
+ alloc_end = round_up(offset + len, sectorsize);
+ ret = 0;
+ } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
+ ret = btrfs_truncate_block(inode, offset + len, 0, 1);
+ if (ret)
+ goto out;
+ } else {
+ ret = 0;
+ }
+ }
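+
+ /*
+ * Hedged worked example with 4K sectors: zeroing [1000, 9000) starts
+ * with the aligned range [4096, 8192) and checks the boundary blocks
+ * at offsets 0 and 8192. A hole at a boundary extends the allocation
+ * over it (alloc_start becomes 0); a written extent gets its partial
+ * bytes zeroed in place via btrfs_truncate_block(); a prealloc
+ * boundary needs neither.
+ */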
+
+reserve_space:
+ if (alloc_start < alloc_end) {
+ struct extent_state *cached_state = NULL;
+ const u64 lockstart = alloc_start;
+ const u64 lockend = alloc_end - 1;
+
+ bytes_to_reserve = alloc_end - alloc_start;
+ ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
+ bytes_to_reserve);
+ if (ret < 0)
+ goto out;
+ space_reserved = true;
+ ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
+ alloc_start, bytes_to_reserve);
+ if (ret)
+ goto out;
+ ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+ &cached_state);
+ if (ret)
+ goto out;
+ ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
+ alloc_end - alloc_start,
+ i_blocksize(inode),
+ offset + len, &alloc_hint);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state);
+ /* btrfs_prealloc_file_range releases reserved space on error */
+ if (ret) {
+ space_reserved = false;
+ goto out;
+ }
+ }
+ ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
+ out:
+ if (ret && space_reserved)
+ btrfs_free_reserved_data_space(inode, data_reserved,
+ alloc_start, bytes_to_reserve);
+ extent_changeset_free(data_reserved);
+
+ return ret;
+}
+
static long btrfs_fallocate(struct file *file, int mode,
loff_t offset, loff_t len)
{
@@ -2831,7 +3081,8 @@ static long btrfs_fallocate(struct file *file, int mode,
cur_offset = alloc_start;
/* Make sure we aren't being give some crap mode */
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+ FALLOC_FL_ZERO_RANGE))
return -EOPNOTSUPP;
if (mode & FALLOC_FL_PUNCH_HOLE)
@@ -2842,10 +3093,12 @@ static long btrfs_fallocate(struct file *file, int mode,
*
* For qgroup space, it will be checked later.
*/
- ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
- alloc_end - alloc_start);
- if (ret < 0)
- return ret;
+ if (!(mode & FALLOC_FL_ZERO_RANGE)) {
+ ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
+ alloc_end - alloc_start);
+ if (ret < 0)
+ return ret;
+ }
inode_lock(inode);
@@ -2887,6 +3140,12 @@ static long btrfs_fallocate(struct file *file, int mode,
if (ret)
goto out;
+ if (mode & FALLOC_FL_ZERO_RANGE) {
+ ret = btrfs_zero_range(inode, offset, len, mode);
+ inode_unlock(inode);
+ return ret;
+ }
+
locked_end = alloc_end - 1;
while (1) {
struct btrfs_ordered_extent *ordered;
@@ -2896,15 +3155,15 @@ static long btrfs_fallocate(struct file *file, int mode,
*/
lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
locked_end, &cached_state);
- ordered = btrfs_lookup_first_ordered_extent(inode,
- alloc_end - 1);
+ ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
+
if (ordered &&
ordered->file_offset + ordered->len > alloc_start &&
ordered->file_offset < alloc_end) {
btrfs_put_ordered_extent(ordered);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
alloc_start, locked_end,
- &cached_state, GFP_KERNEL);
+ &cached_state);
/*
* we can't wait on the range with the transaction
* running or with the extent lock held
@@ -2922,7 +3181,7 @@ static long btrfs_fallocate(struct file *file, int mode,
/* First, check if we exceed the qgroup limit */
INIT_LIST_HEAD(&reserve_list);
- while (1) {
+ while (cur_offset < alloc_end) {
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
alloc_end - cur_offset, 0);
if (IS_ERR(em)) {
@@ -2958,8 +3217,6 @@ static long btrfs_fallocate(struct file *file, int mode,
}
free_extent_map(em);
cur_offset = last_byte;
- if (cur_offset >= alloc_end)
- break;
}
/*
@@ -2982,37 +3239,18 @@ static long btrfs_fallocate(struct file *file, int mode,
if (ret < 0)
goto out_unlock;
- if (actual_end > inode->i_size &&
- !(mode & FALLOC_FL_KEEP_SIZE)) {
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root = BTRFS_I(inode)->root;
-
- /*
- * We didn't need to allocate any more space, but we
- * still extended the size of the file so we need to
- * update i_size and the inode item.
- */
- trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- } else {
- inode->i_ctime = current_time(inode);
- i_size_write(inode, actual_end);
- btrfs_ordered_update_i_size(inode, actual_end, NULL);
- ret = btrfs_update_inode(trans, root, inode);
- if (ret)
- btrfs_end_transaction(trans);
- else
- ret = btrfs_end_transaction(trans);
- }
- }
+ /*
+ * We didn't need to allocate any more space, but we still extended the
+ * size of the file so we need to update i_size and the inode item.
+ */
+ ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
- &cached_state, GFP_KERNEL);
+ &cached_state);
out:
inode_unlock(inode);
/* Let go of our reservation. */
- if (ret != 0)
+ if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
btrfs_free_reserved_data_space(inode, data_reserved,
alloc_start, alloc_end - cur_offset);
extent_changeset_free(data_reserved);
@@ -3081,7 +3319,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
*offset = min_t(loff_t, start, inode->i_size);
}
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
+ &cached_state);
return ret;
}
@@ -3145,7 +3383,7 @@ void btrfs_auto_defrag_exit(void)
kmem_cache_destroy(btrfs_inode_defrag_cachep);
}
-int btrfs_auto_defrag_init(void)
+int __init btrfs_auto_defrag_init(void)
{
btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
sizeof(struct inode_defrag), 0,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4426d1c73e50..a9f22ac50d6a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
+#include <linux/error-injection.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
@@ -332,6 +333,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
return 0;
}
+ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);
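+/*
+ * Hedged note: the annotation above tags io_ctl_init() as a permitted
+ * error-injection point, so debugging tooling (e.g. a BPF program using
+ * bpf_override_return()) can force it to fail with an errno for
+ * testing.
+ */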
static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
@@ -993,8 +995,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
- GFP_NOFS);
+ EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
goto fail;
}
leaf = path->nodes[0];
@@ -1008,7 +1009,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
inode->i_size - 1,
EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
- NULL, GFP_NOFS);
+ NULL);
btrfs_release_path(path);
goto fail;
}
@@ -1105,8 +1106,7 @@ static int flush_dirty_cache(struct inode *inode)
ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
if (ret)
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
- GFP_NOFS);
+ EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
return ret;
}
@@ -1127,8 +1127,7 @@ cleanup_write_cache_enospc(struct inode *inode,
{
io_ctl_drop_pages(io_ctl);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, cached_state,
- GFP_NOFS);
+ i_size_read(inode) - 1, cached_state);
}
static int __btrfs_wait_cache_io(struct btrfs_root *root,
@@ -1322,7 +1321,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl_drop_pages(io_ctl);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+ i_size_read(inode) - 1, &cached_state);
/*
* at this point the pages are under IO and we're happy,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e1a7f3cb5be9..53ca025655fc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -43,6 +43,7 @@
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
+#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -536,9 +537,14 @@ again:
*
* If the compression fails for any reason, we set the pages
* dirty again later on.
+ *
+ * Note that the remaining part is redirtied; the start pointer has
+ * moved, while the end stays the original one.
*/
- extent_range_clear_dirty_for_io(inode, start, end);
- redirty = 1;
+ if (!redirty) {
+ extent_range_clear_dirty_for_io(inode, start, end);
+ redirty = 1;
+ }
/* Compression level is applied here and only here */
ret = btrfs_compress_pages(
@@ -765,11 +771,10 @@ retry:
* all those pages down to the drive.
*/
if (!page_started && !ret)
- extent_write_locked_range(io_tree,
- inode, async_extent->start,
+ extent_write_locked_range(inode,
+ async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
- btrfs_get_extent,
WB_SYNC_ALL);
else if (ret)
unlock_page(async_cow->locked_page);
@@ -1203,7 +1208,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
u64 cur_end;
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
- 1, 0, NULL, GFP_NOFS);
+ 1, 0, NULL);
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
BUG_ON(!async_cow); /* -ENOMEM */
@@ -1951,7 +1956,21 @@ static blk_status_t __btrfs_submit_bio_done(void *private_data, struct bio *bio,
/*
* extent_io.c submission hook. This does the right thing for csum calculation
- * on write, or reading the csums from the tree before a read
+ * on write, or reading the csums from the tree before a read.
+ *
+ * Rules about async/sync submit:
+ * a) read: sync submit
+ *
+ * b) write without checksum: sync submit
+ *
+ * c) write with checksum:
+ * c-1) if bio is issued by fsync: sync submit
+ * (sync_writers != 0)
+ *
+ * c-2) if root is reloc root: sync submit
+ * (only in case of buffered IO)
+ *
+ * c-3) otherwise: async submit
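+ *
+ * Hedged restatement of the rules as pseudo-code (the condition names
+ * are illustrative, not actual fields):
+ *
+ * if (read) sync submit
+ * else if (write without checksum) sync submit
+ * else if (sync_writers != 0) sync submit (c-1)
+ * else if (reloc root, buffered IO) sync submit (c-2)
+ * else async submit (c-3)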
*/
static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
int mirror_num, unsigned long bio_flags,
@@ -2023,10 +2042,10 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum *sum;
list_for_each_entry(sum, list, list) {
- trans->adding_csums = 1;
+ trans->adding_csums = true;
btrfs_csum_file_blocks(trans,
BTRFS_I(inode)->root->fs_info->csum_root, sum);
- trans->adding_csums = 0;
+ trans->adding_csums = false;
}
return 0;
}
@@ -2082,7 +2101,7 @@ again:
PAGE_SIZE);
if (ordered) {
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
- page_end, &cached_state, GFP_NOFS);
+ page_end, &cached_state);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
@@ -2098,14 +2117,21 @@ again:
goto out;
}
- btrfs_set_extent_delalloc(inode, page_start, page_end, 0, &cached_state,
- 0);
+ ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
+ &cached_state, 0);
+ if (ret) {
+ mapping_set_error(page->mapping, ret);
+ end_extent_writepage(page, ret, page_start, page_end);
+ ClearPageChecked(page);
+ goto out;
+ }
+
ClearPageChecked(page);
set_page_dirty(page);
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
out_page:
unlock_page(page);
put_page(page);
@@ -2697,7 +2723,7 @@ out_free_path:
btrfs_end_transaction(trans);
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
- &cached, GFP_NOFS);
+ &cached);
iput(inode);
return ret;
}
@@ -2986,7 +3012,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
clear_extent_bit(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
- EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
+ EXTENT_DEFRAG, 0, 0, &cached_state);
}
if (nolock)
@@ -3056,7 +3082,7 @@ out:
ordered_extent->len - 1,
clear_bits,
(clear_bits & EXTENT_LOCKED) ? 1 : 0,
- 0, &cached_state, GFP_NOFS);
+ 0, &cached_state);
}
if (trans)
@@ -3070,7 +3096,7 @@ out:
else
start = ordered_extent->file_offset;
end = ordered_extent->file_offset + ordered_extent->len - 1;
- clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
+ clear_extent_uptodate(io_tree, start, end, NULL);
/* Drop the cache for the part of the extent we didn't write. */
btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
@@ -3777,7 +3803,8 @@ static int btrfs_read_locked_inode(struct inode *inode)
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
- inode->i_version = btrfs_inode_sequence(leaf, inode_item);
+ inode_set_iversion_queried(inode,
+ btrfs_inode_sequence(leaf, inode_item));
inode->i_generation = BTRFS_I(inode)->generation;
inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
@@ -3945,7 +3972,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
&token);
btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
&token);
- btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
+ btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode),
+ &token);
btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
@@ -4744,8 +4772,8 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
u64 block_start;
u64 block_end;
- if ((offset & (blocksize - 1)) == 0 &&
- (!len || ((len & (blocksize - 1)) == 0)))
+ if (IS_ALIGNED(offset, blocksize) &&
+ (!len || IS_ALIGNED(len, blocksize)))
goto out;
block_start = round_down(from, blocksize);
@@ -4787,7 +4815,7 @@ again:
ordered = btrfs_lookup_ordered_extent(inode, block_start);
if (ordered) {
unlock_extent_cached(io_tree, block_start, block_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
unlock_page(page);
put_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
@@ -4798,13 +4826,13 @@ again:
clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, &cached_state, GFP_NOFS);
+ 0, 0, &cached_state);
ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, block_start, block_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
goto out_unlock;
}
@@ -4823,8 +4851,7 @@ again:
}
ClearPageChecked(page);
set_page_dirty(page);
- unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
- GFP_NOFS);
+ unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
out_unlock:
if (ret)
@@ -4925,7 +4952,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
if (!ordered)
break;
unlock_extent_cached(io_tree, hole_start, block_end - 1,
- &cached_state, GFP_NOFS);
+ &cached_state);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
@@ -4990,8 +5017,7 @@ next:
break;
}
free_extent_map(em);
- unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
- GFP_NOFS);
+ unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
return err;
}
@@ -5234,8 +5260,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
clear_extent_bit(io_tree, start, end,
EXTENT_LOCKED | EXTENT_DIRTY |
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 1, 1,
- &cached_state, GFP_NOFS);
+ EXTENT_DEFRAG, 1, 1, &cached_state);
cond_resched();
spin_lock(&io_tree->lock);
@@ -5894,7 +5919,6 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_private *private = file->private_data;
struct btrfs_dir_item *di;
@@ -5962,9 +5986,6 @@ again:
if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
goto next;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
- if (verify_dir_item(fs_info, leaf, slot, di))
- goto next;
-
name_len = btrfs_dir_name_len(leaf, di);
if ((total_len + sizeof(struct dir_entry) + name_len) >=
PAGE_SIZE) {
@@ -6104,19 +6125,20 @@ static int btrfs_update_time(struct inode *inode, struct timespec *now,
int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
+ bool dirty = flags & ~S_VERSION;
if (btrfs_root_readonly(root))
return -EROFS;
if (flags & S_VERSION)
- inode_inc_iversion(inode);
+ dirty |= inode_maybe_inc_iversion(inode, dirty);
if (flags & S_CTIME)
inode->i_ctime = *now;
if (flags & S_MTIME)
inode->i_mtime = *now;
if (flags & S_ATIME)
inode->i_atime = *now;
- return btrfs_dirty_inode(inode);
+ return dirty ? btrfs_dirty_inode(inode) : 0;
}
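+
+/*
+ * Hedged note on the change above: with only S_VERSION set, dirty
+ * starts out false and inode_maybe_inc_iversion() (linux/iversion.h)
+ * bumps i_version only when a reader has actually queried it, so a
+ * timestamp-less update can skip btrfs_dirty_inode() entirely.
+ */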
/*
@@ -6297,7 +6319,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
}
/*
* index_cnt is ignored for everything but a dir,
- * btrfs_get_inode_index_count has an explanation for the magic
+ * btrfs_set_inode_index_count has an explanation for the magic
* number
*/
BTRFS_I(inode)->index_cnt = 2;
@@ -6560,7 +6582,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
out_unlock:
btrfs_end_transaction(trans);
- btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
if (drop_inode) {
inode_dec_link_count(inode);
@@ -6641,7 +6662,6 @@ out_unlock:
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
return err;
@@ -6716,7 +6736,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
}
- btrfs_balance_delayed_items(fs_info);
fail:
if (trans)
btrfs_end_transaction(trans);
@@ -6794,7 +6813,6 @@ out_fail:
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
return err;
@@ -6803,68 +6821,6 @@ out_fail_inode:
goto out_fail;
}
-/* Find next extent map of a given extent map, caller needs to ensure locks */
-static struct extent_map *next_extent_map(struct extent_map *em)
-{
- struct rb_node *next;
-
- next = rb_next(&em->rb_node);
- if (!next)
- return NULL;
- return container_of(next, struct extent_map, rb_node);
-}
-
-static struct extent_map *prev_extent_map(struct extent_map *em)
-{
- struct rb_node *prev;
-
- prev = rb_prev(&em->rb_node);
- if (!prev)
- return NULL;
- return container_of(prev, struct extent_map, rb_node);
-}
-
-/* helper for btfs_get_extent. Given an existing extent in the tree,
- * the existing extent is the nearest extent to map_start,
- * and an extent that you want to insert, deal with overlap and insert
- * the best fitted new extent into the tree.
- */
-static int merge_extent_mapping(struct extent_map_tree *em_tree,
- struct extent_map *existing,
- struct extent_map *em,
- u64 map_start)
-{
- struct extent_map *prev;
- struct extent_map *next;
- u64 start;
- u64 end;
- u64 start_diff;
-
- BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
-
- if (existing->start > map_start) {
- next = existing;
- prev = prev_extent_map(next);
- } else {
- prev = existing;
- next = next_extent_map(prev);
- }
-
- start = prev ? extent_map_end(prev) : em->start;
- start = max_t(u64, start, em->start);
- end = next ? next->start : extent_map_end(em);
- end = min_t(u64, end, extent_map_end(em));
- start_diff = start - em->start;
- em->start = start;
- em->len = end - start;
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
- em->block_start += start_diff;
- em->block_len -= start_diff;
- }
- return add_extent_mapping(em_tree, em, 0);
-}
-
static noinline int uncompress_inline(struct btrfs_path *path,
struct page *page,
size_t pg_offset, u64 extent_offset,
@@ -6939,10 +6895,8 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct extent_map *em = NULL;
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_io_tree *io_tree = &inode->io_tree;
- struct btrfs_trans_handle *trans = NULL;
const bool new_inline = !page || create;
-again:
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em)
@@ -6981,8 +6935,7 @@ again:
path->reada = READA_FORWARD;
}
- ret = btrfs_lookup_file_extent(trans, root, path,
- objectid, start, trans != NULL);
+ ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
if (ret < 0) {
err = ret;
goto out;
@@ -7083,7 +7036,7 @@ next:
em->orig_block_len = em->len;
em->orig_start = em->start;
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
- if (create == 0 && !PageUptodate(page)) {
+ if (!PageUptodate(page)) {
if (btrfs_file_extent_compression(leaf, item) !=
BTRFS_COMPRESS_NONE) {
ret = uncompress_inline(path, page, pg_offset,
@@ -7104,25 +7057,6 @@ next:
kunmap(page);
}
flush_dcache_page(page);
- } else if (create && PageUptodate(page)) {
- BUG();
- if (!trans) {
- kunmap(page);
- free_extent_map(em);
- em = NULL;
-
- btrfs_release_path(path);
- trans = btrfs_join_transaction(root);
-
- if (IS_ERR(trans))
- return ERR_CAST(trans);
- goto again;
- }
- map = kmap(page);
- write_extent_buffer(leaf, map + pg_offset, ptr,
- copy_size);
- kunmap(page);
- btrfs_mark_buffer_dirty(leaf);
}
set_extent_uptodate(io_tree, em->start,
extent_map_end(em) - 1, NULL, GFP_NOFS);
@@ -7134,7 +7068,6 @@ not_found:
em->len = len;
not_found_em:
em->block_start = EXTENT_MAP_HOLE;
- set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
@@ -7147,62 +7080,13 @@ insert:
err = 0;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
- /* it is possible that someone inserted the extent into the tree
- * while we had the lock dropped. It is also possible that
- * an overlapping map exists in the tree
- */
- if (ret == -EEXIST) {
- struct extent_map *existing;
-
- ret = 0;
-
- existing = search_extent_mapping(em_tree, start, len);
- /*
- * existing will always be non-NULL, since there must be
- * extent causing the -EEXIST.
- */
- if (existing->start == em->start &&
- extent_map_end(existing) >= extent_map_end(em) &&
- em->block_start == existing->block_start) {
- /*
- * The existing extent map already encompasses the
- * entire extent map we tried to add.
- */
- free_extent_map(em);
- em = existing;
- err = 0;
-
- } else if (start >= extent_map_end(existing) ||
- start <= existing->start) {
- /*
- * The existing extent map is the one nearest to
- * the [start, start + len) range which overlaps
- */
- err = merge_extent_mapping(em_tree, existing,
- em, start);
- free_extent_map(existing);
- if (err) {
- free_extent_map(em);
- em = NULL;
- }
- } else {
- free_extent_map(em);
- em = existing;
- err = 0;
- }
- }
+ err = btrfs_add_extent_mapping(em_tree, &em, start, len);
write_unlock(&em_tree->lock);
out:
trace_btrfs_get_extent(root, inode, em);
btrfs_free_path(path);
- if (trans) {
- ret = btrfs_end_transaction(trans);
- if (!err)
- err = ret;
- }
if (err) {
free_extent_map(em);
return ERR_PTR(err);
@@ -7324,7 +7208,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
em->block_start = EXTENT_MAP_DELALLOC;
em->block_len = found;
}
- } else if (hole_em) {
+ } else {
return hole_em;
}
out:
@@ -7641,7 +7525,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
break;
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- cached_state, GFP_NOFS);
+ cached_state);
if (ordered) {
/*
@@ -7926,7 +7810,7 @@ unlock:
if (lockstart < lockend) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, unlock_bits, 1, 0,
- &cached_state, GFP_NOFS);
+ &cached_state);
} else {
free_extent_state(cached_state);
}
@@ -7937,7 +7821,7 @@ unlock:
unlock_err:
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- unlock_bits, 1, 0, &cached_state, GFP_NOFS);
+ unlock_bits, 1, 0, &cached_state);
err:
if (dio_data)
current->journal_info = dio_data;
@@ -7953,15 +7837,12 @@ static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
BUG_ON(bio_op(bio) == REQ_OP_WRITE);
- bio_get(bio);
-
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
if (ret)
- goto err;
+ return ret;
ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
-err:
- bio_put(bio);
+
return ret;
}
@@ -8015,6 +7896,7 @@ static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
int segs;
int ret;
blk_status_t status;
+ struct bio_vec bvec;
BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -8030,8 +7912,9 @@ static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
}
segs = bio_segments(failed_bio);
+ bio_get_first_bvec(failed_bio, &bvec);
if (segs > 1 ||
- (failed_bio->bi_io_vec->bv_len > btrfs_inode_sectorsize(inode)))
+ (bvec.bv_len > btrfs_inode_sectorsize(inode)))
read_mode |= REQ_FAILFAST_DEV;
isector = start - btrfs_io_bio(failed_bio)->logical;
@@ -8074,7 +7957,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
ASSERT(bio->bi_vcnt == 1);
io_tree = &BTRFS_I(inode)->io_tree;
failure_tree = &BTRFS_I(inode)->io_failure_tree;
- ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+ ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode));
done->uptodate = 1;
ASSERT(!bio_flagged(bio, BIO_CLONED));
@@ -8164,7 +8047,7 @@ static void btrfs_retry_endio(struct bio *bio)
uptodate = 1;
ASSERT(bio->bi_vcnt == 1);
- ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
+ ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode));
io_tree = &BTRFS_I(inode)->io_tree;
failure_tree = &BTRFS_I(inode)->io_failure_tree;
@@ -8460,11 +8343,10 @@ __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset,
bool write = bio_op(bio) == REQ_OP_WRITE;
blk_status_t ret;
+ /* Check btrfs_submit_bio_hook() for rules about async submit. */
if (async_submit)
async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
- bio_get(bio);
-
if (!write) {
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
if (ret)
@@ -8497,7 +8379,6 @@ __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset,
map:
ret = btrfs_map_bio(fs_info, bio, 0, 0);
err:
- bio_put(bio);
return ret;
}
@@ -8854,7 +8735,7 @@ static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (ret)
return ret;
- return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
+ return extent_fiemap(inode, fieinfo, start, len);
}
int btrfs_readpage(struct file *file, struct page *page)
@@ -8866,7 +8747,6 @@ int btrfs_readpage(struct file *file, struct page *page)
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
- struct extent_io_tree *tree;
struct inode *inode = page->mapping->host;
int ret;
@@ -8885,8 +8765,7 @@ static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
redirty_page_for_writepage(wbc, page);
return AOP_WRITEPAGE_ACTIVATE;
}
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+ ret = extent_write_full_page(page, wbc);
btrfs_add_delayed_iput(inode);
return ret;
}
@@ -8897,7 +8776,7 @@ static int btrfs_writepages(struct address_space *mapping,
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
- return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
+ return extent_writepages(tree, mapping, wbc);
}
static int
@@ -8906,8 +8785,7 @@ btrfs_readpages(struct file *file, struct address_space *mapping,
{
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
- return extent_readpages(tree, mapping, pages, nr_pages,
- btrfs_get_extent);
+ return extent_readpages(tree, mapping, pages, nr_pages);
}
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
@@ -8978,8 +8856,7 @@ again:
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 1, 0, &cached_state,
- GFP_NOFS);
+ EXTENT_DEFRAG, 1, 0, &cached_state);
/*
* whoever cleared the private bit is responsible
* for the finish_ordered_io
@@ -9036,7 +8913,7 @@ again:
EXTENT_LOCKED | EXTENT_DIRTY |
EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
- &cached_state, GFP_NOFS);
+ &cached_state);
__btrfs_releasepage(page, GFP_NOFS);
}
@@ -9137,7 +9014,7 @@ again:
PAGE_SIZE);
if (ordered) {
unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
@@ -9164,13 +9041,13 @@ again:
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, &cached_state, GFP_NOFS);
+ 0, 0, &cached_state);
ret = btrfs_set_extent_delalloc(inode, page_start, end, 0,
&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -9196,7 +9073,7 @@ again:
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
- unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
+ unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
out_unlock:
if (!ret) {
@@ -9421,7 +9298,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
struct btrfs_inode *ei;
struct inode *inode;
- ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
+ ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
@@ -9573,7 +9450,7 @@ void btrfs_destroy_cachep(void)
kmem_cache_destroy(btrfs_free_space_cachep);
}
-int btrfs_init_cachep(void)
+int __init btrfs_init_cachep(void)
{
btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
sizeof(struct btrfs_inode), 0,
@@ -10688,7 +10565,6 @@ out:
btrfs_end_transaction(trans);
if (ret)
iput(inode);
- btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
return ret;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2ef8acaac688..111ee282b777 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -43,6 +43,7 @@
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
+#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -307,12 +308,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
ip->flags |= BTRFS_INODE_COMPRESS;
ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
- if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
- comp = "lzo";
- else if (fs_info->compress_type == BTRFS_COMPRESS_ZLIB)
- comp = "zlib";
- else
- comp = "zstd";
+ comp = btrfs_compress_type2str(fs_info->compress_type);
+ if (!comp || comp[0] == 0)
+ comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
+
ret = btrfs_set_prop(inode, "btrfs.compression",
comp, strlen(comp), 0);
if (ret)
@@ -979,7 +978,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
/* get the big lock and read metadata off disk */
lock_extent_bits(io_tree, start, end, &cached);
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
- unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
+ unlock_extent_cached(io_tree, start, end, &cached);
if (IS_ERR(em))
return NULL;
@@ -1130,7 +1129,7 @@ again:
ordered = btrfs_lookup_ordered_extent(inode,
page_start);
unlock_extent_cached(tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
if (!ordered)
break;
@@ -1190,7 +1189,7 @@ again:
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
- &cached_state, GFP_NOFS);
+ &cached_state);
if (i_done != page_cnt) {
spin_lock(&BTRFS_I(inode)->lock);
@@ -1206,8 +1205,7 @@ again:
&cached_state);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- page_start, page_end - 1, &cached_state,
- GFP_NOFS);
+ page_start, page_end - 1, &cached_state);
for (i = 0; i < i_done; i++) {
clear_page_dirty_for_io(pages[i]);
@@ -1503,7 +1501,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
goto out_free;
}
- if (!device->writeable) {
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
btrfs_info(fs_info,
"resizer unable to apply on readonly device %llu",
devid);
@@ -1528,7 +1526,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
}
}
- if (device->is_tgtdev_for_dev_replace) {
+ if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = -EPERM;
goto out_free;
}
@@ -2675,14 +2673,12 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
goto out;
}
- mutex_lock(&fs_info->volume_mutex);
if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
} else {
vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
ret = btrfs_rm_device(fs_info, vol_args->name, 0);
}
- mutex_unlock(&fs_info->volume_mutex);
clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
if (!ret) {
@@ -2726,9 +2722,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
- mutex_lock(&fs_info->volume_mutex);
ret = btrfs_rm_device(fs_info, vol_args->name, 0);
- mutex_unlock(&fs_info->volume_mutex);
if (!ret)
btrfs_info(fs_info, "disk deleted %s", vol_args->name);
@@ -2753,16 +2747,16 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
if (!fi_args)
return -ENOMEM;
- mutex_lock(&fs_devices->device_list_mutex);
+ rcu_read_lock();
fi_args->num_devices = fs_devices->num_devices;
- memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
- list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
if (device->devid > fi_args->max_id)
fi_args->max_id = device->devid;
}
- mutex_unlock(&fs_devices->device_list_mutex);
+ rcu_read_unlock();
+ memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
fi_args->nodesize = fs_info->nodesize;
fi_args->sectorsize = fs_info->sectorsize;
fi_args->clone_alignment = fs_info->sectorsize;
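The two hunks above replace device_list_mutex with an RCU read-side section
for these read-only scans. A minimal sketch of the pattern, with hypothetical
struct and list names, assuming writers publish via list_add_rcu() and
list_del_rcu() plus a grace period:

	struct item {
		u64 id;
		struct list_head list;	/* linked into an RCU-managed list */
	};

	static u64 max_id(struct list_head *head)
	{
		struct item *it;
		u64 max = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(it, head, list)
			if (it->id > max)
				max = it->id;
		rcu_read_unlock();

		/* only a snapshot; the list may change right after unlock */
		return max;
	}

For an informational ioctl such as fs_info, a momentarily stale snapshot is
acceptable, which is what makes the mutex removable here.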
@@ -2779,7 +2773,6 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
{
struct btrfs_ioctl_dev_info_args *di_args;
struct btrfs_device *dev;
- struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
int ret = 0;
char *s_uuid = NULL;
@@ -2790,7 +2783,7 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
if (!btrfs_is_empty_uuid(di_args->uuid))
s_uuid = di_args->uuid;
- mutex_lock(&fs_devices->device_list_mutex);
+ rcu_read_lock();
dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
if (!dev) {
@@ -2805,17 +2798,15 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
if (dev->name) {
struct rcu_string *name;
- rcu_read_lock();
name = rcu_dereference(dev->name);
- strncpy(di_args->path, name->str, sizeof(di_args->path));
- rcu_read_unlock();
+ strncpy(di_args->path, name->str, sizeof(di_args->path) - 1);
di_args->path[sizeof(di_args->path) - 1] = 0;
} else {
di_args->path[0] = '\0';
}
out:
- mutex_unlock(&fs_devices->device_list_mutex);
+ rcu_read_unlock();
if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
ret = -EFAULT;
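The strncpy() change above matters because strncpy() leaves the destination
unterminated whenever the source is at least as long as the buffer. A minimal
sketch of the bounded-copy idiom, with copy_path() as a hypothetical helper:

	#include <string.h>

	static void copy_path(char *dst, size_t dst_size, const char *src)
	{
		/* copy at most dst_size - 1 bytes ... */
		strncpy(dst, src, dst_size - 1);
		/* ... and terminate explicitly, since strncpy() will not */
		dst[dst_size - 1] = '\0';
	}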
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index f6a05f836629..b30a056963ab 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -164,7 +164,6 @@ static int iterate_object_props(struct btrfs_root *root,
size_t),
void *ctx)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *name_buf = NULL;
char *value_buf = NULL;
@@ -215,12 +214,6 @@ static int iterate_object_props(struct btrfs_root *root,
name_ptr = (unsigned long)(di + 1);
data_ptr = name_ptr + name_len;
- if (verify_dir_item(fs_info, leaf,
- path->slots[0], di)) {
- ret = -EIO;
- goto out;
- }
-
if (name_len <= XATTR_BTRFS_PREFIX_LEN ||
memcmp_extent_buffer(leaf, XATTR_BTRFS_PREFIX,
name_ptr,
@@ -430,11 +423,11 @@ static const char *prop_compression_extract(struct inode *inode)
{
switch (BTRFS_I(inode)->prop_compress) {
case BTRFS_COMPRESS_ZLIB:
- return "zlib";
case BTRFS_COMPRESS_LZO:
- return "lzo";
case BTRFS_COMPRESS_ZSTD:
- return "zstd";
+ return btrfs_compress_type2str(BTRFS_I(inode)->prop_compress);
+ default:
+ break;
}
return NULL;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 168fd03ca3ac..9e61dd624f7b 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2883,8 +2883,7 @@ cleanup:
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(&reserved->range_changed, &uiter)))
clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
- unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
- GFP_NOFS);
+ unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
extent_changeset_release(reserved);
return ret;
}
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index a7f79254ecca..dec0907dfb8a 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -231,7 +231,6 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
cur = h + i;
INIT_LIST_HEAD(&cur->hash_list);
spin_lock_init(&cur->lock);
- init_waitqueue_head(&cur->wait);
}
x = cmpxchg(&info->stripe_hash_table, NULL, table);
@@ -595,14 +594,31 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
* bio list here, anyone else that wants to
* change this stripe needs to do their own rmw.
*/
- if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
- cur->operation == BTRFS_RBIO_PARITY_SCRUB)
+ if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
return 0;
- if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
- cur->operation == BTRFS_RBIO_REBUILD_MISSING)
+ if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
return 0;
+ if (last->operation == BTRFS_RBIO_READ_REBUILD) {
+ int fa = last->faila;
+ int fb = last->failb;
+ int cur_fa = cur->faila;
+ int cur_fb = cur->failb;
+
+ if (last->faila >= last->failb) {
+ fa = last->failb;
+ fb = last->faila;
+ }
+
+ if (cur->faila >= cur->failb) {
+ cur_fa = cur->failb;
+ cur_fb = cur->faila;
+ }
+
+ if (fa != cur_fa || fb != cur_fb)
+ return 0;
+ }
return 1;
}
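The READ_REBUILD branch above only merges two rbios when they have the same
failed stripes, comparing the (faila, failb) pairs order-insensitively by
sorting each pair first. A minimal sketch of that normalization:

	static bool same_failed_pair(int a1, int b1, int a2, int b2)
	{
		int t;

		/* sort each pair so (1, 0) and (0, 1) compare equal */
		if (a1 > b1) { t = a1; a1 = b1; b1 = t; }
		if (a2 > b2) { t = a2; a2 = b2; b2 = t; }
		return a1 == a2 && b1 == b2;
	}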
@@ -670,7 +686,6 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
struct btrfs_raid_bio *cur;
struct btrfs_raid_bio *pending;
unsigned long flags;
- DEFINE_WAIT(wait);
struct btrfs_raid_bio *freeit = NULL;
struct btrfs_raid_bio *cache_drop = NULL;
int ret = 0;
@@ -816,15 +831,6 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
}
goto done_nolock;
- /*
- * The barrier for this waitqueue_active is not needed,
- * we're protected by h->lock and can't miss a wakeup.
- */
- } else if (waitqueue_active(&h->wait)) {
- spin_unlock(&rbio->bio_list_lock);
- spin_unlock_irqrestore(&h->lock, flags);
- wake_up(&h->wait);
- goto done_nolock;
}
}
done:
@@ -858,10 +864,17 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
kfree(rbio);
}
-static void free_raid_bio(struct btrfs_raid_bio *rbio)
+static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
- unlock_stripe(rbio);
- __free_raid_bio(rbio);
+ struct bio *next;
+
+ while (cur) {
+ next = cur->bi_next;
+ cur->bi_next = NULL;
+ cur->bi_status = err;
+ bio_endio(cur);
+ cur = next;
+ }
}
/*
@@ -871,20 +884,26 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio)
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
struct bio *cur = bio_list_get(&rbio->bio_list);
- struct bio *next;
+ struct bio *extra;
if (rbio->generic_bio_cnt)
btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
- free_raid_bio(rbio);
+ /*
+	 * At this moment rbio->bio_list is empty; however, since rbio does not
+	 * always have RBIO_RMW_LOCKED_BIT set and is still linked on the hash
+	 * list, it may be merged with other rbios so that rbio->bio_list
+	 * becomes non-empty again.
+ * Once unlock_stripe() is done, rbio->bio_list will not be updated any
+ * more and we can call bio_endio() on all queued bios.
+ */
+ unlock_stripe(rbio);
+ extra = bio_list_get(&rbio->bio_list);
+ __free_raid_bio(rbio);
- while (cur) {
- next = cur->bi_next;
- cur->bi_next = NULL;
- cur->bi_status = err;
- bio_endio(cur);
- cur = next;
- }
+ rbio_endio_bio_list(cur, err);
+ if (extra)
+ rbio_endio_bio_list(extra, err);
}
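rbio_endio_bio_list() walks bios chained through bi_next. A minimal sketch of
that singly-linked walk with simplified stand-in types; the next pointer must
be saved before the completion callback runs, because the callback may free
the current node:

	struct node {
		struct node *next;
		int status;
	};

	static void complete_all(struct node *cur, int err,
				 void (*done)(struct node *))
	{
		struct node *next;

		while (cur) {
			next = cur->next;	/* save before done() may free cur */
			cur->next = NULL;	/* detach from the chain */
			cur->status = err;
			done(cur);
			cur = next;
		}
	}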
/*
@@ -1435,14 +1454,13 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
*/
static void set_bio_pages_uptodate(struct bio *bio)
{
- struct bio_vec bvec;
- struct bvec_iter iter;
+ struct bio_vec *bvec;
+ int i;
- if (bio_flagged(bio, BIO_CLONED))
- bio->bi_iter = btrfs_io_bio(bio)->iter;
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment(bvec, bio, iter)
- SetPageUptodate(bvec.bv_page);
+ bio_for_each_segment_all(bvec, bio, i)
+ SetPageUptodate(bvec->bv_page);
}
/*
@@ -1969,7 +1987,22 @@ cleanup:
cleanup_io:
if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
- if (err == BLK_STS_OK)
+ /*
+ * - In case of two failures, where rbio->failb != -1:
+ *
+ * Do not cache this rbio since the above read reconstruction
+ * (raid6_datap_recov() or raid6_2data_recov()) may have
+ * changed some content of stripes which are not identical to
+ * on-disk content any more, otherwise, a later write/recover
+ * may steal stripe_pages from this rbio and end up with
+ * corruptions or rebuild failures.
+ *
+ * - In case of single failure, where rbio->failb == -1:
+ *
+ * Cache this rbio iff the above read reconstruction is
+	 * executed without problems.
+ */
+ if (err == BLK_STS_OK && rbio->failb < 0)
cache_rbio_pages(rbio);
else
clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -2170,11 +2203,21 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
}
/*
- * reconstruct from the q stripe if they are
- * asking for mirror 3
+ * Loop retry:
+ * for 'mirror == 2', reconstruct from all other stripes.
+ * for 'mirror_num > 2', select a stripe to fail on every retry.
*/
- if (mirror_num == 3)
- rbio->failb = rbio->real_stripes - 2;
+ if (mirror_num > 2) {
+ /*
+ * 'mirror == 3' is to fail the p stripe and
+ * reconstruct from the q stripe. 'mirror > 3' is to
+ * fail a data stripe and reconstruct from p+q stripe.
+ */
+ rbio->failb = rbio->real_stripes - (mirror_num - 1);
+ ASSERT(rbio->failb > 0);
+ if (rbio->failb <= rbio->faila)
+ rbio->failb--;
+ }
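A worked example of the mapping above, assuming RAID6 with real_stripes = 4
(two data stripes plus P and Q) and faila = 0: mirror 3 yields
failb = 4 - 2 = 2 (the P stripe, so reconstruction uses Q), and mirror 4
yields failb = 4 - 3 = 1 (a data stripe, reconstructed from P + Q). The final
decrement only fires when failb would land on the already-failed stripe. A
sketch of the computation:

	static int pick_failb(int real_stripes, int mirror_num, int faila)
	{
		int failb = real_stripes - (mirror_num - 1);

		if (failb <= faila)	/* skip the already-failed stripe */
			failb--;
		return failb;
	}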
ret = lock_stripe_add(rbio);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 34878699d363..171f3cce30e6 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -606,8 +606,7 @@ static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
}
/* Walk up to the next node that needs to be processed */
-static int walk_up_tree(struct btrfs_root *root, struct btrfs_path *path,
- int *level)
+static int walk_up_tree(struct btrfs_path *path, int *level)
{
int l;
@@ -984,7 +983,6 @@ void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
struct btrfs_path *path;
- struct btrfs_root *root;
struct extent_buffer *eb;
u64 bytenr = 0, num_bytes = 0;
int ret, level;
@@ -1014,7 +1012,7 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
&bytenr, &num_bytes);
if (ret)
break;
- ret = walk_up_tree(root, path, &level);
+ ret = walk_up_tree(path, &level);
if (ret < 0)
break;
if (ret > 0) {
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 3338407ef0f0..aab0194efe46 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -387,13 +387,6 @@ again:
WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid);
WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len);
ptr = (unsigned long)(ref + 1);
- ret = btrfs_is_name_len_valid(leaf, path->slots[0], ptr,
- name_len);
- if (!ret) {
- err = -EIO;
- goto out;
- }
-
WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len));
*sequence = btrfs_root_ref_sequence(leaf, ref);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index b2f871d80982..ec56f33feea9 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -301,6 +301,11 @@ static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
+static inline int scrub_is_page_on_raid56(struct scrub_page *page)
+{
+ return page->recover &&
+ (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
+}
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
@@ -1323,15 +1328,34 @@ nodatasum_case:
* could happen otherwise that a correct page would be
* overwritten by a bad one).
*/
- for (mirror_index = 0;
- mirror_index < BTRFS_MAX_MIRRORS &&
- sblocks_for_recheck[mirror_index].page_count > 0;
- mirror_index++) {
+	for (mirror_index = 0; ; mirror_index++) {
struct scrub_block *sblock_other;
if (mirror_index == failed_mirror_index)
continue;
- sblock_other = sblocks_for_recheck + mirror_index;
+
+ /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
+ if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
+ if (mirror_index >= BTRFS_MAX_MIRRORS)
+ break;
+ if (!sblocks_for_recheck[mirror_index].page_count)
+ break;
+
+ sblock_other = sblocks_for_recheck + mirror_index;
+ } else {
+ struct scrub_recover *r = sblock_bad->pagev[0]->recover;
+ int max_allowed = r->bbio->num_stripes -
+ r->bbio->num_tgtdevs;
+
+ if (mirror_index >= max_allowed)
+ break;
+ if (!sblocks_for_recheck[1].page_count)
+ break;
+
+ ASSERT(failed_mirror_index == 0);
+ sblock_other = sblocks_for_recheck + 1;
+ sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
+ }
/* build and submit the bios, check checksums */
scrub_recheck_block(fs_info, sblock_other, 0);
@@ -1666,49 +1690,32 @@ leave_nomem:
return 0;
}
-struct scrub_bio_ret {
- struct completion event;
- blk_status_t status;
-};
-
static void scrub_bio_wait_endio(struct bio *bio)
{
- struct scrub_bio_ret *ret = bio->bi_private;
-
- ret->status = bio->bi_status;
- complete(&ret->event);
-}
-
-static inline int scrub_is_page_on_raid56(struct scrub_page *page)
-{
- return page->recover &&
- (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
+ complete(bio->bi_private);
}
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
struct bio *bio,
struct scrub_page *page)
{
- struct scrub_bio_ret done;
+ DECLARE_COMPLETION_ONSTACK(done);
int ret;
+ int mirror_num;
- init_completion(&done.event);
- done.status = 0;
bio->bi_iter.bi_sector = page->logical >> 9;
bio->bi_private = &done;
bio->bi_end_io = scrub_bio_wait_endio;
+ mirror_num = page->sblock->pagev[0]->mirror_num;
ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
page->recover->map_length,
- page->mirror_num, 0);
+ mirror_num, 0);
if (ret)
return ret;
- wait_for_completion_io(&done.event);
- if (done.status)
- return -EIO;
-
- return 0;
+ wait_for_completion_io(&done);
+ return blk_status_to_errno(bio->bi_status);
}
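The rewrite above adopts the standard on-stack completion pattern: the
submitter sleeps until the end_io callback fires, then reads the status
straight from the bio instead of a private result struct. A minimal sketch
under that assumption, with the submit path simplified to a bare
submit_bio():

	static void wait_endio(struct bio *bio)
	{
		complete(bio->bi_private);
	}

	static int submit_and_wait(struct bio *bio)
	{
		DECLARE_COMPLETION_ONSTACK(done);

		bio->bi_private = &done;
		bio->bi_end_io = wait_endio;
		submit_bio(bio);
		wait_for_completion_io(&done);
		return blk_status_to_errno(bio->bi_status);
	}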
/*
@@ -2535,7 +2542,7 @@ leave_nomem:
}
WARN_ON(sblock->page_count == 0);
- if (dev->missing) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
/*
* This case should only be hit for RAID 5/6 device replace. See
* the comment in scrub_missing_raid56_pages() for details.
@@ -2870,7 +2877,7 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
u8 csum[BTRFS_CSUM_SIZE];
u32 blocksize;
- if (dev->missing) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
scrub_parity_mark_sectors_error(sparity, logical, len);
return 0;
}
@@ -4112,12 +4119,14 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
mutex_lock(&fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(fs_info, devid, NULL, NULL);
- if (!dev || (dev->missing && !is_dev_replace)) {
+ if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
+ !is_dev_replace)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return -ENODEV;
}
- if (!is_dev_replace && !readonly && !dev->writeable) {
+ if (!is_dev_replace && !readonly &&
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
rcu_read_lock();
name = rcu_dereference(dev->name);
@@ -4128,14 +4137,15 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
}
mutex_lock(&fs_info->scrub_lock);
- if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return -EIO;
}
btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
- if (dev->scrub_device ||
+ if (dev->scrub_ctx ||
(!is_dev_replace &&
btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
@@ -4160,7 +4170,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
return PTR_ERR(sctx);
}
sctx->readonly = readonly;
- dev->scrub_device = sctx;
+ dev->scrub_ctx = sctx;
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
/*
@@ -4195,7 +4205,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
memcpy(progress, &sctx->stat, sizeof(*progress));
mutex_lock(&fs_info->scrub_lock);
- dev->scrub_device = NULL;
+ dev->scrub_ctx = NULL;
scrub_workers_put(fs_info);
mutex_unlock(&fs_info->scrub_lock);
@@ -4252,16 +4262,16 @@ int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
struct scrub_ctx *sctx;
mutex_lock(&fs_info->scrub_lock);
- sctx = dev->scrub_device;
+ sctx = dev->scrub_ctx;
if (!sctx) {
mutex_unlock(&fs_info->scrub_lock);
return -ENOTCONN;
}
atomic_inc(&sctx->cancel_req);
- while (dev->scrub_device) {
+ while (dev->scrub_ctx) {
mutex_unlock(&fs_info->scrub_lock);
wait_event(fs_info->scrub_pause_wait,
- dev->scrub_device == NULL);
+ dev->scrub_ctx == NULL);
mutex_lock(&fs_info->scrub_lock);
}
mutex_unlock(&fs_info->scrub_lock);
@@ -4278,7 +4288,7 @@ int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
mutex_lock(&fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(fs_info, devid, NULL, NULL);
if (dev)
- sctx = dev->scrub_device;
+ sctx = dev->scrub_ctx;
if (sctx)
memcpy(progress, &sctx->stat, sizeof(*progress));
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -4478,8 +4488,7 @@ static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
free_extent_map(em);
out_unlock:
- unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
- GFP_NOFS);
+ unlock_extent_cached(io_tree, lockstart, lockend, &cached_state);
return ret;
}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 20d3300bd268..f306c608dc28 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1059,12 +1059,6 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
}
}
- ret = btrfs_is_name_len_valid(eb, path->slots[0],
- (unsigned long)(di + 1), name_len + data_len);
- if (!ret) {
- ret = -EIO;
- goto out;
- }
if (name_len + data_len > buf_len) {
buf_len = name_len + data_len;
if (is_vmalloc_addr(buf)) {
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3a4dce153645..6e71a2a78363 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -61,12 +61,21 @@
#include "tests/btrfs-tests.h"
#include "qgroup.h"
-#include "backref.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>
static const struct super_operations btrfs_super_ops;
+
+/*
+ * Types for mounting the default subvolume and a subvolume explicitly
+ * requested by subvol=/path. That way the callchain is straightforward and we
+ * don't have to play tricks with the mount options and recursive calls to
+ * btrfs_mount.
+ *
+ * The new btrfs_root_fs_type also serves as a tag for the bdev_holder.
+ */
static struct file_system_type btrfs_fs_type;
+static struct file_system_type btrfs_root_fs_type;
static int btrfs_remount(struct super_block *sb, int *flags, char *data);
@@ -98,30 +107,6 @@ const char *btrfs_decode_error(int errno)
return errstr;
}
-/* btrfs handle error by forcing the filesystem readonly */
-static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
-{
- struct super_block *sb = fs_info->sb;
-
- if (sb_rdonly(sb))
- return;
-
- if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
- sb->s_flags |= SB_RDONLY;
- btrfs_info(fs_info, "forced readonly");
- /*
- * Note that a running device replace operation is not
- * canceled here although there is no way to update
- * the progress. It would add the risk of a deadlock,
- * therefore the canceling is omitted. The only penalty
- * is that some I/O remains active until the procedure
- * completes. The next time when the filesystem is
- * mounted writeable again, the device replace
- * operation continues.
- */
- }
-}
-
/*
* __btrfs_handle_fs_error decodes expected errors from the caller and
* invokes the appropriate error response.
@@ -168,8 +153,23 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
/* Don't go through full error handling during mount */
- if (sb->s_flags & SB_BORN)
- btrfs_handle_error(fs_info);
+ if (!(sb->s_flags & SB_BORN))
+ return;
+
+ if (sb_rdonly(sb))
+ return;
+
+ /* btrfs handle error by forcing the filesystem readonly */
+ sb->s_flags |= SB_RDONLY;
+ btrfs_info(fs_info, "forced readonly");
+ /*
+ * Note that a running device replace operation is not canceled here
+ * although there is no way to update the progress. It would add the
+ * risk of a deadlock, therefore the canceling is omitted. The only
+ * penalty is that some I/O remains active until the procedure
+	 * completes. The next time the filesystem is mounted writeable
+ * again, the device replace operation continues.
+ */
}
#ifdef CONFIG_PRINTK
@@ -405,7 +405,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags)
{
substring_t args[MAX_OPT_ARGS];
- char *p, *num, *orig = NULL;
+ char *p, *num;
u64 cache_gen;
int intarg;
int ret = 0;
@@ -428,16 +428,6 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
if (!options)
goto check;
- /*
- * strsep changes the string, duplicate it because parse_options
- * gets called twice
- */
- options = kstrdup(options, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
-
- orig = options;
-
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
@@ -454,7 +444,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_subvolrootid:
case Opt_device:
/*
- * These are parsed by btrfs_parse_early_options
+ * These are parsed by btrfs_parse_subvol_options
+ * and btrfs_parse_early_options
* and can be happily ignored here.
*/
break;
@@ -877,7 +868,6 @@ out:
btrfs_info(info, "disk space caching is enabled");
if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
btrfs_info(info, "using free space tree");
- kfree(orig);
return ret;
}
@@ -888,11 +878,60 @@ out:
* only when we need to allocate a new super block.
*/
static int btrfs_parse_early_options(const char *options, fmode_t flags,
- void *holder, char **subvol_name, u64 *subvol_objectid,
- struct btrfs_fs_devices **fs_devices)
+ void *holder, struct btrfs_fs_devices **fs_devices)
{
substring_t args[MAX_OPT_ARGS];
char *device_name, *opts, *orig, *p;
+ int error = 0;
+
+ if (!options)
+ return 0;
+
+ /*
+ * strsep changes the string, duplicate it because btrfs_parse_options
+ * gets called later
+ */
+ opts = kstrdup(options, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+ orig = opts;
+
+ while ((p = strsep(&opts, ",")) != NULL) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ if (token == Opt_device) {
+ device_name = match_strdup(&args[0]);
+ if (!device_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ error = btrfs_scan_one_device(device_name,
+ flags, holder, fs_devices);
+ kfree(device_name);
+ if (error)
+ goto out;
+ }
+ }
+
+out:
+ kfree(orig);
+ return error;
+}
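Both parsers share the same strsep() scan: strsep() writes NULs into the
string, so a duplicate is taken first and the original pointer is kept for
kfree(). A minimal sketch, where handle_one_option() is a hypothetical
per-token handler:

	static int scan_options(const char *options)
	{
		char *opts, *orig, *p;
		int error = 0;

		opts = kstrdup(options, GFP_KERNEL);
		if (!opts)
			return -ENOMEM;
		orig = opts;	/* strsep() advances opts; free the original */

		while ((p = strsep(&opts, ",")) != NULL) {
			if (!*p)
				continue;	/* skip empty ",," segments */
			error = handle_one_option(p);
			if (error)
				break;
		}

		kfree(orig);
		return error;
	}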
+
+/*
+ * Parse mount options that are related to subvolume id
+ *
+ * The value is later passed to mount_subvol()
+ */
+static int btrfs_parse_subvol_options(const char *options, fmode_t flags,
+ char **subvol_name, u64 *subvol_objectid)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *opts, *orig, *p;
char *num = NULL;
int error = 0;
@@ -900,8 +939,8 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
return 0;
/*
- * strsep changes the string, duplicate it because parse_options
- * gets called twice
+ * strsep changes the string, duplicate it because
+ * btrfs_parse_early_options gets called later
*/
opts = kstrdup(options, GFP_KERNEL);
if (!opts)
@@ -940,18 +979,6 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
case Opt_subvolrootid:
pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
break;
- case Opt_device:
- device_name = match_strdup(&args[0]);
- if (!device_name) {
- error = -ENOMEM;
- goto out;
- }
- error = btrfs_scan_one_device(device_name,
- flags, holder, fs_devices);
- kfree(device_name);
- if (error)
- goto out;
- break;
default:
break;
}
@@ -1243,7 +1270,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
- char *compress_type;
+ const char *compress_type;
if (btrfs_test_opt(info, DEGRADED))
seq_puts(seq, ",degraded");
@@ -1259,12 +1286,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
num_online_cpus() + 2, 8))
seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
if (btrfs_test_opt(info, COMPRESS)) {
- if (info->compress_type == BTRFS_COMPRESS_ZLIB)
- compress_type = "zlib";
- else if (info->compress_type == BTRFS_COMPRESS_LZO)
- compress_type = "lzo";
- else
- compress_type = "zstd";
+ compress_type = btrfs_compress_type2str(info->compress_type);
if (btrfs_test_opt(info, FORCE_COMPRESS))
seq_printf(seq, ",compress-force=%s", compress_type);
else
@@ -1365,86 +1387,12 @@ static inline int is_subvolume_inode(struct inode *inode)
return 0;
}
-/*
- * This will add subvolid=0 to the argument string while removing any subvol=
- * and subvolid= arguments to make sure we get the top-level root for path
- * walking to the subvol we want.
- */
-static char *setup_root_args(char *args)
-{
- char *buf, *dst, *sep;
-
- if (!args)
- return kstrdup("subvolid=0", GFP_KERNEL);
-
- /* The worst case is that we add ",subvolid=0" to the end. */
- buf = dst = kmalloc(strlen(args) + strlen(",subvolid=0") + 1,
- GFP_KERNEL);
- if (!buf)
- return NULL;
-
- while (1) {
- sep = strchrnul(args, ',');
- if (!strstarts(args, "subvol=") &&
- !strstarts(args, "subvolid=")) {
- memcpy(dst, args, sep - args);
- dst += sep - args;
- *dst++ = ',';
- }
- if (*sep)
- args = sep + 1;
- else
- break;
- }
- strcpy(dst, "subvolid=0");
-
- return buf;
-}
-
static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
- int flags, const char *device_name,
- char *data)
+ const char *device_name, struct vfsmount *mnt)
{
struct dentry *root;
- struct vfsmount *mnt = NULL;
- char *newargs;
int ret;
- newargs = setup_root_args(data);
- if (!newargs) {
- root = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs);
- if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) {
- if (flags & SB_RDONLY) {
- mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~SB_RDONLY,
- device_name, newargs);
- } else {
- mnt = vfs_kern_mount(&btrfs_fs_type, flags | SB_RDONLY,
- device_name, newargs);
- if (IS_ERR(mnt)) {
- root = ERR_CAST(mnt);
- mnt = NULL;
- goto out;
- }
-
- down_write(&mnt->mnt_sb->s_umount);
- ret = btrfs_remount(mnt->mnt_sb, &flags, NULL);
- up_write(&mnt->mnt_sb->s_umount);
- if (ret < 0) {
- root = ERR_PTR(ret);
- goto out;
- }
- }
- }
- if (IS_ERR(mnt)) {
- root = ERR_CAST(mnt);
- mnt = NULL;
- goto out;
- }
-
if (!subvol_name) {
if (!subvol_objectid) {
ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
@@ -1500,7 +1448,6 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
out:
mntput(mnt);
- kfree(newargs);
kfree(subvol_name);
return root;
}
@@ -1558,11 +1505,11 @@ static int setup_security_options(struct btrfs_fs_info *fs_info,
/*
* Find a superblock for the given device / mount point.
*
- * Note: This is based on get_sb_bdev from fs/super.c with a few additions
- * for multiple device setup. Make sure to keep it in sync.
+ * Note: This is based on mount_bdev from fs/super.c with a few additions
+ * for multiple device setup. Make sure to keep it in sync.
*/
-static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
- const char *device_name, void *data)
+static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ int flags, const char *device_name, void *data)
{
struct block_device *bdev = NULL;
struct super_block *s;
@@ -1570,27 +1517,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
struct btrfs_fs_info *fs_info = NULL;
struct security_mnt_opts new_sec_opts;
fmode_t mode = FMODE_READ;
- char *subvol_name = NULL;
- u64 subvol_objectid = 0;
int error = 0;
if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
error = btrfs_parse_early_options(data, mode, fs_type,
- &subvol_name, &subvol_objectid,
&fs_devices);
if (error) {
- kfree(subvol_name);
return ERR_PTR(error);
}
- if (subvol_name || subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
- /* mount_subvol() will free subvol_name. */
- return mount_subvol(subvol_name, subvol_objectid, flags,
- device_name, data);
- }
-
security_init_mnt_opts(&new_sec_opts);
if (data) {
error = parse_security_options(data, &new_sec_opts);
@@ -1674,6 +1611,84 @@ error_sec_opts:
return ERR_PTR(error);
}
+/*
+ * Mount function which is called by VFS layer.
+ *
+ * In order to allow mounting a subvolume directly, btrfs uses mount_subtree()
+ * which needs the vfsmount* of the device's root (/). This means the
+ * device's root has to be mounted internally in any case.
+ *
+ * Operation flow:
+ * 1. Parse subvol id related options for later use in mount_subvol().
+ *
+ * 2. Mount device's root (/) by calling vfs_kern_mount().
+ *
+ * NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
+ * first place. In order to avoid calling btrfs_mount() again, we use
+ * different file_system_type which is not registered to VFS by
+ * register_filesystem() (btrfs_root_fs_type). As a result,
+ * btrfs_mount_root() is called. The return value will be used by
+ * mount_subtree() in mount_subvol().
+ *
+ * 3. Call mount_subvol() to get the dentry of subvolume. Since there is
+ * "btrfs subvolume set-default", mount_subvol() is called always.
+ */
+static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
+ const char *device_name, void *data)
+{
+ struct vfsmount *mnt_root;
+ struct dentry *root;
+ fmode_t mode = FMODE_READ;
+ char *subvol_name = NULL;
+ u64 subvol_objectid = 0;
+ int error = 0;
+
+ if (!(flags & SB_RDONLY))
+ mode |= FMODE_WRITE;
+
+ error = btrfs_parse_subvol_options(data, mode,
+ &subvol_name, &subvol_objectid);
+ if (error) {
+ kfree(subvol_name);
+ return ERR_PTR(error);
+ }
+
+ /* mount device's root (/) */
+ mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
+ if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
+ if (flags & SB_RDONLY) {
+ mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
+ flags & ~SB_RDONLY, device_name, data);
+ } else {
+ mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
+ flags | SB_RDONLY, device_name, data);
+ if (IS_ERR(mnt_root)) {
+ root = ERR_CAST(mnt_root);
+ goto out;
+ }
+
+ down_write(&mnt_root->mnt_sb->s_umount);
+ error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
+ up_write(&mnt_root->mnt_sb->s_umount);
+ if (error < 0) {
+ root = ERR_PTR(error);
+ mntput(mnt_root);
+ goto out;
+ }
+ }
+ }
+ if (IS_ERR(mnt_root)) {
+ root = ERR_CAST(mnt_root);
+ goto out;
+ }
+
+ /* mount_subvol() will free subvol_name and mnt_root */
+ root = mount_subvol(subvol_name, subvol_objectid, device_name, mnt_root);
+
+out:
+ return root;
+}
+
static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
int new_pool_size, int old_pool_size)
{
@@ -1820,7 +1835,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
goto restore;
}
- if (!btrfs_check_rw_degradable(fs_info)) {
+ if (!btrfs_check_rw_degradable(fs_info, NULL)) {
btrfs_warn(fs_info,
"too many missing devices, writeable remount is not allowed");
ret = -EACCES;
@@ -1972,8 +1987,10 @@ static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
- if (!device->in_fs_metadata || !device->bdev ||
- device->is_tgtdev_for_dev_replace)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &device->dev_state) ||
+ !device->bdev ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
continue;
if (i >= nr_devices)
@@ -2174,6 +2191,15 @@ static struct file_system_type btrfs_fs_type = {
.kill_sb = btrfs_kill_super,
.fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
};
+
+static struct file_system_type btrfs_root_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "btrfs",
+ .mount = btrfs_mount_root,
+ .kill_sb = btrfs_kill_super,
+ .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
+};
+
MODULE_ALIAS_FS("btrfs");
static int btrfs_control_open(struct inode *inode, struct file *file)
@@ -2207,11 +2233,11 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
ret = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_fs_type, &fs_devices);
+ &btrfs_root_fs_type, &fs_devices);
break;
case BTRFS_IOC_DEVICES_READY:
ret = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_fs_type, &fs_devices);
+ &btrfs_root_fs_type, &fs_devices);
if (ret)
break;
ret = !(fs_devices->num_devices == fs_devices->total_devices);
@@ -2269,7 +2295,7 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
while (cur_devices) {
head = &cur_devices->devices;
list_for_each_entry(dev, head, dev_list) {
- if (dev->missing)
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
continue;
if (!dev->name)
continue;
@@ -2324,7 +2350,7 @@ static struct miscdevice btrfs_misc = {
MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
MODULE_ALIAS("devname:btrfs-control");
-static int btrfs_interface_init(void)
+static int __init btrfs_interface_init(void)
{
return misc_register(&btrfs_misc);
}
@@ -2334,7 +2360,7 @@ static void btrfs_interface_exit(void)
misc_deregister(&btrfs_misc);
}
-static void btrfs_print_mod_info(void)
+static void __init btrfs_print_mod_info(void)
{
pr_info("Btrfs loaded, crc32c=%s"
#ifdef CONFIG_BTRFS_DEBUG
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index a28bba801264..a8bafed931f4 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -897,7 +897,7 @@ static int btrfs_init_debugfs(void)
return 0;
}
-int btrfs_init_sysfs(void)
+int __init btrfs_init_sysfs(void)
{
int ret;
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index d3f25376a0f8..9786d8cd0aa6 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -277,6 +277,9 @@ int btrfs_run_sanity_tests(void)
goto out;
}
}
+ ret = btrfs_test_extent_map();
+ if (ret)
+ goto out;
out:
btrfs_destroy_test_fs();
return ret;
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index 266f1e3d1784..bc0615bac3cc 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -33,6 +33,7 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_map(void);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
new file mode 100644
index 000000000000..70c993f01670
--- /dev/null
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2017 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/types.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+
+static void free_extent_map_tree(struct extent_map_tree *em_tree)
+{
+ struct extent_map *em;
+ struct rb_node *node;
+
+ while (!RB_EMPTY_ROOT(&em_tree->map)) {
+ node = rb_first(&em_tree->map);
+ em = rb_entry(node, struct extent_map, rb_node);
+ remove_extent_mapping(em_tree, em);
+
+#ifdef CONFIG_BTRFS_DEBUG
+ if (refcount_read(&em->refs) != 1) {
+ test_msg(
+"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d\n",
+ em->start, em->len, em->block_start,
+ em->block_len, refcount_read(&em->refs));
+
+ refcount_set(&em->refs, 1);
+ }
+#endif
+ free_extent_map(em);
+ }
+}
+
+/*
+ * Test scenario:
+ *
+ * Suppose that no extent map has been loaded into memory yet. There is a file
+ * extent [0, 16K), followed by another file extent [16K, 20K). Two dio reads
+ * enter btrfs_get_extent() concurrently: t1 is reading [8K, 16K) and t2 is
+ * reading [0, 8K).
+ *
+ * t1 t2
+ * btrfs_get_extent() btrfs_get_extent()
+ * -> lookup_extent_mapping() ->lookup_extent_mapping()
+ * -> add_extent_mapping(0, 16K)
+ * -> return em
+ * ->add_extent_mapping(0, 16K)
+ * -> #handle -EEXIST
+ */
+static void test_case_1(struct extent_map_tree *em_tree)
+{
+ struct extent_map *em;
+ u64 start = 0;
+ u64 len = SZ_8K;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em)
+ /* Skip the test on error. */
+ return;
+
+ /* Add [0, 16K) */
+ em->start = 0;
+ em->len = SZ_16K;
+ em->block_start = 0;
+ em->block_len = SZ_16K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ /* Add [16K, 20K) following [0, 16K) */
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ em->start = SZ_16K;
+ em->len = SZ_4K;
+ em->block_start = SZ_32K; /* avoid merging */
+ em->block_len = SZ_4K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ /* Add [0, 8K), should return [0, 16K) instead. */
+ em->start = start;
+ em->len = len;
+ em->block_start = start;
+ em->block_len = len;
+ ret = btrfs_add_extent_mapping(em_tree, &em, em->start, em->len);
+ if (ret)
+ test_msg("case1 [%llu %llu]: ret %d\n", start, start + len, ret);
+ if (em &&
+ (em->start != 0 || extent_map_end(em) != SZ_16K ||
+ em->block_start != 0 || em->block_len != SZ_16K))
+ test_msg(
+"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu\n",
+ start, start + len, ret, em->start, em->len,
+ em->block_start, em->block_len);
+ free_extent_map(em);
+out:
+ /* free memory */
+ free_extent_map_tree(em_tree);
+}
+
+/*
+ * Test scenario:
+ *
+ * Reading an inline extent ends up with -EEXIST, i.e. read an inline
+ * extent, discard the page cache, and read it again.
+ */
+static void test_case_2(struct extent_map_tree *em_tree)
+{
+ struct extent_map *em;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em)
+ /* Skip the test on error. */
+ return;
+
+ /* Add [0, 1K) */
+ em->start = 0;
+ em->len = SZ_1K;
+ em->block_start = EXTENT_MAP_INLINE;
+ em->block_len = (u64)-1;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+	/* Add [4K, 8K) following [0, 1K) */
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ em->start = SZ_4K;
+ em->len = SZ_4K;
+ em->block_start = SZ_4K;
+ em->block_len = SZ_4K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ /* Add [0, 1K) */
+ em->start = 0;
+ em->len = SZ_1K;
+ em->block_start = EXTENT_MAP_INLINE;
+ em->block_len = (u64)-1;
+ ret = btrfs_add_extent_mapping(em_tree, &em, em->start, em->len);
+ if (ret)
+ test_msg("case2 [0 1K]: ret %d\n", ret);
+ if (em &&
+ (em->start != 0 || extent_map_end(em) != SZ_1K ||
+ em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1))
+ test_msg(
+"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu\n",
+ ret, em->start, em->len, em->block_start,
+ em->block_len);
+ free_extent_map(em);
+out:
+ /* free memory */
+ free_extent_map_tree(em_tree);
+}
+
+static void __test_case_3(struct extent_map_tree *em_tree, u64 start)
+{
+ struct extent_map *em;
+ u64 len = SZ_4K;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em)
+ /* Skip this test on error. */
+ return;
+
+ /* Add [4K, 8K) */
+ em->start = SZ_4K;
+ em->len = SZ_4K;
+ em->block_start = SZ_4K;
+ em->block_len = SZ_4K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ /* Add [0, 16K) */
+ em->start = 0;
+ em->len = SZ_16K;
+ em->block_start = 0;
+ em->block_len = SZ_16K;
+ ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
+ if (ret)
+ test_msg("case3 [0x%llx 0x%llx): ret %d\n",
+ start, start + len, ret);
+ /*
+ * Since bytes within em are contiguous, em->block_start is identical to
+ * em->start.
+ */
+ if (em &&
+ (start < em->start || start + len > extent_map_end(em) ||
+ em->start != em->block_start || em->len != em->block_len))
+ test_msg(
+"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)\n",
+ start, start + len, ret, em->start, em->len,
+ em->block_start, em->block_len);
+ free_extent_map(em);
+out:
+ /* free memory */
+ free_extent_map_tree(em_tree);
+}
+
+/*
+ * Test scenario:
+ *
+ * Suppose that no extent map has been loaded into memory yet.
+ * There is a file extent [0, 16K), two jobs are running concurrently
+ * against it, t1 is buffered writing to [4K, 8K) and t2 is doing dio
+ * read from [0, 4K) or [8K, 12K) or [12K, 16K).
+ *
+ * t1 goes ahead of t2 and adds em [4K, 8K) into tree.
+ *
+ * t1 t2
+ * cow_file_range() btrfs_get_extent()
+ * -> lookup_extent_mapping()
+ * -> add_extent_mapping()
+ * -> add_extent_mapping()
+ */
+static void test_case_3(struct extent_map_tree *em_tree)
+{
+ __test_case_3(em_tree, 0);
+ __test_case_3(em_tree, SZ_8K);
+ __test_case_3(em_tree, (12 * 1024ULL));
+}
+
+static void __test_case_4(struct extent_map_tree *em_tree, u64 start)
+{
+ struct extent_map *em;
+ u64 len = SZ_4K;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em)
+ /* Skip this test on error. */
+ return;
+
+ /* Add [0K, 8K) */
+ em->start = 0;
+ em->len = SZ_8K;
+ em->block_start = 0;
+ em->block_len = SZ_8K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ /* Add [8K, 24K) */
+ em->start = SZ_8K;
+ em->len = 24 * 1024ULL;
+ em->block_start = SZ_16K; /* avoid merging */
+ em->block_len = 24 * 1024ULL;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+ /* Add [0K, 32K) */
+ em->start = 0;
+ em->len = SZ_32K;
+ em->block_start = 0;
+ em->block_len = SZ_32K;
+ ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
+ if (ret)
+ test_msg("case4 [0x%llx 0x%llx): ret %d\n",
+			 start, start + len, ret);
+ if (em &&
+ (start < em->start || start + len > extent_map_end(em)))
+ test_msg(
+"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)\n",
+			start, start + len, ret, em->start, em->len, em->block_start,
+ em->block_len);
+ free_extent_map(em);
+out:
+ /* free memory */
+ free_extent_map_tree(em_tree);
+}
+
+/*
+ * Test scenario:
+ *
+ * Suppose that no extent map has been loaded into memory yet.
+ * There is a file extent [0, 32K), two jobs are running concurrently
+ * against it, t1 is doing dio write to [8K, 32K) and t2 is doing dio
+ * read from [0, 4K) or [4K, 8K).
+ *
+ * t1 goes ahead of t2 and splits em [0, 32K) to em [0K, 8K) and [8K 32K).
+ *
+ * t1 t2
+ * btrfs_get_blocks_direct() btrfs_get_blocks_direct()
+ * -> btrfs_get_extent() -> btrfs_get_extent()
+ * -> lookup_extent_mapping()
+ * -> add_extent_mapping() -> lookup_extent_mapping()
+ * # load [0, 32K)
+ * -> btrfs_new_extent_direct()
+ * -> btrfs_drop_extent_cache()
+ * # split [0, 32K)
+ * -> add_extent_mapping()
+ * # add [8K, 32K)
+ * -> add_extent_mapping()
+ * # handle -EEXIST when adding
+ * # [0, 32K)
+ */
+static void test_case_4(struct extent_map_tree *em_tree)
+{
+ __test_case_4(em_tree, 0);
+ __test_case_4(em_tree, SZ_4K);
+}
+
+int btrfs_test_extent_map(void)
+{
+ struct extent_map_tree *em_tree;
+
+ test_msg("Running extent_map tests\n");
+
+ em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
+ if (!em_tree)
+ /* Skip the test on error. */
+ return 0;
+
+ extent_map_tree_init(em_tree);
+
+ test_case_1(em_tree);
+ test_case_2(em_tree);
+ test_case_3(em_tree);
+ test_case_4(em_tree);
+
+ kfree(em_tree);
+ return 0;
+}
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 30affb60da51..13420cd19ef0 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -288,10 +288,6 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
- if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
- test_msg("Vacancy flag wasn't set properly\n");
- goto out;
- }
free_extent_map(em);
btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
@@ -1001,8 +997,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
EXTENT_DELALLOC | EXTENT_DIRTY |
- EXTENT_UPTODATE, 0, 0,
- NULL, GFP_KERNEL);
+ EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_msg("clear_extent_bit returned %d\n", ret);
goto out;
@@ -1070,8 +1065,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
BTRFS_MAX_EXTENT_SIZE + sectorsize,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_UPTODATE, 0, 0,
- NULL, GFP_KERNEL);
+ EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_msg("clear_extent_bit returned %d\n", ret);
goto out;
@@ -1104,8 +1098,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/* Empty */
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_UPTODATE, 0, 0,
- NULL, GFP_KERNEL);
+ EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_msg("clear_extent_bit returned %d\n", ret);
goto out;
@@ -1121,8 +1114,7 @@ out:
if (ret)
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_UPTODATE, 0, 0,
- NULL, GFP_KERNEL);
+ EXTENT_UPTODATE, 0, 0, NULL);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
@@ -1134,7 +1126,6 @@ int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
int ret;
set_bit(EXTENT_FLAG_COMPRESSED, &compressed_only);
- set_bit(EXTENT_FLAG_VACANCY, &vacancy_only);
set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
test_msg("Running btrfs_get_extent tests\n");
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5a8c2649af2f..04f07144b45c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -495,8 +495,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
if (current->journal_info) {
WARN_ON(type & TRANS_EXTWRITERS);
h = current->journal_info;
- h->use_count++;
- WARN_ON(h->use_count > 2);
+ refcount_inc(&h->use_count);
+ WARN_ON(refcount_read(&h->use_count) > 2);
h->orig_rsv = h->block_rsv;
h->block_rsv = NULL;
goto got_it;
@@ -567,7 +567,7 @@ again:
h->transid = cur_trans->transid;
h->transaction = cur_trans;
h->root = root;
- h->use_count = 1;
+ refcount_set(&h->use_count, 1);
h->fs_info = root->fs_info;
h->type = type;
@@ -837,8 +837,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
int err = 0;
int must_run_delayed_refs = 0;
- if (trans->use_count > 1) {
- trans->use_count--;
+ if (refcount_read(&trans->use_count) > 1) {
+ refcount_dec(&trans->use_count);
trans->block_rsv = trans->orig_rsv;
return 0;
}
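The use_count conversion above moves the nested-handle bookkeeping onto
refcount_t, which warns and saturates on overflow or underflow instead of
silently wrapping like the old unsigned long. A minimal sketch of the
join/put pairing, with hypothetical names:

	struct handle {
		refcount_t use_count;
	};

	static void handle_join(struct handle *h)
	{
		refcount_inc(&h->use_count);	/* nested start */
	}

	static bool handle_put(struct handle *h)
	{
		/* true when this put drops the final reference */
		return refcount_dec_and_test(&h->use_count);
	}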
@@ -1016,8 +1016,7 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
* it's safe to do it (through clear_btree_io_tree()).
*/
err = clear_extent_bit(dirty_pages, start, end,
- EXTENT_NEED_WAIT,
- 0, 0, &cached_state, GFP_NOFS);
+ EXTENT_NEED_WAIT, 0, 0, &cached_state);
if (err == -ENOMEM)
err = 0;
if (!err)
@@ -1869,7 +1868,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
struct btrfs_transaction *cur_trans = trans->transaction;
DEFINE_WAIT(wait);
- WARN_ON(trans->use_count > 1);
+ WARN_ON(refcount_read(&trans->use_count) > 1);
btrfs_abort_transaction(trans, err);
@@ -2266,16 +2265,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
}
ret = write_all_supers(fs_info, 0);
- if (ret) {
- mutex_unlock(&fs_info->tree_log_mutex);
- goto scrub_continue;
- }
-
/*
* the super is written, we can safely allow the tree-loggers
* to go about their business
*/
mutex_unlock(&fs_info->tree_log_mutex);
+ if (ret)
+ goto scrub_continue;
btrfs_finish_extent_commit(trans, fs_info);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index c55e44560103..6beee072b1bd 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -58,6 +58,7 @@ struct btrfs_transaction {
/* Be protected by fs_info->trans_lock when we want to change it. */
enum btrfs_trans_state state;
+ int aborted;
struct list_head list;
struct extent_io_tree dirty_pages;
unsigned long start_time;
@@ -70,7 +71,6 @@ struct btrfs_transaction {
struct list_head dirty_bgs;
struct list_head io_bgs;
struct list_head dropped_roots;
- u64 num_dirty_bgs;
/*
* we need to make sure block group deletion doesn't race with
@@ -79,11 +79,11 @@ struct btrfs_transaction {
*/
struct mutex cache_write_mutex;
spinlock_t dirty_bgs_lock;
+ unsigned int num_dirty_bgs;
/* Protected by spin lock fs_info->unused_bgs_lock. */
struct list_head deleted_bgs;
spinlock_t dropped_roots_lock;
struct btrfs_delayed_ref_root delayed_refs;
- int aborted;
struct btrfs_fs_info *fs_info;
};
@@ -111,20 +111,19 @@ struct btrfs_trans_handle {
u64 transid;
u64 bytes_reserved;
u64 chunk_bytes_reserved;
- unsigned long use_count;
- unsigned long blocks_reserved;
unsigned long delayed_ref_updates;
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
+ refcount_t use_count;
+ unsigned int type;
short aborted;
- short adding_csums;
+ bool adding_csums;
bool allocating_chunk;
bool can_flush_pending_bgs;
bool reloc_reserved;
bool sync;
bool dirty;
- unsigned int type;
struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
struct list_head new_bgs;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index ce4ed6ec8f39..c3c8d48f6618 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -30,6 +30,7 @@
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
+#include "hash.h"
/*
* Error message should follow the following format:
@@ -223,6 +224,142 @@ static int check_csum_item(struct btrfs_root *root, struct extent_buffer *leaf,
}
/*
+ * Customized reporter for dir_item; the only important extra info is
+ * key->objectid, which represents the inode number
+ */
+__printf(4, 5)
+static void dir_item_err(const struct btrfs_root *root,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(root->fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node", root->objectid,
+ btrfs_header_bytenr(eb), slot, key.objectid, &vaf);
+ va_end(args);
+}
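dir_item_err() forwards its varargs through struct va_format so the
underlying btrfs_crit() emits one atomic line carrying the common prefix. A
minimal sketch of that %pV forwarding, reduced to a plain pr_crit():

	__printf(1, 2)
	static void report_err(const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		pr_crit("corrupt item: %pV\n", &vaf);	/* one atomic line */
		va_end(args);
	}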
+
+static int check_dir_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_dir_item *di;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u32 cur = 0;
+
+ di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ while (cur < item_size) {
+ u32 name_len;
+ u32 data_len;
+ u32 max_name_len;
+ u32 total_size;
+ u32 name_hash;
+ u8 dir_type;
+
+ /* header itself should not cross item boundary */
+ if (cur + sizeof(*di) > item_size) {
+ dir_item_err(root, leaf, slot,
+ "dir item header crosses item boundary, have %zu boundary %u",
+ cur + sizeof(*di), item_size);
+ return -EUCLEAN;
+ }
+
+ /* dir type check */
+ dir_type = btrfs_dir_type(leaf, di);
+ if (dir_type >= BTRFS_FT_MAX) {
+ dir_item_err(root, leaf, slot,
+ "invalid dir item type, have %u expect [0, %u)",
+ dir_type, BTRFS_FT_MAX);
+ return -EUCLEAN;
+ }
+
+ if (key->type == BTRFS_XATTR_ITEM_KEY &&
+ dir_type != BTRFS_FT_XATTR) {
+ dir_item_err(root, leaf, slot,
+ "invalid dir item type for XATTR key, have %u expect %u",
+ dir_type, BTRFS_FT_XATTR);
+ return -EUCLEAN;
+ }
+ if (dir_type == BTRFS_FT_XATTR &&
+ key->type != BTRFS_XATTR_ITEM_KEY) {
+ dir_item_err(root, leaf, slot,
+ "xattr dir type found for non-XATTR key");
+ return -EUCLEAN;
+ }
+ if (dir_type == BTRFS_FT_XATTR)
+ max_name_len = XATTR_NAME_MAX;
+ else
+ max_name_len = BTRFS_NAME_LEN;
+
+ /* Name/data length check */
+ name_len = btrfs_dir_name_len(leaf, di);
+ data_len = btrfs_dir_data_len(leaf, di);
+ if (name_len > max_name_len) {
+ dir_item_err(root, leaf, slot,
+ "dir item name len too long, have %u max %u",
+ name_len, max_name_len);
+ return -EUCLEAN;
+ }
+ if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
+ dir_item_err(root, leaf, slot,
+ "dir item name and data len too long, have %u max %u",
+ name_len + data_len,
+ BTRFS_MAX_XATTR_SIZE(root->fs_info));
+ return -EUCLEAN;
+ }
+
+ if (data_len && dir_type != BTRFS_FT_XATTR) {
+ dir_item_err(root, leaf, slot,
+ "dir item with invalid data len, have %u expect 0",
+ data_len);
+ return -EUCLEAN;
+ }
+
+ total_size = sizeof(*di) + name_len + data_len;
+
+ /* header and name/data should not cross item boundary */
+ if (cur + total_size > item_size) {
+ dir_item_err(root, leaf, slot,
+ "dir item data crosses item boundary, have %u boundary %u",
+ cur + total_size, item_size);
+ return -EUCLEAN;
+ }
+
+ /*
+ * Special check for XATTR/DIR_ITEM, as key->offset is name
+ * hash, should match its name
+ */
+ if (key->type == BTRFS_DIR_ITEM_KEY ||
+ key->type == BTRFS_XATTR_ITEM_KEY) {
+ char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
+
+ read_extent_buffer(leaf, namebuf,
+ (unsigned long)(di + 1), name_len);
+ name_hash = btrfs_name_hash(namebuf, name_len);
+ if (key->offset != name_hash) {
+ dir_item_err(root, leaf, slot,
+ "name hash mismatch with key, have 0x%016x expect 0x%016llx",
+ name_hash, key->offset);
+ return -EUCLEAN;
+ }
+ }
+ cur += total_size;
+ di = (struct btrfs_dir_item *)((void *)di + total_size);
+ }
+ return 0;
+}
+
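For orientation, here is a minimal userspace sketch of the walk check_dir_item() performs; struct dir_item_hdr and walk_dir_items() are invented stand-ins for struct btrfs_dir_item and the real checker, since one DIR_ITEM/XATTR_ITEM leaf item may pack several entries back to back:

#include <stdint.h>
#include <stddef.h>

/* Stand-in for struct btrfs_dir_item; key/transid/type fields omitted. */
struct dir_item_hdr {
	uint32_t data_len;
	uint32_t name_len;
};

/* Layout inside one leaf item: |hdr|name|data|hdr|name|data|... */
static int walk_dir_items(const uint8_t *item, uint32_t item_size)
{
	uint32_t cur = 0;

	while (cur < item_size) {
		const struct dir_item_hdr *hdr;
		uint32_t total;

		/* the header itself must not cross the item boundary */
		if (cur + sizeof(*hdr) > item_size)
			return -1;
		hdr = (const struct dir_item_hdr *)(item + cur);

		/* nor may header + name + data */
		total = sizeof(*hdr) + hdr->name_len + hdr->data_len;
		if (cur + total > item_size)
			return -1;
		cur += total;
	}
	return 0;
}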
+/*
* Common point to switch the item-specific validation.
*/
static int check_leaf_item(struct btrfs_root *root,
@@ -238,6 +375,11 @@ static int check_leaf_item(struct btrfs_root *root,
case BTRFS_EXTENT_CSUM_KEY:
ret = check_csum_item(root, leaf, key, slot);
break;
+ case BTRFS_DIR_ITEM_KEY:
+ case BTRFS_DIR_INDEX_KEY:
+ case BTRFS_XATTR_ITEM_KEY:
+ ret = check_dir_item(root, leaf, key, slot);
+ break;
}
return ret;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 7bf9b31561db..afadaadab18e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
+#include <linux/iversion.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
@@ -1173,19 +1174,15 @@ next:
return 0;
}
-static int extref_get_fields(struct extent_buffer *eb, int slot,
- unsigned long ref_ptr, u32 *namelen, char **name,
- u64 *index, u64 *parent_objectid)
+static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+ u32 *namelen, char **name, u64 *index,
+ u64 *parent_objectid)
{
struct btrfs_inode_extref *extref;
extref = (struct btrfs_inode_extref *)ref_ptr;
*namelen = btrfs_inode_extref_name_len(eb, extref);
- if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)&extref->name,
- *namelen))
- return -EIO;
-
*name = kmalloc(*namelen, GFP_NOFS);
if (*name == NULL)
return -ENOMEM;
@@ -1200,19 +1197,14 @@ static int extref_get_fields(struct extent_buffer *eb, int slot,
return 0;
}
-static int ref_get_fields(struct extent_buffer *eb, int slot,
- unsigned long ref_ptr, u32 *namelen, char **name,
- u64 *index)
+static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+ u32 *namelen, char **name, u64 *index)
{
struct btrfs_inode_ref *ref;
ref = (struct btrfs_inode_ref *)ref_ptr;
*namelen = btrfs_inode_ref_name_len(eb, ref);
- if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)(ref + 1),
- *namelen))
- return -EIO;
-
*name = kmalloc(*namelen, GFP_NOFS);
if (*name == NULL)
return -ENOMEM;
@@ -1287,8 +1279,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
while (ref_ptr < ref_end) {
if (log_ref_ver) {
- ret = extref_get_fields(eb, slot, ref_ptr, &namelen,
- &name, &ref_index, &parent_objectid);
+ ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
+ &ref_index, &parent_objectid);
/*
* parent object can change from one array
* item to another.
@@ -1300,8 +1292,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
goto out;
}
} else {
- ret = ref_get_fields(eb, slot, ref_ptr, &namelen,
- &name, &ref_index);
+ ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
+ &ref_index);
}
if (ret)
goto out;
@@ -1835,7 +1827,6 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
u32 item_size = btrfs_item_size_nr(eb, slot);
struct btrfs_dir_item *di;
@@ -1848,8 +1839,6 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(fs_info, eb, slot, di))
- return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
if (ret < 0)
@@ -2024,11 +2013,6 @@ again:
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(fs_info, eb, slot, di)) {
- ret = -EIO;
- goto out;
- }
-
name_len = btrfs_dir_name_len(eb, di);
name = kmalloc(name_len, GFP_NOFS);
if (!name) {
@@ -2109,7 +2093,6 @@ static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
const u64 ino)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key search_key;
struct btrfs_path *log_path;
int i;
@@ -2151,11 +2134,6 @@ process_leaf:
u32 this_len = sizeof(*di) + name_len + data_len;
char *name;
- ret = verify_dir_item(fs_info, path->nodes[0], i, di);
- if (ret) {
- ret = -EIO;
- goto out;
- }
name = kmalloc(name_len, GFP_NOFS);
if (!name) {
ret = -ENOMEM;
@@ -3609,7 +3587,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
&token);
- btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
+ btrfs_set_token_inode_sequence(leaf, item,
+ inode_peek_iversion(inode), &token);
btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
@@ -4572,12 +4551,6 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
this_len = sizeof(*extref) + this_name_len;
}
- ret = btrfs_is_name_len_valid(eb, slot, name_ptr,
- this_name_len);
- if (!ret) {
- ret = -EIO;
- goto out;
- }
if (this_name_len > name_len) {
char *new_name;
@@ -5432,11 +5405,10 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
struct dentry *parent,
const loff_t start,
const loff_t end,
- int exists_only,
+ int inode_only,
struct btrfs_log_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
struct super_block *sb;
struct dentry *old_parent = NULL;
int ret = 0;
@@ -5602,7 +5574,7 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
int ret;
ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)),
- parent, start, end, 0, ctx);
+ parent, start, end, LOG_INODE_ALL, ctx);
dput(parent);
return ret;
@@ -5865,6 +5837,6 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
return 0;
return btrfs_log_inode_parent(trans, root, inode, parent, 0,
- LLONG_MAX, 1, NULL);
+ LLONG_MAX, LOG_INODE_EXISTS, NULL);
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 49810b70afd3..b5036bd69e6a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -145,6 +145,71 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
struct btrfs_bio **bbio_ret,
int mirror_num, int need_raid_map);
+/*
+ * Device locking
+ * ==============
+ *
+ * There are several mutexes that protect manipulation of devices and low-level
+ * structures like chunks but not block groups, extents or files
+ *
+ * uuid_mutex (global lock)
+ * ------------------------
+ * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
+ * the SCAN_DEV ioctl registration or from mount, either implicitly (the first
+ * device) or as requested by the device= mount option
+ *
+ * the mutex can be very coarse and can cover long-running operations
+ *
+ * protects: updates to fs_devices counters like missing devices, rw devices,
+ * seeding, structure cloning, opening/closing devices at mount/umount time
+ *
+ * global::fs_devs - add, remove, updates to the global list
+ *
+ * does not protect: manipulation of the fs_devices::devices list!
+ *
+ * btrfs_device::name - renames (write side), read is RCU
+ *
+ * fs_devices::device_list_mutex (per-fs, with RCU)
+ * ------------------------------------------------
+ * protects updates to fs_devices::devices, i.e. adding and deleting
+ *
+ * simple list traversal with read-only actions can be done with RCU protection
+ *
+ * may be used to exclude some operations from running concurrently without any
+ * modifications to the list (see write_all_supers)
+ *
+ * volume_mutex
+ * ------------
+ * coarse lock owned by a mounted filesystem; used to exclude some operations
+ * that cannot run in parallel and affect the higher-level properties of the
+ * filesystem, like device add/delete/resize/replace, or balance
+ *
+ * balance_mutex
+ * -------------
+ * protects balance structures (status, state) and context accessed from
+ * several places (internally, ioctl)
+ *
+ * chunk_mutex
+ * -----------
+ * protects chunks, adding or removing during allocation, trim or when a new
+ * device is added/removed
+ *
+ * cleaner_mutex
+ * -------------
+ * a big lock that is held by the cleaner thread and prevents running subvolume
+ * cleaning together with relocation or delayed iputs
+ *
+ *
+ * Lock nesting
+ * ============
+ *
+ * uuid_mutex
+ * volume_mutex
+ * device_list_mutex
+ * chunk_mutex
+ * balance_mutex
+ */
+
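The nesting list above reads top-down; as a hedged illustration (the caller below is invented, the lock names are the real ones documented above), a function needing several of these locks would take them in this order and release in reverse:

static void example_device_operation(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	mutex_lock(&fs_info->balance_mutex);

	/* ... manipulate devices or chunks here ... */

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	mutex_unlock(&fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
}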
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
@@ -180,6 +245,13 @@ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
return fs_devs;
}
+static void free_device(struct btrfs_device *device)
+{
+ rcu_string_free(device->name);
+ bio_put(device->flush_bio);
+ kfree(device);
+}
+
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device;
@@ -188,9 +260,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
device = list_entry(fs_devices->devices.next,
struct btrfs_device, dev_list);
list_del(&device->dev_list);
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
+ free_device(device);
}
kfree(fs_devices);
}
@@ -220,6 +290,11 @@ void btrfs_cleanup_fs_uuids(void)
}
}
+/*
+ * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
+ * Returned struct is not linked onto any lists and must be destroyed using
+ * free_device.
+ */
static struct btrfs_device *__alloc_device(void)
{
struct btrfs_device *dev;
@@ -237,7 +312,6 @@ static struct btrfs_device *__alloc_device(void)
kfree(dev);
return ERR_PTR(-ENOMEM);
}
- bio_get(dev->flush_bio);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_alloc_list);
@@ -245,7 +319,6 @@ static struct btrfs_device *__alloc_device(void)
spin_lock_init(&dev->io_lock);
- spin_lock_init(&dev->reada_lock);
atomic_set(&dev->reada_in_flight, 0);
atomic_set(&dev->dev_stats_ccnt, 0);
btrfs_device_data_ordered_init(dev);
@@ -531,45 +604,42 @@ static void pending_bios_fn(struct btrfs_work *work)
run_scheduled_bios(device);
}
-
-static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
+/*
+ * Search and remove all stale devices (devices which are not mounted).
+ * When both arguments are NULL, it will search and release all stale devices.
+ * path: Optional. When provided, it will release only the unmounted
+ * devices matching this path.
+ * skip_dev: Optional. This device will be skipped when searching for
+ * stale devices.
+ */
+static void btrfs_free_stale_devices(const char *path,
+ struct btrfs_device *skip_dev)
{
- struct btrfs_fs_devices *fs_devs;
- struct btrfs_device *dev;
-
- if (!cur_dev->name)
- return;
+ struct btrfs_fs_devices *fs_devs, *tmp_fs_devs;
+ struct btrfs_device *dev, *tmp_dev;
- list_for_each_entry(fs_devs, &fs_uuids, list) {
- int del = 1;
+ list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, list) {
if (fs_devs->opened)
continue;
- if (fs_devs->seeding)
- continue;
- list_for_each_entry(dev, &fs_devs->devices, dev_list) {
+ list_for_each_entry_safe(dev, tmp_dev,
+ &fs_devs->devices, dev_list) {
+ int not_found = 0;
- if (dev == cur_dev)
+ if (skip_dev && skip_dev == dev)
continue;
- if (!dev->name)
+ if (path && !dev->name)
continue;
- /*
- * Todo: This won't be enough. What if the same device
- * comes back (with new uuid and) with its mapper path?
- * But for now, this does help as mostly an admin will
- * either use mapper or non mapper path throughout.
- */
rcu_read_lock();
- del = strcmp(rcu_str_deref(dev->name),
- rcu_str_deref(cur_dev->name));
+ if (path)
+ not_found = strcmp(rcu_str_deref(dev->name),
+ path);
rcu_read_unlock();
- if (!del)
- break;
- }
+ if (not_found)
+ continue;
- if (!del) {
/* delete the stale device */
if (fs_devs->num_devices == 1) {
btrfs_sysfs_remove_fsid(fs_devs);
@@ -578,38 +648,99 @@ static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
} else {
fs_devs->num_devices--;
list_del(&dev->dev_list);
- rcu_string_free(dev->name);
- bio_put(dev->flush_bio);
- kfree(dev);
+ free_device(dev);
}
- break;
}
}
}
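The helper's two call shapes, for reference (the first is how device_list_add() below uses it; the second is a hypothetical full sweep permitted by the both-NULL contract documented above):

	/* evict any stale twin of @path after (re)registering @device */
	btrfs_free_stale_devices(path, device);

	/* release every unmounted stale device */
	btrfs_free_stale_devices(NULL, NULL);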
+static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
+ struct btrfs_device *device, fmode_t flags,
+ void *holder)
+{
+ struct request_queue *q;
+ struct block_device *bdev;
+ struct buffer_head *bh;
+ struct btrfs_super_block *disk_super;
+ u64 devid;
+ int ret;
+
+ if (device->bdev)
+ return -EINVAL;
+ if (!device->name)
+ return -EINVAL;
+
+ ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
+ &bdev, &bh);
+ if (ret)
+ return ret;
+
+ disk_super = (struct btrfs_super_block *)bh->b_data;
+ devid = btrfs_stack_device_id(&disk_super->dev_item);
+ if (devid != device->devid)
+ goto error_brelse;
+
+ if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
+ goto error_brelse;
+
+ device->generation = btrfs_super_generation(disk_super);
+
+ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
+ clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ fs_devices->seeding = 1;
+ } else {
+ if (bdev_read_only(bdev))
+ clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ else
+ set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ }
+
+ q = bdev_get_queue(bdev);
+ if (!blk_queue_nonrot(q))
+ fs_devices->rotating = 1;
+
+ device->bdev = bdev;
+ clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ device->mode = flags;
+
+ fs_devices->open_devices++;
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
+ device->devid != BTRFS_DEV_REPLACE_DEVID) {
+ fs_devices->rw_devices++;
+ list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
+ }
+ brelse(bh);
+
+ return 0;
+
+error_brelse:
+ brelse(bh);
+ blkdev_put(bdev, flags);
+
+ return -EINVAL;
+}
+
/*
* Add new device to list of registered devices
*
* Returns:
- * 1 - first time device is seen
- * 0 - device already known
- * < 0 - error
+ * device pointer which was just added or updated when successful
+ * error pointer when failed
*/
-static noinline int device_list_add(const char *path,
- struct btrfs_super_block *disk_super,
- u64 devid, struct btrfs_fs_devices **fs_devices_ret)
+static noinline struct btrfs_device *device_list_add(const char *path,
+ struct btrfs_super_block *disk_super)
{
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices;
struct rcu_string *name;
- int ret = 0;
u64 found_transid = btrfs_super_generation(disk_super);
+ u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
fs_devices = find_fsid(disk_super->fsid);
if (!fs_devices) {
fs_devices = alloc_fs_devices(disk_super->fsid);
if (IS_ERR(fs_devices))
- return PTR_ERR(fs_devices);
+ return ERR_CAST(fs_devices);
list_add(&fs_devices->list, &fs_uuids);
@@ -621,20 +752,19 @@ static noinline int device_list_add(const char *path,
if (!device) {
if (fs_devices->opened)
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
device = btrfs_alloc_device(NULL, &devid,
disk_super->dev_item.uuid);
if (IS_ERR(device)) {
/* we can safely leave the fs_devices entry around */
- return PTR_ERR(device);
+ return device;
}
name = rcu_string_strdup(path, GFP_NOFS);
if (!name) {
- bio_put(device->flush_bio);
- kfree(device);
- return -ENOMEM;
+ free_device(device);
+ return ERR_PTR(-ENOMEM);
}
rcu_assign_pointer(device->name, name);
@@ -643,8 +773,16 @@ static noinline int device_list_add(const char *path,
fs_devices->num_devices++;
mutex_unlock(&fs_devices->device_list_mutex);
- ret = 1;
device->fs_devices = fs_devices;
+ btrfs_free_stale_devices(path, device);
+
+ if (disk_super->label[0])
+ pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
+ disk_super->label, devid, found_transid, path);
+ else
+ pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
+ disk_super->fsid, devid, found_transid, path);
+
} else if (!device->name || strcmp(device->name->str, path)) {
/*
* When FS is already mounted.
@@ -680,17 +818,17 @@ static noinline int device_list_add(const char *path,
* with larger generation number or the last-in if
* generation are equal.
*/
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
}
name = rcu_string_strdup(path, GFP_NOFS);
if (!name)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
rcu_string_free(device->name);
rcu_assign_pointer(device->name, name);
- if (device->missing) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
fs_devices->missing_devices--;
- device->missing = 0;
+ clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
}
}
@@ -703,16 +841,9 @@ static noinline int device_list_add(const char *path,
if (!fs_devices->opened)
device->generation = found_transid;
- /*
- * if there is new btrfs on an already registered device,
- * then remove the stale device entry.
- */
- if (ret > 0)
- btrfs_free_stale_device(device);
-
- *fs_devices_ret = fs_devices;
+ fs_devices->total_devices = btrfs_super_num_devices(disk_super);
- return ret;
+ return device;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
@@ -745,8 +876,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
name = rcu_string_strdup(orig_dev->name->str,
GFP_KERNEL);
if (!name) {
- bio_put(device->flush_bio);
- kfree(device);
+ free_device(device);
goto error;
}
rcu_assign_pointer(device->name, name);
@@ -773,10 +903,12 @@ void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
again:
 /* This is the initialized path; it is safe to release the devices. */
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
- if (device->in_fs_metadata) {
- if (!device->is_tgtdev_for_dev_replace &&
- (!latest_dev ||
- device->generation > latest_dev->generation)) {
+ if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &device->dev_state)) {
+ if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+ &device->dev_state) &&
+ (!latest_dev ||
+ device->generation > latest_dev->generation)) {
latest_dev = device;
}
continue;
@@ -793,7 +925,8 @@ again:
* not, which means whether this device is
* used or whether it should be removed.
*/
- if (step == 0 || device->is_tgtdev_for_dev_replace) {
+ if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+ &device->dev_state)) {
continue;
}
}
@@ -802,17 +935,16 @@ again:
device->bdev = NULL;
fs_devices->open_devices--;
}
- if (device->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
list_del_init(&device->dev_alloc_list);
- device->writeable = 0;
- if (!device->is_tgtdev_for_dev_replace)
+ clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+ &device->dev_state))
fs_devices->rw_devices--;
}
list_del_init(&device->dev_list);
fs_devices->num_devices--;
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
+ free_device(device);
}
if (fs_devices->seed) {
@@ -825,35 +957,25 @@ again:
mutex_unlock(&uuid_mutex);
}
-static void __free_device(struct work_struct *work)
-{
- struct btrfs_device *device;
-
- device = container_of(work, struct btrfs_device, rcu_work);
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
-}
-
-static void free_device(struct rcu_head *head)
+static void free_device_rcu(struct rcu_head *head)
{
struct btrfs_device *device;
device = container_of(head, struct btrfs_device, rcu);
-
- INIT_WORK(&device->rcu_work, __free_device);
- schedule_work(&device->rcu_work);
+ free_device(device);
}
static void btrfs_close_bdev(struct btrfs_device *device)
{
- if (device->bdev && device->writeable) {
+ if (!device->bdev)
+ return;
+
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
sync_blockdev(device->bdev);
invalidate_bdev(device->bdev);
}
- if (device->bdev)
- blkdev_put(device->bdev, device->mode);
+ blkdev_put(device->bdev, device->mode);
}
static void btrfs_prepare_close_one_device(struct btrfs_device *device)
@@ -865,13 +987,13 @@ static void btrfs_prepare_close_one_device(struct btrfs_device *device)
if (device->bdev)
fs_devices->open_devices--;
- if (device->writeable &&
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
list_del_init(&device->dev_alloc_list);
fs_devices->rw_devices--;
}
- if (device->missing)
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
fs_devices->missing_devices--;
new_device = btrfs_alloc_device(NULL, &device->devid,
@@ -917,7 +1039,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
struct btrfs_device, dev_list);
list_del(&device->dev_list);
btrfs_close_bdev(device);
- call_rcu(&device->rcu, free_device);
+ call_rcu(&device->rcu, free_device_rcu);
}
WARN_ON(fs_devices->open_devices);
@@ -947,93 +1069,32 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
__btrfs_close_devices(fs_devices);
free_fs_devices(fs_devices);
}
- /*
- * Wait for rcu kworkers under __btrfs_close_devices
- * to finish all blkdev_puts so device is really
- * free when umount is done.
- */
- rcu_barrier();
return ret;
}
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder)
{
- struct request_queue *q;
- struct block_device *bdev;
struct list_head *head = &fs_devices->devices;
struct btrfs_device *device;
struct btrfs_device *latest_dev = NULL;
- struct buffer_head *bh;
- struct btrfs_super_block *disk_super;
- u64 devid;
- int seeding = 1;
int ret = 0;
flags |= FMODE_EXCL;
list_for_each_entry(device, head, dev_list) {
- if (device->bdev)
- continue;
- if (!device->name)
- continue;
-
/* Just open everything we can; ignore failures here */
- if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
- &bdev, &bh))
+ if (btrfs_open_one_device(fs_devices, device, flags, holder))
continue;
- disk_super = (struct btrfs_super_block *)bh->b_data;
- devid = btrfs_stack_device_id(&disk_super->dev_item);
- if (devid != device->devid)
- goto error_brelse;
-
- if (memcmp(device->uuid, disk_super->dev_item.uuid,
- BTRFS_UUID_SIZE))
- goto error_brelse;
-
- device->generation = btrfs_super_generation(disk_super);
if (!latest_dev ||
device->generation > latest_dev->generation)
latest_dev = device;
-
- if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
- device->writeable = 0;
- } else {
- device->writeable = !bdev_read_only(bdev);
- seeding = 0;
- }
-
- q = bdev_get_queue(bdev);
- if (blk_queue_discard(q))
- device->can_discard = 1;
- if (!blk_queue_nonrot(q))
- fs_devices->rotating = 1;
-
- device->bdev = bdev;
- device->in_fs_metadata = 0;
- device->mode = flags;
-
- fs_devices->open_devices++;
- if (device->writeable &&
- device->devid != BTRFS_DEV_REPLACE_DEVID) {
- fs_devices->rw_devices++;
- list_add(&device->dev_alloc_list,
- &fs_devices->alloc_list);
- }
- brelse(bh);
- continue;
-
-error_brelse:
- brelse(bh);
- blkdev_put(bdev, flags);
- continue;
}
if (fs_devices->open_devices == 0) {
ret = -EINVAL;
goto out;
}
- fs_devices->seeding = seeding;
fs_devices->opened = 1;
fs_devices->latest_bdev = latest_dev->bdev;
fs_devices->total_rw_bytes = 0;
@@ -1117,12 +1178,10 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
struct btrfs_fs_devices **fs_devices_ret)
{
struct btrfs_super_block *disk_super;
+ struct btrfs_device *device;
struct block_device *bdev;
struct page *page;
- int ret = -EINVAL;
- u64 devid;
- u64 transid;
- u64 total_devices;
+ int ret = 0;
u64 bytenr;
/*
@@ -1141,26 +1200,16 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
goto error;
}
- if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
+ if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
+ ret = -EINVAL;
goto error_bdev_put;
-
- devid = btrfs_stack_device_id(&disk_super->dev_item);
- transid = btrfs_super_generation(disk_super);
- total_devices = btrfs_super_num_devices(disk_super);
-
- ret = device_list_add(path, disk_super, devid, fs_devices_ret);
- if (ret > 0) {
- if (disk_super->label[0]) {
- pr_info("BTRFS: device label %s ", disk_super->label);
- } else {
- pr_info("BTRFS: device fsid %pU ", disk_super->fsid);
- }
-
- pr_cont("devid %llu transid %llu %s\n", devid, transid, path);
- ret = 0;
}
- if (!ret && fs_devices_ret)
- (*fs_devices_ret)->total_devices = total_devices;
+
+ device = device_list_add(path, disk_super);
+ if (IS_ERR(device))
+ ret = PTR_ERR(device);
+ else
+ *fs_devices_ret = device->fs_devices;
btrfs_release_disk_super(page);
@@ -1186,7 +1235,8 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
*length = 0;
- if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
+ if (start >= device->total_bytes ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
return 0;
path = btrfs_alloc_path();
@@ -1364,7 +1414,8 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
max_hole_size = 0;
again:
- if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
+ if (search_start >= search_end ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = -ENOSPC;
goto out;
}
@@ -1571,8 +1622,8 @@ static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
struct btrfs_key key;
- WARN_ON(!device->in_fs_metadata);
- WARN_ON(device->is_tgtdev_for_dev_replace);
+ WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
+ WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1662,7 +1713,7 @@ error:
* the device information is stored in the chunk root
* the btrfs_device struct should be fully filled in
*/
-static int btrfs_add_device(struct btrfs_trans_handle *trans,
+static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
@@ -1818,7 +1869,8 @@ static struct btrfs_device * btrfs_find_next_active_device(
list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
if (next_device != device &&
- !next_device->missing && next_device->bdev)
+ !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
+ && next_device->bdev)
return next_device;
}
@@ -1859,6 +1911,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
u64 num_devices;
int ret = 0;
+ mutex_lock(&fs_info->volume_mutex);
mutex_lock(&uuid_mutex);
num_devices = fs_info->fs_devices->num_devices;
@@ -1878,17 +1931,18 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
if (ret)
goto out;
- if (device->is_tgtdev_for_dev_replace) {
+ if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = BTRFS_ERROR_DEV_TGT_REPLACE;
goto out;
}
- if (device->writeable && fs_info->fs_devices->rw_devices == 1) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
+ fs_info->fs_devices->rw_devices == 1) {
ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
goto out;
}
- if (device->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_del_init(&device->dev_alloc_list);
device->fs_devices->rw_devices--;
@@ -1910,7 +1964,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
if (ret)
goto error_undo;
- device->in_fs_metadata = 0;
+ clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
btrfs_scrub_cancel_dev(fs_info, device);
/*
@@ -1930,7 +1984,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
device->fs_devices->num_devices--;
device->fs_devices->total_devices--;
- if (device->missing)
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
device->fs_devices->missing_devices--;
btrfs_assign_next_active_device(fs_info, device, NULL);
@@ -1950,11 +2004,11 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
* the devices list. All that's left is to zero out the old
* supers and free the device.
*/
- if (device->writeable)
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
btrfs_scratch_superblocks(device->bdev, device->name->str);
btrfs_close_bdev(device);
- call_rcu(&device->rcu, free_device);
+ call_rcu(&device->rcu, free_device_rcu);
if (cur_devices->open_devices == 0) {
struct btrfs_fs_devices *fs_devices;
@@ -1973,10 +2027,11 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
out:
mutex_unlock(&uuid_mutex);
+ mutex_unlock(&fs_info->volume_mutex);
return ret;
error_undo:
- if (device->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_add(&device->dev_alloc_list,
&fs_info->fs_devices->alloc_list);
@@ -2004,10 +2059,10 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
list_del_rcu(&srcdev->dev_list);
list_del(&srcdev->dev_alloc_list);
fs_devices->num_devices--;
- if (srcdev->missing)
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
fs_devices->missing_devices--;
- if (srcdev->writeable)
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
fs_devices->rw_devices--;
if (srcdev->bdev)
@@ -2019,13 +2074,13 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
{
struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
- if (srcdev->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
/* zero out the old super if it is writable */
btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
}
btrfs_close_bdev(srcdev);
- call_rcu(&srcdev->rcu, free_device);
+ call_rcu(&srcdev->rcu, free_device_rcu);
 /* if there are no more devices, we'd rather delete the fs_devices */
if (!fs_devices->num_devices) {
@@ -2084,7 +2139,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
btrfs_close_bdev(tgtdev);
- call_rcu(&tgtdev->rcu, free_device);
+ call_rcu(&tgtdev->rcu, free_device_rcu);
}
static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
@@ -2129,7 +2184,8 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
* is held by the caller.
*/
list_for_each_entry(tmp, devices, dev_list) {
- if (tmp->in_fs_metadata && !tmp->bdev) {
+ if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &tmp->dev_state) && !tmp->bdev) {
*device = tmp;
break;
}
@@ -2358,26 +2414,19 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
name = rcu_string_strdup(device_path, GFP_KERNEL);
if (!name) {
- bio_put(device->flush_bio);
- kfree(device);
ret = -ENOMEM;
- goto error;
+ goto error_free_device;
}
rcu_assign_pointer(device->name, name);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
ret = PTR_ERR(trans);
- goto error;
+ goto error_free_device;
}
q = bdev_get_queue(bdev);
- if (blk_queue_discard(q))
- device->can_discard = 1;
- device->writeable = 1;
+ set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
device->generation = trans->transid;
device->io_width = fs_info->sectorsize;
device->io_align = fs_info->sectorsize;
@@ -2388,8 +2437,8 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
device->commit_total_bytes = device->total_bytes;
device->fs_info = fs_info;
device->bdev = bdev;
- device->in_fs_metadata = 1;
- device->is_tgtdev_for_dev_replace = 0;
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
device->mode = FMODE_EXCL;
device->dev_stats_valid = 1;
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
@@ -2450,7 +2499,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
}
- ret = btrfs_add_device(trans, fs_info, device);
+ ret = btrfs_add_dev_item(trans, fs_info, device);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
@@ -2511,9 +2560,8 @@ error_trans:
sb->s_flags |= SB_RDONLY;
if (trans)
btrfs_end_transaction(trans);
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
+error_free_device:
+ free_device(device);
error:
blkdev_put(bdev, FMODE_EXCL);
if (seeding_dev && !unlocked) {
@@ -2528,7 +2576,6 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
struct btrfs_device *srcdev,
struct btrfs_device **device_out)
{
- struct request_queue *q;
struct btrfs_device *device;
struct block_device *bdev;
struct list_head *devices;
@@ -2579,18 +2626,14 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
name = rcu_string_strdup(device_path, GFP_KERNEL);
if (!name) {
- bio_put(device->flush_bio);
- kfree(device);
+ free_device(device);
ret = -ENOMEM;
goto error;
}
rcu_assign_pointer(device->name, name);
- q = bdev_get_queue(bdev);
- if (blk_queue_discard(q))
- device->can_discard = 1;
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- device->writeable = 1;
+ set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
device->generation = 0;
device->io_width = fs_info->sectorsize;
device->io_align = fs_info->sectorsize;
@@ -2603,8 +2646,8 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
device->commit_bytes_used = device->bytes_used;
device->fs_info = fs_info;
device->bdev = bdev;
- device->in_fs_metadata = 1;
- device->is_tgtdev_for_dev_replace = 1;
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
device->mode = FMODE_EXCL;
device->dev_stats_valid = 1;
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
@@ -2632,7 +2675,7 @@ void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
tgtdev->io_align = sectorsize;
tgtdev->sector_size = sectorsize;
tgtdev->fs_info = fs_info;
- tgtdev->in_fs_metadata = 1;
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &tgtdev->dev_state);
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
@@ -2690,7 +2733,7 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
u64 old_total;
u64 diff;
- if (!device->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
return -EACCES;
new_size = round_down(new_size, fs_info->sectorsize);
@@ -2700,7 +2743,7 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
if (new_size <= device->total_bytes ||
- device->is_tgtdev_for_dev_replace) {
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
mutex_unlock(&fs_info->chunk_mutex);
return -EINVAL;
}
@@ -3044,6 +3087,48 @@ error:
return ret;
}
+/*
+ * Return 1 : allocated a data chunk successfully,
+ * Return <0: error during the data chunk allocation,
+ * Return 0 : no need to allocate a data chunk.
+ */
+static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
+ u64 chunk_offset)
+{
+ struct btrfs_block_group_cache *cache;
+ u64 bytes_used;
+ u64 chunk_type;
+
+ cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+ ASSERT(cache);
+ chunk_type = cache->flags;
+ btrfs_put_block_group(cache);
+
+ if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
+ spin_lock(&fs_info->data_sinfo->lock);
+ bytes_used = fs_info->data_sinfo->bytes_used;
+ spin_unlock(&fs_info->data_sinfo->lock);
+
+ if (!bytes_used) {
+ struct btrfs_trans_handle *trans;
+ int ret;
+
+ trans = btrfs_join_transaction(fs_info->tree_root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ ret = btrfs_force_chunk_alloc(trans, fs_info,
+ BTRFS_BLOCK_GROUP_DATA);
+ btrfs_end_transaction(trans);
+ if (ret < 0)
+ return ret;
+
+ return 1;
+ }
+ }
+ return 0;
+}
+
static int insert_balance_item(struct btrfs_fs_info *fs_info,
struct btrfs_balance_control *bctl)
{
@@ -3502,7 +3587,6 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
u32 count_meta = 0;
u32 count_sys = 0;
int chunk_reserved = 0;
- u64 bytes_used = 0;
/* step one make some room on all the devices */
devices = &fs_info->fs_devices->devices;
@@ -3510,10 +3594,10 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
old_size = btrfs_device_get_total_bytes(device);
size_to_free = div_factor(old_size, 1);
size_to_free = min_t(u64, size_to_free, SZ_1M);
- if (!device->writeable ||
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) ||
btrfs_device_get_total_bytes(device) -
btrfs_device_get_bytes_used(device) > size_to_free ||
- device->is_tgtdev_for_dev_replace)
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
continue;
ret = btrfs_shrink_device(device, old_size - size_to_free);
@@ -3661,28 +3745,21 @@ again:
goto loop;
}
- ASSERT(fs_info->data_sinfo);
- spin_lock(&fs_info->data_sinfo->lock);
- bytes_used = fs_info->data_sinfo->bytes_used;
- spin_unlock(&fs_info->data_sinfo->lock);
-
- if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
- !chunk_reserved && !bytes_used) {
- trans = btrfs_start_transaction(chunk_root, 0);
- if (IS_ERR(trans)) {
- mutex_unlock(&fs_info->delete_unused_bgs_mutex);
- ret = PTR_ERR(trans);
- goto error;
- }
-
- ret = btrfs_force_chunk_alloc(trans, fs_info,
- BTRFS_BLOCK_GROUP_DATA);
- btrfs_end_transaction(trans);
+ if (!chunk_reserved) {
+ /*
+ * We may be relocating the only data chunk we have,
+ * which could potentially end up losing the data raid
+ * profile, so let's allocate an empty one in
+ * advance.
+ */
+ ret = btrfs_may_alloc_data_chunk(fs_info,
+ found_key.offset);
if (ret < 0) {
mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto error;
+ } else if (ret == 1) {
+ chunk_reserved = 1;
}
- chunk_reserved = 1;
}
ret = btrfs_relocate_chunk(fs_info, found_key.offset);
@@ -4381,7 +4458,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
new_size = round_down(new_size, fs_info->sectorsize);
diff = round_down(old_size - new_size, fs_info->sectorsize);
- if (device->is_tgtdev_for_dev_replace)
+ if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
return -EINVAL;
path = btrfs_alloc_path();
@@ -4393,7 +4470,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_total_bytes(device, new_size);
- if (device->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
device->fs_devices->total_rw_bytes -= diff;
atomic64_sub(diff, &fs_info->free_chunk_space);
}
@@ -4445,6 +4522,18 @@ again:
chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
btrfs_release_path(path);
+ /*
+ * We may be relocating the only data chunk we have,
+ * which could potentially end up losing the data raid
+ * profile, so let's allocate an empty one in
+ * advance.
+ */
+ ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
+ if (ret < 0) {
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+ goto done;
+ }
+
ret = btrfs_relocate_chunk(fs_info, chunk_offset);
mutex_unlock(&fs_info->delete_unused_bgs_mutex);
if (ret && ret != -ENOSPC)
@@ -4518,7 +4607,7 @@ done:
if (ret) {
mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_total_bytes(device, old_size);
- if (device->writeable)
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
device->fs_devices->total_rw_bytes += diff;
atomic64_add(diff, &fs_info->free_chunk_space);
mutex_unlock(&fs_info->chunk_mutex);
@@ -4678,14 +4767,15 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
u64 max_avail;
u64 dev_offset;
- if (!device->writeable) {
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
WARN(1, KERN_ERR
"BTRFS: read-only device in alloc_list\n");
continue;
}
- if (!device->in_fs_metadata ||
- device->is_tgtdev_for_dev_replace)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &device->dev_state) ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
continue;
if (device->total_bytes > device->bytes_used)
@@ -5033,12 +5123,13 @@ int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
- if (map->stripes[i].dev->missing) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING,
+ &map->stripes[i].dev->dev_state)) {
miss_ndevs++;
continue;
}
-
- if (!map->stripes[i].dev->writeable) {
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
+ &map->stripes[i].dev->dev_state)) {
readonly = 1;
goto end;
}
@@ -5104,7 +5195,14 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
ret = 2;
else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
- ret = 3;
+ /*
+ * There could be two corrupted data stripes, so we need
+ * to retry in a loop in order to rebuild the correct data.
+ *
+ * Fail one stripe at a time on every retry, except the
+ * stripe under reconstruction.
+ */
+ ret = map->num_stripes;
else
ret = 1;
free_extent_map(em);
@@ -6004,15 +6102,14 @@ static void btrfs_end_bio(struct bio *bio)
dev = bbio->stripes[stripe_index].dev;
if (dev->bdev) {
if (bio_op(bio) == REQ_OP_WRITE)
- btrfs_dev_stat_inc(dev,
+ btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_WRITE_ERRS);
else
- btrfs_dev_stat_inc(dev,
+ btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_READ_ERRS);
if (bio->bi_opf & REQ_PREFLUSH)
- btrfs_dev_stat_inc(dev,
+ btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_FLUSH_ERRS);
- btrfs_dev_stat_print_on_error(dev);
}
}
}
@@ -6062,16 +6159,15 @@ static noinline void btrfs_schedule_bio(struct btrfs_device *device,
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
- if (device->missing || !device->bdev) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
+ !device->bdev) {
bio_io_error(bio);
return;
}
/* don't bother with additional async steps for reads, right now */
if (bio_op(bio) == REQ_OP_READ) {
- bio_get(bio);
btrfsic_submit_bio(bio);
- bio_put(bio);
return;
}
@@ -6208,7 +6304,8 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
dev = bbio->stripes[dev_nr].dev;
if (!dev || !dev->bdev ||
- (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
+ (bio_op(first_bio) == REQ_OP_WRITE &&
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
bbio_error(bbio, first_bio, logical);
continue;
}
@@ -6257,7 +6354,7 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
device->fs_devices = fs_devices;
fs_devices->num_devices++;
- device->missing = 1;
+ set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
fs_devices->missing_devices++;
return device;
@@ -6273,8 +6370,8 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
* is generated.
*
* Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
- * on error. Returned struct is not linked onto any lists and can be
- * destroyed with kfree() right away.
+ * on error. Returned struct is not linked onto any lists and must be
+ * destroyed with free_device.
*/
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
const u64 *devid,
@@ -6297,8 +6394,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
ret = find_next_devid(fs_info, &tmp);
if (ret) {
- bio_put(dev->flush_bio);
- kfree(dev);
+ free_device(dev);
return ERR_PTR(ret);
}
}
@@ -6477,7 +6573,9 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
}
btrfs_report_missing_device(fs_info, devid, uuid, false);
}
- map->stripes[i].dev->in_fs_metadata = 1;
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &(map->stripes[i].dev->dev_state));
+
}
write_lock(&map_tree->map_tree.lock);
@@ -6506,7 +6604,7 @@ static void fill_device_from_item(struct extent_buffer *leaf,
device->io_width = btrfs_device_io_width(leaf, dev_item);
device->sector_size = btrfs_device_sector_size(leaf, dev_item);
WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
- device->is_tgtdev_for_dev_replace = 0;
+ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
ptr = btrfs_device_uuid(dev_item);
read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
@@ -6618,7 +6716,8 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
dev_uuid, false);
}
- if(!device->bdev && !device->missing) {
+ if (!device->bdev &&
+ !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
/*
 * this happens when a device that was properly set up
* in the device info lists suddenly goes bad.
@@ -6626,12 +6725,13 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
 * device->missing is set to one here
*/
device->fs_devices->missing_devices++;
- device->missing = 1;
+ set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
}
/* Move the device to its own fs_devices */
if (device->fs_devices != fs_devices) {
- ASSERT(device->missing);
+ ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
+ &device->dev_state));
list_move(&device->dev_list, &fs_devices->devices);
device->fs_devices->num_devices--;
@@ -6645,15 +6745,16 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
}
if (device->fs_devices != fs_info->fs_devices) {
- BUG_ON(device->writeable);
+ BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
if (device->generation !=
btrfs_device_generation(leaf, dev_item))
return -EINVAL;
}
fill_device_from_item(leaf, dev_item, device);
- device->in_fs_metadata = 1;
- if (device->writeable && !device->is_tgtdev_for_dev_replace) {
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
+ !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
device->fs_devices->total_rw_bytes += device->total_bytes;
atomic64_add(device->total_bytes - device->bytes_used,
&fs_info->free_chunk_space);
@@ -6785,10 +6886,13 @@ out_short_read:
/*
* Check if all chunks in the fs are OK for read-write degraded mount
*
+ * If @failing_dev is specified, it is accounted as missing.
+ *
* Return true if all chunks meet the minimal RW mount requirements.
* Return false if any chunk doesn't meet the minimal RW mount requirements.
*/
-bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info)
+bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *failing_dev)
{
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct extent_map *em;
@@ -6816,12 +6920,16 @@ bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info)
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_device *dev = map->stripes[i].dev;
- if (!dev || !dev->bdev || dev->missing ||
+ if (!dev || !dev->bdev ||
+ test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
dev->last_flush_error)
missing++;
+ else if (failing_dev && failing_dev == dev)
+ missing++;
}
if (missing > max_tolerated) {
- btrfs_warn(fs_info,
+ if (!failing_dev)
+ btrfs_warn(fs_info,
"chunk %llu missing %d devices, max tolerance is %d for writeable mount",
em->start, missing, max_tolerated);
free_extent_map(em);
@@ -7092,10 +7200,24 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
- if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
+ stats_cnt = atomic_read(&device->dev_stats_ccnt);
+ if (!device->dev_stats_valid || stats_cnt == 0)
continue;
- stats_cnt = atomic_read(&device->dev_stats_ccnt);
+
+ /*
+ * There is a LOAD-LOAD control dependency between the value of
+ * dev_stats_ccnt and updating the on-disk values, which requires
+ * reading the in-memory counters. Such control dependencies
+ * require explicit read memory barriers.
+ *
+ * This memory barrier pairs with smp_mb__before_atomic in
+ * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
+ * barrier implied by atomic_xchg in
+ * btrfs_dev_stat_read_and_reset.
+ */
+ smp_rmb();
+
ret = update_dev_stat_item(trans, fs_info, device);
if (!ret)
atomic_sub(stats_cnt, &device->dev_stats_ccnt);
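Condensed, the pairing the comment describes looks as follows (kernel context assumed, fragments not compilable in isolation; writer side as in btrfs_dev_stat_inc()/btrfs_dev_stat_set(), reader side as in btrfs_run_dev_stats()):

	/* writer */
	atomic_inc(dev->dev_stat_values + index);
	smp_mb__before_atomic();	/* order the stat store before the ccnt store */
	atomic_inc(&dev->dev_stats_ccnt);

	/* reader, per device */
	stats_cnt = atomic_read(&device->dev_stats_ccnt);
	if (device->dev_stats_valid && stats_cnt) {
		smp_rmb();	/* pairs with the writer's barrier above */
		if (!update_dev_stat_item(trans, fs_info, device))
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}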
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ff15208344a7..28c28eeadff3 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -47,6 +47,12 @@ struct btrfs_pending_bios {
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif
+#define BTRFS_DEV_STATE_WRITEABLE (0)
+#define BTRFS_DEV_STATE_IN_FS_METADATA (1)
+#define BTRFS_DEV_STATE_MISSING (2)
+#define BTRFS_DEV_STATE_REPLACE_TGT (3)
+#define BTRFS_DEV_STATE_FLUSH_SENT (4)
+
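These bit numbers index a single unsigned long bitmap; the conversion applied throughout the volumes.c hunks above follows this before/after pattern (lines taken from the patch itself):

	/* before: one int per flag */
	if (device->writeable)
		fs_devices->rw_devices--;

	/* after: atomic bit ops on device->dev_state */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		fs_devices->rw_devices--;

	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);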
struct btrfs_device {
struct list_head dev_list;
struct list_head dev_alloc_list;
@@ -69,11 +75,7 @@ struct btrfs_device {
/* the mode sent to blkdev_get */
fmode_t mode;
- int writeable;
- int in_fs_metadata;
- int missing;
- int can_discard;
- int is_tgtdev_for_dev_replace;
+ unsigned long dev_state;
blk_status_t last_flush_error;
int flush_bio_sent;
@@ -129,14 +131,12 @@ struct btrfs_device {
struct completion flush_wait;
/* per-device scrub information */
- struct scrub_ctx *scrub_device;
+ struct scrub_ctx *scrub_ctx;
struct btrfs_work work;
struct rcu_head rcu;
- struct work_struct rcu_work;
/* readahead state */
- spinlock_t reada_lock;
atomic_t reada_in_flight;
u64 reada_next;
struct reada_zone *reada_curr_zone;
@@ -489,15 +489,16 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 chunk_offset);
-static inline int btrfs_dev_stats_dirty(struct btrfs_device *dev)
-{
- return atomic_read(&dev->dev_stats_ccnt);
-}
-
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
{
atomic_inc(dev->dev_stat_values + index);
+ /*
+ * This memory barrier orders stores updating statistics before stores
+ * updating dev_stats_ccnt.
+ *
+ * It pairs with smp_rmb() in btrfs_run_dev_stats().
+ */
smp_mb__before_atomic();
atomic_inc(&dev->dev_stats_ccnt);
}
@@ -514,7 +515,13 @@ static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
int ret;
ret = atomic_xchg(dev->dev_stat_values + index, 0);
- smp_mb__before_atomic();
+ /*
+ * atomic_xchg implies a full memory barrier as per atomic_t.txt:
+ * - RMW operations that have a return value are fully ordered;
+ *
+ * This implicit memory barrier is paired with the smp_rmb in
+ * btrfs_run_dev_stats.
+ */
atomic_inc(&dev->dev_stats_ccnt);
return ret;
}
@@ -523,6 +530,12 @@ static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
int index, unsigned long val)
{
atomic_set(dev->dev_stat_values + index, val);
+ /*
+ * This memory barrier orders stores updating statistics before stores
+ * updating dev_stats_ccnt.
+ *
+ * It pairs with smp_rmb() in btrfs_run_dev_stats().
+ */
smp_mb__before_atomic();
atomic_inc(&dev->dev_stats_ccnt);
}
@@ -540,7 +553,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
struct list_head *btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
-
-bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info);
+bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *failing_dev);
#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 2c7e53f9ff1b..de7d072c78ef 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -23,6 +23,7 @@
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/posix_acl_xattr.h>
+#include <linux/iversion.h>
#include "ctree.h"
#include "btrfs_inode.h"
#include "transaction.h"
@@ -267,7 +268,6 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct btrfs_key key;
struct inode *inode = d_inode(dentry);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
int ret = 0;
@@ -336,11 +336,6 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
u32 this_len = sizeof(*di) + name_len + data_len;
unsigned long name_ptr = (unsigned long)(di + 1);
- if (verify_dir_item(fs_info, leaf, slot, di)) {
- ret = -EIO;
- goto err;
- }
-
total_size += name_len + 1;
/*
* We are just looking for how big our buffer needs to
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 17f2dd8fddb8..01a4eab602a3 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -43,6 +43,8 @@ struct workspace {
size_t size;
char *buf;
struct list_head list;
+ ZSTD_inBuffer in_buf;
+ ZSTD_outBuffer out_buf;
};
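Moving the two ZSTD stream buffers off the stack and into the long-lived workspace is what the hunks below apply mechanically; the one obligation it adds (visible in each entry point) is resetting the buffers before use, e.g.:

	workspace->in_buf.src = data_in;
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = srclen;

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = PAGE_SIZE;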
static void zstd_free_workspace(struct list_head *ws)
@@ -94,8 +96,6 @@ static int zstd_compress_pages(struct list_head *ws,
int nr_pages = 0;
struct page *in_page = NULL; /* The current page to read */
struct page *out_page = NULL; /* The current page to write to */
- ZSTD_inBuffer in_buf = { NULL, 0, 0 };
- ZSTD_outBuffer out_buf = { NULL, 0, 0 };
unsigned long tot_in = 0;
unsigned long tot_out = 0;
unsigned long len = *total_out;
@@ -118,9 +118,9 @@ static int zstd_compress_pages(struct list_head *ws,
/* map in the first page of input data */
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- in_buf.src = kmap(in_page);
- in_buf.pos = 0;
- in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ workspace->in_buf.src = kmap(in_page);
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
/* Allocate and map in the output buffer */
@@ -130,14 +130,15 @@ static int zstd_compress_pages(struct list_head *ws,
goto out;
}
pages[nr_pages++] = out_page;
- out_buf.dst = kmap(out_page);
- out_buf.pos = 0;
- out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
while (1) {
size_t ret2;
- ret2 = ZSTD_compressStream(stream, &out_buf, &in_buf);
+ ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
+ &workspace->in_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
ZSTD_getErrorCode(ret2));
@@ -146,22 +147,22 @@ static int zstd_compress_pages(struct list_head *ws,
}
/* Check to see if we are making it bigger */
- if (tot_in + in_buf.pos > 8192 &&
- tot_in + in_buf.pos <
- tot_out + out_buf.pos) {
+ if (tot_in + workspace->in_buf.pos > 8192 &&
+ tot_in + workspace->in_buf.pos <
+ tot_out + workspace->out_buf.pos) {
ret = -E2BIG;
goto out;
}
/* We've reached the end of our output range */
- if (out_buf.pos >= max_out) {
- tot_out += out_buf.pos;
+ if (workspace->out_buf.pos >= max_out) {
+ tot_out += workspace->out_buf.pos;
ret = -E2BIG;
goto out;
}
/* Check if we need more output space */
- if (out_buf.pos == out_buf.size) {
+ if (workspace->out_buf.pos == workspace->out_buf.size) {
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
kunmap(out_page);
@@ -176,19 +177,20 @@ static int zstd_compress_pages(struct list_head *ws,
goto out;
}
pages[nr_pages++] = out_page;
- out_buf.dst = kmap(out_page);
- out_buf.pos = 0;
- out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = min_t(size_t, max_out,
+ PAGE_SIZE);
}
/* We've reached the end of the input */
- if (in_buf.pos >= len) {
- tot_in += in_buf.pos;
+ if (workspace->in_buf.pos >= len) {
+ tot_in += workspace->in_buf.pos;
break;
}
/* Check if we need more input */
- if (in_buf.pos == in_buf.size) {
+ if (workspace->in_buf.pos == workspace->in_buf.size) {
tot_in += PAGE_SIZE;
kunmap(in_page);
put_page(in_page);
@@ -196,15 +198,15 @@ static int zstd_compress_pages(struct list_head *ws,
start += PAGE_SIZE;
len -= PAGE_SIZE;
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- in_buf.src = kmap(in_page);
- in_buf.pos = 0;
- in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ workspace->in_buf.src = kmap(in_page);
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
}
}
while (1) {
size_t ret2;
- ret2 = ZSTD_endStream(stream, &out_buf);
+ ret2 = ZSTD_endStream(stream, &workspace->out_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_endStream returned %d\n",
ZSTD_getErrorCode(ret2));
@@ -212,11 +214,11 @@ static int zstd_compress_pages(struct list_head *ws,
goto out;
}
if (ret2 == 0) {
- tot_out += out_buf.pos;
+ tot_out += workspace->out_buf.pos;
break;
}
- if (out_buf.pos >= max_out) {
- tot_out += out_buf.pos;
+ if (workspace->out_buf.pos >= max_out) {
+ tot_out += workspace->out_buf.pos;
ret = -E2BIG;
goto out;
}
@@ -235,9 +237,9 @@ static int zstd_compress_pages(struct list_head *ws,
goto out;
}
pages[nr_pages++] = out_page;
- out_buf.dst = kmap(out_page);
- out_buf.pos = 0;
- out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
}
if (tot_out >= tot_in) {
@@ -273,8 +275,6 @@ static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long total_out = 0;
- ZSTD_inBuffer in_buf = { NULL, 0, 0 };
- ZSTD_outBuffer out_buf = { NULL, 0, 0 };
stream = ZSTD_initDStream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
@@ -284,18 +284,19 @@ static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
goto done;
}
- in_buf.src = kmap(pages_in[page_in_index]);
- in_buf.pos = 0;
- in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
- out_buf.dst = workspace->buf;
- out_buf.pos = 0;
- out_buf.size = PAGE_SIZE;
+ workspace->out_buf.dst = workspace->buf;
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = PAGE_SIZE;
while (1) {
size_t ret2;
- ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
+ &workspace->in_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
ZSTD_getErrorCode(ret2));
@@ -303,38 +304,38 @@ static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
goto done;
}
buf_start = total_out;
- total_out += out_buf.pos;
- out_buf.pos = 0;
+ total_out += workspace->out_buf.pos;
+ workspace->out_buf.pos = 0;
- ret = btrfs_decompress_buf2page(out_buf.dst, buf_start,
- total_out, disk_start, orig_bio);
+ ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
+ buf_start, total_out, disk_start, orig_bio);
if (ret == 0)
break;
- if (in_buf.pos >= srclen)
+ if (workspace->in_buf.pos >= srclen)
break;
/* Check if we've hit the end of a frame */
if (ret2 == 0)
break;
- if (in_buf.pos == in_buf.size) {
+ if (workspace->in_buf.pos == workspace->in_buf.size) {
kunmap(pages_in[page_in_index++]);
if (page_in_index >= total_pages_in) {
- in_buf.src = NULL;
+ workspace->in_buf.src = NULL;
ret = -EIO;
goto done;
}
srclen -= PAGE_SIZE;
- in_buf.src = kmap(pages_in[page_in_index]);
- in_buf.pos = 0;
- in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
}
}
ret = 0;
zero_fill_bio(orig_bio);
done:
- if (in_buf.src)
+ if (workspace->in_buf.src)
kunmap(pages_in[page_in_index]);
return ret;
}
@@ -348,8 +349,6 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
ZSTD_DStream *stream;
int ret = 0;
size_t ret2;
- ZSTD_inBuffer in_buf = { NULL, 0, 0 };
- ZSTD_outBuffer out_buf = { NULL, 0, 0 };
unsigned long total_out = 0;
unsigned long pg_offset = 0;
char *kaddr;
@@ -364,16 +363,17 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
destlen = min_t(size_t, destlen, PAGE_SIZE);
- in_buf.src = data_in;
- in_buf.pos = 0;
- in_buf.size = srclen;
+ workspace->in_buf.src = data_in;
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = srclen;
- out_buf.dst = workspace->buf;
- out_buf.pos = 0;
- out_buf.size = PAGE_SIZE;
+ workspace->out_buf.dst = workspace->buf;
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = PAGE_SIZE;
ret2 = 1;
- while (pg_offset < destlen && in_buf.pos < in_buf.size) {
+ while (pg_offset < destlen
+ && workspace->in_buf.pos < workspace->in_buf.size) {
unsigned long buf_start;
unsigned long buf_offset;
unsigned long bytes;
@@ -384,7 +384,8 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
ret = -EIO;
goto finish;
}
- ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
+ &workspace->in_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
ZSTD_getErrorCode(ret2));
@@ -393,8 +394,8 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
}
buf_start = total_out;
- total_out += out_buf.pos;
- out_buf.pos = 0;
+ total_out += workspace->out_buf.pos;
+ workspace->out_buf.pos = 0;
if (total_out <= start_byte)
continue;
@@ -405,10 +406,11 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
buf_offset = 0;
bytes = min_t(unsigned long, destlen - pg_offset,
- out_buf.size - buf_offset);
+ workspace->out_buf.size - buf_offset);
kaddr = kmap_atomic(dest_page);
- memcpy(kaddr + pg_offset, out_buf.dst + buf_offset, bytes);
+ memcpy(kaddr + pg_offset, workspace->out_buf.dst + buf_offset,
+ bytes);
kunmap_atomic(kaddr);
pg_offset += bytes;
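
The net effect of the fs/btrfs/zstd.c hunks above is that the ZSTD streaming buffers move off each function's stack and into the per-thread workspace, so they no longer have to be re-declared in every caller. A minimal sketch of the resulting structure, with field names inferred from the accessors used above (the struct definition itself is outside this excerpt):

struct workspace {
	void *mem;		/* backing memory for the ZSTD_{C,D}Stream */
	size_t size;
	char *buf;		/* one-page bounce buffer used on decompress */
	struct list_head list;
	ZSTD_inBuffer in_buf;	/* formerly a local in zstd_compress_pages() */
	ZSTD_outBuffer out_buf;	/* formerly a local, now lives with the workspace */
};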
diff --git a/fs/buffer.c b/fs/buffer.c
index 0736a6a2e2f0..9a73924db22f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -53,13 +53,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
-void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
-{
- bh->b_end_io = handler;
- bh->b_private = private;
-}
-EXPORT_SYMBOL(init_buffer);
-
inline void touch_buffer(struct buffer_head *bh)
{
trace_block_touch_buffer(bh);
@@ -922,7 +915,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
do {
if (!buffer_mapped(bh)) {
- init_buffer(bh, NULL, NULL);
+ bh->b_end_io = NULL;
+ bh->b_private = NULL;
bh->b_bdev = bdev;
bh->b_blocknr = block;
if (uptodate)
@@ -3014,7 +3008,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
void guard_bio_eod(int op, struct bio *bio)
{
sector_t maxsector;
- struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ struct bio_vec *bvec = bio_last_bvec_all(bio);
unsigned truncated_bytes;
struct hd_struct *part;
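
guard_bio_eod() now goes through the bio_last_bvec_all() accessor rather than open-coding the index arithmetic. Judging from the expression it replaces, the helper is presumably little more than:

/* Assumed definition, matching the open-coded form removed above */
static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}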
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 1ee54ffd3a24..7edbd0679952 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -31,7 +31,7 @@ static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
size_t, loff_t *);
-static unsigned int cachefiles_daemon_poll(struct file *,
+static __poll_t cachefiles_daemon_poll(struct file *,
struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
@@ -291,11 +291,11 @@ found_command:
* poll for culling state
* - use POLLOUT to indicate culling state
*/
-static unsigned int cachefiles_daemon_poll(struct file *file,
+static __poll_t cachefiles_daemon_poll(struct file *file,
struct poll_table_struct *poll)
{
struct cachefiles_cache *cache = file->private_data;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &cache->daemon_pollwq, poll);
mask = 0;
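
The cachefiles change is part of the tree-wide conversion of poll methods from unsigned int to the sparse-checked __poll_t type, which lets static analysis catch poll masks being mixed with ordinary integers. A minimal sketch of the pattern, using a hypothetical device:

struct example_dev {			/* hypothetical driver state */
	wait_queue_head_t waitq;
	bool data_ready;
};

static __poll_t example_poll(struct file *file, struct poll_table_struct *pt)
{
	struct example_dev *dev = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &dev->waitq, pt);
	if (dev->data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}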
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 264e9bf83ff3..52095f473464 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -34,7 +34,4 @@ config CEPH_FS_POSIX_ACL
POSIX Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index d5b2e12b5d02..687da62daf4e 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -108,14 +108,13 @@ config CIFS_XATTR
depends on CIFS
help
Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details). CIFS maps the name of
- extended attributes beginning with the user namespace prefix
- to SMB/CIFS EAs. EAs are stored on Windows servers without the
- user namespace prefix, but their names are seen by Linux cifs clients
- prefaced by the user namespace prefix. The system namespace
- (used by some filesystems to store ACLs) is not supported at
- this time.
+ the kernel or by users (see the attr(5) manual page for details).
+ CIFS maps the name of extended attributes beginning with the user
+ namespace prefix to SMB/CIFS EAs. EAs are stored on Windows
+ servers without the user namespace prefix, but their names are
+ seen by Linux cifs clients prefaced by the user namespace prefix.
+ The system namespace (used by some filesystems to store ACLs) is
+ not supported at this time.
If unsure, say Y.
@@ -196,6 +195,14 @@ config CIFS_SMB311
This dialect includes improved security negotiation features.
If unsure, say N
+config CIFS_SMB_DIRECT
+ bool "SMB Direct support (Experimental)"
+ depends on CIFS=m && INFINIBAND || CIFS=y && INFINIBAND=y
+ help
+ Enables experimental SMB Direct support for SMB 3.0, 3.02 and 3.1.1.
+ SMB Direct allows transferring SMB packets over RDMA. If unsure,
+ say N.
+
config CIFS_FSCACHE
bool "Provide CIFS client caching support"
depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
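
With CONFIG_CIFS_SMB_DIRECT built in, the transport is requested via the rdma mount option added in the connect.c hunks later in this patch. A usage sketch (server, share, and credentials path are placeholders):

mount -t cifs //server/share /mnt -o vers=3.0,rdma,credentials=/etc/cifs.cred

The option parser rejects rdma for dialects below 3.0, consistent with the help text above.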
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 7134f182720b..7e4a1e2f0696 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -19,3 +19,5 @@ cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
+
+cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index cbb9534b89b4..c7a863219fa3 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -30,6 +30,9 @@
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifsfs.h"
+#ifdef CONFIG_CIFS_SMB_DIRECT
+#include "smbdirect.h"
+#endif
void
cifs_dump_mem(char *label, void *data, int length)
@@ -107,6 +110,32 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
}
#ifdef CONFIG_PROC_FS
+static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
+{
+ __u32 dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
+
+ seq_printf(m, "%s Mounts: %d ", tcon->treeName, tcon->tc_count);
+ if (tcon->nativeFileSystem)
+ seq_printf(m, "Type: %s ", tcon->nativeFileSystem);
+ seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x\n\tPathComponentMax: %d Status: %d",
+ le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
+ le32_to_cpu(tcon->fsAttrInfo.Attributes),
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
+ tcon->tidStatus);
+ if (dev_type == FILE_DEVICE_DISK)
+ seq_puts(m, " type: DISK ");
+ else if (dev_type == FILE_DEVICE_CD_ROM)
+ seq_puts(m, " type: CDROM ");
+ else
+ seq_printf(m, " type: %d ", dev_type);
+ if (tcon->ses->server->ops->dump_share_caps)
+ tcon->ses->server->ops->dump_share_caps(m, tcon);
+
+ if (tcon->need_reconnect)
+ seq_puts(m, "\tDISCONNECTED ");
+ seq_putc(m, '\n');
+}
+
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
{
struct list_head *tmp1, *tmp2, *tmp3;
@@ -115,7 +144,6 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
struct cifs_ses *ses;
struct cifs_tcon *tcon;
int i, j;
- __u32 dev_type;
seq_puts(m,
"Display Internal CIFS Data Structures for Debugging\n"
@@ -152,6 +180,72 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (!server->rdma)
+ goto skip_rdma;
+
+ seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
+ "transport status: %x",
+ server->smbd_conn->protocol,
+ server->smbd_conn->transport_status);
+ seq_printf(m, "\nConn receive_credit_max: %x "
+ "send_credit_target: %x max_send_size: %x",
+ server->smbd_conn->receive_credit_max,
+ server->smbd_conn->send_credit_target,
+ server->smbd_conn->max_send_size);
+ seq_printf(m, "\nConn max_fragmented_recv_size: %x "
+ "max_fragmented_send_size: %x max_receive_size: %x",
+ server->smbd_conn->max_fragmented_recv_size,
+ server->smbd_conn->max_fragmented_send_size,
+ server->smbd_conn->max_receive_size);
+ seq_printf(m, "\nConn keep_alive_interval: %x "
+ "max_readwrite_size: %x rdma_readwrite_threshold: %x",
+ server->smbd_conn->keep_alive_interval,
+ server->smbd_conn->max_readwrite_size,
+ server->smbd_conn->rdma_readwrite_threshold);
+ seq_printf(m, "\nDebug count_get_receive_buffer: %x "
+ "count_put_receive_buffer: %x count_send_empty: %x",
+ server->smbd_conn->count_get_receive_buffer,
+ server->smbd_conn->count_put_receive_buffer,
+ server->smbd_conn->count_send_empty);
+ seq_printf(m, "\nRead Queue count_reassembly_queue: %x "
+ "count_enqueue_reassembly_queue: %x "
+ "count_dequeue_reassembly_queue: %x "
+ "fragment_reassembly_remaining: %x "
+ "reassembly_data_length: %x "
+ "reassembly_queue_length: %x",
+ server->smbd_conn->count_reassembly_queue,
+ server->smbd_conn->count_enqueue_reassembly_queue,
+ server->smbd_conn->count_dequeue_reassembly_queue,
+ server->smbd_conn->fragment_reassembly_remaining,
+ server->smbd_conn->reassembly_data_length,
+ server->smbd_conn->reassembly_queue_length);
+ seq_printf(m, "\nCurrent Credits send_credits: %x "
+ "receive_credits: %x receive_credit_target: %x",
+ atomic_read(&server->smbd_conn->send_credits),
+ atomic_read(&server->smbd_conn->receive_credits),
+ server->smbd_conn->receive_credit_target);
+ seq_printf(m, "\nPending send_pending: %x send_payload_pending:"
+ " %x smbd_send_pending: %x smbd_recv_pending: %x",
+ atomic_read(&server->smbd_conn->send_pending),
+ atomic_read(&server->smbd_conn->send_payload_pending),
+ server->smbd_conn->smbd_send_pending,
+ server->smbd_conn->smbd_recv_pending);
+ seq_printf(m, "\nReceive buffers count_receive_queue: %x "
+ "count_empty_packet_queue: %x",
+ server->smbd_conn->count_receive_queue,
+ server->smbd_conn->count_empty_packet_queue);
+ seq_printf(m, "\nMR responder_resources: %x "
+ "max_frmr_depth: %x mr_type: %x",
+ server->smbd_conn->responder_resources,
+ server->smbd_conn->max_frmr_depth,
+ server->smbd_conn->mr_type);
+ seq_printf(m, "\nMR mr_ready_count: %x mr_used_count: %x",
+ atomic_read(&server->smbd_conn->mr_ready_count),
+ atomic_read(&server->smbd_conn->mr_used_count));
+skip_rdma:
+#endif
seq_printf(m, "\nNumber of credits: %d", server->credits);
i++;
list_for_each(tmp2, &server->smb_ses_list) {
@@ -176,6 +270,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->status);
}
+ if (server->rdma)
+ seq_printf(m, "RDMA\n\t");
seq_printf(m, "TCP status: %d\n\tLocal Users To "
"Server: %d SecMode: 0x%x Req On Wire: %d",
server->tcpStatus, server->srv_count,
@@ -189,35 +285,19 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_puts(m, "\n\tShares:");
j = 0;
+
+ seq_printf(m, "\n\t%d) IPC: ", j);
+ if (ses->tcon_ipc)
+ cifs_debug_tcon(m, ses->tcon_ipc);
+ else
+ seq_puts(m, "none\n");
+
list_for_each(tmp3, &ses->tcon_list) {
tcon = list_entry(tmp3, struct cifs_tcon,
tcon_list);
++j;
- dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
- seq_printf(m, "\n\t%d) %s Mounts: %d ", j,
- tcon->treeName, tcon->tc_count);
- if (tcon->nativeFileSystem) {
- seq_printf(m, "Type: %s ",
- tcon->nativeFileSystem);
- }
- seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x"
- "\n\tPathComponentMax: %d Status: %d",
- le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
- le32_to_cpu(tcon->fsAttrInfo.Attributes),
- le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
- tcon->tidStatus);
- if (dev_type == FILE_DEVICE_DISK)
- seq_puts(m, " type: DISK ");
- else if (dev_type == FILE_DEVICE_CD_ROM)
- seq_puts(m, " type: CDROM ");
- else
- seq_printf(m, " type: %d ", dev_type);
- if (server->ops->dump_share_caps)
- server->ops->dump_share_caps(m, tcon);
-
- if (tcon->need_reconnect)
- seq_puts(m, "\tDISCONNECTED ");
- seq_putc(m, '\n');
+ seq_printf(m, "\n\t%d) ", j);
+ cifs_debug_tcon(m, tcon);
}
seq_puts(m, "\n\tMIDs:\n");
@@ -374,6 +454,45 @@ static const struct file_operations cifs_stats_proc_fops = {
};
#endif /* STATS */
+#ifdef CONFIG_CIFS_SMB_DIRECT
+#define PROC_FILE_DEFINE(name) \
+static ssize_t name##_write(struct file *file, const char __user *buffer, \
+ size_t count, loff_t *ppos) \
+{ \
+ int rc; \
+ rc = kstrtoint_from_user(buffer, count, 10, & name); \
+ if (rc) \
+ return rc; \
+ return count; \
+} \
+static int name##_proc_show(struct seq_file *m, void *v) \
+{ \
+ seq_printf(m, "%d\n", name ); \
+ return 0; \
+} \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, name##_proc_show, NULL); \
+} \
+\
+static const struct file_operations cifs_##name##_proc_fops = { \
+ .open = name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .write = name##_write, \
+}
+
+PROC_FILE_DEFINE(rdma_readwrite_threshold);
+PROC_FILE_DEFINE(smbd_max_frmr_depth);
+PROC_FILE_DEFINE(smbd_keep_alive_interval);
+PROC_FILE_DEFINE(smbd_max_receive_size);
+PROC_FILE_DEFINE(smbd_max_fragmented_recv_size);
+PROC_FILE_DEFINE(smbd_max_send_size);
+PROC_FILE_DEFINE(smbd_send_credit_target);
+PROC_FILE_DEFINE(smbd_receive_credit_max);
+#endif
+
static struct proc_dir_entry *proc_fs_cifs;
static const struct file_operations cifsFYI_proc_fops;
static const struct file_operations cifs_lookup_cache_proc_fops;
@@ -401,6 +520,24 @@ cifs_proc_init(void)
&cifs_security_flags_proc_fops);
proc_create("LookupCacheEnabled", 0, proc_fs_cifs,
&cifs_lookup_cache_proc_fops);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ proc_create("rdma_readwrite_threshold", 0, proc_fs_cifs,
+ &cifs_rdma_readwrite_threshold_proc_fops);
+ proc_create("smbd_max_frmr_depth", 0, proc_fs_cifs,
+ &cifs_smbd_max_frmr_depth_proc_fops);
+ proc_create("smbd_keep_alive_interval", 0, proc_fs_cifs,
+ &cifs_smbd_keep_alive_interval_proc_fops);
+ proc_create("smbd_max_receive_size", 0, proc_fs_cifs,
+ &cifs_smbd_max_receive_size_proc_fops);
+ proc_create("smbd_max_fragmented_recv_size", 0, proc_fs_cifs,
+ &cifs_smbd_max_fragmented_recv_size_proc_fops);
+ proc_create("smbd_max_send_size", 0, proc_fs_cifs,
+ &cifs_smbd_max_send_size_proc_fops);
+ proc_create("smbd_send_credit_target", 0, proc_fs_cifs,
+ &cifs_smbd_send_credit_target_proc_fops);
+ proc_create("smbd_receive_credit_max", 0, proc_fs_cifs,
+ &cifs_smbd_receive_credit_max_proc_fops);
+#endif
}
void
@@ -418,6 +555,16 @@ cifs_proc_clean(void)
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ remove_proc_entry("rdma_readwrite_threshold", proc_fs_cifs);
+ remove_proc_entry("smbd_max_frmr_depth", proc_fs_cifs);
+ remove_proc_entry("smbd_keep_alive_interval", proc_fs_cifs);
+ remove_proc_entry("smbd_max_receive_size", proc_fs_cifs);
+ remove_proc_entry("smbd_max_fragmented_recv_size", proc_fs_cifs);
+ remove_proc_entry("smbd_max_send_size", proc_fs_cifs);
+ remove_proc_entry("smbd_send_credit_target", proc_fs_cifs);
+ remove_proc_entry("smbd_receive_credit_max", proc_fs_cifs);
+#endif
remove_proc_entry("fs/cifs", NULL);
}
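
For reference, PROC_FILE_DEFINE() above stamps out one read/write proc interface per SMB Direct tunable; expanding it by hand for smbd_max_send_size gives roughly the following (the tunables themselves are global ints declared in the SMB Direct code, outside this excerpt):

static ssize_t smbd_max_send_size_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	int rc = kstrtoint_from_user(buffer, count, 10, &smbd_max_send_size);

	return rc ? rc : count;
}

static int smbd_max_send_size_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", smbd_max_send_size);
	return 0;
}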
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index b98436f5c7c7..13a8a77322c9 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1125,7 +1125,7 @@ out:
return rc;
}
-/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
+/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
struct inode *inode, const char *path,
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 68abbb0db608..f2b0a7f124da 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -325,9 +325,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
{
int i;
int rc;
- char password_with_pad[CIFS_ENCPWD_SIZE];
+ char password_with_pad[CIFS_ENCPWD_SIZE] = {0};
- memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
if (password)
strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
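
The memset() can be dropped because a partial initializer in C zero-fills the rest of the object, so the declaration alone clears the whole buffer:

char password_with_pad[CIFS_ENCPWD_SIZE] = {0};	/* every byte zeroed, not just [0] */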
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 31b7565b1617..a7be591d8e18 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -327,6 +327,8 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
default:
seq_puts(s, "(unknown)");
}
+ if (server->rdma)
+ seq_puts(s, ",rdma");
}
static void
@@ -1068,6 +1070,7 @@ const struct file_operations cifs_file_ops = {
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
@@ -1086,6 +1089,7 @@ const struct file_operations cifs_file_strict_ops = {
.flush = cifs_flush,
.mmap = cifs_file_strict_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
@@ -1105,6 +1109,7 @@ const struct file_operations cifs_file_direct_ops = {
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
@@ -1122,6 +1127,7 @@ const struct file_operations cifs_file_nobrl_ops = {
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
@@ -1139,6 +1145,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
.flush = cifs_flush,
.mmap = cifs_file_strict_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
@@ -1157,6 +1164,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 5a10e566f0e6..013ba2aed8d9 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -149,5 +149,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.10"
+#define CIFS_VERSION "2.11"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index b16583594d1a..48f7c197cd2d 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -64,8 +64,8 @@
#define RFC1001_NAME_LEN 15
#define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1)
-/* currently length of NIP6_FMT */
-#define SERVER_NAME_LENGTH 40
+/* maximum length of ip addr as a string (including ipv6 and sctp) */
+#define SERVER_NAME_LENGTH 80
#define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
/* echo interval in seconds */
@@ -230,8 +230,14 @@ struct smb_version_operations {
__u64 (*get_next_mid)(struct TCP_Server_Info *);
/* data offset from read response message */
unsigned int (*read_data_offset)(char *);
- /* data length from read response message */
- unsigned int (*read_data_length)(char *);
+ /*
+ * Data length from read response message
+ * When in_remaining is true, the returned data length is in
+ * message field DataRemaining for an out-of-band data read (e.g. through
+ * Memory Registration RDMA write in SMBD).
+ * Otherwise, the returned data length is in message field DataLength.
+ */
+ unsigned int (*read_data_length)(char *, bool in_remaining);
/* map smb to linux error */
int (*map_error)(char *, bool);
/* find mid corresponding to the response message */
@@ -532,6 +538,7 @@ struct smb_vol {
bool nopersistent:1;
bool resilient:1; /* noresilient not required since not forced for CA */
bool domainauto:1;
+ bool rdma:1;
unsigned int rsize;
unsigned int wsize;
bool sockopt_tcp_nodelay:1;
@@ -648,6 +655,10 @@ struct TCP_Server_Info {
bool sec_kerberos; /* supports plain Kerberos */
bool sec_mskerberos; /* supports legacy MS Kerberos */
bool large_buf; /* is current buffer large? */
+ /* use SMBD connection instead of socket */
+ bool rdma;
+ /* point to the SMBD connection if RDMA is used instead of socket */
+ struct smbd_connection *smbd_conn;
struct delayed_work echo; /* echo ping workqueue job */
char *smallbuf; /* pointer to current "small" buffer */
char *bigbuf; /* pointer to current "big" buffer */
@@ -822,12 +833,12 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
struct cifs_ses {
struct list_head smb_ses_list;
struct list_head tcon_list;
+ struct cifs_tcon *tcon_ipc;
struct mutex session_mutex;
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
enum statusEnum status;
unsigned overrideSecFlg; /* if non-zero override global sec flags */
- __u32 ipc_tid; /* special tid for connection to IPC share */
char *serverOS; /* name of operating system underlying server */
char *serverNOS; /* name of network operating system of server */
char *serverDomain; /* security realm of server */
@@ -835,8 +846,7 @@ struct cifs_ses {
kuid_t linux_uid; /* overriding owner of files on the mount */
kuid_t cred_uid; /* owner of credentials */
unsigned int capabilities;
- char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
- TCP names - will ipv6 and sctp addresses fit? */
+ char serverName[SERVER_NAME_LEN_WITH_NULL];
char *user_name; /* must not be null except during init of sess
and after mount option parsing we fill it */
char *domainName;
@@ -931,7 +941,9 @@ struct cifs_tcon {
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
FILE_SYSTEM_UNIX_INFO fsUnixInfo;
- bool ipc:1; /* set if connection to IPC$ eg for RPC/PIPES */
+ bool ipc:1; /* set if connection to IPC$ share (always also pipe) */
+ bool pipe:1; /* set if connection to pipe share */
+ bool print:1; /* set if connection to printer share */
bool retry:1;
bool nocase:1;
bool seal:1; /* transport encryption for this mounted share */
@@ -944,7 +956,6 @@ struct cifs_tcon {
bool need_reopen_files:1; /* need to reopen tcon file handles */
bool use_resilient:1; /* use resilient instead of durable handles */
bool use_persistent:1; /* use persistent instead of durable handles */
- bool print:1; /* set if connection to printer share */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
@@ -1147,6 +1158,9 @@ struct cifs_readdata {
struct cifs_readdata *rdata,
struct iov_iter *iter);
struct kvec iov[2];
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ struct smbd_mr *mr;
+#endif
unsigned int pagesz;
unsigned int tailsz;
unsigned int credits;
@@ -1169,6 +1183,9 @@ struct cifs_writedata {
pid_t pid;
unsigned int bytes;
int result;
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ struct smbd_mr *mr;
+#endif
unsigned int pagesz;
unsigned int tailsz;
unsigned int credits;
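
The two implementations of the new read_data_length() signature appear in the smb1ops.c and smb2ops.c hunks below. The caller-side pattern that drives the in_remaining flag, taken from the cifssmb.c hunk later in this patch, is:

	bool use_rdma_mr = false;
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;	/* payload already placed by RDMA write */
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);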
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 4143c9dec463..93d565186698 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -106,6 +106,10 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */, const int flags,
struct kvec * /* resp vec */);
+extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses,
+ struct kvec *pkvec, int nvec_to_send,
+ int *pbuftype, const int flags,
+ struct kvec *presp);
extern int SendReceiveBlockingLock(const unsigned int xid,
struct cifs_tcon *ptcon,
struct smb_hdr *in_buf ,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 35dc5bf01ee2..4e0922d24eb2 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -43,6 +43,7 @@
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "fscache.h"
+#include "smbdirect.h"
#ifdef CONFIG_CIFS_POSIX
static struct {
@@ -1454,6 +1455,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
struct cifs_readdata *rdata = mid->callback_data;
char *buf = server->smallbuf;
unsigned int buflen = get_rfc1002_length(buf) + 4;
+ bool use_rdma_mr = false;
cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
__func__, mid->mid, rdata->offset, rdata->bytes);
@@ -1542,8 +1544,11 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
rdata->iov[0].iov_base, server->total_read);
/* how much data is in the response? */
- data_len = server->ops->read_data_length(buf);
- if (data_offset + data_len > buflen) {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ use_rdma_mr = rdata->mr;
+#endif
+ data_len = server->ops->read_data_length(buf, use_rdma_mr);
+ if (!use_rdma_mr && (data_offset + data_len > buflen)) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
@@ -1923,6 +1928,12 @@ cifs_writedata_release(struct kref *refcount)
{
struct cifs_writedata *wdata = container_of(refcount,
struct cifs_writedata, refcount);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (wdata->mr) {
+ smbd_deregister_mr(wdata->mr);
+ wdata->mr = NULL;
+ }
+#endif
if (wdata->cfile)
cifsFileInfo_put(wdata->cfile);
@@ -4822,10 +4833,11 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
*target_nodes = NULL;
cifs_dbg(FYI, "In GetDFSRefer the path %s\n", search_name);
- if (ses == NULL)
+ if (ses == NULL || ses->tcon_ipc == NULL)
return -ENODEV;
+
getDFSRetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, NULL, (void **) &pSMB,
+ rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
@@ -4833,7 +4845,7 @@ getDFSRetry:
/* server pointer checked in called function,
but should never be null here anyway */
pSMB->hdr.Mid = get_next_mid(ses->server);
- pSMB->hdr.Tid = ses->ipc_tid;
+ pSMB->hdr.Tid = ses->tcon_ipc->tid;
pSMB->hdr.Uid = ses->Suid;
if (ses->capabilities & CAP_STATUS32)
pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0bfc2280436d..a726f524fb84 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -44,7 +44,6 @@
#include <net/ipv6.h>
#include <linux/parser.h>
#include <linux/bvec.h>
-
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
@@ -56,6 +55,7 @@
#include "rfc1002pdu.h"
#include "fscache.h"
#include "smb2proto.h"
+#include "smbdirect.h"
#define CIFS_PORT 445
#define RFC1001_PORT 139
@@ -92,7 +92,7 @@ enum {
Opt_multiuser, Opt_sloppy, Opt_nosharesock,
Opt_persistent, Opt_nopersistent,
Opt_resilient, Opt_noresilient,
- Opt_domainauto,
+ Opt_domainauto, Opt_rdma,
/* Mount options which take numeric value */
Opt_backupuid, Opt_backupgid, Opt_uid,
@@ -183,6 +183,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_resilient, "resilienthandles"},
{ Opt_noresilient, "noresilienthandles"},
{ Opt_domainauto, "domainauto"},
+ { Opt_rdma, "rdma"},
{ Opt_backupuid, "backupuid=%s" },
{ Opt_backupgid, "backupgid=%s" },
@@ -353,11 +354,12 @@ cifs_reconnect(struct TCP_Server_Info *server)
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
ses->need_reconnect = true;
- ses->ipc_tid = 0;
list_for_each(tmp2, &ses->tcon_list) {
tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
tcon->need_reconnect = true;
}
+ if (ses->tcon_ipc)
+ ses->tcon_ipc->need_reconnect = true;
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -405,7 +407,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* we should try only the port we connected to before */
mutex_lock(&server->srv_mutex);
- rc = generic_ip_connect(server);
+ if (cifs_rdma_enabled(server))
+ rc = smbd_reconnect(server);
+ else
+ rc = generic_ip_connect(server);
if (rc) {
cifs_dbg(FYI, "reconnect error %d\n", rc);
mutex_unlock(&server->srv_mutex);
@@ -538,8 +543,10 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
if (server_unresponsive(server))
return -ECONNABORTED;
-
- length = sock_recvmsg(server->ssocket, smb_msg, 0);
+ if (cifs_rdma_enabled(server) && server->smbd_conn)
+ length = smbd_recv(server->smbd_conn, smb_msg);
+ else
+ length = sock_recvmsg(server->ssocket, smb_msg, 0);
if (server->tcpStatus == CifsExiting)
return -ESHUTDOWN;
@@ -700,7 +707,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
wake_up_all(&server->request_q);
/* give those requests time to exit */
msleep(125);
-
+ if (cifs_rdma_enabled(server) && server->smbd_conn) {
+ smbd_destroy(server->smbd_conn);
+ server->smbd_conn = NULL;
+ }
if (server->ssocket) {
sock_release(server->ssocket);
server->ssocket = NULL;
@@ -1550,6 +1560,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_domainauto:
vol->domainauto = true;
break;
+ case Opt_rdma:
+ vol->rdma = true;
+ break;
/* Numeric Values */
case Opt_backupuid:
@@ -1707,7 +1720,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
tmp_end++;
if (!(tmp_end < end && tmp_end[1] == delim)) {
/* No it is not. Set the password to NULL */
- kfree(vol->password);
+ kzfree(vol->password);
vol->password = NULL;
break;
}
@@ -1745,7 +1758,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
options = end;
}
- kfree(vol->password);
+ kzfree(vol->password);
/* Now build new password string */
temp_len = strlen(value);
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
@@ -1951,6 +1964,19 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
goto cifs_parse_mount_err;
}
+ if (vol->rdma && vol->vals->protocol_id < SMB30_PROT_ID) {
+ cifs_dbg(VFS, "SMB Direct requires Version >=3.0\n");
+ goto cifs_parse_mount_err;
+ }
+
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (vol->rdma && vol->sign) {
+ cifs_dbg(VFS, "Currently SMB direct doesn't support signing."
+ " This is being fixed\n");
+ goto cifs_parse_mount_err;
+ }
+#endif
+
#ifndef CONFIG_KEYS
/* Multiuser mounts require CONFIG_KEYS support */
if (vol->multiuser) {
@@ -2162,6 +2188,9 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
if (server->echo_interval != vol->echo_interval * HZ)
return 0;
+ if (server->rdma != vol->rdma)
+ return 0;
+
return 1;
}
@@ -2260,6 +2289,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
+ tcp_ses->rdma = volume_info->rdma;
tcp_ses->in_flight = 0;
tcp_ses->credits = 1;
init_waitqueue_head(&tcp_ses->response_q);
@@ -2297,13 +2327,29 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->echo_interval = volume_info->echo_interval * HZ;
else
tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
-
+ if (tcp_ses->rdma) {
+#ifndef CONFIG_CIFS_SMB_DIRECT
+ cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
+ rc = -ENOENT;
+ goto out_err_crypto_release;
+#endif
+ tcp_ses->smbd_conn = smbd_get_connection(
+ tcp_ses, (struct sockaddr *)&volume_info->dstaddr);
+ if (tcp_ses->smbd_conn) {
+ cifs_dbg(VFS, "RDMA transport established\n");
+ rc = 0;
+ goto smbd_connected;
+ } else {
+ rc = -ENOENT;
+ goto out_err_crypto_release;
+ }
+ }
rc = ip_connect(tcp_ses);
if (rc < 0) {
cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
goto out_err_crypto_release;
}
-
+smbd_connected:
/*
* since we're in a cifs function already, we know that
* this will succeed. No need for try_module_get().
@@ -2381,6 +2427,93 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
return 1;
}
+/**
+ * cifs_setup_ipc - helper to setup the IPC tcon for the session
+ *
+ * A new IPC connection is made and stored in the session
+ * tcon_ipc. The IPC tcon has the same lifetime as the session.
+ */
+static int
+cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
+{
+ int rc = 0, xid;
+ struct cifs_tcon *tcon;
+ struct nls_table *nls_codepage;
+ char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
+ bool seal = false;
+
+ /*
+ * If the mount request that resulted in the creation of the
+ * session requires encryption, force IPC to be encrypted too.
+ */
+ if (volume_info->seal) {
+ if (ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
+ seal = true;
+ else {
+ cifs_dbg(VFS,
+ "IPC: server doesn't support encryption\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ tcon = tconInfoAlloc();
+ if (tcon == NULL)
+ return -ENOMEM;
+
+ snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
+
+ /* cannot fail */
+ nls_codepage = load_nls_default();
+
+ xid = get_xid();
+ tcon->ses = ses;
+ tcon->ipc = true;
+ tcon->seal = seal;
+ rc = ses->server->ops->tree_connect(xid, ses, unc, tcon, nls_codepage);
+ free_xid(xid);
+
+ if (rc) {
+ cifs_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
+ tconInfoFree(tcon);
+ goto out;
+ }
+
+ cifs_dbg(FYI, "IPC tcon rc = %d ipc tid = %d\n", rc, tcon->tid);
+
+ ses->tcon_ipc = tcon;
+out:
+ unload_nls(nls_codepage);
+ return rc;
+}
+
+/**
+ * cifs_free_ipc - helper to release the session IPC tcon
+ *
+ * Needs to be called every time a session is destroyed
+ */
+static int
+cifs_free_ipc(struct cifs_ses *ses)
+{
+ int rc = 0, xid;
+ struct cifs_tcon *tcon = ses->tcon_ipc;
+
+ if (tcon == NULL)
+ return 0;
+
+ if (ses->server->ops->tree_disconnect) {
+ xid = get_xid();
+ rc = ses->server->ops->tree_disconnect(xid, tcon);
+ free_xid(xid);
+ }
+
+ if (rc)
+ cifs_dbg(FYI, "failed to disconnect IPC tcon (rc=%d)\n", rc);
+
+ tconInfoFree(tcon);
+ ses->tcon_ipc = NULL;
+ return rc;
+}
+
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
{
@@ -2421,6 +2554,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
ses->status = CifsExiting;
spin_unlock(&cifs_tcp_ses_lock);
+ cifs_free_ipc(ses);
+
if (ses->status == CifsExiting && server->ops->logoff) {
xid = get_xid();
rc = server->ops->logoff(xid, ses);
@@ -2569,6 +2704,13 @@ cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
}
#endif /* CONFIG_KEYS */
+/**
+ * cifs_get_smb_ses - get a session matching @volume_info data from @server
+ *
+ * This function assumes it is being called from cifs_mount() where we
+ * already got a server reference (server refcount +1). See
+ * cifs_get_tcon() for refcount explanations.
+ */
static struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
@@ -2665,6 +2807,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
spin_unlock(&cifs_tcp_ses_lock);
free_xid(xid);
+
+ cifs_setup_ipc(ses, volume_info);
+
return ses;
get_ses_fail:
@@ -2709,8 +2854,16 @@ void
cifs_put_tcon(struct cifs_tcon *tcon)
{
unsigned int xid;
- struct cifs_ses *ses = tcon->ses;
+ struct cifs_ses *ses;
+
+ /*
+ * IPC tcons share the lifetime of their session and are
+ * destroyed in the session put function
+ */
+ if (tcon == NULL || tcon->ipc)
+ return;
+ ses = tcon->ses;
cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
if (--tcon->tc_count > 0) {
@@ -2731,6 +2884,26 @@ cifs_put_tcon(struct cifs_tcon *tcon)
cifs_put_smb_ses(ses);
}
+/**
+ * cifs_get_tcon - get a tcon matching @volume_info data from @ses
+ *
+ * - tcon refcount is the number of mount points using the tcon.
+ * - ses refcount is the number of tcon using the session.
+ *
+ * 1. This function assumes it is being called from cifs_mount() where
+ * we already got a session reference (ses refcount +1).
+ *
+ * 2. Since we're in the context of adding a mount point, the end
+ * result should be either:
+ *
+ * a) a new tcon already allocated with refcount=1 (1 mount point) and
+ * its session refcount incremented (1 new tcon). This +1 was
+ * already done in (1).
+ *
+ * b) an existing tcon with refcount+1 (add a mount point to it) and
+ * identical ses refcount (no new tcon). Because of (1) we need to
+ * decrement the ses refcount.
+ */
static struct cifs_tcon *
cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
{
@@ -2739,8 +2912,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
tcon = cifs_find_tcon(ses, volume_info);
if (tcon) {
+ /*
+ * tcon already has its refcount incremented, but we need to
+ * decrement the extra ses reference taken by the caller (case b)
+ */
cifs_dbg(FYI, "Found match on UNC path\n");
- /* existing tcon already has a reference */
cifs_put_smb_ses(ses);
return tcon;
}
@@ -2986,39 +3162,17 @@ get_dfs_path(const unsigned int xid, struct cifs_ses *ses, const char *old_path,
const struct nls_table *nls_codepage, unsigned int *num_referrals,
struct dfs_info3_param **referrals, int remap)
{
- char *temp_unc;
int rc = 0;
- if (!ses->server->ops->tree_connect || !ses->server->ops->get_dfs_refer)
+ if (!ses->server->ops->get_dfs_refer)
return -ENOSYS;
*num_referrals = 0;
*referrals = NULL;
- if (ses->ipc_tid == 0) {
- temp_unc = kmalloc(2 /* for slashes */ +
- strnlen(ses->serverName, SERVER_NAME_LEN_WITH_NULL * 2)
- + 1 + 4 /* slash IPC$ */ + 2, GFP_KERNEL);
- if (temp_unc == NULL)
- return -ENOMEM;
- temp_unc[0] = '\\';
- temp_unc[1] = '\\';
- strcpy(temp_unc + 2, ses->serverName);
- strcpy(temp_unc + 2 + strlen(ses->serverName), "\\IPC$");
- rc = ses->server->ops->tree_connect(xid, ses, temp_unc, NULL,
- nls_codepage);
- cifs_dbg(FYI, "Tcon rc = %d ipc_tid = %d\n", rc, ses->ipc_tid);
- kfree(temp_unc);
- }
- if (rc == 0)
- rc = ses->server->ops->get_dfs_refer(xid, ses, old_path,
- referrals, num_referrals,
- nls_codepage, remap);
- /*
- * BB - map targetUNCs to dfs_info3 structures, here or in
- * ses->server->ops->get_dfs_refer.
- */
-
+ rc = ses->server->ops->get_dfs_refer(xid, ses, old_path,
+ referrals, num_referrals,
+ nls_codepage, remap);
return rc;
}
@@ -3783,7 +3937,7 @@ try_mount_again:
tcon->unix_ext = 0; /* server does not support them */
/* do not care if a following call succeed - informational */
- if (!tcon->ipc && server->ops->qfs_tcon)
+ if (!tcon->pipe && server->ops->qfs_tcon)
server->ops->qfs_tcon(xid, tcon);
cifs_sb->wsize = server->ops->negotiate_wsize(tcon, volume_info);
@@ -3913,8 +4067,7 @@ out:
}
/*
- * Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon
- * pointer may be NULL.
+ * Issue a TREE_CONNECT request.
*/
int
CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
@@ -3950,7 +4103,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
- if (!tcon || (ses->server->sec_mode & SECMODE_USER)) {
+ if (tcon->pipe || (ses->server->sec_mode & SECMODE_USER)) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
*bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
@@ -4022,7 +4175,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
0);
/* above now done in SendReceive */
- if ((rc == 0) && (tcon != NULL)) {
+ if (rc == 0) {
bool is_unicode;
tcon->tidStatus = CifsGood;
@@ -4042,7 +4195,8 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
(bcc_ptr[2] == 'C')) {
cifs_dbg(FYI, "IPC connection\n");
- tcon->ipc = 1;
+ tcon->ipc = true;
+ tcon->pipe = true;
}
} else if (length == 2) {
if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
@@ -4069,9 +4223,6 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
else
tcon->Flags = 0;
cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
- } else if ((rc == 0) && tcon == NULL) {
- /* all we need to save for IPC$ connection */
- ses->ipc_tid = smb_buffer_response->Tid;
}
cifs_buf_release(smb_buffer);
@@ -4235,7 +4386,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
out:
kfree(vol_info->username);
- kfree(vol_info->password);
+ kzfree(vol_info->password);
kfree(vol_info);
return tcon;
@@ -4387,7 +4538,7 @@ cifs_prune_tlinks(struct work_struct *work)
struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
prune_tlinks.work);
struct rb_root *root = &cifs_sb->tlink_tree;
- struct rb_node *node = rb_first(root);
+ struct rb_node *node;
struct rb_node *tmp;
struct tcon_link *tlink;
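
The reconnect and receive paths above branch on cifs_rdma_enabled(); that predicate is defined elsewhere in this series, presumably as little more than a check of the flag copied from the mount options:

/* Assumed helper: true when the mount requested the SMB Direct transport */
static inline bool cifs_rdma_enabled(struct TCP_Server_Info *server)
{
	return server->rdma;
}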
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index df9f682708c6..7cee97b93a61 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -42,7 +42,7 @@
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
-
+#include "smbdirect.h"
static inline int cifs_convert_flags(unsigned int flags)
{
@@ -2902,7 +2902,12 @@ cifs_readdata_release(struct kref *refcount)
{
struct cifs_readdata *rdata = container_of(refcount,
struct cifs_readdata, refcount);
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (rdata->mr) {
+ smbd_deregister_mr(rdata->mr);
+ rdata->mr = NULL;
+ }
+#endif
if (rdata->cfile)
cifsFileInfo_put(rdata->cfile);
@@ -3031,6 +3036,10 @@ uncached_fill_pages(struct TCP_Server_Info *server,
}
if (iter)
result = copy_page_from_iter(page, 0, n, iter);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ else if (rdata->mr)
+ result = n;
+#endif
else
result = cifs_read_page_from_socket(server, page, n);
if (result < 0)
@@ -3471,20 +3480,18 @@ static const struct vm_operations_struct cifs_file_vm_ops = {
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
- int rc, xid;
+ int xid, rc = 0;
struct inode *inode = file_inode(file);
xid = get_xid();
- if (!CIFS_CACHE_READ(CIFS_I(inode))) {
+ if (!CIFS_CACHE_READ(CIFS_I(inode)))
rc = cifs_zap_mapping(inode);
- if (rc)
- return rc;
- }
-
- rc = generic_file_mmap(file, vma);
- if (rc == 0)
+ if (!rc)
+ rc = generic_file_mmap(file, vma);
+ if (!rc)
vma->vm_ops = &cifs_file_vm_ops;
+
free_xid(xid);
return rc;
}
@@ -3494,16 +3501,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
int rc, xid;
xid = get_xid();
+
rc = cifs_revalidate_file(file);
- if (rc) {
+ if (rc)
cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
rc);
- free_xid(xid);
- return rc;
- }
- rc = generic_file_mmap(file, vma);
- if (rc == 0)
+ if (!rc)
+ rc = generic_file_mmap(file, vma);
+ if (!rc)
vma->vm_ops = &cifs_file_vm_ops;
+
free_xid(xid);
return rc;
}
@@ -3600,6 +3607,10 @@ readpages_fill_pages(struct TCP_Server_Info *server,
if (iter)
result = copy_page_from_iter(page, 0, n, iter);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ else if (rdata->mr)
+ result = n;
+#endif
else
result = cifs_read_page_from_socket(server, page, n);
if (result < 0)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ecb99079363a..8f9a8cc7cc62 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1049,7 +1049,7 @@ iget_no_retry:
tcon->resource_id = CIFS_I(inode)->uniqueid;
#endif
- if (rc && tcon->ipc) {
+ if (rc && tcon->pipe) {
cifs_dbg(FYI, "ipc connection - fake read inode\n");
spin_lock(&inode->i_lock);
inode->i_mode |= S_IFDIR;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index eea93ac15ef0..a0dbced4a45c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -98,14 +98,11 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree(buf_to_free->serverOS);
kfree(buf_to_free->serverDomain);
kfree(buf_to_free->serverNOS);
- if (buf_to_free->password) {
- memset(buf_to_free->password, 0, strlen(buf_to_free->password));
- kfree(buf_to_free->password);
- }
+ kzfree(buf_to_free->password);
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
- kfree(buf_to_free->auth_key.response);
- kfree(buf_to_free);
+ kzfree(buf_to_free->auth_key.response);
+ kzfree(buf_to_free);
}
struct cifs_tcon *
@@ -136,10 +133,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
}
atomic_dec(&tconInfoAllocCount);
kfree(buf_to_free->nativeFileSystem);
- if (buf_to_free->password) {
- memset(buf_to_free->password, 0, strlen(buf_to_free->password));
- kfree(buf_to_free->password);
- }
+ kzfree(buf_to_free->password);
kfree(buf_to_free);
}
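
The kfree()-to-kzfree() conversions here and in connect.c make sure credential buffers are scrubbed before the slab can hand the memory out again. kzfree() behaves roughly like the sketch below; note it clears the full allocation (ksize()), unlike the removed code, which only cleared strlen() bytes:

void kzfree_like(void *p)
{
	if (!p)
		return;
	memset(p, 0, ksize(p));	/* whole allocation, not just the string */
	kfree(p);
}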
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index a723df3e0197..3d495e440c87 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -87,9 +87,11 @@ cifs_read_data_offset(char *buf)
}
static unsigned int
-cifs_read_data_length(char *buf)
+cifs_read_data_length(char *buf, bool in_remaining)
{
READ_RSP *rsp = (READ_RSP *)buf;
+ /* Reading remaining data for SMB1 packets is a bug */
+ WARN_ON(in_remaining);
return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
le16_to_cpu(rsp->DataLength);
}
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index b4b1f0305f29..12af5dba742b 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -74,7 +74,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
nr_ioctl_req.Reserved = 0;
rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
- true /* is_fsctl */, false /* use_ipc */,
+ true /* is_fsctl */,
(char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
NULL, NULL /* no return info */);
if (rc == -EOPNOTSUPP) {
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 7b08a1446a7f..76d03abaa38c 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -578,7 +578,7 @@ smb2_is_valid_lease_break(char *buffer)
bool
smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
{
- struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer;
+ struct smb2_oplock_break_rsp *rsp = (struct smb2_oplock_break_rsp *)buffer;
struct list_head *tmp, *tmp1, *tmp2;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index ed88ab8a4774..eb68e2fcc500 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -32,6 +32,7 @@
#include "smb2status.h"
#include "smb2glob.h"
#include "cifs_ioctl.h"
+#include "smbdirect.h"
static int
change_conf(struct TCP_Server_Info *server)
@@ -250,7 +251,11 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified wsize, or default */
wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
wsize = min_t(unsigned int, wsize, server->max_write);
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->rdma)
+ wsize = min_t(unsigned int,
+ wsize, server->smbd_conn->max_readwrite_size);
+#endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
@@ -266,6 +271,11 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified rsize, or default */
rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
rsize = min_t(unsigned int, rsize, server->max_read);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->rdma)
+ rsize = min_t(unsigned int,
+ rsize, server->smbd_conn->max_readwrite_size);
+#endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
@@ -283,7 +293,6 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
- false /* use_ipc */,
NULL /* no data input */, 0 /* no data input */,
(char **)&out_buf, &ret_data_len);
if (rc != 0)
@@ -782,7 +791,6 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
- false /* use_ipc */,
NULL, 0 /* no input */,
(char **)&res_key, &ret_data_len);
@@ -848,8 +856,7 @@ smb2_copychunk_range(const unsigned int xid,
/* Request server copy to target from src identified by key */
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
- true /* is_fsctl */, false /* use_ipc */,
- (char *)pcchunk,
+ true /* is_fsctl */, (char *)pcchunk,
sizeof(struct copychunk_ioctl), (char **)&retbuf,
&ret_data_len);
if (rc == 0) {
@@ -947,9 +954,13 @@ smb2_read_data_offset(char *buf)
}
static unsigned int
-smb2_read_data_length(char *buf)
+smb2_read_data_length(char *buf, bool in_remaining)
{
struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
+
+ if (in_remaining)
+ return le32_to_cpu(rsp->DataRemaining);
+
return le32_to_cpu(rsp->DataLength);
}
@@ -1006,7 +1017,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
- true /* is_fctl */, false /* use_ipc */,
+ true /* is_fctl */,
&setsparse, 1, NULL, NULL);
if (rc) {
tcon->broken_sparse_sup = true;
@@ -1077,7 +1088,7 @@ smb2_duplicate_extents(const unsigned int xid,
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid,
FSCTL_DUPLICATE_EXTENTS_TO_FILE,
- true /* is_fsctl */, false /* use_ipc */,
+ true /* is_fsctl */,
(char *)&dup_ext_buf,
sizeof(struct duplicate_extents_to_file),
NULL,
@@ -1112,7 +1123,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SET_INTEGRITY_INFORMATION,
- true /* is_fsctl */, false /* use_ipc */,
+ true /* is_fsctl */,
(char *)&integr_info,
sizeof(struct fsctl_set_integrity_information_req),
NULL,
@@ -1132,7 +1143,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SRV_ENUMERATE_SNAPSHOTS,
- true /* is_fsctl */, false /* use_ipc */,
+ true /* is_fsctl */,
NULL, 0 /* no input data */,
(char **)&retbuf,
&ret_data_len);
@@ -1351,16 +1362,20 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name);
/*
- * Use any tcon from the current session. Here, the first one.
+ * Try to use the IPC tcon, otherwise fall back to any available tcon
*/
- spin_lock(&cifs_tcp_ses_lock);
- tcon = list_first_entry_or_null(&ses->tcon_list, struct cifs_tcon,
- tcon_list);
- if (tcon)
- tcon->tc_count++;
- spin_unlock(&cifs_tcp_ses_lock);
+ tcon = ses->tcon_ipc;
+ if (tcon == NULL) {
+ spin_lock(&cifs_tcp_ses_lock);
+ tcon = list_first_entry_or_null(&ses->tcon_list,
+ struct cifs_tcon,
+ tcon_list);
+ if (tcon)
+ tcon->tc_count++;
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
- if (!tcon) {
+ if (tcon == NULL) {
cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
ses);
rc = -ENOTCONN;
@@ -1389,20 +1404,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
do {
- /* try first with IPC */
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_DFS_GET_REFERRALS,
- true /* is_fsctl */, true /* use_ipc */,
+ true /* is_fsctl */,
(char *)dfs_req, dfs_req_size,
(char **)&dfs_rsp, &dfs_rsp_size);
- if (rc == -ENOTCONN) {
- /* try with normal tcon */
- rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
- FSCTL_DFS_GET_REFERRALS,
- true /* is_fsctl */, false /*use_ipc*/,
- (char *)dfs_req, dfs_req_size,
- (char **)&dfs_rsp, &dfs_rsp_size);
- }
} while (rc == -EAGAIN);
if (rc) {
@@ -1421,7 +1427,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
}
out:
- if (tcon) {
+ if (tcon && !tcon->ipc) {
+ /* ipc tcons are not refcounted */
spin_lock(&cifs_tcp_ses_lock);
tcon->tc_count--;
spin_unlock(&cifs_tcp_ses_lock);
@@ -1713,8 +1720,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
- true /* is_fctl */, false /* use_ipc */,
- (char *)&fsctl_buf,
+ true /* is_fctl */, (char *)&fsctl_buf,
sizeof(struct file_zero_data_information), NULL, NULL);
free_xid(xid);
return rc;
@@ -1748,8 +1754,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
- true /* is_fctl */, false /* use_ipc */,
- (char *)&fsctl_buf,
+ true /* is_fctl */, (char *)&fsctl_buf,
sizeof(struct file_zero_data_information), NULL, NULL);
free_xid(xid);
return rc;
@@ -2411,6 +2416,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
struct iov_iter iter;
struct kvec iov;
int length;
+ bool use_rdma_mr = false;
if (shdr->Command != SMB2_READ) {
cifs_dbg(VFS, "only big read responses are supported\n");
@@ -2437,7 +2443,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
}
data_offset = server->ops->read_data_offset(buf) + 4;
- data_len = server->ops->read_data_length(buf);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ use_rdma_mr = rdata->mr;
+#endif
+ data_len = server->ops->read_data_length(buf, use_rdma_mr);
if (data_offset < server->vals->read_rsp_size) {
/*
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 01346b8b6edb..63778ac22fd9 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -48,6 +48,7 @@
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
+#include "smbdirect.h"
/*
* The following table defines the expected "StructureSize" of SMB2 requests
@@ -319,54 +320,16 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
*total_len = parmsize + sizeof(struct smb2_sync_hdr);
}
-/* init request without RFC1001 length at the beginning */
-static int
-smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
- void **request_buf, unsigned int *total_len)
-{
- int rc;
- struct smb2_sync_hdr *shdr;
-
- rc = smb2_reconnect(smb2_command, tcon);
- if (rc)
- return rc;
-
- /* BB eventually switch this to SMB2 specific small buf size */
- *request_buf = cifs_small_buf_get();
- if (*request_buf == NULL) {
- /* BB should we add a retry in here if not a writepage? */
- return -ENOMEM;
- }
-
- shdr = (struct smb2_sync_hdr *)(*request_buf);
-
- fill_small_buf(smb2_command, tcon, shdr, total_len);
-
- if (tcon != NULL) {
-#ifdef CONFIG_CIFS_STATS2
- uint16_t com_code = le16_to_cpu(smb2_command);
-
- cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
-#endif
- cifs_stats_inc(&tcon->num_smbs_sent);
- }
-
- return rc;
-}
-
/*
* Allocate and return pointer to an SMB request hdr, and set basic
* SMB information in the SMB header. If the return code is zero, this
- * function must have filled in request_buf pointer. The returned buffer
- * has RFC1001 length at the beginning.
+ * function must have filled in request_buf pointer.
*/
static int
-small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
- void **request_buf)
+smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
{
int rc;
- unsigned int total_len;
- struct smb2_pdu *pdu;
rc = smb2_reconnect(smb2_command, tcon);
if (rc)
@@ -379,12 +342,9 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
return -ENOMEM;
}
- pdu = (struct smb2_pdu *)(*request_buf);
-
- fill_small_buf(smb2_command, tcon, get_sync_hdr(pdu), &total_len);
-
- /* Note this is only network field converted to big endian */
- pdu->hdr.smb2_buf_length = cpu_to_be32(total_len);
+ fill_small_buf(smb2_command, tcon,
+ (struct smb2_sync_hdr *)(*request_buf),
+ total_len);
if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
@@ -398,8 +358,8 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
}
#ifdef CONFIG_CIFS_SMB311
-/* offset is sizeof smb2_negotiate_req - 4 but rounded up to 8 bytes */
-#define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) - 4 */
+/* offset is sizeof smb2_negotiate_req but rounded up to 8 bytes */
+#define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) */
#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
@@ -427,23 +387,25 @@ build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
}
static void
-assemble_neg_contexts(struct smb2_negotiate_req *req)
+assemble_neg_contexts(struct smb2_negotiate_req *req,
+ unsigned int *total_len)
{
-
- /* +4 is to account for the RFC1001 len field */
- char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT + 4;
+ char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT;
build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
/* Add 2 to size to round to 8 byte boundary */
+
pneg_ctxt += 2 + sizeof(struct smb2_preauth_neg_context);
build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
req->NegotiateContextCount = cpu_to_le16(2);
- inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
- + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
+
+ *total_len += 4 + sizeof(struct smb2_preauth_neg_context)
+ + sizeof(struct smb2_encryption_neg_context);
}
#else
-static void assemble_neg_contexts(struct smb2_negotiate_req *req)
+static void assemble_neg_contexts(struct smb2_negotiate_req *req,
+ unsigned int *total_len)
{
return;
}
@@ -477,6 +439,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
int blob_offset, blob_length;
char *security_blob;
int flags = CIFS_NEG_OP;
+ unsigned int total_len;
cifs_dbg(FYI, "Negotiate protocol\n");
@@ -485,30 +448,30 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
return -EIO;
}
- rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, (void **) &req, &total_len);
if (rc)
return rc;
- req->hdr.sync_hdr.SessionId = 0;
+ req->sync_hdr.SessionId = 0;
if (strcmp(ses->server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
req->DialectCount = cpu_to_le16(2);
- inc_rfc1001_len(req, 4);
+ total_len += 4;
} else if (strcmp(ses->server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
req->DialectCount = cpu_to_le16(3);
- inc_rfc1001_len(req, 6);
+ total_len += 6;
} else {
/* otherwise send specific dialect */
req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
req->DialectCount = cpu_to_le16(1);
- inc_rfc1001_len(req, 2);
+ total_len += 2;
}
/* only one of SMB2 signing flags may be set in SMB2 request */
@@ -528,13 +491,12 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
memcpy(req->ClientGUID, server->client_guid,
SMB2_CLIENT_GUID_SIZE);
if (ses->server->vals->protocol_id == SMB311_PROT_ID)
- assemble_neg_contexts(req);
+ assemble_neg_contexts(req, &total_len);
}
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ iov[0].iov_len = total_len;
- rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
/*
@@ -654,6 +616,11 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
cifs_dbg(FYI, "validate negotiate\n");
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (tcon->ses->server->rdma)
+ return 0;
+#endif
+
/*
* validation ioctl must be signed, so no point sending this if we
* can not sign it (ie are not known user). Even if signing is not
@@ -713,7 +680,6 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
- false /* use_ipc */,
(char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
(char **)&pneg_rsp, &rsplen);
@@ -733,8 +699,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
}
/* check validate negotiate info response matches what we got earlier */
- if (pneg_rsp->Dialect !=
- cpu_to_le16(tcon->ses->server->vals->protocol_id))
+ if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
goto vneg_out;
if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
@@ -806,20 +771,22 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_req *req;
struct TCP_Server_Info *server = ses->server;
+ unsigned int total_len;
- rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req,
+ &total_len);
if (rc)
return rc;
/* First session, not a reauthenticate */
- req->hdr.sync_hdr.SessionId = 0;
+ req->sync_hdr.SessionId = 0;
/* if reconnect, we need to send previous sess id, otherwise it is 0 */
req->PreviousSessionId = sess_data->previous_session;
req->Flags = 0; /* MBZ */
/* to enable echos and oplocks */
- req->hdr.sync_hdr.CreditRequest = cpu_to_le16(3);
+ req->sync_hdr.CreditRequest = cpu_to_le16(3);
/* only one of SMB2 signing flags may be set in SMB2 request */
if (server->sign)
@@ -833,8 +800,8 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
req->Channel = 0; /* MBZ */
sess_data->iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field and 1 for pad */
- sess_data->iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ /* 1 for pad */
+ sess_data->iov[0].iov_len = total_len - 1;
/*
* This variable will be used to clear the buffer
* allocated above in case of any error in the calling function.
@@ -860,18 +827,15 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->SecurityBufferOffset =
- cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
- 1 /* pad */ - 4 /* rfc1001 len */);
+ cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
- inc_rfc1001_len(req, sess_data->iov[1].iov_len - 1 /* pad */);
-
/* BB add code to build os and lm fields */
- rc = SendReceive2(sess_data->xid, sess_data->ses,
- sess_data->iov, 2,
- &sess_data->buf0_type,
- CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
+ rc = smb2_send_recv(sess_data->xid, sess_data->ses,
+ sess_data->iov, 2,
+ &sess_data->buf0_type,
+ CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
cifs_small_buf_release(sess_data->iov[0].iov_base);
memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
@@ -1092,7 +1056,7 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
goto out;
req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
- req->hdr.sync_hdr.SessionId = ses->Suid;
+ req->sync_hdr.SessionId = ses->Suid;
rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
sess_data->nls_cp);
@@ -1202,6 +1166,10 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
int rc = 0;
struct TCP_Server_Info *server;
int flags = 0;
+ unsigned int total_len;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
cifs_dbg(FYI, "disconnect session %p\n", ses);
@@ -1214,19 +1182,24 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
if (ses->need_reconnect)
goto smb2_session_already_dead;
- rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, (void **) &req, &total_len);
if (rc)
return rc;
/* since no tcon, smb2_init can not do this, so do here */
- req->hdr.sync_hdr.SessionId = ses->Suid;
+ req->sync_hdr.SessionId = ses->Suid;
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
flags |= CIFS_TRANSFORM_REQ;
else if (server->sign)
- req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+ req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+
+ flags |= CIFS_NO_RESP;
+
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
- rc = SendReceiveNoRsp(xid, ses, (char *) req, flags);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
/*
* No tcon so can't do
@@ -1265,6 +1238,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
int unc_path_len;
__le16 *unc_path = NULL;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "TCON\n");
@@ -1283,40 +1257,30 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
}
/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
- if (tcon)
- tcon->tid = 0;
+ tcon->tid = 0;
- rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req,
+ &total_len);
if (rc) {
kfree(unc_path);
return rc;
}
- if (tcon == NULL) {
- if ((ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA))
- flags |= CIFS_TRANSFORM_REQ;
-
- /* since no tcon, smb2_init can not do this, so do here */
- req->hdr.sync_hdr.SessionId = ses->Suid;
- if (ses->server->sign)
- req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
- } else if (encryption_required(tcon))
+ if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field and 1 for pad */
- iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ /* 1 for pad */
+ iov[0].iov_len = total_len - 1;
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
- - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
+ - 1 /* pad */);
req->PathLength = cpu_to_le16(unc_path_len - 2);
iov[1].iov_base = unc_path;
iov[1].iov_len = unc_path_len;
- inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
-
- rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
@@ -1328,21 +1292,16 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
goto tcon_error_exit;
}
- if (tcon == NULL) {
- ses->ipc_tid = rsp->hdr.sync_hdr.TreeId;
- goto tcon_exit;
- }
-
switch (rsp->ShareType) {
case SMB2_SHARE_TYPE_DISK:
cifs_dbg(FYI, "connection to disk share\n");
break;
case SMB2_SHARE_TYPE_PIPE:
- tcon->ipc = true;
+ tcon->pipe = true;
cifs_dbg(FYI, "connection to pipe share\n");
break;
case SMB2_SHARE_TYPE_PRINT:
- tcon->ipc = true;
+ tcon->print = true;
cifs_dbg(FYI, "connection to printer\n");
break;
default:
@@ -1389,6 +1348,10 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
int rc = 0;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
+ unsigned int total_len;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
cifs_dbg(FYI, "Tree Disconnect\n");
@@ -1398,14 +1361,20 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
- rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = SendReceiveNoRsp(xid, ses, (char *)req, flags);
+ flags |= CIFS_NO_RESP;
+
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc)
cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -1505,11 +1474,10 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
if (!req->CreateContextsOffset)
req->CreateContextsOffset = cpu_to_le32(
- sizeof(struct smb2_create_req) - 4 +
+ sizeof(struct smb2_create_req) +
iov[num - 1].iov_len);
le32_add_cpu(&req->CreateContextsLength,
server->vals->create_lease_size);
- inc_rfc1001_len(&req->hdr, server->vals->create_lease_size);
*num_iovec = num + 1;
return 0;
}
@@ -1589,10 +1557,9 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
iov[num].iov_len = sizeof(struct create_durable_v2);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
- cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
+ cpu_to_le32(sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
- inc_rfc1001_len(&req->hdr, sizeof(struct create_durable_v2));
*num_iovec = num + 1;
return 0;
}
@@ -1613,12 +1580,10 @@ add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
- cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
+ cpu_to_le32(sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength,
sizeof(struct create_durable_handle_reconnect_v2));
- inc_rfc1001_len(&req->hdr,
- sizeof(struct create_durable_handle_reconnect_v2));
*num_iovec = num + 1;
return 0;
}
@@ -1649,10 +1614,9 @@ add_durable_context(struct kvec *iov, unsigned int *num_iovec,
iov[num].iov_len = sizeof(struct create_durable);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
- cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
+ cpu_to_le32(sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
- inc_rfc1001_len(&req->hdr, sizeof(struct create_durable));
*num_iovec = num + 1;
return 0;
}
@@ -1723,6 +1687,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
__u32 file_attributes = 0;
char *dhc_buf = NULL, *lc_buf = NULL;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "create/open\n");
@@ -1731,7 +1696,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
else
return -EIO;
- rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+
if (rc)
return rc;
@@ -1752,12 +1718,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
/* -1 since last byte is buf[0] which is sent below (path) */
- iov[0].iov_len--;
+ iov[0].iov_len = total_len - 1;
- req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4);
+ req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
/* [MS-SMB2] 2.2.13 NameOffset:
* If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
@@ -1770,7 +1734,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
if (tcon->share_flags & SHI1005_FLAGS_DFS) {
int name_len;
- req->hdr.sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+ req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
&name_len,
tcon->treeName, path);
@@ -1797,8 +1761,6 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
iov[1].iov_len = uni_path_len;
iov[1].iov_base = path;
- /* -1 since last byte is buf[0] which was counted in smb2_buf_len */
- inc_rfc1001_len(req, uni_path_len - 1);
if (!server->oplocks)
*oplock = SMB2_OPLOCK_LEVEL_NONE;
@@ -1836,7 +1798,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
dhc_buf = iov[n_iov-1].iov_base;
}
- rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+ &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -1877,7 +1840,7 @@ creat_exit:
*/
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
- u64 volatile_fid, u32 opcode, bool is_fsctl, bool use_ipc,
+ u64 volatile_fid, u32 opcode, bool is_fsctl,
char *in_data, u32 indatalen,
char **out_data, u32 *plen /* returned data len */)
{
@@ -1891,6 +1854,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
int n_iov;
int rc = 0;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "SMB2 IOCTL\n");
@@ -1909,20 +1873,10 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (!ses || !(ses->server))
return -EIO;
- rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len);
if (rc)
return rc;
- if (use_ipc) {
- if (ses->ipc_tid == 0) {
- cifs_small_buf_release(req);
- return -ENOTCONN;
- }
-
- cifs_dbg(FYI, "replacing tid 0x%x with IPC tid 0x%x\n",
- req->hdr.sync_hdr.TreeId, ses->ipc_tid);
- req->hdr.sync_hdr.TreeId = ses->ipc_tid;
- }
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -1934,7 +1888,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
req->InputCount = cpu_to_le32(indatalen);
/* do not set InputOffset if no input data */
req->InputOffset =
- cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4);
+ cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
iov[1].iov_base = in_data;
iov[1].iov_len = indatalen;
n_iov = 2;
@@ -1969,21 +1923,20 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
* but if input data passed to ioctl, we do not
* want to double count this, so we do not send
* the dummy one byte of data in iovec[0] if sending
- * input data (in iovec[1]). We also must add 4 bytes
- * in first iovec to allow for rfc1002 length field.
+ * input data (in iovec[1]).
*/
if (indatalen) {
- iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
- inc_rfc1001_len(req, indatalen - 1);
+ iov[0].iov_len = total_len - 1;
} else
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ iov[0].iov_len = total_len;
/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
- req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+ req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
- rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+ &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -2052,7 +2005,6 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SET_COMPRESSION, true /* is_fsctl */,
- false /* use_ipc */,
(char *)&fsctl_input /* data input */,
2 /* in data len */, &ret_data /* out data */, NULL);
@@ -2073,13 +2025,14 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buftype;
int rc = 0;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "Close\n");
if (!ses || !(ses->server))
return -EIO;
- rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
if (rc)
return rc;
@@ -2090,10 +2043,9 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ iov[0].iov_len = total_len;
- rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
@@ -2180,13 +2132,15 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "Query Info\n");
if (!ses || !(ses->server))
return -EIO;
- rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -2203,15 +2157,14 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
* We do not use the input buffer (do not send extra byte)
*/
req->InputBufferOffset = 0;
- inc_rfc1001_len(req, -1);
req->OutputBufferLength = cpu_to_le32(output_len);
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ /* 1 for Buffer */
+ iov[0].iov_len = total_len - 1;
- rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
@@ -2338,6 +2291,10 @@ void smb2_reconnect_server(struct work_struct *work)
tcon_exist = true;
}
}
+ if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
+ list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
+ tcon_exist = true;
+ }
}
/*
* Get the reference to server struct to be sure that the last call of
@@ -2376,6 +2333,8 @@ SMB2_echo(struct TCP_Server_Info *server)
struct kvec iov[2];
struct smb_rqst rqst = { .rq_iov = iov,
.rq_nvec = 2 };
+ unsigned int total_len;
+ __be32 rfc1002_marker;
cifs_dbg(FYI, "In echo request\n");
@@ -2385,17 +2344,17 @@ SMB2_echo(struct TCP_Server_Info *server)
return rc;
}
- rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
+ rc = smb2_plain_req_init(SMB2_ECHO, NULL, (void **)&req, &total_len);
if (rc)
return rc;
- req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1);
+ req->sync_hdr.CreditRequest = cpu_to_le16(1);
- /* 4 for rfc1002 length field */
iov[0].iov_len = 4;
- iov[0].iov_base = (char *)req;
- iov[1].iov_len = get_rfc1002_length(req);
- iov[1].iov_base = (char *)req + 4;
+ rfc1002_marker = cpu_to_be32(total_len);
+ iov[0].iov_base = &rfc1002_marker;
+ iov[1].iov_len = total_len;
+ iov[1].iov_base = (char *)req;
rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
server, CIFS_ECHO_OP);
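For reference: the synchronous callers converted in this series go through smb2_send_recv(), which builds the 4-byte RFC1001 length marker internally, while async requests such as this echo (and the async write below) are handed pre-framed to cifs_call_async() and so carry the marker in a separate vector. The resulting layout, sketched from the hunk above:

	iov[0]: rfc1002_marker, 4 bytes, cpu_to_be32(total_len)
	iov[1]: the SMB2 PDU itself (starting at the sync header), total_len bytes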
@@ -2417,13 +2376,14 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
int resp_buftype;
int rc = 0;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "Flush\n");
if (!ses || !(ses->server))
return -EIO;
- rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len);
if (rc)
return rc;
@@ -2434,10 +2394,9 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ iov[0].iov_len = total_len;
- rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc != 0)
@@ -2453,18 +2412,21 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
*/
static int
smb2_new_read_req(void **buf, unsigned int *total_len,
- struct cifs_io_parms *io_parms, unsigned int remaining_bytes,
- int request_type)
+ struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
+ unsigned int remaining_bytes, int request_type)
{
int rc = -EACCES;
struct smb2_read_plain_req *req = NULL;
struct smb2_sync_hdr *shdr;
+ struct TCP_Server_Info *server;
rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req,
total_len);
if (rc)
return rc;
- if (io_parms->tcon->ses->server == NULL)
+
+ server = io_parms->tcon->ses->server;
+ if (server == NULL)
return -ECONNABORTED;
shdr = &req->sync_hdr;
@@ -2478,7 +2440,40 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
req->MinimumCount = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /*
+	 * If we want to do an RDMA write, fill in and append a
+	 * smbd_buffer_descriptor_v1 to the end of the read request
+ */
+ if (server->rdma && rdata &&
+ rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
+
+ struct smbd_buffer_descriptor_v1 *v1;
+ bool need_invalidate =
+ io_parms->tcon->ses->server->dialect == SMB30_PROT_ID;
+
+ rdata->mr = smbd_register_mr(
+ server->smbd_conn, rdata->pages,
+ rdata->nr_pages, rdata->tailsz,
+ true, need_invalidate);
+ if (!rdata->mr)
+ return -ENOBUFS;
+
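+		/*
+		 * Remote invalidation (SMB2_CHANNEL_RDMA_V1_INVALIDATE)
+		 * requires SMB 3.02 or later; on SMB 3.0 fall back to
+		 * SMB2_CHANNEL_RDMA_V1 and invalidate the MR locally on
+		 * deregistration.
+		 */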
+ req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
+ if (need_invalidate)
+ req->Channel = SMB2_CHANNEL_RDMA_V1;
+ req->ReadChannelInfoOffset =
+ cpu_to_le16(offsetof(struct smb2_read_plain_req, Buffer));
+ req->ReadChannelInfoLength =
+ cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
+ v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+ v1->offset = cpu_to_le64(rdata->mr->mr->iova);
+ v1->token = cpu_to_le32(rdata->mr->mr->rkey);
+ v1->length = cpu_to_le32(rdata->mr->mr->length);
+
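+		/* Buffer[1] is already counted in total_len, hence the -1 */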
+ *total_len += sizeof(*v1) - 1;
+ }
+#endif
if (request_type & CHAINED_REQUEST) {
if (!(request_type & END_OF_CHAIN)) {
/* next 8-byte aligned request */
@@ -2557,7 +2552,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
if (rdata->result != -ENODATA)
rdata->result = -EIO;
}
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /*
+	 * If this rdata has a memory registered, the MR can be freed.
+	 * The MR needs to be freed as soon as I/O finishes, to prevent
+	 * deadlock: MRs are limited in number and are reused for future I/Os.
+ */
+ if (rdata->mr) {
+ smbd_deregister_mr(rdata->mr);
+ rdata->mr = NULL;
+ }
+#endif
if (rdata->result)
cifs_stats_fail_inc(tcon, SMB2_READ_HE);
@@ -2592,7 +2597,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
server = io_parms.tcon->ses->server;
- rc = smb2_new_read_req((void **) &buf, &total_len, &io_parms, 0, 0);
+ rc = smb2_new_read_req(
+ (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
if (rc) {
if (rc == -EAGAIN && rdata->credits) {
/* credits was reset by reconnect */
@@ -2650,31 +2656,24 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
struct smb2_read_plain_req *req = NULL;
struct smb2_read_rsp *rsp = NULL;
struct smb2_sync_hdr *shdr;
- struct kvec iov[2];
+ struct kvec iov[1];
struct kvec rsp_iov;
unsigned int total_len;
- __be32 req_len;
- struct smb_rqst rqst = { .rq_iov = iov,
- .rq_nvec = 2 };
int flags = CIFS_LOG_ERROR;
struct cifs_ses *ses = io_parms->tcon->ses;
*nbytes = 0;
- rc = smb2_new_read_req((void **)&req, &total_len, io_parms, 0, 0);
+ rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
if (rc)
return rc;
if (encryption_required(io_parms->tcon))
flags |= CIFS_TRANSFORM_REQ;
- req_len = cpu_to_be32(total_len);
-
- iov[0].iov_base = &req_len;
- iov[0].iov_len = sizeof(__be32);
- iov[1].iov_base = req;
- iov[1].iov_len = total_len;
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
@@ -2755,7 +2754,19 @@ smb2_writev_callback(struct mid_q_entry *mid)
wdata->result = -EIO;
break;
}
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /*
+ * If this wdata has a memory registered, the MR can be freed
+	 * The number of MRs available is limited; it is important to recover
+	 * a used MR as soon as the I/O finishes. Holding an MR longer into
+	 * the I/O process can result in I/O deadlock, as no MR may be left
+	 * to send a request on I/O retry.
+ */
+ if (wdata->mr) {
+ smbd_deregister_mr(wdata->mr);
+ wdata->mr = NULL;
+ }
+#endif
if (wdata->result)
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
@@ -2776,8 +2787,10 @@ smb2_async_writev(struct cifs_writedata *wdata,
struct TCP_Server_Info *server = tcon->ses->server;
struct kvec iov[2];
struct smb_rqst rqst = { };
+ unsigned int total_len;
+ __be32 rfc1002_marker;
- rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
if (rc) {
if (rc == -EAGAIN && wdata->credits) {
/* credits was reset by reconnect */
@@ -2793,7 +2806,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- shdr = get_sync_hdr(req);
+ shdr = (struct smb2_sync_hdr *)req;
shdr->ProcessId = cpu_to_le32(wdata->cfile->pid);
req->PersistentFileId = wdata->cfile->fid.persistent_fid;
@@ -2802,16 +2815,51 @@ smb2_async_writev(struct cifs_writedata *wdata,
req->WriteChannelInfoLength = 0;
req->Channel = 0;
req->Offset = cpu_to_le64(wdata->offset);
- /* 4 for rfc1002 length field */
req->DataOffset = cpu_to_le16(
- offsetof(struct smb2_write_req, Buffer) - 4);
+ offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /*
+	 * If we want to do a server RDMA read, fill in and append a
+	 * smbd_buffer_descriptor_v1 to the end of the write request
+ */
+ if (server->rdma && wdata->bytes >=
+ server->smbd_conn->rdma_readwrite_threshold) {
+
+ struct smbd_buffer_descriptor_v1 *v1;
+ bool need_invalidate = server->dialect == SMB30_PROT_ID;
+
+ wdata->mr = smbd_register_mr(
+ server->smbd_conn, wdata->pages,
+ wdata->nr_pages, wdata->tailsz,
+ false, need_invalidate);
+ if (!wdata->mr) {
+ rc = -ENOBUFS;
+ goto async_writev_out;
+ }
+ req->Length = 0;
+ req->DataOffset = 0;
+ req->RemainingBytes =
+ cpu_to_le32((wdata->nr_pages-1)*PAGE_SIZE + wdata->tailsz);
+ req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
+ if (need_invalidate)
+ req->Channel = SMB2_CHANNEL_RDMA_V1;
+ req->WriteChannelInfoOffset =
+ cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
+ req->WriteChannelInfoLength =
+ cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
+ v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+ v1->offset = cpu_to_le64(wdata->mr->mr->iova);
+ v1->token = cpu_to_le32(wdata->mr->mr->rkey);
+ v1->length = cpu_to_le32(wdata->mr->mr->length);
+ }
+#endif
/* 4 for rfc1002 length field and 1 for Buffer */
iov[0].iov_len = 4;
- iov[0].iov_base = req;
- iov[1].iov_len = get_rfc1002_length(req) - 1;
- iov[1].iov_base = (char *)req + 4;
+ rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes);
+ iov[0].iov_base = &rfc1002_marker;
+ iov[1].iov_len = total_len - 1;
+ iov[1].iov_base = (char *)req;
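+	/* the marker counts the PDU minus its one-byte Buffer placeholder,
+	 * plus the write payload that follows */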
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
@@ -2819,13 +2867,22 @@ smb2_async_writev(struct cifs_writedata *wdata,
rqst.rq_npages = wdata->nr_pages;
rqst.rq_pagesz = wdata->pagesz;
rqst.rq_tailsz = wdata->tailsz;
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (wdata->mr) {
+ iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+ rqst.rq_npages = 0;
+ }
+#endif
cifs_dbg(FYI, "async write at %llu %u bytes\n",
wdata->offset, wdata->bytes);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /* For RDMA read, I/O size is in RemainingBytes not in Length */
+ if (!wdata->mr)
+ req->Length = cpu_to_le32(wdata->bytes);
+#else
req->Length = cpu_to_le32(wdata->bytes);
-
- inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
+#endif
if (wdata->credits) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
@@ -2869,13 +2926,15 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
int resp_buftype;
struct kvec rsp_iov;
int flags = 0;
+ unsigned int total_len;
*nbytes = 0;
if (n_vec < 1)
return rc;
- rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -2885,7 +2944,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
if (encryption_required(io_parms->tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->hdr.sync_hdr.ProcessId = cpu_to_le32(io_parms->pid);
+ req->sync_hdr.ProcessId = cpu_to_le32(io_parms->pid);
req->PersistentFileId = io_parms->persistent_fid;
req->VolatileFileId = io_parms->volatile_fid;
@@ -2894,20 +2953,16 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
req->Channel = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
- /* 4 for rfc1002 length field */
req->DataOffset = cpu_to_le16(
- offsetof(struct smb2_write_req, Buffer) - 4);
+ offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field and 1 for Buffer */
- iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ /* 1 for Buffer */
+ iov[0].iov_len = total_len - 1;
- /* length of entire message including data to be written */
- inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
-
- rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
- &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1,
+ &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -2984,13 +3039,15 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int output_size = CIFSMaxBufSize;
size_t info_buf_size;
int flags = 0;
+ unsigned int total_len;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
- rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -3022,7 +3079,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
memcpy(bufptr, &asteriks, len);
req->FileNameOffset =
- cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
+ cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
req->FileNameLength = cpu_to_le16(len);
/*
* BB could be 30 bytes or so longer if we used SMB2 specific
@@ -3033,15 +3090,13 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
req->OutputBufferLength = cpu_to_le32(output_size);
iov[0].iov_base = (char *)req;
- /* 4 for RFC1001 length and 1 for Buffer */
- iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ /* 1 for Buffer */
+ iov[0].iov_len = total_len - 1;
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
- inc_rfc1001_len(req, len - 1 /* Buffer */);
-
- rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
@@ -3110,6 +3165,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int i;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
+ unsigned int total_len;
if (!ses || !(ses->server))
return -EIO;
@@ -3121,7 +3177,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
if (!iov)
return -ENOMEM;
- rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len);
if (rc) {
kfree(iov);
return rc;
@@ -3130,7 +3186,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid);
+ req->sync_hdr.ProcessId = cpu_to_le32(pid);
req->InfoType = info_type;
req->FileInfoClass = info_class;
@@ -3138,27 +3194,25 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
req->VolatileFileId = volatile_fid;
req->AdditionalInformation = cpu_to_le32(additional_info);
- /* 4 for RFC1001 length and 1 for Buffer */
req->BufferOffset =
- cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
+ cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
req->BufferLength = cpu_to_le32(*size);
- inc_rfc1001_len(req, *size - 1 /* Buffer */);
-
memcpy(req->Buffer, *data, *size);
+ total_len += *size;
iov[0].iov_base = (char *)req;
- /* 4 for RFC1001 length */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ /* 1 for Buffer */
+ iov[0].iov_len = total_len - 1;
for (i = 1; i < num; i++) {
- inc_rfc1001_len(req, size[i]);
le32_add_cpu(&req->BufferLength, size[i]);
iov[i].iov_base = (char *)data[i];
iov[i].iov_len = size[i];
}
- rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags,
+ &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
@@ -3310,11 +3364,17 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
__u8 oplock_level)
{
int rc;
- struct smb2_oplock_break *req = NULL;
+ struct smb2_oplock_break_req *req = NULL;
+ struct cifs_ses *ses = tcon->ses;
int flags = CIFS_OBREAK_OP;
+ unsigned int total_len;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
cifs_dbg(FYI, "SMB2_oplock_break\n");
- rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -3324,9 +3384,14 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
req->VolatileFid = volatile_fid;
req->PersistentFid = persistent_fid;
req->OplockLevel = oplock_level;
- req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1);
+ req->sync_hdr.CreditRequest = cpu_to_le16(1);
- rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags);
+ flags |= CIFS_NO_RESP;
+
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -3355,13 +3420,15 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
{
int rc;
struct smb2_query_info_req *req;
+ unsigned int total_len;
cifs_dbg(FYI, "Query FSInfo level %d\n", level);
if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
return -EIO;
- rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -3369,15 +3436,14 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
req->FileInfoClass = level;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
- /* 4 for rfc1002 length field and 1 for pad */
+ /* 1 for pad */
req->InputBufferOffset =
- cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
+ cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
req->OutputBufferLength = cpu_to_le32(
outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
iov->iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov->iov_len = get_rfc1002_length(req) + 4;
+ iov->iov_len = total_len;
return 0;
}
@@ -3403,7 +3469,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3459,7 +3525,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3505,34 +3571,33 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buf_type;
unsigned int count;
int flags = CIFS_NO_RESP;
+ unsigned int total_len;
cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
- rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_LOCK, tcon, (void **) &req, &total_len);
if (rc)
return rc;
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid);
+ req->sync_hdr.ProcessId = cpu_to_le32(pid);
req->LockCount = cpu_to_le16(num_lock);
req->PersistentFileId = persist_fid;
req->VolatileFileId = volatile_fid;
count = num_lock * sizeof(struct smb2_lock_element);
- inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field and count for all locks */
- iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
+ iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
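+	/* the request struct already embeds one lock element; the full
+	 * array of locks is sent in iov[1] instead */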
iov[1].iov_base = (char *)buf;
iov[1].iov_len = count;
cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
- rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
- &rsp_iov);
+ rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
+ &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
@@ -3565,24 +3630,35 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
{
int rc;
struct smb2_lease_ack *req = NULL;
+ struct cifs_ses *ses = tcon->ses;
int flags = CIFS_OBREAK_OP;
+ unsigned int total_len;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
cifs_dbg(FYI, "SMB2_lease_break\n");
- rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1);
+ req->sync_hdr.CreditRequest = cpu_to_le16(1);
req->StructureSize = cpu_to_le16(36);
- inc_rfc1001_len(req, 12);
+ total_len += 12;
memcpy(req->LeaseKey, lease_key, 16);
req->LeaseState = lease_state;
- rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags);
+ flags |= CIFS_NO_RESP;
+
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index c2ec934be968..6eb9f9691ed4 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -195,7 +195,7 @@ struct smb2_symlink_err_rsp {
#define SMB2_CLIENT_GUID_SIZE 16
struct smb2_negotiate_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 36 */
__le16 DialectCount;
__le16 SecurityMode;
@@ -282,7 +282,7 @@ struct smb2_negotiate_rsp {
#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA 0x04
struct smb2_sess_setup_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 25 */
__u8 Flags;
__u8 SecurityMode;
@@ -308,7 +308,7 @@ struct smb2_sess_setup_rsp {
} __packed;
struct smb2_logoff_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 4 */
__le16 Reserved;
} __packed;
@@ -323,7 +323,7 @@ struct smb2_logoff_rsp {
#define SMB2_SHAREFLAG_CLUSTER_RECONNECT 0x0001
struct smb2_tree_connect_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 9 */
__le16 Reserved; /* Flags in SMB3.1.1 */
__le16 PathOffset;
@@ -375,7 +375,7 @@ struct smb2_tree_connect_rsp {
#define SMB2_SHARE_CAP_ASYMMETRIC cpu_to_le32(0x00000080) /* 3.02 */
struct smb2_tree_disconnect_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 4 */
__le16 Reserved;
} __packed;
@@ -496,7 +496,7 @@ struct smb2_tree_disconnect_rsp {
#define SVHDX_OPEN_DEVICE_CONTEXT 0x83CE6F1AD851E0986E34401CC9BCFCE9
struct smb2_create_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 57 */
__u8 SecurityFlags;
__u8 RequestedOplockLevel;
@@ -753,7 +753,7 @@ struct duplicate_extents_to_file {
} __packed;
struct smb2_ioctl_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 57 */
__u16 Reserved;
__le32 CtlCode;
@@ -789,7 +789,7 @@ struct smb2_ioctl_rsp {
/* Currently defined values for close flags */
#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001)
struct smb2_close_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 24 */
__le16 Flags;
__le32 Reserved;
@@ -812,7 +812,7 @@ struct smb2_close_rsp {
} __packed;
struct smb2_flush_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 24 */
__le16 Reserved1;
__le32 Reserved2;
@@ -830,9 +830,9 @@ struct smb2_flush_rsp {
#define SMB2_READFLAG_READ_UNBUFFERED 0x01
/* Channel field for read and write: exactly one of following flags can be set*/
-#define SMB2_CHANNEL_NONE 0x00000000
-#define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */
-#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000002 /* SMB3.02 or later */
+#define SMB2_CHANNEL_NONE cpu_to_le32(0x00000000)
+#define SMB2_CHANNEL_RDMA_V1 cpu_to_le32(0x00000001) /* SMB3 or later */
+#define SMB2_CHANNEL_RDMA_V1_INVALIDATE cpu_to_le32(0x00000002) /* >= SMB3.02 */
/* SMB2 read request without RFC1001 length at the beginning */
struct smb2_read_plain_req {
@@ -847,8 +847,8 @@ struct smb2_read_plain_req {
__le32 MinimumCount;
__le32 Channel; /* MBZ except for SMB3 or later */
__le32 RemainingBytes;
- __le16 ReadChannelInfoOffset; /* Reserved MBZ */
- __le16 ReadChannelInfoLength; /* Reserved MBZ */
+ __le16 ReadChannelInfoOffset;
+ __le16 ReadChannelInfoLength;
__u8 Buffer[1];
} __packed;
@@ -868,7 +868,7 @@ struct smb2_read_rsp {
#define SMB2_WRITEFLAG_WRITE_UNBUFFERED 0x00000002 /* SMB3.02 or later */
struct smb2_write_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 49 */
__le16 DataOffset; /* offset from start of SMB2 header to write data */
__le32 Length;
@@ -877,8 +877,8 @@ struct smb2_write_req {
__u64 VolatileFileId; /* opaque endianness */
__le32 Channel; /* Reserved MBZ */
__le32 RemainingBytes;
- __le16 WriteChannelInfoOffset; /* Reserved MBZ */
- __le16 WriteChannelInfoLength; /* Reserved MBZ */
+ __le16 WriteChannelInfoOffset;
+ __le16 WriteChannelInfoLength;
__le32 Flags;
__u8 Buffer[1];
} __packed;
@@ -907,7 +907,7 @@ struct smb2_lock_element {
} __packed;
struct smb2_lock_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 48 */
__le16 LockCount;
__le32 Reserved;
@@ -924,7 +924,7 @@ struct smb2_lock_rsp {
} __packed;
struct smb2_echo_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 4 */
__u16 Reserved;
} __packed;
@@ -942,7 +942,7 @@ struct smb2_echo_rsp {
#define SMB2_REOPEN 0x10
struct smb2_query_directory_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 33 */
__u8 FileInformationClass;
__u8 Flags;
@@ -989,7 +989,7 @@ struct smb2_query_directory_rsp {
#define SL_INDEX_SPECIFIED 0x00000004
struct smb2_query_info_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 41 */
__u8 InfoType;
__u8 FileInfoClass;
@@ -1013,7 +1013,7 @@ struct smb2_query_info_rsp {
} __packed;
struct smb2_set_info_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 33 */
__u8 InfoType;
__u8 FileInfoClass;
@@ -1031,7 +1031,19 @@ struct smb2_set_info_rsp {
__le16 StructureSize; /* Must be 2 */
} __packed;
-struct smb2_oplock_break {
+/* oplock break without an rfc1002 header */
+struct smb2_oplock_break_req {
+ struct smb2_sync_hdr sync_hdr;
+ __le16 StructureSize; /* Must be 24 */
+ __u8 OplockLevel;
+ __u8 Reserved;
+ __le32 Reserved2;
+ __u64 PersistentFid;
+ __u64 VolatileFid;
+} __packed;
+
+/* oplock break with an rfc1002 header */
+struct smb2_oplock_break_rsp {
struct smb2_hdr hdr;
__le16 StructureSize; /* Must be 24 */
__u8 OplockLevel;
@@ -1057,7 +1069,7 @@ struct smb2_lease_break {
} __packed;
struct smb2_lease_ack {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 36 */
__le16 Reserved;
__le32 Flags;
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index e9ab5227e7a8..05287b01f596 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -125,8 +125,7 @@ extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
struct smb2_err_rsp **err_buf);
extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
- bool is_fsctl, bool use_ipc,
- char *in_data, u32 indatalen,
+ bool is_fsctl, char *in_data, u32 indatalen,
char **out_data, u32 *plen /* returned data len */);
extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
new file mode 100644
index 000000000000..5130492847eb
--- /dev/null
+++ b/fs/cifs/smbdirect.c
@@ -0,0 +1,2610 @@
+/*
+ * Copyright (C) 2017, Microsoft Corporation.
+ *
+ * Author(s): Long Li <longli@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include "smbdirect.h"
+#include "cifs_debug.h"
+
+static struct smbd_response *get_empty_queue_buffer(
+ struct smbd_connection *info);
+static struct smbd_response *get_receive_buffer(
+ struct smbd_connection *info);
+static void put_receive_buffer(
+ struct smbd_connection *info,
+ struct smbd_response *response);
+static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
+static void destroy_receive_buffers(struct smbd_connection *info);
+
+static void put_empty_packet(
+ struct smbd_connection *info, struct smbd_response *response);
+static void enqueue_reassembly(
+ struct smbd_connection *info,
+ struct smbd_response *response, int data_length);
+static struct smbd_response *_get_first_reassembly(
+ struct smbd_connection *info);
+
+static int smbd_post_recv(
+ struct smbd_connection *info,
+ struct smbd_response *response);
+
+static int smbd_post_send_empty(struct smbd_connection *info);
+static int smbd_post_send_data(
+ struct smbd_connection *info,
+ struct kvec *iov, int n_vec, int remaining_data_length);
+static int smbd_post_send_page(struct smbd_connection *info,
+ struct page *page, unsigned long offset,
+ size_t size, int remaining_data_length);
+
+static void destroy_mr_list(struct smbd_connection *info);
+static int allocate_mr_list(struct smbd_connection *info);
+
+/* SMBD version number */
+#define SMBD_V1 0x0100
+
+/* Port numbers for SMBD transport */
+#define SMB_PORT 445
+#define SMBD_PORT 5445
+
+/* Address lookup and resolve timeout in ms */
+#define RDMA_RESOLVE_TIMEOUT 5000
+
+/* SMBD negotiation timeout in seconds */
+#define SMBD_NEGOTIATE_TIMEOUT 120
+
+/* SMBD minimum receive size and fragmented size, as defined in [MS-SMBD] */
+#define SMBD_MIN_RECEIVE_SIZE 128
+#define SMBD_MIN_FRAGMENTED_SIZE 131072
+
+/*
+ * Default maximum number of outstanding RDMA read/write operations on this
+ * connection. This value may be decreased during QP creation if the hardware
+ * imposes a lower limit.
+ */
+#define SMBD_CM_RESPONDER_RESOURCES 32
+
+/* Maximum number of retries on data transfer operations */
+#define SMBD_CM_RETRY 6
+/* No need to retry on Receiver Not Ready since SMBD manages credits */
+#define SMBD_CM_RNR_RETRY 0
+
+/*
+ * User configurable initial values per SMBD transport connection
+ * as defined in [MS-SMBD] 3.1.1.1
+ * These may change after an SMBD negotiation
+ */
+/* The local peer's maximum number of credits to grant to the remote peer */
+int smbd_receive_credit_max = 255;
+
+/* The number of send credits the local peer requests from the remote peer */
+int smbd_send_credit_target = 255;
+
+/* The maximum single-message size that can be sent to the remote peer */
+int smbd_max_send_size = 1364;
+
+/* The maximum fragmented upper-layer payload receive size supported */
+int smbd_max_fragmented_recv_size = 1024 * 1024;
+
+/* The maximum single-message size which can be received */
+int smbd_max_receive_size = 8192;
+
+/* The idle timeout, in seconds, after which a keepalive message is sent */
+int smbd_keep_alive_interval = 120;
+
+/*
+ * User configurable initial values for RDMA transport
+ * The actual values used may be lower, as limited by hardware capabilities
+ */
+/* Default maximum number of SGEs in an RDMA write/read */
+int smbd_max_frmr_depth = 2048;
+
+/* Payloads smaller than this many bytes use RDMA send/recv, not read/write */
+int rdma_readwrite_threshold = 4096;
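+
+/*
+ * For illustration (condensed from smb2_new_read_req earlier in this
+ * patch): payloads at or above this threshold are registered as an MR
+ * and transferred via RDMA read/write; smaller payloads go through
+ * ordinary RDMA send/recv:
+ *
+ *	if (server->rdma && rdata &&
+ *	    rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold)
+ *		rdata->mr = smbd_register_mr(...);
+ */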
+
+/* Transport logging functions
+ * Logging is defined as classes. Classes can be OR'ed together to select
+ * what gets logged via the module parameter smbd_logging_class;
+ * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
+ * log_rdma_event() messages.
+ */
+#define LOG_OUTGOING 0x1
+#define LOG_INCOMING 0x2
+#define LOG_READ 0x4
+#define LOG_WRITE 0x8
+#define LOG_RDMA_SEND 0x10
+#define LOG_RDMA_RECV 0x20
+#define LOG_KEEP_ALIVE 0x40
+#define LOG_RDMA_EVENT 0x80
+#define LOG_RDMA_MR 0x100
+static unsigned int smbd_logging_class;
+module_param(smbd_logging_class, uint, 0644);
+MODULE_PARM_DESC(smbd_logging_class,
+ "Logging class for SMBD transport 0x0 to 0x100");
+
+#define ERR 0x0
+#define INFO 0x1
+static unsigned int smbd_logging_level = ERR;
+module_param(smbd_logging_level, uint, 0644);
+MODULE_PARM_DESC(smbd_logging_level,
+ "Logging level for SMBD transport, 0 (default): error, 1: info");
+
+#define log_rdma(level, class, fmt, args...) \
+do { \
+ if (level <= smbd_logging_level || class & smbd_logging_class) \
+ cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
+} while (0)
+
+#define log_outgoing(level, fmt, args...) \
+ log_rdma(level, LOG_OUTGOING, fmt, ##args)
+#define log_incoming(level, fmt, args...) \
+ log_rdma(level, LOG_INCOMING, fmt, ##args)
+#define log_read(level, fmt, args...) log_rdma(level, LOG_READ, fmt, ##args)
+#define log_write(level, fmt, args...) log_rdma(level, LOG_WRITE, fmt, ##args)
+#define log_rdma_send(level, fmt, args...) \
+ log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
+#define log_rdma_recv(level, fmt, args...) \
+ log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
+#define log_keep_alive(level, fmt, args...) \
+ log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
+#define log_rdma_event(level, fmt, args...) \
+ log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
+#define log_rdma_mr(level, fmt, args...) \
+ log_rdma(level, LOG_RDMA_MR, fmt, ##args)
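+
+/*
+ * Example: LOG_RDMA_EVENT | LOG_RDMA_MR == 0x180, so loading the module
+ * with cifs.smbd_logging_class=0x180 traces connection events and memory
+ * registration activity regardless of smbd_logging_level.
+ */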
+
+/*
+ * Destroy the transport and related RDMA and memory resources
+ * We need to go through all the pending counters and make sure no one is
+ * using the transport while it is destroyed.
+ */
+static void smbd_destroy_rdma_work(struct work_struct *work)
+{
+ struct smbd_response *response;
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, destroy_work);
+ unsigned long flags;
+
+ log_rdma_event(INFO, "destroying qp\n");
+ ib_drain_qp(info->id->qp);
+ rdma_destroy_qp(info->id);
+
+ /* Unblock all I/O waiting on the send queue */
+ wake_up_interruptible_all(&info->wait_send_queue);
+
+ log_rdma_event(INFO, "cancelling idle timer\n");
+ cancel_delayed_work_sync(&info->idle_timer_work);
+ log_rdma_event(INFO, "cancelling send immediate work\n");
+ cancel_delayed_work_sync(&info->send_immediate_work);
+
+ log_rdma_event(INFO, "wait for all send to finish\n");
+ wait_event(info->wait_smbd_send_pending,
+ info->smbd_send_pending == 0);
+
+ log_rdma_event(INFO, "wait for all recv to finish\n");
+ wake_up_interruptible(&info->wait_reassembly_queue);
+ wait_event(info->wait_smbd_recv_pending,
+ info->smbd_recv_pending == 0);
+
+ log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
+ wait_event(info->wait_send_pending,
+ atomic_read(&info->send_pending) == 0);
+ wait_event(info->wait_send_payload_pending,
+ atomic_read(&info->send_payload_pending) == 0);
+
+ log_rdma_event(INFO, "freeing mr list\n");
+ wake_up_interruptible_all(&info->wait_mr);
+ wait_event(info->wait_for_mr_cleanup,
+ atomic_read(&info->mr_used_count) == 0);
+ destroy_mr_list(info);
+
+	/* It's not possible for the upper layer to get to reassembly */
+ log_rdma_event(INFO, "drain the reassembly queue\n");
+ do {
+ spin_lock_irqsave(&info->reassembly_queue_lock, flags);
+ response = _get_first_reassembly(info);
+ if (response) {
+ list_del(&response->list);
+ spin_unlock_irqrestore(
+ &info->reassembly_queue_lock, flags);
+ put_receive_buffer(info, response);
+ }
+ } while (response);
+ spin_unlock_irqrestore(&info->reassembly_queue_lock, flags);
+ info->reassembly_data_length = 0;
+
+ log_rdma_event(INFO, "free receive buffers\n");
+ wait_event(info->wait_receive_queues,
+ info->count_receive_queue + info->count_empty_packet_queue
+ == info->receive_credit_max);
+ destroy_receive_buffers(info);
+
+ ib_free_cq(info->send_cq);
+ ib_free_cq(info->recv_cq);
+ ib_dealloc_pd(info->pd);
+ rdma_destroy_id(info->id);
+
+ /* free mempools */
+ mempool_destroy(info->request_mempool);
+ kmem_cache_destroy(info->request_cache);
+
+ mempool_destroy(info->response_mempool);
+ kmem_cache_destroy(info->response_cache);
+
+ info->transport_status = SMBD_DESTROYED;
+ wake_up_all(&info->wait_destroy);
+}
+
+static int smbd_process_disconnected(struct smbd_connection *info)
+{
+ schedule_work(&info->destroy_work);
+ return 0;
+}
+
+static void smbd_disconnect_rdma_work(struct work_struct *work)
+{
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, disconnect_work);
+
+ if (info->transport_status == SMBD_CONNECTED) {
+ info->transport_status = SMBD_DISCONNECTING;
+ rdma_disconnect(info->id);
+ }
+}
+
+static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
+{
+ queue_work(info->workqueue, &info->disconnect_work);
+}
+
+/* Upcall from RDMA CM */
+static int smbd_conn_upcall(
+ struct rdma_cm_id *id, struct rdma_cm_event *event)
+{
+ struct smbd_connection *info = id->context;
+
+ log_rdma_event(INFO, "event=%d status=%d\n",
+ event->event, event->status);
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ info->ri_rc = 0;
+ complete(&info->ri_done);
+ break;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ info->ri_rc = -EHOSTUNREACH;
+ complete(&info->ri_done);
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ info->ri_rc = -ENETUNREACH;
+ complete(&info->ri_done);
+ break;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ log_rdma_event(INFO, "connected event=%d\n", event->event);
+ info->transport_status = SMBD_CONNECTED;
+ wake_up_interruptible(&info->conn_wait);
+ break;
+
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_REJECTED:
+ log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
+ info->transport_status = SMBD_DISCONNECTED;
+ wake_up_interruptible(&info->conn_wait);
+ break;
+
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ case RDMA_CM_EVENT_DISCONNECTED:
+ /* This happens when we fail the negotiation */
+ if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
+ info->transport_status = SMBD_DISCONNECTED;
+ wake_up(&info->conn_wait);
+ break;
+ }
+
+ info->transport_status = SMBD_DISCONNECTED;
+ smbd_process_disconnected(info);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
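+
+/*
+ * Typical event sequence on a successful connect, as handled above:
+ * RDMA_CM_EVENT_ADDR_RESOLVED and RDMA_CM_EVENT_ROUTE_RESOLVED each
+ * complete ri_done for the two waits in smbd_create_id(), then
+ * RDMA_CM_EVENT_ESTABLISHED sets transport_status to SMBD_CONNECTED and
+ * wakes the connect wait in _smbd_get_connection().
+ */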
+
+/* Upcall from RDMA QP */
+static void
+smbd_qp_async_error_upcall(struct ib_event *event, void *context)
+{
+ struct smbd_connection *info = context;
+
+ log_rdma_event(ERR, "%s on device %s info %p\n",
+ ib_event_msg(event->event), event->device->name, info);
+
+ switch (event->event) {
+ case IB_EVENT_CQ_ERR:
+ case IB_EVENT_QP_FATAL:
+ smbd_disconnect_rdma_connection(info);
+
+ default:
+ break;
+ }
+}
+
+static inline void *smbd_request_payload(struct smbd_request *request)
+{
+ return (void *)request->packet;
+}
+
+static inline void *smbd_response_payload(struct smbd_response *response)
+{
+ return (void *)response->packet;
+}
+
+/* Called when a RDMA send is done */
+static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ int i;
+ struct smbd_request *request =
+ container_of(wc->wr_cqe, struct smbd_request, cqe);
+
+ log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
+ request, wc->status);
+
+ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+ log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
+ wc->status, wc->opcode);
+ smbd_disconnect_rdma_connection(request->info);
+ }
+
+ for (i = 0; i < request->num_sge; i++)
+ ib_dma_unmap_single(request->info->id->device,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+
+ if (request->has_payload) {
+ if (atomic_dec_and_test(&request->info->send_payload_pending))
+ wake_up(&request->info->wait_send_payload_pending);
+ } else {
+ if (atomic_dec_and_test(&request->info->send_pending))
+ wake_up(&request->info->wait_send_pending);
+ }
+
+ mempool_free(request, request->info->request_mempool);
+}
+
+static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
+{
+ log_rdma_event(INFO, "resp message min_version %u max_version %u "
+ "negotiated_version %u credits_requested %u "
+ "credits_granted %u status %u max_readwrite_size %u "
+ "preferred_send_size %u max_receive_size %u "
+ "max_fragmented_size %u\n",
+ resp->min_version, resp->max_version, resp->negotiated_version,
+ resp->credits_requested, resp->credits_granted, resp->status,
+ resp->max_readwrite_size, resp->preferred_send_size,
+ resp->max_receive_size, resp->max_fragmented_size);
+}
+
+/*
+ * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
+ * response, packet_length: the negotiation response message
+ * return value: true if negotiation succeeded, false if it failed
+ */
+static bool process_negotiation_response(
+ struct smbd_response *response, int packet_length)
+{
+ struct smbd_connection *info = response->info;
+ struct smbd_negotiate_resp *packet = smbd_response_payload(response);
+
+ if (packet_length < sizeof(struct smbd_negotiate_resp)) {
+ log_rdma_event(ERR,
+ "error: packet_length=%d\n", packet_length);
+ return false;
+ }
+
+ if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
+ log_rdma_event(ERR, "error: negotiated_version=%x\n",
+ le16_to_cpu(packet->negotiated_version));
+ return false;
+ }
+ info->protocol = le16_to_cpu(packet->negotiated_version);
+
+ if (packet->credits_requested == 0) {
+ log_rdma_event(ERR, "error: credits_requested==0\n");
+ return false;
+ }
+ info->receive_credit_target = le16_to_cpu(packet->credits_requested);
+
+ if (packet->credits_granted == 0) {
+ log_rdma_event(ERR, "error: credits_granted==0\n");
+ return false;
+ }
+ atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));
+
+ atomic_set(&info->receive_credits, 0);
+
+ if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
+ log_rdma_event(ERR, "error: preferred_send_size=%d\n",
+ le32_to_cpu(packet->preferred_send_size));
+ return false;
+ }
+ info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
+
+ if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
+ log_rdma_event(ERR, "error: max_receive_size=%d\n",
+ le32_to_cpu(packet->max_receive_size));
+ return false;
+ }
+ info->max_send_size = min_t(int, info->max_send_size,
+ le32_to_cpu(packet->max_receive_size));
+
+ if (le32_to_cpu(packet->max_fragmented_size) <
+ SMBD_MIN_FRAGMENTED_SIZE) {
+ log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
+ le32_to_cpu(packet->max_fragmented_size));
+ return false;
+ }
+ info->max_fragmented_send_size =
+ le32_to_cpu(packet->max_fragmented_size);
+ info->rdma_readwrite_threshold =
+ rdma_readwrite_threshold > info->max_fragmented_send_size ?
+ info->max_fragmented_send_size :
+ rdma_readwrite_threshold;
+
+
+ info->max_readwrite_size = min_t(u32,
+ le32_to_cpu(packet->max_readwrite_size),
+ info->max_frmr_depth * PAGE_SIZE);
+ info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
+
+ return true;
+}
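+
+/*
+ * Worked example with hypothetical numbers: if we offered
+ * max_receive_size = 8192 and the server responds with
+ * preferred_send_size = 1364, max_receive_size = 8192 and
+ * max_fragmented_size = 131072, then this function shrinks our
+ * max_receive_size to 1364, clamps max_send_size to at most 8192, and
+ * sets max_fragmented_send_size to 131072 before deriving
+ * rdma_readwrite_threshold and max_readwrite_size.
+ */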
+
+/*
+ * Check and schedule to send an immediate packet
+ * This is used to extend credits to the remote peer to keep the transport busy
+ */
+static void check_and_send_immediate(struct smbd_connection *info)
+{
+ if (info->transport_status != SMBD_CONNECTED)
+ return;
+
+ info->send_immediate = true;
+
+ /*
+ * Promptly send a packet if our peer is running low on receive
+ * credits
+ */
+ if (atomic_read(&info->receive_credits) <
+ info->receive_credit_target - 1)
+ queue_delayed_work(
+ info->workqueue, &info->send_immediate_work, 0);
+}
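+
+/*
+ * For example, if the peer negotiated receive_credit_target = 255, the
+ * immediate send work is queued as soon as receive_credits drops below
+ * 254, so freed receive buffers are turned into new credits before the
+ * peer can stall on an empty credit pool.
+ */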
+
+static void smbd_post_send_credits(struct work_struct *work)
+{
+ int ret = 0;
+ int use_receive_queue = 1;
+ int rc;
+ struct smbd_response *response;
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection,
+ post_send_credits_work);
+
+ if (info->transport_status != SMBD_CONNECTED) {
+ wake_up(&info->wait_receive_queues);
+ return;
+ }
+
+ if (info->receive_credit_target >
+ atomic_read(&info->receive_credits)) {
+ while (true) {
+ if (use_receive_queue)
+ response = get_receive_buffer(info);
+ else
+ response = get_empty_queue_buffer(info);
+ if (!response) {
+ /* now switch to the empty packet queue */
+ if (use_receive_queue) {
+ use_receive_queue = 0;
+ continue;
+ } else
+ break;
+ }
+
+ response->type = SMBD_TRANSFER_DATA;
+ response->first_segment = false;
+ rc = smbd_post_recv(info, response);
+ if (rc) {
+ log_rdma_recv(ERR,
+ "post_recv failed rc=%d\n", rc);
+ put_receive_buffer(info, response);
+ break;
+ }
+
+ ret++;
+ }
+ }
+
+ spin_lock(&info->lock_new_credits_offered);
+ info->new_credits_offered += ret;
+ spin_unlock(&info->lock_new_credits_offered);
+
+ atomic_add(ret, &info->receive_credits);
+
+ /* Check if we can post new receive and grant credits to peer */
+ check_and_send_immediate(info);
+}
+
+static void smbd_recv_done_work(struct work_struct *work)
+{
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, recv_done_work);
+
+ /*
+ * We may have new send credits granted from remote peer
+ * If any sender is blocked on lack of credits, unblock it
+ */
+ if (atomic_read(&info->send_credits))
+ wake_up_interruptible(&info->wait_send_queue);
+
+ /*
+ * Check if we need to send something to remote peer to
+ * grant more credits or respond to KEEP_ALIVE packet
+ */
+ check_and_send_immediate(info);
+}
+
+/* Called from softirq, when recv is done */
+static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct smbd_data_transfer *data_transfer;
+ struct smbd_response *response =
+ container_of(wc->wr_cqe, struct smbd_response, cqe);
+ struct smbd_connection *info = response->info;
+ int data_length = 0;
+
+ log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d "
+ "byte_len=%d pkey_index=%x\n",
+ response, response->type, wc->status, wc->opcode,
+ wc->byte_len, wc->pkey_index);
+
+ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+ log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
+ wc->status, wc->opcode);
+ smbd_disconnect_rdma_connection(info);
+ goto error;
+ }
+
+ ib_dma_sync_single_for_cpu(
+ wc->qp->device,
+ response->sge.addr,
+ response->sge.length,
+ DMA_FROM_DEVICE);
+
+ switch (response->type) {
+ /* SMBD negotiation response */
+ case SMBD_NEGOTIATE_RESP:
+ dump_smbd_negotiate_resp(smbd_response_payload(response));
+ info->full_packet_received = true;
+ info->negotiate_done =
+ process_negotiation_response(response, wc->byte_len);
+ complete(&info->negotiate_completion);
+ break;
+
+ /* SMBD data transfer packet */
+ case SMBD_TRANSFER_DATA:
+ data_transfer = smbd_response_payload(response);
+ data_length = le32_to_cpu(data_transfer->data_length);
+
+ /*
+ * If this is a packet with a data payload, place the data in the
+ * reassembly queue and wake up the reading thread
+ */
+ if (data_length) {
+ if (info->full_packet_received)
+ response->first_segment = true;
+
+ if (le32_to_cpu(data_transfer->remaining_data_length))
+ info->full_packet_received = false;
+ else
+ info->full_packet_received = true;
+
+ enqueue_reassembly(
+ info,
+ response,
+ data_length);
+ } else
+ put_empty_packet(info, response);
+
+ if (data_length)
+ wake_up_interruptible(&info->wait_reassembly_queue);
+
+ atomic_dec(&info->receive_credits);
+ info->receive_credit_target =
+ le16_to_cpu(data_transfer->credits_requested);
+ atomic_add(le16_to_cpu(data_transfer->credits_granted),
+ &info->send_credits);
+
+ log_incoming(INFO, "data flags %d data_offset %d "
+ "data_length %d remaining_data_length %d\n",
+ le16_to_cpu(data_transfer->flags),
+ le32_to_cpu(data_transfer->data_offset),
+ le32_to_cpu(data_transfer->data_length),
+ le32_to_cpu(data_transfer->remaining_data_length));
+
+ /* Send a KEEP_ALIVE response right away if requested */
+ info->keep_alive_requested = KEEP_ALIVE_NONE;
+ if (le16_to_cpu(data_transfer->flags) &
+ SMB_DIRECT_RESPONSE_REQUESTED) {
+ info->keep_alive_requested = KEEP_ALIVE_PENDING;
+ }
+
+ queue_work(info->workqueue, &info->recv_done_work);
+ return;
+
+ default:
+ log_rdma_recv(ERR,
+ "unexpected response type=%d\n", response->type);
+ }
+
+error:
+ put_receive_buffer(info, response);
+}
+
+static struct rdma_cm_id *smbd_create_id(
+ struct smbd_connection *info,
+ struct sockaddr *dstaddr, int port)
+{
+ struct rdma_cm_id *id;
+ int rc;
+ __be16 *sport;
+
+ id = rdma_create_id(&init_net, smbd_conn_upcall, info,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(id)) {
+ rc = PTR_ERR(id);
+ log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
+ return id;
+ }
+
+ if (dstaddr->sa_family == AF_INET6)
+ sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
+ else
+ sport = &((struct sockaddr_in *)dstaddr)->sin_port;
+
+ *sport = htons(port);
+
+ init_completion(&info->ri_done);
+ info->ri_rc = -ETIMEDOUT;
+
+ rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
+ RDMA_RESOLVE_TIMEOUT);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
+ goto out;
+ }
+ wait_for_completion_interruptible_timeout(
+ &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ rc = info->ri_rc;
+ if (rc) {
+ log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
+ goto out;
+ }
+
+ info->ri_rc = -ETIMEDOUT;
+ rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
+ goto out;
+ }
+ wait_for_completion_interruptible_timeout(
+ &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ rc = info->ri_rc;
+ if (rc) {
+ log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
+ goto out;
+ }
+
+ return id;
+
+out:
+ rdma_destroy_id(id);
+ return ERR_PTR(rc);
+}
+
+/*
+ * Test if FRWR (Fast Registration Work Requests) is supported on the device
+ * This implementation requires FRWR on RDMA read/write
+ * return value: true if it is supported
+ */
+static bool frwr_is_supported(struct ib_device_attr *attrs)
+{
+ if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
+ return false;
+ if (attrs->max_fast_reg_page_list_len == 0)
+ return false;
+ return true;
+}
+
+static int smbd_ia_open(
+ struct smbd_connection *info,
+ struct sockaddr *dstaddr, int port)
+{
+ int rc;
+
+ info->id = smbd_create_id(info, dstaddr, port);
+ if (IS_ERR(info->id)) {
+ rc = PTR_ERR(info->id);
+ goto out1;
+ }
+
+ if (!frwr_is_supported(&info->id->device->attrs)) {
+ log_rdma_event(ERR,
+ "Fast Registration Work Requests "
+ "(FRWR) is not supported\n");
+ log_rdma_event(ERR,
+ "Device capability flags = %llx "
+ "max_fast_reg_page_list_len = %u\n",
+ info->id->device->attrs.device_cap_flags,
+ info->id->device->attrs.max_fast_reg_page_list_len);
+ rc = -EPROTONOSUPPORT;
+ goto out2;
+ }
+ info->max_frmr_depth = min_t(int,
+ smbd_max_frmr_depth,
+ info->id->device->attrs.max_fast_reg_page_list_len);
+ info->mr_type = IB_MR_TYPE_MEM_REG;
+ if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+ info->mr_type = IB_MR_TYPE_SG_GAPS;
+
+ info->pd = ib_alloc_pd(info->id->device, 0);
+ if (IS_ERR(info->pd)) {
+ rc = PTR_ERR(info->pd);
+ log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
+ goto out2;
+ }
+
+ return 0;
+
+out2:
+ rdma_destroy_id(info->id);
+ info->id = NULL;
+
+out1:
+ return rc;
+}
+
+/*
+ * Send a negotiation request message to the peer
+ * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
+ * After negotiation, the transport is connected and ready for
+ * carrying upper layer SMB payload
+ */
+static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+{
+ struct ib_send_wr send_wr, *send_wr_fail;
+ int rc = -ENOMEM;
+ struct smbd_request *request;
+ struct smbd_negotiate_req *packet;
+
+ request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+ if (!request)
+ return rc;
+
+ request->info = info;
+
+ packet = smbd_request_payload(request);
+ packet->min_version = cpu_to_le16(SMBD_V1);
+ packet->max_version = cpu_to_le16(SMBD_V1);
+ packet->reserved = 0;
+ packet->credits_requested = cpu_to_le16(info->send_credit_target);
+ packet->preferred_send_size = cpu_to_le32(info->max_send_size);
+ packet->max_receive_size = cpu_to_le32(info->max_receive_size);
+ packet->max_fragmented_size =
+ cpu_to_le32(info->max_fragmented_recv_size);
+
+ request->num_sge = 1;
+ request->sge[0].addr = ib_dma_map_single(
+ info->id->device, (void *)packet,
+ sizeof(*packet), DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+ rc = -EIO;
+ goto dma_mapping_failed;
+ }
+
+ request->sge[0].length = sizeof(*packet);
+ request->sge[0].lkey = info->pd->local_dma_lkey;
+
+ ib_dma_sync_single_for_device(
+ info->id->device, request->sge[0].addr,
+ request->sge[0].length, DMA_TO_DEVICE);
+
+ request->cqe.done = send_done;
+
+ send_wr.next = NULL;
+ send_wr.wr_cqe = &request->cqe;
+ send_wr.sg_list = request->sge;
+ send_wr.num_sge = request->num_sge;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = IB_SEND_SIGNALED;
+
+ log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
+ request->sge[0].addr,
+ request->sge[0].length, request->sge[0].lkey);
+
+ request->has_payload = false;
+ atomic_inc(&info->send_pending);
+ rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+ if (!rc)
+ return 0;
+
+ /* if we reach here, post send failed */
+ log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+ atomic_dec(&info->send_pending);
+ ib_dma_unmap_single(info->id->device, request->sge[0].addr,
+ request->sge[0].length, DMA_TO_DEVICE);
+
+dma_mapping_failed:
+ mempool_free(request, info->request_mempool);
+ return rc;
+}
+
+/*
+ * Extend the credits to remote peer
+ * This implements [MS-SMBD] 3.1.5.9
+ * The idea is that we should extend credits to remote peer as quickly as
+ * it's allowed, to maintain data flow. We allocate as many receive
+ * buffers as possible, and extend the receive credits to the remote peer
+ * return value: the new credits being granted.
+ */
+static int manage_credits_prior_sending(struct smbd_connection *info)
+{
+ int new_credits;
+
+ spin_lock(&info->lock_new_credits_offered);
+ new_credits = info->new_credits_offered;
+ info->new_credits_offered = 0;
+ spin_unlock(&info->lock_new_credits_offered);
+
+ return new_credits;
+}
+
+/*
+ * Check if we need to send a KEEP_ALIVE message
+ * The idle connection timer triggers a KEEP_ALIVE message when it expires.
+ * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flags to have the peer
+ * send back a response.
+ * return value:
+ * 1: if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
+ * 0: otherwise
+ */
+static int manage_keep_alive_before_sending(struct smbd_connection *info)
+{
+ if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
+ info->keep_alive_requested = KEEP_ALIVE_SENT;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Build and prepare the SMBD packet header
+ * This function waits for available send credits and builds a SMBD packet
+ * header. The caller can then optionally append a payload to the packet
+ * after the header
+ * input values
+ * size: the size of the payload
+ * remaining_data_length: remaining data to send if this is part of a
+ * fragmented packet
+ * output values
+ * request_out: the request allocated from this function
+ * return values: 0 on success, otherwise actual error code returned
+ */
+static int smbd_create_header(struct smbd_connection *info,
+ int size, int remaining_data_length,
+ struct smbd_request **request_out)
+{
+ struct smbd_request *request;
+ struct smbd_data_transfer *packet;
+ int header_length;
+ int rc;
+
+ /* Wait for send credits. A SMBD packet needs one credit */
+ rc = wait_event_interruptible(info->wait_send_queue,
+ atomic_read(&info->send_credits) > 0 ||
+ info->transport_status != SMBD_CONNECTED);
+ if (rc)
+ return rc;
+
+ if (info->transport_status != SMBD_CONNECTED) {
+ log_outgoing(ERR, "disconnected not sending\n");
+ return -ENOENT;
+ }
+ atomic_dec(&info->send_credits);
+
+ request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+ if (!request) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ request->info = info;
+
+ /* Fill in the packet header */
+ packet = smbd_request_payload(request);
+ packet->credits_requested = cpu_to_le16(info->send_credit_target);
+ packet->credits_granted =
+ cpu_to_le16(manage_credits_prior_sending(info));
+ info->send_immediate = false;
+
+ packet->flags = 0;
+ if (manage_keep_alive_before_sending(info))
+ packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
+
+ packet->reserved = 0;
+ if (!size)
+ packet->data_offset = 0;
+ else
+ packet->data_offset = cpu_to_le32(24);
+ packet->data_length = cpu_to_le32(size);
+ packet->remaining_data_length = cpu_to_le32(remaining_data_length);
+ packet->padding = 0;
+
+ log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
+ "data_offset=%d data_length=%d remaining_data_length=%d\n",
+ le16_to_cpu(packet->credits_requested),
+ le16_to_cpu(packet->credits_granted),
+ le32_to_cpu(packet->data_offset),
+ le32_to_cpu(packet->data_length),
+ le32_to_cpu(packet->remaining_data_length));
+
+ /* Map the packet to DMA */
+ header_length = sizeof(struct smbd_data_transfer);
+ /* If this is a packet without payload, don't send padding */
+ if (!size)
+ header_length = offsetof(struct smbd_data_transfer, padding);
+
+ request->num_sge = 1;
+ request->sge[0].addr = ib_dma_map_single(info->id->device,
+ (void *)packet,
+ header_length,
+ DMA_BIDIRECTIONAL);
+ if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+ mempool_free(request, info->request_mempool);
+ rc = -EIO;
+ goto err;
+ }
+
+ request->sge[0].length = header_length;
+ request->sge[0].lkey = info->pd->local_dma_lkey;
+
+ *request_out = request;
+ return 0;
+
+err:
+ atomic_inc(&info->send_credits);
+ return rc;
+}
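+
+/*
+ * Wire layout produced by smbd_create_header() for a data packet:
+ *
+ *   [smbd_data_transfer header, 24 bytes][payload, data_length bytes]
+ *
+ * hence data_offset is set to 24 whenever there is a payload, while a
+ * zero-length packet cuts header_length at
+ * offsetof(struct smbd_data_transfer, padding) and sends no padding.
+ */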
+
+static void smbd_destroy_header(struct smbd_connection *info,
+ struct smbd_request *request)
+{
+
+ ib_dma_unmap_single(info->id->device,
+ request->sge[0].addr,
+ request->sge[0].length,
+ DMA_TO_DEVICE);
+ mempool_free(request, info->request_mempool);
+ atomic_inc(&info->send_credits);
+}
+
+/* Post the send request */
+static int smbd_post_send(struct smbd_connection *info,
+ struct smbd_request *request, bool has_payload)
+{
+ struct ib_send_wr send_wr, *send_wr_fail;
+ int rc, i;
+
+ for (i = 0; i < request->num_sge; i++) {
+ log_rdma_send(INFO,
+ "rdma_request sge[%d] addr=%llu legnth=%u\n",
+ i, request->sge[0].addr, request->sge[0].length);
+ ib_dma_sync_single_for_device(
+ info->id->device,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+ }
+
+ request->cqe.done = send_done;
+
+ send_wr.next = NULL;
+ send_wr.wr_cqe = &request->cqe;
+ send_wr.sg_list = request->sge;
+ send_wr.num_sge = request->num_sge;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = IB_SEND_SIGNALED;
+
+ if (has_payload) {
+ request->has_payload = true;
+ atomic_inc(&info->send_payload_pending);
+ } else {
+ request->has_payload = false;
+ atomic_inc(&info->send_pending);
+ }
+
+ rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+ if (rc) {
+ log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+ if (has_payload) {
+ if (atomic_dec_and_test(&info->send_payload_pending))
+ wake_up(&info->wait_send_payload_pending);
+ } else {
+ if (atomic_dec_and_test(&info->send_pending))
+ wake_up(&info->wait_send_pending);
+ }
+ } else
+ /* Reset timer for idle connection after packet is sent */
+ mod_delayed_work(info->workqueue, &info->idle_timer_work,
+ info->keep_alive_interval*HZ);
+
+ return rc;
+}
+
+static int smbd_post_send_sgl(struct smbd_connection *info,
+ struct scatterlist *sgl, int data_length, int remaining_data_length)
+{
+ int num_sgs;
+ int i, rc;
+ struct smbd_request *request;
+ struct scatterlist *sg;
+
+ rc = smbd_create_header(
+ info, data_length, remaining_data_length, &request);
+ if (rc)
+ return rc;
+
+ num_sgs = sgl ? sg_nents(sgl) : 0;
+ for_each_sg(sgl, sg, num_sgs, i) {
+ request->sge[i+1].addr =
+ ib_dma_map_page(info->id->device, sg_page(sg),
+ sg->offset, sg->length, DMA_BIDIRECTIONAL);
+ if (ib_dma_mapping_error(
+ info->id->device, request->sge[i+1].addr)) {
+ rc = -EIO;
+ request->sge[i+1].addr = 0;
+ goto dma_mapping_failure;
+ }
+ request->sge[i+1].length = sg->length;
+ request->sge[i+1].lkey = info->pd->local_dma_lkey;
+ request->num_sge++;
+ }
+
+ rc = smbd_post_send(info, request, data_length);
+ if (!rc)
+ return 0;
+
+dma_mapping_failure:
+ for (i = 1; i < request->num_sge; i++)
+ if (request->sge[i].addr)
+ ib_dma_unmap_page(info->id->device,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+ smbd_destroy_header(info, request);
+ return rc;
+}
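+
+/*
+ * Note the sge indexing above: sge[0] always carries the SMBD header
+ * built by smbd_create_header(), so payload fragments are mapped into
+ * sge[i + 1] and the unmap loop on failure starts at index 1.
+ */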
+
+/*
+ * Send a page
+ * page: the page to send
+ * offset: offset in the page to send
+ * size: length in the page to send
+ * remaining_data_length: remaining data to send in this payload
+ */
+static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
+ unsigned long offset, size_t size, int remaining_data_length)
+{
+ struct scatterlist sgl;
+
+ sg_init_table(&sgl, 1);
+ sg_set_page(&sgl, page, size, offset);
+
+ return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
+}
+
+/*
+ * Send an empty message
+ * An empty message is used to extend credits to the peer for keep alive
+ * while there is no upper layer payload to send at the time
+ */
+static int smbd_post_send_empty(struct smbd_connection *info)
+{
+ info->count_send_empty++;
+ return smbd_post_send_sgl(info, NULL, 0, 0);
+}
+
+/*
+ * Send a data buffer
+ * iov: the iov array describing the data buffers
+ * n_vec: number of entries in the iov array
+ * remaining_data_length: remaining data to send following this packet
+ * in a segmented SMBD packet
+ */
+static int smbd_post_send_data(
+ struct smbd_connection *info, struct kvec *iov, int n_vec,
+ int remaining_data_length)
+{
+ int i;
+ u32 data_length = 0;
+ struct scatterlist sgl[SMBDIRECT_MAX_SGE];
+
+ if (n_vec > SMBDIRECT_MAX_SGE) {
+ cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
+ return -ENOMEM;
+ }
+
+ sg_init_table(sgl, n_vec);
+ for (i = 0; i < n_vec; i++) {
+ data_length += iov[i].iov_len;
+ sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
+ }
+
+ return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
+}
+
+/*
+ * Post a receive request to the transport
+ * The remote peer can only send data when a receive request is posted
+ * The interaction is controlled by the send/receive credit system
+ */
+static int smbd_post_recv(
+ struct smbd_connection *info, struct smbd_response *response)
+{
+ struct ib_recv_wr recv_wr, *recv_wr_fail = NULL;
+ int rc = -EIO;
+
+ response->sge.addr = ib_dma_map_single(
+ info->id->device, response->packet,
+ info->max_receive_size, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(info->id->device, response->sge.addr))
+ return rc;
+
+ response->sge.length = info->max_receive_size;
+ response->sge.lkey = info->pd->local_dma_lkey;
+
+ response->cqe.done = recv_done;
+
+ recv_wr.wr_cqe = &response->cqe;
+ recv_wr.next = NULL;
+ recv_wr.sg_list = &response->sge;
+ recv_wr.num_sge = 1;
+
+ rc = ib_post_recv(info->id->qp, &recv_wr, &recv_wr_fail);
+ if (rc) {
+ ib_dma_unmap_single(info->id->device, response->sge.addr,
+ response->sge.length, DMA_FROM_DEVICE);
+
+ log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
+static int smbd_negotiate(struct smbd_connection *info)
+{
+ int rc;
+ struct smbd_response *response = get_receive_buffer(info);
+
+ response->type = SMBD_NEGOTIATE_RESP;
+ rc = smbd_post_recv(info, response);
+ log_rdma_event(INFO,
+ "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x "
+ "iov.lkey=%x\n",
+ rc, response->sge.addr,
+ response->sge.length, response->sge.lkey);
+ if (rc)
+ return rc;
+
+ init_completion(&info->negotiate_completion);
+ info->negotiate_done = false;
+ rc = smbd_post_send_negotiate_req(info);
+ if (rc)
+ return rc;
+
+ rc = wait_for_completion_interruptible_timeout(
+ &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
+ log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);
+
+ if (info->negotiate_done)
+ return 0;
+
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ else if (rc == -ERESTARTSYS)
+ rc = -EINTR;
+ else
+ rc = -ENOTCONN;
+
+ return rc;
+}
+
+static void put_empty_packet(
+ struct smbd_connection *info, struct smbd_response *response)
+{
+ spin_lock(&info->empty_packet_queue_lock);
+ list_add_tail(&response->list, &info->empty_packet_queue);
+ info->count_empty_packet_queue++;
+ spin_unlock(&info->empty_packet_queue_lock);
+
+ queue_work(info->workqueue, &info->post_send_credits_work);
+}
+
+/*
+ * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
+ * This is a queue for reassembling upper layer payload and presenting it to
+ * the upper layer. All incoming payload goes to the reassembly queue,
+ * regardless of whether reassembly is required. The upper layer code reads
+ * from the queue for all incoming payloads.
+ * Put a received packet to the reassembly queue
+ * response: the packet received
+ * data_length: the size of payload in this packet
+ */
+static void enqueue_reassembly(
+ struct smbd_connection *info,
+ struct smbd_response *response,
+ int data_length)
+{
+ spin_lock(&info->reassembly_queue_lock);
+ list_add_tail(&response->list, &info->reassembly_queue);
+ info->reassembly_queue_length++;
+ /*
+ * Make sure reassembly_data_length is updated after list and
+ * reassembly_queue_length are updated. On the dequeue side
+ * reassembly_data_length is checked without a lock to determine
+ * if reassembly_queue_length and the list are up to date
+ */
+ virt_wmb();
+ info->reassembly_data_length += data_length;
+ spin_unlock(&info->reassembly_queue_lock);
+ info->count_reassembly_queue++;
+ info->count_enqueue_reassembly_queue++;
+}
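+
+/*
+ * The virt_wmb() above pairs with the virt_rmb() in smbd_recv_buf(): the
+ * reader checks reassembly_data_length locklessly, issues the read
+ * barrier, and only then trusts reassembly_queue_length and the list
+ * entries that were published here before the length update.
+ */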
+
+/*
+ * Get the first entry at the front of reassembly queue
+ * Caller is responsible for locking
+ * return value: the first entry if any, NULL if queue is empty
+ */
+static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
+{
+ struct smbd_response *ret = NULL;
+
+ if (!list_empty(&info->reassembly_queue)) {
+ ret = list_first_entry(
+ &info->reassembly_queue,
+ struct smbd_response, list);
+ }
+ return ret;
+}
+
+static struct smbd_response *get_empty_queue_buffer(
+ struct smbd_connection *info)
+{
+ struct smbd_response *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
+ if (!list_empty(&info->empty_packet_queue)) {
+ ret = list_first_entry(
+ &info->empty_packet_queue,
+ struct smbd_response, list);
+ list_del(&ret->list);
+ info->count_empty_packet_queue--;
+ }
+ spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Get a receive buffer
+ * For each remote send, we need to post a receive. The receive buffers are
+ * pre-allocated when the transport is established.
+ * return value: the receive buffer, NULL if none is available
+ */
+static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
+{
+ struct smbd_response *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->receive_queue_lock, flags);
+ if (!list_empty(&info->receive_queue)) {
+ ret = list_first_entry(
+ &info->receive_queue,
+ struct smbd_response, list);
+ list_del(&ret->list);
+ info->count_receive_queue--;
+ info->count_get_receive_buffer++;
+ }
+ spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Return a receive buffer
+ * When a receive buffer is returned, we can post a new receive and extend
+ * more receive credits to remote peer. This is done immediately after a
+ * receive buffer is returned.
+ */
+static void put_receive_buffer(
+ struct smbd_connection *info, struct smbd_response *response)
+{
+ unsigned long flags;
+
+ ib_dma_unmap_single(info->id->device, response->sge.addr,
+ response->sge.length, DMA_FROM_DEVICE);
+
+ spin_lock_irqsave(&info->receive_queue_lock, flags);
+ list_add_tail(&response->list, &info->receive_queue);
+ info->count_receive_queue++;
+ info->count_put_receive_buffer++;
+ spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+
+ queue_work(info->workqueue, &info->post_send_credits_work);
+}
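+
+/*
+ * Receive buffer life cycle, as implemented above: a buffer leaves the
+ * queue via get_receive_buffer(), is posted with smbd_post_recv(),
+ * completes in recv_done(), and eventually comes back here, where
+ * queueing post_send_credits_work lets smbd_post_send_credits() repost
+ * it and offer the freed credit back to the peer.
+ */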
+
+/* Preallocate all receive buffers on transport establishment */
+static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
+{
+ int i;
+ struct smbd_response *response;
+
+ INIT_LIST_HEAD(&info->reassembly_queue);
+ spin_lock_init(&info->reassembly_queue_lock);
+ info->reassembly_data_length = 0;
+ info->reassembly_queue_length = 0;
+
+ INIT_LIST_HEAD(&info->receive_queue);
+ spin_lock_init(&info->receive_queue_lock);
+ info->count_receive_queue = 0;
+
+ INIT_LIST_HEAD(&info->empty_packet_queue);
+ spin_lock_init(&info->empty_packet_queue_lock);
+ info->count_empty_packet_queue = 0;
+
+ init_waitqueue_head(&info->wait_receive_queues);
+
+ for (i = 0; i < num_buf; i++) {
+ response = mempool_alloc(info->response_mempool, GFP_KERNEL);
+ if (!response)
+ goto allocate_failed;
+
+ response->info = info;
+ list_add_tail(&response->list, &info->receive_queue);
+ info->count_receive_queue++;
+ }
+
+ return 0;
+
+allocate_failed:
+ while (!list_empty(&info->receive_queue)) {
+ response = list_first_entry(
+ &info->receive_queue,
+ struct smbd_response, list);
+ list_del(&response->list);
+ info->count_receive_queue--;
+
+ mempool_free(response, info->response_mempool);
+ }
+ return -ENOMEM;
+}
+
+static void destroy_receive_buffers(struct smbd_connection *info)
+{
+ struct smbd_response *response;
+
+ while ((response = get_receive_buffer(info)))
+ mempool_free(response, info->response_mempool);
+
+ while ((response = get_empty_queue_buffer(info)))
+ mempool_free(response, info->response_mempool);
+}
+
+/*
+ * Check and send an immediate or keep alive packet
+ * The conditions to send those packets are defined in [MS-SMBD] 3.1.1.1
+ * Connection.KeepaliveRequested and Connection.SendImmediate
+ * The idea is to extend credits to the server as soon as they become available
+ */
+static void send_immediate_work(struct work_struct *work)
+{
+ struct smbd_connection *info = container_of(
+ work, struct smbd_connection,
+ send_immediate_work.work);
+
+ if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
+ info->send_immediate) {
+ log_keep_alive(INFO, "send an empty message\n");
+ smbd_post_send_empty(info);
+ }
+}
+
+/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
+static void idle_connection_timer(struct work_struct *work)
+{
+ struct smbd_connection *info = container_of(
+ work, struct smbd_connection,
+ idle_timer_work.work);
+
+ if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
+ log_keep_alive(ERR,
+ "error status info->keep_alive_requested=%d\n",
+ info->keep_alive_requested);
+ smbd_disconnect_rdma_connection(info);
+ return;
+ }
+
+ log_keep_alive(INFO, "about to send an empty idle message\n");
+ smbd_post_send_empty(info);
+
+ /* Setup the next idle timeout work */
+ queue_delayed_work(info->workqueue, &info->idle_timer_work,
+ info->keep_alive_interval*HZ);
+}
+
+/* Destroy this SMBD connection, called from upper layer */
+void smbd_destroy(struct smbd_connection *info)
+{
+ log_rdma_event(INFO, "destroying rdma session\n");
+
+ /* Kick off the disconnection process */
+ smbd_disconnect_rdma_connection(info);
+
+ log_rdma_event(INFO, "wait for transport being destroyed\n");
+ wait_event(info->wait_destroy,
+ info->transport_status == SMBD_DESTROYED);
+
+ destroy_workqueue(info->workqueue);
+ kfree(info);
+}
+
+/*
+ * Reconnect this SMBD connection, called from upper layer
+ * return value: 0 on success, or actual error code
+ */
+int smbd_reconnect(struct TCP_Server_Info *server)
+{
+ log_rdma_event(INFO, "reconnecting rdma session\n");
+
+ if (!server->smbd_conn) {
+ log_rdma_event(ERR, "rdma session already destroyed\n");
+ return -EINVAL;
+ }
+
+ /*
+ * This is possible if the transport is disconnected and we haven't received
+ * a notification from RDMA, but the upper layer has detected a timeout
+ */
+ if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
+ log_rdma_event(INFO, "disconnecting transport\n");
+ smbd_disconnect_rdma_connection(server->smbd_conn);
+ }
+
+ /* wait until the transport is destroyed */
+ wait_event(server->smbd_conn->wait_destroy,
+ server->smbd_conn->transport_status == SMBD_DESTROYED);
+
+ destroy_workqueue(server->smbd_conn->workqueue);
+ kfree(server->smbd_conn);
+
+ log_rdma_event(INFO, "creating rdma session\n");
+ server->smbd_conn = smbd_get_connection(
+ server, (struct sockaddr *) &server->dstaddr);
+
+ return server->smbd_conn ? 0 : -ENOENT;
+}
+
+static void destroy_caches_and_workqueue(struct smbd_connection *info)
+{
+ destroy_receive_buffers(info);
+ destroy_workqueue(info->workqueue);
+ mempool_destroy(info->response_mempool);
+ kmem_cache_destroy(info->response_cache);
+ mempool_destroy(info->request_mempool);
+ kmem_cache_destroy(info->request_cache);
+}
+
+#define MAX_NAME_LEN 80
+static int allocate_caches_and_workqueue(struct smbd_connection *info)
+{
+ char name[MAX_NAME_LEN];
+ int rc;
+
+ snprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
+ info->request_cache =
+ kmem_cache_create(
+ name,
+ sizeof(struct smbd_request) +
+ sizeof(struct smbd_data_transfer),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!info->request_cache)
+ return -ENOMEM;
+
+ info->request_mempool =
+ mempool_create(info->send_credit_target, mempool_alloc_slab,
+ mempool_free_slab, info->request_cache);
+ if (!info->request_mempool)
+ goto out1;
+
+ snprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
+ info->response_cache =
+ kmem_cache_create(
+ name,
+ sizeof(struct smbd_response) +
+ info->max_receive_size,
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!info->response_cache)
+ goto out2;
+
+ info->response_mempool =
+ mempool_create(info->receive_credit_max, mempool_alloc_slab,
+ mempool_free_slab, info->response_cache);
+ if (!info->response_mempool)
+ goto out3;
+
+ snprintf(name, MAX_NAME_LEN, "smbd_%p", info);
+ info->workqueue = create_workqueue(name);
+ if (!info->workqueue)
+ goto out4;
+
+ rc = allocate_receive_buffers(info, info->receive_credit_max);
+ if (rc) {
+ log_rdma_event(ERR, "failed to allocate receive buffers\n");
+ goto out5;
+ }
+
+ return 0;
+
+out5:
+ destroy_workqueue(info->workqueue);
+out4:
+ mempool_destroy(info->response_mempool);
+out3:
+ kmem_cache_destroy(info->response_cache);
+out2:
+ mempool_destroy(info->request_mempool);
+out1:
+ kmem_cache_destroy(info->request_cache);
+ return -ENOMEM;
+}
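+
+/*
+ * Pool sizing above follows the credit limits: request_mempool reserves
+ * send_credit_target requests (one per send credit that can be in
+ * flight) and response_mempool reserves receive_credit_max responses,
+ * matching the maximum number of receives ever posted at once.
+ */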
+
+/* Create a SMBD connection, called by upper layer */
+static struct smbd_connection *_smbd_get_connection(
+ struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
+{
+ int rc;
+ struct smbd_connection *info;
+ struct rdma_conn_param conn_param;
+ struct ib_qp_init_attr qp_attr;
+ struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
+ struct ib_port_immutable port_immutable;
+ u32 ird_ord_hdr[2];
+
+ info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ info->transport_status = SMBD_CONNECTING;
+ rc = smbd_ia_open(info, dstaddr, port);
+ if (rc) {
+ log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
+ goto create_id_failed;
+ }
+
+ if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
+ smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
+ log_rdma_event(ERR,
+ "consider lowering send_credit_target = %d. "
+ "Possible CQE overrun, device "
+ "reporting max_cpe %d max_qp_wr %d\n",
+ smbd_send_credit_target,
+ info->id->device->attrs.max_cqe,
+ info->id->device->attrs.max_qp_wr);
+ goto config_failed;
+ }
+
+ if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
+ smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
+ log_rdma_event(ERR,
+ "consider lowering receive_credit_max = %d. "
+ "Possible CQE overrun, device "
+ "reporting max_cpe %d max_qp_wr %d\n",
+ smbd_receive_credit_max,
+ info->id->device->attrs.max_cqe,
+ info->id->device->attrs.max_qp_wr);
+ goto config_failed;
+ }
+
+ info->receive_credit_max = smbd_receive_credit_max;
+ info->send_credit_target = smbd_send_credit_target;
+ info->max_send_size = smbd_max_send_size;
+ info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
+ info->max_receive_size = smbd_max_receive_size;
+ info->keep_alive_interval = smbd_keep_alive_interval;
+
+ if (info->id->device->attrs.max_sge < SMBDIRECT_MAX_SGE) {
+ log_rdma_event(ERR, "warning: device max_sge = %d too small\n",
+ info->id->device->attrs.max_sge);
+ log_rdma_event(ERR, "Queue Pair creation may fail\n");
+ }
+
+ info->send_cq = NULL;
+ info->recv_cq = NULL;
+ info->send_cq = ib_alloc_cq(info->id->device, info,
+ info->send_credit_target, 0, IB_POLL_SOFTIRQ);
+ if (IS_ERR(info->send_cq)) {
+ info->send_cq = NULL;
+ goto alloc_cq_failed;
+ }
+
+ info->recv_cq = ib_alloc_cq(info->id->device, info,
+ info->receive_credit_max, 0, IB_POLL_SOFTIRQ);
+ if (IS_ERR(info->recv_cq)) {
+ info->recv_cq = NULL;
+ goto alloc_cq_failed;
+ }
+
+ memset(&qp_attr, 0, sizeof(qp_attr));
+ qp_attr.event_handler = smbd_qp_async_error_upcall;
+ qp_attr.qp_context = info;
+ qp_attr.cap.max_send_wr = info->send_credit_target;
+ qp_attr.cap.max_recv_wr = info->receive_credit_max;
+ qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE;
+ qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE;
+ qp_attr.cap.max_inline_data = 0;
+ qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_attr.qp_type = IB_QPT_RC;
+ qp_attr.send_cq = info->send_cq;
+ qp_attr.recv_cq = info->recv_cq;
+ qp_attr.port_num = ~0;
+
+ rc = rdma_create_qp(info->id, info->pd, &qp_attr);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
+ goto create_qp_failed;
+ }
+
+ memset(&conn_param, 0, sizeof(conn_param));
+ conn_param.initiator_depth = 0;
+
+ conn_param.responder_resources =
+ info->id->device->attrs.max_qp_rd_atom
+ < SMBD_CM_RESPONDER_RESOURCES ?
+ info->id->device->attrs.max_qp_rd_atom :
+ SMBD_CM_RESPONDER_RESOURCES;
+ info->responder_resources = conn_param.responder_resources;
+ log_rdma_mr(INFO, "responder_resources=%d\n",
+ info->responder_resources);
+
+ /* Need to send IRD/ORD in private data for iWARP */
+ info->id->device->get_port_immutable(
+ info->id->device, info->id->port_num, &port_immutable);
+ if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
+ ird_ord_hdr[0] = info->responder_resources;
+ ird_ord_hdr[1] = 1;
+ conn_param.private_data = ird_ord_hdr;
+ conn_param.private_data_len = sizeof(ird_ord_hdr);
+ } else {
+ conn_param.private_data = NULL;
+ conn_param.private_data_len = 0;
+ }
+
+ conn_param.retry_count = SMBD_CM_RETRY;
+ conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
+ conn_param.flow_control = 0;
+ init_waitqueue_head(&info->wait_destroy);
+
+ log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
+ &addr_in->sin_addr, port);
+
+ init_waitqueue_head(&info->conn_wait);
+ rc = rdma_connect(info->id, &conn_param);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
+ goto rdma_connect_failed;
+ }
+
+ wait_event_interruptible(
+ info->conn_wait, info->transport_status != SMBD_CONNECTING);
+
+ if (info->transport_status != SMBD_CONNECTED) {
+ log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
+ goto rdma_connect_failed;
+ }
+
+ log_rdma_event(INFO, "rdma_connect connected\n");
+
+ rc = allocate_caches_and_workqueue(info);
+ if (rc) {
+ log_rdma_event(ERR, "cache allocation failed\n");
+ goto allocate_cache_failed;
+ }
+
+ init_waitqueue_head(&info->wait_send_queue);
+ init_waitqueue_head(&info->wait_reassembly_queue);
+
+ INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
+ INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
+ queue_delayed_work(info->workqueue, &info->idle_timer_work,
+ info->keep_alive_interval*HZ);
+
+ init_waitqueue_head(&info->wait_smbd_send_pending);
+ info->smbd_send_pending = 0;
+
+ init_waitqueue_head(&info->wait_smbd_recv_pending);
+ info->smbd_recv_pending = 0;
+
+ init_waitqueue_head(&info->wait_send_pending);
+ atomic_set(&info->send_pending, 0);
+
+ init_waitqueue_head(&info->wait_send_payload_pending);
+ atomic_set(&info->send_payload_pending, 0);
+
+ INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
+ INIT_WORK(&info->destroy_work, smbd_destroy_rdma_work);
+ INIT_WORK(&info->recv_done_work, smbd_recv_done_work);
+ INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
+ info->new_credits_offered = 0;
+ spin_lock_init(&info->lock_new_credits_offered);
+
+ rc = smbd_negotiate(info);
+ if (rc) {
+ log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
+ goto negotiation_failed;
+ }
+
+ rc = allocate_mr_list(info);
+ if (rc) {
+ log_rdma_mr(ERR, "memory registration allocation failed\n");
+ goto allocate_mr_failed;
+ }
+
+ return info;
+
+allocate_mr_failed:
+ /* At this point, we need a full transport shutdown */
+ smbd_destroy(info);
+ return NULL;
+
+negotiation_failed:
+ cancel_delayed_work_sync(&info->idle_timer_work);
+ destroy_caches_and_workqueue(info);
+ info->transport_status = SMBD_NEGOTIATE_FAILED;
+ init_waitqueue_head(&info->conn_wait);
+ rdma_disconnect(info->id);
+ wait_event(info->conn_wait,
+ info->transport_status == SMBD_DISCONNECTED);
+
+allocate_cache_failed:
+rdma_connect_failed:
+ rdma_destroy_qp(info->id);
+
+create_qp_failed:
+alloc_cq_failed:
+ if (info->send_cq)
+ ib_free_cq(info->send_cq);
+ if (info->recv_cq)
+ ib_free_cq(info->recv_cq);
+
+config_failed:
+ ib_dealloc_pd(info->pd);
+ rdma_destroy_id(info->id);
+
+create_id_failed:
+ kfree(info);
+ return NULL;
+}
+
+struct smbd_connection *smbd_get_connection(
+ struct TCP_Server_Info *server, struct sockaddr *dstaddr)
+{
+ struct smbd_connection *ret;
+ int port = SMBD_PORT;
+
+try_again:
+ ret = _smbd_get_connection(server, dstaddr, port);
+
+ /* Try SMB_PORT if SMBD_PORT doesn't work */
+ if (!ret && port == SMBD_PORT) {
+ port = SMB_PORT;
+ goto try_again;
+ }
+ return ret;
+}
+
+/*
+ * Receive data from receive reassembly queue
+ * All the incoming data packets are placed in reassembly queue
+ * buf: the buffer to read data into
+ * size: the length of data to read
+ * return value: actual data read
+ * Note: this implementation copies the data from reassembly queue to receive
+ * buffers used by upper layer. This is not the optimal code path. A better way
+ * to do it is to not have upper layer allocate its receive buffers but rather
+ * borrow the buffer from reassembly queue, and return it after data is
+ * consumed. But this will require more changes to upper layer code, and also
+ * need to consider packet boundaries while they are still being reassembled.
+ */
+static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+ unsigned int size)
+{
+ struct smbd_response *response;
+ struct smbd_data_transfer *data_transfer;
+ int to_copy, to_read, data_read, offset;
+ u32 data_length, remaining_data_length, data_offset;
+ int rc;
+
+again:
+ if (info->transport_status != SMBD_CONNECTED) {
+ log_read(ERR, "disconnected\n");
+ return -ENODEV;
+ }
+
+ /*
+ * No need to hold the reassembly queue lock all the time as we are
+ * the only one reading from the front of the queue. The transport
+ * may add more entries to the back of the queue at the same time
+ */
+ log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
+ info->reassembly_data_length);
+ if (info->reassembly_data_length >= size) {
+ int queue_length;
+ int queue_removed = 0;
+
+ /*
+ * Need to make sure reassembly_data_length is read before
+ * reading reassembly_queue_length and calling
+ * _get_first_reassembly. This call is lock free
+ * as we never read at the end of the queue, which is being
+ * updated in SOFTIRQ context as more data is received
+ */
+ virt_rmb();
+ queue_length = info->reassembly_queue_length;
+ data_read = 0;
+ to_read = size;
+ offset = info->first_entry_offset;
+ while (data_read < size) {
+ response = _get_first_reassembly(info);
+ data_transfer = smbd_response_payload(response);
+ data_length = le32_to_cpu(data_transfer->data_length);
+ remaining_data_length =
+ le32_to_cpu(
+ data_transfer->remaining_data_length);
+ data_offset = le32_to_cpu(data_transfer->data_offset);
+
+ /*
+ * The upper layer expects RFC1002 length at the
+ * beginning of the payload. Return it to indicate
+ * the total length of the packet. This minimizes the
+ * changes to the upper layer packet processing logic. This
+ * will eventually be removed when an intermediate
+ * transport layer is added
+ */
+ if (response->first_segment && size == 4) {
+ unsigned int rfc1002_len =
+ data_length + remaining_data_length;
+ *((__be32 *)buf) = cpu_to_be32(rfc1002_len);
+ data_read = 4;
+ response->first_segment = false;
+ log_read(INFO, "returning rfc1002 length %d\n",
+ rfc1002_len);
+ goto read_rfc1002_done;
+ }
+
+ to_copy = min_t(int, data_length - offset, to_read);
+ memcpy(
+ buf + data_read,
+ (char *)data_transfer + data_offset + offset,
+ to_copy);
+
+ /* move on to the next buffer? */
+ if (to_copy == data_length - offset) {
+ queue_length--;
+ /*
+ * No need to lock if we are not at the
+ * end of the queue
+ */
+ if (!queue_length)
+ spin_lock_irq(
+ &info->reassembly_queue_lock);
+ list_del(&response->list);
+ queue_removed++;
+ if (!queue_length)
+ spin_unlock_irq(
+ &info->reassembly_queue_lock);
+
+ info->count_reassembly_queue--;
+ info->count_dequeue_reassembly_queue++;
+ put_receive_buffer(info, response);
+ offset = 0;
+ log_read(INFO, "put_receive_buffer offset=0\n");
+ } else
+ offset += to_copy;
+
+ to_read -= to_copy;
+ data_read += to_copy;
+
+ log_read(INFO, "_get_first_reassembly memcpy %d bytes "
+ "data_transfer_length-offset=%d after that "
+ "to_read=%d data_read=%d offset=%d\n",
+ to_copy, data_length - offset,
+ to_read, data_read, offset);
+ }
+
+ spin_lock_irq(&info->reassembly_queue_lock);
+ info->reassembly_data_length -= data_read;
+ info->reassembly_queue_length -= queue_removed;
+ spin_unlock_irq(&info->reassembly_queue_lock);
+
+ info->first_entry_offset = offset;
+ log_read(INFO, "returning to thread data_read=%d "
+ "reassembly_data_length=%d first_entry_offset=%d\n",
+ data_read, info->reassembly_data_length,
+ info->first_entry_offset);
+read_rfc1002_done:
+ return data_read;
+ }
+
+ log_read(INFO, "wait_event on more data\n");
+ rc = wait_event_interruptible(
+ info->wait_reassembly_queue,
+ info->reassembly_data_length >= size ||
+ info->transport_status != SMBD_CONNECTED);
+ /* Don't return any data if interrupted */
+ if (rc)
+ return -ENODEV;
+
+ goto again;
+}
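+
+/*
+ * Example of the RFC1002 special case above, with hypothetical numbers:
+ * the upper layer starts a receive with a 4-byte read for the length
+ * field. If the response at the queue head is the first segment of a
+ * payload with data_length = 4096 and remaining_data_length = 61440,
+ * the function returns 65536 as a __be32 in those 4 bytes without
+ * consuming any payload, so the caller learns the full message size up
+ * front.
+ */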
+
+/*
+ * Receive a page from receive reassembly queue
+ * page: the page to read data into
+ * to_read: the length of data to read
+ * return value: actual data read
+ */
+static int smbd_recv_page(struct smbd_connection *info,
+ struct page *page, unsigned int to_read)
+{
+ int ret;
+ char *to_address;
+
+ /* make sure we have the page ready for read */
+ ret = wait_event_interruptible(
+ info->wait_reassembly_queue,
+ info->reassembly_data_length >= to_read ||
+ info->transport_status != SMBD_CONNECTED);
+ if (ret)
+ return 0;
+
+ /* now we can read from reassembly queue and not sleep */
+ to_address = kmap_atomic(page);
+
+ log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
+ page, to_address, to_read);
+
+ ret = smbd_recv_buf(info, to_address, to_read);
+ kunmap_atomic(to_address);
+
+ return ret;
+}
+
+/*
+ * Receive data from transport
+ * msg: a msghdr pointing to the buffer, can be ITER_KVEC or ITER_BVEC
+ * return: total bytes read, or 0. SMB Direct will not do partial read.
+ */
+int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
+{
+ char *buf;
+ struct page *page;
+ unsigned int to_read;
+ int rc;
+
+ info->smbd_recv_pending++;
+
+ switch (msg->msg_iter.type) {
+ case READ | ITER_KVEC:
+ buf = msg->msg_iter.kvec->iov_base;
+ to_read = msg->msg_iter.kvec->iov_len;
+ rc = smbd_recv_buf(info, buf, to_read);
+ break;
+
+ case READ | ITER_BVEC:
+ page = msg->msg_iter.bvec->bv_page;
+ to_read = msg->msg_iter.bvec->bv_len;
+ rc = smbd_recv_page(info, page, to_read);
+ break;
+
+ default:
+ /* It's a bug in the upper layer to get here */
+ cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
+ msg->msg_iter.type);
+ rc = -EIO;
+ }
+
+ info->smbd_recv_pending--;
+ wake_up(&info->wait_smbd_recv_pending);
+
+ /* SMBDirect will read it all or nothing */
+ if (rc > 0)
+ msg->msg_iter.count = 0;
+ return rc;
+}
+
+/*
+ * Send data to transport
+ * Each rqst is transported as an SMBDirect payload
+ * rqst: the data to write
+ * return value: 0 on a successful write, otherwise error code
+ */
+int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+{
+ struct kvec vec;
+ int nvecs;
+ int size;
+ int buflen = 0, remaining_data_length;
+ int start, i, j;
+ int max_iov_size =
+ info->max_send_size - sizeof(struct smbd_data_transfer);
+ struct kvec iov[SMBDIRECT_MAX_SGE];
+ int rc;
+
+ info->smbd_send_pending++;
+ if (info->transport_status != SMBD_CONNECTED) {
+ rc = -ENODEV;
+ goto done;
+ }
+
+ /*
+ * This usually means a configuration error.
+ * We use RDMA read/write for packet size > rdma_readwrite_threshold;
+ * as long as it's properly configured we should never get into this
+ * situation
+ */
+ if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
+ log_write(ERR, "maximum send segment %x exceeding %x\n",
+ rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Remove the RFC1002 length defined in MS-SMB2 section 2.1
+ * It is used only for TCP transport
+ * In the future we may want to add a transport layer under the protocol
+ * layer so this will only be issued to the TCP transport
+ */
+ iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4;
+ iov[0].iov_len = rqst->rq_iov[0].iov_len - 4;
+ buflen += iov[0].iov_len;
+
+ /* total up iov array first */
+ for (i = 1; i < rqst->rq_nvec; i++) {
+ iov[i].iov_base = rqst->rq_iov[i].iov_base;
+ iov[i].iov_len = rqst->rq_iov[i].iov_len;
+ buflen += iov[i].iov_len;
+ }
+
+ /* add in the page array if there is one */
+ if (rqst->rq_npages) {
+ buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
+ buflen += rqst->rq_tailsz;
+ }
+
+ if (buflen + sizeof(struct smbd_data_transfer) >
+ info->max_fragmented_send_size) {
+ log_write(ERR, "payload size %d > max size %d\n",
+ buflen, info->max_fragmented_send_size);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ remaining_data_length = buflen;
+
+ log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
+ "rq_tailsz=%d buflen=%d\n",
+ rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
+ rqst->rq_tailsz, buflen);
+
+ start = i = iov[0].iov_len ? 0 : 1;
+ buflen = 0;
+ while (true) {
+ buflen += iov[i].iov_len;
+ if (buflen > max_iov_size) {
+ if (i > start) {
+ remaining_data_length -=
+ (buflen-iov[i].iov_len);
+ log_write(INFO, "sending iov[] from start=%d "
+ "i=%d nvecs=%d "
+ "remaining_data_length=%d\n",
+ start, i, i-start,
+ remaining_data_length);
+ rc = smbd_post_send_data(
+ info, &iov[start], i-start,
+ remaining_data_length);
+ if (rc)
+ goto done;
+ } else {
+ /* iov[start] is too big, break it */
+ nvecs = (buflen+max_iov_size-1)/max_iov_size;
+ log_write(INFO, "iov[%d] iov_base=%p buflen=%d"
+ " break to %d vectors\n",
+ start, iov[start].iov_base,
+ buflen, nvecs);
+ for (j = 0; j < nvecs; j++) {
+ vec.iov_base =
+ (char *)iov[start].iov_base +
+ j*max_iov_size;
+ vec.iov_len = max_iov_size;
+ if (j == nvecs-1)
+ vec.iov_len =
+ buflen -
+ max_iov_size*(nvecs-1);
+ remaining_data_length -= vec.iov_len;
+ log_write(INFO,
+ "sending vec j=%d iov_base=%p"
+ " iov_len=%zu "
+ "remaining_data_length=%d\n",
+ j, vec.iov_base, vec.iov_len,
+ remaining_data_length);
+ rc = smbd_post_send_data(
+ info, &vec, 1,
+ remaining_data_length);
+ if (rc)
+ goto done;
+ }
+ i++;
+ }
+ start = i;
+ buflen = 0;
+ } else {
+ i++;
+ if (i == rqst->rq_nvec) {
+ /* send out all remaining vecs */
+ remaining_data_length -= buflen;
+ log_write(INFO,
+ "sending iov[] from start=%d i=%d "
+ "nvecs=%d remaining_data_length=%d\n",
+ start, i, i-start,
+ remaining_data_length);
+ rc = smbd_post_send_data(info, &iov[start],
+ i-start, remaining_data_length);
+ if (rc)
+ goto done;
+ break;
+ }
+ }
+ log_write(INFO, "looping i=%d buflen=%d\n", i, buflen);
+ }
+
+ /* now sending pages if there are any */
+ for (i = 0; i < rqst->rq_npages; i++) {
+ buflen = (i == rqst->rq_npages-1) ?
+ rqst->rq_tailsz : rqst->rq_pagesz;
+ nvecs = (buflen + max_iov_size - 1) / max_iov_size;
+ log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
+ buflen, nvecs);
+ for (j = 0; j < nvecs; j++) {
+ size = max_iov_size;
+ if (j == nvecs-1)
+ size = buflen - j*max_iov_size;
+ remaining_data_length -= size;
+ log_write(INFO, "sending pages i=%d offset=%d size=%d"
+ " remaining_data_length=%d\n",
+ i, j*max_iov_size, size, remaining_data_length);
+ rc = smbd_post_send_page(
+ info, rqst->rq_pages[i], j*max_iov_size,
+ size, remaining_data_length);
+ if (rc)
+ goto done;
+ }
+ }
+
+done:
+ /*
+ * As an optimization, we don't wait for individual I/O to finish
+ * before sending the next one.
+ * Send them all and wait for the pending send count to get to 0;
+ * that means all the I/Os have been sent out and we are good to return.
+ */
+
+ wait_event(info->wait_send_payload_pending,
+ atomic_read(&info->send_payload_pending) == 0);
+
+ info->smbd_send_pending--;
+ wake_up(&info->wait_smbd_send_pending);
+
+ return rc;
+}
+
+static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct smbd_mr *mr;
+ struct ib_cqe *cqe;
+
+ if (wc->status) {
+ log_rdma_mr(ERR, "status=%d\n", wc->status);
+ cqe = wc->wr_cqe;
+ mr = container_of(cqe, struct smbd_mr, cqe);
+ smbd_disconnect_rdma_connection(mr->conn);
+ }
+}
+
+/*
+ * The work queue function that recovers MRs.
+ * We need to call ib_dereg_mr() and ib_alloc_mr() before an MR can be used
+ * again. Both calls are slow, so finish them in a workqueue. This will not
+ * block the I/O path.
+ * There is one workqueue that recovers MRs; there is no need to lock, as the
+ * I/O requests calling smbd_register_mr() will never update the links in
+ * mr_list.
+ */
+static void smbd_mr_recovery_work(struct work_struct *work)
+{
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, mr_recovery_work);
+ struct smbd_mr *smbdirect_mr;
+ int rc;
+
+ list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
+ if (smbdirect_mr->state == MR_INVALIDATED ||
+ smbdirect_mr->state == MR_ERROR) {
+
+ if (smbdirect_mr->state == MR_INVALIDATED) {
+ ib_dma_unmap_sg(
+ info->id->device, smbdirect_mr->sgl,
+ smbdirect_mr->sgl_count,
+ smbdirect_mr->dir);
+ smbdirect_mr->state = MR_READY;
+ } else if (smbdirect_mr->state == MR_ERROR) {
+
+ /* recover this MR entry */
+ rc = ib_dereg_mr(smbdirect_mr->mr);
+ if (rc) {
+ log_rdma_mr(ERR,
+ "ib_dereg_mr faield rc=%x\n",
+ rc);
+ smbd_disconnect_rdma_connection(info);
+ }
+
+ smbdirect_mr->mr = ib_alloc_mr(
+ info->pd, info->mr_type,
+ info->max_frmr_depth);
+ if (IS_ERR(smbdirect_mr->mr)) {
+ log_rdma_mr(ERR,
+ "ib_alloc_mr failed mr_type=%x "
+ "max_frmr_depth=%x\n",
+ info->mr_type,
+ info->max_frmr_depth);
+ smbd_disconnect_rdma_connection(info);
+ }
+
+ smbdirect_mr->state = MR_READY;
+ }
+ /* smbdirect_mr->state is updated by this function
+ * and is read and updated by I/O issuing CPUs trying
+ * to get an MR; the call to atomic_inc_return()
+ * implies a memory barrier and guarantees this
+ * value is updated before waking up any calls to
+ * get_mr() from the I/O issuing CPUs
+ */
+ if (atomic_inc_return(&info->mr_ready_count) == 1)
+ wake_up_interruptible(&info->wait_mr);
+ }
+ }
+}
+
+static void destroy_mr_list(struct smbd_connection *info)
+{
+ struct smbd_mr *mr, *tmp;
+
+ cancel_work_sync(&info->mr_recovery_work);
+ list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
+ if (mr->state == MR_INVALIDATED)
+ ib_dma_unmap_sg(info->id->device, mr->sgl,
+ mr->sgl_count, mr->dir);
+ ib_dereg_mr(mr->mr);
+ kfree(mr->sgl);
+ kfree(mr);
+ }
+}
+
+/*
+ * Allocate MRs used for RDMA read/write.
+ * The number of MRs will not exceed the hardware capability in
+ * responder_resources. All MRs are kept in mr_list. An MR can be recovered
+ * after it's used; recovery is done in smbd_mr_recovery_work. The content of
+ * a list entry changes as MRs are used and recovered for I/O, but the list
+ * links will not change.
+ */
+static int allocate_mr_list(struct smbd_connection *info)
+{
+ int i;
+ struct smbd_mr *smbdirect_mr, *tmp;
+
+ INIT_LIST_HEAD(&info->mr_list);
+ init_waitqueue_head(&info->wait_mr);
+ spin_lock_init(&info->mr_list_lock);
+ atomic_set(&info->mr_ready_count, 0);
+ atomic_set(&info->mr_used_count, 0);
+ init_waitqueue_head(&info->wait_for_mr_cleanup);
+ /* Allocate more MRs (2x) than hardware responder_resources */
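+ /* e.g. a connection negotiating responder_resources = 32 gets 64 MRs */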
+ for (i = 0; i < info->responder_resources * 2; i++) {
+ smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
+ if (!smbdirect_mr)
+ goto out;
+ smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
+ info->max_frmr_depth);
+ if (IS_ERR(smbdirect_mr->mr)) {
+ log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x "
+ "max_frmr_depth=%x\n",
+ info->mr_type, info->max_frmr_depth);
+ goto out;
+ }
+ smbdirect_mr->sgl = kcalloc(
+ info->max_frmr_depth,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!smbdirect_mr->sgl) {
+ log_rdma_mr(ERR, "failed to allocate sgl\n");
+ ib_dereg_mr(smbdirect_mr->mr);
+ goto out;
+ }
+ smbdirect_mr->state = MR_READY;
+ smbdirect_mr->conn = info;
+
+ list_add_tail(&smbdirect_mr->list, &info->mr_list);
+ atomic_inc(&info->mr_ready_count);
+ }
+ INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
+ return 0;
+
+out:
+ kfree(smbdirect_mr);
+
+ list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
+ ib_dereg_mr(smbdirect_mr->mr);
+ kfree(smbdirect_mr->sgl);
+ kfree(smbdirect_mr);
+ }
+ return -ENOMEM;
+}
+
+/*
+ * Get an MR from mr_list. This function waits until there is at least one
+ * MR available in the list. It may access the list while
+ * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock,
+ * as the two never modify the same places. However, several CPUs may be
+ * issuing I/O and trying to get an MR at the same time; mr_list_lock is used
+ * to protect against that.
+ */
+static struct smbd_mr *get_mr(struct smbd_connection *info)
+{
+ struct smbd_mr *ret;
+ int rc;
+again:
+ rc = wait_event_interruptible(info->wait_mr,
+ atomic_read(&info->mr_ready_count) ||
+ info->transport_status != SMBD_CONNECTED);
+ if (rc) {
+ log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
+ return NULL;
+ }
+
+ if (info->transport_status != SMBD_CONNECTED) {
+ log_rdma_mr(ERR, "info->transport_status=%x\n",
+ info->transport_status);
+ return NULL;
+ }
+
+ spin_lock(&info->mr_list_lock);
+ list_for_each_entry(ret, &info->mr_list, list) {
+ if (ret->state == MR_READY) {
+ ret->state = MR_REGISTERED;
+ spin_unlock(&info->mr_list_lock);
+ atomic_dec(&info->mr_ready_count);
+ atomic_inc(&info->mr_used_count);
+ return ret;
+ }
+ }
+
+ spin_unlock(&info->mr_list_lock);
+ /*
+ * It is possible that we could fail to get an MR because other processes
+ * may try to acquire one at the same time. If this is the case, retry.
+ */
+ goto again;
+}
+
+/*
+ * Register memory for RDMA read/write
+ * pages[]: the list of pages to register memory with
+ * num_pages: the number of pages to register
+ * tailsz: if non-zero, the bytes to register in the last page
+ * writing: true if this is an RDMA write (SMB read), false for RDMA read
+ * need_invalidate: true if this MR needs to be locally invalidated after I/O
+ * return value: the MR registered, NULL if failed.
+ */
+struct smbd_mr *smbd_register_mr(
+ struct smbd_connection *info, struct page *pages[], int num_pages,
+ int tailsz, bool writing, bool need_invalidate)
+{
+ struct smbd_mr *smbdirect_mr;
+ int rc, i;
+ enum dma_data_direction dir;
+ struct ib_reg_wr *reg_wr;
+ struct ib_send_wr *bad_wr;
+
+ if (num_pages > info->max_frmr_depth) {
+ log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
+ num_pages, info->max_frmr_depth);
+ return NULL;
+ }
+
+ smbdirect_mr = get_mr(info);
+ if (!smbdirect_mr) {
+ log_rdma_mr(ERR, "get_mr returning NULL\n");
+ return NULL;
+ }
+ smbdirect_mr->need_invalidate = need_invalidate;
+ smbdirect_mr->sgl_count = num_pages;
+ sg_init_table(smbdirect_mr->sgl, num_pages);
+
+ for (i = 0; i < num_pages - 1; i++)
+ sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
+
+ sg_set_page(&smbdirect_mr->sgl[i], pages[i],
+ tailsz ? tailsz : PAGE_SIZE, 0);
+
+ dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ smbdirect_mr->dir = dir;
+ rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
+ if (!rc) {
+ log_rdma_mr(INFO, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
+ num_pages, dir, rc);
+ goto dma_map_error;
+ }
+
+ rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
+ NULL, PAGE_SIZE);
+ if (rc != num_pages) {
+ log_rdma_mr(INFO,
+ "ib_map_mr_sg failed rc = %x num_pages = %x\n",
+ rc, num_pages);
+ goto map_mr_error;
+ }
+
+ ib_update_fast_reg_key(smbdirect_mr->mr,
+ ib_inc_rkey(smbdirect_mr->mr->rkey));
+ reg_wr = &smbdirect_mr->wr;
+ reg_wr->wr.opcode = IB_WR_REG_MR;
+ smbdirect_mr->cqe.done = register_mr_done;
+ reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
+ reg_wr->wr.num_sge = 0;
+ reg_wr->wr.send_flags = IB_SEND_SIGNALED;
+ reg_wr->mr = smbdirect_mr->mr;
+ reg_wr->key = smbdirect_mr->mr->rkey;
+ reg_wr->access = writing ?
+ IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
+ IB_ACCESS_REMOTE_READ;
+
+ /*
+ * There is no need to wait for completion of ib_post_send
+ * on IB_WR_REG_MR. Hardware enforces a barrier and ordering of execution
+ * on the next ib_post_send when we actually send I/O to the remote peer
+ */
+ rc = ib_post_send(info->id->qp, &reg_wr->wr, &bad_wr);
+ if (!rc)
+ return smbdirect_mr;
+
+ log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
+ rc, reg_wr->key);
+
+ /* If the send failed, attempt to recover this MR by setting it to MR_ERROR */
+map_mr_error:
+ ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
+ smbdirect_mr->sgl_count, smbdirect_mr->dir);
+
+dma_map_error:
+ smbdirect_mr->state = MR_ERROR;
+ if (atomic_dec_and_test(&info->mr_used_count))
+ wake_up(&info->wait_for_mr_cleanup);
+
+ return NULL;
+}
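+
+/*
+ * A hypothetical caller (sketch only, not part of this patch): register the
+ * pages backing an SMB read, passing writing = true since the peer will
+ * RDMA-write into them, send the request carrying the rkey, then deregister
+ * the MR when the I/O completes:
+ *
+ *	mr = smbd_register_mr(info, pages, npages, tailsz, true, true);
+ *	if (!mr)
+ *		return -EAGAIN;
+ *	... send an SMB request referencing mr->mr->rkey and mr->mr->iova ...
+ *	rc = smbd_deregister_mr(mr);
+ */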
+
+static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct smbd_mr *smbdirect_mr;
+ struct ib_cqe *cqe;
+
+ cqe = wc->wr_cqe;
+ smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
+ smbdirect_mr->state = MR_INVALIDATED;
+ if (wc->status != IB_WC_SUCCESS) {
+ log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
+ smbdirect_mr->state = MR_ERROR;
+ }
+ complete(&smbdirect_mr->invalidate_done);
+}
+
+/*
+ * Deregister an MR after I/O is done.
+ * This function may wait if remote invalidation is not used
+ * and we have to locally invalidate the buffer to prevent data from being
+ * modified by the remote peer after the upper layer consumes it.
+ */
+int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+{
+ struct ib_send_wr *wr, *bad_wr;
+ struct smbd_connection *info = smbdirect_mr->conn;
+ int rc = 0;
+
+ if (smbdirect_mr->need_invalidate) {
+ /* Need to finish local invalidation before returning */
+ wr = &smbdirect_mr->inv_wr;
+ wr->opcode = IB_WR_LOCAL_INV;
+ smbdirect_mr->cqe.done = local_inv_done;
+ wr->wr_cqe = &smbdirect_mr->cqe;
+ wr->num_sge = 0;
+ wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
+ wr->send_flags = IB_SEND_SIGNALED;
+
+ init_completion(&smbdirect_mr->invalidate_done);
+ rc = ib_post_send(info->id->qp, wr, &bad_wr);
+ if (rc) {
+ log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
+ smbd_disconnect_rdma_connection(info);
+ goto done;
+ }
+ wait_for_completion(&smbdirect_mr->invalidate_done);
+ smbdirect_mr->need_invalidate = false;
+ } else
+ /*
+ * For remote invalidation, just set it to MR_INVALIDATED
+ * and defer to mr_recovery_work to recover the MR for next use
+ */
+ smbdirect_mr->state = MR_INVALIDATED;
+
+ /*
+ * Schedule the work to do MR recovery for future I/Os
+ * MR recovery is slow and we don't want it to block the current I/O
+ */
+ queue_work(info->workqueue, &info->mr_recovery_work);
+
+done:
+ if (atomic_dec_and_test(&info->mr_used_count))
+ wake_up(&info->wait_for_mr_cleanup);
+
+ return rc;
+}
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
new file mode 100644
index 000000000000..f9038daea194
--- /dev/null
+++ b/fs/cifs/smbdirect.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2017, Microsoft Corporation.
+ *
+ * Author(s): Long Li <longli@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ */
+#ifndef _SMBDIRECT_H
+#define _SMBDIRECT_H
+
+#ifdef CONFIG_CIFS_SMB_DIRECT
+#define cifs_rdma_enabled(server) ((server)->rdma)
+
+#include "cifsglob.h"
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <linux/mempool.h>
+
+extern int rdma_readwrite_threshold;
+extern int smbd_max_frmr_depth;
+extern int smbd_keep_alive_interval;
+extern int smbd_max_receive_size;
+extern int smbd_max_fragmented_recv_size;
+extern int smbd_max_send_size;
+extern int smbd_send_credit_target;
+extern int smbd_receive_credit_max;
+
+enum keep_alive_status {
+ KEEP_ALIVE_NONE,
+ KEEP_ALIVE_PENDING,
+ KEEP_ALIVE_SENT,
+};
+
+enum smbd_connection_status {
+ SMBD_CREATED,
+ SMBD_CONNECTING,
+ SMBD_CONNECTED,
+ SMBD_NEGOTIATE_FAILED,
+ SMBD_DISCONNECTING,
+ SMBD_DISCONNECTED,
+ SMBD_DESTROYED
+};
+
+/*
+ * The context for the SMBDirect transport
+ * Everything related to the transport is here. It has several logical parts
+ * 1. RDMA related structures
+ * 2. SMBDirect connection parameters
+ * 3. Memory registrations
+ * 4. Receive and reassembly queues for data receive path
+ * 5. mempools for allocating packets
+ */
+struct smbd_connection {
+ enum smbd_connection_status transport_status;
+
+ /* RDMA related */
+ struct rdma_cm_id *id;
+ struct ib_qp_init_attr qp_attr;
+ struct ib_pd *pd;
+ struct ib_cq *send_cq, *recv_cq;
+ struct ib_device_attr dev_attr;
+ int ri_rc;
+ struct completion ri_done;
+ wait_queue_head_t conn_wait;
+ wait_queue_head_t wait_destroy;
+
+ struct completion negotiate_completion;
+ bool negotiate_done;
+
+ struct work_struct destroy_work;
+ struct work_struct disconnect_work;
+ struct work_struct recv_done_work;
+ struct work_struct post_send_credits_work;
+
+ spinlock_t lock_new_credits_offered;
+ int new_credits_offered;
+
+ /* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
+ int receive_credit_max;
+ int send_credit_target;
+ int max_send_size;
+ int max_fragmented_recv_size;
+ int max_fragmented_send_size;
+ int max_receive_size;
+ int keep_alive_interval;
+ int max_readwrite_size;
+ enum keep_alive_status keep_alive_requested;
+ int protocol;
+ atomic_t send_credits;
+ atomic_t receive_credits;
+ int receive_credit_target;
+ int fragment_reassembly_remaining;
+
+ /* Memory registrations */
+ /* Maximum number of RDMA read/write outstanding on this connection */
+ int responder_resources;
+ /* Maximum number of SGEs in an RDMA write/read */
+ int max_frmr_depth;
+ /*
+ * If payload is less than or equal to the threshold,
+ * use RDMA send/recv to send upper layer I/O.
+ * If payload is more than the threshold,
+ * use RDMA read/write through memory registration for I/O.
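+ * For example, a small SMB2 request is carried inline in send/recv
+ * packets, while a large read/write has its pages registered with
+ * smbd_register_mr() and is transferred by the peer using RDMA
+ * read/write.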
+ */
+ int rdma_readwrite_threshold;
+ enum ib_mr_type mr_type;
+ struct list_head mr_list;
+ spinlock_t mr_list_lock;
+ /* The number of available MRs ready for memory registration */
+ atomic_t mr_ready_count;
+ atomic_t mr_used_count;
+ wait_queue_head_t wait_mr;
+ struct work_struct mr_recovery_work;
+ /* Used by transport to wait until all MRs are returned */
+ wait_queue_head_t wait_for_mr_cleanup;
+
+ /* Activity accounting */
+ /* Pending requests issued from the upper layer */
+ int smbd_send_pending;
+ wait_queue_head_t wait_smbd_send_pending;
+
+ int smbd_recv_pending;
+ wait_queue_head_t wait_smbd_recv_pending;
+
+ atomic_t send_pending;
+ wait_queue_head_t wait_send_pending;
+ atomic_t send_payload_pending;
+ wait_queue_head_t wait_send_payload_pending;
+
+ /* Receive queue */
+ struct list_head receive_queue;
+ int count_receive_queue;
+ spinlock_t receive_queue_lock;
+
+ struct list_head empty_packet_queue;
+ int count_empty_packet_queue;
+ spinlock_t empty_packet_queue_lock;
+
+ wait_queue_head_t wait_receive_queues;
+
+ /* Reassembly queue */
+ struct list_head reassembly_queue;
+ spinlock_t reassembly_queue_lock;
+ wait_queue_head_t wait_reassembly_queue;
+
+ /* total data length of reassembly queue */
+ int reassembly_data_length;
+ int reassembly_queue_length;
+ /* the offset to first buffer in reassembly queue */
+ int first_entry_offset;
+
+ bool send_immediate;
+
+ wait_queue_head_t wait_send_queue;
+
+ /*
+ * Indicate if we have received a full packet on the connection.
+ * This is used to identify the first SMBD packet of an assembled
+ * payload (SMB packet) in the reassembly queue, so we can return an
+ * RFC1002 length to the upper layer to indicate the length of the SMB
+ * packet received.
+ */
+ bool full_packet_received;
+
+ struct workqueue_struct *workqueue;
+ struct delayed_work idle_timer_work;
+ struct delayed_work send_immediate_work;
+
+ /* Memory pool for preallocating buffers */
+ /* request pool for RDMA send */
+ struct kmem_cache *request_cache;
+ mempool_t *request_mempool;
+
+ /* response pool for RDMA receive */
+ struct kmem_cache *response_cache;
+ mempool_t *response_mempool;
+
+ /* for debug purposes */
+ unsigned int count_get_receive_buffer;
+ unsigned int count_put_receive_buffer;
+ unsigned int count_reassembly_queue;
+ unsigned int count_enqueue_reassembly_queue;
+ unsigned int count_dequeue_reassembly_queue;
+ unsigned int count_send_empty;
+};
+
+enum smbd_message_type {
+ SMBD_NEGOTIATE_RESP,
+ SMBD_TRANSFER_DATA,
+};
+
+#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
+
+/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
+struct smbd_negotiate_req {
+ __le16 min_version;
+ __le16 max_version;
+ __le16 reserved;
+ __le16 credits_requested;
+ __le32 preferred_send_size;
+ __le32 max_receive_size;
+ __le32 max_fragmented_size;
+} __packed;
+
+/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
+struct smbd_negotiate_resp {
+ __le16 min_version;
+ __le16 max_version;
+ __le16 negotiated_version;
+ __le16 reserved;
+ __le16 credits_requested;
+ __le16 credits_granted;
+ __le32 status;
+ __le32 max_readwrite_size;
+ __le32 preferred_send_size;
+ __le32 max_receive_size;
+ __le32 max_fragmented_size;
+} __packed;
+
+/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
+struct smbd_data_transfer {
+ __le16 credits_requested;
+ __le16 credits_granted;
+ __le16 flags;
+ __le16 reserved;
+ __le32 remaining_data_length;
+ __le32 data_offset;
+ __le32 data_length;
+ __le32 padding;
+ __u8 buffer[];
+} __packed;
+
+/* The packet fields for a registered RDMA buffer */
+struct smbd_buffer_descriptor_v1 {
+ __le64 offset;
+ __le32 token;
+ __le32 length;
+} __packed;
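+
+/*
+ * For example (a sketch, not from this patch), a buffer registered with
+ * smbd_register_mr() would typically be described to the peer as:
+ *	v1->offset = cpu_to_le64(mr->mr->iova);
+ *	v1->token  = cpu_to_le32(mr->mr->rkey);
+ *	v1->length = cpu_to_le32(xfer_len);
+ */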
+
+/* Default maximum number of SGEs in an RDMA send/recv */
+#define SMBDIRECT_MAX_SGE 16
+/* The context for a SMBD request */
+struct smbd_request {
+ struct smbd_connection *info;
+ struct ib_cqe cqe;
+
+ /* true if this request carries upper layer payload */
+ bool has_payload;
+
+ /* the SGE entries for this packet */
+ struct ib_sge sge[SMBDIRECT_MAX_SGE];
+ int num_sge;
+
+ /* SMBD packet header follows this structure */
+ u8 packet[];
+};
+
+/* The context for a SMBD response */
+struct smbd_response {
+ struct smbd_connection *info;
+ struct ib_cqe cqe;
+ struct ib_sge sge;
+
+ enum smbd_message_type type;
+
+ /* Link to receive queue or reassembly queue */
+ struct list_head list;
+
+ /* Indicate if this is the 1st packet of a payload */
+ bool first_segment;
+
+ /* SMBD packet header and payload follows this structure */
+ u8 packet[];
+};
+
+/* Create a SMBDirect session */
+struct smbd_connection *smbd_get_connection(
+ struct TCP_Server_Info *server, struct sockaddr *dstaddr);
+
+/* Reconnect SMBDirect session */
+int smbd_reconnect(struct TCP_Server_Info *server);
+/* Destroy SMBDirect session */
+void smbd_destroy(struct smbd_connection *info);
+
+/* Interface for carrying upper layer I/O through send/recv */
+int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
+int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst);
+
+enum mr_state {
+ MR_READY,
+ MR_REGISTERED,
+ MR_INVALIDATED,
+ MR_ERROR
+};
+
+struct smbd_mr {
+ struct smbd_connection *conn;
+ struct list_head list;
+ enum mr_state state;
+ struct ib_mr *mr;
+ struct scatterlist *sgl;
+ int sgl_count;
+ enum dma_data_direction dir;
+ union {
+ struct ib_reg_wr wr;
+ struct ib_send_wr inv_wr;
+ };
+ struct ib_cqe cqe;
+ bool need_invalidate;
+ struct completion invalidate_done;
+};
+
+/* Interfaces to register and deregister MR for RDMA read/write */
+struct smbd_mr *smbd_register_mr(
+ struct smbd_connection *info, struct page *pages[], int num_pages,
+ int tailsz, bool writing, bool need_invalidate);
+int smbd_deregister_mr(struct smbd_mr *mr);
+
+#else
+#define cifs_rdma_enabled(server) 0
+struct smbd_connection {};
+static inline void *smbd_get_connection(
+ struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
+static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
+static inline void smbd_destroy(struct smbd_connection *info) {}
+static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
+static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; }
+#endif
+
+#endif
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 7efbab013957..9779b3292d8e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -37,6 +37,10 @@
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
+#include "smbdirect.h"
+
+/* Max number of iovectors we can use off the stack when sending requests. */
+#define CIFS_MAX_IOV_SIZE 8
void
cifs_wake_up_task(struct mid_q_entry *mid)
@@ -229,7 +233,10 @@ __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
struct socket *ssocket = server->ssocket;
struct msghdr smb_msg;
int val = 1;
-
+ if (cifs_rdma_enabled(server) && server->smbd_conn) {
+ rc = smbd_send(server->smbd_conn, rqst);
+ goto smbd_done;
+ }
if (ssocket == NULL)
return -ENOTSOCK;
@@ -298,7 +305,7 @@ uncork:
*/
server->tcpStatus = CifsNeedReconnect;
}
-
+smbd_done:
if (rc < 0 && rc != -EINTR)
cifs_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
@@ -803,12 +810,16 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
const int flags, struct kvec *resp_iov)
{
struct smb_rqst rqst;
- struct kvec *new_iov;
+ struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
int rc;
- new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL);
- if (!new_iov)
- return -ENOMEM;
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
+ new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
+ GFP_KERNEL);
+ if (!new_iov)
+ return -ENOMEM;
+ } else
+ new_iov = s_iov;
/* 1st iov is a RFC1001 length followed by the rest of the packet */
memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
@@ -823,7 +834,51 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rqst.rq_nvec = n_vec + 1;
rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
- kfree(new_iov);
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
+ kfree(new_iov);
+ return rc;
+}
+
+/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
+int
+smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
+ const int flags, struct kvec *resp_iov)
+{
+ struct smb_rqst rqst;
+ struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
+ int rc;
+ int i;
+ __u32 count;
+ __be32 rfc1002_marker;
+
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
+ new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
+ GFP_KERNEL);
+ if (!new_iov)
+ return -ENOMEM;
+ } else
+ new_iov = s_iov;
+
+ /* 1st iov is an RFC1002 Session Message length */
+ memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
+
+ count = 0;
+ for (i = 1; i < n_vec + 1; i++)
+ count += new_iov[i].iov_len;
+
+ rfc1002_marker = cpu_to_be32(count);
+
+ new_iov[0].iov_base = &rfc1002_marker;
+ new_iov[0].iov_len = 4;
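+ /* e.g. a 100-byte packet yields the big-endian marker 00 00 00 64 */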
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = new_iov;
+ rqst.rq_nvec = n_vec + 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
+ kfree(new_iov);
return rc;
}
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index f40e3953e7fe..49d3c6fda89a 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -61,10 +61,10 @@ static struct class *coda_psdev_class;
* Device operations
*/
-static unsigned int coda_psdev_poll(struct file *file, poll_table * wait)
+static __poll_t coda_psdev_poll(struct file *file, poll_table * wait)
{
struct venus_comm *vcp = (struct venus_comm *) file->private_data;
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
poll_wait(file, &vcp->vc_waitq, wait);
mutex_lock(&vcp->vc_mutex);
diff --git a/fs/dax.c b/fs/dax.c
index 95981591977a..6ee6f7e24f5a 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -44,6 +44,7 @@
/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
+#define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
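+/* e.g. on x86-64 with 4KiB pages and 2MiB PMDs: PG_PMD_NR = 512, PG_PMD_COLOUR = 511 */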
static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
@@ -375,8 +376,8 @@ restart:
* unmapped.
*/
if (pmd_downgrade && dax_is_zero_entry(entry))
- unmap_mapping_range(mapping,
- (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+ unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+ PG_PMD_NR, false);
err = radix_tree_preload(
mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
@@ -538,12 +539,10 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
/* we are replacing a zero page with block mapping */
if (dax_is_pmd_entry(entry))
- unmap_mapping_range(mapping,
- (vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
- PMD_SIZE, 0);
+ unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+ PG_PMD_NR, false);
else /* pte entry */
- unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
- PAGE_SIZE, 0);
+ unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
}
spin_lock_irq(&mapping->tree_lock);
@@ -636,8 +635,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
pmd = pmd_mkclean(pmd);
set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
- spin_unlock(ptl);
#endif
+ spin_unlock(ptl);
} else {
if (pfn != pte_pfn(*ptep))
goto unlock_pte;
@@ -1269,12 +1268,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
}
#ifdef CONFIG_FS_DAX_PMD
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
- * more often than one might expect in the below functions.
- */
-#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
-
static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
void *entry)
{
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c7df1df81ff..d93872d425d6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -32,7 +32,6 @@
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
-#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
@@ -49,8 +48,8 @@
* - i_dentry, d_u.d_alias, d_inode of aliases
* dcache_hash_bucket lock protects:
* - the dcache hash table
- * s_anon bl list spinlock protects:
- * - the s_anon list (see __d_drop)
+ * s_roots bl list spinlock protects:
+ * - the s_roots list (see __d_drop)
* dentry->d_sb->s_dentry_lru_lock protects:
* - the dcache lru lists and counters
* d_lock protects:
@@ -68,7 +67,7 @@
* dentry->d_lock
* dentry->d_sb->s_dentry_lru_lock
* dcache_hash_bucket lock
- * s_anon lock
+ * s_roots lock
*
* If there is an ancestor relationship:
* dentry->d_parent->...->d_parent->d_lock
@@ -104,14 +103,13 @@ EXPORT_SYMBOL(slash_name);
* information, yet avoid using a prime hash-size or similar.
*/
-static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_bl_head *dentry_hashtable __read_mostly;
static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
- return dentry_hashtable + (hash >> (32 - d_hash_shift));
+ return dentry_hashtable + (hash >> d_hash_shift);
}
#define IN_LOOKUP_SHIFT 10
@@ -468,30 +466,37 @@ static void dentry_lru_add(struct dentry *dentry)
* d_drop() is used mainly for stuff that wants to invalidate a dentry for some
* reason (NFS timeouts or autofs deletes).
*
- * __d_drop requires dentry->d_lock.
+ * __d_drop requires dentry->d_lock
+ * ___d_drop doesn't mark dentry as "unhashed"
+ * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
*/
-void __d_drop(struct dentry *dentry)
+static void ___d_drop(struct dentry *dentry)
{
if (!d_unhashed(dentry)) {
struct hlist_bl_head *b;
/*
* Hashed dentries are normally on the dentry hashtable,
* with the exception of those newly allocated by
- * d_obtain_alias, which are always IS_ROOT:
+ * d_obtain_root, which are always IS_ROOT:
*/
if (unlikely(IS_ROOT(dentry)))
- b = &dentry->d_sb->s_anon;
+ b = &dentry->d_sb->s_roots;
else
b = d_hash(dentry->d_name.hash);
hlist_bl_lock(b);
__hlist_bl_del(&dentry->d_hash);
- dentry->d_hash.pprev = NULL;
hlist_bl_unlock(b);
/* After this call, in-progress rcu-walk path lookup will fail. */
write_seqcount_invalidate(&dentry->d_seq);
}
}
+
+void __d_drop(struct dentry *dentry)
+{
+ ___d_drop(dentry);
+ dentry->d_hash.pprev = NULL;
+}
EXPORT_SYMBOL(__d_drop);
void d_drop(struct dentry *dentry)
@@ -1500,8 +1505,8 @@ void shrink_dcache_for_umount(struct super_block *sb)
sb->s_root = NULL;
do_one_tree(dentry);
- while (!hlist_bl_empty(&sb->s_anon)) {
- dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
+ while (!hlist_bl_empty(&sb->s_roots)) {
+ dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
do_one_tree(dentry);
}
}
@@ -1636,8 +1641,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
dname[name->len] = 0;
/* Make sure we always see the terminating NUL character */
- smp_wmb();
- dentry->d_name.name = dname;
+ smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
dentry->d_lockref.count = 1;
dentry->d_flags = 0;
@@ -1965,9 +1969,11 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
spin_lock(&tmp->d_lock);
__d_set_inode_and_type(tmp, inode, add_flags);
hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
- hlist_bl_lock(&tmp->d_sb->s_anon);
- hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
- hlist_bl_unlock(&tmp->d_sb->s_anon);
+ if (!disconnected) {
+ hlist_bl_lock(&tmp->d_sb->s_roots);
+ hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_roots);
+ hlist_bl_unlock(&tmp->d_sb->s_roots);
+ }
spin_unlock(&tmp->d_lock);
spin_unlock(&inode->i_lock);
@@ -2381,7 +2387,7 @@ EXPORT_SYMBOL(d_delete);
static void __d_rehash(struct dentry *entry)
{
struct hlist_bl_head *b = d_hash(entry->d_name.hash);
- BUG_ON(!d_unhashed(entry));
+
hlist_bl_lock(b);
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
@@ -2816,9 +2822,9 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
/* unhash both */
- /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
- __d_drop(dentry);
- __d_drop(target);
+ /* ___d_drop does write_seqcount_barrier, but they're OK to nest. */
+ ___d_drop(dentry);
+ ___d_drop(target);
/* Switch the names.. */
if (exchange)
@@ -2830,6 +2836,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
__d_rehash(dentry);
if (exchange)
__d_rehash(target);
+ else
+ target->d_hash.pprev = NULL;
/* ... and switch them in the tree */
if (IS_ROOT(dentry)) {
@@ -3047,17 +3055,14 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
* retry it again when a d_move() does happen. So any garbage in the buffer
* due to mismatched pointer and length will be discarded.
*
- * Data dependency barrier is needed to make sure that we see that terminating
- * NUL. Alpha strikes again, film at 11...
+ * Load acquire is needed to make sure that we see that terminating NUL.
*/
static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
- const char *dname = READ_ONCE(name->name);
+ const char *dname = smp_load_acquire(&name->name); /* ^^^ */
u32 dlen = READ_ONCE(name->len);
char *p;
- smp_read_barrier_depends();
-
*buflen -= dlen + 1;
if (*buflen < 0)
return -ENAMETOOLONG;
@@ -3589,9 +3594,10 @@ static void __init dcache_init_early(void)
13,
HASH_EARLY | HASH_ZERO,
&d_hash_shift,
- &d_hash_mask,
+ NULL,
0,
0);
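+ /*
+ * alloc_large_system_hash() stored log2(number of buckets) in
+ * d_hash_shift; convert it to the top-bits shift used by d_hash().
+ * For example, a 2^13-entry table gives 32 - 13 = 19, so d_hash()
+ * keeps the 13 high bits of the hash.
+ */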
+ d_hash_shift = 32 - d_hash_shift;
}
static void __init dcache_init(void)
@@ -3615,9 +3621,10 @@ static void __init dcache_init(void)
13,
HASH_ZERO,
&d_hash_shift,
- &d_hash_mask,
+ NULL,
0,
0);
+ d_hash_shift = 32 - d_hash_shift;
}
/* SLAB cache for __getname() consumers */
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index cd12e6576b48..6fdbf21be318 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -206,11 +206,11 @@ FULL_PROXY_FUNC(unlocked_ioctl, long, filp,
PROTO(struct file *filp, unsigned int cmd, unsigned long arg),
ARGS(filp, cmd, arg));
-static unsigned int full_proxy_poll(struct file *filp,
+static __poll_t full_proxy_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct dentry *dentry = F_DENTRY(filp);
- unsigned int r = 0;
+ __poll_t r = 0;
const struct file_operations *real_fops;
if (debugfs_file_get(dentry))
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 7eae33ffa3fc..e31d6ed3ec32 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -168,11 +168,11 @@ struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi)
dput(path.dentry);
if (err) {
mntput(path.mnt);
- path.mnt = ERR_PTR(err);
+ return ERR_PTR(err);
}
if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) {
mntput(path.mnt);
- path.mnt = ERR_PTR(-ENODEV);
+ return ERR_PTR(-ENODEV);
}
return path.mnt;
}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 3aafb3343a65..a0ca9e48e993 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -219,6 +219,27 @@ static inline struct page *dio_get_page(struct dio *dio,
return dio->pages[sdio->head];
}
+/*
+ * Warn about a page cache invalidation failure during a direct I/O write.
+ */
+void dio_warn_stale_pagecache(struct file *filp)
+{
+ static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
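+ /* i.e. allow at most DEFAULT_RATELIMIT_BURST reports per 86400s (one day) */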
+ char pathname[128];
+ struct inode *inode = file_inode(filp);
+ char *path;
+
+ errseq_set(&inode->i_mapping->wb_err, -EIO);
+ if (__ratelimit(&_rs)) {
+ path = file_path(filp, pathname, sizeof(pathname));
+ if (IS_ERR(path))
+ path = "(unknown)";
+ pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
+ pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
+ current->comm);
+ }
+}
+
/**
* dio_complete() - called when all DIO BIO I/O has been completed
* @offset: the byte offset in the file of the completed operation
@@ -290,7 +311,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
err = invalidate_inode_pages2_range(dio->inode->i_mapping,
offset >> PAGE_SHIFT,
(offset + ret - 1) >> PAGE_SHIFT);
- WARN_ON_ONCE(err);
+ if (err)
+ dio_warn_stale_pagecache(dio->iocb->ki_filp);
}
if (!(dio->flags & DIO_SKIP_DIO_COUNT))
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 05707850f93a..cff79ea0c01d 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -675,9 +675,9 @@ static int receive_from_sock(struct connection *con)
nvec = 2;
}
len = iov[0].iov_len + iov[1].iov_len;
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nvec, len);
- r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
- MSG_DONTWAIT | MSG_NOSIGNAL);
+ r = ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT | MSG_NOSIGNAL);
if (ret <= 0)
goto out_close;
else if (ret == len)
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index e631b1689228..a4c63e9e6385 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -463,9 +463,9 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
return count;
}
-static unsigned int dev_poll(struct file *file, poll_table *wait)
+static __poll_t dev_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &send_wq, wait);
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index d18e7a539f11..662432af8ce8 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -887,7 +887,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
return rv;
}
-static unsigned int device_poll(struct file *file, poll_table *wait)
+static __poll_t device_poll(struct file *file, poll_table *wait)
{
struct dlm_user_proc *proc = file->private_data;
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index f09cacaf8c80..7423e792a092 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -38,11 +38,11 @@ static atomic_t ecryptfs_num_miscdev_opens;
*
* Returns the poll mask
*/
-static unsigned int
+static __poll_t
ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
{
struct ecryptfs_daemon *daemon = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
mutex_lock(&daemon->mux);
if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 2fb4eadaa118..04fd824142a1 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -80,24 +80,11 @@ static void eventfd_free(struct kref *kref)
}
/**
- * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
- * @ctx: [in] Pointer to the eventfd context.
- *
- * Returns: In case of success, returns a pointer to the eventfd context.
- */
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
-{
- kref_get(&ctx->kref);
- return ctx;
-}
-EXPORT_SYMBOL_GPL(eventfd_ctx_get);
-
-/**
* eventfd_ctx_put - Releases a reference to the internal eventfd context.
* @ctx: [in] Pointer to eventfd context.
*
* The eventfd context reference must have been previously acquired either
- * with eventfd_ctx_get() or eventfd_ctx_fdget().
+ * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
*/
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
@@ -114,10 +101,10 @@ static int eventfd_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int eventfd_poll(struct file *file, poll_table *wait)
+static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
struct eventfd_ctx *ctx = file->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
u64 count;
poll_wait(file, &ctx->wqh, wait);
@@ -207,36 +194,27 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
-/**
- * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
- * @ctx: [in] Pointer to eventfd context.
- * @no_wait: [in] Different from zero if the operation should not block.
- * @cnt: [out] Pointer to the 64-bit counter value.
- *
- * Returns %0 if successful, or the following error codes:
- *
- * - -EAGAIN : The operation would have blocked but @no_wait was non-zero.
- * - -ERESTARTSYS : A signal interrupted the wait operation.
- *
- * If @no_wait is zero, the function might sleep until the eventfd internal
- * counter becomes greater than zero.
- */
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
+static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
{
+ struct eventfd_ctx *ctx = file->private_data;
ssize_t res;
+ __u64 ucnt = 0;
DECLARE_WAITQUEUE(wait, current);
+ if (count < sizeof(ucnt))
+ return -EINVAL;
+
spin_lock_irq(&ctx->wqh.lock);
- *cnt = 0;
res = -EAGAIN;
if (ctx->count > 0)
- res = 0;
- else if (!no_wait) {
+ res = sizeof(ucnt);
+ else if (!(file->f_flags & O_NONBLOCK)) {
__add_wait_queue(&ctx->wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (ctx->count > 0) {
- res = 0;
+ res = sizeof(ucnt);
break;
}
if (signal_pending(current)) {
@@ -250,31 +228,17 @@ ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
}
- if (likely(res == 0)) {
- eventfd_ctx_do_read(ctx, cnt);
+ if (likely(res > 0)) {
+ eventfd_ctx_do_read(ctx, &ucnt);
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, POLLOUT);
}
spin_unlock_irq(&ctx->wqh.lock);
- return res;
-}
-EXPORT_SYMBOL_GPL(eventfd_ctx_read);
-
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct eventfd_ctx *ctx = file->private_data;
- ssize_t res;
- __u64 cnt;
-
- if (count < sizeof(cnt))
- return -EINVAL;
- res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt);
- if (res < 0)
- return res;
+ if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
+ return -EFAULT;
- return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt);
+ return res;
}
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
@@ -405,79 +369,44 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
*/
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
+ struct eventfd_ctx *ctx;
+
if (file->f_op != &eventfd_fops)
return ERR_PTR(-EINVAL);
- return eventfd_ctx_get(file->private_data);
+ ctx = file->private_data;
+ kref_get(&ctx->kref);
+ return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
-/**
- * eventfd_file_create - Creates an eventfd file pointer.
- * @count: Initial eventfd counter value.
- * @flags: Flags for the eventfd file.
- *
- * This function creates an eventfd file pointer, w/out installing it into
- * the fd table. This is useful when the eventfd file is used during the
- * initialization of data structures that require extra setup after the eventfd
- * creation. So the eventfd creation is split into the file pointer creation
- * phase, and the file descriptor installation phase.
- * In this way races with userspace closing the newly installed file descriptor
- * can be avoided.
- * Returns an eventfd file pointer, or a proper error pointer.
- */
-struct file *eventfd_file_create(unsigned int count, int flags)
+SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
- struct file *file;
struct eventfd_ctx *ctx;
+ int fd;
/* Check the EFD_* constants for consistency. */
BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);
if (flags & ~EFD_FLAGS_SET)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
kref_init(&ctx->kref);
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
- file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
- O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
- if (IS_ERR(file))
+ fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
+ O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
+ if (fd < 0)
eventfd_free_ctx(ctx);
- return file;
-}
-
-SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
-{
- int fd, error;
- struct file *file;
-
- error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
- if (error < 0)
- return error;
- fd = error;
-
- file = eventfd_file_create(count, flags);
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto err_put_unused_fd;
- }
- fd_install(fd, file);
-
return fd;
-
-err_put_unused_fd:
- put_unused_fd(fd);
-
- return error;
}
SYSCALL_DEFINE1(eventfd, unsigned int, count)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index afd548ebc328..42e35a6977c9 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -874,7 +874,8 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
* the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
* is correctly annotated.
*/
-static unsigned int ep_item_poll(struct epitem *epi, poll_table *pt, int depth)
+static unsigned int ep_item_poll(const struct epitem *epi, poll_table *pt,
+ int depth)
{
struct eventpoll *ep;
bool locked;
@@ -920,7 +921,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
return 0;
}
-static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
struct eventpoll *ep = file->private_data;
int depth = 0;
@@ -1117,6 +1118,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
unsigned long flags;
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
+ __poll_t pollflags = key_to_poll(key);
int ewake = 0;
spin_lock_irqsave(&ep->lock, flags);
@@ -1138,7 +1140,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
* callback. We need to be able to handle both cases here, hence the
* test for "key" != NULL before the event match test.
*/
- if (key && !((unsigned long) key & epi->event.events))
+ if (pollflags && !(pollflags & epi->event.events))
goto out_unlock;
/*
@@ -1175,8 +1177,8 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
*/
if (waitqueue_active(&ep->wq)) {
if ((epi->event.events & EPOLLEXCLUSIVE) &&
- !((unsigned long)key & POLLFREE)) {
- switch ((unsigned long)key & EPOLLINOUT_BITS) {
+ !(pollflags & POLLFREE)) {
+ switch (pollflags & EPOLLINOUT_BITS) {
case POLLIN:
if (epi->event.events & POLLIN)
ewake = 1;
@@ -1205,7 +1207,7 @@ out_unlock:
if (!(epi->event.events & EPOLLEXCLUSIVE))
ewake = 1;
- if ((unsigned long)key & POLLFREE) {
+ if (pollflags & POLLFREE) {
/*
* If we race with ep_remove_wait_queue() it can miss
* ->whead = NULL and do another remove_wait_queue() after
@@ -1409,7 +1411,7 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi)
/*
* Must be called with "mtx" held.
*/
-static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
struct file *tfile, int fd, int full_check)
{
int error, revents, pwake = 0;
@@ -1486,7 +1488,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
ep_set_busy_poll_napi_id(epi);
/* If the file is already "ready" we drop it inside the ready list */
- if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
+ if (revents && !ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
@@ -1540,10 +1542,10 @@ error_create_wakeup_source:
* Modify the interest event mask by dropping an event if the new mask
* has a match in the current file status. Must be called with "mtx" held.
*/
-static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
+static int ep_modify(struct eventpoll *ep, struct epitem *epi,
+ const struct epoll_event *event)
{
int pwake = 0;
- unsigned int revents;
poll_table pt;
init_poll_funcptr(&pt, NULL);
@@ -1585,14 +1587,10 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
/*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
- */
- revents = ep_item_poll(epi, &pt, 1);
-
- /*
* If the item is "hot" and it is not registered inside the ready
* list, push it inside.
*/
- if (revents & event->events) {
+ if (ep_item_poll(epi, &pt, 1)) {
spin_lock_irq(&ep->lock);
if (!ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
diff --git a/fs/exec.c b/fs/exec.c
index 5688b5e1b937..7eb8d21bcab9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1349,9 +1349,14 @@ void setup_new_exec(struct linux_binprm * bprm)
current->sas_ss_sp = current->sas_ss_size = 0;
- /* Figure out dumpability. */
+ /*
+ * Figure out dumpability. Note that this checking only of current
+ * is wrong, but userspace depends on it. This should be testing
+ * bprm->secureexec instead.
+ */
if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
- bprm->secureexec)
+ !(uid_eq(current_euid(), current_uid()) &&
+ gid_eq(current_egid(), current_gid())))
set_dumpable(current->mm, suid_dumpable);
else
set_dumpable(current->mm, SUID_DUMP_USER);
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 98233a97b7b8..c5a53fcc43ea 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -31,6 +31,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/iversion.h>
#include "exofs.h"
static inline unsigned exofs_chunk_size(struct inode *inode)
@@ -60,7 +61,7 @@ static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len)
struct inode *dir = mapping->host;
int err = 0;
- dir->i_version++;
+ inode_inc_iversion(dir);
if (!PageUptodate(page))
SetPageUptodate(page);
@@ -241,7 +242,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
- int need_revalidate = (file->f_version != inode->i_version);
+ bool need_revalidate = inode_cmp_iversion(inode, file->f_version);
if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1))
return 0;
@@ -264,8 +265,8 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
chunk_mask);
ctx->pos = (n<<PAGE_SHIFT) + offset;
}
- file->f_version = inode->i_version;
- need_revalidate = 0;
+ file->f_version = inode_query_iversion(inode);
+ need_revalidate = false;
}
de = (struct exofs_dir_entry *)(kaddr + offset);
limit = kaddr + exofs_last_byte(inode, n) -
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 819624cfc8da..7e244093c0e5 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -38,6 +38,7 @@
#include <linux/module.h>
#include <linux/exportfs.h>
#include <linux/slab.h>
+#include <linux/iversion.h>
#include "exofs.h"
@@ -159,7 +160,7 @@ static struct inode *exofs_alloc_inode(struct super_block *sb)
if (!oi)
return NULL;
- oi->vfs_inode.i_version = 1;
+ inode_set_iversion(&oi->vfs_inode, 1);
return &oi->vfs_inode;
}
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index c634874e12d9..894e4c53d1d2 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -13,8 +13,7 @@ config EXT2_FS_XATTR
depends on EXT2_FS
help
Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details).
+ the kernel or by users (see the attr(5) manual page for details).
If unsure, say N.
@@ -26,9 +25,6 @@ config EXT2_FS_POSIX_ACL
Posix Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the Posix ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N
config EXT2_FS_SECURITY
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 987647986f47..4111085a129f 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -26,6 +26,7 @@
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
+#include <linux/iversion.h>
typedef struct ext2_dir_entry_2 ext2_dirent;
@@ -92,7 +93,7 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
struct inode *dir = mapping->host;
int err = 0;
- dir->i_version++;
+ inode_inc_iversion(dir);
block_write_end(NULL, mapping, pos, len, len, page, NULL);
if (pos+len > dir->i_size) {
@@ -293,7 +294,7 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
unsigned char *types = NULL;
- int need_revalidate = file->f_version != inode->i_version;
+ bool need_revalidate = inode_cmp_iversion(inode, file->f_version);
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
return 0;
@@ -319,8 +320,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
offset = ext2_validate_entry(kaddr, offset, chunk_mask);
ctx->pos = (n<<PAGE_SHIFT) + offset;
}
- file->f_version = inode->i_version;
- need_revalidate = 0;
+ file->f_version = inode_query_iversion(inode);
+ need_revalidate = false;
}
de = (ext2_dirent *)(kaddr+offset);
limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7646818ab266..554c98b8a93a 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -33,6 +33,7 @@
#include <linux/quotaops.h>
#include <linux/uaccess.h>
#include <linux/dax.h>
+#include <linux/iversion.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
@@ -184,7 +185,7 @@ static struct inode *ext2_alloc_inode(struct super_block *sb)
if (!ei)
return NULL;
ei->i_block_alloc_info = NULL;
- ei->vfs_inode.i_version = 1;
+ inode_set_iversion(&ei->vfs_inode, 1);
#ifdef CONFIG_QUOTA
memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
@@ -1569,7 +1570,7 @@ out:
return err;
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
- inode->i_version++;
+ inode_inc_iversion(inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
mark_inode_dirty(inode);
return len - towrite;
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 73b850f5659c..a453cc87082b 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -82,9 +82,6 @@ config EXT4_FS_POSIX_ACL
POSIX Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N
config EXT4_FS_SECURITY
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index d5babc9f222b..afda0a0499ce 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -25,6 +25,7 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
+#include <linux/iversion.h>
#include "ext4.h"
#include "xattr.h"
@@ -208,7 +209,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (file->f_version != inode->i_version) {
+ if (inode_cmp_iversion(inode, file->f_version)) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ext4_dir_entry_2 *)
(bh->b_data + i);
@@ -227,7 +228,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
offset = i;
ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
| offset;
- file->f_version = inode->i_version;
+ file->f_version = inode_query_iversion(inode);
}
while (ctx->pos < inode->i_size
@@ -568,10 +569,10 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
* cached entries.
*/
if ((!info->curr_node) ||
- (file->f_version != inode->i_version)) {
+ inode_cmp_iversion(inode, file->f_version)) {
info->curr_node = NULL;
free_rb_tree_fname(&info->root);
- file->f_version = inode->i_version;
+ file->f_version = inode_query_iversion(inode);
ret = ext4_htree_fill_tree(file, info->curr_hash,
info->curr_minor_hash,
&info->next_hash);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 1367553c43bb..a8b987b71173 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -14,6 +14,7 @@
#include <linux/iomap.h>
#include <linux/fiemap.h>
+#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "ext4.h"
@@ -1042,7 +1043,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
*/
dir->i_mtime = dir->i_ctime = current_time(dir);
ext4_update_dx_flag(dir);
- dir->i_version++;
+ inode_inc_iversion(dir);
return 1;
}
@@ -1494,7 +1495,7 @@ int ext4_read_inline_dir(struct file *file,
* dirent right now. Scan from the start of the inline
* dir to make sure.
*/
- if (file->f_version != inode->i_version) {
+ if (inode_cmp_iversion(inode, file->f_version)) {
for (i = 0; i < extra_size && i < offset;) {
/*
* "." is with offset 0 and
@@ -1526,7 +1527,7 @@ int ext4_read_inline_dir(struct file *file,
}
offset = i;
ctx->pos = offset;
- file->f_version = inode->i_version;
+ file->f_version = inode_query_iversion(inode);
}
while (ctx->pos < extra_size) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 534a9130f625..0eff5b761c6e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
+#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -4882,12 +4883,14 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
- inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
+ u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
+
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
- inode->i_version |=
+ ivers |=
(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
}
+ inode_set_iversion_queried(inode, ivers);
}
ret = 0;
@@ -5173,11 +5176,13 @@ static int ext4_do_update_inode(handle_t *handle,
}
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
- raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
+ u64 ivers = inode_peek_iversion(inode);
+
+ raw_inode->i_disk_version = cpu_to_le32(ivers);
if (ei->i_extra_isize) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
raw_inode->i_version_hi =
- cpu_to_le32(inode->i_version >> 32);
+ cpu_to_le32(ivers >> 32);
raw_inode->i_extra_isize =
cpu_to_le16(ei->i_extra_isize);
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 1eec25014f62..7e99ad02f1ba 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -19,6 +19,7 @@
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include <linux/fsmap.h>
@@ -144,7 +145,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
i_gid_write(inode_bl, 0);
inode_bl->i_flags = 0;
ei_bl->i_flags = 0;
- inode_bl->i_version = 1;
+ inode_set_iversion(inode_bl, 1);
i_size_write(inode_bl, 0);
inode_bl->i_mode = S_IFREG;
if (ext4_has_feature_extents(sb)) {
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e750d68fbcb5..6660686e505a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -34,6 +34,7 @@
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
+#include <linux/iversion.h>
#include "ext4.h"
#include "ext4_jbd2.h"
@@ -2959,7 +2960,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
"empty directory '%.*s' has too many links (%u)",
dentry->d_name.len, dentry->d_name.name,
inode->i_nlink);
- inode->i_version++;
+ inode_inc_iversion(inode);
clear_nlink(inode);
/* There's no need to set i_disksize: the fact that i_nlink is
* zero will ensure that the right thing happens during any
@@ -3365,7 +3366,7 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
ent->de->inode = cpu_to_le32(ino);
if (ext4_has_feature_filetype(ent->dir->i_sb))
ent->de->file_type = file_type;
- ent->dir->i_version++;
+ inode_inc_iversion(ent->dir);
ent->dir->i_ctime = ent->dir->i_mtime =
current_time(ent->dir);
ext4_mark_inode_dirty(handle, ent->dir);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 7c46693a14d7..5de959fb0244 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -40,6 +40,7 @@
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
+#include <linux/iversion.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -967,7 +968,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
if (!ei)
return NULL;
- ei->vfs_inode.i_version = 1;
+ inode_set_iversion(&ei->vfs_inode, 1);
spin_lock_init(&ei->i_raw_lock);
INIT_LIST_HEAD(&ei->i_prealloc_list);
spin_lock_init(&ei->i_prealloc_lock);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 218a7ba57819..63656dbafdc4 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -56,6 +56,7 @@
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
+#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
@@ -294,13 +295,13 @@ ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
return ((u64)ea_inode->i_ctime.tv_sec << 32) |
- ((u32)ea_inode->i_version);
+ (u32) inode_peek_iversion_raw(ea_inode);
}
static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
{
ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
- ea_inode->i_version = (u32)ref_count;
+ inode_set_iversion_raw(ea_inode, ref_count & 0xffffffff);
}
static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 378c221d68a9..9a20ef42fadd 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -35,8 +35,7 @@ config F2FS_FS_XATTR
default y
help
Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details).
+ the kernel or by users (see the attr(5) manual page for details).
If unsure, say N.
@@ -49,9 +48,6 @@ config F2FS_FS_POSIX_ACL
Posix Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N
config F2FS_FS_SECURITY
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 2bb7c9fc5144..111824199a88 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -270,7 +270,7 @@ static struct posix_acl *f2fs_acl_clone(const struct posix_acl *acl,
sizeof(struct posix_acl_entry);
clone = kmemdup(acl, size, flags);
if (clone)
- atomic_set(&clone->a_refcount, 1);
+ refcount_set(&clone->a_refcount, 1);
}
return clone;
}
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 4aa69bc1c70a..512dca8abc7d 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -237,12 +237,15 @@ static int __f2fs_write_meta_page(struct page *page,
trace_f2fs_writepage(page, META);
+ if (unlikely(f2fs_cp_error(sbi))) {
+ dec_page_count(sbi, F2FS_DIRTY_META);
+ unlock_page(page);
+ return 0;
+ }
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
goto redirty_out;
- if (unlikely(f2fs_cp_error(sbi)))
- goto redirty_out;
write_meta_page(sbi, page, io_type);
dec_page_count(sbi, F2FS_DIRTY_META);
@@ -793,7 +796,7 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
block_t cp_blk_no;
int i;
- sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
+ sbi->ckpt = f2fs_kzalloc(sbi, cp_blks * blk_size, GFP_KERNEL);
if (!sbi->ckpt)
return -ENOMEM;
/*
@@ -1154,6 +1157,7 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* set this flag to activate crc|cp_ver for recovery */
__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
+ __clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);
spin_unlock_irqrestore(&sbi->cp_lock, flags);
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 516fa0d3ff9c..7578ed1a85e0 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -56,7 +56,7 @@ static void f2fs_read_end_io(struct bio *bio)
int i;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
+ if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
f2fs_show_injection_info(FAULT_IO);
bio->bi_status = BLK_STS_IOERR;
}
@@ -111,8 +111,13 @@ static void f2fs_write_end_io(struct bio *bio)
if (unlikely(bio->bi_status)) {
mapping_set_error(page->mapping, -EIO);
- f2fs_stop_checkpoint(sbi, true);
+ if (type == F2FS_WB_CP_DATA)
+ f2fs_stop_checkpoint(sbi, true);
}
+
+ f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
+ page->index != nid_of_node(page));
+
dec_page_count(sbi, type);
clear_cold_data(page);
end_page_writeback(page);
@@ -169,6 +174,7 @@ static bool __same_bdev(struct f2fs_sb_info *sbi,
* Low-level block read/write IO operations.
*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
+ struct writeback_control *wbc,
int npages, bool is_read)
{
struct bio *bio;
@@ -178,6 +184,8 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
f2fs_target_device(sbi, blk_addr, bio);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
bio->bi_private = is_read ? NULL : sbi;
+ if (wbc)
+ wbc_init_bio(wbc, bio);
return bio;
}
@@ -373,7 +381,8 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
- bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));
+ bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
+ 1, is_read_io(fio->op));
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
@@ -435,7 +444,7 @@ alloc_new:
dec_page_count(sbi, WB_DATA_TYPE(bio_page));
goto out_fail;
}
- io->bio = __bio_alloc(sbi, fio->new_blkaddr,
+ io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
BIO_MAX_PAGES, false);
io->fio = *fio;
}
@@ -445,6 +454,9 @@ alloc_new:
goto alloc_new;
}
+ if (fio->io_wbc)
+ wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);
+
io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0);
@@ -783,7 +795,7 @@ got_it:
return page;
}
-static int __allocate_data_block(struct dnode_of_data *dn)
+static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_summary sum;
@@ -808,7 +820,7 @@ alloc:
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
- &sum, CURSEG_WARM_DATA, NULL, false);
+ &sum, seg_type, NULL, false);
set_data_blkaddr(dn);
/* update i_size */
@@ -831,10 +843,12 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct f2fs_map_blocks map;
+ int flag;
int err = 0;
+ bool direct_io = iocb->ki_flags & IOCB_DIRECT;
/* convert inline data for Direct I/O*/
- if (iocb->ki_flags & IOCB_DIRECT) {
+ if (direct_io) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
@@ -851,19 +865,33 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
map.m_len = 0;
map.m_next_pgofs = NULL;
+ map.m_next_extent = NULL;
+ map.m_seg_type = NO_CHECK_TYPE;
- if (iocb->ki_flags & IOCB_DIRECT)
- return f2fs_map_blocks(inode, &map, 1,
- __force_buffered_io(inode, WRITE) ?
- F2FS_GET_BLOCK_PRE_AIO :
- F2FS_GET_BLOCK_PRE_DIO);
+ if (direct_io) {
+ map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
+ flag = __force_buffered_io(inode, WRITE) ?
+ F2FS_GET_BLOCK_PRE_AIO :
+ F2FS_GET_BLOCK_PRE_DIO;
+ goto map_blocks;
+ }
if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
}
- if (!f2fs_has_inline_data(inode))
- return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ if (f2fs_has_inline_data(inode))
+ return err;
+
+ flag = F2FS_GET_BLOCK_PRE_AIO;
+
+map_blocks:
+ err = f2fs_map_blocks(inode, &map, 1, flag);
+ if (map.m_len > 0 && err == -ENOSPC) {
+ if (!direct_io)
+ set_inode_flag(inode, FI_NO_PREALLOC);
+ err = 0;
+ }
return err;
}
@@ -904,6 +932,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
blkcnt_t prealloc;
struct extent_info ei = {0,0,0};
block_t blkaddr;
+ unsigned int start_pgofs;
if (!maxblocks)
return 0;
@@ -919,6 +948,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
map->m_pblk = ei.blk + pgofs - ei.fofs;
map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
map->m_flags = F2FS_MAP_MAPPED;
+ if (map->m_next_extent)
+ *map->m_next_extent = pgofs + map->m_len;
goto out;
}
@@ -937,10 +968,14 @@ next_dnode:
if (map->m_next_pgofs)
*map->m_next_pgofs =
get_next_page_offset(&dn, pgofs);
+ if (map->m_next_extent)
+ *map->m_next_extent =
+ get_next_page_offset(&dn, pgofs);
}
goto unlock_out;
}
+ start_pgofs = pgofs;
prealloc = 0;
last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
@@ -960,7 +995,8 @@ next_block:
last_ofs_in_node = dn.ofs_in_node;
}
} else {
- err = __allocate_data_block(&dn);
+ err = __allocate_data_block(&dn,
+ map->m_seg_type);
if (!err)
set_inode_flag(inode, FI_APPEND_WRITE);
}
@@ -973,14 +1009,20 @@ next_block:
map->m_pblk = 0;
goto sync_out;
}
+ if (flag == F2FS_GET_BLOCK_PRECACHE)
+ goto sync_out;
if (flag == F2FS_GET_BLOCK_FIEMAP &&
blkaddr == NULL_ADDR) {
if (map->m_next_pgofs)
*map->m_next_pgofs = pgofs + 1;
+ goto sync_out;
}
- if (flag != F2FS_GET_BLOCK_FIEMAP ||
- blkaddr != NEW_ADDR)
+ if (flag != F2FS_GET_BLOCK_FIEMAP) {
+ /* for defragment case */
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = pgofs + 1;
goto sync_out;
+ }
}
}
@@ -1031,6 +1073,16 @@ skip:
else if (dn.ofs_in_node < end_offset)
goto next_block;
+ if (flag == F2FS_GET_BLOCK_PRECACHE) {
+ if (map->m_flags & F2FS_MAP_MAPPED) {
+ unsigned int ofs = start_pgofs - map->m_lblk;
+
+ f2fs_update_extent_cache_range(&dn,
+ start_pgofs, map->m_pblk + ofs,
+ map->m_len - ofs);
+ }
+ }
+
f2fs_put_dnode(&dn);
if (create) {
@@ -1040,6 +1092,17 @@ skip:
goto next_dnode;
sync_out:
+ if (flag == F2FS_GET_BLOCK_PRECACHE) {
+ if (map->m_flags & F2FS_MAP_MAPPED) {
+ unsigned int ofs = start_pgofs - map->m_lblk;
+
+ f2fs_update_extent_cache_range(&dn,
+ start_pgofs, map->m_pblk + ofs,
+ map->m_len - ofs);
+ }
+ if (map->m_next_extent)
+ *map->m_next_extent = pgofs + 1;
+ }
f2fs_put_dnode(&dn);
unlock_out:
if (create) {
@@ -1053,7 +1116,7 @@ out:
static int __get_data_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create, int flag,
- pgoff_t *next_pgofs)
+ pgoff_t *next_pgofs, int seg_type)
{
struct f2fs_map_blocks map;
int err;
@@ -1061,6 +1124,8 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
map.m_next_pgofs = next_pgofs;
+ map.m_next_extent = NULL;
+ map.m_seg_type = seg_type;
err = f2fs_map_blocks(inode, &map, create, flag);
if (!err) {
@@ -1076,14 +1141,17 @@ static int get_data_block(struct inode *inode, sector_t iblock,
pgoff_t *next_pgofs)
{
return __get_data_block(inode, iblock, bh_result, create,
- flag, next_pgofs);
+ flag, next_pgofs,
+ NO_CHECK_TYPE);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DEFAULT, NULL);
+ F2FS_GET_BLOCK_DEFAULT, NULL,
+ rw_hint_to_seg_type(
+ inode->i_write_hint));
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1094,7 +1162,8 @@ static int get_data_block_bmap(struct inode *inode, sector_t iblock,
return -EFBIG;
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_BMAP, NULL);
+ F2FS_GET_BLOCK_BMAP, NULL,
+ NO_CHECK_TYPE);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -1107,6 +1176,68 @@ static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
return (blk << inode->i_blkbits);
}
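+/*
+ * Report xattr locations through fiemap: the inline xattr region inside
+ * the inode block first, then the separate xattr node block, if any.
+ */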
+static int f2fs_xattr_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct page *page;
+ struct node_info ni;
+ __u64 phys = 0, len;
+ __u32 flags;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ int err = 0;
+
+ if (f2fs_has_inline_xattr(inode)) {
+ int offset;
+
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
+ inode->i_ino, false);
+ if (!page)
+ return -ENOMEM;
+
+ get_node_info(sbi, inode->i_ino, &ni);
+
+ phys = (__u64)blk_to_logical(inode, ni.blk_addr);
+ offset = offsetof(struct f2fs_inode, i_addr) +
+ sizeof(__le32) * (DEF_ADDRS_PER_INODE -
+ get_inline_xattr_addrs(inode));
+
+ phys += offset;
+ len = inline_xattr_size(inode);
+
+ f2fs_put_page(page, 1);
+
+ flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
+
+ if (!xnid)
+ flags |= FIEMAP_EXTENT_LAST;
+
+ err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
+ if (err || err == 1)
+ return err;
+ }
+
+ if (xnid) {
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
+ if (!page)
+ return -ENOMEM;
+
+ get_node_info(sbi, xnid, &ni);
+
+ phys = (__u64)blk_to_logical(inode, ni.blk_addr);
+ len = inode->i_sb->s_blocksize;
+
+ f2fs_put_page(page, 1);
+
+ flags = FIEMAP_EXTENT_LAST;
+ }
+
+ if (phys)
+ err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
+
+ return (err < 0 ? err : 0);
+}
+
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
@@ -1117,18 +1248,29 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u32 flags = 0;
int ret = 0;
- ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
+ ret = f2fs_precache_extents(inode);
+ if (ret)
+ return ret;
+ }
+
+ ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
if (ret)
return ret;
+ inode_lock(inode);
+
+ if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
+ ret = f2fs_xattr_fiemap(inode, fieinfo);
+ goto out;
+ }
+
if (f2fs_has_inline_data(inode)) {
ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
if (ret != -EAGAIN)
- return ret;
+ goto out;
}
- inode_lock(inode);
-
if (logical_to_blk(inode, len) == 0)
len = blk_to_logical(inode, 1);
@@ -1198,7 +1340,6 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
unsigned nr_pages)
{
struct bio *bio = NULL;
- unsigned page_idx;
sector_t last_block_in_bio = 0;
struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
@@ -1214,9 +1355,10 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
map.m_len = 0;
map.m_flags = 0;
map.m_next_pgofs = NULL;
+ map.m_next_extent = NULL;
+ map.m_seg_type = NO_CHECK_TYPE;
- for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
-
+ for (; nr_pages; nr_pages--) {
if (pages) {
page = list_last_entry(pages, struct page, lru);
@@ -1376,18 +1518,79 @@ retry_encrypt:
return PTR_ERR(fio->encrypted_page);
}
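+/*
+ * Evaluate the in-place-update policy bitmask: each F2FS_IPU_* bit
+ * enables one condition under which a block may be rewritten in place.
+ */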
+static inline bool check_inplace_update_policy(struct inode *inode,
+ struct f2fs_io_info *fio)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned int policy = SM_I(sbi)->ipu_policy;
+
+ if (policy & (0x1 << F2FS_IPU_FORCE))
+ return true;
+ if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
+ return true;
+ if (policy & (0x1 << F2FS_IPU_UTIL) &&
+ utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ return true;
+ if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
+ utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ return true;
+
+ /*
+ * IPU for rewriting async pages
+ */
+ if (policy & (0x1 << F2FS_IPU_ASYNC) &&
+ fio && fio->op == REQ_OP_WRITE &&
+ !(fio->op_flags & REQ_SYNC) &&
+ !f2fs_encrypted_inode(inode))
+ return true;
+
+ /* this is only set during fdatasync */
+ if (policy & (0x1 << F2FS_IPU_FSYNC) &&
+ is_inode_flag_set(inode, FI_NEED_IPU))
+ return true;
+
+ return false;
+}
+
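+/* Prefer in-place update for pinned or cold files, else defer to policy. */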
+bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
+{
+ if (f2fs_is_pinned_file(inode))
+ return true;
+
+ /* if this is a cold file, we should overwrite to avoid fragmentation */
+ if (file_is_cold(inode))
+ return true;
+
+ return check_inplace_update_policy(inode, fio);
+}
+
+bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (test_opt(sbi, LFS))
+ return true;
+ if (S_ISDIR(inode->i_mode))
+ return true;
+ if (f2fs_is_atomic_file(inode))
+ return true;
+ if (fio) {
+ if (is_cold_data(fio->page))
+ return true;
+ if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
+ return true;
+ }
+ return false;
+}
+
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
struct inode *inode = fio->page->mapping->host;
- if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
- return false;
- if (is_cold_data(fio->page))
- return false;
- if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
+ if (should_update_outplace(inode, fio))
return false;
- return need_inplace_update_policy(inode, fio);
+ return should_update_inplace(inode, fio);
}
static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
@@ -1508,10 +1711,17 @@ static int __write_data_page(struct page *page, bool *submitted,
.submitted = false,
.need_lock = LOCK_RETRY,
.io_type = io_type,
+ .io_wbc = wbc,
};
trace_f2fs_writepage(page, DATA);
+ /* we should bypass data pages so that kworker jobs can proceed */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ mapping_set_error(page->mapping, -EIO);
+ goto out;
+ }
+
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
@@ -1536,12 +1746,6 @@ write:
available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
- /* we should bypass data pages so that kworker jobs can proceed */
- if (unlikely(f2fs_cp_error(sbi))) {
- mapping_set_error(page->mapping, -EIO);
- goto out;
- }
-
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
fio.need_lock = LOCK_DONE;
@@ -1571,10 +1775,14 @@ write:
}
}
- down_write(&F2FS_I(inode)->i_sem);
- if (F2FS_I(inode)->last_disk_size < psize)
- F2FS_I(inode)->last_disk_size = psize;
- up_write(&F2FS_I(inode)->i_sem);
+ if (err) {
+ file_set_keep_isize(inode);
+ } else {
+ down_write(&F2FS_I(inode)->i_sem);
+ if (F2FS_I(inode)->last_disk_size < psize)
+ F2FS_I(inode)->last_disk_size = psize;
+ up_write(&F2FS_I(inode)->i_sem);
+ }
done:
if (err && err != -ENOENT)
@@ -1933,7 +2141,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL;
pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
- bool need_balance = false;
+ bool need_balance = false, drop_atomic = false;
block_t blkaddr = NULL_ADDR;
int err = 0;
@@ -1942,6 +2150,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
if (f2fs_is_atomic_file(inode) &&
!available_free_memory(sbi, INMEM_PAGES)) {
err = -ENOMEM;
+ drop_atomic = true;
goto fail;
}
@@ -2022,7 +2231,7 @@ repeat:
fail:
f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
- if (f2fs_is_atomic_file(inode))
+ if (drop_atomic)
drop_inmem_pages_all(sbi);
return err;
}
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index ecada8425268..a66107b5cfff 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -49,14 +49,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
-
- si->nquota_files = 0;
- if (f2fs_sb_has_quota_ino(sbi->sb)) {
- for (i = 0; i < MAXQUOTAS; i++) {
- if (f2fs_qf_ino(sbi->sb, i))
- si->nquota_files++;
- }
- }
+ si->nquota_files = sbi->nquota_files;
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
si->aw_cnt = atomic_read(&sbi->aw_cnt);
@@ -186,7 +179,6 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
si->base_mem += 2 * sizeof(struct f2fs_inode_info);
si->base_mem += sizeof(*sbi->ckpt);
- si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE;
/* build sm */
si->base_mem += sizeof(struct f2fs_sm_info);
@@ -447,7 +439,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_stat_info *si;
- si = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
+ si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL);
if (!si)
return -ENOMEM;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 2d98d877c09d..f00b5ed8c011 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -713,6 +713,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+ add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
+
if (f2fs_has_inline_dentry(dir))
return f2fs_delete_inline_entry(dentry, page, dir, inode);
@@ -798,6 +800,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int bit_pos;
struct f2fs_dir_entry *de = NULL;
struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
bit_pos = ((unsigned long)ctx->pos % d->max);
@@ -836,6 +839,9 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
le32_to_cpu(de->ino), d_type))
return 1;
+ if (sbi->readdir_ra == 1)
+ ra_node_page(sbi, le32_to_cpu(de->ino));
+
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
ctx->pos = start_pos + bit_pos;
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 6abf26c31d01..6300ac5bcbe4 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -19,6 +19,7 @@
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
+#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -43,6 +44,7 @@
#ifdef CONFIG_F2FS_FAULT_INJECTION
enum {
FAULT_KMALLOC,
+ FAULT_KVMALLOC,
FAULT_PAGE_ALLOC,
FAULT_PAGE_GET,
FAULT_ALLOC_BIO,
@@ -94,6 +96,7 @@ extern char *fault_name[FAULT_MAX];
#define F2FS_MOUNT_PRJQUOTA 0x00200000
#define F2FS_MOUNT_QUOTA 0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
+#define F2FS_MOUNT_RESERVE_ROOT 0x01000000
#define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
@@ -121,6 +124,7 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_INODE_CHKSUM 0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040
#define F2FS_FEATURE_QUOTA_INO 0x0080
+#define F2FS_FEATURE_INODE_CRTIME 0x0100
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -130,6 +134,12 @@ struct f2fs_mount_info {
(F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))
/*
+ * Default values for user and/or group using reserved blocks
+ */
+#define F2FS_DEF_RESUID 0
+#define F2FS_DEF_RESGID 0
+
+/*
* For checkpoint manager
*/
enum {
@@ -179,6 +189,7 @@ enum {
ORPHAN_INO, /* for orphan ino list */
APPEND_INO, /* for append ino list */
UPDATE_INO, /* for update ino list */
+ TRANS_DIR_INO, /* for transactions dir ino list */
FLUSH_INO, /* for multiple device flushing */
MAX_INO_ENTRY, /* max. list */
};
@@ -264,7 +275,6 @@ struct discard_cmd_control {
struct task_struct *f2fs_issue_discard; /* discard thread */
struct list_head entry_list; /* 4KB discard entry list */
struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
- unsigned char pend_list_tag[MAX_PLIST_NUM];/* tag for pending entries */
struct list_head wait_list; /* store on-flushing entries */
struct list_head fstrim_list; /* in-flight discard from fstrim */
wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
@@ -347,6 +357,9 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
#define F2FS_IOC_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11, \
struct f2fs_gc_range)
#define F2FS_IOC_GET_FEATURES _IOR(F2FS_IOCTL_MAGIC, 12, __u32)
+#define F2FS_IOC_SET_PIN_FILE _IOW(F2FS_IOCTL_MAGIC, 13, __u32)
+#define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32)
+#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
@@ -402,10 +415,9 @@ struct f2fs_flush_device {
#define DEF_MIN_INLINE_SIZE 1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
-#define F2FS_INLINE_XATTR_ADDRS(inode) get_inline_xattr_addrs(inode)
#define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
(CUR_ADDRS_PER_INODE(inode) - \
- F2FS_INLINE_XATTR_ADDRS(inode) - \
+ get_inline_xattr_addrs(inode) - \
DEF_INLINE_RESERVED_SIZE))
/* for inline dir */
@@ -542,6 +554,8 @@ struct f2fs_map_blocks {
unsigned int m_len;
unsigned int m_flags;
pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
+ pgoff_t *m_next_extent; /* point to next possible extent */
+ int m_seg_type;
};
/* for flag in get_data_block */
@@ -551,6 +565,7 @@ enum {
F2FS_GET_BLOCK_BMAP,
F2FS_GET_BLOCK_PRE_DIO,
F2FS_GET_BLOCK_PRE_AIO,
+ F2FS_GET_BLOCK_PRECACHE,
};
/*
@@ -583,7 +598,10 @@ struct f2fs_inode_info {
unsigned long i_flags; /* keep an inode flags for ioctl */
unsigned char i_advise; /* use to give file attribute hints */
unsigned char i_dir_level; /* use for dentry level for large dir */
- unsigned int i_current_depth; /* use only in directory structure */
+ union {
+ unsigned int i_current_depth; /* only for directory depth */
+ unsigned short i_gc_failures; /* only for regular file */
+ };
unsigned int i_pino; /* parent inode number */
umode_t i_acl_mode; /* keep file acl mode temporarily */
@@ -618,6 +636,7 @@ struct f2fs_inode_info {
int i_extra_isize; /* size of extra space located in i_addr */
kprojid_t i_projid; /* id for project quota */
int i_inline_xattr_size; /* inline xattr size */
+ struct timespec i_crtime; /* inode creation time */
};
static inline void get_extent_info(struct extent_info *ext,
@@ -922,6 +941,7 @@ enum cp_reason_type {
CP_NODE_NEED_CP,
CP_FASTBOOT_MODE,
CP_SPEC_LOG_NUM,
+ CP_RECOVER_DIR,
};
enum iostat_type {
@@ -957,6 +977,7 @@ struct f2fs_io_info {
int need_lock; /* indicate we need to lock cp_rwsem */
bool in_list; /* indicate fio is in io_list */
enum iostat_type io_type; /* io type */
+ struct writeback_control *io_wbc; /* writeback control */
};
#define is_read_io(rw) ((rw) == READ)
@@ -1093,6 +1114,7 @@ struct f2fs_sb_info {
int dir_level; /* directory level */
int inline_xattr_size; /* inline xattr size */
unsigned int trigger_ssr_threshold; /* threshold to trigger ssr */
+ int readdir_ra; /* readahead inode in readdir */
block_t user_block_count; /* # of user blocks */
block_t total_valid_block_count; /* # of valid blocks */
@@ -1100,6 +1122,11 @@ struct f2fs_sb_info {
block_t last_valid_block_count; /* for recovery */
block_t reserved_blocks; /* configurable reserved blocks */
block_t current_reserved_blocks; /* current reserved blocks */
+ block_t root_reserved_blocks; /* root reserved blocks */
+ kuid_t s_resuid; /* reserved blocks for uid */
+ kgid_t s_resgid; /* reserved blocks for gid */
+
+ unsigned int nquota_files; /* # of quota sysfile */
u32 s_next_generation; /* for NFS support */
@@ -1124,6 +1151,9 @@ struct f2fs_sb_info {
/* threshold for converting bg victims for fg */
u64 fggc_threshold;
+ /* threshold for gc trials on pinned files */
+ u64 gc_pin_file_threshold;
+
/* maximum # of trials to find a victim segment for SSR and GC */
unsigned int max_victim_search;
@@ -1250,33 +1280,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi)
/*
* Inline functions
*/
-static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
- unsigned int length)
-{
- SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
- u32 *ctx = (u32 *)shash_desc_ctx(shash);
- u32 retval;
- int err;
-
- shash->tfm = sbi->s_chksum_driver;
- shash->flags = 0;
- *ctx = F2FS_SUPER_MAGIC;
-
- err = crypto_shash_update(shash, address, length);
- BUG_ON(err);
-
- retval = *ctx;
- barrier_data(ctx);
- return retval;
-}
-
-static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
- void *buf, size_t buf_size)
-{
- return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
-}
-
-static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
+static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
const void *address, unsigned int length)
{
struct {
@@ -1297,6 +1301,24 @@ static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
return *(u32 *)desc.ctx;
}
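+/*
+ * f2fs_crc32() seeds the checksum with the superblock magic;
+ * f2fs_chksum() continues an existing crc. Both wrap __f2fs_crc32().
+ */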
+static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
+ unsigned int length)
+{
+ return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
+}
+
+static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
+ void *buf, size_t buf_size)
+{
+ return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
+}
+
+static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
+ const void *address, unsigned int length)
+{
+ return __f2fs_crc32(sbi, crc, address, length);
+}
+
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
return container_of(inode, struct f2fs_inode_info, vfs_inode);
@@ -1555,6 +1577,25 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
return ofs == XATTR_NODE_OFFSET;
}
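+/*
+ * Decide whether this allocation may dip into the root-reserved blocks:
+ * quota files, CAP_SYS_RESOURCE holders, and the configured
+ * resuid/resgid are allowed.
+ */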
+static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
+ struct inode *inode)
+{
+ if (!inode)
+ return true;
+ if (!test_opt(sbi, RESERVE_ROOT))
+ return false;
+ if (IS_NOQUOTA(inode))
+ return true;
+ if (capable(CAP_SYS_RESOURCE))
+ return true;
+ if (uid_eq(sbi->s_resuid, current_fsuid()))
+ return true;
+ if (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) &&
+ in_group_p(sbi->s_resgid))
+ return true;
+ return false;
+}
+
static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
struct inode *inode, blkcnt_t *count)
@@ -1584,11 +1625,17 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
sbi->total_valid_block_count += (block_t)(*count);
avail_user_block_count = sbi->user_block_count -
sbi->current_reserved_blocks;
+
+ if (!__allow_reserved_blocks(sbi, inode))
+ avail_user_block_count -= sbi->root_reserved_blocks;
+
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
diff = sbi->total_valid_block_count - avail_user_block_count;
+ if (diff > *count)
+ diff = *count;
*count -= diff;
release = diff;
- sbi->total_valid_block_count = avail_user_block_count;
+ sbi->total_valid_block_count -= diff;
if (!*count) {
spin_unlock(&sbi->stat_lock);
percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
@@ -1597,7 +1644,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
}
spin_unlock(&sbi->stat_lock);
- if (release)
+ if (unlikely(release))
dquot_release_reservation_block(inode, release);
f2fs_i_blocks_write(inode, *count, true, true);
return 0;
@@ -1777,9 +1824,13 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
spin_lock(&sbi->stat_lock);
- valid_block_count = sbi->total_valid_block_count + 1;
- if (unlikely(valid_block_count + sbi->current_reserved_blocks >
- sbi->user_block_count)) {
+ valid_block_count = sbi->total_valid_block_count +
+ sbi->current_reserved_blocks + 1;
+
+ if (!__allow_reserved_blocks(sbi, inode))
+ valid_block_count += sbi->root_reserved_blocks;
+
+ if (unlikely(valid_block_count > sbi->user_block_count)) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
@@ -1992,11 +2043,11 @@ static inline block_t datablock_addr(struct inode *inode,
raw_node = F2FS_NODE(node_page);
/* from GC path only */
- if (!inode) {
- if (is_inode)
+ if (is_inode) {
+ if (!inode)
base = offset_in_addr(&raw_node->i);
- } else if (f2fs_has_extra_attr(inode) && is_inode) {
- base = get_extra_isize(inode);
+ else if (f2fs_has_extra_attr(inode))
+ base = get_extra_isize(inode);
}
addr_array = blkaddr_in_node(raw_node);
@@ -2107,6 +2158,7 @@ enum {
FI_HOT_DATA, /* indicate file is hot */
FI_EXTRA_ATTR, /* indicate file has extra attribute */
FI_PROJ_INHERIT, /* indicate file inherits projectid */
+ FI_PIN_FILE, /* indicate file should not be GCed */
};
static inline void __mark_inode_dirty_flag(struct inode *inode,
@@ -2116,10 +2168,12 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
case FI_INLINE_XATTR:
case FI_INLINE_DATA:
case FI_INLINE_DENTRY:
+ case FI_NEW_INODE:
if (set)
return;
case FI_DATA_EXIST:
case FI_INLINE_DOTS:
+ case FI_PIN_FILE:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -2200,6 +2254,13 @@ static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
f2fs_mark_inode_dirty_sync(inode, true);
}
+static inline void f2fs_i_gc_failures_write(struct inode *inode,
+ unsigned int count)
+{
+ F2FS_I(inode)->i_gc_failures = count;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
F2FS_I(inode)->i_xattr_nid = xnid;
@@ -2228,6 +2289,8 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
set_bit(FI_INLINE_DOTS, &fi->flags);
if (ri->i_inline & F2FS_EXTRA_ATTR)
set_bit(FI_EXTRA_ATTR, &fi->flags);
+ if (ri->i_inline & F2FS_PIN_FILE)
+ set_bit(FI_PIN_FILE, &fi->flags);
}
static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
@@ -2246,6 +2309,8 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
ri->i_inline |= F2FS_INLINE_DOTS;
if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
ri->i_inline |= F2FS_EXTRA_ATTR;
+ if (is_inode_flag_set(inode, FI_PIN_FILE))
+ ri->i_inline |= F2FS_PIN_FILE;
}
static inline int f2fs_has_extra_attr(struct inode *inode)
@@ -2260,7 +2325,7 @@ static inline int f2fs_has_inline_xattr(struct inode *inode)
static inline unsigned int addrs_per_inode(struct inode *inode)
{
- return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS(inode);
+ return CUR_ADDRS_PER_INODE(inode) - get_inline_xattr_addrs(inode);
}
static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
@@ -2268,7 +2333,7 @@ static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
struct f2fs_inode *ri = F2FS_INODE(page);
return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
- F2FS_INLINE_XATTR_ADDRS(inode)]);
+ get_inline_xattr_addrs(inode)]);
}
static inline int inline_xattr_size(struct inode *inode)
@@ -2291,6 +2356,11 @@ static inline int f2fs_has_inline_dots(struct inode *inode)
return is_inode_flag_set(inode, FI_INLINE_DOTS);
}
+static inline bool f2fs_is_pinned_file(struct inode *inode)
+{
+ return is_inode_flag_set(inode, FI_PIN_FILE);
+}
+
static inline bool f2fs_is_atomic_file(struct inode *inode)
{
return is_inode_flag_set(inode, FI_ATOMIC_FILE);
@@ -2418,12 +2488,35 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
return kmalloc(size, flags);
}
+static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+ return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
+}
+
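+/* kvmalloc wrapper that honors f2fs fault injection (FAULT_KVMALLOC). */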
+static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_KVMALLOC)) {
+ f2fs_show_injection_info(FAULT_KVMALLOC);
+ return NULL;
+ }
+#endif
+ return kvmalloc(size, flags);
+}
+
+static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+ return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
+}
+
static inline int get_extra_isize(struct inode *inode)
{
return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}
-static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb);
static inline int get_inline_xattr_addrs(struct inode *inode)
{
return F2FS_I(inode)->i_inline_xattr_size;
@@ -2479,9 +2572,11 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
-int truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+void truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+int f2fs_precache_extents(struct inode *inode);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int f2fs_pin_file_control(struct inode *inode, bool inc);
/*
* inode.c
@@ -2492,8 +2587,8 @@ void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
-int update_inode(struct inode *inode, struct page *node_page);
-int update_inode_page(struct inode *inode);
+void update_inode(struct inode *inode, struct page *node_page);
+void update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void handle_failed_inode(struct inode *inode);
@@ -2604,10 +2699,9 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
void recover_inline_xattr(struct inode *inode, struct page *page);
-int recover_xattr_data(struct inode *inode, struct page *page,
- block_t blkaddr);
+int recover_xattr_data(struct inode *inode, struct page *page);
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
-int restore_node_summary(struct f2fs_sb_info *sbi,
+void restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int build_node_manager(struct f2fs_sb_info *sbi);
@@ -2634,6 +2728,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void init_discard_policy(struct discard_policy *dpolicy, int discard_type,
unsigned int granularity);
+void drop_discard_cmd(struct f2fs_sb_info *sbi);
void stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
@@ -2672,6 +2767,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi);
void destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init create_segment_manager_caches(void);
void destroy_segment_manager_caches(void);
+int rw_hint_to_seg_type(enum rw_hint hint);
/*
* checkpoint.c
@@ -2741,6 +2837,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
+bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
+bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
void f2fs_set_page_dirty_nobuffers(struct page *page);
int __f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc,
@@ -3109,6 +3207,11 @@ static inline int f2fs_sb_has_quota_ino(struct super_block *sb)
return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_QUOTA_INO);
}
+static inline int f2fs_sb_has_inode_crtime(struct super_block *sb)
+{
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CRTIME);
+}
+
#ifdef CONFIG_BLK_DEV_ZONED
static inline int get_blkz_type(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkaddr)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 7874bbd7311d..672a542e5464 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -165,6 +165,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
cp_reason = CP_FASTBOOT_MODE;
else if (sbi->active_logs == 2)
cp_reason = CP_SPEC_LOG_NUM;
+ else if (need_dentry_mark(sbi, inode->i_ino) &&
+ exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO))
+ cp_reason = CP_RECOVER_DIR;
return cp_reason;
}
@@ -472,26 +475,14 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
- struct dentry *dir;
+ int err = fscrypt_file_open(inode, filp);
- if (f2fs_encrypted_inode(inode)) {
- int ret = fscrypt_get_encryption_info(inode);
- if (ret)
- return -EACCES;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
- dir = dget_parent(file_dentry(filp));
- if (f2fs_encrypted_inode(d_inode(dir)) &&
- !fscrypt_has_permitted_context(d_inode(dir), inode)) {
- dput(dir);
- return -EPERM;
- }
- dput(dir);
+ if (err)
+ return err;
return dquot_file_open(inode, filp);
}
-int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+void truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_node *raw_node;
@@ -534,7 +525,6 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
f2fs_update_time(sbi, REQ_TIME);
trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
dn->ofs_in_node, nr_free);
- return nr_free;
}
void truncate_data_blocks(struct dnode_of_data *dn)
@@ -682,8 +672,17 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
{
struct inode *inode = d_inode(path->dentry);
struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_inode *ri;
unsigned int flags;
+ if (f2fs_has_extra_attr(inode) &&
+ f2fs_sb_has_inode_crtime(inode->i_sb) &&
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime.tv_sec = fi->i_crtime.tv_sec;
+ stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
+ }
+
flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
if (flags & FS_APPEND_FL)
stat->attributes |= STATX_ATTR_APPEND;
@@ -755,6 +754,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
+ err = fscrypt_prepare_setattr(dentry, attr);
+ if (err)
+ return err;
+
if (is_quota_modification(inode, attr)) {
err = dquot_initialize(inode);
if (err)
@@ -770,14 +773,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
- if (f2fs_encrypted_inode(inode)) {
- err = fscrypt_get_encryption_info(inode);
- if (err)
- return err;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
-
if (attr->ia_size <= i_size_read(inode)) {
down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_setsize(inode, attr->ia_size);
@@ -1114,11 +1109,13 @@ static int __exchange_data_block(struct inode *src_inode,
while (len) {
olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
- src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
+ src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
+ sizeof(block_t) * olen, GFP_KERNEL);
if (!src_blkaddr)
return -ENOMEM;
- do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
+ do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
+ sizeof(int) * olen, GFP_KERNEL);
if (!do_replace) {
kvfree(src_blkaddr);
return -ENOMEM;
@@ -1186,14 +1183,14 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
pg_start = offset >> PAGE_SHIFT;
pg_end = (offset + len) >> PAGE_SHIFT;
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+
down_write(&F2FS_I(inode)->i_mmap_sem);
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- goto out;
-
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+ goto out_unlock;
truncate_pagecache(inode, offset);
@@ -1212,9 +1209,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (!ret)
f2fs_i_size_write(inode, new_size);
out_unlock:
- up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
-out:
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
return ret;
}
@@ -1385,6 +1381,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
f2fs_balance_fs(sbi, true);
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+
down_write(&F2FS_I(inode)->i_mmap_sem);
ret = truncate_blocks(inode, i_size_read(inode), true);
if (ret)
@@ -1395,9 +1394,6 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (ret)
goto out;
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
-
truncate_pagecache(inode, offset);
pg_start = offset >> PAGE_SHIFT;
@@ -1425,10 +1421,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (!ret)
f2fs_i_size_write(inode, new_size);
-
- up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
out:
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
return ret;
}
@@ -1436,7 +1431,8 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
loff_t len, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
+ .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
pgoff_t pg_end;
loff_t new_size = i_size_read(inode);
loff_t off_end;
@@ -1852,14 +1848,20 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
switch (in) {
case F2FS_GOING_DOWN_FULLSYNC:
sb = freeze_bdev(sb->s_bdev);
- if (sb && !IS_ERR(sb)) {
+ if (IS_ERR(sb)) {
+ ret = PTR_ERR(sb);
+ goto out;
+ }
+ if (sb) {
f2fs_stop_checkpoint(sbi, false);
thaw_bdev(sb->s_bdev, sb);
}
break;
case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */
- f2fs_sync_fs(sb, 1);
+ ret = f2fs_sync_fs(sb, 1);
+ if (ret)
+ goto out;
f2fs_stop_checkpoint(sbi, false);
break;
case F2FS_GOING_DOWN_NOSYNC:
@@ -1873,6 +1875,13 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
ret = -EINVAL;
goto out;
}
+
+ stop_gc_thread(sbi);
+ stop_discard_thread(sbi);
+
+ drop_discard_cmd(sbi);
+ clear_opt(sbi, DISCARD);
+
f2fs_update_time(sbi, REQ_TIME);
out:
mnt_drop_write_file(filp);
@@ -2084,9 +2093,10 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
struct f2fs_defragment *range)
{
struct inode *inode = file_inode(filp);
- struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ struct f2fs_map_blocks map = { .m_next_extent = NULL,
+ .m_seg_type = NO_CHECK_TYPE };
struct extent_info ei = {0,0,0};
- pgoff_t pg_start, pg_end;
+ pgoff_t pg_start, pg_end, next_pgofs;
unsigned int blk_per_seg = sbi->blocks_per_seg;
unsigned int total = 0, sec_num;
block_t blk_end = 0;
@@ -2094,7 +2104,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
int err;
/* if in-place-update policy is enabled, don't waste time here */
- if (need_inplace_update_policy(inode, NULL))
+ if (should_update_inplace(inode, NULL))
return -EINVAL;
pg_start = range->start >> PAGE_SHIFT;
@@ -2120,6 +2130,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
}
map.m_lblk = pg_start;
+ map.m_next_pgofs = &next_pgofs;
/*
* lookup mapping info in dnode page cache, skip defragmenting if all
@@ -2133,14 +2144,16 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
goto out;
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
- map.m_lblk++;
+ map.m_lblk = next_pgofs;
continue;
}
- if (blk_end && blk_end != map.m_pblk) {
+ if (blk_end && blk_end != map.m_pblk)
fragmented = true;
- break;
- }
+
+ /* record the total count of blocks that we're going to move */
+ total += map.m_len;
+
blk_end = map.m_pblk + map.m_len;
map.m_lblk += map.m_len;
@@ -2149,10 +2162,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
if (!fragmented)
goto out;
- map.m_lblk = pg_start;
- map.m_len = pg_end - pg_start;
-
- sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);
+ sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);
/*
* make sure there are enough free section for LFS allocation, this can
@@ -2164,6 +2174,10 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
goto out;
}
+ map.m_lblk = pg_start;
+ map.m_len = pg_end - pg_start;
+ total = 0;
+
while (map.m_lblk < pg_end) {
pgoff_t idx;
int cnt = 0;
@@ -2175,7 +2189,7 @@ do_map:
goto clear_out;
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
- map.m_lblk++;
+ map.m_lblk = next_pgofs;
continue;
}
@@ -2681,6 +2695,125 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
return 0;
}
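+/*
+ * Account GC failures on a pinned file; once the count exceeds
+ * gc_pin_file_threshold, unpin it so GC can move its blocks again.
+ */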
+int f2fs_pin_file_control(struct inode *inode, bool inc)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ /* Use i_gc_failures on a normal file as a risk signal. */
+ if (inc)
+ f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);
+
+ if (fi->i_gc_failures > sbi->gc_pin_file_threshold) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: Enable GC = ino %lx after %x GC trials\n",
+ __func__, inode->i_ino, fi->i_gc_failures);
+ clear_inode_flag(inode, FI_PIN_FILE);
+ return -EAGAIN;
+ }
+ return 0;
+}
+
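+/* F2FS_IOC_SET_PIN_FILE: pin or unpin a regular file against GC moves. */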
+static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ __u32 pin;
+ int ret = 0;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (get_user(pin, (__u32 __user *)arg))
+ return -EFAULT;
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ if (f2fs_readonly(F2FS_I_SB(inode)->sb))
+ return -EROFS;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
+ if (should_update_outplace(inode, NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!pin) {
+ clear_inode_flag(inode, FI_PIN_FILE);
+ F2FS_I(inode)->i_gc_failures = 1;
+ goto done;
+ }
+
+ if (f2fs_pin_file_control(inode, false)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ goto out;
+
+ set_inode_flag(inode, FI_PIN_FILE);
+ ret = F2FS_I(inode)->i_gc_failures;
+done:
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
+static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ __u32 pin = 0;
+
+ if (is_inode_flag_set(inode, FI_PIN_FILE))
+ pin = F2FS_I(inode)->i_gc_failures;
+ return put_user(pin, (u32 __user *)arg);
+}
+
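+/*
+ * Populate the extent cache for the whole file by walking it with
+ * F2FS_GET_BLOCK_PRECACHE, hopping from one mapped extent to the next.
+ */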
+int f2fs_precache_extents(struct inode *inode)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_map_blocks map;
+ pgoff_t m_next_extent;
+ loff_t end;
+ int err;
+
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+ return -EOPNOTSUPP;
+
+ map.m_lblk = 0;
+ map.m_next_pgofs = NULL;
+ map.m_next_extent = &m_next_extent;
+ map.m_seg_type = NO_CHECK_TYPE;
+ end = F2FS_I_SB(inode)->max_file_blocks;
+
+ while (map.m_lblk < end) {
+ map.m_len = end - map.m_lblk;
+
+ down_write(&fi->dio_rwsem[WRITE]);
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
+ up_write(&fi->dio_rwsem[WRITE]);
+ if (err)
+ return err;
+
+ map.m_lblk = m_next_extent;
+ }
+
+ return err;
+}
+
+static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
+{
+ return f2fs_precache_extents(file_inode(filp));
+}
+
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@ -2731,6 +2864,12 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_fsgetxattr(filp, arg);
case F2FS_IOC_FSSETXATTR:
return f2fs_ioc_fssetxattr(filp, arg);
+ case F2FS_IOC_GET_PIN_FILE:
+ return f2fs_ioc_get_pin_file(filp, arg);
+ case F2FS_IOC_SET_PIN_FILE:
+ return f2fs_ioc_set_pin_file(filp, arg);
+ case F2FS_IOC_PRECACHE_EXTENTS:
+ return f2fs_ioc_precache_extents(filp, arg);
default:
return -ENOTTY;
}
@@ -2806,6 +2945,9 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_GET_FEATURES:
case F2FS_IOC_FSGETXATTR:
case F2FS_IOC_FSSETXATTR:
+ case F2FS_IOC_GET_PIN_FILE:
+ case F2FS_IOC_SET_PIN_FILE:
+ case F2FS_IOC_PRECACHE_EXTENTS:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index d844dcb80570..aa720cc44509 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -624,6 +624,11 @@ static void move_data_block(struct inode *inode, block_t bidx,
if (f2fs_is_atomic_file(inode))
goto out;
+ if (f2fs_is_pinned_file(inode)) {
+ f2fs_pin_file_control(inode, true);
+ goto out;
+ }
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
if (err)
@@ -686,7 +691,12 @@ static void move_data_block(struct inode *inode, block_t bidx,
fio.op = REQ_OP_WRITE;
fio.op_flags = REQ_SYNC;
fio.new_blkaddr = newaddr;
- f2fs_submit_page_write(&fio);
+ err = f2fs_submit_page_write(&fio);
+ if (err) {
+ if (PageWriteback(fio.encrypted_page))
+ end_page_writeback(fio.encrypted_page);
+ goto put_page_out;
+ }
f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
@@ -720,6 +730,11 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
if (f2fs_is_atomic_file(inode))
goto out;
+ if (f2fs_is_pinned_file(inode)) {
+ if (gc_type == FG_GC)
+ f2fs_pin_file_control(inode, true);
+ goto out;
+ }
if (gc_type == BG_GC) {
if (PageWriteback(page))
@@ -1091,6 +1106,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
BLKS_PER_SEC(sbi), (main_count - resv_count));
+ sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
/* give warm/cold data area from slower device */
if (sbi->s_ndevs && sbi->segs_per_sec == 1)
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index 9325191fab2d..b0045d4c8d1e 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -20,6 +20,8 @@
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
+#define DEF_GC_FAILED_PINNED_FILES 2048
+
/* Search max. number of dirty segments to select a victim segment */
#define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index b4c4f2b25304..89c838bfb067 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -22,6 +22,9 @@
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
+ if (is_inode_flag_set(inode, FI_NEW_INODE))
+ return;
+
if (f2fs_inode_dirtied(inode, sync))
return;
@@ -275,6 +278,12 @@ static int do_read_inode(struct inode *inode)
i_projid = F2FS_DEF_PROJID;
fi->i_projid = make_kprojid(&init_user_ns, i_projid);
+ if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
+ fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
+ fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
+ }
+
f2fs_put_page(node_page, 1);
stat_inc_inline_xattr(inode);
@@ -360,14 +369,15 @@ retry:
return inode;
}
-int update_inode(struct inode *inode, struct page *node_page)
+void update_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_inode *ri;
struct extent_tree *et = F2FS_I(inode)->extent_tree;
- f2fs_inode_synced(inode);
-
f2fs_wait_on_page_writeback(node_page, NODE, true);
+ set_page_dirty(node_page);
+
+ f2fs_inode_synced(inode);
ri = F2FS_INODE(node_page);
@@ -417,6 +427,15 @@ int update_inode(struct inode *inode, struct page *node_page)
F2FS_I(inode)->i_projid);
ri->i_projid = cpu_to_le32(i_projid);
}
+
+ if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
+ F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
+ i_crtime)) {
+ ri->i_crtime =
+ cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
+ ri->i_crtime_nsec =
+ cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
+ }
}
__set_inode_rdev(inode, ri);
@@ -426,14 +445,12 @@ int update_inode(struct inode *inode, struct page *node_page)
if (inode->i_nlink == 0)
clear_inline_node(node_page);
- return set_page_dirty(node_page);
}
-int update_inode_page(struct inode *inode)
+void update_inode_page(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *node_page;
- int ret = 0;
retry:
node_page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(node_page)) {
@@ -444,11 +461,10 @@ retry:
} else if (err != -ENOENT) {
f2fs_stop_checkpoint(sbi, false);
}
- return 0;
+ return;
}
- ret = update_inode(inode, node_page);
+ update_inode(inode, node_page);
f2fs_put_page(node_page, 1);
- return ret;
}
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
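
The crtime fields are read and written only when the inode's extra-attribute area actually covers them. The F2FS_FITS_IN_INODE() test used above is, roughly, a bounds check of the following shape — a sketch only, with F2FS_OLD_ATTRIBUTE_SIZE assumed to be the offset of i_addr; the authoritative macro is in fs/f2fs/f2fs.h:

	/* Sketch: does `field` end within the extra attribute space of
	 * this on-disk inode?  Old inodes with a small i_extra_isize
	 * simply never expose i_crtime, which keeps the format
	 * backward compatible. */
	#define FITS_IN_INODE_SKETCH(ri, extra_isize, field)		\
		((offsetof(typeof(*(ri)), field) + sizeof((ri)->field))	\
			<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))
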
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 28bdf8828e73..c4c94c7e9f4f 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -50,7 +50,8 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime =
+ F2FS_I(inode)->i_crtime = current_time(inode);
inode->i_generation = sbi->s_next_generation++;
err = insert_inode_locked(inode);
@@ -74,12 +75,12 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (err)
goto fail_drop;
+ set_inode_flag(inode, FI_NEW_INODE);
+
/* If the directory is encrypted, then we should encrypt the inode. */
if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
- set_inode_flag(inode, FI_NEW_INODE);
-
if (f2fs_sb_has_extra_attr(sbi->sb)) {
set_inode_flag(inode, FI_EXTRA_ATTR);
F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
@@ -240,9 +241,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if (f2fs_encrypted_inode(dir) &&
- !fscrypt_has_permitted_context(dir, inode))
- return -EPERM;
+ err = fscrypt_prepare_link(old_dentry, dir, dentry);
+ if (err)
+ return err;
if (is_inode_flag_set(dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(dir)->i_projid,
@@ -357,20 +358,9 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
trace_f2fs_lookup_start(dir, dentry, flags);
- if (f2fs_encrypted_inode(dir)) {
- err = fscrypt_get_encryption_info(dir);
-
- /*
- * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
- * created while the directory was encrypted and we
- * don't have access to the key.
- */
- if (fscrypt_has_encryption_key(dir))
- fscrypt_set_encrypted_dentry(dentry);
- fscrypt_set_d_op(dentry);
- if (err && err != -ENOKEY)
- goto out;
- }
+ err = fscrypt_prepare_lookup(dir, dentry, flags);
+ if (err)
+ goto out;
if (dentry->d_name.len > F2FS_NAME_LEN) {
err = -ENAMETOOLONG;
@@ -544,7 +534,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
struct qstr istr = QSTR_INIT(symname, len);
struct fscrypt_str ostr;
- sd = kzalloc(disk_link.len, GFP_NOFS);
+ sd = f2fs_kzalloc(sbi, disk_link.len, GFP_NOFS);
if (!sd) {
err = -ENOMEM;
goto err_out;
@@ -800,18 +790,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if ((f2fs_encrypted_inode(old_dir) &&
- !fscrypt_has_encryption_key(old_dir)) ||
- (f2fs_encrypted_inode(new_dir) &&
- !fscrypt_has_encryption_key(new_dir)))
- return -ENOKEY;
-
- if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
- !fscrypt_has_permitted_context(new_dir, old_inode)) {
- err = -EPERM;
- goto out;
- }
-
if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(new_dir)->i_projid,
F2FS_I(old_dentry->d_inode)->i_projid)))
@@ -958,6 +936,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
f2fs_i_links_write(old_dir, false);
}
+ add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
f2fs_unlock_op(sbi);
@@ -1002,18 +981,6 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if ((f2fs_encrypted_inode(old_dir) &&
- !fscrypt_has_encryption_key(old_dir)) ||
- (f2fs_encrypted_inode(new_dir) &&
- !fscrypt_has_encryption_key(new_dir)))
- return -ENOKEY;
-
- if ((f2fs_encrypted_inode(old_dir) || f2fs_encrypted_inode(new_dir)) &&
- (old_dir != new_dir) &&
- (!fscrypt_has_permitted_context(new_dir, old_inode) ||
- !fscrypt_has_permitted_context(old_dir, new_inode)))
- return -EPERM;
-
if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
!projid_eq(F2FS_I(new_dir)->i_projid,
F2FS_I(old_dentry->d_inode)->i_projid)) ||
@@ -1124,6 +1091,9 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
}
f2fs_mark_inode_dirty_sync(new_dir, false);
+ add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO);
+ add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+
f2fs_unlock_op(sbi);
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
@@ -1153,9 +1123,16 @@ static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
+ int err;
+
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
+ err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
+ flags);
+ if (err)
+ return err;
+
if (flags & RENAME_EXCHANGE) {
return f2fs_cross_rename(old_dir, old_dentry,
new_dir, new_dentry);
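
All three call sites (link, lookup, rename) drop their open-coded fscrypt checks in favor of the common helpers from <linux/fscrypt.h>, which centralize the ENOKEY and permitted-context policy. The resulting pattern, usable by any filesystem:

	#include <linux/fscrypt.h>	/* fscrypt_prepare_{link,lookup,rename} */

	static int example_rename(struct inode *old_dir, struct dentry *old_dentry,
				  struct inode *new_dir, struct dentry *new_dentry,
				  unsigned int flags)
	{
		/* One helper covers what used to be per-fs boilerplate:
		 * keys present, encryption contexts permitted, cross-dir
		 * rules enforced. */
		int err = fscrypt_prepare_rename(old_dir, old_dentry,
						 new_dir, new_dentry, flags);
		if (err)
			return err;

		/* ... filesystem-specific rename work ... */
		return 0;
	}
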
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index d3322752426f..177c438e4a56 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -143,11 +143,9 @@ static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
struct nat_entry *new;
if (no_fail)
- new = f2fs_kmem_cache_alloc(nat_entry_slab,
- GFP_NOFS | __GFP_ZERO);
+ new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
else
- new = kmem_cache_alloc(nat_entry_slab,
- GFP_NOFS | __GFP_ZERO);
+ new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
if (new) {
nat_set_nid(new, nid);
nat_reset_flag(new);
@@ -702,7 +700,6 @@ static void truncate_node(struct dnode_of_data *dn)
struct node_info ni;
get_node_info(sbi, dn->nid, &ni);
- f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
/* Deallocate node address */
invalidate_blocks(sbi, ni.blk_addr);
@@ -1336,14 +1333,19 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
.encrypted_page = NULL,
.submitted = false,
.io_type = io_type,
+ .io_wbc = wbc,
};
trace_f2fs_writepage(page, NODE);
+ if (unlikely(f2fs_cp_error(sbi))) {
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ unlock_page(page);
+ return 0;
+ }
+
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (unlikely(f2fs_cp_error(sbi)))
- goto redirty_out;
/* get old block addr of this node page */
nid = nid_of_node(page);
@@ -1580,12 +1582,6 @@ next_step:
struct page *page = pvec.pages[i];
bool submitted = false;
- if (unlikely(f2fs_cp_error(sbi))) {
- pagevec_release(&pvec);
- ret = -EIO;
- goto out;
- }
-
/*
* flushing sequence with step:
* 0. indirect nodes
@@ -1655,9 +1651,12 @@ continue_unlock:
step++;
goto next_step;
}
-out:
+
if (nwritten)
f2fs_submit_merged_write(sbi, NODE);
+
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
return ret;
}
@@ -1812,8 +1811,33 @@ static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
}
}
+static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
+ bool set, bool build)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
+ unsigned int nid_ofs = nid - START_NID(nid);
+
+ if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+ return;
+
+ if (set) {
+ if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
+ return;
+ __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ nm_i->free_nid_count[nat_ofs]++;
+ } else {
+ if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
+ return;
+ __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ if (!build)
+ nm_i->free_nid_count[nat_ofs]--;
+ }
+}
+
/* return if the nid is recognized as free */
-static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
+static bool add_free_nid(struct f2fs_sb_info *sbi,
+ nid_t nid, bool build, bool update)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *e;
@@ -1829,8 +1853,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
i->nid = nid;
i->state = FREE_NID;
- if (radix_tree_preload(GFP_NOFS))
- goto err;
+ radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
spin_lock(&nm_i->nid_list_lock);
@@ -1871,9 +1894,14 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
ret = true;
err = __insert_free_nid(sbi, i, FREE_NID);
err_out:
+ if (update) {
+ update_free_nid_bitmap(sbi, nid, ret, build);
+ if (!build)
+ nm_i->available_nids++;
+ }
spin_unlock(&nm_i->nid_list_lock);
radix_tree_preload_end();
-err:
+
if (err)
kmem_cache_free(free_nid_slab, i);
return ret;
@@ -1897,30 +1925,6 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
kmem_cache_free(free_nid_slab, i);
}
-static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
- bool set, bool build)
-{
- struct f2fs_nm_info *nm_i = NM_I(sbi);
- unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
- unsigned int nid_ofs = nid - START_NID(nid);
-
- if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
- return;
-
- if (set) {
- if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
- return;
- __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
- nm_i->free_nid_count[nat_ofs]++;
- } else {
- if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
- return;
- __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
- if (!build)
- nm_i->free_nid_count[nat_ofs]--;
- }
-}
-
static void scan_nat_page(struct f2fs_sb_info *sbi,
struct page *nat_page, nid_t start_nid)
{
@@ -1930,26 +1934,23 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
int i;
- if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
- return;
-
__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
i = start_nid % NAT_ENTRY_PER_BLOCK;
for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
- bool freed = false;
-
if (unlikely(start_nid >= nm_i->max_nid))
break;
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
- if (blk_addr == NULL_ADDR)
- freed = add_free_nid(sbi, start_nid, true);
- spin_lock(&NM_I(sbi)->nid_list_lock);
- update_free_nid_bitmap(sbi, start_nid, freed, true);
- spin_unlock(&NM_I(sbi)->nid_list_lock);
+ if (blk_addr == NULL_ADDR) {
+ add_free_nid(sbi, start_nid, true, true);
+ } else {
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ update_free_nid_bitmap(sbi, start_nid, false, true);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
}
}
@@ -1967,7 +1968,7 @@ static void scan_curseg_cache(struct f2fs_sb_info *sbi)
addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
nid = le32_to_cpu(nid_in_journal(journal, i));
if (addr == NULL_ADDR)
- add_free_nid(sbi, nid, true);
+ add_free_nid(sbi, nid, true, false);
else
remove_free_nid(sbi, nid);
}
@@ -1994,7 +1995,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
break;
nid = i * NAT_ENTRY_PER_BLOCK + idx;
- add_free_nid(sbi, nid, true);
+ add_free_nid(sbi, nid, true, false);
if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
goto out;
@@ -2037,10 +2038,13 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
down_read(&nm_i->nat_tree_lock);
while (1) {
- struct page *page = get_current_nat_page(sbi, nid);
+ if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
+ nm_i->nat_block_bitmap)) {
+ struct page *page = get_current_nat_page(sbi, nid);
- scan_nat_page(sbi, page, nid);
- f2fs_put_page(page, 1);
+ scan_nat_page(sbi, page, nid);
+ f2fs_put_page(page, 1);
+ }
nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
if (unlikely(nid >= nm_i->max_nid))
@@ -2203,7 +2207,9 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
ri = F2FS_INODE(page);
- if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
+ if (ri->i_inline & F2FS_INLINE_XATTR) {
+ set_inode_flag(inode, FI_INLINE_XATTR);
+ } else {
clear_inode_flag(inode, FI_INLINE_XATTR);
goto update_inode;
}
@@ -2219,7 +2225,7 @@ update_inode:
f2fs_put_page(ipage, 1);
}
-int recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
+int recover_xattr_data(struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
@@ -2233,7 +2239,6 @@ int recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
/* 1: invalidate the previous xattr nid */
get_node_info(sbi, prev_xnid, &ni);
- f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
invalidate_blocks(sbi, ni.blk_addr);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -2322,7 +2327,7 @@ retry:
return 0;
}
-int restore_node_summary(struct f2fs_sb_info *sbi,
+void restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum)
{
struct f2fs_node *rn;
@@ -2355,7 +2360,6 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
invalidate_mapping_pages(META_MAPPING(sbi), addr,
addr + nrpages);
}
- return 0;
}
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
@@ -2497,11 +2501,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
nat_reset_flag(ne);
__clear_nat_cache_dirty(NM_I(sbi), set, ne);
if (nat_get_blkaddr(ne) == NULL_ADDR) {
- add_free_nid(sbi, nid, false);
- spin_lock(&NM_I(sbi)->nid_list_lock);
- NM_I(sbi)->available_nids++;
- update_free_nid_bitmap(sbi, nid, true, false);
- spin_unlock(&NM_I(sbi)->nid_list_lock);
+ add_free_nid(sbi, nid, false, true);
} else {
spin_lock(&NM_I(sbi)->nid_list_lock);
update_free_nid_bitmap(sbi, nid, false, false);
@@ -2582,8 +2582,8 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
F2FS_BLKSIZE - 1);
- nm_i->nat_bits = kzalloc(nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS,
- GFP_KERNEL);
+ nm_i->nat_bits = f2fs_kzalloc(sbi,
+ nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
if (!nm_i->nat_bits)
return -ENOMEM;
@@ -2661,7 +2661,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
/* not used nids: 0, node, meta, (and root counted as valid node) */
nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
- F2FS_RESERVED_NODE_NUM;
+ sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
nm_i->nid_cnt[FREE_NID] = 0;
nm_i->nid_cnt[PREALLOC_NID] = 0;
nm_i->nat_cnt = 0;
@@ -2708,17 +2708,17 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- nm_i->free_nid_bitmap = kvzalloc(nm_i->nat_blocks *
+ nm_i->free_nid_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks *
NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
if (!nm_i->free_nid_bitmap)
return -ENOMEM;
- nm_i->nat_block_bitmap = kvzalloc(nm_i->nat_blocks / 8,
+ nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
GFP_KERNEL);
if (!nm_i->nat_block_bitmap)
return -ENOMEM;
- nm_i->free_nid_count = kvzalloc(nm_i->nat_blocks *
+ nm_i->free_nid_count = f2fs_kvzalloc(sbi, nm_i->nat_blocks *
sizeof(unsigned short), GFP_KERNEL);
if (!nm_i->free_nid_count)
return -ENOMEM;
@@ -2729,7 +2729,8 @@ int build_node_manager(struct f2fs_sb_info *sbi)
{
int err;
- sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
+ sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
+ GFP_KERNEL);
if (!sbi->nm_info)
return -ENOMEM;
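
update_free_nid_bitmap() moves above add_free_nid() so the latter can flip the bitmap while still holding nid_list_lock; the new `update` flag folds in the bookkeeping that callers like __flush_nat_entry_set() used to open-code. The bitmap coordinates derive from the nid as below (both macros are in fs/f2fs/node.h; the expansions in the comment are my reading of them):

	/* A free nid lives at (NAT block, bit-in-block):
	 *   NAT_BLOCK_OFFSET(nid) == nid / NAT_ENTRY_PER_BLOCK
	 *   START_NID(nid)        == nid rounded down to its block start */
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);	/* which bitmap    */
	unsigned int nid_ofs = nid - START_NID(nid);	/* which bit in it */
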
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 0ee3e5ff49a3..081ef0d672bf 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -305,6 +305,10 @@ static inline bool is_recoverable_dnode(struct page *page)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
__u64 cp_ver = cur_cp_version(ckpt);
+ /* Don't care about the crc part if fsck.f2fs sets it. */
+ if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
+ return (cp_ver << 32) == (cpver_of_node(page) << 32);
+
if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
cp_ver |= (cur_cp_crc(ckpt) << 32);
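
With CP_NOCRC_RECOVERY_FLAG set (by fsck.f2fs), only the low 32 version bits are compared: shifting both 64-bit values left by 32 discards the CRC half. A worked example:

	__u64 ckpt_ver = (0xdeadbeefULL << 32) | 42;	/* crc | version   */
	__u64 node_ver = (0x00000000ULL << 32) | 42;	/* crc was cleared */

	/* (ckpt_ver << 32) == (node_ver << 32) is true: the versions (42)
	 * match, and the mismatching crc halves are shifted out of the
	 * comparison entirely. */
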
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index b3a14b0429f2..337f3363f48f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -195,6 +195,20 @@ out:
return err;
}
+static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
+{
+ if (ri->i_inline & F2FS_PIN_FILE)
+ set_inode_flag(inode, FI_PIN_FILE);
+ else
+ clear_inode_flag(inode, FI_PIN_FILE);
+ if (ri->i_inline & F2FS_DATA_EXIST)
+ set_inode_flag(inode, FI_DATA_EXIST);
+ else
+ clear_inode_flag(inode, FI_DATA_EXIST);
+ if (!(ri->i_inline & F2FS_INLINE_DOTS))
+ clear_inode_flag(inode, FI_INLINE_DOTS);
+}
+
static void recover_inode(struct inode *inode, struct page *page)
{
struct f2fs_inode *raw = F2FS_INODE(page);
@@ -211,13 +225,16 @@ static void recover_inode(struct inode *inode, struct page *page)
F2FS_I(inode)->i_advise = raw->i_advise;
+ recover_inline_flags(inode, raw);
+
if (file_enc_name(inode))
name = "<encrypted>";
else
name = F2FS_INODE(page)->i_name;
- f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
- ino_of_node(page), name);
+ f2fs_msg(inode->i_sb, KERN_NOTICE,
+ "recover_inode: ino = %x, name = %s, inline = %x",
+ ino_of_node(page), name, raw->i_inline);
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
@@ -404,7 +421,7 @@ truncate_out:
}
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
- struct page *page, block_t blkaddr)
+ struct page *page)
{
struct dnode_of_data dn;
struct node_info ni;
@@ -415,7 +432,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (IS_INODE(page)) {
recover_inline_xattr(inode, page);
} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
- err = recover_xattr_data(inode, page, blkaddr);
+ err = recover_xattr_data(inode, page);
if (!err)
recovered++;
goto out;
@@ -568,7 +585,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
break;
}
}
- err = do_recover_data(sbi, entry->inode, page, blkaddr);
+ err = do_recover_data(sbi, entry->inode, page);
if (err) {
f2fs_put_page(page, 1);
break;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index c117e0913f2a..b16a8e6625aa 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -248,7 +248,11 @@ retry:
goto next;
}
get_node_info(sbi, dn.nid, &ni);
- f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ if (cur->old_addr == NEW_ADDR) {
+ invalidate_blocks(sbi, dn.data_blkaddr);
+ f2fs_update_data_blkaddr(&dn, NEW_ADDR);
+ } else
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
cur->old_addr, ni.version, true, true);
f2fs_put_dnode(&dn);
}
@@ -657,7 +661,7 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
goto init_thread;
}
- fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
+ fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
if (!fcc)
return -ENOMEM;
atomic_set(&fcc->issued_flush, 0);
@@ -884,7 +888,7 @@ static void f2fs_submit_discard_endio(struct bio *bio)
bio_put(bio);
}
-void __check_sit_bitmap(struct f2fs_sb_info *sbi,
+static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
@@ -1204,6 +1208,8 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
pend_list = &dcc->pend_list[i];
mutex_lock(&dcc->cmd_lock);
+ if (list_empty(pend_list))
+ goto next;
f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
blk_start_plug(&plug);
list_for_each_entry_safe(dc, tmp, pend_list, list) {
@@ -1222,6 +1228,7 @@ skip:
break;
}
blk_finish_plug(&plug);
+next:
mutex_unlock(&dcc->cmd_lock);
if (iter >= dpolicy->max_requests)
@@ -1256,6 +1263,11 @@ static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
return dropped;
}
+void drop_discard_cmd(struct f2fs_sb_info *sbi)
+{
+ __drop_discard_cmd(sbi);
+}
+
static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
struct discard_cmd *dc)
{
@@ -1324,7 +1336,7 @@ static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
}
/* This should be covered by global mutex, &sit_i->sentry_lock */
-void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
+static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_cmd *dc;
@@ -1394,6 +1406,8 @@ static int issue_discard_thread(void *data)
msecs_to_jiffies(wait_ms));
if (try_to_freeze())
continue;
+ if (f2fs_readonly(sbi->sb))
+ continue;
if (kthread_should_stop())
return 0;
@@ -1703,25 +1717,20 @@ void init_discard_policy(struct discard_policy *dpolicy,
dpolicy->sync = true;
dpolicy->granularity = granularity;
+ dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
+ dpolicy->io_aware_gran = MAX_PLIST_NUM;
+
if (discard_type == DPOLICY_BG) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
- dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
- dpolicy->io_aware_gran = MAX_PLIST_NUM;
dpolicy->io_aware = true;
} else if (discard_type == DPOLICY_FORCE) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
- dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
- dpolicy->io_aware_gran = MAX_PLIST_NUM;
dpolicy->io_aware = true;
} else if (discard_type == DPOLICY_FSTRIM) {
- dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
- dpolicy->io_aware_gran = MAX_PLIST_NUM;
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_UMOUNT) {
- dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
- dpolicy->io_aware_gran = MAX_PLIST_NUM;
dpolicy->io_aware = false;
}
}
@@ -1737,7 +1746,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
goto init_thread;
}
- dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
+ dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
if (!dcc)
return -ENOMEM;
@@ -2739,6 +2748,7 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
}
}
+ f2fs_bug_on(sbi, !IS_DATASEG(type));
curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
@@ -2823,7 +2833,7 @@ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
}
}
-static int read_compacted_summaries(struct f2fs_sb_info *sbi)
+static void read_compacted_summaries(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct curseg_info *seg_i;
@@ -2880,7 +2890,6 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
}
}
f2fs_put_page(page, 1);
- return 0;
}
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
@@ -2926,13 +2935,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
ns->ofs_in_node = 0;
}
} else {
- int err;
-
- err = restore_node_summary(sbi, segno, sum);
- if (err) {
- f2fs_put_page(new, 1);
- return err;
- }
+ restore_node_summary(sbi, segno, sum);
}
}
@@ -2971,8 +2974,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
META_CP, true);
/* restore for compacted data summary */
- if (read_compacted_summaries(sbi))
- return -EINVAL;
+ read_compacted_summaries(sbi);
type = CURSEG_HOT_NODE;
}
@@ -3108,28 +3110,19 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
unsigned int start)
{
struct sit_info *sit_i = SIT_I(sbi);
- struct page *src_page, *dst_page;
+ struct page *page;
pgoff_t src_off, dst_off;
- void *src_addr, *dst_addr;
src_off = current_sit_addr(sbi, start);
dst_off = next_sit_addr(sbi, src_off);
- /* get current sit block page without lock */
- src_page = get_meta_page(sbi, src_off);
- dst_page = grab_meta_page(sbi, dst_off);
- f2fs_bug_on(sbi, PageDirty(src_page));
-
- src_addr = page_address(src_page);
- dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_SIZE);
-
- set_page_dirty(dst_page);
- f2fs_put_page(src_page, 1);
+ page = grab_meta_page(sbi, dst_off);
+ seg_info_to_sit_page(sbi, page, start);
+ set_page_dirty(page);
set_to_next_sit(sit_i, start);
- return dst_page;
+ return page;
}
static struct sit_entry_set *grab_sit_entry_set(void)
@@ -3338,52 +3331,54 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
unsigned int bitmap_size;
/* allocate memory for SIT information */
- sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
+ sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
if (!sit_i)
return -ENOMEM;
SM_I(sbi)->sit_info = sit_i;
- sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) *
+ sit_i->sentries = f2fs_kvzalloc(sbi, MAIN_SEGS(sbi) *
sizeof(struct seg_entry), GFP_KERNEL);
if (!sit_i->sentries)
return -ENOMEM;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
+ sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, bitmap_size,
+ GFP_KERNEL);
if (!sit_i->dirty_sentries_bitmap)
return -ENOMEM;
for (start = 0; start < MAIN_SEGS(sbi); start++) {
sit_i->sentries[start].cur_valid_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
sit_i->sentries[start].ckpt_valid_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->sentries[start].cur_valid_map ||
!sit_i->sentries[start].ckpt_valid_map)
return -ENOMEM;
#ifdef CONFIG_F2FS_CHECK_FS
sit_i->sentries[start].cur_valid_map_mir
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->sentries[start].cur_valid_map_mir)
return -ENOMEM;
#endif
if (f2fs_discard_en(sbi)) {
sit_i->sentries[start].discard_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
+ GFP_KERNEL);
if (!sit_i->sentries[start].discard_map)
return -ENOMEM;
}
}
- sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->tmp_map)
return -ENOMEM;
if (sbi->segs_per_sec > 1) {
- sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) *
+ sit_i->sec_entries = f2fs_kvzalloc(sbi, MAIN_SECS(sbi) *
sizeof(struct sec_entry), GFP_KERNEL);
if (!sit_i->sec_entries)
return -ENOMEM;
@@ -3427,19 +3422,19 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
unsigned int bitmap_size, sec_bitmap_size;
/* allocate memory for free segmap information */
- free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
+ free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
if (!free_i)
return -ENOMEM;
SM_I(sbi)->free_info = free_i;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL);
+ free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
if (!free_i->free_segmap)
return -ENOMEM;
sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL);
+ free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
if (!free_i->free_secmap)
return -ENOMEM;
@@ -3460,7 +3455,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
struct curseg_info *array;
int i;
- array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
+ array = f2fs_kzalloc(sbi, sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
if (!array)
return -ENOMEM;
@@ -3468,12 +3463,12 @@ static int build_curseg(struct f2fs_sb_info *sbi)
for (i = 0; i < NR_CURSEG_TYPE; i++) {
mutex_init(&array[i].curseg_mutex);
- array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
if (!array[i].sum_blk)
return -ENOMEM;
init_rwsem(&array[i].journal_rwsem);
- array[i].journal = kzalloc(sizeof(struct f2fs_journal),
- GFP_KERNEL);
+ array[i].journal = f2fs_kzalloc(sbi,
+ sizeof(struct f2fs_journal), GFP_KERNEL);
if (!array[i].journal)
return -ENOMEM;
array[i].segno = NULL_SEGNO;
@@ -3482,7 +3477,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
return restore_curseg_summaries(sbi);
}
-static void build_sit_entries(struct f2fs_sb_info *sbi)
+static int build_sit_entries(struct f2fs_sb_info *sbi)
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
@@ -3492,6 +3487,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
int sit_blk_cnt = SIT_BLK_CNT(sbi);
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
+ int err = 0;
do {
readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
@@ -3510,7 +3506,9 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
f2fs_put_page(page, 1);
- check_block_count(sbi, start, &sit);
+ err = check_block_count(sbi, start, &sit);
+ if (err)
+ return err;
seg_info_from_raw_sit(se, &sit);
/* build discard map only one time */
@@ -3545,7 +3543,9 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
old_valid_blocks = se->valid_blocks;
- check_block_count(sbi, start, &sit);
+ err = check_block_count(sbi, start, &sit);
+ if (err)
+ break;
seg_info_from_raw_sit(se, &sit);
if (f2fs_discard_en(sbi)) {
@@ -3565,6 +3565,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
se->valid_blocks - old_valid_blocks;
}
up_read(&curseg->journal_rwsem);
+ return err;
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -3619,7 +3620,7 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
if (!dirty_i->victim_secmap)
return -ENOMEM;
return 0;
@@ -3631,7 +3632,8 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
unsigned int bitmap_size, i;
/* allocate memory for dirty segments list information */
- dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
+ dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
+ GFP_KERNEL);
if (!dirty_i)
return -ENOMEM;
@@ -3641,7 +3643,8 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
for (i = 0; i < NR_DIRTY_TYPE; i++) {
- dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
+ GFP_KERNEL);
if (!dirty_i->dirty_segmap[i])
return -ENOMEM;
}
@@ -3685,7 +3688,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
struct f2fs_sm_info *sm_info;
int err;
- sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
+ sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
if (!sm_info)
return -ENOMEM;
@@ -3737,7 +3740,9 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
return err;
/* reinit free segmap based on SIT */
- build_sit_entries(sbi);
+ err = build_sit_entries(sbi);
+ if (err)
+ return err;
init_free_segmap(sbi);
err = build_dirty_segmap(sbi);
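
The blanket k*alloc → f2fs_k*alloc conversions in this file (and in namei.c, xattr.c, and super.c) route allocations through sb-aware wrappers so the fault-injection framework can fail them on demand; note the new FAULT_KVMALLOC name registered in super.c below. Roughly, as a sketch of the helpers in fs/f2fs/f2fs.h:

	/* Sketch of the wrappers; the real ones live in fs/f2fs/f2fs.h. */
	static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					 size_t size, gfp_t flags)
	{
	#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_KMALLOC))
			return NULL;		/* injected allocation failure */
	#endif
		return kmalloc(size, flags);
	}

	static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
					 size_t size, gfp_t flags)
	{
		return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
	}
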
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index d1d394cdf61d..f11c4bc82c78 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -348,16 +348,41 @@ static inline void seg_info_from_raw_sit(struct seg_entry *se,
se->mtime = le64_to_cpu(rs->mtime);
}
-static inline void seg_info_to_raw_sit(struct seg_entry *se,
+static inline void __seg_info_to_raw_sit(struct seg_entry *se,
struct f2fs_sit_entry *rs)
{
unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
se->valid_blocks;
rs->vblocks = cpu_to_le16(raw_vblocks);
memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
+ rs->mtime = cpu_to_le64(se->mtime);
+}
+
+static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
+ struct page *page, unsigned int start)
+{
+ struct f2fs_sit_block *raw_sit;
+ struct seg_entry *se;
+ struct f2fs_sit_entry *rs;
+ unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
+ (unsigned long)MAIN_SEGS(sbi));
+ int i;
+
+ raw_sit = (struct f2fs_sit_block *)page_address(page);
+ for (i = 0; i < end - start; i++) {
+ rs = &raw_sit->entries[i];
+ se = get_seg_entry(sbi, start + i);
+ __seg_info_to_raw_sit(se, rs);
+ }
+}
+
+static inline void seg_info_to_raw_sit(struct seg_entry *se,
+ struct f2fs_sit_entry *rs)
+{
+ __seg_info_to_raw_sit(se, rs);
+
memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se->ckpt_valid_blocks = se->valid_blocks;
- rs->mtime = cpu_to_le64(se->mtime);
}
static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
@@ -580,47 +605,6 @@ enum {
F2FS_IPU_ASYNC,
};
-static inline bool need_inplace_update_policy(struct inode *inode,
- struct f2fs_io_info *fio)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int policy = SM_I(sbi)->ipu_policy;
-
- if (test_opt(sbi, LFS))
- return false;
-
- /* if this is cold file, we should overwrite to avoid fragmentation */
- if (file_is_cold(inode))
- return true;
-
- if (policy & (0x1 << F2FS_IPU_FORCE))
- return true;
- if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
- return true;
- if (policy & (0x1 << F2FS_IPU_UTIL) &&
- utilization(sbi) > SM_I(sbi)->min_ipu_util)
- return true;
- if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
- utilization(sbi) > SM_I(sbi)->min_ipu_util)
- return true;
-
- /*
- * IPU for rewrite async pages
- */
- if (policy & (0x1 << F2FS_IPU_ASYNC) &&
- fio && fio->op == REQ_OP_WRITE &&
- !(fio->op_flags & REQ_SYNC) &&
- !f2fs_encrypted_inode(inode))
- return true;
-
- /* this is only set during fdatasync */
- if (policy & (0x1 << F2FS_IPU_FSYNC) &&
- is_inode_flag_set(inode, FI_NEED_IPU))
- return true;
-
- return false;
-}
-
static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
int type)
{
@@ -655,7 +639,7 @@ static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
/*
* Summary block is always treated as an invalid block
*/
-static inline void check_block_count(struct f2fs_sb_info *sbi,
+static inline int check_block_count(struct f2fs_sb_info *sbi,
int segno, struct f2fs_sit_entry *raw_sit)
{
#ifdef CONFIG_F2FS_CHECK_FS
@@ -677,11 +661,25 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
cur_pos = next_pos;
is_valid = !is_valid;
} while (cur_pos < sbi->blocks_per_seg);
- BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
+
+ if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Mismatch valid blocks %d vs. %d",
+ GET_SIT_VBLOCKS(raw_sit), valid_blocks);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return -EINVAL;
+ }
#endif
/* check segment usage, and check boundary of a given segment number */
- f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
- || segno > TOTAL_SEGS(sbi) - 1);
+ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+ || segno > TOTAL_SEGS(sbi) - 1)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong valid blocks %d or segno %u",
+ GET_SIT_VBLOCKS(raw_sit), segno);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return -EINVAL;
+ }
+ return 0;
}
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
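
The vblocks field serialized by __seg_info_to_raw_sit() packs the segment type into the bits above the valid-block count; check_block_count() unpacks the count with GET_SIT_VBLOCKS() and now reports corruption with -EINVAL plus SBI_NEED_FSCK instead of a BUG_ON. The packing, assuming the usual SIT_VBLOCKS_SHIFT of 10 from f2fs_fs.h:

	/* 16-bit on-disk vblocks: [15:10] segment type, [9:0] valid blocks
	 * (512 blocks per segment fit in 10 bits).  Shift/mask values are
	 * assumptions from this era's headers. */
	__le16 vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
				     se->valid_blocks);
	unsigned int valid = le16_to_cpu(vblocks) & SIT_VBLOCKS_MASK;
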
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 708155d9c2e4..8173ae688814 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -43,6 +43,7 @@ static struct kmem_cache *f2fs_inode_cachep;
char *fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
+ [FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
[FAULT_PAGE_GET] = "page get",
[FAULT_ALLOC_BIO] = "alloc bio",
@@ -106,6 +107,9 @@ enum {
Opt_noextent_cache,
Opt_noinline_data,
Opt_data_flush,
+ Opt_reserve_root,
+ Opt_resgid,
+ Opt_resuid,
Opt_mode,
Opt_io_size_bits,
Opt_fault_injection,
@@ -156,6 +160,9 @@ static match_table_t f2fs_tokens = {
{Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"},
{Opt_data_flush, "data_flush"},
+ {Opt_reserve_root, "reserve_root=%u"},
+ {Opt_resgid, "resgid=%u"},
+ {Opt_resuid, "resuid=%u"},
{Opt_mode, "mode=%s"},
{Opt_io_size_bits, "io_bits=%u"},
{Opt_fault_injection, "fault_injection=%u"},
@@ -190,6 +197,28 @@ void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
va_end(args);
}
+static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
+{
+ block_t limit = (sbi->user_block_count << 1) / 1000;
+
+ /* limit is 0.2% */
+ if (test_opt(sbi, RESERVE_ROOT) && sbi->root_reserved_blocks > limit) {
+ sbi->root_reserved_blocks = limit;
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Reduce reserved blocks for root = %u",
+ sbi->root_reserved_blocks);
+ }
+ if (!test_opt(sbi, RESERVE_ROOT) &&
+ (!uid_eq(sbi->s_resuid,
+ make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
+ !gid_eq(sbi->s_resgid,
+ make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
+ from_kuid_munged(&init_user_ns, sbi->s_resuid),
+ from_kgid_munged(&init_user_ns, sbi->s_resgid));
+}
+
static void init_once(void *foo)
{
struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
@@ -320,6 +349,8 @@ static int parse_options(struct super_block *sb, char *options)
substring_t args[MAX_OPT_ARGS];
char *p, *name;
int arg = 0;
+ kuid_t uid;
+ kgid_t gid;
#ifdef CONFIG_QUOTA
int ret;
#endif
@@ -487,6 +518,40 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_data_flush:
set_opt(sbi, DATA_FLUSH);
break;
+ case Opt_reserve_root:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ if (test_opt(sbi, RESERVE_ROOT)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Preserve previous reserve_root=%u",
+ sbi->root_reserved_blocks);
+ } else {
+ sbi->root_reserved_blocks = arg;
+ set_opt(sbi, RESERVE_ROOT);
+ }
+ break;
+ case Opt_resuid:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ uid = make_kuid(current_user_ns(), arg);
+ if (!uid_valid(uid)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Invalid uid value %d", arg);
+ return -EINVAL;
+ }
+ sbi->s_resuid = uid;
+ break;
+ case Opt_resgid:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ gid = make_kgid(current_user_ns(), arg);
+ if (!gid_valid(gid)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Invalid gid value %d", arg);
+ return -EINVAL;
+ }
+ sbi->s_resgid = gid;
+ break;
case Opt_mode:
name = match_strdup(&args[0]);
@@ -993,22 +1058,25 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct super_block *sb = dentry->d_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
- block_t total_count, user_block_count, start_count, ovp_count;
+ block_t total_count, user_block_count, start_count;
u64 avail_node_count;
total_count = le64_to_cpu(sbi->raw_super->block_count);
user_block_count = sbi->user_block_count;
start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
- ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
buf->f_type = F2FS_SUPER_MAGIC;
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
- buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
- buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
+ buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
sbi->current_reserved_blocks;
+ if (buf->f_bfree > sbi->root_reserved_blocks)
+ buf->f_bavail = buf->f_bfree - sbi->root_reserved_blocks;
+ else
+ buf->f_bavail = 0;
- avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
+ avail_node_count = sbi->total_node_count - sbi->nquota_files -
+ F2FS_RESERVED_NODE_NUM;
if (avail_node_count > user_block_count) {
buf->f_files = user_block_count;
@@ -1134,6 +1202,11 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (test_opt(sbi, LFS))
seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
+ if (test_opt(sbi, RESERVE_ROOT))
+ seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
+ sbi->root_reserved_blocks,
+ from_kuid_munged(&init_user_ns, sbi->s_resuid),
+ from_kgid_munged(&init_user_ns, sbi->s_resgid));
if (F2FS_IO_SIZE_BITS(sbi))
seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
@@ -1263,7 +1336,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
- } else {
+ } else if (f2fs_readonly(sb) && !(*flags & MS_RDONLY)) {
/* dquot_resume needs RW */
sb->s_flags &= ~SB_RDONLY;
if (sb_any_quota_suspended(sb)) {
@@ -1332,6 +1405,7 @@ skip:
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
+ limit_reserve_root(sbi);
return 0;
restore_gc:
if (need_restart_gc) {
@@ -1656,7 +1730,7 @@ void f2fs_quota_off_umount(struct super_block *sb)
f2fs_quota_off(sb, type);
}
-int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
+static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
*projid = F2FS_I(inode)->i_projid;
return 0;
@@ -2148,14 +2222,15 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
FDEV(devi).nr_blkz++;
- FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
+ FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
+ GFP_KERNEL);
if (!FDEV(devi).blkz_type)
return -ENOMEM;
#define F2FS_REPORT_NR_ZONES 4096
- zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
- GFP_KERNEL);
+ zones = f2fs_kzalloc(sbi, sizeof(struct blk_zone) *
+ F2FS_REPORT_NR_ZONES, GFP_KERNEL);
if (!zones)
return -ENOMEM;
@@ -2295,8 +2370,8 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
* Initialize multiple devices information, or single
* zoned block device information.
*/
- sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
- GFP_KERNEL);
+ sbi->devs = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_info) *
+ max_devices, GFP_KERNEL);
if (!sbi->devs)
return -ENOMEM;
@@ -2419,6 +2494,9 @@ try_onemore:
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
+ sbi->s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+ sbi->s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
+
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sb))
sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
@@ -2462,6 +2540,13 @@ try_onemore:
else
sb->s_qcop = &f2fs_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
+
+ if (f2fs_sb_has_quota_ino(sbi->sb)) {
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (f2fs_qf_ino(sbi->sb, i))
+ sbi->nquota_files++;
+ }
+ }
#endif
sb->s_op = &f2fs_sops;
@@ -2475,6 +2560,7 @@ try_onemore:
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
+ sb->s_iflags |= SB_I_CGROUPWB;
/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;
@@ -2495,8 +2581,9 @@ try_onemore:
int n = (i == META) ? 1: NR_TEMP_TYPE;
int j;
- sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
- GFP_KERNEL);
+ sbi->write_io[i] = f2fs_kmalloc(sbi,
+ n * sizeof(struct f2fs_bio_info),
+ GFP_KERNEL);
if (!sbi->write_io[i]) {
err = -ENOMEM;
goto free_options;
@@ -2517,14 +2604,14 @@ try_onemore:
err = init_percpu_info(sbi);
if (err)
- goto free_options;
+ goto free_bio_info;
if (F2FS_IO_SIZE(sbi) > 1) {
sbi->write_io_dummy =
mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
if (!sbi->write_io_dummy) {
err = -ENOMEM;
- goto free_options;
+ goto free_percpu;
}
}
@@ -2559,6 +2646,7 @@ try_onemore:
sbi->last_valid_block_count = sbi->total_valid_block_count;
sbi->reserved_blocks = 0;
sbi->current_reserved_blocks = 0;
+ limit_reserve_root(sbi);
for (i = 0; i < NR_INODE_TYPE; i++) {
INIT_LIST_HEAD(&sbi->inode_list[i]);
@@ -2604,18 +2692,16 @@ try_onemore:
goto free_nm;
}
- f2fs_join_shrinker(sbi);
-
err = f2fs_build_stats(sbi);
if (err)
- goto free_nm;
+ goto free_node_inode;
/* read root inode and dentry */
root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
if (IS_ERR(root)) {
f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
err = PTR_ERR(root);
- goto free_node_inode;
+ goto free_stats;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
@@ -2711,6 +2797,8 @@ skip_recovery:
sbi->valid_super_block ? 1 : 2, err);
}
+ f2fs_join_shrinker(sbi);
+
f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
cur_cp_version(F2FS_CKPT(sbi)));
f2fs_update_time(sbi, CP_TIME);
@@ -2737,14 +2825,12 @@ free_sysfs:
free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
+free_stats:
+ f2fs_destroy_stats(sbi);
free_node_inode:
- truncate_inode_pages_final(NODE_MAPPING(sbi));
- mutex_lock(&sbi->umount_mutex);
release_ino_entry(sbi, true);
- f2fs_leave_shrinker(sbi);
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
iput(sbi->node_inode);
- mutex_unlock(&sbi->umount_mutex);
- f2fs_destroy_stats(sbi);
free_nm:
destroy_node_manager(sbi);
free_sm:
@@ -2757,10 +2843,12 @@ free_meta_inode:
iput(sbi->meta_inode);
free_io_dummy:
mempool_destroy(sbi->write_io_dummy);
-free_options:
+free_percpu:
+ destroy_percpu_info(sbi);
+free_bio_info:
for (i = 0; i < NR_PAGE_TYPE; i++)
kfree(sbi->write_io[i]);
- destroy_percpu_info(sbi);
+free_options:
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
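
limit_reserve_root() caps the new reserve_root= option at 0.2% of user blocks, since (count << 1) / 1000 == count * 2 / 1000. A worked example:

	/* On a volume with 1,000,000 user blocks:
	 *   limit = (1000000 << 1) / 1000 == 2000 blocks (0.2%),
	 * so mounting with reserve_root=50000 is clamped down to 2000,
	 * and the clamp is logged via f2fs_msg(). */
	block_t limit = (1000000u << 1) / 1000;	/* == 2000 */
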
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 9835348b6e5d..d978c7b6ea04 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -113,6 +113,9 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_quota_ino(sb))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "quota_ino");
+ if (f2fs_sb_has_inode_crtime(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "inode_crtime");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -162,7 +165,8 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
#endif
if (a->struct_type == RESERVED_BLOCKS) {
spin_lock(&sbi->stat_lock);
- if (t > (unsigned long)sbi->user_block_count) {
+ if (t > (unsigned long)(sbi->user_block_count -
+ sbi->root_reserved_blocks)) {
spin_unlock(&sbi->stat_lock);
return -EINVAL;
}
@@ -231,6 +235,7 @@ enum feat_id {
FEAT_INODE_CHECKSUM,
FEAT_FLEXIBLE_INLINE_XATTR,
FEAT_QUOTA_INO,
+ FEAT_INODE_CRTIME,
};
static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -245,6 +250,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
case FEAT_INODE_CHECKSUM:
case FEAT_FLEXIBLE_INLINE_XATTR:
case FEAT_QUOTA_INO:
+ case FEAT_INODE_CRTIME:
return snprintf(buf, PAGE_SIZE, "supported\n");
}
return 0;
@@ -299,6 +305,8 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
#ifdef CONFIG_F2FS_FAULT_INJECTION
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
@@ -320,6 +328,7 @@ F2FS_FEATURE_RO_ATTR(project_quota, FEAT_PROJECT_QUOTA);
F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM);
F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
+F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -346,6 +355,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(cp_interval),
ATTR_LIST(idle_interval),
ATTR_LIST(iostat_enable),
+ ATTR_LIST(readdir_ra),
+ ATTR_LIST(gc_pin_file_thresh),
#ifdef CONFIG_F2FS_FAULT_INJECTION
ATTR_LIST(inject_rate),
ATTR_LIST(inject_type),
@@ -371,6 +382,7 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(inode_checksum),
ATTR_LIST(flexible_inline_xattr),
ATTR_LIST(quota_ino),
+ ATTR_LIST(inode_crtime),
NULL,
};
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index bccbbf2616d2..a1fcd00bbb2b 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -17,7 +17,7 @@
#include "trace.h"
static RADIX_TREE(pids, GFP_ATOMIC);
-static spinlock_t pids_lock;
+static struct mutex pids_lock;
static struct last_io_info last_io;
static inline void __print_last_io(void)
@@ -64,7 +64,7 @@ void f2fs_trace_pid(struct page *page)
if (radix_tree_preload(GFP_NOFS))
return;
- spin_lock(&pids_lock);
+ mutex_lock(&pids_lock);
p = radix_tree_lookup(&pids, pid);
if (p == current)
goto out;
@@ -77,7 +77,7 @@ void f2fs_trace_pid(struct page *page)
MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
pid, current->comm);
out:
- spin_unlock(&pids_lock);
+ mutex_unlock(&pids_lock);
radix_tree_preload_end();
}
@@ -122,7 +122,7 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
void f2fs_build_trace_ios(void)
{
- spin_lock_init(&pids_lock);
+ mutex_init(&pids_lock);
}
#define PIDVEC_SIZE 128
@@ -150,7 +150,7 @@ void f2fs_destroy_trace_ios(void)
pid_t next_pid = 0;
unsigned int found;
- spin_lock(&pids_lock);
+ mutex_lock(&pids_lock);
while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
unsigned idx;
@@ -158,5 +158,5 @@ void f2fs_destroy_trace_ios(void)
for (idx = 0; idx < found; idx++)
radix_tree_delete(&pids, pid[idx]);
}
- spin_unlock(&pids_lock);
+ mutex_unlock(&pids_lock);
}
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index ec8961ef8cac..ae2dfa709f5d 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -298,8 +298,8 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
if (!size && !inline_size)
return -ENODATA;
- txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
- GFP_F2FS_ZERO);
+ txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
+ inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
if (!txattr_addr)
return -ENOMEM;
@@ -351,8 +351,8 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
void *txattr_addr;
int err;
- txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
- GFP_F2FS_ZERO);
+ txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
+ inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
if (!txattr_addr)
return -ENOMEM;
@@ -433,6 +433,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
if (F2FS_I(inode)->i_xattr_nid) {
xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
if (IS_ERR(xpage)) {
+ err = PTR_ERR(xpage);
alloc_nid_failed(sbi, new_nid);
goto in_page_out;
}
@@ -443,6 +444,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
set_new_dnode(&dn, inode, NULL, NULL, new_nid);
xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
if (IS_ERR(xpage)) {
+ err = PTR_ERR(xpage);
alloc_nid_failed(sbi, new_nid);
goto in_page_out;
}
@@ -598,7 +600,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
goto exit;
}
- if (f2fs_xattr_value_same(here, value, size))
+ if (value && f2fs_xattr_value_same(here, value, size))
goto exit;
} else if ((flags & XATTR_REPLACE)) {
error = -ENODATA;
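
The added `value &&` guard matters because __f2fs_setxattr() doubles as the removal path: deleting an attribute passes a NULL value, and comparing that against the stored bytes would dereference NULL. The removal call, sketched from the signature visible in this file:

	/* Removal reuses the set path with value == NULL, size == 0; the
	 * new guard keeps f2fs_xattr_value_same() from running in that
	 * case. */
	error = __f2fs_setxattr(inode, index, name, NULL, 0, ipage, flags);
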
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index b833ffeee1e1..8e100c3bf72c 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
+#include <linux/iversion.h>
#include "fat.h"
/*
@@ -1055,7 +1056,7 @@ int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo)
brelse(bh);
if (err)
return err;
- dir->i_version++;
+ inode_inc_iversion(dir);
if (nr_slots) {
/*
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 20a0a89eaca5..ffbbf0520d9e 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -20,6 +20,7 @@
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <asm/unaligned.h>
+#include <linux/iversion.h>
#include "fat.h"
#ifndef CONFIG_FAT_DEFAULT_IOCHARSET
@@ -507,7 +508,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
MSDOS_I(inode)->i_pos = 0;
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
- inode->i_version++;
+ inode_inc_iversion(inode);
inode->i_generation = get_seconds();
if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
@@ -590,7 +591,7 @@ struct inode *fat_build_inode(struct super_block *sb,
goto out;
}
inode->i_ino = iunique(sb, MSDOS_ROOT_INO);
- inode->i_version = 1;
+ inode_set_iversion(inode, 1);
err = fat_fill_inode(inode, de);
if (err) {
iput(inode);
@@ -1377,7 +1378,7 @@ static int fat_read_root(struct inode *inode)
MSDOS_I(inode)->i_pos = MSDOS_ROOT_INO;
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
- inode->i_version++;
+ inode_inc_iversion(inode);
inode->i_generation = 0;
inode->i_mode = fat_make_mode(sbi, ATTR_DIR, S_IRWXUGO);
inode->i_op = sbi->dir_ops;
@@ -1828,7 +1829,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
if (!root_inode)
goto out_fail;
root_inode->i_ino = MSDOS_ROOT_INO;
- root_inode->i_version = 1;
+ inode_set_iversion(root_inode, 1);
error = fat_read_root(root_inode);
if (error < 0) {
iput(root_inode);
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index d24d2758a363..582ca731a6c9 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
+#include <linux/iversion.h>
#include "fat.h"
/* Characters that are undesirable in an MS-DOS file name */
@@ -480,7 +481,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
} else
mark_inode_dirty(old_inode);
- old_dir->i_version++;
+ inode_inc_iversion(old_dir);
old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
@@ -508,7 +509,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
goto out;
new_i_pos = sinfo.i_pos;
}
- new_dir->i_version++;
+ inode_inc_iversion(new_dir);
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
@@ -540,7 +541,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
- old_dir->i_version++;
+ inode_inc_iversion(old_dir);
old_dir->i_ctime = old_dir->i_mtime = ts;
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 02c066663a3a..cefea792cde8 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/kernel.h>
-
+#include <linux/iversion.h>
#include "fat.h"
static inline unsigned long vfat_d_version(struct dentry *dentry)
@@ -46,7 +46,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry)
{
int ret = 1;
spin_lock(&dentry->d_lock);
- if (vfat_d_version(dentry) != d_inode(dentry->d_parent)->i_version)
+ if (inode_cmp_iversion(d_inode(dentry->d_parent), vfat_d_version(dentry)))
ret = 0;
spin_unlock(&dentry->d_lock);
return ret;
@@ -759,7 +759,7 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!inode)
- vfat_d_version_set(dentry, dir->i_version);
+ vfat_d_version_set(dentry, inode_query_iversion(dir));
return d_splice_alias(inode, dentry);
error:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -781,7 +781,7 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo);
if (err)
goto out;
- dir->i_version++;
+ inode_inc_iversion(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
@@ -789,7 +789,7 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
err = PTR_ERR(inode);
goto out;
}
- inode->i_version++;
+ inode_inc_iversion(inode);
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
@@ -823,7 +823,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
clear_nlink(inode);
inode->i_mtime = inode->i_atime = current_time(inode);
fat_detach(inode);
- vfat_d_version_set(dentry, dir->i_version);
+ vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -849,7 +849,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
clear_nlink(inode);
inode->i_mtime = inode->i_atime = current_time(inode);
fat_detach(inode);
- vfat_d_version_set(dentry, dir->i_version);
+ vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -875,7 +875,7 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
if (err)
goto out_free;
- dir->i_version++;
+ inode_inc_iversion(dir);
inc_nlink(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
@@ -885,7 +885,7 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
/* the directory was completed, just return an error */
goto out;
}
- inode->i_version++;
+ inode_inc_iversion(inode);
set_nlink(inode, 2);
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
@@ -951,7 +951,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out;
new_i_pos = sinfo.i_pos;
}
- new_dir->i_version++;
+ inode_inc_iversion(new_dir);
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
@@ -979,7 +979,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
- old_dir->i_version++;
+ inode_inc_iversion(old_dir);
old_dir->i_ctime = old_dir->i_mtime = ts;
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
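
For illustration, a minimal sketch (not part of the patch) of the <linux/iversion.h> accessors the FAT hunks above adopt in place of open-coded i_version arithmetic: inode_inc_iversion() bumps the counter, inode_query_iversion() latches a snapshot, and inode_cmp_iversion() tests a snapshot against the current value. The helper names below are hypothetical.

    #include <linux/fs.h>
    #include <linux/iversion.h>

    static u64 snapshot_dir_version(struct inode *dir)
    {
        /* Latch the change attribute; marks it queried so later
           directory modifications are guaranteed to increment it. */
        return inode_query_iversion(dir);
    }

    static bool dir_unchanged(struct inode *dir, u64 snapshot)
    {
        /* inode_cmp_iversion() returns 0 iff nothing changed. */
        return inode_cmp_iversion(dir, snapshot) == 0;
    }
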
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 0522e283a4f4..e95fa0a352ea 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -418,7 +418,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
break;
case F_ADD_SEALS:
case F_GET_SEALS:
- err = shmem_fcntl(filp, cmd, arg);
+ err = memfd_fcntl(filp, cmd, arg);
break;
case F_GET_RW_HINT:
case F_SET_RW_HINT:
@@ -690,7 +690,7 @@ COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
/* Table to convert sigio signal codes into poll band bitmaps */
-static const long band_table[NSIGPOLL] = {
+static const __poll_t band_table[NSIGPOLL] = {
POLLIN | POLLRDNORM, /* POLL_IN */
POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
@@ -737,6 +737,7 @@ static void send_sigio_to_task(struct task_struct *p,
delivered even if we can't queue. Failure to
queue in this case _should_ be reported; we fall
back to SIGIO in that case. --sct */
+ clear_siginfo(&si);
si.si_signo = signum;
si.si_errno = 0;
si.si_code = reason;
@@ -758,7 +759,7 @@ static void send_sigio_to_task(struct task_struct *p,
if (reason - POLL_IN >= NSIGPOLL)
si.si_band = ~0L;
else
- si.si_band = band_table[reason - POLL_IN];
+ si.si_band = mangle_poll(band_table[reason - POLL_IN]);
si.si_fd = fd;
if (!do_send_sig_info(signum, &si, p, group))
break;
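
The two fcntl.c hunks above encode a pattern worth spelling out: zero the siginfo with clear_siginfo() before filling it so no uninitialized kernel stack reaches userspace, and convert internal __poll_t bits with mangle_poll() before storing them in si_band, since the in-kernel bit layout need not match the architecture's poll ABI. A hedged sketch, with deliver_pollable_signal() as a hypothetical helper:

    #include <linux/poll.h>
    #include <linux/sched/signal.h>

    static void deliver_pollable_signal(struct task_struct *p, int signum,
                                        int fd, __poll_t events)
    {
        struct siginfo si;

        clear_siginfo(&si);     /* no stack padding leaks to userspace */
        si.si_signo = signum;
        si.si_code = POLL_IN;
        si.si_fd = fd;
        /* Translate kernel-internal poll bits to the userspace ABI. */
        si.si_band = mangle_poll(events);
        do_send_sig_info(signum, &si, p, false);
    }
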
diff --git a/fs/file.c b/fs/file.c
index 3b080834b870..42f0db4bd0fb 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -11,18 +11,13 @@
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
-#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
-#include <linux/workqueue.h>
unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
@@ -391,7 +386,7 @@ static struct fdtable *close_files(struct files_struct * files)
struct file * file = xchg(&fdt->fd[i], NULL);
if (file) {
filp_close(file, files);
- cond_resched_rcu_qs();
+ cond_resched();
}
}
i++;
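
The close_files() change relies on cond_resched() now also registering an RCU quiescent state, which made the stronger cond_resched_rcu_qs() variant unnecessary in long loops. A sketch under that assumption; struct item and process() are hypothetical:

    #include <linux/sched.h>

    struct item;
    void process(struct item *item);    /* hypothetical per-item work */

    static void drain_items(struct item **items, unsigned long n)
    {
        unsigned long i;

        for (i = 0; i < n; i++) {
            process(items[i]);
            cond_resched();     /* yields the CPU and quiesces RCU */
        }
    }
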
diff --git a/fs/file_table.c b/fs/file_table.c
index 2dc9f38bd195..7ec0b3e5f05d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -23,7 +23,6 @@
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
-#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index cea4836385b7..d4d04fee568a 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -126,7 +126,7 @@ static void wb_io_lists_depopulated(struct bdi_writeback *wb)
* inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
* @inode: inode to be moved
* @wb: target bdi_writeback
- * @head: one of @wb->b_{dirty|io|more_io}
+ * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
*
* Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
* Returns %true if @inode is the first occupant of the !dirty_time IO
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 17f0d05bfd4c..aa089a6925d0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2004,9 +2004,9 @@ out:
return ret;
}
-static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
+static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
- unsigned mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
struct fuse_iqueue *fiq;
struct fuse_dev *fud = fuse_get_dev(file);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index cb7dff5c45d7..e85e974dd211 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2751,7 +2751,7 @@ static void fuse_register_polled_file(struct fuse_conn *fc,
spin_unlock(&fc->lock);
}
-unsigned fuse_file_poll(struct file *file, poll_table *wait)
+__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
@@ -2764,7 +2764,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
return DEFAULT_POLLMASK;
poll_wait(file, &ff->poll_wait, wait);
- inarg.events = (__u32)poll_requested_events(wait);
+ inarg.events = mangle_poll(poll_requested_events(wait));
/*
* Ask for notification iff there's someone waiting for it.
@@ -2786,7 +2786,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
err = fuse_simple_request(fc, &args);
if (!err)
- return outarg.revents;
+ return demangle_poll(outarg.revents);
if (err == -ENOSYS) {
fc->no_poll = 1;
return DEFAULT_POLLMASK;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index d5773ca67ad2..c4c093bbf456 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -951,7 +951,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
unsigned int flags);
long fuse_ioctl_common(struct file *file, unsigned int cmd,
unsigned long arg, unsigned int flags);
-unsigned fuse_file_poll(struct file *file, poll_table *wait);
+__poll_t fuse_file_poll(struct file *file, poll_table *wait);
int fuse_dev_release(struct inode *inode, struct file *file);
bool fuse_write_update_size(struct inode *inode, loff_t pos);
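
Taken together, the fuse hunks show the full __poll_t round trip: ->poll implementations now return __poll_t, and because fuse forwards poll events across its userspace protocol, the requested events are mangle_poll()ed on the way out and the returned revents demangle_poll()ed on the way back in. A minimal sketch of a ->poll method under this regime; the mydev names are hypothetical:

    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    struct mydev {
        wait_queue_head_t waitq;
        bool readable;
    };

    static __poll_t mydev_poll(struct file *file, poll_table *wait)
    {
        struct mydev *dev = file->private_data;
        __poll_t mask = POLLOUT | POLLWRNORM;   /* always writable */

        poll_wait(file, &dev->waitq, wait);
        if (dev->readable)
            mask |= POLLIN | POLLRDNORM;
        return mask;
    }
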
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 43c827a7cce5..c0225d4b5435 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -3,6 +3,8 @@ config GFS2_FS
depends on (64BIT || LBDAF)
select FS_POSIX_ACL
select CRC32
+ select CRYPTO
+ select CRYPTO_CRC32C
select QUOTACTL
select FS_IOMAP
help
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 1daf15a1f00c..2f725b4a386b 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -39,18 +39,21 @@
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
- unsigned int from, unsigned int to)
+ unsigned int from, unsigned int len)
{
struct buffer_head *head = page_buffers(page);
unsigned int bsize = head->b_size;
struct buffer_head *bh;
+ unsigned int to = from + len;
unsigned int start, end;
for (bh = head, start = 0; bh != head || !start;
bh = bh->b_this_page, start = end) {
end = start + bsize;
- if (end <= from || start >= to)
+ if (end <= from)
continue;
+ if (start >= to)
+ break;
if (gfs2_is_jdata(ip))
set_buffer_uptodate(bh);
gfs2_trans_add_data(ip->i_gl, bh);
@@ -189,7 +192,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
create_empty_buffers(page, inode->i_sb->s_blocksize,
BIT(BH_Dirty)|BIT(BH_Uptodate));
}
- gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
+ gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
}
return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
@@ -255,7 +258,6 @@ static int gfs2_writepages(struct address_space *mapping,
* @wbc: The writeback control
* @pvec: The vector of pages
* @nr_pages: The number of pages to write
- * @end: End position
* @done_index: Page index
*
* Returns: non-zero if loop should terminate, zero otherwise
@@ -264,7 +266,7 @@ static int gfs2_writepages(struct address_space *mapping,
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
struct writeback_control *wbc,
struct pagevec *pvec,
- int nr_pages, pgoff_t end,
+ int nr_pages,
pgoff_t *done_index)
{
struct inode *inode = mapping->host;
@@ -402,7 +404,7 @@ retry:
if (nr_pages == 0)
break;
- ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index);
+ ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
if (ret)
done = 1;
if (ret > 0)
@@ -446,7 +448,8 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
ret = gfs2_write_cache_jdata(mapping, wbc);
if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
- gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_JDATA_WPAGES);
ret = gfs2_write_cache_jdata(mapping, wbc);
}
return ret;
@@ -483,8 +486,8 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
return error;
kaddr = kmap_atomic(page);
- if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
- dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
+ if (dsize > gfs2_max_stuffed_size(ip))
+ dsize = gfs2_max_stuffed_size(ip);
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
kunmap_atomic(kaddr);
@@ -501,10 +504,9 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
* @file: The file to read a page for
* @page: The page to read
*
- * This is the core of gfs2's readpage. Its used by the internal file
- * reading code as in that case we already hold the glock. Also its
+ * This is the core of gfs2's readpage. It's used by the internal file
+ * reading code as in that case we already hold the glock. Also it's
* called by gfs2_readpage() once the required lock has been granted.
- *
*/
static int __gfs2_readpage(void *file, struct page *page)
@@ -725,7 +727,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
if (gfs2_is_stuffed(ip)) {
error = 0;
- if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
+ if (pos + len > gfs2_max_stuffed_size(ip)) {
error = gfs2_unstuff_dinode(ip, page);
if (error == 0)
goto prepare_write;
@@ -832,7 +834,8 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
void *kaddr;
unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
- BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
+ BUG_ON(pos + len > gfs2_max_stuffed_size(ip));
+
kaddr = kmap_atomic(page);
memcpy(buf + pos, kaddr + pos, copied);
flush_dcache_page(page);
@@ -890,8 +893,6 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct buffer_head *dibh;
- unsigned int from = pos & (PAGE_SIZE - 1);
- unsigned int to = from + len;
int ret;
struct gfs2_trans *tr = current->journal_info;
BUG_ON(!tr);
@@ -909,7 +910,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);
if (!gfs2_is_writeback(ip))
- gfs2_page_add_databufs(ip, page, from, to);
+ gfs2_page_add_databufs(ip, page, pos & ~PAGE_MASK, len);
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
if (tr->tr_num_buf_new)
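
Two details in the aops.c hunks are easy to miss: gfs2_page_add_databufs() now takes a length rather than an inclusive end offset, which fixes the off-by-one in __gfs2_jdata_writepage (s_blocksize-1 became s_blocksize), and the walk stops with break once a buffer starts past the range instead of scanning the whole page. A sketch of the resulting buffer-head walk; visit() stands in for the jdata bookkeeping:

    #include <linux/buffer_head.h>

    static void for_each_bh_in_range(struct page *page,
                                     unsigned int from, unsigned int len,
                                     void (*visit)(struct buffer_head *))
    {
        struct buffer_head *bh, *head = page_buffers(page);
        unsigned int to = from + len;
        unsigned int start = 0, end;

        for (bh = head; bh != head || !start;
             bh = bh->b_this_page, start = end) {
            end = start + bh->b_size;
            if (end <= from)
                continue;       /* before the range */
            if (start >= to)
                break;          /* past the range: done */
            visit(bh);
        }
    }
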
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index d5f0d96169c5..86863792f36a 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -69,8 +69,8 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
void *kaddr = kmap(page);
u64 dsize = i_size_read(inode);
- if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
- dsize = dibh->b_size - sizeof(struct gfs2_dinode);
+ if (dsize > gfs2_max_stuffed_size(ip))
+ dsize = gfs2_max_stuffed_size(ip);
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
@@ -279,14 +279,13 @@ static inline __be64 *metapointer(unsigned int height, const struct metapath *mp
return p + mp->mp_list[height];
}
-static void gfs2_metapath_ra(struct gfs2_glock *gl,
- const struct buffer_head *bh, const __be64 *pos)
+static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
{
- struct buffer_head *rabh;
- const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
const __be64 *t;
- for (t = pos; t < endp; t++) {
+ for (t = start; t < end; t++) {
+ struct buffer_head *rabh;
+
if (!*t)
continue;
@@ -305,21 +304,22 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl,
}
}
-/**
- * lookup_mp_height - helper function for lookup_metapath
- * @ip: the inode
- * @mp: the metapath
- * @h: the height which needs looking up
- */
-static int lookup_mp_height(struct gfs2_inode *ip, struct metapath *mp, int h)
+static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
+ unsigned int x, unsigned int h)
{
- __be64 *ptr = metapointer(h, mp);
- u64 dblock = be64_to_cpu(*ptr);
-
- if (!dblock)
- return h + 1;
+ for (; x < h; x++) {
+ __be64 *ptr = metapointer(x, mp);
+ u64 dblock = be64_to_cpu(*ptr);
+ int ret;
- return gfs2_meta_indirect_buffer(ip, h + 1, dblock, &mp->mp_bh[h + 1]);
+ if (!dblock)
+ break;
+ ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
+ if (ret)
+ return ret;
+ }
+ mp->mp_aheight = x + 1;
+ return 0;
}
/**
@@ -336,25 +336,12 @@ static int lookup_mp_height(struct gfs2_inode *ip, struct metapath *mp, int h)
* at which it found the unallocated block. Blocks which are found are
* added to the mp->mp_bh[] list.
*
- * Returns: error or height of metadata tree
+ * Returns: error
*/
static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
- unsigned int end_of_metadata = ip->i_height - 1;
- unsigned int x;
- int ret;
-
- for (x = 0; x < end_of_metadata; x++) {
- ret = lookup_mp_height(ip, mp, x);
- if (ret)
- goto out;
- }
-
- ret = ip->i_height;
-out:
- mp->mp_aheight = ret;
- return ret;
+ return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
}
/**
@@ -365,25 +352,25 @@ out:
*
* Similar to lookup_metapath, but does lookups for a range of heights
*
- * Returns: error or height of metadata tree
+ * Returns: error or the number of buffers filled
*/
static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
{
- unsigned int start_h = h - 1;
+ unsigned int x = 0;
int ret;
if (h) {
/* find the first buffer we need to look up. */
- while (start_h > 0 && mp->mp_bh[start_h] == NULL)
- start_h--;
- for (; start_h < h; start_h++) {
- ret = lookup_mp_height(ip, mp, start_h);
- if (ret)
- return ret;
+ for (x = h - 1; x > 0; x--) {
+ if (mp->mp_bh[x])
+ break;
}
}
- return ip->i_height;
+ ret = __fillup_metapath(ip, mp, x, h);
+ if (ret)
+ return ret;
+ return mp->mp_aheight - x - 1;
}
static inline void release_metapath(struct metapath *mp)
@@ -474,13 +461,6 @@ enum alloc_state {
/* ALLOC_UNSTUFF = 3, TBD and rather complicated */
};
-static inline unsigned int hptrs(struct gfs2_sbd *sdp, const unsigned int hgt)
-{
- if (hgt)
- return sdp->sd_inptrs;
- return sdp->sd_diptrs;
-}
-
/**
* gfs2_bmap_alloc - Build a metadata tree of the requested height
* @inode: The GFS2 inode
@@ -788,7 +768,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
goto do_alloc;
ret = lookup_metapath(ip, &mp);
- if (ret < 0)
+ if (ret)
goto out_release;
if (mp.mp_aheight != ip->i_height)
@@ -913,17 +893,18 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
}
/**
- * gfs2_block_truncate_page - Deal with zeroing out data for truncate
+ * gfs2_block_zero_range - Deal with zeroing out data
*
* This is partly borrowed from ext3.
*/
-static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
+static int gfs2_block_zero_range(struct inode *inode, loff_t from,
+ unsigned int length)
{
- struct inode *inode = mapping->host;
+ struct address_space *mapping = inode->i_mapping;
struct gfs2_inode *ip = GFS2_I(inode);
unsigned long index = from >> PAGE_SHIFT;
unsigned offset = from & (PAGE_SIZE-1);
- unsigned blocksize, iblock, length, pos;
+ unsigned blocksize, iblock, pos;
struct buffer_head *bh;
struct page *page;
int err;
@@ -933,7 +914,6 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
return 0;
blocksize = inode->i_sb->s_blocksize;
- length = blocksize - (offset & (blocksize - 1));
iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
@@ -1003,11 +983,24 @@ static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize
int error;
while (oldsize != newsize) {
+ struct gfs2_trans *tr;
+ unsigned int offs;
+
chunk = oldsize - newsize;
if (chunk > max_chunk)
chunk = max_chunk;
+
+ offs = oldsize & ~PAGE_MASK;
+ if (offs && chunk > PAGE_SIZE)
+ chunk = offs + ((chunk - offs) & PAGE_MASK);
+
truncate_pagecache(inode, oldsize - chunk);
oldsize -= chunk;
+
+ tr = current->journal_info;
+ if (!test_bit(TR_TOUCHED, &tr->tr_flags))
+ continue;
+
gfs2_trans_end(sdp);
error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
if (error)
@@ -1017,13 +1010,13 @@ static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize
return 0;
}
-static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
+static int trunc_start(struct inode *inode, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct address_space *mapping = inode->i_mapping;
- struct buffer_head *dibh;
+ struct buffer_head *dibh = NULL;
int journaled = gfs2_is_jdata(ip);
+ u64 oldsize = inode->i_size;
int error;
if (journaled)
@@ -1042,10 +1035,13 @@ static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
if (gfs2_is_stuffed(ip)) {
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
} else {
- if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
- error = gfs2_block_truncate_page(mapping, newsize);
+ unsigned int blocksize = i_blocksize(inode);
+ unsigned int offs = newsize & (blocksize - 1);
+ if (offs) {
+ error = gfs2_block_zero_range(inode, newsize,
+ blocksize - offs);
if (error)
- goto out_brelse;
+ goto out;
}
ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
}
@@ -1059,15 +1055,10 @@ static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
else
truncate_pagecache(inode, newsize);
- if (error) {
- brelse(dibh);
- return error;
- }
-
-out_brelse:
- brelse(dibh);
out:
- gfs2_trans_end(sdp);
+ brelse(dibh);
+ if (current->journal_info)
+ gfs2_trans_end(sdp);
return error;
}
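
trunc_start() now zeroes only the tail of the last surviving block through the renamed gfs2_block_zero_range(), instead of letting the callee derive the length. The arithmetic, as a sketch: with a 4096-byte block and newsize = 10000, offs = 10000 & 4095 = 1808, so the 2288 bytes from offset 10000 up to the block boundary are cleared.

    /* Sketch: the partial-block range trunc_start() zeroes. */
    static void tail_zero_range(u64 newsize, unsigned int blocksize,
                                u64 *zero_from, unsigned int *zero_len)
    {
        /* blocksize is a power of two */
        unsigned int offs = newsize & (blocksize - 1);

        *zero_from = newsize;
        *zero_len = offs ? blocksize - offs : 0;
    }
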
@@ -1075,10 +1066,11 @@ out:
* sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
* @ip: inode
* @rg_gh: holder of resource group glock
- * @mp: current metapath fully populated with buffers
+ * @bh: buffer head to sweep
+ * @start: starting point in bh
+ * @end: end point in bh
+ * @meta: true if bh points to metadata (rather than data)
* @btotal: place to keep count of total blocks freed
- * @hgt: height we're processing
- * @first: true if this is the first call to this function for this height
*
* We sweep a metadata buffer (provided by the caller) for blocks we need to
* free, and free them all. However, we do it one rgrp at a time. If this
@@ -1093,47 +1085,46 @@ out:
* *btotal has the total number of blocks freed
*/
static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
- const struct metapath *mp, u32 *btotal, int hgt,
- bool preserve1)
+ struct buffer_head *bh, __be64 *start, __be64 *end,
+ bool meta, u32 *btotal)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
struct gfs2_trans *tr;
- struct buffer_head *bh = mp->mp_bh[hgt];
- __be64 *top, *bottom, *p;
+ __be64 *p;
int blks_outside_rgrp;
u64 bn, bstart, isize_blks;
s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
- int meta = ((hgt != ip->i_height - 1) ? 1 : 0);
int ret = 0;
bool buf_in_tr = false; /* buffer was added to transaction */
- if (gfs2_metatype_check(sdp, bh,
- (hgt ? GFS2_METATYPE_IN : GFS2_METATYPE_DI)))
- return -EIO;
-
more_rgrps:
+ rgd = NULL;
+ if (gfs2_holder_initialized(rd_gh)) {
+ rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
+ gfs2_assert_withdraw(sdp,
+ gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
+ }
blks_outside_rgrp = 0;
bstart = 0;
blen = 0;
- top = metapointer(hgt, mp); /* first ptr from metapath */
- /* If we're keeping some data at the truncation point, we've got to
- preserve the metadata tree by adding 1 to the starting metapath. */
- if (preserve1)
- top++;
- bottom = (__be64 *)(bh->b_data + bh->b_size);
-
- for (p = top; p < bottom; p++) {
+ for (p = start; p < end; p++) {
if (!*p)
continue;
bn = be64_to_cpu(*p);
- if (gfs2_holder_initialized(rd_gh)) {
- rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
- gfs2_assert_withdraw(sdp,
- gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
+
+ if (rgd) {
+ if (!rgrp_contains_block(rgd, bn)) {
+ blks_outside_rgrp++;
+ continue;
+ }
} else {
- rgd = gfs2_blk2rgrpd(sdp, bn, false);
+ rgd = gfs2_blk2rgrpd(sdp, bn, true);
+ if (unlikely(!rgd)) {
+ ret = -EIO;
+ goto out;
+ }
ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
0, rd_gh);
if (ret)
@@ -1145,11 +1136,6 @@ more_rgrps:
gfs2_rs_deltree(&ip->i_res);
}
- if (!rgrp_contains_block(rgd, bn)) {
- blks_outside_rgrp++;
- continue;
- }
-
/* The size of our transactions will be unknown until we
actually process all the metadata blocks that relate to
the rgrp. So we estimate. We know it can't be more than
@@ -1168,7 +1154,7 @@ more_rgrps:
jblocks_rqsted += isize_blks;
revokes = jblocks_rqsted;
if (meta)
- revokes += hptrs(sdp, hgt);
+ revokes += end - start;
else if (ip->i_depth)
revokes += sdp->sd_inptrs;
ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
@@ -1226,7 +1212,11 @@ out_unlock:
outside the rgrp we just processed,
do it all over again. */
if (current->journal_info) {
- struct buffer_head *dibh = mp->mp_bh[0];
+ struct buffer_head *dibh;
+
+ ret = gfs2_meta_inode_buffer(ip, &dibh);
+ if (ret)
+ goto out;
/* Every transaction boundary, we rewrite the dinode
to keep its di_blocks current in case of failure. */
@@ -1234,6 +1224,7 @@ out_unlock:
current_time(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
+ brelse(dibh);
up_write(&ip->i_rw_mutex);
gfs2_trans_end(sdp);
}
@@ -1245,38 +1236,48 @@ out:
return ret;
}
+static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
+{
+ if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
+ return false;
+ return true;
+}
+
/**
* find_nonnull_ptr - find a non-null pointer given a metapath and height
- * assumes the metapath is valid (with buffers) out to height h
* @mp: starting metapath
* @h: desired height to search
*
+ * Assumes the metapath is valid (with buffers) out to height h.
* Returns: true if a non-null pointer was found in the metapath buffer
* false if all remaining pointers are NULL in the buffer
*/
static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
- unsigned int h)
+ unsigned int h,
+ __u16 *end_list, unsigned int end_aligned)
{
- __be64 *ptr;
- unsigned int ptrs = hptrs(sdp, h) - 1;
+ struct buffer_head *bh = mp->mp_bh[h];
+ __be64 *first, *ptr, *end;
+
+ first = metaptr1(h, mp);
+ ptr = first + mp->mp_list[h];
+ end = (__be64 *)(bh->b_data + bh->b_size);
+ if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
+ bool keep_end = h < end_aligned;
+ end = first + end_list[h] + keep_end;
+ }
- while (true) {
- ptr = metapointer(h, mp);
+ while (ptr < end) {
if (*ptr) { /* if we have a non-null pointer */
- /* Now zero the metapath after the current height. */
+ mp->mp_list[h] = ptr - first;
h++;
if (h < GFS2_MAX_META_HEIGHT)
- memset(&mp->mp_list[h], 0,
- (GFS2_MAX_META_HEIGHT - h) *
- sizeof(mp->mp_list[0]));
+ mp->mp_list[h] = 0;
return true;
}
-
- if (mp->mp_list[h] < ptrs)
- mp->mp_list[h]++;
- else
- return false; /* no more pointers in this buffer */
+ ptr++;
}
+ return false;
}
enum dealloc_states {
@@ -1286,49 +1287,126 @@ enum dealloc_states {
DEALLOC_DONE = 3, /* process complete */
};
-static bool mp_eq_to_hgt(struct metapath *mp, __u16 *nbof, unsigned int h)
+static inline void
+metapointer_range(struct metapath *mp, int height,
+ __u16 *start_list, unsigned int start_aligned,
+ __u16 *end_list, unsigned int end_aligned,
+ __be64 **start, __be64 **end)
{
- if (memcmp(mp->mp_list, nbof, h * sizeof(mp->mp_list[0])))
- return false;
- return true;
+ struct buffer_head *bh = mp->mp_bh[height];
+ __be64 *first;
+
+ first = metaptr1(height, mp);
+ *start = first;
+ if (mp_eq_to_hgt(mp, start_list, height)) {
+ bool keep_start = height < start_aligned;
+ *start = first + start_list[height] + keep_start;
+ }
+ *end = (__be64 *)(bh->b_data + bh->b_size);
+ if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
+ bool keep_end = height < end_aligned;
+ *end = first + end_list[height] + keep_end;
+ }
+}
+
+static inline bool walk_done(struct gfs2_sbd *sdp,
+ struct metapath *mp, int height,
+ __u16 *end_list, unsigned int end_aligned)
+{
+ __u16 end;
+
+ if (end_list) {
+ bool keep_end = height < end_aligned;
+ if (!mp_eq_to_hgt(mp, end_list, height))
+ return false;
+ end = end_list[height] + keep_end;
+ } else
+ end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
+ return mp->mp_list[height] >= end;
}
/**
- * trunc_dealloc - truncate a file down to a desired size
+ * punch_hole - deallocate blocks in a file
* @ip: inode to truncate
- * @newsize: The desired size of the file
+ * @offset: the start of the hole
+ * @length: the size of the hole (or 0 for truncate)
+ *
+ * Punch a hole into a file or truncate a file at a given position. This
+ * function operates in whole blocks (@offset and @length are rounded
+ * accordingly); partially filled blocks must be cleared otherwise.
*
- * This function truncates a file to newsize. It works from the
- * bottom up, and from the right to the left. In other words, it strips off
- * the highest layer (data) before stripping any of the metadata. Doing it
- * this way is best in case the operation is interrupted by power failure, etc.
- * The dinode is rewritten in every transaction to guarantee integrity.
+ * This function works from the bottom up, and from the right to the left. In
+ * other words, it strips off the highest layer (data) before stripping any of
+ * the metadata. Doing it this way is best in case the operation is interrupted
+ * by power failure, etc. The dinode is rewritten in every transaction to
+ * guarantee integrity.
*/
-static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
+static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct metapath mp;
+ struct metapath mp = {};
struct buffer_head *dibh, *bh;
struct gfs2_holder rd_gh;
- u64 lblock;
- __u16 nbof[GFS2_MAX_META_HEIGHT]; /* new beginning of truncation */
+ unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
+ u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
+ __u16 start_list[GFS2_MAX_META_HEIGHT];
+ __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
+ unsigned int start_aligned, uninitialized_var(end_aligned);
unsigned int strip_h = ip->i_height - 1;
u32 btotal = 0;
int ret, state;
int mp_h; /* metapath buffers are read in to this height */
- sector_t last_ra = 0;
u64 prev_bnr = 0;
- bool preserve1; /* need to preserve the first meta pointer? */
+ __be64 *start, *end;
- if (!newsize)
- lblock = 0;
- else
- lblock = (newsize - 1) >> sdp->sd_sb.sb_bsize_shift;
+ /*
+ * The start position of the hole is defined by lblock, start_list, and
+ * start_aligned. The end position of the hole is defined by lend,
+ * end_list, and end_aligned.
+ *
+ * start_aligned and end_aligned define down to which height the start
+ * and end positions are aligned to the metadata tree (i.e., the
+ * position is a multiple of the metadata granularity at the height
+ * above). This determines at which heights additional meta pointers
+ * need to be preserved for the remaining data.
+ */
+
+ if (length) {
+ u64 maxsize = sdp->sd_heightsize[ip->i_height];
+ u64 end_offset = offset + length;
+ u64 lend;
+
+ /*
+ * Clip the end at the maximum file size for the given height:
+ * that's how far the metadata goes; files bigger than that
+ * will have additional layers of indirection.
+ */
+ if (end_offset > maxsize)
+ end_offset = maxsize;
+ lend = end_offset >> bsize_shift;
+
+ if (lblock >= lend)
+ return 0;
+
+ find_metapath(sdp, lend, &mp, ip->i_height);
+ end_list = __end_list;
+ memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
+
+ for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
+ if (end_list[mp_h])
+ break;
+ }
+ end_aligned = mp_h;
+ }
- memset(&mp, 0, sizeof(mp));
find_metapath(sdp, lblock, &mp, ip->i_height);
+ memcpy(start_list, mp.mp_list, sizeof(start_list));
- memcpy(&nbof, &mp.mp_list, sizeof(nbof));
+ for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
+ if (start_list[mp_h])
+ break;
+ }
+ start_aligned = mp_h;
ret = gfs2_meta_inode_buffer(ip, &dibh);
if (ret)
@@ -1336,7 +1414,17 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
mp.mp_bh[0] = dibh;
ret = lookup_metapath(ip, &mp);
- if (ret == ip->i_height)
+ if (ret)
+ goto out_metapath;
+
+ /* issue read-ahead on metadata */
+ for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
+ metapointer_range(&mp, mp_h, start_list, start_aligned,
+ end_list, end_aligned, &start, &end);
+ gfs2_metapath_ra(ip->i_gl, start, end);
+ }
+
+ if (mp.mp_aheight == ip->i_height)
state = DEALLOC_MP_FULL; /* We have a complete metapath */
else
state = DEALLOC_FILL_MP; /* deal with partial metapath */
@@ -1357,20 +1445,6 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
/* Truncate a full metapath at the given strip height.
* Note that strip_h == mp_h in order to be in this state. */
case DEALLOC_MP_FULL:
- if (mp_h > 0) { /* issue read-ahead on metadata */
- __be64 *top;
-
- bh = mp.mp_bh[mp_h - 1];
- if (bh->b_blocknr != last_ra) {
- last_ra = bh->b_blocknr;
- top = metaptr1(mp_h - 1, &mp);
- gfs2_metapath_ra(ip->i_gl, bh, top);
- }
- }
- /* If we're truncating to a non-zero size and the mp is
- at the beginning of file for the strip height, we
- need to preserve the first metadata pointer. */
- preserve1 = (newsize && mp_eq_to_hgt(&mp, nbof, mp_h));
bh = mp.mp_bh[mp_h];
gfs2_assert_withdraw(sdp, bh);
if (gfs2_assert_withdraw(sdp,
@@ -1382,8 +1456,28 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
prev_bnr, ip->i_height, strip_h, mp_h);
}
prev_bnr = bh->b_blocknr;
- ret = sweep_bh_for_rgrps(ip, &rd_gh, &mp, &btotal,
- mp_h, preserve1);
+
+ if (gfs2_metatype_check(sdp, bh,
+ (mp_h ? GFS2_METATYPE_IN :
+ GFS2_METATYPE_DI))) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Below, passing end_aligned as 0 gives us the
+ * metapointer range excluding the end point: the end
+ * point is the first metapath we must not deallocate!
+ */
+
+ metapointer_range(&mp, mp_h, start_list, start_aligned,
+ end_list, 0 /* end_aligned */,
+ &start, &end);
+ ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
+ start, end,
+ mp_h != ip->i_height - 1,
+ &btotal);
+
/* If we hit an error or just swept dinode buffer,
just exit. */
if (ret || !mp_h) {
@@ -1407,20 +1501,20 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
stripping the previous level of metadata. */
if (mp_h == 0) {
strip_h--;
- memcpy(&mp.mp_list, &nbof, sizeof(nbof));
+ memcpy(mp.mp_list, start_list, sizeof(start_list));
mp_h = strip_h;
state = DEALLOC_FILL_MP;
break;
}
mp.mp_list[mp_h] = 0;
mp_h--; /* search one metadata height down */
- if (mp.mp_list[mp_h] >= hptrs(sdp, mp_h) - 1)
- break; /* loop around in the same state */
mp.mp_list[mp_h]++;
+ if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
+ break;
/* Here we've found a part of the metapath that is not
* allocated. We need to search at that height for the
* next non-null pointer. */
- if (find_nonnull_ptr(sdp, &mp, mp_h)) {
+ if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
state = DEALLOC_FILL_MP;
mp_h++;
}
@@ -1435,18 +1529,29 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
if (ret < 0)
goto out;
+ /* issue read-ahead on metadata */
+ if (mp.mp_aheight > 1) {
+ for (; ret > 1; ret--) {
+ metapointer_range(&mp, mp.mp_aheight - ret,
+ start_list, start_aligned,
+ end_list, end_aligned,
+ &start, &end);
+ gfs2_metapath_ra(ip->i_gl, start, end);
+ }
+ }
+
/* If buffers found for the entire strip height */
- if ((ret == ip->i_height) && (mp_h == strip_h)) {
+ if (mp.mp_aheight - 1 == strip_h) {
state = DEALLOC_MP_FULL;
break;
}
- if (ret < ip->i_height) /* We have a partial height */
- mp_h = ret - 1;
+ if (mp.mp_aheight < ip->i_height) /* We have a partial height */
+ mp_h = mp.mp_aheight - 1;
/* If we find a non-null block pointer, crawl a bit
higher up in the metapath and try again, otherwise
we need to look lower for a new starting point. */
- if (find_nonnull_ptr(sdp, &mp, mp_h))
+ if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
mp_h++;
else
state = DEALLOC_MP_LOWER;
@@ -1524,7 +1629,6 @@ out:
/**
* do_shrink - make a file smaller
* @inode: the inode
- * @oldsize: the current inode size
* @newsize: the size to make the file
*
* Called with an exclusive lock on @inode. The @size must
@@ -1533,18 +1637,18 @@ out:
* Returns: errno
*/
-static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
+static int do_shrink(struct inode *inode, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
int error;
- error = trunc_start(inode, oldsize, newsize);
+ error = trunc_start(inode, newsize);
if (error < 0)
return error;
if (gfs2_is_stuffed(ip))
return 0;
- error = trunc_dealloc(ip, newsize);
+ error = punch_hole(ip, newsize, 0);
if (error == 0)
error = trunc_end(ip);
@@ -1553,10 +1657,9 @@ static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
void gfs2_trim_blocks(struct inode *inode)
{
- u64 size = inode->i_size;
int ret;
- ret = do_shrink(inode, size, size);
+ ret = do_shrink(inode, inode->i_size);
WARN_ON(ret != 0);
}
@@ -1589,8 +1692,7 @@ static int do_grow(struct inode *inode, u64 size)
int error;
int unstuff = 0;
- if (gfs2_is_stuffed(ip) &&
- (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
+ if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
error = gfs2_quota_lock_check(ip, &ap);
if (error)
return error;
@@ -1650,7 +1752,6 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
int ret;
- u64 oldsize;
BUG_ON(!S_ISREG(inode->i_mode));
@@ -1664,13 +1765,12 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
if (ret)
goto out;
- oldsize = inode->i_size;
- if (newsize >= oldsize) {
+ if (newsize >= inode->i_size) {
ret = do_grow(inode, newsize);
goto out;
}
- ret = do_shrink(inode, oldsize, newsize);
+ ret = do_shrink(inode, newsize);
out:
gfs2_rsqa_delete(ip, NULL);
return ret;
@@ -1679,7 +1779,7 @@ out:
int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
int error;
- error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
+ error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
if (!error)
error = trunc_end(ip);
return error;
@@ -1687,7 +1787,7 @@ int gfs2_truncatei_resume(struct gfs2_inode *ip)
int gfs2_file_dealloc(struct gfs2_inode *ip)
{
- return trunc_dealloc(ip, 0);
+ return punch_hole(ip, 0, 0);
}
/**
@@ -1827,8 +1927,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
return 0;
if (gfs2_is_stuffed(ip)) {
- if (offset + len >
- sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
+ if (offset + len > gfs2_max_stuffed_size(ip))
return 1;
return 0;
}
@@ -1855,3 +1954,123 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
return 0;
}
+static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct buffer_head *dibh;
+ int error;
+
+ if (offset >= inode->i_size)
+ return 0;
+ if (offset + length > inode->i_size)
+ length = inode->i_size - offset;
+
+ error = gfs2_meta_inode_buffer(ip, &dibh);
+ if (error)
+ return error;
+ gfs2_trans_add_meta(ip->i_gl, dibh);
+ memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
+ length);
+ brelse(dibh);
+ return 0;
+}
+
+static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
+ loff_t length)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
+ int error;
+
+ while (length) {
+ struct gfs2_trans *tr;
+ loff_t chunk;
+ unsigned int offs;
+
+ chunk = length;
+ if (chunk > max_chunk)
+ chunk = max_chunk;
+
+ offs = offset & ~PAGE_MASK;
+ if (offs && chunk > PAGE_SIZE)
+ chunk = offs + ((chunk - offs) & PAGE_MASK);
+
+ truncate_pagecache_range(inode, offset, chunk);
+ offset += chunk;
+ length -= chunk;
+
+ tr = current->journal_info;
+ if (!test_bit(TR_TOUCHED, &tr->tr_flags))
+ continue;
+
+ gfs2_trans_end(sdp);
+ error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
+{
+ struct inode *inode = file_inode(file);
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ int error;
+
+ if (gfs2_is_jdata(ip))
+ error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
+ GFS2_JTRUNC_REVOKES);
+ else
+ error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+ if (error)
+ return error;
+
+ if (gfs2_is_stuffed(ip)) {
+ error = stuffed_zero_range(inode, offset, length);
+ if (error)
+ goto out;
+ } else {
+ unsigned int start_off, end_off, blocksize;
+
+ blocksize = i_blocksize(inode);
+ start_off = offset & (blocksize - 1);
+ end_off = (offset + length) & (blocksize - 1);
+ if (start_off) {
+ unsigned int len = length;
+ if (length > blocksize - start_off)
+ len = blocksize - start_off;
+ error = gfs2_block_zero_range(inode, offset, len);
+ if (error)
+ goto out;
+ if (start_off + length < blocksize)
+ end_off = 0;
+ }
+ if (end_off) {
+ error = gfs2_block_zero_range(inode,
+ offset + length - end_off, end_off);
+ if (error)
+ goto out;
+ }
+ }
+
+ if (gfs2_is_jdata(ip)) {
+ BUG_ON(!current->journal_info);
+ gfs2_journaled_truncate_range(inode, offset, length);
+ } else
+ truncate_pagecache_range(inode, offset, offset + length - 1);
+
+ file_update_time(file);
+ mark_inode_dirty(inode);
+
+ if (current->journal_info)
+ gfs2_trans_end(sdp);
+
+ if (!gfs2_is_stuffed(ip))
+ error = punch_hole(ip, offset, length);
+
+out:
+ if (current->journal_info)
+ gfs2_trans_end(sdp);
+ return error;
+}
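
The new punch_hole() deliberately works in whole blocks: the start is rounded up and the end rounded down to block boundaries, while __gfs2_punch_hole() zeroes the partial blocks at either edge before deallocating. Likewise, gfs2_journaled_truncate() sizes each transaction-bounded pass so the shrinking file size lands on a page boundary. Both calculations as a hedged sketch; the helper names are hypothetical:

    #include <linux/mm.h>

    /* Whole blocks fully inside [offset, offset + length): [*first, *end). */
    static void hole_block_range(u64 offset, u64 length, unsigned int shift,
                                 u64 *first, u64 *end)
    {
        *first = (offset + (1ULL << shift) - 1) >> shift; /* round up */
        *end = (offset + length) >> shift;                /* round down */
    }

    /* Clamp @chunk so that (size - chunk) is page aligned, unless the
       whole chunk fits within the partial last page. */
    static u64 page_aligned_chunk(u64 size, u64 chunk)
    {
        unsigned int offs = size & ~PAGE_MASK;  /* offset within page */

        if (offs && chunk > PAGE_SIZE)
            chunk = offs + ((chunk - offs) & PAGE_MASK);
        return chunk;
    }
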
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index 443cc182cf18..c3402fe00653 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -61,5 +61,6 @@ extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
unsigned int len);
extern int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
extern void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
+extern int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
#endif /* __BMAP_DOT_H__ */
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 06a0d1947c77..7c21aea0266b 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -170,8 +170,7 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
if (!size)
return 0;
- if (gfs2_is_stuffed(ip) &&
- offset + size <= sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
+ if (gfs2_is_stuffed(ip) && offset + size <= gfs2_max_stuffed_size(ip))
return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
size);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 58705ef8643a..4f88e201b3f0 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -246,7 +246,9 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
}
if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
if (new_flags & GFS2_DIF_JDATA)
- gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, ip->i_gl,
+ GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_SET_FLAGS);
error = filemap_fdatawrite(inode->i_mapping);
if (error)
goto out;
@@ -924,7 +926,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t le
struct gfs2_holder gh;
int ret;
- if (mode & ~FALLOC_FL_KEEP_SIZE)
+ if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
return -EOPNOTSUPP;
/* fallocate is needed by gfs2_grow to reserve space in the rindex */
if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
@@ -948,13 +950,18 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t le
if (ret)
goto out_unlock;
- ret = gfs2_rsqa_alloc(ip);
- if (ret)
- goto out_putw;
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ ret = __gfs2_punch_hole(file, offset, len);
+ } else {
+ ret = gfs2_rsqa_alloc(ip);
+ if (ret)
+ goto out_putw;
- ret = __gfs2_fallocate(file, mode, offset, len);
- if (ret)
- gfs2_rs_deltree(&ip->i_res);
+ ret = __gfs2_fallocate(file, mode, offset, len);
+
+ if (ret)
+ gfs2_rs_deltree(&ip->i_res);
+ }
out_putw:
put_write_access(inode);
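
With the file.c hook in place, hole punching becomes reachable from userspace through fallocate(2); FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE. A userspace usage sketch:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>

    /* Deallocate len bytes at offset without changing the file size. */
    static int punch(int fd, off_t offset, off_t len)
    {
        return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         offset, len);
    }
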
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 11066d8647d2..90af87ff29ba 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1549,16 +1549,13 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
rhashtable_walk_enter(&gl_hash_table, &iter);
do {
- gl = ERR_PTR(rhashtable_walk_start(&iter));
- if (IS_ERR(gl))
- goto walk_stop;
+ rhashtable_walk_start(&iter);
while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
if (gl->gl_name.ln_sbd == sdp &&
lockref_get_not_dead(&gl->gl_lockref))
examiner(gl);
-walk_stop:
rhashtable_walk_stop(&iter);
} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
@@ -1947,7 +1944,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
loff_t n = *pos;
rhashtable_walk_enter(&gl_hash_table, &gi->hti);
- if (rhashtable_walk_start(&gi->hti) != 0)
+ if (rhashtable_walk_start_check(&gi->hti) != 0)
return NULL;
do {
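
The glock.c hunks track the rhashtable API change that made rhashtable_walk_start() return void; only -EAGAIN remains interesting, and it now surfaces from rhashtable_walk_next(), while callers that still want the checking form use rhashtable_walk_start_check(). The canonical walk, as a sketch with hypothetical object types:

    #include <linux/rhashtable.h>
    #include <linux/sched.h>

    struct obj;
    void use_obj(struct obj *obj);  /* hypothetical per-object work */

    static void walk_all(struct rhashtable *ht)
    {
        struct rhashtable_iter iter;
        struct obj *obj;

        rhashtable_walk_enter(ht, &iter);
        do {
            rhashtable_walk_start(&iter);
            while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
                use_obj(obj);
            rhashtable_walk_stop(&iter);
        } while (cond_resched(), obj == ERR_PTR(-EAGAIN));
        rhashtable_walk_exit(&iter);
    }
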
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index cdd1c5f06f45..d8782a7a1e7d 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -107,7 +107,8 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
__gfs2_ail_flush(gl, 0, tr.tr_revokes);
gfs2_trans_end(sdp);
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_AIL_EMPTY_GL);
}
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
@@ -128,7 +129,8 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
return;
__gfs2_ail_flush(gl, fsync, max_revokes);
gfs2_trans_end(sdp);
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_AIL_FLUSH);
}
/**
@@ -157,7 +159,8 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
return;
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
- gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_RGRP_GO_SYNC);
filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
mapping_set_error(mapping, error);
@@ -252,7 +255,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
- gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
+ gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_INODE_GO_SYNC);
filemap_fdatawrite(metamapping);
if (isreg) {
struct address_space *mapping = ip->i_inode.i_mapping;
@@ -303,7 +307,9 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
}
if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
- gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
+ GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_INODE_GO_INVAL);
gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
}
if (ip && S_ISREG(ip->i_inode.i_mode))
@@ -495,7 +501,8 @@ static void freeze_go_sync(struct gfs2_glock *gl)
gfs2_assert_withdraw(sdp, 0);
}
queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
- gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+ GFS2_LFC_FREEZE_GO_SYNC);
}
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6e18e9793ec4..e0557b8a590a 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -44,7 +44,6 @@ struct gfs2_log_header_host {
u32 lh_flags; /* GFS2_LOG_HEAD_... */
u32 lh_tail; /* Block number of log tail */
u32 lh_blkno;
- u32 lh_hash;
};
/*
@@ -861,5 +860,10 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
+{
+ return GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
+}
+
#endif /* __INCORE_DOT_H__ */
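
gfs2_max_stuffed_size() centralizes the sb_bsize - sizeof(struct gfs2_dinode) expression that the earlier hunks delete from aops.c, bmap.c, dir.c and inode.c; note the symlink path compares with >= so one byte stays free for the terminating NUL. A sketch of the typical caller-side check, mirroring do_grow() and gfs2_write_alloc_required(); fits_stuffed() is a hypothetical helper:

    /* Does [offset, offset + len) still fit in the stuffed (inline)
       portion of the dinode block? */
    static bool fits_stuffed(const struct gfs2_inode *ip, u64 offset, u64 len)
    {
        return offset + len <= gfs2_max_stuffed_size(ip);
    }
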
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 4e971b1c7f92..59e0560180ec 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1152,12 +1152,11 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
error = gfs2_trans_begin(sdp, 2*RES_DINODE + 3*RES_LEAF + RES_RG_BIT, 0);
if (error)
- goto out_end_trans;
+ goto out_gunlock;
error = gfs2_unlink_inode(dip, dentry);
-
-out_end_trans:
gfs2_trans_end(sdp);
+
out_gunlock:
gfs2_glock_dq(ghs + 2);
out_rgrp:
@@ -1184,11 +1183,10 @@ out_inodes:
static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
- struct gfs2_sbd *sdp = GFS2_SB(dir);
unsigned int size;
size = strlen(symname);
- if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1)
+ if (size >= gfs2_max_stuffed_size(GFS2_I(dir)))
return -ENAMETOOLONG;
return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL);
@@ -1205,8 +1203,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
- struct gfs2_sbd *sdp = GFS2_SB(dir);
- unsigned dsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
+ unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL);
}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 65f33a0ac190..006c6164f759 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1091,7 +1091,7 @@ static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
spin_lock(&ls->ls_recover_spin);
if (ls->ls_recover_size < jid + 1) {
- fs_err(sdp, "recover_slot jid %d gen %u short size %d",
+ fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
jid, ls->ls_recover_block, ls->ls_recover_size);
spin_unlock(&ls->ls_recover_spin);
return;
@@ -1153,7 +1153,7 @@ static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
return;
}
if (ls->ls_recover_size < jid + 1) {
- fs_err(sdp, "recovery_result jid %d short size %d",
+ fs_err(sdp, "recovery_result jid %d short size %d\n",
jid, ls->ls_recover_size);
spin_unlock(&ls->ls_recover_spin);
return;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f72c44231406..cf6b46247df4 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -14,6 +14,7 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
+#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -538,9 +539,12 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
while (!list_empty(&sdp->sd_log_le_ordered)) {
ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
- list_move(&ip->i_ordered, &written);
- if (ip->i_inode.i_mapping->nrpages == 0)
+ if (ip->i_inode.i_mapping->nrpages == 0) {
+ test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
+ list_del(&ip->i_ordered);
continue;
+ }
+ list_move(&ip->i_ordered, &written);
spin_unlock(&sdp->sd_ordered_lock);
filemap_fdatawrite(ip->i_inode.i_mapping);
spin_lock(&sdp->sd_ordered_lock);
@@ -648,49 +652,102 @@ out_of_blocks:
}
/**
- * log_write_header - Get and initialize a journal header buffer
+ * gfs2_write_log_header - Write a journal log header buffer at sd_log_flush_head
* @sdp: The GFS2 superblock
+ * @jd: journal descriptor of the journal to which we are writing
+ * @seq: sequence number
+ * @tail: tail of the log
+ * @flags: log header flags GFS2_LOG_HEAD_*
+ * @op_flags: flags to pass to the bio
*
* Returns: the initialized log buffer descriptor
*/
-static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
+void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ u64 seq, u32 tail, u32 flags, int op_flags)
{
struct gfs2_log_header *lh;
- unsigned int tail;
- u32 hash;
- int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
+ u32 hash, crc;
struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
- enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+ struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+ struct timespec64 tv;
+ struct super_block *sb = sdp->sd_vfs;
+ u64 addr;
+
lh = page_address(page);
clear_page(lh);
- gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
-
- tail = current_tail(sdp);
-
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
lh->lh_header.__pad0 = cpu_to_be64(0);
lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
- lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
+ lh->lh_sequence = cpu_to_be64(seq);
lh->lh_flags = cpu_to_be32(flags);
lh->lh_tail = cpu_to_be32(tail);
lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
- hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
+ hash = ~crc32(~0, lh, LH_V1_SIZE);
lh->lh_hash = cpu_to_be32(hash);
+ tv = current_kernel_time64();
+ lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
+ lh->lh_sec = cpu_to_be64(tv.tv_sec);
+ addr = gfs2_log_bmap(sdp);
+ lh->lh_addr = cpu_to_be64(addr);
+ lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);
+
+ /* We may only write local statfs, quota, etc., when writing to our
+ own journal. The values are left 0 when recovering a journal
+ different from our own. */
+ if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
+ lh->lh_statfs_addr =
+ cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
+ lh->lh_quota_addr =
+ cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);
+
+ spin_lock(&sdp->sd_statfs_spin);
+ lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
+ lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
+ lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
+ spin_unlock(&sdp->sd_statfs_spin);
+ }
+
+ BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);
+
+ crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
+ sb->s_blocksize - LH_V1_SIZE - 4);
+ lh->lh_crc = cpu_to_be32(crc);
+
+ gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
+ gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
+ log_flush_wait(sdp);
+}
+
+/**
+ * log_write_header - Get and initialize a journal header buffer
+ * @sdp: The GFS2 superblock
+ * @flags: The log header flags, including log header origin
+ *
+ * Returns: the initialized log buffer descriptor
+ */
+
+static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
+{
+ unsigned int tail;
+ int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
+ enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+
+ gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
+ tail = current_tail(sdp);
+
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
gfs2_ordered_wait(sdp);
log_flush_wait(sdp);
op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
}
-
sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
- gfs2_log_write_page(sdp, page);
- gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
- log_flush_wait(sdp);
+ gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
+ flags, op_flags);
if (sdp->sd_log_tail != tail)
log_pull_tail(sdp, tail);
@@ -700,11 +757,11 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
* gfs2_log_flush - flush incore transaction(s)
* @sdp: the filesystem
* @gl: The glock structure to flush. If NULL, flush the whole incore log
+ * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
*
*/
-void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
- enum gfs2_flush_type type)
+void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
struct gfs2_trans *tr;
enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
@@ -716,9 +773,9 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
up_write(&sdp->sd_log_flush_lock);
return;
}
- trace_gfs2_log_flush(sdp, 1);
+ trace_gfs2_log_flush(sdp, 1, flags);
- if (type == SHUTDOWN_FLUSH)
+ if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
sdp->sd_log_flush_head = sdp->sd_log_head;
@@ -743,11 +800,11 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
log_flush_wait(sdp);
- log_write_header(sdp, 0);
+ log_write_header(sdp, flags);
} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
trace_gfs2_log_blocks(sdp, -1);
- log_write_header(sdp, 0);
+ log_write_header(sdp, flags);
}
lops_after_commit(sdp, tr);
@@ -764,7 +821,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
- if (type != NORMAL_FLUSH) {
+ if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
if (!sdp->sd_log_idle) {
for (;;) {
gfs2_ail1_start(sdp);
@@ -774,16 +831,17 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
}
atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
trace_gfs2_log_blocks(sdp, -1);
- log_write_header(sdp, 0);
+ log_write_header(sdp, flags);
sdp->sd_log_head = sdp->sd_log_flush_head;
}
- if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH)
+ if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
+ GFS2_LOG_HEAD_FLUSH_FREEZE))
gfs2_log_shutdown(sdp);
- if (type == FREEZE_FLUSH)
+ if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
}
- trace_gfs2_log_flush(sdp, 0);
+ trace_gfs2_log_flush(sdp, 0, flags);
up_write(&sdp->sd_log_flush_lock);
kfree(tr);
@@ -879,7 +937,7 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
sdp->sd_log_flush_head = sdp->sd_log_head;
- log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);
+ log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
@@ -935,7 +993,8 @@ int gfs2_logd(void *data)
did_flush = false;
if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
gfs2_ail1_empty(sdp);
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_LOGD_JFLUSH_REQD);
did_flush = true;
}
@@ -943,7 +1002,8 @@ int gfs2_logd(void *data)
gfs2_ail1_start(sdp);
gfs2_ail1_wait(sdp);
gfs2_ail1_empty(sdp);
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_LOGD_AIL_FLUSH_REQD);
did_flush = true;
}
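
gfs2_write_log_header() now stamps the header with two checksums: the legacy lh_hash, ~crc32(~0, ...) over the LH_V1_SIZE bytes of version-1 fields, and a new crc32c over the remainder of the block that skips the 4-byte lh_crc slot itself, which is why the Kconfig hunk selects CRYPTO and CRYPTO_CRC32C. The layout as a sketch, assuming LH_V1_SIZE from the on-disk header definitions:

    #include <linux/crc32.h>
    #include <linux/crc32c.h>

    static void checksum_log_header(void *lh, unsigned int blocksize,
                                    u32 *hash, u32 *crc)
    {
        /* Legacy hash: version-1 header fields only. */
        *hash = ~crc32(~0, lh, LH_V1_SIZE);
        /* crc32c over everything after the v1 fields, skipping the
           4-byte lh_crc field itself. */
        *crc = crc32c(~0, lh + LH_V1_SIZE + 4,
                      blocksize - LH_V1_SIZE - 4);
    }
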
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 9499a6049212..93b52ac1ca1f 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -65,14 +65,10 @@ extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
-enum gfs2_flush_type {
- NORMAL_FLUSH = 0,
- SYNC_FLUSH,
- SHUTDOWN_FLUSH,
- FREEZE_FLUSH
-};
+extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ u64 seq, u32 tail, u32 flags, int op_flags);
extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
- enum gfs2_flush_type type);
+ u32 type);
extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index c8ff7b7954f0..4d6567990baf 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -18,6 +18,7 @@
#include <linux/fs.h>
#include <linux/list_sort.h>
+#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
@@ -138,7 +139,7 @@ static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
sdp->sd_log_flush_head = 0;
}
-static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
+u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
unsigned int lbn = sdp->sd_log_flush_head;
struct gfs2_journal_extent *je;
@@ -161,7 +162,7 @@ static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
* @bvec: The bio_vec
* @error: The i/o status
*
- * This finds the relavent buffers and unlocks then and sets the
+ * This finds the relevant buffers and unlocks them and sets the
* error flag according to the status of the i/o request. This is
* used when the log is writing data which has an in-place version
* that is pinned in the pagecache.
@@ -306,23 +307,22 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
return gfs2_log_alloc_bio(sdp, blkno);
}
-
/**
* gfs2_log_write - write to log
* @sdp: the filesystem
* @page: the page to write
* @size: the size of the data to write
* @offset: the offset within the page
+ * @blkno: block number of the log entry
*
* Try and add the page segment to the current bio. If that fails,
* submit the current bio to the device and create a new one, and
* then add the page segment to that.
*/
-static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
- unsigned size, unsigned offset)
+void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
+ unsigned size, unsigned offset, u64 blkno)
{
- u64 blkno = gfs2_log_bmap(sdp);
struct bio *bio;
int ret;
@@ -348,7 +348,8 @@ static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
- gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
+ gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
+ gfs2_log_bmap(sdp));
}
/**
@@ -365,7 +366,8 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
struct super_block *sb = sdp->sd_vfs;
- gfs2_log_write(sdp, page, sb->s_blocksize, 0);
+ gfs2_log_write(sdp, page, sb->s_blocksize, 0,
+ gfs2_log_bmap(sdp));
}
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index e529f536c117..e4949394f054 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -26,6 +26,9 @@ extern const struct gfs2_log_operations gfs2_revoke_lops;
extern const struct gfs2_log_operations gfs2_databuf_lops;
extern const struct gfs2_log_operations *gfs2_log_ops[];
+extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp);
+extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
+ unsigned size, unsigned offset, u64 blkno);
extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags);
extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
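
gfs2_log_write() and gfs2_log_bmap() are un-static'ed here, and gfs2_log_write() takes the target block number from its caller instead of computing it internally, so the new gfs2_write_log_header() can place a header at an arbitrary journal block while the existing callers simply thread gfs2_log_bmap(sdp) through. A small sketch of hoisting a computed default out of a helper; all types and names below are stand-ins.

#include <stdint.h>
#include <stdio.h>

struct sbd { uint64_t flush_head; };    /* stand-in filesystem state */

static uint64_t log_bmap(struct sbd *sdp)
{
    return sdp->flush_head++;           /* next physical log block */
}

/* The callee no longer picks the block itself ... */
static void log_write(struct sbd *sdp, void *buf, uint64_t blkno)
{
    (void)sdp;
    printf("write %p at block %llu\n", buf, (unsigned long long)blkno);
}

int main(void)
{
    struct sbd sdp = { .flush_head = 100 };
    char page[16];

    /* ... ordinary callers thread the old default through explicitly, */
    log_write(&sdp, page, log_bmap(&sdp));
    /* while recovery can now target any block it likes. */
    log_write(&sdp, page, 7);
    return 0;
}
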
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 0a89e6f7a314..2d55e2c3333c 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -93,7 +93,7 @@ static int __init init_gfs2_fs(void)
error = gfs2_glock_init();
if (error)
- goto fail;
+ goto fail_glock;
error = -ENOMEM;
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
@@ -101,7 +101,7 @@ static int __init init_gfs2_fs(void)
0, 0,
gfs2_init_glock_once);
if (!gfs2_glock_cachep)
- goto fail;
+ goto fail_cachep1;
gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
sizeof(struct gfs2_glock) +
@@ -109,7 +109,7 @@ static int __init init_gfs2_fs(void)
0, 0, gfs2_init_gl_aspace_once);
if (!gfs2_glock_aspace_cachep)
- goto fail;
+ goto fail_cachep2;
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
@@ -118,107 +118,105 @@ static int __init init_gfs2_fs(void)
SLAB_ACCOUNT,
gfs2_init_inode_once);
if (!gfs2_inode_cachep)
- goto fail;
+ goto fail_cachep3;
gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
sizeof(struct gfs2_bufdata),
0, 0, NULL);
if (!gfs2_bufdata_cachep)
- goto fail;
+ goto fail_cachep4;
gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
sizeof(struct gfs2_rgrpd),
0, 0, NULL);
if (!gfs2_rgrpd_cachep)
- goto fail;
+ goto fail_cachep5;
gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
sizeof(struct gfs2_quota_data),
0, 0, NULL);
if (!gfs2_quotad_cachep)
- goto fail;
+ goto fail_cachep6;
gfs2_qadata_cachep = kmem_cache_create("gfs2_qadata",
sizeof(struct gfs2_qadata),
0, 0, NULL);
if (!gfs2_qadata_cachep)
- goto fail;
+ goto fail_cachep7;
error = register_shrinker(&gfs2_qd_shrinker);
if (error)
- goto fail;
+ goto fail_shrinker;
error = register_filesystem(&gfs2_fs_type);
if (error)
- goto fail;
+ goto fail_fs1;
error = register_filesystem(&gfs2meta_fs_type);
if (error)
- goto fail_unregister;
+ goto fail_fs2;
error = -ENOMEM;
gfs_recovery_wq = alloc_workqueue("gfs_recovery",
WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
if (!gfs_recovery_wq)
- goto fail_wq;
+ goto fail_wq1;
gfs2_control_wq = alloc_workqueue("gfs2_control",
WQ_UNBOUND | WQ_FREEZABLE, 0);
if (!gfs2_control_wq)
- goto fail_recovery;
+ goto fail_wq2;
gfs2_freeze_wq = alloc_workqueue("freeze_workqueue", 0, 0);
if (!gfs2_freeze_wq)
- goto fail_control;
+ goto fail_wq3;
gfs2_page_pool = mempool_create_page_pool(64, 0);
if (!gfs2_page_pool)
- goto fail_freeze;
+ goto fail_mempool;
- gfs2_register_debugfs();
+ error = gfs2_register_debugfs();
+ if (error)
+ goto fail_debugfs;
pr_info("GFS2 installed\n");
return 0;
-fail_freeze:
+fail_debugfs:
+ mempool_destroy(gfs2_page_pool);
+fail_mempool:
destroy_workqueue(gfs2_freeze_wq);
-fail_control:
+fail_wq3:
destroy_workqueue(gfs2_control_wq);
-fail_recovery:
+fail_wq2:
destroy_workqueue(gfs_recovery_wq);
-fail_wq:
+fail_wq1:
unregister_filesystem(&gfs2meta_fs_type);
-fail_unregister:
+fail_fs2:
unregister_filesystem(&gfs2_fs_type);
-fail:
- list_lru_destroy(&gfs2_qd_lru);
-fail_lru:
+fail_fs1:
unregister_shrinker(&gfs2_qd_shrinker);
+fail_shrinker:
+ kmem_cache_destroy(gfs2_qadata_cachep);
+fail_cachep7:
+ kmem_cache_destroy(gfs2_quotad_cachep);
+fail_cachep6:
+ kmem_cache_destroy(gfs2_rgrpd_cachep);
+fail_cachep5:
+ kmem_cache_destroy(gfs2_bufdata_cachep);
+fail_cachep4:
+ kmem_cache_destroy(gfs2_inode_cachep);
+fail_cachep3:
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
+fail_cachep2:
+ kmem_cache_destroy(gfs2_glock_cachep);
+fail_cachep1:
gfs2_glock_exit();
-
- if (gfs2_qadata_cachep)
- kmem_cache_destroy(gfs2_qadata_cachep);
-
- if (gfs2_quotad_cachep)
- kmem_cache_destroy(gfs2_quotad_cachep);
-
- if (gfs2_rgrpd_cachep)
- kmem_cache_destroy(gfs2_rgrpd_cachep);
-
- if (gfs2_bufdata_cachep)
- kmem_cache_destroy(gfs2_bufdata_cachep);
-
- if (gfs2_inode_cachep)
- kmem_cache_destroy(gfs2_inode_cachep);
-
- if (gfs2_glock_aspace_cachep)
- kmem_cache_destroy(gfs2_glock_aspace_cachep);
-
- if (gfs2_glock_cachep)
- kmem_cache_destroy(gfs2_glock_cachep);
-
+fail_glock:
+ list_lru_destroy(&gfs2_qd_lru);
+fail_lru:
gfs2_sys_uninit();
return error;
}
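
The init_gfs2_fs() rewrite above replaces a single catch-all fail: label, which had to NULL-check every cache before destroying it, with one label per acquired resource. A failure at step N jumps to the label that releases steps N-1 down to 1, in reverse order, so no pointer ever needs a NULL check. A compact runnable sketch of the idiom, with hypothetical resource names:

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name) { printf("acquire %s\n", name); return malloc(1); }
static void release(void *p, const char *name) { printf("release %s\n", name); free(p); }

static int init_module_like(void)
{
    void *a, *b, *c;

    a = acquire("cachep1");
    if (!a)
        goto fail_a;
    b = acquire("cachep2");
    if (!b)
        goto fail_b;
    c = acquire("workqueue");
    if (!c)
        goto fail_c;
    return 0;   /* success: a, b, c now owned by the module */

    /* Each label releases only what was acquired before its goto. */
fail_c:
    release(b, "cachep2");
fail_b:
    release(a, "cachep1");
fail_a:
    return -1;
}

int main(void) { return init_module_like(); }
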
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ad55eb86a250..e6a0a8a89ea7 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1382,7 +1382,7 @@ static void gfs2_kill_sb(struct super_block *sb)
return;
}
- gfs2_log_flush(sdp, NULL, SYNC_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SYNC | GFS2_LFC_KILL_SB);
dput(sdp->sd_root_dir);
dput(sdp->sd_master_dir);
sdp->sd_root_dir = NULL;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index e700fb162664..7a98abd340ee 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -955,7 +955,8 @@ out:
gfs2_glock_dq_uninit(&ghs[qx]);
inode_unlock(&ip->i_inode);
kfree(ghs);
- gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
+ GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
return error;
}
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 9395a3db1a60..b6b258998bcd 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -14,12 +14,14 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
+#include <linux/crc32c.h>
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
+#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
@@ -117,22 +119,6 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd)
}
}
-static int gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf)
-{
- const struct gfs2_log_header *str = buf;
-
- if (str->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) ||
- str->lh_header.mh_type != cpu_to_be32(GFS2_METATYPE_LH))
- return 1;
-
- lh->lh_sequence = be64_to_cpu(str->lh_sequence);
- lh->lh_flags = be32_to_cpu(str->lh_flags);
- lh->lh_tail = be32_to_cpu(str->lh_tail);
- lh->lh_blkno = be32_to_cpu(str->lh_blkno);
- lh->lh_hash = be32_to_cpu(str->lh_hash);
- return 0;
-}
-
/**
* get_log_header - read the log header for a given segment
* @jd: the journal
@@ -150,29 +136,37 @@ static int gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf)
static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
struct gfs2_log_header_host *head)
{
+ struct gfs2_log_header *lh;
struct buffer_head *bh;
- struct gfs2_log_header_host uninitialized_var(lh);
- const u32 nothing = 0;
- u32 hash;
+ u32 hash, crc;
int error;
error = gfs2_replay_read_block(jd, blk, &bh);
if (error)
return error;
+ lh = (void *)bh->b_data;
- hash = crc32_le((u32)~0, bh->b_data, sizeof(struct gfs2_log_header) -
- sizeof(u32));
- hash = crc32_le(hash, (unsigned char const *)&nothing, sizeof(nothing));
- hash ^= (u32)~0;
- error = gfs2_log_header_in(&lh, bh->b_data);
- brelse(bh);
+ hash = crc32(~0, lh, LH_V1_SIZE - 4);
+ hash = ~crc32_le_shift(hash, 4); /* assume lh_hash is zero */
- if (error || lh.lh_blkno != blk || lh.lh_hash != hash)
- return 1;
+ crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
+ bh->b_size - LH_V1_SIZE - 4);
- *head = lh;
+ error = lh->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) ||
+ lh->lh_header.mh_type != cpu_to_be32(GFS2_METATYPE_LH) ||
+ be32_to_cpu(lh->lh_blkno) != blk ||
+ be32_to_cpu(lh->lh_hash) != hash ||
+ (lh->lh_crc != 0 && be32_to_cpu(lh->lh_crc) != crc);
- return 0;
+ if (!error) {
+ head->lh_sequence = be64_to_cpu(lh->lh_sequence);
+ head->lh_flags = be32_to_cpu(lh->lh_flags);
+ head->lh_tail = be32_to_cpu(lh->lh_tail);
+ head->lh_blkno = be32_to_cpu(lh->lh_blkno);
+ }
+ brelse(bh);
+ return error;
}
/**
@@ -370,62 +364,22 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
/**
* clean_journal - mark a dirty journal as being clean
- * @sdp: the filesystem
* @jd: the journal
- * @gl: the journal's glock
* @head: the head journal to start from
*
* Returns: errno
*/
-static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
+static void clean_journal(struct gfs2_jdesc *jd,
+ struct gfs2_log_header_host *head)
{
- struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
- unsigned int lblock;
- struct gfs2_log_header *lh;
- u32 hash;
- struct buffer_head *bh;
- int error;
- struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
-
- lblock = head->lh_blkno;
- gfs2_replay_incr_blk(jd, &lblock);
- bh_map.b_size = 1 << ip->i_inode.i_blkbits;
- error = gfs2_block_map(&ip->i_inode, lblock, &bh_map, 0);
- if (error)
- return error;
- if (!bh_map.b_blocknr) {
- gfs2_consist_inode(ip);
- return -EIO;
- }
-
- bh = sb_getblk(sdp->sd_vfs, bh_map.b_blocknr);
- lock_buffer(bh);
- memset(bh->b_data, 0, bh->b_size);
- set_buffer_uptodate(bh);
- clear_buffer_dirty(bh);
- unlock_buffer(bh);
-
- lh = (struct gfs2_log_header *)bh->b_data;
- memset(lh, 0, sizeof(struct gfs2_log_header));
- lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
- lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
- lh->lh_header.__pad0 = cpu_to_be64(0);
- lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
- lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
- lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
- lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
- lh->lh_blkno = cpu_to_be32(lblock);
- hash = gfs2_disk_hash((const char *)lh, sizeof(struct gfs2_log_header));
- lh->lh_hash = cpu_to_be32(hash);
-
- set_buffer_dirty(bh);
- if (sync_dirty_buffer(bh))
- gfs2_io_error_bh(sdp, bh);
- brelse(bh);
- return error;
+ sdp->sd_log_flush_head = head->lh_blkno;
+ gfs2_replay_incr_blk(jd, &sdp->sd_log_flush_head);
+ gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0,
+ GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY,
+ REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC);
}
@@ -552,9 +506,7 @@ void gfs2_recover_func(struct work_struct *work)
goto fail_gunlock_thaw;
}
- error = clean_journal(jd, &head);
- if (error)
- goto fail_gunlock_thaw;
+ clean_journal(jd, &head);
gfs2_glock_dq_uninit(&thaw_gh);
t = DIV_ROUND_UP(jiffies - t, HZ);
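
get_log_header() now verifies two checksums instead of unmarshalling first: a crc32 over the v1 portion of the header with the lh_hash field treated as zero (crc32_le_shift() appends the four zero bytes without touching the buffer), plus a crc32c over the remainder of the block that is only enforced when lh_crc is non-zero, which keeps old headers valid. A runnable sketch of the "checksum with the checksum field zeroed" idea; toy_crc and the struct layout are stand-ins for the kernel's crc32 and the on-disk header.

#include <stdint.h>
#include <stdio.h>

struct log_header {      /* illustrative layout, not the on-disk one */
    uint32_t magic;
    uint32_t seq;
    uint32_t hash;       /* covers the header with this field as 0 */
};

static uint32_t toy_crc(const void *buf, unsigned len)
{
    const uint8_t *p = buf;
    uint32_t crc = ~0u;
    while (len--)
        crc = (crc << 5) + crc + *p++;   /* djb2-ish stand-in */
    return ~crc;
}

static uint32_t header_hash(const struct log_header *lh)
{
    struct log_header tmp = *lh;
    tmp.hash = 0;        /* same effect as hashing zero bytes in place */
    return toy_crc(&tmp, sizeof(tmp));
}

int main(void)
{
    struct log_header lh = { .magic = 0x01161970, .seq = 42 };
    lh.hash = header_hash(&lh);
    printf("valid: %d\n", header_hash(&lh) == lh.hash);
    lh.seq++;            /* corrupt it */
    printf("valid: %d\n", header_hash(&lh) == lh.hash);
    return 0;
}
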
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 95b2a57ded33..8b683917a27e 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -34,6 +34,7 @@
#include "log.h"
#include "inode.h"
#include "trace_gfs2.h"
+#include "dir.h"
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)
@@ -489,6 +490,13 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
* @blk: The data block number
* @exact: True if this needs to be an exact match
*
+ * The @exact argument should be set to true by most callers. The exception
+ * is when we need to match blocks which are not represented by the rgrp
+ * bitmap but are still part of the rgrp, i.e. padding blocks inserted for
+ * alignment purposes. Another way of looking at it: with @exact true, only
+ * valid data/metadata blocks match; with @exact false, any block within
+ * the extent of the rgrp matches.
+ *
* Returns: The resource group, or NULL if not found
*/
@@ -1040,17 +1048,30 @@ static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
rgd->rd_free = be32_to_cpu(str->rg_free);
rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
+ /* rd_data0, rd_data and rd_bitbytes already set from rindex */
}
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
+ struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
struct gfs2_rgrp *str = buf;
+ u32 crc;
str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
str->rg_free = cpu_to_be32(rgd->rd_free);
str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
- str->__pad = cpu_to_be32(0);
+ if (next == NULL)
+ str->rg_skip = 0;
+ else if (next->rd_addr > rgd->rd_addr)
+ str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
+ str->rg_data0 = cpu_to_be64(rgd->rd_data0);
+ str->rg_data = cpu_to_be32(rgd->rd_data);
+ str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
+ str->rg_crc = 0;
+ crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp));
+ str->rg_crc = cpu_to_be32(crc);
+
memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}
@@ -1318,7 +1339,7 @@ start_new_extent:
fail:
if (sdp->sd_args.ar_discard)
- fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
+ fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
sdp->sd_args.ar_discard = 0;
return -EIO;
}
@@ -2072,7 +2093,8 @@ next_rgrp:
}
/* Flushing the log may release space */
if (loops == 2)
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_INPLACE_RESERVE);
}
return -ENOSPC;
@@ -2453,12 +2475,12 @@ void gfs2_unlink_di(struct inode *inode)
update_rgrp_lvb_unlinked(rgd, 1);
}
-static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
+void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
struct gfs2_rgrpd *tmp_rgd;
- tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
+ tmp_rgd = rgblk_free(sdp, ip->i_no_addr, 1, GFS2_BLKST_FREE);
if (!tmp_rgd)
return;
gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
@@ -2474,12 +2496,6 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
update_rgrp_lvb_unlinked(rgd, -1);
gfs2_statfs_change(sdp, 0, +1, -1);
-}
-
-
-void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
-{
- gfs2_free_uninit_di(rgd, ip->i_no_addr);
trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
gfs2_meta_wipe(ip, ip->i_no_addr, 1);
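
gfs2_rgrp_out() above starts writing three kinds of redundancy into each resource group header: the rindex fields (rg_data0, rg_data, rg_bitbytes), a self-checksum computed with rg_crc zeroed first, and rg_skip, the block distance to the next rgrp (0 when there is none), so a checker can walk rgrps without trusting rindex. A sketch of the rg_skip computation with stand-in types; unlike the kernel code, it also returns 0 when the next rgrp's address does not increase.

#include <stdint.h>
#include <stdio.h>

struct rgrpd { uint64_t addr; };    /* stand-in for struct gfs2_rgrpd */

/* Distance hint to the next resource group; 0 means "no next rgrp". */
static uint32_t rg_skip(const struct rgrpd *rgd, const struct rgrpd *next)
{
    if (!next || next->addr <= rgd->addr)
        return 0;
    return (uint32_t)(next->addr - rgd->addr);
}

int main(void)
{
    struct rgrpd a = { .addr = 1000 }, b = { .addr = 65536 + 1000 };
    printf("skip(a) = %u blocks\n", rg_skip(&a, &b));    /* 65536 */
    printf("skip(b) = %u blocks\n", rg_skip(&b, NULL));  /* 0 */
    return 0;
}
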
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index d81d46e19726..620be0521866 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -757,7 +757,9 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
if (flush_all)
- gfs2_log_flush(GFS2_SB(inode), ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
+ GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_WRITE_INODE);
if (bdi->wb.dirty_exceeded)
gfs2_ail1_flush(sdp, wbc);
else
@@ -766,6 +768,12 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
ret = filemap_fdatawait(metamapping);
if (ret)
mark_inode_dirty_sync(inode);
+ else {
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_flags & I_DIRTY))
+ gfs2_ordered_del_inode(ip);
+ spin_unlock(&inode->i_lock);
+ }
return ret;
}
@@ -853,7 +861,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp->sd_vfs, 0);
- gfs2_log_flush(sdp, NULL, SHUTDOWN_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
+ GFS2_LFC_MAKE_FS_RO);
wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
@@ -946,7 +955,8 @@ static int gfs2_sync_fs(struct super_block *sb, int wait)
gfs2_quota_sync(sb, -1);
if (wait)
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_SYNC_FS);
return sdp->sd_log_error;
}
@@ -1650,7 +1660,8 @@ alloc_failed:
goto out_unlock;
out_truncate:
- gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_EVICT_INODE);
metamapping = gfs2_glock2aspace(ip->i_gl);
if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
filemap_fdatawrite(metamapping);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 9eb9d0a1abd9..c191fa58a1df 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -112,7 +112,7 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
}
if (error) {
- fs_warn(sdp, "freeze %d error %d", n, error);
+ fs_warn(sdp, "freeze %d error %d\n", n, error);
return error;
}
@@ -679,7 +679,7 @@ fail_tune:
sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
free_percpu(sdp->sd_lkstats);
- fs_err(sdp, "error %d adding sysfs files", error);
+ fs_err(sdp, "error %d adding sysfs files\n", error);
if (sysfs_frees_sdp)
kobject_put(&sdp->sd_kobj);
else
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index f67a709589d3..b9318b49ff8f 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -353,26 +353,29 @@ TRACE_EVENT(gfs2_pin,
/* Flushing the log */
TRACE_EVENT(gfs2_log_flush,
- TP_PROTO(const struct gfs2_sbd *sdp, int start),
+ TP_PROTO(const struct gfs2_sbd *sdp, int start, u32 flags),
- TP_ARGS(sdp, start),
+ TP_ARGS(sdp, start, flags),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( int, start )
__field( u64, log_seq )
+ __field( u32, flags )
),
TP_fast_assign(
__entry->dev = sdp->sd_vfs->s_dev;
__entry->start = start;
__entry->log_seq = sdp->sd_log_sequence;
+ __entry->flags = flags;
),
- TP_printk("%u,%u log flush %s %llu",
+ TP_printk("%u,%u log flush %s %llu %llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->start ? "start" : "end",
- (unsigned long long)__entry->log_seq)
+ (unsigned long long)__entry->log_seq,
+ (unsigned long long)__entry->flags)
);
/* Reserving/releasing blocks in the log */
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index ca8b72d0a831..c75cacaa349b 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -92,7 +92,6 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
s64 nbuf;
int alloced = test_bit(TR_ALLOCED, &tr->tr_flags);
- BUG_ON(!tr);
current->journal_info = NULL;
if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
@@ -118,7 +117,8 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
up_read(&sdp->sd_log_flush_lock);
if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_TRANS_END);
if (alloced)
sb_end_intwrite(sdp->sd_vfs);
}
diff --git a/fs/hfsplus/Kconfig b/fs/hfsplus/Kconfig
index 24bc20fd42f7..7cc8b4acf66a 100644
--- a/fs/hfsplus/Kconfig
+++ b/fs/hfsplus/Kconfig
@@ -20,9 +20,6 @@ config HFSPLUS_FS_POSIX_ACL
POSIX Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
Note that POSIX ACLs are honoured only under Linux; they have no
meaning under Mac OS X. Mac OS X, beginning with version 10.4
("Tiger"), supports NFSv4 ACLs,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8a85f3f53446..8fe1b0aa2896 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -55,16 +55,6 @@ struct hugetlbfs_config {
umode_t mode;
};
-struct hugetlbfs_inode_info {
- struct shared_policy policy;
- struct inode vfs_inode;
-};
-
-static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
-{
- return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
-}
-
int sysctl_hugetlb_shm_group;
enum {
@@ -520,8 +510,16 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (hole_end > hole_start) {
struct address_space *mapping = inode->i_mapping;
+ struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
inode_lock(inode);
+
+ /* protected by i_mutex */
+ if (info->seals & F_SEAL_WRITE) {
+ inode_unlock(inode);
+ return -EPERM;
+ }
+
i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
hugetlb_vmdelete_list(&mapping->i_mmap,
@@ -539,6 +537,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
struct inode *inode = file_inode(file);
+ struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
struct address_space *mapping = inode->i_mapping;
struct hstate *h = hstate_inode(inode);
struct vm_area_struct pseudo_vma;
@@ -570,6 +569,11 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
if (error)
goto out;
+ if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
+ error = -EPERM;
+ goto out;
+ }
+
/*
* Initialize a pseudo vma as this is required by the huge page
* allocation routines. If NUMA is configured, use page index
@@ -660,6 +664,7 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
struct hstate *h = hstate_inode(inode);
int error;
unsigned int ia_valid = attr->ia_valid;
+ struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
BUG_ON(!inode);
@@ -668,9 +673,16 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
return error;
if (ia_valid & ATTR_SIZE) {
- if (attr->ia_size & ~huge_page_mask(h))
+ loff_t oldsize = inode->i_size;
+ loff_t newsize = attr->ia_size;
+
+ if (newsize & ~huge_page_mask(h))
return -EINVAL;
- error = hugetlb_vmtruncate(inode, attr->ia_size);
+ /* protected by i_mutex */
+ if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
+ (newsize > oldsize && (info->seals & F_SEAL_GROW)))
+ return -EPERM;
+ error = hugetlb_vmtruncate(inode, newsize);
if (error)
return error;
}
@@ -722,6 +734,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
inode = new_inode(sb);
if (inode) {
+ struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
@@ -729,6 +743,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
inode->i_mapping->private_data = resv_map;
+ info->seals = F_SEAL_SEAL;
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
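
hugetlbfs picks up memfd-style seals here: new inodes start with F_SEAL_SEAL, hole punching is refused under F_SEAL_WRITE, fallocate past i_size under F_SEAL_GROW, and truncation is checked against F_SEAL_SHRINK or F_SEAL_GROW depending on direction, all serialized by the inode lock. A userspace sketch of the setattr-direction check; the seal values match <linux/fcntl.h>, the rest is scaffolding.

#include <stdio.h>

#define F_SEAL_SHRINK 0x0002    /* values as in <linux/fcntl.h> */
#define F_SEAL_GROW   0x0004
#define F_SEAL_WRITE  0x0008

/* Mirrors the hugetlbfs_setattr() check: refuse a size change that a
 * seal forbids, depending on the direction of the change. */
static int check_resize(unsigned seals, long long oldsize, long long newsize)
{
    if (newsize < oldsize && (seals & F_SEAL_SHRINK))
        return -1;    /* -EPERM in the kernel */
    if (newsize > oldsize && (seals & F_SEAL_GROW))
        return -1;
    return 0;
}

int main(void)
{
    unsigned seals = F_SEAL_GROW;
    printf("shrink: %d\n", check_resize(seals, 4096, 0));     /* allowed */
    printf("grow:   %d\n", check_resize(seals, 4096, 8192));  /* refused */
    return 0;
}
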
diff --git a/fs/inode.c b/fs/inode.c
index 03102d6ef044..e2ca0f4b5151 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -18,6 +18,7 @@
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
+#include <linux/iversion.h>
#include <trace/events/writeback.h>
#include "internal.h"
@@ -1634,17 +1635,21 @@ static int relatime_need_update(const struct path *path, struct inode *inode,
int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
int iflags = I_DIRTY_TIME;
+ bool dirty = false;
if (flags & S_ATIME)
inode->i_atime = *time;
if (flags & S_VERSION)
- inode_inc_iversion(inode);
+ dirty = inode_maybe_inc_iversion(inode, false);
if (flags & S_CTIME)
inode->i_ctime = *time;
if (flags & S_MTIME)
inode->i_mtime = *time;
+ if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
+ !(inode->i_sb->s_flags & SB_LAZYTIME))
+ dirty = true;
- if (!(inode->i_sb->s_flags & SB_LAZYTIME) || (flags & S_VERSION))
+ if (dirty)
iflags |= I_DIRTY_SYNC;
__mark_inode_dirty(inode, iflags);
return 0;
@@ -1863,7 +1868,7 @@ int file_update_time(struct file *file)
if (!timespec_equal(&inode->i_ctime, &now))
sync_it |= S_CTIME;
- if (IS_I_VERSION(inode))
+ if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
sync_it |= S_VERSION;
if (!sync_it)
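
generic_update_time() now raises I_DIRTY_SYNC only when something really must reach disk: when timestamps changed on a non-lazytime mount, or when inode_maybe_inc_iversion() reports that the change attribute actually had to be bumped because someone queried it since the last bump; file_update_time() likewise asks inode_iversion_need_inc() first. A toy model of the <linux/iversion.h> scheme, which keeps the "queried" flag in the low bit of the counter; the struct and helpers below are simplified stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Low bit of the counter records whether anyone queried the value
 * since the last increment; the real value lives in the upper bits. */
struct inode { uint64_t iversion; };

static bool maybe_inc_iversion(struct inode *i, bool force)
{
    if (!force && !(i->iversion & 1))
        return false;               /* nobody looked; skip the bump */
    i->iversion = ((i->iversion >> 1) + 1) << 1;
    return true;                    /* caller must dirty the inode */
}

static uint64_t query_iversion(struct inode *i)
{
    i->iversion |= 1;               /* next change must increment */
    return i->iversion >> 1;
}

int main(void)
{
    struct inode ino = { 0 };
    printf("dirty? %d\n", maybe_inc_iversion(&ino, false));  /* 0 */
    printf("v=%llu\n", (unsigned long long)query_iversion(&ino));
    printf("dirty? %d\n", maybe_inc_iversion(&ino, false));  /* 1 */
    return 0;
}
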
diff --git a/fs/iomap.c b/fs/iomap.c
index 47d29ccffaef..afd163586aa0 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -65,6 +65,8 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
return ret;
if (WARN_ON(iomap.offset > pos))
return -EIO;
+ if (WARN_ON(iomap.length == 0))
+ return -EIO;
/*
* Cut down the length to the one actually provided by the filesystem,
@@ -753,7 +755,8 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
err = invalidate_inode_pages2_range(inode->i_mapping,
offset >> PAGE_SHIFT,
(offset + dio->size - 1) >> PAGE_SHIFT);
- WARN_ON_ONCE(err);
+ if (err)
+ dio_warn_stale_pagecache(iocb->ki_filp);
}
inode_dio_end(file_inode(iocb->ki_filp));
@@ -1018,9 +1021,16 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (ret)
goto out_free_dio;
+ /*
+ * Try to invalidate cache pages for the range we're direct
+ * writing. If this invalidation fails, tough, the write will
+ * still work, but racing two incompatible write paths is a
+ * pretty crazy thing to do, so we don't support it 100%.
+ */
ret = invalidate_inode_pages2_range(mapping,
start >> PAGE_SHIFT, end >> PAGE_SHIFT);
- WARN_ON_ONCE(ret);
+ if (ret)
+ dio_warn_stale_pagecache(iocb->ki_filp);
ret = 0;
if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
diff --git a/fs/jffs2/Kconfig b/fs/jffs2/Kconfig
index d8bb6c411e96..ad850c5bf2ca 100644
--- a/fs/jffs2/Kconfig
+++ b/fs/jffs2/Kconfig
@@ -68,8 +68,7 @@ config JFFS2_FS_XATTR
default n
help
Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details).
+ the kernel or by users (see the attr(5) manual page for details).
If unsure, say N.
@@ -82,9 +81,6 @@ config JFFS2_FS_POSIX_ACL
Posix Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the Posix ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N
config JFFS2_FS_SECURITY
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index d8c274d39ddb..eab04eca95a3 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -362,7 +362,6 @@ error_io:
ret = -EIO;
error:
mutex_unlock(&f->sem);
- jffs2_do_clear_inode(c, f);
iget_failed(inode);
return ERR_PTR(ret);
}
diff --git a/fs/jfs/Kconfig b/fs/jfs/Kconfig
index 57cef19951db..851de78fdabb 100644
--- a/fs/jfs/Kconfig
+++ b/fs/jfs/Kconfig
@@ -16,9 +16,6 @@ config JFS_POSIX_ACL
Posix Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the Posix ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N
config JFS_SECURITY
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 9698e51656b1..c53d9cc5ae7a 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -832,7 +832,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
* to see if it supports poll (neither 'poll' nor 'select' returns
* an appropriate error code). When in doubt, set a suitable timeout value.
*/
-static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
+static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
{
struct kernfs_open_file *of = kernfs_of(filp);
struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 066ac313ae5c..a2c0dfc6fdc0 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -48,13 +48,13 @@ void nlmclnt_next_cookie(struct nlm_cookie *c)
static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
- atomic_inc(&lockowner->count);
+ refcount_inc(&lockowner->count);
return lockowner;
}
static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
- if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
+ if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
return;
list_del(&lockowner->list);
spin_unlock(&lockowner->host->h_lock);
@@ -105,7 +105,7 @@ static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_
res = __nlm_find_lockowner(host, owner);
if (res == NULL && new != NULL) {
res = new;
- atomic_set(&new->count, 1);
+ refcount_set(&new->count, 1);
new->owner = owner;
new->pid = __nlm_alloc_pid(host);
new->host = nlm_get_host(host);
@@ -204,7 +204,7 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
for(;;) {
call = kzalloc(sizeof(*call), GFP_KERNEL);
if (call != NULL) {
- atomic_set(&call->a_count, 1);
+ refcount_set(&call->a_count, 1);
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
call->a_host = nlm_get_host(host);
@@ -222,7 +222,7 @@ void nlmclnt_release_call(struct nlm_rqst *call)
{
const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;
- if (!atomic_dec_and_test(&call->a_count))
+ if (!refcount_dec_and_test(&call->a_count))
return;
if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
@@ -678,7 +678,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
goto out;
}
- atomic_inc(&req->a_count);
+ refcount_inc(&req->a_count);
status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
if (status < 0)
@@ -769,7 +769,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
nlmclnt_setlockargs(req, fl);
req->a_args.block = block;
- atomic_inc(&req->a_count);
+ refcount_inc(&req->a_count);
status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
NLMPROC_CANCEL, &nlmclnt_cancel_ops);
if (status == 0 && req->a_res.status == nlm_lck_denied)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 826a89184f90..d35cd6be0675 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -114,7 +114,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
unsigned long now = jiffies;
if (nsm != NULL)
- atomic_inc(&nsm->sm_count);
+ refcount_inc(&nsm->sm_count);
else {
host = NULL;
nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
@@ -151,7 +151,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
host->h_state = 0;
host->h_nsmstate = 0;
host->h_pidcount = 0;
- atomic_set(&host->h_count, 1);
+ refcount_set(&host->h_count, 1);
mutex_init(&host->h_mutex);
host->h_nextrebind = now + NLM_HOST_REBIND;
host->h_expires = now + NLM_HOST_EXPIRE;
@@ -290,7 +290,7 @@ void nlmclnt_release_host(struct nlm_host *host)
WARN_ON_ONCE(host->h_server);
- if (atomic_dec_and_test(&host->h_count)) {
+ if (refcount_dec_and_test(&host->h_count)) {
WARN_ON_ONCE(!list_empty(&host->h_lockowners));
WARN_ON_ONCE(!list_empty(&host->h_granted));
WARN_ON_ONCE(!list_empty(&host->h_reclaim));
@@ -388,6 +388,8 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
ln->nrhosts++;
nrhosts++;
+ refcount_inc(&host->h_count);
+
dprintk("lockd: %s created host %s (%s)\n",
__func__, host->h_name, host->h_addrbuf);
@@ -410,7 +412,7 @@ void nlmsvc_release_host(struct nlm_host *host)
dprintk("lockd: release server host %s\n", host->h_name);
WARN_ON_ONCE(!host->h_server);
- atomic_dec(&host->h_count);
+ refcount_dec(&host->h_count);
}
/*
@@ -504,7 +506,7 @@ struct nlm_host * nlm_get_host(struct nlm_host *host)
{
if (host) {
dprintk("lockd: get host %s\n", host->h_name);
- atomic_inc(&host->h_count);
+ refcount_inc(&host->h_count);
host->h_expires = jiffies + NLM_HOST_EXPIRE;
}
return host;
@@ -593,7 +595,7 @@ static void nlm_complain_hosts(struct net *net)
if (net && host->net != net)
continue;
dprintk(" %s (cnt %d use %d exp %ld net %x)\n",
- host->h_name, atomic_read(&host->h_count),
+ host->h_name, refcount_read(&host->h_count),
host->h_inuse, host->h_expires, host->net->ns.inum);
}
}
@@ -662,16 +664,16 @@ nlm_gc_hosts(struct net *net)
for_each_host_safe(host, next, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
- if (atomic_read(&host->h_count) || host->h_inuse
- || time_before(jiffies, host->h_expires)) {
+ if (host->h_inuse || time_before(jiffies, host->h_expires)) {
dprintk("nlm_gc_hosts skipping %s "
"(cnt %d use %d exp %ld net %x)\n",
- host->h_name, atomic_read(&host->h_count),
+ host->h_name, refcount_read(&host->h_count),
host->h_inuse, host->h_expires,
host->net->ns.inum);
continue;
}
- nlm_destroy_host_locked(host);
+ if (refcount_dec_if_one(&host->h_count))
+ nlm_destroy_host_locked(host);
}
if (net) {
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 96cfb2967ac7..654594ef4f94 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -191,7 +191,7 @@ void nsm_unmonitor(const struct nlm_host *host)
struct nsm_res res;
int status;
- if (atomic_read(&nsm->sm_count) == 1
+ if (refcount_read(&nsm->sm_count) == 1
&& nsm->sm_monitored && !nsm->sm_sticky) {
dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
@@ -279,7 +279,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
if (unlikely(new == NULL))
return NULL;
- atomic_set(&new->sm_count, 1);
+ refcount_set(&new->sm_count, 1);
new->sm_name = (char *)(new + 1);
memcpy(nsm_addr(new), sap, salen);
new->sm_addrlen = salen;
@@ -337,13 +337,13 @@ retry:
cached = nsm_lookup_addr(&ln->nsm_handles, sap);
if (cached != NULL) {
- atomic_inc(&cached->sm_count);
+ refcount_inc(&cached->sm_count);
spin_unlock(&nsm_lock);
kfree(new);
dprintk("lockd: found nsm_handle for %s (%s), "
"cnt %d\n", cached->sm_name,
cached->sm_addrbuf,
- atomic_read(&cached->sm_count));
+ refcount_read(&cached->sm_count));
return cached;
}
@@ -388,12 +388,12 @@ struct nsm_handle *nsm_reboot_lookup(const struct net *net,
return cached;
}
- atomic_inc(&cached->sm_count);
+ refcount_inc(&cached->sm_count);
spin_unlock(&nsm_lock);
dprintk("lockd: host %s (%s) rebooted, cnt %d\n",
cached->sm_name, cached->sm_addrbuf,
- atomic_read(&cached->sm_count));
+ refcount_read(&cached->sm_count));
return cached;
}
@@ -404,7 +404,7 @@ struct nsm_handle *nsm_reboot_lookup(const struct net *net,
*/
void nsm_release(struct nsm_handle *nsm)
{
- if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
+ if (refcount_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
list_del(&nsm->sm_link);
spin_unlock(&nsm_lock);
dprintk("lockd: destroyed nsm_handle for %s (%s)\n",
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 0d670c5c378f..ea77c66d3cc3 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -295,7 +295,7 @@ static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
void nlmsvc_release_call(struct nlm_rqst *call)
{
- if (!atomic_dec_and_test(&call->a_count))
+ if (!refcount_dec_and_test(&call->a_count))
return;
nlmsvc_release_host(call->a_host);
kfree(call);
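
The lockd hunks convert the h_count, sm_count and a_count lifetime counters from atomic_t to refcount_t, whose increments saturate rather than wrap and which warns on underflow or use-after-zero. Note also nlm_gc_hosts(): the old atomic_read()-then-destroy, racy against a concurrent nlm_get_host(), becomes refcount_dec_if_one(), which only succeeds if the caller held the last reference. A sketch of the two shapes with C11 atomics; the kernel's refcount_t adds the saturation and warning semantics on top.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { atomic_int refs; } refcount_t;

static void ref_get(refcount_t *r) { atomic_fetch_add(&r->refs, 1); }

/* Returns true when the caller dropped the final reference. */
static bool ref_put(refcount_t *r)
{
    return atomic_fetch_sub(&r->refs, 1) == 1;
}

/* Drop to zero only if we are the sole holder -- the GC-safe variant
 * used in nlm_gc_hosts() instead of read-then-destroy. */
static bool ref_dec_if_one(refcount_t *r)
{
    int one = 1;
    return atomic_compare_exchange_strong(&r->refs, &one, 0);
}

int main(void)
{
    refcount_t r = { 1 };
    ref_get(&r);
    printf("dec_if_one: %d\n", ref_dec_if_one(&r));  /* 0: others hold it */
    printf("last put:   %d\n", ref_put(&r));         /* 0 */
    printf("last put:   %d\n", ref_put(&r));         /* 1: safe to free */
    return 0;
}
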
diff --git a/fs/namei.c b/fs/namei.c
index 9cc91fb7f156..921ae32dbc80 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -391,50 +391,6 @@ static inline int do_inode_permission(struct inode *inode, int mask)
}
/**
- * __inode_permission - Check for access rights to a given inode
- * @inode: Inode to check permission on
- * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
- *
- * Check for read/write/execute permissions on an inode.
- *
- * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
- *
- * This does not check for a read-only file system. You probably want
- * inode_permission().
- */
-int __inode_permission(struct inode *inode, int mask)
-{
- int retval;
-
- if (unlikely(mask & MAY_WRITE)) {
- /*
- * Nobody gets write access to an immutable file.
- */
- if (IS_IMMUTABLE(inode))
- return -EPERM;
-
- /*
- * Updating mtime will likely cause i_uid and i_gid to be
- * written back improperly if their true value is unknown
- * to the vfs.
- */
- if (HAS_UNMAPPED_ID(inode))
- return -EACCES;
- }
-
- retval = do_inode_permission(inode, mask);
- if (retval)
- return retval;
-
- retval = devcgroup_inode_permission(inode, mask);
- if (retval)
- return retval;
-
- return security_inode_permission(inode, mask);
-}
-EXPORT_SYMBOL(__inode_permission);
-
-/**
* sb_permission - Check superblock-level permissions
* @sb: Superblock of inode to check permission on
* @inode: Inode to check permission on
@@ -472,7 +428,32 @@ int inode_permission(struct inode *inode, int mask)
retval = sb_permission(inode->i_sb, inode, mask);
if (retval)
return retval;
- return __inode_permission(inode, mask);
+
+ if (unlikely(mask & MAY_WRITE)) {
+ /*
+ * Nobody gets write access to an immutable file.
+ */
+ if (IS_IMMUTABLE(inode))
+ return -EPERM;
+
+ /*
+ * Updating mtime will likely cause i_uid and i_gid to be
+ * written back improperly if their true value is unknown
+ * to the vfs.
+ */
+ if (HAS_UNMAPPED_ID(inode))
+ return -EACCES;
+ }
+
+ retval = do_inode_permission(inode, mask);
+ if (retval)
+ return retval;
+
+ retval = devcgroup_inode_permission(inode, mask);
+ if (retval)
+ return retval;
+
+ return security_inode_permission(inode, mask);
}
EXPORT_SYMBOL(inode_permission);
@@ -1133,9 +1114,6 @@ static int follow_automount(struct path *path, struct nameidata *nd,
path->dentry->d_inode)
return -EISDIR;
- if (path->dentry->d_sb->s_user_ns != &init_user_ns)
- return -EACCES;
-
nd->total_link_count++;
if (nd->total_link_count >= 40)
return -ELOOP;
@@ -2898,6 +2876,27 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
}
EXPORT_SYMBOL(vfs_create);
+int vfs_mkobj(struct dentry *dentry, umode_t mode,
+ int (*f)(struct dentry *, umode_t, void *),
+ void *arg)
+{
+ struct inode *dir = dentry->d_parent->d_inode;
+ int error = may_create(dir, dentry);
+ if (error)
+ return error;
+
+ mode &= S_IALLUGO;
+ mode |= S_IFREG;
+ error = security_inode_create(dir, dentry, mode);
+ if (error)
+ return error;
+ error = f(dentry, mode, arg);
+ if (!error)
+ fsnotify_create(dir, dentry);
+ return error;
+}
+EXPORT_SYMBOL(vfs_mkobj);
+
bool may_open_dev(const struct path *path)
{
return !(path->mnt->mnt_flags & MNT_NODEV) &&
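
The new vfs_mkobj() lets a subsystem create a regular-file object through its own constructor while the VFS supplies the generic parts: may_create(), the security_inode_create() hook, mode sanitization, and fsnotify_create() on success. A miniature of that shape; every type and name below is a userspace stand-in, not a kernel definition.

#include <stdio.h>

struct dentry { const char *name; };

typedef int (*mkobj_fn)(struct dentry *, unsigned mode, void *arg);

static int vfs_mkobj_like(struct dentry *d, unsigned mode,
                          mkobj_fn f, void *arg)
{
    /* kernel: may_create() + security_inode_create() would run here */
    mode = (mode & 0777) | 0100000;     /* force S_IFREG */
    int err = f(d, mode, arg);
    if (!err)
        printf("fsnotify_create(%s)\n", d->name);  /* kernel: fsnotify */
    return err;
}

static int my_ctor(struct dentry *d, unsigned mode, void *arg)
{
    printf("construct %s mode %o with %s\n", d->name, mode, (char *)arg);
    return 0;
}

int main(void)
{
    struct dentry d = { .name = "queue0" };
    return vfs_mkobj_like(&d, 0600, my_ctor, "driver-data");
}
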
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 995d707537da..7cb5c38c19e4 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -137,6 +137,11 @@ bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
return bio;
}
+static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
+{
+ return offset >= map->start && offset < map->start + map->len;
+}
+
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
struct page *page, struct pnfs_block_dev_map *map,
@@ -156,8 +161,8 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
/* translate to physical disk offset */
disk_addr = (u64)isect << SECTOR_SHIFT;
- if (disk_addr < map->start || disk_addr >= map->start + map->len) {
- if (!dev->map(dev, disk_addr, map))
+ if (!offset_in_map(disk_addr, map)) {
+ if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map))
return ERR_PTR(-EIO);
bio = bl_submit_bio(bio);
}
@@ -184,6 +189,29 @@ retry:
return bio;
}
+static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw)
+{
+ struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
+ size_t bytes_left = header->args.count;
+ sector_t isect, extent_length = 0;
+ struct pnfs_block_extent be;
+
+ isect = header->args.offset >> SECTOR_SHIFT;
+ bytes_left += header->args.offset - (isect << SECTOR_SHIFT);
+
+ while (bytes_left > 0) {
+ if (!ext_tree_lookup(bl, isect, &be, rw))
+ return;
+ extent_length = be.be_length - (isect - be.be_f_offset);
+ nfs4_mark_deviceid_unavailable(be.be_device);
+ isect += extent_length;
+ if (bytes_left > extent_length << SECTOR_SHIFT)
+ bytes_left -= extent_length << SECTOR_SHIFT;
+ else
+ bytes_left = 0;
+ }
+}
+
static void bl_end_io_read(struct bio *bio)
{
struct parallel_io *par = bio->bi_private;
@@ -194,6 +222,7 @@ static void bl_end_io_read(struct bio *bio)
if (!header->pnfs_error)
header->pnfs_error = -EIO;
pnfs_set_lo_fail(header->lseg);
+ bl_mark_devices_unavailable(header, false);
}
bio_put(bio);
@@ -323,6 +352,7 @@ static void bl_end_io_write(struct bio *bio)
if (!header->pnfs_error)
header->pnfs_error = -EIO;
pnfs_set_lo_fail(header->lseg);
+ bl_mark_devices_unavailable(header, true);
}
bio_put(bio);
put_parallel(par);
@@ -552,6 +582,31 @@ static int decode_sector_number(__be32 **rp, sector_t *sp)
return 0;
}
+static struct nfs4_deviceid_node *
+bl_find_get_deviceid(struct nfs_server *server,
+ const struct nfs4_deviceid *id, struct rpc_cred *cred,
+ gfp_t gfp_mask)
+{
+ struct nfs4_deviceid_node *node;
+ unsigned long start, end;
+
+retry:
+ node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
+ if (!node)
+ return ERR_PTR(-ENODEV);
+
+ if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags) == 0)
+ return node;
+
+ end = jiffies;
+ start = end - PNFS_DEVICE_RETRY_TIMEOUT;
+ if (!time_in_range(node->timestamp_unavailable, start, end)) {
+ nfs4_delete_deviceid(node->ld, node->nfs_client, id);
+ goto retry;
+ }
+ return ERR_PTR(-ENODEV);
+}
+
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
struct layout_verification *lv, struct list_head *extents,
@@ -573,16 +628,18 @@ bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
memcpy(&id, p, NFS4_DEVICEID4_SIZE);
p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
- error = -EIO;
- be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
+ be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
lo->plh_lc_cred, gfp_mask);
- if (!be->be_device)
+ if (IS_ERR(be->be_device)) {
+ error = PTR_ERR(be->be_device);
goto out_free_be;
+ }
/*
* The next three values are read in as bytes, but stored in the
* extent structure in 512-byte granularity.
*/
+ error = -EIO;
if (decode_sector_number(&p, &be->be_f_offset) < 0)
goto out_put_deviceid;
if (decode_sector_number(&p, &be->be_length) < 0)
@@ -692,11 +749,16 @@ out_free_scratch:
__free_page(scratch);
out:
dprintk("%s returns %d\n", __func__, status);
- if (status) {
+ switch (status) {
+ case -ENODEV:
+ /* Our extent block devices are unavailable */
+ set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags);
+ case 0:
+ return lseg;
+ default:
kfree(lseg);
return ERR_PTR(status);
}
- return lseg;
}
static void
@@ -798,6 +860,13 @@ bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
}
pnfs_generic_pg_init_read(pgio, req);
+
+ if (pgio->pg_lseg &&
+ test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
+ pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
+ pnfs_set_lo_fail(pgio->pg_lseg);
+ nfs_pageio_reset_read_mds(pgio);
+ }
}
/*
@@ -853,6 +922,14 @@ bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
pnfs_generic_pg_init_write(pgio, req, wb_size);
+
+ if (pgio->pg_lseg &&
+ test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
+
+ pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
+ pnfs_set_lo_fail(pgio->pg_lseg);
+ nfs_pageio_reset_write_mds(pgio);
+ }
}
/*
@@ -887,6 +964,7 @@ static struct pnfs_layoutdriver_type blocklayout_type = {
.name = "LAYOUT_BLOCK_VOLUME",
.owner = THIS_MODULE,
.flags = PNFS_LAYOUTRET_ON_SETATTR |
+ PNFS_LAYOUTRET_ON_ERROR |
PNFS_READ_WHOLE_PAGE,
.read_pagelist = bl_read_pagelist,
.write_pagelist = bl_write_pagelist,
@@ -910,6 +988,7 @@ static struct pnfs_layoutdriver_type scsilayout_type = {
.name = "LAYOUT_SCSI",
.owner = THIS_MODULE,
.flags = PNFS_LAYOUTRET_ON_SETATTR |
+ PNFS_LAYOUTRET_ON_ERROR |
PNFS_READ_WHOLE_PAGE,
.read_pagelist = bl_read_pagelist,
.write_pagelist = bl_write_pagelist,
@@ -967,6 +1046,7 @@ static void __exit nfs4blocklayout_exit(void)
}
MODULE_ALIAS("nfs-layouttype4-3");
+MODULE_ALIAS("nfs-layouttype4-5");
module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);
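
bl_find_get_deviceid() turns a device marked NFS_DEVICEID_UNAVAILABLE into a time-limited failure: within the retry window, lookups fail with -ENODEV (and, via the new NFS_LSEG_UNAVAILABLE flag, I/O is redirected to the MDS); once the timestamp has aged past the timeout, the stale node is deleted and the lookup retried. A sketch of the windowed retry; RETRY_TIMEOUT and the struct are stand-ins for PNFS_DEVICE_RETRY_TIMEOUT and the deviceid node.

#include <stdbool.h>
#include <stdio.h>

#define RETRY_TIMEOUT 120   /* seconds; stand-in for PNFS_DEVICE_RETRY_TIMEOUT */

struct devid { bool unavailable; long stamp; };

/* kernel time_in_range(t, a, b): a <= t <= b */
static bool time_in_range(long t, long a, long b) { return t >= a && t <= b; }

/* Returns 0 on success, -1 (-ENODEV) while the device is still in its
 * unavailability window; resets the node when the window has passed. */
static int find_get_deviceid(struct devid *node, long now)
{
    if (!node->unavailable)
        return 0;
    if (time_in_range(node->stamp, now - RETRY_TIMEOUT, now))
        return -1;    /* marked recently: keep failing over to the MDS */
    node->unavailable = false;    /* kernel: delete node and re-lookup */
    return 0;
}

int main(void)
{
    struct devid d = { .unavailable = true, .stamp = 1000 };
    printf("%d\n", find_get_deviceid(&d, 1060));  /* -1: within window */
    printf("%d\n", find_get_deviceid(&d, 2000));  /* 0: retried */
    return 0;
}
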
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index efc007f00742..716bc75e9ed2 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -92,10 +92,9 @@ struct pnfs_block_volume {
};
struct pnfs_block_dev_map {
- sector_t start;
- sector_t len;
-
- sector_t disk_offset;
+ u64 start;
+ u64 len;
+ u64 disk_offset;
struct block_device *bdev;
};
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index 95f74bd2c067..a7efd83779d2 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -533,14 +533,11 @@ bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
goto out_free_volumes;
ret = bl_parse_deviceid(server, top, volumes, nr_volumes - 1, gfp_mask);
- if (ret) {
- bl_free_device(top);
- kfree(top);
- goto out_free_volumes;
- }
node = &top->node;
nfs4_init_deviceid_node(node, server, &pdev->dev_id);
+ if (ret)
+ nfs4_mark_deviceid_unavailable(node);
out_free_volumes:
kfree(volumes);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index ade44ca0c66c..d8b47624fee2 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/iversion.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
@@ -347,7 +348,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
nfs4_stateid_copy(&delegation->stateid, &res->delegation);
delegation->type = res->delegation_type;
delegation->pagemod_limit = res->pagemod_limit;
- delegation->change_attr = inode->i_version;
+ delegation->change_attr = inode_peek_iversion_raw(inode);
delegation->cred = get_rpccred(cred);
delegation->inode = inode;
delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index d2972d537469..8c10b0562e75 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -775,10 +775,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
spin_lock(&dreq->lock);
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
- dreq->flags = 0;
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
dreq->error = hdr->error;
- }
if (dreq->error == 0) {
nfs_direct_good_bytes(dreq, hdr);
if (nfs_write_need_commit(hdr)) {
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
index 83fd09fc8f77..ab5de3246c5c 100644
--- a/fs/nfs/export.c
+++ b/fs/nfs/export.c
@@ -48,10 +48,6 @@ nfs_encode_fh(struct inode *inode, __u32 *p, int *max_len, struct inode *parent)
*max_len = len;
return FILEID_INVALID;
}
- if (IS_AUTOMOUNT(inode)) {
- *max_len = FILEID_INVALID;
- goto out;
- }
p[FILEID_HIGH_OFF] = NFS_FILEID(inode) >> 32;
p[FILEID_LOW_OFF] = NFS_FILEID(inode);
@@ -59,7 +55,6 @@ nfs_encode_fh(struct inode *inode, __u32 *p, int *max_len, struct inode *parent)
p[len - 1] = 0; /* Padding */
nfs_copy_fh(clnt_fh, server_fh);
*max_len = len;
-out:
dprintk("%s: result fh fileid %llu mode %u size %d\n",
__func__, NFS_FILEID(inode), inode->i_mode, *max_len);
return *max_len;
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 4e54d8b5413a..d175724ff566 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -895,9 +895,7 @@ fl_pnfs_update_layout(struct inode *ino,
lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
gfp_flags);
- if (!lseg)
- lseg = ERR_PTR(-ENOMEM);
- if (IS_ERR(lseg))
+ if (IS_ERR_OR_NULL(lseg))
goto out;
lo = NFS_I(ino)->layout;
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
index 3025fe8584a0..0ee4b93d36ea 100644
--- a/fs/nfs/fscache-index.c
+++ b/fs/nfs/fscache-index.c
@@ -16,6 +16,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
+#include <linux/iversion.h>
#include "internal.h"
#include "fscache.h"
@@ -211,7 +212,7 @@ static uint16_t nfs_fscache_inode_get_aux(const void *cookie_netfs_data,
auxdata.ctime = nfsi->vfs_inode.i_ctime;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata.change_attr = nfsi->vfs_inode.i_version;
+ auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
if (bufmax > sizeof(auxdata))
bufmax = sizeof(auxdata);
@@ -243,7 +244,7 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
auxdata.ctime = nfsi->vfs_inode.i_ctime;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata.change_attr = nfsi->vfs_inode.i_version;
+ auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
if (memcmp(data, &auxdata, datalen) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b992d2382ffa..ceeaf0fb6657 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -38,8 +38,8 @@
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/freezer.h>
-
#include <linux/uaccess.h>
+#include <linux/iversion.h>
#include "nfs4_fs.h"
#include "callback.h"
@@ -483,7 +483,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
memset(&inode->i_atime, 0, sizeof(inode->i_atime));
memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
memset(&inode->i_ctime, 0, sizeof(inode->i_ctime));
- inode->i_version = 0;
+ inode_set_iversion_raw(inode, 0);
inode->i_size = 0;
clear_nlink(inode);
inode->i_uid = make_kuid(&init_user_ns, -2);
@@ -508,7 +508,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
else if (nfs_server_capable(inode, NFS_CAP_CTIME))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
- inode->i_version = fattr->change_attr;
+ inode_set_iversion_raw(inode, fattr->change_attr);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_PAGECACHE);
@@ -735,12 +735,20 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
- int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
+ struct nfs_server *server = NFS_SERVER(inode);
+ unsigned long cache_validity;
int err = 0;
+ bool force_sync = query_flags & AT_STATX_FORCE_SYNC;
+ bool do_update = false;
trace_nfs_getattr_enter(inode);
+
+ if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync)
+ goto out_no_update;
+
/* Flush out writes to the server in order to update c/mtime. */
- if (S_ISREG(inode->i_mode)) {
+ if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
+ S_ISREG(inode->i_mode)) {
err = filemap_write_and_wait(inode->i_mapping);
if (err)
goto out;
@@ -757,24 +765,42 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
*/
if ((path->mnt->mnt_flags & MNT_NOATIME) ||
((path->mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
- need_atime = 0;
-
- if (need_atime || nfs_need_revalidate_inode(inode)) {
- struct nfs_server *server = NFS_SERVER(inode);
-
+ request_mask &= ~STATX_ATIME;
+
+ /* Is the user requesting attributes that might need revalidation? */
+ if (!(request_mask & (STATX_MODE|STATX_NLINK|STATX_ATIME|STATX_CTIME|
+ STATX_MTIME|STATX_UID|STATX_GID|
+ STATX_SIZE|STATX_BLOCKS)))
+ goto out_no_revalidate;
+
+ /* Check whether the cached attributes are stale */
+ do_update |= force_sync || nfs_attribute_cache_expired(inode);
+ cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+ do_update |= cache_validity &
+ (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL);
+ if (request_mask & STATX_ATIME)
+ do_update |= cache_validity & NFS_INO_INVALID_ATIME;
+ if (request_mask & (STATX_CTIME|STATX_MTIME))
+ do_update |= cache_validity & NFS_INO_REVAL_PAGECACHE;
+ if (do_update) {
+ /* Update the attribute cache */
if (!(server->flags & NFS_MOUNT_NOAC))
nfs_readdirplus_parent_cache_miss(path->dentry);
else
nfs_readdirplus_parent_cache_hit(path->dentry);
err = __nfs_revalidate_inode(server, inode);
+ if (err)
+ goto out;
} else
nfs_readdirplus_parent_cache_hit(path->dentry);
- if (!err) {
- generic_fillattr(inode, stat);
- stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
- if (S_ISDIR(inode->i_mode))
- stat->blksize = NFS_SERVER(inode)->dtsize;
- }
+out_no_revalidate:
+ /* Only return attributes that were revalidated. */
+ stat->result_mask &= request_mask;
+out_no_update:
+ generic_fillattr(inode, stat);
+ stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
+ if (S_ISDIR(inode->i_mode))
+ stat->blksize = NFS_SERVER(inode)->dtsize;
out:
trace_nfs_getattr_exit(inode, err);
return err;
@@ -1144,7 +1170,6 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
if (mapping->nrpages != 0) {
if (S_ISREG(inode->i_mode)) {
- unmap_mapping_range(mapping, 0, 0, 0);
ret = nfs_sync_mapping(mapping);
if (ret < 0)
return ret;
@@ -1289,8 +1314,8 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
&& (fattr->valid & NFS_ATTR_FATTR_CHANGE)
- && inode->i_version == fattr->pre_change_attr) {
- inode->i_version = fattr->change_attr;
+ && !inode_cmp_iversion_raw(inode, fattr->pre_change_attr)) {
+ inode_set_iversion_raw(inode, fattr->change_attr);
if (S_ISDIR(inode->i_mode))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
ret |= NFS_INO_INVALID_ATTR;
@@ -1348,7 +1373,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if (!nfs_file_has_buffered_writers(nfsi)) {
/* Verify a few of the more important attributes */
- if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && inode->i_version != fattr->change_attr)
+ if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && inode_cmp_iversion_raw(inode, fattr->change_attr))
invalid |= NFS_INO_INVALID_ATTR | NFS_INO_REVAL_PAGECACHE;
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
@@ -1642,7 +1667,7 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fa
}
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PRECHANGE) == 0) {
- fattr->pre_change_attr = inode->i_version;
+ fattr->pre_change_attr = inode_peek_iversion_raw(inode);
fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
}
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
@@ -1778,7 +1803,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
/* More cache consistency checks */
if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
- if (inode->i_version != fattr->change_attr) {
+ if (inode_cmp_iversion_raw(inode, fattr->change_attr)) {
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
/* Could it be a race with writeback? */
@@ -1790,7 +1815,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
}
- inode->i_version = fattr->change_attr;
+ inode_set_iversion_raw(inode, fattr->change_attr);
}
} else {
nfsi->cache_validity |= save_cache_validity;
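
nfs_getattr() now honours the statx() control flags: AT_STATX_DONT_SYNC returns whatever is cached, AT_STATX_FORCE_SYNC always revalidates, and otherwise the request_mask decides which stale cached attributes actually matter. A simplified model of that decision; the flag values mirror <linux/stat.h>, and the two "stale" booleans stand in for the NFS_INO_* cache_validity bits.

#include <stdbool.h>
#include <stdio.h>

#define STATX_ATIME         0x00000020u   /* values as in <linux/stat.h> */
#define STATX_MTIME         0x00000040u
#define STATX_CTIME         0x00000080u
#define AT_STATX_FORCE_SYNC 0x2000
#define AT_STATX_DONT_SYNC  0x4000

/* Decide whether a getattr must go to the server, given what the caller
 * asked for and which cached attributes are suspect. */
static bool need_revalidate(unsigned request_mask, unsigned query_flags,
                            bool atime_stale, bool ctime_mtime_stale)
{
    if ((query_flags & AT_STATX_DONT_SYNC) &&
        !(query_flags & AT_STATX_FORCE_SYNC))
        return false;               /* caller accepts stale data */
    if (query_flags & AT_STATX_FORCE_SYNC)
        return true;
    if ((request_mask & STATX_ATIME) && atime_stale)
        return true;
    if ((request_mask & (STATX_CTIME | STATX_MTIME)) && ctime_mtime_stale)
        return true;
    return false;
}

int main(void)
{
    printf("%d\n", need_revalidate(STATX_MTIME, 0, true, false));   /* 0 */
    printf("%d\n", need_revalidate(STATX_MTIME, 0, false, true));   /* 1 */
    printf("%d\n", need_revalidate(STATX_MTIME, AT_STATX_DONT_SYNC,
                                   false, true));                   /* 0 */
    return 0;
}
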
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
index 20fef85d2bb1..9034b4926909 100644
--- a/fs/nfs/io.c
+++ b/fs/nfs/io.c
@@ -99,7 +99,7 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
{
if (!test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
set_bit(NFS_INO_ODIRECT, &nfsi->flags);
- nfs_wb_all(inode);
+ nfs_sync_mapping(inode->i_mapping);
}
}
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 65a7e5da508c..04612c24d394 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -861,6 +861,7 @@ static int nfs4_set_client(struct nfs_server *server,
set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);
if (test_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status))
set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags);
+ server->port = rpc_get_port(addr);
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
@@ -1123,19 +1124,36 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
/* Initialise the client representation from the parent server */
nfs_server_copy_userdata(server, parent_server);
- /* Get a client representation.
- * Note: NFSv4 always uses TCP, */
+ /* Get a client representation */
+#ifdef CONFIG_SUNRPC_XPRT_RDMA
+ rpc_set_port(data->addr, NFS_RDMA_PORT);
error = nfs4_set_client(server, data->hostname,
data->addr,
data->addrlen,
parent_client->cl_ipaddr,
- rpc_protocol(parent_server->client),
+ XPRT_TRANSPORT_RDMA,
+ parent_server->client->cl_timeout,
+ parent_client->cl_mvops->minor_version,
+ parent_client->cl_net);
+ if (!error)
+ goto init_server;
+#endif /* CONFIG_SUNRPC_XPRT_RDMA */
+
+ rpc_set_port(data->addr, NFS_PORT);
+ error = nfs4_set_client(server, data->hostname,
+ data->addr,
+ data->addrlen,
+ parent_client->cl_ipaddr,
+ XPRT_TRANSPORT_TCP,
parent_server->client->cl_timeout,
parent_client->cl_mvops->minor_version,
parent_client->cl_net);
if (error < 0)
goto error;
+#ifdef CONFIG_SUNRPC_XPRT_RDMA
+init_server:
+#endif
error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
if (error < 0)
goto error;
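
The referral-server change above prefers an RDMA transport when CONFIG_SUNRPC_XPRT_RDMA is built in, and falls back to TCP if the RDMA attempt fails. A minimal stand-alone sketch of that try-then-fall-back shape, with a hypothetical set_client() and transport names (not the real nfs4_set_client() signature):

#include <stdio.h>

enum transport { XPRT_RDMA, XPRT_TCP };

static int set_client(enum transport t)
{
	/* stand-in: pretend RDMA setup fails, TCP succeeds */
	return t == XPRT_RDMA ? -1 : 0;
}

static int create_server(void)
{
	int error;

#ifdef CONFIG_SUNRPC_XPRT_RDMA
	error = set_client(XPRT_RDMA);      /* preferred transport */
	if (!error)
		goto init_server;
#endif
	error = set_client(XPRT_TCP);       /* guaranteed fallback */
	if (error < 0)
		return error;
#ifdef CONFIG_SUNRPC_XPRT_RDMA
init_server:
#endif
	printf("server initialised\n");
	return 0;
}

int main(void) { return create_server(); }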
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 626d1382002e..6b3b372b59b9 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -8,7 +8,6 @@
#include <linux/file.h>
#include <linux/falloc.h>
#include <linux/nfs_fs.h>
-#include <uapi/linux/btrfs.h> /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 30426c1a1bbd..22dc30a679a0 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -568,9 +568,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
struct key *key = cons->key;
- int ret = -ENOMEM;
+ int ret = -ENOKEY;
+
+ if (!aux)
+ goto out1;
/* msg and im are freed in idmap_pipe_destroy_msg */
+ ret = -ENOMEM;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out1;
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 8c3f327d858d..24f06dcc2b08 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -270,8 +270,6 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
if (mountdata->addrlen == 0)
continue;
- rpc_set_port(mountdata->addr, NFS_PORT);
-
memcpy(page2, buf->data, buf->len);
page2[buf->len] = '\0';
mountdata->hostname = page2;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 56fa5a16e097..47f3c273245e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -54,6 +54,7 @@
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
+#include <linux/iversion.h>
#include "nfs4_fs.h"
#include "delegation.h"
@@ -1045,16 +1046,16 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
spin_lock(&dir->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
- if (cinfo->atomic && cinfo->before == dir->i_version) {
+ if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) {
nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
nfsi->attrtimeo_timestamp = jiffies;
} else {
nfs_force_lookup_revalidate(dir);
- if (cinfo->before != dir->i_version)
+ if (cinfo->before != inode_peek_iversion_raw(dir))
nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
NFS_INO_INVALID_ACL;
}
- dir->i_version = cinfo->after;
+ inode_set_iversion_raw(dir, cinfo->after);
nfsi->read_cache_jiffies = timestamp;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
nfs_fscache_invalidate(dir);
@@ -2019,7 +2020,7 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
return ret;
}
-static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
+static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
{
switch (err) {
default:
@@ -2066,7 +2067,11 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
return -EAGAIN;
case -ENOMEM:
case -NFS4ERR_DENIED:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ if (fl) {
+ struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
+ if (lsp)
+ set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
+ }
return 0;
}
return err;
@@ -2102,7 +2107,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
err = nfs4_open_recover_helper(opendata, FMODE_READ);
}
nfs4_opendata_put(opendata);
- return nfs4_handle_delegation_recall_error(server, state, stateid, err);
+ return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
}
static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
@@ -2454,7 +2459,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
data->file_created = true;
else if (o_res->cinfo.before != o_res->cinfo.after)
data->file_created = true;
- if (data->file_created || dir->i_version != o_res->cinfo.after)
+ if (data->file_created ||
+ inode_peek_iversion_raw(dir) != o_res->cinfo.after)
update_changeattr(dir, &o_res->cinfo,
o_res->f_attr->time_start);
}
@@ -3148,6 +3154,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
nfs4_stateid *res_stateid = NULL;
+ struct nfs4_exception exception = {
+ .state = state,
+ .inode = calldata->inode,
+ .stateid = &calldata->arg.stateid,
+ };
dprintk("%s: begin!\n", __func__);
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -3213,7 +3224,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
case -NFS4ERR_BAD_STATEID:
break;
default:
- if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
+ task->tk_status = nfs4_async_handle_exception(task,
+ server, task->tk_status, &exception);
+ if (exception.retry)
goto out_restart;
}
nfs_clear_open_stateid(state, &calldata->arg.stateid,
@@ -5757,6 +5770,10 @@ struct nfs4_delegreturndata {
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
+ struct nfs4_exception exception = {
+ .inode = data->inode,
+ .stateid = &data->stateid,
+ };
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
@@ -5818,10 +5835,11 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
}
/* Fallthrough */
default:
- if (nfs4_async_handle_error(task, data->res.server,
- NULL, NULL) == -EAGAIN) {
+ task->tk_status = nfs4_async_handle_exception(task,
+ data->res.server, task->tk_status,
+ &exception);
+ if (exception.retry)
goto out_restart;
- }
}
data->rpc_status = task->tk_status;
return;
@@ -6059,6 +6077,10 @@ static void nfs4_locku_release_calldata(void *data)
static void nfs4_locku_done(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
+ struct nfs4_exception exception = {
+ .inode = calldata->lsp->ls_state->inode,
+ .stateid = &calldata->arg.stateid,
+ };
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
@@ -6082,8 +6104,10 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
rpc_restart_call_prepare(task);
break;
default:
- if (nfs4_async_handle_error(task, calldata->server,
- NULL, NULL) == -EAGAIN)
+ task->tk_status = nfs4_async_handle_exception(task,
+ calldata->server, task->tk_status,
+ &exception);
+ if (exception.retry)
rpc_restart_call_prepare(task);
}
nfs_release_seqid(calldata->arg.seqid);
@@ -6739,7 +6763,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
if (err != 0)
return err;
err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
- return nfs4_handle_delegation_recall_error(server, state, stateid, err);
+ return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
}
struct nfs_release_lockowner_data {
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e4f4a09ed9f4..91a4d4eeb235 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1482,6 +1482,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
struct inode *inode = state->inode;
struct nfs_inode *nfsi = NFS_I(inode);
struct file_lock *fl;
+ struct nfs4_lock_state *lsp;
int status = 0;
struct file_lock_context *flctx = inode->i_flctx;
struct list_head *list;
@@ -1522,7 +1523,9 @@ restart:
case -NFS4ERR_DENIED:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ lsp = fl->fl_u.nfs4_fl.owner;
+ if (lsp)
+ set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
status = 0;
}
spin_lock(&flctx->flc_lock);
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index 0d91d84e5822..c394e4447100 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -32,7 +32,7 @@ static struct ctl_table nfs4_cb_sysctls[] = {
.data = &nfs_idmap_cache_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .proc_handler = proc_dointvec,
},
{ }
};
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 77c6729e57f0..65c9c4175145 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -7678,6 +7678,22 @@ nfs4_stat_to_errno(int stat)
.p_name = #proc, \
}
+#if defined(CONFIG_NFS_V4_1)
+#define PROC41(proc, argtype, restype) \
+ PROC(proc, argtype, restype)
+#else
+#define PROC41(proc, argtype, restype) \
+ STUB(proc)
+#endif
+
+#if defined(CONFIG_NFS_V4_2)
+#define PROC42(proc, argtype, restype) \
+ PROC(proc, argtype, restype)
+#else
+#define PROC42(proc, argtype, restype) \
+ STUB(proc)
+#endif
+
const struct rpc_procinfo nfs4_procedures[] = {
PROC(READ, enc_read, dec_read),
PROC(WRITE, enc_write, dec_write),
@@ -7698,7 +7714,6 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC(ACCESS, enc_access, dec_access),
PROC(GETATTR, enc_getattr, dec_getattr),
PROC(LOOKUP, enc_lookup, dec_lookup),
- PROC(LOOKUPP, enc_lookupp, dec_lookupp),
PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
PROC(REMOVE, enc_remove, dec_remove),
PROC(RENAME, enc_rename, dec_rename),
@@ -7717,33 +7732,30 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
PROC(SECINFO, enc_secinfo, dec_secinfo),
PROC(FSID_PRESENT, enc_fsid_present, dec_fsid_present),
-#if defined(CONFIG_NFS_V4_1)
- PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
- PROC(CREATE_SESSION, enc_create_session, dec_create_session),
- PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
- PROC(SEQUENCE, enc_sequence, dec_sequence),
- PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
- PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
- PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
- PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
- PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
- PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
- PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
- PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
- PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
+ PROC41(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
+ PROC41(CREATE_SESSION, enc_create_session, dec_create_session),
+ PROC41(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
+ PROC41(SEQUENCE, enc_sequence, dec_sequence),
+ PROC41(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+ PROC41(RECLAIM_COMPLETE,enc_reclaim_complete, dec_reclaim_complete),
+ PROC41(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
+ PROC41(LAYOUTGET, enc_layoutget, dec_layoutget),
+ PROC41(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
+ PROC41(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
+ PROC41(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
+ PROC41(TEST_STATEID, enc_test_stateid, dec_test_stateid),
+ PROC41(FREE_STATEID, enc_free_stateid, dec_free_stateid),
STUB(GETDEVICELIST),
- PROC(BIND_CONN_TO_SESSION,
+ PROC41(BIND_CONN_TO_SESSION,
enc_bind_conn_to_session, dec_bind_conn_to_session),
- PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
-#endif /* CONFIG_NFS_V4_1 */
-#ifdef CONFIG_NFS_V4_2
- PROC(SEEK, enc_seek, dec_seek),
- PROC(ALLOCATE, enc_allocate, dec_allocate),
- PROC(DEALLOCATE, enc_deallocate, dec_deallocate),
- PROC(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
- PROC(CLONE, enc_clone, dec_clone),
- PROC(COPY, enc_copy, dec_copy),
-#endif /* CONFIG_NFS_V4_2 */
+ PROC41(DESTROY_CLIENTID,enc_destroy_clientid, dec_destroy_clientid),
+ PROC42(SEEK, enc_seek, dec_seek),
+ PROC42(ALLOCATE, enc_allocate, dec_allocate),
+ PROC42(DEALLOCATE, enc_deallocate, dec_deallocate),
+ PROC42(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
+ PROC42(CLONE, enc_clone, dec_clone),
+ PROC42(COPY, enc_copy, dec_copy),
+ PROC(LOOKUPP, enc_lookupp, dec_lookupp),
};
static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
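
The PROC41/PROC42 wrappers above keep the procedure table dense: entries for minor versions that are compiled out degrade to STUB() placeholders instead of vanishing, so the array layout stays stable. A minimal sketch of the same macro pattern, using hypothetical names rather than the real rpc_procinfo definitions:

#include <stdio.h>

struct proc_entry {
	const char *p_name;
	void (*p_encode)(void);
	void (*p_decode)(void);
};

#define STUB(proc)           { .p_name = #proc }
#define PROC(proc, enc, dec) { .p_name = #proc, .p_encode = enc, .p_decode = dec }

#ifdef CONFIG_FEATURE_X
#define PROC_X(proc, enc, dec) PROC(proc, enc, dec)
#else
#define PROC_X(proc, enc, dec) STUB(proc)  /* entry stays, handlers drop out */
#endif

static void enc_foo(void) { }
static void dec_foo(void) { }

static const struct proc_entry table[] = {
	PROC(FOO, enc_foo, dec_foo),
	PROC_X(BAR, enc_foo, dec_foo), /* stubbed when CONFIG_FEATURE_X is off */
};

int main(void)
{
	printf("%s %s\n", table[0].p_name, table[1].p_name); /* FOO BAR */
	return 0;
}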
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 093290c42d7c..bd60f8d1e181 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -9,6 +9,7 @@
#define _TRACE_NFS_H
#include <linux/tracepoint.h>
+#include <linux/iversion.h>
#define nfs_show_file_type(ftype) \
__print_symbolic(ftype, \
@@ -61,7 +62,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event,
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
- __entry->version = inode->i_version;
+ __entry->version = inode_peek_iversion_raw(inode);
),
TP_printk(
@@ -100,7 +101,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done,
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
__entry->type = nfs_umode_to_dtype(inode->i_mode);
- __entry->version = inode->i_version;
+ __entry->version = inode_peek_iversion_raw(inode);
__entry->size = i_size_read(inode);
__entry->nfsi_flags = nfsi->flags;
__entry->cache_validity = nfsi->cache_validity;
@@ -796,15 +797,15 @@ TRACE_EVENT(nfs_readpage_done,
)
);
-/*
- * XXX: I tried using NFS_UNSTABLE and friends in this table, but they
- * all evaluate to 0 for some reason, even if I include linux/nfs.h.
- */
+TRACE_DEFINE_ENUM(NFS_UNSTABLE);
+TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
+TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
+
#define nfs_show_stable(stable) \
__print_symbolic(stable, \
- { 0, " (UNSTABLE)" }, \
- { 1, " (DATA_SYNC)" }, \
- { 2, " (FILE_SYNC)" })
+ { NFS_UNSTABLE, "UNSTABLE" }, \
+ { NFS_DATA_SYNC, "DATA_SYNC" }, \
+ { NFS_FILE_SYNC, "FILE_SYNC" })
TRACE_EVENT(nfs_initiate_write,
TP_PROTO(
@@ -837,12 +838,12 @@ TRACE_EVENT(nfs_initiate_write,
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%lu stable=%d%s",
+ "offset=%lld count=%lu stable=%s",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
__entry->offset, __entry->count,
- __entry->stable, nfs_show_stable(__entry->stable)
+ nfs_show_stable(__entry->stable)
)
);
@@ -881,13 +882,13 @@ TRACE_EVENT(nfs_writeback_done,
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld status=%d stable=%d%s "
+ "offset=%lld status=%d stable=%s "
"verifier 0x%016llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
__entry->offset, __entry->status,
- __entry->stable, nfs_show_stable(__entry->stable),
+ nfs_show_stable(__entry->stable),
__entry->verifier
)
);
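
The tracepoint hunk above works because __print_symbolic() resolves its value-to-name table when the trace output is formatted, and TRACE_DEFINE_ENUM() exports the enum constants so that user-space tooling parsing the trace format sees real values instead of unresolved symbols. Reduced to ordinary C, the mapping amounts to something like this sketch (the NFS stable-how values are the real ones; the helper is hypothetical):

#include <stdio.h>

enum stable_how { NFS_UNSTABLE = 0, NFS_DATA_SYNC = 1, NFS_FILE_SYNC = 2 };

static const char *show_stable(enum stable_how s)
{
	switch (s) {
	case NFS_UNSTABLE:  return "UNSTABLE";
	case NFS_DATA_SYNC: return "DATA_SYNC";
	case NFS_FILE_SYNC: return "FILE_SYNC";
	}
	return "UNKNOWN";
}

int main(void)
{
	printf("stable=%s\n", show_stable(NFS_FILE_SYNC)); /* stable=FILE_SYNC */
	return 0;
}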
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index d0543e19098a..18a7626ac638 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -537,7 +537,7 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
* @cinfo: Commit information for the call (writes only)
*/
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
- unsigned int count, unsigned int offset,
+ unsigned int count,
int how, struct nfs_commit_info *cinfo)
{
struct nfs_page *req = hdr->req;
@@ -546,10 +546,10 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
* NB: take care not to mess about with hdr->commit et al. */
hdr->args.fh = NFS_FH(hdr->inode);
- hdr->args.offset = req_offset(req) + offset;
+ hdr->args.offset = req_offset(req);
/* pnfs_set_layoutcommit needs this */
hdr->mds_offset = hdr->args.offset;
- hdr->args.pgbase = req->wb_pgbase + offset;
+ hdr->args.pgbase = req->wb_pgbase;
hdr->args.pages = hdr->page_array.pagevec;
hdr->args.count = count;
hdr->args.context = get_nfs_open_context(req->wb_context);
@@ -789,7 +789,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
/* Set up the argument struct */
- nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo);
+ nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index d602fe9e1ac8..c13e826614b5 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -655,7 +655,7 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
return 0;
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
- dprintk("%s: freeing lseg %p iomode %d seq %u"
+ dprintk("%s: freeing lseg %p iomode %d seq %u "
"offset %llu length %llu\n", __func__,
lseg, lseg->pls_range.iomode, lseg->pls_seq,
lseg->pls_range.offset, lseg->pls_range.length);
@@ -2255,7 +2255,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_write_mds(desc);
mirror->pg_recoalesce = 1;
}
- hdr->release(hdr);
+ hdr->completion_ops->completion(hdr);
}
static enum pnfs_try_status
@@ -2378,7 +2378,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_read_mds(desc);
mirror->pg_recoalesce = 1;
}
- hdr->release(hdr);
+ hdr->completion_ops->completion(hdr);
}
/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 8d507c361d98..daf6cbf5c15f 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -40,6 +40,7 @@ enum {
NFS_LSEG_ROC, /* roc bit received from server */
NFS_LSEG_LAYOUTCOMMIT, /* layoutcommit bit set for layoutcommit */
NFS_LSEG_LAYOUTRETURN, /* layoutreturn bit set for layoutreturn */
+ NFS_LSEG_UNAVAILABLE, /* unavailable bit set for temporary problem */
};
/* Individual ip address */
@@ -86,6 +87,7 @@ enum pnfs_try_status {
*/
#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */
#define NFS4_DEF_DS_RETRANS 5
+#define PNFS_DEVICE_RETRY_TIMEOUT (120*HZ)
/* error codes for internal use */
#define NFS4ERR_RESET_TO_MDS 12001
@@ -524,8 +526,10 @@ static inline int pnfs_return_layout(struct inode *ino)
struct nfs_inode *nfsi = NFS_I(ino);
struct nfs_server *nfss = NFS_SERVER(ino);
- if (pnfs_enabled_sb(nfss) && nfsi->layout)
+ if (pnfs_enabled_sb(nfss) && nfsi->layout) {
+ set_bit(NFS_LAYOUT_RETURN_REQUESTED, &nfsi->layout->plh_flags);
return _pnfs_return_layout(ino);
+ }
return 0;
}
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index 2961fcd7a2df..e8a07b3f9aaa 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -43,7 +43,6 @@
#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
-#define PNFS_DEVICE_RETRY_TIMEOUT (120*HZ)
static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 4a379d7918f2..7428a669d7a7 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -23,6 +23,7 @@
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
+#include <linux/iversion.h>
#include <linux/uaccess.h>
@@ -753,11 +754,8 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
*/
spin_lock(&mapping->private_lock);
if (!nfs_have_writebacks(inode) &&
- NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) {
- spin_lock(&inode->i_lock);
- inode->i_version++;
- spin_unlock(&inode->i_lock);
- }
+ NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
+ inode_inc_iversion_raw(inode);
if (likely(!PageSwapCache(req->wb_page))) {
set_bit(PG_MAPPED, &req->wb_flags);
SetPagePrivate(req->wb_page);
@@ -1837,6 +1835,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
next:
nfs_unlock_and_release_request(req);
+ /* Latency breaker */
+ cond_resched();
}
nfss = NFS_SERVER(data->inode);
if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index f650e475d8f0..fdf2aad73470 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -60,10 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
gi->gid[i] = exp->ex_anon_gid;
else
gi->gid[i] = rqgi->gid[i];
-
- /* Each thread allocates its own gi, no race */
- groups_sort(gi);
}
+
+ /* Each thread allocates its own gi, no race */
+ groups_sort(gi);
} else {
gi = get_group_info(rqgi);
}
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 43f31cf49bae..b8444189223b 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -11,6 +11,7 @@
#include <linux/crc32.h>
#include <linux/sunrpc/svc.h>
#include <uapi/linux/nfsd/nfsfh.h>
+#include <linux/iversion.h>
static inline __u32 ino_t_to_u32(ino_t ino)
{
@@ -259,7 +260,7 @@ static inline u64 nfsd4_change_attribute(struct inode *inode)
chattr = inode->i_ctime.tv_sec;
chattr <<= 30;
chattr += inode->i_ctime.tv_nsec;
- chattr += inode->i_version;
+ chattr += inode_query_iversion(inode);
return chattr;
}
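
nfsd4_change_attribute() above packs ctime and the inode version into one 64-bit value: seconds shifted up by 30 bits leave room for the nanoseconds (always below 2^30), and the queried i_version is added on top so explicit version bumps also perturb the result. A stand-alone sketch of the arithmetic (hypothetical function name):

#include <stdint.h>
#include <stdio.h>

static uint64_t change_attribute(int64_t ctime_sec, long ctime_nsec,
				 uint64_t iversion)
{
	uint64_t chattr = (uint64_t)ctime_sec;

	chattr <<= 30;          /* nanoseconds (< 10^9) fit in 30 bits */
	chattr += ctime_nsec;
	chattr += iversion;     /* explicit version bumps perturb the value */
	return chattr;
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)change_attribute(1700000000, 123456789, 42));
	return 0;
}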
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index d0d4bc4c4b70..ef08d64c84b8 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -239,10 +239,10 @@ out_close_fd:
}
/* fanotify userspace file descriptor functions */
-static unsigned int fanotify_poll(struct file *file, poll_table *wait)
+static __poll_t fanotify_poll(struct file *file, poll_table *wait)
{
struct fsnotify_group *group = file->private_data;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &group->notification_waitq, wait);
spin_lock(&group->notification_lock);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index d3c20e0bb046..5c29bf16814f 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -107,10 +107,10 @@ static inline u32 inotify_mask_to_arg(__u32 mask)
}
/* inotify userspace file descriptor functions */
-static unsigned int inotify_poll(struct file *file, poll_table *wait)
+static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
struct fsnotify_group *group = file->private_data;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &group->notification_waitq, wait);
spin_lock(&group->notification_lock);
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 7c6f76d29f56..36b0772701a0 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -103,14 +103,14 @@ slow:
goto got_it;
}
-void *ns_get_path(struct path *path, struct task_struct *task,
- const struct proc_ns_operations *ns_ops)
+void *ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb,
+ void *private_data)
{
struct ns_common *ns;
void *ret;
again:
- ns = ns_ops->get(task);
+ ns = ns_get_cb(private_data);
if (!ns)
return ERR_PTR(-ENOENT);
@@ -120,6 +120,29 @@ again:
return ret;
}
+struct ns_get_path_task_args {
+ const struct proc_ns_operations *ns_ops;
+ struct task_struct *task;
+};
+
+static struct ns_common *ns_get_path_task(void *private_data)
+{
+ struct ns_get_path_task_args *args = private_data;
+
+ return args->ns_ops->get(args->task);
+}
+
+void *ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops)
+{
+ struct ns_get_path_task_args args = {
+ .ns_ops = ns_ops,
+ .task = task,
+ };
+
+ return ns_get_path_cb(path, ns_get_path_task, &args);
+}
+
int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns))
{
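
The nsfs.c hunk is a classic callback extraction: the generic core ns_get_path_cb() fetches the namespace through a caller-supplied function, and the original ns_get_path() becomes a thin adapter that packages its task/ns_ops arguments for such a callback. The same shape in miniature, with hypothetical names:

#include <stdio.h>

struct ns { int id; };

typedef struct ns *get_ns_fn(void *private_data);

/* Generic core: obtains the object via a caller-supplied callback. */
static int get_path_cb(get_ns_fn *cb, void *private_data)
{
	struct ns *ns = cb(private_data);

	if (!ns)
		return -1; /* -ENOENT in the real code */
	printf("got ns %d\n", ns->id);
	return 0;
}

/* The old fixed-signature entry point becomes a thin adapter. */
struct task_args { struct ns *task_ns; };

static struct ns *get_from_task(void *private_data)
{
	struct task_args *args = private_data;

	return args->task_ns;
}

int main(void)
{
	struct ns n = { .id = 7 };
	struct task_args args = { .task_ns = &n };

	return get_path_cb(get_from_task, &args);
}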
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 7c410f879412..1c1ee489284b 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -560,13 +560,6 @@ static int ntfs_read_locked_inode(struct inode *vi)
ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
/* Setup the generic vfs inode parts now. */
-
- /*
- * This is for checking whether an inode has changed w.r.t. a file so
- * that the file can be updated if necessary (compare with f_version).
- */
- vi->i_version = 1;
-
vi->i_uid = vol->uid;
vi->i_gid = vol->gid;
vi->i_mode = 0;
@@ -1240,7 +1233,6 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
base_ni = NTFS_I(base_vi);
/* Just mirror the values from the base inode. */
- vi->i_version = base_vi->i_version;
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
@@ -1507,7 +1499,6 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
ni = NTFS_I(vi);
base_ni = NTFS_I(base_vi);
/* Just mirror the values from the base inode. */
- vi->i_version = base_vi->i_version;
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index ee8392aee9f6..2831f495a674 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2641,12 +2641,6 @@ mft_rec_already_initialized:
goto undo_mftbmp_alloc;
}
vi->i_ino = bit;
- /*
- * This is for checking whether an inode has changed w.r.t. a
- * file so that the file can be updated if necessary (compare
- * with f_version).
- */
- vi->i_version = 1;
/* The owner and group come from the ntfs volume. */
vi->i_uid = vol->uid;
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 40b5cc97f7b0..917fadca8a7b 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -311,7 +311,9 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
if (had_lock < 0)
return ERR_PTR(had_lock);
+ down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, type, di_bh);
+ up_read(&OCFS2_I(inode)->ip_xattr_sem);
ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
brelse(di_bh);
@@ -330,7 +332,9 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
return 0;
+ down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
+ up_read(&OCFS2_I(inode)->ip_xattr_sem);
if (IS_ERR(acl) || !acl)
return PTR_ERR(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
@@ -361,8 +365,10 @@ int ocfs2_init_acl(handle_t *handle,
if (!S_ISLNK(inode->i_mode)) {
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+ down_read(&OCFS2_I(dir)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
dir_bh);
+ up_read(&OCFS2_I(dir)->ip_xattr_sem);
if (IS_ERR(acl))
return PTR_ERR(acl);
}
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index ab5105f9767e..9a876bb07cac 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -165,6 +165,13 @@ static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec);
static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et);
static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et);
+
+static int ocfs2_reuse_blk_from_dealloc(handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ struct buffer_head **new_eb_bh,
+ int blk_wanted, int *blk_given);
+static int ocfs2_is_dealloc_empty(struct ocfs2_extent_tree *et);
+
static const struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = {
.eo_set_last_eb_blk = ocfs2_dinode_set_last_eb_blk,
.eo_get_last_eb_blk = ocfs2_dinode_get_last_eb_blk,
@@ -448,6 +455,7 @@ static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et,
if (!obj)
obj = (void *)bh->b_data;
et->et_object = obj;
+ et->et_dealloc = NULL;
et->et_ops->eo_fill_root_el(et);
if (!et->et_ops->eo_fill_max_leaf_clusters)
@@ -1158,7 +1166,7 @@ static int ocfs2_add_branch(handle_t *handle,
struct buffer_head **last_eb_bh,
struct ocfs2_alloc_context *meta_ac)
{
- int status, new_blocks, i;
+ int status, new_blocks, i, block_given = 0;
u64 next_blkno, new_last_eb_blk;
struct buffer_head *bh;
struct buffer_head **new_eb_bhs = NULL;
@@ -1213,11 +1221,31 @@ static int ocfs2_add_branch(handle_t *handle,
goto bail;
}
- status = ocfs2_create_new_meta_bhs(handle, et, new_blocks,
- meta_ac, new_eb_bhs);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
+	/* Firstly, try to reuse the dealloc, since we have already estimated
+	 * how many extent blocks we may use.
+ */
+ if (!ocfs2_is_dealloc_empty(et)) {
+ status = ocfs2_reuse_blk_from_dealloc(handle, et,
+ new_eb_bhs, new_blocks,
+ &block_given);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ BUG_ON(block_given > new_blocks);
+
+ if (block_given < new_blocks) {
+ BUG_ON(!meta_ac);
+ status = ocfs2_create_new_meta_bhs(handle, et,
+ new_blocks - block_given,
+ meta_ac,
+ &new_eb_bhs[block_given]);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
}
/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
@@ -1340,15 +1368,25 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
struct ocfs2_alloc_context *meta_ac,
struct buffer_head **ret_new_eb_bh)
{
- int status, i;
+ int status, i, block_given = 0;
u32 new_clusters;
struct buffer_head *new_eb_bh = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *root_el;
struct ocfs2_extent_list *eb_el;
- status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
- &new_eb_bh);
+ if (!ocfs2_is_dealloc_empty(et)) {
+ status = ocfs2_reuse_blk_from_dealloc(handle, et,
+ &new_eb_bh, 1,
+ &block_given);
+ } else if (meta_ac) {
+ status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
+ &new_eb_bh);
+
+ } else {
+ BUG();
+ }
+
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -1511,7 +1549,7 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
int depth = le16_to_cpu(el->l_tree_depth);
struct buffer_head *bh = NULL;
- BUG_ON(meta_ac == NULL);
+ BUG_ON(meta_ac == NULL && ocfs2_is_dealloc_empty(et));
shift = ocfs2_find_branch_target(et, &bh);
if (shift < 0) {
@@ -2598,11 +2636,8 @@ static void ocfs2_unlink_subtree(handle_t *handle,
int i;
struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el;
- struct ocfs2_extent_list *el;
struct ocfs2_extent_block *eb;
- el = path_leaf_el(left_path);
-
eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data;
for(i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
@@ -3938,7 +3973,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle,
struct ocfs2_path *path,
struct ocfs2_extent_rec *insert_rec)
{
- int ret, i, next_free;
+ int i, next_free;
struct buffer_head *bh;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
@@ -3955,7 +3990,6 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle,
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Owner %llu has a bad extent list\n",
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
- ret = -EIO;
return;
}
@@ -5057,7 +5091,6 @@ int ocfs2_split_extent(handle_t *handle,
struct buffer_head *last_eb_bh = NULL;
struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
struct ocfs2_merge_ctxt ctxt;
- struct ocfs2_extent_list *rightmost_el;
if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) ||
((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) <
@@ -5093,9 +5126,7 @@ int ocfs2_split_extent(handle_t *handle,
}
eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
- rightmost_el = &eb->h_list;
- } else
- rightmost_el = path_root_el(path);
+ }
if (rec->e_cpos == split_rec->e_cpos &&
rec->e_leaf_clusters == split_rec->e_leaf_clusters)
@@ -6585,6 +6616,154 @@ ocfs2_find_per_slot_free_list(int type,
return fl;
}
+static struct ocfs2_per_slot_free_list *
+ocfs2_find_preferred_free_list(int type,
+ int preferred_slot,
+ int *real_slot,
+ struct ocfs2_cached_dealloc_ctxt *ctxt)
+{
+ struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
+
+ while (fl) {
+ if (fl->f_inode_type == type && fl->f_slot == preferred_slot) {
+ *real_slot = fl->f_slot;
+ return fl;
+ }
+
+ fl = fl->f_next_suballocator;
+ }
+
+	/* If we can't find any free list matching the preferred slot, just
+	 * use the first one.
+ */
+ fl = ctxt->c_first_suballocator;
+ *real_slot = fl->f_slot;
+
+ return fl;
+}
+
+/* A return value of 1 indicates the dealloc is empty */
+static int ocfs2_is_dealloc_empty(struct ocfs2_extent_tree *et)
+{
+ struct ocfs2_per_slot_free_list *fl = NULL;
+
+ if (!et->et_dealloc)
+ return 1;
+
+ fl = et->et_dealloc->c_first_suballocator;
+ if (!fl)
+ return 1;
+
+ if (!fl->f_first)
+ return 1;
+
+ return 0;
+}
+
+/* If extents were deleted from the tree due to extent rotation and merging,
+ * and no metadata was reserved ahead of time, try to reuse those just-deleted
+ * extents. This is only used to reuse extent blocks.
+ * It should find enough extent blocks in the dealloc if our estimate of the
+ * metadata needed is accurate.
+ */
+static int ocfs2_reuse_blk_from_dealloc(handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ struct buffer_head **new_eb_bh,
+ int blk_wanted, int *blk_given)
+{
+ int i, status = 0, real_slot;
+ struct ocfs2_cached_dealloc_ctxt *dealloc;
+ struct ocfs2_per_slot_free_list *fl;
+ struct ocfs2_cached_block_free *bf;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_super *osb =
+ OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
+
+ *blk_given = 0;
+
+	/* If the extent tree doesn't have a dealloc, this is not a fault. Just
+	 * tell the caller that the dealloc can't provide any blocks and that
+	 * it should ask the allocator to claim more space.
+ */
+ dealloc = et->et_dealloc;
+ if (!dealloc)
+ goto bail;
+
+ for (i = 0; i < blk_wanted; i++) {
+ /* Prefer to use local slot */
+ fl = ocfs2_find_preferred_free_list(EXTENT_ALLOC_SYSTEM_INODE,
+ osb->slot_num, &real_slot,
+ dealloc);
+		/* If no more blocks can be reused, we should claim more from
+		 * the allocator. Just return normally here.
+ */
+ if (!fl) {
+ status = 0;
+ break;
+ }
+
+ bf = fl->f_first;
+ fl->f_first = bf->free_next;
+
+ new_eb_bh[i] = sb_getblk(osb->sb, bf->free_blk);
+ if (new_eb_bh[i] == NULL) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mlog(0, "Reusing block(%llu) from "
+ "dealloc(local slot:%d, real slot:%d)\n",
+ bf->free_blk, osb->slot_num, real_slot);
+
+ ocfs2_set_new_buffer_uptodate(et->et_ci, new_eb_bh[i]);
+
+ status = ocfs2_journal_access_eb(handle, et->et_ci,
+ new_eb_bh[i],
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ memset(new_eb_bh[i]->b_data, 0, osb->sb->s_blocksize);
+ eb = (struct ocfs2_extent_block *) new_eb_bh[i]->b_data;
+
+		/* We can't guarantee that the buffer head is still cached, so
+		 * re-populate the extent block.
+ */
+ strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
+ eb->h_blkno = cpu_to_le64(bf->free_blk);
+ eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
+ eb->h_suballoc_slot = cpu_to_le16(real_slot);
+ eb->h_suballoc_loc = cpu_to_le64(bf->free_bg);
+ eb->h_suballoc_bit = cpu_to_le16(bf->free_bit);
+ eb->h_list.l_count =
+ cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
+
+		/* The caller will dirty this buffer as well, so
+		 * this isn't strictly necessary.
+ */
+ ocfs2_journal_dirty(handle, new_eb_bh[i]);
+
+ if (!fl->f_first) {
+ dealloc->c_first_suballocator = fl->f_next_suballocator;
+ kfree(fl);
+ }
+ kfree(bf);
+ }
+
+ *blk_given = i;
+
+bail:
+ if (unlikely(status < 0)) {
+ for (i = 0; i < blk_wanted; i++)
+ brelse(new_eb_bh[i]);
+ }
+
+ return status;
+}
+
int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
int type, int slot, u64 suballoc,
u64 blkno, unsigned int bit)
@@ -7382,6 +7561,7 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
struct buffer_head *gd_bh = NULL;
struct ocfs2_dinode *main_bm;
struct ocfs2_group_desc *gd = NULL;
+ struct ocfs2_trim_fs_info info, *pinfo = NULL;
start = range->start >> osb->s_clustersize_bits;
len = range->len >> osb->s_clustersize_bits;
@@ -7419,6 +7599,42 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
trace_ocfs2_trim_fs(start, len, minlen);
+ ocfs2_trim_fs_lock_res_init(osb);
+ ret = ocfs2_trim_fs_lock(osb, NULL, 1);
+ if (ret < 0) {
+ if (ret != -EAGAIN) {
+ mlog_errno(ret);
+ ocfs2_trim_fs_lock_res_uninit(osb);
+ goto out_unlock;
+ }
+
+ mlog(ML_NOTICE, "Wait for trim on device (%s) to "
+ "finish, which is running from another node.\n",
+ osb->dev_str);
+ ret = ocfs2_trim_fs_lock(osb, &info, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ ocfs2_trim_fs_lock_res_uninit(osb);
+ goto out_unlock;
+ }
+
+ if (info.tf_valid && info.tf_success &&
+ info.tf_start == start && info.tf_len == len &&
+ info.tf_minlen == minlen) {
+ /* Avoid sending duplicated trim to a shared device */
+ mlog(ML_NOTICE, "The same trim on device (%s) was "
+ "just done from node (%u), return.\n",
+ osb->dev_str, info.tf_nodenum);
+ range->len = info.tf_trimlen;
+ goto out_trimunlock;
+ }
+ }
+
+ info.tf_nodenum = osb->node_num;
+ info.tf_start = start;
+ info.tf_len = len;
+ info.tf_minlen = minlen;
+
/* Determine first and last group to examine based on start and len */
first_group = ocfs2_which_cluster_group(main_bm_inode, start);
if (first_group == osb->first_cluster_group_blkno)
@@ -7463,6 +7679,13 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
group += ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg);
}
range->len = trimmed * sb->s_blocksize;
+
+ info.tf_trimlen = range->len;
+ info.tf_success = (ret ? 0 : 1);
+ pinfo = &info;
+out_trimunlock:
+ ocfs2_trim_fs_unlock(osb, pinfo);
+ ocfs2_trim_fs_lock_res_uninit(osb);
out_unlock:
ocfs2_inode_unlock(main_bm_inode, 0);
brelse(main_bm_bh);
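
The trim path above takes the new trim-fs lock with DLM_LKF_NOQUEUE first; if another node holds it, it blocks on the lock and then reads the lock value block to see whether the peer just completed an identical trim, in which case it reuses the peer's result (range->len = info.tf_trimlen) instead of re-trimming the shared device. The comparison it performs is roughly this sketch, assuming a hypothetical struct that mirrors ocfs2_trim_fs_info:

#include <stdio.h>

struct trim_info {
	int valid, success;
	unsigned long long start, len, minlen, trimlen;
};

/* Returns nonzero if a peer node already performed the identical trim. */
static int trim_already_done(const struct trim_info *info,
			     unsigned long long start,
			     unsigned long long len,
			     unsigned long long minlen)
{
	return info->valid && info->success &&
	       info->start == start && info->len == len &&
	       info->minlen == minlen;
}

int main(void)
{
	struct trim_info peer = { 1, 1, 0, 4096, 1, 4096 };

	printf("%d\n", trim_already_done(&peer, 0, 4096, 1)); /* prints 1 */
	return 0;
}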
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 27b75cf32cfa..250bcacdf9e9 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -61,6 +61,7 @@ struct ocfs2_extent_tree {
ocfs2_journal_access_func et_root_journal_access;
void *et_object;
unsigned int et_max_leaf_clusters;
+ struct ocfs2_cached_dealloc_ctxt *et_dealloc;
};
/*
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index d1516327b787..e8e205bf2e41 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -797,6 +797,7 @@ struct ocfs2_write_ctxt {
struct ocfs2_cached_dealloc_ctxt w_dealloc;
struct list_head w_unwritten_list;
+ unsigned int w_unwritten_count;
};
void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
@@ -1386,6 +1387,7 @@ retry:
desc->c_clear_unwritten = 0;
list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
list_add_tail(&new->ue_node, &wc->w_unwritten_list);
+ wc->w_unwritten_count++;
new = NULL;
unlock:
spin_unlock(&oi->ip_lock);
@@ -2256,7 +2258,7 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
ue->ue_phys = desc->c_phys;
list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
- dwc->dw_zero_count++;
+ dwc->dw_zero_count += wc->w_unwritten_count;
}
ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
@@ -2330,6 +2332,12 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
+	/* Attach the dealloc to the extent tree in case we can reuse extents
+	 * that have already been unlinked from the current extent tree due
+	 * to extent rotation and merging.
+ */
+ et.et_dealloc = &dealloc;
+
ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
&data_ac, &meta_ac);
if (ret) {
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 62e8ec619b4c..af2e7473956e 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -314,12 +314,13 @@ void o2quo_conn_err(u8 node)
node, qs->qs_connected);
clear_bit(node, qs->qs_conn_bm);
+
+ if (test_bit(node, qs->qs_hb_bm))
+ o2quo_set_hold(qs, node);
}
mlog(0, "node %u, %d total\n", node, qs->qs_connected);
- if (test_bit(node, qs->qs_hb_bm))
- o2quo_set_hold(qs, node);
spin_unlock(&qs->qs_lock);
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index bebe59feca58..eac5140aac47 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -918,7 +918,8 @@ static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
{
struct kvec vec = { .iov_len = len, .iov_base = data, };
struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
- return kernel_recvmsg(sock, &msg, &vec, 1, len, msg.msg_flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, len);
+ return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
}
static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index b95e7df5b76a..0276f7f8d5e6 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -196,7 +196,7 @@ struct o2net_msg_handler {
u32 nh_msg_type;
u32 nh_key;
o2net_msg_handler_func *nh_func;
- o2net_msg_handler_func *nh_func_data;
+ void *nh_func_data;
o2net_post_msg_handler_func
*nh_post_func;
struct kref nh_kref;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index febe6312ceff..b7520e20a770 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -42,6 +42,7 @@
#include <linux/highmem.h>
#include <linux/quotaops.h>
#include <linux/sort.h>
+#include <linux/iversion.h>
#include <cluster/masklog.h>
@@ -1174,7 +1175,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
le16_add_cpu(&pde->rec_len,
le16_to_cpu(de->rec_len));
de->inode = 0;
- dir->i_version++;
+ inode_inc_iversion(dir);
ocfs2_journal_dirty(handle, bh);
goto bail;
}
@@ -1729,7 +1730,7 @@ int __ocfs2_add_entry(handle_t *handle,
if (ocfs2_dir_indexed(dir))
ocfs2_recalc_free_list(dir, handle, lookup);
- dir->i_version++;
+ inode_inc_iversion(dir);
ocfs2_journal_dirty(handle, insert_bh);
retval = 0;
goto bail;
@@ -1775,7 +1776,7 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (*f_version != inode->i_version) {
+ if (inode_cmp_iversion(inode, *f_version)) {
for (i = 0; i < i_size_read(inode) && i < offset; ) {
de = (struct ocfs2_dir_entry *)
(data->id_data + i);
@@ -1791,7 +1792,7 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
i += le16_to_cpu(de->rec_len);
}
ctx->pos = offset = i;
- *f_version = inode->i_version;
+ *f_version = inode_query_iversion(inode);
}
de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
@@ -1869,7 +1870,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (*f_version != inode->i_version) {
+ if (inode_cmp_iversion(inode, *f_version)) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ocfs2_dir_entry *) (bh->b_data + i);
/* It's too expensive to do a full
@@ -1886,7 +1887,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
offset = i;
ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
| offset;
- *f_version = inode->i_version;
+ *f_version = inode_query_iversion(inode);
}
while (ctx->pos < i_size_read(inode)
@@ -1940,7 +1941,7 @@ static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
*/
int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
{
- u64 version = inode->i_version;
+ u64 version = inode_query_iversion(inode);
ocfs2_dir_foreach_blk(inode, &version, ctx, true);
return 0;
}
@@ -1957,7 +1958,7 @@ int ocfs2_readdir(struct file *file, struct dir_context *ctx)
trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
- error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level);
+ error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level, 1);
if (lock_level && error >= 0) {
/* We release EX lock which used to update atime
* and get PR lock again to reduce contention
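
The dir.c hunks above replace open-coded i_version arithmetic with the iversion API: writers bump with inode_inc_iversion(), readdir snapshots with inode_query_iversion(), and the staleness check becomes inode_cmp_iversion(). The pattern, reduced to its essentials with hypothetical stand-ins (the real kernel helpers carry extra flag handling):

#include <stdint.h>
#include <stdio.h>

struct dir_state { uint64_t i_version; };

/* Hypothetical stand-ins for inode_inc_iversion(), inode_query_iversion()
 * and inode_cmp_iversion(). */
static void     inc_version(struct dir_state *d)   { d->i_version++; }
static uint64_t query_version(struct dir_state *d) { return d->i_version; }
static int      cmp_version(struct dir_state *d, uint64_t old)
{
	return d->i_version != old;   /* nonzero: snapshot is stale */
}

int main(void)
{
	struct dir_state dir = { .i_version = 1 };
	uint64_t f_version = query_version(&dir);   /* readdir snapshot */

	inc_version(&dir);                           /* directory modified */
	if (cmp_version(&dir, f_version)) {
		/* rescan from block start, then resynchronise the cursor */
		f_version = query_version(&dir);
	}
	printf("f_version=%llu\n", (unsigned long long)f_version);
	return 0;
}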
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9c3e0f13ca87..a7df226f9449 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1122,13 +1122,6 @@ recheck:
/* sleep if we haven't finished voting yet */
if (sleep) {
unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
-
- /*
- if (kref_read(&mle->mle_refs) < 2)
- mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
- kref_read(&mle->mle_refs),
- res->lockname.len, res->lockname.name);
- */
atomic_set(&mle->woken, 0);
(void)wait_event_timeout(mle->wq,
(atomic_read(&mle->woken) == 1),
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 9c7c18c0e129..385fcefa8bc5 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -220,9 +220,9 @@ static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
return 0;
}
-static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
+static __poll_t dlmfs_file_poll(struct file *file, poll_table *wait)
{
- int event = 0;
+ __poll_t event = 0;
struct inode *inode = file_inode(file);
struct dlmfs_inode_private *ip = DLMFS_I(inode);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 4689940a953c..9479f99c2145 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -259,6 +259,10 @@ static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
.flags = 0,
};
+static struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = {
+ .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
+};
+
static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
.flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};
@@ -676,6 +680,24 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
&ocfs2_nfs_sync_lops, osb);
}
+void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
+{
+ struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
+
+ ocfs2_lock_res_init_once(lockres);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_TRIM_FS, 0, 0, lockres->l_name);
+ ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_TRIM_FS,
+ &ocfs2_trim_fs_lops, osb);
+}
+
+void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb)
+{
+ struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
+
+ ocfs2_simple_drop_lockres(osb, lockres);
+ ocfs2_lock_res_free(lockres);
+}
+
static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
@@ -1742,6 +1764,27 @@ int ocfs2_rw_lock(struct inode *inode, int write)
return status;
}
+int ocfs2_try_rw_lock(struct inode *inode, int write)
+{
+ int status, level;
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "inode %llu try to take %s RW lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ write ? "EXMODE" : "PRMODE");
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ lockres = &OCFS2_I(inode)->ip_rw_lockres;
+
+ level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
+
+ status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
+ return status;
+}
+
void ocfs2_rw_unlock(struct inode *inode, int write)
{
int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
@@ -2486,6 +2529,15 @@ int ocfs2_inode_lock_with_page(struct inode *inode,
ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
if (ret == -EAGAIN) {
unlock_page(page);
+ /*
+		 * If we can't get the inode lock immediately, don't return
+		 * directly here, since doing so can lead to a softlockup.
+		 * Instead, take a blocking lock and unlock it immediately
+		 * before returning; this avoids wasting CPU on endless
+		 * retries and improves fairness in acquiring the lock.
+ */
+ if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
+ ocfs2_inode_unlock(inode, ex);
ret = AOP_TRUNCATED_PAGE;
}
@@ -2494,13 +2546,18 @@ int ocfs2_inode_lock_with_page(struct inode *inode,
int ocfs2_inode_lock_atime(struct inode *inode,
struct vfsmount *vfsmnt,
- int *level)
+ int *level, int wait)
{
int ret;
- ret = ocfs2_inode_lock(inode, NULL, 0);
+ if (wait)
+ ret = ocfs2_inode_lock(inode, NULL, 0);
+ else
+ ret = ocfs2_try_inode_lock(inode, NULL, 0);
+
if (ret < 0) {
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
return ret;
}
@@ -2512,9 +2569,14 @@ int ocfs2_inode_lock_atime(struct inode *inode,
struct buffer_head *bh = NULL;
ocfs2_inode_unlock(inode, 0);
- ret = ocfs2_inode_lock(inode, &bh, 1);
+ if (wait)
+ ret = ocfs2_inode_lock(inode, &bh, 1);
+ else
+ ret = ocfs2_try_inode_lock(inode, &bh, 1);
+
if (ret < 0) {
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
return ret;
}
*level = 1;
@@ -2745,6 +2807,70 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
ex ? LKM_EXMODE : LKM_PRMODE);
}
+int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
+ struct ocfs2_trim_fs_info *info, int trylock)
+{
+ int status;
+ struct ocfs2_trim_fs_lvb *lvb;
+ struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
+
+ if (info)
+ info->tf_valid = 0;
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX,
+ trylock ? DLM_LKF_NOQUEUE : 0, 0);
+ if (status < 0) {
+ if (status != -EAGAIN)
+ mlog_errno(status);
+ return status;
+ }
+
+ if (info) {
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
+ lvb->lvb_version == OCFS2_TRIMFS_LVB_VERSION) {
+ info->tf_valid = 1;
+ info->tf_success = lvb->lvb_success;
+ info->tf_nodenum = be32_to_cpu(lvb->lvb_nodenum);
+ info->tf_start = be64_to_cpu(lvb->lvb_start);
+ info->tf_len = be64_to_cpu(lvb->lvb_len);
+ info->tf_minlen = be64_to_cpu(lvb->lvb_minlen);
+ info->tf_trimlen = be64_to_cpu(lvb->lvb_trimlen);
+ }
+ }
+
+ return status;
+}
+
+void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
+ struct ocfs2_trim_fs_info *info)
+{
+ struct ocfs2_trim_fs_lvb *lvb;
+ struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
+
+ if (ocfs2_mount_local(osb))
+ return;
+
+ if (info) {
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ lvb->lvb_version = OCFS2_TRIMFS_LVB_VERSION;
+ lvb->lvb_success = info->tf_success;
+ lvb->lvb_nodenum = cpu_to_be32(info->tf_nodenum);
+ lvb->lvb_start = cpu_to_be64(info->tf_start);
+ lvb->lvb_len = cpu_to_be64(info->tf_len);
+ lvb->lvb_minlen = cpu_to_be64(info->tf_minlen);
+ lvb->lvb_trimlen = cpu_to_be64(info->tf_trimlen);
+ }
+
+ ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
+}
+
int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{
int ret;
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index a7fc18ba0dc1..256e0a9067b8 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -70,6 +70,29 @@ struct ocfs2_orphan_scan_lvb {
__be32 lvb_os_seqno;
};
+#define OCFS2_TRIMFS_LVB_VERSION 1
+
+struct ocfs2_trim_fs_lvb {
+ __u8 lvb_version;
+ __u8 lvb_success;
+ __u8 lvb_reserved[2];
+ __be32 lvb_nodenum;
+ __be64 lvb_start;
+ __be64 lvb_len;
+ __be64 lvb_minlen;
+ __be64 lvb_trimlen;
+};
+
+struct ocfs2_trim_fs_info {
+ u8 tf_valid; /* lvb is valid, or not */
+ u8 tf_success; /* trim is successful, or not */
+ u32 tf_nodenum; /* osb node number */
+ u64 tf_start; /* trim start offset in clusters */
+	u64 tf_len;		/* trim length in clusters */
+ u64 tf_minlen; /* trim minimum contiguous free clusters */
+ u64 tf_trimlen; /* trimmed length in bytes */
+};
+
struct ocfs2_lock_holder {
struct list_head oh_list;
struct pid *oh_owner_pid;
@@ -116,13 +139,14 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res);
int ocfs2_create_new_inode_locks(struct inode *inode);
int ocfs2_drop_inode_locks(struct inode *inode);
int ocfs2_rw_lock(struct inode *inode, int write);
+int ocfs2_try_rw_lock(struct inode *inode, int write);
void ocfs2_rw_unlock(struct inode *inode, int write);
int ocfs2_open_lock(struct inode *inode);
int ocfs2_try_open_lock(struct inode *inode, int write);
void ocfs2_open_unlock(struct inode *inode);
int ocfs2_inode_lock_atime(struct inode *inode,
struct vfsmount *vfsmnt,
- int *level);
+ int *level, int wait);
int ocfs2_inode_lock_full_nested(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
@@ -140,6 +164,9 @@ int ocfs2_inode_lock_with_page(struct inode *inode,
/* 99% of the time we don't want to supply any additional flags --
* those are for very specific cases only. */
#define ocfs2_inode_lock(i, b, e) ocfs2_inode_lock_full_nested(i, b, e, 0, OI_LS_NORMAL)
+#define ocfs2_try_inode_lock(i, b, e)\
+ ocfs2_inode_lock_full_nested(i, b, e, OCFS2_META_LOCK_NOQUEUE,\
+ OI_LS_NORMAL)
void ocfs2_inode_unlock(struct inode *inode,
int ex);
int ocfs2_super_lock(struct ocfs2_super *osb,
@@ -153,6 +180,12 @@ int ocfs2_rename_lock(struct ocfs2_super *osb);
void ocfs2_rename_unlock(struct ocfs2_super *osb);
int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex);
void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex);
+void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb);
+void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb);
+int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
+ struct ocfs2_trim_fs_info *info, int trylock);
+void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
+ struct ocfs2_trim_fs_info *info);
int ocfs2_dentry_lock(struct dentry *dentry, int ex);
void ocfs2_dentry_unlock(struct dentry *dentry, int ex);
int ocfs2_file_lock(struct file *file, int ex, int trylock);
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index e4719e0a3f99..06cb96462bf9 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -38,6 +38,7 @@
#include "inode.h"
#include "super.h"
#include "symlink.h"
+#include "aops.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
@@ -832,6 +833,50 @@ out:
return ret;
}
+/* Is IO overwriting allocated blocks? */
+int ocfs2_overwrite_io(struct inode *inode, struct buffer_head *di_bh,
+ u64 map_start, u64 map_len)
+{
+ int ret = 0, is_last;
+ u32 mapping_end, cpos;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_extent_rec rec;
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ if (ocfs2_size_fits_inline_data(di_bh, map_start + map_len))
+ return ret;
+ else
+ return -EAGAIN;
+ }
+
+ cpos = map_start >> osb->s_clustersize_bits;
+ mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
+ map_start + map_len);
+ is_last = 0;
+ while (cpos < mapping_end && !is_last) {
+ ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
+ NULL, &rec, &is_last);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (rec.e_blkno == 0ULL)
+ break;
+
+ if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
+ break;
+
+ cpos = le32_to_cpu(rec.e_cpos) +
+ le16_to_cpu(rec.e_leaf_clusters);
+ }
+
+ if (cpos < mapping_end)
+ ret = -EAGAIN;
+out:
+ return ret;
+}
+
int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int whence)
{
struct inode *inode = file->f_mapping->host;
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h
index 67ea57d2fd59..1057586ec19f 100644
--- a/fs/ocfs2/extent_map.h
+++ b/fs/ocfs2/extent_map.h
@@ -53,6 +53,9 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 map_start, u64 map_len);
+int ocfs2_overwrite_io(struct inode *inode, struct buffer_head *di_bh,
+ u64 map_start, u64 map_len);
+
int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin);
int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
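
The new ocfs2_overwrite_io() walks the extent records covering [map_start, map_start + map_len) and reports -EAGAIN unless the whole range is already allocated and not refcounted; that is what lets the IOCB_NOWAIT paths in file.c reject any write that would need allocation or copy-on-write. A condensed, stand-alone sketch of the walk over hypothetical extent records:

#include <stddef.h>
#include <stdio.h>

struct extent {
	unsigned cpos, clusters;
	unsigned long long blkno;  /* 0 means a hole */
	int refcounted;            /* CoW extent: can't overwrite in place */
};

/* Returns 0 if [cpos, mapping_end) is fully covered by plain allocated
 * extents, -11 (-EAGAIN) otherwise, mirroring the shape of
 * ocfs2_overwrite_io(). */
static int overwrite_ok(const struct extent *recs, size_t n,
			unsigned cpos, unsigned mapping_end)
{
	for (size_t i = 0; i < n && cpos < mapping_end; i++) {
		if (recs[i].blkno == 0 || recs[i].refcounted)
			break;
		cpos = recs[i].cpos + recs[i].clusters;
	}
	return cpos < mapping_end ? -11 : 0;
}

int main(void)
{
	struct extent recs[] = {
		{ .cpos = 0, .clusters = 8, .blkno = 100 },
		{ .cpos = 8, .clusters = 8, .blkno = 0 },  /* hole */
	};

	printf("%d\n", overwrite_ok(recs, 2, 0, 16)); /* -11: would allocate */
	return 0;
}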
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index a1d051055472..5d1784a365a3 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -140,6 +140,8 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
spin_unlock(&oi->ip_lock);
}
+ file->f_mode |= FMODE_NOWAIT;
+
leave:
return status;
}
@@ -2132,12 +2134,12 @@ out:
}
static int ocfs2_prepare_inode_for_write(struct file *file,
- loff_t pos,
- size_t count)
+ loff_t pos, size_t count, int wait)
{
- int ret = 0, meta_level = 0;
+ int ret = 0, meta_level = 0, overwrite_io = 0;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = d_inode(dentry);
+ struct buffer_head *di_bh = NULL;
loff_t end;
/*
@@ -2145,13 +2147,40 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
* if we need to make modifications here.
*/
for(;;) {
- ret = ocfs2_inode_lock(inode, NULL, meta_level);
+ if (wait)
+ ret = ocfs2_inode_lock(inode, NULL, meta_level);
+ else
+ ret = ocfs2_try_inode_lock(inode,
+ overwrite_io ? NULL : &di_bh, meta_level);
if (ret < 0) {
meta_level = -1;
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
goto out;
}
+ /*
+		 * Check whether the I/O will overwrite allocated blocks when
+		 * the IOCB_NOWAIT flag is set.
+ */
+ if (!wait && !overwrite_io) {
+ overwrite_io = 1;
+ if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
+ brelse(di_bh);
+ di_bh = NULL;
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+ }
+
/* Clear suid / sgid if necessary. We do this here
* instead of later in the write path because
* remove_suid() calls ->setattr without any hint that
@@ -2199,7 +2228,9 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
out_unlock:
trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
- pos, count);
+ pos, count, wait);
+
+ brelse(di_bh);
if (meta_level >= 0)
ocfs2_inode_unlock(inode, meta_level);
@@ -2211,7 +2242,7 @@ out:
static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
- int direct_io, rw_level;
+ int rw_level;
ssize_t written = 0;
ssize_t ret;
size_t count = iov_iter_count(from);
@@ -2223,6 +2254,8 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
void *saved_ki_complete = NULL;
int append_write = ((iocb->ki_pos + count) >=
i_size_read(inode) ? 1 : 0);
+ int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
+ int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2230,12 +2263,17 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
file->f_path.dentry->d_name.name,
(unsigned int)from->nr_segs); /* GRRRRR */
+ if (!direct_io && nowait)
+ return -EOPNOTSUPP;
+
if (count == 0)
return 0;
- direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
-
- inode_lock(inode);
+ if (nowait) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ } else
+ inode_lock(inode);
/*
* Concurrent O_DIRECT writes are allowed with
@@ -2244,9 +2282,13 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
*/
rw_level = (!direct_io || full_coherency || append_write);
- ret = ocfs2_rw_lock(inode, rw_level);
+ if (nowait)
+ ret = ocfs2_try_rw_lock(inode, rw_level);
+ else
+ ret = ocfs2_rw_lock(inode, rw_level);
if (ret < 0) {
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
goto out_mutex;
}
@@ -2260,9 +2302,13 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
* other nodes to drop their caches. Buffered I/O
* already does this in write_begin().
*/
- ret = ocfs2_inode_lock(inode, NULL, 1);
+ if (nowait)
+ ret = ocfs2_try_inode_lock(inode, NULL, 1);
+ else
+ ret = ocfs2_inode_lock(inode, NULL, 1);
if (ret < 0) {
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
goto out;
}
@@ -2277,9 +2323,10 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
}
count = ret;
- ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count);
+ ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
if (ret < 0) {
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
goto out;
}
@@ -2355,6 +2402,8 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
int ret = 0, rw_level = -1, lock_level = 0;
struct file *filp = iocb->ki_filp;
struct inode *inode = file_inode(filp);
+ int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
+ int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2369,14 +2418,22 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
goto bail;
}
+ if (!direct_io && nowait)
+ return -EOPNOTSUPP;
+
/*
* buffered reads protect themselves in ->readpage(). O_DIRECT reads
* need locks to protect pending reads from racing with truncate.
*/
- if (iocb->ki_flags & IOCB_DIRECT) {
- ret = ocfs2_rw_lock(inode, 0);
+ if (direct_io) {
+ if (nowait)
+ ret = ocfs2_try_rw_lock(inode, 0);
+ else
+ ret = ocfs2_rw_lock(inode, 0);
+
if (ret < 0) {
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
goto bail;
}
rw_level = 0;
@@ -2393,9 +2450,11 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
* like i_size. This allows the checks down below
* generic_file_aio_read() a chance of actually working.
*/
- ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
+ ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
+ !nowait);
if (ret < 0) {
- mlog_errno(ret);
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
goto bail;
}
ocfs2_inode_unlock(inode, lock_level);
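
Taken together, the ocfs2_file_open() and read/write_iter changes above implement RWF_NOWAIT support: setting FMODE_NOWAIT at open time tells the VFS the flag is honoured, and every lock on the I/O path gains a trylock variant that fails with -EAGAIN rather than sleeping. A minimal sketch of the pattern, using only generic VFS locking (the real ocfs2 path additionally try-locks its cluster locks via ocfs2_try_rw_lock()/ocfs2_try_inode_lock()):

#include <linux/fs.h>
#include <linux/uio.h>

/* Sketch only: honouring IOCB_NOWAIT by never sleeping on a lock. */
static ssize_t nowait_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	bool nowait = iocb->ki_flags & IOCB_NOWAIT;
	ssize_t ret;

	/* Buffered writes may block in the page cache; only O_DIRECT qualifies. */
	if (nowait && !(iocb->ki_flags & IOCB_DIRECT))
		return -EOPNOTSUPP;

	if (nowait) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = 0;	/* allocation checks and the actual write elided */

	inode_unlock(inode);
	return ret;
}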
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 1a1e0078ab38..d51b80edd972 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -28,6 +28,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
+#include <linux/iversion.h>
#include <asm/byteorder.h>
@@ -302,7 +303,7 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
- inode->i_version = 1;
+ inode_set_iversion(inode, 1);
inode->i_generation = le32_to_cpu(fe->i_generation);
inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
inode->i_mode = le16_to_cpu(fe->i_mode);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 36304434eacf..e5dcea6cee5f 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -666,23 +666,24 @@ static int __ocfs2_journal_access(handle_t *handle,
/* we can safely remove this assertion after testing. */
if (!buffer_uptodate(bh)) {
mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
- mlog(ML_ERROR, "b_blocknr=%llu\n",
- (unsigned long long)bh->b_blocknr);
+ mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
+ (unsigned long long)bh->b_blocknr, bh->b_state);
lock_buffer(bh);
/*
- * A previous attempt to write this buffer head failed.
- * Nothing we can do but to retry the write and hope for
- * the best.
+ * A previous transaction with a couple of buffer heads failed
+ * to checkpoint, so all of those bhs are marked BH_Write_EIO.
+ * The bh in the current transaction is just one of the error
+ * bhs the previous transaction handled. We can't simply clear
+ * its BH_Write_EIO and reuse it, since the other bhs have not
+ * been written to disk yet and doing so would cause metadata
+ * inconsistency. So set the fs read-only to avoid further
+ * damage.
*/
if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
- clear_buffer_write_io_error(bh);
- set_buffer_uptodate(bh);
- }
-
- if (!buffer_uptodate(bh)) {
unlock_buffer(bh);
- return -EIO;
+ return ocfs2_error(osb->sb, "A previous attempt to "
+ "write this buffer head failed\n");
}
unlock_buffer(bh);
}
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 098f5c712569..fb9a20e3d608 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -184,7 +184,7 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
int ret = 0, lock_level = 0;
ret = ocfs2_inode_lock_atime(file_inode(file),
- file->f_path.mnt, &lock_level);
+ file->f_path.mnt, &lock_level, 1);
if (ret < 0) {
mlog_errno(ret);
goto out;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 3b0a10d9b36f..c801eddc4bf3 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/quotaops.h>
+#include <linux/iversion.h>
#include <cluster/masklog.h>
@@ -1520,7 +1521,7 @@ static int ocfs2_rename(struct inode *old_dir,
mlog_errno(status);
goto bail;
}
- new_dir->i_version++;
+ inode_inc_iversion(new_dir);
if (S_ISDIR(new_inode->i_mode))
ocfs2_set_links_count(newfe, 0);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 9a50f222ac97..6867eef2e06b 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -404,6 +404,7 @@ struct ocfs2_super
struct ocfs2_lock_res osb_super_lockres;
struct ocfs2_lock_res osb_rename_lockres;
struct ocfs2_lock_res osb_nfs_sync_lockres;
+ struct ocfs2_lock_res osb_trim_fs_lockres;
struct ocfs2_dlm_debug *osb_dlm_debug;
struct dentry *osb_debug_root;
diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h
index d277aabf5dfb..7051b994c776 100644
--- a/fs/ocfs2/ocfs2_lockid.h
+++ b/fs/ocfs2/ocfs2_lockid.h
@@ -50,6 +50,7 @@ enum ocfs2_lock_type {
OCFS2_LOCK_TYPE_NFS_SYNC,
OCFS2_LOCK_TYPE_ORPHAN_SCAN,
OCFS2_LOCK_TYPE_REFCOUNT,
+ OCFS2_LOCK_TYPE_TRIM_FS,
OCFS2_NUM_LOCK_TYPES
};
@@ -93,6 +94,9 @@ static inline char ocfs2_lock_type_char(enum ocfs2_lock_type type)
case OCFS2_LOCK_TYPE_REFCOUNT:
c = 'T';
break;
+ case OCFS2_LOCK_TYPE_TRIM_FS:
+ c = 'I';
+ break;
default:
c = '\0';
}
@@ -115,6 +119,7 @@ static char *ocfs2_lock_type_strings[] = {
[OCFS2_LOCK_TYPE_NFS_SYNC] = "NFSSync",
[OCFS2_LOCK_TYPE_ORPHAN_SCAN] = "OrphanScan",
[OCFS2_LOCK_TYPE_REFCOUNT] = "Refcount",
+ [OCFS2_LOCK_TYPE_TRIM_FS] = "TrimFs",
};
static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type)
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index a0b5d00ef0a9..e2a11aaece10 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -1449,20 +1449,22 @@ DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_remove_inode_range);
TRACE_EVENT(ocfs2_prepare_inode_for_write,
TP_PROTO(unsigned long long ino, unsigned long long saved_pos,
- unsigned long count),
- TP_ARGS(ino, saved_pos, count),
+ unsigned long count, int wait),
+ TP_ARGS(ino, saved_pos, count, wait),
TP_STRUCT__entry(
__field(unsigned long long, ino)
__field(unsigned long long, saved_pos)
__field(unsigned long, count)
+ __field(int, wait)
),
TP_fast_assign(
__entry->ino = ino;
__entry->saved_pos = saved_pos;
__entry->count = count;
+ __entry->wait = wait;
),
- TP_printk("%llu %llu %lu", __entry->ino,
- __entry->saved_pos, __entry->count)
+ TP_printk("%llu %llu %lu %d", __entry->ino,
+ __entry->saved_pos, __entry->count, __entry->wait)
);
DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index b39d14cbfa34..7a922190a8c7 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -12,6 +12,7 @@
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/llist.h>
+#include <linux/iversion.h>
#include <cluster/masklog.h>
@@ -289,7 +290,7 @@ out:
mlog_errno(err);
return err;
}
- gqinode->i_version++;
+ inode_inc_iversion(gqinode);
ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
return len;
}
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 9f0b95abc09f..d8f5f6ce99dc 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -2438,6 +2438,8 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
}
le16_add_cpu(&bg->bg_free_bits_count, num_bits);
if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
+ if (undo_fn)
+ jbd_unlock_bh_state(group_bh);
return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
(unsigned long long)le64_to_cpu(bg->bg_blkno),
le16_to_cpu(bg->bg_bits),
@@ -2563,16 +2565,16 @@ static int _ocfs2_free_clusters(handle_t *handle,
int status;
u16 bg_start_bit;
u64 bg_blkno;
- struct ocfs2_dinode *fe;
/* You can't ever have a contiguous set of clusters
* bigger than a block group bitmap so we never have to worry
* about looping on them.
* This is expensive. We can safely remove once this stuff has
* gotten tested really well. */
- BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk)));
+ BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb,
+ ocfs2_blocks_to_clusters(bitmap_inode->i_sb,
+ start_blk)));
- fe = (struct ocfs2_dinode *) bitmap_bh->b_data;
ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
&bg_start_bit);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 80efa5699fb0..ffa4952d432b 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -474,9 +474,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
if (!new) {
ocfs2_release_system_inodes(osb);
- status = -EINVAL;
+ status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
mlog_errno(status);
- /* FIXME: Should ERROR_RO_FS */
mlog(ML_ERROR, "Unable to load system inode %d, "
"possibly corrupt fs?", i);
goto bail;
@@ -505,7 +504,7 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
if (!new) {
ocfs2_release_system_inodes(osb);
- status = -EINVAL;
+ status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",
status, i, osb->slot_num);
goto bail;
@@ -1208,14 +1207,15 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
read_super_error:
brelse(bh);
+ if (status)
+ mlog_errno(status);
+
if (osb) {
atomic_set(&osb->vol_state, VOLUME_DISABLED);
wake_up(&osb->osb_mount_event);
ocfs2_dismount_volume(sb, 1);
}
- if (status)
- mlog_errno(status);
return status;
}
@@ -1843,6 +1843,9 @@ static int ocfs2_mount_volume(struct super_block *sb)
status = ocfs2_dlm_init(osb);
if (status < 0) {
mlog_errno(status);
+ if (status == -EBADR && ocfs2_userspace_stack(osb))
+ mlog(ML_ERROR, "couldn't mount because cluster name on"
+ " disk does not match the running cluster name.\n");
goto leave;
}
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index c5898c59d411..c261c1dfd374 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -638,14 +638,17 @@ int ocfs2_calc_xattr_init(struct inode *dir,
si->value_len);
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+ down_read(&OCFS2_I(dir)->ip_xattr_sem);
acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
"", NULL, 0);
+ up_read(&OCFS2_I(dir)->ip_xattr_sem);
if (acl_len > 0) {
a_size = ocfs2_xattr_entry_real_size(0, acl_len);
if (S_ISDIR(mode))
a_size <<= 1;
} else if (acl_len != 0 && acl_len != -ENODATA) {
+ ret = acl_len;
mlog_errno(ret);
return ret;
}
@@ -6415,7 +6418,7 @@ static int ocfs2_reflink_xattr_header(handle_t *handle,
* and then insert the extents one by one.
*/
if (xv->xr_list.l_tree_depth) {
- memcpy(new_xv, &def_xv, sizeof(def_xv));
+ memcpy(new_xv, &def_xv, OCFS2_XATTR_ROOT_SIZE);
vb->vb_xv = new_xv;
vb->vb_bh = value_bh;
ocfs2_init_xattr_value_extent_tree(&data_et,
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index ded456f17de6..f073cd9e6687 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -162,7 +162,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
struct orangefs_kernel_op_s *op, *temp;
__s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
- struct orangefs_kernel_op_s *cur_op = NULL;
+ struct orangefs_kernel_op_s *cur_op;
unsigned long ret;
/* We do not support blocking IO. */
@@ -186,6 +186,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
return -EAGAIN;
restart:
+ cur_op = NULL;
/* Get next op (if any) from top of list. */
spin_lock(&orangefs_request_list_lock);
list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
@@ -814,15 +815,15 @@ void orangefs_dev_cleanup(void)
ORANGEFS_REQDEVICE_NAME);
}
-static unsigned int orangefs_devreq_poll(struct file *file,
+static __poll_t orangefs_devreq_poll(struct file *file,
struct poll_table_struct *poll_table)
{
- int poll_revent_mask = 0;
+ __poll_t poll_revent_mask = 0;
poll_wait(file, &orangefs_request_list_waitq, poll_table);
if (!list_empty(&orangefs_request_list))
- poll_revent_mask |= POLL_IN;
+ poll_revent_mask |= POLLIN;
return poll_revent_mask;
}
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 1668fd645c45..0d228cd087e6 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -452,7 +452,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
- loff_t pos = *(&iocb->ki_pos);
+ loff_t pos = iocb->ki_pos;
ssize_t rc = 0;
BUG_ON(iocb->private);
@@ -492,9 +492,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
}
}
- if (file->f_pos > i_size_read(file->f_mapping->host))
- orangefs_i_size_write(file->f_mapping->host, file->f_pos);
-
rc = generic_write_checks(iocb, iter);
if (rc <= 0) {
@@ -508,7 +505,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
* pos to the end of the file, so we will wait till now to set
* pos...
*/
- pos = *(&iocb->ki_pos);
+ pos = iocb->ki_pos;
rc = do_readv_writev(ORANGEFS_IO_WRITE,
file,
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 97adf7d100b5..2595453fe737 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -533,17 +533,6 @@ do { \
sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \
} while (0)
-static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
-{
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
- inode_lock(inode);
-#endif
- i_size_write(inode, i_size);
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
- inode_unlock(inode);
-#endif
-}
-
static inline void orangefs_set_timeout(struct dentry *dentry)
{
unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
index 835c6e148afc..0577d6dba8c8 100644
--- a/fs/orangefs/waitqueue.c
+++ b/fs/orangefs/waitqueue.c
@@ -29,10 +29,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
*/
void purge_waiting_ops(void)
{
- struct orangefs_kernel_op_s *op;
+ struct orangefs_kernel_op_s *op, *tmp;
spin_lock(&orangefs_request_list_lock);
- list_for_each_entry(op, &orangefs_request_list, list) {
+ list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
gossip_debug(GOSSIP_WAIT_DEBUG,
"pvfs2-client-core: purging op tag %llu %s\n",
llu(op->tag),
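
The purge_waiting_ops() change fixes the classic delete-while-iterating bug: list_for_each_entry() reads the next pointer from the node the loop body may have just removed, so the _safe variant, which caches the successor first, is required whenever the body can unlink the current entry. A self-contained illustration (struct pending_op is hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct pending_op {
	struct list_head list;
	u64 tag;
};

/* 'tmp' caches the successor, so freeing 'op' cannot corrupt the walk. */
static void purge_all(struct list_head *queue)
{
	struct pending_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, queue, list) {
		list_del(&op->list);
		kfree(op);
	}
}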
diff --git a/fs/pipe.c b/fs/pipe.c
index 6d98566201ef..a449ca0ec0c6 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -515,10 +515,10 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
/* No kernel lock held - fine */
-static unsigned int
+static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
int nrbufs;
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index eebf5f6cf6d5..2fd0fde16fe1 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -43,7 +43,7 @@ struct posix_acl *get_cached_acl(struct inode *inode, int type)
rcu_read_lock();
acl = rcu_dereference(*p);
if (!acl || is_uncached_acl(acl) ||
- atomic_inc_not_zero(&acl->a_refcount))
+ refcount_inc_not_zero(&acl->a_refcount))
break;
rcu_read_unlock();
cpu_relax();
@@ -164,7 +164,7 @@ EXPORT_SYMBOL(get_acl);
void
posix_acl_init(struct posix_acl *acl, int count)
{
- atomic_set(&acl->a_refcount, 1);
+ refcount_set(&acl->a_refcount, 1);
acl->a_count = count;
}
EXPORT_SYMBOL(posix_acl_init);
@@ -197,7 +197,7 @@ posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
sizeof(struct posix_acl_entry);
clone = kmemdup(acl, size, flags);
if (clone)
- atomic_set(&clone->a_refcount, 1);
+ refcount_set(&clone->a_refcount, 1);
}
return clone;
}
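
The posix_acl conversion is part of the tree-wide atomic_t-to-refcount_t migration: refcount_inc_not_zero() has the same take-a-reference-unless-dead semantics as atomic_inc_not_zero(), but saturates rather than wrapping on overflow. The surrounding loop is the standard RCU lookup-then-pin idiom; roughly as below, with a single attempt shown where the real get_cached_acl() retries after cpu_relax():

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct obj {
	refcount_t ref;
	/* payload ... */
};

/* Hypothetical helper: pin an RCU-published object, or NULL if it is dying. */
static struct obj *obj_get(struct obj __rcu **slot)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(*slot);
	if (o && !refcount_inc_not_zero(&o->ref))
		o = NULL;	/* lost the race with the final put */
	rcu_read_unlock();
	return o;
}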
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 79375fc115d2..d67a72dcb92c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -430,8 +430,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
* safe because the task has stopped executing permanently.
*/
if (permitted && (task->flags & PF_DUMPCORE)) {
- eip = KSTK_EIP(task);
- esp = KSTK_ESP(task);
+ if (try_get_task_stack(task)) {
+ eip = KSTK_EIP(task);
+ esp = KSTK_ESP(task);
+ put_task_stack(task);
+ }
}
}
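
The do_task_stat() fix guards KSTK_EIP()/KSTK_ESP() with a stack reference: with CONFIG_THREAD_INFO_IN_TASK the kernel stack can be freed as soon as the task exits, even while the task_struct itself is still pinned. The safe access pattern, as a small sketch helper:

#include <linux/sched/task_stack.h>

/* Read saved user registers only while the task's stack is pinned. */
static void read_user_regs(struct task_struct *task,
			   unsigned long *eip, unsigned long *esp)
{
	*eip = *esp = 0;
	if (try_get_task_stack(task)) {
		*eip = KSTK_EIP(task);
		*esp = KSTK_ESP(task);
		put_task_stack(task);
	}
}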
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index dd0f82622427..8dacaabb9f37 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -234,11 +234,11 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
return rv;
}
-static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
- unsigned int rv = DEFAULT_POLLMASK;
- unsigned int (*poll)(struct file *, struct poll_table_struct *);
+ __poll_t rv = DEFAULT_POLLMASK;
+ __poll_t (*poll)(struct file *, struct poll_table_struct *);
if (use_pde(pde)) {
poll = pde->proc_fops->poll;
if (poll)
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index e0f8774acd65..f0bfb45c3f9f 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -40,7 +40,7 @@ static ssize_t kmsg_read(struct file *file, char __user *buf,
return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC);
}
-static unsigned int kmsg_poll(struct file *file, poll_table *wait)
+static __poll_t kmsg_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &log_wait, wait);
if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index c5cbbdff3c3d..63325377621a 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -630,12 +630,12 @@ static int proc_sys_open(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned int proc_sys_poll(struct file *filp, poll_table *wait)
+static __poll_t proc_sys_poll(struct file *filp, poll_table *wait)
{
struct inode *inode = file_inode(filp);
struct ctl_table_header *head = grab_header(inode);
struct ctl_table *table = PROC_I(inode)->sysctl_entry;
- unsigned int ret = DEFAULT_POLLMASK;
+ __poll_t ret = DEFAULT_POLLMASK;
unsigned long event;
/* sysctl was unregistered */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 339e4c1c044d..ec6d2983a5cb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -47,8 +47,11 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
if (hiwater_rss < mm->hiwater_rss)
hiwater_rss = mm->hiwater_rss;
- text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
- lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
+ /* split executable areas between text and lib */
+ text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
+ text = min(text, mm->exec_vm << PAGE_SHIFT);
+ lib = (mm->exec_vm << PAGE_SHIFT) - text;
+
swap = get_mm_counter(mm, MM_SWAPENTS);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
@@ -76,7 +79,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
file << (PAGE_SHIFT-10),
shmem << (PAGE_SHIFT-10),
mm->data_vm << (PAGE_SHIFT-10),
- mm->stack_vm << (PAGE_SHIFT-10), text, lib,
+ mm->stack_vm << (PAGE_SHIFT-10),
+ text >> 10,
+ lib >> 10,
mm_pgtables_bytes(mm) >> 10,
swap << (PAGE_SHIFT-10));
hugetlb_report_usage(m, mm);
@@ -977,14 +982,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmd_t pmd = *pmdp;
+ pmd_t old, pmd = *pmdp;
if (pmd_present(pmd)) {
/* See comment in change_huge_pmd() */
- pmdp_invalidate(vma, addr, pmdp);
- if (pmd_dirty(*pmdp))
+ old = pmdp_invalidate(vma, addr, pmdp);
+ if (pmd_dirty(old))
pmd = pmd_mkdirty(pmd);
- if (pmd_young(*pmdp))
+ if (pmd_young(old))
pmd = pmd_mkyoung(pmd);
pmd = pmd_wrprotect(pmd);
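
The clear_soft_dirty_pmd() change closes a race: re-reading *pmdp after pmdp_invalidate() is not atomic against hardware setting the dirty/accessed bits, so updates could be lost. pmdp_invalidate() now returns the entry it replaced, and dirty/young are carried over from that snapshot. A sketch of the full sequence, reconstructed from the hunk above (the trailing calls are implied context, not shown in the diff, and assume CONFIG_MEM_SOFT_DIRTY):

#include <linux/mm.h>

static void wrprotect_clear_soft_dirty(struct vm_area_struct *vma,
				       unsigned long addr, pmd_t *pmdp)
{
	pmd_t old, pmd = *pmdp;

	/* Atomically swap in an invalid entry; 'old' is the prior contents. */
	old = pmdp_invalidate(vma, addr, pmdp);
	if (pmd_dirty(old))
		pmd = pmd_mkdirty(pmd);
	if (pmd_young(old))
		pmd = pmd_mkyoung(pmd);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_clear_soft_dirty(pmd);
	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}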
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index b786840facd9..c8528d587e09 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -18,12 +18,12 @@
#include "pnode.h"
#include "internal.h"
-static unsigned mounts_poll(struct file *file, poll_table *wait)
+static __poll_t mounts_poll(struct file *file, poll_table *wait)
{
struct seq_file *m = file->private_data;
struct proc_mounts *p = m->private;
struct mnt_namespace *ns = p->ns;
- unsigned res = POLLIN | POLLRDNORM;
+ __poll_t res = POLLIN | POLLRDNORM;
int event;
poll_wait(file, &p->ns->poll, wait);
diff --git a/fs/reiserfs/Kconfig b/fs/reiserfs/Kconfig
index 7cd46666ba2c..86e71c0caf48 100644
--- a/fs/reiserfs/Kconfig
+++ b/fs/reiserfs/Kconfig
@@ -57,8 +57,7 @@ config REISERFS_FS_XATTR
depends on REISERFS_FS
help
Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details).
+ the kernel or by users (see the attr(5) manual page for details).
If unsure, say N.
@@ -70,9 +69,6 @@ config REISERFS_FS_POSIX_ACL
Posix Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the Posix ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N
config REISERFS_FS_SECURITY
diff --git a/fs/select.c b/fs/select.c
index 6de493bb42a4..ec14171dd78a 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -212,7 +212,7 @@ static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key
struct poll_table_entry *entry;
entry = container_of(wait, struct poll_table_entry, wait);
- if (key && !((unsigned long)key & entry->key))
+ if (key && !(key_to_poll(key) & entry->key))
return 0;
return __pollwake(wait, mode, sync, key);
}
@@ -438,7 +438,7 @@ get_max:
static inline void wait_key_set(poll_table *wait, unsigned long in,
unsigned long out, unsigned long bit,
- unsigned int ll_flag)
+ __poll_t ll_flag)
{
wait->_key = POLLEX_SET | ll_flag;
if (in & bit)
@@ -454,7 +454,7 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
poll_table *wait;
int retval, i, timed_out = 0;
u64 slack = 0;
- unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
+ __poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
unsigned long busy_start = 0;
rcu_read_lock();
@@ -484,8 +484,9 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
- unsigned long in, out, ex, all_bits, bit = 1, mask, j;
+ unsigned long in, out, ex, all_bits, bit = 1, j;
unsigned long res_in = 0, res_out = 0, res_ex = 0;
+ __poll_t mask;
in = *inp++; out = *outp++; ex = *exp++;
all_bits = in | out | ex;
@@ -802,11 +803,11 @@ struct poll_list {
* pwait poll_table will be used by the fd-provided poll handler for waiting,
* if pwait->_qproc is non-NULL.
*/
-static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
+static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
bool *can_busy_poll,
- unsigned int busy_flag)
+ __poll_t busy_flag)
{
- unsigned int mask;
+ __poll_t mask;
int fd;
mask = 0;
@@ -815,20 +816,24 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
struct fd f = fdget(fd);
mask = POLLNVAL;
if (f.file) {
+ /* userland u16 ->events contains POLL... bitmap */
+ __poll_t filter = demangle_poll(pollfd->events) |
+ POLLERR | POLLHUP;
mask = DEFAULT_POLLMASK;
if (f.file->f_op->poll) {
- pwait->_key = pollfd->events|POLLERR|POLLHUP;
+ pwait->_key = filter;
pwait->_key |= busy_flag;
mask = f.file->f_op->poll(f.file, pwait);
if (mask & busy_flag)
*can_busy_poll = true;
}
/* Mask out unneeded events. */
- mask &= pollfd->events | POLLERR | POLLHUP;
+ mask &= filter;
fdput(f);
}
}
- pollfd->revents = mask;
+ /* ... and so does ->revents */
+ pollfd->revents = mangle_poll(mask);
return mask;
}
@@ -840,7 +845,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
ktime_t expire, *to = NULL;
int timed_out = 0, count = 0;
u64 slack = 0;
- unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
+ __poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
unsigned long busy_start = 0;
/* Optimise the no-wait case */
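
Most of the poll-related churn in this section is the tree-wide unsigned int → __poll_t conversion. __poll_t is a sparse __bitwise type, so mixing driver-side masks with the userspace u16 encoding now produces warnings, and do_pollfd() above translates at the boundary with demangle_poll()/mangle_poll(). A conforming ->poll method in the new type (struct sample_dev and its readiness flags are hypothetical):

#include <linux/poll.h>
#include <linux/wait.h>

struct sample_dev {
	wait_queue_head_t waitq;
	bool readable;
	bool writable;
};

static __poll_t sample_poll(struct file *file, poll_table *wait)
{
	struct sample_dev *dev = file->private_data;
	__poll_t mask = 0;

	/* Registers the wait queue with the poll table; never blocks. */
	poll_wait(file, &dev->waitq, wait);

	if (dev->readable)
		mask |= POLLIN | POLLRDNORM;
	if (dev->writable)
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}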
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 5f1ff8756595..31e923bec99a 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -58,10 +58,10 @@ static int signalfd_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int signalfd_poll(struct file *file, poll_table *wait)
+static __poll_t signalfd_poll(struct file *file, poll_table *wait)
{
struct signalfd_ctx *ctx = file->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
poll_wait(file, &current->sighand->signalfd_wqh, wait);
diff --git a/fs/super.c b/fs/super.c
index 7ff1349609e4..672538ca9831 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -225,7 +225,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
if (s->s_user_ns != &init_user_ns)
s->s_iflags |= SB_I_NODEV;
INIT_HLIST_NODE(&s->s_instances);
- INIT_HLIST_BL_HEAD(&s->s_anon);
+ INIT_HLIST_BL_HEAD(&s->s_roots);
mutex_init(&s->s_sync_lock);
INIT_LIST_HEAD(&s->s_inodes);
spin_lock_init(&s->s_inode_list_lock);
@@ -517,7 +517,11 @@ retry:
hlist_add_head(&s->s_instances, &type->fs_supers);
spin_unlock(&sb_lock);
get_filesystem(type);
- register_shrinker(&s->s_shrink);
+ err = register_shrinker(&s->s_shrink);
+ if (err) {
+ deactivate_locked_super(s);
+ s = ERR_PTR(err);
+ }
return s;
}
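
The sget_userns() hunk fixes a silently ignored failure: register_shrinker() allocates per-node counters and can return -ENOMEM, after which an unregistered shrinker must not be left attached to a live superblock. Other callers should follow the same rule; a minimal sketch:

#include <linux/shrinker.h>

static unsigned long sample_count(struct shrinker *s,
				  struct shrink_control *sc)
{
	return 0;		/* nothing reclaimable in this sketch */
}

static unsigned long sample_scan(struct shrinker *s,
				 struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker sample_shrinker = {
	.count_objects	= sample_count,
	.scan_objects	= sample_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init sample_init(void)
{
	/* register_shrinker() allocates and may fail: always check it. */
	return register_shrinker(&sample_shrinker);
}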
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 2b67bda2021b..58eba92a0e41 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/sysfs/dir.c - sysfs core and dir operation implementation
*
@@ -5,12 +6,10 @@
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007 Tejun Heo <teheo@suse.de>
*
- * This file is released under the GPLv2.
- *
* Please see Documentation/filesystems/sysfs.txt for more information.
*/
-#undef DEBUG
+#define pr_fmt(fmt) "sysfs: " fmt
#include <linux/fs.h>
#include <linux/kobject.h>
@@ -27,8 +26,8 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
if (buf)
kernfs_path(parent, buf, PATH_MAX);
- WARN(1, KERN_WARNING "sysfs: cannot create duplicate filename '%s/%s'\n",
- buf, name);
+ pr_warn("cannot create duplicate filename '%s/%s'\n", buf, name);
+ dump_stack();
kfree(buf);
}
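
The WARN-to-pr_warn() change relies on the pr_fmt() convention introduced at the top of the file: defining pr_fmt before any include that pulls in the printk machinery prefixes every pr_*() message in that translation unit, so the literal "sysfs: " strings can be dropped from the call sites. The idiom:

/* Must appear before the first include that pulls in <linux/printk.h>. */
#define pr_fmt(fmt) "sysfs: " fmt

#include <linux/printk.h>

static void report_dup(const char *path, const char *name)
{
	/* Emits: "sysfs: cannot create duplicate filename '<path>/<name>'" */
	pr_warn("cannot create duplicate filename '%s/%s'\n", path, name);
}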
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 39c75a86c67f..39da8e86f10a 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/sysfs/file.c - sysfs regular (text) file implementation
*
@@ -5,8 +6,6 @@
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007 Tejun Heo <teheo@suse.de>
*
- * This file is released under the GPLv2.
- *
* Please see Documentation/filesystems/sysfs.txt for more information.
*/
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index ac2de0ed69ad..4802ec0e1e3a 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/sysfs/group.c - Operations for adding/removing multiple files at once.
*
@@ -5,9 +6,6 @@
* Copyright (c) 2003 Open Source Development Lab
* Copyright (c) 2013 Greg Kroah-Hartman
* Copyright (c) 2013 The Linux Foundation
- *
- * This file is released undert the GPL v2.
- *
*/
#include <linux/kobject.h>
@@ -406,6 +404,6 @@ int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
kernfs_put(entry);
kernfs_put(target);
- return IS_ERR(link) ? PTR_ERR(link) : 0;
+ return PTR_ERR_OR_ZERO(link);
}
EXPORT_SYMBOL_GPL(__compat_only_sysfs_link_entry_to_kobj);
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index fb49510c5dcf..b428d317ae92 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/sysfs/symlink.c - operations for initializing and mounting sysfs
*
@@ -5,13 +6,9 @@
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007 Tejun Heo <teheo@suse.de>
*
- * This file is released under the GPLv2.
- *
* Please see Documentation/filesystems/sysfs.txt for more information.
*/
-#define DEBUG
-
#include <linux/fs.h>
#include <linux/magic.h>
#include <linux/mount.h>
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index aecb15f84557..8664db25a9a6 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/sysfs/symlink.c - sysfs symlink implementation
*
@@ -5,8 +6,6 @@
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007 Tejun Heo <teheo@suse.de>
*
- * This file is released under the GPLv2.
- *
* Please see Documentation/filesystems/sysfs.txt for more information.
*/
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 0e2f1cccb812..d098e015fcc9 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/sysfs/sysfs.h - sysfs internal header file
*
* Copyright (c) 2001-3 Patrick Mochel
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007 Tejun Heo <teheo@suse.de>
- *
- * This file is released under the GPLv2.
*/
#ifndef __SYSFS_INTERNAL_H
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 040612ec9598..0510717f3a53 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -227,10 +227,10 @@ static int timerfd_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int timerfd_poll(struct file *file, poll_table *wait)
+static __poll_t timerfd_poll(struct file *file, poll_table *wait)
{
struct timerfd_ctx *ctx = file->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
unsigned long flags;
poll_wait(file, &ctx->wqh, wait);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 417fe0b29f23..a2ea4856e67b 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -220,20 +220,9 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
- if (ubifs_crypt_is_encrypted(dir)) {
- err = fscrypt_get_encryption_info(dir);
-
- /*
- * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
- * created while the directory was encrypted and we
- * have access to the key.
- */
- if (fscrypt_has_encryption_key(dir))
- fscrypt_set_encrypted_dentry(dentry);
- fscrypt_set_d_op(dentry);
- if (err && err != -ENOKEY)
- return ERR_PTR(err);
- }
+ err = fscrypt_prepare_lookup(dir, dentry, flags);
+ if (err)
+ return ERR_PTR(err);
err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &nm);
if (err)
@@ -743,9 +732,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
ubifs_assert(inode_is_locked(dir));
ubifs_assert(inode_is_locked(inode));
- if (ubifs_crypt_is_encrypted(dir) &&
- !fscrypt_has_permitted_context(dir, inode))
- return -EPERM;
+ err = fscrypt_prepare_link(old_dentry, dir, dentry);
+ if (err)
+ return err;
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
@@ -1353,12 +1342,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlink)
ubifs_assert(inode_is_locked(new_inode));
- if (old_dir != new_dir) {
- if (ubifs_crypt_is_encrypted(new_dir) &&
- !fscrypt_has_permitted_context(new_dir, old_inode))
- return -EPERM;
- }
-
if (unlink && is_dir) {
err = ubifs_check_dir_empty(new_inode);
if (err)
@@ -1573,13 +1556,6 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
ubifs_assert(fst_inode && snd_inode);
- if ((ubifs_crypt_is_encrypted(old_dir) ||
- ubifs_crypt_is_encrypted(new_dir)) &&
- (old_dir != new_dir) &&
- (!fscrypt_has_permitted_context(new_dir, fst_inode) ||
- !fscrypt_has_permitted_context(old_dir, snd_inode)))
- return -EPERM;
-
err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &fst_nm);
if (err)
return err;
@@ -1624,12 +1600,19 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
+ int err;
+
if (flags & ~(RENAME_NOREPLACE | RENAME_WHITEOUT | RENAME_EXCHANGE))
return -EINVAL;
ubifs_assert(inode_is_locked(old_dir));
ubifs_assert(inode_is_locked(new_dir));
+ err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
+ flags);
+ if (err)
+ return err;
+
if (flags & RENAME_EXCHANGE)
return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry);
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index dfe85069586e..9fe194a4fa9b 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1284,13 +1284,9 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
- if (ubifs_crypt_is_encrypted(inode) && (attr->ia_valid & ATTR_SIZE)) {
- err = fscrypt_get_encryption_info(inode);
- if (err)
- return err;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
+ err = fscrypt_prepare_setattr(dentry, attr);
+ if (err)
+ return err;
if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
/* Truncation to a smaller size */
@@ -1629,35 +1625,6 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static int ubifs_file_open(struct inode *inode, struct file *filp)
-{
- int ret;
- struct dentry *dir;
- struct ubifs_info *c = inode->i_sb->s_fs_info;
-
- if (ubifs_crypt_is_encrypted(inode)) {
- ret = fscrypt_get_encryption_info(inode);
- if (ret)
- return -EACCES;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
-
- dir = dget_parent(file_dentry(filp));
- if (ubifs_crypt_is_encrypted(d_inode(dir)) &&
- !fscrypt_has_permitted_context(d_inode(dir), inode)) {
- ubifs_err(c, "Inconsistent encryption contexts: %lu/%lu",
- (unsigned long) d_inode(dir)->i_ino,
- (unsigned long) inode->i_ino);
- dput(dir);
- ubifs_ro_mode(c, -EPERM);
- return -EPERM;
- }
- dput(dir);
-
- return 0;
-}
-
static const char *ubifs_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
@@ -1746,7 +1713,7 @@ const struct file_operations ubifs_file_operations = {
.unlocked_ioctl = ubifs_ioctl,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
- .open = ubifs_file_open,
+ .open = fscrypt_file_open,
#ifdef CONFIG_COMPAT
.compat_ioctl = ubifs_compat_ioctl,
#endif
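
The ubifs changes replace open-coded encryption checks (key lookup, DCACHE_ENCRYPTED_WITH_KEY, fscrypt_has_permitted_context()) with the shared fscrypt_prepare_{lookup,link,rename,setattr}() helpers and fscrypt_file_open(), so policy is enforced identically across fscrypt filesystems. Every call site reduces to the same early-return shape; for example for rename (sample_rename is a hypothetical ->rename implementation):

#include <linux/fs.h>
#include <linux/fscrypt.h>

static int sample_rename(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry,
			 unsigned int flags)
{
	int err;

	/* Centralised checks: keys available, contexts permitted, etc. */
	err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir,
				     new_dentry, flags);
	if (err)
		return err;

	/* filesystem-specific rename work elided */
	return 0;
}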
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 0a213dcba2a1..ba3d0e0f8615 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1890,35 +1890,28 @@ static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
union ubifs_key *dkey;
for (;;) {
- if (!err) {
- err = tnc_next(c, &znode, n);
- if (err)
- goto out;
- }
-
zbr = &znode->zbranch[*n];
dkey = &zbr->key;
if (key_inum(c, dkey) != key_inum(c, key) ||
key_type(c, dkey) != key_type(c, key)) {
- err = -ENOENT;
- goto out;
+ return -ENOENT;
}
err = tnc_read_hashed_node(c, zbr, dent);
if (err)
- goto out;
+ return err;
if (key_hash(c, key) == key_hash(c, dkey) &&
le32_to_cpu(dent->cookie) == cookie) {
*zn = znode;
- goto out;
+ return 0;
}
- }
-
-out:
- return err;
+ err = tnc_next(c, &znode, n);
+ if (err)
+ return err;
+ }
}
static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 5ddc89d564fd..759f1a209dbb 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -381,8 +381,6 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
if (buf) {
/* If @buf is %NULL we are supposed to return the length */
if (ui->data_len > size) {
- ubifs_err(c, "buffer size %zd, xattr len %d",
- size, ui->data_len);
err = -ERANGE;
goto out_iput;
}
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 2edc1755b7c5..50dfce000864 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -20,6 +20,7 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/swap.h>
+#include <linux/iversion.h>
#include "ufs_fs.h"
#include "ufs.h"
@@ -47,7 +48,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
struct inode *dir = mapping->host;
int err = 0;
- dir->i_version++;
+ inode_inc_iversion(dir);
block_write_end(NULL, mapping, pos, len, len, page, NULL);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
@@ -428,7 +429,7 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
- int need_revalidate = file->f_version != inode->i_version;
+ bool need_revalidate = inode_cmp_iversion(inode, file->f_version);
unsigned flags = UFS_SB(sb)->s_flags;
UFSD("BEGIN\n");
@@ -455,8 +456,8 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
ctx->pos = (n<<PAGE_SHIFT) + offset;
}
- file->f_version = inode->i_version;
- need_revalidate = 0;
+ file->f_version = inode_query_iversion(inode);
+ need_revalidate = false;
}
de = (struct ufs_dir_entry *)(kaddr+offset);
limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
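
The ufs hunks are part of the conversion to the <linux/iversion.h> accessors: filesystems stop touching inode->i_version directly so the VFS can lazily increment it only when someone has actually queried it. The readdir revalidation idiom above, as a sketch:

#include <linux/fs.h>
#include <linux/iversion.h>

/*
 * Sketch: file->f_version caches the i_version seen by the last readdir;
 * a nonzero inode_cmp_iversion() means cached positions may be stale.
 */
static bool dir_needs_revalidate(struct inode *dir, struct file *file)
{
	if (!inode_cmp_iversion(dir, file->f_version))
		return false;
	file->f_version = inode_query_iversion(dir);
	return true;
}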
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index afb601c0dda0..c843ec858cf7 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -36,6 +36,7 @@
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
+#include <linux/iversion.h>
#include "ufs_fs.h"
#include "ufs.h"
@@ -693,7 +694,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
if (err)
goto bad_inode;
- inode->i_version++;
+ inode_inc_iversion(inode);
ufsi->i_lastfrag =
(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
ufsi->i_dir_start_lookup = 0;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 4d497e9c6883..b6ba80e05bff 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -88,6 +88,7 @@
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
+#include <linux/iversion.h>
#include "ufs_fs.h"
#include "ufs.h"
@@ -1440,7 +1441,7 @@ static struct inode *ufs_alloc_inode(struct super_block *sb)
if (!ei)
return NULL;
- ei->vfs_inode.i_version = 1;
+ inode_set_iversion(&ei->vfs_inode, 1);
seqlock_init(&ei->meta_lock);
mutex_init(&ei->truncate_mutex);
return &ei->vfs_inode;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ac9a4e65ca49..87a13a7c8270 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -294,10 +294,13 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
* pmd_trans_unstable) of the pmd.
*/
_pmd = READ_ONCE(*pmd);
- if (!pmd_present(_pmd))
+ if (pmd_none(_pmd))
goto out;
ret = false;
+ if (!pmd_present(_pmd))
+ goto out;
+
if (pmd_trans_huge(_pmd))
goto out;
@@ -570,11 +573,14 @@ out:
static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
struct userfaultfd_wait_queue *ewq)
{
+ struct userfaultfd_ctx *release_new_ctx;
+
if (WARN_ON_ONCE(current->flags & PF_EXITING))
goto out;
ewq->ctx = ctx;
init_waitqueue_entry(&ewq->wq, current);
+ release_new_ctx = NULL;
spin_lock(&ctx->event_wqh.lock);
/*
@@ -601,8 +607,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
new = (struct userfaultfd_ctx *)
(unsigned long)
ewq->msg.arg.reserved.reserved1;
-
- userfaultfd_ctx_put(new);
+ release_new_ctx = new;
}
break;
}
@@ -617,6 +622,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
__set_current_state(TASK_RUNNING);
spin_unlock(&ctx->event_wqh.lock);
+ if (release_new_ctx) {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = release_new_ctx->mm;
+
+ /* the various vma->vm_userfaultfd_ctx still point to it */
+ down_write(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
+ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ up_write(&mm->mmap_sem);
+
+ userfaultfd_ctx_put(release_new_ctx);
+ }
+
/*
* ctx may go away after this if the userfault pseudo fd is
* already released.
@@ -921,10 +940,10 @@ static inline struct userfaultfd_wait_queue *find_userfault_evt(
return find_userfault_in(&ctx->event_wqh);
}
-static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
+static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
struct userfaultfd_ctx *ctx = file->private_data;
- unsigned int ret;
+ __poll_t ret;
poll_wait(file, &ctx->fd_wqh, wait);
@@ -969,24 +988,14 @@ static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
struct uffd_msg *msg)
{
int fd;
- struct file *file;
- unsigned int flags = new->flags & UFFD_SHARED_FCNTL_FLAGS;
- fd = get_unused_fd_flags(flags);
+ fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new,
+ O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS));
if (fd < 0)
return fd;
- file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, new,
- O_RDWR | flags);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- return PTR_ERR(file);
- }
-
- fd_install(fd, file);
msg->arg.reserved.reserved1 = 0;
msg->arg.fork.ufd = fd;
-
return 0;
}
@@ -1868,24 +1877,10 @@ static void init_once_userfaultfd_ctx(void *mem)
seqcount_init(&ctx->refile_seq);
}
-/**
- * userfaultfd_file_create - Creates a userfaultfd file pointer.
- * @flags: Flags for the userfaultfd file.
- *
- * This function creates a userfaultfd file pointer, w/out installing
- * it into the fd table. This is useful when the userfaultfd file is
- * used during the initialization of data structures that require
- * extra setup after the userfaultfd creation. So the userfaultfd
- * creation is split into the file pointer creation phase, and the
- * file descriptor installation phase. In this way races with
- * userspace closing the newly installed file descriptor can be
- * avoided. Returns a userfaultfd file pointer, or a proper error
- * pointer.
- */
-static struct file *userfaultfd_file_create(int flags)
+SYSCALL_DEFINE1(userfaultfd, int, flags)
{
- struct file *file;
struct userfaultfd_ctx *ctx;
+ int fd;
BUG_ON(!current->mm);
@@ -1893,14 +1888,12 @@ static struct file *userfaultfd_file_create(int flags)
BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
- file = ERR_PTR(-EINVAL);
if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
- goto out;
+ return -EINVAL;
- file = ERR_PTR(-ENOMEM);
ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
if (!ctx)
- goto out;
+ return -ENOMEM;
atomic_set(&ctx->refcount, 1);
ctx->flags = flags;
@@ -1911,39 +1904,13 @@ static struct file *userfaultfd_file_create(int flags)
/* prevent the mm struct to be freed */
mmgrab(ctx->mm);
- file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
- O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
- if (IS_ERR(file)) {
+ fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
+ O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
+ if (fd < 0) {
mmdrop(ctx->mm);
kmem_cache_free(userfaultfd_ctx_cachep, ctx);
}
-out:
- return file;
-}
-
-SYSCALL_DEFINE1(userfaultfd, int, flags)
-{
- int fd, error;
- struct file *file;
-
- error = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS);
- if (error < 0)
- return error;
- fd = error;
-
- file = userfaultfd_file_create(flags);
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto err_put_unused_fd;
- }
- fd_install(fd, file);
-
return fd;
-
-err_put_unused_fd:
- put_unused_fd(fd);
-
- return error;
}
static int __init userfaultfd_init(void)
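
The userfaultfd(2) rework replaces the three-step get_unused_fd_flags() / anon_inode_getfile() / fd_install() sequence with a single anon_inode_getfd() call, eliminating both the partial-failure cleanup paths and the window in which userspace could act on a reserved-but-unpopulated descriptor. The resulting shape, sketched with a hypothetical context type:

#include <linux/anon_inodes.h>
#include <linux/file.h>

/* Hypothetical: expose a private context object as a file descriptor. */
static int publish_ctx(void *ctx, const struct file_operations *fops,
		       int fcntl_flags)
{
	int fd;

	fd = anon_inode_getfd("[sample_ctx]", fops, ctx,
			      O_RDWR | fcntl_flags);
	/* On error nothing was installed; the caller still owns ctx. */
	return fd;
}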
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index f42fcf1b5465..46bcf0e649f5 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -48,9 +48,6 @@ config XFS_POSIX_ACL
POSIX Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
If you don't know what Access Control Lists are, say N.
config XFS_RT
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 83ed7715f856..c02781a4c091 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -167,7 +167,7 @@ xfs_alloc_lookup_ge(
* Lookup the first record less than or equal to [bno, len]
* in the btree given by cur.
*/
-static int /* error */
+int /* error */
xfs_alloc_lookup_le(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
@@ -520,7 +520,7 @@ xfs_alloc_fixup_trees(
return 0;
}
-static bool
+static xfs_failaddr_t
xfs_agfl_verify(
struct xfs_buf *bp)
{
@@ -528,10 +528,19 @@ xfs_agfl_verify(
struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
int i;
+ /*
+ * There is no verification of non-crc AGFLs because mkfs does not
+ * initialise the AGFL to zero or NULL. Hence the only valid part of the
+ * AGFL is what the AGF says is active. We can't get to the AGF from
+ * here, so we can't limit verification to just those entries.
+ */
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return NULL;
+
if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
- return false;
+ return __this_address;
/*
* during growfs operations, the perag is not fully initialised,
* so we can't use it for any useful checking. growfs ensures we can't
@@ -539,16 +548,17 @@ xfs_agfl_verify(
* so we can detect and avoid this problem.
*/
if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
- return false;
+ return __this_address;
for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
- return false;
+ return __this_address;
}
- return xfs_log_check_lsn(mp,
- be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn));
+ if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
+ return __this_address;
+ return NULL;
}
static void
@@ -556,6 +566,7 @@ xfs_agfl_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
/*
* There is no verification of non-crc AGFLs because mkfs does not
@@ -567,28 +578,29 @@ xfs_agfl_read_verify(
return;
if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_agfl_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_agfl_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
xfs_agfl_write_verify(
struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ xfs_failaddr_t fa;
/* no verification of non-crc AGFLs */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
- if (!xfs_agfl_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_agfl_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -602,6 +614,7 @@ const struct xfs_buf_ops xfs_agfl_buf_ops = {
.name = "xfs_agfl",
.verify_read = xfs_agfl_read_verify,
.verify_write = xfs_agfl_write_verify,
+ .verify_struct = xfs_agfl_verify,
};
/*
@@ -2397,19 +2410,19 @@ xfs_alloc_put_freelist(
return 0;
}
-static bool
+static xfs_failaddr_t
xfs_agf_verify(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
- {
- struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp,
be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
- return false;
+ return __this_address;
}
if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
@@ -2418,18 +2431,18 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
- return false;
+ return __this_address;
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
- return false;
+ return __this_address;
if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
(be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
- return false;
+ return __this_address;
/*
* during growfs operations, the perag is not fully initialised,
@@ -2438,18 +2451,18 @@ xfs_agf_verify(
* so we can detect and avoid this problem.
*/
if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
- return false;
+ return __this_address;
if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
- return false;
+ return __this_address;
if (xfs_sb_version_hasreflink(&mp->m_sb) &&
(be32_to_cpu(agf->agf_refcount_level) < 1 ||
be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
- return false;
+ return __this_address;
- return true;;
+ return NULL;
}
@@ -2458,28 +2471,29 @@ xfs_agf_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp,
- XFS_ERRTAG_ALLOC_READ_AGF))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_agf_verify(bp);
+ if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
xfs_agf_write_verify(
struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ xfs_failaddr_t fa;
- if (!xfs_agf_verify(mp, bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_agf_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -2496,6 +2510,7 @@ const struct xfs_buf_ops xfs_agf_buf_ops = {
.name = "xfs_agf",
.verify_read = xfs_agf_read_verify,
.verify_write = xfs_agf_write_verify,
+ .verify_struct = xfs_agf_verify,
};
/*
@@ -2981,3 +2996,22 @@ xfs_verify_fsbno(
return false;
return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
}
+
+/* Is there a record covering a given extent? */
+int
+xfs_alloc_has_record(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool *exists)
+{
+ union xfs_btree_irec low;
+ union xfs_btree_irec high;
+
+ memset(&low, 0, sizeof(low));
+ low.a.ar_startblock = bno;
+ memset(&high, 0xFF, sizeof(high));
+ high.a.ar_startblock = bno + len - 1;
+
+ return xfs_btree_has_record(cur, &low, &high, exists);
+}
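
All of the XFS verifier churn follows one template: verifier predicates now return an xfs_failaddr_t, with __this_address capturing the code location of the failing check and NULL meaning clean, so corruption reports identify the exact predicate that fired and the same function can back the new ->verify_struct hook. A sketch of the converted shape (the xfs_sample_* names and XFS_SAMPLE_* constants are hypothetical):

static xfs_failaddr_t
xfs_sample_verify(
	struct xfs_buf		*bp)
{
	struct xfs_sample	*s = bp->b_addr;

	if (s->magic != cpu_to_be32(XFS_SAMPLE_MAGIC))
		return __this_address;		/* records this check's location */
	if (be32_to_cpu(s->count) > XFS_SAMPLE_MAX)
		return __this_address;
	return NULL;				/* NULL == structurally clean */
}

static void
xfs_sample_read_verify(
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (!xfs_buf_verify_cksum(bp, XFS_SAMPLE_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_sample_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}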
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 7ba2d129d504..65a0cafe06e4 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -198,6 +198,13 @@ xfs_free_extent(
enum xfs_ag_resv_type type); /* block reservation type */
int /* error */
+xfs_alloc_lookup_le(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t bno, /* starting block of extent */
+ xfs_extlen_t len, /* length of extent */
+ int *stat); /* success/failure */
+
+int /* error */
xfs_alloc_lookup_ge(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
@@ -237,4 +244,7 @@ bool xfs_verify_agbno(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno);
bool xfs_verify_fsbno(struct xfs_mount *mp, xfs_fsblock_t fsbno);
+int xfs_alloc_has_record(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, bool *exist);
+
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index cfde0a0f9706..6840b588187e 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -307,13 +307,14 @@ xfs_cntbt_diff_two_keys(
be32_to_cpu(k2->alloc.ar_startblock);
}
-static bool
+static xfs_failaddr_t
xfs_allocbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_perag *pag = bp->b_pag;
+ xfs_failaddr_t fa;
unsigned int level;
/*
@@ -331,29 +332,31 @@ xfs_allocbt_verify(
level = be16_to_cpu(block->bb_level);
switch (block->bb_magic) {
case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
/* fall through */
case cpu_to_be32(XFS_ABTB_MAGIC):
if (pag && pag->pagf_init) {
if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
- return false;
+ return __this_address;
} else if (level >= mp->m_ag_maxlevels)
- return false;
+ return __this_address;
break;
case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
/* fall through */
case cpu_to_be32(XFS_ABTC_MAGIC):
if (pag && pag->pagf_init) {
if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
- return false;
+ return __this_address;
} else if (level >= mp->m_ag_maxlevels)
- return false;
+ return __this_address;
break;
default:
- return false;
+ return __this_address;
}
return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
@@ -363,25 +366,30 @@ static void
xfs_allocbt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_sblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_allocbt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_allocbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
static void
xfs_allocbt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_allocbt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_allocbt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_sblock_calc_crc(bp);
@@ -392,6 +400,7 @@ const struct xfs_buf_ops xfs_allocbt_buf_ops = {
.name = "xfs_allocbt",
.verify_read = xfs_allocbt_read_verify,
.verify_write = xfs_allocbt_write_verify,
+ .verify_struct = xfs_allocbt_verify,
};
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index a76914db72ef..ce4a34a2751d 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -717,7 +717,6 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
return error;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
- args->trans = NULL;
return error;
}
@@ -770,7 +769,6 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
return 0;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
- args->trans = NULL;
return error;
}
@@ -1045,7 +1043,6 @@ out:
return retval;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
- args->trans = NULL;
goto out;
}
@@ -1186,7 +1183,6 @@ out:
return error;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
- args->trans = NULL;
goto out;
}
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 601eaa36f1ad..2135b8e67dcc 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -247,14 +247,15 @@ xfs_attr3_leaf_hdr_to_disk(
}
}
-static bool
+static xfs_failaddr_t
xfs_attr3_leaf_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_attr_leafblock *leaf = bp->b_addr;
- struct xfs_perag *pag = bp->b_pag;
- struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_attr_leafblock *leaf = bp->b_addr;
+ struct xfs_perag *pag = bp->b_pag;
+ struct xfs_attr_leaf_entry *entries;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -262,17 +263,17 @@ xfs_attr3_leaf_verify(
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
if (ichdr.magic != XFS_ATTR3_LEAF_MAGIC)
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
- return false;
+ return __this_address;
} else {
if (ichdr.magic != XFS_ATTR_LEAF_MAGIC)
- return false;
+ return __this_address;
}
/*
* In recovery there is a transient state where count == 0 is valid
@@ -280,12 +281,27 @@ xfs_attr3_leaf_verify(
* if the attr didn't fit in shortform.
*/
if (pag && pag->pagf_init && ichdr.count == 0)
- return false;
+ return __this_address;
+
+ /*
+ * firstused is the block offset of the first name info structure.
+ * Make sure it doesn't go off the block or crash into the header.
+ */
+ if (ichdr.firstused > mp->m_attr_geo->blksize)
+ return __this_address;
+ if (ichdr.firstused < xfs_attr3_leaf_hdr_size(leaf))
+ return __this_address;
+
+ /* Make sure the entries array doesn't crash into the name info. */
+ entries = xfs_attr3_leaf_entryp(bp->b_addr);
+ if ((char *)&entries[ichdr.count] >
+ (char *)bp->b_addr + ichdr.firstused)
+ return __this_address;
/* XXX: need to range check rest of attr header values */
/* XXX: hash order check? */
- return true;
+ return NULL;
}
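The three new firstused checks are easier to follow against the on-disk layout; an illustrative sketch (not to scale):

	/*
	 * Attr leaf block layout behind the checks above (illustrative):
	 *
	 *  0        hdr_size       &entries[count]    firstused    blksize
	 *  +--------+--------------+------------------+------------+
	 *  | header | entries[]    |    free space    | name info  |
	 *  +--------+--------------+------------------+------------+
	 *
	 * So a sane block needs hdr_size <= firstused <= blksize, and the
	 * entries array must end at or before the name info region that
	 * begins at firstused.
	 */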
static void
@@ -293,12 +309,13 @@ xfs_attr3_leaf_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_attr3_leaf_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_attr3_leaf_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -322,21 +339,23 @@ xfs_attr3_leaf_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_attr3_leaf_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_attr3_leaf_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
.name = "xfs_attr3_leaf",
.verify_read = xfs_attr3_leaf_read_verify,
.verify_write = xfs_attr3_leaf_write_verify,
+ .verify_struct = xfs_attr3_leaf_verify,
};
int
@@ -870,6 +889,80 @@ xfs_attr_shortform_allfit(
return xfs_attr_shortform_bytesfit(dp, bytes);
}
+/* Verify the consistency of an inline attribute fork. */
+xfs_failaddr_t
+xfs_attr_shortform_verify(
+ struct xfs_inode *ip)
+{
+ struct xfs_attr_shortform *sfp;
+ struct xfs_attr_sf_entry *sfep;
+ struct xfs_attr_sf_entry *next_sfep;
+ char *endp;
+ struct xfs_ifork *ifp;
+ int i;
+ int size;
+
+ ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
+ ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
+ sfp = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
+ size = ifp->if_bytes;
+
+ /*
+ * Give up if the attribute is way too short.
+ */
+ if (size < sizeof(struct xfs_attr_sf_hdr))
+ return __this_address;
+
+ endp = (char *)sfp + size;
+
+ /* Check all reported entries */
+ sfep = &sfp->list[0];
+ for (i = 0; i < sfp->hdr.count; i++) {
+ /*
+ * struct xfs_attr_sf_entry has a variable length.
+ * Check the fixed-offset parts of the structure are
+ * within the data buffer.
+ */
+ if (((char *)sfep + sizeof(*sfep)) >= endp)
+ return __this_address;
+
+ /* Don't allow names with known bad length. */
+ if (sfep->namelen == 0)
+ return __this_address;
+
+ /*
+ * Check that the variable-length part of the structure is
+ * within the data buffer. The next entry starts after the
+ * name component, so nextentry is an acceptable test.
+ */
+ next_sfep = XFS_ATTR_SF_NEXTENTRY(sfep);
+ if ((char *)next_sfep > endp)
+ return __this_address;
+
+ /*
+ * Check for unknown flags. Short form doesn't support
+ * the incomplete or local bits, so we can use the namespace
+ * mask here.
+ */
+ if (sfep->flags & ~XFS_ATTR_NSP_ONDISK_MASK)
+ return __this_address;
+
+ /*
+ * Check for invalid namespace combinations. We only allow
+ * one namespace flag per xattr, so we can just count the
+ * bits (i.e. hweight) here.
+ */
+ if (hweight8(sfep->flags & XFS_ATTR_NSP_ONDISK_MASK) > 1)
+ return __this_address;
+
+ sfep = next_sfep;
+ }
+ if ((void *)sfep != (void *)endp)
+ return __this_address;
+
+ return NULL;
+}
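At its core the new shortform verifier is a bounds-checked walk over variable-length records. The same discipline in isolation, as a self-contained sketch in which struct rec is a made-up stand-in for xfs_attr_sf_entry:

	#include <stdbool.h>
	#include <stddef.h>

	/* Made-up record: fixed header plus namelen trailing bytes. */
	struct rec {
		unsigned char	namelen;
		unsigned char	name[];
	};

	static bool
	recs_ok(char *buf, size_t size, int count)
	{
		char	*endp = buf + size;
		char	*p = buf;

		for (int i = 0; i < count; i++) {
			struct rec	*r = (struct rec *)p;

			/* fixed part must fit before namelen may be read */
			if (p + sizeof(*r) > endp)
				return false;
			/* the variable-length name must fit as well */
			p += sizeof(*r) + r->namelen;
			if (p > endp)
				return false;
		}
		return p == endp;	/* reject trailing garbage */
	}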
+
/*
* Convert a leaf attribute list to shortform attribute list
*/
@@ -2173,7 +2266,8 @@ xfs_attr3_leaf_lookup_int(
leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
- ASSERT(ichdr.count < args->geo->blksize / 8);
+ if (ichdr.count >= args->geo->blksize / 8)
+ return -EFSCORRUPTED;
/*
* Binary search. (note: small blocks will skip this loop)
@@ -2189,8 +2283,10 @@ xfs_attr3_leaf_lookup_int(
else
break;
}
- ASSERT(probe >= 0 && (!ichdr.count || probe < ichdr.count));
- ASSERT(span <= 4 || be32_to_cpu(entry->hashval) == hashval);
+ if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count)))
+ return -EFSCORRUPTED;
+ if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval))
+ return -EFSCORRUPTED;
/*
* Since we may have duplicate hashval's, find the first matching
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 894124efb421..4da08af5b134 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -53,6 +53,7 @@ int xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
int xfs_attr_shortform_remove(struct xfs_da_args *args);
int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
int xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
+xfs_failaddr_t xfs_attr_shortform_verify(struct xfs_inode *ip);
void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
/*
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index d56caf037ca0..21be186067a2 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -65,7 +65,7 @@ xfs_attr3_rmt_blocks(
* does CRC, location and bounds checking, the unpacking function checks the
* attribute parameters and owner.
*/
-static bool
+static xfs_failaddr_t
xfs_attr3_rmt_hdr_ok(
void *ptr,
xfs_ino_t ino,
@@ -76,19 +76,19 @@ xfs_attr3_rmt_hdr_ok(
struct xfs_attr3_rmt_hdr *rmt = ptr;
if (bno != be64_to_cpu(rmt->rm_blkno))
- return false;
+ return __this_address;
if (offset != be32_to_cpu(rmt->rm_offset))
- return false;
+ return __this_address;
if (size != be32_to_cpu(rmt->rm_bytes))
- return false;
+ return __this_address;
if (ino != be64_to_cpu(rmt->rm_owner))
- return false;
+ return __this_address;
/* ok */
- return true;
+ return NULL;
}
-static bool
+static xfs_failaddr_t
xfs_attr3_rmt_verify(
struct xfs_mount *mp,
void *ptr,
@@ -98,27 +98,29 @@ xfs_attr3_rmt_verify(
struct xfs_attr3_rmt_hdr *rmt = ptr;
if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
+ return __this_address;
if (rmt->rm_magic != cpu_to_be32(XFS_ATTR3_RMT_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(rmt->rm_blkno) != bno)
- return false;
+ return __this_address;
if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
- return false;
+ return __this_address;
if (be32_to_cpu(rmt->rm_offset) +
be32_to_cpu(rmt->rm_bytes) > XFS_XATTR_SIZE_MAX)
- return false;
+ return __this_address;
if (rmt->rm_owner == 0)
- return false;
+ return __this_address;
- return true;
+ return NULL;
}
-static void
-xfs_attr3_rmt_read_verify(
- struct xfs_buf *bp)
+static int
+__xfs_attr3_rmt_read_verify(
+ struct xfs_buf *bp,
+ bool check_crc,
+ xfs_failaddr_t *failaddr)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
char *ptr;
@@ -128,7 +130,7 @@ xfs_attr3_rmt_read_verify(
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
- return;
+ return 0;
ptr = bp->b_addr;
bno = bp->b_bn;
@@ -136,23 +138,48 @@ xfs_attr3_rmt_read_verify(
ASSERT(len >= blksize);
while (len > 0) {
- if (!xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
- xfs_buf_ioerror(bp, -EFSBADCRC);
- break;
- }
- if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- break;
+ if (check_crc &&
+ !xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
+ *failaddr = __this_address;
+ return -EFSBADCRC;
}
+ *failaddr = xfs_attr3_rmt_verify(mp, ptr, blksize, bno);
+ if (*failaddr)
+ return -EFSCORRUPTED;
len -= blksize;
ptr += blksize;
bno += BTOBB(blksize);
}
- if (bp->b_error)
- xfs_verifier_error(bp);
- else
- ASSERT(len == 0);
+ if (len != 0) {
+ *failaddr = __this_address;
+ return -EFSCORRUPTED;
+ }
+
+ return 0;
+}
+
+static void
+xfs_attr3_rmt_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+ int error;
+
+ error = __xfs_attr3_rmt_read_verify(bp, true, &fa);
+ if (error)
+ xfs_verifier_error(bp, error, fa);
+}
+
+static xfs_failaddr_t
+xfs_attr3_rmt_verify_struct(
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+ int error;
+
+ error = __xfs_attr3_rmt_read_verify(bp, false, &fa);
+ return error ? fa : NULL;
}
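Splitting the work into one helper parameterized on check_crc lets the read verifier (CRC plus structure) and the new verify_struct hook (structure only, for re-checking buffers already in memory) share the block walk. Reduced to its essentials, with stand-in types and declarations in place of the kernel ones:

	#include <stdbool.h>

	struct buf;				/* stand-in for struct xfs_buf */
	typedef const void *failaddr_t;		/* stand-in for xfs_failaddr_t */

	int  check(struct buf *b, bool check_crc, failaddr_t *fa);
	void report(struct buf *b, int error, failaddr_t fa);

	static void
	read_verify(struct buf *b)
	{
		failaddr_t	fa;
		int		error = check(b, true, &fa);

		if (error)	/* an -EFSBADCRC or -EFSCORRUPTED analogue */
			report(b, error, fa);
	}

	static failaddr_t
	verify_struct(struct buf *b)
	{
		failaddr_t	fa;

		return check(b, false, &fa) ? fa : NULL;
	}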
static void
@@ -160,6 +187,7 @@ xfs_attr3_rmt_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
int blksize = mp->m_attr_geo->blksize;
char *ptr;
int len;
@@ -177,9 +205,9 @@ xfs_attr3_rmt_write_verify(
while (len > 0) {
struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
- if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_attr3_rmt_verify(mp, ptr, blksize, bno);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -188,8 +216,7 @@ xfs_attr3_rmt_write_verify(
* xfs_attr3_rmt_hdr_set() for the explanation.
*/
if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
return;
}
xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
@@ -198,13 +225,16 @@ xfs_attr3_rmt_write_verify(
ptr += blksize;
bno += BTOBB(blksize);
}
- ASSERT(len == 0);
+
+ if (len != 0)
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
}
const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
.name = "xfs_attr3_rmt",
.verify_read = xfs_attr3_rmt_read_verify,
.verify_write = xfs_attr3_rmt_write_verify,
+ .verify_struct = xfs_attr3_rmt_verify_struct,
};
STATIC int
@@ -269,7 +299,7 @@ xfs_attr_rmtval_copyout(
byte_cnt = min(*valuelen, byte_cnt);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
- if (!xfs_attr3_rmt_hdr_ok(src, ino, *offset,
+ if (xfs_attr3_rmt_hdr_ok(src, ino, *offset,
byte_cnt, bno)) {
xfs_alert(mp,
"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 1bddbba6b80c..daae00ed30c5 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -400,7 +400,7 @@ xfs_bmap_check_leaf_extents(
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
XFS_WANT_CORRUPTED_GOTO(mp,
- XFS_FSB_SANITY_CHECK(mp, bno), error0);
+ xfs_verify_fsbno(mp, bno), error0);
if (bp_release) {
bp_release = 0;
xfs_trans_brelse(NULL, bp);
@@ -1220,7 +1220,7 @@ xfs_iread_extents(
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
XFS_WANT_CORRUPTED_GOTO(mp,
- XFS_FSB_SANITY_CHECK(mp, bno), out_brelse);
+ xfs_verify_fsbno(mp, bno), out_brelse);
xfs_trans_brelse(tp, bp);
}
@@ -3337,6 +3337,49 @@ xfs_bmap_btalloc_filestreams(
return 0;
}
+/* Update all inode and quota accounting for the allocation we just did. */
+static void
+xfs_bmap_btalloc_accounting(
+ struct xfs_bmalloca *ap,
+ struct xfs_alloc_arg *args)
+{
+ if (ap->flags & XFS_BMAPI_COWFORK) {
+ /*
+ * COW fork blocks are in-core only and thus are treated as
+ * in-core quota reservation (like delalloc blocks) even when
+ * converted to real blocks. The quota reservation is not
+ * accounted to disk until blocks are remapped to the data
+ * fork. So if these blocks were previously delalloc, we
+ * already have quota reservation and there's nothing to do
+ * yet.
+ */
+ if (ap->wasdel)
+ return;
+
+ /*
+ * Otherwise, we've allocated blocks in a hole. The transaction
+ * has acquired in-core quota reservation for this extent.
+ * Rather than account these as real blocks, however, we reduce
+ * the transaction quota reservation based on the allocation.
+ * This essentially transfers the transaction quota reservation
+ * to that of a delalloc extent.
+ */
+ ap->ip->i_delayed_blks += args->len;
+ xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
+ -(long)args->len);
+ return;
+ }
+
+ /* data/attr fork only */
+ ap->ip->i_d.di_nblocks += args->len;
+ xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
+ if (ap->wasdel)
+ ap->ip->i_delayed_blks -= args->len;
+ xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
+ ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
+ args->len);
+}
+
STATIC int
xfs_bmap_btalloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -3347,6 +3390,8 @@ xfs_bmap_btalloc(
xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
xfs_agnumber_t ag;
xfs_alloc_arg_t args;
+ xfs_fileoff_t orig_offset;
+ xfs_extlen_t orig_length;
xfs_extlen_t blen;
xfs_extlen_t nextminlen = 0;
int nullfb; /* true if ap->firstblock isn't set */
@@ -3356,6 +3401,8 @@ xfs_bmap_btalloc(
int stripe_align;
ASSERT(ap->length);
+ orig_offset = ap->offset;
+ orig_length = ap->length;
mp = ap->ip->i_mount;
@@ -3571,19 +3618,23 @@ xfs_bmap_btalloc(
*ap->firstblock = args.fsbno;
ASSERT(nullfb || fb_agno <= args.agno);
ap->length = args.len;
- if (!(ap->flags & XFS_BMAPI_COWFORK))
- ap->ip->i_d.di_nblocks += args.len;
- xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
- if (ap->wasdel)
- ap->ip->i_delayed_blks -= args.len;
/*
- * Adjust the disk quota also. This was reserved
- * earlier.
+ * If the extent size hint is active, we tried to round the
+ * caller's allocation request offset down to extsz and the
+ * length up to another extsz boundary. If we found a free
+ * extent we mapped it in starting at this new offset. If the
+ * newly mapped space isn't long enough to cover any of the
+ * range of offsets that was originally requested, move the
+ * mapping up so that we can fill as much of the caller's
+ * original request as possible. Free space is apparently
+ * very fragmented so we're unlikely to be able to satisfy the
+ * hints anyway.
*/
- xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
- ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
- XFS_TRANS_DQ_BCOUNT,
- (long) args.len);
+ if (ap->length <= orig_length)
+ ap->offset = orig_offset;
+ else if (ap->offset + ap->length < orig_offset + orig_length)
+ ap->offset = orig_offset + orig_length - ap->length;
+ xfs_bmap_btalloc_accounting(ap, &args);
} else {
ap->blkno = NULLFSBLOCK;
ap->length = 0;
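A worked example (illustrative numbers only) makes the two-branch offset adjustment above concrete:

	/*
	 * Suppose orig_offset = 100, orig_length = 2, and an extent size
	 * hint of 16 rounded the request to offset 96, length 16.
	 *
	 * Got 1 block  -> ap->length (1) <= orig_length (2), so map it at
	 *                 orig_offset: [100,101) overlaps the request.
	 * Got 5 blocks -> 96 + 5 = 101 < 102, so the mapping would end
	 *                 short of the request; slide it up to end at 102:
	 *                 ap->offset = 100 + 2 - 5 = 97, giving [97,102).
	 * Got 8 blocks -> 96 + 8 = 104 >= 102, so the mapping [96,104)
	 *                 already covers [100,102); keep the rounded offset.
	 */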
@@ -3876,8 +3927,6 @@ xfs_bmapi_reserve_delalloc(
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_extlen_t alen;
xfs_extlen_t indlen;
- char rt = XFS_IS_REALTIME_INODE(ip);
- xfs_extlen_t extsz;
int error;
xfs_fileoff_t aoff = off;
@@ -3892,31 +3941,25 @@ xfs_bmapi_reserve_delalloc(
prealloc = alen - len;
/* Figure out the extent size, adjust alen */
- if (whichfork == XFS_COW_FORK)
- extsz = xfs_get_cowextsz_hint(ip);
- else
- extsz = xfs_get_extsz_hint(ip);
- if (extsz) {
+ if (whichfork == XFS_COW_FORK) {
struct xfs_bmbt_irec prev;
+ xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
prev.br_startoff = NULLFILEOFF;
- error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
+ error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
1, 0, &aoff, &alen);
ASSERT(!error);
}
- if (rt)
- extsz = alen / mp->m_sb.sb_rextsize;
-
/*
* Make a transaction-less quota reservation for delayed allocation
* blocks. This number gets adjusted later. We return if we haven't
* allocated blocks already inside this loop.
*/
error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
- rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ XFS_QMOPT_RES_REGBLKS);
if (error)
return error;
@@ -3927,12 +3970,7 @@ xfs_bmapi_reserve_delalloc(
indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
ASSERT(indlen > 0);
- if (rt) {
- error = xfs_mod_frextents(mp, -((int64_t)extsz));
- } else {
- error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
- }
-
+ error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
if (error)
goto out_unreserve_quota;
@@ -3963,14 +4001,11 @@ xfs_bmapi_reserve_delalloc(
return 0;
out_unreserve_blocks:
- if (rt)
- xfs_mod_frextents(mp, extsz);
- else
- xfs_mod_fdblocks(mp, alen, false);
+ xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
- xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
- XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
+ XFS_QMOPT_RES_REGBLKS);
return error;
}
@@ -4304,8 +4339,16 @@ xfs_bmapi_write(
while (bno < end && n < *nmap) {
bool need_alloc = false, wasdelay = false;
- /* in hole or beyoned EOF? */
+ /* in hole or beyond EOF? */
if (eof || bma.got.br_startoff > bno) {
+ /*
+ * CoW fork conversions should /never/ hit EOF or
+ * holes. There should always be something for us
+ * to work on.
+ */
+ ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
+ (flags & XFS_BMAPI_COWFORK)));
+
if (flags & XFS_BMAPI_DELALLOC) {
/*
* For the COW fork we can reasonably get a
@@ -4824,6 +4867,7 @@ xfs_bmap_del_extent_cow(
xfs_iext_insert(ip, icur, &new, state);
break;
}
+ ip->i_delayed_blks -= del->br_blockcount;
}
/*
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index c10aecaaae44..9faf479aba49 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -425,33 +425,29 @@ xfs_bmbt_diff_two_keys(
be64_to_cpu(k2->bmbt.br_startoff);
}
-static bool
+static xfs_failaddr_t
xfs_bmbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ xfs_failaddr_t fa;
unsigned int level;
switch (block->bb_magic) {
case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
- if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
- if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
- if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn)
- return false;
/*
* XXX: need a better way of verifying the owner here. Right now
* just make sure there has been one set.
*/
- if (be64_to_cpu(block->bb_u.l.bb_owner) == 0)
- return false;
+ fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
+ if (fa)
+ return fa;
/* fall through */
case cpu_to_be32(XFS_BMAP_MAGIC):
break;
default:
- return false;
+ return __this_address;
}
/*
@@ -463,46 +459,39 @@ xfs_bmbt_verify(
*/
level = be16_to_cpu(block->bb_level);
if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
- return false;
- if (be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
- return false;
-
- /* sibling pointer verification */
- if (!block->bb_u.l.bb_leftsib ||
- (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
- !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))))
- return false;
- if (!block->bb_u.l.bb_rightsib ||
- (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
- !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))))
- return false;
-
- return true;
+ return __this_address;
+
+ return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}
static void
xfs_bmbt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_lblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_bmbt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_bmbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
static void
xfs_bmbt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_bmbt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_bmbt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_lblock_calc_crc(bp);
@@ -512,6 +501,7 @@ const struct xfs_buf_ops xfs_bmbt_buf_ops = {
.name = "xfs_bmbt",
.verify_read = xfs_bmbt_read_verify,
.verify_write = xfs_bmbt_write_verify,
+ .verify_struct = xfs_bmbt_verify,
};
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 5f33adf8eecb..79ee4a1951d1 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -273,7 +273,7 @@ xfs_btree_lblock_calc_crc(
struct xfs_buf *bp)
{
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
return;
@@ -311,7 +311,7 @@ xfs_btree_sblock_calc_crc(
struct xfs_buf *bp)
{
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
return;
@@ -329,7 +329,7 @@ xfs_btree_sblock_verify_crc(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
- return false;
+ return __this_address;
return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}
@@ -853,7 +853,7 @@ xfs_btree_read_bufl(
xfs_daddr_t d; /* real disk block address */
int error;
- if (!XFS_FSB_SANITY_CHECK(mp, fsbno))
+ if (!xfs_verify_fsbno(mp, fsbno))
return -EFSCORRUPTED;
d = XFS_FSB_TO_DADDR(mp, fsbno);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
@@ -4529,6 +4529,51 @@ xfs_btree_change_owner(
&bbcoi);
}
+/* Verify the v5 fields of a long-format btree block. */
+xfs_failaddr_t
+xfs_btree_lblock_v5hdr_verify(
+ struct xfs_buf *bp,
+ uint64_t owner)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return __this_address;
+ if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
+ return __this_address;
+ if (block->bb_u.l.bb_blkno != cpu_to_be64(bp->b_bn))
+ return __this_address;
+ if (owner != XFS_RMAP_OWN_UNKNOWN &&
+ be64_to_cpu(block->bb_u.l.bb_owner) != owner)
+ return __this_address;
+ return NULL;
+}
+
+/* Verify a long-format btree block. */
+xfs_failaddr_t
+xfs_btree_lblock_verify(
+ struct xfs_buf *bp,
+ unsigned int max_recs)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+
+ /* numrecs verification */
+ if (be16_to_cpu(block->bb_numrecs) > max_recs)
+ return __this_address;
+
+ /* sibling pointer verification */
+ if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
+ !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))
+ return __this_address;
+ if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
+ !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))
+ return __this_address;
+
+ return NULL;
+}
+
/**
* xfs_btree_sblock_v5hdr_verify() -- verify the v5 fields of a short-format
* btree block
@@ -4537,7 +4582,7 @@ xfs_btree_change_owner(
* @max_recs: pointer to the m_*_mxr max records field in the xfs mount
* @pag_max_level: pointer to the per-ag max level field
*/
-bool
+xfs_failaddr_t
xfs_btree_sblock_v5hdr_verify(
struct xfs_buf *bp)
{
@@ -4546,14 +4591,14 @@ xfs_btree_sblock_v5hdr_verify(
struct xfs_perag *pag = bp->b_pag;
if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
+ return __this_address;
if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
- return false;
+ return __this_address;
if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
- return false;
- return true;
+ return __this_address;
+ return NULL;
}
/**
@@ -4562,29 +4607,29 @@ xfs_btree_sblock_v5hdr_verify(
* @bp: buffer containing the btree block
* @max_recs: maximum records allowed in this btree node
*/
-bool
+xfs_failaddr_t
xfs_btree_sblock_verify(
struct xfs_buf *bp,
unsigned int max_recs)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ xfs_agnumber_t agno;
/* numrecs verification */
if (be16_to_cpu(block->bb_numrecs) > max_recs)
- return false;
+ return __this_address;
/* sibling pointer verification */
- if (!block->bb_u.s.bb_leftsib ||
- (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
- block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
- return false;
- if (!block->bb_u.s.bb_rightsib ||
- (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
- block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
- return false;
+ agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
+ if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
+ !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_leftsib)))
+ return __this_address;
+ if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
+ !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib)))
+ return __this_address;
- return true;
+ return NULL;
}
/*
@@ -4953,3 +4998,33 @@ xfs_btree_diff_two_ptrs(
return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l);
return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
}
+
+/* If there's an extent, we're done. */
+STATIC int
+xfs_btree_has_record_helper(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec,
+ void *priv)
+{
+ return XFS_BTREE_QUERY_RANGE_ABORT;
+}
+
+/* Is there a record covering a given range of keys? */
+int
+xfs_btree_has_record(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_irec *low,
+ union xfs_btree_irec *high,
+ bool *exists)
+{
+ int error;
+
+ error = xfs_btree_query_range(cur, low, high,
+ &xfs_btree_has_record_helper, NULL);
+ if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
+ *exists = true;
+ return 0;
+ }
+ *exists = false;
+ return error;
+}
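xfs_btree_has_record turns the generic range query into an existence test by having the callback abort at the first record and then treating the abort code as success. The idiom in isolation, as a self-contained sketch:

	#include <stdbool.h>

	#define WALK_ABORT	1	/* sentinel, like XFS_BTREE_QUERY_RANGE_ABORT */

	static int
	stop_at_first(int item, void *priv)
	{
		(void)item;
		(void)priv;
		return WALK_ABORT;	/* any record at all is enough */
	}

	static int
	walk(const int *items, int n, int (*fn)(int, void *), void *priv)
	{
		for (int i = 0; i < n; i++) {
			int	ret = fn(items[i], priv);

			if (ret)
				return ret;
		}
		return 0;
	}

	static int
	has_item(const int *items, int n, bool *exists)
	{
		int	error = walk(items, n, stop_at_first, NULL);

		if (error == WALK_ABORT) {	/* abort means "found one" */
			*exists = true;
			return 0;
		}
		*exists = false;
		return error;			/* 0 or a real error */
	}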
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index b57501c6f71d..50440b5618e8 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -473,10 +473,6 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block)
#define XFS_FILBLKS_MIN(a,b) min_t(xfs_filblks_t, (a), (b))
#define XFS_FILBLKS_MAX(a,b) max_t(xfs_filblks_t, (a), (b))
-#define XFS_FSB_SANITY_CHECK(mp,fsb) \
- (fsb && XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
- XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks)
-
/*
* Trace hooks. Currently not implemented as they need to be ported
* over to the generic tracing functionality, which is some effort.
@@ -496,8 +492,14 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block)
#define XFS_BTREE_TRACE_ARGR(c, r)
#define XFS_BTREE_TRACE_CURSOR(c, t)
-bool xfs_btree_sblock_v5hdr_verify(struct xfs_buf *bp);
-bool xfs_btree_sblock_verify(struct xfs_buf *bp, unsigned int max_recs);
+xfs_failaddr_t xfs_btree_sblock_v5hdr_verify(struct xfs_buf *bp);
+xfs_failaddr_t xfs_btree_sblock_verify(struct xfs_buf *bp,
+ unsigned int max_recs);
+xfs_failaddr_t xfs_btree_lblock_v5hdr_verify(struct xfs_buf *bp,
+ uint64_t owner);
+xfs_failaddr_t xfs_btree_lblock_verify(struct xfs_buf *bp,
+ unsigned int max_recs);
+
uint xfs_btree_compute_maxlevels(struct xfs_mount *mp, uint *limits,
unsigned long len);
xfs_extlen_t xfs_btree_calc_size(struct xfs_mount *mp, uint *limits,
@@ -545,5 +547,7 @@ void xfs_btree_get_keys(struct xfs_btree_cur *cur,
struct xfs_btree_block *block, union xfs_btree_key *key);
union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
union xfs_btree_key *key);
+int xfs_btree_has_record(struct xfs_btree_cur *cur, union xfs_btree_irec *low,
+ union xfs_btree_irec *high, bool *exists);
#endif /* __XFS_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 651611530d2f..ea187b4a7991 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -128,7 +128,7 @@ xfs_da_state_free(xfs_da_state_t *state)
kmem_zone_free(xfs_da_state_zone, state);
}
-static bool
+static xfs_failaddr_t
xfs_da3_node_verify(
struct xfs_buf *bp)
{
@@ -145,24 +145,24 @@ xfs_da3_node_verify(
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
if (ichdr.magic != XFS_DA3_NODE_MAGIC)
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
- return false;
+ return __this_address;
} else {
if (ichdr.magic != XFS_DA_NODE_MAGIC)
- return false;
+ return __this_address;
}
if (ichdr.level == 0)
- return false;
+ return __this_address;
if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
- return false;
+ return __this_address;
if (ichdr.count == 0)
- return false;
+ return __this_address;
/*
* we don't know if the node is for an attribute or directory tree,
@@ -170,11 +170,11 @@ xfs_da3_node_verify(
*/
if (ichdr.count > mp->m_dir_geo->node_ents &&
ichdr.count > mp->m_attr_geo->node_ents)
- return false;
+ return __this_address;
/* XXX: hash order check? */
- return true;
+ return NULL;
}
static void
@@ -182,12 +182,13 @@ xfs_da3_node_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_da3_node_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_da3_node_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -211,19 +212,20 @@ xfs_da3_node_read_verify(
struct xfs_buf *bp)
{
struct xfs_da_blkinfo *info = bp->b_addr;
+ xfs_failaddr_t fa;
switch (be16_to_cpu(info->magic)) {
case XFS_DA3_NODE_MAGIC:
if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
- xfs_buf_ioerror(bp, -EFSBADCRC);
+ xfs_verifier_error(bp, -EFSBADCRC,
+ __this_address);
break;
}
/* fall through */
case XFS_DA_NODE_MAGIC:
- if (!xfs_da3_node_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- break;
- }
+ fa = xfs_da3_node_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
@@ -236,18 +238,40 @@ xfs_da3_node_read_verify(
bp->b_ops->verify_read(bp);
return;
default:
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
break;
}
+}
+
+/* Verify the structure of a da3 block. */
+static xfs_failaddr_t
+xfs_da3_node_verify_struct(
+ struct xfs_buf *bp)
+{
+ struct xfs_da_blkinfo *info = bp->b_addr;
- /* corrupt block */
- xfs_verifier_error(bp);
+ switch (be16_to_cpu(info->magic)) {
+ case XFS_DA3_NODE_MAGIC:
+ case XFS_DA_NODE_MAGIC:
+ return xfs_da3_node_verify(bp);
+ case XFS_ATTR_LEAF_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
+ bp->b_ops = &xfs_attr3_leaf_buf_ops;
+ return bp->b_ops->verify_struct(bp);
+ case XFS_DIR2_LEAFN_MAGIC:
+ case XFS_DIR3_LEAFN_MAGIC:
+ bp->b_ops = &xfs_dir3_leafn_buf_ops;
+ return bp->b_ops->verify_struct(bp);
+ default:
+ return __this_address;
+ }
}
const struct xfs_buf_ops xfs_da3_node_buf_ops = {
.name = "xfs_da3_node",
.verify_read = xfs_da3_node_read_verify,
.verify_write = xfs_da3_node_write_verify,
+ .verify_struct = xfs_da3_node_verify_struct,
};
int
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 3771edcb301d..7e77299b7789 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -875,4 +875,10 @@ struct xfs_attr3_rmt_hdr {
((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
sizeof(struct xfs_attr3_rmt_hdr) : 0))
+/* Number of bytes in a directory block. */
+static inline unsigned int xfs_dir2_dirblock_bytes(struct xfs_sb *sbp)
+{
+ return 1 << (sbp->sb_blocklog + sbp->sb_dirblklog);
+}
+
#endif /* __XFS_DA_FORMAT_H__ */
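As a worked example of the new helper (illustrative values): with 4096-byte filesystem blocks (sb_blocklog = 12) and sb_dirblklog = 2, xfs_dir2_dirblock_bytes() returns 1 << (12 + 2) = 16384, so each directory block spans four filesystem blocks; this is the quantity the open-coded shifts in xfs_da_mount() below computed by hand.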
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index e10778c102ea..92f94e190f04 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -119,8 +119,7 @@ xfs_da_mount(
ASSERT(mp->m_sb.sb_versionnum & XFS_SB_VERSION_DIRV2BIT);
- ASSERT((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) <=
- XFS_MAX_BLOCKSIZE);
+ ASSERT(xfs_dir2_dirblock_bytes(&mp->m_sb) <= XFS_MAX_BLOCKSIZE);
mp->m_dir_inode_ops = xfs_dir_get_ops(mp, NULL);
mp->m_nondir_inode_ops = xfs_nondir_get_ops(mp, NULL);
@@ -140,7 +139,7 @@ xfs_da_mount(
dageo = mp->m_dir_geo;
dageo->blklog = mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog;
dageo->fsblog = mp->m_sb.sb_blocklog;
- dageo->blksize = 1 << dageo->blklog;
+ dageo->blksize = xfs_dir2_dirblock_bytes(&mp->m_sb);
dageo->fsbcount = 1 << mp->m_sb.sb_dirblklog;
/*
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 1a8f2cf977ca..388d67c5c903 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -340,5 +340,7 @@ xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
#define XFS_READDIR_BUFSIZE (32768)
unsigned char xfs_dir3_get_dtype(struct xfs_mount *mp, uint8_t filetype);
+void *xfs_dir3_data_endp(struct xfs_da_geometry *geo,
+ struct xfs_dir2_data_hdr *hdr);
#endif /* __XFS_DIR2_H__ */
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 43c902f7a68d..2da86a394bcf 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -58,7 +58,7 @@ xfs_dir_startup(void)
xfs_dir_hash_dotdot = xfs_da_hashname((unsigned char *)"..", 2);
}
-static bool
+static xfs_failaddr_t
xfs_dir3_block_verify(
struct xfs_buf *bp)
{
@@ -67,20 +67,18 @@ xfs_dir3_block_verify(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (hdr3->magic != cpu_to_be32(XFS_DIR3_BLOCK_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
- return false;
+ return __this_address;
} else {
if (hdr3->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))
- return false;
+ return __this_address;
}
- if (__xfs_dir3_data_check(NULL, bp))
- return false;
- return true;
+ return __xfs_dir3_data_check(NULL, bp);
}
static void
@@ -88,15 +86,16 @@ xfs_dir3_block_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dir3_block_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dir3_block_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -104,12 +103,13 @@ xfs_dir3_block_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_dir3_block_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_dir3_block_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -126,6 +126,7 @@ const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
.name = "xfs_dir3_block",
.verify_read = xfs_dir3_block_read_verify,
.verify_write = xfs_dir3_block_write_verify,
+ .verify_struct = xfs_dir3_block_verify,
};
int
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 8727a43115ef..920279485275 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -36,9 +36,9 @@
/*
* Check the consistency of the data block.
* The input can also be a block-format directory.
- * Return 0 is the buffer is good, otherwise an error.
+ * Return NULL if the buffer is good, otherwise the address of the error.
*/
-int
+xfs_failaddr_t
__xfs_dir3_data_check(
struct xfs_inode *dp, /* incore inode pointer */
struct xfs_buf *bp) /* data block's buffer */
@@ -73,6 +73,14 @@ __xfs_dir3_data_check(
*/
ops = xfs_dir_get_ops(mp, dp);
+ /*
+ * If this isn't a directory, or we don't get handed the dir ops,
+ * something is seriously wrong. Bail out.
+ */
+ if ((dp && !S_ISDIR(VFS_I(dp)->i_mode)) ||
+ ops != xfs_dir_get_ops(mp, NULL))
+ return __this_address;
+
hdr = bp->b_addr;
p = (char *)ops->data_entry_p(hdr);
@@ -81,7 +89,6 @@ __xfs_dir3_data_check(
case cpu_to_be32(XFS_DIR2_BLOCK_MAGIC):
btp = xfs_dir2_block_tail_p(geo, hdr);
lep = xfs_dir2_block_leaf_p(btp);
- endp = (char *)lep;
/*
* The number of leaf entries is limited by the size of the
@@ -90,17 +97,19 @@ __xfs_dir3_data_check(
* so just ensure that the count falls somewhere inside the
* block right now.
*/
- XFS_WANT_CORRUPTED_RETURN(mp, be32_to_cpu(btp->count) <
- ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry));
+ if (be32_to_cpu(btp->count) >=
+ ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry))
+ return __this_address;
break;
case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
- endp = (char *)hdr + geo->blksize;
break;
default:
- XFS_ERROR_REPORT("Bad Magic", XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
+ return __this_address;
}
+ endp = xfs_dir3_data_endp(geo, hdr);
+ if (!endp)
+ return __this_address;
/*
* Account for zero bestfree entries.
@@ -108,22 +117,25 @@ __xfs_dir3_data_check(
bf = ops->data_bestfree_p(hdr);
count = lastfree = freeseen = 0;
if (!bf[0].length) {
- XFS_WANT_CORRUPTED_RETURN(mp, !bf[0].offset);
+ if (bf[0].offset)
+ return __this_address;
freeseen |= 1 << 0;
}
if (!bf[1].length) {
- XFS_WANT_CORRUPTED_RETURN(mp, !bf[1].offset);
+ if (bf[1].offset)
+ return __this_address;
freeseen |= 1 << 1;
}
if (!bf[2].length) {
- XFS_WANT_CORRUPTED_RETURN(mp, !bf[2].offset);
+ if (bf[2].offset)
+ return __this_address;
freeseen |= 1 << 2;
}
- XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[0].length) >=
- be16_to_cpu(bf[1].length));
- XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[1].length) >=
- be16_to_cpu(bf[2].length));
+ if (be16_to_cpu(bf[0].length) < be16_to_cpu(bf[1].length))
+ return __this_address;
+ if (be16_to_cpu(bf[1].length) < be16_to_cpu(bf[2].length))
+ return __this_address;
/*
* Loop over the data/unused entries.
*/
@@ -135,22 +147,23 @@ __xfs_dir3_data_check(
* doesn't need to be there.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- XFS_WANT_CORRUPTED_RETURN(mp, lastfree == 0);
- XFS_WANT_CORRUPTED_RETURN(mp, endp >=
- p + be16_to_cpu(dup->length));
- XFS_WANT_CORRUPTED_RETURN(mp,
- be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
- (char *)dup - (char *)hdr);
+ if (lastfree != 0)
+ return __this_address;
+ if (endp < p + be16_to_cpu(dup->length))
+ return __this_address;
+ if (be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) !=
+ (char *)dup - (char *)hdr)
+ return __this_address;
dfp = xfs_dir2_data_freefind(hdr, bf, dup);
if (dfp) {
i = (int)(dfp - bf);
- XFS_WANT_CORRUPTED_RETURN(mp,
- (freeseen & (1 << i)) == 0);
+ if ((freeseen & (1 << i)) != 0)
+ return __this_address;
freeseen |= 1 << i;
} else {
- XFS_WANT_CORRUPTED_RETURN(mp,
- be16_to_cpu(dup->length) <=
- be16_to_cpu(bf[2].length));
+ if (be16_to_cpu(dup->length) >
+ be16_to_cpu(bf[2].length))
+ return __this_address;
}
p += be16_to_cpu(dup->length);
lastfree = 1;
@@ -163,16 +176,17 @@ __xfs_dir3_data_check(
* The linear search is crude but this is DEBUG code.
*/
dep = (xfs_dir2_data_entry_t *)p;
- XFS_WANT_CORRUPTED_RETURN(mp, dep->namelen != 0);
- XFS_WANT_CORRUPTED_RETURN(mp,
- !xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)));
- XFS_WANT_CORRUPTED_RETURN(mp, endp >=
- p + ops->data_entsize(dep->namelen));
- XFS_WANT_CORRUPTED_RETURN(mp,
- be16_to_cpu(*ops->data_entry_tag_p(dep)) ==
- (char *)dep - (char *)hdr);
- XFS_WANT_CORRUPTED_RETURN(mp,
- ops->data_get_ftype(dep) < XFS_DIR3_FT_MAX);
+ if (dep->namelen == 0)
+ return __this_address;
+ if (xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)))
+ return __this_address;
+ if (endp < p + ops->data_entsize(dep->namelen))
+ return __this_address;
+ if (be16_to_cpu(*ops->data_entry_tag_p(dep)) !=
+ (char *)dep - (char *)hdr)
+ return __this_address;
+ if (ops->data_get_ftype(dep) >= XFS_DIR3_FT_MAX)
+ return __this_address;
count++;
lastfree = 0;
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
@@ -188,34 +202,52 @@ __xfs_dir3_data_check(
be32_to_cpu(lep[i].hashval) == hash)
break;
}
- XFS_WANT_CORRUPTED_RETURN(mp,
- i < be32_to_cpu(btp->count));
+ if (i >= be32_to_cpu(btp->count))
+ return __this_address;
}
p += ops->data_entsize(dep->namelen);
}
/*
* Need to have seen all the entries and all the bestfree slots.
*/
- XFS_WANT_CORRUPTED_RETURN(mp, freeseen == 7);
+ if (freeseen != 7)
+ return __this_address;
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {
if (lep[i].address ==
cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
- if (i > 0)
- XFS_WANT_CORRUPTED_RETURN(mp,
- be32_to_cpu(lep[i].hashval) >=
- be32_to_cpu(lep[i - 1].hashval));
+ if (i > 0 && be32_to_cpu(lep[i].hashval) <
+ be32_to_cpu(lep[i - 1].hashval))
+ return __this_address;
}
- XFS_WANT_CORRUPTED_RETURN(mp, count ==
- be32_to_cpu(btp->count) - be32_to_cpu(btp->stale));
- XFS_WANT_CORRUPTED_RETURN(mp, stale == be32_to_cpu(btp->stale));
+ if (count != be32_to_cpu(btp->count) - be32_to_cpu(btp->stale))
+ return __this_address;
+ if (stale != be32_to_cpu(btp->stale))
+ return __this_address;
}
- return 0;
+ return NULL;
+}
+
+#ifdef DEBUG
+void
+xfs_dir3_data_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ fa = __xfs_dir3_data_check(dp, bp);
+ if (!fa)
+ return;
+ xfs_corruption_error(__func__, XFS_ERRLEVEL_LOW, dp->i_mount,
+ bp->b_addr, __FILE__, __LINE__, fa);
+ ASSERT(0);
}
+#endif
-static bool
+static xfs_failaddr_t
xfs_dir3_data_verify(
struct xfs_buf *bp)
{
@@ -224,20 +256,18 @@ xfs_dir3_data_verify(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (hdr3->magic != cpu_to_be32(XFS_DIR3_DATA_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
- return false;
+ return __this_address;
} else {
if (hdr3->magic != cpu_to_be32(XFS_DIR2_DATA_MAGIC))
- return false;
+ return __this_address;
}
- if (__xfs_dir3_data_check(NULL, bp))
- return false;
- return true;
+ return __xfs_dir3_data_check(NULL, bp);
}
/*
@@ -263,8 +293,7 @@ xfs_dir3_data_reada_verify(
bp->b_ops->verify_read(bp);
return;
default:
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
break;
}
}
@@ -274,15 +303,16 @@ xfs_dir3_data_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
- !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dir3_data_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF))
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dir3_data_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -290,12 +320,13 @@ xfs_dir3_data_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_dir3_data_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_dir3_data_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -312,6 +343,7 @@ const struct xfs_buf_ops xfs_dir3_data_buf_ops = {
.name = "xfs_dir3_data",
.verify_read = xfs_dir3_data_read_verify,
.verify_write = xfs_dir3_data_write_verify,
+ .verify_struct = xfs_dir3_data_verify,
};
static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
@@ -515,7 +547,6 @@ xfs_dir2_data_freescan_int(
struct xfs_dir2_data_hdr *hdr,
int *loghead)
{
- xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* active data entry */
xfs_dir2_data_unused_t *dup; /* unused data entry */
struct xfs_dir2_data_free *bf;
@@ -537,12 +568,7 @@ xfs_dir2_data_freescan_int(
* Set up pointers.
*/
p = (char *)ops->data_entry_p(hdr);
- if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
- hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
- btp = xfs_dir2_block_tail_p(geo, hdr);
- endp = (char *)xfs_dir2_block_leaf_p(btp);
- } else
- endp = (char *)hdr + geo->blksize;
+ endp = xfs_dir3_data_endp(geo, hdr);
/*
* Loop over the block's entries.
*/
@@ -755,17 +781,9 @@ xfs_dir2_data_make_free(
/*
* Figure out where the end of the data area is.
*/
- if (hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
- hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC))
- endptr = (char *)hdr + args->geo->blksize;
- else {
- xfs_dir2_block_tail_t *btp; /* block tail */
+ endptr = xfs_dir3_data_endp(args->geo, hdr);
+ ASSERT(endptr != NULL);
- ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
- hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
- btp = xfs_dir2_block_tail_p(args->geo, hdr);
- endptr = (char *)xfs_dir2_block_leaf_p(btp);
- }
/*
* If this isn't the start of the block, then back up to
* the previous entry and see if it's free.
@@ -1067,3 +1085,21 @@ xfs_dir2_data_use_free(
}
*needscanp = needscan;
}
+
+/* Find the end of the entry data in a data/block format dir block. */
+void *
+xfs_dir3_data_endp(
+ struct xfs_da_geometry *geo,
+ struct xfs_dir2_data_hdr *hdr)
+{
+ switch (hdr->magic) {
+ case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC):
+ case cpu_to_be32(XFS_DIR2_BLOCK_MAGIC):
+ return xfs_dir2_block_leaf_p(xfs_dir2_block_tail_p(geo, hdr));
+ case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
+ case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
+ return (char *)hdr + geo->blksize;
+ default:
+ return NULL;
+ }
+}
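All three converted call sites above share the same shape; a hedged usage fragment of the new helper, reusing names already present in this patch:

	char	*p = (char *)ops->data_entry_p(hdr);
	char	*endp = xfs_dir3_data_endp(geo, hdr);

	if (!endp)			/* unrecognized magic number */
		return __this_address;
	while (p < endp) {
		/* ... examine one entry or unused region, advance p ... */
	}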
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index 27297a689d9c..d7e630f41f9c 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -50,13 +50,7 @@ static void xfs_dir3_leaf_log_tail(struct xfs_da_args *args,
* Pop an assert if something is wrong.
*/
#ifdef DEBUG
-#define xfs_dir3_leaf_check(dp, bp) \
-do { \
- if (!xfs_dir3_leaf1_check((dp), (bp))) \
- ASSERT(0); \
-} while (0);
-
-STATIC bool
+static xfs_failaddr_t
xfs_dir3_leaf1_check(
struct xfs_inode *dp,
struct xfs_buf *bp)
@@ -69,17 +63,32 @@ xfs_dir3_leaf1_check(
if (leafhdr.magic == XFS_DIR3_LEAF1_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
} else if (leafhdr.magic != XFS_DIR2_LEAF1_MAGIC)
- return false;
+ return __this_address;
return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
}
+
+static inline void
+xfs_dir3_leaf_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ fa = xfs_dir3_leaf1_check(dp, bp);
+ if (!fa)
+ return;
+ xfs_corruption_error(__func__, XFS_ERRLEVEL_LOW, dp->i_mount,
+ bp->b_addr, __FILE__, __LINE__, fa);
+ ASSERT(0);
+}
#else
#define xfs_dir3_leaf_check(dp, bp)
#endif
-bool
+xfs_failaddr_t
xfs_dir3_leaf_check_int(
struct xfs_mount *mp,
struct xfs_inode *dp,
@@ -114,27 +123,27 @@ xfs_dir3_leaf_check_int(
* We can deduce a value for that from di_size.
*/
if (hdr->count > ops->leaf_max_ents(geo))
- return false;
+ return __this_address;
/* Leaves and bests don't overlap in leaf format. */
if ((hdr->magic == XFS_DIR2_LEAF1_MAGIC ||
hdr->magic == XFS_DIR3_LEAF1_MAGIC) &&
(char *)&ents[hdr->count] > (char *)xfs_dir2_leaf_bests_p(ltp))
- return false;
+ return __this_address;
/* Check hash value order, count stale entries. */
for (i = stale = 0; i < hdr->count; i++) {
if (i + 1 < hdr->count) {
if (be32_to_cpu(ents[i].hashval) >
be32_to_cpu(ents[i + 1].hashval))
- return false;
+ return __this_address;
}
if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
if (hdr->stale != stale)
- return false;
- return true;
+ return __this_address;
+ return NULL;
}
/*
@@ -142,7 +151,7 @@ xfs_dir3_leaf_check_int(
* kernels we don't get assertion failures in xfs_dir3_leaf_hdr_from_disk() due
* to incorrect magic numbers.
*/
-static bool
+static xfs_failaddr_t
xfs_dir3_leaf_verify(
struct xfs_buf *bp,
uint16_t magic)
@@ -160,16 +169,16 @@ xfs_dir3_leaf_verify(
: XFS_DIR3_LEAFN_MAGIC;
if (leaf3->info.hdr.magic != cpu_to_be16(magic3))
- return false;
+ return __this_address;
if (!uuid_equal(&leaf3->info.uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(leaf3->info.lsn)))
- return false;
+ return __this_address;
} else {
if (leaf->hdr.info.magic != cpu_to_be16(magic))
- return false;
+ return __this_address;
}
return xfs_dir3_leaf_check_int(mp, NULL, NULL, leaf);
@@ -181,15 +190,16 @@ __read_verify(
uint16_t magic)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_LEAF_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dir3_leaf_verify(bp, magic))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dir3_leaf_verify(bp, magic);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -198,12 +208,13 @@ __write_verify(
uint16_t magic)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_dir3_leaf_verify(bp, magic)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_dir3_leaf_verify(bp, magic);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -216,6 +227,13 @@ __write_verify(
xfs_buf_update_cksum(bp, XFS_DIR3_LEAF_CRC_OFF);
}
+static xfs_failaddr_t
+xfs_dir3_leaf1_verify(
+ struct xfs_buf *bp)
+{
+ return xfs_dir3_leaf_verify(bp, XFS_DIR2_LEAF1_MAGIC);
+}
+
static void
xfs_dir3_leaf1_read_verify(
struct xfs_buf *bp)
@@ -230,6 +248,13 @@ xfs_dir3_leaf1_write_verify(
__write_verify(bp, XFS_DIR2_LEAF1_MAGIC);
}
+static xfs_failaddr_t
+xfs_dir3_leafn_verify(
+ struct xfs_buf *bp)
+{
+ return xfs_dir3_leaf_verify(bp, XFS_DIR2_LEAFN_MAGIC);
+}
+
static void
xfs_dir3_leafn_read_verify(
struct xfs_buf *bp)
@@ -248,12 +273,14 @@ const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops = {
.name = "xfs_dir3_leaf1",
.verify_read = xfs_dir3_leaf1_read_verify,
.verify_write = xfs_dir3_leaf1_write_verify,
+ .verify_struct = xfs_dir3_leaf1_verify,
};
const struct xfs_buf_ops xfs_dir3_leafn_buf_ops = {
.name = "xfs_dir3_leafn",
.verify_read = xfs_dir3_leafn_read_verify,
.verify_write = xfs_dir3_leafn_write_verify,
+ .verify_struct = xfs_dir3_leafn_verify,
};
int
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 682e2bf370c7..239d97a64296 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -53,13 +53,7 @@ static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
* Check internal consistency of a leafn block.
*/
#ifdef DEBUG
-#define xfs_dir3_leaf_check(dp, bp) \
-do { \
- if (!xfs_dir3_leafn_check((dp), (bp))) \
- ASSERT(0); \
-} while (0);
-
-static bool
+static xfs_failaddr_t
xfs_dir3_leafn_check(
struct xfs_inode *dp,
struct xfs_buf *bp)
@@ -72,17 +66,32 @@ xfs_dir3_leafn_check(
if (leafhdr.magic == XFS_DIR3_LEAFN_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
} else if (leafhdr.magic != XFS_DIR2_LEAFN_MAGIC)
- return false;
+ return __this_address;
return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
}
+
+static inline void
+xfs_dir3_leaf_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ fa = xfs_dir3_leafn_check(dp, bp);
+ if (!fa)
+ return;
+ xfs_corruption_error(__func__, XFS_ERRLEVEL_LOW, dp->i_mount,
+ bp->b_addr, __FILE__, __LINE__, fa);
+ ASSERT(0);
+}
#else
#define xfs_dir3_leaf_check(dp, bp)
#endif
-static bool
+static xfs_failaddr_t
xfs_dir3_free_verify(
struct xfs_buf *bp)
{
@@ -93,21 +102,21 @@ xfs_dir3_free_verify(
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
if (hdr3->magic != cpu_to_be32(XFS_DIR3_FREE_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
- return false;
+ return __this_address;
} else {
if (hdr->magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC))
- return false;
+ return __this_address;
}
/* XXX: should bounds check the xfs_dir3_icfree_hdr here */
- return true;
+ return NULL;
}
static void
@@ -115,15 +124,16 @@ xfs_dir3_free_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_FREE_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dir3_free_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dir3_free_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -131,12 +141,13 @@ xfs_dir3_free_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_dir3_free_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_dir3_free_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -153,10 +164,11 @@ const struct xfs_buf_ops xfs_dir3_free_buf_ops = {
.name = "xfs_dir3_free",
.verify_read = xfs_dir3_free_read_verify,
.verify_write = xfs_dir3_free_write_verify,
+ .verify_struct = xfs_dir3_free_verify,
};
/* Everything ok in the free block header? */
-static bool
+static xfs_failaddr_t
xfs_dir3_free_header_check(
struct xfs_inode *dp,
xfs_dablk_t fbno,
@@ -174,22 +186,22 @@ xfs_dir3_free_header_check(
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
if (be32_to_cpu(hdr3->firstdb) != firstdb)
- return false;
+ return __this_address;
if (be32_to_cpu(hdr3->nvalid) > maxbests)
- return false;
+ return __this_address;
if (be32_to_cpu(hdr3->nvalid) < be32_to_cpu(hdr3->nused))
- return false;
+ return __this_address;
} else {
struct xfs_dir2_free_hdr *hdr = bp->b_addr;
if (be32_to_cpu(hdr->firstdb) != firstdb)
- return false;
+ return __this_address;
if (be32_to_cpu(hdr->nvalid) > maxbests)
- return false;
+ return __this_address;
if (be32_to_cpu(hdr->nvalid) < be32_to_cpu(hdr->nused))
- return false;
+ return __this_address;
}
- return true;
+ return NULL;
}
static int
@@ -200,6 +212,7 @@ __xfs_dir3_free_read(
xfs_daddr_t mappedbno,
struct xfs_buf **bpp)
{
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
@@ -208,9 +221,9 @@ __xfs_dir3_free_read(
return err;
/* Check things that we can't do in the verifier. */
- if (!xfs_dir3_free_header_check(dp, fbno, *bpp)) {
- xfs_buf_ioerror(*bpp, -EFSCORRUPTED);
- xfs_verifier_error(*bpp);
+ fa = xfs_dir3_free_header_check(dp, fbno, *bpp);
+ if (fa) {
+ xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
xfs_trans_brelse(tp, *bpp);
return -EFSCORRUPTED;
}
@@ -1906,7 +1919,7 @@ xfs_dir2_node_addname_int(
(unsigned long long)ifbno, lastfbno);
if (fblk) {
xfs_alert(mp,
- " fblk 0x%p blkno %llu index %d magic 0x%x",
+ " fblk "PTR_FMT" blkno %llu index %d magic 0x%x",
fblk,
(unsigned long long)fblk->blkno,
fblk->index,
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index 4badd26c47e6..753aeeeffc18 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -39,12 +39,13 @@ extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
/* xfs_dir2_data.c */
#ifdef DEBUG
-#define xfs_dir3_data_check(dp,bp) __xfs_dir3_data_check(dp, bp);
+extern void xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
#else
#define xfs_dir3_data_check(dp,bp)
#endif
-extern int __xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
+extern xfs_failaddr_t __xfs_dir3_data_check(struct xfs_inode *dp,
+ struct xfs_buf *bp);
extern int xfs_dir3_data_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mapped_bno, struct xfs_buf **bpp);
extern int xfs_dir3_data_readahead(struct xfs_inode *dp, xfs_dablk_t bno,
@@ -89,8 +90,9 @@ xfs_dir3_leaf_find_entry(struct xfs_dir3_icleaf_hdr *leafhdr,
int lowstale, int highstale, int *lfloglow, int *lfloghigh);
extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
-extern bool xfs_dir3_leaf_check_int(struct xfs_mount *mp, struct xfs_inode *dp,
- struct xfs_dir3_icleaf_hdr *hdr, struct xfs_dir2_leaf *leaf);
+extern xfs_failaddr_t xfs_dir3_leaf_check_int(struct xfs_mount *mp,
+ struct xfs_inode *dp, struct xfs_dir3_icleaf_hdr *hdr,
+ struct xfs_dir2_leaf *leaf);
/* xfs_dir2_node.c */
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
@@ -127,7 +129,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
-extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
+extern xfs_failaddr_t xfs_dir2_sf_verify(struct xfs_inode *ip);
/* xfs_dir2_readdir.c */
extern int xfs_readdir(struct xfs_trans *tp, struct xfs_inode *dp,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index be8b9755f66a..0c75a7f00883 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -156,7 +156,6 @@ xfs_dir2_block_to_sf(
xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
{
xfs_dir2_data_hdr_t *hdr; /* block header */
- xfs_dir2_block_tail_t *btp; /* block tail pointer */
xfs_dir2_data_entry_t *dep; /* data entry pointer */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_data_unused_t *dup; /* unused data pointer */
@@ -192,9 +191,8 @@ xfs_dir2_block_to_sf(
/*
* Set up to loop over the block's entries.
*/
- btp = xfs_dir2_block_tail_p(args->geo, hdr);
ptr = (char *)dp->d_ops->data_entry_p(hdr);
- endptr = (char *)xfs_dir2_block_leaf_p(btp);
+ endptr = xfs_dir3_data_endp(args->geo, hdr);
sfep = xfs_dir2_sf_firstentry(sfp);
/*
* Loop over the active and unused entries.
@@ -630,7 +628,7 @@ xfs_dir2_sf_check(
#endif /* DEBUG */
/* Verify the consistency of an inline directory. */
-int
+xfs_failaddr_t
xfs_dir2_sf_verify(
struct xfs_inode *ip)
{
@@ -665,7 +663,7 @@ xfs_dir2_sf_verify(
*/
if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
size < xfs_dir2_sf_hdr_size(sfp->i8count))
- return -EFSCORRUPTED;
+ return __this_address;
endp = (char *)sfp + size;
@@ -674,7 +672,7 @@ xfs_dir2_sf_verify(
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
error = xfs_dir_ino_validate(mp, ino);
if (error)
- return error;
+ return __this_address;
offset = dops->data_first_offset;
/* Check all reported entries */
@@ -686,11 +684,11 @@ xfs_dir2_sf_verify(
* within the data buffer.
*/
if (((char *)sfep + sizeof(*sfep)) >= endp)
- return -EFSCORRUPTED;
+ return __this_address;
/* Don't allow names with known bad length. */
if (sfep->namelen == 0)
- return -EFSCORRUPTED;
+ return __this_address;
/*
* Check that the variable-length part of the structure is
@@ -699,23 +697,23 @@ xfs_dir2_sf_verify(
*/
next_sfep = dops->sf_nextentry(sfp, sfep);
if (endp < (char *)next_sfep)
- return -EFSCORRUPTED;
+ return __this_address;
/* Check that the offsets always increase. */
if (xfs_dir2_sf_get_offset(sfep) < offset)
- return -EFSCORRUPTED;
+ return __this_address;
/* Check the inode number. */
ino = dops->sf_get_ino(sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
error = xfs_dir_ino_validate(mp, ino);
if (error)
- return error;
+ return __this_address;
/* Check the file type. */
filetype = dops->sf_get_ftype(sfep);
if (filetype >= XFS_DIR3_FT_MAX)
- return -EFSCORRUPTED;
+ return __this_address;
offset = xfs_dir2_sf_get_offset(sfep) +
dops->data_entsize(sfep->namelen);
@@ -723,16 +721,16 @@ xfs_dir2_sf_verify(
sfep = next_sfep;
}
if (i8count != sfp->i8count)
- return -EFSCORRUPTED;
+ return __this_address;
if ((void *)sfep != (void *)endp)
- return -EFSCORRUPTED;
+ return __this_address;
/* Make sure this whole thing ought to be in local format. */
if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
(uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
- return -EFSCORRUPTED;
+ return __this_address;
- return 0;
+ return NULL;
}
/*
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index 747085b4ef44..8b7a6c3cb599 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -42,18 +42,14 @@ xfs_calc_dquots_per_chunk(
/*
* Do some primitive error checking on ondisk dquot data structures.
*/
-int
-xfs_dqcheck(
+xfs_failaddr_t
+xfs_dquot_verify(
struct xfs_mount *mp,
xfs_disk_dquot_t *ddq,
xfs_dqid_t id,
 uint type, /* used only during quotacheck */
- uint flags,
- const char *str)
+ uint flags)
{
- xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
- int errs = 0;
-
/*
* We can encounter an uninitialized dquot buffer for 2 reasons:
* 1. If we crash while deleting the quotainode(s), and those blks got
@@ -69,87 +65,57 @@ xfs_dqcheck(
* This is all fine; things are still consistent, and we haven't lost
* any quota information. Just don't complain about bad dquot blks.
*/
- if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
- str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
- errs++;
- }
- if (ddq->d_version != XFS_DQUOT_VERSION) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
- str, id, ddq->d_version, XFS_DQUOT_VERSION);
- errs++;
- }
+ if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
+ return __this_address;
+ if (ddq->d_version != XFS_DQUOT_VERSION)
+ return __this_address;
if (ddq->d_flags != XFS_DQ_USER &&
ddq->d_flags != XFS_DQ_PROJ &&
- ddq->d_flags != XFS_DQ_GROUP) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
- str, id, ddq->d_flags);
- errs++;
- }
+ ddq->d_flags != XFS_DQ_GROUP)
+ return __this_address;
- if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : ondisk-dquot 0x%p, ID mismatch: "
- "0x%x expected, found id 0x%x",
- str, ddq, id, be32_to_cpu(ddq->d_id));
- errs++;
- }
+ if (id != -1 && id != be32_to_cpu(ddq->d_id))
+ return __this_address;
- if (!errs && ddq->d_id) {
- if (ddq->d_blk_softlimit &&
- be64_to_cpu(ddq->d_bcount) >
- be64_to_cpu(ddq->d_blk_softlimit)) {
- if (!ddq->d_btimer) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
- str, (int)be32_to_cpu(ddq->d_id), ddq);
- errs++;
- }
- }
- if (ddq->d_ino_softlimit &&
- be64_to_cpu(ddq->d_icount) >
- be64_to_cpu(ddq->d_ino_softlimit)) {
- if (!ddq->d_itimer) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
- str, (int)be32_to_cpu(ddq->d_id), ddq);
- errs++;
- }
- }
- if (ddq->d_rtb_softlimit &&
- be64_to_cpu(ddq->d_rtbcount) >
- be64_to_cpu(ddq->d_rtb_softlimit)) {
- if (!ddq->d_rtbtimer) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
- str, (int)be32_to_cpu(ddq->d_id), ddq);
- errs++;
- }
- }
- }
+ if (!ddq->d_id)
+ return NULL;
+
+ if (ddq->d_blk_softlimit &&
+ be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
+ !ddq->d_btimer)
+ return __this_address;
+
+ if (ddq->d_ino_softlimit &&
+ be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
+ !ddq->d_itimer)
+ return __this_address;
- if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
- return errs;
+ if (ddq->d_rtb_softlimit &&
+ be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
+ !ddq->d_rtbtimer)
+ return __this_address;
+
+ return NULL;
+}
+
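A caller that previously interpreted the error count from xfs_dqcheck() can now act on the failure address directly. A hedged sketch (the surrounding error handling is invented for illustration):

	xfs_failaddr_t	fa;

	fa = xfs_dquot_verify(mp, ddq, id, 0, 0);
	if (fa) {
		/* %pS prints the address of the failed check. */
		xfs_alert(mp, "dquot 0x%x corrupt at %pS", id, fa);
		return -EFSCORRUPTED;
	}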
+/*
+ * Do some rudimentary repairs on a damaged ondisk dquot: reinitialize
+ * the dquot and, on v5 filesystems, recompute its CRC. Typically
+ * requested by quotacheck.
+ */
+int
+xfs_dquot_repair(
+ struct xfs_mount *mp,
+ struct xfs_disk_dquot *ddq,
+ xfs_dqid_t id,
+ uint type)
+{
+ struct xfs_dqblk *d = (struct xfs_dqblk *)ddq;
- if (flags & XFS_QMOPT_DOWARN)
- xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
/*
* Typically, a repair is only requested by quotacheck.
*/
ASSERT(id != -1);
- ASSERT(flags & XFS_QMOPT_DQREPAIR);
memset(d, 0, sizeof(xfs_dqblk_t));
d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
@@ -163,7 +129,7 @@ xfs_dqcheck(
XFS_DQUOT_CRC_OFF);
}
- return errs;
+ return 0;
}
STATIC bool
@@ -198,13 +164,13 @@ xfs_dquot_buf_verify_crc(
return true;
}
-STATIC bool
+STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
struct xfs_mount *mp,
- struct xfs_buf *bp,
- int warn)
+ struct xfs_buf *bp)
{
struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
+ xfs_failaddr_t fa;
xfs_dqid_t id = 0;
int ndquots;
int i;
@@ -228,33 +194,43 @@ xfs_dquot_buf_verify(
*/
for (i = 0; i < ndquots; i++) {
struct xfs_disk_dquot *ddq;
- int error;
ddq = &d[i].dd_diskdq;
if (i == 0)
id = be32_to_cpu(ddq->d_id);
- error = xfs_dqcheck(mp, ddq, id + i, 0, warn, __func__);
- if (error)
- return false;
+ fa = xfs_dquot_verify(mp, ddq, id + i, 0, 0);
+ if (fa)
+ return fa;
}
- return true;
+
+ return NULL;
+}
+
+static xfs_failaddr_t
+xfs_dquot_buf_verify_struct(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+
+ return xfs_dquot_buf_verify(mp, bp);
}
static void
xfs_dquot_buf_read_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (!xfs_dquot_buf_verify_crc(mp, bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dquot_buf_verify(mp, bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
/*
@@ -270,7 +246,7 @@ xfs_dquot_buf_readahead_verify(
struct xfs_mount *mp = bp->b_target->bt_mount;
if (!xfs_dquot_buf_verify_crc(mp, bp) ||
- !xfs_dquot_buf_verify(mp, bp, 0)) {
+ xfs_dquot_buf_verify(mp, bp) != NULL) {
xfs_buf_ioerror(bp, -EIO);
bp->b_flags &= ~XBF_DONE;
}
@@ -283,21 +259,21 @@ xfs_dquot_buf_readahead_verify(
*/
static void
xfs_dquot_buf_write_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
- if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
- return;
- }
+ fa = xfs_dquot_buf_verify(mp, bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}
const struct xfs_buf_ops xfs_dquot_buf_ops = {
.name = "xfs_dquot",
.verify_read = xfs_dquot_buf_read_verify,
.verify_write = xfs_dquot_buf_write_verify,
+ .verify_struct = xfs_dquot_buf_verify_struct,
};
const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index b90924104596..faf1a4edd618 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -233,6 +233,13 @@ typedef struct xfs_fsop_resblks {
#define XFS_MAX_LOG_BLOCKS (1024 * 1024ULL)
#define XFS_MIN_LOG_BYTES (10 * 1024 * 1024ULL)
+/*
+ * Limits on sb_agblocks/sb_agblklog -- mkfs won't format AGs smaller than
+ * 16MB or larger than 1TB.
+ */
+#define XFS_MIN_AG_BYTES (1ULL << 24) /* 16 MB */
+#define XFS_MAX_AG_BYTES (1ULL << 40) /* 1 TB */
+
/* keep the maximum size under 2^31 by a small amount */
#define XFS_MAX_LOG_BYTES \
((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES)
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 3b57ef0f2f76..0e2cf5f0be1f 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -2491,7 +2491,7 @@ xfs_check_agi_unlinked(
#define xfs_check_agi_unlinked(agi)
#endif
-static bool
+static xfs_failaddr_t
xfs_agi_verify(
struct xfs_buf *bp)
{
@@ -2500,28 +2500,28 @@ xfs_agi_verify(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp,
be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
- return false;
+ return __this_address;
}
/*
* Validate the magic number of the agi block.
*/
if (agi->agi_magicnum != cpu_to_be32(XFS_AGI_MAGIC))
- return false;
+ return __this_address;
if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
- return false;
+ return __this_address;
if (be32_to_cpu(agi->agi_level) < 1 ||
be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
- return false;
+ return __this_address;
if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
(be32_to_cpu(agi->agi_free_level) < 1 ||
be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
- return false;
+ return __this_address;
/*
* during growfs operations, the perag is not fully initialised,
@@ -2530,10 +2530,10 @@ xfs_agi_verify(
* so we can detect and avoid this problem.
*/
if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
- return false;
+ return __this_address;
xfs_check_agi_unlinked(agi);
- return true;
+ return NULL;
}
static void
@@ -2541,28 +2541,29 @@ xfs_agi_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp,
- XFS_ERRTAG_IALLOC_READ_AGI))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_agi_verify(bp);
+ if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
xfs_agi_write_verify(
struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ xfs_failaddr_t fa;
- if (!xfs_agi_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_agi_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -2578,6 +2579,7 @@ const struct xfs_buf_ops xfs_agi_buf_ops = {
.name = "xfs_agi",
.verify_read = xfs_agi_read_verify,
.verify_write = xfs_agi_write_verify,
+ .verify_struct = xfs_agi_verify,
};
/*
@@ -2751,3 +2753,102 @@ xfs_verify_dir_ino(
return false;
return xfs_verify_ino(mp, ino);
}
+
+/* Is there an inode record covering a given range of inode numbers? */
+int
+xfs_ialloc_has_inode_record(
+ struct xfs_btree_cur *cur,
+ xfs_agino_t low,
+ xfs_agino_t high,
+ bool *exists)
+{
+ struct xfs_inobt_rec_incore irec;
+ xfs_agino_t agino;
+ uint16_t holemask;
+ int has_record;
+ int i;
+ int error;
+
+ *exists = false;
+ error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
+ while (error == 0 && has_record) {
+ error = xfs_inobt_get_rec(cur, &irec, &has_record);
+ if (error || irec.ir_startino > high)
+ break;
+
+ agino = irec.ir_startino;
+ holemask = irec.ir_holemask;
+ for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
+ i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
+ if (holemask & 1)
+ continue;
+ if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
+ agino <= high) {
+ *exists = true;
+ return 0;
+ }
+ }
+
+ error = xfs_btree_increment(cur, 0, &has_record);
+ }
+ return error;
+}
+
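Worked example of the holemask walk above (values invented for illustration): with ir_startino = 128, XFS_INODES_PER_HOLEMASK_BIT = 4, and only holemask bit 1 set, the record covers inodes 128-131 and 136-139 but not the hole at 132-135. A query with low = 133 and high = 134 falls entirely inside the hole, so every populated subrange fails the interval-overlap test (agino + 4 > low && agino <= high) and *exists stays false.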
+/* Is there an inode record covering a given extent? */
+int
+xfs_ialloc_has_inodes_at_extent(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool *exists)
+{
+ xfs_agino_t low;
+ xfs_agino_t high;
+
+ low = XFS_OFFBNO_TO_AGINO(cur->bc_mp, bno, 0);
+ high = XFS_OFFBNO_TO_AGINO(cur->bc_mp, bno + len, 0) - 1;
+
+ return xfs_ialloc_has_inode_record(cur, low, high, exists);
+}
+
+struct xfs_ialloc_count_inodes {
+ xfs_agino_t count;
+ xfs_agino_t freecount;
+};
+
+/* Record inode counts across all inobt records. */
+STATIC int
+xfs_ialloc_count_inodes_rec(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec,
+ void *priv)
+{
+ struct xfs_inobt_rec_incore irec;
+ struct xfs_ialloc_count_inodes *ci = priv;
+
+ xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
+ ci->count += irec.ir_count;
+ ci->freecount += irec.ir_freecount;
+
+ return 0;
+}
+
+/* Count allocated and free inodes under an inobt. */
+int
+xfs_ialloc_count_inodes(
+ struct xfs_btree_cur *cur,
+ xfs_agino_t *count,
+ xfs_agino_t *freecount)
+{
+ struct xfs_ialloc_count_inodes ci = {0};
+ int error;
+
+ ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
+ error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
+ if (error)
+ return error;
+
+ *count = ci.count;
+ *freecount = ci.freecount;
+ return 0;
+}
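xfs_btree_query_all() visits every record in the tree, so the callback above is the entire traversal. A sketch of a caller, assuming an inobt cursor has already been constructed:

	xfs_agino_t	count;
	xfs_agino_t	freecount;
	int		error;

	error = xfs_ialloc_count_inodes(cur, &count, &freecount);
	if (!error)
		ASSERT(freecount <= count);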
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 66a8de0b1caa..c5402bb4ce0c 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -170,6 +170,12 @@ int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
union xfs_btree_rec;
void xfs_inobt_btrec_to_irec(struct xfs_mount *mp, union xfs_btree_rec *rec,
struct xfs_inobt_rec_incore *irec);
+int xfs_ialloc_has_inodes_at_extent(struct xfs_btree_cur *cur,
+ xfs_agblock_t bno, xfs_extlen_t len, bool *exists);
+int xfs_ialloc_has_inode_record(struct xfs_btree_cur *cur, xfs_agino_t low,
+ xfs_agino_t high, bool *exists);
+int xfs_ialloc_count_inodes(struct xfs_btree_cur *cur, xfs_agino_t *count,
+ xfs_agino_t *freecount);
int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
void xfs_ialloc_agino_range(struct xfs_mount *mp, xfs_agnumber_t agno,
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 317caba9faa6..af197a5f3a82 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -141,21 +141,42 @@ xfs_finobt_alloc_block(
union xfs_btree_ptr *new,
int *stat)
{
+ if (cur->bc_mp->m_inotbt_nores)
+ return xfs_inobt_alloc_block(cur, start, new, stat);
return __xfs_inobt_alloc_block(cur, start, new, stat,
XFS_AG_RESV_METADATA);
}
STATIC int
-xfs_inobt_free_block(
+__xfs_inobt_free_block(
struct xfs_btree_cur *cur,
- struct xfs_buf *bp)
+ struct xfs_buf *bp,
+ enum xfs_ag_resv_type resv)
{
struct xfs_owner_info oinfo;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
return xfs_free_extent(cur->bc_tp,
XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
- &oinfo, XFS_AG_RESV_NONE);
+ &oinfo, resv);
+}
+
+STATIC int
+xfs_inobt_free_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
+}
+
+STATIC int
+xfs_finobt_free_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ if (cur->bc_mp->m_inotbt_nores)
+ return xfs_inobt_free_block(cur, bp);
+ return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}
STATIC int
@@ -250,12 +271,13 @@ xfs_inobt_diff_two_keys(
be32_to_cpu(k2->inobt.ir_startino);
}
-static int
+static xfs_failaddr_t
xfs_inobt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ xfs_failaddr_t fa;
unsigned int level;
/*
@@ -271,20 +293,21 @@ xfs_inobt_verify(
switch (block->bb_magic) {
case cpu_to_be32(XFS_IBT_CRC_MAGIC):
case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
/* fall through */
case cpu_to_be32(XFS_IBT_MAGIC):
case cpu_to_be32(XFS_FIBT_MAGIC):
break;
default:
- return 0;
+ return NULL;
}
/* level verification */
level = be16_to_cpu(block->bb_level);
if (level >= mp->m_in_maxlevels)
- return false;
+ return __this_address;
return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}
@@ -293,25 +316,30 @@ static void
xfs_inobt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_sblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_inobt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_inobt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
static void
xfs_inobt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_inobt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_inobt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_sblock_calc_crc(bp);
@@ -322,6 +350,7 @@ const struct xfs_buf_ops xfs_inobt_buf_ops = {
.name = "xfs_inobt",
.verify_read = xfs_inobt_read_verify,
.verify_write = xfs_inobt_write_verify,
+ .verify_struct = xfs_inobt_verify,
};
STATIC int
@@ -372,7 +401,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root,
.alloc_block = xfs_finobt_alloc_block,
- .free_block = xfs_inobt_free_block,
+ .free_block = xfs_finobt_free_block,
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
.init_key_from_rec = xfs_inobt_init_key_from_rec,
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 6b7989038d75..4fe17b368316 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -32,6 +32,8 @@
#include "xfs_ialloc.h"
#include "xfs_dir2.h"
+#include <linux/iversion.h>
+
/*
* Check that none of the inodes in the buffer have a next
* unlinked field of 0.
@@ -113,8 +115,7 @@ xfs_inode_buf_verify(
return;
}
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
#ifdef DEBUG
xfs_alert(mp,
"bad inode magic/vsn daddr %lld #%d (magic=%x)",
@@ -264,7 +265,8 @@ xfs_inode_from_disk(
to->di_flags = be16_to_cpu(from->di_flags);
if (to->di_version == 3) {
- inode->i_version = be64_to_cpu(from->di_changecount);
+ inode_set_iversion_queried(inode,
+ be64_to_cpu(from->di_changecount));
to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
to->di_flags2 = be64_to_cpu(from->di_flags2);
@@ -314,7 +316,7 @@ xfs_inode_to_disk(
to->di_flags = cpu_to_be16(from->di_flags);
if (from->di_version == 3) {
- to->di_changecount = cpu_to_be64(inode->i_version);
+ to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
to->di_flags2 = cpu_to_be64(from->di_flags2);
@@ -381,7 +383,7 @@ xfs_log_dinode_to_disk(
}
}
-bool
+xfs_failaddr_t
xfs_dinode_verify(
struct xfs_mount *mp,
xfs_ino_t ino,
@@ -390,53 +392,122 @@ xfs_dinode_verify(
uint16_t mode;
uint16_t flags;
uint64_t flags2;
+ uint64_t di_size;
if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
- return false;
+ return __this_address;
+
+ /* Verify v3 integrity information first */
+ if (dip->di_version >= 3) {
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return __this_address;
+ if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
+ XFS_DINODE_CRC_OFF))
+ return __this_address;
+ if (be64_to_cpu(dip->di_ino) != ino)
+ return __this_address;
+ if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
+ return __this_address;
+ }
/* don't allow invalid i_size */
- if (be64_to_cpu(dip->di_size) & (1ULL << 63))
- return false;
+ di_size = be64_to_cpu(dip->di_size);
+ if (di_size & (1ULL << 63))
+ return __this_address;
mode = be16_to_cpu(dip->di_mode);
if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
- return false;
+ return __this_address;
/* No zero-length symlinks/dirs. */
- if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
- return false;
+ if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
+ return __this_address;
+
+ /* Fork checks carried over from xfs_iformat_fork */
+ if (mode &&
+ be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
+ be64_to_cpu(dip->di_nblocks))
+ return __this_address;
+
+ if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
+ return __this_address;
+
+ flags = be16_to_cpu(dip->di_flags);
+
+ if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
+ return __this_address;
+
+ /* Do we have appropriate data fork formats for the mode? */
+ switch (mode & S_IFMT) {
+ case S_IFIFO:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFSOCK:
+ if (dip->di_format != XFS_DINODE_FMT_DEV)
+ return __this_address;
+ break;
+ case S_IFREG:
+ case S_IFLNK:
+ case S_IFDIR:
+ switch (dip->di_format) {
+ case XFS_DINODE_FMT_LOCAL:
+ /*
+ * no local regular files yet
+ */
+ if (S_ISREG(mode))
+ return __this_address;
+ if (di_size > XFS_DFORK_DSIZE(dip, mp))
+ return __this_address;
+ /* fall through */
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ break;
+ default:
+ return __this_address;
+ }
+ break;
+ case 0:
+ /* Uninitialized inode ok. */
+ break;
+ default:
+ return __this_address;
+ }
+
+ if (XFS_DFORK_Q(dip)) {
+ switch (dip->di_aformat) {
+ case XFS_DINODE_FMT_LOCAL:
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ break;
+ default:
+ return __this_address;
+ }
+ }
/* only version 3 or greater inodes are extensively verified here */
if (dip->di_version < 3)
- return true;
-
- if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
- if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
- XFS_DINODE_CRC_OFF))
- return false;
- if (be64_to_cpu(dip->di_ino) != ino)
- return false;
- if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return NULL;
- flags = be16_to_cpu(dip->di_flags);
flags2 = be64_to_cpu(dip->di_flags2);
/* don't allow reflink/cowextsize if we don't have reflink */
if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
!xfs_sb_version_hasreflink(&mp->m_sb))
- return false;
+ return __this_address;
+
+ /* only regular files get reflink */
+ if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
+ return __this_address;
/* don't let reflink and realtime mix */
if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
- return false;
+ return __this_address;
/* don't let reflink and dax mix */
if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
- return false;
+ return __this_address;
- return true;
+ return NULL;
}
void
@@ -476,6 +547,7 @@ xfs_iread(
{
xfs_buf_t *bp;
xfs_dinode_t *dip;
+ xfs_failaddr_t fa;
int error;
/*
@@ -507,11 +579,10 @@ xfs_iread(
return error;
/* even unallocated inodes are verified */
- if (!xfs_dinode_verify(mp, ip->i_ino, dip)) {
- xfs_alert(mp, "%s: validation failed for inode %lld",
- __func__, ip->i_ino);
-
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
+ fa = xfs_dinode_verify(mp, ip->i_ino, dip);
+ if (fa) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
+ sizeof(*dip), fa);
error = -EFSCORRUPTED;
goto out_brelse;
}
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index a9c97a356c30..8a5e1da52d74 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -82,7 +82,7 @@ void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
#define xfs_inobp_check(mp, bp)
#endif /* DEBUG */
-bool xfs_dinode_verify(struct xfs_mount *mp, xfs_ino_t ino,
- struct xfs_dinode *dip);
+xfs_failaddr_t xfs_dinode_verify(struct xfs_mount *mp, xfs_ino_t ino,
+ struct xfs_dinode *dip);
#endif /* __XFS_INODE_BUF_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index c79a1616b79d..866d2861c625 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -35,6 +35,8 @@
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_shared.h"
kmem_zone_t *xfs_ifork_zone;
@@ -62,69 +64,11 @@ xfs_iformat_fork(
int error = 0;
xfs_fsize_t di_size;
- if (unlikely(be32_to_cpu(dip->di_nextents) +
- be16_to_cpu(dip->di_anextents) >
- be64_to_cpu(dip->di_nblocks))) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
- (unsigned long long)ip->i_ino,
- (int)(be32_to_cpu(dip->di_nextents) +
- be16_to_cpu(dip->di_anextents)),
- (unsigned long long)
- be64_to_cpu(dip->di_nblocks));
- XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
- if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
- xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
- (unsigned long long)ip->i_ino,
- dip->di_forkoff);
- XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
- if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
- !ip->i_mount->m_rtdev_targp)) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %Lu, has realtime flag set.",
- ip->i_ino);
- XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
- XFS_ERRLEVEL_LOW, ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
- if (unlikely(xfs_is_reflink_inode(ip) && !S_ISREG(inode->i_mode))) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %llu, wrong file type for reflink.",
- ip->i_ino);
- XFS_CORRUPTION_ERROR("xfs_iformat(reflink)",
- XFS_ERRLEVEL_LOW, ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
- if (unlikely(xfs_is_reflink_inode(ip) &&
- (ip->i_d.di_flags & XFS_DIFLAG_REALTIME))) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %llu, has reflink+realtime flag set.",
- ip->i_ino);
- XFS_CORRUPTION_ERROR("xfs_iformat(reflink)",
- XFS_ERRLEVEL_LOW, ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
switch (inode->i_mode & S_IFMT) {
case S_IFIFO:
case S_IFCHR:
case S_IFBLK:
case S_IFSOCK:
- if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
- XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
ip->i_d.di_size = 0;
inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip));
break;
@@ -134,32 +78,7 @@ xfs_iformat_fork(
case S_IFDIR:
switch (dip->di_format) {
case XFS_DINODE_FMT_LOCAL:
- /*
- * no local regular files yet
- */
- if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
- xfs_warn(ip->i_mount,
- "corrupt inode %Lu (local format for regular file).",
- (unsigned long long) ip->i_ino);
- XFS_CORRUPTION_ERROR("xfs_iformat(4)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
di_size = be64_to_cpu(dip->di_size);
- if (unlikely(di_size < 0 ||
- di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
- xfs_warn(ip->i_mount,
- "corrupt inode %Lu (bad size %Ld for local inode).",
- (unsigned long long) ip->i_ino,
- (long long) di_size);
- XFS_CORRUPTION_ERROR("xfs_iformat(5)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
size = (int)di_size;
error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
break;
@@ -170,28 +89,16 @@ xfs_iformat_fork(
error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
break;
default:
- XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
- ip->i_mount);
return -EFSCORRUPTED;
}
break;
default:
- XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
return -EFSCORRUPTED;
}
if (error)
return error;
- /* Check inline dir contents. */
- if (S_ISDIR(inode->i_mode) && dip->di_format == XFS_DINODE_FMT_LOCAL) {
- error = xfs_dir2_sf_verify(ip);
- if (error) {
- xfs_idestroy_fork(ip, XFS_DATA_FORK);
- return error;
- }
- }
-
if (xfs_is_reflink_inode(ip)) {
ASSERT(ip->i_cowfp == NULL);
xfs_ifork_init_cow(ip);
@@ -208,18 +115,6 @@ xfs_iformat_fork(
atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
size = be16_to_cpu(atp->hdr.totsize);
- if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
- xfs_warn(ip->i_mount,
- "corrupt inode %Lu (bad attr fork size %Ld).",
- (unsigned long long) ip->i_ino,
- (long long) size);
- XFS_CORRUPTION_ERROR("xfs_iformat(8)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- error = -EFSCORRUPTED;
- break;
- }
-
error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
break;
case XFS_DINODE_FMT_EXTENTS:
@@ -403,6 +298,7 @@ xfs_iformat_btree(
*/
if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
XFS_IFORK_MAXEXT(ip, whichfork) ||
+ nrecs == 0 ||
XFS_BMDR_SPACE_CALC(nrecs) >
XFS_DFORK_SIZE(dip, mp, whichfork) ||
XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks) ||
@@ -827,3 +723,45 @@ xfs_ifork_init_cow(
ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
ip->i_cnextents = 0;
}
+
+/* Default fork content verifiers. */
+struct xfs_ifork_ops xfs_default_ifork_ops = {
+ .verify_attr = xfs_attr_shortform_verify,
+ .verify_dir = xfs_dir2_sf_verify,
+ .verify_symlink = xfs_symlink_shortform_verify,
+};
+
+/* Verify the inline contents of the data fork of an inode. */
+xfs_failaddr_t
+xfs_ifork_verify_data(
+ struct xfs_inode *ip,
+ struct xfs_ifork_ops *ops)
+{
+ /* Non-local data fork, we're done. */
+ if (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+ return NULL;
+
+ /* Check the inline data fork if there is one. */
+ switch (VFS_I(ip)->i_mode & S_IFMT) {
+ case S_IFDIR:
+ return ops->verify_dir(ip);
+ case S_IFLNK:
+ return ops->verify_symlink(ip);
+ default:
+ return NULL;
+ }
+}
+
+/* Verify the inline contents of the attr fork of an inode. */
+xfs_failaddr_t
+xfs_ifork_verify_attr(
+ struct xfs_inode *ip,
+ struct xfs_ifork_ops *ops)
+{
+ /* There has to be an attr fork allocated if aformat is local. */
+ if (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
+ return NULL;
+ if (!XFS_IFORK_PTR(ip, XFS_ATTR_FORK))
+ return __this_address;
+ return ops->verify_attr(ip);
+}
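The ops table lets a caller swap in stricter per-fork verifiers while reusing the dispatch above; presumably the default table is what ordinary inode reads use. A sketch of the default usage (the calling context is invented):

	xfs_failaddr_t	fa;

	fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
	if (!fa)
		fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
	if (fa)
		return -EFSCORRUPTED;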
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index b9f0098e33b8..dd8aba0dd119 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -186,4 +186,18 @@ extern struct kmem_zone *xfs_ifork_zone;
extern void xfs_ifork_init_cow(struct xfs_inode *ip);
+typedef xfs_failaddr_t (*xfs_ifork_verifier_t)(struct xfs_inode *);
+
+struct xfs_ifork_ops {
+ xfs_ifork_verifier_t verify_symlink;
+ xfs_ifork_verifier_t verify_dir;
+ xfs_ifork_verifier_t verify_attr;
+};
+extern struct xfs_ifork_ops xfs_default_ifork_ops;
+
+xfs_failaddr_t xfs_ifork_verify_data(struct xfs_inode *ip,
+ struct xfs_ifork_ops *ops);
+xfs_failaddr_t xfs_ifork_verify_attr(struct xfs_inode *ip,
+ struct xfs_ifork_ops *ops);
+
#endif /* __XFS_INODE_FORK_H__ */
diff --git a/fs/xfs/libxfs/xfs_log_rlimit.c b/fs/xfs/libxfs/xfs_log_rlimit.c
index c10597973333..cc4cbe290939 100644
--- a/fs/xfs/libxfs/xfs_log_rlimit.c
+++ b/fs/xfs/libxfs/xfs_log_rlimit.c
@@ -55,7 +55,7 @@ xfs_log_calc_max_attrsetm_res(
* the maximum one in terms of the pre-calculated values which were done
* at mount time.
*/
-STATIC void
+void
xfs_log_get_max_trans_res(
struct xfs_mount *mp,
struct xfs_trans_res *max_resp)
diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
index d69c772271cb..bb1b13a9b5f4 100644
--- a/fs/xfs/libxfs/xfs_quota_defs.h
+++ b/fs/xfs/libxfs/xfs_quota_defs.h
@@ -112,8 +112,6 @@ typedef uint16_t xfs_qwarncnt_t;
#define XFS_QMOPT_PQUOTA 0x0000008 /* project dquot requested */
#define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */
#define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */
-#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */
-#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */
#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */
#define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */
#define XFS_QMOPT_DQNEXT 0x0008000 /* return next dquot >= this ID */
@@ -153,8 +151,11 @@ typedef uint16_t xfs_qwarncnt_t;
(XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)
#define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
-extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
- xfs_dqid_t id, uint type, uint flags, const char *str);
+extern xfs_failaddr_t xfs_dquot_verify(struct xfs_mount *mp,
+ struct xfs_disk_dquot *ddq, xfs_dqid_t id, uint type,
+ uint flags);
extern int xfs_calc_dquots_per_chunk(unsigned int nbblks);
+extern int xfs_dquot_repair(struct xfs_mount *mp, struct xfs_disk_dquot *ddq,
+ xfs_dqid_t id, uint type);
#endif /* __XFS_QUOTA_H__ */
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index c40d26763075..bee68c23d612 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1696,3 +1696,22 @@ out_cursor:
xfs_trans_brelse(tp, agbp);
goto out_trans;
}
+
+/* Is there a record covering a given extent? */
+int
+xfs_refcount_has_record(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool *exists)
+{
+ union xfs_btree_irec low;
+ union xfs_btree_irec high;
+
+ memset(&low, 0, sizeof(low));
+ low.rc.rc_startblock = bno;
+ memset(&high, 0xFF, sizeof(high));
+ high.rc.rc_startblock = bno + len - 1;
+
+ return xfs_btree_has_record(cur, &low, &high, exists);
+}
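Zeroing the low key and filling the high key with 0xFF pins every field except rc_startblock to its extreme, so only the start-block range [bno, bno + len - 1] constrains the search. A sketch of a caller, assuming a refcount btree cursor:

	bool	exists;
	int	error;

	/* Does any refcount record touch blocks [agbno, agbno + 3]? */
	error = xfs_refcount_has_record(cur, agbno, 4, &exists);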
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index eafb9d1f3b37..2a731ac68fe4 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -83,4 +83,7 @@ static inline xfs_fileoff_t xfs_refcount_max_unmap(int log_res)
return (log_res * 3 / 4) / XFS_REFCOUNT_ITEM_OVERHEAD;
}
+extern int xfs_refcount_has_record(struct xfs_btree_cur *cur,
+ xfs_agblock_t bno, xfs_extlen_t len, bool *exists);
+
#endif /* __XFS_REFCOUNT_H__ */
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 3c59dd3d58d7..8479769e470d 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -223,29 +223,31 @@ xfs_refcountbt_diff_two_keys(
be32_to_cpu(k2->refc.rc_startblock);
}
-STATIC bool
+STATIC xfs_failaddr_t
xfs_refcountbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_perag *pag = bp->b_pag;
+ xfs_failaddr_t fa;
unsigned int level;
if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
- return false;
+ return __this_address;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return false;
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ return __this_address;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
level = be16_to_cpu(block->bb_level);
if (pag && pag->pagf_init) {
if (level >= pag->pagf_refcount_level)
- return false;
+ return __this_address;
} else if (level >= mp->m_refc_maxlevels)
- return false;
+ return __this_address;
return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}
@@ -254,25 +256,30 @@ STATIC void
xfs_refcountbt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_sblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_refcountbt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_refcountbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
STATIC void
xfs_refcountbt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_refcountbt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_refcountbt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_sblock_calc_crc(bp);
@@ -283,6 +290,7 @@ const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
.name = "xfs_refcountbt",
.verify_read = xfs_refcountbt_read_verify,
.verify_write = xfs_refcountbt_write_verify,
+ .verify_struct = xfs_refcountbt_verify,
};
STATIC int
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 50db920ceeeb..79822cf6ebe3 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -2387,3 +2387,70 @@ xfs_rmap_compare(
else
return 0;
}
+
+/* Is there a record covering a given extent? */
+int
+xfs_rmap_has_record(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool *exists)
+{
+ union xfs_btree_irec low;
+ union xfs_btree_irec high;
+
+ memset(&low, 0, sizeof(low));
+ low.r.rm_startblock = bno;
+ memset(&high, 0xFF, sizeof(high));
+ high.r.rm_startblock = bno + len - 1;
+
+ return xfs_btree_has_record(cur, &low, &high, exists);
+}
+
+/*
+ * Is there a record for this owner completely covering a given physical
+ * extent? If so, *has_rmap will be set to true. If there is no record
+ * or the record only covers part of the range, we set *has_rmap to false.
+ * This function doesn't perform range lookups or offset checks, so it is
+ * not suitable for checking data fork blocks.
+ */
+int
+xfs_rmap_record_exists(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo,
+ bool *has_rmap)
+{
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
+ int has_record;
+ struct xfs_rmap_irec irec;
+ int error;
+
+ xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+ ASSERT(XFS_RMAP_NON_INODE_OWNER(owner) ||
+ (flags & XFS_RMAP_BMBT_BLOCK));
+
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags,
+ &has_record);
+ if (error)
+ return error;
+ if (!has_record) {
+ *has_rmap = false;
+ return 0;
+ }
+
+ error = xfs_rmap_get_rec(cur, &irec, &has_record);
+ if (error)
+ return error;
+ if (!has_record) {
+ *has_rmap = false;
+ return 0;
+ }
+
+ *has_rmap = (irec.rm_owner == owner && irec.rm_startblock <= bno &&
+ irec.rm_startblock + irec.rm_blockcount >= bno + len);
+ return 0;
+}
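A sketch of the intended use, checking that AG metadata blocks are fully accounted to their owner (what the caller does on a miss is invented here):

	struct xfs_owner_info	oinfo;
	bool			has_rmap;
	int			error;

	/* Expect the whole extent to be owned by the inobt. */
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
	error = xfs_rmap_record_exists(cur, agbno, len, &oinfo, &has_rmap);
	if (!error && !has_rmap)
		error = -EFSCORRUPTED;	/* or flag cross-ref corruption */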
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 0fcd5b1ba729..380e53be98d5 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -233,5 +233,10 @@ int xfs_rmap_compare(const struct xfs_rmap_irec *a,
union xfs_btree_rec;
int xfs_rmap_btrec_to_irec(union xfs_btree_rec *rec,
struct xfs_rmap_irec *irec);
+int xfs_rmap_has_record(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, bool *exists);
+int xfs_rmap_record_exists(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, struct xfs_owner_info *oinfo,
+ bool *has_rmap);
#endif /* __XFS_RMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 9d9c9192584c..e829c3e489ea 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -303,13 +303,14 @@ xfs_rmapbt_diff_two_keys(
return 0;
}
-static bool
+static xfs_failaddr_t
xfs_rmapbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_perag *pag = bp->b_pag;
+ xfs_failaddr_t fa;
unsigned int level;
/*
@@ -325,19 +326,20 @@ xfs_rmapbt_verify(
* in this case.
*/
if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
- return false;
+ return __this_address;
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
- return false;
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ return __this_address;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
level = be16_to_cpu(block->bb_level);
if (pag && pag->pagf_init) {
if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
- return false;
+ return __this_address;
} else if (level >= mp->m_rmap_maxlevels)
- return false;
+ return __this_address;
return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}
@@ -346,25 +348,30 @@ static void
xfs_rmapbt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_sblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_rmapbt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_rmapbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
static void
xfs_rmapbt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_rmapbt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_rmapbt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_sblock_calc_crc(bp);
@@ -375,6 +382,7 @@ const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
.name = "xfs_rmapbt",
.verify_read = xfs_rmapbt_read_verify,
.verify_write = xfs_rmapbt_write_verify,
+ .verify_struct = xfs_rmapbt_verify,
};
STATIC int
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 3fb29a5ea915..106be2d0bb88 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1097,3 +1097,24 @@ xfs_verify_rtbno(
{
return rtbno < mp->m_sb.sb_rblocks;
}
+
+/* Is the given extent all free? */
+int
+xfs_rtalloc_extent_is_free(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ xfs_rtblock_t start,
+ xfs_extlen_t len,
+ bool *is_free)
+{
+ xfs_rtblock_t end;
+ int matches;
+ int error;
+
+ error = xfs_rtcheck_range(mp, tp, start, len, 1, &end, &matches);
+ if (error)
+ return error;
+
+ *is_free = matches;
+ return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 9b5aae2bcc0b..46af6aa60a8e 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -40,6 +40,8 @@
#include "xfs_rmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
/*
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -116,6 +118,9 @@ xfs_mount_validate_sb(
bool check_inprogress,
bool check_version)
{
+ u32 agcount = 0;
+ u32 rem;
+
if (sbp->sb_magicnum != XFS_SB_MAGIC) {
xfs_warn(mp, "bad magic number");
return -EWRONGFS;
@@ -226,6 +231,13 @@ xfs_mount_validate_sb(
return -EINVAL;
}
+ /* Compute agcount for this number of dblocks and agblocks */
+ if (sbp->sb_agblocks) {
+ agcount = div_u64_rem(sbp->sb_dblocks, sbp->sb_agblocks, &rem);
+ if (rem)
+ agcount++;
+ }
+
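In other words, agcount here is ceil(sb_dblocks / sb_agblocks): with, say, sb_dblocks = 1000 and sb_agblocks = 400, div_u64_rem() returns 2 with remainder 200, so agcount is bumped to 3, and a superblock advertising any other sb_agcount fails the validation below.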
/*
* More sanity checking. Most of these were stolen directly from
* xfs_repair.
@@ -250,6 +262,10 @@ xfs_mount_validate_sb(
sbp->sb_inodesize != (1 << sbp->sb_inodelog) ||
sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE ||
sbp->sb_inopblock != howmany(sbp->sb_blocksize,sbp->sb_inodesize) ||
+ XFS_FSB_TO_B(mp, sbp->sb_agblocks) < XFS_MIN_AG_BYTES ||
+ XFS_FSB_TO_B(mp, sbp->sb_agblocks) > XFS_MAX_AG_BYTES ||
+ sbp->sb_agblklog != xfs_highbit32(sbp->sb_agblocks - 1) + 1 ||
+ agcount == 0 || agcount != sbp->sb_agcount ||
(sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
(sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
(sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
@@ -640,11 +656,10 @@ xfs_sb_read_verify(
error = xfs_sb_verify(bp, true);
out_error:
- if (error) {
+ if (error == -EFSCORRUPTED || error == -EFSBADCRC)
+ xfs_verifier_error(bp, error, __this_address);
+ else if (error)
xfs_buf_ioerror(bp, error);
- if (error == -EFSCORRUPTED || error == -EFSBADCRC)
- xfs_verifier_error(bp);
- }
}
/*
@@ -673,13 +688,12 @@ xfs_sb_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
int error;
error = xfs_sb_verify(bp, false);
if (error) {
- xfs_buf_ioerror(bp, error);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, error, __this_address);
return;
}
@@ -876,3 +890,88 @@ xfs_sync_sb(
xfs_trans_set_sync(tp);
return xfs_trans_commit(tp);
}
+
+int
+xfs_fs_geometry(
+ struct xfs_sb *sbp,
+ struct xfs_fsop_geom *geo,
+ int struct_version)
+{
+ memset(geo, 0, sizeof(struct xfs_fsop_geom));
+
+ geo->blocksize = sbp->sb_blocksize;
+ geo->rtextsize = sbp->sb_rextsize;
+ geo->agblocks = sbp->sb_agblocks;
+ geo->agcount = sbp->sb_agcount;
+ geo->logblocks = sbp->sb_logblocks;
+ geo->sectsize = sbp->sb_sectsize;
+ geo->inodesize = sbp->sb_inodesize;
+ geo->imaxpct = sbp->sb_imax_pct;
+ geo->datablocks = sbp->sb_dblocks;
+ geo->rtblocks = sbp->sb_rblocks;
+ geo->rtextents = sbp->sb_rextents;
+ geo->logstart = sbp->sb_logstart;
+ BUILD_BUG_ON(sizeof(geo->uuid) != sizeof(sbp->sb_uuid));
+ memcpy(geo->uuid, &sbp->sb_uuid, sizeof(sbp->sb_uuid));
+
+ if (struct_version < 2)
+ return 0;
+
+ geo->sunit = sbp->sb_unit;
+ geo->swidth = sbp->sb_width;
+
+ if (struct_version < 3)
+ return 0;
+
+ geo->version = XFS_FSOP_GEOM_VERSION;
+ geo->flags = XFS_FSOP_GEOM_FLAGS_NLINK |
+ XFS_FSOP_GEOM_FLAGS_DIRV2;
+ if (xfs_sb_version_hasattr(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_ATTR;
+ if (xfs_sb_version_hasquota(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_QUOTA;
+ if (xfs_sb_version_hasalign(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_IALIGN;
+ if (xfs_sb_version_hasdalign(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_DALIGN;
+ if (xfs_sb_version_hasextflgbit(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_EXTFLG;
+ if (xfs_sb_version_hassector(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_SECTOR;
+ if (xfs_sb_version_hasasciici(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_DIRV2CI;
+ if (xfs_sb_version_haslazysbcount(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_LAZYSB;
+ if (xfs_sb_version_hasattr2(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_ATTR2;
+ if (xfs_sb_version_hasprojid32bit(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_PROJID32;
+ if (xfs_sb_version_hascrc(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_V5SB;
+ if (xfs_sb_version_hasftype(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_FTYPE;
+ if (xfs_sb_version_hasfinobt(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_FINOBT;
+ if (xfs_sb_version_hassparseinodes(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_SPINODES;
+ if (xfs_sb_version_hasrmapbt(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_RMAPBT;
+ if (xfs_sb_version_hasreflink(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_REFLINK;
+ if (xfs_sb_version_hassector(sbp))
+ geo->logsectsize = sbp->sb_logsectsize;
+ else
+ geo->logsectsize = BBSIZE;
+ geo->rtsectsize = sbp->sb_blocksize;
+ geo->dirblocksize = xfs_dir2_dirblock_bytes(sbp);
+
+ if (struct_version < 4)
+ return 0;
+
+ if (xfs_sb_version_haslogv2(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_LOGV2;
+
+ geo->logsunit = sbp->sb_logsunit;
+
+ return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 961e6475a309..63dcd2a1a657 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -34,4 +34,8 @@ extern void xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from);
extern void xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from);
extern void xfs_sb_quota_from_disk(struct xfs_sb *sbp);
+#define XFS_FS_GEOM_MAX_STRUCT_VER (4)
+extern int xfs_fs_geometry(struct xfs_sb *sbp, struct xfs_fsop_geom *geo,
+ int struct_version);
+
#endif /* __XFS_SB_H__ */
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index c6f4eb46fe26..d0b84da0cb1e 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -76,6 +76,9 @@ struct xfs_log_item_desc {
int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
int xfs_log_calc_minimum_size(struct xfs_mount *);
+struct xfs_trans_res;
+void xfs_log_get_max_trans_res(struct xfs_mount *mp,
+ struct xfs_trans_res *max_resp);
/*
* Values for t_flags.
@@ -143,5 +146,6 @@ bool xfs_symlink_hdr_ok(xfs_ino_t ino, uint32_t offset,
uint32_t size, struct xfs_buf *bp);
void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_inode *ip, struct xfs_ifork *ifp);
+xfs_failaddr_t xfs_symlink_shortform_verify(struct xfs_inode *ip);
#endif /* __XFS_SHARED_H__ */
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index c484877129a0..5ef5f354587e 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -98,7 +98,7 @@ xfs_symlink_hdr_ok(
return true;
}
-static bool
+static xfs_failaddr_t
xfs_symlink_verify(
struct xfs_buf *bp)
{
@@ -106,22 +106,22 @@ xfs_symlink_verify(
struct xfs_dsymlink_hdr *dsl = bp->b_addr;
if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
+ return __this_address;
if (dsl->sl_magic != cpu_to_be32(XFS_SYMLINK_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (bp->b_bn != be64_to_cpu(dsl->sl_blkno))
- return false;
+ return __this_address;
if (be32_to_cpu(dsl->sl_offset) +
be32_to_cpu(dsl->sl_bytes) >= XFS_SYMLINK_MAXLEN)
- return false;
+ return __this_address;
if (dsl->sl_owner == 0)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(dsl->sl_lsn)))
- return false;
+ return __this_address;
- return true;
+ return NULL;
}
static void
@@ -129,18 +129,19 @@ xfs_symlink_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
if (!xfs_buf_verify_cksum(bp, XFS_SYMLINK_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_symlink_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_symlink_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -148,15 +149,16 @@ xfs_symlink_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ xfs_failaddr_t fa;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
- if (!xfs_symlink_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_symlink_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -171,6 +173,7 @@ const struct xfs_buf_ops xfs_symlink_buf_ops = {
.name = "xfs_symlink",
.verify_read = xfs_symlink_read_verify,
.verify_write = xfs_symlink_write_verify,
+ .verify_struct = xfs_symlink_verify,
};
void
@@ -207,3 +210,37 @@ xfs_symlink_local_to_remote(
xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsymlink_hdr) +
ifp->if_bytes - 1);
}
+
+/* Verify the consistency of an inline symlink. */
+xfs_failaddr_t
+xfs_symlink_shortform_verify(
+ struct xfs_inode *ip)
+{
+ char *sfp;
+ char *endp;
+ struct xfs_ifork *ifp;
+ int size;
+
+ ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ sfp = (char *)ifp->if_u1.if_data;
+ size = ifp->if_bytes;
+ endp = sfp + size;
+
+ /* Zero length symlinks can exist while we're deleting a remote one. */
+ if (size == 0)
+ return NULL;
+
+ /* No negative sizes or overly long symlink targets. */
+ if (size < 0 || size > XFS_SYMLINK_MAXLEN)
+ return __this_address;
+
+ /* No NULLs in the target either. */
+ if (memchr(sfp, 0, size - 1))
+ return __this_address;
+
+ /* We /did/ null-terminate the buffer, right? */
+ if (*(endp - 1) != 0)
+ return __this_address;
+ return NULL;
+}
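Taken together, the checks require 0 < if_bytes <= XFS_SYMLINK_MAXLEN, no NUL anywhere before the final byte, and a NUL in the final byte. For example, a 4-byte fork holding "ab\0c" fails the memchr() check, "abcd" fails the terminator check, and only "abc\0" passes.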
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 6bd916bd35e2..5f17641f040f 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -34,6 +34,9 @@
#include "xfs_trans_space.h"
#include "xfs_trace.h"
+#define _ALLOC true
+#define _FREE false
+
/*
* A buffer has a format structure overhead in the log in addition
* to the data, so we need to take this into account when reserving
@@ -132,43 +135,77 @@ xfs_calc_inode_res(
}
/*
- * The free inode btree is a conditional feature and the log reservation
- * requirements differ slightly from that of the traditional inode allocation
- * btree. The finobt tracks records for inode chunks with at least one free
- * inode. A record can be removed from the tree for an inode allocation
- * or free and thus the finobt reservation is unconditional across:
+ * Inode btree record insertion/removal modifies the inode btree and free space
+ * btrees (since the inobt does not use the agfl). This requires the following
+ * reservation:
*
- * - inode allocation
- * - inode free
- * - inode chunk allocation
+ * the inode btree: max depth * blocksize
+ * the allocation btrees: 2 trees * (max depth - 1) * block size
*
- * The 'modify' param indicates to include the record modification scenario. The
- * 'alloc' param indicates to include the reservation for free space btree
- * modifications on behalf of finobt modifications. This is required only for
- * transactions that do not already account for free space btree modifications.
+ * The caller must account for SB and AG header modifications, etc.
+ */
+STATIC uint
+xfs_calc_inobt_res(
+ struct xfs_mount *mp)
+{
+ return xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
+ XFS_FSB_TO_B(mp, 1));
+}
+
+/*
+ * The free inode btree is a conditional feature. The behavior differs slightly
+ * from that of the traditional inode btree in that the finobt tracks records
+ * for inode chunks with at least one free inode. A record can be removed from
+ * the tree during individual inode allocation. Therefore the finobt
+ * reservation is unconditional for both the inode chunk allocation and
+ * individual inode allocation (modify) cases.
*
- * the free inode btree: max depth * block size
- * the allocation btrees: 2 trees * (max depth - 1) * block size
- * the free inode btree entry: block size
+ * Behavior aside, the reservation for finobt modification is equivalent to the
+ * traditional inobt: cover a full finobt shape change plus block allocation.
*/
STATIC uint
xfs_calc_finobt_res(
- struct xfs_mount *mp,
- int alloc,
- int modify)
+ struct xfs_mount *mp)
{
- uint res;
-
if (!xfs_sb_version_hasfinobt(&mp->m_sb))
return 0;
- res = xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1));
- if (alloc)
- res += xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1));
- if (modify)
- res += (uint)XFS_FSB_TO_B(mp, 1);
+ return xfs_calc_inobt_res(mp);
+}
+/*
+ * Calculate the reservation required to allocate or free an inode chunk. This
+ * includes:
+ *
+ * the allocation btrees: 2 trees * (max depth - 1) * block size
+ * the inode chunk: m_ialloc_blks * N
+ *
+ * The size N of the inode chunk reservation depends on whether it is for
+ * allocation or free and which type of create transaction is in use. An inode
+ * chunk free always invalidates the buffers and only requires reservation for
+ * headers (N == 0). An inode chunk allocation requires a chunk-sized
+ * reservation on v4 and older superblocks to initialize the chunk. No chunk
+ * reservation is required for allocation on v5 supers, which use ordered
+ * buffers to initialize.
+ */
+STATIC uint
+xfs_calc_inode_chunk_res(
+ struct xfs_mount *mp,
+ bool alloc)
+{
+ uint res, size = 0;
+
+ res = xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
+ XFS_FSB_TO_B(mp, 1));
+ if (alloc) {
+ /* icreate tx uses ordered buffers */
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ return res;
+ size = XFS_FSB_TO_B(mp, 1);
+ }
+
+ res += xfs_calc_buf_res(mp->m_ialloc_blks, size);
return res;
}
@@ -232,8 +269,6 @@ xfs_calc_write_reservation(
* the super block to reflect the freed blocks: sector size
* worst case split in allocation btrees per extent assuming 4 extents:
* 4 exts * 2 trees * (2 * max depth - 1) * block size
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
*/
STATIC uint
xfs_calc_itruncate_reservation(
@@ -245,12 +280,7 @@ xfs_calc_itruncate_reservation(
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
- XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(5, 0) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(2 + mp->m_ialloc_blks +
- mp->m_in_maxlevels, 0)));
+ XFS_FSB_TO_B(mp, 1))));
}
/*
@@ -282,13 +312,14 @@ xfs_calc_rename_reservation(
* When first removing an inode from the unlinked list, we can modify:
* the agi hash list and counters: sector size
* the on disk inode before ours in the agi hash list: inode cluster size
+ * the on disk inode in the agi hash list: inode cluster size
*/
STATIC uint
xfs_calc_iunlink_remove_reservation(
struct xfs_mount *mp)
{
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
- max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
+ 2 * max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
}
/*
@@ -320,13 +351,13 @@ xfs_calc_link_reservation(
/*
* For adding an inode to the unlinked list we can modify:
* the agi hash list: sector size
- * the unlinked inode: inode size
+ * the on disk inode: inode cluster size
*/
STATIC uint
xfs_calc_iunlink_add_reservation(xfs_mount_t *mp)
{
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
- xfs_calc_inode_res(mp, 1);
+ max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
}
/*
@@ -379,45 +410,16 @@ xfs_calc_create_resv_modify(
xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
(uint)XFS_FSB_TO_B(mp, 1) +
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_finobt_res(mp, 1, 1);
-}
-
-/*
- * For create we can allocate some inodes giving:
- * the agi and agf of the ag getting the new inodes: 2 * sectorsize
- * the superblock for the nlink flag: sector size
- * the inode blocks allocated: mp->m_ialloc_blks * blocksize
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_create_resv_alloc(
- struct xfs_mount *mp)
-{
- return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
- mp->m_sb.sb_sectsize +
- xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1));
-}
-
-STATIC uint
-__xfs_calc_create_reservation(
- struct xfs_mount *mp)
-{
- return XFS_DQUOT_LOGRES(mp) +
- MAX(xfs_calc_create_resv_alloc(mp),
- xfs_calc_create_resv_modify(mp));
+ xfs_calc_finobt_res(mp);
}
/*
* For icreate we can allocate some inodes giving:
* the agi and agf of the ag getting the new inodes: 2 * sectorsize
* the superblock for the nlink flag: sector size
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
- * the finobt (record insertion)
+ * the inode chunk (allocation, optional init)
+ * the inobt (record insertion)
+ * the finobt (optional, record insertion)
*/
STATIC uint
xfs_calc_icreate_resv_alloc(
@@ -425,10 +427,9 @@ xfs_calc_icreate_resv_alloc(
{
return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
mp->m_sb.sb_sectsize +
- xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_finobt_res(mp, 0, 0);
+ xfs_calc_inode_chunk_res(mp, _ALLOC) +
+ xfs_calc_inobt_res(mp) +
+ xfs_calc_finobt_res(mp);
}
STATIC uint
@@ -440,26 +441,12 @@ xfs_calc_icreate_reservation(xfs_mount_t *mp)
}
STATIC uint
-xfs_calc_create_reservation(
- struct xfs_mount *mp)
-{
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return xfs_calc_icreate_reservation(mp);
- return __xfs_calc_create_reservation(mp);
-
-}
-
-STATIC uint
xfs_calc_create_tmpfile_reservation(
struct xfs_mount *mp)
{
uint res = XFS_DQUOT_LOGRES(mp);
- if (xfs_sb_version_hascrc(&mp->m_sb))
- res += xfs_calc_icreate_resv_alloc(mp);
- else
- res += xfs_calc_create_resv_alloc(mp);
-
+ res += xfs_calc_icreate_resv_alloc(mp);
return res + xfs_calc_iunlink_add_reservation(mp);
}
@@ -470,7 +457,7 @@ STATIC uint
xfs_calc_mkdir_reservation(
struct xfs_mount *mp)
{
- return xfs_calc_create_reservation(mp);
+ return xfs_calc_icreate_reservation(mp);
}
@@ -483,20 +470,24 @@ STATIC uint
xfs_calc_symlink_reservation(
struct xfs_mount *mp)
{
- return xfs_calc_create_reservation(mp) +
+ return xfs_calc_icreate_reservation(mp) +
xfs_calc_buf_res(1, XFS_SYMLINK_MAXLEN);
}
/*
* In freeing an inode we can modify:
* the inode being freed: inode size
- * the super block free inode counter: sector size
- * the agi hash list and counters: sector size
- * the inode btree entry: block size
- * the on disk inode before ours in the agi hash list: inode cluster size
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
+ * the super block free inode counter, AGF and AGFL: sector size
+ * the on disk inode (agi unlinked list removal)
+ * the inode chunk (invalidated, headers only)
+ * the inode btree
* the finobt (record insertion, removal or modification)
+ *
+ * Note that the inode chunk res. includes an allocfree res. for freeing of the
+ * inode chunk. This is technically extraneous because the inode chunk free is
+ * deferred (it occurs after a transaction roll). Include the extra reservation
+ * anyway, since we've had reports of ifree transaction overruns due to too many
+ * agfl fixups during inode chunk frees.
*/
STATIC uint
xfs_calc_ifree_reservation(
@@ -504,15 +495,11 @@ xfs_calc_ifree_reservation(
{
return XFS_DQUOT_LOGRES(mp) +
xfs_calc_inode_res(mp, 1) +
- xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
+ xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
xfs_calc_iunlink_remove_reservation(mp) +
- xfs_calc_buf_res(1, 0) +
- xfs_calc_buf_res(2 + mp->m_ialloc_blks +
- mp->m_in_maxlevels, 0) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_finobt_res(mp, 0, 1);
+ xfs_calc_inode_chunk_res(mp, _FREE) +
+ xfs_calc_inobt_res(mp) +
+ xfs_calc_finobt_res(mp);
}
/*
@@ -842,7 +829,7 @@ xfs_trans_resv_calc(
resp->tr_symlink.tr_logcount = XFS_SYMLINK_LOG_COUNT;
resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
- resp->tr_create.tr_logres = xfs_calc_create_reservation(mp);
+ resp->tr_create.tr_logres = xfs_calc_icreate_reservation(mp);
resp->tr_create.tr_logcount = XFS_CREATE_LOG_COUNT;
resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
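
To see how the new reservation helpers compose, here is a hedged user-space sketch of the icreate arithmetic. The geom struct, the buf_res() helper, the 128-byte per-buffer log overhead, and the sample geometry are assumptions standing in for the mount structure and xfs_calc_buf_res(); only the shape of the calculation mirrors the code above.

    #include <stdio.h>

    /* Simplified stand-ins for the mount geometry the formulas use. */
    struct geom {
            unsigned int blocksize;     /* filesystem block size */
            unsigned int sectsize;      /* sector size */
            unsigned int in_maxlevels;  /* max inode btree depth */
            unsigned int ag_maxlevels;  /* max allocation btree depth */
            unsigned int ialloc_blks;   /* blocks per inode chunk */
            int has_crc;                /* v5 superblock? */
    };

    /* One logged buffer: its data plus an assumed fixed log overhead. */
    static unsigned int buf_res(unsigned int nbufs, unsigned int len)
    {
            return nbufs * (len + 128);
    }

    /* Rough allocfree count: 2 trees * (max depth - 1) per extent. */
    static unsigned int allocfree_count(const struct geom *g, unsigned int n)
    {
            return n * 2 * (g->ag_maxlevels - 1);
    }

    /* The inode btree plus the allocation btrees (xfs_calc_inobt_res). */
    static unsigned int inobt_res(const struct geom *g)
    {
            return buf_res(g->in_maxlevels, g->blocksize) +
                   buf_res(allocfree_count(g, 1), g->blocksize);
    }

    /* Inode chunk: headers only on v5 alloc, full init on v4 alloc. */
    static unsigned int chunk_res(const struct geom *g, int alloc)
    {
            unsigned int res = buf_res(allocfree_count(g, 1), g->blocksize);

            if (alloc && g->has_crc)
                    return res;     /* ordered buffers, no chunk init */
            return res + buf_res(g->ialloc_blks, alloc ? g->blocksize : 0);
    }

    int main(void)
    {
            struct geom g = { 4096, 512, 3, 5, 16, 1 };

            /* AGI+AGF headers, SB, chunk, inobt, finobt (== inobt res). */
            printf("icreate res: %u bytes\n",
                   buf_res(2, g.sectsize) + g.sectsize +
                   chunk_res(&g, 1) + inobt_res(&g) + inobt_res(&g));
            return 0;
    }
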
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 2a9b4f9e93c6..fd975524f460 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -32,30 +32,17 @@
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
+#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
/*
- * Set up scrub to check all the static metadata in each AG.
- * This means the SB, AGF, AGI, and AGFL headers.
+ * Walk all the blocks in the AGFL. The fn callback can return any negative
+ * error code or XFS_BTREE_QUERY_RANGE_ABORT.
*/
int
-xfs_scrub_setup_ag_header(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = sc->mp;
-
- if (sc->sm->sm_agno >= mp->m_sb.sb_agcount ||
- sc->sm->sm_ino || sc->sm->sm_gen)
- return -EINVAL;
- return xfs_scrub_setup_fs(sc, ip);
-}
-
-/* Walk all the blocks in the AGFL. */
-int
xfs_scrub_walk_agfl(
struct xfs_scrub_context *sc,
int (*fn)(struct xfs_scrub_context *,
@@ -115,6 +102,36 @@ xfs_scrub_walk_agfl(
/* Superblock */
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_superblock_xref(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sc->sm->sm_agno;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agbno = XFS_SB_BLOCK(mp);
+
+ error = xfs_scrub_ag_init(sc, agno, &sc->sa);
+ if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+
+ /* scrub teardown will take care of sc->sa for us */
+}
+
/*
* Scrub the filesystem superblock.
*
@@ -143,6 +160,22 @@ xfs_scrub_superblock(
error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_sb_buf_ops);
+ /*
+ * The superblock verifier can return several different error codes
+ * if it thinks the superblock doesn't look right. For a mount these
+ * would all get bounced back to userspace, but if we're here then the
+ * fs mounted successfully, which means that this secondary superblock
+ * is simply incorrect. Treat all these codes the same way we treat
+ * any corruption.
+ */
+ switch (error) {
+ case -EINVAL: /* also -EWRONGFS */
+ case -ENOSYS:
+ case -EFBIG:
+ error = -EFSCORRUPTED;
+ default:
+ break;
+ }
if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
return error;
@@ -387,11 +420,175 @@ xfs_scrub_superblock(
BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
xfs_scrub_block_set_corrupt(sc, bp);
+ xfs_scrub_superblock_xref(sc, bp);
+
return error;
}
/* AGF */
+/* Tally freespace record lengths. */
+STATIC int
+xfs_scrub_agf_record_bno_lengths(
+ struct xfs_btree_cur *cur,
+ struct xfs_alloc_rec_incore *rec,
+ void *priv)
+{
+ xfs_extlen_t *blocks = priv;
+
+ (*blocks) += rec->ar_blockcount;
+ return 0;
+}
+
+/* Check agf_freeblks */
+static inline void
+xfs_scrub_agf_xref_freeblks(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_extlen_t blocks = 0;
+ int error;
+
+ if (!sc->sa.bno_cur)
+ return;
+
+ error = xfs_alloc_query_all(sc->sa.bno_cur,
+ xfs_scrub_agf_record_bno_lengths, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ return;
+ if (blocks != be32_to_cpu(agf->agf_freeblks))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+}
+
+/* Cross reference the AGF with the cntbt (freespace by length btree) */
+static inline void
+xfs_scrub_agf_xref_cntbt(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_agblock_t agbno;
+ xfs_extlen_t blocks;
+ int have;
+ int error;
+
+ if (!sc->sa.cnt_cur)
+ return;
+
+ /* Any freespace at all? */
+ error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ return;
+ if (!have) {
+ if (agf->agf_freeblks != cpu_to_be32(0))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ return;
+ }
+
+ /* Check agf_longest */
+ error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ return;
+ if (!have || blocks != be32_to_cpu(agf->agf_longest))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+}
+
+/* Check the btree block counts in the AGF against the btrees. */
+STATIC void
+xfs_scrub_agf_xref_btreeblks(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t blocks;
+ xfs_agblock_t btreeblks;
+ int error;
+
+ /* Check agf_rmap_blocks; set up for agf_btreeblks check */
+ if (sc->sa.rmap_cur) {
+ error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ btreeblks = blocks - 1;
+ if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ } else {
+ btreeblks = 0;
+ }
+
+ /*
+ * No rmap cursor; we can't xref if we have the rmapbt feature.
+ * We also can't do it if we're missing the free space btree cursors.
+ */
+ if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
+ !sc->sa.bno_cur || !sc->sa.cnt_cur)
+ return;
+
+ /* Check agf_btreeblks */
+ error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ return;
+ btreeblks += blocks - 1;
+
+ error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ return;
+ btreeblks += blocks - 1;
+
+ if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+}
+
+/* Check agf_refcount_blocks against tree size */
+static inline void
+xfs_scrub_agf_xref_refcblks(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_agblock_t blocks;
+ int error;
+
+ if (!sc->sa.refc_cur)
+ return;
+
+ error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_agf_xref(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agbno = XFS_AGF_BLOCK(mp);
+
+ error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ if (error)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_agf_xref_freeblks(sc);
+ xfs_scrub_agf_xref_cntbt(sc);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_agf_xref_btreeblks(sc);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xfs_scrub_agf_xref_refcblks(sc);
+
+ /* scrub teardown will take care of sc->sa for us */
+}
+
/* Scrub the AGF. */
int
xfs_scrub_agf(
@@ -414,6 +611,7 @@ xfs_scrub_agf(
&sc->sa.agf_bp, &sc->sa.agfl_bp);
if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp);
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
@@ -470,6 +668,7 @@ xfs_scrub_agf(
if (agfl_count != 0 && fl_count != agfl_count)
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xfs_scrub_agf_xref(sc);
out:
return error;
}
@@ -477,11 +676,28 @@ out:
/* AGFL */
struct xfs_scrub_agfl_info {
+ struct xfs_owner_info oinfo;
unsigned int sz_entries;
unsigned int nr_entries;
xfs_agblock_t *entries;
};
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_agfl_block_xref(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ struct xfs_owner_info *oinfo)
+{
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+}
+
/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
@@ -499,6 +715,8 @@ xfs_scrub_agfl_block(
else
xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);
+ xfs_scrub_agfl_block_xref(sc, agbno, priv);
+
return 0;
}
@@ -513,6 +731,37 @@ xfs_scrub_agblock_cmp(
return (int)*a - (int)*b;
}
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_agfl_xref(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agbno = XFS_AGFL_BLOCK(mp);
+
+ error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ if (error)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+
+ /*
+ * Scrub teardown will take care of sc->sa for us. Leave sc->sa
+ * active so that the agfl block xref can use it too.
+ */
+}
+
/* Scrub the AGFL. */
int
xfs_scrub_agfl(
@@ -532,6 +781,12 @@ xfs_scrub_agfl(
goto out;
if (!sc->sa.agf_bp)
return -EFSCORRUPTED;
+ xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp);
+
+ xfs_scrub_agfl_xref(sc);
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
/* Allocate buffer to ensure uniqueness of AGFL entries. */
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
@@ -548,6 +803,7 @@ xfs_scrub_agfl(
}
/* Check the blocks in the AGFL. */
+ xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
if (error)
goto out_free;
@@ -575,6 +831,56 @@ out:
/* AGI */
+/* Check agi_count/agi_freecount */
+static inline void
+xfs_scrub_agi_xref_icounts(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
+ xfs_agino_t icount;
+ xfs_agino_t freecount;
+ int error;
+
+ if (!sc->sa.ino_cur)
+ return;
+
+ error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur))
+ return;
+ if (be32_to_cpu(agi->agi_count) != icount ||
+ be32_to_cpu(agi->agi_freecount) != freecount)
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_agi_xref(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agbno = XFS_AGI_BLOCK(mp);
+
+ error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ if (error)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_scrub_agi_xref_icounts(sc);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+
+ /* scrub teardown will take care of sc->sa for us */
+}
+
/* Scrub the AGI. */
int
xfs_scrub_agi(
@@ -598,6 +904,7 @@ xfs_scrub_agi(
&sc->sa.agf_bp, &sc->sa.agfl_bp);
if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);
agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
@@ -653,6 +960,7 @@ xfs_scrub_agi(
if (agi->agi_pad32 != cpu_to_be32(0))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xfs_scrub_agi_xref(sc);
out:
return error;
}
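
The AGF checks above share one pattern: walk a btree, tally something, and compare the tally with a header field. A simplified sketch of the two freespace tallies, with sorted arrays standing in for btree cursors and invented names throughout:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    struct free_extent {
            uint32_t startblock;
            uint32_t blockcount;
    };

    /* Sum every free-space record and compare with the header counter. */
    static bool agf_freeblks_ok(const struct free_extent *recs, size_t n,
                                uint32_t hdr_freeblks)
    {
            uint32_t blocks = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    blocks += recs[i].blockcount;
            return blocks == hdr_freeblks;
    }

    /* The largest record of the by-size index must match agf_longest. */
    static bool agf_longest_ok(const struct free_extent *by_size, size_t n,
                               uint32_t hdr_longest)
    {
            if (n == 0)
                    return hdr_longest == 0;
            return by_size[n - 1].blockcount == hdr_longest;
    }
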
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 059663e13414..517c079d3f68 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -31,6 +31,7 @@
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
+#include "xfs_alloc.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -49,6 +50,64 @@ xfs_scrub_setup_ag_allocbt(
}
/* Free space btree scrubber. */
+/*
+ * Ensure there's a corresponding cntbt/bnobt record matching this
+ * bnobt/cntbt record, respectively.
+ */
+STATIC void
+xfs_scrub_allocbt_xref_other(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ struct xfs_btree_cur **pcur;
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ int has_otherrec;
+ int error;
+
+ if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT)
+ pcur = &sc->sa.cnt_cur;
+ else
+ pcur = &sc->sa.bno_cur;
+ if (!*pcur)
+ return;
+
+ error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec);
+ if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ return;
+ if (!has_otherrec) {
+ xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ return;
+ }
+
+ error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec);
+ if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ return;
+ if (!has_otherrec) {
+ xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ return;
+ }
+
+ if (fbno != agbno || flen != len)
+ xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_allocbt_xref(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_allocbt_xref_other(sc, agbno, len);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
+ xfs_scrub_xref_has_no_owner(sc, agbno, len);
+ xfs_scrub_xref_is_not_shared(sc, agbno, len);
+}
/* Scrub a bnobt/cntbt record. */
STATIC int
@@ -70,6 +129,8 @@ xfs_scrub_allocbt_rec(
!xfs_verify_agbno(mp, agno, bno + len - 1))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xfs_scrub_allocbt_xref(bs->sc, bno, len);
+
return error;
}
@@ -100,3 +161,23 @@ xfs_scrub_cntbt(
{
return xfs_scrub_allocbt(sc, XFS_BTNUM_CNT);
}
+
+/* xref check that the extent is not free */
+void
+xfs_scrub_xref_is_used_space(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ bool is_freesp;
+ int error;
+
+ if (!sc->sa.bno_cur)
+ return;
+
+ error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ return;
+ if (is_freesp)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
+}
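
The bnobt/cntbt cross-check asks whether the other free-space index holds an identical (startblock, length) record. A hedged sketch of that lookup-and-compare step, keying a sorted array by startblock for simplicity (the real cntbt keys by length first):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    struct free_extent {
            uint32_t startblock;
            uint32_t blockcount;
    };

    /*
     * Find the record with the largest startblock <= bno in the other
     * index (the array is assumed sorted), mimicking a lookup_le, then
     * insist it is exactly the record we started from.
     */
    static bool other_tree_has_record(const struct free_extent *other,
                                      size_t nrecs, uint32_t bno,
                                      uint32_t len)
    {
            const struct free_extent *found = NULL;
            size_t i;

            for (i = 0; i < nrecs && other[i].startblock <= bno; i++)
                    found = &other[i];
            if (!found)
                    return false;   /* no candidate record at all */
            return found->startblock == bno && found->blockcount == len;
    }
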
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 42fec0bcd9e1..d00282130492 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -37,6 +37,7 @@
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
+#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -99,6 +100,201 @@ struct xfs_scrub_bmap_info {
int whichfork;
};
+/* Look for a corresponding rmap for this irec. */
+static inline bool
+xfs_scrub_bmap_get_rmap(
+ struct xfs_scrub_bmap_info *info,
+ struct xfs_bmbt_irec *irec,
+ xfs_agblock_t agbno,
+ uint64_t owner,
+ struct xfs_rmap_irec *rmap)
+{
+ xfs_fileoff_t offset;
+ unsigned int rflags = 0;
+ int has_rmap;
+ int error;
+
+ if (info->whichfork == XFS_ATTR_FORK)
+ rflags |= XFS_RMAP_ATTR_FORK;
+
+ /*
+ * CoW staging extents are owned (on disk) by the refcountbt, so
+ * their rmaps do not have offsets.
+ */
+ if (info->whichfork == XFS_COW_FORK)
+ offset = 0;
+ else
+ offset = irec->br_startoff;
+
+ /*
+ * If the caller thinks this could be a shared bmbt extent (IOWs,
+ * any data fork extent of a reflink inode) then we have to use the
+ * range rmap lookup to make sure we get the correct owner/offset.
+ */
+ if (info->is_shared) {
+ error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
+ owner, offset, rflags, rmap, &has_rmap);
+ if (!xfs_scrub_should_check_xref(info->sc, &error,
+ &info->sc->sa.rmap_cur))
+ return false;
+ goto out;
+ }
+
+ /*
+ * Otherwise, use the (faster) regular lookup.
+ */
+ error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
+ offset, rflags, &has_rmap);
+ if (!xfs_scrub_should_check_xref(info->sc, &error,
+ &info->sc->sa.rmap_cur))
+ return false;
+ if (!has_rmap)
+ goto out;
+
+ error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
+ if (!xfs_scrub_should_check_xref(info->sc, &error,
+ &info->sc->sa.rmap_cur))
+ return false;
+
+out:
+ if (!has_rmap)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+ return has_rmap;
+}
+
+/* Make sure that we have rmapbt records for this extent. */
+STATIC void
+xfs_scrub_bmap_xref_rmap(
+ struct xfs_scrub_bmap_info *info,
+ struct xfs_bmbt_irec *irec,
+ xfs_agblock_t agbno)
+{
+ struct xfs_rmap_irec rmap;
+ unsigned long long rmap_end;
+ uint64_t owner;
+
+ if (!info->sc->sa.rmap_cur)
+ return;
+
+ if (info->whichfork == XFS_COW_FORK)
+ owner = XFS_RMAP_OWN_COW;
+ else
+ owner = info->sc->ip->i_ino;
+
+ /* Find the rmap record for this irec. */
+ if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
+ return;
+
+ /* Check the rmap. */
+ rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
+ if (rmap.rm_startblock > agbno ||
+ agbno + irec->br_blockcount > rmap_end)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ /*
+ * Check the logical offsets if applicable. CoW staging extents
+ * don't track logical offsets since the mappings only exist in
+ * memory.
+ */
+ if (info->whichfork != XFS_COW_FORK) {
+ rmap_end = (unsigned long long)rmap.rm_offset +
+ rmap.rm_blockcount;
+ if (rmap.rm_offset > irec->br_startoff ||
+ irec->br_startoff + irec->br_blockcount > rmap_end)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc,
+ info->whichfork, irec->br_startoff);
+ }
+
+ if (rmap.rm_owner != owner)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ /*
+ * Check for discrepancies between the unwritten flag in the irec and
+ * the rmap. Note that the (in-memory) CoW fork distinguishes between
+ * unwritten and written extents, but we don't track that in the rmap
+ * records because the blocks are owned (on-disk) by the refcountbt,
+ * which doesn't track unwritten state.
+ */
+ if (owner != XFS_RMAP_OWN_COW &&
+ irec->br_state == XFS_EXT_UNWRITTEN &&
+ !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ if (info->whichfork == XFS_ATTR_FORK &&
+ !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+ if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+}
+
+/* Cross-reference a single rtdev extent record. */
+STATIC void
+xfs_scrub_bmap_rt_extent_xref(
+ struct xfs_scrub_bmap_info *info,
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xfs_bmbt_irec *irec)
+{
+ if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
+ irec->br_blockcount);
+}
+
+/* Cross-reference a single datadev extent record. */
+STATIC void
+xfs_scrub_bmap_extent_xref(
+ struct xfs_scrub_bmap_info *info,
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xfs_bmbt_irec *irec)
+{
+ struct xfs_mount *mp = info->sc->mp;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ int error;
+
+ if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
+ agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
+ len = irec->br_blockcount;
+
+ error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
+ if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
+ irec->br_startoff, &error))
+ return;
+
+ xfs_scrub_xref_is_used_space(info->sc, agbno, len);
+ xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
+ xfs_scrub_bmap_xref_rmap(info, irec, agbno);
+ switch (info->whichfork) {
+ case XFS_DATA_FORK:
+ if (xfs_is_reflink_inode(info->sc->ip))
+ break;
+ /* fall through */
+ case XFS_ATTR_FORK:
+ xfs_scrub_xref_is_not_shared(info->sc, agbno,
+ irec->br_blockcount);
+ break;
+ case XFS_COW_FORK:
+ xfs_scrub_xref_is_cow_staging(info->sc, agbno,
+ irec->br_blockcount);
+ break;
+ }
+
+ xfs_scrub_ag_free(info->sc, &info->sc->sa);
+}
+
/* Scrub a single extent record. */
STATIC int
xfs_scrub_bmap_extent(
@@ -109,6 +305,7 @@ xfs_scrub_bmap_extent(
{
struct xfs_mount *mp = info->sc->mp;
struct xfs_buf *bp = NULL;
+ xfs_filblks_t end;
int error = 0;
if (cur)
@@ -136,19 +333,23 @@ xfs_scrub_bmap_extent(
irec->br_startoff);
/* Make sure the extent points to a valid place. */
+ if (irec->br_blockcount > MAXEXTLEN)
+ xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
+ end = irec->br_startblock + irec->br_blockcount - 1;
if (info->is_rt &&
(!xfs_verify_rtbno(mp, irec->br_startblock) ||
- !xfs_verify_rtbno(mp, irec->br_startblock +
- irec->br_blockcount - 1)))
+ !xfs_verify_rtbno(mp, end)))
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (!info->is_rt &&
(!xfs_verify_fsbno(mp, irec->br_startblock) ||
- !xfs_verify_fsbno(mp, irec->br_startblock +
- irec->br_blockcount - 1)))
+ !xfs_verify_fsbno(mp, end) ||
+ XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
+ XFS_FSB_TO_AGNO(mp, end)))
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
@@ -158,6 +359,11 @@ xfs_scrub_bmap_extent(
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
+ if (info->is_rt)
+ xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
+ else
+ xfs_scrub_bmap_extent_xref(info, ip, cur, irec);
+
info->lastoff = irec->br_startoff + irec->br_blockcount;
return error;
}
@@ -235,7 +441,6 @@ xfs_scrub_bmap(
struct xfs_ifork *ifp;
xfs_fileoff_t endoff;
struct xfs_iext_cursor icur;
- bool found;
int error = 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -314,9 +519,7 @@ xfs_scrub_bmap(
/* Scrub extent records. */
info.lastoff = 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
- for (found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &irec);
- found != 0;
- found = xfs_iext_next_extent(ifp, &icur, &irec)) {
+ for_each_xfs_iext(ifp, &icur, &irec) {
if (xfs_scrub_should_terminate(sc, &error))
break;
if (isnullstartblock(irec.br_startblock))
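
The rmap cross-reference above reduces to interval containment: the rmap record must cover the mapping in block space and, for data mappings, in logical-offset space, and the owners must agree. A compact sketch with plain structs in place of the kernel types:

    #include <stdbool.h>
    #include <stdint.h>

    struct mapping {                /* one file extent */
            uint64_t startoff;      /* logical offset, in blocks */
            uint64_t startblock;
            uint32_t blockcount;
            uint64_t owner;
    };

    struct rmap_rec {               /* one reverse-mapping record */
            uint64_t startblock;
            uint32_t blockcount;
            uint64_t offset;
            uint64_t owner;
    };

    /*
     * The rmap record must contain the mapping in block space and in
     * logical-offset space, and the owners must agree.
     */
    static bool rmap_covers_mapping(const struct rmap_rec *r,
                                    const struct mapping *m)
    {
            uint64_t bend = r->startblock + r->blockcount;
            uint64_t oend = r->offset + r->blockcount;

            if (r->startblock > m->startblock ||
                m->startblock + m->blockcount > bend)
                    return false;
            if (r->offset > m->startoff ||
                m->startoff + m->blockcount > oend)
                    return false;
            return r->owner == m->owner;
    }
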
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index df0766132ace..54218168c8f9 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -42,12 +42,14 @@
* Check for btree operation errors. See the section about handling
* operational errors in common.c.
*/
-bool
-xfs_scrub_btree_process_error(
+static bool
+__xfs_scrub_btree_process_error(
struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur,
int level,
- int *error)
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
if (*error == 0)
return true;
@@ -60,36 +62,80 @@ xfs_scrub_btree_process_error(
case -EFSBADCRC:
case -EFSCORRUPTED:
/* Note the badness but don't abort. */
- sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ sc->sm->sm_flags |= errflag;
*error = 0;
/* fall through */
default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
trace_xfs_scrub_ifork_btree_op_error(sc, cur, level,
- *error, __return_address);
+ *error, ret_ip);
else
trace_xfs_scrub_btree_op_error(sc, cur, level,
- *error, __return_address);
+ *error, ret_ip);
break;
}
return false;
}
+bool
+xfs_scrub_btree_process_error(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error)
+{
+ return __xfs_scrub_btree_process_error(sc, cur, level, error,
+ XFS_SCRUB_OFLAG_CORRUPT, __return_address);
+}
+
+bool
+xfs_scrub_btree_xref_process_error(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error)
+{
+ return __xfs_scrub_btree_process_error(sc, cur, level, error,
+ XFS_SCRUB_OFLAG_XFAIL, __return_address);
+}
+
/* Record btree block corruption. */
-void
-xfs_scrub_btree_set_corrupt(
+static void
+__xfs_scrub_btree_set_corrupt(
struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur,
- int level)
+ int level,
+ __u32 errflag,
+ void *ret_ip)
{
- sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ sc->sm->sm_flags |= errflag;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
trace_xfs_scrub_ifork_btree_error(sc, cur, level,
- __return_address);
+ ret_ip);
else
trace_xfs_scrub_btree_error(sc, cur, level,
- __return_address);
+ ret_ip);
+}
+
+void
+xfs_scrub_btree_set_corrupt(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
+ __return_address);
+}
+
+void
+xfs_scrub_btree_xref_set_corrupt(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
+ __return_address);
}
/*
@@ -268,6 +314,8 @@ xfs_scrub_btree_block_check_sibling(
pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
if (!xfs_scrub_btree_ptr_ok(bs, level + 1, pp))
goto out;
+ if (pbp)
+ xfs_scrub_buffer_recheck(bs->sc, pbp);
if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
@@ -315,6 +363,97 @@ out:
return error;
}
+struct check_owner {
+ struct list_head list;
+ xfs_daddr_t daddr;
+ int level;
+};
+
+/*
+ * Make sure this btree block isn't in the free list and that there's
+ * an rmap record for it.
+ */
+STATIC int
+xfs_scrub_btree_check_block_owner(
+ struct xfs_scrub_btree *bs,
+ int level,
+ xfs_daddr_t daddr)
+{
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_btnum_t btnum;
+ bool init_sa;
+ int error = 0;
+
+ if (!bs->cur)
+ return 0;
+
+ btnum = bs->cur->bc_btnum;
+ agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
+ agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);
+
+ init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
+ if (init_sa) {
+ error = xfs_scrub_ag_init(bs->sc, agno, &bs->sc->sa);
+ if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur,
+ level, &error))
+ return error;
+ }
+
+ xfs_scrub_xref_is_used_space(bs->sc, agbno, 1);
+ /*
+ * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
+ * have to nullify it (to shut down further block owner checks) if
+ * self-xref encounters problems.
+ */
+ if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
+ bs->cur = NULL;
+
+ xfs_scrub_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
+ if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
+ bs->cur = NULL;
+
+ if (init_sa)
+ xfs_scrub_ag_free(bs->sc, &bs->sc->sa);
+
+ return error;
+}
+
+/* Check the owner of a btree block. */
+STATIC int
+xfs_scrub_btree_check_owner(
+ struct xfs_scrub_btree *bs,
+ int level,
+ struct xfs_buf *bp)
+{
+ struct xfs_btree_cur *cur = bs->cur;
+ struct check_owner *co;
+
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
+ return 0;
+
+ /*
+ * We want to cross-reference each btree block with the bnobt
+ * and the rmapbt. We cannot cross-reference the bnobt or
+ * rmapbt while scanning the bnobt or rmapbt, respectively,
+ * because we cannot alter the cursor and we'd prefer not to
+ * duplicate cursors. Therefore, save the buffer daddr for
+ * later scanning.
+ */
+ if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
+ co = kmem_alloc(sizeof(struct check_owner),
+ KM_MAYFAIL | KM_NOFS);
+ if (!co)
+ return -ENOMEM;
+ co->level = level;
+ co->daddr = XFS_BUF_ADDR(bp);
+ list_add_tail(&co->list, &bs->to_check);
+ return 0;
+ }
+
+ return xfs_scrub_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
+}
+
/*
* Grab and scrub a btree block given a btree pointer. Returns block
* and buffer pointers (if applicable) if they're ok to use.
@@ -349,6 +488,16 @@ xfs_scrub_btree_get_block(
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
return 0;
}
+ if (*pbp)
+ xfs_scrub_buffer_recheck(bs->sc, *pbp);
+
+ /*
+ * Check the block's owner; this function absorbs error codes
+ * for us.
+ */
+ error = xfs_scrub_btree_check_owner(bs, level, *pbp);
+ if (error)
+ return error;
/*
* Check the block's siblings; this function absorbs error codes
@@ -421,6 +570,8 @@ xfs_scrub_btree(
struct xfs_btree_block *block;
int level;
struct xfs_buf *bp;
+ struct check_owner *co;
+ struct check_owner *n;
int i;
int error = 0;
@@ -512,5 +663,14 @@ xfs_scrub_btree(
}
out:
+ /* Process deferred owner checks on btree blocks. */
+ list_for_each_entry_safe(co, n, &bs.to_check, list) {
+ if (!error && bs.cur)
+ error = xfs_scrub_btree_check_block_owner(&bs,
+ co->level, co->daddr);
+ list_del(&co->list);
+ kmem_free(co);
+ }
+
return error;
}
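
The deferred ownership checks work around a cursor restriction: the bnobt cannot be cross-referenced against itself while a bnobt cursor is live, so block addresses are queued and revisited after the walk. The list node and callback below are invented for the illustration:

    #include <stdint.h>
    #include <stdlib.h>

    struct check_owner {
            struct check_owner *next;
            uint64_t daddr;         /* disk address of the btree block */
            int level;
    };

    /* Queue a block for an ownership check after the walk finishes. */
    static int defer_owner_check(struct check_owner **head,
                                 uint64_t daddr, int level)
    {
            struct check_owner *co = malloc(sizeof(*co));

            if (!co)
                    return -1;
            co->daddr = daddr;
            co->level = level;
            co->next = *head;
            *head = co;
            return 0;
    }

    /* Run the deferred checks, freeing the queue even after an error. */
    static int run_deferred_checks(struct check_owner **head,
                                   int (*check)(uint64_t daddr, int level))
    {
            int error = 0;

            while (*head) {
                    struct check_owner *co = *head;

                    *head = co->next;
                    if (!error)
                            error = check(co->daddr, co->level);
                    free(co);
            }
            return error;
    }
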
diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h
index 4de825a626d1..e2b868ede70b 100644
--- a/fs/xfs/scrub/btree.h
+++ b/fs/xfs/scrub/btree.h
@@ -26,10 +26,19 @@
bool xfs_scrub_btree_process_error(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level, int *error);
+/* Check for btree xref operation errors. */
+bool xfs_scrub_btree_xref_process_error(struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur, int level,
+ int *error);
+
/* Check for btree corruption. */
void xfs_scrub_btree_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level);
+/* Check for btree xref discrepancies. */
+void xfs_scrub_btree_xref_set_corrupt(struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur, int level);
+
struct xfs_scrub_btree;
typedef int (*xfs_scrub_btree_rec_fn)(
struct xfs_scrub_btree *bs,
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index ac95fe911d96..8033ab9d8f47 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -78,12 +78,14 @@
*/
/* Check for operational errors. */
-bool
-xfs_scrub_process_error(
+static bool
+__xfs_scrub_process_error(
struct xfs_scrub_context *sc,
xfs_agnumber_t agno,
xfs_agblock_t bno,
- int *error)
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
switch (*error) {
case 0:
@@ -95,24 +97,48 @@ xfs_scrub_process_error(
case -EFSBADCRC:
case -EFSCORRUPTED:
/* Note the badness but don't abort. */
- sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ sc->sm->sm_flags |= errflag;
*error = 0;
/* fall through */
default:
trace_xfs_scrub_op_error(sc, agno, bno, *error,
- __return_address);
+ ret_ip);
break;
}
return false;
}
-/* Check for operational errors for a file offset. */
bool
-xfs_scrub_fblock_process_error(
+xfs_scrub_process_error(
+ struct xfs_scrub_context *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error)
+{
+ return __xfs_scrub_process_error(sc, agno, bno, error,
+ XFS_SCRUB_OFLAG_CORRUPT, __return_address);
+}
+
+bool
+xfs_scrub_xref_process_error(
+ struct xfs_scrub_context *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error)
+{
+ return __xfs_scrub_process_error(sc, agno, bno, error,
+ XFS_SCRUB_OFLAG_XFAIL, __return_address);
+}
+
+/* Check for operational errors for a file offset. */
+static bool
+__xfs_scrub_fblock_process_error(
struct xfs_scrub_context *sc,
int whichfork,
xfs_fileoff_t offset,
- int *error)
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
switch (*error) {
case 0:
@@ -124,17 +150,39 @@ xfs_scrub_fblock_process_error(
case -EFSBADCRC:
case -EFSCORRUPTED:
/* Note the badness but don't abort. */
- sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ sc->sm->sm_flags |= errflag;
*error = 0;
/* fall through */
default:
trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error,
- __return_address);
+ ret_ip);
break;
}
return false;
}
+bool
+xfs_scrub_fblock_process_error(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error)
+{
+ return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
+ XFS_SCRUB_OFLAG_CORRUPT, __return_address);
+}
+
+bool
+xfs_scrub_fblock_xref_process_error(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error)
+{
+ return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
+ XFS_SCRUB_OFLAG_XFAIL, __return_address);
+}
+
/*
* Handling scrub corruption/optimization/warning checks.
*
@@ -183,6 +231,16 @@ xfs_scrub_block_set_corrupt(
trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
}
+/* Record a corruption while cross-referencing. */
+void
+xfs_scrub_block_xref_set_corrupt(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
+ trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
+}
+
/*
* Record a corrupt inode. The trace data will include the block given
* by bp if bp is given; otherwise it will use the block location of the
@@ -198,6 +256,17 @@ xfs_scrub_ino_set_corrupt(
trace_xfs_scrub_ino_error(sc, ino, bp ? bp->b_bn : 0, __return_address);
}
+/* Record a corruption while cross-referencing with an inode. */
+void
+xfs_scrub_ino_xref_set_corrupt(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_buf *bp)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
+ trace_xfs_scrub_ino_error(sc, ino, bp ? bp->b_bn : 0, __return_address);
+}
+
/* Record corruption in a block indexed by a file fork. */
void
xfs_scrub_fblock_set_corrupt(
@@ -209,6 +278,17 @@ xfs_scrub_fblock_set_corrupt(
trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
}
+/* Record a corruption while cross-referencing a fork block. */
+void
+xfs_scrub_fblock_xref_set_corrupt(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
+ trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
+}
+
/*
* Warn about inodes that need administrative review but are not
* incorrect.
@@ -245,6 +325,59 @@ xfs_scrub_set_incomplete(
}
/*
+ * rmap scrubbing -- compute the number of blocks with a given owner,
+ * at least according to the reverse mapping data.
+ */
+
+struct xfs_scrub_rmap_ownedby_info {
+ struct xfs_owner_info *oinfo;
+ xfs_filblks_t *blocks;
+};
+
+STATIC int
+xfs_scrub_count_rmap_ownedby_irec(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xfs_scrub_rmap_ownedby_info *sroi = priv;
+ bool irec_attr;
+ bool oinfo_attr;
+
+ irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
+ oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;
+
+ if (rec->rm_owner != sroi->oinfo->oi_owner)
+ return 0;
+
+ if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
+ (*sroi->blocks) += rec->rm_blockcount;
+
+ return 0;
+}
+
+/*
+ * Calculate the number of blocks the rmap thinks are owned by something.
+ * The caller should pass us an rmapbt cursor.
+ */
+int
+xfs_scrub_count_rmap_ownedby_ag(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t *blocks)
+{
+ struct xfs_scrub_rmap_ownedby_info sroi;
+
+ sroi.oinfo = oinfo;
+ *blocks = 0;
+ sroi.blocks = blocks;
+
+ return xfs_rmap_query_all(cur, xfs_scrub_count_rmap_ownedby_irec,
+ &sroi);
+}
+
+/*
* AG scrubbing
*
* These helpers facilitate locking an allocation group's header
@@ -302,7 +435,7 @@ xfs_scrub_ag_read_headers(
error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
goto out;
-
+ error = 0;
out:
return error;
}
@@ -472,7 +605,7 @@ xfs_scrub_setup_ag_btree(
return error;
}
- error = xfs_scrub_setup_ag_header(sc, ip);
+ error = xfs_scrub_setup_fs(sc, ip);
if (error)
return error;
@@ -503,18 +636,11 @@ xfs_scrub_get_inode(
struct xfs_scrub_context *sc,
struct xfs_inode *ip_in)
{
+ struct xfs_imap imap;
struct xfs_mount *mp = sc->mp;
struct xfs_inode *ip = NULL;
int error;
- /*
- * If userspace passed us an AG number or a generation number
- * without an inode number, they haven't got a clue so bail out
- * immediately.
- */
- if (sc->sm->sm_agno || (sc->sm->sm_gen && !sc->sm->sm_ino))
- return -EINVAL;
-
/* We want to scan the inode we already had opened. */
if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
sc->ip = ip_in;
@@ -526,10 +652,33 @@ xfs_scrub_get_inode(
return -ENOENT;
error = xfs_iget(mp, NULL, sc->sm->sm_ino,
XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
- if (error == -ENOENT || error == -EINVAL) {
- /* inode doesn't exist... */
- return -ENOENT;
- } else if (error) {
+ switch (error) {
+ case -ENOENT:
+ /* Inode doesn't exist, just bail out. */
+ return error;
+ case 0:
+ /* Got an inode, continue. */
+ break;
+ case -EINVAL:
+ /*
+ * -EINVAL with IGET_UNTRUSTED could mean one of several
+ * things: userspace gave us an inode number that doesn't
+ * correspond to fs space, or doesn't have an inobt entry;
+ * or it could simply mean that the inode buffer failed the
+ * read verifiers.
+ *
+ * Try just the inode mapping lookup -- if it succeeds, then
+ * the inode buffer verifier failed and something needs fixing.
+ * Otherwise, we really couldn't find it, so tell userspace
+ * that it no longer exists.
+ */
+ error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
+ XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
+ if (error)
+ return -ENOENT;
+ error = -EFSCORRUPTED;
+ /* fall through */
+ default:
trace_xfs_scrub_op_error(sc,
XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
@@ -572,3 +721,61 @@ out:
/* scrub teardown will unlock and release the inode for us */
return error;
}
+
+/*
+ * Predicate that decides if we need to evaluate the cross-reference check.
+ * If there was an error accessing the cross-reference btree, just delete
+ * the cursor and skip the check.
+ */
+bool
+xfs_scrub_should_check_xref(
+ struct xfs_scrub_context *sc,
+ int *error,
+ struct xfs_btree_cur **curpp)
+{
+ if (*error == 0)
+ return true;
+
+ if (curpp) {
+ /* If we've already given up on xref, just bail out. */
+ if (!*curpp)
+ return false;
+
+ /* xref error, delete cursor and bail out. */
+ xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
+ *curpp = NULL;
+ }
+
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
+ trace_xfs_scrub_xref_error(sc, *error, __return_address);
+
+ /*
+ * Errors encountered during cross-referencing with another
+ * data structure should not cause this scrubber to abort.
+ */
+ *error = 0;
+ return false;
+}
+
+/* Run the structure verifiers on in-memory buffers to detect bad memory. */
+void
+xfs_scrub_buffer_recheck(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ if (bp->b_ops == NULL) {
+ xfs_scrub_block_set_corrupt(sc, bp);
+ return;
+ }
+ if (bp->b_ops->verify_struct == NULL) {
+ xfs_scrub_set_incomplete(sc);
+ return;
+ }
+ fa = bp->b_ops->verify_struct(bp);
+ if (!fa)
+ return;
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ trace_xfs_scrub_block_error(sc, bp->b_bn, fa);
+}
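
The xref error predicate added to common.c is the linchpin of the whole scheme: a failure while cross-referencing tears down the helper cursor, latches an "xref failed" flag, and squashes the error so the primary scrub keeps going. A self-contained sketch with invented types:

    #include <stdbool.h>

    #define OFLAG_XFAIL (1u << 0)   /* stand-in for XFS_SCRUB_OFLAG_XFAIL */

    struct cursor;                  /* opaque stand-in for xfs_btree_cur */

    static void cursor_destroy(struct cursor *cur)
    {
            (void)cur;              /* no-op for the sketch */
    }

    struct scrub_ctx {
            unsigned int flags;
    };

    /*
     * Decide whether a cross-reference check should proceed. On error,
     * delete the helper cursor, latch the xref-failure flag, and clear
     * the error so the primary scrub continues.
     */
    static bool should_check_xref(struct scrub_ctx *sc, int *error,
                                  struct cursor **curpp)
    {
            if (*error == 0)
                    return true;

            if (curpp) {
                    if (!*curpp)
                            return false;   /* already gave up on xref */
                    cursor_destroy(*curpp);
                    *curpp = NULL;
            }

            sc->flags |= OFLAG_XFAIL;
            *error = 0;                     /* xref errors are not fatal */
            return false;
    }

Squashing the error is deliberate: a broken helper structure should surface as a cross-referencing failure on this scrub, not abort it.
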
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 5c043855570e..ddb65d22c76a 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -56,6 +56,11 @@ bool xfs_scrub_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
bool xfs_scrub_fblock_process_error(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset, int *error);
+bool xfs_scrub_xref_process_error(struct xfs_scrub_context *sc,
+ xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
+bool xfs_scrub_fblock_xref_process_error(struct xfs_scrub_context *sc,
+ int whichfork, xfs_fileoff_t offset, int *error);
+
void xfs_scrub_block_set_preen(struct xfs_scrub_context *sc,
struct xfs_buf *bp);
void xfs_scrub_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino,
@@ -68,6 +73,13 @@ void xfs_scrub_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino,
void xfs_scrub_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset);
+void xfs_scrub_block_xref_set_corrupt(struct xfs_scrub_context *sc,
+ struct xfs_buf *bp);
+void xfs_scrub_ino_xref_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino,
+ struct xfs_buf *bp);
+void xfs_scrub_fblock_xref_set_corrupt(struct xfs_scrub_context *sc,
+ int whichfork, xfs_fileoff_t offset);
+
void xfs_scrub_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino,
struct xfs_buf *bp);
void xfs_scrub_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
@@ -76,10 +88,12 @@ void xfs_scrub_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
void xfs_scrub_set_incomplete(struct xfs_scrub_context *sc);
int xfs_scrub_checkpoint_log(struct xfs_mount *mp);
+/* Are we set up for a cross-referencing check? */
+bool xfs_scrub_should_check_xref(struct xfs_scrub_context *sc, int *error,
+ struct xfs_btree_cur **curpp);
+
/* Setup functions */
int xfs_scrub_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip);
-int xfs_scrub_setup_ag_header(struct xfs_scrub_context *sc,
- struct xfs_inode *ip);
int xfs_scrub_setup_ag_allocbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip);
int xfs_scrub_setup_ag_iallocbt(struct xfs_scrub_context *sc,
@@ -134,11 +148,16 @@ int xfs_scrub_walk_agfl(struct xfs_scrub_context *sc,
int (*fn)(struct xfs_scrub_context *, xfs_agblock_t bno,
void *),
void *priv);
+int xfs_scrub_count_rmap_ownedby_ag(struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t *blocks);
int xfs_scrub_setup_ag_btree(struct xfs_scrub_context *sc,
struct xfs_inode *ip, bool force_log);
int xfs_scrub_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in);
int xfs_scrub_setup_inode_contents(struct xfs_scrub_context *sc,
struct xfs_inode *ip, unsigned int resblks);
+void xfs_scrub_buffer_recheck(struct xfs_scrub_context *sc, struct xfs_buf *bp);
#endif /* __XFS_SCRUB_COMMON_H__ */
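
The ownedby counter declared above is a straightforward query-all accumulation. A sketch of the same idea over an array of records, with simplified owner matching (the kernel version also special-cases non-inode owners and the attr-fork flag):

    #include <stdint.h>
    #include <stddef.h>

    struct rmap_rec {
            uint64_t owner;
            uint32_t blockcount;
    };

    /* Total the blocks the reverse mappings assign to one owner. */
    static uint64_t count_rmap_ownedby(const struct rmap_rec *recs,
                                       size_t nrecs, uint64_t owner)
    {
            uint64_t blocks = 0;
            size_t i;

            for (i = 0; i < nrecs; i++)
                    if (recs[i].owner == owner)
                            blocks += recs[i].blockcount;
            return blocks;
    }
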
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index d94edd93cba8..bffdb7dc09bf 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -233,11 +233,28 @@ xfs_scrub_da_btree_write_verify(
return;
}
}
+static void *
+xfs_scrub_da_btree_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_da_blkinfo *info = bp->b_addr;
+
+ switch (be16_to_cpu(info->magic)) {
+ case XFS_DIR2_LEAF1_MAGIC:
+ case XFS_DIR3_LEAF1_MAGIC:
+ bp->b_ops = &xfs_dir3_leaf1_buf_ops;
+ return bp->b_ops->verify_struct(bp);
+ default:
+ bp->b_ops = &xfs_da3_node_buf_ops;
+ return bp->b_ops->verify_struct(bp);
+ }
+}
static const struct xfs_buf_ops xfs_scrub_da_btree_buf_ops = {
.name = "xfs_scrub_da_btree",
.verify_read = xfs_scrub_da_btree_read_verify,
.verify_write = xfs_scrub_da_btree_write_verify,
+ .verify_struct = xfs_scrub_da_btree_verify,
};
/* Check a block's sibling. */
@@ -276,6 +293,9 @@ xfs_scrub_da_btree_block_check_sibling(
xfs_scrub_da_set_corrupt(ds, level);
return error;
}
+ if (ds->state->altpath.blk[level].bp)
+ xfs_scrub_buffer_recheck(ds->sc,
+ ds->state->altpath.blk[level].bp);
/* Compare upper level pointer to sibling pointer. */
if (ds->state->altpath.blk[level].blkno != sibling)
@@ -358,6 +378,8 @@ xfs_scrub_da_btree_block(
&xfs_scrub_da_btree_buf_ops);
if (!xfs_scrub_da_process_error(ds, level, &error))
goto out_nobuf;
+ if (blk->bp)
+ xfs_scrub_buffer_recheck(ds->sc, blk->bp);
/*
* We didn't find a dir btree root block, which means that
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index 69e1efdd4019..50b6a26b0299 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -92,7 +92,7 @@ xfs_scrub_dir_check_ftype(
* inodes can trigger immediate inactive cleanup of the inode.
*/
error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
- if (!xfs_scrub_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ if (!xfs_scrub_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error))
goto out;
@@ -200,6 +200,7 @@ xfs_scrub_dir_rec(
struct xfs_inode *dp = ds->dargs.dp;
struct xfs_dir2_data_entry *dent;
struct xfs_buf *bp;
+ char *p, *endp;
xfs_ino_t ino;
xfs_dablk_t rec_bno;
xfs_dir2_db_t db;
@@ -237,9 +238,37 @@ xfs_scrub_dir_rec(
xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out;
}
+ xfs_scrub_buffer_recheck(ds->sc, bp);
- /* Retrieve the entry, sanity check it, and compare hashes. */
dent = (struct xfs_dir2_data_entry *)(((char *)bp->b_addr) + off);
+
+ /* Make sure we got a real directory entry. */
+ p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr);
+ endp = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
+ if (!endp) {
+ xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ goto out_relse;
+ }
+ while (p < endp) {
+ struct xfs_dir2_data_entry *dep;
+ struct xfs_dir2_data_unused *dup;
+
+ dup = (struct xfs_dir2_data_unused *)p;
+ if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
+ p += be16_to_cpu(dup->length);
+ continue;
+ }
+ dep = (struct xfs_dir2_data_entry *)p;
+ if (dep == dent)
+ break;
+ p += mp->m_dir_inode_ops->data_entsize(dep->namelen);
+ }
+ if (p >= endp) {
+ xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ goto out_relse;
+ }
+
+ /* Retrieve the entry, sanity check it, and compare hashes. */
ino = be64_to_cpu(dent->inumber);
hash = be32_to_cpu(ent->hashval);
tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent));
@@ -324,6 +353,7 @@ xfs_scrub_directory_data_bestfree(
}
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, bp);
/* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */
@@ -361,13 +391,7 @@ xfs_scrub_directory_data_bestfree(
/* Make sure the bestfrees are actually the best free spaces. */
ptr = (char *)d_ops->data_entry_p(bp->b_addr);
- if (is_block) {
- struct xfs_dir2_block_tail *btp;
-
- btp = xfs_dir2_block_tail_p(mp->m_dir_geo, bp->b_addr);
- endptr = (char *)xfs_dir2_block_leaf_p(btp);
- } else
- endptr = (char *)bp->b_addr + BBTOB(bp->b_length);
+ endptr = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
/* Iterate the entries, stopping when we hit or go past the end. */
while (ptr < endptr) {
@@ -474,6 +498,7 @@ xfs_scrub_directory_leaf1_bestfree(
error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, bp);
leaf = bp->b_addr;
d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
@@ -559,6 +584,7 @@ xfs_scrub_directory_free_bestfree(
error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, bp);
if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
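
The directory-entry walk added above re-derives entry boundaries by stepping over free regions (by their length tag) and used entries (by their computed size) until it hits the candidate pointer. A toy version over a flat buffer, with an invented record layout:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define FREE_TAG 0xffff /* stand-in for XFS_DIR2_DATA_FREE_TAG */

    /* Used entry size for this toy layout: 2-byte tag, 1-byte namelen,
     * then the name, rounded up to 8 bytes. */
    static size_t entry_size(uint8_t namelen)
    {
            return (3 + (size_t)namelen + 7) & ~(size_t)7;
    }

    /*
     * Walk the block and report whether 'target' lands exactly on the
     * start of a used entry. Free regions carry a 16-bit total length
     * right after their tag; a well-formed block is assumed.
     */
    static bool points_at_entry(const char *start, const char *end,
                                const char *target)
    {
            const char *p = start;

            while (p < end) {
                    uint16_t tag, len;

                    memcpy(&tag, p, sizeof(tag));
                    if (tag == FREE_TAG) {
                            memcpy(&len, p + 2, sizeof(len));
                            if (len == 0)
                                    break;  /* malformed free region */
                            p += len;
                            continue;
                    }
                    if (p == target)
                            return true;
                    p += entry_size((uint8_t)p[2]);
            }
            return false;
    }
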
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 496d6f2fbb9e..63ab3f98430d 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -58,6 +58,56 @@ xfs_scrub_setup_ag_iallocbt(
/* Inode btree scrubber. */
+/*
+ * If we're checking the finobt, cross-reference with the inobt.
+ * Otherwise we're checking the inobt; if there is a finobt, make sure
+ * it has a record (or not) depending on the freecount.
+ */
+static inline void
+xfs_scrub_iallocbt_chunk_xref_other(
+ struct xfs_scrub_context *sc,
+ struct xfs_inobt_rec_incore *irec,
+ xfs_agino_t agino)
+{
+ struct xfs_btree_cur **pcur;
+ bool has_irec;
+ int error;
+
+ if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
+ pcur = &sc->sa.ino_cur;
+ else
+ pcur = &sc->sa.fino_cur;
+ if (!(*pcur))
+ return;
+ error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
+ if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ return;
+ if ((irec->ir_freecount > 0 && !has_irec) ||
+     (irec->ir_freecount == 0 && has_irec))
+ xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+}
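
/*
 * A minimal userspace model of the invariant being checked above, with
 * illustrative names only (chunk_xref_ok is not a kernel function): an
 * inode chunk should have a finobt record if and only if it still has
 * free inodes, i.e. ir_freecount > 0.  Any mismatch is flagged XCORRUPT.
 */
#include <assert.h>
#include <stdbool.h>

static bool chunk_xref_ok(unsigned int ir_freecount, bool has_finobt_rec)
{
	return (ir_freecount > 0) == has_finobt_rec;
}

int main(void)
{
	assert(chunk_xref_ok(3, true));		/* free inodes, record present */
	assert(chunk_xref_ok(0, false));	/* full chunk, no record */
	assert(!chunk_xref_ok(2, false));	/* mismatch => XCORRUPT */
	return 0;
}
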
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_iallocbt_chunk_xref(
+ struct xfs_scrub_context *sc,
+ struct xfs_inobt_rec_incore *irec,
+ xfs_agino_t agino,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ struct xfs_owner_info oinfo;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, len);
+ xfs_scrub_iallocbt_chunk_xref_other(sc, irec, agino);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
+ xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, len);
+}
+
/* Is this chunk worth checking? */
STATIC bool
xfs_scrub_iallocbt_chunk(
@@ -76,6 +126,8 @@ xfs_scrub_iallocbt_chunk(
!xfs_verify_agbno(mp, agno, bno + len - 1))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xfs_scrub_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
+
return true;
}
@@ -190,8 +242,14 @@ xfs_scrub_iallocbt_check_freemask(
}
/* If any part of this is a hole, skip it. */
- if (ir_holemask)
+ if (ir_holemask) {
+ xfs_scrub_xref_is_not_owned_by(bs->sc, agbno,
+ blks_per_cluster, &oinfo);
continue;
+ }
+
+ xfs_scrub_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
+ &oinfo);
/* Grab the inode cluster buffer. */
imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
@@ -227,6 +285,7 @@ xfs_scrub_iallocbt_rec(
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_filblks_t *inode_blocks = bs->private;
struct xfs_inobt_rec_incore irec;
uint64_t holes;
xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
@@ -264,6 +323,9 @@ xfs_scrub_iallocbt_rec(
(agbno & (xfs_icluster_size_fsb(mp) - 1)))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ *inode_blocks += XFS_B_TO_FSB(mp,
+ irec.ir_count * mp->m_sb.sb_inodesize);
+
/* Handle non-sparse inodes */
if (!xfs_inobt_issparse(irec.ir_holemask)) {
len = XFS_B_TO_FSB(mp,
@@ -308,6 +370,72 @@ out:
return error;
}
+/*
+ * Make sure the inode btrees are as large as the rmap thinks they are.
+ * Don't bother if we're missing btree cursors, as we're already corrupt.
+ */
+STATIC void
+xfs_scrub_iallocbt_xref_rmap_btreeblks(
+ struct xfs_scrub_context *sc,
+ int which)
+{
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t blocks;
+ xfs_extlen_t inobt_blocks = 0;
+ xfs_extlen_t finobt_blocks = 0;
+ int error;
+
+ if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
+ (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur))
+ return;
+
+ /* Check that we saw as many inobt blocks as the rmap says. */
+ error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
+ if (!xfs_scrub_process_error(sc, 0, 0, &error))
+ return;
+
+ if (sc->sa.fino_cur) {
+ error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
+ if (!xfs_scrub_process_error(sc, 0, 0, &error))
+ return;
+ }
+
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
+ error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
+ &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (blocks != inobt_blocks + finobt_blocks)
+ xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
+}
+
+/*
+ * Make sure that the inobt records point to the same number of blocks as
+ * the rmap says are owned by inodes.
+ */
+STATIC void
+xfs_scrub_iallocbt_xref_rmap_inodes(
+ struct xfs_scrub_context *sc,
+ int which,
+ xfs_filblks_t inode_blocks)
+{
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t blocks;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ /* Check that we saw as many inode blocks as the rmap knows about. */
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
+ error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
+ &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (blocks != inode_blocks)
+ xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
+}
+
/* Scrub the inode btrees for some AG. */
STATIC int
xfs_scrub_iallocbt(
@@ -316,10 +444,29 @@ xfs_scrub_iallocbt(
{
struct xfs_btree_cur *cur;
struct xfs_owner_info oinfo;
+ xfs_filblks_t inode_blocks = 0;
+ int error;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
- return xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo, NULL);
+ error = xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo,
+ &inode_blocks);
+ if (error)
+ return error;
+
+ xfs_scrub_iallocbt_xref_rmap_btreeblks(sc, which);
+
+ /*
+ * If we're scrubbing the inode btree, inode_blocks is the number of
+ * blocks pointed to by all the inode chunk records. Therefore, we
+ * should compare to the number of inode chunk blocks that the rmap
+ * knows about. We can't do this for the finobt since it only points
+ * to inode chunks with free inodes.
+ */
+ if (which == XFS_BTNUM_INO)
+ xfs_scrub_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
+
+ return error;
}
int
@@ -335,3 +482,46 @@ xfs_scrub_finobt(
{
return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO);
}
+
+/* See if an inode btree has (or doesn't have) an inode chunk record. */
+static inline void
+xfs_scrub_xref_inode_check(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ struct xfs_btree_cur **icur,
+ bool should_have_inodes)
+{
+ bool has_inodes;
+ int error;
+
+ if (!(*icur))
+ return;
+
+ error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
+ if (!xfs_scrub_should_check_xref(sc, &error, icur))
+ return;
+ if (has_inodes != should_have_inodes)
+ xfs_scrub_btree_xref_set_corrupt(sc, *icur, 0);
+}
+
+/* xref check that the extent is not covered by inodes */
+void
+xfs_scrub_xref_is_not_inode_chunk(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
+ xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
+}
+
+/* xref check that the extent is covered by inodes */
+void
+xfs_scrub_xref_is_inode_chunk(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
+}
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index f120fb20452f..21297bef8df1 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -36,9 +36,13 @@
#include "xfs_ialloc.h"
#include "xfs_da_format.h"
#include "xfs_reflink.h"
+#include "xfs_rmap.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
+#include "scrub/btree.h"
#include "scrub/trace.h"
/*
@@ -64,7 +68,7 @@ xfs_scrub_setup_inode(
break;
case -EFSCORRUPTED:
case -EFSBADCRC:
- return 0;
+ return xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
default:
return error;
}
@@ -392,6 +396,14 @@ xfs_scrub_dinode(
break;
}
+ /* di_[amc]time.nsec */
+ if (be32_to_cpu(dip->di_atime.t_nsec) >= NSEC_PER_SEC)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ if (be32_to_cpu(dip->di_mtime.t_nsec) >= NSEC_PER_SEC)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ if (be32_to_cpu(dip->di_ctime.t_nsec) >= NSEC_PER_SEC)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
/*
* di_size. xfs_dinode_verify checks for things that screw up
* the VFS such as the upper bit being set and zero-length
@@ -495,6 +507,8 @@ xfs_scrub_dinode(
}
if (dip->di_version >= 3) {
+ if (be32_to_cpu(dip->di_crtime.t_nsec) >= NSEC_PER_SEC)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
xfs_scrub_inode_flags2(sc, bp, dip, ino, mode, flags, flags2);
xfs_scrub_inode_cowextsize(sc, bp, dip, ino, mode, flags,
flags2);
@@ -546,7 +560,7 @@ xfs_scrub_inode_map_raw(
*/
bp->b_ops = &xfs_inode_buf_ops;
dip = xfs_buf_offset(bp, imap.im_boffset);
- if (!xfs_dinode_verify(mp, ino, dip) ||
+ if (xfs_dinode_verify(mp, ino, dip) != NULL ||
!xfs_dinode_good_version(mp, dip->di_version)) {
xfs_scrub_ino_set_corrupt(sc, ino, bp);
goto out_buf;
@@ -567,18 +581,155 @@ out_buf:
return error;
}
+/*
+ * Make sure the finobt doesn't think this inode is free.
+ * We don't have to check the inobt ourselves because we got the inode via
+ * IGET_UNTRUSTED, which checks the inobt for us.
+ */
+static void
+xfs_scrub_inode_xref_finobt(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino)
+{
+ struct xfs_inobt_rec_incore rec;
+ xfs_agino_t agino;
+ int has_record;
+ int error;
+
+ if (!sc->sa.fino_cur)
+ return;
+
+ agino = XFS_INO_TO_AGINO(sc->mp, ino);
+
+ /*
+ * Try to get the finobt record. If we can't get it, then we're
+ * in good shape.
+ */
+ error = xfs_inobt_lookup(sc->sa.fino_cur, agino, XFS_LOOKUP_LE,
+ &has_record);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
+ !has_record)
+ return;
+
+ error = xfs_inobt_get_rec(sc->sa.fino_cur, &rec, &has_record);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
+ !has_record)
+ return;
+
+ /*
+ * Otherwise, make sure this record either doesn't cover this inode,
+ * or that it does but it's marked present.
+ */
+ if (rec.ir_startino > agino ||
+ rec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
+ return;
+
+ if (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0);
+}
+
+/* Cross reference the inode fields with the forks. */
+STATIC void
+xfs_scrub_inode_xref_bmap(
+ struct xfs_scrub_context *sc,
+ struct xfs_dinode *dip)
+{
+ xfs_extnum_t nextents;
+ xfs_filblks_t count;
+ xfs_filblks_t acount;
+ int error;
+
+ /* Walk all the extents to check nextents/naextents/nblocks. */
+ error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK,
+ &nextents, &count);
+ if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ return;
+ if (nextents < be32_to_cpu(dip->di_nextents))
+ xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino, NULL);
+
+ error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK,
+ &nextents, &acount);
+ if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ return;
+ if (nextents != be16_to_cpu(dip->di_anextents))
+ xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino, NULL);
+
+ /* Check nblocks against the inode. */
+ if (count + acount != be64_to_cpu(dip->di_nblocks))
+ xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino, NULL);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_inode_xref(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_dinode *dip)
+{
+ struct xfs_owner_info oinfo;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agno = XFS_INO_TO_AGNO(sc->mp, ino);
+ agbno = XFS_INO_TO_AGBNO(sc->mp, ino);
+
+ error = xfs_scrub_ag_init(sc, agno, &sc->sa);
+ if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_inode_xref_finobt(sc, ino);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xfs_scrub_inode_xref_bmap(sc, dip);
+
+ xfs_scrub_ag_free(sc, &sc->sa);
+}
+
+/*
+ * If the reflink iflag disagrees with a scan for shared data fork extents,
+ * either flag an error (shared extents w/ no flag) or a preen (flag set w/o
+ * any shared extents). We already checked for reflink iflag set on a non
+ * reflink filesystem.
+ */
+static void
+xfs_scrub_inode_check_reflink_iflag(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = sc->mp;
+ bool has_shared;
+ int error;
+
+ if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ return;
+
+ error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
+ &has_shared);
+ if (!xfs_scrub_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
+ XFS_INO_TO_AGBNO(mp, ino), &error))
+ return;
+ if (xfs_is_reflink_inode(sc->ip) && !has_shared)
+ xfs_scrub_ino_set_preen(sc, ino, bp);
+ else if (!xfs_is_reflink_inode(sc->ip) && has_shared)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+}
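
/*
 * The decision above in miniature -- a hedged sketch with made-up names
 * (scrub_verdict and check_reflink_iflag are not kernel identifiers):
 * a flag/scan disagreement is only dangerous in one direction.
 */
#include <stdbool.h>

enum scrub_verdict { VERDICT_OK, VERDICT_PREEN, VERDICT_CORRUPT };

static enum scrub_verdict check_reflink_iflag(bool iflag_set, bool has_shared)
{
	if (iflag_set && !has_shared)
		return VERDICT_PREEN;	/* stale flag; clearing is an optimization */
	if (!iflag_set && has_shared)
		return VERDICT_CORRUPT;	/* shared blocks without CoW protection */
	return VERDICT_OK;
}
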
+
/* Scrub an inode. */
int
xfs_scrub_inode(
struct xfs_scrub_context *sc)
{
struct xfs_dinode di;
- struct xfs_mount *mp = sc->mp;
struct xfs_buf *bp = NULL;
struct xfs_dinode *dip;
xfs_ino_t ino;
-
- bool has_shared;
int error = 0;
/* Did we get the in-core inode, or are we doing this manually? */
@@ -603,19 +754,14 @@ xfs_scrub_inode(
goto out;
/*
- * Does this inode have the reflink flag set but no shared extents?
- * Set the preening flag if this is the case.
+ * Look for discrepancies between file's data blocks and the reflink
+ * iflag. We already checked the iflag against the file mode when
+ * we scrubbed the dinode.
*/
- if (xfs_is_reflink_inode(sc->ip)) {
- error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
- &has_shared);
- if (!xfs_scrub_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
- XFS_INO_TO_AGBNO(mp, ino), &error))
- goto out;
- if (!has_shared)
- xfs_scrub_ino_set_preen(sc, ino, bp);
- }
+ if (S_ISREG(VFS_I(sc->ip)->i_mode))
+ xfs_scrub_inode_check_reflink_iflag(sc, ino, bp);
+ xfs_scrub_inode_xref(sc, ino, dip);
out:
if (bp)
xfs_trans_brelse(sc->tp, bp);
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index 63a25334fc83..0d3851410c74 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -169,9 +169,9 @@ xfs_scrub_parent_validate(
* immediate inactive cleanup of the inode.
*/
error = xfs_iget(mp, sc->tp, dnum, 0, 0, &dp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
- if (dp == sc->ip) {
+ if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_rele;
}
@@ -185,7 +185,7 @@ xfs_scrub_parent_validate(
*/
if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) {
error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0,
+ if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
&error))
goto out_unlock;
if (nlink != expected_nlink)
@@ -205,7 +205,7 @@ xfs_scrub_parent_validate(
/* Go looking for our dentry. */
error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_unlock;
/* Drop the parent lock, relock this inode. */
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 3d9037eceaf1..51daa4ae2627 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -67,13 +67,6 @@ xfs_scrub_setup_quota(
{
uint dqtype;
- /*
- * If userspace gave us an AG number or inode data, they don't
- * know what they're doing. Get out.
- */
- if (sc->sm->sm_agno || sc->sm->sm_ino || sc->sm->sm_gen)
- return -EINVAL;
-
dqtype = xfs_scrub_quota_to_dqtype(sc);
if (dqtype == 0)
return -EINVAL;
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 2f88a8d44bd0..400f1561cd3d 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -31,6 +31,7 @@
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
+#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -50,6 +51,307 @@ xfs_scrub_setup_ag_refcountbt(
/* Reference count btree scrubber. */
+/*
+ * Confirming Reference Counts via Reverse Mappings
+ *
+ * We want to count the reverse mappings overlapping a refcount record
+ * (bno, len, refcount), allowing for the possibility that some of the
+ * overlap may come from smaller adjoining reverse mappings, while some
+ * comes from single extents which overlap the range entirely. The
+ * outer loop is as follows:
+ *
+ * 1. For all reverse mappings overlapping the refcount extent,
+ * a. If a given rmap completely overlaps, mark it as seen.
+ * b. Otherwise, record the fragment (in agbno order) for later
+ * processing.
+ *
+ * Once we've seen all the rmaps, we know that for all blocks in the
+ * refcount record we want to find $refcount owners and we've already
+ * visited $seen extents that overlap all the blocks. Therefore, we
+ * need to find ($refcount - $seen) owners for every block in the
+ * extent; call that quantity $target_nr. Proceed as follows:
+ *
+ * 2. Pull the first $target_nr fragments from the list; all of them
+ * should start at or before the start of the extent.
+ * Call this subset of fragments the working set.
+ * 3. Until there are no more unprocessed fragments,
+ * a. Find the shortest fragments in the set and remove them.
+ * b. Note the block number of the end of these fragments.
+ * c. Pull the same number of fragments from the list. All of these
+ * fragments should start at the block number recorded in the
+ * previous step.
+ * d. Put those fragments in the set.
+ * 4. Check that there are $target_nr fragments remaining in the list,
+ * and that they all end at or beyond the end of the refcount extent.
+ *
+ * If the refcount is correct, all the check conditions in the algorithm
+ * should always hold true. If not, the refcount is incorrect.
+ */
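
/*
 * As a mental model for the worklist algorithm described above, it is
 * essentially equivalent to this brute-force sweep (illustrative
 * userspace code, not a kernel API): every block of the refcount extent
 * must be covered by exactly (refcount - seen) of the saved fragments.
 * The kernel version reaches the same verdict in a single ordered pass
 * without visiting each block.
 */
#include <stdbool.h>

struct frag { unsigned long start; unsigned long count; };

static bool frags_cover_exactly(unsigned long bno, unsigned long len,
				unsigned int target_nr,
				const struct frag *frags, int nr_frags)
{
	for (unsigned long b = bno; b < bno + len; b++) {
		unsigned int hits = 0;
		int i;

		for (i = 0; i < nr_frags; i++)
			if (frags[i].start <= b &&
			    b - frags[i].start < frags[i].count)
				hits++;
		if (hits != target_nr)
			return false;	/* refcount record is inconsistent */
	}
	return true;
}
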
+struct xfs_scrub_refcnt_frag {
+ struct list_head list;
+ struct xfs_rmap_irec rm;
+};
+
+struct xfs_scrub_refcnt_check {
+ struct xfs_scrub_context *sc;
+ struct list_head fragments;
+
+ /* refcount extent we're examining */
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ xfs_nlink_t refcount;
+
+ /* number of owners seen */
+ xfs_nlink_t seen;
+};
+
+/*
+ * Decide if the given rmap is large enough that we can redeem it
+ * towards refcount verification now, or if it's a fragment, in
+ * which case we'll hang onto it in the hopes that we'll later
+ * discover that we've collected exactly the correct number of
+ * fragments as the refcountbt says we should have.
+ */
+STATIC int
+xfs_scrub_refcountbt_rmap_check(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xfs_scrub_refcnt_check *refchk = priv;
+ struct xfs_scrub_refcnt_frag *frag;
+ xfs_agblock_t rm_last;
+ xfs_agblock_t rc_last;
+ int error = 0;
+
+ if (xfs_scrub_should_terminate(refchk->sc, &error))
+ return error;
+
+ rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
+ rc_last = refchk->bno + refchk->len - 1;
+
+ /* Confirm that a single-owner refc extent is a CoW stage. */
+ if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
+ xfs_scrub_btree_xref_set_corrupt(refchk->sc, cur, 0);
+ return 0;
+ }
+
+ if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
+ /*
+ * The rmap overlaps the refcount record, so we can confirm
+ * one refcount owner seen.
+ */
+ refchk->seen++;
+ } else {
+ /*
+ * This rmap covers only part of the refcount record, so
+ * save the fragment for later processing. If the rmapbt
+ * is healthy, each rmap_irec we see will be in agbno order,
+ * so we don't need insertion sort here.
+ */
+ frag = kmem_alloc(sizeof(struct xfs_scrub_refcnt_frag),
+ KM_MAYFAIL | KM_NOFS);
+ if (!frag)
+ return -ENOMEM;
+ memcpy(&frag->rm, rec, sizeof(frag->rm));
+ list_add_tail(&frag->list, &refchk->fragments);
+ }
+
+ return 0;
+}
+
+/*
+ * Given a bunch of rmap fragments, iterate through them, keeping
+ * a running tally of the refcount. If this ever deviates from
+ * what we expect (which is the refcountbt's refcount minus the
+ * number of extents that totally covered the refcountbt extent),
+ * we have a refcountbt error.
+ */
+STATIC void
+xfs_scrub_refcountbt_process_rmap_fragments(
+ struct xfs_scrub_refcnt_check *refchk)
+{
+ struct list_head worklist;
+ struct xfs_scrub_refcnt_frag *frag;
+ struct xfs_scrub_refcnt_frag *n;
+ xfs_agblock_t bno;
+ xfs_agblock_t rbno;
+ xfs_agblock_t next_rbno;
+ xfs_nlink_t nr;
+ xfs_nlink_t target_nr;
+
+ target_nr = refchk->refcount - refchk->seen;
+ if (target_nr == 0)
+ return;
+
+ /*
+ * There are (refchk->refcount - refchk->seen)
+ * references we haven't found yet. Pull that many off the
+ * fragment list and figure out where the smallest rmap ends
+ * (and therefore the next rmap should start). All the rmaps
+ * we pull off should start at or before the beginning of the
+ * refcount record's range.
+ */
+ INIT_LIST_HEAD(&worklist);
+ rbno = NULLAGBLOCK;
+ nr = 1;
+
+ /* Make sure the fragments actually /are/ in agbno order. */
+ bno = 0;
+ list_for_each_entry(frag, &refchk->fragments, list) {
+ if (frag->rm.rm_startblock < bno)
+ goto done;
+ bno = frag->rm.rm_startblock;
+ }
+
+ /*
+ * Find all the rmaps that start at or before the refc extent,
+ * and put them on the worklist.
+ */
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+ if (frag->rm.rm_startblock > refchk->bno)
+ goto done;
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (bno < rbno)
+ rbno = bno;
+ list_move_tail(&frag->list, &worklist);
+ if (nr == target_nr)
+ break;
+ nr++;
+ }
+
+ /*
+ * We should have found exactly $target_nr rmap fragments starting
+ * at or before the refcount extent.
+ */
+ if (nr != target_nr)
+ goto done;
+
+ while (!list_empty(&refchk->fragments)) {
+ /* Discard any fragments ending at rbno from the worklist. */
+ nr = 0;
+ next_rbno = NULLAGBLOCK;
+ list_for_each_entry_safe(frag, n, &worklist, list) {
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (bno != rbno) {
+ if (bno < next_rbno)
+ next_rbno = bno;
+ continue;
+ }
+ list_del(&frag->list);
+ kmem_free(frag);
+ nr++;
+ }
+
+ /* Try to add nr rmaps starting at rbno to the worklist. */
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (frag->rm.rm_startblock != rbno)
+ goto done;
+ list_move_tail(&frag->list, &worklist);
+ if (next_rbno > bno)
+ next_rbno = bno;
+ nr--;
+ if (nr == 0)
+ break;
+ }
+
+ /*
+ * If we get here and nr > 0, this means that we added fewer
+ * items to the worklist than we discarded because the fragment
+ * list ran out of items. Therefore, we cannot maintain the
+ * required refcount. Something is wrong, so we're done.
+ */
+ if (nr)
+ goto done;
+
+ rbno = next_rbno;
+ }
+
+ /*
+ * Make sure the last extent we processed ends at or beyond
+ * the end of the refcount extent.
+ */
+ if (rbno < refchk->bno + refchk->len)
+ goto done;
+
+ /* Actually record us having seen the remaining refcount. */
+ refchk->seen = refchk->refcount;
+done:
+ /* Delete fragments and work list. */
+ list_for_each_entry_safe(frag, n, &worklist, list) {
+ list_del(&frag->list);
+ kmem_free(frag);
+ }
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+ list_del(&frag->list);
+ kmem_free(frag);
+ }
+}
+
+/* Use the rmap entries covering this extent to verify the refcount. */
+STATIC void
+xfs_scrub_refcountbt_xref_rmap(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ xfs_nlink_t refcount)
+{
+ struct xfs_scrub_refcnt_check refchk = {
+ .sc = sc,
+ .bno = bno,
+ .len = len,
+ .refcount = refcount,
+ .seen = 0,
+ };
+ struct xfs_rmap_irec low;
+ struct xfs_rmap_irec high;
+ struct xfs_scrub_refcnt_frag *frag;
+ struct xfs_scrub_refcnt_frag *n;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ /* Cross-reference with the rmapbt to confirm the refcount. */
+ memset(&low, 0, sizeof(low));
+ low.rm_startblock = bno;
+ memset(&high, 0xFF, sizeof(high));
+ high.rm_startblock = bno + len - 1;
+
+ INIT_LIST_HEAD(&refchk.fragments);
+ error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
+ &xfs_scrub_refcountbt_rmap_check, &refchk);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ goto out_free;
+
+ xfs_scrub_refcountbt_process_rmap_fragments(&refchk);
+ if (refcount != refchk.seen)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+
+out_free:
+ list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
+ list_del(&frag->list);
+ kmem_free(frag);
+ }
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_refcountbt_xref(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ xfs_nlink_t refcount)
+{
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, len);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
+ xfs_scrub_refcountbt_xref_rmap(sc, agbno, len, refcount);
+}
+
/* Scrub a refcountbt record. */
STATIC int
xfs_scrub_refcountbt_rec(
@@ -57,6 +359,7 @@ xfs_scrub_refcountbt_rec(
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agblock_t *cow_blocks = bs->private;
xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
xfs_agblock_t bno;
xfs_extlen_t len;
@@ -72,6 +375,8 @@ xfs_scrub_refcountbt_rec(
has_cowflag = (bno & XFS_REFC_COW_START);
if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ if (has_cowflag)
+ (*cow_blocks) += len;
/* Check the extent. */
bno &= ~XFS_REFC_COW_START;
@@ -83,17 +388,128 @@ xfs_scrub_refcountbt_rec(
if (refcount == 0)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xfs_scrub_refcountbt_xref(bs->sc, bno, len, refcount);
+
return error;
}
+/* Make sure we have as many refc blocks as the rmap says. */
+STATIC void
+xfs_scrub_refcount_xref_rmap(
+ struct xfs_scrub_context *sc,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t cow_blocks)
+{
+ xfs_extlen_t refcbt_blocks = 0;
+ xfs_filblks_t blocks;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ /* Check that we saw as many refcbt blocks as the rmap knows about. */
+ error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
+ if (!xfs_scrub_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
+ return;
+ error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+ &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (blocks != refcbt_blocks)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+
+ /* Check that we saw as many cow blocks as the rmap knows about. */
+ xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
+ error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+ &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (blocks != cow_blocks)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+}
+
/* Scrub the refcount btree for some AG. */
int
xfs_scrub_refcountbt(
struct xfs_scrub_context *sc)
{
struct xfs_owner_info oinfo;
+ xfs_agblock_t cow_blocks = 0;
+ int error;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
- return xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
- &oinfo, NULL);
+ error = xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
+ &oinfo, &cow_blocks);
+ if (error)
+ return error;
+
+ xfs_scrub_refcount_xref_rmap(sc, &oinfo, cow_blocks);
+
+ return 0;
+}
+
+/* xref check that a cow staging extent is marked in the refcountbt. */
+void
+xfs_scrub_xref_is_cow_staging(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ struct xfs_refcount_irec rc;
+ bool has_cowflag;
+ int has_refcount;
+ int error;
+
+ if (!sc->sa.refc_cur)
+ return;
+
+ /* Find the CoW staging extent. */
+ error = xfs_refcount_lookup_le(sc->sa.refc_cur,
+ agbno + XFS_REFC_COW_START, &has_refcount);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (!has_refcount) {
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ return;
+ }
+
+ error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (!has_refcount) {
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ return;
+ }
+
+ /* CoW flag must be set, refcount must be 1. */
+ has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
+ if (!has_cowflag || rc.rc_refcount != 1)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+
+ /* Must be at least as long as what was passed in */
+ if (rc.rc_blockcount < len)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+}
+
+/*
+ * xref check that the extent is not shared. Only file data blocks
+ * can have multiple owners.
+ */
+void
+xfs_scrub_xref_is_not_shared(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ bool shared;
+ int error;
+
+ if (!sc->sa.refc_cur)
+ return;
+
+ error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (shared)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index 97846c424690..8f2a7c3ff455 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -32,6 +32,7 @@
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
+#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -51,6 +52,61 @@ xfs_scrub_setup_ag_rmapbt(
/* Reverse-mapping scrubber. */
+/* Cross-reference a rmap against the refcount btree. */
+STATIC void
+xfs_scrub_rmapbt_xref_refc(
+ struct xfs_scrub_context *sc,
+ struct xfs_rmap_irec *irec)
+{
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ bool non_inode;
+ bool is_bmbt;
+ bool is_attr;
+ bool is_unwritten;
+ int error;
+
+ if (!sc->sa.refc_cur)
+ return;
+
+ non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
+ is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
+ is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
+ is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;
+
+ /* If this is shared, must be a data fork extent. */
+ error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
+ irec->rm_blockcount, &fbno, &flen, false);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_rmapbt_xref(
+ struct xfs_scrub_context *sc,
+ struct xfs_rmap_irec *irec)
+{
+ xfs_agblock_t agbno = irec->rm_startblock;
+ xfs_extlen_t len = irec->rm_blockcount;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, len);
+ if (irec->rm_owner == XFS_RMAP_OWN_INODES)
+ xfs_scrub_xref_is_inode_chunk(sc, agbno, len);
+ else
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
+ if (irec->rm_owner == XFS_RMAP_OWN_COW)
+ xfs_scrub_xref_is_cow_staging(sc, irec->rm_startblock,
+ irec->rm_blockcount);
+ else
+ xfs_scrub_rmapbt_xref_refc(sc, irec);
+}
+
/* Scrub an rmapbt record. */
STATIC int
xfs_scrub_rmapbt_rec(
@@ -121,6 +177,8 @@ xfs_scrub_rmapbt_rec(
irec.rm_owner > XFS_RMAP_OWN_FS)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
}
+
+ xfs_scrub_rmapbt_xref(bs->sc, &irec);
out:
return error;
}
@@ -136,3 +194,68 @@ xfs_scrub_rmapbt(
return xfs_scrub_btree(sc, sc->sa.rmap_cur, xfs_scrub_rmapbt_rec,
&oinfo, NULL);
}
+
+/* xref check whether the extent is or is not owned by a given owner */
+static inline void
+xfs_scrub_xref_check_owner(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo,
+ bool should_have_rmap)
+{
+ bool has_rmap;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
+ &has_rmap);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (has_rmap != should_have_rmap)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+}
+
+/* xref check that the extent is owned by a given owner */
+void
+xfs_scrub_xref_is_owned_by(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
+{
+ xfs_scrub_xref_check_owner(sc, bno, len, oinfo, true);
+}
+
+/* xref check that the extent is not owned by a given owner */
+void
+xfs_scrub_xref_is_not_owned_by(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
+{
+ xfs_scrub_xref_check_owner(sc, bno, len, oinfo, false);
+}
+
+/* xref check that the extent has no reverse mapping at all */
+void
+xfs_scrub_xref_has_no_owner(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len)
+{
+ bool has_rmap;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (has_rmap)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+}
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
index c6fedb698008..26390991369a 100644
--- a/fs/xfs/scrub/rtbitmap.c
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -43,22 +43,14 @@ xfs_scrub_setup_rt(
struct xfs_scrub_context *sc,
struct xfs_inode *ip)
{
- struct xfs_mount *mp = sc->mp;
- int error = 0;
-
- /*
- * If userspace gave us an AG number or inode data, they don't
- * know what they're doing. Get out.
- */
- if (sc->sm->sm_agno || sc->sm->sm_ino || sc->sm->sm_gen)
- return -EINVAL;
+ int error;
error = xfs_scrub_setup_fs(sc, ip);
if (error)
return error;
sc->ilock_flags = XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP;
- sc->ip = mp->m_rbmip;
+ sc->ip = sc->mp->m_rbmip;
xfs_ilock(sc->ip, sc->ilock_flags);
return 0;
@@ -106,3 +98,26 @@ xfs_scrub_rtsummary(
/* XXX: implement this some day */
return -ENOENT;
}
+
+
+/* xref check that the extent is not free in the rtbitmap */
+void
+xfs_scrub_xref_is_used_rt_space(
+ struct xfs_scrub_context *sc,
+ xfs_rtblock_t fsbno,
+ xfs_extlen_t len)
+{
+ bool is_free;
+ int error;
+
+ xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
+ error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, fsbno, len,
+ &is_free);
+ if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ goto out_unlock;
+ if (is_free)
+ xfs_scrub_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino,
+ NULL);
+out_unlock:
+ xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
+}
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index ab3aef2ae823..26c75967a072 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -110,6 +110,16 @@
* structure itself is corrupt, the CORRUPT flag will be set. If
* the metadata is correct but otherwise suboptimal, the PREEN flag
* will be set.
+ *
+ * We perform secondary validation of filesystem metadata by
+ * cross-referencing every record with all other available metadata.
+ * For example, for block mapping extents, we verify that there are no
+ * records in the free space and inode btrees corresponding to that
+ * space extent and that there is a corresponding entry in the reverse
+ * mapping btree. Inconsistent metadata is noted by setting the
+ * XCORRUPT flag; btree query function errors are noted by setting the
+ * XFAIL flag and deleting the cursor to prevent further attempts to
+ * cross-reference with a defective btree.
*/
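
/*
 * A small stand-alone model of that convention (plain-C stand-ins, not
 * the kernel types): a failed cross-reference query notes XFAIL and
 * tears down the cursor so later checks silently skip it, while a
 * successful query that disagrees with the record being scrubbed notes
 * XCORRUPT.
 */
#include <stdbool.h>
#include <stdlib.h>

#define FLAG_XCORRUPT	(1U << 0)
#define FLAG_XFAIL	(1U << 1)

struct cursor { int unused; };	/* stand-in for struct xfs_btree_cur */

/* plays the role of xfs_scrub_should_check_xref() */
static bool should_check_xref(unsigned int *flags, int *error,
			      struct cursor **curp)
{
	if (*error == 0)
		return true;
	*flags |= FLAG_XFAIL;	/* query error: flag it ... */
	free(*curp);		/* ... and retire the broken cursor */
	*curp = NULL;
	*error = 0;
	return false;
}

static void xref_example(unsigned int *flags, struct cursor **curp,
			 int query_error, bool record_agrees)
{
	if (!*curp)
		return;		/* cursor already torn down: skip quietly */
	if (!should_check_xref(flags, &query_error, curp))
		return;
	if (!record_agrees)
		*flags |= FLAG_XCORRUPT;
}
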
/*
@@ -128,8 +138,6 @@ xfs_scrub_probe(
{
int error = 0;
- if (sc->sm->sm_ino || sc->sm->sm_agno)
- return -EINVAL;
if (xfs_scrub_should_terminate(sc, &error))
return error;
@@ -151,7 +159,8 @@ xfs_scrub_teardown(
sc->tp = NULL;
}
if (sc->ip) {
- xfs_iunlock(sc->ip, sc->ilock_flags);
+ if (sc->ilock_flags)
+ xfs_iunlock(sc->ip, sc->ilock_flags);
if (sc->ip != ip_in &&
!xfs_internal_inum(sc->mp, sc->ip->i_ino))
iput(VFS_I(sc->ip));
@@ -167,106 +176,130 @@ xfs_scrub_teardown(
/* Scrubbing dispatch. */
static const struct xfs_scrub_meta_ops meta_scrub_ops[] = {
- { /* ioctl presence test */
+ [XFS_SCRUB_TYPE_PROBE] = { /* ioctl presence test */
+ .type = ST_NONE,
.setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_probe,
},
- { /* superblock */
- .setup = xfs_scrub_setup_ag_header,
+ [XFS_SCRUB_TYPE_SB] = { /* superblock */
+ .type = ST_PERAG,
+ .setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_superblock,
},
- { /* agf */
- .setup = xfs_scrub_setup_ag_header,
+ [XFS_SCRUB_TYPE_AGF] = { /* agf */
+ .type = ST_PERAG,
+ .setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_agf,
},
- { /* agfl */
- .setup = xfs_scrub_setup_ag_header,
+ [XFS_SCRUB_TYPE_AGFL] = { /* agfl */
+ .type = ST_PERAG,
+ .setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_agfl,
},
- { /* agi */
- .setup = xfs_scrub_setup_ag_header,
+ [XFS_SCRUB_TYPE_AGI] = { /* agi */
+ .type = ST_PERAG,
+ .setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_agi,
},
- { /* bnobt */
+ [XFS_SCRUB_TYPE_BNOBT] = { /* bnobt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_allocbt,
.scrub = xfs_scrub_bnobt,
},
- { /* cntbt */
+ [XFS_SCRUB_TYPE_CNTBT] = { /* cntbt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_allocbt,
.scrub = xfs_scrub_cntbt,
},
- { /* inobt */
+ [XFS_SCRUB_TYPE_INOBT] = { /* inobt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_iallocbt,
.scrub = xfs_scrub_inobt,
},
- { /* finobt */
+ [XFS_SCRUB_TYPE_FINOBT] = { /* finobt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_iallocbt,
.scrub = xfs_scrub_finobt,
.has = xfs_sb_version_hasfinobt,
},
- { /* rmapbt */
+ [XFS_SCRUB_TYPE_RMAPBT] = { /* rmapbt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_rmapbt,
.scrub = xfs_scrub_rmapbt,
.has = xfs_sb_version_hasrmapbt,
},
- { /* refcountbt */
+ [XFS_SCRUB_TYPE_REFCNTBT] = { /* refcountbt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_refcountbt,
.scrub = xfs_scrub_refcountbt,
.has = xfs_sb_version_hasreflink,
},
- { /* inode record */
+ [XFS_SCRUB_TYPE_INODE] = { /* inode record */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_inode,
.scrub = xfs_scrub_inode,
},
- { /* inode data fork */
+ [XFS_SCRUB_TYPE_BMBTD] = { /* inode data fork */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_inode_bmap,
.scrub = xfs_scrub_bmap_data,
},
- { /* inode attr fork */
+ [XFS_SCRUB_TYPE_BMBTA] = { /* inode attr fork */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_inode_bmap,
.scrub = xfs_scrub_bmap_attr,
},
- { /* inode CoW fork */
+ [XFS_SCRUB_TYPE_BMBTC] = { /* inode CoW fork */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_inode_bmap,
.scrub = xfs_scrub_bmap_cow,
},
- { /* directory */
+ [XFS_SCRUB_TYPE_DIR] = { /* directory */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_directory,
.scrub = xfs_scrub_directory,
},
- { /* extended attributes */
+ [XFS_SCRUB_TYPE_XATTR] = { /* extended attributes */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_xattr,
.scrub = xfs_scrub_xattr,
},
- { /* symbolic link */
+ [XFS_SCRUB_TYPE_SYMLINK] = { /* symbolic link */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_symlink,
.scrub = xfs_scrub_symlink,
},
- { /* parent pointers */
+ [XFS_SCRUB_TYPE_PARENT] = { /* parent pointers */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_parent,
.scrub = xfs_scrub_parent,
},
- { /* realtime bitmap */
+ [XFS_SCRUB_TYPE_RTBITMAP] = { /* realtime bitmap */
+ .type = ST_FS,
.setup = xfs_scrub_setup_rt,
.scrub = xfs_scrub_rtbitmap,
.has = xfs_sb_version_hasrealtime,
},
- { /* realtime summary */
+ [XFS_SCRUB_TYPE_RTSUM] = { /* realtime summary */
+ .type = ST_FS,
.setup = xfs_scrub_setup_rt,
.scrub = xfs_scrub_rtsummary,
.has = xfs_sb_version_hasrealtime,
},
- { /* user quota */
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
+ [XFS_SCRUB_TYPE_UQUOTA] = { /* user quota */
+ .type = ST_FS,
+ .setup = xfs_scrub_setup_quota,
+ .scrub = xfs_scrub_quota,
},
- { /* group quota */
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
+ [XFS_SCRUB_TYPE_GQUOTA] = { /* group quota */
+ .type = ST_FS,
+ .setup = xfs_scrub_setup_quota,
+ .scrub = xfs_scrub_quota,
},
- { /* project quota */
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
+ [XFS_SCRUB_TYPE_PQUOTA] = { /* project quota */
+ .type = ST_FS,
+ .setup = xfs_scrub_setup_quota,
+ .scrub = xfs_scrub_quota,
},
};
@@ -284,44 +317,56 @@ xfs_scrub_experimental_warning(
"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
}
-/* Dispatch metadata scrubbing. */
-int
-xfs_scrub_metadata(
- struct xfs_inode *ip,
+static int
+xfs_scrub_validate_inputs(
+ struct xfs_mount *mp,
struct xfs_scrub_metadata *sm)
{
- struct xfs_scrub_context sc;
- struct xfs_mount *mp = ip->i_mount;
+ int error;
const struct xfs_scrub_meta_ops *ops;
- bool try_harder = false;
- int error = 0;
-
- trace_xfs_scrub_start(ip, sm, error);
-
- /* Forbidden if we are shut down or mounted norecovery. */
- error = -ESHUTDOWN;
- if (XFS_FORCED_SHUTDOWN(mp))
- goto out;
- error = -ENOTRECOVERABLE;
- if (mp->m_flags & XFS_MOUNT_NORECOVERY)
- goto out;
- /* Check our inputs. */
error = -EINVAL;
+ /* Check our inputs. */
sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
if (sm->sm_flags & ~XFS_SCRUB_FLAGS_IN)
goto out;
+ /* sm_reserved[] must be zero */
if (memchr_inv(sm->sm_reserved, 0, sizeof(sm->sm_reserved)))
goto out;
- /* Do we know about this type of metadata? */
error = -ENOENT;
+ /* Do we know about this type of metadata? */
if (sm->sm_type >= XFS_SCRUB_TYPE_NR)
goto out;
ops = &meta_scrub_ops[sm->sm_type];
- if (ops->scrub == NULL)
+ if (ops->setup == NULL || ops->scrub == NULL)
goto out;
+ /* Does this fs even support this type of metadata? */
+ if (ops->has && !ops->has(&mp->m_sb))
+ goto out;
+
+ error = -EINVAL;
+ /* restricting fields must be appropriate for type */
+ switch (ops->type) {
+ case ST_NONE:
+ case ST_FS:
+ if (sm->sm_ino || sm->sm_gen || sm->sm_agno)
+ goto out;
+ break;
+ case ST_PERAG:
+ if (sm->sm_ino || sm->sm_gen ||
+ sm->sm_agno >= mp->m_sb.sb_agcount)
+ goto out;
+ break;
+ case ST_INODE:
+ if (sm->sm_agno || (sm->sm_gen && !sm->sm_ino))
+ goto out;
+ break;
+ default:
+ goto out;
+ }
+ error = -EOPNOTSUPP;
/*
* We won't scrub any filesystem that doesn't have the ability
* to record unwritten extents. The option was made default in
@@ -331,20 +376,46 @@ xfs_scrub_metadata(
* We also don't support v1-v3 filesystems, which aren't
* mountable.
*/
- error = -EOPNOTSUPP;
if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
goto out;
- /* Does this fs even support this type of metadata? */
- error = -ENOENT;
- if (ops->has && !ops->has(&mp->m_sb))
- goto out;
-
/* We don't know how to repair anything yet. */
- error = -EOPNOTSUPP;
if (sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
goto out;
+ error = 0;
+out:
+ return error;
+}
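
/*
 * The switch above, restated as a stand-alone predicate (illustrative
 * userspace code mirroring the ST_* rules, not the kernel function):
 * which of the ioctl's restricting fields may be nonzero depends
 * entirely on the scrub type.
 */
#include <stdbool.h>
#include <stdint.h>

enum st_type { ST_NONE = 1, ST_PERAG, ST_FS, ST_INODE };

static bool scrub_inputs_valid(enum st_type type, uint64_t ino,
			       uint32_t gen, uint32_t agno, uint32_t agcount)
{
	switch (type) {
	case ST_NONE:
	case ST_FS:	/* whole-fs checks take no target at all */
		return !ino && !gen && !agno;
	case ST_PERAG:	/* an existing AG number, nothing else */
		return !ino && !gen && agno < agcount;
	case ST_INODE:	/* inode (optionally with gen), never an AG */
		return !agno && !(gen && !ino);
	default:
		return false;
	}
}
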
+
+/* Dispatch metadata scrubbing. */
+int
+xfs_scrub_metadata(
+ struct xfs_inode *ip,
+ struct xfs_scrub_metadata *sm)
+{
+ struct xfs_scrub_context sc;
+ struct xfs_mount *mp = ip->i_mount;
+ bool try_harder = false;
+ int error = 0;
+
+ BUILD_BUG_ON(sizeof(meta_scrub_ops) !=
+ (sizeof(struct xfs_scrub_meta_ops) * XFS_SCRUB_TYPE_NR));
+
+ trace_xfs_scrub_start(ip, sm, error);
+
+ /* Forbidden if we are shut down or mounted norecovery. */
+ error = -ESHUTDOWN;
+ if (XFS_FORCED_SHUTDOWN(mp))
+ goto out;
+ error = -ENOTRECOVERABLE;
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+ goto out;
+
+ error = xfs_scrub_validate_inputs(mp, sm);
+ if (error)
+ goto out;
+
xfs_scrub_experimental_warning(mp);
retry_op:
@@ -352,7 +423,7 @@ retry_op:
memset(&sc, 0, sizeof(sc));
sc.mp = ip->i_mount;
sc.sm = sm;
- sc.ops = ops;
+ sc.ops = &meta_scrub_ops[sm->sm_type];
sc.try_harder = try_harder;
sc.sa.agno = NULLAGNUMBER;
error = sc.ops->setup(&sc, ip);
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index e9ec041cf713..0d92af86f67a 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -22,6 +22,14 @@
struct xfs_scrub_context;
+/* Type info and names for the scrub types. */
+enum xfs_scrub_type {
+ ST_NONE = 1, /* disabled */
+ ST_PERAG, /* per-AG metadata */
+ ST_FS, /* per-FS metadata */
+ ST_INODE, /* per-inode metadata */
+};
+
struct xfs_scrub_meta_ops {
/* Acquire whatever resources are needed for the operation. */
int (*setup)(struct xfs_scrub_context *,
@@ -32,6 +40,9 @@ struct xfs_scrub_meta_ops {
/* Decide if we even have this piece of metadata. */
bool (*has)(struct xfs_sb *);
+
+ /* type describing required/allowed inputs */
+ enum xfs_scrub_type type;
};
/* Buffer pointers and btree cursors for an entire AG. */
@@ -112,4 +123,30 @@ xfs_scrub_quota(struct xfs_scrub_context *sc)
}
#endif
+/* cross-referencing helpers */
+void xfs_scrub_xref_is_used_space(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len);
+void xfs_scrub_xref_is_not_inode_chunk(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len);
+void xfs_scrub_xref_is_inode_chunk(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len);
+void xfs_scrub_xref_is_owned_by(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len,
+ struct xfs_owner_info *oinfo);
+void xfs_scrub_xref_is_not_owned_by(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len,
+ struct xfs_owner_info *oinfo);
+void xfs_scrub_xref_has_no_owner(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len);
+void xfs_scrub_xref_is_cow_staging(struct xfs_scrub_context *sc,
+ xfs_agblock_t bno, xfs_extlen_t len);
+void xfs_scrub_xref_is_not_shared(struct xfs_scrub_context *sc,
+ xfs_agblock_t bno, xfs_extlen_t len);
+#ifdef CONFIG_XFS_RT
+void xfs_scrub_xref_is_used_rt_space(struct xfs_scrub_context *sc,
+ xfs_rtblock_t rtbno, xfs_extlen_t len);
+#else
+# define xfs_scrub_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
+#endif
+
#endif /* __XFS_SCRUB_SCRUB_H__ */
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index c4ebfb5c1ee8..4dc896852bf0 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -50,7 +50,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_class,
__entry->flags = sm->sm_flags;
__entry->error = error;
),
- TP_printk("dev %d:%d ino %llu type %u agno %u inum %llu gen %u flags 0x%x error %d",
+ TP_printk("dev %d:%d ino 0x%llx type %u agno %u inum %llu gen %u flags 0x%x error %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->type,
@@ -90,7 +90,7 @@ TRACE_EVENT(xfs_scrub_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u agno %u agbno %u error %d ret_ip %pF",
+ TP_printk("dev %d:%d type %u agno %u agbno %u error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->agno,
@@ -121,7 +121,7 @@ TRACE_EVENT(xfs_scrub_file_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu fork %d type %u offset %llu error %d ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %u offset %llu error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
@@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_block_error_class,
__entry->bno = bno;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u agno %u agbno %u ret_ip %pF",
+ TP_printk("dev %d:%d type %u agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->agno,
@@ -207,7 +207,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_ino_error_class,
__entry->bno = bno;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu type %u agno %u agbno %u ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx type %u agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->type,
@@ -246,7 +246,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_fblock_error_class,
__entry->offset = offset;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu fork %d type %u offset %llu ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %u offset %llu ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
@@ -277,7 +277,7 @@ TRACE_EVENT(xfs_scrub_incomplete,
__entry->type = sc->sm->sm_type;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u ret_ip %pF",
+ TP_printk("dev %d:%d type %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->ret_ip)
@@ -311,7 +311,7 @@ TRACE_EVENT(xfs_scrub_btree_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pF",
+ TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->btnum,
@@ -354,7 +354,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu fork %d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
@@ -393,7 +393,7 @@ TRACE_EVENT(xfs_scrub_btree_error,
__entry->ptr = cur->bc_ptrs[level];
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pF",
+ TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->btnum,
@@ -433,7 +433,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_error,
__entry->ptr = cur->bc_ptrs[level];
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu fork %d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
@@ -491,6 +491,28 @@ DEFINE_EVENT(xfs_scrub_sbtree_class, name, \
DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_rec);
DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_key);
+TRACE_EVENT(xfs_scrub_xref_error,
+ TP_PROTO(struct xfs_scrub_context *sc, int error, void *ret_ip),
+ TP_ARGS(sc, error, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(int, error)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->type = sc->sm->sm_type;
+ __entry->error = error;
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d type %u xref error %d ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->error,
+ __entry->ret_ip)
+);
+
#endif /* _TRACE_XFS_SCRUB_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 21e2d70884e1..9c6a830da0ee 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -390,6 +390,19 @@ xfs_map_blocks(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
+ /*
+ * Truncate can race with writeback since writeback doesn't take the
+ * iolock and truncate decreases the file size before it starts
+ * truncating the pages between new_size and old_size. Therefore, we
+ * can end up in the situation where writeback gets a CoW fork mapping
+ * but the truncate makes the mapping invalid and we end up in here
+ * trying to get a new mapping. Bail out here so that we simply never
+ * get a valid mapping and so we drop the write altogether. The page
+ * truncation will kill the contents anyway.
+ */
+ if (type == XFS_IO_COW && offset > i_size_read(inode))
+ return 0;
+
ASSERT(type != XFS_IO_COW);
if (type == XFS_IO_UNWRITTEN)
bmapi_flags |= XFS_BMAPI_IGSTATE;
@@ -399,7 +412,7 @@ xfs_map_blocks(
(ip->i_df.if_flags & XFS_IFEXTENTS));
ASSERT(offset <= mp->m_super->s_maxbytes);
- if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
+ if (offset > mp->m_super->s_maxbytes - count)
count = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -791,7 +804,7 @@ xfs_aops_discard_page(
goto out_invalidate;
xfs_alert(ip->i_mount,
- "page discard on page %p, inode 0x%llx, offset %llu.",
+ "page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
page, ip->i_ino, offset);
xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -1312,7 +1325,7 @@ xfs_get_blocks(
lockmode = xfs_ilock_data_map_shared(ip);
ASSERT(offset <= mp->m_super->s_maxbytes);
- if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
+ if (offset > mp->m_super->s_maxbytes - size)
size = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 6d37ab43195f..c83f549dc17b 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1872,7 +1872,7 @@ xfs_swap_extents(
*/
lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
lock_flags = XFS_MMAPLOCK_EXCL;
- xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
+ xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
/* Verify that both files have the same format */
if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
@@ -1919,7 +1919,7 @@ xfs_swap_extents(
* Lock and join the inodes to the transaction so that transaction commit
* or cancel will unlock the inodes from this point onwards.
*/
- xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
lock_flags |= XFS_ILOCK_EXCL;
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_ijoin(tp, tip, 0);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 4c6e86d861fd..d1da2ee9e6db 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -236,6 +236,7 @@ _xfs_buf_alloc(
init_completion(&bp->b_iowait);
INIT_LIST_HEAD(&bp->b_lru);
INIT_LIST_HEAD(&bp->b_list);
+ INIT_LIST_HEAD(&bp->b_li_list);
sema_init(&bp->b_sema, 0); /* held, no waiters */
spin_lock_init(&bp->b_lock);
XB_SET_OWNER(bp);
@@ -585,7 +586,7 @@ _xfs_buf_find(
* returning a specific error on buffer lookup failures.
*/
xfs_alert(btp->bt_mount,
- "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
+ "%s: daddr 0x%llx out of range, EOFS 0x%llx",
__func__, cmap.bm_bn, eofs);
WARN_ON(1);
return NULL;
@@ -1180,13 +1181,14 @@ xfs_buf_ioend_async(
}
void
-xfs_buf_ioerror(
+__xfs_buf_ioerror(
xfs_buf_t *bp,
- int error)
+ int error,
+ xfs_failaddr_t failaddr)
{
ASSERT(error <= 0 && error >= -1000);
bp->b_error = error;
- trace_xfs_buf_ioerror(bp, error, _RET_IP_);
+ trace_xfs_buf_ioerror(bp, error, failaddr);
}
void
@@ -1195,8 +1197,9 @@ xfs_buf_ioerror_alert(
const char *func)
{
xfs_alert(bp->b_target->bt_mount,
-"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
- (uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
+"metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
+ func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
+ -bp->b_error);
}
int
@@ -1378,9 +1381,10 @@ _xfs_buf_ioapply(
*/
if (xfs_sb_version_hascrc(&mp->m_sb)) {
xfs_warn(mp,
- "%s: no ops on block 0x%llx/0x%x",
+ "%s: no buf ops on daddr 0x%llx len %d",
__func__, bp->b_bn, bp->b_length);
- xfs_hex_dump(bp->b_addr, 64);
+ xfs_hex_dump(bp->b_addr,
+ XFS_CORRUPTION_DUMP_LEN);
dump_stack();
}
}
@@ -1671,7 +1675,7 @@ xfs_wait_buftarg(
list_del_init(&bp->b_lru);
if (bp->b_flags & XBF_WRITE_FAIL) {
xfs_alert(btp->bt_mount,
-"Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
+"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
(long long)bp->b_bn);
xfs_alert(btp->bt_mount,
"Please run xfs_repair to determine the extent of the problem.");
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index f873bb786824..2f4c91452861 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -140,6 +140,7 @@ struct xfs_buf_ops {
char *name;
void (*verify_read)(struct xfs_buf *);
void (*verify_write)(struct xfs_buf *);
+ xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
typedef struct xfs_buf {
@@ -175,7 +176,8 @@ typedef struct xfs_buf {
struct workqueue_struct *b_ioend_wq; /* I/O completion wq */
xfs_buf_iodone_t b_iodone; /* I/O completion function */
struct completion b_iowait; /* queue for I/O waiters */
- void *b_fspriv;
+ void *b_log_item;
+ struct list_head b_li_list; /* Log items list head */
struct xfs_trans *b_transp;
struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */
@@ -315,7 +317,9 @@ extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
-extern void xfs_buf_ioerror(xfs_buf_t *, int);
+extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
+ xfs_failaddr_t failaddr);
+#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_submit(struct xfs_buf *bp);
extern int xfs_buf_submit_wait(struct xfs_buf *bp);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index e0a0af0946f2..270ddb4d2313 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -61,14 +61,14 @@ xfs_buf_log_format_size(
*/
STATIC void
xfs_buf_item_size_segment(
- struct xfs_buf_log_item *bip,
- struct xfs_buf_log_format *blfp,
- int *nvecs,
- int *nbytes)
+ struct xfs_buf_log_item *bip,
+ struct xfs_buf_log_format *blfp,
+ int *nvecs,
+ int *nbytes)
{
- struct xfs_buf *bp = bip->bli_buf;
- int next_bit;
- int last_bit;
+ struct xfs_buf *bp = bip->bli_buf;
+ int next_bit;
+ int last_bit;
last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
if (last_bit == -1)
@@ -218,12 +218,12 @@ xfs_buf_item_format_segment(
uint offset,
struct xfs_buf_log_format *blfp)
{
- struct xfs_buf *bp = bip->bli_buf;
- uint base_size;
- int first_bit;
- int last_bit;
- int next_bit;
- uint nbits;
+ struct xfs_buf *bp = bip->bli_buf;
+ uint base_size;
+ int first_bit;
+ int last_bit;
+ int next_bit;
+ uint nbits;
/* copy the flags across from the base format item */
blfp->blf_flags = bip->__bli_format.blf_flags;
@@ -406,12 +406,12 @@ xfs_buf_item_unpin(
int remove)
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- xfs_buf_t *bp = bip->bli_buf;
- struct xfs_ail *ailp = lip->li_ailp;
- int stale = bip->bli_flags & XFS_BLI_STALE;
- int freed;
+ xfs_buf_t *bp = bip->bli_buf;
+ struct xfs_ail *ailp = lip->li_ailp;
+ int stale = bip->bli_flags & XFS_BLI_STALE;
+ int freed;
- ASSERT(bp->b_fspriv == bip);
+ ASSERT(bp->b_log_item == bip);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_buf_item_unpin(bip);
@@ -456,13 +456,14 @@ xfs_buf_item_unpin(
*/
if (bip->bli_flags & XFS_BLI_STALE_INODE) {
xfs_buf_do_callbacks(bp);
- bp->b_fspriv = NULL;
+ bp->b_log_item = NULL;
+ list_del_init(&bp->b_li_list);
bp->b_iodone = NULL;
} else {
spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
xfs_buf_item_relse(bp);
- ASSERT(bp->b_fspriv == NULL);
+ ASSERT(bp->b_log_item == NULL);
}
xfs_buf_relse(bp);
} else if (freed && remove) {
@@ -722,18 +723,15 @@ xfs_buf_item_free_format(
/*
* Allocate a new buf log item to go with the given buffer.
- * Set the buffer's b_fsprivate field to point to the new
- * buf log item. If there are other item's attached to the
- * buffer (see xfs_buf_attach_iodone() below), then put the
- * buf log item at the front.
+ * Set the buffer's b_log_item field to point to the new
+ * buf log item.
*/
int
xfs_buf_item_init(
struct xfs_buf *bp,
struct xfs_mount *mp)
{
- struct xfs_log_item *lip = bp->b_fspriv;
- struct xfs_buf_log_item *bip;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
int chunks;
int map_size;
int error;
@@ -741,13 +739,14 @@ xfs_buf_item_init(
/*
* Check to see if there is already a buf log item for
- * this buffer. If there is, it is guaranteed to be
- * the first. If we do already have one, there is
+ * this buffer. If we do already have one, there is
* nothing to do here so return.
*/
ASSERT(bp->b_target->bt_mount == mp);
- if (lip != NULL && lip->li_type == XFS_LI_BUF)
+ if (bip != NULL) {
+ ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
return 0;
+ }
bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
@@ -781,13 +780,7 @@ xfs_buf_item_init(
bip->bli_formats[i].blf_map_size = map_size;
}
- /*
- * Put the buf item into the list of items attached to the
- * buffer at the front.
- */
- if (bp->b_fspriv)
- bip->bli_item.li_bio_list = bp->b_fspriv;
- bp->b_fspriv = bip;
+ bp->b_log_item = bip;
xfs_buf_hold(bp);
return 0;
}
@@ -880,7 +873,7 @@ xfs_buf_item_log_segment(
*/
void
xfs_buf_item_log(
- xfs_buf_log_item_t *bip,
+ struct xfs_buf_log_item *bip,
uint first,
uint last)
{
@@ -943,7 +936,7 @@ xfs_buf_item_dirty_format(
STATIC void
xfs_buf_item_free(
- xfs_buf_log_item_t *bip)
+ struct xfs_buf_log_item *bip)
{
xfs_buf_item_free_format(bip);
kmem_free(bip->bli_item.li_lv_shadow);
@@ -961,13 +954,13 @@ void
xfs_buf_item_relse(
xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
trace_xfs_buf_item_relse(bp, _RET_IP_);
ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
- bp->b_fspriv = bip->bli_item.li_bio_list;
- if (bp->b_fspriv == NULL)
+ bp->b_log_item = NULL;
+ if (list_empty(&bp->b_li_list))
bp->b_iodone = NULL;
xfs_buf_rele(bp);
@@ -980,9 +973,7 @@ xfs_buf_item_relse(
* to be called when the buffer's I/O completes. If it is not set
* already, set the buffer's b_iodone() routine to be
* xfs_buf_iodone_callbacks() and link the log item into the list of
- * items rooted at b_fsprivate. Items are always added as the second
- * entry in the list if there is a first, because the buf item code
- * assumes that the buf log item is first.
+ * items rooted at b_li_list.
*/
void
xfs_buf_attach_iodone(
@@ -990,18 +981,10 @@ xfs_buf_attach_iodone(
void (*cb)(xfs_buf_t *, xfs_log_item_t *),
xfs_log_item_t *lip)
{
- xfs_log_item_t *head_lip;
-
ASSERT(xfs_buf_islocked(bp));
lip->li_cb = cb;
- head_lip = bp->b_fspriv;
- if (head_lip) {
- lip->li_bio_list = head_lip->li_bio_list;
- head_lip->li_bio_list = lip;
- } else {
- bp->b_fspriv = lip;
- }
+ list_add_tail(&lip->li_bio_list, &bp->b_li_list);
ASSERT(bp->b_iodone == NULL ||
bp->b_iodone == xfs_buf_iodone_callbacks);
@@ -1011,12 +994,12 @@ xfs_buf_attach_iodone(
/*
* We can have many callbacks on a buffer. Running the callbacks individually
* can cause a lot of contention on the AIL lock, so we allow for a single
- * callback to be able to scan the remaining lip->li_bio_list for other items
- * of the same type and callback to be processed in the first call.
+ * callback to be able to scan the remaining items in bp->b_li_list for other
+ * items with the same type and callback so they can be processed in the first call.
*
* As a result, the loop walking the callback list below will also modify the
* list. It removes the first item from the list and then runs the callback.
- * The loop then restarts from the new head of the list. This allows the
+ * The loop then restarts from the new first item in the list. This allows the
* callback to scan and modify the list attached to the buffer and we don't
* have to care about maintaining a next item pointer.
*/
@@ -1024,18 +1007,26 @@ STATIC void
xfs_buf_do_callbacks(
struct xfs_buf *bp)
{
+ struct xfs_buf_log_item *blip = bp->b_log_item;
struct xfs_log_item *lip;
- while ((lip = bp->b_fspriv) != NULL) {
- bp->b_fspriv = lip->li_bio_list;
- ASSERT(lip->li_cb != NULL);
+ /* If there is a buf_log_item attached, run its callback */
+ if (blip) {
+ lip = &blip->bli_item;
+ lip->li_cb(bp, lip);
+ }
+
+ while (!list_empty(&bp->b_li_list)) {
+ lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
+ li_bio_list);
+
/*
- * Clear the next pointer so we don't have any
+ * Remove the item from the list, so we don't have any
* confusion if the item is added to another buf.
* Don't touch the log item after calling its
* callback, because it could have freed itself.
*/
- lip->li_bio_list = NULL;
+ list_del_init(&lip->li_bio_list);
lip->li_cb(bp, lip);
}
}
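
The rework above hangs every log item off bp->b_li_list using the kernel's intrusive list_head instead of the hand-rolled li_bio_list pointer chain. Here is a self-contained miniature of that machinery, including the pop-first-and-restart walk that xfs_buf_do_callbacks() uses, which stays correct even when a callback removes further entries; the list and item names are illustrative only:

	#include <stdio.h>
	#include <stddef.h>

	/* Miniature of the kernel's intrusive list: the link field lives
	 * inside the item, and container_of() recovers the item from it. */
	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static void list_init(struct list_head *h)	 { h->next = h->prev = h; }
	static int list_empty(const struct list_head *h) { return h->next == h; }

	static void list_add_tail(struct list_head *n, struct list_head *h)
	{
		n->prev = h->prev;
		n->next = h;
		h->prev->next = n;
		h->prev = n;
	}

	static void list_del_init(struct list_head *n)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
		list_init(n);
	}

	struct item { int id; struct list_head link; };

	int main(void)
	{
		struct list_head head;
		struct item a = { .id = 1 }, b = { .id = 2 };

		list_init(&head);
		list_add_tail(&a.link, &head);
		list_add_tail(&b.link, &head);

		/* The pop-first-and-restart walk: each pass re-reads the
		 * current first element, so a callback may delete more
		 * entries without confusing the iteration. */
		while (!list_empty(&head)) {
			struct item *it = container_of(head.next,
						       struct item, link);

			list_del_init(&it->link);
			printf("callback for item %d\n", it->id);
		}
		return 0;
	}
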
@@ -1052,13 +1043,22 @@ STATIC void
xfs_buf_do_callbacks_fail(
struct xfs_buf *bp)
{
- struct xfs_log_item *next;
- struct xfs_log_item *lip = bp->b_fspriv;
- struct xfs_ail *ailp = lip->li_ailp;
+ struct xfs_log_item *lip;
+ struct xfs_ail *ailp;
+ /*
+ * Buffer log item errors are handled directly by xfs_buf_item_push()
+ * and xfs_buf_iodone_callback_error(), and they have no IO error
+ * callbacks. Check only for items in b_li_list.
+ */
+ if (list_empty(&bp->b_li_list))
+ return;
+
+ lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
+ li_bio_list);
+ ailp = lip->li_ailp;
spin_lock(&ailp->xa_lock);
- for (; lip; lip = next) {
- next = lip->li_bio_list;
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
if (lip->li_ops->iop_error)
lip->li_ops->iop_error(lip, bp);
}
@@ -1069,13 +1069,23 @@ static bool
xfs_buf_iodone_callback_error(
struct xfs_buf *bp)
{
- struct xfs_log_item *lip = bp->b_fspriv;
- struct xfs_mount *mp = lip->li_mountp;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ struct xfs_log_item *lip;
+ struct xfs_mount *mp;
static ulong lasttime;
static xfs_buftarg_t *lasttarg;
struct xfs_error_cfg *cfg;
/*
+ * The failed buffer might not have a buf_log_item attached or the
+ * log_item list might be empty. Get the mp from the available
+ * xfs_log_item.
+ */
+ lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
+ li_bio_list);
+ mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;
+
+ /*
* If we've already decided to shutdown the filesystem because of
* I/O errors, there's no point in giving this a retry.
*/
@@ -1183,7 +1193,8 @@ xfs_buf_iodone_callbacks(
bp->b_first_retry_time = 0;
xfs_buf_do_callbacks(bp);
- bp->b_fspriv = NULL;
+ bp->b_log_item = NULL;
+ list_del_init(&bp->b_li_list);
bp->b_iodone = NULL;
xfs_buf_ioend(bp);
}
@@ -1228,10 +1239,9 @@ xfs_buf_iodone(
bool
xfs_buf_resubmit_failed_buffers(
struct xfs_buf *bp,
- struct xfs_log_item *lip,
struct list_head *buffer_list)
{
- struct xfs_log_item *next;
+ struct xfs_log_item *lip;
/*
* Clear XFS_LI_FAILED flag from all items before resubmit
@@ -1239,10 +1249,8 @@ xfs_buf_resubmit_failed_buffers(
* XFS_LI_FAILED set/clear is protected by xa_lock, caller this
* function already have it acquired
*/
- for (; lip; lip = next) {
- next = lip->li_bio_list;
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
xfs_clear_li_failed(lip);
- }
/* Add this buffer back to the delayed write list */
return xfs_buf_delwri_queue(bp, buffer_list);
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 9690ce62c9a7..643f53dcfe51 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -50,7 +50,7 @@ struct xfs_buf_log_item;
* needed to log buffers. It tracks how many times the lock has been
* locked, and which 128 byte chunks of the buffer are dirty.
*/
-typedef struct xfs_buf_log_item {
+struct xfs_buf_log_item {
xfs_log_item_t bli_item; /* common item structure */
struct xfs_buf *bli_buf; /* real buffer pointer */
unsigned int bli_flags; /* misc flags */
@@ -59,11 +59,11 @@ typedef struct xfs_buf_log_item {
int bli_format_count; /* count of headers */
struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
struct xfs_buf_log_format __bli_format; /* embedded in-log header */
-} xfs_buf_log_item_t;
+};
int xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
void xfs_buf_item_relse(struct xfs_buf *);
-void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
+void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
void xfs_buf_attach_iodone(struct xfs_buf *,
void(*)(struct xfs_buf *, xfs_log_item_t *),
@@ -71,7 +71,6 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
void xfs_buf_iodone_callbacks(struct xfs_buf *);
void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
bool xfs_buf_resubmit_failed_buffers(struct xfs_buf *,
- struct xfs_log_item *,
struct list_head *);
extern kmem_zone_t *xfs_buf_item_zone;
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 0c58918bc0ad..b6ae3597bfb0 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -152,7 +152,6 @@ xfs_dir2_block_getdents(
struct xfs_inode *dp = args->dp; /* incore directory inode */
xfs_dir2_data_hdr_t *hdr; /* block header */
struct xfs_buf *bp; /* buffer for block */
- xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* block data entry */
xfs_dir2_data_unused_t *dup; /* block unused entry */
char *endptr; /* end of the data entries */
@@ -185,9 +184,8 @@ xfs_dir2_block_getdents(
/*
* Set up values for the loop.
*/
- btp = xfs_dir2_block_tail_p(geo, hdr);
ptr = (char *)dp->d_ops->data_entry_p(hdr);
- endptr = (char *)xfs_dir2_block_leaf_p(btp);
+ endptr = xfs_dir3_data_endp(geo, hdr);
/*
* Loop over the data portion of the block.
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index f248708c10ff..43572f8a1b8e 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -399,52 +399,6 @@ error0:
return error;
}
-STATIC int
-xfs_qm_dqrepair(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- struct xfs_dquot *dqp,
- xfs_dqid_t firstid,
- struct xfs_buf **bpp)
-{
- int error;
- struct xfs_disk_dquot *ddq;
- struct xfs_dqblk *d;
- int i;
-
- /*
- * Read the buffer without verification so we get the corrupted
- * buffer returned to us. make sure we verify it on write, though.
- */
- error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
- mp->m_quotainfo->qi_dqchunklen,
- 0, bpp, NULL);
-
- if (error) {
- ASSERT(*bpp == NULL);
- return error;
- }
- (*bpp)->b_ops = &xfs_dquot_buf_ops;
-
- ASSERT(xfs_buf_islocked(*bpp));
- d = (struct xfs_dqblk *)(*bpp)->b_addr;
-
- /* Do the actual repair of dquots in this buffer */
- for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
- ddq = &d[i].dd_diskdq;
- error = xfs_dqcheck(mp, ddq, firstid + i,
- dqp->dq_flags & XFS_DQ_ALLTYPES,
- XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
- if (error) {
- /* repair failed, we're screwed */
- xfs_trans_brelse(tp, *bpp);
- return -EIO;
- }
- }
-
- return 0;
-}
-
/*
* Maps a dquot to the buffer containing its on-disk version.
* This returns a ptr to the buffer containing the on-disk dquot
@@ -526,14 +480,6 @@ xfs_qm_dqtobp(
dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen,
0, &bp, &xfs_dquot_buf_ops);
-
- if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
- xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
- mp->m_quotainfo->qi_dqperchunk;
- ASSERT(bp == NULL);
- error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
- }
-
if (error) {
ASSERT(bp == NULL);
return error;
@@ -1010,6 +956,7 @@ xfs_qm_dqflush(
struct xfs_mount *mp = dqp->q_mount;
struct xfs_buf *bp;
struct xfs_disk_dquot *ddqp;
+ xfs_failaddr_t fa;
int error;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
@@ -1056,9 +1003,10 @@ xfs_qm_dqflush(
/*
* A simple sanity check in case we got a corrupted dquot..
*/
- error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
- XFS_QMOPT_DOWARN, "dqflush (incore copy)");
- if (error) {
+ fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0, 0);
+ if (fa) {
+ xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
+ be32_to_cpu(ddqp->d_id), fa);
xfs_buf_relse(bp);
xfs_dqfunlock(dqp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 664dea105e76..96eaa6933709 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -150,10 +150,7 @@ xfs_dquot_item_error(
struct xfs_log_item *lip,
struct xfs_buf *bp)
{
- struct xfs_dquot *dqp;
-
- dqp = DQUOT_ITEM(lip)->qli_dquot;
- ASSERT(!completion_done(&dqp->q_flush));
+ ASSERT(!completion_done(&DQUOT_ITEM(lip)->qli_dquot->q_flush));
xfs_set_li_failed(lip, bp);
}
@@ -179,7 +176,7 @@ xfs_qm_dquot_logitem_push(
if (!xfs_buf_trylock(bp))
return XFS_ITEM_LOCKED;
- if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
+ if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_unlock(bp);
@@ -212,7 +209,7 @@ xfs_qm_dquot_logitem_push(
error = xfs_qm_dqflush(dqp, &bp);
if (error) {
- xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
+ xfs_warn(dqp->q_mount, "%s: push error %d on dqp "PTR_FMT,
__func__, error, dqp);
} else {
if (!xfs_buf_delwri_queue(bp, buffer_list))
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 4c9f35d983b2..ccf520f0b00d 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -24,6 +24,7 @@
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_sysfs.h"
+#include "xfs_inode.h"
#ifdef DEBUG
@@ -314,12 +315,12 @@ xfs_error_report(
struct xfs_mount *mp,
const char *filename,
int linenum,
- void *ra)
+ xfs_failaddr_t failaddr)
{
if (level <= xfs_error_level) {
xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT,
"Internal error %s at line %d of file %s. Caller %pS",
- tag, linenum, filename, ra);
+ tag, linenum, filename, failaddr);
xfs_stack_trace();
}
@@ -333,11 +334,11 @@ xfs_corruption_error(
void *p,
const char *filename,
int linenum,
- void *ra)
+ xfs_failaddr_t failaddr)
{
if (level <= xfs_error_level)
- xfs_hex_dump(p, 64);
- xfs_error_report(tag, level, mp, filename, linenum, ra);
+ xfs_hex_dump(p, XFS_CORRUPTION_DUMP_LEN);
+ xfs_error_report(tag, level, mp, filename, linenum, failaddr);
xfs_alert(mp, "Corruption detected. Unmount and run xfs_repair");
}
@@ -347,19 +348,62 @@ xfs_corruption_error(
*/
void
xfs_verifier_error(
- struct xfs_buf *bp)
+ struct xfs_buf *bp,
+ int error,
+ xfs_failaddr_t failaddr)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
+
+ fa = failaddr ? failaddr : __return_address;
+ __xfs_buf_ioerror(bp, error, fa);
xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx",
bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
- __return_address, bp->b_ops->name, bp->b_bn);
+ fa, bp->b_ops->name, bp->b_bn);
xfs_alert(mp, "Unmount and run xfs_repair");
if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
- xfs_alert(mp, "First 64 bytes of corrupted metadata buffer:");
- xfs_hex_dump(xfs_buf_offset(bp, 0), 64);
+ xfs_alert(mp, "First %d bytes of corrupted metadata buffer:",
+ XFS_CORRUPTION_DUMP_LEN);
+ xfs_hex_dump(xfs_buf_offset(bp, 0), XFS_CORRUPTION_DUMP_LEN);
+ }
+
+ if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
+ xfs_stack_trace();
+}
+
+/*
+ * Warnings for inode corruption problems. Don't bother with the stack
+ * trace unless the error level is turned up high.
+ */
+void
+xfs_inode_verifier_error(
+ struct xfs_inode *ip,
+ int error,
+ const char *name,
+ void *buf,
+ size_t bufsz,
+ xfs_failaddr_t failaddr)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_failaddr_t fa;
+ int sz;
+
+ fa = failaddr ? failaddr : __return_address;
+
+ xfs_alert(mp, "Metadata %s detected at %pS, inode 0x%llx %s",
+ error == -EFSBADCRC ? "CRC error" : "corruption",
+ fa, ip->i_ino, name);
+
+ xfs_alert(mp, "Unmount and run xfs_repair");
+
+ if (buf && xfs_error_level >= XFS_ERRLEVEL_LOW) {
+ sz = min_t(size_t, XFS_CORRUPTION_DUMP_LEN, bufsz);
+ xfs_alert(mp, "First %d bytes of corrupted metadata buffer:",
+ sz);
+ xfs_hex_dump(buf, sz);
}
if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
xfs_stack_trace();
}
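
xfs_inode_verifier_error() and the new verify_struct buffer op both lean on the series' verifier convention: a verifier returns NULL when the structure is clean and otherwise the xfs_failaddr_t of the failing check, which the reporter then prints with %pS. A compact sketch of that convention, under the same labels-as-values assumption as earlier; struct hdr, HDR_MAGIC and hdr_verify_struct() are hypothetical, not XFS names:

	#include <stdint.h>
	#include <stdio.h>

	typedef void *xfs_failaddr_t;

	#define __this_address \
		({ __label__ __here; __here: __asm__ volatile(""); &&__here; })

	/* Hypothetical on-disk header, for illustration only. */
	struct hdr {
		uint32_t magic;
		uint32_t len;
	};

	#define HDR_MAGIC	0x58465321u
	#define HDR_MAX_LEN	4096u

	/*
	 * Verifier convention: NULL means clean; otherwise the address of
	 * the failing check, so the caller can report exactly which of
	 * many checks tripped instead of a bare pass/fail.
	 */
	static xfs_failaddr_t hdr_verify_struct(const struct hdr *h)
	{
		if (h->magic != HDR_MAGIC)
			return __this_address;
		if (h->len > HDR_MAX_LEN)
			return __this_address;
		return NULL;
	}

	int main(void)
	{
		struct hdr bad = { .magic = 0, .len = 8 };
		xfs_failaddr_t fa = hdr_verify_struct(&bad);

		if (fa)
			printf("corruption detected at %p\n", fa);
		return 0;
	}
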
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index ea816c1bf8db..7e728c5a46b8 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -21,11 +21,16 @@
struct xfs_mount;
extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp,
- const char *filename, int linenum, void *ra);
+ const char *filename, int linenum,
+ xfs_failaddr_t failaddr);
extern void xfs_corruption_error(const char *tag, int level,
struct xfs_mount *mp, void *p, const char *filename,
- int linenum, void *ra);
-extern void xfs_verifier_error(struct xfs_buf *bp);
+ int linenum, xfs_failaddr_t failaddr);
+extern void xfs_verifier_error(struct xfs_buf *bp, int error,
+ xfs_failaddr_t failaddr);
+extern void xfs_inode_verifier_error(struct xfs_inode *ip, int error,
+ const char *name, void *buf, size_t bufsz,
+ xfs_failaddr_t failaddr);
#define XFS_ERROR_REPORT(e, lvl, mp) \
xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address)
@@ -37,6 +42,9 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
#define XFS_ERRLEVEL_LOW 1
#define XFS_ERRLEVEL_HIGH 5
+/* Dump 128 bytes of any corrupt buffer */
+#define XFS_CORRUPTION_DUMP_LEN (128)
+
/*
* Macros to set EFSCORRUPTED & return/branch.
*/
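
XFS_CORRUPTION_DUMP_LEN doubles the dumped prefix of a corrupt buffer from 64 to 128 bytes. A rough user-space stand-in for the dump itself is below; the kernel's xfs_hex_dump() is a different implementation, and hex_dump() here is illustrative only:

	#include <stdio.h>

	#define CORRUPTION_DUMP_LEN 128	/* mirrors XFS_CORRUPTION_DUMP_LEN */

	/* 16 bytes per row with a leading offset: enough to eyeball the
	 * prefix of a corrupt metadata buffer in a post-mortem log. */
	static void hex_dump(const void *data, int len)
	{
		const unsigned char *p = data;
		int i;

		for (i = 0; i < len; i++) {
			if (i % 16 == 0)
				printf("%08x: ", i);
			printf("%02x%c", p[i], (i % 16 == 15) ? '\n' : ' ');
		}
		if (len % 16)
			putchar('\n');
	}

	int main(void)
	{
		unsigned char buf[CORRUPTION_DUMP_LEN] = { 0x58, 0x41, 0x47, 0x46 };

		hex_dump(buf, sizeof(buf));
		return 0;
	}
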
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 60a2e128cb6a..8b4545623e25 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -49,83 +49,6 @@
* File system operations
*/
-int
-xfs_fs_geometry(
- xfs_mount_t *mp,
- xfs_fsop_geom_t *geo,
- int new_version)
-{
-
- memset(geo, 0, sizeof(*geo));
-
- geo->blocksize = mp->m_sb.sb_blocksize;
- geo->rtextsize = mp->m_sb.sb_rextsize;
- geo->agblocks = mp->m_sb.sb_agblocks;
- geo->agcount = mp->m_sb.sb_agcount;
- geo->logblocks = mp->m_sb.sb_logblocks;
- geo->sectsize = mp->m_sb.sb_sectsize;
- geo->inodesize = mp->m_sb.sb_inodesize;
- geo->imaxpct = mp->m_sb.sb_imax_pct;
- geo->datablocks = mp->m_sb.sb_dblocks;
- geo->rtblocks = mp->m_sb.sb_rblocks;
- geo->rtextents = mp->m_sb.sb_rextents;
- geo->logstart = mp->m_sb.sb_logstart;
- ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid));
- memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
- if (new_version >= 2) {
- geo->sunit = mp->m_sb.sb_unit;
- geo->swidth = mp->m_sb.sb_width;
- }
- if (new_version >= 3) {
- geo->version = XFS_FSOP_GEOM_VERSION;
- geo->flags = XFS_FSOP_GEOM_FLAGS_NLINK |
- XFS_FSOP_GEOM_FLAGS_DIRV2 |
- (xfs_sb_version_hasattr(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
- (xfs_sb_version_hasquota(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
- (xfs_sb_version_hasalign(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
- (xfs_sb_version_hasdalign(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
- (xfs_sb_version_hasextflgbit(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
- (xfs_sb_version_hassector(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
- (xfs_sb_version_hasasciici(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
- (xfs_sb_version_haslazysbcount(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
- (xfs_sb_version_hasattr2(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
- (xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
- (xfs_sb_version_hascrc(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
- (xfs_sb_version_hasftype(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_FTYPE : 0) |
- (xfs_sb_version_hasfinobt(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_FINOBT : 0) |
- (xfs_sb_version_hassparseinodes(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_SPINODES : 0) |
- (xfs_sb_version_hasrmapbt(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_RMAPBT : 0) |
- (xfs_sb_version_hasreflink(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_REFLINK : 0);
- geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
- mp->m_sb.sb_logsectsize : BBSIZE;
- geo->rtsectsize = mp->m_sb.sb_blocksize;
- geo->dirblocksize = mp->m_dir_geo->blksize;
- }
- if (new_version >= 4) {
- geo->flags |=
- (xfs_sb_version_haslogv2(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
- geo->logsunit = mp->m_sb.sb_logsunit;
- }
- return 0;
-}
-
static struct xfs_buf *
xfs_growfs_get_hdr_buf(
struct xfs_mount *mp,
@@ -955,7 +878,7 @@ xfs_do_force_shutdown(
if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
xfs_notice(mp,
- "%s(0x%x) called from line %d of file %s. Return address = 0x%p",
+ "%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
__func__, flags, lnnum, fname, __return_address);
}
/*
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index 2954c13a3acd..20484ed5e919 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -18,7 +18,6 @@
#ifndef __XFS_FSOPS_H__
#define __XFS_FSOPS_H__
-extern int xfs_fs_geometry(xfs_mount_t *mp, xfs_fsop_geom_t *geo, int nversion);
extern int xfs_growfs_data(xfs_mount_t *mp, xfs_growfs_data_t *in);
extern int xfs_growfs_log(xfs_mount_t *mp, xfs_growfs_log_t *in);
extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 3861d61fb265..d53a316162d6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/iversion.h>
/*
* Allocate and initialise an xfs_inode.
@@ -293,15 +294,17 @@ xfs_reinit_inode(
int error;
uint32_t nlink = inode->i_nlink;
uint32_t generation = inode->i_generation;
- uint64_t version = inode->i_version;
+ uint64_t version = inode_peek_iversion(inode);
umode_t mode = inode->i_mode;
+ dev_t dev = inode->i_rdev;
error = inode_init_always(mp->m_super, inode);
set_nlink(inode, nlink);
inode->i_generation = generation;
- inode->i_version = version;
+ inode_set_iversion_queried(inode, version);
inode->i_mode = mode;
+ inode->i_rdev = dev;
return error;
}
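
For the iversion conversion: from this kernel series on, i_version is only touched through the <linux/iversion.h> helpers. The raw counter keeps a "queried" flag in its low bit so the version only needs bumping when an observer has actually sampled it; xfs_reinit_inode() uses inode_set_iversion_queried() because it cannot know whether the old value was sampled and must conservatively assume it was. A toy single-threaded model of that encoding follows (the real helpers use atomic cmpxchg and also support forced increments):

	#include <stdint.h>
	#include <stdio.h>

	#define I_VERSION_QUERIED	1ULL	/* low bit of the raw counter */

	static uint64_t raw;			/* stand-in for inode->i_version */

	static void set_iversion(uint64_t v)	      { raw = v << 1; }
	static void set_iversion_queried(uint64_t v) { raw = (v << 1) | I_VERSION_QUERIED; }
	static uint64_t peek_iversion(void)	      { return raw >> 1; }

	static uint64_t query_iversion(void)
	{
		raw |= I_VERSION_QUERIED;	/* remember someone looked */
		return raw >> 1;
	}

	static void maybe_inc_iversion(void)
	{
		/* Only bump if the value was observed since the last bump. */
		if (raw & I_VERSION_QUERIED)
			raw = (raw + (1ULL << 1)) & ~I_VERSION_QUERIED;
	}

	int main(void)
	{
		set_iversion(1);
		maybe_inc_iversion();	/* no-op: nobody queried yet */
		printf("v=%llu\n", (unsigned long long)query_iversion());
		maybe_inc_iversion();	/* bumps: it was queried */
		printf("v=%llu\n", (unsigned long long)peek_iversion());
		return 0;
	}
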
@@ -473,6 +476,11 @@ xfs_iget_cache_miss(
if (error)
goto out_destroy;
+ if (!xfs_inode_verify_forks(ip)) {
+ error = -EFSCORRUPTED;
+ goto out_destroy;
+ }
+
trace_xfs_iget_miss(ip);
if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
@@ -1650,28 +1658,15 @@ xfs_inode_clear_eofblocks_tag(
}
/*
- * Automatic CoW Reservation Freeing
- *
- * These functions automatically garbage collect leftover CoW reservations
- * that were made on behalf of a cowextsize hint when we start to run out
- * of quota or when the reservations sit around for too long. If the file
- * has dirty pages or is undergoing writeback, its CoW reservations will
- * be retained.
- *
- * The actual garbage collection piggybacks off the same code that runs
- * the speculative EOF preallocation garbage collector.
+ * Set ourselves up to free CoW blocks from this file. If it's already clean
+ * then we can bail out quickly, but otherwise we must back off if the file
+ * is undergoing some kind of write.
*/
-STATIC int
-xfs_inode_free_cowblocks(
+static bool
+xfs_prep_free_cowblocks(
struct xfs_inode *ip,
- int flags,
- void *args)
+ struct xfs_ifork *ifp)
{
- int ret;
- struct xfs_eofblocks *eofb = args;
- int match;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
-
/*
* Just clear the tag if we have an empty cow fork or none at all. It's
* possible the inode was fully unshared since it was originally tagged.
@@ -1679,7 +1674,7 @@ xfs_inode_free_cowblocks(
if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
trace_xfs_inode_free_cowblocks_invalid(ip);
xfs_inode_clear_cowblocks_tag(ip);
- return 0;
+ return false;
}
/*
@@ -1690,6 +1685,35 @@ xfs_inode_free_cowblocks(
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
atomic_read(&VFS_I(ip)->i_dio_count))
+ return false;
+
+ return true;
+}
+
+/*
+ * Automatic CoW Reservation Freeing
+ *
+ * These functions automatically garbage collect leftover CoW reservations
+ * that were made on behalf of a cowextsize hint when we start to run out
+ * of quota or when the reservations sit around for too long. If the file
+ * has dirty pages or is undergoing writeback, its CoW reservations will
+ * be retained.
+ *
+ * The actual garbage collection piggybacks off the same code that runs
+ * the speculative EOF preallocation garbage collector.
+ */
+STATIC int
+xfs_inode_free_cowblocks(
+ struct xfs_inode *ip,
+ int flags,
+ void *args)
+{
+ struct xfs_eofblocks *eofb = args;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ int match;
+ int ret = 0;
+
+ if (!xfs_prep_free_cowblocks(ip, ifp))
return 0;
if (eofb) {
@@ -1710,7 +1734,12 @@ xfs_inode_free_cowblocks(
xfs_ilock(ip, XFS_IOLOCK_EXCL);
xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
- ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
+ /*
+ * Check again; nobody else should be able to dirty blocks or change
+ * the reflink iflag now that we have the first two locks held.
+ */
+ if (xfs_prep_free_cowblocks(ip, ifp))
+ ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
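
The hunk above is the classic optimistic-then-locked shape: an unlocked xfs_prep_free_cowblocks() call filters cheaply, and the same predicate is re-evaluated once the iolock and mmaplock exclude writers. A generic sketch of that shape, where can_free() and try_gc() are made-up names rather than XFS APIs:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int busy;	/* stand-in for "file has dirty pages / DIO" */

	static int can_free(void) { return !busy; }

	/* Cheap unlocked filter, then an authoritative recheck under the
	 * lock, once writers can no longer change the answer. */
	static int try_gc(void)
	{
		int freed = 0;

		if (!can_free())	/* unlocked peek: may race, cheap */
			return 0;

		pthread_mutex_lock(&lock);
		if (can_free())		/* stable: writers hold this lock */
			freed = 1;	/* ...cancel the CoW blocks here... */
		pthread_mutex_unlock(&lock);
		return freed;
	}

	int main(void)
	{
		printf("freed=%d\n", try_gc());
		return 0;
	}
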
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 6f95bdb408ce..604ee384a00a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -16,6 +16,7 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/log2.h>
+#include <linux/iversion.h>
#include "xfs.h"
#include "xfs_fs.h"
@@ -546,23 +547,36 @@ again:
/*
* xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
- * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
- * lock more than one at a time, lockdep will report false positives saying we
- * have violated locking orders.
+ * the mmaplock or the ilock, but not more than one type at a time. If we lock
+ * more than one at a time, lockdep will report false positives saying we have
+ * violated locking orders. The iolock must be double-locked separately since
+ * we use i_rwsem for that. We now support taking one lock EXCL and the other
+ * SHARED.
*/
void
xfs_lock_two_inodes(
- xfs_inode_t *ip0,
- xfs_inode_t *ip1,
- uint lock_mode)
+ struct xfs_inode *ip0,
+ uint ip0_mode,
+ struct xfs_inode *ip1,
+ uint ip1_mode)
{
- xfs_inode_t *temp;
+ struct xfs_inode *temp;
+ uint mode_temp;
int attempts = 0;
xfs_log_item_t *lp;
- ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
- if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
- ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ ASSERT(hweight32(ip0_mode) == 1);
+ ASSERT(hweight32(ip1_mode) == 1);
+ ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
+ ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
+ ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
+ !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
+ !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
+ !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
+ !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
ASSERT(ip0->i_ino != ip1->i_ino);
@@ -570,10 +584,13 @@ xfs_lock_two_inodes(
temp = ip0;
ip0 = ip1;
ip1 = temp;
+ mode_temp = ip0_mode;
+ ip0_mode = ip1_mode;
+ ip1_mode = mode_temp;
}
again:
- xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
+ xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
/*
* If the first lock we have locked is in the AIL, we must TRY to get
@@ -582,18 +599,17 @@ xfs_lock_two_inodes(
*/
lp = (xfs_log_item_t *)ip0->i_itemp;
if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
- if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
- xfs_iunlock(ip0, lock_mode);
+ if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
+ xfs_iunlock(ip0, ip0_mode);
if ((++attempts % 5) == 0)
delay(1); /* Don't just spin the CPU */
goto again;
}
} else {
- xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
+ xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
}
}
-
void
__xfs_iflock(
struct xfs_inode *ip)
@@ -832,7 +848,7 @@ xfs_ialloc(
ip->i_d.di_flags = 0;
if (ip->i_d.di_version == 3) {
- inode->i_version = 1;
+ inode_set_iversion(inode, 1);
ip->i_d.di_flags2 = 0;
ip->i_d.di_cowextsize = 0;
ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
@@ -1421,7 +1437,7 @@ xfs_link(
if (error)
goto std_return;
- xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
@@ -2214,7 +2230,7 @@ xfs_ifree_cluster(
xfs_buf_t *bp;
xfs_inode_t *ip;
xfs_inode_log_item_t *iip;
- xfs_log_item_t *lip;
+ struct xfs_log_item *lip;
struct xfs_perag *pag;
xfs_ino_t inum;
@@ -2272,8 +2288,7 @@ xfs_ifree_cluster(
* stale first, we will not attempt to lock them in the loop
* below as the XFS_ISTALE flag will be set.
*/
- lip = bp->b_fspriv;
- while (lip) {
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
if (lip->li_type == XFS_LI_INODE) {
iip = (xfs_inode_log_item_t *)lip;
ASSERT(iip->ili_logged == 1);
@@ -2283,7 +2298,6 @@ xfs_ifree_cluster(
&iip->ili_item.li_lsn);
xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
}
- lip = lip->li_bio_list;
}
@@ -2451,6 +2465,7 @@ xfs_ifree(
VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
ip->i_d.di_flags = 0;
+ ip->i_d.di_flags2 = 0;
ip->i_d.di_dmevmask = 0;
ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
@@ -2586,7 +2601,7 @@ xfs_remove(
goto std_return;
}
- xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -3479,6 +3494,36 @@ abort_out:
return error;
}
+/*
+ * If there are inline format data / attr forks attached to this inode,
+ * make sure they're not corrupt.
+ */
+bool
+xfs_inode_verify_forks(
+ struct xfs_inode *ip)
+{
+ struct xfs_ifork *ifp;
+ xfs_failaddr_t fa;
+
+ fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
+ if (fa) {
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
+ ifp->if_u1.if_data, ifp->if_bytes, fa);
+ return false;
+ }
+
+ fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
+ if (fa) {
+ ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
+ ifp ? ifp->if_u1.if_data : NULL,
+ ifp ? ifp->if_bytes : 0, fa);
+ return false;
+ }
+ return true;
+}
+
STATIC int
xfs_iflush_int(
struct xfs_inode *ip,
@@ -3501,7 +3546,7 @@ xfs_iflush_int(
if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
mp, XFS_ERRTAG_IFLUSH_1)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
- "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
+ "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
goto corrupt_out;
}
@@ -3511,7 +3556,7 @@ xfs_iflush_int(
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
mp, XFS_ERRTAG_IFLUSH_3)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
- "%s: Bad regular inode %Lu, ptr 0x%p",
+ "%s: Bad regular inode %Lu, ptr "PTR_FMT,
__func__, ip->i_ino, ip);
goto corrupt_out;
}
@@ -3522,7 +3567,7 @@ xfs_iflush_int(
(ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
mp, XFS_ERRTAG_IFLUSH_4)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
- "%s: Bad directory inode %Lu, ptr 0x%p",
+ "%s: Bad directory inode %Lu, ptr "PTR_FMT,
__func__, ip->i_ino, ip);
goto corrupt_out;
}
@@ -3531,7 +3576,7 @@ xfs_iflush_int(
ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
"%s: detected corrupt incore inode %Lu, "
- "total extents = %d, nblocks = %Ld, ptr 0x%p",
+ "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
__func__, ip->i_ino,
ip->i_d.di_nextents + ip->i_d.di_anextents,
ip->i_d.di_nblocks, ip);
@@ -3540,7 +3585,7 @@ xfs_iflush_int(
if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
mp, XFS_ERRTAG_IFLUSH_6)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
- "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
+ "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
goto corrupt_out;
}
@@ -3557,10 +3602,8 @@ xfs_iflush_int(
if (ip->i_d.di_version < 3)
ip->i_d.di_flushiter++;
- /* Check the inline directory data. */
- if (S_ISDIR(VFS_I(ip)->i_mode) &&
- ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
- xfs_dir2_sf_verify(ip))
+ /* Check the inline fork data before we write out. */
+ if (!xfs_inode_verify_forks(ip))
goto corrupt_out;
/*
@@ -3623,7 +3666,7 @@ xfs_iflush_int(
/* generate the checksum. */
xfs_dinode_calc_crc(mp, dip);
- ASSERT(bp->b_fspriv != NULL);
+ ASSERT(!list_empty(&bp->b_li_list));
ASSERT(bp->b_iodone != NULL);
return 0;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index d383e392ec9d..3e8dc990d41c 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -423,7 +423,8 @@ void xfs_iunpin_wait(xfs_inode_t *);
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
int xfs_iflush(struct xfs_inode *, struct xfs_buf **);
-void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
+void xfs_lock_two_inodes(struct xfs_inode *ip0, uint ip0_mode,
+ struct xfs_inode *ip1, uint ip1_mode);
xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
@@ -491,4 +492,6 @@ extern struct kmem_zone *xfs_inode_zone;
/* The default CoW extent size hint. */
#define XFS_DEFAULT_COWEXTSZ_HINT 32
+bool xfs_inode_verify_forks(struct xfs_inode *ip);
+
#endif /* __XFS_INODE_H__ */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 6ee5c3bf19ad..d5037f060d6f 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -30,6 +30,7 @@
#include "xfs_buf_item.h"
#include "xfs_log.h"
+#include <linux/iversion.h>
kmem_zone_t *xfs_ili_zone; /* inode log item zone */
@@ -354,7 +355,7 @@ xfs_inode_to_log_dinode(
to->di_next_unlinked = NULLAGINO;
if (from->di_version == 3) {
- to->di_changecount = inode->i_version;
+ to->di_changecount = inode_peek_iversion(inode);
to->di_crtime.t_sec = from->di_crtime.t_sec;
to->di_crtime.t_nsec = from->di_crtime.t_nsec;
to->di_flags2 = from->di_flags2;
@@ -521,7 +522,7 @@ xfs_inode_item_push(
if (!xfs_buf_trylock(bp))
return XFS_ITEM_LOCKED;
- if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
+ if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_unlock(bp);
@@ -712,37 +713,23 @@ xfs_iflush_done(
struct xfs_log_item *lip)
{
struct xfs_inode_log_item *iip;
- struct xfs_log_item *blip;
- struct xfs_log_item *next;
- struct xfs_log_item *prev;
+ struct xfs_log_item *blip, *n;
struct xfs_ail *ailp = lip->li_ailp;
int need_ail = 0;
+ LIST_HEAD(tmp);
/*
* Scan the buffer IO completions for other inodes being completed and
* attach them to the current inode log item.
*/
- blip = bp->b_fspriv;
- prev = NULL;
- while (blip != NULL) {
- if (blip->li_cb != xfs_iflush_done) {
- prev = blip;
- blip = blip->li_bio_list;
- continue;
- }
- /* remove from list */
- next = blip->li_bio_list;
- if (!prev) {
- bp->b_fspriv = next;
- } else {
- prev->li_bio_list = next;
- }
+ list_add_tail(&lip->li_bio_list, &tmp);
- /* add to current list */
- blip->li_bio_list = lip->li_bio_list;
- lip->li_bio_list = blip;
+ list_for_each_entry_safe(blip, n, &bp->b_li_list, li_bio_list) {
+ if (blip->li_cb != xfs_iflush_done)
+ continue;
+ list_move_tail(&blip->li_bio_list, &tmp);
/*
* while we have the item, do the unlocked check for needing
* the AIL lock.
@@ -751,8 +738,6 @@ xfs_iflush_done(
if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
(blip->li_flags & XFS_LI_FAILED))
need_ail++;
-
- blip = next;
}
/* make sure we capture the state of the initial inode. */
@@ -775,7 +760,7 @@ xfs_iflush_done(
/* this is an opencoded batch version of xfs_trans_ail_delete */
spin_lock(&ailp->xa_lock);
- for (blip = lip; blip; blip = blip->li_bio_list) {
+ list_for_each_entry(blip, &tmp, li_bio_list) {
if (INODE_ITEM(blip)->ili_logged &&
blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn)
mlip_changed |= xfs_ail_delete_one(ailp, blip);
@@ -801,15 +786,14 @@ xfs_iflush_done(
* ili_last_fields bits now that we know that the data corresponding to
* them is safely on disk.
*/
- for (blip = lip; blip; blip = next) {
- next = blip->li_bio_list;
- blip->li_bio_list = NULL;
-
+ list_for_each_entry_safe(blip, n, &tmp, li_bio_list) {
+ list_del_init(&blip->li_bio_list);
iip = INODE_ITEM(blip);
iip->ili_logged = 0;
iip->ili_last_fields = 0;
xfs_ifunlock(iip->ili_inode);
}
+ list_del(&tmp);
}
/*
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 20dc65fef6a4..89fb1eb80aae 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -45,6 +45,7 @@
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
#include "scrub/xfs_scrub.h"
+#include "xfs_sb.h"
#include <linux/capability.h>
#include <linux/cred.h>
@@ -809,7 +810,7 @@ xfs_ioc_fsgeometry_v1(
xfs_fsop_geom_t fsgeo;
int error;
- error = xfs_fs_geometry(mp, &fsgeo, 3);
+ error = xfs_fs_geometry(&mp->m_sb, &fsgeo, 3);
if (error)
return error;
@@ -831,7 +832,7 @@ xfs_ioc_fsgeometry(
xfs_fsop_geom_t fsgeo;
int error;
- error = xfs_fs_geometry(mp, &fsgeo, 4);
+ error = xfs_fs_geometry(&mp->m_sb, &fsgeo, 4);
if (error)
return error;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 35c79e246fde..10fbde359649 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -37,6 +37,7 @@
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
#include "xfs_trace.h"
+#include "xfs_sb.h"
#define _NATIVE_IOC(cmd, type) \
_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
@@ -66,7 +67,7 @@ xfs_compat_ioc_fsgeometry_v1(
xfs_fsop_geom_t fsgeo;
int error;
- error = xfs_fs_geometry(mp, &fsgeo, 3);
+ error = xfs_fs_geometry(&mp->m_sb, &fsgeo, 3);
if (error)
return error;
/* The 32-bit variant simply has some padding at the end */
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 7ab52a8bc0a9..66e1edbfb2b2 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1006,7 +1006,7 @@ xfs_file_iomap_begin(
}
ASSERT(offset <= mp->m_super->s_maxbytes);
- if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
+ if (offset > mp->m_super->s_maxbytes - length)
length = mp->m_super->s_maxbytes - offset;
offset_fsb = XFS_B_TO_FSBT(mp, offset);
end_fsb = XFS_B_TO_FSB(mp, offset + length);
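
The s_maxbytes hunk is an overflow fix: offset and length are each bounded by s_maxbytes, but their sum need not fit in a signed 64-bit type, so the old "(xfs_fsize_t)offset + length > s_maxbytes" comparison could overflow and silently skip the clamp. Rearranging it as "offset > s_maxbytes - length" keeps every intermediate value in range. A small demonstration with illustrative values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Illustrative values: a file size limit at the signed-64 max. */
		int64_t maxbytes = INT64_MAX;
		int64_t offset   = INT64_MAX - 1024;	/* valid: <= maxbytes */
		int64_t length   = 4096;

		/*
		 * "offset + length > maxbytes" would overflow the signed
		 * addition for these inputs (undefined behaviour), so the
		 * clamp could be skipped. The rearranged test never adds:
		 * offset and length are each <= maxbytes, so the
		 * subtraction cannot wrap.
		 */
		if (offset > maxbytes - length)
			length = maxbytes - offset;

		printf("clamped length = %lld\n", (long long)length); /* 1024 */
		return 0;
	}
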
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 99562ec0de56..bee51a14a906 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -285,8 +285,22 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
#define XFS_IS_REALTIME_INODE(ip) \
(((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
(ip)->i_mount->m_rtdev_targp)
+#define XFS_IS_REALTIME_MOUNT(mp) ((mp)->m_rtdev_targp ? 1 : 0)
#else
#define XFS_IS_REALTIME_INODE(ip) (0)
+#define XFS_IS_REALTIME_MOUNT(mp) (0)
+#endif
+
+/*
+ * Starting in Linux 4.15, the %p (raw pointer value) printk modifier
+ * prints a hashed version of the pointer to avoid leaking kernel
+ * pointers into dmesg. If we're trying to debug the kernel we want the
+ * raw values, so override this behavior as best we can.
+ */
+#ifdef DEBUG
+# define PTR_FMT "%px"
+#else
+# define PTR_FMT "%p"
#endif
#endif /* __XFS_LINUX__ */
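
PTR_FMT works because C concatenates adjacent string literals at compile time, so the per-build specifier is spliced into each message with no runtime cost. A user-space model of the mechanism follows; libc printf has no %px, so both branches here fall back to %p and only the concatenation trick is being shown:

	#include <stdio.h>

	#ifdef DEBUG
	# define PTR_FMT "%p"	/* the kernel would use "%px" here */
	#else
	# define PTR_FMT "%p"
	#endif

	int main(void)
	{
		int x;

		/* Adjacent string literals merge at compile time, so the
		 * build-dependent specifier lands inside the format. */
		printf("ptr = " PTR_FMT "\n", (void *)&x);
		return 0;
	}
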
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a503af96d780..3e5ba1ecc080 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1047,6 +1047,7 @@ xfs_log_item_init(
INIT_LIST_HEAD(&item->li_ail);
INIT_LIST_HEAD(&item->li_cil);
+ INIT_LIST_HEAD(&item->li_bio_list);
}
/*
@@ -1242,7 +1243,7 @@ xlog_space_left(
static void
xlog_iodone(xfs_buf_t *bp)
{
- struct xlog_in_core *iclog = bp->b_fspriv;
+ struct xlog_in_core *iclog = bp->b_log_item;
struct xlog *l = iclog->ic_log;
int aborted = 0;
@@ -1773,7 +1774,7 @@ STATIC int
xlog_bdstrat(
struct xfs_buf *bp)
{
- struct xlog_in_core *iclog = bp->b_fspriv;
+ struct xlog_in_core *iclog = bp->b_log_item;
xfs_buf_lock(bp);
if (iclog->ic_state & XLOG_STATE_IOERROR) {
@@ -1919,7 +1920,7 @@ xlog_sync(
}
bp->b_io_length = BTOBB(count);
- bp->b_fspriv = iclog;
+ bp->b_log_item = iclog;
bp->b_flags &= ~XBF_FLUSH;
bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA);
@@ -1958,7 +1959,7 @@ xlog_sync(
XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */
xfs_buf_associate_memory(bp,
(char *)&iclog->ic_header + count, split);
- bp->b_fspriv = iclog;
+ bp->b_log_item = iclog;
bp->b_flags &= ~XBF_FLUSH;
bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA);
@@ -2117,7 +2118,9 @@ xlog_print_trans(
/* dump core transaction and ticket info */
xfs_warn(mp, "transaction summary:");
- xfs_warn(mp, " flags = 0x%x", tp->t_flags);
+ xfs_warn(mp, " log res = %d", tp->t_log_res);
+ xfs_warn(mp, " log count = %d", tp->t_log_count);
+ xfs_warn(mp, " flags = 0x%x", tp->t_flags);
xlog_print_tic_res(mp, tp->t_ticket);
@@ -2242,7 +2245,7 @@ xlog_write_setup_ophdr(
break;
default:
xfs_warn(log->l_mp,
- "Bad XFS transaction clientid 0x%x in ticket 0x%p",
+ "Bad XFS transaction clientid 0x%x in ticket "PTR_FMT,
ophdr->oh_clientid, ticket);
return NULL;
}
@@ -3924,7 +3927,7 @@ xlog_verify_iclog(
}
if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
xfs_warn(log->l_mp,
- "%s: invalid clientid %d op 0x%p offset 0x%lx",
+ "%s: invalid clientid %d op "PTR_FMT" offset 0x%lx",
__func__, clientid, ophead,
(unsigned long)field_offset);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 28d1abfe835e..00240c9ee72e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -400,9 +400,9 @@ xlog_recover_iodone(
* On v5 supers, a bli could be attached to update the metadata LSN.
* Clean it up.
*/
- if (bp->b_fspriv)
+ if (bp->b_log_item)
xfs_buf_item_relse(bp);
- ASSERT(bp->b_fspriv == NULL);
+ ASSERT(bp->b_log_item == NULL);
bp->b_iodone = NULL;
xfs_buf_ioend(bp);
@@ -2218,7 +2218,7 @@ xlog_recover_do_inode_buffer(
next_unlinked_offset - reg_buf_offset;
if (unlikely(*logged_nextp == 0)) {
xfs_alert(mp,
- "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
+ "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
"Trying to replay bad (0) inode di_next_unlinked field.",
item, bp);
XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
@@ -2630,7 +2630,7 @@ xlog_recover_validate_buf_type(
ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
bp->b_iodone = xlog_recover_iodone;
xfs_buf_item_init(bp, mp);
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
bip->bli_item.li_lsn = current_lsn;
}
}
@@ -2652,7 +2652,7 @@ xlog_recover_do_reg_buffer(
int i;
int bit;
int nbits;
- int error;
+ xfs_failaddr_t fa;
trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
@@ -2687,7 +2687,7 @@ xlog_recover_do_reg_buffer(
* the first dquot in the buffer should do. XXXThis is
* probably a good thing to do for other buf types also.
*/
- error = 0;
+ fa = NULL;
if (buf_f->blf_flags &
(XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
if (item->ri_buf[i].i_addr == NULL) {
@@ -2701,11 +2701,14 @@ xlog_recover_do_reg_buffer(
item->ri_buf[i].i_len, __func__);
goto next;
}
- error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
- -1, 0, XFS_QMOPT_DOWARN,
- "dquot_buf_recover");
- if (error)
+ fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
+ -1, 0, 0);
+ if (fa) {
+ xfs_alert(mp,
+ "dquot corrupt at %pS trying to replay into block 0x%llx",
+ fa, bp->b_bn);
goto next;
+ }
}
memcpy(xfs_buf_offset(bp,
@@ -2957,6 +2960,10 @@ xfs_recover_inode_owner_change(
if (error)
goto out_free_ip;
+ if (!xfs_inode_verify_forks(ip)) {
+ error = -EFSCORRUPTED;
+ goto out_free_ip;
+ }
if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
@@ -3042,7 +3049,7 @@ xlog_recover_inode_pass2(
*/
if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
xfs_alert(mp,
- "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
+ "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
__func__, dip, bp, in_f->ilf_ino);
XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
XFS_ERRLEVEL_LOW, mp);
@@ -3052,7 +3059,7 @@ xlog_recover_inode_pass2(
ldip = item->ri_buf[1].i_addr;
if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
xfs_alert(mp,
- "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
+ "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
__func__, item, in_f->ilf_ino);
XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
XFS_ERRLEVEL_LOW, mp);
@@ -3110,8 +3117,8 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad regular inode log record, rec ptr 0x%p, "
- "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
+ "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
+ "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
__func__, item, dip, bp, in_f->ilf_ino);
error = -EFSCORRUPTED;
goto out_release;
@@ -3123,8 +3130,8 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad dir inode log record, rec ptr 0x%p, "
- "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
+ "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
+ "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
__func__, item, dip, bp, in_f->ilf_ino);
error = -EFSCORRUPTED;
goto out_release;
@@ -3134,8 +3141,8 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
- "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
+ "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
+ "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
__func__, item, dip, bp, in_f->ilf_ino,
ldip->di_nextents + ldip->di_anextents,
ldip->di_nblocks);
@@ -3146,8 +3153,8 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
- "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
+ "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
+ "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
error = -EFSCORRUPTED;
goto out_release;
@@ -3157,7 +3164,7 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad inode log record length %d, rec ptr 0x%p",
+ "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
__func__, item->ri_buf[1].i_len, item);
error = -EFSCORRUPTED;
goto out_release;
@@ -3303,6 +3310,7 @@ xlog_recover_dquot_pass2(
xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
struct xfs_disk_dquot *ddq, *recddq;
+ xfs_failaddr_t fa;
int error;
xfs_dq_logformat_t *dq_f;
uint type;
@@ -3345,10 +3353,12 @@ xlog_recover_dquot_pass2(
*/
dq_f = item->ri_buf[0].i_addr;
ASSERT(dq_f);
- error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
- "xlog_recover_dquot_pass2 (log copy)");
- if (error)
+ fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0, 0);
+ if (fa) {
+ xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
+ dq_f->qlf_id, fa);
return -EIO;
+ }
ASSERT(dq_f->qlf_len == 1);
/*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index c879b517cc94..98fd41cbb9e1 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -162,6 +162,7 @@ xfs_free_perag(
ASSERT(pag);
ASSERT(atomic_read(&pag->pag_ref) == 0);
xfs_buf_hash_destroy(pag);
+ mutex_destroy(&pag->pag_ici_reclaim_lock);
call_rcu(&pag->rcu_head, __xfs_free_perag);
}
}
@@ -248,6 +249,7 @@ xfs_initialize_perag(
out_hash_destroy:
xfs_buf_hash_destroy(pag);
out_free_pag:
+ mutex_destroy(&pag->pag_ici_reclaim_lock);
kmem_free(pag);
out_unwind_new_pags:
/* unwind any prior newly initialized pags */
@@ -256,6 +258,7 @@ out_unwind_new_pags:
if (!pag)
break;
xfs_buf_hash_destroy(pag);
+ mutex_destroy(&pag->pag_ici_reclaim_lock);
kmem_free(pag);
}
return error;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index ec952dfad359..5b848f4b637f 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -48,7 +48,7 @@
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
-
+STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
* We use the batch lookup interface to iterate over the dquots as it
@@ -162,7 +162,7 @@ xfs_qm_dqpurge(
*/
error = xfs_qm_dqflush(dqp, &bp);
if (error) {
- xfs_warn(mp, "%s: dquot %p flush failed",
+ xfs_warn(mp, "%s: dquot "PTR_FMT" flush failed",
__func__, dqp);
} else {
error = xfs_bwrite(bp);
@@ -291,8 +291,7 @@ xfs_qm_dqattach_one(
* exist on disk and we didn't ask it to allocate; ESRCH if quotas got
* turned off suddenly.
*/
- error = xfs_qm_dqget(ip->i_mount, ip, id, type,
- doalloc | XFS_QMOPT_DOWARN, &dqp);
+ error = xfs_qm_dqget(ip->i_mount, ip, id, type, doalloc, &dqp);
if (error)
return error;
@@ -481,7 +480,7 @@ xfs_qm_dquot_isolate(
error = xfs_qm_dqflush(dqp, &bp);
if (error) {
- xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
+ xfs_warn(dqp->q_mount, "%s: dquot "PTR_FMT" flush failed",
__func__, dqp);
goto out_unlock_dirty;
}
@@ -574,7 +573,7 @@ xfs_qm_set_defquota(
struct xfs_def_quota *defq;
int error;
- error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);
+ error = xfs_qm_dqread(mp, 0, type, 0, &dqp);
if (!error) {
xfs_disk_dquot_t *ddqp = &dqp->q_core;
@@ -652,7 +651,7 @@ xfs_qm_init_quotainfo(
XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
(XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
XFS_DQ_PROJ),
- XFS_QMOPT_DOWARN, &dqp);
+ 0, &dqp);
if (!error) {
xfs_disk_dquot_t *ddqp = &dqp->q_core;
@@ -695,9 +694,17 @@ xfs_qm_init_quotainfo(
qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
- register_shrinker(&qinf->qi_shrinker);
+
+ error = register_shrinker(&qinf->qi_shrinker);
+ if (error)
+ goto out_free_inos;
+
return 0;
+out_free_inos:
+ mutex_destroy(&qinf->qi_quotaofflock);
+ mutex_destroy(&qinf->qi_tree_lock);
+ xfs_qm_destroy_quotainos(qinf);
out_free_lru:
list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
@@ -706,7 +713,6 @@ out_free_qinf:
return error;
}
-
/*
* Gets called when unmounting a filesystem or when all quotas get
* turned off.
@@ -723,19 +729,8 @@ xfs_qm_destroy_quotainfo(
unregister_shrinker(&qi->qi_shrinker);
list_lru_destroy(&qi->qi_lru);
-
- if (qi->qi_uquotaip) {
- IRELE(qi->qi_uquotaip);
- qi->qi_uquotaip = NULL; /* paranoia */
- }
- if (qi->qi_gquotaip) {
- IRELE(qi->qi_gquotaip);
- qi->qi_gquotaip = NULL;
- }
- if (qi->qi_pquotaip) {
- IRELE(qi->qi_pquotaip);
- qi->qi_pquotaip = NULL;
- }
+ xfs_qm_destroy_quotainos(qi);
+ mutex_destroy(&qi->qi_tree_lock);
mutex_destroy(&qi->qi_quotaofflock);
kmem_free(qi);
mp->m_quotainfo = NULL;
@@ -847,6 +842,7 @@ xfs_qm_reset_dqcounts(
{
struct xfs_dqblk *dqb;
int j;
+ xfs_failaddr_t fa;
trace_xfs_reset_dqcounts(bp, _RET_IP_);
@@ -868,10 +864,13 @@ xfs_qm_reset_dqcounts(
/*
* Do a sanity check, and if needed, repair the dqblk. Don't
* output any warnings because it's perfectly possible to
- * find uninitialised dquot blks. See comment in xfs_dqcheck.
+ * find uninitialised dquot blks. See comment in
+ * xfs_dquot_verify.
*/
- xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
- "xfs_quotacheck");
+ fa = xfs_dquot_verify(mp, ddq, id + j, type, 0);
+ if (fa)
+ xfs_dquot_repair(mp, ddq, id + j, type);
+
/*
* Reset type in case we are reusing group quota file for
* project quotas or vice versa
@@ -1078,8 +1077,7 @@ xfs_qm_quotacheck_dqadjust(
struct xfs_dquot *dqp;
int error;
- error = xfs_qm_dqget(mp, ip, id, type,
- XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
+ error = xfs_qm_dqget(mp, ip, id, type, XFS_QMOPT_DQALLOC, &dqp);
if (error) {
/*
* Shouldn't be able to turn off quotas here.
@@ -1600,6 +1598,24 @@ error_rele:
}
STATIC void
+xfs_qm_destroy_quotainos(
+ xfs_quotainfo_t *qi)
+{
+ if (qi->qi_uquotaip) {
+ IRELE(qi->qi_uquotaip);
+ qi->qi_uquotaip = NULL; /* paranoia */
+ }
+ if (qi->qi_gquotaip) {
+ IRELE(qi->qi_gquotaip);
+ qi->qi_gquotaip = NULL;
+ }
+ if (qi->qi_pquotaip) {
+ IRELE(qi->qi_pquotaip);
+ qi->qi_pquotaip = NULL;
+ }
+}
+
+STATIC void
xfs_qm_dqfree_one(
struct xfs_dquot *dqp)
{
@@ -1682,8 +1698,7 @@ xfs_qm_vop_dqalloc(
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, NULL, uid,
XFS_DQ_USER,
- XFS_QMOPT_DQALLOC |
- XFS_QMOPT_DOWARN,
+ XFS_QMOPT_DQALLOC,
&uq);
if (error) {
ASSERT(error != -ENOENT);
@@ -1709,8 +1724,7 @@ xfs_qm_vop_dqalloc(
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, NULL, gid,
XFS_DQ_GROUP,
- XFS_QMOPT_DQALLOC |
- XFS_QMOPT_DOWARN,
+ XFS_QMOPT_DQALLOC,
&gq);
if (error) {
ASSERT(error != -ENOENT);
@@ -1729,8 +1743,7 @@ xfs_qm_vop_dqalloc(
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
XFS_DQ_PROJ,
- XFS_QMOPT_DQALLOC |
- XFS_QMOPT_DOWARN,
+ XFS_QMOPT_DQALLOC,
&pq);
if (error) {
ASSERT(error != -ENOENT);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 47aea2e82c26..270246943a06 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -464,6 +464,13 @@ retry:
error = xfs_trans_commit(tp);
if (error)
return error;
+
+ /*
+ * Allocation succeeded but the requested range was not even partially
+ * satisfied? Bail out!
+ */
+ if (nimaps == 0)
+ return -ENOSPC;
convert:
return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb,
&dfops);
@@ -599,10 +606,6 @@ xfs_reflink_cancel_cow_blocks(
del.br_startblock, del.br_blockcount,
NULL);
- /* Update quota accounting */
- xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
- -(long)del.br_blockcount);
-
/* Roll the transaction */
xfs_defer_ijoin(&dfops, ip);
error = xfs_defer_finish(tpp, &dfops);
@@ -613,6 +616,13 @@ xfs_reflink_cancel_cow_blocks(
/* Remove the mapping from the CoW fork. */
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
+
+ /* Remove the quota reservation */
+ error = xfs_trans_reserve_quota_nblks(NULL, ip,
+ -(long)del.br_blockcount, 0,
+ XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ break;
} else {
/* Didn't do anything, push cursor back. */
xfs_iext_prev(ifp, &icur);
@@ -795,6 +805,10 @@ xfs_reflink_end_cow(
if (error)
goto out_defer;
+ /* Charge this new data fork mapping to the on-disk quota. */
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
+ (long)del.br_blockcount);
+
/* Remove the mapping from the CoW fork. */
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
@@ -944,7 +958,7 @@ xfs_reflink_set_inode_flag(
if (src->i_ino == dest->i_ino)
xfs_ilock(src, XFS_ILOCK_EXCL);
else
- xfs_lock_two_inodes(src, dest, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);
if (!xfs_is_reflink_inode(src)) {
trace_xfs_reflink_set_inode_flag(src);
@@ -1202,13 +1216,16 @@ xfs_reflink_remap_blocks(
/* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
while (len) {
+ uint lock_mode;
+
trace_xfs_reflink_remap_blocks_loop(src, srcoff, len,
dest, destoff);
+
/* Read extent from the source file */
nimaps = 1;
- xfs_ilock(src, XFS_ILOCK_EXCL);
+ lock_mode = xfs_ilock_data_map_shared(src);
error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
- xfs_iunlock(src, XFS_ILOCK_EXCL);
+ xfs_iunlock(src, lock_mode);
if (error)
goto err;
ASSERT(nimaps == 1);
@@ -1245,6 +1262,50 @@ err:
}
/*
+ * Grab the exclusive iolock for a data copy from src to dest, making
+ * sure to abide by the vfs locking order (lowest pointer value goes first) and
+ * breaking the pnfs layout leases on dest before proceeding. The loop
+ * is needed because we cannot call the blocking break_layout() with the
+ * src iolock held, and therefore have to back out both locks.
+ */
+static int
+xfs_iolock_two_inodes_and_break_layout(
+ struct inode *src,
+ struct inode *dest)
+{
+ int error;
+
+retry:
+ if (src < dest) {
+ inode_lock_shared(src);
+ inode_lock_nested(dest, I_MUTEX_NONDIR2);
+ } else {
+ /* src >= dest */
+ inode_lock(dest);
+ }
+
+ error = break_layout(dest, false);
+ if (error == -EWOULDBLOCK) {
+ inode_unlock(dest);
+ if (src < dest)
+ inode_unlock_shared(src);
+ error = break_layout(dest, true);
+ if (error)
+ return error;
+ goto retry;
+ }
+ if (error) {
+ inode_unlock(dest);
+ if (src < dest)
+ inode_unlock_shared(src);
+ return error;
+ }
+ if (src > dest)
+ inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
+ return 0;
+}
+
+/*
* Link a range of blocks from one file to another.
*/
int
@@ -1274,11 +1335,14 @@ xfs_reflink_remap_range(
return -EIO;
/* Lock both files against IO */
- lock_two_nondirectories(inode_in, inode_out);
+ ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
+ if (ret)
+ return ret;
if (same_inode)
xfs_ilock(src, XFS_MMAPLOCK_EXCL);
else
- xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
+ xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
+ XFS_MMAPLOCK_EXCL);
/* Check file eligibility and prepare for block sharing. */
ret = -EINVAL;
@@ -1295,6 +1359,11 @@ xfs_reflink_remap_range(
if (ret <= 0)
goto out_unlock;
+ /* Attach dquots to dest inode before changing block map */
+ ret = xfs_qm_dqattach(dest, 0);
+ if (ret)
+ goto out_unlock;
+
trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
/*
@@ -1341,10 +1410,12 @@ xfs_reflink_remap_range(
is_dedupe);
out_unlock:
- xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
+ xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+ if (!same_inode)
+ xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+ inode_unlock(inode_out);
if (!same_inode)
- xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
- unlock_two_nondirectories(inode_in, inode_out);
+ inode_unlock_shared(inode_in);
if (ret)
trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
return ret;
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index 3f30f846d7f2..dfee3c991155 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -139,6 +139,9 @@ int xfs_rtalloc_query_all(struct xfs_trans *tp,
xfs_rtalloc_query_range_fn fn,
void *priv);
bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
+int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtblock_t start, xfs_extlen_t len,
+ bool *is_free);
#else
# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS)
# define xfs_rtfree_extent(t,b,l) (ENOSYS)
@@ -148,6 +151,7 @@ bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
# define xfs_rtalloc_query_all(t,f,p) (ENOSYS)
# define xfs_rtbuf_get(m,t,b,i,p) (ENOSYS)
# define xfs_verify_rtbno(m, r) (false)
+# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (ENOSYS)
static inline int /* error */
xfs_rtmount_init(
xfs_mount_t *mp) /* file system mount structure */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 1dacccc367f8..f3e0001f9992 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1153,6 +1153,14 @@ xfs_fs_statfs(
((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
(XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
xfs_qm_statvfs(ip, statp);
+
+ if (XFS_IS_REALTIME_MOUNT(mp) &&
+ (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
+ statp->f_blocks = sbp->sb_rblocks;
+ statp->f_bavail = statp->f_bfree =
+ sbp->sb_frextents * sbp->sb_rextsize;
+ }
+
return 0;
}
@@ -1660,7 +1668,7 @@ xfs_fs_fill_super(
}
if (xfs_sb_version_hasreflink(&mp->m_sb))
xfs_alert(mp,
- "DAX and reflink have not been tested together!");
+ "DAX and reflink cannot be used together!");
}
if (mp->m_flags & XFS_MOUNT_DISCARD) {
@@ -1684,10 +1692,6 @@ xfs_fs_fill_super(
"EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
}
- if (xfs_sb_version_hasreflink(&mp->m_sb))
- xfs_alert(mp,
- "EXPERIMENTAL reflink feature enabled. Use at your own risk!");
-
error = xfs_mountfs(mp);
if (error)
goto out_filestream_unmount;
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index d718a10c2271..945de08af7ba 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(xfs_attr_list_class,
__entry->flags = ctx->flags;
),
TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
- "alist 0x%p size %u count %u firstu %u flags %d %s",
+ "alist %p size %u count %u firstu %u flags %d %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->hashval,
@@ -119,7 +119,7 @@ DECLARE_EVENT_CLASS(xfs_perag_class,
__entry->refcount = refcount;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d agno %u refcount %d caller %ps",
+ TP_printk("dev %d:%d agno %u refcount %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->refcount,
@@ -200,7 +200,7 @@ TRACE_EVENT(xfs_attr_list_node_descend,
__entry->bt_before = be32_to_cpu(btree->before);
),
TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
- "alist 0x%p size %u count %u firstu %u flags %d %s "
+ "alist %p size %u count %u firstu %u flags %d %s "
"node hashval %u, node before %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
@@ -251,8 +251,8 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
__entry->bmap_state = state;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx state %s cur 0x%p/%d "
- "offset %lld block %lld count %lld flag %d caller %ps",
+ TP_printk("dev %d:%d ino 0x%llx state %s cur %p/%d "
+ "offset %lld block %lld count %lld flag %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
@@ -301,7 +301,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx nblks 0x%x hold %d pincount %d "
- "lock %d flags %s caller %ps",
+ "lock %d flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
__entry->nblks,
@@ -370,7 +370,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
- "lock %d flags %s caller %ps",
+ "lock %d flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
__entry->buffer_length,
@@ -390,7 +390,7 @@ DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
TRACE_EVENT(xfs_buf_ioerror,
- TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
+ TP_PROTO(struct xfs_buf *bp, int error, xfs_failaddr_t caller_ip),
TP_ARGS(bp, error, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -401,7 +401,7 @@ TRACE_EVENT(xfs_buf_ioerror,
__field(int, pincount)
__field(unsigned, lockval)
__field(int, error)
- __field(unsigned long, caller_ip)
+ __field(xfs_failaddr_t, caller_ip)
),
TP_fast_assign(
__entry->dev = bp->b_target->bt_dev;
@@ -415,7 +415,7 @@ TRACE_EVENT(xfs_buf_ioerror,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
- "lock %d error %d flags %s caller %ps",
+ "lock %d error %d flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
__entry->buffer_length,
@@ -460,7 +460,7 @@ DECLARE_EVENT_CLASS(xfs_buf_item_class,
),
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
"lock %d flags %s recur %d refcount %d bliflags %s "
- "lidesc 0x%p liflags %s",
+ "lidesc %p liflags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->buf_bno,
__entry->buf_len,
@@ -579,7 +579,7 @@ DECLARE_EVENT_CLASS(xfs_lock_class,
__entry->lock_flags = lock_flags;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx flags %s caller %ps",
+ TP_printk("dev %d:%d ino 0x%llx flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
@@ -697,7 +697,7 @@ DECLARE_EVENT_CLASS(xfs_iref_class,
__entry->pincount = atomic_read(&ip->i_pincount);
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %ps",
+ TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->count,
@@ -1028,7 +1028,7 @@ DECLARE_EVENT_CLASS(xfs_log_item_class,
__entry->flags = lip->li_flags;
__entry->lsn = lip->li_lsn;
),
- TP_printk("dev %d:%d lip 0x%p lsn %d/%d type %s flags %s",
+ TP_printk("dev %d:%d lip %p lsn %d/%d type %s flags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->lip,
CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
@@ -1049,7 +1049,7 @@ TRACE_EVENT(xfs_log_force,
__entry->lsn = lsn;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d lsn 0x%llx caller %ps",
+ TP_printk("dev %d:%d lsn 0x%llx caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->lsn, (void *)__entry->caller_ip)
)
@@ -1082,7 +1082,7 @@ DECLARE_EVENT_CLASS(xfs_ail_class,
__entry->old_lsn = old_lsn;
__entry->new_lsn = new_lsn;
),
- TP_printk("dev %d:%d lip 0x%p old lsn %d/%d new lsn %d/%d type %s flags %s",
+ TP_printk("dev %d:%d lip %p old lsn %d/%d new lsn %d/%d type %s flags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->lip,
CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
@@ -1403,7 +1403,7 @@ TRACE_EVENT(xfs_bunmap,
__entry->flags = flags;
),
TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx"
- "flags %s caller %ps",
+ "flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->size,
@@ -1517,7 +1517,7 @@ TRACE_EVENT(xfs_agf,
),
TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
"levels b %u c %u flfirst %u fllast %u flcount %u "
- "freeblks %u longest %u caller %ps",
+ "freeblks %u longest %u caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
@@ -2014,7 +2014,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
__entry->count = item->ri_cnt;
__entry->total = item->ri_total;
),
- TP_printk("dev %d:%d tid 0x%x lsn 0x%llx, pass %d, item 0x%p, "
+ TP_printk("dev %d:%d tid 0x%x lsn 0x%llx, pass %d, item %p, "
"item type %s item region count/total %d/%d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->tid,
@@ -2486,7 +2486,7 @@ DECLARE_EVENT_CLASS(xfs_ag_error_class,
__entry->error = error;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d agno %u error %d caller %ps",
+ TP_printk("dev %d:%d agno %u error %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->error,
@@ -2977,7 +2977,7 @@ DECLARE_EVENT_CLASS(xfs_inode_error_class,
__entry->error = error;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino %llx error %d caller %ps",
+ TP_printk("dev %d:%d ino %llx error %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->error,
@@ -3313,6 +3313,32 @@ DEFINE_GETFSMAP_EVENT(xfs_getfsmap_low_key);
DEFINE_GETFSMAP_EVENT(xfs_getfsmap_high_key);
DEFINE_GETFSMAP_EVENT(xfs_getfsmap_mapping);
+TRACE_EVENT(xfs_trans_resv_calc,
+ TP_PROTO(struct xfs_mount *mp, unsigned int type,
+ struct xfs_trans_res *res),
+ TP_ARGS(mp, type, res),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(uint, logres)
+ __field(int, logcount)
+ __field(int, logflags)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->type = type;
+ __entry->logres = res->tr_logres;
+ __entry->logcount = res->tr_logcount;
+ __entry->logflags = res->tr_logflags;
+ ),
+ TP_printk("dev %d:%d type %d logres %u logcount %d flags 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->logres,
+ __entry->logcount,
+ __entry->logflags)
+);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index a87f657f59c9..86f92df32c42 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -35,6 +35,27 @@
kmem_zone_t *xfs_trans_zone;
kmem_zone_t *xfs_log_item_desc_zone;
+#if defined(CONFIG_TRACEPOINTS)
+static void
+xfs_trans_trace_reservations(
+ struct xfs_mount *mp)
+{
+ struct xfs_trans_res resv;
+ struct xfs_trans_res *res;
+ struct xfs_trans_res *end_res;
+ int i;
+
+ res = (struct xfs_trans_res *)M_RES(mp);
+ end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
+ for (i = 0; res < end_res; i++, res++)
+ trace_xfs_trans_resv_calc(mp, i, res);
+ xfs_log_get_max_trans_res(mp, &resv);
+ trace_xfs_trans_resv_calc(mp, -1, &resv);
+}
+#else
+# define xfs_trans_trace_reservations(mp)
+#endif
+
/*
* Initialize the precomputed transaction reservation values
* in the mount structure.
@@ -44,6 +65,7 @@ xfs_trans_init(
struct xfs_mount *mp)
{
xfs_trans_resv_calc(mp, M_RES(mp));
+ xfs_trans_trace_reservations(mp);
}
/*
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 815b53d20e26..9d542dfe0052 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -50,7 +50,7 @@ typedef struct xfs_log_item {
uint li_type; /* item type */
uint li_flags; /* misc flags */
struct xfs_buf *li_buf; /* real buffer pointer */
- struct xfs_log_item *li_bio_list; /* buffer item list */
+ struct list_head li_bio_list; /* buffer item list */
void (*li_cb)(struct xfs_buf *,
struct xfs_log_item *);
/* buffer item iodone */
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 3ba7a96a8abd..653ce379d36b 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -82,12 +82,12 @@ _xfs_trans_bjoin(
ASSERT(bp->b_transp == NULL);
/*
- * The xfs_buf_log_item pointer is stored in b_fsprivate. If
+ * The xfs_buf_log_item pointer is stored in b_log_item. If
* it doesn't have one yet, then allocate one and initialize it.
* The checks to see if one is there are in xfs_buf_item_init().
*/
xfs_buf_item_init(bp, tp->t_mountp);
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
@@ -118,7 +118,7 @@ xfs_trans_bjoin(
struct xfs_buf *bp)
{
_xfs_trans_bjoin(tp, bp, 0);
- trace_xfs_trans_bjoin(bp->b_fspriv);
+ trace_xfs_trans_bjoin(bp->b_log_item);
}
/*
@@ -139,7 +139,7 @@ xfs_trans_get_buf_map(
xfs_buf_flags_t flags)
{
xfs_buf_t *bp;
- xfs_buf_log_item_t *bip;
+ struct xfs_buf_log_item *bip;
if (!tp)
return xfs_buf_get_map(target, map, nmaps, flags);
@@ -159,7 +159,7 @@ xfs_trans_get_buf_map(
}
ASSERT(bp->b_transp == tp);
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
@@ -175,7 +175,7 @@ xfs_trans_get_buf_map(
ASSERT(!bp->b_error);
_xfs_trans_bjoin(tp, bp, 1);
- trace_xfs_trans_get_buf(bp->b_fspriv);
+ trace_xfs_trans_get_buf(bp->b_log_item);
return bp;
}
@@ -188,12 +188,13 @@ xfs_trans_get_buf_map(
* mount structure.
*/
xfs_buf_t *
-xfs_trans_getsb(xfs_trans_t *tp,
- struct xfs_mount *mp,
- int flags)
+xfs_trans_getsb(
+ xfs_trans_t *tp,
+ struct xfs_mount *mp,
+ int flags)
{
xfs_buf_t *bp;
- xfs_buf_log_item_t *bip;
+ struct xfs_buf_log_item *bip;
/*
* Default to just trying to lock the superblock buffer
@@ -210,7 +211,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
*/
bp = mp->m_sb_bp;
if (bp->b_transp == tp) {
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
@@ -223,7 +224,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
return NULL;
_xfs_trans_bjoin(tp, bp, 1);
- trace_xfs_trans_getsb(bp->b_fspriv);
+ trace_xfs_trans_getsb(bp->b_log_item);
return bp;
}
@@ -266,7 +267,7 @@ xfs_trans_read_buf_map(
if (bp) {
ASSERT(xfs_buf_islocked(bp));
ASSERT(bp->b_transp == tp);
- ASSERT(bp->b_fspriv != NULL);
+ ASSERT(bp->b_log_item != NULL);
ASSERT(!bp->b_error);
ASSERT(bp->b_flags & XBF_DONE);
@@ -279,7 +280,7 @@ xfs_trans_read_buf_map(
return -EIO;
}
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
bip->bli_recur++;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
@@ -329,7 +330,7 @@ xfs_trans_read_buf_map(
if (tp) {
_xfs_trans_bjoin(tp, bp, 1);
- trace_xfs_trans_read_buf(bp->b_fspriv);
+ trace_xfs_trans_read_buf(bp->b_log_item);
}
*bpp = bp;
return 0;
@@ -352,10 +353,11 @@ xfs_trans_read_buf_map(
* brelse() call.
*/
void
-xfs_trans_brelse(xfs_trans_t *tp,
- xfs_buf_t *bp)
+xfs_trans_brelse(
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip;
+ struct xfs_buf_log_item *bip;
int freed;
/*
@@ -368,7 +370,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
}
ASSERT(bp->b_transp == tp);
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
@@ -456,10 +458,11 @@ xfs_trans_brelse(xfs_trans_t *tp,
*/
/* ARGSUSED */
void
-xfs_trans_bhold(xfs_trans_t *tp,
- xfs_buf_t *bp)
+xfs_trans_bhold(
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -476,10 +479,11 @@ xfs_trans_bhold(xfs_trans_t *tp,
* for this transaction.
*/
void
-xfs_trans_bhold_release(xfs_trans_t *tp,
- xfs_buf_t *bp)
+xfs_trans_bhold_release(
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -500,7 +504,7 @@ xfs_trans_dirty_buf(
struct xfs_trans *tp,
struct xfs_buf *bp)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -557,7 +561,7 @@ xfs_trans_log_buf(
uint first,
uint last)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(first <= last && last < BBTOB(bp->b_length));
ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));
@@ -600,10 +604,10 @@ xfs_trans_log_buf(
*/
void
xfs_trans_binval(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
int i;
ASSERT(bp->b_transp == tp);
@@ -655,10 +659,10 @@ xfs_trans_binval(
*/
void
xfs_trans_inode_buf(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -679,10 +683,10 @@ xfs_trans_inode_buf(
*/
void
xfs_trans_stale_inode_buf(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -704,10 +708,10 @@ xfs_trans_stale_inode_buf(
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -729,7 +733,7 @@ xfs_trans_ordered_buf(
struct xfs_trans *tp,
struct xfs_buf *bp)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -759,7 +763,7 @@ xfs_trans_buf_set_type(
struct xfs_buf *bp,
enum xfs_blft type)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
if (!tp)
return;
@@ -776,8 +780,8 @@ xfs_trans_buf_copy_type(
struct xfs_buf *dst_bp,
struct xfs_buf *src_bp)
{
- struct xfs_buf_log_item *sbip = src_bp->b_fspriv;
- struct xfs_buf_log_item *dbip = dst_bp->b_fspriv;
+ struct xfs_buf_log_item *sbip = src_bp->b_log_item;
+ struct xfs_buf_log_item *dbip = dst_bp->b_log_item;
enum xfs_blft type;
type = xfs_blft_from_flags(&sbip->__bli_format);
@@ -797,11 +801,11 @@ xfs_trans_buf_copy_type(
/* ARGSUSED */
void
xfs_trans_dquot_buf(
- xfs_trans_t *tp,
- xfs_buf_t *bp,
- uint type)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp,
+ uint type)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(type == XFS_BLF_UDQUOT_BUF ||
type == XFS_BLF_PDQUOT_BUF ||
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index daa7615497f9..4a89da4b6fe7 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -28,6 +28,8 @@
#include "xfs_inode_item.h"
#include "xfs_trace.h"
+#include <linux/iversion.h>
+
/*
* Add a locked inode to the transaction.
*
@@ -110,15 +112,17 @@ xfs_trans_log_inode(
/*
* First time we log the inode in a transaction, bump the inode change
- * counter if it is configured for this to occur. We don't use
- * inode_inc_version() because there is no need for extra locking around
- * i_version as we already hold the inode locked exclusively for
- * metadata modification.
+ * counter if it is configured for this to occur. While we have the
+ * inode locked exclusively for metadata modification, we can usually
+ * avoid setting XFS_ILOG_CORE if no one has queried the value since
+ * the last time it was incremented. If we have XFS_ILOG_CORE already
+ * set however, then go ahead and bump the i_version counter
+ * unconditionally.
*/
if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) &&
IS_I_VERSION(VFS_I(ip))) {
- VFS_I(ip)->i_version++;
- flags |= XFS_ILOG_CORE;
+ if (inode_maybe_inc_iversion(VFS_I(ip), flags & XFS_ILOG_CORE))
+ flags |= XFS_ILOG_CORE;
}
tp->t_flags |= XFS_TRANS_DIRTY;
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 6db3b4668b1a..ffe364fa4040 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -145,9 +145,9 @@
#define ACPI_ADDRESS_RANGE_MAX 2
-/* Maximum number of While() loops before abort */
+/* Maximum time (default 30s) of While() loops before abort */
-#define ACPI_MAX_LOOP_COUNT 0x000FFFFF
+#define ACPI_MAX_LOOP_TIMEOUT 30
/******************************************************************************
*
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 17d61b1f2511..3c46f0ef5f7a 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -130,8 +130,9 @@ struct acpi_exception_info {
#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020)
#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021)
#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022)
+#define AE_END_OF_TABLE EXCEP_ENV (0x0023)
-#define AE_CODE_ENV_MAX 0x0022
+#define AE_CODE_ENV_MAX 0x0023
/*
* Programmer exceptions
@@ -195,7 +196,7 @@ struct acpi_exception_info {
#define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E)
#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F)
#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020)
-#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021)
+#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021)
#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
@@ -275,7 +276,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
EXCEP_TXT("AE_DECIMAL_OVERFLOW",
"Overflow during ASCII decimal-to-binary conversion"),
EXCEP_TXT("AE_OCTAL_OVERFLOW",
- "Overflow during ASCII octal-to-binary conversion")
+ "Overflow during ASCII octal-to-binary conversion"),
+ EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table")
};
static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
@@ -368,8 +370,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
"The length of a Resource Descriptor in the AML is incorrect"),
EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
"A memory, I/O, or PCI configuration address is invalid"),
- EXCEP_TXT("AE_AML_INFINITE_LOOP",
- "An apparent infinite AML While loop, method was aborted"),
+ EXCEP_TXT("AE_AML_LOOP_TIMEOUT",
+ "An AML While loop exceeded the maximum execution time"),
EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
"A namespace node is uninitialized or unresolved"),
EXCEP_TXT("AE_AML_TARGET_TYPE",
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 79287629c888..c9608b0b80c6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -91,6 +91,9 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv);
+
#ifdef CONFIG_ACPI
#include <linux/proc_fs.h>
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e1dd1a8d42b6..c589c3e12d90 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20170831
+#define ACPI_CA_VERSION 0x20171215
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -260,11 +260,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
/*
- * Maximum number of While() loop iterations before forced method abort.
+ * Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
*/
-ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_COUNT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT);
/*
* This mechanism is used to trace a specified AML method. The method is
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7a89e6de94da..4c304bf4d591 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -69,9 +69,10 @@
#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */
#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
-#define ACPI_SIG_PDTT "PDTT" /* Processor Debug Trigger Table */
+#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
+#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
@@ -1149,7 +1150,8 @@ enum acpi_nfit_type {
ACPI_NFIT_TYPE_CONTROL_REGION = 4,
ACPI_NFIT_TYPE_DATA_REGION = 5,
ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
- ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
+ ACPI_NFIT_TYPE_CAPABILITIES = 7,
+ ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */
};
/*
@@ -1162,7 +1164,7 @@ struct acpi_nfit_system_address {
struct acpi_nfit_header header;
u16 range_index;
u16 flags;
- u32 reserved; /* Reseved, must be zero */
+ u32 reserved; /* Reserved, must be zero */
u32 proximity_domain;
u8 range_guid[16];
u64 address;
@@ -1281,9 +1283,72 @@ struct acpi_nfit_flush_address {
u64 hint_address[1]; /* Variable length */
};
+/* 7: Platform Capabilities Structure */
+
+struct acpi_nfit_capabilities {
+ struct acpi_nfit_header header;
+ u8 highest_capability;
+ u8 reserved[3]; /* Reserved, must be zero */
+ u32 capabilities;
+ u32 reserved2;
+};
+
+/* Capabilities Flags */
+
+#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */
+
+/*
+ * NFIT/NVDIMM device handle support - used as the _ADR for each NVDIMM
+ */
+struct nfit_device_handle {
+ u32 handle;
+};
+
+/* Device handle construction and extraction macros */
+
+#define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F
+#define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0
+#define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00
+#define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000
+#define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000
+
+#define ACPI_NFIT_DIMM_NUMBER_OFFSET 0
+#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4
+#define ACPI_NFIT_MEMORY_ID_OFFSET 8
+#define ACPI_NFIT_SOCKET_ID_OFFSET 12
+#define ACPI_NFIT_NODE_ID_OFFSET 16
+
+/* Macro to construct an NFIT/NVDIMM device handle */
+
+#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \
+ ((dimm) | \
+ ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \
+ ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \
+ ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \
+ ((node) << ACPI_NFIT_NODE_ID_OFFSET))
+
+/* Macros to extract individual fields from an NFIT/NVDIMM device handle */
+
+#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \
+ ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK)
+
+#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \
+ (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET)
+
+#define ACPI_NFIT_GET_MEMORY_ID(handle) \
+ (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET)
+
+#define ACPI_NFIT_GET_SOCKET_ID(handle) \
+ (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET)
+
+#define ACPI_NFIT_GET_NODE_ID(handle) \
+ (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET)
+
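/*
 * Illustrative sketch, not part of the patch: round-tripping an NVDIMM
 * device handle through the construction and extraction macros above.
 * The field values and function name are made up for the example.
 */
static void nfit_handle_example(void)
{
	u32 handle;

	/* dimm 2, channel 1, memory controller 0, socket 3, node 7 */
	handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(2, 1, 0, 3, 7);

	/* each GET macro masks and shifts the matching field back out */
	WARN_ON(ACPI_NFIT_GET_DIMM_NUMBER(handle) != 2);
	WARN_ON(ACPI_NFIT_GET_CHANNEL_NUMBER(handle) != 1);
	WARN_ON(ACPI_NFIT_GET_SOCKET_ID(handle) != 3);
	WARN_ON(ACPI_NFIT_GET_NODE_ID(handle) != 7);
}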
/*******************************************************************************
*
- * PDTT - Processor Debug Trigger Table (ACPI 6.2)
+ * PDTT - Platform Debug Trigger Table (ACPI 6.2)
* Version 0
*
******************************************************************************/
@@ -1301,14 +1366,14 @@ struct acpi_table_pdtt {
* starting at array_offset.
*/
struct acpi_pdtt_channel {
- u16 sub_channel_id;
+ u8 subchannel_id;
+ u8 flags;
};
-/* Mask and Flags for above */
+/* Flags for above */
-#define ACPI_PDTT_SUBCHANNEL_ID_MASK 0x00FF
-#define ACPI_PDTT_RUNTIME_TRIGGER (1<<8)
-#define ACPI_PPTT_WAIT_COMPLETION (1<<9)
+#define ACPI_PDTT_RUNTIME_TRIGGER (1)
+#define ACPI_PDTT_WAIT_COMPLETION (1<<1)
/*******************************************************************************
*
@@ -1376,6 +1441,20 @@ struct acpi_pptt_cache {
#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */
#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */
+/* Attributes describing cache */
+#define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */
+#define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */
+#define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */
+#define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */
+
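/*
 * Illustrative sketch, not part of the patch: decoding the PPTT cache
 * attributes byte with the defines above. The function name is made up;
 * "attrs" stands for the attributes field of struct acpi_pptt_cache.
 */
static bool example_pptt_cache_is_unified(u8 attrs)
{
	u8 type = attrs & ACPI_PPTT_MASK_CACHE_TYPE;

	/* both encodings of "unified" must be accepted */
	return type == ACPI_PPTT_CACHE_TYPE_UNIFIED ||
	       type == ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT;
}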
/* 2: ID Structure */
struct acpi_pptt_id {
@@ -1405,6 +1484,68 @@ struct acpi_table_sbst {
/*******************************************************************************
*
+ * SDEV - Secure Devices Table (ACPI 6.2)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_sdev {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+struct acpi_sdev_header {
+ u8 type;
+ u8 flags;
+ u16 length;
+};
+
+/* Values for subtable type above */
+
+enum acpi_sdev_type {
+ ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0,
+ ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1,
+ ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* Values for flags above */
+
+#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1)
+
+/*
+ * SDEV subtables
+ */
+
+/* 0: Namespace Device Based Secure Device Structure */
+
+struct acpi_sdev_namespace {
+ struct acpi_sdev_header header;
+ u16 device_id_offset;
+ u16 device_id_length;
+ u16 vendor_data_offset;
+ u16 vendor_data_length;
+};
+
+/* 1: PCIe Endpoint Device Based Device Structure */
+
+struct acpi_sdev_pcie {
+ struct acpi_sdev_header header;
+ u16 segment;
+ u16 start_bus;
+ u16 path_offset;
+ u16 path_length;
+ u16 vendor_data_offset;
+ u16 vendor_data_length;
+};
+
+/* 1a: PCIe Endpoint path entry */
+
+struct acpi_sdev_pcie_path {
+ u8 device;
+ u8 function;
+};
+
+/*******************************************************************************
+ *
* SLIT - System Locality Distance Information Table
* Version 1
*
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 686b6f8c09dc..0d60d5df14f8 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -810,6 +810,7 @@ struct acpi_iort_smmu_v3 {
u8 pxm;
u8 reserved1;
u16 reserved2;
+ u32 id_mapping_index;
};
/* Values for Model field above */
@@ -1246,6 +1247,8 @@ enum acpi_spmi_interface_types {
* TCPA - Trusted Computing Platform Alliance table
* Version 2
*
+ * TCG Hardware Interface Table for TPM 1.2 Clients and Servers
+ *
* Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
* Version 1.2, Revision 8
* February 27, 2017
@@ -1310,6 +1313,8 @@ struct acpi_table_tcpa_server {
* TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
* Version 4
*
+ * TCG Hardware Interface Table for TPM 2.0 Clients and Servers
+ *
* Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
* Version 1.2, Revision 8
* February 27, 2017
@@ -1329,15 +1334,23 @@ struct acpi_table_tpm2 {
/* Values for start_method above */
#define ACPI_TPM2_NOT_ALLOWED 0
+#define ACPI_TPM2_RESERVED1 1
#define ACPI_TPM2_START_METHOD 2
+#define ACPI_TPM2_RESERVED3 3
+#define ACPI_TPM2_RESERVED4 4
+#define ACPI_TPM2_RESERVED5 5
#define ACPI_TPM2_MEMORY_MAPPED 6
#define ACPI_TPM2_COMMAND_BUFFER 7
#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8
+#define ACPI_TPM2_RESERVED9 9
+#define ACPI_TPM2_RESERVED10 10
#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */
+#define ACPI_TPM2_RESERVED 12
-/* Trailer appears after any start_method subtables */
+/* Optional trailer appears after any start_method subtables */
struct acpi_tpm2_trailer {
+ u8 method_parameters[12];
u32 minimum_log_length; /* Minimum length for the event log area */
u64 log_address; /* Address of the event log area */
};
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4f077edb9b81..31f1be74dd16 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -468,6 +468,8 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */
#define ACPI_NSEC_PER_MSEC 1000000L
#define ACPI_NSEC_PER_SEC 1000000000L
+#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0)
+
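/*
 * Illustrative sketch, not part of the patch: ACPI_TIME_AFTER() is a
 * wraparound-safe signed comparison in the style of the kernel's
 * time_after(). The helper name below is made up.
 */
static bool example_deadline_passed(u64 now, u64 deadline)
{
	/* true once "now" has moved past "deadline", even across wraparound */
	return ACPI_TIME_AFTER(now, deadline);
}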
/* Owner IDs are used to track namespace nodes for selective deletion */
typedef u8 acpi_owner_id;
@@ -1299,6 +1301,8 @@ typedef enum {
#define ACPI_OSI_WIN_7 0x0B
#define ACPI_OSI_WIN_8 0x0C
#define ACPI_OSI_WIN_10 0x0D
+#define ACPI_OSI_WIN_10_RS1 0x0E
+#define ACPI_OSI_WIN_10_RS2 0x0F
/* Definitions of getopt */
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 000000000000..880a292d792f
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ return &dma_direct_ops;
+}
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
new file mode 100644
index 000000000000..296c65442f00
--- /dev/null
+++ b/include/asm-generic/error-injection.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_ERROR_INJECTION_H
+#define _ASM_GENERIC_ERROR_INJECTION_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+enum {
+ EI_ETYPE_NONE, /* Dummy value for undefined case */
+ EI_ETYPE_NULL, /* Return NULL if failure */
+ EI_ETYPE_ERRNO, /* Return -ERRNO if failure */
+ EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */
+};
+
+struct error_injection_entry {
+ unsigned long addr;
+ int etype;
+};
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Whitelist generating macro. Specify functions which may be
+ * error-injected using this macro.
+ */
+#define ALLOW_ERROR_INJECTION(fname, _etype) \
+static struct error_injection_entry __used \
+ __attribute__((__section__("_error_injection_whitelist"))) \
+ _eil_addr_##fname = { \
+ .addr = (unsigned long)fname, \
+ .etype = EI_ETYPE_##_etype, \
+ };
+#else
+#define ALLOW_ERROR_INJECTION(fname, _etype)
+#endif
+#endif
+
+#endif /* _ASM_GENERIC_ERROR_INJECTION_H */
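/*
 * Illustrative sketch, not part of the patch: whitelisting a function
 * for error injection. The function is hypothetical; the second macro
 * argument selects how an injected failure is reported (here, as a
 * -ERRNO return value, via EI_ETYPE_ERRNO).
 */
static int example_open_resource(void)
{
	/* real work would go here; an injected error overrides the return */
	return 0;
}
ALLOW_ERROR_INJECTION(example_open_resource, ERRNO);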
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 868e68561f91..2cfa3075d148 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -309,19 +309,26 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
-#ifndef __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
-static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with a CPU setting these bits, so a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
-
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ return old_pmd;
}
#endif
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+#endif
+
#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ee8b707d9fa9..1ab0e520d6fc 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -136,6 +136,15 @@
#define KPROBE_BLACKLIST()
#endif
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
+ VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\
+ KEEP(*(_error_injection_whitelist)) \
+ VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
+#else
+#define ERROR_INJECT_WHITELIST()
+#endif
+
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
@@ -268,7 +277,11 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
VMLINUX_SYMBOL(__start_init_task) = .; \
+ VMLINUX_SYMBOL(init_thread_union) = .; \
+ VMLINUX_SYMBOL(init_stack) = .; \
*(.data..init_task) \
+ *(.data..init_thread_info) \
+ . = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE; \
VMLINUX_SYMBOL(__end_init_task) = .;
/*
@@ -564,6 +577,7 @@
FTRACE_EVENTS() \
TRACE_SYSCALLS() \
KPROBE_BLACKLIST() \
+ ERROR_INJECT_WHITELIST() \
MEM_DISCARD(init.rodata) \
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 03b97629442c..1e26f790b03f 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -327,7 +327,12 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
*/
static inline int crypto_aead_encrypt(struct aead_request *req)
{
- return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_aead_alg(aead)->encrypt(req);
}
/**
@@ -356,6 +361,9 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
if (req->cryptlen < crypto_aead_authsize(aead))
return -EINVAL;
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index caaa470389e0..b83d66073db0 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,12 +13,13 @@
#define CHACHA20_IV_SIZE 16
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64
+#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32))
struct chacha20_ctx {
u32 key[8];
};
-void chacha20_block(u32 *state, void *stream);
+void chacha20_block(u32 *state, u32 *stream);
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize);
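/*
 * Illustrative sketch, not part of the patch: with the new prototype the
 * keystream buffer passed to chacha20_block() is typed as u32 words, so
 * the required size and alignment are explicit at the call site. The
 * wrapper name is made up.
 */
static void example_chacha20_keystream(u32 *state)
{
	u32 stream[CHACHA20_BLOCK_WORDS];	/* 64 bytes, word-aligned */

	chacha20_block(state, stream);	/* also advances the block counter in state */
}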
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 0ed31fd80242..2d1849dffb80 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -71,12 +71,11 @@ struct ahash_request {
/**
* struct ahash_alg - asynchronous message digest definition
- * @init: Initialize the transformation context. Intended only to initialize the
+ * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
* state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
* transformation. No data processing happens at this point.
- * Note: mandatory.
- * @update: Push a chunk of data into the driver for transformation. This
+ * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
* function actually pushes blocks of data from upper layers into the
* driver, which then passes those to the hardware as seen fit. This
* function must not finalize the HASH transformation by calculating the
@@ -85,20 +84,17 @@ struct ahash_request {
* context, as this function may be called in parallel with the same
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point.
- * Note: mandatory.
- * @final: Retrieve result from the driver. This function finalizes the
+ * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
* point unless hardware requires it to finish the transformation
* (then the data buffered by the device driver is processed).
- * Note: mandatory.
- * @finup: Combination of @update and @final. This function is effectively a
+ * @finup: **[optional]** Combination of @update and @final. This function is effectively a
* combination of @update and @final calls issued in sequence. As some
* hardware cannot do @update and @final separately, this callback was
* added to allow such hardware to be used at least by IPsec. Data
* processing can happen synchronously [SHASH] or asynchronously [AHASH]
* at this point.
- * Note: optional.
* @digest: Combination of @init and @update and @final. This function
* effectively behaves as the entire chain of operations, @init,
* @update and @final issued in sequence. Just like @finup, this was
@@ -210,7 +206,6 @@ struct crypto_ahash {
unsigned int keylen);
unsigned int reqsize;
- bool has_setkey;
struct crypto_tfm base;
};
@@ -410,11 +405,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
-static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
-{
- return tfm->has_setkey;
-}
-
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
@@ -487,7 +477,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
*/
static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
{
- return crypto_ahash_reqtfm(req)->import(req, in);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->import(req, in);
}
/**
@@ -503,7 +498,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
*/
static inline int crypto_ahash_init(struct ahash_request *req)
{
- return crypto_ahash_reqtfm(req)->init(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->init(req);
}
/**
@@ -855,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
*/
static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
{
- return crypto_shash_alg(desc->tfm)->import(desc, in);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->import(desc, in);
}
/**
@@ -871,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
*/
static inline int crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->init(desc);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->init(desc);
}
/**
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 38d9c5861ed8..482461d8931d 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -18,6 +18,7 @@
#include <linux/if_alg.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
+#include <linux/atomic.h>
#include <net/sock.h>
#include <crypto/aead.h>
@@ -150,7 +151,7 @@ struct af_alg_ctx {
struct crypto_wait wait;
size_t used;
- size_t rcvused;
+ atomic_t rcvused;
bool more;
bool merge;
@@ -215,7 +216,7 @@ static inline int af_alg_rcvbuf(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
- ctx->rcvused, 0);
+ atomic_read(&ctx->rcvused), 0);
}
/**
@@ -244,7 +245,7 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
-unsigned int af_alg_poll(struct file *file, struct socket *sock,
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index c2bae8da642c..27040a46d50a 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
struct hash_alg_common *alg,
struct crypto_instance *inst);
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index ccad9b2c9bd6..0f6ddac1acfc 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -28,17 +28,6 @@ struct crypto_scomp {
* @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @init: Initialize the cryptographic transformation object.
- * This function is used to initialize the cryptographic
- * transformation object. This function is called only once at
- * the instantiation time, right after the transformation context
- * was allocated. In case the cryptographic hardware has some
- * special requirements which need to be handled by software, this
- * function shall check for the precise requirement of the
- * transformation and put any software fallbacks in place.
- * @exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @init, used to remove various changes set in
- * @init.
* @base: Common crypto API algorithm data structure
*/
struct scomp_alg {
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 5757c0a4b321..15aeef6e30ef 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -12,14 +12,4 @@
struct crypto_skcipher *crypto_get_default_null_skcipher(void);
void crypto_put_default_null_skcipher(void);
-static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void)
-{
- return crypto_get_default_null_skcipher();
-}
-
-static inline void crypto_put_default_null_skcipher2(void)
-{
- crypto_put_default_null_skcipher();
-}
-
#endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index c65567d01e8e..f718a19da82f 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -31,8 +31,6 @@ struct poly1305_desc_ctx {
};
int crypto_poly1305_init(struct shash_desc *desc);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen);
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen);
int crypto_poly1305_update(struct shash_desc *desc,
diff --git a/include/crypto/salsa20.h b/include/crypto/salsa20.h
new file mode 100644
index 000000000000..19ed48aefc86
--- /dev/null
+++ b/include/crypto/salsa20.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Salsa20 algorithm
+ */
+
+#ifndef _CRYPTO_SALSA20_H
+#define _CRYPTO_SALSA20_H
+
+#include <linux/types.h>
+
+#define SALSA20_IV_SIZE 8
+#define SALSA20_MIN_KEY_SIZE 16
+#define SALSA20_MAX_KEY_SIZE 32
+#define SALSA20_BLOCK_SIZE 64
+
+struct crypto_skcipher;
+
+struct salsa20_ctx {
+ u32 initial_state[16];
+};
+
+void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
+ const u8 *iv);
+int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+
+#endif /* _CRYPTO_SALSA20_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index b9d9bd553b48..080f60c2e6b1 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -19,7 +19,6 @@
struct sha3_state {
u64 st[25];
- unsigned int md_len;
unsigned int rsiz;
unsigned int rsizw;
@@ -27,4 +26,9 @@ struct sha3_state {
u8 buf[SHA3_224_BLOCK_SIZE];
};
+int crypto_sha3_init(struct shash_desc *desc);
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+int crypto_sha3_final(struct shash_desc *desc, u8 *out);
+
#endif
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 562001cb412b..2f327f090c3e 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -401,11 +401,6 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
return tfm->setkey(tfm, key, keylen);
}
-static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
-{
- return tfm->keysize;
-}
-
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
@@ -442,6 +437,9 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
return tfm->encrypt(req);
}
@@ -460,6 +458,9 @@ static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
return tfm->decrypt(req);
}
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 0e0c868451a5..5176c3797680 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -364,7 +364,7 @@ int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index dd549ff04295..2cc10ae4bbb7 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -29,8 +29,8 @@
#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN)
#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE)
-/* Bit 3 express GPIO suspend/resume persistence */
-#define GPIO_SLEEP_MAINTAIN_VALUE 0
-#define GPIO_SLEEP_MAY_LOSE_VALUE 8
+/* Bit 3 expresses GPIO suspend/resume and reset persistence */
+#define GPIO_PERSISTENT 0
+#define GPIO_TRANSITORY 8
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index dc1ebfeeb5ec..e6d41b65d396 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -56,6 +56,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+#define ACPI_HANDLE_FWNODE(fwnode) \
+ acpi_device_handle(to_acpi_device_node(fwnode))
static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
{
@@ -451,6 +453,7 @@ void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_nvs_nosave(void);
void __init acpi_nvs_nosave_s3(void);
+void __init acpi_sleep_no_blacklist(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
@@ -584,6 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
+void *acpi_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -626,6 +630,7 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count)
#define ACPI_COMPANION(dev) (NULL)
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
+#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
struct fwnode_handle;
@@ -640,6 +645,12 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
+static inline const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+ return NULL;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
@@ -755,6 +766,11 @@ static inline const struct acpi_device_id *acpi_match_device(
return NULL;
}
+static inline void *acpi_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
static inline bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
@@ -978,6 +994,11 @@ struct acpi_gpio_mapping {
const char *name;
const struct acpi_gpio_params *data;
unsigned int size;
+
+/* Ignore IoRestriction field */
+#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0)
+
+ unsigned int quirks;
};
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 304511267c82..2b709416de05 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,7 +27,7 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, freq_scale);
static inline
-unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
+unsigned long topology_get_freq_scale(int cpu)
{
return per_cpu(freq_scale, cpu);
}
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
new file mode 100644
index 000000000000..942afbd544b7
--- /dev/null
+++ b/include/linux/arm_sdei.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __LINUX_ARM_SDEI_H
+#define __LINUX_ARM_SDEI_H
+
+#include <uapi/linux/arm_sdei.h>
+
+enum sdei_conduit_types {
+ CONDUIT_INVALID = 0,
+ CONDUIT_SMC,
+ CONDUIT_HVC,
+};
+
+#include <asm/sdei.h>
+
+/* Arch code should override this to set the entry point from firmware... */
+#ifndef sdei_arch_get_entry_point
+#define sdei_arch_get_entry_point(conduit) (0)
+#endif
+
+/*
+ * When an event occurs, sdei_event_handler() will call a user-provided
+ * callback of this type in NMI context on the CPU that received the event.
+ */
+typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg);
+
+/*
+ * Register your callback to claim an event. The event must be described
+ * by firmware.
+ */
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg);
+
+/*
+ * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling
+ * it until it succeeds.
+ */
+int sdei_event_unregister(u32 event_num);
+
+int sdei_event_enable(u32 event_num);
+int sdei_event_disable(u32 event_num);
+
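/*
 * Illustrative sketch, not part of the patch: claiming and enabling an
 * SDEI event. The event number and names are hypothetical. The callback
 * runs in an NMI-like context, so it must not sleep or take normal locks.
 */
static int example_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
{
	/* record the event somewhere NMI-safe; keep this path minimal */
	return 0;
}

static int example_sdei_setup(void)
{
	int err;

	err = sdei_event_register(42, example_sdei_cb, NULL);
	if (err)
		return err;

	/* a registered event stays dormant until it is also enabled */
	return sdei_event_enable(42);
}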
+#ifdef CONFIG_ARM_SDE_INTERFACE
+/* For use by arch code when CPU hotplug notifiers are not appropriate. */
+int sdei_mask_local_cpu(void);
+int sdei_unmask_local_cpu(void);
+#else
+static inline int sdei_mask_local_cpu(void) { return 0; }
+static inline int sdei_unmask_local_cpu(void) { return 0; }
+#endif /* CONFIG_ARM_SDE_INTERFACE */
+
+
+/*
+ * This struct represents an event that has been registered. The driver
+ * maintains a list of all events, and which ones are registered. (Private
+ * events have one entry in the list, but are registered on each CPU).
+ * A pointer to this struct is passed to firmware, and back to the event
+ * handler. The event handler can then use this to invoke the registered
+ * callback, without having to walk the list.
+ *
+ * For CPU private events, this structure is per-cpu.
+ */
+struct sdei_registered_event {
+ /* For use by arch code: */
+ struct pt_regs interrupted_regs;
+
+ sdei_event_callback *callback;
+ void *callback_arg;
+ u32 event_num;
+ u8 priority;
+};
+
+/* The arch code entry point should then call this when an event arrives. */
+int notrace sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
+/* arch code may use this to retrieve the extra registers. */
+int sdei_api_event_context(u32 query, u64 *result);
+
+#endif /* __LINUX_ARM_SDEI_H */
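[Editor's note: a sketch of the register/enable/unregister flow this new header describes; the event number is a placeholder, and per the comment above, unregister is retried while it reports EINPROGRESS:]

#include <linux/arm_sdei.h>

/* invoked in NMI context on the CPU that took the event */
static int example_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
{
        /* NMI-safe work only: no sleeping, no ordinary locks */
        return 0;
}

static int example_sdei_bind(u32 event_num)
{
        int err = sdei_event_register(event_num, example_sdei_cb, NULL);

        if (err)
                return err;
        return sdei_event_enable(event_num);
}

static void example_sdei_unbind(u32 event_num)
{
        int err;

        do {
                err = sdei_event_unregister(event_num);
        } while (err == -EINPROGRESS);
}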
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c7a353825450..40d150ad7e07 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -448,6 +448,8 @@ enum {
ATA_SET_MAX_LOCK = 0x02,
ATA_SET_MAX_UNLOCK = 0x03,
ATA_SET_MAX_FREEZE_LOCK = 0x04,
+ ATA_SET_MAX_PASSWD_DMA = 0x05,
+ ATA_SET_MAX_UNLOCK_DMA = 0x06,
/* feature values for DEVICE CONFIGURATION OVERLAY */
ATA_DCO_RESTORE = 0xC0,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e54e7e0033eb..3e4ce54d84ab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
* holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
* associated wb's list_lock.
*/
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 23d29b39f71e..d0eb659fa733 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -300,6 +300,29 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
bv->bv_len = iter.bi_bvec_done;
}
+static inline unsigned bio_pages_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_vcnt;
+}
+
+static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_io_vec;
+}
+
+static inline struct page *bio_first_page_all(struct bio *bio)
+{
+ return bio_first_bvec_all(bio)->bv_page;
+}
+
+static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return &bio->bi_io_vec[bio->bi_vcnt - 1];
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -477,7 +500,6 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
#endif
extern void bio_copy_data(struct bio *dst, struct bio *src);
-extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);
extern struct bio *bio_copy_user_iov(struct request_queue *,
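[Editor's note: the new *_all helpers only make sense on a bio that still owns its bvec table, hence the BIO_CLONED warnings. A hedged sketch of the intended access pattern:]

#include <linux/bio.h>

/* valid only for a non-cloned bio that owns its bvec table */
static void example_bio_endpoints(struct bio *bio)
{
        struct page *first = bio_first_page_all(bio);
        struct bio_vec *last = bio_last_bvec_all(bio);

        pr_debug("%u segments, first page %p, last segment %u bytes\n",
                 bio_pages_all(bio), first, last->bv_len);
}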
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 1030651f8309..cf2588d81148 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -16,6 +16,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
+#include <asm/byteorder.h>
/*
* Bitfield access macros
@@ -103,4 +104,49 @@
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
+extern void __compiletime_warning("value doesn't fit into mask")
+__field_overflow(void);
+extern void __compiletime_error("bad bitfield mask")
+__bad_mask(void);
+static __always_inline u64 field_multiplier(u64 field)
+{
+ if ((field | (field - 1)) & ((field | (field - 1)) + 1))
+ __bad_mask();
+ return field & -field;
+}
+static __always_inline u64 field_mask(u64 field)
+{
+ return field / field_multiplier(field);
+}
+#define ____MAKE_OP(type,base,to,from) \
+static __always_inline __##type type##_encode_bits(base v, base field) \
+{ \
+ if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \
+ __field_overflow(); \
+ return to((v & field_mask(field)) * field_multiplier(field)); \
+} \
+static __always_inline __##type type##_replace_bits(__##type old, \
+ base val, base field) \
+{ \
+ return (old & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline void type##p_replace_bits(__##type *p, \
+ base val, base field) \
+{ \
+ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline base type##_get_bits(__##type v, base field) \
+{ \
+ return (from(v) & field)/field_multiplier(field); \
+}
+#define __MAKE_OP(size) \
+ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
+ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
+ ____MAKE_OP(u##size,u##size,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
+
#endif
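[Editor's note: the generated helpers shift a value into (or out of) the position described by a mask, with compile-time checks that the mask is contiguous and that a constant value fits. A small sketch against a made-up register layout:]

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define EXAMPLE_CHAN    GENMASK(7, 4)   /* hypothetical 4-bit field */

static void example_bitfield_ops(void)
{
        u32 reg = u32_encode_bits(5, EXAMPLE_CHAN);       /* reg == 0x50 */
        u32 chan = u32_get_bits(reg, EXAMPLE_CHAN);       /* chan == 5 */
        __le32 lereg = le32_encode_bits(5, EXAMPLE_CHAN); /* same, stored LE */

        u32p_replace_bits(&reg, 9, EXAMPLE_CHAN);         /* reg becomes 0x90 */
        (void)chan;
        (void)lereg;
}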
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index e9825ff57b15..69bea82ebeb1 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -660,12 +660,14 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
struct blkg_rwstat *from)
{
- struct blkg_rwstat v = blkg_rwstat_read(from);
+ u64 sum[BLKG_RWSTAT_NR];
int i;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(atomic64_read(&v.aux_cnt[i]) +
- atomic64_read(&from->aux_cnt[i]),
+ sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
&to->aux_cnt[i]);
}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 95c9a5c862e2..8efcf49796a3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
unsigned int queue_num;
atomic_t nr_active;
+ unsigned int nr_expired;
struct hlist_node cpuhp_dead;
struct kobject kobj;
@@ -65,7 +66,7 @@ struct blk_mq_hw_ctx {
#endif
/* Must be the last member - see also blk_mq_hw_ctx_size(). */
- struct srcu_struct queue_rq_srcu[0];
+ struct srcu_struct srcu[0];
};
struct blk_mq_tag_set {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 9e7d8bd776d2..c5d3db0d83f8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -39,6 +39,34 @@ typedef u8 __bitwise blk_status_t;
#define BLK_STS_AGAIN ((__force blk_status_t)12)
+/**
+ * blk_path_error - returns true if error may be path related
+ * @error: status the request was completed with
+ *
+ * Description:
+ * This classifies block error status into non-retryable errors and ones
+ * that may be successful if retried on a failover path.
+ *
+ * Return:
+ * %false - retrying failover path will not help
+ * %true - may succeed if retried
+ */
+static inline bool blk_path_error(blk_status_t error)
+{
+ switch (error) {
+ case BLK_STS_NOTSUPP:
+ case BLK_STS_NOSPC:
+ case BLK_STS_TARGET:
+ case BLK_STS_NEXUS:
+ case BLK_STS_MEDIUM:
+ case BLK_STS_PROTECTION:
+ return false;
+ }
+
+ /* Anything else could be a path failure, so should be retried */
+ return true;
+}
+
struct blk_issue_stat {
u64 stat;
};
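[Editor's note: a sketch of the intended blk_path_error() call site, e.g. a multipath completion handler deciding between failing a request upward and retrying it on another path:]

#include <linux/blk_types.h>

/* retry on a different path only when the status may be path-related */
static bool example_should_failover(blk_status_t status)
{
        return status != BLK_STS_OK && blk_path_error(status);
}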
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0ce8a372d506..4f3df807cf8f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,6 +27,8 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/seqlock.h>
+#include <linux/u64_stats_sync.h>
struct module;
struct scsi_ioctl_command;
@@ -121,6 +123,12 @@ typedef __u32 __bitwise req_flags_t;
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+/* The per-zone write lock is held for this request */
+#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* timeout is expired */
+#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
+/* already slept for hybrid poll */
+#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
@@ -133,12 +141,6 @@ typedef __u32 __bitwise req_flags_t;
* especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
struct request {
- struct list_head queuelist;
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
-
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
@@ -148,8 +150,6 @@ struct request {
int internal_tag;
- unsigned long atomic_flags;
-
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
int tag;
@@ -158,6 +158,8 @@ struct request {
struct bio *bio;
struct bio *biotail;
+ struct list_head queuelist;
+
/*
* The hash is used inside the scheduler, and killed once the
* request reaches the dispatch list. The ipi_list is only used
@@ -205,19 +207,16 @@ struct request {
struct hd_struct *part;
unsigned long start_time;
struct blk_issue_stat issue_stat;
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
- unsigned long long start_time_ns;
- unsigned long long io_start_time_ns; /* when passed to hardware */
-#endif
/* Number of scatter-gather DMA addr+len pairs after
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
+
#if defined(CONFIG_BLK_DEV_INTEGRITY)
unsigned short nr_integrity_segments;
#endif
+ unsigned short write_hint;
unsigned short ioprio;
unsigned int timeout;
@@ -226,11 +225,37 @@ struct request {
unsigned int extra_len; /* length of alignment and padding */
- unsigned short write_hint;
+ /*
+ * On blk-mq, the lower bits of ->gstate (generation number and
+ * state) carry the MQ_RQ_* state value and the upper bits the
+ * generation number which is monotonically incremented and used to
+ * distinguish the reuse instances.
+ *
+ * ->gstate_seq allows updates to ->gstate and other fields
+ * (currently ->deadline) during request start to be read
+ * atomically from the timeout path, so that it can operate on a
+ * coherent set of information.
+ */
+ seqcount_t gstate_seq;
+ u64 gstate;
+
+ /*
+ * ->aborted_gstate is used by the timeout to claim a specific
+ * recycle instance of this request. See blk_mq_timeout_work().
+ */
+ struct u64_stats_sync aborted_gstate_sync;
+ u64 aborted_gstate;
+
+ /* access through blk_rq_set_deadline, blk_rq_deadline */
+ unsigned long __deadline;
- unsigned long deadline;
struct list_head timeout_list;
+ union {
+ struct __call_single_data csd;
+ u64 fifo_time;
+ };
+
/*
* completion callback.
*/
@@ -239,6 +264,12 @@ struct request {
/* for bidi */
struct request *next_rq;
+
+#ifdef CONFIG_BLK_CGROUP
+ struct request_list *rl; /* rl this rq is alloced from */
+ unsigned long long start_time_ns;
+ unsigned long long io_start_time_ns; /* when passed to hardware */
+#endif
};
static inline bool blk_op_is_scsi(unsigned int op)
@@ -564,6 +595,22 @@ struct request_queue {
struct queue_limits limits;
/*
+ * Zoned block device information for request dispatch control.
+ * nr_zones is the total number of zones of the device. This is always
+ * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit clear) or
+ * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
+ * bits which indicates if a zone is write locked, that is, if a write
+ * request targeting the zone was dispatched. All three fields are
+ * initialized by the low level device driver (e.g. scsi/sd.c).
+ * Stacking drivers (device mappers) may or may not initialize
+ * these fields.
+ */
+ unsigned int nr_zones;
+ unsigned long *seq_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+
+ /*
* sg stuff
*/
unsigned int sg_timeout;
@@ -807,6 +854,27 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
+static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+{
+ return q->nr_zones;
+}
+
+static inline unsigned int blk_queue_zone_no(struct request_queue *q,
+ sector_t sector)
+{
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return sector >> ilog2(q->limits.chunk_sectors);
+}
+
+static inline bool blk_queue_zone_is_seq(struct request_queue *q,
+ sector_t sector)
+{
+ if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
+ return false;
+ return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
+}
+
static inline bool rq_is_sync(struct request *rq)
{
return op_is_sync(rq->cmd_flags);
@@ -1046,6 +1114,16 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
+static inline unsigned int blk_rq_zone_no(struct request *rq)
+{
+ return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
+}
+
+static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+{
+ return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
+}
+
/*
* Some commands like WRITE SAME have a payload or data transfer size which
* is different from the size of the request. Any driver that supports such
@@ -1595,7 +1673,15 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
if (q)
return blk_queue_zone_sectors(q);
+ return 0;
+}
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ if (q)
+ return blk_queue_nr_zones(q);
return 0;
}
@@ -1731,8 +1817,6 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
-int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
@@ -1971,6 +2055,60 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+bool blk_req_needs_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_unlock(struct request *rq);
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+ if (blk_req_needs_zone_write_lock(rq))
+ __blk_req_zone_write_lock(rq);
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+ if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
+ __blk_req_zone_write_unlock(rq);
+}
+
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return rq->q->seq_zones_wlock &&
+ test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ if (!blk_req_needs_zone_write_lock(rq))
+ return true;
+ return !blk_req_zone_is_write_locked(rq);
+}
+#else
+static inline bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ return false;
+}
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+}
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ return true;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
#else /* CONFIG_BLOCK */
struct block_device;
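[Editor's note: taken together, the zoned-device additions implement a per-zone write lock. blk_rq_zone_no() is simply the request position divided by the zone size (sector >> ilog2(chunk_sectors); with 256 MiB zones, i.e. 524288 sectors, sector 1048576 lands in zone 2), and the lock/unlock pair brackets dispatch and completion. A sketch of the protocol, with the hardware hand-off elided:]

#include <linux/blkdev.h>

/* dispatch side: allow at most one write in flight per sequential zone */
static bool example_try_dispatch(struct request *rq)
{
        if (!blk_req_can_dispatch_to_zone(rq))
                return false;           /* zone write-locked, retry later */

        blk_req_zone_write_lock(rq);    /* no-op unless rq needs the lock */
        /* ... queue rq to the hardware ... */
        return true;
}

/* completion side */
static void example_complete(struct request *rq)
{
        blk_req_zone_write_unlock(rq);  /* no-op without RQF_ZONE_WRITE_LOCKED */
}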
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e55e4255a210..66df387106de 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,6 +17,7 @@
#include <linux/numa.h>
#include <linux/wait.h>
+struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;
@@ -24,6 +25,7 @@ struct bpf_map;
/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
+ int (*map_alloc_check)(union bpf_attr *attr);
struct bpf_map *(*map_alloc)(union bpf_attr *attr);
void (*map_release)(struct bpf_map *map, struct file *map_file);
void (*map_free)(struct bpf_map *map);
@@ -43,7 +45,14 @@ struct bpf_map_ops {
};
struct bpf_map {
- atomic_t refcnt;
+ /* 1st cacheline with read-mostly members of which some
+ * are also accessed in fast-path (e.g. ops, max_entries).
+ */
+ const struct bpf_map_ops *ops ____cacheline_aligned;
+ struct bpf_map *inner_map_meta;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
@@ -52,17 +61,46 @@ struct bpf_map {
u32 pages;
u32 id;
int numa_node;
- struct user_struct *user;
- const struct bpf_map_ops *ops;
- struct work_struct work;
+ bool unpriv_array;
+ /* 7 bytes hole */
+
+ /* 2nd cacheline with misc members to avoid false sharing
+ * particularly with refcounting.
+ */
+ struct user_struct *user ____cacheline_aligned;
+ atomic_t refcnt;
atomic_t usercnt;
- struct bpf_map *inner_map_meta;
+ struct work_struct work;
char name[BPF_OBJ_NAME_LEN];
-#ifdef CONFIG_SECURITY
- void *security;
-#endif
};
+struct bpf_offloaded_map;
+
+struct bpf_map_dev_ops {
+ int (*map_get_next_key)(struct bpf_offloaded_map *map,
+ void *key, void *next_key);
+ int (*map_lookup_elem)(struct bpf_offloaded_map *map,
+ void *key, void *value);
+ int (*map_update_elem)(struct bpf_offloaded_map *map,
+ void *key, void *value, u64 flags);
+ int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
+};
+
+struct bpf_offloaded_map {
+ struct bpf_map map;
+ struct net_device *netdev;
+ const struct bpf_map_dev_ops *dev_ops;
+ void *dev_priv;
+ struct list_head offloads;
+};
+
+static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
+{
+ return container_of(map, struct bpf_offloaded_map, map);
+}
+
+extern const struct bpf_map_ops bpf_map_offload_ops;
+
/* function argument constraints */
enum bpf_arg_type {
ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -184,14 +222,20 @@ struct bpf_verifier_ops {
struct bpf_prog *prog, u32 *target_size);
};
-struct bpf_dev_offload {
+struct bpf_prog_offload_ops {
+ int (*insn_hook)(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+};
+
+struct bpf_prog_offload {
struct bpf_prog *prog;
struct net_device *netdev;
void *dev_priv;
struct list_head offloads;
bool dev_state;
- bool verifier_running;
- wait_queue_head_t verifier_done;
+ const struct bpf_prog_offload_ops *dev_ops;
+ void *jited_image;
+ u32 jited_len;
};
struct bpf_prog_aux {
@@ -200,6 +244,10 @@ struct bpf_prog_aux {
u32 max_ctx_offset;
u32 stack_depth;
u32 id;
+ u32 func_cnt;
+ bool offload_requested;
+ struct bpf_prog **func;
+ void *jit_data; /* JIT specific data. arch dependent */
struct latch_tree_node ksym_tnode;
struct list_head ksym_lnode;
const struct bpf_prog_ops *ops;
@@ -211,7 +259,7 @@ struct bpf_prog_aux {
#ifdef CONFIG_SECURITY
void *security;
#endif
- struct bpf_dev_offload *offload;
+ struct bpf_prog_offload *offload;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -221,6 +269,7 @@ struct bpf_prog_aux {
struct bpf_array {
struct bpf_map map;
u32 elem_size;
+ u32 index_mask;
/* 'ownership' of prog_array is claimed by the first program that
* is going to use this map or by the first program which FD is stored
* in the map to make sure that all callers and callees have the same
@@ -285,6 +334,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
struct bpf_prog *old_prog);
+int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+ __u32 __user *prog_ids, u32 request_cnt,
+ __u32 __user *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
@@ -345,6 +397,9 @@ void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
@@ -353,6 +408,7 @@ void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
extern int sysctl_unprivileged_bpf_disabled;
@@ -399,6 +455,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
@@ -419,6 +476,8 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
attr->numa_node : NUMA_NO_NODE;
}
+struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -506,6 +565,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
{
return 0;
}
+
+static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
+ enum bpf_prog_type type)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -514,16 +579,39 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
return bpf_prog_get_type_dev(ufd, type, false);
}
+bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
+
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+ struct bpf_prog *prog);
+
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
+int bpf_map_offload_update_elem(struct bpf_map *map,
+ void *key, void *value, u64 flags);
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
+int bpf_map_offload_get_next_key(struct bpf_map *map,
+ void *key, void *next_key);
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
- return aux->offload;
+ return aux->offload_requested;
+}
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+ return unlikely(map->ops == &bpf_map_offload_ops);
}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
+void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
@@ -535,9 +623,23 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
return false;
}
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+ return false;
+}
+
+static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_map_offload_map_free(struct bpf_map *map)
+{
+}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
#else
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 978c1d9c9383..19b8349a3809 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-#ifdef CONFIG_STREAM_PARSER
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1632bb13ad8a..6b66cd1aa0b9 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -76,6 +76,14 @@ struct bpf_reg_state {
s64 smax_value; /* maximum possible (s64)value */
u64 umin_value; /* minimum possible (u64)value */
u64 umax_value; /* maximum possible (u64)value */
+ /* Inside the callee two registers can be both PTR_TO_STACK like
+ * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
+ * while the other points to the caller's stack. To differentiate them,
+ * 'frameno' is used: an index into the bpf_verifier_state->frame[]
+ * array pointing to a bpf_func_state.
+ * This field must be second to last, for states_equal() reasons.
+ */
+ u32 frameno;
/* This field must be last, for states_equal() reasons. */
enum bpf_reg_liveness live;
};
@@ -83,7 +91,8 @@ struct bpf_reg_state {
enum bpf_stack_slot_type {
STACK_INVALID, /* nothing was stored in this stack slot */
STACK_SPILL, /* register spilled into stack */
- STACK_MISC /* BPF program wrote some data into this slot */
+ STACK_MISC, /* BPF program wrote some data into this slot */
+ STACK_ZERO, /* BPF program wrote constant zero */
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
@@ -96,13 +105,34 @@ struct bpf_stack_state {
/* state of the program:
* type of all registers and stack info
*/
-struct bpf_verifier_state {
+struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
struct bpf_verifier_state *parent;
+ /* index of call instruction that called into this func */
+ int callsite;
+ /* stack frame number of this function state, from the point of
+ * view of the enclosing bpf_verifier_state.
+ * 0 = main function, 1 = first callee.
+ */
+ u32 frameno;
+ /* subprog number == index within subprog_stack_depth
+ * zero == main subprog
+ */
+ u32 subprogno;
+
+ /* should be second to last. See copy_func_state() */
int allocated_stack;
struct bpf_stack_state *stack;
};
+#define MAX_CALL_FRAMES 8
+struct bpf_verifier_state {
+ /* call stack tracking */
+ struct bpf_func_state *frame[MAX_CALL_FRAMES];
+ struct bpf_verifier_state *parent;
+ u32 curframe;
+};
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
@@ -113,6 +143,7 @@ struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ s32 call_imm; /* saved imm field of call insn */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
bool seen; /* this insn was processed by the verifier */
@@ -135,11 +166,7 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
return log->len_used >= log->len_total - 1;
}
-struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
- int (*insn_hook)(struct bpf_verifier_env *env,
- int insn_idx, int prev_insn_idx);
-};
+#define BPF_MAX_SUBPROGS 256
/* single container for all structs
* one verifier_env per bpf_check() call
@@ -152,29 +179,31 @@ struct bpf_verifier_env {
bool strict_alignment; /* perform strict pointer alignment checks */
struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
- const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
u32 used_map_cnt; /* number of used maps */
u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
-
struct bpf_verifer_log log;
+ u32 subprog_starts[BPF_MAX_SUBPROGS];
+ /* computes the stack depth of each bpf function */
+ u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
+ u32 subprog_cnt;
};
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+ const char *fmt, ...);
+
static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
- return env->cur_state->regs;
+ struct bpf_verifier_state *cur = env->cur_state;
+
+ return cur->frame[cur->curframe]->regs;
}
-#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
-#else
-static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
- return -EOPNOTSUPP;
-}
-#endif
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
#endif /* _LINUX_BPF_VERIFIER_H */
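[Editor's note: the frame[] array exists because programs may now contain bpf-to-bpf calls; each active function body gets its own bpf_func_state, with frame[0] being the main program. A sketch of the kind of program that exercises two frames; SEC() is the usual libbpf-style section macro, defined inline here as an assumption:]

/* built with: clang -O2 -target bpf -c prog.c */
#define SEC(name) __attribute__((section(name), used))

static __attribute__((noinline)) int sum(int a, int b)
{
        return a + b;           /* verified as frame[1], callsite recorded */
}

SEC("socket")
int example_prog(void *ctx)
{
        return sum(1, 2);       /* main body is frame[0] */
}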
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 8ff86b4c1b8a..d3339dd48b1a 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -14,6 +14,7 @@
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
+#define PHY_ID_BCM5395 0x0143bcf0
#define PHY_ID_BCM54810 0x03625d00
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 8b1bf8d3d4a2..58a82f58e44e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -151,7 +151,6 @@ void buffer_check_dirty_writeback(struct page *page,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
-void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ec8a4d7af6bd..fe7a22dd133b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -125,4 +125,13 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+/* for iterating one bio from start to end */
+#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
+{ \
+ .bi_sector = 0, \
+ .bi_size = UINT_MAX, \
+ .bi_idx = 0, \
+ .bi_bvec_done = 0, \
+}
+
#endif /* __LINUX_BVEC_ITER_H */
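[Editor's note: BVEC_ITER_ALL_INIT starts an iterator at the first vector with no byte limit (bi_size = UINT_MAX), so a walk over a bare bvec table has to bound itself by the vector count. A self-contained sketch:]

#include <linux/bvec.h>

/* sum the segment lengths of a bvec table from the very beginning */
static unsigned int example_total_len(const struct bio_vec *bvecs,
                                      unsigned int vcnt)
{
        struct bvec_iter iter = BVEC_ITER_ALL_INIT;
        unsigned int bytes = 0;

        while (iter.bi_idx < vcnt) {
                struct bio_vec bv = bvec_iter_bvec(bvecs, iter);

                bytes += bv.bv_len;
                bvec_iter_advance(bvecs, &iter, bv.bv_len);
        }
        return bytes;
}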
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 61f1cf2d9f44..055aaf5ed9af 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -46,6 +46,7 @@ struct can_priv {
unsigned int bitrate_const_cnt;
const u32 *data_bitrate_const;
unsigned int data_bitrate_const_cnt;
+ u32 bitrate_max;
struct can_clock clock;
enum can_state state;
@@ -166,6 +167,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
+#ifdef CONFIG_OF
+void of_can_transceiver(struct net_device *dev);
+#else
+static inline void of_can_transceiver(struct net_device *dev) { }
+#endif
+
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
struct sk_buff *alloc_canfd_skb(struct net_device *dev,
struct canfd_frame **cfd);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8b7fd8eeccee..9f242b876fde 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -561,7 +561,7 @@ struct cftype {
/*
* Control Group subsystem type.
- * See Documentation/cgroups/cgroups.txt for details
+ * See Documentation/cgroup-v1/cgroups.txt for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 0fc36406f32c..8a9643857c4a 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -157,6 +157,104 @@ struct compat_sigaction {
compat_sigset_t sa_mask __packed;
};
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+ int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
+ int si_errno;
+ int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
+
+ union {
+ int _pad[128/sizeof(int) - 3];
+
+ /* kill() */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+#ifdef CONFIG_X86_X32_ABI
+ /* SIGCHLD (x32 version) */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_s64 _utime;
+ compat_s64 _stime;
+ } _sigchld_x32;
+#endif
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
+ struct {
+ compat_uptr_t _addr; /* faulting insn/memory ref. */
+#ifdef __ARCH_SI_TRAPNO
+ int _trapno; /* TRAP # which caused the signal */
+#endif
+ union {
+ /*
+ * used when si_code=BUS_MCEERR_AR or
+ * si_code=BUS_MCEERR_AO
+ */
+ short int _addr_lsb; /* Valid LSB of the reported address. */
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ short _dummy_bnd;
+ compat_uptr_t _lower;
+ compat_uptr_t _upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ struct {
+ short _dummy_pkey;
+ u32 _pkey;
+ } _addr_pkey;
+ };
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ struct {
+ compat_uptr_t _call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+ } _sifields;
+} compat_siginfo_t;
+
/*
* These functions operate on 32- or 64-bit specs depending on
* COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
@@ -412,7 +510,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
+int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from);
int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 2272ded07496..631354acfa72 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -219,7 +219,7 @@
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
-#ifdef RANDSTRUCT_PLUGIN
+#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
#define __randomize_layout __attribute__((randomize_layout))
#define __no_randomize_layout __attribute__((no_randomize_layout))
#endif
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 94a59ba7d422..519e94915d18 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -32,7 +32,6 @@ struct completion {
#define init_completion(x) __init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
-static inline void complete_release_commit(struct completion *x) {}
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 723e952fde0d..d14ef4e77c8a 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -275,6 +275,50 @@ enum {
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
+#define CPER_ARM_CACHE_ERROR 0
+#define CPER_ARM_TLB_ERROR 1
+#define CPER_ARM_BUS_ERROR 2
+#define CPER_ARM_VENDOR_ERROR 3
+#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR
+
+#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
+#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
+#define CPER_ARM_ERR_VALID_LEVEL BIT(2)
+#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3)
+#define CPER_ARM_ERR_VALID_CORRECTED BIT(4)
+#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5)
+#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6)
+#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7)
+#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8)
+#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9)
+#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10)
+#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11)
+
+#define CPER_ARM_ERR_TRANSACTION_SHIFT 16
+#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0)
+#define CPER_ARM_ERR_OPERATION_SHIFT 18
+#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0)
+#define CPER_ARM_ERR_LEVEL_SHIFT 22
+#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0)
+#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25
+#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_CORRECTED_SHIFT 26
+#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27
+#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28
+#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0)
+#define CPER_ARM_ERR_TIME_OUT_SHIFT 31
+#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32
+#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0)
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0)
+#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43
+#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0)
+
/*
* All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS
@@ -494,6 +538,8 @@ struct cper_sec_pcie {
/* Reset to default packing */
#pragma pack()
+extern const char * const cper_proc_error_type_strs[4];
+
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
@@ -503,5 +549,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *,
struct cper_mem_err_compact *);
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc);
#endif
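[Editor's note: every field in this table follows the same shift-then-mask convention on the packed 64-bit error_info word; the masks are pre-shift field widths (GENMASK(1,0) and so on), not in-place masks. A sketch of one extraction:]

#include <linux/cper.h>

/* extract the transaction type bits from an ARM processor error record */
static u8 example_arm_transaction_type(u64 error_info)
{
        return (error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT) &
               CPER_ARM_ERR_TRANSACTION_MASK;
}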
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a04ef7c15c6a..7b01bc11c692 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
+extern ssize_t cpu_show_meltdown(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
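[Editor's note: these prototypes back the /sys/devices/system/cpu/vulnerabilities/ files; an architecture supplies the bodies. A sketch of one, with a made-up mitigation string:]

#include <linux/cpu.h>
#include <linux/device.h>

ssize_t cpu_show_meltdown(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        /* illustrative only; real arch code reports its actual state */
        return sprintf(buf, "Mitigation: PTI\n");
}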
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index d4292ebc5c8b..de0dafb9399d 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -30,9 +30,6 @@
struct cpufreq_policy;
-typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
- unsigned long voltage, u32 *power);
-
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -41,43 +38,6 @@ typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy);
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func);
-
-/**
- * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
- * @np: a valid struct device_node to the cooling device device tree node.
- * @policy: cpufreq policy.
- */
-#ifdef CONFIG_THERMAL_OF
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy);
-
-struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func);
-#else
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
-{
- return NULL;
-}
-#endif
-
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
@@ -90,34 +50,27 @@ cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func)
-{
- return NULL;
-}
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
- return ERR_PTR(-ENOSYS);
+ return;
}
+#endif /* CONFIG_CPU_THERMAL */
+#if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL)
+/**
+ * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
+ * @policy: cpufreq policy.
+ */
+struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct cpufreq_policy *policy);
+#else
static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return NULL;
}
-
-static inline
-void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
-{
- return;
-}
-#endif /* CONFIG_CPU_THERMAL */
+#endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
#endif /* __CPU_COOLING_H__ */
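[Editor's note: with the static-power callback gone, the DT variant needs only the cpufreq policy. A sketch of a migrated call site, e.g. from a driver's ->ready() callback; the function and message are illustrative:]

#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/err.h>

static void example_cpufreq_ready(struct cpufreq_policy *policy)
{
        struct thermal_cooling_device *cdev;

        /* was: of_cpufreq_cooling_register(np, policy) plus power variants */
        cdev = of_cpufreq_cooling_register(policy);
        if (IS_ERR_OR_NULL(cdev))
                pr_warn("cpufreq cooling device registration failed\n");
}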
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 1a32e558eb11..2c787c5cad3d 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -109,6 +109,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_PERF_METAG_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+ CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8f7788d23b57..871f9e21810c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -257,22 +257,30 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif
-#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
-({ \
- int __ret; \
- \
- if (!idx) { \
- cpu_do_idle(); \
- return idx; \
- } \
- \
- __ret = cpu_pm_enter(); \
- if (!__ret) { \
- __ret = low_level_idle_enter(idx); \
- cpu_pm_exit(); \
- } \
- \
- __ret ? -1 : idx; \
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
+({ \
+ int __ret = 0; \
+ \
+ if (!idx) { \
+ cpu_do_idle(); \
+ return idx; \
+ } \
+ \
+ if (!is_retention) \
+ __ret = cpu_pm_enter(); \
+ if (!__ret) { \
+ __ret = low_level_idle_enter(idx); \
+ if (!is_retention) \
+ cpu_pm_exit(); \
+ } \
+ \
+ __ret ? -1 : idx; \
})
+#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
+
#endif /* _LINUX_CPUIDLE_H */
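[Editor's note: the new _RETENTION variant skips the cpu_pm_enter()/cpu_pm_exit() notifier round-trip, which is the point: in a retention state the CPU context survives, so PM notifiers need not save and restore it. A sketch of a platform enter hook; the firmware call is a made-up stand-in:]

#include <linux/cpuidle.h>

static int example_firmware_retention(int idx)
{
        return 0;       /* stand-in for a PSCI-style suspend call */
}

static int example_enter_retention(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv, int idx)
{
        return CPU_PM_CPU_IDLE_ENTER_RETENTION(example_firmware_retention, idx);
}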
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 06097ef30449..b511f6d24b42 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -42,6 +42,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
#define VMCOREINFO_SYMBOL(name) \
vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SYMBOL_ARRAY(name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
#define VMCOREINFO_SIZE(name) \
vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
(unsigned long)sizeof(name))
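[Editor's note: the difference from VMCOREINFO_SYMBOL is casting name instead of &name. For a true array both yield the same address, but when the symbol is a pointer to a runtime-allocated table, the new macro records where the pointer points rather than where it lives, which is what a dump parser needs. A sketch with a hypothetical table:]

#include <linux/crash_core.h>

static unsigned long *example_table;    /* hypothetical runtime-allocated table */

static void example_save_vmcoreinfo(void)
{
        /* records the table's address (the value of example_table);
         * VMCOREINFO_SYMBOL() would record &example_table instead
         */
        VMCOREINFO_SYMBOL_ARRAY(example_table);
}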
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index cd4f420231ba..72c92c396bb8 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,12 +5,19 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
+extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
+extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
+static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
+{
+ return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
+}
+
#endif /* _LINUX_CRC_CCITT_H */
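[Editor's note: the FALSE variant differs from plain CRC-CCITT in shift direction and table indexing, matching the CRC-16/CCITT-FALSE definition, which is conventionally seeded with 0xffff. A sketch:]

#include <linux/crc-ccitt.h>

static u16 example_crc16_ccitt_false(const u8 *buf, size_t len)
{
        return crc_ccitt_false(0xffff, buf, len);
}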
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 78508ca4b108..7e6e84cf6383 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -107,8 +107,16 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
+ * Set if the algorithm has a ->setkey() method but can be used without
+ * calling it first, i.e. there is a default key.
+ */
+#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
+
+/*
* Transform masks and values (for crt_flags).
*/
+#define CRYPTO_TFM_NEED_KEY 0x00000001
+
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_RES_MASK 0xfff00000
@@ -447,7 +455,7 @@ struct crypto_alg {
unsigned int cra_alignmask;
int cra_priority;
- atomic_t cra_refcnt;
+ refcount_t cra_refcnt;
char cra_name[CRYPTO_MAX_ALG_NAME];
char cra_driver_name[CRYPTO_MAX_ALG_NAME];
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 4178d2493547..5e335b6203f4 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -71,7 +71,7 @@ extern void delayacct_init(void);
extern void __delayacct_tsk_init(struct task_struct *);
extern void __delayacct_tsk_exit(struct task_struct *);
extern void __delayacct_blkio_start(void);
-extern void __delayacct_blkio_end(void);
+extern void __delayacct_blkio_end(struct task_struct *);
extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
@@ -122,10 +122,10 @@ static inline void delayacct_blkio_start(void)
__delayacct_blkio_start();
}
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
{
if (current->delays)
- __delayacct_blkio_end();
+ __delayacct_blkio_end(p);
delayacct_clear_flag(DELAYACCT_PF_BLKIO);
}
@@ -169,7 +169,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
{}
static inline void delayacct_blkio_start(void)
{}
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
{}
static inline int delayacct_add_tsk(struct taskstats *d,
struct task_struct *tsk)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a5538433c927..da83f64952e7 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -28,6 +28,7 @@ enum dm_queue_mode {
DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_MQ_REQUEST_BASED = 3,
DM_TYPE_DAX_BIO_BASED = 4,
+ DM_TYPE_NVME_BIO_BASED = 5,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -221,14 +222,6 @@ struct target_type {
#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
/*
- * Some targets need to be sent the same WRITE bio several times so
- * that they can send copies of it to different devices. This function
- * examines any supplied bio and returns the number of copies of it the
- * target requires.
- */
-typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
-
-/*
* A target implements own bio data integrity.
*/
#define DM_TARGET_INTEGRITY 0x00000010
@@ -291,13 +284,6 @@ struct dm_target {
*/
unsigned per_io_data_size;
- /*
- * If defined, this function is called to find out how many
- * duplicate bios should be sent to the target when writing
- * data.
- */
- dm_num_write_bios_fn num_write_bios;
-
/* target specific data */
void *private;
@@ -329,35 +315,9 @@ struct dm_target_callbacks {
int (*congested_fn) (struct dm_target_callbacks *, int);
};
-/*
- * For bio-based dm.
- * One of these is allocated for each bio.
- * This structure shouldn't be touched directly by target drivers.
- * It is here so that we can inline dm_per_bio_data and
- * dm_bio_from_per_bio_data
- */
-struct dm_target_io {
- struct dm_io *io;
- struct dm_target *ti;
- unsigned target_bio_nr;
- unsigned *len_ptr;
- struct bio clone;
-};
-
-static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
-{
- return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
-}
-
-static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
-{
- return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
-}
-
-static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
-{
- return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
-}
+void *dm_per_bio_data(struct bio *bio, size_t data_size);
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
+unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -500,6 +460,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
int dm_table_complete(struct dm_table *t);
/*
+ * Destroy the table when finished.
+ */
+void dm_table_destroy(struct dm_table *t);
+
+/*
* Target may require that it is never sent I/O larger than len.
*/
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
@@ -585,6 +550,7 @@ do { \
#define DM_ENDIO_DONE 0
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2
+#define DM_ENDIO_DELAY_REQUEUE 3
/*
* Definitions of return values from target map function.
@@ -592,7 +558,7 @@ do { \
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
-#define DM_MAPIO_DELAY_REQUEUE 3
+#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL 4
#define dm_sector_div64(x, y)( \
diff --git a/include/linux/device.h b/include/linux/device.h
index 9d32000725da..f649fc0c2571 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* device.h - generic, centralized driver model
*
@@ -5,8 +6,6 @@
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2009 Novell Inc.
*
- * This file is released under the GPLv2
- *
* See Documentation/driver-model/ for more information.
*/
@@ -288,6 +287,7 @@ struct device_driver {
const struct attribute_group **groups;
const struct dev_pm_ops *pm;
+ int (*coredump) (struct device *dev);
struct driver_private *p;
};
@@ -301,7 +301,6 @@ extern struct device_driver *driver_find(const char *name,
extern int driver_probe_done(void);
extern void wait_for_device_probe(void);
-
/* sysfs interface for exporting driver attributes */
struct driver_attribute {
@@ -575,6 +574,9 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = \
+ __ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_RO(_name) \
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 79f27d60ec66..085db2fee2d7 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -301,7 +301,7 @@ struct dma_buf {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
- unsigned long active;
+ __poll_t active;
} cb_excl, cb_shared;
};
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
new file mode 100644
index 000000000000..bcdb1a3e4b1f
--- /dev/null
+++ b/include/linux/dma-direct.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_DMA_DIRECT_H
+#define _LINUX_DMA_DIRECT_H 1
+
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
+#include <asm/dma-direct.h>
+#else
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ dma_addr_t dev_addr = (dma_addr_t)paddr;
+
+ return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ phys_addr_t paddr = (phys_addr_t)dev_addr;
+
+ return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void dma_mark_clean(void *addr, size_t size);
+#else
+static inline void dma_mark_clean(void *addr, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
+
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+int dma_direct_supported(struct device *dev, u64 mask);
+
+#endif /* _LINUX_DMA_DIRECT_H */
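[Editor's note: on the generic path the CPU-to-bus translation is a constant pfn offset in each direction, so a round-trip is the identity; dma_capable() then checks the result against the device's mask. A sketch, with an arbitrary 4 KiB size:]

#include <linux/dma-direct.h>

static bool example_addr_usable(struct device *dev, phys_addr_t paddr)
{
        dma_addr_t bus = phys_to_dma(dev, paddr);

        /* dma_to_phys(dev, bus) == paddr by construction */
        return dma_capable(dev, bus, 4096);
}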
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 81ed9b2d84dc..34fe8463d10e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -136,7 +136,7 @@ struct dma_map_ops {
int is_phys;
};
-extern const struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -513,10 +513,18 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
void *cpu_addr;
BUG_ON(!ops);
+ WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
return cpu_addr;
+ /*
+ * Let the implementation decide on the zone to allocate from, and on
+ * how to zero the memory, given that the returned memory must always
+ * be zeroed.
+ */
+ flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO);
+
if (!arch_dma_alloc_attrs(&dev, &flag))
return NULL;
if (!ops->alloc)
@@ -568,6 +576,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+/*
+ * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
+ * don't use this in new code.
+ */
+#ifndef arch_dma_supported
+#define arch_dma_supported(dev, mask) (1)
+#endif
+
static inline void dma_check_mask(struct device *dev, u64 mask)
{
if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
@@ -580,6 +596,9 @@ static inline int dma_supported(struct device *dev, u64 mask)
if (!ops)
return 0;
+ if (!arch_dma_supported(dev, mask))
+ return 0;
+
if (!ops->dma_supported)
return 1;
return ops->dma_supported(dev, mask);
@@ -692,7 +711,7 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
- return *dev->dma_mask >> PAGE_SHIFT;
+ return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif
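[Editor's note: since zone and zeroing gfp bits are now stripped and handled inside the allocator, a caller passes a plain GFP and can rely on getting zeroed memory back. A sketch:]

#include <linux/dma-mapping.h>

static void *example_coherent_alloc(struct device *dev, size_t size,
                                    dma_addr_t *handle)
{
        /* no __GFP_ZERO / __GFP_DMA needed; the core decides both */
        return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, 0);
}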
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
index f48a85c377de..b4f22112ba75 100644
--- a/include/linux/dsa/lan9303.h
+++ b/include/linux/dsa/lan9303.h
@@ -23,9 +23,10 @@ struct lan9303 {
struct regmap_irq_chip_data *irq_data;
struct gpio_desc *reset_gpio;
u32 reset_duration; /* in [ms] */
- bool phy_addr_sel_strap;
+ int phy_addr_base;
struct dsa_switch *ds;
struct mutex indirect_mutex; /* protect indexed register access */
+ struct mutex alr_mutex; /* protect ALR access */
const struct lan9303_phy_ops *ops;
bool is_bridged; /* true if port 1 and 2 are bridged */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index d813f7b04da7..f5083aa72eae 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -140,11 +140,13 @@ struct efi_boot_memmap {
struct capsule_info {
efi_capsule_header_t header;
+ efi_capsule_header_t *capsule;
int reset_type;
long index;
size_t count;
size_t total_size;
- phys_addr_t *pages;
+ struct page **pages;
+ phys_addr_t *phys;
size_t page_bytes_remain;
};
@@ -473,6 +475,39 @@ typedef struct {
u64 get_all;
} apple_properties_protocol_64_t;
+typedef struct {
+ u32 get_capability;
+ u32 get_event_log;
+ u32 hash_log_extend_event;
+ u32 submit_command;
+ u32 get_active_pcr_banks;
+ u32 set_active_pcr_banks;
+ u32 get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_32_t;
+
+typedef struct {
+ u64 get_capability;
+ u64 get_event_log;
+ u64 hash_log_extend_event;
+ u64 submit_command;
+ u64 get_active_pcr_banks;
+ u64 set_active_pcr_banks;
+ u64 get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_64_t;
+
+typedef u32 efi_tcg2_event_log_format;
+
+typedef struct {
+ void *get_capability;
+ efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format,
+ efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *);
+ void *hash_log_extend_event;
+ void *submit_command;
+ void *get_active_pcr_banks;
+ void *set_active_pcr_banks;
+ void *get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_t;
+
/*
* Types and defines for EFI ResetSystem
*/
@@ -623,6 +658,7 @@ void efi_native_runtime_setup(void);
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -635,6 +671,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
+#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
typedef struct {
efi_guid_t guid;
@@ -909,6 +946,7 @@ extern struct efi {
unsigned long properties_table; /* properties table */
unsigned long mem_attr_table; /* memory attributes table */
unsigned long rng_seed; /* UEFI firmware random seed */
+ unsigned long tpm_log; /* TPM2 Event Log table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -1534,6 +1572,8 @@ static inline void
efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
#endif
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
+
/*
* Arch code can implement the following three template macros, avoiding
 * repetition for the void/non-void return cases of {__,}efi_call_virt():
@@ -1601,4 +1641,12 @@ struct linux_efi_random_seed {
u8 bits[];
};
+struct linux_efi_tpm_eventlog {
+ u32 size;
+ u8 version;
+ u8 log[];
+};
+
+extern int efi_tpm_eventlog_init(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 3d794b3dc532..6d9e230dffd2 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -198,8 +198,6 @@ extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
-extern int elv_register_queue(struct request_queue *q);
-extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, unsigned int);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *q, struct request *rq,
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
new file mode 100644
index 000000000000..280c61ecbf20
--- /dev/null
+++ b/include/linux/error-injection.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERROR_INJECTION_H
+#define _LINUX_ERROR_INJECTION_H
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+
+#include <asm/error-injection.h>
+
+extern bool within_error_injection_list(unsigned long addr);
+extern int get_injectable_error_type(unsigned long addr);
+
+#else /* !CONFIG_FUNCTION_ERROR_INJECTION */
+
+#include <asm-generic/error-injection.h>
+static inline bool within_error_injection_list(unsigned long addr)
+{
+ return false;
+}
+
+static inline int get_injectable_error_type(unsigned long addr)
+{
+ return EI_ETYPE_NONE;
+}
+
+#endif
+
+#endif /* _LINUX_ERROR_INJECTION_H */
diff --git a/include/linux/errseq.h b/include/linux/errseq.h
index 6ffae9c5052d..fc2777770768 100644
--- a/include/linux/errseq.h
+++ b/include/linux/errseq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * See Documentation/errseq.rst and lib/errseq.c
+ * See Documentation/core-api/errseq.rst and lib/errseq.c
*/
#ifndef _LINUX_ERRSEQ_H
#define _LINUX_ERRSEQ_H
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 60b2985e8a18..7094718b653b 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -26,18 +26,16 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+struct eventfd_ctx;
struct file;
#ifdef CONFIG_EVENTFD
-struct file *eventfd_file_create(unsigned int count, int flags);
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
@@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
 * Ugly ugly ugly error layer to support modules that use eventfd but
* pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
*/
-static inline struct file *eventfd_file_create(unsigned int count, int flags)
-{
- return ERR_PTR(-ENOSYS);
-}
static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
@@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
}
-static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
- __u64 *cnt)
-{
- return -ENOSYS;
-}
-
static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
wait_queue_entry_t *wait, __u64 *cnt)
{
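With eventfd_file_create() and eventfd_ctx_read() gone, in-kernel users go
through the context API that remains above. A minimal sketch, assuming the
eventfd file descriptor comes from userspace; the wrapper is hypothetical:

#include <linux/eventfd.h>
#include <linux/err.h>

static int example_signal_eventfd(int fd)
{
        struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        eventfd_signal(ctx, 1);         /* add 1 to the counter, wake pollers */
        eventfd_ctx_put(ctx);
        return 0;
}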
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 43e98d30d2df..58aecb60ea51 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -117,6 +117,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
#define CP_CRC_RECOVERY_FLAG 0x00000040
@@ -212,6 +213,7 @@ struct f2fs_extent {
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
+#define F2FS_PIN_FILE	0x40	/* file should not be GCed */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -229,7 +231,13 @@ struct f2fs_inode {
__le32 i_ctime_nsec; /* change time in nano scale */
__le32 i_mtime_nsec; /* modification time in nano scale */
__le32 i_generation; /* file version (for NFS) */
- __le32 i_current_depth; /* only for directory depth */
+ union {
+ __le32 i_current_depth; /* only for directory depth */
+ __le16 i_gc_failures; /*
+ * # of gc failures on pinned file.
+ * only for regular files.
+ */
+ };
__le32 i_xattr_nid; /* nid to save xattr */
__le32 i_flags; /* file attributes */
__le32 i_pino; /* parent inode number */
@@ -245,8 +253,10 @@ struct f2fs_inode {
__le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */
__le32 i_projid; /* project id */
__le32 i_inode_checksum;/* inode meta checksum */
+ __le64 i_crtime; /* creation time */
+ __le32 i_crtime_nsec; /* creation time in nano scale */
__le32 i_extra_end[0]; /* for attribute size calculation */
- };
+ } __packed;
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
};
__le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 80b5b482cb46..276932d75975 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -18,7 +18,9 @@
#include <linux/capability.h>
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
+#include <linux/kallsyms.h>
+#include <net/xdp.h>
#include <net/sch_generic.h>
#include <uapi/linux/filter.h>
@@ -58,6 +60,9 @@ struct bpf_prog_aux;
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
+/* unused opcode to mark call to interpreter with arguments */
+#define BPF_CALL_ARGS 0xe0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -455,10 +460,14 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
+ jit_requested:1,/* archs need to JIT the prog */
locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
- dst_needed:1; /* Do we need dst entry? */
+ dst_needed:1, /* Do we need dst entry? */
+ blinded:1, /* Was blinded */
+ is_func:1, /* program is a bpf function */
+ kprobe_override:1; /* Do we override a kprobe? */
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
@@ -495,6 +504,7 @@ struct xdp_buff {
void *data_end;
void *data_meta;
void *data_hard_start;
+ struct xdp_rxq_info *rxq;
};
/* Compute the linear packet data range [data, data_end) which
@@ -678,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);
+bool bpf_opcode_in_insntable(u8 code);
+
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
@@ -709,11 +721,22 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#define __bpf_call_base_args \
+ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+ __bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);
+static inline bool bpf_dump_raw_ok(void)
+{
+ /* Reconstruction of call-sites is dependent on kallsyms,
+ * thus make dump the same restriction.
+ */
+ return kallsyms_show_value() == 1;
+}
+
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
@@ -797,7 +820,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
return fp->jited && bpf_jit_is_ebpf();
}
-static inline bool bpf_jit_blinding_enabled(void)
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
/* These are the prerequisites, should someone ever have the
* idea to call blinding outside of them, we make sure to
@@ -805,7 +828,7 @@ static inline bool bpf_jit_blinding_enabled(void)
*/
if (!bpf_jit_is_ebpf())
return false;
- if (!bpf_jit_enable)
+ if (!prog->jit_requested)
return false;
if (!bpf_jit_harden)
return false;
@@ -982,9 +1005,20 @@ struct bpf_sock_ops_kern {
struct sock *sk;
u32 op;
union {
+ u32 args[4];
u32 reply;
u32 replylong[4];
};
+ u32 is_fullsock;
+ u64 temp; /* temp and everything after is not
+ * initialized to 0 before calling
+ * the BPF program. New fields that
+ * should be initialized to 0 should
+ * be inserted before temp.
+ * temp is scratch storage used by
+ * sock_ops_convert_ctx_access
+ * as temporary storage of a register.
+ */
};
#endif /* __LINUX_FILTER_H__ */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 511fbaabf624..2a815560fda0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -639,7 +639,7 @@ struct inode {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
};
- u64 i_version;
+ atomic64_t i_version;
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
@@ -748,6 +748,11 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
down_write_nested(&inode->i_rwsem, subclass);
}
+static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
+{
+ down_read_nested(&inode->i_rwsem, subclass);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
@@ -1359,7 +1364,7 @@ struct super_block {
const struct fscrypt_operations *s_cop;
- struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
+ struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
@@ -1608,6 +1613,10 @@ extern int vfs_whiteout(struct inode *, struct dentry *);
extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
int open_flag);
+int vfs_mkobj(struct dentry *, umode_t,
+ int (*f)(struct dentry *, umode_t, void *),
+ void *);
+
/*
* VFS file helper functions.
*/
@@ -1698,7 +1707,7 @@ struct file_operations {
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -2036,21 +2045,6 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-/**
- * inode_inc_iversion - increments i_version
- * @inode: inode that need to be updated
- *
- * Every time the inode is modified, the i_version field will be incremented.
- * The filesystem has to be mounted with i_version flag
- */
-
-static inline void inode_inc_iversion(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- inode->i_version++;
- spin_unlock(&inode->i_lock);
-}
-
enum file_time_flags {
S_ATIME = 1,
S_MTIME = 2,
@@ -2699,7 +2693,6 @@ extern sector_t bmap(struct inode *, sector_t);
#endif
extern int notify_change(struct dentry *, struct iattr *, struct inode **);
extern int inode_permission(struct inode *, int);
-extern int __inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int);
extern int __check_sticky(struct inode *dir, struct inode *inode);
@@ -2992,6 +2985,7 @@ enum {
};
void dio_end_io(struct bio *bio);
+void dio_warn_stale_pagecache(struct file *filp);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
@@ -3239,6 +3233,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
ki->ki_flags |= IOCB_DSYNC;
if (flags & RWF_SYNC)
ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+ if (flags & RWF_APPEND)
+ ki->ki_flags |= IOCB_APPEND;
return 0;
}
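With i_version now an atomic64_t, the spinlock-based inode_inc_iversion()
helper above is dropped; this series provides replacement accessors in a
dedicated header. Purely as an illustration of the new representation (not
the recommended interface), the counter can be bumped locklessly:

static inline void example_bump_iversion(struct inode *inode)
{
        /* no spin_lock(&inode->i_lock) needed anymore */
        atomic64_inc(&inode->i_version);
}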
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index f4ff47d4a893..fe0c349684fa 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
{
if (fscache_cookie_valid(cookie) && PageFsCache(page))
return __fscache_maybe_release_page(cookie, page, gfp);
- return false;
+ return true;
}
/**
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 2bab81951ced..9c3c9a319e48 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -332,6 +332,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
extern int ftrace_nr_registered_ops(void);
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
+
bool is_ftrace_trampoline(unsigned long addr);
/*
@@ -764,9 +766,6 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-/* for init task */
-#define INIT_FTRACE_GRAPH .ret_stack = NULL,
-
/*
* Stack of return addresses for functions
* of a thread.
@@ -844,7 +843,6 @@ static inline void unpause_graph_tracing(void)
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#define __notrace_funcgraph
-#define INIT_FTRACE_GRAPH
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -923,10 +921,6 @@ extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
-#ifdef CONFIG_PREEMPT
-#define INIT_TRACE_RECURSION .trace_recursion = 0,
-#endif
-
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
@@ -935,10 +929,6 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
-#ifndef INIT_TRACE_RECURSION
-#define INIT_TRACE_RECURSION
-#endif
-
#ifdef CONFIG_FTRACE_SYSCALLS
unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 411a84c6c400..4fa1a489efe4 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
struct fwnode_operations;
+struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -51,6 +52,7 @@ struct fwnode_reference_args {
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
* @put: Put a reference to an fwnode.
+ * @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
* @property_read_integer_array: Read an array of integer properties. Return
* zero on success, a negative error code
@@ -71,6 +73,8 @@ struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
bool (*device_is_available)(const struct fwnode_handle *fwnode);
+ void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
+ const struct device *dev);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index ecc2928e8046..bc738504ab4a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -31,8 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq;
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds genl mutex.
+ * the READ_ONCE(), because caller holds genl mutex.
*/
#define genl_dereference(p) \
rcu_dereference_protected(p, lockdep_genl_is_held())
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5144ebe046c9..5e3531027b51 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -395,6 +395,11 @@ static inline void add_disk(struct gendisk *disk)
{
device_add_disk(NULL, disk);
}
+extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
+static inline void add_disk_no_queue_reg(struct gendisk *disk)
+{
+ device_add_disk_no_queue_reg(NULL, disk);
+}
extern void del_gendisk(struct gendisk *gp);
extern struct gendisk *get_gendisk(dev_t dev, int *partno);
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 8ef7fc0ce0f0..91ed23468530 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,4 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * <linux/gpio.h>
+ *
+ * This is the LEGACY GPIO bulk include file, including legacy APIs. It is
+ * used for GPIO drivers still referencing the global GPIO numberspace,
+ * and should not be included in new code.
+ *
+ * If you're implementing a GPIO driver, only include <linux/gpio/driver.h>
+ * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h>
+ */
#ifndef __LINUX_GPIO_H
#define __LINUX_GPIO_H
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 7447d85dbe2f..dbd065963296 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -139,6 +139,7 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
@@ -150,8 +151,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
/* Child properties interface */
+struct device_node;
struct fwnode_handle;
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
@@ -431,6 +438,13 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
return -ENOSYS;
}
+static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -464,9 +478,20 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
}
/* Child properties interface */
+struct device_node;
struct fwnode_handle;
static inline
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
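A short sketch of the new gpiod_set_transitory() consumer call, marking a
line whose state may be lost over sleep or reset; the "reset" line name and
device are illustrative:

#include <linux/gpio/consumer.h>
#include <linux/err.h>

static int example_mark_transitory(struct device *dev)
{
        struct gpio_desc *reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);

        if (IS_ERR(reset))
                return PTR_ERR(reset);

        /* value need not be retained across suspend/reset */
        return gpiod_set_transitory(reset, true);
}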
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 7258cd676df4..1ba9a331ec51 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -436,6 +436,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+ unsigned int offset);
+
#ifdef CONFIG_LOCKDEP
/*
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 846be7c69a52..b2f2dc638463 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -10,8 +10,8 @@ enum gpio_lookup_flags {
GPIO_ACTIVE_LOW = (1 << 0),
GPIO_OPEN_DRAIN = (1 << 1),
GPIO_OPEN_SOURCE = (1 << 2),
- GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3),
- GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3),
+ GPIO_PERSISTENT = (0 << 3),
+ GPIO_TRANSITORY = (1 << 3),
};
/**
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d491027a7c22..091a81cf330f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -281,6 +281,7 @@ struct hid_item {
#define HID_DG_DEVICECONFIG 0x000d000e
#define HID_DG_DEVICESETTINGS 0x000d0023
+#define HID_DG_AZIMUTH 0x000d003f
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -342,6 +343,7 @@ struct hid_item {
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
+#define HID_QUIRK_HAVE_SPECIAL_DRIVER 0x00080000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
#define HID_QUIRK_NO_IGNORE 0x40000000
@@ -671,6 +673,7 @@ struct hid_usage_id {
* to be called)
* @dyn_list: list of dynamically added device ids
* @dyn_lock: lock protecting @dyn_list
+ * @match: check if the given device is handled by this driver
* @probe: new device inserted
* @remove: device removed (NULL if not a hot-plug capable driver)
* @report_table: on which reports to call raw_event (NULL means all)
@@ -683,6 +686,8 @@ struct hid_usage_id {
 * @input_mapped: invoked on input registering after mapping a usage
* @input_configured: invoked just before the device is registered
* @feature_mapping: invoked on feature registering
+ * @bus_add_driver: invoked when a HID driver is about to be added
+ * @bus_removed_driver: invoked when a HID driver has been removed
* @suspend: invoked on suspend (NULL means nop)
* @resume: invoked on resume if device was not reset (NULL means nop)
* @reset_resume: invoked on resume if device was reset (NULL means nop)
@@ -711,6 +716,7 @@ struct hid_driver {
struct list_head dyn_list;
spinlock_t dyn_lock;
+ bool (*match)(struct hid_device *dev, bool ignore_special_driver);
int (*probe)(struct hid_device *dev, const struct hid_device_id *id);
void (*remove)(struct hid_device *dev);
@@ -736,6 +742,8 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
+ void (*bus_add_driver)(struct hid_driver *driver);
+ void (*bus_removed_driver)(struct hid_driver *driver);
#ifdef CONFIG_PM
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
@@ -814,6 +822,8 @@ extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
+extern struct bus_type hid_bus_type;
+
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
@@ -860,8 +870,12 @@ int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
void hid_disconnect(struct hid_device *hid);
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id);
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id);
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv);
s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
@@ -1098,9 +1112,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
int interrupt);
/* HID quirks API */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
-int usbhid_quirks_init(char **quirks_param);
-void usbhid_quirks_exit(void);
+unsigned long hid_lookup_quirk(const struct hid_device *hdev);
+int hid_quirks_init(char **quirks_param, __u16 bus, int count);
+void hid_quirks_exit(__u16 bus);
#ifdef CONFIG_HID_PID
int hid_pidff_init(struct hid_device *hid);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 012c37fdb688..c7902ca7c9f4 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -28,13 +28,29 @@ struct hrtimer_cpu_base;
/*
* Mode arguments of xxx_hrtimer functions:
+ *
+ * HRTIMER_MODE_ABS - Time value is absolute
+ * HRTIMER_MODE_REL - Time value is relative to now
+ * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
+ * when starting the timer)
+ * HRTIMER_MODE_SOFT - Timer callback function will be executed in
+ * soft irq context
*/
enum hrtimer_mode {
- HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
- HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
- HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
- HRTIMER_MODE_ABS_PINNED = 0x02,
- HRTIMER_MODE_REL_PINNED = 0x03,
+ HRTIMER_MODE_ABS = 0x00,
+ HRTIMER_MODE_REL = 0x01,
+ HRTIMER_MODE_PINNED = 0x02,
+ HRTIMER_MODE_SOFT = 0x04,
+
+ HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+
+ HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
+
+ HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
+
};
/*
@@ -87,6 +103,7 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
@@ -97,6 +114,7 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
+ u8 is_soft;
};
/**
@@ -112,9 +130,9 @@ struct hrtimer_sleeper {
};
#ifdef CONFIG_64BIT
-# define HRTIMER_CLOCK_BASE_ALIGN 64
+# define __hrtimer_clock_base_align ____cacheline_aligned
#else
-# define HRTIMER_CLOCK_BASE_ALIGN 32
+# define __hrtimer_clock_base_align
#endif
/**
@@ -123,48 +141,57 @@ struct hrtimer_sleeper {
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
* @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
- int index;
+ unsigned int index;
clockid_t clockid;
+ seqcount_t seq;
+ struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
ktime_t offset;
-} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
+} __hrtimer_clock_base_align;
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI,
+ HRTIMER_BASE_MONOTONIC_SOFT,
+ HRTIMER_BASE_REALTIME_SOFT,
+ HRTIMER_BASE_BOOTTIME_SOFT,
+ HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES,
};
-/*
+/**
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
- * @seq: seqcount around __run_hrtimer
- * @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
- * @migration_enabled: The migration of hrtimers to other cpus is enabled
- * @nohz_active: The nohz functionality is enabled
- * @expires_next: absolute time of the next event which was scheduled
- * via clock_set_next_event()
- * @next_timer: Pointer to the first expiring timer
- * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hang_detected: The last hrtimer interrupt detected a hang
+ * @softirq_activated:	set when the timer softirq has been raised; no update
+ *			of softirq related settings is required then
* @nr_events: Total number of hrtimer interrupt events
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @expires_next: absolute time of the next event, is required for remote
+ * hrtimer enqueue; it is the total first expiry time (hard
+ * and soft hrtimer are taken into account)
+ * @next_timer: Pointer to the first expiring timer
+ * @softirq_expires_next: Time at which the soft queues need to be checked for expiry
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
* @clock_base: array of clock bases for this cpu
*
* Note: next_timer is just an optimization for __remove_hrtimer().
@@ -173,31 +200,28 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
- seqcount_t seq;
- struct hrtimer *running;
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
- bool migration_enabled;
- bool nohz_active;
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+ softirq_activated : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
- unsigned int in_hrtirq : 1,
- hres_active : 1,
- hang_detected : 1;
- ktime_t expires_next;
- struct hrtimer *next_timer;
unsigned int nr_events;
- unsigned int nr_retries;
- unsigned int nr_hangs;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+ struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
- BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
-
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -266,16 +290,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
return timer->base->get_time();
}
+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ timer->base->cpu_base->hres_active : 0;
+}
+
#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return timer->base->cpu_base->hres_active;
-}
-
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -298,11 +323,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return 0;
-}
-
static inline void clock_was_set_delayed(void) { }
#endif
@@ -365,11 +385,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
/**
- * hrtimer_start - (re)start an hrtimer on the current CPU
+ * hrtimer_start - (re)start an hrtimer
* @timer: the timer to be added
* @tim: expiry time
- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
- * relative (HRTIMER_MODE_REL)
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ *		softirq based mode is intended for debugging purposes only!
*/
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
@@ -422,7 +443,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
- return timer->base->cpu_base->running == timer;
+ return timer->base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
@@ -466,7 +487,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
- int clock);
+ clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
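A minimal sketch (not from the patch) of arming a timer with the new
composed soft modes, so the callback runs from the hrtimer softirq; the
callback body and period are illustrative:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart example_fn(struct hrtimer *t)
{
        /* executes in soft interrupt context due to HRTIMER_MODE_REL_SOFT */
        return HRTIMER_NORESTART;
}

static void arm_soft_timer(struct hrtimer *t)
{
        hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
        t->function = example_fn;
        hrtimer_start(t, ms_to_ktime(100), HRTIMER_MODE_REL_SOFT);
}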
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 82a25880714a..36fa6a2a82e3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -119,6 +119,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
+void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
@@ -129,7 +130,6 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
-extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
@@ -158,6 +158,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
bool is_hugetlb_entry_migration(pte_t pte);
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
@@ -198,6 +199,7 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
#define putback_active_hugepage(p) do {} while (0)
+#define move_hugetlb_state(old, new, reason) do {} while (0)
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
@@ -271,6 +273,17 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
return sb->s_fs_info;
}
+struct hugetlbfs_inode_info {
+ struct shared_policy policy;
+ struct inode vfs_inode;
+ unsigned int seals;
+};
+
+static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
+{
+ return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
+}
+
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
@@ -343,10 +356,10 @@ struct huge_bootmem_page {
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
-struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask);
+struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
@@ -524,7 +537,7 @@ struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_noerr(v, a, r) NULL
+#define alloc_huge_page_vma(h, vma, address) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6c9336626592..93bd6fcd6e62 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -127,28 +127,6 @@ struct hv_ring_buffer_info {
u32 priv_read_index;
};
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
- u32 *read, u32 *write)
-{
- u32 read_loc, write_loc, dsize;
-
- /* Capture the read/write indices before they changed */
- read_loc = rbi->ring_buffer->read_index;
- write_loc = rbi->ring_buffer->write_index;
- dsize = rbi->ring_datasize;
-
- *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
- read_loc - write_loc;
- *read = dsize - *write;
-}
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 4c54611e03e9..622658dfbf0a 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -13,6 +13,8 @@ struct ifla_vf_stats {
__u64 tx_bytes;
__u64 broadcast;
__u64 multicast;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
};
struct ifla_vf_info {
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index bedf54b6f943..4cb7aeeafce0 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -30,10 +30,10 @@ struct macvlan_dev {
enum macvlan_mode mode;
u16 flags;
int nest_level;
+ unsigned int macaddr_count;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
- unsigned int macaddr_count;
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 3ecef57c31e3..8e66866c11be 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -4,7 +4,7 @@
#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
-struct skb_array *tap_get_skb_array(struct file *file);
+struct ptr_ring *tap_get_ptr_ring(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tap_get_skb_array(struct file *f)
+static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
@@ -70,7 +70,7 @@ struct tap_queue {
u16 queue_index;
bool enabled;
struct list_head next;
- struct skb_array skb_array;
+ struct ptr_ring ring;
};
rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index bf9bdf42d577..c5b0a75a7812 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -17,9 +17,14 @@
#include <uapi/linux/if_tun.h>
+#define TUN_XDP_FLAG 0x1UL
+
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
-struct skb_array *tun_get_skb_array(struct file *file);
+struct ptr_ring *tun_get_tx_ring(struct file *file);
+bool tun_is_xdp_buff(void *ptr);
+void *tun_xdp_to_ptr(void *ptr);
+void *tun_ptr_to_xdp(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -29,9 +34,21 @@ static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tun_get_skb_array(struct file *f)
+static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+static inline bool tun_is_xdp_buff(void *ptr)
+{
+ return false;
+}
+static inline void *tun_xdp_to_ptr(void *ptr)
+{
+ return NULL;
+}
+static inline void *tun_ptr_to_xdp(void *ptr)
+{
+ return NULL;
+}
#endif /* CONFIG_TUN */
#endif /* __IF_TUN_H */
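The new helpers let sk_buff and xdp_buff pointers share one ptr_ring by
tagging bit 0 (TUN_XDP_FLAG). A hedged sketch of a consumer telling the two
apart; the handling stubs are hypothetical:

#include <linux/if_tun.h>
#include <linux/skbuff.h>
#include <linux/filter.h>       /* struct xdp_buff in this series */

static void example_consume(void *ptr)
{
        if (tun_is_xdp_buff(ptr)) {
                struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); /* strips the tag */
                /* ... handle the XDP frame ... */
        } else {
                struct sk_buff *skb = ptr;                  /* untagged */
                /* ... handle the skb ... */
        }
}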
diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h
new file mode 100644
index 000000000000..e7dc7a542a4e
--- /dev/null
+++ b/include/linux/iio/adc/stm32-dfsdm-adc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file describes the STM32 DFSDM IIO driver API for the audio part
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef STM32_DFSDM_ADC_H
+#define STM32_DFSDM_ADC_H
+
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private);
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev);
+
+#endif
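A sketch of the intended audio consumer flow, assuming an audio driver owns
the iio_dev; the callback name and private context are illustrative:

/* called for each filled buffer of converted samples */
static int example_audio_cb(const void *data, size_t size, void *private)
{
        /* push 'size' bytes of converted samples toward the audio layer */
        return 0;
}

/*
 * registration: stm32_dfsdm_get_buff_cb(iio_dev, example_audio_cb, ctx);
 * teardown:     stm32_dfsdm_release_buff_cb(iio_dev);
 */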
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 5e347a9805fd..9887f4f8e2a8 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -134,6 +134,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
void *private),
void *private);
/**
+ * iio_channel_cb_set_buffer_watermark() - set the buffer watermark.
+ * @cb_buffer: The callback buffer whose watermark is being configured.
+ * @watermark: buffer watermark in bytes.
+ *
+ * This function configures the buffer watermark.
+ */
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer,
+ size_t watermark);
+
+/**
* iio_channel_release_all_cb() - release and unregister the callback.
* @cb_buffer: The callback buffer that was allocated.
*/
@@ -216,6 +227,32 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_write_channel_attribute() - Write values to the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ * @val2: Value being written. Val2 use depends on attribute type.
+ * @attribute: info attribute to be written.
+ *
+ * Returns an error code or 0.
+ */
+int iio_write_channel_attribute(struct iio_channel *chan, int val,
+ int val2, enum iio_chan_info_enum attribute);
+
+/**
+ * iio_read_channel_attribute() - Read values from the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being read.
+ * @val2: Value being read. Val2 use depends on attribute type.
+ * @attribute: info attribute to be read.
+ *
+ * Returns an error code on failure. Otherwise returns a description of what
+ * is in val and val2, such as IIO_VAL_INT_PLUS_MICRO, which means the value
+ * is val + val2/1e6
+ */
+int iio_read_channel_attribute(struct iio_channel *chan, int *val,
+ int *val2, enum iio_chan_info_enum attribute);
+
+/**
* iio_write_channel_raw() - write to a given channel
* @chan: The channel being queried.
* @val: Value being written.
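A worked example of the attribute read contract described above: the return
value names the encoding of val/val2. A hedged sketch converting the channel
scale to a micro-unit integer (helper name and scaling are illustrative):

#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
#include <linux/errno.h>

static int example_read_scale_micro(struct iio_channel *chan, s64 *out)
{
        int val, val2, ret;

        ret = iio_read_channel_attribute(chan, &val, &val2, IIO_CHAN_INFO_SCALE);
        if (ret < 0)
                return ret;

        switch (ret) {
        case IIO_VAL_INT:
                *out = (s64)val * 1000000;              /* val only */
                return 0;
        case IIO_VAL_INT_PLUS_MICRO:
                *out = (s64)val * 1000000 + val2;       /* val + val2/1e6 */
                return 0;
        default:
                return -EINVAL;                         /* encoding not handled */
        }
}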
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
new file mode 100644
index 000000000000..44d48bb1d39f
--- /dev/null
+++ b/include/linux/iio/hw-consumer.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Industrial I/O in kernel hardware consumer interface
+ *
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef LINUX_IIO_HW_CONSUMER_H
+#define LINUX_IIO_HW_CONSUMER_H
+
+struct iio_hw_consumer;
+
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc);
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
+
+#endif
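A minimal sketch of the expected call sequence for the hardware consumer
interface, assuming iio_hw_consumer_alloc() returns ERR_PTR() on failure;
device and error paths are illustrative:

#include <linux/iio/hw-consumer.h>
#include <linux/err.h>

static int example_hw_consumer(struct device *dev)
{
        struct iio_hw_consumer *hwc;
        int ret;

        hwc = iio_hw_consumer_alloc(dev);       /* binds the mapped channels */
        if (IS_ERR(hwc))
                return PTR_ERR(hwc);

        ret = iio_hw_consumer_enable(hwc);      /* providers enter HW mode */
        if (ret)
                goto out_free;

        /* ... data now flows to the consumer in hardware ... */

        iio_hw_consumer_disable(hwc);
out_free:
        iio_hw_consumer_free(hwc);
        return ret;
}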
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 20b61347ea58..11579fd4126e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -20,34 +20,6 @@
* Currently assumes nano seconds.
*/
-enum iio_chan_info_enum {
- IIO_CHAN_INFO_RAW = 0,
- IIO_CHAN_INFO_PROCESSED,
- IIO_CHAN_INFO_SCALE,
- IIO_CHAN_INFO_OFFSET,
- IIO_CHAN_INFO_CALIBSCALE,
- IIO_CHAN_INFO_CALIBBIAS,
- IIO_CHAN_INFO_PEAK,
- IIO_CHAN_INFO_PEAK_SCALE,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
- IIO_CHAN_INFO_AVERAGE_RAW,
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_SAMP_FREQ,
- IIO_CHAN_INFO_FREQUENCY,
- IIO_CHAN_INFO_PHASE,
- IIO_CHAN_INFO_HARDWAREGAIN,
- IIO_CHAN_INFO_HYSTERESIS,
- IIO_CHAN_INFO_INT_TIME,
- IIO_CHAN_INFO_ENABLE,
- IIO_CHAN_INFO_CALIBHEIGHT,
- IIO_CHAN_INFO_CALIBWEIGHT,
- IIO_CHAN_INFO_DEBOUNCE_COUNT,
- IIO_CHAN_INFO_DEBOUNCE_TIME,
- IIO_CHAN_INFO_CALIBEMISSIVITY,
- IIO_CHAN_INFO_OVERSAMPLING_RATIO,
-};
-
enum iio_shared_by {
IIO_SEPARATE,
IIO_SHARED_BY_TYPE,
@@ -606,8 +578,8 @@ const struct iio_chan_spec
* iio_device_register() - register a device with the IIO subsystem
* @indio_dev: Device structure filled by the device driver
**/
-#define iio_device_register(iio_dev) \
- __iio_device_register((iio_dev), THIS_MODULE)
+#define iio_device_register(indio_dev) \
+ __iio_device_register((indio_dev), THIS_MODULE)
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
/**
diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h
index 1601a2a63a72..5e1cfa75f652 100644
--- a/include/linux/iio/machine.h
+++ b/include/linux/iio/machine.h
@@ -28,4 +28,11 @@ struct iio_map {
void *consumer_data;
};
+#define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \
+{ \
+ .adc_channel_label = _provider_channel, \
+ .consumer_dev_name = _consumer_dev_name, \
+ .consumer_channel = _consumer_channel, \
+}
+
#endif
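A short usage sketch for the new initializer; device and channel names are
hypothetical, and the array ends with an empty sentinel entry as expected by
the map registration code:

static struct iio_map example_maps[] = {
        IIO_MAP("adc0", "some-consumer", "vbat"),
        IIO_MAP("adc1", "some-consumer", "vref"),
        { /* sentinel */ },
};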
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 7d5e44518379..b19b7204ef84 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -43,12 +43,13 @@ struct iio_trigger_ops {
/**
* struct iio_trigger - industrial I/O trigger device
* @ops: [DRIVER] operations structure
+ * @owner: [INTERN] owner of this driver module
* @id: [INTERN] unique id number
* @name: [DRIVER] unique name
* @dev: [DRIVER] associated device (if relevant)
* @list: [INTERN] used in maintenance of global trigger list
* @alloc_list: [DRIVER] used for driver specific trigger list
- * @use_count: use count for the trigger
+ * @use_count: [INTERN] use count for the trigger.
* @subirq_chip: [INTERN] associate 'virtual' irq chip.
* @subirq_base: [INTERN] base number for irqs provided by trigger.
* @subirqs: [INTERN] information about the 'child' irqs.
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 2aa7b6384d64..6eb3d683ef62 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -34,4 +34,32 @@ enum iio_available_type {
IIO_AVAIL_RANGE,
};
+enum iio_chan_info_enum {
+ IIO_CHAN_INFO_RAW = 0,
+ IIO_CHAN_INFO_PROCESSED,
+ IIO_CHAN_INFO_SCALE,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_SAMP_FREQ,
+ IIO_CHAN_INFO_FREQUENCY,
+ IIO_CHAN_INFO_PHASE,
+ IIO_CHAN_INFO_HARDWAREGAIN,
+ IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_INT_TIME,
+ IIO_CHAN_INFO_ENABLE,
+ IIO_CHAN_INFO_CALIBHEIGHT,
+ IIO_CHAN_INFO_CALIBWEIGHT,
+ IIO_CHAN_INFO_DEBOUNCE_COUNT,
+ IIO_CHAN_INFO_DEBOUNCE_TIME,
+ IIO_CHAN_INFO_CALIBEMISSIVITY,
+ IIO_CHAN_INFO_OVERSAMPLING_RATIO,
+};
+
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 1ac5bf95bfdd..e16fe7d44a71 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -173,7 +173,7 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
}
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6a532629c983..a454b8aeb938 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -21,22 +21,11 @@
#include <asm/thread_info.h>
-#ifdef CONFIG_SMP
-# define INIT_PUSHABLE_TASKS(tsk) \
- .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
-#else
-# define INIT_PUSHABLE_TASKS(tsk)
-#endif
-
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-
-#ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ(tsk) \
- .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
-#else
-#define INIT_CPUSET_SEQ(tsk)
-#endif
+extern struct nsproxy init_nsproxy;
+extern struct group_info init_groups;
+extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
@@ -47,67 +36,16 @@ extern struct fs_struct init_fs;
#endif
#ifdef CONFIG_POSIX_TIMERS
-#define INIT_POSIX_TIMERS(s) \
- .posix_timers = LIST_HEAD_INIT(s.posix_timers),
#define INIT_CPU_TIMERS(s) \
.cpu_timers = { \
LIST_HEAD_INIT(s.cpu_timers[0]), \
LIST_HEAD_INIT(s.cpu_timers[1]), \
- LIST_HEAD_INIT(s.cpu_timers[2]), \
- },
-#define INIT_CPUTIMER(s) \
- .cputimer = { \
- .cputime_atomic = INIT_CPUTIME_ATOMIC, \
- .running = false, \
- .checking_timer = false, \
+ LIST_HEAD_INIT(s.cpu_timers[2]), \
},
#else
-#define INIT_POSIX_TIMERS(s)
#define INIT_CPU_TIMERS(s)
-#define INIT_CPUTIMER(s)
#endif
-#define INIT_SIGNALS(sig) { \
- .nr_threads = 1, \
- .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
- .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
- .shared_pending = { \
- .list = LIST_HEAD_INIT(sig.shared_pending.list), \
- .signal = {{0}}}, \
- INIT_POSIX_TIMERS(sig) \
- INIT_CPU_TIMERS(sig) \
- .rlim = INIT_RLIMITS, \
- INIT_CPUTIMER(sig) \
- INIT_PREV_CPUTIME(sig) \
- .cred_guard_mutex = \
- __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
-}
-
-extern struct nsproxy init_nsproxy;
-
-#define INIT_SIGHAND(sighand) { \
- .count = ATOMIC_INIT(1), \
- .action = { { { .sa_handler = SIG_DFL, } }, }, \
- .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
- .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
-}
-
-extern struct group_info init_groups;
-
-#define INIT_STRUCT_PID { \
- .count = ATOMIC_INIT(1), \
- .tasks = { \
- { .first = NULL }, \
- { .first = NULL }, \
- { .first = NULL }, \
- }, \
- .level = 0, \
- .numbers = { { \
- .nr = 0, \
- .ns = &init_pid_ns, \
- }, } \
-}
-
#define INIT_PID_LINK(type) \
{ \
.node = { \
@@ -117,192 +55,16 @@ extern struct group_info init_groups;
.pid = &init_struct_pid, \
}
-#ifdef CONFIG_AUDITSYSCALL
-#define INIT_IDS \
- .loginuid = INVALID_UID, \
- .sessionid = (unsigned int)-1,
-#else
-#define INIT_IDS
-#endif
-
-#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_PREEMPT(tsk) \
- .rcu_read_lock_nesting = 0, \
- .rcu_read_unlock_special.s = 0, \
- .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
- .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_TASKS_RCU
-#define INIT_TASK_RCU_TASKS(tsk) \
- .rcu_tasks_holdout = false, \
- .rcu_tasks_holdout_list = \
- LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
- .rcu_tasks_idle_cpu = -1,
-#else
-#define INIT_TASK_RCU_TASKS(tsk)
-#endif
-
-extern struct cred init_cred;
-
-#ifdef CONFIG_CGROUP_SCHED
-# define INIT_CGROUP_SCHED(tsk) \
- .sched_task_group = &root_task_group,
-#else
-# define INIT_CGROUP_SCHED(tsk)
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk) \
- .perf_event_mutex = \
- __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
- .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
-#else
-# define INIT_PERF_EVENTS(tsk)
-#endif
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-# define INIT_VTIME(tsk) \
- .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
- .vtime.starttime = 0, \
- .vtime.state = VTIME_SYS,
-#else
-# define INIT_VTIME(tsk)
-#endif
-
#define INIT_TASK_COMM "swapper"
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT_CACHED, \
- .pi_top_task = NULL,
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
-#ifdef CONFIG_NUMA_BALANCING
-# define INIT_NUMA_BALANCING(tsk) \
- .numa_preferred_nid = -1, \
- .numa_group = NULL, \
- .numa_faults = NULL,
-#else
-# define INIT_NUMA_BALANCING(tsk)
-#endif
-
-#ifdef CONFIG_KASAN
-# define INIT_KASAN(tsk) \
- .kasan_depth = 1,
-#else
-# define INIT_KASAN(tsk)
-#endif
-
-#ifdef CONFIG_LIVEPATCH
-# define INIT_LIVEPATCH(tsk) \
- .patch_state = KLP_UNDEFINED,
-#else
-# define INIT_LIVEPATCH(tsk)
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-# define INIT_TASK_TI(tsk) \
- .thread_info = INIT_THREAD_INFO(tsk), \
- .stack_refcount = ATOMIC_INIT(1),
-#else
-# define INIT_TASK_TI(tsk)
-#endif
-
-#ifdef CONFIG_SECURITY
-#define INIT_TASK_SECURITY .security = NULL,
-#else
-#define INIT_TASK_SECURITY
-#endif
-
-/*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
- */
-#define INIT_TASK(tsk) \
-{ \
- INIT_TASK_TI(tsk) \
- .state = 0, \
- .stack = init_stack, \
- .usage = ATOMIC_INIT(2), \
- .flags = PF_KTHREAD, \
- .prio = MAX_PRIO-20, \
- .static_prio = MAX_PRIO-20, \
- .normal_prio = MAX_PRIO-20, \
- .policy = SCHED_NORMAL, \
- .cpus_allowed = CPU_MASK_ALL, \
- .nr_cpus_allowed= NR_CPUS, \
- .mm = NULL, \
- .active_mm = &init_mm, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
- .se = { \
- .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
- }, \
- .rt = { \
- .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
- .time_slice = RR_TIMESLICE, \
- }, \
- .tasks = LIST_HEAD_INIT(tsk.tasks), \
- INIT_PUSHABLE_TASKS(tsk) \
- INIT_CGROUP_SCHED(tsk) \
- .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
- .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
- .real_parent = &tsk, \
- .parent = &tsk, \
- .children = LIST_HEAD_INIT(tsk.children), \
- .sibling = LIST_HEAD_INIT(tsk.sibling), \
- .group_leader = &tsk, \
- RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
- RCU_POINTER_INITIALIZER(cred, &init_cred), \
- .comm = INIT_TASK_COMM, \
- .thread = INIT_THREAD, \
- .fs = &init_fs, \
- .files = &init_files, \
- .signal = &init_signals, \
- .sighand = &init_sighand, \
- .nsproxy = &init_nsproxy, \
- .pending = { \
- .list = LIST_HEAD_INIT(tsk.pending.list), \
- .signal = {{0}}}, \
- .blocked = {{0}}, \
- .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
- .journal_info = NULL, \
- INIT_CPU_TIMERS(tsk) \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
- [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
- }, \
- .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
- .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
- INIT_IDS \
- INIT_PERF_EVENTS(tsk) \
- INIT_TRACE_IRQFLAGS \
- INIT_LOCKDEP \
- INIT_FTRACE_GRAPH \
- INIT_TRACE_RECURSION \
- INIT_TASK_RCU_PREEMPT(tsk) \
- INIT_TASK_RCU_TASKS(tsk) \
- INIT_CPUSET_SEQ(tsk) \
- INIT_RT_MUTEXES(tsk) \
- INIT_PREV_CPUTIME(tsk) \
- INIT_VTIME(tsk) \
- INIT_NUMA_BALANCING(tsk) \
- INIT_KASAN(tsk) \
- INIT_LIVEPATCH(tsk) \
- INIT_TASK_SECURITY \
-}
-
-
/* Attach to the init_task data structure for proper alignment */
+#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
#define __init_task_data __attribute__((__section__(".data..init_task")))
+#else
+#define __init_task_data /**/
+#endif
+/* Attach to the thread_info data structure for proper alignment */
+#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
#endif
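With INIT_TASK() gone from this header, a minimal sketch of how the remaining section attributes are used when the initial task is defined statically in C (symbol names follow init/init_task.c, but take this as an illustrative assumption rather than part of this diff):

/* Illustrative: place init_task (and, when thread_info is not embedded in
 * task_struct, the initial thread_info) into the specially aligned sections. */
struct task_struct init_task __init_task_data = {
	.state	= 0,
	.stack	= init_stack,
	/* ... remaining fields, now initialized in C instead of INIT_TASK() ... */
};

#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info init_thread_info __init_thread_info = INIT_THREAD_INFO(init_task);
#endif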
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index c2d6082a1a4c..858d3f4a2241 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -14,6 +14,7 @@
enum integrity_status {
INTEGRITY_PASS = 0,
+ INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 0e81035b678f..b11fcdfd0770 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -13,10 +13,13 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-#define IRQ_WORK_PENDING 1UL
-#define IRQ_WORK_BUSY 2UL
-#define IRQ_WORK_FLAGS 3UL
-#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
+#define IRQ_WORK_PENDING BIT(0)
+#define IRQ_WORK_BUSY BIT(1)
+
+/* Doesn't want IPI, wait for tick: */
+#define IRQ_WORK_LAZY BIT(2)
+
+#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
struct irq_work {
unsigned long flags;
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 46cb57d5eb13..9700f00bbc04 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -27,24 +27,19 @@
# define trace_hardirq_enter() \
do { \
current->hardirq_context++; \
- crossrelease_hist_start(XHLOCK_HARD); \
} while (0)
# define trace_hardirq_exit() \
do { \
current->hardirq_context--; \
- crossrelease_hist_end(XHLOCK_HARD); \
} while (0)
# define lockdep_softirq_enter() \
do { \
current->softirq_context++; \
- crossrelease_hist_start(XHLOCK_SOFT); \
} while (0)
# define lockdep_softirq_exit() \
do { \
current->softirq_context--; \
- crossrelease_hist_end(XHLOCK_SOFT); \
} while (0)
-# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
@@ -58,7 +53,6 @@ do { \
# define trace_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
new file mode 100644
index 000000000000..3d2fd06495ec
--- /dev/null
+++ b/include/linux/iversion.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IVERSION_H
+#define _LINUX_IVERSION_H
+
+#include <linux/fs.h>
+
+/*
+ * The inode->i_version field:
+ * ---------------------------
+ * The change attribute (i_version) is mandated by NFSv4 and is mostly for
+ * knfsd, but is also used for other purposes (e.g. IMA). The i_version must
+ * appear different to observers if there was a change to the inode's data or
+ * metadata since it was last queried.
+ *
+ * Observers see the i_version as a 64-bit number that never decreases. If it
+ * remains the same since it was last checked, then nothing has changed in the
+ * inode. If it's different then something has changed. Observers cannot infer
+ * anything about the nature or magnitude of the changes from the value, only
+ * that the inode has changed in some fashion.
+ *
+ * Not all filesystems properly implement the i_version counter. Subsystems that
+ * want to use the i_version field on an inode should first check whether the
+ * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro).
+ *
+ * Those that set SB_I_VERSION will automatically have their i_version counter
+ * incremented on writes to normal files. If SB_I_VERSION is not set, then
+ * the VFS will not touch it on writes, and the filesystem can use it as it
+ * wishes. Note that the filesystem is always responsible for updating the
+ * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.).
+ * We consider these sorts of filesystems to have a kernel-managed i_version.
+ *
+ * It may be impractical for filesystems to keep i_version updates atomic with
+ * respect to the changes that cause them. They should, however, guarantee
+ * that i_version updates are never visible before the changes that caused
+ * them. Also, i_version updates should never be delayed longer than it takes
+ * the original change to reach disk.
+ *
+ * This implementation uses the low bit in the i_version field as a flag to
+ * track when the value has been queried. If it has not been queried since it
+ * was last incremented, we can skip the increment in most cases.
+ *
+ * In the event that we're updating the ctime, we will usually go ahead and
+ * bump the i_version anyway. Since that has to go to stable storage in some
+ * fashion, we might as well increment it as well.
+ *
+ * With this implementation, the value should always appear to observers to
+ * increase over time if the file has changed. It's recommended to use the
+ * inode_cmp_iversion() helper to compare values.
+ *
+ * Note that some filesystems (e.g. NFS and AFS) just use the field to store
+ * a server-provided value (for the most part). For that reason, those
+ * filesystems do not set SB_I_VERSION. These filesystems are considered to
+ * have a self-managed i_version.
+ *
+ * Persistently storing the i_version
+ * ----------------------------------
+ * Queries of the i_version field are not gated on them hitting the backing
+ * store. It's always possible that the host could crash after allowing
+ * a query of the value but before it has made it to disk.
+ *
+ * To mitigate this problem, filesystems should always use
+ * inode_set_iversion_queried when loading an existing inode from disk. This
+ * ensures that the next attempted inode increment will result in the value
+ * changing.
+ *
+ * Storing the value to disk therefore does not count as a query, so those
+ * filesystems should use inode_peek_iversion to grab the value to be stored.
+ * There is no need to flag the value as having been queried in that case.
+ */
+
+/*
+ * We borrow the lowest bit in the i_version to use as a flag to tell whether
+ * it has been queried since we last incremented it. If it has, then we must
+ * increment it on the next change. After that, we can clear the flag and
+ * avoid incrementing it again until it has again been queried.
+ */
+#define I_VERSION_QUERIED_SHIFT (1)
+#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1))
+#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT)
+
+/**
+ * inode_set_iversion_raw - set i_version to the specified raw value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for use by
+ * filesystems that self-manage the i_version.
+ *
+ * For example, the NFS client stores its NFSv4 change attribute in this way,
+ * and the AFS client stores the data_version from the server here.
+ */
+static inline void
+inode_set_iversion_raw(struct inode *inode, u64 val)
+{
+ atomic64_set(&inode->i_version, val);
+}
+
+/**
+ * inode_peek_iversion_raw - grab a "raw" iversion value
+ * @inode: inode from which i_version should be read
+ *
+ * Grab a "raw" inode->i_version value and return it. The i_version is not
+ * flagged or converted in any way. This is mostly used to access a self-managed
+ * i_version.
+ *
+ * With those filesystems, we want to treat the i_version as an entirely
+ * opaque value.
+ */
+static inline u64
+inode_peek_iversion_raw(const struct inode *inode)
+{
+ return atomic64_read(&inode->i_version);
+}
+
+/**
+ * inode_set_iversion - set i_version to a particular value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for filesystems with
+ * a kernel-managed i_version, for initializing a newly-created inode from
+ * scratch.
+ *
+ * In this case, we do not set the QUERIED flag since we know that this value
+ * has never been queried.
+ */
+static inline void
+inode_set_iversion(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT);
+}
+
+/**
+ * inode_set_iversion_queried - set i_version to a particular value as queried
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val, and flag it for increment on the next
+ * change.
+ *
+ * Filesystems that persistently store the i_version on disk should use this
+ * when loading an existing inode from disk.
+ *
+ * When loading in an i_version value from a backing store, we can't be certain
+ * that it wasn't previously viewed before being stored. Thus, we must assume
+ * that it was, to ensure that we don't end up handing out the same value for
+ * different versions of the same inode.
+ */
+static inline void
+inode_set_iversion_queried(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) |
+ I_VERSION_QUERIED);
+}
+
+/**
+ * inode_maybe_inc_iversion - increments i_version
+ * @inode: inode with the i_version that should be updated
+ * @force: increment the counter even if it's not necessary?
+ *
+ * Every time the inode is modified, the i_version field must be seen to have
+ * changed by any observer.
+ *
+ * If "force" is set or the QUERIED flag is set, then ensure that we increment
+ * the value, and clear the queried flag.
+ *
+ * In the common case where neither is set, then we can return "false" without
+ * updating i_version.
+ *
+ * If this function returns false, and no other metadata has changed, then we
+ * can avoid logging the metadata.
+ */
+static inline bool
+inode_maybe_inc_iversion(struct inode *inode, bool force)
+{
+ u64 cur, old, new;
+
+ /*
+ * The i_version field is not strictly ordered with any other inode
+ * information, but the legacy inode_inc_iversion code used a spinlock
+ * to serialize increments.
+ *
+ * Here, we add full memory barriers to ensure that any de-facto
+ * ordering with other info is preserved.
+ *
+ * This barrier pairs with the barrier in inode_query_iversion()
+ */
+ smp_mb();
+ cur = inode_peek_iversion_raw(inode);
+ for (;;) {
+ /* If flag is clear then we needn't do anything */
+ if (!force && !(cur & I_VERSION_QUERIED))
+ return false;
+
+ /* Since lowest bit is flag, add 2 to avoid it */
+ new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
+
+ old = atomic64_cmpxchg(&inode->i_version, cur, new);
+ if (likely(old == cur))
+ break;
+ cur = old;
+ }
+ return true;
+}
+
+
+/**
+ * inode_inc_iversion - forcibly increment i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the i_version field. This always results in a change to
+ * the observable value.
+ */
+static inline void
+inode_inc_iversion(struct inode *inode)
+{
+ inode_maybe_inc_iversion(inode, true);
+}
+
+/**
+ * inode_iversion_need_inc - is the i_version in need of being incremented?
+ * @inode: inode to check
+ *
+ * Returns whether the inode->i_version counter needs incrementing on the next
+ * change. Just fetch the value and check the QUERIED flag.
+ */
+static inline bool
+inode_iversion_need_inc(struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED;
+}
+
+/**
+ * inode_inc_iversion_raw - forcibly increment raw i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the raw i_version field. This always results in a change
+ * to the raw value.
+ *
+ * NFS will use the i_version field to store the value from the server. It
+ * mostly treats it as opaque, but in the case where it holds a write
+ * delegation, it must increment the value itself. This function does that.
+ */
+static inline void
+inode_inc_iversion_raw(struct inode *inode)
+{
+ atomic64_inc(&inode->i_version);
+}
+
+/**
+ * inode_peek_iversion - read i_version without flagging it to be incremented
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter for an inode without registering it as a
+ * query.
+ *
+ * This is typically used by local filesystems that need to store an i_version
+ * on disk. In that situation, it's not necessary to flag it as having been
+ * viewed, as the result won't be used to gauge changes from that point.
+ */
+static inline u64
+inode_peek_iversion(const struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
+}
+
+/**
+ * inode_query_iversion - read i_version for later use
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter. This should be used by callers that wish
+ * to store the returned i_version for later comparison. This will guarantee
+ * that a later query of the i_version will result in a different value if
+ * anything has changed.
+ *
+ * In this implementation, we fetch the current value, set the QUERIED flag and
+ * then try to swap it into place with a cmpxchg, if it wasn't already set. If
+ * that fails, we try again with the newly fetched value from the cmpxchg.
+ */
+static inline u64
+inode_query_iversion(struct inode *inode)
+{
+ u64 cur, old, new;
+
+ cur = inode_peek_iversion_raw(inode);
+ for (;;) {
+ /* If flag is already set, then no need to swap */
+ if (cur & I_VERSION_QUERIED) {
+ /*
+ * This barrier (and the implicit barrier in the
+ * cmpxchg below) pairs with the barrier in
+ * inode_maybe_inc_iversion().
+ */
+ smp_mb();
+ break;
+ }
+
+ new = cur | I_VERSION_QUERIED;
+ old = atomic64_cmpxchg(&inode->i_version, cur, new);
+ if (likely(old == cur))
+ break;
+ cur = old;
+ }
+ return cur >> I_VERSION_QUERIED_SHIFT;
+}
+
+/**
+ * inode_cmp_iversion_raw - check whether the raw i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare the current raw i_version counter with a previous one. Returns false
+ * if they are the same or true if they are different.
+ */
+static inline bool
+inode_cmp_iversion_raw(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion_raw(inode) != old;
+}
+
+/**
+ * inode_cmp_iversion - check whether the i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare an i_version counter with a previous one. Returns false if they are
+ * the same, and true if they are different.
+ *
+ * Note that we don't need to set the QUERIED flag in this case, as the value
+ * in the inode is not being recorded for later use.
+ */
+static inline bool
+inode_cmp_iversion(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion(inode) != old;
+}
+#endif
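Taken together, the helpers above split into a producer side (the filesystem bumping the counter) and a consumer side (an observer comparing values). A hedged sketch of both, with invented demo_* names; only the inode_* helpers come from this file:

/* Consumer: record a value once, then cheaply test for changes later.
 * A zero cookie means "not sampled yet" by this demo's own convention. */
static bool demo_inode_changed(struct inode *inode, u64 *cookie)
{
	if (!*cookie) {
		/* Sets the QUERIED flag: the next change must bump i_version */
		*cookie = inode_query_iversion(inode);
		return false;
	}
	return inode_cmp_iversion(inode, *cookie);
}

/* Producer: on a write, only dirty the inode when the counter really moved. */
static void demo_write_done(struct inode *inode)
{
	if (inode_maybe_inc_iversion(inode, false))
		mark_inode_dirty(inode);
}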
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index c7b368c734af..e0340ca08d98 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -160,6 +160,8 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_inc_cpuslocked(struct static_key *key);
+extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
@@ -222,6 +224,9 @@ static inline void static_key_slow_dec(struct static_key *key)
atomic_dec(&key->enabled);
}
+#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
+#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -416,6 +421,8 @@ extern bool ____wrong_branch_error(void);
#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
+#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
+#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
/*
* Normal usage; boolean enable/disable.
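The new *_cpuslocked variants are presumably for callers that already hold the CPU hotplug read lock, avoiding a recursive cpus_read_lock(). A short sketch with an invented key name:

static DEFINE_STATIC_KEY_FALSE(demo_key);

static void demo_enable(void)
{
	cpus_read_lock();			/* hotplug lock held for other reasons */
	static_branch_inc_cpuslocked(&demo_key);
	/* ... work that must not race with CPU hotplug ... */
	cpus_read_unlock();
}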
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e0a6205caa71..7f6f93c3df9c 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kobject.h - generic kernel object infrastructure.
*
@@ -6,8 +7,6 @@
* Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2008 Novell Inc.
*
- * This file is released under the GPLv2.
- *
* Please read Documentation/kobject.txt before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index df32d2508290..069aa2ebef90 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Kernel object name space definitions
*
* Copyright (c) 2002-2003 Patrick Mochel
@@ -7,8 +8,6 @@
*
* Split from kobject.h by David Howells (dhowells@redhat.com)
*
- * This file is released under the GPLv2.
- *
* Please read Documentation/kobject.txt before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2d1d9de06728..7f4b60abdf27 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,10 +50,7 @@ struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
- nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
@@ -66,7 +63,6 @@ typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
struct nvm_dev_ops {
nvm_id_fn *identity;
- nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb_tbl;
@@ -112,8 +108,6 @@ enum {
NVM_RSP_WARN_HIGHECC = 0x4700,
/* Device opcodes */
- NVM_OP_HBREAD = 0x02,
- NVM_OP_HBWRITE = 0x81,
NVM_OP_PWRITE = 0x91,
NVM_OP_PREAD = 0x92,
NVM_OP_ERASE = 0x90,
@@ -165,12 +159,16 @@ struct nvm_id_group {
u8 fmtype;
u8 num_ch;
u8 num_lun;
- u8 num_pln;
- u16 num_blk;
- u16 num_pg;
- u16 fpg_sz;
+ u16 num_chk;
+ u16 clba;
u16 csecs;
u16 sos;
+
+ u16 ws_min;
+ u16 ws_opt;
+ u16 ws_seq;
+ u16 ws_per_chk;
+
u32 trdt;
u32 trdm;
u32 tprt;
@@ -181,7 +179,10 @@ struct nvm_id_group {
u32 mccap;
u16 cpar;
- struct nvm_id_lp_tbl lptbl;
+ /* 1.2 compatibility */
+ u8 num_pln;
+ u16 num_pg;
+ u16 fpg_sz;
};
struct nvm_addr_format {
@@ -217,6 +218,10 @@ struct nvm_target {
#define ADDR_EMPTY (~0ULL)
+#define NVM_TARGET_DEFAULT_OP (101)
+#define NVM_TARGET_MIN_OP (3)
+#define NVM_TARGET_MAX_OP (80)
+
#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
@@ -239,7 +244,6 @@ struct nvm_rq {
void *meta_list;
dma_addr_t dma_meta_list;
- struct completion *wait;
nvm_end_io_fn *end_io;
uint8_t opcode;
@@ -268,31 +272,38 @@ enum {
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
+
/* Device generic information */
struct nvm_geo {
+ /* generic geometry */
int nr_chnls;
- int nr_luns;
- int luns_per_chnl; /* -1 if channels are not symmetric */
- int nr_planes;
- int sec_per_pg; /* only sectors for a single page */
- int pgs_per_blk;
- int blks_per_lun;
- int fpg_size;
- int pfpg_size; /* size of buffer if all pages are to be read */
+ int all_luns; /* across channels */
+ int nr_luns; /* per channel */
+ int nr_chks; /* per lun */
+
int sec_size;
int oob_size;
int mccap;
- struct nvm_addr_format ppaf;
- /* Calculated/Cached values. These do not reflect the actual usable
- * blocks at run-time.
- */
+ int sec_per_chk;
+ int sec_per_lun;
+
+ int ws_min;
+ int ws_opt;
+ int ws_seq;
+ int ws_per_chk;
+
int max_rq_size;
- int plane_mode; /* drive device in single, double or quad mode */
+ int op;
+
+ struct nvm_addr_format ppaf;
+
+ /* Legacy 1.2 specific geometry */
+ int plane_mode; /* drive device in single, double or quad mode */
+ int nr_planes;
+ int sec_per_pg; /* only sectors for a single page */
int sec_per_pl; /* all sectors across planes */
- int sec_per_blk;
- int sec_per_lun;
};
/* sub-device structure */
@@ -320,10 +331,6 @@ struct nvm_dev {
/* Device information */
struct nvm_geo geo;
- /* lower page table */
- int lps_per_blk;
- int *lptbl;
-
unsigned long total_secs;
unsigned long *lun_map;
@@ -346,36 +353,6 @@ struct nvm_dev {
struct list_head targets;
};
-static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
- u64 pba)
-{
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = pba;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, geo->pgs_per_blk);
- div_u64_rem(ppa, geo->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, geo->blks_per_lun);
- div_u64_rem(ppa, geo->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, geo->luns_per_chnl);
- l.g.ch = ppa;
-
- return l;
-}
-
static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
struct ppa_addr r)
{
@@ -418,25 +395,6 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
return l;
}
-static inline int ppa_empty(struct ppa_addr ppa_addr)
-{
- return (ppa_addr.ppa == ADDR_EMPTY);
-}
-
-static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
-{
- ppa_addr->ppa = ADDR_EMPTY;
-}
-
-static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
-{
- if (ppa_empty(ppa1) || ppa_empty(ppa2))
- return 0;
-
- return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
- (ppa1.g.blk == ppa2.g.blk));
-}
-
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
@@ -481,17 +439,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
-extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
- void *);
-extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
-extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
extern void nvm_end_io(struct nvm_rq *);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
-extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
-
#else /* CONFIG_NVM */
struct nvm_dev_ops;
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index fc5c1be3f6f4..4754f01c1abb 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -40,7 +40,6 @@
* @new_func: pointer to the patched function code
* @old_sympos: a hint indicating which symbol position the old function
* can be found (optional)
- * @immediate: patch the func immediately, bypassing safety mechanisms
* @old_addr: the address of the function being patched
* @kobj: kobject for sysfs resources
* @stack_node: list node for klp_ops func_stack list
@@ -76,7 +75,6 @@ struct klp_func {
* in kallsyms for the given object is used.
*/
unsigned long old_sympos;
- bool immediate;
/* internal */
unsigned long old_addr;
@@ -137,7 +135,6 @@ struct klp_object {
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
- * @immediate: patch all funcs immediately, bypassing safety mechanisms
* @list: list node for global list of registered patches
* @kobj: kobject for sysfs resources
* @enabled: the patch is enabled (but operation may be incomplete)
@@ -147,7 +144,6 @@ struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
- bool immediate;
/* internal */
struct list_head list;
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index d7d313fb9cd4..4fd95dbeb52f 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -17,6 +17,7 @@
#include <net/ipv6.h>
#include <linux/fs.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/utsname.h>
#include <linux/lockd/bind.h>
#include <linux/lockd/xdr.h>
@@ -58,7 +59,7 @@ struct nlm_host {
u32 h_state; /* pseudo-state counter */
u32 h_nsmstate; /* true remote NSM state */
u32 h_pidcount; /* Pseudopids */
- atomic_t h_count; /* reference count */
+ refcount_t h_count; /* reference count */
struct mutex h_mutex; /* mutex for pmap binding */
unsigned long h_nextrebind; /* next portmap call */
unsigned long h_expires; /* eligible for GC */
@@ -83,7 +84,7 @@ struct nlm_host {
struct nsm_handle {
struct list_head sm_link;
- atomic_t sm_count;
+ refcount_t sm_count;
char *sm_mon_name;
char *sm_name;
struct sockaddr_storage sm_addr;
@@ -122,7 +123,7 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
*/
struct nlm_lockowner {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct nlm_host *host;
fl_owner_t owner;
@@ -136,7 +137,7 @@ struct nlm_wait;
*/
#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u)
struct nlm_rqst {
- atomic_t a_count;
+ refcount_t a_count;
unsigned int a_flags; /* initial RPC task flags */
struct nlm_host * a_host; /* host handle */
struct nlm_args a_args; /* arguments */
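The atomic_t -> refcount_t conversions above follow the usual pattern: refcount_inc()/refcount_dec_and_test() replace atomic_inc()/atomic_dec_and_test(), gaining saturation and use-after-free checking. A generic sketch (demo_* names are invented; h_count is the field converted above):

static struct nlm_host *demo_get_host(struct nlm_host *host)
{
	refcount_inc(&host->h_count);		/* was atomic_inc() */
	return host;
}

static void demo_put_host(struct nlm_host *host)
{
	if (refcount_dec_and_test(&host->h_count))	/* was atomic_dec_and_test() */
		kfree(host);			/* stand-in for the real release path */
}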
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2e75dc34bff5..6fc77d4dbdcd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
/*
* Same "read" as for lock_acquire(), except -1 means any.
*/
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
{
return lock_is_held_type(lock, -1);
}
@@ -367,8 +367,6 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
-# define INIT_LOCKDEP .lockdep_recursion = 0,
-
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l) do { \
@@ -426,7 +424,6 @@ static inline void lockdep_on(void)
* #ifdef the call himself.
*/
-# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
@@ -475,8 +472,6 @@ enum xhlock_context_t {
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), }
-static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
-static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ca08ab16ecdc..2cfffe586885 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -12,6 +12,7 @@
#include <uapi/linux/mdio.h>
#include <linux/mod_devicetable.h>
+struct gpio_desc;
struct mii_bus;
/* Multiple levels of nesting are possible. However typically this is
@@ -39,6 +40,9 @@ struct mdio_device {
/* Bus address of the MDIO device (0-31) */
int addr;
int flags;
+ struct gpio_desc *reset;
+ unsigned int reset_assert_delay;
+ unsigned int reset_deassert_delay;
};
#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
@@ -71,6 +75,7 @@ void mdio_device_free(struct mdio_device *mdiodev);
struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr);
int mdio_device_register(struct mdio_device *mdiodev);
void mdio_device_remove(struct mdio_device *mdiodev);
+void mdio_device_reset(struct mdio_device *mdiodev, int value);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
@@ -257,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
return reg;
}
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+
int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
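By the usual kernel convention, the new double-underscore accessors are unlocked variants; the sketch below assumes the caller must hold bus->mdio_lock, e.g. to make a read-modify-write atomic with respect to other bus users:

static int demo_mdio_rmw(struct mii_bus *bus, int addr, u32 regnum,
			 u16 mask, u16 set)
{
	int val;

	mutex_lock(&bus->mdio_lock);
	val = __mdiobus_read(bus, addr, regnum);
	if (val >= 0)
		val = __mdiobus_write(bus, addr, regnum,
				      ((u16)val & ~mask) | set);
	mutex_unlock(&bus->mdio_lock);
	return val;
}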
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 69966c461d1c..882046863581 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -108,7 +108,10 @@ struct lruvec_stat {
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
- struct lruvec_stat __percpu *lruvec_stat;
+
+ struct lruvec_stat __percpu *lruvec_stat_cpu;
+ atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
+
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@@ -227,10 +230,10 @@ struct mem_cgroup {
spinlock_t move_lock;
struct task_struct *move_lock_task;
unsigned long move_lock_flags;
- /*
- * percpu counter.
- */
- struct mem_cgroup_stat_cpu __percpu *stat;
+
+ struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+ atomic_long_t stat[MEMCG_NR_STAT];
+ atomic_long_t events[MEMCG_NR_EVENTS];
unsigned long socket_pressure;
@@ -265,6 +268,12 @@ struct mem_cgroup {
/* WARNING: nodeinfo must be the last member here */
};
+/*
+ * Size of the first charge trial; "32" comes from vmscan.c's magic value.
+ * TODO: larger batches may be needed on big-iron machines.
+ */
+#define MEMCG_CHARGE_BATCH 32U
+
extern struct mem_cgroup *root_mem_cgroup;
static inline bool mem_cgroup_disabled(void)
@@ -272,13 +281,6 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-static inline void mem_cgroup_event(struct mem_cgroup *memcg,
- enum memcg_event_item event)
-{
- this_cpu_inc(memcg->stat->events[event]);
- cgroup_file_notify(&memcg->events_file);
-}
-
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
@@ -492,32 +494,38 @@ void unlock_page_memcg(struct page *page);
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
int idx)
{
- long val = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- val += per_cpu(memcg->stat->count[idx], cpu);
-
- if (val < 0)
- val = 0;
-
- return val;
+ long x = atomic_long_read(&memcg->stat[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
{
- if (!mem_cgroup_disabled())
- __this_cpu_add(memcg->stat->count[idx], val);
+ long x;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
+ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
+ atomic_long_add(x, &memcg->stat[idx]);
+ x = 0;
+ }
+ __this_cpu_write(memcg->stat_cpu->count[idx], x);
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->count[idx], val);
+ preempt_disable();
+ __mod_memcg_state(memcg, idx, val);
+ preempt_enable();
}
/**
@@ -555,87 +563,108 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
- long val = 0;
- int cpu;
+ long x;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for_each_possible_cpu(cpu)
- val += per_cpu(pn->lruvec_stat->count[idx], cpu);
-
- if (val < 0)
- val = 0;
-
- return val;
+ x = atomic_long_read(&pn->lruvec_stat[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
static inline void __mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
struct mem_cgroup_per_node *pn;
+ long x;
+ /* Update node */
__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+
if (mem_cgroup_disabled())
return;
+
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+
+ /* Update memcg */
__mod_memcg_state(pn->memcg, idx, val);
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+
+ /* Update lruvec */
+ x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
+ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
+ atomic_long_add(x, &pn->lruvec_stat[idx]);
+ x = 0;
+ }
+ __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}
static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
-
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
- if (mem_cgroup_disabled())
- return;
- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- mod_memcg_state(pn->memcg, idx, val);
- this_cpu_add(pn->lruvec_stat->count[idx], val);
+ preempt_disable();
+ __mod_lruvec_state(lruvec, idx, val);
+ preempt_enable();
}
static inline void __mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
+ pg_data_t *pgdat = page_pgdat(page);
+ struct lruvec *lruvec;
- __mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
+ /* Untracked pages have no memcg, no lruvec. Update only the node */
+ if (!page->mem_cgroup) {
+ __mod_node_page_state(pgdat, idx, val);
return;
- __mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+ }
+
+ lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+ __mod_lruvec_state(lruvec, idx, val);
}
static inline void mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
-
- mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
- return;
- mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- this_cpu_add(pn->lruvec_stat->count[idx], val);
+ preempt_disable();
+ __mod_lruvec_page_state(page, idx, val);
+ preempt_enable();
}
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
+/* idx can be of type enum memcg_event_item or vm_event_item */
+static inline void __count_memcg_events(struct mem_cgroup *memcg,
+ int idx, unsigned long count)
+{
+ unsigned long x;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
+ if (unlikely(x > MEMCG_CHARGE_BATCH)) {
+ atomic_long_add(x, &memcg->events[idx]);
+ x = 0;
+ }
+ __this_cpu_write(memcg->stat_cpu->events[idx], x);
+}
+
static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
+ int idx, unsigned long count)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->events[idx], count);
+ preempt_disable();
+ __count_memcg_events(memcg, idx, count);
+ preempt_enable();
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
+/* idx can be of type enum memcg_event_item or vm_event_item */
static inline void count_memcg_page_event(struct page *page,
int idx)
{
@@ -654,12 +683,20 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
if (likely(memcg)) {
- this_cpu_inc(memcg->stat->events[idx]);
+ count_memcg_events(memcg, idx, 1);
if (idx == OOM_KILL)
cgroup_file_notify(&memcg->events_file);
}
rcu_read_unlock();
}
+
+static inline void mem_cgroup_event(struct mem_cgroup *memcg,
+ enum memcg_event_item event)
+{
+ count_memcg_events(memcg, event, 1);
+ cgroup_file_notify(&memcg->events_file);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif
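The new __mod_memcg_state()/__mod_lruvec_state()/__count_memcg_events() all share one batching idea: accumulate deltas in a per-CPU counter and spill to the shared atomic only when the local magnitude exceeds MEMCG_CHARGE_BATCH, trading a bounded read error for far fewer atomic RMWs. A generic sketch of the pattern (not part of the patch):

static inline void demo_batched_add(atomic_long_t *shared,
				    long __percpu *local, long delta)
{
	long x = delta + __this_cpu_read(*local);

	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, shared);	/* flush the accumulated delta */
		x = 0;
	}
	__this_cpu_write(*local, x);
}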
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 4e887ba22635..c61535979b8f 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -322,6 +322,10 @@ extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
extern struct attribute_group cros_ec_vbc_attr_group;
+/* debugfs stuff */
+int cros_ec_debugfs_init(struct cros_ec_dev *ec);
+void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
+
/* ACPI GPE handler */
#ifdef CONFIG_ACPI
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 3c8568aa82a5..75e5c8ff85fc 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -3733,6 +3733,9 @@ enum usb_irq_events {
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
+#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
+
/* Registers for function RESOURCE */
#define TPS65917_REGEN1_CTRL 0x2
#define TPS65917_PLLEN_CTRL 0x3
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
new file mode 100644
index 000000000000..796fb9794c9e
--- /dev/null
+++ b/include/linux/mfd/rave-sp.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Core definitions for RAVE SP MFD driver.
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#ifndef _LINUX_RAVE_SP_H_
+#define _LINUX_RAVE_SP_H_
+
+#include <linux/notifier.h>
+
+enum rave_sp_command {
+ RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20,
+ RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21,
+ RAVE_SP_CMD_BOOT_SOURCE = 0x26,
+ RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B,
+ RAVE_SP_CMD_GET_GPIO_STATE = 0x2F,
+
+ RAVE_SP_CMD_STATUS = 0xA0,
+ RAVE_SP_CMD_SW_WDT = 0xA1,
+ RAVE_SP_CMD_PET_WDT = 0xA2,
+ RAVE_SP_CMD_RESET = 0xA7,
+ RAVE_SP_CMD_RESET_REASON = 0xA8,
+
+ RAVE_SP_CMD_REQ_COPPER_REV = 0xB6,
+ RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA,
+ RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9,
+ RAVE_SP_CMD_CONTROL_EVENTS = 0xBB,
+
+ RAVE_SP_EVNT_BASE = 0xE0,
+};
+
+struct rave_sp;
+
+static inline unsigned long rave_sp_action_pack(u8 event, u8 value)
+{
+ return ((unsigned long)value << 8) | event;
+}
+
+static inline u8 rave_sp_action_unpack_event(unsigned long action)
+{
+ return action;
+}
+
+static inline u8 rave_sp_action_unpack_value(unsigned long action)
+{
+ return action >> 8;
+}
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size);
+
+struct device;
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb);
+
+#endif /* _LINUX_RAVE_SP_H_ */
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 77c7cf40d9b4..605f62264825 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* STM32 Low-Power Timer parent driver.
- *
* Copyright (C) STMicroelectronics 2017
- *
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
* Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_LPTIMER_H_
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index ce7346e7f77a..2aadab6f34a1 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -1,9 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_GPTIMER_H_
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index e1cfe9194129..396a103c8bc6 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -25,26 +25,6 @@
writew((val) >> 16, (addr) + 2); \
} while (0)
-#define CNF_CMD 0x04
-#define CNF_CTL_BASE 0x10
-#define CNF_INT_PIN 0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
#define sd_config_write8(base, shift, reg, val) \
tmio_iowrite8((val), (base) + ((reg) << (shift)))
#define sd_config_write16(base, shift, reg, val) \
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 409ffb14298a..e5258ee4e38b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -79,6 +79,11 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
+ MLX5_SET(typ, p, fld[idx], v); \
+} while (0)
+
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
@@ -244,6 +249,8 @@ enum {
MLX5_NON_FP_BFREGS_PER_UAR,
MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
+ MLX5_MIN_DYN_BFREGS = 512,
+ MLX5_MAX_DYN_BFREGS = 1024,
};
enum {
@@ -284,6 +291,7 @@ enum {
MLX5_EVENT_QUEUE_TYPE_QP = 0,
MLX5_EVENT_QUEUE_TYPE_RQ = 1,
MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+ MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
enum mlx5_event {
@@ -319,6 +327,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+ MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
};
@@ -611,6 +621,11 @@ struct mlx5_eqe_pps {
u8 rsvd2[12];
} __packed;
+struct mlx5_eqe_dct {
+ __be32 reserved[6];
+ __be32 dctn;
+};
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -626,6 +641,7 @@ union ev_data {
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
struct mlx5_eqe_pps pps;
+ struct mlx5_eqe_dct dct;
} __packed;
struct mlx5_eqe {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1f509d072026..fb7e8b205eb9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
+#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
@@ -154,6 +155,13 @@ enum mlx5_dcbx_oper_mode {
MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
+enum mlx5_dct_atomic_mode {
+ MLX5_ATOMIC_MODE_DCT_OFF = 20,
+ MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+};
+
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
@@ -230,6 +238,9 @@ struct mlx5_bfreg_info {
u32 ver;
bool lib_uar_4k;
u32 num_sys_pages;
+ u32 num_static_sys_pages;
+ u32 total_num_bfregs;
+ u32 num_dyn_bfregs;
};
struct mlx5_cmd_first {
@@ -429,6 +440,7 @@ enum mlx5_res_type {
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
MLX5_RES_XRQ = 5,
+ MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
@@ -787,6 +799,7 @@ struct mlx5_clock {
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
+ struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
@@ -825,7 +838,7 @@ struct mlx5_core_dev {
struct mlx5e_resources mlx5e_res;
struct {
struct mlx5_rsvd_gids reserved_gids;
- atomic_t roce_en;
+ u32 roce_en;
} roce;
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
@@ -834,6 +847,8 @@ struct mlx5_core_dev {
struct cpu_rmap *rmap;
#endif
struct mlx5_clock clock;
+ struct mlx5_ib_clock_info *clock_info;
+ struct page *clock_info_page;
};
struct mlx5_db {
@@ -1102,7 +1117,7 @@ void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id);
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
@@ -1224,6 +1239,31 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
return !!(dev->priv.rl_table.max_size);
}
+static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
+ MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
+}
+
+static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
+}
+
+static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_mp_slave(dev) ||
+ mlx5_core_is_mp_master(dev);
+}
+
+static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_mp_enabled(dev))
+ return 1;
+
+ return MLX5_CAP_GEN(dev, native_port_num);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
@@ -1231,7 +1271,23 @@ enum {
static inline const struct cpumask *
mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
{
- return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
+ const struct cpumask *mask;
+ struct irq_desc *desc;
+ unsigned int irq;
+ int eqn;
+ int err;
+
+ err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
+ if (err)
+ return NULL;
+
+ desc = irq_to_desc(irq);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
+#else
+ mask = desc->irq_common_data.affinity;
+#endif
+ return mask;
}
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b25e7baa273e..a0b48afcb422 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -95,6 +95,10 @@ struct mlx5_flow_destination {
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport);
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d44ec5f41d4a..f4e417686f62 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -502,7 +502,7 @@ struct mlx5_ifc_ads_bits {
u8 dei_cfi[0x1];
u8 eth_prio[0x3];
u8 sl[0x4];
- u8 port[0x8];
+ u8 vhca_port_num[0x8];
u8 rmac_47_32[0x10];
u8 rmac_31_0[0x20];
@@ -794,7 +794,10 @@ enum {
};
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x80];
+ u8 reserved_at_0[0x30];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_40[0x40];
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
@@ -1023,12 +1026,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_3b8[0x3];
u8 log_min_stride_sz_sq[0x5];
- u8 reserved_at_3c0[0x1b];
+ u8 hairpin[0x1];
+ u8 reserved_at_3c1[0x2];
+ u8 log_max_hairpin_queues[0x5];
+ u8 reserved_at_3c8[0x3];
+ u8 log_max_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3d0[0x3];
+ u8 log_max_hairpin_num_packets[0x5];
+ u8 reserved_at_3d8[0x3];
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
- u8 disable_local_lb[0x1];
- u8 reserved_at_3e2[0x9];
+ u8 disable_local_lb_uc[0x1];
+ u8 disable_local_lb_mc[0x1];
+ u8 log_min_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3e8[0x3];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -1066,7 +1078,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5f8[0x3];
u8 log_max_xrq[0x5];
- u8 reserved_at_600[0x200];
+ u8 affiliate_nic_vport_criteria[0x8];
+ u8 native_port_num[0x8];
+ u8 num_vhca_ports[0x8];
+ u8 reserved_at_618[0x6];
+ u8 sw_owner_id[0x1];
+ u8 reserved_at_61f[0x1e1];
};
enum mlx5_flow_destination_type {
@@ -1162,7 +1179,12 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
- u8 reserved_at_120[0x15];
+ u8 reserved_at_120[0x3];
+ u8 log_hairpin_num_packets[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_hairpin_data_sz[0x5];
+ u8 reserved_at_130[0x5];
+
u8 log_wqe_num_of_strides[0x3];
u8 two_byte_shift_en[0x1];
u8 reserved_at_139[0x4];
@@ -2482,7 +2504,8 @@ struct mlx5_ifc_sqc_bits {
u8 state[0x4];
u8 reg_umr[0x1];
u8 allow_swp[0x1];
- u8 reserved_at_e[0x12];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0x11];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2490,7 +2513,13 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_40[0x8];
u8 cqn[0x18];
- u8 reserved_at_60[0x90];
+ u8 reserved_at_60[0x8];
+ u8 hairpin_peer_rq[0x18];
+
+ u8 reserved_at_80[0x10];
+ u8 hairpin_peer_vhca[0x10];
+
+ u8 reserved_at_a0[0x50];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
@@ -2562,7 +2591,8 @@ struct mlx5_ifc_rqc_bits {
u8 state[0x4];
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_at_e[0x12];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0x11];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2576,7 +2606,13 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_80[0x8];
u8 rmpn[0x18];
- u8 reserved_at_a0[0xe0];
+ u8 reserved_at_a0[0x8];
+ u8 hairpin_peer_sq[0x18];
+
+ u8 reserved_at_c0[0x10];
+ u8 hairpin_peer_vhca[0x10];
+
+ u8 reserved_at_e0[0xa0];
struct mlx5_ifc_wq_bits wq;
};
@@ -2615,7 +2651,12 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xf0];
+ u8 reserved_at_40[0xc];
+
+ u8 affiliation_criteria[0x4];
+ u8 affiliated_vhca_id[0x10];
+
+ u8 reserved_at_60[0xd0];
u8 mtu[0x10];
@@ -3258,7 +3299,8 @@ struct mlx5_ifc_set_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
@@ -3878,7 +3920,8 @@ struct mlx5_ifc_query_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
};
@@ -5310,7 +5353,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x14];
+ u8 reserved_at_0[0x12];
+ u8 affiliation[0x1];
+ u8 reserved_at_e[0x1];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
@@ -5531,6 +5576,7 @@ struct mlx5_ifc_init_hca_in_bits {
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
+ u8 sw_owner_id[4][0x20];
};
struct mlx5_ifc_init2rtr_qp_out_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 62af7512dabb..4778d41085d4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -473,6 +473,11 @@ struct mlx5_core_qp {
int pid;
};
+struct mlx5_core_dct {
+ struct mlx5_core_qp mqp;
+ struct completion drained;
+};
+
struct mlx5_qp_path {
u8 fl_free_ar;
u8 rsvd3;
@@ -549,6 +554,9 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *qp,
+ u32 *in, int inlen);
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in,
@@ -558,8 +566,12 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
u32 timeout_usec);
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 88441f5ece25..7e8f281f8c00 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -75,4 +75,27 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen);
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+struct mlx5_hairpin_params {
+ u8 log_data_size;
+ u8 log_num_packets;
+ u16 q_counter;
+ int num_channels;
+};
+
+struct mlx5_hairpin {
+ struct mlx5_core_dev *func_mdev;
+ struct mlx5_core_dev *peer_mdev;
+
+ int num_channels;
+
+ u32 *rqn;
+ u32 *sqn;
+};
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params);
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
#endif /* __TRANSOBJ_H__ */
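A hedged sketch of using the new hairpin API to pair two functions' queues; the parameter values are invented for illustration:

static struct mlx5_hairpin *demo_hairpin(struct mlx5_core_dev *func_mdev,
					 struct mlx5_core_dev *peer_mdev)
{
	struct mlx5_hairpin_params params = {
		.log_data_size	 = 16,	/* per-queue hairpin buffer size */
		.log_num_packets = 8,
		.q_counter	 = 0,
		.num_channels	 = 1,
	};

	return mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
}
/* Teardown is symmetric: mlx5_core_hairpin_destroy(hp); */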
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index aaa0bb9e7655..64e193e87394 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -116,4 +116,8 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *req);
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev);
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ea818ff739cd..173d2484f6e3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1312,8 +1312,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
-void unmap_mapping_range(struct address_space *mapping,
- loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
unsigned long *start, unsigned long *end,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
@@ -1324,12 +1322,6 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address,
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
-static inline void unmap_shared_mapping_range(struct address_space *mapping,
- loff_t const holebegin, loff_t const holelen)
-{
- unmap_mapping_range(mapping, holebegin, holelen, 0);
-}
-
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -1344,6 +1336,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
+void unmap_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t nr, bool even_cows);
+void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline int handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
@@ -1360,10 +1356,20 @@ static inline int fixup_user_fault(struct task_struct *tsk,
BUG();
return -EFAULT;
}
+static inline void unmap_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t nr, bool even_cows) { }
+static inline void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
- unsigned int gup_flags);
+static inline void unmap_shared_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen)
+{
+ unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
+ void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
@@ -2570,8 +2576,8 @@ enum mf_flags {
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
};
-extern int memory_failure(unsigned long pfn, int trapno, int flags);
-extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
+extern int memory_failure(unsigned long pfn, int flags);
+extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page) put_page(page)
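
The new unmap_mapping_pages() takes a page-offset range where unmap_mapping_range() takes a byte range. A rough sketch of the correspondence between the two, under the assumption that the byte-based helper rounds the length up to whole pages (the in-kernel rounding and overflow handling may differ):

	static void demo_unmap_byte_range(struct address_space *mapping,
					  loff_t holebegin, loff_t holelen,
					  int even_cows)
	{
		pgoff_t start = holebegin >> PAGE_SHIFT;
		/* round the byte length up to whole pages */
		pgoff_t nr = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;

		unmap_mapping_pages(mapping, start, nr, even_cows);
	}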
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cfd0ac4e5e0e..fd1af6b9591d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -31,28 +31,56 @@ struct hmm;
* it to keep track of whatever it is we are using the page for at the
* moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us
- * who is mapping it.
+ * who is mapping it. If you allocate the page using alloc_pages(), you
+ * can use some of the space in struct page for your own purposes.
*
- * The objects in struct page are organized in double word blocks in
- * order to allows us to use atomic double word operations on portions
- * of struct page. That is currently only used by slub but the arrangement
- * allows the use of atomic double word operations on the flags/mapping
- * and lru list pointers also.
+ * Pages that were once in the page cache may be found under the RCU lock
+ * even after they have been recycled to a different purpose. The page
+ * cache reads and writes some of the fields in struct page to pin the
+ * page before checking that it's still in the page cache. It is vital
+ * that all users of struct page:
+ * 1. Use the first word as PageFlags.
+ * 2. Clear or preserve bit 0 of page->compound_head. It is used as
+ * PageTail for compound pages, and the page cache must not see false
+ * positives. Some users put a pointer here (guaranteed to be at least
+ * 4-byte aligned), other users avoid using the field altogether.
+ * 3. page->_refcount must either not be used, or must be used in such a
+ * way that other CPUs temporarily incrementing and then decrementing the
+ * refcount does not cause problems. On receiving the page from
+ * alloc_pages(), the refcount will be positive.
+ * 4. Either preserve page->_mapcount or restore it to -1 before freeing it.
+ *
+ * If you allocate pages of order > 0, you can use the fields in the struct
+ * page associated with each page, but bear in mind that the pages may have
+ * been inserted individually into the page cache, so you must use the above
+ * four fields in a compatible way for each struct page.
+ *
+ * SLUB uses cmpxchg_double() to atomically update its freelist and
+ * counters. That requires that freelist & counters be adjacent and
+ * double-word aligned. We align all struct pages to double-word
+ * boundaries, and ensure that 'freelist' is aligned within the
+ * struct.
*/
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
+#define _slub_counter_t unsigned long
+#else
+#define _slub_counter_t unsigned int
+#endif
+#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+#define _struct_page_alignment
+#define _slub_counter_t unsigned int
+#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+
struct page {
/* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
union {
- struct address_space *mapping; /* If low bit clear, points to
- * inode address_space, or NULL.
- * If page mapped as anonymous
- * memory, low bit is set, and
- * it points to anon_vma object
- * or KSM private structure. See
- * PAGE_MAPPING_ANON and
- * PAGE_MAPPING_KSM.
- */
+ /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
+ struct address_space *mapping;
+
void *s_mem; /* slab first object */
atomic_t compound_mapcount; /* first tail page */
/* page_deferred_list().next -- second tail page */
@@ -66,40 +94,27 @@ struct page {
};
union {
-#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
- defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
- /* Used for cmpxchg_double in slub */
- unsigned long counters;
-#else
- /*
- * Keep _refcount separate from slub cmpxchg_double data.
- * As the rest of the double word is protected by slab_lock
- * but _refcount is not.
- */
- unsigned counters;
-#endif
- struct {
+ _slub_counter_t counters;
+ unsigned int active; /* SLAB */
+ struct { /* SLUB */
+ unsigned inuse:16;
+ unsigned objects:15;
+ unsigned frozen:1;
+ };
+ int units; /* SLOB */
+
+ struct { /* Page cache */
+ /*
+ * Count of ptes mapped in mms, to show when
+ * page is mapped & limit reverse map searches.
+ *
+ * Extra information about page type may be
+ * stored here for pages that are never mapped,
+ * in which case the value MUST BE <= -2.
+ * See page-flags.h for more details.
+ */
+ atomic_t _mapcount;
- union {
- /*
- * Count of ptes mapped in mms, to show when
- * page is mapped & limit reverse map searches.
- *
- * Extra information about page type may be
- * stored here for pages that are never mapped,
- * in which case the value MUST BE <= -2.
- * See page-flags.h for more details.
- */
- atomic_t _mapcount;
-
- unsigned int active; /* SLAB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
- int units; /* SLOB */
- };
/*
* Usage count, *USE WRAPPER FUNCTION* when manual
* accounting. See page_ref.h
@@ -109,8 +124,6 @@ struct page {
};
/*
- * Third double word block
- *
* WARNING: bit 0 of the first word encode PageTail(). That means
* the rest users of the storage space MUST NOT use the bit to
* avoid collision and false-positive PageTail().
@@ -145,19 +158,9 @@ struct page {
unsigned long compound_head; /* If bit zero is set */
/* First tail page only */
-#ifdef CONFIG_64BIT
- /*
- * On 64 bit system we have enough space in struct page
- * to encode compound_dtor and compound_order with
- * unsigned int. It can help compiler generate better or
- * smaller code on some archtectures.
- */
- unsigned int compound_dtor;
- unsigned int compound_order;
-#else
- unsigned short int compound_dtor;
- unsigned short int compound_order;
-#endif
+ unsigned char compound_dtor;
+ unsigned char compound_order;
+ /* two/six bytes available here */
};
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
@@ -171,15 +174,14 @@ struct page {
#endif
};
- /* Remainder is not double word aligned */
union {
- unsigned long private; /* Mapping-private opaque data:
- * usually used for buffer_heads
- * if PagePrivate set; used for
- * swp_entry_t if PageSwapCache;
- * indicates order in the buddy
- * system if PG_buddy is set.
- */
+ /*
+ * Mapping-private opaque data:
+ * Usually used for buffer_heads if PagePrivate
+ * Used for swp_entry_t if PageSwapCache
+ * Indicates order in the buddy system if PageBuddy
+ */
+ unsigned long private;
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;
@@ -212,15 +214,7 @@ struct page {
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
-}
-/*
- * The struct page can be forced to be double word aligned so that atomic ops
- * on double words work. The SLUB allocator can make use of such a feature.
- */
-#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
- __aligned(2 * sizeof(unsigned long))
-#endif
-;
+} _struct_page_alignment;
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
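
The rewritten comment block above explains why struct page is forced to double-word alignment: SLUB updates its adjacent freelist/counters pair with a single double-word compare-and-swap. A standalone userspace sketch of that idea, not kernel code (GCC; on x86-64 the 16-byte CAS may need -mcx16 or linking with -latomic):

	#include <stdio.h>

	struct slub_pair {
		void *freelist;
		unsigned long counters;
	} __attribute__((aligned(2 * sizeof(unsigned long))));

	static int pair_cmpxchg(struct slub_pair *p, struct slub_pair old,
				struct slub_pair new)
	{
		/* one 16-byte CAS over the whole {freelist, counters} pair */
		return __atomic_compare_exchange(p, &old, &new, 0,
						 __ATOMIC_SEQ_CST,
						 __ATOMIC_SEQ_CST);
	}

	int main(void)
	{
		struct slub_pair p = { 0 }, old = p;
		struct slub_pair new = { &p, 1 };

		printf("swapped=%d counters=%lu\n",
		       pair_cmpxchg(&p, old, new), p.counters);
		return 0;
	}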
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index e7743eca1021..85146235231e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -324,6 +324,7 @@ struct mmc_host {
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */
#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
@@ -380,6 +381,7 @@ struct mmc_host {
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
+ unsigned int use_blk_mq:1; /* use blk-mq */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -422,9 +424,6 @@ struct mmc_host {
struct dentry *debugfs_root;
- struct mmc_async_req *areq; /* active async req */
- struct mmc_context_info context_info; /* async synchronization info */
-
/* Ongoing data transfer that allows commands during transfer */
struct mmc_request *ongoing_mrq;
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 82f0d289f110..91f1ba0663c8 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -33,5 +33,6 @@ void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
bool mmc_can_gpio_cd(struct mmc_host *host);
+bool mmc_can_gpio_ro(struct mmc_host *host);
#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index b25dc9db19fc..2d07a1ed5a31 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H
+#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
@@ -10,6 +11,9 @@
struct mmu_notifier;
struct mmu_notifier_ops;
+/* mmu_notifier_ops flags */
+#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
+
#ifdef CONFIG_MMU_NOTIFIER
/*
@@ -27,6 +31,15 @@ struct mmu_notifier_mm {
struct mmu_notifier_ops {
/*
+ * Flags to specify behavior of callbacks for this MMU notifier.
+ * Used to determine in which context an operation may be called.
+ *
+ * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not
+ * block
+ */
+ int flags;
+
+ /*
* Called either by mmu_notifier_unregister or when the mm is
* being destroyed by exit_mmap, always before all pages are
* freed. This can run concurrently with other mmu notifier
@@ -137,6 +150,10 @@ struct mmu_notifier_ops {
* page. Pages will no longer be referenced by the linux
* address space but may still be referenced by sptes until
* the last refcount is dropped.
+ *
+ * If neither of these callbacks can block, and invalidate_range
+ * cannot block, mmu_notifier_ops.flags should have
+ * MMU_INVALIDATE_DOES_NOT_BLOCK set.
*/
void (*invalidate_range_start)(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -159,12 +176,13 @@ struct mmu_notifier_ops {
* external TLB range needs to be flushed. For more in depth
* discussion on this see Documentation/vm/mmu_notifier.txt
*
- * The invalidate_range() function is called under the ptl
- * spin-lock and not allowed to sleep.
- *
* Note that this function might be called with just a sub-range
* of what was passed to invalidate_range_start()/end(), if
* called between those functions.
+ *
+ * If this callback cannot block, and invalidate_range_{start,end}
+ * cannot block, mmu_notifier_ops.flags should have
+ * MMU_INVALIDATE_DOES_NOT_BLOCK set.
*/
void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
unsigned long start, unsigned long end);
@@ -218,6 +236,7 @@ extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end);
+extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
static inline void mmu_notifier_release(struct mm_struct *mm)
{
@@ -457,6 +476,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
{
}
+static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
+{
+ return false;
+}
+
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}
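
A sketch of a notifier advertising the new flag; the ops and callback here are hypothetical, shown only to illustrate where MMU_INVALIDATE_DOES_NOT_BLOCK would be set:

	static void demo_invalidate_range(struct mmu_notifier *mn,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
	{
		/* tear down secondary mappings without sleeping */
	}

	static const struct mmu_notifier_ops demo_mmu_notifier_ops = {
		.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
		.invalidate_range = demo_invalidate_range,
	};

mm_has_blockable_invalidate_notifiers() can then report false for an mm whose registered notifiers all set this flag, which is presumably what callers running in non-blocking context check for.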
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 67f2e3c38939..7522a6987595 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1166,8 +1166,16 @@ extern unsigned long usemap_size(void);
/*
* We use the lower bits of the mem_map pointer to store
- * a little bit of information. There should be at least
- * 3 bits here due to 32-bit alignment.
+ * a little bit of information. The pointer is calculated
+ * as mem_map - section_nr_to_pfn(pnum). The result is
+ * aligned to the minimum alignment of the two values:
+ * 1. All mem_map arrays are page-aligned.
+ * 2. section_nr_to_pfn() always clears the PFN_SECTION_SHIFT
+ * lowest bits. PFN_SECTION_SHIFT is arch-specific
+ * (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
+ * worst combination is powerpc with 256k pages,
+ * which results in PFN_SECTION_SHIFT equal to 6.
+ * To sum it up, at least 6 bits are available.
*/
#define SECTION_MARKED_PRESENT (1UL<<0)
#define SECTION_HAS_MEM_MAP (1UL<<1)
diff --git a/include/linux/module.h b/include/linux/module.h
index c69b49abe877..70245f1a3590 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -19,6 +19,7 @@
#include <linux/jump_label.h>
#include <linux/export.h>
#include <linux/rbtree_latch.h>
+#include <linux/error-injection.h>
#include <linux/percpu.h>
#include <asm/module.h>
@@ -475,6 +476,11 @@ struct module {
ctor_fn_t *ctors;
unsigned int num_ctors;
#endif
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+ struct error_injection_entry *ei_funcs;
+ unsigned int num_ei_funcs;
+#endif
} ____cacheline_aligned __randomize_layout;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
@@ -801,6 +807,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef RETPOLINE
+extern bool retpoline_module_ok(bool has_retpoline);
+#else
+static inline bool retpoline_module_ok(bool has_retpoline)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_MODULE_SIG
static inline bool module_sig_ok(struct module *module)
{
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 3aa56e3104bb..b5b43f94f311 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd);
#define INVALIDATE_CACHED_RANGE(map, from, size) \
do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
-
-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] != val2.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & ~val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] | val2.x[i];
-
- return r;
-}
-
-static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if ((val1.x[i] & val2.x[i]) != val3.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] & val2.x[i])
- return 1;
- }
-
- return 0;
-}
+#define map_word_equal(map, val1, val2) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) \
+ if ((val1).x[i] != (val2).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ ret; \
+})
+
+#define map_word_and(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & (val2).x[i]; \
+ r; \
+})
+
+#define map_word_clr(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & ~(val2).x[i]; \
+ r; \
+})
+
+#define map_word_or(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] | (val2).x[i]; \
+ r; \
+})
+
+#define map_word_andequal(map, val1, val2, val3) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) { \
+ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ } \
+ ret; \
+})
+
+#define map_word_bitsset(map, val1, val2) \
+({ \
+ int i, ret = 0; \
+ for (i = 0; i < map_words(map); i++) { \
+ if ((val1).x[i] & (val2).x[i]) { \
+ ret = 1; \
+ break; \
+ } \
+ } \
+ ret; \
+})
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
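
Since these helpers are now GCC statement-expression macros rather than inline functions, their operands can be evaluated more than once, so callers should avoid side effects in the arguments. A toy userspace rendering of the pattern, with the kernel types mocked and map_words() stubbed to 2:

	#include <stdio.h>

	typedef struct { unsigned long x[2]; } map_word;
	#define map_words(map) 2	/* stand-in for the real helper */

	#define map_word_and(map, val1, val2) \
	({ \
		map_word r; \
		int i; \
		for (i = 0; i < map_words(map); i++) \
			r.x[i] = (val1).x[i] & (val2).x[i]; \
		r; \
	})

	int main(void)
	{
		map_word a = { { 0xff00, 0x00ff } };
		map_word b = { { 0x0ff0, 0x0ff0 } };
		map_word r = map_word_and(NULL, a, b);

		printf("%#lx %#lx\n", r.x[0], r.x[1]); /* 0xf00 0xf0 */
		return 0;
	}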
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cd55bf14ad51..205ededccc60 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -489,6 +489,34 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->erasesize);
}
+/**
+ * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
+ * boundaries.
+ * @mtd: the MTD device this erase request applies on
+ * @req: the erase request to adjust
+ *
+ * This function will adjust @req->addr and @req->len to align them on
+ * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
+ */
+static inline void mtd_align_erase_req(struct mtd_info *mtd,
+ struct erase_info *req)
+{
+ u32 mod;
+
+ if (WARN_ON(!mtd->erasesize))
+ return;
+
+ mod = mtd_mod_by_eb(req->addr, mtd);
+ if (mod) {
+ req->addr -= mod;
+ req->len += mod;
+ }
+
+ mod = mtd_mod_by_eb(req->addr + req->len, mtd);
+ if (mod)
+ req->len += mtd->erasesize - mod;
+}
+
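+
A standalone arithmetic check of the helper above, with assumed values: a 0x100-byte request at offset 0x2100 on a device with 4KiB eraseblocks expands to exactly the eraseblock at 0x2000:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t addr = 0x2100, len = 0x100;
		const uint32_t erasesize = 0x1000;
		uint32_t mod = addr % erasesize;

		/* pull the start back to an eraseblock boundary */
		if (mod) {
			addr -= mod;
			len += mod;
		}
		/* then extend the end to the next boundary */
		mod = (addr + len) % erasesize;
		if (mod)
			len += erasesize - mod;

		assert(addr == 0x2000 && len == 0x1000);
		return 0;
	}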
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 749bb08c4772..56c5570aadbe 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -133,12 +133,6 @@ enum nand_ecc_algo {
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
#define NAND_ECC_MAXIMIZE BIT(1)
-/*
- * If your controller already sends the required NAND commands when
- * reading or writing a page, then the framework is not supposed to
- * send READ0 and SEQIN/PAGEPROG respectively.
- */
-#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2)
/* Bit mask for flags passed to do_nand_read_ecc */
#define NAND_GET_DEVICE 0x80
@@ -191,11 +185,6 @@ enum nand_ecc_algo {
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
#define NAND_SKIP_BBTSCAN 0x00010000
-/*
- * This option is defined if the board driver allocates its own buffers
- * (e.g. because it needs them DMA-coherent).
- */
-#define NAND_OWN_BUFFERS 0x00020000
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV 0x00040000
/*
@@ -525,6 +514,8 @@ static const struct nand_ecc_caps __name = { \
* @postpad: padding information for syndrome based ECC generators
* @options: ECC specific options (see NAND_ECC_XXX flags defined above)
* @priv: pointer to private ECC control data
+ * @calc_buf: buffer for calculated ECC, size is oobsize.
+ * @code_buf: buffer for ECC read from flash, size is oobsize.
* @hwctl: function to control hardware ECC generator. Must only
* be provided if an hardware ECC is available
* @calculate: function for ECC calculation or readback from ECC hardware
@@ -575,6 +566,8 @@ struct nand_ecc_ctrl {
int postpad;
unsigned int options;
void *priv;
+ u8 *calc_buf;
+ u8 *code_buf;
void (*hwctl)(struct mtd_info *mtd, int mode);
int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code);
@@ -602,26 +595,6 @@ struct nand_ecc_ctrl {
int page);
};
-static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
-{
- return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
-}
-
-/**
- * struct nand_buffers - buffer structure for read/write
- * @ecccalc: buffer pointer for calculated ECC, size is oobsize.
- * @ecccode: buffer pointer for ECC read from flash, size is oobsize.
- * @databuf: buffer pointer for data, size is (page size + oobsize).
- *
- * Do not change the order of buffers. databuf and oobrbuf must be in
- * consecutive order.
- */
-struct nand_buffers {
- uint8_t *ecccalc;
- uint8_t *ecccode;
- uint8_t *databuf;
-};
-
/**
* struct nand_sdr_timings - SDR NAND chip timings
*
@@ -762,6 +735,350 @@ struct nand_manufacturer_ops {
};
/**
+ * struct nand_op_cmd_instr - Definition of a command instruction
+ * @opcode: the command to issue in one cycle
+ */
+struct nand_op_cmd_instr {
+ u8 opcode;
+};
+
+/**
+ * struct nand_op_addr_instr - Definition of an address instruction
+ * @naddrs: length of the @addrs array
+ * @addrs: array containing the address cycles to issue
+ */
+struct nand_op_addr_instr {
+ unsigned int naddrs;
+ const u8 *addrs;
+};
+
+/**
+ * struct nand_op_data_instr - Definition of a data instruction
+ * @len: number of data bytes to move
+ * @in: buffer to fill when reading from the NAND chip
+ * @out: buffer to read from when writing to the NAND chip
+ * @force_8bit: force 8-bit access
+ *
+ * Please note that "in" and "out" are inverted from the ONFI specification
+ * and are from the controller perspective, so a "in" is a read from the NAND
+ * chip while a "out" is a write to the NAND chip.
+ */
+struct nand_op_data_instr {
+ unsigned int len;
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ bool force_8bit;
+};
+
+/**
+ * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
+ * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
+ */
+struct nand_op_waitrdy_instr {
+ unsigned int timeout_ms;
+};
+
+/**
+ * enum nand_op_instr_type - Definition of all instruction types
+ * @NAND_OP_CMD_INSTR: command instruction
+ * @NAND_OP_ADDR_INSTR: address instruction
+ * @NAND_OP_DATA_IN_INSTR: data in instruction
+ * @NAND_OP_DATA_OUT_INSTR: data out instruction
+ * @NAND_OP_WAITRDY_INSTR: wait ready instruction
+ */
+enum nand_op_instr_type {
+ NAND_OP_CMD_INSTR,
+ NAND_OP_ADDR_INSTR,
+ NAND_OP_DATA_IN_INSTR,
+ NAND_OP_DATA_OUT_INSTR,
+ NAND_OP_WAITRDY_INSTR,
+};
+
+/**
+ * struct nand_op_instr - Instruction object
+ * @type: the instruction type
+ * @cmd/@addr/@data/@waitrdy: extra data associated with the instruction.
+ * You'll have to use the appropriate element
+ * depending on @type
+ * @delay_ns: delay the controller should apply after the instruction has been
+ * issued on the bus. Most modern controllers have internal timing
+ * control logic, and in this case the controller driver can ignore
+ * this field.
+ */
+struct nand_op_instr {
+ enum nand_op_instr_type type;
+ union {
+ struct nand_op_cmd_instr cmd;
+ struct nand_op_addr_instr addr;
+ struct nand_op_data_instr data;
+ struct nand_op_waitrdy_instr waitrdy;
+ } ctx;
+ unsigned int delay_ns;
+};
+
+/*
+ * Special handling must be done for the WAITRDY timeout parameter as it usually
+ * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
+ * tBERS (during an erase), all of which are u64 values that cannot be
+ * divided by usual kernel macros and must be handled with the special
+ * DIV_ROUND_UP_ULL() macro.
+ */
+#define __DIVIDE(dividend, divisor) ({ \
+ sizeof(dividend) == sizeof(u32) ? \
+ DIV_ROUND_UP(dividend, divisor) : \
+ DIV_ROUND_UP_ULL(dividend, divisor); \
+ })
+#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
+#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
+
+#define NAND_OP_CMD(id, ns) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .ctx.cmd.opcode = id, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_ADDR(ncycles, cycles, ns) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .ctx.addr = { \
+ .naddrs = ncycles, \
+ .addrs = cycles, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_WAIT_RDY(tout_ms, ns) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .ctx.waitrdy.timeout_ms = tout_ms, \
+ .delay_ns = ns, \
+ }
+
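+
Putting the instruction macros together, here is a hedged sketch of how a core helper might describe a READ ID cycle and hand it to nand_exec_op() (added further down in this header); the 0x90 opcode is the standard READID command, but the delay values are illustrative:

	static int demo_readid(struct nand_chip *chip, u8 addr,
			       void *buf, unsigned int len)
	{
		const struct nand_op_instr instrs[] = {
			NAND_OP_CMD(0x90 /* READID */, 0),
			NAND_OP_ADDR(1, &addr, 0),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}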
+/**
+ * struct nand_subop - a sub operation
+ * @instrs: array of instructions
+ * @ninstrs: length of the @instrs array
+ * @first_instr_start_off: offset to start from for the first instruction
+ * of the sub-operation
+ * @last_instr_end_off: offset to end at (excluded) for the last instruction
+ * of the sub-operation
+ *
+ * Both @first_instr_start_off and @last_instr_end_off only apply to data or
+ * address instructions.
+ *
+ * When an operation cannot be handled as is by the NAND controller, it will
+ * be split by the parser into sub-operations which will be passed to the
+ * controller driver.
+ */
+struct nand_subop {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ unsigned int first_instr_start_off;
+ unsigned int last_instr_end_off;
+};
+
+int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
+
+/**
+ * struct nand_op_parser_addr_constraints - Constraints for address instructions
+ * @maxcycles: maximum number of address cycles the controller can issue in a
+ * single step
+ */
+struct nand_op_parser_addr_constraints {
+ unsigned int maxcycles;
+};
+
+/**
+ * struct nand_op_parser_data_constraints - Constraints for data instructions
+ * @maxlen: maximum data length that the controller can handle in a single step
+ */
+struct nand_op_parser_data_constraints {
+ unsigned int maxlen;
+};
+
+/**
+ * struct nand_op_parser_pattern_elem - One element of a pattern
+ * @type: the instruction type
+ * @optional: whether this element of the pattern is optional or mandatory
+ * @addr/@data: address or data constraint (number of cycles or data length)
+ */
+struct nand_op_parser_pattern_elem {
+ enum nand_op_instr_type type;
+ bool optional;
+ union {
+ struct nand_op_parser_addr_constraints addr;
+ struct nand_op_parser_data_constraints data;
+ } ctx;
+};
+
+#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .optional = _opt, \
+ }
+
+#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .optional = _opt, \
+ .ctx.addr.maxcycles = _maxcycles, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .optional = _opt, \
+ }
+
+/**
+ * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
+ * @elems: array of pattern elements
+ * @nelems: number of pattern elements in @elems array
+ * @exec: the function that will issue a sub-operation
+ *
+ * A pattern is a list of elements, each element representing one instruction
+ * with its constraints. The pattern itself is used by the core to match NAND
+ * chip operation with NAND controller operations.
+ * Once a match between a NAND controller operation pattern and a NAND chip
+ * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
+ * hook is called so that the controller driver can issue the operation on the
+ * bus.
+ *
+ * Controller drivers should declare as many patterns as they support and pass
+ * this list of patterns (created with the help of the following macro) to
+ * the nand_op_parser_exec_op() helper.
+ */
+struct nand_op_parser_pattern {
+ const struct nand_op_parser_pattern_elem *elems;
+ unsigned int nelems;
+ int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
+};
+
+#define NAND_OP_PARSER_PATTERN(_exec, ...) \
+ { \
+ .exec = _exec, \
+ .elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
+ .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern_elem), \
+ }
+
+/**
+ * struct nand_op_parser - NAND controller operation parser descriptor
+ * @patterns: array of supported patterns
+ * @npatterns: length of the @patterns array
+ *
+ * The parser descriptor is just an array of supported patterns which will be
+ * iterated by nand_op_parser_exec_op() every time it tries to execute a
+ * NAND operation (or tries to determine if a specific operation is supported).
+ *
+ * It is worth mentioning that patterns will be tested in their declaration
+ * order, and the first match will be taken, so it's important to order patterns
+ * appropriately so that simple/inefficient patterns are placed at the end of
+ * the list. Usually, this is where you put single instruction patterns.
+ */
+struct nand_op_parser {
+ const struct nand_op_parser_pattern *patterns;
+ unsigned int npatterns;
+};
+
+#define NAND_OP_PARSER(...) \
+ { \
+ .patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
+ .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern), \
+ }
+
+/**
+ * struct nand_operation - NAND operation descriptor
+ * @instrs: array of instructions to execute
+ * @ninstrs: length of the @instrs array
+ *
+ * The actual operation structure that will be passed to chip->exec_op().
+ */
+struct nand_operation {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+};
+
+#define NAND_OPERATION(_instrs) \
+ { \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only);
+
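+
And a sketch of the controller-driver side: declare the patterns the hardware can execute and let the parser split incoming operations. The names and limits here (foo_*, 5 address cycles, 4KiB of data) are made up:

	static int foo_exec_pattern(struct nand_chip *chip,
				    const struct nand_subop *subop)
	{
		/* issue subop->instrs[0..subop->ninstrs) on the controller */
		return 0;
	}

	static const struct nand_op_parser foo_op_parser = NAND_OP_PARSER(
		NAND_OP_PARSER_PATTERN(foo_exec_pattern,
			NAND_OP_PARSER_PAT_CMD_ELEM(false),
			NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
			NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4096),
			NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)));

	static int foo_exec_op(struct nand_chip *chip,
			       const struct nand_operation *op,
			       bool check_only)
	{
		return nand_op_parser_exec_op(chip, &foo_op_parser,
					      op, check_only);
	}

The driver would then assign foo_exec_op to the chip's ->exec_op() hook.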
+/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
* @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
@@ -787,10 +1104,13 @@ struct nand_manufacturer_ops {
* commands to the chip.
* @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
* ready.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces ->cmdfunc(),
+ * ->{read,write}_{buf,byte,word}(), ->dev_ready() and
+ * ->waitfunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
- * @buffers: buffer structure for read/write
* @buf_align: minimum buffer alignment required by a platform
* @hwcontrol: platform-specific hardware control structure
* @erase: [REPLACEABLE] erase function
@@ -830,6 +1150,7 @@ struct nand_manufacturer_ops {
* @numchips: [INTERN] number of physical chips
* @chipsize: [INTERN] the size of one chip for multichip arrays
* @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
+ * @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
* @pagebuf: [INTERN] holds the pagenumber which is currently in
* data_buf.
* @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
@@ -886,6 +1207,9 @@ struct nand_chip {
void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
int page_addr);
int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
int (*scan_bbt)(struct mtd_info *mtd);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -896,7 +1220,6 @@ struct nand_chip {
int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
const struct nand_data_interface *conf);
-
int chip_delay;
unsigned int options;
unsigned int bbt_options;
@@ -908,6 +1231,7 @@ struct nand_chip {
int numchips;
uint64_t chipsize;
int pagemask;
+ u8 *data_buf;
int pagebuf;
unsigned int pagebuf_bitflips;
int subpagesize;
@@ -928,7 +1252,7 @@ struct nand_chip {
u16 max_bb_per_die;
u32 blocks_per_die;
- struct nand_data_interface *data_interface;
+ struct nand_data_interface data_interface;
int read_retries;
@@ -938,7 +1262,6 @@ struct nand_chip {
struct nand_hw_control *controller;
struct nand_ecc_ctrl ecc;
- struct nand_buffers *buffers;
unsigned long buf_align;
struct nand_hw_control hwcontrol;
@@ -956,6 +1279,15 @@ struct nand_chip {
} manufacturer;
};
+static inline int nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
+ return chip->exec_op(chip, op, false);
+}
+
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1225,8 +1557,7 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
}
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
+int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode);
@@ -1269,8 +1600,6 @@ static inline int jedec_feature(struct nand_chip *chip)
/* get timing characteristics from ONFI timing mode. */
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
-/* get data interface from ONFI timing mode 0, used after reset. */
-const struct nand_data_interface *nand_get_default_data_interface(void);
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
@@ -1316,9 +1645,45 @@ int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
+/* NAND operation helpers */
+int nand_reset_op(struct nand_chip *chip);
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len);
+int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_prog_page_end_op(struct nand_chip *chip);
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit);
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit);
+
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
+
+/*
+ * External helper for controller drivers that have to implement the WAITRDY
+ * instruction and have no physical pin to check it.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+
#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index d0c66a0975cf..de36969eb359 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -61,6 +61,7 @@
#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
+#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -130,7 +131,10 @@
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
/* Flag Status Register bits */
-#define FSR_READY BIT(7)
+#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
+#define FSR_E_ERR BIT(5) /* Erase operation status */
+#define FSR_P_ERR BIT(4) /* Program operation status */
+#define FSR_PT_ERR BIT(1) /* Protection error bit */
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
@@ -399,4 +403,10 @@ struct spi_nor_hwcaps {
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps);
+/**
+ * spi_nor_restore() - restore the status of SPI NOR
+ * @nor: the spi_nor structure
+ */
+void spi_nor_restore(struct spi_nor *nor);
+
#endif
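
The new FSR bits let drivers distinguish failure causes instead of only polling for readiness. A hedged sketch of a decode helper (the function name and error-code mapping are assumed, not part of this API):

	static int demo_decode_fsr(u8 fsr)
	{
		if (!(fsr & FSR_READY))
			return -EBUSY;	/* still erasing/programming */
		if (fsr & (FSR_E_ERR | FSR_P_ERR))
			return -EIO;	/* erase or program failed */
		if (fsr & FSR_PT_ERR)
			return -EACCES;	/* protection error */
		return 0;
	}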
diff --git a/include/linux/net.h b/include/linux/net.h
index caeb159abda5..91216b16feb7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -147,7 +147,7 @@ struct proto_ops {
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int *sockaddr_len, int peer);
- unsigned int (*poll) (struct file *file, struct socket *sock,
+ __poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
@@ -306,7 +306,6 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
-int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
new file mode 100644
index 000000000000..bebeaad897cc
--- /dev/null
+++ b/include/linux/net_dim.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NET_DIM_H
+#define NET_DIM_H
+
+#include <linux/module.h>
+
+struct net_dim_cq_moder {
+ u16 usec;
+ u16 pkts;
+ u8 cq_period_mode;
+};
+
+struct net_dim_sample {
+ ktime_t time;
+ u32 pkt_ctr;
+ u32 byte_ctr;
+ u16 event_ctr;
+};
+
+struct net_dim_stats {
+ int ppms; /* packets per msec */
+ int bpms; /* bytes per msec */
+ int epms; /* events per msec */
+};
+
+struct net_dim { /* Adaptive Moderation */
+ u8 state;
+ struct net_dim_stats prev_stats;
+ struct net_dim_sample start_sample;
+ struct work_struct work;
+ u8 profile_ix;
+ u8 mode;
+ u8 tune_state;
+ u8 steps_right;
+ u8 steps_left;
+ u8 tired;
+};
+
+enum {
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+ NET_DIM_CQ_PERIOD_NUM_MODES
+};
+
+/* Adaptive moderation logic */
+enum {
+ NET_DIM_START_MEASURE,
+ NET_DIM_MEASURE_IN_PROGRESS,
+ NET_DIM_APPLY_NEW_PROFILE,
+};
+
+enum {
+ NET_DIM_PARKING_ON_TOP,
+ NET_DIM_PARKING_TIRED,
+ NET_DIM_GOING_RIGHT,
+ NET_DIM_GOING_LEFT,
+};
+
+enum {
+ NET_DIM_STATS_WORSE,
+ NET_DIM_STATS_SAME,
+ NET_DIM_STATS_BETTER,
+};
+
+enum {
+ NET_DIM_STEPPED,
+ NET_DIM_TOO_TIRED,
+ NET_DIM_ON_EDGE,
+};
+
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+/* Adaptive moderation profiles */
+#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES */
+#define NET_DIM_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define NET_DIM_CQE_PROFILES { \
+ {2, 256}, \
+ {8, 128}, \
+ {16, 64}, \
+ {32, 64}, \
+ {64, 64} \
+}
+
+static const struct net_dim_cq_moder
+profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_EQE_PROFILES,
+ NET_DIM_CQE_PROFILES,
+};
+
+static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode,
+ int ix)
+{
+ struct net_dim_cq_moder cq_moder;
+
+ cq_moder = profile[cq_period_mode][ix];
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
+
+static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode)
+{
+ int default_profile_ix;
+
+ if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE)
+ default_profile_ix = NET_DIM_DEF_PROFILE_CQE;
+ else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */
+ default_profile_ix = NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_profile(rx_cq_period_mode, default_profile_ix);
+}
+
+static inline bool net_dim_on_top(struct net_dim *dim)
+{
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ case NET_DIM_PARKING_TIRED:
+ return true;
+ case NET_DIM_GOING_RIGHT:
+ return (dim->steps_left > 1) && (dim->steps_right == 1);
+ default: /* NET_DIM_GOING_LEFT */
+ return (dim->steps_right > 1) && (dim->steps_left == 1);
+ }
+}
+
+static inline void net_dim_turn(struct net_dim *dim)
+{
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ case NET_DIM_PARKING_TIRED:
+ break;
+ case NET_DIM_GOING_RIGHT:
+ dim->tune_state = NET_DIM_GOING_LEFT;
+ dim->steps_left = 0;
+ break;
+ case NET_DIM_GOING_LEFT:
+ dim->tune_state = NET_DIM_GOING_RIGHT;
+ dim->steps_right = 0;
+ break;
+ }
+}
+
+static inline int net_dim_step(struct net_dim *dim)
+{
+ if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
+ return NET_DIM_TOO_TIRED;
+
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ case NET_DIM_PARKING_TIRED:
+ break;
+ case NET_DIM_GOING_RIGHT:
+ if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
+ return NET_DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ break;
+ case NET_DIM_GOING_LEFT:
+ if (dim->profile_ix == 0)
+ return NET_DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ break;
+ }
+
+ dim->tired++;
+ return NET_DIM_STEPPED;
+}
+
+static inline void net_dim_park_on_top(struct net_dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tired = 0;
+ dim->tune_state = NET_DIM_PARKING_ON_TOP;
+}
+
+static inline void net_dim_park_tired(struct net_dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tune_state = NET_DIM_PARKING_TIRED;
+}
+
+static inline void net_dim_exit_parking(struct net_dim *dim)
+{
+ dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT :
+ NET_DIM_GOING_RIGHT;
+ net_dim_step(dim);
+}
+
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+ (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+
+static inline int net_dim_stats_compare(struct net_dim_stats *curr,
+ struct net_dim_stats *prev)
+{
+ if (!prev->bpms)
+ return curr->bpms ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+ return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_WORSE;
+
+ if (!prev->ppms)
+ return curr->ppms ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+ return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_WORSE;
+
+ if (!prev->epms)
+ return NET_DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+ return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_WORSE;
+
+ return NET_DIM_STATS_SAME;
+}
+
+static inline bool net_dim_decision(struct net_dim_stats *curr_stats,
+ struct net_dim *dim)
+{
+ int prev_state = dim->tune_state;
+ int prev_ix = dim->profile_ix;
+ int stats_res;
+ int step_res;
+
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+ if (stats_res != NET_DIM_STATS_SAME)
+ net_dim_exit_parking(dim);
+ break;
+
+ case NET_DIM_PARKING_TIRED:
+ dim->tired--;
+ if (!dim->tired)
+ net_dim_exit_parking(dim);
+ break;
+
+ case NET_DIM_GOING_RIGHT:
+ case NET_DIM_GOING_LEFT:
+ stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+ if (stats_res != NET_DIM_STATS_BETTER)
+ net_dim_turn(dim);
+
+ if (net_dim_on_top(dim)) {
+ net_dim_park_on_top(dim);
+ break;
+ }
+
+ step_res = net_dim_step(dim);
+ switch (step_res) {
+ case NET_DIM_ON_EDGE:
+ net_dim_park_on_top(dim);
+ break;
+ case NET_DIM_TOO_TIRED:
+ net_dim_park_tired(dim);
+ break;
+ }
+
+ break;
+ }
+
+ if ((prev_state != NET_DIM_PARKING_ON_TOP) ||
+ (dim->tune_state != NET_DIM_PARKING_ON_TOP))
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
+static inline void net_dim_sample(u16 event_ctr,
+ u64 packets,
+ u64 bytes,
+ struct net_dim_sample *s)
+{
+ s->time = ktime_get();
+ s->pkt_ctr = packets;
+ s->byte_ctr = bytes;
+ s->event_ctr = event_ctr;
+}
+
+#define NET_DIM_NEVENTS 64
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
+
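+
BIT_GAP() computes the distance between two free-running counters modulo 2^bits, which keeps the statistics correct across counter wraparound. A standalone check with assumed values (the macros are copied locally for a userspace build):

	#include <assert.h>

	#define BIT_ULL(n) (1ULL << (n))
	#define BIT_GAP(bits, end, start) \
		((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))

	int main(void)
	{
		/* a u16 event counter that wrapped from 0xfff0 to 0x0010 */
		assert(BIT_GAP(16, 0x0010, 0xfff0) == 0x20);
		return 0;
	}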
+static inline void net_dim_calc_stats(struct net_dim_sample *start,
+ struct net_dim_sample *end,
+ struct net_dim_stats *curr_stats)
+{
+ /* u32 holds up to 71 minutes, should be enough */
+ u32 delta_us = ktime_us_delta(end->time, start->time);
+ u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+ u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+ start->byte_ctr);
+
+ if (!delta_us)
+ return;
+
+ curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+ curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC,
+ delta_us);
+}
+
+static inline void net_dim(struct net_dim *dim,
+ struct net_dim_sample end_sample)
+{
+ struct net_dim_stats curr_stats;
+ u16 nevents;
+
+ switch (dim->state) {
+ case NET_DIM_MEASURE_IN_PROGRESS:
+ nevents = BIT_GAP(BITS_PER_TYPE(u16),
+ end_sample.event_ctr,
+ dim->start_sample.event_ctr);
+ if (nevents < NET_DIM_NEVENTS)
+ break;
+ net_dim_calc_stats(&dim->start_sample, &end_sample,
+ &curr_stats);
+ if (net_dim_decision(&curr_stats, dim)) {
+ dim->state = NET_DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ /* fall through */
+ case NET_DIM_START_MEASURE:
+ dim->state = NET_DIM_MEASURE_IN_PROGRESS;
+ break;
+ case NET_DIM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
+
+#endif /* NET_DIM_H */
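
Tying the pieces together, a hedged sketch of the intended driver flow; the demo_ring structure and names are invented, but this mirrors how a NIC driver might wire the API up: sample counters at the end of each napi poll, then reprogram the device from the work handler once a new profile is chosen.

	struct demo_ring {
		struct net_dim dim; /* INIT_WORK(&dim.work, demo_dim_work) at setup */
		u16 events;
		u64 packets;
		u64 bytes;
	};

	static void demo_napi_poll_done(struct demo_ring *ring)
	{
		struct net_dim_sample sample;

		net_dim_sample(ring->events, ring->packets, ring->bytes,
			       &sample);
		net_dim(&ring->dim, sample);
	}

	static void demo_dim_work(struct work_struct *work)
	{
		struct net_dim *dim = container_of(work, struct net_dim,
						   work);
		struct net_dim_cq_moder m =
			net_dim_get_profile(dim->mode, dim->profile_ix);

		/* program m.usec / m.pkts into the device's CQ moderation */
		dim->state = NET_DIM_START_MEASURE;
	}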
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index b1b0ca7ccb2b..db84c516bcfb 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -78,6 +78,8 @@ enum {
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
+ NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
+
/*
* Add your fresh new feature above and remember to update
* netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -97,6 +99,7 @@ enum {
#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
#define NETIF_F_FSO __NETIF_F(FSO)
#define NETIF_F_GRO __NETIF_F(GRO)
+#define NETIF_F_GRO_HW __NETIF_F(GRO_HW)
#define NETIF_F_GSO __NETIF_F(GSO)
#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST)
#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ef789e1d679e..4c77f39ebd65 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -44,6 +44,7 @@
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
+#include <net/xdp.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
@@ -686,6 +687,7 @@ struct netdev_rx_queue {
#endif
struct kobject kobj;
struct net_device *dev;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
/*
@@ -778,6 +780,7 @@ enum tc_setup_type {
TC_SETUP_BLOCK,
TC_SETUP_QDISC_CBS,
TC_SETUP_QDISC_RED,
+ TC_SETUP_QDISC_PRIO,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -802,9 +805,11 @@ enum bpf_netdev_command {
BPF_OFFLOAD_VERIFIER_PREP,
BPF_OFFLOAD_TRANSLATE,
BPF_OFFLOAD_DESTROY,
+ BPF_OFFLOAD_MAP_ALLOC,
+ BPF_OFFLOAD_MAP_FREE,
};
-struct bpf_ext_analyzer_ops;
+struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct netdev_bpf {
@@ -820,16 +825,22 @@ struct netdev_bpf {
struct {
u8 prog_attached;
u32 prog_id;
+ /* flags with which program was installed */
+ u32 prog_flags;
};
/* BPF_OFFLOAD_VERIFIER_PREP */
struct {
struct bpf_prog *prog;
- const struct bpf_ext_analyzer_ops *ops; /* callee set */
+ const struct bpf_prog_offload_ops *ops; /* callee set */
} verifier;
/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
struct {
struct bpf_prog *prog;
} offload;
+ /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
+ struct {
+ struct bpf_offloaded_map *offmap;
+ };
};
};
@@ -840,6 +851,7 @@ struct xfrmdev_ops {
void (*xdo_dev_state_free) (struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
+ void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif
@@ -1458,8 +1470,6 @@ enum netdev_priv_flags {
* @base_addr: Device I/O address
* @irq: Device IRQ number
*
- * @carrier_changes: Stats to monitor carrier on<->off transitions
- *
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
* @napi_list: List entry used for polling NAPI devices
@@ -1495,6 +1505,8 @@ enum netdev_priv_flags {
* do not use this in drivers
* @rx_nohandler: nohandler dropped packets by core network on
* inactive devices, do not use this in drivers
+ * @carrier_up_count: Number of times the carrier has been up
+ * @carrier_down_count: Number of times the carrier has been down
*
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
@@ -1669,8 +1681,6 @@ struct net_device {
unsigned long base_addr;
int irq;
- atomic_t carrier_changes;
-
/*
* Some hardware also needs these fields (state,dev_list,
* napi_list,unreg_list,close_list) but they are not
@@ -1708,6 +1718,10 @@ struct net_device {
atomic_long_t tx_dropped;
atomic_long_t rx_nohandler;
+ /* Stats to monitor link on/off, flapping */
+ atomic_t carrier_up_count;
+ atomic_t carrier_down_count;
+
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def *wireless_handlers;
struct iw_public_data *wireless_data;
@@ -1724,7 +1738,7 @@ struct net_device {
const struct ndisc_ops *ndisc_ops;
#endif
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
const struct xfrmdev_ops *xfrmdev_ops;
#endif
@@ -1801,12 +1815,9 @@ struct net_device {
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr;
-#ifdef CONFIG_SYSFS
struct netdev_rx_queue *_rx;
-
unsigned int num_rx_queues;
unsigned int real_num_rx_queues;
-#endif
struct bpf_prog __rcu *xdp_prog;
unsigned long gro_flush_timeout;
@@ -2751,7 +2762,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
return false;
}
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
+ int len, int size);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
@@ -2791,7 +2803,9 @@ struct softnet_data {
struct Qdisc *output_queue;
struct Qdisc **output_queue_tailp;
struct sk_buff *completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+ struct sk_buff_head xfrm_backlog;
+#endif
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
@@ -3302,7 +3316,9 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
-int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
+ bool *need_copyout);
+int dev_ifconf(struct net *net, struct ifconf *, int);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
@@ -3315,6 +3331,7 @@ int dev_get_alias(const struct net_device *, char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int __dev_set_mtu(struct net_device *, int);
int dev_set_mtu(struct net_device *, int);
+int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
@@ -3323,14 +3340,15 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, u32 flags);
-u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id);
+void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+ struct netdev_bpf *xdp);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
@@ -4399,11 +4417,11 @@ do { \
* file/line information and a backtrace.
*/
#define netdev_WARN(dev, format, args...) \
- WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
+ WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
-#define netdev_WARN_ONCE(dev, condition, format, arg...) \
- WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev) \
+#define netdev_WARN_ONCE(dev, format, args...) \
+ WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
/* netif printk helpers, similar to netdev_printk */
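Besides switching the separator from a newline to ": ", the rewrite gives netdev_WARN_ONCE() the same argument list as netdev_WARN(); the old variant took a stray condition parameter and was missing a comma after netdev_name(dev), so it could not compile. A usage sketch with an invented message:

    static void sketch_check_ring(struct net_device *dev, int state)
    {
            if (state < 0)
                    netdev_WARN_ONCE(dev, "bad ring state %d\n", state);
    }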
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b24e9b101651..85a1a0b32c66 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -67,6 +67,7 @@ struct nf_hook_ops {
struct net_device *dev;
void *priv;
u_int8_t pf;
+ bool nat_hook;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
@@ -77,17 +78,28 @@ struct nf_hook_entry {
void *priv;
};
+struct nf_hook_entries_rcu_head {
+ struct rcu_head head;
+ void *allocation;
+};
+
struct nf_hook_entries {
u16 num_hook_entries;
/* padding */
struct nf_hook_entry hooks[];
- /* trailer: pointers to original orig_ops of each hook.
- *
- * This is not part of struct nf_hook_entry since its only
- * needed in slow path (hook register/unregister).
+ /* trailer: pointers to original orig_ops of each hook,
+ * followed by rcu_head and scratch space used for freeing
+ * the structure via call_rcu.
*
+ * This is not part of struct nf_hook_entry since it's only
+ * needed in slow path (hook register/unregister):
* const struct nf_hook_ops *orig_ops[]
+ *
+ * For the same reason, we store this at the end -- it's
+ * only needed when a hook is deleted, not during
+ * packet path processing:
+ * struct nf_hook_entries_rcu_head head
*/
};
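The comment above describes data stored past the flexible hooks[] array. A sketch of how the slow path can recover the trailing orig_ops pointers from that layout (mirroring, not quoting, the netfilter core):

    static struct nf_hook_ops **
    sketch_hook_ops(const struct nf_hook_entries *e)
    {
            unsigned int n = e->num_hook_entries;
            const void *hook_end = &e->hooks[n]; /* just past hooks[] */

            return (struct nf_hook_ops **)hook_end;
    }

The nf_hook_entries_rcu_head scratch space then follows those n pointers, so a single allocation covers the fast-path array, the slow-path bookkeeping and the deferred-free state.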
@@ -184,7 +196,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- struct nf_hook_entries *hook_head;
+ struct nf_hook_entries *hook_head = NULL;
int ret = 1;
#ifdef HAVE_JUMP_LABEL
@@ -195,7 +207,33 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
#endif
rcu_read_lock();
- hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
+ switch (pf) {
+ case NFPROTO_IPV4:
+ hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
+ break;
+ case NFPROTO_IPV6:
+ hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
+ break;
+ case NFPROTO_ARP:
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
+#endif
+ break;
+ case NFPROTO_BRIDGE:
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
+#endif
+ break;
+#if IS_ENABLED(CONFIG_DECNET)
+ case NFPROTO_DECNET:
+ hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
if (hook_head) {
struct nf_hook_state state;
@@ -271,64 +309,16 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
struct flowi;
struct nf_queue_entry;
-struct nf_afinfo {
- unsigned short family;
- __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
- __sum16 (*checksum_partial)(struct sk_buff *skb,
- unsigned int hook,
- unsigned int dataoff,
- unsigned int len,
- u_int8_t protocol);
- int (*route)(struct net *net, struct dst_entry **dst,
- struct flowi *fl, bool strict);
- void (*saveroute)(const struct sk_buff *skb,
- struct nf_queue_entry *entry);
- int (*reroute)(struct net *net, struct sk_buff *skb,
- const struct nf_queue_entry *entry);
- int route_key_size;
-};
-
-extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
-static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
-{
- return rcu_dereference(nf_afinfo[family]);
-}
-
-static inline __sum16
-nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
- u_int8_t protocol, unsigned short family)
-{
- const struct nf_afinfo *afinfo;
- __sum16 csum = 0;
-
- rcu_read_lock();
- afinfo = nf_get_afinfo(family);
- if (afinfo)
- csum = afinfo->checksum(skb, hook, dataoff, protocol);
- rcu_read_unlock();
- return csum;
-}
-
-static inline __sum16
-nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol, unsigned short family)
-{
- const struct nf_afinfo *afinfo;
- __sum16 csum = 0;
-
- rcu_read_lock();
- afinfo = nf_get_afinfo(family);
- if (afinfo)
- csum = afinfo->checksum_partial(skb, hook, dataoff, len,
- protocol);
- rcu_read_unlock();
- return csum;
-}
+__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol,
+ unsigned short family);
-int nf_register_afinfo(const struct nf_afinfo *afinfo);
-void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol, unsigned short family);
+int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict, unsigned short family);
+int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 8e42253e5d4d..34fc80f3eb90 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -122,6 +122,8 @@ struct ip_set_ext {
u64 bytes;
char *comment;
u32 timeout;
+ u8 packets_op;
+ u8 bytes_op;
};
struct ip_set;
@@ -339,6 +341,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
const void *e, bool active);
+extern bool ip_set_match_extensions(struct ip_set *set,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext,
+ u32 flags, void *data);
static inline int
ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h
index bb6fba480118..3d33a2c3f39f 100644
--- a/include/linux/netfilter/ipset/ip_set_counter.h
+++ b/include/linux/netfilter/ipset/ip_set_counter.h
@@ -34,20 +34,33 @@ ip_set_get_packets(const struct ip_set_counter *counter)
return (u64)atomic64_read(&(counter)->packets);
}
+static inline bool
+ip_set_match_counter(u64 counter, u64 match, u8 op)
+{
+ switch (op) {
+ case IPSET_COUNTER_NONE:
+ return true;
+ case IPSET_COUNTER_EQ:
+ return counter == match;
+ case IPSET_COUNTER_NE:
+ return counter != match;
+ case IPSET_COUNTER_LT:
+ return counter < match;
+ case IPSET_COUNTER_GT:
+ return counter > match;
+ }
+ return false;
+}
+
static inline void
ip_set_update_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
+ const struct ip_set_ext *ext, u32 flags)
{
if (ext->packets != ULLONG_MAX &&
!(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
ip_set_add_bytes(ext->bytes, counter);
ip_set_add_packets(ext->packets, counter);
}
- if (flags & IPSET_FLAG_MATCH_COUNTERS) {
- mext->packets = ip_set_get_packets(counter);
- mext->bytes = ip_set_get_bytes(counter);
- }
}
static inline bool
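With matching split out of ip_set_update_counter(), a set type combines the two halves itself via the new packets_op/bytes_op fields. A hedged sketch, assuming the neighbouring ip_set_get_bytes() accessor and using an invented helper name:

    static bool sketch_counter_match(const struct ip_set_counter *counter,
                                     const struct ip_set_ext *mext)
    {
            return ip_set_match_counter(ip_set_get_packets(counter),
                                        mext->packets, mext->packets_op) &&
                   ip_set_match_counter(ip_set_get_bytes(counter),
                                        mext->bytes, mext->bytes_op);
    }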
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 495ba4dd9da5..34551f8aaf9d 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -67,8 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
* @ss: The nfnetlink subsystem ID
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds the NFNL subsystem mutex.
+ * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
*/
#define nfnl_dereference(p, ss) \
rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 33f7530f96b9..1313b35c3ab7 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -320,6 +320,8 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name);
+struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
+ const char *name);
void xt_table_unlock(struct xt_table *t);
int xt_proto_init(struct net *net, u_int8_t af);
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index dc6111adea06..8dddfb151f00 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -4,7 +4,17 @@
#include <uapi/linux/netfilter.h>
+/* in/out/forward only */
+#define NF_ARP_NUMHOOKS 3
+
+/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
+#define NF_DN_NUMHOOKS 7
+
+#if IS_ENABLED(CONFIG_DECNET)
/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS 8
+#define NF_MAX_HOOKS NF_DN_NUMHOOKS
+#else
+#define NF_MAX_HOOKS NF_INET_NUMHOOKS
+#endif
#endif
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 98c03b2462b5..b31dabfdb453 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,53 @@
#include <uapi/linux/netfilter_ipv4.h>
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip_rt_info {
+ __be32 daddr;
+ __be32 saddr;
+ u_int8_t tos;
+ u_int32_t mark;
+};
+
int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+
+struct nf_queue_entry;
+
+#ifdef CONFIG_INET
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
+__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol);
+int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict);
+int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
+#else
+static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol)
+{
+ return 0;
+}
+static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
+ unsigned int hook,
+ unsigned int dataoff,
+ unsigned int len,
+ u_int8_t protocol)
+{
+ return 0;
+}
+static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict)
+{
+ return -EOPNOTSUPP;
+}
+static inline int nf_ip_reroute(struct sk_buff *skb,
+ const struct nf_queue_entry *entry)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_INET */
+
#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 47c6b04c28c0..288c597e75b3 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,6 +9,17 @@
#include <uapi/linux/netfilter_ipv6.h>
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip6_rt_info {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ u_int32_t mark;
+};
+
+struct nf_queue_entry;
+
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
* if IPv6 is a module.
@@ -19,6 +30,14 @@ struct nf_ipv6_ops {
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
+ __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+ __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol);
+ int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict);
+ int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
};
#ifdef CONFIG_NETFILTER
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 49b4257ce1ea..f3075d6c7e82 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -85,7 +85,7 @@ struct netlink_ext_ack {
* to the lack of an output buffer.)
*/
#define NL_SET_ERR_MSG(extack, msg) do { \
- static const char __msg[] = (msg); \
+ static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
if (__extack) \
@@ -101,7 +101,7 @@ struct netlink_ext_ack {
} while (0)
#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
- static const char __msg[] = (msg); \
+ static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
if (__extack) { \
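Dropping the parentheses likely keeps the expansion strictly conforming: ISO C only allows a bare string literal (optionally brace-enclosed) as a character-array initializer, so the old `__msg[] = (msg)` form leaned on a compiler extension. A sketch of ordinary use, with an invented message and bound:

    static int sketch_validate_attr(struct netlink_ext_ack *extack, int len)
    {
            if (len > 16) {
                    NL_SET_ERR_MSG(extack, "invalid attribute length");
                    return -EINVAL;
            }
            return 0;
    }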
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 47adac640191..57ffaa20d564 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -457,7 +457,12 @@ enum lock_type4 {
#define NFS4_DEBUG 1
-/* Index of predefined Linux client operations */
+/*
+ * Index of predefined Linux client operations
+ *
+ * To ensure that /proc/net/rpc/nfs remains correctly ordered, please
+ * append only to this enum when adding new client operations.
+ */
enum {
NFSPROC4_CLNT_NULL = 0, /* Unused */
@@ -480,7 +485,6 @@ enum {
NFSPROC4_CLNT_ACCESS,
NFSPROC4_CLNT_GETATTR,
NFSPROC4_CLNT_LOOKUP,
- NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LOOKUP_ROOT,
NFSPROC4_CLNT_REMOVE,
NFSPROC4_CLNT_RENAME,
@@ -500,7 +504,6 @@ enum {
NFSPROC4_CLNT_SECINFO,
NFSPROC4_CLNT_FSID_PRESENT,
- /* nfs41 */
NFSPROC4_CLNT_EXCHANGE_ID,
NFSPROC4_CLNT_CREATE_SESSION,
NFSPROC4_CLNT_DESTROY_SESSION,
@@ -518,13 +521,14 @@ enum {
NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
NFSPROC4_CLNT_DESTROY_CLIENTID,
- /* nfs42 */
NFSPROC4_CLNT_SEEK,
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
NFSPROC4_CLNT_COPY,
+
+ NFSPROC4_CLNT_LOOKUPP,
};
/* nfs41 types */
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 11ce6b1117a8..6e8200215321 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -5,20 +5,36 @@
Originally written by Alan Cox.
Hacked to death by C. Scott Ananian and David Huggins-Daines.
-
- Some of the constants in here are from the corresponding
- NetBSD/OpenBSD header file, by Allen Briggs. We figured out the
- rest of them on our own. */
+*/
+
#ifndef LINUX_NUBUS_H
#define LINUX_NUBUS_H
+#include <linux/device.h>
#include <asm/nubus.h>
#include <uapi/linux/nubus.h>
+struct proc_dir_entry;
+struct seq_file;
+
+struct nubus_dir {
+ unsigned char *base;
+ unsigned char *ptr;
+ int done;
+ int mask;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_dirent {
+ unsigned char *base;
+ unsigned char type;
+ __u32 data; /* Actually 24 bits used */
+ int mask;
+};
+
struct nubus_board {
- struct nubus_board* next;
- struct nubus_dev* first_dev;
-
+ struct device dev;
+
/* Only 9-E actually exist, though 0-8 are also theoretically
possible, and 0 is a special case which represents the
motherboard and onboard peripherals (Ethernet, video) */
@@ -27,10 +43,10 @@ struct nubus_board {
char name[64];
/* Format block */
- unsigned char* fblock;
+ unsigned char *fblock;
/* Root directory (does *not* always equal fblock + doffset!) */
- unsigned char* directory;
-
+ unsigned char *directory;
+
unsigned long slot_addr;
/* Offset to root directory (sometimes) */
unsigned long doffset;
@@ -41,15 +57,15 @@ struct nubus_board {
unsigned char rev;
unsigned char format;
unsigned char lanes;
-};
-struct nubus_dev {
- /* Next link in device list */
- struct nubus_dev* next;
/* Directory entry in /proc/bus/nubus */
- struct proc_dir_entry* procdir;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_rsrc {
+ struct list_head list;
- /* The functional resource ID of this device */
+ /* The functional resource ID */
unsigned char resid;
/* These are mostly here for convenience; we could always read
them from the ROMs if we wanted to */
@@ -57,79 +73,116 @@ struct nubus_dev {
unsigned short type;
unsigned short dr_sw;
unsigned short dr_hw;
- /* This is the device's name rather than the board's.
- Sometimes they are different. Usually the board name is
- more correct. */
- char name[64];
- /* MacOS driver (I kid you not) */
- unsigned char* driver;
- /* Actually this is an offset */
- unsigned long iobase;
- unsigned long iosize;
- unsigned char flags, hwdevid;
-
+
/* Functional directory */
- unsigned char* directory;
+ unsigned char *directory;
/* Much of our info comes from here */
- struct nubus_board* board;
+ struct nubus_board *board;
+};
+
+/* This is all NuBus functional resources (used to find devices later on) */
+extern struct list_head nubus_func_rsrcs;
+
+struct nubus_driver {
+ struct device_driver driver;
+ int (*probe)(struct nubus_board *board);
+ int (*remove)(struct nubus_board *board);
};
-/* This is all NuBus devices (used to find devices later on) */
-extern struct nubus_dev* nubus_devices;
-/* This is all NuBus cards */
-extern struct nubus_board* nubus_boards;
+extern struct bus_type nubus_bus_type;
/* Generic NuBus interface functions, modelled after the PCI interface */
-void nubus_scan_bus(void);
#ifdef CONFIG_PROC_FS
-extern void nubus_proc_init(void);
+void nubus_proc_init(void);
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board);
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size);
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent);
#else
static inline void nubus_proc_init(void) {}
+static inline
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
+{ return NULL; }
+static inline
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
+{ return NULL; }
+static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size) {}
+static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent) {}
#endif
-int get_nubus_list(char *buf);
-int nubus_proc_attach_device(struct nubus_dev *dev);
-/* If we need more precision we can add some more of these */
-struct nubus_dev* nubus_find_device(unsigned short category,
- unsigned short type,
- unsigned short dr_hw,
- unsigned short dr_sw,
- const struct nubus_dev* from);
-struct nubus_dev* nubus_find_type(unsigned short category,
- unsigned short type,
- const struct nubus_dev* from);
-/* Might have more than one device in a slot, you know... */
-struct nubus_dev* nubus_find_slot(unsigned int slot,
- const struct nubus_dev* from);
+
+struct nubus_rsrc *nubus_first_rsrc_or_null(void);
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from);
+
+#define for_each_func_rsrc(f) \
+ for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f))
+
+#define for_each_board_func_rsrc(b, f) \
+ for_each_func_rsrc(f) if (f->board != b) {} else
/* These are somewhat more NuBus-specific. They all return 0 for
success and -1 for failure, as you'd expect. */
/* The root directory which contains the board and functional
directories */
-int nubus_get_root_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_root_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The board directory */
-int nubus_get_board_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_board_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The functional directory */
-int nubus_get_func_dir(const struct nubus_dev* dev,
- struct nubus_dir* dir);
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir);
/* These work on any directory gotten via the above */
-int nubus_readdir(struct nubus_dir* dir,
- struct nubus_dirent* ent);
-int nubus_find_rsrc(struct nubus_dir* dir,
+int nubus_readdir(struct nubus_dir *dir,
+ struct nubus_dirent *ent);
+int nubus_find_rsrc(struct nubus_dir *dir,
unsigned char rsrc_type,
- struct nubus_dirent* ent);
-int nubus_rewinddir(struct nubus_dir* dir);
+ struct nubus_dirent *ent);
+int nubus_rewinddir(struct nubus_dir *dir);
/* Things to do with directory entries */
-int nubus_get_subdir(const struct nubus_dirent* ent,
- struct nubus_dir* dir);
-void nubus_get_rsrc_mem(void* dest,
- const struct nubus_dirent *dirent,
- int len);
-void nubus_get_rsrc_str(void* dest,
- const struct nubus_dirent *dirent,
- int maxlen);
+int nubus_get_subdir(const struct nubus_dirent *ent,
+ struct nubus_dir *dir);
+void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
+
+/* Declarations relating to driver model objects */
+int nubus_bus_register(void);
+int nubus_device_register(struct nubus_board *board);
+int nubus_driver_register(struct nubus_driver *ndrv);
+void nubus_driver_unregister(struct nubus_driver *ndrv);
+int nubus_proc_show(struct seq_file *m, void *data);
+
+static inline void nubus_set_drvdata(struct nubus_board *board, void *data)
+{
+ dev_set_drvdata(&board->dev, data);
+}
+
+static inline void *nubus_get_drvdata(struct nubus_board *board)
+{
+ return dev_get_drvdata(&board->dev);
+}
+
+/* Returns a pointer to the "standard" slot space. */
+static inline void *nubus_slot_addr(int slot)
+{
+ return (void *)(0xF0000000 | (slot << 24));
+}
+
#endif /* LINUX_NUBUS_H */
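The header now exposes a conventional driver-model bus. A minimal sketch of a client driver against it; the names and the trivial bodies are hypothetical:

    static int sketch_probe(struct nubus_board *board)
    {
            nubus_set_drvdata(board, NULL);
            return 0;
    }

    static int sketch_remove(struct nubus_board *board)
    {
            return 0;
    }

    static struct nubus_driver sketch_nubus_driver = {
            .probe  = sketch_probe,
            .remove = sketch_remove,
            .driver = {
                    .name = "sketch",
            },
    };

Module init would then call nubus_driver_register(&sketch_nubus_driver), and the core matches the driver against boards enumerated on nubus_bus_type.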
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index aea87f0d917b..4112e2bd747f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -124,14 +124,20 @@ enum {
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
-#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
-#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
-
-#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
-#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
-#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
-#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
-#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+
+enum {
+ NVME_CMBSZ_SQS = 1 << 0,
+ NVME_CMBSZ_CQS = 1 << 1,
+ NVME_CMBSZ_LISTS = 1 << 2,
+ NVME_CMBSZ_RDS = 1 << 3,
+ NVME_CMBSZ_WDS = 1 << 4,
+
+ NVME_CMBSZ_SZ_SHIFT = 12,
+ NVME_CMBSZ_SZ_MASK = 0xfffff,
+
+ NVME_CMBSZ_SZU_SHIFT = 8,
+ NVME_CMBSZ_SZU_MASK = 0xf,
+};
/*
* Submission and Completion Queue Entry Sizes for the NVM command set.
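The enum replaces ad-hoc extraction macros without changing the encoding. A sketch of computing the controller memory buffer size with the new constants, assuming the NVMe specification's 4 KiB granularity scaled by sixteen per SZU step:

    static u64 sketch_cmb_bytes(u32 cmbsz)
    {
            u64 szu = (cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
            u64 sz  = (cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;

            return sz * (1ULL << (12 + 4 * szu)); /* 4 KiB * 16^SZU units */
    }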
diff --git a/include/linux/of.h b/include/linux/of.h
index d3dea1d1e3a9..173102dafb07 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -544,6 +544,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
bool of_console_check(struct device_node *dn, char *name, int index);
+extern int of_cpu_node_to_id(struct device_node *np);
+
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -916,6 +918,11 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
+static inline int of_cpu_node_to_id(struct device_node *np)
+{
+ return -ENODEV;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 1fe205582111..18a7f03e1182 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -31,7 +31,7 @@ enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
OF_GPIO_SINGLE_ENDED = 0x2,
OF_GPIO_OPEN_DRAIN = 0x4,
- OF_GPIO_SLEEP_MAY_LOSE_VALUE = 0x8,
+ OF_GPIO_TRANSITORY = 0x8,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index edfa280c3d56..053feb41510a 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -25,15 +25,43 @@ struct gpmc_nand_ops {
struct gpmc_nand_regs;
+struct gpmc_onenand_info {
+ bool sync_read;
+ bool sync_write;
+ int burst_len;
+};
+
#if IS_ENABLED(CONFIG_OMAP_GPMC)
struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs);
+/**
+ * gpmc_omap_onenand_set_timings - set optimized sync timings.
+ * @dev: the OneNAND device
+ * @cs: Chip Select Region
+ * @freq: Chip frequency
+ * @latency: Burst latency cycle count
+ * @info: Structure describing parameters used
+ *
+ * Sets optimized timings for the @cs region based on @freq and @latency.
+ * Updates the @info structure based on the GPMC settings.
+ */
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info);
+
#else
static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs)
{
return NULL;
}
+
+static inline
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_OMAP_GPMC */
extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3ec44e27aa9d..50c2b8786831 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -46,11 +46,6 @@
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
*
- * PG_highmem pages are not permanently mapped into the kernel virtual address
- * space, they need to be kmapped separately for doing IO on the pages. The
- * struct page (these bits with information) are always mapped into kernel
- * address space...
- *
* PG_hwpoison indicates that a page got corrupted in hardware and contains
* data with incorrect ECC bits that triggered a machine check. Accessing is
* not safe since it may cause another machine check. Don't touch!
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 5fb6580f7f23..6dc456ac6136 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -9,14 +9,14 @@
#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H
-/* 14 pointers + two long's align the pagevec structure to a power of two */
-#define PAGEVEC_SIZE 14
+/* 15 pointers + header align the pagevec structure to a power of two */
+#define PAGEVEC_SIZE 15
struct page;
struct address_space;
struct pagevec {
- unsigned long nr;
+ unsigned char nr;
bool percpu_pvec_drained;
struct page *pages[PAGEVEC_SIZE];
};
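Shrinking nr to an unsigned char is what frees the fifteenth page slot. A hedged arithmetic check matching the comment above, assuming a 64-bit build with 8-byte pointers (a 32-bit build comes to 64 bytes, still a power of two):

    static void sketch_check_pagevec(void)
    {
            /* 1 (nr) + 1 (drained) + 6 padding + 15 * 8 (pages[]) = 128 */
            BUILD_BUG_ON(sizeof(struct pagevec) != 128);
    }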
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c170c9250c8b..0314e0716c30 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1072,6 +1072,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
int pci_set_cacheline_size(struct pci_dev *dev);
#define HAVE_PCI_SET_MWI
int __must_check pci_set_mwi(struct pci_dev *dev);
+int __must_check pcim_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 6658d9ee5257..864d167a1073 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -139,12 +139,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
* when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
+ *
+ * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+ * with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
- /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
- smp_read_barrier_depends();
-
/*
* Theoretically, the following could test just ATOMIC; however,
* then we'd have to mask off DEAD separately as DEAD may be
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 73a7bf30fe9a..4f052496cdfd 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -86,7 +86,7 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
return 0;
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
return (fbc->counters != NULL);
}
@@ -167,9 +167,9 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
return percpu_counter_read(fbc);
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
- return 1;
+ return true;
}
#endif /* CONFIG_SMP */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index dc82a07cb4fd..5a0c3e53e7c2 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -59,6 +59,7 @@
#define PHY_HAS_INTERRUPT 0x00000001
#define PHY_IS_INTERNAL 0x00000002
+#define PHY_RST_AFTER_CLK_EN 0x00000004
#define MDIO_DEVICE_IS_PHY 0x80000000
/* Interface Mode definitions */
@@ -468,7 +469,6 @@ struct phy_device {
/* Interrupt and Polling infrastructure */
struct work_struct phy_queue;
struct delayed_work state_queue;
- atomic_t irq_disable;
struct mutex lock;
@@ -497,19 +497,19 @@ struct phy_device {
* flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
*
- * The drivers must implement config_aneg and read_status. All
- * other functions are optional. Note that none of these
- * functions should be called from interrupt time. The goal is
- * for the bus read/write functions to be able to block when the
- * bus transaction is happening, and be freed up by an interrupt
- * (The MPC85xx has this ability, though it is not currently
- * supported in the driver).
+ * All functions are optional. If config_aneg or read_status
+ * are not implemented, the phy core uses the genphy versions.
+ * Note that none of these functions should be called from
+ * interrupt time. The goal is for the bus read/write functions
+ * to be able to block when the bus transaction is happening,
+ * and be freed up by an interrupt (The MPC85xx has this ability,
+ * though it is not currently supported in the driver).
*/
struct phy_driver {
struct mdio_driver_common mdiodrv;
u32 phy_id;
char *name;
- unsigned int phy_id_mask;
+ u32 phy_id_mask;
u32 features;
u32 flags;
const void *driver_data;
@@ -634,6 +634,9 @@ struct phy_driver {
int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
u16 val);
+ int (*read_page)(struct phy_device *dev);
+ int (*write_page)(struct phy_device *dev, int page);
+
/* Get the size and type of the eeprom contained within a plug-in
* module */
int (*module_info)(struct phy_device *dev,
@@ -690,6 +693,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
size_t phy_speeds(unsigned int *speeds, size_t size,
unsigned long *mask, size_t maxbit);
+void phy_resolve_aneg_linkmode(struct phy_device *phydev);
+
/**
* phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
@@ -716,6 +721,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
}
/**
+ * __phy_read - convenience function for reading a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to read
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_read(struct phy_device *phydev, u32 regnum)
+{
+ return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
+}
+
+/**
* phy_write - Convenience function for writing a given PHY register
* @phydev: the phy_device struct
* @regnum: register number to write
@@ -731,6 +748,72 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
}
/**
+ * __phy_write - Convenience function for writing a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum,
+ val);
+}
+
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+
+/**
+ * __phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return __phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * __phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum,
+ u16 val)
+{
+ return __phy_modify(phydev, regnum, val, 0);
+}
+
+/**
+ * phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ */
+static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ */
+static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return phy_modify(phydev, regnum, val, 0);
+}
+
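These helpers fold the usual read-modify-write of a PHY register into one operation under the MDIO bus lock. A usage sketch; the register number and bit positions are invented:

    static int sketch_set_led_mode(struct phy_device *phydev)
    {
            /* clear the two-bit field at bits 3:2, then set mode 0b01 */
            return phy_modify(phydev, 0x19, GENMASK(3, 2), BIT(2));
    }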
+/**
* phy_interrupt_is_valid - Convenience function for testing a given PHY irq
* @phydev: the phy_device struct
*
@@ -763,6 +846,20 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
};
/**
+ * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z
+ * negotiation
+ * @mode: one of &enum phy_interface_t
+ *
+ * Returns true if the phy interface mode uses the 16-bit negotiation
+ * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
+ */
+static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
+{
+ return mode == PHY_INTERFACE_MODE_1000BASEX ||
+ mode == PHY_INTERFACE_MODE_2500BASEX;
+}
+
+/**
* phy_interface_is_rgmii - Convenience function for testing if a PHY interface
* is RGMII (all variants)
* @phydev: the phy_device struct
@@ -794,6 +891,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
*/
int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
+int phy_save_page(struct phy_device *phydev);
+int phy_select_page(struct phy_device *phydev, int page);
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum);
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val);
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set);
+
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
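The paged helpers pair a page switch with the lock handling, and phy_restore_page() propagates the first error seen. A sketch of the idiom; the page and register numbers are invented:

    static int sketch_read_shadow_reg(struct phy_device *phydev)
    {
            int oldpage, ret = 0;

            oldpage = phy_select_page(phydev, 2); /* takes the bus lock */
            if (oldpage >= 0)
                    ret = __phy_read(phydev, 0x11);

            /* drops the lock, restores the old page, and returns ret
             * or the first error encountered */
            return phy_restore_page(phydev, oldpage, ret);
    }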
@@ -840,13 +945,11 @@ int phy_aneg_done(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
int phy_restart_aneg(struct phy_device *phydev);
+int phy_reset_after_clk_enable(struct phy_device *phydev);
-static inline int phy_read_status(struct phy_device *phydev)
+static inline void phy_device_reset(struct phy_device *phydev, int value)
{
- if (!phydev->drv)
- return -EIO;
-
- return phydev->drv->read_status(phydev);
+ mdio_device_reset(&phydev->mdio, value);
}
#define phydev_err(_phydev, format, args...) \
@@ -889,6 +992,18 @@ int genphy_c45_read_lpa(struct phy_device *phydev);
int genphy_c45_read_pma(struct phy_device *phydev);
int genphy_c45_pma_setup_forced(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+int genphy_c45_read_mdix(struct phy_device *phydev);
+
+static inline int phy_read_status(struct phy_device *phydev)
+{
+ if (!phydev->drv)
+ return -EIO;
+
+ if (phydev->drv->read_status)
+ return phydev->drv->read_status(phydev);
+ else
+ return genphy_read_status(phydev);
+}
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
@@ -898,7 +1013,7 @@ int phy_drivers_register(struct phy_driver *new_driver, int n,
void phy_state_machine(struct work_struct *work);
void phy_change(struct phy_device *phydev);
void phy_change_work(struct work_struct *work);
-void phy_mac_interrupt(struct phy_device *phydev, int new_link);
+void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
void phy_trigger_machine(struct phy_device *phydev, bool sync);
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index cf6392de6eb0..ee54453a40a0 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
-extern int fixed_phy_update_state(struct phy_device *phydev,
- const struct fixed_phy_status *status,
- const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status,
@@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
{
return -ENODEV;
}
-static inline int fixed_phy_update_state(struct phy_device *phydev,
- const struct fixed_phy_status *status,
- const struct fixed_phy_status *changed)
-{
- return -ENODEV;
-}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index af67edd4ae38..bd137c273d38 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -7,6 +7,7 @@
struct device_node;
struct ethtool_cmd;
+struct fwnode_handle;
struct net_device;
enum {
@@ -20,19 +21,31 @@ enum {
MLO_AN_PHY = 0, /* Conventional PHY */
MLO_AN_FIXED, /* Fixed-link mode */
- MLO_AN_SGMII, /* Cisco SGMII protocol */
- MLO_AN_8023Z, /* 1000base-X protocol */
+ MLO_AN_INBAND, /* In-band protocol */
};
static inline bool phylink_autoneg_inband(unsigned int mode)
{
- return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z;
+ return mode == MLO_AN_INBAND;
}
+/**
+ * struct phylink_link_state - link state structure
+ * @advertising: ethtool bitmask containing advertised link modes
+ * @lp_advertising: ethtool bitmask containing link partner advertised link
+ * modes
+ * @interface: link &typedef phy_interface_t mode
+ * @speed: link speed, one of the SPEED_* constants.
+ * @duplex: link duplex mode, one of DUPLEX_* constants.
+ * @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @link: true if the link is up.
+ * @an_enabled: true if autonegotiation is enabled/desired.
+ * @an_complete: true if autonegotiation has completed.
+ */
struct phylink_link_state {
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
- phy_interface_t interface; /* PHY_INTERFACE_xxx */
+ phy_interface_t interface;
int speed;
int duplex;
int pause;
@@ -41,72 +54,145 @@ struct phylink_link_state {
unsigned int an_complete:1;
};
+/**
+ * struct phylink_mac_ops - MAC operations structure.
+ * @validate: Validate and update the link configuration.
+ * @mac_link_state: Read the current link state from the hardware.
+ * @mac_config: configure the MAC for the selected mode and state.
+ * @mac_an_restart: restart 802.3z BaseX autonegotiation.
+ * @mac_link_down: take the link down.
+ * @mac_link_up: allow the link to come up.
+ *
+ * The individual methods are described more fully below.
+ */
struct phylink_mac_ops {
- /**
- * validate: validate and update the link configuration
- * @ndev: net_device structure associated with MAC
- * @config: configuration to validate
- *
- * Update the %config->supported and %config->advertised masks
- * clearing bits that can not be supported.
- *
- * Note: the PHY may be able to transform from one connection
- * technology to another, so, eg, don't clear 1000BaseX just
- * because the MAC is unable to support it. This is more about
- * clearing unsupported speeds and duplex settings.
- *
- * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX
- * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
- * based on %config->advertised and/or %config->speed.
- */
void (*validate)(struct net_device *ndev, unsigned long *supported,
struct phylink_link_state *state);
-
- /* Read the current link state from the hardware */
- int (*mac_link_state)(struct net_device *, struct phylink_link_state *);
-
- /* Configure the MAC */
- /**
- * mac_config: configure the MAC for the selected mode and state
- * @ndev: net_device structure for the MAC
- * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII
- * @state: state structure
- *
- * The action performed depends on the currently selected mode:
- *
- * %MLO_AN_FIXED, %MLO_AN_PHY:
- * set the specified speed, duplex, pause mode, and phy interface
- * mode in the provided @state.
- * %MLO_AN_8023Z:
- * place the link in 1000base-X mode, advertising the parameters
- * given in advertising in @state.
- * %MLO_AN_SGMII:
- * place the link in Cisco SGMII mode - there is no advertisment
- * to make as the PHY communicates the speed and duplex to the
- * MAC over the in-band control word. Configuration of the pause
- * mode is as per MLO_AN_PHY since this is not included.
- */
+ int (*mac_link_state)(struct net_device *ndev,
+ struct phylink_link_state *state);
void (*mac_config)(struct net_device *ndev, unsigned int mode,
const struct phylink_link_state *state);
-
- /**
- * mac_an_restart: restart 802.3z BaseX autonegotiation
- * @ndev: net_device structure for the MAC
- */
void (*mac_an_restart)(struct net_device *ndev);
-
- void (*mac_link_down)(struct net_device *, unsigned int mode);
- void (*mac_link_up)(struct net_device *, unsigned int mode,
- struct phy_device *);
+ void (*mac_link_down)(struct net_device *ndev, unsigned int mode);
+ void (*mac_link_up)(struct net_device *ndev, unsigned int mode,
+ struct phy_device *phy);
};
-struct phylink *phylink_create(struct net_device *, struct device_node *,
+#if 0 /* For kernel-doc purposes only. */
+/**
+ * validate - Validate and update the link configuration
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Clear bits in the @supported and @state->advertising masks that
+ * are not supportable by the MAC.
+ *
+ * Note that the PHY may be able to transform from one connection
+ * technology to another, so, eg, don't clear 1000BaseX just
+ * because the MAC is unable to support BaseX mode. This is more about
+ * clearing unsupported speeds and duplex settings.
+ *
+ * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
+ * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
+ * based on @state->advertising and/or @state->speed and update
+ * @state->interface accordingly.
+ */
+void validate(struct net_device *ndev, unsigned long *supported,
+ struct phylink_link_state *state);
+
+/**
+ * mac_link_state() - Read the current link state from the hardware
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Read the current link state from the MAC, reporting the current
+ * speed in @state->speed, duplex mode in @state->duplex, pause mode
+ * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link
+ * up state in @state->link.
+ */
+int mac_link_state(struct net_device *ndev,
+ struct phylink_link_state *state);
+
+/**
+ * mac_config() - configure the MAC for the selected mode and state
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * The action performed depends on the currently selected mode:
+ *
+ * %MLO_AN_FIXED, %MLO_AN_PHY:
+ * Configure the specified @state->speed, @state->duplex and
+ * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode.
+ *
+ * %MLO_AN_INBAND:
+ * place the link in an inband negotiation mode (such as 802.3z
+ * 1000base-X or Cisco SGMII mode depending on the @state->interface
+ * mode). In both cases, link state management (whether the link
+ * is up or not) is performed by the MAC, and reported via the
+ * mac_link_state() callback. Changes in link state must be made
+ * by calling phylink_mac_change().
+ *
+ * If in 802.3z mode, the link speed is fixed, dependent on the
+ * @state->interface. Duplex is negotiated, and pause is advertised
+ * according to @state->an_enabled, @state->pause and
+ * @state->advertising flags. Beware of MACs which only support full
+ * duplex at gigabit and higher speeds.
+ *
+ * If in Cisco SGMII mode, the link speed and duplex mode are passed
+ * in the serial bitstream 16-bit configuration word, and the MAC
+ * should be configured to read these bits and acknowledge the
+ * configuration word. Nothing is advertised by the MAC. The MAC is
+ * responsible for reading the configuration word and configuring
+ * itself accordingly.
+ */
+void mac_config(struct net_device *ndev, unsigned int mode,
+ const struct phylink_link_state *state);
+
+/**
+ * mac_an_restart() - restart 802.3z BaseX autonegotiation
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ */
+void mac_an_restart(struct net_device *ndev);
+
+/**
+ * mac_link_down() - take the link down
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: link autonegotiation mode
+ *
+ * If @mode is not an in-band negotiation mode (as defined by
+ * phylink_autoneg_inband()), force the link down and disable any
+ * Energy Efficient Ethernet MAC configuration.
+ */
+void mac_link_down(struct net_device *ndev, unsigned int mode);
+
+/**
+ * mac_link_up() - allow the link to come up
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: link autonegotiation mode
+ * @phy: any attached phy
+ *
+ * If @mode is not an in-band negotiation mode (as defined by
+ * phylink_autoneg_inband()), allow the link to come up. If @phy
+ * is non-%NULL, configure Energy Efficient Ethernet by calling
+ * phy_init_eee() and perform appropriate MAC configuration for EEE.
+ */
+void mac_link_up(struct net_device *ndev, unsigned int mode,
+ struct phy_device *phy);
+#endif
+
+struct phylink *phylink_create(struct net_device *, struct fwnode_handle *,
phy_interface_t iface, const struct phylink_mac_ops *ops);
void phylink_destroy(struct phylink *);
int phylink_connect_phy(struct phylink *, struct phy_device *);
-int phylink_of_phy_connect(struct phylink *, struct device_node *);
+int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
void phylink_disconnect_phy(struct phylink *);
+int phylink_fixed_state_cb(struct phylink *,
+ void (*cb)(struct net_device *dev,
+ struct phylink_link_state *));
void phylink_mac_change(struct phylink *, bool up);
@@ -128,7 +214,6 @@ int phylink_ethtool_set_pauseparam(struct phylink *,
int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *);
int phylink_ethtool_get_module_eeprom(struct phylink *,
struct ethtool_eeprom *, u8 *);
-int phylink_init_eee(struct phylink *, bool);
int phylink_get_eee_err(struct phylink *);
int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
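phylink_create() now takes a fwnode rather than an OF node, and phylink_of_phy_connect() gained a flags argument. A sketch of the updated call sequence for an OF-based MAC driver; the ops table is assumed to exist elsewhere:

    static int sketch_attach_phylink(struct net_device *ndev,
                                     struct device_node *np,
                                     const struct phylink_mac_ops *ops)
    {
            struct phylink *pl;

            pl = phylink_create(ndev, of_fwnode_handle(np),
                                PHY_INTERFACE_MODE_SGMII, ops);
            if (IS_ERR(pl))
                    return PTR_ERR(pl);

            return phylink_of_phy_connect(pl, np, 0);
    }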
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index ec6dadcc1fde..6c0680641108 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -94,6 +94,7 @@
* or latch delay (on outputs) this parameter (in a custom format)
* specifies the clock skew or latch delay. It typically controls how
* many double inverters are put in front of the line.
+ * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
@@ -122,6 +123,7 @@ enum pin_config_param {
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_SKEW_DELAY,
+ PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
};
diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h
deleted file mode 100644
index 56ff0e6f5ad1..000000000000
--- a/include/linux/platform_data/mtd-onenand-omap2.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2006 Nokia Corporation
- * Author: Juha Yrjola
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MTD_ONENAND_OMAP2_H
-#define __MTD_ONENAND_OMAP2_H
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-
-#define ONENAND_SYNC_READ (1 << 0)
-#define ONENAND_SYNC_READWRITE (1 << 1)
-#define ONENAND_IN_OMAP34XX (1 << 2)
-
-struct omap_onenand_platform_data {
- int cs;
- int gpio_irq;
- struct mtd_partition *parts;
- int nr_parts;
- int (*onenand_setup)(void __iomem *, int *freq_ptr);
- int dma_channel;
- u8 flags;
- u8 regulator_can_sleep;
- u8 skip_initial_unlocking;
-
- /* for passing the partitions */
- struct device_node *of_node;
-};
-#endif
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index da79774078a7..773daf7915a3 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SPI_S3C64XX_H
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 492ed473ba7e..e723b78d8357 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -556,9 +556,10 @@ struct pm_subsys_data {
* These flags can be set by device drivers at the probe time. They need not be
* cleared by the drivers as the driver core will take care of that.
*
- * NEVER_SKIP: Do not skip system suspend/resume callbacks for the device.
+ * NEVER_SKIP: Do not skip any system suspend/resume callbacks for the device.
* SMART_PREPARE: Check the return value of the driver's ->prepare callback.
* SMART_SUSPEND: No need to resume the device from runtime suspend.
+ * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible.
*
* Setting SMART_PREPARE instructs bus types and PM domains which may want
* system suspend/resume callbacks to be skipped for the device to return 0 from
@@ -572,10 +573,14 @@ struct pm_subsys_data {
* necessary from the driver's perspective. It also may cause them to skip
* invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
* the driver if they decide to leave the device in runtime suspend.
+ *
+ * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the
+ * driver prefers the device to be left in suspend after system resume.
*/
-#define DPM_FLAG_NEVER_SKIP BIT(0)
-#define DPM_FLAG_SMART_PREPARE BIT(1)
-#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_NEVER_SKIP BIT(0)
+#define DPM_FLAG_SMART_PREPARE BIT(1)
+#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_LEAVE_SUSPENDED BIT(3)
struct dev_pm_info {
pm_message_t power_state;
@@ -597,6 +602,8 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
+ unsigned int must_resume:1; /* Owned by the PM core */
+ unsigned int may_skip_resume:1; /* Set by subsystems */
#else
unsigned int should_wakeup:1;
#endif
@@ -766,6 +773,7 @@ extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
extern void dev_pm_skip_next_resume_phases(struct device *dev);
+extern bool dev_pm_may_skip_resume(struct device *dev);
extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
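A driver would typically opt in from probe. A hedged sketch, assuming the dev_pm_set_driver_flags() helper from the same PM rework is available:

    static int sketch_probe(struct device *dev)
    {
            /* stay runtime-suspended across system transitions and let
             * the core leave the device suspended on resume if possible */
            dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
                                         DPM_FLAG_LEAVE_SUSPENDED);
            return 0;
    }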
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 4c2cba7ec1d4..4238dde0aaf0 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -88,6 +88,11 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && !!dev->power.wakeup;
}
+static inline void device_set_wakeup_path(struct device *dev)
+{
+ dev->power.wakeup_path = true;
+}
+
/* drivers/base/power/wakeup.c */
extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
extern struct wakeup_source *wakeup_source_create(const char *name);
@@ -174,6 +179,8 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && dev->power.should_wakeup;
}
+static inline void device_set_wakeup_path(struct device *dev) {}
+
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
static inline void pm_stay_awake(struct device *dev) {}
diff --git a/include/linux/poll.h b/include/linux/poll.h
index d384f12abdd5..04781a753326 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -37,7 +37,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_
*/
typedef struct poll_table_struct {
poll_queue_proc _qproc;
- unsigned long _key;
+ __poll_t _key;
} poll_table;
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -62,20 +62,20 @@ static inline bool poll_does_not_wait(const poll_table *p)
* to be started implicitly on poll(). You typically only want to do that
* if the application is actually polling for POLLIN and/or POLLOUT.
*/
-static inline unsigned long poll_requested_events(const poll_table *p)
+static inline __poll_t poll_requested_events(const poll_table *p)
{
- return p ? p->_key : ~0UL;
+ return p ? p->_key : ~(__poll_t)0;
}
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
pt->_qproc = qproc;
- pt->_key = ~0UL; /* all events enabled */
+ pt->_key = ~(__poll_t)0; /* all events enabled */
}
struct poll_table_entry {
struct file *filp;
- unsigned long key;
+ __poll_t key;
wait_queue_entry_t wait;
wait_queue_head_t *wait_address;
};
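With _key carried as __poll_t, a driver's poll method stays bitwise-typed end to end. A self-contained sketch; the wait queue and ready flag are stand-ins:

    static DECLARE_WAIT_QUEUE_HEAD(sketch_waitq);
    static bool sketch_data_ready;

    static __poll_t sketch_poll(struct file *file, poll_table *wait)
    {
            __poll_t mask = 0;

            poll_wait(file, &sketch_waitq, wait);
            if (sketch_data_ready)
                    mask |= EPOLLIN | EPOLLRDNORM;
            return mask;
    }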
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 38d8225510f1..3a3bc71017d5 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -68,7 +68,7 @@ struct posix_clock_operations {
int (*open) (struct posix_clock *pc, fmode_t f_mode);
- uint (*poll) (struct posix_clock *pc,
+ __poll_t (*poll) (struct posix_clock *pc,
struct file *file, poll_table *wait);
int (*release) (struct posix_clock *pc);
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 672c4f32311e..c85704fcdbd2 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -42,13 +42,26 @@ struct cpu_timer_list {
#define CLOCKFD CPUCLOCK_MAX
#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
-#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
- ((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
-#define MAKE_THREAD_CPUCLOCK(tid, clock) \
- MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
+static inline clockid_t make_process_cpuclock(const unsigned int pid,
+ const clockid_t clock)
+{
+ return ((~pid) << 3) | clock;
+}
+static inline clockid_t make_thread_cpuclock(const unsigned int tid,
+ const clockid_t clock)
+{
+ return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK);
+}
-#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
-#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3))
+static inline clockid_t fd_to_clockid(const int fd)
+{
+ return make_process_cpuclock((unsigned int) fd, CLOCKFD);
+}
+
+static inline int clockid_to_fd(const clockid_t clk)
+{
+ return ~(clk >> 3);
+}
#define REQUEUE_PENDING 1
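/*
 * Illustrative check: the new inline helpers round-trip.  For fd == 3,
 * fd_to_clockid() yields ((~3u) << 3) | CLOCKFD, a negative clockid, and
 * clockid_to_fd() recovers 3 by shifting back and inverting.
 * foo_clockid_roundtrip_check() is a hypothetical helper.
 */
static inline void foo_clockid_roundtrip_check(void)
{
	WARN_ON(clockid_to_fd(fd_to_clockid(3)) != 3);
}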
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index b2b7255ec7f5..540595a321a7 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <uapi/linux/posix_acl.h>
struct posix_acl_entry {
@@ -24,7 +25,7 @@ struct posix_acl_entry {
};
struct posix_acl {
- atomic_t a_refcount;
+ refcount_t a_refcount;
struct rcu_head a_rcu;
unsigned int a_count;
struct posix_acl_entry a_entries[0];
@@ -41,7 +42,7 @@ static inline struct posix_acl *
posix_acl_dup(struct posix_acl *acl)
{
if (acl)
- atomic_inc(&acl->a_refcount);
+ refcount_inc(&acl->a_refcount);
return acl;
}
@@ -51,7 +52,7 @@ posix_acl_dup(struct posix_acl *acl)
static inline void
posix_acl_release(struct posix_acl *acl)
{
- if (acl && atomic_dec_and_test(&acl->a_refcount))
+ if (acl && refcount_dec_and_test(&acl->a_refcount))
kfree_rcu(acl, a_rcu);
}
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index e6187f524f2c..01fbf1b16258 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -16,6 +16,7 @@ enum bq27xxx_chip {
BQ27520G2, /* bq27520G2 */
BQ27520G3, /* bq27520G3 */
BQ27520G4, /* bq27520G4 */
+ BQ27521, /* bq27521 */
BQ27530, /* bq27530, bq27531 */
BQ27531,
BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 2ff18c9840a7..d31cb6215905 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd);
#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
extern void *ns_get_path(struct path *path, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
+typedef struct ns_common *ns_get_path_helper_t(void *);
+extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+ void *private_data);
extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
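/*
 * Illustrative sketch: a caller of the new ns_get_path_cb(), supplying a
 * helper that maps private data to its ns_common.  struct foo_ctx and
 * its field are hypothetical.
 */
static struct ns_common *foo_ns_lookup(void *data)
{
	struct foo_ctx *ctx = data;

	return &ctx->ns;
}
/* usage: void *err = ns_get_path_cb(&path, foo_ns_lookup, ctx); */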
diff --git a/include/linux/property.h b/include/linux/property.h
index f6189a3ac63c..769d372c1edf 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -83,11 +83,17 @@ struct fwnode_handle *fwnode_get_next_parent(
struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_child_node(
const struct fwnode_handle *fwnode, struct fwnode_handle *child);
+struct fwnode_handle *fwnode_get_next_available_child_node(
+ const struct fwnode_handle *fwnode, struct fwnode_handle *child);
#define fwnode_for_each_child_node(fwnode, child) \
for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
child = fwnode_get_next_child_node(fwnode, child))
+#define fwnode_for_each_available_child_node(fwnode, child) \
+ for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\
+ child = fwnode_get_next_available_child_node(fwnode, child))
+
struct fwnode_handle *device_get_next_child_node(
struct device *dev, struct fwnode_handle *child);
@@ -103,6 +109,8 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev,
struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
+int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index);
+
unsigned int device_get_child_node_count(struct device *dev);
static inline bool device_property_read_bool(struct device *dev,
@@ -206,7 +214,7 @@ struct property_entry {
*/
#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
.length = ARRAY_SIZE(_val_) * sizeof(_type_), \
.is_array = true, \
@@ -224,7 +232,7 @@ struct property_entry {
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
.length = ARRAY_SIZE(_val_) * sizeof(const char *), \
.is_array = true, \
@@ -233,7 +241,7 @@ struct property_entry {
}
#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
.length = sizeof(_type_), \
.is_string = false, \
@@ -250,7 +258,7 @@ struct property_entry {
PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
#define PROPERTY_ENTRY_STRING(_name_, _val_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
.length = sizeof(_val_), \
.is_string = true, \
@@ -258,7 +266,7 @@ struct property_entry {
}
#define PROPERTY_ENTRY_BOOL(_name_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
}
@@ -275,10 +283,15 @@ bool device_dma_supported(struct device *dev);
enum dev_dma_attr device_get_dma_attr(struct device *dev);
+void *device_get_match_data(struct device *dev);
+
int device_get_phy_mode(struct device *dev);
void *device_get_mac_address(struct device *dev, char *addr, int alen);
+int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
+void *fwnode_get_mac_address(struct fwnode_handle *fwnode,
+ char *addr, int alen);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
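/*
 * Illustrative sketch: walking only the available (status = "okay")
 * children of a firmware node with the new iterator; foo_register_child()
 * is a hypothetical per-child hook.
 */
static void foo_scan_children(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	fwnode_for_each_available_child_node(fwnode, child)
		foo_register_child(child);
}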
diff --git a/include/linux/psci.h b/include/linux/psci.h
index bdea1cb5e1db..f724fd8c78e8 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -26,6 +26,7 @@ int psci_cpu_init_idle(unsigned int cpu);
int psci_cpu_suspend_enter(unsigned long index);
struct psci_operations {
+ u32 (*get_version)(void);
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
@@ -46,10 +47,11 @@ static inline int psci_dt_init(void) { return 0; }
#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
int __init psci_acpi_init(void);
bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
+bool acpi_psci_use_hvc(void);
#else
static inline int psci_acpi_init(void) { return 0; }
static inline bool acpi_psci_present(void) { return false; }
+static inline bool acpi_psci_use_hvc(void) { return false; }
#endif
#endif /* __LINUX_PSCI_H */
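/*
 * Illustrative sketch: reading the firmware's PSCI version through the
 * new ->get_version() hook, guarded because firmware probed without it
 * leaves the pointer unset; foo_psci_version() is a made-up wrapper.
 */
static u32 foo_psci_version(void)
{
	return psci_ops.get_version ? psci_ops.get_version() : 0;
}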
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6866df4f31b5..1883d6137e9b 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -45,9 +45,10 @@ struct ptr_ring {
};
/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). If ring is ever resized, callers must hold
- * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
- * producer_lock, the next call to __ptr_ring_produce may fail.
+ * for example cpu_relax().
+ *
+ * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock:
+ * see e.g. ptr_ring_full.
*/
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
@@ -113,7 +114,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
smp_wmb();
- r->queue[r->producer++] = ptr;
+ WRITE_ONCE(r->queue[r->producer++], ptr);
if (unlikely(r->producer >= r->size))
r->producer = 0;
return 0;
@@ -169,26 +170,36 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
return ret;
}
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
- * If ring is never resized, and if the pointer is merely
- * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
- */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
if (likely(r->size))
- return r->queue[r->consumer_head];
+ return READ_ONCE(r->queue[r->consumer_head]);
return NULL;
}
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if the ring is ever resized - see e.g. ptr_ring_empty.
+/*
+ * Test ring empty status without taking any locks.
+ *
+ * NB: This is only safe to call if ring is never resized.
+ *
+ * However, if some other CPU consumes ring entries at the same time, the value
+ * returned is not guaranteed to be correct.
+ *
+ * In this case - to avoid incorrectly detecting the ring
+ * as empty - the CPU consuming the ring entries is responsible
+ * for either consuming all ring entries until the ring is empty,
+ * or synchronizing with some other CPU and causing it to
+ * re-test __ptr_ring_empty and/or consume the ring entries
+ * after the synchronization point.
+ *
+ * Note: callers invoking this in a loop must use a compiler barrier,
+ * for example cpu_relax().
*/
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
- return !__ptr_ring_peek(r);
+ if (likely(r->size))
+ return !r->queue[READ_ONCE(r->consumer_head)];
+ return true;
}
static inline bool ptr_ring_empty(struct ptr_ring *r)
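/*
 * Illustrative sketch: a consumer draining the ring under consumer_lock;
 * only the __ptr_ring_empty() test itself may be done locklessly, and
 * then only if the ring is never resized.  foo_process() is made up.
 */
static void foo_drain(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	while ((ptr = __ptr_ring_consume(r)))
		foo_process(ptr);
	spin_unlock(&r->consumer_lock);
}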
@@ -242,22 +253,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
/* Fundamentally, what we want to do is update consumer
* index and zero out the entry so producer can reuse it.
* Doing it naively at each consume would be as simple as:
- * r->queue[r->consumer++] = NULL;
- * if (unlikely(r->consumer >= r->size))
- * r->consumer = 0;
+ * consumer = r->consumer;
+ * r->queue[consumer++] = NULL;
+ * if (unlikely(consumer >= r->size))
+ * consumer = 0;
+ * r->consumer = consumer;
* but that is suboptimal when the ring is full as producer is writing
* out new entries in the same cache line. Defer these updates until a
* batch of entries has been consumed.
*/
- int head = r->consumer_head++;
+ /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
+ * to work correctly.
+ */
+ int consumer_head = r->consumer_head;
+ int head = consumer_head++;
/* Once we have processed enough entries invalidate them in
* the ring all at once so producer can reuse their space in the ring.
* We also do this when we reach end of the ring - not mandatory
* but helps keep the implementation simple.
*/
- if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
- r->consumer_head >= r->size)) {
+ if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
+ consumer_head >= r->size)) {
/* Zero out entries in the reverse order: this way we touch the
* cache line that producer might currently be reading the last;
* producer won't make progress and touch other cache lines
@@ -265,12 +282,14 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
*/
while (likely(head >= r->consumer_tail))
r->queue[head--] = NULL;
- r->consumer_tail = r->consumer_head;
+ r->consumer_tail = consumer_head;
}
- if (unlikely(r->consumer_head >= r->size)) {
- r->consumer_head = 0;
+ if (unlikely(consumer_head >= r->size)) {
+ consumer_head = 0;
r->consumer_tail = 0;
}
+ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+ WRITE_ONCE(r->consumer_head, consumer_head);
}
static inline void *__ptr_ring_consume(struct ptr_ring *r)
@@ -521,7 +540,9 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
goto done;
}
r->queue[head] = batch[--n];
- r->consumer_tail = r->consumer_head = head;
+ r->consumer_tail = head;
+ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+ WRITE_ONCE(r->consumer_head, head);
}
done:
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 39e2a2ac2471..2b3b350e07b7 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -32,14 +32,15 @@
#ifndef _COMMON_HSI_H
#define _COMMON_HSI_H
+
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/slab.h>
/* dma_addr_t manip */
-#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff))
-#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16))
+#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16))
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
#define DMA_REGPAIR_LE(x, val) do { \
@@ -47,39 +48,45 @@
(x).lo = DMA_LO_LE((val)); \
} while (0)
-#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) \
+ HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64)
+#define HILO_64_REGPAIR(regpair) ({ \
+ typeof(regpair) __regpair = (regpair); \
+ HILO_64(__regpair.hi, __regpair.lo); })
#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
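/*
 * Note on the rework above: the ({ ... }) statement expression evaluates
 * its argument exactly once, so a call such as
 *
 *	u64 v = HILO_64_REGPAIR(foo_next_regpair());   (foo_next_regpair
 *	being a hypothetical accessor with side effects)
 *
 * no longer reads the regpair twice.  PTR_HI keeps its two 16-bit shifts
 * because a single ">> 32" would be undefined on a 32-bit uintptr_t.
 */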
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
+/********************************/
+/* PROTOCOL COMMON FW CONSTANTS */
+/********************************/
-#define X_FINAL_CLEANUP_AGG_INT 1
+#define X_FINAL_CLEANUP_AGG_INT 1
-#define EVENT_RING_PAGE_SIZE_BYTES 4096
+#define EVENT_RING_PAGE_SIZE_BYTES 4096
-#define NUM_OF_GLOBAL_QUEUES 128
-#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
+#define NUM_OF_GLOBAL_QUEUES 128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
-#define ISCSI_CDU_TASK_SEG_TYPE 0
-#define FCOE_CDU_TASK_SEG_TYPE 0
-#define RDMA_CDU_TASK_SEG_TYPE 1
+#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define FCOE_CDU_TASK_SEG_TYPE 0
+#define RDMA_CDU_TASK_SEG_TYPE 1
-#define FW_ASSERT_GENERAL_ATTN_IDX 32
+#define FW_ASSERT_GENERAL_ATTN_IDX 32
-#define MAX_PINNED_CCFC 32
+#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
-#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 16
-#define USTORM_QZONE_SIZE 8
-#define XSTORM_QZONE_SIZE 8
-#define YSTORM_QZONE_SIZE 0
-#define PSTORM_QZONE_SIZE 0
-
-#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 16
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 8
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
@@ -102,8 +109,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 20
-#define FW_REVISION_VERSION 0
+#define FW_MINOR_VERSION 33
+#define FW_REVISION_VERSION 1
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -115,10 +122,10 @@
#define MAX_NUM_PORTS_BB (2)
#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
-#define MAX_NUM_PFS_K2 (16)
-#define MAX_NUM_PFS_BB (8)
-#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
-#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_BB (120)
@@ -141,29 +148,14 @@
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
#define NUM_PHYS_TCS_4PORT_K2 (4)
#define NUM_OF_PHYS_TCS (8)
-
+#define PURE_LB_TC NUM_OF_PHYS_TCS
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
-#define LB_TC (NUM_OF_PHYS_TCS)
-
-/* Num of possible traffic priority values */
-#define NUM_OF_PRIO (8)
-
-#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
-
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES (8)
-#define NUM_OF_LCIDS (320)
-#define NUM_OF_LTIDS (320)
-
-/* Clock values */
-#define MASTER_CLK_FREQ_E4 (375e6)
-#define STORM_CLK_FREQ_E4 (1000e6)
-#define CLK25M_CLK_FREQ_E4 (25e6)
+#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
/* Global PXP windows (GTT) */
#define NUM_OF_GTT 19
@@ -172,17 +164,17 @@
#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
/* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION 10
/*****************/
/* CDU CONSTANTS */
/*****************/
-#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
-#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
@@ -201,45 +193,45 @@
#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
#define DQ_DEMS_ROCE_CQ_CONS 7
-/* XCM agg val selection */
-#define DQ_XCM_AGG_VAL_SEL_WORD2 0
-#define DQ_XCM_AGG_VAL_SEL_WORD3 1
-#define DQ_XCM_AGG_VAL_SEL_WORD4 2
-#define DQ_XCM_AGG_VAL_SEL_WORD5 3
-#define DQ_XCM_AGG_VAL_SEL_REG3 4
-#define DQ_XCM_AGG_VAL_SEL_REG4 5
-#define DQ_XCM_AGG_VAL_SEL_REG5 6
-#define DQ_XCM_AGG_VAL_SEL_REG6 7
-
-/* XCM agg val selection */
-#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2
-#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
-#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+/* XCM agg val selection (HW) */
+#define DQ_XCM_AGG_VAL_SEL_WORD2 0
+#define DQ_XCM_AGG_VAL_SEL_WORD3 1
+#define DQ_XCM_AGG_VAL_SEL_WORD4 2
+#define DQ_XCM_AGG_VAL_SEL_WORD5 3
+#define DQ_XCM_AGG_VAL_SEL_REG3 4
+#define DQ_XCM_AGG_VAL_SEL_REG4 5
+#define DQ_XCM_AGG_VAL_SEL_REG5 6
+#define DQ_XCM_AGG_VAL_SEL_REG6 7
+
+/* XCM agg val selection (FW) */
+#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
#define DQ_UCM_AGG_VAL_SEL_WORD1 1
#define DQ_UCM_AGG_VAL_SEL_WORD2 2
#define DQ_UCM_AGG_VAL_SEL_WORD3 3
-#define DQ_UCM_AGG_VAL_SEL_REG0 4
-#define DQ_UCM_AGG_VAL_SEL_REG1 5
-#define DQ_UCM_AGG_VAL_SEL_REG2 6
-#define DQ_UCM_AGG_VAL_SEL_REG3 7
+#define DQ_UCM_AGG_VAL_SEL_REG0 4
+#define DQ_UCM_AGG_VAL_SEL_REG1 5
+#define DQ_UCM_AGG_VAL_SEL_REG2 6
+#define DQ_UCM_AGG_VAL_SEL_REG3 7
/* UCM agg val selection (FW) */
#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2
@@ -263,7 +255,7 @@
#define DQ_TCM_ROCE_RQ_PROD_CMD \
DQ_TCM_AGG_VAL_SEL_WORD0
-/* XCM agg counter flag selection */
+/* XCM agg counter flag selection (HW) */
#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
@@ -273,20 +265,20 @@
#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
-/* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+/* XCM agg counter flag selection (FW) */
+#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -317,9 +309,9 @@
#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
/* TCM agg counter flag selection (FW) */
-#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
-#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
-#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
@@ -327,18 +319,18 @@
#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
/* PWM address mapping */
-#define DQ_PWM_OFFSET_DPM_BASE 0x0
-#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END 0x27
#define DQ_PWM_OFFSET_XCM16_BASE 0x40
#define DQ_PWM_OFFSET_XCM32_BASE 0x44
#define DQ_PWM_OFFSET_UCM16_BASE 0x48
#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
-#define DQ_PWM_OFFSET_UCM16_4 0x50
+#define DQ_PWM_OFFSET_UCM16_4 0x50
#define DQ_PWM_OFFSET_TCM16_BASE 0x58
#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
-#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
-#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
-#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
+#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
@@ -347,10 +339,11 @@
#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
-#define DQ_REGION_SHIFT (12)
+
+#define DQ_REGION_SHIFT (12)
/* DPM */
-#define DQ_DPM_WQE_BUFF_SIZE (320)
+#define DQ_DPM_WQE_BUFF_SIZE (320)
/* Conn type ranges */
#define DQ_CONN_TYPE_RANGE_SHIFT (4)
@@ -359,29 +352,30 @@
/* QM CONSTANTS */
/*****************/
-/* number of TX queues in the QM */
+/* Number of TX queues in the QM */
#define MAX_QM_TX_QUEUES_K2 512
#define MAX_QM_TX_QUEUES_BB 448
#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
-/* number of Other queues in the QM */
+/* Number of Other queues in the QM */
#define MAX_QM_OTHER_QUEUES_BB 64
#define MAX_QM_OTHER_QUEUES_K2 128
#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
-/* number of queues in a PF queue group */
+/* Number of queues in a PF queue group */
#define QM_PF_QUEUE_GROUP_SIZE 8
-/* the size of a single queue element in bytes */
-#define QM_PQ_ELEMENT_SIZE 4
+/* The size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE 4
-/* base number of Tx PQs in the CM PQ representation.
- * should be used when storing PQ IDs in CM PQ registers and context
+/* Base number of Tx PQs in the CM PQ representation.
+ * Should be used when storing PQ IDs in CM PQ registers and context.
*/
-#define CM_TX_PQ_BASE 0x200
+#define CM_TX_PQ_BASE 0x200
-/* number of global Vport/QCN rate limiters */
+/* Number of global Vport/QCN rate limiters */
#define MAX_QM_GLOBAL_RLS 256
+
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
@@ -400,7 +394,7 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB 12
+#define PIS_PER_SB_E4 12
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
@@ -432,8 +426,7 @@
#define IGU_CMD_INT_ACK_BASE 0x0400
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
- MAX_TOT_SB_PER_PATH - \
- 1)
+ MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
@@ -447,8 +440,7 @@
#define IGU_CMD_PROD_UPD_BASE 0x0600
#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
- MAX_TOT_SB_PER_PATH - \
- 1)
+ MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
@@ -514,129 +506,126 @@
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
/* PF BAR */
-#define PXP_BAR0_START_GRC 0x0000
-#define PXP_BAR0_GRC_LENGTH 0x1C00000
-#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
- PXP_BAR0_GRC_LENGTH - 1)
-
-#define PXP_BAR0_START_IGU 0x1C00000
-#define PXP_BAR0_IGU_LENGTH 0x10000
-#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
- PXP_BAR0_IGU_LENGTH - 1)
-
-#define PXP_BAR0_START_TSDM 0x1C80000
-#define PXP_BAR0_SDM_LENGTH 0x40000
+#define PXP_BAR0_START_GRC 0x0000
+#define PXP_BAR0_GRC_LENGTH 0x1C00000
+#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
+ PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU 0x1C00000
+#define PXP_BAR0_IGU_LENGTH 0x10000
+#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
+ PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM 0x1C80000
+#define PXP_BAR0_SDM_LENGTH 0x40000
#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
-#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_MSDM 0x1D00000
-#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_MSDM 0x1D00000
+#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_USDM 0x1D80000
-#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_USDM 0x1D80000
+#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_XSDM 0x1E00000
-#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_XSDM 0x1E00000
+#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_YSDM 0x1E80000
-#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_YSDM 0x1E80000
+#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_PSDM 0x1F00000
-#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_PSDM 0x1F00000
+#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
/* VF BAR */
-#define PXP_VF_BAR0 0
-
-#define PXP_VF_BAR0_START_GRC 0x3E00
-#define PXP_VF_BAR0_GRC_LENGTH 0x200
-#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
- PXP_VF_BAR0_GRC_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_IGU 0
-#define PXP_VF_BAR0_IGU_LENGTH 0x3000
-#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
- PXP_VF_BAR0_IGU_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_DQ 0x3000
-#define PXP_VF_BAR0_DQ_LENGTH 0x200
-#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
-#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
- PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
- + 4)
-#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
- PXP_VF_BAR0_DQ_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
-#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
-#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
-#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
-#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
-#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
-#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
-#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
-#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
-
-#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
-
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
+ PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ + 4)
+#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
+ PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_START_IGU2 0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
+#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \
+ PXP_VF_BAR0_IGU2_LENGTH - 1)
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-#define PXP_QUEUES_ZONE_MAX_NUM 320
+
+/* Host Interface */
+#define PXP_QUEUES_ZONE_MAX_NUM 320
+
/*****************/
/* PRM CONSTANTS */
/*****************/
-#define PRM_DMA_PAD_BYTES_NUM 2
+#define PRM_DMA_PAD_BYTES_NUM 2
+
/*****************/
/* SDMs CONSTANTS */
/*****************/
-#define SDM_OP_GEN_TRIG_NONE 0
-#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
-#define SDM_OP_GEN_TRIG_AGG_INT 2
-#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
@@ -644,26 +633,26 @@
/* Completion types */
/********************/
-#define SDM_COMP_TYPE_NONE 0
-#define SDM_COMP_TYPE_WAKE_THREAD 1
-#define SDM_COMP_TYPE_AGG_INT 2
-#define SDM_COMP_TYPE_CM 3
-#define SDM_COMP_TYPE_LOADER 4
-#define SDM_COMP_TYPE_PXP 5
-#define SDM_COMP_TYPE_INDICATE_ERROR 6
-#define SDM_COMP_TYPE_RELEASE_THREAD 7
-#define SDM_COMP_TYPE_RAM 8
-#define SDM_COMP_TYPE_INC_ORDER_CNT 9
+#define SDM_COMP_TYPE_NONE 0
+#define SDM_COMP_TYPE_WAKE_THREAD 1
+#define SDM_COMP_TYPE_AGG_INT 2
+#define SDM_COMP_TYPE_CM 3
+#define SDM_COMP_TYPE_LOADER 4
+#define SDM_COMP_TYPE_PXP 5
+#define SDM_COMP_TYPE_INDICATE_ERROR 6
+#define SDM_COMP_TYPE_RELEASE_THREAD 7
+#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9
/*****************/
-/* PBF Constants */
+/* PBF CONSTANTS */
/*****************/
/* Number of PBF command queue lines. Each line is 32B. */
-#define PBF_MAX_CMD_LINES 3328
+#define PBF_MAX_CMD_LINES 3328
/* Number of BTB blocks. Each block is 256B. */
-#define BTB_MAX_BLOCKS 1440
+#define BTB_MAX_BLOCKS 1440
/*****************/
/* PRS CONSTANTS */
@@ -671,14 +660,7 @@
#define PRS_GFT_CAM_LINES_NO_MATCH 31
-/* Async data KCQ CQE */
-struct async_data {
- __le32 cid;
- __le16 itid;
- u8 error_code;
- u8 fw_debug_param;
-};
-
+/* Interrupt coalescing TimeSet */
struct coalescing_timeset {
u8 value;
#define COALESCING_TIMESET_TIMESET_MASK 0x7F
@@ -692,23 +674,32 @@ struct common_queue_zone {
__le16 reserved;
};
+/* ETH Rx producers data */
struct eth_rx_prod_data {
__le16 bd_prod;
__le16 cqe_prod;
};
-struct regpair {
- __le32 lo;
- __le32 hi;
+struct tcp_ulp_connect_done_params {
+ __le16 mss;
+ u8 snd_wnd_scale;
+ u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
};
-struct vf_pf_channel_eqe_data {
- struct regpair msg_addr;
+struct iscsi_connect_done_results {
+ __le16 icid;
+ __le16 conn_id;
+ struct tcp_ulp_connect_done_params params;
};
struct iscsi_eqe_data {
- __le32 cid;
+ __le16 icid;
__le16 conn_id;
+ __le16 reserved;
u8 error_code;
u8 error_pdu_opcode_reserved;
#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
@@ -719,52 +710,6 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
-struct rdma_eqe_destroy_qp {
- __le32 cid;
- u8 reserved[4];
-};
-
-union rdma_eqe_data {
- struct regpair async_handle;
- struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
-};
-
-struct malicious_vf_eqe_data {
- u8 vf_id;
- u8 err_id;
- __le16 reserved[3];
-};
-
-struct initial_cleanup_eqe_data {
- u8 vf_id;
- u8 reserved[7];
-};
-
-/* Event Data Union */
-union event_ring_data {
- u8 bytes[8];
- struct vf_pf_channel_eqe_data vf_pf_channel;
- struct iscsi_eqe_data iscsi_info;
- union rdma_eqe_data rdma_data;
- struct malicious_vf_eqe_data malicious_vf;
- struct initial_cleanup_eqe_data vf_init_cleanup;
-};
-
-/* Event Ring Entry */
-struct event_ring_entry {
- u8 protocol_id;
- u8 opcode;
- __le16 reserved0;
- __le16 echo;
- u8 fw_return_code;
- u8 flags;
-#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
-#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
-#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
-#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
- union event_ring_data data;
-};
-
/* Multi function mode */
enum mf_mode {
ERROR_MODE /* Unsupported mode */,
@@ -781,13 +726,31 @@ enum protocol_type {
PROTOCOLID_CORE,
PROTOCOLID_ETH,
PROTOCOLID_IWARP,
- PROTOCOLID_RESERVED5,
+ PROTOCOLID_RESERVED0,
PROTOCOLID_PREROCE,
PROTOCOLID_COMMON,
- PROTOCOLID_RESERVED6,
+ PROTOCOLID_RESERVED1,
MAX_PROTOCOL_TYPE
};
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* RoCE Destroy Event Data */
+struct rdma_eqe_destroy_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
+/* RDMA Event Data Union */
+union rdma_eqe_data {
+ struct regpair async_handle;
+ struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
+/* Ustorm Queue Zone */
struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
u8 reserved[3];
@@ -798,62 +761,71 @@ struct ustorm_queue_zone {
struct common_queue_zone common;
};
-/* status block structure */
+/* Status block structure */
struct cau_pi_entry {
- u32 prod;
-#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
-#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
-#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
-#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
-#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
-#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
-#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
-#define CAU_PI_ENTRY_RESERVED_SHIFT 24
+ __le32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
+#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT 24
};
-/* status block structure */
+/* Status block structure */
struct cau_sb_entry {
- u32 data;
-#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
-#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
-#define CAU_SB_ENTRY_STATE0_MASK 0xF
-#define CAU_SB_ENTRY_STATE0_SHIFT 24
-#define CAU_SB_ENTRY_STATE1_MASK 0xF
-#define CAU_SB_ENTRY_STATE1_SHIFT 28
- u32 params;
-#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
-#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
-#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
-#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
-#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
-#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
-#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
-#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
-#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
-#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
-#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
-#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
-#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
-#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
-#define CAU_SB_ENTRY_TPH_MASK 0x1
-#define CAU_SB_ENTRY_TPH_SHIFT 31
+ __le32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
+#define CAU_SB_ENTRY_STATE0_MASK 0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT 24
+#define CAU_SB_ENTRY_STATE1_MASK 0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT 28
+ __le32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
+#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
+#define CAU_SB_ENTRY_TPH_MASK 0x1
+#define CAU_SB_ENTRY_TPH_SHIFT 31
};
-/* core doorbell data */
+/* IGU cleanup bit values, distinguishing a cleanup command from a
+ * producer/consumer update.
+ */
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+/* Core doorbell data */
struct core_db_data {
u8 params;
-#define CORE_DB_DATA_DEST_MASK 0x3
-#define CORE_DB_DATA_DEST_SHIFT 0
-#define CORE_DB_DATA_AGG_CMD_MASK 0x3
-#define CORE_DB_DATA_AGG_CMD_SHIFT 2
-#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
-#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
-#define CORE_DB_DATA_RESERVED_MASK 0x1
-#define CORE_DB_DATA_RESERVED_SHIFT 5
-#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
- u8 agg_flags;
- __le16 spq_prod;
+#define CORE_DB_DATA_DEST_MASK 0x3
+#define CORE_DB_DATA_DEST_SHIFT 0
+#define CORE_DB_DATA_AGG_CMD_MASK 0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT 2
+#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
+#define CORE_DB_DATA_RESERVED_MASK 0x1
+#define CORE_DB_DATA_RESERVED_SHIFT 5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 spq_prod;
};
/* Enum of doorbell aggregative command selection */
@@ -909,67 +881,69 @@ struct db_l2_dpm_sge {
struct regpair addr;
__le16 nbytes;
__le16 bitfields;
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
-#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
-#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
-#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
-#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
-#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
-#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
+#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
+#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
__le32 reserved2;
};
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
__le32 addr;
-#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
-#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
-#define DB_LEGACY_ADDR_DEMS_MASK 0x7
-#define DB_LEGACY_ADDR_DEMS_SHIFT 2
-#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
-#define DB_LEGACY_ADDR_ICID_SHIFT 5
+#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK 0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT 2
+#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
/* Structure for doorbell address, in PWM mode */
struct db_pwm_addr {
__le32 addr;
#define DB_PWM_ADDR_RESERVED0_MASK 0x7
-#define DB_PWM_ADDR_RESERVED0_SHIFT 0
-#define DB_PWM_ADDR_OFFSET_MASK 0x7F
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+#define DB_PWM_ADDR_OFFSET_MASK 0x7F
#define DB_PWM_ADDR_OFFSET_SHIFT 3
-#define DB_PWM_ADDR_WID_MASK 0x3
-#define DB_PWM_ADDR_WID_SHIFT 10
-#define DB_PWM_ADDR_DPI_MASK 0xFFFF
-#define DB_PWM_ADDR_DPI_SHIFT 12
+#define DB_PWM_ADDR_WID_MASK 0x3
+#define DB_PWM_ADDR_WID_SHIFT 10
+#define DB_PWM_ADDR_DPI_MASK 0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT 12
#define DB_PWM_ADDR_RESERVED1_MASK 0xF
-#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
};
-/* Parameters to RoCE firmware, passed in EDPM doorbell */
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
struct db_rdma_dpm_params {
__le32 params;
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
-/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
+ * DPM burst.
+ */
struct db_rdma_dpm_data {
__le16 icid;
__le16 prod_val;
@@ -987,22 +961,22 @@ enum igu_int_cmd {
/* IGU producer or consumer update command */
struct igu_prod_cons_update {
- u32 sb_id_and_flags;
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
-#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
-#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
- u32 reserved1;
+ __le32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
};
/* Igu segments access for default status block only */
@@ -1012,38 +986,63 @@ enum igu_seg_access {
MAX_IGU_SEG_ACCESS
};
+/* Enumeration for L3 type field of parsing_and_err_flags.
+ * L3Type: 0 - unknown (not IP), 1 - IPv4, 2 - IPv6.
+ * (This field can be filled according to the last EtherType.)
+ */
+enum l3_type {
+ e_l3_type_unknown,
+ e_l3_type_ipv4,
+ e_l3_type_ipv6,
+ MAX_L3_TYPE
+};
+
+/* Enumeration for l4Protocol field of parsing_and_err_flags.
+ * L4-protocol: 0 - none, 1 - TCP, 2 - UDP.
+ * If the packet is an IPv4 fragment and not the first fragment, the
+ * protocol type should be set to none.
+ */
+enum l4_protocol {
+ e_l4_protocol_none,
+ e_l4_protocol_tcp,
+ e_l4_protocol_udp,
+ MAX_L4_PROTOCOL
+};
+
+/* Parsing and error flags field */
struct parsing_and_err_flags {
__le16 flags;
-#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
-#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
+/* Parsing error flags bitmap */
struct parsing_err_flags {
__le16 flags;
#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
@@ -1080,266 +1079,260 @@ struct parsing_err_flags {
#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
};
+/* Pb context */
struct pb_context {
__le32 crc[4];
};
+/* Concrete Function ID */
struct pxp_concrete_fid {
__le16 fid;
-#define PXP_CONCRETE_FID_PFID_MASK 0xF
-#define PXP_CONCRETE_FID_PFID_SHIFT 0
-#define PXP_CONCRETE_FID_PORT_MASK 0x3
-#define PXP_CONCRETE_FID_PORT_SHIFT 4
-#define PXP_CONCRETE_FID_PATH_MASK 0x1
-#define PXP_CONCRETE_FID_PATH_SHIFT 6
-#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
-#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_CONCRETE_FID_VFID_MASK 0xFF
-#define PXP_CONCRETE_FID_VFID_SHIFT 8
+#define PXP_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_CONCRETE_FID_PORT_MASK 0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT 4
+#define PXP_CONCRETE_FID_PATH_MASK 0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT 6
+#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT 8
};
+/* Concrete Function ID */
struct pxp_pretend_concrete_fid {
__le16 fid;
-#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
-#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
-#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
};
+/* Function ID */
union pxp_pretend_fid {
struct pxp_pretend_concrete_fid concrete_fid;
- __le16 opaque_fid;
+ __le16 opaque_fid;
};
-/* Pxp Pretend Command Register. */
+/* Pxp Pretend Command Register */
struct pxp_pretend_cmd {
- union pxp_pretend_fid fid;
- __le16 control;
-#define PXP_PRETEND_CMD_PATH_MASK 0x1
-#define PXP_PRETEND_CMD_PATH_SHIFT 0
-#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
-#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
-#define PXP_PRETEND_CMD_PORT_MASK 0x3
-#define PXP_PRETEND_CMD_PORT_SHIFT 2
-#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
-#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
-#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
-#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
-#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
-#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
-#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
-#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
-#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
-#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
+ union pxp_pretend_fid fid;
+ __le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT 0
+#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
+#define PXP_PRETEND_CMD_PORT_MASK 0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT 2
+#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
+#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
};
-/* PTT Record in PXP Admin Window. */
+/* PTT Record in PXP Admin Window */
struct pxp_ptt_entry {
- __le32 offset;
-#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
-#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
-#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
-#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
- struct pxp_pretend_cmd pretend;
+ __le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
+#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+ struct pxp_pretend_cmd pretend;
};
-/* VF Zone A Permission Register. */
+/* VF Zone A Permission Register */
struct pxp_vf_zone_a_permission {
__le32 control;
-#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
-#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
-#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
-#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
};
-/* RSS hash type */
+/* Rdif context */
struct rdif_task_context {
__le32 initial_ref_tag;
__le16 app_tag_value;
__le16 app_tag_mask;
u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
u8 partial_dif_data[7];
__le16 partial_crc_value;
__le16 partial_checksum_value;
__le32 offset_in_io;
__le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
__le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
-#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
-#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
__le32 reserved2;
};
-/* RSS hash type */
-enum rss_hash_type {
- RSS_HASH_TYPE_DEFAULT = 0,
- RSS_HASH_TYPE_IPV4 = 1,
- RSS_HASH_TYPE_TCP_IPV4 = 2,
- RSS_HASH_TYPE_IPV6 = 3,
- RSS_HASH_TYPE_TCP_IPV6 = 4,
- RSS_HASH_TYPE_UDP_IPV4 = 5,
- RSS_HASH_TYPE_UDP_IPV6 = 6,
- MAX_RSS_HASH_TYPE
-};
-
-/* status block structure */
-struct status_block {
- __le16 pi_array[PIS_PER_SB];
+/* Status block structure */
+struct status_block_e4 {
+ __le16 pi_array[PIS_PER_SB_E4];
__le32 sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
+#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
};
+/* Tdif context */
struct tdif_task_context {
__le32 initial_ref_tag;
__le16 app_tag_value;
__le16 app_tag_mask;
- __le16 partial_crc_valueB;
- __le16 partial_checksum_valueB;
+ __le16 partial_crc_value_b;
+ __le16 partial_checksum_value_b;
__le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
u8 reserved1;
u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
__le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
-#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
- __le32 offset_in_iob;
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offset_in_io_b;
__le16 partial_crc_value_a;
- __le16 partial_checksum_valuea_;
- __le32 offset_in_ioa;
+ __le16 partial_checksum_value_a;
+ __le32 offset_in_io_a;
u8 partial_dif_data_a[8];
u8 partial_dif_data_b[8];
};
+/* Timers context */
struct timers_context {
__le32 logical_client_0;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
@@ -1385,6 +1378,7 @@ struct timers_context {
#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
};
+/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */
enum tunnel_next_protocol {
e_unknown = 0,
e_l2 = 1,
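The paired _MASK/_SHIFT defines above are not open-coded by the drivers; qed's common headers provide GET_FIELD()/SET_FIELD() helpers that combine each pair. Below is a minimal, self-contained sketch of that access pattern using two of the renamed rdif_task_context fields from this hunk — the helper definitions paraphrase the qed ones and are illustrative, not copied from this patch:

#include <stdint.h>

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= (((uint64_t)(flag) & (name##_MASK)) <<	\
			    (name##_SHIFT));				\
	} while (0)

/* Two of the renamed fields from rdif_task_context::flags0 above */
#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK	0x3
#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT	4
#define RDIF_TASK_CONTEXT_CRC_SEED_MASK		0x1
#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT	6

static inline uint8_t rdif_flags0_example(void)
{
	uint8_t flags0 = 0;

	/* Pack protection type 2 and a non-zero CRC seed into flags0 */
	SET_FIELD(flags0, RDIF_TASK_CONTEXT_PROTECTION_TYPE, 0x2);
	SET_FIELD(flags0, RDIF_TASK_CONTEXT_CRC_SEED, 1);

	/* Reads back 0x2: (0x60 >> 4) & 0x3 */
	return GET_FIELD(flags0, RDIF_TASK_CONTEXT_PROTECTION_TYPE);
}

Note that the rename from run-together names (e.g. RDIF_TASK_CONTEXT_PROTECTIONTYPE) to underscore-separated ones changes only the macro spellings; every mask and shift value in the hunk is unchanged, so the encoded layout is identical.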
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cb06e6e368e1..9db02856623b 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -36,150 +36,168 @@
/********************/
/* ETH FW CONSTANTS */
/********************/
-#define ETH_HSI_VER_MAJOR 3
-#define ETH_HSI_VER_MINOR 10
+
+#define ETH_HSI_VER_MAJOR 3
+#define ETH_HSI_VER_MINOR 10
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
-#define ETH_CACHE_LINE_SIZE 64
-#define ETH_RX_CQE_GAP 32
-#define ETH_MAX_RAMROD_PER_CON 8
-#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
-#define ETH_RX_NUM_NEXT_PAGE_BDS 2
-
-#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
-#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
-
-#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
-#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
-#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
-#define ETH_TX_MAX_LSO_HDR_NBD 4
-#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
-#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
-#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
-#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
-#define ETH_TX_MAX_LSO_HDR_BYTES 510
-#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
-#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
-#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
-#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
-#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
-
-#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define ETH_CACHE_LINE_SIZE 64
+#define ETH_RX_CQE_GAP 32
+#define ETH_MAX_RAMROD_PER_CON 8
+#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
+#define ETH_TX_MAX_LSO_HDR_NBD 4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES 510
+#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
+#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
+
+#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
-#define ETH_RX_MAX_BUFF_PER_PKT 5
-#define ETH_RX_BD_THRESHOLD 12
+#define ETH_RX_MAX_BUFF_PER_PKT 5
+#define ETH_RX_BD_THRESHOLD 12
-/* num of MAC/VLAN filters */
-#define ETH_NUM_MAC_FILTERS 512
-#define ETH_NUM_VLAN_FILTERS 512
+/* Num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS 512
+#define ETH_NUM_VLAN_FILTERS 512
-/* approx. multicast constants */
-#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
-#define ETH_MULTICAST_MAC_BINS 256
-#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
+/* Approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
+#define ETH_MULTICAST_MAC_BINS 256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
-/* ethernet vport update constants */
-#define ETH_FILTER_RULES_COUNT 10
-#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
-#define ETH_RSS_KEY_SIZE_REGS 10
-#define ETH_RSS_ENGINE_NUM_K2 207
-#define ETH_RSS_ENGINE_NUM_BB 127
+/* Ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT 10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_KEY_SIZE_REGS 10
+#define ETH_RSS_ENGINE_NUM_K2 207
+#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
-#define ETH_TPA_MAX_AGGS_NUM 64
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
-#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
-#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+#define ETH_TPA_MAX_AGGS_NUM 64
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+/* GFS constants */
+#define ETH_GFT_TRASH_CAN_VPORT 0x1FF
+
+/* Destination port mode */
+enum dest_port_mode {
+ DEST_PORT_PHY,
+ DEST_PORT_LOOPBACK,
+ DEST_PORT_PHY_LOOPBACK,
+ DEST_PORT_DROP,
+ MAX_DEST_PORT_MODE
+};
+
+/* Ethernet address type */
+enum eth_addr_type {
+ BROADCAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ UNICAST_ADDRESS,
+ UNKNOWN_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
struct eth_tx_1st_bd_flags {
u8 bitfields;
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
-#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};
-/* The parsing information data fo rthe first tx bd of a given packet. */
+/* The parsing information data for the first tx bd of a given packet */
struct eth_tx_data_1st_bd {
__le16 vlan;
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
-#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};
-/* The parsing information data for the second tx bd of a given packet. */
+/* The parsing information data for the second tx bd of a given packet */
struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size;
__le16 bitfields1;
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
-#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
-#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
__le16 bitfields2;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
-/* Firmware data for L2-EDPM packet. */
+/* Firmware data for L2-EDPM packet */
struct eth_edpm_fw_data {
struct eth_tx_data_1st_bd data_1st_bd;
struct eth_tx_data_2nd_bd data_2nd_bd;
__le32 reserved;
};
-struct eth_fast_path_cqe_fw_debug {
- __le16 reserved2;
-};
-
-/* tunneling parsing flags */
+/* Tunneling parsing flags */
struct eth_tunnel_parsing_flags {
u8 flags;
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
@@ -199,24 +217,24 @@ struct eth_tunnel_parsing_flags {
/* PMD flow control bits */
struct eth_pmd_flow_flags {
u8 flags;
-#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
-#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
-#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
-#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
};
-/* Regular ETH Rx FP CQE. */
+/* Regular ETH Rx FP CQE */
struct eth_fast_path_rx_reg_cqe {
u8 type;
u8 bitfields;
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
-#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
__le16 pkt_len;
struct parsing_and_err_flags pars_flags;
__le16 vlan_tag;
@@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe {
u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
- u8 reserved[9];
- struct eth_fast_path_cqe_fw_debug fw_debug;
- u8 reserved1[3];
+ u8 reserved;
+ __le16 flow_id;
+ u8 reserved1[11];
struct eth_pmd_flow_flags pmd_flags;
};
-/* TPA-continue ETH Rx FP CQE. */
+/* TPA-continue ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_cont_cqe {
u8 type;
u8 tpa_agg_index;
@@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
-/* TPA-end ETH Rx FP CQE. */
+/* TPA-end ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_end_cqe {
u8 type;
u8 tpa_agg_index;
@@ -259,16 +277,16 @@ struct eth_fast_path_rx_tpa_end_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
-/* TPA-start ETH Rx FP CQE. */
+/* TPA-start ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_start_cqe {
u8 type;
u8 bitfields;
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
__le16 seg_len;
struct parsing_and_err_flags pars_flags;
__le16 vlan_tag;
@@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe {
u8 tpa_agg_index;
u8 header_len;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
- struct eth_fast_path_cqe_fw_debug fw_debug;
+ __le16 flow_id;
u8 reserved;
struct eth_pmd_flow_flags pmd_flags;
};
@@ -295,24 +313,24 @@ struct eth_rx_bd {
struct regpair addr;
};
-/* regular ETH Rx SP CQE */
+/* Regular ETH Rx SP CQE */
struct eth_slow_path_rx_cqe {
- u8 type;
- u8 ramrod_cmd_id;
- u8 error_flag;
- u8 reserved[25];
- __le16 echo;
- u8 reserved1;
+ u8 type;
+ u8 ramrod_cmd_id;
+ u8 error_flag;
+ u8 reserved[25];
+ __le16 echo;
+ u8 reserved1;
struct eth_pmd_flow_flags pmd_flags;
};
-/* union for all ETH Rx CQE types */
+/* Union for all ETH Rx CQE types */
union eth_rx_cqe {
- struct eth_fast_path_rx_reg_cqe fast_path_regular;
- struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
- struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
- struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
- struct eth_slow_path_rx_cqe slow_path;
+ struct eth_fast_path_rx_reg_cqe fast_path_regular;
+ struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+ struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+ struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
+ struct eth_slow_path_rx_cqe slow_path;
};
/* ETH Rx CQE type */
@@ -339,7 +357,7 @@ enum eth_rx_tunn_type {
MAX_ETH_RX_TUNN_TYPE
};
-/* Aggregation end reason. */
+/* Aggregation end reason */
enum eth_tpa_end_reason {
ETH_AGG_END_UNUSED,
ETH_AGG_END_SP_UPDATE,
@@ -354,59 +372,59 @@ enum eth_tpa_end_reason {
/* The first tx bd of a given packet */
struct eth_tx_1st_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_1st_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_1st_bd data;
};
/* The second tx bd of a given packet */
struct eth_tx_2nd_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_2nd_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_2nd_bd data;
};
-/* The parsing information data for the third tx bd of a given packet. */
+/* The parsing information data for the third tx bd of a given packet */
struct eth_tx_data_3rd_bd {
__le16 lso_mss;
__le16 bitfields;
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
-#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
-#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
u8 tunn_l4_hdr_start_offset_w;
u8 tunn_hdr_size_w;
};
/* The third tx bd of a given packet */
struct eth_tx_3rd_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_3rd_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_3rd_bd data;
};
-/* Complementary information for the regular tx bd of a given packet. */
+/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
- __le16 reserved0;
- __le16 bitfields;
-#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
-#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
-#define ETH_TX_DATA_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
-#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+ __le16 reserved0;
+ __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
__le16 reserved3;
};
/* The common non-special TX BD ring element */
struct eth_tx_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_bd data;
};
union eth_tx_bd_types {
@@ -434,18 +452,30 @@ struct xstorm_eth_queue_zone {
/* ETH doorbell data */
struct eth_db_data {
u8 params;
-#define ETH_DB_DATA_DEST_MASK 0x3
-#define ETH_DB_DATA_DEST_SHIFT 0
-#define ETH_DB_DATA_AGG_CMD_MASK 0x3
-#define ETH_DB_DATA_AGG_CMD_SHIFT 2
-#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
-#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
-#define ETH_DB_DATA_RESERVED_MASK 0x1
-#define ETH_DB_DATA_RESERVED_SHIFT 5
-#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ETH_DB_DATA_DEST_MASK 0x3
+#define ETH_DB_DATA_DEST_SHIFT 0
+#define ETH_DB_DATA_AGG_CMD_MASK 0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT 2
+#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
+#define ETH_DB_DATA_RESERVED_MASK 0x1
+#define ETH_DB_DATA_RESERVED_SHIFT 5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 bd_prod;
};
+/* RSS hash type */
+enum rss_hash_type {
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
+ MAX_RSS_HASH_TYPE
+};
+
#endif /* __ETH_COMMON__ */
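The rss_hash_type enum moved to the tail of eth_common.h above is the decode target for the RSS_HASH_TYPE bitfield carried in the fast-path Rx CQEs earlier in this file. A short sketch of that decode, reusing a GET_FIELD() helper as above (the raw byte value is illustrative):

#include <stdint.h>

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)

/* Field layout of eth_fast_path_rx_reg_cqe::bitfields, from above */
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK	0x7
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT	0
#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK		0xF
#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT		3

static inline void rx_cqe_decode_example(uint8_t bitfields)
{
	/* For bitfields == 0x12: hash_type = 2 (RSS_HASH_TYPE_TCP_IPV4),
	 * tc = 2.
	 */
	int hash_type = GET_FIELD(bitfields,
				  ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
	int tc = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_TC);

	(void)hash_type;
	(void)tc;
}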
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 12fc9e788eea..22077c586853 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -8,217 +8,78 @@
#ifndef __FCOE_COMMON__
#define __FCOE_COMMON__
+
/*********************/
/* FCOE FW CONSTANTS */
/*********************/
#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
-struct fcoe_abts_pkt {
- __le32 abts_rsp_fc_payload_lo;
- __le16 abts_rsp_rx_id;
- u8 abts_rsp_rctl;
- u8 reserved2;
-};
-
-/* FCoE additional WQE (Sq/XferQ) information */
-union fcoe_additional_info_union {
- __le32 previous_tid;
- __le32 parent_tid;
- __le32 burst_length;
- __le32 seq_rec_updated_offset;
-};
-
-struct fcoe_exp_ro {
- __le32 data_offset;
- __le32 reserved;
-};
-
-union fcoe_cleanup_addr_exp_ro_union {
- struct regpair abts_rsp_fc_payload_hi;
- struct fcoe_exp_ro exp_ro;
-};
-
-/* FCoE Ramrod Command IDs */
-enum fcoe_completion_status {
- FCOE_COMPLETION_STATUS_SUCCESS,
- FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
- FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
- MAX_FCOE_COMPLETION_STATUS
-};
-
-struct fc_addr_nw {
- u8 addr_lo;
- u8 addr_mid;
- u8 addr_hi;
-};
-
-/* FCoE connection offload */
-struct fcoe_conn_offload_ramrod_data {
- struct regpair sq_pbl_addr;
- struct regpair sq_curr_page_addr;
- struct regpair sq_next_page_addr;
- struct regpair xferq_pbl_addr;
- struct regpair xferq_curr_page_addr;
- struct regpair xferq_next_page_addr;
- struct regpair respq_pbl_addr;
- struct regpair respq_curr_page_addr;
- struct regpair respq_next_page_addr;
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
- __le16 tx_max_fc_pay_len;
- __le16 e_d_tov_timer_val;
- __le16 rx_max_fc_pay_len;
- __le16 vlan_tag;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
- __le16 physical_q0;
- __le16 rec_rr_tov_timer_val;
- struct fc_addr_nw s_id;
- u8 max_conc_seqs_c3;
- struct fc_addr_nw d_id;
- u8 flags;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
- __le16 conn_id;
- u8 def_q_idx;
- u8 reserved[5];
-};
-
-/* FCoE terminate connection request */
-struct fcoe_conn_terminate_ramrod_data {
- struct regpair terminate_params_addr;
-};
-
-struct fcoe_slow_sgl_ctx {
- struct regpair base_sgl_addr;
- __le16 curr_sge_off;
- __le16 remainder_num_sges;
- __le16 curr_sgl_index;
- __le16 reserved;
-};
-
-union fcoe_dix_desc_ctx {
- struct fcoe_slow_sgl_ctx dix_sgl;
- struct scsi_sge cached_dix_sge;
+/* The fcoe storm task context protection-information of Ystorm */
+struct protection_info_ctx {
+ __le16 flags;
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
+#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
+#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
+ u8 dix_block_size;
+ u8 dst_size;
};
-struct fcoe_fast_sgl_ctx {
- struct regpair sgl_start_addr;
- __le32 sgl_byte_offset;
- __le16 task_reuse_cnt;
- __le16 init_offset_in_first_sge;
+/* The fcoe storm task context protection-information of Ystorm */
+union protection_info_union_ctx {
+ struct protection_info_ctx info;
+ __le32 value;
};
+/* FCP CMD payload */
struct fcoe_fcp_cmd_payload {
__le32 opaque[8];
};
+/* FCP RSP payload */
struct fcoe_fcp_rsp_payload {
__le32 opaque[6];
};
-struct fcoe_fcp_xfer_payload {
- __le32 opaque[3];
-};
-
-/* FCoE firmware function init */
-struct fcoe_init_func_ramrod_data {
- struct scsi_init_func_params func_params;
- struct scsi_init_func_queues q_params;
- __le16 mtu;
- __le16 sq_num_pages_in_pbl;
- __le32 reserved;
-};
-
-/* FCoE: Mode of the connection: Target or Initiator or both */
-enum fcoe_mode_type {
- FCOE_INITIATOR_MODE = 0x0,
- FCOE_TARGET_MODE = 0x1,
- FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
- MAX_FCOE_MODE_TYPE
-};
-
-struct fcoe_rx_stat {
- struct regpair fcoe_rx_byte_cnt;
- struct regpair fcoe_rx_data_pkt_cnt;
- struct regpair fcoe_rx_xfer_pkt_cnt;
- struct regpair fcoe_rx_other_pkt_cnt;
- __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
- __le32 fcoe_silent_drop_pkt_rq_full_cnt;
- __le32 fcoe_silent_drop_pkt_crc_error_cnt;
- __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
- __le32 fcoe_silent_drop_total_pkt_cnt;
- __le32 rsrv;
-};
-
-struct fcoe_stat_ramrod_data {
- struct regpair stat_params_addr;
-};
-
-struct protection_info_ctx {
- __le16 flags;
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
-#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
-#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
- u8 dix_block_size;
- u8 dst_size;
-};
-
-union protection_info_union_ctx {
- struct protection_info_ctx info;
- __le32 value;
-};
-
+/* FCP RSP payload */
struct fcp_rsp_payload_padded {
struct fcoe_fcp_rsp_payload rsp_payload;
__le32 reserved[2];
};
+/* FCP XFER payload */
+struct fcoe_fcp_xfer_payload {
+ __le32 opaque[3];
+};
+
+/* FCP XFER payload */
struct fcp_xfer_payload_padded {
struct fcoe_fcp_xfer_payload xfer_payload;
__le32 reserved[5];
};
+/* Task params */
struct fcoe_tx_data_params {
__le32 data_offset;
__le32 offset_in_io;
u8 flags;
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
-#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
-#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
+#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
+#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
u8 dif_residual;
__le16 seq_cnt;
__le16 single_sge_saved_offset;
@@ -227,6 +88,7 @@ struct fcoe_tx_data_params {
__le16 reserved3;
};
+/* Middle path parameters: FC header fields provided by the driver */
struct fcoe_tx_mid_path_params {
__le32 parameter;
u8 r_ctl;
@@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params {
__le16 ox_id;
};
+/* Task params */
struct fcoe_tx_params {
struct fcoe_tx_data_params data;
struct fcoe_tx_mid_path_params mid_path;
};
+/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */
union fcoe_tx_info_union_ctx {
struct fcoe_fcp_cmd_payload fcp_cmd_payload;
struct fcp_rsp_payload_padded fcp_rsp_payload;
@@ -249,13 +113,29 @@ union fcoe_tx_info_union_ctx {
struct fcoe_tx_params tx_params;
};
+/* Data sgl */
+struct fcoe_slow_sgl_ctx {
+ struct regpair base_sgl_addr;
+ __le16 curr_sge_off;
+ __le16 remainder_num_sges;
+ __le16 curr_sgl_index;
+ __le16 reserved;
+};
+
+/* Union of DIX SGL \ cached DIX sges */
+union fcoe_dix_desc_ctx {
+ struct fcoe_slow_sgl_ctx dix_sgl;
+ struct scsi_sge cached_dix_sge;
+};
+
+/* The fcoe storm task context of Ystorm */
struct ystorm_fcoe_task_st_ctx {
u8 task_type;
u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
u8 cached_dix_sge;
u8 expect_first_xfer;
__le32 num_pbf_zero_write;
@@ -272,49 +152,49 @@ struct ystorm_fcoe_task_st_ctx {
u8 reserved2[8];
};
-struct ystorm_fcoe_task_ag_ctx {
+struct e4_ystorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -328,73 +208,73 @@ struct ystorm_fcoe_task_ag_ctx {
__le32 reg2;
};
-struct tstorm_fcoe_task_ag_ctx {
+struct e4_tstorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
u8 flags1;
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
u8 flags3;
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 cleanup_state;
__le16 last_sent_tid;
__le32 rec_rr_tov_exp_timeout;
@@ -407,25 +287,46 @@ struct tstorm_fcoe_task_ag_ctx {
__le32 data_offset_next;
};
+/* Expected relative offsets */
+struct fcoe_exp_ro {
+ __le32 data_offset;
+ __le32 reserved;
+};
+
+/* Union of Cleanup address \ expected relative offsets */
+union fcoe_cleanup_addr_exp_ro_union {
+ struct regpair abts_rsp_fc_payload_hi;
+ struct fcoe_exp_ro exp_ro;
+};
+
+/* Fields copied from ABTS rsp pkt */
+struct fcoe_abts_pkt {
+ __le32 abts_rsp_fc_payload_lo;
+ __le16 abts_rsp_rx_id;
+ u8 abts_rsp_rctl;
+ u8 reserved2;
+};
+
+/* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
__le16 seq_cnt;
u8 seq_id;
u8 ooo_rx_seq_id;
@@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
__le16 reserved1;
};
+/* FW read-only part of the Tstorm fcoe task context */
struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
u8 task_type;
u8 dev_type;
@@ -446,54 +348,55 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
__le32 rsrv;
};
+/* The fcoe task storm context of Tstorm */
struct tstorm_fcoe_task_st_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
};
-struct mstorm_fcoe_task_ag_ctx {
+struct e4_mstorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 icid;
u8 flags0;
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 cleanup_state;
__le32 received_bytes;
u8 byte3;
@@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx {
__le32 reg2;
};
+/* The fcoe task storm context of Mstorm */
struct mstorm_fcoe_task_st_ctx {
struct regpair rsp_buf_addr;
__le32 rsrv[2];
@@ -515,79 +419,79 @@ struct mstorm_fcoe_task_st_ctx {
__le32 data_buffer_offset;
__le16 parent_id;
__le16 flags;
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
struct scsi_cached_sges data_desc;
};
-struct ustorm_fcoe_task_ag_ctx {
+struct e4_ustorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
-#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 global_cq_num;
@@ -596,21 +500,189 @@ struct ustorm_fcoe_task_ag_ctx {
__le32 reg5;
};
-struct fcoe_task_context {
+/* FCoE task context */
+struct e4_fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
- struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
- struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+ struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+ struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
struct tstorm_fcoe_task_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+ struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_task_st_ctx mstorm_st_context;
- struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
struct rdif_task_context rdif_context;
};
+/* FCoE additional WQE (Sq/XferQ) information */
+union fcoe_additional_info_union {
+ __le32 previous_tid;
+ __le32 parent_tid;
+ __le32 burst_length;
+ __le32 seq_rec_updated_offset;
+};
+
+/* FCoE completion status */
+enum fcoe_completion_status {
+ FCOE_COMPLETION_STATUS_SUCCESS,
+ FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
+ FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
+ MAX_FCOE_COMPLETION_STATUS
+};
+
+/* FC address (SID/DID) network presentation */
+struct fc_addr_nw {
+ u8 addr_lo;
+ u8 addr_mid;
+ u8 addr_hi;
+};
+
+/* FCoE connection offload */
+struct fcoe_conn_offload_ramrod_data {
+ struct regpair sq_pbl_addr;
+ struct regpair sq_curr_page_addr;
+ struct regpair sq_next_page_addr;
+ struct regpair xferq_pbl_addr;
+ struct regpair xferq_curr_page_addr;
+ struct regpair xferq_next_page_addr;
+ struct regpair respq_pbl_addr;
+ struct regpair respq_curr_page_addr;
+ struct regpair respq_next_page_addr;
+ __le16 dst_mac_addr_lo;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_hi;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
+ __le16 src_mac_addr_hi;
+ __le16 tx_max_fc_pay_len;
+ __le16 e_d_tov_timer_val;
+ __le16 rx_max_fc_pay_len;
+ __le16 vlan_tag;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
+ __le16 physical_q0;
+ __le16 rec_rr_tov_timer_val;
+ struct fc_addr_nw s_id;
+ u8 max_conc_seqs_c3;
+ struct fc_addr_nw d_id;
+ u8 flags;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7
+ __le16 conn_id;
+ u8 def_q_idx;
+ u8 reserved[5];
+};
+
+/* FCoE terminate connection request */
+struct fcoe_conn_terminate_ramrod_data {
+ struct regpair terminate_params_addr;
+};
+
+/* FCoE device type */
+enum fcoe_device_type {
+ FCOE_TASK_DEV_TYPE_DISK,
+ FCOE_TASK_DEV_TYPE_TAPE,
+ MAX_FCOE_DEVICE_TYPE
+};
+
+/* Data sgl */
+struct fcoe_fast_sgl_ctx {
+ struct regpair sgl_start_addr;
+ __le32 sgl_byte_offset;
+ __le16 task_reuse_cnt;
+ __le16 init_offset_in_first_sge;
+};
+
+/* FCoE firmware function init */
+struct fcoe_init_func_ramrod_data {
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+ __le16 mtu;
+ __le16 sq_num_pages_in_pbl;
+ __le32 reserved[3];
+};
+
+/* FCoE connection mode: target, initiator, or both */
+enum fcoe_mode_type {
+ FCOE_INITIATOR_MODE = 0x0,
+ FCOE_TARGET_MODE = 0x1,
+ FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
+ MAX_FCOE_MODE_TYPE
+};
+
+/* Per PF FCoE receive path statistics - tStorm RAM structure */
+struct fcoe_rx_stat {
+ struct regpair fcoe_rx_byte_cnt;
+ struct regpair fcoe_rx_data_pkt_cnt;
+ struct regpair fcoe_rx_xfer_pkt_cnt;
+ struct regpair fcoe_rx_other_pkt_cnt;
+ __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_rq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_crc_error_cnt;
+ __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
+ __le32 fcoe_silent_drop_total_pkt_cnt;
+ __le32 rsrv;
+};
+
+/* FCoE SQE request type */
+enum fcoe_sqe_request_type {
+ SEND_FCOE_CMD,
+ SEND_FCOE_MIDPATH,
+ SEND_FCOE_ABTS_REQUEST,
+ FCOE_EXCHANGE_CLEANUP,
+ FCOE_SEQUENCE_RECOVERY,
+ SEND_FCOE_XFER_RDY,
+ SEND_FCOE_RSP,
+ SEND_FCOE_RSP_WITH_SENSE_DATA,
+ SEND_FCOE_TARGET_DATA,
+ SEND_FCOE_INITIATOR_DATA,
+ SEND_FCOE_XFER_CONTINUATION_RDY,
+ SEND_FCOE_TARGET_ABTS_RSP,
+ MAX_FCOE_SQE_REQUEST_TYPE
+};
+
+/* FCoE statistics request */
+struct fcoe_stat_ramrod_data {
+ struct regpair stat_params_addr;
+};
+
+/* FCoE task type */
+enum fcoe_task_type {
+ FCOE_TASK_TYPE_WRITE_INITIATOR,
+ FCOE_TASK_TYPE_READ_INITIATOR,
+ FCOE_TASK_TYPE_MIDPATH,
+ FCOE_TASK_TYPE_UNSOLICITED,
+ FCOE_TASK_TYPE_ABTS,
+ FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
+ FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
+ FCOE_TASK_TYPE_WRITE_TARGET,
+ FCOE_TASK_TYPE_READ_TARGET,
+ FCOE_TASK_TYPE_RSP,
+ FCOE_TASK_TYPE_RSP_SENSE_DATA,
+ FCOE_TASK_TYPE_ABTS_TARGET,
+ FCOE_TASK_TYPE_ENUM_SIZE,
+ MAX_FCOE_TASK_TYPE
+};
+
+/* Per PF FCoE transmit path statistics - pStorm RAM structure */
struct fcoe_tx_stat {
struct regpair fcoe_tx_byte_cnt;
struct regpair fcoe_tx_data_pkt_cnt;
@@ -618,51 +690,55 @@ struct fcoe_tx_stat {
struct regpair fcoe_tx_other_pkt_cnt;
};
+/* FCoE SQ/XferQ element */
struct fcoe_wqe {
__le16 task_id;
__le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK 0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT 0
-#define FCOE_WQE_SGL_MODE_MASK 0x1
-#define FCOE_WQE_SGL_MODE_SHIFT 4
-#define FCOE_WQE_CONTINUATION_MASK 0x1
-#define FCOE_WQE_CONTINUATION_SHIFT 5
-#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
-#define FCOE_WQE_RESERVED_MASK 0x1
-#define FCOE_WQE_RESERVED_SHIFT 7
-#define FCOE_WQE_NUM_SGES_MASK 0xF
-#define FCOE_WQE_NUM_SGES_SHIFT 8
-#define FCOE_WQE_RESERVED1_MASK 0xF
-#define FCOE_WQE_RESERVED1_SHIFT 12
+#define FCOE_WQE_REQ_TYPE_MASK 0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT 0
+#define FCOE_WQE_SGL_MODE_MASK 0x1
+#define FCOE_WQE_SGL_MODE_SHIFT 4
+#define FCOE_WQE_CONTINUATION_MASK 0x1
+#define FCOE_WQE_CONTINUATION_SHIFT 5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
+#define FCOE_WQE_RESERVED_MASK 0x1
+#define FCOE_WQE_RESERVED_SHIFT 7
+#define FCOE_WQE_NUM_SGES_MASK 0xF
+#define FCOE_WQE_NUM_SGES_SHIFT 8
+#define FCOE_WQE_RESERVED1_MASK 0xF
+#define FCOE_WQE_RESERVED1_SHIFT 12
union fcoe_additional_info_union additional_info_union;
};
+/* FCoE XFRQ element protection flags */
struct xfrqe_prot_flags {
u8 flags;
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
-#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
-#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
+#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
+#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
};
+/* FCoE doorbell data */
struct fcoe_db_data {
u8 params;
-#define FCOE_DB_DATA_DEST_MASK 0x3
-#define FCOE_DB_DATA_DEST_SHIFT 0
-#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
-#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
-#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
-#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
-#define FCOE_DB_DATA_RESERVED_MASK 0x1
-#define FCOE_DB_DATA_RESERVED_SHIFT 5
-#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define FCOE_DB_DATA_DEST_MASK 0x3
+#define FCOE_DB_DATA_DEST_SHIFT 0
+#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
+#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
+#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define FCOE_DB_DATA_RESERVED_MASK 0x1
+#define FCOE_DB_DATA_RESERVED_SHIFT 5
+#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 sq_prod;
};
+
#endif /* __FCOE_COMMON__ */
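
The MASK/SHIFT #define pairs that dominate this header (and the iscsi_common.h changes below) all follow one convention: several narrow fields share a single flags word, and each field is read or written by shifting and masking with its macro pair. A minimal, self-contained sketch of that access pattern follows, using the fcoe_wqe flags layout from above. The GET_FIELD/SET_FIELD macros here are local stand-ins for the equivalent helpers the qed headers provide, so the names are illustrative, not the driver's exact API.

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the fcoe_wqe flags word above. */
#define FCOE_WQE_REQ_TYPE_MASK   0xF
#define FCOE_WQE_REQ_TYPE_SHIFT  0
#define FCOE_WQE_NUM_SGES_MASK   0xF
#define FCOE_WQE_NUM_SGES_SHIFT  8

/* Stand-ins for the qed GET_FIELD/SET_FIELD helpers. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, val)                            \
	do {                                                   \
		(value) &= ~((name##_MASK) << (name##_SHIFT)); \
		(value) |= (uint16_t)(((val) & name##_MASK)    \
				      << (name##_SHIFT));      \
	} while (0)

int main(void)
{
	uint16_t flags = 0;

	/* 2 == SEND_FCOE_ABTS_REQUEST in enum fcoe_sqe_request_type. */
	SET_FIELD(flags, FCOE_WQE_REQ_TYPE, 2);
	SET_FIELD(flags, FCOE_WQE_NUM_SGES, 2);

	/*
	 * In the kernel these words are little-endian (__le16), so a
	 * driver would store cpu_to_le16(flags); host order is fine
	 * for this demo.
	 */
	printf("flags=0x%04x req_type=%u num_sges=%u\n", flags,
	       (unsigned)GET_FIELD(flags, FCOE_WQE_REQ_TYPE),
	       (unsigned)GET_FIELD(flags, FCOE_WQE_NUM_SGES));
	return 0;
}
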
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 85e086cba639..4cc9b37b8d95 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -32,47 +32,48 @@
#ifndef __ISCSI_COMMON__
#define __ISCSI_COMMON__
+
/**********************/
/* ISCSI FW CONSTANTS */
/**********************/
/* iSCSI HSI constants */
-#define ISCSI_DEFAULT_MTU (1500)
+#define ISCSI_DEFAULT_MTU (1500)
/* KWQ (kernel work queue) layer codes */
-#define ISCSI_SLOW_PATH_LAYER_CODE (6)
+#define ISCSI_SLOW_PATH_LAYER_CODE (6)
/* iSCSI parameter defaults */
-#define ISCSI_DEFAULT_HEADER_DIGEST (0)
-#define ISCSI_DEFAULT_DATA_DIGEST (0)
-#define ISCSI_DEFAULT_INITIAL_R2T (1)
-#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
-#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
-#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
-#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
-#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
+#define ISCSI_DEFAULT_HEADER_DIGEST (0)
+#define ISCSI_DEFAULT_DATA_DIGEST (0)
+#define ISCSI_DEFAULT_INITIAL_R2T (1)
+#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
+#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
+#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
+#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
/* iSCSI parameter limits */
-#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
-#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
-#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
-#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
-#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
-#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
+#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
+#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
+#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
+#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
-#define ISCSI_AHS_CNTL_SIZE 4
+#define ISCSI_AHS_CNTL_SIZE 4
-#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
+#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
/* iSCSI reserved params */
#define ISCSI_ITT_ALL_ONES (0xffffffff)
#define ISCSI_TTT_ALL_ONES (0xffffffff)
-#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
-#define ISCSI_OPTION_2_ON_CHIP_TCP 2
+#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
+#define ISCSI_OPTION_2_ON_CHIP_TCP 2
-#define ISCSI_INITIATOR_MODE 0
-#define ISCSI_TARGET_MODE 1
+#define ISCSI_INITIATOR_MODE 0
+#define ISCSI_TARGET_MODE 1
/* iSCSI request op codes */
#define ISCSI_OPCODE_NOP_OUT (0)
@@ -84,41 +85,48 @@
#define ISCSI_OPCODE_LOGOUT_REQUEST (6)
/* iSCSI response/messages op codes */
-#define ISCSI_OPCODE_NOP_IN (0x20)
-#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
-#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
-#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
-#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
-#define ISCSI_OPCODE_DATA_IN (0x25)
-#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
-#define ISCSI_OPCODE_R2T (0x31)
-#define ISCSI_OPCODE_ASYNC_MSG (0x32)
-#define ISCSI_OPCODE_REJECT (0x3f)
+#define ISCSI_OPCODE_NOP_IN (0x20)
+#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
+#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
+#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
+#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
+#define ISCSI_OPCODE_DATA_IN (0x25)
+#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
+#define ISCSI_OPCODE_R2T (0x31)
+#define ISCSI_OPCODE_ASYNC_MSG (0x32)
+#define ISCSI_OPCODE_REJECT (0x3f)
/* iSCSI stages */
-#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
-#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
-#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
+#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
+#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
+#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
/* iSCSI CQE errors */
-#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
+#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
+
+/* Union of data bd_opaque / tq_tid */
+union bd_opaque_tq_union {
+ __le16 bd_opaque;
+ __le16 tq_tid;
+};
+/* iSCSI CQE error bitmap */
struct cqe_error_bitmap {
u8 cqe_error_status_bits;
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
-#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
-#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
+#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
+#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
};
union cqe_error_status {
@@ -126,86 +134,133 @@ union cqe_error_status {
struct cqe_error_bitmap error_bits;
};
+/* Generic PDU header as raw dwords */
struct data_hdr {
__le32 data[12];
};
-struct iscsi_async_msg_hdr {
- __le16 reserved0;
- u8 flags_attr;
-#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
-#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
-#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
-#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
- u8 opcode;
- __le32 hdr_second_dword;
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
- struct regpair lun;
- __le32 all_ones;
- __le32 reserved1;
- __le32 stat_sn;
- __le32 exp_cmd_sn;
- __le32 max_cmd_sn;
- __le16 param1_rsrv;
- u8 async_vcode;
- u8 async_event;
- __le16 param3_rsrv;
- __le16 param2_rsrv;
- __le32 reserved7;
+struct lun_mapper_addr_reserved {
+ struct regpair lun_mapper_addr;
+ u8 reserved0[8];
+};
+
+/* RDIF context for DIF on immediate */
+struct dif_on_immediate_params {
+ __le32 initial_ref_tag;
+ __le16 application_tag;
+ __le16 application_tag_mask;
+ __le16 flags1;
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
+ u8 flags0;
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7
+ u8 reserved_zero[5];
+};
+
+/* iSCSI dif on immediate mode attributes union */
+union dif_configuration_params {
+ struct lun_mapper_addr_reserved lun_mapper_address;
+ struct dif_on_immediate_params def_dif_conf;
+};
+
+/* Union of data/r2t sequence number */
+union iscsi_seq_num {
+ __le16 data_sn;
+ __le16 r2t_sn;
};
-struct iscsi_cmd_hdr {
- __le16 reserved1;
- u8 flags_attr;
-#define ISCSI_CMD_HDR_ATTR_MASK 0x7
-#define ISCSI_CMD_HDR_ATTR_SHIFT 0
-#define ISCSI_CMD_HDR_RSRV_MASK 0x3
-#define ISCSI_CMD_HDR_RSRV_SHIFT 3
-#define ISCSI_CMD_HDR_WRITE_MASK 0x1
-#define ISCSI_CMD_HDR_WRITE_SHIFT 5
-#define ISCSI_CMD_HDR_READ_MASK 0x1
-#define ISCSI_CMD_HDR_READ_SHIFT 6
-#define ISCSI_CMD_HDR_FINAL_MASK 0x1
-#define ISCSI_CMD_HDR_FINAL_SHIFT 7
- u8 hdr_first_byte;
-#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
-#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
-#define ISCSI_CMD_HDR_IMM_MASK 0x1
-#define ISCSI_CMD_HDR_IMM_SHIFT 6
-#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
-#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
- __le32 hdr_second_dword;
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
- struct regpair lun;
- __le32 itt;
- __le32 expected_transfer_length;
- __le32 cmd_sn;
- __le32 exp_stat_sn;
- __le32 cdb[4];
+/* iSCSI DIF flags */
+struct iscsi_dif_flags {
+ u8 flags;
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
};
+/* The iscsi task state of Ystorm */
+struct ystorm_iscsi_task_state {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 exp_r2t_sn;
+ __le32 buffer_offset;
+ union iscsi_seq_num seq_num;
+ struct iscsi_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3
+};
+
+/* The iscsi task retransmit options of Ystorm */
+struct ystorm_iscsi_task_rxmit_opt {
+ __le32 fast_rxmit_sge_offset;
+ __le32 scan_start_buffer_offset;
+ __le32 fast_rxmit_buffer_offset;
+ u8 scan_start_sgl_index;
+ u8 fast_rxmit_sgl_index;
+ __le16 reserved;
+};
+
+/* iSCSI Common PDU header */
struct iscsi_common_hdr {
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 hdr_first_byte;
-#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
-#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
-#define ISCSI_COMMON_HDR_IMM_MASK 0x1
-#define ISCSI_COMMON_HDR_IMM_SHIFT 6
-#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
-#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
+#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
+#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
+#define ISCSI_COMMON_HDR_IMM_MASK 0x1
+#define ISCSI_COMMON_HDR_IMM_SHIFT 6
+#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
+#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
__le32 hdr_second_dword;
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun_reserved;
__le32 itt;
__le32 ttt;
@@ -215,86 +270,60 @@ struct iscsi_common_hdr {
__le32 data[3];
};
-struct iscsi_conn_offload_params {
- struct regpair sq_pbl_addr;
- struct regpair r2tq_pbl_addr;
- struct regpair xhq_pbl_addr;
- struct regpair uhq_pbl_addr;
- __le32 initial_ack;
- __le16 physical_q0;
- __le16 physical_q1;
- u8 flags;
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
- u8 pbl_page_size_log;
- u8 pbe_page_size_log;
- u8 default_cq;
- __le32 stat_sn;
-};
-
-struct iscsi_slow_path_hdr {
- u8 op_code;
- u8 flags;
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
-};
-
-struct iscsi_conn_update_ramrod_params {
- struct iscsi_slow_path_hdr hdr;
- __le16 conn_id;
- __le32 fw_cid;
- u8 flags;
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
- u8 reserved0[3];
- __le32 max_seq_size;
- __le32 max_send_pdu_length;
- __le32 max_recv_pdu_length;
- __le32 first_seq_length;
+/* iSCSI Command PDU header */
+struct iscsi_cmd_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_CMD_HDR_READ_MASK 0x1
+#define ISCSI_CMD_HDR_READ_SHIFT 6
+#define ISCSI_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT 7
+ u8 hdr_first_byte;
+#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
+#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
+#define ISCSI_CMD_HDR_IMM_MASK 0x1
+#define ISCSI_CMD_HDR_IMM_SHIFT 6
+#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
+#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
+ __le32 hdr_second_dword;
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 expected_transfer_length;
+ __le32 cmd_sn;
__le32 exp_stat_sn;
+ __le32 cdb[4];
};
+/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */
struct iscsi_ext_cdb_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
-#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
-#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 expected_transfer_length;
@@ -303,26 +332,27 @@ struct iscsi_ext_cdb_cmd_hdr {
struct scsi_sge cdb_sge;
};
+/* iSCSI login request PDU header */
struct iscsi_login_req_hdr {
u8 version_min;
u8 version_max;
u8 flags_attr;
-#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
-#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
-#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
-#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
-#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
-#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
-#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
-#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
-#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
-#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
+#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
@@ -334,6 +364,7 @@ struct iscsi_login_req_hdr {
__le32 reserved2[4];
};
+/* iSCSI logout request PDU header */
struct iscsi_logout_req_hdr {
__le16 reserved0;
u8 reason_code;
@@ -348,13 +379,14 @@ struct iscsi_logout_req_hdr {
__le32 reserved4[4];
};
+/* iSCSI Data-out PDU header */
struct iscsi_data_out_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
-#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
-#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
-#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
+#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
@@ -368,22 +400,23 @@ struct iscsi_data_out_hdr {
__le32 reserved5;
};
+/* iSCSI Data-in PDU header */
struct iscsi_data_in_hdr {
u8 status_rsvd;
u8 reserved1;
u8 flags;
-#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
-#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
-#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
-#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
-#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
-#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
-#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
-#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
+#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
+#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
+#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
+#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
+#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
+#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
+#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
@@ -397,6 +430,7 @@ struct iscsi_data_in_hdr {
__le32 residual_count;
};
+/* iSCSI R2T PDU header */
struct iscsi_r2t_hdr {
u8 reserved0[3];
u8 opcode;
@@ -412,13 +446,14 @@ struct iscsi_r2t_hdr {
__le32 desired_data_trns_len;
};
+/* iSCSI NOP-out PDU header */
struct iscsi_nop_out_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
-#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
-#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
-#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
+#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
@@ -432,19 +467,20 @@ struct iscsi_nop_out_hdr {
__le32 reserved6;
};
+/* iSCSI NOP-in PDU header */
struct iscsi_nop_in_hdr {
__le16 reserved0;
u8 flags_attr;
-#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
-#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
-#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
-#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
+#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
@@ -456,26 +492,27 @@ struct iscsi_nop_in_hdr {
__le32 reserved7;
};
+/* iSCSI Login Response PDU header */
struct iscsi_login_response_hdr {
u8 version_active;
u8 version_max;
u8 flags_attr;
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
-#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
-#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
@@ -490,16 +527,17 @@ struct iscsi_login_response_hdr {
__le32 reserved4[2];
};
+/* iSCSI Logout Response PDU header */
struct iscsi_logout_response_hdr {
u8 reserved1;
u8 response;
u8 flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 reserved2[2];
__le32 itt;
__le32 reserved3;
@@ -512,21 +550,22 @@ struct iscsi_logout_response_hdr {
__le32 reserved5[1];
};
+/* iSCSI Text Request PDU header */
struct iscsi_text_request_hdr {
__le16 reserved0;
u8 flags_attr;
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
-#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
-#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
-#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
-#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
+#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
@@ -535,21 +574,22 @@ struct iscsi_text_request_hdr {
__le32 reserved4[4];
};
+/* iSCSI Text Response PDU header */
struct iscsi_text_response_hdr {
__le16 reserved1;
u8 flags;
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
-#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
-#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
-#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
-#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
@@ -559,15 +599,16 @@ struct iscsi_text_response_hdr {
__le32 reserved4[3];
};
+/* iSCSI TMF Request PDU header */
struct iscsi_tmf_request_hdr {
__le16 reserved0;
u8 function;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 rtt;
@@ -584,10 +625,10 @@ struct iscsi_tmf_response_hdr {
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 itt;
__le32 reserved1;
@@ -597,16 +638,17 @@ struct iscsi_tmf_response_hdr {
__le32 reserved4[3];
};
+/* iSCSI Response PDU header */
struct iscsi_response_hdr {
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 snack_tag;
@@ -618,16 +660,17 @@ struct iscsi_response_hdr {
__le32 residual_count;
};
+/* iSCSI Reject PDU header */
struct iscsi_reject_hdr {
u8 reserved4;
u8 hdr_reason;
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 all_ones;
__le32 reserved2;
@@ -638,6 +681,35 @@ struct iscsi_reject_hdr {
__le32 reserved3[2];
};
+/* iSCSI Asynchronous Message PDU header */
+struct iscsi_async_msg_hdr {
+ __le16 reserved0;
+ u8 flags_attr;
+#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
+#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
+#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 all_ones;
+ __le32 reserved1;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le16 param1_rsrv;
+ u8 async_vcode;
+ u8 async_event;
+ __le16 param3_rsrv;
+ __le16 param2_rsrv;
+ __le32 reserved7;
+};
+
+/* PDU header part of Ystorm task context */
union iscsi_task_hdr {
struct iscsi_common_hdr common;
struct data_hdr data;
@@ -661,6 +733,348 @@ union iscsi_task_hdr {
struct iscsi_async_msg_hdr async_msg;
};
+/* The iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_st_ctx {
+ struct ystorm_iscsi_task_state state;
+ struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
+ union iscsi_task_hdr pdu_hdr;
+};
+
+struct e4_ystorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 TTT;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct e4_mstorm_iscsi_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct e4_ustorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 state;
+ __le16 icid;
+ u8 flags0;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ u8 next_tid_valid;
+ u8 byte3;
+ __le16 word1;
+ __le16 next_tid;
+ __le16 word3;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
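/* Editorial sketch (not part of this change): every *_MASK/*_SHIFT pair in
 * the aggregate contexts above describes a sub-field packed into one flags
 * byte.  The helpers below are hypothetical stand-ins for the token-pasting
 * accessors the qed headers provide; they only illustrate the encoding.
 */
#define EX_GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))
#define EX_SET_FIELD(value, name, val)					  \
	do {								  \
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));	  \
		(value) |= ((val) & (name ## _MASK)) << (name ## _SHIFT); \
	} while (0)

/* e.g. read the 4-bit DIF error type out of flags3 */
static inline u8 ex_dif_error_type(const struct e4_ustorm_iscsi_task_ag_ctx *c)
{
	return EX_GET_FIELD(c->flags3,
			    E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE);
}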
+
+/* The iSCSI storm task context of Mstorm */
+struct mstorm_iscsi_task_st_ctx {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
+ u8 task_type;
+ struct iscsi_dif_flags dif_flags;
+ __le16 dif_task_icid;
+ struct regpair sense_db;
+ __le32 expected_itt;
+ __le32 reserved1;
+};
+
+struct iscsi_reg1 {
+ __le32 reg1_map;
+#define ISCSI_REG1_NUM_SGES_MASK 0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT 0
+#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 4
+};
+
+struct tqe_opaque {
+ __le16 opaque[2];
+};
+
+/* The iSCSI storm task context of Ustorm */
+struct ustorm_iscsi_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair lun;
+ struct iscsi_reg1 reg1;
+ u8 flags2;
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
+ struct iscsi_dif_flags dif_flags;
+ __le16 reserved3;
+ struct tqe_opaque tqe_opaque_list;
+ __le32 reserved5;
+ __le32 reserved6;
+ __le32 reserved7;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
+ u8 flags;
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
+ u8 cq_rss_number;
+};
+
+/* iscsi task context */
+struct e4_iscsi_task_context {
+ struct ystorm_iscsi_task_st_ctx ystorm_st_context;
+ struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct tdif_task_context tdif_context;
+ struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct mstorm_iscsi_task_st_ctx mstorm_st_context;
+ struct ustorm_iscsi_task_st_ctx ustorm_st_context;
+ struct rdif_task_context rdif_context;
+};
+
+/* iSCSI connection offload params passed by driver to FW in ISCSI offload
+ * ramrod.
+ */
+struct iscsi_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le32 initial_ack;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
+ u8 pbl_page_size_log;
+ u8 pbe_page_size_log;
+ u8 default_cq;
+ __le32 stat_sn;
+};
+
+/* iSCSI connection statistics */
+struct iscsi_conn_stats_params {
+ struct regpair iscsi_tcp_tx_packets_cnt;
+ struct regpair iscsi_tcp_tx_bytes_cnt;
+ struct regpair iscsi_tcp_tx_rxmit_cnt;
+ struct regpair iscsi_tcp_rx_packets_cnt;
+ struct regpair iscsi_tcp_rx_bytes_cnt;
+ struct regpair iscsi_tcp_rx_dup_ack_cnt;
+ __le32 iscsi_tcp_rx_chksum_err_cnt;
+ __le32 reserved;
+};
+
+/* Slow path element (SPE) message header */
+struct iscsi_slow_path_hdr {
+ u8 op_code;
+ u8 flags;
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
+};
+
+/* iSCSI connection update params passed by driver to FW in ISCSI update
+ * ramrod.
+ */
+struct iscsi_conn_update_ramrod_params {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ u8 flags;
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
+ u8 reserved0[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 exp_stat_sn;
+ union dif_configuration_params dif_on_imme_params;
+};
+
+/* iSCSI CQ element */
struct iscsi_cqe_common {
__le16 conn_id;
u8 cqe_type;
@@ -669,6 +1083,7 @@ struct iscsi_cqe_common {
union iscsi_task_hdr iscsi_hdr;
};
+/* iSCSI solicited CQ element */
struct iscsi_cqe_solicited {
__le16 conn_id;
u8 cqe_type;
@@ -678,10 +1093,11 @@ struct iscsi_cqe_solicited {
u8 fw_dbg_field;
u8 caused_conn_err;
u8 reserved0[3];
- __le32 reserved1[1];
+ __le32 data_truncated_bytes;
union iscsi_task_hdr iscsi_hdr;
};
+/* iSCSI unsolicited CQ element */
struct iscsi_cqe_unsolicited {
__le16 conn_id;
u8 cqe_type;
@@ -689,16 +1105,19 @@ struct iscsi_cqe_unsolicited {
__le16 reserved0;
u8 reserved1;
u8 unsol_cqe_type;
- struct regpair rqe_opaque;
+ __le16 rqe_opaque;
+ __le16 reserved2[3];
union iscsi_task_hdr iscsi_hdr;
};
+/* iSCSI CQ element union */
union iscsi_cqe {
struct iscsi_cqe_common cqe_common;
struct iscsi_cqe_solicited cqe_solicited;
struct iscsi_cqe_unsolicited cqe_unsolicited;
};
+/* iSCSI CQE type */
enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED,
@@ -708,6 +1127,7 @@ enum iscsi_cqes_type {
MAX_ISCSI_CQES_TYPE
};
+/* iSCSI unsolicited CQE type */
enum iscsi_cqe_unsolicited_type {
ISCSI_CQE_UNSOLICITED_NONE,
ISCSI_CQE_UNSOLICITED_SINGLE,
@@ -717,37 +1137,28 @@ enum iscsi_cqe_unsolicited_type {
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
-
+/* iscsi debug modes */
struct iscsi_debug_modes {
u8 flags;
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
-};
-
-struct iscsi_dif_flags {
- u8 flags;
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
-};
-
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7
+};
+
+/* iSCSI kernel event queue element (EQE) opcodes */
enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_INIT_FUNC = 0,
ISCSI_EVENT_TYPE_DESTROY_FUNC,
@@ -756,9 +1167,9 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN,
ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
+ ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
- RESERVED9,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
@@ -772,6 +1183,7 @@ enum iscsi_eqe_opcode {
MAX_ISCSI_EQE_OPCODE
};
+/* iSCSI EQE and CQE completion status */
enum iscsi_error_types {
ISCSI_STATUS_NONE = 0,
ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
@@ -823,7 +1235,7 @@ enum iscsi_error_types {
MAX_ISCSI_ERROR_TYPES
};
-
+/* iSCSI Ramrod Command IDs */
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
@@ -833,22 +1245,11 @@ enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
+ ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
MAX_ISCSI_RAMROD_CMD_ID
};
-struct iscsi_reg1 {
- __le32 reg1_map;
-#define ISCSI_REG1_NUM_SGES_MASK 0xF
-#define ISCSI_REG1_NUM_SGES_SHIFT 0
-#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT 4
-};
-
-union iscsi_seq_num {
- __le16 data_sn;
- __le16 r2t_sn;
-};
-
+/* iSCSI connection MAC update request */
struct iscsi_spe_conn_mac_update {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -859,6 +1260,9 @@ struct iscsi_spe_conn_mac_update {
u8 reserved0[2];
};
+/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -867,6 +1271,9 @@ struct iscsi_spe_conn_offload {
struct tcp_offload_params tcp;
};
+/* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
struct iscsi_spe_conn_offload_option2 {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -875,6 +1282,17 @@ struct iscsi_spe_conn_offload_option2 {
struct tcp_offload_params_opt2 tcp;
};
+/* iSCSI collect connection statistics request */
+struct iscsi_spe_conn_statistics {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ u8 reset_stats;
+ u8 reserved0[7];
+ struct regpair stats_cnts_addr;
+};
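/* Editorial sketch (not part of this change): filling the new statistics
 * request before it is queued on the slow path.  regpair is assumed to be
 * the usual { __le32 lo; __le32 hi; } pair; ramrod submission itself is
 * driver-internal and omitted here.
 */
static inline void ex_fill_stats_req(struct iscsi_spe_conn_statistics *req,
				     u16 conn_id, u32 fw_cid,
				     dma_addr_t cnts_addr, bool reset)
{
	req->conn_id = cpu_to_le16(conn_id);
	req->fw_cid = cpu_to_le32(fw_cid);
	req->reset_stats = reset ? 1 : 0;
	req->stats_cnts_addr.lo = cpu_to_le32(lower_32_bits(cnts_addr));
	req->stats_cnts_addr.hi = cpu_to_le32(upper_32_bits(cnts_addr));
}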
+
+/* iSCSI connection termination request */
struct iscsi_spe_conn_termination {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -885,12 +1303,14 @@ struct iscsi_spe_conn_termination {
struct regpair query_params_addr;
};
+/* iSCSI firmware function destroy parameters */
struct iscsi_spe_func_dstry {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le32 reserved1;
};
+/* iSCSI firmware function init parameters */
struct iscsi_spe_func_init {
struct iscsi_slow_path_hdr hdr;
__le16 half_way_close_timeout;
@@ -898,283 +1318,19 @@ struct iscsi_spe_func_init {
u8 num_r2tq_pages_in_ring;
u8 num_uhq_pages_in_ring;
u8 ll2_rx_queue_id;
- u8 ooo_enable;
+ u8 flags;
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
struct iscsi_debug_modes debug_mode;
__le16 reserved1;
__le32 reserved2;
- __le32 reserved3;
- __le32 reserved4;
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};
-struct ystorm_iscsi_task_state {
- struct scsi_cached_sges data_desc;
- struct scsi_sgl_params sgl_params;
- __le32 exp_r2t_sn;
- __le32 buffer_offset;
- union iscsi_seq_num seq_num;
- struct iscsi_dif_flags dif_flags;
- u8 flags;
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
-};
-
-struct ystorm_iscsi_task_rxmit_opt {
- __le32 fast_rxmit_sge_offset;
- __le32 scan_start_buffer_offset;
- __le32 fast_rxmit_buffer_offset;
- u8 scan_start_sgl_index;
- u8 fast_rxmit_sgl_index;
- __le16 reserved;
-};
-
-struct ystorm_iscsi_task_st_ctx {
- struct ystorm_iscsi_task_state state;
- struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
- union iscsi_task_hdr pdu_hdr;
-};
-
-struct ystorm_iscsi_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 word0;
- u8 flags0;
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 byte2;
- __le32 TTT;
- u8 byte3;
- u8 byte4;
- __le16 word1;
-};
-
-struct mstorm_iscsi_task_ag_ctx {
- u8 cdu_validation;
- u8 byte1;
- __le16 task_cid;
- u8 flags0;
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
- u8 flags1;
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 byte2;
- __le32 reg0;
- u8 byte3;
- u8 byte4;
- __le16 word1;
-};
-
-struct ustorm_iscsi_task_ag_ctx {
- u8 reserved;
- u8 state;
- __le16 icid;
- u8 flags0;
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
- u8 flags1;
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
- u8 flags2;
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
- u8 flags3;
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
- __le32 dif_err_intervals;
- __le32 dif_error_1st_interval;
- __le32 rcv_cont_len;
- __le32 exp_cont_len;
- __le32 total_data_acked;
- __le32 exp_data_acked;
- u8 next_tid_valid;
- u8 byte3;
- __le16 word1;
- __le16 next_tid;
- __le16 word3;
- __le32 hdr_residual_count;
- __le32 exp_r2t_sn;
-};
-
-struct mstorm_iscsi_task_st_ctx {
- struct scsi_cached_sges data_desc;
- struct scsi_sgl_params sgl_params;
- __le32 rem_task_size;
- __le32 data_buffer_offset;
- u8 task_type;
- struct iscsi_dif_flags dif_flags;
- u8 reserved0[2];
- struct regpair sense_db;
- __le32 expected_itt;
- __le32 reserved1;
-};
-
-struct ustorm_iscsi_task_st_ctx {
- __le32 rem_rcv_len;
- __le32 exp_data_transfer_len;
- __le32 exp_data_sn;
- struct regpair lun;
- struct iscsi_reg1 reg1;
- u8 flags2;
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
- struct iscsi_dif_flags dif_flags;
- __le16 reserved3;
- __le32 reserved4;
- __le32 reserved5;
- __le32 reserved6;
- __le32 reserved7;
- u8 task_type;
- u8 error_flags;
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
- u8 flags;
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
- u8 cq_rss_number;
-};
-
-struct iscsi_task_context {
- struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
- struct regpair ystorm_ag_padding[2];
- struct tdif_task_context tdif_context;
- struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
- struct regpair mstorm_ag_padding[2];
- struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
- struct mstorm_iscsi_task_st_ctx mstorm_st_context;
- struct ustorm_iscsi_task_st_ctx ustorm_st_context;
- struct rdif_task_context rdif_context;
-};
-
+/* iSCSI task type */
enum iscsi_task_type {
ISCSI_TASK_TYPE_INITIATOR_WRITE,
ISCSI_TASK_TYPE_INITIATOR_READ,
@@ -1186,53 +1342,57 @@ enum iscsi_task_type {
ISCSI_TASK_TYPE_TARGET_READ,
ISCSI_TASK_TYPE_TARGET_RESPONSE,
ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+ ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
MAX_ISCSI_TASK_TYPE
};
+/* iSCSI DesiredDataTransferLength/ttt union */
union iscsi_ttt_txlen_union {
__le32 desired_tx_len;
__le32 ttt;
};
+/* iSCSI uHQ element */
struct iscsi_uhqe {
__le32 reg1;
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
-#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
-#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
-#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
-#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
-#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
-#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
-#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
-#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
+#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
+#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
+#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
+#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
+#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
+#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
+#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
__le32 reg2;
-#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
-#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
-#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
-#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
+#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
+#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
+#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
};
-
+/* iSCSI WQ element */
struct iscsi_wqe {
__le16 task_id;
u8 flags;
-#define ISCSI_WQE_WQE_TYPE_MASK 0x7
-#define ISCSI_WQE_WQE_TYPE_SHIFT 0
-#define ISCSI_WQE_NUM_SGES_MASK 0xF
-#define ISCSI_WQE_NUM_SGES_SHIFT 3
-#define ISCSI_WQE_RESPONSE_MASK 0x1
-#define ISCSI_WQE_RESPONSE_SHIFT 7
+#define ISCSI_WQE_WQE_TYPE_MASK 0x7
+#define ISCSI_WQE_WQE_TYPE_SHIFT 0
+#define ISCSI_WQE_NUM_SGES_MASK 0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT 3
+#define ISCSI_WQE_RESPONSE_MASK 0x1
+#define ISCSI_WQE_RESPONSE_SHIFT 7
struct iscsi_dif_flags prot_flags;
__le32 contlen_cdbsize;
-#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
-#define ISCSI_WQE_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
-#define ISCSI_WQE_CDB_SIZE_SHIFT 24
+#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT 24
};
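/* Editorial sketch (not part of this change): composing a normal SQ WQE with
 * the hypothetical EX_SET_FIELD() helper from the earlier sketch; the
 * little-endian conversions mirror the __le16/__le32 field types above.
 */
static inline void ex_init_wqe(struct iscsi_wqe *wqe, u16 task_id,
			       u8 num_sges, u32 cont_len, u8 cdb_size)
{
	u8 flags = 0;
	u32 dword = 0;

	wqe->task_id = cpu_to_le16(task_id);
	EX_SET_FIELD(flags, ISCSI_WQE_WQE_TYPE, ISCSI_WQE_TYPE_NORMAL);
	EX_SET_FIELD(flags, ISCSI_WQE_NUM_SGES, num_sges);
	wqe->flags = flags;
	EX_SET_FIELD(dword, ISCSI_WQE_CONT_LEN, cont_len);
	EX_SET_FIELD(dword, ISCSI_WQE_CDB_SIZE, cdb_size);
	wqe->contlen_cdbsize = cpu_to_le32(dword);
}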
+/* iSCSI WQE type */
enum iscsi_wqe_type {
ISCSI_WQE_TYPE_NORMAL,
ISCSI_WQE_TYPE_TASK_CLEANUP,
@@ -1244,6 +1404,7 @@ enum iscsi_wqe_type {
MAX_ISCSI_WQE_TYPE
};
+/* iSCSI xHQ element */
struct iscsi_xhqe {
union iscsi_ttt_txlen_union ttt_or_txlen;
__le32 exp_stat_sn;
@@ -1251,120 +1412,134 @@ struct iscsi_xhqe {
u8 total_ahs_length;
u8 opcode;
u8 flags;
-#define ISCSI_XHQE_FINAL_MASK 0x1
-#define ISCSI_XHQE_FINAL_SHIFT 0
-#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
-#define ISCSI_XHQE_NUM_SGES_MASK 0xF
-#define ISCSI_XHQE_NUM_SGES_SHIFT 2
-#define ISCSI_XHQE_RESERVED0_MASK 0x3
-#define ISCSI_XHQE_RESERVED0_SHIFT 6
+#define ISCSI_XHQE_FINAL_MASK 0x1
+#define ISCSI_XHQE_FINAL_SHIFT 0
+#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
+#define ISCSI_XHQE_NUM_SGES_MASK 0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT 2
+#define ISCSI_XHQE_RESERVED0_MASK 0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT 6
union iscsi_seq_num seq_num;
__le16 reserved1;
};
+/* Per PF iSCSI receive path statistics - mStorm RAM structure */
struct mstorm_iscsi_stats_drv {
struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+ struct regpair iscsi_rx_dup_ack_cnt;
};
+/* Per PF iSCSI transmit path statistics - pStorm RAM structure */
struct pstorm_iscsi_stats_drv {
struct regpair iscsi_tx_bytes_cnt;
struct regpair iscsi_tx_packet_cnt;
};
+/* Per PF iSCSI receive path statistics - tStorm RAM structure */
struct tstorm_iscsi_stats_drv {
struct regpair iscsi_rx_bytes_cnt;
struct regpair iscsi_rx_packet_cnt;
struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+ struct regpair iscsi_rx_tcp_payload_bytes_cnt;
+ struct regpair iscsi_rx_tcp_pkt_cnt;
+ struct regpair iscsi_rx_pure_ack_cnt;
__le32 iscsi_cmdq_threshold_cnt;
__le32 iscsi_rq_threshold_cnt;
__le32 iscsi_immq_threshold_cnt;
};
+/* Per PF iSCSI receive path statistics - uStorm RAM structure */
struct ustorm_iscsi_stats_drv {
struct regpair iscsi_rx_data_pdu_cnt;
struct regpair iscsi_rx_r2t_pdu_cnt;
struct regpair iscsi_rx_total_pdu_cnt;
};
+/* Per PF iSCSI transmit path statistics - xStorm RAM structure */
struct xstorm_iscsi_stats_drv {
struct regpair iscsi_tx_go_to_slow_start_event_cnt;
struct regpair iscsi_tx_fast_retransmit_event_cnt;
+ struct regpair iscsi_tx_pure_ack_cnt;
+ struct regpair iscsi_tx_delayed_ack_cnt;
};
+/* Per PF iSCSI transmit path statistics - yStorm RAM structure */
struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_data_pdu_cnt;
struct regpair iscsi_tx_r2t_pdu_cnt;
struct regpair iscsi_tx_total_pdu_cnt;
+ struct regpair iscsi_tx_tcp_payload_bytes_cnt;
+ struct regpair iscsi_tx_tcp_pkt_cnt;
};
-struct tstorm_iscsi_task_ag_ctx {
+struct e4_tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -1376,18 +1551,20 @@ struct tstorm_iscsi_task_ag_ctx {
__le32 reg1;
__le32 reg2;
};
+
+/* iSCSI doorbell data */
struct iscsi_db_data {
u8 params;
-#define ISCSI_DB_DATA_DEST_MASK 0x3
-#define ISCSI_DB_DATA_DEST_SHIFT 0
-#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
-#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
-#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
-#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
-#define ISCSI_DB_DATA_RESERVED_MASK 0x1
-#define ISCSI_DB_DATA_RESERVED_SHIFT 5
-#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ISCSI_DB_DATA_DEST_MASK 0x3
+#define ISCSI_DB_DATA_DEST_SHIFT 0
+#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
+#define ISCSI_DB_DATA_RESERVED_MASK 0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT 5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 sq_prod;
};
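/* Editorial sketch (not part of this change): a driver typically fills
 * struct iscsi_db_data once, then copies all four bytes to the doorbell BAR
 * whenever the SQ producer advances.  The helper name and address are
 * assumptions; real code also adds the required memory barriers.
 */
static inline void ex_ring_sq_doorbell(void __iomem *db_addr,
				       struct iscsi_db_data *db, u16 sq_prod)
{
	db->sq_prod = cpu_to_le16(sq_prod);
	writel(*(u32 *)db, db_addr);	/* params + agg_flags + sq_prod */
}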
diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h
index b8b3e1cfae90..c6cfd39cd910 100644
--- a/include/linux/qed/iwarp_common.h
+++ b/include/linux/qed/iwarp_common.h
@@ -29,9 +29,12 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__
+
#include <linux/qed/rdma_common.h>
+
/************************/
/* IWARP FW CONSTANTS */
/************************/
@@ -40,14 +43,14 @@
#define IWARP_PASSIVE_MODE 1
#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
-#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
-#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)
+#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
+#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)
-#define IWARP_MAX_QPS (64 * 1024)
+#define IWARP_MAX_QPS (64 * 1024)
#endif /* __IWARP_COMMON__ */
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index d60de4a39810..147d08ccf813 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -61,6 +61,35 @@ struct qed_txq_start_ret_params {
void *p_handle;
};
+enum qed_filter_config_mode {
+ QED_FILTER_CONFIG_MODE_DISABLE,
+ QED_FILTER_CONFIG_MODE_5_TUPLE,
+ QED_FILTER_CONFIG_MODE_L4_PORT,
+ QED_FILTER_CONFIG_MODE_IP_DEST,
+};
+
+struct qed_ntuple_filter_params {
+ /* DMA-mapped address of the buffer holding the header to be used as
+ * the filter.
+ */
+ dma_addr_t addr;
+
+ /* Length of header in bytes */
+ u16 length;
+
+ /* Relative queue-id to receive classified packet */
+#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
+ u16 qid;
+
+ /* The filter owner is identified either by vport-id or by vf-id */
+ bool b_is_vf;
+ u8 vport_id;
+ u8 vf_id;
+
+ /* True if this filter is to be added, false if it is to be removed */
+ bool b_is_add;
+};
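/* Editorial sketch (not part of this change): how a caller might fill the
 * new parameter block for the reworked ntuple_filter_config() op further
 * down in this header.  The wrapper itself is an assumption.
 */
static inline int ex_add_ntuple_filter(struct qed_dev *cdev,
				       const struct qed_eth_ops *ops,
				       dma_addr_t hdr_addr, u16 hdr_len,
				       u16 rxq_id)
{
	struct qed_ntuple_filter_params params = {
		.addr = hdr_addr,
		.length = hdr_len,
		.qid = rxq_id,	/* or QED_RFS_NTUPLE_QID_RSS */
		.b_is_vf = false,
		.vport_id = 0,
		.b_is_add = true,
	};

	return ops->ntuple_filter_config(cdev, NULL, &params);
}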
+
struct qed_dev_eth_info {
struct qed_dev_info common;
@@ -316,13 +345,12 @@ struct qed_eth_ops {
int (*tunn_config)(struct qed_dev *cdev,
struct qed_tunn_params *params);
- int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
- dma_addr_t mapping, u16 length,
- u16 vport_id, u16 rx_queue_id,
- bool add_filter);
+ int (*ntuple_filter_config)(struct qed_dev *cdev,
+ void *cookie,
+ struct qed_ntuple_filter_params *params);
int (*configure_arfs_searcher)(struct qed_dev *cdev,
- bool en_searcher);
+ enum qed_filter_config_mode mode);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
};
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index cc646ca97974..15e398c7230e 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -244,16 +244,11 @@ struct qed_fcoe_pf_params {
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
u64 glbl_q_params_addr;
- u64 bdq_pbl_base_addr[2];
- u32 max_cwnd;
+ u64 bdq_pbl_base_addr[3];
u16 cq_num_entries;
u16 cmdq_num_entries;
u32 two_msl_timer;
- u16 dup_ack_threshold;
u16 tx_sws_timer;
- u16 min_rto;
- u16 min_rto_rt;
- u16 max_rto;
/* The following parameters are used during HW-init
* and these parameters need to be passed as arguments
@@ -264,8 +259,8 @@ struct qed_iscsi_pf_params {
/* The following parameters are used during protocol-init */
u16 half_way_close_timeout;
- u16 bdq_xoff_threshold[2];
- u16 bdq_xon_threshold[2];
+ u16 bdq_xoff_threshold[3];
+ u16 bdq_xon_threshold[3];
u16 cmdq_xoff_threshold;
u16 cmdq_xon_threshold;
u16 rq_buffer_size;
@@ -281,10 +276,11 @@ struct qed_iscsi_pf_params {
u8 gl_cmd_pi;
u8 debug_mode;
u8 ll2_ooo_queue_id;
- u8 ooo_enable;
u8 is_target;
- u8 bdq_pbl_num_entries[2];
+ u8 is_soc_en;
+ u8 soc_num_of_blocks_log;
+ u8 bdq_pbl_num_entries[3];
};
struct qed_rdma_pf_params {
@@ -316,16 +312,16 @@ enum qed_int_mode {
};
struct qed_sb_info {
- struct status_block *sb_virt;
- dma_addr_t sb_phys;
- u32 sb_ack; /* Last given ack */
- u16 igu_sb_id;
- void __iomem *igu_addr;
- u8 flags;
-#define QED_SB_INFO_INIT 0x1
-#define QED_SB_INFO_SETUP 0x2
+ struct status_block_e4 *sb_virt;
+ dma_addr_t sb_phys;
+ u32 sb_ack; /* Last given ack */
+ u16 igu_sb_id;
+ void __iomem *igu_addr;
+ u8 flags;
+#define QED_SB_INFO_INIT 0x1
+#define QED_SB_INFO_SETUP 0x2
- struct qed_dev *cdev;
+ struct qed_dev *cdev;
};
enum qed_dev_type {
@@ -939,7 +935,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
u16 rc = 0;
prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
- STATUS_BLOCK_PROD_INDEX_MASK;
+ STATUS_BLOCK_E4_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= QED_SB_IDX;
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 111e606a74c8..d0df1bec5357 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -102,7 +102,6 @@ struct qed_iscsi_params_offload {
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
- u32 ts_time;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
@@ -124,7 +123,6 @@ struct qed_iscsi_params_offload {
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
- u32 ts_ticks_per_second;
u16 da_timeout_value;
u8 ack_frequency;
};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index e755954d85fd..266c1fb45387 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data {
u32 opaque_data_1;
/* GSI only */
- u32 gid_dst[4];
+ u32 src_qp;
u16 qp_id;
union {
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index a9b3050f469c..c1a446ebe362 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -32,28 +32,29 @@
#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
+
/************************/
/* RDMA FW CONSTANTS */
/************************/
-#define RDMA_RESERVED_LKEY (0)
-#define RDMA_RING_PAGE_SIZE (0x1000)
+#define RDMA_RESERVED_LKEY (0)
+#define RDMA_RING_PAGE_SIZE (0x1000)
-#define RDMA_MAX_SGE_PER_SQ_WQE (4)
-#define RDMA_MAX_SGE_PER_RQ_WQE (4)
+#define RDMA_MAX_SGE_PER_SQ_WQE (4)
+#define RDMA_MAX_SGE_PER_RQ_WQE (4)
#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000)
-#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
-#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
+#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
+#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
-#define RDMA_MAX_CQS (64 * 1024)
-#define RDMA_MAX_TIDS (128 * 1024 - 1)
-#define RDMA_MAX_PDS (64 * 1024)
+#define RDMA_MAX_CQS (64 * 1024)
+#define RDMA_MAX_TIDS (128 * 1024 - 1)
+#define RDMA_MAX_PDS (64 * 1024)
-#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
-#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index fe6a33e45977..e15e0da71240 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -33,13 +33,18 @@
#ifndef __ROCE_COMMON__
#define __ROCE_COMMON__
-#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
-#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+/************************/
+/* ROCE FW CONSTANTS */
+/************************/
-#define ROCE_MAX_QPS (32 * 1024)
-#define ROCE_DCQCN_NP_MAX_QPS (64)
-#define ROCE_DCQCN_RP_MAX_QPS (64)
+#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
+#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+#define ROCE_MAX_QPS (32 * 1024)
+#define ROCE_DCQCN_NP_MAX_QPS (64)
+#define ROCE_DCQCN_RP_MAX_QPS (64)
+
+/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
ROCE_ASYNC_EVENT_NONE = 0,
ROCE_ASYNC_EVENT_COMM_EST = 1,
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 08df82a096b6..505c0b48a761 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -33,43 +33,77 @@
#ifndef __STORAGE_COMMON__
#define __STORAGE_COMMON__
-#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
-#define BDQ_NUM_RESOURCES (4)
-
-#define BDQ_ID_RQ (0)
-#define BDQ_ID_IMM_DATA (1)
-#define BDQ_NUM_IDS (2)
-
-#define SCSI_NUM_SGES_SLOW_SGL_THR 8
+/*********************/
+/* SCSI CONSTANTS */
+/*********************/
+
+#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2)
+#define BDQ_NUM_RESOURCES (4)
+
+#define BDQ_ID_RQ (0)
+#define BDQ_ID_IMM_DATA (1)
+#define BDQ_ID_TQ (2)
+#define BDQ_NUM_IDS (3)
+
+#define SCSI_NUM_SGES_SLOW_SGL_THR 8
+
+#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)
+
+/* SCSI op codes */
+#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89)
+#define SCSI_OPCODE_READ_10 (0x28)
+#define SCSI_OPCODE_WRITE_6 (0x0A)
+#define SCSI_OPCODE_WRITE_10 (0x2A)
+#define SCSI_OPCODE_WRITE_12 (0xAA)
+#define SCSI_OPCODE_WRITE_16 (0x8A)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E)
+
+/* iSCSI Drv opaque */
+struct iscsi_drv_opaque {
+ __le16 reserved_zero[3];
+ __le16 opaque;
+};
-#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
+/* Scsi 2B/8B opaque union */
+union scsi_opaque {
+ struct regpair fcoe_opaque;
+ struct iscsi_drv_opaque iscsi_opaque;
+};
+/* SCSI buffer descriptor */
struct scsi_bd {
struct regpair address;
- struct regpair opaque;
+ union scsi_opaque opaque;
};
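/* Editorial sketch (not part of this change): with the opaque field now a
 * union, a BD is stamped per protocol; for iSCSI only the 2-byte opaque is
 * meaningful and the remaining words must stay zero.
 */
static inline void ex_fill_iscsi_bd(struct scsi_bd *bd, struct regpair addr,
				    u16 opaque)
{
	bd->address = addr;
	memset(&bd->opaque, 0, sizeof(bd->opaque));
	bd->opaque.iscsi_opaque.opaque = cpu_to_le16(opaque);
}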
+/* Scsi Drv BDQ struct */
struct scsi_bdq_ram_drv_data {
__le16 external_producer;
__le16 reserved0[3];
};
+/* SCSI SGE entry */
struct scsi_sge {
struct regpair sge_addr;
__le32 sge_len;
__le32 reserved;
};
+/* Cached SGEs section */
struct scsi_cached_sges {
struct scsi_sge sge[4];
};
+/* Scsi Drv CMDQ struct */
struct scsi_drv_cmdq {
__le16 cmdq_cons;
__le16 reserved0;
__le32 reserved1;
};
+/* Common SCSI init params passed by driver to FW in function init ramrod */
struct scsi_init_func_params {
__le16 num_tasks;
u8 log_page_size;
@@ -77,6 +111,7 @@ struct scsi_init_func_params {
u8 reserved2[12];
};
+/* SCSI RQ/CQ/CMDQ firmware function init parameters */
struct scsi_init_func_queues {
struct regpair glbl_q_params_addr;
__le16 rq_buffer_size;
@@ -84,39 +119,45 @@ struct scsi_init_func_queues {
__le16 cmdq_num_entries;
u8 bdq_resource_id;
u8 q_validity;
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5
+ __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
u8 num_queues;
u8 queue_relative_offset;
u8 cq_sb_pi;
u8 cmdq_sb_pi;
- __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
- __le16 reserved0;
u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+ u8 reserved1;
struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
__le16 bdq_xoff_threshold[BDQ_NUM_IDS];
- __le16 bdq_xon_threshold[BDQ_NUM_IDS];
__le16 cmdq_xoff_threshold;
+ __le16 bdq_xon_threshold[BDQ_NUM_IDS];
__le16 cmdq_xon_threshold;
- __le32 reserved1;
};
+/* Scsi Drv BDQ Data struct (3 BDQ IDs: 0 - RQ, 1 - Immediate Data, 2 - TQ) */
struct scsi_ram_per_bdq_resource_drv_data {
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};
+/* SCSI SGL types */
enum scsi_sgl_mode {
SCSI_TX_SLOW_SGL,
SCSI_FAST_SGL,
MAX_SCSI_SGL_MODE
};
+/* SCSI SGL parameters */
struct scsi_sgl_params {
struct regpair sgl_addr;
__le32 sgl_total_length;
@@ -126,10 +167,16 @@ struct scsi_sgl_params {
u8 reserved;
};
+/* SCSI terminate connection params */
struct scsi_terminate_extra_params {
__le16 unsolicited_cq_count;
__le16 cmdq_count;
u8 reserved[4];
};
+/* SCSI Task Queue Element */
+struct scsi_tqe {
+ __le16 itid;
+};
+
#endif /* __STORAGE_COMMON__ */
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index dbf7a43c3e1f..4a4845193539 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -33,8 +33,13 @@
#ifndef __TCP_COMMON__
#define __TCP_COMMON__
-#define TCP_INVALID_TIMEOUT_VAL -1
+/********************/
+/* TCP FW CONSTANTS */
+/********************/
+#define TCP_INVALID_TIMEOUT_VAL -1
+
+/* OOO opaque data received from LL2 */
struct ooo_opaque {
__le32 cid;
u8 drop_isle;
@@ -43,25 +48,29 @@ struct ooo_opaque {
u8 ooo_isle;
};
+/* tcp connect mode enum */
enum tcp_connect_mode {
TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE,
MAX_TCP_CONNECT_MODE
};
+/* tcp function init parameters */
struct tcp_init_params {
__le32 two_msl_timer;
__le16 tx_sws_timer;
- u8 maxfinrt;
+ u8 max_fin_rt;
u8 reserved[9];
};
+/* tcp IPv4/IPv6 enum */
enum tcp_ip_version {
TCP_IPV4,
TCP_IPV6,
MAX_TCP_IP_VERSION
};
+/* tcp offload parameters */
struct tcp_offload_params {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
@@ -70,24 +79,29 @@ struct tcp_offload_params {
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
- u8 flags;
-#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
-#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
-#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
-#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
+ __le16 flags;
+#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8
+#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F
+#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9
u8 ip_version;
+ u8 reserved0[3];
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
@@ -99,17 +113,21 @@ struct tcp_offload_params {
u8 rcv_wnd_scale;
u8 connect_mode;
__le16 srtt;
- __le32 cwnd;
__le32 ss_thresh;
- __le16 reserved1;
+ __le32 rcv_wnd;
+ __le32 cwnd;
u8 ka_max_probe_cnt;
u8 dup_ack_theshold;
+ __le16 reserved1;
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 initial_rcv_wnd;
__le32 rcv_next;
__le32 snd_una;
__le32 snd_next;
__le32 snd_max;
__le32 snd_wnd;
- __le32 rcv_wnd;
__le32 snd_wl1;
__le32 ts_recent;
__le32 ts_recent_age;
@@ -122,16 +140,13 @@ struct tcp_offload_params {
u8 rt_cnt;
__le16 rtt_var;
__le16 fw_internal;
- __le32 ka_timeout;
- __le32 ka_interval;
- __le32 max_rt_time;
- __le32 initial_rcv_wnd;
u8 snd_wnd_scale;
u8 ack_frequency;
__le16 da_timeout_value;
- __le32 reserved3[2];
+ __le32 reserved3;
};
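/* Editorial sketch (not part of this change): with flags widened from u8 to
 * __le16, the new ECN bits are packed through a CPU-order temporary before
 * the endianness conversion.  EX_SET_FIELD() is the hypothetical helper from
 * the earlier sketch.
 */
static inline void ex_set_tcp_flags(struct tcp_offload_params *p,
				    bool ts_en, bool ecn_sender)
{
	u16 flags = 0;

	EX_SET_FIELD(flags, TCP_OFFLOAD_PARAMS_TS_EN, ts_en ? 1 : 0);
	EX_SET_FIELD(flags, TCP_OFFLOAD_PARAMS_ECN_SENDER_EN,
		     ecn_sender ? 1 : 0);
	p->flags = cpu_to_le16(flags);
}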
+/* tcp offload parameters (option 2) */
struct tcp_offload_params_opt2 {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
@@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 {
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
- u8 flags;
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
+ __le16 flags;
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4
u8 ip_version;
+ u8 reserved1[3];
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
@@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 {
__le16 syn_ip_payload_length;
__le32 syn_phy_addr_lo;
__le32 syn_phy_addr_hi;
- __le32 reserved1[22];
+ __le32 cwnd;
+ u8 ka_max_probe_cnt;
+ u8 reserved2[3];
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 reserved3[16];
};
+/* tcp segment placement event enum */
enum tcp_seg_placement_event {
TCP_EVENT_ADD_PEN,
TCP_EVENT_ADD_NEW_ISLE,
@@ -177,40 +202,41 @@ enum tcp_seg_placement_event {
MAX_TCP_SEG_PLACEMENT_EVENT
};
+/* tcp update parameters */
struct tcp_update_params {
__le16 flags;
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
-#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
-#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
-#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
+#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
+#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
+#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
@@ -226,6 +252,7 @@ struct tcp_update_params {
u8 reserved1[7];
};
+/* toe upload parameters */
struct tcp_upload_params {
__le32 rcv_next;
__le32 snd_una;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a6ddc42f87a5..043d04784675 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -197,7 +197,7 @@ static inline void exit_tasks_rcu_finish(void) { }
#define cond_resched_rcu_qs() \
do { \
if (!cond_resched()) \
- rcu_note_voluntary_context_switch(current); \
+ rcu_note_voluntary_context_switch_lite(current); \
} while (0)
/*
@@ -433,12 +433,12 @@ static inline void rcu_preempt_sleep_check(void) { }
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL. Although rcu_access_pointer() may also be used in cases where
- * update-side locks prevent the value of the pointer from changing, you
- * should instead use rcu_dereference_protected() for this use case.
+ * lockdep checks for being in an RCU read-side critical section. This is
+ * useful when the value of this pointer is accessed, but the pointer is
+ * not dereferenced, for example, when testing an RCU-protected pointer
+ * against NULL. Although rcu_access_pointer() may also be used in cases
+ * where update-side locks prevent the value of the pointer from changing,
+ * you should instead use rcu_dereference_protected() for this use case.
*
* It is also permissible to use rcu_access_pointer() when read-side
* access to the pointer was removed at least one grace period ago, as
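
For reference, a minimal sketch (not part of the patch; "struct foo" and
the global "gp" are hypothetical) of the NULL-test pattern the comment
describes:

	struct foo __rcu *gp;	/* RCU-protected pointer */

	static bool foo_present(void)
	{
		/* The value is only compared, never dereferenced. */
		return rcu_access_pointer(gp) != NULL;
	}
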
@@ -521,12 +521,11 @@ static inline void rcu_preempt_sleep_check(void) { }
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(). This
- * is useful in cases where update-side locks prevent the value of the
- * pointer from changing. Please note that this primitive does *not*
- * prevent the compiler from repeating this reference or combining it
- * with other references, so it should not be used without protection
- * of appropriate locks.
+ * the READ_ONCE(). This is useful in cases where update-side locks
+ * prevent the value of the pointer from changing. Please note that this
+ * primitive does *not* prevent the compiler from repeating this reference
+ * or combining it with other references, so it should not be used without
+ * protection of appropriate locks.
*
* This function is only for update-side use. Using this function
* when protected only by rcu_read_lock() will result in infrequent
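
A minimal sketch of the update-side idiom this comment documents,
assuming a hypothetical "foo_ptr" protected by "foo_lock":

	static DEFINE_SPINLOCK(foo_lock);
	static struct foo __rcu *foo_ptr;

	/* Caller must hold foo_lock. */
	static struct foo *foo_deref_locked(void)
	{
		return rcu_dereference_protected(foo_ptr,
						 lockdep_is_held(&foo_lock));
	}
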
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b3dbf9502fd0..ce9beec35e34 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
-static inline bool rcu_irq_enter_disabled(void) { return false; }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 37d6fd3b7ff8..fd996cdf1833 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -85,7 +85,6 @@ void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
-bool rcu_irq_enter_disabled(void);
void exit_rcu(void);
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index e8286585e149..4193c41e383a 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
/**
- * refcount_t - variant of atomic_t specialized for reference counts
+ * struct refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at UINT_MAX and will not move once
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index b2207737a159..6a3aeba40e9e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -31,6 +31,7 @@ struct regmap;
struct regmap_range_cfg;
struct regmap_field;
struct snd_ac97;
+struct sdw_slave;
/* An enum of all the supported cache types */
enum regcache_type {
@@ -265,6 +266,9 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @disable_locking: This regmap is either protected by external means or
+ * is guaranteed not to be accessed from multiple threads.
+ * Don't use any locking mechanisms.
* @lock: Optional lock callback (overrides regmap's default lock
* function, based on spinlock or mutex).
* @unlock: As above for unlocking.
@@ -297,7 +301,10 @@ typedef void (*regmap_unlock)(void *);
* a read.
* @write_flag_mask: Mask to be set in the top bytes of the register when doing
* a write. If both read_flag_mask and write_flag_mask are
- * empty the regmap_bus default masks are used.
+ * empty and zero_flag_mask is not set the regmap_bus default
+ * masks are used.
+ * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
+ * if they are both empty.
* @use_single_rw: If set, converts the bulk read and write operations into
* a series of single read and write operations. This is useful
* for device that does not support bulk read and write.
@@ -318,6 +325,7 @@ typedef void (*regmap_unlock)(void *);
*
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
+ * @use_hwlock: Indicate if a hardware spinlock should be used.
* @hwlock_id: Specify the hardware spinlock id.
* @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
* HWLOCK_IRQ or 0.
@@ -334,6 +342,8 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+
+ bool disable_locking;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg;
@@ -356,6 +366,7 @@ struct regmap_config {
unsigned long read_flag_mask;
unsigned long write_flag_mask;
+ bool zero_flag_mask;
bool use_single_rw;
bool can_multi_write;
@@ -366,6 +377,7 @@ struct regmap_config {
const struct regmap_range_cfg *ranges;
unsigned int num_ranges;
+ bool use_hwlock;
unsigned int hwlock_id;
unsigned int hwlock_mode;
};
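
To illustrate the new regmap_config fields, a hedged sketch of a
hypothetical configuration (field values are examples only):

	static const struct regmap_config foo_regmap_cfg = {
		.reg_bits = 8,
		.val_bits = 8,
		/* Access is serialized externally, so skip regmap locking. */
		.disable_locking = true,
		/* Force the all-zero flag masks instead of bus defaults. */
		.zero_flag_mask = true,
		/* Alternatively, serialize through a hardware spinlock: */
		/* .use_hwlock = true, .hwlock_id = 0, */
	};
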
@@ -529,6 +541,10 @@ struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -566,6 +582,10 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
@@ -728,6 +748,20 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
/**
+ * regmap_init_sdw() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
+ sdw, config)
+
+
+/**
* devm_regmap_init() - Initialise managed register map
*
* @dev: Device that will be interacted with
@@ -857,6 +891,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
ac97, config)
+/**
+ * devm_regmap_init_sdw() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
+ sdw, config)
+
void regmap_exit(struct regmap *map);
int regmap_reinit_cache(struct regmap *map,
const struct regmap_config *config);
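
A sketch of how a SoundWire slave driver might use the new helper in
probe; "foo_sdw_probe" and the register sizes are assumptions:

	static const struct regmap_config foo_sdw_regmap_cfg = {
		.reg_bits = 32,
		.val_bits = 8,
	};

	static int foo_sdw_probe(struct sdw_slave *slave)
	{
		struct regmap *regmap;

		regmap = devm_regmap_init_sdw(slave, &foo_sdw_regmap_cfg);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		/* regmap_read()/regmap_write() work as usual from here. */
		return 0;
	}
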
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 94417b4226bd..4c00486b7a78 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -214,6 +214,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+ int (*resume_early)(struct regulator_dev *rdev);
+
int (*set_pull_down) (struct regulator_dev *);
};
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 9cd4fef37203..93a04893c739 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -42,6 +42,16 @@ struct regulator;
#define REGULATOR_CHANGE_DRMS 0x10
#define REGULATOR_CHANGE_BYPASS 0x20
+/*
+ * operations in suspend mode
+ * DO_NOTHING_IN_SUSPEND - the default value
+ * DISABLE_IN_SUSPEND - turn off regulator in suspend states
+ * ENABLE_IN_SUSPEND - keep regulator on in suspend states
+ */
+#define DO_NOTHING_IN_SUSPEND (-1)
+#define DISABLE_IN_SUSPEND 0
+#define ENABLE_IN_SUSPEND 1
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -56,16 +66,24 @@ enum regulator_active_discharge {
* state. One of enabled or disabled must be set for the
* configuration to be applied.
*
- * @uV: Operating voltage during suspend.
+ * @uV: Default operating voltage during suspend, which can be adjusted
+ * within the <min_uV, max_uV> range.
+ * @min_uV: Minimum suspend voltage that may be set.
+ * @max_uV: Maximum suspend voltage that may be set.
* @mode: Operating mode during suspend.
- * @enabled: Enabled during suspend.
- * @disabled: Disabled during suspend.
+ * @enabled: Operation during suspend, one of:
+ * - DO_NOTHING_IN_SUSPEND
+ * - DISABLE_IN_SUSPEND
+ * - ENABLE_IN_SUSPEND
+ * @changeable: Whether this state can be switched between enabled and disabled.
*/
struct regulator_state {
- int uV; /* suspend voltage */
- unsigned int mode; /* suspend regulator operating mode */
- int enabled; /* is regulator enabled in this suspend state */
- int disabled; /* is the regulator disabled in this suspend state */
+ int uV;
+ int min_uV;
+ int max_uV;
+ unsigned int mode;
+ int enabled;
+ bool changeable;
};
/**
@@ -225,12 +243,12 @@ struct regulator_init_data {
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
-int regulator_suspend_prepare(suspend_state_t state);
-int regulator_suspend_finish(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
+#endif
+
static inline int regulator_suspend_prepare(suspend_state_t state)
{
return 0;
@@ -239,6 +257,5 @@ static inline int regulator_suspend_finish(void)
{
return 0;
}
-#endif
#endif
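
A sketch of the reworked suspend-state description, keeping a
hypothetical 1.8V rail powered during suspend:

	static struct regulator_state foo_mem_state = {
		.uV = 1800000,
		.min_uV = 1700000,
		.max_uV = 1900000,
		.enabled = ENABLE_IN_SUSPEND,
		.changeable = true,	/* may be toggled at runtime */
	};
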
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 361c08e35dbc..c9df2527e0cd 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -207,6 +207,7 @@ struct rhashtable_iter {
struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
+ bool end_of_table;
};
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
@@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}
-static inline unsigned int rht_key_hashfn(
- struct rhashtable *ht, const struct bucket_table *tbl,
- const void *key, const struct rhashtable_params params)
+static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
+ const void *key, const struct rhashtable_params params,
+ unsigned int hash_rnd)
{
unsigned int hash;
/* params must be equal to ht->p if it isn't constant. */
if (!__builtin_constant_p(params.key_len))
- hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+ hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
else if (params.key_len) {
unsigned int key_len = params.key_len;
if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ hash = params.hashfn(key, key_len, hash_rnd);
else if (key_len & (sizeof(u32) - 1))
- hash = jhash(key, key_len, tbl->hash_rnd);
+ hash = jhash(key, key_len, hash_rnd);
else
- hash = jhash2(key, key_len / sizeof(u32),
- tbl->hash_rnd);
+ hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
} else {
unsigned int key_len = ht->p.key_len;
if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ hash = params.hashfn(key, key_len, hash_rnd);
else
- hash = jhash(key, key_len, tbl->hash_rnd);
+ hash = jhash(key, key_len, hash_rnd);
}
+ return hash;
+}
+
+static inline unsigned int rht_key_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const void *key, const struct rhashtable_params params)
+{
+ unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
+
return rht_bucket_index(tbl, hash);
}
@@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+
+static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+ (void)rhashtable_walk_start_check(iter);
+}
+
void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
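
With rhashtable_walk_start() now returning void, the canonical walk
loop looks like this sketch (hashtable "ht" and element type "foo" are
placeholders):

	struct rhashtable_iter iter;
	struct foo *obj;

	rhashtable_walk_enter(&ht, &iter);
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;	/* -EAGAIN: table was resized */
		/* ... use obj ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
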
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 289e4d54e3e0..7d9eb39fa76a 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -96,7 +96,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
})
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
-int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 10d6ae8bbb7d..ca07366c4c33 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -157,7 +157,7 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len);
-unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
+__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
#else
@@ -258,7 +258,7 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
return -ENXIO;
}
-static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept,
+static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
/* This shouldn't be possible */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2032ce2eb20b..1fdcde96eb65 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -19,10 +19,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
- gfp_t flags, int *new_nsid);
+ gfp_t flags, int *new_nsid, int new_ifindex);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned change, u32 event,
- gfp_t flags, int *new_nsid);
+ gfp_t flags, int *new_nsid,
+ int new_ifindex);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
gfp_t flags);
@@ -70,8 +71,7 @@ static inline bool lockdep_rtnl_is_held(void)
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds RTNL.
+ * the READ_ONCE(), because caller holds RTNL.
*/
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())
@@ -97,13 +97,9 @@ void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
-#define ASSERT_RTNL() do { \
- if (unlikely(!rtnl_is_locked())) { \
- printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
- __FILE__, __LINE__); \
- dump_stack(); \
- } \
-} while(0)
+#define ASSERT_RTNL() \
+ WARN_ONCE(!rtnl_is_locked(), \
+ "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
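
Usage is unchanged; the sketch below shows the assertion in a
hypothetical ndo handler. The WARN_ONCE() form warns once with a
backtrace rather than printing on every violation:

	static int foo_ndo_change_mtu(struct net_device *dev, int new_mtu)
	{
		ASSERT_RTNL();	/* caller must hold the RTNL mutex */
		dev->mtu = new_mtu;
		return 0;
	}
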
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/rtsx_common.h
index 443176ee1ab0..443176ee1ab0 100644
--- a/include/linux/mfd/rtsx_common.h
+++ b/include/linux/rtsx_common.h
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/rtsx_pci.h
index c3d3f04d8cc6..478acf6efac6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -24,7 +24,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
-#include <linux/mfd/rtsx_common.h>
+#include <linux/rtsx_common.h>
#define MAX_RW_REG_CNT 1024
@@ -203,6 +203,7 @@
#define SD_DDR_MODE 0x04
#define SD_30_MODE 0x08
#define SD_CLK_DIVIDE_MASK 0xC0
+#define SD_MODE_SELECT_MASK 0x0C
#define SD_CFG2 0xFDA1
#define SD_CALCULATE_CRC7 0x00
#define SD_NO_CALCULATE_CRC7 0x80
@@ -226,6 +227,7 @@
#define SD_RSP_TYPE_R6 0x01
#define SD_RSP_TYPE_R7 0x01
#define SD_CFG3 0xFDA2
+#define SD30_CLK_END_EN 0x10
#define SD_RSP_80CLK_TIMEOUT_EN 0x01
#define SD_STAT1 0xFDA3
@@ -309,6 +311,12 @@
#define SD_DATA_STATE 0xFDB6
#define SD_DATA_IDLE 0x80
+#define REG_SD_STOP_SDCLK_CFG 0xFDB8
+#define SD30_CLK_STOP_CFG_EN 0x04
+#define SD30_CLK_STOP_CFG1 0x02
+#define SD30_CLK_STOP_CFG0 0x01
+#define REG_PRE_RW_MODE 0xFD70
+#define EN_INFINITE_MODE 0x01
#define SRCTL 0xFC13
@@ -434,6 +442,7 @@
#define CARD_CLK_EN 0xFD69
#define SD_CLK_EN 0x04
#define MS_CLK_EN 0x08
+#define SD40_CLK_EN 0x10
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
#define CD_DISABLE_MASK 0x07
@@ -453,8 +462,8 @@
#define FPDCTL 0xFC00
#define SSC_POWER_DOWN 0x01
#define SD_OC_POWER_DOWN 0x02
-#define ALL_POWER_DOWN 0x07
-#define OC_POWER_DOWN 0x06
+#define ALL_POWER_DOWN 0x03
+#define OC_POWER_DOWN 0x02
#define PDINFO 0xFC01
#define CLK_CTL 0xFC02
@@ -490,6 +499,9 @@
#define FPGA_PULL_CTL 0xFC1D
#define OLT_LED_CTL 0xFC1E
+#define LED_SHINE_MASK 0x08
+#define LED_SHINE_EN 0x08
+#define LED_SHINE_DISABLE 0x00
#define GPIO_CTL 0xFC1F
#define LDO_CTL 0xFC1E
@@ -511,7 +523,11 @@
#define BPP_LDO_ON 0x00
#define BPP_LDO_SUSPEND 0x02
#define BPP_LDO_OFF 0x03
+#define EFUSE_CTL 0xFC30
+#define EFUSE_ADD 0xFC31
#define SYS_VER 0xFC32
+#define EFUSE_DATAL 0xFC34
+#define EFUSE_DATAH 0xFC35
#define CARD_PULL_CTL1 0xFD60
#define CARD_PULL_CTL2 0xFD61
@@ -553,6 +569,9 @@
#define RBBC1 0xFE2F
#define RBDAT 0xFE30
#define RBCTL 0xFE34
+#define U_AUTO_DMA_EN_MASK 0x20
+#define U_AUTO_DMA_DISABLE 0x00
+#define RB_FLUSH 0x80
#define CFGADDR0 0xFE35
#define CFGADDR1 0xFE36
#define CFGDATA0 0xFE37
@@ -581,6 +600,8 @@
#define LTR_LATENCY_MODE_HW 0
#define LTR_LATENCY_MODE_SW BIT(6)
#define OBFF_CFG 0xFE4C
+#define OBFF_EN_MASK 0x03
+#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
@@ -595,6 +616,7 @@
#define FORCE_ASPM_L0_EN 0x01
#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
+#define CLK_PM_EN 0x01
#define FUNC_FORCE_CTL 0xFE59
#define FUNC_FORCE_UPME_XMT_DBG 0x02
#define PERST_GLITCH_WIDTH 0xFE5C
@@ -620,14 +642,23 @@
#define LDO_PWR_SEL 0xFE78
#define L1SUB_CONFIG1 0xFE8D
+#define AUX_CLK_ACTIVE_SEL_MASK 0x01
+#define MAC_CKSW_DONE 0x00
#define L1SUB_CONFIG2 0xFE8E
#define L1SUB_AUTO_CFG 0x02
#define L1SUB_CONFIG3 0xFE8F
#define L1OFF_MBIAS2_EN_5250 BIT(7)
#define DUMMY_REG_RESET_0 0xFE90
+#define IC_VERSION_MASK 0x0F
+#define REG_VREF 0xFE97
+#define PWD_SUSPND_EN 0x10
+#define RTS5260_DMA_RST_CTL_0 0xFEBF
+#define RTS5260_DMA_RST 0x80
+#define RTS5260_ADMA3_RST 0x40
#define AUTOLOAD_CFG_BASE 0xFF00
+#define RELINK_TIME_MASK 0x01
#define PETXCFG 0xFF03
#define FORCE_CLKREQ_DELINK_MASK BIT(7)
#define FORCE_CLKREQ_LOW 0x80
@@ -667,15 +698,24 @@
#define LDO_DV18_CFG 0xFF70
#define LDO_DV18_SR_MASK 0xC0
#define LDO_DV18_SR_DF 0x40
+#define DV331812_MASK 0x70
+#define DV331812_33 0x70
+#define DV331812_17 0x30
#define LDO_CONFIG2 0xFF71
#define LDO_D3318_MASK 0x07
#define LDO_D3318_33V 0x07
#define LDO_D3318_18V 0x02
+#define DV331812_VDD1 0x04
+#define DV331812_POWERON 0x08
+#define DV331812_POWEROFF 0x00
#define LDO_VCC_CFG0 0xFF72
#define LDO_VCC_LMTVTH_MASK 0x30
#define LDO_VCC_LMTVTH_2A 0x10
+/*RTS5260*/
+#define RTS5260_DVCC_TUNE_MASK 0x70
+#define RTS5260_DVCC_33 0x70
#define LDO_VCC_CFG1 0xFF73
#define LDO_VCC_REF_TUNE_MASK 0x30
@@ -684,6 +724,10 @@
#define LDO_VCC_1V8 0x04
#define LDO_VCC_3V3 0x07
#define LDO_VCC_LMT_EN 0x08
+/*RTS5260*/
+#define LDO_POW_SDVDD1_MASK 0x08
+#define LDO_POW_SDVDD1_ON 0x08
+#define LDO_POW_SDVDD1_OFF 0x00
#define LDO_VIO_CFG 0xFF75
#define LDO_VIO_SR_MASK 0xC0
@@ -711,6 +755,160 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
+#define RTS5260_AUTOLOAD_CFG4 0xFF7F
+#define RTS5260_MIMO_DISABLE 0x8A
+
+#define RTS5260_REG_GPIO_CTL0 0xFC1A
+#define RTS5260_REG_GPIO_MASK 0x01
+#define RTS5260_REG_GPIO_ON 0x01
+#define RTS5260_REG_GPIO_OFF 0x00
+
+#define PWR_GLOBAL_CTRL 0xF200
+#define PCIE_L1_2_EN 0x0C
+#define PCIE_L1_1_EN 0x0A
+#define PCIE_L1_0_EN 0x09
+#define PWR_FE_CTL 0xF201
+#define PCIE_L1_2_PD_FE_EN 0x0C
+#define PCIE_L1_1_PD_FE_EN 0x0A
+#define PCIE_L1_0_PD_FE_EN 0x09
+#define CFG_PCIE_APHY_OFF_0 0xF204
+#define CFG_PCIE_APHY_OFF_0_DEFAULT 0xBF
+#define CFG_PCIE_APHY_OFF_1 0xF205
+#define CFG_PCIE_APHY_OFF_1_DEFAULT 0xFF
+#define CFG_PCIE_APHY_OFF_2 0xF206
+#define CFG_PCIE_APHY_OFF_2_DEFAULT 0x01
+#define CFG_PCIE_APHY_OFF_3 0xF207
+#define CFG_PCIE_APHY_OFF_3_DEFAULT 0x00
+#define CFG_L1_0_PCIE_MAC_RET_VALUE 0xF20C
+#define CFG_L1_0_PCIE_DPHY_RET_VALUE 0xF20E
+#define CFG_L1_0_SYS_RET_VALUE 0xF210
+#define CFG_L1_0_CRC_MISC_RET_VALUE 0xF212
+#define CFG_L1_0_CRC_SD30_RET_VALUE 0xF214
+#define CFG_L1_0_CRC_SD40_RET_VALUE 0xF216
+#define CFG_LP_FPWM_VALUE 0xF219
+#define CFG_LP_FPWM_VALUE_DEFAULT 0x18
+#define PWC_CDR 0xF253
+#define PWC_CDR_DEFAULT 0x03
+#define CFG_L1_0_RET_VALUE_DEFAULT 0x1B
+#define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT 0x0C
+
+/* OCPCTL */
+#define SD_DETECT_EN 0x08
+#define SD_OCP_INT_EN 0x04
+#define SD_OCP_INT_CLR 0x02
+#define SD_OC_CLR 0x01
+
+#define SDVIO_DETECT_EN (1 << 7)
+#define SDVIO_OCP_INT_EN (1 << 6)
+#define SDVIO_OCP_INT_CLR (1 << 5)
+#define SDVIO_OC_CLR (1 << 4)
+
+/* OCPSTAT */
+#define SD_OCP_DETECT 0x08
+#define SD_OC_NOW 0x04
+#define SD_OC_EVER 0x02
+
+#define SDVIO_OC_NOW (1 << 6)
+#define SDVIO_OC_EVER (1 << 5)
+
+#define REG_OCPCTL 0xFD6A
+#define REG_OCPSTAT 0xFD6E
+#define REG_OCPGLITCH 0xFD6C
+#define REG_OCPPARA1 0xFD6B
+#define REG_OCPPARA2 0xFD6D
+
+/* rts5260 DV3318 OCP-related registers */
+#define REG_DV3318_OCPCTL 0xFD89
+#define DV3318_OCP_TIME_MASK 0xF0
+#define DV3318_DETECT_EN 0x08
+#define DV3318_OCP_INT_EN 0x04
+#define DV3318_OCP_INT_CLR 0x02
+#define DV3318_OCP_CLR 0x01
+
+#define REG_DV3318_OCPSTAT 0xFD8A
+#define DV3318_OCP_GlITCH_TIME_MASK 0xF0
+#define DV3318_OCP_DETECT 0x08
+#define DV3318_OCP_NOW 0x04
+#define DV3318_OCP_EVER 0x02
+
+#define SD_OCP_GLITCH_MASK 0x0F
+
+/* OCPPARA1 */
+#define SDVIO_OCP_TIME_60 0x00
+#define SDVIO_OCP_TIME_100 0x10
+#define SDVIO_OCP_TIME_200 0x20
+#define SDVIO_OCP_TIME_400 0x30
+#define SDVIO_OCP_TIME_600 0x40
+#define SDVIO_OCP_TIME_800 0x50
+#define SDVIO_OCP_TIME_1100 0x60
+#define SDVIO_OCP_TIME_MASK 0x70
+
+#define SD_OCP_TIME_60 0x00
+#define SD_OCP_TIME_100 0x01
+#define SD_OCP_TIME_200 0x02
+#define SD_OCP_TIME_400 0x03
+#define SD_OCP_TIME_600 0x04
+#define SD_OCP_TIME_800 0x05
+#define SD_OCP_TIME_1100 0x06
+#define SD_OCP_TIME_MASK 0x07
+
+/* OCPPARA2 */
+#define SDVIO_OCP_THD_190 0x00
+#define SDVIO_OCP_THD_250 0x10
+#define SDVIO_OCP_THD_320 0x20
+#define SDVIO_OCP_THD_380 0x30
+#define SDVIO_OCP_THD_440 0x40
+#define SDVIO_OCP_THD_500 0x50
+#define SDVIO_OCP_THD_570 0x60
+#define SDVIO_OCP_THD_630 0x70
+#define SDVIO_OCP_THD_MASK 0x70
+
+#define SD_OCP_THD_450 0x00
+#define SD_OCP_THD_550 0x01
+#define SD_OCP_THD_650 0x02
+#define SD_OCP_THD_750 0x03
+#define SD_OCP_THD_850 0x04
+#define SD_OCP_THD_950 0x05
+#define SD_OCP_THD_1050 0x06
+#define SD_OCP_THD_1150 0x07
+#define SD_OCP_THD_MASK 0x07
+
+#define SDVIO_OCP_GLITCH_MASK 0xF0
+#define SDVIO_OCP_GLITCH_NONE 0x00
+#define SDVIO_OCP_GLITCH_50U 0x10
+#define SDVIO_OCP_GLITCH_100U 0x20
+#define SDVIO_OCP_GLITCH_200U 0x30
+#define SDVIO_OCP_GLITCH_600U 0x40
+#define SDVIO_OCP_GLITCH_800U 0x50
+#define SDVIO_OCP_GLITCH_1M 0x60
+#define SDVIO_OCP_GLITCH_2M 0x70
+#define SDVIO_OCP_GLITCH_3M 0x80
+#define SDVIO_OCP_GLITCH_4M 0x90
+#define SDVIO_OCP_GLIVCH_5M 0xA0
+#define SDVIO_OCP_GLITCH_6M 0xB0
+#define SDVIO_OCP_GLITCH_7M 0xC0
+#define SDVIO_OCP_GLITCH_8M 0xD0
+#define SDVIO_OCP_GLITCH_9M 0xE0
+#define SDVIO_OCP_GLITCH_10M 0xF0
+
+#define SD_OCP_GLITCH_MASK 0x0F
+#define SD_OCP_GLITCH_NONE 0x00
+#define SD_OCP_GLITCH_50U 0x01
+#define SD_OCP_GLITCH_100U 0x02
+#define SD_OCP_GLITCH_200U 0x03
+#define SD_OCP_GLITCH_600U 0x04
+#define SD_OCP_GLITCH_800U 0x05
+#define SD_OCP_GLITCH_1M 0x06
+#define SD_OCP_GLITCH_2M 0x07
+#define SD_OCP_GLITCH_3M 0x08
+#define SD_OCP_GLITCH_4M 0x09
+#define SD_OCP_GLIVCH_5M 0x0A
+#define SD_OCP_GLITCH_6M 0x0B
+#define SD_OCP_GLITCH_7M 0x0C
+#define SD_OCP_GLITCH_8M 0x0D
+#define SD_OCP_GLITCH_9M 0x0E
+#define SD_OCP_GLITCH_10M 0x0F
+
/* Phy register */
#define PHY_PCR 0x00
#define PHY_PCR_FORCE_CODE 0xB000
@@ -857,6 +1055,7 @@
#define PCR_ASPM_SETTING_REG1 0x160
#define PCR_ASPM_SETTING_REG2 0x168
+#define PCR_ASPM_SETTING_5260 0x178
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
@@ -890,6 +1089,7 @@ struct pcr_ops {
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+ void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
@@ -897,6 +1097,12 @@ struct pcr_ops {
void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
void (*full_on)(struct rtsx_pcr *pcr);
void (*power_saving)(struct rtsx_pcr *pcr);
+ void (*enable_ocp)(struct rtsx_pcr *pcr);
+ void (*disable_ocp)(struct rtsx_pcr *pcr);
+ void (*init_ocp)(struct rtsx_pcr *pcr);
+ void (*process_ocp)(struct rtsx_pcr *pcr);
+ int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val);
+ void (*clear_ocpstat)(struct rtsx_pcr *pcr);
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -935,6 +1141,9 @@ enum dev_aspm_mode {
* @l1_snooze_delay: l1 snooze delay
* @ltr_l1off_sspwrgate: ltr l1off sspwrgate
* @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
+ * @ocp_en: OCP (over-current protection) enable flag
+ * @sd_400mA_ocp_thd: OCP threshold for the 400mA current limit
+ * @sd_800mA_ocp_thd: OCP threshold for the 800mA current limit
*/
struct rtsx_cr_option {
u32 dev_flags;
@@ -949,6 +1158,19 @@ struct rtsx_cr_option {
u32 l1_snooze_delay;
u8 ltr_l1off_sspwrgate;
u8 ltr_l1off_snooze_sspwrgate;
+ bool ocp_en;
+ u8 sd_400mA_ocp_thd;
+ u8 sd_800mA_ocp_thd;
+};
+
+/*
+ * struct rtsx_hw_param - card reader hardware param
+ * @interrupt_en: indicates which interrupts are enabled
+ * @ocp_glitch: OCP glitch time
+ */
+struct rtsx_hw_param {
+ u32 interrupt_en;
+ u8 ocp_glitch;
};
#define rtsx_set_dev_flag(cr, flag) \
@@ -963,6 +1185,7 @@ struct rtsx_pcr {
unsigned int id;
int pcie_cap;
struct rtsx_cr_option option;
+ struct rtsx_hw_param hw_param;
/* pci resources */
unsigned long addr;
@@ -1042,12 +1265,15 @@ struct rtsx_pcr {
struct rtsx_slot *slots;
u8 dma_error_count;
+ u8 ocp_stat;
+ u8 ocp_stat2;
};
#define PID_524A 0x524A
-#define PID_5249 0x5249
-#define PID_5250 0x5250
+#define PID_5249 0x5249
+#define PID_5250 0x5250
#define PID_525A 0x525A
+#define PID_5260 0x5260
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/rtsx_usb.h
index c446e4fd6b5c..c446e4fd6b5c 100644
--- a/include/linux/mfd/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b7c83254c566..22b2131bcdcd 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -276,6 +276,17 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
unsigned int n_pages, unsigned int offset,
unsigned long size, gfp_t gfp_mask);
+#ifdef CONFIG_SGL_ALLOC
+struct scatterlist *sgl_alloc_order(unsigned long long length,
+ unsigned int order, bool chainable,
+ gfp_t gfp, unsigned int *nent_p);
+struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
+ unsigned int *nent_p);
+void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
+void sgl_free_order(struct scatterlist *sgl, int order);
+void sgl_free(struct scatterlist *sgl);
+#endif /* CONFIG_SGL_ALLOC */
+
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
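
A short sketch of the new SGL allocator pair (the buffer size is an
arbitrary example):

	unsigned int nents;
	struct scatterlist *sgl;

	sgl = sgl_alloc(1024 * 1024, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... dma_map_sg(), perform I/O, dma_unmap_sg() ... */

	sgl_free(sgl);
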
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2588263a989..166144c04ef6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -472,11 +472,15 @@ struct sched_dl_entity {
* has not been executed yet. This flag is useful to avoid race
* conditions between the inactive timer handler and the wakeup
* code.
+ *
+ * @dl_overrun tells if the task asked to be informed about runtime
+ * overruns.
*/
unsigned int dl_throttled : 1;
unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
+ unsigned int dl_overrun : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -1427,6 +1431,7 @@ extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
+extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
@@ -1446,12 +1451,21 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
+#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+ struct task_struct task;
+#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+extern struct thread_info init_thread_info;
+#endif
+
+extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
+
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index d1ad3d825561..0b55834efd46 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -12,8 +12,6 @@
#define SCHED_CPUFREQ_DL (1U << 1)
#define SCHED_CPUFREQ_IOWAIT (1U << 2)
-#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
-
#ifdef CONFIG_CPU_FREQ
struct update_util_data {
void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 3d49b91b674d..bd422561a75e 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -11,7 +11,7 @@
/*
* Routines for handling mm_structs
*/
-extern struct mm_struct * mm_alloc(void);
+extern struct mm_struct *mm_alloc(void);
/**
* mmgrab() - Pin a &struct mm_struct.
@@ -35,27 +35,7 @@ static inline void mmgrab(struct mm_struct *mm)
atomic_inc(&mm->mm_count);
}
-/* mmdrop drops the mm and the page tables */
-extern void __mmdrop(struct mm_struct *);
-static inline void mmdrop(struct mm_struct *mm)
-{
- if (unlikely(atomic_dec_and_test(&mm->mm_count)))
- __mmdrop(mm);
-}
-
-static inline void mmdrop_async_fn(struct work_struct *work)
-{
- struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
- __mmdrop(mm);
-}
-
-static inline void mmdrop_async(struct mm_struct *mm)
-{
- if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
- INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
- schedule_work(&mm->async_put_work);
- }
-}
+extern void mmdrop(struct mm_struct *mm);
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0aa4548fb492..23b4f9cb82db 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -285,6 +285,34 @@ static inline void kernel_signal_stop(void)
schedule();
}
+#ifdef __ARCH_SI_TRAPNO
+# define ___ARCH_SI_TRAPNO(_a1) , _a1
+#else
+# define ___ARCH_SI_TRAPNO(_a1)
+#endif
+#ifdef __ia64__
+# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
+#else
+# define ___ARCH_SI_IA64(_a1, _a2, _a3)
+#endif
+
+int force_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+int send_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+
+int force_sig_mceerr(int code, void __user *, short, struct task_struct *);
+int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
+
+int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
+int force_sig_pkuerr(void __user *addr, u32 pkey);
+
+int force_sig_ptrace_errno_trap(int errno, void __user *addr);
+
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
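
On architectures that define neither __ARCH_SI_TRAPNO nor the ia64
extras, the new helper collapses to three arguments plus the task; a
sketch from a hypothetical fault handler:

	force_sig_fault(SIGSEGV, SEGV_MAPERR,
			(void __user *)fault_addr, current);
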
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index cb4828aaa34f..6a841929073f 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -78,7 +78,7 @@ static inline void put_task_stack(struct task_struct *tsk) {}
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
-static inline int object_is_on_stack(void *obj)
+static inline int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(current);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index cf257c2e728d..26347741ba50 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -7,6 +7,12 @@
#include <linux/sched/idle.h>
/*
+ * Increase resolution of cpu_capacity calculations
+ */
+#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
+#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
+
+/*
* sched-domains (multiprocessor balancing) declarations:
*/
#ifdef CONFIG_SMP
@@ -27,12 +33,6 @@
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
#define SD_NUMA 0x4000 /* cross-node balancing */
-/*
- * Increase resolution of cpu_capacity calculations
- */
-#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
-#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
-
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
diff --git a/include/linux/scif.h b/include/linux/scif.h
index 49a35d6edc94..7046111b8d0a 100644
--- a/include/linux/scif.h
+++ b/include/linux/scif.h
@@ -123,8 +123,8 @@ struct scif_range {
*/
struct scif_pollepd {
scif_epd_t epd;
- short events;
- short revents;
+ __poll_t events;
+ __poll_t revents;
};
/**
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index da803dfc7a39..b36c76635f18 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -102,11 +102,15 @@ enum sctp_cid {
/* AUTH Extension Section 4.1 */
SCTP_CID_AUTH = 0x0F,
+ /* sctp ndata 5.1. I-DATA */
+ SCTP_CID_I_DATA = 0x40,
+
/* PR-SCTP Sec 3.2 */
SCTP_CID_FWD_TSN = 0xC0,
/* Use hex, as defined in ADDIP sec. 3.1 */
SCTP_CID_ASCONF = 0xC1,
+ SCTP_CID_I_FWD_TSN = 0xC2,
SCTP_CID_ASCONF_ACK = 0x80,
SCTP_CID_RECONF = 0x82,
}; /* enum */
@@ -240,6 +244,23 @@ struct sctp_data_chunk {
struct sctp_datahdr data_hdr;
};
+struct sctp_idatahdr {
+ __be32 tsn;
+ __be16 stream;
+ __be16 reserved;
+ __be32 mid;
+ union {
+ __u32 ppid;
+ __be32 fsn;
+ };
+ __u8 payload[0];
+};
+
+struct sctp_idata_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_idatahdr data_hdr;
+};
+
/* DATA Chuck Specific Flags */
enum {
SCTP_DATA_MIDDLE_FRAG = 0x00,
@@ -596,6 +617,22 @@ struct sctp_fwdtsn_chunk {
struct sctp_fwdtsn_hdr fwdtsn_hdr;
};
+struct sctp_ifwdtsn_skip {
+ __be16 stream;
+ __u8 reserved;
+ __u8 flags;
+ __be32 mid;
+};
+
+struct sctp_ifwdtsn_hdr {
+ __be32 new_cum_tsn;
+ struct sctp_ifwdtsn_skip skip[0];
+};
+
+struct sctp_ifwdtsn_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_ifwdtsn_hdr fwdtsn_hdr;
+};
/* ADDIP
* Section 3.1.1 Address Configuration Change Chunk (ASCONF)
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 10f25f7e4304..c723a5c4e3ff 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -95,11 +95,19 @@ static inline void get_seccomp_filter(struct task_struct *tsk)
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
extern long seccomp_get_filter(struct task_struct *task,
unsigned long filter_off, void __user *data);
+extern long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off, void __user *data);
#else
static inline long seccomp_get_filter(struct task_struct *task,
unsigned long n, void __user *data)
{
return -EINVAL;
}
+static inline long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off,
+ void __user *data)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
#endif /* _LINUX_SECCOMP_H */
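
seccomp_get_metadata() backs the PTRACE_SECCOMP_GET_METADATA request;
a hedged userspace sketch of the intended use:

	struct seccomp_metadata md = { .filter_off = 0 };

	if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md) == 0)
		printf("filter 0 flags: %#llx\n",
		       (unsigned long long)md.flags);
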
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f189a8a3bbb8..bcf4cf26b8c8 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -278,9 +278,8 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
- int seq = READ_ONCE(s->sequence);
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
- smp_read_barrier_depends();
+ int seq = READ_ONCE(s->sequence); /* ^^^ */
return seq;
}
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index d609e6dc5bad..f153b2c7f0cd 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -27,8 +27,10 @@ struct serdev_device;
/**
* struct serdev_device_ops - Callback operations for a serdev device
- * @receive_buf: Function called with data received from device.
- * @write_wakeup: Function called when ready to transmit more data.
+ * @receive_buf: Function called with data received from device;
+ * returns number of bytes accepted; may sleep.
+ * @write_wakeup: Function called when ready to transmit more data; must
+ * not sleep.
*/
struct serdev_device_ops {
int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
@@ -76,6 +78,12 @@ static inline struct serdev_device_driver *to_serdev_device_driver(struct device
return container_of(d, struct serdev_device_driver, driver);
}
+enum serdev_parity {
+ SERDEV_PARITY_NONE,
+ SERDEV_PARITY_EVEN,
+ SERDEV_PARITY_ODD,
+};
+
/*
* serdev controller structures
*/
@@ -86,6 +94,7 @@ struct serdev_controller_ops {
int (*open)(struct serdev_controller *);
void (*close)(struct serdev_controller *);
void (*set_flow_control)(struct serdev_controller *, bool);
+ int (*set_parity)(struct serdev_controller *, enum serdev_parity);
unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
void (*wait_until_sent)(struct serdev_controller *, long);
int (*get_tiocm)(struct serdev_controller *);
@@ -193,6 +202,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
int serdev_device_open(struct serdev_device *);
void serdev_device_close(struct serdev_device *);
+int devm_serdev_device_open(struct device *, struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
@@ -298,6 +308,9 @@ static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enabl
return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS);
}
+int serdev_device_set_parity(struct serdev_device *serdev,
+ enum serdev_parity parity);
+
/*
* serdev hooks into TTY core
*/
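
A sketch of a serdev client using the new callbacks and parity helper;
all "foo_*" names are hypothetical:

	static int foo_receive_buf(struct serdev_device *serdev,
				   const unsigned char *buf, size_t len)
	{
		/* May sleep; return the number of bytes consumed. */
		return len;
	}

	static void foo_write_wakeup(struct serdev_device *serdev)
	{
		/* Must not sleep; kick a workqueue if needed. */
	}

	static const struct serdev_device_ops foo_serdev_ops = {
		.receive_buf	= foo_receive_buf,
		.write_wakeup	= foo_write_wakeup,
	};

	/* In probe, after devm_serdev_device_open(): */
	serdev_device_set_baudrate(serdev, 115200);
	serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
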
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 37b044e78333..4c310c34ddad 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -387,7 +387,7 @@ struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options);
-void uart_parse_options(char *options, int *baud, int *parity, int *bits,
+void uart_parse_options(const char *options, int *baud, int *parity, int *bits,
int *flow);
int uart_set_options(struct uart_port *port, struct console *co, int baud,
int parity, int bits, int flow);
@@ -501,9 +501,5 @@ static inline int uart_handle_break(struct uart_port *port)
(cflag) & CRTSCTS || \
!((cflag) & CLOCAL))
-/*
- * Common device tree parsing helpers
- */
-void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf);
-
+void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf);
#endif /* LINUX_SERIAL_CORE_H */
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 4a906f560817..e724d5a3dd80 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -3,7 +3,7 @@
#include <linux/phy.h>
-struct __packed sfp_eeprom_base {
+struct sfp_eeprom_base {
u8 phys_id;
u8 phys_ext_id;
u8 connector;
@@ -165,13 +165,47 @@ struct __packed sfp_eeprom_base {
char vendor_rev[4];
union {
__be16 optical_wavelength;
- u8 cable_spec;
- };
+ __be16 cable_compliance;
+ struct {
+#if defined __BIG_ENDIAN_BITFIELD
+ u8 reserved60_2:6;
+ u8 fc_pi_4_app_h:1;
+ u8 sff8431_app_e:1;
+ u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+ u8 sff8431_app_e:1;
+ u8 fc_pi_4_app_h:1;
+ u8 reserved60_2:6;
+ u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+ } __packed passive;
+ struct {
+#if defined __BIG_ENDIAN_BITFIELD
+ u8 reserved60_4:4;
+ u8 fc_pi_4_lim:1;
+ u8 sff8431_lim:1;
+ u8 fc_pi_4_app_h:1;
+ u8 sff8431_app_e:1;
+ u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+ u8 sff8431_app_e:1;
+ u8 fc_pi_4_app_h:1;
+ u8 sff8431_lim:1;
+ u8 fc_pi_4_lim:1;
+ u8 reserved60_4:4;
+ u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+ } __packed active;
+ } __packed;
u8 reserved62;
u8 cc_base;
-};
+} __packed;
-struct __packed sfp_eeprom_ext {
+struct sfp_eeprom_ext {
__be16 options;
u8 br_max;
u8 br_min;
@@ -181,12 +215,21 @@ struct __packed sfp_eeprom_ext {
u8 enhopts;
u8 sff8472_compliance;
u8 cc_ext;
-};
-
-struct __packed sfp_eeprom_id {
+} __packed;
+
+/**
+ * struct sfp_eeprom_id - raw SFP module identification information
+ * @base: base SFP module identification structure
+ * @ext: extended SFP module identification structure
+ *
+ * See the SFF-8472 specification and related documents for the definition
+ * of these structure members. These documents can be obtained from
+ * ftp://ftp.seagate.com/sff
+ */
+struct sfp_eeprom_id {
struct sfp_eeprom_base base;
struct sfp_eeprom_ext ext;
-};
+} __packed;
/* SFP EEPROM registers */
enum {
@@ -222,6 +265,7 @@ enum {
SFP_SFF8472_COMPLIANCE = 0x5e,
SFP_CC_EXT = 0x5f,
+ SFP_PHYS_ID_SFF = 0x02,
SFP_PHYS_ID_SFP = 0x03,
SFP_PHYS_EXT_ID_SFP = 0x04,
SFP_CONNECTOR_UNSPEC = 0x00,
@@ -347,19 +391,32 @@ enum {
SFP_PAGE = 0x7f,
};
-struct device_node;
+struct fwnode_handle;
struct ethtool_eeprom;
struct ethtool_modinfo;
struct net_device;
struct sfp_bus;
+/**
+ * struct sfp_upstream_ops - upstream operations structure
+ * @module_insert: called after a module has been detected to determine
+ * whether the module is supported for the upstream device.
+ * @module_remove: called after the module has been removed.
+ * @link_down: called when the link is non-operational for whatever
+ * reason.
+ * @link_up: called when the link is operational.
+ * @connect_phy: called when an I2C accessible PHY has been detected
+ * on the module.
+ * @disconnect_phy: called when a module with an I2C accessible PHY has
+ * been removed.
+ */
struct sfp_upstream_ops {
- int (*module_insert)(void *, const struct sfp_eeprom_id *id);
- void (*module_remove)(void *);
- void (*link_down)(void *);
- void (*link_up)(void *);
- int (*connect_phy)(void *, struct phy_device *);
- void (*disconnect_phy)(void *);
+ int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
+ void (*module_remove)(void *priv);
+ void (*link_down)(void *priv);
+ void (*link_up)(void *priv);
+ int (*connect_phy)(void *priv, struct phy_device *);
+ void (*disconnect_phy)(void *priv);
};
#if IS_ENABLED(CONFIG_SFP)
@@ -375,7 +432,7 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
u8 *data);
void sfp_upstream_start(struct sfp_bus *bus);
void sfp_upstream_stop(struct sfp_bus *bus);
-struct sfp_bus *sfp_register_upstream(struct device_node *np,
+struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
struct net_device *ndev, void *upstream,
const struct sfp_upstream_ops *ops);
void sfp_unregister_upstream(struct sfp_bus *bus);
@@ -419,7 +476,8 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
{
}
-static inline struct sfp_bus *sfp_register_upstream(struct device_node *np,
+static inline struct sfp_bus *sfp_register_upstream(
+ struct fwnode_handle *fwnode,
struct net_device *ndev, void *upstream,
const struct sfp_upstream_ops *ops)
{
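
A sketch of a MAC driver registering as an SFP upstream with the
fwnode-based API ("foo_*" callbacks and "priv" are placeholders):

	static const struct sfp_upstream_ops foo_sfp_ops = {
		.module_insert	= foo_module_insert,
		.link_up	= foo_link_up,
		.link_down	= foo_link_down,
	};

	priv->sfp_bus = sfp_register_upstream(fwnode, ndev, priv,
					      &foo_sfp_ops);
	if (!priv->sfp_bus)
		return -ENOMEM;
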
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index ff3642d267f7..6dfda97a6c1a 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -5,19 +5,15 @@
#include <linux/phy.h>
#include <linux/if_ether.h>
-enum {EDMAC_LITTLE_ENDIAN};
-
struct sh_eth_plat_data {
int phy;
int phy_irq;
- int edmac_endian;
phy_interface_t phy_interface;
void (*set_mdio_gate)(void *addr);
unsigned char mac_addr[ETH_ALEN];
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
- unsigned needs_init:1;
};
#endif
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 06b295bec00d..73b5e655a76e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -112,13 +112,11 @@ extern void shmem_uncharge(struct inode *inode, long pages);
#ifdef CONFIG_TMPFS
-extern int shmem_add_seals(struct file *file, unsigned int seals);
-extern int shmem_get_seals(struct file *file);
-extern long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
+extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
#else
-static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a)
+static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
{
return -EINVAL;
}
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 042968dd98f0..a9bc7e1b077e 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -11,13 +11,14 @@ struct task_struct;
/* for sysctl */
extern int print_fatal_signals;
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from)
{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+ memcpy(to, from, sizeof(*to));
+}
+
+static inline void clear_siginfo(struct siginfo *info)
+{
+ memset(info, 0, sizeof(*info));
}
int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
@@ -29,9 +30,7 @@ enum siginfo_layout {
SIL_FAULT,
SIL_CHLD,
SIL_RT,
-#ifdef __ARCH_SIGSYS
SIL_SYS,
-#endif
};
enum siginfo_layout siginfo_layout(int sig, int si_code);
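
With copy_siginfo() now a plain memcpy(), clear_siginfo() becomes the
way to guarantee no uninitialized padding leaks to userspace; a short
sketch:

	struct siginfo info;

	clear_siginfo(&info);	/* zero everything, padding included */
	info.si_signo = SIGBUS;
	info.si_code  = BUS_ADRERR;
	info.si_addr  = (void __user *)addr;
	force_sig_info(info.si_signo, &info, current);
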
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 8621ffdeecbf..a6b6e8bb3d7b 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -69,7 +69,12 @@ static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb
*/
static inline bool __skb_array_empty(struct skb_array *a)
{
- return !__ptr_ring_peek(&a->ring);
+ return __ptr_ring_empty(&a->ring);
+}
+
+static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
+{
+ return __ptr_ring_peek(&a->ring);
}
static inline bool skb_array_empty(struct skb_array *a)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a38c80e9f91e..ac89a93b7c83 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1211,6 +1211,11 @@ static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
data, proto, nhoff, hlen, flags);
}
+void
+skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container);
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -3241,7 +3246,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
-unsigned int datagram_poll(struct file *file, struct socket *sock,
+__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
diff --git a/include/linux/sound.h b/include/linux/sound.h
index 3c6d393c7f29..ec85b7a1f8d1 100644
--- a/include/linux/sound.h
+++ b/include/linux/sound.h
@@ -12,11 +12,9 @@ struct device;
extern int register_sound_special(const struct file_operations *fops, int unit);
extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev);
extern int register_sound_mixer(const struct file_operations *fops, int dev);
-extern int register_sound_midi(const struct file_operations *fops, int dev);
extern int register_sound_dsp(const struct file_operations *fops, int dev);
extern void unregister_sound_special(int unit);
extern void unregister_sound_mixer(int unit);
-extern void unregister_sound_midi(int unit);
extern void unregister_sound_dsp(int unit);
#endif /* _LINUX_SOUND_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3bf273538840..4894d322d258 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -409,4 +409,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+ size_t max_size, unsigned int cpu_mult,
+ gfp_t gfp);
+
+void free_bucket_spinlocks(spinlock_t *locks);
+
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 62be8966e837..33c1c698df09 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
* relies on normal RCU, it can be called from the CPU which
* is in the idle loop from an RCU point of view or offline.
*/
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
{
if (!debug_lockdep_rcu_enabled())
return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
{
return 1;
}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index a949f4f9e4d7..4eda108abee0 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
/* Update-side state. */
- raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
+ spinlock_t __private lock ____cacheline_internodealigned_in_smp;
struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
* Node in SRCU combining tree, similar in function to rcu_data.
*/
struct srcu_node {
- raw_spinlock_t __private lock;
+ spinlock_t __private lock;
unsigned long srcu_have_cbs[4]; /* GP seq for children */
/* having CBs, but only */
/* if > ->srcu_gp_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
- raw_spinlock_t __private lock; /* Protect counters */
+ spinlock_t __private lock; /* Protect counters */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
@@ -107,7 +107,7 @@ struct srcu_struct {
#define __SRCU_STRUCT_INIT(name) \
{ \
.sda = &name##_srcu_data, \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.srcu_gp_seq_needed = 0 - 1, \
__SRCU_DEP_MAP_INIT(name) \
}
diff --git a/include/linux/string.h b/include/linux/string.h
index cfd83eb2f926..dd39a690c841 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -11,6 +11,7 @@
extern char *strndup_user(const char __user *, long);
extern void *memdup_user(const void __user *, size_t);
+extern void *vmemdup_user(const void __user *, size_t);
extern void *memdup_user_nul(const void __user *, size_t);
/*
@@ -28,7 +29,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
-ssize_t __must_check strscpy(char *, const char *, size_t);
+ssize_t strscpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
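
vmemdup_user() complements memdup_user() for copies that may be too large for kmalloc() to serve comfortably; it can fall back to vmalloc, so the duplicate must be released with kvfree(). A minimal sketch:

static int example_copy_in(const void __user *ubuf, size_t len)
{
        void *buf = vmemdup_user(ubuf, len);

        if (IS_ERR(buf))
                return PTR_ERR(buf);
        /* ... operate on buf ... */
        kvfree(buf);            /* not kfree(): the copy may be vmalloc'ed */
        return 0;
}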
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 71c237e8240e..ed761f751ecb 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -179,7 +179,6 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int rpc_restart_call_prepare(struct rpc_task *);
int rpc_restart_call(struct rpc_task *);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
-int rpc_protocol(struct rpc_clnt *);
struct net * rpc_net_ns(struct rpc_clnt *);
size_t rpc_max_payload(struct rpc_clnt *);
size_t rpc_max_bc_payload(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 221b7a2e5406..5859563e3c1f 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -64,7 +64,7 @@ enum rpcrdma_memreg {
RPCRDMA_MEMWINDOWS,
RPCRDMA_MEMWINDOWS_ASYNC,
RPCRDMA_MTHCAFMR,
- RPCRDMA_FRMR,
+ RPCRDMA_FRWR,
RPCRDMA_ALLPHYSICAL,
RPCRDMA_LAST
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d60b0f5c38d5..cc22a24516d6 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -443,32 +443,8 @@ extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
-static inline void lock_system_sleep(void)
-{
- current->flags |= PF_FREEZER_SKIP;
- mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
- /*
- * Don't use freezer_count() because we don't want the call to
- * try_to_freeze() here.
- *
- * Reason:
- * Fundamentally, we just don't need it, because freezing condition
- * doesn't come into effect until we release the pm_mutex lock,
- * since the freezer always works with pm_mutex held.
- *
- * More importantly, in the case of hibernation,
- * unlock_system_sleep() gets called in snapshot_read() and
- * snapshot_write() when the freezing condition is still in effect.
- * Which means, if we use try_to_freeze() here, it would make them
- * enter the refrigerator, thus causing hibernation to lockup.
- */
- current->flags &= ~PF_FREEZER_SKIP;
- mutex_unlock(&pm_mutex);
-}
+extern void lock_system_sleep(void);
+extern void unlock_system_sleep(void);
#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c2b8128799c1..7b6a59f722a3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -332,7 +332,6 @@ extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
-extern void lru_add_drain_all_cpuslocked(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
@@ -345,7 +344,6 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
-extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 9c5a2628d6ce..1d3877c39a00 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -124,6 +124,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}
+static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+{
+ return swp_offset(entry);
+}
+
static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
return pfn_to_page(swp_offset(entry));
@@ -154,6 +159,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
return false;
}
+static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+{
+ return 0;
+}
+
static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
return NULL;
@@ -189,6 +199,11 @@ static inline int is_write_migration_entry(swp_entry_t entry)
return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}
+static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+{
+ return swp_offset(entry);
+}
+
static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
struct page *p = pfn_to_page(swp_offset(entry));
@@ -218,6 +233,12 @@ static inline int is_migration_entry(swp_entry_t swp)
{
return 0;
}
+
+static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+{
+ return 0;
+}
+
static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
return NULL;
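
The new *_entry_to_pfn() helpers expose the raw pfn behind a migration or device-private swap entry without materializing the struct page. A sketch of the intended call pattern for a non-present pte; error handling is elided:

static unsigned long example_entry_pfn(pte_t pte)
{
        swp_entry_t entry = pte_to_swp_entry(pte);

        if (is_migration_entry(entry))
                return migration_entry_to_pfn(entry);
        if (is_device_private_entry(entry))
                return device_private_entry_to_pfn(entry);
        return 0;       /* not a special swap entry in this sketch */
}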
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 24ed817082ee..5b1f2a00491c 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -66,6 +66,12 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
enum dma_sync_target target);
/* Accessory functions. */
+
+void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flags, unsigned long attrs);
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs);
+
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags);
@@ -115,10 +121,10 @@ extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
-extern void __init swiotlb_free(void);
+extern void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
#else
-static inline void swiotlb_free(void) { }
+static inline void swiotlb_exit(void) { }
static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
@@ -126,4 +132,6 @@ extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
extern void swiotlb_set_max_segment(unsigned int);
+extern const struct dma_map_ops swiotlb_dma_ops;
+
#endif /* __LINUX_SWIOTLB_H */
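
Renaming the teardown function swiotlb_free() to swiotlb_exit() frees the old name for the new per-device allocator hooks above, whose signatures line up with dma_map_ops. A hedged sketch of how an architecture might wire them up; the remaining callbacks are elided:

static const struct dma_map_ops example_swiotlb_ops = {
        .alloc = swiotlb_alloc,
        .free  = swiotlb_free,
        /* ... map_page/unmap_page/sync callbacks ... */
};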
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 40839c02d28c..b8bfdc173ec0 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -113,7 +113,7 @@ struct attribute_group {
}
#define __ATTR_RO(_name) { \
- .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
.show = _name##_show, \
}
@@ -124,12 +124,11 @@ struct attribute_group {
}
#define __ATTR_WO(_name) { \
- .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
+ .attr = { .name = __stringify(_name), .mode = 0200 }, \
.store = _name##_store, \
}
-#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \
- _name##_show, _name##_store)
+#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)
#define __ATTR_NULL { .attr = { .name = NULL } }
@@ -192,14 +191,13 @@ struct bin_attribute {
}
#define __BIN_ATTR_RO(_name, _size) { \
- .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
.read = _name##_read, \
.size = _size, \
}
-#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \
- (S_IWUSR | S_IRUGO), _name##_read, \
- _name##_write, _size)
+#define __BIN_ATTR_RW(_name, _size) \
+ __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size)
#define __BIN_ATTR_NULL __ATTR_NULL
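
Spelling the modes in octal matches checkpatch's preference and changes nothing functionally: S_IRUGO is 0444, S_IWUSR is 0200, and S_IWUSR | S_IRUGO is 0644. A sketch of a read-write attribute built on the updated macro through DEVICE_ATTR_RW(), which expands to __ATTR_RW(); the values handled are placeholders:

static ssize_t level_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* placeholder value */
}

static ssize_t level_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        return count;   /* placeholder: parse and apply buf here */
}
static DEVICE_ATTR_RW(level);   /* mode 0644 via __ATTR_RW(level) */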
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ca4a6361389b..8f4c54986f97 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -335,6 +335,17 @@ struct tcp_sock {
int linger2;
+
+/* Sock_ops bpf program related variables */
+#ifdef CONFIG_BPF
+	u8	bpf_sock_ops_cb_flags;  /* Control calling BPF programs;
+					 * values defined in uapi/linux/tcp.h
+ */
+#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
+#else
+#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
+#endif
+
/* Receiver side RTT estimation */
struct {
u32 rtt_us;
@@ -344,7 +355,7 @@ struct tcp_sock {
/* Receiver queue space */
struct {
- int space;
+ u32 space;
u32 seq;
u64 time;
} rcvq_space;
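
The flag byte gates which TCP events call into an attached sock_ops BPF program, and the macro collapses to 0 without CONFIG_BPF, so call sites need no #ifdef. A hedged sketch of a guarded callout; the RTO flag name comes from the matching uapi change in this series:

static void example_rto_hook(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG)) {
                /* run the attached program, e.g. via tcp_call_bpf() */
        }
}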
diff --git a/include/linux/torture.h b/include/linux/torture.h
index a45702eb3e7b..66272862070b 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -79,7 +79,7 @@ void stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v, int *runnable);
+bool torture_init_begin(char *ttype, bool v);
void torture_init_end(void);
bool torture_cleanup_begin(void);
void torture_cleanup_end(void);
@@ -96,4 +96,10 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_stop_kthread(n, tp) \
_torture_stop_kthread("Stopping " #n " task", &(tp))
+#ifdef CONFIG_PREEMPT
+#define torture_preempt_schedule() preempt_schedule()
+#else
+#define torture_preempt_schedule()
+#endif
+
#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 5a090f5ab335..bcdd3790e94d 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -24,11 +24,6 @@
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
-/*
- * Chip num is this value or a valid tpm idx
- */
-#define TPM_ANY_NUM 0xFFFF
-
struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
@@ -50,46 +45,52 @@ struct tpm_class_ops {
unsigned long *timeout_cap);
int (*request_locality)(struct tpm_chip *chip, int loc);
void (*relinquish_locality)(struct tpm_chip *chip, int loc);
+ void (*clk_enable)(struct tpm_chip *chip, bool value);
};
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
-extern int tpm_is_tpm2(u32 chip_num);
-extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
-extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
-extern int tpm_send(u32 chip_num, void *cmd, size_t buflen);
-extern int tpm_get_random(u32 chip_num, u8 *data, size_t max);
-extern int tpm_seal_trusted(u32 chip_num,
+extern int tpm_is_tpm2(struct tpm_chip *chip);
+extern int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
+extern int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
+extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
+extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
+extern int tpm_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
-extern int tpm_unseal_trusted(u32 chip_num,
+extern int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
#else
-static inline int tpm_is_tpm2(u32 chip_num)
+static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
return -ENODEV;
}
-static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
+static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
return -ENODEV;
}
-static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
+static inline int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx,
+ const u8 *hash)
+{
return -ENODEV;
}
-static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) {
+static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
+{
return -ENODEV;
}
-static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) {
+static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
+{
return -ENODEV;
}
-static inline int tpm_seal_trusted(u32 chip_num,
+static inline int tpm_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
return -ENODEV;
}
-static inline int tpm_unseal_trusted(u32 chip_num,
+static inline int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
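
Every tpm_* entry point now takes the struct tpm_chip pointer directly instead of the old u32 chip number (TPM_ANY_NUM is deleted above). In this series a NULL chip asks the TPM core for the default chip, so most callers convert mechanically; a hedged before/after sketch:

/* before: tpm_pcr_read(TPM_ANY_NUM, 0, digest); */
static int example_read_pcr0(u8 *digest)
{
        return tpm_pcr_read(NULL, 0, digest);   /* NULL = default chip */
}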
diff --git a/drivers/char/tpm/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 204466cc4d05..20d9da77fc11 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TPM_EVENTLOG_H__
-#define __TPM_EVENTLOG_H__
+#ifndef __LINUX_TPM_EVENTLOG_H__
+#define __LINUX_TPM_EVENTLOG_H__
#include <crypto/hash_info.h>
@@ -10,6 +10,9 @@
#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
#define TPM2_ACTIVE_PCR_BANKS 3
+#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1
+#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2
+
#ifdef CONFIG_PPC64
#define do_endian_conversion(x) be32_to_cpu(x)
#else
@@ -105,6 +108,11 @@ struct tcg_event_field {
u8 event[0];
} __packed;
+struct tpm2_digest {
+ u16 alg_id;
+ u8 digest[SHA512_DIGEST_SIZE];
+} __packed;
+
struct tcg_pcr_event2 {
u32 pcr_idx;
u32 event_type;
@@ -113,26 +121,4 @@ struct tcg_pcr_event2 {
struct tcg_event_field event;
} __packed;
-extern const struct seq_operations tpm2_binary_b_measurements_seqops;
-
-#if defined(CONFIG_ACPI)
-int tpm_read_log_acpi(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_acpi(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-#if defined(CONFIG_OF)
-int tpm_read_log_of(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_of(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-
-int tpm_bios_log_setup(struct tpm_chip *chip);
-void tpm_bios_log_teardown(struct tpm_chip *chip);
-
#endif
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index af44e7c2d577..8a1442c4e513 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -467,6 +467,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
+int perf_event_query_prog_array(struct perf_event *event, void __user *info);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
@@ -481,6 +482,11 @@ perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
+static inline int
+perf_event_query_prog_array(struct perf_event *event, void __user *info)
+{
+ return -EOPNOTSUPP;
+}
#endif
enum {
@@ -528,6 +534,7 @@ do { \
struct perf_event;
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+DECLARE_PER_CPU(int, bpf_kprobe_override);
extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index a26ffbe09e71..c94f466d57ef 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -137,11 +137,8 @@ extern void syscall_unregfunc(void);
\
if (!(cond)) \
return; \
- if (rcucheck) { \
- if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
- return; \
+ if (rcucheck) \
rcu_irq_enter_irqson(); \
- } \
rcu_read_lock_sched_notrace(); \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 7ac8ba208b1f..0a6c71e0ad01 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -405,6 +405,8 @@ extern const char *tty_name(const struct tty_struct *tty);
extern struct tty_struct *tty_kopen(dev_t device);
extern void tty_kclose(struct tty_struct *tty);
extern int tty_dev_name_to_number(const char *name, dev_t *number);
+extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
+extern void tty_ldisc_unlock(struct tty_struct *tty);
#else
static inline void tty_kref_put(struct tty_struct *tty)
{ }
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 3bc5144b1c7e..1ef64d4ad887 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -187,7 +187,7 @@ struct tty_ldisc_ops {
long (*compat_ioctl)(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
- unsigned int (*poll)(struct tty_struct *, struct file *,
+ __poll_t (*poll)(struct tty_struct *, struct file *,
struct poll_table_struct *);
int (*hangup)(struct tty_struct *tty);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index fbbe974661f2..0173597e59aa 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -609,6 +609,10 @@ struct usb3_lpm_parameters {
* to keep track of the number of functions that require USB 3.0 Link Power
* Management to be disabled for this usb_device. This count should only
 * be manipulated by those functions, with the bandwidth_mutex held.
+ * @hub_delay: cached value consisting of:
+ * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns)
+ *
+ * Will be used as wValue for SetIsochDelay requests.
*
* Notes:
* Usbcore drivers should not set usbdev->state directly. Instead use
@@ -689,6 +693,8 @@ struct usb_device {
struct usb3_lpm_parameters u1_params;
struct usb3_lpm_parameters u2_params;
unsigned lpm_disable_count;
+
+ u16 hub_delay;
};
#define to_usb_device(d) container_of(d, struct usb_device, dev)
@@ -1293,7 +1299,6 @@ extern int usb_disabled(void);
#define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired
* slot in the schedule */
#define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */
-#define URB_NO_FSBR 0x0020 /* UHCI-specific */
#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */
#define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt
* needed */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 0142f3af0da6..66a5cff7ee14 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -330,6 +330,7 @@ struct usb_gadget_ops {
* @name: Identifies the controller hardware type. Used in diagnostics
* and sometimes configuration.
* @dev: Driver model state for this abstract device.
+ * @isoch_delay: value from Set Isoch Delay request. Only valid on SS/SSP
* @out_epnum: last used out ep number
* @in_epnum: last used in ep number
* @mA: last set mA value
@@ -394,6 +395,7 @@ struct usb_gadget {
enum usb_device_state state;
const char *name;
struct device dev;
+ unsigned isoch_delay;
unsigned out_epnum;
unsigned in_epnum;
unsigned mA;
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 6cbe7a5c2b57..dba55ccb9b53 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -12,13 +12,17 @@
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
+struct usb_device;
+
#if IS_ENABLED(CONFIG_OF)
enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
bool of_usb_host_tpl_support(struct device_node *np);
int of_usb_update_otg_caps(struct device_node *np,
struct usb_otg_caps *otg_caps);
-struct device_node *usb_of_get_child_node(struct device_node *parent,
- int portnum);
+struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1);
+bool usb_of_has_combined_node(struct usb_device *udev);
+struct device_node *usb_of_get_interface_node(struct usb_device *udev,
+ u8 config, u8 ifnum);
struct device *usb_of_get_companion_dev(struct device *dev);
#else
static inline enum usb_dr_mode
@@ -35,8 +39,17 @@ static inline int of_usb_update_otg_caps(struct device_node *np,
{
return 0;
}
-static inline struct device_node *usb_of_get_child_node
- (struct device_node *parent, int portnum)
+static inline struct device_node *
+usb_of_get_device_node(struct usb_device *hub, int port1)
+{
+ return NULL;
+}
+static inline bool usb_of_has_combined_node(struct usb_device *udev)
+{
+ return false;
+}
+static inline struct device_node *
+usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
{
return NULL;
}
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index e00051ced806..b3d41d7409b3 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -148,6 +148,8 @@ enum pd_pdo_type {
(PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \
PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma))
+#define VSAFE5V 5000 /* mV units */
+
#define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */
#define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */
#define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index d92259f8de0a..2b64d23ace5c 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -65,7 +65,7 @@
#define CMD_EXIT_MODE 5
#define CMD_ATTENTION 6
-#define VDO_CMD_VENDOR(x) (((10 + (x)) & 0x1f))
+#define VDO_CMD_VENDOR(x) (((0x10 + (x)) & 0x1f))
/* ChromeOS specific commands */
#define VDO_CMD_VERSION VDO_CMD_VENDOR(0)
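
Worked check of the one-character fix: VDO_CMD_VENDOR(0) used to expand to (10 + 0) & 0x1f = 0x0a, landing inside the range reserved for standard structured-VDM commands, while the PD specification puts vendor-use commands at 16-31. With the hex constant, VDO_CMD_VENDOR(0) = (0x10 + 0) & 0x1f = 0x10, the first vendor-use value, so VDO_CMD_VERSION and the other ChromeOS commands below now encode correctly.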
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 67102f3d59d4..53924f8e840c 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -17,6 +17,7 @@
*/
#ifndef RENESAS_USB_H
#define RENESAS_USB_H
+#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
@@ -98,6 +99,13 @@ struct renesas_usbhs_platform_callback {
* VBUS control is needed for Host
*/
int (*set_vbus)(struct platform_device *pdev, int enable);
+
+ /*
+ * option:
+ * extcon notifier to set host/peripheral mode.
+ */
+ int (*notifier)(struct notifier_block *nb, unsigned long event,
+ void *data);
};
/*
@@ -187,6 +195,7 @@ struct renesas_usbhs_driver_param {
#define USBHS_TYPE_RCAR_GEN2 1
#define USBHS_TYPE_RCAR_GEN3 2
#define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3
+#define USBHS_TYPE_RZA1 4
/*
* option:
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 073197f0d2bb..ca1c0b57f03f 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -183,14 +183,14 @@ struct tcpm_port;
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc);
void tcpm_unregister_port(struct tcpm_port *port);
-void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo);
-void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo,
- unsigned int max_snk_mv,
- unsigned int max_snk_ma,
- unsigned int max_snk_mw,
- unsigned int operating_snk_mw);
+int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo);
+int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo,
+ unsigned int max_snk_mv,
+ unsigned int max_snk_ma,
+ unsigned int max_snk_mw,
+ unsigned int operating_snk_mw);
void tcpm_vbus_change(struct tcpm_port *port);
void tcpm_cc_change(struct tcpm_port *port);
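
With the int return, capability updates can report failure (for example, rejected PDOs) instead of silently doing nothing, so callers should check the result. A hedged sketch; the port and the single 5 V / 1.5 A PDO are illustrative:

static int example_update_src_caps(struct tcpm_port *port)
{
        static const u32 src_pdo[] = {
                PDO_FIXED(5000, 1500, PDO_FIXED_USB_COMM), /* 5 V @ 1.5 A */
        };

        return tcpm_update_source_capabilities(port, src_pdo,
                                               ARRAY_SIZE(src_pdo));
}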
diff --git a/drivers/staging/unisys/include/visorbus.h b/include/linux/visorbus.h
index 1a0986ba3d24..0d8bd6769b13 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/include/linux/visorbus.h
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2010 - 2013 UNISYS CORPORATION
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
/*
@@ -29,8 +19,172 @@
#include <linux/device.h>
-#include "visorchannel.h"
+#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E')
+
+/*
+ * enum channel_serverstate
+ * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state.
+ * @CHANNELSRV_READY: Channel has been initialized by server.
+ */
+enum channel_serverstate {
+ CHANNELSRV_UNINITIALIZED = 0,
+ CHANNELSRV_READY = 1
+};
+
+/*
+ * enum channel_clientstate
+ * @CHANNELCLI_DETACHED:
+ * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it
+ * unless given TBD* explicit request
+ * (should actually be < DETACHED).
+ * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach.
+ * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time.
+ * @CHANNELCLI_BUSY: Client either wants to use or is using channel.
+ * @CHANNELCLI_OWNED: "No worries" state - client can access channel
+ * anytime.
+ */
+enum channel_clientstate {
+ CHANNELCLI_DETACHED = 0,
+ CHANNELCLI_DISABLED = 1,
+ CHANNELCLI_ATTACHING = 2,
+ CHANNELCLI_ATTACHED = 3,
+ CHANNELCLI_BUSY = 4,
+ CHANNELCLI_OWNED = 5
+};
+
+/*
+ * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that
+ * a guest can look at the FeatureFlags in the io channel, and configure the
+ * driver to use interrupts or not based on this setting. All feature bits for
+ * all channels should be defined here. The io channel feature bits are defined
+ * below.
+ */
+#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1)
+#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3)
+#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4)
+#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5)
+#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
+
+/*
+ * struct channel_header - Common Channel Header
+ * @signature: Signature.
+ * @legacy_state:	DEPRECATED - being replaced by srv_state,
+ *			cli_state_boot, and cli_state_os below.
+ * @header_size: sizeof(struct channel_header).
+ * @size: Total size of this channel in bytes.
+ * @features: Flags to modify behavior.
+ * @chtype:		Channel type: data, bus, control, etc.
+ * @partition_handle: ID of guest partition.
+ * @handle: Device number of this channel in client.
+ * @ch_space_offset: Offset in bytes to channel specific area.
+ * @version_id: Struct channel_header Version ID.
+ * @partition_index: Index of guest partition.
+ * @zone_guid:		GUID of the channel's zone.
+ * @cli_str_offset: Offset from channel header to null-terminated
+ * ClientString (0 if ClientString not present).
+ * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this
+ * channel.
+ * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
+ * ServerStateUp, ServerStateDown, etc).
+ * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel.
+ * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>.
+ * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
+ * ServerStateUp, ServerStateDown, etc).
+ * @srv_state: CHANNEL_SERVERSTATE.
+ * @cli_error_boot: Bits to indicate err states for boot clients, so err
+ * messages can be throttled.
+ * @cli_error_os: Bits to indicate err states for OS clients, so err
+ * messages can be throttled.
+ * @filler: Pad out to 128 byte cacheline.
+ * @recover_channel: Please add all new single-byte values below here.
+ */
+struct channel_header {
+ u64 signature;
+ u32 legacy_state;
+ /* SrvState, CliStateBoot, and CliStateOS below */
+ u32 header_size;
+ u64 size;
+ u64 features;
+ guid_t chtype;
+ u64 partition_handle;
+ u64 handle;
+ u64 ch_space_offset;
+ u32 version_id;
+ u32 partition_index;
+ guid_t zone_guid;
+ u32 cli_str_offset;
+ u32 cli_state_boot;
+ u32 cmd_state_cli;
+ u32 cli_state_os;
+ u32 ch_characteristic;
+ u32 cmd_state_srv;
+ u32 srv_state;
+ u8 cli_error_boot;
+ u8 cli_error_os;
+ u8 filler[1];
+ u8 recover_channel;
+} __packed;
+
+#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0)
+
+/*
+ * struct signal_queue_header - Subheader for the Signal Type variation of the
+ * Common Channel.
+ * @version: SIGNAL_QUEUE_HEADER Version ID.
+ * @chtype: Queue type: storage, network.
+ * @size: Total size of this queue in bytes.
+ * @sig_base_offset: Offset to signal queue area.
+ * @features: Flags to modify behavior.
+ * @num_sent: Total # of signals placed in this queue.
+ * @num_overflows: Total # of inserts failed due to full queue.
+ * @signal_size: Total size of a signal for this queue.
+ * @max_slots: Max # of slots in queue, 1 slot is always empty.
+ * @max_signals: Max # of signals in queue (MaxSignalSlots-1).
+ * @head: Queue head signal #.
+ * @num_received: Total # of signals removed from this queue.
+ * @tail: Queue tail signal.
+ * @reserved1: Reserved field.
+ * @reserved2: Reserved field.
+ * @client_queue:
+ * @num_irq_received: Total # of Interrupts received. This is incremented by the
+ * ISR in the guest windows driver.
+ * @num_empty: Number of times that visor_signal_remove is called and
+ * returned Empty Status.
+ * @errorflags: Error bits set during SignalReinit to denote trouble with
+ * client's fields.
+ * @filler: Pad out to 64 byte cacheline.
+ */
+struct signal_queue_header {
+ /* 1st cache line */
+ u32 version;
+ u32 chtype;
+ u64 size;
+ u64 sig_base_offset;
+ u64 features;
+ u64 num_sent;
+ u64 num_overflows;
+ u32 signal_size;
+ u32 max_slots;
+ u32 max_signals;
+ u32 head;
+ /* 2nd cache line */
+ u64 num_received;
+ u32 tail;
+ u32 reserved1;
+ u64 reserved2;
+ u64 client_queue;
+ u64 num_irq_received;
+ u64 num_empty;
+ u32 errorflags;
+ u8 filler[12];
+} __packed;
+/* VISORCHANNEL Guids */
+/* {414815ed-c58c-11da-95a9-00e08161165f} */
+#define VISOR_VHBA_CHANNEL_GUID \
+ GUID_INIT(0x414815ed, 0xc58c, 0x11da, \
+ 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
+#define VISOR_VHBA_CHANNEL_GUID_STR \
+ "414815ed-c58c-11da-95a9-00e08161165f"
struct visorchipset_state {
u32 created:1;
u32 attached:1;
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 1779c9817b39..a4c2317d8b9f 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -216,23 +216,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
return x;
}
-static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
- enum node_stat_item item)
-{
- long x = atomic_long_read(&pgdat->vm_stat[item]);
-
-#ifdef CONFIG_SMP
- int cpu;
- for_each_online_cpu(cpu)
- x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];
-
- if (x < 0)
- x = 0;
-#endif
- return x;
-}
-
-
#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
index d58594a32324..78901ecd2f95 100644
--- a/include/linux/w1-gpio.h
+++ b/include/linux/w1-gpio.h
@@ -10,16 +10,15 @@
#ifndef _LINUX_W1_GPIO_H
#define _LINUX_W1_GPIO_H
+struct gpio_desc;
+
/**
* struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
- * @pin: GPIO pin to use
- * @is_open_drain: GPIO pin is configured as open drain
*/
struct w1_gpio_platform_data {
- unsigned int pin;
- unsigned int is_open_drain:1;
+ struct gpio_desc *gpiod;
+ struct gpio_desc *pullup_gpiod;
void (*enable_external_pullup)(int enable);
- unsigned int ext_pullup_enable_pin;
unsigned int pullup_duration;
};
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 158715445ffb..55a611486bac 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -206,14 +206,16 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
/*
* Wakeup macros to be used to report events to the targets.
*/
+#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
+#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
- __wake_up(x, TASK_NORMAL, 1, (void *) (m))
+ __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
- __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+ __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
- __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
+ __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
- __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
+ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define ___wait_cond_timeout(condition) \
({ \
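
Previously the poll mask was cast straight to void * at each wake_up_*_poll() site; poll_to_key()/key_to_poll() centralize the cast so the __force conversions live in one place. A hedged sketch of the consumer side, a wait-queue callback recovering the typed mask:

static int example_wake_cb(wait_queue_entry_t *wait, unsigned int mode,
                           int sync, void *key)
{
        __poll_t events = key_to_poll(key);     /* waker used poll_to_key() */

        if (key && !(events & (EPOLLIN | EPOLLRDNORM)))
                return 0;       /* wakeup is not for readers */
        return autoremove_wake_function(wait, mode, sync, key);
}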
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 004ba807df96..7238865e75b0 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -108,4 +108,6 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver);
+bool zpool_evictable(struct zpool *pool);
+
#endif
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index 857da67bd931..d9c143d17f70 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -185,7 +185,7 @@ void *lirc_get_pdata(struct file *file);
*/
int lirc_dev_fop_open(struct inode *inode, struct file *file);
int lirc_dev_fop_close(struct inode *inode, struct file *file);
-unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait);
+__poll_t lirc_dev_fop_poll(struct file *file, poll_table *wait);
long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
ssize_t lirc_dev_fop_read(struct file *file, char __user *buffer, size_t length,
loff_t *ppos);
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 511615d3bf6f..dc2f64e1b08f 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -56,7 +56,7 @@ struct media_file_operations {
struct module *owner;
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*open) (struct file *);
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 4d8cb0796bc6..b7e42a1b0910 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -117,7 +117,7 @@ struct soc_camera_host_ops {
int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *);
- unsigned int (*poll)(struct file *, poll_table *);
+ __poll_t (*poll)(struct file *, poll_table *);
};
#define SOCAM_SENSOR_INVERT_PCLK (1 << 0)
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index dacfe54057f8..a9ced6bbee55 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -1037,7 +1037,7 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
* @file: pointer to struct file
* @wait: pointer to struct poll_table_struct
*/
-unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
/* Helpers for ioctl_ops */
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 28a686eb7d09..fa99f6f66712 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -152,7 +152,7 @@ struct v4l2_file_operations {
struct module *owner;
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32) (struct file *, unsigned int, unsigned long);
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index e157d5c9b224..3d07ba3a8262 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -297,7 +297,7 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
* indicate that a non-blocking write can be performed, while read will be
* returned in case of the destination queue.
*/
-unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait);
/**
@@ -601,7 +601,7 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
-unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
#endif /* _MEDIA_V4L2_MEM2MEM_H */
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index d760aa73ebbb..0bda0adc744f 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -219,7 +219,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
ssize_t videobuf_read_one(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
int nonblocking);
-unsigned int videobuf_poll_stream(struct file *file,
+__poll_t videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ef9b64398c8c..e55efc62a950 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -871,7 +871,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
-unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
poll_table *wait);
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 036127c54bbf..c2fa55657440 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -226,8 +226,7 @@ void vb2_queue_release(struct vb2_queue *q);
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file,
- poll_table *wait);
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
/*
* The following functions are not part of the vb2 core API, but are simple
@@ -262,7 +261,7 @@ ssize_t vb2_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
ssize_t vb2_fop_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait);
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait);
#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 480d50a0b8ba..b712be544f8c 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -267,7 +267,7 @@ int cxl_fd_open(struct inode *inode, struct file *file);
int cxl_fd_release(struct inode *inode, struct file *file);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
-unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
+__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
loff_t *off);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index fd08df74c466..6ed9692f20bd 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -86,7 +86,7 @@ struct tc_action_ops {
int (*act)(struct sk_buff *, const struct tc_action *,
struct tcf_result *);
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
- void (*cleanup)(struct tc_action *, int bind);
+ void (*cleanup)(struct tc_action *);
int (*lookup)(struct net *, struct tc_action **, u32);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
@@ -120,12 +120,19 @@ int tc_action_net_init(struct tc_action_net *tn,
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
struct tcf_idrinfo *idrinfo);
-static inline void tc_action_net_exit(struct tc_action_net *tn)
+static inline void tc_action_net_exit(struct list_head *net_list,
+ unsigned int id)
{
+ struct net *net;
+
rtnl_lock();
- tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct tc_action_net *tn = net_generic(net, id);
+
+ tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+ kfree(tn->idrinfo);
+ }
rtnl_unlock();
- kfree(tn->idrinfo);
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index b623b65a79d1..c4185a7b0e90 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -180,7 +180,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
*/
int ipv6_addr_label_init(void);
void ipv6_addr_label_cleanup(void);
-void ipv6_addr_label_rtnl_register(void);
+int ipv6_addr_label_rtnl_register(void);
u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
int type, int ifindex);
diff --git a/include/net/arp.h b/include/net/arp.h
index dc8cd47f883b..977aabfcdc03 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -20,6 +20,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
+ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+ key = INADDR_ANY;
+
return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e89cff0c4c23..ec9d6bc65855 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index fe328c52c46b..801489bb14c3 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -32,6 +32,33 @@ void cfpkt_destroy(struct cfpkt *pkt);
*/
int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
+static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
+{
+ u8 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+
+ return tmp;
+}
+
+static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
+{
+ __le16 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 2);
+
+ return le16_to_cpu(tmp);
+}
+
+static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
+{
+ __le32 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 4);
+
+ return le32_to_cpu(tmp);
+}
+
/*
* Peek header from packet.
* Reads data from packet without changing packet.
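
The typed helpers fold the common extract-then-convert pattern into one call and encode the little-endian wire order once. A sketch of a parser using them; the field layout is illustrative, not a real CAIF message:

static void example_parse(struct cfpkt *pkt)
{
        u8  cmd     = cfpkt_extr_head_u8(pkt);
        u16 channel = cfpkt_extr_head_u16(pkt); /* le16 on the wire */
        u32 token   = cfpkt_extr_head_u32(pkt); /* le32 on the wire */

        pr_debug("cmd=%u chan=%u token=%u\n", cmd, channel, token);
}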
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cb4d92b79cd9..81174f9b8d14 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -815,6 +815,8 @@ struct cfg80211_csa_settings {
u8 count;
};
+#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
+
/**
* struct iface_combination_params - input parameters for interface combinations
*
@@ -1773,6 +1775,8 @@ enum cfg80211_signal_type {
* by %parent_bssid.
* @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
* the BSS that requested the scan in which the beacon/probe was received.
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
*/
struct cfg80211_inform_bss {
struct ieee80211_channel *chan;
@@ -1781,6 +1785,8 @@ struct cfg80211_inform_bss {
u64 boottime_ns;
u64 parent_tsf;
u8 parent_bssid[ETH_ALEN] __aligned(2);
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
};
/**
@@ -1824,6 +1830,8 @@ struct cfg80211_bss_ies {
* that holds the beacon data. @beacon_ies is still valid, of course, and
* points to the same data as hidden_beacon_bss->beacon_ies in that case.
* @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
@@ -1842,6 +1850,8 @@ struct cfg80211_bss {
u16 capability;
u8 bssid[ETH_ALEN];
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 priv[0] __aligned(sizeof(void *));
};
@@ -2021,6 +2031,9 @@ struct cfg80211_disassoc_request {
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
* will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ * CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
*/
struct cfg80211_ibss_params {
const u8 *ssid;
@@ -2037,6 +2050,8 @@ struct cfg80211_ibss_params {
int mcast_rate[NUM_NL80211_BANDS];
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
+ struct key_params *wep_keys;
+ int wep_tx_key;
};
/**
@@ -5575,7 +5590,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* cfg80211_rx_mgmt - notification of received, unprocessed management frame
* @wdev: wireless device receiving the frame
* @freq: Frequency on which the frame was received in MHz
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
* @buf: Management frame (header + body)
* @len: length of the frame data
* @flags: flags, as defined in enum nl80211_rxmgmt_flags
@@ -5754,7 +5769,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
* @frame: the frame
* @len: length of the frame
* @freq: frequency the frame was received on
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
*
* Use this function to report to userspace when a beacon was
* received. It is not useful to call this when there is no
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b9654e133599..6545b03e97f7 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -26,10 +26,12 @@ struct devlink {
struct list_head port_list;
struct list_head sb_list;
struct list_head dpipe_table_list;
+ struct list_head resource_list;
struct devlink_dpipe_headers *dpipe_headers;
const struct devlink_ops *ops;
struct device *dev;
possible_net_t _net;
+ struct mutex lock;
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -181,6 +183,9 @@ struct devlink_dpipe_table_ops;
* @counters_enabled: indicates if counters are active
* @counter_control_extern: indicates if counter control is in dpipe or
* external tool
+ * @resource_valid: Indicate that the resource id is valid
+ * @resource_id: relative resource this table is related to
+ * @resource_units: number of resource's unit consumed per table's entry
* @table_ops: table operations
* @rcu: rcu
*/
@@ -190,6 +195,9 @@ struct devlink_dpipe_table {
const char *name;
bool counters_enabled;
bool counter_control_extern;
+ bool resource_valid;
+ u64 resource_id;
+ u64 resource_units;
struct devlink_dpipe_table_ops *table_ops;
struct rcu_head rcu;
};
@@ -223,7 +231,63 @@ struct devlink_dpipe_headers {
unsigned int headers_count;
};
+/**
+ * struct devlink_resource_ops - resource ops
+ * @occ_get: get the occupied size
+ * @size_validate: validate the size of the resource before update, reload
+ * is needed for changes to take place
+ */
+struct devlink_resource_ops {
+ u64 (*occ_get)(struct devlink *devlink);
+ int (*size_validate)(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack);
+};
+
+/**
+ * struct devlink_resource_size_params - resource's size parameters
+ * @size_min: minimum size which can be set
+ * @size_max: maximum size which can be set
+ * @size_granularity: size granularity
+ * @size_unit: resource's basic unit
+ */
+struct devlink_resource_size_params {
+ u64 size_min;
+ u64 size_max;
+ u64 size_granularity;
+ enum devlink_resource_unit unit;
+};
+
+/**
+ * struct devlink_resource - devlink resource
+ * @name: name of the resource
+ * @id: id, per devlink instance
+ * @size: size of the resource
+ * @size_new: updated size of the resource, reload is needed
+ * @size_valid: valid in case the total size of the resource is valid
+ * including its children
+ * @parent: parent resource
+ * @size_params: size parameters
+ * @list: parent list
+ * @resource_list: list of child resources
+ * @resource_ops: resource ops
+ */
+struct devlink_resource {
+ const char *name;
+ u64 id;
+ u64 size;
+ u64 size_new;
+ bool size_valid;
+ struct devlink_resource *parent;
+ struct devlink_resource_size_params *size_params;
+ struct list_head list;
+ struct list_head resource_list;
+ const struct devlink_resource_ops *resource_ops;
+};
+
+#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+
struct devlink_ops {
+ int (*reload)(struct devlink *devlink);
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
int (*port_split)(struct devlink *devlink, unsigned int port_index,
@@ -332,6 +396,23 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
+int devlink_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ bool top_hierarchy,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ struct devlink_resource_size_params *size_params,
+ const struct devlink_resource_ops *resource_ops);
+void devlink_resources_unregister(struct devlink *devlink,
+ struct devlink_resource *resource);
+int devlink_resource_size_get(struct devlink *devlink,
+ u64 resource_id,
+ u64 *p_resource_size);
+int devlink_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units);
+
#else
static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
@@ -468,6 +549,40 @@ devlink_dpipe_match_put(struct sk_buff *skb,
return 0;
}
+static inline int
+devlink_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ bool top_hierarchy,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ struct devlink_resource_size_params *size_params,
+ const struct devlink_resource_ops *resource_ops)
+{
+ return 0;
+}
+
+static inline void
+devlink_resources_unregister(struct devlink *devlink,
+ struct devlink_resource *resource)
+{
+}
+
+static inline int
+devlink_resource_size_get(struct devlink *devlink, u64 resource_id,
+ u64 *p_resource_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
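
A driver builds its resource tree at registration time, tying nested resources together by id and later binding dpipe tables to them with devlink_dpipe_table_resource_set(). A hedged sketch of registering one top-level resource; the name, id, sizes, and the ENTRY unit value (from the matching uapi change) are illustrative:

#define EXAMPLE_RESOURCE_ID_KVD 1

static struct devlink_resource_size_params example_kvd_size_params = {
        .size_min = 0,
        .size_max = 1 << 20,
        .size_granularity = 1 << 10,
        .unit = DEVLINK_RESOURCE_UNIT_ENTRY,
};

static int example_resources_register(struct devlink *devlink)
{
        return devlink_resource_register(devlink, "kvd", true, 1 << 20,
                                         EXAMPLE_RESOURCE_ID_KVD,
                                         DEVLINK_RESOURCE_ID_PARENT_TOP,
                                         &example_kvd_size_params,
                                         NULL /* no occ_get in this sketch */);
}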
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 55df9939bca2..342d2503cba5 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -69,6 +69,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
*/
struct dn_route {
struct dst_entry dst;
+ struct dn_route __rcu *dn_next;
struct neighbour *n;
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2a05738570d8..6cb602dd970c 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -296,31 +296,39 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
return mask;
}
-static inline u8 dsa_upstream_port(struct dsa_switch *ds)
+/* Return the local port used to reach an arbitrary switch port */
+static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
+ int port)
{
- struct dsa_switch_tree *dst = ds->dst;
-
- /*
- * If this is the root switch (i.e. the switch that connects
- * to the CPU), return the cpu port number on this switch.
- * Else return the (DSA) port number that connects to the
- * switch that is one hop closer to the cpu.
- */
- if (dst->cpu_dp->ds == ds)
- return dst->cpu_dp->index;
+ if (device == ds->index)
+ return port;
else
- return ds->rtable[dst->cpu_dp->ds->index];
+ return ds->rtable[device];
+}
+
+/* Return the local port used to reach the dedicated CPU port */
+static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
+{
+ const struct dsa_port *dp = dsa_to_port(ds, port);
+ const struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ if (!cpu_dp)
+ return port;
+
+ return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
}
typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
/*
* Legacy probing.
*/
const char *(*probe)(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
void **priv);
+#endif
enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
int port);
@@ -412,12 +420,10 @@ struct dsa_switch_ops {
*/
int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
bool vlan_filtering);
- int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
- void (*port_vlan_add)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
+ int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+ void (*port_vlan_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
int (*port_vlan_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
/*
@@ -433,12 +439,10 @@ struct dsa_switch_ops {
/*
* Multicast database
*/
- int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans);
- void (*port_mdb_add)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans);
+ int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+ void (*port_mdb_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int (*port_mdb_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
/*
@@ -472,11 +476,20 @@ struct dsa_switch_driver {
const struct dsa_switch_ops *ops;
};
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
/* Legacy driver registration */
void register_switch_driver(struct dsa_switch_driver *type);
void unregister_switch_driver(struct dsa_switch_driver *type);
struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
+#else
+static inline void register_switch_driver(struct dsa_switch_driver *type) { }
+static inline void unregister_switch_driver(struct dsa_switch_driver *type) { }
+static inline struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev)
+{
+ return NULL;
+}
+#endif
struct net_device *dsa_dev_to_net_device(struct device *dev);
/* Keep inline for faster access in hot path */
diff --git a/include/net/dst.h b/include/net/dst.h
index b091fd536098..c63d2c37f6e9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -34,13 +34,9 @@ struct sk_buff;
struct dst_entry {
struct net_device *dev;
- struct rcu_head rcu_head;
- struct dst_entry *child;
struct dst_ops *ops;
unsigned long _metrics;
unsigned long expires;
- struct dst_entry *path;
- struct dst_entry *from;
#ifdef CONFIG_XFRM
struct xfrm_state *xfrm;
#else
@@ -59,8 +55,6 @@ struct dst_entry {
#define DST_XFRM_QUEUE 0x0040
#define DST_METADATA 0x0080
- short error;
-
/* A non-zero value of dst->obsolete forces by-hand validation
* of the route entry. Positive values are set by the generic
* dst layer to indicate that the entry has been forcefully
@@ -76,35 +70,24 @@ struct dst_entry {
#define DST_OBSOLETE_KILL -2
unsigned short header_len; /* more space at head required */
unsigned short trailer_len; /* space to reserve at tail */
- unsigned short __pad3;
-
-#ifdef CONFIG_IP_ROUTE_CLASSID
- __u32 tclassid;
-#else
- __u32 __pad2;
-#endif
-#ifdef CONFIG_64BIT
- /*
- * Align __refcnt to a 64 bytes alignment
- * (L1_CACHE_SIZE would be too much)
- */
- long __pad_to_align_refcnt[2];
-#endif
/*
* __refcnt wants to be on a different cache line from
* input/output/ops or performance tanks badly
*/
- atomic_t __refcnt; /* client references */
+#ifdef CONFIG_64BIT
+ atomic_t __refcnt; /* 64-bit offset 64 */
+#endif
int __use;
unsigned long lastuse;
struct lwtunnel_state *lwtstate;
- union {
- struct dst_entry *next;
- struct rtable __rcu *rt_next;
- struct rt6_info __rcu *rt6_next;
- struct dn_route __rcu *dn_next;
- };
+ struct rcu_head rcu_head;
+ short error;
+ short __pad;
+ __u32 tclassid;
+#ifndef CONFIG_64BIT
+ atomic_t __refcnt; /* 32-bit offset 64 */
+#endif
};
struct dst_metrics {
@@ -250,7 +233,7 @@ static inline void dst_hold(struct dst_entry *dst)
{
/*
* If your kernel compilation stops here, please check
- * __pad_to_align_refcnt declaration in struct dst_entry
+ * the placement of __refcnt in struct dst_entry
*/
BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
@@ -521,4 +504,12 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
}
#endif
+static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst && dst->ops->update_pmtu)
+ dst->ops->update_pmtu(dst, NULL, skb, mtu);
+}
+
#endif /* _NET_DST_H */
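skb_dst_update_pmtu() centralizes the route/op NULL checks that tunnel transmit paths used to open-code. A minimal sketch of the calling pattern, with an illustrative helper name:

#include <net/dst.h>

/* Hypothetical tunnel xmit step: shrink the path MTU by the
 * encapsulation overhead before emitting the packet.
 */
static void example_tnl_update_pmtu(struct sk_buff *skb, int headroom)
{
	struct dst_entry *dst = skb_dst(skb);

	if (!dst)
		return;

	/* Safe even when the route has no update_pmtu op. */
	skb_dst_update_pmtu(skb, dst_mtu(dst) - headroom);
}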
diff --git a/include/net/erspan.h b/include/net/erspan.h
index ca94fc86865e..5daa4866412b 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -15,7 +15,7 @@
* s, Recur, Flags, Version fields only S (bit 03) is set to 1. The
* other fields are set to zero, so only a sequence number follows.
*
- * ERSPAN Type II header (8 octets [42:49])
+ * ERSPAN Version 1 (Type II) header (8 octets [42:49])
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -24,11 +24,31 @@
* | Reserved | Index |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
+ *
+ * ERSPAN Version 2 (Type III) header (12 octets [42:53])
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ver | VLAN | COS |BSO|T| Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Timestamp |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SGT |P| FT | Hw ID |D|Gra|O|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Platform Specific SubHeader (8 octets, optional)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Platf ID | Platform Specific Info |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Platform Specific Info |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
* GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB
*/
-#define ERSPAN_VERSION 0x1
+#include <uapi/linux/erspan.h>
+#define ERSPAN_VERSION 0x1 /* ERSPAN type II */
#define VER_MASK 0xf000
#define VLAN_MASK 0x0fff
#define COS_MASK 0xe000
@@ -37,6 +57,19 @@
#define ID_MASK 0x03ff
#define INDEX_MASK 0xfffff
+#define ERSPAN_VERSION2 0x2 /* ERSPAN type III */
+#define BSO_MASK EN_MASK
+#define SGT_MASK 0xffff0000
+#define P_MASK 0x8000
+#define FT_MASK 0x7c00
+#define HWID_MASK 0x03f0
+#define DIR_MASK 0x0008
+#define GRA_MASK 0x0006
+#define O_MASK 0x0001
+
+#define HWID_OFFSET 4
+#define DIR_OFFSET 3
+
enum erspan_encap_type {
ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */
ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */
@@ -44,18 +77,199 @@ enum erspan_encap_type {
ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag preserved in frame */
};
-struct erspan_metadata {
- __be32 index; /* type II */
-};
+#define ERSPAN_V1_MDSIZE 4
+#define ERSPAN_V2_MDSIZE 8
-struct erspanhdr {
- __be16 ver_vlan;
-#define VER_OFFSET 12
- __be16 session_id;
-#define COS_OFFSET 13
-#define EN_OFFSET 11
-#define T_OFFSET 10
- struct erspan_metadata md;
+struct erspan_base_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 vlan_upper:4,
+ ver:4;
+ __u8 vlan:8;
+ __u8 session_id_upper:2,
+ t:1,
+ en:2,
+ cos:3;
+ __u8 session_id:8;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 ver:4,
+ vlan_upper:4;
+ __u8 vlan:8;
+ __u8 cos:3,
+ en:2,
+ t:1,
+ session_id_upper:2;
+ __u8 session_id:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
};
+static inline void set_session_id(struct erspan_base_hdr *ershdr, u16 id)
+{
+ ershdr->session_id = id & 0xff;
+ ershdr->session_id_upper = (id >> 8) & 0x3;
+}
+
+static inline u16 get_session_id(const struct erspan_base_hdr *ershdr)
+{
+ return (ershdr->session_id_upper << 8) + ershdr->session_id;
+}
+
+static inline void set_vlan(struct erspan_base_hdr *ershdr, u16 vlan)
+{
+ ershdr->vlan = vlan & 0xff;
+ ershdr->vlan_upper = (vlan >> 8) & 0xf;
+}
+
+static inline u16 get_vlan(const struct erspan_base_hdr *ershdr)
+{
+ return (ershdr->vlan_upper << 8) + ershdr->vlan;
+}
+
+static inline void set_hwid(struct erspan_md2 *md2, u8 hwid)
+{
+ md2->hwid = hwid & 0xf;
+ md2->hwid_upper = (hwid >> 4) & 0x3;
+}
+
+static inline u8 get_hwid(const struct erspan_md2 *md2)
+{
+ return (md2->hwid_upper << 4) + md2->hwid;
+}
+
+static inline int erspan_hdr_len(int version)
+{
+ return sizeof(struct erspan_base_hdr) +
+ (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
+}
+
+static inline u8 tos_to_cos(u8 tos)
+{
+ u8 dscp, cos;
+
+ dscp = tos >> 2;
+ cos = dscp >> 3;
+ return cos;
+}
+
+static inline void erspan_build_header(struct sk_buff *skb,
+ u32 id, u32 index,
+ bool truncate, bool is_ipv4)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ enum erspan_encap_type enc_type;
+ struct erspan_base_hdr *ershdr;
+ struct erspan_metadata *ersmd;
+ struct qtag_prefix {
+ __be16 eth_type;
+ __be16 tci;
+ } *qp;
+ u16 vlan_tci = 0;
+ u8 tos;
+
+ tos = is_ipv4 ? ip_hdr(skb)->tos :
+ (ipv6_hdr(skb)->priority << 4) +
+ (ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+ enc_type = ERSPAN_ENCAP_NOVLAN;
+
+ /* If the mirrored packet has a VLAN tag, extract the TCI and
+ * preserve the VLAN header in the mirrored frame.
+ */
+ if (eth->h_proto == htons(ETH_P_8021Q)) {
+ qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+ vlan_tci = ntohs(qp->tci);
+ enc_type = ERSPAN_ENCAP_INFRAME;
+ }
+
+ skb_push(skb, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+ ershdr = (struct erspan_base_hdr *)skb->data;
+ memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+
+ /* Build base header */
+ ershdr->ver = ERSPAN_VERSION;
+ ershdr->cos = tos_to_cos(tos);
+ ershdr->en = enc_type;
+ ershdr->t = truncate;
+ set_vlan(ershdr, vlan_tci);
+ set_session_id(ershdr, id);
+
+ /* Build metadata */
+ ersmd = (struct erspan_metadata *)(ershdr + 1);
+ ersmd->u.index = htonl(index & INDEX_MASK);
+}
+
+/* ERSPAN GRA: timestamp granularity
+ * 00b --> granularity = 100 microseconds
+ * 01b --> granularity = 100 nanoseconds
+ * 10b --> granularity = IEEE 1588
+ * Here we only support 100 microseconds.
+ */
+static inline __be32 erspan_get_timestamp(void)
+{
+ u64 h_usecs;
+ ktime_t kt;
+
+ kt = ktime_get_real();
+ h_usecs = ktime_divns(kt, 100 * NSEC_PER_USEC);
+
+ /* The ERSPAN timestamp field is only 32 bits wide, so at 100 us
+ * granularity it wraps around roughly every five days.
+ */
+ return htonl((u32)h_usecs);
+}
+
+static inline void erspan_build_header_v2(struct sk_buff *skb,
+ u32 id, u8 direction, u16 hwid,
+ bool truncate, bool is_ipv4)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ struct erspan_base_hdr *ershdr;
+ struct erspan_metadata *md;
+ struct qtag_prefix {
+ __be16 eth_type;
+ __be16 tci;
+ } *qp;
+ u16 vlan_tci = 0;
+ u8 gra = 0; /* 100 usec */
+ u8 bso = 0; /* Bad/Short/Oversized */
+ u8 sgt = 0;
+ u8 tos;
+
+ tos = is_ipv4 ? ip_hdr(skb)->tos :
+ (ipv6_hdr(skb)->priority << 4) +
+ (ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+ /* Unlike v1, v2 does not have an En field,
+ * so only the VLAN TCI is extracted.
+ */
+ if (eth->h_proto == htons(ETH_P_8021Q)) {
+ qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+ vlan_tci = ntohs(qp->tci);
+ }
+
+ skb_push(skb, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+ ershdr = (struct erspan_base_hdr *)skb->data;
+ memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+
+ /* Build base header */
+ ershdr->ver = ERSPAN_VERSION2;
+ ershdr->cos = tos_to_cos(tos);
+ ershdr->en = bso;
+ ershdr->t = truncate;
+ set_vlan(ershdr, vlan_tci);
+ set_session_id(ershdr, id);
+
+ /* Build metadata */
+ md = (struct erspan_metadata *)(ershdr + 1);
+ md->u.md2.timestamp = erspan_get_timestamp();
+ md->u.md2.sgt = htons(sgt);
+ md->u.md2.p = 1;
+ md->u.md2.ft = 0;
+ md->u.md2.dir = direction;
+ md->u.md2.gra = gra;
+ md->u.md2.o = 0;
+ set_hwid(&md->u.md2, hwid);
+}
+
#endif
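Taken together, the v2 helpers above are meant to be called from a GRE transmit path once enough headroom has been reserved. A hedged sketch; the configuration values would normally come from the tunnel parameters (the erspan_ver, dir and hwid fields added to the tunnel structs later in this patch):

#include <net/erspan.h>

/* Hypothetical mirror-encap step. */
static void example_erspan_v2_encap(struct sk_buff *skb, u32 session_id,
				    u8 dir, u16 hwid, bool truncate)
{
	/* Caller must have reserved sizeof(struct erspan_base_hdr) +
	 * ERSPAN_V2_MDSIZE bytes of headroom; the helper skb_push()es
	 * and fills both the base header and the md2 metadata.
	 */
	erspan_build_header_v2(skb, session_id, dir, hwid, truncate,
			       /* is_ipv4 */ true);
}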
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 304f7aa9cc01..0304ba2ae353 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -49,6 +49,9 @@ int gnet_stats_copy_rate_est(struct gnet_dump *d,
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
+void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu_q,
+ const struct gnet_stats_queue *q, __u32 qlen);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0358745ea059..6692d67e9245 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -77,6 +77,7 @@ struct inet_connection_sock_af_ops {
* @icsk_af_ops Operations which are AF_INET{4,6} specific
* @icsk_ulp_ops Pluggable ULP control hook
* @icsk_ulp_data ULP private data
+ * @icsk_listen_portaddr_node: hash node in the portaddr listener hashtable
* @icsk_ca_state: Congestion control state
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
@@ -101,6 +102,7 @@ struct inet_connection_sock {
const struct inet_connection_sock_af_ops *icsk_af_ops;
const struct tcp_ulp_ops *icsk_ulp_ops;
void *icsk_ulp_data;
+ struct hlist_node icsk_listen_portaddr_node;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
__u8 icsk_ca_state:6,
icsk_ca_setsockopt:1,
@@ -305,7 +307,7 @@ void inet_csk_prepare_forced_close(struct sock *sk);
/*
* LISTEN is a special case for poll..
*/
-static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
+static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{
return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
(POLLIN | POLLRDNORM) : 0;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 2dbbbff5e1e3..9141e95529e7 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -111,6 +111,7 @@ struct inet_bind_hashbucket {
*/
struct inet_listen_hashbucket {
spinlock_t lock;
+ unsigned int count;
struct hlist_head head;
};
@@ -132,12 +133,13 @@ struct inet_hashinfo {
/* Ok, let's try this, I give up, we do need a local binding
* TCP hash as well as the others for fast bind/connect.
*/
+ struct kmem_cache *bind_bucket_cachep;
struct inet_bind_hashbucket *bhash;
-
unsigned int bhash_size;
- /* 4 bytes hole on 64 bit */
- struct kmem_cache *bind_bucket_cachep;
+ /* The 2nd listener table hashed by local port and address */
+ unsigned int lhash2_mask;
+ struct inet_listen_hashbucket *lhash2;
/* All the above members are written once at bootup and
* never written again _or_ are predominantly read-access.
@@ -145,14 +147,25 @@ struct inet_hashinfo {
* Now align to a new cache line as all the following members
* might be often dirty.
*/
- /* All sockets in TCP_LISTEN state will be in here. This is the only
- * table where wildcard'd TCP sockets can exist. Hash function here
- * is just local port number.
+ /* All sockets in TCP_LISTEN state will be in listening_hash.
+ * This is the only table where wildcard'd TCP sockets can
+ * exist. listening_hash is only hashed by local port number.
+ * If lhash2 is initialized, the same socket will also be hashed
+ * to lhash2 by port and address.
*/
struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
____cacheline_aligned_in_smp;
};
+#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
+ hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
+
+static inline struct inet_listen_hashbucket *
+inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
+{
+ return &h->lhash2[hash & h->lhash2_mask];
+}
+
static inline struct inet_ehash_bucket *inet_ehash_bucket(
struct inet_hashinfo *hashinfo,
unsigned int hash)
@@ -208,6 +221,10 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child);
void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
+void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+ unsigned long numentries, int scale,
+ unsigned long low_limit,
+ unsigned long high_limit);
bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
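The lhash2 table pairs with the portaddr hash helpers added to net/ip.h and net/ipv6.h later in this patch. A sketch of how a listener lookup might pick the second-level bucket, assuming lhash2 was allocated via inet_hashinfo2_init():

#include <net/inet_hashtables.h>
#include <net/ip.h>

static struct inet_listen_hashbucket *
example_lhash2_bucket(struct inet_hashinfo *h, struct net *net,
		      __be32 daddr, unsigned short hnum)
{
	u32 hash = ipv4_portaddr_hash(net, daddr, hnum);

	/* A NULL lhash2 means callers fall back to listening_hash. */
	return h->lhash2 ? inet_lhash2_bucket(h, hash) : NULL;
}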
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 39efb968b7a4..0a671c32d6b9 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -291,6 +291,31 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
int inet_sk_rebuild_header(struct sock *sk);
+/**
+ * inet_sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with inet_sk_state_store(). Used in places we don't hold socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int inet_sk_state_load(const struct sock *sk)
+{
+ /* state change might impact lockless readers. */
+ return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * inet_sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with inet_sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+void inet_sk_state_store(struct sock *sk, int newstate);
+
+void inet_sk_set_state(struct sock *sk, int state);
+
static inline unsigned int __inet_ehashfn(const __be32 laddr,
const __u16 lport,
const __be32 faddr,
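inet_sk_state_load()/inet_sk_state_store() give lockless readers acquire/release ordering against fields published around a state change. A minimal sketch of the reader side:

#include <net/inet_sock.h>
#include <net/tcp_states.h>

/* Hypothetical diag-style reader running without the socket lock. */
static bool example_sk_is_listening(const struct sock *sk)
{
	/* The acquire pairs with the release in inet_sk_state_store(),
	 * ordering this read against state published before the change.
	 */
	return inet_sk_state_load(sk) == TCP_LISTEN;
}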
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 1356fa6a7566..899495589a7e 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -93,8 +93,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
const int state);
-void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
- struct inet_hashinfo *hashinfo);
+void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ struct inet_hashinfo *hashinfo);
void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
bool rearm);
diff --git a/include/net/ip.h b/include/net/ip.h
index af8addbaa3c1..746abff9ce51 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -26,12 +26,14 @@
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
+#include <linux/jhash.h>
#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
+#include <net/netns/hash.h>
#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68 /* RFC 791 */
@@ -522,6 +524,13 @@ static inline unsigned int ipv4_addr_hash(__be32 ip)
return (__force unsigned int) ip;
}
+static inline u32 ipv4_portaddr_hash(const struct net *net,
+ __be32 saddr,
+ unsigned int port)
+{
+ return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
+}
+
bool ip_call_ra_chain(struct sk_buff *skb);
/*
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 10c913816032..34ec321d6a03 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -129,6 +129,8 @@ struct rt6_exception {
struct rt6_info {
struct dst_entry dst;
+ struct rt6_info __rcu *rt6_next;
+ struct rt6_info *from;
/*
* Tail elements of dst_entry (__refcnt etc.)
@@ -147,6 +149,7 @@ struct rt6_info {
*/
struct list_head rt6i_siblings;
unsigned int rt6i_nsiblings;
+ atomic_t rt6i_nh_upper_bound;
atomic_t rt6i_ref;
@@ -168,19 +171,21 @@ struct rt6_info {
u32 rt6i_metric;
u32 rt6i_pmtu;
/* more non-fragment space at head required */
+ int rt6i_nh_weight;
unsigned short rt6i_nfheader_len;
u8 rt6i_protocol;
u8 exception_bucket_flushed:1,
- unused:7;
+ should_flush:1,
+ unused:6;
};
#define for_each_fib6_node_rt_rcu(fn) \
for (rt = rcu_dereference((fn)->leaf); rt; \
- rt = rcu_dereference(rt->dst.rt6_next))
+ rt = rcu_dereference(rt->rt6_next))
#define for_each_fib6_walker_rt(w) \
for (rt = (w)->leaf; rt; \
- rt = rcu_dereference_protected(rt->dst.rt6_next, 1))
+ rt = rcu_dereference_protected(rt->rt6_next, 1))
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
@@ -203,11 +208,9 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
struct rt6_info *rt;
- for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES);
- rt = (struct rt6_info *)rt->dst.from);
+ for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt = rt->from);
if (rt && rt != rt0)
rt0->dst.expires = rt->dst.expires;
-
dst_set_expires(&rt0->dst, timeout);
rt0->rt6i_flags |= RTF_EXPIRES;
}
@@ -242,8 +245,8 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
u32 cookie = 0;
if (rt->rt6i_flags & RTF_PCPU ||
- (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
- rt = (struct rt6_info *)(rt->dst.from);
+ (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
+ rt = rt->from;
rt6_get_cookie_safe(rt, &cookie);
@@ -404,6 +407,7 @@ unsigned int fib6_tables_seq_read(struct net *net);
int fib6_tables_dump(struct net *net, struct notifier_block *nb);
void fib6_update_sernum(struct rt6_info *rt);
+void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
int fib6_rules_init(void);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 18e442ea93d8..27d23a65f3cd 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
+static inline bool rt6_qualify_for_ecmp(const struct rt6_info *rt)
+{
+ return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+ RTF_GATEWAY;
+}
+
void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
@@ -165,10 +171,13 @@ struct rt6_rtnl_dump_arg {
};
int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-void rt6_ifdown(struct net *net, struct net_device *dev);
void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
+void rt6_sync_up(struct net_device *dev, unsigned int nh_flags);
+void rt6_disable_ip(struct net_device *dev, unsigned long event);
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
+void rt6_multipath_rebalance(struct rt6_info *rt);
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d66f70f63734..236e40ba06bf 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,10 @@ struct __ip6_tnl_parm {
__be32 o_key;
__u32 fwmark;
+ __u32 index; /* ERSPAN type II index */
+ __u8 erspan_ver; /* ERSPAN version */
+ __u8 dir; /* direction */
+ __u16 hwid; /* hwid */
};
/* IPv6 tunnel */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 24628f6b09bf..1f16773cfd76 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -116,8 +116,11 @@ struct ip_tunnel {
u32 o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
- /* This field used only by ERSPAN */
+ /* These four fields are used only by ERSPAN */
u32 index; /* ERSPAN type II index */
+ u8 erspan_ver; /* ERSPAN version */
+ u8 dir; /* ERSPAN direction */
+ u16 hwid; /* ERSPAN hardware ID */
struct dst_cache dst_cache;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index ff68cf288f9b..eb0bec043c96 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -69,8 +69,7 @@ struct ip_vs_iphdr {
};
static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
- int len, void *buffer,
- const struct ip_vs_iphdr *ipvsh)
+ int len, void *buffer)
{
return skb_header_pointer(skb, offset, len, buffer);
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f73797e2fa60..8606c9113d3f 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -22,6 +22,7 @@
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/snmp.h>
+#include <net/netns/hash.h>
#define SIN6_LEN_RFC2133 24
@@ -331,6 +332,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
+bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
@@ -673,6 +675,22 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}
+static inline u32 ipv6_portaddr_hash(const struct net *net,
+ const struct in6_addr *addr6,
+ unsigned int port)
+{
+ unsigned int hash, mix = net_hash_mix(net);
+
+ if (ipv6_addr_any(addr6))
+ hash = jhash_1word(0, mix);
+ else if (ipv6_addr_v4mapped(addr6))
+ hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
+ else
+ hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
+
+ return hash ^ port;
+}
+
/*
* Check for a RFC 4843 ORCHID address
* (Overlay Routable Cryptographic Hash Identifiers)
@@ -952,6 +970,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
&inet6_sk(sk)->cork);
}
+unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst);
+
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
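Note the design choice in ipv6_portaddr_hash(): a v4-mapped address hashes over its embedded IPv4 word exactly as ipv4_portaddr_hash() does, so a dual-stack listener lands in the same lhash2 bucket whichever family the lookup arrives on. A small illustrative check:

#include <net/ip.h>
#include <net/ipv6.h>

/* Illustrative only: for v4-mapped addresses the two helpers agree. */
static u32 example_dualstack_hash(const struct net *net,
				  const struct in6_addr *a6,
				  unsigned int port)
{
	if (ipv6_addr_v4mapped(a6))
		/* Equals ipv6_portaddr_hash(net, a6, port). */
		return ipv4_portaddr_hash(net, a6->s6_addr32[3], port);

	return ipv6_portaddr_hash(net, a6, port);
}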
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 070e93a17c59..f4c21b5a1242 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,7 +153,7 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
-unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index eec143cca1c0..906e90223066 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1552,6 +1552,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
* @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
* driver for a key to indicate that sufficient tailroom must always
* be reserved for ICV or MIC, even when HW encryption is enabled.
+ * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for
+ * a TKIP key if it only requires MIC space. Do not set together with
+ * @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -1562,6 +1565,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5),
IEEE80211_KEY_FLAG_RX_MGMT = BIT(6),
IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7),
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8),
};
/**
@@ -1593,8 +1597,8 @@ struct ieee80211_key_conf {
u8 icv_len;
u8 iv_len;
u8 hw_key_idx;
- u8 flags;
s8 keyidx;
+ u16 flags;
u8 keylen;
u8 key[0];
};
@@ -2056,6 +2060,9 @@ struct ieee80211_txq {
* The stack will not do fragmentation.
* The callback for @set_frag_threshold should be set as well.
*
+ * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
+ * TDLS links.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2098,6 +2105,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_TX_FRAG_LIST,
IEEE80211_HW_REPORTS_LOW_ACK,
IEEE80211_HW_SUPPORTS_TX_FRAG,
+ IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 10f99dafd5ac..f306b2aa15a4 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -51,7 +51,7 @@ struct net {
refcount_t passive; /* To decide when the network
* namespace should be freed.
*/
- atomic_t count; /* To decided when the network
+ refcount_t count; /* To decide when the network
* namespace should be shut down.
*/
spinlock_t rules_mod_lock;
@@ -195,7 +195,7 @@ void __put_net(struct net *net);
static inline struct net *get_net(struct net *net)
{
- atomic_inc(&net->count);
+ refcount_inc(&net->count);
return net;
}
@@ -206,14 +206,14 @@ static inline struct net *maybe_get_net(struct net *net)
* exists. If the reference count is zero this
* function fails and returns NULL.
*/
- if (!atomic_inc_not_zero(&net->count))
+ if (!refcount_inc_not_zero(&net->count))
net = NULL;
return net;
}
static inline void put_net(struct net *net)
{
- if (atomic_dec_and_test(&net->count))
+ if (refcount_dec_and_test(&net->count))
__put_net(net);
}
@@ -223,6 +223,11 @@ int net_eq(const struct net *net1, const struct net *net2)
return net1 == net2;
}
+static inline int check_net(const struct net *net)
+{
+ return refcount_read(&net->count) != 0;
+}
+
void net_drop_ns(void *);
#else
@@ -247,6 +252,11 @@ int net_eq(const struct net *net1, const struct net *net2)
return 1;
}
+static inline int check_net(const struct net *net)
+{
+ return 1;
+}
+
#define net_drop_ns NULL
#endif
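check_net() gives timer and other deferred contexts a cheap way to bail out of a dying namespace without pinning it the way maybe_get_net() would. A hedged sketch of the intended pattern:

#include <net/net_namespace.h>

/* Hypothetical timer callback touching per-netns state. */
static void example_timer_cb(struct net *net)
{
	if (!check_net(net))
		return;	/* refcount hit zero; namespace is going away */

	/* Callers must tolerate the inherent race: the count can drop
	 * to zero right after this check.
	 */
}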
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 4ed1040bbe4a..73f825732326 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -13,17 +13,17 @@
const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
#endif
int nf_conntrack_ipv4_compat_init(void);
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 9cd55be95853..effa8dfba68c 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -4,17 +4,17 @@
extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
#endif
#include <linux/sysctl.h>
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
new file mode 100644
index 000000000000..adf8db44cf86
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -0,0 +1,17 @@
+#ifndef _NF_CONNTRACK_COUNT_H
+#define _NF_CONNTRACK_COUNT_H
+
+struct nf_conncount_data;
+
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
+ unsigned int keylen);
+void nf_conncount_destroy(struct net *net, unsigned int family,
+ struct nf_conncount_data *data);
+
+unsigned int nf_conncount_count(struct net *net,
+ struct nf_conncount_data *data,
+ const u32 *key,
+ unsigned int family,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
+#endif
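The conncount API splits what was xt_connlimit into a reusable backend. A hedged usage sketch; the key layout and family value are illustrative and must match the keylen passed to nf_conncount_init():

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>

/* Hypothetical caller: count connections sharing a (masked) source. */
static unsigned int example_connlimit(struct net *net,
				      struct nf_conncount_data *data,
				      const struct nf_conntrack_tuple *tuple,
				      const struct nf_conntrack_zone *zone)
{
	u32 key[1] = { 0 };	/* e.g. the masked IPv4 source address */

	return nf_conncount_count(net, data, key, NFPROTO_IPV4, tuple, zone);
}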
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 7ef56c13698a..a7220eef9aee 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -27,6 +27,9 @@ struct nf_conntrack_l4proto {
/* Resolve clashes on insertion races. */
bool allow_clash;
+ /* protoinfo nlattr size, closes a hole */
+ u16 nlattr_size;
+
/* Try to fill in the third arg: dataoff is offset past network protocol
hdr. Return true if possible. */
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
@@ -66,8 +69,6 @@ struct nf_conntrack_l4proto {
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct);
- /* Calculate protoinfo nlattr size */
- int (*nlattr_size)(void);
/* convert nfnetlink attributes to protoinfo */
int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
@@ -80,8 +81,6 @@ struct nf_conntrack_l4proto {
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
- size_t nla_size;
-
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
struct {
int (*nlattr_to_obj)(struct nlattr *tb[],
@@ -109,7 +108,7 @@ struct nf_conntrack_l4proto {
};
/* Existing built-in generic protocol */
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
#define MAX_NF_CT_PROTO 256
@@ -126,18 +125,18 @@ int nf_ct_l4proto_pernet_register_one(struct net *net,
void nf_ct_l4proto_pernet_unregister_one(struct net *net,
const struct nf_conntrack_l4proto *proto);
int nf_ct_l4proto_pernet_register(struct net *net,
- struct nf_conntrack_l4proto *const proto[],
+ const struct nf_conntrack_l4proto *const proto[],
unsigned int num_proto);
void nf_ct_l4proto_pernet_unregister(struct net *net,
- struct nf_conntrack_l4proto *const proto[],
+ const struct nf_conntrack_l4proto *const proto[],
unsigned int num_proto);
/* Protocol global registration. */
-int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
-int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[],
+int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
unsigned int num_proto);
-void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[],
+void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
unsigned int num_proto);
/* Generic netlink helpers */
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
new file mode 100644
index 000000000000..b22b22082733
--- /dev/null
+++ b/include/net/netfilter/nf_flow_table.h
@@ -0,0 +1,122 @@
+#ifndef _NF_FLOW_TABLE_H
+#define _NF_FLOW_TABLE_H
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/rcupdate.h>
+#include <net/dst.h>
+
+struct nf_flowtable;
+
+struct nf_flowtable_type {
+ struct list_head list;
+ int family;
+ void (*gc)(struct work_struct *work);
+ const struct rhashtable_params *params;
+ nf_hookfn *hook;
+ struct module *owner;
+};
+
+struct nf_flowtable {
+ struct rhashtable rhashtable;
+ const struct nf_flowtable_type *type;
+ struct delayed_work gc_work;
+};
+
+enum flow_offload_tuple_dir {
+ FLOW_OFFLOAD_DIR_ORIGINAL,
+ FLOW_OFFLOAD_DIR_REPLY,
+ __FLOW_OFFLOAD_DIR_MAX = FLOW_OFFLOAD_DIR_REPLY,
+};
+#define FLOW_OFFLOAD_DIR_MAX (__FLOW_OFFLOAD_DIR_MAX + 1)
+
+struct flow_offload_tuple {
+ union {
+ struct in_addr src_v4;
+ struct in6_addr src_v6;
+ };
+ union {
+ struct in_addr dst_v4;
+ struct in6_addr dst_v6;
+ };
+ struct {
+ __be16 src_port;
+ __be16 dst_port;
+ };
+
+ int iifidx;
+
+ u8 l3proto;
+ u8 l4proto;
+ u8 dir;
+
+ int oifidx;
+
+ struct dst_entry *dst_cache;
+};
+
+struct flow_offload_tuple_rhash {
+ struct rhash_head node;
+ struct flow_offload_tuple tuple;
+};
+
+#define FLOW_OFFLOAD_SNAT 0x1
+#define FLOW_OFFLOAD_DNAT 0x2
+#define FLOW_OFFLOAD_DYING 0x4
+
+struct flow_offload {
+ struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
+ u32 flags;
+ union {
+ /* Your private driver data here. */
+ u32 timeout;
+ };
+};
+
+#define NF_FLOW_TIMEOUT (30 * HZ)
+
+struct nf_flow_route {
+ struct {
+ struct dst_entry *dst;
+ int ifindex;
+ } tuple[FLOW_OFFLOAD_DIR_MAX];
+};
+
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
+ struct nf_flow_route *route);
+void flow_offload_free(struct flow_offload *flow);
+
+int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow);
+struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
+ struct flow_offload_tuple *tuple);
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ void (*iter)(struct flow_offload *flow, void *data),
+ void *data);
+void nf_flow_offload_work_gc(struct work_struct *work);
+extern const struct rhashtable_params nf_flow_offload_rhash_params;
+
+void flow_offload_dead(struct flow_offload *flow);
+
+int nf_flow_snat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
+int nf_flow_dnat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
+
+struct flow_ports {
+ __be16 source, dest;
+};
+
+unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
+#define MODULE_ALIAS_NF_FLOWTABLE(family) \
+ MODULE_ALIAS("nf-flowtable-" __stringify(family))
+
+#endif /* _NF_FLOW_TABLE_H */
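A hedged sketch of the lifecycle this flow table API implies: allocate a flow from an established conntrack entry plus its routes, insert it, and let the gc work reap expired entries. The function name and error handling are illustrative:

#include <net/netfilter/nf_flow_table.h>

static int example_flow_offload(struct nf_flowtable *ft,
				struct nf_conn *ct,
				struct nf_flow_route *route)
{
	struct flow_offload *flow;
	int err;

	flow = flow_offload_alloc(ct, route);
	if (!flow)
		return -ENOMEM;

	/* Hashes both tuple directions into ft->rhashtable and arms
	 * the NF_FLOW_TIMEOUT-based aging handled by the gc work.
	 */
	err = flow_offload_add(ft, flow);
	if (err < 0)
		flow_offload_free(flow);

	return err;
}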
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 814058d0f167..a50a69f5334c 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -25,7 +25,7 @@ struct nf_queue_entry {
struct nf_queue_handler {
int (*outfn)(struct nf_queue_entry *entry,
unsigned int queuenum);
- unsigned int (*nf_hook_drop)(struct net *net);
+ void (*nf_hook_drop)(struct net *net);
};
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index fecc6112c768..663b015dace5 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -9,6 +9,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/u64_stats_sync.h>
+#include <net/netfilter/nf_flow_table.h>
#include <net/netlink.h>
#define NFT_JUMP_STACK_SIZE 16
@@ -54,8 +55,8 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
pkt->xt.state = state;
}
-static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
pkt->tprot_set = false;
pkt->tprot = 0;
@@ -63,14 +64,6 @@ static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
pkt->xt.fragoff = 0;
}
-static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- nft_set_pktinfo(pkt, skb, state);
- nft_set_pktinfo_proto_unspec(pkt, skb);
-}
-
/**
* struct nft_verdict - nf_tables verdict
*
@@ -150,22 +143,22 @@ static inline void nft_data_debug(const struct nft_data *data)
* struct nft_ctx - nf_tables rule/set context
*
* @net: net namespace
- * @afi: address family info
* @table: the table the chain is contained in
* @chain: the chain the rule is contained in
* @nla: netlink attributes
* @portid: netlink portID of the original message
* @seq: netlink sequence number
+ * @family: protocol family
* @report: notify via unicast netlink message
*/
struct nft_ctx {
struct net *net;
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_chain *chain;
const struct nlattr * const *nla;
u32 portid;
u32 seq;
+ u8 family;
bool report;
};
@@ -381,6 +374,7 @@ void nft_unregister_set(struct nft_set_type *type);
* @list: table set list node
* @bindings: list of set bindings
* @name: name of the set
+ * @handle: unique handle of the set
* @ktype: key type (numeric type defined by userspace, not used in the kernel)
* @dtype: data type (verdict or numeric type defined by userspace)
* @objtype: object type (see NFT_OBJECT_* definitions)
@@ -403,6 +397,7 @@ struct nft_set {
struct list_head list;
struct list_head bindings;
char *name;
+ u64 handle;
u32 ktype;
u32 dtype;
u32 objtype;
@@ -424,6 +419,11 @@ struct nft_set {
__attribute__((aligned(__alignof__(u64))));
};
+static inline bool nft_set_is_anonymous(const struct nft_set *set)
+{
+ return set->flags & NFT_SET_ANONYMOUS;
+}
+
static inline void *nft_set_priv(const struct nft_set *set)
{
return (void *)set->data;
@@ -883,7 +883,7 @@ enum nft_chain_type {
* @family: address family
* @owner: module owner
* @hook_mask: mask of valid hooks
- * @hooks: hookfn overrides
+ * @hooks: array of hook functions
*/
struct nf_chain_type {
const char *name;
@@ -905,8 +905,6 @@ struct nft_stats {
struct u64_stats_sync syncp;
};
-#define NFT_HOOK_OPS_MAX 2
-
/**
* struct nft_base_chain - nf_tables base chain
*
@@ -918,7 +916,7 @@ struct nft_stats {
* @dev_name: device name that this base chain is attached to (if any)
*/
struct nft_base_chain {
- struct nf_hook_ops ops[NFT_HOOK_OPS_MAX];
+ struct nf_hook_ops ops;
const struct nf_chain_type *type;
u8 policy;
u8 flags;
@@ -948,10 +946,13 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
* @chains: chains in the table
* @sets: sets in the table
* @objects: stateful objects in the table
+ * @flowtables: flow tables in the table
* @hgenerator: handle generator state
+ * @handle: table handle
* @use: number of chain references to this table
* @flags: table flag (see enum nft_table_flags)
* @genmask: generation mask
+ * @afinfo: address family info
* @name: name of the table
*/
struct nft_table {
@@ -959,46 +960,16 @@ struct nft_table {
struct list_head chains;
struct list_head sets;
struct list_head objects;
+ struct list_head flowtables;
u64 hgenerator;
+ u64 handle;
u32 use;
- u16 flags:14,
+ u16 family:6,
+ flags:8,
genmask:2;
char *name;
};
-enum nft_af_flags {
- NFT_AF_NEEDS_DEV = (1 << 0),
-};
-
-/**
- * struct nft_af_info - nf_tables address family info
- *
- * @list: used internally
- * @family: address family
- * @nhooks: number of hooks in this family
- * @owner: module owner
- * @tables: used internally
- * @flags: family flags
- * @nops: number of hook ops in this family
- * @hook_ops_init: initialization function for chain hook ops
- * @hooks: hookfn overrides for packet validation
- */
-struct nft_af_info {
- struct list_head list;
- int family;
- unsigned int nhooks;
- struct module *owner;
- struct list_head tables;
- u32 flags;
- unsigned int nops;
- void (*hook_ops_init)(struct nf_hook_ops *,
- unsigned int);
- nf_hookfn *hooks[NF_MAX_HOOKS];
-};
-
-int nft_register_afinfo(struct net *, struct nft_af_info *);
-void nft_unregister_afinfo(struct net *, struct nft_af_info *);
-
int nft_register_chain_type(const struct nf_chain_type *);
void nft_unregister_chain_type(const struct nf_chain_type *);
@@ -1016,9 +987,9 @@ int nft_verdict_dump(struct sk_buff *skb, int type,
* @name: name of this stateful object
* @genmask: generation mask
* @use: number of references to this stateful object
- * @data: object data, layout depends on type
+ * @handle: unique object handle
* @ops: object operations
- * @data: pointer to object data
+ * @data: object data, layout depends on type
*/
struct nft_object {
struct list_head list;
@@ -1026,6 +997,7 @@ struct nft_object {
struct nft_table *table;
u32 genmask:2,
use:30;
+ u64 handle;
/* runtime data below here */
const struct nft_object_ops *ops ____cacheline_aligned;
unsigned char data[]
@@ -1097,6 +1069,46 @@ int nft_register_obj(struct nft_object_type *obj_type);
void nft_unregister_obj(struct nft_object_type *obj_type);
/**
+ * struct nft_flowtable - nf_tables flow table
+ *
+ * @list: flow table list node in table list
+ * @table: the table the flow table is contained in
+ * @name: name of this flow table
+ * @hooknum: hook number
+ * @priority: hook priority
+ * @ops_len: number of hooks in array
+ * @genmask: generation mask
+ * @use: number of references to this flow table
+ * @handle: unique object handle
+ * @data: rhashtable and garbage collector
+ * @ops: array of hooks
+ */
+struct nft_flowtable {
+ struct list_head list;
+ struct nft_table *table;
+ char *name;
+ int hooknum;
+ int priority;
+ int ops_len;
+ u32 genmask:2,
+ use:30;
+ u64 handle;
+ /* runtime data below here */
+ struct nf_hook_ops *ops ____cacheline_aligned;
+ struct nf_flowtable data;
+};
+
+struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
+ const struct nlattr *nla,
+ u8 genmask);
+void nft_flow_table_iterate(struct net *net,
+ void (*iter)(struct nf_flowtable *flowtable, void *data),
+ void *data);
+
+void nft_register_flowtable_type(struct nf_flowtable_type *type);
+void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
+
+/**
* struct nft_traceinfo - nft tracing information and state
*
* @pkt: pktinfo currently processed
@@ -1125,12 +1137,6 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
void nft_trace_notify(struct nft_traceinfo *info);
-#define nft_dereference(p) \
- nfnl_dereference(p, NFNL_SUBSYS_NFTABLES)
-
-#define MODULE_ALIAS_NFT_FAMILY(family) \
- MODULE_ALIAS("nft-afinfo-" __stringify(family))
-
#define MODULE_ALIAS_NFT_CHAIN(family, name) \
MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
@@ -1332,4 +1338,11 @@ struct nft_trans_obj {
#define nft_trans_obj(trans) \
(((struct nft_trans_obj *)trans->data)->obj)
+struct nft_trans_flowtable {
+ struct nft_flowtable *flowtable;
+};
+
+#define nft_trans_flowtable(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->flowtable)
+
#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index f0896ba456c4..ed7b511f0a59 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -5,15 +5,11 @@
#include <net/netfilter/nf_tables.h>
#include <net/ip.h>
-static inline void
-nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
struct iphdr *ip;
- nft_set_pktinfo(pkt, skb, state);
-
ip = ip_hdr(pkt->skb);
pkt->tprot_set = true;
pkt->tprot = ip->protocol;
@@ -21,10 +17,8 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
-static inline int
-__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
struct iphdr *iph, _iph;
u32 len, thoff;
@@ -52,16 +46,11 @@ __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
return 0;
}
-static inline void
-nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
- nft_set_pktinfo(pkt, skb, state);
- if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0)
- nft_set_pktinfo_proto_unspec(pkt, skb);
+ if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
+ nft_set_pktinfo_unspec(pkt, skb);
}
-extern struct nft_af_info nft_af_ipv4;
-
#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index b8065b72f56e..dabe6fdb553a 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -5,20 +5,16 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ipv6.h>
-static inline void
-nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
unsigned int flags = IP6_FH_F_AUTH;
int protohdr, thoff = 0;
unsigned short frag_off;
- nft_set_pktinfo(pkt, skb, state);
-
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
if (protohdr < 0) {
- nft_set_pktinfo_proto_unspec(pkt, skb);
+ nft_set_pktinfo_unspec(pkt, skb);
return;
}
@@ -28,10 +24,8 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
pkt->xt.fragoff = frag_off;
}
-static inline int
-__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
unsigned int flags = IP6_FH_F_AUTH;
@@ -68,16 +62,11 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
#endif
}
-static inline void
-nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
- nft_set_pktinfo(pkt, skb, state);
- if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0)
- nft_set_pktinfo_proto_unspec(pkt, skb);
+ if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
+ nft_set_pktinfo_unspec(pkt, skb);
}
-extern struct nft_af_info nft_af_ipv6;
-
#endif
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
index ecf238b8862c..ca9bd9fba5b5 100644
--- a/include/net/netns/can.h
+++ b/include/net/netns/can.h
@@ -8,7 +8,7 @@
#include <linux/spinlock.h>
-struct dev_rcv_lists;
+struct can_dev_rcv_lists;
struct s_stats;
struct s_pstats;
@@ -28,7 +28,7 @@ struct netns_can {
#endif
/* receive filters subscribed for 'all' CAN devices */
- struct dev_rcv_lists *can_rx_alldev_list;
+ struct can_dev_rcv_lists *can_rx_alldev_list;
spinlock_t can_rcvlists_lock;
struct timer_list can_stattimer;/* timer for statistics update */
struct s_stats *can_stats; /* packet statistics */
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 0ad4d0c71228..36c2d998a43c 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -11,7 +11,10 @@ struct netns_core {
int sysctl_somaxconn;
- struct prot_inuse __percpu *inuse;
+#ifdef CONFIG_PROC_FS
+ int __percpu *sock_inuse;
+ struct prot_inuse __percpu *prot_inuse;
+#endif
};
#endif
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index cc00af2ac2d7..ca043342c0eb 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -17,7 +17,17 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
- struct nf_hook_entries __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+ struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
+ struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ struct nf_hook_entries __rcu *hooks_arp[NF_ARP_NUMHOOKS];
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
+#endif
+#if IS_ENABLED(CONFIG_DECNET)
+ struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
+#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
bool defrag_ipv4;
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 4109b5f3010f..48134353411d 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -7,14 +7,8 @@
struct nft_af_info;
struct netns_nftables {
- struct list_head af_info;
+ struct list_head tables;
struct list_head commit_list;
- struct nft_af_info *ipv4;
- struct nft_af_info *ipv6;
- struct nft_af_info *inet;
- struct nft_af_info *arp;
- struct nft_af_info *bridge;
- struct nft_af_info *netdev;
unsigned int base_seq;
u8 gencursor;
};
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index ebc813277662..0db7fb3e4e15 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -122,9 +122,12 @@ struct netns_sctp {
/* Flag to indicate if PR-CONFIG is enabled. */
int reconf_enable;
- /* Flag to idicate if SCTP-AUTH is enabled */
+ /* Flag to indicate if SCTP-AUTH is enabled */
int auth_enable;
+ /* Flag to indicate if stream interleave is enabled */
+ int intl_enable;
+
/*
* Policy to control SCTP IPv4 address scoping
* 0 - Disable IPv4 address scoping
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 8e08b6da72f3..87406252f0a3 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -29,6 +29,7 @@ struct tcf_block_ext_info {
enum tcf_block_binder_type binder_type;
tcf_chain_head_change_t *chain_head_change;
void *chain_head_change_priv;
+ u32 block_index;
};
struct tcf_block_cb;
@@ -38,16 +39,25 @@ bool tcf_queue_work(struct work_struct *work);
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
bool create);
void tcf_chain_put(struct tcf_chain *chain);
+void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
- struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
- struct tcf_block_ext_info *ei);
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei);
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+ return block->index;
+}
+
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
+ WARN_ON(tcf_block_shared(block));
return block->q;
}
@@ -77,14 +87,16 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
#else
static inline
int tcf_block_get(struct tcf_block **p_block,
- struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct netlink_ext_ack *extack)
{
return 0;
}
static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
- struct tcf_block_ext_info *ei)
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack)
{
return 0;
}
@@ -364,7 +376,8 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
- struct tcf_exts *exts, bool ovr);
+ struct tcf_exts *exts, bool ovr,
+ struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
@@ -522,7 +535,7 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
switch (layer) {
case TCF_LAYER_LINK:
- return skb->data;
+ return skb_mac_header(skb);
case TCF_LAYER_NETWORK:
return skb_network_header(skb);
case TCF_LAYER_TRANSPORT:
@@ -544,13 +557,16 @@ static inline int tcf_valid_offset(const struct sk_buff *skb,
#include <net/net_namespace.h>
static inline int
-tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
+tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
+ struct netlink_ext_ack *extack)
{
char indev[IFNAMSIZ];
struct net_device *dev;
- if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
+ if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
+ NL_SET_ERR_MSG(extack, "Interface name too long");
return -EINVAL;
+ }
dev = __dev_get_by_name(net, indev);
if (!dev)
return -ENODEV;
@@ -586,17 +602,9 @@ struct tc_cls_common_offload {
u32 chain_index;
__be16 protocol;
u32 prio;
+ struct netlink_ext_ack *extack;
};
-static inline void
-tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
- const struct tcf_proto *tp)
-{
- cls_common->chain_index = tp->chain->index;
- cls_common->protocol = tp->protocol;
- cls_common->prio = tp->prio;
-}
-
struct tc_cls_u32_knode {
struct tcf_exts *exts;
struct tc_u32_sel *sel;
@@ -637,6 +645,31 @@ static inline bool tc_can_offload(const struct net_device *dev)
return dev->features & NETIF_F_HW_TC;
}
+static inline bool tc_can_offload_extack(const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ bool can = tc_can_offload(dev);
+
+ if (!can)
+ NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
+
+ return can;
+}
+
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+ struct tc_cls_common_offload *common)
+{
+ if (!tc_can_offload_extack(dev, common->extack))
+ return false;
+ if (common->chain_index) {
+ NL_SET_ERR_MSG(common->extack,
+ "Driver supports only offload of chain 0");
+ return false;
+ }
+ return true;
+}
+
static inline bool tc_skip_hw(u32 flags)
{
return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
@@ -664,6 +697,18 @@ static inline bool tc_in_hw(u32 flags)
return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
+static inline void
+tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
+ const struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
+{
+ cls_common->chain_index = tp->chain->index;
+ cls_common->protocol = tp->protocol;
+ cls_common->prio = tp->prio;
+ if (tc_skip_sw(flags))
+ cls_common->extack = extack;
+}
+
enum tc_fl_command {
TC_CLSFLOWER_REPLACE,
TC_CLSFLOWER_DESTROY,
@@ -706,7 +751,6 @@ struct tc_cls_bpf_offload {
struct bpf_prog *oldprog;
const char *name;
bool exts_integrated;
- u32 gen_flags;
};
struct tc_mqprio_qopt_offload {
@@ -727,6 +771,11 @@ struct tc_cookie {
u32 len;
};
+struct tc_qopt_offload_stats {
+ struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_queue *qstats;
+};
+
enum tc_red_command {
TC_RED_REPLACE,
TC_RED_DESTROY,
@@ -739,9 +788,6 @@ struct tc_red_qopt_offload_params {
u32 max;
u32 probability;
bool is_ecn;
-};
-struct tc_red_qopt_offload_stats {
- struct gnet_stats_basic_packed *bstats;
struct gnet_stats_queue *qstats;
};
@@ -751,9 +797,34 @@ struct tc_red_qopt_offload {
u32 parent;
union {
struct tc_red_qopt_offload_params set;
- struct tc_red_qopt_offload_stats stats;
+ struct tc_qopt_offload_stats stats;
struct red_stats *xstats;
};
};
+enum tc_prio_command {
+ TC_PRIO_REPLACE,
+ TC_PRIO_DESTROY,
+ TC_PRIO_STATS,
+};
+
+struct tc_prio_qopt_offload_params {
+ int bands;
+ u8 priomap[TC_PRIO_MAX + 1];
+ /* In case a prio qdisc is offloaded and is then changed to a
+ * non-offloadable config, it needs to update the backlog & qlen
+ * values to negate the HW backlog & qlen values (and only them).
+ */
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_prio_qopt_offload {
+ enum tc_prio_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_prio_qopt_offload_params replace_params;
+ struct tc_qopt_offload_stats stats;
+ };
+};
#endif
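tc_cls_can_offload_and_chain0() folds the two most common driver bail-outs, hardware offload disabled and a non-zero chain index, into one call that also populates extack. A hedged sketch of the driver-side shape; the flower offload struct and field names follow this tree's conventions but the hook itself is hypothetical:

#include <net/pkt_cls.h>

/* Hypothetical driver classifier hook. */
static int example_setup_flower(struct net_device *dev,
				struct tc_cls_flower_offload *f)
{
	/* Rejects with a populated extack when NETIF_F_HW_TC is off
	 * or when the rule targets a chain other than 0.
	 */
	if (!tc_cls_can_offload_and_chain0(dev, &f->common))
		return -EOPNOTSUPP;

	/* ... translate the match/actions into hardware state ... */
	return 0;
}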
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d1f413f06c72..815b92a23936 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -89,7 +89,8 @@ extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
- unsigned int limit);
+ unsigned int limit,
+ struct netlink_ext_ack *extack);
int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
@@ -99,22 +100,24 @@ int qdisc_set_default(const char *id);
void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
-struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
- struct nlattr *tab);
+ struct nlattr *tab,
+ struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
-int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
- struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock, bool validate);
+bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev, struct netdev_queue *txq,
+ spinlock_t *root_lock, bool validate);
void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
- if (qdisc_run_begin(q))
+ if (qdisc_run_begin(q)) {
__qdisc_run(q);
+ qdisc_run_end(q);
+ }
}
static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
diff --git a/include/net/route.h b/include/net/route.h
index d538e6db1afe..1eb9ce470e25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -217,7 +217,7 @@ unsigned int inet_addr_type_dev_table(struct net *net,
const struct net_device *dev,
__be32 addr);
void ip_rt_multicast_event(struct in_device *);
-int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
+int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
struct rtable *rt_dst_alloc(struct net_device *dev,
unsigned int flags, u16 type,
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index ead018744ff5..14b6b3af8918 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -13,10 +13,10 @@ enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = 1,
};
-int __rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
+int rtnl_register_module(struct module *owner, int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
int rtnl_unregister(int protocol, int msgtype);
void rtnl_unregister_all(int protocol);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 83a3e47d5845..e2ab13687fb9 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -71,6 +71,7 @@ struct Qdisc {
* qdisc_tree_decrease_qlen() should stop.
*/
#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
+#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
u32 limit;
const struct Qdisc_ops *ops;
@@ -88,14 +89,14 @@ struct Qdisc {
/*
* For performance sake on SMP, we put highly modified fields at the end
*/
- struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
+ struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
unsigned long state;
struct Qdisc *next_sched;
- struct sk_buff *skb_bad_txq;
+ struct sk_buff_head skb_bad_txq;
int padded;
refcount_t refcnt;
@@ -150,19 +151,23 @@ struct Qdisc_class_ops {
/* Child qdisc manipulation */
struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
int (*graft)(struct Qdisc *, unsigned long cl,
- struct Qdisc *, struct Qdisc **);
+ struct Qdisc *, struct Qdisc **,
+ struct netlink_ext_ack *extack);
struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
void (*qlen_notify)(struct Qdisc *, unsigned long);
/* Class manipulation routines */
unsigned long (*find)(struct Qdisc *, u32 classid);
int (*change)(struct Qdisc *, u32, u32,
- struct nlattr **, unsigned long *);
+ struct nlattr **, unsigned long *,
+ struct netlink_ext_ack *);
int (*delete)(struct Qdisc *, unsigned long);
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
/* Filter manipulation */
- struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long);
+ struct tcf_block * (*tcf_block)(struct Qdisc *sch,
+ unsigned long arg,
+ struct netlink_ext_ack *extack);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -179,6 +184,7 @@ struct Qdisc_ops {
const struct Qdisc_class_ops *cl_ops;
char id[IFNAMSIZ];
int priv_size;
+ unsigned int static_flags;
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
@@ -186,15 +192,26 @@ struct Qdisc_ops {
struct sk_buff * (*dequeue)(struct Qdisc *);
struct sk_buff * (*peek)(struct Qdisc *);
- int (*init)(struct Qdisc *, struct nlattr *arg);
+ int (*init)(struct Qdisc *sch, struct nlattr *arg,
+ struct netlink_ext_ack *extack);
void (*reset)(struct Qdisc *);
void (*destroy)(struct Qdisc *);
- int (*change)(struct Qdisc *, struct nlattr *arg);
- void (*attach)(struct Qdisc *);
+ int (*change)(struct Qdisc *sch,
+ struct nlattr *arg,
+ struct netlink_ext_ack *extack);
+ void (*attach)(struct Qdisc *sch);
+ int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
int (*dump)(struct Qdisc *, struct sk_buff *);
int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
+ void (*ingress_block_set)(struct Qdisc *sch,
+ u32 block_index);
+ void (*egress_block_set)(struct Qdisc *sch,
+ u32 block_index);
+ u32 (*ingress_block_get)(struct Qdisc *sch);
+ u32 (*egress_block_get)(struct Qdisc *sch);
+
struct module *owner;
};
@@ -217,14 +234,18 @@ struct tcf_proto_ops {
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- void (*destroy)(struct tcf_proto*);
+ void (*destroy)(struct tcf_proto *tp,
+ struct netlink_ext_ack *extack);
void* (*get)(struct tcf_proto*, u32 handle);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
- void **, bool);
- int (*delete)(struct tcf_proto*, void *, bool*);
+ void **, bool,
+ struct netlink_ext_ack *);
+ int (*delete)(struct tcf_proto *tp, void *arg,
+ bool *last,
+ struct netlink_ext_ack *);
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
void (*bind_class)(void *, u32, unsigned long);
@@ -246,8 +267,6 @@ struct tcf_proto {
/* All the rest */
u32 prio;
- u32 classid;
- struct Qdisc *q;
void *data;
const struct tcf_proto_ops *ops;
struct tcf_chain *chain;
@@ -266,8 +285,7 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
struct tcf_chain {
struct tcf_proto __rcu *filter_chain;
- tcf_chain_head_change_t *chain_head_change;
- void *chain_head_change_priv;
+ struct list_head filter_chain_list;
struct list_head list;
struct tcf_block *block;
u32 index; /* chain index */
@@ -276,12 +294,33 @@ struct tcf_chain {
struct tcf_block {
struct list_head chain_list;
+ u32 index; /* block index for shared blocks */
+ unsigned int refcnt;
struct net *net;
struct Qdisc *q;
struct list_head cb_list;
- struct work_struct work;
+ struct list_head owner_list;
+ bool keep_dst;
+ unsigned int offloadcnt; /* Number of offloaded filters */
+ unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
};
+static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+{
+ if (*flags & TCA_CLS_FLAGS_IN_HW)
+ return;
+ *flags |= TCA_CLS_FLAGS_IN_HW;
+ block->offloadcnt++;
+}
+
+static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+{
+ if (!(*flags & TCA_CLS_FLAGS_IN_HW))
+ return;
+ *flags &= ~TCA_CLS_FLAGS_IN_HW;
+ block->offloadcnt--;
+}
+
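These two counters keep block->offloadcnt in step with the TCA_CLS_FLAGS_IN_HW bit and are idempotent per filter. A hedged sketch of how a classifier might account hardware add/remove results (the foo_* wrappers are illustrative, not from this patch):

static void foo_hw_replace_done(struct tcf_block *block, u32 *flags,
				int err)
{
	if (!err)
		tcf_block_offload_inc(block, flags);	/* sets IN_HW once */
}

static void foo_hw_destroy_done(struct tcf_block *block, u32 *flags)
{
	tcf_block_offload_dec(block, flags);	/* clears IN_HW once */
}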
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
@@ -290,11 +329,31 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+ return this_cpu_ptr(q->cpu_qstats)->qlen;
+}
+
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
+{
+ __u32 qlen = 0;
+ int i;
+
+ if (q->flags & TCQ_F_NOLOCK) {
+ for_each_possible_cpu(i)
+ qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+ } else {
+ qlen = q->q.qlen;
+ }
+
+ return qlen;
+}
+
static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
return (struct qdisc_skb_cb *)skb->cb;
@@ -431,6 +490,7 @@ void qdisc_class_hash_remove(struct Qdisc_class_hash *,
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
+int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
@@ -443,9 +503,12 @@ void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- const struct Qdisc_ops *ops);
+ const struct Qdisc_ops *ops,
+ struct netlink_ext_ack *extack);
+void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
- const struct Qdisc_ops *ops, u32 parentid);
+ const struct Qdisc_ops *ops, u32 parentid,
+ struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);
@@ -631,12 +694,39 @@ static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
sch->qstats.backlog -= qdisc_pkt_len(skb);
}
+static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
const struct sk_buff *skb)
{
sch->qstats.backlog += qdisc_pkt_len(skb);
}
+static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+{
+ this_cpu_inc(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+{
+ this_cpu_dec(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
+{
+ this_cpu_inc(sch->cpu_qstats->requeues);
+}
+
static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
sch->qstats.drops += count;
@@ -767,26 +857,30 @@ static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
+ struct sk_buff *skb = skb_peek(&sch->gso_skb);
+
/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
- if (!sch->gso_skb) {
- sch->gso_skb = sch->dequeue(sch);
- if (sch->gso_skb) {
+ if (!skb) {
+ skb = sch->dequeue(sch);
+
+ if (skb) {
+ __skb_queue_head(&sch->gso_skb, skb);
/* it's still part of the queue */
- qdisc_qstats_backlog_inc(sch, sch->gso_skb);
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
}
}
- return sch->gso_skb;
+ return skb;
}
/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
- struct sk_buff *skb = sch->gso_skb;
+ struct sk_buff *skb = skb_peek(&sch->gso_skb);
if (skb) {
- sch->gso_skb = NULL;
+ skb = __skb_dequeue(&sch->gso_skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
} else {
@@ -844,6 +938,14 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
qdisc_qstats_drop(sch);
}
+static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop(skb, to_free);
+ qdisc_qstats_cpu_drop(sch);
+
+ return NET_XMIT_DROP;
+}
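Together with the qdisc_qstats_cpu_*() helpers earlier in this file, qdisc_drop_cpu() gives a TCQ_F_NOLOCK qdisc lockless statistics. A hedged enqueue sketch, where foo_queue_push() stands in for a hypothetical lock-free queue (the stats helpers, qdisc_qlen_cpu() and sch->limit are from this file):

static int foo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	if (unlikely(qdisc_qlen_cpu(sch) >= sch->limit))
		return qdisc_drop_cpu(skb, sch, to_free);

	foo_queue_push(sch, skb);	/* hypothetical lock-free queue */
	qdisc_qstats_cpu_backlog_inc(sch, skb);
	qdisc_qstats_cpu_qlen_inc(sch);
	return NET_XMIT_SUCCESS;
}

qdisc_qlen_sum() then folds the per-CPU qlen counters back into a single value when the qdisc is dumped.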
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index deaafa9b09cb..20ff237c5eb2 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -145,12 +145,13 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other)
SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)
-#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA)
+#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
+ a->chunk_hdr->type == SCTP_CID_I_DATA)
/* Calculate the actual data size in a data chunk */
-#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end)\
- - (unsigned long)(c->chunk_hdr)\
- - sizeof(struct sctp_data_chunk)))
+#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \
+ (unsigned long)(c->chunk_hdr) - \
+ sctp_datachk_len(&c->asoc->stream)))
/* Internal error codes */
enum sctp_ierror {
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 906a9c0efa71..f7ae6b0a21d0 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -107,7 +107,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk);
-unsigned int sctp_poll(struct file *file, struct socket *sock,
+__poll_t sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
@@ -116,7 +116,7 @@ extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
-int sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_start(struct rhashtable_iter *iter);
void sctp_transport_walk_stop(struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_next(struct net *net,
struct rhashtable_iter *iter);
@@ -444,13 +444,13 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
int frag = pmtu;
frag -= sp->pf->af->net_header_len;
- frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);
+ frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
- sizeof(struct sctp_data_chunk)));
+ sctp_datachk_len(&asoc->stream)));
return frag;
}
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 70fb397f65b0..2883c43c5258 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -197,10 +197,14 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
const __u32 lowest_tsn,
const struct sctp_chunk *chunk);
-struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+ __u8 flags, int paylen, gfp_t gfp);
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_ifwdtsn_skip *skiplist);
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
- int len, const __u8 flags,
- __u16 ssn, gfp_t gfp);
+ int len, __u8 flags, gfp_t gfp);
struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
const __u32 lowest_tsn);
struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc);
@@ -342,7 +346,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
__u16 size;
size = ntohs(chunk->chunk_hdr->length);
- size -= sizeof(struct sctp_data_chunk);
+ size -= sctp_datahdr_len(&chunk->asoc->stream);
return size;
}
@@ -358,6 +362,12 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
typecheck(__u32, b) && \
((__s32)((a) - (b)) <= 0))
+/* Compare two MIDs */
+#define MID_lt(a, b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
+
/* Compare two SSNs */
#define SSN_lt(a,b) \
(typecheck(__u16, a) && \
diff --git a/include/net/sctp/stream_interleave.h b/include/net/sctp/stream_interleave.h
new file mode 100644
index 000000000000..6657711c8bc4
--- /dev/null
+++ b/include/net/sctp/stream_interleave.h
@@ -0,0 +1,61 @@
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * These are definitions used by the stream schedulers, defined in RFC
+ * draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Xin Long <lucien.xin@gmail.com>
+ */
+
+#ifndef __sctp_stream_interleave_h__
+#define __sctp_stream_interleave_h__
+
+struct sctp_stream_interleave {
+ __u16 data_chunk_len;
+ __u16 ftsn_chunk_len;
+ /* (I-)DATA process */
+ struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, __u8 flags, gfp_t gfp);
+ void (*assign_number)(struct sctp_chunk *chunk);
+ bool (*validate_data)(struct sctp_chunk *chunk);
+ int (*ulpevent_data)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp);
+ int (*enqueue_event)(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event);
+ void (*renege_events)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp);
+ void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+ void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+ /* (I-)FORWARD-TSN process */
+ void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
+ bool (*validate_ftsn)(struct sctp_chunk *chunk);
+ void (*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn);
+ void (*handle_ftsn)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk);
+};
+
+void sctp_stream_interleave_init(struct sctp_stream *stream);
+
+#endif /* __sctp_stream_interleave_h__ */
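The ops table lets DATA and I-DATA processing share one code path; sctp_stream_interleave_init() installs the table matching the negotiated mode. A hedged dispatch sketch (foo_make_frag() is illustrative; the callback and both chunk constructors are from this patch):

static struct sctp_chunk *foo_make_frag(struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	/* resolves to sctp_make_datafrag_empty() or sctp_make_idata()
	 * depending on which table was installed */
	return asoc->stream.si->make_datafrag(asoc, sinfo, len, flags, gfp);
}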
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2f8f93da5dc2..02369e379d35 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -89,6 +89,7 @@ struct sctp_stream;
#include <net/sctp/tsnmap.h>
#include <net/sctp/ulpevent.h>
#include <net/sctp/ulpqueue.h>
+#include <net/sctp/stream_interleave.h>
/* Structures useful for managing bind/connect. */
@@ -217,6 +218,7 @@ struct sctp_sock {
disable_fragments:1,
v4mapped:1,
frag_interleave:1,
+ strm_interleave:1,
recvrcvinfo:1,
recvnxtinfo:1,
data_ready_signalled:1;
@@ -397,6 +399,28 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
#define sctp_ssn_skip(stream, type, sid, ssn) \
((stream)->type[sid].ssn = ssn + 1)
+/* What is the current MID number for this stream? */
+#define sctp_mid_peek(stream, type, sid) \
+ ((stream)->type[sid].mid)
+
+/* Return the next MID number for this stream. */
+#define sctp_mid_next(stream, type, sid) \
+ ((stream)->type[sid].mid++)
+
+/* Skip over this mid and all below. */
+#define sctp_mid_skip(stream, type, sid, mid) \
+ ((stream)->type[sid].mid = mid + 1)
+
+#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
+
+/* What is the current MID_uo number for this stream? */
+#define sctp_mid_uo_peek(stream, type, sid) \
+ ((stream)->type[sid].mid_uo)
+
+/* Return the next MID_uo number for this stream. */
+#define sctp_mid_uo_next(stream, type, sid) \
+ ((stream)->type[sid].mid_uo++)
+
/*
* Pointers to address related SCTP functions.
* (i.e. things that depend on the address family.)
@@ -574,6 +598,8 @@ struct sctp_chunk {
struct sctp_addiphdr *addip_hdr;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_authhdr *auth_hdr;
+ struct sctp_idatahdr *idata_hdr;
+ struct sctp_ifwdtsn_hdr *ifwdtsn_hdr;
} subh;
__u8 *chunk_end;
@@ -620,6 +646,7 @@ struct sctp_chunk {
__u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
has_tsn:1, /* Does this chunk have a TSN yet? */
has_ssn:1, /* Does this chunk have a SSN yet? */
+#define has_mid has_ssn
singleton:1, /* Only chunk in the packet? */
end_of_packet:1, /* Last chunk in the packet? */
ecn_ce_done:1, /* Have we processed the ECN CE bit? */
@@ -966,7 +993,7 @@ void sctp_transport_burst_limited(struct sctp_transport *);
void sctp_transport_burst_reset(struct sctp_transport *);
unsigned long sctp_transport_timeout(struct sctp_transport *);
void sctp_transport_reset(struct sctp_transport *t);
-void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
+bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);
@@ -1073,6 +1100,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_prsctp_prune(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo, int msg_len);
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
/* Uncork and flush an outqueue. */
static inline void sctp_outq_cork(struct sctp_outq *q)
{
@@ -1357,13 +1385,25 @@ struct sctp_stream_out_ext {
};
struct sctp_stream_out {
- __u16 ssn;
- __u8 state;
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ __u32 mid_uo;
struct sctp_stream_out_ext *ext;
+ __u8 state;
};
struct sctp_stream_in {
- __u16 ssn;
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ __u32 mid_uo;
+ __u32 fsn;
+ __u32 fsn_uo;
+ char pd_mode;
+ char pd_mode_uo;
};
struct sctp_stream {
@@ -1387,11 +1427,32 @@ struct sctp_stream {
struct sctp_stream_out_ext *rr_next;
};
};
+ struct sctp_stream_interleave *si;
};
#define SCTP_STREAM_CLOSED 0x00
#define SCTP_STREAM_OPEN 0x01
+static inline __u16 sctp_datachk_len(const struct sctp_stream *stream)
+{
+ return stream->si->data_chunk_len;
+}
+
+static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
+{
+ return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
+static inline __u16 sctp_ftsnchk_len(const struct sctp_stream *stream)
+{
+ return stream->si->ftsn_chunk_len;
+}
+
+static inline __u16 sctp_ftsnhdr_len(const struct sctp_stream *stream)
+{
+ return stream->si->ftsn_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
/* SCTP_GET_ASSOC_STATS counters */
struct sctp_priv_assoc_stats {
/* Maximum observed rto in the association during subsequent
@@ -1940,6 +2001,7 @@ struct sctp_association {
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1, /* Is it a temporary association? */
force_delay:1,
+ intl_enable:1,
prsctp_enable:1,
reconf_enable:1;
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 231dc42f1da6..51b4e0626c34 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -45,19 +45,29 @@
/* A structure to carry information to the ULP (e.g. Sockets API) */
/* Warning: This sits inside an skb.cb[] area. Be very careful of
* growing this structure as it is at the maximum limit now.
+ *
+ * sctp_ulpevent is saved in skb->cb (48 bytes), whose last 4 bytes
+ * have been taken by sock_skb_cb, so 'packed' is needed here to
+ * make sctp_ulpevent fit into the remaining 44 bytes.
*/
struct sctp_ulpevent {
struct sctp_association *asoc;
struct sctp_chunk *chunk;
unsigned int rmem_len;
- __u32 ppid;
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ union {
+ __u32 ppid;
+ __u32 fsn;
+ };
__u32 tsn;
__u32 cumtsn;
__u16 stream;
- __u16 ssn;
__u16 flags;
__u16 msg_flags;
-};
+} __packed;
/* Retrieve the skb this event sits inside of. */
static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
@@ -112,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
const struct sctp_association *asoc,
- __u32 indication, gfp_t gfp);
+ __u32 indication, __u32 sid, __u32 seq,
+ __u32 flags, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
const struct sctp_association *asoc, gfp_t gfp);
@@ -140,6 +151,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
const struct sctp_association *asoc, __u16 flags,
__u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
+struct sctp_ulpevent *sctp_make_reassembled_event(
+ struct net *net, struct sk_buff_head *queue,
+ struct sk_buff *f_frag, struct sk_buff *l_frag);
+
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *);
void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index e0dce07b8794..bb0ecba3db2b 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -45,6 +45,7 @@ struct sctp_ulpq {
char pd_mode;
struct sctp_association *asoc;
struct sk_buff_head reasm;
+ struct sk_buff_head reasm_uo;
struct sk_buff_head lobby;
};
@@ -76,11 +77,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
-#endif /* __sctp_ulpqueue_h__ */
-
-
-
-
-
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+ struct sk_buff_head *list, __u16 needed);
+#endif /* __sctp_ulpqueue_h__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index 7a7b14e9628a..63731289186a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -72,6 +72,7 @@
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/smc.h>
+#include <net/l3mdev.h>
/*
* This structure really needs to be cleaned up.
@@ -1262,6 +1263,7 @@ proto_memory_pressure(struct proto *prot)
/* Called with local bh disabled */
void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
int sock_prot_inuse_get(struct net *net, struct proto *proto);
+int sock_inuse_get(struct net *net);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
int inc)
@@ -1445,10 +1447,8 @@ do { \
} while (0)
#ifdef CONFIG_LOCKDEP
-static inline bool lockdep_sock_is_held(const struct sock *csk)
+static inline bool lockdep_sock_is_held(const struct sock *sk)
{
- struct sock *sk = (struct sock *)csk;
-
return lockdep_is_held(&sk->sk_lock) ||
lockdep_is_held(&sk->sk_lock.slock);
}
@@ -1583,7 +1583,7 @@ int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
-unsigned int sock_no_poll(struct file *, struct socket *,
+__poll_t sock_no_poll(struct file *, struct socket *,
struct poll_table_struct *);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
@@ -2337,31 +2337,6 @@ static inline bool sk_listener(const struct sock *sk)
return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}
-/**
- * sk_state_load - read sk->sk_state for lockless contexts
- * @sk: socket pointer
- *
- * Paired with sk_state_store(). Used in places we do not hold socket lock :
- * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
- */
-static inline int sk_state_load(const struct sock *sk)
-{
- return smp_load_acquire(&sk->sk_state);
-}
-
-/**
- * sk_state_store - update sk->sk_state
- * @sk: socket pointer
- * @newstate: new state
- *
- * Paired with sk_state_load(). Should be used in contexts where
- * state change might impact lockless readers.
- */
-static inline void sk_state_store(struct sock *sk, int newstate)
-{
- smp_store_release(&sk->sk_state, newstate);
-}
-
void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);
@@ -2412,4 +2387,34 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
return *proto->sysctl_rmem;
}
+/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
+ * Some wifi drivers need to tweak it to get more chunks.
+ * They can use this helper from their ndo_start_xmit()
+ */
+static inline void sk_pacing_shift_update(struct sock *sk, int val)
+{
+ if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
+ return;
+ sk->sk_pacing_shift = val;
+}
+
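A hedged example of the intended use from a hypothetical wifi driver's transmit path (foo_start_xmit() and the chosen shift are illustrative):

static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	/* shift 8 is ~4 ms of data at the pacing rate, versus the
	 * default shift 10 (~1 ms) mentioned above */
	sk_pacing_shift_update(skb->sk, 8);

	/* ... hand the frame to hardware ... */
	return NETDEV_TX_OK;
}

The helper already tolerates a NULL or non-full socket, so callers need no extra checks.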
+/* if a socket is bound to a device, check that the given device
+ * index is either the same or that the socket is bound to an L3
+ * master device and the given device index is also enslaved to
+ * that L3 master
+ */
+static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
+{
+ int mdif;
+
+ if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
+ return true;
+
+ mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
+ if (mdif && mdif == sk->sk_bound_dev_if)
+ return true;
+
+ return false;
+}
+
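A minimal sketch of the check on a receive path, with foo_rcv_ifindex_ok() as a hypothetical wrapper:

static bool foo_rcv_ifindex_ok(struct sock *sk, int dif)
{
	/* true if the socket is unbound, bound to dif itself, or
	 * bound to the L3 master that dif is enslaved to */
	return sk_dev_equal_l3scope(sk, dif);
}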
#endif /* _SOCK_H */
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 781f3433a0be..9470fd7e4350 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -6,10 +6,16 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_csum.h>
+struct tcf_csum_params {
+ int action;
+ u32 update_flags;
+ struct rcu_head rcu;
+};
+
struct tcf_csum {
struct tc_action common;
- u32 update_flags;
+ struct tcf_csum_params __rcu *params;
};
#define to_tcf_csum(a) ((struct tcf_csum *)a)
@@ -24,7 +30,13 @@ static inline bool is_tcf_csum(const struct tc_action *a)
static inline u32 tcf_csum_update_flags(const struct tc_action *a)
{
- return to_tcf_csum(a)->update_flags;
+ u32 update_flags;
+
+ rcu_read_lock();
+ update_flags = rcu_dereference(to_tcf_csum(a)->params)->update_flags;
+ rcu_read_unlock();
+
+ return update_flags;
}
#endif /* __NET_TC_CSUM_H */
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 21d253c9a8c6..a2e9cbca5c9e 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,10 +8,8 @@
struct tcf_mirred {
struct tc_action common;
int tcfm_eaction;
- int tcfm_ifindex;
bool tcfm_mac_header_xmit;
struct net_device __rcu *tcfm_dev;
- struct net *net;
struct list_head tcfm_list;
};
#define to_mirred(a) ((struct tcf_mirred *)a)
@@ -34,9 +32,9 @@ static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
return false;
}
-static inline int tcf_mirred_ifindex(const struct tc_action *a)
+static inline struct net_device *tcf_mirred_dev(const struct tc_action *a)
{
- return to_mirred(a)->tcfm_ifindex;
+ return rtnl_dereference(to_mirred(a)->tcfm_dev);
}
#endif /* __NET_TC_MIR_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6da880d2f022..58278669cc55 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -387,7 +387,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
-unsigned int tcp_poll(struct file *file, struct socket *sock,
+__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
@@ -953,6 +953,7 @@ struct rate_sample {
u32 prior_in_flight; /* in flight before this ACK */
bool is_app_limited; /* is sample from packet with bubble in pipe? */
bool is_retrans; /* is sample from retransmission? */
+ bool is_ack_delayed; /* is this (likely) a delayed ACK? */
};
struct tcp_congestion_ops {
@@ -1507,8 +1508,7 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
- struct tcp_fastopen_cookie *cookie, int *syn_loss,
- unsigned long *last_syn_loss);
+ struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
struct tcp_fastopen_cookie *cookie, bool syn_lost,
u16 try_exp);
@@ -1546,7 +1546,7 @@ extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
-void tcp_fastopen_active_timeout_reset(void);
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
/* Latencies incurred by various limits for a sender. They are
* chronograph-like stats that are mutually exclusive.
@@ -2006,17 +2006,21 @@ void tcp_cleanup_ulp(struct sock *sk);
* program loaded).
*/
#ifdef CONFIG_BPF
-static inline int tcp_call_bpf(struct sock *sk, int op)
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
struct bpf_sock_ops_kern sock_ops;
int ret;
- if (sk_fullsock(sk))
+ memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
+ if (sk_fullsock(sk)) {
+ sock_ops.is_fullsock = 1;
sock_owned_by_me(sk);
+ }
- memset(&sock_ops, 0, sizeof(sock_ops));
sock_ops.sk = sk;
sock_ops.op = op;
+ if (nargs > 0)
+ memcpy(sock_ops.args, args, nargs * sizeof(*args));
ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
if (ret == 0)
@@ -2025,18 +2029,46 @@ static inline int tcp_call_bpf(struct sock *sk, int op)
ret = -1;
return ret;
}
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
+{
+ u32 args[2] = {arg1, arg2};
+
+ return tcp_call_bpf(sk, op, 2, args);
+}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+ u32 arg3)
+{
+ u32 args[3] = {arg1, arg2, arg3};
+
+ return tcp_call_bpf(sk, op, 3, args);
+}
+
#else
-static inline int tcp_call_bpf(struct sock *sk, int op)
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
return -EPERM;
}
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
+{
+ return -EPERM;
+}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+ u32 arg3)
+{
+ return -EPERM;
+}
+
#endif
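The nargs/args extension lets callers pass per-event values through to the sockops program. A sketch assuming a hypothetical op constant (the wrapper itself is from this patch):

#define FOO_SOCK_OPS_EVENT 100	/* hypothetical, not a real BPF_SOCK_OPS_* value */

static void foo_notify_bpf(struct sock *sk, u32 val1, u32 val2)
{
	/* val1 and val2 are copied into sock_ops.args[] before the
	 * program runs */
	tcp_call_bpf_2arg(sk, FOO_SOCK_OPS_EVENT, val1, val2);
}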
static inline u32 tcp_timeout_init(struct sock *sk)
{
int timeout;
- timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT);
+ timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
if (timeout <= 0)
timeout = TCP_TIMEOUT_INIT;
@@ -2047,7 +2079,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
int rwnd;
- rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT);
+ rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
if (rwnd < 0)
rwnd = 0;
@@ -2056,7 +2088,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
- return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN) == 1);
+ return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}
#if IS_ENABLED(CONFIG_SMC)
diff --git a/include/net/tls.h b/include/net/tls.h
index 936cfc5cab7d..4913430ab807 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
+#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <net/tcp.h>
@@ -57,6 +58,7 @@
struct tls_sw_context {
struct crypto_aead *aead_send;
+ struct crypto_wait async_wait;
/* Sending context */
char aad_space[TLS_AAD_SPACE_SIZE];
@@ -170,7 +172,7 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
static inline void tls_err_abort(struct sock *sk)
{
- sk->sk_err = -EBADMSG;
+ sk->sk_err = EBADMSG;
sk->sk_error_report(sk);
}
diff --git a/include/net/udp.h b/include/net/udp.h
index 6c759c8594e2..850a8e581cce 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -275,7 +275,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
-unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 13223396dc64..ad73d8b3fcc2 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -146,7 +146,7 @@ struct vxlanhdr_gpe {
np_applied:1,
instance_applied:1,
version:2,
-reserved_flags2:2;
+ reserved_flags2:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved_flags2:2,
version:2,
@@ -301,7 +301,7 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
default:
- return features;;
+ return features;
}
if ((l4_hdr == IPPROTO_UDP) &&
diff --git a/include/net/wext.h b/include/net/wext.h
index e51f067fdb3a..aa192a670304 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -7,7 +7,7 @@
struct net;
#ifdef CONFIG_WEXT_CORE
-int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, unsigned int cmd,
void __user *arg);
int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
unsigned long arg);
@@ -15,7 +15,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
struct iw_statistics *get_wireless_stats(struct net_device *dev);
int call_commit_handler(struct net_device *dev);
#else
-static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
+static inline int wext_handle_ioctl(struct net *net, unsigned int cmd,
void __user *arg)
{
return -EINVAL;
diff --git a/include/net/xdp.h b/include/net/xdp.h
new file mode 100644
index 000000000000..b2362ddfa694
--- /dev/null
+++ b/include/net/xdp.h
@@ -0,0 +1,48 @@
+/* include/net/xdp.h
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * Released under terms in GPL version 2. See COPYING.
+ */
+#ifndef __LINUX_NET_XDP_H__
+#define __LINUX_NET_XDP_H__
+
+/**
+ * DOC: XDP RX-queue information
+ *
+ * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
+ * level RX-ring queues. It is information that is specific to how
+ * the driver has configured a given RX-ring queue.
+ *
+ * Each xdp_buff frame received in the driver carries a (pointer)
+ * reference to this xdp_rxq_info structure. This provides the XDP
+ * data-path read-access to RX-info for both kernel and bpf-side
+ * (limited subset).
+ *
+ * For now, direct access is only safe while running in NAPI/softirq
+ * context. Contents are read-mostly and must not be updated during
+ * driver NAPI/softirq poll.
+ *
+ * The driver usage API is a register and unregister API.
+ *
+ * The struct is not directly tied to the XDP prog. A new XDP prog
+ * can be attached as long as it doesn't change the underlying
+ * RX-ring. If the RX-ring does change significantly, the NIC driver
+ * naturally needs to stop the RX-ring before purging and reallocating
+ * memory. In that process the driver MUST call unregister (which
+ * also applies to driver shutdown and unload). The register API is
+ * also mandatory during RX-ring setup.
+ */
+
+struct xdp_rxq_info {
+ struct net_device *dev;
+ u32 queue_index;
+ u32 reg_state;
+} ____cacheline_aligned; /* perf critical, avoid false-sharing */
+
+int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index);
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
+
+#endif /* __LINUX_NET_XDP_H__ */
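The DOC block above spells out a strict register/unregister contract. A hedged driver-side sketch (struct foo_ring and its fields are hypothetical):

#include <net/xdp.h>

struct foo_ring {
	struct xdp_rxq_info xdp_rxq;	/* must outlive the RX-ring */
	struct net_device *netdev;
	u32 queue_index;
};

static int foo_setup_rx_ring(struct foo_ring *ring)
{
	/* mandatory during RX-ring setup */
	return xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
				ring->queue_index);
}

static void foo_free_rx_ring(struct foo_ring *ring)
{
	/* mandatory before purging/reallocating ring memory, and on
	 * driver shutdown and unload */
	xdp_rxq_info_unreg(&ring->xdp_rxq);
}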
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ae35991b5877..7d2077665c0b 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -968,7 +968,7 @@ static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_c
/* A struct encoding a bundle of transformations to apply to some set of flows.
*
- * dst->child points to the next element of bundle.
+ * xdst->child points to the next element of bundle.
* dst->xfrm points to an instance of a transformer.
*
* Due to unfortunate limitations of current routing cache, which we
@@ -984,6 +984,8 @@ struct xfrm_dst {
struct rt6_info rt6;
} u;
struct dst_entry *route;
+ struct dst_entry *child;
+ struct dst_entry *path;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols, num_xfrms;
u32 xfrm_genid;
@@ -994,7 +996,35 @@ struct xfrm_dst {
u32 path_cookie;
};
+static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
+{
+#ifdef CONFIG_XFRM
+ if (dst->xfrm) {
+ const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
+
+ return xdst->path;
+ }
+#endif
+ return (struct dst_entry *) dst;
+}
+
+static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
+{
+#ifdef CONFIG_XFRM
+ if (dst->xfrm) {
+ struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+ return xdst->child;
+ }
+#endif
+ return NULL;
+}
+
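Since child has moved from struct dst_entry into struct xfrm_dst, code that used to follow dst->child should use the accessor, which returns NULL once the dst is not an xfrm one. A sketch (foo_bundle_len() is illustrative):

static int foo_bundle_len(struct dst_entry *dst)
{
	int len = 0;

	/* walks down to the terminal route (dst->xfrm == NULL),
	 * counting every dst visited along the way */
	for (; dst; dst = xfrm_dst_child(dst))
		len++;
	return len;
}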
#ifdef CONFIG_XFRM
+static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
+{
+ xdst->child = child;
+}
+
static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
{
xfrm_pols_put(xdst->pols, xdst->num_pols);
@@ -1021,6 +1051,7 @@ struct xfrm_offload {
#define XFRM_GSO_SEGMENT 16
#define XFRM_GRO 32
#define XFRM_ESP_NO_TRAILER 64
+#define XFRM_DEV_RESUME 128
__u32 status;
#define CRYPTO_SUCCESS 1
@@ -1847,34 +1878,53 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
return skb->sp->xvec[skb->sp->len - 1];
}
+#endif
+
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
+#ifdef CONFIG_XFRM
struct sec_path *sp = skb->sp;
if (!sp || !sp->olen || sp->len != sp->olen)
return NULL;
return &sp->ovec[sp->olen - 1];
-}
+#else
+ return NULL;
#endif
+}
void __net_init xfrm_dev_init(void);
#ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
struct xfrm_user_offload *xuo);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+{
+ struct xfrm_state_offload *xso = &x->xso;
+
+ if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
+ xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
+}
+
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
struct xfrm_state *x = dst->xfrm;
+ struct xfrm_dst *xdst;
if (!x || !x->type_offload)
return false;
- if (x->xso.offload_handle && (x->xso.dev == dst->path->dev) &&
- !dst->child->xfrm)
+ xdst = (struct xfrm_dst *) dst;
+ if (!x->xso.offload_handle && !xdst->child->xfrm)
+ return true;
+ if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
+ !xdst->child->xfrm)
return true;
return false;
@@ -1894,15 +1944,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
struct net_device *dev = xso->dev;
if (dev && dev->xfrmdev_ops) {
- dev->xfrmdev_ops->xdo_dev_state_free(x);
+ if (dev->xfrmdev_ops->xdo_dev_state_free)
+ dev->xfrmdev_ops->xdo_dev_state_free(x);
xso->dev = NULL;
dev_put(dev);
}
}
#else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline void xfrm_dev_resume(struct sk_buff *skb)
{
- return 0;
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
+{
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
+{
+ return skb;
}
static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
@@ -1923,6 +1982,10 @@ static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x
return false;
}
+static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+{
+}
+
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
return false;
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 18c564f60e93..d656809f1217 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -94,7 +94,7 @@ struct rdma_dev_addr {
* The dev_addr->net field must be initialized.
*/
int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr, u16 *vlan_id);
+ struct rdma_dev_addr *dev_addr);
/**
* rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -131,10 +131,9 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
int rdma_addr_size(struct sockaddr *addr);
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *smac, u16 *vlan_id, int *if_index,
+ u8 *dmac, const struct net_device *ndev,
int *hoplimit);
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
@@ -198,34 +197,15 @@ static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
}
}
-static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
-{
- struct net_device *dev;
- struct in_device *ip4;
-
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (dev) {
- ip4 = in_dev_get(dev);
- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
- ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
- (struct in6_addr *)gid);
-
- if (ip4)
- in_dev_put(ip4);
-
- dev_put(dev);
- }
-}
-
+/*
+ * rdma_get/set_sgid/dgid() APIs are applicable to IB and iWarp.
+ * They are not applicable to RoCE.
+ * RoCE GIDs are derived from the IP addresses.
+ */
static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- if (dev_addr->transport == RDMA_TRANSPORT_IB &&
- dev_addr->dev_type != ARPHRD_INFINIBAND)
- iboe_addr_get_sgid(dev_addr, gid);
- else
- memcpy(gid, dev_addr->src_dev_addr +
- rdma_addr_gid_offset(dev_addr), sizeof *gid);
+ memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr),
+ sizeof(*gid));
}
static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 1f7f604db5aa..811cfcfcbe3d 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -549,12 +549,12 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
struct rdma_ah_attr *ah_attr);
/**
- * ib_init_ah_from_path - Initialize address handle attributes based on an SA
- * path record.
+ * ib_init_ah_attr_from_path - Initialize address handle attributes based on
+ * an SA path record.
*/
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr);
/**
* ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fd84cda5ed7c..5263c86fd103 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -63,6 +63,7 @@
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
+#include <rdma/restrack.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -300,11 +301,6 @@ struct ib_tm_caps {
u32 max_sge;
};
-enum ib_cq_creation_flags {
- IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
- IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
-};
-
struct ib_cq_init_attr {
unsigned int cqe;
int comp_vector;
@@ -983,9 +979,9 @@ struct ib_wc {
u32 invalidate_rkey;
} ex;
u32 src_qp;
+ u32 slid;
int wc_flags;
u16 pkey_index;
- u32 slid;
u8 sl;
u8 dlid_path_bits;
u8 port_num; /* valid only for DR SMPs on switches */
@@ -1082,6 +1078,7 @@ enum ib_qp_type {
IB_QPT_XRC_INI = 9,
IB_QPT_XRC_TGT,
IB_QPT_MAX,
+ IB_QPT_DRIVER = 0xFF,
/* Reserve a range for qp types internal to the low level driver.
* These qp types will not be visible at the IB core layer, so the
* IB_QPT_MAX usages should not be affected in the core layer
@@ -1529,6 +1526,7 @@ struct ib_pd {
* Implementation details of the RDMA core, don't use in drivers:
*/
struct ib_mr *__internal_mr;
+ struct rdma_restrack_entry res;
};
struct ib_xrcd {
@@ -1538,6 +1536,10 @@ struct ib_xrcd {
struct mutex tgt_qp_mutex;
struct list_head tgt_qp_list;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_ah {
@@ -1569,6 +1571,10 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_srq {
@@ -1745,6 +1751,11 @@ struct ib_qp {
struct ib_rwq_ind_table *rwq_ind_tbl;
struct ib_qp_security *qp_sec;
u8 port;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_mr {
@@ -2351,6 +2362,10 @@ struct ib_device {
#endif
u32 index;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers
+ */
+ struct rdma_restrack_root res;
/**
* The following mandatory functions are used only at device
@@ -2836,8 +2851,7 @@ int ib_modify_port(struct ib_device *device,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- enum ib_gid_type gid_type, struct net_device *ndev,
- u8 *port_num, u16 *index);
+ struct net_device *ndev, u8 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
@@ -2858,7 +2872,7 @@ enum ib_pd_flags {
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
const char *caller);
#define ib_alloc_pd(device, flags) \
- __ib_alloc_pd((device), (flags), __func__)
+ __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
void ib_dealloc_pd(struct ib_pd *pd);
/**
@@ -2905,7 +2919,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
/**
- * ib_init_ah_from_wc - Initializes address handle attributes from a
+ * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
* work completion.
* @device: Device on which the received message arrived.
* @port_num: Port on which the received message arrived.
@@ -2915,9 +2929,9 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
* @ah_attr: Returned attributes that can be used when creating an address
* handle for replying to the message.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr);
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
@@ -3135,8 +3149,12 @@ static inline int ib_post_recv(struct ib_qp *qp,
return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
-struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx, const char *caller);
+#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
+ __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
+
void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
@@ -3560,8 +3578,11 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
/**
* ib_alloc_xrcd - Allocates an XRC domain.
* @device: The device on which to allocate the XRC domain.
+ * @caller: Module name for kernel consumers
*/
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
+struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
+#define ib_alloc_xrcd(device) \
+ __ib_alloc_xrcd((device), KBUILD_MODNAME)
/**
* ib_dealloc_xrcd - Deallocates an XRC domain.
@@ -3793,8 +3814,7 @@ static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
u32 port_num)
{
- if ((rdma_protocol_roce(dev, port_num)) ||
- (rdma_protocol_iwarp(dev, port_num)))
+ if (rdma_protocol_roce(dev, port_num))
return RDMA_AH_ATTR_TYPE_ROCE;
else if ((rdma_protocol_ib(dev, port_num)) &&
(rdma_cap_opa_ah(dev, port_num)))
@@ -3850,4 +3870,12 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
}
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their gids, as needed, to the relevant RoCE devices.
+ *
+ * @ibdev: the rdma device
+ */
+void rdma_roce_rescan_device(struct ib_device *ibdev);
+
#endif /* IB_VERBS_H */
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index f68fca296631..2bbb7a67e643 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -114,4 +114,20 @@ static inline u32 opa_get_mcast_base(u32 nr_top_bits)
return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits));
}
+/* Check for a valid unicast LID for non-SM traffic types */
+static inline bool rdma_is_valid_unicast_lid(struct rdma_ah_attr *attr)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_IB) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ be32_to_cpu(IB_MULTICAST_LID_BASE))
+ return false;
+ } else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ opa_get_mcast_base(OPA_MCAST_NR))
+ return false;
+ }
+ return true;
+}
#endif /* OPA_ADDR_H */
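A hedged example of guarding an address handle with the new check (foo_validate_ah() is illustrative):

static int foo_validate_ah(struct rdma_ah_attr *attr)
{
	/* rejects a zero DLID and LIDs in the multicast range for
	 * IB and OPA type attributes */
	if (!rdma_is_valid_unicast_lid(attr))
		return -EINVAL;
	return 0;
}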
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 3d2eed3c4e75..6538a5cc27b6 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -413,4 +413,23 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len);
+/**
+ * rdma_read_gids - Return the SGID and DGID used for establishing
+ * connection. This can be used after rdma_resolve_addr()
+ * on client side. This can be used on a new connection
+ * on server side. This is applicable to IB, RoCE, and iWarp.
+ * If cm_id is not bound yet to the RDMA device, it doesn't
+ * copy any SGID or DGID to the given pointers.
+ * @id: Communication identifier whose GIDs are queried.
+ * @sgid: Pointer to SGID where SGID will be returned. It is optional.
+ * @dgid: Pointer to DGID where DGID will be returned. It is optional.
+ * Note: This API should not be used by any new ULPs or new code.
+ * Instead, users interested in querying GIDs should refer to path record
+ * of the rdma_cm_id to query the GIDs.
+ * This API is provided for compatibility for existing users.
+ */
+
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid);
+
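As the note says, new code should prefer the rdma_cm_id path record; a hedged example of the compatibility query on an established connection (foo_dump_gids() is illustrative):

static void foo_dump_gids(struct rdma_cm_id *cm_id)
{
	union ib_gid sgid, dgid;

	/* either pointer may be NULL if only one GID is wanted */
	rdma_read_gids(cm_id, &sgid, &dgid);
}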
#endif /* RDMA_CM_H */
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index 6947a6ba2557..6a69d71a21a5 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -36,17 +36,17 @@
#include <rdma/rdma_cm.h>
/**
- * rdma_set_ib_paths - Manually sets the path records used to establish a
+ * rdma_set_ib_path - Manually sets the path record used to establish a
* connection.
* @id: Connection identifier associated with the request.
* @path_rec: Reference to the path record
*
* This call permits a user to specify routing information for rdma_cm_id's
- * bound to Infiniband devices. It is called on the client side of a
+ * bound to InfiniBand devices. It is called on the client side of a
* connection and replaces the call to rdma_resolve_route.
*/
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct sa_path_rec *path_rec, int num_paths);
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec);
/* Global qkey for UDP QPs and multicast groups. */
#define RDMA_UDP_QKEY 0x01234567
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 1ba84a78f1c5..4118324a0310 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -228,13 +228,6 @@ struct rvt_driver_provided {
int (*port_callback)(struct ib_device *, u8, struct kobject *);
/*
- * Returns a string to represent the device for which is being
- * registered. This is primarily used for error and debug messages on
- * the console.
- */
- const char * (*get_card_name)(struct rvt_dev_info *rdi);
-
- /*
* Returns a pointer to the underlying hardware's PCI device. This is
* used to display information as to what hardware is being referenced
* in an output message
@@ -419,6 +412,30 @@ struct rvt_dev_info {
};
+/**
+ * rvt_set_ibdev_name - Craft an IB device name from client info
+ * @rdi: pointer to the client rvt_dev_info structure
+ * @fmt: format string combining @name and @unit
+ * @name: client specific name
+ * @unit: client specific unit number
+ */
+static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
+ const char *fmt, const char *name,
+ const int unit)
+{
+ snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
+}
+
+/**
+ * rvt_get_ibdev_name - return the IB name
+ * @rdi: rdmavt device
+ *
+ * Return the registered name of the device.
+ */
+static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
+{
+ return rdi->ibdev.name;
+}
+
static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct rvt_pd, ibpd);
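A minimal usage sketch of the two new name helpers (hypothetical driver
code; "hfi1" and unit are placeholders for a client's own name and unit
number):

/* Hypothetical sketch: craft the IB device name, then read it back. */
rvt_set_ibdev_name(rdi, "%s%d", "hfi1", unit);
pr_info("registered %s\n", rvt_get_ibdev_name(rdi));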
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
new file mode 100644
index 000000000000..c2d81167c858
--- /dev/null
+++ b/include/rdma/restrack.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_RESTRACK_H_
+#define _RDMA_RESTRACK_H_
+
+#include <linux/typecheck.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+
+/**
+ * enum rdma_restrack_type - HW objects to track
+ */
+enum rdma_restrack_type {
+ /**
+ * @RDMA_RESTRACK_PD: Protection domain (PD)
+ */
+ RDMA_RESTRACK_PD,
+ /**
+ * @RDMA_RESTRACK_CQ: Completion queue (CQ)
+ */
+ RDMA_RESTRACK_CQ,
+ /**
+ * @RDMA_RESTRACK_QP: Queue pair (QP)
+ */
+ RDMA_RESTRACK_QP,
+ /**
+ * @RDMA_RESTRACK_XRCD: XRC domain (XRCD)
+ */
+ RDMA_RESTRACK_XRCD,
+ /**
+	 * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
+ */
+ RDMA_RESTRACK_MAX
+};
+
+#define RDMA_RESTRACK_HASH_BITS 8
+/**
+ * struct rdma_restrack_root - main resource tracking management
+ * entity, per-device
+ */
+struct rdma_restrack_root {
+	/**
+ * @rwsem: Read/write lock to protect lists
+ */
+ struct rw_semaphore rwsem;
+ /**
+ * @hash: global database for all resources per-device
+ */
+ DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
+};
+
+/**
+ * struct rdma_restrack_entry - metadata per-entry
+ */
+struct rdma_restrack_entry {
+ /**
+ * @valid: validity indicator
+ *
+	 * The entries are filled during rdma_restrack_add and
+	 * can be freed during rdma_restrack_del.
+	 *
+	 * As an example, see mlx5 QPs with type MLX5_IB_QPT_HW_GSI
+ */
+ bool valid;
+	/**
+ * @kref: Protect destroy of the resource
+ */
+ struct kref kref;
+	/**
+	 * @comp: Signal that all consumers of the resource have completed their work
+ */
+ struct completion comp;
+ /**
+ * @task: owner of resource tracking entity
+ *
+	 * There are two types of entities: those created by user space
+	 * and those created by the kernel.
+	 *
+	 * This pointer is valid for the entities created by users.
+	 * For the entities created by the kernel, it will be NULL.
+ */
+ struct task_struct *task;
+ /**
+ * @kern_name: name of owner for the kernel created entities.
+ */
+ const char *kern_name;
+ /**
+ * @node: hash table entry
+ */
+ struct hlist_node node;
+ /**
+	 * @type: type of the object in the restrack database
+ */
+ enum rdma_restrack_type type;
+};
+
+/**
+ * rdma_restrack_init() - initialize resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_init(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_clean() - clean resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_clean(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_count() - return the current usage count of a specific object type
+ * @res: resource tracking root
+ * @type: actual type of the objects to count
+ * @ns: PID namespace
+ */
+int rdma_restrack_count(struct rdma_restrack_root *res,
+ enum rdma_restrack_type type,
+ struct pid_namespace *ns);
+
+/**
+ * rdma_restrack_add() - add an object to the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_add(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_del() - delete an object from the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_del(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_is_kernel_res() - check whether the resource is owned by the kernel
+ * @res: resource entry
+ */
+static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
+{
+ return !res->task;
+}
+
+/**
+ * rdma_restrack_get() - grab a reference to protect the resource from release
+ * @res: resource entry
+ */
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_put() - release the resource
+ * @res: resource entry
+ */
+int rdma_restrack_put(struct rdma_restrack_entry *res);
+#endif /* _RDMA_RESTRACK_H_ */
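A minimal sketch of the intended get/put pattern (hypothetical; assumes,
by analogy with kref_get_unless_zero(), that a zero return from
rdma_restrack_get() means the entry is already being destroyed):

/* Hypothetical sketch: safely inspect a tracked resource. */
static void example_inspect(struct rdma_restrack_entry *res)
{
	if (!rdma_restrack_get(res))
		return;		/* entry is going away, don't touch it */
	/* ... read res->type, res->task, res->kern_name ... */
	rdma_restrack_put(res);
}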
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 6df6fe0c2198..225ab7783dfd 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -75,16 +75,15 @@ enum phy_event {
PHYE_OOB_ERROR,
PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */
PHYE_RESUME_TIMEOUT,
+ PHYE_SHUTDOWN,
PHY_NUM_EVENTS,
};
enum discover_event {
DISCE_DISCOVER_DOMAIN = 0U,
DISCE_REVALIDATE_DOMAIN,
- DISCE_PROBE,
DISCE_SUSPEND,
DISCE_RESUME,
- DISCE_DESTRUCT,
DISC_NUM_EVENTS,
};
@@ -261,6 +260,7 @@ struct asd_sas_port {
struct list_head dev_list;
struct list_head disco_list;
struct list_head destroy_list;
+ struct list_head sas_port_del_list;
enum sas_linkrate linkrate;
struct sas_work work;
@@ -292,6 +292,7 @@ struct asd_sas_port {
struct asd_sas_event {
struct sas_work work;
struct asd_sas_phy *phy;
+ int event;
};
static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
@@ -301,17 +302,24 @@ static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
return ev;
}
+static inline void INIT_SAS_EVENT(struct asd_sas_event *ev,
+ void (*fn)(struct work_struct *),
+ struct asd_sas_phy *phy, int event)
+{
+ INIT_SAS_WORK(&ev->work, fn);
+ ev->phy = phy;
+ ev->event = event;
+}
+
+#define SAS_PHY_SHUTDOWN_THRES 1024
+
/* The phy pretty much is controlled by the LLDD.
* The class only reads those fields.
*/
struct asd_sas_phy {
/* private: */
- struct asd_sas_event port_events[PORT_NUM_EVENTS];
- struct asd_sas_event phy_events[PHY_NUM_EVENTS];
-
- unsigned long port_events_pending;
- unsigned long phy_events_pending;
-
+ atomic_t event_nr;
+ int in_shutdown;
int error;
int suspended;
@@ -380,6 +388,9 @@ struct sas_ha_struct {
struct device *dev; /* should be set */
struct module *lldd_module; /* should be set */
+ struct workqueue_struct *event_q;
+ struct workqueue_struct *disco_q;
+
u8 *sas_addr; /* must be set */
u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE];
@@ -399,6 +410,8 @@ struct sas_ha_struct {
struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
struct list_head eh_ata_q; /* scmds to promote from sas to ata eh */
+
+ int event_thres;
};
#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@@ -670,6 +683,7 @@ extern int sas_bios_param(struct scsi_device *,
sector_t capacity, int *hsc);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
+extern struct device_attribute dev_attr_phy_event_threshold;
int sas_discover_root_expander(struct domain_device *);
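A minimal sketch of how an LLDD might use the new event plumbing
(hypothetical; the handler and the choice of PHYE_OOB_ERROR are
placeholders):

/* Hypothetical sketch: queue a phy event on the dedicated event queue. */
static void example_notify_phy_event(struct sas_ha_struct *ha,
				     struct asd_sas_phy *phy,
				     struct asd_sas_event *ev,
				     void (*handler)(struct work_struct *))
{
	INIT_SAS_EVENT(ev, handler, phy, PHYE_OOB_ERROR);
	atomic_inc(&phy->event_nr);
	queue_work(ha->event_q, &ev->work.work);
}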
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 7fb57e905526..949a016dd7fa 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -171,7 +171,6 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
extern void scsi_kunmap_atomic_sg(void *virt);
extern int scsi_init_io(struct scsi_cmnd *cmd);
-extern void scsi_initialize_rq(struct request *rq);
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a8b7bf879ced..1a1df0d21ee3 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -571,6 +571,8 @@ struct Scsi_Host {
struct blk_mq_tag_set tag_set;
};
+ struct rcu_head rcu;
+
atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 1df8efb0ee01..c36860111932 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -236,6 +236,7 @@ struct scsi_varlen_cdb_hdr {
#define UNIT_ATTENTION 0x06
#define DATA_PROTECT 0x07
#define BLANK_CHECK 0x08
+#define VENDOR_SPECIFIC 0x09
#define COPY_ABORTED 0x0a
#define ABORTED_COMMAND 0x0b
#define VOLUME_OVERFLOW 0x0d
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 8cf30215c177..15da45dc2a5d 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -139,8 +139,8 @@ enum fc_vport_state {
#define FC_PORTSPEED_50GBIT 0x200
#define FC_PORTSPEED_100GBIT 0x400
#define FC_PORTSPEED_25GBIT 0x800
-#define FC_PORTSPEED_64BIT 0x1000
-#define FC_PORTSPEED_128BIT 0x2000
+#define FC_PORTSPEED_64GBIT 0x1000
+#define FC_PORTSPEED_128GBIT 0x2000
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 62895b405933..05ec927a3c72 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -156,6 +156,7 @@ struct sas_port {
struct mutex phy_list_mutex;
struct list_head phy_list;
+ struct list_head del_list; /* libsas only */
};
#define dev_to_sas_port(d) \
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 5be834de491a..c16a3c9a4d9b 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -129,6 +129,23 @@ struct srp_login_req {
u8 target_port_id[16];
};
+/**
+ * struct srp_login_req_rdma - RDMA/CM login parameters.
+ *
+ * RDMA/CM over InfiniBand can only carry 92 - 36 = 56 bytes of private
+ * data. The %srp_login_req_rdma structure contains the same information as
+ * %srp_login_req but with the reserved data removed.
+ */
+struct srp_login_req_rdma {
+ u64 tag;
+ __be16 req_buf_fmt;
+ u8 req_flags;
+ u8 opcode;
+ __be32 req_it_iu_len;
+ u8 initiator_port_id[16];
+ u8 target_port_id[16];
+};
+
/*
* The SRP spec defines the size of the LOGIN_RSP structure to be 52
* bytes, so it needs to be packed to avoid having it padded to 56
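A compile-time sanity check one could add for the new structure
(hypothetical; 8 + 2 + 1 + 1 + 4 + 16 + 16 = 48 bytes, which fits in the
56 bytes of private data with no padding expected):

/* Hypothetical sketch: assert the request fits in the private data. */
static inline void example_check_srp_login_req_rdma(void)
{
	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) > 56);
}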
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index ca00130cb028..9c14e21dda85 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -193,7 +193,7 @@ struct hda_dai_map {
* @pvt_data - private data, for asoc contains asoc codec object
*/
struct hdac_ext_device {
- struct hdac_device hdac;
+ struct hdac_device hdev;
struct hdac_ext_bus *ebus;
/* soc-dai to nid map */
@@ -213,7 +213,7 @@ struct hdac_ext_dma_params {
u8 stream_tag;
};
#define to_ehdac_device(dev) (container_of((dev), \
- struct hdac_ext_device, hdac))
+ struct hdac_ext_device, hdev))
/*
* HD-audio codec base driver
*/
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index ab9fcb2f97f0..afeca593188a 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -37,7 +37,7 @@ struct snd_hwdep_ops {
long count, loff_t *offset);
int (*open)(struct snd_hwdep *hw, struct file * file);
int (*release)(struct snd_hwdep *hw, struct file * file);
- unsigned int (*poll)(struct snd_hwdep *hw, struct file *file,
+ __poll_t (*poll)(struct snd_hwdep *hw, struct file *file,
poll_table *wait);
int (*ioctl)(struct snd_hwdep *hw, struct file *file,
unsigned int cmd, unsigned long arg);
diff --git a/include/sound/info.h b/include/sound/info.h
index 67390ee846aa..becdf66d2825 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -62,7 +62,7 @@ struct snd_info_entry_ops {
loff_t (*llseek)(struct snd_info_entry *entry,
void *file_private_data, struct file *file,
loff_t offset, int orig);
- unsigned int (*poll)(struct snd_info_entry *entry,
+ __poll_t (*poll)(struct snd_info_entry *entry,
void *file_private_data, struct file *file,
poll_table *wait);
int (*ioctl)(struct snd_info_entry *entry, void *file_private_data,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 24febf9e177c..e054c583d3b3 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -169,6 +169,10 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_IMA_ADPCM _SNDRV_PCM_FMTBIT(IMA_ADPCM)
#define SNDRV_PCM_FMTBIT_MPEG _SNDRV_PCM_FMTBIT(MPEG)
#define SNDRV_PCM_FMTBIT_GSM _SNDRV_PCM_FMTBIT(GSM)
+#define SNDRV_PCM_FMTBIT_S20_LE _SNDRV_PCM_FMTBIT(S20_LE)
+#define SNDRV_PCM_FMTBIT_U20_LE _SNDRV_PCM_FMTBIT(U20_LE)
+#define SNDRV_PCM_FMTBIT_S20_BE _SNDRV_PCM_FMTBIT(S20_BE)
+#define SNDRV_PCM_FMTBIT_U20_BE _SNDRV_PCM_FMTBIT(U20_BE)
#define SNDRV_PCM_FMTBIT_SPECIAL _SNDRV_PCM_FMTBIT(SPECIAL)
#define SNDRV_PCM_FMTBIT_S24_3LE _SNDRV_PCM_FMTBIT(S24_3LE)
#define SNDRV_PCM_FMTBIT_U24_3LE _SNDRV_PCM_FMTBIT(U24_3LE)
@@ -202,6 +206,8 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_LE
#define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_LE
#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_LE
+#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_LE
#endif
#ifdef SNDRV_BIG_ENDIAN
#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_BE
@@ -213,6 +219,8 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_BE
#define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_BE
#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_BE
+#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE
#endif
struct snd_pcm_file {
diff --git a/include/sound/rt5514.h b/include/sound/rt5514.h
index ef18494769ee..64d027dbaaca 100644
--- a/include/sound/rt5514.h
+++ b/include/sound/rt5514.h
@@ -14,6 +14,8 @@
struct rt5514_platform_data {
unsigned int dmic_init_delay;
+ const char *dsp_calib_clk_name;
+ unsigned int dsp_calib_clk_rate;
};
#endif
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index d0c33a9972b9..f218c742f08e 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -25,6 +25,9 @@ struct rt5645_platform_data {
bool level_trigger_irq;
/* Invert JD1_1 status polarity */
bool inv_jd1_1;
+
+	/* Value to assign to snd_soc_card.long_name */
+ const char *long_name;
};
#endif
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 1a9191cd4bb3..9da6388c20a1 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -16,6 +16,7 @@
#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
#define __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
+#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/acpi.h>
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index a7d8d335b043..082224275f52 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -17,6 +17,7 @@
#include <linux/stddef.h>
#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
struct snd_soc_acpi_package_context {
char *name; /* package name */
@@ -26,17 +27,13 @@ struct snd_soc_acpi_package_context {
bool data_valid;
};
+/* codec name used in DAIs is i2c-<HID>:00 with HID being 8 chars */
+#define SND_ACPI_I2C_ID_LEN (4 + ACPI_ID_LEN + 3 + 1)
+
#if IS_ENABLED(CONFIG_ACPI)
-/* translation fron HID to I2C name, needed for DAI codec_name */
-const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx);
#else
-static inline const char *
-snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
-{
- return NULL;
-}
static inline bool
snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx)
@@ -49,9 +46,6 @@ snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
-/* acpi check hid */
-bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN]);
-
/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 58acd00cae19..8ad11669e4d8 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -102,6 +102,8 @@ struct snd_compr_stream;
SNDRV_PCM_FMTBIT_S16_BE |\
SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S20_3BE |\
+ SNDRV_PCM_FMTBIT_S20_LE |\
+ SNDRV_PCM_FMTBIT_S20_BE |\
SNDRV_PCM_FMTBIT_S24_3LE |\
SNDRV_PCM_FMTBIT_S24_3BE |\
SNDRV_PCM_FMTBIT_S32_LE |\
@@ -294,9 +296,6 @@ struct snd_soc_dai {
/* DAI runtime info */
unsigned int capture_active:1; /* stream is in use */
unsigned int playback_active:1; /* stream is in use */
- unsigned int symmetric_rates:1;
- unsigned int symmetric_channels:1;
- unsigned int symmetric_samplebits:1;
unsigned int probed:1;
unsigned int active;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1a7323238c49..b655d987fbe7 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -494,6 +494,8 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num);
#endif
+void snd_soc_disconnect_sync(struct device *dev);
+
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
const char *dai_link, int stream);
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
@@ -802,6 +804,9 @@ struct snd_soc_component_driver {
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
+ unsigned int (*read)(struct snd_soc_component *, unsigned int);
+ int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
+
/* pcm creation and destruction */
int (*pcm_new)(struct snd_soc_pcm_runtime *);
void (*pcm_free)(struct snd_pcm *);
@@ -858,12 +863,10 @@ struct snd_soc_component {
struct list_head card_aux_list; /* for auxiliary bound components */
struct list_head card_list;
- struct snd_soc_dai_driver *dai_drv;
- int num_dai;
-
const struct snd_soc_component_driver *driver;
struct list_head dai_list;
+ int num_dai;
int (*read)(struct snd_soc_component *, unsigned int, unsigned int *);
int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 1bee3e7fdf32..8ea966448b58 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -82,8 +82,8 @@ TRACE_EVENT(fdb_delete,
TP_fast_assign(
__assign_str(br_dev, br->dev->name);
__assign_str(dev, f->dst ? f->dst->dev->name : "null");
- memcpy(__entry->addr, f->addr.addr, ETH_ALEN);
- __entry->vid = f->vlan_id;
+ memcpy(__entry->addr, f->key.addr.addr, ETH_ALEN);
+ __entry->vid = f->key.vlan_id;
),
TP_printk("br_dev %s dev %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u",
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4342a329821f..c3ac5ec86519 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -193,7 +193,6 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
__print_flags(flag, "|", \
{ (1 << EXTENT_FLAG_PINNED), "PINNED" },\
{ (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
- { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
{ (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
{ (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
{ (1 << EXTENT_FLAG_FILLING), "FILLING" },\
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 8f8dd42fa57b..06c87f9f720c 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -147,7 +147,8 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ CP_NO_SPC_ROLL, "no space roll forward" }, \
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
- { CP_SPEC_LOG_NUM, "log type is 2" })
+ { CP_SPEC_LOG_NUM, "log type is 2" }, \
+ { CP_RECOVER_DIR, "dir needs recovery" })
struct victim_sel_policy;
struct f2fs_map_blocks;
diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h
new file mode 100644
index 000000000000..3930119cab08
--- /dev/null
+++ b/include/trace/events/net_probe_common.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_TRACE_NET_PROBE_COMMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NET_PROBE_COMMON_H
+
+#define TP_STORE_ADDR_PORTS_V4(__entry, inet, sk) \
+ do { \
+ struct sockaddr_in *v4 = (void *)__entry->saddr; \
+ \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = inet->inet_sport; \
+ v4->sin_addr.s_addr = inet->inet_saddr; \
+ v4 = (void *)__entry->daddr; \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = inet->inet_dport; \
+ v4->sin_addr.s_addr = inet->inet_daddr; \
+ } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \
+ do { \
+ if (sk->sk_family == AF_INET6) { \
+ struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
+ \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = inet->inet_sport; \
+ v6->sin6_addr = inet6_sk(sk)->saddr; \
+ v6 = (void *)__entry->daddr; \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = inet->inet_dport; \
+ v6->sin6_addr = sk->sk_v6_daddr; \
+ } else \
+ TP_STORE_ADDR_PORTS_V4(__entry, inet, sk); \
+ } while (0)
+
+#else
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \
+ TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);
+
+#endif
+
+#endif
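These macros assume the calling tracepoint reserves sockaddr-sized
scratch space in TP_STRUCT__entry; a minimal sketch of the expected
declarations (the tcp_probe event later in this patch does exactly
this):

	__array(__u8, saddr, sizeof(struct sockaddr_in6))
	__array(__u8, daddr, sizeof(struct sockaddr_in6))

TP_fast_assign() then calls TP_STORE_ADDR_PORTS(__entry, inet, sk) to
fill both arrays.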
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 59d40c454aa0..0b50fda80db0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -243,6 +243,7 @@ TRACE_EVENT(rcu_exp_funnel_lock,
__entry->grphi, __entry->gpevent)
);
+#ifdef CONFIG_RCU_NOCB_CPU
/*
* Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
* to assist debugging of these handoffs.
@@ -285,6 +286,7 @@ TRACE_EVENT(rcu_nocb_wake,
TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
);
+#endif
/*
* Tracepoint for tasks blocking within preemptible-RCU read-side
@@ -421,76 +423,40 @@ TRACE_EVENT(rcu_fqs,
/*
* Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode, "End" for
- * leaving it, "--=" for events moving towards idle, and "++=" for events
- * moving away from idle. "Error on entry: not idle task" and "Error on
- * exit: not idle task" indicate that a non-idle task is erroneously
- * toying with the idle loop.
+ * as argument: "Start" for entering dyntick-idle mode, "Startirq" for
+ * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it
+ * to irq/NMI, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle.
*
* These events also take a pair of numbers, which indicate the nesting
- * depth before and after the event of interest. Note that task-related
- * events use the upper bits of each number, while interrupt-related
- * events use the lower bits.
+ * depth before and after the event of interest, and a third number that is
+ * the ->dynticks counter. Note that task-related and interrupt-related
+ * events use two separate counters, and that the "++=" and "--=" events
+ * for irq/NMI will change the counter by two, otherwise by one.
*/
TRACE_EVENT(rcu_dyntick,
- TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
- TP_ARGS(polarity, oldnesting, newnesting),
+ TP_ARGS(polarity, oldnesting, newnesting, dynticks),
TP_STRUCT__entry(
__field(const char *, polarity)
- __field(long long, oldnesting)
- __field(long long, newnesting)
+ __field(long, oldnesting)
+ __field(long, newnesting)
+ __field(int, dynticks)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
+ __entry->dynticks = atomic_read(&dynticks);
),
- TP_printk("%s %llx %llx", __entry->polarity,
- __entry->oldnesting, __entry->newnesting)
-);
-
-/*
- * Tracepoint for RCU preparation for idle, the goal being to get RCU
- * processing done so that the current CPU can shut off its scheduling
- * clock and enter dyntick-idle mode. One way to accomplish this is
- * to drain all RCU callbacks from this CPU, and the other is to have
- * done everything RCU requires for the current grace period. In this
- * latter case, the CPU will be awakened at the end of the current grace
- * period in order to process the remainder of its callbacks.
- *
- * These tracepoints take a string as argument:
- *
- * "No callbacks": Nothing to do, no callbacks on this CPU.
- * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
- * "Begin holdoff": Attempt failed, don't retry until next jiffy.
- * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
- * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
- * "More callbacks": Still more callbacks, try again to clear them out.
- * "Callbacks drained": All callbacks processed, off to dyntick idle!
- * "Timer": Timer fired to cause CPU to continue processing callbacks.
- * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
- * "Cleanup after idle": Idle exited, timer canceled.
- */
-TRACE_EVENT(rcu_prep_idle,
-
- TP_PROTO(const char *reason),
-
- TP_ARGS(reason),
-
- TP_STRUCT__entry(
- __field(const char *, reason)
- ),
-
- TP_fast_assign(
- __entry->reason = reason;
- ),
-
- TP_printk("%s", __entry->reason)
+ TP_printk("%s %lx %lx %#3x", __entry->polarity,
+ __entry->oldnesting, __entry->newnesting,
+ __entry->dynticks & 0xfff)
);
/*
@@ -799,8 +765,7 @@ TRACE_EVENT(rcu_barrier,
grplo, grphi, gp_tasks) do { } \
while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
-#define trace_rcu_prep_idle(reason) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
do { } while (0)
diff --git a/include/trace/events/rdma.h b/include/trace/events/rdma.h
new file mode 100644
index 000000000000..aa19afc73a4e
--- /dev/null
+++ b/include/trace/events/rdma.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Oracle. All rights reserved.
+ */
+
+/*
+ * enum ib_event_type, from include/rdma/ib_verbs.h
+ */
+
+#define IB_EVENT_LIST \
+ ib_event(CQ_ERR) \
+ ib_event(QP_FATAL) \
+ ib_event(QP_REQ_ERR) \
+ ib_event(QP_ACCESS_ERR) \
+ ib_event(COMM_EST) \
+ ib_event(SQ_DRAINED) \
+ ib_event(PATH_MIG) \
+ ib_event(PATH_MIG_ERR) \
+ ib_event(DEVICE_FATAL) \
+ ib_event(PORT_ACTIVE) \
+ ib_event(PORT_ERR) \
+ ib_event(LID_CHANGE) \
+ ib_event(PKEY_CHANGE) \
+ ib_event(SM_CHANGE) \
+ ib_event(SRQ_ERR) \
+ ib_event(SRQ_LIMIT_REACHED) \
+ ib_event(QP_LAST_WQE_REACHED) \
+ ib_event(CLIENT_REREGISTER) \
+ ib_event(GID_CHANGE) \
+ ib_event_end(WQ_FATAL)
+
+#undef ib_event
+#undef ib_event_end
+
+#define ib_event(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
+#define ib_event_end(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
+
+IB_EVENT_LIST
+
+#undef ib_event
+#undef ib_event_end
+
+#define ib_event(x) { IB_EVENT_##x, #x },
+#define ib_event_end(x) { IB_EVENT_##x, #x }
+
+#define rdma_show_ib_event(x) \
+ __print_symbolic(x, IB_EVENT_LIST)
+
+/*
+ * enum ib_wc_status type, from include/rdma/ib_verbs.h
+ */
+#define IB_WC_STATUS_LIST \
+ ib_wc_status(SUCCESS) \
+ ib_wc_status(LOC_LEN_ERR) \
+ ib_wc_status(LOC_QP_OP_ERR) \
+ ib_wc_status(LOC_EEC_OP_ERR) \
+ ib_wc_status(LOC_PROT_ERR) \
+ ib_wc_status(WR_FLUSH_ERR) \
+ ib_wc_status(MW_BIND_ERR) \
+ ib_wc_status(BAD_RESP_ERR) \
+ ib_wc_status(LOC_ACCESS_ERR) \
+ ib_wc_status(REM_INV_REQ_ERR) \
+ ib_wc_status(REM_ACCESS_ERR) \
+ ib_wc_status(REM_OP_ERR) \
+ ib_wc_status(RETRY_EXC_ERR) \
+ ib_wc_status(RNR_RETRY_EXC_ERR) \
+ ib_wc_status(LOC_RDD_VIOL_ERR) \
+ ib_wc_status(REM_INV_RD_REQ_ERR) \
+ ib_wc_status(REM_ABORT_ERR) \
+ ib_wc_status(INV_EECN_ERR) \
+ ib_wc_status(INV_EEC_STATE_ERR) \
+ ib_wc_status(FATAL_ERR) \
+ ib_wc_status(RESP_TIMEOUT_ERR) \
+ ib_wc_status_end(GENERAL_ERR)
+
+#undef ib_wc_status
+#undef ib_wc_status_end
+
+#define ib_wc_status(x) TRACE_DEFINE_ENUM(IB_WC_##x);
+#define ib_wc_status_end(x) TRACE_DEFINE_ENUM(IB_WC_##x);
+
+IB_WC_STATUS_LIST
+
+#undef ib_wc_status
+#undef ib_wc_status_end
+
+#define ib_wc_status(x) { IB_WC_##x, #x },
+#define ib_wc_status_end(x) { IB_WC_##x, #x }
+
+#define rdma_show_wc_status(x) \
+ __print_symbolic(x, IB_WC_STATUS_LIST)
+
+/*
+ * enum rdma_cm_event_type, from include/rdma/rdma_cm.h
+ */
+#define RDMA_CM_EVENT_LIST \
+ rdma_cm_event(ADDR_RESOLVED) \
+ rdma_cm_event(ADDR_ERROR) \
+ rdma_cm_event(ROUTE_RESOLVED) \
+ rdma_cm_event(ROUTE_ERROR) \
+ rdma_cm_event(CONNECT_REQUEST) \
+ rdma_cm_event(CONNECT_RESPONSE) \
+ rdma_cm_event(CONNECT_ERROR) \
+ rdma_cm_event(UNREACHABLE) \
+ rdma_cm_event(REJECTED) \
+ rdma_cm_event(ESTABLISHED) \
+ rdma_cm_event(DISCONNECTED) \
+ rdma_cm_event(DEVICE_REMOVAL) \
+ rdma_cm_event(MULTICAST_JOIN) \
+ rdma_cm_event(MULTICAST_ERROR) \
+ rdma_cm_event(ADDR_CHANGE) \
+ rdma_cm_event_end(TIMEWAIT_EXIT)
+
+#undef rdma_cm_event
+#undef rdma_cm_event_end
+
+#define rdma_cm_event(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+#define rdma_cm_event_end(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+
+RDMA_CM_EVENT_LIST
+
+#undef rdma_cm_event
+#undef rdma_cm_event_end
+
+#define rdma_cm_event(x) { RDMA_CM_EVENT_##x, #x },
+#define rdma_cm_event_end(x) { RDMA_CM_EVENT_##x, #x }
+
+#define rdma_show_cm_event(x) \
+ __print_symbolic(x, RDMA_CM_EVENT_LIST)
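Each list above is deliberately expanded twice: once into
TRACE_DEFINE_ENUM() statements so user space can resolve the values, and
once into a __print_symbolic() table. A minimal sketch of the same
X-macro pattern outside of tracing (hypothetical names):

/* Hypothetical sketch of the double-expansion X-macro pattern. */
enum { COLOR_RED, COLOR_BLUE };

#define COLOR_LIST \
	color(RED) \
	color_end(BLUE)

#define color(x)	{ COLOR_##x, #x },
#define color_end(x)	{ COLOR_##x, #x }

static const struct { int val; const char *name; } color_names[] = {
	COLOR_LIST	/* { COLOR_RED, "RED" }, { COLOR_BLUE, "BLUE" } */
};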
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
new file mode 100644
index 000000000000..50ed3f8bf534
--- /dev/null
+++ b/include/trace/events/rpcrdma.h
@@ -0,0 +1,890 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Oracle. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpcrdma
+
+#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPCRDMA_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/rdma.h>
+
+/**
+ ** Event classes
+ **/
+
+DECLARE_EVENT_CLASS(xprtrdma_reply_event,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rep),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(const void *, r_xprt)
+ __field(u32, xid)
+ __field(u32, version)
+ __field(u32, proc)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->r_xprt = rep->rr_rxprt;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->version = be32_to_cpu(rep->rr_vers);
+ __entry->proc = be32_to_cpu(rep->rr_proc);
+ ),
+
+ TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
+ __entry->r_xprt, __entry->xid, __entry->rep,
+ __entry->version, __entry->proc
+ )
+);
+
+#define DEFINE_REPLY_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_reply_event, name, \
+ TP_PROTO( \
+ const struct rpcrdma_rep *rep \
+ ), \
+ TP_ARGS(rep))
+
+DECLARE_EVENT_CLASS(xprtrdma_rxprt,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt
+ ),
+
+ TP_ARGS(r_xprt),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p",
+ __get_str(addr), __get_str(port), __entry->r_xprt
+ )
+);
+
+#define DEFINE_RXPRT_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_rxprt, name, \
+ TP_PROTO( \
+ const struct rpcrdma_xprt *r_xprt \
+ ), \
+ TP_ARGS(r_xprt))
+
+DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
+ TP_PROTO(
+ const struct rpc_task *task,
+ unsigned int pos,
+ struct rpcrdma_mr *mr,
+ int nsegs
+ ),
+
+ TP_ARGS(task, pos, mr, nsegs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, mr)
+ __field(unsigned int, pos)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(int, nsegs)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->mr = mr;
+ __entry->pos = pos;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->nsegs = nsegs;
+ ),
+
+ TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id, __entry->mr,
+ __entry->pos, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle,
+ __entry->nents < __entry->nsegs ? "more" : "last"
+ )
+);
+
+#define DEFINE_RDCH_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_rdch_event, name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ unsigned int pos, \
+ struct rpcrdma_mr *mr, \
+ int nsegs \
+ ), \
+ TP_ARGS(task, pos, mr, nsegs))
+
+DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
+ TP_PROTO(
+ const struct rpc_task *task,
+ struct rpcrdma_mr *mr,
+ int nsegs
+ ),
+
+ TP_ARGS(task, mr, nsegs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, mr)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(int, nsegs)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->mr = mr;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->nsegs = nsegs;
+ ),
+
+ TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id, __entry->mr,
+ __entry->length, (unsigned long long)__entry->offset,
+ __entry->handle,
+ __entry->nents < __entry->nsegs ? "more" : "last"
+ )
+);
+
+#define DEFINE_WRCH_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_wrch_event, name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ struct rpcrdma_mr *mr, \
+ int nsegs \
+ ), \
+ TP_ARGS(task, mr, nsegs))
+
+TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
+TRACE_DEFINE_ENUM(FRWR_IS_VALID);
+TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
+TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
+
+#define xprtrdma_show_frwr_state(x) \
+ __print_symbolic(x, \
+ { FRWR_IS_INVALID, "INVALID" }, \
+ { FRWR_IS_VALID, "VALID" }, \
+ { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
+ { FRWR_FLUSHED_LI, "FLUSHED_LI" })
+
+DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpcrdma_frwr *frwr
+ ),
+
+ TP_ARGS(wc, frwr),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(unsigned int, state)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
+ __entry->state = frwr->fr_state;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk(
+ "mr=%p state=%s: %s (%u/0x%x)",
+ __entry->mr, xprtrdma_show_frwr_state(__entry->state),
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_FRWR_DONE_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_frwr_done, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpcrdma_frwr *frwr \
+ ), \
+ TP_ARGS(wc, frwr))
+
+DECLARE_EVENT_CLASS(xprtrdma_mr,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr
+ ),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ ),
+
+ TP_printk("mr=%p %u@0x%016llx:0x%08x",
+ __entry->mr, __entry->length,
+ (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
+#define DEFINE_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_mr, name, \
+ TP_PROTO( \
+ const struct rpcrdma_mr *mr \
+ ), \
+ TP_ARGS(mr))
+
+DECLARE_EVENT_CLASS(xprtrdma_cb_event,
+ TP_PROTO(
+ const struct rpc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(const void *, rqst)
+ __field(const void *, rep)
+ __field(const void *, req)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->rqst = rqst;
+ __entry->req = rpcr_to_rdmar(rqst);
+ __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
+ __entry->xid, __entry->rqst, __entry->req, __entry->rep
+ )
+);
+
+#define DEFINE_CB_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_cb_event, name, \
+ TP_PROTO( \
+ const struct rpc_rqst *rqst \
+ ), \
+ TP_ARGS(rqst))
+
+/**
+ ** Connection events
+ **/
+
+TRACE_EVENT(xprtrdma_conn_upcall,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ struct rdma_cm_event *event
+ ),
+
+ TP_ARGS(r_xprt, event),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, event)
+ __field(int, status)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->event = event->event;
+ __entry->status = event->status;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, rdma_show_cm_event(__entry->event),
+ __entry->event, __entry->status
+ )
+);
+
+TRACE_EVENT(xprtrdma_disconnect,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ int status
+ ),
+
+ TP_ARGS(r_xprt, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(int, status)
+ __field(int, connected)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->status = status;
+ __entry->connected = r_xprt->rx_ep.rep_connected;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, __entry->status,
+ __entry->connected == 1 ? "still " : "dis"
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
+DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
+DEFINE_RXPRT_EVENT(xprtrdma_create);
+DEFINE_RXPRT_EVENT(xprtrdma_destroy);
+DEFINE_RXPRT_EVENT(xprtrdma_remove);
+DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
+DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
+DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
+
+TRACE_EVENT(xprtrdma_qp_error,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ const struct ib_event *event
+ ),
+
+ TP_ARGS(r_xprt, event),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, event)
+ __string(name, event->device->name)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->event = event->event;
+ __assign_str(name, event->device->name);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __get_str(name), rdma_show_ib_event(__entry->event),
+ __entry->event
+ )
+);
+
+/**
+ ** Call events
+ **/
+
+TRACE_EVENT(xprtrdma_createmrs,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned int count
+ ),
+
+ TP_ARGS(r_xprt, count),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->count = count;
+ ),
+
+ TP_printk("r_xprt=%p: created %u MRs",
+ __entry->r_xprt, __entry->count
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
+
+DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
+DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
+DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
+
+TRACE_DEFINE_ENUM(rpcrdma_noch);
+TRACE_DEFINE_ENUM(rpcrdma_readch);
+TRACE_DEFINE_ENUM(rpcrdma_areadch);
+TRACE_DEFINE_ENUM(rpcrdma_writech);
+TRACE_DEFINE_ENUM(rpcrdma_replych);
+
+#define xprtrdma_show_chunktype(x) \
+ __print_symbolic(x, \
+ { rpcrdma_noch, "inline" }, \
+ { rpcrdma_readch, "read list" }, \
+ { rpcrdma_areadch, "*read list" }, \
+ { rpcrdma_writech, "write list" }, \
+ { rpcrdma_replych, "reply chunk" })
+
+TRACE_EVENT(xprtrdma_marshal,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ unsigned int hdrlen,
+ unsigned int rtype,
+ unsigned int wtype
+ ),
+
+ TP_ARGS(rqst, hdrlen, rtype, wtype),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(unsigned int, hdrlen)
+ __field(unsigned int, headlen)
+ __field(unsigned int, pagelen)
+ __field(unsigned int, taillen)
+ __field(unsigned int, rtype)
+ __field(unsigned int, wtype)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->hdrlen = hdrlen;
+ __entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
+ __entry->pagelen = rqst->rq_snd_buf.page_len;
+ __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
+ __entry->rtype = rtype;
+ __entry->wtype = wtype;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->hdrlen,
+ __entry->headlen, __entry->pagelen, __entry->taillen,
+ xprtrdma_show_chunktype(__entry->rtype),
+ xprtrdma_show_chunktype(__entry->wtype)
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_send,
+ TP_PROTO(
+ const struct rpcrdma_req *req,
+ int status
+ ),
+
+ TP_ARGS(req, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(int, num_sge)
+ __field(bool, signaled)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
+ __entry->signaled = req->rl_sendctx->sc_wr.send_flags &
+ IB_SEND_SIGNALED;
+ __entry->status = status;
+ ),
+
+ TP_printk("req=%p, %d SGEs%s, status=%d",
+ __entry->req, __entry->num_sge,
+ (__entry->signaled ? ", signaled" : ""),
+ __entry->status
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_recv,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep,
+ int status
+ ),
+
+ TP_ARGS(rep, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->status = status;
+ ),
+
+ TP_printk("rep=%p status=%d",
+ __entry->rep, __entry->status
+ )
+);
+
+/**
+ ** Completion events
+ **/
+
+TRACE_EVENT(xprtrdma_wc_send,
+ TP_PROTO(
+ const struct rpcrdma_sendctx *sc,
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(sc, wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(unsigned int, unmap_count)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->req = sc->sc_req;
+ __entry->unmap_count = sc->sc_unmap_count;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
+ __entry->req, __entry->unmap_count,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+TRACE_EVENT(xprtrdma_wc_receive,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep,
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(rep, wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(unsigned int, byte_len)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->byte_len = wc->byte_len;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk("rep=%p, %u bytes: %s (%u/0x%x)",
+ __entry->rep, __entry->byte_len,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
+
+DEFINE_MR_EVENT(xprtrdma_localinv);
+DEFINE_MR_EVENT(xprtrdma_dma_unmap);
+DEFINE_MR_EVENT(xprtrdma_remoteinv);
+DEFINE_MR_EVENT(xprtrdma_recover_mr);
+
+/**
+ ** Reply events
+ **/
+
+TRACE_EVENT(xprtrdma_reply,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_rep *rep,
+ const struct rpcrdma_req *req,
+ unsigned int credits
+ ),
+
+ TP_ARGS(task, rep, req, credits),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, rep)
+ __field(const void *, req)
+ __field(u32, xid)
+ __field(unsigned int, credits)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->rep = rep;
+ __entry->req = req;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->credits = credits;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->credits, __entry->rep, __entry->req
+ )
+);
+
+TRACE_EVENT(xprtrdma_defer_cmp,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rep),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, rep)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
+ __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
+ __entry->rep = rep;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x rep=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->rep
+ )
+);
+
+DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
+DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
+DEFINE_REPLY_EVENT(xprtrdma_reply_short);
+DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
+
+TRACE_EVENT(xprtrdma_fixup,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ int len,
+ int hdrlen
+ ),
+
+ TP_ARGS(rqst, len, hdrlen),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, base)
+ __field(int, len)
+ __field(int, hdrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->base = rqst->rq_rcv_buf.head[0].iov_base;
+ __entry->len = len;
+ __entry->hdrlen = hdrlen;
+ ),
+
+ TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->base, __entry->len, __entry->hdrlen
+ )
+);
+
+TRACE_EVENT(xprtrdma_fixup_pg,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ int pageno,
+ const void *pos,
+ int len,
+ int curlen
+ ),
+
+ TP_ARGS(rqst, pageno, pos, len, curlen),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, pos)
+ __field(int, pageno)
+ __field(int, len)
+ __field(int, curlen)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->pos = pos;
+ __entry->pageno = pageno;
+ __entry->len = len;
+ __entry->curlen = curlen;
+ ),
+
+ TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->pageno, __entry->pos, __entry->len, __entry->curlen
+ )
+);
+
+TRACE_EVENT(xprtrdma_decode_seg,
+ TP_PROTO(
+ u32 handle,
+ u32 length,
+ u64 offset
+ ),
+
+ TP_ARGS(handle, length, offset),
+
+ TP_STRUCT__entry(
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->handle = handle;
+ __entry->length = length;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("%u@0x%016llx:0x%08x",
+ __entry->length, (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
+/**
+ ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
+ **/
+
+TRACE_EVENT(xprtrdma_allocate,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(task, req),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, req)
+ __field(const void *, rep)
+ __field(size_t, callsize)
+ __field(size_t, rcvsize)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->req = req;
+ __entry->rep = req ? req->rl_reply : NULL;
+ __entry->callsize = task->tk_rqstp->rq_callsize;
+ __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
+ ),
+
+ TP_printk("task:%u@%u req=%p rep=%p (%zu, %zu)",
+ __entry->task_id, __entry->client_id,
+ __entry->req, __entry->rep,
+ __entry->callsize, __entry->rcvsize
+ )
+);
+
+TRACE_EVENT(xprtrdma_rpc_done,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(task, req),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, req)
+ __field(const void *, rep)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->req = req;
+ __entry->rep = req->rl_reply;
+ ),
+
+ TP_printk("task:%u@%u req=%p rep=%p",
+ __entry->task_id, __entry->client_id,
+ __entry->req, __entry->rep
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_noreps);
+
+/**
+ ** Callback events
+ **/
+
+TRACE_EVENT(xprtrdma_cb_setup,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned int reqs
+ ),
+
+ TP_ARGS(r_xprt, reqs),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, reqs)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->reqs = reqs;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, __entry->reqs
+ )
+);
+
+DEFINE_CB_EVENT(xprtrdma_cb_call);
+DEFINE_CB_EVENT(xprtrdma_cb_reply);
+
+#endif /* _TRACE_RPCRDMA_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
new file mode 100644
index 000000000000..7475c7be165a
--- /dev/null
+++ b/include/trace/events/sctp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sctp
+
+#if !defined(_TRACE_SCTP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCTP_H
+
+#include <net/sctp/structs.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sctp_probe_path,
+
+ TP_PROTO(struct sctp_transport *sp,
+ const struct sctp_association *asoc),
+
+ TP_ARGS(sp, asoc),
+
+ TP_STRUCT__entry(
+ __field(__u64, asoc)
+ __field(__u32, primary)
+ __array(__u8, ipaddr, sizeof(union sctp_addr))
+ __field(__u32, state)
+ __field(__u32, cwnd)
+ __field(__u32, ssthresh)
+ __field(__u32, flight_size)
+ __field(__u32, partial_bytes_acked)
+ __field(__u32, pathmtu)
+ ),
+
+ TP_fast_assign(
+ __entry->asoc = (unsigned long)asoc;
+ __entry->primary = (sp == asoc->peer.primary_path);
+ memcpy(__entry->ipaddr, &sp->ipaddr, sizeof(union sctp_addr));
+ __entry->state = sp->state;
+ __entry->cwnd = sp->cwnd;
+ __entry->ssthresh = sp->ssthresh;
+ __entry->flight_size = sp->flight_size;
+ __entry->partial_bytes_acked = sp->partial_bytes_acked;
+ __entry->pathmtu = sp->pathmtu;
+ ),
+
+ TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u "
+ "flight_size=%u partial_bytes_acked=%u pathmtu=%u",
+ __entry->asoc, __entry->primary ? "(*)" : "",
+ __entry->ipaddr, __entry->state, __entry->cwnd,
+ __entry->ssthresh, __entry->flight_size,
+ __entry->partial_bytes_acked, __entry->pathmtu)
+);
+
+TRACE_EVENT(sctp_probe,
+
+ TP_PROTO(const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk),
+
+ TP_ARGS(ep, asoc, chunk),
+
+ TP_STRUCT__entry(
+ __field(__u64, asoc)
+ __field(__u32, mark)
+ __field(__u16, bind_port)
+ __field(__u16, peer_port)
+ __field(__u32, pathmtu)
+ __field(__u32, rwnd)
+ __field(__u16, unack_data)
+ ),
+
+ TP_fast_assign(
+ struct sk_buff *skb = chunk->skb;
+
+ __entry->asoc = (unsigned long)asoc;
+ __entry->mark = skb->mark;
+ __entry->bind_port = ep->base.bind_addr.port;
+ __entry->peer_port = asoc->peer.port;
+ __entry->pathmtu = asoc->pathmtu;
+ __entry->rwnd = asoc->peer.rwnd;
+ __entry->unack_data = asoc->unack_data;
+
+ if (trace_sctp_probe_path_enabled()) {
+ struct sctp_transport *sp;
+
+ list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+ transports) {
+ trace_sctp_probe_path(sp, asoc);
+ }
+ }
+ ),
+
+ TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
+ "rwnd=%u unack_data=%d",
+ __entry->asoc, __entry->mark, __entry->bind_port,
+ __entry->peer_port, __entry->pathmtu, __entry->rwnd,
+ __entry->unack_data)
+);
+
+#endif /* _TRACE_SCTP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index ec4dade24466..3176a3931107 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -6,7 +6,58 @@
#define _TRACE_SOCK_H
#include <net/sock.h>
+#include <net/ipv6.h>
#include <linux/tracepoint.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#define family_names \
+ EM(AF_INET) \
+ EMe(AF_INET6)
+
+/* The protocol traced by inet_sock_set_state */
+#define inet_protocol_names \
+ EM(IPPROTO_TCP) \
+ EM(IPPROTO_DCCP) \
+ EMe(IPPROTO_SCTP)
+
+#define tcp_state_names \
+ EM(TCP_ESTABLISHED) \
+ EM(TCP_SYN_SENT) \
+ EM(TCP_SYN_RECV) \
+ EM(TCP_FIN_WAIT1) \
+ EM(TCP_FIN_WAIT2) \
+ EM(TCP_TIME_WAIT) \
+ EM(TCP_CLOSE) \
+ EM(TCP_CLOSE_WAIT) \
+ EM(TCP_LAST_ACK) \
+ EM(TCP_LISTEN) \
+ EM(TCP_CLOSING) \
+ EMe(TCP_NEW_SYN_RECV)
+
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a) TRACE_DEFINE_ENUM(a);
+#define EMe(a) TRACE_DEFINE_ENUM(a);
+
+family_names
+inet_protocol_names
+tcp_state_names
+
+#undef EM
+#undef EMe
+#define EM(a) { a, #a },
+#define EMe(a) { a, #a }
+
+#define show_family_name(val) \
+ __print_symbolic(val, family_names)
+
+#define show_inet_protocol_name(val) \
+ __print_symbolic(val, inet_protocol_names)
+
+#define show_tcp_state_name(val) \
+ __print_symbolic(val, tcp_state_names)
TRACE_EVENT(sock_rcvqueue_full,
@@ -63,6 +114,72 @@ TRACE_EVENT(sock_exceed_buf_limit,
__entry->rmem_alloc)
);
+TRACE_EVENT(inet_sock_set_state,
+
+ TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
+
+ TP_ARGS(sk, oldstate, newstate),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(int, oldstate)
+ __field(int, newstate)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __field(__u8, protocol)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->skaddr = sk;
+ __entry->oldstate = oldstate;
+ __entry->newstate = newstate;
+
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ *pin6 = sk->sk_v6_rcv_saddr;
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ *pin6 = sk->sk_v6_daddr;
+ } else
+#endif
+ {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+ }
+ ),
+
+ TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
+ show_family_name(__entry->family),
+ show_inet_protocol_name(__entry->protocol),
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ show_tcp_state_name(__entry->oldstate),
+ show_tcp_state_name(__entry->newstate))
+);
+
#endif /* _TRACE_SOCK_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 8c153f68509e..970c91a83173 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(rpc_task_status,
__entry->status = task->tk_status;
),
- TP_printk("task:%u@%u, status %d",
+ TP_printk("task:%u@%u status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -66,7 +66,7 @@ TRACE_EVENT(rpc_connect_status,
__entry->status = status;
),
- TP_printk("task:%u@%u, status %d",
+ TP_printk("task:%u@%u status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -175,7 +175,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
),
TP_fast_assign(
- __entry->client_id = clnt->cl_clid;
+ __entry->client_id = clnt ? clnt->cl_clid : -1;
__entry->task_id = task->tk_pid;
__entry->timeout = task->tk_timeout;
__entry->runstate = task->tk_runstate;
@@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__assign_str(q_name, rpc_qname(q));
),
- TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
+ TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
@@ -390,6 +390,10 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
__entry->status)
);
+DEFINE_EVENT(rpc_xprt_event, xprt_timer,
+ TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+ TP_ARGS(xprt, xid, status));
+
DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst,
TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
TP_ARGS(xprt, xid, status));
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index ab34c561f26b..878b2be7ce77 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM tcp
@@ -8,22 +9,7 @@
#include <linux/tcp.h>
#include <linux/tracepoint.h>
#include <net/ipv6.h>
-
-#define tcp_state_name(state) { state, #state }
-#define show_tcp_state_name(val) \
- __print_symbolic(val, \
- tcp_state_name(TCP_ESTABLISHED), \
- tcp_state_name(TCP_SYN_SENT), \
- tcp_state_name(TCP_SYN_RECV), \
- tcp_state_name(TCP_FIN_WAIT1), \
- tcp_state_name(TCP_FIN_WAIT2), \
- tcp_state_name(TCP_TIME_WAIT), \
- tcp_state_name(TCP_CLOSE), \
- tcp_state_name(TCP_CLOSE_WAIT), \
- tcp_state_name(TCP_LAST_ACK), \
- tcp_state_name(TCP_LISTEN), \
- tcp_state_name(TCP_CLOSING), \
- tcp_state_name(TCP_NEW_SYN_RECV))
+#include <net/tcp.h>
#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
do { \
@@ -270,6 +256,64 @@ TRACE_EVENT(tcp_retransmit_synack,
__entry->saddr_v6, __entry->daddr_v6)
);
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(tcp_probe,
+
+ TP_PROTO(struct sock *sk, struct sk_buff *skb),
+
+ TP_ARGS(sk, skb),
+
+ TP_STRUCT__entry(
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u32, mark)
+ __field(__u16, length)
+ __field(__u32, snd_nxt)
+ __field(__u32, snd_una)
+ __field(__u32, snd_cwnd)
+ __field(__u32, ssthresh)
+ __field(__u32, snd_wnd)
+ __field(__u32, srtt)
+ __field(__u32, rcv_wnd)
+ ),
+
+ TP_fast_assign(
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ /* For filtering use */
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->mark = skb->mark;
+
+ __entry->length = skb->len;
+ __entry->snd_nxt = tp->snd_nxt;
+ __entry->snd_una = tp->snd_una;
+ __entry->snd_cwnd = tp->snd_cwnd;
+ __entry->snd_wnd = tp->snd_wnd;
+ __entry->rcv_wnd = tp->rcv_wnd;
+ __entry->ssthresh = tcp_current_ssthresh(sk);
+ __entry->srtt = tp->srtt_us >> 3;
+ ),
+
+ TP_printk("src=%pISpc dest=%pISpc mark=%#x length=%d snd_nxt=%#x "
+ "snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u "
+ "rcv_wnd=%u",
+ __entry->saddr, __entry->daddr, __entry->mark,
+ __entry->length, __entry->snd_nxt, __entry->snd_una,
+ __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
+ __entry->srtt, __entry->rcv_wnd)
+);
+
#endif /* _TRACE_TCP_H */
/* This part must be outside protection */
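
The srtt assignment divides tp->srtt_us back to plain microseconds,
since the kernel keeps the smoothed RTT left-shifted by 3. A sketch of
the expected hook, assuming the tracepoint fires on established-state
receive much like the old tcp_probe module did (call site not in this
hunk):

    void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
    {
            trace_tcp_probe(sk, skb);
            /* ... regular receive processing ... */
    }
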
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 78946640fe03..135e5421f003 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -94,9 +94,9 @@ TRACE_EVENT(thermal_zone_trip,
#ifdef CONFIG_CPU_THERMAL
TRACE_EVENT(thermal_power_cpu_get_power,
TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
- size_t load_len, u32 dynamic_power, u32 static_power),
+ size_t load_len, u32 dynamic_power),
- TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power),
+ TP_ARGS(cpus, freq, load, load_len, dynamic_power),
TP_STRUCT__entry(
__bitmask(cpumask, num_possible_cpus())
@@ -104,7 +104,6 @@ TRACE_EVENT(thermal_power_cpu_get_power,
__dynamic_array(u32, load, load_len)
__field(size_t, load_len )
__field(u32, dynamic_power )
- __field(u32, static_power )
),
TP_fast_assign(
@@ -115,13 +114,12 @@ TRACE_EVENT(thermal_power_cpu_get_power,
load_len * sizeof(*load));
__entry->load_len = load_len;
__entry->dynamic_power = dynamic_power;
- __entry->static_power = static_power;
),
- TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d static_power=%d",
+ TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d",
__get_bitmask(cpumask), __entry->freq,
__print_array(__get_dynamic_array(load), __entry->load_len, 4),
- __entry->dynamic_power, __entry->static_power)
+ __entry->dynamic_power)
);
TRACE_EVENT(thermal_power_cpu_limit,
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 16e305e69f34..a57e4ee989d6 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -136,6 +136,24 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+#define decode_clockid(type) \
+ __print_symbolic(type, \
+ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
+ { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
+ { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
+ { CLOCK_TAI, "CLOCK_TAI" })
+
+#define decode_hrtimer_mode(mode) \
+ __print_symbolic(mode, \
+ { HRTIMER_MODE_ABS, "ABS" }, \
+ { HRTIMER_MODE_REL, "REL" }, \
+ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
+ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" }, \
+ { HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
+ { HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
+ { HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
+ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
+
/**
* hrtimer_init - called when the hrtimer is initialized
* @hrtimer: pointer to struct hrtimer
@@ -162,10 +180,8 @@ TRACE_EVENT(hrtimer_init,
),
TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
- __entry->clockid == CLOCK_REALTIME ?
- "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
- __entry->mode == HRTIMER_MODE_ABS ?
- "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+ decode_clockid(__entry->clockid),
+ decode_hrtimer_mode(__entry->mode))
);
/**
@@ -174,15 +190,16 @@ TRACE_EVENT(hrtimer_init,
*/
TRACE_EVENT(hrtimer_start,
- TP_PROTO(struct hrtimer *hrtimer),
+ TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode),
- TP_ARGS(hrtimer),
+ TP_ARGS(hrtimer, mode),
TP_STRUCT__entry(
__field( void *, hrtimer )
__field( void *, function )
__field( s64, expires )
__field( s64, softexpires )
+ __field( enum hrtimer_mode, mode )
),
TP_fast_assign(
@@ -190,12 +207,14 @@ TRACE_EVENT(hrtimer_start,
__entry->function = hrtimer->function;
__entry->expires = hrtimer_get_expires(hrtimer);
__entry->softexpires = hrtimer_get_softexpires(hrtimer);
+ __entry->mode = mode;
),
- TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
- __entry->hrtimer, __entry->function,
+ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
+ "mode=%s", __entry->hrtimer, __entry->function,
(unsigned long long) __entry->expires,
- (unsigned long long) __entry->softexpires)
+ (unsigned long long) __entry->softexpires,
+ decode_hrtimer_mode(__entry->mode))
);
/**
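
With the extra argument, hrtimer_start() callers feed the start mode
into the trace and decode_hrtimer_mode() renders it symbolically. A
minimal usage sketch, assuming a hypothetical relative soft timer:

    static struct hrtimer demo_timer;    /* illustrative only */

    hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
    hrtimer_start(&demo_timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
    /* trace output now ends with: mode=REL|SOFT */
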
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index d70b53e65f43..e0b8b9173e1c 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -192,12 +192,12 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
TRACE_EVENT(mm_shrink_slab_start,
TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
- long nr_objects_to_shrink, unsigned long pgs_scanned,
- unsigned long lru_pgs, unsigned long cache_items,
- unsigned long long delta, unsigned long total_scan),
+ long nr_objects_to_shrink, unsigned long cache_items,
+ unsigned long long delta, unsigned long total_scan,
+ int priority),
- TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs,
- cache_items, delta, total_scan),
+ TP_ARGS(shr, sc, nr_objects_to_shrink, cache_items, delta, total_scan,
+ priority),
TP_STRUCT__entry(
__field(struct shrinker *, shr)
@@ -205,11 +205,10 @@ TRACE_EVENT(mm_shrink_slab_start,
__field(int, nid)
__field(long, nr_objects_to_shrink)
__field(gfp_t, gfp_flags)
- __field(unsigned long, pgs_scanned)
- __field(unsigned long, lru_pgs)
__field(unsigned long, cache_items)
__field(unsigned long long, delta)
__field(unsigned long, total_scan)
+ __field(int, priority)
),
TP_fast_assign(
@@ -218,24 +217,22 @@ TRACE_EVENT(mm_shrink_slab_start,
__entry->nid = sc->nid;
__entry->nr_objects_to_shrink = nr_objects_to_shrink;
__entry->gfp_flags = sc->gfp_mask;
- __entry->pgs_scanned = pgs_scanned;
- __entry->lru_pgs = lru_pgs;
__entry->cache_items = cache_items;
__entry->delta = delta;
__entry->total_scan = total_scan;
+ __entry->priority = priority;
),
- TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld",
+ TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d",
__entry->shrink,
__entry->shr,
__entry->nid,
__entry->nr_objects_to_shrink,
show_gfp_flags(__entry->gfp_flags),
- __entry->pgs_scanned,
- __entry->lru_pgs,
__entry->cache_items,
__entry->delta,
- __entry->total_scan)
+ __entry->total_scan,
+ __entry->priority)
);
TRACE_EVENT(mm_shrink_slab_end,
diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
index fefb3d2c3fac..639fade14b23 100644
--- a/include/uapi/asm-generic/poll.h
+++ b/include/uapi/asm-generic/poll.h
@@ -3,35 +3,49 @@
#define __ASM_GENERIC_POLL_H
/* These are specified by iBCS2 */
-#define POLLIN 0x0001
-#define POLLPRI 0x0002
-#define POLLOUT 0x0004
-#define POLLERR 0x0008
-#define POLLHUP 0x0010
-#define POLLNVAL 0x0020
+#define POLLIN (__force __poll_t)0x0001
+#define POLLPRI (__force __poll_t)0x0002
+#define POLLOUT (__force __poll_t)0x0004
+#define POLLERR (__force __poll_t)0x0008
+#define POLLHUP (__force __poll_t)0x0010
+#define POLLNVAL (__force __poll_t)0x0020
/* The rest seem to be more-or-less nonstandard. Check them! */
-#define POLLRDNORM 0x0040
-#define POLLRDBAND 0x0080
+#define POLLRDNORM (__force __poll_t)0x0040
+#define POLLRDBAND (__force __poll_t)0x0080
#ifndef POLLWRNORM
-#define POLLWRNORM 0x0100
+#define POLLWRNORM (__force __poll_t)0x0100
#endif
#ifndef POLLWRBAND
-#define POLLWRBAND 0x0200
+#define POLLWRBAND (__force __poll_t)0x0200
#endif
#ifndef POLLMSG
-#define POLLMSG 0x0400
+#define POLLMSG (__force __poll_t)0x0400
#endif
#ifndef POLLREMOVE
-#define POLLREMOVE 0x1000
+#define POLLREMOVE (__force __poll_t)0x1000
#endif
#ifndef POLLRDHUP
-#define POLLRDHUP 0x2000
+#define POLLRDHUP (__force __poll_t)0x2000
#endif
-#define POLLFREE 0x4000 /* currently only for epoll */
+#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
-#define POLL_BUSY_LOOP 0x8000
+#define POLL_BUSY_LOOP (__force __poll_t)0x8000
+
+#ifdef __KERNEL__
+#ifndef __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ return (__force __u16)val;
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ return (__force __poll_t)v;
+}
+#endif
+#endif
struct pollfd {
int fd;
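
mangle_poll()/demangle_poll() are the sanctioned conversions between the
annotated __poll_t and the plain __u16 revents value that crosses the
user boundary. A sketch of the intended use, with a hypothetical helper
name:

    /* publish a driver's ->poll() mask to a user-visible pollfd */
    static void publish_revents(struct pollfd *pfd, __poll_t mask)
    {
            pfd->revents = mangle_poll(mask);    /* __poll_t -> __u16 */
    }
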
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index e447283b8f52..254afc31e3be 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -23,10 +23,6 @@ typedef union sigval {
#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
#endif
-#ifndef __ARCH_SI_UID_T
-#define __ARCH_SI_UID_T __kernel_uid32_t
-#endif
-
/*
* The default "si_band" type is "long", as specified by POSIX.
* However, some architectures want to override this to "int"
@@ -44,12 +40,15 @@ typedef union sigval {
#define __ARCH_SI_ATTRIBUTES
#endif
-#ifndef HAVE_ARCH_SIGINFO_T
-
typedef struct siginfo {
int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
int si_errno;
int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
union {
int _pad[SI_PAD_SIZE];
@@ -57,14 +56,13 @@ typedef struct siginfo {
/* kill() */
struct {
__kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
__kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
- char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
@@ -72,34 +70,47 @@ typedef struct siginfo {
/* POSIX.1b signals */
struct {
__kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
__kernel_pid_t _pid; /* which child */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
int _status; /* exit code */
__ARCH_SI_CLOCK_T _utime;
__ARCH_SI_CLOCK_T _stime;
} _sigchld;
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void __user *_addr; /* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
int _trapno; /* TRAP # which caused the signal */
#endif
- short _addr_lsb; /* LSB of the reported address */
+#ifdef __ia64__
+ int _imm; /* immediate value for "break" */
+ unsigned int _flags; /* see ia64 si_flags */
+ unsigned long _isr; /* isr */
+#endif
union {
+ /*
+ * used when si_code=BUS_MCEERR_AR or
+ * si_code=BUS_MCEERR_AO
+ */
+ short _addr_lsb; /* LSB of the reported address */
/* used when si_code=SEGV_BNDERR */
struct {
+ short _dummy_bnd;
void __user *_lower;
void __user *_upper;
} _addr_bnd;
/* used when si_code=SEGV_PKUERR */
- __u32 _pkey;
+ struct {
+ short _dummy_pkey;
+ __u32 _pkey;
+ } _addr_pkey;
};
} _sigfault;
@@ -118,10 +129,6 @@ typedef struct siginfo {
} _sifields;
} __ARCH_SI_ATTRIBUTES siginfo_t;
-/* If the arch shares siginfo, then it has SIGSYS. */
-#define __ARCH_SIGSYS
-#endif
-
/*
* How these fields are to be accessed.
*/
@@ -143,14 +150,12 @@ typedef struct siginfo {
#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_lower _sifields._sigfault._addr_bnd._lower
#define si_upper _sifields._sigfault._addr_bnd._upper
-#define si_pkey _sifields._sigfault._pkey
+#define si_pkey _sifields._sigfault._addr_pkey._pkey
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
-#ifdef __ARCH_SIGSYS
#define si_call_addr _sifields._sigsys._call_addr
#define si_syscall _sifields._sigsys._syscall
#define si_arch _sifields._sigsys._arch
-#endif
/*
* si_code values
@@ -165,6 +170,7 @@ typedef struct siginfo {
#define SI_SIGIO -5 /* sent by queued SIGIO */
#define SI_TKILL -6 /* sent by tkill system call */
#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
+#define SI_ASYNCNL -60 /* sent by glibc async name lookup completion */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
@@ -173,14 +179,34 @@ typedef struct siginfo {
* SIGILL si_codes
*/
#define ILL_ILLOPC 1 /* illegal opcode */
+#ifdef __bfin__
+# define ILL_ILLPARAOP 2 /* illegal opcode combine */
+#endif
#define ILL_ILLOPN 2 /* illegal operand */
#define ILL_ILLADR 3 /* illegal addressing mode */
#define ILL_ILLTRP 4 /* illegal trap */
+#ifdef __bfin__
+# define ILL_ILLEXCPT 4 /* unrecoverable exception */
+#endif
#define ILL_PRVOPC 5 /* privileged opcode */
#define ILL_PRVREG 6 /* privileged register */
#define ILL_COPROC 7 /* coprocessor error */
#define ILL_BADSTK 8 /* internal stack error */
-#define NSIGILL 8
+#ifdef __bfin__
+# define ILL_CPLB_VI 9 /* D/I CPLB protect violation */
+# define ILL_CPLB_MISS 10 /* D/I CPLB miss */
+# define ILL_CPLB_MULHIT 11 /* D/I CPLB multiple hit */
+#endif
+#ifdef __tile__
+# define ILL_DBLFLT 9 /* double fault */
+# define ILL_HARDWALL 10 /* user networks hardwall violation */
+#endif
+#ifdef __ia64__
+# define ILL_BADIADDR 9 /* unimplemented instruction address */
+# define __ILL_BREAK 10 /* illegal break */
+# define __ILL_BNDMOD 11 /* bundle-update (modification) in progress */
+#endif
+#define NSIGILL 11
/*
* SIGFPE si_codes
@@ -193,15 +219,33 @@ typedef struct siginfo {
#define FPE_FLTRES 6 /* floating point inexact result */
#define FPE_FLTINV 7 /* floating point invalid operation */
#define FPE_FLTSUB 8 /* subscript out of range */
-#define NSIGFPE 8
+#ifdef __frv__
+# define FPE_MDAOVF 9 /* media overflow */
+#endif
+#ifdef __ia64__
+# define __FPE_DECOVF 9 /* decimal overflow */
+# define __FPE_DECDIV 10 /* decimal division by zero */
+# define __FPE_DECERR 11 /* packed decimal error */
+# define __FPE_INVASC 12 /* invalid ASCII digit */
+# define __FPE_INVDEC 13 /* invalid decimal digit */
+#endif
+#define NSIGFPE 13
/*
* SIGSEGV si_codes
*/
#define SEGV_MAPERR 1 /* address not mapped to object */
#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
-#define SEGV_BNDERR 3 /* failed address bound checks */
-#define SEGV_PKUERR 4 /* failed protection key checks */
+#ifdef __bfin__
+# define SEGV_STACKFLOW 3 /* stack overflow */
+#else
+# define SEGV_BNDERR 3 /* failed address bound checks */
+#endif
+#ifdef __ia64__
+# define __SEGV_PSTKOVF 4 /* paragraph stack overflow */
+#else
+# define SEGV_PKUERR 4 /* failed protection key checks */
+#endif
#define NSIGSEGV 4
/*
@@ -210,8 +254,12 @@ typedef struct siginfo {
#define BUS_ADRALN 1 /* invalid address alignment */
#define BUS_ADRERR 2 /* non-existent physical address */
#define BUS_OBJERR 3 /* object specific hardware error */
+#ifdef __bfin__
+# define BUS_OPFETCH 4 /* error from instruction fetch */
+#else
/* hardware memory error consumed on a machine check: action required */
-#define BUS_MCEERR_AR 4
+# define BUS_MCEERR_AR 4
+#endif
/* hardware memory error detected in process but not consumed: action optional */
#define BUS_MCEERR_AO 5
#define NSIGBUS 5
@@ -223,9 +271,20 @@ typedef struct siginfo {
#define TRAP_TRACE 2 /* process trace trap */
#define TRAP_BRANCH 3 /* process taken branch trap */
#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */
+#ifdef __bfin__
+# define TRAP_STEP 1 /* single-step breakpoint */
+# define TRAP_TRACEFLOW 2 /* trace buffer overflow */
+# define TRAP_WATCHPT 3 /* watchpoint match */
+# define TRAP_ILLTRAP 4 /* illegal trap */
+#endif
#define NSIGTRAP 4
/*
+ * There is an additional set of SIGTRAP si_codes used by ptrace
+ * that are of the form ((PTRACE_EVENT_XXX << 8) | SIGTRAP)
+ */
+
+/*
* SIGCHLD si_codes
*/
#define CLD_EXITED 1 /* child has exited */
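
The new _addr_bnd/_addr_pkey wrappers add a leading short so that
_lower and _pkey keep the offsets they had when _addr_lsb lived outside
the union. A sketch of a userspace consumer, assuming a SIGSEGV handler
installed with SA_SIGINFO:

    #include <signal.h>
    #include <stdio.h>

    static void segv_handler(int sig, siginfo_t *info, void *ctx)
    {
            if (sig == SIGSEGV && info->si_code == SEGV_PKUERR)
                    fprintf(stderr, "pkey fault: key %u at %p\n",
                            info->si_pkey, info->si_addr);
    }
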
diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h
new file mode 100644
index 000000000000..af0630ba5437
--- /dev/null
+++ b/include/uapi/linux/arm_sdei.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017 Arm Ltd. */
+#ifndef _UAPI_LINUX_ARM_SDEI_H
+#define _UAPI_LINUX_ARM_SDEI_H
+
+#define SDEI_1_0_FN_BASE 0xC4000020
+#define SDEI_1_0_MASK 0xFFFFFFE0
+#define SDEI_1_0_FN(n) (SDEI_1_0_FN_BASE + (n))
+
+#define SDEI_1_0_FN_SDEI_VERSION SDEI_1_0_FN(0x00)
+#define SDEI_1_0_FN_SDEI_EVENT_REGISTER SDEI_1_0_FN(0x01)
+#define SDEI_1_0_FN_SDEI_EVENT_ENABLE SDEI_1_0_FN(0x02)
+#define SDEI_1_0_FN_SDEI_EVENT_DISABLE SDEI_1_0_FN(0x03)
+#define SDEI_1_0_FN_SDEI_EVENT_CONTEXT SDEI_1_0_FN(0x04)
+#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE SDEI_1_0_FN(0x05)
+#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME SDEI_1_0_FN(0x06)
+#define SDEI_1_0_FN_SDEI_EVENT_UNREGISTER SDEI_1_0_FN(0x07)
+#define SDEI_1_0_FN_SDEI_EVENT_STATUS SDEI_1_0_FN(0x08)
+#define SDEI_1_0_FN_SDEI_EVENT_GET_INFO SDEI_1_0_FN(0x09)
+#define SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET SDEI_1_0_FN(0x0A)
+#define SDEI_1_0_FN_SDEI_PE_MASK SDEI_1_0_FN(0x0B)
+#define SDEI_1_0_FN_SDEI_PE_UNMASK SDEI_1_0_FN(0x0C)
+#define SDEI_1_0_FN_SDEI_INTERRUPT_BIND SDEI_1_0_FN(0x0D)
+#define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E)
+#define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11)
+#define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12)
+
+#define SDEI_VERSION_MAJOR_SHIFT 48
+#define SDEI_VERSION_MAJOR_MASK 0x7fff
+#define SDEI_VERSION_MINOR_SHIFT 32
+#define SDEI_VERSION_MINOR_MASK 0xffff
+#define SDEI_VERSION_VENDOR_SHIFT 0
+#define SDEI_VERSION_VENDOR_MASK 0xffffffff
+
+#define SDEI_VERSION_MAJOR(x) (((x) >> SDEI_VERSION_MAJOR_SHIFT) & SDEI_VERSION_MAJOR_MASK)
+#define SDEI_VERSION_MINOR(x) (((x) >> SDEI_VERSION_MINOR_SHIFT) & SDEI_VERSION_MINOR_MASK)
+#define SDEI_VERSION_VENDOR(x) (((x) >> SDEI_VERSION_VENDOR_SHIFT) & SDEI_VERSION_VENDOR_MASK)
+
+/* SDEI return values */
+#define SDEI_SUCCESS 0
+#define SDEI_NOT_SUPPORTED -1
+#define SDEI_INVALID_PARAMETERS -2
+#define SDEI_DENIED -3
+#define SDEI_PENDING -5
+#define SDEI_OUT_OF_RESOURCE -10
+
+/* EVENT_REGISTER flags */
+#define SDEI_EVENT_REGISTER_RM_ANY 0
+#define SDEI_EVENT_REGISTER_RM_PE 1
+
+/* EVENT_STATUS return value bits */
+#define SDEI_EVENT_STATUS_RUNNING 2
+#define SDEI_EVENT_STATUS_ENABLED 1
+#define SDEI_EVENT_STATUS_REGISTERED 0
+
+/* EVENT_COMPLETE status values */
+#define SDEI_EV_HANDLED 0
+#define SDEI_EV_FAILED 1
+
+/* GET_INFO values */
+#define SDEI_EVENT_INFO_EV_TYPE 0
+#define SDEI_EVENT_INFO_EV_SIGNALED 1
+#define SDEI_EVENT_INFO_EV_PRIORITY 2
+#define SDEI_EVENT_INFO_EV_ROUTING_MODE 3
+#define SDEI_EVENT_INFO_EV_ROUTING_AFF 4
+
+/* and their results */
+#define SDEI_EVENT_TYPE_PRIVATE 0
+#define SDEI_EVENT_TYPE_SHARED 1
+#define SDEI_EVENT_PRIORITY_NORMAL 0
+#define SDEI_EVENT_PRIORITY_CRITICAL 1
+
+#endif /* _UAPI_LINUX_ARM_SDEI_H */
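
A sketch of decoding the SDEI_VERSION result with the helper macros,
using a hypothetical firmware reply:

    unsigned long long ver = 0x0001000000000000ULL;    /* hypothetical */

    unsigned int major = SDEI_VERSION_MAJOR(ver);    /* 1 */
    unsigned int minor = SDEI_VERSION_MINOR(ver);    /* 0 */
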
diff --git a/net/batman-adv/packet.h b/include/uapi/linux/batadv_packet.h
index 8e8a5db197cb..5cb360be2a11 100644
--- a/net/batman-adv/packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -15,13 +16,20 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _NET_BATMAN_ADV_PACKET_H_
-#define _NET_BATMAN_ADV_PACKET_H_
+#ifndef _UAPI_LINUX_BATADV_PACKET_H_
+#define _UAPI_LINUX_BATADV_PACKET_H_
#include <asm/byteorder.h>
+#include <linux/if_ether.h>
#include <linux/types.h>
-#define batadv_tp_is_error(n) ((u8)(n) > 127 ? 1 : 0)
+/**
+ * batadv_tp_is_error() - Check throughput meter return code for error
+ * @n: throughput meter return code
+ *
+ * Return: 0 when no error was detected, != 0 otherwise
+ */
+#define batadv_tp_is_error(n) ((__u8)(n) > 127 ? 1 : 0)
/**
* enum batadv_packettype - types for batman-adv encapsulated packets
@@ -83,12 +91,20 @@ enum batadv_subtype {
* one hop neighbor on the interface where it was originally received.
*/
enum batadv_iv_flags {
- BATADV_NOT_BEST_NEXT_HOP = BIT(0),
- BATADV_PRIMARIES_FIRST_HOP = BIT(1),
- BATADV_DIRECTLINK = BIT(2),
+ BATADV_NOT_BEST_NEXT_HOP = 1UL << 0,
+ BATADV_PRIMARIES_FIRST_HOP = 1UL << 1,
+ BATADV_DIRECTLINK = 1UL << 2,
};
-/* ICMP message types */
+/**
+ * enum batadv_icmp_packettype - ICMP message types
+ * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST
+ * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found
+ * @BATADV_ECHO_REQUEST: request BATADV_ECHO_REPLY from destination
+ * @BATADV_TTL_EXCEEDED: error after BATADV_ECHO_REQUEST traversed too many hops
+ * @BATADV_PARAMETER_PROBLEM: return code for malformed messages
+ * @BATADV_TP: throughput meter packet
+ */
enum batadv_icmp_packettype {
BATADV_ECHO_REPLY = 0,
BATADV_DESTINATION_UNREACHABLE = 3,
@@ -106,9 +122,9 @@ enum batadv_icmp_packettype {
* @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets
*/
enum batadv_mcast_flags {
- BATADV_MCAST_WANT_ALL_UNSNOOPABLES = BIT(0),
- BATADV_MCAST_WANT_ALL_IPV4 = BIT(1),
- BATADV_MCAST_WANT_ALL_IPV6 = BIT(2),
+ BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0,
+ BATADV_MCAST_WANT_ALL_IPV4 = 1UL << 1,
+ BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2,
};
/* tt data subtypes */
@@ -122,10 +138,10 @@ enum batadv_mcast_flags {
* @BATADV_TT_FULL_TABLE: contains full table to replace existing table
*/
enum batadv_tt_data_flags {
- BATADV_TT_OGM_DIFF = BIT(0),
- BATADV_TT_REQUEST = BIT(1),
- BATADV_TT_RESPONSE = BIT(2),
- BATADV_TT_FULL_TABLE = BIT(4),
+ BATADV_TT_OGM_DIFF = 1UL << 0,
+ BATADV_TT_REQUEST = 1UL << 1,
+ BATADV_TT_RESPONSE = 1UL << 2,
+ BATADV_TT_FULL_TABLE = 1UL << 4,
};
/**
@@ -133,10 +149,17 @@ enum batadv_tt_data_flags {
* @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
*/
enum batadv_vlan_flags {
- BATADV_VLAN_HAS_TAG = BIT(15),
+ BATADV_VLAN_HAS_TAG = 1UL << 15,
};
-/* claim frame types for the bridge loop avoidance */
+/**
+ * enum batadv_bla_claimframe - claim frame types for the bridge loop avoidance
+ * @BATADV_CLAIM_TYPE_CLAIM: claim of a client mac address
+ * @BATADV_CLAIM_TYPE_UNCLAIM: unclaim of a client mac address
+ * @BATADV_CLAIM_TYPE_ANNOUNCE: announcement of backbone with current crc
+ * @BATADV_CLAIM_TYPE_REQUEST: request of full claim table
+ * @BATADV_CLAIM_TYPE_LOOPDETECT: mesh-traversing loop detect packet
+ */
enum batadv_bla_claimframe {
BATADV_CLAIM_TYPE_CLAIM = 0x00,
BATADV_CLAIM_TYPE_UNCLAIM = 0x01,
@@ -168,8 +191,8 @@ enum batadv_tvlv_type {
* transport the claim type and the group id
*/
struct batadv_bla_claim_dst {
- u8 magic[3]; /* FF:43:05 */
- u8 type; /* bla_claimframe */
+ __u8 magic[3]; /* FF:43:05 */
+ __u8 type; /* bla_claimframe */
__be16 group; /* group id */
};
@@ -189,15 +212,15 @@ struct batadv_bla_claim_dst {
* @tvlv_len: length of tvlv data following the ogm header
*/
struct batadv_ogm_packet {
- u8 packet_type;
- u8 version;
- u8 ttl;
- u8 flags;
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 flags;
__be32 seqno;
- u8 orig[ETH_ALEN];
- u8 prev_sender[ETH_ALEN];
- u8 reserved;
- u8 tq;
+ __u8 orig[ETH_ALEN];
+ __u8 prev_sender[ETH_ALEN];
+ __u8 reserved;
+ __u8 tq;
__be16 tvlv_len;
/* __packed is not needed as the struct size is divisible by 4,
* and the largest data type in this struct has a size of 4.
@@ -218,12 +241,12 @@ struct batadv_ogm_packet {
* @throughput: the currently flooded path throughput
*/
struct batadv_ogm2_packet {
- u8 packet_type;
- u8 version;
- u8 ttl;
- u8 flags;
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 flags;
__be32 seqno;
- u8 orig[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
__be16 tvlv_len;
__be32 throughput;
/* __packed is not needed as the struct size is divisible by 4,
@@ -242,9 +265,9 @@ struct batadv_ogm2_packet {
* @elp_interval: currently used ELP sending interval in ms
*/
struct batadv_elp_packet {
- u8 packet_type;
- u8 version;
- u8 orig[ETH_ALEN];
+ __u8 packet_type;
+ __u8 version;
+ __u8 orig[ETH_ALEN];
__be32 seqno;
__be32 elp_interval;
};
@@ -267,14 +290,14 @@ struct batadv_elp_packet {
* members are padded the same way as they are in real packets.
*/
struct batadv_icmp_header {
- u8 packet_type;
- u8 version;
- u8 ttl;
- u8 msg_type; /* see ICMP message types above */
- u8 dst[ETH_ALEN];
- u8 orig[ETH_ALEN];
- u8 uid;
- u8 align[3];
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 msg_type; /* see ICMP message types above */
+ __u8 dst[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
+ __u8 uid;
+ __u8 align[3];
};
/**
@@ -290,14 +313,14 @@ struct batadv_icmp_header {
* @seqno: ICMP sequence number
*/
struct batadv_icmp_packet {
- u8 packet_type;
- u8 version;
- u8 ttl;
- u8 msg_type; /* see ICMP message types above */
- u8 dst[ETH_ALEN];
- u8 orig[ETH_ALEN];
- u8 uid;
- u8 reserved;
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 msg_type; /* see ICMP message types above */
+ __u8 dst[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
+ __u8 uid;
+ __u8 reserved;
__be16 seqno;
};
@@ -319,15 +342,15 @@ struct batadv_icmp_packet {
* store it using network order
*/
struct batadv_icmp_tp_packet {
- u8 packet_type;
- u8 version;
- u8 ttl;
- u8 msg_type; /* see ICMP message types above */
- u8 dst[ETH_ALEN];
- u8 orig[ETH_ALEN];
- u8 uid;
- u8 subtype;
- u8 session[2];
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 msg_type; /* see ICMP message types above */
+ __u8 dst[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
+ __u8 uid;
+ __u8 subtype;
+ __u8 session[2];
__be32 seqno;
__be32 timestamp;
};
@@ -358,16 +381,16 @@ enum batadv_icmp_tp_subtype {
* @rr: route record array
*/
struct batadv_icmp_packet_rr {
- u8 packet_type;
- u8 version;
- u8 ttl;
- u8 msg_type; /* see ICMP message types above */
- u8 dst[ETH_ALEN];
- u8 orig[ETH_ALEN];
- u8 uid;
- u8 rr_cur;
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 msg_type; /* see ICMP message types above */
+ __u8 dst[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
+ __u8 uid;
+ __u8 rr_cur;
__be16 seqno;
- u8 rr[BATADV_RR_LEN][ETH_ALEN];
+ __u8 rr[BATADV_RR_LEN][ETH_ALEN];
};
#define BATADV_ICMP_MAX_PACKET_SIZE sizeof(struct batadv_icmp_packet_rr)
@@ -393,11 +416,11 @@ struct batadv_icmp_packet_rr {
* @dest: originator destination of the unicast packet
*/
struct batadv_unicast_packet {
- u8 packet_type;
- u8 version;
- u8 ttl;
- u8 ttvn; /* destination translation table version number */
- u8 dest[ETH_ALEN];
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 ttvn; /* destination translation table version number */
+ __u8 dest[ETH_ALEN];
/* "4 bytes boundary + 2 bytes" long to make the payload after the
* following ethernet header again 4 bytes boundary aligned
*/
@@ -412,9 +435,9 @@ struct batadv_unicast_packet {
*/
struct batadv_unicast_4addr_packet {
struct batadv_unicast_packet u;
- u8 src[ETH_ALEN];
- u8 subtype;
- u8 reserved;
+ __u8 src[ETH_ALEN];
+ __u8 subtype;
+ __u8 reserved;
/* "4 bytes boundary + 2 bytes" long to make the payload after the
* following ethernet header again 4 bytes boundary aligned
*/
@@ -434,22 +457,22 @@ struct batadv_unicast_4addr_packet {
* @total_size: size of the merged packet
*/
struct batadv_frag_packet {
- u8 packet_type;
- u8 version; /* batman version field */
- u8 ttl;
+ __u8 packet_type;
+ __u8 version; /* batman version field */
+ __u8 ttl;
#if defined(__BIG_ENDIAN_BITFIELD)
- u8 no:4;
- u8 priority:3;
- u8 reserved:1;
+ __u8 no:4;
+ __u8 priority:3;
+ __u8 reserved:1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 reserved:1;
- u8 priority:3;
- u8 no:4;
+ __u8 reserved:1;
+ __u8 priority:3;
+ __u8 no:4;
#else
#error "unknown bitfield endianness"
#endif
- u8 dest[ETH_ALEN];
- u8 orig[ETH_ALEN];
+ __u8 dest[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
__be16 seqno;
__be16 total_size;
};
@@ -464,12 +487,12 @@ struct batadv_frag_packet {
* @orig: originator of the broadcast packet
*/
struct batadv_bcast_packet {
- u8 packet_type;
- u8 version; /* batman version field */
- u8 ttl;
- u8 reserved;
+ __u8 packet_type;
+ __u8 version; /* batman version field */
+ __u8 ttl;
+ __u8 reserved;
__be32 seqno;
- u8 orig[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
/* "4 bytes boundary + 2 bytes" long to make the payload after the
* following ethernet header again 4 bytes boundary aligned
*/
@@ -493,19 +516,19 @@ struct batadv_bcast_packet {
* @coded_len: length of network coded part of the payload
*/
struct batadv_coded_packet {
- u8 packet_type;
- u8 version; /* batman version field */
- u8 ttl;
- u8 first_ttvn;
- /* u8 first_dest[ETH_ALEN]; - saved in mac header destination */
- u8 first_source[ETH_ALEN];
- u8 first_orig_dest[ETH_ALEN];
+ __u8 packet_type;
+ __u8 version; /* batman version field */
+ __u8 ttl;
+ __u8 first_ttvn;
+ /* __u8 first_dest[ETH_ALEN]; - saved in mac header destination */
+ __u8 first_source[ETH_ALEN];
+ __u8 first_orig_dest[ETH_ALEN];
__be32 first_crc;
- u8 second_ttl;
- u8 second_ttvn;
- u8 second_dest[ETH_ALEN];
- u8 second_source[ETH_ALEN];
- u8 second_orig_dest[ETH_ALEN];
+ __u8 second_ttl;
+ __u8 second_ttvn;
+ __u8 second_dest[ETH_ALEN];
+ __u8 second_source[ETH_ALEN];
+ __u8 second_orig_dest[ETH_ALEN];
__be32 second_crc;
__be16 coded_len;
};
@@ -524,14 +547,14 @@ struct batadv_coded_packet {
* @align: 2 bytes to align the header to a 4 byte boundary
*/
struct batadv_unicast_tvlv_packet {
- u8 packet_type;
- u8 version; /* batman version field */
- u8 ttl;
- u8 reserved;
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
+ __u8 packet_type;
+ __u8 version; /* batman version field */
+ __u8 ttl;
+ __u8 reserved;
+ __u8 dst[ETH_ALEN];
+ __u8 src[ETH_ALEN];
__be16 tvlv_len;
- u16 align;
+ __u16 align;
};
/**
@@ -541,8 +564,8 @@ struct batadv_unicast_tvlv_packet {
* @len: tvlv container length
*/
struct batadv_tvlv_hdr {
- u8 type;
- u8 version;
+ __u8 type;
+ __u8 version;
__be16 len;
};
@@ -565,8 +588,8 @@ struct batadv_tvlv_gateway_data {
* one batadv_tvlv_tt_vlan_data object per announced vlan
*/
struct batadv_tvlv_tt_data {
- u8 flags;
- u8 ttvn;
+ __u8 flags;
+ __u8 ttvn;
__be16 num_vlan;
};
@@ -580,7 +603,7 @@ struct batadv_tvlv_tt_data {
struct batadv_tvlv_tt_vlan_data {
__be32 crc;
__be16 vid;
- u16 reserved;
+ __u16 reserved;
};
/**
@@ -592,9 +615,9 @@ struct batadv_tvlv_tt_vlan_data {
* @vid: VLAN identifier
*/
struct batadv_tvlv_tt_change {
- u8 flags;
- u8 reserved[3];
- u8 addr[ETH_ALEN];
+ __u8 flags;
+ __u8 reserved[3];
+ __u8 addr[ETH_ALEN];
__be16 vid;
};
@@ -604,7 +627,7 @@ struct batadv_tvlv_tt_change {
* @vid: VLAN identifier
*/
struct batadv_tvlv_roam_adv {
- u8 client[ETH_ALEN];
+ __u8 client[ETH_ALEN];
__be16 vid;
};
@@ -614,8 +637,8 @@ struct batadv_tvlv_roam_adv {
* @reserved: reserved field
*/
struct batadv_tvlv_mcast_data {
- u8 flags;
- u8 reserved[3];
+ __u8 flags;
+ __u8 reserved[3];
};
-#endif /* _NET_BATMAN_ADV_PACKET_H_ */
+#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
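
The u8 -> __u8 and BIT(n) -> 1UL << n conversions above are what make
the header exportable: uapi headers may only depend on <linux/types.h>,
not on kernel-internal helpers. Userspace then tests the flags the
usual way; a short sketch with a hypothetical consumer:

    static void handle_ogm(const struct batadv_ogm_packet *ogm)
    {
            if (ogm->flags & BATADV_DIRECTLINK)
                    note_direct_link(ogm);    /* hypothetical */
    }
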
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index efd641c8a5d6..ae00c99cbed0 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,18 +1,25 @@
+/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#ifndef _UAPI_LINUX_BATMAN_ADV_H_
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4c223ab30293..db6bdc375126 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -17,7 +17,7 @@
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
-#define BPF_DW 0x18 /* double word */
+#define BPF_DW 0x18 /* double word (64-bit) */
#define BPF_XADD 0xc0 /* exclusive add */
/* alu/jmp fields */
@@ -197,8 +197,14 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL 1
+
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
@@ -239,6 +245,7 @@ union bpf_attr {
* BPF_F_NUMA_NODE is set).
*/
char map_name[BPF_OBJ_NAME_LEN];
+ __u32 map_ifindex; /* ifindex of netdev to create on */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -635,6 +642,14 @@ union bpf_attr {
* @optlen: length of optval in bytes
* Return: 0 or negative error
*
+ * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
+ * Set callback flags for sock_ops
+ * @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
+ * @flags: flags value
+ * Return: 0 on success
+ * -EINVAL if there is no full tcp socket or if flags
+ * contain bits not supported by the current kernel
+ *
* int bpf_skb_adjust_room(skb, len_diff, mode, flags)
* Grow or shrink room in sk_buff.
* @skb: pointer to skb
@@ -677,6 +692,10 @@ union bpf_attr {
* @buf: buf to fill
* @buf_size: size of the buf
* Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ * @pt_regs: pointer to struct pt_regs
+ * @rc: the return value to set
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -736,7 +755,9 @@ union bpf_attr {
FN(xdp_adjust_meta), \
FN(perf_event_read_value), \
FN(perf_prog_read_value), \
- FN(getsockopt),
+ FN(getsockopt), \
+ FN(override_return), \
+ FN(sock_ops_cb_flags_set),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -888,6 +909,9 @@ struct xdp_md {
__u32 data;
__u32 data_end;
__u32 data_meta;
+ /* The accesses below go through struct xdp_rxq_info */
+ __u32 ingress_ifindex; /* rxq->dev->ifindex */
+ __u32 rx_queue_index; /* rxq->queue_index */
};
enum sk_action {
@@ -910,6 +934,9 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u64 netns_dev;
+ __u64 netns_ino;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -920,6 +947,9 @@ struct bpf_map_info {
__u32 max_entries;
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u64 netns_dev;
+ __u64 netns_ino;
} __attribute__((aligned(8)));
/* User bpf_sock_ops struct to access socket values and specify request ops
@@ -931,8 +961,9 @@ struct bpf_map_info {
struct bpf_sock_ops {
__u32 op;
union {
- __u32 reply;
- __u32 replylong[4];
+ __u32 args[4]; /* Optionally passed to bpf program */
+ __u32 reply; /* Returned by bpf program */
+ __u32 replylong[4]; /* Optionally returned by bpf prog */
};
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -941,8 +972,45 @@ struct bpf_sock_ops {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
+ __u32 is_fullsock; /* Some TCP fields are only valid if
+ * there is a full socket. If not, the
+ * fields read as zero.
+ */
+ __u32 snd_cwnd;
+ __u32 srtt_us; /* Averaged RTT << 3 in usecs */
+ __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
+ __u32 state;
+ __u32 rtt_min;
+ __u32 snd_ssthresh;
+ __u32 rcv_nxt;
+ __u32 snd_nxt;
+ __u32 snd_una;
+ __u32 mss_cache;
+ __u32 ecn_flags;
+ __u32 rate_delivered;
+ __u32 rate_interval_us;
+ __u32 packets_out;
+ __u32 retrans_out;
+ __u32 total_retrans;
+ __u32 segs_in;
+ __u32 data_segs_in;
+ __u32 segs_out;
+ __u32 data_segs_out;
+ __u32 lost_out;
+ __u32 sacked_out;
+ __u32 sk_txhash;
+ __u64 bytes_received;
+ __u64 bytes_acked;
};
+/* Definitions for bpf_sock_ops_cb_flags */
+#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
+#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
+#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
+#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
+ * supported cb flags
+ */
+
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
*/
@@ -976,6 +1044,43 @@ enum {
* a congestion threshold. RTTs above
* this indicate congestion
*/
+ BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
+ * Arg1: value of icsk_retransmits
+ * Arg2: value of icsk_rto
+ * Arg3: whether RTO has expired
+ */
+ BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
+ * Arg1: sequence number of 1st byte
+ * Arg2: # segments
+ * Arg3: return value of
+ * tcp_transmit_skb (0 => success)
+ */
+ BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
+ * Arg1: old_state
+ * Arg2: new_state
+ */
+};
+
+/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
+ * changes between the TCP and BPF versions. Ideally this should never happen.
+ * If it does, we need to add code to convert them before calling
+ * the BPF sock_ops function.
+ */
+enum {
+ BPF_TCP_ESTABLISHED = 1,
+ BPF_TCP_SYN_SENT,
+ BPF_TCP_SYN_RECV,
+ BPF_TCP_FIN_WAIT1,
+ BPF_TCP_FIN_WAIT2,
+ BPF_TCP_TIME_WAIT,
+ BPF_TCP_CLOSE,
+ BPF_TCP_CLOSE_WAIT,
+ BPF_TCP_LAST_ACK,
+ BPF_TCP_LISTEN,
+ BPF_TCP_CLOSING, /* Now a valid state */
+ BPF_TCP_NEW_SYN_RECV,
+
+ BPF_TCP_MAX_STATES /* Leave at the end! */
};
#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
@@ -995,7 +1100,8 @@ struct bpf_perf_event_value {
#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
struct bpf_cgroup_dev_ctx {
- __u32 access_type; /* (access << 16) | type */
+ /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+ __u32 access_type;
__u32 major;
__u32 minor;
};
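
A sketch of a sock_ops program opting into the new callbacks, assuming
libbpf-style conventions (SEC() is not part of this header); the helper
is the bpf_sock_ops_cb_flags_set() documented above:

    SEC("sockops")
    int sockops_prog(struct bpf_sock_ops *skops)
    {
            if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
                    bpf_sock_ops_cb_flags_set(skops,
                                              BPF_SOCK_OPS_RTO_CB_FLAG |
                                              BPF_SOCK_OPS_STATE_CB_FLAG);
            return 1;
    }
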
diff --git a/include/uapi/linux/bpf_common.h b/include/uapi/linux/bpf_common.h
index 18be90725ab0..ee97668bdadb 100644
--- a/include/uapi/linux/bpf_common.h
+++ b/include/uapi/linux/bpf_common.h
@@ -15,9 +15,10 @@
/* ld/ldx fields */
#define BPF_SIZE(code) ((code) & 0x18)
-#define BPF_W 0x00
-#define BPF_H 0x08
-#define BPF_B 0x10
+#define BPF_W 0x00 /* 32-bit */
+#define BPF_H 0x08 /* 16-bit */
+#define BPF_B 0x10 /* 8-bit */
+/* eBPF BPF_DW 0x18 64-bit */
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00
#define BPF_ABS 0x20
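
With the width comments in place, decoding an opcode reads naturally; a
quick sketch:

    __u8 code = 0x79;    /* hypothetical: BPF_LDX | BPF_MEM | BPF_DW */
    __u8 size = BPF_SIZE(code);    /* 0x18 -> BPF_DW, 64-bit */
    __u8 mode = BPF_MODE(code);    /* 0x60 -> BPF_MEM */
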
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index ce615b75e855..c8d99b9ca550 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -33,7 +33,12 @@ struct btrfs_ioctl_vol_args {
char name[BTRFS_PATH_NAME_MAX + 1];
};
-#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+#define BTRFS_SUBVOL_NAME_MAX 4039
+
+#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
+#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3)
@@ -101,11 +106,7 @@ struct btrfs_ioctl_qgroup_limit_args {
* - BTRFS_IOC_SUBVOL_GETFLAGS
* - BTRFS_IOC_SUBVOL_SETFLAGS
*/
-#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
-#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
-#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
-#define BTRFS_SUBVOL_NAME_MAX 4039
struct btrfs_ioctl_vol_args_v2 {
__s64 fd;
__u64 transid;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 6d6e5da51527..aff1356c2bb8 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -456,6 +456,8 @@ struct btrfs_free_space_header {
#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
+#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
+#define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35)
/*
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 96710e76d5ce..9f56fad4785b 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -132,6 +132,7 @@ enum {
IFLA_CAN_TERMINATION_CONST,
IFLA_CAN_BITRATE_CONST,
IFLA_CAN_DATA_BITRATE_CONST,
+ IFLA_CAN_BITRATE_MAX,
__IFLA_CAN_MAX
};
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 6665df69e26a..1df65a4c2044 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -70,6 +70,13 @@ enum devlink_command {
DEVLINK_CMD_DPIPE_ENTRIES_GET,
DEVLINK_CMD_DPIPE_HEADERS_GET,
DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+ DEVLINK_CMD_RESOURCE_SET,
+ DEVLINK_CMD_RESOURCE_DUMP,
+
+ /* Hot driver reload; makes configuration changes take place. The
+ * devlink instance is not released during the process.
+ */
+ DEVLINK_CMD_RELOAD,
/* add new commands above here */
__DEVLINK_CMD_MAX,
@@ -202,6 +209,20 @@ enum devlink_attr {
DEVLINK_ATTR_PAD,
DEVLINK_ATTR_ESWITCH_ENCAP_MODE, /* u8 */
+ DEVLINK_ATTR_RESOURCE_LIST, /* nested */
+ DEVLINK_ATTR_RESOURCE, /* nested */
+ DEVLINK_ATTR_RESOURCE_NAME, /* string */
+ DEVLINK_ATTR_RESOURCE_ID, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_NEW, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_VALID, /* u8 */
+ DEVLINK_ATTR_RESOURCE_SIZE_MIN, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_MAX, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_GRAN, /* u64 */
+ DEVLINK_ATTR_RESOURCE_UNIT, /* u8 */
+ DEVLINK_ATTR_RESOURCE_OCC, /* u64 */
+ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, /* u64 */
+ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */
/* add new attributes above here, update the policy in devlink.c */
@@ -245,4 +266,8 @@ enum devlink_dpipe_header_id {
DEVLINK_DPIPE_HEADER_IPV6,
};
+enum devlink_resource_unit {
+ DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
#endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/erspan.h b/include/uapi/linux/erspan.h
new file mode 100644
index 000000000000..841573019ae1
--- /dev/null
+++ b/include/uapi/linux/erspan.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * ERSPAN Tunnel Metadata
+ *
+ * Copyright (c) 2018 VMware
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * Userspace API for metadata mode ERSPAN tunnel
+ */
+#ifndef _UAPI_ERSPAN_H
+#define _UAPI_ERSPAN_H
+
+#include <linux/types.h> /* For __beXX in userspace */
+#include <asm/byteorder.h>
+
+/* ERSPAN version 2 metadata header */
+struct erspan_md2 {
+ __be32 timestamp;
+ __be16 sgt; /* security group tag */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 hwid_upper:2,
+ ft:5,
+ p:1;
+ __u8 o:1,
+ gra:2,
+ dir:1,
+ hwid:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 p:1,
+ ft:5,
+ hwid_upper:2;
+ __u8 hwid:4,
+ dir:1,
+ gra:2,
+ o:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+};
+
+struct erspan_metadata {
+ int version;
+ union {
+ __be32 index; /* Version 1 (type II) */
+ struct erspan_md2 md2; /* Version 2 (type III) */
+ } u;
+};
+
+#endif /* _UAPI_ERSPAN_H */
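
A sketch of filling the metadata from userspace for a version 1
(type II) session, assuming <arpa/inet.h> supplies htonl():

    struct erspan_metadata md = { .version = 1 };

    md.u.index = htonl(100);    /* hypothetical session index */
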
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index ac71559314e7..44a0b675a6bc 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1686,6 +1686,7 @@ enum ethtool_reset_flags {
ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */
ETH_RESET_RAM = 1 << 7, /* RAM shared between
* multiple components */
+ ETH_RESET_AP = 1 << 8, /* Application processor */
ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to
* this interface */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 4199f8acbce5..d2a8313fabd7 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -377,7 +377,11 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO, return -EAGAIN if operation would block */
#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008)
+/* per-IO O_APPEND */
+#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010)
+
/* mask of flags supported by the kernel */
-#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT)
+#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
+ RWF_APPEND)
#endif /* _UAPI_LINUX_FS_H */
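
RWF_APPEND gives one write O_APPEND semantics without reopening the
file. A sketch with pwritev2(2), assuming the libc already exposes the
flag:

    struct iovec iov = { .iov_base = buf, .iov_len = len };

    /* data is appended regardless of the offset argument */
    ssize_t n = pwritev2(fd, &iov, 1, 0, RWF_APPEND);
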
diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h
index 5156bad77b47..2dc10a034de1 100644
--- a/include/uapi/linux/gfs2_ondisk.h
+++ b/include/uapi/linux/gfs2_ondisk.h
@@ -187,10 +187,19 @@ struct gfs2_rgrp {
__be32 rg_flags;
__be32 rg_free;
__be32 rg_dinodes;
- __be32 __pad;
+ union {
+ __be32 __pad;
+ __be32 rg_skip; /* Distance to the next rgrp in fs blocks */
+ };
__be64 rg_igeneration;
-
- __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
+ /* The following 3 fields are duplicated from gfs2_rindex to reduce
+ * reliance on the rindex */
+ __be64 rg_data0; /* First data location */
+ __be32 rg_data; /* Number of data blocks in rgrp */
+ __be32 rg_bitbytes; /* Number of bytes in data bitmaps */
+ __be32 rg_crc; /* crc32 of the structure with this field 0 */
+
+ __u8 rg_reserved[60]; /* Several fields from gfs1 now reserved */
};
/*
@@ -394,7 +403,36 @@ struct gfs2_ea_header {
* Log header structure
*/
-#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
+#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
+#define GFS2_LOG_HEAD_FLUSH_NORMAL 0x00000002 /* normal log flush */
+#define GFS2_LOG_HEAD_FLUSH_SYNC 0x00000004 /* Sync log flush */
+#define GFS2_LOG_HEAD_FLUSH_SHUTDOWN 0x00000008 /* Shutdown log flush */
+#define GFS2_LOG_HEAD_FLUSH_FREEZE 0x00000010 /* Freeze flush */
+#define GFS2_LOG_HEAD_RECOVERY 0x00000020 /* Journal recovery */
+#define GFS2_LOG_HEAD_USERSPACE 0x80000000 /* Written by gfs2-utils */
+
+/* Log flush callers */
+#define GFS2_LFC_SHUTDOWN 0x00000100
+#define GFS2_LFC_JDATA_WPAGES 0x00000200
+#define GFS2_LFC_SET_FLAGS 0x00000400
+#define GFS2_LFC_AIL_EMPTY_GL 0x00000800
+#define GFS2_LFC_AIL_FLUSH 0x00001000
+#define GFS2_LFC_RGRP_GO_SYNC 0x00002000
+#define GFS2_LFC_INODE_GO_SYNC 0x00004000
+#define GFS2_LFC_INODE_GO_INVAL 0x00008000
+#define GFS2_LFC_FREEZE_GO_SYNC 0x00010000
+#define GFS2_LFC_KILL_SB 0x00020000
+#define GFS2_LFC_DO_SYNC 0x00040000
+#define GFS2_LFC_INPLACE_RESERVE 0x00080000
+#define GFS2_LFC_WRITE_INODE 0x00100000
+#define GFS2_LFC_MAKE_FS_RO 0x00200000
+#define GFS2_LFC_SYNC_FS 0x00400000
+#define GFS2_LFC_EVICT_INODE 0x00800000
+#define GFS2_LFC_TRANS_END 0x01000000
+#define GFS2_LFC_LOGD_JFLUSH_REQD 0x02000000
+#define GFS2_LFC_LOGD_AIL_FLUSH_REQD 0x04000000
+
+#define LH_V1_SIZE (offsetofend(struct gfs2_log_header, lh_hash))
struct gfs2_log_header {
struct gfs2_meta_header lh_header;
@@ -403,7 +441,21 @@ struct gfs2_log_header {
__be32 lh_flags; /* GFS2_LOG_HEAD_... */
__be32 lh_tail; /* Block number of log tail */
__be32 lh_blkno;
- __be32 lh_hash;
+ __be32 lh_hash; /* crc up to here with this field 0 */
+
+ /* Version 2 additional fields start here */
+ __be32 lh_crc; /* crc32c from lh_nsec to end of block */
+ __be32 lh_nsec; /* Nanoseconds of timestamp */
+ __be64 lh_sec; /* Seconds of timestamp */
+ __be64 lh_addr; /* Block addr of this log header (absolute) */
+ __be64 lh_jinode; /* Journal inode number */
+ __be64 lh_statfs_addr; /* Local statfs inode number */
+ __be64 lh_quota_addr; /* Local quota change inode number */
+
+ /* Statfs local changes (i.e. diff from global statfs) */
+ __be64 lh_local_total;
+ __be64 lh_local_free;
+ __be64 lh_local_dinodes;
};
/*
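
lh_crc is the crc32c of everything from lh_nsec to the end of the log
block, computed with the field itself zeroed; LH_V1_SIZE marks where the
v1 header (through lh_hash) ends, and lh_crc occupies the next four
bytes. A sketch of the expected computation, assuming block size bsize:

    u32 crc = crc32c(~0, (const char *)lh + LH_V1_SIZE + 4,
                     bsize - LH_V1_SIZE - 4);
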
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 3ee3bf7c8526..f8cb5760ea4f 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -23,6 +23,7 @@
#define _UAPI_LINUX_IF_ETHER_H
#include <linux/types.h>
+#include <linux/libc-compat.h>
/*
* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
@@ -47,6 +48,7 @@
#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */
+#define ETH_P_ERSPAN2 0x22EB /* ERSPAN version 2 (type III) */
#define ETH_P_IP 0x0800 /* Internet Protocol packet */
#define ETH_P_X25 0x0805 /* CCITT X.25 */
#define ETH_P_ARP 0x0806 /* Address Resolution packet */
@@ -149,11 +151,13 @@
* This is an Ethernet frame header.
*/
+#if __UAPI_DEF_ETHHDR
struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
+#endif
#endif /* _UAPI_LINUX_IF_ETHER_H */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 19fc02660e0c..6d9447700e18 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -161,6 +161,9 @@ enum {
IFLA_EVENT,
IFLA_NEW_NETNSID,
IFLA_IF_NETNSID,
+ IFLA_CARRIER_UP_COUNT,
+ IFLA_CARRIER_DOWN_COUNT,
+ IFLA_NEW_IFINDEX,
__IFLA_MAX
};
@@ -732,6 +735,8 @@ enum {
IFLA_VF_STATS_BROADCAST,
IFLA_VF_STATS_MULTICAST,
IFLA_VF_STATS_PAD,
+ IFLA_VF_STATS_RX_DROPPED,
+ IFLA_VF_STATS_TX_DROPPED,
__IFLA_VF_STATS_MAX,
};
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 719d243471f4..98e4d5d7c45c 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -22,8 +22,13 @@
#define MACSEC_KEYID_LEN 16
-#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
-#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
+/* cipher IDs as per IEEE802.1AEbn-2011 */
+#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
+#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
+
+/* deprecated cipher ID for GCM-AES-128 */
+#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT MACSEC_CIPHER_ID_GCM_AES_128
#define MACSEC_MIN_ICV_LEN 8
#define MACSEC_MAX_ICV_LEN 32
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 030d3e6d6029..ee432cd3018c 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -57,6 +57,8 @@
*/
#define TUNSETVNETBE _IOW('T', 222, int)
#define TUNGETVNETBE _IOR('T', 223, int)
+#define TUNSETSTEERINGEBPF _IOR('T', 224, int)
+#define TUNSETFILTEREBPF _IOR('T', 225, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index e68dadbd6d45..1b3d148c4560 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -137,6 +137,9 @@ enum {
IFLA_GRE_IGNORE_DF,
IFLA_GRE_FWMARK,
IFLA_GRE_ERSPAN_INDEX,
+ IFLA_GRE_ERSPAN_VER,
+ IFLA_GRE_ERSPAN_DIR,
+ IFLA_GRE_ERSPAN_HWID,
__IFLA_GRE_MAX,
};
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 817d807e9481..14565d703291 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -92,6 +92,8 @@ enum {
INET_DIAG_BC_D_COND,
INET_DIAG_BC_DEV_COND, /* u32 ifindex */
INET_DIAG_BC_MARK_COND,
+ INET_DIAG_BC_S_EQ,
+ INET_DIAG_BC_D_EQ,
};
struct inet_diag_hostcond {
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 496e59a2738b..8fb90a0819c3 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -932,6 +932,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_HYPERV_SYNIC2 148
#define KVM_CAP_HYPERV_VP_INDEX 149
#define KVM_CAP_S390_AIS_MIGRATION 150
+#define KVM_CAP_PPC_GET_CPU_CHAR 151
+#define KVM_CAP_S390_BPB 152
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1261,6 +1263,8 @@ struct kvm_s390_ucas_mapping {
#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg)
/* Available with KVM_CAP_PPC_RADIX_MMU */
#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
+/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
+#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index d84ce5c1c9aa..7d570c7bd117 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -65,7 +65,7 @@ struct sockaddr_l2tpip6 {
* TUNNEL_MODIFY - CONN_ID, udpcsum
* TUNNEL_GETSTATS - CONN_ID, (stats)
* TUNNEL_GET - CONN_ID, (...)
- * SESSION_CREATE - SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, offset, l2spec
+ * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec
* SESSION_DELETE - SESSION_ID
* SESSION_MODIFY - SESSION_ID, data_seq
* SESSION_GET - SESSION_ID, (...)
@@ -94,10 +94,10 @@ enum {
L2TP_ATTR_NONE, /* no data */
L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */
L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */
- L2TP_ATTR_OFFSET, /* u16 */
+ L2TP_ATTR_OFFSET, /* u16 (not used) */
L2TP_ATTR_DATA_SEQ, /* u16 */
L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */
- L2TP_ATTR_L2SPEC_LEN, /* u8, enum l2tp_l2spec_type */
+ L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */
L2TP_ATTR_PROTO_VERSION, /* u8 */
L2TP_ATTR_IFNAME, /* string */
L2TP_ATTR_CONN_ID, /* u32 */
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 282875cf8056..fc29efaa918c 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -168,47 +168,106 @@
/* If we did not see any headers from any supported C libraries,
* or we are being included in the kernel, then define everything
- * that we need. */
+ * that we need. Check for previous __UAPI_* definitions to give
+ * unsupported C libraries a way to opt out of any kernel definition. */
#else /* !defined(__GLIBC__) */
/* Definitions for if.h */
+#ifndef __UAPI_DEF_IF_IFCONF
#define __UAPI_DEF_IF_IFCONF 1
+#endif
+#ifndef __UAPI_DEF_IF_IFMAP
#define __UAPI_DEF_IF_IFMAP 1
+#endif
+#ifndef __UAPI_DEF_IF_IFNAMSIZ
#define __UAPI_DEF_IF_IFNAMSIZ 1
+#endif
+#ifndef __UAPI_DEF_IF_IFREQ
#define __UAPI_DEF_IF_IFREQ 1
+#endif
/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+#endif
/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif
/* Definitions for in.h */
+#ifndef __UAPI_DEF_IN_ADDR
#define __UAPI_DEF_IN_ADDR 1
+#endif
+#ifndef __UAPI_DEF_IN_IPPROTO
#define __UAPI_DEF_IN_IPPROTO 1
+#endif
+#ifndef __UAPI_DEF_IN_PKTINFO
#define __UAPI_DEF_IN_PKTINFO 1
+#endif
+#ifndef __UAPI_DEF_IP_MREQ
#define __UAPI_DEF_IP_MREQ 1
+#endif
+#ifndef __UAPI_DEF_SOCKADDR_IN
#define __UAPI_DEF_SOCKADDR_IN 1
+#endif
+#ifndef __UAPI_DEF_IN_CLASS
#define __UAPI_DEF_IN_CLASS 1
+#endif
/* Definitions for in6.h */
+#ifndef __UAPI_DEF_IN6_ADDR
#define __UAPI_DEF_IN6_ADDR 1
+#endif
+#ifndef __UAPI_DEF_IN6_ADDR_ALT
#define __UAPI_DEF_IN6_ADDR_ALT 1
+#endif
+#ifndef __UAPI_DEF_SOCKADDR_IN6
#define __UAPI_DEF_SOCKADDR_IN6 1
+#endif
+#ifndef __UAPI_DEF_IPV6_MREQ
#define __UAPI_DEF_IPV6_MREQ 1
+#endif
+#ifndef __UAPI_DEF_IPPROTO_V6
#define __UAPI_DEF_IPPROTO_V6 1
+#endif
+#ifndef __UAPI_DEF_IPV6_OPTIONS
#define __UAPI_DEF_IPV6_OPTIONS 1
+#endif
+#ifndef __UAPI_DEF_IN6_PKTINFO
#define __UAPI_DEF_IN6_PKTINFO 1
+#endif
+#ifndef __UAPI_DEF_IP6_MTUINFO
#define __UAPI_DEF_IP6_MTUINFO 1
+#endif
/* Definitions for ipx.h */
+#ifndef __UAPI_DEF_SOCKADDR_IPX
#define __UAPI_DEF_SOCKADDR_IPX 1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
+#endif
+#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
+#endif
+#ifndef __UAPI_DEF_IPX_CONFIG_DATA
#define __UAPI_DEF_IPX_CONFIG_DATA 1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEF
#define __UAPI_DEF_IPX_ROUTE_DEF 1
+#endif
/* Definitions for xattr.h */
+#ifndef __UAPI_DEF_XATTR
#define __UAPI_DEF_XATTR 1
+#endif
#endif /* __GLIBC__ */
+/* Definitions for if_ether.h */
+/* allow libcs like musl to deactivate this; glibc does not implement it. */
+#ifndef __UAPI_DEF_ETHHDR
+#define __UAPI_DEF_ETHHDR 1
+#endif
+
#endif /* _UAPI_LIBC_COMPAT_H */
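The new guards let a C library suppress individual kernel definitions by pre-defining the corresponding __UAPI_DEF_* macro to 0 before any kernel header is pulled in. A sketch of how a libc that ships its own struct ethhdr might opt out (the placement inside the libc's own header is hypothetical):

/* hypothetical placement: inside the libc's own if_ether replacement header */
#define __UAPI_DEF_ETHHDR 0     /* we provide struct ethhdr ourselves */
#include <linux/if_ether.h>     /* still supplies ETH_ALEN, ETH_P_*, ... */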
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index 42d1a434af29..f9a1be7fc696 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -75,14 +75,23 @@ struct nvm_ioctl_create_simple {
__u32 lun_end;
};
+struct nvm_ioctl_create_extended {
+ __u16 lun_begin;
+ __u16 lun_end;
+ __u16 op;
+ __u16 rsv;
+};
+
enum {
NVM_CONFIG_TYPE_SIMPLE = 0,
+ NVM_CONFIG_TYPE_EXTENDED = 1,
};
struct nvm_ioctl_create_conf {
__u32 type;
union {
struct nvm_ioctl_create_simple s;
+ struct nvm_ioctl_create_extended e;
};
};
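The extended create config carries an extra op field alongside the LUN range. A sketch of filling it in (the interpretation of op as an over-provisioning percentage is an assumption; the remaining nvm_ioctl_create plumbing and the create ioctl itself are omitted):

#include <linux/lightnvm.h>

static void fill_extended_conf(struct nvm_ioctl_create_conf *conf)
{
        conf->type = NVM_CONFIG_TYPE_EXTENDED;
        conf->e.lun_begin = 0;
        conf->e.lun_end = 3;
        conf->e.op = 10;        /* assumed: over-provisioning percentage */
        conf->e.rsv = 0;
}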
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 3fea7709a441..9574bd40870b 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -36,7 +36,7 @@ enum ip_conntrack_info {
#define NF_CT_STATE_INVALID_BIT (1 << 0)
#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
-#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_UNTRACKED + 1))
+#define NF_CT_STATE_UNTRACKED_BIT (1 << 6)
/* Bitset representing status of connection. */
enum ip_conntrack_status {
@@ -101,12 +101,16 @@ enum ip_conntrack_status {
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
+ /* Conntrack has been offloaded to flow table. */
+ IPS_OFFLOAD_BIT = 14,
+ IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT),
+
/* Be careful here, modifying these bits can make things messy,
* so don't let users modify them directly.
*/
IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
- IPS_SEQ_ADJUST | IPS_TEMPLATE),
+ IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
__IPS_MAX_BIT = 14,
};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a3ee277b17a1..66dceee0ae30 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -92,6 +92,9 @@ enum nft_verdicts {
* @NFT_MSG_GETOBJ: get a stateful object (enum nft_obj_attributes)
* @NFT_MSG_DELOBJ: delete a stateful object (enum nft_obj_attributes)
* @NFT_MSG_GETOBJ_RESET: get and reset a stateful object (enum nft_obj_attributes)
+ * @NFT_MSG_NEWFLOWTABLE: add new flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -116,6 +119,9 @@ enum nf_tables_msg_types {
NFT_MSG_GETOBJ,
NFT_MSG_DELOBJ,
NFT_MSG_GETOBJ_RESET,
+ NFT_MSG_NEWFLOWTABLE,
+ NFT_MSG_GETFLOWTABLE,
+ NFT_MSG_DELFLOWTABLE,
NFT_MSG_MAX,
};
@@ -168,6 +174,8 @@ enum nft_table_attributes {
NFTA_TABLE_NAME,
NFTA_TABLE_FLAGS,
NFTA_TABLE_USE,
+ NFTA_TABLE_HANDLE,
+ NFTA_TABLE_PAD,
__NFTA_TABLE_MAX
};
#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
@@ -311,6 +319,7 @@ enum nft_set_desc_attributes {
* @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32)
* @NFTA_SET_USERDATA: user data (NLA_BINARY)
* @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*)
+ * @NFTA_SET_HANDLE: set handle (NLA_U64)
*/
enum nft_set_attributes {
NFTA_SET_UNSPEC,
@@ -329,6 +338,7 @@ enum nft_set_attributes {
NFTA_SET_USERDATA,
NFTA_SET_PAD,
NFTA_SET_OBJ_TYPE,
+ NFTA_SET_HANDLE,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -777,6 +787,7 @@ enum nft_exthdr_attributes {
* @NFT_META_OIFGROUP: packet output interface group
* @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
* @NFT_META_PRANDOM: a 32bit pseudo-random number
+ * @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp)
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -804,6 +815,7 @@ enum nft_meta_keys {
NFT_META_OIFGROUP,
NFT_META_CGROUP,
NFT_META_PRANDOM,
+ NFT_META_SECPATH,
};
/**
@@ -949,6 +961,17 @@ enum nft_ct_attributes {
};
#define NFTA_CT_MAX (__NFTA_CT_MAX - 1)
+/**
+ * enum nft_flow_attributes - ct offload expression attributes
+ * @NFTA_FLOW_TABLE_NAME: flow table name (NLA_STRING)
+ */
+enum nft_offload_attributes {
+ NFTA_FLOW_UNSPEC,
+ NFTA_FLOW_TABLE_NAME,
+ __NFTA_FLOW_MAX,
+};
+#define NFTA_FLOW_MAX (__NFTA_FLOW_MAX - 1)
+
enum nft_limit_type {
NFT_LIMIT_PKTS,
NFT_LIMIT_PKT_BYTES
@@ -1295,6 +1318,7 @@ enum nft_ct_helper_attributes {
* @NFTA_OBJ_TYPE: stateful object type (NLA_U32)
* @NFTA_OBJ_DATA: stateful object data (NLA_NESTED)
* @NFTA_OBJ_USE: number of references to this expression (NLA_U32)
+ * @NFTA_OBJ_HANDLE: object handle (NLA_U64)
*/
enum nft_object_attributes {
NFTA_OBJ_UNSPEC,
@@ -1303,11 +1327,63 @@ enum nft_object_attributes {
NFTA_OBJ_TYPE,
NFTA_OBJ_DATA,
NFTA_OBJ_USE,
+ NFTA_OBJ_HANDLE,
+ NFTA_OBJ_PAD,
__NFTA_OBJ_MAX
};
#define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1)
/**
+ * enum nft_flowtable_attributes - nf_tables flow table netlink attributes
+ *
+ * @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
+ * @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING)
+ * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration (NLA_U32)
+ * @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
+ * @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
+ */
+enum nft_flowtable_attributes {
+ NFTA_FLOWTABLE_UNSPEC,
+ NFTA_FLOWTABLE_TABLE,
+ NFTA_FLOWTABLE_NAME,
+ NFTA_FLOWTABLE_HOOK,
+ NFTA_FLOWTABLE_USE,
+ NFTA_FLOWTABLE_HANDLE,
+ NFTA_FLOWTABLE_PAD,
+ __NFTA_FLOWTABLE_MAX
+};
+#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1)
+
+/**
+ * enum nft_flowtable_hook_attributes - nf_tables flow table hook netlink attributes
+ *
+ * @NFTA_FLOWTABLE_HOOK_NUM: netfilter hook number (NLA_U32)
+ * @NFTA_FLOWTABLE_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFTA_FLOWTABLE_HOOK_DEVS: input devices this flow table is bound to (NLA_NESTED)
+ */
+enum nft_flowtable_hook_attributes {
+ NFTA_FLOWTABLE_HOOK_UNSPEC,
+ NFTA_FLOWTABLE_HOOK_NUM,
+ NFTA_FLOWTABLE_HOOK_PRIORITY,
+ NFTA_FLOWTABLE_HOOK_DEVS,
+ __NFTA_FLOWTABLE_HOOK_MAX
+};
+#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1)
+
+/**
+ * enum nft_device_attributes - nf_tables device netlink attributes
+ *
+ * @NFTA_DEVICE_NAME: name of this device (NLA_STRING)
+ */
+enum nft_devices_attributes {
+ NFTA_DEVICE_UNSPEC,
+ NFTA_DEVICE_NAME,
+ __NFTA_DEVICE_MAX
+};
+#define NFTA_DEVICE_MAX (__NFTA_DEVICE_MAX - 1)
+
+
+/**
* enum nft_trace_attributes - nf_tables trace netlink attributes
*
* @NFTA_TRACE_TABLE: name of the table (NLA_STRING)
diff --git a/include/uapi/linux/netfilter/xt_connlimit.h b/include/uapi/linux/netfilter/xt_connlimit.h
index 07e5e9d47882..d4d1943dcd11 100644
--- a/include/uapi/linux/netfilter/xt_connlimit.h
+++ b/include/uapi/linux/netfilter/xt_connlimit.h
@@ -27,7 +27,7 @@ struct xt_connlimit_info {
__u32 flags;
/* Used internally by the kernel */
- struct xt_connlimit_data *data __attribute__((aligned(8)));
+ struct nf_conncount_data *data __attribute__((aligned(8)));
};
#endif /* _XT_CONNLIMIT_H */
diff --git a/include/uapi/linux/netfilter_arp.h b/include/uapi/linux/netfilter_arp.h
index 81b6a4cbcb72..791dfc5ae907 100644
--- a/include/uapi/linux/netfilter_arp.h
+++ b/include/uapi/linux/netfilter_arp.h
@@ -15,6 +15,9 @@
#define NF_ARP_IN 0
#define NF_ARP_OUT 1
#define NF_ARP_FORWARD 2
+
+#ifndef __KERNEL__
#define NF_ARP_NUMHOOKS 3
+#endif
#endif /* __LINUX_ARP_NETFILTER_H */
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
index 9089c38f6abe..61f1c7dfd033 100644
--- a/include/uapi/linux/netfilter_decnet.h
+++ b/include/uapi/linux/netfilter_decnet.h
@@ -24,6 +24,9 @@
#define NFC_DN_IF_IN 0x0004
/* Output device. */
#define NFC_DN_IF_OUT 0x0008
+
+/* kernel define is in netfilter_defs.h */
+#define NF_DN_NUMHOOKS 7
#endif /* ! __KERNEL__ */
/* DECnet Hooks */
@@ -41,7 +44,6 @@
#define NF_DN_HELLO 5
/* Input Routing Packets */
#define NF_DN_ROUTE 6
-#define NF_DN_NUMHOOKS 7
enum nf_dn_hook_priorities {
NF_DN_PRI_FIRST = INT_MIN,
diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h
index e6b1a84f5dd3..c3b060775e13 100644
--- a/include/uapi/linux/netfilter_ipv4.h
+++ b/include/uapi/linux/netfilter_ipv4.h
@@ -57,6 +57,7 @@
enum nf_ip_hook_priorities {
NF_IP_PRI_FIRST = INT_MIN,
+ NF_IP_PRI_RAW_BEFORE_DEFRAG = -450,
NF_IP_PRI_CONNTRACK_DEFRAG = -400,
NF_IP_PRI_RAW = -300,
NF_IP_PRI_SELINUX_FIRST = -225,
diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h
index 2f9724611cc2..dc624fd24d25 100644
--- a/include/uapi/linux/netfilter_ipv6.h
+++ b/include/uapi/linux/netfilter_ipv6.h
@@ -62,6 +62,7 @@
enum nf_ip6_hook_priorities {
NF_IP6_PRI_FIRST = INT_MIN,
+ NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450,
NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
NF_IP6_PRI_RAW = -300,
NF_IP6_PRI_SELINUX_FIRST = -225,
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_srh.h b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
new file mode 100644
index 000000000000..f3cc0ef514a7
--- /dev/null
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _IP6T_SRH_H
+#define _IP6T_SRH_H
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+
+/* Values for "mt_flags" field in struct ip6t_srh */
+#define IP6T_SRH_NEXTHDR 0x0001
+#define IP6T_SRH_LEN_EQ 0x0002
+#define IP6T_SRH_LEN_GT 0x0004
+#define IP6T_SRH_LEN_LT 0x0008
+#define IP6T_SRH_SEGS_EQ 0x0010
+#define IP6T_SRH_SEGS_GT 0x0020
+#define IP6T_SRH_SEGS_LT 0x0040
+#define IP6T_SRH_LAST_EQ 0x0080
+#define IP6T_SRH_LAST_GT 0x0100
+#define IP6T_SRH_LAST_LT 0x0200
+#define IP6T_SRH_TAG 0x0400
+#define IP6T_SRH_MASK 0x07FF
+
+/* Values for "mt_invflags" field in struct ip6t_srh */
+#define IP6T_SRH_INV_NEXTHDR 0x0001
+#define IP6T_SRH_INV_LEN_EQ 0x0002
+#define IP6T_SRH_INV_LEN_GT 0x0004
+#define IP6T_SRH_INV_LEN_LT 0x0008
+#define IP6T_SRH_INV_SEGS_EQ 0x0010
+#define IP6T_SRH_INV_SEGS_GT 0x0020
+#define IP6T_SRH_INV_SEGS_LT 0x0040
+#define IP6T_SRH_INV_LAST_EQ 0x0080
+#define IP6T_SRH_INV_LAST_GT 0x0100
+#define IP6T_SRH_INV_LAST_LT 0x0200
+#define IP6T_SRH_INV_TAG 0x0400
+#define IP6T_SRH_INV_MASK 0x07FF
+
+/**
+ * struct ip6t_srh - SRH match options
+ * @next_hdr: Next header field of SRH
+ * @hdr_len: Extension header length field of SRH
+ * @segs_left: Segments left field of SRH
+ * @last_entry: Last entry field of SRH
+ * @tag: Tag field of SRH
+ * @mt_flags: match options
+ * @mt_invflags: Invert the sense of match options
+ */
+
+struct ip6t_srh {
+ __u8 next_hdr;
+ __u8 hdr_len;
+ __u8 segs_left;
+ __u8 last_entry;
+ __u16 tag;
+ __u16 mt_flags;
+ __u16 mt_invflags;
+};
+
+#endif /*_IP6T_SRH_H*/
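A sketch of initializing the new match structure to catch SRH packets with segments_left greater than zero (how the structure is handed to ip6tables is out of scope here):

#include <string.h>
#include <linux/netfilter_ipv6/ip6t_srh.h>

static void srh_match_init(struct ip6t_srh *m)
{
        memset(m, 0, sizeof(*m));
        m->segs_left = 0;                 /* compare segments_left against 0 */
        m->mt_flags = IP6T_SRH_SEGS_GT;   /* and require strictly greater */
}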
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h
index 057d22a48416..946cb62d64b0 100644
--- a/include/uapi/linux/nfs.h
+++ b/include/uapi/linux/nfs.h
@@ -12,6 +12,7 @@
#define NFS_PROGRAM 100003
#define NFS_PORT 2049
+#define NFS_RDMA_PORT 20049
#define NFS_MAXDATA 8192
#define NFS_MAXPATHLEN 1024
#define NFS_MAXNAMLEN 255
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f882fe1f9709..c587a61c32bf 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3862,6 +3862,9 @@ enum nl80211_bss_scan_width {
* @NL80211_BSS_PARENT_BSSID. (u64).
* @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF
* is set.
+ * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update.
+ * Contains a nested array of signal strength attributes (u8, dBm),
+ * using the nesting index as the antenna number.
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -3885,6 +3888,7 @@ enum nl80211_bss {
NL80211_BSS_PAD,
NL80211_BSS_PARENT_TSF,
NL80211_BSS_PARENT_BSSID,
+ NL80211_BSS_CHAIN_SIGNAL,
/* keep last */
__NL80211_BSS_AFTER_LAST,
diff --git a/include/uapi/linux/nubus.h b/include/uapi/linux/nubus.h
index f3776cc80f4d..48031e7858f1 100644
--- a/include/uapi/linux/nubus.h
+++ b/include/uapi/linux/nubus.h
@@ -221,27 +221,4 @@ enum nubus_display_res_id {
NUBUS_RESID_SIXTHMODE = 0x0085
};
-struct nubus_dir
-{
- unsigned char *base;
- unsigned char *ptr;
- int done;
- int mask;
-};
-
-struct nubus_dirent
-{
- unsigned char *base;
- unsigned char type;
- __u32 data; /* Actually 24bits used */
- int mask;
-};
-
-
-/* We'd like to get rid of this eventually. Only daynaport.c uses it now. */
-static inline void *nubus_slot_addr(int slot)
-{
- return (void *)(0xF0000000|(slot<<24));
-}
-
#endif /* _UAPILINUX_NUBUS_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 4265d7f9e1f2..713e56ce681f 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -363,7 +363,7 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
OVS_TUNNEL_KEY_ATTR_PAD,
- OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* be32 ERSPAN index. */
+ OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* struct erspan_metadata */
__OVS_TUNNEL_KEY_ATTR_MAX
};
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b9a4953018ed..e0739a1aa4b2 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -418,6 +418,27 @@ struct perf_event_attr {
__u16 __reserved_2; /* align to __u64 */
};
+/*
+ * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below
+ * to query bpf programs attached to the same perf tracepoint
+ * as the given perf event.
+ */
+struct perf_event_query_bpf {
+ /*
+ * Length of the ids array below
+ */
+ __u32 ids_len;
+ /*
+ * Set by the kernel to indicate the number of
+ * available programs
+ */
+ __u32 prog_cnt;
+ /*
+ * User provided buffer to store program ids
+ */
+ __u32 ids[0];
+};
+
#define perf_flags(attr) (*(&(attr)->read_format + 1))
/*
@@ -433,6 +454,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -612,9 +634,12 @@ struct perf_event_mmap_page {
*/
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
- * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
- * different events so can reuse the same bit position.
- * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ * Following PERF_RECORD_MISC_* are used on different
+ * events, so can reuse the same bit position:
+ *
+ * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events
+ * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event
+ * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
@@ -864,6 +889,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u32 pid;
* u32 tid;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_ITRACE_START = 12,
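PERF_EVENT_IOC_QUERY_BPF fills a caller-allocated perf_event_query_bpf whose trailing ids[] array is sized by the caller via ids_len, with prog_cnt reporting how many programs exist. A minimal sketch, assuming perf_fd is an open perf event descriptor:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static struct perf_event_query_bpf *query_attached_bpf(int perf_fd,
                                                       unsigned int max_ids)
{
        struct perf_event_query_bpf *q;

        /* ids[] is variable length: allocate the header plus max_ids slots */
        q = calloc(1, sizeof(*q) + max_ids * sizeof(q->ids[0]));
        if (!q)
                return NULL;
        q->ids_len = max_ids;
        if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q) < 0) {
                free(q);        /* a too-small buffer shows up as an error */
                return NULL;
        }
        return q;               /* q->prog_cnt entries of q->ids[] are valid */
}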
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index e3939e00980b..e46d82b91166 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -66,6 +66,12 @@ struct ptrace_peeksiginfo_args {
#define PTRACE_SETSIGMASK 0x420b
#define PTRACE_SECCOMP_GET_FILTER 0x420c
+#define PTRACE_SECCOMP_GET_METADATA 0x420d
+
+struct seccomp_metadata {
+ unsigned long filter_off; /* Input: which filter */
+ unsigned int flags; /* Output: filter's flags */
+};
/* Read signals from a shared (process wide) queue */
#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
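PTRACE_SECCOMP_GET_METADATA reads the flags of one of a stopped tracee's seccomp filters, selected by filter_off (0 being the most recently installed). A self-contained sketch; the fallback definitions merely mirror the diff above for builds against older headers:

#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_SECCOMP_GET_METADATA       /* fallback mirroring the diff above */
#define PTRACE_SECCOMP_GET_METADATA 0x420d
struct seccomp_metadata {
        unsigned long filter_off;         /* input: which filter */
        unsigned int flags;               /* output: filter's flags */
};
#endif

static long get_seccomp_filter_flags(pid_t pid, unsigned int *flags)
{
        struct seccomp_metadata md = { .filter_off = 0 };  /* newest filter */
        long ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);

        if (ret >= 0)
                *flags = md.flags;
        return ret;
}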
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 843e29aa3cac..9b15005955fa 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -541,9 +541,19 @@ struct tcmsg {
int tcm_ifindex;
__u32 tcm_handle;
__u32 tcm_parent;
+/* tcm_block_index is used instead of tcm_parent
+ * in case tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK
+ */
+#define tcm_block_index tcm_parent
__u32 tcm_info;
};
+/* For manipulation of filters in shared block, tcm_ifindex is set to
+ * TCM_IFINDEX_MAGIC_BLOCK, and tcm_parent is aliased to tcm_block_index
+ * which is the block index.
+ */
+#define TCM_IFINDEX_MAGIC_BLOCK (0xFFFFFFFFU)
+
enum {
TCA_UNSPEC,
TCA_KIND,
@@ -558,6 +568,8 @@ enum {
TCA_DUMP_INVISIBLE,
TCA_CHAIN,
TCA_HW_OFFLOAD,
+ TCA_INGRESS_BLOCK,
+ TCA_EGRESS_BLOCK,
__TCA_MAX
};
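With TCM_IFINDEX_MAGIC_BLOCK in tcm_ifindex, tcm_parent is reinterpreted as tcm_block_index, so filter requests can target a shared block rather than a device. A sketch of filling the header (netlink message assembly omitted):

#include <string.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>

static void tcmsg_for_shared_block(struct tcmsg *t, __u32 block_index)
{
        memset(t, 0, sizeof(*t));
        t->tcm_family = AF_UNSPEC;
        t->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;  /* "no device" magic */
        t->tcm_block_index = block_index;          /* aliases tcm_parent */
}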
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 30a9e51bbb1e..22627f80063e 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -49,5 +49,10 @@
*/
#define SCHED_FLAG_RESET_ON_FORK 0x01
#define SCHED_FLAG_RECLAIM 0x02
+#define SCHED_FLAG_DL_OVERRUN 0x04
+
+#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
+ SCHED_FLAG_RECLAIM | \
+ SCHED_FLAG_DL_OVERRUN)
#endif /* _UAPI_LINUX_SCHED_H */
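SCHED_FLAG_DL_OVERRUN asks the kernel to notify a SCHED_DEADLINE task (via SIGXCPU) when it overruns its runtime. A sketch using the raw sched_setattr syscall; the userspace struct sched_attr copy below follows the sched_setattr(2) layout and is not part of this diff:

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>        /* SCHED_FLAG_DL_OVERRUN after this change */

struct sched_attr {             /* userspace copy, not from a uapi header */
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
};

static int dl_signal_on_overrun(struct sched_attr *attr)
{
        attr->sched_flags |= SCHED_FLAG_DL_OVERRUN;
        /* pid 0 = calling thread; no glibc wrapper exists for this call */
        return syscall(SYS_sched_setattr, 0, attr, 0);
}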
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d9adab32dbee..4c4db14786bd 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -125,6 +125,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_SOCKOPT_PEELOFF_FLAGS 122
#define SCTP_STREAM_SCHEDULER 123
#define SCTP_STREAM_SCHEDULER_VALUE 124
+#define SCTP_INTERLEAVING_SUPPORTED 125
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -459,6 +460,8 @@ struct sctp_pdapi_event {
__u32 pdapi_length;
__u32 pdapi_indication;
sctp_assoc_t pdapi_assoc_id;
+ __u32 pdapi_stream;
+ __u32 pdapi_seq;
};
enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 35f79d1f8c3a..14bacc7e6cef 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -117,10 +117,9 @@ static inline unsigned int tipc_node(__u32 addr)
/*
* Publication scopes when binding port names and port name sequences
*/
-
-#define TIPC_ZONE_SCOPE 1
-#define TIPC_CLUSTER_SCOPE 2
-#define TIPC_NODE_SCOPE 3
+#define TIPC_ZONE_SCOPE 1
+#define TIPC_CLUSTER_SCOPE 2
+#define TIPC_NODE_SCOPE 3
/*
* Limiting values for messages
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index e3d1d0c78f3c..cd4f0b897a48 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -49,5 +49,11 @@ typedef __u32 __bitwise __wsum;
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
+#ifdef __CHECK_POLL
+typedef unsigned __bitwise __poll_t;
+#else
+typedef unsigned __poll_t;
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_LINUX_TYPES_H */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index c4c79aa331bd..d5a5caec8fbc 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -1077,9 +1077,9 @@ struct usb_ptm_cap_descriptor {
#define USB_DT_USB_PTM_ID_SIZE 3
/*
* The size of the descriptor for the Sublink Speed Attribute Count
- * (SSAC) specified in bmAttributes[4:0].
+ * (SSAC) specified in bmAttributes[4:0]. SSAC is zero-based.
*/
-#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4)
+#define USB_DT_USB_SSP_CAP_SIZE(ssac) (12 + (ssac + 1) * 4)
/*-------------------------------------------------------------------------*/
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
index 70ed5338d447..964e87217be4 100644
--- a/include/uapi/linux/usbdevice_fs.h
+++ b/include/uapi/linux/usbdevice_fs.h
@@ -79,7 +79,7 @@ struct usbdevfs_connectinfo {
#define USBDEVFS_URB_SHORT_NOT_OK 0x01
#define USBDEVFS_URB_ISO_ASAP 0x02
#define USBDEVFS_URB_BULK_CONTINUATION 0x04
-#define USBDEVFS_URB_NO_FSBR 0x20
+#define USBDEVFS_URB_NO_FSBR 0x20 /* Not used */
#define USBDEVFS_URB_ZERO_PACKET 0x40
#define USBDEVFS_URB_NO_INTERRUPT 0x80
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index fc353b518288..5de6ed37695b 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -57,6 +57,8 @@
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
+#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */
+
#ifndef VIRTIO_NET_NO_LEGACY
#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
#endif /* VIRTIO_NET_NO_LEGACY */
@@ -76,6 +78,17 @@ struct virtio_net_config {
__u16 max_virtqueue_pairs;
/* Default maximum transmit unit advice */
__u16 mtu;
+ /*
+ * speed, in units of 1 Mbit/s. All values 0 to INT_MAX are legal.
+ * Any other value stands for unknown.
+ */
+ __u32 speed;
+ /*
+ * 0x00 - half duplex
+ * 0x01 - full duplex
+ * Any other value stands for unknown.
+ */
+ __u8 duplex;
} __attribute__((packed));
/*
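The two new config fields mirror ethtool-style link information. A sketch of decoding the duplex byte per the comment above (reading the config space itself is transport-specific and omitted):

#include <linux/virtio_net.h>

static const char *virtio_duplex_str(const struct virtio_net_config *cfg)
{
        switch (cfg->duplex) {
        case 0x00: return "half";
        case 0x01: return "full";
        default:   return "unknown";    /* any other value means unknown */
        }
}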
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 398a514ee446..db54115be044 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -82,6 +82,15 @@ struct bnxt_re_qp_resp {
__u32 rsvd;
};
+struct bnxt_re_srq_req {
+ __u64 srqva;
+ __u64 srq_handle;
+};
+
+struct bnxt_re_srq_resp {
+ __u32 srqid;
+};
+
enum bnxt_re_shpg_offt {
BNXT_RE_BEG_RESV_OFFT = 0x00,
BNXT_RE_AVID_OFFT = 0x10,
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 7e11bb8651b6..04d0e67b1312 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -402,13 +402,18 @@ struct ib_uverbs_create_cq {
__u64 driver_data[0];
};
+enum ib_uverbs_ex_create_cq_flags {
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
+ IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
+};
+
struct ib_uverbs_ex_create_cq {
__u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 comp_mask;
- __u32 flags;
+ __u32 flags; /* bitmask of ib_uverbs_ex_create_cq_flags */
__u32 reserved;
};
@@ -449,7 +454,7 @@ struct ib_uverbs_wc {
__u32 vendor_err;
__u32 byte_len;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
__u32 qp_num;
@@ -765,7 +770,7 @@ struct ib_uverbs_send_wr {
__u32 opcode;
__u32 send_flags;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
union {
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index 224b52b6279c..7f9c37346613 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -97,8 +97,8 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
- __u64 rx_hash_fields_mask;
- __u8 rx_hash_function;
+ __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
+ __u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
__u32 comp_mask;
@@ -152,7 +152,8 @@ enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
- MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7
+ MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7,
+ MLX4_IB_RX_HASH_INNER = 1ULL << 31,
};
#endif /* MLX4_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index a33e0517d3fd..1111aa4e7c1e 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -41,6 +41,9 @@ enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
+ MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
+ MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
+ MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
};
enum {
@@ -121,10 +124,12 @@ struct mlx5_ib_alloc_ucontext_resp {
__u8 cqe_version;
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
- __u8 reserved2;
+ __u8 clock_info_versions;
__u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
+ __u32 num_dyn_bfregs;
+ __u32 reserved3;
};
struct mlx5_ib_alloc_pd_resp {
@@ -280,8 +285,11 @@ struct mlx5_ib_create_qp {
__u32 rq_wqe_shift;
__u32 flags;
__u32 uidx;
- __u32 reserved0;
- __u64 sq_buf_addr;
+ __u32 bfreg_index;
+ union {
+ __u64 sq_buf_addr;
+ __u64 access_key;
+ };
};
/* RX Hash function flags */
@@ -307,7 +315,7 @@ enum mlx5_rx_hash_fields {
MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
/* Save bits for future fields */
- MLX5_RX_HASH_INNER = 1 << 31
+ MLX5_RX_HASH_INNER = (1UL << 31),
};
struct mlx5_ib_create_qp_rss {
@@ -354,6 +362,11 @@ struct mlx5_ib_create_ah_resp {
__u8 reserved[6];
};
+struct mlx5_ib_modify_qp_resp {
+ __u32 response_length;
+ __u32 dctn;
+};
+
struct mlx5_ib_create_wq_resp {
__u32 response_length;
__u32 reserved;
@@ -368,4 +381,36 @@ struct mlx5_ib_modify_wq {
__u32 comp_mask;
__u32 reserved;
};
+
+struct mlx5_ib_clock_info {
+ __u32 sign;
+ __u32 resv;
+ __u64 nsec;
+ __u64 cycles;
+ __u64 frac;
+ __u32 mult;
+ __u32 shift;
+ __u64 mask;
+ __u64 overflow_period;
+};
+
+enum mlx5_ib_mmap_cmd {
+ MLX5_IB_MMAP_REGULAR_PAGE = 0,
+ MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+ MLX5_IB_MMAP_WC_PAGE = 2,
+ MLX5_IB_MMAP_NC_PAGE = 3,
+ /* 5 is chosen in order to be compatible with old versions of libmlx5 */
+ MLX5_IB_MMAP_CORE_CLOCK = 5,
+ MLX5_IB_MMAP_ALLOC_WC = 6,
+ MLX5_IB_MMAP_CLOCK_INFO = 7,
+};
+
+enum {
+ MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
+};
+
+/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
+enum {
+ MLX5_IB_CLOCK_INFO_V1 = 0,
+};
#endif /* MLX5_ABI_USER_H */
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index cc002e316d09..17e59bec169e 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -236,6 +236,10 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_PORT_NEW,
RDMA_NLDEV_CMD_PORT_DEL,
+ RDMA_NLDEV_CMD_RES_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
+
RDMA_NLDEV_NUM_OPS
};
@@ -303,6 +307,51 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_DEV_NODE_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_SUMMARY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, /* string */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, /* u64 */
+
+ RDMA_NLDEV_ATTR_RES_QP, /* nested table */
+ RDMA_NLDEV_ATTR_RES_QP_ENTRY, /* nested table */
+ /*
+ * Local QPN
+ */
+ RDMA_NLDEV_ATTR_RES_LQPN, /* u32 */
+ /*
+ * Remote QPN,
+ * Applicable for RC and UC only; see IBTA 11.2.5.3 QUERY QUEUE PAIR
+ */
+ RDMA_NLDEV_ATTR_RES_RQPN, /* u32 */
+ /*
+ * Receive Queue PSN,
+ * Applicable for RC and UC only; see IBTA 11.2.5.3 QUERY QUEUE PAIR
+ */
+ RDMA_NLDEV_ATTR_RES_RQ_PSN, /* u32 */
+ /*
+ * Send Queue PSN
+ */
+ RDMA_NLDEV_ATTR_RES_SQ_PSN, /* u32 */
+ RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, /* u8 */
+ /*
+ * QP types as visible to RDMA/core; the reserved QP types
+ * are not exported through this interface.
+ */
+ RDMA_NLDEV_ATTR_RES_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_STATE, /* u8 */
+ /*
+ * Process ID of the process that created the object;
+ * for objects of kernel origin, no PID exists.
+ */
+ RDMA_NLDEV_ATTR_RES_PID, /* u32 */
+ /*
+ * The name of the process that created the resource.
+ * It exists only for kernel objects; for user-created
+ * objects, userspace should read /proc/PID/comm instead.
+ */
+ RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */
+
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index aaa352f2f110..02ca0d0f1eb7 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -52,12 +52,14 @@
#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
-#define PVRDMA_UAR_QP_SEND BIT(30) /* Send bit. */
-#define PVRDMA_UAR_QP_RECV BIT(31) /* Recv bit. */
+#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */
+#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
-#define PVRDMA_UAR_CQ_ARM_SOL BIT(29) /* Arm solicited bit. */
-#define PVRDMA_UAR_CQ_ARM BIT(30) /* Arm bit. */
-#define PVRDMA_UAR_CQ_POLL BIT(31) /* Poll bit. */
+#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */
+#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */
+#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */
+#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */
+#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */
enum pvrdma_wr_opcode {
PVRDMA_WR_RDMA_WRITE,
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index c227ccba60ae..07d61583fd02 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -214,6 +214,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_IMA_ADPCM ((__force snd_pcm_format_t) 22)
#define SNDRV_PCM_FORMAT_MPEG ((__force snd_pcm_format_t) 23)
#define SNDRV_PCM_FORMAT_GSM ((__force snd_pcm_format_t) 24)
+#define SNDRV_PCM_FORMAT_S20_LE ((__force snd_pcm_format_t) 25) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_S20_BE ((__force snd_pcm_format_t) 26) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_LE ((__force snd_pcm_format_t) 27) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_BE ((__force snd_pcm_format_t) 28) /* in four bytes, LSB justified */
+/* gap in the numbering for a future standard linear format */
#define SNDRV_PCM_FORMAT_SPECIAL ((__force snd_pcm_format_t) 31)
#define SNDRV_PCM_FORMAT_S24_3LE ((__force snd_pcm_format_t) 32) /* in three bytes */
#define SNDRV_PCM_FORMAT_S24_3BE ((__force snd_pcm_format_t) 33) /* in three bytes */
@@ -248,6 +253,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_LE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_LE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_LE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_LE
#endif
#ifdef SNDRV_BIG_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_BE
@@ -259,6 +266,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_BE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_BE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_BE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_BE
#endif
typedef int __bitwise snd_pcm_subformat_t;
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 326054a72bc7..8ba0112e5336 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -222,6 +222,17 @@
* %SKL_TKN_MM_U32_NUM_IN_FMT: Number of input formats
* %SKL_TKN_MM_U32_NUM_OUT_FMT: Number of output formats
*
+ * %SKL_TKN_U32_ASTATE_IDX: Table Index for the A-State entry to be filled
+ * with kcps and clock source
+ *
+ * %SKL_TKN_U32_ASTATE_COUNT: Number of valid entries in A-State table
+ *
+ * %SKL_TKN_U32_ASTATE_KCPS: Specifies the core load threshold (in kilo
+ * cycles per second) below which DSP is clocked
+ * from source specified by clock source.
+ *
+ * %SKL_TKN_U32_ASTATE_CLK_SRC: Clock source for A-State entry
+ *
* module_id and loadable flags don't have tokens as these values will be
* read from the DSP FW manifest
*
@@ -309,7 +320,11 @@ enum SKL_TKNS {
SKL_TKN_MM_U32_NUM_IN_FMT,
SKL_TKN_MM_U32_NUM_OUT_FMT,
- SKL_TKN_MAX = SKL_TKN_MM_U32_NUM_OUT_FMT,
+ SKL_TKN_U32_ASTATE_IDX,
+ SKL_TKN_U32_ASTATE_COUNT,
+ SKL_TKN_U32_ASTATE_KCPS,
+ SKL_TKN_U32_ASTATE_CLK_SRC,
+ SKL_TKN_MAX = SKL_TKN_U32_ASTATE_CLK_SRC,
};
#endif
diff --git a/init/Kconfig b/init/Kconfig
index 690a381adee0..a9a2e2c86671 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -461,6 +461,7 @@ endmenu # "CPU/Task time and stats accounting"
config CPU_ISOLATION
bool "CPU isolation"
+ depends on SMP || COMPILE_TEST
default y
help
Make sure that CPUs running critical tasks are not disturbed by
@@ -1396,6 +1397,13 @@ config BPF_SYSCALL
Enable the bpf() system call that allows to manipulate eBPF
programs and maps via file descriptors.
+config BPF_JIT_ALWAYS_ON
+ bool "Permanently enable BPF JIT and remove BPF interpreter"
+ depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
+ help
+ Enables BPF JIT and removes BPF interpreter to avoid
+ speculative execution of BPF instructions by the interpreter
+
config USERFAULTFD
bool "Enable userfaultfd() system call"
select ANON_INODES
diff --git a/init/Makefile b/init/Makefile
index 1dbb23787290..a3e5ce2bcf08 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -13,9 +13,7 @@ obj-$(CONFIG_BLK_DEV_INITRD) += initramfs.o
endif
obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
-ifneq ($(CONFIG_ARCH_INIT_TASK),y)
obj-y += init_task.o
-endif
mounts-y := do_mounts.o
mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o
diff --git a/init/init_task.c b/init/init_task.c
index 9325fee7dc82..3ac6e754cf64 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -13,19 +13,175 @@
#include <asm/pgtable.h>
#include <linux/uaccess.h>
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+static struct signal_struct init_signals = {
+ .nr_threads = 1,
+ .thread_head = LIST_HEAD_INIT(init_task.thread_node),
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(init_signals.wait_chldexit),
+ .shared_pending = {
+ .list = LIST_HEAD_INIT(init_signals.shared_pending.list),
+ .signal = {{0}}
+ },
+ .rlim = INIT_RLIMITS,
+ .cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
+#ifdef CONFIG_POSIX_TIMERS
+ .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
+ .cputimer = {
+ .cputime_atomic = INIT_CPUTIME_ATOMIC,
+ .running = false,
+ .checking_timer = false,
+ },
+#endif
+ INIT_CPU_TIMERS(init_signals)
+ INIT_PREV_CPUTIME(init_signals)
+};
+
+static struct sighand_struct init_sighand = {
+ .count = ATOMIC_INIT(1),
+ .action = { { { .sa_handler = SIG_DFL, } }, },
+ .siglock = __SPIN_LOCK_UNLOCKED(init_sighand.siglock),
+ .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
+};
-/* Initial task structure */
-struct task_struct init_task = INIT_TASK(init_task);
+/*
+ * Set up the first task table; touch at your own risk! Base=0,
+ * limit=0x1fffff (=2MB)
+ */
+struct task_struct init_task
+#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+ __init_task_data
+#endif
+= {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ .thread_info = INIT_THREAD_INFO(init_task),
+ .stack_refcount = ATOMIC_INIT(1),
+#endif
+ .state = 0,
+ .stack = init_stack,
+ .usage = ATOMIC_INIT(2),
+ .flags = PF_KTHREAD,
+ .prio = MAX_PRIO - 20,
+ .static_prio = MAX_PRIO - 20,
+ .normal_prio = MAX_PRIO - 20,
+ .policy = SCHED_NORMAL,
+ .cpus_allowed = CPU_MASK_ALL,
+ .nr_cpus_allowed= NR_CPUS,
+ .mm = NULL,
+ .active_mm = &init_mm,
+ .restart_block = {
+ .fn = do_no_restart_syscall,
+ },
+ .se = {
+ .group_node = LIST_HEAD_INIT(init_task.se.group_node),
+ },
+ .rt = {
+ .run_list = LIST_HEAD_INIT(init_task.rt.run_list),
+ .time_slice = RR_TIMESLICE,
+ },
+ .tasks = LIST_HEAD_INIT(init_task.tasks),
+#ifdef CONFIG_SMP
+ .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+#endif
+#ifdef CONFIG_CGROUP_SCHED
+ .sched_task_group = &root_task_group,
+#endif
+ .ptraced = LIST_HEAD_INIT(init_task.ptraced),
+ .ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),
+ .real_parent = &init_task,
+ .parent = &init_task,
+ .children = LIST_HEAD_INIT(init_task.children),
+ .sibling = LIST_HEAD_INIT(init_task.sibling),
+ .group_leader = &init_task,
+ RCU_POINTER_INITIALIZER(real_cred, &init_cred),
+ RCU_POINTER_INITIALIZER(cred, &init_cred),
+ .comm = INIT_TASK_COMM,
+ .thread = INIT_THREAD,
+ .fs = &init_fs,
+ .files = &init_files,
+ .signal = &init_signals,
+ .sighand = &init_sighand,
+ .nsproxy = &init_nsproxy,
+ .pending = {
+ .list = LIST_HEAD_INIT(init_task.pending.list),
+ .signal = {{0}}
+ },
+ .blocked = {{0}},
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock),
+ .journal_info = NULL,
+ INIT_CPU_TIMERS(init_task)
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
+ .timer_slack_ns = 50000, /* 50 usec default slack */
+ .pids = {
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID),
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID),
+ },
+ .thread_group = LIST_HEAD_INIT(init_task.thread_group),
+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head),
+#ifdef CONFIG_AUDITSYSCALL
+ .loginuid = INVALID_UID,
+ .sessionid = (unsigned int)-1,
+#endif
+#ifdef CONFIG_PERF_EVENTS
+ .perf_event_mutex = __MUTEX_INITIALIZER(init_task.perf_event_mutex),
+ .perf_event_list = LIST_HEAD_INIT(init_task.perf_event_list),
+#endif
+#ifdef CONFIG_PREEMPT_RCU
+ .rcu_read_lock_nesting = 0,
+ .rcu_read_unlock_special.s = 0,
+ .rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry),
+ .rcu_blocked_node = NULL,
+#endif
+#ifdef CONFIG_TASKS_RCU
+ .rcu_tasks_holdout = false,
+ .rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list),
+ .rcu_tasks_idle_cpu = -1,
+#endif
+#ifdef CONFIG_CPUSETS
+ .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
+#endif
+#ifdef CONFIG_RT_MUTEXES
+ .pi_waiters = RB_ROOT_CACHED,
+ .pi_top_task = NULL,
+#endif
+ INIT_PREV_CPUTIME(init_task)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+ .vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount),
+ .vtime.starttime = 0,
+ .vtime.state = VTIME_SYS,
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+ .numa_preferred_nid = -1,
+ .numa_group = NULL,
+ .numa_faults = NULL,
+#endif
+#ifdef CONFIG_KASAN
+ .kasan_depth = 1,
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ .softirqs_enabled = 1,
+#endif
+#ifdef CONFIG_LOCKDEP
+ .lockdep_recursion = 0,
+#endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .ret_stack = NULL,
+#endif
+#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
+ .trace_recursion = 0,
+#endif
+#ifdef CONFIG_LIVEPATCH
+ .patch_state = KLP_UNDEFINED,
+#endif
+#ifdef CONFIG_SECURITY
+ .security = NULL,
+#endif
+};
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure. Alignment of this is handled by a special
* linker map entry.
*/
-union thread_union init_thread_union __init_task_data = {
#ifndef CONFIG_THREAD_INFO_IN_TASK
- INIT_THREAD_INFO(init_task)
+struct thread_info init_thread_info __init_thread_info = INIT_THREAD_INFO(init_task);
#endif
-};
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 9649ecd8a73a..690ae6665500 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -270,13 +270,30 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
* that means the min(mq_maxmsg, max_priorities) * struct
* posix_msg_tree_node.
*/
+
+ ret = -EINVAL;
+ if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
+ goto out_inode;
+ if (capable(CAP_SYS_RESOURCE)) {
+ if (info->attr.mq_maxmsg > HARD_MSGMAX ||
+ info->attr.mq_msgsize > HARD_MSGSIZEMAX)
+ goto out_inode;
+ } else {
+ if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
+ info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
+ goto out_inode;
+ }
+ ret = -EOVERFLOW;
+ /* check for overflow */
+ if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
+ goto out_inode;
mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
sizeof(struct posix_msg_tree_node);
-
- mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
- info->attr.mq_msgsize);
-
+ mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
+ if (mq_bytes + mq_treesize < mq_bytes)
+ goto out_inode;
+ mq_bytes += mq_treesize;
spin_lock(&mq_lock);
if (u->mq_bytes + mq_bytes < u->mq_bytes ||
u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
@@ -308,8 +325,9 @@ err:
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
- struct ipc_namespace *ns = sb->s_fs_info;
+ struct ipc_namespace *ns = data;
+ sb->s_fs_info = ns;
sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -326,18 +344,44 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
+static struct file_system_type mqueue_fs_type;
+/*
+ * Return value is pinned only by reference in ->mq_mnt; it will
+ * live until ipcns dies. Caller does not need to drop it.
+ */
+static struct vfsmount *mq_internal_mount(void)
+{
+ struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+ struct vfsmount *m = ns->mq_mnt;
+ if (m)
+ return m;
+ m = kern_mount_data(&mqueue_fs_type, ns);
+ spin_lock(&mq_lock);
+ if (unlikely(ns->mq_mnt)) {
+ spin_unlock(&mq_lock);
+ if (!IS_ERR(m))
+ kern_unmount(m);
+ return ns->mq_mnt;
+ }
+ if (!IS_ERR(m))
+ ns->mq_mnt = m;
+ spin_unlock(&mq_lock);
+ return m;
+}
+
static struct dentry *mqueue_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
- struct ipc_namespace *ns;
- if (flags & SB_KERNMOUNT) {
- ns = data;
- data = NULL;
- } else {
- ns = current->nsproxy->ipc_ns;
- }
- return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
+ struct vfsmount *m;
+ if (flags & SB_KERNMOUNT)
+ return mount_nodev(fs_type, flags, data, mqueue_fill_super);
+ m = mq_internal_mount();
+ if (IS_ERR(m))
+ return ERR_CAST(m);
+ atomic_inc(&m->mnt_sb->s_active);
+ down_write(&m->mnt_sb->s_umount);
+ return dget(m->mnt_root);
}
static void init_once(void *foo)
@@ -416,11 +460,11 @@ static void mqueue_evict_inode(struct inode *inode)
put_ipc_ns(ipc_ns);
}
-static int mqueue_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool excl)
+static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
+ struct inode *dir = dentry->d_parent->d_inode;
struct inode *inode;
- struct mq_attr *attr = dentry->d_fsdata;
+ struct mq_attr *attr = arg;
int error;
struct ipc_namespace *ipc_ns;
@@ -461,6 +505,12 @@ out_unlock:
return error;
}
+static int mqueue_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool excl)
+{
+ return mqueue_create_attr(dentry, mode, NULL);
+}
+
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
@@ -519,10 +569,10 @@ static int mqueue_flush_file(struct file *filp, fl_owner_t id)
return 0;
}
-static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
+static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
- int retval = 0;
+ __poll_t retval = 0;
poll_wait(filp, &info->wait_q, poll_tab);
@@ -639,6 +689,7 @@ static void __do_notify(struct mqueue_inode_info *info)
case SIGEV_SIGNAL:
/* sends signal */
+ clear_siginfo(&sig_i);
sig_i.si_signo = info->notify.sigev_signo;
sig_i.si_errno = 0;
sig_i.si_code = SI_MESGQ;
@@ -690,96 +741,46 @@ static void remove_notification(struct mqueue_inode_info *info)
info->notify_user_ns = NULL;
}
-static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
-{
- int mq_treesize;
- unsigned long total_size;
-
- if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
- return -EINVAL;
- if (capable(CAP_SYS_RESOURCE)) {
- if (attr->mq_maxmsg > HARD_MSGMAX ||
- attr->mq_msgsize > HARD_MSGSIZEMAX)
- return -EINVAL;
- } else {
- if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
- attr->mq_msgsize > ipc_ns->mq_msgsize_max)
- return -EINVAL;
- }
- /* check for overflow */
- if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
- return -EOVERFLOW;
- mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
- min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
- sizeof(struct posix_msg_tree_node);
- total_size = attr->mq_maxmsg * attr->mq_msgsize;
- if (total_size + mq_treesize < total_size)
- return -EOVERFLOW;
- return 0;
-}
-
-/*
- * Invoked when creating a new queue via sys_mq_open
- */
-static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
- struct path *path, int oflag, umode_t mode,
+static int prepare_open(struct dentry *dentry, int oflag, int ro,
+ umode_t mode, struct filename *name,
struct mq_attr *attr)
{
- const struct cred *cred = current_cred();
- int ret;
-
- if (attr) {
- ret = mq_attr_ok(ipc_ns, attr);
- if (ret)
- return ERR_PTR(ret);
- /* store for use during create */
- path->dentry->d_fsdata = attr;
- } else {
- struct mq_attr def_attr;
-
- def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
- ipc_ns->mq_msg_default);
- def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
- ipc_ns->mq_msgsize_default);
- ret = mq_attr_ok(ipc_ns, &def_attr);
- if (ret)
- return ERR_PTR(ret);
- }
-
- mode &= ~current_umask();
- ret = vfs_create(dir, path->dentry, mode, true);
- path->dentry->d_fsdata = NULL;
- if (ret)
- return ERR_PTR(ret);
- return dentry_open(path, oflag, cred);
-}
-
-/* Opens existing queue */
-static struct file *do_open(struct path *path, int oflag)
-{
static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
MAY_READ | MAY_WRITE };
int acc;
+
+ if (d_really_is_negative(dentry)) {
+ if (!(oflag & O_CREAT))
+ return -ENOENT;
+ if (ro)
+ return ro;
+ audit_inode_parent_hidden(name, dentry->d_parent);
+ return vfs_mkobj(dentry, mode & ~current_umask(),
+ mqueue_create_attr, attr);
+ }
+ /* it already existed */
+ audit_inode(name, dentry, 0);
+ if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
+ return -EEXIST;
if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
acc = oflag2acc[oflag & O_ACCMODE];
- if (inode_permission(d_inode(path->dentry), acc))
- return ERR_PTR(-EACCES);
- return dentry_open(path, oflag, current_cred());
+ return inode_permission(d_inode(dentry), acc);
}
static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
struct mq_attr *attr)
{
- struct path path;
- struct file *filp;
+ struct vfsmount *mnt = mq_internal_mount();
+ struct dentry *root;
struct filename *name;
+ struct path path;
int fd, error;
- struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
- struct vfsmount *mnt = ipc_ns->mq_mnt;
- struct dentry *root = mnt->mnt_root;
int ro;
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
+
audit_mq_open(oflag, mode, attr);
if (IS_ERR(name = getname(u_name)))
@@ -790,7 +791,7 @@ static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
goto out_putname;
ro = mnt_want_write(mnt); /* we'll drop it in any case */
- error = 0;
+ root = mnt->mnt_root;
inode_lock(d_inode(root));
path.dentry = lookup_one_len(name->name, root, strlen(name->name));
if (IS_ERR(path.dentry)) {
@@ -798,38 +799,14 @@ static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
goto out_putfd;
}
path.mnt = mntget(mnt);
-
- if (oflag & O_CREAT) {
- if (d_really_is_positive(path.dentry)) { /* entry already exists */
- audit_inode(name, path.dentry, 0);
- if (oflag & O_EXCL) {
- error = -EEXIST;
- goto out;
- }
- filp = do_open(&path, oflag);
- } else {
- if (ro) {
- error = ro;
- goto out;
- }
- audit_inode_parent_hidden(name, root);
- filp = do_create(ipc_ns, d_inode(root), &path,
- oflag, mode, attr);
- }
- } else {
- if (d_really_is_negative(path.dentry)) {
- error = -ENOENT;
- goto out;
- }
- audit_inode(name, path.dentry, 0);
- filp = do_open(&path, oflag);
+ error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
+ if (!error) {
+ struct file *file = dentry_open(&path, oflag, current_cred());
+ if (!IS_ERR(file))
+ fd_install(fd, file);
+ else
+ error = PTR_ERR(file);
}
-
- if (!IS_ERR(filp))
- fd_install(fd, filp);
- else
- error = PTR_ERR(filp);
-out:
path_put(&path);
out_putfd:
if (error) {
@@ -863,6 +840,9 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
struct vfsmount *mnt = ipc_ns->mq_mnt;
+ if (!mnt)
+ return -ENOENT;
+
name = getname(u_name);
if (IS_ERR(name))
return PTR_ERR(name);
@@ -1589,28 +1569,26 @@ int mq_init_ns(struct ipc_namespace *ns)
ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
ns->mq_msg_default = DFLT_MSG;
ns->mq_msgsize_default = DFLT_MSGSIZE;
+ ns->mq_mnt = NULL;
- ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
- if (IS_ERR(ns->mq_mnt)) {
- int err = PTR_ERR(ns->mq_mnt);
- ns->mq_mnt = NULL;
- return err;
- }
return 0;
}
void mq_clear_sbinfo(struct ipc_namespace *ns)
{
- ns->mq_mnt->mnt_sb->s_fs_info = NULL;
+ if (ns->mq_mnt)
+ ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}
void mq_put_mnt(struct ipc_namespace *ns)
{
- kern_unmount(ns->mq_mnt);
+ if (ns->mq_mnt)
+ kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
{
+ struct vfsmount *m;
int error;
mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
@@ -1632,6 +1610,10 @@ static int __init init_mqueue_fs(void)
if (error)
goto out_filesystem;
+ m = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
+ if (IS_ERR(m))
+ goto out_filesystem;
+ init_ipc_ns.mq_mnt = m;
return 0;
out_filesystem:
diff --git a/kernel/Makefile b/kernel/Makefile
index 172d151d429c..f85ae5dfa474 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_KCOV) += kcov.o
obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_FAIL_FUNCTION) += fail_function.o
obj-$(CONFIG_KGDB) += debug/
obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
diff --git a/kernel/acct.c b/kernel/acct.c
index d15c0ee4d955..addf7732fb56 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
{
struct kstatfs sbuf;
- if (time_is_before_jiffies(acct->needcheck))
+ if (time_is_after_jiffies(acct->needcheck))
goto out;
/* May block */
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index e691da0b3bab..a713fd23ec88 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -9,9 +9,11 @@ obj-$(CONFIG_BPF_SYSCALL) += devmap.o
obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
obj-$(CONFIG_BPF_SYSCALL) += offload.o
ifeq ($(CONFIG_STREAM_PARSER),y)
+ifeq ($(CONFIG_INET),y)
obj-$(CONFIG_BPF_SYSCALL) += sockmap.o
endif
endif
+endif
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
endif
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 7c25426d3cf5..b1f66480135b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -49,34 +49,64 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
}
/* Called from syscall */
-static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+static int array_map_alloc_check(union bpf_attr *attr)
{
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
int numa_node = bpf_map_attr_numa_node(attr);
- struct bpf_array *array;
- u64 array_size;
- u32 elem_size;
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size == 0 ||
attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
(percpu && numa_node != NUMA_NO_NODE))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (attr->value_size > KMALLOC_MAX_SIZE)
/* if value_size is bigger, the user space won't be able to
* access the elements.
*/
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
+
+ return 0;
+}
+
+static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+{
+ bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
+ int numa_node = bpf_map_attr_numa_node(attr);
+ u32 elem_size, index_mask, max_entries;
+ bool unpriv = !capable(CAP_SYS_ADMIN);
+ struct bpf_array *array;
+ u64 array_size, mask64;
elem_size = round_up(attr->value_size, 8);
+ max_entries = attr->max_entries;
+
+ /* On 32 bit archs roundup_pow_of_two() with max_entries that has
+ * upper most bit set in u32 space is undefined behavior due to
+ * resulting 1U << 32, so do it manually here in u64 space.
+ */
+ mask64 = fls_long(max_entries - 1);
+ mask64 = 1ULL << mask64;
+ mask64 -= 1;
+
+ index_mask = mask64;
+ if (unpriv) {
+ /* round up array size to nearest power of 2,
+ * since cpu will speculate within index_mask limits
+ */
+ max_entries = index_mask + 1;
+ /* Check for overflows. */
+ if (max_entries < attr->max_entries)
+ return ERR_PTR(-E2BIG);
+ }
+
array_size = sizeof(*array);
if (percpu)
- array_size += (u64) attr->max_entries * sizeof(void *);
+ array_size += (u64) max_entries * sizeof(void *);
else
- array_size += (u64) attr->max_entries * elem_size;
+ array_size += (u64) max_entries * elem_size;
/* make sure there is no u32 overflow later in round_up() */
if (array_size >= U32_MAX - PAGE_SIZE)
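The mask64 dance above avoids the undefined 1U << 32 that roundup_pow_of_two() would hit when max_entries has the top bit set. A userspace re-implementation of the same math, to make the boundary case concrete (fls_long() is mimicked with __builtin_clzll()):

#include <stdint.h>

static uint64_t index_mask_for(uint32_t max_entries)
{
        unsigned int fls;

        if (max_entries <= 1)
                return 0;               /* single slot: only index 0 is valid */
        /* fls_long(max_entries - 1), done in 64-bit space */
        fls = 64 - __builtin_clzll((uint64_t)max_entries - 1);
        /* e.g. max_entries = 0x80000001 gives mask = 0xffffffff, and the
         * unprivileged max_entries = mask + 1 overflows u32: the -E2BIG case */
        return (1ULL << fls) - 1;
}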
@@ -86,14 +116,11 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array = bpf_map_area_alloc(array_size, numa_node);
if (!array)
return ERR_PTR(-ENOMEM);
+ array->index_mask = index_mask;
+ array->map.unpriv_array = unpriv;
/* copy mandatory map attributes */
- array->map.map_type = attr->map_type;
- array->map.key_size = attr->key_size;
- array->map.value_size = attr->value_size;
- array->map.max_entries = attr->max_entries;
- array->map.map_flags = attr->map_flags;
- array->map.numa_node = numa_node;
+ bpf_map_init_from_attr(&array->map, attr);
array->elem_size = elem_size;
if (!percpu)
@@ -121,12 +148,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
if (unlikely(index >= array->map.max_entries))
return NULL;
- return array->value + array->elem_size * index;
+ return array->value + array->elem_size * (index & array->index_mask);
}
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
struct bpf_insn *insn = insn_buf;
u32 elem_size = round_up(map->value_size, 8);
const int ret = BPF_REG_0;
@@ -135,7 +163,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
- *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+ if (map->unpriv_array) {
+ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
+ *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+ } else {
+ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+ }
if (is_power_of_2(elem_size)) {
*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
@@ -157,7 +190,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
if (unlikely(index >= array->map.max_entries))
return NULL;
- return this_cpu_ptr(array->pptrs[index]);
+ return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -177,7 +210,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
*/
size = round_up(map->value_size, 8);
rcu_read_lock();
- pptr = array->pptrs[index];
+ pptr = array->pptrs[index & array->index_mask];
for_each_possible_cpu(cpu) {
bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
off += size;
@@ -225,10 +258,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
return -EEXIST;
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
- memcpy(this_cpu_ptr(array->pptrs[index]),
+ memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
value, map->value_size);
else
- memcpy(array->value + array->elem_size * index,
+ memcpy(array->value +
+ array->elem_size * (index & array->index_mask),
value, map->value_size);
return 0;
}
@@ -262,7 +296,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
*/
size = round_up(map->value_size, 8);
rcu_read_lock();
- pptr = array->pptrs[index];
+ pptr = array->pptrs[index & array->index_mask];
for_each_possible_cpu(cpu) {
bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
off += size;
@@ -296,6 +330,7 @@ static void array_map_free(struct bpf_map *map)
}
const struct bpf_map_ops array_map_ops = {
+ .map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
.map_free = array_map_free,
.map_get_next_key = array_map_get_next_key,
@@ -306,6 +341,7 @@ const struct bpf_map_ops array_map_ops = {
};
const struct bpf_map_ops percpu_array_map_ops = {
+ .map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
.map_free = array_map_free,
.map_get_next_key = array_map_get_next_key,
@@ -314,12 +350,12 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_delete_elem = array_map_delete_elem,
};
-static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
+static int fd_array_map_alloc_check(union bpf_attr *attr)
{
/* only file descriptors can be stored in this type of map */
if (attr->value_size != sizeof(u32))
- return ERR_PTR(-EINVAL);
- return array_map_alloc(attr);
+ return -EINVAL;
+ return array_map_alloc_check(attr);
}
static void fd_array_map_free(struct bpf_map *map)
@@ -443,7 +479,8 @@ void bpf_fd_array_map_clear(struct bpf_map *map)
}
const struct bpf_map_ops prog_array_map_ops = {
- .map_alloc = fd_array_map_alloc,
+ .map_alloc_check = fd_array_map_alloc_check,
+ .map_alloc = array_map_alloc,
.map_free = fd_array_map_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
@@ -530,7 +567,8 @@ static void perf_event_fd_array_release(struct bpf_map *map,
}
const struct bpf_map_ops perf_event_array_map_ops = {
- .map_alloc = fd_array_map_alloc,
+ .map_alloc_check = fd_array_map_alloc_check,
+ .map_alloc = array_map_alloc,
.map_free = fd_array_map_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
@@ -561,7 +599,8 @@ static void cgroup_fd_array_free(struct bpf_map *map)
}
const struct bpf_map_ops cgroup_array_map_ops = {
- .map_alloc = fd_array_map_alloc,
+ .map_alloc_check = fd_array_map_alloc_check,
+ .map_alloc = array_map_alloc,
.map_free = cgroup_fd_array_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
@@ -579,7 +618,7 @@ static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
if (IS_ERR(inner_map_meta))
return inner_map_meta;
- map = fd_array_map_alloc(attr);
+ map = array_map_alloc(attr);
if (IS_ERR(map)) {
bpf_map_meta_free(inner_map_meta);
return map;
@@ -613,6 +652,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
static u32 array_of_map_gen_lookup(struct bpf_map *map,
struct bpf_insn *insn_buf)
{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
u32 elem_size = round_up(map->value_size, 8);
struct bpf_insn *insn = insn_buf;
const int ret = BPF_REG_0;
@@ -621,7 +661,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
- *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+ if (map->unpriv_array) {
+ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
+ *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+ } else {
+ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+ }
if (is_power_of_2(elem_size))
*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
else
@@ -636,6 +681,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
}
const struct bpf_map_ops array_of_maps_map_ops = {
+ .map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_of_map_alloc,
.map_free = array_of_map_free,
.map_get_next_key = array_map_get_next_key,
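
The recurring refactor in this file splits attribute validation out of ->map_alloc() into a side-effect-free ->map_alloc_check() that the syscall layer can run first. A hedged miniature of the two-phase pattern; the names and struct layout below are illustrative, not the kernel API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct attr { unsigned int max_entries, key_size; };
struct map  { struct attr a; };

/* Phase 1: cheap validation, no side effects, plain error code. */
static int map_alloc_check(const struct attr *a)
{
	if (a->max_entries == 0 || a->key_size != 4)
		return -EINVAL;
	return 0;
}

/* Phase 2: allocation only runs once the check has passed. */
static struct map *map_alloc(const struct attr *a)
{
	struct map *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	m->a = *a;
	return m;
}

int main(void)
{
	struct attr a = { .max_entries = 16, .key_size = 4 };

	if (map_alloc_check(&a) == 0) {
		struct map *m = map_alloc(&a);

		printf("allocated: %p\n", (void *)m);
		free(m);
	}
	return 0;
}
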
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index b789ab78d28f..c1c0b60d3f2f 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -568,6 +568,8 @@ static bool cgroup_dev_is_valid_access(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
+ const int size_default = sizeof(__u32);
+
if (type == BPF_WRITE)
return false;
@@ -576,8 +578,17 @@ static bool cgroup_dev_is_valid_access(int off, int size,
/* The verifier guarantees that size > 0. */
if (off % size != 0)
return false;
- if (size != sizeof(__u32))
- return false;
+
+ switch (off) {
+ case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
+ bpf_ctx_record_field_size(info, size_default);
+ if (!bpf_ctx_narrow_access_ok(off, size, size_default))
+ return false;
+ break;
+ default:
+ if (size != size_default)
+ return false;
+ }
return true;
}
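
The switch above newly permits "narrow" loads from the access_type field: reads of 1 or 2 bytes are fine as long as they are power-of-two sized and fit inside the 4-byte field. A hedged sketch of the size rule in the spirit of bpf_ctx_narrow_access_ok() (the in-kernel helper carries more context, so treat this as an approximation):

#include <stdbool.h>
#include <stdio.h>

/* A narrowed ctx load is acceptable if it is a power-of-two size
 * that fits within the default (full) field width.
 */
static bool narrow_access_ok(unsigned int size, unsigned int size_default)
{
	return size <= size_default && (size & (size - 1)) == 0;
}

int main(void)
{
	printf("1-byte load from u32 field: %d\n", narrow_access_ok(1, 4));
	printf("3-byte load from u32 field: %d\n", narrow_access_ok(3, 4));
	return 0;
}
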
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 86b50aa26ee8..5f35f93dcab2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -94,6 +94,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
fp->pages = size / PAGE_SIZE;
fp->aux = aux;
fp->aux->prog = fp;
+ fp->jit_requested = ebpf_jit_enabled();
INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
@@ -217,30 +218,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
return 0;
}
-static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
-{
- return BPF_CLASS(insn->code) == BPF_JMP &&
- /* Call and Exit are both special jumps with no
- * target inside the BPF instruction image.
- */
- BPF_OP(insn->code) != BPF_CALL &&
- BPF_OP(insn->code) != BPF_EXIT;
-}
-
static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
struct bpf_insn *insn = prog->insnsi;
u32 i, insn_cnt = prog->len;
+ bool pseudo_call;
+ u8 code;
+ int off;
for (i = 0; i < insn_cnt; i++, insn++) {
- if (!bpf_is_jmp_and_has_target(insn))
+ code = insn->code;
+ if (BPF_CLASS(code) != BPF_JMP)
continue;
+ if (BPF_OP(code) == BPF_EXIT)
+ continue;
+ if (BPF_OP(code) == BPF_CALL) {
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ pseudo_call = true;
+ else
+ continue;
+ } else {
+ pseudo_call = false;
+ }
+ off = pseudo_call ? insn->imm : insn->off;
/* Adjust offset of jmps if we cross boundaries. */
- if (i < pos && i + insn->off + 1 > pos)
- insn->off += delta;
- else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
- insn->off -= delta;
+ if (i < pos && i + off + 1 > pos)
+ off += delta;
+ else if (i > pos + delta && i + off + 1 <= pos + delta)
+ off -= delta;
+
+ if (pseudo_call)
+ insn->imm = off;
+ else
+ insn->off = off;
}
}
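
bpf_adj_branches() now also relocates BPF_PSEUDO_CALL instructions, whose relative target lives in insn->imm rather than insn->off. The adjustment rule itself is unchanged; a standalone sketch of it, applied to a single instruction:

#include <stdio.h>

/* Shift a relative jump/call target when 'delta' instructions are
 * inserted at position 'pos'.  'i' is the instruction's own index,
 * 'off' its relative target (insn->off for jumps, insn->imm for
 * pseudo calls) -- same arithmetic as bpf_adj_branches() above.
 */
static int adjust_target(unsigned int i, int off, unsigned int pos,
			 unsigned int delta)
{
	if (i < pos && i + off + 1 > pos)
		off += delta;		/* target sits past the patch */
	else if (i > pos + delta && i + off + 1 <= pos + delta)
		off -= delta;		/* target sits before the patch */
	return off;
}

int main(void)
{
	/* insn 2 jumping +5 (to insn 8); 3 insns inserted at index 4 */
	printf("new target offset: %d\n", adjust_target(2, 5, 4, 3));
	return 0;
}
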
@@ -289,6 +300,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
}
#ifdef CONFIG_BPF_JIT
+/* All BPF JIT sysctl knobs here. */
+int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
+int bpf_jit_harden __read_mostly;
+int bpf_jit_kallsyms __read_mostly;
+
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
unsigned long *symbol_start,
@@ -370,8 +386,6 @@ static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;
-int bpf_jit_kallsyms __read_mostly;
-
static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
@@ -552,8 +566,6 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
-int bpf_jit_harden __read_mostly;
-
static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux,
struct bpf_insn *to_buff)
@@ -711,7 +723,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
struct bpf_insn *insn;
int i, rewritten;
- if (!bpf_jit_blinding_enabled())
+ if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
return prog;
clone = bpf_prog_clone_create(prog, GFP_USER);
@@ -753,13 +765,16 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
i += insn_delta;
}
+ clone->blinded = 1;
return clone;
}
#endif /* CONFIG_BPF_JIT */
/* Base function for offset calculation. Needs to go into .text section,
* therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
+ * anyway later on, so do not let the compiler omit it. This also needs
+ * to go into kallsyms for correlation from e.g. bpftool, so naming
+ * must not change.
*/
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
@@ -767,6 +782,138 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
+/* All UAPI available opcodes. */
+#define BPF_INSN_MAP(INSN_2, INSN_3) \
+ /* 32 bit ALU operations. */ \
+ /* Register based. */ \
+ INSN_3(ALU, ADD, X), \
+ INSN_3(ALU, SUB, X), \
+ INSN_3(ALU, AND, X), \
+ INSN_3(ALU, OR, X), \
+ INSN_3(ALU, LSH, X), \
+ INSN_3(ALU, RSH, X), \
+ INSN_3(ALU, XOR, X), \
+ INSN_3(ALU, MUL, X), \
+ INSN_3(ALU, MOV, X), \
+ INSN_3(ALU, DIV, X), \
+ INSN_3(ALU, MOD, X), \
+ INSN_2(ALU, NEG), \
+ INSN_3(ALU, END, TO_BE), \
+ INSN_3(ALU, END, TO_LE), \
+ /* Immediate based. */ \
+ INSN_3(ALU, ADD, K), \
+ INSN_3(ALU, SUB, K), \
+ INSN_3(ALU, AND, K), \
+ INSN_3(ALU, OR, K), \
+ INSN_3(ALU, LSH, K), \
+ INSN_3(ALU, RSH, K), \
+ INSN_3(ALU, XOR, K), \
+ INSN_3(ALU, MUL, K), \
+ INSN_3(ALU, MOV, K), \
+ INSN_3(ALU, DIV, K), \
+ INSN_3(ALU, MOD, K), \
+ /* 64 bit ALU operations. */ \
+ /* Register based. */ \
+ INSN_3(ALU64, ADD, X), \
+ INSN_3(ALU64, SUB, X), \
+ INSN_3(ALU64, AND, X), \
+ INSN_3(ALU64, OR, X), \
+ INSN_3(ALU64, LSH, X), \
+ INSN_3(ALU64, RSH, X), \
+ INSN_3(ALU64, XOR, X), \
+ INSN_3(ALU64, MUL, X), \
+ INSN_3(ALU64, MOV, X), \
+ INSN_3(ALU64, ARSH, X), \
+ INSN_3(ALU64, DIV, X), \
+ INSN_3(ALU64, MOD, X), \
+ INSN_2(ALU64, NEG), \
+ /* Immediate based. */ \
+ INSN_3(ALU64, ADD, K), \
+ INSN_3(ALU64, SUB, K), \
+ INSN_3(ALU64, AND, K), \
+ INSN_3(ALU64, OR, K), \
+ INSN_3(ALU64, LSH, K), \
+ INSN_3(ALU64, RSH, K), \
+ INSN_3(ALU64, XOR, K), \
+ INSN_3(ALU64, MUL, K), \
+ INSN_3(ALU64, MOV, K), \
+ INSN_3(ALU64, ARSH, K), \
+ INSN_3(ALU64, DIV, K), \
+ INSN_3(ALU64, MOD, K), \
+ /* Call instruction. */ \
+ INSN_2(JMP, CALL), \
+ /* Exit instruction. */ \
+ INSN_2(JMP, EXIT), \
+ /* Jump instructions. */ \
+ /* Register based. */ \
+ INSN_3(JMP, JEQ, X), \
+ INSN_3(JMP, JNE, X), \
+ INSN_3(JMP, JGT, X), \
+ INSN_3(JMP, JLT, X), \
+ INSN_3(JMP, JGE, X), \
+ INSN_3(JMP, JLE, X), \
+ INSN_3(JMP, JSGT, X), \
+ INSN_3(JMP, JSLT, X), \
+ INSN_3(JMP, JSGE, X), \
+ INSN_3(JMP, JSLE, X), \
+ INSN_3(JMP, JSET, X), \
+ /* Immediate based. */ \
+ INSN_3(JMP, JEQ, K), \
+ INSN_3(JMP, JNE, K), \
+ INSN_3(JMP, JGT, K), \
+ INSN_3(JMP, JLT, K), \
+ INSN_3(JMP, JGE, K), \
+ INSN_3(JMP, JLE, K), \
+ INSN_3(JMP, JSGT, K), \
+ INSN_3(JMP, JSLT, K), \
+ INSN_3(JMP, JSGE, K), \
+ INSN_3(JMP, JSLE, K), \
+ INSN_3(JMP, JSET, K), \
+ INSN_2(JMP, JA), \
+ /* Store instructions. */ \
+ /* Register based. */ \
+ INSN_3(STX, MEM, B), \
+ INSN_3(STX, MEM, H), \
+ INSN_3(STX, MEM, W), \
+ INSN_3(STX, MEM, DW), \
+ INSN_3(STX, XADD, W), \
+ INSN_3(STX, XADD, DW), \
+ /* Immediate based. */ \
+ INSN_3(ST, MEM, B), \
+ INSN_3(ST, MEM, H), \
+ INSN_3(ST, MEM, W), \
+ INSN_3(ST, MEM, DW), \
+ /* Load instructions. */ \
+ /* Register based. */ \
+ INSN_3(LDX, MEM, B), \
+ INSN_3(LDX, MEM, H), \
+ INSN_3(LDX, MEM, W), \
+ INSN_3(LDX, MEM, DW), \
+ /* Immediate based. */ \
+ INSN_3(LD, IMM, DW), \
+ /* Misc (old cBPF carry-over). */ \
+ INSN_3(LD, ABS, B), \
+ INSN_3(LD, ABS, H), \
+ INSN_3(LD, ABS, W), \
+ INSN_3(LD, IND, B), \
+ INSN_3(LD, IND, H), \
+ INSN_3(LD, IND, W)
+
+bool bpf_opcode_in_insntable(u8 code)
+{
+#define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
+#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
+ static const bool public_insntable[256] = {
+ [0 ... 255] = false,
+ /* Now overwrite non-defaults ... */
+ BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
+ };
+#undef BPF_INSN_3_TBL
+#undef BPF_INSN_2_TBL
+ return public_insntable[code];
+}
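
BPF_INSN_MAP is a classic X-macro: a single authoritative opcode list expanded once into the boolean table above and again into the interpreter's jump table below, so the two can never drift apart. A hedged miniature of the technique:

#include <stdio.h>

/* One authoritative list... */
#define OP_MAP(OP)	OP(ADD), OP(SUB), OP(MUL)

enum op {
#define OP_ENUM(x)	OP_##x
	OP_MAP(OP_ENUM),
#undef OP_ENUM
	OP_MAX
};

/* ...expanded a second time into a lookup table. */
static const char *op_name[OP_MAX] = {
#define OP_NAME(x)	[OP_##x] = #x
	OP_MAP(OP_NAME),
#undef OP_NAME
};

int main(void)
{
	printf("%s\n", op_name[OP_MUL]);	/* prints "MUL" */
	return 0;
}
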
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
* __bpf_prog_run - run eBPF program on a given context
* @ctx: is the data we are operating on
@@ -774,118 +921,21 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
*
* Decode and execute eBPF instructions.
*/
-static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
- u64 *stack)
+static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
u64 tmp;
+#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
+#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
static const void *jumptable[256] = {
[0 ... 255] = &&default_label,
/* Now overwrite non-defaults ... */
- /* 32 bit ALU operations */
- [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
- [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
- [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
- [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
- [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
- [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
- [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
- [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
- [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
- [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
- [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
- [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
- [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
- [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
- [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
- [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
- [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
- [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
- [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
- [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
- [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
- [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
- [BPF_ALU | BPF_NEG] = &&ALU_NEG,
- [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
- [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
- /* 64 bit ALU operations */
- [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
- [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
- [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
- [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
- [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
- [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
- [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
- [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
- [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
- [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
- [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
- [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
- [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
- [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
- [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
- [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
- [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
- [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
- [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
- [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
- [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
- [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
- [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
- [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
- [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
- /* Call instruction */
- [BPF_JMP | BPF_CALL] = &&JMP_CALL,
+ BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
+ /* Non-UAPI available opcodes. */
+ [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
- /* Jumps */
- [BPF_JMP | BPF_JA] = &&JMP_JA,
- [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
- [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
- [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
- [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
- [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
- [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
- [BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
- [BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
- [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
- [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
- [BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
- [BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
- [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
- [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
- [BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
- [BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
- [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
- [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
- [BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
- [BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
- [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
- [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
- /* Program return */
- [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
- /* Store instructions */
- [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
- [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
- [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
- [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
- [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
- [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
- [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
- [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
- [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
- [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
- /* Load instructions */
- [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
- [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
- [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
- [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
- [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
- [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
- [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
- [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
- [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
- [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
- [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
};
+#undef BPF_INSN_3_LBL
+#undef BPF_INSN_2_LBL
u32 tail_call_cnt = 0;
void *ptr;
int off;
@@ -949,14 +999,10 @@ select_insn:
(*(s64 *) &DST) >>= IMM;
CONT;
ALU64_MOD_X:
- if (unlikely(SRC == 0))
- return 0;
div64_u64_rem(DST, SRC, &tmp);
DST = tmp;
CONT;
ALU_MOD_X:
- if (unlikely(SRC == 0))
- return 0;
tmp = (u32) DST;
DST = do_div(tmp, (u32) SRC);
CONT;
@@ -969,13 +1015,9 @@ select_insn:
DST = do_div(tmp, (u32) IMM);
CONT;
ALU64_DIV_X:
- if (unlikely(SRC == 0))
- return 0;
DST = div64_u64(DST, SRC);
CONT;
ALU_DIV_X:
- if (unlikely(SRC == 0))
- return 0;
tmp = (u32) DST;
do_div(tmp, (u32) SRC);
DST = (u32) tmp;
@@ -1025,6 +1067,13 @@ select_insn:
BPF_R4, BPF_R5);
CONT;
+ JMP_CALL_ARGS:
+ BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
+ BPF_R3, BPF_R4,
+ BPF_R5,
+ insn + insn->off + 1);
+ CONT;
+
JMP_TAIL_CALL: {
struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -1279,8 +1328,14 @@ load_byte:
goto load_byte;
default_label:
- /* If we ever reach this, we have a bug somewhere. */
- WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
+ /* If we ever reach this, we have a bug somewhere. Die hard here
+ * instead of just returning 0; we could be somewhere in a subprog,
+ * so execution could otherwise continue, which we do /not/ want.
+ *
+ * Note: the verifier whitelists all opcodes in bpf_opcode_in_insntable().
+ */
+ pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
+ BUG_ON(1);
return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
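
The jumptable depends on GCC's labels-as-values extension (&&label): each decoded opcode indexes directly into an array of label addresses instead of going through a switch. A hedged miniature of such a dispatch loop (GNU C only; the opcodes here are invented):

#include <stdio.h>

/* Minimal computed-goto interpreter: opcodes 0 (inc), 1 (dbl), 2 (halt). */
int main(void)
{
	static const void *jumptable[] = { &&op_inc, &&op_dbl, &&op_halt };
	const unsigned char prog[] = { 0, 0, 1, 2 };	/* inc, inc, dbl, halt */
	const unsigned char *pc = prog;
	long acc = 0;

#define DISPATCH()	goto *jumptable[*pc++]
	DISPATCH();
op_inc:	acc += 1; DISPATCH();
op_dbl:	acc *= 2; DISPATCH();
op_halt:
	printf("acc = %ld\n", acc);	/* (0 + 1 + 1) * 2 = 4 */
	return 0;
}
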
@@ -1297,6 +1352,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
return ___bpf_prog_run(regs, insn, stack); \
}
+#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
+#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
+static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
+ const struct bpf_insn *insn) \
+{ \
+ u64 stack[stack_size / sizeof(u64)]; \
+ u64 regs[MAX_BPF_REG]; \
+\
+ FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ BPF_R1 = r1; \
+ BPF_R2 = r2; \
+ BPF_R3 = r3; \
+ BPF_R4 = r4; \
+ BPF_R5 = r5; \
+ return ___bpf_prog_run(regs, insn, stack); \
+}
+
#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
@@ -1308,6 +1380,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
+EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
+
#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
static unsigned int (*interpreters[])(const void *ctx,
@@ -1316,10 +1392,43 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
+#undef PROG_NAME_LIST
+#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
+static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
+ const struct bpf_insn *insn) = {
+EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
+EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
+EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
+};
+#undef PROG_NAME_LIST
+
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
+{
+ stack_depth = max_t(u32, stack_depth, 1);
+ insn->off = (s16) insn->imm;
+ insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
+ __bpf_call_base_args;
+ insn->code = BPF_JMP | BPF_CALL_ARGS;
+}
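
bpf_patch_call_args() selects one of the pre-instantiated interpreters by the verifier-computed stack depth, bucketed in 32-byte steps up to 512. A small sketch of just the index arithmetic:

#include <stdio.h>

#define ROUND_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

/* interpreters_args[] holds one variant per 32-byte stack bucket
 * (32, 64, ..., 512); map a stack depth to its slot.
 */
static unsigned int interp_index(unsigned int stack_depth)
{
	if (stack_depth < 1)
		stack_depth = 1;	/* mirrors max_t(u32, depth, 1) */
	return ROUND_UP(stack_depth, 32) / 32 - 1;
}

int main(void)
{
	printf("depth 40  -> slot %u\n", interp_index(40));	/* 1: 64-byte variant */
	printf("depth 512 -> slot %u\n", interp_index(512));	/* 15: last variant */
	return 0;
}
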
+
+#else
+static unsigned int __bpf_prog_ret0_warn(const void *ctx,
+ const struct bpf_insn *insn)
+{
+ /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
+ * is not working properly, so warn about it!
+ */
+ WARN_ON_ONCE(1);
+ return 0;
+}
+#endif
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
+ if (fp->kprobe_override)
+ return false;
+
if (!array->owner_prog_type) {
/* There's no owner yet where we could check for
* compatibility.
@@ -1364,9 +1473,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
*/
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+ fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
/* eBPF JITs can rewrite the program in case constant
* blinding is active. However, in case of error during
@@ -1376,6 +1489,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
*/
if (!bpf_prog_is_dev_bound(fp->aux)) {
fp = bpf_int_jit_compile(fp);
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+ if (!fp->jited) {
+ *err = -ENOTSUPP;
+ return fp;
+ }
+#endif
} else {
*err = bpf_prog_offload_compile(fp);
if (*err)
@@ -1462,6 +1581,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
rcu_read_lock();
prog = rcu_dereference(progs)->progs;
for (; *prog; prog++) {
+ if (*prog == &dummy_bpf_prog.prog)
+ continue;
id = (*prog)->aux->id;
if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
rcu_read_unlock();
@@ -1545,14 +1666,41 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
return 0;
}
+int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+ __u32 __user *prog_ids, u32 request_cnt,
+ __u32 __user *prog_cnt)
+{
+ u32 cnt = 0;
+
+ if (array)
+ cnt = bpf_prog_array_length(array);
+
+ if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
+ return -EFAULT;
+
+ /* return early if user requested only program count or nothing to copy */
+ if (!request_cnt || !cnt)
+ return 0;
+
+ return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
+}
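
bpf_prog_array_copy_info() supports the usual two-step query: call once with request_cnt == 0 to learn the count, then size a buffer and call again. A hedged local stand-in showing the calling convention (query_prog_ids() is hypothetical; the real path goes through the bpf() syscall):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in: reports total count, copies up to
 * request_cnt ids into 'ids'.
 */
static int query_prog_ids(uint32_t *ids, uint32_t request_cnt,
			  uint32_t *prog_cnt)
{
	static const uint32_t attached[] = { 7, 12, 31 };
	uint32_t n = sizeof(attached) / sizeof(attached[0]);
	uint32_t i;

	*prog_cnt = n;
	if (!request_cnt || !n)
		return 0;	/* count-only query, nothing to copy */
	for (i = 0; i < n && i < request_cnt; i++)
		ids[i] = attached[i];
	return 0;
}

int main(void)
{
	uint32_t cnt, *ids;

	query_prog_ids(NULL, 0, &cnt);		/* step 1: how many? */
	ids = calloc(cnt, sizeof(*ids));
	query_prog_ids(ids, cnt, &cnt);		/* step 2: fetch them */
	printf("first id: %u of %u\n", ids[0], cnt);
	free(ids);
	return 0;
}
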
+
static void bpf_prog_free_deferred(struct work_struct *work)
{
struct bpf_prog_aux *aux;
+ int i;
aux = container_of(work, struct bpf_prog_aux, work);
if (bpf_prog_is_dev_bound(aux))
bpf_prog_offload_destroy(aux->prog);
- bpf_jit_free(aux->prog);
+ for (i = 0; i < aux->func_cnt; i++)
+ bpf_jit_free(aux->func[i]);
+ if (aux->func_cnt) {
+ kfree(aux->func);
+ bpf_prog_unlock_free(aux->prog);
+ } else {
+ bpf_jit_free(aux->prog);
+ }
}
/* Free internal BPF program */
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ce5b669003b2..fbfdada6caee 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -94,13 +94,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
if (!cmap)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- cmap->map.map_type = attr->map_type;
- cmap->map.key_size = attr->key_size;
- cmap->map.value_size = attr->value_size;
- cmap->map.max_entries = attr->max_entries;
- cmap->map.map_flags = attr->map_flags;
- cmap->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&cmap->map, attr);
/* Pre-limit array size based on NR_CPUS, not final CPU check */
if (cmap->map.max_entries > NR_CPUS) {
@@ -143,7 +137,7 @@ free_cmap:
return ERR_PTR(err);
}
-void __cpu_map_queue_destructor(void *ptr)
+static void __cpu_map_queue_destructor(void *ptr)
{
/* The tear-down procedure should have made sure that queue is
* empty. See __cpu_map_entry_replace() and work-queue
@@ -222,8 +216,8 @@ static struct xdp_pkt *convert_to_xdp_pkt(struct xdp_buff *xdp)
return xdp_pkt;
}
-struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
- struct xdp_pkt *xdp_pkt)
+static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
+ struct xdp_pkt *xdp_pkt)
{
unsigned int frame_size;
void *pkt_data_start;
@@ -337,7 +331,8 @@ static int cpu_map_kthread_run(void *data)
return 0;
}
-struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, int map_id)
+static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
+ int map_id)
{
gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
struct bpf_cpu_map_entry *rcpu;
@@ -395,7 +390,7 @@ free_rcu:
return NULL;
}
-void __cpu_map_entry_free(struct rcu_head *rcu)
+static void __cpu_map_entry_free(struct rcu_head *rcu)
{
struct bpf_cpu_map_entry *rcpu;
int cpu;
@@ -438,8 +433,8 @@ void __cpu_map_entry_free(struct rcu_head *rcu)
* cpu_map_kthread_stop, which waits for an RCU grace period before
* stopping kthread, emptying the queue.
*/
-void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
- u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
+static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
+ u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
struct bpf_cpu_map_entry *old_rcpu;
@@ -451,7 +446,7 @@ void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
}
}
-int cpu_map_delete_elem(struct bpf_map *map, void *key)
+static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
u32 key_cpu = *(u32 *)key;
@@ -464,8 +459,8 @@ int cpu_map_delete_elem(struct bpf_map *map, void *key)
return 0;
}
-int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
- u64 map_flags)
+static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
+ u64 map_flags)
{
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
struct bpf_cpu_map_entry *rcpu;
@@ -502,7 +497,7 @@ int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
return 0;
}
-void cpu_map_free(struct bpf_map *map)
+static void cpu_map_free(struct bpf_map *map)
{
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
int cpu;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index ebdef54bf7df..565f9ece9115 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -93,13 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
if (!dtab)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- dtab->map.map_type = attr->map_type;
- dtab->map.key_size = attr->key_size;
- dtab->map.value_size = attr->value_size;
- dtab->map.max_entries = attr->max_entries;
- dtab->map.map_flags = attr->map_flags;
- dtab->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&dtab->map, attr);
/* make sure page count doesn't overflow */
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index e682850c9715..8740406df2cd 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -21,10 +21,39 @@ static const char * const func_id_str[] = {
};
#undef __BPF_FUNC_STR_FN
-const char *func_id_name(int id)
+static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
+ const struct bpf_insn *insn,
+ char *buff, size_t len)
{
BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
+ if (insn->src_reg != BPF_PSEUDO_CALL &&
+ insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
+ func_id_str[insn->imm])
+ return func_id_str[insn->imm];
+
+ if (cbs && cbs->cb_call)
+ return cbs->cb_call(cbs->private_data, insn);
+
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ snprintf(buff, len, "%+d", insn->imm);
+
+ return buff;
+}
+
+static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
+ const struct bpf_insn *insn,
+ u64 full_imm, char *buff, size_t len)
+{
+ if (cbs && cbs->cb_imm)
+ return cbs->cb_imm(cbs->private_data, insn, full_imm);
+
+ snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
+ return buff;
+}
+
+const char *func_id_name(int id)
+{
if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
return func_id_str[id];
else
@@ -83,7 +112,7 @@ static const char *const bpf_jmp_string[16] = {
[BPF_EXIT >> 4] = "exit",
};
-static void print_bpf_end_insn(bpf_insn_print_cb verbose,
+static void print_bpf_end_insn(bpf_insn_print_t verbose,
struct bpf_verifier_env *env,
const struct bpf_insn *insn)
{
@@ -92,9 +121,12 @@ static void print_bpf_end_insn(bpf_insn_print_cb verbose,
insn->imm, insn->dst_reg);
}
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
- const struct bpf_insn *insn, bool allow_ptr_leaks)
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+ struct bpf_verifier_env *env,
+ const struct bpf_insn *insn,
+ bool allow_ptr_leaks)
{
+ const bpf_insn_print_t verbose = cbs->cb_print;
u8 class = BPF_CLASS(insn->code);
if (class == BPF_ALU || class == BPF_ALU64) {
@@ -175,12 +207,15 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
*/
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+ char tmp[64];
if (map_ptr && !allow_ptr_leaks)
imm = 0;
- verbose(env, "(%02x) r%d = 0x%llx\n", insn->code,
- insn->dst_reg, (unsigned long long)imm);
+ verbose(env, "(%02x) r%d = %s\n",
+ insn->code, insn->dst_reg,
+ __func_imm_name(cbs, insn, imm,
+ tmp, sizeof(tmp)));
} else {
verbose(env, "BUG_ld_%02x\n", insn->code);
return;
@@ -189,8 +224,20 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
- verbose(env, "(%02x) call %s#%d\n", insn->code,
- func_id_name(insn->imm), insn->imm);
+ char tmp[64];
+
+ if (insn->src_reg == BPF_PSEUDO_CALL) {
+ verbose(env, "(%02x) call pc%s\n",
+ insn->code,
+ __func_get_name(cbs, insn,
+ tmp, sizeof(tmp)));
+ } else {
+ strcpy(tmp, "unknown");
+ verbose(env, "(%02x) call %s#%d\n", insn->code,
+ __func_get_name(cbs, insn,
+ tmp, sizeof(tmp)),
+ insn->imm);
+ }
} else if (insn->code == (BPF_JMP | BPF_JA)) {
verbose(env, "(%02x) goto pc%+d\n",
insn->code, insn->off);
diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h
index 8de977e420b6..266fe8ee542b 100644
--- a/kernel/bpf/disasm.h
+++ b/kernel/bpf/disasm.h
@@ -17,16 +17,35 @@
#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <string.h>
+#endif
+
+struct bpf_verifier_env;
extern const char *const bpf_alu_string[16];
extern const char *const bpf_class_string[8];
const char *func_id_name(int id);
-struct bpf_verifier_env;
-typedef void (*bpf_insn_print_cb)(struct bpf_verifier_env *env,
- const char *, ...);
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
- const struct bpf_insn *insn, bool allow_ptr_leaks);
+typedef __printf(2, 3) void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
+ const char *, ...);
+typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
+ const struct bpf_insn *insn);
+typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
+ const struct bpf_insn *insn,
+ __u64 full_imm);
+
+struct bpf_insn_cbs {
+ bpf_insn_print_t cb_print;
+ bpf_insn_revmap_call_t cb_call;
+ bpf_insn_print_imm_t cb_imm;
+ void *private_data;
+};
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+ struct bpf_verifier_env *env,
+ const struct bpf_insn *insn,
+ bool allow_ptr_leaks);
#endif
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3905d4bc5b80..b76828f23b49 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -227,7 +227,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
}
/* Called from syscall */
-static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+static int htab_map_alloc_check(union bpf_attr *attr)
{
bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
@@ -241,9 +241,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
int numa_node = bpf_map_attr_numa_node(attr);
- struct bpf_htab *htab;
- int err, i;
- u64 cost;
BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
offsetof(struct htab_elem, hash_node.pprev));
@@ -254,40 +251,68 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
/* LRU implementation is much more complicated than other
* maps. Hence, limit to CAP_SYS_ADMIN for now.
*/
- return ERR_PTR(-EPERM);
+ return -EPERM;
if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
/* reserved bits should not be used */
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (!lru && percpu_lru)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (lru && !prealloc)
- return ERR_PTR(-ENOTSUPP);
+ return -ENOTSUPP;
if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
+
+ /* check sanity of attributes.
+ * value_size == 0 may be allowed in the future to use map as a set
+ */
+ if (attr->max_entries == 0 || attr->key_size == 0 ||
+ attr->value_size == 0)
+ return -EINVAL;
+
+ if (attr->key_size > MAX_BPF_STACK)
+ /* eBPF programs initialize keys on stack, so they cannot be
+ * larger than max stack size
+ */
+ return -E2BIG;
+
+ if (attr->value_size >= KMALLOC_MAX_SIZE -
+ MAX_BPF_STACK - sizeof(struct htab_elem))
+ /* if value_size is bigger, the user space won't be able to
+ * access the elements via bpf syscall. This check also makes
+ * sure that the elem_size doesn't overflow and it's
+ * kmalloc-able later in htab_map_update_elem()
+ */
+ return -E2BIG;
+
+ return 0;
+}
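
The value_size ceiling above exists to keep the later elem_size arithmetic kmalloc-able: an element is the htab_elem header plus the rounded-up key plus either a per-CPU pointer or the rounded-up inline value. A hedged sketch of that sizing; the 48-byte header is an illustrative stand-in for sizeof(struct htab_elem):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ROUND_UP8(x)	(((x) + 7u) & ~7u)

/* Mirror of the elem_size computation in htab_map_alloc(). */
static uint64_t elem_size(uint32_t key_size, uint32_t value_size,
			  bool percpu)
{
	uint64_t sz = 48 + ROUND_UP8(key_size);	/* header + key */

	if (percpu)
		sz += sizeof(void *);		/* per-CPU pointer */
	else
		sz += ROUND_UP8(value_size);	/* inline value */
	return sz;
}

int main(void)
{
	printf("4-byte key, 12-byte value: %llu bytes/elem\n",
	       (unsigned long long)elem_size(4, 12, false));
	return 0;
}
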
+
+static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+{
+ bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ /* percpu_lru means each cpu has its own LRU list.
+ * it is different from BPF_MAP_TYPE_PERCPU_HASH where
+ * the map's value itself is percpu. percpu_lru has
+ * nothing to do with the map's value.
+ */
+ bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
+ bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+ struct bpf_htab *htab;
+ int err, i;
+ u64 cost;
htab = kzalloc(sizeof(*htab), GFP_USER);
if (!htab)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- htab->map.map_type = attr->map_type;
- htab->map.key_size = attr->key_size;
- htab->map.value_size = attr->value_size;
- htab->map.max_entries = attr->max_entries;
- htab->map.map_flags = attr->map_flags;
- htab->map.numa_node = numa_node;
-
- /* check sanity of attributes.
- * value_size == 0 may be allowed in the future to use map as a set
- */
- err = -EINVAL;
- if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
- htab->map.value_size == 0)
- goto free_htab;
+ bpf_map_init_from_attr(&htab->map, attr);
if (percpu_lru) {
/* ensure each CPU's lru list has >=1 elements.
@@ -304,22 +329,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
/* hash table size must be power of 2 */
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
- err = -E2BIG;
- if (htab->map.key_size > MAX_BPF_STACK)
- /* eBPF programs initialize keys on stack, so they cannot be
- * larger than max stack size
- */
- goto free_htab;
-
- if (htab->map.value_size >= KMALLOC_MAX_SIZE -
- MAX_BPF_STACK - sizeof(struct htab_elem))
- /* if value_size is bigger, the user space won't be able to
- * access the elements via bpf syscall. This check also makes
- * sure that the elem_size doesn't overflow and it's
- * kmalloc-able later in htab_map_update_elem()
- */
- goto free_htab;
-
htab->elem_size = sizeof(struct htab_elem) +
round_up(htab->map.key_size, 8);
if (percpu)
@@ -327,6 +336,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
else
htab->elem_size += round_up(htab->map.value_size, 8);
+ err = -E2BIG;
/* prevent zero size kmalloc and check for u32 overflow */
if (htab->n_buckets == 0 ||
htab->n_buckets > U32_MAX / sizeof(struct bucket))
@@ -1143,6 +1153,7 @@ static void htab_map_free(struct bpf_map *map)
}
const struct bpf_map_ops htab_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1153,6 +1164,7 @@ const struct bpf_map_ops htab_map_ops = {
};
const struct bpf_map_ops htab_lru_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1236,6 +1248,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
}
const struct bpf_map_ops htab_percpu_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1245,6 +1258,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
};
const struct bpf_map_ops htab_lru_percpu_map_ops = {
+ .map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
@@ -1253,11 +1267,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_delete_elem = htab_lru_map_delete_elem,
};
-static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+static int fd_htab_map_alloc_check(union bpf_attr *attr)
{
if (attr->value_size != sizeof(u32))
- return ERR_PTR(-EINVAL);
- return htab_map_alloc(attr);
+ return -EINVAL;
+ return htab_map_alloc_check(attr);
}
static void fd_htab_map_free(struct bpf_map *map)
@@ -1328,7 +1342,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
if (IS_ERR(inner_map_meta))
return inner_map_meta;
- map = fd_htab_map_alloc(attr);
+ map = htab_map_alloc(attr);
if (IS_ERR(map)) {
bpf_map_meta_free(inner_map_meta);
return map;
@@ -1372,6 +1386,7 @@ static void htab_of_map_free(struct bpf_map *map)
}
const struct bpf_map_ops htab_of_maps_map_ops = {
+ .map_alloc_check = fd_htab_map_alloc_check,
.map_alloc = htab_of_map_alloc,
.map_free = htab_of_map_free,
.map_get_next_key = htab_map_get_next_key,
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 01aaef1a77c5..81e2f6995adb 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -150,39 +150,29 @@ static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return 0;
}
-static int bpf_mkobj_ops(struct inode *dir, struct dentry *dentry,
- umode_t mode, const struct inode_operations *iops)
+static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
+ const struct inode_operations *iops)
{
- struct inode *inode;
-
- inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFREG);
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = iops;
- inode->i_private = dentry->d_fsdata;
+ inode->i_private = raw;
bpf_dentry_finalize(dentry, inode, dir);
return 0;
}
-static int bpf_mkobj(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t devt)
+static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
- enum bpf_type type = MINOR(devt);
-
- if (MAJOR(devt) != UNNAMED_MAJOR || !S_ISREG(mode) ||
- dentry->d_fsdata == NULL)
- return -EPERM;
+ return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops);
+}
- switch (type) {
- case BPF_TYPE_PROG:
- return bpf_mkobj_ops(dir, dentry, mode, &bpf_prog_iops);
- case BPF_TYPE_MAP:
- return bpf_mkobj_ops(dir, dentry, mode, &bpf_map_iops);
- default:
- return -EPERM;
- }
+static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
+{
+ return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops);
}
static struct dentry *
@@ -218,7 +208,6 @@ static int bpf_symlink(struct inode *dir, struct dentry *dentry,
static const struct inode_operations bpf_dir_iops = {
.lookup = bpf_lookup,
- .mknod = bpf_mkobj,
.mkdir = bpf_mkdir,
.symlink = bpf_symlink,
.rmdir = simple_rmdir,
@@ -234,7 +223,6 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
struct inode *dir;
struct path path;
umode_t mode;
- dev_t devt;
int ret;
dentry = kern_path_create(AT_FDCWD, pathname->name, &path, 0);
@@ -242,9 +230,8 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
return PTR_ERR(dentry);
mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
- devt = MKDEV(UNNAMED_MAJOR, type);
- ret = security_path_mknod(&path, dentry, mode, devt);
+ ret = security_path_mknod(&path, dentry, mode, 0);
if (ret)
goto out;
@@ -254,9 +241,16 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
goto out;
}
- dentry->d_fsdata = raw;
- ret = vfs_mknod(dir, dentry, mode, devt);
- dentry->d_fsdata = NULL;
+ switch (type) {
+ case BPF_TYPE_PROG:
+ ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
+ break;
+ case BPF_TYPE_MAP:
+ ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
+ break;
+ default:
+ ret = -EPERM;
+ }
out:
done_path_create(&path, dentry);
return ret;
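
Userspace reaches the switch above via the BPF_OBJ_PIN command, which creates the bpffs file for a program or map fd. A hedged sketch of the raw syscall (error handling trimmed; assumes bpffs is mounted at /sys/fs/bpf):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Pin an already-loaded object (prog or map) at a bpffs path. */
static int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)pathname;
	attr.bpf_fd   = fd;
	return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}

/* usage: bpf_obj_pin(prog_fd, "/sys/fs/bpf/myprog"); */
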
@@ -368,7 +362,45 @@ out:
putname(pname);
return ret;
}
-EXPORT_SYMBOL_GPL(bpf_obj_get_user);
+
+static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
+{
+ struct bpf_prog *prog;
+ int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (inode->i_op == &bpf_map_iops)
+ return ERR_PTR(-EINVAL);
+ if (inode->i_op != &bpf_prog_iops)
+ return ERR_PTR(-EACCES);
+
+ prog = inode->i_private;
+
+ ret = security_bpf_prog(prog);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!bpf_prog_get_ok(prog, &type, false))
+ return ERR_PTR(-EINVAL);
+
+ return bpf_prog_inc(prog);
+}
+
+struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
+{
+ struct bpf_prog *prog;
+ struct path path;
+ int ret = kern_path(name, LOOKUP_FOLLOW, &path);
+ if (ret)
+ return ERR_PTR(ret);
+ prog = __get_prog_inode(d_backing_inode(path.dentry), type);
+ if (!IS_ERR(prog))
+ touch_atime(&path);
+ path_put(&path);
+ return prog;
+}
+EXPORT_SYMBOL(bpf_prog_get_type_path);
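
bpf_prog_get_type_path() lets in-kernel users take a reference on a pinned program by its bpffs path. A hedged sketch of the calling pattern (kernel context; the path and program type are illustrative):

	struct bpf_prog *prog;

	prog = bpf_prog_get_type_path("/sys/fs/bpf/my_filter",
				      BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog))
		return PTR_ERR(prog);
	/* ... attach or run the program ... */
	bpf_prog_put(prog);	/* drop the reference the lookup took */
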
static void bpf_evict_inode(struct inode *inode)
{
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 885e45479680..7b469d10d0e9 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -522,12 +522,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */
- trie->map.map_type = attr->map_type;
- trie->map.key_size = attr->key_size;
- trie->map.value_size = attr->value_size;
- trie->map.max_entries = attr->max_entries;
- trie->map.map_flags = attr->map_flags;
- trie->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&trie->map, attr);
trie->data_size = attr->key_size -
offsetof(struct bpf_lpm_trie_key, data);
trie->max_prefixlen = trie->data_size * 8;
@@ -596,9 +591,96 @@ unlock:
raw_spin_unlock(&trie->lock);
}
-static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
+static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
{
- return -ENOTSUPP;
+ struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ struct bpf_lpm_trie_key *key = _key, *next_key = _next_key;
+ struct lpm_trie_node **node_stack = NULL;
+ int err = 0, stack_ptr = -1;
+ unsigned int next_bit;
+ size_t matchlen;
+
+ /* The get_next_key follows postorder. For the 4 node example at
+ * the top of this file, trie_get_next_key() returns the following
+ * one after another:
+ * 192.168.0.0/24
+ * 192.168.1.0/24
+ * 192.168.128.0/24
+ * 192.168.0.0/16
+ *
+ * The idea is to return more specific keys before less specific ones.
+ */
+
+ /* Empty trie */
+ search_root = rcu_dereference(trie->root);
+ if (!search_root)
+ return -ENOENT;
+
+ /* For invalid key, find the leftmost node in the trie */
+ if (!key || key->prefixlen > trie->max_prefixlen)
+ goto find_leftmost;
+
+ node_stack = kmalloc(trie->max_prefixlen * sizeof(struct lpm_trie_node *),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!node_stack)
+ return -ENOMEM;
+
+ /* Try to find the exact node for the given key */
+ for (node = search_root; node;) {
+ node_stack[++stack_ptr] = node;
+ matchlen = longest_prefix_match(trie, node, key);
+ if (node->prefixlen != matchlen ||
+ node->prefixlen == key->prefixlen)
+ break;
+
+ next_bit = extract_bit(key->data, node->prefixlen);
+ node = rcu_dereference(node->child[next_bit]);
+ }
+ if (!node || node->prefixlen != key->prefixlen ||
+ (node->flags & LPM_TREE_NODE_FLAG_IM))
+ goto find_leftmost;
+
+ /* The node with the exactly-matching key has been found;
+ * find the first node in postorder after the matched node.
+ */
+ node = node_stack[stack_ptr];
+ while (stack_ptr > 0) {
+ parent = node_stack[stack_ptr - 1];
+ if (rcu_dereference(parent->child[0]) == node) {
+ search_root = rcu_dereference(parent->child[1]);
+ if (search_root)
+ goto find_leftmost;
+ }
+ if (!(parent->flags & LPM_TREE_NODE_FLAG_IM)) {
+ next_node = parent;
+ goto do_copy;
+ }
+
+ node = parent;
+ stack_ptr--;
+ }
+
+ /* did not find anything */
+ err = -ENOENT;
+ goto free_stack;
+
+find_leftmost:
+ /* Find the leftmost non-intermediate node; all intermediate nodes
+ * have exactly two children, so this walk always sets next_node.
+ */
+ for (node = search_root; node;) {
+ if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+ next_node = node;
+ node = rcu_dereference(node->child[0]);
+ }
+do_copy:
+ next_key->prefixlen = next_node->prefixlen;
+ memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key, data),
+ next_node->data, trie->data_size);
+free_stack:
+ kfree(node_stack);
+ return err;
}
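
With trie_get_next_key() implemented, LPM tries can be walked like any other map: start from a NULL (or invalid) key to get the leftmost entry, then feed each result back in until the call fails with -ENOENT. A hedged userspace sketch using libbpf's bpf_map_get_next_key(); map_fd and the fixed-size IPv4 key layout are assumptions:

#include <bpf/bpf.h>
#include <stdio.h>

/* Illustrative fixed-size key; the UAPI type is struct
 * bpf_lpm_trie_key with a flexible data[] member.
 */
struct lpm_key {
	__u32 prefixlen;
	__u8  data[4];		/* IPv4 address bytes */
};

/* Dump every prefix in an LPM trie map; map_fd is assumed valid. */
static void dump_trie(int map_fd)
{
	struct lpm_key cur, next;
	void *prev = NULL;	/* NULL: ask for the first (leftmost) key */

	while (bpf_map_get_next_key(map_fd, prev, &next) == 0) {
		printf("%u.%u.%u.%u/%u\n", next.data[0], next.data[1],
		       next.data[2], next.data[3], next.prefixlen);
		cur = next;
		prev = &cur;
	}			/* loop ends with -ENOENT after the root */
}
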
const struct bpf_map_ops trie_map_ops = {
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 8455b89d1bbf..c9401075b58c 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -16,18 +16,35 @@
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
+#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
+#include <linux/proc_ns.h>
#include <linux/rtnetlink.h>
+#include <linux/rwsem.h>
-/* protected by RTNL */
+/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
+ * of all progs.
+ * RTNL lock cannot be taken when holding this lock.
+ */
+static DECLARE_RWSEM(bpf_devs_lock);
static LIST_HEAD(bpf_prog_offload_devs);
+static LIST_HEAD(bpf_map_offload_devs);
+
+static int bpf_dev_offload_check(struct net_device *netdev)
+{
+ if (!netdev)
+ return -EINVAL;
+ if (!netdev->netdev_ops->ndo_bpf)
+ return -EOPNOTSUPP;
+ return 0;
+}
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
- struct net *net = current->nsproxy->net_ns;
- struct bpf_dev_offload *offload;
+ struct bpf_prog_offload *offload;
+ int err;
if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
attr->prog_type != BPF_PROG_TYPE_XDP)
@@ -41,34 +58,44 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
return -ENOMEM;
offload->prog = prog;
- init_waitqueue_head(&offload->verifier_done);
- rtnl_lock();
- offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
- if (!offload->netdev) {
- rtnl_unlock();
- kfree(offload);
- return -EINVAL;
- }
+ offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
+ attr->prog_ifindex);
+ err = bpf_dev_offload_check(offload->netdev);
+ if (err)
+ goto err_maybe_put;
+ down_write(&bpf_devs_lock);
+ if (offload->netdev->reg_state != NETREG_REGISTERED) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
prog->aux->offload = offload;
list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
- rtnl_unlock();
+ dev_put(offload->netdev);
+ up_write(&bpf_devs_lock);
return 0;
+err_unlock:
+ up_write(&bpf_devs_lock);
+err_maybe_put:
+ if (offload->netdev)
+ dev_put(offload->netdev);
+ kfree(offload);
+ return err;
}
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
struct netdev_bpf *data)
{
- struct net_device *netdev = prog->aux->offload->netdev;
+ struct bpf_prog_offload *offload = prog->aux->offload;
+ struct net_device *netdev;
ASSERT_RTNL();
- if (!netdev)
+ if (!offload)
return -ENODEV;
- if (!netdev->netdev_ops->ndo_bpf)
- return -EOPNOTSUPP;
+ netdev = offload->netdev;
data->command = cmd;
@@ -87,62 +114,63 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
if (err)
goto exit_unlock;
- env->dev_ops = data.verifier.ops;
-
+ env->prog->aux->offload->dev_ops = data.verifier.ops;
env->prog->aux->offload->dev_state = true;
- env->prog->aux->offload->verifier_running = true;
exit_unlock:
rtnl_unlock();
return err;
}
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx)
+{
+ struct bpf_prog_offload *offload;
+ int ret = -ENODEV;
+
+ down_read(&bpf_devs_lock);
+ offload = env->prog->aux->offload;
+ if (offload)
+ ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
- struct bpf_dev_offload *offload = prog->aux->offload;
+ struct bpf_prog_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
- /* Caution - if netdev is destroyed before the program, this function
- * will be called twice.
- */
-
data.offload.prog = prog;
- if (offload->verifier_running)
- wait_event(offload->verifier_done, !offload->verifier_running);
-
if (offload->dev_state)
WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
- offload->dev_state = false;
+ /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
+ bpf_prog_free_id(prog, true);
+
list_del_init(&offload->offloads);
- offload->netdev = NULL;
+ kfree(offload);
+ prog->aux->offload = NULL;
}
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
- struct bpf_dev_offload *offload = prog->aux->offload;
-
- offload->verifier_running = false;
- wake_up(&offload->verifier_done);
-
rtnl_lock();
- __bpf_prog_offload_destroy(prog);
+ down_write(&bpf_devs_lock);
+ if (prog->aux->offload)
+ __bpf_prog_offload_destroy(prog);
+ up_write(&bpf_devs_lock);
rtnl_unlock();
-
- kfree(offload);
}
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
- struct bpf_dev_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
int ret;
data.offload.prog = prog;
- offload->verifier_running = false;
- wake_up(&offload->verifier_done);
-
rtnl_lock();
ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
rtnl_unlock();
@@ -164,14 +192,323 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
return bpf_prog_offload_translate(prog);
}
+struct ns_get_path_bpf_prog_args {
+ struct bpf_prog *prog;
+ struct bpf_prog_info *info;
+};
+
+static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
+{
+ struct ns_get_path_bpf_prog_args *args = private_data;
+ struct bpf_prog_aux *aux = args->prog->aux;
+ struct ns_common *ns;
+ struct net *net;
+
+ rtnl_lock();
+ down_read(&bpf_devs_lock);
+
+ if (aux->offload) {
+ args->info->ifindex = aux->offload->netdev->ifindex;
+ net = dev_net(aux->offload->netdev);
+ get_net(net);
+ ns = &net->ns;
+ } else {
+ args->info->ifindex = 0;
+ ns = NULL;
+ }
+
+ up_read(&bpf_devs_lock);
+ rtnl_unlock();
+
+ return ns;
+}
+
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+ struct bpf_prog *prog)
+{
+ struct ns_get_path_bpf_prog_args args = {
+ .prog = prog,
+ .info = info,
+ };
+ struct bpf_prog_aux *aux = prog->aux;
+ struct inode *ns_inode;
+ struct path ns_path;
+ char __user *uinsns;
+ void *res;
+ u32 ulen;
+
+ res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
+ if (IS_ERR(res)) {
+ if (!info->ifindex)
+ return -ENODEV;
+ return PTR_ERR(res);
+ }
+
+ down_read(&bpf_devs_lock);
+
+ if (!aux->offload) {
+ up_read(&bpf_devs_lock);
+ return -ENODEV;
+ }
+
+ ulen = info->jited_prog_len;
+ info->jited_prog_len = aux->offload->jited_len;
+ if (info->jited_prog_len & ulen) {
+ uinsns = u64_to_user_ptr(info->jited_prog_insns);
+ ulen = min_t(u32, info->jited_prog_len, ulen);
+ if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
+ up_read(&bpf_devs_lock);
+ return -EFAULT;
+ }
+ }
+
+ up_read(&bpf_devs_lock);
+
+ ns_inode = ns_path.dentry->d_inode;
+ info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
+ info->netns_ino = ns_inode->i_ino;
+ path_put(&ns_path);
+
+ return 0;
+}
+
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
+static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
+ enum bpf_netdev_command cmd)
+{
+ struct netdev_bpf data = {};
+ struct net_device *netdev;
+
+ ASSERT_RTNL();
+
+ data.command = cmd;
+ data.offmap = offmap;
+ /* Caller must make sure netdev is valid */
+ netdev = offmap->netdev;
+
+ return netdev->netdev_ops->ndo_bpf(netdev, &data);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct bpf_offloaded_map *offmap;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+ if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
+ attr->map_type != BPF_MAP_TYPE_HASH)
+ return ERR_PTR(-EINVAL);
+
+ offmap = kzalloc(sizeof(*offmap), GFP_USER);
+ if (!offmap)
+ return ERR_PTR(-ENOMEM);
+
+ bpf_map_init_from_attr(&offmap->map, attr);
+
+ rtnl_lock();
+ down_write(&bpf_devs_lock);
+ offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
+ err = bpf_dev_offload_check(offmap->netdev);
+ if (err)
+ goto err_unlock;
+
+ err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
+ if (err)
+ goto err_unlock;
+
+ list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
+ up_write(&bpf_devs_lock);
+ rtnl_unlock();
+
+ return &offmap->map;
+
+err_unlock:
+ up_write(&bpf_devs_lock);
+ rtnl_unlock();
+ kfree(offmap);
+ return ERR_PTR(err);
+}
+
+static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
+{
+ WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
+ /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
+ bpf_map_free_id(&offmap->map, true);
+ list_del_init(&offmap->offloads);
+ offmap->netdev = NULL;
+}
+
+void bpf_map_offload_map_free(struct bpf_map *map)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+
+ rtnl_lock();
+ down_write(&bpf_devs_lock);
+ if (offmap->netdev)
+ __bpf_map_offload_destroy(offmap);
+ up_write(&bpf_devs_lock);
+ rtnl_unlock();
+
+ kfree(offmap);
+}
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+ int ret = -ENODEV;
+
+ down_read(&bpf_devs_lock);
+ if (offmap->netdev)
+ ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
+int bpf_map_offload_update_elem(struct bpf_map *map,
+ void *key, void *value, u64 flags)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+ int ret = -ENODEV;
+
+ if (unlikely(flags > BPF_EXIST))
+ return -EINVAL;
+
+ down_read(&bpf_devs_lock);
+ if (offmap->netdev)
+ ret = offmap->dev_ops->map_update_elem(offmap, key, value,
+ flags);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+ int ret = -ENODEV;
+
+ down_read(&bpf_devs_lock);
+ if (offmap->netdev)
+ ret = offmap->dev_ops->map_delete_elem(offmap, key);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
+int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+ struct bpf_offloaded_map *offmap = map_to_offmap(map);
+ int ret = -ENODEV;
+
+ down_read(&bpf_devs_lock);
+ if (offmap->netdev)
+ ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
+struct ns_get_path_bpf_map_args {
+ struct bpf_offloaded_map *offmap;
+ struct bpf_map_info *info;
+};
+
+static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
+{
+ struct ns_get_path_bpf_map_args *args = private_data;
+ struct ns_common *ns;
+ struct net *net;
+
+ rtnl_lock();
+ down_read(&bpf_devs_lock);
+
+ if (args->offmap->netdev) {
+ args->info->ifindex = args->offmap->netdev->ifindex;
+ net = dev_net(args->offmap->netdev);
+ get_net(net);
+ ns = &net->ns;
+ } else {
+ args->info->ifindex = 0;
+ ns = NULL;
+ }
+
+ up_read(&bpf_devs_lock);
+ rtnl_unlock();
+
+ return ns;
+}
+
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+ struct ns_get_path_bpf_map_args args = {
+ .offmap = map_to_offmap(map),
+ .info = info,
+ };
+ struct inode *ns_inode;
+ struct path ns_path;
+ void *res;
+
+ res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
+ if (IS_ERR(res)) {
+ if (!info->ifindex)
+ return -ENODEV;
+ return PTR_ERR(res);
+ }
+
+ ns_inode = ns_path.dentry->d_inode;
+ info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
+ info->netns_ino = ns_inode->i_ino;
+ path_put(&ns_path);
+
+ return 0;
+}
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
+{
+ struct bpf_offloaded_map *offmap;
+ struct bpf_prog_offload *offload;
+ bool ret;
+
+ if (!bpf_prog_is_dev_bound(prog->aux) || !bpf_map_is_dev_bound(map))
+ return false;
+
+ down_read(&bpf_devs_lock);
+ offload = prog->aux->offload;
+ offmap = map_to_offmap(map);
+
+ ret = offload && offload->netdev == offmap->netdev;
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
+static void bpf_offload_orphan_all_progs(struct net_device *netdev)
+{
+ struct bpf_prog_offload *offload, *tmp;
+
+ list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
+ if (offload->netdev == netdev)
+ __bpf_prog_offload_destroy(offload->prog);
+}
+
+static void bpf_offload_orphan_all_maps(struct net_device *netdev)
+{
+ struct bpf_offloaded_map *offmap, *tmp;
+
+ list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
+ if (offmap->netdev == netdev)
+ __bpf_map_offload_destroy(offmap);
+}
+
static int bpf_offload_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct bpf_dev_offload *offload, *tmp;
ASSERT_RTNL();
@@ -181,11 +518,10 @@ static int bpf_offload_notification(struct notifier_block *notifier,
if (netdev->reg_state != NETREG_UNREGISTERING)
break;
- list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
- offloads) {
- if (offload->netdev == netdev)
- __bpf_prog_offload_destroy(offload->prog);
- }
+ down_write(&bpf_devs_lock);
+ bpf_offload_orphan_all_progs(netdev);
+ bpf_offload_orphan_all_maps(netdev);
+ up_write(&bpf_devs_lock);
break;
default:
break;
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 5ee2e41893d9..0314d1783d77 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -96,14 +96,6 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
-/* compute the linear packet data range [data, data_end) for skb when
- * sk_skb type programs are in use.
- */
-static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
-{
- TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
-}
-
enum __sk_action {
__SK_DROP = 0,
__SK_PASS,
@@ -521,13 +513,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
if (!stab)
return ERR_PTR(-ENOMEM);
- /* mandatory map attributes */
- stab->map.map_type = attr->map_type;
- stab->map.key_size = attr->key_size;
- stab->map.value_size = attr->value_size;
- stab->map.max_entries = attr->max_entries;
- stab->map.map_flags = attr->map_flags;
- stab->map.numa_node = bpf_map_attr_numa_node(attr);
+ bpf_map_init_from_attr(&stab->map, attr);
/* make sure page count doesn't overflow */
cost = (u64) stab->map.max_entries * sizeof(struct sock *);
@@ -591,8 +577,15 @@ static void sock_map_free(struct bpf_map *map)
write_lock_bh(&sock->sk_callback_lock);
psock = smap_psock_sk(sock);
- smap_list_remove(psock, &stab->sock_map[i]);
- smap_release_sock(psock, sock);
+ /* This check handles a racing sock event that can grab the
+ * sk_callback_lock before this case but after the xchg happens,
+ * causing the refcnt to hit zero and the sock user data (psock)
+ * to be NULL and queued for garbage collection.
+ */
+ if (likely(psock)) {
+ smap_list_remove(psock, &stab->sock_map[i]);
+ smap_release_sock(psock, sock);
+ }
write_unlock_bh(&sock->sk_callback_lock);
}
rcu_read_unlock();
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index a15bc636cc98..b0ecf43f5894 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,14 +88,10 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
if (cost >= U32_MAX - PAGE_SIZE)
goto free_smap;
- smap->map.map_type = attr->map_type;
- smap->map.key_size = attr->key_size;
+ bpf_map_init_from_attr(&smap->map, attr);
smap->map.value_size = value_size;
- smap->map.max_entries = attr->max_entries;
- smap->map.map_flags = attr->map_flags;
smap->n_buckets = n_buckets;
smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- smap->map.numa_node = bpf_map_attr_numa_node(attr);
err = bpf_map_precharge_memlock(smap->map.pages);
if (err)
@@ -226,9 +222,33 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
return 0;
}
-static int stack_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+static int stack_map_get_next_key(struct bpf_map *map, void *key,
+ void *next_key)
{
- return -EINVAL;
+ struct bpf_stack_map *smap = container_of(map,
+ struct bpf_stack_map, map);
+ u32 id;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (!key) {
+ id = 0;
+ } else {
+ id = *(u32 *)key;
+ if (id >= smap->n_buckets || !smap->buckets[id])
+ id = 0;
+ else
+ id++;
+ }
+
+ while (id < smap->n_buckets && !smap->buckets[id])
+ id++;
+
+ if (id >= smap->n_buckets)
+ return -ENOENT;
+
+ *(u32 *)next_key = id;
+ return 0;
}
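With buckets now enumerable, userspace can walk every allocated stack id with
the usual BPF_MAP_GET_NEXT_KEY loop; a sketch (raw syscall, error handling
trimmed), where a NULL key yields the first id and -ENOENT ends the walk:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int map_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = (__u64)(unsigned long)key;
	attr.next_key = (__u64)(unsigned long)next_key;

	return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

static void walk_stack_ids(int fd)
{
	__u32 id, next;
	int err;

	for (err = map_next_key(fd, NULL, &next); !err;
	     id = next, err = map_next_key(fd, &id, &next))
		printf("stack id %u\n", next);
}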
static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2c4cfeaa8d5e..e24aa3241387 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -94,18 +94,34 @@ static int check_uarg_tail_zero(void __user *uaddr,
return 0;
}
+const struct bpf_map_ops bpf_map_offload_ops = {
+ .map_alloc = bpf_map_offload_map_alloc,
+ .map_free = bpf_map_offload_map_free,
+};
+
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
+ const struct bpf_map_ops *ops;
struct bpf_map *map;
+ int err;
- if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
- !bpf_map_types[attr->map_type])
+ if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
+ return ERR_PTR(-EINVAL);
+ ops = bpf_map_types[attr->map_type];
+ if (!ops)
return ERR_PTR(-EINVAL);
- map = bpf_map_types[attr->map_type]->map_alloc(attr);
+ if (ops->map_alloc_check) {
+ err = ops->map_alloc_check(attr);
+ if (err)
+ return ERR_PTR(err);
+ }
+ if (attr->map_ifindex)
+ ops = &bpf_map_offload_ops;
+ map = ops->map_alloc(attr);
if (IS_ERR(map))
return map;
- map->ops = bpf_map_types[attr->map_type];
+ map->ops = ops;
map->map_type = attr->map_type;
return map;
}
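To illustrate the split this hunk introduces, a sketch (names invented, not a
map type added by this patch) of ops providing the new optional
map_alloc_check(): pure attribute validation that still runs when the offload
path substitutes bpf_map_offload_ops for the actual allocation.

static int example_map_alloc_check(union bpf_attr *attr)
{
	/* validation only - no allocation, so it is safe for offloaded maps */
	if (attr->key_size != 4 || !attr->max_entries)
		return -EINVAL;
	return 0;
}

static const struct bpf_map_ops example_map_ops = {
	.map_alloc_check = example_map_alloc_check,
	/* .map_alloc, .map_free and the element ops as before */
};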
@@ -134,6 +150,16 @@ void bpf_map_area_free(void *area)
kvfree(area);
}
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
+{
+ map->map_type = attr->map_type;
+ map->key_size = attr->key_size;
+ map->value_size = attr->value_size;
+ map->max_entries = attr->max_entries;
+ map->map_flags = attr->map_flags;
+ map->numa_node = bpf_map_attr_numa_node(attr);
+}
+
int bpf_map_precharge_memlock(u32 pages)
{
struct user_struct *user = get_current_user();
@@ -189,16 +215,25 @@ static int bpf_map_alloc_id(struct bpf_map *map)
return id > 0 ? 0 : id;
}
-static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
unsigned long flags;
+ /* Offloaded maps are removed from the IDR store when their device
+ * disappears - even if someone holds an fd to them, they are unusable:
+ * the memory is gone, all ops will fail; they are simply waiting for
+ * the refcnt to drop so they can be freed.
+ */
+ if (!map->id)
+ return;
+
if (do_idr_lock)
spin_lock_irqsave(&map_idr_lock, flags);
else
__acquire(&map_idr_lock);
idr_remove(&map_idr, map->id);
+ map->id = 0;
if (do_idr_lock)
spin_unlock_irqrestore(&map_idr_lock, flags);
@@ -378,7 +413,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
return 0;
}
-#define BPF_MAP_CREATE_LAST_FIELD map_name
+#define BPF_MAP_CREATE_LAST_FIELD map_ifindex
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
@@ -566,8 +601,10 @@ static int map_lookup_elem(union bpf_attr *attr)
if (!value)
goto free_key;
- if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
- map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_lookup_elem(map, key, value);
+ } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_copy(map, key, value);
@@ -654,7 +691,10 @@ static int map_update_elem(union bpf_attr *attr)
goto free_value;
/* Need to create a kthread, thus must support schedule */
- if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_update_elem(map, key, value, attr->flags);
+ goto out;
+ } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
err = map->ops->map_update_elem(map, key, value, attr->flags);
goto out;
}
@@ -669,10 +709,7 @@ static int map_update_elem(union bpf_attr *attr)
err = bpf_percpu_hash_update(map, key, value, attr->flags);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_update(map, key, value, attr->flags);
- } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
- map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
- map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
- map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
+ } else if (IS_FD_ARRAY(map)) {
rcu_read_lock();
err = bpf_fd_array_map_update_elem(map, f.file, key, value,
attr->flags);
@@ -731,6 +768,11 @@ static int map_delete_elem(union bpf_attr *attr)
goto err_put;
}
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_delete_elem(map, key);
+ goto out;
+ }
+
preempt_disable();
__this_cpu_inc(bpf_prog_active);
rcu_read_lock();
@@ -738,7 +780,7 @@ static int map_delete_elem(union bpf_attr *attr)
rcu_read_unlock();
__this_cpu_dec(bpf_prog_active);
preempt_enable();
-
+out:
if (!err)
trace_bpf_map_delete_elem(map, ufd, key);
kfree(key);
@@ -788,9 +830,15 @@ static int map_get_next_key(union bpf_attr *attr)
if (!next_key)
goto free_key;
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_get_next_key(map, key, next_key);
+ goto out;
+ }
+
rcu_read_lock();
err = map->ops->map_get_next_key(map, key, next_key);
rcu_read_unlock();
+out:
if (err)
goto free_next_key;
@@ -905,9 +953,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
return id > 0 ? 0 : id;
}
-static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
- /* cBPF to eBPF migrations are currently not in the idr store. */
+ /* cBPF to eBPF migrations are currently not in the idr store.
+ * Offloaded programs are removed from the store when their device
+ * disappears - even if someone grabs an fd to them, they are unusable,
+ * simply waiting for the refcnt to drop so they can be freed.
+ */
if (!prog->aux->id)
return;
@@ -917,6 +969,7 @@ static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
__acquire(&prog_idr_lock);
idr_remove(&prog_idr, prog->aux->id);
+ prog->aux->id = 0;
if (do_idr_lock)
spin_unlock_bh(&prog_idr_lock);
@@ -937,10 +990,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
+ int i;
+
trace_bpf_prog_put_rcu(prog);
/* bpf_prog_free_id() must be called first */
bpf_prog_free_id(prog, do_idr_lock);
+
+ for (i = 0; i < prog->aux->func_cnt; i++)
+ bpf_prog_kallsyms_del(prog->aux->func[i]);
bpf_prog_kallsyms_del(prog);
+
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
}
@@ -1057,7 +1116,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
-static bool bpf_prog_get_ok(struct bpf_prog *prog,
+bool bpf_prog_get_ok(struct bpf_prog *prog,
enum bpf_prog_type *attach_type, bool attach_drv)
{
/* not an attachment, just a refcount inc, always allow */
@@ -1151,6 +1210,8 @@ static int bpf_prog_load(union bpf_attr *attr)
if (!prog)
return -ENOMEM;
+ prog->aux->offload_requested = !!attr->prog_ifindex;
+
err = security_bpf_prog_alloc(prog->aux);
if (err)
goto free_prog_nouncharge;
@@ -1172,7 +1233,7 @@ static int bpf_prog_load(union bpf_attr *attr)
atomic_set(&prog->aux->refcnt, 1);
prog->gpl_compatible = is_gpl ? 1 : 0;
- if (attr->prog_ifindex) {
+ if (bpf_prog_is_dev_bound(prog->aux)) {
err = bpf_prog_offload_init(prog, attr);
if (err)
goto free_prog;
@@ -1194,7 +1255,8 @@ static int bpf_prog_load(union bpf_attr *attr)
goto free_used_maps;
/* eBPF program is ready to be JITed */
- prog = bpf_prog_select_runtime(prog, &err);
+ if (!prog->bpf_func)
+ prog = bpf_prog_select_runtime(prog, &err);
if (err < 0)
goto free_used_maps;
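For reference, the userspace counterpart (sketch; device name illustrative,
and the driver must implement ndo_bpf) that makes bpf_prog_is_dev_bound()
true at load time:

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int load_offloaded_xdp(const struct bpf_insn *insns, __u32 insn_cnt,
			      const char *ifname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_XDP;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	attr.prog_ifindex = if_nametoindex(ifname);	/* request offload */

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}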
@@ -1439,6 +1501,8 @@ static int bpf_prog_test_run(const union bpf_attr *attr,
struct bpf_prog *prog;
int ret = -ENOTSUPP;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
if (CHECK_ATTR(BPF_PROG_TEST_RUN))
return -EINVAL;
@@ -1551,6 +1615,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
return fd;
}
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+ unsigned long addr)
+{
+ int i;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++)
+ if (prog->aux->used_maps[i] == (void *)addr)
+ return prog->aux->used_maps[i];
+ return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+ const struct bpf_map *map;
+ struct bpf_insn *insns;
+ u64 imm;
+ int i;
+
+ insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+ GFP_USER);
+ if (!insns)
+ return insns;
+
+ for (i = 0; i < prog->len; i++) {
+ if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+ insns[i].code = BPF_JMP | BPF_CALL;
+ insns[i].imm = BPF_FUNC_tail_call;
+ /* fall-through */
+ }
+ if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+ insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+ if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+ insns[i].code = BPF_JMP | BPF_CALL;
+ if (!bpf_dump_raw_ok())
+ insns[i].imm = 0;
+ continue;
+ }
+
+ if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+ continue;
+
+ imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+ map = bpf_map_from_imm(prog, imm);
+ if (map) {
+ insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+ insns[i].imm = map->id;
+ insns[i + 1].imm = 0;
+ continue;
+ }
+
+ if (!bpf_dump_raw_ok() &&
+ imm == (unsigned long)prog->aux) {
+ insns[i].imm = 0;
+ insns[i + 1].imm = 0;
+ continue;
+ }
+ }
+
+ return insns;
+}
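On the consumer side, a sketch of decoding the sanitized dump: after this
change, a two-slot ld_imm64 whose src_reg was rewritten to BPF_PSEUDO_MAP_FD
carries a map id instead of a kernel address (insns/cnt as returned by
BPF_OBJ_GET_INFO_BY_FD):

#include <stdio.h>
#include <linux/bpf.h>

static void print_map_refs(const struct bpf_insn *insns, unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;
		if (insns[i].src_reg == BPF_PSEUDO_MAP_FD)
			printf("insn %u references map id %d\n",
			       i, insns[i].imm);
		i++;	/* ld_imm64 occupies two instruction slots */
	}
}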
+
static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
@@ -1598,24 +1723,51 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
goto done;
}
- ulen = info.jited_prog_len;
- info.jited_prog_len = prog->jited_len;
- if (info.jited_prog_len && ulen) {
- uinsns = u64_to_user_ptr(info.jited_prog_insns);
- ulen = min_t(u32, info.jited_prog_len, ulen);
- if (copy_to_user(uinsns, prog->bpf_func, ulen))
- return -EFAULT;
- }
-
ulen = info.xlated_prog_len;
info.xlated_prog_len = bpf_prog_insn_size(prog);
if (info.xlated_prog_len && ulen) {
+ struct bpf_insn *insns_sanitized;
+ bool fault;
+
+ if (prog->blinded && !bpf_dump_raw_ok()) {
+ info.xlated_prog_insns = 0;
+ goto done;
+ }
+ insns_sanitized = bpf_insn_prepare_dump(prog);
+ if (!insns_sanitized)
+ return -ENOMEM;
uinsns = u64_to_user_ptr(info.xlated_prog_insns);
ulen = min_t(u32, info.xlated_prog_len, ulen);
- if (copy_to_user(uinsns, prog->insnsi, ulen))
+ fault = copy_to_user(uinsns, insns_sanitized, ulen);
+ kfree(insns_sanitized);
+ if (fault)
return -EFAULT;
}
+ if (bpf_prog_is_dev_bound(prog->aux)) {
+ err = bpf_prog_offload_info_fill(&info, prog);
+ if (err)
+ return err;
+ goto done;
+ }
+
+ /* NOTE: the following code is supposed to be skipped for offload.
+ * bpf_prog_offload_info_fill() is the place to fill similar fields
+ * for offload.
+ */
+ ulen = info.jited_prog_len;
+ info.jited_prog_len = prog->jited_len;
+ if (info.jited_prog_len && ulen) {
+ if (bpf_dump_raw_ok()) {
+ uinsns = u64_to_user_ptr(info.jited_prog_insns);
+ ulen = min_t(u32, info.jited_prog_len, ulen);
+ if (copy_to_user(uinsns, prog->bpf_func, ulen))
+ return -EFAULT;
+ } else {
+ info.jited_prog_insns = 0;
+ }
+ }
+
done:
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
@@ -1646,6 +1798,12 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
info.map_flags = map->map_flags;
memcpy(info.name, map->name, sizeof(map->name));
+ if (bpf_map_is_dev_bound(map)) {
+ err = bpf_map_offload_info_fill(&info, map);
+ if (err)
+ return err;
+ }
+
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
return -EFAULT;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 04b24876cd23..5fb69a85d967 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20,6 +20,8 @@
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
#include "disasm.h"
@@ -167,11 +169,11 @@ struct bpf_call_arg_meta {
static DEFINE_MUTEX(bpf_verifier_lock);
/* log_level controls verbosity level of eBPF verifier.
- * verbose() is used to dump the verification trace to the log, so the user
- * can figure out what's wrong with the program
+ * bpf_verifier_log_write() is used to dump the verification trace to the log,
+ * so the user can figure out what's wrong with the program
*/
-static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
- const char *fmt, ...)
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+ const char *fmt, ...)
{
struct bpf_verifer_log *log = &env->log;
unsigned int n;
@@ -195,6 +197,14 @@ static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
else
log->ubuf = NULL;
}
+EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
+/* Historically bpf_verifier_log_write was called verbose, but the name was too
+ * generic for symbol export. The function was renamed, but not the calls in
+ * the verifier to avoid complicating backports. Hence the alias below.
+ */
+static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
+ const char *fmt, ...)
+ __attribute__((alias("bpf_verifier_log_write")));
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
@@ -216,23 +226,48 @@ static const char * const reg_type_str[] = {
[PTR_TO_PACKET_END] = "pkt_end",
};
+static void print_liveness(struct bpf_verifier_env *env,
+ enum bpf_reg_liveness live)
+{
+ if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
+ verbose(env, "_");
+ if (live & REG_LIVE_READ)
+ verbose(env, "r");
+ if (live & REG_LIVE_WRITTEN)
+ verbose(env, "w");
+}
+
+static struct bpf_func_state *func(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg)
+{
+ struct bpf_verifier_state *cur = env->cur_state;
+
+ return cur->frame[reg->frameno];
+}
+
static void print_verifier_state(struct bpf_verifier_env *env,
- struct bpf_verifier_state *state)
+ const struct bpf_func_state *state)
{
- struct bpf_reg_state *reg;
+ const struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
+ if (state->frameno)
+ verbose(env, " frame%d:", state->frameno);
for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
t = reg->type;
if (t == NOT_INIT)
continue;
- verbose(env, " R%d=%s", i, reg_type_str[t]);
+ verbose(env, " R%d", i);
+ print_liveness(env, reg->live);
+ verbose(env, "=%s", reg_type_str[t]);
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
+ if (t == PTR_TO_STACK)
+ verbose(env, ",call_%d", func(env, reg)->callsite);
} else {
verbose(env, "(id=%d", reg->id);
if (t != SCALAR_VALUE)
@@ -277,16 +312,21 @@ static void print_verifier_state(struct bpf_verifier_env *env,
}
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] == STACK_SPILL)
- verbose(env, " fp%d=%s",
- -MAX_BPF_STACK + i * BPF_REG_SIZE,
+ if (state->stack[i].slot_type[0] == STACK_SPILL) {
+ verbose(env, " fp%d",
+ (-i - 1) * BPF_REG_SIZE);
+ print_liveness(env, state->stack[i].spilled_ptr.live);
+ verbose(env, "=%s",
reg_type_str[state->stack[i].spilled_ptr.type]);
+ }
+ if (state->stack[i].slot_type[0] == STACK_ZERO)
+ verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
}
verbose(env, "\n");
}
-static int copy_stack_state(struct bpf_verifier_state *dst,
- const struct bpf_verifier_state *src)
+static int copy_stack_state(struct bpf_func_state *dst,
+ const struct bpf_func_state *src)
{
if (!src->stack)
return 0;
@@ -302,13 +342,13 @@ static int copy_stack_state(struct bpf_verifier_state *dst,
/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
* make it consume minimal amount of memory. check_stack_write() access from
- * the program calls into realloc_verifier_state() to grow the stack size.
+ * the program calls into realloc_func_state() to grow the stack size.
* Note there is a non-zero 'parent' pointer inside bpf_verifier_state
* which this function copies over. It points to previous bpf_verifier_state
* which is never reallocated
*/
-static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
- bool copy_old)
+static int realloc_func_state(struct bpf_func_state *state, int size,
+ bool copy_old)
{
u32 old_size = state->allocated_stack;
struct bpf_stack_state *new_stack;
@@ -341,10 +381,23 @@ static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
return 0;
}
+static void free_func_state(struct bpf_func_state *state)
+{
+ if (!state)
+ return;
+ kfree(state->stack);
+ kfree(state);
+}
+
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{
- kfree(state->stack);
+ int i;
+
+ for (i = 0; i <= state->curframe; i++) {
+ free_func_state(state->frame[i]);
+ state->frame[i] = NULL;
+ }
if (free_self)
kfree(state);
}
@@ -352,18 +405,46 @@ static void free_verifier_state(struct bpf_verifier_state *state,
/* copy verifier state from src to dst growing dst stack space
* when necessary to accommodate larger src stack
*/
-static int copy_verifier_state(struct bpf_verifier_state *dst,
- const struct bpf_verifier_state *src)
+static int copy_func_state(struct bpf_func_state *dst,
+ const struct bpf_func_state *src)
{
int err;
- err = realloc_verifier_state(dst, src->allocated_stack, false);
+ err = realloc_func_state(dst, src->allocated_stack, false);
if (err)
return err;
- memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
+ memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
return copy_stack_state(dst, src);
}
+static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+ const struct bpf_verifier_state *src)
+{
+ struct bpf_func_state *dst;
+ int i, err;
+
+ /* if dst has more stack frames than src, free them */
+ for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
+ free_func_state(dst_state->frame[i]);
+ dst_state->frame[i] = NULL;
+ }
+ dst_state->curframe = src->curframe;
+ dst_state->parent = src->parent;
+ for (i = 0; i <= src->curframe; i++) {
+ dst = dst_state->frame[i];
+ if (!dst) {
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+ dst_state->frame[i] = dst;
+ }
+ err = copy_func_state(dst, src->frame[i]);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx)
{
@@ -416,6 +497,8 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
}
return &elem->st;
err:
+ free_verifier_state(env->cur_state, true);
+ env->cur_state = NULL;
/* pop all elements and return */
while (!pop_stack(env, NULL, NULL));
return NULL;
@@ -425,6 +508,10 @@ err:
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
+#define CALLEE_SAVED_REGS 4
+static const int callee_saved[CALLEE_SAVED_REGS] = {
+ BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9
+};
static void __mark_reg_not_init(struct bpf_reg_state *reg);
@@ -449,6 +536,13 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg)
__mark_reg_known(reg, 0);
}
+static void __mark_reg_const_zero(struct bpf_reg_state *reg)
+{
+ __mark_reg_known(reg, 0);
+ reg->off = 0;
+ reg->type = SCALAR_VALUE;
+}
+
static void mark_reg_known_zero(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
@@ -560,6 +654,7 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
reg->id = 0;
reg->off = 0;
reg->var_off = tnum_unknown;
+ reg->frameno = 0;
__mark_reg_unbounded(reg);
}
@@ -568,8 +663,8 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
- /* Something bad happened, let's kill all regs */
- for (regno = 0; regno < MAX_BPF_REG; regno++)
+ /* Something bad happened, let's kill all regs except FP */
+ for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
@@ -587,8 +682,8 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
- /* Something bad happened, let's kill all regs */
- for (regno = 0; regno < MAX_BPF_REG; regno++)
+ /* Something bad happened, let's kill all regs except FP */
+ for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
@@ -596,8 +691,9 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
}
static void init_reg_state(struct bpf_verifier_env *env,
- struct bpf_reg_state *regs)
+ struct bpf_func_state *state)
{
+ struct bpf_reg_state *regs = state->regs;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
@@ -608,41 +704,218 @@ static void init_reg_state(struct bpf_verifier_env *env,
/* frame pointer */
regs[BPF_REG_FP].type = PTR_TO_STACK;
mark_reg_known_zero(env, regs, BPF_REG_FP);
+ regs[BPF_REG_FP].frameno = state->frameno;
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
mark_reg_known_zero(env, regs, BPF_REG_1);
}
+#define BPF_MAIN_FUNC (-1)
+static void init_func_state(struct bpf_verifier_env *env,
+ struct bpf_func_state *state,
+ int callsite, int frameno, int subprogno)
+{
+ state->callsite = callsite;
+ state->frameno = frameno;
+ state->subprogno = subprogno;
+ init_reg_state(env, state);
+}
+
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
-static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
+static int cmp_subprogs(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int find_subprog(struct bpf_verifier_env *env, int off)
+{
+ u32 *p;
+
+ p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
+ sizeof(env->subprog_starts[0]), cmp_subprogs);
+ if (!p)
+ return -ENOENT;
+ return p - env->subprog_starts;
+}
+
+static int add_subprog(struct bpf_verifier_env *env, int off)
{
- struct bpf_verifier_state *parent = state->parent;
+ int insn_cnt = env->prog->len;
+ int ret;
+
+ if (off >= insn_cnt || off < 0) {
+ verbose(env, "call to invalid destination\n");
+ return -EINVAL;
+ }
+ ret = find_subprog(env, off);
+ if (ret >= 0)
+ return 0;
+ if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
+ verbose(env, "too many subprograms\n");
+ return -E2BIG;
+ }
+ env->subprog_starts[env->subprog_cnt++] = off;
+ sort(env->subprog_starts, env->subprog_cnt,
+ sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
+ return 0;
+}
+
+static int check_subprogs(struct bpf_verifier_env *env)
+{
+ int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+
+ /* determine subprog starts. The end is one before the next starts */
+ for (i = 0; i < insn_cnt; i++) {
+ if (insn[i].code != (BPF_JMP | BPF_CALL))
+ continue;
+ if (insn[i].src_reg != BPF_PSEUDO_CALL)
+ continue;
+ if (!env->allow_ptr_leaks) {
+ verbose(env, "function calls to other bpf functions are allowed for root only\n");
+ return -EPERM;
+ }
+ if (bpf_prog_is_dev_bound(env->prog->aux)) {
+ verbose(env, "function calls in offloaded programs are not supported yet\n");
+ return -EINVAL;
+ }
+ ret = add_subprog(env, i + insn[i].imm + 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (env->log.level > 1)
+ for (i = 0; i < env->subprog_cnt; i++)
+ verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]);
+
+ /* now check that all jumps are within the same subprog */
+ subprog_start = 0;
+ if (env->subprog_cnt == cur_subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[cur_subprog++];
+ for (i = 0; i < insn_cnt; i++) {
+ u8 code = insn[i].code;
+
+ if (BPF_CLASS(code) != BPF_JMP)
+ goto next;
+ if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
+ goto next;
+ off = i + insn[i].off + 1;
+ if (off < subprog_start || off >= subprog_end) {
+ verbose(env, "jump out of range from insn %d to %d\n", i, off);
+ return -EINVAL;
+ }
+next:
+ if (i == subprog_end - 1) {
+ /* to avoid fall-through from one subprog into another
+ * the last insn of the subprog should be either exit
+ * or unconditional jump back
+ */
+ if (code != (BPF_JMP | BPF_EXIT) &&
+ code != (BPF_JMP | BPF_JA)) {
+ verbose(env, "last insn is not an exit or jmp\n");
+ return -EINVAL;
+ }
+ subprog_start = subprog_end;
+ if (env->subprog_cnt == cur_subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[cur_subprog++];
+ }
+ }
+ return 0;
+}
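As a concrete example of what this pass accepts, a two-function program in
the insn macros from include/linux/filter.h (a sketch; note the patch gates
such calls on allow_ptr_leaks, i.e. root): the call encodes
src_reg = BPF_PSEUDO_CALL and imm as the relative target.

	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_1, 1),
		/* call target = insn_idx + imm + 1 = 1 + 1 + 1 = 3 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 1),
		BPF_EXIT_INSN(),	/* main may not fall through */
		/* subprog starts at insn 3 */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_EXIT_INSN(),	/* last insn must be exit or ja */
	};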
+
+static
+struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
+ const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent,
+ u32 regno)
+{
+ struct bpf_verifier_state *tmp = NULL;
+
+ /* 'parent' could be a state of caller and
+ * 'state' could be a state of callee. In such case
+ * parent->curframe < state->curframe
+ * and it's ok for r1 - r5 registers
+ *
+ * 'parent' could be a callee's state after it bpf_exit-ed.
+ * In such case parent->curframe > state->curframe
+ * and it's ok for r0 only
+ */
+ if (parent->curframe == state->curframe ||
+ (parent->curframe < state->curframe &&
+ regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
+ (parent->curframe > state->curframe &&
+ regno == BPF_REG_0))
+ return parent;
+
+ if (parent->curframe > state->curframe &&
+ regno >= BPF_REG_6) {
+ /* for callee saved regs we have to skip the whole chain
+ * of states that belong to callee and mark as LIVE_READ
+ * the registers before the call
+ */
+ tmp = parent;
+ while (tmp && tmp->curframe != state->curframe) {
+ tmp = tmp->parent;
+ }
+ if (!tmp)
+ goto bug;
+ parent = tmp;
+ } else {
+ goto bug;
+ }
+ return parent;
+bug:
+ verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
+ verbose(env, "regno %d parent frame %d current frame %d\n",
+ regno, parent->curframe, state->curframe);
+ return NULL;
+}
+
+static int mark_reg_read(struct bpf_verifier_env *env,
+ const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent,
+ u32 regno)
+{
+ bool writes = parent == state->parent; /* Observe write marks */
if (regno == BPF_REG_FP)
/* We don't need to worry about FP liveness because it's read-only */
- return;
+ return 0;
while (parent) {
/* if read wasn't screened by an earlier write ... */
- if (state->regs[regno].live & REG_LIVE_WRITTEN)
+ if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
break;
+ parent = skip_callee(env, state, parent, regno);
+ if (!parent)
+ return -EFAULT;
/* ... then we depend on parent's value */
- parent->regs[regno].live |= REG_LIVE_READ;
+ parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
+ writes = true;
}
+ return 0;
}
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{
- struct bpf_reg_state *regs = env->cur_state->regs;
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
if (regno >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", regno);
@@ -655,7 +928,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
verbose(env, "R%d !read_ok\n", regno);
return -EACCES;
}
- mark_reg_read(env->cur_state, regno);
+ return mark_reg_read(env, vstate, vstate->parent, regno);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
@@ -686,17 +959,25 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
}
}
+/* Does this register contain a constant zero? */
+static bool register_is_null(struct bpf_reg_state *reg)
+{
+ return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
+}
+
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
static int check_stack_write(struct bpf_verifier_env *env,
- struct bpf_verifier_state *state, int off,
- int size, int value_regno)
+ struct bpf_func_state *state, /* func where register points to */
+ int off, int size, int value_regno)
{
+ struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+ enum bpf_reg_type type;
- err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
- true);
+ err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
+ true);
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -709,8 +990,9 @@ static int check_stack_write(struct bpf_verifier_env *env,
return -EACCES;
}
+ cur = env->cur_state->frame[env->cur_state->curframe];
if (value_regno >= 0 &&
- is_spillable_regtype(state->regs[value_regno].type)) {
+ is_spillable_regtype((type = cur->regs[value_regno].type))) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
@@ -718,51 +1000,116 @@ static int check_stack_write(struct bpf_verifier_env *env,
return -EACCES;
}
+ if (state != cur && type == PTR_TO_STACK) {
+ verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
+ return -EINVAL;
+ }
+
/* save register state */
- state->stack[spi].spilled_ptr = state->regs[value_regno];
+ state->stack[spi].spilled_ptr = cur->regs[value_regno];
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack[spi].slot_type[i] = STACK_SPILL;
} else {
+ u8 type = STACK_MISC;
+
/* regular write of data into stack */
state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
+ /* only mark the slot as written if all 8 bytes were written,
+ * otherwise read propagation may incorrectly stop too soon
+ * when stack slots are partially written.
+ * This heuristic means that read propagation will be
+ * conservative, since it will add reg_live_read marks
+ * to stack slots all the way to the first state when a program
+ * writes+reads less than 8 bytes.
+ */
+ if (size == BPF_REG_SIZE)
+ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+ /* when we zero initialize stack slots mark them as such */
+ if (value_regno >= 0 &&
+ register_is_null(&cur->regs[value_regno]))
+ type = STACK_ZERO;
+
for (i = 0; i < size; i++)
state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
- STACK_MISC;
+ type;
}
return 0;
}
-static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
+/* registers of every function are unique and mark_reg_read() propagates
+ * the liveness in the following cases:
+ * - from callee into caller for R1 - R5 that were used as arguments
+ * - from caller into callee for R0 that is used as the result of the call
+ * - from caller to the same caller skipping states of the callee for R6 - R9,
+ * since R6 - R9 are callee saved by implicit function prologue and
+ * caller's R6 != callee's R6, so when we propagate liveness up to
+ * parent states we need to skip callee states for R6 - R9.
+ *
+ * stack slot marking is different, since stacks of caller and callee are
+ * accessible in both (since the caller can pass a pointer to its own stack
+ * to the callee, which can pass it on to another function); hence
+ * mark_stack_slot_read() has to propagate the stack liveness to all parent
+ * states at a given frame number.
+ * Consider code:
+ * f1() {
+ * ptr = fp - 8;
+ * *ptr = ctx;
+ * call f2 {
+ * .. = *ptr;
+ * }
+ * .. = *ptr;
+ * }
+ * First *ptr is reading from f1's stack and mark_stack_slot_read() has
+ * to mark liveness at the f1's frame and not f2's frame.
+ * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
+ * to propagate liveness to f2 states at f1's frame level and further into
+ * f1 states at f1's frame level until write into that stack slot
+ */
+static void mark_stack_slot_read(struct bpf_verifier_env *env,
+ const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent,
+ int slot, int frameno)
{
- struct bpf_verifier_state *parent = state->parent;
+ bool writes = parent == state->parent; /* Observe write marks */
while (parent) {
+ if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
+ /* since the LIVE_WRITTEN mark is only done for a full 8-byte
+ * write, the read marks are conservative and the parent
+ * state may not even have the stack allocated. In that case
+ * end the propagation, since the loop reached the beginning
+ * of the function
+ */
+ break;
/* if read wasn't screened by an earlier write ... */
- if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
+ if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
break;
/* ... then we depend on parent's value */
- parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
+ parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
+ writes = true;
}
}
static int check_stack_read(struct bpf_verifier_env *env,
- struct bpf_verifier_state *state, int off, int size,
- int value_regno)
+ struct bpf_func_state *reg_state /* func where register points to */,
+ int off, int size, int value_regno)
{
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
u8 *stype;
- if (state->allocated_stack <= slot) {
+ if (reg_state->allocated_stack <= slot) {
verbose(env, "invalid read from stack off %d+0 size %d\n",
off, size);
return -EACCES;
}
- stype = state->stack[spi].slot_type;
+ stype = reg_state->stack[spi].slot_type;
if (stype[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
@@ -778,21 +1125,44 @@ static int check_stack_read(struct bpf_verifier_env *env,
if (value_regno >= 0) {
/* restore register state from stack */
- state->regs[value_regno] = state->stack[spi].spilled_ptr;
- mark_stack_slot_read(state, spi);
+ state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
+ /* mark reg as written since spilled pointer state likely
+ * has its liveness marks cleared by is_state_visited()
+ * which resets stack/reg liveness for state transitions
+ */
+ state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
+ mark_stack_slot_read(env, vstate, vstate->parent, spi,
+ reg_state->frameno);
return 0;
} else {
+ int zeros = 0;
+
for (i = 0; i < size; i++) {
- if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
- verbose(env, "invalid read from stack off %d+%d size %d\n",
- off, i, size);
- return -EACCES;
+ if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
+ continue;
+ if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
+ zeros++;
+ continue;
}
+ verbose(env, "invalid read from stack off %d+%d size %d\n",
+ off, i, size);
+ return -EACCES;
+ }
+ mark_stack_slot_read(env, vstate, vstate->parent, spi,
+ reg_state->frameno);
+ if (value_regno >= 0) {
+ if (zeros == size) {
+ /* any size read into register is zero extended,
+ * so the whole register == const_zero
+ */
+ __mark_reg_const_zero(&state->regs[value_regno]);
+ } else {
+ /* have read misc data from the stack */
+ mark_reg_unknown(env, state->regs, value_regno);
+ }
+ state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
- if (value_regno >= 0)
- /* have read misc data from the stack */
- mark_reg_unknown(env, state->regs, value_regno);
return 0;
}
}
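To make the STACK_ZERO path concrete, a snippet (filter.h macros, sketch)
that this reader now accepts as a known constant-zero load even though only
a single byte was written:

	BPF_MOV64_IMM(BPF_REG_2, 0),
	/* one zero byte: the slot becomes STACK_ZERO, not STACK_MISC */
	BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_2, -1),
	/* zeros == size, so R3 becomes a const-zero SCALAR_VALUE */
	BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_10, -1),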
@@ -817,7 +1187,8 @@ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed)
{
- struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg = &state->regs[regno];
int err;
@@ -978,6 +1349,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
}
+static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
+{
+ const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+ return reg->type == PTR_TO_CTX;
+}
+
static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size, bool strict)
@@ -1072,6 +1450,103 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
strict);
}
+static int update_stack_depth(struct bpf_verifier_env *env,
+ const struct bpf_func_state *func,
+ int off)
+{
+ u16 stack = env->subprog_stack_depth[func->subprogno];
+
+ if (stack >= -off)
+ return 0;
+
+ /* update known max for given subprogram */
+ env->subprog_stack_depth[func->subprogno] = -off;
+ return 0;
+}
+
+/* starting from main bpf function walk all instructions of the function
+ * and recursively walk all callees that given function can call.
+ * Ignore jump and exit insns.
+ * Since recursion is prevented by check_cfg() this algorithm
+ * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+ */
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+ int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+ int ret_insn[MAX_CALL_FRAMES];
+ int ret_prog[MAX_CALL_FRAMES];
+
+process_func:
+ /* round up to 32 bytes, since this is the granularity
+ * of the interpreter stack size
+ */
+ depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+ if (depth > MAX_BPF_STACK) {
+ verbose(env, "combined stack size of %d calls is %d. Too large\n",
+ frame + 1, depth);
+ return -EACCES;
+ }
+continue_func:
+ if (env->subprog_cnt == subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[subprog];
+ for (; i < subprog_end; i++) {
+ if (insn[i].code != (BPF_JMP | BPF_CALL))
+ continue;
+ if (insn[i].src_reg != BPF_PSEUDO_CALL)
+ continue;
+ /* remember insn and function to return to */
+ ret_insn[frame] = i + 1;
+ ret_prog[frame] = subprog;
+
+ /* find the callee */
+ i = i + insn[i].imm + 1;
+ subprog = find_subprog(env, i);
+ if (subprog < 0) {
+ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+ i);
+ return -EFAULT;
+ }
+ subprog++;
+ frame++;
+ if (frame >= MAX_CALL_FRAMES) {
+ WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
+ return -EFAULT;
+ }
+ goto process_func;
+ }
+ /* end of for() loop means the last insn of the 'subprog'
+ * was reached. Doesn't matter whether it was JA or EXIT
+ */
+ if (frame == 0)
+ return 0;
+ depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+ frame--;
+ i = ret_insn[frame];
+ subprog = ret_prog[frame];
+ goto continue_func;
+}
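A worked example of the accounting above: three nested frames that each use
200 bytes of stack round up to 224 bytes apiece, so the deepest chain costs
3 * 224 = 672 bytes and is rejected against MAX_BPF_STACK (512), while two
such frames (448 bytes) pass.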
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+static int get_callee_stack_depth(struct bpf_verifier_env *env,
+ const struct bpf_insn *insn, int idx)
+{
+ int start = idx + insn->imm + 1, subprog;
+
+ subprog = find_subprog(env, start);
+ if (subprog < 0) {
+ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+ start);
+ return -EFAULT;
+ }
+ subprog++;
+ return env->subprog_stack_depth[subprog];
+}
+#endif
+
/* truncate register to smaller size (in bytes)
* must be called with size < BPF_REG_SIZE
*/
@@ -1105,9 +1580,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
int bpf_size, enum bpf_access_type t,
int value_regno)
{
- struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
+ struct bpf_func_state *state;
int size, err = 0;
size = bpf_size_to_bytes(bpf_size);
@@ -1196,8 +1671,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return -EACCES;
}
- if (env->prog->aux->stack_depth < -off)
- env->prog->aux->stack_depth = -off;
+ state = func(env, reg);
+ err = update_stack_depth(env, state, off);
+ if (err)
+ return err;
if (t == BPF_WRITE)
err = check_stack_write(env, state, off, size,
@@ -1258,6 +1735,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
return -EACCES;
}
+ if (is_ctx_reg(env, insn->dst_reg)) {
+ verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
+ insn->dst_reg);
+ return -EACCES;
+ }
+
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1269,12 +1752,6 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
BPF_SIZE(insn->code), BPF_WRITE, -1);
}
-/* Does this register contain a constant zero? */
-static bool register_is_null(struct bpf_reg_state reg)
-{
- return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
-}
-
/* when register 'regno' is passed into function that will read 'access_size'
* bytes from that pointer, make sure that it's within stack boundary
* and all elements of stack are initialized.
@@ -1285,32 +1762,32 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
- struct bpf_verifier_state *state = env->cur_state;
- struct bpf_reg_state *regs = state->regs;
+ struct bpf_reg_state *reg = cur_regs(env) + regno;
+ struct bpf_func_state *state = func(env, reg);
int off, i, slot, spi;
- if (regs[regno].type != PTR_TO_STACK) {
+ if (reg->type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
- register_is_null(regs[regno]))
+ register_is_null(reg))
return 0;
verbose(env, "R%d type=%s expected=%s\n", regno,
- reg_type_str[regs[regno].type],
+ reg_type_str[reg->type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
/* Only allow fixed-offset stack reads */
- if (!tnum_is_const(regs[regno].var_off)) {
+ if (!tnum_is_const(reg->var_off)) {
char tn_buf[48];
- tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
return -EACCES;
}
- off = regs[regno].off + regs[regno].var_off.value;
+ off = reg->off + reg->var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
@@ -1318,9 +1795,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
return -EACCES;
}
- if (env->prog->aux->stack_depth < -off)
- env->prog->aux->stack_depth = -off;
-
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
@@ -1328,17 +1802,32 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
}
for (i = 0; i < access_size; i++) {
+ u8 *stype;
+
slot = -(off + i) - 1;
spi = slot / BPF_REG_SIZE;
- if (state->allocated_stack <= slot ||
- state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
- STACK_MISC) {
- verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
- off, i, access_size);
- return -EACCES;
+ if (state->allocated_stack <= slot)
+ goto err;
+ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+ if (*stype == STACK_MISC)
+ goto mark;
+ if (*stype == STACK_ZERO) {
+ /* helper can write anything into the stack */
+ *stype = STACK_MISC;
+ goto mark;
}
+err:
+ verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
+ off, i, access_size);
+ return -EACCES;
+mark:
+ /* reading any byte out of 8-byte 'spill_slot' will cause
+ * the whole slot to be marked as 'read'
+ */
+ mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
+ spi, state->frameno);
}
- return 0;
+ return update_stack_depth(env, state, off);
}
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
@@ -1361,6 +1850,19 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
}
}
+static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
+{
+ return type == ARG_PTR_TO_MEM ||
+ type == ARG_PTR_TO_MEM_OR_NULL ||
+ type == ARG_PTR_TO_UNINIT_MEM;
+}
+
+static bool arg_type_is_mem_size(enum bpf_arg_type type)
+{
+ return type == ARG_CONST_SIZE ||
+ type == ARG_CONST_SIZE_OR_ZERO;
+}
+
static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
@@ -1410,15 +1912,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
expected_type = PTR_TO_CTX;
if (type != expected_type)
goto err_type;
- } else if (arg_type == ARG_PTR_TO_MEM ||
- arg_type == ARG_PTR_TO_MEM_OR_NULL ||
- arg_type == ARG_PTR_TO_UNINIT_MEM) {
+ } else if (arg_type_is_mem_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
* passed in as argument, it's a SCALAR_VALUE type. Final test
* happens during stack boundary checking.
*/
- if (register_is_null(*reg) &&
+ if (register_is_null(reg) &&
arg_type == ARG_PTR_TO_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
@@ -1473,25 +1973,12 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
err = check_stack_boundary(env, regno,
meta->map_ptr->value_size,
false, NULL);
- } else if (arg_type == ARG_CONST_SIZE ||
- arg_type == ARG_CONST_SIZE_OR_ZERO) {
+ } else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
- /* bpf_xxx(..., buf, len) call will access 'len' bytes
- * from stack pointer 'buf'. Check it
- * note: regno == len, regno - 1 == buf
- */
- if (regno == 0) {
- /* kernel subsystem misconfigured verifier */
- verbose(env,
- "ARG_CONST_SIZE cannot be first argument\n");
- return -EACCES;
- }
-
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
*/
-
if (!tnum_is_const(reg->var_off))
/* For unprivileged variable accesses, disable raw
* mode so that the program is required to
@@ -1591,6 +2078,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
+ if (env->subprog_cnt) {
+ verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
+ return -EINVAL;
+ }
break;
case BPF_FUNC_perf_event_read:
case BPF_FUNC_perf_event_output:
@@ -1631,7 +2122,7 @@ error:
return -EINVAL;
}
-static int check_raw_mode(const struct bpf_func_proto *fn)
+static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
int count = 0;
@@ -1646,15 +2137,52 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
count++;
- return count > 1 ? -EINVAL : 0;
+ /* We only support one arg being in raw mode at the moment,
+ * which is sufficient for the helper functions we have
+ * right now.
+ */
+ return count <= 1;
+}
+
+static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
+ enum bpf_arg_type arg_next)
+{
+ return (arg_type_is_mem_ptr(arg_curr) &&
+ !arg_type_is_mem_size(arg_next)) ||
+ (!arg_type_is_mem_ptr(arg_curr) &&
+ arg_type_is_mem_size(arg_next));
+}
+
+static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
+{
+ /* bpf_xxx(..., buf, len) call will access 'len'
+ * bytes from memory 'buf'. Both arg types need
+ * to be paired, so make sure there's no buggy
+ * helper function specification.
+ */
+ if (arg_type_is_mem_size(fn->arg1_type) ||
+ arg_type_is_mem_ptr(fn->arg5_type) ||
+ check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
+ check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
+ check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
+ check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
+ return false;
+
+ return true;
+}
+
+static int check_func_proto(const struct bpf_func_proto *fn)
+{
+ return check_raw_mode_ok(fn) &&
+ check_arg_pair_ok(fn) ? 0 : -EINVAL;
}
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
* are now invalid, so turn them into unknown SCALAR_VALUE.
*/
-static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
+static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
+ struct bpf_func_state *state)
{
- struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = state->regs, *reg;
int i;
@@ -1671,7 +2199,121 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
}
}
-static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
+static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *vstate = env->cur_state;
+ int i;
+
+ for (i = 0; i <= vstate->curframe; i++)
+ __clear_all_pkt_pointers(env, vstate->frame[i]);
+}
+
+static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ int *insn_idx)
+{
+ struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_func_state *caller, *callee;
+ int i, subprog, target_insn;
+
+ if (state->curframe + 1 >= MAX_CALL_FRAMES) {
+ verbose(env, "the call stack of %d frames is too deep\n",
+ state->curframe + 2);
+ return -E2BIG;
+ }
+
+ target_insn = *insn_idx + insn->imm;
+ subprog = find_subprog(env, target_insn + 1);
+ if (subprog < 0) {
+ verbose(env, "verifier bug. No program starts at insn %d\n",
+ target_insn + 1);
+ return -EFAULT;
+ }
+
+ caller = state->frame[state->curframe];
+ if (state->frame[state->curframe + 1]) {
+ verbose(env, "verifier bug. Frame %d already allocated\n",
+ state->curframe + 1);
+ return -EFAULT;
+ }
+
+ callee = kzalloc(sizeof(*callee), GFP_KERNEL);
+ if (!callee)
+ return -ENOMEM;
+ state->frame[state->curframe + 1] = callee;
+
+ /* callee cannot access r0, r6 - r9 for reading and has to write
+ * into its own stack before reading from it.
+ * callee can read/write into caller's stack
+ */
+ init_func_state(env, callee,
+ /* remember the callsite, it will be used by bpf_exit */
+ *insn_idx /* callsite */,
+ state->curframe + 1 /* frameno within this callchain */,
+ subprog + 1 /* subprog number within this prog */);
+
+ /* copy r1 - r5 args that callee can access */
+ for (i = BPF_REG_1; i <= BPF_REG_5; i++)
+ callee->regs[i] = caller->regs[i];
+
+ /* after the call, registers r0 - r5 were scratched */
+ for (i = 0; i < CALLER_SAVED_REGS; i++) {
+ mark_reg_not_init(env, caller->regs, caller_saved[i]);
+ check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
+ }
+
+ /* only increment it after check_reg_arg() finished */
+ state->curframe++;
+
+ /* and go analyze first insn of the callee */
+ *insn_idx = target_insn;
+
+ if (env->log.level) {
+ verbose(env, "caller:\n");
+ print_verifier_state(env, caller);
+ verbose(env, "callee:\n");
+ print_verifier_state(env, callee);
+ }
+ return 0;
+}
+
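A minimal sketch of the instruction layout this handles, written with the kernel's insn macros (the program itself is made up). A call insn with src_reg == BPF_PSEUDO_CALL at index 1 and imm == 1 targets insn 3, so the subprog (frame 1) starts there with r1 copied from the caller:

	BPF_MOV64_IMM(BPF_REG_1, 3),			/* 0: frame 0 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0,
		     BPF_PSEUDO_CALL, 0, 1),		/* 1: call -> insn 3 */
	BPF_EXIT_INSN(),				/* 2: sees callee's r0 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),		/* 3: frame 1 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),		/* 4 */
	BPF_EXIT_INSN(),				/* 5: back to insn 2 */

After the call, r0-r5 of frame 0 are scratched while r6-r9 survive, mirroring the helper-call convention.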
+static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+{
+ struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_func_state *caller, *callee;
+ struct bpf_reg_state *r0;
+
+ callee = state->frame[state->curframe];
+ r0 = &callee->regs[BPF_REG_0];
+ if (r0->type == PTR_TO_STACK) {
+ /* technically it's ok to return caller's stack pointer
+ * (or caller's caller's pointer) back to the caller,
+ * since these pointers are valid. Only current stack
+ * pointer will be invalid as soon as function exits,
+ * but let's be conservative
+ */
+ verbose(env, "cannot return stack pointer to the caller\n");
+ return -EINVAL;
+ }
+
+ state->curframe--;
+ caller = state->frame[state->curframe];
+ /* return to the caller whatever r0 had in the callee */
+ caller->regs[BPF_REG_0] = *r0;
+
+ *insn_idx = callee->callsite + 1;
+ if (env->log.level) {
+ verbose(env, "returning from callee:\n");
+ print_verifier_state(env, callee);
+ verbose(env, "to caller at %d:\n", *insn_idx);
+ print_verifier_state(env, caller);
+ }
+ /* clear everything in the callee */
+ free_func_state(callee);
+ state->frame[state->curframe + 1] = NULL;
+ return 0;
+}
+
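For example, a callee ending like this (sketch) is rejected with the message above, even though a pointer into frame 0's stack would technically still be live:

	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),	/* r0 = fp of the current frame */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),	/* r0 = fp - 8, PTR_TO_STACK */
	BPF_EXIT_INSN(),			/* returning it: -EINVAL */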
+static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
const struct bpf_func_proto *fn = NULL;
struct bpf_reg_state *regs;
@@ -1688,7 +2330,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
if (env->ops->get_func_proto)
fn = env->ops->get_func_proto(func_id);
-
if (!fn) {
verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
func_id);
@@ -1712,10 +2353,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
- /* We only support one arg being in raw mode at the moment, which
- * is sufficient for the helper functions we have right now.
- */
- err = check_raw_mode(fn);
+ err = check_func_proto(fn);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
@@ -1729,6 +2367,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
if (err)
return err;
+ if (func_id == BPF_FUNC_tail_call) {
+ if (meta.map_ptr == NULL) {
+ verbose(env, "verifier bug\n");
+ return -EINVAL;
+ }
+ env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
+ }
err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
if (err)
return err;
@@ -1864,7 +2509,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{
- struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs, *dst_reg;
bool known = tnum_is_const(off_reg->var_off);
s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
@@ -1875,17 +2522,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst_reg = &regs[dst];
- if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
- print_verifier_state(env, env->cur_state);
- verbose(env,
- "verifier internal error: known but bad sbounds\n");
- return -EINVAL;
- }
- if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
- print_verifier_state(env, env->cur_state);
- verbose(env,
- "verifier internal error: known but bad ubounds\n");
- return -EINVAL;
+ if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
+ smin_val > smax_val || umin_val > umax_val) {
+ /* Taint dst register if offset had invalid bounds derived from
+ * e.g. dead branches.
+ */
+ __mark_reg_unknown(dst_reg);
+ return 0;
}
if (BPF_CLASS(insn->code) != BPF_ALU64) {
@@ -2077,6 +2720,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
+ if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
+ smin_val > smax_val || umin_val > umax_val) {
+ /* Taint dst register if offset had invalid bounds derived from
+ * e.g. dead branches.
+ */
+ __mark_reg_unknown(dst_reg);
+ return 0;
+ }
+
if (!src_known &&
opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
__mark_reg_unknown(dst_reg);
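The shape of what both taint hunks now tolerate, as a sketch; the impossible bounds themselves are the trigger, not any particular instruction sequence:

	/* off_reg bounds deduced only along a branch that can never run:
	 *   smin_value = 4, smax_value = 2        (smin > smax)
	 * old behaviour: WARN_ON_ONCE + -EINVAL kills the whole program
	 * new behaviour: __mark_reg_unknown(dst_reg); verification of the
	 *   dead path continues with a harmless unknown scalar
	 */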
@@ -2294,7 +2946,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
- struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
u8 opcode = BPF_OP(insn->code);
@@ -2345,12 +2999,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
/* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
- print_verifier_state(env, env->cur_state);
+ print_verifier_state(env, state);
verbose(env, "verifier internal error: unexpected ptr_reg\n");
return -EINVAL;
}
if (WARN_ON(!src_reg)) {
- print_verifier_state(env, env->cur_state);
+ print_verifier_state(env, state);
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
@@ -2486,6 +3140,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
+ if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
+ verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
+ return -EINVAL;
+ }
+
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@@ -2507,14 +3166,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
-static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type,
bool range_right_open)
{
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *reg;
u16 new_range;
- int i;
+ int i, j;
if (dst_reg->off < 0 ||
(dst_reg->off == 0 && range_right_open))
@@ -2584,12 +3244,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
/* keep the maximum range already checked */
regs[i].range = max(regs[i].range, new_range);
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] != STACK_SPILL)
- continue;
- reg = &state->stack[i].spilled_ptr;
- if (reg->type == type && reg->id == dst_reg->id)
- reg->range = max(reg->range, new_range);
+ for (j = 0; j <= vstate->curframe; j++) {
+ state = vstate->frame[j];
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+ if (state->stack[i].slot_type[0] != STACK_SPILL)
+ continue;
+ reg = &state->stack[i].spilled_ptr;
+ if (reg->type == type && reg->id == dst_reg->id)
+ reg->range = max(reg->range, new_range);
+ }
}
}
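In program terms, this is what keeps the usual bounds-check idiom working when the checked pointer has been copied or spilled, now in any frame of the callchain. A C-level sketch (not taken from this patch):

	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;

	if (data + sizeof(*eth) > data_end)	/* one compare ... */
		return TC_ACT_SHOT;
	return eth->h_proto;	/* ... extends 'range' on every alias of data */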
@@ -2827,20 +3490,24 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
-static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
+static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno,
bool is_null)
{
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs;
u32 id = regs[regno].id;
- int i;
+ int i, j;
for (i = 0; i < MAX_BPF_REG; i++)
mark_map_reg(regs, i, id, is_null);
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] != STACK_SPILL)
- continue;
- mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
+ for (j = 0; j <= vstate->curframe; j++) {
+ state = vstate->frame[j];
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+ if (state->stack[i].slot_type[0] != STACK_SPILL)
+ continue;
+ mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
+ }
}
}
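The pattern this serves, as a sketch: one NULL check must prove all aliases of the same lookup result, including copies spilled to any frame's stack:

	value = bpf_map_lookup_elem(&map, &key); /* PTR_TO_MAP_VALUE_OR_NULL, id N */
	if (!value)
		return 0;
	/* here every register and spilled slot carrying id N, in every
	 * frame, has been upgraded to PTR_TO_MAP_VALUE by mark_map_regs()
	 */
	*value += 1;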
@@ -2940,8 +3607,10 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
- struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
- struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
+ struct bpf_verifier_state *this_branch = env->cur_state;
+ struct bpf_verifier_state *other_branch;
+ struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
+ struct bpf_reg_state *dst_reg, *other_branch_regs;
u8 opcode = BPF_OP(insn->code);
int err;
@@ -2984,8 +3653,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (BPF_SRC(insn->code) == BPF_K &&
(opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == SCALAR_VALUE &&
- tnum_equals_const(dst_reg->var_off, insn->imm)) {
- if (opcode == BPF_JEQ) {
+ tnum_is_const(dst_reg->var_off)) {
+ if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
+ (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
/* if (imm == imm) goto pc+off;
* only follow the goto, ignore fall-through
*/
@@ -3003,6 +3673,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
if (!other_branch)
return -EFAULT;
+ other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
@@ -3015,22 +3686,22 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (dst_reg->type == SCALAR_VALUE &&
regs[insn->src_reg].type == SCALAR_VALUE) {
if (tnum_is_const(regs[insn->src_reg].var_off))
- reg_set_min_max(&other_branch->regs[insn->dst_reg],
+ reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, regs[insn->src_reg].var_off.value,
opcode);
else if (tnum_is_const(dst_reg->var_off))
- reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
+ reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
&regs[insn->src_reg],
dst_reg->var_off.value, opcode);
else if (opcode == BPF_JEQ || opcode == BPF_JNE)
/* Comparing for equality, we can combine knowledge */
- reg_combine_min_max(&other_branch->regs[insn->src_reg],
- &other_branch->regs[insn->dst_reg],
+ reg_combine_min_max(&other_branch_regs[insn->src_reg],
+ &other_branch_regs[insn->dst_reg],
&regs[insn->src_reg],
&regs[insn->dst_reg], opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
- reg_set_min_max(&other_branch->regs[insn->dst_reg],
+ reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, insn->imm, opcode);
}
@@ -3051,7 +3722,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return -EACCES;
}
if (env->log.level)
- print_verifier_state(env, this_branch);
+ print_verifier_state(env, this_branch->frame[this_branch->curframe]);
return 0;
}
@@ -3136,6 +3807,18 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
+ if (env->subprog_cnt) {
+ /* when a program has LD_ABS insns, JITs and the interpreter
+ * assume that r1 == ctx == skb, which is not the case for
+ * callees that can have arbitrary arguments. It's problematic
+ * for the main prog as well, since JITs would need to analyze
+ * all functions in order to make proper register save/restore
+ * decisions in the main prog. Hence disallow LD_ABS with calls
+ */
+ verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
+ return -EINVAL;
+ }
+
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
@@ -3312,6 +3995,10 @@ static int check_cfg(struct bpf_verifier_env *env)
int ret = 0;
int i, t;
+ ret = check_subprogs(env);
+ if (ret < 0)
+ return ret;
+
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
@@ -3344,6 +4031,14 @@ peek_stack:
goto err_free;
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
+ if (insns[t].src_reg == BPF_PSEUDO_CALL) {
+ env->explored_states[t] = STATE_LIST_MARK;
+ ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+ if (ret == 1)
+ goto peek_stack;
+ else if (ret < 0)
+ goto err_free;
+ }
} else if (opcode == BPF_JA) {
if (BPF_SRC(insns[t].code) != BPF_K) {
ret = -EINVAL;
@@ -3462,11 +4157,21 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
{
+ bool equal;
+
if (!(rold->live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
- if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
+ equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
+
+ if (rold->type == PTR_TO_STACK)
+ /* two stack pointers are equal only if they're pointing to
+ * the same stack frame, since fp-8 in foo != fp-8 in bar
+ */
+ return equal && rold->frameno == rcur->frameno;
+
+ if (equal)
return true;
if (rold->type == NOT_INIT)
@@ -3538,7 +4243,6 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_CTX:
case CONST_PTR_TO_MAP:
- case PTR_TO_STACK:
case PTR_TO_PACKET_END:
/* Only valid matches are exact, which memcmp() above
* would have accepted
@@ -3553,8 +4257,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return false;
}
-static bool stacksafe(struct bpf_verifier_state *old,
- struct bpf_verifier_state *cur,
+static bool stacksafe(struct bpf_func_state *old,
+ struct bpf_func_state *cur,
struct idpair *idmap)
{
int i, spi;
@@ -3572,8 +4276,19 @@ static bool stacksafe(struct bpf_verifier_state *old,
for (i = 0; i < old->allocated_stack; i++) {
spi = i / BPF_REG_SIZE;
+ if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
+ /* explored state didn't use this */
+ continue;
+
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
+ /* if old state was safe with misc data in the stack
+ * it will be safe with zero-initialized stack.
+ * The opposite is not true
+ */
+ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
+ cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
+ continue;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
cur->stack[spi].slot_type[i % BPF_REG_SIZE])
/* Ex: old explored (safe) state has STACK_SPILL in
@@ -3630,9 +4345,8 @@ static bool stacksafe(struct bpf_verifier_state *old,
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
-static bool states_equal(struct bpf_verifier_env *env,
- struct bpf_verifier_state *old,
- struct bpf_verifier_state *cur)
+static bool func_states_equal(struct bpf_func_state *old,
+ struct bpf_func_state *cur)
{
struct idpair *idmap;
bool ret = false;
@@ -3656,71 +4370,72 @@ out_free:
return ret;
}
+static bool states_equal(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *old,
+ struct bpf_verifier_state *cur)
+{
+ int i;
+
+ if (old->curframe != cur->curframe)
+ return false;
+
+ /* for states to be equal callsites have to be the same
+ * and all frame states need to be equivalent
+ */
+ for (i = 0; i <= old->curframe; i++) {
+ if (old->frame[i]->callsite != cur->frame[i]->callsite)
+ return false;
+ if (!func_states_equal(old->frame[i], cur->frame[i]))
+ return false;
+ }
+ return true;
+}
+
/* A write screens off any subsequent reads; but write marks come from the
- * straight-line code between a state and its parent. When we arrive at a
- * jump target (in the first iteration of the propagate_liveness() loop),
- * we didn't arrive by the straight-line code, so read marks in state must
- * propagate to parent regardless of state's write marks.
+ * straight-line code between a state and its parent. When we arrive at an
+ * equivalent state (jump target or such) we didn't arrive by the straight-line
+ * code, so read marks in the state must propagate to the parent regardless
+ * of the state's write marks. That's what 'parent == state->parent' comparison
+ * in mark_reg_read() and mark_stack_slot_read() is for.
*/
-static bool do_propagate_liveness(const struct bpf_verifier_state *state,
- struct bpf_verifier_state *parent)
+static int propagate_liveness(struct bpf_verifier_env *env,
+ const struct bpf_verifier_state *vstate,
+ struct bpf_verifier_state *vparent)
{
- bool writes = parent == state->parent; /* Observe write marks */
- bool touched = false; /* any changes made? */
- int i;
+ int i, frame, err = 0;
+ struct bpf_func_state *state, *parent;
- if (!parent)
- return touched;
+ if (vparent->curframe != vstate->curframe) {
+ WARN(1, "propagate_live: parent frame %d current frame %d\n",
+ vparent->curframe, vstate->curframe);
+ return -EFAULT;
+ }
/* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
/* We don't need to worry about FP liveness because it's read-only */
for (i = 0; i < BPF_REG_FP; i++) {
- if (parent->regs[i].live & REG_LIVE_READ)
- continue;
- if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
+ if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
continue;
- if (state->regs[i].live & REG_LIVE_READ) {
- parent->regs[i].live |= REG_LIVE_READ;
- touched = true;
+ if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
+ err = mark_reg_read(env, vstate, vparent, i);
+ if (err)
+ return err;
}
}
+
/* ... and stack slots */
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
- i < parent->allocated_stack / BPF_REG_SIZE; i++) {
- if (parent->stack[i].slot_type[0] != STACK_SPILL)
- continue;
- if (state->stack[i].slot_type[0] != STACK_SPILL)
- continue;
- if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
- continue;
- if (writes &&
- (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
- continue;
- if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
- parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
- touched = true;
+ for (frame = 0; frame <= vstate->curframe; frame++) {
+ state = vstate->frame[frame];
+ parent = vparent->frame[frame];
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
+ i < parent->allocated_stack / BPF_REG_SIZE; i++) {
+ if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
+ continue;
+ if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
+ mark_stack_slot_read(env, vstate, vparent, i, frame);
}
}
- return touched;
-}
-
-/* "parent" is "a state from which we reach the current state", but initially
- * it is not the state->parent (i.e. "the state whose straight-line code leads
- * to the current state"), instead it is the state that happened to arrive at
- * a (prunable) equivalent of the current state. See comment above
- * do_propagate_liveness() for consequences of this.
- * This function is just a more efficient way of calling mark_reg_read() or
- * mark_stack_slot_read() on each reg in "parent" that is read in "state",
- * though it requires that parent != state->parent in the call arguments.
- */
-static void propagate_liveness(const struct bpf_verifier_state *state,
- struct bpf_verifier_state *parent)
-{
- while (do_propagate_liveness(state, parent)) {
- /* Something changed, so we need to feed those changes onward */
- state = parent;
- parent = state->parent;
- }
+ return err;
}
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
@@ -3728,7 +4443,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state;
- int i, err;
+ int i, j, err;
sl = env->explored_states[insn_idx];
if (!sl)
@@ -3749,7 +4464,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* they'll be immediately forgotten as we're pruning
* this state and will pop a new one.
*/
- propagate_liveness(&sl->state, cur);
+ err = propagate_liveness(env, &sl->state, cur);
+ if (err)
+ return err;
return 1;
}
sl = sl->next;
@@ -3757,9 +4474,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
- * but it will either reach bpf_exit (which means it's safe) or
- * it will be rejected. Since there are no loops, we won't be
- * seeing this 'insn_idx' instruction again on the way to bpf_exit
+ * but it will either reach the outermost bpf_exit (which means it's safe)
+ * or it will be rejected. Since there are no loops, we won't be
+ * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
+ * again on the way to bpf_exit
*/
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
if (!new_sl)
@@ -3783,19 +4501,15 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* explored_states can get read marks.)
*/
for (i = 0; i < BPF_REG_FP; i++)
- cur->regs[i].live = REG_LIVE_NONE;
- for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
- if (cur->stack[i].slot_type[0] == STACK_SPILL)
- cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
- return 0;
-}
+ cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
-static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
- int insn_idx, int prev_insn_idx)
-{
- if (env->dev_ops && env->dev_ops->insn_hook)
- return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+ /* all stack frames are accessible from callee, clear them all */
+ for (j = 0; j <= cur->curframe; j++) {
+ struct bpf_func_state *frame = cur->frame[j];
+ for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
+ frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
+ }
return 0;
}
@@ -3804,7 +4518,7 @@ static int do_check(struct bpf_verifier_env *env)
struct bpf_verifier_state *state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;
- int insn_cnt = env->prog->len;
+ int insn_cnt = env->prog->len, i;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
bool do_print_state = false;
@@ -3812,9 +4526,18 @@ static int do_check(struct bpf_verifier_env *env)
state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- env->cur_state = state;
- init_reg_state(env, state->regs);
+ state->curframe = 0;
state->parent = NULL;
+ state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
+ if (!state->frame[0]) {
+ kfree(state);
+ return -ENOMEM;
+ }
+ env->cur_state = state;
+ init_func_state(env, state->frame[0],
+ BPF_MAIN_FUNC /* callsite */,
+ 0 /* frameno */,
+ 0 /* subprogno, zero == main subprog */);
insn_idx = 0;
for (;;) {
struct bpf_insn *insn;
@@ -3861,19 +4584,25 @@ static int do_check(struct bpf_verifier_env *env)
else
verbose(env, "\nfrom %d to %d:",
prev_insn_idx, insn_idx);
- print_verifier_state(env, state);
+ print_verifier_state(env, state->frame[state->curframe]);
do_print_state = false;
}
if (env->log.level) {
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = verbose,
+ };
+
verbose(env, "%d: ", insn_idx);
- print_bpf_insn(verbose, env, insn,
- env->allow_ptr_leaks);
+ print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
}
- err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
- if (err)
- return err;
+ if (bpf_prog_is_dev_bound(env->prog->aux)) {
+ err = bpf_prog_offload_verify_insn(env, insn_idx,
+ prev_insn_idx);
+ if (err)
+ return err;
+ }
regs = cur_regs(env);
env->insn_aux_data[insn_idx].seen = true;
@@ -3981,6 +4710,12 @@ static int do_check(struct bpf_verifier_env *env)
if (err)
return err;
+ if (is_ctx_reg(env, insn->dst_reg)) {
+ verbose(env, "BPF_ST stores into R%d context is not allowed\n",
+ insn->dst_reg);
+ return -EACCES;
+ }
+
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
@@ -3994,13 +4729,17 @@ static int do_check(struct bpf_verifier_env *env)
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
- insn->src_reg != BPF_REG_0 ||
+ (insn->src_reg != BPF_REG_0 &&
+ insn->src_reg != BPF_PSEUDO_CALL) ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_CALL uses reserved fields\n");
return -EINVAL;
}
- err = check_call(env, insn->imm, insn_idx);
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ err = check_func_call(env, insn, &insn_idx);
+ else
+ err = check_helper_call(env, insn->imm, insn_idx);
if (err)
return err;
@@ -4025,6 +4764,16 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
+ if (state->curframe) {
+ /* exit from nested function */
+ prev_insn_idx = insn_idx;
+ err = prepare_func_exit(env, &insn_idx);
+ if (err)
+ return err;
+ do_print_state = true;
+ continue;
+ }
+
/* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
@@ -4085,8 +4834,17 @@ process_bpf_exit:
insn_idx++;
}
- verbose(env, "processed %d insns, stack depth %d\n", insn_processed,
- env->prog->aux->stack_depth);
+ verbose(env, "processed %d insns (limit %d), stack depth ",
+ insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
+ for (i = 0; i < env->subprog_cnt + 1; i++) {
+ u32 depth = env->subprog_stack_depth[i];
+
+ verbose(env, "%d", depth);
+ if (i + 1 < env->subprog_cnt + 1)
+ verbose(env, "+");
+ }
+ verbose(env, "\n");
+ env->prog->aux->stack_depth = env->subprog_stack_depth[0];
return 0;
}
@@ -4119,6 +4877,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
return -EINVAL;
}
}
+
+ if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
+ !bpf_offload_dev_match(prog, map)) {
+ verbose(env, "offload device mismatch between prog and map\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -4216,6 +4981,13 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
next_insn:
insn++;
i++;
+ continue;
+ }
+
+ /* Basic sanity check before we invest more work here. */
+ if (!bpf_opcode_in_insntable(insn->code)) {
+ verbose(env, "unknown opcode %02x\n", insn->code);
+ return -EINVAL;
}
}
@@ -4272,6 +5044,19 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
return 0;
}
+static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
+{
+ int i;
+
+ if (len == 1)
+ return;
+ for (i = 0; i < env->subprog_cnt; i++) {
+ if (env->subprog_starts[i] < off)
+ continue;
+ env->subprog_starts[i] += len - 1;
+ }
+}
+
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{
@@ -4282,17 +5067,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return NULL;
if (adjust_insn_aux_data(env, new_prog->len, off, len))
return NULL;
+ adjust_subprog_starts(env, off, len);
return new_prog;
}
-/* The verifier does more data flow analysis than llvm and will not explore
- * branches that are dead at run time. Malicious programs can have dead code
- * too. Therefore replace all dead at-run-time code with nops.
+/* The verifier does more data flow analysis than llvm and will not
+ * explore branches that are dead at run time. Malicious programs can
+ * have dead code too. Therefore replace all dead at-run-time code
+ * with 'ja -1'.
+ *
+ * Plain nops are not optimal: if they sat at the end of the program
+ * and another bug managed to jump there, we would execute beyond
+ * program memory. Returning an exception code wouldn't work either,
+ * since the dead code could be located inside a subprog.
*/
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
- struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+ struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
struct bpf_insn *insn = env->prog->insnsi;
const int insn_cnt = env->prog->len;
int i;
@@ -4300,7 +5093,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++) {
if (aux_data[i].seen)
continue;
- memcpy(insn + i, &nop, sizeof(nop));
+ memcpy(insn + i, &trap, sizeof(trap));
}
}
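The arithmetic that makes 'ja -1' a safe trap, for reference: the interpreter advances pc by one and then adds the offset, so a sanitized insn branches to itself.

	/* pc' = pc + 1 + off = pc + 1 - 1 = pc */

If buggy control flow ever reaches dead code, it spins in place rather than falling off the end of the program or into a neighbouring subprog.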
@@ -4416,6 +5209,180 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
return 0;
}
+static int jit_subprogs(struct bpf_verifier_env *env)
+{
+ struct bpf_prog *prog = env->prog, **func, *tmp;
+ int i, j, subprog_start, subprog_end = 0, len, subprog;
+ struct bpf_insn *insn;
+ void *old_bpf_func;
+ int err = -ENOMEM;
+
+ if (env->subprog_cnt == 0)
+ return 0;
+
+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ subprog = find_subprog(env, i + insn->imm + 1);
+ if (subprog < 0) {
+ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+ i + insn->imm + 1);
+ return -EFAULT;
+ }
+ /* temporarily remember subprog id inside insn instead of
+ * aux_data, since next loop will split up all insns into funcs
+ */
+ insn->off = subprog + 1;
+ /* remember original imm in case JIT fails and fallback
+ * to interpreter will be needed
+ */
+ env->insn_aux_data[i].call_imm = insn->imm;
+ /* point imm to __bpf_call_base+1 from the JIT's point of view */
+ insn->imm = 1;
+ }
+
+ func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ for (i = 0; i <= env->subprog_cnt; i++) {
+ subprog_start = subprog_end;
+ if (env->subprog_cnt == i)
+ subprog_end = prog->len;
+ else
+ subprog_end = env->subprog_starts[i];
+
+ len = subprog_end - subprog_start;
+ func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
+ if (!func[i])
+ goto out_free;
+ memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
+ len * sizeof(struct bpf_insn));
+ func[i]->type = prog->type;
+ func[i]->len = len;
+ if (bpf_prog_calc_tag(func[i]))
+ goto out_free;
+ func[i]->is_func = 1;
+ /* Use bpf_prog_F_tag to indicate functions in stack traces.
+ * Long term, this would need debug info to populate names
+ */
+ func[i]->aux->name[0] = 'F';
+ func[i]->aux->stack_depth = env->subprog_stack_depth[i];
+ func[i]->jit_requested = 1;
+ func[i] = bpf_int_jit_compile(func[i]);
+ if (!func[i]->jited) {
+ err = -ENOTSUPP;
+ goto out_free;
+ }
+ cond_resched();
+ }
+ /* at this point all bpf functions were successfully JITed
+ * now populate all bpf_calls with correct addresses and
+ * run last pass of JIT
+ */
+ for (i = 0; i <= env->subprog_cnt; i++) {
+ insn = func[i]->insnsi;
+ for (j = 0; j < func[i]->len; j++, insn++) {
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ subprog = insn->off;
+ insn->off = 0;
+ insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+ func[subprog]->bpf_func -
+ __bpf_call_base;
+ }
+ }
+ for (i = 0; i <= env->subprog_cnt; i++) {
+ old_bpf_func = func[i]->bpf_func;
+ tmp = bpf_int_jit_compile(func[i]);
+ if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
+ verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
+ err = -EFAULT;
+ goto out_free;
+ }
+ cond_resched();
+ }
+
+ /* finally lock prog and jit images for all functions and
+ * populate kallsyms
+ */
+ for (i = 0; i <= env->subprog_cnt; i++) {
+ bpf_prog_lock_ro(func[i]);
+ bpf_prog_kallsyms_add(func[i]);
+ }
+
+ /* Last step: make now unused interpreter insns from main
+ * prog consistent for later dump requests, so they can
+ * later look the same as if they were interpreted only.
+ */
+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+ unsigned long addr;
+
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ insn->off = env->insn_aux_data[i].call_imm;
+ subprog = find_subprog(env, i + insn->off + 1);
+ addr = (unsigned long)func[subprog + 1]->bpf_func;
+ addr &= PAGE_MASK;
+ insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+ addr - __bpf_call_base;
+ }
+
+ prog->jited = 1;
+ prog->bpf_func = func[0]->bpf_func;
+ prog->aux->func = func;
+ prog->aux->func_cnt = env->subprog_cnt + 1;
+ return 0;
+out_free:
+ for (i = 0; i <= env->subprog_cnt; i++)
+ if (func[i])
+ bpf_jit_free(func[i]);
+ kfree(func);
+ /* cleanup main prog to be interpreted */
+ prog->jit_requested = 0;
+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ insn->off = 0;
+ insn->imm = env->insn_aux_data[i].call_imm;
+ }
+ return err;
+}
+
+static int fixup_call_args(struct bpf_verifier_env *env)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ struct bpf_prog *prog = env->prog;
+ struct bpf_insn *insn = prog->insnsi;
+ int i, depth;
+#endif
+ int err;
+
+ err = 0;
+ if (env->prog->jit_requested) {
+ err = jit_subprogs(env);
+ if (err == 0)
+ return 0;
+ }
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ for (i = 0; i < prog->len; i++, insn++) {
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ depth = get_callee_stack_depth(env, insn, i);
+ if (depth < 0)
+ return depth;
+ bpf_patch_call_args(insn, depth);
+ }
+ err = 0;
+#endif
+ return err;
+}
+
/* fixup insn->imm field of bpf_call instructions
* and inline eligible helpers as explicit sequence of BPF instructions
*
@@ -4433,13 +5400,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
+ if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
+ insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+ insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+ insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+ struct bpf_insn mask_and_div[] = {
+ BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+ /* Rx div 0 -> 0 */
+ BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+ BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ *insn,
+ };
+ struct bpf_insn mask_and_mod[] = {
+ BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+ /* Rx mod 0 -> Rx */
+ BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+ *insn,
+ };
+ struct bpf_insn *patchlet;
+
+ if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+ insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+ patchlet = mask_and_div + (is64 ? 1 : 0);
+ cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
+ } else {
+ patchlet = mask_and_mod + (is64 ? 1 : 0);
+ cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
+ }
+
+ new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ continue;
+ }
+
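Spelled out for the 32-bit divide (the whole mask_and_div patchlet, cnt == 5); the 64-bit form simply drops the leading truncating move (patchlet + 1, cnt == 4):

	/* w1 = w1		   BPF_MOV32_REG: truncate src to 32 bit
	 * if w1 != 0 goto +2
	 * w2 ^= w2		   Rx div 0 -> 0
	 * goto +1		   skip the real division
	 * w2 /= w1		   original insn, src now provably != 0
	 */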
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
bpf_user_rnd_init_once();
+ if (insn->imm == BPF_FUNC_override_return)
+ prog->kprobe_override = 1;
if (insn->imm == BPF_FUNC_tail_call) {
/* If we tail call into other programs, we
* cannot make any assumptions since they can
@@ -4456,13 +5467,42 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
*/
insn->imm = 0;
insn->code = BPF_JMP | BPF_TAIL_CALL;
+
+ /* instead of changing every JIT that deals with tail_call,
+ * emit two extra insns:
+ * if (index >= max_entries) goto out;
+ * index &= array->index_mask;
+ * to avoid out-of-bounds cpu speculation
+ */
+ map_ptr = env->insn_aux_data[i + delta].map_ptr;
+ if (map_ptr == BPF_MAP_PTR_POISON) {
+ verbose(env, "tail_call abusing map_ptr\n");
+ return -EINVAL;
+ }
+ if (!map_ptr->unpriv_array)
+ continue;
+ insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
+ map_ptr->max_entries, 2);
+ insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
+ container_of(map_ptr,
+ struct bpf_array,
+ map)->index_mask);
+ insn_buf[2] = *insn;
+ cnt = 3;
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
continue;
}
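What the two inserted insns buy, as a C-level sketch: index_mask is a power-of-two mask covering at least max_entries, so even a mispredicted bounds check cannot steer the subsequent load outside the allocated array:

	if (index >= array->map.max_entries)	/* may be speculated past */
		goto out;
	index &= array->index_mask;		/* but the load below is clamped */
	prog = array->ptrs[index];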
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* handlers are currently limited to 64 bit only.
*/
- if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
+ if (prog->jit_requested && BITS_PER_LONG == 64 &&
insn->imm == BPF_FUNC_map_lookup_elem) {
map_ptr = env->insn_aux_data[i + delta].map_ptr;
if (map_ptr == BPF_MAP_PTR_POISON ||
@@ -4597,7 +5637,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
- if (env->prog->aux->offload) {
+ if (bpf_prog_is_dev_bound(env->prog->aux)) {
ret = bpf_prog_offload_verifier_prep(env);
if (ret)
goto err_unlock;
@@ -4614,12 +5654,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if (!env->explored_states)
goto skip_full_check;
+ env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
+
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
- env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
-
ret = do_check(env);
if (env->cur_state) {
free_verifier_state(env->cur_state, true);
@@ -4634,12 +5674,18 @@ skip_full_check:
sanitize_dead_code(env);
if (ret == 0)
+ ret = check_max_stack_depth(env);
+
+ if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
if (ret == 0)
ret = fixup_bpf_calls(env);
+ if (ret == 0)
+ ret = fixup_call_args(env);
+
if (log->level && bpf_verifier_log_full(log))
ret = -ENOSPC;
if (log->level && !log->ubuf) {
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 024085daab1a..a2c05d2476ac 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -123,7 +123,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
*/
do {
css_task_iter_start(&from->self, 0, &it);
- task = css_task_iter_next(&it);
+
+ do {
+ task = css_task_iter_next(&it);
+ } while (task && (task->flags & PF_EXITING));
+
if (task)
get_task_struct(task);
css_task_iter_end(&it);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 0b1ffe147f24..8cda3bc3ae22 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1397,7 +1397,7 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
cft->name);
else
- strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
+ strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
return buf;
}
@@ -1864,9 +1864,9 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
root->flags = opts->flags;
if (opts->release_agent)
- strcpy(root->release_agent_path, opts->release_agent);
+ strscpy(root->release_agent_path, opts->release_agent, PATH_MAX);
if (opts->name)
- strcpy(root->name, opts->name);
+ strscpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN);
if (opts->cpuset_clone_children)
set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}
@@ -4125,26 +4125,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
static void css_task_iter_advance(struct css_task_iter *it)
{
- struct list_head *l = it->task_pos;
+ struct list_head *next;
lockdep_assert_held(&css_set_lock);
- WARN_ON_ONCE(!l);
-
repeat:
/*
* Advance iterator to find next entry. cset->tasks is consumed
* first and then ->mg_tasks. After ->mg_tasks, we move onto the
* next cset.
*/
- l = l->next;
+ next = it->task_pos->next;
- if (l == it->tasks_head)
- l = it->mg_tasks_head->next;
+ if (next == it->tasks_head)
+ next = it->mg_tasks_head->next;
- if (l == it->mg_tasks_head)
+ if (next == it->mg_tasks_head)
css_task_iter_advance_css_set(it);
else
- it->task_pos = l;
+ it->task_pos = next;
/* if PROCS, skip over tasks which aren't group leaders */
if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
@@ -4449,6 +4447,7 @@ static struct cftype cgroup_base_files[] = {
},
{
.name = "cgroup.threads",
+ .flags = CFTYPE_NS_DELEGATABLE,
.release = cgroup_procs_release,
.seq_start = cgroup_threads_start,
.seq_next = cgroup_procs_next,
diff --git a/kernel/configs/nopm.config b/kernel/configs/nopm.config
new file mode 100644
index 000000000000..81ff07863576
--- /dev/null
+++ b/kernel/configs/nopm.config
@@ -0,0 +1,15 @@
+CONFIG_PM=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+
+# Triggers PM on OMAP
+CONFIG_CPU_IDLE=n
+
+# Triggers enablement via hibernate callbacks
+CONFIG_XEN=n
+
+# ARM/ARM64 architectures that select PM unconditionally
+CONFIG_ARCH_OMAP2PLUS_TYPICAL=n
+CONFIG_ARCH_RENESAS=n
+CONFIG_ARCH_TEGRA=n
+CONFIG_ARCH_VEXPRESS=n
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index b3663896278e..4f63597c824d 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -410,7 +410,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
- VMCOREINFO_SYMBOL(mem_section);
+ VMCOREINFO_SYMBOL_ARRAY(mem_section);
VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
VMCOREINFO_STRUCT_SIZE(mem_section);
VMCOREINFO_OFFSET(mem_section, section_mem_map);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index c8146d53ca67..dbb0781a0533 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2441,7 +2441,6 @@ static int kdb_kill(int argc, const char **argv)
long sig, pid;
char *endp;
struct task_struct *p;
- struct siginfo info;
if (argc != 2)
return KDB_ARGCOUNT;
@@ -2449,7 +2448,7 @@ static int kdb_kill(int argc, const char **argv)
sig = simple_strtol(argv[1], &endp, 0);
if (*endp)
return KDB_BADINT;
- if (sig >= 0) {
+ if ((sig >= 0) || !valid_signal(-sig)) {
kdb_printf("Invalid signal parameter.<-signal>\n");
return 0;
}
@@ -2470,12 +2469,7 @@ static int kdb_kill(int argc, const char **argv)
return 0;
}
p = p->group_leader;
- info.si_signo = sig;
- info.si_errno = 0;
- info.si_code = SI_USER;
- info.si_pid = pid; /* same capabilities as process being signalled */
- info.si_uid = 0; /* kdb has root authority */
- kdb_send_sig_info(p, &info);
+ kdb_send_sig(p, sig);
return 0;
}
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index fc224fbcf954..1e5a502ba4a7 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -208,7 +208,7 @@ extern unsigned long kdb_task_state(const struct task_struct *p,
extern void kdb_ps_suppressed(void);
extern void kdb_ps1(const struct task_struct *p);
extern void kdb_print_nameval(const char *name, unsigned long val);
-extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
+extern void kdb_send_sig(struct task_struct *p, int sig);
extern void kdb_meminfo_proc_show(void);
extern char *kdb_getstr(char *, size_t, const char *);
extern void kdb_gdb_state_pass(char *buf);
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 4a1c33416b6a..e2764d767f18 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -51,16 +51,16 @@ void __delayacct_tsk_init(struct task_struct *tsk)
* Finish delay accounting for a statistic using its timestamps (@start),
* accumulator (@total) and @count
*/
-static void delayacct_end(u64 *start, u64 *total, u32 *count)
+static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count)
{
s64 ns = ktime_get_ns() - *start;
unsigned long flags;
if (ns > 0) {
- spin_lock_irqsave(&current->delays->lock, flags);
+ spin_lock_irqsave(lock, flags);
*total += ns;
(*count)++;
- spin_unlock_irqrestore(&current->delays->lock, flags);
+ spin_unlock_irqrestore(lock, flags);
}
}
@@ -69,17 +69,25 @@ void __delayacct_blkio_start(void)
current->delays->blkio_start = ktime_get_ns();
}
-void __delayacct_blkio_end(void)
+/*
+ * We cannot rely on the `current` macro, as we haven't yet switched back to
+ * the process being woken.
+ */
+void __delayacct_blkio_end(struct task_struct *p)
{
- if (current->delays->flags & DELAYACCT_PF_SWAPIN)
- /* Swapin block I/O */
- delayacct_end(&current->delays->blkio_start,
- &current->delays->swapin_delay,
- &current->delays->swapin_count);
- else /* Other block I/O */
- delayacct_end(&current->delays->blkio_start,
- &current->delays->blkio_delay,
- &current->delays->blkio_count);
+ struct task_delay_info *delays = p->delays;
+ u64 *total;
+ u32 *count;
+
+ if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
+ total = &delays->swapin_delay;
+ count = &delays->swapin_count;
+ } else {
+ total = &delays->blkio_delay;
+ count = &delays->blkio_count;
+ }
+
+ delayacct_end(&delays->lock, &delays->blkio_start, total, count);
}
int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
@@ -153,8 +161,10 @@ void __delayacct_freepages_start(void)
void __delayacct_freepages_end(void)
{
- delayacct_end(&current->delays->freepages_start,
- &current->delays->freepages_delay,
- &current->delays->freepages_count);
+ delayacct_end(
+ &current->delays->lock,
+ &current->delays->freepages_start,
+ &current->delays->freepages_delay,
+ &current->delays->freepages_count);
}
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 1b2be63c8528..772a43fea825 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -179,21 +179,6 @@ put_callchain_entry(int rctx)
}
struct perf_callchain_entry *
-perf_callchain(struct perf_event *event, struct pt_regs *regs)
-{
- bool kernel = !event->attr.exclude_callchain_kernel;
- bool user = !event->attr.exclude_callchain_user;
- /* Disallow cross-task user callchains. */
- bool crosstask = event->ctx->task && event->ctx->task != current;
- const u32 max_stack = event->attr.sample_max_stack;
-
- if (!kernel && !user)
- return NULL;
-
- return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
-}
-
-struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark)
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4df5b695bf0d..f0549e79978b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1231,6 +1231,10 @@ static void put_ctx(struct perf_event_context *ctx)
* perf_event_context::lock
* perf_event::mmap_mutex
* mmap_sem
+ *
+ * cpu_hotplug_lock
+ * pmus_lock
+ * cpuctx->mutex / perf_event_context::mutex
*/
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
@@ -4196,6 +4200,7 @@ int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *child, *tmp;
+ LIST_HEAD(free_list);
/*
* If we got here through err_file: fput(event_file); we will not have
@@ -4268,8 +4273,7 @@ again:
struct perf_event, child_list);
if (tmp == child) {
perf_remove_from_context(child, DETACH_GROUP);
- list_del(&child->child_list);
- free_event(child);
+ list_move(&child->child_list, &free_list);
/*
* This matches the refcount bump in inherit_event();
* this can't be the last reference.
@@ -4284,6 +4288,11 @@ again:
}
mutex_unlock(&event->child_mutex);
+ list_for_each_entry_safe(child, tmp, &free_list, child_list) {
+ list_del(&child->child_list);
+ free_event(child);
+ }
+
no_ctx:
put_event(event); /* Must be the 'last' reference */
return 0;
@@ -4511,11 +4520,11 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return ret;
}
-static unsigned int perf_poll(struct file *file, poll_table *wait)
+static __poll_t perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
struct ring_buffer *rb;
- unsigned int events = POLLHUP;
+ __poll_t events = POLLHUP;
poll_wait(file, &event->waitq, wait);
@@ -4723,6 +4732,9 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
rcu_read_unlock();
return 0;
}
+
+ case PERF_EVENT_IOC_QUERY_BPF:
+ return perf_event_query_prog_array(event, (void __user *)arg);
default:
return -ENOTTY;
}
@@ -4904,6 +4916,7 @@ void perf_event_update_userpage(struct perf_event *event)
unlock:
rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(perf_event_update_userpage);
static int perf_mmap_fault(struct vm_fault *vmf)
{
@@ -5815,19 +5828,11 @@ void perf_output_sample(struct perf_output_handle *handle,
perf_output_read(handle, event);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
- if (data->callchain) {
- int size = 1;
-
- if (data->callchain)
- size += data->callchain->nr;
-
- size *= sizeof(u64);
+ int size = 1;
- __output_copy(handle, data->callchain, size);
- } else {
- u64 nr = 0;
- perf_output_put(handle, nr);
- }
+ size += data->callchain->nr;
+ size *= sizeof(u64);
+ __output_copy(handle, data->callchain, size);
}
if (sample_type & PERF_SAMPLE_RAW) {
@@ -5980,6 +5985,26 @@ static u64 perf_virt_to_phys(u64 virt)
return phys_addr;
}
+static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
+
+static struct perf_callchain_entry *
+perf_callchain(struct perf_event *event, struct pt_regs *regs)
+{
+ bool kernel = !event->attr.exclude_callchain_kernel;
+ bool user = !event->attr.exclude_callchain_user;
+ /* Disallow cross-task user callchains. */
+ bool crosstask = event->ctx->task && event->ctx->task != current;
+ const u32 max_stack = event->attr.sample_max_stack;
+ struct perf_callchain_entry *callchain;
+
+ if (!kernel && !user)
+ return &__empty_callchain;
+
+ callchain = get_perf_callchain(regs, 0, kernel, user,
+ max_stack, crosstask, true);
+ return callchain ?: &__empty_callchain;
+}
+
void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
@@ -6002,9 +6027,7 @@ void perf_prepare_sample(struct perf_event_header *header,
int size = 1;
data->callchain = perf_callchain(event, regs);
-
- if (data->callchain)
- size += data->callchain->nr;
+ size += data->callchain->nr;
header->size += size * sizeof(u64);
}
@@ -8080,6 +8103,13 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
return -EINVAL;
}
+ /* Kprobe override only works for kprobes, not uprobes. */
+ if (prog->kprobe_override &&
+ !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) {
+ bpf_prog_put(prog);
+ return -EINVAL;
+ }
+
if (is_tracepoint || is_syscall_tp) {
int off = trace_event_get_offsets(event->tp_event);
@@ -8516,6 +8546,29 @@ fail_clear_files:
return ret;
}
+static int
+perf_tracepoint_set_filter(struct perf_event *event, char *filter_str)
+{
+ struct perf_event_context *ctx = event->ctx;
+ int ret;
+
+ /*
+ * Beware, here be dragons!!
+ *
+ * the tracepoint muck will deadlock against ctx->mutex, but the tracepoint
+ * stuff does not actually need it. So temporarily drop ctx->mutex. As per
+ * perf_event_ctx_lock() we already have a reference on ctx.
+ *
+ * This can result in event getting moved to a different ctx, but that
+ * does not affect the tracepoint state.
+ */
+ mutex_unlock(&ctx->mutex);
+ ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
+ mutex_lock(&ctx->mutex);
+
+ return ret;
+}
+
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
char *filter_str;
@@ -8532,8 +8585,7 @@ static int perf_event_set_filter(struct perf_event *event, void __user *arg)
if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
event->attr.type == PERF_TYPE_TRACEPOINT)
- ret = ftrace_profile_set_filter(event, event->attr.config,
- filter_str);
+ ret = perf_tracepoint_set_filter(event, filter_str);
else if (has_addr_filter(event))
ret = perf_event_set_addr_filter(event, filter_str);
@@ -9168,7 +9220,13 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
if (!try_module_get(pmu->module))
return -ENODEV;
- if (event->group_leader != event) {
+ /*
+ * A number of pmu->event_init() methods iterate the sibling_list to,
+ * for example, validate if the group fits on the PMU. Therefore,
+ * if this is a sibling event, acquire the ctx->mutex to protect
+ * the sibling_list.
+ */
+ if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
/*
* This ctx->mutex can nest when we're called through
* inheritance. See the perf_event_ctx_lock_nested() comment.
@@ -10703,6 +10761,19 @@ inherit_event(struct perf_event *parent_event,
if (IS_ERR(child_event))
return child_event;
+
+ if ((child_event->attach_state & PERF_ATTACH_TASK_DATA) &&
+ !child_ctx->task_ctx_data) {
+ struct pmu *pmu = child_event->pmu;
+
+ child_ctx->task_ctx_data = kzalloc(pmu->task_ctx_size,
+ GFP_KERNEL);
+ if (!child_ctx->task_ctx_data) {
+ free_event(child_event);
+ return NULL;
+ }
+ }
+
/*
* is_orphaned_event() and list_add_tail(&parent_event->child_list)
* must be under the same lock in order to serialize against
@@ -10713,6 +10784,7 @@ inherit_event(struct perf_event *parent_event,
if (is_orphaned_event(parent_event) ||
!atomic_long_inc_not_zero(&parent_event->refcount)) {
mutex_unlock(&parent_event->child_mutex);
+ /* task_ctx_data is freed with child_ctx */
free_event(child_event);
return NULL;
}
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 09b1537ae06c..6dc725a7e7bc 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -201,10 +201,6 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
-/* Callchain handling */
-extern struct perf_callchain_entry *
-perf_callchain(struct perf_event *event, struct pt_regs *regs);
-
static inline int get_recursion_context(int *recursion)
{
int rctx;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 267f6ef91d97..ce6848e46e94 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1167,8 +1167,8 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
}
ret = 0;
- smp_wmb(); /* pairs with get_xol_area() */
- mm->uprobes_state.xol_area = area;
+ /* pairs with get_xol_area() */
+ smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
fail:
up_write(&mm->mmap_sem);
@@ -1230,8 +1230,8 @@ static struct xol_area *get_xol_area(void)
if (!mm->uprobes_state.xol_area)
__create_xol_area(0);
- area = mm->uprobes_state.xol_area;
- smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */
+ /* Pairs with xol_add_vma() smp_store_release() */
+ area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
return area;
}
@@ -1528,8 +1528,8 @@ static unsigned long get_trampoline_vaddr(void)
struct xol_area *area;
unsigned long trampoline_vaddr = -1;
- area = current->mm->uprobes_state.xol_area;
- smp_read_barrier_depends();
+ /* Pairs with xol_add_vma() smp_store_release() */
+ area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
if (area)
trampoline_vaddr = area->vaddr;
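The pairing being established across these three hunks, as a sketch:

	/* publisher (xol_add_vma):            consumers:
	 *   init *area fully;
	 *   smp_store_release(&..->xol_area,
	 *                     area);            area = READ_ONCE(..->xol_area);
	 *                                       if (area) use(area->vaddr);
	 */

The release orders all initialization of *area before the pointer publish; on the reader side the address dependency does the rest, and READ_ONCE() now carries the Alpha read-depends barrier that the explicit smp_read_barrier_depends() used to provide.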
diff --git a/kernel/exit.c b/kernel/exit.c
index df0c91d5606c..995453d9fb55 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1763,3 +1763,4 @@ __weak void abort(void)
/* if that doesn't kill us, halt */
panic("Oops failed to kill thread");
}
+EXPORT_SYMBOL(abort);
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
new file mode 100644
index 000000000000..21b0122cb39c
--- /dev/null
+++ b/kernel/fail_function.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fail_function.c: Function-based error injection
+ */
+#include <linux/error-injection.h>
+#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
+
+struct fei_attr {
+ struct list_head list;
+ struct kprobe kp;
+ unsigned long retval;
+};
+static DEFINE_MUTEX(fei_lock);
+static LIST_HEAD(fei_attr_list);
+static DECLARE_FAULT_ATTR(fei_fault_attr);
+static struct dentry *fei_debugfs_dir;
+
+static unsigned long adjust_error_retval(unsigned long addr, unsigned long retv)
+{
+ switch (get_injectable_error_type(addr)) {
+ case EI_ETYPE_NULL:
+ if (retv != 0)
+ return 0;
+ break;
+ case EI_ETYPE_ERRNO:
+ if (retv < (unsigned long)-MAX_ERRNO)
+ return (unsigned long)-EINVAL;
+ break;
+ case EI_ETYPE_ERRNO_NULL:
+ if (retv != 0 && retv < (unsigned long)-MAX_ERRNO)
+ return (unsigned long)-EINVAL;
+ break;
+ }
+
+ return retv;
+}
+
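Worked examples of the clamping (addresses hypothetical, one per error type):

	/* EI_ETYPE_NULL:       adjust_error_retval(a, 5)    -> 0       */
	/* EI_ETYPE_ERRNO:      adjust_error_retval(a, 0)    -> -EINVAL */
	/*                      adjust_error_retval(a, -EIO) -> -EIO    */
	/* EI_ETYPE_ERRNO_NULL: adjust_error_retval(a, 1)    -> -EINVAL */
	/*                      adjust_error_retval(a, 0)    -> 0       */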
+static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr)
+{
+ struct fei_attr *attr;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (attr) {
+ attr->kp.symbol_name = kstrdup(sym, GFP_KERNEL);
+ if (!attr->kp.symbol_name) {
+ kfree(attr);
+ return NULL;
+ }
+ attr->kp.pre_handler = fei_kprobe_handler;
+ attr->retval = adjust_error_retval(addr, 0);
+ INIT_LIST_HEAD(&attr->list);
+ }
+ return attr;
+}
+
+static void fei_attr_free(struct fei_attr *attr)
+{
+ if (attr) {
+ kfree(attr->kp.symbol_name);
+ kfree(attr);
+ }
+}
+
+static struct fei_attr *fei_attr_lookup(const char *sym)
+{
+ struct fei_attr *attr;
+
+ list_for_each_entry(attr, &fei_attr_list, list) {
+ if (!strcmp(attr->kp.symbol_name, sym))
+ return attr;
+ }
+
+ return NULL;
+}
+
+static bool fei_attr_is_valid(struct fei_attr *_attr)
+{
+ struct fei_attr *attr;
+
+ list_for_each_entry(attr, &fei_attr_list, list) {
+ if (attr == _attr)
+ return true;
+ }
+
+ return false;
+}
+
+static int fei_retval_set(void *data, u64 val)
+{
+ struct fei_attr *attr = data;
+ unsigned long retv = (unsigned long)val;
+ int err = 0;
+
+ mutex_lock(&fei_lock);
+ /*
+	 * Since this operation can be done after the retval file has been
+	 * removed, it is safer to check that the attr is still valid before
+	 * accessing its members.
+ */
+ if (!fei_attr_is_valid(attr)) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ if (attr->kp.addr) {
+ if (adjust_error_retval((unsigned long)attr->kp.addr,
+ val) != retv)
+ err = -EINVAL;
+ }
+ if (!err)
+ attr->retval = val;
+out:
+ mutex_unlock(&fei_lock);
+
+ return err;
+}
+
+static int fei_retval_get(void *data, u64 *val)
+{
+ struct fei_attr *attr = data;
+ int err = 0;
+
+ mutex_lock(&fei_lock);
+ /* Here we also validate @attr to ensure it still exists. */
+ if (!fei_attr_is_valid(attr))
+ err = -ENOENT;
+ else
+ *val = attr->retval;
+ mutex_unlock(&fei_lock);
+
+ return err;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fei_retval_ops, fei_retval_get, fei_retval_set,
+ "%llx\n");
+
+static int fei_debugfs_add_attr(struct fei_attr *attr)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(attr->kp.symbol_name, fei_debugfs_dir);
+ if (!dir)
+ return -ENOMEM;
+
+ if (!debugfs_create_file("retval", 0600, dir, attr, &fei_retval_ops)) {
+ debugfs_remove_recursive(dir);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void fei_debugfs_remove_attr(struct fei_attr *attr)
+{
+ struct dentry *dir;
+
+ dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir);
+ if (dir)
+ debugfs_remove_recursive(dir);
+}
+
+static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
+{
+ struct fei_attr *attr = container_of(kp, struct fei_attr, kp);
+
+ if (should_fail(&fei_fault_attr, 1)) {
+ regs_set_return_value(regs, attr->retval);
+ override_function_with_return(regs);
+ /* Kprobe specific fixup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ return 1;
+ }
+
+ return 0;
+}
+NOKPROBE_SYMBOL(fei_kprobe_handler)
+
+static void *fei_seq_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&fei_lock);
+ return seq_list_start(&fei_attr_list, *pos);
+}
+
+static void fei_seq_stop(struct seq_file *m, void *v)
+{
+ mutex_unlock(&fei_lock);
+}
+
+static void *fei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &fei_attr_list, pos);
+}
+
+static int fei_seq_show(struct seq_file *m, void *v)
+{
+ struct fei_attr *attr = list_entry(v, struct fei_attr, list);
+
+ seq_printf(m, "%pf\n", attr->kp.addr);
+ return 0;
+}
+
+static const struct seq_operations fei_seq_ops = {
+ .start = fei_seq_start,
+ .next = fei_seq_next,
+ .stop = fei_seq_stop,
+ .show = fei_seq_show,
+};
+
+static int fei_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fei_seq_ops);
+}
+
+static void fei_attr_remove(struct fei_attr *attr)
+{
+ fei_debugfs_remove_attr(attr);
+ unregister_kprobe(&attr->kp);
+ list_del(&attr->list);
+ fei_attr_free(attr);
+}
+
+static void fei_attr_remove_all(void)
+{
+ struct fei_attr *attr, *n;
+
+ list_for_each_entry_safe(attr, n, &fei_attr_list, list) {
+ fei_attr_remove(attr);
+ }
+}
+
+static ssize_t fei_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct fei_attr *attr;
+ unsigned long addr;
+ char *buf, *sym;
+ int ret;
+
+ /* cut off if it is too long */
+ if (count > KSYM_NAME_LEN)
+ count = KSYM_NAME_LEN;
+ buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, buffer, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ buf[count] = '\0';
+ sym = strstrip(buf);
+
+ mutex_lock(&fei_lock);
+
+ /* Writing just spaces will remove all injection points */
+ if (sym[0] == '\0') {
+ fei_attr_remove_all();
+ ret = count;
+ goto out;
+ }
+ /* Writing !function will remove one injection point */
+ if (sym[0] == '!') {
+ attr = fei_attr_lookup(sym + 1);
+ if (!attr) {
+ ret = -ENOENT;
+ goto out;
+ }
+ fei_attr_remove(attr);
+ ret = count;
+ goto out;
+ }
+
+ addr = kallsyms_lookup_name(sym);
+ if (!addr) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!within_error_injection_list(addr)) {
+ ret = -ERANGE;
+ goto out;
+ }
+ if (fei_attr_lookup(sym)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ attr = fei_attr_new(sym, addr);
+ if (!attr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = register_kprobe(&attr->kp);
+ if (!ret)
+ ret = fei_debugfs_add_attr(attr);
+ if (ret < 0)
+ fei_attr_remove(attr);
+ else {
+ list_add_tail(&attr->list, &fei_attr_list);
+ ret = count;
+ }
+out:
+ kfree(buf);
+ mutex_unlock(&fei_lock);
+ return ret;
+}
+
+static const struct file_operations fei_ops = {
+ .open = fei_open,
+ .read = seq_read,
+ .write = fei_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init fei_debugfs_init(void)
+{
+ struct dentry *dir;
+
+ dir = fault_create_debugfs_attr("fail_function", NULL,
+ &fei_fault_attr);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+	/* injectable attribute is just a symlink of error_injection/list */
+ if (!debugfs_create_symlink("injectable", dir,
+ "../error_injection/list"))
+ goto error;
+
+ if (!debugfs_create_file("inject", 0600, dir, NULL, &fei_ops))
+ goto error;
+
+ fei_debugfs_dir = dir;
+
+ return 0;
+error:
+ debugfs_remove_recursive(dir);
+ return -ENOMEM;
+}
+
+late_initcall(fei_debugfs_init);
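
With CONFIG_FUNCTION_ERROR_INJECTION=y and debugfs mounted at /sys/kernel/debug, the interface created by this new file can be driven from userspace roughly as below. The symbol (open_ctree) and return value are placeholders; the symbol must appear in the "injectable" file, and the generic fault attributes (probability, times) come from fault_create_debugfs_attr() above. A sketch only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define FEI "/sys/kernel/debug/fail_function/"

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	write_str(FEI "inject", "open_ctree");		/* arm the kprobe */
	write_str(FEI "open_ctree/retval",
		  "0xfffffffffffffff4");		/* -12 == -ENOMEM */
	write_str(FEI "probability", "100");		/* always fail */
	write_str(FEI "times", "-1");			/* no limit */
	return 0;
}
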
diff --git a/kernel/fork.c b/kernel/fork.c
index 2295fc69717f..5e6cf0dd031c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -77,6 +77,7 @@
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
+#include <linux/sched/mm.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
@@ -390,6 +391,241 @@ void free_task(struct task_struct *tsk)
}
EXPORT_SYMBOL(free_task);
+#ifdef CONFIG_MMU
+static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ struct mm_struct *oldmm)
+{
+ struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+ struct rb_node **rb_link, *rb_parent;
+ int retval;
+ unsigned long charge;
+ LIST_HEAD(uf);
+
+ uprobe_start_dup_mmap();
+ if (down_write_killable(&oldmm->mmap_sem)) {
+ retval = -EINTR;
+ goto fail_uprobe_end;
+ }
+ flush_cache_dup_mm(oldmm);
+ uprobe_dup_mmap(oldmm, mm);
+ /*
+ * Not linked in yet - no deadlock potential:
+ */
+ down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
+
+ /* No ordering required: file already has been exposed. */
+ RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
+
+ mm->total_vm = oldmm->total_vm;
+ mm->data_vm = oldmm->data_vm;
+ mm->exec_vm = oldmm->exec_vm;
+ mm->stack_vm = oldmm->stack_vm;
+
+ rb_link = &mm->mm_rb.rb_node;
+ rb_parent = NULL;
+ pprev = &mm->mmap;
+ retval = ksm_fork(mm, oldmm);
+ if (retval)
+ goto out;
+ retval = khugepaged_fork(mm, oldmm);
+ if (retval)
+ goto out;
+
+ prev = NULL;
+ for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+ struct file *file;
+
+ if (mpnt->vm_flags & VM_DONTCOPY) {
+ vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
+ continue;
+ }
+ charge = 0;
+ if (mpnt->vm_flags & VM_ACCOUNT) {
+ unsigned long len = vma_pages(mpnt);
+
+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
+ goto fail_nomem;
+ charge = len;
+ }
+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ if (!tmp)
+ goto fail_nomem;
+ *tmp = *mpnt;
+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
+ retval = vma_dup_policy(mpnt, tmp);
+ if (retval)
+ goto fail_nomem_policy;
+ tmp->vm_mm = mm;
+ retval = dup_userfaultfd(tmp, &uf);
+ if (retval)
+ goto fail_nomem_anon_vma_fork;
+ if (tmp->vm_flags & VM_WIPEONFORK) {
+ /* VM_WIPEONFORK gets a clean slate in the child. */
+ tmp->anon_vma = NULL;
+ if (anon_vma_prepare(tmp))
+ goto fail_nomem_anon_vma_fork;
+ } else if (anon_vma_fork(tmp, mpnt))
+ goto fail_nomem_anon_vma_fork;
+ tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
+ tmp->vm_next = tmp->vm_prev = NULL;
+ file = tmp->vm_file;
+ if (file) {
+ struct inode *inode = file_inode(file);
+ struct address_space *mapping = file->f_mapping;
+
+ get_file(file);
+ if (tmp->vm_flags & VM_DENYWRITE)
+ atomic_dec(&inode->i_writecount);
+ i_mmap_lock_write(mapping);
+ if (tmp->vm_flags & VM_SHARED)
+ atomic_inc(&mapping->i_mmap_writable);
+ flush_dcache_mmap_lock(mapping);
+ /* insert tmp into the share list, just after mpnt */
+ vma_interval_tree_insert_after(tmp, mpnt,
+ &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+ i_mmap_unlock_write(mapping);
+ }
+
+ /*
+ * Clear hugetlb-related page reserves for children. This only
+ * affects MAP_PRIVATE mappings. Faults generated by the child
+ * are not guaranteed to succeed, even if read-only
+ */
+ if (is_vm_hugetlb_page(tmp))
+ reset_vma_resv_huge_pages(tmp);
+
+ /*
+ * Link in the new vma and copy the page table entries.
+ */
+ *pprev = tmp;
+ pprev = &tmp->vm_next;
+ tmp->vm_prev = prev;
+ prev = tmp;
+
+ __vma_link_rb(mm, tmp, rb_link, rb_parent);
+ rb_link = &tmp->vm_rb.rb_right;
+ rb_parent = &tmp->vm_rb;
+
+ mm->map_count++;
+ if (!(tmp->vm_flags & VM_WIPEONFORK))
+ retval = copy_page_range(mm, oldmm, mpnt);
+
+ if (tmp->vm_ops && tmp->vm_ops->open)
+ tmp->vm_ops->open(tmp);
+
+ if (retval)
+ goto out;
+ }
+ /* a new mm has just been created */
+ arch_dup_mmap(oldmm, mm);
+ retval = 0;
+out:
+ up_write(&mm->mmap_sem);
+ flush_tlb_mm(oldmm);
+ up_write(&oldmm->mmap_sem);
+ dup_userfaultfd_complete(&uf);
+fail_uprobe_end:
+ uprobe_end_dup_mmap();
+ return retval;
+fail_nomem_anon_vma_fork:
+ mpol_put(vma_policy(tmp));
+fail_nomem_policy:
+ kmem_cache_free(vm_area_cachep, tmp);
+fail_nomem:
+ retval = -ENOMEM;
+ vm_unacct_memory(charge);
+ goto out;
+}
+
+static inline int mm_alloc_pgd(struct mm_struct *mm)
+{
+ mm->pgd = pgd_alloc(mm);
+ if (unlikely(!mm->pgd))
+ return -ENOMEM;
+ return 0;
+}
+
+static inline void mm_free_pgd(struct mm_struct *mm)
+{
+ pgd_free(mm, mm->pgd);
+}
+#else
+static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ down_write(&oldmm->mmap_sem);
+ RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
+ up_write(&oldmm->mmap_sem);
+ return 0;
+}
+#define mm_alloc_pgd(mm) (0)
+#define mm_free_pgd(mm)
+#endif /* CONFIG_MMU */
+
+static void check_mm(struct mm_struct *mm)
+{
+ int i;
+
+ for (i = 0; i < NR_MM_COUNTERS; i++) {
+ long x = atomic_long_read(&mm->rss_stat.count[i]);
+
+ if (unlikely(x))
+ printk(KERN_ALERT "BUG: Bad rss-counter state "
+ "mm:%p idx:%d val:%ld\n", mm, i, x);
+ }
+
+ if (mm_pgtables_bytes(mm))
+ pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
+ mm_pgtables_bytes(mm));
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+ VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
+#endif
+}
+
+#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
+#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
+
+/*
+ * Called when the last reference to the mm
+ * is dropped: either by a lazy thread or by
+ * mmput. Free the page directory and the mm.
+ */
+static void __mmdrop(struct mm_struct *mm)
+{
+ BUG_ON(mm == &init_mm);
+ mm_free_pgd(mm);
+ destroy_context(mm);
+ hmm_mm_destroy(mm);
+ mmu_notifier_mm_destroy(mm);
+ check_mm(mm);
+ put_user_ns(mm->user_ns);
+ free_mm(mm);
+}
+
+void mmdrop(struct mm_struct *mm)
+{
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
+}
+EXPORT_SYMBOL_GPL(mmdrop);
+
+static void mmdrop_async_fn(struct work_struct *work)
+{
+ struct mm_struct *mm;
+
+ mm = container_of(work, struct mm_struct, async_put_work);
+ __mmdrop(mm);
+}
+
+static void mmdrop_async(struct mm_struct *mm)
+{
+ if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
+ INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
+ schedule_work(&mm->async_put_work);
+ }
+}
+
static inline void free_signal_struct(struct signal_struct *sig)
{
taskstats_tgid_free(sig);
@@ -594,181 +830,8 @@ free_tsk:
return NULL;
}
-#ifdef CONFIG_MMU
-static __latent_entropy int dup_mmap(struct mm_struct *mm,
- struct mm_struct *oldmm)
-{
- struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
- struct rb_node **rb_link, *rb_parent;
- int retval;
- unsigned long charge;
- LIST_HEAD(uf);
-
- uprobe_start_dup_mmap();
- if (down_write_killable(&oldmm->mmap_sem)) {
- retval = -EINTR;
- goto fail_uprobe_end;
- }
- flush_cache_dup_mm(oldmm);
- uprobe_dup_mmap(oldmm, mm);
- /*
- * Not linked in yet - no deadlock potential:
- */
- down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
-
- /* No ordering required: file already has been exposed. */
- RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
-
- mm->total_vm = oldmm->total_vm;
- mm->data_vm = oldmm->data_vm;
- mm->exec_vm = oldmm->exec_vm;
- mm->stack_vm = oldmm->stack_vm;
-
- rb_link = &mm->mm_rb.rb_node;
- rb_parent = NULL;
- pprev = &mm->mmap;
- retval = ksm_fork(mm, oldmm);
- if (retval)
- goto out;
- retval = khugepaged_fork(mm, oldmm);
- if (retval)
- goto out;
-
- prev = NULL;
- for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
- struct file *file;
-
- if (mpnt->vm_flags & VM_DONTCOPY) {
- vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
- continue;
- }
- charge = 0;
- if (mpnt->vm_flags & VM_ACCOUNT) {
- unsigned long len = vma_pages(mpnt);
-
- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
- goto fail_nomem;
- charge = len;
- }
- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
- if (!tmp)
- goto fail_nomem;
- *tmp = *mpnt;
- INIT_LIST_HEAD(&tmp->anon_vma_chain);
- retval = vma_dup_policy(mpnt, tmp);
- if (retval)
- goto fail_nomem_policy;
- tmp->vm_mm = mm;
- retval = dup_userfaultfd(tmp, &uf);
- if (retval)
- goto fail_nomem_anon_vma_fork;
- if (tmp->vm_flags & VM_WIPEONFORK) {
- /* VM_WIPEONFORK gets a clean slate in the child. */
- tmp->anon_vma = NULL;
- if (anon_vma_prepare(tmp))
- goto fail_nomem_anon_vma_fork;
- } else if (anon_vma_fork(tmp, mpnt))
- goto fail_nomem_anon_vma_fork;
- tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
- tmp->vm_next = tmp->vm_prev = NULL;
- file = tmp->vm_file;
- if (file) {
- struct inode *inode = file_inode(file);
- struct address_space *mapping = file->f_mapping;
-
- get_file(file);
- if (tmp->vm_flags & VM_DENYWRITE)
- atomic_dec(&inode->i_writecount);
- i_mmap_lock_write(mapping);
- if (tmp->vm_flags & VM_SHARED)
- atomic_inc(&mapping->i_mmap_writable);
- flush_dcache_mmap_lock(mapping);
- /* insert tmp into the share list, just after mpnt */
- vma_interval_tree_insert_after(tmp, mpnt,
- &mapping->i_mmap);
- flush_dcache_mmap_unlock(mapping);
- i_mmap_unlock_write(mapping);
- }
-
- /*
- * Clear hugetlb-related page reserves for children. This only
- * affects MAP_PRIVATE mappings. Faults generated by the child
- * are not guaranteed to succeed, even if read-only
- */
- if (is_vm_hugetlb_page(tmp))
- reset_vma_resv_huge_pages(tmp);
-
- /*
- * Link in the new vma and copy the page table entries.
- */
- *pprev = tmp;
- pprev = &tmp->vm_next;
- tmp->vm_prev = prev;
- prev = tmp;
-
- __vma_link_rb(mm, tmp, rb_link, rb_parent);
- rb_link = &tmp->vm_rb.rb_right;
- rb_parent = &tmp->vm_rb;
-
- mm->map_count++;
- if (!(tmp->vm_flags & VM_WIPEONFORK))
- retval = copy_page_range(mm, oldmm, mpnt);
-
- if (tmp->vm_ops && tmp->vm_ops->open)
- tmp->vm_ops->open(tmp);
-
- if (retval)
- goto out;
- }
- /* a new mm has just been created */
- retval = arch_dup_mmap(oldmm, mm);
-out:
- up_write(&mm->mmap_sem);
- flush_tlb_mm(oldmm);
- up_write(&oldmm->mmap_sem);
- dup_userfaultfd_complete(&uf);
-fail_uprobe_end:
- uprobe_end_dup_mmap();
- return retval;
-fail_nomem_anon_vma_fork:
- mpol_put(vma_policy(tmp));
-fail_nomem_policy:
- kmem_cache_free(vm_area_cachep, tmp);
-fail_nomem:
- retval = -ENOMEM;
- vm_unacct_memory(charge);
- goto out;
-}
-
-static inline int mm_alloc_pgd(struct mm_struct *mm)
-{
- mm->pgd = pgd_alloc(mm);
- if (unlikely(!mm->pgd))
- return -ENOMEM;
- return 0;
-}
-
-static inline void mm_free_pgd(struct mm_struct *mm)
-{
- pgd_free(mm, mm->pgd);
-}
-#else
-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
-{
- down_write(&oldmm->mmap_sem);
- RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
- up_write(&oldmm->mmap_sem);
- return 0;
-}
-#define mm_alloc_pgd(mm) (0)
-#define mm_free_pgd(mm)
-#endif /* CONFIG_MMU */
-
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
-#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
-#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
-
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
static int __init coredump_filter_setup(char *s)
@@ -858,27 +921,6 @@ fail_nopgd:
return NULL;
}
-static void check_mm(struct mm_struct *mm)
-{
- int i;
-
- for (i = 0; i < NR_MM_COUNTERS; i++) {
- long x = atomic_long_read(&mm->rss_stat.count[i]);
-
- if (unlikely(x))
- printk(KERN_ALERT "BUG: Bad rss-counter state "
- "mm:%p idx:%d val:%ld\n", mm, i, x);
- }
-
- if (mm_pgtables_bytes(mm))
- pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
- mm_pgtables_bytes(mm));
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
- VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
-#endif
-}
-
/*
* Allocate and initialize an mm_struct.
*/
@@ -894,24 +936,6 @@ struct mm_struct *mm_alloc(void)
return mm_init(mm, current, current_user_ns());
}
-/*
- * Called when the last reference to the mm
- * is dropped: either by a lazy thread or by
- * mmput. Free the page directory and the mm.
- */
-void __mmdrop(struct mm_struct *mm)
-{
- BUG_ON(mm == &init_mm);
- mm_free_pgd(mm);
- destroy_context(mm);
- hmm_mm_destroy(mm);
- mmu_notifier_mm_destroy(mm);
- check_mm(mm);
- put_user_ns(mm->user_ns);
- free_mm(mm);
-}
-EXPORT_SYMBOL_GPL(__mmdrop);
-
static inline void __mmput(struct mm_struct *mm)
{
VM_BUG_ON(atomic_read(&mm->mm_users));
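
The fork.c move above also turns mmdrop() into a real out-of-line function next to mmdrop_async(). For context: &mm->mm_count pins the mm_struct itself (mmgrab()/mmdrop()), while &mm->mm_users pins the address space (mmget()/mmput()). A kernel-context sketch of the usual reference pattern, not part of this diff and with locking elided:

#include <linux/sched/mm.h>

static struct mm_struct *example_grab(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;	/* task_lock() elided for brevity */

	if (mm)
		mmgrab(mm);		/* bump mm_count */
	return mm;
}

static void example_release(struct mm_struct *mm)
{
	mmdrop(mm);	/* frees via __mmdrop() when mm_count hits zero */
}
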
diff --git a/kernel/futex.c b/kernel/futex.c
index 57d0b3657e16..7f719d110908 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1878,6 +1878,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
struct futex_q *this, *next;
DEFINE_WAKE_Q(wake_q);
+ if (nr_wake < 0 || nr_requeue < 0)
+ return -EINVAL;
+
/*
* When PI not supported: return -ENOSYS if requeue_pi is true,
* consequently the compiler knows requeue_pi is always false past
@@ -2294,34 +2297,33 @@ static void unqueue_me_pi(struct futex_q *q)
spin_unlock(q->lock_ptr);
}
-/*
- * Fixup the pi_state owner with the new owner.
- *
- * Must be called with hash bucket lock held and mm->sem held for non
- * private futexes.
- */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- struct task_struct *newowner)
+ struct task_struct *argowner)
{
- u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
u32 uval, uninitialized_var(curval), newval;
- struct task_struct *oldowner;
+ struct task_struct *oldowner, *newowner;
+ u32 newtid;
int ret;
+ lockdep_assert_held(q->lock_ptr);
+
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
oldowner = pi_state->owner;
- /* Owner died? */
- if (!pi_state->owner)
- newtid |= FUTEX_OWNER_DIED;
/*
- * We are here either because we stole the rtmutex from the
- * previous highest priority waiter or we are the highest priority
- * waiter but have failed to get the rtmutex the first time.
+ * We are here because either:
+ *
+ * - we stole the lock and pi_state->owner needs updating to reflect
+ * that (@argowner == current),
+ *
+ * or:
*
- * We have to replace the newowner TID in the user space variable.
+ * - someone stole our lock and we need to fix things to point to the
+ * new owner (@argowner == NULL).
+ *
+ * Either way, we have to replace the TID in the user space variable.
* This must be atomic as we have to preserve the owner died bit here.
*
* Note: We write the user space value _before_ changing the pi_state
@@ -2334,6 +2336,45 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
* in the PID check in lookup_pi_state.
*/
retry:
+ if (!argowner) {
+ if (oldowner != current) {
+ /*
+ * We raced against a concurrent self; things are
+ * already fixed up. Nothing to do.
+ */
+ ret = 0;
+ goto out_unlock;
+ }
+
+ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
+ /* We got the lock after all, nothing to fix. */
+ ret = 0;
+ goto out_unlock;
+ }
+
+ /*
+		 * Since we just failed the trylock, there must be an owner.
+ */
+ newowner = rt_mutex_owner(&pi_state->pi_mutex);
+ BUG_ON(!newowner);
+ } else {
+ WARN_ON_ONCE(argowner != current);
+ if (oldowner == current) {
+ /*
+ * We raced against a concurrent self; things are
+ * already fixed up. Nothing to do.
+ */
+ ret = 0;
+ goto out_unlock;
+ }
+ newowner = argowner;
+ }
+
+ newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
+ /* Owner died? */
+ if (!pi_state->owner)
+ newtid |= FUTEX_OWNER_DIED;
+
if (get_futex_value_locked(&uval, uaddr))
goto handle_fault;
@@ -2434,9 +2475,9 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case:
*
- * We can safely read pi_state->owner without holding wait_lock
- * because we now own the rt_mutex, only the owner will attempt
- * to change it.
+		 * Speculative pi_state->owner read (we don't hold wait_lock):
+		 * since we own the lock, pi_state->owner == current is the
+		 * stable state; anything else needs more attention.
*/
if (q->pi_state->owner != current)
ret = fixup_pi_state_owner(uaddr, q, current);
@@ -2444,6 +2485,19 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
}
/*
+	 * If we didn't get the lock, check if anybody stole it from us. In
+ * that case, we need to fix up the uval to point to them instead of
+ * us, otherwise bad things happen. [10]
+ *
+ * Another speculative read; pi_state->owner == current is unstable
+ * but needs our attention.
+ */
+ if (q->pi_state->owner == current) {
+ ret = fixup_pi_state_owner(uaddr, q, NULL);
+ goto out;
+ }
+
+ /*
* Paranoia check. If we did not take the lock, then we should not be
* the owner of the rt_mutex.
*/
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 89e355866450..6fc87ccda1d7 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -103,16 +103,6 @@ config GENERIC_IRQ_MATRIX_ALLOCATOR
config GENERIC_IRQ_RESERVATION_MODE
bool
-config IRQ_DOMAIN_DEBUG
- bool "Expose hardware/virtual IRQ mapping via debugfs"
- depends on IRQ_DOMAIN && DEBUG_FS
- help
- This option will show the mapping relationship between hardware irq
- numbers and Linux irq numbers. The mapping is exposed via debugfs
- in the file "irq_domain_mapping".
-
- If you don't know what this means you don't need it.
-
# Support forced irq threading
config IRQ_FORCED_THREADING
bool
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index e12d35108225..a37a3b4b6342 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
}
}
-static cpumask_var_t *alloc_node_to_present_cpumask(void)
+static cpumask_var_t *alloc_node_to_possible_cpumask(void)
{
cpumask_var_t *masks;
int node;
@@ -62,7 +62,7 @@ out_unwind:
return NULL;
}
-static void free_node_to_present_cpumask(cpumask_var_t *masks)
+static void free_node_to_possible_cpumask(cpumask_var_t *masks)
{
int node;
@@ -71,22 +71,22 @@ static void free_node_to_present_cpumask(cpumask_var_t *masks)
kfree(masks);
}
-static void build_node_to_present_cpumask(cpumask_var_t *masks)
+static void build_node_to_possible_cpumask(cpumask_var_t *masks)
{
int cpu;
- for_each_present_cpu(cpu)
+ for_each_possible_cpu(cpu)
cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}
-static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
const struct cpumask *mask, nodemask_t *nodemsk)
{
int n, nodes = 0;
/* Calculate the number of nodes in the supplied affinity mask */
for_each_node(n) {
- if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
+ if (cpumask_intersects(mask, node_to_possible_cpumask[n])) {
node_set(n, *nodemsk);
nodes++;
}
@@ -109,7 +109,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
int last_affv = affv + affd->pre_vectors;
nodemask_t nodemsk = NODE_MASK_NONE;
struct cpumask *masks;
- cpumask_var_t nmsk, *node_to_present_cpumask;
+ cpumask_var_t nmsk, *node_to_possible_cpumask;
/*
* If there aren't any vectors left after applying the pre/post
@@ -125,8 +125,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
if (!masks)
goto out;
- node_to_present_cpumask = alloc_node_to_present_cpumask();
- if (!node_to_present_cpumask)
+ node_to_possible_cpumask = alloc_node_to_possible_cpumask();
+ if (!node_to_possible_cpumask)
goto out;
/* Fill out vectors at the beginning that don't need affinity */
@@ -135,8 +135,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
/* Stabilize the cpumasks */
get_online_cpus();
- build_node_to_present_cpumask(node_to_present_cpumask);
- nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
+ build_node_to_possible_cpumask(node_to_possible_cpumask);
+ nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask,
&nodemsk);
/*
@@ -146,7 +146,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
if (affv <= nodes) {
for_each_node_mask(n, nodemsk) {
cpumask_copy(masks + curvec,
- node_to_present_cpumask[n]);
+ node_to_possible_cpumask[n]);
if (++curvec == last_affv)
break;
}
@@ -160,7 +160,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
/* Get the cpus on this node which are in the mask */
- cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);
+ cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]);
/* Calculate the number of cpus per vector */
ncpus = cpumask_weight(nmsk);
@@ -192,7 +192,7 @@ done:
/* Fill out vectors at the end that don't need affinity */
for (; curvec < nvecs; curvec++)
cpumask_copy(masks + curvec, irq_default_affinity);
- free_node_to_present_cpumask(node_to_present_cpumask);
+ free_node_to_possible_cpumask(node_to_possible_cpumask);
out:
free_cpumask_var(nmsk);
return masks;
@@ -214,7 +214,7 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
return 0;
get_online_cpus();
- ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
+ ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
put_online_cpus();
return ret;
}
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 62068ad46930..e6a9c36470ee 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -897,124 +897,6 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
-#ifdef CONFIG_IRQ_DOMAIN_DEBUG
-static void virq_debug_show_one(struct seq_file *m, struct irq_desc *desc)
-{
- struct irq_domain *domain;
- struct irq_data *data;
-
- domain = desc->irq_data.domain;
- data = &desc->irq_data;
-
- while (domain) {
- unsigned int irq = data->irq;
- unsigned long hwirq = data->hwirq;
- struct irq_chip *chip;
- bool direct;
-
- if (data == &desc->irq_data)
- seq_printf(m, "%5d ", irq);
- else
- seq_printf(m, "%5d+ ", irq);
- seq_printf(m, "0x%05lx ", hwirq);
-
- chip = irq_data_get_irq_chip(data);
- seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
-
- seq_printf(m, "0x%p ", irq_data_get_irq_chip_data(data));
-
- seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
- direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq);
- seq_printf(m, "%6s%-8s ",
- (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
- direct ? "(DIRECT)" : "");
- seq_printf(m, "%s\n", domain->name);
-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
- domain = domain->parent;
- data = data->parent_data;
-#else
- domain = NULL;
-#endif
- }
-}
-
-static int virq_debug_show(struct seq_file *m, void *private)
-{
- unsigned long flags;
- struct irq_desc *desc;
- struct irq_domain *domain;
- struct radix_tree_iter iter;
- void __rcu **slot;
- int i;
-
- seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
- "name", "mapped", "linear-max", "direct-max", "devtree-node");
- mutex_lock(&irq_domain_mutex);
- list_for_each_entry(domain, &irq_domain_list, link) {
- struct device_node *of_node;
- const char *name;
-
- int count = 0;
-
- of_node = irq_domain_get_of_node(domain);
- if (of_node)
- name = of_node_full_name(of_node);
- else if (is_fwnode_irqchip(domain->fwnode))
- name = container_of(domain->fwnode, struct irqchip_fwid,
- fwnode)->name;
- else
- name = "";
-
- radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
- count++;
- seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
- domain == irq_default_domain ? '*' : ' ', domain->name,
- domain->revmap_size + count, domain->revmap_size,
- domain->revmap_direct_max_irq,
- name);
- }
- mutex_unlock(&irq_domain_mutex);
-
- seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
- "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
- "active", "type", "domain");
-
- for (i = 1; i < nr_irqs; i++) {
- desc = irq_to_desc(i);
- if (!desc)
- continue;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- virq_debug_show_one(m, desc);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-
- return 0;
-}
-
-static int virq_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, virq_debug_show, inode->i_private);
-}
-
-static const struct file_operations virq_debug_fops = {
- .open = virq_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init irq_debugfs_init(void)
-{
- if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
- NULL, &virq_debug_fops) == NULL)
- return -ENOMEM;
-
- return 0;
-}
-__initcall(irq_debugfs_init);
-#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
-
/**
* irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
*
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 0ba0dd8863a7..5187dfe809ac 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -321,15 +321,23 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
bool reserved, unsigned int *mapped_cpu)
{
- unsigned int cpu;
+ unsigned int cpu, best_cpu, maxavl = 0;
+ struct cpumap *cm;
+ unsigned int bit;
+ best_cpu = UINT_MAX;
for_each_cpu(cpu, msk) {
- struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
- unsigned int bit;
+ cm = per_cpu_ptr(m->maps, cpu);
- if (!cm->online)
+ if (!cm->online || cm->available <= maxavl)
continue;
+ best_cpu = cpu;
+ maxavl = cm->available;
+ }
+
+ if (maxavl) {
+ cm = per_cpu_ptr(m->maps, best_cpu);
bit = matrix_alloc_area(m, cm, 1, false);
if (bit < m->alloc_end) {
cm->allocated++;
@@ -338,8 +346,8 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
m->global_available--;
if (reserved)
m->global_reserved--;
- *mapped_cpu = cpu;
- trace_irq_matrix_alloc(bit, cpu, m, cm);
+ *mapped_cpu = best_cpu;
+ trace_irq_matrix_alloc(bit, best_cpu, m, cm);
return bit;
}
}
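
The irq_matrix_alloc() rework above first scans the whole mask for the online CPU with the most available vectors and only then allocates, spreading interrupts instead of piling them onto the first online CPU. The selection step in isolation, as a plain C sketch with stand-in types:

#include <limits.h>
#include <stdbool.h>

struct cpumap { bool online; unsigned int available; };

/* Mirror the best_cpu/maxavl scan: return the online CPU with the
 * largest 'available' count, or UINT_MAX if none qualifies.
 */
static unsigned int pick_best_cpu(const struct cpumap *maps,
				  unsigned int ncpus)
{
	unsigned int cpu, best_cpu = UINT_MAX, maxavl = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (!maps[cpu].online || maps[cpu].available <= maxavl)
			continue;
		best_cpu = cpu;
		maxavl = maps[cpu].available;
	}
	return best_cpu;
}

int main(void)
{
	struct cpumap maps[] = { { true, 3 }, { false, 9 }, { true, 7 } };

	return pick_best_cpu(maps, 3) == 2 ? 0 : 1;	/* expect CPU 2 */
}
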
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 40e9d739c169..6b7cdf17ccf8 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -36,7 +36,7 @@ static bool irq_work_claim(struct irq_work *work)
*/
flags = work->flags & ~IRQ_WORK_PENDING;
for (;;) {
- nflags = flags | IRQ_WORK_FLAGS;
+ nflags = flags | IRQ_WORK_CLAIMED;
oflags = cmpxchg(&work->flags, flags, nflags);
if (oflags == flags)
break;
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 8594d24e4adc..b4517095db6a 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -79,7 +79,7 @@ int static_key_count(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_count);
-static void static_key_slow_inc_cpuslocked(struct static_key *key)
+void static_key_slow_inc_cpuslocked(struct static_key *key)
{
int v, v1;
@@ -180,7 +180,7 @@ void static_key_disable(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_disable);
-static void static_key_slow_dec_cpuslocked(struct static_key *key,
+static void __static_key_slow_dec_cpuslocked(struct static_key *key,
unsigned long rate_limit,
struct delayed_work *work)
{
@@ -211,7 +211,7 @@ static void __static_key_slow_dec(struct static_key *key,
struct delayed_work *work)
{
cpus_read_lock();
- static_key_slow_dec_cpuslocked(key, rate_limit, work);
+ __static_key_slow_dec_cpuslocked(key, rate_limit, work);
cpus_read_unlock();
}
@@ -229,6 +229,12 @@ void static_key_slow_dec(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
+void static_key_slow_dec_cpuslocked(struct static_key *key)
+{
+ STATIC_KEY_CHECK_USE(key);
+ __static_key_slow_dec_cpuslocked(key, 0, NULL);
+}
+
void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
STATIC_KEY_CHECK_USE(key);
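
The jump_label hunks above expose *_cpuslocked variants for callers that already hold the CPU hotplug read lock. The intended calling convention, as a kernel-context sketch (the key and function names are placeholders):

#include <linux/cpu.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_key);

static void example_toggle_under_hotplug_lock(void)
{
	cpus_read_lock();
	/* ... per-CPU setup that must not race with hotplug ... */
	static_key_slow_inc_cpuslocked(&example_key.key);
	/* ... */
	static_key_slow_dec_cpuslocked(&example_key.key);
	cpus_read_unlock();
}
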
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index de9e45dca70f..3a4656fb7047 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -366,11 +366,6 @@ static int __klp_enable_patch(struct klp_patch *patch)
/*
* A reference is taken on the patch module to prevent it from being
* unloaded.
- *
- * Note: For immediate (no consistency model) patches we don't allow
- * patch modules to unload since there is no safe/sane method to
- * determine if a thread is still running in the patched code contained
- * in the patch module once the ftrace registration is successful.
*/
if (!try_module_get(patch->mod))
return -ENODEV;
@@ -454,6 +449,8 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
* /sys/kernel/livepatch/<patch>
* /sys/kernel/livepatch/<patch>/enabled
* /sys/kernel/livepatch/<patch>/transition
+ * /sys/kernel/livepatch/<patch>/signal
+ * /sys/kernel/livepatch/<patch>/force
* /sys/kernel/livepatch/<patch>/<object>
* /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
*/
@@ -528,11 +525,73 @@ static ssize_t transition_show(struct kobject *kobj,
patch == klp_transition_patch);
}
+static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct klp_patch *patch;
+ int ret;
+ bool val;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ return count;
+
+ mutex_lock(&klp_mutex);
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+ if (patch != klp_transition_patch) {
+ mutex_unlock(&klp_mutex);
+ return -EINVAL;
+ }
+
+ klp_send_signals();
+
+ mutex_unlock(&klp_mutex);
+
+ return count;
+}
+
+static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct klp_patch *patch;
+ int ret;
+ bool val;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ return count;
+
+ mutex_lock(&klp_mutex);
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+ if (patch != klp_transition_patch) {
+ mutex_unlock(&klp_mutex);
+ return -EINVAL;
+ }
+
+ klp_force_transition();
+
+ mutex_unlock(&klp_mutex);
+
+ return count;
+}
+
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
+static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
+static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
&enabled_kobj_attr.attr,
&transition_kobj_attr.attr,
+ &signal_kobj_attr.attr,
+ &force_kobj_attr.attr,
NULL
};
@@ -830,12 +889,7 @@ int klp_register_patch(struct klp_patch *patch)
if (!klp_initialized())
return -ENODEV;
- /*
- * Architectures without reliable stack traces have to set
- * patch->immediate because there's currently no way to patch kthreads
- * with the consistency model.
- */
- if (!klp_have_reliable_stack() && !patch->immediate) {
+ if (!klp_have_reliable_stack()) {
pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
return -ENOSYS;
}
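
Together with the transition.c changes below, the new signal and force attributes let an administrator first nudge straggler tasks and only as a last resort force the transition, bypassing the consistency model. A userspace sketch; the patch name is a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int write_one(const char *path)
{
	int fd = open(path, O_WRONLY);
	int ret = -1;

	if (fd < 0) {
		perror(path);
		return ret;
	}
	if (write(fd, "1", 1) == 1)
		ret = 0;
	else
		perror(path);
	close(fd);
	return ret;
}

int main(void)
{
	/* Gentle option: re-signal tasks stuck in the transition. */
	write_one("/sys/kernel/livepatch/livepatch_sample/signal");
	/* Last resort (unsafe): uncomment to force completion. */
	/* write_one("/sys/kernel/livepatch/livepatch_sample/force"); */
	return 0;
}
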
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 56add6327736..7c6631e693bc 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -33,6 +33,8 @@ struct klp_patch *klp_transition_patch;
static int klp_target_state = KLP_UNDEFINED;
+static bool klp_forced = false;
+
/*
* This work can be performed periodically to finish patching or unpatching any
* "straggler" tasks which failed to transition in the first attempt.
@@ -80,7 +82,6 @@ static void klp_complete_transition(void)
struct klp_func *func;
struct task_struct *g, *task;
unsigned int cpu;
- bool immediate_func = false;
pr_debug("'%s': completing %s transition\n",
klp_transition_patch->mod->name,
@@ -102,16 +103,9 @@ static void klp_complete_transition(void)
klp_synchronize_transition();
}
- if (klp_transition_patch->immediate)
- goto done;
-
- klp_for_each_object(klp_transition_patch, obj) {
- klp_for_each_func(obj, func) {
+ klp_for_each_object(klp_transition_patch, obj)
+ klp_for_each_func(obj, func)
func->transition = false;
- if (func->immediate)
- immediate_func = true;
- }
- }
/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
if (klp_target_state == KLP_PATCHED)
@@ -130,7 +124,6 @@ static void klp_complete_transition(void)
task->patch_state = KLP_UNDEFINED;
}
-done:
klp_for_each_object(klp_transition_patch, obj) {
if (!klp_is_object_loaded(obj))
continue;
@@ -144,13 +137,11 @@ done:
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
/*
- * See complementary comment in __klp_enable_patch() for why we
- * keep the module reference for immediate patches.
+ * klp_forced set implies unbounded increase of module's ref count if
+ * the module is disabled/enabled in a loop.
*/
- if (!klp_transition_patch->immediate && !immediate_func &&
- klp_target_state == KLP_UNPATCHED) {
+ if (!klp_forced && klp_target_state == KLP_UNPATCHED)
module_put(klp_transition_patch->mod);
- }
klp_target_state = KLP_UNDEFINED;
klp_transition_patch = NULL;
@@ -218,9 +209,6 @@ static int klp_check_stack_func(struct klp_func *func,
struct klp_ops *ops;
int i;
- if (func->immediate)
- return 0;
-
for (i = 0; i < trace->nr_entries; i++) {
address = trace->entries[i];
@@ -383,13 +371,6 @@ void klp_try_complete_transition(void)
WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
/*
- * If the patch can be applied or reverted immediately, skip the
- * per-task transitions.
- */
- if (klp_transition_patch->immediate)
- goto success;
-
- /*
* Try to switch the tasks to the target patch state by walking their
* stacks and looking for any to-be-patched or to-be-unpatched
* functions. If such functions are found on a stack, or if the stack
@@ -432,7 +413,6 @@ void klp_try_complete_transition(void)
return;
}
-success:
/* we're done, now cleanup the data structures */
klp_complete_transition();
}
@@ -453,13 +433,6 @@ void klp_start_transition(void)
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
/*
- * If the patch can be applied or reverted immediately, skip the
- * per-task transitions.
- */
- if (klp_transition_patch->immediate)
- return;
-
- /*
* Mark all normal tasks as needing a patch state update. They'll
* switch either in klp_try_complete_transition() or as they exit the
* kernel.
@@ -509,13 +482,6 @@ void klp_init_transition(struct klp_patch *patch, int state)
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
/*
- * If the patch can be applied or reverted immediately, skip the
- * per-task transitions.
- */
- if (patch->immediate)
- return;
-
- /*
* Initialize all tasks to the initial patch state to prepare them for
* switching to the target state.
*/
@@ -608,3 +574,71 @@ void klp_copy_process(struct task_struct *child)
/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}
+
+/*
+ * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
+ * Kthreads with TIF_PATCH_PENDING set are woken up. Currently, only the
+ * administrator can request this action.
+ */
+void klp_send_signals(void)
+{
+ struct task_struct *g, *task;
+
+ pr_notice("signaling remaining tasks\n");
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, task) {
+ if (!klp_patch_pending(task))
+ continue;
+
+ /*
+ * There is a small race here. We could see TIF_PATCH_PENDING
+ * set and decide to wake up a kthread or send a fake signal.
+ * Meanwhile the task could migrate itself and the action
+ * would be meaningless. It is not serious though.
+ */
+ if (task->flags & PF_KTHREAD) {
+ /*
+			 * Wake up a kthread which sleeps interruptibly and
+ * still has not been migrated.
+ */
+ wake_up_state(task, TASK_INTERRUPTIBLE);
+ } else {
+ /*
+ * Send fake signal to all non-kthread tasks which are
+ * still not migrated.
+ */
+ spin_lock_irq(&task->sighand->siglock);
+ signal_wake_up(task, 0);
+ spin_unlock_irq(&task->sighand->siglock);
+ }
+ }
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
+ * existing transition to finish.
+ *
+ * NOTE: klp_update_patch_state(task) requires the task to be inactive or
+ * 'current'. That is not the case here, so the consistency model could be
+ * broken. The administrator, who is the only one able to trigger
+ * klp_force_transition(), has to be aware of this.
+ */
+void klp_force_transition(void)
+{
+ struct task_struct *g, *task;
+ unsigned int cpu;
+
+ pr_warn("forcing remaining tasks to the patched state\n");
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, task)
+ klp_update_patch_state(task);
+ read_unlock(&tasklist_lock);
+
+ for_each_possible_cpu(cpu)
+ klp_update_patch_state(idle_task(cpu));
+
+ klp_forced = true;
+}
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h
index 0f6e27c481f9..f9d0bc016067 100644
--- a/kernel/livepatch/transition.h
+++ b/kernel/livepatch/transition.h
@@ -11,5 +11,7 @@ void klp_cancel_transition(void);
void klp_start_transition(void);
void klp_try_complete_transition(void);
void klp_reverse_transition(void);
+void klp_send_signals(void);
+void klp_force_transition(void);
#endif /* _LIVEPATCH_TRANSITION_H */
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5fa1324a4f29..89b5f83f1969 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -49,6 +49,7 @@
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
+#include <linux/nmi.h>
#include <asm/sections.h>
@@ -647,18 +648,12 @@ static int count_matching_names(struct lock_class *new_class)
return count + 1;
}
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
struct hlist_head *hash_head;
struct lock_class *class;
- bool is_static = false;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
debug_locks_off();
@@ -671,24 +666,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
}
/*
- * Static locks do not have their class-keys yet - for them the key
- * is the lock object itself. If the lock is in the per cpu area,
- * the canonical address of the lock (per cpu offset removed) is
- * used.
+ * If it is not initialised then it has never been locked,
+ * so it won't be present in the hash table.
*/
- if (unlikely(!lock->key)) {
- unsigned long can_addr, addr = (unsigned long)lock;
-
- if (__is_kernel_percpu_address(addr, &can_addr))
- lock->key = (void *)can_addr;
- else if (__is_module_percpu_address(addr, &can_addr))
- lock->key = (void *)can_addr;
- else if (static_obj(lock))
- lock->key = (void *)lock;
- else
- return ERR_PTR(-EINVAL);
- is_static = true;
- }
+ if (unlikely(!lock->key))
+ return NULL;
/*
* NOTE: the class-key must be unique. For dynamic locks, a static
@@ -720,7 +702,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
}
}
- return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+ return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+ unsigned long can_addr, addr = (unsigned long)lock;
+
+ if (__is_kernel_percpu_address(addr, &can_addr))
+ lock->key = (void *)can_addr;
+ else if (__is_module_percpu_address(addr, &can_addr))
+ lock->key = (void *)can_addr;
+ else if (static_obj(lock))
+ lock->key = (void *)lock;
+ else {
+ /* Debug-check: all keys must be persistent! */
+ debug_locks_off();
+ pr_err("INFO: trying to register non-static key.\n");
+ pr_err("the code is fine but needs lockdep annotation.\n");
+ pr_err("turning off the locking correctness validator.\n");
+ dump_stack();
+ return false;
+ }
+
+ return true;
}
/*
@@ -738,18 +748,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
class = look_up_lock_class(lock, subclass);
- if (likely(!IS_ERR_OR_NULL(class)))
+ if (likely(class))
goto out_set_class_cache;
- /*
- * Debug-check: all keys must be persistent!
- */
- if (IS_ERR(class)) {
- debug_locks_off();
- printk("INFO: trying to register non-static key.\n");
- printk("the code is fine but needs lockdep annotation.\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
+ if (!lock->key) {
+ if (!assign_lock_key(lock))
+ return NULL;
+ } else if (!static_obj(lock->key)) {
return NULL;
}
@@ -3272,7 +3277,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
return 0;
}
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
/*
* This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3481,13 +3486,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
return 0;
}
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+ const struct lockdep_map *lock)
{
if (hlock->instance == lock)
return 1;
if (hlock->references) {
- struct lock_class *class = lock->class_cache[0];
+ const struct lock_class *class = lock->class_cache[0];
if (!class)
class = look_up_lock_class(lock, 0);
@@ -3498,7 +3504,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
* Clearly if the lock hasn't been acquired _ever_, we're not
* holding it either, so report failure.
*/
- if (IS_ERR_OR_NULL(class))
+ if (!class)
return 0;
/*
@@ -3723,7 +3729,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
return 1;
}
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
{
struct task_struct *curr = current;
int i;
@@ -3937,7 +3943,7 @@ void lock_release(struct lockdep_map *lock, int nested,
}
EXPORT_SYMBOL_GPL(lock_release);
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
{
unsigned long flags;
int ret = 0;
@@ -4294,7 +4300,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* If the class exists we look it up and zap it:
*/
class = look_up_lock_class(lock, j);
- if (!IS_ERR_OR_NULL(class))
+ if (class)
zap_class(class);
}
/*
@@ -4490,6 +4496,7 @@ retry:
if (!unlock)
if (read_trylock(&tasklist_lock))
unlock = 1;
+ touch_nmi_watchdog();
} while_each_thread(g, p);
pr_warn("\n");
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index f24582d4dad3..6850ffd69125 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -77,10 +77,6 @@ struct lock_stress_stats {
long n_lock_acquired;
};
-int torture_runnable = IS_ENABLED(MODULE);
-module_param(torture_runnable, int, 0444);
-MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
-
/* Forward reference. */
static void lock_torture_cleanup(void);
@@ -130,10 +126,8 @@ static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_lock_busted_write_unlock(void)
@@ -179,10 +173,8 @@ static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2 * shortdelay_us)))
udelay(shortdelay_us);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
@@ -352,10 +344,8 @@ static void torture_mutex_delay(struct torture_random_state *trsp)
mdelay(longdelay_ms * 5);
else
mdelay(longdelay_ms / 5);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_mutex_unlock(void) __releases(torture_mutex)
@@ -507,10 +497,8 @@ static void torture_rtmutex_delay(struct torture_random_state *trsp)
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2 * shortdelay_us)))
udelay(shortdelay_us);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
@@ -547,10 +535,8 @@ static void torture_rwsem_write_delay(struct torture_random_state *trsp)
mdelay(longdelay_ms * 10);
else
mdelay(longdelay_ms / 10);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rwsem_up_write(void) __releases(torture_rwsem)
@@ -570,14 +556,12 @@ static void torture_rwsem_read_delay(struct torture_random_state *trsp)
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
- (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+ (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms * 2);
else
mdelay(longdelay_ms / 2);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rwsem_up_read(void) __releases(torture_rwsem)
@@ -715,8 +699,7 @@ static void __torture_print_stats(char *page,
{
bool fail = 0;
int i, n_stress;
- long max = 0;
- long min = statp[0].n_lock_acquired;
+ long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
long long sum = 0;
n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
@@ -823,7 +806,7 @@ static void lock_torture_cleanup(void)
* such, only perform the underlying torture-specific cleanups,
* and avoid anything related to locktorture.
*/
- if (!cxt.lwsa)
+ if (!cxt.lwsa && !cxt.lrsa)
goto end;
if (writer_tasks) {
@@ -879,7 +862,7 @@ static int __init lock_torture_init(void)
&percpu_rwsem_lock_ops,
};
- if (!torture_init_begin(torture_type, verbose, &torture_runnable))
+ if (!torture_init_begin(torture_type, verbose))
return -EBUSY;
/* Process args and tell the world that the torturer is on the job. */
@@ -898,6 +881,13 @@ static int __init lock_torture_init(void)
firsterr = -EINVAL;
goto unwind;
}
+
+ if (nwriters_stress == 0 && nreaders_stress == 0) {
+ pr_alert("lock-torture: must run at least one locking thread\n");
+ firsterr = -EINVAL;
+ goto unwind;
+ }
+
if (cxt.cur_ops->init)
cxt.cur_ops->init();
@@ -921,17 +911,19 @@ static int __init lock_torture_init(void)
#endif
/* Initialize the statistics so that each run gets its own numbers. */
+ if (nwriters_stress) {
+ lock_is_write_held = 0;
+ cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
+ if (cxt.lwsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
- lock_is_write_held = 0;
- cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
- if (cxt.lwsa == NULL) {
- VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
- firsterr = -ENOMEM;
- goto unwind;
- }
- for (i = 0; i < cxt.nrealwriters_stress; i++) {
- cxt.lwsa[i].n_lock_fail = 0;
- cxt.lwsa[i].n_lock_acquired = 0;
+ for (i = 0; i < cxt.nrealwriters_stress; i++) {
+ cxt.lwsa[i].n_lock_fail = 0;
+ cxt.lwsa[i].n_lock_acquired = 0;
+ }
}
if (cxt.cur_ops->readlock) {
@@ -948,19 +940,21 @@ static int __init lock_torture_init(void)
cxt.nrealreaders_stress = cxt.nrealwriters_stress;
}
- lock_is_read_held = 0;
- cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
- if (cxt.lrsa == NULL) {
- VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
- firsterr = -ENOMEM;
- kfree(cxt.lwsa);
- cxt.lwsa = NULL;
- goto unwind;
- }
-
- for (i = 0; i < cxt.nrealreaders_stress; i++) {
- cxt.lrsa[i].n_lock_fail = 0;
- cxt.lrsa[i].n_lock_acquired = 0;
+ if (nreaders_stress) {
+ lock_is_read_held = 0;
+ cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
+ if (cxt.lrsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
+ firsterr = -ENOMEM;
+ kfree(cxt.lwsa);
+ cxt.lwsa = NULL;
+ goto unwind;
+ }
+
+ for (i = 0; i < cxt.nrealreaders_stress; i++) {
+ cxt.lrsa[i].n_lock_fail = 0;
+ cxt.lrsa[i].n_lock_acquired = 0;
+ }
}
}
@@ -990,12 +984,14 @@ static int __init lock_torture_init(void)
goto unwind;
}
- writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
- GFP_KERNEL);
- if (writer_tasks == NULL) {
- VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
- firsterr = -ENOMEM;
- goto unwind;
+ if (nwriters_stress) {
+ writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
+ GFP_KERNEL);
+ if (writer_tasks == NULL) {
+ VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
}
if (cxt.cur_ops->readlock) {
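
The locktorture hunks above fold the open-coded #ifdef CONFIG_PREEMPT blocks into torture_preempt_schedule(). The helper presumably lives in include/linux/torture.h and reduces to something like this (reconstructed from the usage here, not part of this diff):

#ifdef CONFIG_PREEMPT
#define torture_preempt_schedule()	preempt_schedule()
#else
#define torture_preempt_schedule()
#endif
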
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 294294c71ba4..38ece035039e 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -170,7 +170,7 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
* @tail : The new queue tail code word
* Return: The previous queue tail code word
*
- * xchg(lock, tail)
+ * xchg(lock, tail), which heads an address dependency
*
* p,*,* -> n,*,* ; prev = xchg(lock, node)
*/
@@ -409,13 +409,11 @@ queue:
if (old & _Q_TAIL_MASK) {
prev = decode_tail(old);
/*
- * The above xchg_tail() is also a load of @lock which generates,
- * through decode_tail(), a pointer.
- *
- * The address dependency matches the RELEASE of xchg_tail()
- * such that the access to @prev must happen after.
+ * The above xchg_tail() is also a load of @lock which
+ * generates, through decode_tail(), a pointer. The address
+ * dependency matches the RELEASE of xchg_tail() such that
+ * the subsequent access to @prev happens after.
*/
- smp_read_barrier_depends();
WRITE_ONCE(prev->next, node);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 6f3dba6e4e9e..65cc0cb984e6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
return ret;
}
+static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
+{
+ int ret = try_to_take_rt_mutex(lock, current, NULL);
+
+ /*
+ * try_to_take_rt_mutex() sets the lock waiters bit
+ * unconditionally. Clean this up.
+ */
+ fixup_rt_mutex_waiters(lock);
+
+ return ret;
+}
+
/*
* Slow path try-lock function:
*/
@@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
*/
raw_spin_lock_irqsave(&lock->wait_lock, flags);
- ret = try_to_take_rt_mutex(lock, current, NULL);
-
- /*
- * try_to_take_rt_mutex() sets the lock waiters bit
- * unconditionally. Clean this up.
- */
- fixup_rt_mutex_waiters(lock);
+ ret = __rt_mutex_slowtrylock(lock);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
@@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
return rt_mutex_slowtrylock(lock);
}
+int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+ return __rt_mutex_slowtrylock(lock);
+}
+
/**
* rt_mutex_timed_lock - lock a rt_mutex interruptible
* the timeout structure is provided
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 124e98ca0b17..68686b3ec3c1 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -148,6 +148,7 @@ extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter);
extern int rt_mutex_futex_trylock(struct rt_mutex *l);
+extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
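The rtmutex hunks are the usual locked/lockless-helper split: rt_mutex_slowtrylock() keeps the wait_lock acquisition and delegates the work to __rt_mutex_slowtrylock(), which the new __rt_mutex_futex_trylock() can call on paths that already hold wait_lock. A generic sketch of that convention using pthreads; the counter is invented and only the shape matches the kernel code:

#include <pthread.h>

struct counter {
	pthread_mutex_t lock;
	int value;
};

/* Double-underscore variant: caller must already hold c->lock. */
static int __counter_try_bump(struct counter *c)
{
	if (c->value >= 10)
		return 0;
	c->value++;
	return 1;
}

/* Public variant: takes the lock, then reuses the helper. */
int counter_try_bump(struct counter *c)
{
	int ret;

	pthread_mutex_lock(&c->lock);
	ret = __counter_try_bump(c);
	pthread_mutex_unlock(&c->lock);
	return ret;
}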
diff --git a/kernel/module.c b/kernel/module.c
index dea01ac9cb74..1d65b2cc4f80 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2863,6 +2863,15 @@ static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
}
#endif /* CONFIG_LIVEPATCH */
+static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
+{
+ if (retpoline_module_ok(get_modinfo(info, "retpoline")))
+ return;
+
+ pr_warn("%s: loading module not compiled with retpoline compiler.\n",
+ mod->name);
+}
+
/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
struct load_info *info)
@@ -3029,6 +3038,8 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
}
+ check_modinfo_retpoline(mod, info);
+
if (get_modinfo(info, "staging")) {
add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
pr_warn("%s: module is from the staging directory, the quality "
@@ -3118,7 +3129,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
sizeof(*mod->ftrace_callsites),
&mod->num_ftrace_callsites);
#endif
-
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+ mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
+ sizeof(*mod->ei_funcs),
+ &mod->num_ei_funcs);
+#endif
mod->extable = section_objs(info, "__ex_table",
sizeof(*mod->extable), &mod->num_exentries);
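check_modinfo_retpoline() reads the module's "retpoline" tag via get_modinfo(), which scans the .modinfo ELF section, a sequence of NUL-terminated "tag=value" records. A userspace model of that lookup; modinfo_get() is a stand-in, and the kernel version additionally tracks the section through struct load_info:

#include <stdio.h>
#include <string.h>

static const char *modinfo_get(const char *buf, size_t len, const char *tag)
{
	size_t taglen = strlen(tag);
	const char *p = buf, *end = buf + len;

	while (p < end) {
		if (!strncmp(p, tag, taglen) && p[taglen] == '=')
			return p + taglen + 1;
		p += strlen(p) + 1;	/* skip to the next record */
	}
	return NULL;
}

int main(void)
{
	/* Two records, laid out as they would be in .modinfo. */
	const char blob[] = "license=GPL\0retpoline=Y\0";
	const char *v = modinfo_get(blob, sizeof(blob), "retpoline");

	printf("retpoline tag: %s\n", v ? v : "(absent)");
	return 0;
}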
diff --git a/kernel/padata.c b/kernel/padata.c
index 57c0074d50cc..d568cc56405f 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* padata.c - generic interface to process data streams in parallel
*
diff --git a/kernel/pid.c b/kernel/pid.c
index b13b624e2c49..5d30c87e3c42 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -41,7 +41,19 @@
#include <linux/sched/task.h>
#include <linux/idr.h>
-struct pid init_struct_pid = INIT_STRUCT_PID;
+struct pid init_struct_pid = {
+ .count = ATOMIC_INIT(1),
+ .tasks = {
+ { .first = NULL },
+ { .first = NULL },
+ { .first = NULL },
+ },
+ .level = 0,
+ .numbers = { {
+ .nr = 0,
+ .ns = &init_pid_ns,
+ }, }
+};
int pid_max = PID_MAX_DEFAULT;
@@ -193,10 +205,8 @@ struct pid *alloc_pid(struct pid_namespace *ns)
}
if (unlikely(is_child_reaper(pid))) {
- if (pid_ns_prepare_proc(ns)) {
- disable_pid_allocation(ns);
+ if (pid_ns_prepare_proc(ns))
goto out_free;
- }
}
get_pid_ns(ns);
@@ -226,6 +236,10 @@ out_free:
while (++i <= ns->level)
idr_remove(&ns->idr, (pid->numbers + i)->nr);
+ /* On failure to allocate the first pid, reset the state */
+ if (ns->pid_allocated == PIDNS_ADDING)
+ idr_set_cursor(&ns->idr, 0);
+
spin_unlock_irq(&pidmap_lock);
kmem_cache_free(ns->pid_cachep, pid);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3a2ca9066583..705c2366dafe 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -22,6 +22,35 @@ DEFINE_MUTEX(pm_mutex);
#ifdef CONFIG_PM_SLEEP
+void lock_system_sleep(void)
+{
+ current->flags |= PF_FREEZER_SKIP;
+ mutex_lock(&pm_mutex);
+}
+EXPORT_SYMBOL_GPL(lock_system_sleep);
+
+void unlock_system_sleep(void)
+{
+ /*
+ * Don't use freezer_count() because we don't want the call to
+ * try_to_freeze() here.
+ *
+ * Reason:
+ * Fundamentally, we just don't need it, because freezing condition
+ * doesn't come into effect until we release the pm_mutex lock,
+ * since the freezer always works with pm_mutex held.
+ *
+ * More importantly, in the case of hibernation,
+ * unlock_system_sleep() gets called in snapshot_read() and
+ * snapshot_write() when the freezing condition is still in effect.
+ * Which means, if we use try_to_freeze() here, it would make them
+ * enter the refrigerator, thus causing hibernation to lock up.
+ */
+ current->flags &= ~PF_FREEZER_SKIP;
+ mutex_unlock(&pm_mutex);
+}
+EXPORT_SYMBOL_GPL(unlock_system_sleep);
+
/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
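With lock_system_sleep() and unlock_system_sleep() now out-of-line and exported, modular code can serialize against system suspend; setting PF_FREEZER_SKIP before blocking on pm_mutex is what keeps the freezer from waiting forever on such a caller. A hedged usage sketch, not from the kernel tree; struct example_dev and example_apply_config() are hypothetical:

/* Sketch only: serialize a reconfiguration against suspend/hibernate. */
static int example_reconfigure(struct example_dev *dev)
{
	int ret;

	lock_system_sleep();	/* no sleep transition can start past here */
	ret = example_apply_config(dev);
	unlock_system_sleep();	/* clears PF_FREEZER_SKIP, drops pm_mutex */
	return ret;
}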
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index bce0464524d8..3d37c279c090 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1645,8 +1645,7 @@ static unsigned long free_unnecessary_pages(void)
* [number of saveable pages] - [number of pages that can be freed in theory]
*
* where the second term is the sum of (1) reclaimable slab pages, (2) active
- * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
- * minus mapped file pages.
+ * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
*/
static unsigned long minimum_image_size(unsigned long saveable)
{
@@ -1656,8 +1655,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
+ global_node_page_state(NR_ACTIVE_ANON)
+ global_node_page_state(NR_INACTIVE_ANON)
+ global_node_page_state(NR_ACTIVE_FILE)
- + global_node_page_state(NR_INACTIVE_FILE)
- - global_node_page_state(NR_FILE_MAPPED);
+ + global_node_page_state(NR_INACTIVE_FILE);
return saveable <= size ? 0 : saveable - size;
}
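With the NR_FILE_MAPPED subtraction gone, the estimate is simply the saveable pages minus everything reclaimable in theory. A worked example with invented page counts:

/*
 * saveable = 300000 pages
 * size     = 20000 (slab reclaimable) + 50000 (active anon)
 *          + 30000 (inactive anon)    + 60000 (active file)
 *          + 40000 (inactive file)    = 200000 pages
 *
 * minimum_image_size = 300000 - 200000 = 100000 pages
 *
 * Before this change, NR_FILE_MAPPED (say 25000) was also
 * subtracted from size, inflating the result to 125000 pages.
 */
static unsigned long min_image_size_model(unsigned long saveable,
					  unsigned long size)
{
	return saveable <= size ? 0 : saveable - size;
}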
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 293ead59eccc..11b4282c2d20 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -240,7 +240,7 @@ static void hib_init_batch(struct hib_bio_batch *hb)
static void hib_end_io(struct bio *bio)
{
struct hib_bio_batch *hb = bio->bi_private;
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
if (bio->bi_status) {
pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
@@ -879,7 +879,7 @@ out_clean:
 * space available from the resume partition.
*/
-static int enough_swap(unsigned int nr_pages, unsigned int flags)
+static int enough_swap(unsigned int nr_pages)
{
unsigned int free_swap = count_swap_pages(root_swap, 1);
unsigned int required;
@@ -915,7 +915,7 @@ int swsusp_write(unsigned int flags)
return error;
}
if (flags & SF_NOCOMPRESS_MODE) {
- if (!enough_swap(pages, flags)) {
+ if (!enough_swap(pages)) {
pr_err("Not enough free swap\n");
error = -ENOSPC;
goto out_finish;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b9006617710f..c2e713f6ae2e 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -920,10 +920,10 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
return ret;
}
-static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
+static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
struct devkmsg_user *user = file->private_data;
- int ret = 0;
+ __poll_t ret = 0;
if (!user)
return POLLERR|POLLNVAL;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 84b1367935e4..5e1d713c8e61 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -659,7 +659,7 @@ static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
if (lock_task_sighand(child, &flags)) {
error = -EINVAL;
if (likely(child->last_siginfo != NULL)) {
- *info = *child->last_siginfo;
+ copy_siginfo(info, child->last_siginfo);
error = 0;
}
unlock_task_sighand(child, &flags);
@@ -675,7 +675,7 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
if (lock_task_sighand(child, &flags)) {
error = -EINVAL;
if (likely(child->last_siginfo != NULL)) {
- *child->last_siginfo = *info;
+ copy_siginfo(child->last_siginfo, info);
error = 0;
}
unlock_task_sighand(child, &flags);
@@ -1092,6 +1092,10 @@ int ptrace_request(struct task_struct *child, long request,
ret = seccomp_get_filter(child, addr, datavp);
break;
+ case PTRACE_SECCOMP_GET_METADATA:
+ ret = seccomp_get_metadata(child, addr, datavp);
+ break;
+
default:
break;
}
@@ -1226,7 +1230,6 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
break;
case PTRACE_SETSIGINFO:
- memset(&siginfo, 0, sizeof siginfo);
if (copy_siginfo_from_user32(
&siginfo, (struct compat_siginfo __user *) datap))
ret = -EFAULT;
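Replacing the open-coded struct assignments with copy_siginfo(), and dropping the memset that copy_siginfo_from_user32() makes redundant, keeps every siginfo copy going through one helper, so decisions about how much of the tagged union to copy or clear live in a single place. A userspace model of why such copies get centralized; struct msg and its layout are invented:

#include <string.h>

struct msg {
	int type;
	union {
		long ival;
		char text[32];
	} u;
};

static void msg_copy(struct msg *to, const struct msg *from)
{
	/* Clear first so padding and unused union bytes are defined. */
	memset(to, 0, sizeof(*to));
	to->type = from->type;
	if (from->type == 0)
		to->u.ival = from->u.ival;
	else
		memcpy(to->u.text, from->u.text, sizeof(to->u.text));
}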
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 59c471de342a..6334f2c1abd0 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -30,31 +30,8 @@
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
-/*
- * Process-level increment to ->dynticks_nesting field. This allows for
- * architectures that use half-interrupts and half-exceptions from
- * process context.
- *
- * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
- * that counts the number of process-based reasons why RCU cannot
- * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
- * is the value used to increment or decrement this field.
- *
- * The rest of the bits could in principle be used to count interrupts,
- * but this would mean that a negative-one value in the interrupt
- * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
- * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
- * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
- * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
- * initial exit from idle.
- */
-#define DYNTICK_TASK_NEST_WIDTH 7
-#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
-#define DYNTICK_TASK_NEST_MASK (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
-#define DYNTICK_TASK_FLAG ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
-#define DYNTICK_TASK_MASK ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
-#define DYNTICK_TASK_EXIT_IDLE (DYNTICK_TASK_NEST_VALUE + \
- DYNTICK_TASK_FLAG)
+/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
+#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
/*
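The bit-packed DYNTICK_TASK_* scheme gives way to a single bias: non-idle process context parks ->dynticks_nmi_nesting at DYNTICK_IRQ_NONIDLE, so even an unmatched rcu_irq_exit() leaves the counter enormous rather than letting it reach zero, the value that means RCU-idle. A runnable toy model of the bias; the real code moves the counter in the increments used by rcu_nmi_enter() and rcu_nmi_exit():

#include <limits.h>
#include <stdio.h>

#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)

int main(void)
{
	long nmi_nesting = DYNTICK_IRQ_NONIDLE;	/* busy, not idle */

	nmi_nesting += 2;	/* rcu_irq_enter() */
	nmi_nesting -= 2;	/* rcu_irq_exit() */
	nmi_nesting -= 2;	/* unmatched exit: still huge, still non-idle */

	printf("looks idle? %s\n", nmi_nesting == 0 ? "yes" : "no");
	return 0;
}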
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 1f87a02c3399..d1ebdf9868bb 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -106,10 +106,6 @@ static int rcu_perf_writer_state;
#define MAX_MEAS 10000
#define MIN_MEAS 100
-static int perf_runnable = IS_ENABLED(MODULE);
-module_param(perf_runnable, int, 0444);
-MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot");
-
/*
* Operations vector for selecting different types of tests.
*/
@@ -646,7 +642,7 @@ rcu_perf_init(void)
&tasks_ops,
};
- if (!torture_init_begin(perf_type, verbose, &perf_runnable))
+ if (!torture_init_begin(perf_type, verbose))
return -EBUSY;
/* Process args and tell the world that the perf'er is on the job. */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 74f6b0146b98..308e6fdbced8 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -187,10 +187,6 @@ static const char *rcu_torture_writer_state_getname(void)
return rcu_torture_writer_state_names[i];
}
-static int torture_runnable = IS_ENABLED(MODULE);
-module_param(torture_runnable, int, 0444);
-MODULE_PARM_DESC(torture_runnable, "Start rcutorture at boot");
-
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
@@ -315,11 +311,9 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
}
if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
udelay(shortdelay_us);
-#ifdef CONFIG_PREEMPT
if (!preempt_count() &&
- !(torture_random(rrsp) % (nrealreaders * 20000)))
- preempt_schedule(); /* No QS if preempt_disable() in effect */
-#endif
+ !(torture_random(rrsp) % (nrealreaders * 500)))
+ torture_preempt_schedule(); /* QS only if preemptible. */
}
static void rcu_torture_read_unlock(int idx) __releases(RCU)
@@ -1731,7 +1725,7 @@ rcu_torture_init(void)
&sched_ops, &tasks_ops,
};
- if (!torture_init_begin(torture_type, verbose, &torture_runnable))
+ if (!torture_init_begin(torture_type, verbose))
return -EBUSY;
/* Process args and tell the world that the torturer is on the job. */
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 6d5880089ff6..d5cea81378cc 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -53,6 +53,33 @@ static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);
+/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
+#define spin_lock_rcu_node(p) \
+do { \
+ spin_lock(&ACCESS_PRIVATE(p, lock)); \
+ smp_mb__after_unlock_lock(); \
+} while (0)
+
+#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
+
+#define spin_lock_irq_rcu_node(p) \
+do { \
+ spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
+ smp_mb__after_unlock_lock(); \
+} while (0)
+
+#define spin_unlock_irq_rcu_node(p) \
+ spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
+
+#define spin_lock_irqsave_rcu_node(p, flags) \
+do { \
+ spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
+ smp_mb__after_unlock_lock(); \
+} while (0)
+
+#define spin_unlock_irqrestore_rcu_node(p, flags) \
+	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
+
/*
* Initialize SRCU combining tree. Note that statically allocated
* srcu_struct structures might already have srcu_read_lock() and
@@ -77,7 +104,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
/* Each pass through this loop initializes one srcu_node structure. */
rcu_for_each_node_breadth_first(sp, snp) {
- raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
+ spin_lock_init(&ACCESS_PRIVATE(snp, lock));
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
@@ -111,7 +138,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
snp_first = sp->level[level];
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
- raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
+ spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
rcu_segcblist_init(&sdp->srcu_cblist);
sdp->srcu_cblist_invoking = false;
sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
@@ -170,7 +197,7 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
/* Don't re-initialize a lock while it is held. */
debug_check_no_locks_freed((void *)sp, sizeof(*sp));
lockdep_init_map(&sp->dep_map, name, key, 0);
- raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
+ spin_lock_init(&ACCESS_PRIVATE(sp, lock));
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -187,7 +214,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
*/
int init_srcu_struct(struct srcu_struct *sp)
{
- raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
+ spin_lock_init(&ACCESS_PRIVATE(sp, lock));
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -210,13 +237,13 @@ static void check_init_srcu_struct(struct srcu_struct *sp)
/* The smp_load_acquire() pairs with the smp_store_release(). */
if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
return; /* Already initialized. */
- raw_spin_lock_irqsave_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(sp, flags);
if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
- raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(sp, flags);
return;
}
init_srcu_struct_fields(sp, true);
- raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
@@ -513,7 +540,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
mutex_lock(&sp->srcu_cb_mutex);
/* End the current grace period. */
- raw_spin_lock_irq_rcu_node(sp);
+ spin_lock_irq_rcu_node(sp);
idx = rcu_seq_state(sp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
cbdelay = srcu_get_delay(sp);
@@ -522,7 +549,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
sp->srcu_gp_seq_needed_exp = gpseq;
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
mutex_unlock(&sp->srcu_gp_mutex);
/* A new grace period can start at this point. But only one. */
@@ -530,7 +557,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
rcu_for_each_node_breadth_first(sp, snp) {
- raw_spin_lock_irq_rcu_node(snp);
+ spin_lock_irq_rcu_node(snp);
cbs = false;
if (snp >= sp->level[rcu_num_lvls - 1])
cbs = snp->srcu_have_cbs[idx] == gpseq;
@@ -540,7 +567,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
snp->srcu_gp_seq_needed_exp = gpseq;
mask = snp->srcu_data_have_cbs[idx];
snp->srcu_data_have_cbs[idx] = 0;
- raw_spin_unlock_irq_rcu_node(snp);
+ spin_unlock_irq_rcu_node(snp);
if (cbs)
srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
@@ -548,11 +575,11 @@ static void srcu_gp_end(struct srcu_struct *sp)
if (!(gpseq & counter_wrap_check))
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
sdp = per_cpu_ptr(sp->sda, cpu);
- raw_spin_lock_irqsave_rcu_node(sdp, flags);
+ spin_lock_irqsave_rcu_node(sdp, flags);
if (ULONG_CMP_GE(gpseq,
sdp->srcu_gp_seq_needed + 100))
sdp->srcu_gp_seq_needed = gpseq;
- raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
+ spin_unlock_irqrestore_rcu_node(sdp, flags);
}
}
@@ -560,17 +587,17 @@ static void srcu_gp_end(struct srcu_struct *sp)
mutex_unlock(&sp->srcu_cb_mutex);
/* Start a new grace period if needed. */
- raw_spin_lock_irq_rcu_node(sp);
+ spin_lock_irq_rcu_node(sp);
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (!rcu_seq_state(gpseq) &&
ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
srcu_gp_start(sp);
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
/* Throttle expedited grace periods: Should be rare! */
srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
? 0 : SRCU_INTERVAL);
} else {
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
}
}
@@ -590,18 +617,18 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
return;
- raw_spin_lock_irqsave_rcu_node(snp, flags);
+ spin_lock_irqsave_rcu_node(snp, flags);
if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
- raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+ spin_unlock_irqrestore_rcu_node(snp, flags);
return;
}
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
- raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+ spin_unlock_irqrestore_rcu_node(snp, flags);
}
- raw_spin_lock_irqsave_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(sp, flags);
if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
sp->srcu_gp_seq_needed_exp = s;
- raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
@@ -623,12 +650,12 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
for (; snp != NULL; snp = snp->srcu_parent) {
if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
return; /* GP already done and CBs recorded. */
- raw_spin_lock_irqsave_rcu_node(snp, flags);
+ spin_lock_irqsave_rcu_node(snp, flags);
if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
snp_seq = snp->srcu_have_cbs[idx];
if (snp == sdp->mynode && snp_seq == s)
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
- raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+ spin_unlock_irqrestore_rcu_node(snp, flags);
if (snp == sdp->mynode && snp_seq != s) {
srcu_schedule_cbs_sdp(sdp, do_norm
? SRCU_INTERVAL
@@ -644,11 +671,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
snp->srcu_gp_seq_needed_exp = s;
- raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+ spin_unlock_irqrestore_rcu_node(snp, flags);
}
/* Top of tree, must ensure the grace period will be started. */
- raw_spin_lock_irqsave_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(sp, flags);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
/*
* Record need for grace period s. Pair with load
@@ -667,7 +694,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
queue_delayed_work(system_power_efficient_wq, &sp->work,
srcu_get_delay(sp));
}
- raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
@@ -830,7 +857,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
rhp->func = func;
local_irq_save(flags);
sdp = this_cpu_ptr(sp->sda);
- raw_spin_lock_rcu_node(sdp);
+ spin_lock_rcu_node(sdp);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&sp->srcu_gp_seq));
@@ -844,7 +871,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
sdp->srcu_gp_seq_needed_exp = s;
needexp = true;
}
- raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
+ spin_unlock_irqrestore_rcu_node(sdp, flags);
if (needgp)
srcu_funnel_gp_start(sp, sdp, s, do_norm);
else if (needexp)
@@ -900,7 +927,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
/*
* Make sure that later code is ordered after the SRCU grace
- * period. This pairs with the raw_spin_lock_irq_rcu_node()
+ * period. This pairs with the spin_lock_irq_rcu_node()
* in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
* because the current CPU might have been totally uninvolved with
* (and thus unordered against) that grace period.
@@ -1024,7 +1051,7 @@ void srcu_barrier(struct srcu_struct *sp)
*/
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
- raw_spin_lock_irq_rcu_node(sdp);
+ spin_lock_irq_rcu_node(sdp);
atomic_inc(&sp->srcu_barrier_cpu_cnt);
sdp->srcu_barrier_head.func = srcu_barrier_cb;
debug_rcu_head_queue(&sdp->srcu_barrier_head);
@@ -1033,7 +1060,7 @@ void srcu_barrier(struct srcu_struct *sp)
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
atomic_dec(&sp->srcu_barrier_cpu_cnt);
}
- raw_spin_unlock_irq_rcu_node(sdp);
+ spin_unlock_irq_rcu_node(sdp);
}
/* Remove the initial count, at which point reaching zero can happen. */
@@ -1082,17 +1109,17 @@ static void srcu_advance_state(struct srcu_struct *sp)
*/
idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
if (idx == SRCU_STATE_IDLE) {
- raw_spin_lock_irq_rcu_node(sp);
+ spin_lock_irq_rcu_node(sp);
if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
mutex_unlock(&sp->srcu_gp_mutex);
return;
}
idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
if (idx == SRCU_STATE_IDLE)
srcu_gp_start(sp);
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
if (idx != SRCU_STATE_IDLE) {
mutex_unlock(&sp->srcu_gp_mutex);
return; /* Someone else started the grace period. */
@@ -1141,19 +1168,19 @@ static void srcu_invoke_callbacks(struct work_struct *work)
sdp = container_of(work, struct srcu_data, work.work);
sp = sdp->sp;
rcu_cblist_init(&ready_cbs);
- raw_spin_lock_irq_rcu_node(sdp);
+ spin_lock_irq_rcu_node(sdp);
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&sp->srcu_gp_seq));
if (sdp->srcu_cblist_invoking ||
!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
- raw_spin_unlock_irq_rcu_node(sdp);
+ spin_unlock_irq_rcu_node(sdp);
return; /* Someone else on the job or nothing to do. */
}
/* We are on the job! Extract and invoke ready callbacks. */
sdp->srcu_cblist_invoking = true;
rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
- raw_spin_unlock_irq_rcu_node(sdp);
+ spin_unlock_irq_rcu_node(sdp);
rhp = rcu_cblist_dequeue(&ready_cbs);
for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
debug_rcu_head_unqueue(rhp);
@@ -1166,13 +1193,13 @@ static void srcu_invoke_callbacks(struct work_struct *work)
* Update counts, accelerate new callbacks, and if needed,
* schedule another round of callback invocation.
*/
- raw_spin_lock_irq_rcu_node(sdp);
+ spin_lock_irq_rcu_node(sdp);
rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
rcu_seq_snap(&sp->srcu_gp_seq));
sdp->srcu_cblist_invoking = false;
more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
- raw_spin_unlock_irq_rcu_node(sdp);
+ spin_unlock_irq_rcu_node(sdp);
if (more)
srcu_schedule_cbs_sdp(sdp, 0);
}
@@ -1185,7 +1212,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
bool pushgp = true;
- raw_spin_lock_irq_rcu_node(sp);
+ spin_lock_irq_rcu_node(sp);
if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
/* All requests fulfilled, time to go idle. */
@@ -1195,7 +1222,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
/* Outstanding request and no GP. Start one. */
srcu_gp_start(sp);
}
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
if (pushgp)
queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
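The new spin_lock*_rcu_node() wrappers differ from the plain primitives only in the smp_mb__after_unlock_lock() issued after each acquisition, which upgrades an unlock of one of these locks followed by a lock of one, on any CPU, to a full memory barrier. A sketch of the property the grace-period code depends on; x, r1, and the surrounding code are hypothetical:

/* CPU 0 */
WRITE_ONCE(x, 1);		/* access before the critical section */
spin_lock_rcu_node(sdp);
spin_unlock_rcu_node(sdp);

/* CPU 1, acquiring the lock after CPU 0 releases it */
spin_lock_irq_rcu_node(sdp);	/* includes smp_mb__after_unlock_lock() */
r1 = READ_ONCE(x);		/* cannot miss CPU 0's earlier store */
spin_unlock_irq_rcu_node(sdp);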
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f9c0ca2ccf0c..491bdf39f276 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -265,25 +265,12 @@ void rcu_bh_qs(void)
#endif
static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
- .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+ .dynticks_nesting = 1,
+ .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
/*
- * There's a few places, currently just in the tracing infrastructure,
- * that uses rcu_irq_enter() to make sure RCU is watching. But there's
- * a small location where that will not even work. In those cases
- * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
- * can be called.
- */
-static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
-
-bool rcu_irq_enter_disabled(void)
-{
- return this_cpu_read(disable_rcu_irq_enter);
-}
-
-/*
* Record entry into an extended quiescent state. This is only to be
* called when not already in an extended quiescent state.
*/
@@ -762,68 +749,39 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
}
/*
- * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
+ * Enter an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
*
- * Enter idle, doing appropriate accounting. The caller must have
- * disabled interrupts.
+ * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
+ * the possibility of usermode upcalls having messed up our count
+ * of interrupt nesting level during the prior busy period.
*/
-static void rcu_eqs_enter_common(bool user)
+static void rcu_eqs_enter(bool user)
{
struct rcu_state *rsp;
struct rcu_data *rdp;
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_dynticks *rdtp;
- lockdep_assert_irqs_disabled();
- trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
- if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- !user && !is_idle_task(current)) {
- struct task_struct *idle __maybe_unused =
- idle_task(smp_processor_id());
-
- trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
- rcu_ftrace_dump(DUMP_ORIG);
- WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
- current->pid, current->comm,
- idle->pid, idle->comm); /* must be idle task! */
+ rdtp = this_cpu_ptr(&rcu_dynticks);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+ rdtp->dynticks_nesting == 0);
+ if (rdtp->dynticks_nesting != 1) {
+ rdtp->dynticks_nesting--;
+ return;
}
+
+ lockdep_assert_irqs_disabled();
+ trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
for_each_rcu_flavor(rsp) {
rdp = this_cpu_ptr(rsp->rda);
do_nocb_deferred_wakeup(rdp);
}
rcu_prepare_for_idle();
- __this_cpu_inc(disable_rcu_irq_enter);
- rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
- rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
- __this_cpu_dec(disable_rcu_irq_enter);
+ WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
+ rcu_dynticks_eqs_enter();
rcu_dynticks_task_enter();
-
- /*
- * It is illegal to enter an extended quiescent state while
- * in an RCU read-side critical section.
- */
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
- "Illegal idle entry in RCU read-side critical section.");
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
- "Illegal idle entry in RCU-bh read-side critical section.");
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
- "Illegal idle entry in RCU-sched read-side critical section.");
-}
-
-/*
- * Enter an RCU extended quiescent state, which can be either the
- * idle loop or adaptive-tickless usermode execution.
- */
-static void rcu_eqs_enter(bool user)
-{
- struct rcu_dynticks *rdtp;
-
- rdtp = this_cpu_ptr(&rcu_dynticks);
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
- if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
- rcu_eqs_enter_common(user);
- else
- rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
}
/**
@@ -834,10 +792,6 @@ static void rcu_eqs_enter(bool user)
* critical sections can occur in irq handlers in idle, a possibility
* handled by irq_enter() and irq_exit().)
*
- * We crowbar the ->dynticks_nesting field to zero to allow for
- * the possibility of usermode upcalls having messed up our count
- * of interrupt nesting level during the prior busy period.
- *
* If you add or remove a call to rcu_idle_enter(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
@@ -867,6 +821,46 @@ void rcu_user_enter(void)
#endif /* CONFIG_NO_HZ_FULL */
/**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ *
+ * If we are returning from the outermost NMI handler that interrupted an
+ * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * to let the RCU grace-period handling know that the CPU is back to
+ * being RCU-idle.
+ *
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
+void rcu_nmi_exit(void)
+{
+ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
+ /*
+ * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+ * (We are exiting an NMI handler, so RCU better be paying attention
+ * to us!)
+ */
+ WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+ WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
+
+ /*
+ * If the nesting level is not 1, the CPU wasn't RCU-idle, so
+ * leave it in non-RCU-idle state.
+ */
+ if (rdtp->dynticks_nmi_nesting != 1) {
+ trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
+ rdtp->dynticks_nmi_nesting - 2);
+ return;
+ }
+
+ /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
+ trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+ rcu_dynticks_eqs_enter();
+}
+
+/**
* rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
*
* Exit from an interrupt handler, which might possibly result in entering
@@ -875,8 +869,8 @@ void rcu_user_enter(void)
*
* This code assumes that the idle loop never does anything that might
* result in unbalanced calls to irq_enter() and irq_exit(). If your
- * architecture violates this assumption, RCU will give you what you
- * deserve, good and hard. But very infrequently and irreproducibly.
+ * architecture's idle loop violates this assumption, RCU will give you what
+ * you deserve, good and hard. But very infrequently and irreproducibly.
*
* Use things like work queues to work around this limitation.
*
@@ -887,23 +881,14 @@ void rcu_user_enter(void)
*/
void rcu_irq_exit(void)
{
- struct rcu_dynticks *rdtp;
+ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
lockdep_assert_irqs_disabled();
- rdtp = this_cpu_ptr(&rcu_dynticks);
-
- /* Page faults can happen in NMI handlers, so check... */
- if (rdtp->dynticks_nmi_nesting)
- return;
-
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- rdtp->dynticks_nesting < 1);
- if (rdtp->dynticks_nesting <= 1) {
- rcu_eqs_enter_common(true);
- } else {
- trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
- rdtp->dynticks_nesting--;
- }
+ if (rdtp->dynticks_nmi_nesting == 1)
+ rcu_prepare_for_idle();
+ rcu_nmi_exit();
+ if (rdtp->dynticks_nmi_nesting == 0)
+ rcu_dynticks_task_enter();
}
/*
@@ -922,55 +907,33 @@ void rcu_irq_exit_irqson(void)
}
/*
- * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
- *
- * If the new value of the ->dynticks_nesting counter was previously zero,
- * we really have exited idle, and must do the appropriate accounting.
- * The caller must have disabled interrupts.
- */
-static void rcu_eqs_exit_common(long long oldval, int user)
-{
- RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
-
- rcu_dynticks_task_exit();
- rcu_dynticks_eqs_exit();
- rcu_cleanup_after_idle();
- trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
- if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- !user && !is_idle_task(current)) {
- struct task_struct *idle __maybe_unused =
- idle_task(smp_processor_id());
-
- trace_rcu_dyntick(TPS("Error on exit: not idle task"),
- oldval, rdtp->dynticks_nesting);
- rcu_ftrace_dump(DUMP_ORIG);
- WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
- current->pid, current->comm,
- idle->pid, idle->comm); /* must be idle task! */
- }
-}
-
-/*
* Exit an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
+ *
+ * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
+ * allow for the possibility of usermode upcalls messing up our count of
+ * interrupt nesting level during the busy period that is just now starting.
*/
static void rcu_eqs_exit(bool user)
{
struct rcu_dynticks *rdtp;
- long long oldval;
+ long oldval;
lockdep_assert_irqs_disabled();
rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
- if (oldval & DYNTICK_TASK_NEST_MASK) {
- rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
- } else {
- __this_cpu_inc(disable_rcu_irq_enter);
- rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
- rcu_eqs_exit_common(oldval, user);
- __this_cpu_dec(disable_rcu_irq_enter);
+ if (oldval) {
+ rdtp->dynticks_nesting++;
+ return;
}
+ rcu_dynticks_task_exit();
+ rcu_dynticks_eqs_exit();
+ rcu_cleanup_after_idle();
+ trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks);
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
+ WRITE_ONCE(rdtp->dynticks_nesting, 1);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
}
/**
@@ -979,11 +942,6 @@ static void rcu_eqs_exit(bool user)
* Exit idle mode, in other words, -enter- the mode in which RCU
* read-side critical sections can occur.
*
- * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
- * allow for the possibility of usermode upcalls messing up our count
- * of interrupt nesting level during the busy period that is just
- * now starting.
- *
* If you add or remove a call to rcu_idle_exit(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
@@ -1013,65 +971,6 @@ void rcu_user_exit(void)
#endif /* CONFIG_NO_HZ_FULL */
/**
- * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
- *
- * Enter an interrupt handler, which might possibly result in exiting
- * idle mode, in other words, entering the mode in which read-side critical
- * sections can occur. The caller must have disabled interrupts.
- *
- * Note that the Linux kernel is fully capable of entering an interrupt
- * handler that it never exits, for example when doing upcalls to
- * user mode! This code assumes that the idle loop never does upcalls to
- * user mode. If your architecture does do upcalls from the idle loop (or
- * does anything else that results in unbalanced calls to the irq_enter()
- * and irq_exit() functions), RCU will give you what you deserve, good
- * and hard. But very infrequently and irreproducibly.
- *
- * Use things like work queues to work around this limitation.
- *
- * You have been warned.
- *
- * If you add or remove a call to rcu_irq_enter(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_irq_enter(void)
-{
- struct rcu_dynticks *rdtp;
- long long oldval;
-
- lockdep_assert_irqs_disabled();
- rdtp = this_cpu_ptr(&rcu_dynticks);
-
- /* Page faults can happen in NMI handlers, so check... */
- if (rdtp->dynticks_nmi_nesting)
- return;
-
- oldval = rdtp->dynticks_nesting;
- rdtp->dynticks_nesting++;
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- rdtp->dynticks_nesting == 0);
- if (oldval)
- trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
- else
- rcu_eqs_exit_common(oldval, true);
-}
-
-/*
- * Wrapper for rcu_irq_enter() where interrupts are enabled.
- *
- * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_irq_enter_irqson(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- rcu_irq_enter();
- local_irq_restore(flags);
-}
-
-/**
* rcu_nmi_enter - inform RCU of entry to NMI context
*
* If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
@@ -1086,7 +985,7 @@ void rcu_irq_enter_irqson(void)
void rcu_nmi_enter(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- int incby = 2;
+ long incby = 2;
/* Complain about underflow. */
WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
@@ -1103,45 +1002,61 @@ void rcu_nmi_enter(void)
rcu_dynticks_eqs_exit();
incby = 1;
}
- rdtp->dynticks_nmi_nesting += incby;
+ trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
+ rdtp->dynticks_nmi_nesting,
+ rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
+ rdtp->dynticks_nmi_nesting + incby);
barrier();
}
/**
- * rcu_nmi_exit - inform RCU of exit from NMI context
+ * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
*
- * If we are returning from the outermost NMI handler that interrupted an
- * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
- * to let the RCU grace-period handling know that the CPU is back to
- * being RCU-idle.
+ * Enter an interrupt handler, which might possibly result in exiting
+ * idle mode, in other words, entering the mode in which read-side critical
+ * sections can occur. The caller must have disabled interrupts.
*
- * If you add or remove a call to rcu_nmi_exit(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
+ * Note that the Linux kernel is fully capable of entering an interrupt
+ * handler that it never exits, for example when doing upcalls to user mode!
+ * This code assumes that the idle loop never does upcalls to user mode.
+ * If your architecture's idle loop does do upcalls to user mode (or does
+ * anything else that results in unbalanced calls to the irq_enter() and
+ * irq_exit() functions), RCU will give you what you deserve, good and hard.
+ * But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ *
+ * If you add or remove a call to rcu_irq_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
*/
-void rcu_nmi_exit(void)
+void rcu_irq_enter(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- /*
- * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
- * (We are exiting an NMI handler, so RCU better be paying attention
- * to us!)
- */
- WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
- WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
+ lockdep_assert_irqs_disabled();
+ if (rdtp->dynticks_nmi_nesting == 0)
+ rcu_dynticks_task_exit();
+ rcu_nmi_enter();
+ if (rdtp->dynticks_nmi_nesting == 1)
+ rcu_cleanup_after_idle();
+}
- /*
- * If the nesting level is not 1, the CPU wasn't RCU-idle, so
- * leave it in non-RCU-idle state.
- */
- if (rdtp->dynticks_nmi_nesting != 1) {
- rdtp->dynticks_nmi_nesting -= 2;
- return;
- }
+/*
+ * Wrapper for rcu_irq_enter() where interrupts are enabled.
+ *
+ * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
+void rcu_irq_enter_irqson(void)
+{
+ unsigned long flags;
- /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
- rdtp->dynticks_nmi_nesting = 0;
- rcu_dynticks_eqs_enter();
+ local_irq_save(flags);
+ rcu_irq_enter();
+ local_irq_restore(flags);
}
/**
@@ -1233,7 +1148,8 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
*/
static int rcu_is_cpu_rrupt_from_idle(void)
{
- return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
+ return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
+ __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
}
/*
@@ -2789,6 +2705,11 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
rdp->n_force_qs_snap = rsp->n_force_qs;
} else if (count < rdp->qlen_last_fqs_check - qhimark)
rdp->qlen_last_fqs_check = count;
+
+ /*
+ * The following usually indicates a double call_rcu(). To track
+ * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
+ */
WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
local_irq_restore(flags);
@@ -3723,7 +3644,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
- WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
+ WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
rdp->cpu = cpu;
rdp->rsp = rsp;
@@ -3752,7 +3673,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
!init_nocb_callback_list(rdp))
rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
- rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+ rdp->dynticks->dynticks_nesting = 1; /* CPU not up, no tearing. */
rcu_dynticks_eqs_online();
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
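The net effect of the tree.c rework is two plain long counters: ->dynticks_nesting tracks process-level idle/usermode nesting, ->dynticks_nmi_nesting tracks irq/NMI nesting, and rcu_irq_enter()/rcu_irq_exit() become thin wrappers around rcu_nmi_enter()/rcu_nmi_exit(). A runnable toy model of the counter movements; it omits tracing, the ->dynticks atomic, and all the warnings:

#include <limits.h>
#include <stdio.h>

#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)

static long nesting = 1;			/* process level: busy */
static long nmi_nesting = DYNTICK_IRQ_NONIDLE;

static void eqs_enter(void) { nesting = 0; nmi_nesting = 0; }
static void eqs_exit(void)  { nesting = 1; nmi_nesting = DYNTICK_IRQ_NONIDLE; }

/* Outermost, idle-interrupting entry bumps by 1; nested entries by 2. */
static void nmi_enter(void) { nmi_nesting += nmi_nesting ? 2 : 1; }
static void nmi_exit(void)  { nmi_nesting -= nmi_nesting == 1 ? 1 : 2; }

int main(void)
{
	eqs_enter();	/* idle loop: both counters reach 0 */
	nmi_enter();	/* interrupt from idle: 0 -> 1 */
	printf("irq from idle: nmi_nesting=%ld\n", nmi_nesting);
	nmi_exit();	/* back to RCU-idle: 1 -> 0 */
	eqs_exit();	/* leave the idle loop */
	printf("busy again: nesting=%ld\n", nesting);
	return 0;
}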
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 46a5d1991450..6488a3b0e729 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -38,9 +38,8 @@
* Dynticks per-CPU state.
*/
struct rcu_dynticks {
- long long dynticks_nesting; /* Track irq/process nesting level. */
- /* Process level is worth LLONG_MAX/2. */
- int dynticks_nmi_nesting; /* Track NMI nesting level. */
+ long dynticks_nesting; /* Track process nesting level. */
+ long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
atomic_t dynticks; /* Even value for idle, else odd. */
bool rcu_need_heavy_qs; /* GP old, need heavy quiescent state. */
unsigned long rcu_qs_ctr; /* Light universal quiescent state ctr. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index db85ca3975f1..fb88a028deec 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -61,7 +61,6 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
-static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
@@ -1687,7 +1686,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
}
print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
- pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
+ pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
cpu,
"O."[!!cpu_online(cpu)],
"o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -1752,7 +1751,6 @@ static void increment_cpu_stall_ticks(void)
static int __init rcu_nocb_setup(char *str)
{
alloc_bootmem_cpumask_var(&rcu_nocb_mask);
- have_rcu_nocb_mask = true;
cpulist_parse(str, rcu_nocb_mask);
return 1;
}
@@ -1801,7 +1799,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
- if (have_rcu_nocb_mask)
+ if (cpumask_available(rcu_nocb_mask))
return cpumask_test_cpu(cpu, rcu_nocb_mask);
return false;
}
@@ -2295,14 +2293,13 @@ void __init rcu_init_nohz(void)
need_rcu_nocb_mask = true;
#endif /* #if defined(CONFIG_NO_HZ_FULL) */
- if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
+ if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
return;
}
- have_rcu_nocb_mask = true;
}
- if (!have_rcu_nocb_mask)
+ if (!cpumask_available(rcu_nocb_mask))
return;
#if defined(CONFIG_NO_HZ_FULL)
@@ -2428,7 +2425,7 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
struct rcu_data *rdp_leader = NULL; /* Suppress misguided gcc warn. */
struct rcu_data *rdp_prev = NULL;
- if (!have_rcu_nocb_mask)
+ if (!cpumask_available(rcu_nocb_mask))
return;
if (ls == -1) {
ls = int_sqrt(nr_cpu_ids);
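Dropping have_rcu_nocb_mask works because, with CONFIG_CPUMASK_OFFSTACK=y, a cpumask_var_t is a pointer and cpumask_available() reports whether it has been allocated; with offstack masks disabled it is unconditionally true, matching the always-present storage. A sketch of the pattern; example_mask and the function are invented:

static cpumask_var_t example_mask;	/* hypothetical */

static bool example_cpu_selected(int cpu)
{
	if (!cpumask_available(example_mask))
		return false;		/* never allocated: treat as empty */
	return cpumask_test_cpu(cpu, example_mask);
}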
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index fbd56d6e575b..68fa19a5e7bd 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -422,11 +422,13 @@ void init_rcu_head(struct rcu_head *head)
{
debug_object_init(head, &rcuhead_debug_descr);
}
+EXPORT_SYMBOL_GPL(init_rcu_head);
void destroy_rcu_head(struct rcu_head *head)
{
debug_object_free(head, &rcuhead_debug_descr);
}
+EXPORT_SYMBOL_GPL(destroy_rcu_head);
static bool rcuhead_is_static_object(void *addr)
{
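Exporting init_rcu_head() and destroy_rcu_head() lets GPL modules keep CONFIG_DEBUG_OBJECTS_RCU_HEAD informed about rcu_head fields in memory they manage themselves. A hedged usage sketch; struct example_obj and both helpers are invented:

struct example_obj {
	struct rcu_head rh;
	int payload;
};

static struct example_obj *example_alloc(void)
{
	struct example_obj *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		init_rcu_head(&p->rh);	/* register with debug-objects */
	return p;
}

static void example_free(struct example_obj *p)
{
	destroy_rcu_head(&p->rh);	/* unregister before freeing */
	kfree(p);
}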
diff --git a/kernel/relay.c b/kernel/relay.c
index 39a9dfc69486..41280033a4c5 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -919,9 +919,9 @@ static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
*
* Poll implemention.
*/
-static unsigned int relay_file_poll(struct file *filp, poll_table *wait)
+static __poll_t relay_file_poll(struct file *filp, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct rchan_buf *buf = filp->private_data;
if (buf->finalized)
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 2ddaec40956f..0926aef10dad 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -34,11 +34,6 @@ void complete(struct completion *x)
spin_lock_irqsave(&x->wait.lock, flags);
- /*
- * Perform commit of crossrelease here.
- */
- complete_release_commit(x);
-
if (x->done != UINT_MAX)
x->done++;
__wake_up_locked(&x->wait, TASK_NORMAL, 1);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 644fa2e3d993..3da7a2444a91 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -508,7 +508,8 @@ void resched_cpu(int cpu)
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
- resched_curr(rq);
+ if (cpu_online(cpu) || cpu == smp_processor_id())
+ resched_curr(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -2045,7 +2046,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* If the owning (remote) CPU is still in the middle of schedule() with
* this task as prev, wait until its done referencing the task.
*
- * Pairs with the smp_store_release() in finish_lock_switch().
+ * Pairs with the smp_store_release() in finish_task().
*
* This ensures that tasks getting woken will be fully ordered against
* their previous state and preserve Program Order.
@@ -2056,7 +2057,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
p->state = TASK_WAKING;
if (p->in_iowait) {
- delayacct_blkio_end();
+ delayacct_blkio_end(p);
atomic_dec(&task_rq(p)->nr_iowait);
}
@@ -2069,7 +2070,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
#else /* CONFIG_SMP */
if (p->in_iowait) {
- delayacct_blkio_end();
+ delayacct_blkio_end(p);
atomic_dec(&task_rq(p)->nr_iowait);
}
@@ -2122,7 +2123,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
if (!task_on_rq_queued(p)) {
if (p->in_iowait) {
- delayacct_blkio_end();
+ delayacct_blkio_end(p);
atomic_dec(&rq->nr_iowait);
}
ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
@@ -2571,6 +2572,50 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
#endif /* CONFIG_PREEMPT_NOTIFIERS */
+static inline void prepare_task(struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Claim the task as running, we do this before switching to it
+ * such that any running task will have this set.
+ */
+ next->on_cpu = 1;
+#endif
+}
+
+static inline void finish_task(struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+ /*
+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ * We must ensure this doesn't happen until the switch is completely
+ * finished.
+ *
+ * In particular, the load of prev->state in finish_task_switch() must
+ * happen before this.
+ *
+ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
+ */
+ smp_store_release(&prev->on_cpu, 0);
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
+ rq->lock.owner = current;
+#endif
+ /*
+ * If we are tracking spinlock dependencies then we have to
+ * fix up the runqueue lock - which gets 'carried over' from
+ * prev into current:
+ */
+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
+ raw_spin_unlock_irq(&rq->lock);
+}
+
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
@@ -2591,7 +2636,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
- prepare_lock_switch(rq, next);
+ prepare_task(next);
prepare_arch_switch(next);
}
@@ -2646,7 +2691,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* the scheduled task must drop that reference.
*
* We must observe prev->state before clearing prev->on_cpu (in
- * finish_lock_switch), otherwise a concurrent wakeup can get prev
+ * finish_task), otherwise a concurrent wakeup can get prev
 * running on another CPU and we could race with its RUNNING -> DEAD
* transition, resulting in a double drop.
*/
@@ -2663,7 +2708,8 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* to use.
*/
smp_mb__after_unlock_lock();
- finish_lock_switch(rq, prev);
+ finish_task(prev);
+ finish_lock_switch(rq);
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -4040,8 +4086,7 @@ recheck:
return -EINVAL;
}
- if (attr->sched_flags &
- ~(SCHED_FLAG_RESET_ON_FORK | SCHED_FLAG_RECLAIM))
+ if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
return -EINVAL;
/*
@@ -4108,6 +4153,9 @@ recheck:
}
if (user) {
+ if (attr->sched_flags & SCHED_FLAG_SUGOV)
+ return -EINVAL;
+
retval = security_task_setscheduler(p);
if (retval)
return retval;
@@ -4163,7 +4211,8 @@ change:
}
#endif
#ifdef CONFIG_SMP
- if (dl_bandwidth_enabled() && dl_policy(policy)) {
+ if (dl_bandwidth_enabled() && dl_policy(policy) &&
+ !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
cpumask_t *span = rq->rd->span;
/*
@@ -4293,6 +4342,11 @@ int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
}
EXPORT_SYMBOL_GPL(sched_setattr);
+int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
+{
+ return __sched_setscheduler(p, attr, false, true);
+}
+
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
* @p: the task in question.
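The smp_store_release() in finish_task() pairs with the smp_cond_load_acquire() on ->on_cpu in try_to_wake_up(), which is the pairing the updated comments now name. A C11 userspace model of that release/acquire handshake; it substitutes a plain spin loop for smp_cond_load_acquire():

#include <stdatomic.h>

struct task { atomic_int on_cpu; int state; };

/* Switching-out CPU: everything before the release is published. */
void context_switch_out(struct task *prev)
{
	prev->state = 1;	/* e.g. a sleeping state */
	atomic_store_explicit(&prev->on_cpu, 0,
			      memory_order_release);	/* finish_task() */
}

/* Waking CPU: spins until the release store is observed. */
void waker_wait(struct task *p)
{
	while (atomic_load_explicit(&p->on_cpu, memory_order_acquire))
		;
	/* p->state == 1 is guaranteed to be visible from here on. */
}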
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d6717a3331a1..dd062a1c8cf0 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -60,7 +60,8 @@ struct sugov_cpu {
u64 last_update;
/* The fields below are only needed when sharing a policy. */
- unsigned long util;
+ unsigned long util_cfs;
+ unsigned long util_dl;
unsigned long max;
unsigned int flags;
@@ -176,21 +177,28 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
return cpufreq_driver_resolve_freq(policy, freq);
}
-static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
+static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
- struct rq *rq = cpu_rq(cpu);
- unsigned long cfs_max;
+ struct rq *rq = cpu_rq(sg_cpu->cpu);
- cfs_max = arch_scale_cpu_capacity(NULL, cpu);
+ sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+ sg_cpu->util_cfs = cpu_util_cfs(rq);
+ sg_cpu->util_dl = cpu_util_dl(rq);
+}
- *util = min(rq->cfs.avg.util_avg, cfs_max);
- *max = cfs_max;
+static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
+{
+ /*
+ * Ideally we would like to set util_dl as min/guaranteed freq and
+ * util_cfs + util_dl as requested freq. However, cpufreq is not yet
+ * ready for such an interface. So, we only do the latter for now.
+ */
+ return min(sg_cpu->util_cfs + sg_cpu->util_dl, sg_cpu->max);
}
-static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
- unsigned int flags)
+static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time)
{
- if (flags & SCHED_CPUFREQ_IOWAIT) {
+ if (sg_cpu->flags & SCHED_CPUFREQ_IOWAIT) {
if (sg_cpu->iowait_boost_pending)
return;
@@ -264,7 +272,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned int next_f;
bool busy;
- sugov_set_iowait_boost(sg_cpu, time, flags);
+ sugov_set_iowait_boost(sg_cpu, time);
sg_cpu->last_update = time;
if (!sugov_should_update_freq(sg_policy, time))
@@ -272,10 +280,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
busy = sugov_cpu_is_busy(sg_cpu);
- if (flags & SCHED_CPUFREQ_RT_DL) {
+ if (flags & SCHED_CPUFREQ_RT) {
next_f = policy->cpuinfo.max_freq;
} else {
- sugov_get_util(&util, &max, sg_cpu->cpu);
+ sugov_get_util(sg_cpu);
+ max = sg_cpu->max;
+ util = sugov_aggregate_util(sg_cpu);
sugov_iowait_boost(sg_cpu, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
/*
@@ -305,23 +315,27 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
s64 delta_ns;
/*
- * If the CPU utilization was last updated before the previous
- * frequency update and the time elapsed between the last update
- * of the CPU utilization and the last frequency update is long
- * enough, don't take the CPU into account as it probably is
- * idle now (and clear iowait_boost for it).
+ * If the CFS CPU utilization was last updated before the
+ * previous frequency update and the time elapsed between the
+ * last update of the CPU utilization and the last frequency
+ * update is long enough, reset iowait_boost and util_cfs, as
+ * they are now probably stale. However, still consider the
+ * CPU contribution if it has some DEADLINE utilization
+ * (util_dl).
*/
delta_ns = time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
j_sg_cpu->iowait_boost_pending = false;
- continue;
+ j_sg_cpu->util_cfs = 0;
+ if (j_sg_cpu->util_dl == 0)
+ continue;
}
- if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
+ if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
return policy->cpuinfo.max_freq;
- j_util = j_sg_cpu->util;
j_max = j_sg_cpu->max;
+ j_util = sugov_aggregate_util(j_sg_cpu);
if (j_util * max > j_max * util) {
util = j_util;
max = j_max;
@@ -338,22 +352,18 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
- unsigned long util, max;
unsigned int next_f;
- sugov_get_util(&util, &max, sg_cpu->cpu);
-
raw_spin_lock(&sg_policy->update_lock);
- sg_cpu->util = util;
- sg_cpu->max = max;
+ sugov_get_util(sg_cpu);
sg_cpu->flags = flags;
- sugov_set_iowait_boost(sg_cpu, time, flags);
+ sugov_set_iowait_boost(sg_cpu, time);
sg_cpu->last_update = time;
if (sugov_should_update_freq(sg_policy, time)) {
- if (flags & SCHED_CPUFREQ_RT_DL)
+ if (flags & SCHED_CPUFREQ_RT)
next_f = sg_policy->policy->cpuinfo.max_freq;
else
next_f = sugov_next_freq_shared(sg_cpu, time);
@@ -383,9 +393,9 @@ static void sugov_irq_work(struct irq_work *irq_work)
sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
/*
- * For RT and deadline tasks, the schedutil governor shoots the
- * frequency to maximum. Special care must be taken to ensure that this
- * kthread doesn't result in the same behavior.
+ * For RT tasks, the schedutil governor shoots the frequency to maximum.
+ * Special care must be taken to ensure that this kthread doesn't result
+ * in the same behavior.
*
* This is (mostly) guaranteed by the work_in_progress flag. The flag is
* updated only at the end of the sugov_work() function and before that
@@ -470,7 +480,20 @@ static void sugov_policy_free(struct sugov_policy *sg_policy)
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
struct task_struct *thread;
- struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
+ struct sched_attr attr = {
+ .size = sizeof(struct sched_attr),
+ .sched_policy = SCHED_DEADLINE,
+ .sched_flags = SCHED_FLAG_SUGOV,
+ .sched_nice = 0,
+ .sched_priority = 0,
+ /*
+ * Fake (unused) bandwidth; workaround to "fix"
+ * priority inheritance.
+ */
+ .sched_runtime = 1000000,
+ .sched_deadline = 10000000,
+ .sched_period = 10000000,
+ };
struct cpufreq_policy *policy = sg_policy->policy;
int ret;
@@ -488,10 +511,10 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
return PTR_ERR(thread);
}
- ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
+ ret = sched_setattr_nocheck(thread, &attr);
if (ret) {
kthread_stop(thread);
- pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
+ pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
return ret;
}
@@ -655,7 +678,7 @@ static int sugov_start(struct cpufreq_policy *policy)
memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
- sg_cpu->flags = SCHED_CPUFREQ_RT;
+ sg_cpu->flags = 0;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
}
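
As a rough illustration of the aggregation rule introduced above, the following is a standalone user-space sketch, not kernel code: the utilization values are invented, and the roughly 25% headroom that the real get_next_freq() adds on top is omitted for brevity.

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    /* sugov_aggregate_util() + get_next_freq(), reduced to the core idea */
    static unsigned long next_freq(unsigned long max_freq, unsigned long util_cfs,
                                   unsigned long util_dl, unsigned long max)
    {
            unsigned long util = util_cfs + util_dl;

            if (util > max)
                    util = max;     /* min(util_cfs + util_dl, max) */
            return max_freq * util / max;
    }

    int main(void)
    {
            /* hypothetical load: 60% CFS utilization plus 30% DL bandwidth */
            printf("%lu kHz\n",
                   next_freq(2000000UL, 614, 307, SCHED_CAPACITY_SCALE));
            return 0;
    }
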
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2473736c7616..9bb0e0c412ec 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -78,7 +78,7 @@ static inline int dl_bw_cpus(int i)
#endif
static inline
-void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
+void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
@@ -86,10 +86,12 @@ void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_rq->running_bw += dl_bw;
SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
+ /* kick cpufreq (see the comment in kernel/sched/sched.h). */
+ cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL);
}
static inline
-void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
+void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
@@ -98,10 +100,12 @@ void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
if (dl_rq->running_bw > old)
dl_rq->running_bw = 0;
+ /* kick cpufreq (see the comment in kernel/sched/sched.h). */
+ cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL);
}
static inline
-void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
+void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
@@ -111,7 +115,7 @@ void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
}
static inline
-void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
+void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
@@ -123,16 +127,46 @@ void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}
+static inline
+void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ if (!dl_entity_is_special(dl_se))
+ __add_rq_bw(dl_se->dl_bw, dl_rq);
+}
+
+static inline
+void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ if (!dl_entity_is_special(dl_se))
+ __sub_rq_bw(dl_se->dl_bw, dl_rq);
+}
+
+static inline
+void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ if (!dl_entity_is_special(dl_se))
+ __add_running_bw(dl_se->dl_bw, dl_rq);
+}
+
+static inline
+void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ if (!dl_entity_is_special(dl_se))
+ __sub_running_bw(dl_se->dl_bw, dl_rq);
+}
+
void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
struct rq *rq;
+ BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
+
if (task_on_rq_queued(p))
return;
rq = task_rq(p);
if (p->dl.dl_non_contending) {
- sub_running_bw(p->dl.dl_bw, &rq->dl);
+ sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
@@ -144,8 +178,8 @@ void dl_change_utilization(struct task_struct *p, u64 new_bw)
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
}
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
- add_rq_bw(new_bw, &rq->dl);
+ __sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ __add_rq_bw(new_bw, &rq->dl);
}
/*
@@ -217,6 +251,9 @@ static void task_non_contending(struct task_struct *p)
if (dl_se->dl_runtime == 0)
return;
+ if (dl_entity_is_special(dl_se))
+ return;
+
WARN_ON(hrtimer_active(&dl_se->inactive_timer));
WARN_ON(dl_se->dl_non_contending);
@@ -236,12 +273,12 @@ static void task_non_contending(struct task_struct *p)
*/
if (zerolag_time < 0) {
if (dl_task(p))
- sub_running_bw(dl_se->dl_bw, dl_rq);
+ sub_running_bw(dl_se, dl_rq);
if (!dl_task(p) || p->state == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
if (p->state == TASK_DEAD)
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ sub_rq_bw(&p->dl, &rq->dl);
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
__dl_clear_params(p);
@@ -268,7 +305,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
return;
if (flags & ENQUEUE_MIGRATED)
- add_rq_bw(dl_se->dl_bw, dl_rq);
+ add_rq_bw(dl_se, dl_rq);
if (dl_se->dl_non_contending) {
dl_se->dl_non_contending = 0;
@@ -289,7 +326,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
* when the "inactive timer" fired).
* So, add it back.
*/
- add_running_bw(dl_se->dl_bw, dl_rq);
+ add_running_bw(dl_se, dl_rq);
}
}
@@ -1114,7 +1151,8 @@ static void update_curr_dl(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_dl_entity *dl_se = &curr->dl;
- u64 delta_exec;
+ u64 delta_exec, scaled_delta_exec;
+ int cpu = cpu_of(rq);
if (!dl_task(curr) || !on_dl_rq(dl_se))
return;
@@ -1134,9 +1172,6 @@ static void update_curr_dl(struct rq *rq)
return;
}
- /* kick cpufreq (see the comment in kernel/sched/sched.h). */
- cpufreq_update_util(rq, SCHED_CPUFREQ_DL);
-
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
@@ -1148,13 +1183,39 @@ static void update_curr_dl(struct rq *rq)
sched_rt_avg_update(rq, delta_exec);
- if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
- delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
- dl_se->runtime -= delta_exec;
+ if (dl_entity_is_special(dl_se))
+ return;
+
+ /*
+ * For tasks that participate in GRUB, we implement GRUB-PA: the
+ * spare reclaimed bandwidth is used to clock down frequency.
+ *
+ * For the others, we still need to scale reservation parameters
+ * according to current frequency and CPU maximum capacity.
+ */
+ if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
+ scaled_delta_exec = grub_reclaim(delta_exec,
+ rq,
+ &curr->dl);
+ } else {
+ unsigned long scale_freq = arch_scale_freq_capacity(cpu);
+ unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+
+ scaled_delta_exec = cap_scale(delta_exec, scale_freq);
+ scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
+ }
+
+ dl_se->runtime -= scaled_delta_exec;
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
dl_se->dl_throttled = 1;
+
+ /* If requested, inform the user about runtime overruns. */
+ if (dl_runtime_exceeded(dl_se) &&
+ (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
+ dl_se->dl_overrun = 1;
+
__dequeue_task_dl(rq, curr, 0);
if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
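
The two cap_scale() steps above are plain fixed-point multiplications against SCHED_CAPACITY_SCALE. A standalone sketch with invented scale factors shows their effect on the consumed runtime:

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define cap_scale(v, s) ((v) * (s) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
            unsigned long long delta_exec = 1000000ULL; /* 1ms of wall time */
            unsigned long scale_freq = 512; /* CPU running at half max freq */
            unsigned long scale_cpu = 768;  /* mid-size core, 75% capacity */

            /*
             * 1ms at half frequency on a 75%-capacity core consumes only
             * 375us of the reservation's runtime.
             */
            unsigned long long scaled = cap_scale(delta_exec, scale_freq);
            scaled = cap_scale(scaled, scale_cpu);
            printf("%llu\n", scaled); /* 375000 */
            return 0;
    }
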
@@ -1204,8 +1265,8 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
- sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
- sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
+ sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
+ sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
dl_se->dl_non_contending = 0;
}
@@ -1222,7 +1283,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
sched_clock_tick();
update_rq_clock(rq);
- sub_running_bw(dl_se->dl_bw, &rq->dl);
+ sub_running_bw(dl_se, &rq->dl);
dl_se->dl_non_contending = 0;
unlock:
task_rq_unlock(rq, p, &rf);
@@ -1416,8 +1477,8 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
dl_check_constrained_dl(&p->dl);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
- add_rq_bw(p->dl.dl_bw, &rq->dl);
- add_running_bw(p->dl.dl_bw, &rq->dl);
+ add_rq_bw(&p->dl, &rq->dl);
+ add_running_bw(&p->dl, &rq->dl);
}
/*
@@ -1457,8 +1518,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
__dequeue_task_dl(rq, p, flags);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
- sub_running_bw(p->dl.dl_bw, &rq->dl);
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ sub_running_bw(&p->dl, &rq->dl);
+ sub_rq_bw(&p->dl, &rq->dl);
}
/*
@@ -1564,7 +1625,7 @@ static void migrate_task_rq_dl(struct task_struct *p)
*/
raw_spin_lock(&rq->lock);
if (p->dl.dl_non_contending) {
- sub_running_bw(p->dl.dl_bw, &rq->dl);
+ sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
@@ -1576,7 +1637,7 @@ static void migrate_task_rq_dl(struct task_struct *p)
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
}
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ sub_rq_bw(&p->dl, &rq->dl);
raw_spin_unlock(&rq->lock);
}
@@ -2019,11 +2080,11 @@ retry:
}
deactivate_task(rq, next_task, 0);
- sub_running_bw(next_task->dl.dl_bw, &rq->dl);
- sub_rq_bw(next_task->dl.dl_bw, &rq->dl);
+ sub_running_bw(&next_task->dl, &rq->dl);
+ sub_rq_bw(&next_task->dl, &rq->dl);
set_task_cpu(next_task, later_rq->cpu);
- add_rq_bw(next_task->dl.dl_bw, &later_rq->dl);
- add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
+ add_rq_bw(&next_task->dl, &later_rq->dl);
+ add_running_bw(&next_task->dl, &later_rq->dl);
activate_task(later_rq, next_task, 0);
ret = 1;
@@ -2111,11 +2172,11 @@ static void pull_dl_task(struct rq *this_rq)
resched = true;
deactivate_task(src_rq, p, 0);
- sub_running_bw(p->dl.dl_bw, &src_rq->dl);
- sub_rq_bw(p->dl.dl_bw, &src_rq->dl);
+ sub_running_bw(&p->dl, &src_rq->dl);
+ sub_rq_bw(&p->dl, &src_rq->dl);
set_task_cpu(p, this_cpu);
- add_rq_bw(p->dl.dl_bw, &this_rq->dl);
- add_running_bw(p->dl.dl_bw, &this_rq->dl);
+ add_rq_bw(&p->dl, &this_rq->dl);
+ add_running_bw(&p->dl, &this_rq->dl);
activate_task(this_rq, p, 0);
dmin = p->dl.deadline;
@@ -2224,7 +2285,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
task_non_contending(p);
if (!task_on_rq_queued(p))
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ sub_rq_bw(&p->dl, &rq->dl);
/*
* We cannot use inactive_task_timer() to invoke sub_running_bw()
@@ -2256,7 +2317,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
/* If p is not queued we will update its parameters at next wakeup. */
if (!task_on_rq_queued(p)) {
- add_rq_bw(p->dl.dl_bw, &rq->dl);
+ add_rq_bw(&p->dl, &rq->dl);
return;
}
@@ -2435,6 +2496,9 @@ int sched_dl_overflow(struct task_struct *p, int policy,
u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
int cpus, err = -1;
+ if (attr->sched_flags & SCHED_FLAG_SUGOV)
+ return 0;
+
/* !deadline task may carry old deadline bandwidth */
if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
return 0;
@@ -2521,6 +2585,10 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
*/
bool __checkparam_dl(const struct sched_attr *attr)
{
+ /* special dl tasks don't actually use any parameter */
+ if (attr->sched_flags & SCHED_FLAG_SUGOV)
+ return true;
+
/* deadline != 0 */
if (attr->sched_deadline == 0)
return false;
@@ -2566,6 +2634,7 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
+ dl_se->dl_overrun = 0;
}
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
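
For contrast with the in-kernel SCHED_FLAG_SUGOV entity, which bypasses sched_dl_overflow() and __checkparam_dl() entirely, an ordinary deadline task still goes through the full admission test. A hedged user-space sketch, assuming the libc exports SYS_sched_setattr and that SCHED_FLAG_DL_OVERRUN keeps the UAPI value 0x04 from this series:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/types.h>

    #ifndef SCHED_DEADLINE
    #define SCHED_DEADLINE 6
    #endif
    #ifndef SCHED_FLAG_DL_OVERRUN
    #define SCHED_FLAG_DL_OVERRUN 0x04 /* assumed UAPI value */
    #endif

    /* layout as consumed by __checkparam_dl()/__getparam_dl() */
    struct sched_attr {
            __u32 size;
            __u32 sched_policy;
            __u64 sched_flags;
            __s32 sched_nice;
            __u32 sched_priority;
            __u64 sched_runtime;
            __u64 sched_deadline;
            __u64 sched_period;
    };

    int main(void)
    {
            struct sched_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.sched_policy = SCHED_DEADLINE;
            attr.sched_flags = SCHED_FLAG_DL_OVERRUN; /* request dl_overrun notification */
            attr.sched_runtime  =  10 * 1000 * 1000;  /* 10ms of runtime... */
            attr.sched_deadline =  30 * 1000 * 1000;  /* ...within 30ms... */
            attr.sched_period   = 100 * 1000 * 1000;  /* ...every 100ms */

            if (syscall(SYS_sched_setattr, 0, &attr, 0))
                    perror("sched_setattr");
            return 0;
    }
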
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2fe3aa853e4d..7b6535987500 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3020,9 +3020,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
/*
* There are a few boundary cases this might miss but it should
* get called often enough that that should (hopefully) not be
- * a real problem -- added to that it only calls on the local
- * CPU, so if we enqueue remotely we'll miss an update, but
- * the next tick/schedule should update.
+ * a real problem.
*
* It will not get called when we go idle, because the idle
* thread is a different class (!fair), nor will the utilization
@@ -3091,8 +3089,6 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
return c1 + c2 + c3;
}
-#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-
/*
* Accumulate the three separate parts of the sum; d1 the remainder
* of the last (incomplete) period, d2 the span of full periods and d3
@@ -3122,7 +3118,7 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
u64 periods;
- scale_freq = arch_scale_freq_capacity(NULL, cpu);
+ scale_freq = arch_scale_freq_capacity(cpu);
scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
delta += sa->period_contrib;
@@ -4365,12 +4361,12 @@ static inline bool cfs_bandwidth_used(void)
void cfs_bandwidth_usage_inc(void)
{
- static_key_slow_inc(&__cfs_bandwidth_used);
+ static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
}
void cfs_bandwidth_usage_dec(void)
{
- static_key_slow_dec(&__cfs_bandwidth_used);
+ static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
}
#else /* HAVE_JUMP_LABEL */
static bool cfs_bandwidth_used(void)
@@ -5689,8 +5685,8 @@ static int wake_wide(struct task_struct *p)
* soonest. For the purpose of speed we only consider the waking and previous
* CPU.
*
- * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or
- * will be) idle.
+ * wake_affine_idle() - only considers 'now', it checks if the waking CPU is
+ * cache-affine and is (or will be) idle.
*
* wake_affine_weight() - considers the weight to reflect the average
* scheduling latency of the CPUs. This seems to work
@@ -5701,7 +5697,13 @@ static bool
wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int prev_cpu, int sync)
{
- if (idle_cpu(this_cpu))
+ /*
+ * If this_cpu is idle, it implies the wakeup is from interrupt
+ * context. Only allow the move if cache is shared. Otherwise an
+ * interrupt intensive workload could force all tasks onto one
+ * node depending on the IO topology or IRQ affinity settings.
+ */
+ if (idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
return true;
if (sync && cpu_rq(this_cpu)->nr_running == 1)
@@ -5765,12 +5767,12 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
return affine;
}
-static inline int task_util(struct task_struct *p);
-static int cpu_util_wake(int cpu, struct task_struct *p);
+static inline unsigned long task_util(struct task_struct *p);
+static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
{
- return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
+ return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
}
/*
@@ -5950,7 +5952,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
}
} else if (shallowest_idle_cpu == -1) {
load = weighted_cpuload(cpu_rq(i));
- if (load < min_load || (load == min_load && i == this_cpu)) {
+ if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
}
@@ -6247,7 +6249,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
* capacity_orig) as it useful for predicting the capacity required after task
* migrations (scheduler-driven DVFS).
*/
-static int cpu_util(int cpu)
+static unsigned long cpu_util(int cpu)
{
unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
unsigned long capacity = capacity_orig_of(cpu);
@@ -6255,7 +6257,7 @@ static int cpu_util(int cpu)
return (util >= capacity) ? capacity : util;
}
-static inline int task_util(struct task_struct *p)
+static inline unsigned long task_util(struct task_struct *p)
{
return p->se.avg.util_avg;
}
@@ -6264,7 +6266,7 @@ static inline int task_util(struct task_struct *p)
* cpu_util_wake: Compute cpu utilization with any contributions from
* the waking task p removed.
*/
-static int cpu_util_wake(int cpu, struct task_struct *p)
+static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
{
unsigned long util, capacity;
@@ -6449,8 +6451,7 @@ static void task_dead_fair(struct task_struct *p)
}
#endif /* CONFIG_SMP */
-static unsigned long
-wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
+static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
@@ -6492,7 +6493,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
if (vdiff <= 0)
return -1;
- gran = wakeup_gran(curr, se);
+ gran = wakeup_gran(se);
if (vdiff > gran)
return 1;
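
Two of the fair.c changes above are simple clamps: cpu_util() caps utilization at capacity, and capacity_spare_wake() can no longer go negative now that it uses capacity_of(), which subtracts RT/IRQ pressure. A standalone sketch with invented numbers:

    #include <stdio.h>

    /* cpu_util(): utilization is capped at the CPU's capacity */
    static unsigned long cpu_util(unsigned long util_avg, unsigned long capacity)
    {
            return util_avg >= capacity ? capacity : util_avg;
    }

    /* capacity_spare_wake(): spare capacity is clamped at zero */
    static unsigned long capacity_spare(unsigned long capacity, unsigned long util)
    {
            long spare = (long)capacity - (long)util;

            return spare > 0 ? (unsigned long)spare : 0; /* max_t(long, ..., 0) */
    }

    int main(void)
    {
            printf("%lu\n", cpu_util(1300, 1024));      /* clamped to 1024 */
            printf("%lu\n", capacity_spare(600, 700));  /* 0, not wrapped */
            return 0;
    }
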
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index dd7908743dab..9bcbacba82a8 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -89,7 +89,9 @@ static int membarrier_private_expedited(void)
rcu_read_unlock();
}
if (!fallback) {
+ preempt_disable();
smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+ preempt_enable();
free_cpumask_var(tmpmask);
}
cpus_read_unlock();
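
The preempt_disable()/preempt_enable() pair above keeps the calling CPU stable across smp_call_function_many(), which must not be invoked preemptibly. For reference, the user-facing path that reaches this code is the membarrier(2) expedited command; a minimal sketch, assuming <linux/membarrier.h> provides the command constants:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/membarrier.h>

    int main(void)
    {
            /* registration is required before the expedited command */
            if (syscall(__NR_membarrier, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
                    perror("register");
            if (syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
                    perror("membarrier");
            return 0;
    }
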
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 665ace2fc558..862a513adca3 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2212,7 +2212,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
queue_push_tasks(rq);
#endif /* CONFIG_SMP */
- if (p->prio < rq->curr->prio)
+ if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
resched_curr(rq);
}
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b19552a212de..2e95505e23c6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -156,13 +156,39 @@ static inline int task_has_dl_policy(struct task_struct *p)
return dl_policy(p->policy);
}
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
+/*
+ * !! For sched_setattr_nocheck() (kernel) only !!
+ *
+ * This is actually gross. :(
+ *
+ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
+ * tasks, while still being able to sleep. We need this on platforms that cannot
+ * atomically change clock frequency. Remove this once fast switching is
+ * available on such platforms.
+ *
+ * SUGOV stands for SchedUtil GOVernor.
+ */
+#define SCHED_FLAG_SUGOV 0x10000000
+
+static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
+{
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+ return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
+#else
+ return false;
+#endif
+}
+
/*
* Tells if entity @a should preempt entity @b.
*/
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
- return dl_time_before(a->deadline, b->deadline);
+ return dl_entity_is_special(a) ||
+ dl_time_before(a->deadline, b->deadline);
}
/*
@@ -1328,47 +1354,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
# define finish_arch_post_lock_switch() do { } while (0)
#endif
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
- /*
- * We can optimise this out completely for !SMP, because the
- * SMP rebalancing from interrupt is the only thing that cares
- * here.
- */
- next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
- /*
- * After ->on_cpu is cleared, the task can be moved to a different CPU.
- * We must ensure this doesn't happen until the switch is completely
- * finished.
- *
- * In particular, the load of prev->state in finish_task_switch() must
- * happen before this.
- *
- * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
- */
- smp_store_release(&prev->on_cpu, 0);
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- /* this is a valid case when another task releases the spinlock */
- rq->lock.owner = current;
-#endif
- /*
- * If we are tracking spinlock dependencies then we have to
- * fix up the runqueue lock - which gets 'carried over' from
- * prev into current:
- */
- spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
- raw_spin_unlock_irq(&rq->lock);
-}
-
/*
* wake flags
*/
@@ -1687,17 +1672,17 @@ static inline int hrtick_enabled(struct rq *rq)
#endif /* CONFIG_SCHED_HRTICK */
-#ifdef CONFIG_SMP
-extern void sched_avg_update(struct rq *rq);
-
#ifndef arch_scale_freq_capacity
static __always_inline
-unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+unsigned long arch_scale_freq_capacity(int cpu)
{
return SCHED_CAPACITY_SCALE;
}
#endif
+#ifdef CONFIG_SMP
+extern void sched_avg_update(struct rq *rq);
+
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -1711,10 +1696,17 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
- rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
+ rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
sched_avg_update(rq);
}
#else
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif
@@ -2096,14 +2088,14 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
* The way cpufreq is currently arranged requires it to evaluate the CPU
* performance state (frequency/voltage) on a regular basis to prevent it from
* being stuck in a completely inadequate performance level for too long.
- * That is not guaranteed to happen if the updates are only triggered from CFS,
- * though, because they may not be coming in if RT or deadline tasks are active
- * all the time (or there are RT and DL tasks only).
+ * That is not guaranteed to happen if the updates are only triggered from CFS
+ * and DL, though, because they may not be coming in if only RT tasks are
+ * active all the time (or there are RT tasks only).
*
- * As a workaround for that issue, this function is called by the RT and DL
- * sched classes to trigger extra cpufreq updates to prevent it from stalling,
+ * As a workaround for that issue, this function is called periodically by the
+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
* but that really is a band-aid. Going forward it should be replaced with
- * solutions targeted more specifically at RT and DL tasks.
+ * solutions targeted more specifically at RT tasks.
*/
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
@@ -2125,3 +2117,17 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#else /* arch_scale_freq_capacity */
#define arch_scale_freq_invariant() (false)
#endif
+
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+ return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
+}
+
+static inline unsigned long cpu_util_cfs(struct rq *rq)
+{
+ return rq->cfs.avg.util_avg;
+}
+
+#endif
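
cpu_util_dl() above converts the DEADLINE running bandwidth, which to_ratio() stores in 20-bit fixed point (BW_SHIFT), into the 1024-based capacity scale that schedutil works with. A standalone sketch of the arithmetic:

    #include <stdio.h>

    #define BW_SHIFT             20
    #define SCHED_CAPACITY_SCALE 1024ULL

    int main(void)
    {
            /* a quarter of a CPU reserved by DL tasks, as to_ratio() encodes it */
            unsigned long long running_bw = (1ULL << BW_SHIFT) / 4;

            unsigned long long util = running_bw * SCHED_CAPACITY_SCALE >> BW_SHIFT;
            printf("%llu\n", util); /* 256 == SCHED_CAPACITY_SCALE / 4 */
            return 0;
    }
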
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 5f0dfb2abb8d..940fa408a288 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -515,7 +515,7 @@ void put_seccomp_filter(struct task_struct *tsk)
static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
- memset(info, 0, sizeof(*info));
+ clear_siginfo(info);
info->si_signo = SIGSYS;
info->si_code = SYS_SECCOMP;
info->si_call_addr = (void __user *)KSTK_EIP(current);
@@ -978,49 +978,68 @@ long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
}
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
-long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
- void __user *data)
+static struct seccomp_filter *get_nth_filter(struct task_struct *task,
+ unsigned long filter_off)
{
- struct seccomp_filter *filter;
- struct sock_fprog_kern *fprog;
- long ret;
- unsigned long count = 0;
-
- if (!capable(CAP_SYS_ADMIN) ||
- current->seccomp.mode != SECCOMP_MODE_DISABLED) {
- return -EACCES;
- }
+ struct seccomp_filter *orig, *filter;
+ unsigned long count;
+ /*
+ * Note: this is only correct because the caller should be the (ptrace)
+ * tracer of the task, otherwise lock_task_sighand is needed.
+ */
spin_lock_irq(&task->sighand->siglock);
+
if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
- ret = -EINVAL;
- goto out;
+ spin_unlock_irq(&task->sighand->siglock);
+ return ERR_PTR(-EINVAL);
}
- filter = task->seccomp.filter;
- while (filter) {
- filter = filter->prev;
+ orig = task->seccomp.filter;
+ __get_seccomp_filter(orig);
+ spin_unlock_irq(&task->sighand->siglock);
+
+ count = 0;
+ for (filter = orig; filter; filter = filter->prev)
count++;
- }
if (filter_off >= count) {
- ret = -ENOENT;
+ filter = ERR_PTR(-ENOENT);
goto out;
}
- count -= filter_off;
- filter = task->seccomp.filter;
- while (filter && count > 1) {
- filter = filter->prev;
+ count -= filter_off;
+ for (filter = orig; filter && count > 1; filter = filter->prev)
count--;
- }
if (WARN_ON(count != 1 || !filter)) {
- /* The filter tree shouldn't shrink while we're using it. */
- ret = -ENOENT;
+ filter = ERR_PTR(-ENOENT);
goto out;
}
+ __get_seccomp_filter(filter);
+
+out:
+ __put_seccomp_filter(orig);
+ return filter;
+}
+
+long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
+ void __user *data)
+{
+ struct seccomp_filter *filter;
+ struct sock_fprog_kern *fprog;
+ long ret;
+
+ if (!capable(CAP_SYS_ADMIN) ||
+ current->seccomp.mode != SECCOMP_MODE_DISABLED) {
+ return -EACCES;
+ }
+
+ filter = get_nth_filter(task, filter_off);
+ if (IS_ERR(filter))
+ return PTR_ERR(filter);
+
fprog = filter->prog->orig_prog;
if (!fprog) {
/* This must be a new non-cBPF filter, since we save
@@ -1035,17 +1054,44 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
if (!data)
goto out;
- __get_seccomp_filter(filter);
- spin_unlock_irq(&task->sighand->siglock);
-
if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
ret = -EFAULT;
+out:
__put_seccomp_filter(filter);
return ret;
+}
-out:
- spin_unlock_irq(&task->sighand->siglock);
+long seccomp_get_metadata(struct task_struct *task,
+ unsigned long size, void __user *data)
+{
+ long ret;
+ struct seccomp_filter *filter;
+ struct seccomp_metadata kmd = {};
+
+ if (!capable(CAP_SYS_ADMIN) ||
+ current->seccomp.mode != SECCOMP_MODE_DISABLED) {
+ return -EACCES;
+ }
+
+ size = min_t(unsigned long, size, sizeof(kmd));
+
+ if (copy_from_user(&kmd, data, size))
+ return -EFAULT;
+
+ filter = get_nth_filter(task, kmd.filter_off);
+ if (IS_ERR(filter))
+ return PTR_ERR(filter);
+
+ memset(&kmd, 0, sizeof(kmd));
+ if (filter->log)
+ kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
+
+ ret = size;
+ if (copy_to_user(data, &kmd, size))
+ ret = -EFAULT;
+
+ __put_seccomp_filter(filter);
return ret;
}
#endif
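
The new seccomp_get_metadata() is reached through ptrace by a tracer; kmd.filter_off selects the filter, and only SECCOMP_FILTER_FLAG_LOG is reported back so far. A hedged user-space sketch, with fallback definitions in case the matching UAPI additions (PTRACE_SECCOMP_GET_METADATA, struct seccomp_metadata) are not in the installed headers:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <linux/types.h>
    #include <linux/seccomp.h>

    #ifndef PTRACE_SECCOMP_GET_METADATA
    #define PTRACE_SECCOMP_GET_METADATA 0x420d /* assumed to match the UAPI */
    struct seccomp_metadata {
            __u64 filter_off;
            __u64 flags;
    };
    #endif

    int main(int argc, char **argv)
    {
            struct seccomp_metadata md = { .filter_off = 0 };
            pid_t tracee;

            if (argc != 2)
                    return 1;
            tracee = (pid_t)atoi(argv[1]); /* must already be ptrace-attached */

            if (ptrace(PTRACE_SECCOMP_GET_METADATA, tracee, sizeof(md), &md) < 0) {
                    perror("PTRACE_SECCOMP_GET_METADATA");
                    return 1;
            }
            printf("filter 0: %s\n",
                   (md.flags & SECCOMP_FILTER_FLAG_LOG) ? "log" : "quiet");
            return 0;
    }
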
diff --git a/kernel/signal.c b/kernel/signal.c
index 9558664bd9ec..c6e4c83dc090 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -40,6 +40,7 @@
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
+#include <linux/livepatch.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>
@@ -165,7 +166,8 @@ void recalc_sigpending_and_wake(struct task_struct *t)
void recalc_sigpending(void)
{
- if (!recalc_sigpending_tsk(current) && !freezing(current))
+ if (!recalc_sigpending_tsk(current) && !freezing(current) &&
+ !klp_patch_pending(current))
clear_thread_flag(TIF_SIGPENDING);
}
@@ -549,6 +551,7 @@ still_pending:
* a fast-pathed signal or we must have been
* out of queue space. So zero out the info.
*/
+ clear_siginfo(info);
info->si_signo = sig;
info->si_errno = 0;
info->si_code = SI_USER;
@@ -642,6 +645,9 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
spin_unlock(&tsk->sighand->siglock);
posixtimer_rearm(info);
spin_lock(&tsk->sighand->siglock);
+
+ /* Don't expose the si_sys_private value to userspace */
+ info->si_sys_private = 0;
}
#endif
return signr;
@@ -1043,6 +1049,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
list_add_tail(&q->list, &pending->list);
switch ((unsigned long) info) {
case (unsigned long) SEND_SIG_NOINFO:
+ clear_siginfo(&q->info);
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_USER;
@@ -1051,6 +1058,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
break;
case (unsigned long) SEND_SIG_PRIV:
+ clear_siginfo(&q->info);
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_KERNEL;
@@ -1485,6 +1493,129 @@ force_sigsegv(int sig, struct task_struct *p)
return 0;
}
+int force_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+#ifdef __ARCH_SI_TRAPNO
+ info.si_trapno = trapno;
+#endif
+#ifdef __ia64__
+ info.si_imm = imm;
+ info.si_flags = flags;
+ info.si_isr = isr;
+#endif
+ return force_sig_info(info.si_signo, &info, t);
+}
+
+int send_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+#ifdef __ARCH_SI_TRAPNO
+ info.si_trapno = trapno;
+#endif
+#ifdef __ia64__
+ info.si_imm = imm;
+ info.si_flags = flags;
+ info.si_isr = isr;
+#endif
+ return send_sig_info(info.si_signo, &info, t);
+}
+
+#if defined(BUS_MCEERR_AO) && defined(BUS_MCEERR_AR)
+int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
+{
+ struct siginfo info;
+
+ WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+ info.si_addr_lsb = lsb;
+ return force_sig_info(info.si_signo, &info, t);
+}
+
+int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
+{
+ struct siginfo info;
+
+ WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+ info.si_addr_lsb = lsb;
+ return send_sig_info(info.si_signo, &info, t);
+}
+EXPORT_SYMBOL(send_sig_mceerr);
+#endif
+
+#ifdef SEGV_BNDERR
+int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = SEGV_BNDERR;
+ info.si_addr = addr;
+ info.si_lower = lower;
+ info.si_upper = upper;
+ return force_sig_info(info.si_signo, &info, current);
+}
+#endif
+
+#ifdef SEGV_PKUERR
+int force_sig_pkuerr(void __user *addr, u32 pkey)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = SEGV_PKUERR;
+ info.si_addr = addr;
+ info.si_pkey = pkey;
+ return force_sig_info(info.si_signo, &info, current);
+}
+#endif
+
+/* For the crazy architectures that include trap information in
+ * the errno field instead of an actual errno value.
+ */
+int force_sig_ptrace_errno_trap(int errno, void __user *addr)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = errno;
+ info.si_code = TRAP_HWBKPT;
+ info.si_addr = addr;
+ return force_sig_info(info.si_signo, &info, current);
+}
+
int kill_pgrp(struct pid *pid, int sig, int priv)
{
int ret;
@@ -1623,6 +1754,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
sig = SIGCHLD;
}
+ clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
/*
@@ -1717,6 +1849,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
parent = tsk->real_parent;
}
+ clear_siginfo(&info);
info.si_signo = SIGCHLD;
info.si_errno = 0;
/*
@@ -1929,7 +2062,7 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
{
siginfo_t info;
- memset(&info, 0, sizeof info);
+ clear_siginfo(&info);
info.si_signo = signr;
info.si_code = exit_code;
info.si_pid = task_pid_vnr(current);
@@ -2136,6 +2269,7 @@ static int ptrace_signal(int signr, siginfo_t *info)
* have updated *info via PTRACE_SETSIGINFO.
*/
if (signr != info->si_signo) {
+ clear_siginfo(info);
info->si_signo = signr;
info->si_errno = 0;
info->si_code = SI_USER;
@@ -2688,9 +2822,7 @@ enum siginfo_layout siginfo_layout(int sig, int si_code)
#endif
[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
[SIGPOLL] = { NSIGPOLL, SIL_POLL },
-#ifdef __ARCH_SIGSYS
[SIGSYS] = { NSIGSYS, SIL_SYS },
-#endif
};
if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit))
layout = filter[sig].layout;
@@ -2712,12 +2844,14 @@ enum siginfo_layout siginfo_layout(int sig, int si_code)
if ((sig == SIGFPE) && (si_code == FPE_FIXME))
layout = SIL_FAULT;
#endif
+#ifdef BUS_FIXME
+ if ((sig == SIGBUS) && (si_code == BUS_FIXME))
+ layout = SIL_FAULT;
+#endif
}
return layout;
}
-#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
-
int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -2756,13 +2890,21 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
#ifdef __ARCH_SI_TRAPNO
err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
-#ifdef BUS_MCEERR_AO
+#ifdef __ia64__
+ err |= __put_user(from->si_imm, &to->si_imm);
+ err |= __put_user(from->si_flags, &to->si_flags);
+ err |= __put_user(from->si_isr, &to->si_isr);
+#endif
/*
* Other callers might not initialize the si_lsb field,
* so check explicitly for the right codes here.
*/
- if (from->si_signo == SIGBUS &&
- (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
+#ifdef BUS_MCEERR_AR
+ if (from->si_signo == SIGBUS && from->si_code == BUS_MCEERR_AR)
+ err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+#endif
+#ifdef BUS_MCEERR_AO
+ if (from->si_signo == SIGBUS && from->si_code == BUS_MCEERR_AO)
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
@@ -2788,18 +2930,185 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_ptr, &to->si_ptr);
break;
-#ifdef __ARCH_SIGSYS
case SIL_SYS:
err |= __put_user(from->si_call_addr, &to->si_call_addr);
err |= __put_user(from->si_syscall, &to->si_syscall);
err |= __put_user(from->si_arch, &to->si_arch);
break;
-#endif
}
return err;
}
+#ifdef CONFIG_COMPAT
+int copy_siginfo_to_user32(struct compat_siginfo __user *to,
+ const struct siginfo *from)
+#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
+{
+ return __copy_siginfo_to_user32(to, from, in_x32_syscall());
+}
+int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
+ const struct siginfo *from, bool x32_ABI)
+#endif
+{
+ struct compat_siginfo new;
+ memset(&new, 0, sizeof(new));
+
+ new.si_signo = from->si_signo;
+ new.si_errno = from->si_errno;
+ new.si_code = from->si_code;
+ switch(siginfo_layout(from->si_signo, from->si_code)) {
+ case SIL_KILL:
+ new.si_pid = from->si_pid;
+ new.si_uid = from->si_uid;
+ break;
+ case SIL_TIMER:
+ new.si_tid = from->si_tid;
+ new.si_overrun = from->si_overrun;
+ new.si_int = from->si_int;
+ break;
+ case SIL_POLL:
+ new.si_band = from->si_band;
+ new.si_fd = from->si_fd;
+ break;
+ case SIL_FAULT:
+ new.si_addr = ptr_to_compat(from->si_addr);
+#ifdef __ARCH_SI_TRAPNO
+ new.si_trapno = from->si_trapno;
+#endif
+#ifdef BUS_MCEERR_AR
+ if ((from->si_signo == SIGBUS) && (from->si_code == BUS_MCEERR_AR))
+ new.si_addr_lsb = from->si_addr_lsb;
+#endif
+#ifdef BUS_MCEERR_AO
+ if ((from->si_signo == SIGBUS) && (from->si_code == BUS_MCEERR_AO))
+ new.si_addr_lsb = from->si_addr_lsb;
+#endif
+#ifdef SEGV_BNDERR
+ if ((from->si_signo == SIGSEGV) &&
+ (from->si_code == SEGV_BNDERR)) {
+ new.si_lower = ptr_to_compat(from->si_lower);
+ new.si_upper = ptr_to_compat(from->si_upper);
+ }
+#endif
+#ifdef SEGV_PKUERR
+ if ((from->si_signo == SIGSEGV) &&
+ (from->si_code == SEGV_PKUERR))
+ new.si_pkey = from->si_pkey;
+#endif
+
+ break;
+ case SIL_CHLD:
+ new.si_pid = from->si_pid;
+ new.si_uid = from->si_uid;
+ new.si_status = from->si_status;
+#ifdef CONFIG_X86_X32_ABI
+ if (x32_ABI) {
+ new._sifields._sigchld_x32._utime = from->si_utime;
+ new._sifields._sigchld_x32._stime = from->si_stime;
+ } else
+#endif
+ {
+ new.si_utime = from->si_utime;
+ new.si_stime = from->si_stime;
+ }
+ break;
+ case SIL_RT:
+ new.si_pid = from->si_pid;
+ new.si_uid = from->si_uid;
+ new.si_int = from->si_int;
+ break;
+ case SIL_SYS:
+ new.si_call_addr = ptr_to_compat(from->si_call_addr);
+ new.si_syscall = from->si_syscall;
+ new.si_arch = from->si_arch;
+ break;
+ }
+
+ if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int copy_siginfo_from_user32(struct siginfo *to,
+ const struct compat_siginfo __user *ufrom)
+{
+ struct compat_siginfo from;
+
+ if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
+ return -EFAULT;
+
+ clear_siginfo(to);
+ to->si_signo = from.si_signo;
+ to->si_errno = from.si_errno;
+ to->si_code = from.si_code;
+ switch(siginfo_layout(from.si_signo, from.si_code)) {
+ case SIL_KILL:
+ to->si_pid = from.si_pid;
+ to->si_uid = from.si_uid;
+ break;
+ case SIL_TIMER:
+ to->si_tid = from.si_tid;
+ to->si_overrun = from.si_overrun;
+ to->si_int = from.si_int;
+ break;
+ case SIL_POLL:
+ to->si_band = from.si_band;
+ to->si_fd = from.si_fd;
+ break;
+ case SIL_FAULT:
+ to->si_addr = compat_ptr(from.si_addr);
+#ifdef __ARCH_SI_TRAPNO
+ to->si_trapno = from.si_trapno;
+#endif
+#ifdef BUS_MCEERR_AR
+ if ((from.si_signo == SIGBUS) && (from.si_code == BUS_MCEERR_AR))
+ to->si_addr_lsb = from.si_addr_lsb;
+#endif
+#ifdef BUS_MCEERR_AO
+ if ((from.si_signo == SIGBUS) && (from.si_code == BUS_MCEERR_AO))
+ to->si_addr_lsb = from.si_addr_lsb;
+#endif
+#ifdef SEGV_BNDERR
+ if ((from.si_signo == SIGSEGV) && (from.si_code == SEGV_BNDERR)) {
+ to->si_lower = compat_ptr(from.si_lower);
+ to->si_upper = compat_ptr(from.si_upper);
+ }
+#endif
+#ifdef SEGV_PKUERR
+ if ((from.si_signo == SIGSEGV) && (from.si_code == SEGV_PKUERR))
+ to->si_pkey = from.si_pkey;
+#endif
+ break;
+ case SIL_CHLD:
+ to->si_pid = from.si_pid;
+ to->si_uid = from.si_uid;
+ to->si_status = from.si_status;
+#ifdef CONFIG_X86_X32_ABI
+ if (in_x32_syscall()) {
+ to->si_utime = from._sifields._sigchld_x32._utime;
+ to->si_stime = from._sifields._sigchld_x32._stime;
+ } else
#endif
+ {
+ to->si_utime = from.si_utime;
+ to->si_stime = from.si_stime;
+ }
+ break;
+ case SIL_RT:
+ to->si_pid = from.si_pid;
+ to->si_uid = from.si_uid;
+ to->si_int = from.si_int;
+ break;
+ case SIL_SYS:
+ to->si_call_addr = compat_ptr(from.si_call_addr);
+ to->si_syscall = from.si_syscall;
+ to->si_arch = from.si_arch;
+ break;
+ }
+ return 0;
+}
+#endif /* CONFIG_COMPAT */
/**
* do_sigtimedwait - wait for queued signals specified in @which
@@ -2937,6 +3246,7 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
struct siginfo info;
+ clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_USER;
@@ -2978,8 +3288,9 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
- struct siginfo info = {};
+ struct siginfo info;
+ clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_TKILL;
@@ -3060,7 +3371,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info = {};
+ siginfo_t info;
int ret = copy_siginfo_from_user32(&info, uinfo);
if (unlikely(ret))
return ret;
@@ -3104,7 +3415,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info = {};
+ siginfo_t info;
if (copy_siginfo_from_user32(&info, uinfo))
return -EFAULT;
@@ -3677,6 +3988,7 @@ void __init signals_init(void)
/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
!= offsetof(struct siginfo, _sifields._pad));
+ BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
@@ -3684,26 +3996,25 @@ void __init signals_init(void)
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
- * kdb_send_sig_info - Allows kdb to send signals without exposing
+ * kdb_send_sig - Allows kdb to send signals without exposing
* signal internals. This function checks if the required locks are
* available before calling the main signal code, to avoid kdb
* deadlocks.
*/
-void
-kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
+void kdb_send_sig(struct task_struct *t, int sig)
{
static struct task_struct *kdb_prev_t;
- int sig, new_t;
+ int new_t, ret;
if (!spin_trylock(&t->sighand->siglock)) {
kdb_printf("Can't do kill command now.\n"
"The sigmask lock is held somewhere else in "
"kernel, try again later\n");
return;
}
- spin_unlock(&t->sighand->siglock);
new_t = kdb_prev_t != t;
kdb_prev_t = t;
if (t->state != TASK_RUNNING && new_t) {
+ spin_unlock(&t->sighand->siglock);
kdb_printf("Process is not RUNNING, sending a signal from "
"kdb risks deadlock\n"
"on the run queue locks. "
@@ -3712,8 +4023,9 @@ kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
"the deadlock.\n");
return;
}
- sig = info->si_signo;
- if (send_sig_info(sig, info, t))
+ ret = send_signal(sig, SEND_SIG_PRIV, t, false);
+ spin_unlock(&t->sighand->siglock);
+ if (ret)
kdb_printf("Fail to deliver Signal %d to process %d.\n",
sig, t->pid);
else
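
On the receiving side, the fields that force_sig_fault() and friends now fill consistently are exactly what a SA_SIGINFO handler observes. A small user-space demo (the faulting address is deliberate and arbitrary):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void segv_handler(int sig, siginfo_t *info, void *ctx)
    {
            (void)ctx;
            /* si_signo/si_code/si_addr are the fields force_sig_fault() sets */
            fprintf(stderr, "sig %d code %d addr %p\n",
                    info->si_signo, info->si_code, info->si_addr);
            _exit(0);
    }

    int main(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_sigaction = segv_handler;
            sa.sa_flags = SA_SIGINFO;
            sigaction(SIGSEGV, &sa, NULL);

            *(volatile int *)8 = 1; /* deliberate fault at a bad address */
            return 1;
    }
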
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2f5e87f1bae2..24d243ef8e71 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -665,7 +665,7 @@ static void run_ksoftirqd(unsigned int cpu)
*/
__do_softirq();
local_irq_enable();
- cond_resched_rcu_qs();
+ cond_resched();
return;
}
local_irq_enable();
diff --git a/kernel/sys.c b/kernel/sys.c
index 83ffd7dccf23..f2289de20e19 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(overflowgid);
*/
int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
-int fs_overflowgid = DEFAULT_FS_OVERFLOWUID;
+int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 557d46728577..2fb4e27c636a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1374,13 +1374,6 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
- .procname = "hugepages_treat_as_movable",
- .data = &hugepages_treat_as_movable,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
{
.procname = "nr_overcommit_hugepages",
.data = NULL,
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d32520840fde..ae0c8a411fe7 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -60,6 +60,15 @@
#include "tick-internal.h"
/*
+ * Masks for selecting the soft and hard context timers from
+ * cpu_base->active
+ */
+#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
+#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
+#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
+/*
* The timer bases:
*
* There are more clockids than hrtimer bases. Thus, we index
@@ -70,7 +79,6 @@
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
- .seq = SEQCNT_ZERO(hrtimer_bases.seq),
.clock_base =
{
{
@@ -93,6 +101,26 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
.clockid = CLOCK_TAI,
.get_time = &ktime_get_clocktai,
},
+ {
+ .index = HRTIMER_BASE_MONOTONIC_SOFT,
+ .clockid = CLOCK_MONOTONIC,
+ .get_time = &ktime_get,
+ },
+ {
+ .index = HRTIMER_BASE_REALTIME_SOFT,
+ .clockid = CLOCK_REALTIME,
+ .get_time = &ktime_get_real,
+ },
+ {
+ .index = HRTIMER_BASE_BOOTTIME_SOFT,
+ .clockid = CLOCK_BOOTTIME,
+ .get_time = &ktime_get_boottime,
+ },
+ {
+ .index = HRTIMER_BASE_TAI_SOFT,
+ .clockid = CLOCK_TAI,
+ .get_time = &ktime_get_clocktai,
+ },
}
};
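
The four new *_SOFT entries double the clock_base array, and the masks defined at the top of the file carve cpu_base->active_bases into a hard half and a soft half. A standalone sketch of the mask arithmetic, assuming HRTIMER_BASE_MONOTONIC_SOFT ends up as index 4:

    #include <stdio.h>

    enum { HRTIMER_BASE_MONOTONIC_SOFT = 4 }; /* first soft base index */

    #define MASK_SHIFT          (HRTIMER_BASE_MONOTONIC_SOFT)
    #define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
    #define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
    #define HRTIMER_ACTIVE_ALL  (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

    int main(void)
    {
            printf("hard %#x soft %#x all %#x\n",
                   HRTIMER_ACTIVE_HARD, HRTIMER_ACTIVE_SOFT, HRTIMER_ACTIVE_ALL);
            /* prints: hard 0xf soft 0xf0 all 0xff */
            return 0;
    }
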
@@ -118,7 +146,6 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
* timer->base->cpu_base
*/
static struct hrtimer_cpu_base migration_cpu_base = {
- .seq = SEQCNT_ZERO(migration_cpu_base),
.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};
@@ -156,45 +183,33 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
}
/*
- * With HIGHRES=y we do not migrate the timer when it is expiring
- * before the next event on the target cpu because we cannot reprogram
- * the target cpu hardware and we would cause it to fire late.
+ * We do not migrate the timer when it is expiring before the next
+ * event on the target cpu. When high resolution is enabled, we cannot
+ * reprogram the target cpu hardware and we would cause it to fire
+ * late. To keep it simple, we handle the high resolution enabled and
+ * disabled cases the same way.
*
* Called with cpu_base->lock of target cpu held.
*/
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
-#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires;
- if (!new_base->cpu_base->hres_active)
- return 0;
-
expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
- return expires <= new_base->cpu_base->expires_next;
-#else
- return 0;
-#endif
+ return expires < new_base->cpu_base->expires_next;
}
-#ifdef CONFIG_NO_HZ_COMMON
-static inline
-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
- int pinned)
-{
- if (pinned || !base->migration_enabled)
- return base;
- return &per_cpu(hrtimer_bases, get_nohz_timer_target());
-}
-#else
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
int pinned)
{
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+ if (static_branch_likely(&timers_migration_enabled) && !pinned)
+ return &per_cpu(hrtimer_bases, get_nohz_timer_target());
+#endif
return base;
}
-#endif
/*
* We switch the timer base to a power-optimized selected CPU target,
@@ -396,7 +411,8 @@ static inline void debug_hrtimer_init(struct hrtimer *timer)
debug_object_init(timer, &hrtimer_debug_descr);
}
-static inline void debug_hrtimer_activate(struct hrtimer *timer)
+static inline void debug_hrtimer_activate(struct hrtimer *timer,
+ enum hrtimer_mode mode)
{
debug_object_activate(timer, &hrtimer_debug_descr);
}
@@ -429,8 +445,10 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
#else
+
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
-static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
+static inline void debug_hrtimer_activate(struct hrtimer *timer,
+ enum hrtimer_mode mode) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
@@ -442,10 +460,11 @@ debug_init(struct hrtimer *timer, clockid_t clockid,
trace_hrtimer_init(timer, clockid, mode);
}
-static inline void debug_activate(struct hrtimer *timer)
+static inline void debug_activate(struct hrtimer *timer,
+ enum hrtimer_mode mode)
{
- debug_hrtimer_activate(timer);
- trace_hrtimer_start(timer);
+ debug_hrtimer_activate(timer, mode);
+ trace_hrtimer_start(timer, mode);
}
static inline void debug_deactivate(struct hrtimer *timer)
@@ -454,35 +473,43 @@ static inline void debug_deactivate(struct hrtimer *timer)
trace_hrtimer_cancel(timer);
}
-#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
-static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
- struct hrtimer *timer)
+static struct hrtimer_clock_base *
+__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
{
-#ifdef CONFIG_HIGH_RES_TIMERS
- cpu_base->next_timer = timer;
-#endif
+ unsigned int idx;
+
+ if (!*active)
+ return NULL;
+
+ idx = __ffs(*active);
+ *active &= ~(1U << idx);
+
+ return &cpu_base->clock_base[idx];
}
-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+#define for_each_active_base(base, cpu_base, active) \
+ while ((base = __next_base((cpu_base), &(active))))
+
+static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
+ unsigned int active,
+ ktime_t expires_next)
{
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
- ktime_t expires, expires_next = KTIME_MAX;
+ struct hrtimer_clock_base *base;
+ ktime_t expires;
- hrtimer_update_next_timer(cpu_base, NULL);
- for (; active; base++, active >>= 1) {
+ for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *next;
struct hrtimer *timer;
- if (!(active & 0x01))
- continue;
-
next = timerqueue_getnext(&base->active);
timer = container_of(next, struct hrtimer, node);
expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
if (expires < expires_next) {
expires_next = expires;
- hrtimer_update_next_timer(cpu_base, timer);
+ if (timer->is_soft)
+ cpu_base->softirq_next_timer = timer;
+ else
+ cpu_base->next_timer = timer;
}
}
/*
@@ -494,7 +521,47 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
expires_next = 0;
return expires_next;
}
-#endif
+
+/*
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+ * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
+ *
+ * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
+ * those timers will get run whenever the softirq gets handled, at the end of
+ * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
+ *
+ * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
+ * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
+ * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
+ *
+ * @active_mask must be one of:
+ * - HRTIMER_ACTIVE_ALL,
+ * - HRTIMER_ACTIVE_SOFT, or
+ * - HRTIMER_ACTIVE_HARD.
+ */
+static ktime_t
+__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
+{
+ unsigned int active;
+ struct hrtimer *next_timer = NULL;
+ ktime_t expires_next = KTIME_MAX;
+
+ if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
+ active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
+ cpu_base->softirq_next_timer = NULL;
+ expires_next = __hrtimer_next_event_base(cpu_base, active, KTIME_MAX);
+
+ next_timer = cpu_base->softirq_next_timer;
+ }
+
+ if (active_mask & HRTIMER_ACTIVE_HARD) {
+ active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
+ cpu_base->next_timer = next_timer;
+ expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+ }
+
+ return expires_next;
+}
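
for_each_active_base() above replaces the old linear "base++, active >>= 1" walk with a find-first-set loop, so only bases that actually have queued timers are visited. A standalone sketch using libc ffs() in place of the kernel's __ffs():

    #include <stdio.h>
    #include <strings.h> /* ffs(), a stand-in for the kernel's __ffs() */

    int main(void)
    {
            unsigned int active = 0x32; /* bases 1, 4 and 5 have timers */

            while (active) {
                    unsigned int idx = ffs(active) - 1; /* __next_base() */
                    active &= ~(1U << idx);
                    printf("visit clock base %u\n", idx);
            }
            return 0;
    }
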
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
@@ -502,36 +569,14 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
- return ktime_get_update_offsets_now(&base->clock_was_set_seq,
+ ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
offs_real, offs_boot, offs_tai);
-}
-/* High resolution timer related functions */
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * High resolution timer enabled ?
- */
-static bool hrtimer_hres_enabled __read_mostly = true;
-unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
-EXPORT_SYMBOL_GPL(hrtimer_resolution);
-
-/*
- * Enable / Disable high resolution mode
- */
-static int __init setup_hrtimer_hres(char *str)
-{
- return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
-}
-
-__setup("highres=", setup_hrtimer_hres);
+ base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
+ base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
+ base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
-/*
- * hrtimer_high_res_enabled - query, if the highres mode is enabled
- */
-static inline int hrtimer_is_hres_enabled(void)
-{
- return hrtimer_hres_enabled;
+ return now;
}
/*
@@ -539,7 +584,8 @@ static inline int hrtimer_is_hres_enabled(void)
*/
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
- return cpu_base->hres_active;
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ cpu_base->hres_active : 0;
}
static inline int hrtimer_hres_active(void)
@@ -557,10 +603,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
ktime_t expires_next;
- if (!cpu_base->hres_active)
- return;
+ /*
+ * Find the current next expiration time.
+ */
+ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
- expires_next = __hrtimer_get_next_event(cpu_base);
+ if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
+ /*
+ * When the softirq is activated, hrtimer has to be
+ * programmed with the first hard hrtimer because the soft
+ * timer interrupt could occur too late.
+ */
+ if (cpu_base->softirq_activated)
+ expires_next = __hrtimer_get_next_event(cpu_base,
+ HRTIMER_ACTIVE_HARD);
+ else
+ cpu_base->softirq_expires_next = expires_next;
+ }
if (skip_equal && expires_next == cpu_base->expires_next)
return;
@@ -568,6 +627,9 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
cpu_base->expires_next = expires_next;
/*
+ * If hres is not active, hardware does not have to be
+ * reprogrammed yet.
+ *
* If a hang was detected in the last timer interrupt then we
* leave the hang delay active in the hardware. We want the
* system to make progress. That also prevents the following
@@ -581,81 +643,38 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
* set. So we'd effectivly block all timers until the T2 event
* fires.
*/
- if (cpu_base->hang_detected)
+ if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
return;
tick_program_event(cpu_base->expires_next, 1);
}
+/* High resolution timer related functions */
+#ifdef CONFIG_HIGH_RES_TIMERS
+
/*
- * When a timer is enqueued and expires earlier than the already enqueued
- * timers, we have to check, whether it expires earlier than the timer for
- * which the clock event device was armed.
- *
- * Called with interrupts disabled and base->cpu_base.lock held
+ * High resolution timer enabled ?
*/
-static void hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
-{
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-
- WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
-
- /*
- * If the timer is not on the current cpu, we cannot reprogram
- * the other cpus clock event device.
- */
- if (base->cpu_base != cpu_base)
- return;
-
- /*
- * If the hrtimer interrupt is running, then it will
- * reevaluate the clock bases and reprogram the clock event
- * device. The callbacks are always executed in hard interrupt
- * context so we don't need an extra check for a running
- * callback.
- */
- if (cpu_base->in_hrtirq)
- return;
-
- /*
- * CLOCK_REALTIME timer might be requested with an absolute
- * expiry time which is less than base->offset. Set it to 0.
- */
- if (expires < 0)
- expires = 0;
-
- if (expires >= cpu_base->expires_next)
- return;
-
- /* Update the pointer to the next expiring timer */
- cpu_base->next_timer = timer;
-
- /*
- * If a hang was detected in the last timer interrupt then we
- * do not schedule a timer which is earlier than the expiry
- * which we enforced in the hang detection. We want the system
- * to make progress.
- */
- if (cpu_base->hang_detected)
- return;
+static bool hrtimer_hres_enabled __read_mostly = true;
+unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
+EXPORT_SYMBOL_GPL(hrtimer_resolution);
- /*
- * Program the timer hardware. We enforce the expiry for
- * events which are already in the past.
- */
- cpu_base->expires_next = expires;
- tick_program_event(expires, 1);
+/*
+ * Enable / Disable high resolution mode
+ */
+static int __init setup_hrtimer_hres(char *str)
+{
+ return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}
+__setup("highres=", setup_hrtimer_hres);
+
/*
- * Initialize the high resolution related parts of cpu_base
+ * hrtimer_is_hres_enabled - query whether the highres mode is enabled
*/
-static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+static inline int hrtimer_is_hres_enabled(void)
{
- base->expires_next = KTIME_MAX;
- base->hres_active = 0;
+ return hrtimer_hres_enabled;
}
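
For reference, kstrtobool() accepts the usual boolean spellings ("0"/"1",
"y"/"n", "on"/"off"), so high resolution mode can be disabled from the
kernel command line with, for example:

        highres=off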
/*
@@ -667,7 +686,7 @@ static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
- if (!base->hres_active)
+ if (!__hrtimer_hres_active(base))
return;
raw_spin_lock(&base->lock);
@@ -714,23 +733,102 @@ void clock_was_set_delayed(void)
#else
-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
-static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
-static inline void
-hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
-{
- return 0;
-}
-static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
+ * When a timer is enqueued and expires earlier than the already enqueued
+ * timers, we have to check whether it expires earlier than the timer for
+ * which the clock event device was armed.
+ *
+ * Called with interrupts disabled and base->cpu_base.lock held
+ */
+static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
+{
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ struct hrtimer_clock_base *base = timer->base;
+ ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+
+ WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
+
+ /*
+ * CLOCK_REALTIME timer might be requested with an absolute
+ * expiry time which is less than base->offset. Set it to 0.
+ */
+ if (expires < 0)
+ expires = 0;
+
+ if (timer->is_soft) {
+ /*
+ * soft hrtimer could be started on a remote CPU. In this
+ * case softirq_expires_next needs to be updated on the
+ * remote CPU. The soft hrtimer will not expire before the
+ * first hard hrtimer on the remote CPU -
+ * hrtimer_check_target() prevents this case.
+ */
+ struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
+
+ if (timer_cpu_base->softirq_activated)
+ return;
+
+ if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
+ return;
+
+ timer_cpu_base->softirq_next_timer = timer;
+ timer_cpu_base->softirq_expires_next = expires;
+
+ if (!ktime_before(expires, timer_cpu_base->expires_next) ||
+ !reprogram)
+ return;
+ }
+
+ /*
+ * If the timer is not on the current cpu, we cannot reprogram
+ * the other cpus clock event device.
+ */
+ if (base->cpu_base != cpu_base)
+ return;
+
+ /*
+ * If the hrtimer interrupt is running, then it will
+ * reevaluate the clock bases and reprogram the clock event
+ * device. The callbacks are always executed in hard interrupt
+ * context so we don't need an extra check for a running
+ * callback.
+ */
+ if (cpu_base->in_hrtirq)
+ return;
+
+ if (expires >= cpu_base->expires_next)
+ return;
+
+ /* Update the pointer to the next expiring timer */
+ cpu_base->next_timer = timer;
+ cpu_base->expires_next = expires;
+
+ /*
+ * If hres is not active, hardware does not have to be
+ * programmed yet.
+ *
+ * If a hang was detected in the last timer interrupt then we
+ * do not schedule a timer which is earlier than the expiry
+ * which we enforced in the hang detection. We want the system
+ * to make progress.
+ */
+ if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+ return;
+
+ /*
+ * Program the timer hardware. We enforce the expiry for
+ * events which are already in the past.
+ */
+ tick_program_event(expires, 1);
+}
+
+/*
* Clock realtime was set
*
* Change the offset of the realtime clock vs. the monotonic
@@ -835,9 +933,10 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
* Returns 1 when the new timer is the leftmost timer in the tree.
*/
static int enqueue_hrtimer(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
+ struct hrtimer_clock_base *base,
+ enum hrtimer_mode mode)
{
- debug_activate(timer);
+ debug_activate(timer, mode);
base->cpu_base->active_bases |= 1 << base->index;
@@ -870,7 +969,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
-#ifdef CONFIG_HIGH_RES_TIMERS
/*
* Note: If reprogram is false we do not update
* cpu_base->next_timer. This happens when we remove the first
@@ -881,7 +979,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
*/
if (reprogram && timer == cpu_base->next_timer)
hrtimer_force_reprogram(cpu_base, 1);
-#endif
}
/*
@@ -930,22 +1027,36 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
return tim;
}
-/**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
- * @timer: the timer to be added
- * @tim: expiry time
- * @delta_ns: "slack" range for the timer
- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
- * relative (HRTIMER_MODE_REL)
- */
-void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- u64 delta_ns, const enum hrtimer_mode mode)
+static void
+hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
{
- struct hrtimer_clock_base *base, *new_base;
- unsigned long flags;
- int leftmost;
+ ktime_t expires;
- base = lock_hrtimer_base(timer, &flags);
+ /*
+ * Find the next SOFT expiration.
+ */
+ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
+
+ /*
+ * Reprogramming needs to be triggered, even if the next soft
+ * hrtimer expires at the same time as the next hard
+ * hrtimer. cpu_base->softirq_expires_next needs to be updated!
+ */
+ if (expires == KTIME_MAX)
+ return;
+
+ /*
+ * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
+ * cpu_base->*expires_next is only set by hrtimer_reprogram()
+ */
+ hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
+}
+
+static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ u64 delta_ns, const enum hrtimer_mode mode,
+ struct hrtimer_clock_base *base)
+{
+ struct hrtimer_clock_base *new_base;
/* Remove an active timer from the queue: */
remove_hrtimer(timer, base, true);
@@ -960,21 +1071,35 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
/* Switch the timer base, if necessary: */
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
- leftmost = enqueue_hrtimer(timer, new_base);
- if (!leftmost)
- goto unlock;
+ return enqueue_hrtimer(timer, new_base, mode);
+}
+
+/**
+ * hrtimer_start_range_ns - (re)start an hrtimer
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @delta_ns: "slack" range for the timer
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ * softirq based mode is considered for debug purpose only!
+ */
+void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ u64 delta_ns, const enum hrtimer_mode mode)
+{
+ struct hrtimer_clock_base *base;
+ unsigned long flags;
+
+ /*
+ * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
+ * match.
+ */
+ WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
+
+ base = lock_hrtimer_base(timer, &flags);
+
+ if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
+ hrtimer_reprogram(timer, true);
- if (!hrtimer_is_hres_active(timer)) {
- /*
- * Kick to reschedule the next tick to handle the new timer
- * on dynticks target.
- */
- if (new_base->cpu_base->nohz_active)
- wake_up_nohz_cpu(new_base->cpu_base->cpu);
- } else {
- hrtimer_reprogram(timer, new_base);
- }
-unlock:
unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
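
As a usage illustration only (not part of this patch): with the new SOFT
modes, a caller arms an hrtimer whose callback runs from the hrtimer softirq
instead of hard interrupt context. A minimal sketch using the standard
hrtimer API with made-up timer and callback names; hrtimer_start() is the
zero-slack wrapper around hrtimer_start_range_ns():

        static struct hrtimer my_timer;         /* hypothetical */

        static enum hrtimer_restart my_cb(struct hrtimer *t)
        {
                /* Runs in softirq context because of HRTIMER_MODE_REL_SOFT */
                return HRTIMER_NORESTART;
        }

        static void arm_soft_timer(void)
        {
                hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
                my_timer.function = my_cb;
                hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
        }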
@@ -1072,7 +1197,7 @@ u64 hrtimer_get_next_event(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!__hrtimer_hres_active(cpu_base))
- expires = __hrtimer_get_next_event(cpu_base);
+ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
@@ -1095,17 +1220,24 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
+ bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
+ int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
struct hrtimer_cpu_base *cpu_base;
- int base;
memset(timer, 0, sizeof(struct hrtimer));
cpu_base = raw_cpu_ptr(&hrtimer_bases);
- if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
+ /*
+ * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
+ * clock modifications, so they need to become CLOCK_MONOTONIC to
+ * ensure POSIX compliance.
+ */
+ if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
clock_id = CLOCK_MONOTONIC;
- base = hrtimer_clockid_to_base(clock_id);
+ base += hrtimer_clockid_to_base(clock_id);
+ timer->is_soft = softtimer;
timer->base = &cpu_base->clock_base[base];
timerqueue_init(&timer->node);
}
@@ -1114,7 +1246,13 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
* hrtimer_init - initialize a timer to the given clock
* @timer: the timer to be initialized
* @clock_id: the clock to be used
- * @mode: timer mode abs/rel
+ * @mode: The modes which are relevant for initialization:
+ * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
+ * HRTIMER_MODE_REL_SOFT
+ *
+ * The PINNED variants of the above can be handed in,
+ * but the PINNED bit is ignored as pinning happens
+ * when the hrtimer is started
*/
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
@@ -1133,19 +1271,19 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
*/
bool hrtimer_active(const struct hrtimer *timer)
{
- struct hrtimer_cpu_base *cpu_base;
+ struct hrtimer_clock_base *base;
unsigned int seq;
do {
- cpu_base = READ_ONCE(timer->base->cpu_base);
- seq = raw_read_seqcount_begin(&cpu_base->seq);
+ base = READ_ONCE(timer->base);
+ seq = raw_read_seqcount_begin(&base->seq);
if (timer->state != HRTIMER_STATE_INACTIVE ||
- cpu_base->running == timer)
+ base->running == timer)
return true;
- } while (read_seqcount_retry(&cpu_base->seq, seq) ||
- cpu_base != READ_ONCE(timer->base->cpu_base));
+ } while (read_seqcount_retry(&base->seq, seq) ||
+ base != READ_ONCE(timer->base));
return false;
}
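
The retry loop above is the classic seqcount read pattern. A rough userspace
analogue in C11 (illustrative only: it elides the writer side and the
stronger guarantees of the kernel's raw_read_seqcount_begin() /
read_seqcount_retry() primitives):

        #include <stdatomic.h>

        struct snap {
                atomic_uint seq;        /* odd while an update is in flight */
                int data;
        };

        static int read_stable(struct snap *s)
        {
                unsigned int seq;
                int val;

                do {
                        seq = atomic_load_explicit(&s->seq, memory_order_acquire);
                        val = s->data;  /* may be torn; the retry catches it */
                        atomic_thread_fence(memory_order_acquire);
                } while ((seq & 1) ||
                         seq != atomic_load_explicit(&s->seq, memory_order_relaxed));
                return val;
        }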
@@ -1171,7 +1309,8 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
struct hrtimer_clock_base *base,
- struct hrtimer *timer, ktime_t *now)
+ struct hrtimer *timer, ktime_t *now,
+ unsigned long flags)
{
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
@@ -1179,16 +1318,16 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
lockdep_assert_held(&cpu_base->lock);
debug_deactivate(timer);
- cpu_base->running = timer;
+ base->running = timer;
/*
* Separate the ->running assignment from the ->state assignment.
*
* As with a regular write barrier, this ensures the read side in
- * hrtimer_active() cannot observe cpu_base->running == NULL &&
+ * hrtimer_active() cannot observe base->running == NULL &&
* timer->state == INACTIVE.
*/
- raw_write_seqcount_barrier(&cpu_base->seq);
+ raw_write_seqcount_barrier(&base->seq);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
fn = timer->function;
@@ -1202,15 +1341,15 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
timer->is_rel = false;
/*
- * Because we run timers from hardirq context, there is no chance
- * they get migrated to another cpu, therefore its safe to unlock
- * the timer base.
+ * The timer is marked as running in the CPU base, so it is
+ * protected against migration to a different CPU even if the lock
+ * is dropped.
*/
- raw_spin_unlock(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
trace_hrtimer_expire_exit(timer);
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irq(&cpu_base->lock);
/*
* Note: We clear the running state after enqueue_hrtimer and
@@ -1223,33 +1362,31 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
*/
if (restart != HRTIMER_NORESTART &&
!(timer->state & HRTIMER_STATE_ENQUEUED))
- enqueue_hrtimer(timer, base);
+ enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
/*
* Separate the ->running assignment from the ->state assignment.
*
* As with a regular write barrier, this ensures the read side in
- * hrtimer_active() cannot observe cpu_base->running == NULL &&
+ * hrtimer_active() cannot observe base->running == NULL &&
* timer->state == INACTIVE.
*/
- raw_write_seqcount_barrier(&cpu_base->seq);
+ raw_write_seqcount_barrier(&base->seq);
- WARN_ON_ONCE(cpu_base->running != timer);
- cpu_base->running = NULL;
+ WARN_ON_ONCE(base->running != timer);
+ base->running = NULL;
}
-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
+ unsigned long flags, unsigned int active_mask)
{
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
+ struct hrtimer_clock_base *base;
+ unsigned int active = cpu_base->active_bases & active_mask;
- for (; active; base++, active >>= 1) {
+ for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *node;
ktime_t basenow;
- if (!(active & 0x01))
- continue;
-
basenow = ktime_add(now, base->offset);
while ((node = timerqueue_getnext(&base->active))) {
@@ -1272,11 +1409,28 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
if (basenow < hrtimer_get_softexpires_tv64(timer))
break;
- __run_hrtimer(cpu_base, base, timer, &basenow);
+ __run_hrtimer(cpu_base, base, timer, &basenow, flags);
}
}
}
+static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
+{
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ unsigned long flags;
+ ktime_t now;
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+ now = hrtimer_update_base(cpu_base);
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
+
+ cpu_base->softirq_activated = 0;
+ hrtimer_update_softirq_timer(cpu_base, true);
+
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+}
+
#ifdef CONFIG_HIGH_RES_TIMERS
/*
@@ -1287,13 +1441,14 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
+ unsigned long flags;
int retries = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
dev->next_event = KTIME_MAX;
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
entry_time = now = hrtimer_update_base(cpu_base);
retry:
cpu_base->in_hrtirq = 1;
@@ -1306,17 +1461,23 @@ retry:
*/
cpu_base->expires_next = KTIME_MAX;
- __hrtimer_run_queues(cpu_base, now);
+ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
+ cpu_base->softirq_expires_next = KTIME_MAX;
+ cpu_base->softirq_activated = 1;
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
/* Reevaluate the clock bases for the next expiry */
- expires_next = __hrtimer_get_next_event(cpu_base);
+ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
/*
* Store the new expiry value so the migration code can verify
* against it.
*/
cpu_base->expires_next = expires_next;
cpu_base->in_hrtirq = 0;
- raw_spin_unlock(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
/* Reprogramming necessary ? */
if (!tick_program_event(expires_next, 0)) {
@@ -1337,7 +1498,7 @@ retry:
* Acquire base lock for updating the offsets and retrieving
* the current time.
*/
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
cpu_base->nr_retries++;
if (++retries < 3)
@@ -1350,7 +1511,8 @@ retry:
*/
cpu_base->nr_hangs++;
cpu_base->hang_detected = 1;
- raw_spin_unlock(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
delta = ktime_sub(now, entry_time);
if ((unsigned int)delta > cpu_base->max_hang_time)
cpu_base->max_hang_time = (unsigned int) delta;
@@ -1392,6 +1554,7 @@ static inline void __hrtimer_peek_ahead_timers(void) { }
void hrtimer_run_queues(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ unsigned long flags;
ktime_t now;
if (__hrtimer_hres_active(cpu_base))
@@ -1409,10 +1572,17 @@ void hrtimer_run_queues(void)
return;
}
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
- __hrtimer_run_queues(cpu_base, now);
- raw_spin_unlock(&cpu_base->lock);
+
+ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
+ cpu_base->softirq_expires_next = KTIME_MAX;
+ cpu_base->softirq_activated = 1;
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
/*
@@ -1590,7 +1760,13 @@ int hrtimers_prepare_cpu(unsigned int cpu)
}
cpu_base->cpu = cpu;
- hrtimer_init_hres(cpu_base);
+ cpu_base->active_bases = 0;
+ cpu_base->hres_active = 0;
+ cpu_base->hang_detected = 0;
+ cpu_base->next_timer = NULL;
+ cpu_base->softirq_next_timer = NULL;
+ cpu_base->expires_next = KTIME_MAX;
+ cpu_base->softirq_expires_next = KTIME_MAX;
return 0;
}
@@ -1622,7 +1798,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
* sort out already expired timers and reprogram the
* event device.
*/
- enqueue_hrtimer(timer, new_base);
+ enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
}
}
@@ -1634,6 +1810,12 @@ int hrtimers_dead_cpu(unsigned int scpu)
BUG_ON(cpu_online(scpu));
tick_cancel_sched_timer(scpu);
+ /*
+ * This BH disable ensures that raise_softirq_irqoff() does
+ * not wake up ksoftirqd (and acquire the pi-lock) while
+ * holding the cpu_base lock
+ */
+ local_bh_disable();
local_irq_disable();
old_base = &per_cpu(hrtimer_bases, scpu);
new_base = this_cpu_ptr(&hrtimer_bases);
@@ -1649,12 +1831,19 @@ int hrtimers_dead_cpu(unsigned int scpu)
&new_base->clock_base[i]);
}
+ /*
+ * The migration might have changed the first expiring softirq
+ * timer on this CPU. Update it.
+ */
+ hrtimer_update_softirq_timer(new_base, false);
+
raw_spin_unlock(&old_base->lock);
raw_spin_unlock(&new_base->lock);
/* Check, if we got expired work to do */
__hrtimer_peek_ahead_timers();
local_irq_enable();
+ local_bh_enable();
return 0;
}
@@ -1663,18 +1852,19 @@ int hrtimers_dead_cpu(unsigned int scpu)
void __init hrtimers_init(void)
{
hrtimers_prepare_cpu(smp_processor_id());
+ open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}
/**
* schedule_hrtimeout_range_clock - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
- * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
- * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
+ * @mode: timer mode
+ * @clock_id: timer clock to be used
*/
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
- const enum hrtimer_mode mode, int clock)
+ const enum hrtimer_mode mode, clockid_t clock_id)
{
struct hrtimer_sleeper t;
@@ -1695,7 +1885,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
return -EINTR;
}
- hrtimer_init_on_stack(&t.timer, clock, mode);
+ hrtimer_init_on_stack(&t.timer, clock_id, mode);
hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
hrtimer_init_sleeper(&t, current);
@@ -1717,7 +1907,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
* schedule_hrtimeout_range - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
- * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ * @mode: timer mode
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
@@ -1756,7 +1946,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
/**
* schedule_hrtimeout - sleep until timeout
* @expires: timeout value (ktime_t)
- * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ * @mode: timer mode
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 17cdc554c9fe..94ad46d50b56 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -68,10 +68,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
return err;
}
-static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+static __poll_t posix_clock_poll(struct file *fp, poll_table *wait)
{
struct posix_clock *clk = get_posix_clock(fp);
- unsigned int result = 0;
+ __poll_t result = 0;
if (!clk)
return POLLERR;
@@ -216,7 +216,7 @@ struct posix_clock_desc {
static int get_clock_desc(const clockid_t id, struct posix_clock_desc *cd)
{
- struct file *fp = fget(CLOCKID_TO_FD(id));
+ struct file *fp = fget(clockid_to_fd(id));
int err = -EINVAL;
if (!fp)
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 1f27887aa194..2541bd89f20e 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -14,6 +14,7 @@
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
+#include <linux/sched/deadline.h>
#include "posix-timers.h"
@@ -791,6 +792,14 @@ check_timers_list(struct list_head *timers,
return 0;
}
+static inline void check_dl_overrun(struct task_struct *tsk)
+{
+ if (tsk->dl.dl_overrun) {
+ tsk->dl.dl_overrun = 0;
+ __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+ }
+}
+
/*
* Check for any per-thread CPU timers that have fired and move them off
* the tsk->cpu_timers[N] list onto the firing list. Here we update the
@@ -804,6 +813,9 @@ static void check_thread_timers(struct task_struct *tsk,
u64 expires;
unsigned long soft;
+ if (dl_task(tsk))
+ check_dl_overrun(tsk);
+
/*
* If cputime_expires is zero, then there are no active
* per thread CPU timers.
@@ -906,6 +918,9 @@ static void check_process_timers(struct task_struct *tsk,
struct task_cputime cputime;
unsigned long soft;
+ if (dl_task(tsk))
+ check_dl_overrun(tsk);
+
/*
* If cputimer is not running, then there are no active
* process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
@@ -1111,6 +1126,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
return 1;
}
+ if (dl_task(tsk) && tsk->dl.dl_overrun)
+ return 1;
+
return 0;
}
@@ -1189,9 +1207,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
u64 now;
WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
- cpu_timer_sample_group(clock_idx, tsk, &now);
- if (oldval) {
+ if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) {
/*
* We are setting itimer. The *oldval is absolute and we update
* it to be relative, *newval argument is relative and we update
@@ -1363,8 +1380,8 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
-#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
-#define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
+#define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
+#define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock,
struct timespec64 *tp)
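
Stepping back from the hunks above: check_dl_overrun() delivers SIGXCPU to a
SCHED_DEADLINE task that overran its reserved runtime. A userspace task has
to opt in through sched_setattr(); a minimal sketch, assuming the
SCHED_FLAG_DL_OVERRUN flag from the same development cycle (the struct
layout mirrors the uapi header, the parameters are illustrative, and
SCHED_DEADLINE needs root):

        #define _GNU_SOURCE
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <signal.h>
        #include <stdint.h>
        #include <stdio.h>

        struct sched_attr {
                uint32_t size;
                uint32_t sched_policy;
                uint64_t sched_flags;
                int32_t  sched_nice;
                uint32_t sched_priority;
                uint64_t sched_runtime;
                uint64_t sched_deadline;
                uint64_t sched_period;
        };

        #define SCHED_DEADLINE        6
        #define SCHED_FLAG_DL_OVERRUN 0x04

        static void on_xcpu(int sig)
        {
                (void)sig;              /* runtime overrun notification */
        }

        int main(void)
        {
                struct sched_attr attr = {
                        .size           = sizeof(attr),
                        .sched_policy   = SCHED_DEADLINE,
                        .sched_flags    = SCHED_FLAG_DL_OVERRUN,
                        .sched_runtime  =  10 * 1000 * 1000,    /* 10 ms  */
                        .sched_deadline = 100 * 1000 * 1000,    /* 100 ms */
                        .sched_period   = 100 * 1000 * 1000,
                };

                signal(SIGXCPU, on_xcpu);
                if (syscall(SYS_sched_setattr, 0, &attr, 0))
                        perror("sched_setattr");
                /* ... from here on, overrunning the runtime raises SIGXCPU ... */
                return 0;
        }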
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index ec999f32c840..75043046914e 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -462,7 +462,7 @@ static struct k_itimer * alloc_posix_timer(void)
kmem_cache_free(posix_timers_cache, tmr);
return NULL;
}
- memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
+ clear_siginfo(&tmr->sigq->info);
return tmr;
}
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index f8e1845aa464..e277284c2831 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -150,16 +150,15 @@ static inline void tick_nohz_init(void) { }
#ifdef CONFIG_NO_HZ_COMMON
extern unsigned long tick_nohz_active;
-#else
+extern void timers_update_nohz(void);
+# ifdef CONFIG_SMP
+extern struct static_key_false timers_migration_enabled;
+# endif
+#else /* CONFIG_NO_HZ_COMMON */
+static inline void timers_update_nohz(void) { }
#define tick_nohz_active (0)
#endif
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void timers_update_migration(bool update_nohz);
-#else
-static inline void timers_update_migration(bool update_nohz) { }
-#endif
-
DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f7cc7abfcf25..29a5733eff83 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1107,7 +1107,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
ts->nohz_mode = mode;
/* One update is enough */
if (!test_and_set_bit(0, &tick_nohz_active))
- timers_update_migration(true);
+ timers_update_nohz();
}
/**
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 89a9e1b4264a..48150ab42de9 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -200,8 +200,6 @@ struct timer_base {
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
- bool migration_enabled;
- bool nohz_active;
bool is_idle;
bool must_forward_clk;
DECLARE_BITMAP(pending_map, WHEEL_SIZE);
@@ -210,45 +208,64 @@ struct timer_base {
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
+
+static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
+static DEFINE_MUTEX(timer_keys_mutex);
+
+static void timer_update_keys(struct work_struct *work);
+static DECLARE_WORK(timer_update_work, timer_update_keys);
+
+#ifdef CONFIG_SMP
unsigned int sysctl_timer_migration = 1;
-void timers_update_migration(bool update_nohz)
+DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
+
+static void timers_update_migration(void)
{
- bool on = sysctl_timer_migration && tick_nohz_active;
- unsigned int cpu;
+ if (sysctl_timer_migration && tick_nohz_active)
+ static_branch_enable(&timers_migration_enabled);
+ else
+ static_branch_disable(&timers_migration_enabled);
+}
+#else
+static inline void timers_update_migration(void) { }
+#endif /* !CONFIG_SMP */
- /* Avoid the loop, if nothing to update */
- if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
- return;
+static void timer_update_keys(struct work_struct *work)
+{
+ mutex_lock(&timer_keys_mutex);
+ timers_update_migration();
+ static_branch_enable(&timers_nohz_active);
+ mutex_unlock(&timer_keys_mutex);
+}
- for_each_possible_cpu(cpu) {
- per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
- per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
- per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
- if (!update_nohz)
- continue;
- per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
- per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
- per_cpu(hrtimer_bases.nohz_active, cpu) = true;
- }
+void timers_update_nohz(void)
+{
+ schedule_work(&timer_update_work);
}
int timer_migration_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- static DEFINE_MUTEX(mutex);
int ret;
- mutex_lock(&mutex);
+ mutex_lock(&timer_keys_mutex);
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write)
- timers_update_migration(false);
- mutex_unlock(&mutex);
+ timers_update_migration();
+ mutex_unlock(&timer_keys_mutex);
return ret;
}
-#endif
+
+static inline bool is_timers_nohz_active(void)
+{
+ return static_branch_unlikely(&timers_nohz_active);
+}
+#else
+static inline bool is_timers_nohz_active(void) { return false; }
+#endif /* NO_HZ_COMMON */
static unsigned long round_jiffies_common(unsigned long j, int cpu,
bool force_up)
@@ -534,7 +551,7 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer)
static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
- if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+ if (!is_timers_nohz_active())
return;
/*
@@ -849,21 +866,20 @@ static inline struct timer_base *get_timer_base(u32 tflags)
return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}
-#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
-#ifdef CONFIG_SMP
- if ((tflags & TIMER_PINNED) || !base->migration_enabled)
- return get_timer_this_cpu_base(tflags);
- return get_timer_cpu_base(tflags, get_nohz_timer_target());
-#else
- return get_timer_this_cpu_base(tflags);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+ if (static_branch_likely(&timers_migration_enabled) &&
+ !(tflags & TIMER_PINNED))
+ return get_timer_cpu_base(tflags, get_nohz_timer_target());
#endif
+ return get_timer_this_cpu_base(tflags);
}
static inline void forward_timer_base(struct timer_base *base)
{
+#ifdef CONFIG_NO_HZ_COMMON
unsigned long jnow;
/*
@@ -887,16 +903,8 @@ static inline void forward_timer_base(struct timer_base *base)
base->clk = jnow;
else
base->clk = base->next_expiry;
-}
-#else
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
- return get_timer_this_cpu_base(tflags);
-}
-
-static inline void forward_timer_base(struct timer_base *base) { }
#endif
+}
/*
@@ -1696,7 +1704,7 @@ void run_local_timers(void)
hrtimer_run_queues();
/* Raise the softirq only if required. */
if (time_before(jiffies, base->clk)) {
- if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+ if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
return;
/* CPU is awake, so check the deferrable base. */
base++;
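
The timers_nohz_active / timers_migration_enabled conversion above is the
standard static-key pattern: the hot path compiles to a patchable no-op
branch, while the sleepable enable/disable runs from process context (here
deferred to a workqueue). A generic kernel-style sketch with made-up names:

        static DEFINE_STATIC_KEY_FALSE(my_feature);     /* hypothetical key */

        static void my_hot_path(void)
        {
                /* Compiles to a no-op branch until the key is enabled */
                if (static_branch_unlikely(&my_feature))
                        do_rare_work();                 /* hypothetical helper */
        }

        static void my_enable_feature(void)
        {
                /* May sleep while patching code; process context only */
                static_branch_enable(&my_feature);
        }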
diff --git a/kernel/torture.c b/kernel/torture.c
index 637e172835d8..37b94012a3f8 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -47,6 +47,7 @@
#include <linux/ktime.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
+#include "rcu/rcu.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
@@ -60,7 +61,6 @@ static bool verbose;
#define FULLSTOP_RMMOD 2 /* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);
-static int *torture_runnable;
#ifdef CONFIG_HOTPLUG_CPU
@@ -500,7 +500,7 @@ static int torture_shutdown(void *arg)
torture_shutdown_hook();
else
VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
- ftrace_dump(DUMP_ALL);
+ rcu_ftrace_dump(DUMP_ALL);
kernel_power_off(); /* Shut down the system. */
return 0;
}
@@ -572,17 +572,19 @@ static int stutter;
*/
void stutter_wait(const char *title)
{
+ int spt;
+
cond_resched_rcu_qs();
- while (READ_ONCE(stutter_pause_test) ||
- (torture_runnable && !READ_ONCE(*torture_runnable))) {
- if (stutter_pause_test)
- if (READ_ONCE(stutter_pause_test) == 1)
- schedule_timeout_interruptible(1);
- else
- while (READ_ONCE(stutter_pause_test))
- cond_resched();
- else
+ spt = READ_ONCE(stutter_pause_test);
+ for (; spt; spt = READ_ONCE(stutter_pause_test)) {
+ if (spt == 1) {
+ schedule_timeout_interruptible(1);
+ } else if (spt == 2) {
+ while (READ_ONCE(stutter_pause_test))
+ cond_resched();
+ } else {
schedule_timeout_interruptible(round_jiffies_relative(HZ));
+ }
torture_shutdown_absorb(title);
}
}
@@ -596,17 +598,15 @@ static int torture_stutter(void *arg)
{
VERBOSE_TOROUT_STRING("torture_stutter task started");
do {
- if (!torture_must_stop()) {
- if (stutter > 1) {
- schedule_timeout_interruptible(stutter - 1);
- WRITE_ONCE(stutter_pause_test, 2);
- }
- schedule_timeout_interruptible(1);
+ if (!torture_must_stop() && stutter > 1) {
WRITE_ONCE(stutter_pause_test, 1);
+ schedule_timeout_interruptible(stutter - 1);
+ WRITE_ONCE(stutter_pause_test, 2);
+ schedule_timeout_interruptible(1);
}
+ WRITE_ONCE(stutter_pause_test, 0);
if (!torture_must_stop())
schedule_timeout_interruptible(stutter);
- WRITE_ONCE(stutter_pause_test, 0);
torture_shutdown_absorb("torture_stutter");
} while (!torture_must_stop());
torture_kthread_stopping("torture_stutter");
@@ -647,7 +647,7 @@ static void torture_stutter_cleanup(void)
* The runnable parameter points to a flag that controls whether or not
* the test is currently runnable. If there is no such flag, pass in NULL.
*/
-bool torture_init_begin(char *ttype, bool v, int *runnable)
+bool torture_init_begin(char *ttype, bool v)
{
mutex_lock(&fullstop_mutex);
if (torture_type != NULL) {
@@ -659,7 +659,6 @@ bool torture_init_begin(char *ttype, bool v, int *runnable)
}
torture_type = ttype;
verbose = v;
- torture_runnable = runnable;
fullstop = FULLSTOP_DONTSTOP;
return true;
}
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 904c952ac383..0b249e2f0c3c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -355,7 +355,7 @@ config PROFILE_ANNOTATED_BRANCHES
on if you need to profile the system's use of these macros.
config PROFILE_ALL_BRANCHES
- bool "Profile all if conditionals"
+ bool "Profile all if conditionals" if !FORTIFY_SOURCE
select TRACE_BRANCH_PROFILING
help
This tracer profiles all branch conditions. Every if ()
@@ -530,6 +530,15 @@ config FUNCTION_PROFILER
If in doubt, say N.
+config BPF_KPROBE_OVERRIDE
+ bool "Enable BPF programs to override a kprobed function"
+ depends on BPF_EVENTS
+ depends on FUNCTION_ERROR_INJECTION
+ default n
+ help
+ Allows BPF to override the execution of a probed function and
+ set a different return value. This is used for error injection.
+
config FTRACE_MCOUNT_RECORD
def_bool y
depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 40207c2a4113..fc2838ac8b78 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -13,6 +13,10 @@
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
+#include <linux/kprobes.h>
+#include <linux/error-injection.h>
+
+#include "trace_probe.h"
#include "trace.h"
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
@@ -76,6 +80,23 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
+{
+ regs_set_return_value(regs, rc);
+ override_function_with_return(regs);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_override_return_proto = {
+ .func = bpf_override_return,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+};
+#endif
+
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
int ret;
@@ -224,7 +245,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
*/
#define __BPF_TP_EMIT() __BPF_ARG3_TP()
#define __BPF_TP(...) \
- __trace_printk(1 /* Fake ip will not be printed. */, \
+ __trace_printk(0 /* Fake ip */, \
fmt, ##__VA_ARGS__)
#define __BPF_ARG1_TP(...) \
@@ -556,6 +577,10 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
return &bpf_get_stackid_proto;
case BPF_FUNC_perf_event_read_value:
return &bpf_perf_event_read_value_proto;
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+ case BPF_FUNC_override_return:
+ return &bpf_override_return_proto;
+#endif
default:
return tracing_func_proto(func_id);
}
@@ -773,6 +798,15 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
struct bpf_prog_array *new_array;
int ret = -EEXIST;
+ /*
+ * Kprobe override only works if they are on the function entry,
+ * and only if they are on the opt-in list.
+ */
+ if (prog->kprobe_override &&
+ (!trace_kprobe_on_func_entry(event->tp_event) ||
+ !trace_kprobe_error_injectable(event->tp_event)))
+ return -EINVAL;
+
mutex_lock(&bpf_event_mutex);
if (event->prog)
@@ -825,3 +859,26 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
unlock:
mutex_unlock(&bpf_event_mutex);
}
+
+int perf_event_query_prog_array(struct perf_event *event, void __user *info)
+{
+ struct perf_event_query_bpf __user *uquery = info;
+ struct perf_event_query_bpf query = {};
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (event->attr.type != PERF_TYPE_TRACEPOINT)
+ return -EINVAL;
+ if (copy_from_user(&query, uquery, sizeof(query)))
+ return -EFAULT;
+
+ mutex_lock(&bpf_event_mutex);
+ ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
+ uquery->ids,
+ query.ids_len,
+ &uquery->prog_cnt);
+ mutex_unlock(&bpf_event_mutex);
+
+ return ret;
+}
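
To make the new helper concrete: a BPF kprobe program using
bpf_override_return() might look like the sketch below, modeled on the
kernel's error-injection sample. The probed function and error value are
illustrative, the target must pass the within_error_injection_list() check
above, and the GPL license string is required because the helper is gpl_only:

        #include <uapi/linux/bpf.h>
        #include "bpf_helpers.h"        /* assumed sample helper header */

        SEC("kprobe/open_ctree")        /* illustrative target function */
        int override_prog(struct pt_regs *ctx)
        {
                unsigned long rc = -12; /* -ENOMEM */

                bpf_override_return(ctx, rc);
                return 0;               /* the probed function never runs */
        }

        char _license[] SEC("license") = "GPL";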
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ccdf3664e4a9..554b517c61a0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1119,15 +1119,11 @@ static struct ftrace_ops global_ops = {
};
/*
- * This is used by __kernel_text_address() to return true if the
- * address is on a dynamically allocated trampoline that would
- * not return true for either core_kernel_text() or
- * is_module_text_address().
+ * Used by the stack unwinder to know about dynamic ftrace trampolines.
*/
-bool is_ftrace_trampoline(unsigned long addr)
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
- struct ftrace_ops *op;
- bool ret = false;
+ struct ftrace_ops *op = NULL;
/*
* Some of the ops may be dynamically allocated,
@@ -1144,15 +1140,24 @@ bool is_ftrace_trampoline(unsigned long addr)
if (op->trampoline && op->trampoline_size)
if (addr >= op->trampoline &&
addr < op->trampoline + op->trampoline_size) {
- ret = true;
- goto out;
+ preempt_enable_notrace();
+ return op;
}
} while_for_each_ftrace_op(op);
-
- out:
preempt_enable_notrace();
- return ret;
+ return NULL;
+}
+
+/*
+ * This is used by __kernel_text_address() to return true if the
+ * address is on a dynamically allocated trampoline that would
+ * not return true for either core_kernel_text() or
+ * is_module_text_address().
+ */
+bool is_ftrace_trampoline(unsigned long addr)
+{
+ return ftrace_ops_trampoline(addr) != NULL;
}
struct ftrace_page {
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9ab18995ff1e..ca6930e0d25e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -630,7 +630,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
* Returns POLLIN | POLLRDNORM if data exists in the buffers,
* zero otherwise.
*/
-int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -2534,29 +2534,58 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* The lock and unlock are done within a preempt disable section.
* The current_context per_cpu variable can only be modified
* by the current task between lock and unlock. But it can
- * be modified more than once via an interrupt. There are four
- * different contexts that we need to consider.
+ * be modified more than once via an interrupt. To pass this
+ * information from the lock to the unlock without having to
+ * access the 'in_interrupt()' functions again (which do show
+ * a bit of overhead in something as critical as function tracing),
+ * we use a bitmask trick.
*
- * Normal context.
- * SoftIRQ context
- * IRQ context
- * NMI context
+ * bit 0 = NMI context
+ * bit 1 = IRQ context
+ * bit 2 = SoftIRQ context
+ * bit 3 = normal context.
*
- * If for some reason the ring buffer starts to recurse, we
- * only allow that to happen at most 4 times (one for each
- * context). If it happens 5 times, then we consider this a
- * recusive loop and do not let it go further.
+ * This works because this is the order of contexts that can
+ * preempt other contexts. A SoftIRQ never preempts an IRQ
+ * context.
+ *
+ * When the context is determined, the corresponding bit is
+ * checked and set (if it was set, then a recursion of that context
+ * happened).
+ *
+ * On unlock, we need to clear this bit. To do so, just subtract
+ * 1 from the current_context and AND it with itself.
+ *
+ * (binary)
+ * 101 - 1 = 100
+ * 101 & 100 = 100 (clearing bit zero)
+ *
+ * 1010 - 1 = 1001
+ * 1010 & 1001 = 1000 (clearing bit 1)
+ *
+ * The least significant bit can be cleared this way, and it
+ * just so happens that it is the same bit corresponding to
+ * the current context.
*/
static __always_inline int
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
- if (cpu_buffer->current_context >= 4)
+ unsigned int val = cpu_buffer->current_context;
+ unsigned long pc = preempt_count();
+ int bit;
+
+ if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+ bit = RB_CTX_NORMAL;
+ else
+ bit = pc & NMI_MASK ? RB_CTX_NMI :
+ pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+
+ if (unlikely(val & (1 << bit)))
return 1;
- cpu_buffer->current_context++;
- /* Interrupts must see this update */
- barrier();
+ val |= (1 << bit);
+ cpu_buffer->current_context = val;
return 0;
}
@@ -2564,9 +2593,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
- /* Don't let the dec leak out */
- barrier();
- cpu_buffer->current_context--;
+ cpu_buffer->current_context &= cpu_buffer->current_context - 1;
}
/**
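
A standalone demonstration of the trick documented above: val &= (val - 1)
clears the lowest set bit, which is exactly the bit of the context being
unlocked, since contexts nest from bit 3 (normal) down to bit 0 (NMI).
Plain C, compilable on its own:

        #include <assert.h>

        int main(void)
        {
                unsigned int ctx = 0;

                ctx |= 1 << 3;          /* enter normal context */
                ctx |= 1 << 1;          /* an IRQ preempts it */
                assert(ctx == 0xa);     /* binary 1010 */

                ctx &= ctx - 1;         /* IRQ unlock: clears bit 1 */
                assert(ctx == 0x8);     /* binary 1000 */

                ctx &= ctx - 1;         /* normal unlock: clears bit 3 */
                assert(ctx == 0);
                return 0;
        }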
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2a8d8a294345..32c069bbf41b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
+/*
+ * Skip 3:
+ *
+ * trace_buffer_unlock_commit_regs()
+ * trace_event_buffer_commit()
+ * trace_event_raw_event_xxx()
+*/
+# define STACK_SKIP 3
+
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
__buffer_unlock_commit(buffer, event);
/*
- * If regs is not set, then skip the following callers:
- * trace_buffer_unlock_commit_regs
- * event_trigger_unlock_commit
- * trace_event_buffer_commit
- * trace_event_raw_event_sched_switch
+ * If regs is not set, then skip the necessary functions.
* Note, we can still get here via blktrace, wakeup tracer
* and mmiotrace, but that's ok if they lose a function or
- * two. They are that meaningful.
+ * two. They are not that meaningful.
*/
- ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
+ ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
trace.skip = skip;
/*
- * Add two, for this function and the call to save_stack_trace()
+ * Add one, for this function and the call to save_stack_trace()
* If regs is set, then these functions will not be in the way.
*/
+#ifndef CONFIG_UNWINDER_ORC
if (!regs)
- trace.skip += 2;
+ trace.skip++;
+#endif
/*
* Since events can happen in NMIs there's no safe way to
@@ -2682,17 +2689,6 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
if (unlikely(in_nmi()))
return;
- /*
- * It is possible that a function is being traced in a
- * location that RCU is not watching. A call to
- * rcu_irq_enter() will make sure that it is, but there's
- * a few internal rcu functions that could be traced
- * where that wont work either. In those cases, we just
- * do nothing.
- */
- if (unlikely(rcu_irq_enter_disabled()))
- return;
-
rcu_irq_enter_irqson();
__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
rcu_irq_exit_irqson();
@@ -2711,11 +2707,10 @@ void trace_dump_stack(int skip)
local_save_flags(flags);
- /*
- * Skip 3 more, seems to get us at the caller of
- * this function.
- */
- skip += 3;
+#ifndef CONFIG_UNWINDER_ORC
+ /* Skip 1 to skip this function. */
+ skip++;
+#endif
__ftrace_trace_stack(global_trace.trace_buffer.buffer,
flags, skip, preempt_count(), NULL);
}
@@ -5621,7 +5616,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int
+static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
struct trace_array *tr = iter->tr;
@@ -5640,7 +5635,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
filp, poll_table);
}
-static unsigned int
+static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
struct trace_iterator *iter = filp->private_data;
@@ -6594,7 +6589,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
return ret;
}
-static unsigned int
+static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
struct ftrace_buffer_info *info = filp->private_data;
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index 79f838a75077..22fee766081b 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -165,7 +165,7 @@ static int benchmark_event_kthread(void *arg)
* this thread will never voluntarily schedule which would
* block synchronize_rcu_tasks() indefinitely.
*/
- cond_resched_rcu_qs();
+ cond_resched();
}
return 0;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index ec0f9aa4e151..1b87157edbff 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
{
struct trace_event_call *call, *p;
const char *last_system = NULL;
+ bool first = false;
int last_i;
int i;
@@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
list_for_each_entry_safe(call, p, &ftrace_events, list) {
/* events are usually grouped together with systems */
if (!last_system || call->class->system != last_system) {
+ first = true;
last_i = 0;
last_system = call->class->system;
}
+ /*
+ * Since calls are grouped by systems, the likelihood that the
+ * next call in the iteration belongs to the same system as the
+ * previous call is high. As an optimization, we skip searching
+ * for a map[] that matches the call's system if the last call
+ * was from the same system. That's what last_i is for. If the
+ * call has the same system as the previous call, then last_i
+ * will be the index of the first map[] that has a matching
+ * system.
+ */
for (i = last_i; i < len; i++) {
if (call->class->system == map[i]->system) {
/* Save the first system if need be */
- if (!last_i)
+ if (first) {
last_i = i;
+ first = false;
+ }
update_event_printk(call, map[i]);
}
}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index f2ac9d44f6c4..87411482a46f 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
#ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_UNWINDER_ORC
+/* Skip 2:
+ * event_triggers_post_call()
+ * trace_event_raw_event_xxx()
+ */
+# define STACK_SKIP 2
+#else
/*
- * Skip 3:
+ * Skip 4:
* stacktrace_trigger()
* event_triggers_post_call()
+ * trace_event_buffer_commit()
* trace_event_raw_event_xxx()
*/
-#define STACK_SKIP 3
+#define STACK_SKIP 4
+#endif
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 27f7ad12c4b1..b611cd36e22d 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
preempt_enable_notrace();
}
+#ifdef CONFIG_UNWINDER_ORC
+/*
+ * Skip 2:
+ *
+ * function_stack_trace_call()
+ * ftrace_call()
+ */
+#define STACK_SKIP 2
+#else
+/*
+ * Skip 3:
+ * __trace_stack()
+ * function_stack_trace_call()
+ * ftrace_call()
+ */
+#define STACK_SKIP 3
+#endif
+
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
if (likely(disabled == 1)) {
pc = preempt_count();
trace_function(tr, ip, parent_ip, flags, pc);
- /*
- * skip over 5 funcs:
- * __ftrace_trace_stack,
- * __trace_stack,
- * function_stack_trace_call
- * ftrace_list_func
- * ftrace_call
- */
- __trace_stack(tr, flags, 5, pc);
+ __trace_stack(tr, flags, STACK_SKIP, pc);
}
atomic_dec(&data->disabled);
@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
tracer_tracing_off(tr);
}
+#ifdef CONFIG_UNWINDER_ORC
/*
- * Skip 4:
+ * Skip 3:
+ *
+ * function_trace_probe_call()
+ * ftrace_ops_assist_func()
+ * ftrace_call()
+ */
+#define FTRACE_STACK_SKIP 3
+#else
+/*
+ * Skip 5:
+ *
+ * __trace_stack()
* ftrace_stacktrace()
* function_trace_probe_call()
- * ftrace_ops_list_func()
+ * ftrace_ops_assist_func()
* ftrace_call()
*/
-#define STACK_SKIP 4
+#define FTRACE_STACK_SKIP 5
+#endif
static __always_inline void trace_stack(struct trace_array *tr)
{
@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
local_save_flags(flags);
pc = preempt_count();
- __trace_stack(tr, flags, STACK_SKIP, pc);
+ __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}
static void
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 492700c5fb4d..1fad24acd444 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
+#include <linux/error-injection.h>
#include "trace_probe.h"
@@ -42,7 +43,6 @@ struct trace_kprobe {
(offsetof(struct trace_kprobe, tp.args) + \
(sizeof(struct probe_arg) * (n)))
-
static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
return tk->rp.handler != NULL;
@@ -87,6 +87,30 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
return nhit;
}
+bool trace_kprobe_on_func_entry(struct trace_event_call *call)
+{
+ struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+
+ return kprobe_on_func_entry(tk->rp.kp.addr,
+ tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
+ tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
+}
+
+bool trace_kprobe_error_injectable(struct trace_event_call *call)
+{
+ struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+ unsigned long addr;
+
+ if (tk->symbol) {
+ addr = (unsigned long)
+ kallsyms_lookup_name(trace_kprobe_symbol(tk));
+ addr += tk->rp.kp.offset;
+ } else {
+ addr = (unsigned long)tk->rp.kp.addr;
+ }
+ return within_error_injection_list(addr);
+}
+
static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);
@@ -1170,7 +1194,7 @@ static int kretprobe_event_define_fields(struct trace_event_call *event_call)
#ifdef CONFIG_PERF_EVENTS
/* Kprobe profile handler */
-static void
+static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
struct trace_event_call *call = &tk->tp.call;
@@ -1179,12 +1203,31 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
int size, __size, dsize;
int rctx;
- if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
- return;
+ if (bpf_prog_array_valid(call)) {
+ unsigned long orig_ip = instruction_pointer(regs);
+ int ret;
+
+ ret = trace_call_bpf(call, regs);
+
+ /*
+ * We need to check and see if we modified the pc of the
+ * pt_regs, and if so clear the kprobe and return 1 so that we
+ * don't do the single stepping.
+ * The ftrace kprobe handler leaves it up to us to re-enable
+ * preemption here before returning if we've modified the ip.
+ */
+ if (orig_ip != instruction_pointer(regs)) {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ return 1;
+ }
+ if (!ret)
+ return 0;
+ }
head = this_cpu_ptr(call->perf_events);
if (hlist_empty(head))
- return;
+ return 0;
dsize = __get_data_size(&tk->tp, regs);
__size = sizeof(*entry) + tk->tp.size + dsize;
@@ -1193,13 +1236,14 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
entry = perf_trace_buf_alloc(size, NULL, &rctx);
if (!entry)
- return;
+ return 0;
entry->ip = (unsigned long)tk->rp.kp.addr;
memset(&entry[1], 0, dsize);
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
head, NULL);
+ return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);
@@ -1275,6 +1319,7 @@ static int kprobe_register(struct trace_event_call *event,
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
+ int ret = 0;
raw_cpu_inc(*tk->nhit);
@@ -1282,9 +1327,9 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
if (tk->tp.flags & TP_FLAG_PROFILE)
- kprobe_perf_func(tk, regs);
+ ret = kprobe_perf_func(tk, regs);
#endif
- return 0; /* We don't tweek kernel, so just return 0 */
+ return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index fb66e3eaa192..e101c5bb9eda 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -252,6 +252,8 @@ struct symbol_cache;
unsigned long update_symbol_cache(struct symbol_cache *sc);
void free_symbol_cache(struct symbol_cache *sc);
struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
+bool trace_kprobe_on_func_entry(struct trace_event_call *call);
+bool trace_kprobe_error_injectable(struct trace_event_call *call);
#else
/* uprobes do not support symbol fetch methods */
#define fetch_symbol_u8 NULL
@@ -277,6 +279,16 @@ alloc_symbol_cache(const char *sym, long offset)
{
return NULL;
}
+
+static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
+{
+ return false;
+}
+
+static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
+{
+ return false;
+}
#endif /* CONFIG_KPROBE_EVENTS */
struct probe_arg {
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 685c50ae6300..671b13457387 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -212,11 +212,10 @@ static int tracepoint_add_func(struct tracepoint *tp,
}
/*
- * rcu_assign_pointer has a smp_wmb() which makes sure that the new
- * probe callbacks array is consistent before setting a pointer to it.
- * This array is referenced by __DO_TRACE from
- * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
- * is used.
+ * rcu_assign_pointer has an smp_store_release() which makes sure
+ * that the new probe callbacks array is consistent before setting
+ * a pointer to it. This array is referenced by __DO_TRACE from
+ * include/linux/tracepoint.h using rcu_dereference_sched().
*/
rcu_assign_pointer(tp->funcs, tp_funcs);
if (!static_key_enabled(&tp->key))
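
The corrected comment describes the canonical RCU publish/subscribe pairing:
fully initialize the object, publish it with rcu_assign_pointer() (release
semantics), and read it with rcu_dereference_sched() under sched-RCU
protection. A generic kernel-style sketch, with illustrative type and
variable names:

        struct my_data {
                int field;
        };

        static struct my_data __rcu *my_ptr;    /* hypothetical global */

        static void publish(void)
        {
                struct my_data *p = kmalloc(sizeof(*p), GFP_KERNEL);

                if (!p)
                        return;
                p->field = 42;                  /* initialize fully ... */
                rcu_assign_pointer(my_ptr, p);  /* ... then publish (release) */
        }

        static int read_field(void)
        {
                struct my_data *p;
                int val = -1;

                rcu_read_lock_sched();
                p = rcu_dereference_sched(my_ptr);
                if (p)
                        val = p->field;
                rcu_read_unlock_sched();
                return val;
        }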
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 43d18cb46308..017044c26233 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,6 +48,7 @@
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
+#include <linux/nmi.h>
#include "workqueue_internal.h"
@@ -2135,7 +2136,7 @@ __acquires(&pool->lock)
* stop_machine. At the same time, report a quiescent RCU state so
* the same condition doesn't freeze RCU.
*/
- cond_resched_rcu_qs();
+ cond_resched();
spin_lock_irq(&pool->lock);
@@ -3806,6 +3807,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
return ret;
}
+EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
/**
* wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
@@ -3939,6 +3941,37 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
return clamp_val(max_active, 1, lim);
}
+/*
+ * Workqueues which may be used during memory reclaim should have a rescuer
+ * to guarantee forward progress.
+ */
+static int init_rescuer(struct workqueue_struct *wq)
+{
+ struct worker *rescuer;
+ int ret;
+
+ if (!(wq->flags & WQ_MEM_RECLAIM))
+ return 0;
+
+ rescuer = alloc_worker(NUMA_NO_NODE);
+ if (!rescuer)
+ return -ENOMEM;
+
+ rescuer->rescue_wq = wq;
+ rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
+ ret = PTR_ERR_OR_ZERO(rescuer->task);
+ if (ret) {
+ kfree(rescuer);
+ return ret;
+ }
+
+ wq->rescuer = rescuer;
+ kthread_bind_mask(rescuer->task, cpu_possible_mask);
+ wake_up_process(rescuer->task);
+
+ return 0;
+}
+
struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
unsigned int flags,
int max_active,
@@ -4001,29 +4034,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
if (alloc_and_link_pwqs(wq) < 0)
goto err_free_wq;
- /*
- * Workqueues which may be used during memory reclaim should
- * have a rescuer to guarantee forward progress.
- */
- if (flags & WQ_MEM_RECLAIM) {
- struct worker *rescuer;
-
- rescuer = alloc_worker(NUMA_NO_NODE);
- if (!rescuer)
- goto err_destroy;
-
- rescuer->rescue_wq = wq;
- rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
- wq->name);
- if (IS_ERR(rescuer->task)) {
- kfree(rescuer);
- goto err_destroy;
- }
-
- wq->rescuer = rescuer;
- kthread_bind_mask(rescuer->task, cpu_possible_mask);
- wake_up_process(rescuer->task);
- }
+ if (wq_online && init_rescuer(wq) < 0)
+ goto err_destroy;
if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
goto err_destroy;
@@ -4463,6 +4475,12 @@ void show_workqueue_state(void)
if (pwq->nr_active || !list_empty(&pwq->delayed_works))
show_pwq(pwq);
spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ /*
+ * We could be printing a lot from atomic context, e.g.
+ * sysrq-t -> show_workqueue_state(). Avoid triggering
+ * hard lockup.
+ */
+ touch_nmi_watchdog();
}
}
@@ -4490,6 +4508,12 @@ void show_workqueue_state(void)
pr_cont("\n");
next_pool:
spin_unlock_irqrestore(&pool->lock, flags);
+ /*
+ * We could be printing a lot from atomic context, e.g.
+ * sysrq-t -> show_workqueue_state(). Avoid triggering
+ * hard lockup.
+ */
+ touch_nmi_watchdog();
}
rcu_read_unlock_sched();
@@ -5629,6 +5653,8 @@ int __init workqueue_init(void)
* archs such as power and arm64. As per-cpu pools created
* previously could be missing node hint and unbound pools NUMA
* affinity, fix them up.
+ *
+ * Also, while iterating workqueues, create rescuers if requested.
*/
wq_numa_init();
@@ -5640,8 +5666,12 @@ int __init workqueue_init(void)
}
}
- list_for_each_entry(wq, &workqueues, list)
+ list_for_each_entry(wq, &workqueues, list) {
wq_update_unbound_numa(wq, smp_processor_id(), true);
+ WARN(init_rescuer(wq),
+ "workqueue: failed to create early rescuer for %s",
+ wq->name);
+ }
mutex_unlock(&wq_pool_mutex);
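
For context, the rescuer machinery above only engages for workqueues
created with WQ_MEM_RECLAIM. A sketch of such a caller (queue name and
work item are hypothetical):

	struct workqueue_struct *wq;

	/* A workqueue used on the memory-reclaim path must pass
	 * WQ_MEM_RECLAIM so that init_rescuer() attaches a rescuer. */
	wq = alloc_workqueue("example_reclaim", WQ_MEM_RECLAIM, 0);
	if (!wq)
		return -ENOMEM;
	queue_work(wq, &example_work);		/* hypothetical work item */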
diff --git a/lib/Kconfig b/lib/Kconfig
index c5e84fbcb30b..e96089499371 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -409,7 +409,11 @@ config HAS_DMA
depends on !NO_DMA
default y
-config DMA_NOOP_OPS
+config SGL_ALLOC
+ bool
+ default n
+
+config DMA_DIRECT_OPS
bool
depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
default n
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9d5b78aad4c5..64d7c19d3167 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1500,6 +1500,10 @@ config FAULT_INJECTION
Provide fault-injection framework.
For more details, see Documentation/fault-injection/.
+config FUNCTION_ERROR_INJECTION
+ def_bool y
+ depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
@@ -1547,6 +1551,16 @@ config FAIL_FUTEX
help
Provide fault-injection capability for futexes.
+config FAIL_FUNCTION
+ bool "Fault-injection capability for functions"
+ depends on FAULT_INJECTION_DEBUG_FS && FUNCTION_ERROR_INJECTION
+ help
+ Provide function-based fault-injection capability.
+	  This will allow you to override a specific function so that it
+	  immediately returns a given value. As a result, the function's
+	  callers will see an error value and have to handle it. This is
+	  useful for testing the error handling in various subsystems.
+
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -1952,7 +1966,7 @@ config STRICT_DEVMEM
bool "Filter access to /dev/mem"
depends on MMU && DEVMEM
depends on ARCH_HAS_DEVMEM_IS_ALLOWED
- default y if TILE || PPC
+ default y if TILE || PPC || X86 || ARM64
---help---
If this option is disabled, you allow userspace (root) access to all
of memory, including kernel and userspace memory. Accidental
diff --git a/lib/Makefile b/lib/Makefile
index d11c48ec8ffd..7adb066692b3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -28,7 +28,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
+lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
lib-y += kobject.o klist.o
@@ -39,7 +39,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o refcount.o usercopy.o errseq.o
+ once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -149,6 +149,7 @@ obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
of-reconfig-notifier-error-inject.o
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index b77d51da8c73..c6659cb37033 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -38,12 +38,10 @@ begin_node:
if (assoc_array_ptr_is_shortcut(cursor)) {
/* Descend through a shortcut */
shortcut = assoc_array_ptr_to_shortcut(cursor);
- smp_read_barrier_depends();
- cursor = READ_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
}
node = assoc_array_ptr_to_node(cursor);
- smp_read_barrier_depends();
slot = 0;
/* We perform two passes of each node.
@@ -55,15 +53,12 @@ begin_node:
*/
has_meta = 0;
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
has_meta |= (unsigned long)ptr;
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
- /* We need a barrier between the read of the pointer
- * and dereferencing the pointer - but only if we are
- * actually going to dereference it.
+			/* We need a barrier between the read of the pointer
+			 * and dereferencing it; that barrier is supplied by
+			 * the above READ_ONCE().
*/
- smp_read_barrier_depends();
-
/* Invoke the callback */
ret = iterator(assoc_array_ptr_to_leaf(ptr),
iterator_data);
@@ -86,10 +81,8 @@ begin_node:
continue_node:
node = assoc_array_ptr_to_node(cursor);
- smp_read_barrier_depends();
-
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
if (assoc_array_ptr_is_meta(ptr)) {
cursor = ptr;
goto begin_node;
@@ -98,16 +91,15 @@ continue_node:
finished_node:
/* Move up to the parent (may need to skip back over a shortcut) */
- parent = READ_ONCE(node->back_pointer);
+ parent = READ_ONCE(node->back_pointer); /* Address dependency. */
slot = node->parent_slot;
if (parent == stop)
return 0;
if (assoc_array_ptr_is_shortcut(parent)) {
shortcut = assoc_array_ptr_to_shortcut(parent);
- smp_read_barrier_depends();
cursor = parent;
- parent = READ_ONCE(shortcut->back_pointer);
+ parent = READ_ONCE(shortcut->back_pointer); /* Address dependency. */
slot = shortcut->parent_slot;
if (parent == stop)
return 0;
@@ -147,7 +139,7 @@ int assoc_array_iterate(const struct assoc_array *array,
void *iterator_data),
void *iterator_data)
{
- struct assoc_array_ptr *root = READ_ONCE(array->root);
+ struct assoc_array_ptr *root = READ_ONCE(array->root); /* Address dependency. */
if (!root)
return 0;
@@ -194,7 +186,7 @@ assoc_array_walk(const struct assoc_array *array,
pr_devel("-->%s()\n", __func__);
- cursor = READ_ONCE(array->root);
+ cursor = READ_ONCE(array->root); /* Address dependency. */
if (!cursor)
return assoc_array_walk_tree_empty;
@@ -216,11 +208,9 @@ jumped:
consider_node:
node = assoc_array_ptr_to_node(cursor);
- smp_read_barrier_depends();
-
slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
slot &= ASSOC_ARRAY_FAN_MASK;
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
pr_devel("consider slot %x [ix=%d type=%lu]\n",
slot, level, (unsigned long)ptr & 3);
@@ -254,7 +244,6 @@ consider_node:
cursor = ptr;
follow_shortcut:
shortcut = assoc_array_ptr_to_shortcut(cursor);
- smp_read_barrier_depends();
pr_devel("shortcut to %d\n", shortcut->skip_to_level);
sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
BUG_ON(sc_level > shortcut->skip_to_level);
@@ -294,7 +283,7 @@ follow_shortcut:
} while (sc_level < shortcut->skip_to_level);
/* The shortcut matches the leaf's index to this point. */
- cursor = READ_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
level = sc_level;
goto jumped;
@@ -331,20 +320,18 @@ void *assoc_array_find(const struct assoc_array *array,
return NULL;
node = result.terminal_node.node;
- smp_read_barrier_depends();
	/* If the target key is available to us, it has to be pointed to by
* the terminal node.
*/
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
* and dereferencing the pointer - but only if we are
* actually going to dereference it.
*/
leaf = assoc_array_ptr_to_leaf(ptr);
- smp_read_barrier_depends();
if (ops->compare_object(leaf, index_key))
return (void *)leaf;
}
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
new file mode 100644
index 000000000000..266a97c5708b
--- /dev/null
+++ b/lib/bucket_locks.c
@@ -0,0 +1,54 @@
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
+ * control the number of elements allocated: max_size gives the maximum
+ * number of locks to allocate, and cpu_mult gives the number of locks
+ * per CPU (0 means allocate exactly max_size locks). Callers must
+ * ensure the resulting size is a power of 2, since size - 1 is used
+ * as a hash mask.
+ */
+
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+ size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+{
+ spinlock_t *tlocks = NULL;
+ unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+ unsigned int nr_pcpus = 2;
+#else
+ unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+ if (cpu_mult) {
+ nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
+ size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
+ } else {
+ size = max_size;
+ }
+
+ if (sizeof(spinlock_t) != 0) {
+ if (gfpflags_allow_blocking(gfp))
+ tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
+ else
+ tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+ if (!tlocks)
+ return -ENOMEM;
+ for (i = 0; i < size; i++)
+ spin_lock_init(&tlocks[i]);
+ }
+
+ *locks = tlocks;
+ *locks_mask = size - 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(alloc_bucket_spinlocks);
+
+void free_bucket_spinlocks(spinlock_t *locks)
+{
+ kvfree(locks);
+}
+EXPORT_SYMBOL(free_bucket_spinlocks);
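
A sketch of the intended calling convention for the new helpers; the
variable names are illustrative, and max_size is chosen as a power of
two so that the returned mask is usable for hashing:

	spinlock_t *locks;
	unsigned int mask;
	int err;

	/* Exactly 1024 locks: cpu_mult == 0 skips the per-CPU scaling. */
	err = alloc_bucket_spinlocks(&locks, &mask, 1024, 0, GFP_KERNEL);
	if (err)
		return err;

	spin_lock(&locks[hash & mask]);		/* 'hash' is illustrative */
	/* ... touch the bucket ... */
	spin_unlock(&locks[hash & mask]);

	free_bucket_spinlocks(locks);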
diff --git a/lib/chacha20.c b/lib/chacha20.c
index 250ceed9ec9a..c1cc50fb68c9 100644
--- a/lib/chacha20.c
+++ b/lib/chacha20.c
@@ -16,12 +16,7 @@
#include <asm/unaligned.h>
#include <crypto/chacha20.h>
-static inline u32 rotl32(u32 v, u8 n)
-{
- return (v << n) | (v >> (sizeof(v) * 8 - n));
-}
-
-extern void chacha20_block(u32 *state, void *stream)
+void chacha20_block(u32 *state, u32 *stream)
{
u32 x[16], *out = stream;
int i;
@@ -30,45 +25,45 @@ extern void chacha20_block(u32 *state, void *stream)
x[i] = state[i];
for (i = 0; i < 20; i += 2) {
- x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16);
- x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16);
- x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16);
- x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16);
+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16);
- x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12);
- x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12);
- x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12);
- x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12);
+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12);
+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12);
+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12);
+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12);
- x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8);
- x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8);
- x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8);
- x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8);
+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8);
+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8);
+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8);
+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8);
- x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7);
- x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7);
- x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7);
- x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7);
+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7);
+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7);
+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7);
+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7);
- x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16);
- x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16);
- x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16);
- x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16);
+ x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16);
+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16);
+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16);
+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16);
- x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12);
- x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12);
- x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12);
- x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12);
+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12);
+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12);
+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12);
+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12);
- x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8);
- x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8);
- x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8);
- x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8);
+ x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8);
+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8);
+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8);
+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8);
- x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7);
- x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7);
- x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7);
- x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7);
+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7);
+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7);
+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
}
for (i = 0; i < ARRAY_SIZE(x); i++)
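
The conversion above replaces the local rotl32() helper with the
generic rol32() from include/linux/bitops.h. The identity relied on,
shown on a sample value:

	/* rol32(v, n) == (v << n) | (v >> (32 - n)) for 0 < n < 32 */
	u32 demo = rol32(0x80000001U, 1);	/* == 0x00000003 */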
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index 7f6dd68d2d09..d873b34039ff 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -51,8 +51,49 @@ u16 const crc_ccitt_table[256] = {
};
EXPORT_SYMBOL(crc_ccitt_table);
+/*
+ * A similar table for the CRC16 variant known as CRC-CCITT-FALSE:
+ * non-reflected bit order, and the final value is not augmented.
+ */
+u16 const crc_ccitt_false_table[256] = {
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
+ 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
+ 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
+ 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
+ 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
+ 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
+ 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
+ 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
+ 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
+ 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
+ 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
+ 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
+ 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
+ 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
+ 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
+ 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
+ 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
+ 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
+ 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
+ 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
+ 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
+ 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
+ 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
+};
+EXPORT_SYMBOL(crc_ccitt_false_table);
+
/**
- * crc_ccitt - recompute the CRC for the data buffer
+ * crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
+ * buffer
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
@@ -65,5 +106,20 @@ u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
}
EXPORT_SYMBOL(crc_ccitt);
+/**
+ * crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
+ * for the data buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
+ */
+u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = crc_ccitt_false_byte(crc, *buffer++);
+ return crc;
+}
+EXPORT_SYMBOL(crc_ccitt_false);
+
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
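
A usage sketch for the new helper; CRC-CCITT-FALSE is conventionally
seeded with 0xffff, and the buffer contents here are illustrative:

	#include <linux/crc-ccitt.h>

	static const u8 buf[] = { 0x31, 0x32, 0x33 };	/* "123" */
	u16 crc = crc_ccitt_false(0xffff, buf, sizeof(buf));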
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
new file mode 100644
index 000000000000..40b1f92f2214
--- /dev/null
+++ b/lib/dma-direct.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA operations that map physical memory directly without using an IOMMU or
+ * flushing caches.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-contiguous.h>
+#include <linux/pfn.h>
+
+#define DIRECT_MAPPING_ERROR 0
+
+/*
+ * Most architectures use ZONE_DMA for the first 16 Megabytes, but
+ * some use it for entirely different regions:
+ */
+#ifndef ARCH_ZONE_DMA_BITS
+#define ARCH_ZONE_DMA_BITS 24
+#endif
+
+static bool
+check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
+ const char *caller)
+{
+ if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
+ if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+ dev_err(dev,
+ "%s: overflow %pad+%zu of device mask %llx\n",
+ caller, &dma_addr, size, *dev->dma_mask);
+ }
+ return false;
+ }
+ return true;
+}
+
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+{
+ return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+}
+
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ int page_order = get_order(size);
+ struct page *page = NULL;
+
+	/* GFP_DMA32 and GFP_DMA are no-ops without the corresponding zones: */
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+ gfp |= GFP_DMA;
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
+ gfp |= GFP_DMA32;
+
+again:
+ /* CMA can be used only in the context which permits sleeping */
+ if (gfpflags_allow_blocking(gfp)) {
+ page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ dma_release_from_contiguous(dev, page, count);
+ page = NULL;
+ }
+ }
+ if (!page)
+ page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
+
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ __free_pages(page, page_order);
+ page = NULL;
+
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+ !(gfp & GFP_DMA)) {
+ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+ goto again;
+ }
+ }
+
+ if (!page)
+ return NULL;
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ memset(page_address(page), 0, size);
+ return page_address(page);
+}
+
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
+
+ if (!check_addr(dev, dma_addr, size, __func__))
+ return DIRECT_MAPPING_ERROR;
+ return dma_addr;
+}
+
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
+ if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
+ return 0;
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nents;
+}
+
+int dma_direct_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_ZONE_DMA
+ if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+ return 0;
+#else
+ /*
+ * Because 32-bit DMA masks are so common we expect every architecture
+ * to be able to satisfy them - either by not supporting more physical
+ * memory, or by providing a ZONE_DMA32. If neither is the case, the
+ * architecture needs to use an IOMMU instead of the direct mapping.
+ */
+ if (mask < DMA_BIT_MASK(32))
+ return 0;
+#endif
+ return 1;
+}
+
+static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == DIRECT_MAPPING_ERROR;
+}
+
+const struct dma_map_ops dma_direct_ops = {
+ .alloc = dma_direct_alloc,
+ .free = dma_direct_free,
+ .map_page = dma_direct_map_page,
+ .map_sg = dma_direct_map_sg,
+ .dma_supported = dma_direct_supported,
+ .mapping_error = dma_direct_mapping_error,
+};
+EXPORT_SYMBOL(dma_direct_ops);
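
A worked illustration of the coherent-mask check used by
dma_coherent_ok() above, assuming an identity phys_to_dma() mapping
(many architectures add an offset, so this is a sketch only):

	/* A 0x1000-byte buffer at physical 0xfffff000 ends at 0xffffffff
	 * and still satisfies a 32-bit coherent mask; one byte higher
	 * and it would not. */
	u64 mask = DMA_BIT_MASK(32);		/* 0xffffffff */
	u64 phys = 0xfffff000;
	size_t size = 0x1000;
	bool ok = phys + size - 1 <= mask;	/* true */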
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
deleted file mode 100644
index a10185b0c2d4..000000000000
--- a/lib/dma-noop.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * lib/dma-noop.c
- *
- * DMA operations that map to physical addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/pfn.h>
-
-static void *dma_noop_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret;
-
- ret = (void *)__get_free_pages(gfp, get_order(size));
- if (ret)
- *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
-
- return ret;
-}
-
-static void dma_noop_free(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
-}
-
-static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, nents, i) {
- dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
- void *va;
-
- BUG_ON(!sg_page(sg));
- va = sg_virt(sg);
- sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
- sg_dma_len(sg) = sg->length;
- }
-
- return nents;
-}
-
-const struct dma_map_ops dma_noop_ops = {
- .alloc = dma_noop_alloc,
- .free = dma_noop_free,
- .map_page = dma_noop_map_page,
- .map_sg = dma_noop_map_sg,
-};
-
-EXPORT_SYMBOL(dma_noop_ops);
diff --git a/lib/error-inject.c b/lib/error-inject.c
new file mode 100644
index 000000000000..c0d4600f4896
--- /dev/null
+++ b/lib/error-inject.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+// error-inject.c: Function-level error injection table
+#include <linux/error-injection.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/* Whitelist of symbols that can be overridden for error injection. */
+static LIST_HEAD(error_injection_list);
+static DEFINE_MUTEX(ei_mutex);
+struct ei_entry {
+ struct list_head list;
+ unsigned long start_addr;
+ unsigned long end_addr;
+ int etype;
+ void *priv;
+};
+
+bool within_error_injection_list(unsigned long addr)
+{
+ struct ei_entry *ent;
+ bool ret = false;
+
+ mutex_lock(&ei_mutex);
+ list_for_each_entry(ent, &error_injection_list, list) {
+ if (addr >= ent->start_addr && addr < ent->end_addr) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&ei_mutex);
+ return ret;
+}
+
+int get_injectable_error_type(unsigned long addr)
+{
+ struct ei_entry *ent;
+
+ list_for_each_entry(ent, &error_injection_list, list) {
+ if (addr >= ent->start_addr && addr < ent->end_addr)
+ return ent->etype;
+ }
+ return EI_ETYPE_NONE;
+}
+
+/*
+ * Lookup and populate the error_injection_list.
+ *
+ * For safety reasons we only allow certain functions to be overridden
+ * for error injection, so we need to populate the list of the symbols
+ * that have been marked as safe for overriding.
+ */
+static void populate_error_injection_list(struct error_injection_entry *start,
+ struct error_injection_entry *end,
+ void *priv)
+{
+ struct error_injection_entry *iter;
+ struct ei_entry *ent;
+ unsigned long entry, offset = 0, size = 0;
+
+ mutex_lock(&ei_mutex);
+ for (iter = start; iter < end; iter++) {
+ entry = arch_deref_entry_point((void *)iter->addr);
+
+ if (!kernel_text_address(entry) ||
+ !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+ pr_err("Failed to find error inject entry at %p\n",
+ (void *)entry);
+ continue;
+ }
+
+ ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ break;
+ ent->start_addr = entry;
+ ent->end_addr = entry + size;
+ ent->etype = iter->etype;
+ ent->priv = priv;
+ INIT_LIST_HEAD(&ent->list);
+ list_add_tail(&ent->list, &error_injection_list);
+ }
+ mutex_unlock(&ei_mutex);
+}
+
+/* Markers of the _error_inject_whitelist section */
+extern struct error_injection_entry __start_error_injection_whitelist[];
+extern struct error_injection_entry __stop_error_injection_whitelist[];
+
+static void __init populate_kernel_ei_list(void)
+{
+ populate_error_injection_list(__start_error_injection_whitelist,
+ __stop_error_injection_whitelist,
+ NULL);
+}
+
+#ifdef CONFIG_MODULES
+static void module_load_ei_list(struct module *mod)
+{
+ if (!mod->num_ei_funcs)
+ return;
+
+ populate_error_injection_list(mod->ei_funcs,
+ mod->ei_funcs + mod->num_ei_funcs, mod);
+}
+
+static void module_unload_ei_list(struct module *mod)
+{
+ struct ei_entry *ent, *n;
+
+ if (!mod->num_ei_funcs)
+ return;
+
+ mutex_lock(&ei_mutex);
+ list_for_each_entry_safe(ent, n, &error_injection_list, list) {
+ if (ent->priv == mod) {
+ list_del_init(&ent->list);
+ kfree(ent);
+ }
+ }
+ mutex_unlock(&ei_mutex);
+}
+
+/* Module notifier call back, checking error injection table on the module */
+static int ei_module_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+
+ if (val == MODULE_STATE_COMING)
+ module_load_ei_list(mod);
+ else if (val == MODULE_STATE_GOING)
+ module_unload_ei_list(mod);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ei_module_nb = {
+ .notifier_call = ei_module_callback,
+ .priority = 0
+};
+
+static __init int module_ei_init(void)
+{
+ return register_module_notifier(&ei_module_nb);
+}
+#else /* !CONFIG_MODULES */
+#define module_ei_init() (0)
+#endif
+
+/*
+ * error_injection/list -- shows which functions can be overridden for
+ * error injection.
+ */
+static void *ei_seq_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&ei_mutex);
+ return seq_list_start(&error_injection_list, *pos);
+}
+
+static void ei_seq_stop(struct seq_file *m, void *v)
+{
+ mutex_unlock(&ei_mutex);
+}
+
+static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &error_injection_list, pos);
+}
+
+static const char *error_type_string(int etype)
+{
+ switch (etype) {
+ case EI_ETYPE_NULL:
+ return "NULL";
+ case EI_ETYPE_ERRNO:
+ return "ERRNO";
+ case EI_ETYPE_ERRNO_NULL:
+ return "ERRNO_NULL";
+ default:
+ return "(unknown)";
+ }
+}
+
+static int ei_seq_show(struct seq_file *m, void *v)
+{
+ struct ei_entry *ent = list_entry(v, struct ei_entry, list);
+
+ seq_printf(m, "%pf\t%s\n", (void *)ent->start_addr,
+ error_type_string(ent->etype));
+ return 0;
+}
+
+static const struct seq_operations ei_seq_ops = {
+ .start = ei_seq_start,
+ .next = ei_seq_next,
+ .stop = ei_seq_stop,
+ .show = ei_seq_show,
+};
+
+static int ei_open(struct inode *inode, struct file *filp)
+{
+ return seq_open(filp, &ei_seq_ops);
+}
+
+static const struct file_operations debugfs_ei_ops = {
+ .open = ei_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init ei_debugfs_init(void)
+{
+ struct dentry *dir, *file;
+
+ dir = debugfs_create_dir("error_injection", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
+ if (!file) {
+ debugfs_remove(dir);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int __init init_error_injection(void)
+{
+ populate_kernel_ei_list();
+
+ if (!module_ei_init())
+ ei_debugfs_init();
+
+ return 0;
+}
+late_initcall(init_error_injection);
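
Functions enter the whitelist above via the ALLOW_ERROR_INJECTION()
annotation introduced by the same series; a sketch with hypothetical
names, where ERRNO means an injected value is treated as a negative
errno:

	#include <linux/error-injection.h>

	static int example_open_device(struct example_dev *dev)
	{
		/* ... real work ... */
		return 0;
	}
	ALLOW_ERROR_INJECTION(example_open_device, ERRNO);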
diff --git a/lib/errseq.c b/lib/errseq.c
index 79cc66897db4..df782418b333 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -46,14 +46,14 @@
* @eseq: errseq_t field that should be set
* @err: error to set (must be between -1 and -MAX_ERRNO)
*
- * This function sets the error in *eseq, and increments the sequence counter
+ * This function sets the error in @eseq, and increments the sequence counter
* if the last sequence was sampled at some point in the past.
*
* Any error set will always overwrite an existing error.
*
- * We do return the latest value here, primarily for debugging purposes. The
- * return value should not be used as a previously sampled value in later calls
- * as it will not have the SEEN flag set.
+ * Return: The previous value, primarily for debugging purposes. The
+ * return value should not be used as a previously sampled value in later
+ * calls as it will not have the SEEN flag set.
*/
errseq_t errseq_set(errseq_t *eseq, int err)
{
@@ -108,11 +108,13 @@ errseq_t errseq_set(errseq_t *eseq, int err)
EXPORT_SYMBOL(errseq_set);
/**
- * errseq_sample - grab current errseq_t value
- * @eseq: pointer to errseq_t to be sampled
+ * errseq_sample() - Grab current errseq_t value.
+ * @eseq: Pointer to errseq_t to be sampled.
*
* This function allows callers to sample an errseq_t value, marking it as
* "seen" if required.
+ *
+ * Return: The current errseq value.
*/
errseq_t errseq_sample(errseq_t *eseq)
{
@@ -134,15 +136,15 @@ errseq_t errseq_sample(errseq_t *eseq)
EXPORT_SYMBOL(errseq_sample);
/**
- * errseq_check - has an error occurred since a particular sample point?
- * @eseq: pointer to errseq_t value to be checked
- * @since: previously-sampled errseq_t from which to check
+ * errseq_check() - Has an error occurred since a particular sample point?
+ * @eseq: Pointer to errseq_t value to be checked.
+ * @since: Previously-sampled errseq_t from which to check.
*
- * Grab the value that eseq points to, and see if it has changed "since"
- * the given value was sampled. The "since" value is not advanced, so there
+ * Grab the value that eseq points to, and see if it has changed @since
+ * the given value was sampled. The @since value is not advanced, so there
* is no need to mark the value as seen.
*
- * Returns the latest error set in the errseq_t or 0 if it hasn't changed.
+ * Return: The latest error set in the errseq_t or 0 if it hasn't changed.
*/
int errseq_check(errseq_t *eseq, errseq_t since)
{
@@ -155,11 +157,11 @@ int errseq_check(errseq_t *eseq, errseq_t since)
EXPORT_SYMBOL(errseq_check);
/**
- * errseq_check_and_advance - check an errseq_t and advance to current value
- * @eseq: pointer to value being checked and reported
- * @since: pointer to previously-sampled errseq_t to check against and advance
+ * errseq_check_and_advance() - Check an errseq_t and advance to current value.
+ * @eseq: Pointer to value being checked and reported.
+ * @since: Pointer to previously-sampled errseq_t to check against and advance.
*
- * Grab the eseq value, and see whether it matches the value that "since"
+ * Grab the eseq value, and see whether it matches the value that @since
* points to. If it does, then just return 0.
*
* If it doesn't, then the value has changed. Set the "seen" flag, and try to
@@ -170,6 +172,9 @@ EXPORT_SYMBOL(errseq_check);
* value. The caller must provide that if necessary. Because of this, callers
* may want to do a lockless errseq_check before taking the lock and calling
* this.
+ *
+ * Return: Negative errno if one has been stored, or 0 if no new error has
+ * occurred.
*/
int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
{
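
The sample/check pattern documented above looks roughly like this in a
caller ('inode_err' is illustrative; file->f_wb_err is tracked the same
way):

	errseq_t since = errseq_sample(&inode_err);

	/* Writer side, on failure: errseq_set(&inode_err, -EIO); */

	/* Reader side: has anything gone wrong since the sample? */
	int err = errseq_check(&inode_err, since);
	if (err)
		return err;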
diff --git a/lib/kobject.c b/lib/kobject.c
index 763d70a18941..afd5a3fc6123 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kobject.c - library routines for handling generic kernel objects
*
@@ -5,9 +6,6 @@
* Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2007 Novell Inc.
*
- * This file is released under the GPLv2.
- *
- *
* Please see the file Documentation/kobject.txt for critical information
* about using the kobject interface.
*/
@@ -1039,6 +1037,7 @@ void *kobj_ns_grab_current(enum kobj_ns_type type)
return ns;
}
+EXPORT_SYMBOL_GPL(kobj_ns_grab_current);
const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
{
@@ -1074,3 +1073,4 @@ void kobj_ns_drop(enum kobj_ns_type type, void *ns)
kobj_ns_ops_tbl[type]->drop_ns(ns);
spin_unlock(&kobj_ns_type_lock);
}
+EXPORT_SYMBOL_GPL(kobj_ns_drop);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2615074d3de5..9fe6ec8fda28 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kernel userspace event delivery
*
@@ -5,8 +6,6 @@
* Copyright (C) 2004 Novell, Inc. All rights reserved.
* Copyright (C) 2004 IBM, Inc. All rights reserved.
*
- * Licensed under the GNU GPL v2.
- *
* Authors:
* Robert Love <rml@novell.com>
* Kay Sievers <kay.sievers@vrfy.org>
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 57fd45ab7af1..08c60d10747f 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -671,7 +671,23 @@ do { \
************** MIPS/64 **************
***************************************/
#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+/*
+ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
+ * code below, so we special case MIPS64r6 until the compiler can do better.
+ */
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ __asm__ ("dmulu %0,%1,%2" \
+ : "=d" ((UDItype)(w0)) \
+ : "d" ((UDItype)(u)), \
+ "d" ((UDItype)(v))); \
+ __asm__ ("dmuhu %0,%1,%2" \
+ : "=d" ((UDItype)(w1)) \
+ : "d" ((UDItype)(u)), \
+ "d" ((UDItype)(v))); \
+} while (0)
+#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
#define umul_ppmm(w1, w0, u, v) \
do { \
typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
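
For clarity, umul_ppmm(w1, w0, u, v) splits the 128-bit product of two
64-bit operands into a high/low pair, which is exactly what the
dmulu/dmuhu instruction pair computes. A portable sketch using GCC's
unsigned __int128:

	unsigned long long u = 0xdeadbeefcafebabeULL;
	unsigned long long v = 0x0123456789abcdefULL;
	unsigned __int128 prod = (unsigned __int128)u * v;
	unsigned long long w1 = prod >> 64;	/* high word: dmuhu */
	unsigned long long w0 = prod;		/* low word:  dmulu */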
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe03c6d52761..30e7dd88148b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -197,10 +197,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
- * Restore per-cpu operation. smp_store_release() is paired with
- * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
- * that the zeroing is visible to all percpu accesses which can see
- * the following __PERCPU_REF_ATOMIC clearing.
+ * Restore per-cpu operation. smp_store_release() is paired
+ * with READ_ONCE() in __ref_is_percpu() and guarantees that the
+ * zeroing is visible to all percpu accesses which can see the
+ * following __PERCPU_REF_ATOMIC clearing.
*/
for_each_possible_cpu(cpu)
*per_cpu_ptr(percpu_count, cpu) = 0;
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ddd7dde87c3c..3825c30aaa36 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#define ASSERT_RHT_MUTEX(HT)
#endif
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
- gfp_t gfp)
-{
- unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
- unsigned int nr_pcpus = 2;
-#else
- unsigned int nr_pcpus = num_possible_cpus();
-#endif
-
- nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
- size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
-
- /* Never allocate more than 0.5 locks per bucket */
- size = min_t(unsigned int, size, tbl->size >> 1);
-
- if (tbl->nest)
- size = min(size, 1U << tbl->nest);
-
- if (sizeof(spinlock_t) != 0) {
- if (gfpflags_allow_blocking(gfp))
- tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
- else
- tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
- gfp);
- if (!tbl->locks)
- return -ENOMEM;
- for (i = 0; i < size; i++)
- spin_lock_init(&tbl->locks[i]);
- }
- tbl->locks_mask = size - 1;
-
- return 0;
-}
-
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
if (tbl->nest)
nested_bucket_table_free(tbl);
- kvfree(tbl->locks);
+ free_bucket_spinlocks(tbl->locks);
kvfree(tbl);
}
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
gfp_t gfp)
{
struct bucket_table *tbl = NULL;
- size_t size;
+ size_t size, max_locks;
int i;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
tbl->size = size;
- if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+ max_locks = size >> 1;
+ if (tbl->nest)
+ max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+ if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+ ht->p.locks_mul, gfp) < 0) {
bucket_table_free(tbl);
return NULL;
}
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
iter->p = NULL;
iter->slot = 0;
iter->skip = 0;
+ iter->end_of_table = 0;
spin_lock(&ht->lock);
iter->walker.tbl =
@@ -732,7 +702,7 @@ void rhashtable_walk_exit(struct rhashtable_iter *iter)
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
- * rhashtable_walk_start - Start a hash table walk
+ * rhashtable_walk_start_check - Start a hash table walk
* @iter: Hash table iterator
*
* Start a hash table walk at the current iterator position. Note that we take
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 * Returns -EAGAIN if resize event occurred. Note that the iterator
* will rewind back to the beginning and you may use it immediately
* by calling rhashtable_walk_next.
+ *
+ * rhashtable_walk_start is defined as an inline variant that returns
+ * void. This is preferred in cases where the caller would ignore
+ * resize events and always continue.
*/
-int rhashtable_walk_start(struct rhashtable_iter *iter)
+int rhashtable_walk_start_check(struct rhashtable_iter *iter)
__acquires(RCU)
{
struct rhashtable *ht = iter->ht;
@@ -757,28 +731,26 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
list_del(&iter->walker.list);
spin_unlock(&ht->lock);
- if (!iter->walker.tbl) {
+ if (!iter->walker.tbl && !iter->end_of_table) {
iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
return -EAGAIN;
}
return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
/**
- * rhashtable_walk_next - Return the next object and advance the iterator
- * @iter: Hash table iterator
+ * __rhashtable_walk_find_next - Find the next element in a table (or the first
+ * one in case of a new walk).
*
- * Note that you must call rhashtable_walk_stop when you are finished
- * with the walk.
+ * @iter: Hash table iterator
*
- * Returns the next object or NULL when the end of the table is reached.
+ * Returns the found object or NULL when the end of the table is reached.
*
- * Returns -EAGAIN if resize event occured. Note that the iterator
- * will rewind back to the beginning and you may continue to use it.
+ * Returns -EAGAIN if resize event occurred.
*/
-void *rhashtable_walk_next(struct rhashtable_iter *iter)
+static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
struct bucket_table *tbl = iter->walker.tbl;
struct rhlist_head *list = iter->list;
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
struct rhash_head *p = iter->p;
bool rhlist = ht->rhlist;
- if (p) {
- if (!rhlist || !(list = rcu_dereference(list->next))) {
- p = rcu_dereference(p->next);
- list = container_of(p, struct rhlist_head, rhead);
- }
- goto next;
- }
+ if (!tbl)
+ return NULL;
for (; iter->slot < tbl->size; iter->slot++) {
int skip = iter->skip;
@@ -836,13 +803,90 @@ next:
iter->slot = 0;
iter->skip = 0;
return ERR_PTR(-EAGAIN);
+ } else {
+ iter->end_of_table = true;
}
return NULL;
}
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+ struct rhlist_head *list = iter->list;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+ bool rhlist = ht->rhlist;
+
+ if (p) {
+ if (!rhlist || !(list = rcu_dereference(list->next))) {
+ p = rcu_dereference(p->next);
+ list = container_of(p, struct rhlist_head, rhead);
+ }
+ if (!rht_is_a_nulls(p)) {
+ iter->skip++;
+ iter->p = p;
+ iter->list = list;
+ return rht_obj(ht, rhlist ? &list->rhead : p);
+ }
+
+		/* At the end of this slot, switch to the next one and then find
+ * next entry from that point.
+ */
+ iter->skip = 0;
+ iter->slot++;
+ }
+
+ return __rhashtable_walk_find_next(iter);
+}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
+ * rhashtable_walk_peek - Return the next object but don't advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_peek(struct rhashtable_iter *iter)
+{
+ struct rhlist_head *list = iter->list;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+
+ if (p)
+ return rht_obj(ht, ht->rhlist ? &list->rhead : p);
+
+ /* No object found in current iter, find next one in the table. */
+
+ if (iter->skip) {
+ /* A nonzero skip value points to the next entry in the table
+ * beyond that last one that was found. Decrement skip so
+ * we find the current value. __rhashtable_walk_find_next
+ * will restore the original value of skip assuming that
+ * the table hasn't changed.
+ */
+ iter->skip--;
+ }
+
+ return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
+
+/**
* rhashtable_walk_stop - Finish a hash table walk
* @iter: Hash table iterator
*
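
A sketch of the resulting walker API; the object type and handling are
illustrative:

	struct rhashtable_iter iter;
	struct example_obj *obj;		/* hypothetical */

	rhashtable_walk_enter(&ht, &iter);
	rhashtable_walk_start(&iter);		/* void variant: resizes ignored */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize: iterator rewound */
			break;
		}
		/* ... use obj ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);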
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 80aa8d5463fa..42b5ca0acf93 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -462,7 +462,7 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
*/
atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch);
sbq_index_atomic_inc(&sbq->wake_index);
- wake_up(&ws->wait);
+ wake_up_nr(&ws->wait, wake_batch);
}
}
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7c1c55f7daaa..53728d391d3a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -474,6 +474,133 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
+#ifdef CONFIG_SGL_ALLOC
+
+/**
+ * sgl_alloc_order - allocate a scatterlist and its pages
+ * @length: Length in bytes of the scatterlist. Must be at least one
+ * @order: Second argument for alloc_pages()
+ * @chainable: Whether or not to allocate an extra element in the scatterlist
+ * for scatterlist chaining purposes
+ * @gfp: Memory allocation flags
+ * @nent_p: [out] Number of entries in the scatterlist that have pages
+ *
+ * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
+ */
+struct scatterlist *sgl_alloc_order(unsigned long long length,
+ unsigned int order, bool chainable,
+ gfp_t gfp, unsigned int *nent_p)
+{
+ struct scatterlist *sgl, *sg;
+ struct page *page;
+ unsigned int nent, nalloc;
+ u32 elem_len;
+
+ nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
+ /* Check for integer overflow */
+ if (length > (nent << (PAGE_SHIFT + order)))
+ return NULL;
+ nalloc = nent;
+ if (chainable) {
+ /* Check for integer overflow */
+ if (nalloc + 1 < nalloc)
+ return NULL;
+ nalloc++;
+ }
+ sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
+ (gfp & ~GFP_DMA) | __GFP_ZERO);
+ if (!sgl)
+ return NULL;
+
+ sg_init_table(sgl, nalloc);
+ sg = sgl;
+ while (length) {
+ elem_len = min_t(u64, length, PAGE_SIZE << order);
+ page = alloc_pages(gfp, order);
+ if (!page) {
+ sgl_free(sgl);
+ return NULL;
+ }
+
+ sg_set_page(sg, page, elem_len, 0);
+ length -= elem_len;
+ sg = sg_next(sg);
+ }
+	WARN_ONCE(length, "length = %llu\n", length);
+ if (nent_p)
+ *nent_p = nent;
+ return sgl;
+}
+EXPORT_SYMBOL(sgl_alloc_order);
+
+/**
+ * sgl_alloc - allocate a scatterlist and its pages
+ * @length: Length in bytes of the scatterlist
+ * @gfp: Memory allocation flags
+ * @nent_p: [out] Number of entries in the scatterlist
+ *
+ * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
+ */
+struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
+ unsigned int *nent_p)
+{
+ return sgl_alloc_order(length, 0, false, gfp, nent_p);
+}
+EXPORT_SYMBOL(sgl_alloc);
+
+/**
+ * sgl_free_n_order - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ * @nents: Maximum number of elements to free
+ * @order: Second argument for __free_pages()
+ *
+ * Notes:
+ * - If several scatterlists have been chained and each chain element is
+ *   freed separately, then it's essential to set nents correctly so that
+ *   no page gets freed twice.
+ * - All pages in a chained scatterlist can be freed at once by setting @nents
+ * to a high number.
+ */
+void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i) {
+ if (!sg)
+ break;
+ page = sg_page(sg);
+ if (page)
+ __free_pages(page, order);
+ }
+ kfree(sgl);
+}
+EXPORT_SYMBOL(sgl_free_n_order);
+
+/**
+ * sgl_free_order - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ * @order: Second argument for __free_pages()
+ */
+void sgl_free_order(struct scatterlist *sgl, int order)
+{
+ sgl_free_n_order(sgl, INT_MAX, order);
+}
+EXPORT_SYMBOL(sgl_free_order);
+
+/**
+ * sgl_free - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ */
+void sgl_free(struct scatterlist *sgl)
+{
+ sgl_free_order(sgl, 0);
+}
+EXPORT_SYMBOL(sgl_free);
+
+#endif /* CONFIG_SGL_ALLOC */
+
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset)
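
A sketch of the new allocator pair in a driver-style caller (the size
is illustrative):

	unsigned int nents;
	struct scatterlist *sgl;

	/* 1 MiB of order-0 pages behind an initialized scatterlist. */
	sgl = sgl_alloc(SZ_1M, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... dma_map_sg(), do I/O, dma_unmap_sg() ... */

	sgl_free(sgl);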
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cea19aaf303c..c43ec2271469 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -18,7 +18,7 @@
*/
#include <linux/cache.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
@@ -417,7 +417,7 @@ cleanup2:
return -ENOMEM;
}
-void __init swiotlb_free(void)
+void __init swiotlb_exit(void)
{
if (!io_tlb_orig_addr)
return;
@@ -586,7 +586,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
- if (printk_ratelimit())
+ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
return SWIOTLB_MAP_ERROR;
found:
@@ -605,7 +605,6 @@ found:
return tlb_addr;
}
-EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
/*
* Allocates bounce buffer and returns its kernel virtual address.
@@ -675,7 +674,6 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
-EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
@@ -707,92 +705,107 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
BUG();
}
}
-EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
+
+static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
+ size_t size)
+{
+ u64 mask = DMA_BIT_MASK(32);
+
+ if (dev && dev->coherent_dma_mask)
+ mask = dev->coherent_dma_mask;
+ return addr + size - 1 <= mask;
+}
+
+static void *
+swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ unsigned long attrs)
+{
+ phys_addr_t phys_addr;
+
+ if (swiotlb_force == SWIOTLB_NO_FORCE)
+ goto out_warn;
+
+ phys_addr = swiotlb_tbl_map_single(dev,
+ swiotlb_phys_to_dma(dev, io_tlb_start),
+ 0, size, DMA_FROM_DEVICE, 0);
+ if (phys_addr == SWIOTLB_MAP_ERROR)
+ goto out_warn;
+
+ *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
+	if (!dma_coherent_ok(dev, *dma_handle, size))
+ goto out_unmap;
+
+ memset(phys_to_virt(phys_addr), 0, size);
+ return phys_to_virt(phys_addr);
+
+out_unmap:
+ dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+ (unsigned long long)(dev ? dev->coherent_dma_mask : 0),
+ (unsigned long long)*dma_handle);
+
+ /*
+ * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+ * DMA_ATTR_SKIP_CPU_SYNC is optional.
+ */
+ swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+out_warn:
+	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
+ dev_warn(dev,
+ "swiotlb: coherent allocation failed, size=%zu\n",
+ size);
+ dump_stack();
+ }
+ return NULL;
+}
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
- dma_addr_t dev_addr;
- void *ret;
int order = get_order(size);
- u64 dma_mask = DMA_BIT_MASK(32);
-
- if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = hwdev->coherent_dma_mask;
+ unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
+ void *ret;
ret = (void *)__get_free_pages(flags, order);
if (ret) {
- dev_addr = swiotlb_virt_to_bus(hwdev, ret);
- if (dev_addr + size - 1 > dma_mask) {
- /*
- * The allocated memory isn't reachable by the device.
- */
- free_pages((unsigned long) ret, order);
- ret = NULL;
+ *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
+ if (dma_coherent_ok(hwdev, *dma_handle, size)) {
+ memset(ret, 0, size);
+ return ret;
}
+ free_pages((unsigned long)ret, order);
}
- if (!ret) {
- /*
- * We are either out of memory or the device can't DMA to
- * GFP_DMA memory; fall back on map_single(), which
- * will grab memory from the lowest available address range.
- */
- phys_addr_t paddr = map_single(hwdev, 0, size,
- DMA_FROM_DEVICE, 0);
- if (paddr == SWIOTLB_MAP_ERROR)
- goto err_warn;
- ret = phys_to_virt(paddr);
- dev_addr = swiotlb_phys_to_dma(hwdev, paddr);
-
- /* Confirm address can be DMA'd by device */
- if (dev_addr + size - 1 > dma_mask) {
- printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
- (unsigned long long)dma_mask,
- (unsigned long long)dev_addr);
-
- /*
- * DMA_TO_DEVICE to avoid memcpy in unmap_single.
- * The DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(hwdev, paddr,
- size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- goto err_warn;
- }
- }
+ return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
+}
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
- *dma_handle = dev_addr;
- memset(ret, 0, size);
+static bool swiotlb_free_buffer(struct device *dev, size_t size,
+ dma_addr_t dma_addr)
+{
+ phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
- return ret;
+ WARN_ON_ONCE(irqs_disabled());
-err_warn:
- pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
- dev_name(hwdev), size);
- dump_stack();
+ if (!is_swiotlb_buffer(phys_addr))
+ return false;
- return NULL;
+ /*
+ * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+ * DMA_ATTR_SKIP_CPU_SYNC is optional.
+ */
+ swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ return true;
}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr)
{
- phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
- WARN_ON(irqs_disabled());
- if (!is_swiotlb_buffer(paddr))
+ if (!swiotlb_free_buffer(hwdev, size, dev_addr))
free_pages((unsigned long)vaddr, get_order(size));
- else
- /*
- * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
- * DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
@@ -868,7 +881,6 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
}
-EXPORT_SYMBOL_GPL(swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -909,7 +921,6 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
{
unmap_single(hwdev, dev_addr, size, dir, attrs);
}
-EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
/*
* Make physical memory consistent for a single streaming mode DMA translation
@@ -947,7 +958,6 @@ swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -955,7 +965,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL(swiotlb_sync_single_for_device);
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
@@ -1007,7 +1016,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
}
return nelems;
}
-EXPORT_SYMBOL(swiotlb_map_sg_attrs);
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
@@ -1027,7 +1035,6 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
attrs);
}
-EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
/*
* Make physical memory consistent for a set of streaming mode DMA translations
@@ -1055,7 +1062,6 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
@@ -1063,14 +1069,12 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
-EXPORT_SYMBOL(swiotlb_dma_mapping_error);
/*
* Return whether the given device DMA address mask can be supported
@@ -1083,4 +1087,49 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
-EXPORT_SYMBOL(swiotlb_dma_supported);
+
+#ifdef CONFIG_DMA_DIRECT_OPS
+void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ void *vaddr;
+
+ /* temporary workaround: */
+ if (gfp & __GFP_NOWARN)
+ attrs |= DMA_ATTR_NO_WARN;
+
+ /*
+ * Don't print a warning when the first allocation attempt fails.
+ * swiotlb_alloc_coherent() will print a warning when the DMA memory
+ * allocation ultimately fails.
+ */
+ gfp |= __GFP_NOWARN;
+
+ vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!vaddr)
+ vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
+ return vaddr;
+}
+
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ if (!swiotlb_free_buffer(dev, size, dma_addr))
+ dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+}
+
+const struct dma_map_ops swiotlb_dma_ops = {
+ .mapping_error = swiotlb_dma_mapping_error,
+ .alloc = swiotlb_alloc,
+ .free = swiotlb_free,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .dma_supported = swiotlb_dma_supported,
+};
+#endif /* CONFIG_DMA_DIRECT_OPS */
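[Editor's note] The new swiotlb_alloc()/swiotlb_free() pair composes two allocators: try the direct mapping first, fall back to the bounce-buffer pool, and tear down in the mirror-image order. A minimal, runnable userspace model of that try-then-fallback shape — primary_alloc() and bounce_alloc() are hypothetical stand-ins for dma_direct_alloc() and swiotlb_alloc_buffer():

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for dma_direct_alloc()/swiotlb_alloc_buffer(). */
    static void *primary_alloc(size_t size) { (void)size; return NULL; /* simulate failure */ }
    static void *bounce_alloc(size_t size) { return malloc(size); }

    static void *compose_alloc(size_t size)
    {
            /* Fast path first; fall back to the bounce-buffer pool. */
            void *p = primary_alloc(size);
            if (!p)
                    p = bounce_alloc(size);
            return p;
    }

    int main(void)
    {
            void *p = compose_alloc(4096);
            printf("%s\n", p ? "allocated via fallback" : "allocation failed");
            free(p);
            return 0;
    }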
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 9e9748089270..4cd9ea9b3449 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2003,10 +2003,14 @@ static struct bpf_test tests[] = {
{ { 4, 0 }, { 5, 10 } }
},
{
- "INT: DIV by zero",
+ /* This one doesn't go through the verifier, but is just a raw
+ * insn, as opposed to the cBPF tests from here on. Thus the
+ * div-by-0 tests are done in test_verifier in the BPF kselftests.
+ */
+ "INT: DIV by -1",
.u.insns_int = {
BPF_ALU64_REG(BPF_MOV, R6, R1),
- BPF_ALU64_IMM(BPF_MOV, R7, 0),
+ BPF_ALU64_IMM(BPF_MOV, R7, -1),
BPF_LD_ABS(BPF_B, 3),
BPF_ALU32_REG(BPF_DIV, R0, R7),
BPF_EXIT_INSN(),
@@ -6109,6 +6113,110 @@ static struct bpf_test tests[] = {
{ { ETH_HLEN, 42 } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
},
+ /* Checking interpreter vs JIT wrt signed extended imms. */
+ {
+ "JNE signed compare, test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JNE, R2, R4, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 4",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, -17104896),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 5",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xfefb0000),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x7efb0000),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 7",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, 2),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { { 0, 2 } },
+ },
};
static struct net_device dev;
@@ -6250,9 +6358,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
return NULL;
}
}
- /* We don't expect to fail. */
if (*err) {
- pr_cont("FAIL to attach err=%d len=%d\n",
+ pr_cont("FAIL to prog_create err=%d len=%d\n",
*err, fprog.len);
return NULL;
}
@@ -6276,6 +6383,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
* checks.
*/
fp = bpf_prog_select_runtime(fp, err);
+ if (*err) {
+ pr_cont("FAIL to select_runtime err=%d\n", *err);
+ return NULL;
+ }
break;
}
@@ -6461,8 +6572,8 @@ static __init int test_bpf(void)
pass_cnt++;
continue;
}
-
- return err;
+ err_cnt++;
+ continue;
}
pr_cont("jited:%u ", fp->jited);
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 64a4c76cba2b..078a61480573 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -96,7 +96,7 @@ struct test_config {
struct device *device);
};
-struct test_config *test_fw_config;
+static struct test_config *test_fw_config;
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
size_t size, loff_t *offset)
@@ -359,7 +359,7 @@ static ssize_t config_name_show(struct device *dev,
{
return config_test_show_str(buf, test_fw_config->name);
}
-static DEVICE_ATTR(config_name, 0644, config_name_show, config_name_store);
+static DEVICE_ATTR_RW(config_name);
static ssize_t config_num_requests_store(struct device *dev,
struct device_attribute *attr,
@@ -371,6 +371,7 @@ static ssize_t config_num_requests_store(struct device *dev,
if (test_fw_config->reqs) {
pr_err("Must call release_all_firmware prior to changing config\n");
rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
goto out;
}
mutex_unlock(&test_fw_mutex);
@@ -388,8 +389,7 @@ static ssize_t config_num_requests_show(struct device *dev,
{
return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
-static DEVICE_ATTR(config_num_requests, 0644, config_num_requests_show,
- config_num_requests_store);
+static DEVICE_ATTR_RW(config_num_requests);
static ssize_t config_sync_direct_store(struct device *dev,
struct device_attribute *attr,
@@ -411,8 +411,7 @@ static ssize_t config_sync_direct_show(struct device *dev,
{
return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
-static DEVICE_ATTR(config_sync_direct, 0644, config_sync_direct_show,
- config_sync_direct_store);
+static DEVICE_ATTR_RW(config_sync_direct);
static ssize_t config_send_uevent_store(struct device *dev,
struct device_attribute *attr,
@@ -428,8 +427,7 @@ static ssize_t config_send_uevent_show(struct device *dev,
{
return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
-static DEVICE_ATTR(config_send_uevent, 0644, config_send_uevent_show,
- config_send_uevent_store);
+static DEVICE_ATTR_RW(config_send_uevent);
static ssize_t config_read_fw_idx_store(struct device *dev,
struct device_attribute *attr,
@@ -445,8 +443,7 @@ static ssize_t config_read_fw_idx_show(struct device *dev,
{
return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
-static DEVICE_ATTR(config_read_fw_idx, 0644, config_read_fw_idx_show,
- config_read_fw_idx_store);
+static DEVICE_ATTR_RW(config_read_fw_idx);
static ssize_t trigger_request_store(struct device *dev,
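[Editor's note] The DEVICE_ATTR(...) to DEVICE_ATTR_RW(...) conversions work because the macro derives everything from one token: the attribute is wired to <name>_show/<name>_store with 0644 permissions, which is why the hand-written triples collapse to one line. A runnable userspace model of that token-pasting convention — the struct layout below is illustrative, not the kernel's:

    #include <stdio.h>

    struct attribute { const char *name; int mode; };
    struct device_attribute {
            struct attribute attr;
            int (*show)(void);
            int (*store)(int);
    };

    /* Model of DEVICE_ATTR_RW(): derive show/store and mode from one name. */
    #define MODEL_ATTR_RW(_name) \
            struct device_attribute dev_attr_##_name = \
                    { { #_name, 0644 }, _name##_show, _name##_store }

    static int config_name_show(void) { return 0; }
    static int config_name_store(int v) { (void)v; return 0; }
    static MODEL_ATTR_RW(config_name);

    int main(void)
    {
            printf("%s mode=%o\n", dev_attr_config_name.attr.name,
                   dev_attr_config_name.attr.mode);
            return 0;
    }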
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 337f408b4de6..e372b97eee13 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -694,8 +694,7 @@ static ssize_t config_test_driver_show(struct device *dev,
return config_test_show_str(&test_dev->config_mutex, buf,
config->test_driver);
}
-static DEVICE_ATTR(config_test_driver, 0644, config_test_driver_show,
- config_test_driver_store);
+static DEVICE_ATTR_RW(config_test_driver);
static ssize_t config_test_fs_store(struct device *dev,
struct device_attribute *attr,
@@ -726,8 +725,7 @@ static ssize_t config_test_fs_show(struct device *dev,
return config_test_show_str(&test_dev->config_mutex, buf,
config->test_fs);
}
-static DEVICE_ATTR(config_test_fs, 0644, config_test_fs_show,
- config_test_fs_store);
+static DEVICE_ATTR_RW(config_test_fs);
static int trigger_config_run_type(struct kmod_test_device *test_dev,
enum kmod_test_case test_case,
@@ -1012,8 +1010,7 @@ static ssize_t config_num_threads_show(struct device *dev,
return test_dev_config_show_int(test_dev, buf, config->num_threads);
}
-static DEVICE_ATTR(config_num_threads, 0644, config_num_threads_show,
- config_num_threads_store);
+static DEVICE_ATTR_RW(config_num_threads);
static ssize_t config_test_case_store(struct device *dev,
struct device_attribute *attr,
@@ -1037,8 +1034,7 @@ static ssize_t config_test_case_show(struct device *dev,
return test_dev_config_show_uint(test_dev, buf, config->test_case);
}
-static DEVICE_ATTR(config_test_case, 0644, config_test_case_show,
- config_test_case_store);
+static DEVICE_ATTR_RW(config_test_case);
static ssize_t test_result_show(struct device *dev,
struct device_attribute *attr,
@@ -1049,7 +1045,7 @@ static ssize_t test_result_show(struct device *dev,
return test_dev_config_show_int(test_dev, buf, config->test_result);
}
-static DEVICE_ATTR(test_result, 0644, test_result_show, test_result_store);
+static DEVICE_ATTR_RW(test_result);
#define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 8e83cbdc049c..76d3667fdea2 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -162,11 +162,7 @@ static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
return;
}
- err = rhashtable_walk_start(&hti);
- if (err && err != -EAGAIN) {
- pr_warn("Test failed: iterator failed: %d\n", err);
- return;
- }
+ rhashtable_walk_start(&hti);
while ((pos = rhashtable_walk_next(&hti))) {
if (PTR_ERR(pos) == -EAGAIN) {
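[Editor's note] This test change tracks an API change: rhashtable_walk_start() no longer returns an error, so only rhashtable_walk_next() needs -EAGAIN handling (a concurrent resize). A hedged sketch of the resulting walker shape, kernel context assumed and declarations elided:

    rhashtable_walk_enter(&ht, &hti);
    rhashtable_walk_start(&hti);            /* cannot fail any more */
    while ((pos = rhashtable_walk_next(&hti)) != NULL) {
            if (PTR_ERR(pos) == -EAGAIN)
                    continue;               /* table resized under us; keep going */
            if (IS_ERR(pos))
                    break;
            /* ... inspect the object at pos ... */
    }
    rhashtable_walk_stop(&hti);
    rhashtable_walk_exit(&hti);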
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 15e2e6fb060e..3744b2a8e591 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -20,7 +20,7 @@ EXPORT_SYMBOL(_copy_from_user);
#endif
#ifndef INLINE_COPY_TO_USER
-unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (likely(access_ok(VERIFY_WRITE, to, n))) {
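[Editor's note] The usercopy change only swaps the __user annotations — a sparse fix with no effect on code generation: for a copy *to* user space it is the destination that is the user pointer. The corrected pair of prototypes, side by side:

    /* Destination is __user for copy-to; source is __user for copy-from. */
    unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n);
    unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n);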
diff --git a/lib/uuid.c b/lib/uuid.c
index 680b9fb9ba09..2290b9f001a9 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -29,15 +29,14 @@ EXPORT_SYMBOL(uuid_null);
const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
-/***************************************************************
+/**
+ * generate_random_uuid - generate a random UUID
+ * @uuid: where to put the generated UUID
+ *
* Random UUID interface
*
- * Used here for a Boot ID, but can be useful for other kernel
- * drivers.
- ***************************************************************/
-
-/*
- * Generate random UUID
+ * Used to create a Boot ID or a filesystem UUID/GUID, but can be
+ * useful for other kernel drivers.
*/
void generate_random_uuid(unsigned char uuid[16])
{
@@ -73,16 +72,17 @@ void uuid_gen(uuid_t *bu)
EXPORT_SYMBOL_GPL(uuid_gen);
/**
- * uuid_is_valid - checks if UUID string valid
- * @uuid: UUID string to check
- *
- * Description:
- * It checks if the UUID string is following the format:
- * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- * where x is a hex digit.
- *
- * Return: true if input is valid UUID string.
- */
+ * uuid_is_valid - checks if a UUID string is valid
+ * @uuid: UUID string to check
+ *
+ * Description:
+ * It checks whether the UUID string follows the format:
+ * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ *
+ * where x is a hex digit.
+ *
+ * Return: true if the input is a valid UUID string.
+ */
bool uuid_is_valid(const char *uuid)
{
unsigned int i;
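[Editor's note] The format in the kerneldoc above is easy to check mechanically: 36 characters total, hyphens at offsets 8, 13, 18 and 23, hex digits everywhere else. A runnable userspace re-implementation of that rule — uuid_string_valid() is a hypothetical helper, not the kernel's uuid_is_valid():

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool uuid_string_valid(const char *uuid)
    {
            unsigned int i;

            if (strlen(uuid) != 36)
                    return false;
            for (i = 0; i < 36; i++) {
                    if (i == 8 || i == 13 || i == 18 || i == 23) {
                            if (uuid[i] != '-')
                                    return false;
                    } else if (!isxdigit((unsigned char)uuid[i])) {
                            return false;
                    }
            }
            return true;
    }

    int main(void)
    {
            printf("%d\n", uuid_string_valid("c1d1f7b8-9a3e-4b5c-8d2e-0123456789ab")); /* 1 */
            printf("%d\n", uuid_string_valid("not-a-uuid"));                          /* 0 */
            return 0;
    }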
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 01c3957b2de6..2b18135446dc 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1834,7 +1834,8 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
*
* - 'x' For printing the address. Equivalent to "%lx".
*
- * ** Please update also Documentation/printk-formats.txt when making changes **
+ * ** When making changes please also update:
+ * Documentation/core-api/printk-formats.rst
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
@@ -2194,7 +2195,7 @@ set_precision(struct printf_spec *spec, int prec)
* - ``%n`` is unsupported
* - ``%p*`` is handled by pointer()
*
- * See pointer() or Documentation/printk-formats.txt for more
+ * See pointer() or Documentation/core-api/printk-formats.rst for more
* extensive description.
*
* **Please update the documentation in both places when making changes**
diff --git a/mm/Kconfig b/mm/Kconfig
index 03ff7703d322..c782e8fb7235 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -639,15 +639,10 @@ config MAX_STACK_SIZE_MB
A sane initial value is 80 MB.
-# For architectures that support deferred memory initialisation
-config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
- bool
-
config DEFERRED_STRUCT_PAGE_INIT
bool "Defer initialisation of struct pages to kthreads"
default n
- depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
- depends on NO_BOOTMEM && MEMORY_HOTPLUG
+ depends on NO_BOOTMEM
depends on !FLATMEM
help
Ordinarily all struct pages are initialised during early boot in a
diff --git a/mm/compaction.c b/mm/compaction.c
index 10cd757f1006..2c8999d027ab 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1738,7 +1738,7 @@ int sysctl_extfrag_threshold = 500;
* @order: The order of the current allocation
* @alloc_flags: The allocation flags of the current allocation
* @ac: The context of current allocation
- * @mode: The migration mode for async, sync light, or sync migration
+ * @prio: Determines how hard direct compaction should try to succeed
*
* This is the main entry point for direct page compaction.
*/
diff --git a/mm/debug.c b/mm/debug.c
index d947f3e03b0d..56e2d9125ea5 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -50,7 +50,7 @@ void __dump_page(struct page *page, const char *reason)
*/
int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
- pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
+ pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
page, page_ref_count(page), mapcount,
page->mapping, page_to_pgoff(page));
if (PageCompound(page))
@@ -69,7 +69,7 @@ void __dump_page(struct page *page, const char *reason)
#ifdef CONFIG_MEMCG
if (page->mem_cgroup)
- pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
+ pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
@@ -84,10 +84,10 @@ EXPORT_SYMBOL(dump_page);
void dump_vma(const struct vm_area_struct *vma)
{
- pr_emerg("vma %p start %p end %p\n"
- "next %p prev %p mm %p\n"
- "prot %lx anon_vma %p vm_ops %p\n"
- "pgoff %lx file %p private_data %p\n"
+ pr_emerg("vma %px start %px end %px\n"
+ "next %px prev %px mm %px\n"
+ "prot %lx anon_vma %px vm_ops %px\n"
+ "pgoff %lx file %px private_data %px\n"
"flags: %#lx(%pGv)\n",
vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
vma->vm_prev, vma->vm_mm,
@@ -100,27 +100,27 @@ EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
- pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+ pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
#ifdef CONFIG_MMU
- "get_unmapped_area %p\n"
+ "get_unmapped_area %px\n"
#endif
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
- "pgd %p mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
+ "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n"
"start_brk %lx brk %lx start_stack %lx\n"
"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
- "binfmt %p flags %lx core_state %p\n"
+ "binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
- "ioctx_table %p\n"
+ "ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
- "owner %p "
+ "owner %px "
#endif
- "exe_file %p\n"
+ "exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
- "mmu_notifier_mm %p\n"
+ "mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
diff --git a/mm/fadvise.c b/mm/fadvise.c
index ec70d6e4b86d..767887f5f3bf 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -127,7 +127,15 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
*/
start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
end_index = (endbyte >> PAGE_SHIFT);
- if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+ /*
+ * The page at end_index will be discarded inclusively by
+ * invalidate_mapping_pages(), so subtracting 1 from
+ * end_index means we will skip the last page. But if endbyte
+ * is page aligned or is at the end of the file, we should not
+ * skip that page - discarding the last page is safe enough.
+ */
+ if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
+ endbyte != inode->i_size - 1) {
/* First page is tricky as 0 - 1 = -1, but pgoff_t
* is unsigned, so the end_index >= start_index
* check below would be true and we'll discard the whole
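[Editor's note] A runnable userspace model of the index math in this hunk (PAGE_SHIFT of 12 and the byte values are assumptions for illustration; the kernel additionally guards the end_index == 0 underflow the truncated comment above describes): the partially covered last page is skipped unless endbyte lands on the last byte of a page or of the file.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            uint64_t offset = 0, endbyte = 6000, isize = 8192;  /* illustrative */
            uint64_t start_index = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
            uint64_t end_index = endbyte >> PAGE_SHIFT;

            /* Drop a partially covered last page, unless it is the EOF page
             * (simplified: the kernel also handles end_index underflow). */
            if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK && endbyte != isize - 1)
                    end_index--;

            printf("invalidate pages [%llu, %llu]\n",
                   (unsigned long long)start_index, (unsigned long long)end_index);
            return 0;
    }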
diff --git a/mm/filemap.c b/mm/filemap.c
index ee83baaf855d..693f62212a59 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -31,7 +31,6 @@
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
-#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
diff --git a/mm/gup.c b/mm/gup.c
index e0d82b6706d7..9e17d8db2d6b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -848,7 +848,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
- int *locked, bool notify_drop,
+ int *locked,
unsigned int flags)
{
long ret, pages_done;
@@ -922,7 +922,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
pages++;
start += PAGE_SIZE;
}
- if (notify_drop && lock_dropped && *locked) {
+ if (lock_dropped && *locked) {
/*
* We must let the caller know we temporarily dropped the lock
* and so the critical section protected by it was lost.
@@ -959,36 +959,12 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
int *locked)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
- pages, NULL, locked, true,
+ pages, NULL, locked,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
/*
- * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows for
- * tsk, mm to be specified.
- *
- * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
- * caller if required (just like with __get_user_pages). "FOLL_GET"
- * is set implicitly if "pages" is non-NULL.
- */
-static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
- struct mm_struct *mm, unsigned long start,
- unsigned long nr_pages, struct page **pages,
- unsigned int gup_flags)
-{
- long ret;
- int locked = 1;
-
- down_read(&mm->mmap_sem);
- ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
- &locked, false, gup_flags);
- if (locked)
- up_read(&mm->mmap_sem);
- return ret;
-}
-
-/*
* get_user_pages_unlocked() is suitable to replace the form:
*
* down_read(&mm->mmap_sem);
@@ -1006,8 +982,16 @@ static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags)
{
- return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- pages, gup_flags | FOLL_TOUCH);
+ struct mm_struct *mm = current->mm;
+ int locked = 1;
+ long ret;
+
+ down_read(&mm->mmap_sem);
+ ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
+ &locked, gup_flags | FOLL_TOUCH);
+ if (locked)
+ up_read(&mm->mmap_sem);
+ return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -1073,7 +1057,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct **vmas, int *locked)
{
return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
- locked, true,
+ locked,
gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
@@ -1090,7 +1074,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
struct vm_area_struct **vmas)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
- pages, vmas, NULL, false,
+ pages, vmas, NULL,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
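[Editor's note] After this change get_user_pages_unlocked() open-codes the take-lock/call/conditionally-unlock dance itself rather than deferring to a wrapper. A hedged caller-side sketch, kernel context assumed and user_addr a hypothetical, already-validated userspace address:

    struct page *page;
    long pinned;

    /* Pins one page; mmap_sem is taken and released internally. */
    pinned = get_user_pages_unlocked(user_addr, 1, &page, FOLL_WRITE);
    if (pinned == 1) {
            /* ... kmap/access the page ... */
            put_page(page);
    }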
diff --git a/mm/hmm.c b/mm/hmm.c
index ea19742a5d60..979211c7ccc8 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -418,7 +418,7 @@ again:
}
if (!pte_present(pte)) {
- swp_entry_t entry;
+ swp_entry_t entry = pte_to_swp_entry(pte);
if (!non_swap_entry(entry)) {
if (hmm_vma_walk->fault)
@@ -426,8 +426,6 @@ again:
continue;
}
- entry = pte_to_swp_entry(pte);
-
/*
* This is a special swap entry, ignore migration, use
* device and report anything else as error.
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0e7ded98d114..87ab9b8f56b5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1910,17 +1910,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
* pmdp_invalidate() is required to make sure we don't miss
* dirty/young flags set by hardware.
*/
- entry = *pmd;
- pmdp_invalidate(vma, addr, pmd);
-
- /*
- * Recover dirty/young flags. It relies on pmdp_invalidate to not
- * corrupt them.
- */
- if (pmd_dirty(*pmd))
- entry = pmd_mkdirty(entry);
- if (pmd_young(*pmd))
- entry = pmd_mkyoung(entry);
+ entry = pmdp_invalidate(vma, addr, pmd);
entry = pmd_modify(entry, newprot);
if (preserve_write)
@@ -2073,8 +2063,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
struct mm_struct *mm = vma->vm_mm;
struct page *page;
pgtable_t pgtable;
- pmd_t _pmd;
- bool young, write, dirty, soft_dirty, pmd_migration = false;
+ pmd_t old_pmd, _pmd;
+ bool young, write, soft_dirty, pmd_migration = false;
unsigned long addr;
int i;
@@ -2116,24 +2106,50 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
return __split_huge_zero_page_pmd(vma, haddr, pmd);
}
+ /*
+ * Up to this point the pmd is present and huge and userland has full
+ * access to the hugepage during the split (which happens in place).
+ * If we overwrite the pmd with the not-huge version pointing to the
+ * pte here (which of course we could if all CPUs were bug free),
+ * userland could trigger a small page size TLB miss on the small
+ * sized TLB while the hugepage TLB entry is still established in the
+ * huge TLB. Some CPUs don't like that.
+ * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
+ * 383 on page 93. Intel should be safe but also warns that it's only
+ * safe if the permission and cache attributes of the two entries
+ * loaded in the two TLBs are identical (which should be the case here).
+ * But it is generally safer to never allow small and huge TLB entries
+ * for the same virtual address to be loaded simultaneously. So instead
+ * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
+ * current pmd notpresent (atomically because here the pmd_trans_huge
+ * must remain set at all times on the pmd until the split is complete
+ * for this pmd), then we flush the SMP TLB and finally we write the
+ * non-huge version of the pmd entry with pmd_populate.
+ */
+ old_pmd = pmdp_invalidate(vma, haddr, pmd);
+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
- pmd_migration = is_pmd_migration_entry(*pmd);
+ pmd_migration = is_pmd_migration_entry(old_pmd);
if (pmd_migration) {
swp_entry_t entry;
- entry = pmd_to_swp_entry(*pmd);
+ entry = pmd_to_swp_entry(old_pmd);
page = pfn_to_page(swp_offset(entry));
} else
#endif
- page = pmd_page(*pmd);
+ page = pmd_page(old_pmd);
VM_BUG_ON_PAGE(!page_count(page), page);
page_ref_add(page, HPAGE_PMD_NR - 1);
- write = pmd_write(*pmd);
- young = pmd_young(*pmd);
- dirty = pmd_dirty(*pmd);
- soft_dirty = pmd_soft_dirty(*pmd);
+ if (pmd_dirty(old_pmd))
+ SetPageDirty(page);
+ write = pmd_write(old_pmd);
+ young = pmd_young(old_pmd);
+ soft_dirty = pmd_soft_dirty(old_pmd);
- pmdp_huge_split_prepare(vma, haddr, pmd);
+ /*
+ * Withdraw the table only after we mark the pmd entry invalid.
+ * This is critical for some architectures (Power).
+ */
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
@@ -2160,8 +2176,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
if (soft_dirty)
entry = pte_mksoft_dirty(entry);
}
- if (dirty)
- SetPageDirty(page + i);
pte = pte_offset_map(&_pmd, addr);
BUG_ON(!pte_none(*pte));
set_pte_at(mm, addr, pte, entry);
@@ -2189,28 +2203,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
}
smp_wmb(); /* make pte visible before pmd */
- /*
- * Up to this point the pmd is present and huge and userland has the
- * whole access to the hugepage during the split (which happens in
- * place). If we overwrite the pmd with the not-huge version pointing
- * to the pte here (which of course we could if all CPUs were bug
- * free), userland could trigger a small page size TLB miss on the
- * small sized TLB while the hugepage TLB entry is still established in
- * the huge TLB. Some CPU doesn't like that.
- * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
- * 383 on page 93. Intel should be safe but is also warns that it's
- * only safe if the permission and cache attributes of the two entries
- * loaded in the two TLB is identical (which should be the case here).
- * But it is generally safer to never allow small and huge TLB entries
- * for the same virtual address to be loaded simultaneously. So instead
- * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
- * current pmd notpresent (atomically because here the pmd_trans_huge
- * and pmd_trans_splitting must remain set at all times on the pmd
- * until the split is complete for this pmd), then we flush the SMP TLB
- * and finally we write the non-huge version of the pmd entry with
- * pmd_populate.
- */
- pmdp_invalidate(vma, haddr, pmd);
pmd_populate(mm, pmd, pgtable);
if (freeze) {
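[Editor's note] Both huge_memory.c hunks lean on the same new contract: pmdp_invalidate() now returns the old pmd value, so dirty/young (and migration state) are read from that snapshot instead of re-reading *pmd and racing with hardware A/D-bit updates. In sketch form:

    pmd_t old_pmd;

    old_pmd = pmdp_invalidate(vma, haddr, pmd);  /* returns the prior entry */
    if (pmd_dirty(old_pmd))         /* no window where a HW update is lost */
            SetPageDirty(page);
    write = pmd_write(old_pmd);
    young = pmd_young(old_pmd);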
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9a334f5fb730..7c204e3d132b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -34,10 +34,9 @@
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
+#include <linux/page_owner.h>
#include "internal.h"
-int hugepages_treat_as_movable;
-
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
@@ -926,7 +925,7 @@ retry_cpuset:
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
- if (hugepages_treat_as_movable || hugepage_migration_supported(h))
+ if (hugepage_migration_supported(h))
return GFP_HIGHUSER_MOVABLE;
else
return GFP_HIGHUSER;
@@ -1108,7 +1107,8 @@ static bool zone_spans_last_pfn(const struct zone *zone,
return zone_spans_pfn(zone, last_pfn);
}
-static struct page *alloc_gigantic_page(int nid, struct hstate *h)
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask)
{
unsigned int order = huge_page_order(h);
unsigned long nr_pages = 1 << order;
@@ -1116,11 +1116,9 @@ static struct page *alloc_gigantic_page(int nid, struct hstate *h)
struct zonelist *zonelist;
struct zone *zone;
struct zoneref *z;
- gfp_t gfp_mask;
- gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
zonelist = node_zonelist(nid, gfp_mask);
- for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), NULL) {
+ for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
spin_lock_irqsave(&zone->lock, flags);
pfn = ALIGN(zone->zone_start_pfn, nr_pages);
@@ -1151,41 +1149,13 @@ static struct page *alloc_gigantic_page(int nid, struct hstate *h)
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);
-static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
-{
- struct page *page;
-
- page = alloc_gigantic_page(nid, h);
- if (page) {
- prep_compound_gigantic_page(page, huge_page_order(h));
- prep_new_huge_page(h, page, nid);
- }
-
- return page;
-}
-
-static int alloc_fresh_gigantic_page(struct hstate *h,
- nodemask_t *nodes_allowed)
-{
- struct page *page = NULL;
- int nr_nodes, node;
-
- for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
- page = alloc_fresh_gigantic_page_node(h, node);
- if (page)
- return 1;
- }
-
- return 0;
-}
-
#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static inline bool gigantic_page_supported(void) { return false; }
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask) { return NULL; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
unsigned int order) { }
-static inline int alloc_fresh_gigantic_page(struct hstate *h,
- nodemask_t *nodes_allowed) { return 0; }
#endif
static void update_and_free_page(struct hstate *h, struct page *page)
@@ -1250,6 +1220,28 @@ static void clear_page_huge_active(struct page *page)
ClearPagePrivate(&page[1]);
}
+/*
+ * Internal hugetlb-specific page flag. Do not use outside of the
+ * hugetlb code.
+ */
+static inline bool PageHugeTemporary(struct page *page)
+{
+ if (!PageHuge(page))
+ return false;
+
+ return (unsigned long)page[2].mapping == -1U;
+}
+
+static inline void SetPageHugeTemporary(struct page *page)
+{
+ page[2].mapping = (void *)-1U;
+}
+
+static inline void ClearPageHugeTemporary(struct page *page)
+{
+ page[2].mapping = NULL;
+}
+
void free_huge_page(struct page *page)
{
/*
@@ -1284,7 +1276,11 @@ void free_huge_page(struct page *page)
if (restore_reserve)
h->resv_huge_pages++;
- if (h->surplus_huge_pages_node[nid]) {
+ if (PageHugeTemporary(page)) {
+ list_del(&page->lru);
+ ClearPageHugeTemporary(page);
+ update_and_free_page(h, page);
+ } else if (h->surplus_huge_pages_node[nid]) {
/* remove the page from active list */
list_del(&page->lru);
update_and_free_page(h, page);
@@ -1306,7 +1302,6 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
spin_unlock(&hugetlb_lock);
- put_page(page); /* free it into the hugepage allocator */
}
static void prep_compound_gigantic_page(struct page *page, unsigned int order)
@@ -1383,41 +1378,70 @@ pgoff_t __basepage_index(struct page *page)
return (index << compound_order(page_head)) + compound_idx;
}
-static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
+static struct page *alloc_buddy_huge_page(struct hstate *h,
+ gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
+ int order = huge_page_order(h);
struct page *page;
- page = __alloc_pages_node(nid,
- htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
- __GFP_RETRY_MAYFAIL|__GFP_NOWARN,
- huge_page_order(h));
- if (page) {
- prep_new_huge_page(h, page, nid);
- }
+ gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+ page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+ if (page)
+ __count_vm_event(HTLB_BUDDY_PGALLOC);
+ else
+ __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
+ return page;
+}
+
+/*
+ * Common helper to allocate a fresh hugetlb page. All specific allocators
+ * should use this function to get new hugetlb pages.
+ */
+static struct page *alloc_fresh_huge_page(struct hstate *h,
+ gfp_t gfp_mask, int nid, nodemask_t *nmask)
+{
+ struct page *page;
+
+ if (hstate_is_gigantic(h))
+ page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
+ else
+ page = alloc_buddy_huge_page(h, gfp_mask,
+ nid, nmask);
+ if (!page)
+ return NULL;
+
+ if (hstate_is_gigantic(h))
+ prep_compound_gigantic_page(page, huge_page_order(h));
+ prep_new_huge_page(h, page, page_to_nid(page));
return page;
}
-static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+/*
+ * Allocates a fresh page to the hugetlb allocator pool in a
+ * node-interleaved manner.
+ */
+static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
struct page *page;
int nr_nodes, node;
- int ret = 0;
+ gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
- page = alloc_fresh_huge_page_node(h, node);
- if (page) {
- ret = 1;
+ page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
+ if (page)
break;
- }
}
- if (ret)
- count_vm_event(HTLB_BUDDY_PGALLOC);
- else
- count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+ if (!page)
+ return 0;
- return ret;
+ put_page(page); /* free it into the hugepage allocator */
+
+ return 1;
}
/*
@@ -1525,79 +1549,66 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
return rc;
}
-static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
- gfp_t gfp_mask, int nid, nodemask_t *nmask)
-{
- int order = huge_page_order(h);
-
- gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
- if (nid == NUMA_NO_NODE)
- nid = numa_mem_id();
- return __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
-}
-
-static struct page *__alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask,
+/*
+ * Allocates a fresh surplus page from the page allocator.
+ */
+static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nmask)
{
- struct page *page;
- unsigned int r_nid;
+ struct page *page = NULL;
if (hstate_is_gigantic(h))
return NULL;
+ spin_lock(&hugetlb_lock);
+ if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
+ goto out_unlock;
+ spin_unlock(&hugetlb_lock);
+
+ page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
+ if (!page)
+ return NULL;
+
+ spin_lock(&hugetlb_lock);
/*
- * Assume we will successfully allocate the surplus page to
- * prevent racing processes from causing the surplus to exceed
- * overcommit
- *
- * This however introduces a different race, where a process B
- * tries to grow the static hugepage pool while alloc_pages() is
- * called by process A. B will only examine the per-node
- * counters in determining if surplus huge pages can be
- * converted to normal huge pages in adjust_pool_surplus(). A
- * won't be able to increment the per-node counter, until the
- * lock is dropped by B, but B doesn't drop hugetlb_lock until
- * no more huge pages can be converted from surplus to normal
- * state (and doesn't try to convert again). Thus, we have a
- * case where a surplus huge page exists, the pool is grown, and
- * the surplus huge page still exists after, even though it
- * should just have been converted to a normal huge page. This
- * does not leak memory, though, as the hugepage will be freed
- * once it is out of use. It also does not allow the counters to
- * go out of whack in adjust_pool_surplus() as we don't modify
- * the node values until we've gotten the hugepage and only the
- * per-node value is checked there.
+ * We could have raced with the pool size change.
+ * Double check that and simply deallocate the new page
+ * if we would end up overcommitting the surpluses. Abuse the
+ * temporary page state to work around the nasty free_huge_page
+ * code flow.
*/
- spin_lock(&hugetlb_lock);
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
- spin_unlock(&hugetlb_lock);
- return NULL;
+ SetPageHugeTemporary(page);
+ put_page(page);
+ page = NULL;
} else {
- h->nr_huge_pages++;
h->surplus_huge_pages++;
+ h->nr_huge_pages_node[page_to_nid(page)]++;
}
+
+out_unlock:
spin_unlock(&hugetlb_lock);
- page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask, nid, nmask);
+ return page;
+}
- spin_lock(&hugetlb_lock);
- if (page) {
- INIT_LIST_HEAD(&page->lru);
- r_nid = page_to_nid(page);
- set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
- set_hugetlb_cgroup(page, NULL);
- /*
- * We incremented the global counters already
- */
- h->nr_huge_pages_node[r_nid]++;
- h->surplus_huge_pages_node[r_nid]++;
- __count_vm_event(HTLB_BUDDY_PGALLOC);
- } else {
- h->nr_huge_pages--;
- h->surplus_huge_pages--;
- __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
- }
- spin_unlock(&hugetlb_lock);
+static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+ int nid, nodemask_t *nmask)
+{
+ struct page *page;
+
+ if (hstate_is_gigantic(h))
+ return NULL;
+
+ page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
+ if (!page)
+ return NULL;
+
+ /*
+ * We do not account these pages as surplus because they are only
+ * temporary and will be released properly on the last reference.
+ */
+ SetPageHugeTemporary(page);
return page;
}
@@ -1606,7 +1617,7 @@ static struct page *__alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask,
* Use the VMA's mpolicy to allocate a huge page from the buddy.
*/
static
-struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
+struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
struct page *page;
@@ -1616,17 +1627,13 @@ struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
nodemask_t *nodemask;
nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
- page = __alloc_buddy_huge_page(h, gfp_mask, nid, nodemask);
+ page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
mpol_cond_put(mpol);
return page;
}
-/*
- * This allocation function is useful in the context where vma is irrelevant.
- * E.g. soft-offlining uses this function because it only cares physical
- * address of error page.
- */
+/* page migration callback function */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
gfp_t gfp_mask = htlb_alloc_mask(h);
@@ -1641,12 +1648,12 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
spin_unlock(&hugetlb_lock);
if (!page)
- page = __alloc_buddy_huge_page(h, gfp_mask, nid, NULL);
+ page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
return page;
}
-
+/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask)
{
@@ -1664,9 +1671,25 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
}
spin_unlock(&hugetlb_lock);
- /* No reservations, try to overcommit */
+ return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
+}
+
+/* mempolicy aware migration callback */
+struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address)
+{
+ struct mempolicy *mpol;
+ nodemask_t *nodemask;
+ struct page *page;
+ gfp_t gfp_mask;
+ int node;
+
+ gfp_mask = htlb_alloc_mask(h);
+ node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+ page = alloc_huge_page_nodemask(h, node, nodemask);
+ mpol_cond_put(mpol);
- return __alloc_buddy_huge_page(h, gfp_mask, preferred_nid, nmask);
+ return page;
}
/*
@@ -1694,7 +1717,7 @@ static int gather_surplus_pages(struct hstate *h, int delta)
retry:
spin_unlock(&hugetlb_lock);
for (i = 0; i < needed; i++) {
- page = __alloc_buddy_huge_page(h, htlb_alloc_mask(h),
+ page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
NUMA_NO_NODE, NULL);
if (!page) {
alloc_ok = false;
@@ -2031,7 +2054,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
if (!page) {
spin_unlock(&hugetlb_lock);
- page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
+ page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
if (!page)
goto out_uncharge_cgroup;
if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
@@ -2074,20 +2097,6 @@ out_subpool_put:
return ERR_PTR(-ENOSPC);
}
-/*
- * alloc_huge_page()'s wrapper which simply returns the page if allocation
- * succeeds, otherwise NULL. This function is called from new_vma_page(),
- * where no ERR_VALUE is expected to be returned.
- */
-struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve)
-{
- struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
- if (IS_ERR(page))
- page = NULL;
- return page;
-}
-
int alloc_bootmem_huge_page(struct hstate *h)
__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h)
@@ -2150,6 +2159,8 @@ static void __init gather_bootmem_prealloc(void)
prep_compound_huge_page(page, h->order);
WARN_ON(PageReserved(page));
prep_new_huge_page(h, page, page_to_nid(page));
+ put_page(page); /* free it into the hugepage allocator */
+
/*
* If we had gigantic hugepages allocated at boot time, we need
* to restore the 'stolen' pages to totalram_pages in order to
@@ -2169,7 +2180,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
if (hstate_is_gigantic(h)) {
if (!alloc_bootmem_huge_page(h))
break;
- } else if (!alloc_fresh_huge_page(h,
+ } else if (!alloc_pool_huge_page(h,
&node_states[N_MEMORY]))
break;
cond_resched();
@@ -2289,7 +2300,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
* First take pages out of surplus state. Then make up the
* remaining difference by allocating fresh huge pages.
*
- * We might race with __alloc_buddy_huge_page() here and be unable
+ * We might race with alloc_surplus_huge_page() here and be unable
* to convert a surplus huge page to a normal huge page. That is
* not critical, though, it just means the overall size of the
* pool might be one hugepage larger than it needs to be, but
@@ -2312,10 +2323,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
/* yield cpu to avoid soft lockup */
cond_resched();
- if (hstate_is_gigantic(h))
- ret = alloc_fresh_gigantic_page(h, nodes_allowed);
- else
- ret = alloc_fresh_huge_page(h, nodes_allowed);
+ ret = alloc_pool_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
@@ -2335,7 +2343,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
* By placing pages into the surplus state independent of the
* overcommit value, we are allowing the surplus pool size to
* exceed overcommit. There are few sane options here. Since
- * __alloc_buddy_huge_page() is checking the global counter,
+ * alloc_surplus_huge_page() is checking the global counter,
* though, we'll note that we're not allowed to exceed surplus
* and won't grow the pool anywhere else. Not until one of the
* sysctls are changed, or the surplus pages go out of use.
@@ -2975,20 +2983,32 @@ out:
void hugetlb_report_meminfo(struct seq_file *m)
{
- struct hstate *h = &default_hstate;
+ struct hstate *h;
+ unsigned long total = 0;
+
if (!hugepages_supported())
return;
- seq_printf(m,
- "HugePages_Total: %5lu\n"
- "HugePages_Free: %5lu\n"
- "HugePages_Rsvd: %5lu\n"
- "HugePages_Surp: %5lu\n"
- "Hugepagesize: %8lu kB\n",
- h->nr_huge_pages,
- h->free_huge_pages,
- h->resv_huge_pages,
- h->surplus_huge_pages,
- 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
+
+ for_each_hstate(h) {
+ unsigned long count = h->nr_huge_pages;
+
+ total += (PAGE_SIZE << huge_page_order(h)) * count;
+
+ if (h == &default_hstate)
+ seq_printf(m,
+ "HugePages_Total: %5lu\n"
+ "HugePages_Free: %5lu\n"
+ "HugePages_Rsvd: %5lu\n"
+ "HugePages_Surp: %5lu\n"
+ "Hugepagesize: %8lu kB\n",
+ count,
+ h->free_huge_pages,
+ h->resv_huge_pages,
+ h->surplus_huge_pages,
+ (PAGE_SIZE << huge_page_order(h)) / 1024);
+ }
+
+ seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
}
int hugetlb_report_node_meminfo(int nid, char *buf)
@@ -4799,3 +4819,36 @@ void putback_active_hugepage(struct page *page)
spin_unlock(&hugetlb_lock);
put_page(page);
}
+
+void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
+{
+ struct hstate *h = page_hstate(oldpage);
+
+ hugetlb_cgroup_migrate(oldpage, newpage);
+ set_page_owner_migrate_reason(newpage, reason);
+
+ /*
+ * Transfer the temporary state of the new huge page. This is the
+ * reverse of the other transitions because newpage is going to be
+ * final while the old one will be freed, so it takes over the
+ * temporary status.
+ *
+ * Also note that we have to transfer the per-node surplus state
+ * here as well, otherwise the global surplus count will not match
+ * the per-node counts.
+ */
+ if (PageHugeTemporary(newpage)) {
+ int old_nid = page_to_nid(oldpage);
+ int new_nid = page_to_nid(newpage);
+
+ SetPageHugeTemporary(oldpage);
+ ClearPageHugeTemporary(newpage);
+
+ spin_lock(&hugetlb_lock);
+ if (h->surplus_huge_pages_node[old_nid]) {
+ h->surplus_huge_pages_node[old_nid]--;
+ h->surplus_huge_pages_node[new_nid]++;
+ }
+ spin_unlock(&hugetlb_lock);
+ }
+}
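[Editor's note] alloc_surplus_huge_page() above follows an optimistic pattern worth spelling out: check the limit under the lock, drop the lock for the slow allocation, then re-check and undo on a lost race, using the temporary-page state so the undo path frees the page instead of re-pooling it. As a pseudocode-level C sketch with hypothetical helpers (over_limit(), slow_alloc(), mark_temporary(), account_surplus()):

    spin_lock(&hugetlb_lock);
    if (over_limit()) {             /* cheap check before the slow path */
            spin_unlock(&hugetlb_lock);
            return NULL;
    }
    spin_unlock(&hugetlb_lock);

    page = slow_alloc();            /* may sleep; lock must be dropped */

    spin_lock(&hugetlb_lock);
    if (over_limit()) {             /* raced with a pool resize */
            mark_temporary(page);   /* freed, not re-pooled, on last put */
            put_page(page);
            page = NULL;
    } else {
            account_surplus(page);
    }
    spin_unlock(&hugetlb_lock);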
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 356df057a2a8..b6ac70616c32 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -52,7 +52,7 @@ static int hwpoison_inject(void *data, u64 val)
inject:
pr_info("Injecting memory failure at pfn %#lx\n", pfn);
- return memory_failure(pfn, 18, MF_COUNT_INCREASED);
+ return memory_failure(pfn, MF_COUNT_INCREASED);
put_out:
put_hwpoison_page(p);
return 0;
diff --git a/mm/interval_tree.c b/mm/interval_tree.c
index b47664358796..27ddfd29112a 100644
--- a/mm/interval_tree.c
+++ b/mm/interval_tree.c
@@ -18,7 +18,7 @@ static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
- return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
+ return v->vm_pgoff + vma_pages(v) - 1;
}
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ea4ff259b671..b7e2268dfc9a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1399,8 +1399,7 @@ static void collapse_shmem(struct mm_struct *mm,
}
if (page_mapped(page))
- unmap_mapping_range(mapping, index << PAGE_SHIFT,
- PAGE_SIZE, 0);
+ unmap_mapping_pages(mapping, index, 1, false);
spin_lock_irq(&mapping->tree_lock);
@@ -1674,10 +1673,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
spin_unlock(&khugepaged_mm_lock);
mm = mm_slot->mm;
- down_read(&mm->mmap_sem);
- if (unlikely(khugepaged_test_exit(mm)))
- vma = NULL;
- else
+ /*
+ * Don't wait for the semaphore (to avoid long wait times). Just move to
+ * the next mm on the list.
+ */
+ vma = NULL;
+ if (unlikely(!down_read_trylock(&mm->mmap_sem)))
+ goto breakouterloop_mmap_sem;
+ if (likely(!khugepaged_test_exit(mm)))
vma = find_vma(mm, khugepaged_scan.address);
progress++;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index d73c14294f3a..e83987c55a08 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -91,7 +91,6 @@
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
-#include <linux/hardirq.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
@@ -127,7 +126,7 @@
/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
__GFP_NORETRY | __GFP_NOMEMALLOC | \
- __GFP_NOWARN)
+ __GFP_NOWARN | __GFP_NOFAIL)
/* scanning area inside a memory block */
struct kmemleak_scan_area {
diff --git a/mm/ksm.c b/mm/ksm.c
index be8f4576f842..c406f75957ad 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -675,15 +675,8 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
expected_mapping = (void *)((unsigned long)stable_node |
PAGE_MAPPING_KSM);
again:
- kpfn = READ_ONCE(stable_node->kpfn);
+ kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
page = pfn_to_page(kpfn);
-
- /*
- * page is computed from kpfn, so on most architectures reading
- * page->mapping is naturally ordered after reading node->kpfn,
- * but on Alpha we need to be more careful.
- */
- smp_read_barrier_depends();
if (READ_ONCE(page->mapping) != expected_mapping)
goto stale;
diff --git a/mm/madvise.c b/mm/madvise.c
index 751e97aa2210..4d3c922ea1a1 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -661,7 +661,7 @@ static int madvise_inject_error(int behavior,
pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
page_to_pfn(page), start);
- ret = memory_failure(page_to_pfn(page), 0, MF_COUNT_INCREASED);
+ ret = memory_failure(page_to_pfn(page), MF_COUNT_INCREASED);
if (ret)
return ret;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ac2ffd5e02b9..0ae2dc3a1748 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -542,39 +542,10 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
return mz;
}
-/*
- * Return page count for single (non recursive) @memcg.
- *
- * Implementation Note: reading percpu statistics for memcg.
- *
- * Both of vmstat[] and percpu_counter has threshold and do periodic
- * synchronization to implement "quick" read. There are trade-off between
- * reading cost and precision of value. Then, we may have a chance to implement
- * a periodic synchronization of counter in memcg's counter.
- *
- * But this _read() function is used for user interface now. The user accounts
- * memory usage by memory cgroup and he _always_ requires exact value because
- * he accounts memory. Even if we provide quick-and-fuzzy read, we always
- * have to visit all online cpus and make sum. So, for now, unnecessary
- * synchronization is not implemented. (just implemented for cpu hotplug)
- *
- * If there are kernel internal actions which can make use of some not-exact
- * value, and reading all cpu value can be performance bottleneck in some
- * common workload, threshold and synchronization as vmstat[] should be
- * implemented.
- *
- * The parameter idx can be of type enum memcg_event_item or vm_event_item.
- */
-
static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
int event)
{
- unsigned long val = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- val += per_cpu(memcg->stat->events[event], cpu);
- return val;
+ return atomic_long_read(&memcg->events[event]);
}
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
@@ -586,27 +557,27 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
* counted as CACHE even if it's on ANON LRU.
*/
if (PageAnon(page))
- __this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
+ __mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
else {
- __this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
+ __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
if (PageSwapBacked(page))
- __this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
+ __mod_memcg_state(memcg, NR_SHMEM, nr_pages);
}
if (compound) {
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
- __this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
+ __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
}
/* pagein of a big page is an event. So, ignore page size */
if (nr_pages > 0)
- __this_cpu_inc(memcg->stat->events[PGPGIN]);
+ __count_memcg_events(memcg, PGPGIN, 1);
else {
- __this_cpu_inc(memcg->stat->events[PGPGOUT]);
+ __count_memcg_events(memcg, PGPGOUT, 1);
nr_pages = -nr_pages; /* for event */
}
- __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
+ __this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
}
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
@@ -642,8 +613,8 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
{
unsigned long val, next;
- val = __this_cpu_read(memcg->stat->nr_page_events);
- next = __this_cpu_read(memcg->stat->targets[target]);
+ val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
+ next = __this_cpu_read(memcg->stat_cpu->targets[target]);
/* from time_after() in jiffies.h */
if ((long)(next - val) < 0) {
switch (target) {
@@ -659,7 +630,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
default:
break;
}
- __this_cpu_write(memcg->stat->targets[target], next);
+ __this_cpu_write(memcg->stat_cpu->targets[target], next);
return true;
}
return false;
@@ -1124,7 +1095,7 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
return false;
}
-unsigned int memcg1_stats[] = {
+static const unsigned int memcg1_stats[] = {
MEMCG_CACHE,
MEMCG_RSS,
MEMCG_RSS_HUGE,
@@ -1206,20 +1177,6 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
}
/*
- * This function returns the number of memcg under hierarchy tree. Returns
- * 1(self count) if no children.
- */
-static int mem_cgroup_count_children(struct mem_cgroup *memcg)
-{
- int num = 0;
- struct mem_cgroup *iter;
-
- for_each_mem_cgroup_tree(iter, memcg)
- num++;
- return num;
-}
-
-/*
* Return the memory (and swap, if configured) limit for a memcg.
*/
unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
@@ -1707,11 +1664,6 @@ void unlock_page_memcg(struct page *page)
}
EXPORT_SYMBOL(unlock_page_memcg);
-/*
- * size of first charge trial. "32" comes from vmscan.c's magic value.
- * TODO: maybe necessary to use big numbers in big irons.
- */
-#define CHARGE_BATCH 32U
struct memcg_stock_pcp {
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
@@ -1739,7 +1691,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
unsigned long flags;
bool ret = false;
- if (nr_pages > CHARGE_BATCH)
+ if (nr_pages > MEMCG_CHARGE_BATCH)
return ret;
local_irq_save(flags);
@@ -1808,7 +1760,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
}
stock->nr_pages += nr_pages;
- if (stock->nr_pages > CHARGE_BATCH)
+ if (stock->nr_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);
local_irq_restore(flags);
@@ -1858,9 +1810,44 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
struct memcg_stock_pcp *stock;
+ struct mem_cgroup *memcg;
stock = &per_cpu(memcg_stock, cpu);
drain_stock(stock);
+
+ for_each_mem_cgroup(memcg) {
+ int i;
+
+ for (i = 0; i < MEMCG_NR_STAT; i++) {
+ int nid;
+ long x;
+
+ x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
+ if (x)
+ atomic_long_add(x, &memcg->stat[i]);
+
+ if (i >= NR_VM_NODE_STAT_ITEMS)
+ continue;
+
+ for_each_node(nid) {
+ struct mem_cgroup_per_node *pn;
+
+ pn = mem_cgroup_nodeinfo(memcg, nid);
+ x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
+ if (x)
+ atomic_long_add(x, &pn->lruvec_stat[i]);
+ }
+ }
+
+ for (i = 0; i < MEMCG_NR_EVENTS; i++) {
+ long x;
+
+ x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
+ if (x)
+ atomic_long_add(x, &memcg->events[i]);
+ }
+ }
+
return 0;
}
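[Editor's note] This hotplug handler is one half of the new memcg stats scheme: writers batch updates in per-cpu counters and spill them into shared atomics, while readers just load the atomic (hence the one-line memcg_sum_events() above). A runnable, single-threaded userspace model of the batch-and-spill idea — BATCH stands in for a MEMCG_CHARGE_BATCH-style threshold:

    #include <stdatomic.h>
    #include <stdio.h>

    #define BATCH 32
    static atomic_long shared_count;   /* the global, exact-enough counter */
    static long percpu_delta;          /* one per CPU in the real scheme */

    static void mod_counter(long nr)
    {
            long x = percpu_delta + nr;

            if (x > BATCH || x < -BATCH) {  /* spill once the batch fills */
                    atomic_fetch_add(&shared_count, x);
                    x = 0;
            }
            percpu_delta = x;
    }

    int main(void)
    {
            for (int i = 0; i < 100; i++)
                    mod_counter(1);
            /* a reader folds in the unflushed delta if it needs exactness */
            printf("shared=%ld pending=%ld\n",
                   atomic_load(&shared_count), percpu_delta);
            return 0;
    }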
@@ -1881,7 +1868,7 @@ static void high_work_func(struct work_struct *work)
struct mem_cgroup *memcg;
memcg = container_of(work, struct mem_cgroup, high_work);
- reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
+ reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}
/*
@@ -1905,7 +1892,7 @@ void mem_cgroup_handle_over_high(void)
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned int nr_pages)
{
- unsigned int batch = max(CHARGE_BATCH, nr_pages);
+ unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct mem_cgroup *mem_over_limit;
struct page_counter *counter;
@@ -2415,18 +2402,11 @@ void mem_cgroup_split_huge_fixup(struct page *head)
for (i = 1; i < HPAGE_PMD_NR; i++)
head[i].mem_cgroup = head->mem_cgroup;
- __this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
- HPAGE_PMD_NR);
+ __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_MEMCG_SWAP
-static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
- int nr_entries)
-{
- this_cpu_add(memcg->stat->count[MEMCG_SWAP], nr_entries);
-}
-
/**
* mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
* @entry: swap entry to be moved
@@ -2450,8 +2430,8 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
new_id = mem_cgroup_id(to);
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
- mem_cgroup_swap_statistics(from, -1);
- mem_cgroup_swap_statistics(to, 1);
+ mod_memcg_state(from, MEMCG_SWAP, -1);
+ mod_memcg_state(to, MEMCG_SWAP, 1);
return 0;
}
return -EINVAL;
@@ -2467,23 +2447,12 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
static DEFINE_MUTEX(memcg_limit_mutex);
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
- unsigned long limit)
+ unsigned long limit, bool memsw)
{
- unsigned long curusage;
- unsigned long oldusage;
bool enlarge = false;
- int retry_count;
int ret;
-
- /*
- * For keeping hierarchical_reclaim simple, how long we should retry
- * is depends on callers. We set our retry-count to be function
- * of # of children which we should visit in this loop.
- */
- retry_count = MEM_CGROUP_RECLAIM_RETRIES *
- mem_cgroup_count_children(memcg);
-
- oldusage = page_counter_read(&memcg->memory);
+ bool limits_invariant;
+ struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
do {
if (signal_pending(current)) {
@@ -2492,79 +2461,31 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
}
mutex_lock(&memcg_limit_mutex);
- if (limit > memcg->memsw.limit) {
+ /*
+	 * Make sure that the new limit (memsw or memory limit) doesn't
+	 * break our basic invariant: memory.limit <= memsw.limit.
+ */
+ limits_invariant = memsw ? limit >= memcg->memory.limit :
+ limit <= memcg->memsw.limit;
+ if (!limits_invariant) {
mutex_unlock(&memcg_limit_mutex);
ret = -EINVAL;
break;
}
- if (limit > memcg->memory.limit)
+ if (limit > counter->limit)
enlarge = true;
- ret = page_counter_limit(&memcg->memory, limit);
+ ret = page_counter_limit(counter, limit);
mutex_unlock(&memcg_limit_mutex);
if (!ret)
break;
- try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
-
- curusage = page_counter_read(&memcg->memory);
- /* Usage is reduced ? */
- if (curusage >= oldusage)
- retry_count--;
- else
- oldusage = curusage;
- } while (retry_count);
-
- if (!ret && enlarge)
- memcg_oom_recover(memcg);
-
- return ret;
-}
-
-static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
- unsigned long limit)
-{
- unsigned long curusage;
- unsigned long oldusage;
- bool enlarge = false;
- int retry_count;
- int ret;
-
- /* see mem_cgroup_resize_res_limit */
- retry_count = MEM_CGROUP_RECLAIM_RETRIES *
- mem_cgroup_count_children(memcg);
-
- oldusage = page_counter_read(&memcg->memsw);
-
- do {
- if (signal_pending(current)) {
- ret = -EINTR;
+ if (!try_to_free_mem_cgroup_pages(memcg, 1,
+ GFP_KERNEL, !memsw)) {
+ ret = -EBUSY;
break;
}
-
- mutex_lock(&memcg_limit_mutex);
- if (limit < memcg->memory.limit) {
- mutex_unlock(&memcg_limit_mutex);
- ret = -EINVAL;
- break;
- }
- if (limit > memcg->memsw.limit)
- enlarge = true;
- ret = page_counter_limit(&memcg->memsw, limit);
- mutex_unlock(&memcg_limit_mutex);
-
- if (!ret)
- break;
-
- try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
-
- curusage = page_counter_read(&memcg->memsw);
- /* Usage is reduced ? */
- if (curusage >= oldusage)
- retry_count--;
- else
- oldusage = curusage;
- } while (retry_count);
+ } while (true);
if (!ret && enlarge)
memcg_oom_recover(memcg);
@@ -3020,10 +2941,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
}
switch (MEMFILE_TYPE(of_cft(of)->private)) {
case _MEM:
- ret = mem_cgroup_resize_limit(memcg, nr_pages);
+ ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
break;
case _MEMSWAP:
- ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
+ ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
break;
case _KMEM:
ret = memcg_update_kmem_limit(memcg, nr_pages);
@@ -3777,7 +3698,7 @@ static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
struct mem_cgroup_event *event =
container_of(wait, struct mem_cgroup_event, wait);
struct mem_cgroup *memcg = event->memcg;
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
if (flags & POLLHUP) {
/*
@@ -4168,8 +4089,8 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
if (!pn)
return 1;
- pn->lruvec_stat = alloc_percpu(struct lruvec_stat);
- if (!pn->lruvec_stat) {
+ pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
+ if (!pn->lruvec_stat_cpu) {
kfree(pn);
return 1;
}
@@ -4187,7 +4108,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
- free_percpu(pn->lruvec_stat);
+ free_percpu(pn->lruvec_stat_cpu);
kfree(pn);
}
@@ -4197,7 +4118,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
- free_percpu(memcg->stat);
+ free_percpu(memcg->stat_cpu);
kfree(memcg);
}
@@ -4226,8 +4147,8 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
if (memcg->id.id < 0)
goto fail;
- memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
- if (!memcg->stat)
+ memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
+ if (!memcg->stat_cpu)
goto fail;
for_each_node(node)
@@ -4584,8 +4505,8 @@ static int mem_cgroup_move_account(struct page *page,
spin_lock_irqsave(&from->move_lock, flags);
if (!anon && page_mapped(page)) {
- __this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
- __this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
+ __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
+ __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
}
/*
@@ -4597,16 +4518,14 @@ static int mem_cgroup_move_account(struct page *page,
struct address_space *mapping = page_mapping(page);
if (mapping_cap_account_dirty(mapping)) {
- __this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
- nr_pages);
- __this_cpu_add(to->stat->count[NR_FILE_DIRTY],
- nr_pages);
+ __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
+ __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
}
}
if (PageWriteback(page)) {
- __this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
- __this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
+ __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
+ __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
}
/*
@@ -5642,12 +5561,12 @@ static void uncharge_batch(const struct uncharge_gather *ug)
}
local_irq_save(flags);
- __this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS], ug->nr_anon);
- __this_cpu_sub(ug->memcg->stat->count[MEMCG_CACHE], ug->nr_file);
- __this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS_HUGE], ug->nr_huge);
- __this_cpu_sub(ug->memcg->stat->count[NR_SHMEM], ug->nr_shmem);
- __this_cpu_add(ug->memcg->stat->events[PGPGOUT], ug->pgpgout);
- __this_cpu_add(ug->memcg->stat->nr_page_events, nr_pages);
+ __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
+ __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
+ __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
+ __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
+ __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
+ __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
local_irq_restore(flags);
@@ -5874,7 +5793,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
if (in_softirq())
gfp_mask = GFP_NOWAIT;
- this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
+ mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
if (try_charge(memcg, gfp_mask, nr_pages) == 0)
return true;
@@ -5895,7 +5814,7 @@ void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
return;
}
- this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
+ mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
refill_stock(memcg, nr_pages);
}
@@ -6019,7 +5938,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
nr_entries);
VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(swap_memcg, nr_entries);
+ mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
page->mem_cgroup = NULL;
@@ -6085,7 +6004,7 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
mem_cgroup_id_get_many(memcg, nr_pages - 1);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(memcg, nr_pages);
+ mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
return 0;
}
@@ -6113,7 +6032,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
else
page_counter_uncharge(&memcg->memsw, nr_pages);
}
- mem_cgroup_swap_statistics(memcg, -nr_pages);
+ mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
mem_cgroup_id_put_many(memcg, nr_pages);
}
rcu_read_unlock();
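The hotplug callback above drains each dead CPU's cached counters into the
shared atomics via this_cpu_xchg(). A minimal user-space sketch of that drain
pattern, with C11 atomics standing in for the kernel's per-cpu and
atomic_long_t primitives (all names here are illustrative, not from the patch):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4

    /* Per-CPU deltas; in the kernel these live in per-cpu memory. */
    static _Atomic long percpu_count[NCPUS];
    static _Atomic long global_count;

    /* Mirrors the this_cpu_xchg() + atomic_long_add() pair above. */
    static void drain_cpu(int cpu)
    {
        long x = atomic_exchange(&percpu_count[cpu], 0);
        if (x)
            atomic_fetch_add(&global_count, x);
    }

    int main(void)
    {
        percpu_count[1] = 5;
        percpu_count[3] = -2;
        for (int cpu = 0; cpu < NCPUS; cpu++)
            drain_cpu(cpu);
        printf("global = %ld\n", atomic_load(&global_count)); /* 3 */
        return 0;
    }

The exchange-to-zero step is what makes a concurrent drain safe: each delta is
claimed by exactly one reader before being folded into the shared counter.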
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4acdf393a801..4b80ccee4535 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -178,25 +178,19 @@ EXPORT_SYMBOL_GPL(hwpoison_filter);
* ``action optional'' if they are not immediately affected by the error
* ``action required'' if error happened in current execution context
*/
-static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
+static int kill_proc(struct task_struct *t, unsigned long addr,
unsigned long pfn, struct page *page, int flags)
{
- struct siginfo si;
+ short addr_lsb;
int ret;
pr_err("Memory failure: %#lx: Killing %s:%d due to hardware memory corruption\n",
pfn, t->comm, t->pid);
- si.si_signo = SIGBUS;
- si.si_errno = 0;
- si.si_addr = (void *)addr;
-#ifdef __ARCH_SI_TRAPNO
- si.si_trapno = trapno;
-#endif
- si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
+ addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
- si.si_code = BUS_MCEERR_AR;
- ret = force_sig_info(SIGBUS, &si, current);
+ ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr,
+ addr_lsb, current);
} else {
/*
* Don't use force here, it's convenient if the signal
@@ -204,8 +198,8 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
* This could cause a loop when the user sets SIGBUS
* to SIG_IGN, but hopefully no one will do that?
*/
- si.si_code = BUS_MCEERR_AO;
- ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */
+ ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr,
+ addr_lsb, t); /* synchronous? */
}
if (ret < 0)
pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
@@ -323,7 +317,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
* Also when FAIL is set do a force kill because something went
* wrong earlier.
*/
-static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
+static void kill_procs(struct list_head *to_kill, int forcekill,
bool fail, struct page *page, unsigned long pfn,
int flags)
{
@@ -348,7 +342,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
* check for that, but we need to tell the
* process anyways.
*/
- else if (kill_proc(tk->tsk, tk->addr, trapno,
+ else if (kill_proc(tk->tsk, tk->addr,
pfn, page, flags) < 0)
pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
pfn, tk->tsk->comm, tk->tsk->pid);
@@ -927,7 +921,7 @@ EXPORT_SYMBOL_GPL(get_hwpoison_page);
* the pages and send SIGBUS to the processes if the data was dirty.
*/
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
- int trapno, int flags, struct page **hpagep)
+ int flags, struct page **hpagep)
{
enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
struct address_space *mapping;
@@ -1017,7 +1011,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* any accesses to the poisoned memory.
*/
forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
- kill_procs(&tokill, forcekill, trapno, !unmap_success, p, pfn, flags);
+ kill_procs(&tokill, forcekill, !unmap_success, p, pfn, flags);
return unmap_success;
}
@@ -1045,7 +1039,7 @@ static int identify_page_state(unsigned long pfn, struct page *p,
return page_action(ps, p, pfn);
}
-static int memory_failure_hugetlb(unsigned long pfn, int trapno, int flags)
+static int memory_failure_hugetlb(unsigned long pfn, int flags)
{
struct page *p = pfn_to_page(pfn);
struct page *head = compound_head(p);
@@ -1090,7 +1084,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int trapno, int flags)
return 0;
}
- if (!hwpoison_user_mappings(p, pfn, trapno, flags, &head)) {
+ if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
res = -EBUSY;
goto out;
@@ -1105,7 +1099,6 @@ out:
/**
* memory_failure - Handle memory failure of a page.
* @pfn: Page Number of the corrupted page
- * @trapno: Trap number reported in the signal to user space.
* @flags: fine tune action taken
*
* This function is called by the low level machine check code
@@ -1120,7 +1113,7 @@ out:
* Must run in process context (e.g. a work queue) with interrupts
* enabled and no spinlocks hold.
*/
-int memory_failure(unsigned long pfn, int trapno, int flags)
+int memory_failure(unsigned long pfn, int flags)
{
struct page *p;
struct page *hpage;
@@ -1129,7 +1122,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
unsigned long page_flags;
if (!sysctl_memory_failure_recovery)
- panic("Memory failure from trap %d on page %lx", trapno, pfn);
+ panic("Memory failure on page %lx", pfn);
if (!pfn_valid(pfn)) {
pr_err("Memory failure: %#lx: memory outside kernel control\n",
@@ -1139,7 +1132,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
p = pfn_to_page(pfn);
if (PageHuge(p))
- return memory_failure_hugetlb(pfn, trapno, flags);
+ return memory_failure_hugetlb(pfn, flags);
if (TestSetPageHWPoison(p)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
pfn);
@@ -1268,7 +1261,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* When the raw error page is thp tail page, hpage points to the raw
* page after thp split.
*/
- if (!hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)) {
+ if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
res = -EBUSY;
goto out;
@@ -1296,7 +1289,6 @@ EXPORT_SYMBOL_GPL(memory_failure);
struct memory_failure_entry {
unsigned long pfn;
- int trapno;
int flags;
};
@@ -1312,7 +1304,6 @@ static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
/**
* memory_failure_queue - Schedule handling memory failure of a page.
* @pfn: Page Number of the corrupted page
- * @trapno: Trap number reported in the signal to user space.
* @flags: Flags for memory failure handling
*
* This function is called by the low level hardware error handler
@@ -1326,13 +1317,12 @@ static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
*
* Can run in IRQ context.
*/
-void memory_failure_queue(unsigned long pfn, int trapno, int flags)
+void memory_failure_queue(unsigned long pfn, int flags)
{
struct memory_failure_cpu *mf_cpu;
unsigned long proc_flags;
struct memory_failure_entry entry = {
.pfn = pfn,
- .trapno = trapno,
.flags = flags,
};
@@ -1365,7 +1355,7 @@ static void memory_failure_work_func(struct work_struct *work)
if (entry.flags & MF_SOFT_OFFLINE)
soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
else
- memory_failure(entry.pfn, entry.trapno, entry.flags);
+ memory_failure(entry.pfn, entry.flags);
}
}
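With the trapno plumbing gone, the only page-specific detail the SIGBUS
carries is addr_lsb, the log2 granularity of the poisoned mapping, which
kill_proc() derives from the compound order. A sketch of that arithmetic,
assuming the common PAGE_SHIFT of 12:

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* 4 KiB base pages assumed */

    int main(void)
    {
        int order_base = 0; /* plain page */
        int order_thp  = 9; /* 2 MiB transparent huge page */

        /* what kill_proc() computes as addr_lsb */
        printf("base page: si_addr_lsb = %d\n", order_base + PAGE_SHIFT); /* 12 */
        printf("2MiB page: si_addr_lsb = %d\n", order_thp + PAGE_SHIFT);  /* 21 */
        return 0;
    }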
diff --git a/mm/memory.c b/mm/memory.c
index ca5674cbaff2..53373b7a1512 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -400,10 +400,17 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-/* tlb_gather_mmu
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm. The @fullmm argument is used when @mm is without
- * users and we're going to destroy the full address space (exit/execve).
+/**
+ * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm. The @start and @end are set to 0 and -1
+ * respectively when @mm is without users and we're going to destroy
+ * the full address space (exit/execve).
*/
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end)
@@ -2792,8 +2799,37 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
}
/**
+ * unmap_mapping_pages() - Unmap pages from processes.
+ * @mapping: The address space containing pages to be unmapped.
+ * @start: Index of first page to be unmapped.
+ * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
+ * @even_cows: Whether to unmap even private COWed pages.
+ *
+ * Unmap the pages in this address space from any userspace process which
+ * has them mmaped. Generally, you want to remove COWed pages as well when
+ * a file is being truncated, but not when invalidating pages from the page
+ * cache.
+ */
+void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
+ pgoff_t nr, bool even_cows)
+{
+ struct zap_details details = { };
+
+ details.check_mapping = even_cows ? NULL : mapping;
+ details.first_index = start;
+ details.last_index = start + nr - 1;
+ if (details.last_index < details.first_index)
+ details.last_index = ULONG_MAX;
+
+ i_mmap_lock_write(mapping);
+ if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+ unmap_mapping_range_tree(&mapping->i_mmap, &details);
+ i_mmap_unlock_write(mapping);
+}
+
+/**
* unmap_mapping_range - unmap the portion of all mmaps in the specified
- * address_space corresponding to the specified page range in the underlying
+ * address_space corresponding to the specified byte range in the underlying
* file.
*
* @mapping: the address space containing mmaps to be unmapped.
@@ -2811,7 +2847,6 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows)
{
- struct zap_details details = { };
pgoff_t hba = holebegin >> PAGE_SHIFT;
pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -2823,16 +2858,7 @@ void unmap_mapping_range(struct address_space *mapping,
hlen = ULONG_MAX - hba + 1;
}
- details.check_mapping = even_cows ? NULL : mapping;
- details.first_index = hba;
- details.last_index = hba + hlen - 1;
- if (details.last_index < details.first_index)
- details.last_index = ULONG_MAX;
-
- i_mmap_lock_write(mapping);
- if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
- unmap_mapping_range_tree(&mapping->i_mmap, &details);
- i_mmap_unlock_write(mapping);
+ unmap_mapping_pages(mapping, hba, hlen, even_cows);
}
EXPORT_SYMBOL(unmap_mapping_range);
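unmap_mapping_range() is now a thin wrapper that rounds a byte range outward
into the page range unmap_mapping_pages() takes: the start offset is truncated
down and the length rounded up. The rounding is easy to check standalone; a
sketch with made-up offsets:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long holebegin = 5000; /* byte offset of the hole */
        unsigned long holelen   = 9000; /* hole length in bytes */

        /* same rounding as unmap_mapping_range() */
        unsigned long hba  = holebegin >> PAGE_SHIFT;                 /* 1 */
        unsigned long hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; /* 3 */

        printf("unmap_mapping_pages(mapping, %lu, %lu, even_cows)\n", hba, hlen);
        return 0;
    }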
@@ -2857,8 +2883,11 @@ int do_swap_page(struct vm_fault *vmf)
int ret = 0;
bool vma_readahead = swap_use_vma_readahead();
- if (vma_readahead)
+ if (vma_readahead) {
page = swap_readahead_detect(vmf, &swap_ra);
+ swapcache = page;
+ }
+
if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
if (page)
put_page(page);
@@ -2889,9 +2918,12 @@ int do_swap_page(struct vm_fault *vmf)
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
- if (!page)
+ if (!page) {
page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
vmf->address);
+ swapcache = page;
+ }
+
if (!page) {
struct swap_info_struct *si = swp_swap_info(entry);
@@ -3479,9 +3511,8 @@ static int fault_around_bytes_get(void *data, u64 *val)
}
/*
- * fault_around_pages() and fault_around_mask() expects fault_around_bytes
- * rounded down to nearest page order. It's what do_fault_around() expects to
- * see.
+ * fault_around_bytes must be rounded down to the nearest page order as it's
+ * what do_fault_around() expects to see.
*/
static int fault_around_bytes_set(void *data, u64 val)
{
@@ -3524,13 +3555,14 @@ late_initcall(fault_around_debugfs);
* This function doesn't cross the VMA boundaries, in order to call map_pages()
* only once.
*
- * fault_around_pages() defines how many pages we'll try to map.
- * do_fault_around() expects it to return a power of two less than or equal to
- * PTRS_PER_PTE.
+ * fault_around_bytes defines how many bytes we'll try to map.
+ * do_fault_around() expects it to be set to a power of two less than or equal
+ * to PTRS_PER_PTE.
*
- * The virtual address of the area that we map is naturally aligned to the
- * fault_around_pages() value (and therefore to page order). This way it's
- * easier to guarantee that we don't cross page table boundaries.
+ * The virtual address of the area that we map is naturally aligned to
+ * fault_around_bytes rounded down to the machine page size
+ * (and therefore to page order). This way it's easier to guarantee
+ * that we don't cross page table boundaries.
*/
static int do_fault_around(struct vm_fault *vmf)
{
@@ -3547,8 +3579,8 @@ static int do_fault_around(struct vm_fault *vmf)
start_pgoff -= off;
/*
- * end_pgoff is either end of page table or end of vma
- * or fault_around_pages() from start_pgoff, depending what is nearest.
+ * end_pgoff is either the end of the page table, the end of
+ * the vma or nr_pages from start_pgoff, depending what is nearest.
*/
end_pgoff = start_pgoff -
((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c52aa05b106c..9bbd6982d4e4 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -184,7 +184,7 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
for (i = 0; i < mapsize; i++, page++)
get_page_bootmem(section_nr, page, SECTION_INFO);
- usemap = __nr_to_section(section_nr)->pageblock_flags;
+ usemap = ms->pageblock_flags;
page = virt_to_page(usemap);
mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
@@ -200,9 +200,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
struct mem_section *ms;
struct page *page, *memmap;
- if (!pfn_valid(start_pfn))
- return;
-
section_nr = pfn_to_section_nr(start_pfn);
ms = __nr_to_section(section_nr);
@@ -210,7 +207,7 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
- usemap = __nr_to_section(section_nr)->pageblock_flags;
+ usemap = ms->pageblock_flags;
page = virt_to_page(usemap);
mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
@@ -1637,7 +1634,7 @@ repeat:
goto failed_removal;
cond_resched();
- lru_add_drain_all_cpuslocked();
+ lru_add_drain_all();
drain_all_pages(zone);
pfn = scan_movable_pages(start_pfn, end_pfn);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4ce44d3ff03d..d879f1d8a44a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1121,8 +1121,8 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
}
if (PageHuge(page)) {
- BUG_ON(!vma);
- return alloc_huge_page_noerr(vma, address, 1);
+ return alloc_huge_page_vma(page_hstate(compound_head(page)),
+ vma, address);
} else if (thp_migration_supported() && PageTransHuge(page)) {
struct page *thp;
@@ -1263,6 +1263,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
unsigned long maxnode)
{
unsigned long k;
+ unsigned long t;
unsigned long nlongs;
unsigned long endmask;
@@ -1279,13 +1280,17 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
else
endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
- /* When the user specified more nodes than supported just check
- if the non supported part is all zero. */
+ /*
+ * When the user specified more nodes than supported just check
+ * if the non supported part is all zero.
+ *
+	 * If maxnode has more longs than MAX_NUMNODES, check the bits in
+	 * that area first, and then go through to check the remaining
+	 * bits, which are equal to or bigger than MAX_NUMNODES.
+ * Otherwise, just check bits [MAX_NUMNODES, maxnode).
+ */
if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
- if (nlongs > PAGE_SIZE/sizeof(long))
- return -EINVAL;
for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
- unsigned long t;
if (get_user(t, nmask + k))
return -EFAULT;
if (k == nlongs - 1) {
@@ -1298,6 +1303,16 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
endmask = ~0UL;
}
+ if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
+ unsigned long valid_mask = endmask;
+
+ valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
+ if (get_user(t, nmask + nlongs - 1))
+ return -EFAULT;
+ if (t & valid_mask)
+ return -EINVAL;
+ }
+
if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
return -EFAULT;
nodes_addr(*nodes)[nlongs-1] &= endmask;
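The endmask arithmetic above is easy to verify in isolation; a sketch assuming
64-bit longs and maxnode == 100:

    #include <stdio.h>

    #define BITS_PER_LONG 64

    int main(void)
    {
        unsigned long maxnode = 100;
        unsigned long nlongs  = (maxnode + BITS_PER_LONG - 1) / BITS_PER_LONG;
        unsigned long endmask = (maxnode % BITS_PER_LONG) ?
                (1UL << (maxnode % BITS_PER_LONG)) - 1 : ~0UL;

        /* nlongs=2 endmask=0xfffffffff: only bits 64..99 of the last long count */
        printf("nlongs=%lu endmask=%#lx\n", nlongs, endmask);
        return 0;
    }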
@@ -1418,10 +1433,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
goto out_put;
}
- if (!nodes_subset(*new, node_states[N_MEMORY])) {
- err = -EINVAL;
+ task_nodes = cpuset_mems_allowed(current);
+ nodes_and(*new, *new, task_nodes);
+ if (nodes_empty(*new))
+ goto out_put;
+
+ nodes_and(*new, *new, node_states[N_MEMORY]);
+ if (nodes_empty(*new))
goto out_put;
- }
err = security_task_movememory(task);
if (err)
diff --git a/mm/migrate.c b/mm/migrate.c
index 4d0be47a322a..1e5525a25691 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1323,9 +1323,8 @@ put_anon:
put_anon_vma(anon_vma);
if (rc == MIGRATEPAGE_SUCCESS) {
- hugetlb_cgroup_migrate(hpage, new_hpage);
+ move_hugetlb_state(hpage, new_hpage, reason);
put_new_page = NULL;
- set_page_owner_migrate_reason(new_hpage, reason);
}
unlock_page(hpage);
diff --git a/mm/mlock.c b/mm/mlock.c
index 30472d438794..f7f54fd2e13f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -779,7 +779,7 @@ static int apply_mlockall_flags(int flags)
/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
- cond_resched_rcu_qs();
+ cond_resched();
}
out:
return 0;
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 96edb33fd09a..eff6b88a993f 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -236,6 +236,37 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
+/*
+ * Must be called while holding mm->mmap_sem for either read or write.
+ * The result is guaranteed to be valid until mm->mmap_sem is dropped.
+ */
+bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
+{
+ struct mmu_notifier *mn;
+ int id;
+ bool ret = false;
+
+ WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+
+ if (!mm_has_notifiers(mm))
+ return ret;
+
+ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+ if (!mn->ops->invalidate_range &&
+ !mn->ops->invalidate_range_start &&
+ !mn->ops->invalidate_range_end)
+ continue;
+
+ if (!(mn->ops->flags & MMU_INVALIDATE_DOES_NOT_BLOCK)) {
+ ret = true;
+ break;
+ }
+ }
+ srcu_read_unlock(&srcu, id);
+ return ret;
+}
+
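A secondary-MMU driver whose invalidation callbacks never sleep opts out of
the blockable set by advertising that in ops->flags. A kernel-style fragment
of such a registration (my_invalidate_range and my_mn_ops are invented names):

    #include <linux/mmu_notifier.h>

    static void my_invalidate_range(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
    {
        /* purge secondary TLB entries for [start, end); must not sleep */
    }

    static const struct mmu_notifier_ops my_mn_ops = {
        .flags            = MMU_INVALIDATE_DOES_NOT_BLOCK,
        .invalidate_range = my_invalidate_range,
    };

With the flag set, mm_has_blockable_invalidate_notifiers() skips this
notifier, so (as the oom_kill.c hunk below shows) the oom reaper no longer
backs off from such an mm.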
static int do_mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm,
int take_mmap_sem)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ec39f730a0bf..e3309fcf586b 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -84,6 +84,11 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (!page || PageKsm(page))
continue;
+ /* Also skip shared copy-on-write pages */
+ if (is_cow_mapping(vma->vm_flags) &&
+ page_mapcount(page) != 1)
+ continue;
+
/* Avoid TLB flush if possible */
if (pte_protnone(oldpte))
continue;
@@ -166,7 +171,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
next = pmd_addr_end(addr, end);
if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
&& pmd_none_or_clear_bad(pmd))
- continue;
+ goto next;
/* invoke the mmu notifier if the pmd is populated */
if (!mni_start) {
@@ -188,7 +193,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
}
/* huge pmd was handled */
- continue;
+ goto next;
}
}
/* fall through, the trans huge pmd just split */
@@ -196,6 +201,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
this_pages = change_pte_range(vma, pmd, addr, next, newprot,
dirty_accountable, prot_numa);
pages += this_pages;
+next:
+ cond_resched();
} while (pmd++, addr = next, addr != end);
if (mni_start)
diff --git a/mm/nommu.c b/mm/nommu.c
index 17c00d93de2e..4b9864b17cb0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1788,13 +1788,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
return -ENOMEM;
}
-void unmap_mapping_range(struct address_space *mapping,
- loff_t const holebegin, loff_t const holelen,
- int even_cows)
-{
-}
-EXPORT_SYMBOL(unmap_mapping_range);
-
int filemap_fault(struct vm_fault *vmf)
{
BUG();
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 29f855551efe..f2e7dfb81eee 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -514,15 +514,12 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
}
/*
- * If the mm has notifiers then we would need to invalidate them around
- * unmap_page_range and that is risky because notifiers can sleep and
- * what they do is basically undeterministic. So let's have a short
+ * If the mm has invalidate_{start,end}() notifiers that could block,
* sleep to give the oom victim some more time.
* TODO: we really want to get rid of this ugly hack and make sure that
- * notifiers cannot block for unbounded amount of time and add
- * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
+	 * notifiers cannot block for an unbounded amount of time
*/
- if (mm_has_notifiers(mm)) {
+ if (mm_has_blockable_invalidate_notifiers(mm)) {
up_read(&mm->mmap_sem);
schedule_timeout_idle(HZ);
goto unlock_oom;
@@ -565,10 +562,14 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
* count elevated without a good reason.
*/
if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
- tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
- unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
- NULL);
- tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+ const unsigned long start = vma->vm_start;
+ const unsigned long end = vma->vm_end;
+
+ tlb_gather_mmu(&tlb, mm, start, end);
+ mmu_notifier_invalidate_range_start(mm, start, end);
+ unmap_page_range(&tlb, vma, start, end, NULL);
+ mmu_notifier_invalidate_range_end(mm, start, end);
+ tlb_finish_mmu(&tlb, start, end);
}
}
pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7e5e775e97f4..c7dd9c86e353 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -293,7 +293,7 @@ int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
- * Determine how many pages need to be initialized durig early boot
+ * Determine how many pages need to be initialized during early boot
* (non-deferred initialization).
* The value of first_deferred_pfn will be set later, once non-deferred pages
* are initialized, but for now set it ULONG_MAX.
@@ -344,7 +344,7 @@ static inline bool update_defer_init(pg_data_t *pgdat,
unsigned long pfn, unsigned long zone_end,
unsigned long *nr_initialised)
{
- /* Always populate low zones for address-contrained allocations */
+ /* Always populate low zones for address-constrained allocations */
if (zone_end < pgdat_end_pfn(pgdat))
return true;
(*nr_initialised)++;
@@ -1177,9 +1177,10 @@ static void free_one_page(struct zone *zone,
}
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
- unsigned long zone, int nid)
+ unsigned long zone, int nid, bool zero)
{
- mm_zero_struct_page(page);
+ if (zero)
+ mm_zero_struct_page(page);
set_page_links(page, zone, nid, pfn);
init_page_count(page);
page_mapcount_reset(page);
@@ -1194,9 +1195,9 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
}
static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
- int nid)
+ int nid, bool zero)
{
- return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+ return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1217,7 +1218,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
break;
}
- __init_single_pfn(pfn, zid, nid);
+ __init_single_pfn(pfn, zid, nid, true);
}
#else
static inline void init_reserved_page(unsigned long pfn)
@@ -1457,92 +1458,87 @@ static inline void __init pgdat_init_report_one_done(void)
}
/*
- * Helper for deferred_init_range, free the given range, reset the counters, and
- * return number of pages freed.
+ * Returns true if page needs to be initialized or freed to buddy allocator.
+ *
+ * First we check if pfn is valid on architectures where it is possible to have
+ * holes within pageblock_nr_pages. On systems where it is not possible, this
+ * function is optimized out.
+ *
+ * Then, we check if the current large page is valid by only checking the validity
+ * of the head pfn.
+ *
+ * Finally, meminit_pfn_in_nid is checked on systems where pfns can interleave
+ * within a node: a pfn is between start and end of a node, but does not belong
+ * to this memory node.
*/
-static inline unsigned long __init __def_free(unsigned long *nr_free,
- unsigned long *free_base_pfn,
- struct page **page)
+static inline bool __init
+deferred_pfn_valid(int nid, unsigned long pfn,
+ struct mminit_pfnnid_cache *nid_init_state)
{
- unsigned long nr = *nr_free;
+ if (!pfn_valid_within(pfn))
+ return false;
+ if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
+ return false;
+ if (!meminit_pfn_in_nid(pfn, nid, nid_init_state))
+ return false;
+ return true;
+}
- deferred_free_range(*free_base_pfn, nr);
- *free_base_pfn = 0;
- *nr_free = 0;
- *page = NULL;
+/*
+ * Free pages to the buddy allocator. Try to free aligned pages in
+ * pageblock_nr_pages-sized chunks.
+ */
+static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
+ unsigned long end_pfn)
+{
+ struct mminit_pfnnid_cache nid_init_state = { };
+ unsigned long nr_pgmask = pageblock_nr_pages - 1;
+ unsigned long nr_free = 0;
- return nr;
+ for (; pfn < end_pfn; pfn++) {
+ if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
+ deferred_free_range(pfn - nr_free, nr_free);
+ nr_free = 0;
+ } else if (!(pfn & nr_pgmask)) {
+ deferred_free_range(pfn - nr_free, nr_free);
+ nr_free = 1;
+ cond_resched();
+ } else {
+ nr_free++;
+ }
+ }
+ /* Free the last block of pages to allocator */
+ deferred_free_range(pfn - nr_free, nr_free);
}
-static unsigned long __init deferred_init_range(int nid, int zid,
- unsigned long start_pfn,
- unsigned long end_pfn)
+/*
+ * Initialize struct pages. We minimize pfn page lookups and scheduler checks
+ * by performing them only once every pageblock_nr_pages.
+ * Returns the number of pages initialized.
+ */
+static unsigned long __init deferred_init_pages(int nid, int zid,
+ unsigned long pfn,
+ unsigned long end_pfn)
{
struct mminit_pfnnid_cache nid_init_state = { };
unsigned long nr_pgmask = pageblock_nr_pages - 1;
- unsigned long free_base_pfn = 0;
unsigned long nr_pages = 0;
- unsigned long nr_free = 0;
struct page *page = NULL;
- unsigned long pfn;
- /*
- * First we check if pfn is valid on architectures where it is possible
- * to have holes within pageblock_nr_pages. On systems where it is not
- * possible, this function is optimized out.
- *
- * Then, we check if a current large page is valid by only checking the
- * validity of the head pfn.
- *
- * meminit_pfn_in_nid is checked on systems where pfns can interleave
- * within a node: a pfn is between start and end of a node, but does not
- * belong to this memory node.
- *
- * Finally, we minimize pfn page lookups and scheduler checks by
- * performing it only once every pageblock_nr_pages.
- *
- * We do it in two loops: first we initialize struct page, than free to
- * buddy allocator, becuse while we are freeing pages we can access
- * pages that are ahead (computing buddy page in __free_one_page()).
- */
- for (pfn = start_pfn; pfn < end_pfn; pfn++) {
- if (!pfn_valid_within(pfn))
+ for (; pfn < end_pfn; pfn++) {
+ if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
+ page = NULL;
continue;
- if ((pfn & nr_pgmask) || pfn_valid(pfn)) {
- if (meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
- if (page && (pfn & nr_pgmask))
- page++;
- else
- page = pfn_to_page(pfn);
- __init_single_page(page, pfn, zid, nid);
- cond_resched();
- }
- }
- }
-
- page = NULL;
- for (pfn = start_pfn; pfn < end_pfn; pfn++) {
- if (!pfn_valid_within(pfn)) {
- nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
- } else if (!(pfn & nr_pgmask) && !pfn_valid(pfn)) {
- nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
- } else if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
- nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
- } else if (page && (pfn & nr_pgmask)) {
- page++;
- nr_free++;
- } else {
- nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
+ } else if (!page || !(pfn & nr_pgmask)) {
page = pfn_to_page(pfn);
- free_base_pfn = pfn;
- nr_free = 1;
cond_resched();
+ } else {
+ page++;
}
+ __init_single_page(page, pfn, zid, nid, true);
+ nr_pages++;
}
- /* Free the last block of pages to allocator */
- nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
-
- return nr_pages;
+	return nr_pages;
}
/* Initialise remaining memory on a node */
@@ -1582,10 +1578,21 @@ static int __init deferred_init_memmap(void *data)
}
first_init_pfn = max(zone->zone_start_pfn, first_init_pfn);
+ /*
+ * Initialize and free pages. We do it in two loops: first we initialize
+	 * struct page, then free to the buddy allocator, because while we are
+ * freeing pages we can access pages that are ahead (computing buddy
+ * page in __free_one_page()).
+ */
for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
- nr_pages += deferred_init_range(nid, zid, spfn, epfn);
+ nr_pages += deferred_init_pages(nid, zid, spfn, epfn);
+ }
+ for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
+ spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
+ epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
+ deferred_free_pages(nid, zid, spfn, epfn);
}
/* Sanity check that the next zone really is unpopulated */
@@ -3391,7 +3398,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
if (gfp_mask & __GFP_THISNODE)
goto out;
- /* Exhausted what can be done so it's blamo time */
+ /* Exhausted what can be done so it's blame time */
if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
*did_some_progress = 1;
@@ -4272,7 +4279,7 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
struct page *page;
/*
- * __get_free_pages() returns a 32-bit address, which cannot represent
+ * __get_free_pages() returns a virtual address, which cannot represent
* a highmem page
*/
VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
@@ -5393,15 +5400,20 @@ not_early:
* can be created for invalid pages (for alignment)
* check here not to call set_pageblock_migratetype() against
* pfn out of zone.
+ *
+			 * Please note that the MEMMAP_HOTPLUG path doesn't clear the
+			 * memmap because this is done early in sparse_add_one_section().
*/
if (!(pfn & (pageblock_nr_pages - 1))) {
struct page *page = pfn_to_page(pfn);
- __init_single_page(page, pfn, zone, nid);
+ __init_single_page(page, pfn, zone, nid,
+ context != MEMMAP_HOTPLUG);
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
cond_resched();
} else {
- __init_single_pfn(pfn, zone, nid);
+ __init_single_pfn(pfn, zone, nid,
+ context != MEMMAP_HOTPLUG);
}
}
}
@@ -6260,6 +6272,8 @@ void __paginginit zero_resv_unavail(void)
pgcnt = 0;
for_each_resv_unavail_range(i, &start, &end) {
for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+ if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+ continue;
mm_zero_struct_page(pfn_to_page(pfn));
pgcnt++;
}
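The new guard skips pageblocks whose head pfn has no memmap at all.
ALIGN_DOWN is plain masking for power-of-two sizes; a quick sketch assuming
order-9 (512-page) pageblocks:

    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL /* order-9 pageblocks assumed */
    #define ALIGN_DOWN(x, a)   ((x) & ~((a) - 1)) /* power-of-two a only */

    int main(void)
    {
        unsigned long pfn = 1234567;

        /* prints 1234432, the first pfn of the enclosing pageblock */
        printf("pageblock head of pfn %lu is %lu\n",
               pfn, ALIGN_DOWN(pfn, PAGEBLOCK_NR_PAGES));
        return 0;
    }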
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 2c16216c29b6..5295ef331165 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -59,7 +59,9 @@
*/
static struct page_ext_operations *page_ext_ops[] = {
+#ifdef CONFIG_DEBUG_PAGEALLOC
&debug_guardpage_ops,
+#endif
#ifdef CONFIG_PAGE_OWNER
&page_owner_ops,
#endif
diff --git a/mm/page_io.c b/mm/page_io.c
index e93f1a4cacd7..b41cf9644585 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -50,7 +50,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
void end_swap_bio_write(struct bio *bio)
{
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
if (bio->bi_status) {
SetPageError(page);
@@ -122,7 +122,7 @@ static void swap_slot_free_notify(struct page *page)
static void end_swap_bio_read(struct bio *bio)
{
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
struct task_struct *waiter = bio->bi_private;
if (bio->bi_status) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8592543a0f15..9886c6073828 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -528,21 +528,18 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
- struct page *page;
- struct page_ext *page_ext;
- unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
- unsigned long end_pfn = pfn + zone->spanned_pages;
+ unsigned long pfn = zone->zone_start_pfn;
+ unsigned long end_pfn = zone_end_pfn(zone);
unsigned long count = 0;
- /* Scan block by block. First and last block may be incomplete */
- pfn = zone->zone_start_pfn;
-
/*
* Walk the zone in pageblock_nr_pages steps. If a page block spans
* a zone boundary, it will be double counted between zones. This does
* not matter as the mixed block count will still be correct
*/
for (; pfn < end_pfn; ) {
+ unsigned long block_end_pfn;
+
if (!pfn_valid(pfn)) {
pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
continue;
@@ -551,9 +548,10 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
block_end_pfn = min(block_end_pfn, end_pfn);
- page = pfn_to_page(pfn);
-
for (; pfn < block_end_pfn; pfn++) {
+ struct page *page;
+ struct page_ext *page_ext;
+
if (!pfn_valid_within(pfn))
continue;
@@ -616,7 +614,6 @@ static void init_early_allocated_pages(void)
{
pg_data_t *pgdat;
- drain_all_pages(NULL);
for_each_online_pgdat(pgdat)
init_zones_in_node(pgdat);
}
@@ -636,9 +633,7 @@ static int __init pageowner_init(void)
dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
NULL, &proc_page_owner_operations);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- return 0;
+ return PTR_ERR_OR_ZERO(dentry);
}
late_initcall(pageowner_init)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index d22b84310f6d..ae3c2a35d61b 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -30,10 +30,37 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
return true;
}
+static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
+{
+ unsigned long hpage_pfn = page_to_pfn(hpage);
+
+ /* THP can be referenced by any subpage */
+ return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
+}
+
+/**
+ * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
+ *
+ * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
+ * mapped. check_pte() has to validate this.
+ *
+ * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
+ * arbitrary page.
+ *
+ * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
+ * migration entry that points to @pvmw->page or any subpage in case of THP.
+ *
+ * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
+ * @pvmw->page or any subpage in case of THP.
+ *
+ * Otherwise, return false.
+ */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
+ unsigned long pfn;
+
if (pvmw->flags & PVMW_MIGRATION) {
-#ifdef CONFIG_MIGRATION
swp_entry_t entry;
if (!is_swap_pte(*pvmw->pte))
return false;
@@ -41,38 +68,25 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
if (!is_migration_entry(entry))
return false;
- if (migration_entry_to_page(entry) - pvmw->page >=
- hpage_nr_pages(pvmw->page)) {
- return false;
- }
- if (migration_entry_to_page(entry) < pvmw->page)
- return false;
-#else
- WARN_ON_ONCE(1);
-#endif
- } else {
- if (is_swap_pte(*pvmw->pte)) {
- swp_entry_t entry;
- entry = pte_to_swp_entry(*pvmw->pte);
- if (is_device_private_entry(entry) &&
- device_private_entry_to_page(entry) == pvmw->page)
- return true;
- }
+ pfn = migration_entry_to_pfn(entry);
+ } else if (is_swap_pte(*pvmw->pte)) {
+ swp_entry_t entry;
- if (!pte_present(*pvmw->pte))
+ /* Handle un-addressable ZONE_DEVICE memory */
+ entry = pte_to_swp_entry(*pvmw->pte);
+ if (!is_device_private_entry(entry))
return false;
- /* THP can be referenced by any subpage */
- if (pte_page(*pvmw->pte) - pvmw->page >=
- hpage_nr_pages(pvmw->page)) {
- return false;
- }
- if (pte_page(*pvmw->pte) < pvmw->page)
+ pfn = device_private_entry_to_pfn(entry);
+ } else {
+ if (!pte_present(*pvmw->pte))
return false;
+
+ pfn = pte_pfn(*pvmw->pte);
}
- return true;
+ return pfn_in_hpage(pvmw->page, pfn);
}
/**
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 1e4ee763c190..cf2af04b34b9 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -181,12 +181,12 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
#endif
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
- pmd_t entry = *pmdp;
- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
+ pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ return old;
}
#endif
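Returning the old entry lets a caller pick up hardware-set dirty and accessed
bits atomically with the invalidation instead of re-reading the pmd racily. A
caller might now look roughly like this kernel-style fragment (vma, haddr,
pmdp and page assumed from context):

    pmd_t old;

    old = pmdp_invalidate(vma, haddr, pmdp); /* old entry, present bit cleared */
    if (pmd_dirty(old))
        SetPageDirty(page);       /* no window in which a hw dirty bit is lost */
    if (pmd_young(old))
        SetPageReferenced(page);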
diff --git a/mm/shmem.c b/mm/shmem.c
index 7fbe67be86fa..1907688b75ee 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2717,15 +2717,28 @@ continue_resched:
return error;
}
+static unsigned int *memfd_file_seals_ptr(struct file *file)
+{
+ if (file->f_op == &shmem_file_operations)
+ return &SHMEM_I(file_inode(file))->seals;
+
+#ifdef CONFIG_HUGETLBFS
+ if (file->f_op == &hugetlbfs_file_operations)
+ return &HUGETLBFS_I(file_inode(file))->seals;
+#endif
+
+ return NULL;
+}
+
#define F_ALL_SEALS (F_SEAL_SEAL | \
F_SEAL_SHRINK | \
F_SEAL_GROW | \
F_SEAL_WRITE)
-int shmem_add_seals(struct file *file, unsigned int seals)
+static int memfd_add_seals(struct file *file, unsigned int seals)
{
struct inode *inode = file_inode(file);
- struct shmem_inode_info *info = SHMEM_I(inode);
+ unsigned int *file_seals;
int error;
/*
@@ -2758,8 +2771,6 @@ int shmem_add_seals(struct file *file, unsigned int seals)
* other file types.
*/
- if (file->f_op != &shmem_file_operations)
- return -EINVAL;
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
if (seals & ~(unsigned int)F_ALL_SEALS)
@@ -2767,12 +2778,18 @@ int shmem_add_seals(struct file *file, unsigned int seals)
inode_lock(inode);
- if (info->seals & F_SEAL_SEAL) {
+ file_seals = memfd_file_seals_ptr(file);
+ if (!file_seals) {
+ error = -EINVAL;
+ goto unlock;
+ }
+
+ if (*file_seals & F_SEAL_SEAL) {
error = -EPERM;
goto unlock;
}
- if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
+ if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) {
error = mapping_deny_writable(file->f_mapping);
if (error)
goto unlock;
@@ -2784,25 +2801,22 @@ int shmem_add_seals(struct file *file, unsigned int seals)
}
}
- info->seals |= seals;
+ *file_seals |= seals;
error = 0;
unlock:
inode_unlock(inode);
return error;
}
-EXPORT_SYMBOL_GPL(shmem_add_seals);
-int shmem_get_seals(struct file *file)
+static int memfd_get_seals(struct file *file)
{
- if (file->f_op != &shmem_file_operations)
- return -EINVAL;
+ unsigned int *seals = memfd_file_seals_ptr(file);
- return SHMEM_I(file_inode(file))->seals;
+ return seals ? *seals : -EINVAL;
}
-EXPORT_SYMBOL_GPL(shmem_get_seals);
-long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
long error;
@@ -2812,10 +2826,10 @@ long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
if (arg > UINT_MAX)
return -EINVAL;
- error = shmem_add_seals(file, arg);
+ error = memfd_add_seals(file, arg);
break;
case F_GET_SEALS:
- error = shmem_get_seals(file);
+ error = memfd_get_seals(file);
break;
default:
error = -EINVAL;
@@ -3657,7 +3671,7 @@ SYSCALL_DEFINE2(memfd_create,
const char __user *, uname,
unsigned int, flags)
{
- struct shmem_inode_info *info;
+ unsigned int *file_seals;
struct file *file;
int fd, error;
char *name;
@@ -3667,9 +3681,6 @@ SYSCALL_DEFINE2(memfd_create,
if (flags & ~(unsigned int)MFD_ALL_FLAGS)
return -EINVAL;
} else {
- /* Sealing not supported in hugetlbfs (MFD_HUGETLB) */
- if (flags & MFD_ALLOW_SEALING)
- return -EINVAL;
/* Allow huge page size encoding in flags. */
if (flags & ~(unsigned int)(MFD_ALL_FLAGS |
(MFD_HUGE_MASK << MFD_HUGE_SHIFT)))
@@ -3722,12 +3733,8 @@ SYSCALL_DEFINE2(memfd_create,
file->f_flags |= O_RDWR | O_LARGEFILE;
if (flags & MFD_ALLOW_SEALING) {
- /*
- * flags check at beginning of function ensures
- * this is not a hugetlbfs (MFD_HUGETLB) file.
- */
- info = SHMEM_I(file_inode(file));
- info->seals &= ~F_SEAL_SEAL;
+ file_seals = memfd_file_seals_ptr(file);
+ *file_seals &= ~F_SEAL_SEAL;
}
fd_install(fd, file);
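End to end, the shmem changes above let user space request sealing on a
hugetlbfs-backed memfd. A hedged user-space sketch, calling the raw syscall in
case the libc lacks a memfd_create() wrapper and assuming a configured 2 MiB
huge page pool:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef MFD_ALLOW_SEALING
    #define MFD_ALLOW_SEALING 0x0002U
    #endif
    #ifndef MFD_HUGETLB
    #define MFD_HUGETLB       0x0004U
    #endif
    #ifndef F_ADD_SEALS
    #define F_ADD_SEALS   1033
    #define F_GET_SEALS   1034
    #define F_SEAL_SHRINK 0x0002
    #define F_SEAL_GROW   0x0004
    #endif

    int main(void)
    {
        int fd = syscall(SYS_memfd_create, "sealed-huge",
                         MFD_HUGETLB | MFD_ALLOW_SEALING);
        if (fd < 0 || ftruncate(fd, 2UL << 20) < 0) /* one 2 MiB huge page */
            return 1;

        /* freeze the size: later ftruncate() attempts fail with EPERM */
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) < 0)
            return 1;

        printf("seals = %#x\n", fcntl(fd, F_GET_SEALS));
        return 0;
    }

Before this series the MFD_HUGETLB | MFD_ALLOW_SEALING combination was
rejected outright with -EINVAL.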
diff --git a/mm/slab.c b/mm/slab.c
index 4e51ef954026..226906294183 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1316,8 +1316,6 @@ void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
- slab_state = UP;
-
/* 6) resize the head arrays to their final sizes */
mutex_lock(&slab_mutex);
list_for_each_entry(cachep, &slab_caches, list)
@@ -1353,8 +1351,6 @@ static int __init cpucache_init(void)
slab_online_cpu, slab_offline_cpu);
WARN_ON(ret < 0);
- /* Done! */
- slab_state = FULL;
return 0;
}
__initcall(cpucache_init);
diff --git a/mm/slab.h b/mm/slab.h
index ad657ffa44e5..e8e2095a6185 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -78,9 +78,6 @@ extern const struct kmalloc_info_struct {
unsigned long size;
} kmalloc_info[];
-unsigned long calculate_alignment(slab_flags_t flags,
- unsigned long align, unsigned long size);
-
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c8cb36774ba1..deeddf95cdcf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -268,6 +268,35 @@ static inline void memcg_unlink_cache(struct kmem_cache *s)
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+static unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size)
+{
+ /*
+ * If the user wants hardware cache aligned objects then follow that
+ * suggestion if the object is sufficiently large.
+ *
+ * The hardware cache alignment cannot override the specified
+ * alignment though. If that is greater then use it.
+ */
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ unsigned long ralign;
+
+ ralign = cache_line_size();
+ while (size <= ralign / 2)
+ ralign /= 2;
+ align = max(align, ralign);
+ }
+
+ if (align < ARCH_SLAB_MINALIGN)
+ align = ARCH_SLAB_MINALIGN;
+
+ return ALIGN(align, sizeof(void *));
+}
+
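The loop halves the cache line until an object would waste more than half of
it, then takes the larger of that and the caller's requested alignment. A
worked sketch of the same computation, assuming 64-byte cache lines:

    #include <stdio.h>

    /* same loop as calculate_alignment() for SLAB_HWCACHE_ALIGN */
    static unsigned long hwcache_align(unsigned long align, unsigned long size)
    {
        unsigned long ralign = 64; /* cache_line_size() assumed */

        while (size <= ralign / 2)
            ralign /= 2;
        return align > ralign ? align : ralign;
    }

    int main(void)
    {
        printf("%lu\n", hwcache_align(8, 24));  /* 32: two objects per line */
        printf("%lu\n", hwcache_align(8, 100)); /* 64: object spans lines anyway */
        return 0;
    }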
+/*
* Find a mergeable slab cache
*/
int slab_unmergeable(struct kmem_cache *s)
@@ -337,33 +366,6 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
return NULL;
}
-/*
- * Figure out what the alignment of the objects will be given a set of
- * flags, a user specified alignment and the size of the objects.
- */
-unsigned long calculate_alignment(slab_flags_t flags,
- unsigned long align, unsigned long size)
-{
- /*
- * If the user wants hardware cache aligned objects then follow that
- * suggestion if the object is sufficiently large.
- *
- * The hardware cache alignment cannot override the specified
- * alignment though. If that is greater then use it.
- */
- if (flags & SLAB_HWCACHE_ALIGN) {
- unsigned long ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- align = max(align, ralign);
- }
-
- if (align < ARCH_SLAB_MINALIGN)
- align = ARCH_SLAB_MINALIGN;
-
- return ALIGN(align, sizeof(void *));
-}
-
static struct kmem_cache *create_cache(const char *name,
size_t object_size, size_t size, size_t align,
slab_flags_t flags, void (*ctor)(void *),
diff --git a/mm/slub.c b/mm/slub.c
index cfd56e5a35fb..693b7074bc53 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -838,6 +838,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
u8 *start;
u8 *fault;
u8 *end;
+ u8 *pad;
int length;
int remainder;
@@ -851,8 +852,9 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
if (!remainder)
return 1;
+ pad = end - remainder;
metadata_access_enable();
- fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
+ fault = memchr_inv(pad, POISON_INUSE, remainder);
metadata_access_disable();
if (!fault)
return 1;
@@ -860,9 +862,9 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
end--;
slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
- print_section(KERN_ERR, "Padding ", end - remainder, remainder);
+ print_section(KERN_ERR, "Padding ", pad, remainder);
- restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
+ restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
return 0;
}
@@ -2220,9 +2222,7 @@ static void unfreeze_partials(struct kmem_cache *s,
/*
* Put a page that was just frozen (in __slab_free) into a partial page
- * slot if available. This is done without interrupts disabled and without
- * preemption disabled. The cmpxchg is racy and may put the partial page
- * onto a random cpus partial slot.
+ * slot if available.
*
* If we did not find a slot then simply move all the partials to the
* per node partial list.
diff --git a/mm/sparse.c b/mm/sparse.c
index 7a5dacaa06e3..6b8b5e91ceef 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
if (unlikely(!mem_section)) {
unsigned long size, align;
- size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
align = 1 << (INTERNODE_CACHE_SHIFT);
mem_section = memblock_virt_alloc(size, align);
}
@@ -264,7 +264,11 @@ unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
*/
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
- return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
+ unsigned long coded_mem_map =
+ (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
+ BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
+ BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
+ return coded_mem_map;
}
/*
diff --git a/mm/swap.c b/mm/swap.c
index 38e1b6374a97..10568b1548d4 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -411,7 +411,7 @@ static void __lru_cache_add(struct page *page)
}
/**
- * lru_cache_add: add a page to the page lists
+ * lru_cache_add_anon - add a page to the page lists
* @page: the page to add
*/
void lru_cache_add_anon(struct page *page)
@@ -688,7 +688,14 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-void lru_add_drain_all_cpuslocked(void)
+/*
+ * Doesn't need any cpu hotplug locking because we do rely on per-cpu
+ * kworkers being shut down before our page_alloc_cpu_dead callback is
+ * executed on the offlined cpu.
+ * Calling this function with cpu hotplug locks held can actually lead
+ * to obscure indirect dependencies via WQ context.
+ */
+void lru_add_drain_all(void)
{
static DEFINE_MUTEX(lock);
static struct cpumask has_work;
@@ -724,13 +731,6 @@ void lru_add_drain_all_cpuslocked(void)
mutex_unlock(&lock);
}
-void lru_add_drain_all(void)
-{
- get_online_cpus();
- lru_add_drain_all_cpuslocked();
- put_online_cpus();
-}
-
/**
* release_pages - batched put_page()
* @pages: array of pages to release
@@ -930,10 +930,10 @@ EXPORT_SYMBOL(__pagevec_lru_add);
*/
unsigned pagevec_lookup_entries(struct pagevec *pvec,
struct address_space *mapping,
- pgoff_t start, unsigned nr_pages,
+ pgoff_t start, unsigned nr_entries,
pgoff_t *indices)
{
- pvec->nr = find_get_entries(mapping, start, nr_pages,
+ pvec->nr = find_get_entries(mapping, start, nr_entries,
pvec->pages, indices);
return pagevec_count(pvec);
}
@@ -965,9 +965,8 @@ void pagevec_remove_exceptionals(struct pagevec *pvec)
* @mapping: The address_space to search
* @start: The starting page index
* @end: The final page index
- * @nr_pages: The maximum number of pages
*
- * pagevec_lookup_range() will search for and return a group of up to @nr_pages
+ * pagevec_lookup_range() will search for and return a group of up to PAGEVEC_SIZE
* pages in the mapping starting from index @start and upto index @end
* (inclusive). The pages are placed in @pvec. pagevec_lookup() takes a
* reference against the pages in @pvec.
@@ -977,7 +976,7 @@ void pagevec_remove_exceptionals(struct pagevec *pvec)
* also update @start to index the next page for the traversal.
*
* pagevec_lookup_range() returns the number of pages which were found. If this
- * number is smaller than @nr_pages, the end of specified range has been
+ * number is smaller than PAGEVEC_SIZE, the end of the specified range has been
* reached.
*/
unsigned pagevec_lookup_range(struct pagevec *pvec,
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 3074b02eaa09..42fe5653814a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2697,7 +2697,7 @@ out:
}
#ifdef CONFIG_PROC_FS
-static unsigned swaps_poll(struct file *file, poll_table *wait)
+static __poll_t swaps_poll(struct file *file, poll_table *wait)
{
struct seq_file *seq = file->private_data;
diff --git a/mm/truncate.c b/mm/truncate.c
index e4b4cf0f4070..c34e2fd4f583 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -179,12 +179,8 @@ static void
truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
if (page_mapped(page)) {
- loff_t holelen;
-
- holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
- unmap_mapping_range(mapping,
- (loff_t)page->index << PAGE_SHIFT,
- holelen, 0);
+ pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
+ unmap_mapping_pages(mapping, page->index, nr, false);
}
if (page_has_private(page))
@@ -715,19 +711,15 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
/*
* Zap the rest of the file in one hit.
*/
- unmap_mapping_range(mapping,
- (loff_t)index << PAGE_SHIFT,
- (loff_t)(1 + end - index)
- << PAGE_SHIFT,
- 0);
+ unmap_mapping_pages(mapping, index,
+ (1 + end - index), false);
did_range_unmap = 1;
} else {
/*
* Just zap this page
*/
- unmap_mapping_range(mapping,
- (loff_t)index << PAGE_SHIFT,
- PAGE_SIZE, 0);
+ unmap_mapping_pages(mapping, index,
+ 1, false);
}
}
BUG_ON(page_mapped(page));
@@ -753,8 +745,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
* get remapped later.
*/
if (dax_mapping(mapping)) {
- unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
- (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
+ unmap_mapping_pages(mapping, start, end - start + 1, false);
}
out:
cleancache_invalidate_inode(mapping);
diff --git a/mm/util.c b/mm/util.c
index 34e57fae959d..c1250501364f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -150,18 +150,14 @@ EXPORT_SYMBOL(kmemdup_nul);
* @src: source address in user space
* @len: number of bytes to copy
*
- * Returns an ERR_PTR() on failure.
+ * Returns an ERR_PTR() on failure. Result is physically
+ * contiguous, to be freed by kfree().
*/
void *memdup_user(const void __user *src, size_t len)
{
void *p;
- /*
- * Always use GFP_KERNEL, since copy_from_user() can sleep and
- * cause pagefault, which makes it pointless to use GFP_NOFS
- * or GFP_ATOMIC.
- */
- p = kmalloc_track_caller(len, GFP_KERNEL);
+ p = kmalloc_track_caller(len, GFP_USER);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -174,6 +170,32 @@ void *memdup_user(const void __user *src, size_t len)
}
EXPORT_SYMBOL(memdup_user);
+/**
+ * vmemdup_user - duplicate memory region from user space
+ *
+ * @src: source address in user space
+ * @len: number of bytes to copy
+ *
+ * Returns an ERR_PTR() on failure. Result may not be
+ * physically contiguous. Use kvfree() to free.
+ */
+void *vmemdup_user(const void __user *src, size_t len)
+{
+ void *p;
+
+ p = kvmalloc(len, GFP_USER);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(p, src, len)) {
+ kvfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return p;
+}
+EXPORT_SYMBOL(vmemdup_user);
+
/*
* strndup_user - duplicate an existing string from user space
* @s: The string to duplicate
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c02c850ea349..fdd3fc6be862 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -220,22 +220,6 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
return nr;
}
-unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
-{
- unsigned long nr;
-
- nr = node_page_state_snapshot(pgdat, NR_ACTIVE_FILE) +
- node_page_state_snapshot(pgdat, NR_INACTIVE_FILE) +
- node_page_state_snapshot(pgdat, NR_ISOLATED_FILE);
-
- if (get_nr_swap_pages() > 0)
- nr += node_page_state_snapshot(pgdat, NR_ACTIVE_ANON) +
- node_page_state_snapshot(pgdat, NR_INACTIVE_ANON) +
- node_page_state_snapshot(pgdat, NR_ISOLATED_ANON);
-
- return nr;
-}
-
/**
* lruvec_lru_size - Returns the number of pages on the given LRU list.
* @lruvec: lru vector
@@ -297,19 +281,20 @@ EXPORT_SYMBOL(register_shrinker);
*/
void unregister_shrinker(struct shrinker *shrinker)
{
+ if (!shrinker->nr_deferred)
+ return;
down_write(&shrinker_rwsem);
list_del(&shrinker->list);
up_write(&shrinker_rwsem);
kfree(shrinker->nr_deferred);
+ shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);
#define SHRINK_BATCH 128
static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
- struct shrinker *shrinker,
- unsigned long nr_scanned,
- unsigned long nr_eligible)
+ struct shrinker *shrinker, int priority)
{
unsigned long freed = 0;
unsigned long long delta;
@@ -334,9 +319,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
total_scan = nr;
- delta = (4 * nr_scanned) / shrinker->seeks;
- delta *= freeable;
- do_div(delta, nr_eligible + 1);
+ delta = freeable >> priority;
+ delta *= 4;
+ do_div(delta, shrinker->seeks);
total_scan += delta;
if (total_scan < 0) {
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -370,8 +355,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
total_scan = freeable * 2;
trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
- nr_scanned, nr_eligible,
- freeable, delta, total_scan);
+ freeable, delta, total_scan, priority);
/*
* Normally, we should not scan less than batch_size objects in one
@@ -431,8 +415,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
* @gfp_mask: allocation context
* @nid: node whose slab caches to target
* @memcg: memory cgroup whose slab caches to target
- * @nr_scanned: pressure numerator
- * @nr_eligible: pressure denominator
+ * @priority: the reclaim priority
*
* Call the shrink functions to age shrinkable caches.
*
@@ -444,20 +427,14 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
* objects from the memory cgroup specified. Otherwise, only unaware
* shrinkers are called.
*
- * @nr_scanned and @nr_eligible form a ratio that indicate how much of
- * the available objects should be scanned. Page reclaim for example
- * passes the number of pages scanned and the number of pages on the
- * LRU lists that it considered on @nid, plus a bias in @nr_scanned
- * when it encountered mapped pages. The ratio is further biased by
- * the ->seeks setting of the shrink function, which indicates the
- * cost to recreate an object relative to that of an LRU page.
+ * @priority is sc->priority: we take the number of freeable objects and
+ * shift it right by @priority to get the scan target.
*
* Returns the number of reclaimed slab objects.
*/
static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg,
- unsigned long nr_scanned,
- unsigned long nr_eligible)
+ int priority)
{
struct shrinker *shrinker;
unsigned long freed = 0;
@@ -465,9 +442,6 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
return 0;
- if (nr_scanned == 0)
- nr_scanned = SWAP_CLUSTER_MAX;
-
if (!down_read_trylock(&shrinker_rwsem)) {
/*
* If we would return 0, our callers would understand that we
@@ -498,7 +472,16 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
sc.nid = 0;
- freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
+ freed += do_shrink_slab(&sc, shrinker, priority);
+ /*
+ * Bail out if someone wants to register a new shrinker to
+ * prevent the registration from being stalled for long periods
+ * by parallel ongoing shrinking.
+ */
+ if (rwsem_is_contended(&shrinker_rwsem)) {
+ freed = freed ? : 1;
+ break;
+ }
}
up_read(&shrinker_rwsem);
@@ -516,8 +499,7 @@ void drop_slab_node(int nid)
freed = 0;
do {
- freed += shrink_slab(GFP_KERNEL, nid, memcg,
- 1000, 1000);
+ freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
} while (freed > 10);
}
@@ -1433,14 +1415,24 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
if (PageDirty(page)) {
struct address_space *mapping;
+ bool migrate_dirty;
/*
* Only pages without mappings or that have a
* ->migratepage callback are possible to migrate
- * without blocking
+ * without blocking. However, we can be racing with
+ * truncation so it's necessary to lock the page
+ * to stabilise the mapping as truncation holds
+ * the page lock until after the page is removed
+ * from the page cache.
*/
+ if (!trylock_page(page))
+ return ret;
+
mapping = page_mapping(page);
- if (mapping && !mapping->a_ops->migratepage)
+ migrate_dirty = mapping && mapping->a_ops->migratepage;
+ unlock_page(page);
+ if (!migrate_dirty)
return ret;
}
}
@@ -2612,14 +2604,12 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
reclaimed = sc->nr_reclaimed;
scanned = sc->nr_scanned;
-
shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
node_lru_pages += lru_pages;
if (memcg)
shrink_slab(sc->gfp_mask, pgdat->node_id,
- memcg, sc->nr_scanned - scanned,
- lru_pages);
+ memcg, sc->priority);
/* Record the group's reclaim efficiency */
vmpressure(sc->gfp_mask, memcg, false,
@@ -2643,14 +2633,9 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
}
} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
- /*
- * Shrink the slab caches in the same proportion that
- * the eligible LRU pages were scanned.
- */
if (global_reclaim(sc))
shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
- sc->nr_scanned - nr_scanned,
- node_lru_pages);
+ sc->priority);
if (reclaim_state) {
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
diff --git a/mm/zpool.c b/mm/zpool.c
index fd3ff719c32c..e1e7aa6d1d06 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -21,6 +21,7 @@ struct zpool {
struct zpool_driver *driver;
void *pool;
const struct zpool_ops *ops;
+ bool evictable;
struct list_head list;
};
@@ -142,7 +143,7 @@ EXPORT_SYMBOL(zpool_has_pool);
*
* This creates a new zpool of the specified type. The gfp flags will be
* used when allocating memory, if the implementation supports it. If the
- * ops param is NULL, then the created zpool will not be shrinkable.
+ * ops param is NULL, then the created zpool will not be evictable.
*
* Implementations must guarantee this to be thread-safe.
*
@@ -180,6 +181,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
zpool->driver = driver;
zpool->pool = driver->create(name, gfp, ops, zpool);
zpool->ops = ops;
+ zpool->evictable = driver->shrink && ops && ops->evict;
if (!zpool->pool) {
pr_err("couldn't create %s pool\n", type);
@@ -296,7 +298,8 @@ void zpool_free(struct zpool *zpool, unsigned long handle)
int zpool_shrink(struct zpool *zpool, unsigned int pages,
unsigned int *reclaimed)
{
- return zpool->driver->shrink(zpool->pool, pages, reclaimed);
+ return zpool->driver->shrink ?
+ zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
}
/**
@@ -355,6 +358,24 @@ u64 zpool_get_total_size(struct zpool *zpool)
return zpool->driver->total_size(zpool->pool);
}
+/**
+ * zpool_evictable() - Test if zpool is potentially evictable
+ * @zpool: The zpool to test
+ *
+ * Zpool is only potentially evictable when it's created with struct
+ * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
+ *
+ * However, it doesn't necessarily mean the driver will use zpool_ops.evict
+ * in its implementation of zpool_driver.shrink. It could do internal
+ * defragmentation instead.
+ *
+ * Returns: true if potentially evictable; false otherwise.
+ */
+bool zpool_evictable(struct zpool *zpool)
+{
+ return zpool->evictable;
+}
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 685049a9048d..c3013505c305 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -46,6 +46,7 @@
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
+#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
@@ -53,6 +54,7 @@
#include <linux/mount.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
+#include <linux/fs.h>
#define ZSPAGE_MAGIC 0x58
@@ -256,11 +258,7 @@ struct zs_pool {
/* Compact classes */
struct shrinker shrinker;
- /*
- * To signify that register_shrinker() was successful
- * and unregister_shrinker() will not Oops.
- */
- bool shrinker_enabled;
+
#ifdef CONFIG_ZSMALLOC_STAT
struct dentry *stat_dentry;
#endif
@@ -406,12 +404,6 @@ static void zs_zpool_free(void *pool, unsigned long handle)
zs_free(pool, handle);
}
-static int zs_zpool_shrink(void *pool, unsigned int pages,
- unsigned int *reclaimed)
-{
- return -EINVAL;
-}
-
static void *zs_zpool_map(void *pool, unsigned long handle,
enum zpool_mapmode mm)
{
@@ -449,7 +441,6 @@ static struct zpool_driver zs_zpool_driver = {
.destroy = zs_zpool_destroy,
.malloc = zs_zpool_malloc,
.free = zs_zpool_free,
- .shrink = zs_zpool_shrink,
.map = zs_zpool_map,
.unmap = zs_zpool_unmap,
.total_size = zs_zpool_total_size,
@@ -1056,7 +1047,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
* Reset OBJ_TAG_BITS bit to last link to tell
* whether it's allocated object or not.
*/
- link->next = -1 << OBJ_TAG_BITS;
+ link->next = -1UL << OBJ_TAG_BITS;
}
kunmap_atomic(vaddr);
page = next_page;
@@ -2323,10 +2314,7 @@ static unsigned long zs_shrinker_count(struct shrinker *shrinker,
static void zs_unregister_shrinker(struct zs_pool *pool)
{
- if (pool->shrinker_enabled) {
- unregister_shrinker(&pool->shrinker);
- pool->shrinker_enabled = false;
- }
+ unregister_shrinker(&pool->shrinker);
}
static int zs_register_shrinker(struct zs_pool *pool)
@@ -2425,11 +2413,13 @@ struct zs_pool *zs_create_pool(const char *name)
goto err;
/*
- * Not critical, we still can use the pool
- * and user can trigger compaction manually.
+ * Not critical since the shrinker is only used to trigger internal
+ * defragmentation of the pool, which is an optional feature. If
+ * registration fails we can still use the pool normally and the user
+ * can trigger compaction manually. Thus, ignore the return code.
*/
- if (zs_register_shrinker(pool) == 0)
- pool->shrinker_enabled = true;
+ zs_register_shrinker(pool);
+
return pool;
err:
diff --git a/mm/zswap.c b/mm/zswap.c
index d39581a076c3..c004aa4fd3f4 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -49,6 +49,8 @@
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
+/* The number of same-value filled pages currently stored in zswap */
+static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
/*
* The statistics below are not protected from concurrent access for
@@ -116,6 +118,11 @@ module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
+/* Enable/disable handling same-value filled pages (enabled by default) */
+static bool zswap_same_filled_pages_enabled = true;
+module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
+ bool, 0644);
+
/*********************************
* data structures
**********************************/
@@ -145,9 +152,10 @@ struct zswap_pool {
* be held while changing the refcount. Since the lock must
* be held, there is no reason to also make refcount atomic.
* length - the length in bytes of the compressed page data. Needed during
- * decompression
+ * decompression. For a same-value filled page, length is 0.
* pool - the zswap_pool the entry's data is in
* handle - zpool allocation handle that stores the compressed page data
+ * value - the value repeated throughout a same-value filled page
*/
struct zswap_entry {
struct rb_node rbnode;
@@ -155,7 +163,10 @@ struct zswap_entry {
int refcount;
unsigned int length;
struct zswap_pool *pool;
- unsigned long handle;
+ union {
+ unsigned long handle;
+ unsigned long value;
+ };
};
struct zswap_header {
@@ -320,8 +331,12 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
*/
static void zswap_free_entry(struct zswap_entry *entry)
{
- zpool_free(entry->pool->zpool, entry->handle);
- zswap_pool_put(entry->pool);
+ if (!entry->length)
+ atomic_dec(&zswap_same_filled_pages);
+ else {
+ zpool_free(entry->pool->zpool, entry->handle);
+ zswap_pool_put(entry->pool);
+ }
zswap_entry_cache_free(entry);
atomic_dec(&zswap_stored_pages);
zswap_update_total_size();
@@ -953,6 +968,28 @@ static int zswap_shrink(void)
return ret;
}
+static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
+{
+ unsigned int pos;
+ unsigned long *page;
+
+ page = (unsigned long *)ptr;
+ for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
+ if (page[pos] != page[0])
+ return 0;
+ }
+ *value = page[0];
+ return 1;
+}
+
+static void zswap_fill_page(void *ptr, unsigned long value)
+{
+ unsigned long *page;
+
+ page = (unsigned long *)ptr;
+ memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
+}
+
/*********************************
* frontswap hooks
**********************************/
@@ -964,11 +1001,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
struct zswap_entry *entry, *dupentry;
struct crypto_comp *tfm;
int ret;
- unsigned int dlen = PAGE_SIZE, len;
- unsigned long handle;
+ unsigned int hlen, dlen = PAGE_SIZE;
+ unsigned long handle, value;
char *buf;
u8 *src, *dst;
- struct zswap_header *zhdr;
+ struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
if (!zswap_enabled || !tree) {
ret = -ENODEV;
@@ -993,6 +1030,19 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
goto reject;
}
+ if (zswap_same_filled_pages_enabled) {
+ src = kmap_atomic(page);
+ if (zswap_is_page_same_filled(src, &value)) {
+ kunmap_atomic(src);
+ entry->offset = offset;
+ entry->length = 0;
+ entry->value = value;
+ atomic_inc(&zswap_same_filled_pages);
+ goto insert_entry;
+ }
+ kunmap_atomic(src);
+ }
+
/* if entry is successfully added, it keeps the reference */
entry->pool = zswap_pool_current_get();
if (!entry->pool) {
@@ -1013,8 +1063,8 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
}
/* store */
- len = dlen + sizeof(struct zswap_header);
- ret = zpool_malloc(entry->pool->zpool, len,
+ hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
+ ret = zpool_malloc(entry->pool->zpool, hlen + dlen,
__GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
&handle);
if (ret == -ENOSPC) {
@@ -1025,10 +1075,9 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
zswap_reject_alloc_fail++;
goto put_dstmem;
}
- zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
- zhdr->swpentry = swp_entry(type, offset);
- buf = (u8 *)(zhdr + 1);
- memcpy(buf, dst, dlen);
+ buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
+ memcpy(buf, &zhdr, hlen);
+ memcpy(buf + hlen, dst, dlen);
zpool_unmap_handle(entry->pool->zpool, handle);
put_cpu_var(zswap_dstmem);
@@ -1037,6 +1086,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
entry->handle = handle;
entry->length = dlen;
+insert_entry:
/* map */
spin_lock(&tree->lock);
do {
@@ -1089,10 +1139,18 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
}
spin_unlock(&tree->lock);
+ if (!entry->length) {
+ dst = kmap_atomic(page);
+ zswap_fill_page(dst, entry->value);
+ kunmap_atomic(dst);
+ goto freeentry;
+ }
+
/* decompress */
dlen = PAGE_SIZE;
- src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
- ZPOOL_MM_RO) + sizeof(struct zswap_header);
+ src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
+ if (zpool_evictable(entry->pool->zpool))
+ src += sizeof(struct zswap_header);
dst = kmap_atomic(page);
tfm = *get_cpu_ptr(entry->pool->tfm);
ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
@@ -1101,6 +1159,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
zpool_unmap_handle(entry->pool->zpool, entry->handle);
BUG_ON(ret);
+freeentry:
spin_lock(&tree->lock);
zswap_entry_put(tree, entry);
spin_unlock(&tree->lock);
@@ -1209,6 +1268,8 @@ static int __init zswap_debugfs_init(void)
zswap_debugfs_root, &zswap_pool_total_size);
debugfs_create_atomic_t("stored_pages", S_IRUGO,
zswap_debugfs_root, &zswap_stored_pages);
+ debugfs_create_atomic_t("same_filled_pages", 0444,
+ zswap_debugfs_root, &zswap_same_filled_pages);
return 0;
}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8dfdd94e430f..bad01b14a4ad 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
vlan_gvrp_uninit_applicant(real_dev);
}
- /* Take it out of our own structures, but be sure to interlock with
- * HW accelerating devices or SW vlan input packet processing if
- * VLAN is not 0 (leave it there for 802.1p).
- */
- if (vlan_id)
- vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
+ vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
/* Get rid of the vlan's reference to real_dev */
dev_put(real_dev);
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 5f1446c9f098..a662ccc166df 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -80,7 +80,6 @@ static int vlan_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations vlan_fops = {
- .owner = THIS_MODULE,
.open = vlan_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -97,7 +96,6 @@ static int vlandev_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations vlandev_fops = {
- .owner = THIS_MODULE,
.open = vlandev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 80f5c79053a4..d6f7f7cb79c4 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -228,32 +228,31 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
}
}
-static int
-p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
+static __poll_t
+p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
{
- int ret, n;
+ __poll_t ret, n;
struct p9_trans_fd *ts = NULL;
if (client && client->status == Connected)
ts = client->trans;
- if (!ts)
- return -EREMOTEIO;
+ if (!ts) {
+ if (err)
+ *err = -EREMOTEIO;
+ return POLLERR;
+ }
if (!ts->rd->f_op->poll)
- return -EIO;
-
- if (!ts->wr->f_op->poll)
- return -EIO;
-
- ret = ts->rd->f_op->poll(ts->rd, pt);
- if (ret < 0)
- return ret;
+ ret = DEFAULT_POLLMASK;
+ else
+ ret = ts->rd->f_op->poll(ts->rd, pt);
if (ts->rd != ts->wr) {
- n = ts->wr->f_op->poll(ts->wr, pt);
- if (n < 0)
- return n;
+ if (!ts->wr->f_op->poll)
+ n = DEFAULT_POLLMASK;
+ else
+ n = ts->wr->f_op->poll(ts->wr, pt);
ret = (ret & ~POLLOUT) | (n & ~POLLIN);
}
@@ -298,7 +297,8 @@ static int p9_fd_read(struct p9_client *client, void *v, int len)
static void p9_read_work(struct work_struct *work)
{
- int n, err;
+ __poll_t n;
+ int err;
struct p9_conn *m;
int status = REQ_STATUS_ERROR;
@@ -398,7 +398,7 @@ end_clear:
if (test_and_clear_bit(Rpending, &m->wsched))
n = POLLIN;
else
- n = p9_fd_poll(m->client, NULL);
+ n = p9_fd_poll(m->client, NULL, NULL);
if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
@@ -448,7 +448,8 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
static void p9_write_work(struct work_struct *work)
{
- int n, err;
+ __poll_t n;
+ int err;
struct p9_conn *m;
struct p9_req_t *req;
@@ -506,7 +507,7 @@ end_clear:
if (test_and_clear_bit(Wpending, &m->wsched))
n = POLLOUT;
else
- n = p9_fd_poll(m->client, NULL);
+ n = p9_fd_poll(m->client, NULL, NULL);
if ((n & POLLOUT) &&
!test_and_set_bit(Wworksched, &m->wsched)) {
@@ -581,7 +582,7 @@ p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
static void p9_conn_create(struct p9_client *client)
{
- int n;
+ __poll_t n;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
@@ -597,7 +598,7 @@ static void p9_conn_create(struct p9_client *client)
INIT_LIST_HEAD(&m->poll_pending_link);
init_poll_funcptr(&m->pt, p9_pollwait);
- n = p9_fd_poll(client, &m->pt);
+ n = p9_fd_poll(client, &m->pt, NULL);
if (n & POLLIN) {
p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
set_bit(Rpending, &m->wsched);
@@ -617,17 +618,16 @@ static void p9_conn_create(struct p9_client *client)
static void p9_poll_mux(struct p9_conn *m)
{
- int n;
+ __poll_t n;
+ int err = -ECONNRESET;
if (m->err < 0)
return;
- n = p9_fd_poll(m->client, NULL);
- if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
+ n = p9_fd_poll(m->client, NULL, &err);
+ if (n & (POLLERR | POLLHUP | POLLNVAL)) {
p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
- if (n >= 0)
- n = -ECONNRESET;
- p9_conn_cancel(m, n);
+ p9_conn_cancel(m, err);
}
if (n & POLLIN) {
@@ -663,7 +663,7 @@ static void p9_poll_mux(struct p9_conn *m)
static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
- int n;
+ __poll_t n;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
@@ -680,7 +680,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
if (test_and_clear_bit(Wpending, &m->wsched))
n = POLLOUT;
else
- n = p9_fd_poll(m->client, NULL);
+ n = p9_fd_poll(m->client, NULL, NULL);
if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
schedule_work(&m->wq);
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 325c56043007..086a4abdfa7c 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -543,3 +543,7 @@ static void p9_trans_xen_exit(void)
return xenbus_unregister_driver(&xen_9pfs_front_driver);
}
module_exit(p9_trans_xen_exit);
+
+MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
+MODULE_DESCRIPTION("Xen Transport for 9P");
+MODULE_LICENSE("GPL");
diff --git a/net/Kconfig b/net/Kconfig
index 9dba2715919d..0428f12c25c2 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -182,6 +182,7 @@ config BRIDGE_NETFILTER
depends on BRIDGE
depends on NETFILTER && INET
depends on NETFILTER_ADVANCED
+ select NETFILTER_FAMILY_BRIDGE
default m
---help---
Enabling this option will let arptables resp. iptables see bridged
@@ -212,7 +213,6 @@ source "net/dsa/Kconfig"
source "net/8021q/Kconfig"
source "net/decnet/Kconfig"
source "net/llc/Kconfig"
-source "net/ipx/Kconfig"
source "drivers/net/appletalk/Kconfig"
source "net/x25/Kconfig"
source "net/lapb/Kconfig"
@@ -336,23 +336,6 @@ config NET_PKTGEN
To compile this code as a module, choose M here: the
module will be called pktgen.
-config NET_TCPPROBE
- tristate "TCP connection probing"
- depends on INET && PROC_FS && KPROBES
- ---help---
- This module allows for capturing the changes to TCP connection
- state in response to incoming packets. It is used for debugging
- TCP congestion avoidance modules. If you don't understand
- what was just said, you don't need it: say N.
-
- Documentation on how to use TCP connection probing can be found
- at:
-
- http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
-
- To compile this code as a module, choose M here: the
- module will be called tcp_probe.
-
config NET_DROP_MONITOR
tristate "Network packet drop alerting service"
depends on INET && TRACEPOINTS
diff --git a/net/Makefile b/net/Makefile
index 14fede520840..a6147c61b174 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
obj-$(CONFIG_BRIDGE) += bridge/
obj-$(CONFIG_NET_DSA) += dsa/
-obj-$(CONFIG_IPX) += ipx/
obj-$(CONFIG_ATALK) += appletalk/
obj-$(CONFIG_X25) += x25/
obj-$(CONFIG_LAPB) += lapb/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 309d7dbb36e8..d4c1021e74e1 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -1047,7 +1047,6 @@ static int aarp_seq_open(struct inode *inode, struct file *file)
}
const struct file_operations atalk_seq_arp_fops = {
- .owner = THIS_MODULE,
.open = aarp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index af46bc49e1e9..a3bf9d519193 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -226,7 +226,6 @@ static int atalk_seq_socket_open(struct inode *inode, struct file *file)
}
static const struct file_operations atalk_seq_interface_fops = {
- .owner = THIS_MODULE,
.open = atalk_seq_interface_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -234,7 +233,6 @@ static const struct file_operations atalk_seq_interface_fops = {
};
static const struct file_operations atalk_seq_route_fops = {
- .owner = THIS_MODULE,
.open = atalk_seq_route_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -242,7 +240,6 @@ static const struct file_operations atalk_seq_route_fops = {
};
static const struct file_operations atalk_seq_socket_fops = {
- .owner = THIS_MODULE,
.open = atalk_seq_socket_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 4e111196f902..fd94bea36ee8 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -824,7 +824,6 @@ static int br2684_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations br2684_proc_ops = {
- .owner = THIS_MODULE,
.open = br2684_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/atm/common.c b/net/atm/common.c
index 8a4f99114cd2..6523f38c4957 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -14,7 +14,7 @@
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
-#include <linux/time.h> /* struct timeval */
+#include <linux/time64.h> /* 64-bit time for seconds */
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/init.h>
@@ -648,11 +648,11 @@ out:
return error;
}
-unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/net/atm/common.h b/net/atm/common.h
index d9d583712a91..5850649068bb 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -17,7 +17,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len);
-unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_setsockopt(struct socket *sock, int level, int optname,
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 6676e3433261..09a1f056712a 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -992,7 +992,6 @@ static int lec_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations lec_seq_fops = {
- .owner = THIS_MODULE,
.open = lec_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 7c6a1cc760a2..31e0dcb970f8 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1089,7 +1089,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
msg->type = SND_MPOA_RES_RQST;
msg->content.in_info = entry->ctrl_info;
msg_to_mpoad(msg, mpc);
- do_gettimeofday(&(entry->reply_wait));
+ entry->reply_wait = ktime_get_seconds();
mpc->in_ops->put(entry);
return;
}
@@ -1099,7 +1099,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
msg->type = SND_MPOA_RES_RQST;
msg->content.in_info = entry->ctrl_info;
msg_to_mpoad(msg, mpc);
- do_gettimeofday(&(entry->reply_wait));
+ entry->reply_wait = ktime_get_seconds();
mpc->in_ops->put(entry);
return;
}
@@ -1175,8 +1175,9 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
}
entry->ctrl_info = msg->content.in_info;
- do_gettimeofday(&(entry->tv));
- do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */
+ entry->time = ktime_get_seconds();
+ /* Used in refreshing func from now on */
+ entry->reply_wait = ktime_get_seconds();
entry->refresh_time = 0;
ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index e01450bb32d6..4bb418313720 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -117,7 +117,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
entry->ctrl_info.in_dst_ip = dst_ip;
- do_gettimeofday(&(entry->tv));
+ entry->time = ktime_get_seconds();
entry->retry_time = client->parameters.mpc_p4;
entry->count = 1;
entry->entry_state = INGRESS_INVALID;
@@ -148,7 +148,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
if (qos != NULL)
msg.qos = qos->qos;
msg_to_mpoad(&msg, mpc);
- do_gettimeofday(&(entry->reply_wait));
+ entry->reply_wait = ktime_get_seconds();
entry->entry_state = INGRESS_RESOLVING;
}
if (entry->shortcut != NULL)
@@ -171,7 +171,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
if (qos != NULL)
msg.qos = qos->qos;
msg_to_mpoad(&msg, mpc);
- do_gettimeofday(&(entry->reply_wait));
+ entry->reply_wait = ktime_get_seconds();
}
return CLOSED;
@@ -227,17 +227,16 @@ static void in_cache_remove_entry(in_cache_entry *entry,
static void clear_count_and_expired(struct mpoa_client *client)
{
in_cache_entry *entry, *next_entry;
- struct timeval now;
+ time64_t now;
- do_gettimeofday(&now);
+ now = ktime_get_seconds();
write_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
entry->count = 0;
next_entry = entry->next;
- if ((now.tv_sec - entry->tv.tv_sec)
- > entry->ctrl_info.holding_time) {
+ if ((now - entry->time) > entry->ctrl_info.holding_time) {
dprintk("holding time expired, ip = %pI4\n",
&entry->ctrl_info.in_dst_ip);
client->in_ops->remove_entry(entry, client);
@@ -253,35 +252,35 @@ static void check_resolving_entries(struct mpoa_client *client)
struct atm_mpoa_qos *qos;
in_cache_entry *entry;
- struct timeval now;
+ time64_t now;
struct k_message msg;
- do_gettimeofday(&now);
+ now = ktime_get_seconds();
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
if (entry->entry_state == INGRESS_RESOLVING) {
- if ((now.tv_sec - entry->hold_down.tv_sec) <
- client->parameters.mpc_p6) {
+
+ if ((now - entry->hold_down)
+ < client->parameters.mpc_p6) {
entry = entry->next; /* Entry in hold down */
continue;
}
- if ((now.tv_sec - entry->reply_wait.tv_sec) >
- entry->retry_time) {
+ if ((now - entry->reply_wait) > entry->retry_time) {
entry->retry_time = MPC_C1 * (entry->retry_time);
/*
* Retry time maximum exceeded,
* put entry in hold down.
*/
if (entry->retry_time > client->parameters.mpc_p5) {
- do_gettimeofday(&(entry->hold_down));
+ entry->hold_down = ktime_get_seconds();
entry->retry_time = client->parameters.mpc_p4;
entry = entry->next;
continue;
}
/* Ask daemon to send a resolution request. */
- memset(&(entry->hold_down), 0, sizeof(struct timeval));
+ memset(&entry->hold_down, 0, sizeof(time64_t));
msg.type = SND_MPOA_RES_RTRY;
memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
msg.content.in_info = entry->ctrl_info;
@@ -289,7 +288,7 @@ static void check_resolving_entries(struct mpoa_client *client)
if (qos != NULL)
msg.qos = qos->qos;
msg_to_mpoad(&msg, client);
- do_gettimeofday(&(entry->reply_wait));
+ entry->reply_wait = ktime_get_seconds();
}
}
entry = entry->next;
@@ -300,18 +299,18 @@ static void check_resolving_entries(struct mpoa_client *client)
/* Call this every MPC-p5 seconds. */
static void refresh_entries(struct mpoa_client *client)
{
- struct timeval now;
+ time64_t now;
struct in_cache_entry *entry = client->in_cache;
ddprintk("refresh_entries\n");
- do_gettimeofday(&now);
+ now = ktime_get_seconds();
read_lock_bh(&client->ingress_lock);
while (entry != NULL) {
if (entry->entry_state == INGRESS_RESOLVED) {
if (!(entry->refresh_time))
entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
- if ((now.tv_sec - entry->reply_wait.tv_sec) >
+ if ((now - entry->reply_wait) >
entry->refresh_time) {
dprintk("refreshing an entry.\n");
entry->entry_state = INGRESS_REFRESHING;
@@ -480,7 +479,7 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
entry->ctrl_info = msg->content.eg_info;
- do_gettimeofday(&(entry->tv));
+ entry->time = ktime_get_seconds();
entry->entry_state = EGRESS_RESOLVED;
dprintk("new_eg_cache_entry cache_id %u\n",
ntohl(entry->ctrl_info.cache_id));
@@ -495,7 +494,7 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
{
- do_gettimeofday(&(entry->tv));
+ entry->time = ktime_get_seconds();
entry->entry_state = EGRESS_RESOLVED;
entry->ctrl_info.holding_time = holding_time;
}
@@ -503,17 +502,16 @@ static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
static void clear_expired(struct mpoa_client *client)
{
eg_cache_entry *entry, *next_entry;
- struct timeval now;
+ time64_t now;
struct k_message msg;
- do_gettimeofday(&now);
+ now = ktime_get_seconds();
write_lock_irq(&client->egress_lock);
entry = client->eg_cache;
while (entry != NULL) {
next_entry = entry->next;
- if ((now.tv_sec - entry->tv.tv_sec)
- > entry->ctrl_info.holding_time) {
+ if ((now - entry->time) > entry->ctrl_info.holding_time) {
msg.type = SND_EGRESS_PURGE;
msg.content.eg_info = entry->ctrl_info;
dprintk("egress_cache: holding time expired, cache_id = %u.\n",
diff --git a/net/atm/mpoa_caches.h b/net/atm/mpoa_caches.h
index 6a266669ebf4..464c4c7f8d1f 100644
--- a/net/atm/mpoa_caches.h
+++ b/net/atm/mpoa_caches.h
@@ -2,6 +2,7 @@
#ifndef MPOA_CACHES_H
#define MPOA_CACHES_H
+#include <linux/time64.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/atm.h>
@@ -16,9 +17,9 @@ void atm_mpoa_init_cache(struct mpoa_client *mpc);
typedef struct in_cache_entry {
struct in_cache_entry *next;
struct in_cache_entry *prev;
- struct timeval tv;
- struct timeval reply_wait;
- struct timeval hold_down;
+ time64_t time;
+ time64_t reply_wait;
+ time64_t hold_down;
uint32_t packets_fwded;
uint16_t entry_state;
uint32_t retry_time;
@@ -53,7 +54,7 @@ struct in_cache_ops{
typedef struct eg_cache_entry{
struct eg_cache_entry *next;
struct eg_cache_entry *prev;
- struct timeval tv;
+ time64_t time;
uint8_t MPS_ctrl_ATM_addr[ATM_ESA_LEN];
struct atm_vcc *shortcut;
uint32_t packets_rcvd;
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 8a0c17e1c203..b93cc0f18292 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -8,7 +8,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
-#include <linux/time.h>
+#include <linux/ktime.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/atmmpc.h>
@@ -57,7 +57,6 @@ static int parse_qos(const char *buff);
* Define allowed FILE OPERATIONS
*/
static const struct file_operations mpc_file_operations = {
- .owner = THIS_MODULE,
.open = proc_mpc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -138,7 +137,7 @@ static int mpc_show(struct seq_file *m, void *v)
int i;
in_cache_entry *in_entry;
eg_cache_entry *eg_entry;
- struct timeval now;
+ time64_t now;
unsigned char ip_string[16];
if (v == SEQ_START_TOKEN) {
@@ -148,15 +147,17 @@ static int mpc_show(struct seq_file *m, void *v)
seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
seq_printf(m, "Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
- do_gettimeofday(&now);
+ now = ktime_get_seconds();
for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
+ unsigned long seconds_delta = now - in_entry->time;
+
sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
seq_printf(m, "%-16s%s%-14lu%-12u",
ip_string,
ingress_state_string(in_entry->entry_state),
in_entry->ctrl_info.holding_time -
- (now.tv_sec-in_entry->tv.tv_sec),
+ seconds_delta,
in_entry->packets_fwded);
if (in_entry->shortcut)
seq_printf(m, " %-3d %-3d",
@@ -169,13 +170,14 @@ static int mpc_show(struct seq_file *m, void *v)
seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
+ unsigned long seconds_delta = now - eg_entry->time;
+
for (i = 0; i < ATM_ESA_LEN; i++)
seq_printf(m, "%02x", p[i]);
seq_printf(m, "\n%-16lu%s%-14lu%-15u",
(unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
egress_state_string(eg_entry->entry_state),
- (eg_entry->ctrl_info.holding_time -
- (now.tv_sec-eg_entry->tv.tv_sec)),
+ (eg_entry->ctrl_info.holding_time - seconds_delta),
eg_entry->packets_rcvd);
/* latest IP address */
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 642f9272ab95..edc48edc95c1 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -37,7 +37,6 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
size_t count, loff_t *pos);
static const struct file_operations proc_atm_dev_ops = {
- .owner = THIS_MODULE,
.read = proc_dev_atm_read,
.llseek = noop_llseek,
};
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 06eac1f50c5e..47fdd399626b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1931,7 +1931,6 @@ static int ax25_info_open(struct inode *inode, struct file *file)
}
static const struct file_operations ax25_info_fops = {
- .owner = THIS_MODULE,
.open = ax25_info_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 0446b892618a..525558972fd9 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -336,7 +336,6 @@ static int ax25_rt_info_open(struct inode *inode, struct file *file)
}
const struct file_operations ax25_route_fops = {
- .owner = THIS_MODULE,
.open = ax25_rt_info_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 83b035f56202..4ebe91ba317a 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -194,7 +194,6 @@ static int ax25_uid_info_open(struct inode *inode, struct file *file)
}
const struct file_operations ax25_uid_fops = {
- .owner = THIS_MODULE,
.open = ax25_uid_info_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index b73b96a2854b..c44f6515be5e 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -1,3 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
+#
+# Marek Lindner, Simon Wunderlich
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
#
# B.A.T.M.A.N meshing protocol
#
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 915987bc6d29..022f6e77307b 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,4 +1,4 @@
-#
+# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
index 44fd073b7546..80c72c7d3cad 100644
--- a/net/batman-adv/bat_algo.c
+++ b/net/batman-adv/bat_algo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -37,7 +38,8 @@ char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;
/**
- * batadv_algo_init - Initialize batman-adv algorithm management data structures
+ * batadv_algo_init() - Initialize batman-adv algorithm management data
+ * structures
*/
void batadv_algo_init(void)
{
@@ -59,6 +61,12 @@ static struct batadv_algo_ops *batadv_algo_get(char *name)
return bat_algo_ops;
}
+/**
+ * batadv_algo_register() - Register callbacks for a mesh algorithm
+ * @bat_algo_ops: mesh algorithm callbacks to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
struct batadv_algo_ops *bat_algo_ops_tmp;
@@ -88,6 +96,19 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
return 0;
}
+/**
+ * batadv_algo_select() - Select algorithm of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @name: name of the algorithm to select
+ *
+ * The algorithm callbacks for the soft interface will be set when the algorithm
+ * with the matching name is found. Any previously selected algorithm will not be
+ * deinitialized, and the newly selected algorithm will not be initialized either.
+ * It is therefore not allowed to call batadv_algo_select outside the creation
+ * function of the soft interface.
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
struct batadv_algo_ops *bat_algo_ops;
@@ -102,6 +123,14 @@ int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
}
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_algo_seq_print_text() - Print the supported algorithms in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
struct batadv_algo_ops *bat_algo_ops;
@@ -148,7 +177,7 @@ module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
0644);
/**
- * batadv_algo_dump_entry - fill in information about one supported routing
+ * batadv_algo_dump_entry() - fill in information about one supported routing
* algorithm
* @msg: netlink message to be sent back
* @portid: Port to reply to
@@ -179,7 +208,7 @@ static int batadv_algo_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_algo_dump - fill in information about supported routing
+ * batadv_algo_dump() - fill in information about supported routing
* algorithms
* @msg: netlink message to be sent back
* @cb: Parameters to the netlink request
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 29f6312f9bf1..029221615ba3 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Linus Lüssing
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index bbe8414b6ee7..79e326383726 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
@@ -51,6 +52,7 @@
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
@@ -62,7 +64,6 @@
#include "netlink.h"
#include "network-coding.h"
#include "originator.h"
-#include "packet.h"
#include "routing.h"
#include "send.h"
#include "translation-table.h"
@@ -72,21 +73,28 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
/**
* enum batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is no duplicate
- * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
- * neighbor)
- * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
- * @BATADV_PROTECTED: originator is currently protected (after reboot)
*/
enum batadv_dup_status {
+ /** @BATADV_NO_DUP: the packet is no duplicate */
BATADV_NO_DUP = 0,
+
+ /**
+ * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
+ * the neighbor)
+ */
BATADV_ORIG_DUP,
+
+ /** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
BATADV_NEIGH_DUP,
+
+ /**
+ * @BATADV_PROTECTED: originator is currently protected (after reboot)
+ */
BATADV_PROTECTED,
};
/**
- * batadv_ring_buffer_set - update the ring buffer with the given value
+ * batadv_ring_buffer_set() - update the ring buffer with the given value
* @lq_recv: pointer to the ring buffer
* @lq_index: index to store the value at
* @value: value to store in the ring buffer
@@ -98,7 +106,7 @@ static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
}
/**
- * batadv_ring_buffer_avg - compute the average of all non-zero values stored
+ * batadv_ring_buffer_avg() - compute the average of all non-zero values stored
* in the given ring buffer
* @lq_recv: pointer to the ring buffer
*
@@ -130,7 +138,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
}
/**
- * batadv_iv_ogm_orig_free - free the private resources allocated for this
+ * batadv_iv_ogm_orig_free() - free the private resources allocated for this
* orig_node
* @orig_node: the orig_node for which the resources have to be free'd
*/
@@ -141,8 +149,8 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
}
/**
- * batadv_iv_ogm_orig_add_if - change the private structures of the orig_node to
- * include the new hard-interface
+ * batadv_iv_ogm_orig_add_if() - change the private structures of the orig_node
+ * to include the new hard-interface
* @orig_node: the orig_node that has to be changed
* @max_if_num: the current amount of interfaces
*
@@ -186,7 +194,7 @@ unlock:
}
/**
- * batadv_iv_ogm_drop_bcast_own_entry - drop section of bcast_own
+ * batadv_iv_ogm_drop_bcast_own_entry() - drop section of bcast_own
* @orig_node: the orig_node that has to be changed
* @max_if_num: the current amount of interfaces
* @del_if_num: the index of the interface being removed
@@ -224,7 +232,7 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
}
/**
- * batadv_iv_ogm_drop_bcast_own_sum_entry - drop section of bcast_own_sum
+ * batadv_iv_ogm_drop_bcast_own_sum_entry() - drop section of bcast_own_sum
* @orig_node: the orig_node that has to be changed
* @max_if_num: the current amount of interfaces
* @del_if_num: the index of the interface being removed
@@ -259,8 +267,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
}
/**
- * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
- * exclude the removed interface
+ * batadv_iv_ogm_orig_del_if() - change the private structures of the orig_node
+ * to exclude the removed interface
* @orig_node: the orig_node that has to be changed
* @max_if_num: the current amount of interfaces
* @del_if_num: the index of the interface being removed
@@ -290,7 +298,8 @@ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
}
/**
- * batadv_iv_ogm_orig_get - retrieve or create (if does not exist) an originator
+ * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an
+ * originator
* @bat_priv: the bat priv with all the soft interface information
* @addr: mac address of the originator
*
@@ -447,7 +456,7 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
}
/**
- * batadv_iv_ogm_aggr_packet - checks if there is another OGM attached
+ * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
* @buff_pos: current position in the skb
* @packet_len: total length of the skb
* @tvlv_len: tvlv length of the previously considered OGM
@@ -557,7 +566,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
}
/**
- * batadv_iv_ogm_can_aggregate - find out if an OGM can be aggregated on an
+ * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an
* existing forward packet
* @new_bat_ogm_packet: OGM packet to be aggregated
* @bat_priv: the bat priv with all the soft interface information
@@ -660,7 +669,7 @@ out:
}
/**
- * batadv_iv_ogm_aggregate_new - create a new aggregated packet and add this
+ * batadv_iv_ogm_aggregate_new() - create a new aggregated packet and add this
* packet to it.
* @packet_buff: pointer to the OGM
* @packet_len: (total) length of the OGM
@@ -743,7 +752,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
}
/**
- * batadv_iv_ogm_queue_add - queue up an OGM for transmission
+ * batadv_iv_ogm_queue_add() - queue up an OGM for transmission
* @bat_priv: the bat priv with all the soft interface information
* @packet_buff: pointer to the OGM
* @packet_len: (total) length of the OGM
@@ -869,8 +878,8 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
}
/**
- * batadv_iv_ogm_slide_own_bcast_window - bitshift own OGM broadcast windows for
- * the given interface
+ * batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows
+ * for the given interface
* @hard_iface: the interface for which the windows have to be shifted
*/
static void
@@ -987,7 +996,7 @@ out:
}
/**
- * batadv_iv_ogm_orig_update - use OGM to update corresponding data in an
+ * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
* originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the orig node who originally emitted the ogm packet
@@ -1152,7 +1161,7 @@ out:
}
/**
- * batadv_iv_ogm_calc_tq - calculate tq for current received ogm packet
+ * batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet
* @orig_node: the orig node who originally emitted the ogm packet
* @orig_neigh_node: the orig node struct of the neighbor who sent the packet
* @batadv_ogm_packet: the ogm packet
@@ -1298,7 +1307,7 @@ out:
}
/**
- * batadv_iv_ogm_update_seqnos - process a batman packet for all interfaces,
+ * batadv_iv_ogm_update_seqnos() - process a batman packet for all interfaces,
* adjust the sequence number and find out whether it is a duplicate
* @ethhdr: ethernet header of the packet
* @batadv_ogm_packet: OGM packet to be considered
@@ -1401,7 +1410,8 @@ out:
}
/**
- * batadv_iv_ogm_process_per_outif - process a batman iv OGM for an outgoing if
+ * batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing
+ * interface
* @skb: the skb containing the OGM
* @ogm_offset: offset from skb->data to start of ogm header
* @orig_node: the (cached) orig node for the originator of this OGM
@@ -1608,7 +1618,7 @@ out:
}
/**
- * batadv_iv_ogm_process - process an incoming batman iv OGM
+ * batadv_iv_ogm_process() - process an incoming batman iv OGM
* @skb: the skb containing the OGM
* @ogm_offset: offset to the OGM which should be processed (for aggregates)
* @if_incoming: the interface where this packet was received
@@ -1861,7 +1871,7 @@ free_skb:
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_iv_ogm_orig_print_neigh - print neighbors for the originator table
+ * batadv_iv_ogm_orig_print_neigh() - print neighbors for the originator table
* @orig_node: the orig_node for which the neighbors are printed
* @if_outgoing: outgoing interface for these entries
* @seq: debugfs table seq_file struct
@@ -1890,7 +1900,7 @@ batadv_iv_ogm_orig_print_neigh(struct batadv_orig_node *orig_node,
}
/**
- * batadv_iv_ogm_orig_print - print the originator table
+ * batadv_iv_ogm_orig_print() - print the originator table
* @bat_priv: the bat priv with all the soft interface information
* @seq: debugfs table seq_file struct
* @if_outgoing: the outgoing interface for which this should be printed
@@ -1960,7 +1970,7 @@ next:
#endif
/**
- * batadv_iv_ogm_neigh_get_tq_avg - Get the TQ average for a neighbour on a
+ * batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a
* given outgoing interface.
* @neigh_node: Neighbour of interest
* @if_outgoing: Outgoing interface of interest
@@ -1986,7 +1996,7 @@ batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node,
}
/**
- * batadv_iv_ogm_orig_dump_subentry - Dump an originator subentry into a
+ * batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a
* message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
@@ -2048,7 +2058,7 @@ batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_iv_ogm_orig_dump_entry - Dump an originator entry into a message
+ * batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -2110,7 +2120,7 @@ batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_iv_ogm_orig_dump_bucket - Dump an originator bucket into a
+ * batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a
* message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
@@ -2153,7 +2163,7 @@ batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_iv_ogm_orig_dump - Dump the originators into a message
+ * batadv_iv_ogm_orig_dump() - Dump the originators into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
@@ -2190,7 +2200,7 @@ batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_iv_hardif_neigh_print - print a single hop neighbour node
+ * batadv_iv_hardif_neigh_print() - print a single hop neighbour node
* @seq: neighbour table seq_file struct
* @hardif_neigh: hardif neighbour information
*/
@@ -2209,7 +2219,7 @@ batadv_iv_hardif_neigh_print(struct seq_file *seq,
}
/**
- * batadv_iv_ogm_neigh_print - print the single hop neighbour list
+ * batadv_iv_ogm_neigh_print() - print the single hop neighbour list
* @bat_priv: the bat priv with all the soft interface information
* @seq: neighbour table seq_file struct
*/
@@ -2242,7 +2252,7 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
#endif
/**
- * batadv_iv_ogm_neigh_diff - calculate tq difference of two neighbors
+ * batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors
* @neigh1: the first neighbor object of the comparison
* @if_outgoing1: outgoing interface for the first neighbor
* @neigh2: the second neighbor object of the comparison
@@ -2287,7 +2297,7 @@ out:
}
/**
- * batadv_iv_ogm_neigh_dump_neigh - Dump a neighbour into a netlink message
+ * batadv_iv_ogm_neigh_dump_neigh() - Dump a neighbour into a netlink message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -2326,7 +2336,7 @@ batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_iv_ogm_neigh_dump_hardif - Dump the neighbours of a hard interface
+ * batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface
* into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
@@ -2365,7 +2375,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_iv_ogm_neigh_dump - Dump the neighbours into a message
+ * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
@@ -2417,7 +2427,7 @@ batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
}
/**
- * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors
* @neigh1: the first neighbor object of the comparison
* @if_outgoing1: outgoing interface for the first neighbor
* @neigh2: the second neighbor object of the comparison
@@ -2443,7 +2453,7 @@ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
}
/**
- * batadv_iv_ogm_neigh_is_sob - check if neigh1 is similarly good or better
+ * batadv_iv_ogm_neigh_is_sob() - check if neigh1 is similarly good or better
* than neigh2 from the metric perspective
* @neigh1: the first neighbor object of the comparison
* @if_outgoing1: outgoing interface for the first neighbor
@@ -2478,7 +2488,7 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_iv_init_sel_class - initialize GW selection class
+ * batadv_iv_init_sel_class() - initialize GW selection class
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
@@ -2703,7 +2713,7 @@ static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
#endif
/**
- * batadv_iv_gw_dump_entry - Dump a gateway into a message
+ * batadv_iv_gw_dump_entry() - Dump a gateway into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -2774,7 +2784,7 @@ out:
}
/**
- * batadv_iv_gw_dump - Dump gateways into a message
+ * batadv_iv_gw_dump() - Dump gateways into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
@@ -2843,6 +2853,11 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
},
};
+/**
+ * batadv_iv_init() - B.A.T.M.A.N. IV initialization function
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int __init batadv_iv_init(void)
{
int ret;
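The bulk of this patch converts the batman-adv kernel-doc headers from the old "name - description" form to the "name() - description" form that scripts/kernel-doc expects for functions. A minimal sketch of the target layout, using a hypothetical function rather than one from this patch:

/**
 * example_init() - initialize a hypothetical subcomponent
 * @priv: private data of the soft interface
 *
 * Return: 0 on success or a negative error number in case of failure
 */
static int example_init(struct example_priv *priv);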
diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h
index ae2ab526bdb1..9dc0dd5c83df 100644
--- a/net/batman-adv/bat_iv_ogm.h
+++ b/net/batman-adv/bat_iv_ogm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
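The other recurring change is the SPDX tag added as the first line of every file. As the hunks in this patch show, and in line with the kernel's license-rules documentation, C sources take a C++-style comment while headers keep a C-style comment:

// SPDX-License-Identifier: GPL-2.0        <- first line of a .c file
/* SPDX-License-Identifier: GPL-2.0 */     <- first line of a .h file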
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index e0e2bfcd6b3e..27e165ac9302 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
@@ -36,6 +37,7 @@
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
@@ -48,7 +50,6 @@
#include "log.h"
#include "netlink.h"
#include "originator.h"
-#include "packet.h"
struct sk_buff;
@@ -99,7 +100,7 @@ static void batadv_v_primary_iface_set(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_v_iface_update_mac - react to hard-interface MAC address change
+ * batadv_v_iface_update_mac() - react to hard-interface MAC address change
* @hard_iface: the modified interface
*
* If the modified interface is the primary one, update the originator
@@ -130,7 +131,7 @@ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_v_orig_print_neigh - print neighbors for the originator table
+ * batadv_v_orig_print_neigh() - print neighbors for the originator table
* @orig_node: the orig_node for which the neighbors are printed
* @if_outgoing: outgoing interface for these entries
* @seq: debugfs table seq_file struct
@@ -160,7 +161,7 @@ batadv_v_orig_print_neigh(struct batadv_orig_node *orig_node,
}
/**
- * batadv_v_hardif_neigh_print - print a single ELP neighbour node
+ * batadv_v_hardif_neigh_print() - print a single ELP neighbour node
* @seq: neighbour table seq_file struct
* @hardif_neigh: hardif neighbour information
*/
@@ -181,7 +182,7 @@ batadv_v_hardif_neigh_print(struct seq_file *seq,
}
/**
- * batadv_v_neigh_print - print the single hop neighbour list
+ * batadv_v_neigh_print() - print the single hop neighbour list
* @bat_priv: the bat priv with all the soft interface information
* @seq: neighbour table seq_file struct
*/
@@ -215,7 +216,7 @@ static void batadv_v_neigh_print(struct batadv_priv *bat_priv,
#endif
/**
- * batadv_v_neigh_dump_neigh - Dump a neighbour into a message
+ * batadv_v_neigh_dump_neigh() - Dump a neighbour into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -258,7 +259,7 @@ batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_v_neigh_dump_hardif - Dump the neighbours of a hard interface into
+ * batadv_v_neigh_dump_hardif() - Dump the neighbours of a hard interface into
* a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
@@ -296,7 +297,7 @@ batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_v_neigh_dump - Dump the neighbours of a hard interface into a
+ * batadv_v_neigh_dump() - Dump the neighbours of a hard interface into a
* message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
@@ -348,7 +349,7 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_v_orig_print - print the originator table
+ * batadv_v_orig_print() - print the originator table
* @bat_priv: the bat priv with all the soft interface information
* @seq: debugfs table seq_file struct
* @if_outgoing: the outgoing interface for which this should be printed
@@ -416,8 +417,7 @@ next:
#endif
/**
- * batadv_v_orig_dump_subentry - Dump an originator subentry into a
- * message
+ * batadv_v_orig_dump_subentry() - Dump an originator subentry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -483,7 +483,7 @@ batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_v_orig_dump_entry - Dump an originator entry into a message
+ * batadv_v_orig_dump_entry() - Dump an originator entry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -536,8 +536,7 @@ batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_v_orig_dump_bucket - Dump an originator bucket into a
- * message
+ * batadv_v_orig_dump_bucket() - Dump an originator bucket into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -578,7 +577,7 @@ batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_v_orig_dump - Dump the originators into a message
+ * batadv_v_orig_dump() - Dump the originators into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
@@ -668,7 +667,7 @@ err_ifinfo1:
}
/**
- * batadv_v_init_sel_class - initialize GW selection class
+ * batadv_v_init_sel_class() - initialize GW selection class
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
@@ -704,7 +703,7 @@ static ssize_t batadv_v_show_sel_class(struct batadv_priv *bat_priv, char *buff)
}
/**
- * batadv_v_gw_throughput_get - retrieve the GW-bandwidth for a given GW
+ * batadv_v_gw_throughput_get() - retrieve the GW-bandwidth for a given GW
* @gw_node: the GW to retrieve the metric for
* @bw: the pointer where the metric will be stored. The metric is computed as
* the minimum between the GW advertised throughput and the path throughput to
@@ -747,7 +746,7 @@ out:
}
/**
- * batadv_v_gw_get_best_gw_node - retrieve the best GW node
+ * batadv_v_gw_get_best_gw_node() - retrieve the best GW node
* @bat_priv: the bat priv with all the soft interface information
*
* Return: the GW node having the best GW-metric, NULL if no GW is known
@@ -785,7 +784,7 @@ next:
}
/**
- * batadv_v_gw_is_eligible - check if a originator would be selected as GW
+ * batadv_v_gw_is_eligible() - check if an originator would be selected as GW
* @bat_priv: the bat priv with all the soft interface information
* @curr_gw_orig: originator representing the currently selected GW
* @orig_node: the originator representing the new candidate
@@ -884,7 +883,7 @@ out:
}
/**
- * batadv_v_gw_print - print the gateway list
+ * batadv_v_gw_print() - print the gateway list
* @bat_priv: the bat priv with all the soft interface information
* @seq: gateway table seq_file struct
*/
@@ -913,7 +912,7 @@ static void batadv_v_gw_print(struct batadv_priv *bat_priv,
#endif
/**
- * batadv_v_gw_dump_entry - Dump a gateway into a message
+ * batadv_v_gw_dump_entry() - Dump a gateway into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -1004,7 +1003,7 @@ out:
}
/**
- * batadv_v_gw_dump - Dump gateways into a message
+ * batadv_v_gw_dump() - Dump gateways into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
@@ -1074,7 +1073,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
};
/**
- * batadv_v_hardif_init - initialize the algorithm specific fields in the
+ * batadv_v_hardif_init() - initialize the algorithm specific fields in the
* hard-interface object
* @hard_iface: the hard-interface to initialize
*/
@@ -1088,7 +1087,7 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_v_mesh_init - initialize the B.A.T.M.A.N. V private resources for a
+ * batadv_v_mesh_init() - initialize the B.A.T.M.A.N. V private resources for a
* mesh
* @bat_priv: the object representing the mesh interface to initialise
*
@@ -1106,7 +1105,7 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
}
/**
- * batadv_v_mesh_free - free the B.A.T.M.A.N. V private resources for a mesh
+ * batadv_v_mesh_free() - free the B.A.T.M.A.N. V private resources for a mesh
* @bat_priv: the object representing the mesh interface to free
*/
void batadv_v_mesh_free(struct batadv_priv *bat_priv)
@@ -1115,7 +1114,7 @@ void batadv_v_mesh_free(struct batadv_priv *bat_priv)
}
/**
- * batadv_v_init - B.A.T.M.A.N. V initialization function
+ * batadv_v_init() - B.A.T.M.A.N. V initialization function
*
* Description: Takes care of initializing all the subcomponents.
* It is invoked upon module load only.
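As the batadv_v_gw_throughput_get() comment above puts it, the B.A.T.M.A.N. V gateway metric is the minimum of the advertised gateway bandwidth and the path throughput towards that gateway. A worked sketch of just that computation, with a hypothetical helper that is not part of this patch:

/* hypothetical helper illustrating the documented GW metric */
static u32 example_gw_metric(u32 advertised_bw, u32 path_throughput)
{
        return min(advertised_bw, path_throughput);
}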
diff --git a/net/batman-adv/bat_v.h b/net/batman-adv/bat_v.h
index dd7c4b647e6b..a17ab68bbce8 100644
--- a/net/batman-adv/bat_v.h
+++ b/net/batman-adv/bat_v.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Linus Lüssing
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 1de992c58b35..a83478c46597 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
@@ -24,7 +25,7 @@
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -41,18 +42,18 @@
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/cfg80211.h>
+#include <uapi/linux/batadv_packet.h>
#include "bat_algo.h"
#include "bat_v_ogm.h"
#include "hard-interface.h"
#include "log.h"
#include "originator.h"
-#include "packet.h"
#include "routing.h"
#include "send.h"
/**
- * batadv_v_elp_start_timer - restart timer for ELP periodic work
+ * batadv_v_elp_start_timer() - restart timer for ELP periodic work
* @hard_iface: the interface for which the timer has to be reset
*/
static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
@@ -67,7 +68,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_v_elp_get_throughput - get the throughput towards a neighbour
+ * batadv_v_elp_get_throughput() - get the throughput towards a neighbour
* @neigh: the neighbour for which the throughput has to be obtained
*
* Return: The throughput towards the given neighbour in multiples of 100kbps
@@ -153,8 +154,8 @@ default_throughput:
}
/**
- * batadv_v_elp_throughput_metric_update - worker updating the throughput metric
- * of a single hop neighbour
+ * batadv_v_elp_throughput_metric_update() - worker updating the throughput
+ * metric of a single hop neighbour
* @work: the work queue item
*/
void batadv_v_elp_throughput_metric_update(struct work_struct *work)
@@ -177,7 +178,7 @@ void batadv_v_elp_throughput_metric_update(struct work_struct *work)
}
/**
- * batadv_v_elp_wifi_neigh_probe - send link probing packets to a neighbour
+ * batadv_v_elp_wifi_neigh_probe() - send link probing packets to a neighbour
* @neigh: the neighbour to probe
*
* Sends a predefined number of unicast wifi packets to a given neighbour in
@@ -240,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
}
/**
- * batadv_v_elp_periodic_work - ELP periodic task per interface
+ * batadv_v_elp_periodic_work() - ELP periodic task per interface
* @work: work queue item
*
* Emits broadcast ELP message in regular intervals.
@@ -327,7 +328,7 @@ out:
}
/**
- * batadv_v_elp_iface_enable - setup the ELP interface private resources
+ * batadv_v_elp_iface_enable() - setup the ELP interface private resources
* @hard_iface: interface for which the data has to be prepared
*
* Return: 0 on success or a -ENOMEM in case of failure.
@@ -375,7 +376,7 @@ out:
}
/**
- * batadv_v_elp_iface_disable - release ELP interface private resources
+ * batadv_v_elp_iface_disable() - release ELP interface private resources
* @hard_iface: interface for which the resources have to be released
*/
void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
@@ -387,7 +388,7 @@ void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_v_elp_iface_activate - update the ELP buffer belonging to the given
+ * batadv_v_elp_iface_activate() - update the ELP buffer belonging to the given
* hard-interface
* @primary_iface: the new primary interface
* @hard_iface: interface holding the to-be-updated buffer
@@ -408,7 +409,7 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
}
/**
- * batadv_v_elp_primary_iface_set - change internal data to reflect the new
+ * batadv_v_elp_primary_iface_set() - change internal data to reflect the new
* primary interface
* @primary_iface: the new primary interface
*/
@@ -428,7 +429,7 @@ void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface)
}
/**
- * batadv_v_elp_neigh_update - update an ELP neighbour node
+ * batadv_v_elp_neigh_update() - update an ELP neighbour node
* @bat_priv: the bat priv with all the soft interface information
* @neigh_addr: the neighbour interface address
* @if_incoming: the interface the packet was received through
@@ -488,7 +489,7 @@ orig_free:
}
/**
- * batadv_v_elp_packet_recv - main ELP packet handler
+ * batadv_v_elp_packet_recv() - main ELP packet handler
* @skb: the received packet
* @if_incoming: the interface this packet was received through
*
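The batadv_v_elp_start_timer()/batadv_v_elp_periodic_work() pair documented above follows the kernel's usual self-rearming delayed-work pattern. A stripped-down sketch with hypothetical names (the real code derives the delay from the interface's ELP interval; 500 ms here is only illustrative):

static void example_elp_periodic_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... build and broadcast the ELP packet ... */

        /* re-arm the worker for the next interval */
        queue_delayed_work(system_wq, dwork, msecs_to_jiffies(500));
}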
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
index 376ead280ab9..5e39d0588a48 100644
--- a/net/batman-adv/bat_v_elp.h
+++ b/net/batman-adv/bat_v_elp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index c251445a42a0..ba59b77c605d 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
@@ -22,7 +23,7 @@
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -38,20 +39,20 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
#include "bat_algo.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "originator.h"
-#include "packet.h"
#include "routing.h"
#include "send.h"
#include "translation-table.h"
#include "tvlv.h"
/**
- * batadv_v_ogm_orig_get - retrieve and possibly create an originator node
+ * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
* @bat_priv: the bat priv with all the soft interface information
* @addr: the address of the originator
*
@@ -88,7 +89,7 @@ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
}
/**
- * batadv_v_ogm_start_timer - restart the OGM sending timer
+ * batadv_v_ogm_start_timer() - restart the OGM sending timer
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
@@ -107,7 +108,7 @@ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
}
/**
- * batadv_v_ogm_send_to_if - send a batman ogm using a given interface
+ * batadv_v_ogm_send_to_if() - send a batman ogm using a given interface
* @skb: the OGM to send
* @hard_iface: the interface to use to send the OGM
*/
@@ -127,7 +128,7 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
}
/**
- * batadv_v_ogm_send - periodic worker broadcasting the own OGM
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
* @work: work queue item
*/
static void batadv_v_ogm_send(struct work_struct *work)
@@ -235,7 +236,7 @@ out:
}
/**
- * batadv_v_ogm_iface_enable - prepare an interface for B.A.T.M.A.N. V
+ * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V
* @hard_iface: the interface to prepare
*
* Takes care of scheduling own OGM sending routine for this interface.
@@ -252,7 +253,7 @@ int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_v_ogm_primary_iface_set - set a new primary interface
+ * batadv_v_ogm_primary_iface_set() - set a new primary interface
* @primary_iface: the new primary interface
*/
void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
@@ -268,8 +269,8 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
}
/**
- * batadv_v_forward_penalty - apply a penalty to the throughput metric forwarded
- * with B.A.T.M.A.N. V OGMs
+ * batadv_v_forward_penalty() - apply a penalty to the throughput metric
+ * forwarded with B.A.T.M.A.N. V OGMs
* @bat_priv: the bat priv with all the soft interface information
* @if_incoming: the interface where the OGM has been received
* @if_outgoing: the interface where the OGM has to be forwarded to
@@ -314,7 +315,7 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
}
/**
- * batadv_v_ogm_forward - check conditions and forward an OGM to the given
+ * batadv_v_ogm_forward() - check conditions and forward an OGM to the given
* outgoing interface
* @bat_priv: the bat priv with all the soft interface information
* @ogm_received: previously received OGM to be forwarded
@@ -405,7 +406,7 @@ out:
}
/**
- * batadv_v_ogm_metric_update - update route metric based on OGM
+ * batadv_v_ogm_metric_update() - update route metric based on OGM
* @bat_priv: the bat priv with all the soft interface information
* @ogm2: OGM2 structure
* @orig_node: Originator structure for which the OGM has been received
@@ -490,7 +491,7 @@ out:
}
/**
- * batadv_v_ogm_route_update - update routes based on OGM
+ * batadv_v_ogm_route_update() - update routes based on OGM
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: the Ethernet header of the OGM2
* @ogm2: OGM2 structure
@@ -590,7 +591,7 @@ out:
}
/**
- * batadv_v_ogm_process_per_outif - process a batman v OGM for an outgoing if
+ * batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing if
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: the Ethernet header of the OGM2
* @ogm2: OGM2 structure
@@ -639,7 +640,7 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
}
/**
- * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
+ * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
* @buff_pos: current position in the skb
* @packet_len: total length of the skb
* @tvlv_len: tvlv length of the previously considered OGM
@@ -659,7 +660,7 @@ static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
}
/**
- * batadv_v_ogm_process - process an incoming batman v OGM
+ * batadv_v_ogm_process() - process an incoming batman v OGM
* @skb: the skb containing the OGM
* @ogm_offset: offset to the OGM which should be processed (for aggregates)
* @if_incoming: the interface where this packet was received
@@ -787,7 +788,7 @@ out:
}
/**
- * batadv_v_ogm_packet_recv - OGM2 receiving handler
+ * batadv_v_ogm_packet_recv() - OGM2 receiving handler
* @skb: the received OGM
* @if_incoming: the interface where this OGM has been received
*
@@ -851,7 +852,7 @@ free_skb:
}
/**
- * batadv_v_ogm_init - initialise the OGM2 engine
+ * batadv_v_ogm_init() - initialise the OGM2 engine
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success or a negative error code in case of failure
@@ -884,7 +885,7 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
}
/**
- * batadv_v_ogm_free - free OGM private resources
+ * batadv_v_ogm_free() - free OGM private resources
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_v_ogm_free(struct batadv_priv *bat_priv)
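Several comments above concern OGM aggregation: one skb can carry several OGMs back to back, each immediately followed by its TVLV block, which is why batadv_v_ogm_aggr_packet() reasons about buff_pos, packet_len and tvlv_len. A schematic walk over such a buffer, with hypothetical type and helper names and without the sanity checks the real code performs on tvlv_len:

int offset = 0;

while (offset + sizeof(struct example_ogm2) <= packet_len) {
        struct example_ogm2 *ogm = (struct example_ogm2 *)(buff + offset);

        example_process_one_ogm(ogm);

        /* the next OGM starts after this header and its TVLV block */
        offset += sizeof(*ogm) + ntohs(ogm->tvlv_len);
}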
diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h
index 2068770b542d..6a4c14ccc3c6 100644
--- a/net/batman-adv/bat_v_ogm.h
+++ b/net/batman-adv/bat_v_ogm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 2b070c7e31da..bdc1ef06e05b 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
@@ -32,7 +33,7 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
}
/**
- * batadv_bit_get_packet - receive and process one packet within the sequence
+ * batadv_bit_get_packet() - receive and process one packet within the sequence
* number window
* @priv: the bat priv with all the soft interface information
* @seq_bits: pointer to the bitmap of the packet receive window
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index cc262c9d97e0..ca9d0753dd6b 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
@@ -26,7 +27,7 @@
#include <linux/types.h>
/**
- * batadv_test_bit - check if bit is set in the current window
+ * batadv_test_bit() - check if bit is set in the current window
*
* @seq_bits: pointer to the bitmap of the packet receive window
* @last_seqno: latest sequence number in seq_bits
@@ -46,7 +47,12 @@ static inline bool batadv_test_bit(const unsigned long *seq_bits,
return test_bit(diff, seq_bits) != 0;
}
-/* turn corresponding bit on, so we can remember that we got the packet */
+/**
+ * batadv_set_bit() - Turn corresponding bit on, so we can remember that we got
+ * the packet
+ * @seq_bits: bitmap of the packet receive window
+ * @n: relative sequence number of newly received packet
+ */
static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
{
/* if too old, just drop it */
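batadv_test_bit() and batadv_set_bit() above treat the receive window as a bitmap indexed by the distance between the newest known sequence number and the packet being examined. A self-contained model of that idea (window size and names hypothetical):

#define EXAMPLE_WINDOW_SIZE 64

/* diff = last_seqno - curr_seqno; valid when 0 <= diff < window size */
static bool example_window_test(const unsigned long *seq_bits, s32 diff)
{
        if (diff < 0 || diff >= EXAMPLE_WINDOW_SIZE)
                return false;

        return test_bit(diff, seq_bits) != 0;
}

static void example_window_set(unsigned long *seq_bits, s32 diff)
{
        if (diff >= 0 && diff < EXAMPLE_WINDOW_SIZE)
                set_bit(diff, seq_bits);
}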
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index cdd8e8e4df0b..fad47853ad3c 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
@@ -24,7 +25,7 @@
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
@@ -49,6 +50,7 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "hard-interface.h"
@@ -56,7 +58,6 @@
#include "log.h"
#include "netlink.h"
#include "originator.h"
-#include "packet.h"
#include "soft-interface.h"
#include "sysfs.h"
#include "translation-table.h"
@@ -69,7 +70,7 @@ batadv_bla_send_announce(struct batadv_priv *bat_priv,
struct batadv_bla_backbone_gw *backbone_gw);
/**
- * batadv_choose_claim - choose the right bucket for a claim.
+ * batadv_choose_claim() - choose the right bucket for a claim.
* @data: data to hash
* @size: size of the hash table
*
@@ -87,7 +88,7 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
}
/**
- * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
+ * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
* @data: data to hash
* @size: size of the hash table
*
@@ -105,7 +106,7 @@ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
}
/**
- * batadv_compare_backbone_gw - compare address and vid of two backbone gws
+ * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
* @node: list node of the first entry to compare
* @data2: pointer to the second backbone gateway
*
@@ -129,7 +130,7 @@ static bool batadv_compare_backbone_gw(const struct hlist_node *node,
}
/**
- * batadv_compare_claim - compare address and vid of two claims
+ * batadv_compare_claim() - compare address and vid of two claims
* @node: list node of the first entry to compare
* @data2: pointer to the second claims
*
@@ -153,7 +154,7 @@ static bool batadv_compare_claim(const struct hlist_node *node,
}
/**
- * batadv_backbone_gw_release - release backbone gw from lists and queue for
+ * batadv_backbone_gw_release() - release backbone gw from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the backbone gw
*/
@@ -168,7 +169,7 @@ static void batadv_backbone_gw_release(struct kref *ref)
}
/**
- * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
+ * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
* release it
* @backbone_gw: backbone gateway to be free'd
*/
@@ -178,8 +179,8 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
}
/**
- * batadv_claim_release - release claim from lists and queue for free after rcu
- * grace period
+ * batadv_claim_release() - release claim from lists and queue for free after
+ * rcu grace period
* @ref: kref pointer of the claim
*/
static void batadv_claim_release(struct kref *ref)
@@ -204,8 +205,7 @@ static void batadv_claim_release(struct kref *ref)
}
/**
- * batadv_claim_put - decrement the claim refcounter and possibly
- * release it
+ * batadv_claim_put() - decrement the claim refcounter and possibly release it
* @claim: claim to be free'd
*/
static void batadv_claim_put(struct batadv_bla_claim *claim)
@@ -214,7 +214,7 @@ static void batadv_claim_put(struct batadv_bla_claim *claim)
}
/**
- * batadv_claim_hash_find - looks for a claim in the claim hash
+ * batadv_claim_hash_find() - looks for a claim in the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @data: search data (may be local/static data)
*
@@ -253,7 +253,7 @@ batadv_claim_hash_find(struct batadv_priv *bat_priv,
}
/**
- * batadv_backbone_hash_find - looks for a backbone gateway in the hash
+ * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
* @bat_priv: the bat priv with all the soft interface information
* @addr: the address of the originator
* @vid: the VLAN ID
@@ -297,7 +297,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
}
/**
- * batadv_bla_del_backbone_claims - delete all claims for a backbone
+ * batadv_bla_del_backbone_claims() - delete all claims for a backbone
* @backbone_gw: backbone gateway where the claims should be removed
*/
static void
@@ -337,7 +337,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
}
/**
- * batadv_bla_send_claim - sends a claim frame according to the provided info
+ * batadv_bla_send_claim() - sends a claim frame according to the provided info
* @bat_priv: the bat priv with all the soft interface information
* @mac: the mac address to be announced within the claim
* @vid: the VLAN ID
@@ -457,7 +457,7 @@ out:
}
/**
- * batadv_bla_loopdetect_report - worker for reporting the loop
+ * batadv_bla_loopdetect_report() - worker for reporting the loop
* @work: work queue item
*
* Throws an uevent, as the loopdetect check function can't do that itself
@@ -487,7 +487,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work)
}
/**
- * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
+ * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
* @bat_priv: the bat priv with all the soft interface information
* @orig: the mac address of the originator
* @vid: the VLAN ID
@@ -560,7 +560,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
}
/**
- * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
+ * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the selected primary interface
* @vid: VLAN identifier
@@ -586,7 +586,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
}
/**
- * batadv_bla_answer_request - answer a bla request by sending own claims
+ * batadv_bla_answer_request() - answer a bla request by sending own claims
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: interface where the request came in
* @vid: the vid where the request came in
@@ -636,7 +636,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
}
/**
- * batadv_bla_send_request - send a request to repeat claims
+ * batadv_bla_send_request() - send a request to repeat claims
* @backbone_gw: the backbone gateway from whom we are out of sync
*
* When the crc is wrong, ask the backbone gateway for a full table update.
@@ -663,7 +663,7 @@ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
}
/**
- * batadv_bla_send_announce - Send an announcement frame
+ * batadv_bla_send_announce() - Send an announcement frame
* @bat_priv: the bat priv with all the soft interface information
* @backbone_gw: our backbone gateway which should be announced
*/
@@ -684,7 +684,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
}
/**
- * batadv_bla_add_claim - Adds a claim in the claim hash
+ * batadv_bla_add_claim() - Adds a claim in the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @mac: the mac address of the claim
* @vid: the VLAN ID of the frame
@@ -774,7 +774,7 @@ claim_free_ref:
}
/**
- * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
* claim
* @claim: claim whose backbone_gw should be returned
*
@@ -794,7 +794,7 @@ batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
}
/**
- * batadv_bla_del_claim - delete a claim from the claim hash
+ * batadv_bla_del_claim() - delete a claim from the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @mac: mac address of the claim to be removed
* @vid: VLAN id for the claim to be removed
@@ -822,7 +822,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
}
/**
- * batadv_handle_announce - check for ANNOUNCE frame
+ * batadv_handle_announce() - check for ANNOUNCE frame
* @bat_priv: the bat priv with all the soft interface information
* @an_addr: announcement mac address (ARP Sender HW address)
* @backbone_addr: originator address of the sender (Ethernet source MAC)
@@ -880,7 +880,7 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
}
/**
- * batadv_handle_request - check for REQUEST frame
+ * batadv_handle_request() - check for REQUEST frame
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary hard interface of this batman soft interface
* @backbone_addr: backbone address to be requested (ARP sender HW MAC)
@@ -913,7 +913,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv,
}
/**
- * batadv_handle_unclaim - check for UNCLAIM frame
+ * batadv_handle_unclaim() - check for UNCLAIM frame
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary hard interface of this batman soft interface
* @backbone_addr: originator address of the backbone (Ethernet source)
@@ -951,7 +951,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
}
/**
- * batadv_handle_claim - check for CLAIM frame
+ * batadv_handle_claim() - check for CLAIM frame
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary hard interface of this batman soft interface
* @backbone_addr: originator address of the backbone (Ethernet Source)
@@ -988,7 +988,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv,
}
/**
- * batadv_check_claim_group - check for claim group membership
+ * batadv_check_claim_group() - check for claim group membership
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary interface of this batman interface
* @hw_src: the Hardware source in the ARP Header
@@ -1063,7 +1063,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
}
/**
- * batadv_bla_process_claim - Check if this is a claim frame, and process it
+ * batadv_bla_process_claim() - Check if this is a claim frame, and process it
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary hard interface of this batman soft interface
* @skb: the frame to be checked
@@ -1205,7 +1205,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
}
/**
- * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
+ * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
* immediately
* @bat_priv: the bat priv with all the soft interface information
* @now: whether the whole hash shall be wiped now
@@ -1258,7 +1258,7 @@ purge_now:
}
/**
- * batadv_bla_purge_claims - Remove claims after a timeout or immediately
+ * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the selected primary interface, may be NULL if now is set
* @now: whether the whole hash shall be wiped now
@@ -1316,7 +1316,7 @@ skip:
}
/**
- * batadv_bla_update_orig_address - Update the backbone gateways when the own
+ * batadv_bla_update_orig_address() - Update the backbone gateways when the own
* originator address changes
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the new selected primary_if
@@ -1372,7 +1372,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
}
/**
- * batadv_bla_send_loopdetect - send a loopdetect frame
+ * batadv_bla_send_loopdetect() - send a loopdetect frame
* @bat_priv: the bat priv with all the soft interface information
* @backbone_gw: the backbone gateway for which a loop should be detected
*
@@ -1392,7 +1392,7 @@ batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
}
/**
- * batadv_bla_status_update - purge bla interfaces if necessary
+ * batadv_bla_status_update() - purge bla interfaces if necessary
* @net_dev: the soft interface net device
*/
void batadv_bla_status_update(struct net_device *net_dev)
@@ -1412,7 +1412,7 @@ void batadv_bla_status_update(struct net_device *net_dev)
}
/**
- * batadv_bla_periodic_work - performs periodic bla work
+ * batadv_bla_periodic_work() - performs periodic bla work
* @work: kernel work struct
*
* periodic work to do:
@@ -1517,7 +1517,7 @@ static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
/**
- * batadv_bla_init - initialize all bla structures
+ * batadv_bla_init() - initialize all bla structures
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success, < 0 on error.
@@ -1579,7 +1579,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
}
/**
- * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
+ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
* @bat_priv: the bat priv with all the soft interface information
* @skb: contains the bcast_packet to be checked
*
@@ -1652,7 +1652,7 @@ out:
}
/**
- * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
+ * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
* the VLAN identified by vid.
* @bat_priv: the bat priv with all the soft interface information
* @orig: originator mac address
@@ -1692,7 +1692,7 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
}
/**
- * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
+ * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
* @skb: the frame to be checked
* @orig_node: the orig_node of the frame
* @hdr_size: maximum length of the frame
@@ -1726,7 +1726,7 @@ bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
}
/**
- * batadv_bla_free - free all bla structures
+ * batadv_bla_free() - free all bla structures
* @bat_priv: the bat priv with all the soft interface information
*
* for softinterface free or module unload
@@ -1753,7 +1753,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
}
/**
- * batadv_bla_loopdetect_check - check and handle a detected loop
+ * batadv_bla_loopdetect_check() - check and handle a detected loop
* @bat_priv: the bat priv with all the soft interface information
* @skb: the packet to check
* @primary_if: interface where the request came in
@@ -1802,7 +1802,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
}
/**
- * batadv_bla_rx - check packets coming from the mesh.
+ * batadv_bla_rx() - check packets coming from the mesh.
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
@@ -1914,7 +1914,7 @@ out:
}
/**
- * batadv_bla_tx - check packets going into the mesh
+ * batadv_bla_tx() - check packets going into the mesh
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
@@ -2022,7 +2022,7 @@ out:
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
+ * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
* @seq: seq file to print on
* @offset: not used
*
@@ -2084,7 +2084,7 @@ out:
#endif
/**
- * batadv_bla_claim_dump_entry - dump one entry of the claim table
+ * batadv_bla_claim_dump_entry() - dump one entry of the claim table
* to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
@@ -2143,7 +2143,7 @@ out:
}
/**
- * batadv_bla_claim_dump_bucket - dump one bucket of the claim table
+ * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
* to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
@@ -2180,7 +2180,7 @@ unlock:
}
/**
- * batadv_bla_claim_dump - dump claim table to a netlink socket
+ * batadv_bla_claim_dump() - dump claim table to a netlink socket
* @msg: buffer for the message
* @cb: callback structure containing arguments
*
@@ -2247,8 +2247,8 @@ out:
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
- * file
+ * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
+ * seq file
* @seq: seq file to print on
* @offset: not used
*
@@ -2312,8 +2312,8 @@ out:
#endif
/**
- * batadv_bla_backbone_dump_entry - dump one entry of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
+ * netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @seq: Sequence number of netlink message
@@ -2373,8 +2373,8 @@ out:
}
/**
- * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
+ * a netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @seq: Sequence number of netlink message
@@ -2410,7 +2410,7 @@ unlock:
}
/**
- * batadv_bla_backbone_dump - dump backbone table to a netlink socket
+ * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
* @msg: buffer for the message
* @cb: callback structure containing arguments
*
@@ -2477,7 +2477,7 @@ out:
#ifdef CONFIG_BATMAN_ADV_DAT
/**
- * batadv_bla_check_claim - check if address is claimed
+ * batadv_bla_check_claim() - check if address is claimed
*
* @bat_priv: the bat priv with all the soft interface information
* @addr: mac address of which the claim status is checked
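The *_dump_entry()/*_dump_bucket()/*_dump() trios documented in this file all share the same netlink shape: reserve a generic netlink header with the requester's portid and seq, emit the attributes, and cancel the message on overflow. A schematic entry dumper, with a hypothetical family and attribute set:

static int example_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
                              const u8 *addr)
{
        void *hdr;

        hdr = genlmsg_put(msg, portid, seq, &example_genl_family,
                          NLM_F_MULTI, EXAMPLE_CMD_GET_ENTRY);
        if (!hdr)
                return -ENOBUFS;

        if (nla_put(msg, EXAMPLE_ATTR_ADDR, ETH_ALEN, addr)) {
                genlmsg_cancel(msg, hdr);
                return -EMSGSIZE;
        }

        genlmsg_end(msg, hdr);

        return 0;
}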
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 234775748b8e..b27571abcd2f 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
@@ -30,8 +31,8 @@ struct seq_file;
struct sk_buff;
/**
- * batadv_bla_is_loopdetect_mac - check if the mac address is from a loop detect
- * frame sent by bridge loop avoidance
+ * batadv_bla_is_loopdetect_mac() - check if the mac address is from a loop
+ * detect frame sent by bridge loop avoidance
* @mac: mac address to check
*
* Return: true if it looks like a loop detect frame
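The *_release()/*_put() pairs documented earlier in bridge_loop_avoidance.c (and mirrored in most other batman-adv objects) are instances of the standard kref pattern: the put helper drops one reference, and the release callback, invoked once the count hits zero, frees the object after an RCU grace period. A generic sketch with a hypothetical struct:

/* struct example is assumed to embed a struct kref refcount
 * and a struct rcu_head rcu
 */
static void example_release(struct kref *ref)
{
        struct example *obj = container_of(ref, struct example, refcount);

        kfree_rcu(obj, rcu);
}

static void example_put(struct example *obj)
{
        kref_put(&obj->refcount, example_release);
}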
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index e32ad47c6efd..21d1189957a7 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
@@ -25,7 +26,6 @@
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
-#include <linux/sched.h> /* for linux/wait.h */
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/stddef.h>
@@ -66,8 +66,8 @@ static int batadv_originators_open(struct inode *inode, struct file *file)
}
/**
- * batadv_originators_hardif_open - handles debugfs output for the
- * originator table of an hard interface
+ * batadv_originators_hardif_open() - handles debugfs output for the originator
+ * table of a hard interface
* @inode: inode pointer to debugfs file
* @file: pointer to the seq_file
*
@@ -117,7 +117,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
#ifdef CONFIG_BATMAN_ADV_DAT
/**
- * batadv_dat_cache_open - Prepare file handler for reads from dat_chache
+ * batadv_dat_cache_open() - Prepare file handler for reads from dat_chache
* @inode: inode which was opened
* @file: file handle to be initialized
*
@@ -154,7 +154,7 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
#ifdef CONFIG_BATMAN_ADV_MCAST
/**
- * batadv_mcast_flags_open - prepare file handler for reads from mcast_flags
+ * batadv_mcast_flags_open() - prepare file handler for reads from mcast_flags
* @inode: inode which was opened
* @file: file handle to be initialized
*
@@ -259,6 +259,9 @@ static struct batadv_debuginfo *batadv_hardif_debuginfos[] = {
NULL,
};
+/**
+ * batadv_debugfs_init() - Initialize soft interface independent debugfs entries
+ */
void batadv_debugfs_init(void)
{
struct batadv_debuginfo **bat_debug;
@@ -289,6 +292,9 @@ err:
batadv_debugfs = NULL;
}
+/**
+ * batadv_debugfs_destroy() - Remove all debugfs entries
+ */
void batadv_debugfs_destroy(void)
{
debugfs_remove_recursive(batadv_debugfs);
@@ -296,7 +302,7 @@ void batadv_debugfs_destroy(void)
}
/**
- * batadv_debugfs_add_hardif - creates the base directory for a hard interface
+ * batadv_debugfs_add_hardif() - creates the base directory for a hard interface
* in debugfs.
* @hard_iface: hard interface which should be added.
*
@@ -338,7 +344,7 @@ out:
}
/**
- * batadv_debugfs_del_hardif - delete the base directory for a hard interface
+ * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
* in debugfs.
* @hard_iface: hard interface which is deleted.
*/
@@ -355,6 +361,12 @@ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
}
}
+/**
+ * batadv_debugfs_add_meshif() - Initialize interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_debugfs_add_meshif(struct net_device *dev)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -401,6 +413,10 @@ out:
return -ENOMEM;
}
+/**
+ * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ */
void batadv_debugfs_del_meshif(struct net_device *dev)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
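All the *_open() handlers documented in this file bridge debugfs to the seq_file interface the same way: the net_device stashed in the inode's private field is handed to single_open() together with the table printer. A minimal sketch with a hypothetical printer callback:

static int example_open(struct inode *inode, struct file *file)
{
        struct net_device *net_dev = (struct net_device *)inode->i_private;

        return single_open(file, example_seq_print_text, net_dev);
}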
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 9c5d4a65b98c..90a08d35c501 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 760c0de72582..9703c791ffc5 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
@@ -23,7 +24,7 @@
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
@@ -55,7 +56,7 @@
static void batadv_dat_purge(struct work_struct *work);
/**
- * batadv_dat_start_timer - initialise the DAT periodic worker
+ * batadv_dat_start_timer() - initialise the DAT periodic worker
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
@@ -66,7 +67,7 @@ static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
}
/**
- * batadv_dat_entry_release - release dat_entry from lists and queue for free
+ * batadv_dat_entry_release() - release dat_entry from lists and queue for free
* after rcu grace period
* @ref: kref pointer of the dat_entry
*/
@@ -80,7 +81,7 @@ static void batadv_dat_entry_release(struct kref *ref)
}
/**
- * batadv_dat_entry_put - decrement the dat_entry refcounter and possibly
+ * batadv_dat_entry_put() - decrement the dat_entry refcounter and possibly
* release it
* @dat_entry: dat_entry to be free'd
*/
@@ -90,7 +91,7 @@ static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry)
}
/**
- * batadv_dat_to_purge - check whether a dat_entry has to be purged or not
+ * batadv_dat_to_purge() - check whether a dat_entry has to be purged or not
* @dat_entry: the entry to check
*
* Return: true if the entry has to be purged now, false otherwise.
@@ -102,7 +103,7 @@ static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
}
/**
- * __batadv_dat_purge - delete entries from the DAT local storage
+ * __batadv_dat_purge() - delete entries from the DAT local storage
* @bat_priv: the bat priv with all the soft interface information
* @to_purge: function in charge of deciding whether an entry has to be purged or
* not. This function takes the dat_entry as argument and has to
@@ -145,8 +146,8 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
}
/**
- * batadv_dat_purge - periodic task that deletes old entries from the local DAT
- * hash table
+ * batadv_dat_purge() - periodic task that deletes old entries from the local
+ * DAT hash table
* @work: kernel work struct
*/
static void batadv_dat_purge(struct work_struct *work)
@@ -164,7 +165,7 @@ static void batadv_dat_purge(struct work_struct *work)
}
/**
- * batadv_compare_dat - comparing function used in the local DAT hash table
+ * batadv_compare_dat() - comparing function used in the local DAT hash table
* @node: node in the local table
* @data2: second object to compare the node to
*
@@ -179,7 +180,7 @@ static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
}
/**
- * batadv_arp_hw_src - extract the hw_src field from an ARP packet
+ * batadv_arp_hw_src() - extract the hw_src field from an ARP packet
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
*
@@ -196,7 +197,7 @@ static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
}
/**
- * batadv_arp_ip_src - extract the ip_src field from an ARP packet
+ * batadv_arp_ip_src() - extract the ip_src field from an ARP packet
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
*
@@ -208,7 +209,7 @@ static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
}
/**
- * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet
+ * batadv_arp_hw_dst() - extract the hw_dst field from an ARP packet
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
*
@@ -220,7 +221,7 @@ static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
}
/**
- * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet
+ * batadv_arp_ip_dst() - extract the ip_dst field from an ARP packet
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
*
@@ -232,7 +233,7 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
}
/**
- * batadv_hash_dat - compute the hash value for an IP address
+ * batadv_hash_dat() - compute the hash value for an IP address
* @data: data to hash
* @size: size of the hash table
*
@@ -267,7 +268,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
}
/**
- * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
+ * batadv_dat_entry_hash_find() - look for a given dat_entry in the local hash
* table
* @bat_priv: the bat priv with all the soft interface information
* @ip: search key
@@ -310,7 +311,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
}
/**
- * batadv_dat_entry_add - add a new dat entry or update it if already exists
+ * batadv_dat_entry_add() - add a new dat entry or update an existing one
* @bat_priv: the bat priv with all the soft interface information
* @ip: ipv4 to add/edit
* @mac_addr: mac address to assign to the given ipv4
@@ -367,7 +368,8 @@ out:
#ifdef CONFIG_BATMAN_ADV_DEBUG
/**
- * batadv_dbg_arp - print a debug message containing all the ARP packet details
+ * batadv_dbg_arp() - print a debug message containing all the ARP packet
+ * details
* @bat_priv: the bat priv with all the soft interface information
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
@@ -448,7 +450,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
#endif /* CONFIG_BATMAN_ADV_DEBUG */
/**
- * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate
+ * batadv_is_orig_node_eligible() - check whether a node can be a DHT candidate
* @res: the array with the already selected candidates
* @select: number of already selected candidates
* @tmp_max: address of the currently evaluated node
@@ -502,7 +504,7 @@ out:
}
/**
- * batadv_choose_next_candidate - select the next DHT candidate
+ * batadv_choose_next_candidate() - select the next DHT candidate
* @bat_priv: the bat priv with all the soft interface information
* @cands: candidates array
* @select: number of candidates already present in the array
@@ -566,8 +568,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
}
/**
- * batadv_dat_select_candidates - select the nodes which the DHT message has to
- * be sent to
+ * batadv_dat_select_candidates() - select the nodes which the DHT message has
+ * to be sent to
* @bat_priv: the bat priv with all the soft interface information
* @ip_dst: ipv4 to look up in the DHT
* @vid: VLAN identifier
@@ -612,7 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
}
/**
- * batadv_dat_send_data - send a payload to the selected candidates
+ * batadv_dat_send_data() - send a payload to the selected candidates
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @ip: the DHT key
@@ -688,7 +690,7 @@ out:
}
/**
- * batadv_dat_tvlv_container_update - update the dat tvlv container after dat
+ * batadv_dat_tvlv_container_update() - update the dat tvlv container after dat
* setting change
* @bat_priv: the bat priv with all the soft interface information
*/
@@ -710,7 +712,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
}
/**
- * batadv_dat_status_update - update the dat tvlv container after dat
+ * batadv_dat_status_update() - update the dat tvlv container after dat
* setting change
* @net_dev: the soft interface net device
*/
@@ -722,7 +724,7 @@ void batadv_dat_status_update(struct net_device *net_dev)
}
/**
- * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * batadv_dat_tvlv_ogm_handler_v1() - process incoming dat tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -741,7 +743,7 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
}
/**
- * batadv_dat_hash_free - free the local DAT hash table
+ * batadv_dat_hash_free() - free the local DAT hash table
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
@@ -757,7 +759,7 @@ static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
}
/**
- * batadv_dat_init - initialise the DAT internals
+ * batadv_dat_init() - initialise the DAT internals
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 in case of success, a negative error code otherwise
@@ -782,7 +784,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
}
/**
- * batadv_dat_free - free the DAT internals
+ * batadv_dat_free() - free the DAT internals
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_dat_free(struct batadv_priv *bat_priv)
@@ -797,7 +799,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv)
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_dat_cache_seq_print_text - print the local DAT hash table
+ * batadv_dat_cache_seq_print_text() - print the local DAT hash table
* @seq: seq file to print on
* @offset: not used
*
@@ -850,7 +852,7 @@ out:
#endif
/**
- * batadv_arp_get_type - parse an ARP packet and gets the type
+ * batadv_arp_get_type() - parse an ARP packet and get the type
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to analyse
* @hdr_size: size of the possible header before the ARP packet in the skb
@@ -924,7 +926,7 @@ out:
}
/**
- * batadv_dat_get_vid - extract the VLAN identifier from skb if any
+ * batadv_dat_get_vid() - extract the VLAN identifier from skb if any
* @skb: the buffer containing the packet to extract the VID from
* @hdr_size: the size of the batman-adv header encapsulating the packet
*
@@ -950,7 +952,7 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
}
/**
- * batadv_dat_arp_create_reply - create an ARP Reply
+ * batadv_dat_arp_create_reply() - create an ARP Reply
* @bat_priv: the bat priv with all the soft interface information
* @ip_src: ARP sender IP
* @ip_dst: ARP target IP
@@ -985,7 +987,7 @@ batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
}
/**
- * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_outgoing_arp_request() - snoop the ARP request and try to
* answer using DAT
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
@@ -1083,7 +1085,7 @@ out:
}
/**
- * batadv_dat_snoop_incoming_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_incoming_arp_request() - snoop the ARP request and try to
* answer using the local DAT storage
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
@@ -1153,7 +1155,7 @@ out:
}
/**
- * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT
+ * batadv_dat_snoop_outgoing_arp_reply() - snoop the ARP reply and fill the DHT
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
*/
@@ -1193,8 +1195,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
}
/**
- * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
- * DAT storage only
+ * batadv_dat_snoop_incoming_arp_reply() - snoop the ARP reply and fill the
+ * local DAT storage only
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
* @hdr_size: size of the encapsulation header
@@ -1282,8 +1284,8 @@ out:
}
/**
- * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped
- * (because the node has already obtained the reply via DAT) or not
+ * batadv_dat_drop_broadcast_packet() - check if an ARP request has to be
+ * dropped (because the node has already obtained the reply via DAT) or not
* @bat_priv: the bat priv with all the soft interface information
* @forw_packet: the broadcast packet
*
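/* Editor's note: a minimal sketch of the kernel-doc convention this patch
 * applies throughout — function names in doc headers gain a "()" suffix so
 * the documentation tooling renders them as function references.
 * batadv_example() below is a hypothetical stand-in, not a batman-adv symbol.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>

/**
 * batadv_example() - summarise what the function does
 * @skb: packet to operate on
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int batadv_example(struct sk_buff *skb)
{
	return skb ? 0 : -EINVAL;
}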
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index ec364a3c1c66..12897eb46268 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
@@ -23,9 +24,9 @@
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
#include "originator.h"
-#include "packet.h"
struct seq_file;
struct sk_buff;
@@ -48,7 +49,7 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
struct batadv_forw_packet *forw_packet);
/**
- * batadv_dat_init_orig_node_addr - assign a DAT address to the orig_node
+ * batadv_dat_init_orig_node_addr() - assign a DAT address to the orig_node
* @orig_node: the node to assign the DAT address to
*/
static inline void
@@ -61,7 +62,7 @@ batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
}
/**
- * batadv_dat_init_own_addr - assign a DAT address to the node itself
+ * batadv_dat_init_own_addr() - assign a DAT address to the node itself
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: a pointer to the primary interface
*/
@@ -82,7 +83,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv);
int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset);
/**
- * batadv_dat_inc_counter - increment the correct DAT packet counter
+ * batadv_dat_inc_counter() - increment the correct DAT packet counter
* @bat_priv: the bat priv with all the soft interface information
* @subtype: the 4addr subtype of the packet to be counted
*
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index ebe6e38934e4..22dde42fd80e 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
@@ -22,7 +23,7 @@
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -32,16 +33,16 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
#include "hard-interface.h"
#include "originator.h"
-#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
/**
- * batadv_frag_clear_chain - delete entries in the fragment buffer chain
+ * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
* @head: head of chain with entries.
* @dropped: whether the chain is cleared because all fragments are dropped
*
@@ -65,7 +66,7 @@ static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
}
/**
- * batadv_frag_purge_orig - free fragments associated to an orig
+ * batadv_frag_purge_orig() - free fragments associated with an orig
* @orig_node: originator to free fragments from
* @check_cb: optional function to tell if an entry should be purged
*/
@@ -89,7 +90,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
}
/**
- * batadv_frag_size_limit - maximum possible size of packet to be fragmented
+ * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
*
* Return: the maximum size of payload that can be fragmented.
*/
@@ -104,7 +105,7 @@ static int batadv_frag_size_limit(void)
}
/**
- * batadv_frag_init_chain - check and prepare fragment chain for new fragment
+ * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
* @chain: chain in fragments table to init
* @seqno: sequence number of the received fragment
*
@@ -134,7 +135,7 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
}
/**
- * batadv_frag_insert_packet - insert a fragment into a fragment chain
+ * batadv_frag_insert_packet() - insert a fragment into a fragment chain
* @orig_node: originator that the fragment was received from
* @skb: skb to insert
* @chain_out: list head to attach complete chains of fragments to
@@ -248,7 +249,7 @@ err:
}
/**
- * batadv_frag_merge_packets - merge a chain of fragments
+ * batadv_frag_merge_packets() - merge a chain of fragments
* @chain: head of chain with fragments
*
* Expand the first skb in the chain and copy the content of the remaining
@@ -306,7 +307,7 @@ free:
}
/**
- * batadv_frag_skb_buffer - buffer fragment for later merge
+ * batadv_frag_skb_buffer() - buffer fragment for later merge
* @skb: skb to buffer
* @orig_node_src: originator that the skb is received from
*
@@ -346,7 +347,7 @@ out_err:
}
/**
- * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
+ * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
* @skb: skb to forward
* @recv_if: interface that the skb is received on
* @orig_node_src: originator that the skb is received from
@@ -400,7 +401,7 @@ out:
}
/**
- * batadv_frag_create - create a fragment from skb
+ * batadv_frag_create() - create a fragment from skb
* @skb: skb to create fragment from
* @frag_head: header to use in new fragment
* @fragment_size: size of new fragment
@@ -438,7 +439,7 @@ err:
}
/**
- * batadv_frag_send_packet - create up to 16 fragments from the passed skb
+ * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
* @skb: skb to create fragments from
* @orig_node: final destination of the created fragments
* @neigh_node: next-hop of the created fragments
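/* Editor's note: a hedged sketch of the size budget behind
 * batadv_frag_send_packet() — the payload that can be fragmented is bounded
 * by the per-fragment size minus the fragment header, times the maximum
 * fragment count of 16 mentioned above. The EXAMPLE_* constants are
 * illustrative assumptions, not the exact kernel values.
 */
#define EXAMPLE_FRAG_MAX_FRAGMENTS	16	/* "up to 16 fragments" */
#define EXAMPLE_FRAG_MAX_FRAG_SIZE	1400	/* assumed per-fragment budget */
#define EXAMPLE_FRAG_HDR_SIZE		20	/* assumed fragment header size */

static int example_frag_size_limit(void)
{
	int limit = EXAMPLE_FRAG_MAX_FRAG_SIZE;

	limit -= EXAMPLE_FRAG_HDR_SIZE;
	limit *= EXAMPLE_FRAG_MAX_FRAGMENTS;

	return limit;	/* largest payload that can still be fragmented */
}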
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
index 1a2d6c308745..138b22a1836a 100644
--- a/net/batman-adv/fragmentation.h
+++ b/net/batman-adv/fragmentation.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
@@ -39,7 +40,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
struct batadv_neigh_node *neigh_node);
/**
- * batadv_frag_check_entry - check if a list of fragments has timed out
+ * batadv_frag_check_entry() - check if a list of fragments has timed out
* @frags_entry: table entry to check
*
* Return: true if the frags entry has timed out, false otherwise.
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 10d521f0b17f..37fe9a644f22 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
@@ -22,7 +23,7 @@
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -42,6 +43,7 @@
#include <linux/stddef.h>
#include <linux/udp.h>
#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "gateway_common.h"
@@ -49,7 +51,6 @@
#include "log.h"
#include "netlink.h"
#include "originator.h"
-#include "packet.h"
#include "routing.h"
#include "soft-interface.h"
#include "sysfs.h"
@@ -68,8 +69,8 @@
#define BATADV_DHCP_CHADDR_OFFSET 28
/**
- * batadv_gw_node_release - release gw_node from lists and queue for free after
- * rcu grace period
+ * batadv_gw_node_release() - release gw_node from lists and queue for free
+ * after rcu grace period
* @ref: kref pointer of the gw_node
*/
static void batadv_gw_node_release(struct kref *ref)
@@ -83,7 +84,8 @@ static void batadv_gw_node_release(struct kref *ref)
}
/**
- * batadv_gw_node_put - decrement the gw_node refcounter and possibly release it
+ * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release
+ * it
* @gw_node: gateway node to free
*/
void batadv_gw_node_put(struct batadv_gw_node *gw_node)
@@ -91,6 +93,12 @@ void batadv_gw_node_put(struct batadv_gw_node *gw_node)
kref_put(&gw_node->refcount, batadv_gw_node_release);
}
+/**
+ * batadv_gw_get_selected_gw_node() - Get currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: selected gateway (with increased refcnt), NULL on errors
+ */
struct batadv_gw_node *
batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
{
@@ -109,6 +117,12 @@ out:
return gw_node;
}
+/**
+ * batadv_gw_get_selected_orig() - Get originator of currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: orig_node of selected gateway (with increased refcnt), NULL on errors
+ */
struct batadv_orig_node *
batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
{
@@ -155,7 +169,7 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
}
/**
- * batadv_gw_reselect - force a gateway reselection
+ * batadv_gw_reselect() - force a gateway reselection
* @bat_priv: the bat priv with all the soft interface information
*
* Set a flag to remind the GW component to perform a new gateway reselection.
@@ -171,7 +185,7 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv)
}
/**
- * batadv_gw_check_client_stop - check if client mode has been switched off
+ * batadv_gw_check_client_stop() - check if client mode has been switched off
* @bat_priv: the bat priv with all the soft interface information
*
* This function assumes the caller has checked that the gw state *is actually
@@ -202,6 +216,10 @@ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
batadv_gw_node_put(curr_gw);
}
+/**
+ * batadv_gw_election() - Elect the best gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ */
void batadv_gw_election(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *curr_gw = NULL;
@@ -290,6 +308,11 @@ out:
batadv_neigh_ifinfo_put(router_ifinfo);
}
+/**
+ * batadv_gw_check_election() - Elect orig node as best gateway when eligible
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ */
void batadv_gw_check_election(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
@@ -321,7 +344,7 @@ out:
}
/**
- * batadv_gw_node_add - add gateway node to list of available gateways
+ * batadv_gw_node_add() - add gateway node to list of available gateways
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
* @gateway: announced bandwidth information
@@ -364,7 +387,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
}
/**
- * batadv_gw_node_get - retrieve gateway node from list of available gateways
+ * batadv_gw_node_get() - retrieve gateway node from list of available gateways
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
*
@@ -393,7 +416,7 @@ struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
}
/**
- * batadv_gw_node_update - update list of available gateways with changed
+ * batadv_gw_node_update() - update list of available gateways with changed
* bandwidth information
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
@@ -458,6 +481,11 @@ out:
batadv_gw_node_put(gw_node);
}
+/**
+ * batadv_gw_node_delete() - Remove orig_node from gateway list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is currently in process of being removed
+ */
void batadv_gw_node_delete(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
@@ -469,6 +497,10 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
batadv_gw_node_update(bat_priv, orig_node, &gateway);
}
+/**
+ * batadv_gw_node_free() - Free gateway information from soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
void batadv_gw_node_free(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *gw_node;
@@ -484,6 +516,14 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
}
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_gw_client_seq_print_text() - Print the gateway table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
@@ -514,7 +554,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
#endif
/**
- * batadv_gw_dump - Dump gateways into a message
+ * batadv_gw_dump() - Dump gateways into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
*
@@ -567,7 +607,7 @@ out:
}
/**
- * batadv_gw_dhcp_recipient_get - check if a packet is a DHCP message
+ * batadv_gw_dhcp_recipient_get() - check if a packet is a DHCP message
* @skb: the packet to check
* @header_len: a pointer to the batman-adv header size
* @chaddr: buffer where the client address will be stored. Valid
@@ -686,7 +726,8 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
}
/**
- * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
+ * batadv_gw_out_of_range() - check if the dhcp request destination is the best
+ * gateway
* @bat_priv: the bat priv with all the soft interface information
* @skb: the outgoing packet
*
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 3baa3d466e5e..981f58421a32 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 2c26039c23fc..b3e156af2256 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
@@ -26,15 +27,15 @@
#include <linux/netdevice.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
#include "gateway_client.h"
#include "log.h"
-#include "packet.h"
#include "tvlv.h"
/**
- * batadv_parse_throughput - parse supplied string buffer to extract throughput
- * information
+ * batadv_parse_throughput() - parse supplied string buffer to extract
+ * throughput information
* @net_dev: the soft interface net device
* @buff: string buffer to parse
* @description: text shown when throughput string cannot be parsed
@@ -100,8 +101,8 @@ bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
}
/**
- * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
- * and upload bandwidth information
+ * batadv_parse_gw_bandwidth() - parse supplied string buffer to extract
+ * download and upload bandwidth information
* @net_dev: the soft interface net device
* @buff: string buffer to parse
* @down: pointer holding the returned download bandwidth information
@@ -136,8 +137,8 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
}
/**
- * batadv_gw_tvlv_container_update - update the gw tvlv container after gateway
- * setting change
+ * batadv_gw_tvlv_container_update() - update the gw tvlv container after
+ * gateway setting change
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -164,6 +165,15 @@ void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
}
}
+/**
+ * batadv_gw_bandwidth_set() - Parse and set download/upload gateway bandwidth
+ * from supplied string buffer
+ * @net_dev: netdev struct of the soft interface
+ * @buff: the buffer containing the user data
+ * @count: number of bytes in the buffer
+ *
+ * Return: 'count' on success or a negative error code in case of failure
+ */
ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
size_t count)
{
@@ -207,7 +217,7 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
}
/**
- * batadv_gw_tvlv_ogm_handler_v1 - process incoming gateway tvlv container
+ * batadv_gw_tvlv_ogm_handler_v1() - process incoming gateway tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -248,7 +258,7 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
}
/**
- * batadv_gw_init - initialise the gateway handling internals
+ * batadv_gw_init() - initialise the gateway handling internals
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_gw_init(struct batadv_priv *bat_priv)
@@ -264,7 +274,7 @@ void batadv_gw_init(struct batadv_priv *bat_priv)
}
/**
- * batadv_gw_free - free the gateway handling internals
+ * batadv_gw_free() - free the gateway handling internals
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_gw_free(struct batadv_priv *bat_priv)
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 0a6a97d201f2..afebd9c7edf4 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
@@ -32,11 +33,12 @@ enum batadv_gw_modes {
/**
* enum batadv_bandwidth_units - bandwidth unit types
- * @BATADV_BW_UNIT_KBIT: unit type kbit
- * @BATADV_BW_UNIT_MBIT: unit type mbit
*/
enum batadv_bandwidth_units {
+ /** @BATADV_BW_UNIT_KBIT: unit type kbit */
BATADV_BW_UNIT_KBIT,
+
+ /** @BATADV_BW_UNIT_MBIT: unit type mbit */
BATADV_BW_UNIT_MBIT,
};
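/* Editor's note: the enum conversions in this patch all follow the same
 * pattern — member descriptions move out of the header comment into inline
 * kernel-doc directly above each enumerator. A hypothetical example of the
 * target form:
 */

/**
 * enum example_units - bandwidth unit types
 */
enum example_units {
	/** @EXAMPLE_UNIT_KBIT: unit type kbit */
	EXAMPLE_UNIT_KBIT,

	/** @EXAMPLE_UNIT_MBIT: unit type mbit */
	EXAMPLE_UNIT_MBIT,
};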
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4e3d5340ad96..5f186bff284a 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -22,7 +23,7 @@
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -37,6 +38,7 @@
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
#include "bat_v.h"
#include "bridge_loop_avoidance.h"
@@ -45,14 +47,13 @@
#include "gateway_client.h"
#include "log.h"
#include "originator.h"
-#include "packet.h"
#include "send.h"
#include "soft-interface.h"
#include "sysfs.h"
#include "translation-table.h"
/**
- * batadv_hardif_release - release hard interface from lists and queue for
+ * batadv_hardif_release() - release hard interface from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the hard interface
*/
@@ -66,6 +67,12 @@ void batadv_hardif_release(struct kref *ref)
kfree_rcu(hard_iface, rcu);
}
+/**
+ * batadv_hardif_get_by_netdev() - Get hard interface object of a net_device
+ * @net_dev: net_device to search for
+ *
+ * Return: batadv_hard_iface of net_dev (with increased refcnt), NULL on errors
+ */
struct batadv_hard_iface *
batadv_hardif_get_by_netdev(const struct net_device *net_dev)
{
@@ -86,7 +93,7 @@ out:
}
/**
- * batadv_getlink_net - return link net namespace (of use fallback)
+ * batadv_getlink_net() - return link net namespace (or use fallback)
* @netdev: net_device to check
* @fallback_net: return in case get_link_net is not available for @netdev
*
@@ -105,7 +112,7 @@ static struct net *batadv_getlink_net(const struct net_device *netdev,
}
/**
- * batadv_mutual_parents - check if two devices are each others parent
+ * batadv_mutual_parents() - check if two devices are each other's parent
* @dev1: 1st net dev
* @net1: 1st devices netns
* @dev2: 2nd net dev
@@ -138,7 +145,7 @@ static bool batadv_mutual_parents(const struct net_device *dev1,
}
/**
- * batadv_is_on_batman_iface - check if a device is a batman iface descendant
+ * batadv_is_on_batman_iface() - check if a device is a batman iface descendant
* @net_dev: the device to check
*
* If the user creates any virtual device on top of a batman-adv interface, it
@@ -202,7 +209,7 @@ static bool batadv_is_valid_iface(const struct net_device *net_dev)
}
/**
- * batadv_get_real_netdevice - check if the given netdev struct is a virtual
+ * batadv_get_real_netdevice() - check if the given netdev struct is a virtual
* interface on top of another 'real' interface
* @netdev: the device to check
*
@@ -246,7 +253,7 @@ out:
}
/**
- * batadv_get_real_netdev - check if the given net_device struct is a virtual
+ * batadv_get_real_netdev() - check if the given net_device struct is a virtual
* interface on top of another 'real' interface
* @net_device: the device to check
*
@@ -265,7 +272,7 @@ struct net_device *batadv_get_real_netdev(struct net_device *net_device)
}
/**
- * batadv_is_wext_netdev - check if the given net_device struct is a
+ * batadv_is_wext_netdev() - check if the given net_device struct is a
* wext wifi interface
* @net_device: the device to check
*
@@ -289,7 +296,7 @@ static bool batadv_is_wext_netdev(struct net_device *net_device)
}
/**
- * batadv_is_cfg80211_netdev - check if the given net_device struct is a
+ * batadv_is_cfg80211_netdev() - check if the given net_device struct is a
* cfg80211 wifi interface
* @net_device: the device to check
*
@@ -309,7 +316,7 @@ static bool batadv_is_cfg80211_netdev(struct net_device *net_device)
}
/**
- * batadv_wifi_flags_evaluate - calculate wifi flags for net_device
+ * batadv_wifi_flags_evaluate() - calculate wifi flags for net_device
* @net_device: the device to check
*
* Return: batadv_hard_iface_wifi_flags flags of the device
@@ -344,7 +351,7 @@ out:
}
/**
- * batadv_is_cfg80211_hardif - check if the given hardif is a cfg80211 wifi
+ * batadv_is_cfg80211_hardif() - check if the given hardif is a cfg80211 wifi
* interface
* @hard_iface: the device to check
*
@@ -362,7 +369,7 @@ bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_is_wifi_hardif - check if the given hardif is a wifi interface
+ * batadv_is_wifi_hardif() - check if the given hardif is a wifi interface
* @hard_iface: the device to check
*
* Return: true if the net device is a 802.11 wireless device, false otherwise.
@@ -376,7 +383,7 @@ bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_hardif_no_broadcast - check whether (re)broadcast is necessary
+ * batadv_hardif_no_broadcast() - check whether (re)broadcast is necessary
* @if_outgoing: the outgoing interface checked and considered for (re)broadcast
* @orig_addr: the originator of this packet
* @orig_neigh: originator address of the forwarder we just got the packet from
@@ -560,6 +567,13 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
soft_iface->needed_tailroom = lower_tailroom;
}
+/**
+ * batadv_hardif_min_mtu() - Calculate maximum MTU for soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: MTU for the soft-interface (limited by the minimal MTU of all active
+ * slave interfaces)
+ */
int batadv_hardif_min_mtu(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -606,7 +620,11 @@ out:
return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
}
-/* adjusts the MTU if a new interface with a smaller MTU appeared. */
+/**
+ * batadv_update_min_mtu() - Adjusts the MTU if a new interface with a smaller
+ * MTU appeared
+ * @soft_iface: netdev struct of the soft interface
+ */
void batadv_update_min_mtu(struct net_device *soft_iface)
{
soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
@@ -667,7 +685,7 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_master_del_slave - remove hard_iface from the current master interface
+ * batadv_master_del_slave() - remove hard_iface from the current master iface
* @slave: the interface enslaved in another master
* @master: the master from which slave has to be removed
*
@@ -691,6 +709,14 @@ static int batadv_master_del_slave(struct batadv_hard_iface *slave,
return ret;
}
+/**
+ * batadv_hardif_enable_interface() - Enslave hard interface to soft interface
+ * @hard_iface: hard interface to add to soft interface
+ * @net: the applicable net namespace
+ * @iface_name: name of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
struct net *net, const char *iface_name)
{
@@ -802,6 +828,12 @@ err:
return ret;
}
+/**
+ * batadv_hardif_disable_interface() - Remove hard interface from soft interface
+ * @hard_iface: hard interface to be removed
+ * @autodel: whether to delete soft interface when it doesn't contain any other
+ * slave interfaces
+ */
void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
enum batadv_hard_if_cleanup autodel)
{
@@ -936,6 +968,9 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
batadv_hardif_put(hard_iface);
}
+/**
+ * batadv_hardif_remove_interfaces() - Remove all hard interfaces
+ */
void batadv_hardif_remove_interfaces(void)
{
struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 9f9890ff7a22..de5e9a374ece 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -30,36 +31,74 @@
struct net_device;
struct net;
+/**
+ * enum batadv_hard_if_state - State of a hard interface
+ */
enum batadv_hard_if_state {
+ /**
+ * @BATADV_IF_NOT_IN_USE: interface is not used as slave interface of a
+ * batman-adv soft interface
+ */
BATADV_IF_NOT_IN_USE,
+
+ /**
+ * @BATADV_IF_TO_BE_REMOVED: interface will be removed from soft
+ * interface
+ */
BATADV_IF_TO_BE_REMOVED,
+
+ /** @BATADV_IF_INACTIVE: interface is deactivated */
BATADV_IF_INACTIVE,
+
+ /** @BATADV_IF_ACTIVE: interface is used */
BATADV_IF_ACTIVE,
+
+ /** @BATADV_IF_TO_BE_ACTIVATED: interface is getting activated */
BATADV_IF_TO_BE_ACTIVATED,
+
+ /**
+ * @BATADV_IF_I_WANT_YOU: interface is queued up (using sysfs) for being
+ * added as slave interface of a batman-adv soft interface
+ */
BATADV_IF_I_WANT_YOU,
};
/**
* enum batadv_hard_if_bcast - broadcast avoidance options
- * @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface
- * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no recipient
- * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it from
- * @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator
*/
enum batadv_hard_if_bcast {
+ /** @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface */
BATADV_HARDIF_BCAST_OK = 0,
+
+ /**
+ * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no
+ * recipient
+ */
BATADV_HARDIF_BCAST_NORECIPIENT,
+
+ /**
+ * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it
+ * from
+ */
BATADV_HARDIF_BCAST_DUPFWD,
+
+ /** @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator */
BATADV_HARDIF_BCAST_DUPORIG,
};
/**
* enum batadv_hard_if_cleanup - Cleanup modi for soft_iface after slave removal
- * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
- * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was removed
*/
enum batadv_hard_if_cleanup {
+ /**
+ * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
+ */
BATADV_IF_CLEANUP_KEEP,
+
+ /**
+ * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was
+ * removed
+ */
BATADV_IF_CLEANUP_AUTO,
};
@@ -82,7 +121,7 @@ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
u8 *orig_addr, u8 *orig_neigh);
/**
- * batadv_hardif_put - decrement the hard interface refcounter and possibly
+ * batadv_hardif_put() - decrement the hard interface refcounter and possibly
* release it
* @hard_iface: the hard interface to free
*/
@@ -91,6 +130,12 @@ static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface)
kref_put(&hard_iface->refcount, batadv_hardif_release);
}
+/**
+ * batadv_primary_if_get_selected() - Get reference to primary interface
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: primary interface (with increased refcnt), otherwise NULL
+ */
static inline struct batadv_hard_iface *
batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
{
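/* Editor's note: a minimal sketch of the refcounting pattern behind
 * batadv_hardif_put() and batadv_hardif_release() — kref_put() invokes the
 * release callback only when the last reference is dropped. struct
 * example_iface is a hypothetical stand-in; the real release path frees via
 * kfree_rcu() after an RCU grace period, simplified to kfree() here.
 */
#include <linux/kref.h>
#include <linux/slab.h>

struct example_iface {
	struct kref refcount;
};

static void example_iface_release(struct kref *ref)
{
	struct example_iface *iface;

	iface = container_of(ref, struct example_iface, refcount);
	kfree(iface);
}

static inline void example_iface_put(struct example_iface *iface)
{
	kref_put(&iface->refcount, example_iface_release);
}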
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index b5f7e13918ac..04d964358c98 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
@@ -18,7 +19,7 @@
#include "hash.h"
#include "main.h"
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
@@ -33,7 +34,10 @@ static void batadv_hash_init(struct batadv_hashtable *hash)
}
}
-/* free only the hashtable and the hash itself. */
+/**
+ * batadv_hash_destroy() - Free only the hashtable and the hash itself
+ * @hash: hash object to destroy
+ */
void batadv_hash_destroy(struct batadv_hashtable *hash)
{
kfree(hash->list_locks);
@@ -41,7 +45,12 @@ void batadv_hash_destroy(struct batadv_hashtable *hash)
kfree(hash);
}
-/* allocates and clears the hash */
+/**
+ * batadv_hash_new() - Allocates and clears the hashtable
+ * @size: number of hash buckets to allocate
+ *
+ * Return: newly allocated hashtable, NULL on errors
+ */
struct batadv_hashtable *batadv_hash_new(u32 size)
{
struct batadv_hashtable *hash;
@@ -70,6 +79,11 @@ free_hash:
return NULL;
}
+/**
+ * batadv_hash_set_lock_class() - Set specific lockdep class for hash spinlocks
+ * @hash: hash object to modify
+ * @key: lockdep class key address
+ */
void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
struct lock_class_key *key)
{
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 0c905e91c5e2..4ce1b6d3ad5c 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
@@ -45,10 +46,18 @@ typedef bool (*batadv_hashdata_compare_cb)(const struct hlist_node *,
typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
+/**
+ * struct batadv_hashtable - Wrapper of simple hlist based hashtable
+ */
struct batadv_hashtable {
- struct hlist_head *table; /* the hashtable itself with the buckets */
- spinlock_t *list_locks; /* spinlock for each hash list entry */
- u32 size; /* size of hashtable */
+ /** @table: the hashtable itself with the buckets */
+ struct hlist_head *table;
+
+ /** @list_locks: spinlock for each hash list entry */
+ spinlock_t *list_locks;
+
+ /** @size: size of hashtable */
+ u32 size;
};
/* allocates and clears the hash */
@@ -62,7 +71,7 @@ void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
void batadv_hash_destroy(struct batadv_hashtable *hash);
/**
- * batadv_hash_add - adds data to the hashtable
+ * batadv_hash_add() - adds data to the hashtable
* @hash: storage hash table
* @compare: callback to determine if 2 hash elements are identical
* @choose: callback calculating the hash index
@@ -112,8 +121,15 @@ out:
return ret;
}
-/* removes data from hash, if found. data could be the structure you use with
- * just the key filled, we just need the key for comparing.
+/**
+ * batadv_hash_remove() - Removes data from hash, if found
+ * @hash: hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ *
+ * Data could be the structure you use with just the key filled; we just need
+ * the key for comparing.
*
* Return: pointer to the data on success, so you can remove the used
* structure yourself, or NULL on error
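/* Editor's note: struct batadv_hashtable pairs every hlist bucket with its
 * own spinlock, so writers only serialise per bucket. A hedged sketch of an
 * insert under the bucket lock; the duplicate check that batadv_hash_add()
 * performs is omitted for brevity, and all example_* names are hypothetical.
 */
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_hashtable {
	struct hlist_head *table;	/* the buckets */
	spinlock_t *list_locks;		/* one lock per bucket */
	u32 size;
};

static void example_hash_add(struct example_hashtable *hash,
			     struct hlist_node *node, u32 index)
{
	spin_lock_bh(&hash->list_locks[index]);
	hlist_add_head_rcu(node, &hash->table[index]);
	spin_unlock_bh(&hash->list_locks[index]);
}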
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index bded31121d12..581375d0eed2 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
@@ -26,6 +27,7 @@
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -42,11 +44,11 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
+#include <uapi/linux/batadv_packet.h>
#include "hard-interface.h"
#include "log.h"
#include "originator.h"
-#include "packet.h"
#include "send.h"
static struct batadv_socket_client *batadv_socket_client_hash[256];
@@ -55,6 +57,9 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
struct batadv_icmp_header *icmph,
size_t icmp_len);
+/**
+ * batadv_socket_init() - Initialize soft interface independent socket data
+ */
void batadv_socket_init(void)
{
memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
@@ -292,7 +297,7 @@ out:
return len;
}
-static unsigned int batadv_socket_poll(struct file *file, poll_table *wait)
+static __poll_t batadv_socket_poll(struct file *file, poll_table *wait)
{
struct batadv_socket_client *socket_client = file->private_data;
@@ -314,6 +319,12 @@ static const struct file_operations batadv_fops = {
.llseek = no_llseek,
};
+/**
+ * batadv_socket_setup() - Create debugfs "socket" file
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_socket_setup(struct batadv_priv *bat_priv)
{
struct dentry *d;
@@ -333,7 +344,7 @@ err:
}
/**
- * batadv_socket_add_packet - schedule an icmp packet to be sent to
+ * batadv_socket_add_packet() - schedule an icmp packet to be sent to
* userspace on an icmp socket.
* @socket_client: the socket this packet belongs to
* @icmph: pointer to the header of the icmp packet
@@ -390,7 +401,7 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
}
/**
- * batadv_socket_receive_packet - schedule an icmp packet to be received
+ * batadv_socket_receive_packet() - schedule an icmp packet to be received
* locally and sent to userspace.
* @icmph: pointer to the header of the icmp packet
* @icmp_len: total length of the icmp packet
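/* Editor's note: both poll handlers touched by this patch change their
 * return type from unsigned int to __poll_t, the sparse-annotated type for
 * poll masks. A hedged sketch of the pattern; the wait queue and readiness
 * flag are hypothetical stand-ins for file->private_data state.
 */
#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_queue);
static bool example_data_ready;

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &example_queue, wait);

	if (example_data_ready)
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}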
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index f3fec40aae86..84cddd01eeab 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 4ef4bde2cc2d..9be74a44e99d 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
@@ -24,6 +25,7 @@
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -86,6 +88,13 @@ static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
return 0;
}
+/**
+ * batadv_debug_log() - Add debug log entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @fmt: format string
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
{
va_list args;
@@ -176,7 +185,7 @@ static ssize_t batadv_log_read(struct file *file, char __user *buf,
return error;
}
-static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
+static __poll_t batadv_log_poll(struct file *file, poll_table *wait)
{
struct batadv_priv *bat_priv = file->private_data;
struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
@@ -197,6 +206,12 @@ static const struct file_operations batadv_log_fops = {
.llseek = no_llseek,
};
+/**
+ * batadv_debug_log_setup() - Initialize debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_debug_log_setup(struct batadv_priv *bat_priv)
{
struct dentry *d;
@@ -222,6 +237,10 @@ err:
return -ENOMEM;
}
+/**
+ * batadv_debug_log_cleanup() - Destroy debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ */
void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
{
kfree(bat_priv->debug_log);
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index 65ce97efa6b5..35e02b2b9e72 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -44,25 +45,33 @@ static inline void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
/**
* enum batadv_dbg_level - available log levels
- * @BATADV_DBG_BATMAN: OGM and TQ computations related messages
- * @BATADV_DBG_ROUTES: route added / changed / deleted
- * @BATADV_DBG_TT: translation table messages
- * @BATADV_DBG_BLA: bridge loop avoidance messages
- * @BATADV_DBG_DAT: ARP snooping and DAT related messages
- * @BATADV_DBG_NC: network coding related messages
- * @BATADV_DBG_MCAST: multicast related messages
- * @BATADV_DBG_TP_METER: throughput meter messages
- * @BATADV_DBG_ALL: the union of all the above log levels
*/
enum batadv_dbg_level {
+ /** @BATADV_DBG_BATMAN: OGM and TQ computations related messages */
BATADV_DBG_BATMAN = BIT(0),
+
+ /** @BATADV_DBG_ROUTES: route added / changed / deleted */
BATADV_DBG_ROUTES = BIT(1),
+
+ /** @BATADV_DBG_TT: translation table messages */
BATADV_DBG_TT = BIT(2),
+
+ /** @BATADV_DBG_BLA: bridge loop avoidance messages */
BATADV_DBG_BLA = BIT(3),
+
+ /** @BATADV_DBG_DAT: ARP snooping and DAT related messages */
BATADV_DBG_DAT = BIT(4),
+
+ /** @BATADV_DBG_NC: network coding related messages */
BATADV_DBG_NC = BIT(5),
+
+ /** @BATADV_DBG_MCAST: multicast related messages */
BATADV_DBG_MCAST = BIT(6),
+
+ /** @BATADV_DBG_TP_METER: throughput meter messages */
BATADV_DBG_TP_METER = BIT(7),
+
+ /** @BATADV_DBG_ALL: the union of all the above log levels */
BATADV_DBG_ALL = 255,
};
@@ -70,7 +79,14 @@ enum batadv_dbg_level {
int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
__printf(2, 3);
-/* possibly ratelimited debug output */
+/**
+ * _batadv_dbg() - Store debug output with(out) ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ratelimited: whether output should be rate limited
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
#define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...) \
do { \
struct batadv_priv *__batpriv = (bat_priv); \
@@ -89,11 +105,30 @@ static inline void _batadv_dbg(int type __always_unused,
}
#endif
+/**
+ * batadv_dbg() - Store debug output without ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
#define batadv_dbg(type, bat_priv, arg...) \
_batadv_dbg(type, bat_priv, 0, ## arg)
+
+/**
+ * batadv_dbg_ratelimited() - Store debug output with ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
#define batadv_dbg_ratelimited(type, bat_priv, arg...) \
_batadv_dbg(type, bat_priv, 1, ## arg)
+/**
+ * batadv_info() - Store message in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
#define batadv_info(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
@@ -101,6 +136,13 @@ static inline void _batadv_dbg(int type __always_unused,
batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
pr_info("%s: " fmt, _netdev->name, ## arg); \
} while (0)
+
+/**
+ * batadv_err() - Store error in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
#define batadv_err(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
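/* Editor's note: a hedged sketch of the shape shared by _batadv_dbg(),
 * batadv_info() and batadv_err() — a do { } while (0) wrapper that evaluates
 * its argument once and gates output on a per-interface log-level bitmask
 * plus optional ratelimiting. struct example_priv and example_log() are
 * hypothetical stand-ins.
 */
#define example_dbg(type, priv, ratelimited, fmt, arg...)		\
	do {								\
		struct example_priv *__priv = (priv);			\
									\
		if (atomic_read(&__priv->log_level) & (type) &&		\
		    (!(ratelimited) || net_ratelimit()))		\
			example_log(__priv, fmt, ## arg);		\
	} while (0)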
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 4daed7ad46f2..d31c8266e244 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -18,12 +19,12 @@
#include "main.h"
#include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
#include <linux/byteorder/generic.h>
#include <linux/crc32c.h>
#include <linux/errno.h>
-#include <linux/fs.h>
#include <linux/genetlink.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
@@ -45,6 +46,7 @@
#include <linux/workqueue.h>
#include <net/dsfield.h>
#include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
@@ -62,7 +64,6 @@
#include "netlink.h"
#include "network-coding.h"
#include "originator.h"
-#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
@@ -139,6 +140,12 @@ static void __exit batadv_exit(void)
batadv_tt_cache_destroy();
}
+/**
+ * batadv_mesh_init() - Initialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_mesh_init(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -216,6 +223,10 @@ err:
return ret;
}
+/**
+ * batadv_mesh_free() - Deinitialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ */
void batadv_mesh_free(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -255,8 +266,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
}
/**
- * batadv_is_my_mac - check if the given mac address belongs to any of the real
- * interfaces in the current mesh
+ * batadv_is_my_mac() - check if the given mac address belongs to any of the
+ * real interfaces in the current mesh
* @bat_priv: the bat priv with all the soft interface information
* @addr: the address to check
*
@@ -286,7 +297,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_seq_print_text_primary_if_get - called from debugfs table printing
+ * batadv_seq_print_text_primary_if_get() - called from debugfs table printing
* function that requires the primary interface
* @seq: debugfs table seq_file struct
*
@@ -323,7 +334,7 @@ out:
#endif
/**
- * batadv_max_header_len - calculate maximum encapsulation overhead for a
+ * batadv_max_header_len() - calculate maximum encapsulation overhead for a
* payload packet
*
* Return: the maximum encapsulation overhead in bytes.
@@ -348,7 +359,7 @@ int batadv_max_header_len(void)
}
/**
- * batadv_skb_set_priority - sets skb priority according to packet content
+ * batadv_skb_set_priority() - sets skb priority according to packet content
* @skb: the packet to be sent
* @offset: offset to the packet content
*
@@ -412,6 +423,16 @@ static int batadv_recv_unhandled_packet(struct sk_buff *skb,
/* incoming packets with the batman ethertype received on any active hard
* interface
*/
+
+/**
+ * batadv_batman_skb_recv() - Handle incoming message from a hard interface
+ * @skb: the received packet
+ * @dev: the net device that the packet was received on
+ * @ptype: packet type of incoming packet (ETH_P_BATMAN)
+ * @orig_dev: the original receive net device (e.g. bonded device)
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev)
@@ -535,6 +556,13 @@ static void batadv_recv_handler_init(void)
batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}
+/**
+ * batadv_recv_handler_register() - Register handler for batman-adv packet type
+ * @packet_type: batadv_packettype which should be handled
+ * @recv_handler: receive handler for the packet type
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int
batadv_recv_handler_register(u8 packet_type,
int (*recv_handler)(struct sk_buff *,
@@ -552,13 +580,17 @@ batadv_recv_handler_register(u8 packet_type,
return 0;
}
+/**
+ * batadv_recv_handler_unregister() - Unregister handler for packet type
+ * @packet_type: batadv_packettype which should no longer be handled
+ */
void batadv_recv_handler_unregister(u8 packet_type)
{
batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
/**
- * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
+ * batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
* the header
* @skb: skb pointing to fragmented socket buffers
* @payload_ptr: Pointer to position inside the head buffer of the skb
@@ -591,7 +623,7 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
}
/**
- * batadv_get_vid - extract the VLAN identifier from skb if any
+ * batadv_get_vid() - extract the VLAN identifier from skb if any
* @skb: the buffer containing the packet
* @header_len: length of the batman header preceding the ethernet header
*
@@ -618,7 +650,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
}
/**
- * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
+ * batadv_vlan_ap_isola_get() - return AP isolation status for the given vlan
* @bat_priv: the bat priv with all the soft interface information
* @vid: the VLAN identifier for which the AP isolation attribute has to be
* looked up
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index edb2f239d04d..f7ba3f96d8f3 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -24,7 +25,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2017.4"
+#define BATADV_SOURCE_VERSION "2018.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -140,24 +141,56 @@
*/
#define BATADV_TP_MAX_NUM 5
+/**
+ * enum batadv_mesh_state - State of a soft interface
+ */
enum batadv_mesh_state {
+ /** @BATADV_MESH_INACTIVE: soft interface is not yet running */
BATADV_MESH_INACTIVE,
+
+ /** @BATADV_MESH_ACTIVE: interface is up and running */
BATADV_MESH_ACTIVE,
+
+ /** @BATADV_MESH_DEACTIVATING: interface is getting shut down */
BATADV_MESH_DEACTIVATING,
};
#define BATADV_BCAST_QUEUE_LEN 256
#define BATADV_BATMAN_QUEUE_LEN 256
+/**
+ * enum batadv_uev_action - action type of uevent
+ */
enum batadv_uev_action {
+ /** @BATADV_UEV_ADD: gateway was selected (after none was selected) */
BATADV_UEV_ADD = 0,
+
+ /**
+ * @BATADV_UEV_DEL: selected gateway was removed and none is selected
+ * anymore
+ */
BATADV_UEV_DEL,
+
+ /**
+ * @BATADV_UEV_CHANGE: a different gateway was selected as best gateway
+ */
BATADV_UEV_CHANGE,
+
+ /**
+ * @BATADV_UEV_LOOPDETECT: loop was detected which cannot be handled by
+ * bridge loop avoidance
+ */
BATADV_UEV_LOOPDETECT,
};
+/**
+ * enum batadv_uev_type - Type of uevent
+ */
enum batadv_uev_type {
+ /** @BATADV_UEV_GW: selected gateway was modified */
BATADV_UEV_GW = 0,
+
+ /** @BATADV_UEV_BLA: bridge loop avoidance event */
BATADV_UEV_BLA,
};
@@ -184,16 +217,14 @@ enum batadv_uev_type {
/* Kernel headers */
-#include <linux/bitops.h> /* for packet.h */
#include <linux/compiler.h>
#include <linux/etherdevice.h>
-#include <linux/if_ether.h> /* for packet.h */
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
-#include "packet.h"
#include "types.h"
struct net_device;
@@ -202,7 +233,7 @@ struct seq_file;
struct sk_buff;
/**
- * batadv_print_vid - return printable version of vid information
+ * batadv_print_vid() - return printable version of vid information
* @vid: the VLAN identifier
*
* Return: -1 when no VLAN is used, VLAN id otherwise
@@ -238,7 +269,7 @@ void batadv_recv_handler_unregister(u8 packet_type);
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
/**
- * batadv_compare_eth - Compare two not u16 aligned Ethernet addresses
+ * batadv_compare_eth() - Compare two not u16 aligned Ethernet addresses
* @data1: Pointer to a six-byte array containing the Ethernet address
* @data2: Pointer to other six-byte array containing the Ethernet address
*
@@ -252,7 +283,7 @@ static inline bool batadv_compare_eth(const void *data1, const void *data2)
}
/**
- * batadv_has_timed_out - compares current time (jiffies) and timestamp +
+ * batadv_has_timed_out() - compares current time (jiffies) and timestamp +
* timeout
* @timestamp: base value to compare with (in jiffies)
* @timeout: added to base value before comparing (in milliseconds)
@@ -265,40 +296,96 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
}
+/**
+ * batadv_atomic_dec_not_zero() - Decrease unless the number is 0
+ * @v: pointer of type atomic_t
+ *
+ * Return: non-zero if v was not 0, and zero otherwise.
+ */
#define batadv_atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
-/* Returns the smallest signed integer in two's complement with the sizeof x */
+/**
+ * batadv_smallest_signed_int() - Returns the smallest signed integer in two's
+ * complement for the size of x
+ * @x: type of integer
+ *
+ * Return: smallest signed integer of that type
+ */
#define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
-/* Checks if a sequence number x is a predecessor/successor of y.
- * they handle overflows/underflows and can correctly check for a
- * predecessor/successor unless the variable sequence number has grown by
- * more then 2**(bitwidth(x)-1)-1.
+/**
+ * batadv_seq_before() - Checks if a sequence number x is a predecessor of y
+ * @x: potential predecessor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a predecessor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
* This means that for a u8 with the maximum value 255, it would think:
- * - when adding nothing - it is neither a predecessor nor a successor
- * - before adding more than 127 to the starting value - it is a predecessor,
- * - when adding 128 - it is neither a predecessor nor a successor,
- * - after adding more than 127 to the starting value - it is a successor
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a predecessor of y, false otherwise
*/
#define batadv_seq_before(x, y) ({typeof(x)_d1 = (x); \
typeof(y)_d2 = (y); \
typeof(x)_dummy = (_d1 - _d2); \
(void)(&_d1 == &_d2); \
_dummy > batadv_smallest_signed_int(_dummy); })
+
+/**
+ * batadv_seq_after() - Checks if a sequence number x is a successor of y
+ * @x: potential successor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a successor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
+ * This means that for a u8 with the maximum value 255, it would think:
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a successor of y, false otherwise
+ */
#define batadv_seq_after(x, y) batadv_seq_before(y, x)
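/*
 * Editor's sketch, not part of the patch: the wraparound rules documented
 * above for batadv_seq_before()/batadv_seq_after(), worked through with u8
 * sequence numbers. example_seq_compare() is hypothetical.
 */
static void example_seq_compare(void)
{
	u8 cur = 250, next = 255, wrapped = 2, half = 122;

	WARN_ON(!batadv_seq_before(cur, next));    /* (u8)(250 - 255) == 251 > 128 */
	WARN_ON(!batadv_seq_before(cur, wrapped)); /* (u8)(250 - 2) == 248 > 128 */
	WARN_ON(batadv_seq_before(cur, half));     /* (u8)(250 - 122) == 128, not > 128 */
	WARN_ON(batadv_seq_after(cur, half));      /* exactly half the range apart: neither */
}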
-/* Stop preemption on local cpu while incrementing the counter */
+/**
+ * batadv_add_counter() - Add to per cpu statistics counter of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @idx: counter index which should be modified
+ * @count: value to increase counter by
+ *
+ * Stop preemption on local cpu while incrementing the counter
+ */
static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
size_t count)
{
this_cpu_add(bat_priv->bat_counters[idx], count);
}
+/**
+ * batadv_inc_counter() - Increase per cpu statistics counter of soft interface
+ * @b: the bat priv with all the soft interface information
+ * @i: counter index which should be modified
+ */
#define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
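/*
 * Editor's sketch, not part of the patch: bumping per-cpu statistics from a
 * hot path. BATADV_CNT_FORWARD and BATADV_CNT_FORWARD_BYTES are assumed to
 * be counter indices from the enum in types.h; this_cpu_add() keeps the
 * update cheap and preemption-safe without a shared lock.
 */
static void example_account_forward(struct batadv_priv *bat_priv,
				    unsigned int payload_len)
{
	batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
	batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES, payload_len);
}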
-/* Define a macro to reach the control buffer of the skb. The members of the
- * control buffer are defined in struct batadv_skb_cb in types.h.
- * The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+/**
+ * BATADV_SKB_CB() - Get batadv_skb_cb from skb control buffer
+ * @__skb: skb holding the control buffer
+ *
+ * The members of the control buffer are defined in struct batadv_skb_cb in
+ * types.h. The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+ *
+ * Return: pointer to the batadv_skb_cb of the skb
*/
#define BATADV_SKB_CB(__skb) ((struct batadv_skb_cb *)&((__skb)->cb[0]))
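/*
 * Editor's sketch, not part of the patch: typed access to the skb control
 * buffer through BATADV_SKB_CB(). The decoded flag is assumed to be one of
 * the struct batadv_skb_cb members defined in types.h.
 */
static void example_mark_decoded(struct sk_buff *skb)
{
	/* the cast reinterprets the generic 48-byte skb->cb scratch area */
	BATADV_SKB_CB(skb)->decoded = true;
}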
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index e553a8770a89..cbdeb47ec3f6 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2014-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing
@@ -24,7 +25,7 @@
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/icmpv6.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
@@ -54,18 +55,18 @@
#include <net/if_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
+#include <uapi/linux/batadv_packet.h>
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
-#include "packet.h"
#include "translation-table.h"
#include "tvlv.h"
static void batadv_mcast_mla_update(struct work_struct *work);
/**
- * batadv_mcast_start_timer - schedule the multicast periodic worker
+ * batadv_mcast_start_timer() - schedule the multicast periodic worker
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
@@ -75,7 +76,7 @@ static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
}
/**
- * batadv_mcast_get_bridge - get the bridge on top of the softif if it exists
+ * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
* @soft_iface: netdev struct of the mesh interface
*
* If the given soft interface has a bridge on top then the refcount
@@ -101,7 +102,7 @@ static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
}
/**
- * batadv_mcast_mla_softif_get - get softif multicast listeners
+ * batadv_mcast_mla_softif_get() - get softif multicast listeners
* @dev: the device to collect multicast addresses from
* @mcast_list: a list to put found addresses into
*
@@ -147,7 +148,7 @@ static int batadv_mcast_mla_softif_get(struct net_device *dev,
}
/**
- * batadv_mcast_mla_is_duplicate - check whether an address is in a list
+ * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
* @mcast_addr: the multicast address to check
* @mcast_list: the list with multicast addresses to search in
*
@@ -167,7 +168,7 @@ static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
}
/**
- * batadv_mcast_mla_br_addr_cpy - copy a bridge multicast address
+ * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
* @dst: destination to write to - a multicast MAC address
* @src: source to read from - a multicast IP address
*
@@ -191,7 +192,7 @@ static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
}
/**
- * batadv_mcast_mla_bridge_get - get bridged-in multicast listeners
+ * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
* @dev: a bridge slave whose bridge to collect multicast addresses from
* @mcast_list: a list to put found addresses into
*
@@ -244,7 +245,7 @@ out:
}
/**
- * batadv_mcast_mla_list_free - free a list of multicast addresses
+ * batadv_mcast_mla_list_free() - free a list of multicast addresses
* @mcast_list: the list to free
*
* Removes and frees all items in the given mcast_list.
@@ -261,7 +262,7 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
}
/**
- * batadv_mcast_mla_tt_retract - clean up multicast listener announcements
+ * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
* @bat_priv: the bat priv with all the soft interface information
* @mcast_list: a list of addresses which should _not_ be removed
*
@@ -297,7 +298,7 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_mla_tt_add - add multicast listener announcements
+ * batadv_mcast_mla_tt_add() - add multicast listener announcements
* @bat_priv: the bat priv with all the soft interface information
* @mcast_list: a list of addresses which are going to get added
*
@@ -333,7 +334,7 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_has_bridge - check whether the soft-iface is bridged
+ * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
* @bat_priv: the bat priv with all the soft interface information
*
* Checks whether there is a bridge on top of our soft interface.
@@ -354,7 +355,8 @@ static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
}
/**
- * batadv_mcast_querier_log - debug output regarding the querier status on link
+ * batadv_mcast_querier_log() - debug output regarding the querier status on
+ * link
* @bat_priv: the bat priv with all the soft interface information
* @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
* @old_state: the previous querier state on our link
@@ -405,7 +407,8 @@ batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
}
/**
- * batadv_mcast_bridge_log - debug output for topology changes in bridged setups
+ * batadv_mcast_bridge_log() - debug output for topology changes in bridged
+ * setups
* @bat_priv: the bat priv with all the soft interface information
* @bridged: a flag about whether the soft interface is currently bridged or not
* @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
@@ -444,7 +447,7 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
}
/**
- * batadv_mcast_flags_logs - output debug information about mcast flag changes
+ * batadv_mcast_flags_log() - output debug information about mcast flag changes
* @bat_priv: the bat priv with all the soft interface information
* @flags: flags indicating the new multicast state
*
@@ -470,7 +473,7 @@ static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
}
/**
- * batadv_mcast_mla_tvlv_update - update multicast tvlv
+ * batadv_mcast_mla_tvlv_update() - update multicast tvlv
* @bat_priv: the bat priv with all the soft interface information
*
* Updates the own multicast tvlv with our current multicast related settings,
@@ -545,7 +548,7 @@ update:
}
/**
- * __batadv_mcast_mla_update - update the own MLAs
+ * __batadv_mcast_mla_update() - update the own MLAs
* @bat_priv: the bat priv with all the soft interface information
*
* Updates the own multicast listener announcements in the translation
@@ -582,7 +585,7 @@ out:
}
/**
- * batadv_mcast_mla_update - update the own MLAs
+ * batadv_mcast_mla_update() - update the own MLAs
* @work: kernel work struct
*
* Updates the own multicast listener announcements in the translation
@@ -605,7 +608,7 @@ static void batadv_mcast_mla_update(struct work_struct *work)
}
/**
- * batadv_mcast_is_report_ipv4 - check for IGMP reports
+ * batadv_mcast_is_report_ipv4() - check for IGMP reports
* @skb: the ethernet frame destined for the mesh
*
* This call might reallocate skb data.
@@ -630,7 +633,8 @@ static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
}
/**
- * batadv_mcast_forw_mode_check_ipv4 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
+ * potential
* @bat_priv: the bat priv with all the soft interface information
* @skb: the IPv4 packet to check
* @is_unsnoopable: stores whether the destination is snoopable
@@ -671,7 +675,7 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_is_report_ipv6 - check for MLD reports
+ * batadv_mcast_is_report_ipv6() - check for MLD reports
* @skb: the ethernet frame destined for the mesh
*
* This call might reallocate skb data.
@@ -695,7 +699,8 @@ static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
}
/**
- * batadv_mcast_forw_mode_check_ipv6 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
+ * potential
* @bat_priv: the bat priv with all the soft interface information
* @skb: the IPv6 packet to check
* @is_unsnoopable: stores whether the destination is snoopable
@@ -736,7 +741,7 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_forw_mode_check - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast frame to check
* @is_unsnoopable: stores whether the destination is snoopable
@@ -774,7 +779,7 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_forw_want_all_ip_count - count nodes with unspecific mcast
+ * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
* interest
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: ethernet header of a packet
@@ -798,7 +803,7 @@ static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_forw_tt_node_get - get a multicast tt node
+ * batadv_mcast_forw_tt_node_get() - get a multicast tt node
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: the ether header containing the multicast destination
*
@@ -814,7 +819,7 @@ batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_forw_ipv4_node_get - get a node with an ipv4 flag
+ * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
* @bat_priv: the bat priv with all the soft interface information
*
* Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
@@ -841,7 +846,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
}
/**
- * batadv_mcast_forw_ipv6_node_get - get a node with an ipv6 flag
+ * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
* @bat_priv: the bat priv with all the soft interface information
*
* Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
@@ -868,7 +873,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
}
/**
- * batadv_mcast_forw_ip_node_get - get a node with an ipv4/ipv6 flag
+ * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: an ethernet header to determine the protocol family from
*
@@ -892,7 +897,7 @@ batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_forw_unsnoop_node_get - get a node with an unsnoopable flag
+ * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
* @bat_priv: the bat priv with all the soft interface information
*
* Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
@@ -919,7 +924,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
}
/**
- * batadv_mcast_forw_mode - check on how to forward a multicast packet
+ * batadv_mcast_forw_mode() - check on how to forward a multicast packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: The multicast packet to check
* @orig: an originator to be set to forward the skb to
@@ -973,7 +978,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
}
/**
- * batadv_mcast_want_unsnoop_update - update unsnoop counter and list
+ * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node which multicast state might have changed of
* @mcast_flags: flags indicating the new multicast state
@@ -1018,7 +1023,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_want_ipv4_update - update want-all-ipv4 counter and list
+ * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node which multicast state might have changed of
* @mcast_flags: flags indicating the new multicast state
@@ -1063,7 +1068,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_want_ipv6_update - update want-all-ipv6 counter and list
+ * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node which multicast state might have changed of
* @mcast_flags: flags indicating the new multicast state
@@ -1108,7 +1113,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_tvlv_ogm_handler - process incoming multicast tvlv container
+ * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -1164,7 +1169,7 @@ static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_init - initialize the multicast optimizations structures
+ * batadv_mcast_init() - initialize the multicast optimizations structures
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_mcast_init(struct batadv_priv *bat_priv)
@@ -1179,7 +1184,7 @@ void batadv_mcast_init(struct batadv_priv *bat_priv)
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_mcast_flags_print_header - print own mcast flags to debugfs table
+ * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
* @bat_priv: the bat priv with all the soft interface information
* @seq: debugfs table seq_file struct
*
@@ -1220,7 +1225,7 @@ static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
}
/**
- * batadv_mcast_flags_seq_print_text - print the mcast flags of other nodes
+ * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
* @seq: seq file to print on
* @offset: not used
*
@@ -1281,7 +1286,7 @@ int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
#endif
/**
- * batadv_mcast_free - free the multicast optimizations structures
+ * batadv_mcast_free() - free the multicast optimizations structures
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_mcast_free(struct batadv_priv *bat_priv)
@@ -1296,7 +1301,7 @@ void batadv_mcast_free(struct batadv_priv *bat_priv)
}
/**
- * batadv_mcast_purge_orig - reset originator global mcast state modifications
+ * batadv_mcast_purge_orig() - reset originator global mcast state modifications
* @orig: the originator which is going to get purged
*/
void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 2a78cddab0e9..3ac06337ab71 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2014-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing
@@ -25,15 +26,21 @@ struct sk_buff;
/**
* enum batadv_forw_mode - the way a packet should be forwarded
- * @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
- * flooding)
- * @BATADV_FORW_SINGLE: forward the packet to a single node (currently via the
- * BATMAN unicast routing protocol)
- * @BATADV_FORW_NONE: don't forward, drop it
*/
enum batadv_forw_mode {
+ /**
+ * @BATADV_FORW_ALL: forward the packet to all nodes (currently via
+ * classic flooding)
+ */
BATADV_FORW_ALL,
+
+ /**
+ * @BATADV_FORW_SINGLE: forward the packet to a single node (currently
+ * via the BATMAN unicast routing protocol)
+ */
BATADV_FORW_SINGLE,
+
+ /** @BATADV_FORW_NONE: don't forward, drop it */
BATADV_FORW_NONE,
};
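/*
 * Editor's sketch, not part of the patch: how a transmit path might act on
 * the three modes. example_flood() and example_unicast() are hypothetical
 * stand-ins for the flooding and unicast send paths.
 */
static int example_dispatch(enum batadv_forw_mode mode, struct sk_buff *skb)
{
	switch (mode) {
	case BATADV_FORW_ALL:
		return example_flood(skb);      /* classic flooding */
	case BATADV_FORW_SINGLE:
		return example_unicast(skb);    /* BATMAN unicast routing */
	case BATADV_FORW_NONE:
	default:
		kfree_skb(skb);                 /* don't forward, drop it */
		return 0;
	}
}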
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index ab13b4d58733..a823d3899bad 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
@@ -23,8 +24,8 @@
#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/export.h>
-#include <linux/fs.h>
#include <linux/genetlink.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -39,6 +40,7 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
@@ -46,7 +48,6 @@
#include "gateway_client.h"
#include "hard-interface.h"
#include "originator.h"
-#include "packet.h"
#include "soft-interface.h"
#include "tp_meter.h"
#include "translation-table.h"
@@ -99,7 +100,7 @@ static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
};
/**
- * batadv_netlink_get_ifindex - Extract an interface index from a message
+ * batadv_netlink_get_ifindex() - Extract an interface index from a message
* @nlh: Message header
* @attrtype: Attribute which holds an interface index
*
@@ -114,7 +115,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
}
/**
- * batadv_netlink_mesh_info_put - fill in generic information about mesh
+ * batadv_netlink_mesh_info_put() - fill in generic information about mesh
* interface
* @msg: netlink message to be sent back
* @soft_iface: interface for which the data should be taken
@@ -169,7 +170,7 @@ batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
}
/**
- * batadv_netlink_get_mesh_info - handle incoming BATADV_CMD_GET_MESH_INFO
+ * batadv_netlink_get_mesh_info() - handle incoming BATADV_CMD_GET_MESH_INFO
* netlink request
* @skb: received netlink message
* @info: receiver information
@@ -230,7 +231,7 @@ batadv_netlink_get_mesh_info(struct sk_buff *skb, struct genl_info *info)
}
/**
- * batadv_netlink_tp_meter_put - Fill information of started tp_meter session
+ * batadv_netlink_tp_meter_put() - Fill information of started tp_meter session
* @msg: netlink message to be sent back
* @cookie: tp meter session cookie
*
@@ -246,7 +247,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie)
}
/**
- * batadv_netlink_tpmeter_notify - send tp_meter result via netlink to client
+ * batadv_netlink_tpmeter_notify() - send tp_meter result via netlink to client
* @bat_priv: the bat priv with all the soft interface information
* @dst: destination of tp_meter session
* @result: reason for tp meter session stop
@@ -309,7 +310,7 @@ err_genlmsg:
}
/**
- * batadv_netlink_tp_meter_start - Start a new tp_meter session
+ * batadv_netlink_tp_meter_start() - Start a new tp_meter session
* @skb: received netlink message
* @info: receiver information
*
@@ -386,7 +387,7 @@ batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
}
/**
- * batadv_netlink_tp_meter_start - Cancel a running tp_meter session
+ * batadv_netlink_tp_meter_cancel() - Cancel a running tp_meter session
* @skb: received netlink message
* @info: receiver information
*
@@ -431,7 +432,7 @@ out:
}
/**
- * batadv_netlink_dump_hardif_entry - Dump one hard interface into a message
+ * batadv_netlink_dump_hardif_entry() - Dump one hard interface into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -473,7 +474,7 @@ batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_netlink_dump_hardifs - Dump all hard interface into a messages
+ * batadv_netlink_dump_hardifs() - Dump all hard interfaces into a message
* @msg: Netlink message to dump into
* @cb: Parameters from query
*
@@ -620,7 +621,7 @@ struct genl_family batadv_netlink_family __ro_after_init = {
};
/**
- * batadv_netlink_register - register batadv genl netlink family
+ * batadv_netlink_register() - register batadv genl netlink family
*/
void __init batadv_netlink_register(void)
{
@@ -632,7 +633,7 @@ void __init batadv_netlink_register(void)
}
/**
- * batadv_netlink_unregister - unregister batadv genl netlink family
+ * batadv_netlink_unregister() - unregister batadv genl netlink family
*/
void batadv_netlink_unregister(void)
{
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
index f1cd8c5da966..0e7e57b69b54 100644
--- a/net/batman-adv/netlink.h
+++ b/net/batman-adv/netlink.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 3604d7899e2c..b48116bb24ef 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll, Jeppe Ledet-Pedersen
@@ -25,7 +26,7 @@
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/init.h>
@@ -35,6 +36,7 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
+#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/random.h>
@@ -47,12 +49,12 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "originator.h"
-#include "packet.h"
#include "routing.h"
#include "send.h"
#include "tvlv.h"
@@ -65,7 +67,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
/**
- * batadv_nc_init - one-time initialization for network coding
+ * batadv_nc_init() - one-time initialization for network coding
*
* Return: 0 on success or negative error number in case of failure
*/
@@ -81,7 +83,7 @@ int __init batadv_nc_init(void)
}
/**
- * batadv_nc_start_timer - initialise the nc periodic worker
+ * batadv_nc_start_timer() - initialise the nc periodic worker
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
@@ -91,7 +93,7 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
}
/**
- * batadv_nc_tvlv_container_update - update the network coding tvlv container
+ * batadv_nc_tvlv_container_update() - update the network coding tvlv container
* after network coding setting change
* @bat_priv: the bat priv with all the soft interface information
*/
@@ -113,7 +115,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
}
/**
- * batadv_nc_status_update - update the network coding tvlv container after
+ * batadv_nc_status_update() - update the network coding tvlv container after
* network coding setting change
* @net_dev: the soft interface net device
*/
@@ -125,7 +127,7 @@ void batadv_nc_status_update(struct net_device *net_dev)
}
/**
- * batadv_nc_tvlv_ogm_handler_v1 - process incoming nc tvlv container
+ * batadv_nc_tvlv_ogm_handler_v1() - process incoming nc tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -144,7 +146,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_mesh_init - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init() - initialise coding hash table and start house keeping
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success or negative error number in case of failure
@@ -185,7 +187,7 @@ err:
}
/**
- * batadv_nc_init_bat_priv - initialise the nc specific bat_priv variables
+ * batadv_nc_init_bat_priv() - initialise the nc specific bat_priv variables
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
@@ -197,7 +199,7 @@ void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
}
/**
- * batadv_nc_init_orig - initialise the nc fields of an orig_node
+ * batadv_nc_init_orig() - initialise the nc fields of an orig_node
* @orig_node: the orig_node which is going to be initialised
*/
void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
@@ -209,8 +211,8 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
}
/**
- * batadv_nc_node_release - release nc_node from lists and queue for free after
- * rcu grace period
+ * batadv_nc_node_release() - release nc_node from lists and queue for free
+ * after rcu grace period
* @ref: kref pointer of the nc_node
*/
static void batadv_nc_node_release(struct kref *ref)
@@ -224,7 +226,7 @@ static void batadv_nc_node_release(struct kref *ref)
}
/**
- * batadv_nc_node_put - decrement the nc_node refcounter and possibly
+ * batadv_nc_node_put() - decrement the nc_node refcounter and possibly
* release it
* @nc_node: nc_node to be free'd
*/
@@ -234,8 +236,8 @@ static void batadv_nc_node_put(struct batadv_nc_node *nc_node)
}
/**
- * batadv_nc_path_release - release nc_path from lists and queue for free after
- * rcu grace period
+ * batadv_nc_path_release() - release nc_path from lists and queue for free
+ * after rcu grace period
* @ref: kref pointer of the nc_path
*/
static void batadv_nc_path_release(struct kref *ref)
@@ -248,7 +250,7 @@ static void batadv_nc_path_release(struct kref *ref)
}
/**
- * batadv_nc_path_put - decrement the nc_path refcounter and possibly
+ * batadv_nc_path_put() - decrement the nc_path refcounter and possibly
* release it
* @nc_path: nc_path to be free'd
*/
@@ -258,7 +260,7 @@ static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
}
/**
- * batadv_nc_packet_free - frees nc packet
+ * batadv_nc_packet_free() - frees nc packet
* @nc_packet: the nc packet to free
* @dropped: whether the packet is freed because it is dropped
*/
@@ -275,7 +277,7 @@ static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
}
/**
- * batadv_nc_to_purge_nc_node - checks whether an nc node has to be purged
+ * batadv_nc_to_purge_nc_node() - checks whether an nc node has to be purged
* @bat_priv: the bat priv with all the soft interface information
* @nc_node: the nc node to check
*
@@ -291,7 +293,7 @@ static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_to_purge_nc_path_coding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_coding() - checks whether an nc path has timed out
* @bat_priv: the bat priv with all the soft interface information
* @nc_path: the nc path to check
*
@@ -311,7 +313,8 @@ static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_to_purge_nc_path_decoding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_decoding() - checks whether an nc path has timed
+ * out
* @bat_priv: the bat priv with all the soft interface information
* @nc_path: the nc path to check
*
@@ -331,7 +334,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_purge_orig_nc_nodes - go through list of nc nodes and purge stale
+ * batadv_nc_purge_orig_nc_nodes() - go through list of nc nodes and purge stale
* entries
* @bat_priv: the bat priv with all the soft interface information
* @list: list of nc nodes
@@ -369,7 +372,7 @@ batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_purge_orig - purges all nc node data attached of the given
+ * batadv_nc_purge_orig() - purges all nc node data attached to the given
* originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig_node with the nc node entries to be purged
@@ -395,8 +398,8 @@ void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_purge_orig_hash - traverse entire originator hash to check if they
- * have timed out nc nodes
+ * batadv_nc_purge_orig_hash() - traverse the entire originator hash to check
+ * for timed out nc nodes
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
@@ -422,7 +425,7 @@ static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
}
/**
- * batadv_nc_purge_paths - traverse all nc paths part of the hash and remove
+ * batadv_nc_purge_paths() - traverse all nc paths part of the hash and remove
* unused ones
* @bat_priv: the bat priv with all the soft interface information
* @hash: hash table containing the nc paths to check
@@ -481,7 +484,7 @@ static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_hash_key_gen - computes the nc_path hash key
+ * batadv_nc_hash_key_gen() - computes the nc_path hash key
* @key: buffer to hold the final hash key
* @src: source ethernet mac address going into the hash key
* @dst: destination ethernet mac address going into the hash key
@@ -494,7 +497,7 @@ static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
}
/**
- * batadv_nc_hash_choose - compute the hash value for an nc path
+ * batadv_nc_hash_choose() - compute the hash value for an nc path
* @data: data to hash
* @size: size of the hash table
*
@@ -512,7 +515,7 @@ static u32 batadv_nc_hash_choose(const void *data, u32 size)
}
/**
- * batadv_nc_hash_compare - comparing function used in the network coding hash
+ * batadv_nc_hash_compare() - comparing function used in the network coding hash
* tables
* @node: node in the local table
* @data2: second object to compare the node to
@@ -538,7 +541,7 @@ static bool batadv_nc_hash_compare(const struct hlist_node *node,
}
/**
- * batadv_nc_hash_find - search for an existing nc path and return it
+ * batadv_nc_hash_find() - search for an existing nc path and return it
* @hash: hash table containing the nc path
* @data: search key
*
@@ -575,7 +578,7 @@ batadv_nc_hash_find(struct batadv_hashtable *hash,
}
/**
- * batadv_nc_send_packet - send non-coded packet and free nc_packet struct
+ * batadv_nc_send_packet() - send non-coded packet and free nc_packet struct
* @nc_packet: the nc packet to send
*/
static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
@@ -586,7 +589,7 @@ static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
}
/**
- * batadv_nc_sniffed_purge - Checks timestamp of given sniffed nc_packet.
+ * batadv_nc_sniffed_purge() - Checks timestamp of given sniffed nc_packet.
* @bat_priv: the bat priv with all the soft interface information
* @nc_path: the nc path the packet belongs to
* @nc_packet: the nc packet to be checked
@@ -625,7 +628,7 @@ out:
}
/**
- * batadv_nc_fwd_flush - Checks the timestamp of the given nc packet.
+ * batadv_nc_fwd_flush() - Checks the timestamp of the given nc packet.
* @bat_priv: the bat priv with all the soft interface information
* @nc_path: the nc path the packet belongs to
* @nc_packet: the nc packet to be checked
@@ -663,8 +666,8 @@ static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_process_nc_paths - traverse given nc packet pool and free timed out
- * nc packets
+ * batadv_nc_process_nc_paths() - traverse given nc packet pool and free timed
+ * out nc packets
* @bat_priv: the bat priv with all the soft interface information
* @hash: to be processed hash table
* @process_fn: Function called to process given nc packet. Should return true
@@ -709,7 +712,8 @@ batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_worker - periodic task for house keeping related to network coding
+ * batadv_nc_worker() - periodic task for house keeping related to network
+ * coding
* @work: kernel work struct
*/
static void batadv_nc_worker(struct work_struct *work)
@@ -749,8 +753,8 @@ static void batadv_nc_worker(struct work_struct *work)
}
/**
- * batadv_can_nc_with_orig - checks whether the given orig node is suitable for
- * coding or not
+ * batadv_can_nc_with_orig() - checks whether the given orig node is suitable
+ * for coding or not
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: neighboring orig node which may be used as nc candidate
* @ogm_packet: incoming ogm packet also used for the checks
@@ -790,7 +794,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_find_nc_node - search for an existing nc node and return it
+ * batadv_nc_find_nc_node() - search for an existing nc node and return it
* @orig_node: orig node originating the ogm packet
* @orig_neigh_node: neighboring orig node from which we received the ogm packet
* (can be equal to orig_node)
@@ -830,7 +834,7 @@ batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
}
/**
- * batadv_nc_get_nc_node - retrieves an nc node or creates the entry if it was
+ * batadv_nc_get_nc_node() - retrieves an nc node or creates the entry if it was
* not found
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node originating the ogm packet
@@ -890,7 +894,7 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node
+ * batadv_nc_update_nc_node() - updates stored incoming and outgoing nc node
* structs (best called on incoming OGMs)
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node originating the ogm packet
@@ -945,7 +949,7 @@ out:
}
/**
- * batadv_nc_get_path - get existing nc_path or allocate a new one
+ * batadv_nc_get_path() - get existing nc_path or allocate a new one
* @bat_priv: the bat priv with all the soft interface information
* @hash: hash table containing the nc path
* @src: ethernet source address - first half of the nc path search key
@@ -1006,7 +1010,7 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_random_weight_tq - scale the receivers TQ-value to avoid unfair
+ * batadv_nc_random_weight_tq() - scale the receiver's TQ-value to avoid unfair
* selection of a receiver with slightly lower TQ than the other
* @tq: to be weighted tq value
*
@@ -1029,7 +1033,7 @@ static u8 batadv_nc_random_weight_tq(u8 tq)
}
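/*
 * Editor's note, not part of the patch, with concrete numbers: assuming
 * BATADV_TQ_MAX_VALUE == 255, a neighbor reporting tq == 200 has an
 * estimated packet loss of 255 - 200 == 55. Randomizing within that loss
 * margin yields a weighted tq in [200, 255], so a marginally worse neighbor
 * can still win the selection instead of always losing by a hair.
 */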
/**
- * batadv_nc_memxor - XOR destination with source
+ * batadv_nc_memxor() - XOR destination with source
* @dst: byte array to XOR into
* @src: byte array to XOR from
* @len: length of destination array
@@ -1043,7 +1047,7 @@ static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
}
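/*
 * Editor's sketch, not part of the patch: the body of batadv_nc_memxor() is
 * elided by this hunk; a plain byte-wise version of the documented contract
 * would look like this.
 */
static void example_memxor(char *dst, const char *src, unsigned int len)
{
	unsigned int i;

	/* XOR every src byte into dst; coding two packets this way lets a
	 * receiver holding either plain packet recover the other one
	 */
	for (i = 0; i < len; i++)
		dst[i] ^= src[i];
}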
/**
- * batadv_nc_code_packets - code a received unicast_packet with an nc packet
+ * batadv_nc_code_packets() - code a received unicast_packet with an nc packet
* into a coded_packet and send it
* @bat_priv: the bat priv with all the soft interface information
* @skb: data skb to forward
@@ -1236,7 +1240,7 @@ out:
}
/**
- * batadv_nc_skb_coding_possible - true if a decoded skb is available at dst.
+ * batadv_nc_skb_coding_possible() - true if a decoded skb is available at dst.
* @skb: data skb to forward
* @dst: destination mac address of the other skb to code with
* @src: source mac address of skb
@@ -1260,7 +1264,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
}
/**
- * batadv_nc_path_search - Find the coding path matching in_nc_node and
+ * batadv_nc_path_search() - Find the coding path matching in_nc_node and
* out_nc_node to retrieve a buffered packet that can be used for coding.
* @bat_priv: the bat priv with all the soft interface information
* @in_nc_node: pointer to skb next hop's neighbor nc node
@@ -1328,8 +1332,8 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_skb_src_search - Loops through the list of neighoring nodes of the
- * skb's sender (may be equal to the originator).
+ * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of
+ * the skb's sender (may be equal to the originator).
* @bat_priv: the bat priv with all the soft interface information
* @skb: data skb to forward
* @eth_dst: next hop mac address of skb
@@ -1374,7 +1378,7 @@ batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_skb_store_before_coding - set the ethernet src and dst of the
+ * batadv_nc_skb_store_before_coding() - set the ethernet src and dst of the
* unicast skb before it is stored for use in later decoding
* @bat_priv: the bat priv with all the soft interface information
* @skb: data skb to store
@@ -1409,7 +1413,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_skb_dst_search - Loops through list of neighboring nodes to dst.
+ * batadv_nc_skb_dst_search() - Loops through list of neighboring nodes to dst.
* @skb: data skb to forward
* @neigh_node: next hop to forward packet to
* @ethhdr: pointer to the ethernet header inside the skb
@@ -1467,7 +1471,7 @@ static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
}
/**
- * batadv_nc_skb_add_to_path - buffer skb for later encoding / decoding
+ * batadv_nc_skb_add_to_path() - buffer skb for later encoding / decoding
* @skb: skb to add to path
* @nc_path: path to add skb to
* @neigh_node: next hop to forward packet to
@@ -1502,7 +1506,7 @@ static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
}
/**
- * batadv_nc_skb_forward - try to code a packet or add it to the coding packet
+ * batadv_nc_skb_forward() - try to code a packet or add it to the coding packet
* buffer
* @skb: data skb to forward
* @neigh_node: next hop to forward packet to
@@ -1559,8 +1563,8 @@ out:
}
/**
- * batadv_nc_skb_store_for_decoding - save a clone of the skb which can be used
- * when decoding coded packets
+ * batadv_nc_skb_store_for_decoding() - save a clone of the skb which can be
+ * used when decoding coded packets
* @bat_priv: the bat priv with all the soft interface information
* @skb: data skb to store
*/
@@ -1620,7 +1624,7 @@ out:
}
/**
- * batadv_nc_skb_store_sniffed_unicast - check if a received unicast packet
+ * batadv_nc_skb_store_sniffed_unicast() - check if a received unicast packet
* should be saved in the decoding buffer and, if so, store it there
* @bat_priv: the bat priv with all the soft interface information
* @skb: unicast skb to store
@@ -1640,7 +1644,7 @@ void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_skb_decode_packet - decode given skb using the decode data stored
+ * batadv_nc_skb_decode_packet() - decode given skb using the decode data stored
* in nc_packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: unicast skb to decode
@@ -1734,7 +1738,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
}
/**
- * batadv_nc_find_decoding_packet - search through buffered decoding data to
+ * batadv_nc_find_decoding_packet() - search through buffered decoding data to
* find the data needed to decode the coded packet
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: pointer to the ethernet header inside the coded packet
@@ -1799,7 +1803,7 @@ batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
}
/**
- * batadv_nc_recv_coded_packet - try to decode coded packet and enqueue the
+ * batadv_nc_recv_coded_packet() - try to decode coded packet and enqueue the
* resulting unicast packet
* @skb: incoming coded packet
* @recv_if: pointer to interface this packet was received on
@@ -1874,7 +1878,7 @@ free_skb:
}
/**
- * batadv_nc_mesh_free - clean up network coding memory
+ * batadv_nc_mesh_free() - clean up network coding memory
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
@@ -1891,7 +1895,7 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_nc_nodes_seq_print_text - print the nc node information
+ * batadv_nc_nodes_seq_print_text() - print the nc node information
* @seq: seq file to print on
* @offset: not used
*
@@ -1954,7 +1958,7 @@ out:
}
/**
- * batadv_nc_init_debugfs - create nc folder and related files in debugfs
+ * batadv_nc_init_debugfs() - create nc folder and related files in debugfs
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success or negative error number in case of failure
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index c66efb81d2f4..adaeafa4f71e 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll, Jeppe Ledet-Pedersen
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 2967b86c13da..58a7d9274435 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -21,7 +22,7 @@
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -30,10 +31,12 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
+#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <uapi/linux/batman_adv.h>
@@ -55,10 +58,47 @@
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;
+/**
+ * batadv_orig_hash_find() - Find and return originator from orig_hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: mac address of the originator
+ *
+ * Return: orig_node (with increased refcnt), NULL on errors
+ */
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_head *head;
+ struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
+ int index;
+
+ if (!hash)
+ return NULL;
+
+ index = batadv_choose_orig(data, hash->size);
+ head = &hash->table[index];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ if (!batadv_compare_eth(orig_node, data))
+ continue;
+
+ if (!kref_get_unless_zero(&orig_node->refcount))
+ continue;
+
+ orig_node_tmp = orig_node;
+ break;
+ }
+ rcu_read_unlock();
+
+ return orig_node_tmp;
+}
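/*
 * Editor's sketch, not part of the patch: batadv_orig_hash_find() returns
 * the orig_node with its refcount raised, so each successful lookup must be
 * paired with batadv_orig_node_put(). example_orig_known() is hypothetical.
 */
static bool example_orig_known(struct batadv_priv *bat_priv, const u8 *addr)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_hash_find(bat_priv, addr);
	if (!orig_node)
		return false;

	batadv_orig_node_put(orig_node);
	return true;
}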
+
static void batadv_purge_orig(struct work_struct *work);
/**
- * batadv_compare_orig - comparing function used in the originator hash table
+ * batadv_compare_orig() - comparing function used in the originator hash table
* @node: node in the local table
* @data2: second object to compare the node to
*
@@ -73,7 +113,7 @@ bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
}
/**
- * batadv_orig_node_vlan_get - get an orig_node_vlan object
+ * batadv_orig_node_vlan_get() - get an orig_node_vlan object
* @orig_node: the originator serving the VLAN
* @vid: the VLAN identifier
*
@@ -104,7 +144,7 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
}
/**
- * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
+ * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
* object
* @orig_node: the originator serving the VLAN
* @vid: the VLAN identifier
@@ -145,7 +185,7 @@ out:
}
/**
- * batadv_orig_node_vlan_release - release originator-vlan object from lists
+ * batadv_orig_node_vlan_release() - release originator-vlan object from lists
* and queue for free after rcu grace period
* @ref: kref pointer of the originator-vlan object
*/
@@ -159,7 +199,7 @@ static void batadv_orig_node_vlan_release(struct kref *ref)
}
/**
- * batadv_orig_node_vlan_put - decrement the refcounter and possibly release
+ * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release
* the originator-vlan object
* @orig_vlan: the originator-vlan object to release
*/
@@ -168,6 +208,12 @@ void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan)
kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
}
+/**
+ * batadv_originator_init() - Initialize all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_originator_init(struct batadv_priv *bat_priv)
{
if (bat_priv->orig_hash)
@@ -193,7 +239,7 @@ err:
}
/**
- * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ * batadv_neigh_ifinfo_release() - release neigh_ifinfo from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the neigh_ifinfo
*/
@@ -210,7 +256,7 @@ static void batadv_neigh_ifinfo_release(struct kref *ref)
}
/**
- * batadv_neigh_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release
* the neigh_ifinfo
* @neigh_ifinfo: the neigh_ifinfo object to release
*/
@@ -220,7 +266,7 @@ void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
}
/**
- * batadv_hardif_neigh_release - release hardif neigh node from lists and
+ * batadv_hardif_neigh_release() - release hardif neigh node from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the neigh_node
*/
@@ -240,7 +286,7 @@ static void batadv_hardif_neigh_release(struct kref *ref)
}
/**
- * batadv_hardif_neigh_put - decrement the hardif neighbors refcounter
+ * batadv_hardif_neigh_put() - decrement the hardif neighbors refcounter
* and possibly release it
* @hardif_neigh: hardif neigh neighbor to free
*/
@@ -250,7 +296,7 @@ void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh)
}
/**
- * batadv_neigh_node_release - release neigh_node from lists and queue for
+ * batadv_neigh_node_release() - release neigh_node from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the neigh_node
*/
@@ -275,7 +321,7 @@ static void batadv_neigh_node_release(struct kref *ref)
}
/**
- * batadv_neigh_node_put - decrement the neighbors refcounter and possibly
+ * batadv_neigh_node_put() - decrement the neighbors refcounter and possibly
* release it
* @neigh_node: neigh neighbor to free
*/
@@ -285,7 +331,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
}
/**
- * batadv_orig_router_get - router to the originator depending on iface
+ * batadv_orig_router_get() - router to the originator depending on iface
* @orig_node: the orig node for the router
* @if_outgoing: the interface where the payload packet has been received or
* the OGM should be sent to
@@ -318,7 +364,7 @@ batadv_orig_router_get(struct batadv_orig_node *orig_node,
}
/**
- * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
+ * batadv_orig_ifinfo_get() - find the ifinfo from an orig_node
* @orig_node: the orig node to be queried
* @if_outgoing: the interface for which the ifinfo should be acquired
*
@@ -350,7 +396,7 @@ batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
}
/**
- * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
+ * batadv_orig_ifinfo_new() - search and possibly create an orig_ifinfo object
* @orig_node: the orig node to be queried
* @if_outgoing: the interface for which the ifinfo should be acquired
*
@@ -396,7 +442,7 @@ out:
}
/**
- * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
+ * batadv_neigh_ifinfo_get() - find the ifinfo from a neigh_node
* @neigh: the neigh node to be queried
* @if_outgoing: the interface for which the ifinfo should be acquired
*
@@ -429,7 +475,7 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
}
/**
- * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
+ * batadv_neigh_ifinfo_new() - search and possibly create a neigh_ifinfo object
* @neigh: the neigh node to be queried
* @if_outgoing: the interface for which the ifinfo should be acquired
*
@@ -472,7 +518,7 @@ out:
}
/**
- * batadv_neigh_node_get - retrieve a neighbour from the list
+ * batadv_neigh_node_get() - retrieve a neighbour from the list
* @orig_node: originator which the neighbour belongs to
* @hard_iface: the interface where this neighbour is connected to
* @addr: the address of the neighbour
@@ -509,7 +555,7 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
}
/**
- * batadv_hardif_neigh_create - create a hardif neighbour node
+ * batadv_hardif_neigh_create() - create a hardif neighbour node
* @hard_iface: the interface this neighbour is connected to
* @neigh_addr: the interface address of the neighbour to retrieve
* @orig_node: originator object representing the neighbour
@@ -555,7 +601,7 @@ out:
}
/**
- * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
+ * batadv_hardif_neigh_get_or_create() - retrieve or create a hardif neighbour
* node
* @hard_iface: the interface this neighbour is connected to
* @neigh_addr: the interface address of the neighbour to retrieve
@@ -579,7 +625,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
}
/**
- * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
+ * batadv_hardif_neigh_get() - retrieve a hardif neighbour from the list
* @hard_iface: the interface where this neighbour is connected to
* @neigh_addr: the address of the neighbour
*
@@ -611,7 +657,7 @@ batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
}
/**
- * batadv_neigh_node_create - create a neigh node object
+ * batadv_neigh_node_create() - create a neigh node object
* @orig_node: originator object representing the neighbour
* @hard_iface: the interface where the neighbour is connected to
* @neigh_addr: the mac address of the neighbour interface
@@ -676,7 +722,7 @@ out:
}
/**
- * batadv_neigh_node_get_or_create - retrieve or create a neigh node object
+ * batadv_neigh_node_get_or_create() - retrieve or create a neigh node object
* @orig_node: originator object representing the neighbour
* @hard_iface: the interface where the neighbour is connected to
* @neigh_addr: the mac address of the neighbour interface
@@ -700,7 +746,7 @@ batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
+ * batadv_hardif_neigh_seq_print_text() - print the single hop neighbour list
* @seq: neighbour table seq_file struct
* @offset: not used
*
@@ -735,8 +781,8 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
#endif
/**
- * batadv_hardif_neigh_dump - Dump to netlink the neighbor infos for a specific
- * outgoing interface
+ * batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a
+ * specific outgoing interface
* @msg: message to dump into
* @cb: parameters for the dump
*
@@ -812,7 +858,7 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
}
/**
- * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ * batadv_orig_ifinfo_release() - release orig_ifinfo from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the orig_ifinfo
*/
@@ -835,7 +881,7 @@ static void batadv_orig_ifinfo_release(struct kref *ref)
}
/**
- * batadv_orig_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release
* the orig_ifinfo
* @orig_ifinfo: the orig_ifinfo object to release
*/
@@ -845,7 +891,7 @@ void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo)
}
/**
- * batadv_orig_node_free_rcu - free the orig_node
+ * batadv_orig_node_free_rcu() - free the orig_node
* @rcu: rcu pointer of the orig_node
*/
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
@@ -866,7 +912,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
}
/**
- * batadv_orig_node_release - release orig_node from lists and queue for
+ * batadv_orig_node_release() - release orig_node from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the orig_node
*/
@@ -917,7 +963,7 @@ static void batadv_orig_node_release(struct kref *ref)
}
/**
- * batadv_orig_node_put - decrement the orig node refcounter and possibly
+ * batadv_orig_node_put() - decrement the orig node refcounter and possibly
* release it
* @orig_node: the orig node to free
*/
@@ -926,6 +972,10 @@ void batadv_orig_node_put(struct batadv_orig_node *orig_node)
kref_put(&orig_node->refcount, batadv_orig_node_release);
}
+/**
+ * batadv_originator_free() - Free all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
void batadv_originator_free(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
@@ -959,7 +1009,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
}
/**
- * batadv_orig_node_new - creates a new orig_node
+ * batadv_orig_node_new() - creates a new orig_node
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the originator
*
@@ -1038,7 +1088,7 @@ free_orig_node:
}
/**
- * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * batadv_purge_neigh_ifinfo() - purge obsolete ifinfo entries from neighbor
* @bat_priv: the bat priv with all the soft interface information
* @neigh: orig node which is to be checked
*/
@@ -1079,7 +1129,7 @@ batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
}
/**
- * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
+ * batadv_purge_orig_ifinfo() - purge obsolete ifinfo entries from originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
*
@@ -1131,7 +1181,7 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
}
/**
- * batadv_purge_orig_neighbors - purges neighbors from originator
+ * batadv_purge_orig_neighbors() - purges neighbors from originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
*
@@ -1189,7 +1239,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
}
/**
- * batadv_find_best_neighbor - finds the best neighbor after purging
+ * batadv_find_best_neighbor() - finds the best neighbor after purging
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
* @if_outgoing: the interface for which the metric should be compared
@@ -1224,7 +1274,7 @@ batadv_find_best_neighbor(struct batadv_priv *bat_priv,
}
/**
- * batadv_purge_orig_node - purges obsolete information from an orig_node
+ * batadv_purge_orig_node() - purges obsolete information from an orig_node
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
*
@@ -1341,12 +1391,24 @@ static void batadv_purge_orig(struct work_struct *work)
msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
+/**
+ * batadv_purge_orig_ref() - Purge all outdated originators
+ * @bat_priv: the bat priv with all the soft interface information
+ */
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
_batadv_purge_orig(bat_priv);
}
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_orig_seq_print_text() - Print the originator table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1376,7 +1438,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
}
/**
- * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
+ * batadv_orig_hardif_seq_print_text() - writes originator infos for a specific
* outgoing interface
* @seq: debugfs table seq_file struct
* @offset: not used
@@ -1423,7 +1485,7 @@ out:
#endif
/**
- * batadv_orig_dump - Dump to netlink the originator infos for a specific
+ * batadv_orig_dump() - Dump to netlink the originator infos for a specific
* outgoing interface
* @msg: message to dump into
* @cb: parameters for the dump
@@ -1499,6 +1561,13 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
return ret;
}
+/**
+ * batadv_orig_hash_add_if() - Add interface to originators in orig_hash
+ * @hard_iface: hard interface to add (already slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
int max_if_num)
{
@@ -1534,6 +1603,13 @@ err:
return -ENOMEM;
}
+/**
+ * batadv_orig_hash_del_if() - Remove interface from originators in orig_hash
+ * @hard_iface: hard interface to remove (still slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
int max_if_num)
{
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index d94220a6d21a..8e543a3cdc6c 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -23,14 +24,8 @@
#include <linux/compiler.h>
#include <linux/if_ether.h>
#include <linux/jhash.h>
-#include <linux/kref.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/stddef.h>
#include <linux/types.h>
-#include "hash.h"
-
struct netlink_callback;
struct seq_file;
struct sk_buff;
@@ -89,8 +84,13 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
unsigned short vid);
void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan);
-/* hashfunction to choose an entry in a hash table of given size
- * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+/**
+ * batadv_choose_orig() - Return the index of the orig entry in the hash table
+ * @data: mac address of the originator node
+ * @size: the size of the hash table
+ *
+ * Return: the hash index where the object represented by @data should be
+ * stored at.
*/
static inline u32 batadv_choose_orig(const void *data, u32 size)
{
@@ -100,34 +100,7 @@ static inline u32 batadv_choose_orig(const void *data, u32 size)
return hash % size;
}
-static inline struct batadv_orig_node *
-batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
-{
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_head *head;
- struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
- int index;
-
- if (!hash)
- return NULL;
-
- index = batadv_choose_orig(data, hash->size);
- head = &hash->table[index];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- if (!batadv_compare_eth(orig_node, data))
- continue;
-
- if (!kref_get_unless_zero(&orig_node->refcount))
- continue;
-
- orig_node_tmp = orig_node;
- break;
- }
- rcu_read_unlock();
-
- return orig_node_tmp;
-}
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data);
#endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
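
For reference, the lookup moved out of this header follows the usual RCU hash pattern: batadv_choose_orig() picks the bucket, the bucket is walked under rcu_read_lock(), and an entry is only returned once kref_get_unless_zero() succeeds. A minimal caller sketch (assuming bat_priv and a MAC address addr are in scope; the find takes a reference that the caller must drop):

	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_hash_find(bat_priv, addr);
	if (!orig_node)
		return;	/* originator unknown */

	/* ... use orig_node under its own reference ... */

	batadv_orig_node_put(orig_node);	/* release the kref taken by the find */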
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 40d9bf3e5bfe..b6891e8b741c 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -33,6 +34,7 @@
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
+#include <uapi/linux/batadv_packet.h>
#include "bitarray.h"
#include "bridge_loop_avoidance.h"
@@ -43,7 +45,6 @@
#include "log.h"
#include "network-coding.h"
#include "originator.h"
-#include "packet.h"
#include "send.h"
#include "soft-interface.h"
#include "tp_meter.h"
@@ -54,7 +55,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
/**
- * _batadv_update_route - set the router for this originator
+ * _batadv_update_route() - set the router for this originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be configured
* @recv_if: the receive interface for which this route is set
@@ -118,7 +119,7 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
}
/**
- * batadv_update_route - set the router for this originator
+ * batadv_update_route() - set the router for this originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be configured
* @recv_if: the receive interface for which this route is set
@@ -145,7 +146,7 @@ out:
}
/**
- * batadv_window_protected - checks whether the host restarted and is in the
+ * batadv_window_protected() - checks whether the host restarted and is in the
* protection time.
* @bat_priv: the bat priv with all the soft interface information
* @seq_num_diff: difference between the current/received sequence number and
@@ -180,6 +181,14 @@ bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
return false;
}
+/**
+ * batadv_check_management_packet() - Check preconditions for management packets
+ * @skb: incoming packet buffer
+ * @hard_iface: incoming hard interface
+ * @header_len: minimal header length of packet type
+ *
+ * Return: true when management preconditions are met, false otherwise
+ */
bool batadv_check_management_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
int header_len)
@@ -212,7 +221,7 @@ bool batadv_check_management_packet(struct sk_buff *skb,
}
/**
- * batadv_recv_my_icmp_packet - receive an icmp packet locally
+ * batadv_recv_my_icmp_packet() - receive an icmp packet locally
* @bat_priv: the bat priv with all the soft interface information
* @skb: icmp packet to process
*
@@ -347,6 +356,13 @@ out:
return ret;
}
+/**
+ * batadv_recv_icmp_packet() - Process incoming icmp packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
int batadv_recv_icmp_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
@@ -440,7 +456,7 @@ free_skb:
}
/**
- * batadv_check_unicast_packet - Check for malformed unicast packets
+ * batadv_check_unicast_packet() - Check for malformed unicast packets
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
* @hdr_size: size of header to pull
@@ -478,7 +494,7 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
}
/**
- * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
+ * batadv_last_bonding_get() - Get last_bonding_candidate of orig_node
* @orig_node: originator node whose last bonding candidate should be retrieved
*
* Return: last bonding candidate of router or NULL if not found
@@ -501,7 +517,7 @@ batadv_last_bonding_get(struct batadv_orig_node *orig_node)
}
/**
- * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * batadv_last_bonding_replace() - Replace last_bonding_candidate of orig_node
* @orig_node: originator node whose bonding candidates should be replaced
* @new_candidate: new bonding candidate or NULL
*/
@@ -524,7 +540,7 @@ batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
}
/**
- * batadv_find_router - find a suitable router for this originator
+ * batadv_find_router() - find a suitable router for this originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the destination node
* @recv_if: pointer to interface this packet was received on
@@ -741,7 +757,7 @@ free_skb:
}
/**
- * batadv_reroute_unicast_packet - update the unicast header for re-routing
+ * batadv_reroute_unicast_packet() - update the unicast header for re-routing
* @bat_priv: the bat priv with all the soft interface information
* @unicast_packet: the unicast header to be updated
* @dst_addr: the payload destination
@@ -904,7 +920,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
}
/**
- * batadv_recv_unhandled_unicast_packet - receive and process packets which
+ * batadv_recv_unhandled_unicast_packet() - receive and process packets which
* are in the unicast number space but not yet known to the implementation
* @skb: unicast tvlv packet to process
* @recv_if: pointer to interface this packet was received on
@@ -935,6 +951,13 @@ free_skb:
return NET_RX_DROP;
}
+/**
+ * batadv_recv_unicast_packet() - Process incoming unicast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
int batadv_recv_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
@@ -1036,7 +1059,7 @@ free_skb:
}
/**
- * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
+ * batadv_recv_unicast_tvlv() - receive and process unicast tvlv packets
* @skb: unicast tvlv packet to process
* @recv_if: pointer to interface this packet was received on
*
@@ -1090,7 +1113,7 @@ free_skb:
}
/**
- * batadv_recv_frag_packet - process received fragment
+ * batadv_recv_frag_packet() - process received fragment
* @skb: the received fragment
* @recv_if: interface that the skb is received on
*
@@ -1155,6 +1178,13 @@ free_skb:
return ret;
}
+/**
+ * batadv_recv_bcast_packet() - Process incoming broadcast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
int batadv_recv_bcast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
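
The receive handlers documented in this file all share one contract: the skb is consumed either way, and the verdict is NET_RX_SUCCESS or NET_RX_DROP. A hedged skeleton of that shape (hdr_size and the processing step are placeholders, not code from this patch):

	static int sketch_recv_packet(struct sk_buff *skb,
				      struct batadv_hard_iface *recv_if)
	{
		struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
		int hdr_size = sizeof(struct batadv_unicast_packet);

		/* sanity checks, as batadv_check_unicast_packet() does for
		 * the unicast types above
		 */
		if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
			goto free_skb;

		/* ... process or forward the packet ... */
		return NET_RX_SUCCESS;

	free_skb:
		kfree_skb(skb);
		return NET_RX_DROP;
	}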
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 5ede16c32f15..a1289bc5f115 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 7895323fd2a7..2a5ab6f1076d 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -23,7 +24,7 @@
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
@@ -54,7 +55,7 @@
static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
/**
- * batadv_send_skb_packet - send an already prepared packet
+ * batadv_send_skb_packet() - send an already prepared packet
* @skb: the packet to send
* @hard_iface: the interface to use to send the broadcast packet
* @dst_addr: the payload destination
@@ -123,12 +124,30 @@ send_skb_err:
return NET_XMIT_DROP;
}
+/**
+ * batadv_send_broadcast_skb() - Send broadcast packet via hard interface
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @hard_iface: outgoing interface
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
int batadv_send_broadcast_skb(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface)
{
return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}
+/**
+ * batadv_send_unicast_skb() - Send unicast packet to neighbor
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @neigh: neighbor which is used as next hop to destination
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
int batadv_send_unicast_skb(struct sk_buff *skb,
struct batadv_neigh_node *neigh)
{
@@ -153,7 +172,7 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
}
/**
- * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
+ * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb.
* @skb: Packet to be transmitted.
* @orig_node: Final destination of the packet.
* @recv_if: Interface used when receiving the packet (can be NULL).
@@ -216,7 +235,7 @@ free_skb:
}
/**
- * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
+ * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
* common fields for unicast packets
* @skb: the skb carrying the unicast header to initialize
* @hdr_size: amount of bytes to push at the beginning of the skb
@@ -249,7 +268,7 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
}
/**
- * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
+ * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
*
@@ -264,7 +283,7 @@ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
}
/**
- * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
+ * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
* unicast 4addr header
* @bat_priv: the bat priv with all the soft interface information
* @skb: the skb containing the payload to encapsulate
@@ -308,7 +327,7 @@ out:
}
/**
- * batadv_send_skb_unicast - encapsulate and send an skb via unicast
+ * batadv_send_skb_unicast() - encapsulate and send an skb via unicast
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @packet_type: the batman unicast packet type to use
@@ -378,7 +397,7 @@ out:
}
/**
- * batadv_send_skb_via_tt_generic - send an skb via TT lookup
+ * batadv_send_skb_via_tt_generic() - send an skb via TT lookup
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @packet_type: the batman unicast packet type to use
@@ -425,7 +444,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
}
/**
- * batadv_send_skb_via_gw - send an skb via gateway lookup
+ * batadv_send_skb_via_gw() - send an skb via gateway lookup
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @vid: the vid to be used to search the translation table
@@ -452,7 +471,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
}
/**
- * batadv_forw_packet_free - free a forwarding packet
+ * batadv_forw_packet_free() - free a forwarding packet
* @forw_packet: The packet to free
* @dropped: whether the packet is freed because it is dropped
*
@@ -477,7 +496,7 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
}
/**
- * batadv_forw_packet_alloc - allocate a forwarding packet
+ * batadv_forw_packet_alloc() - allocate a forwarding packet
* @if_incoming: The (optional) if_incoming to be grabbed
* @if_outgoing: The (optional) if_outgoing to be grabbed
* @queue_left: The (optional) queue counter to decrease
@@ -543,7 +562,7 @@ err:
}
/**
- * batadv_forw_packet_was_stolen - check whether someone stole this packet
+ * batadv_forw_packet_was_stolen() - check whether someone stole this packet
* @forw_packet: the forwarding packet to check
*
* This function checks whether the given forwarding packet was claimed by
@@ -558,7 +577,7 @@ batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
}
/**
- * batadv_forw_packet_steal - claim a forw_packet for free()
+ * batadv_forw_packet_steal() - claim a forw_packet for free()
* @forw_packet: the forwarding packet to steal
* @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
*
@@ -589,7 +608,7 @@ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
}
/**
- * batadv_forw_packet_list_steal - claim a list of forward packets for free()
+ * batadv_forw_packet_list_steal() - claim a list of forward packets for free()
* @forw_list: the to be stolen forward packets
* @cleanup_list: a backup pointer, to be able to dispose the packet later
* @hard_iface: the interface to steal forward packets from
@@ -625,7 +644,7 @@ batadv_forw_packet_list_steal(struct hlist_head *forw_list,
}
/**
- * batadv_forw_packet_list_free - free a list of forward packets
+ * batadv_forw_packet_list_free() - free a list of forward packets
* @head: a list of to be freed forw_packets
*
* This function cancels the scheduling of any packet in the provided list,
@@ -649,7 +668,7 @@ static void batadv_forw_packet_list_free(struct hlist_head *head)
}
/**
- * batadv_forw_packet_queue - try to queue a forwarding packet
+ * batadv_forw_packet_queue() - try to queue a forwarding packet
* @forw_packet: the forwarding packet to queue
* @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
* @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list)
@@ -693,7 +712,7 @@ static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
}
/**
- * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
+ * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet
* @bat_priv: the bat priv with all the soft interface information
* @forw_packet: the forwarding packet to queue
* @send_time: timestamp (jiffies) when the packet is to be sent
@@ -712,7 +731,7 @@ batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
}
/**
- * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
+ * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet
* @bat_priv: the bat priv with all the soft interface information
* @forw_packet: the forwarding packet to queue
* @send_time: timestamp (jiffies) when the packet is to be sent
@@ -730,7 +749,7 @@ void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
}
/**
- * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
+ * batadv_add_bcast_packet_to_list() - queue broadcast packet for multiple sends
* @bat_priv: the bat priv with all the soft interface information
* @skb: broadcast packet to add
* @delay: number of jiffies to wait before sending
@@ -790,7 +809,7 @@ err:
}
/**
- * batadv_forw_packet_bcasts_left - check if a retransmission is necessary
+ * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
* @forw_packet: the forwarding packet to check
* @hard_iface: the interface to check on
*
@@ -818,7 +837,8 @@ batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
}
/**
- * batadv_forw_packet_bcasts_inc - increment retransmission counter of a packet
+ * batadv_forw_packet_bcasts_inc() - increment retransmission counter of a
+ * packet
* @forw_packet: the packet to increase the counter for
*/
static void
@@ -828,7 +848,7 @@ batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
}
/**
- * batadv_forw_packet_is_rebroadcast - check packet for previous transmissions
+ * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
* @forw_packet: the packet to check
*
* Return: True if this packet was transmitted before, false otherwise.
@@ -953,7 +973,7 @@ out:
}
/**
- * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
+ * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets
* @bat_priv: the bat priv with all the soft interface information
* @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
*
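
As the new kernel-doc for batadv_send_broadcast_skb() and batadv_send_unicast_skb() stresses, only a negative errno signals a hard failure; a non-negative return is no delivery guarantee. A hedged caller sketch (skb and neigh assumed in scope; the call consumes the skb):

	int ret;

	ret = batadv_send_unicast_skb(skb, neigh);
	if (ret < 0)
		pr_debug("batadv: unicast tx failed: %d\n", ret);
	/* non-negative ret: handed to the driver, may still be dropped
	 * by queueing or traffic shaping
	 */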
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index a16b34f473ef..1e8c79093623 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -23,8 +24,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-
-#include "packet.h"
+#include <uapi/linux/batadv_packet.h>
struct sk_buff;
@@ -76,7 +76,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid);
/**
- * batadv_send_skb_via_tt - send an skb via TT lookup
+ * batadv_send_skb_via_tt() - send an skb via TT lookup
* @bat_priv: the bat priv with all the soft interface information
* @skb: the payload to send
* @dst_hint: can be used to override the destination contained in the skb
@@ -97,7 +97,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
}
/**
- * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
+ * batadv_send_skb_via_tt_4addr() - send an skb via TT lookup
* @bat_priv: the bat priv with all the soft interface information
* @skb: the payload to send
* @packet_subtype: the unicast 4addr packet subtype to use
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9f673cdfecf8..900c5ce21cd4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
@@ -48,6 +49,7 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
@@ -59,11 +61,17 @@
#include "multicast.h"
#include "network-coding.h"
#include "originator.h"
-#include "packet.h"
#include "send.h"
#include "sysfs.h"
#include "translation-table.h"
+/**
+ * batadv_skb_head_push() - Increase header size and move (push) head pointer
+ * @skb: packet buffer which should be modified
+ * @len: number of bytes to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
int result;
@@ -96,7 +104,7 @@ static int batadv_interface_release(struct net_device *dev)
}
/**
- * batadv_sum_counter - Sum the cpu-local counters for index 'idx'
+ * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
* @bat_priv: the bat priv with all the soft interface information
* @idx: index of counter to sum up
*
@@ -169,7 +177,7 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
}
/**
- * batadv_interface_set_rx_mode - set the rx mode of a device
+ * batadv_interface_set_rx_mode() - set the rx mode of a device
* @dev: registered network device to modify
*
* We do not actually need to set any rx filters for the virtual batman
@@ -389,7 +397,7 @@ end:
}
/**
- * batadv_interface_rx - receive ethernet frame on local batman-adv interface
+ * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
* @soft_iface: local interface which will receive the ethernet frame
* @skb: ethernet frame for @soft_iface
* @hdr_size: size of already parsed batman-adv header
@@ -501,8 +509,8 @@ out:
}
/**
- * batadv_softif_vlan_release - release vlan from lists and queue for free after
- * rcu grace period
+ * batadv_softif_vlan_release() - release vlan from lists and queue for free
+ * after rcu grace period
* @ref: kref pointer of the vlan object
*/
static void batadv_softif_vlan_release(struct kref *ref)
@@ -519,7 +527,7 @@ static void batadv_softif_vlan_release(struct kref *ref)
}
/**
- * batadv_softif_vlan_put - decrease the vlan object refcounter and
+ * batadv_softif_vlan_put() - decrease the vlan object refcounter and
* possibly release it
* @vlan: the vlan object to release
*/
@@ -532,7 +540,7 @@ void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan)
}
/**
- * batadv_softif_vlan_get - get the vlan object for a specific vid
+ * batadv_softif_vlan_get() - get the vlan object for a specific vid
* @bat_priv: the bat priv with all the soft interface information
* @vid: the identifier of the vlan object to retrieve
*
@@ -561,7 +569,7 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
}
/**
- * batadv_softif_create_vlan - allocate the needed resources for a new vlan
+ * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
* @bat_priv: the bat priv with all the soft interface information
* @vid: the VLAN identifier
*
@@ -613,7 +621,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
}
/**
- * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
+ * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
* @bat_priv: the bat priv with all the soft interface information
* @vlan: the object to remove
*/
@@ -631,7 +639,7 @@ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
}
/**
- * batadv_interface_add_vid - ndo_add_vid API implementation
+ * batadv_interface_add_vid() - ndo_add_vid API implementation
* @dev: the netdev of the mesh interface
* @proto: protocol of the vlan id
* @vid: identifier of the new vlan
@@ -689,7 +697,7 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
}
/**
- * batadv_interface_kill_vid - ndo_kill_vid API implementation
+ * batadv_interface_kill_vid() - ndo_kill_vid API implementation
* @dev: the netdev of the mesh interface
* @proto: protocol of the vlan id
* @vid: identifier of the deleted vlan
@@ -732,7 +740,7 @@ static struct lock_class_key batadv_netdev_xmit_lock_key;
static struct lock_class_key batadv_netdev_addr_lock_key;
/**
- * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue
+ * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
* @dev: device which owns the tx queue
* @txq: tx queue to modify
* @_unused: always NULL
@@ -745,7 +753,7 @@ static void batadv_set_lockdep_class_one(struct net_device *dev,
}
/**
- * batadv_set_lockdep_class - Set txq and addr_list lockdep class
+ * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
* @dev: network device to modify
*/
static void batadv_set_lockdep_class(struct net_device *dev)
@@ -755,7 +763,7 @@ static void batadv_set_lockdep_class(struct net_device *dev)
}
/**
- * batadv_softif_init_late - late stage initialization of soft interface
+ * batadv_softif_init_late() - late stage initialization of soft interface
* @dev: registered network device to modify
*
* Return: error code on failures
@@ -860,7 +868,7 @@ free_bat_counters:
}
/**
- * batadv_softif_slave_add - Add a slave interface to a batadv_soft_interface
+ * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
* @dev: batadv_soft_interface used as master interface
* @slave_dev: net_device which should become the slave interface
* @extack: extended ACK report struct
@@ -888,7 +896,7 @@ out:
}
/**
- * batadv_softif_slave_del - Delete a slave iface from a batadv_soft_interface
+ * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
* @dev: batadv_soft_interface used as master interface
* @slave_dev: net_device which should be removed from the master interface
*
@@ -1023,7 +1031,7 @@ static const struct ethtool_ops batadv_ethtool_ops = {
};
/**
- * batadv_softif_free - Deconstructor of batadv_soft_interface
+ * batadv_softif_free() - Deconstructor of batadv_soft_interface
* @dev: Device to cleanup and remove
*/
static void batadv_softif_free(struct net_device *dev)
@@ -1039,7 +1047,7 @@ static void batadv_softif_free(struct net_device *dev)
}
/**
- * batadv_softif_init_early - early stage initialization of soft interface
+ * batadv_softif_init_early() - early stage initialization of soft interface
* @dev: registered network device to modify
*/
static void batadv_softif_init_early(struct net_device *dev)
@@ -1063,6 +1071,13 @@ static void batadv_softif_init_early(struct net_device *dev)
dev->ethtool_ops = &batadv_ethtool_ops;
}
+/**
+ * batadv_softif_create() - Create and register soft interface
+ * @net: the applicable net namespace
+ * @name: name of the new soft interface
+ *
+ * Return: newly allocated soft_interface, NULL on errors
+ */
struct net_device *batadv_softif_create(struct net *net, const char *name)
{
struct net_device *soft_iface;
@@ -1089,7 +1104,7 @@ struct net_device *batadv_softif_create(struct net *net, const char *name)
}
/**
- * batadv_softif_destroy_sysfs - deletion of batadv_soft_interface via sysfs
+ * batadv_softif_destroy_sysfs() - deletion of batadv_soft_interface via sysfs
* @soft_iface: the to-be-removed batman-adv interface
*/
void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
@@ -1111,7 +1126,8 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
}
/**
- * batadv_softif_destroy_netlink - deletion of batadv_soft_interface via netlink
+ * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
+ * netlink
* @soft_iface: the to-be-removed batman-adv interface
* @head: list pointer
*/
@@ -1139,6 +1155,12 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
unregister_netdevice_queue(soft_iface, head);
}
+/**
+ * batadv_softif_is_valid() - Check whether device is a batadv soft interface
+ * @net_dev: device which should be checked
+ *
+ * Return: true when net_dev is a batman-adv interface, false otherwise
+ */
bool batadv_softif_is_valid(const struct net_device *net_dev)
{
if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
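
batadv_softif_is_valid() identifies a batman-adv device purely by comparing the ndo_start_xmit pointer against batadv_interface_tx, with no private-data probing. A sketch of the same technique with hypothetical names:

	/* hedged sketch: recognize a driver's own net_device by its ops;
	 * my_driver_xmit is a hypothetical ndo_start_xmit implementation
	 */
	static bool my_driver_netdev_is_ours(const struct net_device *dev)
	{
		return dev->netdev_ops->ndo_start_xmit == my_driver_xmit;
	}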
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 639c3abb214a..075c5b5b2ce1 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index aa187fd42475..c1578fa0b952 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
@@ -22,10 +23,11 @@
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
+#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
@@ -37,6 +39,7 @@
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
@@ -45,7 +48,6 @@
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
-#include "packet.h"
#include "soft-interface.h"
static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
@@ -63,7 +65,7 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
}
/**
- * batadv_vlan_kobj_to_batpriv - convert a vlan kobj in the associated batpriv
+ * batadv_vlan_kobj_to_batpriv() - convert a vlan kobj in the associated batpriv
* @obj: kobject to convert
*
* Return: the associated batadv_priv struct.
@@ -83,7 +85,7 @@ static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
}
/**
- * batadv_kobj_to_vlan - convert a kobj in the associated softif_vlan struct
+ * batadv_kobj_to_vlan() - convert a kobj in the associated softif_vlan struct
* @bat_priv: the bat priv with all the soft interface information
* @obj: kobject to convert
*
@@ -598,7 +600,7 @@ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
}
/**
- * batadv_show_isolation_mark - print the current isolation mark/mask
+ * batadv_show_isolation_mark() - print the current isolation mark/mask
* @kobj: kobject representing the private mesh sysfs directory
* @attr: the batman-adv attribute the user is interacting with
* @buff: the buffer that will contain the data to send back to the user
@@ -616,8 +618,8 @@ static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
}
/**
- * batadv_store_isolation_mark - parse and store the isolation mark/mask entered
- * by the user
+ * batadv_store_isolation_mark() - parse and store the isolation mark/mask
+ * entered by the user
* @kobj: kobject representing the private mesh sysfs directory
* @attr: the batman-adv attribute the user is interacting with
* @buff: the buffer containing the user data
@@ -733,6 +735,12 @@ static struct batadv_attribute *batadv_vlan_attrs[] = {
NULL,
};
+/**
+ * batadv_sysfs_add_meshif() - Add soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_sysfs_add_meshif(struct net_device *dev)
{
struct kobject *batif_kobject = &dev->dev.kobj;
@@ -773,6 +781,10 @@ out:
return -ENOMEM;
}
+/**
+ * batadv_sysfs_del_meshif() - Remove soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ */
void batadv_sysfs_del_meshif(struct net_device *dev)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -788,7 +800,7 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
}
/**
- * batadv_sysfs_add_vlan - add all the needed sysfs objects for the new vlan
+ * batadv_sysfs_add_vlan() - add all the needed sysfs objects for the new vlan
* @dev: netdev of the mesh interface
* @vlan: private data of the newly added VLAN interface
*
@@ -849,7 +861,7 @@ out:
}
/**
- * batadv_sysfs_del_vlan - remove all the sysfs objects for a given VLAN
+ * batadv_sysfs_del_vlan() - remove all the sysfs objects for a given VLAN
* @bat_priv: the bat priv with all the soft interface information
* @vlan: the private data of the VLAN to destroy
*/
@@ -894,7 +906,7 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
}
/**
- * batadv_store_mesh_iface_finish - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_finish() - store new hardif mesh_iface state
* @net_dev: netdevice to add/remove to/from batman-adv soft-interface
* @ifname: name of soft-interface to modify
*
@@ -947,7 +959,7 @@ out:
}
/**
- * batadv_store_mesh_iface_work - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_work() - store new hardif mesh_iface state
* @work: work queue item
*
* Changes the parts of the hard+soft interface which cannot be modified under
@@ -1043,7 +1055,7 @@ static ssize_t batadv_show_iface_status(struct kobject *kobj,
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
/**
- * batadv_store_throughput_override - parse and store throughput override
+ * batadv_store_throughput_override() - parse and store throughput override
* entered by the user
* @kobj: kobject representing the private mesh sysfs directory
* @attr: the batman-adv attribute the user is interacting with
@@ -1130,6 +1142,13 @@ static struct batadv_attribute *batadv_batman_attrs[] = {
NULL,
};
+/**
+ * batadv_sysfs_add_hardif() - Add hard interface specific sysfs entries
+ * @hardif_obj: address where to store the pointer to new sysfs folder
+ * @dev: netdev struct of the hard interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
{
struct kobject *hardif_kobject = &dev->dev.kobj;
@@ -1164,6 +1183,11 @@ out:
return -ENOMEM;
}
+/**
+ * batadv_sysfs_del_hardif() - Remove hard interface specific sysfs entries
+ * @hardif_obj: address of the pointer to the batman-adv sysfs folder of the
+ * hard interface
+ */
void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
{
kobject_uevent(*hardif_obj, KOBJ_REMOVE);
@@ -1172,6 +1196,16 @@ void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
*hardif_obj = NULL;
}
+/**
+ * batadv_throw_uevent() - Send an uevent with batman-adv specific env data
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: subsystem type of event. Stored in uevent's BATTYPE
+ * @action: action type of event. Stored in uevent's BATACTION
+ * @data: string with additional information to the event (ignored for
+ * BATADV_UEV_DEL). Stored in uevent's BATDATA
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
enum batadv_uev_action action, const char *data)
{
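
batadv_throw_uevent() is built on kobject_uevent_env(), which takes a NULL-terminated array of KEY=value strings. A minimal sketch of that underlying call; the values and bat_kobj are made up, only the BATTYPE/BATACTION/BATDATA keys come from the kernel-doc above:

	char *envp[] = {
		"BATTYPE=gw",
		"BATACTION=change",
		"BATDATA=00:11:22:33:44:55",
		NULL,
	};

	kobject_uevent_env(bat_kobj, KOBJ_CHANGE, envp);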
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index e487412e256b..bbeee61221fa 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
@@ -35,10 +36,23 @@ struct net_device;
*/
#define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
+/**
+ * struct batadv_attribute - sysfs export helper for batman-adv attributes
+ */
struct batadv_attribute {
+ /** @attr: sysfs attribute file */
struct attribute attr;
+
+ /**
+ * @show: function to export the current attribute's content to sysfs
+ */
ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
char *buf);
+
+ /**
+ * @store: function to load new value from character buffer and save it
+ * in batman-adv attribute
+ */
ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
char *buf, size_t count);
};
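
The newly documented struct batadv_attribute ties a sysfs attribute to its show/store callbacks. A hedged, hand-rolled example of one read-only attribute (the real sysfs.c wraps such declarations in helper macros):

	static ssize_t batadv_show_example(struct kobject *kobj,
					   struct attribute *attr, char *buf)
	{
		return sprintf(buf, "example\n");	/* hypothetical value */
	}

	static struct batadv_attribute batadv_attr_example = {
		.attr = { .name = "example", .mode = 0444 },
		.show = batadv_show_example,
		.store = NULL,	/* read-only */
	};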
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index ebc4e2241c77..8b576712d0c1 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
*
* Edo Monticelli, Antonio Quartulli
@@ -19,13 +20,13 @@
#include "main.h"
#include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
@@ -48,13 +49,13 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "hard-interface.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
-#include "packet.h"
#include "send.h"
/**
@@ -97,7 +98,7 @@
static u8 batadv_tp_prerandom[4096] __read_mostly;
/**
- * batadv_tp_session_cookie - generate session cookie based on session ids
+ * batadv_tp_session_cookie() - generate session cookie based on session ids
* @session: TP session identifier
* @icmp_uid: icmp pseudo uid of the tp session
*
@@ -115,7 +116,7 @@ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
}
/**
- * batadv_tp_cwnd - compute the new cwnd size
+ * batadv_tp_cwnd() - compute the new cwnd size
* @base: base cwnd size value
* @increment: the value to add to base to get the new size
* @min: minimum cwnd value (usually MSS)
@@ -140,7 +141,7 @@ static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
}
/**
- * batadv_tp_updated_cwnd - update the Congestion Windows
+ * batadv_tp_update_cwnd() - update the Congestion Window
* @tp_vars: the private data of the current TP meter session
* @mss: maximum segment size of transmission
*
@@ -176,7 +177,7 @@ static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
}
/**
- * batadv_tp_update_rto - calculate new retransmission timeout
+ * batadv_tp_update_rto() - calculate new retransmission timeout
* @tp_vars: the private data of the current TP meter session
* @new_rtt: new roundtrip time in msec
*/
@@ -212,7 +213,7 @@ static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
}
/**
- * batadv_tp_batctl_notify - send client status result to client
+ * batadv_tp_batctl_notify() - send tp meter status result to the client
* @reason: reason for tp meter session stop
* @dst: destination of tp_meter session
* @bat_priv: the bat priv with all the soft interface information
@@ -244,7 +245,7 @@ static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
}
/**
- * batadv_tp_batctl_error_notify - send client error result to client
+ * batadv_tp_batctl_error_notify() - send tp meter error result to the client
* @reason: reason for tp meter session stop
* @dst: destination of tp_meter session
* @bat_priv: the bat priv with all the soft interface information
@@ -259,7 +260,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
}
/**
- * batadv_tp_list_find - find a tp_vars object in the global list
+ * batadv_tp_list_find() - find a tp_vars object in the global list
* @bat_priv: the bat priv with all the soft interface information
* @dst: the other endpoint MAC address to look for
*
@@ -294,7 +295,8 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
}
/**
- * batadv_tp_list_find_session - find tp_vars session object in the global list
+ * batadv_tp_list_find_session() - find tp_vars session object in the global
+ * list
* @bat_priv: the bat priv with all the soft interface information
* @dst: the other endpoint MAC address to look for
* @session: session identifier
@@ -335,7 +337,7 @@ batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
}
/**
- * batadv_tp_vars_release - release batadv_tp_vars from lists and queue for
+ * batadv_tp_vars_release() - release batadv_tp_vars from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the batadv_tp_vars
*/
@@ -360,7 +362,7 @@ static void batadv_tp_vars_release(struct kref *ref)
}
/**
- * batadv_tp_vars_put - decrement the batadv_tp_vars refcounter and possibly
+ * batadv_tp_vars_put() - decrement the batadv_tp_vars refcounter and possibly
* release it
* @tp_vars: the private data of the current TP meter session to be free'd
*/
@@ -370,7 +372,7 @@ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
}
/**
- * batadv_tp_sender_cleanup - cleanup sender data and drop and timer
+ * batadv_tp_sender_cleanup() - clean up sender data and drop the timer
* @bat_priv: the bat priv with all the soft interface information
* @tp_vars: the private data of the current TP meter session to cleanup
*/
@@ -400,7 +402,7 @@ static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
}
/**
- * batadv_tp_sender_end - print info about ended session and inform client
+ * batadv_tp_sender_end() - print info about ended session and inform client
* @bat_priv: the bat priv with all the soft interface information
* @tp_vars: the private data of the current TP meter session
*/
@@ -433,7 +435,7 @@ static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
}
/**
- * batadv_tp_sender_shutdown - let sender thread/timer stop gracefully
+ * batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully
* @tp_vars: the private data of the current TP meter session
* @reason: reason for tp meter session stop
*/
@@ -447,7 +449,7 @@ static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
}
/**
- * batadv_tp_sender_finish - stop sender session after test_length was reached
+ * batadv_tp_sender_finish() - stop sender session after test_length was reached
* @work: delayed work reference of the related tp_vars
*/
static void batadv_tp_sender_finish(struct work_struct *work)
@@ -463,7 +465,7 @@ static void batadv_tp_sender_finish(struct work_struct *work)
}
/**
- * batadv_tp_reset_sender_timer - reschedule the sender timer
+ * batadv_tp_reset_sender_timer() - reschedule the sender timer
* @tp_vars: the private TP meter data for this session
*
* Reschedule the timer using tp_vars->rto as delay
@@ -481,7 +483,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
}
/**
- * batadv_tp_sender_timeout - timer that fires in case of packet loss
+ * batadv_tp_sender_timeout() - timer that fires in case of packet loss
* @t: address to timer_list inside tp_vars
*
* If fired it means that there was packet loss.
@@ -531,7 +533,7 @@ static void batadv_tp_sender_timeout(struct timer_list *t)
}
/**
- * batadv_tp_fill_prerandom - Fill buffer with prefetched random bytes
+ * batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes
* @tp_vars: the private TP meter data for this session
* @buf: Buffer to fill with bytes
* @nbytes: amount of pseudorandom bytes
@@ -563,7 +565,7 @@ static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
}
/**
- * batadv_tp_send_msg - send a single message
+ * batadv_tp_send_msg() - send a single message
* @tp_vars: the private TP meter data for this session
* @src: source mac address
* @orig_node: the originator of the destination
@@ -623,7 +625,7 @@ static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
}
/**
- * batadv_tp_recv_ack - ACK receiving function
+ * batadv_tp_recv_ack() - ACK receiving function
* @bat_priv: the bat priv with all the soft interface information
* @skb: the buffer containing the received packet
*
@@ -765,7 +767,7 @@ out:
}
/**
- * batadv_tp_avail - check if congestion window is not full
+ * batadv_tp_avail() - check if congestion window is not full
* @tp_vars: the private data of the current TP meter session
* @payload_len: size of the payload of a single message
*
@@ -783,7 +785,7 @@ static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
}
/**
- * batadv_tp_wait_available - wait until congestion window becomes free or
+ * batadv_tp_wait_available() - wait until congestion window becomes free or
* timeout is reached
* @tp_vars: the private data of the current TP meter session
* @plen: size of the payload of a single message
@@ -805,7 +807,7 @@ static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
}
/**
- * batadv_tp_send - main sending thread of a tp meter session
+ * batadv_tp_send() - main sending thread of a tp meter session
* @arg: address of the related tp_vars
*
* Return: nothing, this function never returns
@@ -904,7 +906,8 @@ out:
}
/**
- * batadv_tp_start_kthread - start new thread which manages the tp meter sender
+ * batadv_tp_start_kthread() - start new thread which manages the tp meter
+ * sender
* @tp_vars: the private data of the current TP meter session
*/
static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
@@ -935,7 +938,7 @@ static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
}
/**
- * batadv_tp_start - start a new tp meter session
+ * batadv_tp_start() - start a new tp meter session
* @bat_priv: the bat priv with all the soft interface information
* @dst: the receiver MAC address
* @test_length: test length in milliseconds
@@ -1060,7 +1063,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
}
/**
- * batadv_tp_stop - stop currently running tp meter session
+ * batadv_tp_stop() - stop currently running tp meter session
* @bat_priv: the bat priv with all the soft interface information
* @dst: the receiver MAC address
* @return_value: reason for tp meter session stop
@@ -1092,7 +1095,7 @@ out:
}
/**
- * batadv_tp_reset_receiver_timer - reset the receiver shutdown timer
+ * batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer
* @tp_vars: the private data of the current TP meter session
*
* start the receiver shutdown timer or reset it if already started
@@ -1104,7 +1107,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
}
/**
- * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is
+ * batadv_tp_receiver_shutdown() - stop a tp meter receiver when timeout is
* reached without a received ack
* @t: address to timer_list inside tp_vars
*/
@@ -1149,7 +1152,7 @@ static void batadv_tp_receiver_shutdown(struct timer_list *t)
}
/**
- * batadv_tp_send_ack - send an ACK packet
+ * batadv_tp_send_ack() - send an ACK packet
* @bat_priv: the bat priv with all the soft interface information
* @dst: the mac address of the destination originator
* @seq: the sequence number to ACK
@@ -1221,7 +1224,7 @@ out:
}
/**
- * batadv_tp_handle_out_of_order - store an out of order packet
+ * batadv_tp_handle_out_of_order() - store an out of order packet
* @tp_vars: the private data of the current TP meter session
* @skb: the buffer containing the received packet
*
@@ -1297,7 +1300,7 @@ out:
}
/**
- * batadv_tp_ack_unordered - update number received bytes in current stream
+ * batadv_tp_ack_unordered() - update the number of received bytes in current stream
* without gaps
* @tp_vars: the private data of the current TP meter session
*/
@@ -1330,7 +1333,7 @@ static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
}
/**
- * batadv_tp_init_recv - return matching or create new receiver tp_vars
+ * batadv_tp_init_recv() - return matching or create new receiver tp_vars
* @bat_priv: the bat priv with all the soft interface information
* @icmp: received icmp tp msg
*
@@ -1383,7 +1386,7 @@ out_unlock:
}
/**
- * batadv_tp_recv_msg - process a single data message
+ * batadv_tp_recv_msg() - process a single data message
* @bat_priv: the bat priv with all the soft interface information
* @skb: the buffer containing the received packet
*
@@ -1468,7 +1471,7 @@ out:
}
/**
- * batadv_tp_meter_recv - main TP Meter receiving function
+ * batadv_tp_meter_recv() - main TP Meter receiving function
* @bat_priv: the bat priv with all the soft interface information
* @skb: the buffer containing the received packet
*/
@@ -1494,7 +1497,7 @@ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
}
/**
- * batadv_tp_meter_init - initialize global tp_meter structures
+ * batadv_tp_meter_init() - initialize global tp_meter structures
*/
void __init batadv_tp_meter_init(void)
{
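
The cwnd and RTO updates referenced throughout this file follow TCP conventions. A hedged sketch of the RTO arithmetic (RFC 6298-style smoothing; the constants are the usual TCP ones and are not taken from this patch):

	/* hedged sketch, not the exact batman-adv arithmetic */
	static void sketch_tp_update_rto(u32 *srtt, u32 *rttvar, u32 *rto,
					 u32 rtt)
	{
		u32 delta = *srtt > rtt ? *srtt - rtt : rtt - *srtt;

		*rttvar = (3 * *rttvar + delta) / 4;	/* 3/4 old + 1/4 |srtt - rtt| */
		*srtt = (7 * *srtt + rtt) / 8;		/* 7/8 old + 1/8 sample */
		*rto = *srtt + 4 * *rttvar;		/* classic RTO formula */
	}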
diff --git a/net/batman-adv/tp_meter.h b/net/batman-adv/tp_meter.h
index a8ada5c123bd..c8b8f2cb2c2b 100644
--- a/net/batman-adv/tp_meter.h
+++ b/net/batman-adv/tp_meter.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
*
* Edo Monticelli, Antonio Quartulli
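
batadv_tp_session_cookie() condenses the two session id bytes and the icmp uid into a single u32. One plausible packing, offered only as a hedged sketch:

	static u32 sketch_tp_session_cookie(const u8 session[2], u8 icmp_uid)
	{
		u32 cookie;

		cookie = icmp_uid << 16;
		cookie |= session[0] << 8;
		cookie |= session[1];

		return cookie;
	}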
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 8a3ce79b1307..7550a9ccd695 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
@@ -20,14 +21,14 @@
#include <linux/atomic.h>
#include <linux/bitops.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/crc32c.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jhash.h>
@@ -36,6 +37,7 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
+#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
@@ -50,6 +52,7 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bridge_loop_avoidance.h"
@@ -58,7 +61,6 @@
#include "log.h"
#include "netlink.h"
#include "originator.h"
-#include "packet.h"
#include "soft-interface.h"
#include "tvlv.h"
@@ -86,7 +88,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
bool roaming);
/**
- * batadv_compare_tt - check if two TT entries are the same
+ * batadv_compare_tt() - check if two TT entries are the same
* @node: the list element pointer of the first TT entry
* @data2: pointer to the tt_common_entry of the second TT entry
*
@@ -105,7 +107,7 @@ static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
}
/**
- * batadv_choose_tt - return the index of the tt entry in the hash table
+ * batadv_choose_tt() - return the index of the tt entry in the hash table
* @data: pointer to the tt_common_entry object to map
* @size: the size of the hash table
*
@@ -125,7 +127,7 @@ static inline u32 batadv_choose_tt(const void *data, u32 size)
}
/**
- * batadv_tt_hash_find - look for a client in the given hash table
+ * batadv_tt_hash_find() - look for a client in the given hash table
* @hash: the hash table to search
* @addr: the mac address of the client to look for
* @vid: VLAN identifier
@@ -170,7 +172,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
}
/**
- * batadv_tt_local_hash_find - search the local table for a given client
+ * batadv_tt_local_hash_find() - search the local table for a given client
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to look for
* @vid: VLAN identifier
@@ -195,7 +197,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
}
/**
- * batadv_tt_global_hash_find - search the global table for a given client
+ * batadv_tt_global_hash_find() - search the global table for a given client
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to look for
* @vid: VLAN identifier
@@ -220,7 +222,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
}
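
Unlike batadv_choose_orig(), which hashes the MAC address alone, batadv_choose_tt() (documented above) has to fold both the address and the VLAN id into the bucket index. A hedged sketch using jhash from <linux/jhash.h> (field names follow struct batadv_tt_common_entry; the body is illustrative, not the patched code):

	static u32 sketch_choose_tt(const struct batadv_tt_common_entry *tt,
				    u32 size)
	{
		u32 hash = 0;

		hash = jhash(&tt->addr, ETH_ALEN, hash);
		hash = jhash(&tt->vid, sizeof(tt->vid), hash);

		return hash % size;
	}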
/**
- * batadv_tt_local_entry_free_rcu - free the tt_local_entry
+ * batadv_tt_local_entry_free_rcu() - free the tt_local_entry
* @rcu: rcu pointer of the tt_local_entry
*/
static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
@@ -234,7 +236,7 @@ static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
}
/**
- * batadv_tt_local_entry_release - release tt_local_entry from lists and queue
+ * batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
* for free after rcu grace period
* @ref: kref pointer of the nc_node
*/
@@ -251,7 +253,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
}
/**
- * batadv_tt_local_entry_put - decrement the tt_local_entry refcounter and
+ * batadv_tt_local_entry_put() - decrement the tt_local_entry refcounter and
* possibly release it
* @tt_local_entry: tt_local_entry to be free'd
*/
@@ -263,7 +265,7 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
}
/**
- * batadv_tt_global_entry_free_rcu - free the tt_global_entry
+ * batadv_tt_global_entry_free_rcu() - free the tt_global_entry
* @rcu: rcu pointer of the tt_global_entry
*/
static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
@@ -277,8 +279,8 @@ static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
}
/**
- * batadv_tt_global_entry_release - release tt_global_entry from lists and queue
- * for free after rcu grace period
+ * batadv_tt_global_entry_release() - release tt_global_entry from lists and
+ * queue for free after rcu grace period
* @ref: kref pointer of the nc_node
*/
static void batadv_tt_global_entry_release(struct kref *ref)
@@ -294,7 +296,7 @@ static void batadv_tt_global_entry_release(struct kref *ref)
}
/**
- * batadv_tt_global_entry_put - decrement the tt_global_entry refcounter and
+ * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and
* possibly release it
* @tt_global_entry: tt_global_entry to be free'd
*/
@@ -306,7 +308,7 @@ batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
}
/**
- * batadv_tt_global_hash_count - count the number of orig entries
+ * batadv_tt_global_hash_count() - count the number of orig entries
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to count entries for
* @vid: VLAN identifier
@@ -331,8 +333,8 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_local_size_mod - change the size by v of the local table identified
- * by vid
+ * batadv_tt_local_size_mod() - change the size by v of the local table
+ * identified by vid
* @bat_priv: the bat priv with all the soft interface information
* @vid: the VLAN identifier of the sub-table to change
* @v: the amount to sum to the local table size
@@ -352,8 +354,8 @@ static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_local_size_inc - increase by one the local table size for the given
- * vid
+ * batadv_tt_local_size_inc() - increase by one the local table size for the
+ * given vid
* @bat_priv: the bat priv with all the soft interface information
* @vid: the VLAN identifier
*/
@@ -364,8 +366,8 @@ static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_local_size_dec - decrease by one the local table size for the given
- * vid
+ * batadv_tt_local_size_dec() - decrease by one the local table size for the
+ * given vid
* @bat_priv: the bat priv with all the soft interface information
* @vid: the VLAN identifier
*/
@@ -376,7 +378,7 @@ static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_global_size_mod - change the size by v of the global table
+ * batadv_tt_global_size_mod() - change the size by v of the global table
* for orig_node identified by vid
* @orig_node: the originator for which the table has to be modified
* @vid: the VLAN identifier
@@ -404,7 +406,7 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
}
/**
- * batadv_tt_global_size_inc - increase by one the global table size for the
+ * batadv_tt_global_size_inc() - increase by one the global table size for the
* given vid
 * @orig_node: the originator whose global table size has to be increased
* @vid: the vlan identifier
@@ -416,7 +418,7 @@ static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
}
/**
- * batadv_tt_global_size_dec - decrease by one the global table size for the
+ * batadv_tt_global_size_dec() - decrease by one the global table size for the
* given vid
 * @orig_node: the originator whose global table size has to be decreased
* @vid: the vlan identifier
@@ -428,7 +430,7 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
}
/**
- * batadv_tt_orig_list_entry_free_rcu - free the orig_entry
+ * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
* @rcu: rcu pointer of the orig_entry
*/
static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
@@ -441,7 +443,7 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
}
/**
- * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ * batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the tt orig entry
*/
@@ -457,7 +459,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
}
/**
- * batadv_tt_orig_list_entry_put - decrement the tt orig entry refcounter and
+ * batadv_tt_orig_list_entry_put() - decrement the tt orig entry refcounter and
* possibly release it
* @orig_entry: tt orig entry to be free'd
*/
@@ -468,7 +470,7 @@ batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry)
}
/**
- * batadv_tt_local_event - store a local TT event (ADD/DEL)
+ * batadv_tt_local_event() - store a local TT event (ADD/DEL)
* @bat_priv: the bat priv with all the soft interface information
* @tt_local_entry: the TT entry involved in the event
* @event_flags: flags to store in the event structure
@@ -543,7 +545,7 @@ unlock:
}
/**
- * batadv_tt_len - compute length in bytes of given number of tt changes
+ * batadv_tt_len() - compute length in bytes of given number of tt changes
* @changes_num: number of tt changes
*
* Return: computed length in bytes.
@@ -554,7 +556,7 @@ static int batadv_tt_len(int changes_num)
}
/**
- * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
+ * batadv_tt_entries() - compute the number of entries fitting in tt_len bytes
* @tt_len: available space
*
* Return: the number of entries.
@@ -565,8 +567,8 @@ static u16 batadv_tt_entries(u16 tt_len)
}
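
batadv_tt_len() and batadv_tt_entries() are inverse conversions between a change count and a buffer size; assuming (as the names suggest) a fixed per-record on-wire size, they reduce to one multiplication and one integer division that rounds down. A sketch with a hypothetical record size:

    #include <linux/types.h>

    /* one TT change record is assumed to have a fixed on-wire size */
    #define TT_CHANGE_RECORD_LEN 12 /* hypothetical value for illustration */

    static int tt_len(int changes_num)
    {
            return changes_num * TT_CHANGE_RECORD_LEN;      /* bytes needed */
    }

    static u16 tt_entries(u16 len)
    {
            return len / TT_CHANGE_RECORD_LEN;      /* entries that fit */
    }

With these definitions tt_entries(tt_len(5)) == 5, and any trailing partial record in a buffer is ignored.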
/**
- * batadv_tt_local_table_transmit_size - calculates the local translation table
- * size when transmitted over the air
+ * batadv_tt_local_table_transmit_size() - calculates the local translation
+ * table size when transmitted over the air
* @bat_priv: the bat priv with all the soft interface information
*
* Return: local translation table size in bytes.
@@ -625,7 +627,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_local_add - add a new client to the local table or update an
+ * batadv_tt_local_add() - add a new client to the local table or update an
* existing client
* @soft_iface: netdev struct of the mesh interface
* @addr: the mac address of the client to add
@@ -830,7 +832,7 @@ out:
}
/**
- * batadv_tt_prepare_tvlv_global_data - prepare the TVLV TT header to send
+ * batadv_tt_prepare_tvlv_global_data() - prepare the TVLV TT header to send
* within a TT Response directed to another node
* @orig_node: originator for which the TT data has to be prepared
* @tt_data: uninitialised pointer to the address of the TVLV buffer
@@ -903,8 +905,8 @@ out:
}
/**
- * batadv_tt_prepare_tvlv_local_data - allocate and prepare the TT TVLV for this
- * node
+ * batadv_tt_prepare_tvlv_local_data() - allocate and prepare the TT TVLV for
+ * this node
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: uninitialised pointer to the address of the TVLV buffer
* @tt_change: uninitialised pointer to the address of the area where the TT
@@ -977,8 +979,8 @@ out:
}
/**
- * batadv_tt_tvlv_container_update - update the translation table tvlv container
- * after local tt changes have been committed
+ * batadv_tt_tvlv_container_update() - update the translation table tvlv
+ * container after local tt changes have been committed
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -1053,6 +1055,14 @@ container_register:
}
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_tt_local_seq_print_text() - Print the local tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1123,7 +1133,7 @@ out:
#endif
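
The two seq_print_text helpers gaining kernel-doc in this hunk are ordinary seq_file show callbacks: they print one line per table row and always return 0. A minimal debugfs sketch under hypothetical names, assuming the DEFINE_SHOW_ATTRIBUTE() helper is available in the tree:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    /* print one line per table entry; 'offset' is unused, as in the patch */
    static int foo_table_show(struct seq_file *seq, void *offset)
    {
            seq_printf(seq, "entry: %s\n", "example");
            return 0;       /* Return: always 0 */
    }
    DEFINE_SHOW_ATTRIBUTE(foo_table);       /* generates foo_table_fops */

    /* debugfs_create_file("foo_table", 0444, parent, NULL, &foo_table_fops); */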
/**
- * batadv_tt_local_dump_entry - Dump one TT local entry into a message
+ * batadv_tt_local_dump_entry() - Dump one TT local entry into a message
 * @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -1179,7 +1189,7 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_tt_local_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -1216,7 +1226,7 @@ batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_tt_local_dump - Dump TT local entries into a message
+ * batadv_tt_local_dump() - Dump TT local entries into a message
* @msg: Netlink message to dump into
* @cb: Parameters from query
*
@@ -1300,7 +1310,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
}
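
The *_dump/_dump_bucket/_dump_entry helpers documented above implement the usual netlink dump protocol: each call fills one skb with as many entries as fit, and the iteration position is parked in cb->args[] so the next call resumes there. A hedged generic-netlink sketch with hypothetical family, command, and attribute names:

    #include <net/genetlink.h>

    /* hypothetical family and attribute, for illustration only */
    extern struct genl_family foo_family;
    enum { FOO_A_UNSPEC, FOO_A_INDEX, __FOO_A_MAX };

    static int foo_dump(struct sk_buff *msg, struct netlink_callback *cb)
    {
            int idx = cb->args[0];  /* resume where the last chunk stopped */
            void *hdr;

            for (; idx < 100; idx++) {      /* 100 stands in for the table size */
                    hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
                                      cb->nlh->nlmsg_seq, &foo_family,
                                      NLM_F_MULTI, 0 /* hypothetical cmd */);
                    if (!hdr)
                            break;  /* message full, deliver this chunk */
                    if (nla_put_u32(msg, FOO_A_INDEX, idx)) {
                            genlmsg_cancel(msg, hdr);
                            break;
                    }
                    genlmsg_end(msg, hdr);
            }
            cb->args[0] = idx;      /* next invocation resumes here */
            return msg->len;
    }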
/**
- * batadv_tt_local_remove - logically remove an entry from the local table
+ * batadv_tt_local_remove() - logically remove an entry from the local table
* @bat_priv: the bat priv with all the soft interface information
* @addr: the MAC address of the client to remove
* @vid: VLAN identifier
@@ -1362,7 +1372,7 @@ out:
}
/**
- * batadv_tt_local_purge_list - purge inactive tt local entries
+ * batadv_tt_local_purge_list() - purge inactive tt local entries
* @bat_priv: the bat priv with all the soft interface information
* @head: pointer to the list containing the local tt entries
* @timeout: parameter deciding whether a given tt local entry is considered
@@ -1397,7 +1407,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_local_purge - purge inactive tt local entries
+ * batadv_tt_local_purge() - purge inactive tt local entries
* @bat_priv: the bat priv with all the soft interface information
* @timeout: parameter deciding whether a given tt local entry is considered
* inactive or not
@@ -1490,7 +1500,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
}
/**
- * batadv_tt_global_orig_entry_find - find a TT orig_list_entry
+ * batadv_tt_global_orig_entry_find() - find a TT orig_list_entry
* @entry: the TT global entry where the orig_list_entry has to be
* extracted from
* @orig_node: the originator for which the orig_list_entry has to be found
@@ -1524,8 +1534,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
}
/**
- * batadv_tt_global_entry_has_orig - check if a TT global entry is also handled
- * by a given originator
+ * batadv_tt_global_entry_has_orig() - check if a TT global entry is also
+ * handled by a given originator
* @entry: the TT global entry to check
* @orig_node: the originator to search in the list
*
@@ -1550,7 +1560,7 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
}
/**
- * batadv_tt_global_sync_flags - update TT sync flags
+ * batadv_tt_global_sync_flags() - update TT sync flags
* @tt_global: the TT global entry to update sync flags in
*
* Updates the sync flag bits in the tt_global flag attribute with a logical
@@ -1574,7 +1584,7 @@ batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
}
/**
- * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * batadv_tt_global_orig_entry_add() - add or update a TT orig entry
* @tt_global: the TT global entry to add an orig entry in
* @orig_node: the originator to add an orig entry for
* @ttvn: translation table version number of this changeset
@@ -1624,7 +1634,7 @@ out:
}
/**
- * batadv_tt_global_add - add a new TT global entry or update an existing one
+ * batadv_tt_global_add() - add a new TT global entry or update an existing one
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the originator announcing the client
* @tt_addr: the mac address of the non-mesh client
@@ -1796,7 +1806,7 @@ out:
}
/**
- * batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * batadv_transtable_best_orig() - Get best originator list entry from tt entry
* @bat_priv: the bat priv with all the soft interface information
* @tt_global_entry: global translation table entry to be analyzed
*
@@ -1842,8 +1852,8 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
- * batadv_tt_global_print_entry - print all orig nodes who announce the address
- * for this global entry
+ * batadv_tt_global_print_entry() - print all orig nodes who announce the
+ * address for this global entry
* @bat_priv: the bat priv with all the soft interface information
* @tt_global_entry: global translation table entry to be printed
* @seq: debugfs table seq_file struct
@@ -1925,6 +1935,13 @@ print_list:
}
}
+/**
+ * batadv_tt_global_seq_print_text() - Print the global tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1967,7 +1984,7 @@ out:
#endif
/**
- * batadv_tt_global_dump_subentry - Dump all TT local entries into a message
+ * batadv_tt_global_dump_subentry() - Dump one TT global subentry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -2028,7 +2045,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_tt_global_dump_entry - Dump one TT global entry into a message
+ * batadv_tt_global_dump_entry() - Dump one TT global entry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -2073,7 +2090,7 @@ batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_tt_global_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_global_dump_bucket() - Dump one TT global bucket into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
@@ -2112,7 +2129,7 @@ batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
}
/**
- * batadv_tt_global_dump - Dump TT global entries into a message
+ * batadv_tt_global_dump() - Dump TT global entries into a message
* @msg: Netlink message to dump into
* @cb: Parameters from query
*
@@ -2180,7 +2197,7 @@ int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
}
/**
- * _batadv_tt_global_del_orig_entry - remove and free an orig_entry
+ * _batadv_tt_global_del_orig_entry() - remove and free an orig_entry
* @tt_global_entry: the global entry to remove the orig_entry from
* @orig_entry: the orig entry to remove and free
*
@@ -2222,7 +2239,7 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
}
/**
- * batadv_tt_global_del_orig_node - remove orig_node from a global tt entry
+ * batadv_tt_global_del_orig_node() - remove orig_node from a global tt entry
* @bat_priv: the bat priv with all the soft interface information
* @tt_global_entry: the global entry to remove the orig_node from
* @orig_node: the originator announcing the client
@@ -2301,7 +2318,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_global_del - remove a client from the global table
+ * batadv_tt_global_del() - remove a client from the global table
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: an originator serving this client
* @addr: the mac address of the client
@@ -2367,8 +2384,8 @@ out:
}
/**
- * batadv_tt_global_del_orig - remove all the TT global entries belonging to the
- * given originator matching the provided vid
+ * batadv_tt_global_del_orig() - remove all the TT global entries belonging to
+ * the given originator matching the provided vid
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the originator owning the entries to remove
* @match_vid: the VLAN identifier to match. If negative all the entries will be
@@ -2539,7 +2556,7 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
}
/**
- * batadv_transtable_search - get the mesh destination for a given client
+ * batadv_transtable_search() - get the mesh destination for a given client
* @bat_priv: the bat priv with all the soft interface information
* @src: mac address of the source client
* @addr: mac address of the destination client
@@ -2599,7 +2616,7 @@ out:
}
/**
- * batadv_tt_global_crc - calculates the checksum of the local table belonging
+ * batadv_tt_global_crc() - calculates the checksum of the local table belonging
* to the given orig_node
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator for which the CRC should be computed
@@ -2694,7 +2711,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_local_crc - calculates the checksum of the local table
+ * batadv_tt_local_crc() - calculates the checksum of the local table
* @bat_priv: the bat priv with all the soft interface information
* @vid: VLAN identifier for which the CRC32 has to be computed
*
@@ -2751,7 +2768,7 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
}
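
Both CRC helpers fold the per-VLAN table contents into a 32-bit checksum; the batman-adv TT code builds these from the kernel's crc32c() library function. A sketch of folding one (address, vid) pair, not the exact field order used by batman-adv:

    #include <linux/crc32c.h>
    #include <linux/if_ether.h>
    #include <linux/types.h>

    /* fold one (addr, vid) pair into a running CRC32C, sketch only */
    static u32 tt_crc_one(u32 crc, const u8 *addr, unsigned short vid)
    {
            crc = crc32c(crc, &vid, sizeof(vid));
            return crc32c(crc, addr, ETH_ALEN);
    }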
/**
- * batadv_tt_req_node_release - free tt_req node entry
+ * batadv_tt_req_node_release() - free tt_req node entry
* @ref: kref pointer of the tt req_node entry
*/
static void batadv_tt_req_node_release(struct kref *ref)
@@ -2764,7 +2781,7 @@ static void batadv_tt_req_node_release(struct kref *ref)
}
/**
- * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * batadv_tt_req_node_put() - decrement the tt_req_node refcounter and
* possibly release it
* @tt_req_node: tt_req_node to be free'd
*/
@@ -2826,7 +2843,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
}
/**
- * batadv_tt_req_node_new - search and possibly create a tt_req_node object
+ * batadv_tt_req_node_new() - search and possibly create a tt_req_node object
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node this request is being issued for
*
@@ -2863,7 +2880,7 @@ unlock:
}
/**
- * batadv_tt_local_valid - verify that given tt entry is a valid one
+ * batadv_tt_local_valid() - verify that given tt entry is a valid one
 * @entry_ptr: local tt entry to be checked
* @data_ptr: not used but definition required to satisfy the callback prototype
*
@@ -2897,7 +2914,7 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
}
/**
- * batadv_tt_tvlv_generate - fill the tvlv buff with the tt entries from the
+ * batadv_tt_tvlv_generate() - fill the tvlv buff with the tt entries from the
* specified tt hash
* @bat_priv: the bat priv with all the soft interface information
* @hash: hash table containing the tt entries
@@ -2948,7 +2965,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_global_check_crc - check if all the CRCs are correct
+ * batadv_tt_global_check_crc() - check if all the CRCs are correct
* @orig_node: originator for which the CRCs have to be checked
* @tt_vlan: pointer to the first tvlv VLAN entry
* @num_vlan: number of tvlv VLAN entries
@@ -3005,7 +3022,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
}
/**
- * batadv_tt_local_update_crc - update all the local CRCs
+ * batadv_tt_local_update_crc() - update all the local CRCs
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
@@ -3021,7 +3038,7 @@ static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
}
/**
- * batadv_tt_global_update_crc - update all the global CRCs for this orig_node
+ * batadv_tt_global_update_crc() - update all the global CRCs for this orig_node
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the orig_node for which the CRCs have to be updated
*/
@@ -3048,7 +3065,7 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
}
/**
- * batadv_send_tt_request - send a TT Request message to a given node
+ * batadv_send_tt_request() - send a TT Request message to a given node
* @bat_priv: the bat priv with all the soft interface information
* @dst_orig_node: the destination of the message
* @ttvn: the version number that the source of the message is looking for
@@ -3137,7 +3154,7 @@ out:
}
/**
- * batadv_send_other_tt_response - send reply to tt request concerning another
+ * batadv_send_other_tt_response() - send reply to tt request concerning another
* node's translation table
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: tt data containing the tt request information
@@ -3270,8 +3287,8 @@ out:
}
/**
- * batadv_send_my_tt_response - send reply to tt request concerning this node's
- * translation table
+ * batadv_send_my_tt_response() - send reply to tt request concerning this
+ * node's translation table
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: tt data containing the tt request information
* @req_src: mac address of tt request sender
@@ -3388,7 +3405,7 @@ out:
}
/**
- * batadv_send_tt_response - send reply to tt request
+ * batadv_send_tt_response() - send reply to tt request
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: tt data containing the tt request information
* @req_src: mac address of tt request sender
@@ -3484,7 +3501,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
}
/**
- * batadv_is_my_client - check if a client is served by the local node
+ * batadv_is_my_client() - check if a client is served by the local node
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to check
* @vid: VLAN identifier
@@ -3514,7 +3531,7 @@ out:
}
/**
- * batadv_handle_tt_response - process incoming tt reply
+ * batadv_handle_tt_response() - process incoming tt reply
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: tt data containing the tt request information
* @resp_src: mac address of tt reply sender
@@ -3607,7 +3624,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
}
/**
- * batadv_tt_check_roam_count - check if a client has roamed too frequently
+ * batadv_tt_check_roam_count() - check if a client has roamed too frequently
* @bat_priv: the bat priv with all the soft interface information
* @client: mac address of the roaming client
*
@@ -3662,7 +3679,7 @@ unlock:
}
/**
- * batadv_send_roam_adv - send a roaming advertisement message
+ * batadv_send_roam_adv() - send a roaming advertisement message
* @bat_priv: the bat priv with all the soft interface information
* @client: mac address of the roaming client
* @vid: VLAN identifier
@@ -3727,6 +3744,10 @@ static void batadv_tt_purge(struct work_struct *work)
msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
}
+/**
+ * batadv_tt_free() - Free translation table of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
void batadv_tt_free(struct batadv_priv *bat_priv)
{
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
@@ -3744,7 +3765,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
}
/**
- * batadv_tt_local_set_flags - set or unset the specified flags on the local
+ * batadv_tt_local_set_flags() - set or unset the specified flags on the local
* table and possibly count them in the TT size
* @bat_priv: the bat priv with all the soft interface information
* @flags: the flag to switch
@@ -3830,7 +3851,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
}
/**
- * batadv_tt_local_commit_changes_nolock - commit all pending local tt changes
+ * batadv_tt_local_commit_changes_nolock() - commit all pending local tt changes
* which have been queued in the time since the last commit
* @bat_priv: the bat priv with all the soft interface information
*
@@ -3863,7 +3884,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
}
/**
- * batadv_tt_local_commit_changes - commit all pending local tt changes which
+ * batadv_tt_local_commit_changes() - commit all pending local tt changes which
* have been queued in the time since the last commit
* @bat_priv: the bat priv with all the soft interface information
*/
@@ -3874,6 +3895,15 @@ void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
spin_unlock_bh(&bat_priv->tt.commit_lock);
}
+/**
+ * batadv_is_ap_isolated() - Check if packet from upper layer should be dropped
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of packet
+ * @dst: destination mac address of packet
+ * @vid: vlan id of packet
+ *
+ * Return: true when src+dst(+vid) pair should be isolated, false otherwise
+ */
bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
unsigned short vid)
{
@@ -3909,7 +3939,7 @@ vlan_put:
}
/**
- * batadv_tt_update_orig - update global translation table with new tt
+ * batadv_tt_update_orig() - update global translation table with new tt
* information received via ogms
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the orig_node of the ogm
@@ -3994,7 +4024,7 @@ request_table:
}
/**
- * batadv_tt_global_client_is_roaming - check if a client is marked as roaming
+ * batadv_tt_global_client_is_roaming() - check if a client is marked as roaming
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to check
* @vid: VLAN identifier
@@ -4020,7 +4050,7 @@ out:
}
/**
- * batadv_tt_local_client_is_roaming - tells whether the client is roaming
+ * batadv_tt_local_client_is_roaming() - tells whether the client is roaming
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the local client to query
* @vid: VLAN identifier
@@ -4045,6 +4075,15 @@ out:
return ret;
}
+/**
+ * batadv_tt_add_temporary_global_entry() - Add temporary entry to global TT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which the temporary entry should be associated with
+ * @addr: mac address of the client
+ * @vid: VLAN id of the new temporary global translation table
+ *
+ * Return: true when temporary tt entry could be added, false otherwise
+ */
bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *addr,
@@ -4069,7 +4108,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_local_resize_to_mtu - resize the local translation table fit the
+ * batadv_tt_local_resize_to_mtu() - resize the local translation table to fit the
* maximum packet size that can be transported through the mesh
* @soft_iface: netdev struct of the mesh interface
*
@@ -4110,7 +4149,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
}
/**
- * batadv_tt_tvlv_ogm_handler_v1 - process incoming tt tvlv container
+ * batadv_tt_tvlv_ogm_handler_v1() - process incoming tt tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -4149,7 +4188,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_tvlv_unicast_handler_v1 - process incoming (unicast) tt tvlv
+ * batadv_tt_tvlv_unicast_handler_v1() - process incoming (unicast) tt tvlv
* container
* @bat_priv: the bat priv with all the soft interface information
* @src: mac address of tt tvlv sender
@@ -4231,7 +4270,8 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
}
/**
- * batadv_roam_tvlv_unicast_handler_v1 - process incoming tt roam tvlv container
+ * batadv_roam_tvlv_unicast_handler_v1() - process incoming tt roam tvlv
+ * container
* @bat_priv: the bat priv with all the soft interface information
* @src: mac address of tt tvlv sender
* @dst: mac address of tt tvlv recipient
@@ -4281,7 +4321,7 @@ out:
}
/**
- * batadv_tt_init - initialise the translation table internals
+ * batadv_tt_init() - initialise the translation table internals
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success or negative error number in case of failure.
@@ -4317,7 +4357,7 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
}
/**
- * batadv_tt_global_is_isolated - check if a client is marked as isolated
+ * batadv_tt_global_is_isolated() - check if a client is marked as isolated
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client
* @vid: the identifier of the VLAN where this client is connected
@@ -4343,7 +4383,7 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
}
/**
- * batadv_tt_cache_init - Initialize tt memory object cache
+ * batadv_tt_cache_init() - Initialize tt memory object cache
*
* Return: 0 on success or negative error number in case of failure.
*/
@@ -4412,7 +4452,7 @@ err_tt_tl_destroy:
}
/**
- * batadv_tt_cache_destroy - Destroy tt memory object cache
+ * batadv_tt_cache_destroy() - Destroy tt memory object cache
*/
void batadv_tt_cache_destroy(void)
{
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 411d586191da..8d9e3abec2c8 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
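
The SPDX hunks across this series follow Documentation/process/license-rules.rst: the tag sits on the very first line of each file, written as a C99 // comment in .c files and as a /* */ comment in headers:

    // SPDX-License-Identifier: GPL-2.0        (first line of tvlv.c, below)
    /* SPDX-License-Identifier: GPL-2.0 */     (first line of tvlv.h and types.h)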
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index 1d9e267caec9..5ffcb45ac6ff 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -19,7 +20,7 @@
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -35,14 +36,14 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
#include "originator.h"
-#include "packet.h"
#include "send.h"
#include "tvlv.h"
/**
- * batadv_tvlv_handler_release - release tvlv handler from lists and queue for
+ * batadv_tvlv_handler_release() - release tvlv handler from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the tvlv
*/
@@ -55,7 +56,7 @@ static void batadv_tvlv_handler_release(struct kref *ref)
}
/**
- * batadv_tvlv_handler_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_handler_put() - decrement the tvlv container refcounter and
* possibly release it
* @tvlv_handler: the tvlv handler to free
*/
@@ -65,7 +66,7 @@ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
}
/**
- * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
+ * batadv_tvlv_handler_get() - retrieve tvlv handler from the tvlv handler list
* based on the provided type and version (both need to match)
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv handler type to look for
@@ -99,7 +100,7 @@ batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
}
/**
- * batadv_tvlv_container_release - release tvlv from lists and free
+ * batadv_tvlv_container_release() - release tvlv from lists and free
* @ref: kref pointer of the tvlv
*/
static void batadv_tvlv_container_release(struct kref *ref)
@@ -111,7 +112,7 @@ static void batadv_tvlv_container_release(struct kref *ref)
}
/**
- * batadv_tvlv_container_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_container_put() - decrement the tvlv container refcounter and
* possibly release it
* @tvlv: the tvlv container to free
*/
@@ -121,7 +122,7 @@ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
}
/**
- * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
+ * batadv_tvlv_container_get() - retrieve tvlv container from the tvlv container
* list based on the provided type and version (both need to match)
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv container type to look for
@@ -155,7 +156,7 @@ batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
}
/**
- * batadv_tvlv_container_list_size - calculate the size of the tvlv container
+ * batadv_tvlv_container_list_size() - calculate the size of the tvlv container
* list entries
* @bat_priv: the bat priv with all the soft interface information
*
@@ -180,8 +181,8 @@ static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
}
/**
- * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
- * list
+ * batadv_tvlv_container_remove() - remove tvlv container from the tvlv
+ * container list
* @bat_priv: the bat priv with all the soft interface information
* @tvlv: the to be removed tvlv container
*
@@ -204,7 +205,7 @@ static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
}
/**
- * batadv_tvlv_container_unregister - unregister tvlv container based on the
+ * batadv_tvlv_container_unregister() - unregister tvlv container based on the
* provided type and version (both need to match)
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv container type to unregister
@@ -222,7 +223,7 @@ void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
}
/**
- * batadv_tvlv_container_register - register tvlv type, version and content
+ * batadv_tvlv_container_register() - register tvlv type, version and content
* to be propagated with each (primary interface) OGM
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv container type
@@ -267,7 +268,7 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
}
/**
- * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
+ * batadv_tvlv_realloc_packet_buff() - reallocate packet buffer to accommodate
* requested packet size
* @packet_buff: packet buffer
* @packet_buff_len: packet buffer size
@@ -300,7 +301,7 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
}
/**
- * batadv_tvlv_container_ogm_append - append tvlv container content to given
+ * batadv_tvlv_container_ogm_append() - append tvlv container content to given
* OGM packet buffer
* @bat_priv: the bat priv with all the soft interface information
* @packet_buff: ogm packet buffer
@@ -353,7 +354,7 @@ end:
}
/**
- * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
+ * batadv_tvlv_call_handler() - parse the given tvlv buffer to call the
* appropriate handlers
* @bat_priv: the bat priv with all the soft interface information
* @tvlv_handler: tvlv callback function handling the tvlv content
@@ -407,7 +408,7 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
}
/**
- * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
+ * batadv_tvlv_containers_process() - parse the given tvlv buffer to call the
* appropriate handlers
* @bat_priv: the bat priv with all the soft interface information
* @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
@@ -474,7 +475,7 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
}
/**
- * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
+ * batadv_tvlv_ogm_receive() - process an incoming ogm and call the appropriate
* handlers
* @bat_priv: the bat priv with all the soft interface information
* @batadv_ogm_packet: ogm packet containing the tvlv containers
@@ -501,7 +502,7 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
}
/**
- * batadv_tvlv_handler_register - register tvlv handler based on the provided
+ * batadv_tvlv_handler_register() - register tvlv handler based on the provided
* type and version (both need to match) for ogm tvlv payload and/or unicast
* payload
* @bat_priv: the bat priv with all the soft interface information
@@ -556,7 +557,7 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
}
/**
- * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
+ * batadv_tvlv_handler_unregister() - unregister tvlv handler based on the
* provided type and version (both need to match)
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv handler type to be unregistered
@@ -579,7 +580,7 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
}
/**
- * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
+ * batadv_tvlv_unicast_send() - send a unicast packet with tvlv payload to the
* specified host
* @bat_priv: the bat priv with all the soft interface information
* @src: source mac address of the unicast packet
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
index 4d01400ada30..a74df33f446d 100644
--- a/net/batman-adv/tvlv.h
+++ b/net/batman-adv/tvlv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index a62795868794..bb1578410e0c 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
@@ -34,10 +35,9 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
-#include "packet.h"
-
struct seq_file;
#ifdef CONFIG_BATMAN_ADV_DAT
@@ -54,13 +54,15 @@ struct seq_file;
/**
* enum batadv_dhcp_recipient - dhcp destination
- * @BATADV_DHCP_NO: packet is not a dhcp message
- * @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server
- * @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client
*/
enum batadv_dhcp_recipient {
+ /** @BATADV_DHCP_NO: packet is not a dhcp message */
BATADV_DHCP_NO = 0,
+
+ /** @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server */
BATADV_DHCP_TO_SERVER,
+
+ /** @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client */
BATADV_DHCP_TO_CLIENT,
};
@@ -78,196 +80,274 @@ enum batadv_dhcp_recipient {
/**
* struct batadv_hard_iface_bat_iv - per hard-interface B.A.T.M.A.N. IV data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
*/
struct batadv_hard_iface_bat_iv {
+ /** @ogm_buff: buffer holding the OGM packet */
unsigned char *ogm_buff;
+
+ /** @ogm_buff_len: length of the OGM packet buffer */
int ogm_buff_len;
+
+ /** @ogm_seqno: OGM sequence number - used to identify each OGM */
atomic_t ogm_seqno;
};
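
The types.h part of the patch switches struct documentation from one member list in the struct's kernel-doc header to inline member comments, keeping each description next to the field it documents. The shape of the inline style, shown on a hypothetical struct:

    /**
     * struct foo_stats - per-interface counters (hypothetical example)
     */
    struct foo_stats {
            /** @tx_packets: number of transmitted packets */
            u64 tx_packets;

            /**
             * @rx_dropped: number of received packets that had to be
             * dropped (multi-line form for longer descriptions)
             */
            u64 rx_dropped;
    };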
/**
* enum batadv_v_hard_iface_flags - interface flags useful to B.A.T.M.A.N. V
- * @BATADV_FULL_DUPLEX: tells if the connection over this link is full-duplex
- * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that no
- * throughput data is available for this interface and that default values are
- * assumed.
*/
enum batadv_v_hard_iface_flags {
+ /**
+ * @BATADV_FULL_DUPLEX: tells if the connection over this link is
+ * full-duplex
+ */
BATADV_FULL_DUPLEX = BIT(0),
+
+ /**
+ * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that
+ * no throughput data is available for this interface and that default
+ * values are assumed.
+ */
BATADV_WARNING_DEFAULT = BIT(1),
};
/**
* struct batadv_hard_iface_bat_v - per hard-interface B.A.T.M.A.N. V data
- * @elp_interval: time interval between two ELP transmissions
- * @elp_seqno: current ELP sequence number
- * @elp_skb: base skb containing the ELP message to send
- * @elp_wq: workqueue used to schedule ELP transmissions
- * @throughput_override: throughput override to disable link auto-detection
- * @flags: interface specific flags
*/
struct batadv_hard_iface_bat_v {
+ /** @elp_interval: time interval between two ELP transmissions */
atomic_t elp_interval;
+
+ /** @elp_seqno: current ELP sequence number */
atomic_t elp_seqno;
+
+ /** @elp_skb: base skb containing the ELP message to send */
struct sk_buff *elp_skb;
+
+ /** @elp_wq: workqueue used to schedule ELP transmissions */
struct delayed_work elp_wq;
+
+ /**
+ * @throughput_override: throughput override to disable link
+ * auto-detection
+ */
atomic_t throughput_override;
+
+ /** @flags: interface specific flags */
u8 flags;
};
/**
* enum batadv_hard_iface_wifi_flags - Flags describing the wifi configuration
* of a batadv_hard_iface
- * @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device
- * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi device
*/
enum batadv_hard_iface_wifi_flags {
+ /** @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device */
BATADV_HARDIF_WIFI_WEXT_DIRECT = BIT(0),
+
+ /** @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device */
BATADV_HARDIF_WIFI_CFG80211_DIRECT = BIT(1),
+
+ /**
+ * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
+ */
BATADV_HARDIF_WIFI_WEXT_INDIRECT = BIT(2),
+
+ /**
+ * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi
+ * device
+ */
BATADV_HARDIF_WIFI_CFG80211_INDIRECT = BIT(3),
};
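
Flag enums assembled from BIT() are combined and tested with plain bitwise operators, for example (hypothetical names and bit positions):

    #include <linux/bitops.h>       /* BIT() */
    #include <linux/types.h>

    #define WIFI_WEXT_DIRECT     BIT(0)
    #define WIFI_CFG80211_DIRECT BIT(1)

    static bool is_direct_wifi(u32 wifi_flags)
    {
            /* true if either "direct" detection method matched */
            return wifi_flags & (WIFI_WEXT_DIRECT | WIFI_CFG80211_DIRECT);
    }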
/**
* struct batadv_hard_iface - network device known to batman-adv
- * @list: list node for batadv_hardif_list
- * @if_num: identificator of the interface
- * @if_status: status of the interface for batman-adv
- * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
- * @wifi_flags: flags whether this is (directly or indirectly) a wifi interface
- * @net_dev: pointer to the net_device
- * @hardif_obj: kobject of the per interface sysfs "mesh" directory
- * @refcount: number of contexts the object is used
- * @batman_adv_ptype: packet type describing packets that should be processed by
- * batman-adv for this interface
- * @soft_iface: the batman-adv interface which uses this network interface
- * @rcu: struct used for freeing in an RCU-safe manner
- * @bat_iv: per hard-interface B.A.T.M.A.N. IV data
- * @bat_v: per hard-interface B.A.T.M.A.N. V data
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @neigh_list: list of unique single hop neighbors via this interface
- * @neigh_list_lock: lock protecting neigh_list
*/
struct batadv_hard_iface {
+ /** @list: list node for batadv_hardif_list */
struct list_head list;
+
+	/** @if_num: identifier of the interface */
s16 if_num;
+
+ /** @if_status: status of the interface for batman-adv */
char if_status;
+
+ /**
+ * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
+ */
u8 num_bcasts;
+
+ /**
+ * @wifi_flags: flags whether this is (directly or indirectly) a wifi
+ * interface
+ */
u32 wifi_flags;
+
+ /** @net_dev: pointer to the net_device */
struct net_device *net_dev;
+
+ /** @hardif_obj: kobject of the per interface sysfs "mesh" directory */
struct kobject *hardif_obj;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /**
+ * @batman_adv_ptype: packet type describing packets that should be
+ * processed by batman-adv for this interface
+ */
struct packet_type batman_adv_ptype;
+
+ /**
+ * @soft_iface: the batman-adv interface which uses this network
+ * interface
+ */
struct net_device *soft_iface;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
+
+ /** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */
struct batadv_hard_iface_bat_iv bat_iv;
+
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ /** @bat_v: per hard-interface B.A.T.M.A.N. V data */
struct batadv_hard_iface_bat_v bat_v;
#endif
+
+ /**
+ * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+ */
struct dentry *debug_dir;
+
+ /**
+ * @neigh_list: list of unique single hop neighbors via this interface
+ */
struct hlist_head neigh_list;
- /* neigh_list_lock protects: neigh_list */
+
+ /** @neigh_list_lock: lock protecting neigh_list */
spinlock_t neigh_list_lock;
};
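
neigh_list is the common RCU-protected hlist arrangement: readers traverse under rcu_read_lock() only, while writers additionally serialize on neigh_list_lock. A self-contained sketch of the pattern with hypothetical names:

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    struct foo_neigh {
            struct hlist_node list;
    };

    static HLIST_HEAD(neigh_list);
    static DEFINE_SPINLOCK(neigh_list_lock);

    static void neigh_add(struct foo_neigh *n)
    {
            spin_lock_bh(&neigh_list_lock);         /* writers take the lock */
            hlist_add_head_rcu(&n->list, &neigh_list);
            spin_unlock_bh(&neigh_list_lock);
    }

    static void neigh_walk(void)
    {
            struct foo_neigh *n;

            rcu_read_lock();                        /* readers only need RCU */
            hlist_for_each_entry_rcu(n, &neigh_list, list)
                    ;                               /* inspect n */
            rcu_read_unlock();
    }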
/**
* struct batadv_orig_ifinfo - originator info per outgoing interface
- * @list: list node for orig_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @router: router that should be used to reach this originator
- * @last_real_seqno: last and best known sequence number
- * @last_ttl: ttl of last received packet
- * @last_seqno_forwarded: seqno of the OGM which was forwarded last
- * @batman_seqno_reset: time when the batman seqno window was reset
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_orig_ifinfo {
+ /** @list: list node for &batadv_orig_node.ifinfo_list */
struct hlist_node list;
+
+ /** @if_outgoing: pointer to outgoing hard-interface */
struct batadv_hard_iface *if_outgoing;
- struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
+
+ /** @router: router that should be used to reach this originator */
+ struct batadv_neigh_node __rcu *router;
+
+ /** @last_real_seqno: last and best known sequence number */
u32 last_real_seqno;
+
+ /** @last_ttl: ttl of last received packet */
u8 last_ttl;
+
+ /** @last_seqno_forwarded: seqno of the OGM which was forwarded last */
u32 last_seqno_forwarded;
+
+ /** @batman_seqno_reset: time when the batman seqno window was reset */
unsigned long batman_seqno_reset;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
/**
* struct batadv_frag_table_entry - head in the fragment buffer table
- * @fragment_list: head of list with fragments
- * @lock: lock to protect the list of fragments
- * @timestamp: time (jiffie) of last received fragment
- * @seqno: sequence number of the fragments in the list
- * @size: accumulated size of packets in list
- * @total_size: expected size of the assembled packet
*/
struct batadv_frag_table_entry {
+ /** @fragment_list: head of list with fragments */
struct hlist_head fragment_list;
- spinlock_t lock; /* protects fragment_list */
+
+ /** @lock: lock to protect the list of fragments */
+ spinlock_t lock;
+
+	/** @timestamp: time (in jiffies) of the last received fragment */
unsigned long timestamp;
+
+ /** @seqno: sequence number of the fragments in the list */
u16 seqno;
+
+ /** @size: accumulated size of packets in list */
u16 size;
+
+ /** @total_size: expected size of the assembled packet */
u16 total_size;
};
/**
* struct batadv_frag_list_entry - entry in a list of fragments
- * @list: list node information
- * @skb: fragment
- * @no: fragment number in the set
*/
struct batadv_frag_list_entry {
+ /** @list: list node information */
struct hlist_node list;
+
+ /** @skb: fragment */
struct sk_buff *skb;
+
+ /** @no: fragment number in the set */
u8 no;
};
/**
* struct batadv_vlan_tt - VLAN specific TT attributes
- * @crc: CRC32 checksum of the entries belonging to this vlan
- * @num_entries: number of TT entries for this VLAN
*/
struct batadv_vlan_tt {
+ /** @crc: CRC32 checksum of the entries belonging to this vlan */
u32 crc;
+
+ /** @num_entries: number of TT entries for this VLAN */
atomic_t num_entries;
};
/**
* struct batadv_orig_node_vlan - VLAN specific data per orig_node
- * @vid: the VLAN identifier
- * @tt: VLAN specific TT attributes
- * @list: list node for orig_node::vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
*/
struct batadv_orig_node_vlan {
+ /** @vid: the VLAN identifier */
unsigned short vid;
+
+ /** @tt: VLAN specific TT attributes */
struct batadv_vlan_tt tt;
+
+ /** @list: list node for &batadv_orig_node.vlan_list */
struct hlist_node list;
+
+ /**
+	 * @refcount: number of contexts where this object is currently in use
+ */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in a RCU-safe manner */
struct rcu_head rcu;
};
/**
* struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: set of bitfields (one per hard-interface) where each one counts
- * the number of our OGMs this orig_node rebroadcasted "back" to us (relative
- * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
- * @bcast_own_sum: sum of bcast_own
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
*/
struct batadv_orig_bat_iv {
+ /**
+ * @bcast_own: set of bitfields (one per hard-interface) where each one
+ * counts the number of our OGMs this orig_node rebroadcasted "back" to
+ * us (relative to last_real_seqno). Every bitfield is
+ * BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
+ */
unsigned long *bcast_own;
+
+ /** @bcast_own_sum: sum of bcast_own */
u8 *bcast_own_sum;
- /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+
+ /**
+ * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
* neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
*/
spinlock_t ogm_cnt_lock;
@@ -275,130 +355,205 @@ struct batadv_orig_bat_iv {
/**
* struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
- * @orig: originator ethernet address
- * @ifinfo_list: list for routers per outgoing interface
- * @last_bonding_candidate: pointer to last ifinfo of last used router
- * @dat_addr: address of the orig node in the distributed hash
- * @last_seen: time when last packet from this node was received
- * @bcast_seqno_reset: time when the broadcast seqno window was reset
- * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
- * @mcast_flags: multicast flags announced by the orig node
- * @mcast_want_all_unsnoopables_node: a list node for the
- * mcast.want_all_unsnoopables list
- * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4 list
- * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6 list
- * @capabilities: announced capabilities of this originator
- * @capa_initialized: bitfield to remember whether a capability was initialized
- * @last_ttvn: last seen translation table version number
- * @tt_buff: last tt changeset this node received from the orig node
- * @tt_buff_len: length of the last tt changeset this node received from the
- * orig node
- * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_lock: prevents from updating the table while reading it. Table update is
- * made up by two operations (data structure update and metdata -CRC/TTVN-
- * recalculation) and they have to be executed atomically in order to avoid
- * another thread to read the table/metadata between those.
- * @bcast_bits: bitfield containing the info which payload broadcast originated
- * from this orig node this host already has seen (relative to
- * last_bcast_seqno)
- * @last_bcast_seqno: last broadcast sequence number received by this host
- * @neigh_list: list of potential next hop neighbor towards this orig node
- * @neigh_list_lock: lock protecting neigh_list and router
- * @hash_entry: hlist node for batadv_priv::orig_hash
- * @bat_priv: pointer to soft_iface this orig node belongs to
- * @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
- * @in_coding_list: list of nodes this orig can hear
- * @out_coding_list: list of nodes that can hear this orig
- * @in_coding_list_lock: protects in_coding_list
- * @out_coding_list_lock: protects out_coding_list
- * @fragments: array with heads for fragment chains
- * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by the
- * originator represented by this object
- * @vlan_list_lock: lock protecting vlan_list
- * @bat_iv: B.A.T.M.A.N. IV private structure
*/
struct batadv_orig_node {
+ /** @orig: originator ethernet address */
u8 orig[ETH_ALEN];
+
+ /** @ifinfo_list: list for routers per outgoing interface */
struct hlist_head ifinfo_list;
+
+ /**
+ * @last_bonding_candidate: pointer to last ifinfo of last used router
+ */
struct batadv_orig_ifinfo *last_bonding_candidate;
+
#ifdef CONFIG_BATMAN_ADV_DAT
+ /** @dat_addr: address of the orig node in the distributed hash */
batadv_dat_addr_t dat_addr;
#endif
+
+ /** @last_seen: time when last packet from this node was received */
unsigned long last_seen;
+
+ /**
+ * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ */
unsigned long bcast_seqno_reset;
+
#ifdef CONFIG_BATMAN_ADV_MCAST
- /* synchronizes mcast tvlv specific orig changes */
+ /**
+ * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
+ */
spinlock_t mcast_handler_lock;
+
+ /** @mcast_flags: multicast flags announced by the orig node */
u8 mcast_flags;
+
+ /**
+ * @mcast_want_all_unsnoopables_node: a list node for the
+ * mcast.want_all_unsnoopables list
+ */
struct hlist_node mcast_want_all_unsnoopables_node;
+
+ /**
+ * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4
+ * list
+ */
 	struct hlist_node mcast_want_all_ipv4_node;
+
+ /**
+ * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6
+ * list
+ */
struct hlist_node mcast_want_all_ipv6_node;
#endif
+
+ /** @capabilities: announced capabilities of this originator */
unsigned long capabilities;
+
+ /**
+ * @capa_initialized: bitfield to remember whether a capability was
+ * initialized
+ */
unsigned long capa_initialized;
+
+ /** @last_ttvn: last seen translation table version number */
atomic_t last_ttvn;
+
+ /** @tt_buff: last tt changeset this node received from the orig node */
unsigned char *tt_buff;
+
+ /**
+ * @tt_buff_len: length of the last tt changeset this node received
+ * from the orig node
+ */
s16 tt_buff_len;
- spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
- /* prevents from changing the table while reading it */
+
+ /** @tt_buff_lock: lock that protects tt_buff and tt_buff_len */
+ spinlock_t tt_buff_lock;
+
+ /**
+	 * @tt_lock: prevents the table from being updated while it is read. A
+	 * table update is made up of two operations (data structure update and
+	 * metadata (CRC/TTVN) recalculation) which have to be executed
+	 * atomically to prevent another thread from reading the table/metadata
+	 * in between.
+ */
spinlock_t tt_lock;
+
+ /**
+ * @bcast_bits: bitfield containing the info which payload broadcast
+ * originated from this orig node this host already has seen (relative
+ * to last_bcast_seqno)
+ */
DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+ /**
+ * @last_bcast_seqno: last broadcast sequence number received by this
+ * host
+ */
u32 last_bcast_seqno;
+
+ /**
+ * @neigh_list: list of potential next hop neighbor towards this orig
+ * node
+ */
struct hlist_head neigh_list;
- /* neigh_list_lock protects: neigh_list, ifinfo_list,
- * last_bonding_candidate and router
+
+ /**
+ * @neigh_list_lock: lock protecting neigh_list, ifinfo_list,
+ * last_bonding_candidate and router
*/
spinlock_t neigh_list_lock;
+
+ /** @hash_entry: hlist node for &batadv_priv.orig_hash */
struct hlist_node hash_entry;
+
+ /** @bat_priv: pointer to soft_iface this orig node belongs to */
struct batadv_priv *bat_priv;
- /* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
+
+ /** @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno */
spinlock_t bcast_seqno_lock;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
+
#ifdef CONFIG_BATMAN_ADV_NC
+ /** @in_coding_list: list of nodes this orig can hear */
struct list_head in_coding_list;
+
+ /** @out_coding_list: list of nodes that can hear this orig */
struct list_head out_coding_list;
- spinlock_t in_coding_list_lock; /* Protects in_coding_list */
- spinlock_t out_coding_list_lock; /* Protects out_coding_list */
+
+ /** @in_coding_list_lock: protects in_coding_list */
+ spinlock_t in_coding_list_lock;
+
+ /** @out_coding_list_lock: protects out_coding_list */
+ spinlock_t out_coding_list_lock;
#endif
+
+ /** @fragments: array with heads for fragment chains */
struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+
+ /**
+ * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by
+ * the originator represented by this object
+ */
struct hlist_head vlan_list;
- spinlock_t vlan_list_lock; /* protects vlan_list */
+
+ /** @vlan_list_lock: lock protecting vlan_list */
+ spinlock_t vlan_list_lock;
+
+ /** @bat_iv: B.A.T.M.A.N. IV private structure */
struct batadv_orig_bat_iv bat_iv;
};
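
bcast_bits together with last_bcast_seqno form a sliding duplicate-detection window: each bit records whether a broadcast seqno within BATADV_TQ_LOCAL_WINDOW_SIZE of the newest one has already been seen. A simplified sketch (hypothetical names; shifting the window forward on newer seqnos is left out):

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define WINDOW_SIZE 64  /* stands in for BATADV_TQ_LOCAL_WINDOW_SIZE */

    static DECLARE_BITMAP(seen_bits, WINDOW_SIZE);
    static u32 newest_seqno;

    /* Return: true if seqno was already seen inside the window. Sketch
     * only: assumes seqno is not newer than newest_seqno.
     */
    static bool seen_before(u32 seqno)
    {
            u32 age = newest_seqno - seqno;

            if (age >= WINDOW_SIZE)
                    return false;   /* too old, outside the window */
            return test_and_set_bit(age, seen_bits);
    }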
/**
* enum batadv_orig_capabilities - orig node capabilities
- * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
- * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
- * @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability
- * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
- * (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
*/
enum batadv_orig_capabilities {
+ /**
+ * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table
+ * enabled
+ */
BATADV_ORIG_CAPA_HAS_DAT,
+
+ /** @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled */
BATADV_ORIG_CAPA_HAS_NC,
+
+ /** @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability */
BATADV_ORIG_CAPA_HAS_TT,
+
+ /**
+ * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
+ * (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
+ */
BATADV_ORIG_CAPA_HAS_MCAST,
};
/**
* struct batadv_gw_node - structure for orig nodes announcing gw capabilities
- * @list: list node for batadv_priv_gw::list
- * @orig_node: pointer to corresponding orig node
- * @bandwidth_down: advertised uplink download bandwidth
- * @bandwidth_up: advertised uplink upload bandwidth
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_gw_node {
+ /** @list: list node for &batadv_priv_gw.list */
struct hlist_node list;
+
+ /** @orig_node: pointer to corresponding orig node */
struct batadv_orig_node *orig_node;
+
+ /** @bandwidth_down: advertised uplink download bandwidth */
u32 bandwidth_down;
+
+ /** @bandwidth_up: advertised uplink upload bandwidth */
u32 bandwidth_up;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
@@ -407,118 +562,161 @@ DECLARE_EWMA(throughput, 10, 8)
/**
* struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
* information
- * @throughput: ewma link throughput towards this neighbor
- * @elp_interval: time interval between two ELP transmissions
- * @elp_latest_seqno: latest and best known ELP sequence number
- * @last_unicast_tx: when the last unicast packet has been sent to this neighbor
- * @metric_work: work queue callback item for metric update
*/
struct batadv_hardif_neigh_node_bat_v {
+ /** @throughput: ewma link throughput towards this neighbor */
struct ewma_throughput throughput;
+
+ /** @elp_interval: time interval between two ELP transmissions */
u32 elp_interval;
+
+ /** @elp_latest_seqno: latest and best known ELP sequence number */
u32 elp_latest_seqno;
+
+ /**
+ * @last_unicast_tx: when the last unicast packet has been sent to this
+ * neighbor
+ */
unsigned long last_unicast_tx;
+
+ /** @metric_work: work queue callback item for metric update */
struct work_struct metric_work;
};
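
@throughput is the ewma_throughput type generated by the DECLARE_EWMA(throughput, 10, 8) line above, so it is updated and read through the helpers that macro emits. A short sketch (the two wrapper names are hypothetical):

#include <linux/average.h>

/* fold a new throughput measurement into the moving average */
static void sketch_throughput_sample(struct batadv_hardif_neigh_node_bat_v *bat_v,
				     unsigned long sample)
{
	ewma_throughput_add(&bat_v->throughput, sample);
}

/* read back the smoothed link throughput for metric calculations */
static unsigned long
sketch_throughput_get(struct batadv_hardif_neigh_node_bat_v *bat_v)
{
	return ewma_throughput_read(&bat_v->throughput);
}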
/**
* struct batadv_hardif_neigh_node - unique neighbor per hard-interface
- * @list: list node for batadv_hard_iface::neigh_list
- * @addr: the MAC address of the neighboring interface
- * @orig: the address of the originator this neighbor node belongs to
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @bat_v: B.A.T.M.A.N. V private data
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
*/
struct batadv_hardif_neigh_node {
+ /** @list: list node for &batadv_hard_iface.neigh_list */
struct hlist_node list;
+
+ /** @addr: the MAC address of the neighboring interface */
u8 addr[ETH_ALEN];
+
+ /**
+ * @orig: the address of the originator this neighbor node belongs to
+ */
u8 orig[ETH_ALEN];
+
+ /** @if_incoming: pointer to incoming hard-interface */
struct batadv_hard_iface *if_incoming;
+
+ /** @last_seen: when last packet via this neighbor was received */
unsigned long last_seen;
+
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ /** @bat_v: B.A.T.M.A.N. V private data */
struct batadv_hardif_neigh_node_bat_v bat_v;
#endif
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
/**
* struct batadv_neigh_node - structure for single hops neighbors
- * @list: list node for batadv_orig_node::neigh_list
- * @orig_node: pointer to corresponding orig_node
- * @addr: the MAC address of the neighboring interface
- * @ifinfo_list: list for routing metrics per outgoing interface
- * @ifinfo_lock: lock protecting private ifinfo members and list
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @hardif_neigh: hardif_neigh of this neighbor
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_neigh_node {
+ /** @list: list node for &batadv_orig_node.neigh_list */
struct hlist_node list;
+
+ /** @orig_node: pointer to corresponding orig_node */
struct batadv_orig_node *orig_node;
+
+ /** @addr: the MAC address of the neighboring interface */
u8 addr[ETH_ALEN];
+
+ /** @ifinfo_list: list for routing metrics per outgoing interface */
struct hlist_head ifinfo_list;
- spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
+
+ /** @ifinfo_lock: lock protecting ifinfo_list and its members */
+ spinlock_t ifinfo_lock;
+
+ /** @if_incoming: pointer to incoming hard-interface */
struct batadv_hard_iface *if_incoming;
+
+ /** @last_seen: when last packet via this neighbor was received */
unsigned long last_seen;
+
+ /** @hardif_neigh: hardif_neigh of this neighbor */
struct batadv_hardif_neigh_node *hardif_neigh;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
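
As with the other rcu-managed lists in this header, @ifinfo_list is modified under @ifinfo_lock while readers walk it locklessly under RCU. A reader-side sketch (hypothetical helper, not a function from this file):

#include <linux/rculist.h>

static void sketch_walk_ifinfo(struct batadv_neigh_node *neigh_node)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh_ifinfo, &neigh_node->ifinfo_list,
				 list) {
		/* read-only access; writers hold neigh_node->ifinfo_lock */
	}
	rcu_read_unlock();
}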
/**
* struct batadv_neigh_ifinfo_bat_iv - neighbor information per outgoing
* interface for B.A.T.M.A.N. IV
- * @tq_recv: ring buffer of received TQ values from this neigh node
- * @tq_index: ring buffer index
- * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
- * @real_bits: bitfield containing the number of OGMs received from this neigh
- * node (relative to orig_node->last_real_seqno)
- * @real_packet_count: counted result of real_bits
*/
struct batadv_neigh_ifinfo_bat_iv {
+ /** @tq_recv: ring buffer of received TQ values from this neigh node */
u8 tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+
+ /** @tq_index: ring buffer index */
u8 tq_index;
+
+ /**
+ * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
+ */
u8 tq_avg;
+
+ /**
+ * @real_bits: bitfield containing the number of OGMs received from this
+ * neigh node (relative to orig_node->last_real_seqno)
+ */
DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+ /** @real_packet_count: counted result of real_bits */
u8 real_packet_count;
};
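
@tq_recv, @tq_index and @tq_avg together form a fixed-size ring buffer average: each received TQ value overwrites the oldest slot and the average is recomputed over the whole window. A sketch of that bookkeeping (hypothetical helper; the driver keeps the real logic in its ring buffer helpers):

static void sketch_tq_ring_update(struct batadv_neigh_ifinfo_bat_iv *bat_iv,
				  u8 tq)
{
	u16 sum = 0;
	u8 i;

	bat_iv->tq_recv[bat_iv->tq_index] = tq;
	bat_iv->tq_index = (bat_iv->tq_index + 1) %
			   BATADV_TQ_GLOBAL_WINDOW_SIZE;

	for (i = 0; i < BATADV_TQ_GLOBAL_WINDOW_SIZE; i++)
		sum += bat_iv->tq_recv[i];

	bat_iv->tq_avg = sum / BATADV_TQ_GLOBAL_WINDOW_SIZE;
}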
/**
* struct batadv_neigh_ifinfo_bat_v - neighbor information per outgoing
* interface for B.A.T.M.A.N. V
- * @throughput: last throughput metric received from originator via this neigh
- * @last_seqno: last sequence number known for this neighbor
*/
struct batadv_neigh_ifinfo_bat_v {
+ /**
+ * @throughput: last throughput metric received from originator via this
+ * neigh
+ */
u32 throughput;
+
+ /** @last_seqno: last sequence number known for this neighbor */
u32 last_seqno;
};
/**
* struct batadv_neigh_ifinfo - neighbor information per outgoing interface
- * @list: list node for batadv_neigh_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @bat_iv: B.A.T.M.A.N. IV private structure
- * @bat_v: B.A.T.M.A.N. V private data
- * @last_ttl: last received ttl from this neigh node
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
*/
struct batadv_neigh_ifinfo {
+ /** @list: list node for &batadv_neigh_node.ifinfo_list */
struct hlist_node list;
+
+ /** @if_outgoing: pointer to outgoing hard-interface */
struct batadv_hard_iface *if_outgoing;
+
+ /** @bat_iv: B.A.T.M.A.N. IV private structure */
struct batadv_neigh_ifinfo_bat_iv bat_iv;
+
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ /** @bat_v: B.A.T.M.A.N. V private data */
struct batadv_neigh_ifinfo_bat_v bat_v;
#endif
+
+ /** @last_ttl: last received ttl from this neigh node */
u8 last_ttl;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
@@ -526,148 +724,278 @@ struct batadv_neigh_ifinfo {
/**
* struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression
- * @orig: mac address of orig node orginating the broadcast
- * @crc: crc32 checksum of broadcast payload
- * @entrytime: time when the broadcast packet was received
*/
struct batadv_bcast_duplist_entry {
+ /** @orig: mac address of orig node originating the broadcast */
u8 orig[ETH_ALEN];
+
+ /** @crc: crc32 checksum of broadcast payload */
__be32 crc;
+
+ /** @entrytime: time when the broadcast packet was received */
unsigned long entrytime;
};
#endif
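
Duplicate suppression then amounts to scanning the small @bcast_duplist array in &batadv_priv_bla for a fresh entry carrying the same payload checksum but a different originator. A sketch of the check, assuming the driver's batadv_has_timed_out() helper and the BATADV_DUPLIST_* constants (the function itself is hypothetical; the caller holds bcast_duplist_lock):

#include <linux/etherdevice.h>

static bool sketch_bcast_is_dup(struct batadv_priv_bla *bla, __be32 crc,
				const u8 *orig)
{
	struct batadv_bcast_duplist_entry *entry;
	int i;

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		entry = &bla->bcast_duplist[i];

		/* too old to still count as a duplicate */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			continue;

		if (entry->crc != crc)
			continue;

		/* same payload, rebroadcast by another backbone gw */
		if (!ether_addr_equal(entry->orig, orig))
			return true;
	}

	return false;
}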
/**
* enum batadv_counters - indices for traffic counters
- * @BATADV_CNT_TX: transmitted payload traffic packet counter
- * @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter
- * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet counter
- * @BATADV_CNT_RX: received payload traffic packet counter
- * @BATADV_CNT_RX_BYTES: received payload traffic bytes counter
- * @BATADV_CNT_FORWARD: forwarded payload traffic packet counter
- * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
- * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
- * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
- * @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
- * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
- * @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
- * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
- * @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
- * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
- * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
- * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
- * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
- * @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter
- * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
- * @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter
- * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic packet
- * counter
- * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
- * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes counter
- * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet counter
- * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes counter
- * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc decoding
- * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
- * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes counter
- * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic packet
- * counter
- * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in promisc
- * mode.
- * @BATADV_CNT_NUM: number of traffic counters
*/
enum batadv_counters {
+ /** @BATADV_CNT_TX: transmitted payload traffic packet counter */
BATADV_CNT_TX,
+
+ /** @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter */
BATADV_CNT_TX_BYTES,
+
+ /**
+ * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet
+ * counter
+ */
BATADV_CNT_TX_DROPPED,
+
+ /** @BATADV_CNT_RX: received payload traffic packet counter */
BATADV_CNT_RX,
+
+ /** @BATADV_CNT_RX_BYTES: received payload traffic bytes counter */
BATADV_CNT_RX_BYTES,
+
+ /** @BATADV_CNT_FORWARD: forwarded payload traffic packet counter */
BATADV_CNT_FORWARD,
+
+ /**
+ * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
+ */
BATADV_CNT_FORWARD_BYTES,
+
+ /**
+ * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet
+ * counter
+ */
BATADV_CNT_MGMT_TX,
+
+ /**
+ * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes
+ * counter
+ */
BATADV_CNT_MGMT_TX_BYTES,
+
+ /**
+ * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
+ */
BATADV_CNT_MGMT_RX,
+
+ /**
+ * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes
+ * counter
+ */
BATADV_CNT_MGMT_RX_BYTES,
+
+ /** @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter */
BATADV_CNT_FRAG_TX,
+
+ /**
+ * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+ */
BATADV_CNT_FRAG_TX_BYTES,
+
+ /** @BATADV_CNT_FRAG_RX: received fragment traffic packet counter */
BATADV_CNT_FRAG_RX,
+
+ /**
+ * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+ */
BATADV_CNT_FRAG_RX_BYTES,
+
+ /** @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter */
BATADV_CNT_FRAG_FWD,
+
+ /**
+ * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
+ */
BATADV_CNT_FRAG_FWD_BYTES,
+
+ /**
+ * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
+ */
BATADV_CNT_TT_REQUEST_TX,
+
+ /** @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter */
BATADV_CNT_TT_REQUEST_RX,
+
+ /**
+ * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet
+ * counter
+ */
BATADV_CNT_TT_RESPONSE_TX,
+
+ /**
+ * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
+ */
BATADV_CNT_TT_RESPONSE_RX,
+
+ /**
+ * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet
+ * counter
+ */
BATADV_CNT_TT_ROAM_ADV_TX,
+
+ /**
+ * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
+ */
BATADV_CNT_TT_ROAM_ADV_RX,
+
#ifdef CONFIG_BATMAN_ADV_DAT
+ /**
+ * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
+ */
BATADV_CNT_DAT_GET_TX,
+
+ /** @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter */
BATADV_CNT_DAT_GET_RX,
+
+ /**
+ * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
+ */
BATADV_CNT_DAT_PUT_TX,
+
+ /** @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter */
BATADV_CNT_DAT_PUT_RX,
+
+ /**
+ * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic
+ * packet counter
+ */
BATADV_CNT_DAT_CACHED_REPLY_TX,
#endif
+
#ifdef CONFIG_BATMAN_ADV_NC
+ /**
+ * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
+ */
BATADV_CNT_NC_CODE,
+
+ /**
+ * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes
+ * counter
+ */
BATADV_CNT_NC_CODE_BYTES,
+
+ /**
+ * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet
+ * counter
+ */
BATADV_CNT_NC_RECODE,
+
+ /**
+ * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes
+ * counter
+ */
BATADV_CNT_NC_RECODE_BYTES,
+
+ /**
+ * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc
+ * decoding
+ */
BATADV_CNT_NC_BUFFER,
+
+ /**
+ * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
+ */
BATADV_CNT_NC_DECODE,
+
+ /**
+ * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes
+ * counter
+ */
BATADV_CNT_NC_DECODE_BYTES,
+
+ /**
+ * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic
+ * packet counter
+ */
BATADV_CNT_NC_DECODE_FAILED,
+
+ /**
+ * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in
+ * promisc mode.
+ */
BATADV_CNT_NC_SNIFFED,
#endif
+
+ /** @BATADV_CNT_NUM: number of traffic counters */
BATADV_CNT_NUM,
};
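
Each of these values indexes one slot of the per-cpu &batadv_priv.bat_counters array defined further below, so bumping a counter is a single lockless this_cpu_add(). A sketch mirroring the driver's counter helper (wrapper name hypothetical):

#include <linux/percpu.h>

static inline void sketch_add_counter(struct batadv_priv *bat_priv,
				      size_t idx, size_t count)
{
	/* each CPU updates only its own slice; readers sum all slices */
	this_cpu_add(bat_priv->bat_counters[idx], count);
}

The receive path would then do e.g. sketch_add_counter(bat_priv, BATADV_CNT_RX, 1) per delivered payload packet.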
/**
* struct batadv_priv_tt - per mesh interface translation table data
- * @vn: translation table version number
- * @ogm_append_cnt: counter of number of OGMs containing the local tt diff
- * @local_changes: changes registered in an originator interval
- * @changes_list: tracks tt local changes within an originator interval
- * @local_hash: local translation table hash table
- * @global_hash: global translation table hash table
- * @req_list: list of pending & unanswered tt_requests
- * @roam_list: list of the last roaming events of each client limiting the
- * number of roaming events to avoid route flapping
- * @changes_list_lock: lock protecting changes_list
- * @req_list_lock: lock protecting req_list
- * @roam_list_lock: lock protecting roam_list
- * @last_changeset: last tt changeset this host has generated
- * @last_changeset_len: length of last tt changeset this host has generated
- * @last_changeset_lock: lock protecting last_changeset & last_changeset_len
- * @commit_lock: prevents from executing a local TT commit while reading the
- * local table. The local TT commit is made up by two operations (data
- * structure update and metdata -CRC/TTVN- recalculation) and they have to be
- * executed atomically in order to avoid another thread to read the
- * table/metadata between those.
- * @work: work queue callback item for translation table purging
*/
struct batadv_priv_tt {
+ /** @vn: translation table version number */
atomic_t vn;
+
+ /**
+ * @ogm_append_cnt: counter of number of OGMs containing the local tt
+ * diff
+ */
atomic_t ogm_append_cnt;
+
+ /** @local_changes: changes registered in an originator interval */
atomic_t local_changes;
+
+ /**
+ * @changes_list: tracks tt local changes within an originator interval
+ */
struct list_head changes_list;
+
+ /** @local_hash: local translation table hash table */
struct batadv_hashtable *local_hash;
+
+ /** @global_hash: global translation table hash table */
struct batadv_hashtable *global_hash;
+
+ /** @req_list: list of pending & unanswered tt_requests */
struct hlist_head req_list;
+
+ /**
+ * @roam_list: list of the last roaming events of each client limiting
+ * the number of roaming events to avoid route flapping
+ */
struct list_head roam_list;
- spinlock_t changes_list_lock; /* protects changes */
- spinlock_t req_list_lock; /* protects req_list */
- spinlock_t roam_list_lock; /* protects roam_list */
+
+ /** @changes_list_lock: lock protecting changes_list */
+ spinlock_t changes_list_lock;
+
+ /** @req_list_lock: lock protecting req_list */
+ spinlock_t req_list_lock;
+
+ /** @roam_list_lock: lock protecting roam_list */
+ spinlock_t roam_list_lock;
+
+ /** @last_changeset: last tt changeset this host has generated */
unsigned char *last_changeset;
+
+ /**
+ * @last_changeset_len: length of last tt changeset this host has
+ * generated
+ */
s16 last_changeset_len;
- /* protects last_changeset & last_changeset_len */
+
+ /**
+ * @last_changeset_lock: lock protecting last_changeset &
+ * last_changeset_len
+ */
spinlock_t last_changeset_lock;
- /* prevents from executing a commit while reading the table */
+
+ /**
+ * @commit_lock: prevents executing a local TT commit while reading the
+ * local table. The local TT commit is made up of two operations (data
+ * structure update and metadata - CRC/TTVN - recalculation) which have
+ * to be executed atomically so that no other thread can read the
+ * table/metadata in between.
+ */
spinlock_t commit_lock;
+
+ /** @work: work queue callback item for translation table purging */
struct delayed_work work;
};
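
The @commit_lock discipline described above, in sketch form (hypothetical helper; readers of the local table take the same lock, so the structure update and the CRC/TTVN recalculation appear to them as one atomic step):

static void sketch_tt_local_commit(struct batadv_priv_tt *tt)
{
	spin_lock_bh(&tt->commit_lock);

	/* 1) fold the entries queued on changes_list into local_hash */
	/* 2) recompute the metadata (per-vlan CRCs) ... */
	/* 3) ... and announce a new table version */
	atomic_inc(&tt->vn);

	spin_unlock_bh(&tt->commit_lock);
}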
@@ -675,31 +1003,57 @@ struct batadv_priv_tt {
/**
* struct batadv_priv_bla - per mesh interface bridge loop avoidance data
- * @num_requests: number of bla requests in flight
- * @claim_hash: hash table containing mesh nodes this host has claimed
- * @backbone_hash: hash table containing all detected backbone gateways
- * @loopdetect_addr: MAC address used for own loopdetection frames
- * @loopdetect_lasttime: time when the loopdetection frames were sent
- * @loopdetect_next: how many periods to wait for the next loopdetect process
- * @bcast_duplist: recently received broadcast packets array (for broadcast
- * duplicate suppression)
- * @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist
- * @bcast_duplist_lock: lock protecting bcast_duplist & bcast_duplist_curr
- * @claim_dest: local claim data (e.g. claim group)
- * @work: work queue callback item for cleanups & bla announcements
*/
struct batadv_priv_bla {
+ /** @num_requests: number of bla requests in flight */
atomic_t num_requests;
+
+ /**
+ * @claim_hash: hash table containing mesh nodes this host has claimed
+ */
struct batadv_hashtable *claim_hash;
+
+ /**
+ * @backbone_hash: hash table containing all detected backbone gateways
+ */
struct batadv_hashtable *backbone_hash;
+
+ /** @loopdetect_addr: MAC address used for own loopdetection frames */
u8 loopdetect_addr[ETH_ALEN];
+
+ /**
+ * @loopdetect_lasttime: time when the loopdetection frames were sent
+ */
unsigned long loopdetect_lasttime;
+
+ /**
+ * @loopdetect_next: how many periods to wait for the next loopdetect
+ * process
+ */
atomic_t loopdetect_next;
+
+ /**
+ * @bcast_duplist: recently received broadcast packets array (for
+ * broadcast duplicate suppression)
+ */
struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+
+ /**
+ * @bcast_duplist_curr: index of last broadcast packet added to
+ * bcast_duplist
+ */
int bcast_duplist_curr;
- /* protects bcast_duplist & bcast_duplist_curr */
+
+ /**
+ * @bcast_duplist_lock: lock protecting bcast_duplist &
+ * bcast_duplist_curr
+ */
spinlock_t bcast_duplist_lock;
+
+ /** @claim_dest: local claim data (e.g. claim group) */
struct batadv_bla_claim_dst claim_dest;
+
+ /** @work: work queue callback item for cleanups & bla announcements */
struct delayed_work work;
};
#endif
@@ -708,68 +1062,94 @@ struct batadv_priv_bla {
/**
* struct batadv_priv_debug_log - debug logging data
- * @log_buff: buffer holding the logs (ring bufer)
- * @log_start: index of next character to read
- * @log_end: index of next character to write
- * @lock: lock protecting log_buff, log_start & log_end
- * @queue_wait: log reader's wait queue
*/
struct batadv_priv_debug_log {
+ /** @log_buff: buffer holding the logs (ring buffer) */
char log_buff[BATADV_LOG_BUF_LEN];
+
+ /** @log_start: index of next character to read */
unsigned long log_start;
+
+ /** @log_end: index of next character to write */
unsigned long log_end;
- spinlock_t lock; /* protects log_buff, log_start and log_end */
+
+ /** @lock: lock protecting log_buff, log_start & log_end */
+ spinlock_t lock;
+
+ /** @queue_wait: log reader's wait queue */
wait_queue_head_t queue_wait;
};
#endif
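
@log_start and @log_end are free-running counters: the actual ring slot is the counter reduced modulo BATADV_LOG_BUF_LEN, and once the writer gets a whole buffer ahead, the read index is pushed forward and the oldest bytes are dropped. A write-side sketch, assuming BATADV_LOG_BUF_LEN is a power of two as in this driver (hypothetical helper; @lock held by the caller):

static void sketch_log_putc(struct batadv_priv_debug_log *debug_log, char c)
{
	debug_log->log_buff[debug_log->log_end & (BATADV_LOG_BUF_LEN - 1)] = c;
	debug_log->log_end++;

	/* writer lapped the reader: discard the oldest buffer contents */
	if (debug_log->log_end - debug_log->log_start > BATADV_LOG_BUF_LEN)
		debug_log->log_start = debug_log->log_end - BATADV_LOG_BUF_LEN;
}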
/**
* struct batadv_priv_gw - per mesh interface gateway data
- * @gateway_list: list of available gateway nodes
- * @list_lock: lock protecting gateway_list & curr_gw
- * @curr_gw: pointer to currently selected gateway node
- * @mode: gateway operation: off, client or server (see batadv_gw_modes)
- * @sel_class: gateway selection class (applies if gw_mode client)
- * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
- * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
- * @reselect: bool indicating a gateway re-selection is in progress
*/
struct batadv_priv_gw {
+ /** @gateway_list: list of available gateway nodes */
struct hlist_head gateway_list;
- spinlock_t list_lock; /* protects gateway_list & curr_gw */
- struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
+
+ /** @list_lock: lock protecting gateway_list & curr_gw */
+ spinlock_t list_lock;
+
+ /** @curr_gw: pointer to currently selected gateway node */
+ struct batadv_gw_node __rcu *curr_gw;
+
+ /**
+ * @mode: gateway operation: off, client or server (see batadv_gw_modes)
+ */
atomic_t mode;
+
+ /** @sel_class: gateway selection class (applies if gw_mode client) */
atomic_t sel_class;
+
+ /**
+ * @bandwidth_down: advertised uplink download bandwidth (if gw_mode
+ * server)
+ */
atomic_t bandwidth_down;
+
+ /**
+ * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
+ */
atomic_t bandwidth_up;
+
+ /** @reselect: bool indicating a gateway re-selection is in progress */
atomic_t reselect;
};
/**
* struct batadv_priv_tvlv - per mesh interface tvlv data
- * @container_list: list of registered tvlv containers to be sent with each OGM
- * @handler_list: list of the various tvlv content handlers
- * @container_list_lock: protects tvlv container list access
- * @handler_list_lock: protects handler list access
*/
struct batadv_priv_tvlv {
+ /**
+ * @container_list: list of registered tvlv containers to be sent with
+ * each OGM
+ */
struct hlist_head container_list;
+
+ /** @handler_list: list of the various tvlv content handlers */
struct hlist_head handler_list;
- spinlock_t container_list_lock; /* protects container_list */
- spinlock_t handler_list_lock; /* protects handler_list */
+
+ /** @container_list_lock: protects tvlv container list access */
+ spinlock_t container_list_lock;
+
+ /** @handler_list_lock: protects handler list access */
+ spinlock_t handler_list_lock;
};
#ifdef CONFIG_BATMAN_ADV_DAT
/**
* struct batadv_priv_dat - per mesh interface DAT private data
- * @addr: node DAT address
- * @hash: hashtable representing the local ARP cache
- * @work: work queue callback item for cache purging
*/
struct batadv_priv_dat {
+ /** @addr: node DAT address */
batadv_dat_addr_t addr;
+
+ /** @hash: hashtable representing the local ARP cache */
struct batadv_hashtable *hash;
+
+ /** @work: work queue callback item for cache purging */
struct delayed_work work;
};
#endif
@@ -777,375 +1157,582 @@ struct batadv_priv_dat {
#ifdef CONFIG_BATMAN_ADV_MCAST
/**
* struct batadv_mcast_querier_state - IGMP/MLD querier state when bridged
- * @exists: whether a querier exists in the mesh
- * @shadowing: if a querier exists, whether it is potentially shadowing
- * multicast listeners (i.e. querier is behind our own bridge segment)
*/
struct batadv_mcast_querier_state {
+ /** @exists: whether a querier exists in the mesh */
bool exists;
+
+ /**
+ * @shadowing: if a querier exists, whether it is potentially shadowing
+ * multicast listeners (i.e. querier is behind our own bridge segment)
+ */
bool shadowing;
};
/**
* struct batadv_priv_mcast - per mesh interface mcast data
- * @mla_list: list of multicast addresses we are currently announcing via TT
- * @want_all_unsnoopables_list: a list of orig_nodes wanting all unsnoopable
- * multicast traffic
- * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast traffic
- * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast traffic
- * @querier_ipv4: the current state of an IGMP querier in the mesh
- * @querier_ipv6: the current state of an MLD querier in the mesh
- * @flags: the flags we have last sent in our mcast tvlv
- * @enabled: whether the multicast tvlv is currently enabled
- * @bridged: whether the soft interface has a bridge on top
- * @num_disabled: number of nodes that have no mcast tvlv
- * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP traffic
- * @num_want_all_ipv4: counter for items in want_all_ipv4_list
- * @num_want_all_ipv6: counter for items in want_all_ipv6_list
- * @want_lists_lock: lock for protecting modifications to mcast want lists
- * (traversals are rcu-locked)
- * @work: work queue callback item for multicast TT and TVLV updates
*/
struct batadv_priv_mcast {
+ /**
+ * @mla_list: list of multicast addresses we are currently announcing
+ * via TT
+ */
struct hlist_head mla_list; /* see __batadv_mcast_mla_update() */
+
+ /**
+ * @want_all_unsnoopables_list: a list of orig_nodes wanting all
+ * unsnoopable multicast traffic
+ */
struct hlist_head want_all_unsnoopables_list;
+
+ /**
+ * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast
+ * traffic
+ */
struct hlist_head want_all_ipv4_list;
+
+ /**
+ * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast
+ * traffic
+ */
struct hlist_head want_all_ipv6_list;
+
+ /** @querier_ipv4: the current state of an IGMP querier in the mesh */
struct batadv_mcast_querier_state querier_ipv4;
+
+ /** @querier_ipv6: the current state of an MLD querier in the mesh */
struct batadv_mcast_querier_state querier_ipv6;
+
+ /** @flags: the flags we have last sent in our mcast tvlv */
u8 flags;
+
+ /** @enabled: whether the multicast tvlv is currently enabled */
bool enabled;
+
+ /** @bridged: whether the soft interface has a bridge on top */
bool bridged;
+
+ /** @num_disabled: number of nodes that have no mcast tvlv */
atomic_t num_disabled;
+
+ /**
+ * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
+ * traffic
+ */
atomic_t num_want_all_unsnoopables;
+
+ /** @num_want_all_ipv4: counter for items in want_all_ipv4_list */
atomic_t num_want_all_ipv4;
+
+ /** @num_want_all_ipv6: counter for items in want_all_ipv6_list */
atomic_t num_want_all_ipv6;
- /* protects want_all_{unsnoopables,ipv4,ipv6}_list */
+
+ /**
+ * @want_lists_lock: lock for protecting modifications to mcast's
+ * want_all_{unsnoopables,ipv4,ipv6}_list (traversals are rcu-locked)
+ */
spinlock_t want_lists_lock;
+
+ /** @work: work queue callback item for multicast TT and TVLV updates */
struct delayed_work work;
};
#endif
/**
* struct batadv_priv_nc - per mesh interface network coding private data
- * @work: work queue callback item for cleanup
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
- * @max_fwd_delay: maximum packet forward delay to allow coding of packets
- * @max_buffer_time: buffer time for sniffed packets used to decoding
- * @timestamp_fwd_flush: timestamp of last forward packet queue flush
- * @timestamp_sniffed_purge: timestamp of last sniffed packet queue purge
- * @coding_hash: Hash table used to buffer skbs while waiting for another
- * incoming skb to code it with. Skbs are added to the buffer just before being
- * forwarded in routing.c
- * @decoding_hash: Hash table used to buffer skbs that might be needed to decode
- * a received coded skb. The buffer is used for 1) skbs arriving on the
- * soft-interface; 2) skbs overheard on the hard-interface; and 3) skbs
- * forwarded by batman-adv.
*/
struct batadv_priv_nc {
+ /** @work: work queue callback item for cleanup */
struct delayed_work work;
+
+ /**
+ * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+ */
struct dentry *debug_dir;
+
+ /**
+ * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
+ */
u8 min_tq;
+
+ /**
+ * @max_fwd_delay: maximum packet forward delay to allow coding of
+ * packets
+ */
u32 max_fwd_delay;
+
+ /**
+ * @max_buffer_time: buffer time for sniffed packets used for decoding
+ */
u32 max_buffer_time;
+
+ /**
+ * @timestamp_fwd_flush: timestamp of last forward packet queue flush
+ */
unsigned long timestamp_fwd_flush;
+
+ /**
+ * @timestamp_sniffed_purge: timestamp of last sniffed packet queue
+ * purge
+ */
unsigned long timestamp_sniffed_purge;
+
+ /**
+ * @coding_hash: Hash table used to buffer skbs while waiting for
+ * another incoming skb to code it with. Skbs are added to the buffer
+ * just before being forwarded in routing.c
+ */
struct batadv_hashtable *coding_hash;
+
+ /**
+ * @decoding_hash: Hash table used to buffer skbs that might be needed
+ * to decode a received coded skb. The buffer is used for 1) skbs
+ * arriving on the soft-interface; 2) skbs overheard on the
+ * hard-interface; and 3) skbs forwarded by batman-adv.
+ */
struct batadv_hashtable *decoding_hash;
};
/**
* struct batadv_tp_unacked - unacked packet meta-information
- * @seqno: seqno of the unacked packet
- * @len: length of the packet
- * @list: list node for batadv_tp_vars::unacked_list
*
* This struct is supposed to represent a buffered unacked packet. However, since
* the purpose of the TP meter is to count the traffic only, there is no need to
* store the entire sk_buff: the starting offset and the length are enough
*/
struct batadv_tp_unacked {
+ /** @seqno: seqno of the unacked packet */
u32 seqno;
+
+ /** @len: length of the packet */
u16 len;
+
+ /** @list: list node for &batadv_tp_vars.unacked_list */
struct list_head list;
};
/**
* enum batadv_tp_meter_role - Modus in tp meter session
- * @BATADV_TP_RECEIVER: Initialized as receiver
- * @BATADV_TP_SENDER: Initialized as sender
*/
enum batadv_tp_meter_role {
+ /** @BATADV_TP_RECEIVER: Initialized as receiver */
BATADV_TP_RECEIVER,
+
+ /** @BATADV_TP_SENDER: Initialized as sender */
BATADV_TP_SENDER
};
/**
* struct batadv_tp_vars - tp meter private variables per session
- * @list: list node for bat_priv::tp_list
- * @timer: timer for ack (receiver) and retry (sender)
- * @bat_priv: pointer to the mesh object
- * @start_time: start time in jiffies
- * @other_end: mac address of remote
- * @role: receiver/sender modi
- * @sending: sending binary semaphore: 1 if sending, 0 is not
- * @reason: reason for a stopped session
- * @finish_work: work item for the finishing procedure
- * @test_length: test length in milliseconds
- * @session: TP session identifier
- * @icmp_uid: local ICMP "socket" index
- * @dec_cwnd: decimal part of the cwnd used during linear growth
- * @cwnd: current size of the congestion window
- * @cwnd_lock: lock do protect @cwnd & @dec_cwnd
- * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
- * connection switches to the Congestion Avoidance state
- * @last_acked: last acked byte
- * @last_sent: last sent byte, not yet acked
- * @tot_sent: amount of data sent/ACKed so far
- * @dup_acks: duplicate ACKs counter
- * @fast_recovery: true if in Fast Recovery mode
- * @recover: last sent seqno when entering Fast Recovery
- * @rto: sender timeout
- * @srtt: smoothed RTT scaled by 2^3
- * @rttvar: RTT variation scaled by 2^2
- * @more_bytes: waiting queue anchor when waiting for more ack/retry timeout
- * @prerandom_offset: offset inside the prerandom buffer
- * @prerandom_lock: spinlock protecting access to prerandom_offset
- * @last_recv: last in-order received packet
- * @unacked_list: list of unacked packets (meta-info only)
- * @unacked_lock: protect unacked_list
- * @last_recv_time: time time (jiffies) a msg was received
- * @refcount: number of context where the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_tp_vars {
+ /** @list: list node for &bat_priv.tp_list */
struct hlist_node list;
+
+ /** @timer: timer for ack (receiver) and retry (sender) */
struct timer_list timer;
+
+ /** @bat_priv: pointer to the mesh object */
struct batadv_priv *bat_priv;
+
+ /** @start_time: start time in jiffies */
unsigned long start_time;
+
+ /** @other_end: mac address of remote */
u8 other_end[ETH_ALEN];
+
+ /** @role: receiver/sender mode */
enum batadv_tp_meter_role role;
+
+ /** @sending: sending binary semaphore: 1 if sending, 0 if not */
atomic_t sending;
+
+ /** @reason: reason for a stopped session */
enum batadv_tp_meter_reason reason;
+
+ /** @finish_work: work item for the finishing procedure */
struct delayed_work finish_work;
+
+ /** @test_length: test length in milliseconds */
u32 test_length;
+
+ /** @session: TP session identifier */
u8 session[2];
+
+ /** @icmp_uid: local ICMP "socket" index */
u8 icmp_uid;
/* sender variables */
+
+ /** @dec_cwnd: decimal part of the cwnd used during linear growth */
u16 dec_cwnd;
+
+ /** @cwnd: current size of the congestion window */
u32 cwnd;
- spinlock_t cwnd_lock; /* Protects cwnd & dec_cwnd */
+
+ /** @cwnd_lock: lock to protect @cwnd & @dec_cwnd */
+ spinlock_t cwnd_lock;
+
+ /**
+ * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
+ * connection switches to the Congestion Avoidance state
+ */
u32 ss_threshold;
+
+ /** @last_acked: last acked byte */
atomic_t last_acked;
+
+ /** @last_sent: last sent byte, not yet acked */
u32 last_sent;
+
+ /** @tot_sent: amount of data sent/ACKed so far */
atomic64_t tot_sent;
+
+ /** @dup_acks: duplicate ACKs counter */
atomic_t dup_acks;
+
+ /** @fast_recovery: true if in Fast Recovery mode */
bool fast_recovery;
+
+ /** @recover: last sent seqno when entering Fast Recovery */
u32 recover;
+
+ /** @rto: sender timeout */
u32 rto;
+
+ /** @srtt: smoothed RTT scaled by 2^3 */
u32 srtt;
+
+ /** @rttvar: RTT variation scaled by 2^2 */
u32 rttvar;
+
+ /**
+ * @more_bytes: waiting queue anchor when waiting for more ack/retry
+ * timeout
+ */
wait_queue_head_t more_bytes;
+
+ /** @prerandom_offset: offset inside the prerandom buffer */
u32 prerandom_offset;
- spinlock_t prerandom_lock; /* Protects prerandom_offset */
+
+ /** @prerandom_lock: spinlock protecting access to prerandom_offset */
+ spinlock_t prerandom_lock;
/* receiver variables */
+
+ /** @last_recv: last in-order received packet */
u32 last_recv;
+
+ /** @unacked_list: list of unacked packets (meta-info only) */
struct list_head unacked_list;
- spinlock_t unacked_lock; /* Protects unacked_list */
+
+ /** @unacked_lock: protect unacked_list */
+ spinlock_t unacked_lock;
+
+ /** @last_recv_time: time (jiffies) when the last msg was received */
unsigned long last_recv_time;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
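
@cwnd, @dec_cwnd and @ss_threshold implement TCP-Reno-style window growth: below @ss_threshold the window grows by one packet per ACK (slow start); above it, the sub-packet growth per ACK is accumulated in @dec_cwnd until a whole packet's worth has built up (congestion avoidance). A simplified per-ACK sketch (hypothetical helper, @cwnd_lock held; the driver's real arithmetic is scaled differently):

static void sketch_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
{
	if (tp_vars->cwnd <= tp_vars->ss_threshold) {
		/* slow start: roughly doubles the window per round trip */
		tp_vars->cwnd += mss;
		return;
	}

	/* congestion avoidance: grow by about one mss per round trip */
	tp_vars->dec_cwnd += (mss * mss) / tp_vars->cwnd;
	if (tp_vars->dec_cwnd >= mss) {
		tp_vars->dec_cwnd = 0;
		tp_vars->cwnd += mss;
	}
}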
/**
* struct batadv_softif_vlan - per VLAN attributes set
- * @bat_priv: pointer to the mesh object
- * @vid: VLAN identifier
- * @kobj: kobject for sysfs vlan subdirectory
- * @ap_isolation: AP isolation state
- * @tt: TT private attributes (VLAN specific)
- * @list: list node for bat_priv::softif_vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
*/
struct batadv_softif_vlan {
+ /** @bat_priv: pointer to the mesh object */
struct batadv_priv *bat_priv;
+
+ /** @vid: VLAN identifier */
unsigned short vid;
+
+ /** @kobj: kobject for sysfs vlan subdirectory */
struct kobject *kobj;
+
+ /** @ap_isolation: AP isolation state */
atomic_t ap_isolation; /* boolean */
+
+ /** @tt: TT private attributes (VLAN specific) */
struct batadv_vlan_tt tt;
+
+ /** @list: list node for &bat_priv.softif_vlan_list */
struct hlist_node list;
+
+ /**
+ * @refcount: number of contexts where this object is currently in use
+ */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
/**
* struct batadv_priv_bat_v - B.A.T.M.A.N. V per soft-interface private data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
- * @ogm_wq: workqueue used to schedule OGM transmissions
*/
struct batadv_priv_bat_v {
+ /** @ogm_buff: buffer holding the OGM packet */
unsigned char *ogm_buff;
+
+ /** @ogm_buff_len: length of the OGM packet buffer */
int ogm_buff_len;
+
+ /** @ogm_seqno: OGM sequence number - used to identify each OGM */
atomic_t ogm_seqno;
+
+ /** @ogm_wq: workqueue used to schedule OGM transmissions */
struct delayed_work ogm_wq;
};
/**
* struct batadv_priv - per mesh interface data
- * @mesh_state: current status of the mesh (inactive/active/deactivating)
- * @soft_iface: net device which holds this struct as private data
- * @bat_counters: mesh internal traffic statistic counters (see batadv_counters)
- * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
- * @bonding: bool indicating whether traffic bonding is enabled
- * @fragmentation: bool indicating whether traffic fragmentation is enabled
- * @packet_size_max: max packet size that can be transmitted via
- * multiple fragmented skbs or a single frame if fragmentation is disabled
- * @frag_seqno: incremental counter to identify chains of egress fragments
- * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
- * enabled
- * @distributed_arp_table: bool indicating whether distributed ARP table is
- * enabled
- * @multicast_mode: Enable or disable multicast optimizations on this node's
- * sender/originating side
- * @orig_interval: OGM broadcast interval in milliseconds
- * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
- * @log_level: configured log level (see batadv_dbg_level)
- * @isolation_mark: the skb->mark value used to match packets for AP isolation
- * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be used
- * for the isolation mark
- * @bcast_seqno: last sent broadcast packet sequence number
- * @bcast_queue_left: number of remaining buffered broadcast packet slots
- * @batman_queue_left: number of remaining OGM packet slots
- * @num_ifaces: number of interfaces assigned to this mesh interface
- * @mesh_obj: kobject for sysfs mesh subdirectory
- * @debug_dir: dentry for debugfs batman-adv subdirectory
- * @forw_bat_list: list of aggregated OGMs that will be forwarded
- * @forw_bcast_list: list of broadcast packets that will be rebroadcasted
- * @tp_list: list of tp sessions
- * @tp_num: number of currently active tp sessions
- * @orig_hash: hash table containing mesh participants (orig nodes)
- * @forw_bat_list_lock: lock protecting forw_bat_list
- * @forw_bcast_list_lock: lock protecting forw_bcast_list
- * @tp_list_lock: spinlock protecting @tp_list
- * @orig_work: work queue callback item for orig node purging
- * @primary_if: one of the hard-interfaces assigned to this mesh interface
- * becomes the primary interface
- * @algo_ops: routing algorithm used by this mesh interface
- * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
- * of the mesh interface represented by this object
- * @softif_vlan_list_lock: lock protecting softif_vlan_list
- * @bla: bridge loope avoidance data
- * @debug_log: holding debug logging relevant data
- * @gw: gateway data
- * @tt: translation table data
- * @tvlv: type-version-length-value data
- * @dat: distributed arp table data
- * @mcast: multicast data
- * @network_coding: bool indicating whether network coding is enabled
- * @nc: network coding data
- * @bat_v: B.A.T.M.A.N. V per soft-interface private data
*/
struct batadv_priv {
+ /**
+ * @mesh_state: current status of the mesh
+ * (inactive/active/deactivating)
+ */
atomic_t mesh_state;
+
+ /** @soft_iface: net device which holds this struct as private data */
struct net_device *soft_iface;
+
+ /**
+ * @bat_counters: mesh internal traffic statistic counters (see
+ * batadv_counters)
+ */
u64 __percpu *bat_counters; /* Per cpu counters */
+
+ /**
+ * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
+ */
atomic_t aggregated_ogms;
+
+ /** @bonding: bool indicating whether traffic bonding is enabled */
atomic_t bonding;
+
+ /**
+ * @fragmentation: bool indicating whether traffic fragmentation is
+ * enabled
+ */
atomic_t fragmentation;
+
+ /**
+ * @packet_size_max: max packet size that can be transmitted via
+ * multiple fragmented skbs or a single frame if fragmentation is
+ * disabled
+ */
atomic_t packet_size_max;
+
+ /**
+ * @frag_seqno: incremental counter to identify chains of egress
+ * fragments
+ */
atomic_t frag_seqno;
+
#ifdef CONFIG_BATMAN_ADV_BLA
+ /**
+ * @bridge_loop_avoidance: bool indicating whether bridge loop
+ * avoidance is enabled
+ */
atomic_t bridge_loop_avoidance;
#endif
+
#ifdef CONFIG_BATMAN_ADV_DAT
+ /**
+ * @distributed_arp_table: bool indicating whether distributed ARP table
+ * is enabled
+ */
atomic_t distributed_arp_table;
#endif
+
#ifdef CONFIG_BATMAN_ADV_MCAST
+ /**
+ * @multicast_mode: Enable or disable multicast optimizations on this
+ * node's sender/originating side
+ */
atomic_t multicast_mode;
#endif
+
+ /** @orig_interval: OGM broadcast interval in milliseconds */
atomic_t orig_interval;
+
+ /**
+ * @hop_penalty: penalty which will be applied to an OGM's tq-field on
+ * every hop
+ */
atomic_t hop_penalty;
+
#ifdef CONFIG_BATMAN_ADV_DEBUG
+ /** @log_level: configured log level (see batadv_dbg_level) */
atomic_t log_level;
#endif
+
+ /**
+ * @isolation_mark: the skb->mark value used to match packets for AP
+ * isolation
+ */
u32 isolation_mark;
+
+ /**
+ * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be
+ * used for the isolation mark
+ */
u32 isolation_mark_mask;
+
+ /** @bcast_seqno: last sent broadcast packet sequence number */
atomic_t bcast_seqno;
+
+ /**
+ * @bcast_queue_left: number of remaining buffered broadcast packet
+ * slots
+ */
atomic_t bcast_queue_left;
+
+ /** @batman_queue_left: number of remaining OGM packet slots */
atomic_t batman_queue_left;
+
+ /** @num_ifaces: number of interfaces assigned to this mesh interface */
char num_ifaces;
+
+ /** @mesh_obj: kobject for sysfs mesh subdirectory */
struct kobject *mesh_obj;
+
+ /** @debug_dir: dentry for debugfs batman-adv subdirectory */
struct dentry *debug_dir;
+
+ /** @forw_bat_list: list of aggregated OGMs that will be forwarded */
struct hlist_head forw_bat_list;
+
+ /**
+ * @forw_bcast_list: list of broadcast packets that will be
+ * rebroadcasted
+ */
struct hlist_head forw_bcast_list;
+
+ /** @tp_list: list of tp sessions */
struct hlist_head tp_list;
+
+ /** @orig_hash: hash table containing mesh participants (orig nodes) */
struct batadv_hashtable *orig_hash;
- spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
- spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
- spinlock_t tp_list_lock; /* protects tp_list */
+
+ /** @forw_bat_list_lock: lock protecting forw_bat_list */
+ spinlock_t forw_bat_list_lock;
+
+ /** @forw_bcast_list_lock: lock protecting forw_bcast_list */
+ spinlock_t forw_bcast_list_lock;
+
+ /** @tp_list_lock: spinlock protecting @tp_list */
+ spinlock_t tp_list_lock;
+
+ /** @tp_num: number of currently active tp sessions */
atomic_t tp_num;
+
+ /** @orig_work: work queue callback item for orig node purging */
struct delayed_work orig_work;
+
+ /**
+ * @primary_if: one of the hard-interfaces assigned to this mesh
+ * interface becomes the primary interface
+ */
struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
+
+ /** @algo_ops: routing algorithm used by this mesh interface */
struct batadv_algo_ops *algo_ops;
+
+ /**
+ * @softif_vlan_list: a list of softif_vlan structs, one per VLAN
+ * created on top of the mesh interface represented by this object
+ */
struct hlist_head softif_vlan_list;
- spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
+
+ /** @softif_vlan_list_lock: lock protecting softif_vlan_list */
+ spinlock_t softif_vlan_list_lock;
+
#ifdef CONFIG_BATMAN_ADV_BLA
+ /** @bla: bridge loop avoidance data */
struct batadv_priv_bla bla;
#endif
+
#ifdef CONFIG_BATMAN_ADV_DEBUG
+ /** @debug_log: holding debug logging relevant data */
struct batadv_priv_debug_log *debug_log;
#endif
+
+ /** @gw: gateway data */
struct batadv_priv_gw gw;
+
+ /** @tt: translation table data */
struct batadv_priv_tt tt;
+
+ /** @tvlv: type-version-length-value data */
struct batadv_priv_tvlv tvlv;
+
#ifdef CONFIG_BATMAN_ADV_DAT
+ /** @dat: distributed arp table data */
struct batadv_priv_dat dat;
#endif
+
#ifdef CONFIG_BATMAN_ADV_MCAST
+ /** @mcast: multicast data */
struct batadv_priv_mcast mcast;
#endif
+
#ifdef CONFIG_BATMAN_ADV_NC
+ /**
+ * @network_coding: bool indicating whether network coding is enabled
+ */
atomic_t network_coding;
+
+ /** @nc: network coding data */
struct batadv_priv_nc nc;
#endif /* CONFIG_BATMAN_ADV_NC */
+
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ /** @bat_v: B.A.T.M.A.N. V per soft-interface private data */
struct batadv_priv_bat_v bat_v;
#endif
};
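
Since @soft_iface holds this struct as its private data, any code that has the net_device can recover the mesh state with a plain netdev_priv() call (sketch):

#include <linux/netdevice.h>

static struct batadv_priv *sketch_priv_get(struct net_device *soft_iface)
{
	/* batadv_priv is allocated as the private tail of the netdev */
	return netdev_priv(soft_iface);
}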
/**
* struct batadv_socket_client - layer2 icmp socket client data
- * @queue_list: packet queue for packets destined for this socket client
- * @queue_len: number of packets in the packet queue (queue_list)
- * @index: socket client's index in the batadv_socket_client_hash
- * @lock: lock protecting queue_list, queue_len & index
- * @queue_wait: socket client's wait queue
- * @bat_priv: pointer to soft_iface this client belongs to
*/
struct batadv_socket_client {
+ /**
+ * @queue_list: packet queue for packets destined for this socket client
+ */
struct list_head queue_list;
+
+ /** @queue_len: number of packets in the packet queue (queue_list) */
unsigned int queue_len;
+
+ /** @index: socket client's index in the batadv_socket_client_hash */
unsigned char index;
- spinlock_t lock; /* protects queue_list, queue_len & index */
+
+ /** @lock: lock protecting queue_list, queue_len & index */
+ spinlock_t lock;
+
+ /** @queue_wait: socket client's wait queue */
wait_queue_head_t queue_wait;
+
+ /** @bat_priv: pointer to soft_iface this client belongs to */
struct batadv_priv *bat_priv;
};
/**
* struct batadv_socket_packet - layer2 icmp packet for socket client
- * @list: list node for batadv_socket_client::queue_list
- * @icmp_len: size of the layer2 icmp packet
- * @icmp_packet: layer2 icmp packet
*/
struct batadv_socket_packet {
+ /** @list: list node for &batadv_socket_client.queue_list */
struct list_head list;
+
+ /** @icmp_len: size of the layer2 icmp packet */
size_t icmp_len;
+
+ /** @icmp_packet: layer2 icmp packet */
u8 icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
};
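
@lock, @queue_len and @queue_wait of the socket client tie together in the usual producer/consumer shape: packets are queued under the spinlock, then a sleeping reader is woken. A producer-side sketch (hypothetical helper; the read side blocks in wait_event_interruptible() on @queue_wait until @queue_len is non-zero):

static void sketch_socket_enqueue(struct batadv_socket_client *socket_client,
				  struct batadv_socket_packet *socket_packet)
{
	spin_lock_bh(&socket_client->lock);
	list_add_tail(&socket_packet->list, &socket_client->queue_list);
	socket_client->queue_len++;
	spin_unlock_bh(&socket_client->lock);

	/* unblock a reader sleeping on the icmp socket */
	wake_up(&socket_client->queue_wait);
}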
@@ -1153,312 +1740,432 @@ struct batadv_socket_packet {
/**
* struct batadv_bla_backbone_gw - batman-adv gateway bridged into the LAN
- * @orig: originator address of backbone node (mac address of primary iface)
- * @vid: vlan id this gateway was detected on
- * @hash_entry: hlist node for batadv_priv_bla::backbone_hash
- * @bat_priv: pointer to soft_iface this backbone gateway belongs to
- * @lasttime: last time we heard of this backbone gw
- * @wait_periods: grace time for bridge forward delays and bla group forming at
- * bootup phase - no bcast traffic is formwared until it has elapsed
- * @request_sent: if this bool is set to true we are out of sync with this
- * backbone gateway - no bcast traffic is formwared until the situation was
- * resolved
- * @crc: crc16 checksum over all claims
- * @crc_lock: lock protecting crc
- * @report_work: work struct for reporting detected loops
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_bla_backbone_gw {
+ /**
+ * @orig: originator address of backbone node (mac address of primary
+ * iface)
+ */
u8 orig[ETH_ALEN];
+
+ /** @vid: vlan id this gateway was detected on */
unsigned short vid;
+
+ /** @hash_entry: hlist node for &batadv_priv_bla.backbone_hash */
struct hlist_node hash_entry;
+
+ /** @bat_priv: pointer to soft_iface this backbone gateway belongs to */
struct batadv_priv *bat_priv;
+
+ /** @lasttime: last time we heard of this backbone gw */
unsigned long lasttime;
+
+ /**
+ * @wait_periods: grace time for bridge forward delays and bla group
+ * forming at bootup phase - no bcast traffic is formwared until it has
+ * elapsed
+ */
atomic_t wait_periods;
+
+ /**
+ * @request_sent: if this bool is set to true we are out of sync with
+ * this backbone gateway - no bcast traffic is forwarded until the
+ * situation is resolved
+ */
atomic_t request_sent;
+
+ /** @crc: crc16 checksum over all claims */
u16 crc;
- spinlock_t crc_lock; /* protects crc */
+
+ /** @crc_lock: lock protecting crc */
+ spinlock_t crc_lock;
+
+ /** @report_work: work struct for reporting detected loops */
struct work_struct report_work;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
/**
* struct batadv_bla_claim - claimed non-mesh client structure
- * @addr: mac address of claimed non-mesh client
- * @vid: vlan id this client was detected on
- * @backbone_gw: pointer to backbone gw claiming this client
- * @backbone_lock: lock protecting backbone_gw pointer
- * @lasttime: last time we heard of claim (locals only)
- * @hash_entry: hlist node for batadv_priv_bla::claim_hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_bla_claim {
+ /** @addr: mac address of claimed non-mesh client */
u8 addr[ETH_ALEN];
+
+ /** @vid: vlan id this client was detected on */
unsigned short vid;
+
+ /** @backbone_gw: pointer to backbone gw claiming this client */
struct batadv_bla_backbone_gw *backbone_gw;
- spinlock_t backbone_lock; /* protects backbone_gw */
+
+ /** @backbone_lock: lock protecting backbone_gw pointer */
+ spinlock_t backbone_lock;
+
+ /** @lasttime: last time we heard of claim (locals only) */
unsigned long lasttime;
+
+ /** @hash_entry: hlist node for &batadv_priv_bla.claim_hash */
struct hlist_node hash_entry;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
};
#endif
/**
* struct batadv_tt_common_entry - tt local & tt global common data
- * @addr: mac address of non-mesh client
- * @vid: VLAN identifier
- * @hash_entry: hlist node for batadv_priv_tt::local_hash or for
- * batadv_priv_tt::global_hash
- * @flags: various state handling flags (see batadv_tt_client_flags)
- * @added_at: timestamp used for purging stale tt common entries
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_tt_common_entry {
+ /** @addr: mac address of non-mesh client */
u8 addr[ETH_ALEN];
+
+ /** @vid: VLAN identifier */
unsigned short vid;
+
+ /**
+ * @hash_entry: hlist node for &batadv_priv_tt.local_hash or for
+ * &batadv_priv_tt.global_hash
+ */
struct hlist_node hash_entry;
+
+ /** @flags: various state handling flags (see batadv_tt_client_flags) */
u16 flags;
+
+ /** @added_at: timestamp used for purging stale tt common entries */
unsigned long added_at;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
/**
* struct batadv_tt_local_entry - translation table local entry data
- * @common: general translation table data
- * @last_seen: timestamp used for purging stale tt local entries
- * @vlan: soft-interface vlan of the entry
*/
struct batadv_tt_local_entry {
+ /** @common: general translation table data */
struct batadv_tt_common_entry common;
+
+ /** @last_seen: timestamp used for purging stale tt local entries */
unsigned long last_seen;
+
+ /** @vlan: soft-interface vlan of the entry */
struct batadv_softif_vlan *vlan;
};
/**
* struct batadv_tt_global_entry - translation table global entry data
- * @common: general translation table data
- * @orig_list: list of orig nodes announcing this non-mesh client
- * @orig_list_count: number of items in the orig_list
- * @list_lock: lock protecting orig_list
- * @roam_at: time at which TT_GLOBAL_ROAM was set
*/
struct batadv_tt_global_entry {
+ /** @common: general translation table data */
struct batadv_tt_common_entry common;
+
+ /** @orig_list: list of orig nodes announcing this non-mesh client */
struct hlist_head orig_list;
+
+ /** @orig_list_count: number of items in the orig_list */
atomic_t orig_list_count;
- spinlock_t list_lock; /* protects orig_list */
+
+ /** @list_lock: lock protecting orig_list */
+ spinlock_t list_lock;
+
+ /** @roam_at: time at which TT_GLOBAL_ROAM was set */
unsigned long roam_at;
};
/**
* struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
- * @orig_node: pointer to orig node announcing this non-mesh client
- * @ttvn: translation table version number which added the non-mesh client
- * @flags: per orig entry TT sync flags
- * @list: list node for batadv_tt_global_entry::orig_list
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_tt_orig_list_entry {
+ /** @orig_node: pointer to orig node announcing this non-mesh client */
struct batadv_orig_node *orig_node;
+
+ /**
+ * @ttvn: translation table version number which added the non-mesh
+ * client
+ */
u8 ttvn;
+
+ /** @flags: per orig entry TT sync flags */
u8 flags;
+
+ /** @list: list node for &batadv_tt_global_entry.orig_list */
struct hlist_node list;
+
+ /** @refcount: number of contexts the object is used */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
/**
* struct batadv_tt_change_node - structure for tt changes occurred
- * @list: list node for batadv_priv_tt::changes_list
- * @change: holds the actual translation table diff data
*/
struct batadv_tt_change_node {
+ /** @list: list node for &batadv_priv_tt.changes_list */
struct list_head list;
+
+ /** @change: holds the actual translation table diff data */
struct batadv_tvlv_tt_change change;
};
/**
* struct batadv_tt_req_node - data to keep track of the tt requests in flight
- * @addr: mac address address of the originator this request was sent to
- * @issued_at: timestamp used for purging stale tt requests
- * @refcount: number of contexts the object is used by
- * @list: list node for batadv_priv_tt::req_list
*/
struct batadv_tt_req_node {
+ /** @addr: mac address of the originator this request was sent to */
u8 addr[ETH_ALEN];
+
+ /** @issued_at: timestamp used for purging stale tt requests */
unsigned long issued_at;
+
+ /** @refcount: number of contexts the object is used by */
struct kref refcount;
+
+ /** @list: list node for &batadv_priv_tt.req_list */
struct hlist_node list;
};
/**
* struct batadv_tt_roam_node - roaming client data
- * @addr: mac address of the client in the roaming phase
- * @counter: number of allowed roaming events per client within a single
- * OGM interval (changes are committed with each OGM)
- * @first_time: timestamp used for purging stale roaming node entries
- * @list: list node for batadv_priv_tt::roam_list
*/
struct batadv_tt_roam_node {
+ /** @addr: mac address of the client in the roaming phase */
u8 addr[ETH_ALEN];
+
+ /**
+ * @counter: number of allowed roaming events per client within a single
+ * OGM interval (changes are committed with each OGM)
+ */
atomic_t counter;
+
+ /**
+ * @first_time: timestamp used for purging stale roaming node entries
+ */
unsigned long first_time;
+
+ /** @list: list node for &batadv_priv_tt.roam_list */
struct list_head list;
};
/**
* struct batadv_nc_node - network coding node
- * @list: next and prev pointer for the list handling
- * @addr: the node's mac address
- * @refcount: number of contexts the object is used by
- * @rcu: struct used for freeing in an RCU-safe manner
- * @orig_node: pointer to corresponding orig node struct
- * @last_seen: timestamp of last ogm received from this node
*/
struct batadv_nc_node {
+ /** @list: next and prev pointer for the list handling */
struct list_head list;
+
+ /** @addr: the node's mac address */
u8 addr[ETH_ALEN];
+
+ /** @refcount: number of contexts the object is used by */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
+
+ /** @orig_node: pointer to corresponding orig node struct */
struct batadv_orig_node *orig_node;
+
+ /** @last_seen: timestamp of last ogm received from this node */
unsigned long last_seen;
};
/**
* struct batadv_nc_path - network coding path
- * @hash_entry: next and prev pointer for the list handling
- * @rcu: struct used for freeing in an RCU-safe manner
- * @refcount: number of contexts the object is used by
- * @packet_list: list of buffered packets for this path
- * @packet_list_lock: access lock for packet list
- * @next_hop: next hop (destination) of path
- * @prev_hop: previous hop (source) of path
- * @last_valid: timestamp for last validation of path
*/
struct batadv_nc_path {
+ /** @hash_entry: next and prev pointer for the list handling */
struct hlist_node hash_entry;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
+
+ /** @refcount: number of contexts the object is used by */
struct kref refcount;
+
+ /** @packet_list: list of buffered packets for this path */
struct list_head packet_list;
- spinlock_t packet_list_lock; /* Protects packet_list */
+
+ /** @packet_list_lock: access lock for packet list */
+ spinlock_t packet_list_lock;
+
+ /** @next_hop: next hop (destination) of path */
u8 next_hop[ETH_ALEN];
+
+ /** @prev_hop: previous hop (source) of path */
u8 prev_hop[ETH_ALEN];
+
+ /** @last_valid: timestamp for last validation of path */
unsigned long last_valid;
};
/**
* struct batadv_nc_packet - network coding packet used when coding and
* decoding packets
- * @list: next and prev pointer for the list handling
- * @packet_id: crc32 checksum of skb data
- * @timestamp: field containing the info when the packet was added to path
- * @neigh_node: pointer to original next hop neighbor of skb
- * @skb: skb which can be encoded or used for decoding
- * @nc_path: pointer to path this nc packet is attached to
*/
struct batadv_nc_packet {
+ /** @list: next and prev pointer for the list handling */
struct list_head list;
+
+ /** @packet_id: crc32 checksum of skb data */
__be32 packet_id;
+
+ /**
+ * @timestamp: field containing the time when the packet was added to
+ * the path
+ */
unsigned long timestamp;
+
+ /** @neigh_node: pointer to original next hop neighbor of skb */
struct batadv_neigh_node *neigh_node;
+
+ /** @skb: skb which can be encoded or used for decoding */
struct sk_buff *skb;
+
+ /** @nc_path: pointer to path this nc packet is attached to */
struct batadv_nc_path *nc_path;
};
/**
* struct batadv_skb_cb - control buffer structure used to store private data
* relevant to batman-adv in the skb->cb buffer in skbs.
- * @decoded: Marks a skb as decoded, which is checked when searching for coding
- * opportunities in network-coding.c
- * @num_bcasts: Counter for broadcast packet retransmissions
*/
struct batadv_skb_cb {
+ /**
+ * @decoded: Marks a skb as decoded, which is checked when searching for
+ * coding opportunities in network-coding.c
+ */
bool decoded;
+
+ /** @num_bcasts: Counter for broadcast packet retransmissions */
unsigned int num_bcasts;
};
/**
* struct batadv_forw_packet - structure for bcast packets to be sent/forwarded
- * @list: list node for batadv_priv::forw_{bat,bcast}_list
- * @cleanup_list: list node for purging functions
- * @send_time: execution time for delayed_work (packet sending)
- * @own: bool for locally generated packets (local OGMs are re-scheduled after
- * sending)
- * @skb: bcast packet's skb buffer
- * @packet_len: size of aggregated OGM packet inside the skb buffer
- * @direct_link_flags: direct link flags for aggregated OGM packets
- * @num_packets: counter for aggregated OGMv1 packets
- * @delayed_work: work queue callback item for packet sending
- * @if_incoming: pointer to incoming hard-iface or primary iface if
- * locally generated packet
- * @if_outgoing: packet where the packet should be sent to, or NULL if
- * unspecified
- * @queue_left: The queue (counter) this packet was applied to
*/
struct batadv_forw_packet {
+ /**
+ * @list: list node for &batadv_priv.forw.bcast_list and
+ * &batadv_priv.forw.bat_list
+ */
struct hlist_node list;
+
+ /** @cleanup_list: list node for purging functions */
struct hlist_node cleanup_list;
+
+ /** @send_time: execution time for delayed_work (packet sending) */
unsigned long send_time;
+
+ /**
+ * @own: bool for locally generated packets (local OGMs are re-scheduled
+ * after sending)
+ */
u8 own;
+
+ /** @skb: bcast packet's skb buffer */
struct sk_buff *skb;
+
+ /** @packet_len: size of aggregated OGM packet inside the skb buffer */
u16 packet_len;
+
+ /** @direct_link_flags: direct link flags for aggregated OGM packets */
u32 direct_link_flags;
+
+ /** @num_packets: counter for aggregated OGMv1 packets */
u8 num_packets;
+
+ /** @delayed_work: work queue callback item for packet sending */
struct delayed_work delayed_work;
+
+ /**
+ * @if_incoming: pointer to incoming hard-iface or primary iface if
+ * locally generated packet
+ */
struct batadv_hard_iface *if_incoming;
+
+ /**
+ * @if_outgoing: interface where the packet should be sent to, or NULL if
+ * unspecified
+ */
struct batadv_hard_iface *if_outgoing;
+
+ /** @queue_left: The queue (counter) this packet was applied to */
atomic_t *queue_left;
};
/**
* struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific)
- * @activate: start routing mechanisms when hard-interface is brought up
- * (optional)
- * @enable: init routing info when hard-interface is enabled
- * @disable: de-init routing info when hard-interface is disabled
- * @update_mac: (re-)init mac addresses of the protocol information
- * belonging to this hard-interface
- * @primary_set: called when primary interface is selected / changed
*/
struct batadv_algo_iface_ops {
+ /**
+ * @activate: start routing mechanisms when hard-interface is brought up
+ * (optional)
+ */
void (*activate)(struct batadv_hard_iface *hard_iface);
+
+ /** @enable: init routing info when hard-interface is enabled */
int (*enable)(struct batadv_hard_iface *hard_iface);
+
+ /** @disable: de-init routing info when hard-interface is disabled */
void (*disable)(struct batadv_hard_iface *hard_iface);
+
+ /**
+ * @update_mac: (re-)init mac addresses of the protocol information
+ * belonging to this hard-interface
+ */
void (*update_mac)(struct batadv_hard_iface *hard_iface);
+
+ /** @primary_set: called when primary interface is selected / changed */
void (*primary_set)(struct batadv_hard_iface *hard_iface);
};
/**
* struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific)
- * @hardif_init: called on creation of single hop entry
- * (optional)
- * @cmp: compare the metrics of two neighbors for their respective outgoing
- * interfaces
- * @is_similar_or_better: check if neigh1 is equally similar or better than
- * neigh2 for their respective outgoing interface from the metric prospective
- * @print: print the single hop neighbor list (optional)
- * @dump: dump neighbors to a netlink socket (optional)
*/
struct batadv_algo_neigh_ops {
+ /** @hardif_init: called on creation of single hop entry (optional) */
void (*hardif_init)(struct batadv_hardif_neigh_node *neigh);
+
+ /**
+ * @cmp: compare the metrics of two neighbors for their respective
+ * outgoing interfaces
+ */
int (*cmp)(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2);
+
+ /**
+ * @is_similar_or_better: check if neigh1 is equally similar or better
+ * than neigh2 for their respective outgoing interface from the metric
+ * perspective
+ */
bool (*is_similar_or_better)(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2);
+
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+ /** @print: print the single hop neighbor list (optional) */
void (*print)(struct batadv_priv *priv, struct seq_file *seq);
#endif
+
+ /** @dump: dump neighbors to a netlink socket (optional) */
void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *priv,
struct batadv_hard_iface *hard_iface);
@@ -1466,24 +2173,36 @@ struct batadv_algo_neigh_ops {
/**
* struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
- * @free: free the resources allocated by the routing algorithm for an orig_node
- * object (optional)
- * @add_if: ask the routing algorithm to apply the needed changes to the
- * orig_node due to a new hard-interface being added into the mesh (optional)
- * @del_if: ask the routing algorithm to apply the needed changes to the
- * orig_node due to an hard-interface being removed from the mesh (optional)
- * @print: print the originator table (optional)
- * @dump: dump originators to a netlink socket (optional)
*/
struct batadv_algo_orig_ops {
+ /**
+ * @free: free the resources allocated by the routing algorithm for an
+ * orig_node object (optional)
+ */
void (*free)(struct batadv_orig_node *orig_node);
+
+ /**
+ * @add_if: ask the routing algorithm to apply the needed changes to the
+ * orig_node due to a new hard-interface being added into the mesh
+ * (optional)
+ */
int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+
+ /**
+ * @del_if: ask the routing algorithm to apply the needed changes to the
+ * orig_node due to a hard-interface being removed from the mesh
+ * (optional)
+ */
int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
int del_if_num);
+
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+ /** @print: print the originator table (optional) */
void (*print)(struct batadv_priv *priv, struct seq_file *seq,
struct batadv_hard_iface *hard_iface);
#endif
+
+ /** @dump: dump originators to a netlink socket (optional) */
void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *priv,
struct batadv_hard_iface *hard_iface);
@@ -1491,158 +2210,213 @@ struct batadv_algo_orig_ops {
/**
* struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
- * @init_sel_class: initialize GW selection class (optional)
- * @store_sel_class: parse and stores a new GW selection class (optional)
- * @show_sel_class: prints the current GW selection class (optional)
- * @get_best_gw_node: select the best GW from the list of available nodes
- * (optional)
- * @is_eligible: check if a newly discovered GW is a potential candidate for
- * the election as best GW (optional)
- * @print: print the gateway table (optional)
- * @dump: dump gateways to a netlink socket (optional)
*/
struct batadv_algo_gw_ops {
+ /** @init_sel_class: initialize GW selection class (optional) */
void (*init_sel_class)(struct batadv_priv *bat_priv);
+
+ /**
+ * @store_sel_class: parses and stores a new GW selection class
+ * (optional)
+ */
ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
size_t count);
+
+ /** @show_sel_class: prints the current GW selection class (optional) */
ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
+
+ /**
+ * @get_best_gw_node: select the best GW from the list of available
+ * nodes (optional)
+ */
struct batadv_gw_node *(*get_best_gw_node)
(struct batadv_priv *bat_priv);
+
+ /**
+ * @is_eligible: check if a newly discovered GW is a potential candidate
+ * for the election as best GW (optional)
+ */
bool (*is_eligible)(struct batadv_priv *bat_priv,
struct batadv_orig_node *curr_gw_orig,
struct batadv_orig_node *orig_node);
+
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+ /** @print: print the gateway table (optional) */
void (*print)(struct batadv_priv *bat_priv, struct seq_file *seq);
#endif
+
+ /** @dump: dump gateways to a netlink socket (optional) */
void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *priv);
};
/**
* struct batadv_algo_ops - mesh algorithm callbacks
- * @list: list node for the batadv_algo_list
- * @name: name of the algorithm
- * @iface: callbacks related to interface handling
- * @neigh: callbacks related to neighbors handling
- * @orig: callbacks related to originators handling
- * @gw: callbacks related to GW mode
*/
struct batadv_algo_ops {
+ /** @list: list node for the batadv_algo_list */
struct hlist_node list;
+
+ /** @name: name of the algorithm */
char *name;
+
+ /** @iface: callbacks related to interface handling */
struct batadv_algo_iface_ops iface;
+
+ /** @neigh: callbacks related to neighbors handling */
struct batadv_algo_neigh_ops neigh;
+
+ /** @orig: callbacks related to originators handling */
struct batadv_algo_orig_ops orig;
+
+ /** @gw: callbacks related to GW mode */
struct batadv_algo_gw_ops gw;
};
/**
* struct batadv_dat_entry - it is a single entry of the batman-adv ARP
* backend. It is used to store ARP entries needed for the global DAT cache
- * @ip: the IPv4 corresponding to this DAT/ARP entry
- * @mac_addr: the MAC address associated to the stored IPv4
- * @vid: the vlan ID associated to this entry
- * @last_update: time in jiffies when this entry was refreshed last time
- * @hash_entry: hlist node for batadv_priv_dat::hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_dat_entry {
+ /** @ip: the IPv4 address corresponding to this DAT/ARP entry */
__be32 ip;
+
+ /** @mac_addr: the MAC address associated with the stored IPv4 */
u8 mac_addr[ETH_ALEN];
+
+ /** @vid: the vlan ID associated with this entry */
unsigned short vid;
+
+ /**
+ * @last_update: time in jiffies when this entry was last refreshed
+ */
unsigned long last_update;
+
+ /** @hash_entry: hlist node for &batadv_priv_dat.hash */
struct hlist_node hash_entry;
+
+ /** @refcount: number of contexts the object is used by */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
/**
* struct batadv_hw_addr - a list entry for a MAC address
- * @list: list node for the linking of entries
- * @addr: the MAC address of this list entry
*/
struct batadv_hw_addr {
+ /** @list: list node for the linking of entries */
struct hlist_node list;
+
+ /** @addr: the MAC address of this list entry */
unsigned char addr[ETH_ALEN];
};
/**
* struct batadv_dat_candidate - candidate destination for DAT operations
- * @type: the type of the selected candidate. It can one of the following:
- * - BATADV_DAT_CANDIDATE_NOT_FOUND
- * - BATADV_DAT_CANDIDATE_ORIG
- * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to the
- * corresponding originator node structure
*/
struct batadv_dat_candidate {
+ /**
+ * @type: the type of the selected candidate. It can be one of the
+ * following:
+ * - BATADV_DAT_CANDIDATE_NOT_FOUND
+ * - BATADV_DAT_CANDIDATE_ORIG
+ */
int type;
+
+ /**
+ * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to
+ * the corresponding originator node structure
+ */
struct batadv_orig_node *orig_node;
};
/**
* struct batadv_tvlv_container - container for tvlv appended to OGMs
- * @list: hlist node for batadv_priv_tvlv::container_list
- * @tvlv_hdr: tvlv header information needed to construct the tvlv
- * @refcount: number of contexts the object is used
*/
struct batadv_tvlv_container {
+ /** @list: hlist node for &batadv_priv_tvlv.container_list */
struct hlist_node list;
+
+ /** @tvlv_hdr: tvlv header information needed to construct the tvlv */
struct batadv_tvlv_hdr tvlv_hdr;
+
+ /** @refcount: number of contexts the object is used by */
struct kref refcount;
};
/**
* struct batadv_tvlv_handler - handler for specific tvlv type and version
- * @list: hlist node for batadv_priv_tvlv::handler_list
- * @ogm_handler: handler callback which is given the tvlv payload to process on
- * incoming OGM packets
- * @unicast_handler: handler callback which is given the tvlv payload to process
- * on incoming unicast tvlv packets
- * @type: tvlv type this handler feels responsible for
- * @version: tvlv version this handler feels responsible for
- * @flags: tvlv handler flags
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_tvlv_handler {
+ /** @list: hlist node for &batadv_priv_tvlv.handler_list */
struct hlist_node list;
+
+ /**
+ * @ogm_handler: handler callback which is given the tvlv payload to
+ * process on incoming OGM packets
+ */
void (*ogm_handler)(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 flags, void *tvlv_value, u16 tvlv_value_len);
+
+ /**
+ * @unicast_handler: handler callback which is given the tvlv payload to
+ * process on incoming unicast tvlv packets
+ */
int (*unicast_handler)(struct batadv_priv *bat_priv,
u8 *src, u8 *dst,
void *tvlv_value, u16 tvlv_value_len);
+
+ /** @type: tvlv type this handler feels responsible for */
u8 type;
+
+ /** @version: tvlv version this handler feels responsible for */
u8 version;
+
+ /** @flags: tvlv handler flags */
u8 flags;
+
+ /** @refcount: number of contexts the object is used by */
struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
};
/**
* enum batadv_tvlv_handler_flags - tvlv handler flags definitions
- * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function will call
- * this handler even if its type was not found (with no data)
- * @BATADV_TVLV_HANDLER_OGM_CALLED: interval tvlv handling flag - the API marks
- * a handler as being called, so it won't be called if the
- * BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
*/
enum batadv_tvlv_handler_flags {
+ /**
+ * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function
+ * will call this handler even if its type was not found (with no data)
+ */
BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+
+ /**
+ * @BATADV_TVLV_HANDLER_OGM_CALLED: internal tvlv handling flag - the
+ * API marks a handler as being called, so it won't be called if the
+ * BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+ */
BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
};
/**
* struct batadv_store_mesh_work - Work queue item to detach add/del interface
* from sysfs locks
- * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
- * @soft_iface_name: name of soft-interface to modify
- * @work: work queue item
*/
struct batadv_store_mesh_work {
+ /**
+ * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
+ */
struct net_device *net_dev;
+
+ /** @soft_iface_name: name of soft-interface to modify */
char soft_iface_name[IFNAMSIZ];
+
+ /** @work: work queue item */
struct work_struct work;
};
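The batman-adv hunks above mechanically move each struct's member documentation out of the kernel-doc block above the definition and next to the members themselves. A minimal sketch of the two styles, using a hypothetical struct (foo_old/foo_new are illustrative, not from the tree):

    #include <linux/atomic.h>
    #include <linux/spinlock.h>

    /* Old style: members documented in the struct's header block. */
    /**
     * struct foo_old - example object
     * @lock: protects @count
     * @count: number of active users
     */
    struct foo_old {
            spinlock_t lock; /* protects count */
            atomic_t count;
    };

    /* New style: short header, one kernel-doc comment per member. */
    /**
     * struct foo_new - example object
     */
    struct foo_new {
            /** @lock: protects @count */
            spinlock_t lock;

            /** @count: number of active users */
            atomic_t count;
    };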
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 91e3ba280706..f897681780db 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -421,7 +421,7 @@ out:
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);
-static inline unsigned int bt_accept_poll(struct sock *parent)
+static inline __poll_t bt_accept_poll(struct sock *parent)
{
struct bt_sock *s, *n;
struct sock *sk;
@@ -437,11 +437,11 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
return 0;
}
-unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+__poll_t bt_sock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -766,43 +766,39 @@ static int __init bt_init(void)
return err;
err = sock_register(&bt_sock_family_ops);
- if (err < 0) {
- bt_sysfs_cleanup();
- return err;
- }
+ if (err)
+ goto cleanup_sysfs;
BT_INFO("HCI device and connection manager initialized");
err = hci_sock_init();
- if (err < 0)
- goto error;
+ if (err)
+ goto unregister_socket;
err = l2cap_init();
- if (err < 0)
- goto sock_err;
+ if (err)
+ goto cleanup_socket;
err = sco_init();
- if (err < 0) {
- l2cap_exit();
- goto sock_err;
- }
+ if (err)
+ goto cleanup_cap;
err = mgmt_init();
- if (err < 0) {
- sco_exit();
- l2cap_exit();
- goto sock_err;
- }
+ if (err)
+ goto cleanup_sco;
return 0;
-sock_err:
+cleanup_sco:
+ sco_exit();
+cleanup_cap:
+ l2cap_exit();
+cleanup_socket:
hci_sock_cleanup();
-
-error:
+unregister_socket:
sock_unregister(PF_BLUETOOTH);
+cleanup_sysfs:
bt_sysfs_cleanup();
-
return err;
}
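The reworked bt_init() above replaces the per-branch cleanup calls with one ladder of goto labels that unwinds in reverse registration order, so each newly added step needs exactly one label. A standalone sketch of the pattern, with hypothetical init_a()/init_b()/init_c() standing in for the sysfs, socket and protocol setup calls:

    #include <stdio.h>

    static int init_a(void) { return 0; }
    static void exit_a(void) { }
    static int init_b(void) { return 0; }
    static void exit_b(void) { }
    static int init_c(void) { return -1; }  /* pretend this step fails */

    static int setup(void)
    {
            int err;

            err = init_a();
            if (err)
                    return err;

            err = init_b();
            if (err)
                    goto cleanup_a;

            err = init_c();
            if (err)
                    goto cleanup_b;

            return 0;

    cleanup_b:      /* unwind in reverse order, one label per step */
            exit_b();
    cleanup_a:
            exit_a();
            return err;
    }

    int main(void)
    {
            printf("setup() = %d\n", setup());  /* prints -1 here */
            return 0;
    }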
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index bb308224099c..426a92f02db4 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -527,7 +527,6 @@ static int cmtp_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations cmtp_proc_fops = {
- .owner = THIS_MODULE,
.open = cmtp_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 63df63ebfb24..57403bd567d0 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -88,6 +88,9 @@ static int __name ## _show(struct seq_file *f, void *ptr) \
return 0; \
} \
\
+DEFINE_SHOW_ATTRIBUTE(__name)
+
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __name ## _show, inode->i_private); \
@@ -106,37 +109,16 @@ static int features_show(struct seq_file *f, void *ptr)
u8 p;
hci_dev_lock(hdev);
- for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
- seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
- "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
- hdev->features[p][0], hdev->features[p][1],
- hdev->features[p][2], hdev->features[p][3],
- hdev->features[p][4], hdev->features[p][5],
- hdev->features[p][6], hdev->features[p][7]);
- }
+ for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++)
+ seq_printf(f, "%2u: %8ph\n", p, hdev->features[p]);
if (lmp_le_capable(hdev))
- seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
- "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
- hdev->le_features[0], hdev->le_features[1],
- hdev->le_features[2], hdev->le_features[3],
- hdev->le_features[4], hdev->le_features[5],
- hdev->le_features[6], hdev->le_features[7]);
+ seq_printf(f, "LE: %8ph\n", hdev->le_features);
hci_dev_unlock(hdev);
return 0;
}
-static int features_open(struct inode *inode, struct file *file)
-{
- return single_open(file, features_show, inode->i_private);
-}
-
-static const struct file_operations features_fops = {
- .open = features_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(features);
static int device_id_show(struct seq_file *f, void *ptr)
{
@@ -150,17 +132,7 @@ static int device_id_show(struct seq_file *f, void *ptr)
return 0;
}
-static int device_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, device_id_show, inode->i_private);
-}
-
-static const struct file_operations device_id_fops = {
- .open = device_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_id);
static int device_list_show(struct seq_file *f, void *ptr)
{
@@ -180,17 +152,7 @@ static int device_list_show(struct seq_file *f, void *ptr)
return 0;
}
-static int device_list_open(struct inode *inode, struct file *file)
-{
- return single_open(file, device_list_show, inode->i_private);
-}
-
-static const struct file_operations device_list_fops = {
- .open = device_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_list);
static int blacklist_show(struct seq_file *f, void *p)
{
@@ -205,17 +167,7 @@ static int blacklist_show(struct seq_file *f, void *p)
return 0;
}
-static int blacklist_open(struct inode *inode, struct file *file)
-{
- return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
- .open = blacklist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(blacklist);
static int uuids_show(struct seq_file *f, void *p)
{
@@ -240,17 +192,7 @@ static int uuids_show(struct seq_file *f, void *p)
return 0;
}
-static int uuids_open(struct inode *inode, struct file *file)
-{
- return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
- .open = uuids_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(uuids);
static int remote_oob_show(struct seq_file *f, void *ptr)
{
@@ -269,17 +211,7 @@ static int remote_oob_show(struct seq_file *f, void *ptr)
return 0;
}
-static int remote_oob_open(struct inode *inode, struct file *file)
-{
- return single_open(file, remote_oob_show, inode->i_private);
-}
-
-static const struct file_operations remote_oob_fops = {
- .open = remote_oob_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(remote_oob);
static int conn_info_min_age_set(void *data, u64 val)
{
@@ -443,17 +375,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
return 0;
}
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
- return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
- .open = inquiry_cache_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(inquiry_cache);
static int link_keys_show(struct seq_file *f, void *ptr)
{
@@ -469,17 +391,7 @@ static int link_keys_show(struct seq_file *f, void *ptr)
return 0;
}
-static int link_keys_open(struct inode *inode, struct file *file)
-{
- return single_open(file, link_keys_show, inode->i_private);
-}
-
-static const struct file_operations link_keys_fops = {
- .open = link_keys_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(link_keys);
static int dev_class_show(struct seq_file *f, void *ptr)
{
@@ -493,17 +405,7 @@ static int dev_class_show(struct seq_file *f, void *ptr)
return 0;
}
-static int dev_class_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dev_class_show, inode->i_private);
-}
-
-static const struct file_operations dev_class_fops = {
- .open = dev_class_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dev_class);
static int voice_setting_get(void *data, u64 *val)
{
@@ -692,17 +594,7 @@ static int identity_show(struct seq_file *f, void *p)
return 0;
}
-static int identity_open(struct inode *inode, struct file *file)
-{
- return single_open(file, identity_show, inode->i_private);
-}
-
-static const struct file_operations identity_fops = {
- .open = identity_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity);
static int rpa_timeout_set(void *data, u64 val)
{
@@ -746,17 +638,7 @@ static int random_address_show(struct seq_file *f, void *p)
return 0;
}
-static int random_address_open(struct inode *inode, struct file *file)
-{
- return single_open(file, random_address_show, inode->i_private);
-}
-
-static const struct file_operations random_address_fops = {
- .open = random_address_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(random_address);
static int static_address_show(struct seq_file *f, void *p)
{
@@ -769,17 +651,7 @@ static int static_address_show(struct seq_file *f, void *p)
return 0;
}
-static int static_address_open(struct inode *inode, struct file *file)
-{
- return single_open(file, static_address_show, inode->i_private);
-}
-
-static const struct file_operations static_address_fops = {
- .open = static_address_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(static_address);
static ssize_t force_static_address_read(struct file *file,
char __user *user_buf,
@@ -841,17 +713,7 @@ static int white_list_show(struct seq_file *f, void *ptr)
return 0;
}
-static int white_list_open(struct inode *inode, struct file *file)
-{
- return single_open(file, white_list_show, inode->i_private);
-}
-
-static const struct file_operations white_list_fops = {
- .open = white_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(white_list);
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
@@ -869,18 +731,7 @@ static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
return 0;
}
-static int identity_resolving_keys_open(struct inode *inode, struct file *file)
-{
- return single_open(file, identity_resolving_keys_show,
- inode->i_private);
-}
-
-static const struct file_operations identity_resolving_keys_fops = {
- .open = identity_resolving_keys_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity_resolving_keys);
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
@@ -898,17 +749,7 @@ static int long_term_keys_show(struct seq_file *f, void *ptr)
return 0;
}
-static int long_term_keys_open(struct inode *inode, struct file *file)
-{
- return single_open(file, long_term_keys_show, inode->i_private);
-}
-
-static const struct file_operations long_term_keys_fops = {
- .open = long_term_keys_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(long_term_keys);
static int conn_min_interval_set(void *data, u64 val)
{
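Each DEFINE_SHOW_ATTRIBUTE(name) above stamps out the open function and file_operations that previously followed every _show() by hand. For features, the macro expands to roughly the block this hunk removes:

    static int features_open(struct inode *inode, struct file *file)
    {
            return single_open(file, features_show, inode->i_private);
    }

    static const struct file_operations features_fops = {
            .open    = features_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };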
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index abc0f3224dd1..3394e6791673 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -919,6 +919,43 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
return true;
}
+static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
+{
+ /* If there is no connection we are OK to advertise. */
+ if (hci_conn_num(hdev, LE_LINK) == 0)
+ return true;
+
+ /* Check le_states if there is any connection in slave role. */
+ if (hdev->conn_hash.le_num_slave > 0) {
+ /* Slave connection state and non connectable mode bit 20. */
+ if (!connectable && !(hdev->le_states[2] & 0x10))
+ return false;
+
+ /* Slave connection state and connectable mode bit 38
+ * and scannable bit 21.
+ */
+ if (connectable && (!(hdev->le_states[4] & 0x01) ||
+ !(hdev->le_states[2] & 0x40)))
+ return false;
+ }
+
+ /* Check le_states if there is any connection in master role. */
+ if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
+ /* Master connection state and non connectable mode bit 18. */
+ if (!connectable && !(hdev->le_states[2] & 0x02))
+ return false;
+
+ /* Master connection state and connectable mode bit 35 and
+ * scannable 19.
+ */
+ if (connectable && (!(hdev->le_states[4] & 0x10) ||
+ !(hdev->le_states[2] & 0x08)))
+ return false;
+ }
+
+ return true;
+}
+
void __hci_req_enable_advertising(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
@@ -927,7 +964,15 @@ void __hci_req_enable_advertising(struct hci_request *req)
bool connectable;
u32 flags;
- if (hci_conn_num(hdev, LE_LINK) > 0)
+ flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
+
+ /* If the "connectable" instance flag was not set, then choose between
+ * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+ */
+ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+ mgmt_get_connectable(hdev);
+
+ if (!is_advertising_allowed(hdev, connectable))
return;
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
@@ -940,14 +985,6 @@ void __hci_req_enable_advertising(struct hci_request *req)
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
- flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
-
- /* If the "connectable" instance flag was not set, then choose between
- * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
- */
- connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
- mgmt_get_connectable(hdev);
-
/* Set require_privacy to true only when non-connectable
* advertising is used. In that case it is fine to use a
* non-resolvable private address.
@@ -1985,13 +2022,6 @@ unlock:
hci_dev_unlock(hdev);
}
-static void disable_advertising(struct hci_request *req)
-{
- u8 enable = 0x00;
-
- hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
static int active_scan(struct hci_request *req, unsigned long opt)
{
uint16_t interval = opt;
@@ -2017,7 +2047,7 @@ static int active_scan(struct hci_request *req, unsigned long opt)
cancel_adv_timeout(hdev);
hci_dev_unlock(hdev);
- disable_advertising(req);
+ __hci_req_disable_advertising(req);
}
/* If controller is scanning, it means the background scanning is
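The le_states[] tests in is_advertising_allowed() above pair a byte index with a bit mask; for example, the comment's bit 20 matches le_states[2] & 0x10 under LSB-first, 0-based numbering (20 / 8 = 2, 1 << (20 % 8) = 0x10). A sketch of a helper for that convention (le_state_bit is hypothetical, not a kernel API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Test 0-based, LSB-first bit `nr` in a byte array laid out like
     * le_states[]; under this convention,
     * le_state_bit(states, 20) == !!(states[2] & 0x10).
     */
    static bool le_state_bit(const uint8_t *states, unsigned int nr)
    {
            return states[nr / 8] & (1u << (nr % 8));
    }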
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index f2cec70d520c..1036e4fa1ea2 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -789,7 +789,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->dev.parent = &session->conn->hcon->dev;
hid->ll_driver = &hidp_hid_driver;
- /* True if device is blacklisted in drivers/hid/hid-core.c */
+ /* True if device is blacklisted in drivers/hid/hid-quirks.c */
if (hid_ignore(hid)) {
hid_destroy_device(session->hid);
session->hid = NULL;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 43ba91c440bc..fc6615d59165 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3363,9 +3363,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
break;
case L2CAP_CONF_EFS:
- remote_efs = 1;
- if (olen == sizeof(efs))
+ if (olen == sizeof(efs)) {
+ remote_efs = 1;
memcpy(&efs, (void *) val, olen);
+ }
break;
case L2CAP_CONF_EWS:
@@ -3584,16 +3585,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
break;
case L2CAP_CONF_EFS:
- if (olen == sizeof(efs))
+ if (olen == sizeof(efs)) {
memcpy(&efs, (void *)val, olen);
- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != chan->local_stype)
- return -ECONNREFUSED;
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype)
+ return -ECONNREFUSED;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs, endptr - ptr);
+ }
break;
case L2CAP_CONF_FCS:
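Both L2CAP hunks above move the option handling inside the olen check, so an EFS option with a bogus length is skipped instead of being copied and acted on. A minimal sketch of the validate-before-use pattern (the two-field struct efs here is illustrative only):

    #include <stdint.h>
    #include <string.h>

    struct efs {
            uint8_t id;
            uint8_t stype;
    };

    /* Copy and act on an option only when its advertised length matches
     * the expected size; otherwise ignore it entirely.
     */
    static int parse_efs_opt(const uint8_t *val, size_t olen, struct efs *out)
    {
            if (olen != sizeof(*out))
                    return 0;       /* malformed: don't trust it */
            memcpy(out, val, olen);
            return 1;
    }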
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index af5b8c87f590..1285ca30ab0a 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -125,9 +125,16 @@ static int br_dev_init(struct net_device *dev)
if (!br->stats)
return -ENOMEM;
+ err = br_fdb_hash_init(br);
+ if (err) {
+ free_percpu(br->stats);
+ return err;
+ }
+
err = br_vlan_init(br);
if (err) {
free_percpu(br->stats);
+ br_fdb_hash_fini(br);
return err;
}
@@ -135,6 +142,7 @@ static int br_dev_init(struct net_device *dev)
if (err) {
free_percpu(br->stats);
br_vlan_flush(br);
+ br_fdb_hash_fini(br);
}
br_set_lockdep_class(dev);
@@ -148,6 +156,7 @@ static void br_dev_uninit(struct net_device *dev)
br_multicast_dev_del(br);
br_multicast_uninit_stats(br);
br_vlan_flush(br);
+ br_fdb_hash_fini(br);
free_percpu(br->stats);
}
@@ -416,6 +425,7 @@ void br_dev_setup(struct net_device *dev)
br->dev = dev;
spin_lock_init(&br->lock);
INIT_LIST_HEAD(&br->port_list);
+ INIT_HLIST_HEAD(&br->fdb_list);
spin_lock_init(&br->hash_lock);
br->bridge_id.prio[0] = 0x80;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 4ea5c8bbe286..dc87fbc9a23b 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,14 +28,20 @@
#include <trace/events/bridge.h>
#include "br_private.h"
+static const struct rhashtable_params br_fdb_rht_params = {
+ .head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
+ .key_offset = offsetof(struct net_bridge_fdb_entry, key),
+ .key_len = sizeof(struct net_bridge_fdb_key),
+ .automatic_shrinking = true,
+ .locks_mul = 1,
+};
+
static struct kmem_cache *br_fdb_cache __read_mostly;
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
static void fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *, int);
-static u32 fdb_salt __read_mostly;
-
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -45,7 +51,6 @@ int __init br_fdb_init(void)
if (!br_fdb_cache)
return -ENOMEM;
- get_random_bytes(&fdb_salt, sizeof(fdb_salt));
return 0;
}
@@ -54,6 +59,15 @@ void br_fdb_fini(void)
kmem_cache_destroy(br_fdb_cache);
}
+int br_fdb_hash_init(struct net_bridge *br)
+{
+ return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
+}
+
+void br_fdb_hash_fini(struct net_bridge *br)
+{
+ rhashtable_destroy(&br->fdb_hash_tbl);
+}
/* if topology_changing then use forward_delay (default 15 sec)
* otherwise keep longer (default 5 minutes)
@@ -70,13 +84,6 @@ static inline int has_expired(const struct net_bridge *br,
time_before_eq(fdb->updated + hold_time(br), jiffies);
}
-static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
-{
- /* use 1 byte of OUI and 3 bytes of NIC */
- u32 key = get_unaligned((u32 *)(mac + 2));
- return jhash_2words(key, vid, fdb_salt) & (BR_HASH_SIZE - 1);
-}
-
static void fdb_rcu_free(struct rcu_head *head)
{
struct net_bridge_fdb_entry *ent
@@ -84,19 +91,18 @@ static void fdb_rcu_free(struct rcu_head *head)
kmem_cache_free(br_fdb_cache, ent);
}
-static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
const unsigned char *addr,
__u16 vid)
{
- struct net_bridge_fdb_entry *f;
+ struct net_bridge_fdb_key key;
WARN_ON_ONCE(!rcu_read_lock_held());
- hlist_for_each_entry_rcu(f, head, hlist)
- if (ether_addr_equal(f->addr.addr, addr) && f->vlan_id == vid)
- break;
+ key.vlan_id = vid;
+ memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
- return f;
+ return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
}
/* requires bridge hash_lock */
@@ -104,13 +110,12 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
const unsigned char *addr,
__u16 vid)
{
- struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
lockdep_assert_held_once(&br->hash_lock);
rcu_read_lock();
- fdb = fdb_find_rcu(head, addr, vid);
+ fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
rcu_read_unlock();
return fdb;
@@ -120,9 +125,7 @@ struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
const unsigned char *addr,
__u16 vid)
{
- struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
-
- return fdb_find_rcu(head, addr, vid);
+ return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}
/* When a static FDB entry is added, the mac address from the entry is
@@ -175,9 +178,11 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
trace_fdb_delete(br, f);
if (f->is_static)
- fdb_del_hw_addr(br, f->addr.addr);
+ fdb_del_hw_addr(br, f->key.addr.addr);
- hlist_del_init_rcu(&f->hlist);
+ hlist_del_init_rcu(&f->fdb_node);
+ rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
+ br_fdb_rht_params);
fdb_notify(br, f, RTM_DELNEIGH);
call_rcu(&f->rcu, fdb_rcu_free);
}
@@ -187,11 +192,11 @@ static void fdb_delete_local(struct net_bridge *br,
const struct net_bridge_port *p,
struct net_bridge_fdb_entry *f)
{
- const unsigned char *addr = f->addr.addr;
+ const unsigned char *addr = f->key.addr.addr;
struct net_bridge_vlan_group *vg;
const struct net_bridge_vlan *v;
struct net_bridge_port *op;
- u16 vid = f->vlan_id;
+ u16 vid = f->key.vlan_id;
/* Maybe another port has same hw addr? */
list_for_each_entry(op, &br->port_list, list) {
@@ -233,31 +238,23 @@ void br_fdb_find_delete_local(struct net_bridge *br,
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
struct net_bridge_vlan_group *vg;
+ struct net_bridge_fdb_entry *f;
struct net_bridge *br = p->br;
struct net_bridge_vlan *v;
- int i;
spin_lock_bh(&br->hash_lock);
-
vg = nbp_vlan_group(p);
- /* Search all chains since old address/hash is unknown */
- for (i = 0; i < BR_HASH_SIZE; i++) {
- struct hlist_node *h;
- hlist_for_each(h, &br->hash[i]) {
- struct net_bridge_fdb_entry *f;
-
- f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
- if (f->dst == p && f->is_local && !f->added_by_user) {
- /* delete old one */
- fdb_delete_local(br, p, f);
-
- /* if this port has no vlan information
- * configured, we can safely be done at
- * this point.
- */
- if (!vg || !vg->num_vlans)
- goto insert;
- }
+ hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
+ if (f->dst == p && f->is_local && !f->added_by_user) {
+ /* delete old one */
+ fdb_delete_local(br, p, f);
+
+ /* if this port has no vlan information
+ * configured, we can safely be done at
+ * this point.
+ */
+ if (!vg || !vg->num_vlans)
+ goto insert;
}
}
@@ -316,35 +313,32 @@ void br_fdb_cleanup(struct work_struct *work)
{
struct net_bridge *br = container_of(work, struct net_bridge,
gc_work.work);
+ struct net_bridge_fdb_entry *f = NULL;
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
- int i;
- for (i = 0; i < BR_HASH_SIZE; i++) {
- struct net_bridge_fdb_entry *f;
- struct hlist_node *n;
+ /* This part is tricky: to avoid blocking learning and, consequently,
+ * forwarding, we rely on RCU to delete objects with delayed freeing,
+ * which allows us to continue traversing.
+ */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ unsigned long this_timer;
- if (!br->hash[i].first)
+ if (f->is_static || f->added_by_external_learn)
continue;
-
- spin_lock_bh(&br->hash_lock);
- hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
- unsigned long this_timer;
-
- if (f->is_static)
- continue;
- if (f->added_by_external_learn)
- continue;
- this_timer = f->updated + delay;
- if (time_after(this_timer, now))
- work_delay = min(work_delay, this_timer - now);
- else
+ this_timer = f->updated + delay;
+ if (time_after(this_timer, now)) {
+ work_delay = min(work_delay, this_timer - now);
+ } else {
+ spin_lock_bh(&br->hash_lock);
+ if (!hlist_unhashed(&f->fdb_node))
fdb_delete(br, f);
+ spin_unlock_bh(&br->hash_lock);
}
- spin_unlock_bh(&br->hash_lock);
- cond_resched();
}
+ rcu_read_unlock();
/* Cleanup minimum 10 milliseconds apart */
work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
@@ -354,16 +348,13 @@ void br_fdb_cleanup(struct work_struct *work)
/* Completely flush all dynamic entries in forwarding database.*/
void br_fdb_flush(struct net_bridge *br)
{
- int i;
+ struct net_bridge_fdb_entry *f;
+ struct hlist_node *tmp;
spin_lock_bh(&br->hash_lock);
- for (i = 0; i < BR_HASH_SIZE; i++) {
- struct net_bridge_fdb_entry *f;
- struct hlist_node *n;
- hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
- if (!f->is_static)
- fdb_delete(br, f);
- }
+ hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+ if (!f->is_static)
+ fdb_delete(br, f);
}
spin_unlock_bh(&br->hash_lock);
}
@@ -377,27 +368,22 @@ void br_fdb_delete_by_port(struct net_bridge *br,
u16 vid,
int do_all)
{
- int i;
+ struct net_bridge_fdb_entry *f;
+ struct hlist_node *tmp;
spin_lock_bh(&br->hash_lock);
- for (i = 0; i < BR_HASH_SIZE; i++) {
- struct hlist_node *h, *g;
+ hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+ if (f->dst != p)
+ continue;
- hlist_for_each_safe(h, g, &br->hash[i]) {
- struct net_bridge_fdb_entry *f
- = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
- if (f->dst != p)
+ if (!do_all)
+ if (f->is_static || (vid && f->key.vlan_id != vid))
continue;
- if (!do_all)
- if (f->is_static || (vid && f->vlan_id != vid))
- continue;
-
- if (f->is_local)
- fdb_delete_local(br, p, f);
- else
- fdb_delete(br, f);
- }
+ if (f->is_local)
+ fdb_delete_local(br, p, f);
+ else
+ fdb_delete(br, f);
}
spin_unlock_bh(&br->hash_lock);
}
@@ -433,52 +419,48 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long maxnum, unsigned long skip)
{
- struct __fdb_entry *fe = buf;
- int i, num = 0;
struct net_bridge_fdb_entry *f;
+ struct __fdb_entry *fe = buf;
+ int num = 0;
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
rcu_read_lock();
- for (i = 0; i < BR_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
- if (num >= maxnum)
- goto out;
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ if (num >= maxnum)
+ break;
- if (has_expired(br, f))
- continue;
+ if (has_expired(br, f))
+ continue;
- /* ignore pseudo entry for local MAC address */
- if (!f->dst)
- continue;
+ /* ignore pseudo entry for local MAC address */
+ if (!f->dst)
+ continue;
- if (skip) {
- --skip;
- continue;
- }
+ if (skip) {
+ --skip;
+ continue;
+ }
- /* convert from internal format to API */
- memcpy(fe->mac_addr, f->addr.addr, ETH_ALEN);
+ /* convert from internal format to API */
+ memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
- /* due to ABI compat need to split into hi/lo */
- fe->port_no = f->dst->port_no;
- fe->port_hi = f->dst->port_no >> 8;
+ /* due to ABI compat need to split into hi/lo */
+ fe->port_no = f->dst->port_no;
+ fe->port_hi = f->dst->port_no >> 8;
- fe->is_local = f->is_local;
- if (!f->is_static)
- fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
- ++fe;
- ++num;
- }
+ fe->is_local = f->is_local;
+ if (!f->is_static)
+ fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
+ ++fe;
+ ++num;
}
-
- out:
rcu_read_unlock();
return num;
}
-static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
+static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
struct net_bridge_port *source,
const unsigned char *addr,
__u16 vid,
@@ -489,16 +471,23 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (fdb) {
- memcpy(fdb->addr.addr, addr, ETH_ALEN);
+ memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
fdb->dst = source;
- fdb->vlan_id = vid;
+ fdb->key.vlan_id = vid;
fdb->is_local = is_local;
fdb->is_static = is_static;
fdb->added_by_user = 0;
fdb->added_by_external_learn = 0;
fdb->offloaded = 0;
fdb->updated = fdb->used = jiffies;
- hlist_add_head_rcu(&fdb->hlist, head);
+ if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
+ &fdb->rhnode,
+ br_fdb_rht_params)) {
+ kmem_cache_free(br_fdb_cache, fdb);
+ fdb = NULL;
+ } else {
+ hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
+ }
}
return fdb;
}
@@ -506,7 +495,6 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid)
{
- struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
if (!is_valid_ether_addr(addr))
@@ -524,7 +512,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
fdb_delete(br, fdb);
}
- fdb = fdb_create(head, source, addr, vid, 1, 1);
+ fdb = fdb_create(br, source, addr, vid, 1, 1);
if (!fdb)
return -ENOMEM;
@@ -548,7 +536,6 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid, bool added_by_user)
{
- struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
bool fdb_modified = false;
@@ -561,7 +548,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
source->state == BR_STATE_FORWARDING))
return;
- fdb = fdb_find_rcu(head, addr, vid);
+ fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
if (unlikely(fdb->is_local)) {
@@ -590,14 +577,13 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
}
} else {
spin_lock(&br->hash_lock);
- if (likely(!fdb_find_rcu(head, addr, vid))) {
- fdb = fdb_create(head, source, addr, vid, 0, 0);
- if (fdb) {
- if (unlikely(added_by_user))
- fdb->added_by_user = 1;
- trace_br_fdb_update(br, source, addr, vid, added_by_user);
- fdb_notify(br, fdb, RTM_NEWNEIGH);
- }
+ fdb = fdb_create(br, source, addr, vid, 0, 0);
+ if (fdb) {
+ if (unlikely(added_by_user))
+ fdb->added_by_user = 1;
+ trace_br_fdb_update(br, source, addr, vid,
+ added_by_user);
+ fdb_notify(br, fdb, RTM_NEWNEIGH);
}
/* else we lose race and someone else inserts
* it first, don't bother updating
@@ -646,7 +632,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
if (fdb->added_by_external_learn)
ndm->ndm_flags |= NTF_EXT_LEARNED;
- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
+ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
goto nla_put_failure;
if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
goto nla_put_failure;
@@ -657,7 +643,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
- if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
+ if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
+ &fdb->key.vlan_id))
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -711,54 +698,48 @@ int br_fdb_dump(struct sk_buff *skb,
int *idx)
{
struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_fdb_entry *f;
int err = 0;
- int i;
if (!(dev->priv_flags & IFF_EBRIDGE))
- goto out;
+ return err;
if (!filter_dev) {
err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
if (err < 0)
- goto out;
+ return err;
}
- for (i = 0; i < BR_HASH_SIZE; i++) {
- struct net_bridge_fdb_entry *f;
-
- hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
-
- if (*idx < cb->args[2])
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ if (*idx < cb->args[2])
+ goto skip;
+ if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
+ if (filter_dev != dev)
goto skip;
-
- if (filter_dev &&
- (!f->dst || f->dst->dev != filter_dev)) {
- if (filter_dev != dev)
- goto skip;
- /* !f->dst is a special case for bridge
- * It means the MAC belongs to the bridge
- * Therefore need a little more filtering
- * we only want to dump the !f->dst case
- */
- if (f->dst)
- goto skip;
- }
- if (!filter_dev && f->dst)
+ /* !f->dst is a special case for the bridge:
+ * it means the MAC belongs to the bridge.
+ * Therefore we need a little more filtering;
+ * we only want to dump the !f->dst case.
+ */
+ if (f->dst)
goto skip;
-
- err = fdb_fill_info(skb, br, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI);
- if (err < 0)
- goto out;
-skip:
- *idx += 1;
}
+ if (!filter_dev && f->dst)
+ goto skip;
+
+ err = fdb_fill_info(skb, br, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI);
+ if (err < 0)
+ break;
+skip:
+ *idx += 1;
}
+ rcu_read_unlock();
-out:
return err;
}
@@ -766,7 +747,6 @@ out:
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
{
- struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
bool modified = false;
@@ -787,7 +767,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
if (!(flags & NLM_F_CREATE))
return -ENOENT;
- fdb = fdb_create(head, source, addr, vid, 0, 0);
+ fdb = fdb_create(br, source, addr, vid, 0, 0);
if (!fdb)
return -ENOMEM;
@@ -1012,65 +992,60 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
- struct net_bridge_fdb_entry *fdb, *tmp;
- int i;
+ struct net_bridge_fdb_entry *f, *tmp;
int err;
ASSERT_RTNL();
- for (i = 0; i < BR_HASH_SIZE; i++) {
- hlist_for_each_entry(fdb, &br->hash[i], hlist) {
- /* We only care for static entries */
- if (!fdb->is_static)
- continue;
-
- err = dev_uc_add(p->dev, fdb->addr.addr);
- if (err)
- goto rollback;
- }
+ /* the key here is that static entries change only under rtnl */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ /* We only care for static entries */
+ if (!f->is_static)
+ continue;
+ err = dev_uc_add(p->dev, f->key.addr.addr);
+ if (err)
+ goto rollback;
}
- return 0;
+done:
+ rcu_read_unlock();
-rollback:
- for (i = 0; i < BR_HASH_SIZE; i++) {
- hlist_for_each_entry(tmp, &br->hash[i], hlist) {
- /* If we reached the fdb that failed, we can stop */
- if (tmp == fdb)
- break;
-
- /* We only care for static entries */
- if (!tmp->is_static)
- continue;
+ return err;
- dev_uc_del(p->dev, tmp->addr.addr);
- }
+rollback:
+ hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
+ /* We only care for static entries */
+ if (!tmp->is_static)
+ continue;
+ if (tmp == f)
+ break;
+ dev_uc_del(p->dev, tmp->key.addr.addr);
}
- return err;
+
+ goto done;
}
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
{
- struct net_bridge_fdb_entry *fdb;
- int i;
+ struct net_bridge_fdb_entry *f;
ASSERT_RTNL();
- for (i = 0; i < BR_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) {
- /* We only care for static entries */
- if (!fdb->is_static)
- continue;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ /* We only care for static entries */
+ if (!f->is_static)
+ continue;
- dev_uc_del(p->dev, fdb->addr.addr);
- }
+ dev_uc_del(p->dev, f->key.addr.addr);
}
+ rcu_read_unlock();
}
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid)
{
struct net_bridge_fdb_entry *fdb;
- struct hlist_head *head;
bool modified = false;
int err = 0;
@@ -1078,10 +1053,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
spin_lock_bh(&br->hash_lock);
- head = &br->hash[br_mac_hash(addr, vid)];
fdb = br_fdb_find(br, addr, vid);
if (!fdb) {
- fdb = fdb_create(head, p, addr, vid, 0, 0);
+ fdb = fdb_create(br, p, addr, vid, 0, 0);
if (!fdb) {
err = -ENOMEM;
goto err_unlock;
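The FDB rework above replaces the fixed-size hash array (BR_HASH_SIZE buckets plus a random jhash salt) with a resizable rhashtable keyed on an embedded {MAC, VLAN} struct, and keeps a plain hlist (br->fdb_list) solely for iteration. A condensed sketch of the same pattern under hypothetical names (struct item, item_find):

    #include <linux/rhashtable.h>

    struct item_key {
            u32 id;
            u32 vid;        /* no padding: the key is compared as raw bytes */
    };

    struct item {
            struct rhash_head rhnode;       /* rhashtable linkage */
            struct item_key key;            /* embedded lookup key */
            struct hlist_node node;         /* for walking all entries */
    };

    static const struct rhashtable_params item_rht_params = {
            .head_offset = offsetof(struct item, rhnode),
            .key_offset  = offsetof(struct item, key),
            .key_len     = sizeof(struct item_key),
            .automatic_shrinking = true,
    };

    /* Caller must hold rcu_read_lock(), as fdb_find_rcu() does above. */
    static struct item *item_find(struct rhashtable *tbl, u32 id, u32 vid)
    {
            struct item_key key = { .id = id, .vid = vid };

            return rhashtable_lookup(tbl, &key, item_rht_params);
    }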
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index b0f4c734900b..6d9f48bd374a 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -760,9 +760,9 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
void br_mdb_init(void)
{
- rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
- rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
- rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}
void br_mdb_uninit(void)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index c2eea1b8737a..27f1d4f2114a 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -991,7 +991,7 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
unsigned int i;
int ret;
- e = rcu_dereference(net->nf.hooks[NFPROTO_BRIDGE][hook]);
+ e = rcu_dereference(net->nf.hooks_bridge[hook]);
if (!e)
return okfn(net, sk, skb);
diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
index 20cbb727df4d..8e2d7cfa4e16 100644
--- a/net/bridge/br_nf_core.c
+++ b/net/bridge/br_nf_core.c
@@ -78,7 +78,6 @@ void br_netfilter_rtable_init(struct net_bridge *br)
atomic_set(&rt->dst.__refcnt, 1);
rt->dst.dev = br->dev;
- rt->dst.path = &rt->dst;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE;
rt->dst.ops = &fake_dst_ops;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1312b8d20ec3..8e13a64d8c99 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -168,12 +168,17 @@ struct net_bridge_vlan_group {
u16 pvid;
};
+struct net_bridge_fdb_key {
+ mac_addr addr;
+ u16 vlan_id;
+};
+
struct net_bridge_fdb_entry {
- struct hlist_node hlist;
+ struct rhash_head rhnode;
struct net_bridge_port *dst;
- mac_addr addr;
- __u16 vlan_id;
+ struct net_bridge_fdb_key key;
+ struct hlist_node fdb_node;
unsigned char is_local:1,
is_static:1,
added_by_user:1,
@@ -315,7 +320,7 @@ struct net_bridge {
struct net_bridge_vlan_group __rcu *vlgrp;
#endif
- struct hlist_head hash[BR_HASH_SIZE];
+ struct rhashtable fdb_hash_tbl;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
union {
struct rtable fake_rtable;
@@ -405,6 +410,7 @@ struct net_bridge {
int offload_fwd_mark;
#endif
bool neigh_suppress_enabled;
+ struct hlist_head fdb_list;
};
struct br_input_skb_cb {
@@ -515,6 +521,8 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
/* br_fdb.c */
int br_fdb_init(void);
void br_fdb_fini(void);
+int br_fdb_hash_init(struct net_bridge *br);
+void br_fdb_hash_fini(struct net_bridge *br);
void br_fdb_flush(struct net_bridge *br);
void br_fdb_find_delete_local(struct net_bridge *br,
const struct net_bridge_port *p,
@@ -752,7 +760,7 @@ static inline void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
static inline bool br_multicast_is_router(struct net_bridge *br)
{
- return 0;
+ return false;
}
static inline bool br_multicast_querier_exists(struct net_bridge *br,
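A structural note on the br_private.h hunk: each FDB entry is now indexed twice. rhnode hangs in br->fdb_hash_tbl for keyed lookup, while fdb_node threads the entry onto the new br->fdb_list so that full walks (dumps, br_fdb_unsync_static() above) traverse one RCU list instead of BR_HASH_SIZE buckets. Insertion therefore touches both structures; a sketch, reusing the illustrative params from earlier:

static int fdb_insert_sketch(struct net_bridge *br,
			     struct net_bridge_fdb_entry *f)
{
	int err;

	err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &f->rhnode,
					    br_fdb_rht_params);
	if (err)
		return err;	/* -EEXIST on duplicate {MAC, VLAN} */

	hlist_add_head_rcu(&f->fdb_node, &br->fdb_list);
	return 0;
}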
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 9700e0f3307b..ee775f4ff76c 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -121,13 +121,13 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
switch (type) {
case RTM_DELNEIGH:
- br_switchdev_fdb_call_notifiers(false, fdb->addr.addr,
- fdb->vlan_id,
+ br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr,
+ fdb->key.vlan_id,
fdb->dst->dev);
break;
case RTM_NEWNEIGH:
- br_switchdev_fdb_call_notifiers(true, fdb->addr.addr,
- fdb->vlan_id,
+ br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr,
+ fdb->key.vlan_id,
fdb->dst->dev);
break;
}
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 723f25eed8ea..b1be0dcfba6b 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -272,10 +272,7 @@ static ssize_t group_addr_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
- br->group_addr[0], br->group_addr[1],
- br->group_addr[2], br->group_addr[3],
- br->group_addr[4], br->group_addr[5]);
+ return sprintf(buf, "%pM\n", br->group_addr);
}
static ssize_t group_addr_store(struct device *d,
@@ -284,14 +281,11 @@ static ssize_t group_addr_store(struct device *d,
{
struct net_bridge *br = to_bridge(d);
u8 new_addr[6];
- int i;
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
- if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
- &new_addr[0], &new_addr[1], &new_addr[2],
- &new_addr[3], &new_addr[4], &new_addr[5]) != 6)
+ if (!mac_pton(buf, new_addr))
return -EINVAL;
if (!is_link_local_ether_addr(new_addr))
@@ -306,8 +300,7 @@ static ssize_t group_addr_store(struct device *d,
return restart_syscall();
spin_lock_bh(&br->lock);
- for (i = 0; i < 6; i++)
- br->group_addr[i] = new_addr[i];
+ ether_addr_copy(br->group_addr, new_addr);
spin_unlock_bh(&br->lock);
br->group_addr_set = true;
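The br_sysfs_br.c hunks are mostly mechanical cleanups (mac_pton() is if anything stricter than the sscanf() it replaces): the %pM printk specifier formats a 6-byte MAC address, mac_pton() parses the canonical xx:xx:xx:xx:xx:xx form and returns false on malformed input, and ether_addr_copy() replaces the open-coded byte loop. The round trip, as a self-contained sketch:

#include <linux/kernel.h>	/* mac_pton() */
#include <linux/etherdevice.h>	/* ETH_ALEN, ether_addr_copy() */

static int mac_roundtrip(const char *buf, u8 *dst)
{
	u8 addr[ETH_ALEN];

	if (!mac_pton(buf, addr))
		return -EINVAL;
	ether_addr_copy(dst, addr);
	pr_info("parsed %pM\n", dst);
	return 0;
}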
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index e7ef1a1ef3a6..225d1668dfdd 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -4,6 +4,7 @@
#
menuconfig NF_TABLES_BRIDGE
depends on BRIDGE && NETFILTER && NF_TABLES
+ select NETFILTER_FAMILY_BRIDGE
tristate "Ethernet Bridge nf_tables support"
if NF_TABLES_BRIDGE
@@ -29,6 +30,7 @@ endif # NF_TABLES_BRIDGE
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
+ select NETFILTER_FAMILY_BRIDGE
help
ebtables is a general, extensible frame/packet identification
framework. Say 'Y' or 'M' here if you want to do Ethernet
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 37817d25b63d..02c4b409d317 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2445,7 +2445,6 @@ static int __init ebtables_init(void)
return ret;
}
- printk(KERN_INFO "Ebtables v2.0 registered\n");
return 0;
}
@@ -2453,7 +2452,6 @@ static void __exit ebtables_fini(void)
{
nf_unregister_sockopt(&ebt_sockopts);
xt_unregister_target(&ebt_standard_target);
- printk(KERN_INFO "Ebtables v2.0 unregistered\n");
}
EXPORT_SYMBOL(ebt_register_table);
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index 97afdc0744e6..5160cf614176 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -25,63 +25,23 @@ nft_do_chain_bridge(void *priv,
{
struct nft_pktinfo pkt;
+ nft_set_pktinfo(&pkt, skb, state);
+
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
- nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
+ nft_set_pktinfo_ipv4_validate(&pkt, skb);
break;
case htons(ETH_P_IPV6):
- nft_set_pktinfo_ipv6_validate(&pkt, skb, state);
+ nft_set_pktinfo_ipv6_validate(&pkt, skb);
break;
default:
- nft_set_pktinfo_unspec(&pkt, skb, state);
+ nft_set_pktinfo_unspec(&pkt, skb);
break;
}
return nft_do_chain(&pkt, priv);
}
-static struct nft_af_info nft_af_bridge __read_mostly = {
- .family = NFPROTO_BRIDGE,
- .nhooks = NF_BR_NUMHOOKS,
- .owner = THIS_MODULE,
- .nops = 1,
- .hooks = {
- [NF_BR_PRE_ROUTING] = nft_do_chain_bridge,
- [NF_BR_LOCAL_IN] = nft_do_chain_bridge,
- [NF_BR_FORWARD] = nft_do_chain_bridge,
- [NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
- [NF_BR_POST_ROUTING] = nft_do_chain_bridge,
- },
-};
-
-static int nf_tables_bridge_init_net(struct net *net)
-{
- net->nft.bridge = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
- if (net->nft.bridge == NULL)
- return -ENOMEM;
-
- memcpy(net->nft.bridge, &nft_af_bridge, sizeof(nft_af_bridge));
-
- if (nft_register_afinfo(net, net->nft.bridge) < 0)
- goto err;
-
- return 0;
-err:
- kfree(net->nft.bridge);
- return -ENOMEM;
-}
-
-static void nf_tables_bridge_exit_net(struct net *net)
-{
- nft_unregister_afinfo(net, net->nft.bridge);
- kfree(net->nft.bridge);
-}
-
-static struct pernet_operations nf_tables_bridge_net_ops = {
- .init = nf_tables_bridge_init_net,
- .exit = nf_tables_bridge_exit_net,
-};
-
static const struct nf_chain_type filter_bridge = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
@@ -92,75 +52,23 @@ static const struct nf_chain_type filter_bridge = {
(1 << NF_BR_FORWARD) |
(1 << NF_BR_LOCAL_OUT) |
(1 << NF_BR_POST_ROUTING),
-};
-
-static void nf_br_saveroute(const struct sk_buff *skb,
- struct nf_queue_entry *entry)
-{
-}
-
-static int nf_br_reroute(struct net *net, struct sk_buff *skb,
- const struct nf_queue_entry *entry)
-{
- return 0;
-}
-
-static __sum16 nf_br_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol)
-{
- return 0;
-}
-
-static __sum16 nf_br_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol)
-{
- return 0;
-}
-
-static int nf_br_route(struct net *net, struct dst_entry **dst,
- struct flowi *fl, bool strict __always_unused)
-{
- return 0;
-}
-
-static const struct nf_afinfo nf_br_afinfo = {
- .family = AF_BRIDGE,
- .checksum = nf_br_checksum,
- .checksum_partial = nf_br_checksum_partial,
- .route = nf_br_route,
- .saveroute = nf_br_saveroute,
- .reroute = nf_br_reroute,
- .route_key_size = 0,
+ .hooks = {
+ [NF_BR_PRE_ROUTING] = nft_do_chain_bridge,
+ [NF_BR_LOCAL_IN] = nft_do_chain_bridge,
+ [NF_BR_FORWARD] = nft_do_chain_bridge,
+ [NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
+ [NF_BR_POST_ROUTING] = nft_do_chain_bridge,
+ },
};
static int __init nf_tables_bridge_init(void)
{
- int ret;
-
- nf_register_afinfo(&nf_br_afinfo);
- ret = nft_register_chain_type(&filter_bridge);
- if (ret < 0)
- goto err1;
-
- ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
- if (ret < 0)
- goto err2;
-
- return ret;
-
-err2:
- nft_unregister_chain_type(&filter_bridge);
-err1:
- nf_unregister_afinfo(&nf_br_afinfo);
- return ret;
+ return nft_register_chain_type(&filter_bridge);
}
static void __exit nf_tables_bridge_exit(void)
{
- unregister_pernet_subsys(&nf_tables_bridge_net_ops);
nft_unregister_chain_type(&filter_bridge);
- nf_unregister_afinfo(&nf_br_afinfo);
}
module_init(nf_tables_bridge_init);
@@ -168,4 +76,4 @@ module_exit(nf_tables_bridge_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(AF_BRIDGE);
+MODULE_ALIAS_NFT_CHAIN(AF_BRIDGE, "filter");
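The alias change on the last line tracks nf_tables' new autoload granularity: with the per-family af_info gone, modules are requested per chain type rather than per family, so the alias must follow the "nft-chain-<family>-<name>" pattern. Assuming the usual macro definition (shown for orientation only, not part of this diff):

/* AF_BRIDGE == 7, so this module now answers to "nft-chain-7-filter" */
#define MODULE_ALIAS_NFT_CHAIN(family, name) \
	MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)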
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 2d38b6e34203..e0adcd123f48 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -334,9 +334,8 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
mutex_lock(&caifdevs->lock);
list_add_rcu(&caifd->list, &caifdevs->list);
- strncpy(caifd->layer.name, dev->name,
- sizeof(caifd->layer.name) - 1);
- caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+ strlcpy(caifd->layer.name, dev->name,
+ sizeof(caifd->layer.name));
caifd->layer.transmit = transmit;
cfcnfg_add_phy_layer(cfg,
dev,
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 632d5a416d97..64048cec41e0 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -934,11 +934,11 @@ static int caif_release(struct socket *sock)
}
/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
-static unsigned int caif_poll(struct file *file,
+static __poll_t caif_poll(struct file *file,
struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask;
+ __poll_t mask;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sock_poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 5cd44f001f64..1a082a946045 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -176,9 +176,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
dev_add_pack(&caif_usb_type);
pack_added = true;
- strncpy(layer->name, dev->name,
- sizeof(layer->name) - 1);
- layer->name[sizeof(layer->name) - 1] = 0;
+ strlcpy(layer->name, dev->name, sizeof(layer->name));
return 0;
}
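Every caif hunk in this stretch is the same mechanical fix: strncpy() does not NUL-terminate when the source fills the buffer, so each call needed a manual buf[size-1] = 0 afterwards; strlcpy() always terminates (truncating if necessary), collapsing two statements into one. Reduced to a sketch:

char name[IFNAMSIZ];

/* before: termination is a separate, forgettable step */
strncpy(name, src, sizeof(name) - 1);
name[sizeof(name) - 1] = 0;

/* after: guaranteed NUL within sizeof(name) */
strlcpy(name, src, sizeof(name));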
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 273cb07f57d8..8f00bea093b9 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -268,17 +268,15 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
case CAIFPROTO_RFM:
l->linktype = CFCTRL_SRV_RFM;
l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
- strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
- sizeof(l->u.rfm.volume)-1);
- l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
+ strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
+ sizeof(l->u.rfm.volume));
break;
case CAIFPROTO_UTIL:
l->linktype = CFCTRL_SRV_UTIL;
l->endpoint = 0x00;
l->chtype = 0x00;
- strncpy(l->u.utility.name, s->sockaddr.u.util.service,
- sizeof(l->u.utility.name)-1);
- l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
+ strlcpy(l->u.utility.name, s->sockaddr.u.util.service,
+ sizeof(l->u.utility.name));
caif_assert(sizeof(l->u.utility.name) > 10);
l->u.utility.paramlen = s->param.size;
if (l->u.utility.paramlen > sizeof(l->u.utility.params))
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index f5afda1abc76..a1e85f032108 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -258,8 +258,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
cfpkt_add_body(pkt, &tmp16, 2);
memset(utility_name, 0, sizeof(utility_name));
- strncpy(utility_name, param->u.utility.name,
- UTILITY_NAME_LENGTH - 1);
+ strlcpy(utility_name, param->u.utility.name,
+ UTILITY_NAME_LENGTH);
cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
tmp8 = param->u.utility.paramlen;
cfpkt_add_body(pkt, &tmp8, 1);
@@ -352,15 +352,14 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
u8 cmdrsp;
u8 cmd;
int ret = -1;
- u16 tmp16;
u8 len;
u8 param[255];
- u8 linkid;
+ u8 linkid = 0;
struct cfctrl *cfctrl = container_obj(layer);
struct cfctrl_request_info rsp, *req;
- cfpkt_extr_head(pkt, &cmdrsp, 1);
+ cmdrsp = cfpkt_extr_head_u8(pkt);
cmd = cmdrsp & CFCTRL_CMD_MASK;
if (cmd != CFCTRL_CMD_LINK_ERR
&& CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
@@ -378,13 +377,12 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
u8 physlinkid;
u8 prio;
u8 tmp;
- u32 tmp32;
u8 *cp;
int i;
struct cfctrl_link_param linkparam;
memset(&linkparam, 0, sizeof(linkparam));
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
serv = tmp & CFCTRL_SRV_MASK;
linkparam.linktype = serv;
@@ -392,13 +390,13 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
servtype = tmp >> 4;
linkparam.chtype = servtype;
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
physlinkid = tmp & 0x07;
prio = tmp >> 3;
linkparam.priority = prio;
linkparam.phyid = physlinkid;
- cfpkt_extr_head(pkt, &endpoint, 1);
+ endpoint = cfpkt_extr_head_u8(pkt);
linkparam.endpoint = endpoint & 0x03;
switch (serv) {
@@ -407,45 +405,43 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_VIDEO:
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
linkparam.u.video.connid = tmp;
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_DATAGRAM:
- cfpkt_extr_head(pkt, &tmp32, 4);
linkparam.u.datagram.connid =
- le32_to_cpu(tmp32);
+ cfpkt_extr_head_u32(pkt);
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_RFM:
/* Construct a frame, convert
* DatagramConnectionID
* to network format long and copy it out...
*/
- cfpkt_extr_head(pkt, &tmp32, 4);
linkparam.u.rfm.connid =
- le32_to_cpu(tmp32);
+ cfpkt_extr_head_u32(pkt);
cp = (u8 *) linkparam.u.rfm.volume;
- for (cfpkt_extr_head(pkt, &tmp, 1);
+ for (tmp = cfpkt_extr_head_u8(pkt);
cfpkt_more(pkt) && tmp != '\0';
- cfpkt_extr_head(pkt, &tmp, 1))
+ tmp = cfpkt_extr_head_u8(pkt))
*cp++ = tmp;
*cp = '\0';
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_UTIL:
@@ -454,13 +450,11 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
* to network format long and copy it out...
*/
/* Fifosize KB */
- cfpkt_extr_head(pkt, &tmp16, 2);
linkparam.u.utility.fifosize_kb =
- le16_to_cpu(tmp16);
+ cfpkt_extr_head_u16(pkt);
/* Fifosize bufs */
- cfpkt_extr_head(pkt, &tmp16, 2);
linkparam.u.utility.fifosize_bufs =
- le16_to_cpu(tmp16);
+ cfpkt_extr_head_u16(pkt);
/* name */
cp = (u8 *) linkparam.u.utility.name;
caif_assert(sizeof(linkparam.u.utility.name)
@@ -468,24 +462,24 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
for (i = 0;
i < UTILITY_NAME_LENGTH
&& cfpkt_more(pkt); i++) {
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
*cp++ = tmp;
}
/* Length */
- cfpkt_extr_head(pkt, &len, 1);
+ len = cfpkt_extr_head_u8(pkt);
linkparam.u.utility.paramlen = len;
/* Param Data */
cp = linkparam.u.utility.params;
while (cfpkt_more(pkt) && len--) {
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
*cp++ = tmp;
}
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
/* Length */
- cfpkt_extr_head(pkt, &len, 1);
+ len = cfpkt_extr_head_u8(pkt);
/* Param Data */
cfpkt_extr_head(pkt, &param, len);
break;
@@ -522,7 +516,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
}
break;
case CFCTRL_CMD_LINK_DESTROY:
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
break;
case CFCTRL_CMD_LINK_ERR:
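The cfctrl_recv() rewrite trades out-parameter extraction (cfpkt_extr_head(pkt, &tmpN, N) plus an explicit leNN_to_cpu()) for value-returning helpers, which also lets the tmp16/tmp32 locals disappear; on KASAN builds every such local inflates the stack frame. The helpers themselves live in include/net/caif/cfpkt.h, not in this hunk — a plausible shape, assuming they are thin wrappers around the existing extractor:

static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
{
	u8 tmp;

	cfpkt_extr_head(pkt, &tmp, 1);
	return tmp;
}

static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
{
	__le16 tmp;

	cfpkt_extr_head(pkt, &tmp, 2);
	return le16_to_cpu(tmp);
}

static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
{
	__le32 tmp;

	cfpkt_extr_head(pkt, &tmp, 4);
	return le32_to_cpu(tmp);
}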
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 71b6ab240dea..38c2b7a890dd 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -8,7 +8,6 @@
#include <linux/string.h>
#include <linux/skbuff.h>
-#include <linux/hardirq.h>
#include <linux/export.h>
#include <net/caif/cfpkt.h>
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 922ac1d605b3..53ecda10b790 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
#include <linux/fs.h>
-#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
diff --git a/net/can/Kconfig b/net/can/Kconfig
index a15c0e0d1fc7..a4399be54ff4 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -11,7 +11,7 @@ menuconfig CAN
1991, mainly for automotive, but now widely used in marine
(NMEA2000), industrial, and medical applications.
More information on the CAN network protocol family PF_CAN
- is contained in <Documentation/networking/can.txt>.
+ is contained in <Documentation/networking/can.rst>.
If you want CAN support you should say Y here and also to the
specific driver for your controller(s) below.
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 003b2d6d655f..6da324550eec 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -321,13 +321,13 @@ EXPORT_SYMBOL(can_send);
* af_can rx path
*/
-static struct dev_rcv_lists *find_dev_rcv_lists(struct net *net,
+static struct can_dev_rcv_lists *find_dev_rcv_lists(struct net *net,
struct net_device *dev)
{
if (!dev)
return net->can.can_rx_alldev_list;
else
- return (struct dev_rcv_lists *)dev->ml_priv;
+ return (struct can_dev_rcv_lists *)dev->ml_priv;
}
/**
@@ -381,7 +381,7 @@ static unsigned int effhash(canid_t can_id)
* Reduced can_id to have a preprocessed filter compare value.
*/
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
- struct dev_rcv_lists *d)
+ struct can_dev_rcv_lists *d)
{
canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
@@ -464,7 +464,7 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
{
struct receiver *r;
struct hlist_head *rl;
- struct dev_rcv_lists *d;
+ struct can_dev_rcv_lists *d;
struct s_pstats *can_pstats = net->can.can_pstats;
int err = 0;
@@ -542,7 +542,7 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
struct receiver *r = NULL;
struct hlist_head *rl;
struct s_pstats *can_pstats = net->can.can_pstats;
- struct dev_rcv_lists *d;
+ struct can_dev_rcv_lists *d;
if (dev && dev->type != ARPHRD_CAN)
return;
@@ -615,7 +615,7 @@ static inline void deliver(struct sk_buff *skb, struct receiver *r)
r->matches++;
}
-static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
+static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
{
struct receiver *r;
int matches = 0;
@@ -682,7 +682,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
- struct dev_rcv_lists *d;
+ struct can_dev_rcv_lists *d;
struct net *net = dev_net(dev);
struct s_stats *can_stats = net->can.can_stats;
int matches;
@@ -721,20 +721,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- if (WARN_ONCE(dev->type != ARPHRD_CAN ||
- skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN,
- "PF_CAN: dropped non conform CAN skbuf: "
- "dev type %d, len %d, datalen %d\n",
- dev->type, skb->len, cfd->len))
- goto drop;
+ if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
+ cfd->len > CAN_MAX_DLEN)) {
+ pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
+ dev->type, skb->len, cfd->len);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
can_receive(skb, dev);
return NET_RX_SUCCESS;
-
-drop:
- kfree_skb(skb);
- return NET_RX_DROP;
}
static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -742,20 +738,16 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- if (WARN_ONCE(dev->type != ARPHRD_CAN ||
- skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN,
- "PF_CAN: dropped non conform CAN FD skbuf: "
- "dev type %d, len %d, datalen %d\n",
- dev->type, skb->len, cfd->len))
- goto drop;
+ if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
+ cfd->len > CANFD_MAX_DLEN)) {
+ pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
+ dev->type, skb->len, cfd->len);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
can_receive(skb, dev);
return NET_RX_SUCCESS;
-
-drop:
- kfree_skb(skb);
- return NET_RX_DROP;
}
/*
@@ -829,7 +821,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct dev_rcv_lists *d;
+ struct can_dev_rcv_lists *d;
if (dev->type != ARPHRD_CAN)
return NOTIFY_DONE;
@@ -874,7 +866,7 @@ static int can_pernet_init(struct net *net)
{
spin_lock_init(&net->can.can_rcvlists_lock);
net->can.can_rx_alldev_list =
- kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
+ kzalloc(sizeof(struct can_dev_rcv_lists), GFP_KERNEL);
if (!net->can.can_rx_alldev_list)
goto out;
net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
@@ -920,7 +912,7 @@ static void can_pernet_exit(struct net *net)
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
if (dev->type == ARPHRD_CAN && dev->ml_priv) {
- struct dev_rcv_lists *d = dev->ml_priv;
+ struct can_dev_rcv_lists *d = dev->ml_priv;
BUG_ON(d->entries);
kfree(d);
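The can_rcv()/canfd_rcv() hunks demote WARN_ONCE() to pr_warn_once(): a malformed frame handed up by a driver is an input-validation failure to be dropped, not a kernel bug, and WARN_ONCE() would needlessly trip panic_on_warn systems. The resulting drop path, in isolation:

if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
	/* warn once, then silently drop further offenders */
	pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d\n",
		     dev->type, skb->len);
	kfree_skb(skb);
	return NET_RX_DROP;
}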
diff --git a/net/can/af_can.h b/net/can/af_can.h
index eca6463c6213..9cb3719632bd 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -67,7 +67,7 @@ struct receiver {
enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
/* per device receive filters linked at dev->ml_priv */
-struct dev_rcv_lists {
+struct can_dev_rcv_lists {
struct hlist_head rx[RX_MAX];
struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 13690334efa3..ac5e5e34fee3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -246,7 +246,6 @@ static int bcm_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations bcm_proc_fops = {
- .owner = THIS_MODULE,
.open = bcm_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/can/gw.c b/net/can/gw.c
index 73a02af4b5d7..398dd0395ad9 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1014,6 +1014,8 @@ static struct pernet_operations cangw_pernet_ops = {
static __init int cgw_module_init(void)
{
+ int ret;
+
/* sanitize given module parameter */
max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);
@@ -1031,15 +1033,19 @@ static __init int cgw_module_init(void)
notifier.notifier_call = cgw_notifier;
register_netdevice_notifier(&notifier);
- if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, 0)) {
+ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
+ NULL, cgw_dump_jobs, 0);
+ if (ret) {
unregister_netdevice_notifier(&notifier);
kmem_cache_destroy(cgw_cache);
return -ENOBUFS;
}
- /* Only the first call to __rtnl_register can fail */
- __rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, 0);
- __rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, 0);
+ /* Only the first call to rtnl_register_module can fail */
+ rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
+ cgw_create_job, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
+ cgw_remove_job, NULL, 0);
return 0;
}
diff --git a/net/can/proc.c b/net/can/proc.c
index 0c59f876fe6f..fdf704e9bb8c 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -276,7 +276,6 @@ static int can_stats_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations can_stats_proc_fops = {
- .owner = THIS_MODULE,
.open = can_stats_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -310,7 +309,6 @@ static int can_reset_stats_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations can_reset_stats_proc_fops = {
- .owner = THIS_MODULE,
.open = can_reset_stats_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -329,7 +327,6 @@ static int can_version_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations can_version_proc_fops = {
- .owner = THIS_MODULE,
.open = can_version_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -338,7 +335,7 @@ static const struct file_operations can_version_proc_fops = {
static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
struct net_device *dev,
- struct dev_rcv_lists *d)
+ struct can_dev_rcv_lists *d)
{
if (!hlist_empty(&d->rx[idx])) {
can_print_recv_banner(m);
@@ -353,7 +350,7 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
/* double cast to prevent GCC warning */
int idx = (int)(long)PDE_DATA(m->file->f_inode);
struct net_device *dev;
- struct dev_rcv_lists *d;
+ struct can_dev_rcv_lists *d;
struct net *net = m->private;
seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
@@ -382,7 +379,6 @@ static int can_rcvlist_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations can_rcvlist_proc_fops = {
- .owner = THIS_MODULE,
.open = can_rcvlist_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -417,7 +413,7 @@ static inline void can_rcvlist_proc_show_array(struct seq_file *m,
static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
{
struct net_device *dev;
- struct dev_rcv_lists *d;
+ struct can_dev_rcv_lists *d;
struct net *net = m->private;
/* RX_SFF */
@@ -450,7 +446,6 @@ static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations can_rcvlist_sff_proc_fops = {
- .owner = THIS_MODULE,
.open = can_rcvlist_sff_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -461,7 +456,7 @@ static const struct file_operations can_rcvlist_sff_proc_fops = {
static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
{
struct net_device *dev;
- struct dev_rcv_lists *d;
+ struct can_dev_rcv_lists *d;
struct net *net = m->private;
/* RX_EFF */
@@ -494,7 +489,6 @@ static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations can_rcvlist_eff_proc_fops = {
- .owner = THIS_MODULE,
.open = can_rcvlist_eff_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/can/raw.c b/net/can/raw.c
index 864c80dbdb72..f2ecc43376a1 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -401,6 +401,8 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
if (len < sizeof(*addr))
return -EINVAL;
+ if (addr->can_family != AF_CAN)
+ return -EINVAL;
lock_sock(sk);
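The two added lines in raw_bind() close a small validation gap: only the address length was checked, so a sockaddr of the right size but the wrong family was silently accepted. Rejecting a mismatched can_family before lock_sock() is the conventional ordering; the same idiom in generic form (names illustrative):

static int my_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;

	if (len < sizeof(*addr))
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	lock_sock(sock->sk);
	/* ... the actual bind work ... */
	release_sock(sock->sk);
	return 0;
}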
diff --git a/net/core/Makefile b/net/core/Makefile
index 1fd0a9c88b1b..6dbbba8c57ae 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
- fib_notifier.o
+ fib_notifier.o xdp.o
obj-y += net-sysfs.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 522873ed120b..b7d9293940b5 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -72,12 +72,10 @@ static inline int connection_based(struct sock *sk)
static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
void *key)
{
- unsigned long bits = (unsigned long)key;
-
/*
* Avoid a wakeup if event not interesting for us
*/
- if (bits && !(bits & (POLLIN | POLLERR)))
+ if (key && !(key_to_poll(key) & (POLLIN | POLLERR)))
return 0;
return autoremove_wake_function(wait, mode, sync, key);
}
@@ -833,11 +831,11 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
* and you use a different write policy from sock_writeable()
* then please supply your own write_space callback.
*/
-unsigned int datagram_poll(struct file *file, struct socket *sock,
+__poll_t datagram_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
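caif_poll() above and datagram_poll() here pick up the tree-wide __poll_t annotation: poll masks become a sparse-checked bitwise type, and key_to_poll() is the sanctioned conversion from the opaque wait-queue key, replacing the old cast through unsigned long. A conforming poll method, sketched under those assumptions:

static __poll_t my_poll(struct file *file, struct socket *sock,
			poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}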
diff --git a/net/core/dev.c b/net/core/dev.c
index 01ee854454a8..dda9d7b9a840 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1146,7 +1146,19 @@ EXPORT_SYMBOL(dev_alloc_name);
int dev_get_valid_name(struct net *net, struct net_device *dev,
const char *name)
{
- return dev_alloc_name_ns(net, dev, name);
+ BUG_ON(!net);
+
+ if (!dev_valid_name(name))
+ return -EINVAL;
+
+ if (strchr(name, '%'))
+ return dev_alloc_name_ns(net, dev, name);
+ else if (__dev_get_by_name(net, name))
+ return -EEXIST;
+ else if (dev->name != name)
+ strlcpy(dev->name, name, IFNAMSIZ);
+
+ return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);
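The dev_get_valid_name() body restores the full decision tree that an earlier refactor had collapsed into a bare dev_alloc_name_ns() call: invalid names fail, '%'-templates go through the allocator, literal names must be unused, and the dev->name != name guard skips the copy when the caller passed the device's own buffer (avoiding an overlapping strlcpy). Illustrative outcomes only:

err = dev_get_valid_name(net, dev, "br%d");	/* picks br0, br1, ... */
err = dev_get_valid_name(net, dev, "br0");	/* -EEXIST if br0 is taken */
err = dev_get_valid_name(net, dev, "x/y");	/* -EINVAL: not a valid name */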
@@ -1542,6 +1554,23 @@ void dev_disable_lro(struct net_device *dev)
}
EXPORT_SYMBOL(dev_disable_lro);
+/**
+ * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
+ * @dev: device
+ *
+ * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
+ * called under RTNL. This is needed if Generic XDP is installed on
+ * the device.
+ */
+static void dev_disable_gro_hw(struct net_device *dev)
+{
+ dev->wanted_features &= ~NETIF_F_GRO_HW;
+ netdev_update_features(dev);
+
+ if (unlikely(dev->features & NETIF_F_GRO_HW))
+ netdev_WARN(dev, "failed to disable GRO_HW!\n");
+}
+
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
struct net_device *dev)
{
@@ -1665,7 +1694,6 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
* call_netdevice_notifiers_info - call all network notifier blocks
* @val: value passed unmodified to notifier function
- * @dev: net_device pointer passed unmodified to notifier function
* @info: notifier information data
*
* Call all network notifier blocks. Parameters and return value
@@ -2803,7 +2831,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
segs = skb_mac_gso_segment(skb, features);
- if (unlikely(skb_needs_check(skb, tx_path)))
+ if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
skb_warn_bad_offload(skb);
return segs;
@@ -3042,7 +3070,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
-static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
netdev_features_t features;
@@ -3066,9 +3094,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
__skb_linearize(skb))
goto out_kfree_skb;
- if (validate_xmit_xfrm(skb, features))
- goto out_kfree_skb;
-
/* If packet is not checksummed and device does not
* support checksumming for this protocol, complete
* checksumming here.
@@ -3085,6 +3110,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
}
}
+ skb = validate_xmit_xfrm(skb, features, again);
+
return skb;
out_kfree_skb:
@@ -3094,7 +3121,7 @@ out_null:
return NULL;
}
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
struct sk_buff *next, *head = NULL, *tail;
@@ -3105,7 +3132,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
/* in case skb wont be segmented, point to itself */
skb->prev = skb;
- skb = validate_xmit_skb(skb, dev);
+ skb = validate_xmit_skb(skb, dev, again);
if (!skb)
continue;
@@ -3139,10 +3166,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
/* + transport layer */
- if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
- hdr_len += tcp_hdrlen(skb);
- else
- hdr_len += sizeof(struct udphdr);
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+ const struct tcphdr *th;
+ struct tcphdr _tcphdr;
+
+ th = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_tcphdr), &_tcphdr);
+ if (likely(th))
+ hdr_len += __tcp_hdrlen(th);
+ } else {
+ struct udphdr _udphdr;
+
+ if (skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_udphdr), &_udphdr))
+ hdr_len += sizeof(struct udphdr);
+ }
if (shinfo->gso_type & SKB_GSO_DODGY)
gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
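The qdisc_pkt_len_init() change removes a latent out-of-bounds read: tcp_hdrlen() dereferences the transport header assuming it sits in the linear area, which an untrusted or forwarded GSO skb need not guarantee. skb_header_pointer() copies the header into an on-stack buffer when it is fragmented and returns NULL when the packet is too short, so the length estimate degrades gracefully instead of reading garbage. The idiom in isolation:

struct tcphdr _tcph;
const struct tcphdr *th;

th = skb_header_pointer(skb, skb_transport_offset(skb),
			sizeof(_tcph), &_tcph);
if (th)
	hdr_len += __tcp_hdrlen(th);	/* th is valid wherever the header lives */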
@@ -3162,6 +3200,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
int rc;
qdisc_calculate_pkt_len(skb, q);
+
+ if (q->flags & TCQ_F_NOLOCK) {
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ __qdisc_drop(skb, &to_free);
+ rc = NET_XMIT_DROP;
+ } else {
+ rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ __qdisc_run(q);
+ }
+
+ if (unlikely(to_free))
+ kfree_skb_list(to_free);
+ return rc;
+ }
+
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
@@ -3192,9 +3245,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
contended = false;
}
__qdisc_run(q);
- } else
- qdisc_run_end(q);
+ }
+ qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
@@ -3204,6 +3257,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
contended = false;
}
__qdisc_run(q);
+ qdisc_run_end(q);
}
}
spin_unlock(root_lock);
@@ -3376,8 +3430,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
else
queue_index = __netdev_pick_tx(dev, skb);
- if (!accel_priv)
- queue_index = netdev_cap_txqueue(dev, queue_index);
+ queue_index = netdev_cap_txqueue(dev, queue_index);
}
skb_set_queue_mapping(skb, queue_index);
@@ -3416,6 +3469,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
struct netdev_queue *txq;
struct Qdisc *q;
int rc = -ENOMEM;
+ bool again = false;
skb_reset_mac_header(skb);
@@ -3477,7 +3531,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
XMIT_RECURSION_LIMIT))
goto recursion_alert;
- skb = validate_xmit_skb(skb, dev);
+ skb = validate_xmit_skb(skb, dev, &again);
if (!skb)
goto out;
@@ -3873,9 +3927,33 @@ drop:
return NET_RX_DROP;
}
+static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct netdev_rx_queue *rxqueue;
+
+ rxqueue = dev->_rx;
+
+ if (skb_rx_queue_recorded(skb)) {
+ u16 index = skb_get_rx_queue(skb);
+
+ if (unlikely(index >= dev->real_num_rx_queues)) {
+ WARN_ONCE(dev->real_num_rx_queues > 1,
+ "%s received packet on queue %u, but number "
+ "of RX queues is %u\n",
+ dev->name, index, dev->real_num_rx_queues);
+
+ return rxqueue; /* Return first rxqueue */
+ }
+ rxqueue += index;
+ }
+ return rxqueue;
+}
+
static u32 netif_receive_generic_xdp(struct sk_buff *skb,
struct bpf_prog *xdp_prog)
{
+ struct netdev_rx_queue *rxqueue;
u32 metalen, act = XDP_DROP;
struct xdp_buff xdp;
void *orig_data;
@@ -3919,6 +3997,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
xdp.data_hard_start = skb->data - skb_headroom(skb);
orig_data = xdp.data;
+ rxqueue = netif_get_rxqueue(skb);
+ xdp.rxq = &rxqueue->xdp_rxq;
+
act = bpf_prog_run_xdp(xdp_prog, &xdp);
off = xdp.data - orig_data;
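These hunks thread the new XDP RX-queue metadata through the generic path: netif_get_rxqueue() maps skb->queue_mapping back to a struct netdev_rx_queue (warning once and falling back to queue 0 on out-of-range indices), and xdp.rxq lets programs read queue provenance. On the allocation side (see the netif_alloc_rx_queues hunk further down) every queue is registered at setup and unregistered at teardown; for a driver the life cycle is symmetric, sketched minimally:

#include <net/xdp.h>

/* ring setup */
err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev, queue_index);
if (err)
	return err;

/* ring teardown */
xdp_rxq_info_unreg(&ring->xdp_rxq);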
@@ -4143,21 +4224,26 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
while (head) {
struct Qdisc *q = head;
- spinlock_t *root_lock;
+ spinlock_t *root_lock = NULL;
head = head->next_sched;
- root_lock = qdisc_lock(q);
- spin_lock(root_lock);
+ if (!(q->flags & TCQ_F_NOLOCK)) {
+ root_lock = qdisc_lock(q);
+ spin_lock(root_lock);
+ }
/* We need to make sure head->next_sched is read
* before clearing __QDISC_STATE_SCHED
*/
smp_mb__before_atomic();
clear_bit(__QDISC_STATE_SCHED, &q->state);
qdisc_run(q);
- spin_unlock(root_lock);
+ if (root_lock)
+ spin_unlock(root_lock);
}
}
+
+ xfrm_dev_backlog(sd);
}
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@ -4545,6 +4631,7 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
} else if (new && !old) {
static_key_slow_inc(&generic_xdp_needed);
dev_disable_lro(dev);
+ dev_disable_gro_hw(dev);
}
break;
@@ -6348,6 +6435,7 @@ rollback:
* netdev_upper_dev_link - Add a link to the upper device
* @dev: device
* @upper_dev: new upper device
+ * @extack: netlink extended ack
*
* Adds a link to device which is upper to this one. The caller must hold
* the RTNL lock. On a failure a negative errno code is returned.
@@ -6369,6 +6457,7 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
* @upper_dev: new upper device
* @upper_priv: upper device private
* @upper_info: upper info to be passed down via notifier
+ * @extack: netlink extended ack
*
* Adds a link to device which is upper to this one. In this case, only
* one master upper device can be linked, although other non-master devices
@@ -6959,6 +7048,35 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
EXPORT_SYMBOL(dev_set_mtu);
/**
+ * dev_change_tx_queue_len - Change TX queue length of a netdevice
+ * @dev: device
+ * @new_len: new tx queue length
+ */
+int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
+{
+ unsigned int orig_len = dev->tx_queue_len;
+ int res;
+
+ if (new_len != (unsigned int)new_len)
+ return -ERANGE;
+
+ if (new_len != orig_len) {
+ dev->tx_queue_len = new_len;
+ res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
+ res = notifier_to_errno(res);
+ if (res) {
+ netdev_err(dev,
+ "refused to change device tx_queue_len\n");
+ dev->tx_queue_len = orig_len;
+ return res;
+ }
+ return dev_qdisc_change_tx_queue_len(dev);
+ }
+
+ return 0;
+}
+
+/**
* dev_set_group - Change group this device belongs to
* @dev: device
* @new_group: group this device should belong to
@@ -7073,17 +7191,21 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
}
EXPORT_SYMBOL(dev_change_proto_down);
-u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id)
+void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
+ struct netdev_bpf *xdp)
{
- struct netdev_bpf xdp;
-
- memset(&xdp, 0, sizeof(xdp));
- xdp.command = XDP_QUERY_PROG;
+ memset(xdp, 0, sizeof(*xdp));
+ xdp->command = XDP_QUERY_PROG;
/* Query must always succeed. */
- WARN_ON(bpf_op(dev, &xdp) < 0);
- if (prog_id)
- *prog_id = xdp.prog_id;
+ WARN_ON(bpf_op(dev, xdp) < 0);
+}
+
+static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
+{
+ struct netdev_bpf xdp;
+
+ __dev_xdp_query(dev, bpf_op, &xdp);
return xdp.prog_attached;
}
@@ -7106,6 +7228,27 @@ static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
return bpf_op(dev, &xdp);
}
+static void dev_xdp_uninstall(struct net_device *dev)
+{
+ struct netdev_bpf xdp;
+ bpf_op_t ndo_bpf;
+
+ /* Remove generic XDP */
+ WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
+
+ /* Remove from the driver */
+ ndo_bpf = dev->netdev_ops->ndo_bpf;
+ if (!ndo_bpf)
+ return;
+
+ __dev_xdp_query(dev, ndo_bpf, &xdp);
+ if (xdp.prog_attached == XDP_ATTACHED_NONE)
+ return;
+
+ /* Program removal should always succeed */
+ WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
+}
+
/**
* dev_change_xdp_fd - set or clear a bpf program for a device rx path
* @dev: device
@@ -7134,10 +7277,10 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
bpf_chk = generic_xdp_install;
if (fd >= 0) {
- if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL))
+ if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
return -EEXIST;
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
- __dev_xdp_attached(dev, bpf_op, NULL))
+ __dev_xdp_attached(dev, bpf_op))
return -EBUSY;
prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
@@ -7236,6 +7379,7 @@ static void rollback_registered_many(struct list_head *head)
/* Shutdown queueing discipline. */
dev_shutdown(dev);
+ dev_xdp_uninstall(dev);
/* Notify protocols, that we are about to destroy
* this device. They should clean all the things.
@@ -7245,7 +7389,7 @@ static void rollback_registered_many(struct list_head *head)
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
- GFP_KERNEL, NULL);
+ GFP_KERNEL, NULL, 0);
/*
* Flush the unicast and multicast chains
@@ -7379,6 +7523,18 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
features &= ~dev->gso_partial_features;
}
+ if (!(features & NETIF_F_RXCSUM)) {
+ /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
+ * successfully merged by hardware must also have the
+ * checksum verified by hardware. If the user does not
+ * want to enable RXCSUM, logically, we should disable GRO_HW.
+ */
+ if (features & NETIF_F_GRO_HW) {
+ netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
+ features &= ~NETIF_F_GRO_HW;
+ }
+ }
+
return features;
}
@@ -7512,12 +7668,12 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
-#ifdef CONFIG_SYSFS
static int netif_alloc_rx_queues(struct net_device *dev)
{
unsigned int i, count = dev->num_rx_queues;
struct netdev_rx_queue *rx;
size_t sz = count * sizeof(*rx);
+ int err = 0;
BUG_ON(count < 1);
@@ -7527,11 +7683,38 @@ static int netif_alloc_rx_queues(struct net_device *dev)
dev->_rx = rx;
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
rx[i].dev = dev;
+
+ /* XDP RX-queue setup */
+ err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
+ if (err < 0)
+ goto err_rxq_info;
+ }
return 0;
+
+err_rxq_info:
+ /* Rollback successful reg's and free other resources */
+ while (i--)
+ xdp_rxq_info_unreg(&rx[i].xdp_rxq);
+ kvfree(dev->_rx);
+ dev->_rx = NULL;
+ return err;
+}
+
+static void netif_free_rx_queues(struct net_device *dev)
+{
+ unsigned int i, count = dev->num_rx_queues;
+
+ /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
+ if (!dev->_rx)
+ return;
+
+ for (i = 0; i < count; i++)
+ xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
+
+ kvfree(dev->_rx);
}
-#endif
static void netdev_init_one_queue(struct net_device *dev,
struct netdev_queue *queue, void *_unused)
@@ -8092,12 +8275,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
return NULL;
}
-#ifdef CONFIG_SYSFS
if (rxqs < 1) {
pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
return NULL;
}
-#endif
alloc_size = sizeof(struct net_device);
if (sizeof_priv) {
@@ -8154,12 +8335,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
if (netif_alloc_netdev_queues(dev))
goto free_all;
-#ifdef CONFIG_SYSFS
dev->num_rx_queues = rxqs;
dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
goto free_all;
-#endif
strcpy(dev->name, name);
dev->name_assign_type = name_assign_type;
@@ -8195,13 +8374,10 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
void free_netdev(struct net_device *dev)
{
struct napi_struct *p, *n;
- struct bpf_prog *prog;
might_sleep();
netif_free_tx_queues(dev);
-#ifdef CONFIG_SYSFS
- kvfree(dev->_rx);
-#endif
+ netif_free_rx_queues(dev);
kfree(rcu_dereference_protected(dev->ingress_queue, 1));
@@ -8214,12 +8390,6 @@ void free_netdev(struct net_device *dev)
free_percpu(dev->pcpu_refcnt);
dev->pcpu_refcnt = NULL;
- prog = rcu_dereference_protected(dev->xdp_prog, 1);
- if (prog) {
- bpf_prog_put(prog);
- static_key_slow_dec(&generic_xdp_needed);
- }
-
/* Compatibility with error handling in drivers */
if (dev->reg_state == NETREG_UNINITIALIZED) {
netdev_freemem(dev);
@@ -8332,7 +8502,7 @@ EXPORT_SYMBOL(unregister_netdev);
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
- int err, new_nsid;
+ int err, new_nsid, new_ifindex;
ASSERT_RTNL();
@@ -8388,11 +8558,16 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
rcu_barrier();
call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
- if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net)
- new_nsid = peernet2id_alloc(dev_net(dev), net);
+
+ new_nsid = peernet2id_alloc(dev_net(dev), net);
+ /* If there is an ifindex conflict assign a new one */
+ if (__dev_get_by_index(net, dev->ifindex))
+ new_ifindex = dev_new_index(net);
else
- new_nsid = peernet2id(dev_net(dev), net);
- rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid);
+ new_ifindex = dev->ifindex;
+
+ rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
+ new_ifindex);
/*
* Flush the unicast and multicast chains
@@ -8406,10 +8581,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Actually switch the network namespace */
dev_net_set(dev, net);
-
- /* If there is an ifindex conflict assign a new one */
- if (__dev_get_by_index(net, dev->ifindex))
- dev->ifindex = dev_new_index(net);
+ dev->ifindex = new_ifindex;
/* Send a netdev-add uevent to the new namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
@@ -8807,6 +8979,9 @@ static int __init net_dev_init(void)
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
+#ifdef CONFIG_XFRM_OFFLOAD
+ skb_queue_head_init(&sd->xfrm_backlog);
+#endif
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 7e690d0ccd05..0ab1af04296c 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -18,26 +18,10 @@
* match. --pb
*/
-static int dev_ifname(struct net *net, struct ifreq __user *arg)
+static int dev_ifname(struct net *net, struct ifreq *ifr)
{
- struct ifreq ifr;
- int error;
-
- /*
- * Fetch the caller's info block.
- */
-
- if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
- return -EFAULT;
- ifr.ifr_name[IFNAMSIZ-1] = 0;
-
- error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
- if (error)
- return error;
-
- if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
- return -EFAULT;
- return 0;
+ ifr->ifr_name[IFNAMSIZ-1] = 0;
+ return netdev_get_name(net, ifr->ifr_name, ifr->ifr_ifindex);
}
static gifconf_func_t *gifconf_list[NPROTO];
@@ -66,9 +50,8 @@ EXPORT_SYMBOL(register_gifconf);
* Thus we will need a 'compatibility mode'.
*/
-static int dev_ifconf(struct net *net, char __user *arg)
+int dev_ifconf(struct net *net, struct ifconf *ifc, int size)
{
- struct ifconf ifc;
struct net_device *dev;
char __user *pos;
int len;
@@ -79,11 +62,8 @@ static int dev_ifconf(struct net *net, char __user *arg)
* Fetch the caller's info block.
*/
- if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
- return -EFAULT;
-
- pos = ifc.ifc_buf;
- len = ifc.ifc_len;
+ pos = ifc->ifc_buf;
+ len = ifc->ifc_len;
/*
* Loop over the interfaces, and write an info block for each.
@@ -95,10 +75,10 @@ static int dev_ifconf(struct net *net, char __user *arg)
if (gifconf_list[i]) {
int done;
if (!pos)
- done = gifconf_list[i](dev, NULL, 0);
+ done = gifconf_list[i](dev, NULL, 0, size);
else
done = gifconf_list[i](dev, pos + total,
- len - total);
+ len - total, size);
if (done < 0)
return -EFAULT;
total += done;
@@ -109,12 +89,12 @@ static int dev_ifconf(struct net *net, char __user *arg)
/*
* All done. Write the updated control block back to the caller.
*/
- ifc.ifc_len = total;
+ ifc->ifc_len = total;
/*
* Both BSD and Solaris return 0 here, so we do too.
*/
- return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
+ return 0;
}
/*
@@ -406,53 +386,24 @@ EXPORT_SYMBOL(dev_load);
* positive or a negative errno code on error.
*/
-int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_copyout)
{
- struct ifreq ifr;
int ret;
char *colon;
- /* One special case: SIOCGIFCONF takes ifconf argument
- and requires shared lock, because it sleeps writing
- to user space.
- */
-
- if (cmd == SIOCGIFCONF) {
- rtnl_lock();
- ret = dev_ifconf(net, (char __user *) arg);
- rtnl_unlock();
- return ret;
- }
+ if (need_copyout)
+ *need_copyout = true;
if (cmd == SIOCGIFNAME)
- return dev_ifname(net, (struct ifreq __user *)arg);
-
- /*
- * Take care of Wireless Extensions. Unfortunately struct iwreq
- * isn't a proper subset of struct ifreq (it's 8 byte shorter)
- * so we need to treat it specially, otherwise applications may
- * fault if the struct they're passing happens to land at the
- * end of a mapped page.
- */
- if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
- struct iwreq iwr;
-
- if (copy_from_user(&iwr, arg, sizeof(iwr)))
- return -EFAULT;
-
- iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
+ return dev_ifname(net, ifr);
- return wext_handle_ioctl(net, &iwr, cmd, arg);
- }
-
- if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
- return -EFAULT;
-
- ifr.ifr_name[IFNAMSIZ-1] = 0;
+ ifr->ifr_name[IFNAMSIZ-1] = 0;
- colon = strchr(ifr.ifr_name, ':');
+ colon = strchr(ifr->ifr_name, ':');
if (colon)
*colon = 0;
+ dev_load(net, ifr->ifr_name);
+
/*
* See which interface the caller is talking about.
*/
@@ -472,31 +423,19 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
case SIOCGIFMAP:
case SIOCGIFINDEX:
case SIOCGIFTXQLEN:
- dev_load(net, ifr.ifr_name);
rcu_read_lock();
- ret = dev_ifsioc_locked(net, &ifr, cmd);
+ ret = dev_ifsioc_locked(net, ifr, cmd);
rcu_read_unlock();
- if (!ret) {
- if (colon)
- *colon = ':';
- if (copy_to_user(arg, &ifr,
- sizeof(struct ifreq)))
- ret = -EFAULT;
- }
+ if (colon)
+ *colon = ':';
return ret;
case SIOCETHTOOL:
- dev_load(net, ifr.ifr_name);
rtnl_lock();
- ret = dev_ethtool(net, &ifr);
+ ret = dev_ethtool(net, ifr);
rtnl_unlock();
- if (!ret) {
- if (colon)
- *colon = ':';
- if (copy_to_user(arg, &ifr,
- sizeof(struct ifreq)))
- ret = -EFAULT;
- }
+ if (colon)
+ *colon = ':';
return ret;
/*
@@ -510,17 +449,11 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
case SIOCSIFNAME:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- dev_load(net, ifr.ifr_name);
rtnl_lock();
- ret = dev_ifsioc(net, &ifr, cmd);
+ ret = dev_ifsioc(net, ifr, cmd);
rtnl_unlock();
- if (!ret) {
- if (colon)
- *colon = ':';
- if (copy_to_user(arg, &ifr,
- sizeof(struct ifreq)))
- ret = -EFAULT;
- }
+ if (colon)
+ *colon = ':';
return ret;
/*
@@ -561,10 +494,11 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
/* fall through */
case SIOCBONDSLAVEINFOQUERY:
case SIOCBONDINFOQUERY:
- dev_load(net, ifr.ifr_name);
rtnl_lock();
- ret = dev_ifsioc(net, &ifr, cmd);
+ ret = dev_ifsioc(net, ifr, cmd);
rtnl_unlock();
+ if (need_copyout)
+ *need_copyout = false;
return ret;
case SIOCGIFMEM:
@@ -584,13 +518,9 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
cmd == SIOCGHWTSTAMP ||
(cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15)) {
- dev_load(net, ifr.ifr_name);
rtnl_lock();
- ret = dev_ifsioc(net, &ifr, cmd);
+ ret = dev_ifsioc(net, ifr, cmd);
rtnl_unlock();
- if (!ret && copy_to_user(arg, &ifr,
- sizeof(struct ifreq)))
- ret = -EFAULT;
return ret;
}
return -ENOTTY;
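The net effect of the dev_ioctl.c refactor: all user-memory traffic moves out to the socket-layer caller, dev_ioctl() operates on an already-copied struct ifreq, and *need_copyout tells the caller whether results must be written back (the SIOCBOND* queries, for instance, opt out). Centralizing the copies is what makes a shared compat path possible without duplicating every case. A sketch of what the caller is now expected to do (shape inferred from this interface, not shown in the hunk):

struct ifreq ifr;
bool need_copyout;
int err;

if (copy_from_user(&ifr, arg, sizeof(ifr)))
	return -EFAULT;
err = dev_ioctl(net, cmd, &ifr, &need_copyout);
if (!err && need_copyout && copy_to_user(arg, &ifr, sizeof(ifr)))
	return -EFAULT;
return err;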
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 7d430c1d9c3e..18d385ed8237 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -92,12 +92,6 @@ static LIST_HEAD(devlink_list);
*/
static DEFINE_MUTEX(devlink_mutex);
-/* devlink_port_mutex
- *
- * Shared lock to guard lists of ports in all devlink devices.
- */
-static DEFINE_MUTEX(devlink_port_mutex);
-
static struct net *devlink_net(const struct devlink *devlink)
{
return read_pnet(&devlink->_net);
@@ -335,15 +329,18 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0)
#define DEVLINK_NL_FLAG_NEED_PORT BIT(1)
#define DEVLINK_NL_FLAG_NEED_SB BIT(2)
-#define DEVLINK_NL_FLAG_LOCK_PORTS BIT(3)
- /* port is not needed but we need to ensure they don't
- * change in the middle of command
- */
+
+/* The per devlink instance lock is taken by default in the pre-doit
+ * operation, yet several commands do not require this. The global
+ * devlink lock is taken and protects from disruption by user-calls.
+ */
+#define DEVLINK_NL_FLAG_NO_LOCK BIT(3)
static int devlink_nl_pre_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink;
+ int err;
mutex_lock(&devlink_mutex);
devlink = devlink_get_from_info(info);
@@ -351,44 +348,47 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
mutex_unlock(&devlink_mutex);
return PTR_ERR(devlink);
}
+ if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+ mutex_lock(&devlink->lock);
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) {
info->user_ptr[0] = devlink;
} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
struct devlink_port *devlink_port;
- mutex_lock(&devlink_port_mutex);
devlink_port = devlink_port_get_from_info(devlink, info);
if (IS_ERR(devlink_port)) {
- mutex_unlock(&devlink_port_mutex);
- mutex_unlock(&devlink_mutex);
- return PTR_ERR(devlink_port);
+ err = PTR_ERR(devlink_port);
+ goto unlock;
}
info->user_ptr[0] = devlink_port;
}
- if (ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS) {
- mutex_lock(&devlink_port_mutex);
- }
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) {
struct devlink_sb *devlink_sb;
devlink_sb = devlink_sb_get_from_info(devlink, info);
if (IS_ERR(devlink_sb)) {
- if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT)
- mutex_unlock(&devlink_port_mutex);
- mutex_unlock(&devlink_mutex);
- return PTR_ERR(devlink_sb);
+ err = PTR_ERR(devlink_sb);
+ goto unlock;
}
info->user_ptr[1] = devlink_sb;
}
return 0;
+
+unlock:
+ if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+ mutex_unlock(&devlink->lock);
+ mutex_unlock(&devlink_mutex);
+ return err;
}
static void devlink_nl_post_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
- if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT ||
- ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS)
- mutex_unlock(&devlink_port_mutex);
+ struct devlink *devlink;
+
+ devlink = devlink_get_from_info(info);
+ if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+ mutex_unlock(&devlink->lock);
mutex_unlock(&devlink_mutex);
}
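The devlink rework replaces the single global devlink_port_mutex with a per-instance devlink->lock, taken in pre_doit and released in post_doit unless the command sets DEVLINK_NL_FLAG_NO_LOCK; dump handlers, which iterate every instance, take each instance's lock only around that instance's lists, as the dumpit hunks below show. The resulting two-level discipline, as a sketch:

mutex_lock(&devlink_mutex);		/* global: guards devlink_list */
list_for_each_entry(devlink, &devlink_list, list) {
	mutex_lock(&devlink->lock);	/* instance: ports, sbs, tables */
	/* ... fill the netlink message from this instance ... */
	mutex_unlock(&devlink->lock);
}
mutex_unlock(&devlink_mutex);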
@@ -614,10 +614,10 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
int err;
mutex_lock(&devlink_mutex);
- mutex_lock(&devlink_port_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
continue;
+ mutex_lock(&devlink->lock);
list_for_each_entry(devlink_port, &devlink->port_list, list) {
if (idx < start) {
idx++;
@@ -628,13 +628,15 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
- if (err)
+ if (err) {
+ mutex_unlock(&devlink->lock);
goto out;
+ }
idx++;
}
+ mutex_unlock(&devlink->lock);
}
out:
- mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
@@ -801,6 +803,7 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
continue;
+ mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
if (idx < start) {
idx++;
@@ -811,10 +814,13 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
- if (err)
+ if (err) {
+ mutex_unlock(&devlink->lock);
goto out;
+ }
idx++;
}
+ mutex_unlock(&devlink->lock);
}
out:
mutex_unlock(&devlink_mutex);
@@ -935,14 +941,18 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
!devlink->ops || !devlink->ops->sb_pool_get)
continue;
+ mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
- if (err && err != -EOPNOTSUPP)
+ if (err && err != -EOPNOTSUPP) {
+ mutex_unlock(&devlink->lock);
goto out;
+ }
}
+ mutex_unlock(&devlink->lock);
}
out:
mutex_unlock(&devlink_mutex);
@@ -1123,22 +1133,24 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
int err;
mutex_lock(&devlink_mutex);
- mutex_lock(&devlink_port_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
!devlink->ops || !devlink->ops->sb_port_pool_get)
continue;
+ mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_port_pool_get_dumpit(msg, start, &idx,
devlink, devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
- if (err && err != -EOPNOTSUPP)
+ if (err && err != -EOPNOTSUPP) {
+ mutex_unlock(&devlink->lock);
goto out;
+ }
}
+ mutex_unlock(&devlink->lock);
}
out:
- mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
@@ -1347,23 +1359,26 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
int err;
mutex_lock(&devlink_mutex);
- mutex_lock(&devlink_port_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
!devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
continue;
+
+ mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
devlink,
devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
- if (err && err != -EOPNOTSUPP)
+ if (err && err != -EOPNOTSUPP) {
+ mutex_unlock(&devlink->lock);
goto out;
+ }
}
+ mutex_unlock(&devlink->lock);
}
out:
- mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
@@ -1679,6 +1694,12 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
table->counters_enabled))
goto nla_put_failure;
+ if (table->resource_valid) {
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+ table->resource_id, DEVLINK_ATTR_PAD);
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+ table->resource_units, DEVLINK_ATTR_PAD);
+ }
if (devlink_dpipe_matches_put(table, skb))
goto nla_put_failure;
@@ -2273,6 +2294,273 @@ static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
counters_enable);
}
+static struct devlink_resource *
+devlink_resource_find(struct devlink *devlink,
+ struct devlink_resource *resource, u64 resource_id)
+{
+ struct list_head *resource_list;
+
+ if (resource)
+ resource_list = &resource->resource_list;
+ else
+ resource_list = &devlink->resource_list;
+
+ list_for_each_entry(resource, resource_list, list) {
+ struct devlink_resource *child_resource;
+
+ if (resource->id == resource_id)
+ return resource;
+
+ child_resource = devlink_resource_find(devlink, resource,
+ resource_id);
+ if (child_resource)
+ return child_resource;
+ }
+ return NULL;
+}
+
+static void
+devlink_resource_validate_children(struct devlink_resource *resource)
+{
+ struct devlink_resource *child_resource;
+ bool size_valid = true;
+ u64 parts_size = 0;
+
+ if (list_empty(&resource->resource_list))
+ goto out;
+
+ list_for_each_entry(child_resource, &resource->resource_list, list)
+ parts_size += child_resource->size_new;
+
+ if (parts_size > resource->size)
+ size_valid = false;
+out:
+ resource->size_valid = size_valid;
+}
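The rule being enforced: a resource is flagged invalid when the pending (size_new) values of its direct children sum to more than the parent's own committed size. For example, a 100-unit parent whose two children request 60 and 50 units fails validation (110 > 100), while 60 and 40 passes; a resource with no children is always considered valid. This is also why devlink_nl_cmd_resource_set() below re-validates both the resource itself and its parent after updating size_new.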
+
+static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_resource *resource;
+ u64 resource_id;
+ u64 size;
+ int err;
+
+ if (!info->attrs[DEVLINK_ATTR_RESOURCE_ID] ||
+ !info->attrs[DEVLINK_ATTR_RESOURCE_SIZE])
+ return -EINVAL;
+ resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]);
+
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (!resource)
+ return -EINVAL;
+
+ if (!resource->resource_ops->size_validate)
+ return -EINVAL;
+
+ size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
+ err = resource->resource_ops->size_validate(devlink, size,
+ info->extack);
+ if (err)
+ return err;
+
+ resource->size_new = size;
+ devlink_resource_validate_children(resource);
+ if (resource->parent)
+ devlink_resource_validate_children(resource->parent);
+ return 0;
+}
+
+static void
+devlink_resource_size_params_put(struct devlink_resource *resource,
+ struct sk_buff *skb)
+{
+ struct devlink_resource_size_params *size_params;
+
+ size_params = resource->size_params;
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+ size_params->size_granularity, DEVLINK_ATTR_PAD);
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+ size_params->size_max, DEVLINK_ATTR_PAD);
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+ size_params->size_min, DEVLINK_ATTR_PAD);
+ nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit);
+}
+
+static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
+ struct devlink_resource *resource)
+{
+ struct devlink_resource *child_resource;
+ struct nlattr *child_resource_attr;
+ struct nlattr *resource_attr;
+
+ resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE);
+ if (!resource_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
+ DEVLINK_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (resource->size != resource->size_new)
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
+ resource->size_new, DEVLINK_ATTR_PAD);
+ if (resource->resource_ops && resource->resource_ops->occ_get)
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+ resource->resource_ops->occ_get(devlink),
+ DEVLINK_ATTR_PAD);
+ devlink_resource_size_params_put(resource, skb);
+ if (list_empty(&resource->resource_list))
+ goto out;
+
+ if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
+ resource->size_valid))
+ goto nla_put_failure;
+
+ child_resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
+ if (!child_resource_attr)
+ goto nla_put_failure;
+
+ list_for_each_entry(child_resource, &resource->resource_list, list) {
+ if (devlink_resource_put(devlink, skb, child_resource))
+ goto resource_put_failure;
+ }
+
+ nla_nest_end(skb, child_resource_attr);
+out:
+ nla_nest_end(skb, resource_attr);
+ return 0;
+
+resource_put_failure:
+ nla_nest_cancel(skb, child_resource_attr);
+nla_put_failure:
+ nla_nest_cancel(skb, resource_attr);
+ return -EMSGSIZE;
+}
+
+static int devlink_resource_fill(struct genl_info *info,
+ enum devlink_command cmd, int flags)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_resource *resource;
+ struct nlattr *resources_attr;
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ bool incomplete;
+ void *hdr;
+ int i;
+ int err;
+
+ resource = list_first_entry(&devlink->resource_list,
+ struct devlink_resource, list);
+start_again:
+ err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+ &devlink_nl_family, NLM_F_MULTI, cmd);
+ if (!hdr) {
+ nlmsg_free(skb);
+ return -EMSGSIZE;
+ }
+
+ if (devlink_nl_put_handle(skb, devlink))
+ goto nla_put_failure;
+
+ resources_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
+ if (!resources_attr)
+ goto nla_put_failure;
+
+ incomplete = false;
+ i = 0;
+ list_for_each_entry_from(resource, &devlink->resource_list, list) {
+ err = devlink_resource_put(devlink, skb, resource);
+ if (err) {
+ if (!i)
+ goto err_resource_put;
+ incomplete = true;
+ break;
+ }
+ i++;
+ }
+ nla_nest_end(skb, resources_attr);
+ genlmsg_end(skb, hdr);
+ if (incomplete)
+ goto start_again;
+send_done:
+ nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+ NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+ if (err)
+ goto err_skb_send_alloc;
+ goto send_done;
+ }
+ return genlmsg_reply(skb, info);
+
+nla_put_failure:
+ err = -EMSGSIZE;
+err_resource_put:
+err_skb_send_alloc:
+ genlmsg_cancel(skb, hdr);
+ nlmsg_free(skb);
+ return err;
+}
+
+static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+
+ if (list_empty(&devlink->resource_list))
+ return -EOPNOTSUPP;
+
+ return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
+}
+
+static int
+devlink_resources_validate(struct devlink *devlink,
+ struct devlink_resource *resource,
+ struct genl_info *info)
+{
+ struct list_head *resource_list;
+ int err = 0;
+
+ if (resource)
+ resource_list = &resource->resource_list;
+ else
+ resource_list = &devlink->resource_list;
+
+ list_for_each_entry(resource, resource_list, list) {
+ if (!resource->size_valid)
+ return -EINVAL;
+ err = devlink_resources_validate(devlink, resource, info);
+ if (err)
+ return err;
+ }
+ return err;
+}
+
+static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ int err;
+
+ if (!devlink->ops->reload)
+ return -EOPNOTSUPP;
+
+ err = devlink_resources_validate(devlink, NULL, info);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
+ return err;
+ }
+ return devlink->ops->reload(devlink);
+}
+
static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
@@ -2291,6 +2579,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
+ [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
};
static const struct genl_ops devlink_nl_ops[] = {
@@ -2322,14 +2612,16 @@ static const struct genl_ops devlink_nl_ops[] = {
.doit = devlink_nl_cmd_port_split_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_PORT_UNSPLIT,
.doit = devlink_nl_cmd_port_unsplit_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_SB_GET,
@@ -2397,8 +2689,7 @@ static const struct genl_ops devlink_nl_ops[] = {
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
- DEVLINK_NL_FLAG_NEED_SB |
- DEVLINK_NL_FLAG_LOCK_PORTS,
+ DEVLINK_NL_FLAG_NEED_SB,
},
{
.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
@@ -2406,8 +2697,7 @@ static const struct genl_ops devlink_nl_ops[] = {
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
- DEVLINK_NL_FLAG_NEED_SB |
- DEVLINK_NL_FLAG_LOCK_PORTS,
+ DEVLINK_NL_FLAG_NEED_SB,
},
{
.cmd = DEVLINK_CMD_ESWITCH_GET,
@@ -2451,6 +2741,28 @@ static const struct genl_ops devlink_nl_ops[] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
+ {
+ .cmd = DEVLINK_CMD_RESOURCE_SET,
+ .doit = devlink_nl_cmd_resource_set,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_RESOURCE_DUMP,
+ .doit = devlink_nl_cmd_resource_dump,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_RELOAD,
+ .doit = devlink_nl_cmd_reload,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
+ },
};
static struct genl_family devlink_nl_family __ro_after_init = {
@@ -2488,6 +2800,8 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
INIT_LIST_HEAD(&devlink->port_list);
INIT_LIST_HEAD(&devlink->sb_list);
INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
+ INIT_LIST_HEAD(&devlink->resource_list);
+ mutex_init(&devlink->lock);
return devlink;
}
EXPORT_SYMBOL_GPL(devlink_alloc);
@@ -2550,16 +2864,16 @@ int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
unsigned int port_index)
{
- mutex_lock(&devlink_port_mutex);
+ mutex_lock(&devlink->lock);
if (devlink_port_index_exists(devlink, port_index)) {
- mutex_unlock(&devlink_port_mutex);
+ mutex_unlock(&devlink->lock);
return -EEXIST;
}
devlink_port->devlink = devlink;
devlink_port->index = port_index;
devlink_port->registered = true;
list_add_tail(&devlink_port->list, &devlink->port_list);
- mutex_unlock(&devlink_port_mutex);
+ mutex_unlock(&devlink->lock);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
return 0;
}
@@ -2572,10 +2886,12 @@ EXPORT_SYMBOL_GPL(devlink_port_register);
*/
void devlink_port_unregister(struct devlink_port *devlink_port)
{
+ struct devlink *devlink = devlink_port->devlink;
+
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
- mutex_lock(&devlink_port_mutex);
+ mutex_lock(&devlink->lock);
list_del(&devlink_port->list);
- mutex_unlock(&devlink_port_mutex);
+ mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devlink_port_unregister);
@@ -2651,7 +2967,7 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
struct devlink_sb *devlink_sb;
int err = 0;
- mutex_lock(&devlink_mutex);
+ mutex_lock(&devlink->lock);
if (devlink_sb_index_exists(devlink, sb_index)) {
err = -EEXIST;
goto unlock;
@@ -2670,7 +2986,7 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
devlink_sb->egress_tc_count = egress_tc_count;
list_add_tail(&devlink_sb->list, &devlink->sb_list);
unlock:
- mutex_unlock(&devlink_mutex);
+ mutex_unlock(&devlink->lock);
return err;
}
EXPORT_SYMBOL_GPL(devlink_sb_register);
@@ -2679,11 +2995,11 @@ void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
{
struct devlink_sb *devlink_sb;
- mutex_lock(&devlink_mutex);
+ mutex_lock(&devlink->lock);
devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
WARN_ON(!devlink_sb);
list_del(&devlink_sb->list);
- mutex_unlock(&devlink_mutex);
+ mutex_unlock(&devlink->lock);
kfree(devlink_sb);
}
EXPORT_SYMBOL_GPL(devlink_sb_unregister);
@@ -2699,9 +3015,9 @@ EXPORT_SYMBOL_GPL(devlink_sb_unregister);
int devlink_dpipe_headers_register(struct devlink *devlink,
struct devlink_dpipe_headers *dpipe_headers)
{
- mutex_lock(&devlink_mutex);
+ mutex_lock(&devlink->lock);
devlink->dpipe_headers = dpipe_headers;
- mutex_unlock(&devlink_mutex);
+ mutex_unlock(&devlink->lock);
return 0;
}
EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
@@ -2715,9 +3031,9 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
*/
void devlink_dpipe_headers_unregister(struct devlink *devlink)
{
- mutex_lock(&devlink_mutex);
+ mutex_lock(&devlink->lock);
devlink->dpipe_headers = NULL;
- mutex_unlock(&devlink_mutex);
+ mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devlink_dpipe_headers_unregister);
@@ -2783,9 +3099,9 @@ int devlink_dpipe_table_register(struct devlink *devlink,
table->priv = priv;
table->counter_control_extern = counter_control_extern;
- mutex_lock(&devlink_mutex);
+ mutex_lock(&devlink->lock);
list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
- mutex_unlock(&devlink_mutex);
+ mutex_unlock(&devlink->lock);
return 0;
}
EXPORT_SYMBOL_GPL(devlink_dpipe_table_register);
@@ -2801,20 +3117,182 @@ void devlink_dpipe_table_unregister(struct devlink *devlink,
{
struct devlink_dpipe_table *table;
- mutex_lock(&devlink_mutex);
+ mutex_lock(&devlink->lock);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name);
if (!table)
goto unlock;
list_del_rcu(&table->list);
- mutex_unlock(&devlink_mutex);
+ mutex_unlock(&devlink->lock);
kfree_rcu(table, rcu);
return;
unlock:
- mutex_unlock(&devlink_mutex);
+ mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister);
+/**
+ * devlink_resource_register - devlink resource register
+ *
+ * @devlink: devlink
+ * @resource_name: resource's name
+ * @top_hierarchy: true if the resource sits at the top of the hierarchy
+ * @resource_size: resource's size
+ * @resource_id: resource's id
+ * @parent_resource_id: resource's parent id
+ * @size_params: size parameters
+ * @resource_ops: resource ops
+ */
+int devlink_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ bool top_hierarchy,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ struct devlink_resource_size_params *size_params,
+ const struct devlink_resource_ops *resource_ops)
+{
+ struct devlink_resource *resource;
+ struct list_head *resource_list;
+ int err = 0;
+
+ mutex_lock(&devlink->lock);
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (resource) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (top_hierarchy) {
+ resource_list = &devlink->resource_list;
+ } else {
+ struct devlink_resource *parent_resource;
+
+ parent_resource = devlink_resource_find(devlink, NULL,
+ parent_resource_id);
+ if (parent_resource) {
+ resource_list = &parent_resource->resource_list;
+ resource->parent = parent_resource;
+ } else {
+ kfree(resource);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ resource->name = resource_name;
+ resource->size = resource_size;
+ resource->size_new = resource_size;
+ resource->id = resource_id;
+ resource->resource_ops = resource_ops;
+ resource->size_valid = true;
+ resource->size_params = size_params;
+ INIT_LIST_HEAD(&resource->resource_list);
+ list_add_tail(&resource->list, resource_list);
+out:
+ mutex_unlock(&devlink->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_resource_register);
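For illustration, a minimal driver-side sketch of registering a parent/child resource pair. The names, IDs and sizes here are hypothetical, and the resource_ops (with its size_validate callback) is omitted by passing NULL:

    /* Hypothetical driver code, not part of this patch. */
    enum { EX_RES_TABLES = 1, EX_RES_TABLES_HOST = 2 };

    static struct devlink_resource_size_params ex_size_params = {
            .size_min = 0,
            .size_max = 32 * 1024,
            .size_granularity = 128,
            .unit = DEVLINK_RESOURCE_UNIT_ENTRY,
    };

    static int ex_register_resources(struct devlink *devlink)
    {
            int err;

            /* top_hierarchy = true, so the parent id (0) is ignored */
            err = devlink_resource_register(devlink, "tables", true,
                                            32 * 1024, EX_RES_TABLES,
                                            0, &ex_size_params, NULL);
            if (err)
                    return err;

            /* child of "tables": top_hierarchy = false, parent id given */
            return devlink_resource_register(devlink, "tables_host", false,
                                             16 * 1024, EX_RES_TABLES_HOST,
                                             EX_RES_TABLES,
                                             &ex_size_params, NULL);
    }

A driver implementing the new .reload op would then re-read the committed sizes with devlink_resource_size_get() and re-provision the device accordingly.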
+
+/**
+ * devlink_resources_unregister - free all resources
+ *
+ * @devlink: devlink
+ * @resource: resource
+ */
+void devlink_resources_unregister(struct devlink *devlink,
+ struct devlink_resource *resource)
+{
+ struct devlink_resource *tmp, *child_resource;
+ struct list_head *resource_list;
+
+ if (resource)
+ resource_list = &resource->resource_list;
+ else
+ resource_list = &devlink->resource_list;
+
+ if (!resource)
+ mutex_lock(&devlink->lock);
+
+ list_for_each_entry_safe(child_resource, tmp, resource_list, list) {
+ devlink_resources_unregister(devlink, child_resource);
+ list_del(&child_resource->list);
+ kfree(child_resource);
+ }
+
+ if (!resource)
+ mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_resources_unregister);
+
+/**
+ * devlink_resource_size_get - get and update size
+ *
+ * @devlink: devlink
+ * @resource_id: the requested resource id
+ * @p_resource_size: ptr to update
+ */
+int devlink_resource_size_get(struct devlink *devlink,
+ u64 resource_id,
+ u64 *p_resource_size)
+{
+ struct devlink_resource *resource;
+ int err = 0;
+
+ mutex_lock(&devlink->lock);
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (!resource) {
+ err = -EINVAL;
+ goto out;
+ }
+ *p_resource_size = resource->size_new;
+ resource->size = resource->size_new;
+out:
+ mutex_unlock(&devlink->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_resource_size_get);
+
+/**
+ * devlink_dpipe_table_resource_set - set the resource id
+ *
+ * @devlink: devlink
+ * @table_name: table name
+ * @resource_id: resource id
+ * @resource_units: number of resource's units consumed per table's entry
+ */
+int devlink_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units)
+{
+ struct devlink_dpipe_table *table;
+ int err = 0;
+
+ mutex_lock(&devlink->lock);
+ table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+ table_name);
+ if (!table) {
+ err = -EINVAL;
+ goto out;
+ }
+ table->resource_id = resource_id;
+ table->resource_units = resource_units;
+ table->resource_valid = true;
+out:
+ mutex_unlock(&devlink->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_table_resource_set);
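Continuing the hypothetical example above, a driver would tie a dpipe table to that child resource with, e.g., devlink_dpipe_table_resource_set(devlink, "host_table", EX_RES_TABLES_HOST, 1), meaning each table entry consumes one unit of the resource; the binding is then reported in the dpipe table dump via the two new DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_* attributes.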
+
static int __init devlink_module_init(void)
{
return genl_register_family(&devlink_nl_family);
diff --git a/net/core/dst.c b/net/core/dst.c
index 662a2d4a3d19..007aa0b08291 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
+#include <net/xfrm.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
@@ -62,15 +63,12 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
struct net_device *dev, int initial_ref, int initial_obsolete,
unsigned short flags)
{
- dst->child = NULL;
dst->dev = dev;
if (dev)
dev_hold(dev);
dst->ops = ops;
dst_init_metrics(dst, dst_default_metrics.metrics, true);
dst->expires = 0UL;
- dst->path = dst;
- dst->from = NULL;
#ifdef CONFIG_XFRM
dst->xfrm = NULL;
#endif
@@ -88,7 +86,6 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
dst->__use = 0;
dst->lastuse = jiffies;
dst->flags = flags;
- dst->next = NULL;
if (!(flags & DST_NOCOUNT))
dst_entries_add(ops, 1);
}
@@ -116,12 +113,17 @@ EXPORT_SYMBOL(dst_alloc);
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
- struct dst_entry *child;
+ struct dst_entry *child = NULL;
smp_rmb();
- child = dst->child;
+#ifdef CONFIG_XFRM
+ if (dst->xfrm) {
+ struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+ child = xdst->child;
+ }
+#endif
if (!(dst->flags & DST_NOCOUNT))
dst_entries_add(dst->ops, -1);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f8fcf450a36e..107b122c8969 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -73,6 +73,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_LLTX_BIT] = "tx-lockless",
[NETIF_F_NETNS_LOCAL_BIT] = "netns-local",
[NETIF_F_GRO_BIT] = "rx-gro",
+ [NETIF_F_GRO_HW_BIT] = "rx-gro-hw",
[NETIF_F_LRO_BIT] = "rx-lro",
[NETIF_F_TSO_BIT] = "tx-tcp-segmentation",
@@ -770,15 +771,6 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
}
-static void
-warn_incomplete_ethtool_legacy_settings_conversion(const char *details)
-{
- char name[sizeof(current->comm)];
-
- pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n",
- get_task_comm(name, current), details);
-}
-
/* Query device for its ethtool_cmd settings.
*
* Backward compatibility note: for compatibility with legacy ethtool,
@@ -805,10 +797,8 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
&link_ksettings);
if (err < 0)
return err;
- if (!convert_link_ksettings_to_legacy_settings(&cmd,
- &link_ksettings))
- warn_incomplete_ethtool_legacy_settings_conversion(
- "link modes are only partially reported");
+ convert_link_ksettings_to_legacy_settings(&cmd,
+ &link_ksettings);
/* send a sensible cmd tag back to user */
cmd.cmd = ETHTOOL_GSET;
@@ -1703,14 +1693,23 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_ringparam ringparam;
+ struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
- if (!dev->ethtool_ops->set_ringparam)
+ if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
return -EOPNOTSUPP;
if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
return -EFAULT;
+ dev->ethtool_ops->get_ringparam(dev, &max);
+
+ /* ensure new ring parameters are within the maximums */
+ if (ringparam.rx_pending > max.rx_max_pending ||
+ ringparam.rx_mini_pending > max.rx_mini_max_pending ||
+ ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending ||
+ ringparam.tx_pending > max.tx_max_pending)
+ return -EINVAL;
+
return dev->ethtool_ops->set_ringparam(dev, &ringparam);
}
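For reference, a userspace sketch of the path this validates; it uses the standard SIOCETHTOOL ioctl, with error handling elided:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    /* fd is an AF_INET/SOCK_DGRAM socket */
    static int set_rx_ring(int fd, const char *ifname, __u32 rx_pending)
    {
            struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&ring;
            if (ioctl(fd, SIOCETHTOOL, &ifr))       /* read current + max */
                    return -1;

            ring.cmd = ETHTOOL_SRINGPARAM;
            ring.rx_pending = rx_pending;   /* now rejected if > rx_max_pending */
            return ioctl(fd, SIOCETHTOOL, &ifr);
    }

Previously an out-of-range value was passed straight to the driver's set_ringparam; with this check every driver gets the same -EINVAL behavior.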
diff --git a/net/core/filter.c b/net/core/filter.c
index 6a85e67fafce..08ab4c65a998 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -401,8 +401,8 @@ do_pass:
/* Classic BPF expects A and X to be reset first. These need
* to be guaranteed to be the first two instructions.
*/
- *new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
- *new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
+ *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
+ *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
/* All programs must keep CTX in callee saved BPF_REG_CTX.
* In eBPF case it's done by the compiler, here we need to
@@ -458,6 +458,17 @@ do_pass:
convert_bpf_extensions(fp, &insn))
break;
+ if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
+ fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
+ *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
+ /* eBPF errors out with an exception on div/mod by 0,
+ * whereas a cBPF program always returned 0 in that
+ * case, so emit an explicit check to preserve that.
+ */
+ *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
+ *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
+ *insn++ = BPF_EXIT_INSN();
+ }
+
*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
break;
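The three prepended instructions implement the old cBPF semantics in eBPF terms; in C they are roughly equivalent to:

    /* emitted before each A /= X or A %= X of the converted program */
    X = (u32)X;        /* BPF_MOV32_REG: truncate X to 32 bits */
    if (X == 0) {      /* BPF_JMP_IMM(BPF_JNE, X, 0, 2) skips the next two */
            A = 0;     /* BPF_ALU32_REG(BPF_XOR, A, A) */
            return 0;  /* BPF_EXIT_INSN(); A is mapped to R0 */
    }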
@@ -1054,11 +1065,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
*/
goto out_err_free;
- /* We are guaranteed to never error here with cBPF to eBPF
- * transitions, since there's no issue with type compatibility
- * checks on program arrays.
- */
fp = bpf_prog_select_runtime(fp, &err);
+ if (err)
+ goto out_err_free;
kfree(old_prog);
return fp;
@@ -2684,8 +2693,9 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
return 0;
}
-int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb,
- struct bpf_prog *xdp_prog)
+static int xdp_do_generic_redirect_map(struct net_device *dev,
+ struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
unsigned long map_owner = ri->map_owner;
@@ -2862,7 +2872,7 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
- .arg5_type = ARG_CONST_SIZE,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
static unsigned short bpf_tunnel_key_af(u64 flags)
@@ -3013,6 +3023,8 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
if (flags & BPF_F_DONT_FRAGMENT)
info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
+ if (flags & BPF_F_ZERO_CSUM_TX)
+ info->key.tun_flags &= ~TUNNEL_CSUM;
info->key.tun_id = cpu_to_be64(from->tunnel_id);
info->key.tos = from->tunnel_tos;
@@ -3026,8 +3038,6 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
IPV6_FLOWLABEL_MASK;
} else {
info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
- if (flags & BPF_F_ZERO_CSUM_TX)
- info->key.tun_flags &= ~TUNNEL_CSUM;
}
return 0;
@@ -3151,7 +3161,7 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
- .arg5_type = ARG_CONST_SIZE,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
@@ -3229,6 +3239,29 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
ret = -EINVAL;
}
#ifdef CONFIG_INET
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (level == SOL_IPV6) {
+ if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
+ return -EINVAL;
+
+ val = *((int *)optval);
+ /* Only some options are supported */
+ switch (optname) {
+ case IPV6_TCLASS:
+ if (val < -1 || val > 0xff) {
+ ret = -EINVAL;
+ } else {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ if (val == -1)
+ val = 0;
+ np->tclass = val;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+#endif
} else if (level == SOL_TCP &&
sk->sk_prot->setsockopt == tcp_setsockopt) {
if (optname == TCP_CONGESTION) {
@@ -3238,7 +3271,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
strncpy(name, optval, min_t(long, optlen,
TCP_CA_NAME_MAX-1));
name[TCP_CA_NAME_MAX-1] = 0;
- ret = tcp_set_congestion_control(sk, name, false, reinit);
+ ret = tcp_set_congestion_control(sk, name, false,
+ reinit);
} else {
struct tcp_sock *tp = tcp_sk(sk);
@@ -3304,6 +3338,22 @@ BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
} else {
goto err_clear;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (level == SOL_IPV6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
+ goto err_clear;
+
+ /* Only some options are supported */
+ switch (optname) {
+ case IPV6_TCLASS:
+ *((int *)optval) = (int)np->tclass;
+ break;
+ default:
+ goto err_clear;
+ }
+#endif
} else {
goto err_clear;
}
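Taken together with the setsockopt side above, a BPF_PROG_TYPE_SOCK_OPS program can now manage the IPv6 traffic class directly. A minimal sketch, where skops is the program's context argument:

    int v = 0x28;   /* new tclass value; -1 is accepted and stored as 0 */

    bpf_setsockopt(skops, SOL_IPV6, IPV6_TCLASS, &v, sizeof(v));
    bpf_getsockopt(skops, SOL_IPV6, IPV6_TCLASS, &v, sizeof(v));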
@@ -3325,6 +3375,33 @@ static const struct bpf_func_proto bpf_getsockopt_proto = {
.arg5_type = ARG_CONST_SIZE,
};
+BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
+ int, argval)
+{
+ struct sock *sk = bpf_sock->sk;
+ int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
+
+ if (!sk_fullsock(sk))
+ return -EINVAL;
+
+#ifdef CONFIG_INET
+ if (val)
+ tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
+
+ return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
+#else
+ return -EINVAL;
+#endif
+}
+
+static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
+ .func = bpf_sock_ops_cb_flags_set,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+};
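A sketch of using the new helper from a BPF_PROG_TYPE_SOCK_OPS program; BPF_SOCK_OPS_RTO_CB_FLAG is one of the callback flags introduced alongside this helper, and the SEC() annotation follows the style of the kernel's BPF samples:

    SEC("sockops")
    int enable_rto_cb(struct bpf_sock_ops *skops)
    {
            if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
                    /* return value carries any requested-but-unsupported bits */
                    bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_RTO_CB_FLAG);
            return 1;
    }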
+
static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
@@ -3457,6 +3534,8 @@ xdp_func_proto(enum bpf_func_id func_id)
return &bpf_xdp_event_output_proto;
case BPF_FUNC_get_smp_processor_id:
return &bpf_get_smp_processor_id_proto;
+ case BPF_FUNC_csum_diff:
+ return &bpf_csum_diff_proto;
case BPF_FUNC_xdp_adjust_head:
return &bpf_xdp_adjust_head_proto;
case BPF_FUNC_xdp_adjust_meta:
@@ -3505,6 +3584,8 @@ static const struct bpf_func_proto *
return &bpf_setsockopt_proto;
case BPF_FUNC_getsockopt:
return &bpf_getsockopt_proto;
+ case BPF_FUNC_sock_ops_cb_flags_set:
+ return &bpf_sock_ops_cb_flags_set_proto;
case BPF_FUNC_sock_map_update:
return &bpf_sock_map_update_proto;
default:
@@ -3821,34 +3902,44 @@ void bpf_warn_invalid_xdp_action(u32 act)
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
-static bool __is_valid_sock_ops_access(int off, int size)
+static bool sock_ops_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
{
+ const int size_default = sizeof(__u32);
+
if (off < 0 || off >= sizeof(struct bpf_sock_ops))
return false;
+
/* The verifier guarantees that size > 0. */
if (off % size != 0)
return false;
- if (size != sizeof(__u32))
- return false;
- return true;
-}
-
-static bool sock_ops_is_valid_access(int off, int size,
- enum bpf_access_type type,
- struct bpf_insn_access_aux *info)
-{
if (type == BPF_WRITE) {
switch (off) {
- case offsetof(struct bpf_sock_ops, op) ...
- offsetof(struct bpf_sock_ops, replylong[3]):
+ case offsetof(struct bpf_sock_ops, reply):
+ case offsetof(struct bpf_sock_ops, sk_txhash):
+ if (size != size_default)
+ return false;
break;
default:
return false;
}
+ } else {
+ switch (off) {
+ case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
+ bytes_acked):
+ if (size != sizeof(__u64))
+ return false;
+ break;
+ default:
+ if (size != size_default)
+ return false;
+ break;
+ }
}
- return __is_valid_sock_ops_access(off, size);
+ return true;
}
static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
@@ -4303,6 +4394,24 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
si->dst_reg, si->src_reg,
offsetof(struct xdp_buff, data_end));
break;
+ case offsetof(struct xdp_md, ingress_ifindex):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
+ si->dst_reg, si->src_reg,
+ offsetof(struct xdp_buff, rxq));
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
+ si->dst_reg, si->dst_reg,
+ offsetof(struct xdp_rxq_info, dev));
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+ offsetof(struct net_device, ifindex));
+ break;
+ case offsetof(struct xdp_md, rx_queue_index):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
+ si->dst_reg, si->src_reg,
+ offsetof(struct xdp_buff, rxq));
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+ offsetof(struct xdp_rxq_info,
+ queue_index));
+ break;
}
return insn - insn_buf;
@@ -4437,6 +4546,211 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
offsetof(struct sock_common, skc_num));
break;
+
+ case offsetof(struct bpf_sock_ops, is_fullsock):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+ struct bpf_sock_ops_kern,
+ is_fullsock),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern,
+ is_fullsock));
+ break;
+
+ case offsetof(struct bpf_sock_ops, state):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
+
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+ struct bpf_sock_ops_kern, sk),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern, sk));
+ *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
+ offsetof(struct sock_common, skc_state));
+ break;
+
+ case offsetof(struct bpf_sock_ops, rtt_min):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
+ sizeof(struct minmax));
+ BUILD_BUG_ON(sizeof(struct minmax) <
+ sizeof(struct minmax_sample));
+
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+ struct bpf_sock_ops_kern, sk),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern, sk));
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+ offsetof(struct tcp_sock, rtt_min) +
+ FIELD_SIZEOF(struct minmax_sample, t));
+ break;
+
+/* Helper macro for adding read access to tcp_sock or sock fields. */
+#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
+ do { \
+ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
+ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, \
+ is_fullsock), \
+ si->dst_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ is_fullsock)); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, sk),\
+ si->dst_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, sk));\
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
+ OBJ_FIELD), \
+ si->dst_reg, si->dst_reg, \
+ offsetof(OBJ, OBJ_FIELD)); \
+ } while (0)
+
+/* Helper macro for adding write access to tcp_sock or sock fields.
+ * The macro is called with two registers, dst_reg which contains a pointer
+ * to ctx (context) and src_reg which contains the value that should be
+ * stored. However, we need an additional register since we cannot overwrite
+ * dst_reg because it may be used later in the program.
+ * Instead we "borrow" one of the other register. We first save its value
+ * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
+ * it at the end of the macro.
+ */
+#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
+ do { \
+ int reg = BPF_REG_9; \
+ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
+ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, \
+ is_fullsock), \
+ reg, si->dst_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ is_fullsock)); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, sk),\
+ reg, si->dst_reg, \
+ offsetof(struct bpf_sock_ops_kern, sk));\
+ *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
+ reg, si->src_reg, \
+ offsetof(OBJ, OBJ_FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ } while (0)
+
+#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
+ do { \
+ if (TYPE == BPF_WRITE) \
+ SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
+ else \
+ SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
+ } while (0)
+
+ case offsetof(struct bpf_sock_ops, snd_cwnd):
+ SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, srtt_us):
+ SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
+ SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
+ struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, snd_ssthresh):
+ SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, rcv_nxt):
+ SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, snd_nxt):
+ SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, snd_una):
+ SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, mss_cache):
+ SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, ecn_flags):
+ SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, rate_delivered):
+ SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
+ struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, rate_interval_us):
+ SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
+ struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, packets_out):
+ SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, retrans_out):
+ SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, total_retrans):
+ SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
+ struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, segs_in):
+ SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, data_segs_in):
+ SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, segs_out):
+ SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, data_segs_out):
+ SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
+ struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, lost_out):
+ SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, sacked_out):
+ SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, sk_txhash):
+ SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
+ struct sock, type);
+ break;
+
+ case offsetof(struct bpf_sock_ops, bytes_received):
+ SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
+ struct tcp_sock);
+ break;
+
+ case offsetof(struct bpf_sock_ops, bytes_acked):
+ SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
+ break;
+
}
return insn - insn_buf;
}
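For illustration, the instruction sequence that SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock) emits is roughly equivalent to this C (a sketch of the semantics, not of the actual bytecode):

    u32 read_snd_cwnd(struct bpf_sock_ops_kern *ctx)
    {
            if (!ctx->is_fullsock)
                    return 0;  /* the JEQ skips the loads, leaving 0 in dst_reg */
            return ((struct tcp_sock *)ctx->sk)->snd_cwnd;
    }

The SET variant performs the same fullsock check before the store, using a scratch register that is saved into and restored from the new temp field.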
@@ -4473,6 +4787,7 @@ const struct bpf_verifier_ops sk_filter_verifier_ops = {
};
const struct bpf_prog_ops sk_filter_prog_ops = {
+ .test_run = bpf_prog_test_run_skb,
};
const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 15ce30063765..559db9ea8d86 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -24,6 +24,7 @@
#include <linux/tcp.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
+#include <uapi/linux/batadv_packet.h>
static void dissector_set_key(struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
@@ -133,10 +134,10 @@ skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
ctrl->addr_type = type;
}
-static void
-__skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
- struct flow_dissector *flow_dissector,
- void *target_container)
+void
+skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container)
{
struct ip_tunnel_info *info;
struct ip_tunnel_key *key;
@@ -212,6 +213,7 @@ __skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
tp->dst = key->tp_dst;
}
}
+EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
@@ -436,6 +438,57 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
return FLOW_DISSECT_RET_PROTO_AGAIN;
}
+/**
+ * __skb_flow_dissect_batadv() - dissect batman-adv header
+ * @skb: sk_buff to with the batman-adv header
+ * @key_control: flow dissectors control key
+ * @data: raw buffer pointer to the packet, if NULL use skb->data
+ * @p_proto: pointer used to update the protocol to process next
+ * @p_nhoff: pointer used to update inner network header offset
+ * @hlen: packet header length
+ * @flags: any combination of FLOW_DISSECTOR_F_*
+ *
+ * An attempt is made to dissect ETH_P_BATMAN packets. Only
+ * &struct batadv_unicast_packet headers are actually processed, because they
+ * contain an inner ethernet header and are usually followed by an actual
+ * network header. This allows the flow dissector to continue processing the
+ * packet.
+ *
+ * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast_packet was
+ * found, FLOW_DISSECT_RET_OUT_GOOD when the dissector should stop after the
+ * encapsulation, otherwise FLOW_DISSECT_RET_OUT_BAD
+ */
+static enum flow_dissect_ret
+__skb_flow_dissect_batadv(const struct sk_buff *skb,
+ struct flow_dissector_key_control *key_control,
+ void *data, __be16 *p_proto, int *p_nhoff, int hlen,
+ unsigned int flags)
+{
+ struct {
+ struct batadv_unicast_packet batadv_unicast;
+ struct ethhdr eth;
+ } *hdr, _hdr;
+
+ hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
+ &_hdr);
+ if (!hdr)
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
+ return FLOW_DISSECT_RET_OUT_BAD;
+
+ *p_proto = hdr->eth.h_proto;
+ *p_nhoff += sizeof(*hdr);
+
+ key_control->flags |= FLOW_DIS_ENCAPSULATION;
+ if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+ return FLOW_DISSECT_RET_OUT_GOOD;
+
+ return FLOW_DISSECT_RET_PROTO_AGAIN;
+}
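Declaring the two headers as one combined struct lets a single bounded __skb_header_pointer() call validate and fetch both the batman-adv header and the inner ethernet header at once, falling back to the on-stack _hdr copy when the data is not linear in the skb.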
+
static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
@@ -576,9 +629,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
- __skb_flow_dissect_tunnel_info(skb, flow_dissector,
- target_container);
-
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct ethhdr *eth = eth_hdr(skb);
@@ -817,6 +867,11 @@ proto_again:
nhoff, hlen);
break;
+ case htons(ETH_P_BATMAN):
+ fdret = __skb_flow_dissect_batadv(skb, key_control, data,
+ &proto, &nhoff, hlen, flags);
+ break;
+
default:
fdret = FLOW_DISSECT_RET_OUT_BAD;
break;
@@ -976,8 +1031,8 @@ ip_proto_again:
out_good:
ret = true;
- key_control->thoff = (u16)nhoff;
out:
+ key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
@@ -985,7 +1040,6 @@ out:
out_bad:
ret = false;
- key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9834cfa21b21..0a3f88f08727 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -159,7 +159,11 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
est->intvl_log = intvl_log;
est->cpu_bstats = cpu_bstats;
+ if (stats_lock)
+ local_bh_disable();
est_fetch_counters(est, &b);
+ if (stats_lock)
+ local_bh_enable();
est->last_bytes = b.bytes;
est->last_packets = b.packets;
old = rcu_dereference_protected(*rate_est, 1);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 87f28557b329..b2b2323bdc84 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -252,10 +252,10 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
}
}
-static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
- const struct gnet_stats_queue __percpu *cpu,
- const struct gnet_stats_queue *q,
- __u32 qlen)
+void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu,
+ const struct gnet_stats_queue *q,
+ __u32 qlen)
{
if (cpu) {
__gnet_stats_copy_queue_cpu(qstats, cpu);
@@ -269,6 +269,7 @@ static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
qstats->qlen = qlen;
}
+EXPORT_SYMBOL(__gnet_stats_copy_queue);
/**
* gnet_stats_copy_queue - copy queue statistics into statistics TLV
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 982861607f88..e38e641e98d5 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -92,7 +92,7 @@ static bool linkwatch_urgent_event(struct net_device *dev)
if (dev->ifindex != dev_get_iflink(dev))
return true;
- if (dev->priv_flags & IFF_TEAM_PORT)
+ if (netif_is_lag_port(dev) || netif_is_lag_master(dev))
return true;
return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d1f5fe986edd..7b7a14abba28 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -532,7 +532,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
- hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
if (n->parms->dead) {
rc = ERR_PTR(-EINVAL);
@@ -544,7 +544,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
n1 != NULL;
n1 = rcu_dereference_protected(n1->next,
lockdep_is_held(&tbl->lock))) {
- if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
+ if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
if (want_ref)
neigh_hold(n1);
rc = n1;
@@ -2862,7 +2862,6 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file)
};
static const struct file_operations neigh_stat_seq_fops = {
- .owner = THIS_MODULE,
.open = neigh_stat_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 615ccab55f38..e010bb800d7b 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -182,7 +182,6 @@ static int dev_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations dev_seq_fops = {
- .owner = THIS_MODULE,
.open = dev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -202,7 +201,6 @@ static int softnet_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations softnet_seq_fops = {
- .owner = THIS_MODULE,
.open = softnet_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -306,7 +304,6 @@ static int ptype_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations ptype_seq_fops = {
- .owner = THIS_MODULE,
.open = ptype_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -387,7 +384,6 @@ static int dev_mc_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations dev_mc_seq_fops = {
- .owner = THIS_MODULE,
.open = dev_mc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 799b75268291..60a5ad2c33ee 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -295,10 +295,31 @@ static ssize_t carrier_changes_show(struct device *dev,
struct net_device *netdev = to_net_dev(dev);
return sprintf(buf, fmt_dec,
- atomic_read(&netdev->carrier_changes));
+ atomic_read(&netdev->carrier_up_count) +
+ atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);
+static ssize_t carrier_up_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+
+ return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
+}
+static DEVICE_ATTR_RO(carrier_up_count);
+
+static ssize_t carrier_down_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+
+ return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
+}
+static DEVICE_ATTR_RO(carrier_down_count);
+
/* read-write attributes */
static int change_mtu(struct net_device *dev, unsigned long new_mtu)
@@ -325,29 +346,6 @@ static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
}
NETDEVICE_SHOW_RW(flags, fmt_hex);
-static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
-{
- unsigned int orig_len = dev->tx_queue_len;
- int res;
-
- if (new_len != (unsigned int)new_len)
- return -ERANGE;
-
- if (new_len != orig_len) {
- dev->tx_queue_len = new_len;
- res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
- res = notifier_to_errno(res);
- if (res) {
- netdev_err(dev,
- "refused to change device tx_queue_len\n");
- dev->tx_queue_len = orig_len;
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
static ssize_t tx_queue_len_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
@@ -355,7 +353,7 @@ static ssize_t tx_queue_len_store(struct device *dev,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- return netdev_store(dev, attr, buf, len, change_tx_queue_len);
+ return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);
@@ -547,6 +545,8 @@ static struct attribute *net_class_attrs[] __ro_after_init = {
&dev_attr_phys_port_name.attr,
&dev_attr_phys_switch_id.attr,
&dev_attr_proto_down.attr,
+ &dev_attr_carrier_up_count.attr,
+ &dev_attr_carrier_down_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(net_class);
@@ -961,7 +961,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
while (--i >= new_num) {
struct kobject *kobj = &dev->_rx[i].kobj;
- if (!atomic_read(&dev_net(dev)->count))
+ if (!refcount_read(&dev_net(dev)->count))
kobj->uevent_suppress = 1;
if (dev->sysfs_rx_queue_group)
sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1367,7 +1367,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
while (--i >= new_num) {
struct netdev_queue *queue = dev->_tx + i;
- if (!atomic_read(&dev_net(dev)->count))
+ if (!refcount_read(&dev_net(dev)->count))
queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
{
struct device *dev = &ndev->dev;
- if (!atomic_read(&dev_net(ndev)->count))
+ if (!refcount_read(&dev_net(ndev)->count))
dev_set_uevent_suppress(dev, 1);
kobject_get(&dev->kobj);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 60a71be75aea..3cad5f51afd3 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -35,7 +35,7 @@ LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
struct net init_net = {
- .count = ATOMIC_INIT(1),
+ .count = REFCOUNT_INIT(1),
.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);
@@ -221,17 +221,26 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
*/
int peernet2id_alloc(struct net *net, struct net *peer)
{
- bool alloc;
+ bool alloc = false, alive = false;
int id;
- if (atomic_read(&net->count) == 0)
+ if (refcount_read(&net->count) == 0)
return NETNSA_NSID_NOT_ASSIGNED;
spin_lock_bh(&net->nsid_lock);
- alloc = atomic_read(&peer->count) == 0 ? false : true;
+ /*
+ * When peer is obtained from RCU lists, we may race with
+ * its cleanup. Check whether it's alive, and this guarantees
+ * we never hash a peer back to net->netns_ids, after it has
+ * just been idr_remove()'d from there in cleanup_net().
+ */
+ if (maybe_get_net(peer))
+ alive = alloc = true;
id = __peernet2id_alloc(net, peer, &alloc);
spin_unlock_bh(&net->nsid_lock);
if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id);
+ if (alive)
+ put_net(peer);
return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);
@@ -264,11 +273,9 @@ struct net *get_net_ns_by_id(struct net *net, int id)
return NULL;
rcu_read_lock();
- spin_lock_bh(&net->nsid_lock);
peer = idr_find(&net->netns_ids, id);
if (peer)
peer = maybe_get_net(peer);
- spin_unlock_bh(&net->nsid_lock);
rcu_read_unlock();
return peer;
@@ -284,7 +291,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
int error = 0;
LIST_HEAD(net_exit_list);
- atomic_set(&net->count, 1);
+ refcount_set(&net->count, 1);
refcount_set(&net->passive, 1);
net->dev_base_seq = 1;
net->user_ns = user_ns;
@@ -432,13 +439,40 @@ struct net *copy_net_ns(unsigned long flags,
return net;
}
+static void unhash_nsid(struct net *net, struct net *last)
+{
+ struct net *tmp;
+ /* This function is only called from cleanup_net() work,
+ * and this work is the only process that may delete
+ * a net from net_namespace_list. So, while the code
+ * below is executing, the list may only grow. Thus, we
+ * do not use for_each_net_rcu() or rtnl_lock().
+ */
+ for_each_net(tmp) {
+ int id;
+
+ spin_lock_bh(&tmp->nsid_lock);
+ id = __peernet2id(tmp, net);
+ if (id >= 0)
+ idr_remove(&tmp->netns_ids, id);
+ spin_unlock_bh(&tmp->nsid_lock);
+ if (id >= 0)
+ rtnl_net_notifyid(tmp, RTM_DELNSID, id);
+ if (tmp == last)
+ break;
+ }
+ spin_lock_bh(&net->nsid_lock);
+ idr_destroy(&net->netns_ids);
+ spin_unlock_bh(&net->nsid_lock);
+}
+
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
- struct net *net, *tmp;
+ struct net *net, *tmp, *last;
struct list_head net_kill_list;
LIST_HEAD(net_exit_list);
@@ -451,26 +485,25 @@ static void cleanup_net(struct work_struct *work)
/* Don't let anyone else find us. */
rtnl_lock();
- list_for_each_entry(net, &net_kill_list, cleanup_list) {
+ list_for_each_entry(net, &net_kill_list, cleanup_list)
list_del_rcu(&net->list);
- list_add_tail(&net->exit_list, &net_exit_list);
- for_each_net(tmp) {
- int id;
-
- spin_lock_bh(&tmp->nsid_lock);
- id = __peernet2id(tmp, net);
- if (id >= 0)
- idr_remove(&tmp->netns_ids, id);
- spin_unlock_bh(&tmp->nsid_lock);
- if (id >= 0)
- rtnl_net_notifyid(tmp, RTM_DELNSID, id);
- }
- spin_lock_bh(&net->nsid_lock);
- idr_destroy(&net->netns_ids);
- spin_unlock_bh(&net->nsid_lock);
+ /* Cache the last net. After we unlock rtnl, no new net
+ * added to net_namespace_list can assign an nsid to a
+ * net from net_kill_list (see peernet2id_alloc()).
+ * So, we skip them in unhash_nsid().
+ *
+ * Note that unhash_nsid() does not delete nsid links
+ * between net_kill_list's nets, as they have already
+ * been deleted from net_namespace_list. But this would
+ * be useless anyway, as their netns_ids are destroyed
+ * there.
+ */
+ last = list_last_entry(&net_namespace_list, struct net, list);
+ rtnl_unlock();
+ list_for_each_entry(net, &net_kill_list, cleanup_list) {
+ unhash_nsid(net, last);
+ list_add_tail(&net->exit_list, &net_exit_list);
}
- rtnl_unlock();
/*
* Another CPU might be rcu-iterating the list, wait for it.
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f95a15086225..b8ab5c829511 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -184,25 +184,44 @@
#define func_enter() pr_debug("entering %s\n", __func__);
+#define PKT_FLAGS \
+ pf(IPV6) /* Interface in IPV6 Mode */ \
+ pf(IPSRC_RND) /* IP-Src Random */ \
+ pf(IPDST_RND) /* IP-Dst Random */ \
+ pf(TXSIZE_RND) /* Transmit size is random */ \
+ pf(UDPSRC_RND) /* UDP-Src Random */ \
+ pf(UDPDST_RND) /* UDP-Dst Random */ \
+ pf(UDPCSUM) /* Include UDP checksum */ \
+ pf(NO_TIMESTAMP) /* Don't timestamp packets (default TS) */ \
+ pf(MPLS_RND) /* Random MPLS labels */ \
+ pf(QUEUE_MAP_RND) /* queue map Random */ \
+ pf(QUEUE_MAP_CPU) /* queue map mirrors smp_processor_id() */ \
+ pf(FLOW_SEQ) /* Sequential flows */ \
+ pf(IPSEC) /* ipsec on for flows */ \
+ pf(MACSRC_RND) /* MAC-Src Random */ \
+ pf(MACDST_RND) /* MAC-Dst Random */ \
+ pf(VID_RND) /* Random VLAN ID */ \
+ pf(SVID_RND) /* Random SVLAN ID */ \
+ pf(NODE) /* Node memory alloc */ \
+
+#define pf(flag) flag##_SHIFT,
+enum pkt_flags {
+ PKT_FLAGS
+};
+#undef pf
+
/* Device flag bits */
-#define F_IPSRC_RND (1<<0) /* IP-Src Random */
-#define F_IPDST_RND (1<<1) /* IP-Dst Random */
-#define F_UDPSRC_RND (1<<2) /* UDP-Src Random */
-#define F_UDPDST_RND (1<<3) /* UDP-Dst Random */
-#define F_MACSRC_RND (1<<4) /* MAC-Src Random */
-#define F_MACDST_RND (1<<5) /* MAC-Dst Random */
-#define F_TXSIZE_RND (1<<6) /* Transmit size is random */
-#define F_IPV6 (1<<7) /* Interface in IPV6 Mode */
-#define F_MPLS_RND (1<<8) /* Random MPLS labels */
-#define F_VID_RND (1<<9) /* Random VLAN ID */
-#define F_SVID_RND (1<<10) /* Random SVLAN ID */
-#define F_FLOW_SEQ (1<<11) /* Sequential flows */
-#define F_IPSEC_ON (1<<12) /* ipsec on for flows */
-#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
-#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
-#define F_NODE (1<<15) /* Node memory alloc*/
-#define F_UDPCSUM (1<<16) /* Include UDP checksum */
-#define F_NO_TIMESTAMP (1<<17) /* Don't timestamp packets (default TS) */
+#define pf(flag) static const __u32 F_##flag = (1<<flag##_SHIFT);
+PKT_FLAGS
+#undef pf
+
+#define pf(flag) __stringify(flag),
+static char *pkt_flag_names[] = {
+ PKT_FLAGS
+};
+#undef pf
+
+#define NR_PKT_FLAGS ARRAY_SIZE(pkt_flag_names)
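The PKT_FLAGS list is an X-macro: it is expanded three times with different definitions of pf() to generate, in lockstep, the *_SHIFT enum, the F_* bit constants, and the name table used below by pktgen_if_show(). A stripped-down, standalone sketch of the same technique:

    #include <stdio.h>

    #define COLORS \
            cf(RED)   \
            cf(GREEN) \
            cf(BLUE)

    #define cf(c) c##_SHIFT,        /* RED_SHIFT = 0, GREEN_SHIFT = 1, ... */
    enum color_shift { COLORS };
    #undef cf

    #define cf(c) static const unsigned F_##c = 1u << c##_SHIFT;
    COLORS                          /* F_RED = 1, F_GREEN = 2, F_BLUE = 4 */
    #undef cf

    #define cf(c) #c,
    static const char *color_names[] = { COLORS };
    #undef cf

    int main(void)
    {
            unsigned i, flags = F_RED | F_BLUE;

            for (i = 0; i < sizeof(color_names) / sizeof(color_names[0]); i++)
                    if (flags & (1u << i))
                            printf("%s ", color_names[i]);
            printf("\n");           /* prints: RED BLUE */
            return 0;
    }

Keeping the three expansions in one list makes it impossible for the bit values and the printed names to drift apart.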
/* Thread control flag bits */
#define T_STOP (1<<0) /* Stop run */
@@ -399,7 +418,7 @@ struct pktgen_dev {
__u8 ipsmode; /* IPSEC mode (config) */
__u8 ipsproto; /* IPSEC type (config) */
__u32 spi;
- struct dst_entry dst;
+ struct xfrm_dst xdst;
struct dst_ops dstops;
#endif
char result[512];
@@ -523,7 +542,6 @@ static int pgctrl_open(struct inode *inode, struct file *file)
}
static const struct file_operations pktgen_fops = {
- .owner = THIS_MODULE,
.open = pgctrl_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -535,6 +553,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
{
const struct pktgen_dev *pkt_dev = seq->private;
ktime_t stopped;
+ unsigned int i;
u64 idle;
seq_printf(seq,
@@ -596,7 +615,6 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
if (pkt_dev->nr_labels) {
- unsigned int i;
seq_puts(seq, " mpls: ");
for (i = 0; i < pkt_dev->nr_labels; i++)
seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
@@ -632,68 +650,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_puts(seq, " Flags: ");
- if (pkt_dev->flags & F_IPV6)
- seq_puts(seq, "IPV6 ");
-
- if (pkt_dev->flags & F_IPSRC_RND)
- seq_puts(seq, "IPSRC_RND ");
-
- if (pkt_dev->flags & F_IPDST_RND)
- seq_puts(seq, "IPDST_RND ");
-
- if (pkt_dev->flags & F_TXSIZE_RND)
- seq_puts(seq, "TXSIZE_RND ");
-
- if (pkt_dev->flags & F_UDPSRC_RND)
- seq_puts(seq, "UDPSRC_RND ");
-
- if (pkt_dev->flags & F_UDPDST_RND)
- seq_puts(seq, "UDPDST_RND ");
-
- if (pkt_dev->flags & F_UDPCSUM)
- seq_puts(seq, "UDPCSUM ");
-
- if (pkt_dev->flags & F_NO_TIMESTAMP)
- seq_puts(seq, "NO_TIMESTAMP ");
-
- if (pkt_dev->flags & F_MPLS_RND)
- seq_puts(seq, "MPLS_RND ");
-
- if (pkt_dev->flags & F_QUEUE_MAP_RND)
- seq_puts(seq, "QUEUE_MAP_RND ");
+ for (i = 0; i < NR_PKT_FLAGS; i++) {
+		if (i == FLOW_SEQ_SHIFT)
+ if (!pkt_dev->cflows)
+ continue;
- if (pkt_dev->flags & F_QUEUE_MAP_CPU)
- seq_puts(seq, "QUEUE_MAP_CPU ");
-
- if (pkt_dev->cflows) {
- if (pkt_dev->flags & F_FLOW_SEQ)
- seq_puts(seq, "FLOW_SEQ "); /*in sequence flows*/
- else
- seq_puts(seq, "FLOW_RND ");
- }
+ if (pkt_dev->flags & (1 << i))
+ seq_printf(seq, "%s ", pkt_flag_names[i]);
+		else if (i == FLOW_SEQ_SHIFT)
+ seq_puts(seq, "FLOW_RND ");
#ifdef CONFIG_XFRM
- if (pkt_dev->flags & F_IPSEC_ON) {
- seq_puts(seq, "IPSEC ");
- if (pkt_dev->spi)
+		if (i == IPSEC_SHIFT && pkt_dev->spi)
seq_printf(seq, "spi:%u", pkt_dev->spi);
- }
#endif
-
- if (pkt_dev->flags & F_MACSRC_RND)
- seq_puts(seq, "MACSRC_RND ");
-
- if (pkt_dev->flags & F_MACDST_RND)
- seq_puts(seq, "MACDST_RND ");
-
- if (pkt_dev->flags & F_VID_RND)
- seq_puts(seq, "VID_RND ");
-
- if (pkt_dev->flags & F_SVID_RND)
- seq_puts(seq, "SVID_RND ");
-
- if (pkt_dev->flags & F_NODE)
- seq_puts(seq, "NODE_ALLOC ");
+ }
seq_puts(seq, "\n");
@@ -859,6 +830,35 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
return i;
}
+static __u32 pktgen_read_flag(const char *f, bool *disable)
+{
+ __u32 i;
+
+ if (f[0] == '!') {
+ *disable = true;
+ f++;
+ }
+
+ for (i = 0; i < NR_PKT_FLAGS; i++) {
+ if (!IS_ENABLED(CONFIG_XFRM) && i == IPSEC_SHIFT)
+ continue;
+
+ /* allow only disabling ipv6 flag */
+ if (!*disable && i == IPV6_SHIFT)
+ continue;
+
+ if (strcmp(f, pkt_flag_names[i]) == 0)
+ return 1 << i;
+ }
+
+ if (strcmp(f, "FLOW_RND") == 0) {
+ *disable = !*disable;
+ return F_FLOW_SEQ;
+ }
+
+ return 0;
+}
+
static ssize_t pktgen_if_write(struct file *file,
const char __user * user_buffer, size_t count,
loff_t * offset)
@@ -1216,7 +1216,10 @@ static ssize_t pktgen_if_write(struct file *file,
return count;
}
if (!strcmp(name, "flag")) {
+ __u32 flag;
char f[32];
+ bool disable = false;
+
memset(f, 0, 32);
len = strn_len(&user_buffer[i], sizeof(f) - 1);
if (len < 0)
@@ -1225,107 +1228,15 @@ static ssize_t pktgen_if_write(struct file *file,
if (copy_from_user(f, &user_buffer[i], len))
return -EFAULT;
i += len;
- if (strcmp(f, "IPSRC_RND") == 0)
- pkt_dev->flags |= F_IPSRC_RND;
-
- else if (strcmp(f, "!IPSRC_RND") == 0)
- pkt_dev->flags &= ~F_IPSRC_RND;
-
- else if (strcmp(f, "TXSIZE_RND") == 0)
- pkt_dev->flags |= F_TXSIZE_RND;
-
- else if (strcmp(f, "!TXSIZE_RND") == 0)
- pkt_dev->flags &= ~F_TXSIZE_RND;
-
- else if (strcmp(f, "IPDST_RND") == 0)
- pkt_dev->flags |= F_IPDST_RND;
-
- else if (strcmp(f, "!IPDST_RND") == 0)
- pkt_dev->flags &= ~F_IPDST_RND;
-
- else if (strcmp(f, "UDPSRC_RND") == 0)
- pkt_dev->flags |= F_UDPSRC_RND;
-
- else if (strcmp(f, "!UDPSRC_RND") == 0)
- pkt_dev->flags &= ~F_UDPSRC_RND;
-
- else if (strcmp(f, "UDPDST_RND") == 0)
- pkt_dev->flags |= F_UDPDST_RND;
-
- else if (strcmp(f, "!UDPDST_RND") == 0)
- pkt_dev->flags &= ~F_UDPDST_RND;
-
- else if (strcmp(f, "MACSRC_RND") == 0)
- pkt_dev->flags |= F_MACSRC_RND;
-
- else if (strcmp(f, "!MACSRC_RND") == 0)
- pkt_dev->flags &= ~F_MACSRC_RND;
-
- else if (strcmp(f, "MACDST_RND") == 0)
- pkt_dev->flags |= F_MACDST_RND;
-
- else if (strcmp(f, "!MACDST_RND") == 0)
- pkt_dev->flags &= ~F_MACDST_RND;
-
- else if (strcmp(f, "MPLS_RND") == 0)
- pkt_dev->flags |= F_MPLS_RND;
-
- else if (strcmp(f, "!MPLS_RND") == 0)
- pkt_dev->flags &= ~F_MPLS_RND;
- else if (strcmp(f, "VID_RND") == 0)
- pkt_dev->flags |= F_VID_RND;
+ flag = pktgen_read_flag(f, &disable);
- else if (strcmp(f, "!VID_RND") == 0)
- pkt_dev->flags &= ~F_VID_RND;
-
- else if (strcmp(f, "SVID_RND") == 0)
- pkt_dev->flags |= F_SVID_RND;
-
- else if (strcmp(f, "!SVID_RND") == 0)
- pkt_dev->flags &= ~F_SVID_RND;
-
- else if (strcmp(f, "FLOW_SEQ") == 0)
- pkt_dev->flags |= F_FLOW_SEQ;
-
- else if (strcmp(f, "QUEUE_MAP_RND") == 0)
- pkt_dev->flags |= F_QUEUE_MAP_RND;
-
- else if (strcmp(f, "!QUEUE_MAP_RND") == 0)
- pkt_dev->flags &= ~F_QUEUE_MAP_RND;
-
- else if (strcmp(f, "QUEUE_MAP_CPU") == 0)
- pkt_dev->flags |= F_QUEUE_MAP_CPU;
-
- else if (strcmp(f, "!QUEUE_MAP_CPU") == 0)
- pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
-#ifdef CONFIG_XFRM
- else if (strcmp(f, "IPSEC") == 0)
- pkt_dev->flags |= F_IPSEC_ON;
-#endif
-
- else if (strcmp(f, "!IPV6") == 0)
- pkt_dev->flags &= ~F_IPV6;
-
- else if (strcmp(f, "NODE_ALLOC") == 0)
- pkt_dev->flags |= F_NODE;
-
- else if (strcmp(f, "!NODE_ALLOC") == 0)
- pkt_dev->flags &= ~F_NODE;
-
- else if (strcmp(f, "UDPCSUM") == 0)
- pkt_dev->flags |= F_UDPCSUM;
-
- else if (strcmp(f, "!UDPCSUM") == 0)
- pkt_dev->flags &= ~F_UDPCSUM;
-
- else if (strcmp(f, "NO_TIMESTAMP") == 0)
- pkt_dev->flags |= F_NO_TIMESTAMP;
-
- else if (strcmp(f, "!NO_TIMESTAMP") == 0)
- pkt_dev->flags &= ~F_NO_TIMESTAMP;
-
- else {
+ if (flag) {
+ if (disable)
+ pkt_dev->flags &= ~flag;
+ else
+ pkt_dev->flags |= flag;
+ } else {
sprintf(pg_result,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
f,
@@ -1804,7 +1715,6 @@ static int pktgen_if_open(struct inode *inode, struct file *file)
}
static const struct file_operations pktgen_if_fops = {
- .owner = THIS_MODULE,
.open = pktgen_if_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1942,7 +1852,6 @@ static int pktgen_thread_open(struct inode *inode, struct file *file)
}
static const struct file_operations pktgen_thread_fops = {
- .owner = THIS_MODULE,
.open = pktgen_thread_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2544,7 +2453,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
pkt_dev->flows[flow].cur_daddr =
pkt_dev->cur_daddr;
#ifdef CONFIG_XFRM
- if (pkt_dev->flags & F_IPSEC_ON)
+ if (pkt_dev->flags & F_IPSEC)
get_ipsec_sa(pkt_dev, flow);
#endif
pkt_dev->nflows++;
@@ -2609,7 +2518,7 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
* supports both transport/tunnel mode + ESP/AH type.
*/
if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
- skb->_skb_refdst = (unsigned long)&pkt_dev->dst | SKB_DST_NOREF;
+ skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;
rcu_read_lock_bh();
err = x->outer_mode->output(x, skb);
@@ -2649,7 +2558,7 @@ static void free_SAs(struct pktgen_dev *pkt_dev)
static int process_ipsec(struct pktgen_dev *pkt_dev,
struct sk_buff *skb, __be16 protocol)
{
- if (pkt_dev->flags & F_IPSEC_ON) {
+ if (pkt_dev->flags & F_IPSEC) {
struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
int nhead = 0;
if (x) {
@@ -3742,10 +3651,10 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
* performance under such circumstance.
*/
pkt_dev->dstops.family = AF_INET;
- pkt_dev->dst.dev = pkt_dev->odev;
- dst_init_metrics(&pkt_dev->dst, pktgen_dst_metrics, false);
- pkt_dev->dst.child = &pkt_dev->dst;
- pkt_dev->dst.ops = &pkt_dev->dstops;
+ pkt_dev->xdst.u.dst.dev = pkt_dev->odev;
+ dst_init_metrics(&pkt_dev->xdst.u.dst, pktgen_dst_metrics, false);
+ pkt_dev->xdst.child = &pkt_dev->xdst.u.dst;
+ pkt_dev->xdst.u.dst.ops = &pkt_dev->dstops;
#endif
return add_dev_to_thread(t, pkt_dev);
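
pktgen_if_write() now funnels every flag string through pktgen_read_flag(), which peels an optional '!' prefix and then scans the generated name table, collapsing dozens of strcmp() branches into one loop. A self-contained sketch of that lookup shape (flag_names and read_flag are illustrative names, not pktgen's symbols):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static const char *flag_names[] = { "ALPHA", "BETA", "GAMMA" };
    #define NR_FLAGS (sizeof(flag_names) / sizeof(flag_names[0]))

    /* Return the bit for name "f"; a leading '!' requests clearing. */
    static unsigned read_flag(const char *f, bool *disable)
    {
        if (f[0] == '!') {
            *disable = true;
            f++;
        }
        for (unsigned i = 0; i < NR_FLAGS; i++)
            if (strcmp(f, flag_names[i]) == 0)
                return 1u << i;
        return 0;                       /* unknown name */
    }

    int main(void)
    {
        unsigned flags = 0;
        bool disable = false;
        unsigned bit = read_flag("!BETA", &disable);

        if (bit)
            flags = disable ? (flags & ~bit) : (flags | bit);
        printf("flags=%#x\n", flags);   /* flags=0 */
        return 0;
    }
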
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index dabba2a91fc8..204297dffd2a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -62,7 +62,9 @@
struct rtnl_link {
rtnl_doit_func doit;
rtnl_dumpit_func dumpit;
+ struct module *owner;
unsigned int flags;
+ struct rcu_head rcu;
};
static DEFINE_MUTEX(rtnl_mutex);
@@ -127,8 +129,7 @@ bool lockdep_rtnl_is_held(void)
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
-static struct rtnl_link __rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
-static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1];
+static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
static inline int rtm_msgindex(int msgtype)
{
@@ -144,72 +145,127 @@ static inline int rtm_msgindex(int msgtype)
return msgindex;
}
-/**
- * __rtnl_register - Register a rtnetlink message type
- * @protocol: Protocol family or PF_UNSPEC
- * @msgtype: rtnetlink message type
- * @doit: Function pointer called for each request message
- * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
- * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions
- *
- * Registers the specified function pointers (at least one of them has
- * to be non-NULL) to be called whenever a request message for the
- * specified protocol family and message type is received.
- *
- * The special protocol family PF_UNSPEC may be used to define fallback
- * function pointers for the case when no entry for the specific protocol
- * family exists.
- *
- * Returns 0 on success or a negative error code.
- */
-int __rtnl_register(int protocol, int msgtype,
- rtnl_doit_func doit, rtnl_dumpit_func dumpit,
- unsigned int flags)
+static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
+{
+ struct rtnl_link **tab;
+
+ if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
+ protocol = PF_UNSPEC;
+
+ tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
+ if (!tab)
+ tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
+
+ return tab[msgtype];
+}
+
+static int rtnl_register_internal(struct module *owner,
+ int protocol, int msgtype,
+ rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+ unsigned int flags)
{
- struct rtnl_link *tab;
+ struct rtnl_link *link, *old;
+ struct rtnl_link __rcu **tab;
int msgindex;
+ int ret = -ENOBUFS;
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
- tab = rcu_dereference_raw(rtnl_msg_handlers[protocol]);
+ rtnl_lock();
+ tab = rtnl_msg_handlers[protocol];
if (tab == NULL) {
- tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
- if (tab == NULL)
- return -ENOBUFS;
+ tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
+ if (!tab)
+ goto unlock;
+ /* ensures we see the 0 stores */
rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
}
+ old = rtnl_dereference(tab[msgindex]);
+ if (old) {
+ link = kmemdup(old, sizeof(*old), GFP_KERNEL);
+ if (!link)
+ goto unlock;
+ } else {
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ goto unlock;
+ }
+
+ WARN_ON(link->owner && link->owner != owner);
+ link->owner = owner;
+
+ WARN_ON(doit && link->doit && link->doit != doit);
if (doit)
- tab[msgindex].doit = doit;
+ link->doit = doit;
+ WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
if (dumpit)
- tab[msgindex].dumpit = dumpit;
- tab[msgindex].flags |= flags;
+ link->dumpit = dumpit;
- return 0;
+ link->flags |= flags;
+
+ /* publish protocol:msgtype */
+ rcu_assign_pointer(tab[msgindex], link);
+ ret = 0;
+ if (old)
+ kfree_rcu(old, rcu);
+unlock:
+ rtnl_unlock();
+ return ret;
}
-EXPORT_SYMBOL_GPL(__rtnl_register);
+
+/**
+ * rtnl_register_module - Register a rtnetlink message type
+ *
+ * @owner: module registering the hook (THIS_MODULE)
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
+ *
+ * Like rtnl_register, but for use by removable modules.
+ */
+int rtnl_register_module(struct module *owner,
+ int protocol, int msgtype,
+ rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+ unsigned int flags)
+{
+ return rtnl_register_internal(owner, protocol, msgtype,
+ doit, dumpit, flags);
+}
+EXPORT_SYMBOL_GPL(rtnl_register_module);
/**
* rtnl_register - Register a rtnetlink message type
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
+ *
+ * Registers the specified function pointers (at least one of them has
+ * to be non-NULL) to be called whenever a request message for the
+ * specified protocol family and message type is received.
*
- * Identical to __rtnl_register() but panics on failure. This is useful
- * as failure of this function is very unlikely, it can only happen due
- * to lack of memory when allocating the chain to store all message
- * handlers for a protocol. Meant for use in init functions where lack
- * of memory implies no sense in continuing.
+ * The special protocol family PF_UNSPEC may be used to define fallback
+ * function pointers for the case when no entry for the specific protocol
+ * family exists.
*/
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func doit, rtnl_dumpit_func dumpit,
unsigned int flags)
{
- if (__rtnl_register(protocol, msgtype, doit, dumpit, flags) < 0)
- panic("Unable to register rtnetlink message handler, "
- "protocol = %d, message type = %d\n",
- protocol, msgtype);
+ int err;
+
+ err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
+ flags);
+ if (err)
+ pr_err("Unable to register rtnetlink message handler, "
+ "protocol = %d, message type = %d\n", protocol, msgtype);
}
-EXPORT_SYMBOL_GPL(rtnl_register);
/**
* rtnl_unregister - Unregister a rtnetlink message type
@@ -220,24 +276,25 @@ EXPORT_SYMBOL_GPL(rtnl_register);
*/
int rtnl_unregister(int protocol, int msgtype)
{
- struct rtnl_link *handlers;
+ struct rtnl_link **tab, *link;
int msgindex;
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
rtnl_lock();
- handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
- if (!handlers) {
+ tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
+ if (!tab) {
rtnl_unlock();
return -ENOENT;
}
- handlers[msgindex].doit = NULL;
- handlers[msgindex].dumpit = NULL;
- handlers[msgindex].flags = 0;
+ link = tab[msgindex];
+ rcu_assign_pointer(tab[msgindex], NULL);
rtnl_unlock();
+ kfree_rcu(link, rcu);
+
return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
@@ -251,20 +308,27 @@ EXPORT_SYMBOL_GPL(rtnl_unregister);
*/
void rtnl_unregister_all(int protocol)
{
- struct rtnl_link *handlers;
+ struct rtnl_link **tab, *link;
+ int msgindex;
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
rtnl_lock();
- handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
+ tab = rtnl_msg_handlers[protocol];
RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
+ for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
+ link = tab[msgindex];
+ if (!link)
+ continue;
+
+ rcu_assign_pointer(tab[msgindex], NULL);
+ kfree_rcu(link, rcu);
+ }
rtnl_unlock();
synchronize_net();
- while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 1)
- schedule();
- kfree(handlers);
+ kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
@@ -840,6 +904,10 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_MULTICAST */
nla_total_size_64bit(sizeof(__u64)) +
+ /* IFLA_VF_STATS_RX_DROPPED */
+ nla_total_size_64bit(sizeof(__u64)) +
+ /* IFLA_VF_STATS_TX_DROPPED */
+ nla_total_size_64bit(sizeof(__u64)) +
nla_total_size(sizeof(struct ifla_vf_trust)));
return size;
} else
@@ -920,8 +988,11 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_xdp_size() /* IFLA_XDP */
+ nla_total_size(4) /* IFLA_EVENT */
+ nla_total_size(4) /* IFLA_NEW_NETNSID */
+ + nla_total_size(4) /* IFLA_NEW_IFINDEX */
+ nla_total_size(1) /* IFLA_PROTO_DOWN */
+ nla_total_size(4) /* IFLA_IF_NETNSID */
+ + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
+ + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
+ 0;
}
@@ -1194,7 +1265,11 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
- vf_stats.multicast, IFLA_VF_STATS_PAD)) {
+ vf_stats.multicast, IFLA_VF_STATS_PAD) ||
+ nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
+ vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
+ nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
+ vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
nla_nest_cancel(skb, vfstats);
goto nla_put_vf_failure;
}
@@ -1261,6 +1336,7 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
{
const struct net_device_ops *ops = dev->netdev_ops;
const struct bpf_prog *generic_xdp_prog;
+ struct netdev_bpf xdp;
ASSERT_RTNL();
@@ -1273,7 +1349,10 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
if (!ops->ndo_bpf)
return XDP_ATTACHED_NONE;
- return __dev_xdp_attached(dev, ops->ndo_bpf, prog_id);
+ __dev_xdp_query(dev, ops->ndo_bpf, &xdp);
+ *prog_id = xdp.prog_id;
+
+ return xdp.prog_attached;
}
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1433,7 +1512,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
struct net_device *dev, struct net *src_net,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask,
- u32 event, int *new_nsid, int tgt_netnsid)
+ u32 event, int *new_nsid, int new_ifindex,
+ int tgt_netnsid)
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
@@ -1475,8 +1555,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
nla_put_ifalias(skb, dev) ||
nla_put_u32(skb, IFLA_CARRIER_CHANGES,
- atomic_read(&dev->carrier_changes)) ||
- nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
+ atomic_read(&dev->carrier_up_count) +
+ atomic_read(&dev->carrier_down_count)) ||
+ nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) ||
+ nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
+ atomic_read(&dev->carrier_up_count)) ||
+ nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
+ atomic_read(&dev->carrier_down_count)))
goto nla_put_failure;
if (event != IFLA_EVENT_NONE) {
@@ -1525,6 +1610,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
if (new_nsid &&
nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
goto nla_put_failure;
+ if (new_ifindex &&
+ nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
+ goto nla_put_failure;
+
rcu_read_lock();
if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
@@ -1569,6 +1658,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
[IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
+ [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
+ [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
[IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
[IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
[IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
@@ -1578,6 +1669,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_EVENT] = { .type = NLA_U32 },
[IFLA_GROUP] = { .type = NLA_U32 },
[IFLA_IF_NETNSID] = { .type = NLA_S32 },
+ [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
+ [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1681,18 +1774,18 @@ static bool link_dump_filtered(struct net_device *dev,
return false;
}
-static struct net *get_target_net(struct sk_buff *skb, int netnsid)
+static struct net *get_target_net(struct sock *sk, int netnsid)
{
struct net *net;
- net = get_net_ns_by_id(sock_net(skb->sk), netnsid);
+ net = get_net_ns_by_id(sock_net(sk), netnsid);
if (!net)
return ERR_PTR(-EINVAL);
/* For now, the caller is required to have CAP_NET_ADMIN in
* the user namespace owning the target net ns.
*/
- if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+ if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
put_net(net);
return ERR_PTR(-EACCES);
}
@@ -1733,7 +1826,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
ifla_policy, NULL) >= 0) {
if (tb[IFLA_IF_NETNSID]) {
netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
- tgt_net = get_target_net(skb, netnsid);
+ tgt_net = get_target_net(skb->sk, netnsid);
if (IS_ERR(tgt_net)) {
tgt_net = net;
netnsid = -1;
@@ -1766,7 +1859,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, 0,
flags,
- ext_filter_mask, 0, NULL,
+ ext_filter_mask, 0, NULL, 0,
netnsid);
if (err < 0) {
@@ -1815,6 +1908,49 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
}
EXPORT_SYMBOL(rtnl_link_get_net);
+/* Figure out which network namespace we are talking about by
+ * examining the link attributes in the following order:
+ *
+ * 1. IFLA_NET_NS_PID
+ * 2. IFLA_NET_NS_FD
+ * 3. IFLA_IF_NETNSID
+ */
+static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
+ struct nlattr *tb[])
+{
+ struct net *net;
+
+ if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
+ return rtnl_link_get_net(src_net, tb);
+
+ if (!tb[IFLA_IF_NETNSID])
+ return get_net(src_net);
+
+ net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_IF_NETNSID]));
+ if (!net)
+ return ERR_PTR(-EINVAL);
+
+ return net;
+}
+
+static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
+ struct net *src_net,
+ struct nlattr *tb[], int cap)
+{
+ struct net *net;
+
+ net = rtnl_link_get_net_by_nlattr(src_net, tb);
+ if (IS_ERR(net))
+ return net;
+
+ if (!netlink_ns_capable(skb, net->user_ns, cap)) {
+ put_net(net);
+ return ERR_PTR(-EPERM);
+ }
+
+ return net;
+}
+
static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
if (dev) {
@@ -2077,17 +2213,14 @@ static int do_setlink(const struct sk_buff *skb,
const struct net_device_ops *ops = dev->netdev_ops;
int err;
- if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
- struct net *net = rtnl_link_get_net(dev_net(dev), tb);
+ if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) {
+ struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
+ tb, CAP_NET_ADMIN);
if (IS_ERR(net)) {
err = PTR_ERR(net);
goto errout;
}
- if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
- put_net(net);
- err = -EPERM;
- goto errout;
- }
+
err = dev_change_net_namespace(dev, net, ifname);
put_net(net);
if (err)
@@ -2204,17 +2337,37 @@ static int do_setlink(const struct sk_buff *skb,
if (tb[IFLA_TXQLEN]) {
unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
- unsigned int orig_len = dev->tx_queue_len;
-
- if (dev->tx_queue_len ^ value) {
- dev->tx_queue_len = value;
- err = call_netdevice_notifiers(
- NETDEV_CHANGE_TX_QUEUE_LEN, dev);
- err = notifier_to_errno(err);
- if (err) {
- dev->tx_queue_len = orig_len;
- goto errout;
- }
+
+ err = dev_change_tx_queue_len(dev, value);
+ if (err)
+ goto errout;
+ status |= DO_SETLINK_MODIFIED;
+ }
+
+ if (tb[IFLA_GSO_MAX_SIZE]) {
+ u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
+
+ if (max_size > GSO_MAX_SIZE) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ if (dev->gso_max_size ^ max_size) {
+ netif_set_gso_max_size(dev, max_size);
+ status |= DO_SETLINK_MODIFIED;
+ }
+ }
+
+ if (tb[IFLA_GSO_MAX_SEGS]) {
+ u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
+
+ if (max_segs > GSO_MAX_SEGS) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ if (dev->gso_max_segs ^ max_segs) {
+ dev->gso_max_segs = max_segs;
status |= DO_SETLINK_MODIFIED;
}
}
@@ -2400,9 +2553,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
goto errout;
- if (tb[IFLA_IF_NETNSID])
- return -EOPNOTSUPP;
-
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
@@ -2487,36 +2637,53 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
- struct net_device *dev;
+ struct net *tgt_net = net;
+ struct net_device *dev = NULL;
struct ifinfomsg *ifm;
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
int err;
+ int netnsid = -1;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
if (err < 0)
return err;
- if (tb[IFLA_IF_NETNSID])
- return -EOPNOTSUPP;
-
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ if (tb[IFLA_IF_NETNSID]) {
+ netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
+ tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
+ if (IS_ERR(tgt_net))
+ return PTR_ERR(tgt_net);
+ }
+
+ err = -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
- dev = __dev_get_by_index(net, ifm->ifi_index);
+ dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
- dev = __dev_get_by_name(net, ifname);
+ dev = __dev_get_by_name(tgt_net, ifname);
else if (tb[IFLA_GROUP])
- return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
+ err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
else
- return -EINVAL;
+ goto out;
- if (!dev)
- return -ENODEV;
+ if (!dev) {
+ if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
+ err = -ENODEV;
- return rtnl_delete_link(dev);
+ goto out;
+ }
+
+ err = rtnl_delete_link(dev);
+
+out:
+ if (netnsid >= 0)
+ put_net(tgt_net);
+
+ return err;
}
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
@@ -2583,6 +2750,10 @@ struct net_device *rtnl_create_link(struct net *net,
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
if (tb[IFLA_GROUP])
dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+ if (tb[IFLA_GSO_MAX_SIZE])
+ netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
+ if (tb[IFLA_GSO_MAX_SEGS])
+ dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
return dev;
}
@@ -2781,14 +2952,10 @@ replay:
name_assign_type = NET_NAME_ENUM;
}
- dest_net = rtnl_link_get_net(net, tb);
+ dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
if (IS_ERR(dest_net))
return PTR_ERR(dest_net);
- err = -EPERM;
- if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
- goto out;
-
if (tb[IFLA_LINK_NETNSID]) {
int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
@@ -2883,7 +3050,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[IFLA_IF_NETNSID]) {
netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
- tgt_net = get_target_net(skb, netnsid);
+ tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
if (IS_ERR(tgt_net))
return PTR_ERR(tgt_net);
}
@@ -2915,7 +3082,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
err = rtnl_fill_ifinfo(nskb, dev, net,
RTM_NEWLINK, NETLINK_CB(skb).portid,
nlh->nlmsg_seq, 0, 0, ext_filter_mask,
- 0, NULL, netnsid);
+ 0, NULL, 0, netnsid);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
@@ -2973,18 +3140,26 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
s_idx = 1;
for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
+ struct rtnl_link **tab;
int type = cb->nlh->nlmsg_type-RTM_BASE;
- struct rtnl_link *handlers;
+ struct rtnl_link *link;
rtnl_dumpit_func dumpit;
if (idx < s_idx || idx == PF_PACKET)
continue;
- handlers = rtnl_dereference(rtnl_msg_handlers[idx]);
- if (!handlers)
+ if (type < 0 || type >= RTM_NR_MSGTYPES)
continue;
- dumpit = READ_ONCE(handlers[type].dumpit);
+ tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
+ if (!tab)
+ continue;
+
+ link = tab[type];
+ if (!link)
+ continue;
+
+ dumpit = link->dumpit;
if (!dumpit)
continue;
@@ -3003,7 +3178,8 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned int change,
- u32 event, gfp_t flags, int *new_nsid)
+ u32 event, gfp_t flags, int *new_nsid,
+ int new_ifindex)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
@@ -3016,7 +3192,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
type, 0, 0, change, 0, 0, event,
- new_nsid, -1);
+ new_nsid, new_ifindex, -1);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -3039,14 +3215,15 @@ void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
static void rtmsg_ifinfo_event(int type, struct net_device *dev,
unsigned int change, u32 event,
- gfp_t flags, int *new_nsid)
+ gfp_t flags, int *new_nsid, int new_ifindex)
{
struct sk_buff *skb;
if (dev->reg_state != NETREG_REGISTERED)
return;
- skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid);
+ skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
+ new_ifindex);
if (skb)
rtmsg_ifinfo_send(skb, dev, flags);
}
@@ -3054,14 +3231,15 @@ static void rtmsg_ifinfo_event(int type, struct net_device *dev,
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
gfp_t flags)
{
- rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, NULL);
+ rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
+ NULL, 0);
}
void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
- gfp_t flags, int *new_nsid)
+ gfp_t flags, int *new_nsid, int new_ifindex)
{
rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
- new_nsid);
+ new_nsid, new_ifindex);
}
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
@@ -4314,7 +4492,8 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
- struct rtnl_link *handlers;
+ struct rtnl_link *link;
+ struct module *owner;
int err = -EOPNOTSUPP;
rtnl_doit_func doit;
unsigned int flags;
@@ -4338,79 +4517,85 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
return -EPERM;
- if (family >= ARRAY_SIZE(rtnl_msg_handlers))
- family = PF_UNSPEC;
-
rcu_read_lock();
- handlers = rcu_dereference(rtnl_msg_handlers[family]);
- if (!handlers) {
- family = PF_UNSPEC;
- handlers = rcu_dereference(rtnl_msg_handlers[family]);
- }
-
if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
struct sock *rtnl;
rtnl_dumpit_func dumpit;
u16 min_dump_alloc = 0;
- dumpit = READ_ONCE(handlers[type].dumpit);
- if (!dumpit) {
+ link = rtnl_get_link(family, type);
+ if (!link || !link->dumpit) {
family = PF_UNSPEC;
- handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
- if (!handlers)
- goto err_unlock;
-
- dumpit = READ_ONCE(handlers[type].dumpit);
- if (!dumpit)
+ link = rtnl_get_link(family, type);
+ if (!link || !link->dumpit)
goto err_unlock;
}
-
- refcount_inc(&rtnl_msg_handlers_ref[family]);
+ owner = link->owner;
+ dumpit = link->dumpit;
if (type == RTM_GETLINK - RTM_BASE)
min_dump_alloc = rtnl_calcit(skb, nlh);
+ err = 0;
+ /* need to do this before rcu_read_unlock() */
+ if (!try_module_get(owner))
+ err = -EPROTONOSUPPORT;
+
rcu_read_unlock();
rtnl = net->rtnl;
- {
+ if (err == 0) {
struct netlink_dump_control c = {
.dump = dumpit,
.min_dump_alloc = min_dump_alloc,
+ .module = owner,
};
err = netlink_dump_start(rtnl, skb, nlh, &c);
+ /* netlink_dump_start() will keep a reference on
+ * module if dump is still in progress.
+ */
+ module_put(owner);
}
- refcount_dec(&rtnl_msg_handlers_ref[family]);
return err;
}
- doit = READ_ONCE(handlers[type].doit);
- if (!doit) {
+ link = rtnl_get_link(family, type);
+ if (!link || !link->doit) {
family = PF_UNSPEC;
- handlers = rcu_dereference(rtnl_msg_handlers[family]);
+ link = rtnl_get_link(PF_UNSPEC, type);
+ if (!link || !link->doit)
+ goto out_unlock;
+ }
+
+ owner = link->owner;
+ if (!try_module_get(owner)) {
+ err = -EPROTONOSUPPORT;
+ goto out_unlock;
}
- flags = READ_ONCE(handlers[type].flags);
+ flags = link->flags;
if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
- refcount_inc(&rtnl_msg_handlers_ref[family]);
- doit = READ_ONCE(handlers[type].doit);
+ doit = link->doit;
rcu_read_unlock();
if (doit)
err = doit(skb, nlh, extack);
- refcount_dec(&rtnl_msg_handlers_ref[family]);
+ module_put(owner);
return err;
}
-
rcu_read_unlock();
rtnl_lock();
- handlers = rtnl_dereference(rtnl_msg_handlers[family]);
- if (handlers) {
- doit = READ_ONCE(handlers[type].doit);
- if (doit)
- err = doit(skb, nlh, extack);
- }
+ link = rtnl_get_link(family, type);
+ if (link && link->doit)
+ err = link->doit(skb, nlh, extack);
rtnl_unlock();
+
+ module_put(owner);
+
+ return err;
+
+out_unlock:
+ rcu_read_unlock();
return err;
err_unlock:
@@ -4454,7 +4639,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
case NETDEV_CHANGELOWERSTATE:
case NETDEV_CHANGE_TX_QUEUE_LEN:
rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
- GFP_KERNEL, NULL);
+ GFP_KERNEL, NULL, 0);
break;
default:
break;
@@ -4498,11 +4683,6 @@ static struct pernet_operations rtnetlink_net_ops = {
void __init rtnetlink_init(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(rtnl_msg_handlers_ref); i++)
- refcount_set(&rtnl_msg_handlers_ref[i], 1);
-
if (register_pernet_subsys(&rtnetlink_net_ops))
panic("rtnetlink_init: cannot initialize rtnetlink\n");
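
The net effect of the rtnetlink rework: each protocol/msgtype slot now holds a heap-allocated struct rtnl_link that is swapped in with rcu_assign_pointer() and retired with kfree_rcu(), while callers pin the registering module with try_module_get() before leaving the RCU read section. A condensed kernel-style sketch of that pattern, using assumed names (my_entry, my_table, my_lock) rather than the rtnetlink ones:

    #include <linux/module.h>
    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_entry {
        int (*handler)(void *arg);
        struct module *owner;
        struct rcu_head rcu;
    };

    static struct my_entry __rcu *my_table[16];
    static DEFINE_MUTEX(my_lock);

    static void my_replace(int slot, struct my_entry *new)
    {
        struct my_entry *old;

        mutex_lock(&my_lock);
        old = rcu_dereference_protected(my_table[slot],
                                        lockdep_is_held(&my_lock));
        rcu_assign_pointer(my_table[slot], new);    /* publish */
        mutex_unlock(&my_lock);
        if (old)
            kfree_rcu(old, rcu);        /* freed once readers drain */
    }

    static int my_call(int slot, void *arg)
    {
        int (*fn)(void *);
        struct my_entry *e;
        struct module *owner;
        int ret;

        rcu_read_lock();
        e = rcu_dereference(my_table[slot]);
        if (!e || !try_module_get(e->owner)) {      /* pin the module */
            rcu_read_unlock();
            return -EOPNOTSUPP;
        }
        fn = e->handler;
        owner = e->owner;
        rcu_read_unlock();              /* safe: module is pinned */

        ret = fn(arg);
        module_put(owner);
        return ret;
    }

This is also why rtnl_unregister_all() can simply kfree(tab) after synchronize_net(): once every slot is NULL and a grace period has elapsed, no reader can still hold a link, so the old refcount-and-schedule() wait loop becomes unnecessary.
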
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 08f574081315..01e8285aea73 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3656,6 +3656,10 @@ normal:
skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
SKBTX_SHARED_FRAG;
+ if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+ skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+ goto err;
+
while (pos < offset + len) {
if (i >= nfrags) {
BUG_ON(skb_headlen(list_skb));
@@ -3667,6 +3671,11 @@ normal:
BUG_ON(!nfrags);
+ if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+ skb_zerocopy_clone(nskb, frag_skb,
+ GFP_ATOMIC))
+ goto err;
+
list_skb = list_skb->next;
}
@@ -3678,11 +3687,6 @@ normal:
goto err;
}
- if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
- goto err;
- if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
- goto err;
-
*nskb_frag = *frag;
__skb_frag_ref(nskb_frag);
size = skb_frag_size(nskb_frag);
diff --git a/net/core/sock.c b/net/core/sock.c
index c0b5b2f17412..1033f8ab0547 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -145,6 +145,8 @@
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
+static void sock_inuse_add(struct net *net, int val);
+
/**
* sk_ns_capable - General socket capability test
* @sk: Socket to use a capability on or through
@@ -1531,8 +1533,11 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
sk->sk_kern_sock = kern;
sock_lock_init(sk);
sk->sk_net_refcnt = kern ? 0 : 1;
- if (likely(sk->sk_net_refcnt))
+ if (likely(sk->sk_net_refcnt)) {
get_net(net);
+ sock_inuse_add(net, 1);
+ }
+
sock_net_set(sk, net);
refcount_set(&sk->sk_wmem_alloc, 1);
@@ -1595,6 +1600,9 @@ void sk_destruct(struct sock *sk)
static void __sk_free(struct sock *sk)
{
+ if (likely(sk->sk_net_refcnt))
+ sock_inuse_add(sock_net(sk), -1);
+
if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
sock_diag_broadcast_destroy(sk);
else
@@ -1716,6 +1724,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_priority = 0;
newsk->sk_incoming_cpu = raw_smp_processor_id();
atomic64_set(&newsk->sk_cookie, 0);
+ if (likely(newsk->sk_net_refcnt))
+ sock_inuse_add(sock_net(newsk), 1);
/*
* Before updating sk_refcnt, we must commit prior changes to memory
@@ -2496,7 +2506,7 @@ int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
}
EXPORT_SYMBOL(sock_no_getname);
-unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
+__poll_t sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
return 0;
}
@@ -3045,7 +3055,7 @@ static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
- __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
+ __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
@@ -3055,21 +3065,50 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
int res = 0;
for_each_possible_cpu(cpu)
- res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
+ res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
+static void sock_inuse_add(struct net *net, int val)
+{
+ this_cpu_add(*net->core.sock_inuse, val);
+}
+
+int sock_inuse_get(struct net *net)
+{
+ int cpu, res = 0;
+
+ for_each_possible_cpu(cpu)
+ res += *per_cpu_ptr(net->core.sock_inuse, cpu);
+
+ return res;
+}
+
+EXPORT_SYMBOL_GPL(sock_inuse_get);
+
static int __net_init sock_inuse_init_net(struct net *net)
{
- net->core.inuse = alloc_percpu(struct prot_inuse);
- return net->core.inuse ? 0 : -ENOMEM;
+ net->core.prot_inuse = alloc_percpu(struct prot_inuse);
+ if (net->core.prot_inuse == NULL)
+ return -ENOMEM;
+
+ net->core.sock_inuse = alloc_percpu(int);
+ if (net->core.sock_inuse == NULL)
+ goto out;
+
+ return 0;
+
+out:
+ free_percpu(net->core.prot_inuse);
+ return -ENOMEM;
}
static void __net_exit sock_inuse_exit_net(struct net *net)
{
- free_percpu(net->core.inuse);
+ free_percpu(net->core.prot_inuse);
+ free_percpu(net->core.sock_inuse);
}
static struct pernet_operations net_inuse_ops = {
@@ -3112,6 +3151,10 @@ static inline void assign_proto_idx(struct proto *prot)
static inline void release_proto_idx(struct proto *prot)
{
}
+
+static void sock_inuse_add(struct net *net, int val)
+{
+}
#endif
static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
@@ -3319,7 +3362,6 @@ static int proto_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations proto_seq_fops = {
- .owner = THIS_MODULE,
.open = proto_seq_open,
.read = seq_read,
.llseek = seq_lseek,
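
sock_inuse_add()/sock_inuse_get() above are the stock per-cpu counter idiom: writers bump only their local CPU's slot with no locking, and readers sum all slots, accepting a slightly stale total, which is fine for /proc/net/sockstat-style accounting. A kernel-style sketch of the idiom (my_counter is a placeholder name):

    #include <linux/percpu.h>

    static int __percpu *my_counter;

    static int my_counter_init(void)
    {
        my_counter = alloc_percpu(int); /* one zeroed int per CPU */
        return my_counter ? 0 : -ENOMEM;
    }

    static void my_counter_add(int val)
    {
        this_cpu_add(*my_counter, val); /* preempt-safe, lock-free */
    }

    static int my_counter_read(void)
    {
        int cpu, sum = 0;

        /* Approximate while writers run concurrently; an exact
         * total would need a lock the fast path shouldn't pay for.
         */
        for_each_possible_cpu(cpu)
            sum += *per_cpu_ptr(my_counter, cpu);
        return sum;
    }
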
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 217f4e3b82f6..146b50e30659 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -288,7 +288,7 @@ static int sock_diag_bind(struct net *net, int group)
case SKNLGRP_INET6_UDP_DESTROY:
if (!sock_diag_handlers[AF_INET6])
request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, AF_INET);
+ NETLINK_SOCK_DIAG, AF_INET6);
break;
}
return 0;
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 5eeb1d20cc38..c5bb52bc73a1 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -235,7 +235,9 @@ struct sock *reuseport_select_sock(struct sock *sk,
if (prog && skb)
sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
- else
+
+ /* no bpf or invalid bpf result: fall back to hash usage */
+ if (!sk2)
sk2 = reuse->socks[reciprocal_scale(hash, socks)];
}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cbc3dde4cfcc..f2d0462611c3 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -25,6 +25,7 @@
static int zero = 0;
static int one = 1;
+static int two __maybe_unused = 2;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
@@ -250,6 +251,46 @@ static int proc_do_rss_key(struct ctl_table *table, int write,
return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
+#ifdef CONFIG_BPF_JIT
+static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret, jit_enable = *(int *)table->data;
+ struct ctl_table tmp = *table;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ tmp.data = &jit_enable;
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret) {
+ if (jit_enable < 2 ||
+ (jit_enable == 2 && bpf_dump_raw_ok())) {
+ *(int *)table->data = jit_enable;
+ if (jit_enable == 2)
+ pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
+ } else {
+ ret = -EPERM;
+ }
+ }
+ return ret;
+}
+
+# ifdef CONFIG_HAVE_EBPF_JIT
+static int
+proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+}
+# endif
+#endif
+
static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
{
@@ -325,7 +366,14 @@ static struct ctl_table net_core_table[] = {
.data = &bpf_jit_enable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax_bpf_enable,
+# ifdef CONFIG_BPF_JIT_ALWAYS_ON
+ .extra1 = &one,
+ .extra2 = &one,
+# else
+ .extra1 = &zero,
+ .extra2 = &two,
+# endif
},
# ifdef CONFIG_HAVE_EBPF_JIT
{
@@ -333,14 +381,18 @@ static struct ctl_table net_core_table[] = {
.data = &bpf_jit_harden,
.maxlen = sizeof(int),
.mode = 0600,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax_bpf_restricted,
+ .extra1 = &zero,
+ .extra2 = &two,
},
{
.procname = "bpf_jit_kallsyms",
.data = &bpf_jit_kallsyms,
.maxlen = sizeof(int),
.mode = 0600,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax_bpf_restricted,
+ .extra1 = &zero,
+ .extra2 = &one,
},
# endif
#endif
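
Both new BPF sysctl handlers build on the same shadow-table trick: copy the ctl_table, aim .data at a local, let proc_dointvec_minmax() do the parsing and range clamping against the copy, and only write the live variable back once the extra policy (here, CAP_SYS_ADMIN plus the jit_enable == 2 rule) passes. A generic kernel-style sketch of the trick, with an assumed placeholder check standing in for the BPF-specific one:

    #include <linux/capability.h>
    #include <linux/sysctl.h>

    static bool my_extra_policy_ok(int v)
    {
        return v != 13;                 /* placeholder for the real rule */
    }

    static int my_guarded_handler(struct ctl_table *table, int write,
                                  void __user *buffer, size_t *lenp,
                                  loff_t *ppos)
    {
        int val = *(int *)table->data;  /* work on a copy */
        struct ctl_table tmp = *table;
        int ret;

        if (write && !capable(CAP_SYS_ADMIN))
            return -EPERM;

        tmp.data = &val;
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
        if (write && !ret) {
            if (my_extra_policy_ok(val))
                *(int *)table->data = val;      /* commit */
            else
                ret = -EPERM;
        }
        return ret;
    }
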
diff --git a/net/core/xdp.c b/net/core/xdp.c
new file mode 100644
index 000000000000..097a0f74e004
--- /dev/null
+++ b/net/core/xdp.c
@@ -0,0 +1,73 @@
+/* net/core/xdp.c
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * Released under terms in GPL version 2. See COPYING.
+ */
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <net/xdp.h>
+
+#define REG_STATE_NEW 0x0
+#define REG_STATE_REGISTERED 0x1
+#define REG_STATE_UNREGISTERED 0x2
+#define REG_STATE_UNUSED 0x3
+
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
+{
+ /* Simplify driver cleanup code paths, allow unreg "unused" */
+ if (xdp_rxq->reg_state == REG_STATE_UNUSED)
+ return;
+
+ WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");
+
+ xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
+ xdp_rxq->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
+
+static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
+{
+ memset(xdp_rxq, 0, sizeof(*xdp_rxq));
+}
+
+/* Returns 0 on success, negative on failure */
+int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index)
+{
+ if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
+ WARN(1, "Driver promised not to register this");
+ return -EINVAL;
+ }
+
+ if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
+ WARN(1, "Missing unregister, handled but fix driver");
+ xdp_rxq_info_unreg(xdp_rxq);
+ }
+
+ if (!dev) {
+ WARN(1, "Missing net_device from driver");
+ return -ENODEV;
+ }
+
+ /* State either UNREGISTERED or NEW */
+ xdp_rxq_info_init(xdp_rxq);
+ xdp_rxq->dev = dev;
+ xdp_rxq->queue_index = queue_index;
+
+ xdp_rxq->reg_state = REG_STATE_REGISTERED;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
+
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
+{
+ xdp_rxq->reg_state = REG_STATE_UNUSED;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
+
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
+{
+ return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
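
The intended driver contract for this new API: call xdp_rxq_info_reg() per RX queue when the queue is brought up, xdp_rxq_info_unreg() on teardown, and xdp_rxq_info_unused() for queues a driver promises never to run XDP on. A hedged sketch of the call sites, assuming a hypothetical per-queue my_ring structure:

    struct my_ring {
        struct xdp_rxq_info xdp_rxq;
        struct net_device *netdev;
        u32 queue_index;
    };

    static int my_ring_open(struct my_ring *ring)
    {
        int err;

        err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                               ring->queue_index);
        if (err)                /* e.g. -ENODEV for a NULL netdev */
            return err;

        /* ... allocate RX buffers, enable the queue ... */
        return 0;
    }

    static void my_ring_close(struct my_ring *ring)
    {
        /* ... quiesce the queue, free RX buffers ... */
        xdp_rxq_info_unreg(&ring->xdp_rxq);
    }
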
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 8c0ef71bed2f..b270e84d9c13 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -39,23 +39,6 @@ config IP_DCCP_DEBUG
Just say N.
-config NET_DCCPPROBE
- tristate "DCCP connection probing"
- depends on PROC_FS && KPROBES
- ---help---
- This module allows for capturing the changes to DCCP connection
- state in response to incoming packets. It is used for debugging
- DCCP congestion avoidance modules. If you don't understand
- what was just said, you don't need it: say N.
-
- Documentation on how to use DCCP connection probing can be found
- at:
-
- http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
-
- To compile this code as a module, choose M here: the
- module will be called dccp_probe.
-
endmenu
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index 2e7b56097bc4..5b4ff37bc806 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -21,9 +21,10 @@ obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o
dccp_ipv6-y := ipv6.o
obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
-obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o
dccp-$(CONFIG_SYSCTL) += sysctl.o
dccp_diag-y := diag.o
-dccp_probe-y := probe.o
+
+# build with local directory for trace.h
+CFLAGS_proto.o := -I$(src)
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 3de0d0362d7f..2a24f7d171a5 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -228,7 +228,7 @@ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
}
if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
- DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
+ DCCP_CRIT("Ack Vector buffer overflow: dropping old entries");
av->av_overflow = true;
}
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 1c75cd1255f6..92d016e87816 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
ccid2_pr_debug("RTO_EXPIRE\n");
+ if (sk->sk_state == DCCP_CLOSED)
+ goto out;
+
/* back-off timer */
hc->tx_rto <<= 1;
if (hc->tx_rto > DCCP_RTO_MAX)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 0c55ffb859bf..f91e3816806b 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -316,7 +316,7 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int flags, int *addr_len);
void dccp_shutdown(struct sock *sk, int how);
int inet_dccp_listen(struct socket *sock, int backlog);
-unsigned int dccp_poll(struct file *file, struct socket *sock,
+__poll_t dccp_poll(struct file *file, struct socket *sock,
poll_table *wait);
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
void dccp_req_err(struct sock *sk, u64 seq);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 178bb9833311..37ccbe62eb1a 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -63,9 +63,10 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
*/
local_bh_disable();
inet_twsk_schedule(tw, timeo);
- /* Linkage updates. */
- __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
- inet_twsk_put(tw);
+ /* Linkage updates.
+ * Note that access to tw after this point is illegal.
+ */
+ inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
local_bh_enable();
} else {
/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
deleted file mode 100644
index 3d3fda05b32d..000000000000
--- a/net/dccp/probe.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * dccp_probe - Observe the DCCP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * Modified for DCCP from Stephen Hemminger's code
- * Copyright (C) 2006, Ian McDonald <ian.mcdonald@jandi.co.nz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/dccp.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/kfifo.h>
-#include <linux/vmalloc.h>
-#include <linux/time64.h>
-#include <linux/gfp.h>
-#include <net/net_namespace.h>
-
-#include "dccp.h"
-#include "ccid.h"
-#include "ccids/ccid3.h"
-
-static int port;
-
-static int bufsize = 64 * 1024;
-
-static const char procname[] = "dccpprobe";
-
-static struct {
- struct kfifo fifo;
- spinlock_t lock;
- wait_queue_head_t wait;
- struct timespec64 tstart;
-} dccpw;
-
-static void printl(const char *fmt, ...)
-{
- va_list args;
- int len;
- struct timespec64 now;
- char tbuf[256];
-
- va_start(args, fmt);
- getnstimeofday64(&now);
-
- now = timespec64_sub(now, dccpw.tstart);
-
- len = sprintf(tbuf, "%lu.%06lu ",
- (unsigned long) now.tv_sec,
- (unsigned long) now.tv_nsec / NSEC_PER_USEC);
- len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
- va_end(args);
-
- kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
- wake_up(&dccpw.wait);
-}
-
-static int jdccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
-{
- const struct inet_sock *inet = inet_sk(sk);
- struct ccid3_hc_tx_sock *hc = NULL;
-
- if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
- hc = ccid3_hc_tx_sk(sk);
-
- if (port == 0 || ntohs(inet->inet_dport) == port ||
- ntohs(inet->inet_sport) == port) {
- if (hc)
- printl("%pI4:%u %pI4:%u %d %d %d %d %u %llu %llu %d\n",
- &inet->inet_saddr, ntohs(inet->inet_sport),
- &inet->inet_daddr, ntohs(inet->inet_dport), size,
- hc->tx_s, hc->tx_rtt, hc->tx_p,
- hc->tx_x_calc, hc->tx_x_recv >> 6,
- hc->tx_x >> 6, hc->tx_t_ipi);
- else
- printl("%pI4:%u %pI4:%u %d\n",
- &inet->inet_saddr, ntohs(inet->inet_sport),
- &inet->inet_daddr, ntohs(inet->inet_dport),
- size);
- }
-
- jprobe_return();
- return 0;
-}
-
-static struct jprobe dccp_send_probe = {
- .kp = {
- .symbol_name = "dccp_sendmsg",
- },
- .entry = jdccp_sendmsg,
-};
-
-static int dccpprobe_open(struct inode *inode, struct file *file)
-{
- kfifo_reset(&dccpw.fifo);
- getnstimeofday64(&dccpw.tstart);
- return 0;
-}
-
-static ssize_t dccpprobe_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- int error = 0, cnt = 0;
- unsigned char *tbuf;
-
- if (!buf)
- return -EINVAL;
-
- if (len == 0)
- return 0;
-
- tbuf = vmalloc(len);
- if (!tbuf)
- return -ENOMEM;
-
- error = wait_event_interruptible(dccpw.wait,
- kfifo_len(&dccpw.fifo) != 0);
- if (error)
- goto out_free;
-
- cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
- error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
-
-out_free:
- vfree(tbuf);
-
- return error ? error : cnt;
-}
-
-static const struct file_operations dccpprobe_fops = {
- .owner = THIS_MODULE,
- .open = dccpprobe_open,
- .read = dccpprobe_read,
- .llseek = noop_llseek,
-};
-
-static __init int dccpprobe_init(void)
-{
- int ret = -ENOMEM;
-
- init_waitqueue_head(&dccpw.wait);
- spin_lock_init(&dccpw.lock);
- if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
- return ret;
- if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
- goto err0;
-
- ret = register_jprobe(&dccp_send_probe);
- if (ret) {
- ret = request_module("dccp");
- if (!ret)
- ret = register_jprobe(&dccp_send_probe);
- }
-
- if (ret)
- goto err1;
-
- pr_info("DCCP watch registered (port=%d)\n", port);
- return 0;
-err1:
- remove_proc_entry(procname, init_net.proc_net);
-err0:
- kfifo_free(&dccpw.fifo);
- return ret;
-}
-module_init(dccpprobe_init);
-
-static __exit void dccpprobe_exit(void)
-{
- kfifo_free(&dccpw.fifo);
- remove_proc_entry(procname, init_net.proc_net);
- unregister_jprobe(&dccp_send_probe);
-
-}
-module_exit(dccpprobe_exit);
-
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
-module_param(bufsize, int, 0);
-
-MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>");
-MODULE_DESCRIPTION("DCCP snooper");
-MODULE_LICENSE("GPL");
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 9d43c1f40274..74685fecfdb9 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -38,6 +38,9 @@
#include "dccp.h"
#include "feat.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
EXPORT_SYMBOL_GPL(dccp_statistics);
@@ -110,7 +113,7 @@ void dccp_set_state(struct sock *sk, const int state)
/* Change state AFTER socket is unhashed to avoid closed
* socket sitting in hash tables.
*/
- sk->sk_state = state;
+ inet_sk_set_state(sk, state);
}
EXPORT_SYMBOL_GPL(dccp_set_state);
@@ -318,10 +321,10 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
* take care of normal races (between the test and the event) and we don't
* go look at any of the socket buffers directly.
*/
-unsigned int dccp_poll(struct file *file, struct socket *sock,
+__poll_t dccp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
struct sock *sk = sock->sk;
sock_poll_wait(file, sk_sleep(sk), wait);
@@ -761,6 +764,8 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int rc, size;
long timeo;
+ trace_dccp_probe(sk, len);
+
if (len > dp->dccps_mss_cache)
return -EMSGSIZE;
diff --git a/net/dccp/trace.h b/net/dccp/trace.h
new file mode 100644
index 000000000000..5062421beee9
--- /dev/null
+++ b/net/dccp/trace.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dccp
+
+#if !defined(_TRACE_DCCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DCCP_H
+
+#include <net/sock.h>
+#include "dccp.h"
+#include "ccids/ccid3.h"
+#include <linux/tracepoint.h>
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(dccp_probe,
+
+ TP_PROTO(struct sock *sk, size_t size),
+
+ TP_ARGS(sk, size),
+
+ TP_STRUCT__entry(
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, size)
+ __field(__u16, tx_s)
+ __field(__u32, tx_rtt)
+ __field(__u32, tx_p)
+ __field(__u32, tx_x_calc)
+ __field(__u64, tx_x_recv)
+ __field(__u64, tx_x)
+ __field(__u32, tx_t_ipi)
+ ),
+
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+ struct ccid3_hc_tx_sock *hc = NULL;
+
+ if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
+ hc = ccid3_hc_tx_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ /* For filtering use */
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ __entry->size = size;
+ if (hc) {
+ __entry->tx_s = hc->tx_s;
+ __entry->tx_rtt = hc->tx_rtt;
+ __entry->tx_p = hc->tx_p;
+ __entry->tx_x_calc = hc->tx_x_calc;
+ __entry->tx_x_recv = hc->tx_x_recv >> 6;
+ __entry->tx_x = hc->tx_x >> 6;
+ __entry->tx_t_ipi = hc->tx_t_ipi;
+ } else {
+ __entry->tx_s = 0;
+ memset(&__entry->tx_rtt, 0, (void *)&__entry->tx_t_ipi -
+ (void *)&__entry->tx_rtt +
+ sizeof(__entry->tx_t_ipi));
+ }
+ ),
+
+ TP_printk("src=%pISpc dest=%pISpc size=%d tx_s=%d tx_rtt=%d "
+ "tx_p=%d tx_x_calc=%u tx_x_recv=%llu tx_x=%llu tx_t_ipi=%d",
+ __entry->saddr, __entry->daddr, __entry->size,
+ __entry->tx_s, __entry->tx_rtt, __entry->tx_p,
+ __entry->tx_x_calc, __entry->tx_x_recv, __entry->tx_x,
+ __entry->tx_t_ipi)
+);
+
+#endif /* _TRACE_DCCP_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
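
This trace.h follows the standard split for kernel tracepoints: the TRACE_EVENT() block is declarations only, and exactly one .c file per subsystem expands it into definitions — here proto.c, via #define CREATE_TRACE_POINTS before including trace.h, with the Makefile adding -I$(src) so <trace/define_trace.h> can re-find the header via TRACE_INCLUDE_PATH. A schematic of the convention with a hypothetical mysubsys/my_event:

    /* mysubsys/trace.h */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM mysubsys

    #if !defined(_TRACE_MYSUBSYS_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_MYSUBSYS_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(my_event,
        TP_PROTO(int value),
        TP_ARGS(value),
        TP_STRUCT__entry(__field(int, value)),
        TP_fast_assign(__entry->value = value;),
        TP_printk("value=%d", __entry->value)
    );

    #endif /* _TRACE_MYSUBSYS_H */

    /* Outside the guard so define_trace.h can re-read this file. */
    #undef TRACE_INCLUDE_PATH
    #define TRACE_INCLUDE_PATH .
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_FILE trace
    #include <trace/define_trace.h>

    /* mysubsys/main.c (one file only):
     *    #define CREATE_TRACE_POINTS
     *    #include "trace.h"
     *    ...
     *    trace_my_event(42);
     */

Once built, the event replaces the old dccpprobe /proc interface: it appears under the tracing events directory as dccp/dccp_probe and costs nothing while disabled, unlike the removed jprobe-based module.
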
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 518cea17b811..cc1b505453a8 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1209,11 +1209,11 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len
}
-static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
+static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct dn_scp *scp = DN_SK(sk);
- int mask = datagram_poll(file, sock, wait);
+ __poll_t mask = datagram_poll(file, sock, wait);
if (!skb_queue_empty(&scp->other_receive_queue))
mask |= POLLRDBAND;
@@ -2320,7 +2320,6 @@ static int dn_socket_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations dn_socket_seq_fops = {
- .owner = THIS_MODULE,
.open = dn_socket_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 9153247dad28..c9f5e1ebb9c8 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1389,7 +1389,6 @@ static int dn_dev_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations dn_dev_seq_fops = {
- .owner = THIS_MODULE,
.open = dn_dev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1418,9 +1417,12 @@ void __init dn_dev_init(void)
dn_dev_devices_on();
- rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, 0);
- rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, 0);
- rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, 0);
+ rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWADDR,
+ dn_nl_newaddr, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELADDR,
+ dn_nl_deladdr, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETADDR,
+ NULL, dn_nl_dump_ifaddr, 0);
proc_create("decnet_dev", S_IRUGO, init_net.proc_net, &dn_dev_seq_fops);
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index b37a1b833c77..fce94cbd4378 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -792,8 +792,10 @@ void __init dn_fib_init(void)
register_dnaddr_notifier(&dn_fib_dnaddr_notifier);
- rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL, 0);
- rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWROUTE,
+ dn_fib_rtm_newroute, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELROUTE,
+ dn_fib_rtm_delroute, NULL, 0);
}
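
The rtnl_register() to rtnl_register_module() conversions in this file and in dn_dev.c pass THIS_MODULE so that rtnetlink can take a reference on the owning module while one of these handlers runs, which is what makes the registration safe for code that can be built modular. A hedged sketch of how a modular caller would check the return value (the built-in DECnet init paths above ignore it):

    static int __init demo_init(void)
    {
            int err;

            err = rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWADDR,
                                       dn_nl_newaddr, NULL, 0);
            if (err)
                    return err;     /* registration can fail on allocation */
            return 0;
    }
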
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 528119a5618e..6e37d9e6345e 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -597,7 +597,6 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations dn_neigh_seq_fops = {
- .owner = THIS_MODULE,
.open = dn_neigh_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 324cb9f2f551..ef20b8e31669 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -199,11 +199,11 @@ static void dn_dst_check_expire(struct timer_list *unused)
lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
if (atomic_read(&rt->dst.__refcnt) > 1 ||
(now - rt->dst.lastuse) < expire) {
- rtp = &rt->dst.dn_next;
+ rtp = &rt->dn_next;
continue;
}
- *rtp = rt->dst.dn_next;
- rt->dst.dn_next = NULL;
+ *rtp = rt->dn_next;
+ rt->dn_next = NULL;
dst_dev_put(&rt->dst);
dst_release(&rt->dst);
}
@@ -233,11 +233,11 @@ static int dn_dst_gc(struct dst_ops *ops)
lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
if (atomic_read(&rt->dst.__refcnt) > 1 ||
(now - rt->dst.lastuse) < expire) {
- rtp = &rt->dst.dn_next;
+ rtp = &rt->dn_next;
continue;
}
- *rtp = rt->dst.dn_next;
- rt->dst.dn_next = NULL;
+ *rtp = rt->dn_next;
+ rt->dn_next = NULL;
dst_dev_put(&rt->dst);
dst_release(&rt->dst);
break;
@@ -333,8 +333,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
if (compare_keys(&rth->fld, &rt->fld)) {
/* Put it first */
- *rthp = rth->dst.dn_next;
- rcu_assign_pointer(rth->dst.dn_next,
+ *rthp = rth->dn_next;
+ rcu_assign_pointer(rth->dn_next,
dn_rt_hash_table[hash].chain);
rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
@@ -345,10 +345,10 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
*rp = rth;
return 0;
}
- rthp = &rth->dst.dn_next;
+ rthp = &rth->dn_next;
}
- rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
+ rcu_assign_pointer(rt->dn_next, dn_rt_hash_table[hash].chain);
rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
dst_hold_and_use(&rt->dst, now);
@@ -369,8 +369,8 @@ static void dn_run_flush(struct timer_list *unused)
goto nothing_to_declare;
for(; rt; rt = next) {
- next = rcu_dereference_raw(rt->dst.dn_next);
- RCU_INIT_POINTER(rt->dst.dn_next, NULL);
+ next = rcu_dereference_raw(rt->dn_next);
+ RCU_INIT_POINTER(rt->dn_next, NULL);
dst_dev_put(&rt->dst);
dst_release(&rt->dst);
}
@@ -1183,6 +1183,7 @@ make_route:
if (rt == NULL)
goto e_nobufs;
+ rt->dn_next = NULL;
memset(&rt->fld, 0, sizeof(rt->fld));
rt->fld.saddr = oldflp->saddr;
rt->fld.daddr = oldflp->daddr;
@@ -1252,7 +1253,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *
if (!(flags & MSG_TRYHARD)) {
rcu_read_lock_bh();
for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
- rt = rcu_dereference_bh(rt->dst.dn_next)) {
+ rt = rcu_dereference_bh(rt->dn_next)) {
if ((flp->daddr == rt->fld.daddr) &&
(flp->saddr == rt->fld.saddr) &&
(flp->flowidn_mark == rt->fld.flowidn_mark) &&
@@ -1448,6 +1449,7 @@ make_route:
if (rt == NULL)
goto e_nobufs;
+ rt->dn_next = NULL;
memset(&rt->fld, 0, sizeof(rt->fld));
rt->rt_saddr = fld.saddr;
rt->rt_daddr = fld.daddr;
@@ -1529,7 +1531,7 @@ static int dn_route_input(struct sk_buff *skb)
rcu_read_lock();
for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
- rt = rcu_dereference(rt->dst.dn_next)) {
+ rt = rcu_dereference(rt->dn_next)) {
if ((rt->fld.saddr == cb->src) &&
(rt->fld.daddr == cb->dst) &&
(rt->fld.flowidn_oif == 0) &&
@@ -1749,7 +1751,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock_bh();
for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
rt;
- rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
+ rt = rcu_dereference_bh(rt->dn_next), idx++) {
if (idx < s_idx)
continue;
skb_dst_set(skb, dst_clone(&rt->dst));
@@ -1795,7 +1797,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
{
struct dn_rt_cache_iter_state *s = seq->private;
- rt = rcu_dereference_bh(rt->dst.dn_next);
+ rt = rcu_dereference_bh(rt->dn_next);
while (!rt) {
rcu_read_unlock_bh();
if (--s->bucket < 0)
@@ -1858,7 +1860,6 @@ static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations dn_rt_cache_seq_fops = {
- .owner = THIS_MODULE,
.open = dn_rt_cache_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1921,11 +1922,11 @@ void __init dn_route_init(void)
&dn_rt_cache_seq_fops);
#ifdef CONFIG_DECNET_ROUTER
- rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
- dn_fib_dump, 0);
+ rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
+ dn_cache_getroute, dn_fib_dump, 0);
#else
- rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
- dn_cache_dump, 0);
+ rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
+ dn_cache_getroute, dn_cache_dump, 0);
#endif
}
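
Why the two new `rt->dn_next = NULL;` lines appear in this file: the chain pointer has moved out of struct dst_entry, where the core dst code initialized the old union member for every allocation, into struct dn_route proper, so each freshly allocated route must now clear its own chain link before being published on an RCU-visible hash chain. The shape of the fix, schematically (allocation arguments elided):

    rt = dst_alloc(&dn_dst_ops, dev_out, ...);
    if (rt == NULL)
            goto e_nobufs;
    rt->dn_next = NULL;     /* no longer initialized for us by the dst core */
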
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 03c3bdf25468..bbf2c82cf7b2 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -16,6 +16,15 @@ config NET_DSA
if NET_DSA
+config NET_DSA_LEGACY
+ bool "Support for older platform device and Device Tree registration"
+ default y
+ ---help---
+ Say Y if you want to enable support for the older platform device and
+ deprecated Device Tree binding registration.
+
+ This feature is scheduled for removal in 4.17.
+
# tagging formats
config NET_DSA_TAG_BRCM
bool
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 0e13c1f95d13..9e4d3536f977 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# the core
obj-$(CONFIG_NET_DSA) += dsa_core.o
-dsa_core-y += dsa.o dsa2.o legacy.o master.o port.o slave.o switch.o
+dsa_core-y += dsa.o dsa2.o master.o port.o slave.o switch.o
+dsa_core-$(CONFIG_NET_DSA_LEGACY) += legacy.o
# tagging formats
dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 1e287420ff49..adf50fbc4c13 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -241,7 +241,7 @@ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
for (port = 0; port < ds->num_ports; port++) {
dp = &ds->ports[port];
- if (dsa_port_is_user(dp))
+ if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
dp->cpu_dp = dst->cpu_dp;
}
}
@@ -271,13 +271,12 @@ static int dsa_port_setup(struct dsa_port *dp)
break;
case DSA_PORT_TYPE_CPU:
case DSA_PORT_TYPE_DSA:
- err = dsa_port_fixed_link_register_of(dp);
+ err = dsa_port_link_register_of(dp);
if (err) {
- dev_err(ds->dev, "failed to register fixed link for port %d.%d\n",
+ dev_err(ds->dev, "failed to setup link for port %d.%d\n",
ds->index, dp->index);
return err;
}
-
break;
case DSA_PORT_TYPE_USER:
err = dsa_slave_create(dp);
@@ -301,7 +300,7 @@ static void dsa_port_teardown(struct dsa_port *dp)
break;
case DSA_PORT_TYPE_CPU:
case DSA_PORT_TYPE_DSA:
- dsa_port_fixed_link_unregister_of(dp);
+ dsa_port_link_unregister_of(dp);
break;
case DSA_PORT_TYPE_USER:
if (dp->slave) {
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 7d036696e8c4..70de7895e5b8 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -97,8 +97,17 @@ const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
bool dsa_schedule_work(struct work_struct *work);
/* legacy.c */
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
int dsa_legacy_register(void);
void dsa_legacy_unregister(void);
+#else
+static inline int dsa_legacy_register(void)
+{
+ return 0;
+}
+
+static inline void dsa_legacy_unregister(void) { }
+#endif
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
@@ -157,8 +166,8 @@ int dsa_port_vlan_add(struct dsa_port *dp,
struct switchdev_trans *trans);
int dsa_port_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan);
-int dsa_port_fixed_link_register_of(struct dsa_port *dp);
-void dsa_port_fixed_link_unregister_of(struct dsa_port *dp);
+int dsa_port_link_register_of(struct dsa_port *dp);
+void dsa_port_link_unregister_of(struct dsa_port *dp);
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
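
The #if IS_ENABLED(CONFIG_NET_DSA_LEGACY) block above is the standard pattern for compiling out an optional subsystem: real prototypes when the option is on, static inline no-ops when it is off, so call sites never need their own ifdefs. A generic sketch of the same pattern, with illustrative names:

    /* foo.h -- illustrative header, not part of the kernel */
    #if IS_ENABLED(CONFIG_FOO)
    int foo_register(void);
    void foo_unregister(void);
    #else
    static inline int foo_register(void)
    {
            return 0;       /* report success so callers stay ifdef-free */
    }

    static inline void foo_unregister(void) { }
    #endif
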
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index 84611d7fcfa2..cb54b81d0bd9 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -86,7 +86,7 @@ static int dsa_cpu_dsa_setups(struct dsa_switch *ds)
if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
continue;
- ret = dsa_port_fixed_link_register_of(&ds->ports[port]);
+ ret = dsa_port_link_register_of(&ds->ports[port]);
if (ret)
return ret;
}
@@ -275,7 +275,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
for (port = 0; port < ds->num_ports; port++) {
if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
continue;
- dsa_port_fixed_link_unregister_of(&ds->ports[port]);
+ dsa_port_link_unregister_of(&ds->ports[port]);
}
if (ds->slave_mii_bus && ds->ops->phy_read)
@@ -718,26 +718,6 @@ static int dsa_resume(struct device *d)
}
#endif
-/* legacy way, bypassing the bridge *****************************************/
-int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid,
- u16 flags)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- return dsa_port_fdb_add(dp, addr, vid);
-}
-
-int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- return dsa_port_fdb_del(dp, addr, vid);
-}
-
static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume);
static const struct of_device_id dsa_of_match_table[] = {
diff --git a/net/dsa/port.c b/net/dsa/port.c
index bb4be2679904..7acc1169d75e 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -273,7 +273,56 @@ int dsa_port_vlan_del(struct dsa_port *dp,
return 0;
}
-int dsa_port_fixed_link_register_of(struct dsa_port *dp)
+static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
+{
+ struct device_node *port_dn = dp->dn;
+ struct device_node *phy_dn;
+ struct dsa_switch *ds = dp->ds;
+ struct phy_device *phydev;
+ int port = dp->index;
+ int err = 0;
+
+ phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
+ if (!phy_dn)
+ return 0;
+
+ phydev = of_phy_find_device(phy_dn);
+ if (!phydev) {
+ err = -EPROBE_DEFER;
+ goto err_put_of;
+ }
+
+ if (enable) {
+ err = genphy_config_init(phydev);
+ if (err < 0)
+ goto err_put_dev;
+
+ err = genphy_resume(phydev);
+ if (err < 0)
+ goto err_put_dev;
+
+ err = genphy_read_status(phydev);
+ if (err < 0)
+ goto err_put_dev;
+ } else {
+ err = genphy_suspend(phydev);
+ if (err < 0)
+ goto err_put_dev;
+ }
+
+ if (ds->ops->adjust_link)
+ ds->ops->adjust_link(ds, port, phydev);
+
+	dev_dbg(ds->dev, "%s port's phy: %s",
+		enable ? "enabled" : "disabled", phydev_name(phydev));
+
+err_put_dev:
+ put_device(&phydev->mdio.dev);
+err_put_of:
+ of_node_put(phy_dn);
+ return err;
+}
+
+static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
struct device_node *dn = dp->dn;
struct dsa_switch *ds = dp->ds;
@@ -282,38 +331,44 @@ int dsa_port_fixed_link_register_of(struct dsa_port *dp)
int mode;
int err;
- if (of_phy_is_fixed_link(dn)) {
- err = of_phy_register_fixed_link(dn);
- if (err) {
- dev_err(ds->dev,
- "failed to register the fixed PHY of port %d\n",
- port);
- return err;
- }
+ err = of_phy_register_fixed_link(dn);
+ if (err) {
+ dev_err(ds->dev,
+ "failed to register the fixed PHY of port %d\n",
+ port);
+ return err;
+ }
- phydev = of_phy_find_device(dn);
+ phydev = of_phy_find_device(dn);
- mode = of_get_phy_mode(dn);
- if (mode < 0)
- mode = PHY_INTERFACE_MODE_NA;
- phydev->interface = mode;
+ mode = of_get_phy_mode(dn);
+ if (mode < 0)
+ mode = PHY_INTERFACE_MODE_NA;
+ phydev->interface = mode;
- genphy_config_init(phydev);
- genphy_read_status(phydev);
+ genphy_config_init(phydev);
+ genphy_read_status(phydev);
- if (ds->ops->adjust_link)
- ds->ops->adjust_link(ds, port, phydev);
+ if (ds->ops->adjust_link)
+ ds->ops->adjust_link(ds, port, phydev);
- put_device(&phydev->mdio.dev);
- }
+ put_device(&phydev->mdio.dev);
return 0;
}
-void dsa_port_fixed_link_unregister_of(struct dsa_port *dp)
+int dsa_port_link_register_of(struct dsa_port *dp)
{
- struct device_node *dn = dp->dn;
+ if (of_phy_is_fixed_link(dp->dn))
+ return dsa_port_fixed_link_register_of(dp);
+ else
+ return dsa_port_setup_phy_of(dp, true);
+}
- if (of_phy_is_fixed_link(dn))
- of_phy_deregister_fixed_link(dn);
+void dsa_port_link_unregister_of(struct dsa_port *dp)
+{
+ if (of_phy_is_fixed_link(dp->dn))
+ of_phy_deregister_fixed_link(dp->dn);
+ else
+ dsa_port_setup_phy_of(dp, false);
}
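
dsa_port_setup_phy_of() above follows the usual kernel unwind pattern: each acquired resource gets a label, success and failure fall through the same release sequence, and the labels run in reverse order of acquisition. Its skeleton, with illustrative names standing in for of_parse_phandle()/of_phy_find_device() and their matching puts:

    static int setup(void)
    {
            struct res *a, *b;
            int err = 0;

            a = acquire_a();
            if (!a)
                    return 0;               /* nothing to set up */

            b = acquire_b(a);
            if (!b) {
                    err = -EPROBE_DEFER;
                    goto err_put_a;
            }

            err = do_work(b);
            if (err < 0)
                    goto err_put_b;

            /* more work on success ... */

    err_put_b:
            release_b(b);
    err_put_a:
            release_a(a);
            return err;
    }

err stays 0 on the success path, so a single return statement serves both outcomes.
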
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a95a55f79137..f52307296de4 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -708,14 +708,12 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_mall_tc_entry *mall_tc_entry;
__be16 protocol = cls->common.protocol;
- struct net *net = dev_net(dev);
struct dsa_switch *ds = dp->ds;
struct net_device *to_dev;
const struct tc_action *a;
struct dsa_port *to_dp;
int err = -EOPNOTSUPP;
LIST_HEAD(actions);
- int ifindex;
if (!ds->ops->port_mirror_add)
return err;
@@ -729,8 +727,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
struct dsa_mall_mirror_tc_entry *mirror;
- ifindex = tcf_mirred_ifindex(a);
- to_dev = __dev_get_by_index(net, ifindex);
+ to_dev = tcf_mirred_dev(a);
if (!to_dev)
return -EINVAL;
@@ -943,6 +940,26 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.set_rxnfc = dsa_slave_set_rxnfc,
};
+/* legacy way, bypassing the bridge *****************************************/
+int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid,
+ u16 flags)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return dsa_port_fdb_add(dp, addr, vid);
+}
+
+int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return dsa_port_fdb_del(dp, addr, vid);
+}
+
static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_open = dsa_slave_open,
.ndo_stop = dsa_slave_close,
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 29608d087a7c..b93511726069 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -83,29 +83,52 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
static int dsa_switch_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
- /* Do not care yet about other switch chips of the fabric */
- if (ds->index != info->sw_index)
- return 0;
+ int port = dsa_towards_port(ds, info->sw_index, info->port);
if (!ds->ops->port_fdb_add)
return -EOPNOTSUPP;
- return ds->ops->port_fdb_add(ds, info->port, info->addr,
- info->vid);
+ return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
}
static int dsa_switch_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
- /* Do not care yet about other switch chips of the fabric */
- if (ds->index != info->sw_index)
- return 0;
+ int port = dsa_towards_port(ds, info->sw_index, info->port);
if (!ds->ops->port_fdb_del)
return -EOPNOTSUPP;
- return ds->ops->port_fdb_del(ds, info->port, info->addr,
- info->vid);
+ return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
+}
+
+static int
+dsa_switch_mdb_prepare_bitmap(struct dsa_switch *ds,
+ const struct switchdev_obj_port_mdb *mdb,
+ const unsigned long *bitmap)
+{
+ int port, err;
+
+ if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
+ return -EOPNOTSUPP;
+
+ for_each_set_bit(port, bitmap, ds->num_ports) {
+ err = ds->ops->port_mdb_prepare(ds, port, mdb);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
+ const struct switchdev_obj_port_mdb *mdb,
+ const unsigned long *bitmap)
+{
+ int port;
+
+ for_each_set_bit(port, bitmap, ds->num_ports)
+ ds->ops->port_mdb_add(ds, port, mdb);
}
static int dsa_switch_mdb_add(struct dsa_switch *ds,
@@ -114,7 +137,7 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
const struct switchdev_obj_port_mdb *mdb = info->mdb;
struct switchdev_trans *trans = info->trans;
DECLARE_BITMAP(group, ds->num_ports);
- int port, err;
+ int port;
/* Build a mask of Multicast group members */
bitmap_zero(group, ds->num_ports);
@@ -124,21 +147,10 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
if (dsa_is_dsa_port(ds, port))
set_bit(port, group);
- if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
- return -EOPNOTSUPP;
-
- for_each_set_bit(port, group, ds->num_ports) {
- err = ds->ops->port_mdb_prepare(ds, port, mdb, trans);
- if (err)
- return err;
- }
-
- return 0;
- }
+ if (switchdev_trans_ph_prepare(trans))
+ return dsa_switch_mdb_prepare_bitmap(ds, mdb, group);
- for_each_set_bit(port, group, ds->num_ports)
- ds->ops->port_mdb_add(ds, port, mdb, trans);
+ dsa_switch_mdb_add_bitmap(ds, mdb, group);
return 0;
}
@@ -157,13 +169,43 @@ static int dsa_switch_mdb_del(struct dsa_switch *ds,
return 0;
}
+static int
+dsa_switch_vlan_prepare_bitmap(struct dsa_switch *ds,
+ const struct switchdev_obj_port_vlan *vlan,
+ const unsigned long *bitmap)
+{
+ int port, err;
+
+ if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
+ return -EOPNOTSUPP;
+
+ for_each_set_bit(port, bitmap, ds->num_ports) {
+ err = ds->ops->port_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+dsa_switch_vlan_add_bitmap(struct dsa_switch *ds,
+ const struct switchdev_obj_port_vlan *vlan,
+ const unsigned long *bitmap)
+{
+ int port;
+
+ for_each_set_bit(port, bitmap, ds->num_ports)
+ ds->ops->port_vlan_add(ds, port, vlan);
+}
+
static int dsa_switch_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
const struct switchdev_obj_port_vlan *vlan = info->vlan;
struct switchdev_trans *trans = info->trans;
DECLARE_BITMAP(members, ds->num_ports);
- int port, err;
+ int port;
/* Build a mask of VLAN members */
bitmap_zero(members, ds->num_ports);
@@ -173,21 +215,10 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds,
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
set_bit(port, members);
- if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
- return -EOPNOTSUPP;
-
- for_each_set_bit(port, members, ds->num_ports) {
- err = ds->ops->port_vlan_prepare(ds, port, vlan, trans);
- if (err)
- return err;
- }
-
- return 0;
- }
+ if (switchdev_trans_ph_prepare(trans))
+ return dsa_switch_vlan_prepare_bitmap(ds, vlan, members);
- for_each_set_bit(port, members, ds->num_ports)
- ds->ops->port_vlan_add(ds, port, vlan, trans);
+ dsa_switch_vlan_add_bitmap(ds, vlan, members);
return 0;
}
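
The mdb and vlan paths above now share one shape: switchdev invokes every object operation twice, first in a prepare phase that may fail but must leave no state behind, then in a commit phase that is not allowed to fail. Reduced to a generic sketch (ctx/ops names are illustrative):

    static int object_add(struct ctx *c, const struct obj *o,
                          const unsigned long *members, bool prepare)
    {
            int port, err;

            if (prepare) {
                    if (!c->ops->prepare || !c->ops->commit)
                            return -EOPNOTSUPP;
                    for_each_set_bit(port, members, c->num_ports) {
                            err = c->ops->prepare(c, port, o);
                            if (err)
                                    return err;     /* nothing committed yet */
                    }
                    return 0;
            }

            for_each_set_bit(port, members, c->num_ports)
                    c->ops->commit(c, port, o);     /* must not fail */
            return 0;
    }
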
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index e6e0b7b6025c..2b06bb91318b 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -70,6 +70,18 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
return NULL;
+ /* The Ethernet switch we are interfaced with needs packets to be at
+	 * least 64 bytes (including FCS), otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+ * need to make sure that packets are at least 68 bytes
+ * (including FCS and tag) because the length verification is done after
+ * the Broadcom tag is stripped off the ingress packet.
+ *
+ * Let dsa_slave_xmit() free the SKB
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
+ return NULL;
+
skb_push(skb, BRCM_TAG_LEN);
if (offset)
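
The arithmetic behind the new padding call: ETH_ZLEN is the 60-byte minimum frame length excluding FCS, so padding to ETH_ZLEN + BRCM_TAG_LEN (64 bytes) guarantees that after the switch strips the 4-byte tag the frame still meets the 64-byte-including-FCS wire minimum. Passing false as the third argument tells __skb_put_padto() not to free the skb on failure, matching the comment that dsa_slave_xmit() owns the free:

    /* 60 (ETH_ZLEN) + 4 (BRCM_TAG_LEN) = 64; after the tag is
     * stripped, 60 bytes + 4-byte FCS still hits the 64-byte minimum.
     */
    if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
            return NULL;    /* padding failed; caller frees the skb */
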
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 8475434af7d5..11535bc70743 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -13,10 +13,13 @@
*/
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include "dsa_priv.h"
#define MTK_HDR_LEN 4
+#define MTK_HDR_XMIT_UNTAGGED 0
+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
#define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
#define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0)
@@ -25,20 +28,37 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
{
struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *mtk_tag;
+ bool is_vlan_skb = true;
- if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
- return NULL;
-
- skb_push(skb, MTK_HDR_LEN);
+	/* Build the special tag after the MAC Source Address. If a VLAN
+	 * header is present, the VLAN header and the special tag must be
+	 * combined so that the switch can parse both the special tag and
+	 * the VLAN tag at the same time and then look up the VLAN table
+	 * with the VID.
+ */
+ if (!skb_vlan_tagged(skb)) {
+ if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
+ return NULL;
- memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
+ skb_push(skb, MTK_HDR_LEN);
+ memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
+ is_vlan_skb = false;
+ }
- /* Build the tag after the MAC Source Address */
mtk_tag = skb->data + 2 * ETH_ALEN;
- mtk_tag[0] = 0;
+
+	/* Mark the tag attribute on special tag insertion so the hardware
+	 * knows whether the special tag is combined with an 802.1Q header.
+ */
+ mtk_tag[0] = is_vlan_skb ? MTK_HDR_XMIT_TAGGED_TPID_8100 :
+ MTK_HDR_XMIT_UNTAGGED;
mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
- mtk_tag[2] = 0;
- mtk_tag[3] = 0;
+
+ /* Tag control information is kept for 802.1Q */
+ if (!is_vlan_skb) {
+ mtk_tag[2] = 0;
+ mtk_tag[3] = 0;
+ }
return skb;
}
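
What the two branches above produce on the wire, schematically (byte values illustrative):

    /* untagged frame:  | DA | SA | 00 pp 00 00 | type | payload ...
     * VLAN frame:      | DA | SA | 01 pp  TCI  | type | payload ...
     *
     * pp = (1 << port) & MTK_HDR_XMIT_DP_BIT_MASK
     * 01 = MTK_HDR_XMIT_TAGGED_TPID_8100: the 0x8100 TPID slot is
     *      rewritten into the special tag while the original 16-bit
     *      TCI (bytes 2-3) is left in place for the switch to parse.
     */
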
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index c6c8ad1d4b6d..47a0a6649a9d 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_INET_DIAG) += inet_diag.o
obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o
-obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f00499a46927..c24008daa3d8 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -121,6 +121,7 @@
#endif
#include <net/l3mdev.h>
+#include <trace/events/sock.h>
/* The inetsw table contains everything that inet_create needs to
* build a new socket.
@@ -789,7 +790,8 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int addr_len = 0;
int err;
- sock_rps_record_flow(sk);
+ if (likely(!(flags & MSG_ERRQUEUE)))
+ sock_rps_record_flow(sk);
err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
flags & ~MSG_DONTWAIT, &addr_len);
@@ -870,6 +872,9 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct sock *sk = sock->sk;
int err = 0;
struct net *net = sock_net(sk);
+ void __user *p = (void __user *)arg;
+ struct ifreq ifr;
+ struct rtentry rt;
switch (cmd) {
case SIOCGSTAMP:
@@ -880,8 +885,12 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
break;
case SIOCADDRT:
case SIOCDELRT:
+ if (copy_from_user(&rt, p, sizeof(struct rtentry)))
+ return -EFAULT;
+ err = ip_rt_ioctl(net, cmd, &rt);
+ break;
case SIOCRTMSG:
- err = ip_rt_ioctl(net, cmd, (void __user *)arg);
+ err = -EINVAL;
break;
case SIOCDARP:
case SIOCGARP:
@@ -889,17 +898,26 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
err = arp_ioctl(net, cmd, (void __user *)arg);
break;
case SIOCGIFADDR:
- case SIOCSIFADDR:
case SIOCGIFBRDADDR:
- case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
- case SIOCSIFNETMASK:
case SIOCGIFDSTADDR:
+ case SIOCGIFPFLAGS:
+ if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
+ return -EFAULT;
+ err = devinet_ioctl(net, cmd, &ifr);
+ if (!err && copy_to_user(p, &ifr, sizeof(struct ifreq)))
+ err = -EFAULT;
+ break;
+
+ case SIOCSIFADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
case SIOCSIFDSTADDR:
case SIOCSIFPFLAGS:
- case SIOCGIFPFLAGS:
case SIOCSIFFLAGS:
- err = devinet_ioctl(net, cmd, (void __user *)arg);
+ if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
+ return -EFAULT;
+ err = devinet_ioctl(net, cmd, &ifr);
break;
default:
if (sk->sk_prot->ioctl)
@@ -1220,6 +1238,19 @@ int inet_sk_rebuild_header(struct sock *sk)
}
EXPORT_SYMBOL(inet_sk_rebuild_header);
+void inet_sk_set_state(struct sock *sk, int state)
+{
+ trace_inet_sock_set_state(sk, sk->sk_state, state);
+ sk->sk_state = state;
+}
+EXPORT_SYMBOL(inet_sk_set_state);
+
+void inet_sk_state_store(struct sock *sk, int newstate)
+{
+ trace_inet_sock_set_state(sk, sk->sk_state, newstate);
+ smp_store_release(&sk->sk_state, newstate);
+}
+
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a8d7c5a9fb05..f28f06c91ead 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
static int arp_constructor(struct neighbour *neigh)
{
- __be32 addr = *(__be32 *)neigh->primary_key;
+ __be32 addr;
struct net_device *dev = neigh->dev;
struct in_device *in_dev;
struct neigh_parms *parms;
+ u32 inaddr_any = INADDR_ANY;
+ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+ memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
+
+ addr = *(__be32 *)neigh->primary_key;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (!in_dev) {
@@ -1420,7 +1425,6 @@ static int arp_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations arp_seq_fops = {
- .owner = THIS_MODULE,
.open = arp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7a93359fbc72..40f001782c1b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -946,11 +946,10 @@ static int inet_abc_len(__be32 addr)
}
-int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
{
- struct ifreq ifr;
struct sockaddr_in sin_orig;
- struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
+ struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
struct in_device *in_dev;
struct in_ifaddr **ifap = NULL;
struct in_ifaddr *ifa = NULL;
@@ -959,22 +958,16 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
int ret = -EFAULT;
int tryaddrmatch = 0;
- /*
- * Fetch the caller's info block into kernel space
- */
-
- if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
- goto out;
- ifr.ifr_name[IFNAMSIZ - 1] = 0;
+ ifr->ifr_name[IFNAMSIZ - 1] = 0;
/* save original address for comparison */
memcpy(&sin_orig, sin, sizeof(*sin));
- colon = strchr(ifr.ifr_name, ':');
+ colon = strchr(ifr->ifr_name, ':');
if (colon)
*colon = 0;
- dev_load(net, ifr.ifr_name);
+ dev_load(net, ifr->ifr_name);
switch (cmd) {
case SIOCGIFADDR: /* Get interface address */
@@ -1014,7 +1007,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
rtnl_lock();
ret = -ENODEV;
- dev = __dev_get_by_name(net, ifr.ifr_name);
+ dev = __dev_get_by_name(net, ifr->ifr_name);
if (!dev)
goto done;
@@ -1031,7 +1024,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
This is checked above. */
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
ifap = &ifa->ifa_next) {
- if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
+ if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
sin_orig.sin_addr.s_addr ==
ifa->ifa_local) {
break; /* found */
@@ -1044,7 +1037,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!ifa) {
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
ifap = &ifa->ifa_next)
- if (!strcmp(ifr.ifr_name, ifa->ifa_label))
+ if (!strcmp(ifr->ifr_name, ifa->ifa_label))
break;
}
}
@@ -1055,20 +1048,24 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
switch (cmd) {
case SIOCGIFADDR: /* Get interface address */
+ ret = 0;
sin->sin_addr.s_addr = ifa->ifa_local;
- goto rarok;
+ break;
case SIOCGIFBRDADDR: /* Get the broadcast address */
+ ret = 0;
sin->sin_addr.s_addr = ifa->ifa_broadcast;
- goto rarok;
+ break;
case SIOCGIFDSTADDR: /* Get the destination address */
+ ret = 0;
sin->sin_addr.s_addr = ifa->ifa_address;
- goto rarok;
+ break;
case SIOCGIFNETMASK: /* Get the netmask for the interface */
+ ret = 0;
sin->sin_addr.s_addr = ifa->ifa_mask;
- goto rarok;
+ break;
case SIOCSIFFLAGS:
if (colon) {
@@ -1076,11 +1073,11 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!ifa)
break;
ret = 0;
- if (!(ifr.ifr_flags & IFF_UP))
+ if (!(ifr->ifr_flags & IFF_UP))
inet_del_ifa(in_dev, ifap, 1);
break;
}
- ret = dev_change_flags(dev, ifr.ifr_flags);
+ ret = dev_change_flags(dev, ifr->ifr_flags);
break;
case SIOCSIFADDR: /* Set interface address (and family) */
@@ -1095,7 +1092,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
break;
INIT_HLIST_NODE(&ifa->hash);
if (colon)
- memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
+ memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
else
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
} else {
@@ -1182,28 +1179,27 @@ done:
rtnl_unlock();
out:
return ret;
-rarok:
- rtnl_unlock();
- ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
- goto out;
}
-static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
+static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
{
struct in_device *in_dev = __in_dev_get_rtnl(dev);
struct in_ifaddr *ifa;
struct ifreq ifr;
int done = 0;
+ if (WARN_ON(size > sizeof(struct ifreq)))
+ goto out;
+
if (!in_dev)
goto out;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
if (!buf) {
- done += sizeof(ifr);
+ done += size;
continue;
}
- if (len < (int) sizeof(ifr))
+ if (len < size)
break;
memset(&ifr, 0, sizeof(struct ifreq));
strcpy(ifr.ifr_name, ifa->ifa_label);
@@ -1212,13 +1208,12 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
ifa->ifa_local;
- if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
+ if (copy_to_user(buf + done, &ifr, size)) {
done = -EFAULT;
break;
}
- buf += sizeof(struct ifreq);
- len -= sizeof(struct ifreq);
- done += sizeof(struct ifreq);
+ len -= size;
+ done += size;
}
out:
return done;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d57aa64fa7c7..296d0b956bfe 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
static void esp_output_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
+ struct xfrm_offload *xo = xfrm_offload(skb);
void *tmp;
- struct dst_entry *dst = skb_dst(skb);
- struct xfrm_state *x = dst->xfrm;
+ struct xfrm_state *x;
+
+ if (xo && (xo->flags & XFRM_DEV_RESUME))
+ x = skb->sp->xvec[skb->sp->len - 1];
+ else
+ x = skb_dst(skb)->xfrm;
tmp = ESP_SKB_CB(skb)->tmp;
esp_ssg_unref(x, tmp);
kfree(tmp);
- xfrm_output_resume(skb, err);
+
+ if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+ if (err) {
+ XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_push(skb, skb->data - skb_mac_header(skb));
+ secpath_reset(skb);
+ xfrm_dev_resume(skb);
+ } else {
+ xfrm_output_resume(skb, err);
+ }
}
/* Move ESP header back into place. */
@@ -825,17 +843,13 @@ static int esp_init_aead(struct xfrm_state *x)
char aead_name[CRYPTO_MAX_ALG_NAME];
struct crypto_aead *aead;
int err;
- u32 mask = 0;
err = -ENAMETOOLONG;
if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
goto error;
- if (x->xso.offload_handle)
- mask |= CRYPTO_ALG_ASYNC;
-
- aead = crypto_alloc_aead(aead_name, 0, mask);
+ aead = crypto_alloc_aead(aead_name, 0, 0);
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
@@ -865,7 +879,6 @@ static int esp_init_authenc(struct xfrm_state *x)
char authenc_name[CRYPTO_MAX_ALG_NAME];
unsigned int keylen;
int err;
- u32 mask = 0;
err = -EINVAL;
if (!x->ealg)
@@ -891,10 +904,7 @@ static int esp_init_authenc(struct xfrm_state *x)
goto error;
}
- if (x->xso.offload_handle)
- mask |= CRYPTO_ALG_ASYNC;
-
- aead = crypto_alloc_aead(authenc_name, 0, mask);
+ aead = crypto_alloc_aead(authenc_name, 0, 0);
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
@@ -981,6 +991,7 @@ static int esp_init_state(struct xfrm_state *x)
switch (encap->encap_type) {
default:
+ err = -EINVAL;
goto error;
case UDP_ENCAP_ESPINUDP:
x->props.header_len += sizeof(struct udphdr);
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index f8b918c766b0..da5635fc52c2 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
__be32 spi;
int err;
- skb_pull(skb, offset);
+ if (!pskb_pull(skb, offset))
+ return NULL;
if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
goto out;
@@ -108,75 +109,39 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
- __u32 seq;
- int err = 0;
- struct sk_buff *skb2;
struct xfrm_state *x;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
- struct sk_buff *segs = ERR_PTR(-EINVAL);
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
if (!xo)
- goto out;
+ return ERR_PTR(-EINVAL);
- seq = xo->seq.low;
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+ return ERR_PTR(-EINVAL);
x = skb->sp->xvec[skb->sp->len - 1];
aead = x->data;
esph = ip_esp_hdr(skb);
if (esph->spi != x->id.spi)
- goto out;
+ return ERR_PTR(-EINVAL);
if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
- goto out;
+ return ERR_PTR(-EINVAL);
__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
skb->encap_hdr_csum = 1;
- if (!(features & NETIF_F_HW_ESP))
+ if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+ (x->xso.dev != skb->dev))
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
- segs = x->outer_mode->gso_segment(x, skb, esp_features);
- if (IS_ERR_OR_NULL(segs))
- goto out;
-
- __skb_pull(skb, skb->data - skb_mac_header(skb));
-
- skb2 = segs;
- do {
- struct sk_buff *nskb = skb2->next;
-
- xo = xfrm_offload(skb2);
- xo->flags |= XFRM_GSO_SEGMENT;
- xo->seq.low = seq;
- xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
- if(!(features & NETIF_F_HW_ESP))
- xo->flags |= CRYPTO_FALLBACK;
-
- x->outer_mode->xmit(x, skb2);
-
- err = x->type_offload->xmit(x, skb2, esp_features);
- if (err) {
- kfree_skb_list(segs);
- return ERR_PTR(err);
- }
-
- if (!skb_is_gso(skb2))
- seq++;
- else
- seq += skb_shinfo(skb2)->gso_segs;
-
- skb_push(skb2, skb2->mac_len);
- skb2 = nskb;
- } while (skb2);
+ xo->flags |= XFRM_GSO_SEGMENT;
-out:
- return segs;
+ return x->outer_mode->gso_segment(x, skb, esp_features);
}
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -203,6 +168,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
struct crypto_aead *aead;
struct esp_info esp;
bool hw_offload = true;
+ __u32 seq;
esp.inplace = true;
@@ -241,23 +207,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
return esp.nfrags;
}
+ seq = xo->seq.low;
+
esph = esp.esph;
esph->spi = x->id.spi;
skb_push(skb, -skb_network_offset(skb));
if (xo->flags & XFRM_GSO_SEGMENT) {
- esph->seq_no = htonl(xo->seq.low);
- } else {
- ip_hdr(skb)->tot_len = htons(skb->len);
- ip_send_check(ip_hdr(skb));
+ esph->seq_no = htonl(seq);
+
+ if (!skb_is_gso(skb))
+ xo->seq.low++;
+ else
+ xo->seq.low += skb_shinfo(skb)->gso_segs;
}
+ esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+
+ ip_hdr(skb)->tot_len = htons(skb->len);
+ ip_send_check(ip_hdr(skb));
+
if (hw_offload)
return 0;
- esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
err = esp_output_tail(x, skb, &esp);
if (err)
return err;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 08259d078b1c..f05afaf3235c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -587,10 +587,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
* Handle IP routing ioctl calls.
* These are used to manipulate the routing tables
*/
-int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+int ip_rt_ioctl(struct net *net, unsigned int cmd, struct rtentry *rt)
{
struct fib_config cfg;
- struct rtentry rt;
int err;
switch (cmd) {
@@ -599,11 +598,8 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- if (copy_from_user(&rt, arg, sizeof(rt)))
- return -EFAULT;
-
rtnl_lock();
- err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
+ err = rtentry_to_fib_config(net, cmd, rt, &cfg);
if (err == 0) {
struct fib_table *tb;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5ddc4aefff12..5530cd6fdbc7 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2334,7 +2334,6 @@ static int fib_triestat_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations fib_triestat_fops = {
- .owner = THIS_MODULE,
.open = fib_triestat_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2521,7 +2520,6 @@ static int fib_trie_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations fib_trie_fops = {
- .owner = THIS_MODULE,
.open = fib_trie_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2715,7 +2713,6 @@ static int fib_route_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations fib_route_fops = {
- .owner = THIS_MODULE,
.open = fib_route_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 726f6b608274..10f7f74a0831 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
return htonl(INADDR_ANY);
for_ifa(in_dev) {
- if (inet_ifa_match(fl4->saddr, ifa))
+ if (fl4->saddr == ifa->ifa_local)
return fl4->saddr;
} endfor_ifa(in_dev);
@@ -2832,7 +2832,6 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations igmp_mc_seq_fops = {
- .owner = THIS_MODULE,
.open = igmp_mc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2979,7 +2978,6 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations igmp_mcf_seq_fops = {
- .owner = THIS_MODULE,
.open = igmp_mcf_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 4ca46dc08e63..12410ec6f7f7 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -685,7 +685,7 @@ static void reqsk_timer_handler(struct timer_list *t)
int max_retries, thresh;
u8 defer_accept;
- if (sk_state_load(sk_listener) != TCP_LISTEN)
+ if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
goto drop;
max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
@@ -783,7 +783,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
if (newsk) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
- newsk->sk_state = TCP_SYN_RECV;
+ inet_sk_set_state(newsk, TCP_SYN_RECV);
newicsk->icsk_bind_hash = NULL;
inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
@@ -877,7 +877,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
* It is OK, because this socket enters to hash table only
* after validation is complete.
*/
- sk_state_store(sk, TCP_LISTEN);
+ inet_sk_state_store(sk, TCP_LISTEN);
if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
inet->inet_sport = htons(inet->inet_num);
@@ -888,7 +888,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
return 0;
}
- sk->sk_state = TCP_CLOSE;
+ inet_sk_set_state(sk, TCP_CLOSE);
return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c9c35b61a027..a383f299ce24 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -564,12 +564,18 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
case INET_DIAG_BC_JMP:
yes = 0;
break;
+ case INET_DIAG_BC_S_EQ:
+ yes = entry->sport == op[1].no;
+ break;
case INET_DIAG_BC_S_GE:
yes = entry->sport >= op[1].no;
break;
case INET_DIAG_BC_S_LE:
yes = entry->sport <= op[1].no;
break;
+ case INET_DIAG_BC_D_EQ:
+ yes = entry->dport == op[1].no;
+ break;
case INET_DIAG_BC_D_GE:
yes = entry->dport >= op[1].no;
break;
@@ -802,8 +808,10 @@ static int inet_diag_bc_audit(const struct nlattr *attr,
if (!valid_devcond(bc, len, &min_len))
return -EINVAL;
break;
+ case INET_DIAG_BC_S_EQ:
case INET_DIAG_BC_S_GE:
case INET_DIAG_BC_S_LE:
+ case INET_DIAG_BC_D_EQ:
case INET_DIAG_BC_D_GE:
case INET_DIAG_BC_D_LE:
if (!valid_port_comparison(bc, len, &min_len))
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index e7d15fb0d94d..37b7da0b975d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
+#include <linux/bootmem.h>
#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
@@ -168,6 +169,60 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
+static struct inet_listen_hashbucket *
+inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
+{
+ u32 hash;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ hash = ipv6_portaddr_hash(sock_net(sk),
+ &sk->sk_v6_rcv_saddr,
+ inet_sk(sk)->inet_num);
+ else
+#endif
+ hash = ipv4_portaddr_hash(sock_net(sk),
+ inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_num);
+ return inet_lhash2_bucket(h, hash);
+}
+
+static void inet_hash2(struct inet_hashinfo *h, struct sock *sk)
+{
+ struct inet_listen_hashbucket *ilb2;
+
+ if (!h->lhash2)
+ return;
+
+ ilb2 = inet_lhash2_bucket_sk(h, sk);
+
+ spin_lock(&ilb2->lock);
+ if (sk->sk_reuseport && sk->sk_family == AF_INET6)
+ hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+ &ilb2->head);
+ else
+ hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+ &ilb2->head);
+ ilb2->count++;
+ spin_unlock(&ilb2->lock);
+}
+
+static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk)
+{
+ struct inet_listen_hashbucket *ilb2;
+
+ if (!h->lhash2 ||
+ WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node)))
+ return;
+
+ ilb2 = inet_lhash2_bucket_sk(h, sk);
+
+ spin_lock(&ilb2->lock);
+ hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
+ ilb2->count--;
+ spin_unlock(&ilb2->lock);
+}
+
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum, const __be32 daddr,
const int dif, const int sdif, bool exact_dif)
@@ -207,6 +262,40 @@ static inline int compute_score(struct sock *sk, struct net *net,
*/
/* called with rcu_read_lock() : No refcount taken on the socket */
+static struct sock *inet_lhash2_lookup(struct net *net,
+ struct inet_listen_hashbucket *ilb2,
+ struct sk_buff *skb, int doff,
+ const __be32 saddr, __be16 sport,
+ const __be32 daddr, const unsigned short hnum,
+ const int dif, const int sdif)
+{
+ bool exact_dif = inet_exact_dif_match(net, skb);
+ struct inet_connection_sock *icsk;
+ struct sock *sk, *result = NULL;
+ int score, hiscore = 0;
+ u32 phash = 0;
+
+ inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
+ sk = (struct sock *)icsk;
+ score = compute_score(sk, net, hnum, daddr,
+ dif, sdif, exact_dif);
+ if (score > hiscore) {
+ if (sk->sk_reuseport) {
+ phash = inet_ehashfn(net, daddr, hnum,
+ saddr, sport);
+ result = reuseport_select_sock(sk, phash,
+ skb, doff);
+ if (result)
+ return result;
+ }
+ result = sk;
+ hiscore = score;
+ }
+ }
+
+ return result;
+}
+
struct sock *__inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
@@ -216,32 +305,57 @@ struct sock *__inet_lookup_listener(struct net *net,
{
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
- int score, hiscore = 0, matches = 0, reuseport = 0;
bool exact_dif = inet_exact_dif_match(net, skb);
+ struct inet_listen_hashbucket *ilb2;
struct sock *sk, *result = NULL;
+ int score, hiscore = 0;
+ unsigned int hash2;
u32 phash = 0;
+ if (ilb->count <= 10 || !hashinfo->lhash2)
+ goto port_lookup;
+
+ /* Too many sk in the ilb bucket (which is hashed by port alone).
+ * Try lhash2 (which is hashed by port and addr) instead.
+ */
+
+ hash2 = ipv4_portaddr_hash(net, daddr, hnum);
+ ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+ if (ilb2->count > ilb->count)
+ goto port_lookup;
+
+ result = inet_lhash2_lookup(net, ilb2, skb, doff,
+ saddr, sport, daddr, hnum,
+ dif, sdif);
+ if (result)
+ return result;
+
+ /* Lookup lhash2 with INADDR_ANY */
+
+ hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
+ ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+ if (ilb2->count > ilb->count)
+ goto port_lookup;
+
+ return inet_lhash2_lookup(net, ilb2, skb, doff,
+ saddr, sport, daddr, hnum,
+ dif, sdif);
+
+port_lookup:
sk_for_each_rcu(sk, &ilb->head) {
score = compute_score(sk, net, hnum, daddr,
dif, sdif, exact_dif);
if (score > hiscore) {
- reuseport = sk->sk_reuseport;
- if (reuseport) {
+ if (sk->sk_reuseport) {
phash = inet_ehashfn(net, daddr, hnum,
saddr, sport);
result = reuseport_select_sock(sk, phash,
skb, doff);
if (result)
return result;
- matches = 1;
}
result = sk;
hiscore = score;
- } else if (score == hiscore && reuseport) {
- matches++;
- if (reciprocal_scale(phash, matches) == 0)
- result = sk;
- phash = next_pseudo_random32(phash);
}
}
return result;
@@ -430,7 +544,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
} else {
percpu_counter_inc(sk->sk_prot->orphan_count);
- sk->sk_state = TCP_CLOSE;
+ inet_sk_set_state(sk, TCP_CLOSE);
sock_set_flag(sk, SOCK_DEAD);
inet_csk_destroy_sock(sk);
}
@@ -483,6 +597,8 @@ int __inet_hash(struct sock *sk, struct sock *osk)
hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
else
hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+ inet_hash2(hashinfo, sk);
+ ilb->count++;
sock_set_flag(sk, SOCK_RCU_FREE);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
@@ -509,28 +625,35 @@ EXPORT_SYMBOL_GPL(inet_hash);
void inet_unhash(struct sock *sk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+ struct inet_listen_hashbucket *ilb;
spinlock_t *lock;
bool listener = false;
- int done;
if (sk_unhashed(sk))
return;
if (sk->sk_state == TCP_LISTEN) {
- lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
+ ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+ lock = &ilb->lock;
listener = true;
} else {
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
}
spin_lock_bh(lock);
+ if (sk_unhashed(sk))
+ goto unlock;
+
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
- if (listener)
- done = __sk_del_node_init(sk);
- else
- done = __sk_nulls_del_node_init_rcu(sk);
- if (done)
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ if (listener) {
+ inet_unhash2(hashinfo, sk);
+ __sk_del_node_init(sk);
+ ilb->count--;
+ } else {
+ __sk_nulls_del_node_init_rcu(sk);
+ }
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+unlock:
spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);
@@ -665,10 +788,37 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
for (i = 0; i < INET_LHTABLE_SIZE; i++) {
spin_lock_init(&h->listening_hash[i].lock);
INIT_HLIST_HEAD(&h->listening_hash[i].head);
+ h->listening_hash[i].count = 0;
}
+
+ h->lhash2 = NULL;
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
+void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+ unsigned long numentries, int scale,
+ unsigned long low_limit,
+ unsigned long high_limit)
+{
+ unsigned int i;
+
+ h->lhash2 = alloc_large_system_hash(name,
+ sizeof(*h->lhash2),
+ numentries,
+ scale,
+ 0,
+ NULL,
+ &h->lhash2_mask,
+ low_limit,
+ high_limit);
+
+ for (i = 0; i <= h->lhash2_mask; i++) {
+ spin_lock_init(&h->lhash2[i].lock);
+ INIT_HLIST_HEAD(&h->lhash2[i].head);
+ h->lhash2[i].count = 0;
+ }
+}
+
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
unsigned int locksz = sizeof(spinlock_t);
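
The strategy behind the __inet_lookup_listener() rework above: listening_hash is keyed by port alone, so one busy port can produce a very long bucket. lhash2 is keyed by (port, address); the lookup switches to it only when the port-only bucket holds more than 10 sockets and the lhash2 bucket is not even longer, and it probes at most twice, schematically:

    /* probe 1: exact destination address */
    hash2 = ipv4_portaddr_hash(net, daddr, hnum);
    result = inet_lhash2_lookup(net, inet_lhash2_bucket(hashinfo, hash2),
                                skb, doff, saddr, sport, daddr, hnum,
                                dif, sdif);
    if (result)
            return result;

    /* probe 2: sockets bound to INADDR_ANY on that port */
    hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
    return inet_lhash2_lookup(net, inet_lhash2_bucket(hashinfo, hash2),
                              skb, doff, saddr, sport, daddr, hnum,
                              dif, sdif);
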
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index b563e0c46bac..c3ea4906d237 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -97,7 +97,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
* Essentially we whip up a timewait bucket, copy the relevant info into it
* from the SK, and mess with hash chains and list linkage.
*/
-void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
struct inet_hashinfo *hashinfo)
{
const struct inet_sock *inet = inet_sk(sk);
@@ -119,18 +119,6 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
spin_lock(lock);
- /*
- * Step 2: Hash TW into tcp ehash chain.
- * Notes :
- * - tw_refcnt is set to 4 because :
- * - We have one reference from bhash chain.
- * - We have one reference from ehash chain.
- * - We have one reference from timer.
- * - One reference for ourself (our caller will release it).
- * We can use atomic_set() because prior spin_lock()/spin_unlock()
- * committed into memory all tw fields.
- */
- refcount_set(&tw->tw_refcnt, 4);
inet_twsk_add_node_rcu(tw, &ehead->chain);
/* Step 3: Remove SK from hash chain */
@@ -138,8 +126,19 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock(lock);
+
+	/* tw_refcnt is set to 3 because we have:
+	 * - one reference for the bhash chain.
+	 * - one reference for the ehash chain.
+	 * - one reference for the timer.
+	 * We can use refcount_set() because the prior spin_lock()/spin_unlock()
+	 * pair committed all tw fields into memory.
+	 * Also note that after this point we have lost our implicit reference,
+	 * so we are not allowed to use tw anymore.
+	 */
+ refcount_set(&tw->tw_refcnt, 3);
}
-EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
+EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
static void tw_timer_handler(struct timer_list *t)
{
@@ -271,14 +270,14 @@ restart:
continue;
tw = inet_twsk(sk);
if ((tw->tw_family != family) ||
- atomic_read(&twsk_net(tw)->count))
+ refcount_read(&twsk_net(tw)->count))
continue;
if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
continue;
if (unlikely((tw->tw_family != family) ||
- atomic_read(&twsk_net(tw)->count))) {
+ refcount_read(&twsk_net(tw)->count))) {
inet_twsk_put(tw);
goto restart;
}
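
Why moving the refcount_set() after the unlock is safe: the timewait allocator leaves tw_refcnt at zero, and every lockless lookup grabs the entry with refcount_inc_not_zero(), so the socket stays invisible to readers until the single refcount_set(..., 3) publishes all three references at once, after the hash-chain updates above it have been committed. The reader side of that handshake, as in the purge loop in this same file:

    if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
            continue;       /* not fully published yet, or already dying */
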
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 45ffd3d045d2..6ec670fbbbdd 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -114,7 +114,8 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
- __be32 id, u32 index, bool truncate);
+ u32 id, u32 index,
+ bool truncate, bool is_ipv4);
static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
@@ -255,34 +256,43 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
{
struct net *net = dev_net(skb->dev);
struct metadata_dst *tun_dst = NULL;
+ struct erspan_base_hdr *ershdr;
+ struct erspan_metadata *pkt_md;
struct ip_tunnel_net *itn;
struct ip_tunnel *tunnel;
- struct erspanhdr *ershdr;
const struct iphdr *iph;
- __be32 index;
+ int ver;
int len;
itn = net_generic(net, erspan_net_id);
len = gre_hdr_len + sizeof(*ershdr);
+	/* Check the base header length */
if (unlikely(!pskb_may_pull(skb, len)))
return PACKET_REJECT;
iph = ip_hdr(skb);
- ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len);
+ ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+ ver = ershdr->ver;
/* The original GRE header does not have a key field,
* so use the ERSPAN 10-bit session ID as the key.
*/
- tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
- index = ershdr->md.index;
+ tpi->key = cpu_to_be32(get_session_id(ershdr));
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
tpi->flags | TUNNEL_KEY,
iph->saddr, iph->daddr, tpi->key);
if (tunnel) {
+ len = gre_hdr_len + erspan_hdr_len(ver);
+ if (unlikely(!pskb_may_pull(skb, len)))
+ return PACKET_REJECT;
+
+ ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+ pkt_md = (struct erspan_metadata *)(ershdr + 1);
+
if (__iptunnel_pull_header(skb,
- gre_hdr_len + sizeof(*ershdr),
+ len,
htons(ETH_P_TEB),
false, false) < 0)
goto drop;
@@ -303,15 +313,21 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
return PACKET_REJECT;
md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
- if (!md)
- return PACKET_REJECT;
+ memcpy(md, pkt_md, sizeof(*md));
+ md->version = ver;
- md->index = index;
info = &tun_dst->u.tun_info;
info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
info->options_len = sizeof(*md);
} else {
- tunnel->index = ntohl(index);
+ tunnel->erspan_ver = ver;
+ if (ver == 1) {
+ tunnel->index = ntohl(pkt_md->u.index);
+ } else {
+ tunnel->dir = pkt_md->u.md2.dir;
+ tunnel->hwid = get_hwid(&pkt_md->u.md2);
+ }
+
}
skb_reset_mac_header(skb);
@@ -405,14 +421,17 @@ static int gre_rcv(struct sk_buff *skb)
if (hdr_len < 0)
goto drop;
- if (unlikely(tpi.proto == htons(ETH_P_ERSPAN))) {
+ if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
+ tpi.proto == htons(ETH_P_ERSPAN2))) {
if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
return 0;
+ goto out;
}
if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
return 0;
+out:
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
@@ -560,6 +579,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
bool truncate = false;
struct flowi4 fl;
int tunnel_hlen;
+ int version;
__be16 df;
tun_info = skb_tunnel_info(skb);
@@ -568,9 +588,13 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
goto err_free_skb;
key = &tun_info->key;
+ md = ip_tunnel_info_opts(tun_info);
+ if (!md)
+ goto err_free_rt;
/* ERSPAN has fixed 8 byte GRE header */
- tunnel_hlen = 8 + sizeof(struct erspanhdr);
+ version = md->version;
+ tunnel_hlen = 8 + erspan_hdr_len(version);
rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
if (!rt)
@@ -584,12 +608,18 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
truncate = true;
}
- md = ip_tunnel_info_opts(tun_info);
- if (!md)
+ if (version == 1) {
+ erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
+ ntohl(md->u.index), truncate, true);
+ } else if (version == 2) {
+ erspan_build_header_v2(skb,
+ ntohl(tunnel_id_to_key32(key->tun_id)),
+ md->u.md2.dir,
+ get_hwid(&md->u.md2),
+ truncate, true);
+ } else {
goto err_free_rt;
-
- erspan_build_header(skb, tunnel_id_to_key32(key->tun_id),
- ntohl(md->index), truncate);
+ }
gre_build_header(skb, 8, TUNNEL_SEQ,
htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
@@ -668,52 +698,6 @@ free_skb:
return NETDEV_TX_OK;
}
-static inline u8 tos_to_cos(u8 tos)
-{
- u8 dscp, cos;
-
- dscp = tos >> 2;
- cos = dscp >> 3;
- return cos;
-}
-
-static void erspan_build_header(struct sk_buff *skb,
- __be32 id, u32 index, bool truncate)
-{
- struct iphdr *iphdr = ip_hdr(skb);
- struct ethhdr *eth = eth_hdr(skb);
- enum erspan_encap_type enc_type;
- struct erspanhdr *ershdr;
- struct qtag_prefix {
- __be16 eth_type;
- __be16 tci;
- } *qp;
- u16 vlan_tci = 0;
-
- enc_type = ERSPAN_ENCAP_NOVLAN;
-
- /* If mirrored packet has vlan tag, extract tci and
- * preserve vlan header in the mirrored frame.
- */
- if (eth->h_proto == htons(ETH_P_8021Q)) {
- qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
- vlan_tci = ntohs(qp->tci);
- enc_type = ERSPAN_ENCAP_INFRAME;
- }
-
- skb_push(skb, sizeof(*ershdr));
- ershdr = (struct erspanhdr *)skb->data;
- memset(ershdr, 0, sizeof(*ershdr));
-
- ershdr->ver_vlan = htons((vlan_tci & VLAN_MASK) |
- (ERSPAN_VERSION << VER_OFFSET));
- ershdr->session_id = htons((u16)(ntohl(id) & ID_MASK) |
- ((tos_to_cos(iphdr->tos) << COS_OFFSET) & COS_MASK) |
- (enc_type << EN_OFFSET & EN_MASK) |
- ((truncate << T_OFFSET) & T_MASK));
- ershdr->md.index = htonl(index & INDEX_MASK);
-}
-
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -737,7 +721,15 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
}
/* Push ERSPAN header */
- erspan_build_header(skb, tunnel->parms.o_key, tunnel->index, truncate);
+ if (tunnel->erspan_ver == 1)
+ erspan_build_header(skb, ntohl(tunnel->parms.o_key),
+ tunnel->index,
+ truncate, true);
+ else
+ erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
+ tunnel->dir, tunnel->hwid,
+ truncate, true);
+
tunnel->parms.o_flags &= ~TUNNEL_KEY;
__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
return NETDEV_TX_OK;
@@ -1209,13 +1201,32 @@ static int ipgre_netlink_parms(struct net_device *dev,
if (data[IFLA_GRE_FWMARK])
*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
- if (data[IFLA_GRE_ERSPAN_INDEX]) {
- t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+ if (data[IFLA_GRE_ERSPAN_VER]) {
+ t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
- if (t->index & ~INDEX_MASK)
+ if (t->erspan_ver != 1 && t->erspan_ver != 2)
return -EINVAL;
}
+ if (t->erspan_ver == 1) {
+ if (data[IFLA_GRE_ERSPAN_INDEX]) {
+ t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+ if (t->index & ~INDEX_MASK)
+ return -EINVAL;
+ }
+ } else if (t->erspan_ver == 2) {
+ if (data[IFLA_GRE_ERSPAN_DIR]) {
+ t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+ if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
+ return -EINVAL;
+ }
+ if (data[IFLA_GRE_ERSPAN_HWID]) {
+ t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+ if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
+ return -EINVAL;
+ }
+ }
+
return 0;
}
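
With IFLA_GRE_ERSPAN_VER, IFLA_GRE_ERSPAN_DIR and IFLA_GRE_ERSPAN_HWID parsed and
bounds-checked here, a v2 tunnel can be configured from userspace. A hedged iproute2
example (option names as later documented in ip-link(8); addresses hypothetical):

    ip link add dev erspan1 type erspan seq key 100 \
            local 10.0.0.1 remote 10.0.0.2 \
            erspan_ver 2 erspan_dir egress erspan_hwid 7
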
@@ -1282,7 +1293,7 @@ static int erspan_tunnel_init(struct net_device *dev)
tunnel->tun_hlen = 8;
tunnel->parms.iph.protocol = IPPROTO_GRE;
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
- sizeof(struct erspanhdr);
+ erspan_hdr_len(tunnel->erspan_ver);
t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
@@ -1413,6 +1424,12 @@ static size_t ipgre_get_size(const struct net_device *dev)
nla_total_size(4) +
/* IFLA_GRE_ERSPAN_INDEX */
nla_total_size(4) +
+ /* IFLA_GRE_ERSPAN_VER */
+ nla_total_size(1) +
+ /* IFLA_GRE_ERSPAN_DIR */
+ nla_total_size(1) +
+ /* IFLA_GRE_ERSPAN_HWID */
+ nla_total_size(2) +
0;
}
@@ -1455,9 +1472,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
}
- if (t->index)
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+ goto nla_put_failure;
+
+ if (t->erspan_ver == 1) {
if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
goto nla_put_failure;
+ } else if (t->erspan_ver == 2) {
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+ goto nla_put_failure;
+ }
return 0;
@@ -1493,6 +1519,9 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_IGNORE_DF] = { .type = NLA_U8 },
[IFLA_GRE_FWMARK] = { .type = NLA_U32 },
[IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
+ [IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
+ [IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
+ [IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 60fb1eb7d7d8..6cc70fa488cb 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -808,6 +808,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
{
struct net_device *dev = NULL;
int ifindex;
+ int midx;
if (optlen != sizeof(int))
goto e_inval;
@@ -823,10 +824,13 @@ static int do_ip_setsockopt(struct sock *sk, int level,
err = -EADDRNOTAVAIL;
if (!dev)
break;
+
+ midx = l3mdev_master_ifindex(dev);
dev_put(dev);
err = -EINVAL;
- if (sk->sk_bound_dev_if)
+ if (sk->sk_bound_dev_if &&
+ (!midx || midx != sk->sk_bound_dev_if))
break;
inet->uc_index = ifindex;
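
The new midx check relaxes the old rule that IP_UNICAST_IF must agree with
sk_bound_dev_if: a socket bound to an L3 master (VRF) device may now pick a unicast
interface enslaved to that VRF. Userspace view, sketched (device names hypothetical;
note the option takes the ifindex in network byte order):

    #include <arpa/inet.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    static int pick_uc_ifindex(int fd)
    {
            int idx = htonl(if_nametoindex("eth1")); /* enslaved to vrf-blue */

            /* bind to the VRF master first ... */
            if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                           "vrf-blue", strlen("vrf-blue") + 1) < 0)
                    return -1;
            /* ... then select an egress device inside that VRF */
            return setsockopt(fd, IPPROTO_IP, IP_UNICAST_IF,
                              &idx, sizeof(idx));
    }
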
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 5ddb1cb52bd4..d786a8441bce 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -520,8 +520,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
else
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
- if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
if (!skb_is_gso(skb) &&
@@ -711,9 +710,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
}
- init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
- tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
- tunnel->fwmark);
+ if (tunnel->fwmark) {
+ init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
+ tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
+ tunnel->fwmark);
+ } else {
+ init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
+ tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
+ skb->mark);
+ }
if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
goto tx_error;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 949f432a5f04..51b1669334fe 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -200,7 +200,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(dst);
if (skb->len > mtu) {
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
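
Both this hunk and the ip_tunnel.c hunk above replace the open-coded dst op call
with skb_dst_update_pmtu(). The helper, as this series defines it (a static inline
in include/net/dst.h; sketched), centralizes the NULL checks that callers previously
open-coded or forgot:

    static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
    {
            struct dst_entry *dst = skb_dst(skb);

            if (dst && dst->ops->update_pmtu)
                    dst->ops->update_pmtu(dst, NULL, skb, mtu);
    }
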
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index abdebca848c9..f75802ad960f 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -329,39 +329,6 @@ set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port)
sin->sin_port = port;
}
-static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
-{
- int res;
-
- mm_segment_t oldfs = get_fs();
- set_fs(get_ds());
- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
- set_fs(oldfs);
- return res;
-}
-
-static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
-{
- int res;
-
- mm_segment_t oldfs = get_fs();
- set_fs(get_ds());
- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
- set_fs(oldfs);
- return res;
-}
-
-static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
-{
- int res;
-
- mm_segment_t oldfs = get_fs();
- set_fs(get_ds());
- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
- set_fs(oldfs);
- return res;
-}
-
/*
* Set up interface addresses and routes.
*/
@@ -375,19 +342,19 @@ static int __init ic_setup_if(void)
memset(&ir, 0, sizeof(ir));
strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->dev->name);
set_sockaddr(sin, ic_myaddr, 0);
- if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) {
+ if ((err = devinet_ioctl(&init_net, SIOCSIFADDR, &ir)) < 0) {
pr_err("IP-Config: Unable to set interface address (%d)\n",
err);
return -1;
}
set_sockaddr(sin, ic_netmask, 0);
- if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) {
+ if ((err = devinet_ioctl(&init_net, SIOCSIFNETMASK, &ir)) < 0) {
pr_err("IP-Config: Unable to set interface netmask (%d)\n",
err);
return -1;
}
set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0);
- if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) {
+ if ((err = devinet_ioctl(&init_net, SIOCSIFBRDADDR, &ir)) < 0) {
pr_err("IP-Config: Unable to set interface broadcast address (%d)\n",
err);
return -1;
@@ -397,11 +364,11 @@ static int __init ic_setup_if(void)
* out, we'll try to muddle along.
*/
if (ic_dev_mtu != 0) {
- strcpy(ir.ifr_name, ic_dev->dev->name);
- ir.ifr_mtu = ic_dev_mtu;
- if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0)
+ rtnl_lock();
+ if ((err = dev_set_mtu(ic_dev->dev, ic_dev_mtu)) < 0)
pr_err("IP-Config: Unable to set interface mtu to %d (%d)\n",
ic_dev_mtu, err);
+ rtnl_unlock();
}
return 0;
}
@@ -423,7 +390,7 @@ static int __init ic_setup_routes(void)
set_sockaddr((struct sockaddr_in *) &rm.rt_genmask, 0, 0);
set_sockaddr((struct sockaddr_in *) &rm.rt_gateway, ic_gateway, 0);
rm.rt_flags = RTF_UP | RTF_GATEWAY;
- if ((err = ic_route_ioctl(SIOCADDRT, &rm)) < 0) {
+ if ((err = ip_rt_ioctl(&init_net, SIOCADDRT, &rm)) < 0) {
pr_err("IP-Config: Cannot add default route (%d)\n",
err);
return -1;
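
The deleted ic_*_ioctl() wrappers existed only to flip the user address limit with
set_fs(get_ds()) so handlers expecting __user pointers would accept on-stack kernel
structures. After the dev_ioctl()/devinet_ioctl() rework, the handlers take kernel
pointers directly, so ipconfig can call them as-is; sketched prototypes assumed by
the new call sites:

    int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr);
    int ip_rt_ioctl(struct net *net, unsigned int cmd, struct rtentry *rt);
    int dev_set_mtu(struct net_device *dev, int new_mtu); /* caller holds RTNL */

This is also why the MTU path now brackets dev_set_mtu() with
rtnl_lock()/rtnl_unlock().
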
@@ -1322,7 +1289,6 @@ static int pnp_seq_open(struct inode *indoe, struct file *file)
}
static const struct file_operations pnp_seq_fops = {
- .owner = THIS_MODULE,
.open = pnp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index fd5f19c988e4..b05689bbba31 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -3022,7 +3022,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
const char *name = vif->dev ? vif->dev->name : "none";
seq_printf(seq,
- "%2zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
+ "%2td %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
vif - mrt->vif_table,
name, vif->bytes_in, vif->pkt_in,
vif->bytes_out, vif->pkt_out,
@@ -3045,7 +3045,6 @@ static int ipmr_vif_open(struct inode *inode, struct file *file)
}
static const struct file_operations ipmr_vif_fops = {
- .owner = THIS_MODULE,
.open = ipmr_vif_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -3198,7 +3197,6 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
}
static const struct file_operations ipmr_mfc_fops = {
- .owner = THIS_MODULE,
.open = ipmr_mfc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c0cc6aa8cfaa..e6774ccb7731 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -80,35 +80,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
}
EXPORT_SYMBOL(ip_route_me_harder);
-/*
- * Extra routing may needed on local out, as the QUEUE target never
- * returns control to the table.
- */
-
-struct ip_rt_info {
- __be32 daddr;
- __be32 saddr;
- u_int8_t tos;
- u_int32_t mark;
-};
-
-static void nf_ip_saveroute(const struct sk_buff *skb,
- struct nf_queue_entry *entry)
-{
- struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
- if (entry->state.hook == NF_INET_LOCAL_OUT) {
- const struct iphdr *iph = ip_hdr(skb);
-
- rt_info->tos = iph->tos;
- rt_info->daddr = iph->daddr;
- rt_info->saddr = iph->saddr;
- rt_info->mark = skb->mark;
- }
-}
-
-static int nf_ip_reroute(struct net *net, struct sk_buff *skb,
- const struct nf_queue_entry *entry)
+int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
{
const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
@@ -119,10 +91,12 @@ static int nf_ip_reroute(struct net *net, struct sk_buff *skb,
skb->mark == rt_info->mark &&
iph->daddr == rt_info->daddr &&
iph->saddr == rt_info->saddr))
- return ip_route_me_harder(net, skb, RTN_UNSPEC);
+ return ip_route_me_harder(entry->state.net, skb,
+ RTN_UNSPEC);
}
return 0;
}
+EXPORT_SYMBOL_GPL(nf_ip_reroute);
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol)
@@ -155,9 +129,9 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
}
EXPORT_SYMBOL(nf_ip_checksum);
-static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol)
+__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol)
{
const struct iphdr *iph = ip_hdr(skb);
__sum16 csum = 0;
@@ -175,9 +149,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
}
return csum;
}
+EXPORT_SYMBOL_GPL(nf_ip_checksum_partial);
-static int nf_ip_route(struct net *net, struct dst_entry **dst,
- struct flowi *fl, bool strict __always_unused)
+int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict __always_unused)
{
struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
if (IS_ERR(rt))
@@ -185,19 +160,4 @@ static int nf_ip_route(struct net *net, struct dst_entry **dst,
*dst = &rt->dst;
return 0;
}
-
-static const struct nf_afinfo nf_ip_afinfo = {
- .family = AF_INET,
- .checksum = nf_ip_checksum,
- .checksum_partial = nf_ip_checksum_partial,
- .route = nf_ip_route,
- .saveroute = nf_ip_saveroute,
- .reroute = nf_ip_reroute,
- .route_key_size = sizeof(struct ip_rt_info),
-};
-
-static int __init ipv4_netfilter_init(void)
-{
- return nf_register_afinfo(&nf_ip_afinfo);
-}
-subsys_initcall(ipv4_netfilter_init);
+EXPORT_SYMBOL_GPL(nf_ip_route);
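
The per-family struct nf_afinfo indirection is being dismantled here: instead of
registering an ops vector at subsys_initcall time, the IPv4 helpers are exported and
callers link to them directly, while the saveroute/reroute state moves toward the
netfilter core. Caller-side effect, sketched:

    /* before: through the afinfo ops vector */
    const struct nf_afinfo *afinfo = nf_get_afinfo(AF_INET);

    if (afinfo)
            afinfo->route(net, &dst, &fl, false);

    /* after: direct call to the exported helper */
    nf_ip_route(net, &dst, &fl, false);
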
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index c11eb1744ab1..5f52236780b4 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -72,11 +72,21 @@ endif # NF_TABLES_IPV4
config NF_TABLES_ARP
tristate "ARP nf_tables support"
+ select NETFILTER_FAMILY_ARP
help
This option enables the ARP support for nf_tables.
endif # NF_TABLES
+config NF_FLOW_TABLE_IPV4
+ tristate "Netfilter flow table IPv4 module"
+ depends on NF_CONNTRACK && NF_TABLES
+ select NF_FLOW_TABLE
+ help
+ This option adds flow table support for IPv4.
+
+ To compile it as a module, choose M here.
+
config NF_DUP_IPV4
tristate "Netfilter IPv4 packet duplication to alternate destination"
depends on !NF_CONNTRACK || NF_CONNTRACK
@@ -148,6 +158,7 @@ config NF_NAT_SNMP_BASIC
depends on NF_CONNTRACK_SNMP
depends on NETFILTER_ADVANCED
default NF_NAT && NF_CONNTRACK_SNMP
+ select ASN1
---help---
This module implements an Application Layer Gateway (ALG) for
@@ -333,6 +344,7 @@ config IP_NF_TARGET_CLUSTERIP
depends on NF_CONNTRACK_IPV4
depends on NETFILTER_ADVANCED
select NF_CONNTRACK_MARK
+ select NETFILTER_FAMILY_ARP
help
The CLUSTERIP target allows you to build load-balancing clusters of
network servers without having a dedicated load-balancing
@@ -392,6 +404,7 @@ endif # IP_NF_IPTABLES
config IP_NF_ARPTABLES
tristate "ARP tables support"
select NETFILTER_XTABLES
+ select NETFILTER_FAMILY_ARP
depends on NETFILTER_ADVANCED
help
arptables is a general, extensible packet identification framework.
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index adcdae358365..2dad20eefd26 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -27,9 +27,15 @@ obj-$(CONFIG_NF_REJECT_IPV4) += nf_reject_ipv4.o
# NAT helpers (nf_conntrack)
obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
+
+nf_nat_snmp_basic-y := nf_nat_snmp_basic-asn1.o nf_nat_snmp_basic_main.o
+nf_nat_snmp_basic-y : nf_nat_snmp_basic-asn1.h nf_nat_snmp_basic-asn1.c
obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
+clean-files := nf_nat_snmp_basic-asn1.c nf_nat_snmp_basic-asn1.h
+
obj-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o
+
# NAT protocols (nf_nat)
obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
@@ -43,6 +49,9 @@ obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
+# flow table support
+obj-$(CONFIG_NF_FLOW_TABLE_IPV4) += nf_flow_table_ipv4.o
+
# generic IP tables
obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 0c3c944a7b72..4ffe302f9b82 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -202,13 +202,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
- private = table->private;
+ private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
- /*
- * Ensure we load private-> members after we've fetched the base
- * pointer.
- */
- smp_read_barrier_depends();
table_base = private->entries;
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
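
The identical change lands in ip_tables.c below: since the 4.15 memory-model work
folded the Alpha read barrier into READ_ONCE(), a marked load of the table->private
pointer is all that is needed to order the dependent loads through it. Reader-side
pattern, reduced to a sketch:

    /* READ_ONCE() now carries the address-dependency ordering that
     * smp_read_barrier_depends() used to supply, so members reached
     * through "private" may be loaded without a separate barrier. */
    private = READ_ONCE(table->private);
    table_base = private->entries;
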
@@ -810,9 +805,8 @@ static int get_info(struct net *net, void __user *user,
if (compat)
xt_compat_lock(NFPROTO_ARP);
#endif
- t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
- "arptable_%s", name);
- if (t) {
+ t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
+ if (!IS_ERR(t)) {
struct arpt_getinfo info;
const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
@@ -841,7 +835,7 @@ static int get_info(struct net *net, void __user *user,
xt_table_unlock(t);
module_put(t->me);
} else
- ret = -ENOENT;
+ ret = PTR_ERR(t);
#ifdef CONFIG_COMPAT
if (compat)
xt_compat_unlock(NFPROTO_ARP);
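
try_then_request_module() returned NULL on failure, so every caller had to invent
-ENOENT. The conversions in this file (get_info(), get_entries(), __do_replace(),
do_add_counters(), compat_get_entries()) instead forward ERR_PTR() codes from the
x_tables lookup helpers; the new request variant's contract is roughly (sketch
inferred from the call sites):

    /* never returns NULL; failures come back as ERR_PTR() */
    struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
                                                const char *name)
    {
            struct xt_table *t = xt_find_table_lock(net, af, name);

            if (IS_ERR(t)) {
                    int err = request_module("%stable_%s",
                                             xt_prefix[af], name);

                    if (err < 0)
                            return ERR_PTR(err);
                    t = xt_find_table_lock(net, af, name);
            }
            return t;
    }
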
@@ -866,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
- if (t) {
+ if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
if (get.size == private->size)
@@ -878,7 +872,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
module_put(t->me);
xt_table_unlock(t);
} else
- ret = -ENOENT;
+ ret = PTR_ERR(t);
return ret;
}
@@ -903,10 +897,9 @@ static int __do_replace(struct net *net, const char *name,
goto out;
}
- t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
- "arptable_%s", name);
- if (!t) {
- ret = -ENOENT;
+ t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
+ if (IS_ERR(t)) {
+ ret = PTR_ERR(t);
goto free_newinfo_counters_untrans;
}
@@ -1020,8 +1013,8 @@ static int do_add_counters(struct net *net, const void __user *user,
return PTR_ERR(paddc);
t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
- if (!t) {
- ret = -ENOENT;
+ if (IS_ERR(t)) {
+ ret = PTR_ERR(t);
goto free;
}
@@ -1408,7 +1401,7 @@ static int compat_get_entries(struct net *net,
xt_compat_lock(NFPROTO_ARP);
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
- if (t) {
+ if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
struct xt_table_info info;
@@ -1423,7 +1416,7 @@ static int compat_get_entries(struct net *net,
module_put(t->me);
xt_table_unlock(t);
} else
- ret = -ENOENT;
+ ret = PTR_ERR(t);
xt_compat_unlock(NFPROTO_ARP);
return ret;
@@ -1658,7 +1651,6 @@ static int __init arp_tables_init(void)
if (ret < 0)
goto err4;
- pr_info("arp_tables: (C) 2002 David S. Miller\n");
return 0;
err4:
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2e0d339028bb..9a71f3149507 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -260,13 +260,8 @@ ipt_do_table(struct sk_buff *skb,
WARN_ON(!(table->valid_hooks & (1 << hook)));
local_bh_disable();
addend = xt_write_recseq_begin();
- private = table->private;
+ private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
- /*
- * Ensure we load private-> members after we've fetched the base
- * pointer.
- */
- smp_read_barrier_depends();
table_base = private->entries;
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
@@ -973,9 +968,8 @@ static int get_info(struct net *net, void __user *user,
if (compat)
xt_compat_lock(AF_INET);
#endif
- t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
- "iptable_%s", name);
- if (t) {
+ t = xt_request_find_table_lock(net, AF_INET, name);
+ if (!IS_ERR(t)) {
struct ipt_getinfo info;
const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
@@ -1005,7 +999,7 @@ static int get_info(struct net *net, void __user *user,
xt_table_unlock(t);
module_put(t->me);
} else
- ret = -ENOENT;
+ ret = PTR_ERR(t);
#ifdef CONFIG_COMPAT
if (compat)
xt_compat_unlock(AF_INET);
@@ -1030,7 +1024,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, AF_INET, get.name);
- if (t) {
+ if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
@@ -1041,7 +1035,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
module_put(t->me);
xt_table_unlock(t);
} else
- ret = -ENOENT;
+ ret = PTR_ERR(t);
return ret;
}
@@ -1064,10 +1058,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
goto out;
}
- t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
- "iptable_%s", name);
- if (!t) {
- ret = -ENOENT;
+ t = xt_request_find_table_lock(net, AF_INET, name);
+ if (IS_ERR(t)) {
+ ret = PTR_ERR(t);
goto free_newinfo_counters_untrans;
}
@@ -1181,8 +1174,8 @@ do_add_counters(struct net *net, const void __user *user,
return PTR_ERR(paddc);
t = xt_find_table_lock(net, AF_INET, tmp.name);
- if (!t) {
- ret = -ENOENT;
+ if (IS_ERR(t)) {
+ ret = PTR_ERR(t);
goto free;
}
@@ -1625,7 +1618,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
xt_compat_lock(AF_INET);
t = xt_find_table_lock(net, AF_INET, get.name);
- if (t) {
+ if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
struct xt_table_info info;
ret = compat_table_info(private, &info);
@@ -1639,7 +1632,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
module_put(t->me);
xt_table_unlock(t);
} else
- ret = -ENOENT;
+ ret = PTR_ERR(t);
xt_compat_unlock(AF_INET);
return ret;
@@ -1941,7 +1934,6 @@ static int __init ip_tables_init(void)
if (ret < 0)
goto err5;
- pr_info("(C) 2000-2006 Netfilter Core Team\n");
return 0;
err5:
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 69060e3abe85..c29a6ca6c6d6 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -776,7 +776,6 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
}
static const struct file_operations clusterip_proc_fops = {
- .owner = THIS_MODULE,
.open = clusterip_proc_open,
.read = seq_read,
.write = clusterip_proc_write,
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 7667f223d7f8..9ac92ea7b93c 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -38,12 +38,6 @@ static unsigned int
iptable_filter_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- if (state->hook == NF_INET_LOCAL_OUT &&
- (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)))
- /* root is playing with raw sockets. */
- return NF_ACCEPT;
-
return ipt_do_table(skb, state, state->net->ipv4.iptable_filter);
}
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index aebdb337fd7e..dea138ca8925 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -49,11 +49,6 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
u_int32_t mark;
int err;
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
- return NF_ACCEPT;
-
/* Save things which could affect route */
mark = skb->mark;
iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index a1a07b338ccf..0f7255cc65ee 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -72,6 +72,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
{
.hook = iptable_nat_ipv4_in,
.pf = NFPROTO_IPV4,
+ .nat_hook = true,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_NAT_DST,
},
@@ -79,6 +80,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
{
.hook = iptable_nat_ipv4_out,
.pf = NFPROTO_IPV4,
+ .nat_hook = true,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SRC,
},
@@ -86,6 +88,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
{
.hook = iptable_nat_ipv4_local_fn,
.pf = NFPROTO_IPV4,
+ .nat_hook = true,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST,
},
@@ -93,6 +96,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
{
.hook = iptable_nat_ipv4_fn,
.pf = NFPROTO_IPV4,
+ .nat_hook = true,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC,
},
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 2642ecd2645c..960625aabf04 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/slab.h>
@@ -12,6 +13,10 @@
static int __net_init iptable_raw_table_init(struct net *net);
+static bool raw_before_defrag __read_mostly;
+MODULE_PARM_DESC(raw_before_defrag, "Enable raw table before defrag");
+module_param(raw_before_defrag, bool, 0000);
+
static const struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
@@ -21,17 +26,20 @@ static const struct xt_table packet_raw = {
.table_init = iptable_raw_table_init,
};
+static const struct xt_table packet_raw_before_defrag = {
+ .name = "raw",
+ .valid_hooks = RAW_VALID_HOOKS,
+ .me = THIS_MODULE,
+ .af = NFPROTO_IPV4,
+ .priority = NF_IP_PRI_RAW_BEFORE_DEFRAG,
+ .table_init = iptable_raw_table_init,
+};
+
/* The work comes in here from netfilter.c. */
static unsigned int
iptable_raw_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- if (state->hook == NF_INET_LOCAL_OUT &&
- (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)))
- /* root is playing with raw sockets. */
- return NF_ACCEPT;
-
return ipt_do_table(skb, state, state->net->ipv4.iptable_raw);
}
@@ -40,15 +48,19 @@ static struct nf_hook_ops *rawtable_ops __read_mostly;
static int __net_init iptable_raw_table_init(struct net *net)
{
struct ipt_replace *repl;
+ const struct xt_table *table = &packet_raw;
int ret;
+ if (raw_before_defrag)
+ table = &packet_raw_before_defrag;
+
if (net->ipv4.iptable_raw)
return 0;
- repl = ipt_alloc_initial_table(&packet_raw);
+ repl = ipt_alloc_initial_table(table);
if (repl == NULL)
return -ENOMEM;
- ret = ipt_register_table(net, &packet_raw, repl, rawtable_ops,
+ ret = ipt_register_table(net, table, repl, rawtable_ops,
&net->ipv4.iptable_raw);
kfree(repl);
return ret;
@@ -69,8 +81,15 @@ static struct pernet_operations iptable_raw_net_ops = {
static int __init iptable_raw_init(void)
{
int ret;
+ const struct xt_table *table = &packet_raw;
+
+ if (raw_before_defrag) {
+ table = &packet_raw_before_defrag;
+
+ pr_info("Enabling raw table before defrag\n");
+ }
- rawtable_ops = xt_hook_ops_alloc(&packet_raw, iptable_raw_hook);
+ rawtable_ops = xt_hook_ops_alloc(table, iptable_raw_hook);
if (IS_ERR(rawtable_ops))
return PTR_ERR(rawtable_ops);
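
raw_before_defrag swaps in a second table instance whose NF_IP_PRI_RAW_BEFORE_DEFRAG
priority hooks ahead of the defragmentation step, so NOTRACK rules can match
individual fragments. The parameter has 0000 permissions, i.e. it is load-time only;
a hedged usage example: modprobe iptable_raw raw_before_defrag=1.
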
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index ff226596e4b5..e5379fe57b64 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -43,12 +43,6 @@ static unsigned int
iptable_security_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- if (state->hook == NF_INET_LOCAL_OUT &&
- (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)))
- /* Somebody is playing with raw sockets. */
- return NF_ACCEPT;
-
return ipt_do_table(skb, state, state->net->ipv4.iptable_security);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 89af9d88ca21..de213a397ea8 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -154,11 +154,6 @@ static unsigned int ipv4_conntrack_local(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
- return NF_ACCEPT;
-
if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
return NF_ACCEPT;
@@ -368,7 +363,7 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
MODULE_ALIAS("ip_conntrack");
MODULE_LICENSE("GPL");
-static struct nf_conntrack_l4proto *builtin_l4proto4[] = {
+static const struct nf_conntrack_l4proto * const builtin_l4proto4[] = {
&nf_conntrack_l4proto_tcp4,
&nf_conntrack_l4proto_udp4,
&nf_conntrack_l4proto_icmp,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 1849fedd9b81..5c15beafa711 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -22,7 +22,7 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_log.h>
-static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static const unsigned int nf_ct_icmp_timeout = 30*HZ;
static inline struct nf_icmp_net *icmp_pernet(struct net *net)
{
@@ -351,7 +351,7 @@ static struct nf_proto_net *icmp_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.icmp.pn;
}
-struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
{
.l3proto = PF_INET,
.l4proto = IPPROTO_ICMP,
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 37fe1616ca0b..a0d3ad60a411 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -78,6 +78,8 @@ static unsigned int ipv4_conntrack_defrag(void *priv,
if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
return NF_ACCEPT;
#endif
+ if (skb->_nfct == IP_CT_UNTRACKED)
+ return NF_ACCEPT;
#endif
/* Gather fragments. */
if (ip_is_fragment(ip_hdr(skb))) {
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
new file mode 100644
index 000000000000..b2d01eb25f2c
--- /dev/null
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -0,0 +1,284 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/ip.h>
+#include <net/neighbour.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
+/* For layer 4 checksum field offset. */
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
+ __be32 addr, __be32 new_addr)
+{
+ struct tcphdr *tcph;
+
+ if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+ skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+ return -1;
+
+ tcph = (void *)(skb_network_header(skb) + thoff);
+ inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
+
+ return 0;
+}
+
+static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
+ __be32 addr, __be32 new_addr)
+{
+ struct udphdr *udph;
+
+ if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+ skb_try_make_writable(skb, thoff + sizeof(*udph)))
+ return -1;
+
+ udph = (void *)(skb_network_header(skb) + thoff);
+ if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace4(&udph->check, skb, addr,
+ new_addr, true);
+ if (!udph->check)
+ udph->check = CSUM_MANGLED_0;
+ }
+
+ return 0;
+}
+
+static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
+ unsigned int thoff, __be32 addr,
+ __be32 new_addr)
+{
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
+ return NF_DROP;
+ break;
+ case IPPROTO_UDP:
+ if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
+ return NF_DROP;
+ break;
+ }
+
+ return 0;
+}
+
+static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+ struct iphdr *iph, unsigned int thoff,
+ enum flow_offload_tuple_dir dir)
+{
+ __be32 addr, new_addr;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = iph->saddr;
+ new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
+ iph->saddr = new_addr;
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = iph->daddr;
+ new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
+ iph->daddr = new_addr;
+ break;
+ default:
+ return -1;
+ }
+ csum_replace4(&iph->check, addr, new_addr);
+
+ return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
+}
+
+static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+ struct iphdr *iph, unsigned int thoff,
+ enum flow_offload_tuple_dir dir)
+{
+ __be32 addr, new_addr;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = iph->daddr;
+ new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
+ iph->daddr = new_addr;
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = iph->saddr;
+ new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
+ iph->saddr = new_addr;
+ break;
+ default:
+ return -1;
+ }
+
+ return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
+}
+
+static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+ enum flow_offload_tuple_dir dir)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ unsigned int thoff = iph->ihl * 4;
+
+ if (flow->flags & FLOW_OFFLOAD_SNAT &&
+ (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
+ nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
+ return -1;
+ if (flow->flags & FLOW_OFFLOAD_DNAT &&
+ (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
+ nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
+ return -1;
+
+ return 0;
+}
+
+static bool ip_has_options(unsigned int thoff)
+{
+ return thoff != sizeof(struct iphdr);
+}
+
+static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
+ struct flow_offload_tuple *tuple)
+{
+ struct flow_ports *ports;
+ unsigned int thoff;
+ struct iphdr *iph;
+
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return -1;
+
+ iph = ip_hdr(skb);
+ thoff = iph->ihl * 4;
+
+ if (ip_is_fragment(iph) ||
+ unlikely(ip_has_options(thoff)))
+ return -1;
+
+ if (iph->protocol != IPPROTO_TCP &&
+ iph->protocol != IPPROTO_UDP)
+ return -1;
+
+ if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+ return -1;
+
+ ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+
+ tuple->src_v4.s_addr = iph->saddr;
+ tuple->dst_v4.s_addr = iph->daddr;
+ tuple->src_port = ports->source;
+ tuple->dst_port = ports->dest;
+ tuple->l3proto = AF_INET;
+ tuple->l4proto = iph->protocol;
+ tuple->iifidx = dev->ifindex;
+
+ return 0;
+}
+
+/* Based on ip_exceeds_mtu(). */
+static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+ if (skb->len <= mtu)
+ return false;
+
+ if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+ return false;
+
+ return true;
+}
+
+static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rtable *rt)
+{
+ u32 mtu;
+
+ mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
+ if (__nf_flow_exceeds_mtu(skb, mtu))
+ return true;
+
+ return false;
+}
+
+unsigned int
+nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct nf_flowtable *flow_table = priv;
+ struct flow_offload_tuple tuple = {};
+ enum flow_offload_tuple_dir dir;
+ struct flow_offload *flow;
+ struct net_device *outdev;
+ const struct rtable *rt;
+ struct iphdr *iph;
+ __be32 nexthop;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return NF_ACCEPT;
+
+ if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
+ return NF_ACCEPT;
+
+ tuplehash = flow_offload_lookup(flow_table, &tuple);
+ if (tuplehash == NULL)
+ return NF_ACCEPT;
+
+ outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
+ if (!outdev)
+ return NF_ACCEPT;
+
+ dir = tuplehash->tuple.dir;
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+
+ rt = (const struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+ if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
+ return NF_ACCEPT;
+
+ if (skb_try_make_writable(skb, sizeof(*iph)))
+ return NF_DROP;
+
+ if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
+ nf_flow_nat_ip(flow, skb, dir) < 0)
+ return NF_DROP;
+
+ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ iph = ip_hdr(skb);
+ ip_decrease_ttl(iph);
+
+ skb->dev = outdev;
+ nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+ neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
+
+ return NF_STOLEN;
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
+
+static struct nf_flowtable_type flowtable_ipv4 = {
+ .family = NFPROTO_IPV4,
+ .params = &nf_flow_offload_rhash_params,
+ .gc = nf_flow_offload_work_gc,
+ .hook = nf_flow_offload_ip_hook,
+ .owner = THIS_MODULE,
+};
+
+static int __init nf_flow_ipv4_module_init(void)
+{
+ nft_register_flowtable_type(&flowtable_ipv4);
+
+ return 0;
+}
+
+static void __exit nf_flow_ipv4_module_exit(void)
+{
+ nft_unregister_flowtable_type(&flowtable_ipv4);
+}
+
+module_init(nf_flow_ipv4_module_init);
+module_exit(nf_flow_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
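
This file is the IPv4 fast path for the new flow table infrastructure: once a
connection has been offloaded, forwarded packets are matched by tuple, NATed,
TTL-decremented and handed straight to neigh_xmit(), bypassing the remainder of the
forward path. The kind of ruleset it is meant to serve, in nftables syntax of the
same era (hedged; table and device names hypothetical):

    table ip x {
            flowtable f {
                    hook ingress priority 0; devices = { eth0, eth1 };
            }
            chain forward {
                    type filter hook forward priority 0; policy accept;
                    ip protocol { tcp, udp } flow offload @f
            }
    }
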
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 0443ca4120b0..f7ff6a364d7b 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -356,11 +356,6 @@ nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
#endif
unsigned int ret;
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
- return NF_ACCEPT;
-
ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
#ifdef CONFIG_XFRM
if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -396,11 +391,6 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
unsigned int ret;
int err;
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
- return NF_ACCEPT;
-
ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
if (ret != NF_DROP && ret != NF_STOLEN &&
(ct = nf_ct_get(skb, &ctinfo)) != NULL) {
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.asn1 b/net/ipv4/netfilter/nf_nat_snmp_basic.asn1
new file mode 100644
index 000000000000..24b73268f362
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.asn1
@@ -0,0 +1,177 @@
+Message ::=
+ SEQUENCE {
+ version
+ INTEGER ({snmp_version}),
+
+ community
+ OCTET STRING,
+
+ pdu
+ PDUs
+ }
+
+
+ObjectName ::=
+ OBJECT IDENTIFIER
+
+ObjectSyntax ::=
+ CHOICE {
+ simple
+ SimpleSyntax,
+
+ application-wide
+ ApplicationSyntax
+ }
+
+SimpleSyntax ::=
+ CHOICE {
+ integer-value
+ INTEGER,
+
+ string-value
+ OCTET STRING,
+
+ objectID-value
+ OBJECT IDENTIFIER
+ }
+
+ApplicationSyntax ::=
+ CHOICE {
+ ipAddress-value
+ IpAddress,
+
+ counter-value
+ Counter32,
+
+ timeticks-value
+ TimeTicks,
+
+ arbitrary-value
+ Opaque,
+
+ big-counter-value
+ Counter64,
+
+ unsigned-integer-value
+ Unsigned32
+ }
+
+IpAddress ::=
+ [APPLICATION 0]
+ IMPLICIT OCTET STRING OPTIONAL ({snmp_helper})
+
+Counter32 ::=
+ [APPLICATION 1]
+ IMPLICIT INTEGER OPTIONAL
+
+Unsigned32 ::=
+ [APPLICATION 2]
+ IMPLICIT INTEGER OPTIONAL
+
+Gauge32 ::= Unsigned32 OPTIONAL
+
+TimeTicks ::=
+ [APPLICATION 3]
+ IMPLICIT INTEGER OPTIONAL
+
+Opaque ::=
+ [APPLICATION 4]
+ IMPLICIT OCTET STRING OPTIONAL
+
+Counter64 ::=
+ [APPLICATION 6]
+ IMPLICIT INTEGER OPTIONAL
+
+PDUs ::=
+ CHOICE {
+ get-request
+ GetRequest-PDU,
+
+ get-next-request
+ GetNextRequest-PDU,
+
+ get-bulk-request
+ GetBulkRequest-PDU,
+
+ response
+ Response-PDU,
+
+ set-request
+ SetRequest-PDU,
+
+ inform-request
+ InformRequest-PDU,
+
+ snmpV2-trap
+ SNMPv2-Trap-PDU,
+
+ report
+ Report-PDU
+ }
+
+GetRequest-PDU ::=
+ [0] IMPLICIT PDU OPTIONAL
+
+GetNextRequest-PDU ::=
+ [1] IMPLICIT PDU OPTIONAL
+
+Response-PDU ::=
+ [2] IMPLICIT PDU OPTIONAL
+
+SetRequest-PDU ::=
+ [3] IMPLICIT PDU OPTIONAL
+
+-- [4] is obsolete
+
+GetBulkRequest-PDU ::=
+ [5] IMPLICIT PDU OPTIONAL
+
+InformRequest-PDU ::=
+ [6] IMPLICIT PDU OPTIONAL
+
+SNMPv2-Trap-PDU ::=
+ [7] IMPLICIT PDU OPTIONAL
+
+Report-PDU ::=
+ [8] IMPLICIT PDU OPTIONAL
+
+PDU ::=
+ SEQUENCE {
+ request-id
+ INTEGER,
+
+ error-status
+ INTEGER,
+
+ error-index
+ INTEGER,
+
+ variable-bindings
+ VarBindList
+ }
+
+
+VarBind ::=
+ SEQUENCE {
+ name
+ ObjectName,
+
+ CHOICE {
+ value
+ ObjectSyntax,
+
+ unSpecified
+ NULL,
+
+ noSuchObject
+ [0] IMPLICIT NULL,
+
+ noSuchInstance
+ [1] IMPLICIT NULL,
+
+ endOfMibView
+ [2] IMPLICIT NULL
+ }
+}
+
+VarBindList ::= SEQUENCE OF VarBind
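
This grammar is compiled at build time by the kernel's ASN.1 compiler (hence the
generated -asn1.c/-asn1.h files and clean-files entries in the Makefile hunk above).
The ({snmp_version}) and ({snmp_helper}) annotations name action callbacks invoked
while the generated bytecode walks a BER message; they follow the asn1_action_t
convention of include/linux/asn1_decoder.h. Shape of the replacement main file,
sketched (the wrapper name is hypothetical):

    #include <linux/asn1_decoder.h>
    #include "nf_nat_snmp_basic-asn1.h"     /* generated decoder table */

    static int snmp_version(void *context, size_t hdrlen, unsigned char tag,
                            const void *data, size_t datalen)
    {
            /* only SNMPv1 and SNMPv2c messages are mangled */
            if (*(unsigned char *)data > 1)
                    return -ENOTSUPP;
            return 1;
    }

    /* snmp_helper() rewrites matching IpAddress octets in place; the
     * hand-rolled parser deleted below then collapses to one call: */
    static int snmp_decode(void *ctx, const void *data, size_t datalen)
    {
            return asn1_ber_decoder(&nf_nat_snmp_basic_decoder, ctx,
                                    data, datalen);
    }
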
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
deleted file mode 100644
index d5b1e0b3f687..000000000000
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ /dev/null
@@ -1,1286 +0,0 @@
-/*
- * nf_nat_snmp_basic.c
- *
- * Basic SNMP Application Layer Gateway
- *
- * This IP NAT module is intended for use with SNMP network
- * discovery and monitoring applications where target networks use
- * conflicting private address realms.
- *
- * Static NAT is used to remap the networks from the view of the network
- * management system at the IP layer, and this module remaps some application
- * layer addresses to match.
- *
- * The simplest form of ALG is performed, where only tagged IP addresses
- * are modified. The module does not need to be MIB aware and only scans
- * messages at the ASN.1/BER level.
- *
- * Currently, only SNMPv1 and SNMPv2 are supported.
- *
- * More information on ALG and associated issues can be found in
- * RFC 2962
- *
- * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory
- * McLean & Jochen Friedrich, stripped down for use in the kernel.
- *
- * Copyright (c) 2000 RP Internet (www.rpi.net.au).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Author: James Morris <jmorris@intercode.com.au>
- *
- * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <net/checksum.h>
-#include <net/udp.h>
-
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_conntrack_expect.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <linux/netfilter/nf_conntrack_snmp.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
-MODULE_ALIAS("ip_nat_snmp_basic");
-
-#define SNMP_PORT 161
-#define SNMP_TRAP_PORT 162
-#define NOCT1(n) (*(u8 *)(n))
-
-static int debug;
-static DEFINE_SPINLOCK(snmp_lock);
-
-/*
- * Application layer address mapping mimics the NAT mapping, but
- * only for the first octet in this case (a more flexible system
- * can be implemented if needed).
- */
-struct oct1_map
-{
- u_int8_t from;
- u_int8_t to;
-};
-
-
-/*****************************************************************************
- *
- * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse)
- *
- *****************************************************************************/
-
-/* Class */
-#define ASN1_UNI 0 /* Universal */
-#define ASN1_APL 1 /* Application */
-#define ASN1_CTX 2 /* Context */
-#define ASN1_PRV 3 /* Private */
-
-/* Tag */
-#define ASN1_EOC 0 /* End Of Contents */
-#define ASN1_BOL 1 /* Boolean */
-#define ASN1_INT 2 /* Integer */
-#define ASN1_BTS 3 /* Bit String */
-#define ASN1_OTS 4 /* Octet String */
-#define ASN1_NUL 5 /* Null */
-#define ASN1_OJI 6 /* Object Identifier */
-#define ASN1_OJD 7 /* Object Description */
-#define ASN1_EXT 8 /* External */
-#define ASN1_SEQ 16 /* Sequence */
-#define ASN1_SET 17 /* Set */
-#define ASN1_NUMSTR 18 /* Numerical String */
-#define ASN1_PRNSTR 19 /* Printable String */
-#define ASN1_TEXSTR 20 /* Teletext String */
-#define ASN1_VIDSTR 21 /* Video String */
-#define ASN1_IA5STR 22 /* IA5 String */
-#define ASN1_UNITIM 23 /* Universal Time */
-#define ASN1_GENTIM 24 /* General Time */
-#define ASN1_GRASTR 25 /* Graphical String */
-#define ASN1_VISSTR 26 /* Visible String */
-#define ASN1_GENSTR 27 /* General String */
-
-/* Primitive / Constructed methods*/
-#define ASN1_PRI 0 /* Primitive */
-#define ASN1_CON 1 /* Constructed */
-
-/*
- * Error codes.
- */
-#define ASN1_ERR_NOERROR 0
-#define ASN1_ERR_DEC_EMPTY 2
-#define ASN1_ERR_DEC_EOC_MISMATCH 3
-#define ASN1_ERR_DEC_LENGTH_MISMATCH 4
-#define ASN1_ERR_DEC_BADVALUE 5
-
-/*
- * ASN.1 context.
- */
-struct asn1_ctx
-{
- int error; /* Error condition */
- unsigned char *pointer; /* Octet just to be decoded */
- unsigned char *begin; /* First octet */
- unsigned char *end; /* Octet after last octet */
-};
-
-/*
- * Octet string (not null terminated)
- */
-struct asn1_octstr
-{
- unsigned char *data;
- unsigned int len;
-};
-
-static void asn1_open(struct asn1_ctx *ctx,
- unsigned char *buf,
- unsigned int len)
-{
- ctx->begin = buf;
- ctx->end = buf + len;
- ctx->pointer = buf;
- ctx->error = ASN1_ERR_NOERROR;
-}
-
-static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
-{
- if (ctx->pointer >= ctx->end) {
- ctx->error = ASN1_ERR_DEC_EMPTY;
- return 0;
- }
- *ch = *(ctx->pointer)++;
- return 1;
-}
-
-static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
-{
- unsigned char ch;
-
- *tag = 0;
-
- do
- {
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
- *tag <<= 7;
- *tag |= ch & 0x7F;
- } while ((ch & 0x80) == 0x80);
- return 1;
-}
-
-static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
- unsigned int *cls,
- unsigned int *con,
- unsigned int *tag)
-{
- unsigned char ch;
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *cls = (ch & 0xC0) >> 6;
- *con = (ch & 0x20) >> 5;
- *tag = (ch & 0x1F);
-
- if (*tag == 0x1F) {
- if (!asn1_tag_decode(ctx, tag))
- return 0;
- }
- return 1;
-}
-
-static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
- unsigned int *def,
- unsigned int *len)
-{
- unsigned char ch, cnt;
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- if (ch == 0x80)
- *def = 0;
- else {
- *def = 1;
-
- if (ch < 0x80)
- *len = ch;
- else {
- cnt = ch & 0x7F;
- *len = 0;
-
- while (cnt > 0) {
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
- *len <<= 8;
- *len |= ch;
- cnt--;
- }
- }
- }
-
- /* don't trust len bigger than ctx buffer */
- if (*len > ctx->end - ctx->pointer)
- return 0;
-
- return 1;
-}
-
-static unsigned char asn1_header_decode(struct asn1_ctx *ctx,
- unsigned char **eoc,
- unsigned int *cls,
- unsigned int *con,
- unsigned int *tag)
-{
- unsigned int def, len;
-
- if (!asn1_id_decode(ctx, cls, con, tag))
- return 0;
-
- def = len = 0;
- if (!asn1_length_decode(ctx, &def, &len))
- return 0;
-
- /* primitive shall be definite, indefinite shall be constructed */
- if (*con == ASN1_PRI && !def)
- return 0;
-
- if (def)
- *eoc = ctx->pointer + len;
- else
- *eoc = NULL;
- return 1;
-}
-
-static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc)
-{
- unsigned char ch;
-
- if (eoc == NULL) {
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- if (ch != 0x00) {
- ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
- return 0;
- }
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- if (ch != 0x00) {
- ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
- return 0;
- }
- return 1;
- } else {
- if (ctx->pointer != eoc) {
- ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH;
- return 0;
- }
- return 1;
- }
-}
-
-static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc)
-{
- ctx->pointer = eoc;
- return 1;
-}
-
-static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
- unsigned char *eoc,
- long *integer)
-{
- unsigned char ch;
- unsigned int len;
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer = (signed char) ch;
- len = 1;
-
- while (ctx->pointer < eoc) {
- if (++len > sizeof (long)) {
- ctx->error = ASN1_ERR_DEC_BADVALUE;
- return 0;
- }
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer <<= 8;
- *integer |= ch;
- }
- return 1;
-}
-
-static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
- unsigned char *eoc,
- unsigned int *integer)
-{
- unsigned char ch;
- unsigned int len;
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer = ch;
- if (ch == 0) len = 0;
- else len = 1;
-
- while (ctx->pointer < eoc) {
- if (++len > sizeof (unsigned int)) {
- ctx->error = ASN1_ERR_DEC_BADVALUE;
- return 0;
- }
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer <<= 8;
- *integer |= ch;
- }
- return 1;
-}
-
-static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
- unsigned char *eoc,
- unsigned long *integer)
-{
- unsigned char ch;
- unsigned int len;
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer = ch;
- if (ch == 0) len = 0;
- else len = 1;
-
- while (ctx->pointer < eoc) {
- if (++len > sizeof (unsigned long)) {
- ctx->error = ASN1_ERR_DEC_BADVALUE;
- return 0;
- }
-
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *integer <<= 8;
- *integer |= ch;
- }
- return 1;
-}
-
-static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
- unsigned char *eoc,
- unsigned char **octets,
- unsigned int *len)
-{
- unsigned char *ptr;
-
- *len = 0;
-
- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
- if (*octets == NULL)
- return 0;
-
- ptr = *octets;
- while (ctx->pointer < eoc) {
- if (!asn1_octet_decode(ctx, ptr++)) {
- kfree(*octets);
- *octets = NULL;
- return 0;
- }
- (*len)++;
- }
- return 1;
-}
-
-static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
- unsigned long *subid)
-{
- unsigned char ch;
-
- *subid = 0;
-
- do {
- if (!asn1_octet_decode(ctx, &ch))
- return 0;
-
- *subid <<= 7;
- *subid |= ch & 0x7F;
- } while ((ch & 0x80) == 0x80);
- return 1;
-}
-
-static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
- unsigned char *eoc,
- unsigned long **oid,
- unsigned int *len)
-{
- unsigned long subid;
- unsigned long *optr;
- size_t size;
-
- size = eoc - ctx->pointer + 1;
-
- /* first subid actually encodes first two subids */
- if (size < 2 || size > ULONG_MAX/sizeof(unsigned long))
- return 0;
-
- *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
- if (*oid == NULL)
- return 0;
-
- optr = *oid;
-
- if (!asn1_subid_decode(ctx, &subid)) {
- kfree(*oid);
- *oid = NULL;
- return 0;
- }
-
- if (subid < 40) {
- optr[0] = 0;
- optr[1] = subid;
- } else if (subid < 80) {
- optr[0] = 1;
- optr[1] = subid - 40;
- } else {
- optr[0] = 2;
- optr[1] = subid - 80;
- }
-
- *len = 2;
- optr += 2;
-
- while (ctx->pointer < eoc) {
- if (++(*len) > size) {
- ctx->error = ASN1_ERR_DEC_BADVALUE;
- kfree(*oid);
- *oid = NULL;
- return 0;
- }
-
- if (!asn1_subid_decode(ctx, optr++)) {
- kfree(*oid);
- *oid = NULL;
- return 0;
- }
- }
- return 1;
-}
-
-/*****************************************************************************
- *
- * SNMP decoding routines (gxsnmp author Dirk Wisse)
- *
- *****************************************************************************/
-
-/* SNMP Versions */
-#define SNMP_V1 0
-#define SNMP_V2C 1
-#define SNMP_V2 2
-#define SNMP_V3 3
-
-/* Default Sizes */
-#define SNMP_SIZE_COMM 256
-#define SNMP_SIZE_OBJECTID 128
-#define SNMP_SIZE_BUFCHR 256
-#define SNMP_SIZE_BUFINT 128
-#define SNMP_SIZE_SMALLOBJECTID 16
-
-/* Requests */
-#define SNMP_PDU_GET 0
-#define SNMP_PDU_NEXT 1
-#define SNMP_PDU_RESPONSE 2
-#define SNMP_PDU_SET 3
-#define SNMP_PDU_TRAP1 4
-#define SNMP_PDU_BULK 5
-#define SNMP_PDU_INFORM 6
-#define SNMP_PDU_TRAP2 7
-
-/* Errors */
-#define SNMP_NOERROR 0
-#define SNMP_TOOBIG 1
-#define SNMP_NOSUCHNAME 2
-#define SNMP_BADVALUE 3
-#define SNMP_READONLY 4
-#define SNMP_GENERROR 5
-#define SNMP_NOACCESS 6
-#define SNMP_WRONGTYPE 7
-#define SNMP_WRONGLENGTH 8
-#define SNMP_WRONGENCODING 9
-#define SNMP_WRONGVALUE 10
-#define SNMP_NOCREATION 11
-#define SNMP_INCONSISTENTVALUE 12
-#define SNMP_RESOURCEUNAVAILABLE 13
-#define SNMP_COMMITFAILED 14
-#define SNMP_UNDOFAILED 15
-#define SNMP_AUTHORIZATIONERROR 16
-#define SNMP_NOTWRITABLE 17
-#define SNMP_INCONSISTENTNAME 18
-
-/* General SNMP V1 Traps */
-#define SNMP_TRAP_COLDSTART 0
-#define SNMP_TRAP_WARMSTART 1
-#define SNMP_TRAP_LINKDOWN 2
-#define SNMP_TRAP_LINKUP 3
-#define SNMP_TRAP_AUTFAILURE 4
-#define SNMP_TRAP_EQPNEIGHBORLOSS 5
-#define SNMP_TRAP_ENTSPECIFIC 6
-
-/* SNMPv1 Types */
-#define SNMP_NULL 0
-#define SNMP_INTEGER 1 /* l */
-#define SNMP_OCTETSTR 2 /* c */
-#define SNMP_DISPLAYSTR 2 /* c */
-#define SNMP_OBJECTID 3 /* ul */
-#define SNMP_IPADDR 4 /* uc */
-#define SNMP_COUNTER 5 /* ul */
-#define SNMP_GAUGE 6 /* ul */
-#define SNMP_TIMETICKS 7 /* ul */
-#define SNMP_OPAQUE 8 /* c */
-
-/* Additional SNMPv2 Types */
-#define SNMP_UINTEGER 5 /* ul */
-#define SNMP_BITSTR 9 /* uc */
-#define SNMP_NSAP 10 /* uc */
-#define SNMP_COUNTER64 11 /* ul */
-#define SNMP_NOSUCHOBJECT 12
-#define SNMP_NOSUCHINSTANCE 13
-#define SNMP_ENDOFMIBVIEW 14
-
-union snmp_syntax
-{
- unsigned char uc[0]; /* 8 bit unsigned */
- char c[0]; /* 8 bit signed */
- unsigned long ul[0]; /* 32 bit unsigned */
- long l[0]; /* 32 bit signed */
-};
-
-struct snmp_object
-{
- unsigned long *id;
- unsigned int id_len;
- unsigned short type;
- unsigned int syntax_len;
- union snmp_syntax syntax;
-};
-
-struct snmp_request
-{
- unsigned long id;
- unsigned int error_status;
- unsigned int error_index;
-};
-
-struct snmp_v1_trap
-{
- unsigned long *id;
- unsigned int id_len;
- unsigned long ip_address; /* pointer */
- unsigned int general;
- unsigned int specific;
- unsigned long time;
-};
-
-/* SNMP types */
-#define SNMP_IPA 0
-#define SNMP_CNT 1
-#define SNMP_GGE 2
-#define SNMP_TIT 3
-#define SNMP_OPQ 4
-#define SNMP_C64 6
-
-/* SNMP errors */
-#define SERR_NSO 0
-#define SERR_NSI 1
-#define SERR_EOM 2
-
-static inline void mangle_address(unsigned char *begin,
- unsigned char *addr,
- const struct oct1_map *map,
- __sum16 *check);
-struct snmp_cnv
-{
- unsigned int class;
- unsigned int tag;
- int syntax;
-};
-
-static const struct snmp_cnv snmp_conv[] = {
- {ASN1_UNI, ASN1_NUL, SNMP_NULL},
- {ASN1_UNI, ASN1_INT, SNMP_INTEGER},
- {ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR},
- {ASN1_UNI, ASN1_OTS, SNMP_DISPLAYSTR},
- {ASN1_UNI, ASN1_OJI, SNMP_OBJECTID},
- {ASN1_APL, SNMP_IPA, SNMP_IPADDR},
- {ASN1_APL, SNMP_CNT, SNMP_COUNTER}, /* Counter32 */
- {ASN1_APL, SNMP_GGE, SNMP_GAUGE}, /* Gauge32 == Unsigned32 */
- {ASN1_APL, SNMP_TIT, SNMP_TIMETICKS},
- {ASN1_APL, SNMP_OPQ, SNMP_OPAQUE},
-
- /* SNMPv2 data types and errors */
- {ASN1_UNI, ASN1_BTS, SNMP_BITSTR},
- {ASN1_APL, SNMP_C64, SNMP_COUNTER64},
- {ASN1_CTX, SERR_NSO, SNMP_NOSUCHOBJECT},
- {ASN1_CTX, SERR_NSI, SNMP_NOSUCHINSTANCE},
- {ASN1_CTX, SERR_EOM, SNMP_ENDOFMIBVIEW},
- {0, 0, -1}
-};
-
-static unsigned char snmp_tag_cls2syntax(unsigned int tag,
- unsigned int cls,
- unsigned short *syntax)
-{
- const struct snmp_cnv *cnv;
-
- cnv = snmp_conv;
-
- while (cnv->syntax != -1) {
- if (cnv->tag == tag && cnv->class == cls) {
- *syntax = cnv->syntax;
- return 1;
- }
- cnv++;
- }
- return 0;
-}
-
-static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
- struct snmp_object **obj)
-{
- unsigned int cls, con, tag, len, idlen;
- unsigned short type;
- unsigned char *eoc, *end, *p;
- unsigned long *lp, *id;
- unsigned long ul;
- long l;
-
- *obj = NULL;
- id = NULL;
-
- if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag))
- return 0;
-
- if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
- return 0;
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
- return 0;
-
- if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
- return 0;
-
- if (!asn1_oid_decode(ctx, end, &id, &idlen))
- return 0;
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) {
- kfree(id);
- return 0;
- }
-
- if (con != ASN1_PRI) {
- kfree(id);
- return 0;
- }
-
- type = 0;
- if (!snmp_tag_cls2syntax(tag, cls, &type)) {
- kfree(id);
- return 0;
- }
-
- l = 0;
- switch (type) {
- case SNMP_INTEGER:
- len = sizeof(long);
- if (!asn1_long_decode(ctx, end, &l)) {
- kfree(id);
- return 0;
- }
- *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
- if (*obj == NULL) {
- kfree(id);
- return 0;
- }
- (*obj)->syntax.l[0] = l;
- break;
- case SNMP_OCTETSTR:
- case SNMP_OPAQUE:
- if (!asn1_octets_decode(ctx, end, &p, &len)) {
- kfree(id);
- return 0;
- }
- *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
- if (*obj == NULL) {
- kfree(p);
- kfree(id);
- return 0;
- }
- memcpy((*obj)->syntax.c, p, len);
- kfree(p);
- break;
- case SNMP_NULL:
- case SNMP_NOSUCHOBJECT:
- case SNMP_NOSUCHINSTANCE:
- case SNMP_ENDOFMIBVIEW:
- len = 0;
- *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
- if (*obj == NULL) {
- kfree(id);
- return 0;
- }
- if (!asn1_null_decode(ctx, end)) {
- kfree(id);
- kfree(*obj);
- *obj = NULL;
- return 0;
- }
- break;
- case SNMP_OBJECTID:
- if (!asn1_oid_decode(ctx, end, &lp, &len)) {
- kfree(id);
- return 0;
- }
- len *= sizeof(unsigned long);
- *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
- if (*obj == NULL) {
- kfree(lp);
- kfree(id);
- return 0;
- }
- memcpy((*obj)->syntax.ul, lp, len);
- kfree(lp);
- break;
- case SNMP_IPADDR:
- if (!asn1_octets_decode(ctx, end, &p, &len)) {
- kfree(id);
- return 0;
- }
- if (len != 4) {
- kfree(p);
- kfree(id);
- return 0;
- }
- *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
- if (*obj == NULL) {
- kfree(p);
- kfree(id);
- return 0;
- }
- memcpy((*obj)->syntax.uc, p, len);
- kfree(p);
- break;
- case SNMP_COUNTER:
- case SNMP_GAUGE:
- case SNMP_TIMETICKS:
- len = sizeof(unsigned long);
- if (!asn1_ulong_decode(ctx, end, &ul)) {
- kfree(id);
- return 0;
- }
- *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
- if (*obj == NULL) {
- kfree(id);
- return 0;
- }
- (*obj)->syntax.ul[0] = ul;
- break;
- default:
- kfree(id);
- return 0;
- }
-
- (*obj)->syntax_len = len;
- (*obj)->type = type;
- (*obj)->id = id;
- (*obj)->id_len = idlen;
-
- if (!asn1_eoc_decode(ctx, eoc)) {
- kfree(id);
- kfree(*obj);
- *obj = NULL;
- return 0;
- }
- return 1;
-}
-
-static unsigned char noinline_for_stack
-snmp_request_decode(struct asn1_ctx *ctx, struct snmp_request *request)
-{
- unsigned int cls, con, tag;
- unsigned char *end;
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
- return 0;
-
- if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
- return 0;
-
- if (!asn1_ulong_decode(ctx, end, &request->id))
- return 0;
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
- return 0;
-
- if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
- return 0;
-
- if (!asn1_uint_decode(ctx, end, &request->error_status))
- return 0;
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
- return 0;
-
- if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
- return 0;
-
- if (!asn1_uint_decode(ctx, end, &request->error_index))
- return 0;
-
- return 1;
-}
-
-/*
- * Fast checksum update for possibly oddly-aligned UDP byte, from the
- * code example in the draft.
- */
-static void fast_csum(__sum16 *csum,
- const unsigned char *optr,
- const unsigned char *nptr,
- int offset)
-{
- unsigned char s[4];
-
- if (offset & 1) {
- s[0] = ~0;
- s[1] = ~*optr;
- s[2] = 0;
- s[3] = *nptr;
- } else {
- s[0] = ~*optr;
- s[1] = ~0;
- s[2] = *nptr;
- s[3] = 0;
- }
-
- *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
-}
-
-/*
- * Mangle IP address.
- * - begin points to the start of the SNMP message
- * - addr points to the start of the address
- */
-static inline void mangle_address(unsigned char *begin,
- unsigned char *addr,
- const struct oct1_map *map,
- __sum16 *check)
-{
- if (map->from == NOCT1(addr)) {
- u_int32_t old;
-
- if (debug)
- memcpy(&old, addr, sizeof(old));
-
- *addr = map->to;
-
- /* Update UDP checksum if being used */
- if (*check) {
- fast_csum(check,
- &map->from, &map->to, addr - begin);
-
- }
-
- if (debug)
- printk(KERN_DEBUG "bsalg: mapped %pI4 to %pI4\n",
- &old, addr);
- }
-}
-
-static unsigned char noinline_for_stack
-snmp_trap_decode(struct asn1_ctx *ctx, struct snmp_v1_trap *trap,
- const struct oct1_map *map,
- __sum16 *check)
-{
- unsigned int cls, con, tag, len;
- unsigned char *end;
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
- return 0;
-
- if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
- return 0;
-
- if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len))
- return 0;
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
- goto err_id_free;
-
- if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) ||
- (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS)))
- goto err_id_free;
-
- if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len))
- goto err_id_free;
-
- /* IPv4 only */
- if (len != 4)
- goto err_addr_free;
-
- mangle_address(ctx->begin, ctx->pointer - 4, map, check);
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
- goto err_addr_free;
-
- if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
- goto err_addr_free;
-
- if (!asn1_uint_decode(ctx, end, &trap->general))
- goto err_addr_free;
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
- goto err_addr_free;
-
- if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
- goto err_addr_free;
-
- if (!asn1_uint_decode(ctx, end, &trap->specific))
- goto err_addr_free;
-
- if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
- goto err_addr_free;
-
- if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) ||
- (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT)))
- goto err_addr_free;
-
- if (!asn1_ulong_decode(ctx, end, &trap->time))
- goto err_addr_free;
-
- return 1;
-
-err_addr_free:
- kfree((unsigned long *)trap->ip_address);
-
-err_id_free:
- kfree(trap->id);
-
- return 0;
-}
-
-/*****************************************************************************
- *
- * Misc. routines
- *
- *****************************************************************************/
-
-/*
- * Parse and mangle SNMP message according to mapping.
- * (And this is the fucking 'basic' method).
- */
-static int snmp_parse_mangle(unsigned char *msg,
- u_int16_t len,
- const struct oct1_map *map,
- __sum16 *check)
-{
- unsigned char *eoc, *end;
- unsigned int cls, con, tag, vers, pdutype;
- struct asn1_ctx ctx;
- struct asn1_octstr comm;
- struct snmp_object *obj;
-
- if (debug > 1)
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1,
- msg, len, 0);
-
- asn1_open(&ctx, msg, len);
-
- /*
- * Start of SNMP message.
- */
- if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
- return 0;
- if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
- return 0;
-
- /*
- * Version 1 or 2 handled.
- */
- if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag))
- return 0;
- if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
- return 0;
- if (!asn1_uint_decode (&ctx, end, &vers))
- return 0;
- if (debug > 1)
- pr_debug("bsalg: snmp version: %u\n", vers + 1);
- if (vers > 1)
- return 1;
-
- /*
- * Community.
- */
- if (!asn1_header_decode (&ctx, &end, &cls, &con, &tag))
- return 0;
- if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OTS)
- return 0;
- if (!asn1_octets_decode(&ctx, end, &comm.data, &comm.len))
- return 0;
- if (debug > 1) {
- unsigned int i;
-
- pr_debug("bsalg: community: ");
- for (i = 0; i < comm.len; i++)
- pr_cont("%c", comm.data[i]);
- pr_cont("\n");
- }
- kfree(comm.data);
-
- /*
- * PDU type
- */
- if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &pdutype))
- return 0;
- if (cls != ASN1_CTX || con != ASN1_CON)
- return 0;
- if (debug > 1) {
- static const unsigned char *const pdus[] = {
- [SNMP_PDU_GET] = "get",
- [SNMP_PDU_NEXT] = "get-next",
- [SNMP_PDU_RESPONSE] = "response",
- [SNMP_PDU_SET] = "set",
- [SNMP_PDU_TRAP1] = "trapv1",
- [SNMP_PDU_BULK] = "bulk",
- [SNMP_PDU_INFORM] = "inform",
- [SNMP_PDU_TRAP2] = "trapv2"
- };
-
- if (pdutype > SNMP_PDU_TRAP2)
- pr_debug("bsalg: bad pdu type %u\n", pdutype);
- else
- pr_debug("bsalg: pdu: %s\n", pdus[pdutype]);
- }
- if (pdutype != SNMP_PDU_RESPONSE &&
- pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2)
- return 1;
-
- /*
- * Request header or v1 trap
- */
- if (pdutype == SNMP_PDU_TRAP1) {
- struct snmp_v1_trap trap;
- unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check);
-
- if (ret) {
- kfree(trap.id);
- kfree((unsigned long *)trap.ip_address);
- } else
- return ret;
-
- } else {
- struct snmp_request req;
-
- if (!snmp_request_decode(&ctx, &req))
- return 0;
-
- if (debug > 1)
- pr_debug("bsalg: request: id=0x%lx error_status=%u "
- "error_index=%u\n", req.id, req.error_status,
- req.error_index);
- }
-
- /*
- * Loop through objects, look for IP addresses to mangle.
- */
- if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
- return 0;
-
- if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
- return 0;
-
- while (!asn1_eoc_decode(&ctx, eoc)) {
- unsigned int i;
-
- if (!snmp_object_decode(&ctx, &obj)) {
- if (obj) {
- kfree(obj->id);
- kfree(obj);
- }
- return 0;
- }
-
- if (debug > 1) {
- pr_debug("bsalg: object: ");
- for (i = 0; i < obj->id_len; i++) {
- if (i > 0)
- pr_cont(".");
- pr_cont("%lu", obj->id[i]);
- }
- pr_cont(": type=%u\n", obj->type);
-
- }
-
- if (obj->type == SNMP_IPADDR)
- mangle_address(ctx.begin, ctx.pointer - 4, map, check);
-
- kfree(obj->id);
- kfree(obj);
- }
-
- if (!asn1_eoc_decode(&ctx, eoc))
- return 0;
-
- return 1;
-}
-
-/*****************************************************************************
- *
- * NAT routines.
- *
- *****************************************************************************/
-
-/*
- * SNMP translation routine.
- */
-static int snmp_translate(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
- struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
- u_int16_t udplen = ntohs(udph->len);
- u_int16_t paylen = udplen - sizeof(struct udphdr);
- int dir = CTINFO2DIR(ctinfo);
- struct oct1_map map;
-
- /*
- * Determine mapping for application layer addresses based
- * on NAT manipulations for the packet.
- */
- if (dir == IP_CT_DIR_ORIGINAL) {
- /* SNAT traps */
- map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip);
- map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
- } else {
- /* DNAT replies */
- map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip);
- map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip);
- }
-
- if (map.from == map.to)
- return NF_ACCEPT;
-
- if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
- paylen, &map, &udph->check)) {
- net_warn_ratelimited("bsalg: parser failed\n");
- return NF_DROP;
- }
- return NF_ACCEPT;
-}
-
-/* We don't actually set up expectations, just adjust internal IP
- * addresses if this is being NATted */
-static int help(struct sk_buff *skb, unsigned int protoff,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- int dir = CTINFO2DIR(ctinfo);
- unsigned int ret;
- const struct iphdr *iph = ip_hdr(skb);
- const struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
-
- /* SNMP replies and originating SNMP traps get mangled */
- if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
- return NF_ACCEPT;
- if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
- return NF_ACCEPT;
-
- /* No NAT? */
- if (!(ct->status & IPS_NAT_MASK))
- return NF_ACCEPT;
-
- /*
- * Make sure the packet length is ok. So far, we were only guaranteed
- * to have a valid length IP header plus 8 bytes, which means we have
- * enough room for a UDP header. Just verify the UDP length field so we
- * can mess around with the payload.
- */
- if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
- net_warn_ratelimited("SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
- &iph->saddr, &iph->daddr);
- return NF_DROP;
- }
-
- if (!skb_make_writable(skb, skb->len))
- return NF_DROP;
-
- spin_lock_bh(&snmp_lock);
- ret = snmp_translate(ct, ctinfo, skb);
- spin_unlock_bh(&snmp_lock);
- return ret;
-}
-
-static const struct nf_conntrack_expect_policy snmp_exp_policy = {
- .max_expected = 0,
- .timeout = 180,
-};
-
-static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
- .me = THIS_MODULE,
- .help = help,
- .expect_policy = &snmp_exp_policy,
- .name = "snmp_trap",
- .tuple.src.l3num = AF_INET,
- .tuple.src.u.udp.port = cpu_to_be16(SNMP_TRAP_PORT),
- .tuple.dst.protonum = IPPROTO_UDP,
-};
-
-/*****************************************************************************
- *
- * Module stuff.
- *
- *****************************************************************************/
-
-static int __init nf_nat_snmp_basic_init(void)
-{
- BUG_ON(nf_nat_snmp_hook != NULL);
- RCU_INIT_POINTER(nf_nat_snmp_hook, help);
-
- return nf_conntrack_helper_register(&snmp_trap_helper);
-}
-
-static void __exit nf_nat_snmp_basic_fini(void)
-{
- RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
- synchronize_rcu();
- nf_conntrack_helper_unregister(&snmp_trap_helper);
-}
-
-module_init(nf_nat_snmp_basic_init);
-module_exit(nf_nat_snmp_basic_fini);
-
-module_param(debug, int, 0600);
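Both the removed fast_csum() above and its replacement in the new file below apply the incremental one's-complement update of RFC 1624 (HC' = ~(~HC + ~m + m')) instead of re-summing the whole datagram. A minimal user-space sketch of that identity; the function name and sample words are illustrative, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Incremental one's-complement update (RFC 1624, eqn. 3):
 * new_csum = ~(~old_csum + ~old_word + new_word).
 */
static uint16_t csum_update16(uint16_t old_csum, uint16_t old_word,
			      uint16_t new_word)
{
	uint32_t sum = (uint16_t)~old_csum;

	sum += (uint16_t)~old_word;		/* retract the old value */
	sum += new_word;			/* account for the new one */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries ... */
	sum = (sum & 0xffff) + (sum >> 16);	/* ... twice, like csum_fold() */
	return (uint16_t)~sum;
}

int main(void)
{
	/* Illustrative values: one 16-bit word of the payload changes. */
	uint16_t csum = 0x1c46, before = 0xc0a8, after = 0x0a00;

	printf("updated checksum: 0x%04x\n",
	       csum_update16(csum, before, after));
	return 0;
}

The double fold mirrors what csum_fold() does in the kernel: it pushes any carry out of the low 16 bits back in before the final inversion.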
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
new file mode 100644
index 000000000000..b6e277093e7e
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -0,0 +1,235 @@
+/*
+ * nf_nat_snmp_basic.c
+ *
+ * Basic SNMP Application Layer Gateway
+ *
+ * This IP NAT module is intended for use with SNMP network
+ * discovery and monitoring applications where target networks use
+ * conflicting private address realms.
+ *
+ * Static NAT is used to remap the networks from the view of the network
+ * management system at the IP layer, and this module remaps some application
+ * layer addresses to match.
+ *
+ * The simplest form of ALG is performed, where only tagged IP addresses
+ * are modified. The module does not need to be MIB aware and only scans
+ * messages at the ASN.1/BER level.
+ *
+ * Currently, only SNMPv1 and SNMPv2 are supported.
+ *
+ * More information on ALG and associated issues can be found in
+ * RFC 2962
+ *
+ * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory
+ * McLean & Jochen Friedrich, stripped down for use in the kernel.
+ *
+ * Copyright (c) 2000 RP Internet (www.rpi.net.au).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: James Morris <jmorris@intercode.com.au>
+ *
+ * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/checksum.h>
+#include <net/udp.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_snmp.h>
+#include "nf_nat_snmp_basic-asn1.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
+MODULE_ALIAS("ip_nat_snmp_basic");
+
+#define SNMP_PORT 161
+#define SNMP_TRAP_PORT 162
+
+static DEFINE_SPINLOCK(snmp_lock);
+
+struct snmp_ctx {
+ unsigned char *begin;
+ __sum16 *check;
+ __be32 from;
+ __be32 to;
+};
+
+static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
+{
+ unsigned char s[12] = {0,};
+ int size;
+
+ if (offset & 1) {
+ memcpy(&s[1], &ctx->from, 4);
+ memcpy(&s[7], &ctx->to, 4);
+ s[0] = ~0;
+ s[1] = ~s[1];
+ s[2] = ~s[2];
+ s[3] = ~s[3];
+ s[4] = ~s[4];
+ s[5] = ~0;
+ size = 12;
+ } else {
+ memcpy(&s[0], &ctx->from, 4);
+ memcpy(&s[4], &ctx->to, 4);
+ s[0] = ~s[0];
+ s[1] = ~s[1];
+ s[2] = ~s[2];
+ s[3] = ~s[3];
+ size = 8;
+ }
+ *ctx->check = csum_fold(csum_partial(s, size,
+ ~csum_unfold(*ctx->check)));
+}
+
+int snmp_version(void *context, size_t hdrlen, unsigned char tag,
+ const void *data, size_t datalen)
+{
+ if (*(unsigned char *)data > 1)
+ return -ENOTSUPP;
+ return 1;
+}
+
+int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
+ const void *data, size_t datalen)
+{
+ struct snmp_ctx *ctx = (struct snmp_ctx *)context;
+ __be32 *pdata = (__be32 *)data;
+
+ if (*pdata == ctx->from) {
+ pr_debug("%s: %pI4 to %pI4\n", __func__,
+ (void *)&ctx->from, (void *)&ctx->to);
+
+ if (*ctx->check)
+ fast_csum(ctx, (unsigned char *)data - ctx->begin);
+ *pdata = ctx->to;
+ }
+
+ return 1;
+}
+
+static int snmp_translate(struct nf_conn *ct, int dir, struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
+ u16 datalen = ntohs(udph->len) - sizeof(struct udphdr);
+ unsigned char *data = (unsigned char *)udph + sizeof(struct udphdr);
+ struct snmp_ctx ctx;
+ int ret;
+
+ if (dir == IP_CT_DIR_ORIGINAL) {
+ ctx.from = ct->tuplehash[dir].tuple.src.u3.ip;
+ ctx.to = ct->tuplehash[!dir].tuple.dst.u3.ip;
+ } else {
+ ctx.from = ct->tuplehash[!dir].tuple.src.u3.ip;
+ ctx.to = ct->tuplehash[dir].tuple.dst.u3.ip;
+ }
+
+ if (ctx.from == ctx.to)
+ return NF_ACCEPT;
+
+ ctx.begin = (unsigned char *)udph + sizeof(struct udphdr);
+ ctx.check = &udph->check;
+ ret = asn1_ber_decoder(&nf_nat_snmp_basic_decoder, &ctx, data, datalen);
+ if (ret < 0) {
+ nf_ct_helper_log(skb, ct, "parser failed\n");
+ return NF_DROP;
+ }
+
+ return NF_ACCEPT;
+}
+
+/* We don't actually set up expectations, just adjust internal IP
+ * addresses if this is being NATted
+ */
+static int help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ int dir = CTINFO2DIR(ctinfo);
+ unsigned int ret;
+ const struct iphdr *iph = ip_hdr(skb);
+ const struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
+
+ /* SNMP replies and originating SNMP traps get mangled */
+ if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
+ return NF_ACCEPT;
+ if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
+ return NF_ACCEPT;
+
+ /* No NAT? */
+ if (!(ct->status & IPS_NAT_MASK))
+ return NF_ACCEPT;
+
+ /* Make sure the packet length is ok. So far, we were only guaranteed
+ * to have a valid length IP header plus 8 bytes, which means we have
+ * enough room for a UDP header. Just verify the UDP length field so we
+ * can mess around with the payload.
+ */
+ if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
+ nf_ct_helper_log(skb, ct, "dropping malformed packet\n");
+ return NF_DROP;
+ }
+
+ if (!skb_make_writable(skb, skb->len)) {
+ nf_ct_helper_log(skb, ct, "cannot mangle packet");
+ return NF_DROP;
+ }
+
+ spin_lock_bh(&snmp_lock);
+ ret = snmp_translate(ct, dir, skb);
+ spin_unlock_bh(&snmp_lock);
+ return ret;
+}
+
+static const struct nf_conntrack_expect_policy snmp_exp_policy = {
+ .max_expected = 0,
+ .timeout = 180,
+};
+
+static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
+ .me = THIS_MODULE,
+ .help = help,
+ .expect_policy = &snmp_exp_policy,
+ .name = "snmp_trap",
+ .tuple.src.l3num = AF_INET,
+ .tuple.src.u.udp.port = cpu_to_be16(SNMP_TRAP_PORT),
+ .tuple.dst.protonum = IPPROTO_UDP,
+};
+
+static int __init nf_nat_snmp_basic_init(void)
+{
+ BUG_ON(nf_nat_snmp_hook != NULL);
+ RCU_INIT_POINTER(nf_nat_snmp_hook, help);
+
+ return nf_conntrack_helper_register(&snmp_trap_helper);
+}
+
+static void __exit nf_nat_snmp_basic_fini(void)
+{
+ RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+ synchronize_rcu();
+ nf_conntrack_helper_unregister(&snmp_trap_helper);
+}
+
+module_init(nf_nat_snmp_basic_init);
+module_exit(nf_nat_snmp_basic_fini);
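The rewrite above delegates all BER parsing to the kernel's ASN.1 compiler: the grammar in nf_nat_snmp_basic.asn1 (not shown in this diff) generates nf_nat_snmp_basic_decoder, and asn1_ber_decoder() invokes snmp_version()/snmp_helper() as values are decoded. A rough user-space sketch of the same callback-on-TLV technique; it handles only definite short-form lengths and flat (non-constructed) encodings, and every name in it is invented for illustration:

#include <stddef.h>
#include <stdio.h>

typedef int (*value_cb)(const unsigned char *val, size_t len);

/* Walk flat BER TLVs and hand every 4-byte OCTET STRING (tag 0x04)
 * to the callback, the way the generated decoder hands IpAddress
 * values to snmp_helper(). Definite short-form lengths only.
 */
static int walk_tlv(const unsigned char *p, size_t len, value_cb cb)
{
	while (len >= 2) {
		unsigned char tag = p[0];
		size_t vlen = p[1];

		if (2 + vlen > len)
			return -1;		/* truncated TLV */
		if (tag == 0x04 && vlen == 4 && cb(p + 2, vlen) < 0)
			return -1;
		p += 2 + vlen;
		len -= 2 + vlen;
	}
	return 0;
}

static int print_addr(const unsigned char *v, size_t len)
{
	printf("IPv4 candidate: %u.%u.%u.%u\n",
	       (unsigned)v[0], (unsigned)v[1], (unsigned)v[2], (unsigned)v[3]);
	return 0;
}

int main(void)
{
	/* OCTET STRING 192.0.2.1 followed by a NULL (0x05 0x00). */
	const unsigned char msg[] = { 0x04, 0x04, 192, 0, 2, 1, 0x05, 0x00 };

	return walk_tlv(msg, sizeof(msg), print_addr);
}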
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 4bbc273b45e8..036c074736b0 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -21,51 +21,12 @@ nft_do_chain_arp(void *priv,
{
struct nft_pktinfo pkt;
- nft_set_pktinfo_unspec(&pkt, skb, state);
+ nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_unspec(&pkt, skb);
return nft_do_chain(&pkt, priv);
}
-static struct nft_af_info nft_af_arp __read_mostly = {
- .family = NFPROTO_ARP,
- .nhooks = NF_ARP_NUMHOOKS,
- .owner = THIS_MODULE,
- .nops = 1,
- .hooks = {
- [NF_ARP_IN] = nft_do_chain_arp,
- [NF_ARP_OUT] = nft_do_chain_arp,
- [NF_ARP_FORWARD] = nft_do_chain_arp,
- },
-};
-
-static int nf_tables_arp_init_net(struct net *net)
-{
- net->nft.arp = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
- if (net->nft.arp== NULL)
- return -ENOMEM;
-
- memcpy(net->nft.arp, &nft_af_arp, sizeof(nft_af_arp));
-
- if (nft_register_afinfo(net, net->nft.arp) < 0)
- goto err;
-
- return 0;
-err:
- kfree(net->nft.arp);
- return -ENOMEM;
-}
-
-static void nf_tables_arp_exit_net(struct net *net)
-{
- nft_unregister_afinfo(net, net->nft.arp);
- kfree(net->nft.arp);
-}
-
-static struct pernet_operations nf_tables_arp_net_ops = {
- .init = nf_tables_arp_init_net,
- .exit = nf_tables_arp_exit_net,
-};
-
static const struct nf_chain_type filter_arp = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
@@ -73,26 +34,19 @@ static const struct nf_chain_type filter_arp = {
.owner = THIS_MODULE,
.hook_mask = (1 << NF_ARP_IN) |
(1 << NF_ARP_OUT),
+ .hooks = {
+ [NF_ARP_IN] = nft_do_chain_arp,
+ [NF_ARP_OUT] = nft_do_chain_arp,
+ },
};
static int __init nf_tables_arp_init(void)
{
- int ret;
-
- ret = nft_register_chain_type(&filter_arp);
- if (ret < 0)
- return ret;
-
- ret = register_pernet_subsys(&nf_tables_arp_net_ops);
- if (ret < 0)
- nft_unregister_chain_type(&filter_arp);
-
- return ret;
+ return nft_register_chain_type(&filter_arp);
}
static void __exit nf_tables_arp_exit(void)
{
- unregister_pernet_subsys(&nf_tables_arp_net_ops);
nft_unregister_chain_type(&filter_arp);
}
@@ -101,4 +55,4 @@ module_exit(nf_tables_arp_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(3); /* NFPROTO_ARP */
+MODULE_ALIAS_NFT_CHAIN(3, "filter"); /* NFPROTO_ARP */
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
index 2840a29b2e04..96f955496d5f 100644
--- a/net/ipv4/netfilter/nf_tables_ipv4.c
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -24,69 +24,12 @@ static unsigned int nft_do_chain_ipv4(void *priv,
{
struct nft_pktinfo pkt;
- nft_set_pktinfo_ipv4(&pkt, skb, state);
+ nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_ipv4(&pkt, skb);
return nft_do_chain(&pkt, priv);
}
-static unsigned int nft_ipv4_output(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- if (unlikely(skb->len < sizeof(struct iphdr) ||
- ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
- if (net_ratelimit())
- pr_info("nf_tables_ipv4: ignoring short SOCK_RAW "
- "packet\n");
- return NF_ACCEPT;
- }
-
- return nft_do_chain_ipv4(priv, skb, state);
-}
-
-struct nft_af_info nft_af_ipv4 __read_mostly = {
- .family = NFPROTO_IPV4,
- .nhooks = NF_INET_NUMHOOKS,
- .owner = THIS_MODULE,
- .nops = 1,
- .hooks = {
- [NF_INET_LOCAL_IN] = nft_do_chain_ipv4,
- [NF_INET_LOCAL_OUT] = nft_ipv4_output,
- [NF_INET_FORWARD] = nft_do_chain_ipv4,
- [NF_INET_PRE_ROUTING] = nft_do_chain_ipv4,
- [NF_INET_POST_ROUTING] = nft_do_chain_ipv4,
- },
-};
-EXPORT_SYMBOL_GPL(nft_af_ipv4);
-
-static int nf_tables_ipv4_init_net(struct net *net)
-{
- net->nft.ipv4 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
- if (net->nft.ipv4 == NULL)
- return -ENOMEM;
-
- memcpy(net->nft.ipv4, &nft_af_ipv4, sizeof(nft_af_ipv4));
-
- if (nft_register_afinfo(net, net->nft.ipv4) < 0)
- goto err;
-
- return 0;
-err:
- kfree(net->nft.ipv4);
- return -ENOMEM;
-}
-
-static void nf_tables_ipv4_exit_net(struct net *net)
-{
- nft_unregister_afinfo(net, net->nft.ipv4);
- kfree(net->nft.ipv4);
-}
-
-static struct pernet_operations nf_tables_ipv4_net_ops = {
- .init = nf_tables_ipv4_init_net,
- .exit = nf_tables_ipv4_exit_net,
-};
-
static const struct nf_chain_type filter_ipv4 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
@@ -97,26 +40,22 @@ static const struct nf_chain_type filter_ipv4 = {
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
+ .hooks = {
+ [NF_INET_LOCAL_IN] = nft_do_chain_ipv4,
+ [NF_INET_LOCAL_OUT] = nft_do_chain_ipv4,
+ [NF_INET_FORWARD] = nft_do_chain_ipv4,
+ [NF_INET_PRE_ROUTING] = nft_do_chain_ipv4,
+ [NF_INET_POST_ROUTING] = nft_do_chain_ipv4,
+ },
};
static int __init nf_tables_ipv4_init(void)
{
- int ret;
-
- ret = nft_register_chain_type(&filter_ipv4);
- if (ret < 0)
- return ret;
-
- ret = register_pernet_subsys(&nf_tables_ipv4_net_ops);
- if (ret < 0)
- nft_unregister_chain_type(&filter_ipv4);
-
- return ret;
+ return nft_register_chain_type(&filter_ipv4);
}
static void __exit nf_tables_ipv4_exit(void)
{
- unregister_pernet_subsys(&nf_tables_ipv4_net_ops);
nft_unregister_chain_type(&filter_ipv4);
}
@@ -125,4 +64,4 @@ module_exit(nf_tables_ipv4_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(AF_INET);
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "filter");
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index f5c66a7a4bf2..f2a490981594 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -33,7 +33,8 @@ static unsigned int nft_nat_do_chain(void *priv,
{
struct nft_pktinfo pkt;
- nft_set_pktinfo_ipv4(&pkt, skb, state);
+ nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_ipv4(&pkt, skb);
return nft_do_chain(&pkt, priv);
}
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 30493beb611a..d965c225b9f6 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -33,12 +33,8 @@ static unsigned int nf_route_table_hook(void *priv,
const struct iphdr *iph;
int err;
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
- return NF_ACCEPT;
-
- nft_set_pktinfo_ipv4(&pkt, skb, state);
+ nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_ipv4(&pkt, skb);
mark = skb->mark;
iph = ip_hdr(skb);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 9f37c4727861..dc5edc8f7564 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -83,7 +83,6 @@ static int sockstat_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations sockstat_seq_fops = {
- .owner = THIS_MODULE,
.open = sockstat_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -467,7 +466,6 @@ static int snmp_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations snmp_seq_fops = {
- .owner = THIS_MODULE,
.open = snmp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -515,7 +513,6 @@ static int netstat_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations netstat_seq_fops = {
- .owner = THIS_MODULE,
.open = netstat_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 125c1eab3eaa..7c509697ebc7 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -520,9 +520,11 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto out;
/* hdrincl should be READ_ONCE(inet->hdrincl)
- * but READ_ONCE() doesn't work with bit fields
+ * but READ_ONCE() doesn't work with bit fields.
+ * Doing this indirectly yields the same result.
*/
hdrincl = inet->hdrincl;
+ hdrincl = READ_ONCE(hdrincl);
/*
* Check the flags.
*/
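On the READ_ONCE() workaround above: READ_ONCE() expands to a volatile access through the operand's address, and C forbids taking the address of a bit-field, so READ_ONCE(inet->hdrincl) would not compile. A user-space sketch of the constraint and the copy-to-a-local idiom, with a simplified stand-in macro (the kernel's real definition lives in compiler.h and is more involved):

#include <stdio.h>

/* Simplified stand-in for the kernel macro: a volatile read through
 * the operand's address. &(x) is ill-formed when x is a bit-field.
 */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct opts {
	unsigned int hdrincl : 1;	/* bit-field: READ_ONCE() rejects it */
	unsigned int other : 31;
};

static int sample(struct opts *o)
{
	int hdrincl = o->hdrincl;	/* one plain read of the bit-field */

	hdrincl = READ_ONCE(hdrincl);	/* legal: the local has an address */
	return hdrincl;
}

int main(void)
{
	struct opts o = { .hdrincl = 1 };

	printf("hdrincl=%d\n", sample(&o));
	return 0;
}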
@@ -615,8 +617,21 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
- } else if (!ipc.oif)
+ } else if (!ipc.oif) {
ipc.oif = inet->uc_index;
+ } else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
+ /* oif is set, the packet is to local broadcast, and
+ * uc_index is set. oif is most likely set
+ * by sk_bound_dev_if. If uc_index != oif, check if the
+ * oif is an L3 master and uc_index is an L3 slave.
+ * If so, we want to allow the send using the uc_index.
+ */
+ if (ipc.oif != inet->uc_index &&
+ ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
+ inet->uc_index)) {
+ ipc.oif = inet->uc_index;
+ }
+ }
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
@@ -1117,7 +1132,6 @@ static int raw_v4_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations raw_seq_fops = {
- .owner = THIS_MODULE,
.open = raw_v4_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 43b69af242e1..49cc1c1df1ba 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -240,7 +240,6 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations rt_cache_seq_fops = {
- .owner = THIS_MODULE,
.open = rt_cache_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -331,7 +330,6 @@ static int rt_cpu_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations rt_cpu_seq_fops = {
- .owner = THIS_MODULE,
.open = rt_cpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -369,7 +367,6 @@ static int rt_acct_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations rt_acct_proc_fops = {
- .owner = THIS_MODULE,
.open = rt_acct_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1106,7 +1103,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
new = true;
}
- __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+ __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
if (!dst_check(&rt->dst, 0)) {
if (new)
@@ -2762,6 +2759,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (err == 0 && rt->dst.error)
err = -rt->dst.error;
} else {
+ fl4.flowi4_iif = LOOPBACK_IFINDEX;
rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
err = 0;
if (IS_ERR(rt))
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f08eebe60446..c059aa7df0a9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -283,8 +283,6 @@
#include <asm/ioctls.h>
#include <net/busy_poll.h>
-#include <trace/events/tcp.h>
-
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -465,7 +463,7 @@ void tcp_init_transfer(struct sock *sk, int bpf_op)
tcp_mtup_init(sk);
icsk->icsk_af_ops->rebuild_header(sk);
tcp_init_metrics(sk);
- tcp_call_bpf(sk, bpf_op);
+ tcp_call_bpf(sk, bpf_op, 0, NULL);
tcp_init_congestion_control(sk);
tcp_init_buffer_space(sk);
}
@@ -493,18 +491,16 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
* take care of normal races (between the test and the event) and we don't
* go look at any of the socket buffers directly.
*/
-unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
struct sock *sk = sock->sk;
const struct tcp_sock *tp = tcp_sk(sk);
int state;
- sock_rps_record_flow(sk);
-
sock_poll_wait(file, sk_sleep(sk), wait);
- state = sk_state_load(sk);
+ state = inet_sk_state_load(sk);
if (state == TCP_LISTEN)
return inet_csk_listen_poll(sk);
@@ -1106,12 +1102,15 @@ static int linear_payload_sz(bool first_skb)
return 0;
}
-static int select_size(const struct sock *sk, bool sg, bool first_skb)
+static int select_size(const struct sock *sk, bool sg, bool first_skb, bool zc)
{
const struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
if (sg) {
+ if (zc)
+ return 0;
+
if (sk_can_gso(sk)) {
tmp = linear_payload_sz(first_skb);
} else {
@@ -1188,7 +1187,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
int flags, err, copied = 0;
int mss_now = 0, size_goal, copied_syn = 0;
bool process_backlog = false;
- bool sg;
+ bool sg, zc = false;
long timeo;
flags = msg->msg_flags;
@@ -1206,7 +1205,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
goto out_err;
}
- if (!(sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG))
+ zc = sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG;
+ if (!zc)
uarg->zerocopy = 0;
}
@@ -1283,6 +1283,7 @@ restart:
if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
bool first_skb;
+ int linear;
new_segment:
/* Allocate new segment. If the interface is SG,
@@ -1296,9 +1297,8 @@ new_segment:
goto restart;
}
first_skb = tcp_rtx_and_write_queues_empty(sk);
- skb = sk_stream_alloc_skb(sk,
- select_size(sk, sg, first_skb),
- sk->sk_allocation,
+ linear = select_size(sk, sg, first_skb, zc);
+ skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
first_skb);
if (!skb)
goto wait_for_memory;
@@ -1327,13 +1327,13 @@ new_segment:
copy = msg_data_left(msg);
/* Where to copy to? */
- if (skb_availroom(skb) > 0) {
+ if (skb_availroom(skb) > 0 && !zc) {
/* We have some space in skb head. Superb! */
copy = min_t(int, copy, skb_availroom(skb));
err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
if (err)
goto do_fault;
- } else if (!uarg || !uarg->zerocopy) {
+ } else if (!zc) {
bool merge = true;
int i = skb_shinfo(skb)->nr_frags;
struct page_frag *pfrag = sk_page_frag(sk);
@@ -1373,8 +1373,10 @@ new_segment:
pfrag->offset += copy;
} else {
err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
- if (err == -EMSGSIZE || err == -EEXIST)
+ if (err == -EMSGSIZE || err == -EEXIST) {
+ tcp_mark_push(tp, skb);
goto new_segment;
+ }
if (err < 0)
goto do_error;
copy = err;
@@ -1731,8 +1733,8 @@ static void tcp_update_recv_tstamps(struct sk_buff *skb,
}
/* Similar to __sock_recv_timestamp, but does not require an skb */
-void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
- struct scm_timestamping *tss)
+static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+ struct scm_timestamping *tss)
{
struct timeval tv;
bool has_timestamping = false;
@@ -2040,7 +2042,29 @@ void tcp_set_state(struct sock *sk, int state)
{
int oldstate = sk->sk_state;
- trace_tcp_set_state(sk, oldstate, state);
+ /* We defined a new enum for TCP states that are exported in BPF
+ * so as not to force the internal TCP states to be frozen. The
+ * following checks will detect if an internal state value ever
+ * differs from the BPF value. If this ever happens, then we will
+ * need to remap the internal value to the BPF value before calling
+ * tcp_call_bpf_2arg.
+ */
+ BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
+ BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
+ BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
+ BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
+ BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
+ BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
+ BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
+ BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
+ BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
+ BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
+ BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
+ BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
+ BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);
+
+ if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
+ tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
switch (state) {
case TCP_ESTABLISHED:
@@ -2065,7 +2089,7 @@ void tcp_set_state(struct sock *sk, int state)
/* Change state AFTER socket is unhashed to avoid closed
* socket sitting in hash tables.
*/
- sk_state_store(sk, state);
+ inet_sk_state_store(sk, state);
#ifdef STATE_TRACE
SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2298,6 +2322,9 @@ adjudge_to_death:
tcp_send_active_reset(sk, GFP_ATOMIC);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPABORTONMEMORY);
+ } else if (!check_net(sock_net(sk))) {
+ /* Not possible to send reset; just close */
+ tcp_set_state(sk, TCP_CLOSE);
}
}
@@ -2431,6 +2458,12 @@ int tcp_disconnect(struct sock *sk, int flags)
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
+ if (sk->sk_frag.page) {
+ put_page(sk->sk_frag.page);
+ sk->sk_frag.page = NULL;
+ sk->sk_frag.offset = 0;
+ }
+
sk->sk_error_report(sk);
return err;
}
@@ -2920,7 +2953,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
if (sk->sk_type != SOCK_STREAM)
return;
- info->tcpi_state = sk_state_load(sk);
+ info->tcpi_state = inet_sk_state_load(sk);
/* Report meaningful fields for all TCP states, including listeners */
rate = READ_ONCE(sk->sk_pacing_rate);
@@ -3578,6 +3611,9 @@ void __init tcp_init(void)
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
inet_hashinfo_init(&tcp_hashinfo);
+ inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
+ thash_entries, 21, /* one slot per 2 MB */
+ 0, 64 * 1024);
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 8322f26e770e..785712be5b0d 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -766,7 +766,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
filter_expired = after(tcp_jiffies32,
bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
if (rs->rtt_us >= 0 &&
- (rs->rtt_us <= bbr->min_rtt_us || filter_expired)) {
+ (rs->rtt_us <= bbr->min_rtt_us ||
+ (filter_expired && !rs->is_ack_delayed))) {
bbr->min_rtt_us = rs->rtt_us;
bbr->min_rtt_stamp = tcp_jiffies32;
}
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index abbf0edcf6c2..81148f7a2323 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -24,7 +24,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
{
struct tcp_info *info = _info;
- if (sk_state_load(sk) == TCP_LISTEN) {
+ if (inet_sk_state_load(sk) == TCP_LISTEN) {
r->idiag_rqueue = sk->sk_ack_backlog;
r->idiag_wqueue = sk->sk_max_ack_backlog;
} else if (sk->sk_type == SOCK_STREAM) {
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 78c192ee03a4..018a48477355 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -379,18 +379,9 @@ fastopen:
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie)
{
- unsigned long last_syn_loss = 0;
const struct dst_entry *dst;
- int syn_loss = 0;
- tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);
-
- /* Recurring FO SYN losses: no cookie or data in SYN */
- if (syn_loss > 1 &&
- time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
- cookie->len = -1;
- return false;
- }
+ tcp_fastopen_cache_get(sk, mss, cookie);
/* Firewall blackhole issue check */
if (tcp_fastopen_active_should_disable(sk)) {
@@ -448,6 +439,8 @@ EXPORT_SYMBOL(tcp_fastopen_defer_connect);
* following circumstances:
* 1. client side TFO socket receives out of order FIN
* 2. client side TFO socket receives out of order RST
+ * 3. client side TFO socket has timed out three times consecutively during
+ * or after handshake
* We disable active side TFO globally for 1hr at first. Then if it
* happens again, we disable it for 2h, then 4h, 8h, ...
* And we reset the timeout back to 1hr when we see a successful active
@@ -524,3 +517,20 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
dst_release(dst);
}
}
+
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
+{
+ u32 timeouts = inet_csk(sk)->icsk_retransmits;
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Broken middle-boxes may black-hole a Fast Open connection during or
+ * even after the handshake. Be extremely conservative and pause
+ * Fast Open globally after hitting the third consecutive timeout or
+ * exceeding the configured timeout limit.
+ */
+ if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
+ (timeouts == 2 || (timeouts < 2 && expired))) {
+ tcp_fastopen_active_disable(sk);
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ }
+}
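The disable period referenced in the comment above doubles on each recurrence (1h, 2h, 4h, ...). A sketch of that backoff arithmetic; the one-hour base and the shift cap are illustrative constants, not the kernel's sysctl values:

#include <stdio.h>

/* Pause doubles with each consecutive blackhole event: 1h, 2h, 4h...
 * The one-hour base and the shift cap are illustrative constants.
 */
static unsigned long tfo_pause_secs(unsigned int events)
{
	unsigned int shift = events < 16 ? events : 16;	/* avoid overflow */

	return 3600UL << shift;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("event %u -> pause %lus\n", i + 1, tfo_pause_secs(i));
	return 0;
}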
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 45f750e85714..cfa51cfd2d99 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -97,6 +97,7 @@ int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */
+#define FLAG_ACK_MAYBE_DELAYED 0x10000 /* Likely a delayed ACK */
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -578,8 +579,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
void tcp_rcv_space_adjust(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ u32 copied;
int time;
- int copied;
tcp_mstamp_refresh(tp);
time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -602,38 +603,31 @@ void tcp_rcv_space_adjust(struct sock *sk)
if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
- int rcvwin, rcvmem, rcvbuf;
+ int rcvmem, rcvbuf;
+ u64 rcvwin, grow;
/* minimal window to cope with packet losses, assuming
* steady state. Add some cushion because of small variations.
*/
- rcvwin = (copied << 1) + 16 * tp->advmss;
+ rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
- /* If rate increased by 25%,
- * assume slow start, rcvwin = 3 * copied
- * If rate increased by 50%,
- * assume sender can use 2x growth, rcvwin = 4 * copied
- */
- if (copied >=
- tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
- if (copied >=
- tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
- rcvwin <<= 1;
- else
- rcvwin += (rcvwin >> 1);
- }
+ /* Accommodate sender rate increases (e.g. slow start) */
+ grow = rcvwin * (copied - tp->rcvq_space.space);
+ do_div(grow, tp->rcvq_space.space);
+ rcvwin += (grow << 1);
rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
rcvmem += 128;
- rcvbuf = min(rcvwin / tp->advmss * rcvmem,
- sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+ do_div(rcvwin, tp->advmss);
+ rcvbuf = min_t(u64, rcvwin * rcvmem,
+ sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
if (rcvbuf > sk->sk_rcvbuf) {
sk->sk_rcvbuf = rcvbuf;
/* Make the window clamp follow along. */
- tp->window_clamp = rcvwin;
+ tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
}
}
tp->rcvq_space.space = copied;
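The reworked auto-tuning above moves the window math to 64 bits so rcvwin * (copied - space) cannot overflow on large windows. A user-space sketch of the same growth computation with invented sample numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented sample: the reader consumed 1.5 MB this round after
	 * 1.0 MB last round, with a 1460-byte advertised MSS.
	 */
	uint64_t copied = 1500000, space = 1000000, advmss = 1460;
	uint64_t rcvwin = (copied << 1) + 16 * advmss;
	uint64_t grow = rcvwin * (copied - space) / space;

	rcvwin += grow << 1;
	printf("rcvwin=%llu bytes (~%llu segments)\n",
	       (unsigned long long)rcvwin,
	       (unsigned long long)(rcvwin / advmss));
	return 0;
}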
@@ -2864,11 +2858,18 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
*rexmit = REXMIT_LOST;
}
-static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
+static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
{
u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
struct tcp_sock *tp = tcp_sk(sk);
+ if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
+ /* If the remote keeps returning delayed ACKs, eventually
+ * the min filter would pick it up and overestimate the
+ * propagation delay when it expires. Skip suspected delayed ACKs.
+ */
+ return;
+ }
minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
rtt_us ? : jiffies_to_usecs(1));
}
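tcp_update_rtt_min() feeds a windowed running-min filter; the new check simply withholds suspected delayed-ACK samples from it. A simplified sketch of such a filter (one slot instead of the kernel's three-sample minmax structure; names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* One-slot running-min filter: take a new sample when it is lower,
 * or when the stored minimum has aged past the window. Suspected
 * delayed-ACK samples are simply never offered to the filter.
 */
struct win_min {
	uint32_t val;
	uint32_t stamp;
};

static void win_min_update(struct win_min *m, uint32_t now,
			   uint32_t win, uint32_t sample)
{
	if (sample <= m->val || now - m->stamp > win)
		*m = (struct win_min){ .val = sample, .stamp = now };
}

int main(void)
{
	struct win_min m = { .val = 50, .stamp = 0 };

	win_min_update(&m, 10, 100, 40);	/* lower: accepted */
	win_min_update(&m, 120, 100, 90);	/* old min expired: accepted */
	printf("min=%u\n", m.val);
	return 0;
}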
@@ -2908,7 +2909,7 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
* always taken together with ACK, SACK, or TS-opts. Any negative
* values will be skipped with the seq_rtt_us < 0 check above.
*/
- tcp_update_rtt_min(sk, ca_rtt_us);
+ tcp_update_rtt_min(sk, ca_rtt_us, flag);
tcp_rtt_estimator(sk, seq_rtt_us);
tcp_set_rto(sk);
@@ -3132,6 +3133,17 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
+
+ if (pkts_acked == 1 && last_in_flight < tp->mss_cache &&
+ last_in_flight && !prior_sacked && fully_acked &&
+ sack->rate->prior_delivered + 1 == tp->delivered &&
+ !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
+ /* Conservatively mark a delayed ACK. It's typically
+ * from a lone runt packet over the round trip to
+ * a receiver w/o out-of-order or CE events.
+ */
+ flag |= FLAG_ACK_MAYBE_DELAYED;
+ }
}
if (sack->first_sackt) {
sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
@@ -3621,6 +3633,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
lost = tp->lost - lost; /* freshly marked lost */
+ rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
tcp_xmit_recovery(sk, rexmit);
@@ -5306,6 +5319,9 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
unsigned int len = skb->len;
struct tcp_sock *tp = tcp_sk(sk);
+ /* TCP congestion window tracking */
+ trace_tcp_probe(sk, skb);
+
tcp_mstamp_refresh(tp);
if (unlikely(!sk->sk_rx_dst))
inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 94e28350f420..95738aa0d8a6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1911,7 +1911,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
/* Clean up the MD5 key list, if any */
if (tp->md5sig_info) {
tcp_clear_md5_list(sk);
- kfree_rcu(tp->md5sig_info, rcu);
+ kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
tp->md5sig_info = NULL;
}
#endif
@@ -2281,7 +2281,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
timer_expires = jiffies;
}
- state = sk_state_load(sk);
+ state = inet_sk_state_load(sk);
if (state == TCP_LISTEN)
rx_queue = sk->sk_ack_backlog;
else
@@ -2358,7 +2358,6 @@ out:
}
static const struct file_operations tcp_afinfo_seq_fops = {
- .owner = THIS_MODULE,
.open = tcp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 7097f92d16e5..03b51cdcc731 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -546,8 +546,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
static DEFINE_SEQLOCK(fastopen_seqlock);
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
- struct tcp_fastopen_cookie *cookie,
- int *syn_loss, unsigned long *last_syn_loss)
+ struct tcp_fastopen_cookie *cookie)
{
struct tcp_metrics_block *tm;
@@ -564,8 +563,6 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
*cookie = tfom->cookie;
if (cookie->len <= 0 && tfom->try_exp == 1)
cookie->exp = true;
- *syn_loss = tfom->syn_loss;
- *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
} while (read_seqretry(&fastopen_seqlock, seq));
}
rcu_read_unlock();
@@ -895,7 +892,7 @@ static void tcp_metrics_flush_all(struct net *net)
pp = &hb->chain;
for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
match = net ? net_eq(tm_net(tm), net) :
- !atomic_read(&tm_net(tm)->count);
+ !refcount_read(&tm_net(tm)->count);
if (match) {
*pp = tm->tcpm_next;
kfree_rcu(tm, rcu_head);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b079b619b60c..a8384b0c11f8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -316,9 +316,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
*/
local_bh_disable();
inet_twsk_schedule(tw, timeo);
- /* Linkage updates. */
- __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
- inet_twsk_put(tw);
+ /* Linkage updates.
+ * Note that access to tw after this point is illegal.
+ */
+ inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
local_bh_enable();
} else {
/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index 0b5a05bd82e3..764298e52577 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -146,7 +146,7 @@ static void tcpnv_init(struct sock *sk)
* within a datacenter, where we have reasonable estimates of
* RTTs
*/
- base_rtt = tcp_call_bpf(sk, BPF_SOCK_OPS_BASE_RTT);
+ base_rtt = tcp_call_bpf(sk, BPF_SOCK_OPS_BASE_RTT, 0, NULL);
if (base_rtt > 0) {
ca->nv_base_rtt = base_rtt;
ca->nv_lower_bound_rtt = (base_rtt * 205) >> 8; /* 80% */
@@ -364,7 +364,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
*/
cwnd_by_slope = (u32)
div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt,
- (u64)(80000 * tp->mss_cache));
+ 80000ULL * tp->mss_cache);
max_win = cwnd_by_slope + nv_pad;
/* If cwnd > max_win, decrease cwnd
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index b6a2aa1dcf56..4d58e2ce0b5b 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
+ return ERR_PTR(-EINVAL);
+
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
return ERR_PTR(-EINVAL);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a4d214c7b506..e9f985e42405 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1944,7 +1944,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
in_flight = tcp_packets_in_flight(tp);
- BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
+ BUG_ON(tcp_skb_pcount(skb) <= 1);
+ BUG_ON(tp->snd_cwnd <= in_flight);
send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@@ -2414,15 +2415,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
/* Schedule a loss probe in 2*RTT for SACK capable connections
- * in Open state, that are either limited by cwnd or application.
+ * not in loss recovery, that are either limited by cwnd or application.
*/
if ((early_retrans != 3 && early_retrans != 4) ||
!tp->packets_out || !tcp_is_sack(tp) ||
- icsk->icsk_ca_state != TCP_CA_Open)
- return false;
-
- if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
- !tcp_write_queue_empty(sk))
+ (icsk->icsk_ca_state != TCP_CA_Open &&
+ icsk->icsk_ca_state != TCP_CA_CWR))
return false;
/* Probe timeout is 2*rtt. Add minimum RTO to account
@@ -2907,6 +2905,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
+ if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
+ tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
+ TCP_SKB_CB(skb)->seq, segs, err);
+
if (likely(!err)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
trace_tcp_retransmit_skb(sk, skb);
@@ -3471,7 +3473,7 @@ int tcp_connect(struct sock *sk)
struct sk_buff *buff;
int err;
- tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
+ tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
return -EHOSTUNREACH; /* Routing failure or similar. */
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
deleted file mode 100644
index 697f4c67b2e3..000000000000
--- a/net/ipv4/tcp_probe.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * tcpprobe - Observe the TCP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/tcp.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/ktime.h>
-#include <linux/time.h>
-#include <net/net_namespace.h>
-
-#include <net/tcp.h>
-
-MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
-MODULE_DESCRIPTION("TCP cwnd snooper");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.1");
-
-static int port __read_mostly;
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-static unsigned int bufsize __read_mostly = 4096;
-MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
-module_param(bufsize, uint, 0);
-
-static unsigned int fwmark __read_mostly;
-MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
-module_param(fwmark, uint, 0);
-
-static int full __read_mostly;
-MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
-module_param(full, int, 0);
-
-static const char procname[] = "tcpprobe";
-
-struct tcp_log {
- ktime_t tstamp;
- union {
- struct sockaddr raw;
- struct sockaddr_in v4;
- struct sockaddr_in6 v6;
- } src, dst;
- u16 length;
- u32 snd_nxt;
- u32 snd_una;
- u32 snd_wnd;
- u32 rcv_wnd;
- u32 snd_cwnd;
- u32 ssthresh;
- u32 srtt;
-};
-
-static struct {
- spinlock_t lock;
- wait_queue_head_t wait;
- ktime_t start;
- u32 lastcwnd;
-
- unsigned long head, tail;
- struct tcp_log *log;
-} tcp_probe;
-
-static inline int tcp_probe_used(void)
-{
- return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
-}
-
-static inline int tcp_probe_avail(void)
-{
- return bufsize - tcp_probe_used() - 1;
-}
-
-#define tcp_probe_copy_fl_to_si4(inet, si4, mem) \
- do { \
- si4.sin_family = AF_INET; \
- si4.sin_port = inet->inet_##mem##port; \
- si4.sin_addr.s_addr = inet->inet_##mem##addr; \
- } while (0) \
-
-/*
- * Hook inserted to be called before each receive packet.
- * Note: arguments must match tcp_rcv_established()!
- */
-static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th)
-{
- unsigned int len = skb->len;
- const struct tcp_sock *tp = tcp_sk(sk);
- const struct inet_sock *inet = inet_sk(sk);
-
- /* Only update if port or skb mark matches */
- if (((port == 0 && fwmark == 0) ||
- ntohs(inet->inet_dport) == port ||
- ntohs(inet->inet_sport) == port ||
- (fwmark > 0 && skb->mark == fwmark)) &&
- (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
-
- spin_lock(&tcp_probe.lock);
- /* If log fills, just silently drop */
- if (tcp_probe_avail() > 1) {
- struct tcp_log *p = tcp_probe.log + tcp_probe.head;
-
- p->tstamp = ktime_get();
- switch (sk->sk_family) {
- case AF_INET:
- tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
- tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
- break;
- case AF_INET6:
- memset(&p->src.v6, 0, sizeof(p->src.v6));
- memset(&p->dst.v6, 0, sizeof(p->dst.v6));
-#if IS_ENABLED(CONFIG_IPV6)
- p->src.v6.sin6_family = AF_INET6;
- p->src.v6.sin6_port = inet->inet_sport;
- p->src.v6.sin6_addr = inet6_sk(sk)->saddr;
-
- p->dst.v6.sin6_family = AF_INET6;
- p->dst.v6.sin6_port = inet->inet_dport;
- p->dst.v6.sin6_addr = sk->sk_v6_daddr;
-#endif
- break;
- default:
- BUG();
- }
-
- p->length = len;
- p->snd_nxt = tp->snd_nxt;
- p->snd_una = tp->snd_una;
- p->snd_cwnd = tp->snd_cwnd;
- p->snd_wnd = tp->snd_wnd;
- p->rcv_wnd = tp->rcv_wnd;
- p->ssthresh = tcp_current_ssthresh(sk);
- p->srtt = tp->srtt_us >> 3;
-
- tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
- }
- tcp_probe.lastcwnd = tp->snd_cwnd;
- spin_unlock(&tcp_probe.lock);
-
- wake_up(&tcp_probe.wait);
- }
-
- jprobe_return();
-}
-
-static struct jprobe tcp_jprobe = {
- .kp = {
- .symbol_name = "tcp_rcv_established",
- },
- .entry = jtcp_rcv_established,
-};
-
-static int tcpprobe_open(struct inode *inode, struct file *file)
-{
- /* Reset (empty) log */
- spin_lock_bh(&tcp_probe.lock);
- tcp_probe.head = tcp_probe.tail = 0;
- tcp_probe.start = ktime_get();
- spin_unlock_bh(&tcp_probe.lock);
-
- return 0;
-}
-
-static int tcpprobe_sprint(char *tbuf, int n)
-{
- const struct tcp_log *p
- = tcp_probe.log + tcp_probe.tail;
- struct timespec64 ts
- = ktime_to_timespec64(ktime_sub(p->tstamp, tcp_probe.start));
-
- return scnprintf(tbuf, n,
- "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
- (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec,
- &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
- p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
-}
-
-static ssize_t tcpprobe_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- int error = 0;
- size_t cnt = 0;
-
- if (!buf)
- return -EINVAL;
-
- while (cnt < len) {
- char tbuf[256];
- int width;
-
- /* Wait for data in buffer */
- error = wait_event_interruptible(tcp_probe.wait,
- tcp_probe_used() > 0);
- if (error)
- break;
-
- spin_lock_bh(&tcp_probe.lock);
- if (tcp_probe.head == tcp_probe.tail) {
- /* multiple readers race? */
- spin_unlock_bh(&tcp_probe.lock);
- continue;
- }
-
- width = tcpprobe_sprint(tbuf, sizeof(tbuf));
-
- if (cnt + width < len)
- tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);
-
- spin_unlock_bh(&tcp_probe.lock);
-
- /* if record greater than space available
- return partial buffer (so far) */
- if (cnt + width >= len)
- break;
-
- if (copy_to_user(buf + cnt, tbuf, width))
- return -EFAULT;
- cnt += width;
- }
-
- return cnt == 0 ? error : cnt;
-}
-
-static const struct file_operations tcpprobe_fops = {
- .owner = THIS_MODULE,
- .open = tcpprobe_open,
- .read = tcpprobe_read,
- .llseek = noop_llseek,
-};
-
-static __init int tcpprobe_init(void)
-{
- int ret = -ENOMEM;
-
- /* Warning: if the function signature of tcp_rcv_established,
- * has been changed, you also have to change the signature of
- * jtcp_rcv_established, otherwise you end up right here!
- */
- BUILD_BUG_ON(__same_type(tcp_rcv_established,
- jtcp_rcv_established) == 0);
-
- init_waitqueue_head(&tcp_probe.wait);
- spin_lock_init(&tcp_probe.lock);
-
- if (bufsize == 0)
- return -EINVAL;
-
- bufsize = roundup_pow_of_two(bufsize);
- tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
- if (!tcp_probe.log)
- goto err0;
-
- if (!proc_create(procname, S_IRUSR, init_net.proc_net, &tcpprobe_fops))
- goto err0;
-
- ret = register_jprobe(&tcp_jprobe);
- if (ret)
- goto err1;
-
- pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
- port, fwmark, bufsize);
- return 0;
- err1:
- remove_proc_entry(procname, init_net.proc_net);
- err0:
- kfree(tcp_probe.log);
- return ret;
-}
-module_init(tcpprobe_init);
-
-static __exit void tcpprobe_exit(void)
-{
- remove_proc_entry(procname, init_net.proc_net);
- unregister_jprobe(&tcp_jprobe);
- kfree(tcp_probe.log);
-}
-module_exit(tcpprobe_exit);
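
The probe log removed above is a classic power-of-two ring buffer: head and tail only ever advance modulo the buffer size, and tcpprobe_init() rounds bufsize up with roundup_pow_of_two() precisely so that the `& (bufsize - 1)` masking in tcp_probe_used() and in the head/tail updates stays valid. A minimal standalone sketch of the same arithmetic (names are illustrative, not kernel API):

	#include <stddef.h>

	struct ring {
		unsigned long head, tail;
		size_t size;			/* must be a power of two */
	};

	static size_t ring_used(const struct ring *r)
	{
		/* unsigned wrap-around makes head - tail safe */
		return (r->head - r->tail) & (r->size - 1);
	}

	static size_t ring_avail(const struct ring *r)
	{
		/* one slot stays empty so head == tail means "empty" */
		return r->size - ring_used(r) - 1;
	}

	static void ring_push(struct ring *r)
	{
		r->head = (r->head + 1) & (r->size - 1);
	}
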
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 968fda198376..71fc60f1b326 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -48,11 +48,19 @@ static void tcp_write_err(struct sock *sk)
* to prevent DoS attacks. It is called when a retransmission timeout
* or zero probe timeout occurs on orphaned socket.
*
+ * Also close if our net namespace is exiting; in that case there is no
+ * hope of ever communicating again since all netns interfaces are already
+ * down (or about to be down), and we need to release our dst references,
+ * which have been moved to the netns loopback interface, so the namespace
+ * can finish exiting. This condition is only possible if we are a kernel
+ * socket, as those do not hold references to the namespace.
+ *
* Criteria is still not confirmed experimentally and may change.
* We kill the socket, if:
* 1. If number of orphaned sockets exceeds an administratively configured
* limit.
* 2. If we have strong memory pressure.
+ * 3. If our net namespace is exiting.
*/
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
@@ -81,6 +89,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
return 1;
}
+
+ if (!check_net(sock_net(sk))) {
+ /* Not possible to send reset; just close */
+ tcp_done(sk);
+ return 1;
+ }
+
return 0;
}
@@ -183,11 +198,6 @@ static int tcp_write_timeout(struct sock *sk)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits) {
dst_negative_advice(sk);
- if (tp->syn_fastopen || tp->syn_data)
- tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
- if (tp->syn_data && icsk->icsk_retransmits == 1)
- NET_INC_STATS(sock_net(sk),
- LINUX_MIB_TCPFASTOPENACTIVEFAIL);
} else if (!tp->syn_data && !tp->syn_fastopen) {
sk_rethink_txhash(sk);
}
@@ -195,17 +205,6 @@ static int tcp_write_timeout(struct sock *sk)
expired = icsk->icsk_retransmits >= retry_until;
} else {
if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
- /* Some middle-boxes may black-hole Fast Open _after_
- * the handshake. Therefore we conservatively disable
- * Fast Open on this path on recurring timeouts after
- * successful Fast Open.
- */
- if (tp->syn_data_acked) {
- tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
- if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
- NET_INC_STATS(sock_net(sk),
- LINUX_MIB_TCPFASTOPENACTIVEFAIL);
- }
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
@@ -228,11 +227,19 @@ static int tcp_write_timeout(struct sock *sk)
expired = retransmits_timed_out(sk, retry_until,
icsk->icsk_user_timeout);
}
+ tcp_fastopen_active_detect_blackhole(sk, expired);
+
+ if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
+ tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
+ icsk->icsk_retransmits,
+ icsk->icsk_rto, (int)expired);
+
if (expired) {
/* Has it gone just too far? */
tcp_write_err(sk);
return 1;
}
+
return 0;
}
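
Two orthogonal hooks land in this tcp_timer.c hunk: check_net() lets an orphaned socket be torn down once its namespace is exiting, and a new BPF callback reports each retransmission timeout. Based on the tcp_call_bpf_3arg() arguments shown above, a sockops program consuming the callback might look like the following sketch (libbpf-style SEC() annotation assumed; the program must first have enabled BPF_SOCK_OPS_RTO_CB_FLAG, e.g. via bpf_sock_ops_cb_flags_set()):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("sockops")
	int rto_cb(struct bpf_sock_ops *skops)
	{
		if (skops->op == BPF_SOCK_OPS_RTO_CB) {
			__u32 retransmits = skops->args[0]; /* icsk_retransmits */
			__u32 rto         = skops->args[1]; /* icsk_rto */
			__u32 expired     = skops->args[2]; /* timeout was fatal */

			/* e.g. export the counters to a map here */
			(void)retransmits; (void)rto; (void)expired;
		}
		return 1;
	}
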
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e4ff25c947c5..f81f969f9c06 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -357,18 +357,12 @@ fail:
}
EXPORT_SYMBOL(udp_lib_get_port);
-static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
- unsigned int port)
-{
- return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
-}
-
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
- udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
+ ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
unsigned int hash2_partial =
- udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
+ ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@ -445,7 +439,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
struct sk_buff *skb)
{
struct sock *sk, *result;
- int score, badness, matches = 0, reuseport = 0;
+ int score, badness;
u32 hash = 0;
result = NULL;
@@ -454,23 +448,16 @@ static struct sock *udp4_lib_lookup2(struct net *net,
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, sdif, exact_dif);
if (score > badness) {
- reuseport = sk->sk_reuseport;
- if (reuseport) {
+ if (sk->sk_reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
if (result)
return result;
- matches = 1;
}
badness = score;
result = sk;
- } else if (score == badness && reuseport) {
- matches++;
- if (reciprocal_scale(hash, matches) == 0)
- result = sk;
- hash = next_pseudo_random32(hash);
}
}
return result;
@@ -488,11 +475,11 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
bool exact_dif = udp_lib_exact_dif_match(net, skb);
- int score, badness, matches = 0, reuseport = 0;
+ int score, badness;
u32 hash = 0;
if (hslot->count > 10) {
- hash2 = udp4_portaddr_hash(net, daddr, hnum);
+ hash2 = ipv4_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
@@ -503,7 +490,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
exact_dif, hslot2, skb);
if (!result) {
unsigned int old_slot2 = slot2;
- hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
+ hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
/* avoid searching the same slot again. */
if (unlikely(slot2 == old_slot2))
@@ -526,23 +513,16 @@ begin:
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, sdif, exact_dif);
if (score > badness) {
- reuseport = sk->sk_reuseport;
- if (reuseport) {
+ if (sk->sk_reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
if (result)
return result;
- matches = 1;
}
result = sk;
badness = score;
- } else if (score == badness && reuseport) {
- matches++;
- if (reciprocal_scale(hash, matches) == 0)
- result = sk;
- hash = next_pseudo_random32(hash);
}
}
return result;
@@ -997,8 +977,21 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!saddr)
saddr = inet->mc_addr;
connected = 0;
- } else if (!ipc.oif)
+ } else if (!ipc.oif) {
ipc.oif = inet->uc_index;
+ } else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
+ /* oif is set, packet is to local broadcast and
+ * uc_index is set. oif is most likely set
+ * by sk_bound_dev_if. If uc_index != oif check if the
+ * oif is an L3 master and uc_index is an L3 slave.
+ * If so, we want to allow the send using the uc_index.
+ */
+ if (ipc.oif != inet->uc_index &&
+ ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
+ inet->uc_index)) {
+ ipc.oif = inet->uc_index;
+ }
+ }
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
@@ -1775,7 +1768,7 @@ EXPORT_SYMBOL(udp_lib_rehash);
static void udp_v4_rehash(struct sock *sk)
{
- u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+ u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
@@ -1966,9 +1959,9 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct sk_buff *nskb;
if (use_hash2) {
- hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
+ hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
udptable->mask;
- hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
+ hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
hslot = &udptable->hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
@@ -2200,7 +2193,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
int dif, int sdif)
{
unsigned short hnum = ntohs(loc_port);
- unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
+ unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
unsigned int slot2 = hash2 & udp_table.mask;
struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
@@ -2502,16 +2495,14 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
* but then block when reading it. Add special case code
* to work around these arguably broken applications.
*/
-unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
- unsigned int mask = datagram_poll(file, sock, wait);
+ __poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
mask |= POLLIN | POLLRDNORM;
- sock_rps_record_flow(sk);
-
/* Check for false positives due to checksum errors */
if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
!(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
@@ -2736,7 +2727,6 @@ int udp4_seq_show(struct seq_file *seq, void *v)
}
static const struct file_operations udp_afinfo_seq_fops = {
- .owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
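
The deleted udp4_portaddr_hash() did not lose its logic; it moved to a shared helper so TCP's new listener tables (see the inet6_hashtables.c hunk below) can reuse it. A sketch of the equivalent shared definition, with the body taken verbatim from the helper removed above:

	static inline u32 ipv4_portaddr_hash(const struct net *net,
					     __be32 saddr, unsigned int port)
	{
		return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
	}
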
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 01801b77bd0d..ea6e6e7df0ee 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
goto out;
}
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+ goto out;
+
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 59f10fe9782e..f96614e9b9a5 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -75,7 +75,6 @@ static struct inet_protosw udplite4_protosw = {
#ifdef CONFIG_PROC_FS
static const struct file_operations udplite_afinfo_seq_fops = {
- .owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index e6265e2c274e..63faeee989a9 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -62,7 +62,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
- top_iph->ttl = ip4_dst_hoplimit(dst->child);
+ top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
@@ -92,6 +92,7 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
+ eth_hdr(skb)->h_proto = skb->protocol;
err = 0;
@@ -105,18 +106,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
{
__skb_push(skb, skb->mac_len);
return skb_mac_gso_segment(skb, features);
-
}
static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- if (xo->flags & XFRM_GSO_SEGMENT) {
- skb->network_header = skb->network_header - x->props.header_len;
+ if (xo->flags & XFRM_GSO_SEGMENT)
skb->transport_header = skb->network_header +
sizeof(struct iphdr);
- }
skb_reset_mac_len(skb);
pskb_pull(skb, skb->mac_len + x->props.header_len);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f49bd7897e95..e1846b97ee69 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -186,7 +186,8 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
static void addrconf_dad_start(struct inet6_ifaddr *ifp);
static void addrconf_dad_work(struct work_struct *w);
-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
+ bool send_na);
static void addrconf_dad_run(struct inet6_dev *idev);
static void addrconf_rs_timer(struct timer_list *t);
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -3438,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
} else if (event == NETDEV_CHANGE) {
if (!addrconf_link_ready(dev)) {
/* device is still not ready. */
+ rt6_sync_down_dev(dev, event);
break;
}
@@ -3449,6 +3451,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
* multicast snooping switches
*/
ipv6_mc_up(idev);
+ rt6_sync_up(dev, RTNH_F_LINKDOWN);
break;
}
idev->if_flags |= IF_READY;
@@ -3484,6 +3487,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (run_pending)
addrconf_dad_run(idev);
+ /* Device has an address by now */
+ rt6_sync_up(dev, RTNH_F_DEAD);
+
/*
* If the MTU changed during the interface down,
* when the interface up, the changed MTU must be
@@ -3577,6 +3583,7 @@ static bool addr_is_local(const struct in6_addr *addr)
static int addrconf_ifdown(struct net_device *dev, int how)
{
+ unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN;
struct net *net = dev_net(dev);
struct inet6_dev *idev;
struct inet6_ifaddr *ifa, *tmp;
@@ -3586,8 +3593,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
ASSERT_RTNL();
- rt6_ifdown(net, dev);
- neigh_ifdown(&nd_tbl, dev);
+ rt6_disable_ip(dev, event);
idev = __in6_dev_get(dev);
if (!idev)
@@ -3833,12 +3839,17 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
idev->cnf.accept_dad < 1) ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
+ bool send_na = false;
+
+ if (ifp->flags & IFA_F_TENTATIVE &&
+ !(ifp->flags & IFA_F_OPTIMISTIC))
+ send_na = true;
bump_id = ifp->flags & IFA_F_TENTATIVE;
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
- addrconf_dad_completed(ifp, bump_id);
+ addrconf_dad_completed(ifp, bump_id, send_na);
return;
}
@@ -3967,16 +3978,21 @@ static void addrconf_dad_work(struct work_struct *w)
}
if (ifp->dad_probes == 0) {
+ bool send_na = false;
+
/*
* DAD was successful
*/
+ if (ifp->flags & IFA_F_TENTATIVE &&
+ !(ifp->flags & IFA_F_OPTIMISTIC))
+ send_na = true;
bump_id = ifp->flags & IFA_F_TENTATIVE;
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
write_unlock_bh(&idev->lock);
- addrconf_dad_completed(ifp, bump_id);
+ addrconf_dad_completed(ifp, bump_id, send_na);
goto out;
}
@@ -4014,7 +4030,8 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
return true;
}
-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
+ bool send_na)
{
struct net_device *dev = ifp->idev->dev;
struct in6_addr lladdr;
@@ -4046,6 +4063,16 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
if (send_mld)
ipv6_mc_dad_complete(ifp->idev);
+ /* send unsolicited NA if enabled */
+ if (send_na &&
+ (ifp->idev->cnf.ndisc_notify ||
+ dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
+ ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
+ /*router=*/ !!ifp->idev->cnf.forwarding,
+ /*solicited=*/ false, /*override=*/ true,
+ /*inc_opt=*/ true);
+ }
+
if (send_rs) {
/*
 * If a host has already performed a random delay
@@ -4209,7 +4236,6 @@ static int if6_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations if6_fops = {
- .owner = THIS_MODULE,
.open = if6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -4352,9 +4378,11 @@ restart:
spin_lock(&ifpub->lock);
ifpub->regen_count = 0;
spin_unlock(&ifpub->lock);
+ rcu_read_unlock_bh();
ipv6_create_tempaddr(ifpub, ifp, true);
in6_ifa_put(ifpub);
in6_ifa_put(ifp);
+ rcu_read_lock_bh();
goto restart;
}
} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
@@ -6595,27 +6623,45 @@ int __init addrconf_init(void)
rtnl_af_register(&inet6_ops);
- err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
- 0);
+ err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
+ NULL, inet6_dump_ifinfo, 0);
if (err < 0)
goto errout;
- /* Only the first call to __rtnl_register can fail */
- __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, 0);
- __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, 0);
- __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
- inet6_dump_ifaddr, RTNL_FLAG_DOIT_UNLOCKED);
- __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
- inet6_dump_ifmcaddr, 0);
- __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
- inet6_dump_ifacaddr, 0);
- __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
- inet6_netconf_dump_devconf, RTNL_FLAG_DOIT_UNLOCKED);
-
- ipv6_addr_label_rtnl_register();
+ err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
+ inet6_rtm_newaddr, NULL, 0);
+ if (err < 0)
+ goto errout;
+ err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
+ inet6_rtm_deladdr, NULL, 0);
+ if (err < 0)
+ goto errout;
+ err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
+ inet6_rtm_getaddr, inet6_dump_ifaddr,
+ RTNL_FLAG_DOIT_UNLOCKED);
+ if (err < 0)
+ goto errout;
+ err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
+ NULL, inet6_dump_ifmcaddr, 0);
+ if (err < 0)
+ goto errout;
+ err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
+ NULL, inet6_dump_ifacaddr, 0);
+ if (err < 0)
+ goto errout;
+ err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
+ inet6_netconf_get_devconf,
+ inet6_netconf_dump_devconf,
+ RTNL_FLAG_DOIT_UNLOCKED);
+ if (err < 0)
+ goto errout;
+ err = ipv6_addr_label_rtnl_register();
+ if (err < 0)
+ goto errout;
return 0;
errout:
+ rtnl_unregister_all(PF_INET6);
rtnl_af_unregister(&inet6_ops);
unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
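
The addrconf_init() conversion above trades the old "only the first __rtnl_register() can fail" assumption for explicit error checking, with a single rtnl_unregister_all(PF_INET6) unwinding whatever subset was registered. The resulting pattern, reduced to a sketch with hypothetical handlers:

	static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				struct netlink_ext_ack *extack);
	static int example_dump(struct sk_buff *skb, struct netlink_callback *cb);

	static int __init example_rtnl_init(void)
	{
		int err;

		err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
					   NULL, example_dump, 0);
		if (err < 0)
			goto errout;
		err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
					   example_doit, NULL, 0);
		if (err < 0)
			goto errout;
		return 0;
	errout:
		/* tears down every handler this family registered so far */
		rtnl_unregister_all(PF_INET6);
		return err;
	}
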
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 00e1f8ee08f8..1d6ced37ad71 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -547,13 +547,22 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
return err;
}
-void __init ipv6_addr_label_rtnl_register(void)
+int __init ipv6_addr_label_rtnl_register(void)
{
- __rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel,
- NULL, RTNL_FLAG_DOIT_UNLOCKED);
- __rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel,
- NULL, RTNL_FLAG_DOIT_UNLOCKED);
- __rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get,
- ip6addrlbl_dump, RTNL_FLAG_DOIT_UNLOCKED);
-}
+ int ret;
+ ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDRLABEL,
+ ip6addrlbl_newdel,
+ NULL, RTNL_FLAG_DOIT_UNLOCKED);
+ if (ret < 0)
+ return ret;
+ ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDRLABEL,
+ ip6addrlbl_newdel,
+ NULL, RTNL_FLAG_DOIT_UNLOCKED);
+ if (ret < 0)
+ return ret;
+ ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDRLABEL,
+ ip6addrlbl_get,
+ ip6addrlbl_dump, RTNL_FLAG_DOIT_UNLOCKED);
+ return ret;
+}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c9441ca45399..416917719a6f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -284,6 +284,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct net *net = sock_net(sk);
__be32 v4addr = 0;
unsigned short snum;
+ bool saved_ipv6only;
int addr_type = 0;
int err = 0;
@@ -389,19 +390,21 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (!(addr_type & IPV6_ADDR_MULTICAST))
np->saddr = addr->sin6_addr;
+ saved_ipv6only = sk->sk_ipv6only;
+ if (addr_type != IPV6_ADDR_ANY && addr_type != IPV6_ADDR_MAPPED)
+ sk->sk_ipv6only = 1;
+
/* Make sure we are allowed to bind here. */
if ((snum || !inet->bind_address_no_port) &&
sk->sk_prot->get_port(sk, snum)) {
+ sk->sk_ipv6only = saved_ipv6only;
inet_reset_saddr(sk);
err = -EADDRINUSE;
goto out;
}
- if (addr_type != IPV6_ADDR_ANY) {
+ if (addr_type != IPV6_ADDR_ANY)
sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
- if (addr_type != IPV6_ADDR_MAPPED)
- sk->sk_ipv6only = 1;
- }
if (snum)
sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
inet->inet_sport = htons(inet->inet_num);
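
The inet6_bind() hunk above moves the sk_ipv6only assignment in front of the get_port() conflict check, so the check sees the v6-only property of the address being bound, and restores the saved value if the bind fails. The save/restore-on-failure idiom, as a self-contained sketch (names hypothetical):

	struct bind_ctx {
		int ipv6only;
	};

	static int port_in_use(struct bind_ctx *ctx);	/* assumed check */

	static int example_bind(struct bind_ctx *ctx, bool v6_specific_addr)
	{
		int saved_ipv6only = ctx->ipv6only;

		/* make the property visible to the conflict check ... */
		if (v6_specific_addr)
			ctx->ipv6only = 1;

		if (port_in_use(ctx)) {
			/* ... and roll it back on failure so the socket is
			 * left exactly as it was before the attempt
			 */
			ctx->ipv6only = saved_ipv6only;
			return -1;	/* -EADDRINUSE in the real code */
		}
		return 0;
	}
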
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 0bbab8a4b5d8..8e085cc05aeb 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -533,7 +533,6 @@ static int ac6_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations ac6_seq_fops = {
- .owner = THIS_MODULE,
.open = ac6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a1f918713006..fbf08ce3f5ab 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -221,8 +221,7 @@ ipv4_connected:
if (__ipv6_addr_needs_scope_id(addr_type)) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
usin->sin6_scope_id) {
- if (sk->sk_bound_dev_if &&
- sk->sk_bound_dev_if != usin->sin6_scope_id) {
+ if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id)) {
err = -EINVAL;
goto out;
}
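
The datagram.c change swaps a literal sk_bound_dev_if comparison for sk_dev_equal_l3scope(). In keeping with the VRF-related changes elsewhere in this series, the helper presumably also accepts a socket bound to the L3 master device that the scope-id interface is enslaved to; a hedged sketch of that acceptance logic over plain ifindex values (helper shape assumed, not taken from this hunk):

	static bool dev_equal_l3scope(int bound_if, int scope_if,
				      int l3_master_of_scope_if)
	{
		if (!bound_if || bound_if == scope_if)
			return true;	/* unbound, or exact device match */
		/* bound to the VRF that the scope device belongs to */
		return bound_if == l3_master_of_scope_if;
	}
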
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a902ff8f59be..97513f35bcc5 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
static void esp_output_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
+ struct xfrm_offload *xo = xfrm_offload(skb);
void *tmp;
- struct dst_entry *dst = skb_dst(skb);
- struct xfrm_state *x = dst->xfrm;
+ struct xfrm_state *x;
+
+ if (xo && (xo->flags & XFRM_DEV_RESUME))
+ x = skb->sp->xvec[skb->sp->len - 1];
+ else
+ x = skb_dst(skb)->xfrm;
tmp = ESP_SKB_CB(skb)->tmp;
esp_ssg_unref(x, tmp);
kfree(tmp);
- xfrm_output_resume(skb, err);
+
+ if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+ if (err) {
+ XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_push(skb, skb->data - skb_mac_header(skb));
+ secpath_reset(skb);
+ xfrm_dev_resume(skb);
+ } else {
+ xfrm_output_resume(skb, err);
+ }
}
/* Move ESP header back into place. */
@@ -734,17 +752,13 @@ static int esp_init_aead(struct xfrm_state *x)
char aead_name[CRYPTO_MAX_ALG_NAME];
struct crypto_aead *aead;
int err;
- u32 mask = 0;
err = -ENAMETOOLONG;
if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
goto error;
- if (x->xso.offload_handle)
- mask |= CRYPTO_ALG_ASYNC;
-
- aead = crypto_alloc_aead(aead_name, 0, mask);
+ aead = crypto_alloc_aead(aead_name, 0, 0);
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
@@ -774,7 +788,6 @@ static int esp_init_authenc(struct xfrm_state *x)
char authenc_name[CRYPTO_MAX_ALG_NAME];
unsigned int keylen;
int err;
- u32 mask = 0;
err = -EINVAL;
if (!x->ealg)
@@ -800,10 +813,7 @@ static int esp_init_authenc(struct xfrm_state *x)
goto error;
}
- if (x->xso.offload_handle)
- mask |= CRYPTO_ALG_ASYNC;
-
- aead = crypto_alloc_aead(authenc_name, 0, mask);
+ aead = crypto_alloc_aead(authenc_name, 0, 0);
err = PTR_ERR(aead);
if (IS_ERR(aead))
goto error;
@@ -890,13 +900,12 @@ static int esp6_init_state(struct xfrm_state *x)
x->props.header_len += IPV4_BEET_PHMAXLEN +
(sizeof(struct ipv6hdr) - sizeof(struct iphdr));
break;
+ default:
case XFRM_MODE_TRANSPORT:
break;
case XFRM_MODE_TUNNEL:
x->props.header_len += sizeof(struct ipv6hdr);
break;
- default:
- goto error;
}
align = ALIGN(crypto_aead_blocksize(aead), 4);
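
Two independent simplifications sit in the esp6.c hunks above: offloaded states no longer force synchronous crypto (the CRYPTO_ALG_ASYNC mask is gone, since esp_output_done() can now resume an async completion through xfrm_dev_resume()), and esp6_init_state() reorders its mode switch so an unknown mode falls through to transport handling instead of failing. The fall-through idiom, as a small sketch:

	enum demo_mode { DEMO_TRANSPORT, DEMO_TUNNEL, DEMO_FUTURE };

	static unsigned int demo_header_len(enum demo_mode mode)
	{
		unsigned int len = 0;

		switch (mode) {
		default:		/* unknown modes fall through ... */
		case DEMO_TRANSPORT:	/* ... to the transport handling */
			break;
		case DEMO_TUNNEL:
			len += 40;	/* sizeof(struct ipv6hdr) */
			break;
		}
		return len;
	}
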
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 333a478aa161..3fd1ec775dc2 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
int nhoff;
int err;
- skb_pull(skb, offset);
+ if (!pskb_pull(skb, offset))
+ return NULL;
if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
goto out;
@@ -135,75 +136,39 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
- __u32 seq;
- int err = 0;
- struct sk_buff *skb2;
struct xfrm_state *x;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
- struct sk_buff *segs = ERR_PTR(-EINVAL);
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
if (!xo)
- goto out;
+ return ERR_PTR(-EINVAL);
- seq = xo->seq.low;
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+ return ERR_PTR(-EINVAL);
x = skb->sp->xvec[skb->sp->len - 1];
aead = x->data;
esph = ip_esp_hdr(skb);
if (esph->spi != x->id.spi)
- goto out;
+ return ERR_PTR(-EINVAL);
if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
- goto out;
+ return ERR_PTR(-EINVAL);
__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
skb->encap_hdr_csum = 1;
- if (!(features & NETIF_F_HW_ESP))
+ if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+ (x->xso.dev != skb->dev))
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
- segs = x->outer_mode->gso_segment(x, skb, esp_features);
- if (IS_ERR_OR_NULL(segs))
- goto out;
-
- __skb_pull(skb, skb->data - skb_mac_header(skb));
+ xo->flags |= XFRM_GSO_SEGMENT;
- skb2 = segs;
- do {
- struct sk_buff *nskb = skb2->next;
-
- xo = xfrm_offload(skb2);
- xo->flags |= XFRM_GSO_SEGMENT;
- xo->seq.low = seq;
- xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
- if(!(features & NETIF_F_HW_ESP))
- xo->flags |= CRYPTO_FALLBACK;
-
- x->outer_mode->xmit(x, skb2);
-
- err = x->type_offload->xmit(x, skb2, esp_features);
- if (err) {
- kfree_skb_list(segs);
- return ERR_PTR(err);
- }
-
- if (!skb_is_gso(skb2))
- seq++;
- else
- seq += skb_shinfo(skb2)->gso_segs;
-
- skb_push(skb2, skb2->mac_len);
- skb2 = nskb;
- } while (skb2);
-
-out:
- return segs;
+ return x->outer_mode->gso_segment(x, skb, esp_features);
}
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -222,6 +187,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
+ int len;
int err;
int alen;
int blksize;
@@ -230,6 +196,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
struct crypto_aead *aead;
struct esp_info esp;
bool hw_offload = true;
+ __u32 seq;
esp.inplace = true;
@@ -265,28 +232,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
return esp.nfrags;
}
+ seq = xo->seq.low;
+
esph = ip_esp_hdr(skb);
esph->spi = x->id.spi;
skb_push(skb, -skb_network_offset(skb));
if (xo->flags & XFRM_GSO_SEGMENT) {
- esph->seq_no = htonl(xo->seq.low);
- } else {
- int len;
-
- len = skb->len - sizeof(struct ipv6hdr);
- if (len > IPV6_MAXPLEN)
- len = 0;
+ esph->seq_no = htonl(seq);
- ipv6_hdr(skb)->payload_len = htons(len);
+ if (!skb_is_gso(skb))
+ xo->seq.low++;
+ else
+ xo->seq.low += skb_shinfo(skb)->gso_segs;
}
+ esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+ len = skb->len - sizeof(struct ipv6hdr);
+ if (len > IPV6_MAXPLEN)
+ len = 0;
+
+ ipv6_hdr(skb)->payload_len = htons(len);
+
if (hw_offload)
return 0;
- esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
err = esp6_output_tail(x, skb, &esp);
if (err)
return err;
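
In the reworked esp6_xmit() above, the per-segment sequence-number loop from esp6_gso_segment() collapses into one update: the sequence number stamped on the skb is the current xo->seq.low, which then advances by one for a plain skb or by gso_segs for a GSO skb. The bookkeeping, isolated into a sketch:

	typedef unsigned int u32;	/* stand-in for the kernel type */

	static u32 esp_seq_take(u32 *seq_low, unsigned int gso_segs)
	{
		u32 seq = *seq_low;	/* value written into this skb */

		/* a GSO skb consumes one sequence number per segment */
		*seq_low += gso_segs ? gso_segs : 1;
		return seq;
	}
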
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 83bd75713535..bc68eb661970 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -925,6 +925,15 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
sr_phdr->segments[0] = **addr_p;
*addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];
+ if (sr_ihdr->hdrlen > hops * 2) {
+ int tlvs_offset, tlvs_length;
+
+ tlvs_offset = (1 + hops * 2) << 3;
+ tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
+ memcpy((char *)sr_phdr + tlvs_offset,
+ (char *)sr_ihdr + tlvs_offset, tlvs_length);
+ }
+
#ifdef CONFIG_IPV6_SEG6_HMAC
if (sr_has_hmac(sr_phdr)) {
struct net *net = NULL;
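
The exthdrs.c fix above copies any TLV data that follows the segment list when building the pushed routing header. The offsets follow from RFC 8200 length rules: hdrlen counts 8-octet units beyond the first 8 bytes, and each IPv6 segment address occupies two such units, so everything past (1 + hops * 2) units is TLV data. As a sketch:

	static void srh_tlv_span(unsigned int hdrlen, unsigned int hops,
				 unsigned int *tlvs_offset,
				 unsigned int *tlvs_length)
	{
		*tlvs_offset = (1 + hops * 2) << 3;	/* bytes from rthdr start */
		*tlvs_length = (hdrlen - hops * 2) << 3; /* trailing TLV bytes */
	}
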
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 6eb5e68f112a..44c39c5f0638 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -512,9 +512,7 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct ila_map *ila;
int ret;
- ret = rhashtable_walk_start(rhiter);
- if (ret && ret != -EAGAIN)
- goto done;
+ rhashtable_walk_start(rhiter);
for (;;) {
ila = rhashtable_walk_next(rhiter);
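
The ila_xlat.c change reflects an rhashtable API update: rhashtable_walk_start() no longer returns a transient -EAGAIN, so callers drop the error check and instead handle resize races when rhashtable_walk_next() returns ERR_PTR(-EAGAIN). The resulting walk shape, sketched:

	static void example_walk(struct rhashtable_iter *iter)
	{
		void *obj;

		rhashtable_walk_start(iter);
		while ((obj = rhashtable_walk_next(iter)) != NULL) {
			if (obj == ERR_PTR(-EAGAIN))
				continue;	/* table resized mid-walk */
			if (IS_ERR(obj))
				break;
			/* process obj here */
		}
		rhashtable_walk_stop(iter);
	}
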
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b01858f5deb1..2febe26de6a1 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -125,6 +125,40 @@ static inline int compute_score(struct sock *sk, struct net *net,
}
/* called with rcu_read_lock() */
+static struct sock *inet6_lhash2_lookup(struct net *net,
+ struct inet_listen_hashbucket *ilb2,
+ struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr,
+ const __be16 sport, const struct in6_addr *daddr,
+ const unsigned short hnum, const int dif, const int sdif)
+{
+ bool exact_dif = inet6_exact_dif_match(net, skb);
+ struct inet_connection_sock *icsk;
+ struct sock *sk, *result = NULL;
+ int score, hiscore = 0;
+ u32 phash = 0;
+
+ inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
+ sk = (struct sock *)icsk;
+ score = compute_score(sk, net, hnum, daddr, dif, sdif,
+ exact_dif);
+ if (score > hiscore) {
+ if (sk->sk_reuseport) {
+ phash = inet6_ehashfn(net, daddr, hnum,
+ saddr, sport);
+ result = reuseport_select_sock(sk, phash,
+ skb, doff);
+ if (result)
+ return result;
+ }
+ result = sk;
+ hiscore = score;
+ }
+ }
+
+ return result;
+}
+
struct sock *inet6_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
@@ -134,31 +168,56 @@ struct sock *inet6_lookup_listener(struct net *net,
{
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
- int score, hiscore = 0, matches = 0, reuseport = 0;
bool exact_dif = inet6_exact_dif_match(net, skb);
+ struct inet_listen_hashbucket *ilb2;
struct sock *sk, *result = NULL;
+ int score, hiscore = 0;
+ unsigned int hash2;
u32 phash = 0;
+ if (ilb->count <= 10 || !hashinfo->lhash2)
+ goto port_lookup;
+
+ /* Too many sk in the ilb bucket (which is hashed by port alone).
+ * Try lhash2 (which is hashed by port and addr) instead.
+ */
+
+ hash2 = ipv6_portaddr_hash(net, daddr, hnum);
+ ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+ if (ilb2->count > ilb->count)
+ goto port_lookup;
+
+ result = inet6_lhash2_lookup(net, ilb2, skb, doff,
+ saddr, sport, daddr, hnum,
+ dif, sdif);
+ if (result)
+ return result;
+
+ /* Lookup lhash2 with in6addr_any */
+
+ hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
+ ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+ if (ilb2->count > ilb->count)
+ goto port_lookup;
+
+ return inet6_lhash2_lookup(net, ilb2, skb, doff,
+ saddr, sport, daddr, hnum,
+ dif, sdif);
+
+port_lookup:
sk_for_each(sk, &ilb->head) {
score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif);
if (score > hiscore) {
- reuseport = sk->sk_reuseport;
- if (reuseport) {
+ if (sk->sk_reuseport) {
phash = inet6_ehashfn(net, daddr, hnum,
saddr, sport);
result = reuseport_select_sock(sk, phash,
skb, doff);
if (result)
return result;
- matches = 1;
}
result = sk;
hiscore = score;
- } else if (score == hiscore && reuseport) {
- matches++;
- if (reciprocal_scale(phash, matches) == 0)
- result = sk;
- phash = next_pseudo_random32(phash);
}
}
return result;
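
inet6_lookup_listener() now mirrors UDP's two-level scheme: when the port-only listening chain grows past 10 sockets, the lookup tries the (port, address) hashed lhash2 buckets, first for the exact destination address and then for in6addr_any, falling back to the port-only scan whenever the secondary bucket would be no shorter. The bucket-choice heuristic, isolated into a sketch:

	static bool try_portaddr_bucket(unsigned int port_only_count,
					unsigned int port_addr_count)
	{
		if (port_only_count <= 10)	/* short chain: just scan it */
			return false;
		/* only switch if the (port, addr) chain is not longer */
		return port_addr_count <= port_only_count;
	}
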
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index f5285f4e1d08..92b8d8c75eed 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -107,16 +107,13 @@ enum {
void fib6_update_sernum(struct rt6_info *rt)
{
- struct fib6_table *table = rt->rt6i_table;
struct net *net = dev_net(rt->dst.dev);
struct fib6_node *fn;
- spin_lock_bh(&table->tb6_lock);
fn = rcu_dereference_protected(rt->rt6i_node,
- lockdep_is_held(&table->tb6_lock));
+ lockdep_is_held(&rt->rt6i_table->tb6_lock));
if (fn)
fn->fn_sernum = fib6_new_sernum(net);
- spin_unlock_bh(&table->tb6_lock);
}
/*
@@ -640,6 +637,11 @@ static struct fib6_node *fib6_add_1(struct net *net,
if (!(fn->fn_flags & RTN_RTINFO)) {
RCU_INIT_POINTER(fn->leaf, NULL);
rt6_release(leaf);
+ /* remove null_entry in the root node */
+ } else if (fn->fn_flags & RTN_TL_ROOT &&
+ rcu_access_pointer(fn->leaf) ==
+ net->ipv6.ip6_null_entry) {
+ RCU_INIT_POINTER(fn->leaf, NULL);
}
return fn;
@@ -799,12 +801,6 @@ insert_above:
return ln;
}
-static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
-{
- return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
- RTF_GATEWAY;
-}
-
static void fib6_copy_metrics(u32 *mp, const struct mx6_config *mxc)
{
int i;
@@ -893,7 +889,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
ins = &fn->leaf;
for (iter = leaf; iter;
- iter = rcu_dereference_protected(iter->dst.rt6_next,
+ iter = rcu_dereference_protected(iter->rt6_next,
lockdep_is_held(&rt->rt6i_table->tb6_lock))) {
/*
* Search for duplicates
@@ -950,7 +946,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
break;
next_iter:
- ins = &iter->dst.rt6_next;
+ ins = &iter->rt6_next;
}
if (fallback_ins && !found) {
@@ -979,7 +975,7 @@ next_iter:
&sibling->rt6i_siblings);
break;
}
- sibling = rcu_dereference_protected(sibling->dst.rt6_next,
+ sibling = rcu_dereference_protected(sibling->rt6_next,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
}
/* For each sibling in the list, increment the counter of
@@ -994,6 +990,7 @@ next_iter:
rt6i_nsiblings++;
}
BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
+ rt6_multipath_rebalance(temp_sibling);
}
/*
@@ -1009,7 +1006,7 @@ add:
if (err)
return err;
- rcu_assign_pointer(rt->dst.rt6_next, iter);
+ rcu_assign_pointer(rt->rt6_next, iter);
atomic_inc(&rt->rt6i_ref);
rcu_assign_pointer(rt->rt6i_node, fn);
rcu_assign_pointer(*ins, rt);
@@ -1040,7 +1037,7 @@ add:
atomic_inc(&rt->rt6i_ref);
rcu_assign_pointer(rt->rt6i_node, fn);
- rt->dst.rt6_next = iter->dst.rt6_next;
+ rt->rt6_next = iter->rt6_next;
rcu_assign_pointer(*ins, rt);
call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_REPLACE,
rt, extack);
@@ -1059,14 +1056,14 @@ add:
if (nsiblings) {
/* Replacing an ECMP route, remove all siblings */
- ins = &rt->dst.rt6_next;
+ ins = &rt->rt6_next;
iter = rcu_dereference_protected(*ins,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
while (iter) {
if (iter->rt6i_metric > rt->rt6i_metric)
break;
if (rt6_qualify_for_ecmp(iter)) {
- *ins = iter->dst.rt6_next;
+ *ins = iter->rt6_next;
iter->rt6i_node = NULL;
fib6_purge_rt(iter, fn, info->nl_net);
if (rcu_access_pointer(fn->rr_ptr) == iter)
@@ -1075,7 +1072,7 @@ add:
nsiblings--;
info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
} else {
- ins = &iter->dst.rt6_next;
+ ins = &iter->rt6_next;
}
iter = rcu_dereference_protected(*ins,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
@@ -1102,8 +1099,8 @@ void fib6_force_start_gc(struct net *net)
jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}
-static void fib6_update_sernum_upto_root(struct rt6_info *rt,
- int sernum)
+static void __fib6_update_sernum_upto_root(struct rt6_info *rt,
+ int sernum)
{
struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
@@ -1117,6 +1114,11 @@ static void fib6_update_sernum_upto_root(struct rt6_info *rt,
}
}
+void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt)
+{
+ __fib6_update_sernum_upto_root(rt, fib6_new_sernum(net));
+}
+
/*
* Add routing information to the routing tree.
* <destination addr>/<source addr>
@@ -1221,8 +1223,14 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
}
if (!rcu_access_pointer(fn->leaf)) {
- atomic_inc(&rt->rt6i_ref);
- rcu_assign_pointer(fn->leaf, rt);
+ if (fn->fn_flags & RTN_TL_ROOT) {
+ /* put back null_entry for root node */
+ rcu_assign_pointer(fn->leaf,
+ info->nl_net->ipv6.ip6_null_entry);
+ } else {
+ atomic_inc(&rt->rt6i_ref);
+ rcu_assign_pointer(fn->leaf, rt);
+ }
}
fn = sn;
}
@@ -1230,7 +1238,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
err = fib6_add_rt2node(fn, rt, info, mxc, extack);
if (!err) {
- fib6_update_sernum_upto_root(rt, sernum);
+ __fib6_update_sernum_upto_root(rt, sernum);
fib6_start_gc(info->nl_net, rt);
}
@@ -1241,23 +1249,28 @@ out:
* If fib6_add_1 has cleared the old leaf pointer in the
* super-tree leaf node we have to find a new one for it.
*/
- struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf,
- lockdep_is_held(&table->tb6_lock));
- if (pn != fn && pn_leaf == rt) {
- pn_leaf = NULL;
- RCU_INIT_POINTER(pn->leaf, NULL);
- atomic_dec(&rt->rt6i_ref);
- }
- if (pn != fn && !pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
- pn_leaf = fib6_find_prefix(info->nl_net, table, pn);
-#if RT6_DEBUG >= 2
- if (!pn_leaf) {
- WARN_ON(!pn_leaf);
- pn_leaf = info->nl_net->ipv6.ip6_null_entry;
+ if (pn != fn) {
+ struct rt6_info *pn_leaf =
+ rcu_dereference_protected(pn->leaf,
+ lockdep_is_held(&table->tb6_lock));
+ if (pn_leaf == rt) {
+ pn_leaf = NULL;
+ RCU_INIT_POINTER(pn->leaf, NULL);
+ atomic_dec(&rt->rt6i_ref);
}
+ if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
+ pn_leaf = fib6_find_prefix(info->nl_net, table,
+ pn);
+#if RT6_DEBUG >= 2
+ if (!pn_leaf) {
+ WARN_ON(!pn_leaf);
+ pn_leaf =
+ info->nl_net->ipv6.ip6_null_entry;
+ }
#endif
- atomic_inc(&pn_leaf->rt6i_ref);
- rcu_assign_pointer(pn->leaf, pn_leaf);
+ atomic_inc(&pn_leaf->rt6i_ref);
+ rcu_assign_pointer(pn->leaf, pn_leaf);
+ }
}
#endif
goto failure;
@@ -1265,13 +1278,17 @@ out:
return err;
failure:
- /* fn->leaf could be NULL if fn is an intermediate node and we
- * failed to add the new route to it in both subtree creation
- * failure and fib6_add_rt2node() failure case.
- * In both cases, fib6_repair_tree() should be called to fix
- * fn->leaf.
+ /* fn->leaf could be NULL and fib6_repair_tree() needs to be called if:
+ * 1. fn is an intermediate node and we failed to add the new
+ * route to it in both subtree creation failure and fib6_add_rt2node()
+ * failure case.
+ * 2. fn is the root node in the table and we fail to add the first
+ * default route to it.
*/
- if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
+ if (fn &&
+ (!(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)) ||
+ (fn->fn_flags & RTN_TL_ROOT &&
+ !rcu_access_pointer(fn->leaf))))
fib6_repair_tree(info->nl_net, table, fn);
/* Always release dst as dst->__refcnt is guaranteed
* to be taken before entering this function
@@ -1526,6 +1543,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
struct fib6_walker *w;
int iter = 0;
+ /* Set fn->leaf to null_entry for root node. */
+ if (fn->fn_flags & RTN_TL_ROOT) {
+ rcu_assign_pointer(fn->leaf, net->ipv6.ip6_null_entry);
+ return fn;
+ }
+
for (;;) {
struct fib6_node *fn_r = rcu_dereference_protected(fn->right,
lockdep_is_held(&table->tb6_lock));
@@ -1644,7 +1667,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
WARN_ON_ONCE(rt->rt6i_flags & RTF_CACHE);
/* Unlink it */
- *rtp = rt->dst.rt6_next;
+ *rtp = rt->rt6_next;
rt->rt6i_node = NULL;
net->ipv6.rt6_stats->fib_rt_entries--;
net->ipv6.rt6_stats->fib_discarded_routes++;
@@ -1665,6 +1688,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
sibling->rt6i_nsiblings--;
rt->rt6i_nsiblings = 0;
list_del_init(&rt->rt6i_siblings);
+ rt6_multipath_rebalance(next_sibling);
}
/* Adjust walkers */
@@ -1672,7 +1696,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
FOR_WALKERS(net, w) {
if (w->state == FWS_C && w->leaf == rt) {
RT6_TRACE("walker %p adjusted by delroute\n", w);
- w->leaf = rcu_dereference_protected(rt->dst.rt6_next,
+ w->leaf = rcu_dereference_protected(rt->rt6_next,
lockdep_is_held(&table->tb6_lock));
if (!w->leaf)
w->state = FWS_U;
@@ -1680,10 +1704,15 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
}
read_unlock(&net->ipv6.fib6_walker_lock);
- /* If it was last route, expunge its radix tree node */
+ /* If it was last route, call fib6_repair_tree() to:
+ * 1. For root node, put back null_entry as when the table was created.
+ * 2. For other nodes, expunge its radix tree node.
+ */
if (!rcu_access_pointer(fn->leaf)) {
- fn->fn_flags &= ~RTN_RTINFO;
- net->ipv6.rt6_stats->fib_route_nodes--;
+ if (!(fn->fn_flags & RTN_TL_ROOT)) {
+ fn->fn_flags &= ~RTN_RTINFO;
+ net->ipv6.rt6_stats->fib_route_nodes--;
+ }
fn = fib6_repair_tree(net, table, fn);
}
@@ -1731,7 +1760,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
fib6_del_route(table, fn, rtp, info);
return 0;
}
- rtp_next = &cur->dst.rt6_next;
+ rtp_next = &cur->rt6_next;
}
return -ENOENT;
}
@@ -1887,7 +1916,7 @@ static int fib6_clean_node(struct fib6_walker *w)
for_each_fib6_walker_rt(w) {
res = c->func(rt, c->arg);
- if (res < 0) {
+ if (res == -1) {
w->leaf = rt;
res = fib6_del(rt, &info);
if (res) {
@@ -1900,6 +1929,12 @@ static int fib6_clean_node(struct fib6_walker *w)
continue;
}
return 0;
+ } else if (res == -2) {
+ if (WARN_ON(!rt->rt6i_nsiblings))
+ continue;
+ rt = list_last_entry(&rt->rt6i_siblings,
+ struct rt6_info, rt6i_siblings);
+ continue;
}
WARN_ON(res != 0);
}
@@ -1911,7 +1946,8 @@ static int fib6_clean_node(struct fib6_walker *w)
* Convenient frontend to tree walker.
*
* func is called on each route.
- * It may return -1 -> delete this route.
+ * It may return -2 -> skip multipath route.
+ * -1 -> delete this route.
* 0 -> continue walking
*/
@@ -2103,7 +2139,6 @@ static void fib6_net_exit(struct net *net)
{
unsigned int i;
- rt6_ifdown(net, NULL);
del_timer_sync(&net->ipv6.ip6_fib_timer);
for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
@@ -2142,8 +2177,8 @@ int __init fib6_init(void)
if (ret)
goto out_kmem_cache_create;
- ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
- 0);
+ ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL,
+ inet6_dump_fib, 0);
if (ret)
goto out_unregister_subsys;
@@ -2208,7 +2243,7 @@ static int ipv6_route_yield(struct fib6_walker *w)
do {
iter->w.leaf = rcu_dereference_protected(
- iter->w.leaf->dst.rt6_next,
+ iter->w.leaf->rt6_next,
lockdep_is_held(&iter->tbl->tb6_lock));
iter->skip--;
if (!iter->skip && iter->w.leaf)
@@ -2274,7 +2309,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
if (!v)
goto iter_table;
- n = rcu_dereference_bh(((struct rt6_info *)v)->dst.rt6_next);
+ n = rcu_dereference_bh(((struct rt6_info *)v)->rt6_next);
if (n) {
++*pos;
return n;
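
The ip6_fib.c hunks also extend the walker callback contract, as the updated walker comment above notes: a callback may now return -2 to skip the remainder of a multipath route, in addition to -1 (delete) and 0 (continue). A sketch of a conforming callback, with the predicates left hypothetical:

	static bool should_delete(struct rt6_info *rt, void *arg);	/* assumed */
	static bool mpath_done(struct rt6_info *rt, void *arg);		/* assumed */

	static int example_clean_cb(struct rt6_info *rt, void *arg)
	{
		if (should_delete(rt, arg))
			return -1;	/* fib6_clean_node() deletes the route */
		if (rt->rt6i_nsiblings && mpath_done(rt, arg))
			return -2;	/* jump past the remaining siblings */
		return 0;		/* keep walking */
	}
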
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 7f59c8fabeeb..3dab664ff503 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -836,7 +836,6 @@ static int ip6fl_seq_release(struct inode *inode, struct file *file)
}
static const struct file_operations ip6fl_seq_fops = {
- .owner = THIS_MODULE,
.open = ip6fl_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 772695960890..05f070e123e4 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -55,6 +55,8 @@
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
+#include <net/erspan.h>
+#include <net/dst_metadata.h>
static bool log_ecn_error = true;
@@ -68,11 +70,13 @@ static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
+ struct ip6_tnl __rcu *collect_md_tun;
struct net_device *fb_tunnel_dev;
};
static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
+static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -121,7 +125,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
unsigned int h1 = HASH_KEY(key);
struct ip6_tnl *t, *cand = NULL;
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
- int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
+ int dev_type = (gre_proto == htons(ETH_P_TEB) ||
+ gre_proto == htons(ETH_P_ERSPAN)) ?
ARPHRD_ETHER : ARPHRD_IP6GRE;
int score, cand_score = 4;
@@ -226,6 +231,10 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
if (cand)
return cand;
+ t = rcu_dereference(ign->collect_md_tun);
+ if (t && t->dev->flags & IFF_UP)
+ return t;
+
dev = ign->fb_tunnel_dev;
if (dev->flags & IFF_UP)
return netdev_priv(dev);
@@ -261,6 +270,9 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
+ if (t->parms.collect_md)
+ rcu_assign_pointer(ign->collect_md_tun, t);
+
rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
}
@@ -270,6 +282,9 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
struct ip6_tnl __rcu **tp;
struct ip6_tnl *iter;
+ if (t->parms.collect_md)
+ rcu_assign_pointer(ign->collect_md_tun, NULL);
+
for (tp = ip6gre_bucket(ign, t);
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
@@ -337,11 +352,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
nt->dev = dev;
nt->net = dev_net(dev);
- ip6gre_tnl_link_config(nt, 1);
if (register_netdevice(dev) < 0)
goto failed_free;
+ ip6gre_tnl_link_config(nt, 1);
+
/* Can use a lockless transmit, unless we generate output sequences */
if (!(nt->parms.o_flags & TUNNEL_SEQ))
dev->features |= NETIF_F_LLTX;
@@ -460,7 +476,101 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
&ipv6h->saddr, &ipv6h->daddr, tpi->key,
tpi->proto);
if (tunnel) {
- ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+ if (tunnel->parms.collect_md) {
+ struct metadata_dst *tun_dst;
+ __be64 tun_id;
+ __be16 flags;
+
+ flags = tpi->flags;
+ tun_id = key32_to_tunnel_id(tpi->key);
+
+ tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
+ if (!tun_dst)
+ return PACKET_REJECT;
+
+ ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
+ } else {
+ ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+ }
+
+ return PACKET_RCVD;
+ }
+
+ return PACKET_REJECT;
+}
+
+static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
+ struct tnl_ptk_info *tpi)
+{
+ struct erspan_base_hdr *ershdr;
+ struct erspan_metadata *pkt_md;
+ const struct ipv6hdr *ipv6h;
+ struct ip6_tnl *tunnel;
+ u8 ver;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
+ return PACKET_REJECT;
+
+ ipv6h = ipv6_hdr(skb);
+ ershdr = (struct erspan_base_hdr *)skb->data;
+ ver = ershdr->ver;
+ tpi->key = cpu_to_be32(get_session_id(ershdr));
+
+ tunnel = ip6gre_tunnel_lookup(skb->dev,
+ &ipv6h->saddr, &ipv6h->daddr, tpi->key,
+ tpi->proto);
+ if (tunnel) {
+ int len = erspan_hdr_len(ver);
+
+ if (unlikely(!pskb_may_pull(skb, len)))
+ return PACKET_REJECT;
+
+ ershdr = (struct erspan_base_hdr *)skb->data;
+ pkt_md = (struct erspan_metadata *)(ershdr + 1);
+
+ if (__iptunnel_pull_header(skb, len,
+ htons(ETH_P_TEB),
+ false, false) < 0)
+ return PACKET_REJECT;
+
+ if (tunnel->parms.collect_md) {
+ struct metadata_dst *tun_dst;
+ struct ip_tunnel_info *info;
+ struct erspan_metadata *md;
+ __be64 tun_id;
+ __be16 flags;
+
+ tpi->flags |= TUNNEL_KEY;
+ flags = tpi->flags;
+ tun_id = key32_to_tunnel_id(tpi->key);
+
+ tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
+ sizeof(*md));
+ if (!tun_dst)
+ return PACKET_REJECT;
+
+ info = &tun_dst->u.tun_info;
+ md = ip_tunnel_info_opts(info);
+
+ memcpy(md, pkt_md, sizeof(*md));
+ md->version = ver;
+ info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+ info->options_len = sizeof(*md);
+
+ ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
+
+ } else {
+ tunnel->parms.erspan_ver = ver;
+
+ if (ver == 1) {
+ tunnel->parms.index = ntohl(pkt_md->u.index);
+ } else {
+ tunnel->parms.dir = pkt_md->u.md2.dir;
+ tunnel->parms.hwid = get_hwid(&pkt_md->u.md2);
+ }
+
+ ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+ }
return PACKET_RCVD;
}
@@ -481,9 +591,17 @@ static int gre_rcv(struct sk_buff *skb)
if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
goto drop;
+ if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
+ tpi.proto == htons(ETH_P_ERSPAN2))) {
+ if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
+ return 0;
+ goto out;
+ }
+
if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
return 0;
+out:
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
@@ -496,6 +614,78 @@ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
+static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
+ struct net_device *dev,
+ struct flowi6 *fl6, __u8 *dsfield,
+ int *encap_limit)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct ip6_tnl *t = netdev_priv(dev);
+
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ *encap_limit = t->parms.encap_limit;
+
+ memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+ *dsfield = ipv4_get_dsfield(iph);
+ else
+ *dsfield = ip6_tclass(t->parms.flowinfo);
+
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6->flowi6_mark = skb->mark;
+ else
+ fl6->flowi6_mark = t->parms.fwmark;
+
+ fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+}
+
+static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
+ struct net_device *dev,
+ struct flowi6 *fl6, __u8 *dsfield,
+ int *encap_limit)
+{
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ip6_tnl *t = netdev_priv(dev);
+ __u16 offset;
+
+ offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+
+ if (offset > 0) {
+ struct ipv6_tlv_tnl_enc_lim *tel;
+
+ tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+ if (tel->encap_limit == 0) {
+ icmpv6_send(skb, ICMPV6_PARAMPROB,
+ ICMPV6_HDR_FIELD, offset + 2);
+ return -1;
+ }
+ *encap_limit = tel->encap_limit - 1;
+ } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+ *encap_limit = t->parms.encap_limit;
+ }
+
+ memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+ *dsfield = ipv6_get_dsfield(ipv6h);
+ else
+ *dsfield = ip6_tclass(t->parms.flowinfo);
+
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+ fl6->flowlabel |= ip6_flowlabel(ipv6h);
+
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6->flowi6_mark = skb->mark;
+ else
+ fl6->flowi6_mark = t->parms.fwmark;
+
+ fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+ return 0;
+}
+
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
struct net_device *dev, __u8 dsfield,
struct flowi6 *fl6, int encap_limit,
@@ -517,8 +707,38 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
/* Push GRE header. */
protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
- gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
- protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+
+ if (tunnel->parms.collect_md) {
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
+ __be16 flags;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info ||
+ !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET6))
+ return -EINVAL;
+
+ key = &tun_info->key;
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_proto = IPPROTO_GRE;
+ fl6->daddr = key->u.ipv6.dst;
+ fl6->flowlabel = key->label;
+ fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+ dsfield = key->tos;
+ flags = key->tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
+ tunnel->tun_hlen = gre_calc_hlen(flags);
+
+ gre_build_header(skb, tunnel->tun_hlen,
+ flags, protocol,
+ tunnel_id_to_key32(tun_info->key.tun_id), 0);
+
+ } else {
+ gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+ protocol, tunnel->parms.o_key,
+ htonl(tunnel->o_seqno));
+ }
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
NEXTHDR_GRE);
@@ -527,30 +747,17 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- const struct iphdr *iph = ip_hdr(skb);
int encap_limit = -1;
struct flowi6 fl6;
- __u8 dsfield;
+ __u8 dsfield = 0;
__u32 mtu;
int err;
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- encap_limit = t->parms.encap_limit;
-
- memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- dsfield = ipv4_get_dsfield(iph);
- else
- dsfield = ip6_tclass(t->parms.flowinfo);
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
- fl6.flowi6_mark = skb->mark;
- else
- fl6.flowi6_mark = t->parms.fwmark;
-
- fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ if (!t->parms.collect_md)
+ prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
+ &dsfield, &encap_limit);
err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
if (err)
@@ -574,46 +781,17 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
struct ip6_tnl *t = netdev_priv(dev);
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
int encap_limit = -1;
- __u16 offset;
struct flowi6 fl6;
- __u8 dsfield;
+ __u8 dsfield = 0;
__u32 mtu;
int err;
if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
return -1;
- offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
- /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
- ipv6h = ipv6_hdr(skb);
-
- if (offset > 0) {
- struct ipv6_tlv_tnl_enc_lim *tel;
- tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
- if (tel->encap_limit == 0) {
- icmpv6_send(skb, ICMPV6_PARAMPROB,
- ICMPV6_HDR_FIELD, offset + 2);
- return -1;
- }
- encap_limit = tel->encap_limit - 1;
- } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- encap_limit = t->parms.encap_limit;
-
- memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- dsfield = ipv6_get_dsfield(ipv6h);
- else
- dsfield = ip6_tclass(t->parms.flowinfo);
-
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
- fl6.flowlabel |= ip6_flowlabel(ipv6h);
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
- fl6.flowi6_mark = skb->mark;
- else
- fl6.flowi6_mark = t->parms.fwmark;
-
- fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ if (!t->parms.collect_md &&
+ prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
+ return -1;
if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
return -1;
@@ -660,7 +838,8 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
encap_limit = t->parms.encap_limit;
- memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+ if (!t->parms.collect_md)
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
if (err)
@@ -705,6 +884,137 @@ tx_err:
return NETDEV_TX_OK;
}
+static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device_stats *stats;
+ bool truncate = false;
+ int encap_limit = -1;
+ __u8 dsfield = 0;
+ struct flowi6 fl6;
+ int err = -EINVAL;
+ __u32 mtu;
+
+ if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
+ goto tx_err;
+
+ if (gre_handle_offloads(skb, false))
+ goto tx_err;
+
+ if (skb->len > dev->mtu + dev->hard_header_len) {
+ pskb_trim(skb, dev->mtu + dev->hard_header_len);
+ truncate = true;
+ }
+
+ t->parms.o_flags &= ~TUNNEL_KEY;
+ IPCB(skb)->flags = 0;
+
+ /* For collect_md mode, derive fl6 from the tunnel key;
+ * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
+ */
+ if (t->parms.collect_md) {
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
+ struct erspan_metadata *md;
+ __be32 tun_id;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info ||
+ !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET6))
+ goto tx_err;
+
+ key = &tun_info->key;
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_GRE;
+ fl6.daddr = key->u.ipv6.dst;
+ fl6.flowlabel = key->label;
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+ dsfield = key->tos;
+ md = ip_tunnel_info_opts(tun_info);
+ if (!md)
+ goto tx_err;
+
+ tun_id = tunnel_id_to_key32(key->tun_id);
+ if (md->version == 1) {
+ erspan_build_header(skb,
+ ntohl(tun_id),
+ ntohl(md->u.index), truncate,
+ false);
+ } else if (md->version == 2) {
+ erspan_build_header_v2(skb,
+ ntohl(tun_id),
+ md->u.md2.dir,
+ get_hwid(&md->u.md2),
+ truncate, false);
+ }
+ } else {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
+ &dsfield, &encap_limit);
+ break;
+ case htons(ETH_P_IPV6):
+ if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+ goto tx_err;
+ if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
+ &dsfield, &encap_limit))
+ goto tx_err;
+ break;
+ default:
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+ break;
+ }
+
+ if (t->parms.erspan_ver == 1)
+ erspan_build_header(skb, ntohl(t->parms.o_key),
+ t->parms.index,
+ truncate, false);
+ else
+ erspan_build_header_v2(skb, ntohl(t->parms.o_key),
+ t->parms.dir,
+ t->parms.hwid,
+ truncate, false);
+ fl6.daddr = t->parms.raddr;
+ }
+
+ /* Push GRE header. */
+ gre_build_header(skb, 8, TUNNEL_SEQ,
+ htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
+
+ /* TooBig packet may have updated dst->dev's mtu */
+ if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
+ dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+
+ err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+ NEXTHDR_GRE);
+ if (err != 0) {
+ /* XXX: send ICMP error even if DF is not set. */
+ if (err == -EMSGSIZE) {
+ if (skb->protocol == htons(ETH_P_IP))
+ icmp_send(skb, ICMP_DEST_UNREACH,
+ ICMP_FRAG_NEEDED, htonl(mtu));
+ else
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ }
+
+ goto tx_err;
+ }
+ return NETDEV_TX_OK;
+
+tx_err:
+ stats = &t->dev->stats;
+ stats->tx_errors++;
+ stats->tx_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
struct net_device *dev = t->dev;
@@ -1078,6 +1388,10 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
+ if (tunnel->parms.collect_md) {
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ netif_keep_dst(dev);
+ }
ip6gre_tnl_init_features(dev);
return 0;
@@ -1094,6 +1408,9 @@ static int ip6gre_tunnel_init(struct net_device *dev)
tunnel = netdev_priv(dev);
+ if (tunnel->parms.collect_md)
+ return 0;
+
memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
@@ -1116,7 +1433,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
dev_hold(dev);
}
-
static struct inet6_protocol ip6gre_protocol __read_mostly = {
.handler = gre_rcv,
.err_handler = ip6gre_err,
@@ -1131,7 +1447,8 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &ip6gre_link_ops ||
- dev->rtnl_link_ops == &ip6gre_tap_ops)
+ dev->rtnl_link_ops == &ip6gre_tap_ops ||
+ dev->rtnl_link_ops == &ip6erspan_tap_ops)
unregister_netdevice_queue(dev, head);
for (prio = 0; prio < 4; prio++) {
@@ -1253,6 +1570,70 @@ out:
return ip6gre_tunnel_validate(tb, data, extack);
}
+static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ __be16 flags = 0;
+ int ret, ver = 0;
+
+ if (!data)
+ return 0;
+
+ ret = ip6gre_tap_validate(tb, data, extack);
+ if (ret)
+ return ret;
+
+ /* ERSPAN should only have the GRE sequence and key flags */
+ if (data[IFLA_GRE_OFLAGS])
+ flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
+ if (data[IFLA_GRE_IFLAGS])
+ flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
+ if (!data[IFLA_GRE_COLLECT_METADATA] &&
+ flags != (GRE_SEQ | GRE_KEY))
+ return -EINVAL;
+
+ /* The ERSPAN session ID is only 10 bits. Since we reuse the
+ * 32-bit key field as the ID, check its range.
+ */
+ if (data[IFLA_GRE_IKEY] &&
+ (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
+ return -EINVAL;
+
+ if (data[IFLA_GRE_OKEY] &&
+ (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
+ return -EINVAL;
+
+ if (data[IFLA_GRE_ERSPAN_VER]) {
+ ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+ if (ver != 1 && ver != 2)
+ return -EINVAL;
+ }
+
+ if (ver == 1) {
+ if (data[IFLA_GRE_ERSPAN_INDEX]) {
+ u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+
+ if (index & ~INDEX_MASK)
+ return -EINVAL;
+ }
+ } else if (ver == 2) {
+ if (data[IFLA_GRE_ERSPAN_DIR]) {
+ u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+
+ if (dir & ~(DIR_MASK >> DIR_OFFSET))
+ return -EINVAL;
+ }
+
+ if (data[IFLA_GRE_ERSPAN_HWID]) {
+ u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+
+ if (hwid & ~(HWID_MASK >> HWID_OFFSET))
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
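[Editor's note] The validate hook above rejects keys and indexes that overflow their ERSPAN fields. A minimal userspace sketch of the same mask checks, assuming the mask values from the kernel's erspan header (ID_MASK 0x03ff for the 10-bit session ID, INDEX_MASK 0xfffff for the 20-bit v1 index):

    #include <stdio.h>
    #include <stdint.h>

    #define ID_MASK    0x03ffu  /* 10-bit ERSPAN session ID (assumed as in erspan.h) */
    #define INDEX_MASK 0xfffffu /* 20-bit ERSPAN v1 index (assumed as in erspan.h) */

    static int erspan_id_ok(uint32_t key)    { return !(key & ~ID_MASK); }
    static int erspan_index_ok(uint32_t idx) { return !(idx & ~INDEX_MASK); }

    int main(void)
    {
        printf("key 0x2ff: %s\n", erspan_id_ok(0x2ff) ? "ok" : "rejected");
        printf("key 0x400: %s\n", erspan_id_ok(0x400) ? "ok" : "rejected"); /* bit 10 set */
        printf("index 0x100000: %s\n",
               erspan_index_ok(0x100000) ? "ok" : "rejected"); /* bit 20 set */
        return 0;
    }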
static void ip6gre_netlink_parms(struct nlattr *data[],
struct __ip6_tnl_parm *parms)
@@ -1299,11 +1680,26 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
if (data[IFLA_GRE_FWMARK])
parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
+
+ if (data[IFLA_GRE_COLLECT_METADATA])
+ parms->collect_md = true;
+
+ if (data[IFLA_GRE_ERSPAN_VER])
+ parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+ if (parms->erspan_ver == 1) {
+ if (data[IFLA_GRE_ERSPAN_INDEX])
+ parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+ } else if (parms->erspan_ver == 2) {
+ if (data[IFLA_GRE_ERSPAN_DIR])
+ parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+ if (data[IFLA_GRE_ERSPAN_HWID])
+ parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+ }
}
static int ip6gre_tap_init(struct net_device *dev)
{
- struct ip6_tnl *tunnel;
int ret;
ret = ip6gre_tunnel_init_common(dev);
@@ -1312,10 +1708,6 @@ static int ip6gre_tap_init(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- tunnel = netdev_priv(dev);
-
- ip6gre_tnl_link_config(tunnel, 1);
-
return 0;
}
@@ -1330,6 +1722,59 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
.ndo_get_iflink = ip6_tnl_get_iflink,
};
+static int ip6erspan_tap_init(struct net_device *dev)
+{
+ struct ip6_tnl *tunnel;
+ int t_hlen;
+ int ret;
+
+ tunnel = netdev_priv(dev);
+
+ tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
+ strcpy(tunnel->parms.name, dev->name);
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
+ if (ret) {
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+ return ret;
+ }
+
+ tunnel->tun_hlen = 8;
+ tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+ erspan_hdr_len(tunnel->parms.erspan_ver);
+ t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+
+ dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ dev->mtu = ETH_DATA_LEN - t_hlen;
+ if (dev->type == ARPHRD_ETHER)
+ dev->mtu -= ETH_HLEN;
+ if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ dev->mtu -= 8;
+
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ ip6gre_tnl_link_config(tunnel, 1);
+
+ return 0;
+}
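[Editor's note] A sketch of the resulting encapsulation overhead, assuming the 8-byte GRE header implied by tun_hlen = 8 above (4-byte base plus 4-byte sequence number) and an ERSPAN header of 8 bytes for v1 or 12 bytes for v2; these erspan_hdr_len() values are an assumption here, not taken from the patch:

    #include <stdio.h>

    #define IPV6_HDR_LEN   40
    #define GRE_SEQ_HLEN    8   /* base GRE + sequence number, matches tun_hlen above */
    #define ETH_DATA_LEN 1500

    /* assumed: 4-byte ERSPAN base header + 4-byte index (v1) or 8-byte md2 (v2) */
    static int erspan_hdr_len(int ver) { return ver == 1 ? 8 : 12; }

    int main(void)
    {
        for (int ver = 1; ver <= 2; ver++) {
            int t_hlen = GRE_SEQ_HLEN + erspan_hdr_len(ver) + IPV6_HDR_LEN;
            printf("v%d: tunnel overhead %d, mtu %d\n",
                   ver, t_hlen, ETH_DATA_LEN - t_hlen);
        }
        return 0;
    }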
+
+static const struct net_device_ops ip6erspan_netdev_ops = {
+ .ndo_init = ip6erspan_tap_init,
+ .ndo_uninit = ip6gre_tunnel_uninit,
+ .ndo_start_xmit = ip6erspan_tunnel_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = ip6_tnl_change_mtu,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip6_tnl_get_iflink,
+};
+
static void ip6gre_tap_setup(struct net_device *dev)
{
@@ -1400,20 +1845,29 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
ip6gre_netlink_parms(data, &nt->parms);
- if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
- return -EEXIST;
+ if (nt->parms.collect_md) {
+ if (rtnl_dereference(ign->collect_md_tun))
+ return -EEXIST;
+ } else {
+ if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+ return -EEXIST;
+ }
if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
nt->dev = dev;
nt->net = dev_net(dev);
- ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
err = register_netdevice(dev);
if (err)
goto out;
+ ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+
+ if (tb[IFLA_MTU])
+ ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+
dev_hold(dev);
ip6gre_tunnel_link(ign, nt);
@@ -1500,8 +1954,12 @@ static size_t ip6gre_get_size(const struct net_device *dev)
nla_total_size(2) +
/* IFLA_GRE_ENCAP_DPORT */
nla_total_size(2) +
+ /* IFLA_GRE_COLLECT_METADATA */
+ nla_total_size(0) +
/* IFLA_GRE_FWMARK */
nla_total_size(4) +
+ /* IFLA_GRE_ERSPAN_INDEX */
+ nla_total_size(4) +
0;
}
@@ -1523,7 +1981,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
- nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
+ nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
+ nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -1536,6 +1995,24 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
t->encap.flags))
goto nla_put_failure;
+ if (p->collect_md) {
+ if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
+ goto nla_put_failure;
+
+ if (p->erspan_ver == 1) {
+ if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+ goto nla_put_failure;
+ } else if (p->erspan_ver == 2) {
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
+ goto nla_put_failure;
+ }
+
return 0;
nla_put_failure:
@@ -1558,9 +2035,28 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
[IFLA_GRE_FWMARK] = { .type = NLA_U32 },
+ [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
+ [IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
+ [IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
+ [IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};
+static void ip6erspan_tap_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &ip6erspan_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ip6gre_dev_free;
+
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_keep_dst(dev);
+}
+
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
.kind = "ip6gre",
.maxtype = IFLA_GRE_MAX,
@@ -1590,6 +2086,20 @@ static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
.get_link_net = ip6_tnl_get_link_net,
};
+static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
+ .kind = "ip6erspan",
+ .maxtype = IFLA_GRE_MAX,
+ .policy = ip6gre_policy,
+ .priv_size = sizeof(struct ip6_tnl),
+ .setup = ip6erspan_tap_setup,
+ .validate = ip6erspan_tap_validate,
+ .newlink = ip6gre_newlink,
+ .changelink = ip6gre_changelink,
+ .get_size = ip6gre_get_size,
+ .fill_info = ip6gre_fill_info,
+ .get_link_net = ip6_tnl_get_link_net,
+};
+
/*
* And now the modules code and kernel interface.
*/
@@ -1618,9 +2128,15 @@ static int __init ip6gre_init(void)
if (err < 0)
goto tap_ops_failed;
+ err = rtnl_link_register(&ip6erspan_tap_ops);
+ if (err < 0)
+ goto erspan_link_failed;
+
out:
return err;
+erspan_link_failed:
+ rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
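[Editor's note] The init path follows the usual reverse-order unwind: each registration that succeeds gets an error label that undoes it when a later step fails. A minimal sketch of the pattern, with hypothetical register/unregister pairs standing in for the rtnl_link_register() calls:

    #include <stdio.h>

    static int reg_a(void) { return 0; }   /* hypothetical subsystems */
    static int reg_b(void) { return 0; }
    static int reg_c(void) { return -1; }  /* simulate a failure */
    static void unreg_a(void) { puts("unreg a"); }
    static void unreg_b(void) { puts("unreg b"); }

    static int init_all(void)
    {
        int err;

        err = reg_a();
        if (err)
            goto out;
        err = reg_b();
        if (err)
            goto a_registered;
        err = reg_c();
        if (err)
            goto b_registered;
    out:
        return err;
    b_registered:
        unreg_b();          /* undo in reverse registration order */
    a_registered:
        unreg_a();
        goto out;
    }

    int main(void) { return init_all() ? 1 : 0; }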
@@ -1634,6 +2150,7 @@ static void __exit ip6gre_fini(void)
{
rtnl_link_unregister(&ip6gre_tap_ops);
rtnl_link_unregister(&ip6gre_link_ops);
+ rtnl_link_unregister(&ip6erspan_tap_ops);
inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
unregister_pernet_device(&ip6gre_net_ops);
}
@@ -1645,4 +2162,5 @@ MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
+MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f7dd51c42314..997c7f19ad62 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -138,6 +138,14 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
return ret;
}
+#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
+ /* Policy lookup after SNAT yielded a new policy */
+ if (skb_dst(skb)->xfrm) {
+ IPCB(skb)->flags |= IPSKB_REROUTED;
+ return dst_output(net, sk, skb);
+ }
+#endif
+
if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)) ||
(IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
@@ -166,7 +174,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
-static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
{
if (!np->autoflowlabel_set)
return ip6_default_np_autolabel(net);
@@ -370,7 +378,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
return dst_output(net, sk, skb);
}
-static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
+unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
unsigned int mtu;
struct inet6_dev *idev;
@@ -390,6 +398,7 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
return mtu;
}
+EXPORT_SYMBOL_GPL(ip6_dst_mtu_forward);
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
@@ -1206,16 +1215,18 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
v6_cork->tclass = ipc6->tclass;
if (rt->dst.flags & DST_XFRM_TUNNEL)
mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(&rt->dst);
+ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
else
mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
}
+ if (mtu < IPV6_MIN_MTU)
+ return -EINVAL;
cork->base.fragsize = mtu;
- if (dst_allfrag(rt->dst.path))
+ if (dst_allfrag(xfrm_dst_path(&rt->dst)))
cork->base.flags |= IPCORK_ALLFRAG;
cork->base.length = 0;
@@ -1733,11 +1744,13 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
cork.base.flags = 0;
cork.base.addr = 0;
cork.base.opt = NULL;
+ cork.base.dst = NULL;
v6_cork.opt = NULL;
err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
- if (err)
+ if (err) {
+ ip6_cork_release(&cork, &v6_cork);
return ERR_PTR(err);
-
+ }
if (ipc6->dontfrag < 0)
ipc6->dontfrag = inet6_sk(sk)->dontfrag;
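[Editor's note] The new check rejects cork setups whose effective MTU falls below the IPv6 minimum. A small demonstration of the invariant (RFC 8200 requires every IPv6 link to carry 1280-byte packets, so anything smaller cannot be a valid fragment size):

    #include <stdio.h>

    #define IPV6_MIN_MTU 1280   /* RFC 8200 minimum link MTU */

    /* mirrors the added guard: a frag_size or path MTU below 1280 is invalid */
    static int cork_mtu_valid(unsigned int mtu) { return mtu >= IPV6_MIN_MTU; }

    int main(void)
    {
        printf("mtu 1500: %d\n", cork_mtu_valid(1500)); /* 1 */
        printf("mtu 1280: %d\n", cork_mtu_valid(1280)); /* 1 */
        printf("mtu  576: %d\n", cork_mtu_valid(576));  /* 0 -> -EINVAL */
        return 0;
    }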
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 931c38f6ff4a..4b15fe928278 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -642,8 +642,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rel_info > dst_mtu(skb_dst(skb2)))
goto out;
- skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2,
- rel_info);
+ skb_dst_update_pmtu(skb2, rel_info);
}
icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -861,7 +860,7 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
struct metadata_dst *tun_dst,
bool log_ecn_err)
{
- return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
+ return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);
@@ -979,6 +978,9 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
int ret = 0;
struct net *net = t->net;
+ if (t->parms.collect_md)
+ return 1;
+
if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
(ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
@@ -1074,10 +1076,11 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
neigh_release(neigh);
}
- } else if (!(t->parms.flags &
- (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
- /* enable the cache only only if the routing decision does
- * not depend on the current inner header value
+ } else if (t->parms.proto != 0 && !(t->parms.flags &
+ (IP6_TNL_F_USE_ORIG_TCLASS |
+ IP6_TNL_F_USE_ORIG_FWMARK))) {
+ /* enable the cache only if neither the outer protocol nor the
+ * routing decision depends on the current inner header value
*/
use_cache = true;
}
@@ -1130,8 +1133,7 @@ route_lookup:
mtu = 576;
}
- if (skb_dst(skb) && !t->parms.collect_md)
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;
@@ -1676,11 +1678,11 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
struct ip6_tnl *tnl = netdev_priv(dev);
- if (tnl->parms.proto == IPPROTO_IPIP) {
- if (new_mtu < ETH_MIN_MTU)
+ if (tnl->parms.proto == IPPROTO_IPV6) {
+ if (new_mtu < IPV6_MIN_MTU)
return -EINVAL;
} else {
- if (new_mtu < IPV6_MIN_MTU)
+ if (new_mtu < ETH_MIN_MTU)
return -EINVAL;
}
if (new_mtu > 0xFFF8 - dev->hard_header_len)
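[Editor's note] The fix above swaps the two branches so an IPv6-in-IPv6 tunnel enforces the larger minimum while other payloads only need the Ethernet minimum. A sketch of the corrected selection:

    #include <stdio.h>

    #define IPPROTO_IPV6   41
    #define IPV6_MIN_MTU 1280
    #define ETH_MIN_MTU    68

    /* IPv6 payload must fit 1280-byte packets; other payloads only need 68 */
    static int tunnel_min_mtu(int proto)
    {
        return proto == IPPROTO_IPV6 ? IPV6_MIN_MTU : ETH_MIN_MTU;
    }

    int main(void)
    {
        printf("IPv6 payload min mtu: %d\n", tunnel_min_mtu(IPPROTO_IPV6));
        printf("other payload min mtu: %d\n", tunnel_min_mtu(4 /* IPPROTO_IPIP */));
        return 0;
    }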
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index dbb74f3c57a7..fa3ae1cb50d3 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -483,7 +483,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
mtu = dst_mtu(dst);
if (!skb->ignore_df && skb->len > mtu) {
- skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)
@@ -626,6 +626,7 @@ static void vti6_link_config(struct ip6_tnl *t)
{
struct net_device *dev = t->dev;
struct __ip6_tnl_parm *p = &t->parms;
+ struct net_device *tdev = NULL;
memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -638,6 +639,25 @@ static void vti6_link_config(struct ip6_tnl *t)
dev->flags |= IFF_POINTOPOINT;
else
dev->flags &= ~IFF_POINTOPOINT;
+
+ if (p->flags & IP6_TNL_F_CAP_XMIT) {
+ int strict = (ipv6_addr_type(&p->raddr) &
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
+ struct rt6_info *rt = rt6_lookup(t->net,
+ &p->raddr, &p->laddr,
+ p->link, strict);
+
+ if (rt)
+ tdev = rt->dst.dev;
+ ip6_rt_put(rt);
+ }
+
+ if (!tdev && p->link)
+ tdev = __dev_get_by_index(t->net, p->link);
+
+ if (tdev)
+ dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len,
+ IPV6_MIN_MTU);
}
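[Editor's note] The net effect of the route lookup is an MTU derived from the underlying device but never below the IPv6 minimum — a sketch of the clamp:

    #include <stdio.h>

    #define IPV6_MIN_MTU 1280

    static int vti6_mtu(int tdev_mtu, int hard_header_len)
    {
        int mtu = tdev_mtu - hard_header_len;

        return mtu > IPV6_MIN_MTU ? mtu : IPV6_MIN_MTU; /* max_t(int, ...) */
    }

    int main(void)
    {
        printf("%d\n", vti6_mtu(1500, 0));   /* 1500 */
        printf("%d\n", vti6_mtu(1300, 100)); /* clamped to 1280 */
        return 0;
    }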
/**
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index a2e1a864eb46..9f6cace9c817 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -477,7 +477,6 @@ static int ip6mr_vif_open(struct inode *inode, struct file *file)
}
static const struct file_operations ip6mr_vif_fops = {
- .owner = THIS_MODULE,
.open = ip6mr_vif_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -495,6 +494,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
return ERR_PTR(-ENOENT);
it->mrt = mrt;
+ it->cache = NULL;
return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
@@ -609,7 +609,6 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
}
static const struct file_operations ip6mr_mfc_fops = {
- .owner = THIS_MODULE,
.open = ipmr_mfc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1425,10 +1424,13 @@ int __init ip6_mr_init(void)
goto add_proto_fail;
}
#endif
- rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
- ip6mr_rtm_dumproute, 0);
- return 0;
+ err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
+ NULL, ip6mr_rtm_dumproute, 0);
+ if (err == 0)
+ return 0;
+
#ifdef CONFIG_IPV6_PIMSM_V2
+ inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 2d4680e0376f..e8ffb5b5d84e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -1336,7 +1336,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_AUTOFLOWLABEL:
- val = np->autoflowlabel;
+ val = ip6_autoflowlabel(sock_net(sk), np);
break;
case IPV6_RECVFRAGSIZE:
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 844642682b83..6a5d0e39bb87 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1655,8 +1655,6 @@ static void mld_sendpack(struct sk_buff *skb)
if (err)
goto err_out;
- payload_len = skb->len;
-
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
dst_output);
@@ -2758,7 +2756,6 @@ static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations igmp6_mc_seq_fops = {
- .owner = THIS_MODULE,
.open = igmp6_mc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2913,7 +2910,6 @@ static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations igmp6_mcf_seq_fops = {
- .owner = THIS_MODULE,
.open = igmp6_mcf_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index b3cea200c85e..f61a5b613b52 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -566,6 +566,11 @@ static void ndisc_send_unsol_na(struct net_device *dev)
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ /* skip tentative addresses until DAD completes */
+ if (ifa->flags & IFA_F_TENTATIVE &&
+ !(ifa->flags & IFA_F_OPTIMISTIC))
+ continue;
+
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifa->addr,
/*router=*/ !!idev->cnf.forwarding,
/*solicited=*/ false, /*override=*/ true,
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 39970e212ad5..d95ceca7ff8f 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -68,32 +68,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
}
EXPORT_SYMBOL(ip6_route_me_harder);
-/*
- * Extra routing may needed on local out, as the QUEUE target never
- * returns control to the table.
- */
-
-struct ip6_rt_info {
- struct in6_addr daddr;
- struct in6_addr saddr;
- u_int32_t mark;
-};
-
-static void nf_ip6_saveroute(const struct sk_buff *skb,
- struct nf_queue_entry *entry)
-{
- struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
- if (entry->state.hook == NF_INET_LOCAL_OUT) {
- const struct ipv6hdr *iph = ipv6_hdr(skb);
-
- rt_info->daddr = iph->daddr;
- rt_info->saddr = iph->saddr;
- rt_info->mark = skb->mark;
- }
-}
-
-static int nf_ip6_reroute(struct net *net, struct sk_buff *skb,
+static int nf_ip6_reroute(struct sk_buff *skb,
const struct nf_queue_entry *entry)
{
struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
@@ -103,7 +78,7 @@ static int nf_ip6_reroute(struct net *net, struct sk_buff *skb,
if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
!ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
skb->mark != rt_info->mark)
- return ip6_route_me_harder(net, skb);
+ return ip6_route_me_harder(entry->state.net, skb);
}
return 0;
}
@@ -190,25 +165,19 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
};
static const struct nf_ipv6_ops ipv6ops = {
- .chk_addr = ipv6_chk_addr,
- .route_input = ip6_route_input,
- .fragment = ip6_fragment
-};
-
-static const struct nf_afinfo nf_ip6_afinfo = {
- .family = AF_INET6,
+ .chk_addr = ipv6_chk_addr,
+ .route_input = ip6_route_input,
+ .fragment = ip6_fragment,
.checksum = nf_ip6_checksum,
.checksum_partial = nf_ip6_checksum_partial,
.route = nf_ip6_route,
- .saveroute = nf_ip6_saveroute,
.reroute = nf_ip6_reroute,
- .route_key_size = sizeof(struct ip6_rt_info),
};
int __init ipv6_netfilter_init(void)
{
RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
- return nf_register_afinfo(&nf_ip6_afinfo);
+ return 0;
}
/* This can be called from inet6_init() on errors, so it cannot
@@ -217,5 +186,4 @@ int __init ipv6_netfilter_init(void)
void ipv6_netfilter_fini(void)
{
RCU_INIT_POINTER(nf_ipv6_ops, NULL);
- nf_unregister_afinfo(&nf_ip6_afinfo);
}
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 6acb2eecd986..4a634b7a2c80 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -71,6 +71,15 @@ config NFT_FIB_IPV6
endif # NF_TABLES_IPV6
endif # NF_TABLES
+config NF_FLOW_TABLE_IPV6
+ tristate "Netfilter flow table IPv6 module"
+ depends on NF_CONNTRACK && NF_TABLES
+ select NF_FLOW_TABLE
+ help
+ This option adds IPv6 flow table support.
+
+ To compile it as a module, choose M here.
+
config NF_DUP_IPV6
tristate "Netfilter IPv6 packet duplication to alternate destination"
depends on !NF_CONNTRACK || NF_CONNTRACK
@@ -232,6 +241,15 @@ config IP6_NF_MATCH_RT
To compile it as a module, choose M here. If unsure, say N.
+config IP6_NF_MATCH_SRH
+ tristate '"srh" Segment Routing header match support'
+ depends on NETFILTER_ADVANCED
+ help
+ srh matching allows you to match packets based on the Segment
+ Routing header of the packet.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
# The targets
config IP6_NF_TARGET_HL
tristate '"HL" hoplimit target support'
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index c6ee0cdd0ba9..d984057b8395 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -45,6 +45,9 @@ obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
obj-$(CONFIG_NFT_FIB_IPV6) += nft_fib_ipv6.o
+# flow table support
+obj-$(CONFIG_NF_FLOW_TABLE_IPV6) += nf_flow_table_ipv6.o
+
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
@@ -54,6 +57,7 @@ obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
+obj-$(CONFIG_IP6_NF_MATCH_SRH) += ip6t_srh.o
# targets
obj-$(CONFIG_IP6_NF_TARGET_MASQUERADE) += ip6t_MASQUERADE.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 1d7ae9366335..af4c917e0836 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -282,12 +282,7 @@ ip6t_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
- private = table->private;
- /*
- * Ensure we load private-> members after we've fetched the base
- * pointer.
- */
- smp_read_barrier_depends();
+ private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
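[Editor's note] smp_read_barrier_depends() is folded into READ_ONCE() here because the reader only needs the address dependency from the pointer load to the loads made through that pointer. A userspace sketch of the same pattern in C11 terms (memory_order_consume, which compilers in practice promote to acquire):

    #include <stdatomic.h>
    #include <stdio.h>

    struct table_info { int size; };

    static _Atomic(struct table_info *) table_private;

    static int read_table_size(void)
    {
        /* the pointer load orders the dependent ->size load after it */
        struct table_info *p =
            atomic_load_explicit(&table_private, memory_order_consume);

        return p ? p->size : -1;
    }

    int main(void)
    {
        static struct table_info t = { .size = 42 };

        atomic_store_explicit(&table_private, &t, memory_order_release);
        printf("size = %d\n", read_table_size());
        return 0;
    }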
@@ -991,9 +986,8 @@ static int get_info(struct net *net, void __user *user,
if (compat)
xt_compat_lock(AF_INET6);
#endif
- t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
- "ip6table_%s", name);
- if (t) {
+ t = xt_request_find_table_lock(net, AF_INET6, name);
+ if (!IS_ERR(t)) {
struct ip6t_getinfo info;
const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
@@ -1023,7 +1017,7 @@ static int get_info(struct net *net, void __user *user,
xt_table_unlock(t);
module_put(t->me);
} else
- ret = -ENOENT;
+ ret = PTR_ERR(t);
#ifdef CONFIG_COMPAT
if (compat)
xt_compat_unlock(AF_INET6);
@@ -1049,7 +1043,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, AF_INET6, get.name);
- if (t) {
+ if (!IS_ERR(t)) {
struct xt_table_info *private = t->private;
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
@@ -1060,7 +1054,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
module_put(t->me);
xt_table_unlock(t);
} else
- ret = -ENOENT;
+ ret = PTR_ERR(t);
return ret;
}
@@ -1083,10 +1077,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
goto out;
}
- t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
- "ip6table_%s", name);
- if (!t) {
- ret = -ENOENT;
+ t = xt_request_find_table_lock(net, AF_INET6, name);
+ if (IS_ERR(t)) {
+ ret = PTR_ERR(t);
goto free_newinfo_counters_untrans;
}
@@ -1199,8 +1192,8 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
if (IS_ERR(paddc))
return PTR_ERR(paddc);
t = xt_find_table_lock(net, AF_INET6, tmp.name);
- if (!t) {
- ret = -ENOENT;
+ if (IS_ERR(t)) {
+ ret = PTR_ERR(t);
goto free;
}
@@ -1636,7 +1629,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
xt_compat_lock(AF_INET6);
t = xt_find_table_lock(net, AF_INET6, get.name);
- if (t) {
+ if (!IS_ERR(t)) {
const struct xt_table_info *private = t->private;
struct xt_table_info info;
ret = compat_table_info(private, &info);
@@ -1650,7 +1643,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
module_put(t->me);
xt_table_unlock(t);
} else
- ret = -ENOENT;
+ ret = PTR_ERR(t);
xt_compat_unlock(AF_INET6);
return ret;
@@ -1954,7 +1947,6 @@ static int __init ip6_tables_init(void)
if (ret < 0)
goto err5;
- pr_info("(C) 2000-2006 Netfilter Core Team\n");
return 0;
err5:
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
new file mode 100644
index 000000000000..9642164107ce
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_srh.c
@@ -0,0 +1,161 @@
+/* Kernel module to match Segment Routing Header (SRH) parameters. */
+
+/* Author:
+ * Ahmed Abdelsalam <amsalam20@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <linux/types.h>
+#include <net/ipv6.h>
+#include <net/seg6.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv6/ip6t_srh.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+/* Test a struct->mt_invflags and a boolean for inequality */
+#define NF_SRH_INVF(ptr, flag, boolean) \
+ ((boolean) ^ !!((ptr)->mt_invflags & (flag)))
+
+static bool srh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct ip6t_srh *srhinfo = par->matchinfo;
+ struct ipv6_sr_hdr *srh;
+ struct ipv6_sr_hdr _srh;
+ int hdrlen, srhoff = 0;
+
+ if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
+ return false;
+ srh = skb_header_pointer(skb, srhoff, sizeof(_srh), &_srh);
+ if (!srh)
+ return false;
+
+ hdrlen = ipv6_optlen(srh);
+ if (skb->len - srhoff < hdrlen)
+ return false;
+
+ if (srh->type != IPV6_SRCRT_TYPE_4)
+ return false;
+
+ if (srh->segments_left > srh->first_segment)
+ return false;
+
+ /* Next Header matching */
+ if (srhinfo->mt_flags & IP6T_SRH_NEXTHDR)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NEXTHDR,
+ !(srh->nexthdr == srhinfo->next_hdr)))
+ return false;
+
+ /* Header Extension Length matching */
+ if (srhinfo->mt_flags & IP6T_SRH_LEN_EQ)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LEN_EQ,
+ !(srh->hdrlen == srhinfo->hdr_len)))
+ return false;
+
+ if (srhinfo->mt_flags & IP6T_SRH_LEN_GT)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LEN_GT,
+ !(srh->hdrlen > srhinfo->hdr_len)))
+ return false;
+
+ if (srhinfo->mt_flags & IP6T_SRH_LEN_LT)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LEN_LT,
+ !(srh->hdrlen < srhinfo->hdr_len)))
+ return false;
+
+ /* Segments Left matching */
+ if (srhinfo->mt_flags & IP6T_SRH_SEGS_EQ)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_SEGS_EQ,
+ !(srh->segments_left == srhinfo->segs_left)))
+ return false;
+
+ if (srhinfo->mt_flags & IP6T_SRH_SEGS_GT)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_SEGS_GT,
+ !(srh->segments_left > srhinfo->segs_left)))
+ return false;
+
+ if (srhinfo->mt_flags & IP6T_SRH_SEGS_LT)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_SEGS_LT,
+ !(srh->segments_left < srhinfo->segs_left)))
+ return false;
+
+ /* Last Entry matching
+ * The Last_Entry field was introduced in revision 6 of the SRH
+ * draft; it was called First_Segment in previous revisions.
+ */
+ if (srhinfo->mt_flags & IP6T_SRH_LAST_EQ)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LAST_EQ,
+ !(srh->first_segment == srhinfo->last_entry)))
+ return false;
+
+ if (srhinfo->mt_flags & IP6T_SRH_LAST_GT)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LAST_GT,
+ !(srh->first_segment > srhinfo->last_entry)))
+ return false;
+
+ if (srhinfo->mt_flags & IP6T_SRH_LAST_LT)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LAST_LT,
+ !(srh->first_segment < srhinfo->last_entry)))
+ return false;
+
+ /* Tag matching
+ * The Tag field was introduced in revision 6 of the SRH draft.
+ */
+ if (srhinfo->mt_flags & IP6T_SRH_TAG)
+ if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_TAG,
+ !(srh->tag == srhinfo->tag)))
+ return false;
+ return true;
+}
+
+static int srh_mt6_check(const struct xt_mtchk_param *par)
+{
+ const struct ip6t_srh *srhinfo = par->matchinfo;
+
+ if (srhinfo->mt_flags & ~IP6T_SRH_MASK) {
+ pr_err("unknown srh match flags %X\n", srhinfo->mt_flags);
+ return -EINVAL;
+ }
+
+ if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) {
+ pr_err("unknown srh invflags %X\n", srhinfo->mt_invflags);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct xt_match srh_mt6_reg __read_mostly = {
+ .name = "srh",
+ .family = NFPROTO_IPV6,
+ .match = srh_mt6,
+ .matchsize = sizeof(struct ip6t_srh),
+ .checkentry = srh_mt6_check,
+ .me = THIS_MODULE,
+};
+
+static int __init srh_mt6_init(void)
+{
+ return xt_register_match(&srh_mt6_reg);
+}
+
+static void __exit srh_mt6_exit(void)
+{
+ xt_unregister_match(&srh_mt6_reg);
+}
+
+module_init(srh_mt6_init);
+module_exit(srh_mt6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: IPv6 Segment Routing Header match");
+MODULE_AUTHOR("Ahmed Abdelsalam <amsalam20@gmail.com>");
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 2b1a9dcdbcb3..b0524b18c4fb 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -42,14 +42,6 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
u_int8_t hop_limit;
u_int32_t flowlabel, mark;
int err;
-#if 0
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)) {
- net_warn_ratelimited("ip6t_hook: happy cracking\n");
- return NF_ACCEPT;
- }
-#endif
/* save source/dest address, mark, hoplimit, flowlabel, priority, */
memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 991512576c8c..47306e45a80a 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -74,6 +74,7 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
{
.hook = ip6table_nat_in,
.pf = NFPROTO_IPV6,
+ .nat_hook = true,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_NAT_DST,
},
@@ -81,6 +82,7 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
{
.hook = ip6table_nat_out,
.pf = NFPROTO_IPV6,
+ .nat_hook = true,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_NAT_SRC,
},
@@ -88,12 +90,14 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
{
.hook = ip6table_nat_local_fn,
.pf = NFPROTO_IPV6,
+ .nat_hook = true,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST,
},
/* After packet filtering, change source */
{
.hook = ip6table_nat_fn,
+ .nat_hook = true,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC,
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index d4bc56443dc1..710fa0806c37 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/slab.h>
@@ -11,6 +12,10 @@
static int __net_init ip6table_raw_table_init(struct net *net);
+static bool raw_before_defrag __read_mostly;
+MODULE_PARM_DESC(raw_before_defrag, "Enable raw table before defrag");
+module_param(raw_before_defrag, bool, 0000);
+
static const struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
@@ -20,6 +25,15 @@ static const struct xt_table packet_raw = {
.table_init = ip6table_raw_table_init,
};
+static const struct xt_table packet_raw_before_defrag = {
+ .name = "raw",
+ .valid_hooks = RAW_VALID_HOOKS,
+ .me = THIS_MODULE,
+ .af = NFPROTO_IPV6,
+ .priority = NF_IP6_PRI_RAW_BEFORE_DEFRAG,
+ .table_init = ip6table_raw_table_init,
+};
+
/* The work comes in here from netfilter.c. */
static unsigned int
ip6table_raw_hook(void *priv, struct sk_buff *skb,
@@ -33,15 +47,19 @@ static struct nf_hook_ops *rawtable_ops __read_mostly;
static int __net_init ip6table_raw_table_init(struct net *net)
{
struct ip6t_replace *repl;
+ const struct xt_table *table = &packet_raw;
int ret;
+ if (raw_before_defrag)
+ table = &packet_raw_before_defrag;
+
if (net->ipv6.ip6table_raw)
return 0;
- repl = ip6t_alloc_initial_table(&packet_raw);
+ repl = ip6t_alloc_initial_table(table);
if (repl == NULL)
return -ENOMEM;
- ret = ip6t_register_table(net, &packet_raw, repl, rawtable_ops,
+ ret = ip6t_register_table(net, table, repl, rawtable_ops,
&net->ipv6.ip6table_raw);
kfree(repl);
return ret;
@@ -62,9 +80,16 @@ static struct pernet_operations ip6table_raw_net_ops = {
static int __init ip6table_raw_init(void)
{
int ret;
+ const struct xt_table *table = &packet_raw;
+
+ if (raw_before_defrag) {
+ table = &packet_raw_before_defrag;
+
+ pr_info("Enabling raw table before defrag\n");
+ }
/* Register hooks */
- rawtable_ops = xt_hook_ops_alloc(&packet_raw, ip6table_raw_hook);
+ rawtable_ops = xt_hook_ops_alloc(table, ip6table_raw_hook);
if (IS_ERR(rawtable_ops))
return PTR_ERR(rawtable_ops);
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 3b80a38f62b8..11a313fd9273 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -176,11 +176,6 @@ static unsigned int ipv6_conntrack_local(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct ipv6hdr)) {
- net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
- return NF_ACCEPT;
- }
return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
}
@@ -368,7 +363,7 @@ static struct nf_sockopt_ops so_getorigdst6 = {
.owner = THIS_MODULE,
};
-static struct nf_conntrack_l4proto *builtin_l4proto6[] = {
+static const struct nf_conntrack_l4proto * const builtin_l4proto6[] = {
&nf_conntrack_l4proto_tcp6,
&nf_conntrack_l4proto_udp6,
&nf_conntrack_l4proto_icmpv6,
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 3ac0d826afc4..2548e2c8aedd 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -27,7 +27,7 @@
#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
#include <net/netfilter/nf_log.h>
-static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
{
@@ -352,7 +352,7 @@ static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.icmpv6.pn;
}
-struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
{
.l3proto = PF_INET6,
.l4proto = IPPROTO_ICMPV6,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 977d8900cfd1..ce53dcfda88a 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -231,7 +231,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
if ((unsigned int)end > IPV6_MAXPLEN) {
pr_debug("offset is too large.\n");
- return -1;
+ return -EINVAL;
}
ecn = ip6_frag_ecn(ipv6_hdr(skb));
@@ -264,7 +264,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
* this case. -DaveM
*/
pr_debug("end of fragment not rounded to 8 bytes.\n");
- return -1;
+ return -EPROTO;
}
if (end > fq->q.len) {
/* Some bits beyond end -> corruption. */
@@ -358,7 +358,7 @@ found:
discard_fq:
inet_frag_kill(&fq->q, &nf_frags);
err:
- return -1;
+ return -EINVAL;
}
/*
@@ -567,6 +567,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
+ u16 savethdr = skb->transport_header;
struct net_device *dev = skb->dev;
int fhoff, nhoff, ret;
struct frag_hdr *fhdr;
@@ -600,8 +601,12 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
spin_lock_bh(&fq->q.lock);
- if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
- ret = -EINVAL;
+ ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
+ if (ret < 0) {
+ if (ret == -EPROTO) {
+ skb->transport_header = savethdr;
+ ret = 0;
+ }
goto out_unlock;
}
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index b326da59257f..c87b48359e8f 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -63,6 +63,9 @@ static unsigned int ipv6_defrag(void *priv,
/* Previously seen (loopback)? */
if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
return NF_ACCEPT;
+
+ if (skb->_nfct == IP_CT_UNTRACKED)
+ return NF_ACCEPT;
#endif
err = nf_ct_frag6_gather(state->net, skb,
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
new file mode 100644
index 000000000000..fff21602875a
--- /dev/null
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -0,0 +1,277 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/neighbour.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
+/* For layer 4 checksum field offset. */
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
+ struct in6_addr *addr,
+ struct in6_addr *new_addr)
+{
+ struct tcphdr *tcph;
+
+ if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+ skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+ return -1;
+
+ tcph = (void *)(skb_network_header(skb) + thoff);
+ inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
+ new_addr->s6_addr32, true);
+
+ return 0;
+}
+
+static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
+ struct in6_addr *addr,
+ struct in6_addr *new_addr)
+{
+ struct udphdr *udph;
+
+ if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+ skb_try_make_writable(skb, thoff + sizeof(*udph)))
+ return -1;
+
+ udph = (void *)(skb_network_header(skb) + thoff);
+ if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
+ new_addr->s6_addr32, true);
+ if (!udph->check)
+ udph->check = CSUM_MANGLED_0;
+ }
+
+ return 0;
+}
+
+static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
+ unsigned int thoff, struct in6_addr *addr,
+ struct in6_addr *new_addr)
+{
+ switch (ip6h->nexthdr) {
+ case IPPROTO_TCP:
+ if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
+ return NF_DROP;
+ break;
+ case IPPROTO_UDP:
+ if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
+ return NF_DROP;
+ break;
+ }
+
+ return 0;
+}
+
+static int nf_flow_snat_ipv6(const struct flow_offload *flow,
+ struct sk_buff *skb, struct ipv6hdr *ip6h,
+ unsigned int thoff,
+ enum flow_offload_tuple_dir dir)
+{
+ struct in6_addr addr, new_addr;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = ip6h->saddr;
+ new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
+ ip6h->saddr = new_addr;
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = ip6h->daddr;
+ new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
+ ip6h->daddr = new_addr;
+ break;
+ default:
+ return -1;
+ }
+
+ return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
+ struct sk_buff *skb, struct ipv6hdr *ip6h,
+ unsigned int thoff,
+ enum flow_offload_tuple_dir dir)
+{
+ struct in6_addr addr, new_addr;
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ addr = ip6h->daddr;
+ new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
+ ip6h->daddr = new_addr;
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ addr = ip6h->saddr;
+ new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
+ ip6h->saddr = new_addr;
+ break;
+ default:
+ return -1;
+ }
+
+ return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_nat_ipv6(const struct flow_offload *flow,
+ struct sk_buff *skb,
+ enum flow_offload_tuple_dir dir)
+{
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ unsigned int thoff = sizeof(*ip6h);
+
+ if (flow->flags & FLOW_OFFLOAD_SNAT &&
+ (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+ nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+ return -1;
+ if (flow->flags & FLOW_OFFLOAD_DNAT &&
+ (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+ nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+ return -1;
+
+ return 0;
+}
+
+static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
+ struct flow_offload_tuple *tuple)
+{
+ struct flow_ports *ports;
+ struct ipv6hdr *ip6h;
+ unsigned int thoff;
+
+ if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ return -1;
+
+ ip6h = ipv6_hdr(skb);
+
+ if (ip6h->nexthdr != IPPROTO_TCP &&
+ ip6h->nexthdr != IPPROTO_UDP)
+ return -1;
+
+ thoff = sizeof(*ip6h);
+ if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+ return -1;
+
+ ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+
+ tuple->src_v6 = ip6h->saddr;
+ tuple->dst_v6 = ip6h->daddr;
+ tuple->src_port = ports->source;
+ tuple->dst_port = ports->dest;
+ tuple->l3proto = AF_INET6;
+ tuple->l4proto = ip6h->nexthdr;
+ tuple->iifidx = dev->ifindex;
+
+ return 0;
+}
+
+/* Based on ip_exceeds_mtu(). */
+static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+ if (skb->len <= mtu)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+ return false;
+
+ return true;
+}
+
+static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rt6_info *rt)
+{
+ u32 mtu;
+
+ mtu = ip6_dst_mtu_forward(&rt->dst);
+ if (__nf_flow_exceeds_mtu(skb, mtu))
+ return true;
+
+ return false;
+}
+
+unsigned int
+nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct nf_flowtable *flow_table = priv;
+ struct flow_offload_tuple tuple = {};
+ enum flow_offload_tuple_dir dir;
+ struct flow_offload *flow;
+ struct net_device *outdev;
+ struct in6_addr *nexthop;
+ struct ipv6hdr *ip6h;
+ struct rt6_info *rt;
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ return NF_ACCEPT;
+
+ if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
+ return NF_ACCEPT;
+
+ tuplehash = flow_offload_lookup(flow_table, &tuple);
+ if (tuplehash == NULL)
+ return NF_ACCEPT;
+
+ outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
+ if (!outdev)
+ return NF_ACCEPT;
+
+ dir = tuplehash->tuple.dir;
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+
+ rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
+ if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
+ return NF_ACCEPT;
+
+ if (skb_try_make_writable(skb, sizeof(*ip6h)))
+ return NF_DROP;
+
+ if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
+ nf_flow_nat_ipv6(flow, skb, dir) < 0)
+ return NF_DROP;
+
+ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ ip6h = ipv6_hdr(skb);
+ ip6h->hop_limit--;
+
+ skb->dev = outdev;
+ nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+ neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
+
+ return NF_STOLEN;
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
+
+static struct nf_flowtable_type flowtable_ipv6 = {
+ .family = NFPROTO_IPV6,
+ .params = &nf_flow_offload_rhash_params,
+ .gc = nf_flow_offload_work_gc,
+ .hook = nf_flow_offload_ipv6_hook,
+ .owner = THIS_MODULE,
+};
+
+static int __init nf_flow_ipv6_module_init(void)
+{
+ nft_register_flowtable_type(&flowtable_ipv6);
+
+ return 0;
+}
+
+static void __exit nf_flow_ipv6_module_exit(void)
+{
+ nft_unregister_flowtable_type(&flowtable_ipv6);
+}
+
+module_init(nf_flow_ipv6_module_init);
+module_exit(nf_flow_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
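[Editor's note] nf_flow_nat_ipv6_udp() above replaces a recomputed checksum of zero with CSUM_MANGLED_0, because in UDP a transmitted checksum of 0 means "not computed" (and is outright illegal over IPv6). A sketch of the folding rule, assuming CSUM_MANGLED_0 is 0xffff as in the kernel:

    #include <stdio.h>
    #include <stdint.h>

    #define CSUM_MANGLED_0 ((uint16_t)0xffff)  /* assumed kernel value */

    /* RFC 768: if the computed checksum is 0, transmit it as all ones */
    static uint16_t udp_fold_csum(uint16_t csum)
    {
        return csum ? csum : CSUM_MANGLED_0;
    }

    int main(void)
    {
        printf("0x1234 -> 0x%04x\n", udp_fold_csum(0x1234));
        printf("0x0000 -> 0x%04x\n", udp_fold_csum(0x0000)); /* 0xffff */
        return 0;
    }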
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 1d2fb9267d6f..bed57ee65f7b 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -369,10 +369,6 @@ nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
#endif
unsigned int ret;
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct ipv6hdr))
- return NF_ACCEPT;
-
ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
#ifdef CONFIG_XFRM
if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -408,10 +404,6 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
unsigned int ret;
int err;
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct ipv6hdr))
- return NF_ACCEPT;
-
ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
if (ret != NF_DROP && ret != NF_STOLEN &&
(ct = nf_ct_get(skb, &ctinfo)) != NULL) {
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
index d6e4ba5de916..17e03589331c 100644
--- a/net/ipv6/netfilter/nf_tables_ipv6.c
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -22,68 +22,12 @@ static unsigned int nft_do_chain_ipv6(void *priv,
{
struct nft_pktinfo pkt;
- nft_set_pktinfo_ipv6(&pkt, skb, state);
+ nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_ipv6(&pkt, skb);
return nft_do_chain(&pkt, priv);
}
-static unsigned int nft_ipv6_output(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
- if (net_ratelimit())
- pr_info("nf_tables_ipv6: ignoring short SOCK_RAW "
- "packet\n");
- return NF_ACCEPT;
- }
-
- return nft_do_chain_ipv6(priv, skb, state);
-}
-
-struct nft_af_info nft_af_ipv6 __read_mostly = {
- .family = NFPROTO_IPV6,
- .nhooks = NF_INET_NUMHOOKS,
- .owner = THIS_MODULE,
- .nops = 1,
- .hooks = {
- [NF_INET_LOCAL_IN] = nft_do_chain_ipv6,
- [NF_INET_LOCAL_OUT] = nft_ipv6_output,
- [NF_INET_FORWARD] = nft_do_chain_ipv6,
- [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6,
- [NF_INET_POST_ROUTING] = nft_do_chain_ipv6,
- },
-};
-EXPORT_SYMBOL_GPL(nft_af_ipv6);
-
-static int nf_tables_ipv6_init_net(struct net *net)
-{
- net->nft.ipv6 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
- if (net->nft.ipv6 == NULL)
- return -ENOMEM;
-
- memcpy(net->nft.ipv6, &nft_af_ipv6, sizeof(nft_af_ipv6));
-
- if (nft_register_afinfo(net, net->nft.ipv6) < 0)
- goto err;
-
- return 0;
-err:
- kfree(net->nft.ipv6);
- return -ENOMEM;
-}
-
-static void nf_tables_ipv6_exit_net(struct net *net)
-{
- nft_unregister_afinfo(net, net->nft.ipv6);
- kfree(net->nft.ipv6);
-}
-
-static struct pernet_operations nf_tables_ipv6_net_ops = {
- .init = nf_tables_ipv6_init_net,
- .exit = nf_tables_ipv6_exit_net,
-};
-
static const struct nf_chain_type filter_ipv6 = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
@@ -94,26 +38,22 @@ static const struct nf_chain_type filter_ipv6 = {
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
+ .hooks = {
+ [NF_INET_LOCAL_IN] = nft_do_chain_ipv6,
+ [NF_INET_LOCAL_OUT] = nft_do_chain_ipv6,
+ [NF_INET_FORWARD] = nft_do_chain_ipv6,
+ [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6,
+ [NF_INET_POST_ROUTING] = nft_do_chain_ipv6,
+ },
};
static int __init nf_tables_ipv6_init(void)
{
- int ret;
-
- ret = nft_register_chain_type(&filter_ipv6);
- if (ret < 0)
- return ret;
-
- ret = register_pernet_subsys(&nf_tables_ipv6_net_ops);
- if (ret < 0)
- nft_unregister_chain_type(&filter_ipv6);
-
- return ret;
+ return nft_register_chain_type(&filter_ipv6);
}
static void __exit nf_tables_ipv6_exit(void)
{
- unregister_pernet_subsys(&nf_tables_ipv6_net_ops);
nft_unregister_chain_type(&filter_ipv6);
}
@@ -122,4 +62,4 @@ module_exit(nf_tables_ipv6_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(AF_INET6);
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "filter");
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 443cd306c0b0..73fe2bd13fcf 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -31,7 +31,8 @@ static unsigned int nft_nat_do_chain(void *priv,
{
struct nft_pktinfo pkt;
- nft_set_pktinfo_ipv6(&pkt, skb, state);
+ nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_ipv6(&pkt, skb);
return nft_do_chain(&pkt, priv);
}
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index f2727475895e..11d3c3b9aa18 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -33,7 +33,8 @@ static unsigned int nf_route_table_hook(void *priv,
u32 mark, flowlabel;
int err;
- nft_set_pktinfo_ipv6(&pkt, skb, state);
+ nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_ipv6(&pkt, skb);
/* save source/dest address, mark, hoplimit, flowlabel, priority */
memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 54b5899543ef..cc5174c7254c 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -60,7 +60,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
{
const struct net_device *dev = NULL;
const struct nf_ipv6_ops *v6ops;
- const struct nf_afinfo *afinfo;
int route_err, addrtype;
struct rt6_info *rt;
struct flowi6 fl6 = {
@@ -69,8 +68,8 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
};
u32 ret = 0;
- afinfo = nf_get_afinfo(NFPROTO_IPV6);
- if (!afinfo)
+ v6ops = nf_get_ipv6_ops();
+ if (!v6ops)
return RTN_UNREACHABLE;
if (priv->flags & NFTA_FIB_F_IIF)
@@ -80,12 +79,11 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);
- v6ops = nf_get_ipv6_ops();
- if (dev && v6ops && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
+ if (dev && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
ret = RTN_LOCAL;
- route_err = afinfo->route(nft_net(pkt), (struct dst_entry **)&rt,
- flowi6_to_flowi(&fl6), false);
+ route_err = v6ops->route(nft_net(pkt), (struct dst_entry **)&rt,
+ flowi6_to_flowi(&fl6), false);
if (route_err)
goto err;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index e88bcb8ff0fd..b67814242f78 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -58,7 +58,6 @@ static int sockstat6_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations sockstat6_seq_fops = {
- .owner = THIS_MODULE,
.open = sockstat6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -248,7 +247,6 @@ static int snmp6_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations snmp6_seq_fops = {
- .owner = THIS_MODULE,
.open = snmp6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -274,7 +272,6 @@ static int snmp6_dev_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations snmp6_dev_seq_fops = {
- .owner = THIS_MODULE,
.open = snmp6_dev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 761a473a07c5..ddda7eb3c623 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1308,7 +1308,6 @@ static int raw6_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations raw6_seq_fops = {
- .owner = THIS_MODULE,
.open = raw6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0458b761f3c5..fb2d251c0500 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -186,7 +186,7 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
- return dst_metrics_write_ptr(rt->dst.from);
+ return dst_metrics_write_ptr(&rt->from->dst);
}
static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@ -391,7 +391,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *)dst;
struct rt6_exception_bucket *bucket;
- struct dst_entry *from = dst->from;
+ struct rt6_info *from = rt->from;
struct inet6_dev *idev;
dst_destroy_metrics_generic(dst);
@@ -409,8 +409,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
kfree(bucket);
}
- dst->from = NULL;
- dst_release(from);
+ rt->from = NULL;
+ dst_release(&from->dst);
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -443,9 +443,9 @@ static bool rt6_check_expired(const struct rt6_info *rt)
if (rt->rt6i_flags & RTF_EXPIRES) {
if (time_after(jiffies, rt->dst.expires))
return true;
- } else if (rt->dst.from) {
+ } else if (rt->from) {
return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
- rt6_check_expired((struct rt6_info *)rt->dst.from);
+ rt6_check_expired(rt->from);
}
return false;
}
@@ -455,7 +455,6 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
int strict)
{
struct rt6_info *sibling, *next_sibling;
- int route_choosen;
/* We might have already computed the hash for ICMPv6 errors. In such
* case it will always be non-zero. Otherwise now is the time to do it.
@@ -463,26 +462,19 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
if (!fl6->mp_hash)
fl6->mp_hash = rt6_multipath_hash(fl6, NULL);
- route_choosen = fl6->mp_hash % (match->rt6i_nsiblings + 1);
- /* Don't change the route, if route_choosen == 0
- * (siblings does not include ourself)
- */
- if (route_choosen)
- list_for_each_entry_safe(sibling, next_sibling,
- &match->rt6i_siblings, rt6i_siblings) {
- route_choosen--;
- if (route_choosen == 0) {
- struct inet6_dev *idev = sibling->rt6i_idev;
-
- if (!netif_carrier_ok(sibling->dst.dev) &&
- idev->cnf.ignore_routes_with_linkdown)
- break;
- if (rt6_score_route(sibling, oif, strict) < 0)
- break;
- match = sibling;
- break;
- }
- }
+ if (fl6->mp_hash <= atomic_read(&match->rt6i_nh_upper_bound))
+ return match;
+
+ list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings,
+ rt6i_siblings) {
+ if (fl6->mp_hash > atomic_read(&sibling->rt6i_nh_upper_bound))
+ continue;
+ if (rt6_score_route(sibling, oif, strict) < 0)
+ break;
+ match = sibling;
+ break;
+ }
+
return match;
}
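The rewrite above replaces modulo-based sibling walking with hash-threshold selection (in the spirit of RFC 2992): each route carries a precomputed upper bound in the 31-bit hash space, and the first live sibling whose bound covers the flow hash wins. A minimal user-space sketch of that walk, assuming bounds[] holds the values rt6_multipath_rebalance() computes further down and hash is the reduced 31-bit flow hash:

	/* First nexthop whose upper bound covers the hash is chosen; dead
	 * nexthops carry a bound of -1 and can never match a hash >= 0.
	 */
	static int select_nexthop(const int *bounds, int n, unsigned int hash)
	{
		int i;

		for (i = 0; i < n; i++)
			if ((int)hash <= bounds[i])
				return i;
		return -1;	/* not reached while the last bound is INT_MAX */
	}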
@@ -499,12 +491,15 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
struct rt6_info *local = NULL;
struct rt6_info *sprt;
- if (!oif && ipv6_addr_any(saddr))
- goto out;
+ if (!oif && ipv6_addr_any(saddr) && !(rt->rt6i_nh_flags & RTNH_F_DEAD))
+ return rt;
- for (sprt = rt; sprt; sprt = rcu_dereference(sprt->dst.rt6_next)) {
+ for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) {
struct net_device *dev = sprt->dst.dev;
+ if (sprt->rt6i_nh_flags & RTNH_F_DEAD)
+ continue;
+
if (oif) {
if (dev->ifindex == oif)
return sprt;
@@ -533,8 +528,8 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
if (flags & RT6_LOOKUP_F_IFACE)
return net->ipv6.ip6_null_entry;
}
-out:
- return rt;
+
+ return rt->rt6i_nh_flags & RTNH_F_DEAD ? net->ipv6.ip6_null_entry : rt;
}
#ifdef CONFIG_IPV6_ROUTER_PREF
@@ -679,10 +674,12 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
int m;
bool match_do_rr = false;
struct inet6_dev *idev = rt->rt6i_idev;
- struct net_device *dev = rt->dst.dev;
- if (dev && !netif_carrier_ok(dev) &&
- idev->cnf.ignore_routes_with_linkdown &&
+ if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+ goto out;
+
+ if (idev->cnf.ignore_routes_with_linkdown &&
+ rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
!(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
goto out;
@@ -721,7 +718,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
match = NULL;
cont = NULL;
- for (rt = rr_head; rt; rt = rcu_dereference(rt->dst.rt6_next)) {
+ for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) {
if (rt->rt6i_metric != metric) {
cont = rt;
break;
@@ -731,7 +728,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
}
for (rt = leaf; rt && rt != rr_head;
- rt = rcu_dereference(rt->dst.rt6_next)) {
+ rt = rcu_dereference(rt->rt6_next)) {
if (rt->rt6i_metric != metric) {
cont = rt;
break;
@@ -743,7 +740,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
if (match || !cont)
return match;
- for (rt = cont; rt; rt = rcu_dereference(rt->dst.rt6_next))
+ for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next))
match = find_match(rt, oif, strict, &mpri, match, do_rr);
return match;
@@ -781,7 +778,7 @@ static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn,
&do_rr);
if (do_rr) {
- struct rt6_info *next = rcu_dereference(rt0->dst.rt6_next);
+ struct rt6_info *next = rcu_dereference(rt0->rt6_next);
/* no entries matched; do round-robin */
if (!next || next->rt6i_metric != rt0->rt6i_metric)
@@ -1054,7 +1051,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
*/
if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
- ort = (struct rt6_info *)ort->dst.from;
+ ort = ort->from;
rcu_read_lock();
dev = ip6_rt_get_dev_rcu(ort);
@@ -1274,7 +1271,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
/* ort can't be a cache or pcpu route */
if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
- ort = (struct rt6_info *)ort->dst.from;
+ ort = ort->from;
WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));
spin_lock_bh(&rt6_exception_lock);
@@ -1346,7 +1343,9 @@ out:
/* Update fn->fn_sernum to invalidate all cached dst */
if (!err) {
+ spin_lock_bh(&ort->rt6i_table->tb6_lock);
fib6_update_sernum(ort);
+ spin_unlock_bh(&ort->rt6i_table->tb6_lock);
fib6_force_start_gc(net);
}
@@ -1415,8 +1414,8 @@ static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
/* Remove the passed in cached rt from the hash table that contains it */
int rt6_remove_exception_rt(struct rt6_info *rt)
{
- struct rt6_info *from = (struct rt6_info *)rt->dst.from;
struct rt6_exception_bucket *bucket;
+ struct rt6_info *from = rt->from;
struct in6_addr *src_key = NULL;
struct rt6_exception *rt6_ex;
int err;
@@ -1460,8 +1459,8 @@ int rt6_remove_exception_rt(struct rt6_info *rt)
*/
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
- struct rt6_info *from = (struct rt6_info *)rt->dst.from;
struct rt6_exception_bucket *bucket;
+ struct rt6_info *from = rt->from;
struct in6_addr *src_key = NULL;
struct rt6_exception *rt6_ex;
@@ -1586,12 +1585,19 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
* EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
* expired, independently from their aging, as per RFC 8201 section 4
*/
- if (!(rt->rt6i_flags & RTF_EXPIRES) &&
- time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
- RT6_TRACE("aging clone %p\n", rt);
+ if (!(rt->rt6i_flags & RTF_EXPIRES)) {
+ if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
+ RT6_TRACE("aging clone %p\n", rt);
+ rt6_remove_exception(bucket, rt6_ex);
+ return;
+ }
+ } else if (time_after(jiffies, rt->dst.expires)) {
+ RT6_TRACE("purging expired route %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
- } else if (rt->rt6i_flags & RTF_GATEWAY) {
+ }
+
+ if (rt->rt6i_flags & RTF_GATEWAY) {
struct neighbour *neigh;
__u8 neigh_flags = 0;
@@ -1606,11 +1612,8 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
rt6_remove_exception(bucket, rt6_ex);
return;
}
- } else if (__rt6_check_expired(rt)) {
- RT6_TRACE("purging expired route %p\n", rt);
- rt6_remove_exception(bucket, rt6_ex);
- return;
}
+
gc_args->more++;
}
@@ -1824,10 +1827,10 @@ u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb)
if (skb) {
ip6_multipath_l3_keys(skb, &hash_keys);
- return flow_hash_from_keys(&hash_keys);
+ return flow_hash_from_keys(&hash_keys) >> 1;
}
- return get_hash_from_flowi6(fl6);
+ return get_hash_from_flowi6(fl6) >> 1;
}
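The new right shift is best read together with the atomic upper bounds introduced below: the bounds are signed 32-bit values with -1 reserved for dead nexthops, so the flow hash has to stay non-negative when compared as an int. A one-line sketch of the reduction (an interpretation of the change, not wording from the patch):

	static inline unsigned int mp_hash_31bit(unsigned int full_hash)
	{
		return full_hash >> 1;	/* 0 .. 0x7fffffff, non-negative as int */
	}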
void ip6_route_input(struct sk_buff *skb)
@@ -1929,9 +1932,9 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
- if (rt->dst.from &&
- dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
- dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
+ if (rt->from &&
+ dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(&rt->from->dst))
+ dst_init_metrics(&rt->dst, dst_metrics_ptr(&rt->from->dst), true);
}
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
@@ -1951,7 +1954,7 @@ static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
if (!__rt6_check_expired(rt) &&
rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
- rt6_check((struct rt6_info *)(rt->dst.from), cookie))
+ rt6_check(rt->from, cookie))
return &rt->dst;
else
return NULL;
@@ -1971,7 +1974,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
rt6_dst_from_metrics_check(rt);
if (rt->rt6i_flags & RTF_PCPU ||
- (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
+ (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
return rt6_dst_from_check(rt, cookie);
else
return rt6_check(rt, cookie);
@@ -2154,6 +2157,8 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
for_each_fib6_node_rt_rcu(fn) {
+ if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+ continue;
if (rt6_check_expired(rt))
continue;
if (rt->dst.error)
@@ -2344,7 +2349,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
rt->rt6i_idev = idev;
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
- /* Add this dst into uncached_list so that rt6_ifdown() can
+ /* Add this dst into uncached_list so that rt6_disable_ip() can
* do proper release of the net_device
*/
rt6_uncached_list_add(rt);
@@ -2439,7 +2444,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
static struct rt6_info *ip6_nh_lookup_table(struct net *net,
struct fib6_config *cfg,
- const struct in6_addr *gw_addr)
+ const struct in6_addr *gw_addr,
+ u32 tbid, int flags)
{
struct flowi6 fl6 = {
.flowi6_oif = cfg->fc_ifindex,
@@ -2448,15 +2454,15 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
};
struct fib6_table *table;
struct rt6_info *rt;
- int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
- table = fib6_get_table(net, cfg->fc_table);
+ table = fib6_get_table(net, tbid);
if (!table)
return NULL;
if (!ipv6_addr_any(&cfg->fc_prefsrc))
flags |= RT6_LOOKUP_F_HAS_SADDR;
+ flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
/* if table lookup failed, fall back to full lookup */
@@ -2468,6 +2474,82 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
return rt;
}
+static int ip6_route_check_nh_onlink(struct net *net,
+ struct fib6_config *cfg,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
+ const struct in6_addr *gw_addr = &cfg->fc_gateway;
+ u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
+ struct rt6_info *grt;
+ int err;
+
+ err = 0;
+ grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
+ if (grt) {
+ if (grt->rt6i_flags & flags || dev != grt->dst.dev) {
+ NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
+ err = -EINVAL;
+ }
+
+ ip6_rt_put(grt);
+ }
+
+ return err;
+}
+
+static int ip6_route_check_nh(struct net *net,
+ struct fib6_config *cfg,
+ struct net_device **_dev,
+ struct inet6_dev **idev)
+{
+ const struct in6_addr *gw_addr = &cfg->fc_gateway;
+ struct net_device *dev = _dev ? *_dev : NULL;
+ struct rt6_info *grt = NULL;
+ int err = -EHOSTUNREACH;
+
+ if (cfg->fc_table) {
+ int flags = RT6_LOOKUP_F_IFACE;
+
+ grt = ip6_nh_lookup_table(net, cfg, gw_addr,
+ cfg->fc_table, flags);
+ if (grt) {
+ if (grt->rt6i_flags & RTF_GATEWAY ||
+ (dev && dev != grt->dst.dev)) {
+ ip6_rt_put(grt);
+ grt = NULL;
+ }
+ }
+ }
+
+ if (!grt)
+ grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
+
+ if (!grt)
+ goto out;
+
+ if (dev) {
+ if (dev != grt->dst.dev) {
+ ip6_rt_put(grt);
+ goto out;
+ }
+ } else {
+ *_dev = dev = grt->dst.dev;
+ *idev = grt->rt6i_idev;
+ dev_hold(dev);
+ in6_dev_hold(grt->rt6i_idev);
+ }
+
+ if (!(grt->rt6i_flags & RTF_GATEWAY))
+ err = 0;
+
+ ip6_rt_put(grt);
+
+out:
+ return err;
+}
+
static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
struct netlink_ext_ack *extack)
{
@@ -2519,6 +2601,21 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
if (cfg->fc_metric == 0)
cfg->fc_metric = IP6_RT_PRIO_USER;
+ if (cfg->fc_flags & RTNH_F_ONLINK) {
+ if (!dev) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop device required for onlink");
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (!(dev->flags & IFF_UP)) {
+ NL_SET_ERR_MSG(extack, "Nexthop device is not up");
+ err = -ENETDOWN;
+ goto out;
+ }
+ }
+
err = -ENOBUFS;
if (cfg->fc_nlinfo.nlh &&
!(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
@@ -2593,6 +2690,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
#endif
rt->rt6i_metric = cfg->fc_metric;
+ rt->rt6i_nh_weight = 1;
/* We cannot add true routes via loopback here,
they would result in kernel looping; promote them to reject routes
@@ -2662,8 +2760,6 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
rt->rt6i_gateway = *gw_addr;
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
- struct rt6_info *grt = NULL;
-
		/* IPv6 strictly inhibits using non-link-local
		   addresses as nexthop address.
		   Otherwise, the router will not be able to send redirects.
@@ -2680,40 +2776,12 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
goto out;
}
- if (cfg->fc_table) {
- grt = ip6_nh_lookup_table(net, cfg, gw_addr);
-
- if (grt) {
- if (grt->rt6i_flags & RTF_GATEWAY ||
- (dev && dev != grt->dst.dev)) {
- ip6_rt_put(grt);
- grt = NULL;
- }
- }
- }
-
- if (!grt)
- grt = rt6_lookup(net, gw_addr, NULL,
- cfg->fc_ifindex, 1);
-
- err = -EHOSTUNREACH;
- if (!grt)
- goto out;
- if (dev) {
- if (dev != grt->dst.dev) {
- ip6_rt_put(grt);
- goto out;
- }
+ if (cfg->fc_flags & RTNH_F_ONLINK) {
+ err = ip6_route_check_nh_onlink(net, cfg, dev,
+ extack);
} else {
- dev = grt->dst.dev;
- idev = grt->rt6i_idev;
- dev_hold(dev);
- in6_dev_hold(grt->rt6i_idev);
+ err = ip6_route_check_nh(net, cfg, &dev, &idev);
}
- if (!(grt->rt6i_flags & RTF_GATEWAY))
- err = 0;
- ip6_rt_put(grt);
-
if (err)
goto out;
}
@@ -2732,6 +2800,12 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
if (!dev)
goto out;
+ if (!(dev->flags & IFF_UP)) {
+ NL_SET_ERR_MSG(extack, "Nexthop device is not up");
+ err = -ENETDOWN;
+ goto out;
+ }
+
if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
NL_SET_ERR_MSG(extack, "Invalid source address");
@@ -2746,6 +2820,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
rt->rt6i_flags = cfg->fc_flags;
install_route:
+ if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
+ !netif_carrier_ok(dev))
+ rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
+ rt->rt6i_nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
rt->dst.dev = dev;
rt->rt6i_idev = idev;
rt->rt6i_table = table;
@@ -3056,11 +3134,11 @@ out:
static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
- BUG_ON(from->dst.from);
+ BUG_ON(from->from);
rt->rt6i_flags &= ~RTF_EXPIRES;
dst_hold(&from->dst);
- rt->dst.from = &from->dst;
+ rt->from = from;
dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
}
@@ -3459,37 +3537,249 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
fib6_clean_all(net, fib6_clean_tohost, gateway);
}
-struct arg_dev_net {
- struct net_device *dev;
- struct net *net;
+struct arg_netdev_event {
+ const struct net_device *dev;
+ union {
+ unsigned int nh_flags;
+ unsigned long event;
+ };
};
+static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt)
+{
+ struct rt6_info *iter;
+ struct fib6_node *fn;
+
+ fn = rcu_dereference_protected(rt->rt6i_node,
+ lockdep_is_held(&rt->rt6i_table->tb6_lock));
+ iter = rcu_dereference_protected(fn->leaf,
+ lockdep_is_held(&rt->rt6i_table->tb6_lock));
+ while (iter) {
+ if (iter->rt6i_metric == rt->rt6i_metric &&
+ rt6_qualify_for_ecmp(iter))
+ return iter;
+ iter = rcu_dereference_protected(iter->rt6_next,
+ lockdep_is_held(&rt->rt6i_table->tb6_lock));
+ }
+
+ return NULL;
+}
+
+static bool rt6_is_dead(const struct rt6_info *rt)
+{
+ if (rt->rt6i_nh_flags & RTNH_F_DEAD ||
+ (rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
+ rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
+ return true;
+
+ return false;
+}
+
+static int rt6_multipath_total_weight(const struct rt6_info *rt)
+{
+ struct rt6_info *iter;
+ int total = 0;
+
+ if (!rt6_is_dead(rt))
+ total += rt->rt6i_nh_weight;
+
+ list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) {
+ if (!rt6_is_dead(iter))
+ total += iter->rt6i_nh_weight;
+ }
+
+ return total;
+}
+
+static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total)
+{
+ int upper_bound = -1;
+
+ if (!rt6_is_dead(rt)) {
+ *weight += rt->rt6i_nh_weight;
+ upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
+ total) - 1;
+ }
+ atomic_set(&rt->rt6i_nh_upper_bound, upper_bound);
+}
+
+static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total)
+{
+ struct rt6_info *iter;
+ int weight = 0;
+
+ rt6_upper_bound_set(rt, &weight, total);
+
+ list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+ rt6_upper_bound_set(iter, &weight, total);
+}
+
+void rt6_multipath_rebalance(struct rt6_info *rt)
+{
+ struct rt6_info *first;
+ int total;
+
+ /* In case the entire multipath route was marked for flushing,
+ * then there is no need to rebalance upon the removal of every
+ * sibling route.
+ */
+ if (!rt->rt6i_nsiblings || rt->should_flush)
+ return;
+
+ /* During lookup routes are evaluated in order, so we need to
+ * make sure upper bounds are assigned from the first sibling
+ * onwards.
+ */
+ first = rt6_multipath_first_sibling(rt);
+ if (WARN_ON_ONCE(!first))
+ return;
+
+ total = rt6_multipath_total_weight(first);
+ rt6_multipath_upper_bound_set(first, total);
+}
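rt6_upper_bound_set() assigns each live nexthop the cumulative weight scaled into the 31-bit hash space, rounded to the nearest integer, minus one; the last live sibling therefore always ends at INT_MAX, so every hash value maps to some nexthop. A self-contained sketch with a worked example, assuming all nexthops are alive: weights {2, 1} yield bounds {1431655764, 2147483647}, i.e. roughly two thirds and one third of the hash space.

	#include <stdint.h>

	static void set_upper_bounds(const int *weight, int *bound, int n)
	{
		int64_t total = 0, acc = 0;
		int i;

		for (i = 0; i < n; i++)
			total += weight[i];

		for (i = 0; i < n; i++) {
			acc += weight[i];
			/* DIV_ROUND_CLOSEST_ULL((u64)acc << 31, total) - 1 */
			bound[i] = (int)(((acc << 31) + total / 2) / total) - 1;
		}
	}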
+
+static int fib6_ifup(struct rt6_info *rt, void *p_arg)
+{
+ const struct arg_netdev_event *arg = p_arg;
+ const struct net *net = dev_net(arg->dev);
+
+ if (rt != net->ipv6.ip6_null_entry && rt->dst.dev == arg->dev) {
+ rt->rt6i_nh_flags &= ~arg->nh_flags;
+ fib6_update_sernum_upto_root(dev_net(rt->dst.dev), rt);
+ rt6_multipath_rebalance(rt);
+ }
+
+ return 0;
+}
+
+void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
+{
+ struct arg_netdev_event arg = {
+ .dev = dev,
+ {
+ .nh_flags = nh_flags,
+ },
+ };
+
+ if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
+ arg.nh_flags |= RTNH_F_LINKDOWN;
+
+ fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
+}
+
+static bool rt6_multipath_uses_dev(const struct rt6_info *rt,
+ const struct net_device *dev)
+{
+ struct rt6_info *iter;
+
+ if (rt->dst.dev == dev)
+ return true;
+ list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+ if (iter->dst.dev == dev)
+ return true;
+
+ return false;
+}
+
+static void rt6_multipath_flush(struct rt6_info *rt)
+{
+ struct rt6_info *iter;
+
+ rt->should_flush = 1;
+ list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+ iter->should_flush = 1;
+}
+
+static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt,
+ const struct net_device *down_dev)
+{
+ struct rt6_info *iter;
+ unsigned int dead = 0;
+
+ if (rt->dst.dev == down_dev || rt->rt6i_nh_flags & RTNH_F_DEAD)
+ dead++;
+ list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+ if (iter->dst.dev == down_dev ||
+ iter->rt6i_nh_flags & RTNH_F_DEAD)
+ dead++;
+
+ return dead;
+}
+
+static void rt6_multipath_nh_flags_set(struct rt6_info *rt,
+ const struct net_device *dev,
+ unsigned int nh_flags)
+{
+ struct rt6_info *iter;
+
+ if (rt->dst.dev == dev)
+ rt->rt6i_nh_flags |= nh_flags;
+ list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+ if (iter->dst.dev == dev)
+ iter->rt6i_nh_flags |= nh_flags;
+}
+
/* called with write lock held for table with rt */
-static int fib6_ifdown(struct rt6_info *rt, void *arg)
+static int fib6_ifdown(struct rt6_info *rt, void *p_arg)
{
- const struct arg_dev_net *adn = arg;
- const struct net_device *dev = adn->dev;
+ const struct arg_netdev_event *arg = p_arg;
+ const struct net_device *dev = arg->dev;
+ const struct net *net = dev_net(dev);
- if ((rt->dst.dev == dev || !dev) &&
- rt != adn->net->ipv6.ip6_null_entry &&
- (rt->rt6i_nsiblings == 0 ||
- (dev && netdev_unregistering(dev)) ||
- !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
- return -1;
+ if (rt == net->ipv6.ip6_null_entry)
+ return 0;
+
+ switch (arg->event) {
+ case NETDEV_UNREGISTER:
+ return rt->dst.dev == dev ? -1 : 0;
+ case NETDEV_DOWN:
+ if (rt->should_flush)
+ return -1;
+ if (!rt->rt6i_nsiblings)
+ return rt->dst.dev == dev ? -1 : 0;
+ if (rt6_multipath_uses_dev(rt, dev)) {
+ unsigned int count;
+
+ count = rt6_multipath_dead_count(rt, dev);
+ if (rt->rt6i_nsiblings + 1 == count) {
+ rt6_multipath_flush(rt);
+ return -1;
+ }
+ rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
+ RTNH_F_LINKDOWN);
+ fib6_update_sernum(rt);
+ rt6_multipath_rebalance(rt);
+ }
+ return -2;
+ case NETDEV_CHANGE:
+ if (rt->dst.dev != dev ||
+ rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
+ break;
+ rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
+ rt6_multipath_rebalance(rt);
+ break;
+ }
return 0;
}
-void rt6_ifdown(struct net *net, struct net_device *dev)
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
{
- struct arg_dev_net adn = {
+ struct arg_netdev_event arg = {
.dev = dev,
- .net = net,
+ {
+ .event = event,
+ },
};
- fib6_clean_all(net, fib6_ifdown, &adn);
- if (dev)
- rt6_uncached_list_flush_dev(net, dev);
+ fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
+}
+
+void rt6_disable_ip(struct net_device *dev, unsigned long event)
+{
+ rt6_sync_down_dev(dev, event);
+ rt6_uncached_list_flush_dev(dev_net(dev), dev);
+ neigh_ifdown(&nd_tbl, dev);
}
struct rt6_mtu_change_arg {
@@ -3603,6 +3893,8 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rtm->rtm_flags & RTM_F_CLONED)
cfg->fc_flags |= RTF_CACHE;
+ cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
+
cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
cfg->fc_nlinfo.nlh = nlh;
cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
@@ -3812,6 +4104,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
goto cleanup;
}
+ rt->rt6i_nh_weight = rtnh->rtnh_hops + 1;
+
err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
if (err) {
dst_release_immediate(&rt->dst);
@@ -3992,7 +4286,10 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
unsigned int *flags, bool skip_oif)
{
- if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
+ if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+ *flags |= RTNH_F_DEAD;
+
+ if (rt->rt6i_nh_flags & RTNH_F_LINKDOWN) {
*flags |= RTNH_F_LINKDOWN;
if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
*flags |= RTNH_F_DEAD;
@@ -4003,6 +4300,7 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
goto nla_put_failure;
}
+ *flags |= (rt->rt6i_nh_flags & RTNH_F_ONLINK);
if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD)
*flags |= RTNH_F_OFFLOAD;
@@ -4031,7 +4329,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
if (!rtnh)
goto nla_put_failure;
- rtnh->rtnh_hops = 0;
+ rtnh->rtnh_hops = rt->rt6i_nh_weight - 1;
rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
@@ -4321,9 +4619,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout;
}
- if (fibmatch && rt->dst.from) {
- struct rt6_info *ort = container_of(rt->dst.from,
- struct rt6_info, dst);
+ if (fibmatch && rt->from) {
+ struct rt6_info *ort = rt->from;
dst_hold(&ort->dst);
ip6_rt_put(rt);
@@ -4427,7 +4724,6 @@ static int ip6_route_dev_notify(struct notifier_block *this,
#ifdef CONFIG_PROC_FS
static const struct file_operations ipv6_route_proc_fops = {
- .owner = THIS_MODULE,
.open = ipv6_route_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -4455,7 +4751,6 @@ static int rt6_stats_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations rt6_stats_seq_fops = {
- .owner = THIS_MODULE,
.open = rt6_stats_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -4600,8 +4895,6 @@ static int __net_init ip6_route_net_init(struct net *net)
GFP_KERNEL);
if (!net->ipv6.ip6_null_entry)
goto out_ip6_dst_entries;
- net->ipv6.ip6_null_entry->dst.path =
- (struct dst_entry *)net->ipv6.ip6_null_entry;
net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
ip6_template_metrics, true);
@@ -4613,8 +4906,6 @@ static int __net_init ip6_route_net_init(struct net *net)
GFP_KERNEL);
if (!net->ipv6.ip6_prohibit_entry)
goto out_ip6_null_entry;
- net->ipv6.ip6_prohibit_entry->dst.path =
- (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
ip6_template_metrics, true);
@@ -4624,8 +4915,6 @@ static int __net_init ip6_route_net_init(struct net *net)
GFP_KERNEL);
if (!net->ipv6.ip6_blk_hole_entry)
goto out_ip6_prohibit_entry;
- net->ipv6.ip6_blk_hole_entry->dst.path =
- (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
ip6_template_metrics, true);
@@ -4782,11 +5071,20 @@ int __init ip6_route_init(void)
if (ret)
goto fib6_rules_init;
- ret = -ENOBUFS;
- if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) ||
- __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) ||
- __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL,
- RTNL_FLAG_DOIT_UNLOCKED))
+ ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
+ inet6_rtm_newroute, NULL, 0);
+ if (ret < 0)
+ goto out_register_late_subsys;
+
+ ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
+ inet6_rtm_delroute, NULL, 0);
+ if (ret < 0)
+ goto out_register_late_subsys;
+
+ ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
+ inet6_rtm_getroute, NULL,
+ RTNL_FLAG_DOIT_UNLOCKED);
+ if (ret < 0)
goto out_register_late_subsys;
ret = register_netdevice_notifier(&ip6_route_dev_notifier);
@@ -4804,6 +5102,7 @@ out:
return ret;
out_register_late_subsys:
+ rtnl_unregister_all(PF_INET6);
unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
fib6_rules_cleanup();
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index c81407770956..7f5621d09571 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -306,9 +306,7 @@ static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
struct seg6_hmac_info *hinfo;
int ret;
- ret = rhashtable_walk_start(iter);
- if (ret && ret != -EAGAIN)
- goto done;
+ rhashtable_walk_start(iter);
for (;;) {
hinfo = rhashtable_walk_next(iter);
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 825b8e01f947..ba3767ef5e93 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -501,7 +501,7 @@ static struct seg6_action_desc *__get_action_desc(int action)
struct seg6_action_desc *desc;
int i, count;
- count = sizeof(seg6_action_table) / sizeof(struct seg6_action_desc);
+ count = ARRAY_SIZE(seg6_action_table);
for (i = 0; i < count; i++) {
desc = &seg6_action_table[i];
if (desc->action == action)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d7dc23c1b2ca..3873d3877135 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -934,8 +934,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
df = 0;
}
- if (tunnel->parms.iph.daddr && skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ if (tunnel->parms.iph.daddr)
+ skb_dst_update_pmtu(skb, mtu);
if (skb->len > mtu && !skb_is_gso(skb)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7178476b3d2f..a1ab29e2ab3b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -176,8 +176,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
/* If interface is set while binding, indices
* must coincide.
*/
- if (sk->sk_bound_dev_if &&
- sk->sk_bound_dev_if != usin->sin6_scope_id)
+ if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
return -EINVAL;
sk->sk_bound_dev_if = usin->sin6_scope_id;
@@ -1795,7 +1794,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
timer_expires = jiffies;
}
- state = sk_state_load(sp);
+ state = inet_sk_state_load(sp);
if (state == TCP_LISTEN)
rx_queue = sp->sk_ack_backlog;
else
@@ -1884,7 +1883,6 @@ out:
}
static const struct file_operations tcp6_afinfo_seq_fops = {
- .owner = THIS_MODULE,
.open = tcp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index d883c9204c01..278e49cd67d4 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
{
struct tcphdr *th;
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
+ return ERR_PTR(-EINVAL);
+
if (!pskb_may_pull(skb, sizeof(*th)))
return ERR_PTR(-EINVAL);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3f30fa313bf2..52e3ea0e6f50 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -89,28 +89,12 @@ static u32 udp6_ehashfn(const struct net *net,
udp_ipv6_hash_secret + net_hash_mix(net));
}
-static u32 udp6_portaddr_hash(const struct net *net,
- const struct in6_addr *addr6,
- unsigned int port)
-{
- unsigned int hash, mix = net_hash_mix(net);
-
- if (ipv6_addr_any(addr6))
- hash = jhash_1word(0, mix);
- else if (ipv6_addr_v4mapped(addr6))
- hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
- else
- hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
-
- return hash ^ port;
-}
-
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
- udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
+ ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
unsigned int hash2_partial =
- udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
+ ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@ -119,7 +103,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
static void udp_v6_rehash(struct sock *sk)
{
- u16 new_hash = udp6_portaddr_hash(sock_net(sk),
+ u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_num);
@@ -184,7 +168,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
struct udp_hslot *hslot2, struct sk_buff *skb)
{
struct sock *sk, *result;
- int score, badness, matches = 0, reuseport = 0;
+ int score, badness;
u32 hash = 0;
result = NULL;
@@ -193,8 +177,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, sdif, exact_dif);
if (score > badness) {
- reuseport = sk->sk_reuseport;
- if (reuseport) {
+ if (sk->sk_reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
@@ -202,15 +185,9 @@ static struct sock *udp6_lib_lookup2(struct net *net,
sizeof(struct udphdr));
if (result)
return result;
- matches = 1;
}
result = sk;
badness = score;
- } else if (score == badness && reuseport) {
- matches++;
- if (reciprocal_scale(hash, matches) == 0)
- result = sk;
- hash = next_pseudo_random32(hash);
}
}
return result;
@@ -228,11 +205,11 @@ struct sock *__udp6_lib_lookup(struct net *net,
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
bool exact_dif = udp6_lib_exact_dif_match(net, skb);
- int score, badness, matches = 0, reuseport = 0;
+ int score, badness;
u32 hash = 0;
if (hslot->count > 10) {
- hash2 = udp6_portaddr_hash(net, daddr, hnum);
+ hash2 = ipv6_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
@@ -243,7 +220,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
hslot2, skb);
if (!result) {
unsigned int old_slot2 = slot2;
- hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
+ hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
/* avoid searching the same slot again. */
if (unlikely(slot2 == old_slot2))
@@ -267,23 +244,16 @@ begin:
score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
sdif, exact_dif);
if (score > badness) {
- reuseport = sk->sk_reuseport;
- if (reuseport) {
+ if (sk->sk_reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
if (result)
return result;
- matches = 1;
}
result = sk;
badness = score;
- } else if (score == badness && reuseport) {
- matches++;
- if (reciprocal_scale(hash, matches) == 0)
- result = sk;
- hash = next_pseudo_random32(hash);
}
}
return result;
@@ -719,9 +689,9 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct sk_buff *nskb;
if (use_hash2) {
- hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
+ hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
udptable->mask;
- hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
+ hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
hslot = &udptable->hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
@@ -909,7 +879,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
int dif, int sdif)
{
unsigned short hnum = ntohs(loc_port);
- unsigned int hash2 = udp6_portaddr_hash(net, loc_addr, hnum);
+ unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
unsigned int slot2 = hash2 & udp_table.mask;
struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
@@ -1509,7 +1479,6 @@ int udp6_seq_show(struct seq_file *seq, void *v)
}
static const struct file_operations udp6_afinfo_seq_fops = {
- .owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a0f89ad76f9d..2a04dc9c781b 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
const struct ipv6hdr *ipv6h;
struct udphdr *uh;
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+ goto out;
+
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 2784cc363f2b..14ae32bb1f3d 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -94,7 +94,6 @@ void udplitev6_exit(void)
#ifdef CONFIG_PROC_FS
static const struct file_operations udplite6_afinfo_seq_fops = {
- .owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 02556e356f87..bb935a3b7fea 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -59,7 +59,7 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
if (x->props.flags & XFRM_STATE_NOECN)
dsfield &= ~INET_ECN_MASK;
ipv6_change_dsfield(top_iph, 0, dsfield);
- top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
+ top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
return 0;
@@ -92,6 +92,7 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
+ eth_hdr(skb)->h_proto = skb->protocol;
err = 0;
@@ -105,17 +106,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
{
__skb_push(skb, skb->mac_len);
return skb_mac_gso_segment(skb, features);
-
}
static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- if (xo->flags & XFRM_GSO_SEGMENT) {
- skb->network_header = skb->network_header - x->props.header_len;
+ if (xo->flags & XFRM_GSO_SEGMENT)
skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
- }
skb_reset_mac_len(skb);
pskb_pull(skb, skb->mac_len + x->props.header_len);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 885ade234a49..09fb44ee3b45 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -265,7 +265,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
in6_dev_put(xdst->u.rt6.rt6i_idev);
xdst->u.rt6.rt6i_idev = loopback_idev;
in6_dev_hold(loopback_idev);
- xdst = (struct xfrm_dst *)xdst->u.dst.child;
+ xdst = (struct xfrm_dst *)xfrm_dst_child(&xdst->u.dst);
} while (xdst->u.dst.xfrm);
__in6_dev_put(loopback_idev);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 148533169b1d..64331158d693 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1474,7 +1474,7 @@ done:
return copied;
}
-static inline unsigned int iucv_accept_poll(struct sock *parent)
+static inline __poll_t iucv_accept_poll(struct sock *parent)
{
struct iucv_sock *isk, *n;
struct sock *sk;
@@ -1489,11 +1489,11 @@ static inline unsigned int iucv_accept_poll(struct sock *parent)
return 0;
}
-unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
sock_poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c
index bd5723315069..9d5649e4e8b7 100644
--- a/net/kcm/kcmproc.c
+++ b/net/kcm/kcmproc.c
@@ -247,7 +247,6 @@ static int kcm_seq_show(struct seq_file *seq, void *v)
}
static const struct file_operations kcm_seq_fops = {
- .owner = THIS_MODULE,
.open = kcm_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -397,7 +396,6 @@ static int kcm_stats_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations kcm_stats_seq_fops = {
- .owner = THIS_MODULE,
.open = kcm_stats_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index d4e98f20fc2a..4a8d407f8902 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1387,8 +1387,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
if (!csk)
return -EINVAL;
- /* We must prevent loops or risk deadlock ! */
- if (csk->sk_family == PF_KCM)
+ /* Only allow TCP sockets to be attached for now */
+ if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
+ csk->sk_protocol != IPPROTO_TCP)
+ return -EOPNOTSUPP;
+
+ /* Don't allow listeners or closed sockets */
+ if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
return -EOPNOTSUPP;
psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
@@ -1405,9 +1410,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
return err;
}
- sock_hold(csk);
-
write_lock_bh(&csk->sk_callback_lock);
+
+	/* Check if sk_user_data is already in use by KCM or someone else.
+ * Must be done under lock to prevent race conditions.
+ */
+ if (csk->sk_user_data) {
+ write_unlock_bh(&csk->sk_callback_lock);
+ strp_done(&psock->strp);
+ kmem_cache_free(kcm_psockp, psock);
+ return -EALREADY;
+ }
+
psock->save_data_ready = csk->sk_data_ready;
psock->save_write_space = csk->sk_write_space;
psock->save_state_change = csk->sk_state_change;
@@ -1415,8 +1429,11 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
csk->sk_data_ready = psock_data_ready;
csk->sk_write_space = psock_write_space;
csk->sk_state_change = psock_state_change;
+
write_unlock_bh(&csk->sk_callback_lock);
+ sock_hold(csk);
+
/* Finished initialization, now add the psock to the MUX. */
spin_lock_bh(&mux->lock);
head = &mux->psocks;
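The sk_user_data test closes a race in which two attachers could each install their callbacks on the same csk: the check and the callback takeover now sit in one sk_callback_lock critical section, so the loser observes the socket already claimed and backs off with -EALREADY. A generic user-space sketch of the check-and-claim shape (illustrative only; in KCM the actual claim is the callback swap shown above):

	#include <errno.h>
	#include <pthread.h>

	struct sock_like {
		pthread_rwlock_t callback_lock;
		void *user_data;	/* non-NULL once somebody owns the socket */
	};

	static int claim_socket(struct sock_like *csk, void *owner)
	{
		pthread_rwlock_wrlock(&csk->callback_lock);
		if (csk->user_data) {
			pthread_rwlock_unlock(&csk->callback_lock);
			return -EALREADY;	/* someone attached first */
		}
		csk->user_data = owner;		/* claim while holding the lock */
		pthread_rwlock_unlock(&csk->callback_lock);
		return 0;
	}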
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3dffb892d52c..7e2e7188e7f4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
#endif
int len;
+ if (sp->sadb_address_len <
+ DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
+ sizeof(uint64_t)))
+ return -EINVAL;
+
switch (addr->sa_family) {
case AF_INET:
len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
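The length check added above must hold before addr->sa_family is dereferenced in the switch: sadb_address_len counts 64-bit units and has to cover the fixed sadb_address header plus the family field. A worked sketch of the arithmetic, under the usual layout assumptions (struct sadb_address is 8 bytes and sa_family_t ends at offset 2 of struct sockaddr), which gives a minimum of two units:

	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		const unsigned int sadb_address_sz = 8;	/* assumed header size */
		const unsigned int sa_family_end = 2;	/* offsetofend(..., sa_family) */

		printf("min sadb_address_len = %zu\n",
		       DIV_ROUND_UP(sadb_address_sz + sa_family_end,
				    sizeof(uint64_t)));	/* prints 2 */
		return 0;
	}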
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
uint16_t ext_type;
int ext_len;
+ if (len < sizeof(*ehdr))
+ return -EINVAL;
+
ext_len = ehdr->sadb_ext_len;
ext_len *= sizeof(uint64_t);
ext_type = ehdr->sadb_ext_type;
@@ -2194,8 +2202,10 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
return PTR_ERR(out_skb);
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(out_skb);
return err;
+ }
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = PF_KEY_V2;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 115918ad8eca..194a7483bb93 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -662,10 +662,9 @@ discard:
* |x|S|x|x|x|x|x|x| Sequence Number |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * Cookie value, sublayer format and offset (pad) are negotiated with
- * the peer when the session is set up. Unlike L2TPv2, we do not need
- * to parse the packet header to determine if optional fields are
- * present.
+ * Cookie value and sublayer format are negotiated with the peer when
+ * the session is set up. Unlike L2TPv2, we do not need to parse the
+ * packet header to determine if optional fields are present.
*
* Caller must already have parsed the frame and determined that it is
* a data (not control) frame before coming here. Fields up to the
@@ -731,11 +730,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
"%s: recv data ns=%u, session nr=%u\n",
session->name, ns, session->nr);
}
+ ptr += 4;
}
- /* Advance past L2-specific header, if present */
- ptr += session->l2specific_len;
-
if (L2TP_SKB_CB(skb)->has_seq) {
/* Received a packet with sequence numbers. If we're the LNS,
	 * check if we are sending sequence numbers and if not,
@@ -780,10 +777,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
}
}
- /* Session data offset is handled differently for L2TPv2 and
- * L2TPv3. For L2TPv2, there is an optional 16-bit value in
- * the header. For L2TPv3, the offset is negotiated using AVPs
- * in the session setup control protocol.
+ /* Session data offset is defined only for L2TPv2 and is
+ * indicated by an optional 16-bit value in the header.
*/
if (tunnel->version == L2TP_HDR_VER_2) {
/* If offset bit set, skip it. */
@@ -791,8 +786,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
offset = ntohs(*(__be16 *)ptr);
ptr += 2 + offset;
}
- } else
- ptr += session->offset;
+ }
offset = ptr - optr;
if (!pskb_may_pull(skb, offset))
@@ -1052,24 +1046,21 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
memcpy(bufp, &session->cookie[0], session->cookie_len);
bufp += session->cookie_len;
}
- if (session->l2specific_len) {
- if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
- u32 l2h = 0;
- if (session->send_seq) {
- l2h = 0x40000000 | session->ns;
- session->ns++;
- session->ns &= 0xffffff;
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: updated ns to %u\n",
- session->name, session->ns);
- }
+ if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+ u32 l2h = 0;
- *((__be32 *) bufp) = htonl(l2h);
+ if (session->send_seq) {
+ l2h = 0x40000000 | session->ns;
+ session->ns++;
+ session->ns &= 0xffffff;
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: updated ns to %u\n",
+ session->name, session->ns);
}
- bufp += session->l2specific_len;
+
+ *((__be32 *)bufp) = htonl(l2h);
+ bufp += 4;
}
- if (session->offset)
- bufp += session->offset;
return bufp - optr;
}
@@ -1725,7 +1716,7 @@ int l2tp_session_delete(struct l2tp_session *session)
EXPORT_SYMBOL_GPL(l2tp_session_delete);
/* We come here whenever a session's send_seq, cookie_len or
- * l2specific_len parameters are set.
+ * l2specific_type parameters are set.
*/
void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
@@ -1734,7 +1725,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
if (session->send_seq)
session->hdr_len += 4;
} else {
- session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
+ session->hdr_len = 4 + session->cookie_len;
+ session->hdr_len += l2tp_get_l2specific_len(session);
if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
session->hdr_len += 4;
}
@@ -1784,9 +1776,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
session->recv_seq = cfg->recv_seq;
session->lns_mode = cfg->lns_mode;
session->reorder_timeout = cfg->reorder_timeout;
- session->offset = cfg->offset;
session->l2specific_type = cfg->l2specific_type;
- session->l2specific_len = cfg->l2specific_len;
session->cookie_len = cfg->cookie_len;
memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
session->peer_cookie_len = cfg->peer_cookie_len;
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9534e16965cc..9bbee90e9963 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -59,8 +59,6 @@ struct l2tp_session_cfg {
int debug; /* bitmask of debug message
* categories */
u16 vlan_id; /* VLAN pseudowire only */
- u16 offset; /* offset to payload */
- u16 l2specific_len; /* Layer 2 specific length */
u16 l2specific_type; /* Layer 2 specific type */
u8 cookie[8]; /* optional cookie */
int cookie_len; /* 0, 4 or 8 bytes */
@@ -86,9 +84,6 @@ struct l2tp_session {
int cookie_len;
u8 peer_cookie[8];
int peer_cookie_len;
- u16 offset; /* offset from end of L2TP header
- to beginning of data */
- u16 l2specific_len;
u16 l2specific_type;
u16 hdr_len;
u32 nr; /* session NR state (receive) */
@@ -305,6 +300,17 @@ static inline void l2tp_session_dec_refcount(struct l2tp_session *session)
l2tp_session_free(session);
}
+static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
+{
+ switch (session->l2specific_type) {
+ case L2TP_L2SPECTYPE_DEFAULT:
+ return 4;
+ case L2TP_L2SPECTYPE_NONE:
+ default:
+ return 0;
+ }
+}
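With the per-session offset and l2specific_len fields gone, the v3 header length is derived entirely from configuration. A minimal sketch of the computation as l2tp_session_set_header_len() now performs it (names are the sketch's own):

	/* Session ID (4 bytes) + cookie + L2-specific sublayer, plus the
	 * 4-byte flags/version word under UDP encapsulation.
	 */
	static int l2tpv3_hdr_len(int cookie_len, int default_sublayer, int udp_encap)
	{
		int len = 4 + cookie_len;

		len += default_sublayer ? 4 : 0;  /* l2tp_get_l2specific_len() */
		if (udp_encap)
			len += 4;
		return len;			  /* e.g. (4, 1, 1) -> 16 */
	}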
+
#define l2tp_printk(ptr, type, func, fmt, ...) \
do { \
if (((ptr)->debug) & (type)) \
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index eb69411bcb47..72e713da4733 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -180,8 +180,8 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
session->lns_mode ? "LNS" : "LAC",
session->debug,
jiffies_to_msecs(session->reorder_timeout));
- seq_printf(m, " offset %hu l2specific %hu/%hu\n",
- session->offset, session->l2specific_type, session->l2specific_len);
+ seq_printf(m, " offset 0 l2specific %hu/%hu\n",
+ session->l2specific_type, l2tp_get_l2specific_len(session));
if (session->cookie_len) {
seq_printf(m, " cookie %02x%02x%02x%02x",
session->cookie[0], session->cookie[1],
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index a1f24fb2be98..e7ea9c4b89ff 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -547,19 +547,19 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
}
if (tunnel->version > 2) {
- if (info->attrs[L2TP_ATTR_OFFSET])
- cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
-
if (info->attrs[L2TP_ATTR_DATA_SEQ])
cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
- cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
- if (info->attrs[L2TP_ATTR_L2SPEC_TYPE])
+ if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) {
cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);
-
- cfg.l2specific_len = 4;
- if (info->attrs[L2TP_ATTR_L2SPEC_LEN])
- cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]);
+ if (cfg.l2specific_type != L2TP_L2SPECTYPE_DEFAULT &&
+ cfg.l2specific_type != L2TP_L2SPECTYPE_NONE) {
+ ret = -EINVAL;
+ goto out_tunnel;
+ }
+ } else {
+ cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
+ }
if (info->attrs[L2TP_ATTR_COOKIE]) {
u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
@@ -620,27 +620,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
goto out_tunnel;
}
- /* Check that pseudowire-specific params are present */
- switch (cfg.pw_type) {
- case L2TP_PWTYPE_NONE:
- break;
- case L2TP_PWTYPE_ETH_VLAN:
- if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
- ret = -EINVAL;
- goto out_tunnel;
- }
- break;
- case L2TP_PWTYPE_ETH:
- break;
- case L2TP_PWTYPE_PPP:
- case L2TP_PWTYPE_PPP_AC:
- break;
- case L2TP_PWTYPE_IP:
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
ret = l2tp_nl_cmd_ops[cfg.pw_type]->session_create(net, tunnel,
session_id,
peer_session_id,
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index b412fc3351dc..59f246d7b290 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1734,7 +1734,6 @@ static int pppol2tp_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations pppol2tp_proc_fops = {
- .owner = THIS_MODULE,
.open = pppol2tp_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 29c509c54bb2..66821e8a2b7a 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -225,7 +225,6 @@ static int llc_seq_core_open(struct inode *inode, struct file *file)
}
static const struct file_operations llc_seq_socket_fops = {
- .owner = THIS_MODULE,
.open = llc_seq_socket_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -233,7 +232,6 @@ static const struct file_operations llc_seq_socket_fops = {
};
static const struct file_operations llc_seq_core_fops = {
- .owner = THIS_MODULE,
.open = llc_seq_core_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index d444752dbf40..a8b1616cec41 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -153,27 +153,16 @@ EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
*/
static void sta_rx_agg_session_timer_expired(struct timer_list *t)
{
- struct tid_ampdu_rx *tid_rx_timer =
- from_timer(tid_rx_timer, t, session_timer);
- struct sta_info *sta = tid_rx_timer->sta;
- u8 tid = tid_rx_timer->tid;
- struct tid_ampdu_rx *tid_rx;
+ struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer);
+ struct sta_info *sta = tid_rx->sta;
+ u8 tid = tid_rx->tid;
unsigned long timeout;
- rcu_read_lock();
- tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
- if (!tid_rx) {
- rcu_read_unlock();
- return;
- }
-
timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
if (time_is_after_jiffies(timeout)) {
mod_timer(&tid_rx->session_timer, timeout);
- rcu_read_unlock();
return;
}
- rcu_read_unlock();
ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
sta->sta.addr, tid);
@@ -415,10 +404,11 @@ end:
timeout);
}
-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
- u8 dialog_token, u16 timeout,
- u16 start_seq_num, u16 ba_policy, u16 tid,
- u16 buf_size, bool tx, bool auto_seq)
+static void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+ u8 dialog_token, u16 timeout,
+ u16 start_seq_num, u16 ba_policy,
+ u16 tid, u16 buf_size, bool tx,
+ bool auto_seq)
{
mutex_lock(&sta->ampdu_mlme.mtx);
___ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5f8ab5be369f..595c662a61e8 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -392,7 +392,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
* telling the driver. New packets will not go through since
* the aggregation session is no longer OPERATIONAL.
*/
- synchronize_net();
+ if (!local->in_reconfig)
+ synchronize_net();
tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
WLAN_BACK_RECIPIENT :
@@ -429,18 +430,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
*/
static void sta_addba_resp_timer_expired(struct timer_list *t)
{
- struct tid_ampdu_tx *tid_tx_timer =
- from_timer(tid_tx_timer, t, addba_resp_timer);
- struct sta_info *sta = tid_tx_timer->sta;
- u8 tid = tid_tx_timer->tid;
- struct tid_ampdu_tx *tid_tx;
+ struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer);
+ struct sta_info *sta = tid_tx->sta;
+ u8 tid = tid_tx->tid;
/* check if the TID waits for addBA response */
- rcu_read_lock();
- tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
- if (!tid_tx ||
- test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
- rcu_read_unlock();
+ if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
ht_dbg(sta->sdata,
"timer expired on %pM tid %d not expecting addBA response\n",
sta->sta.addr, tid);
@@ -451,7 +446,6 @@ static void sta_addba_resp_timer_expired(struct timer_list *t)
sta->sta.addr, tid);
ieee80211_stop_tx_ba_session(&sta->sta, tid);
- rcu_read_unlock();
}
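These conversions follow the timer_list rework: the timer is embedded in the state it times, so the callback can recover the containing struct with from_timer() (a container_of() wrapper) instead of re-looking it up under RCU and racing with teardown. A stripped-down sketch of the pattern, with user-space stand-ins for the kernel definitions:

	#include <stddef.h>

	struct timer_list { int pending; };	/* stand-in */

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define from_timer(var, t, field) \
		container_of(t, typeof(*(var)), field)

	struct tid_state {
		struct timer_list timer;	/* embedded in its owner */
		int tid;
	};

	static void expired(struct timer_list *t)
	{
		struct tid_state *s = from_timer(s, t, timer);

		(void)s->tid;	/* owner stays valid while the timer is armed */
	}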
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
@@ -529,29 +523,21 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
*/
static void sta_tx_agg_session_timer_expired(struct timer_list *t)
{
- struct tid_ampdu_tx *tid_tx_timer =
- from_timer(tid_tx_timer, t, session_timer);
- struct sta_info *sta = tid_tx_timer->sta;
- u8 tid = tid_tx_timer->tid;
- struct tid_ampdu_tx *tid_tx;
+ struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer);
+ struct sta_info *sta = tid_tx->sta;
+ u8 tid = tid_tx->tid;
unsigned long timeout;
- rcu_read_lock();
- tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
- if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
- rcu_read_unlock();
+ if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
return;
}
timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
if (time_is_after_jiffies(timeout)) {
mod_timer(&tid_tx->session_timer, timeout);
- rcu_read_unlock();
return;
}
- rcu_read_unlock();
-
ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
sta->sta.addr, tid);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fb15d3b97cb2..46028e12e216 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -573,10 +573,12 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
offsetof(typeof(kseq), aes_cmac));
+ /* fall through */
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
offsetof(typeof(kseq), aes_gmac));
+ /* fall through */
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
@@ -2205,6 +2207,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
* for now fall through to allow scanning only when
* beaconing hasn't been configured yet
*/
+ /* fall through */
case NL80211_IFTYPE_AP:
/*
* If the scan has been forced (and the driver supports
@@ -2373,10 +2376,17 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata;
enum nl80211_tx_power_setting txp_type = type;
bool update_txp_type = false;
+ bool has_monitor = false;
if (wdev) {
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ sdata = rtnl_dereference(local->monitor_sdata);
+ if (!sdata)
+ return -EOPNOTSUPP;
+ }
+
switch (type) {
case NL80211_TX_POWER_AUTOMATIC:
sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -2415,15 +2425,34 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ has_monitor = true;
+ continue;
+ }
sdata->user_power_level = local->user_power_level;
if (txp_type != sdata->vif.bss_conf.txpower_type)
update_txp_type = true;
sdata->vif.bss_conf.txpower_type = txp_type;
}
- list_for_each_entry(sdata, &local->interfaces, list)
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ continue;
ieee80211_recalc_txpower(sdata, update_txp_type);
+ }
mutex_unlock(&local->iflist_mtx);
+ if (has_monitor) {
+ sdata = rtnl_dereference(local->monitor_sdata);
+ if (sdata) {
+ sdata->user_power_level = local->user_power_level;
+ if (txp_type != sdata->vif.bss_conf.txpower_type)
+ update_txp_type = true;
+ sdata->vif.bss_conf.txpower_type = txp_type;
+
+ ieee80211_recalc_txpower(sdata, update_txp_type);
+ }
+ }
+
return 0;
}
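For context on the set_tx_power hunks: the monitor interface is not on local->interfaces, and local->monitor_sdata is an RCU-managed pointer that only changes under RTNL, hence the separate rtnl_dereference() path. A sketch of that access pattern, assuming RTNL is held (the helper name is illustrative):

static void demo_recalc_monitor(struct ieee80211_local *local,
                                bool update_txp_type)
{
        struct ieee80211_sub_if_data *sdata;

        ASSERT_RTNL();  /* monitor_sdata is only updated under RTNL */
        sdata = rtnl_dereference(local->monitor_sdata);
        if (!sdata)     /* no monitor interface allocated */
                return;
        ieee80211_recalc_txpower(sdata, update_txp_type);
}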
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 5fae001f286c..1f466d12a6bc 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -211,6 +211,7 @@ static const char *hw_flag_names[] = {
FLAG(TX_FRAG_LIST),
FLAG(REPORTS_LOW_ACK),
FLAG(SUPPORTS_TX_FRAG),
+ FLAG(SUPPORTS_TDLS_BUFFER_STA),
#undef FLAG
};
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index b15412c21ac9..444ea8d127fe 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -420,7 +420,7 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
default:
p += scnprintf(p, sizeof(buf) + buf - p,
"\t\tMAX-MPDU-UNKNOWN\n");
- };
+ }
switch (vhtc->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case 0:
p += scnprintf(p, sizeof(buf) + buf - p,
@@ -438,7 +438,7 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf) + buf - p,
"\t\tUNKNOWN-MHZ: 0x%x\n",
(vhtc->cap >> 2) & 0x3);
- };
+ }
PFLAG(RXLDPC, "RXLDPC");
PFLAG(SHORT_GI_80, "SHORT-GI-80");
PFLAG(SHORT_GI_160, "SHORT-GI-160");
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index c7f93fd9ca7a..4d82fe7d627c 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -165,7 +165,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
sdata->vif.type == NL80211_IFTYPE_NAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
- !sdata->vif.mu_mimo_owner)))
+ !sdata->vif.mu_mimo_owner &&
+ !(changed & BSS_CHANGED_TXPOWER))))
return;
if (!check_sdata_in_driver(sdata))
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 1621b6ab17ba..d7523530d3f8 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -492,6 +492,7 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
+ /* fall through */
case IEEE80211_SMPS_OFF:
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_DISABLED;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 885d00b41911..26900025de2f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1757,10 +1757,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason, bool stop);
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason, bool stop);
-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
- u8 dialog_token, u16 timeout,
- u16 start_seq_num, u16 ba_policy, u16 tid,
- u16 buf_size, bool tx, bool auto_seq);
void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
u8 dialog_token, u16 timeout,
u16 start_seq_num, u16 ba_policy, u16 tid,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 13b16f90e1cf..5fe01f82df12 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1474,7 +1474,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
break;
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
- BUG();
+ WARN_ON(1);
break;
}
@@ -1633,7 +1633,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
goto out_unlock;
}
}
- /* otherwise fall through */
+ /* fall through */
default:
/* assign a new address if possible -- try n_addresses first */
for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 938049395f90..aee05ec3f7ea 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -178,13 +178,17 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
(key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
decrease_tailroom_need_count(sdata, 1);
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
+ WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) &&
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC));
+
return 0;
}
@@ -237,7 +241,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta = key->sta;
sdata = key->sdata;
- if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
(key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
increment_tailroom_need_count(sdata);
@@ -1104,7 +1109,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
(key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
increment_tailroom_need_count(key->sdata);
}
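The three key.c hunks repeat one predicate, which now treats IEEE80211_KEY_FLAG_PUT_MIC_SPACE exactly like GENERATE_MMIC for tailroom accounting. As a sketch (the helper below is illustrative, not part of the patch):

static bool demo_key_needs_tailroom(const struct ieee80211_key_conf *conf)
{
        /* SW-generated MMIC, reserved MIC space and explicitly reserved
         * tailroom all keep the tailroom need count elevated.
         */
        return (conf->flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
                               IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
               (conf->flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM);
}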
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index e054a2fd8d38..0785d04a80bc 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -263,6 +263,9 @@ static void ieee80211_restart_work(struct work_struct *work)
flush_delayed_work(&local->roc_work);
flush_work(&local->hw_roc_done);
+ /* wait for all packet processing to be done */
+ synchronize_net();
+
ieee80211_reconfig(local);
rtnl_unlock();
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 5e27364e10ac..73ac607beb5d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -989,8 +989,10 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.bss_conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
sta_flags |= IEEE80211_STA_DISABLE_HT;
+ /* fall through */
case NL80211_CHAN_WIDTH_20:
sta_flags |= IEEE80211_STA_DISABLE_40MHZ;
+ /* fall through */
case NL80211_CHAN_WIDTH_40:
sta_flags |= IEEE80211_STA_DISABLE_VHT;
break;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 4394463a0c2e..35ad3983ae4b 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1250,6 +1250,7 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
break;
case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
+ /* fall through */
case IEEE80211_PROACTIVE_PREQ_NO_PREP:
interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
target_flags |= IEEE80211_PREQ_TO_FLAG |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 86c8dfef56a4..a5125624a76d 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -257,9 +257,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
if (ret)
return NULL;
- ret = rhashtable_walk_start(&iter);
- if (ret && ret != -EAGAIN)
- goto err;
+ rhashtable_walk_start(&iter);
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -269,7 +267,6 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
if (i++ == idx)
break;
}
-err:
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
@@ -513,9 +510,7 @@ void mesh_plink_broken(struct sta_info *sta)
if (ret)
return;
- ret = rhashtable_walk_start(&iter);
- if (ret && ret != -EAGAIN)
- goto out;
+ rhashtable_walk_start(&iter);
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -535,7 +530,6 @@ void mesh_plink_broken(struct sta_info *sta)
WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
}
}
-out:
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
}
@@ -584,9 +578,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
if (ret)
return;
- ret = rhashtable_walk_start(&iter);
- if (ret && ret != -EAGAIN)
- goto out;
+ rhashtable_walk_start(&iter);
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -597,7 +589,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
if (rcu_access_pointer(mpath->next_hop) == sta)
__mesh_path_del(tbl, mpath);
}
-out:
+
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
}
@@ -614,9 +606,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
if (ret)
return;
- ret = rhashtable_walk_start(&iter);
- if (ret && ret != -EAGAIN)
- goto out;
+ rhashtable_walk_start(&iter);
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -627,7 +617,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
if (ether_addr_equal(mpath->mpp, proxy))
__mesh_path_del(tbl, mpath);
}
-out:
+
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
}
@@ -642,9 +632,7 @@ static void table_flush_by_iface(struct mesh_table *tbl)
if (ret)
return;
- ret = rhashtable_walk_start(&iter);
- if (ret && ret != -EAGAIN)
- goto out;
+ rhashtable_walk_start(&iter);
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -653,7 +641,7 @@ static void table_flush_by_iface(struct mesh_table *tbl)
break;
__mesh_path_del(tbl, mpath);
}
-out:
+
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
}
@@ -873,9 +861,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
if (ret)
return;
- ret = rhashtable_walk_start(&iter);
- if (ret && ret != -EAGAIN)
- goto out;
+ rhashtable_walk_start(&iter);
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -887,7 +873,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
__mesh_path_del(tbl, mpath);
}
-out:
+
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
}
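All of the mesh_pathtbl.c hunks converge on one idiom: rhashtable_walk_start() no longer returns an error, so only rhashtable_walk_next() has to be checked for -EAGAIN (raised while the table resizes). A self-contained sketch of the resulting walk, with illustrative names:

#include <linux/err.h>
#include <linux/rhashtable.h>

static void demo_walk(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        void *obj;

        if (rhashtable_walk_init(ht, &iter, GFP_ATOMIC))
                return;

        rhashtable_walk_start(&iter);
        while ((obj = rhashtable_walk_next(&iter))) {
                if (IS_ERR(obj) && PTR_ERR(obj) == -EAGAIN)
                        continue;       /* resize in flight; keep walking */
                if (IS_ERR(obj))
                        break;
                /* ... inspect obj ... */
        }
        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}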
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index e2d00cce3c17..0f6c9ca59062 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -672,7 +672,7 @@ void mesh_plink_timer(struct timer_list *t)
break;
}
reason = WLAN_REASON_MESH_MAX_RETRIES;
- /* fall through on else */
+ /* fall through */
case NL80211_PLINK_CNF_RCVD:
/* confirm timer */
if (!reason)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index c244691deab9..39b660b9a908 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -473,6 +473,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
+ /* fall through */
case IEEE80211_SMPS_OFF:
cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
IEEE80211_HT_CAP_SM_PS_SHIFT;
@@ -2861,10 +2862,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
- if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
- sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n",
- aid);
- aid &= ~(BIT(15) | BIT(14));
+ /*
+ * The 5 MSB of the AID field are reserved
+ * (802.11-2016 9.4.1.8 AID field)
+ */
+ aid &= 0x7ff;
ifmgd->broken_ap = false;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index faf4f6055000..f1d40b6645ff 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -801,14 +801,14 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
case NL80211_IFTYPE_ADHOC:
if (!sdata->vif.bss_conf.ibss_joined)
need_offchan = true;
- /* fall through */
#ifdef CONFIG_MAC80211_MESH
+ /* fall through */
case NL80211_IFTYPE_MESH_POINT:
if (ieee80211_vif_is_mesh(&sdata->vif) &&
!sdata->u.mesh.mesh_id_len)
need_offchan = true;
- /* fall through */
#endif
+ /* fall through */
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_P2P_GO:
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 70e9d2ca8bbe..fd580614085b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1607,23 +1607,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
/*
* Change STA power saving mode only at the end of a frame
- * exchange sequence.
+ * exchange sequence, and only for a data or management
+ * frame as specified in IEEE 802.11-2016 11.2.3.2
*/
if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
- !ieee80211_is_back_req(hdr->frame_control) &&
+ (ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_data(hdr->frame_control)) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
- /*
- * PM bit is only checked in frames where it isn't reserved,
- * in AP mode it's reserved in non-bufferable management frames
- * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
- * BAR frames should be ignored as specified in
- * IEEE 802.11-2012 10.2.1.2.
- */
- (!ieee80211_is_mgmt(hdr->frame_control) ||
- ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
+ rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
if (!ieee80211_has_pm(hdr->frame_control))
sta_ps_end(sta);
@@ -3632,6 +3625,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
}
return true;
case NL80211_IFTYPE_MESH_POINT:
+ if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
+ return false;
if (multicast)
return true;
return ether_addr_equal(sdata->vif.addr, hdr->addr1);
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 91093d4a2f84..5cd5e6e5834e 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -47,6 +47,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
!ifmgd->tdls_wider_bw_prohibited;
+ bool buffer_sta = ieee80211_hw_check(&local->hw,
+ SUPPORTS_TDLS_BUFFER_STA);
struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata);
bool vht = sband && sband->vht_cap.vht_supported;
u8 *pos = skb_put(skb, 10);
@@ -56,7 +58,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
*pos++ = 0x0;
*pos++ = 0x0;
*pos++ = 0x0;
- *pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
+ *pos++ = (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0) |
+ (buffer_sta ? WLAN_EXT_CAPA4_TDLS_BUFFER_STA : 0);
*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
*pos++ = 0;
*pos++ = 0;
@@ -236,6 +239,7 @@ static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac)
switch (ac) {
default:
WARN_ON_ONCE(1);
+ /* fall through */
case 0:
return IEEE80211_AC_BE;
case 1:
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3160954fc406..25904af38839 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2922,7 +2922,9 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
- mmic = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ mmic = build.key->conf.flags &
+ (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE);
/* don't handle software crypto */
if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d57e5f6bd8b6..1f82191ce601 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2110,15 +2110,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0);
wake_up:
- if (local->in_reconfig) {
- local->in_reconfig = false;
- barrier();
-
- /* Restart deferred ROCs */
- mutex_lock(&local->mtx);
- ieee80211_start_next_roc(local);
- mutex_unlock(&local->mtx);
- }
if (local->monitors == local->open_count && local->monitors > 0)
ieee80211_add_virtual_monitor(local);
@@ -2146,6 +2137,16 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_unlock(&local->sta_mtx);
}
+ if (local->in_reconfig) {
+ local->in_reconfig = false;
+ barrier();
+
+ /* Restart deferred ROCs */
+ mutex_lock(&local->mtx);
+ ieee80211_start_next_roc(local);
+ mutex_unlock(&local->mtx);
+ }
+
ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
false);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 3e3d3014e9ab..5f7c96368b11 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -165,6 +165,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
qos = sta->sta.wme;
break;
}
+ /* fall through */
case NL80211_IFTYPE_AP:
ra = skb->data;
break;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b58722d9de37..785056cb76f6 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -1,7 +1,7 @@
/*
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2008, Jouni Malinen <j@w1.fi>
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -59,8 +59,9 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
if (info->control.hw_key &&
(info->flags & IEEE80211_TX_CTL_DONTFRAG ||
ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) &&
- !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
- /* hwaccel - with no need for SW-generated MMIC */
+ !(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) {
+ /* hwaccel - with no need for SW-generated MMIC or MIC space */
return TX_CONTINUE;
}
@@ -75,8 +76,15 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
skb_tailroom(skb), tail))
return TX_DROP;
- key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
mic = skb_put(skb, MICHAEL_MIC_LEN);
+
+ if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) {
+ /* Zeroed MIC can help with debug */
+ memset(mic, 0, MICHAEL_MIC_LEN);
+ return TX_CONTINUE;
+ }
+
+ key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
michael_mic(key, hdr, data, data_len, mic);
if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
mic[0]++;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 8ca9915befc8..5dce8336d33f 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2510,12 +2510,15 @@ static int __init mpls_init(void)
rtnl_af_register(&mpls_af_ops);
- rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, 0);
- rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, 0);
- rtnl_register(PF_MPLS, RTM_GETROUTE, mpls_getroute, mpls_dump_routes,
- 0);
- rtnl_register(PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf,
- mpls_netconf_dump_devconf, 0);
+ rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_NEWROUTE,
+ mpls_rtm_newroute, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_DELROUTE,
+ mpls_rtm_delroute, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
+ mpls_getroute, mpls_dump_routes, 0);
+ rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
+ mpls_netconf_get_devconf,
+ mpls_netconf_dump_devconf, 0);
err = ipgre_tunnel_encap_add_mpls_ops();
if (err)
pr_err("Can't add mpls over gre tunnel ops\n");
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 67e708e98ccf..e7b05de1e6d1 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -143,43 +143,14 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
if (!nc)
return -ENODEV;
- /* If the channel is active one, we need reconfigure it */
spin_lock_irqsave(&nc->lock, flags);
ncm = &nc->modes[NCSI_MODE_LINK];
hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
ncm->data[3] = ntohl(hncdsc->status);
- netdev_info(ndp->ndev.dev, "NCSI: HNCDSC AEN - channel %u state %s\n",
- nc->id, ncm->data[3] & 0x3 ? "up" : "down");
- if (!list_empty(&nc->link) ||
- nc->state != NCSI_CHANNEL_ACTIVE) {
- spin_unlock_irqrestore(&nc->lock, flags);
- return 0;
- }
-
- spin_unlock_irqrestore(&nc->lock, flags);
- if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
- ndp->flags |= NCSI_DEV_RESHUFFLE;
-
- /* If this channel is the active one and the link doesn't
- * work, we have to choose another channel to be active one.
- * The logic here is exactly similar to what we do when link
- * is down on the active channel.
- *
- * On the other hand, we need configure it when host driver
- * state on the active channel becomes ready.
- */
- ncsi_stop_channel_monitor(nc);
-
- spin_lock_irqsave(&nc->lock, flags);
- nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
- NCSI_CHANNEL_ACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
-
- spin_lock_irqsave(&ndp->lock, flags);
- list_add_tail_rcu(&nc->link, &ndp->channel_queue);
- spin_unlock_irqrestore(&ndp->lock, flags);
-
- ncsi_process_next_channel(ndp);
+ netdev_printk(KERN_DEBUG, ndp->ndev.dev,
+ "NCSI: host driver %srunning on channel %u\n",
+ ncm->data[3] & 0x1 ? "" : "not ", nc->id);
return 0;
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e4a13cc8a2e7..9019fa98003d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -12,6 +12,12 @@ config NETFILTER_INGRESS
config NETFILTER_NETLINK
tristate
+config NETFILTER_FAMILY_BRIDGE
+ bool
+
+config NETFILTER_FAMILY_ARP
+ bool
+
config NETFILTER_NETLINK_ACCT
tristate "Netfilter NFACCT over NFNETLINK interface"
depends on NETFILTER_ADVANCED
@@ -62,6 +68,8 @@ config NF_LOG_NETDEV
select NF_LOG_COMMON
if NF_CONNTRACK
+config NETFILTER_CONNCOUNT
+ tristate
config NF_CONNTRACK_MARK
bool 'Connection mark tracking support'
@@ -497,6 +505,13 @@ config NFT_CT
This option adds the "ct" expression that you can use to match
connection tracking information such as the flow state.
+config NFT_FLOW_OFFLOAD
+ depends on NF_CONNTRACK && NF_FLOW_TABLE
+ tristate "Netfilter nf_tables hardware flow offload module"
+ help
+ This option adds the "flow_offload" expression that you can use to
+	  choose which flows are placed into the hardware.
+
config NFT_SET_RBTREE
tristate "Netfilter nf_tables rbtree set module"
help
@@ -649,6 +664,23 @@ endif # NF_TABLES_NETDEV
endif # NF_TABLES
+config NF_FLOW_TABLE_INET
+ tristate "Netfilter flow table mixed IPv4/IPv6 module"
+ depends on NF_FLOW_TABLE_IPV4 && NF_FLOW_TABLE_IPV6
+ select NF_FLOW_TABLE
+ help
+	  This option adds support for the mixed IPv4/IPv6 flow table.
+
+ To compile it as a module, choose M here.
+
+config NF_FLOW_TABLE
+ tristate "Netfilter flow table module"
+ depends on NF_CONNTRACK && NF_TABLES
+ help
+ This option adds the flow table core infrastructure.
+
+ To compile it as a module, choose M here.
+
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
default m if NETFILTER_ADVANCED=n
@@ -1120,6 +1152,7 @@ config NETFILTER_XT_MATCH_CONNLIMIT
tristate '"connlimit" match support'
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
+ select NETFILTER_CONNCOUNT
---help---
This match allows you to match against the number of parallel
connections to a server per client IP address (or address block).
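NETFILTER_CONNCOUNT is the counting engine split out of xt_connlimit; the implementation arrives below as net/netfilter/nf_conncount.c. A hedged usage sketch against that new API (demo_count and the single-u32 key are illustrative; the signatures match the new file):

#include <linux/err.h>
#include <net/netfilter/nf_conntrack_count.h>

static int demo_count(struct net *net, const u32 *key,
                      const struct nf_conntrack_tuple *tuple,
                      const struct nf_conntrack_zone *zone)
{
        struct nf_conncount_data *data;
        unsigned int count;

        /* keylen is in bytes; must be a non-zero multiple of sizeof(u32) */
        data = nf_conncount_init(net, NFPROTO_IPV4, sizeof(u32));
        if (IS_ERR(data))
                return PTR_ERR(data);

        count = nf_conncount_count(net, data, key, NFPROTO_IPV4, tuple, zone);
        nf_conncount_destroy(net, NFPROTO_IPV4, data);
        return count;
}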
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index f78ed2470831..5d9b8b959e58 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
+netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o utils.o
nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
@@ -67,6 +67,8 @@ obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
# SYNPROXY
obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
+obj-$(CONFIG_NETFILTER_CONNCOUNT) += nf_conncount.o
+
# generic packet duplication from netdev family
obj-$(CONFIG_NF_DUP_NETDEV) += nf_dup_netdev.o
@@ -84,6 +86,7 @@ obj-$(CONFIG_NFT_META) += nft_meta.o
obj-$(CONFIG_NFT_RT) += nft_rt.o
obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o
obj-$(CONFIG_NFT_CT) += nft_ct.o
+obj-$(CONFIG_NFT_FLOW_OFFLOAD) += nft_flow_offload.o
obj-$(CONFIG_NFT_LIMIT) += nft_limit.o
obj-$(CONFIG_NFT_NAT) += nft_nat.o
obj-$(CONFIG_NFT_OBJREF) += nft_objref.o
@@ -107,6 +110,10 @@ obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o
obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o
obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
+# flow table infrastructure
+obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
+obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
+
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 52cd2901a097..0f6b8172fb9a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -4,8 +4,7 @@
* Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
* way.
*
- * Rusty Russell (C)2000 -- This code is GPL.
- * Patrick McHardy (c) 2006-2012
+ * This code is GPL.
*/
#include <linux/kernel.h>
#include <linux/netfilter.h>
@@ -28,34 +27,12 @@
#include "nf_internals.h"
-static DEFINE_MUTEX(afinfo_mutex);
-
-const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
-EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);
DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);
-int nf_register_afinfo(const struct nf_afinfo *afinfo)
-{
- mutex_lock(&afinfo_mutex);
- RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
- mutex_unlock(&afinfo_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(nf_register_afinfo);
-
-void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
-{
- mutex_lock(&afinfo_mutex);
- RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
- mutex_unlock(&afinfo_mutex);
- synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
-
#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
@@ -74,7 +51,8 @@ static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
struct nf_hook_entries *e;
size_t alloc = sizeof(*e) +
sizeof(struct nf_hook_entry) * num +
- sizeof(struct nf_hook_ops *) * num;
+ sizeof(struct nf_hook_ops *) * num +
+ sizeof(struct nf_hook_entries_rcu_head);
if (num == 0)
return NULL;
@@ -85,6 +63,30 @@ static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
return e;
}
+static void __nf_hook_entries_free(struct rcu_head *h)
+{
+ struct nf_hook_entries_rcu_head *head;
+
+ head = container_of(h, struct nf_hook_entries_rcu_head, head);
+ kvfree(head->allocation);
+}
+
+static void nf_hook_entries_free(struct nf_hook_entries *e)
+{
+ struct nf_hook_entries_rcu_head *head;
+ struct nf_hook_ops **ops;
+ unsigned int num;
+
+ if (!e)
+ return;
+
+ num = e->num_hook_entries;
+ ops = nf_hook_entries_get_hook_ops(e);
+ head = (void *)&ops[num];
+ head->allocation = e;
+ call_rcu(&head->head, __nf_hook_entries_free);
+}
+
static unsigned int accept_all(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -135,6 +137,12 @@ nf_hook_entries_grow(const struct nf_hook_entries *old,
++i;
continue;
}
+
+ if (reg->nat_hook && orig_ops[i]->nat_hook) {
+ kvfree(new);
+ return ERR_PTR(-EBUSY);
+ }
+
if (inserted || reg->priority > orig_ops[i]->priority) {
new_ops[nhooks] = (void *)orig_ops[i];
new->hooks[nhooks] = old->hooks[i];
@@ -237,27 +245,61 @@ out_assign:
return old;
}
-static struct nf_hook_entries __rcu **nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg)
+static struct nf_hook_entries __rcu **
+nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
+ struct net_device *dev)
{
- if (reg->pf != NFPROTO_NETDEV)
- return net->nf.hooks[reg->pf]+reg->hooknum;
+ switch (pf) {
+ case NFPROTO_NETDEV:
+ break;
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ case NFPROTO_ARP:
+ if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
+ return NULL;
+ return net->nf.hooks_arp + hooknum;
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ case NFPROTO_BRIDGE:
+ if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
+ return NULL;
+ return net->nf.hooks_bridge + hooknum;
+#endif
+ case NFPROTO_IPV4:
+ if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
+ return NULL;
+ return net->nf.hooks_ipv4 + hooknum;
+ case NFPROTO_IPV6:
+ if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
+ return NULL;
+ return net->nf.hooks_ipv6 + hooknum;
+#if IS_ENABLED(CONFIG_DECNET)
+ case NFPROTO_DECNET:
+ if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum))
+ return NULL;
+ return net->nf.hooks_decnet + hooknum;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
#ifdef CONFIG_NETFILTER_INGRESS
- if (reg->hooknum == NF_NETDEV_INGRESS) {
- if (reg->dev && dev_net(reg->dev) == net)
- return &reg->dev->nf_hooks_ingress;
+ if (hooknum == NF_NETDEV_INGRESS) {
+ if (dev && dev_net(dev) == net)
+ return &dev->nf_hooks_ingress;
}
#endif
WARN_ON_ONCE(1);
return NULL;
}
-int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+static int __nf_register_net_hook(struct net *net, int pf,
+ const struct nf_hook_ops *reg)
{
struct nf_hook_entries *p, *new_hooks;
struct nf_hook_entries __rcu **pp;
- if (reg->pf == NFPROTO_NETDEV) {
+ if (pf == NFPROTO_NETDEV) {
#ifndef CONFIG_NETFILTER_INGRESS
if (reg->hooknum == NF_NETDEV_INGRESS)
return -EOPNOTSUPP;
@@ -267,7 +309,7 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
return -EINVAL;
}
- pp = nf_hook_entry_head(net, reg);
+ pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
if (!pp)
return -EINVAL;
@@ -285,21 +327,19 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
hooks_validate(new_hooks);
#ifdef CONFIG_NETFILTER_INGRESS
- if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+ if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
- static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+ static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]);
#endif
- synchronize_net();
BUG_ON(p == new_hooks);
- kvfree(p);
+ nf_hook_entries_free(p);
return 0;
}
-EXPORT_SYMBOL(nf_register_net_hook);
/*
- * __nf_unregister_net_hook - remove a hook from blob
+ * nf_remove_net_hook - remove a hook from blob
*
* @oldp: current address of hook blob
* @unreg: hook to unregister
@@ -307,8 +347,8 @@ EXPORT_SYMBOL(nf_register_net_hook);
* This cannot fail, hook unregistration must always succeed.
* Therefore replace the to-be-removed hook with a dummy hook.
*/
-static void __nf_unregister_net_hook(struct nf_hook_entries *old,
- const struct nf_hook_ops *unreg)
+static void nf_remove_net_hook(struct nf_hook_entries *old,
+ const struct nf_hook_ops *unreg, int pf)
{
struct nf_hook_ops **orig_ops;
bool found = false;
@@ -326,24 +366,24 @@ static void __nf_unregister_net_hook(struct nf_hook_entries *old,
if (found) {
#ifdef CONFIG_NETFILTER_INGRESS
- if (unreg->pf == NFPROTO_NETDEV && unreg->hooknum == NF_NETDEV_INGRESS)
+ if (pf == NFPROTO_NETDEV && unreg->hooknum == NF_NETDEV_INGRESS)
net_dec_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
- static_key_slow_dec(&nf_hooks_needed[unreg->pf][unreg->hooknum]);
+ static_key_slow_dec(&nf_hooks_needed[pf][unreg->hooknum]);
#endif
} else {
- WARN_ONCE(1, "hook not found, pf %d num %d", unreg->pf, unreg->hooknum);
+ WARN_ONCE(1, "hook not found, pf %d num %d", pf, unreg->hooknum);
}
}
-void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
+static void __nf_unregister_net_hook(struct net *net, int pf,
+ const struct nf_hook_ops *reg)
{
struct nf_hook_entries __rcu **pp;
struct nf_hook_entries *p;
- unsigned int nfq;
- pp = nf_hook_entry_head(net, reg);
+ pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
if (!pp)
return;
@@ -355,23 +395,52 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
return;
}
- __nf_unregister_net_hook(p, reg);
+ nf_remove_net_hook(p, reg, pf);
p = __nf_hook_entries_try_shrink(pp);
mutex_unlock(&nf_hook_mutex);
if (!p)
return;
- synchronize_net();
+ nf_queue_nf_hook_drop(net);
+ nf_hook_entries_free(p);
+}
- /* other cpu might still process nfqueue verdict that used reg */
- nfq = nf_queue_nf_hook_drop(net);
- if (nfq)
- synchronize_net();
- kvfree(p);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+ if (reg->pf == NFPROTO_INET) {
+ __nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
+ __nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
+ } else {
+ __nf_unregister_net_hook(net, reg->pf, reg);
+ }
}
EXPORT_SYMBOL(nf_unregister_net_hook);
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+ int err;
+
+ if (reg->pf == NFPROTO_INET) {
+ err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
+ if (err < 0)
+ return err;
+
+ err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
+ if (err < 0) {
+ __nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
+ return err;
+ }
+ } else {
+ err = __nf_register_net_hook(net, reg->pf, reg);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(nf_register_net_hook);
+
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int n)
{
@@ -395,63 +464,10 @@ EXPORT_SYMBOL(nf_register_net_hooks);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int hookcount)
{
- struct nf_hook_entries *to_free[16], *p;
- struct nf_hook_entries __rcu **pp;
- unsigned int i, j, n;
-
- mutex_lock(&nf_hook_mutex);
- for (i = 0; i < hookcount; i++) {
- pp = nf_hook_entry_head(net, &reg[i]);
- if (!pp)
- continue;
-
- p = nf_entry_dereference(*pp);
- if (WARN_ON_ONCE(!p))
- continue;
- __nf_unregister_net_hook(p, &reg[i]);
- }
- mutex_unlock(&nf_hook_mutex);
-
- do {
- n = min_t(unsigned int, hookcount, ARRAY_SIZE(to_free));
-
- mutex_lock(&nf_hook_mutex);
-
- for (i = 0, j = 0; i < hookcount && j < n; i++) {
- pp = nf_hook_entry_head(net, &reg[i]);
- if (!pp)
- continue;
-
- p = nf_entry_dereference(*pp);
- if (!p)
- continue;
-
- to_free[j] = __nf_hook_entries_try_shrink(pp);
- if (to_free[j])
- ++j;
- }
-
- mutex_unlock(&nf_hook_mutex);
-
- if (j) {
- unsigned int nfq;
-
- synchronize_net();
-
- /* need 2nd synchronize_net() if nfqueue is used, skb
- * can get reinjected right before nf_queue_hook_drop()
- */
- nfq = nf_queue_nf_hook_drop(net);
- if (nfq)
- synchronize_net();
-
- for (i = 0; i < j; i++)
- kvfree(to_free[i]);
- }
+ unsigned int i;
- reg += n;
- hookcount -= n;
- } while (hookcount > 0);
+ for (i = 0; i < hookcount; i++)
+ nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);
@@ -569,14 +585,27 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif
-static int __net_init netfilter_net_init(struct net *net)
+static void __net_init __netfilter_net_init(struct nf_hook_entries **e, int max)
{
- int i, h;
+ int h;
- for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
- for (h = 0; h < NF_MAX_HOOKS; h++)
- RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
- }
+ for (h = 0; h < max; h++)
+ RCU_INIT_POINTER(e[h], NULL);
+}
+
+static int __net_init netfilter_net_init(struct net *net)
+{
+ __netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
+ __netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ __netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ __netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
+#endif
+#if IS_ENABLED(CONFIG_DECNET)
+ __netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet));
+#endif
#ifdef CONFIG_PROC_FS
net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
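The net effect of the core.c rework for callers: a single nf_hook_ops with pf = NFPROTO_INET is now expanded into registrations on both the IPv4 and IPv6 per-netns hook arrays, and unregistering removes both. An illustrative caller sketch (demo_hook and demo_ops are not from the patch):

#include <linux/netfilter.h>

static unsigned int demo_hook(void *priv, struct sk_buff *skb,
                              const struct nf_hook_state *state)
{
        return NF_ACCEPT;
}

static const struct nf_hook_ops demo_ops = {
        .hook           = demo_hook,
        .pf             = NFPROTO_INET, /* expanded to IPV4 + IPV6 */
        .hooknum        = NF_INET_LOCAL_IN,
        .priority       = 0,
};

/* nf_register_net_hook(net, &demo_ops) registers both families;
 * nf_unregister_net_hook(net, &demo_ops) tears both down.
 */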
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 5ca18f07683b..257ca393e6f2 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -127,14 +127,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (ret <= 0)
return ret;
- if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(x, set)))
- return 0;
- if (SET_WITH_COUNTER(set))
- ip_set_update_counter(ext_counter(x, set), ext, mext, flags);
- if (SET_WITH_SKBINFO(set))
- ip_set_get_skbinfo(ext_skbinfo(x, set), ext, mext, flags);
- return 1;
+ return ip_set_match_extensions(set, ext, mext, flags, x);
}
static int
@@ -227,6 +220,7 @@ mtype_list(const struct ip_set *set,
rcu_read_lock();
for (; cb->args[IPSET_CB_ARG0] < map->elements;
cb->args[IPSET_CB_ARG0]++) {
+ cond_resched_rcu();
id = cb->args[IPSET_CB_ARG0];
x = get_ext(set, map, id);
if (!test_bit(id, map->members) ||
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index d8975a0b4282..488d6d05c65c 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -263,12 +263,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
if (ret)
return ret;
- if (first_ip > last_ip) {
- u32 tmp = first_ip;
-
- first_ip = last_ip;
- last_ip = tmp;
- }
+ if (first_ip > last_ip)
+ swap(first_ip, last_ip);
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 4c279fbd2d5d..c00b6a2e8e3c 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -337,12 +337,8 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
if (ret)
return ret;
- if (first_ip > last_ip) {
- u32 tmp = first_ip;
-
- first_ip = last_ip;
- last_ip = tmp;
- }
+ if (first_ip > last_ip)
+ swap(first_ip, last_ip);
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 7f9bbd7c98b5..b561ca8b3659 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -238,12 +238,8 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
- if (first_port > last_port) {
- u16 tmp = first_port;
-
- first_port = last_port;
- last_port = tmp;
- }
+ if (first_port > last_port)
+ swap(first_port, last_port);
elements = last_port - first_port + 1;
set->dsize = ip_set_elem_len(set, tb, 0, 0);
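The three bitmap_*_create() hunks above replace an open-coded three-line temporary with the type-checked swap() macro from <linux/kernel.h>; a minimal sketch (order_range is illustrative):

#include <linux/kernel.h>

static void order_range(u32 *first, u32 *last)
{
        if (*first > *last)
                swap(*first, *last);    /* typeof-based tmp under the hood */
}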
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index cf84f7b37cd9..975a85a48d39 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -57,7 +57,7 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
/* When the nfnl mutex is held: */
#define ip_set_dereference(p) \
- rcu_dereference_protected(p, 1)
+ rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
#define ip_set(inst, id) \
ip_set_dereference((inst)->ip_set_list)[id]
@@ -472,6 +472,31 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
}
EXPORT_SYMBOL_GPL(ip_set_put_extensions);
+bool
+ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags, void *data)
+{
+ if (SET_WITH_TIMEOUT(set) &&
+ ip_set_timeout_expired(ext_timeout(data, set)))
+ return false;
+ if (SET_WITH_COUNTER(set)) {
+ struct ip_set_counter *counter = ext_counter(data, set);
+
+ if (flags & IPSET_FLAG_MATCH_COUNTERS &&
+ !(ip_set_match_counter(ip_set_get_packets(counter),
+ mext->packets, mext->packets_op) &&
+ ip_set_match_counter(ip_set_get_bytes(counter),
+ mext->bytes, mext->bytes_op)))
+ return false;
+ ip_set_update_counter(counter, ext, flags);
+ }
+ if (SET_WITH_SKBINFO(set))
+ ip_set_get_skbinfo(ext_skbinfo(data, set),
+ ext, mext, flags);
+ return true;
+}
+EXPORT_SYMBOL_GPL(ip_set_match_extensions);
+
/* Creating/destroying/renaming/swapping affect the existence and
* the properties of a set. All of these can be executed from userspace
* only and serialized by the nfnl mutex indirectly from nfnetlink.
@@ -1386,11 +1411,9 @@ dump_last:
goto next_set;
if (set->variant->uref)
set->variant->uref(set, cb, true);
- /* Fall through and add elements */
+ /* fall through */
default:
- rcu_read_lock_bh();
ret = set->variant->list(set, skb, cb);
- rcu_read_unlock_bh();
if (!cb->args[IPSET_CB_ARG0])
/* Set is done, proceed with next one */
goto next_set;
@@ -2055,6 +2078,7 @@ ip_set_net_exit(struct net *net)
inst->is_deleted = true; /* flag for ip_set_nfnl_put */
+ nfnl_lock(NFNL_SUBSYS_IPSET);
for (i = 0; i < inst->ip_set_max; i++) {
set = ip_set(inst, i);
if (set) {
@@ -2062,6 +2086,7 @@ ip_set_net_exit(struct net *net)
ip_set_destroy_set(set);
}
}
+ nfnl_unlock(NFNL_SUBSYS_IPSET);
kfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
@@ -2097,7 +2122,6 @@ ip_set_init(void)
return ret;
}
- pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
return 0;
}
@@ -2113,3 +2137,5 @@ ip_set_fini(void)
module_init(ip_set_init);
module_exit(ip_set_fini);
+
+MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL));
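ip_set_match_extensions() centralizes the timeout, counter and skbinfo handling that the bitmap, hash and list set types previously open-coded; the later hunks in this series convert their test paths to it. A hedged sketch of such a converted path (demo_test is illustrative):

static int demo_test(struct ip_set *set, void *data,
                     const struct ip_set_ext *ext,
                     struct ip_set_ext *mext, u32 flags)
{
        /* false on expired timeout or counter mismatch -> no match */
        if (!ip_set_match_extensions(set, ext, mext, flags, data))
                return 0;
        return 1;
}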
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index efffc8eabafe..bbad940c0137 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -917,12 +917,9 @@ static inline int
mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
struct ip_set_ext *mext, struct ip_set *set, u32 flags)
{
- if (SET_WITH_COUNTER(set))
- ip_set_update_counter(ext_counter(data, set),
- ext, mext, flags);
- if (SET_WITH_SKBINFO(set))
- ip_set_get_skbinfo(ext_skbinfo(data, set),
- ext, mext, flags);
+ if (!ip_set_match_extensions(set, ext, mext, flags, data))
+ return 0;
+ /* nomatch entries return -ENOTEMPTY */
return mtype_do_data_match(data);
}
@@ -941,9 +938,9 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
struct mtype_elem *data;
#if IPSET_NET_COUNT == 2
struct mtype_elem orig = *d;
- int i, j = 0, k;
+ int ret, i, j = 0, k;
#else
- int i, j = 0;
+ int ret, i, j = 0;
#endif
u32 key, multi = 0;
@@ -969,18 +966,13 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
data = ahash_data(n, i, set->dsize);
if (!mtype_data_equal(data, d, &multi))
continue;
- if (SET_WITH_TIMEOUT(set)) {
- if (!ip_set_timeout_expired(
- ext_timeout(data, set)))
- return mtype_data_match(data, ext,
- mext, set,
- flags);
+ ret = mtype_data_match(data, ext, mext, set, flags);
+ if (ret != 0)
+ return ret;
#ifdef IP_SET_HASH_WITH_MULTI
- multi = 0;
+ /* No match, reset multiple match flag */
+ multi = 0;
#endif
- } else
- return mtype_data_match(data, ext,
- mext, set, flags);
}
#if IPSET_NET_COUNT == 2
}
@@ -1027,12 +1019,11 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (!test_bit(i, n->used))
continue;
data = ahash_data(n, i, set->dsize);
- if (mtype_data_equal(data, d, &multi) &&
- !(SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, set)))) {
- ret = mtype_data_match(data, ext, mext, set, flags);
+ if (!mtype_data_equal(data, d, &multi))
+ continue;
+ ret = mtype_data_match(data, ext, mext, set, flags);
+ if (ret != 0)
goto out;
- }
}
out:
return ret;
@@ -1143,6 +1134,7 @@ mtype_list(const struct ip_set *set,
rcu_read_lock();
for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
cb->args[IPSET_CB_ARG0]++) {
+ cond_resched_rcu();
incomplete = skb_tail_pointer(skb);
n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
pr_debug("cb->arg bucket: %lu, t %p n %p\n",
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index e864681b8dc5..072a658fde04 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -55,8 +55,9 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
struct list_set *map = set->data;
+ struct ip_set_ext *mext = &opt->ext;
struct set_elem *e;
- u32 cmdflags = opt->cmdflags;
+ u32 flags = opt->cmdflags;
int ret;
/* Don't lookup sub-counters at all */
@@ -64,21 +65,11 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
list_for_each_entry_rcu(e, &map->members, list) {
- if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, set)))
- continue;
ret = ip_set_test(e->id, skb, par, opt);
- if (ret > 0) {
- if (SET_WITH_COUNTER(set))
- ip_set_update_counter(ext_counter(e, set),
- ext, &opt->ext,
- cmdflags);
- if (SET_WITH_SKBINFO(set))
- ip_set_get_skbinfo(ext_skbinfo(e, set),
- ext, &opt->ext,
- cmdflags);
- return ret;
- }
+ if (ret <= 0)
+ continue;
+ if (ip_set_match_extensions(set, ext, mext, flags, e))
+ return 1;
}
return 0;
}
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 299edc6add5a..1c98c907bc63 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -595,7 +595,6 @@ static int ip_vs_app_open(struct inode *inode, struct file *file)
}
static const struct file_operations ip_vs_app_fops = {
- .owner = THIS_MODULE,
.open = ip_vs_app_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 3e053cb30070..370abbf6f421 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -322,7 +322,7 @@ ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
{
__be16 _ports[2], *pptr;
- pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
+ pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
if (pptr == NULL)
return 1;
@@ -1143,7 +1143,6 @@ static int ip_vs_conn_open(struct inode *inode, struct file *file)
}
static const struct file_operations ip_vs_conn_fops = {
- .owner = THIS_MODULE,
.open = ip_vs_conn_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1221,7 +1220,6 @@ static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
}
static const struct file_operations ip_vs_conn_sync_fops = {
- .owner = THIS_MODULE,
.open = ip_vs_conn_sync_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5cb7cac9177d..5f6f73cf2174 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -433,7 +433,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
/*
* IPv6 frags, only the first hit here.
*/
- pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
+ pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
if (pptr == NULL)
return NULL;
@@ -566,7 +566,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
struct netns_ipvs *ipvs = svc->ipvs;
struct net *net = ipvs->net;
- pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
+ pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
if (!pptr)
return NF_DROP;
dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
@@ -982,7 +982,7 @@ static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
unsigned int offset;
*related = 1;
- ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
+ ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph);
if (ic == NULL)
return NF_DROP;
@@ -1214,7 +1214,7 @@ static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum,
return NULL;
pptr = frag_safe_skb_hp(skb, iph->len,
- sizeof(_ports), _ports, iph);
+ sizeof(_ports), _ports);
if (!pptr)
return NULL;
@@ -1407,7 +1407,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
__be16 _ports[2], *pptr;
pptr = frag_safe_skb_hp(skb, iph.len,
- sizeof(_ports), _ports, &iph);
+ sizeof(_ports), _ports);
if (pptr == NULL)
return NF_ACCEPT; /* Not for me */
if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
@@ -1741,7 +1741,7 @@ static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
*related = 1;
- ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
+ ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph);
if (ic == NULL)
return NF_DROP;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index fff213eacf2a..5ebde4b15810 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2116,7 +2116,6 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
}
static const struct file_operations ip_vs_info_fops = {
- .owner = THIS_MODULE,
.open = ip_vs_info_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2161,7 +2160,6 @@ static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations ip_vs_stats_fops = {
- .owner = THIS_MODULE,
.open = ip_vs_stats_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2230,7 +2228,6 @@ static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations ip_vs_stats_percpu_fops = {
- .owner = THIS_MODULE,
.open = ip_vs_stats_percpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 121a321b91be..bcd9b7bde4ee 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -315,6 +315,7 @@ tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
switch (skb->ip_summed) {
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
+ /* fall through */
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 30e11cd6aa8a..c15ef7c2a1fa 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -319,6 +319,7 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, udphoff,
skb->len - udphoff, 0);
+ /* fall through */
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 9ee71cb276d7..fbaf3bd05b2e 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1636,17 +1636,14 @@ static int
ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
{
struct msghdr msg = {NULL,};
- struct kvec iov;
+ struct kvec iov = {buffer, buflen};
int len;
EnterFunction(7);
/* Receive a packet */
- iov.iov_base = buffer;
- iov.iov_len = (size_t)buflen;
-
- len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT);
-
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, buflen);
+ len = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
if (len < 0)
return len;
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
new file mode 100644
index 000000000000..6d65389e308f
--- /dev/null
+++ b/net/netfilter/nf_conncount.c
@@ -0,0 +1,373 @@
+/*
+ * count the number of connections matching an arbitrary key.
+ *
+ * (C) 2017 Red Hat GmbH
+ * Author: Florian Westphal <fw@strlen.de>
+ *
+ * split from xt_connlimit.c:
+ * (c) 2000 Gerd Knorr <kraxel@bytesex.org>
+ * Nov 2002: Martin Bene <martin.bene@icomedias.com>:
+ * only ignore TIME_WAIT or gone connections
+ * (C) CC Computer Consultants GmbH, 2007
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_count.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+#define CONNCOUNT_SLOTS 256U
+
+#ifdef CONFIG_LOCKDEP
+#define CONNCOUNT_LOCK_SLOTS 8U
+#else
+#define CONNCOUNT_LOCK_SLOTS 256U
+#endif
+
+#define CONNCOUNT_GC_MAX_NODES 8
+#define MAX_KEYLEN 5
+
+/* we will save the tuples of all connections we care about */
+struct nf_conncount_tuple {
+ struct hlist_node node;
+ struct nf_conntrack_tuple tuple;
+};
+
+struct nf_conncount_rb {
+ struct rb_node node;
+ struct hlist_head hhead; /* connections/hosts in same subnet */
+ u32 key[MAX_KEYLEN];
+};
+
+static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
+
+struct nf_conncount_data {
+ unsigned int keylen;
+ struct rb_root root[CONNCOUNT_SLOTS];
+};
+
+static u_int32_t conncount_rnd __read_mostly;
+static struct kmem_cache *conncount_rb_cachep __read_mostly;
+static struct kmem_cache *conncount_conn_cachep __read_mostly;
+
+static inline bool already_closed(const struct nf_conn *conn)
+{
+ if (nf_ct_protonum(conn) == IPPROTO_TCP)
+ return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
+ conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
+ else
+ return false;
+}
+
+static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
+{
+ return memcmp(a, b, klen * sizeof(u32));
+}
+
+static bool add_hlist(struct hlist_head *head,
+ const struct nf_conntrack_tuple *tuple)
+{
+ struct nf_conncount_tuple *conn;
+
+ conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+ if (conn == NULL)
+ return false;
+ conn->tuple = *tuple;
+ hlist_add_head(&conn->node, head);
+ return true;
+}
+
+static unsigned int check_hlist(struct net *net,
+ struct hlist_head *head,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone,
+ bool *addit)
+{
+ const struct nf_conntrack_tuple_hash *found;
+ struct nf_conncount_tuple *conn;
+ struct hlist_node *n;
+ struct nf_conn *found_ct;
+ unsigned int length = 0;
+
+ *addit = true;
+
+ /* check the saved connections */
+ hlist_for_each_entry_safe(conn, n, head, node) {
+ found = nf_conntrack_find_get(net, zone, &conn->tuple);
+ if (found == NULL) {
+ hlist_del(&conn->node);
+ kmem_cache_free(conncount_conn_cachep, conn);
+ continue;
+ }
+
+ found_ct = nf_ct_tuplehash_to_ctrack(found);
+
+ if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
+ /*
+ * Just to be sure we have it only once in the list.
+ * We should not see tuples twice unless someone hooks
+ * this into a table without "-p tcp --syn".
+ */
+ *addit = false;
+ } else if (already_closed(found_ct)) {
+ /*
+ * we do not care about connections which are
+ * closed already -> ditch it
+ */
+ nf_ct_put(found_ct);
+ hlist_del(&conn->node);
+ kmem_cache_free(conncount_conn_cachep, conn);
+ continue;
+ }
+
+ nf_ct_put(found_ct);
+ length++;
+ }
+
+ return length;
+}
+
+static void tree_nodes_free(struct rb_root *root,
+ struct nf_conncount_rb *gc_nodes[],
+ unsigned int gc_count)
+{
+ struct nf_conncount_rb *rbconn;
+
+ while (gc_count) {
+ rbconn = gc_nodes[--gc_count];
+ rb_erase(&rbconn->node, root);
+ kmem_cache_free(conncount_rb_cachep, rbconn);
+ }
+}
+
+static unsigned int
+count_tree(struct net *net, struct rb_root *root,
+ const u32 *key, u8 keylen,
+ u8 family,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
+{
+ struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
+ struct rb_node **rbnode, *parent;
+ struct nf_conncount_rb *rbconn;
+ struct nf_conncount_tuple *conn;
+ unsigned int gc_count;
+ bool no_gc = false;
+
+ restart:
+ gc_count = 0;
+ parent = NULL;
+ rbnode = &(root->rb_node);
+ while (*rbnode) {
+ int diff;
+ bool addit;
+
+ rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
+
+ parent = *rbnode;
+ diff = key_diff(key, rbconn->key, keylen);
+ if (diff < 0) {
+ rbnode = &((*rbnode)->rb_left);
+ } else if (diff > 0) {
+ rbnode = &((*rbnode)->rb_right);
+ } else {
+ /* same source network -> be counted! */
+ unsigned int count;
+ count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
+
+ tree_nodes_free(root, gc_nodes, gc_count);
+ if (!addit)
+ return count;
+
+ if (!add_hlist(&rbconn->hhead, tuple))
+ return 0; /* hotdrop */
+
+ return count + 1;
+ }
+
+ if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
+ continue;
+
+ /* only used for GC on hhead; retval and 'addit' are ignored */
+ check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
+ if (hlist_empty(&rbconn->hhead))
+ gc_nodes[gc_count++] = rbconn;
+ }
+
+ if (gc_count) {
+ no_gc = true;
+ tree_nodes_free(root, gc_nodes, gc_count);
+ /* Calling tree_nodes_free() before the new allocation permits
+ * the allocator to reuse the newly freed objects.
+ *
+ * This is a rare event; in most cases we will find an
+ * existing node to reuse (or gc_count is 0).
+ */
+ goto restart;
+ }
+
+ /* no match, need to insert new node */
+ rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
+ if (rbconn == NULL)
+ return 0;
+
+ conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+ if (conn == NULL) {
+ kmem_cache_free(conncount_rb_cachep, rbconn);
+ return 0;
+ }
+
+ conn->tuple = *tuple;
+ memcpy(rbconn->key, key, sizeof(u32) * keylen);
+
+ INIT_HLIST_HEAD(&rbconn->hhead);
+ hlist_add_head(&conn->node, &rbconn->hhead);
+
+ rb_link_node(&rbconn->node, parent, rbnode);
+ rb_insert_color(&rbconn->node, root);
+ return 1;
+}
+
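+/* Return how many tracked connections share @key, adding @tuple to the
+ * bookkeeping unless it is already recorded; jhash2() selects the tree
+ * slot, and slots share spinlocks via hash % CONNCOUNT_LOCK_SLOTS.
+ */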
+unsigned int nf_conncount_count(struct net *net,
+ struct nf_conncount_data *data,
+ const u32 *key,
+ unsigned int family,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
+{
+ struct rb_root *root;
+ int count;
+ u32 hash;
+
+ hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
+ root = &data->root[hash];
+
+ spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+
+ count = count_tree(net, root, key, data->keylen, family, tuple, zone);
+
+ spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_count);
+
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
+ unsigned int keylen)
+{
+ struct nf_conncount_data *data;
+ int ret, i;
+
+ if (keylen % sizeof(u32) ||
+ keylen / sizeof(u32) > MAX_KEYLEN ||
+ keylen == 0)
+ return ERR_PTR(-EINVAL);
+
+ net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = nf_ct_netns_get(net, family);
+ if (ret < 0) {
+ kfree(data);
+ return ERR_PTR(ret);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data->root); ++i)
+ data->root[i] = RB_ROOT;
+
+ data->keylen = keylen / sizeof(u32);
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_init);
+
+static void destroy_tree(struct rb_root *r)
+{
+ struct nf_conncount_tuple *conn;
+ struct nf_conncount_rb *rbconn;
+ struct hlist_node *n;
+ struct rb_node *node;
+
+ while ((node = rb_first(r)) != NULL) {
+ rbconn = rb_entry(node, struct nf_conncount_rb, node);
+
+ rb_erase(node, r);
+
+ hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
+ kmem_cache_free(conncount_conn_cachep, conn);
+
+ kmem_cache_free(conncount_rb_cachep, rbconn);
+ }
+}
+
+void nf_conncount_destroy(struct net *net, unsigned int family,
+ struct nf_conncount_data *data)
+{
+ unsigned int i;
+
+ nf_ct_netns_put(net, family);
+
+ for (i = 0; i < ARRAY_SIZE(data->root); ++i)
+ destroy_tree(&data->root[i]);
+
+ kfree(data);
+}
+EXPORT_SYMBOL_GPL(nf_conncount_destroy);
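+
+/* Minimal usage sketch (illustrative only, not part of this module): a
+ * caller is expected to wire the three exported symbols together along
+ * these lines, with @key holding the masked source network as an array
+ * of u32 words and @keylen given in bytes:
+ *
+ *	struct nf_conncount_data *data;
+ *	unsigned int count;
+ *
+ *	data = nf_conncount_init(net, family, keylen);
+ *	if (IS_ERR(data))
+ *		return PTR_ERR(data);
+ *
+ *	count = nf_conncount_count(net, data, key, family, tuple, zone);
+ *	(a return value of 0 signals allocation failure, i.e. hotdrop)
+ *
+ *	nf_conncount_destroy(net, family, data);
+ */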
+
+static int __init nf_conncount_modinit(void)
+{
+ int i;
+
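+ /* each lock serialises CONNCOUNT_SLOTS / CONNCOUNT_LOCK_SLOTS
+ * trees, so the lock count must divide the slot count evenly
+ */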
+ BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
+ BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
+
+ for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
+ spin_lock_init(&nf_conncount_locks[i]);
+
+ conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
+ sizeof(struct nf_conncount_tuple),
+ 0, 0, NULL);
+ if (!conncount_conn_cachep)
+ return -ENOMEM;
+
+ conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
+ sizeof(struct nf_conncount_rb),
+ 0, 0, NULL);
+ if (!conncount_rb_cachep) {
+ kmem_cache_destroy(conncount_conn_cachep);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit nf_conncount_modexit(void)
+{
+ kmem_cache_destroy(conncount_conn_cachep);
+ kmem_cache_destroy(conncount_rb_cachep);
+}
+
+module_init(nf_conncount_modinit);
+module_exit(nf_conncount_modexit);
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
+MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 85f643c1e227..705198de671d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -58,8 +58,6 @@
#include "nf_internals.h"
-#define NF_CONNTRACK_VERSION "0.5.0"
-
int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr) __read_mostly;
@@ -901,6 +899,9 @@ static unsigned int early_drop_list(struct net *net,
hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
+ if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
+ continue;
+
if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp);
continue;
@@ -975,6 +976,18 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
return false;
}
+#define DAY (86400 * HZ)
+
+/* Set an arbitrary timeout large enough not to ever expire; this saves
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static void nf_ct_offload_timeout(struct nf_conn *ct)
+{
+ if (nf_ct_expires(ct) < DAY / 2)
+ ct->timeout = nfct_time_stamp + DAY;
+}
+
static void gc_worker(struct work_struct *work)
{
unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
@@ -1011,6 +1024,11 @@ static void gc_worker(struct work_struct *work)
tmp = nf_ct_tuplehash_to_ctrack(h);
scanned++;
+ if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+ nf_ct_offload_timeout(tmp);
+ continue;
+ }
+
if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp);
expired_count++;
@@ -1044,7 +1062,7 @@ static void gc_worker(struct work_struct *work)
* we will just continue with next hash slot.
*/
rcu_read_unlock();
- cond_resched_rcu_qs();
+ cond_resched();
} while (++buckets < goal);
if (gc_work->exiting)
@@ -2048,10 +2066,6 @@ int nf_conntrack_init_start(void)
if (!nf_conntrack_cachep)
goto err_cachep;
- printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
- NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
- nf_conntrack_max);
-
ret = nf_conntrack_expect_init();
if (ret < 0)
goto err_expect;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index d6748a8a79c5..8ef21d9f9a00 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -649,7 +649,6 @@ static int exp_open(struct inode *inode, struct file *file)
}
static const struct file_operations exp_file_ops = {
- .owner = THIS_MODULE,
.open = exp_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index dc6347342e34..1601275efe2d 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -1,4 +1,4 @@
-/****************************************************************************
+/*
* ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323
* conntrack/NAT module.
*
@@ -8,7 +8,7 @@
*
* See ip_conntrack_helper_h323_asn1.h for details.
*
- ****************************************************************************/
+ */
#ifdef __KERNEL__
#include <linux/kernel.h>
@@ -140,14 +140,15 @@ static const decoder_t Decoders[] = {
decode_choice,
};
-/****************************************************************************
+/*
* H.323 Types
- ****************************************************************************/
+ */
#include "nf_conntrack_h323_types.c"
-/****************************************************************************
+/*
* Functions
- ****************************************************************************/
+ */
+
/* Assume bs is aligned && v < 16384 */
static unsigned int get_len(struct bitstr *bs)
{
@@ -177,7 +178,6 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
return 0;
}
-/****************************************************************************/
static unsigned int get_bit(struct bitstr *bs)
{
unsigned int b = (*bs->cur) & (0x80 >> bs->bit);
@@ -187,7 +187,6 @@ static unsigned int get_bit(struct bitstr *bs)
return b;
}
-/****************************************************************************/
/* Assume b <= 8 */
static unsigned int get_bits(struct bitstr *bs, unsigned int b)
{
@@ -213,7 +212,6 @@ static unsigned int get_bits(struct bitstr *bs, unsigned int b)
return v;
}
-/****************************************************************************/
/* Assume b <= 32 */
static unsigned int get_bitmap(struct bitstr *bs, unsigned int b)
{
@@ -251,9 +249,9 @@ static unsigned int get_bitmap(struct bitstr *bs, unsigned int b)
return v;
}
-/****************************************************************************
+/*
* Assume bs is aligned and sizeof(unsigned int) == 4
- ****************************************************************************/
+ */
static unsigned int get_uint(struct bitstr *bs, int b)
{
unsigned int v = 0;
@@ -262,12 +260,15 @@ static unsigned int get_uint(struct bitstr *bs, int b)
case 4:
v |= *bs->cur++;
v <<= 8;
+ /* fall through */
case 3:
v |= *bs->cur++;
v <<= 8;
+ /* fall through */
case 2:
v |= *bs->cur++;
v <<= 8;
+ /* fall through */
case 1:
v |= *bs->cur++;
break;
@@ -275,7 +276,6 @@ static unsigned int get_uint(struct bitstr *bs, int b)
return v;
}
-/****************************************************************************/
static int decode_nul(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -284,7 +284,6 @@ static int decode_nul(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_bool(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -296,7 +295,6 @@ static int decode_bool(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_oid(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -316,7 +314,6 @@ static int decode_oid(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_int(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -364,7 +361,6 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_enum(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -381,7 +377,6 @@ static int decode_enum(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -418,7 +413,6 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_numstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -439,7 +433,6 @@ static int decode_numstr(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_octstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -493,7 +486,6 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -523,7 +515,6 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_seq(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -653,7 +644,6 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
static int decode_seqof(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -750,8 +740,6 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-
-/****************************************************************************/
static int decode_choice(struct bitstr *bs, const struct field_t *f,
char *base, int level)
{
@@ -833,7 +821,6 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
return H323_ERROR_NONE;
}
-/****************************************************************************/
int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras)
{
static const struct field_t ras_message = {
@@ -849,7 +836,6 @@ int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras)
return decode_choice(&bs, &ras_message, (char *) ras, 0);
}
-/****************************************************************************/
static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg,
size_t sz, H323_UserInformation *uuie)
{
@@ -867,7 +853,6 @@ static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg,
return decode_seq(&bs, &h323_userinformation, (char *) uuie, 0);
}
-/****************************************************************************/
int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
MultimediaSystemControlMessage *
mscm)
@@ -886,7 +871,6 @@ int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
(char *) mscm, 0);
}
-/****************************************************************************/
int DecodeQ931(unsigned char *buf, size_t sz, Q931 *q931)
{
unsigned char *p = buf;
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index f71f0d2558fd..005589c6d0f6 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -24,6 +24,7 @@
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/ip6_route.h>
+#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -115,7 +116,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_h245;
static struct nf_conntrack_helper nf_conntrack_helper_q931[];
static struct nf_conntrack_helper nf_conntrack_helper_ras[];
-/****************************************************************************/
static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo,
unsigned char **data, int *datalen, int *dataoff)
@@ -219,7 +219,6 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
return 0;
}
-/****************************************************************************/
static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
H245_TransportAddress *taddr,
union nf_inet_addr *addr, __be16 *port)
@@ -254,7 +253,6 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
return 1;
}
-/****************************************************************************/
static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -328,7 +326,6 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
return ret;
}
-/****************************************************************************/
static int expect_t120(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
@@ -380,7 +377,6 @@ static int expect_t120(struct sk_buff *skb,
return ret;
}
-/****************************************************************************/
static int process_h245_channel(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
@@ -410,7 +406,6 @@ static int process_h245_channel(struct sk_buff *skb,
return 0;
}
-/****************************************************************************/
static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -472,7 +467,6 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
@@ -542,7 +536,6 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
@@ -578,7 +571,6 @@ static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int h245_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
@@ -628,7 +620,6 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
return NF_DROP;
}
-/****************************************************************************/
static const struct nf_conntrack_expect_policy h245_exp_policy = {
.max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
.timeout = 240,
@@ -643,7 +634,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
.expect_policy = &h245_exp_policy,
};
-/****************************************************************************/
int get_h225_addr(struct nf_conn *ct, unsigned char *data,
TransportAddress *taddr,
union nf_inet_addr *addr, __be16 *port)
@@ -675,7 +665,6 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
return 1;
}
-/****************************************************************************/
static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
@@ -726,20 +715,15 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
}
/* If the calling party is on the same side of the forward-to party,
- * we don't need to track the second call */
+ * we don't need to track the second call
+ */
static int callforward_do_filter(struct net *net,
const union nf_inet_addr *src,
const union nf_inet_addr *dst,
u_int8_t family)
{
- const struct nf_afinfo *afinfo;
int ret = 0;
- /* rcu_read_lock()ed by nf_hook_thresh */
- afinfo = nf_get_afinfo(family);
- if (!afinfo)
- return 0;
-
switch (family) {
case AF_INET: {
struct flowi4 fl1, fl2;
@@ -750,10 +734,10 @@ static int callforward_do_filter(struct net *net,
memset(&fl2, 0, sizeof(fl2));
fl2.daddr = dst->ip;
- if (!afinfo->route(net, (struct dst_entry **)&rt1,
- flowi4_to_flowi(&fl1), false)) {
- if (!afinfo->route(net, (struct dst_entry **)&rt2,
- flowi4_to_flowi(&fl2), false)) {
+ if (!nf_ip_route(net, (struct dst_entry **)&rt1,
+ flowi4_to_flowi(&fl1), false)) {
+ if (!nf_ip_route(net, (struct dst_entry **)&rt2,
+ flowi4_to_flowi(&fl2), false)) {
if (rt_nexthop(rt1, fl1.daddr) ==
rt_nexthop(rt2, fl2.daddr) &&
rt1->dst.dev == rt2->dst.dev)
@@ -766,18 +750,23 @@ static int callforward_do_filter(struct net *net,
}
#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
case AF_INET6: {
- struct flowi6 fl1, fl2;
+ const struct nf_ipv6_ops *v6ops;
struct rt6_info *rt1, *rt2;
+ struct flowi6 fl1, fl2;
+
+ v6ops = nf_get_ipv6_ops();
+ if (!v6ops)
+ return 0;
memset(&fl1, 0, sizeof(fl1));
fl1.daddr = src->in6;
memset(&fl2, 0, sizeof(fl2));
fl2.daddr = dst->in6;
- if (!afinfo->route(net, (struct dst_entry **)&rt1,
- flowi6_to_flowi(&fl1), false)) {
- if (!afinfo->route(net, (struct dst_entry **)&rt2,
- flowi6_to_flowi(&fl2), false)) {
+ if (!v6ops->route(net, (struct dst_entry **)&rt1,
+ flowi6_to_flowi(&fl1), false)) {
+ if (!v6ops->route(net, (struct dst_entry **)&rt2,
+ flowi6_to_flowi(&fl2), false)) {
if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr),
rt6_nexthop(rt2, &fl2.daddr)) &&
rt1->dst.dev == rt2->dst.dev)
@@ -794,7 +783,6 @@ static int callforward_do_filter(struct net *net,
}
-/****************************************************************************/
static int expect_callforwarding(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
@@ -815,7 +803,8 @@ static int expect_callforwarding(struct sk_buff *skb,
return 0;
/* If the calling party is on the same side of the forward-to party,
- * we don't need to track the second call */
+ * we don't need to track the second call
+ */
if (callforward_filter &&
callforward_do_filter(net, &addr, &ct->tuplehash[!dir].tuple.src.u3,
nf_ct_l3num(ct))) {
@@ -854,7 +843,6 @@ static int expect_callforwarding(struct sk_buff *skb,
return ret;
}
-/****************************************************************************/
static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -925,7 +913,6 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_callproceeding(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
@@ -958,7 +945,6 @@ static int process_callproceeding(struct sk_buff *skb,
return 0;
}
-/****************************************************************************/
static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -990,7 +976,6 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1022,7 +1007,6 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1063,7 +1047,6 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1095,7 +1078,6 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data, int dataoff,
@@ -1154,7 +1136,6 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int q931_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
@@ -1203,7 +1184,6 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
return NF_DROP;
}
-/****************************************************************************/
static const struct nf_conntrack_expect_policy q931_exp_policy = {
/* T.120 and H.245 */
.max_expected = H323_RTP_CHANNEL_MAX * 4 + 4,
@@ -1231,7 +1211,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
},
};
-/****************************************************************************/
static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
int *datalen)
{
@@ -1249,7 +1228,6 @@ static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
return skb_header_pointer(skb, dataoff, *datalen, h323_buffer);
}
-/****************************************************************************/
static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
union nf_inet_addr *addr,
__be16 port)
@@ -1270,7 +1248,6 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
return NULL;
}
-/****************************************************************************/
static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff, unsigned char **data,
@@ -1328,7 +1305,6 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
return ret;
}
-/****************************************************************************/
static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1346,7 +1322,6 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1391,7 +1366,6 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
return ret;
}
-/****************************************************************************/
static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1428,7 +1402,6 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1480,7 +1453,6 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1514,7 +1486,6 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1559,7 +1530,6 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1608,7 +1578,6 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
return ret;
}
-/****************************************************************************/
static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1626,7 +1595,6 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1666,7 +1634,6 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
return ret;
}
-/****************************************************************************/
static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1700,7 +1667,6 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
@@ -1745,7 +1711,6 @@ static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
return 0;
}
-/****************************************************************************/
static int ras_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
@@ -1788,7 +1753,6 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
return NF_DROP;
}
-/****************************************************************************/
static const struct nf_conntrack_expect_policy ras_exp_policy = {
.max_expected = 32,
.timeout = 240,
@@ -1849,7 +1813,6 @@ static void __exit h323_helper_exit(void)
nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
}
-/****************************************************************************/
static void __exit nf_conntrack_h323_fini(void)
{
h323_helper_exit();
@@ -1857,7 +1820,6 @@ static void __exit nf_conntrack_h323_fini(void)
pr_debug("nf_ct_h323: fini\n");
}
-/****************************************************************************/
static int __init nf_conntrack_h323_init(void)
{
int ret;
@@ -1877,7 +1839,6 @@ err1:
return ret;
}
-/****************************************************************************/
module_init(nf_conntrack_h323_init);
module_exit(nf_conntrack_h323_fini);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 382d49792f42..dd177ebee9aa 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -57,8 +57,6 @@
MODULE_LICENSE("GPL");
-static char __initdata version[] = "0.93";
-
static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l4proto *l4proto)
@@ -544,7 +542,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
len *= 3u; /* ORIG, REPLY, MASTER */
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
- len += l4proto->nla_size;
+ len += l4proto->nlattr_size;
if (l4proto->nlattr_tuple_size) {
len4 = l4proto->nlattr_tuple_size();
len4 *= 3u; /* ORIG, REPLY, MASTER */
@@ -1110,6 +1108,14 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
.len = NF_CT_LABELS_MAX_SIZE },
};
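+
+/* conntrack entries with IPS_OFFLOAD set are owned by the flowtable
+ * and must survive a flush request
+ */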
+static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
+{
+ if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+ return 0;
+
+ return ctnetlink_filter_match(ct, data);
+}
+
static int ctnetlink_flush_conntrack(struct net *net,
const struct nlattr * const cda[],
u32 portid, int report)
@@ -1122,7 +1128,7 @@ static int ctnetlink_flush_conntrack(struct net *net,
return PTR_ERR(filter);
}
- nf_ct_iterate_cleanup_net(net, ctnetlink_filter_match, filter,
+ nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
portid, report);
kfree(filter);
@@ -1168,6 +1174,11 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
ct = nf_ct_tuplehash_to_ctrack(h);
+ if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
+ nf_ct_put(ct);
+ return -EBUSY;
+ }
+
if (cda[CTA_ID]) {
u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
if (id != (u32)(unsigned long)ct) {
@@ -3412,7 +3423,6 @@ static int __init ctnetlink_init(void)
{
int ret;
- pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
ret = nfnetlink_subsys_register(&ctnl_subsys);
if (ret < 0) {
pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
@@ -3446,8 +3456,6 @@ err_out:
static void __exit ctnetlink_exit(void)
{
- pr_info("ctnetlink: unregistering from nfnetlink.\n");
-
unregister_pernet_subsys(&ctnetlink_net_ops);
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index c8e9c9503a08..afdeca53e88b 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -385,14 +385,14 @@ void nf_ct_l4proto_unregister_sysctl(struct net *net,
/* FIXME: Allow NULL functions and sub in pointers to generic for
them. --RR */
-int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto)
+int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto)
{
int ret = 0;
if (l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos))
return -EBUSY;
- if ((l4proto->to_nlattr && !l4proto->nlattr_size) ||
+ if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) ||
(l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
return -EINVAL;
@@ -428,10 +428,6 @@ int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto)
goto out_unlock;
}
- l4proto->nla_size = 0;
- if (l4proto->nlattr_size)
- l4proto->nla_size += l4proto->nlattr_size();
-
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
l4proto);
out_unlock:
@@ -502,7 +498,7 @@ void nf_ct_l4proto_pernet_unregister_one(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
-int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[],
+int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
unsigned int num_proto)
{
int ret = -EINVAL, ver;
@@ -524,7 +520,7 @@ int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[],
EXPORT_SYMBOL_GPL(nf_ct_l4proto_register);
int nf_ct_l4proto_pernet_register(struct net *net,
- struct nf_conntrack_l4proto *const l4proto[],
+ const struct nf_conntrack_l4proto *const l4proto[],
unsigned int num_proto)
{
int ret = -EINVAL;
@@ -545,7 +541,7 @@ int nf_ct_l4proto_pernet_register(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register);
-void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[],
+void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[],
unsigned int num_proto)
{
mutex_lock(&nf_ct_proto_mutex);
@@ -555,12 +551,12 @@ void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[],
synchronize_net();
/* Remove all contrack entries for this protocol */
- nf_ct_iterate_destroy(kill_l4proto, l4proto);
+ nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister);
void nf_ct_l4proto_pernet_unregister(struct net *net,
- struct nf_conntrack_l4proto *const l4proto[],
+ const struct nf_conntrack_l4proto *const l4proto[],
unsigned int num_proto)
{
while (num_proto-- != 0)
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 2a446f4a554c..abe647d5b8c6 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -654,6 +654,12 @@ static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
[CTA_PROTOINFO_DCCP_PAD] = { .type = NLA_UNSPEC },
};
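+
+/* worst-case payload of the CTA_PROTOINFO_DCCP nest, computed at build
+ * time: u8 state, u8 role, u64 handshake sequence and the empty pad
+ * attribute, each behind an aligned attribute header
+ */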
+#define DCCP_NLATTR_SIZE ( \
+ NLA_ALIGN(NLA_HDRLEN + 1) + \
+ NLA_ALIGN(NLA_HDRLEN + 1) + \
+ NLA_ALIGN(NLA_HDRLEN + sizeof(u64)) + \
+ NLA_ALIGN(NLA_HDRLEN + 0))
+
static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
{
struct nlattr *attr = cda[CTA_PROTOINFO_DCCP];
@@ -691,13 +697,6 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
spin_unlock_bh(&ct->lock);
return 0;
}
-
-static int dccp_nlattr_size(void)
-{
- return nla_total_size(0) /* CTA_PROTOINFO_DCCP */
- + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
-}
-
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
@@ -862,7 +861,7 @@ static struct nf_proto_net *dccp_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.dccp.pn;
}
-struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
.l3proto = AF_INET,
.l4proto = IPPROTO_DCCP,
.pkt_to_tuple = dccp_pkt_to_tuple,
@@ -876,8 +875,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
.print_conntrack = dccp_print_conntrack,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+ .nlattr_size = DCCP_NLATTR_SIZE,
.to_nlattr = dccp_to_nlattr,
- .nlattr_size = dccp_nlattr_size,
.from_nlattr = nlattr_to_dccp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
@@ -898,7 +897,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4);
-struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
.l3proto = AF_INET6,
.l4proto = IPPROTO_DCCP,
.pkt_to_tuple = dccp_pkt_to_tuple,
@@ -912,8 +911,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = {
.print_conntrack = dccp_print_conntrack,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+ .nlattr_size = DCCP_NLATTR_SIZE,
.to_nlattr = dccp_to_nlattr,
- .nlattr_size = dccp_nlattr_size,
.from_nlattr = nlattr_to_dccp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 1f86ddf6649a..6c6896d21cd7 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -12,7 +12,7 @@
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
-static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+static const unsigned int nf_ct_generic_timeout = 600*HZ;
static bool nf_generic_should_process(u8 proto)
{
@@ -163,7 +163,7 @@ static struct nf_proto_net *generic_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.generic.pn;
}
-struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
{
.l3proto = PF_UNSPEC,
.l4proto = 255,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index a2503005d80b..d049ea5a3770 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -48,7 +48,7 @@ enum grep_conntrack {
GRE_CT_MAX
};
-static unsigned int gre_timeouts[GRE_CT_MAX] = {
+static const unsigned int gre_timeouts[GRE_CT_MAX] = {
[GRE_CT_UNREPLIED] = 30*HZ,
[GRE_CT_REPLIED] = 180*HZ,
};
@@ -352,7 +352,7 @@ static int gre_init_net(struct net *net, u_int16_t proto)
}
/* protocol helper struct */
-static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
+static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
.l3proto = AF_INET,
.l4proto = IPPROTO_GRE,
.pkt_to_tuple = gre_pkt_to_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 80faf04ddf15..fb9a35d16069 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -52,7 +52,7 @@ static const char *const sctp_conntrack_names[] = {
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
-static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
+static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
[SCTP_CONNTRACK_CLOSED] = 10 SECS,
[SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
[SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
@@ -578,6 +578,11 @@ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
[CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
};
+#define SCTP_NLATTR_SIZE ( \
+ NLA_ALIGN(NLA_HDRLEN + 1) + \
+ NLA_ALIGN(NLA_HDRLEN + 4) + \
+ NLA_ALIGN(NLA_HDRLEN + 4))
+
static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
{
struct nlattr *attr = cda[CTA_PROTOINFO_SCTP];
@@ -608,12 +613,6 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
return 0;
}
-
-static int sctp_nlattr_size(void)
-{
- return nla_total_size(0) /* CTA_PROTOINFO_SCTP */
- + nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1);
-}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
@@ -778,7 +777,7 @@ static struct nf_proto_net *sctp_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.sctp.pn;
}
-struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
.l3proto = PF_INET,
.l4proto = IPPROTO_SCTP,
.pkt_to_tuple = sctp_pkt_to_tuple,
@@ -793,8 +792,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.can_early_drop = sctp_can_early_drop,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+ .nlattr_size = SCTP_NLATTR_SIZE,
.to_nlattr = sctp_to_nlattr,
- .nlattr_size = sctp_nlattr_size,
.from_nlattr = nlattr_to_sctp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
@@ -815,7 +814,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4);
-struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
.l3proto = PF_INET6,
.l4proto = IPPROTO_SCTP,
.pkt_to_tuple = sctp_pkt_to_tuple,
@@ -830,8 +829,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
.can_early_drop = sctp_can_early_drop,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+ .nlattr_size = SCTP_NLATTR_SIZE,
.to_nlattr = sctp_to_nlattr,
- .nlattr_size = sctp_nlattr_size,
.from_nlattr = nlattr_to_sctp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 37ef35b861f2..e97cdc1cf98c 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -68,7 +68,7 @@ static const char *const tcp_conntrack_names[] = {
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
-static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
+static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
[TCP_CONNTRACK_SYN_SENT] = 2 MINS,
[TCP_CONNTRACK_SYN_RECV] = 60 SECS,
[TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
@@ -305,6 +305,9 @@ static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
+ if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+ return;
+
seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
}
#endif
@@ -1222,6 +1225,12 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
[CTA_PROTOINFO_TCP_FLAGS_REPLY] = { .len = sizeof(struct nf_ct_tcp_flags) },
};
+#define TCP_NLATTR_SIZE ( \
+ NLA_ALIGN(NLA_HDRLEN + 1) + \
+ NLA_ALIGN(NLA_HDRLEN + 1) + \
+ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
+
static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{
struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
@@ -1274,12 +1283,6 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
return 0;
}
-static int tcp_nlattr_size(void)
-{
- return nla_total_size(0) /* CTA_PROTOINFO_TCP */
- + nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
-}
-
static unsigned int tcp_nlattr_tuple_size(void)
{
static unsigned int size __read_mostly;
@@ -1541,7 +1544,7 @@ static struct nf_proto_net *tcp_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.tcp.pn;
}
-struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
{
.l3proto = PF_INET,
.l4proto = IPPROTO_TCP,
@@ -1557,11 +1560,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
.can_early_drop = tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = tcp_to_nlattr,
- .nlattr_size = tcp_nlattr_size,
.from_nlattr = nlattr_to_tcp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nlattr_tuple_size = tcp_nlattr_tuple_size,
+ .nlattr_size = TCP_NLATTR_SIZE,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
@@ -1579,7 +1582,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
-struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
{
.l3proto = PF_INET6,
.l4proto = IPPROTO_TCP,
@@ -1594,8 +1597,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
.error = tcp_error,
.can_early_drop = tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+ .nlattr_size = TCP_NLATTR_SIZE,
.to_nlattr = tcp_to_nlattr,
- .nlattr_size = tcp_nlattr_size,
.from_nlattr = nlattr_to_tcp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 3a5f727103af..fe7243970aa4 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -26,7 +26,7 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
-static unsigned int udp_timeouts[UDP_CT_MAX] = {
+static const unsigned int udp_timeouts[UDP_CT_MAX] = {
[UDP_CT_UNREPLIED] = 30*HZ,
[UDP_CT_REPLIED] = 180*HZ,
};
@@ -296,7 +296,7 @@ static struct nf_proto_net *udp_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.udp.pn;
}
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
{
.l3proto = PF_INET,
.l4proto = IPPROTO_UDP,
@@ -328,7 +328,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
{
.l3proto = PF_INET,
.l4proto = IPPROTO_UDPLITE,
@@ -360,7 +360,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4);
#endif
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
{
.l3proto = PF_INET6,
.l4proto = IPPROTO_UDP,
@@ -392,7 +392,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
{
.l3proto = PF_INET6,
.l4proto = IPPROTO_UDPLITE,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5a101caa3e12..9123fdec5e14 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -309,10 +309,12 @@ static int ct_seq_show(struct seq_file *s, void *v)
WARN_ON(!l4proto);
ret = -ENOSPC;
- seq_printf(s, "%-8s %u %-8s %u %ld ",
+ seq_printf(s, "%-8s %u %-8s %u ",
l3proto_name(l3proto->l3proto), nf_ct_l3num(ct),
- l4proto_name(l4proto->l4proto), nf_ct_protonum(ct),
- nf_ct_expires(ct) / HZ);
+ l4proto_name(l4proto->l4proto), nf_ct_protonum(ct));
+
+ if (!test_bit(IPS_OFFLOAD_BIT, &ct->status))
+ seq_printf(s, "%ld ", nf_ct_expires(ct) / HZ);
if (l4proto->print_conntrack)
l4proto->print_conntrack(s, ct);
@@ -339,7 +341,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
goto release;
- if (test_bit(IPS_ASSURED_BIT, &ct->status))
+ if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+ seq_puts(s, "[OFFLOAD] ");
+ else if (test_bit(IPS_ASSURED_BIT, &ct->status))
seq_puts(s, "[ASSURED] ");
if (seq_has_overflowed(s))
@@ -378,7 +382,6 @@ static int ct_open(struct inode *inode, struct file *file)
}
static const struct file_operations ct_file_ops = {
- .owner = THIS_MODULE,
.open = ct_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -471,7 +474,6 @@ static int ct_cpu_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations ct_cpu_seq_fops = {
- .owner = THIS_MODULE,
.open = ct_cpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/nf_flow_table.c b/net/netfilter/nf_flow_table.c
new file mode 100644
index 000000000000..2f5099cb85b8
--- /dev/null
+++ b/net/netfilter/nf_flow_table.c
@@ -0,0 +1,429 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+
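+/* private container behind each struct flow_offload: pins the conntrack
+ * entry the flow was built from and carries the rcu head used to defer
+ * freeing after removal from the table
+ */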
+struct flow_offload_entry {
+ struct flow_offload flow;
+ struct nf_conn *ct;
+ struct rcu_head rcu_head;
+};
+
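+/* Grab a reference on the conntrack entry and on both cached routes,
+ * then mirror the conntrack tuples into the original and reply flow
+ * directions. NAT is recorded as a flag only; the translated addresses
+ * are already present in the conntrack reply tuple.
+ */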
+struct flow_offload *
+flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
+{
+ struct flow_offload_entry *entry;
+ struct flow_offload *flow;
+
+ if (unlikely(nf_ct_is_dying(ct) ||
+ !atomic_inc_not_zero(&ct->ct_general.use)))
+ return NULL;
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ goto err_ct_refcnt;
+
+ flow = &entry->flow;
+
+ if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
+ goto err_dst_cache_original;
+
+ if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
+ goto err_dst_cache_reply;
+
+ entry->ct = ct;
+
+ switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num) {
+ case NFPROTO_IPV4:
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4 =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4 =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4 =
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4 =
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in;
+ break;
+ case NFPROTO_IPV6:
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6 =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in6;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6 =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6 =
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in6;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6 =
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in6;
+ break;
+ }
+
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l3proto =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l3proto =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l4proto =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache =
+ route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache =
+ route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst;
+
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port =
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.tcp.port;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port =
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
+
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dir =
+ FLOW_OFFLOAD_DIR_ORIGINAL;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dir =
+ FLOW_OFFLOAD_DIR_REPLY;
+
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
+ route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.oifidx =
+ route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
+ route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex;
+ flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.oifidx =
+ route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex;
+
+ if (ct->status & IPS_SRC_NAT)
+ flow->flags |= FLOW_OFFLOAD_SNAT;
+ else if (ct->status & IPS_DST_NAT)
+ flow->flags |= FLOW_OFFLOAD_DNAT;
+
+ return flow;
+
+err_dst_cache_reply:
+ dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
+err_dst_cache_original:
+ kfree(entry);
+err_ct_refcnt:
+ nf_ct_put(ct);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(flow_offload_alloc);
+
+void flow_offload_free(struct flow_offload *flow)
+{
+ struct flow_offload_entry *e;
+
+ dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
+ dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
+ e = container_of(flow, struct flow_offload_entry, flow);
+ kfree(e);
+}
+EXPORT_SYMBOL_GPL(flow_offload_free);
+
+void flow_offload_dead(struct flow_offload *flow)
+{
+ flow->flags |= FLOW_OFFLOAD_DYING;
+}
+EXPORT_SYMBOL_GPL(flow_offload_dead);
+
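+/* Insert both directional hash entries so lookups succeed for packets
+ * flowing either way. The timeout is stamped with the current jiffies
+ * here and is expected to be pushed forward by the receive path while
+ * the flow stays active.
+ */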
+int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
+{
+ flow->timeout = (u32)jiffies;
+
+ rhashtable_insert_fast(&flow_table->rhashtable,
+ &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+ *flow_table->type->params);
+ rhashtable_insert_fast(&flow_table->rhashtable,
+ &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+ *flow_table->type->params);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(flow_offload_add);
+
+void flow_offload_del(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
+{
+ struct flow_offload_entry *e;
+
+ rhashtable_remove_fast(&flow_table->rhashtable,
+ &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+ *flow_table->type->params);
+ rhashtable_remove_fast(&flow_table->rhashtable,
+ &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+ *flow_table->type->params);
+
+ e = container_of(flow, struct flow_offload_entry, flow);
+ kfree_rcu(e, rcu_head);
+}
+EXPORT_SYMBOL_GPL(flow_offload_del);
+
+struct flow_offload_tuple_rhash *
+flow_offload_lookup(struct nf_flowtable *flow_table,
+ struct flow_offload_tuple *tuple)
+{
+ return rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
+ *flow_table->type->params);
+}
+EXPORT_SYMBOL_GPL(flow_offload_lookup);
+
+static void nf_flow_release_ct(const struct flow_offload *flow)
+{
+ struct flow_offload_entry *e;
+
+ e = container_of(flow, struct flow_offload_entry, flow);
+ nf_ct_delete(e->ct, 0, 0);
+ nf_ct_put(e->ct);
+}
+
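+/* Visit each flow exactly once by skipping the reply-direction hash
+ * entry; -EAGAIN from the walker only means the table resized under
+ * us, so resume the walk rather than bailing out.
+ */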
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ void (*iter)(struct flow_offload *flow, void *data),
+ void *data)
+{
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct rhashtable_iter hti;
+ struct flow_offload *flow;
+ int err;
+
+ err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
+ if (err)
+ return err;
+
+ rhashtable_walk_start(&hti);
+
+ while ((tuplehash = rhashtable_walk_next(&hti))) {
+ if (IS_ERR(tuplehash)) {
+ err = PTR_ERR(tuplehash);
+ if (err != -EAGAIN)
+ goto out;
+
+ continue;
+ }
+ if (tuplehash->tuple.dir)
+ continue;
+
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
+
+ iter(flow, data);
+ }
+out:
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
+
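+/* timeout holds an absolute u32 jiffies stamp; the signed difference
+ * keeps the comparison correct across jiffies wraparound
+ */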
+static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+{
+ return (__s32)(flow->timeout - (u32)jiffies) <= 0;
+}
+
+static inline bool nf_flow_is_dying(const struct flow_offload *flow)
+{
+ return flow->flags & FLOW_OFFLOAD_DYING;
+}
+
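+/* periodic garbage collection: tear down flows that have expired or
+ * were marked dying, hand their conntrack entries back for deletion,
+ * then re-arm the work to run again in roughly one second
+ */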
+void nf_flow_offload_work_gc(struct work_struct *work)
+{
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct nf_flowtable *flow_table;
+ struct rhashtable_iter hti;
+ struct flow_offload *flow;
+ int err;
+
+ flow_table = container_of(work, struct nf_flowtable, gc_work.work);
+
+ err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
+ if (err)
+ goto schedule;
+
+ rhashtable_walk_start(&hti);
+
+ while ((tuplehash = rhashtable_walk_next(&hti))) {
+ if (IS_ERR(tuplehash)) {
+ err = PTR_ERR(tuplehash);
+ if (err != -EAGAIN)
+ goto out;
+
+ continue;
+ }
+ if (tuplehash->tuple.dir)
+ continue;
+
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
+
+ if (nf_flow_has_expired(flow) ||
+ nf_flow_is_dying(flow)) {
+ flow_offload_del(flow_table, flow);
+ nf_flow_release_ct(flow);
+ }
+ }
+out:
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+schedule:
+ queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
+
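+/* hashing and comparison cover only the tuple bytes that precede the
+ * 'dir' member, so the per-direction fields that follow it never
+ * influence the lookup key
+ */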
+static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
+{
+ const struct flow_offload_tuple *tuple = data;
+
+ return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
+}
+
+static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
+{
+ const struct flow_offload_tuple_rhash *tuplehash = data;
+
+ return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
+}
+
+static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct flow_offload_tuple *tuple = arg->key;
+ const struct flow_offload_tuple_rhash *x = ptr;
+
+ if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
+ return 1;
+
+ return 0;
+}
+
+const struct rhashtable_params nf_flow_offload_rhash_params = {
+ .head_offset = offsetof(struct flow_offload_tuple_rhash, node),
+ .hashfn = flow_offload_hash,
+ .obj_hashfn = flow_offload_hash_obj,
+ .obj_cmpfn = flow_offload_hash_cmp,
+ .automatic_shrinking = true,
+};
+EXPORT_SYMBOL_GPL(nf_flow_offload_rhash_params);
+
+static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
+ __be16 port, __be16 new_port)
+{
+ struct tcphdr *tcph;
+
+ if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+ skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+ return -1;
+
+ tcph = (void *)(skb_network_header(skb) + thoff);
+ inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
+
+ return 0;
+}
+
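+/*
+ * UDP: a zero checksum means "checksum disabled" over IPv4, so the fixup
+ * is only applied when a checksum is present (or still to be finalized
+ * via CHECKSUM_PARTIAL).  If the incremental update yields zero,
+ * CSUM_MANGLED_0 (0xffff, the equivalent one's-complement encoding) is
+ * substituted so the result is not mistaken for "no checksum".
+ */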
+static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
+ __be16 port, __be16 new_port)
+{
+ struct udphdr *udph;
+
+ if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+ skb_try_make_writable(skb, thoff + sizeof(*udph)))
+ return -1;
+
+ udph = (void *)(skb_network_header(skb) + thoff);
+ if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace2(&udph->check, skb, port,
+ new_port, true);
+ if (!udph->check)
+ udph->check = CSUM_MANGLED_0;
+ }
+
+ return 0;
+}
+
+static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, __be16 port, __be16 new_port)
+{
+ switch (protocol) {
+ case IPPROTO_TCP:
+ if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
+ return NF_DROP;
+ break;
+ case IPPROTO_UDP:
+ if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
+ return NF_DROP;
+ break;
+ }
+
+ return 0;
+}
+
+int nf_flow_snat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir)
+{
+ struct flow_ports *hdr;
+ __be16 port, new_port;
+
+ if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
+ skb_try_make_writable(skb, thoff + sizeof(*hdr)))
+ return -1;
+
+ hdr = (void *)(skb_network_header(skb) + thoff);
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ port = hdr->source;
+ new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
+ hdr->source = new_port;
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ port = hdr->dest;
+ new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
+ hdr->dest = new_port;
+ break;
+ default:
+ return -1;
+ }
+
+ return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+}
+EXPORT_SYMBOL_GPL(nf_flow_snat_port);
+
+int nf_flow_dnat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir)
+{
+ struct flow_ports *hdr;
+ __be16 port, new_port;
+
+ if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
+ skb_try_make_writable(skb, thoff + sizeof(*hdr)))
+ return -1;
+
+ hdr = (void *)(skb_network_header(skb) + thoff);
+
+ switch (dir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ port = hdr->dest;
+ new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
+ hdr->dest = new_port;
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ port = hdr->source;
+ new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
+ hdr->source = new_port;
+ break;
+ default:
+ return -1;
+ }
+
+ return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+}
+EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
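+
+/*
+ * Port rewrite summary, derived from the two helpers above:
+ *
+ *	SNAT, original dir:  source port -> reply tuple's dst_port
+ *	SNAT, reply dir:     dest port   -> original tuple's src_port
+ *	DNAT, original dir:  dest port   -> reply tuple's src_port
+ *	DNAT, reply dir:     source port -> original tuple's dst_port
+ *
+ * The translated value is always recovered from the opposite direction's
+ * tuple, which already encodes the NAT mapping conntrack set up before
+ * the flow was offloaded.
+ */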
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
new file mode 100644
index 000000000000..281209aeba8f
--- /dev/null
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
+
+static unsigned int
+nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ return nf_flow_offload_ip_hook(priv, skb, state);
+ case htons(ETH_P_IPV6):
+ return nf_flow_offload_ipv6_hook(priv, skb, state);
+ }
+
+ return NF_ACCEPT;
+}
+
+static struct nf_flowtable_type flowtable_inet = {
+ .family = NFPROTO_INET,
+ .params = &nf_flow_offload_rhash_params,
+ .gc = nf_flow_offload_work_gc,
+ .hook = nf_flow_offload_inet_hook,
+ .owner = THIS_MODULE,
+};
+
+static int __init nf_flow_inet_module_init(void)
+{
+ nft_register_flowtable_type(&flowtable_inet);
+
+ return 0;
+}
+
+static void __exit nf_flow_inet_module_exit(void)
+{
+ nft_unregister_flowtable_type(&flowtable_inet);
+}
+
+module_init(nf_flow_inet_module_init);
+module_exit(nf_flow_inet_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 44284cd2528d..18f6d7ae995b 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -10,7 +10,7 @@
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
const struct nf_hook_entries *entries, unsigned int index,
unsigned int verdict);
-unsigned int nf_queue_nf_hook_drop(struct net *net);
+void nf_queue_nf_hook_drop(struct net *net);
/* nf_log.c */
int __init netfilter_log_init(void);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 8bb152a7cca4..c2c1b16b7538 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -402,7 +402,6 @@ static int nflog_open(struct inode *inode, struct file *file)
}
static const struct file_operations nflog_file_ops = {
- .owner = THIS_MODULE,
.open = nflog_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index f7e21953b1de..d67a96a25a68 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -10,6 +10,8 @@
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
@@ -96,30 +98,56 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
-unsigned int nf_queue_nf_hook_drop(struct net *net)
+void nf_queue_nf_hook_drop(struct net *net)
{
const struct nf_queue_handler *qh;
- unsigned int count = 0;
rcu_read_lock();
qh = rcu_dereference(net->nf.queue_handler);
if (qh)
- count = qh->nf_hook_drop(net);
+ qh->nf_hook_drop(net);
rcu_read_unlock();
-
- return count;
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);
+static void nf_ip_saveroute(const struct sk_buff *skb,
+ struct nf_queue_entry *entry)
+{
+ struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ rt_info->tos = iph->tos;
+ rt_info->daddr = iph->daddr;
+ rt_info->saddr = iph->saddr;
+ rt_info->mark = skb->mark;
+ }
+}
+
+static void nf_ip6_saveroute(const struct sk_buff *skb,
+ struct nf_queue_entry *entry)
+{
+ struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ rt_info->daddr = iph->daddr;
+ rt_info->saddr = iph->saddr;
+ rt_info->mark = skb->mark;
+ }
+}
+
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
const struct nf_hook_entries *entries,
unsigned int index, unsigned int queuenum)
{
int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
- const struct nf_afinfo *afinfo;
const struct nf_queue_handler *qh;
struct net *net = state->net;
+ unsigned int route_key_size;
/* QUEUE == DROP if no one is waiting, to be safe. */
qh = rcu_dereference(net->nf.queue_handler);
@@ -128,11 +156,19 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
goto err;
}
- afinfo = nf_get_afinfo(state->pf);
- if (!afinfo)
- goto err;
+ switch (state->pf) {
+ case AF_INET:
+ route_key_size = sizeof(struct ip_rt_info);
+ break;
+ case AF_INET6:
+ route_key_size = sizeof(struct ip6_rt_info);
+ break;
+ default:
+ route_key_size = 0;
+ break;
+ }
- entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
+ entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
if (!entry) {
status = -ENOMEM;
goto err;
@@ -142,12 +178,21 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
.skb = skb,
.state = *state,
.hook_index = index,
- .size = sizeof(*entry) + afinfo->route_key_size,
+ .size = sizeof(*entry) + route_key_size,
};
nf_queue_entry_get_refs(entry);
skb_dst_force(skb);
- afinfo->saveroute(skb, entry);
+
+ switch (entry->state.pf) {
+ case AF_INET:
+ nf_ip_saveroute(skb, entry);
+ break;
+ case AF_INET6:
+ nf_ip6_saveroute(skb, entry);
+ break;
+ }
+
status = qh->outfn(entry, queuenum);
if (status < 0) {
@@ -204,13 +249,29 @@ repeat:
return NF_ACCEPT;
}
+static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
+{
+ switch (pf) {
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ case NFPROTO_BRIDGE:
+ return rcu_dereference(net->nf.hooks_bridge[hooknum]);
+#endif
+ case NFPROTO_IPV4:
+ return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
+ case NFPROTO_IPV6:
+ return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+}
+
/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
const struct nf_hook_entry *hook_entry;
const struct nf_hook_entries *hooks;
struct sk_buff *skb = entry->skb;
- const struct nf_afinfo *afinfo;
const struct net *net;
unsigned int i;
int err;
@@ -219,12 +282,12 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
net = entry->state.net;
pf = entry->state.pf;
- hooks = rcu_dereference(net->nf.hooks[pf][entry->state.hook]);
+ hooks = nf_hook_entries_head(net, pf, entry->state.hook);
nf_queue_entry_release_refs(entry);
i = entry->hook_index;
- if (WARN_ON_ONCE(i >= hooks->num_hook_entries)) {
+ if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
kfree_skb(skb);
kfree(entry);
return;
@@ -237,8 +300,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
if (verdict == NF_ACCEPT) {
- afinfo = nf_get_afinfo(entry->state.pf);
- if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
+ if (nf_reroute(skb, entry) < 0)
verdict = NF_DROP;
}
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 49bd8bb16b18..92139a087260 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -317,7 +317,6 @@ static int synproxy_cpu_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations synproxy_cpu_seq_fops = {
- .owner = THIS_MODULE,
.open = synproxy_cpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 10798b357481..0791813a1e7d 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -17,6 +17,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/net_namespace.h>
@@ -24,86 +25,20 @@
static LIST_HEAD(nf_tables_expressions);
static LIST_HEAD(nf_tables_objects);
-
-/**
- * nft_register_afinfo - register nf_tables address family info
- *
- * @afi: address family info to register
- *
- * Register the address family for use with nf_tables. Returns zero on
- * success or a negative errno code otherwise.
- */
-int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
-{
- INIT_LIST_HEAD(&afi->tables);
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_add_tail_rcu(&afi->list, &net->nft.af_info);
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- return 0;
-}
-EXPORT_SYMBOL_GPL(nft_register_afinfo);
-
-static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi);
-
-/**
- * nft_unregister_afinfo - unregister nf_tables address family info
- *
- * @afi: address family info to unregister
- *
- * Unregister the address family for use with nf_tables.
- */
-void nft_unregister_afinfo(struct net *net, struct nft_af_info *afi)
-{
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
- __nft_release_afinfo(net, afi);
- list_del_rcu(&afi->list);
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
-}
-EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
-
-static struct nft_af_info *nft_afinfo_lookup(struct net *net, int family)
-{
- struct nft_af_info *afi;
-
- list_for_each_entry(afi, &net->nft.af_info, list) {
- if (afi->family == family)
- return afi;
- }
- return NULL;
-}
-
-static struct nft_af_info *
-nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
-{
- struct nft_af_info *afi;
-
- afi = nft_afinfo_lookup(net, family);
- if (afi != NULL)
- return afi;
-#ifdef CONFIG_MODULES
- if (autoload) {
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- request_module("nft-afinfo-%u", family);
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
- afi = nft_afinfo_lookup(net, family);
- if (afi != NULL)
- return ERR_PTR(-EAGAIN);
- }
-#endif
- return ERR_PTR(-EAFNOSUPPORT);
-}
+static LIST_HEAD(nf_tables_flowtables);
+static u64 table_handle;
static void nft_ctx_init(struct nft_ctx *ctx,
struct net *net,
const struct sk_buff *skb,
const struct nlmsghdr *nlh,
- struct nft_af_info *afi,
+ u8 family,
struct nft_table *table,
struct nft_chain *chain,
const struct nlattr * const *nla)
{
ctx->net = net;
- ctx->afi = afi;
+ ctx->family = family;
ctx->table = table;
ctx->chain = chain;
ctx->nla = nla;
@@ -139,29 +74,26 @@ static void nft_trans_destroy(struct nft_trans *trans)
kfree(trans);
}
-static int nf_tables_register_hooks(struct net *net,
- const struct nft_table *table,
- struct nft_chain *chain,
- unsigned int hook_nops)
+static int nf_tables_register_hook(struct net *net,
+ const struct nft_table *table,
+ struct nft_chain *chain)
{
if (table->flags & NFT_TABLE_F_DORMANT ||
!nft_is_base_chain(chain))
return 0;
- return nf_register_net_hooks(net, nft_base_chain(chain)->ops,
- hook_nops);
+ return nf_register_net_hook(net, &nft_base_chain(chain)->ops);
}
-static void nf_tables_unregister_hooks(struct net *net,
- const struct nft_table *table,
- struct nft_chain *chain,
- unsigned int hook_nops)
+static void nf_tables_unregister_hook(struct net *net,
+ const struct nft_table *table,
+ struct nft_chain *chain)
{
if (table->flags & NFT_TABLE_F_DORMANT ||
!nft_is_base_chain(chain))
return;
- nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, hook_nops);
+ nf_unregister_net_hook(net, &nft_base_chain(chain)->ops);
}
static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
@@ -348,34 +280,99 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
return err;
}
+static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+ struct nft_flowtable *flowtable)
+{
+ struct nft_trans *trans;
+
+ trans = nft_trans_alloc(ctx, msg_type,
+ sizeof(struct nft_trans_flowtable));
+ if (trans == NULL)
+ return -ENOMEM;
+
+ if (msg_type == NFT_MSG_NEWFLOWTABLE)
+ nft_activate_next(ctx->net, flowtable);
+
+ nft_trans_flowtable(trans) = flowtable;
+ list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+
+ return 0;
+}
+
+static int nft_delflowtable(struct nft_ctx *ctx,
+ struct nft_flowtable *flowtable)
+{
+ int err;
+
+ err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
+ if (err < 0)
+ return err;
+
+ nft_deactivate_next(ctx->net, flowtable);
+ ctx->table->use--;
+
+ return err;
+}
+
/*
* Tables
*/
-static struct nft_table *nft_table_lookup(const struct nft_af_info *afi,
+static struct nft_table *nft_table_lookup(const struct net *net,
const struct nlattr *nla,
- u8 genmask)
+ u8 family, u8 genmask)
{
struct nft_table *table;
- list_for_each_entry(table, &afi->tables, list) {
+ list_for_each_entry(table, &net->nft.tables, list) {
if (!nla_strcmp(nla, table->name) &&
+ table->family == family &&
nft_active_genmask(table, genmask))
return table;
}
return NULL;
}
-static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi,
+static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
+ const struct nlattr *nla,
+ u8 genmask)
+{
+ struct nft_table *table;
+
+ list_for_each_entry(table, &net->nft.tables, list) {
+ if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
+ nft_active_genmask(table, genmask))
+ return table;
+ }
+ return NULL;
+}
+
+static struct nft_table *nf_tables_table_lookup(const struct net *net,
const struct nlattr *nla,
- u8 genmask)
+ u8 family, u8 genmask)
+{
+ struct nft_table *table;
+
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
+ table = nft_table_lookup(net, nla, family, genmask);
+ if (table != NULL)
+ return table;
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct nft_table *nf_tables_table_lookup_byhandle(const struct net *net,
+ const struct nlattr *nla,
+ u8 genmask)
{
struct nft_table *table;
if (nla == NULL)
return ERR_PTR(-EINVAL);
- table = nft_table_lookup(afi, nla, genmask);
+ table = nft_table_lookup_byhandle(net, nla, genmask);
if (table != NULL)
return table;
@@ -390,7 +387,7 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
static const struct nf_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
static const struct nf_chain_type *
-__nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
+__nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
{
int i;
@@ -403,22 +400,20 @@ __nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
}
static const struct nf_chain_type *
-nf_tables_chain_type_lookup(const struct nft_af_info *afi,
- const struct nlattr *nla,
- bool autoload)
+nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family, bool autoload)
{
const struct nf_chain_type *type;
- type = __nf_tables_chain_type_lookup(afi->family, nla);
+ type = __nf_tables_chain_type_lookup(nla, family);
if (type != NULL)
return type;
#ifdef CONFIG_MODULES
if (autoload) {
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- request_module("nft-chain-%u-%.*s", afi->family,
+ request_module("nft-chain-%u-%.*s", family,
nla_len(nla), (const char *)nla_data(nla));
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- type = __nf_tables_chain_type_lookup(afi->family, nla);
+ type = __nf_tables_chain_type_lookup(nla, family);
if (type != NULL)
return ERR_PTR(-EAGAIN);
}
@@ -430,6 +425,7 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
[NFTA_TABLE_NAME] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_TABLE_FLAGS] = { .type = NLA_U32 },
+ [NFTA_TABLE_HANDLE] = { .type = NLA_U64 },
};
static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
@@ -451,7 +447,9 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
- nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
+ nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
+ nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
+ NFTA_TABLE_PAD))
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -476,7 +474,7 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
goto err;
err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
- event, 0, ctx->afi->family, ctx->table);
+ event, 0, ctx->family, ctx->table);
if (err < 0) {
kfree_skb(skb);
goto err;
@@ -493,7 +491,6 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
- const struct nft_af_info *afi;
const struct nft_table *table;
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
@@ -502,30 +499,27 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
rcu_read_lock();
cb->seq = net->nft.base_seq;
- list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
- if (family != NFPROTO_UNSPEC && family != afi->family)
+ list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
continue;
- list_for_each_entry_rcu(table, &afi->tables, list) {
- if (idx < s_idx)
- goto cont;
- if (idx > s_idx)
- memset(&cb->args[1], 0,
- sizeof(cb->args) - sizeof(cb->args[0]));
- if (!nft_is_active(net, table))
- continue;
- if (nf_tables_fill_table_info(skb, net,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NFT_MSG_NEWTABLE,
- NLM_F_MULTI,
- afi->family, table) < 0)
- goto done;
-
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (!nft_is_active(net, table))
+ continue;
+ if (nf_tables_fill_table_info(skb, net,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWTABLE, NLM_F_MULTI,
+ table->family, table) < 0)
+ goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
- idx++;
- }
+ idx++;
}
done:
rcu_read_unlock();
@@ -540,7 +534,6 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_cur(net);
- const struct nft_af_info *afi;
const struct nft_table *table;
struct sk_buff *skb2;
int family = nfmsg->nfgen_family;
@@ -553,11 +546,8 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
return netlink_dump_start(nlsk, skb, nlh, &c);
}
- afi = nf_tables_afinfo_lookup(net, family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_TABLE_NAME], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -578,10 +568,7 @@ err:
return err;
}
-static void _nf_tables_table_disable(struct net *net,
- const struct nft_af_info *afi,
- struct nft_table *table,
- u32 cnt)
+static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt)
{
struct nft_chain *chain;
u32 i = 0;
@@ -595,14 +582,11 @@ static void _nf_tables_table_disable(struct net *net,
if (cnt && i++ == cnt)
break;
- nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
- afi->nops);
+ nf_unregister_net_hook(net, &nft_base_chain(chain)->ops);
}
}
-static int nf_tables_table_enable(struct net *net,
- const struct nft_af_info *afi,
- struct nft_table *table)
+static int nf_tables_table_enable(struct net *net, struct nft_table *table)
{
struct nft_chain *chain;
int err, i = 0;
@@ -613,8 +597,7 @@ static int nf_tables_table_enable(struct net *net,
if (!nft_is_base_chain(chain))
continue;
- err = nf_register_net_hooks(net, nft_base_chain(chain)->ops,
- afi->nops);
+ err = nf_register_net_hook(net, &nft_base_chain(chain)->ops);
if (err < 0)
goto err;
@@ -623,15 +606,13 @@ static int nf_tables_table_enable(struct net *net,
return 0;
err:
if (i)
- _nf_tables_table_disable(net, afi, table, i);
+ nft_table_disable(net, table, i);
return err;
}
-static void nf_tables_table_disable(struct net *net,
- const struct nft_af_info *afi,
- struct nft_table *table)
+static void nf_tables_table_disable(struct net *net, struct nft_table *table)
{
- _nf_tables_table_disable(net, afi, table, 0);
+ nft_table_disable(net, table, 0);
}
static int nf_tables_updtable(struct nft_ctx *ctx)
@@ -660,7 +641,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
nft_trans_table_enable(trans) = false;
} else if (!(flags & NFT_TABLE_F_DORMANT) &&
ctx->table->flags & NFT_TABLE_F_DORMANT) {
- ret = nf_tables_table_enable(ctx->net, ctx->afi, ctx->table);
+ ret = nf_tables_table_enable(ctx->net, ctx->table);
if (ret >= 0) {
ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
nft_trans_table_enable(trans) = true;
@@ -685,19 +666,14 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
const struct nlattr *name;
- struct nft_af_info *afi;
struct nft_table *table;
int family = nfmsg->nfgen_family;
u32 flags = 0;
struct nft_ctx ctx;
int err;
- afi = nf_tables_afinfo_lookup(net, family, true);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
name = nla[NFTA_TABLE_NAME];
- table = nf_tables_table_lookup(afi, name, genmask);
+ table = nf_tables_table_lookup(net, name, family, genmask);
if (IS_ERR(table)) {
if (PTR_ERR(table) != -ENOENT)
return PTR_ERR(table);
@@ -707,7 +683,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
- nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
return nf_tables_updtable(&ctx);
}
@@ -717,47 +693,45 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
return -EINVAL;
}
- err = -EAFNOSUPPORT;
- if (!try_module_get(afi->owner))
- goto err1;
-
err = -ENOMEM;
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (table == NULL)
- goto err2;
+ goto err_kzalloc;
table->name = nla_strdup(name, GFP_KERNEL);
if (table->name == NULL)
- goto err3;
+ goto err_strdup;
INIT_LIST_HEAD(&table->chains);
INIT_LIST_HEAD(&table->sets);
INIT_LIST_HEAD(&table->objects);
+ INIT_LIST_HEAD(&table->flowtables);
+ table->family = family;
table->flags = flags;
+ table->handle = ++table_handle;
- nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
if (err < 0)
- goto err4;
+ goto err_trans;
- list_add_tail_rcu(&table->list, &afi->tables);
+ list_add_tail_rcu(&table->list, &net->nft.tables);
return 0;
-err4:
+err_trans:
kfree(table->name);
-err3:
+err_strdup:
kfree(table);
-err2:
- module_put(afi->owner);
-err1:
+err_kzalloc:
return err;
}
static int nft_flush_table(struct nft_ctx *ctx)
{
- int err;
+ struct nft_flowtable *flowtable, *nft;
struct nft_chain *chain, *nc;
struct nft_object *obj, *ne;
struct nft_set *set, *ns;
+ int err;
list_for_each_entry(chain, &ctx->table->chains, list) {
if (!nft_is_active_next(ctx->net, chain))
@@ -774,7 +748,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
if (!nft_is_active_next(ctx->net, set))
continue;
- if (set->flags & NFT_SET_ANONYMOUS &&
+ if (nft_set_is_anonymous(set) &&
!list_empty(&set->bindings))
continue;
@@ -783,6 +757,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
goto out;
}
+ list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) {
+ err = nft_delflowtable(ctx, flowtable);
+ if (err < 0)
+ goto out;
+ }
+
list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) {
err = nft_delobj(ctx, obj);
if (err < 0)
@@ -807,30 +787,28 @@ out:
static int nft_flush(struct nft_ctx *ctx, int family)
{
- struct nft_af_info *afi;
struct nft_table *table, *nt;
const struct nlattr * const *nla = ctx->nla;
int err = 0;
- list_for_each_entry(afi, &ctx->net->nft.af_info, list) {
- if (family != AF_UNSPEC && afi->family != family)
+ list_for_each_entry_safe(table, nt, &ctx->net->nft.tables, list) {
+ if (family != AF_UNSPEC && table->family != family)
continue;
- ctx->afi = afi;
- list_for_each_entry_safe(table, nt, &afi->tables, list) {
- if (!nft_is_active_next(ctx->net, table))
- continue;
+ ctx->family = table->family;
- if (nla[NFTA_TABLE_NAME] &&
- nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0)
- continue;
+ if (!nft_is_active_next(ctx->net, table))
+ continue;
- ctx->table = table;
+ if (nla[NFTA_TABLE_NAME] &&
+ nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0)
+ continue;
- err = nft_flush_table(ctx);
- if (err < 0)
- goto out;
- }
+ ctx->table = table;
+
+ err = nft_flush_table(ctx);
+ if (err < 0)
+ goto out;
}
out:
return err;
@@ -843,20 +821,23 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
- struct nft_af_info *afi;
struct nft_table *table;
int family = nfmsg->nfgen_family;
struct nft_ctx ctx;
- nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla);
- if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL)
+ nft_ctx_init(&ctx, net, skb, nlh, 0, NULL, NULL, nla);
+ if (family == AF_UNSPEC ||
+ (!nla[NFTA_TABLE_NAME] && !nla[NFTA_TABLE_HANDLE]))
return nft_flush(&ctx, family);
- afi = nf_tables_afinfo_lookup(net, family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
+ if (nla[NFTA_TABLE_HANDLE])
+ table = nf_tables_table_lookup_byhandle(net,
+ nla[NFTA_TABLE_HANDLE],
+ genmask);
+ else
+ table = nf_tables_table_lookup(net, nla[NFTA_TABLE_NAME],
+ family, genmask);
- table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -864,7 +845,7 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
table->use > 0)
return -EBUSY;
- ctx.afi = afi;
+ ctx.family = family;
ctx.table = table;
return nft_flush_table(&ctx);
@@ -876,7 +857,6 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
kfree(ctx->table->name);
kfree(ctx->table);
- module_put(ctx->afi->owner);
}
int nft_register_chain_type(const struct nf_chain_type *ctype)
@@ -1026,7 +1006,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
if (nft_is_base_chain(chain)) {
const struct nft_base_chain *basechain = nft_base_chain(chain);
- const struct nf_hook_ops *ops = &basechain->ops[0];
+ const struct nf_hook_ops *ops = &basechain->ops;
struct nlattr *nest;
nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
@@ -1077,7 +1057,7 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
goto err;
err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
- event, 0, ctx->afi->family, ctx->table,
+ event, 0, ctx->family, ctx->table,
ctx->chain);
if (err < 0) {
kfree_skb(skb);
@@ -1095,7 +1075,6 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
- const struct nft_af_info *afi;
const struct nft_table *table;
const struct nft_chain *chain;
unsigned int idx = 0, s_idx = cb->args[0];
@@ -1105,31 +1084,30 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
rcu_read_lock();
cb->seq = net->nft.base_seq;
- list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
- if (family != NFPROTO_UNSPEC && family != afi->family)
+ list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
continue;
- list_for_each_entry_rcu(table, &afi->tables, list) {
- list_for_each_entry_rcu(chain, &table->chains, list) {
- if (idx < s_idx)
- goto cont;
- if (idx > s_idx)
- memset(&cb->args[1], 0,
- sizeof(cb->args) - sizeof(cb->args[0]));
- if (!nft_is_active(net, chain))
- continue;
- if (nf_tables_fill_chain_info(skb, net,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NFT_MSG_NEWCHAIN,
- NLM_F_MULTI,
- afi->family, table, chain) < 0)
- goto done;
+ list_for_each_entry_rcu(chain, &table->chains, list) {
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (!nft_is_active(net, chain))
+ continue;
+ if (nf_tables_fill_chain_info(skb, net,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWCHAIN,
+ NLM_F_MULTI,
+ table->family, table,
+ chain) < 0)
+ goto done;
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
- idx++;
- }
+ idx++;
}
}
done:
@@ -1145,7 +1123,6 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_cur(net);
- const struct nft_af_info *afi;
const struct nft_table *table;
const struct nft_chain *chain;
struct sk_buff *skb2;
@@ -1159,11 +1136,8 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
return netlink_dump_start(nlsk, skb, nlh, &c);
}
- afi = nf_tables_afinfo_lookup(net, family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_CHAIN_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -1227,13 +1201,13 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
static void nft_chain_stats_replace(struct nft_base_chain *chain,
struct nft_stats __percpu *newstats)
{
+ struct nft_stats __percpu *oldstats;
+
if (newstats == NULL)
return;
if (chain->stats) {
- struct nft_stats __percpu *oldstats =
- nft_dereference(chain->stats);
-
+ oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
rcu_assign_pointer(chain->stats, newstats);
synchronize_rcu();
free_percpu(oldstats);
@@ -1252,8 +1226,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
free_percpu(basechain->stats);
if (basechain->stats)
static_branch_dec(&nft_counters_enabled);
- if (basechain->ops[0].dev != NULL)
- dev_put(basechain->ops[0].dev);
+ if (basechain->ops.dev != NULL)
+ dev_put(basechain->ops.dev);
kfree(chain->name);
kfree(basechain);
} else {
@@ -1264,15 +1238,15 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
struct nft_chain_hook {
u32 num;
- u32 priority;
+ s32 priority;
const struct nf_chain_type *type;
struct net_device *dev;
};
static int nft_chain_parse_hook(struct net *net,
const struct nlattr * const nla[],
- struct nft_af_info *afi,
- struct nft_chain_hook *hook, bool create)
+ struct nft_chain_hook *hook, u8 family,
+ bool create)
{
struct nlattr *ha[NFTA_HOOK_MAX + 1];
const struct nf_chain_type *type;
@@ -1289,27 +1263,29 @@ static int nft_chain_parse_hook(struct net *net,
return -EINVAL;
hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
- if (hook->num >= afi->nhooks)
- return -EINVAL;
-
hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
- type = chain_type[afi->family][NFT_CHAIN_T_DEFAULT];
+ type = chain_type[family][NFT_CHAIN_T_DEFAULT];
if (nla[NFTA_CHAIN_TYPE]) {
- type = nf_tables_chain_type_lookup(afi, nla[NFTA_CHAIN_TYPE],
- create);
+ type = nf_tables_chain_type_lookup(nla[NFTA_CHAIN_TYPE],
+ family, create);
if (IS_ERR(type))
return PTR_ERR(type);
}
if (!(type->hook_mask & (1 << hook->num)))
return -EOPNOTSUPP;
+
+ if (type->type == NFT_CHAIN_T_NAT &&
+ hook->priority <= NF_IP_PRI_CONNTRACK)
+ return -EOPNOTSUPP;
+
if (!try_module_get(type->owner))
return -ENOENT;
hook->type = type;
hook->dev = NULL;
- if (afi->flags & NFT_AF_NEEDS_DEV) {
+ if (family == NFPROTO_NETDEV) {
char ifname[IFNAMSIZ];
if (!ha[NFTA_HOOK_DEV]) {
@@ -1344,12 +1320,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
{
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
- struct nft_af_info *afi = ctx->afi;
struct nft_base_chain *basechain;
struct nft_stats __percpu *stats;
struct net *net = ctx->net;
struct nft_chain *chain;
- unsigned int i;
int err;
if (table->use == UINT_MAX)
@@ -1358,9 +1332,8 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (nla[NFTA_CHAIN_HOOK]) {
struct nft_chain_hook hook;
struct nf_hook_ops *ops;
- nf_hookfn *hookfn;
- err = nft_chain_parse_hook(net, nla, afi, &hook, create);
+ err = nft_chain_parse_hook(net, nla, &hook, family, create);
if (err < 0)
return err;
@@ -1384,23 +1357,19 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
static_branch_inc(&nft_counters_enabled);
}
- hookfn = hook.type->hooks[hook.num];
basechain->type = hook.type;
chain = &basechain->chain;
- for (i = 0; i < afi->nops; i++) {
- ops = &basechain->ops[i];
- ops->pf = family;
- ops->hooknum = hook.num;
- ops->priority = hook.priority;
- ops->priv = chain;
- ops->hook = afi->hooks[ops->hooknum];
- ops->dev = hook.dev;
- if (hookfn)
- ops->hook = hookfn;
- if (afi->hook_ops_init)
- afi->hook_ops_init(ops, i);
- }
+ ops = &basechain->ops;
+ ops->pf = family;
+ ops->hooknum = hook.num;
+ ops->priority = hook.priority;
+ ops->priv = chain;
+ ops->hook = hook.type->hooks[ops->hooknum];
+ ops->dev = hook.dev;
+
+ if (basechain->type->type == NFT_CHAIN_T_NAT)
+ ops->nat_hook = true;
chain->flags |= NFT_BASE_CHAIN;
basechain->policy = policy;
@@ -1418,7 +1387,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
goto err1;
}
- err = nf_tables_register_hooks(net, table, chain, afi->nops);
+ err = nf_tables_register_hook(net, table, chain);
if (err < 0)
goto err1;
@@ -1432,7 +1401,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
return 0;
err2:
- nf_tables_unregister_hooks(net, table, chain, afi->nops);
+ nf_tables_unregister_hook(net, table, chain);
err1:
nf_tables_chain_destroy(chain);
@@ -1445,20 +1414,19 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
struct nft_chain *chain = ctx->chain;
- struct nft_af_info *afi = ctx->afi;
struct nft_base_chain *basechain;
struct nft_stats *stats = NULL;
struct nft_chain_hook hook;
const struct nlattr *name;
struct nf_hook_ops *ops;
struct nft_trans *trans;
- int err, i;
+ int err;
if (nla[NFTA_CHAIN_HOOK]) {
if (!nft_is_base_chain(chain))
return -EBUSY;
- err = nft_chain_parse_hook(ctx->net, nla, ctx->afi, &hook,
+ err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
create);
if (err < 0)
return err;
@@ -1469,14 +1437,12 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
return -EBUSY;
}
- for (i = 0; i < afi->nops; i++) {
- ops = &basechain->ops[i];
- if (ops->hooknum != hook.num ||
- ops->priority != hook.priority ||
- ops->dev != hook.dev) {
- nft_chain_release_hook(&hook);
- return -EBUSY;
- }
+ ops = &basechain->ops;
+ if (ops->hooknum != hook.num ||
+ ops->priority != hook.priority ||
+ ops->dev != hook.dev) {
+ nft_chain_release_hook(&hook);
+ return -EBUSY;
}
nft_chain_release_hook(&hook);
}
@@ -1539,7 +1505,6 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
const struct nlattr * uninitialized_var(name);
u8 genmask = nft_genmask_next(net);
int family = nfmsg->nfgen_family;
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_chain *chain;
u8 policy = NF_ACCEPT;
@@ -1549,11 +1514,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
- afi = nf_tables_afinfo_lookup(net, family, true);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_CHAIN_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -1593,7 +1555,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
}
}
- nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
if (chain != NULL) {
if (nlh->nlmsg_flags & NLM_F_EXCL)
@@ -1614,24 +1576,26 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_chain *chain;
struct nft_rule *rule;
int family = nfmsg->nfgen_family;
struct nft_ctx ctx;
+ u64 handle;
u32 use;
int err;
- afi = nf_tables_afinfo_lookup(net, family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_CHAIN_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
- chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask);
+ if (nla[NFTA_CHAIN_HANDLE]) {
+ handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
+ chain = nf_tables_chain_lookup_byhandle(table, handle, genmask);
+ } else {
+ chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask);
+ }
if (IS_ERR(chain))
return PTR_ERR(chain);
@@ -1639,7 +1603,7 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
chain->use > 0)
return -EBUSY;
- nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
use = chain->use;
list_for_each_entry(rule, &chain->rules, list) {
@@ -1804,7 +1768,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
if (err < 0)
return err;
- type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]);
+ type = nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
if (IS_ERR(type))
return PTR_ERR(type);
@@ -2027,7 +1991,7 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx,
goto err;
err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
- event, 0, ctx->afi->family, ctx->table,
+ event, 0, ctx->family, ctx->table,
ctx->chain, rule);
if (err < 0) {
kfree_skb(skb);
@@ -2051,7 +2015,6 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
const struct nft_rule_dump_ctx *ctx = cb->data;
- const struct nft_af_info *afi;
const struct nft_table *table;
const struct nft_chain *chain;
const struct nft_rule *rule;
@@ -2062,39 +2025,37 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
rcu_read_lock();
cb->seq = net->nft.base_seq;
- list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
- if (family != NFPROTO_UNSPEC && family != afi->family)
+ list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+ continue;
+
+ if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
continue;
- list_for_each_entry_rcu(table, &afi->tables, list) {
- if (ctx && ctx->table &&
- strcmp(ctx->table, table->name) != 0)
+ list_for_each_entry_rcu(chain, &table->chains, list) {
+ if (ctx && ctx->chain &&
+ strcmp(ctx->chain, chain->name) != 0)
continue;
- list_for_each_entry_rcu(chain, &table->chains, list) {
- if (ctx && ctx->chain[0] &&
- strcmp(ctx->chain, chain->name) != 0)
- continue;
-
- list_for_each_entry_rcu(rule, &chain->rules, list) {
- if (!nft_is_active(net, rule))
- goto cont;
- if (idx < s_idx)
- goto cont;
- if (idx > s_idx)
- memset(&cb->args[1], 0,
- sizeof(cb->args) - sizeof(cb->args[0]));
- if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NFT_MSG_NEWRULE,
- NLM_F_MULTI | NLM_F_APPEND,
- afi->family, table, chain, rule) < 0)
- goto done;
-
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ list_for_each_entry_rcu(rule, &chain->rules, list) {
+ if (!nft_is_active(net, rule))
+ goto cont;
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWRULE,
+ NLM_F_MULTI | NLM_F_APPEND,
+ table->family,
+ table, chain, rule) < 0)
+ goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
- idx++;
- }
+ idx++;
}
}
}
@@ -2124,7 +2085,6 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_cur(net);
- const struct nft_af_info *afi;
const struct nft_table *table;
const struct nft_chain *chain;
const struct nft_rule *rule;
@@ -2168,11 +2128,8 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
return netlink_dump_start(nlsk, skb, nlh, &c);
}
- afi = nf_tables_afinfo_lookup(net, family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_RULE_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -2229,7 +2186,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
- struct nft_af_info *afi;
+ int family = nfmsg->nfgen_family;
struct nft_table *table;
struct nft_chain *chain;
struct nft_rule *rule, *old_rule = NULL;
@@ -2245,11 +2202,8 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
- afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_RULE_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -2288,7 +2242,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
return PTR_ERR(old_rule);
}
- nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
n = 0;
size = 0;
@@ -2412,18 +2366,14 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_chain *chain = NULL;
struct nft_rule *rule;
int family = nfmsg->nfgen_family, err = 0;
struct nft_ctx ctx;
- afi = nf_tables_afinfo_lookup(net, family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_RULE_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -2434,7 +2384,7 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
return PTR_ERR(chain);
}
- nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
if (chain) {
if (nla[NFTA_RULE_HANDLE]) {
@@ -2601,6 +2551,7 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
[NFTA_SET_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
[NFTA_SET_OBJ_TYPE] = { .type = NLA_U32 },
+ [NFTA_SET_HANDLE] = { .type = NLA_U64 },
};
static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
@@ -2614,26 +2565,17 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
u8 genmask)
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
- struct nft_af_info *afi = NULL;
+ int family = nfmsg->nfgen_family;
struct nft_table *table = NULL;
- if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
- afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
- }
-
if (nla[NFTA_SET_TABLE] != NULL) {
- if (afi == NULL)
- return -EAFNOSUPPORT;
-
- table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE],
- genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_SET_TABLE],
+ family, genmask);
if (IS_ERR(table))
return PTR_ERR(table);
}
- nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
return 0;
}
@@ -2653,6 +2595,22 @@ static struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
return ERR_PTR(-ENOENT);
}
+static struct nft_set *nf_tables_set_lookup_byhandle(const struct nft_table *table,
+ const struct nlattr *nla, u8 genmask)
+{
+ struct nft_set *set;
+
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
+ list_for_each_entry(set, &table->sets, list) {
+ if (be64_to_cpu(nla_get_be64(nla)) == set->handle &&
+ nft_active_genmask(set, genmask))
+ return set;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
static struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
const struct nlattr *nla,
u8 genmask)
@@ -2760,7 +2718,7 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = ctx->afi->family;
+ nfmsg->nfgen_family = ctx->family;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff);
@@ -2768,6 +2726,9 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_NAME, set->name))
goto nla_put_failure;
+ if (nla_put_be64(skb, NFTA_SET_HANDLE, cpu_to_be64(set->handle),
+ NFTA_SET_PAD))
+ goto nla_put_failure;
if (set->flags != 0)
if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
goto nla_put_failure;
@@ -2852,10 +2813,8 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nft_set *set;
unsigned int idx, s_idx = cb->args[0];
- struct nft_af_info *afi;
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
struct net *net = sock_net(skb->sk);
- int cur_family = cb->args[3];
struct nft_ctx *ctx = cb->data, ctx_set;
if (cb->args[1])
@@ -2864,51 +2823,44 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
cb->seq = net->nft.base_seq;
- list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
- if (ctx->afi && ctx->afi != afi)
+ list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ if (ctx->family != NFPROTO_UNSPEC &&
+ ctx->family != table->family)
continue;
- if (cur_family) {
- if (afi->family != cur_family)
+ if (ctx->table && ctx->table != table)
+ continue;
+
+ if (cur_table) {
+ if (cur_table != table)
continue;
- cur_family = 0;
+ cur_table = NULL;
}
- list_for_each_entry_rcu(table, &afi->tables, list) {
- if (ctx->table && ctx->table != table)
- continue;
+ idx = 0;
+ list_for_each_entry_rcu(set, &table->sets, list) {
+ if (idx < s_idx)
+ goto cont;
+ if (!nft_is_active(net, set))
+ goto cont;
- if (cur_table) {
- if (cur_table != table)
- continue;
+ ctx_set = *ctx;
+ ctx_set.table = table;
+ ctx_set.family = table->family;
- cur_table = NULL;
+ if (nf_tables_fill_set(skb, &ctx_set, set,
+ NFT_MSG_NEWSET,
+ NLM_F_MULTI) < 0) {
+ cb->args[0] = idx;
+ cb->args[2] = (unsigned long) table;
+ goto done;
}
- idx = 0;
- list_for_each_entry_rcu(set, &table->sets, list) {
- if (idx < s_idx)
- goto cont;
- if (!nft_is_active(net, set))
- goto cont;
-
- ctx_set = *ctx;
- ctx_set.table = table;
- ctx_set.afi = afi;
- if (nf_tables_fill_set(skb, &ctx_set, set,
- NFT_MSG_NEWSET,
- NLM_F_MULTI) < 0) {
- cb->args[0] = idx;
- cb->args[2] = (unsigned long) table;
- cb->args[3] = afi->family;
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
- idx++;
- }
- if (s_idx)
- s_idx = 0;
+ idx++;
}
+ if (s_idx)
+ s_idx = 0;
}
cb->args[1] = 1;
done:
@@ -3006,8 +2958,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
+ int family = nfmsg->nfgen_family;
const struct nft_set_ops *ops;
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_set *set;
struct nft_ctx ctx;
@@ -3114,15 +3066,12 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
- afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_SET_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
- nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME], genmask);
if (IS_ERR(set)) {
@@ -3188,6 +3137,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
set->udata = udata;
set->timeout = timeout;
set->gc_int = gc_int;
+ set->handle = nf_tables_alloc_handle(table);
err = ops->init(set, &desc, nla);
if (err < 0)
@@ -3245,7 +3195,10 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
if (err < 0)
return err;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask);
+ if (nla[NFTA_SET_HANDLE])
+ set = nf_tables_set_lookup_byhandle(ctx.table, nla[NFTA_SET_HANDLE], genmask);
+ else
+ set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
@@ -3277,7 +3230,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *i;
struct nft_set_iter iter;
- if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+ if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
return -EBUSY;
if (binding->flags & NFT_SET_MAP) {
@@ -3312,7 +3265,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
{
list_del_rcu(&binding->list);
- if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
+ if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
nft_is_active(ctx->net, set))
nf_tables_set_destroy(ctx, set);
}
@@ -3380,19 +3333,15 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
u8 genmask)
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
- struct nft_af_info *afi;
+ int family = nfmsg->nfgen_family;
struct nft_table *table;
- afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE],
- genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE],
+ family, genmask);
if (IS_ERR(table))
return PTR_ERR(table);
- nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
return 0;
}
@@ -3497,7 +3446,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nft_set_dump_ctx *dump_ctx = cb->data;
struct net *net = sock_net(skb->sk);
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_set *set;
struct nft_set_dump_args args;
@@ -3509,21 +3457,19 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
int event;
rcu_read_lock();
- list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
- if (afi != dump_ctx->ctx.afi)
+ list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
+ dump_ctx->ctx.family != table->family)
continue;
- list_for_each_entry_rcu(table, &afi->tables, list) {
- if (table != dump_ctx->ctx.table)
- continue;
+ if (table != dump_ctx->ctx.table)
+ continue;
- list_for_each_entry_rcu(set, &table->sets, list) {
- if (set == dump_ctx->set) {
- set_found = true;
- break;
- }
+ list_for_each_entry_rcu(set, &table->sets, list) {
+ if (set == dump_ctx->set) {
+ set_found = true;
+ break;
}
- break;
}
break;
}
@@ -3543,7 +3489,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
goto nla_put_failure;
nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = afi->family;
+ nfmsg->nfgen_family = table->family;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
@@ -3606,7 +3552,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
goto nla_put_failure;
nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = ctx->afi->family;
+ nfmsg->nfgen_family = ctx->family;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff);
@@ -3963,7 +3909,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
list_for_each_entry(binding, &set->bindings, list) {
struct nft_ctx bind_ctx = {
.net = ctx->net,
- .afi = ctx->afi,
+ .family = ctx->family,
.table = ctx->table,
.chain = (struct nft_chain *)binding->chain,
};
@@ -4382,6 +4328,21 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
}
EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
+struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table,
+ const struct nlattr *nla,
+ u32 objtype, u8 genmask)
+{
+ struct nft_object *obj;
+
+ list_for_each_entry(obj, &table->objects, list) {
+ if (be64_to_cpu(nla_get_be64(nla)) == obj->handle &&
+ objtype == obj->ops->type->type &&
+ nft_active_genmask(obj, genmask))
+ return obj;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
[NFTA_OBJ_TABLE] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
@@ -4389,6 +4350,7 @@ static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
.len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_OBJ_TYPE] = { .type = NLA_U32 },
[NFTA_OBJ_DATA] = { .type = NLA_NESTED },
+ [NFTA_OBJ_HANDLE] = { .type = NLA_U64 },
};
static struct nft_object *nft_obj_init(const struct nft_ctx *ctx,
@@ -4494,7 +4456,6 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
const struct nft_object_type *type;
u8 genmask = nft_genmask_next(net);
int family = nfmsg->nfgen_family;
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_object *obj;
struct nft_ctx ctx;
@@ -4506,11 +4467,8 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
!nla[NFTA_OBJ_DATA])
return -EINVAL;
- afi = nf_tables_afinfo_lookup(net, family, true);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_OBJ_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_OBJ_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -4528,7 +4486,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
return 0;
}
- nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
type = nft_obj_type_get(objtype);
if (IS_ERR(type))
@@ -4540,6 +4498,8 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
goto err1;
}
obj->table = table;
+ obj->handle = nf_tables_alloc_handle(table);
+
obj->name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL);
if (!obj->name) {
err = -ENOMEM;
@@ -4586,7 +4546,9 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
nla_put_string(skb, NFTA_OBJ_NAME, obj->name) ||
nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
- nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset))
+ nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset) ||
+ nla_put_be64(skb, NFTA_OBJ_HANDLE, cpu_to_be64(obj->handle),
+ NFTA_OBJ_PAD))
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -4605,7 +4567,6 @@ struct nft_obj_filter {
static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
- const struct nft_af_info *afi;
const struct nft_table *table;
unsigned int idx = 0, s_idx = cb->args[0];
struct nft_obj_filter *filter = cb->data;
@@ -4620,38 +4581,37 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
cb->seq = net->nft.base_seq;
- list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
- if (family != NFPROTO_UNSPEC && family != afi->family)
+ list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
continue;
- list_for_each_entry_rcu(table, &afi->tables, list) {
- list_for_each_entry_rcu(obj, &table->objects, list) {
- if (!nft_is_active(net, obj))
- goto cont;
- if (idx < s_idx)
- goto cont;
- if (idx > s_idx)
- memset(&cb->args[1], 0,
- sizeof(cb->args) - sizeof(cb->args[0]));
- if (filter && filter->table[0] &&
- strcmp(filter->table, table->name))
- goto cont;
- if (filter &&
- filter->type != NFT_OBJECT_UNSPEC &&
- obj->ops->type->type != filter->type)
- goto cont;
+ list_for_each_entry_rcu(obj, &table->objects, list) {
+ if (!nft_is_active(net, obj))
+ goto cont;
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (filter && filter->table[0] &&
+ strcmp(filter->table, table->name))
+ goto cont;
+ if (filter &&
+ filter->type != NFT_OBJECT_UNSPEC &&
+ obj->ops->type->type != filter->type)
+ goto cont;
- if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NFT_MSG_NEWOBJ,
- NLM_F_MULTI | NLM_F_APPEND,
- afi->family, table, obj, reset) < 0)
- goto done;
+ if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWOBJ,
+ NLM_F_MULTI | NLM_F_APPEND,
+ table->family, table,
+ obj, reset) < 0)
+ goto done;
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
- idx++;
- }
+ idx++;
}
}
done:
@@ -4665,8 +4625,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
{
struct nft_obj_filter *filter = cb->data;
- kfree(filter->table);
- kfree(filter);
+ if (filter) {
+ kfree(filter->table);
+ kfree(filter);
+ }
return 0;
}
@@ -4701,7 +4663,6 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_cur(net);
int family = nfmsg->nfgen_family;
- const struct nft_af_info *afi;
const struct nft_table *table;
struct nft_object *obj;
struct sk_buff *skb2;
@@ -4732,11 +4693,8 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
!nla[NFTA_OBJ_TYPE])
return -EINVAL;
- afi = nf_tables_afinfo_lookup(net, family, false);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_OBJ_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_OBJ_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -4782,32 +4740,33 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
int family = nfmsg->nfgen_family;
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_object *obj;
struct nft_ctx ctx;
u32 objtype;
if (!nla[NFTA_OBJ_TYPE] ||
- !nla[NFTA_OBJ_NAME])
+ (!nla[NFTA_OBJ_NAME] && !nla[NFTA_OBJ_HANDLE]))
return -EINVAL;
- afi = nf_tables_afinfo_lookup(net, family, true);
- if (IS_ERR(afi))
- return PTR_ERR(afi);
-
- table = nf_tables_table_lookup(afi, nla[NFTA_OBJ_TABLE], genmask);
+ table = nf_tables_table_lookup(net, nla[NFTA_OBJ_TABLE], family,
+ genmask);
if (IS_ERR(table))
return PTR_ERR(table);
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
- obj = nf_tables_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask);
+ if (nla[NFTA_OBJ_HANDLE])
+ obj = nf_tables_obj_lookup_byhandle(table, nla[NFTA_OBJ_HANDLE],
+ objtype, genmask);
+ else
+ obj = nf_tables_obj_lookup(table, nla[NFTA_OBJ_NAME],
+ objtype, genmask);
if (IS_ERR(obj))
return PTR_ERR(obj);
if (obj->use > 0)
return -EBUSY;
- nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
return nft_delobj(&ctx, obj);
}
@@ -4845,7 +4804,613 @@ static void nf_tables_obj_notify(const struct nft_ctx *ctx,
struct nft_object *obj, int event)
{
nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
- ctx->afi->family, ctx->report, GFP_KERNEL);
+ ctx->family, ctx->report, GFP_KERNEL);
+}
+
+/*
+ * Flow tables
+ */
+void nft_register_flowtable_type(struct nf_flowtable_type *type)
+{
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_add_tail_rcu(&type->list, &nf_tables_flowtables);
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_register_flowtable_type);
+
+void nft_unregister_flowtable_type(struct nf_flowtable_type *type)
+{
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_del_rcu(&type->list);
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_flowtable_type);
+
+static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
+ [NFTA_FLOWTABLE_TABLE] = { .type = NLA_STRING,
+ .len = NFT_NAME_MAXLEN - 1 },
+ [NFTA_FLOWTABLE_NAME] = { .type = NLA_STRING,
+ .len = NFT_NAME_MAXLEN - 1 },
+ [NFTA_FLOWTABLE_HOOK] = { .type = NLA_NESTED },
+ [NFTA_FLOWTABLE_HANDLE] = { .type = NLA_U64 },
+};
+
+struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
+ const struct nlattr *nla,
+ u8 genmask)
+{
+ struct nft_flowtable *flowtable;
+
+ list_for_each_entry(flowtable, &table->flowtables, list) {
+ if (!nla_strcmp(nla, flowtable->name) &&
+ nft_active_genmask(flowtable, genmask))
+ return flowtable;
+ }
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(nf_tables_flowtable_lookup);
+
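+/* As above, but match on the 64-bit NFTA_FLOWTABLE_HANDLE attribute. */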
+struct nft_flowtable *
+nf_tables_flowtable_lookup_byhandle(const struct nft_table *table,
+ const struct nlattr *nla, u8 genmask)
+{
+ struct nft_flowtable *flowtable;
+
+ list_for_each_entry(flowtable, &table->flowtables, list) {
+ if (be64_to_cpu(nla_get_be64(nla)) == flowtable->handle &&
+ nft_active_genmask(flowtable, genmask))
+ return flowtable;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+#define NFT_FLOWTABLE_DEVICE_MAX 8
+
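+/*
+ * Parse the nested NFTA_FLOWTABLE_HOOK_DEVS attribute into dev_array.
+ * A reference is taken on each device via dev_get_by_name(), and the
+ * number of devices acquired is reported back through *len so the
+ * caller can release them. An empty device list is rejected.
+ */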
+static int nf_tables_parse_devices(const struct nft_ctx *ctx,
+ const struct nlattr *attr,
+ struct net_device *dev_array[], int *len)
+{
+ const struct nlattr *tmp;
+ struct net_device *dev;
+ char ifname[IFNAMSIZ];
+ int rem, n = 0, err;
+
+ nla_for_each_nested(tmp, attr, rem) {
+ if (nla_type(tmp) != NFTA_DEVICE_NAME) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ nla_strlcpy(ifname, tmp, IFNAMSIZ);
+ dev = dev_get_by_name(ctx->net, ifname);
+ if (!dev) {
+ err = -ENOENT;
+ goto err1;
+ }
+
+ dev_array[n++] = dev;
+ if (n == NFT_FLOWTABLE_DEVICE_MAX) {
+ err = -EFBIG;
+ goto err1;
+ }
+ }
+ if (!n)
+ return -EINVAL;
+
+ err = 0;
+err1:
+ *len = n;
+ return err;
+}
+
+static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX + 1] = {
+ [NFTA_FLOWTABLE_HOOK_NUM] = { .type = NLA_U32 },
+ [NFTA_FLOWTABLE_HOOK_PRIORITY] = { .type = NLA_U32 },
+ [NFTA_FLOWTABLE_HOOK_DEVS] = { .type = NLA_NESTED },
+};
+
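+/*
+ * Build the nf_hook_ops array for the flow table: one netdev ingress
+ * hook per device. The device references taken during parsing are
+ * dropped again before returning; stale ops[].dev pointers are later
+ * cleared by the NETDEV_UNREGISTER notifier below.
+ */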
+static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
+ const struct nlattr *attr,
+ struct nft_flowtable *flowtable)
+{
+ struct net_device *dev_array[NFT_FLOWTABLE_DEVICE_MAX];
+ struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1];
+ struct nf_hook_ops *ops;
+ int hooknum, priority;
+ int err, n = 0, i;
+
+ err = nla_parse_nested(tb, NFTA_FLOWTABLE_HOOK_MAX, attr,
+ nft_flowtable_hook_policy, NULL);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
+ !tb[NFTA_FLOWTABLE_HOOK_PRIORITY] ||
+ !tb[NFTA_FLOWTABLE_HOOK_DEVS])
+ return -EINVAL;
+
+ hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
+ if (hooknum != NF_NETDEV_INGRESS)
+ return -EINVAL;
+
+ priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
+
+ err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS],
+ dev_array, &n);
+ if (err < 0)
+ goto err1;
+
+ ops = kzalloc(sizeof(struct nf_hook_ops) * n, GFP_KERNEL);
+ if (!ops) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ flowtable->hooknum = hooknum;
+ flowtable->priority = priority;
+ flowtable->ops = ops;
+ flowtable->ops_len = n;
+
+ for (i = 0; i < n; i++) {
+ flowtable->ops[i].pf = NFPROTO_NETDEV;
+ flowtable->ops[i].hooknum = hooknum;
+ flowtable->ops[i].priority = priority;
+ flowtable->ops[i].priv = &flowtable->data.rhashtable;
+ flowtable->ops[i].hook = flowtable->data.type->hook;
+ flowtable->ops[i].dev = dev_array[i];
+ }
+
+ err = 0;
+err1:
+ for (i = 0; i < n; i++)
+ dev_put(dev_array[i]);
+
+ return err;
+}
+
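+/*
+ * Flow table type lookup, with "nf-flowtable-<family>" module autoload
+ * as fallback. Returning -EAGAIN after a successful module load makes
+ * nfnetlink replay the batch, as for chain types and expressions.
+ */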
+static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family)
+{
+ const struct nf_flowtable_type *type;
+
+ list_for_each_entry(type, &nf_tables_flowtables, list) {
+ if (family == type->family)
+ return type;
+ }
+ return NULL;
+}
+
+static const struct nf_flowtable_type *nft_flowtable_type_get(u8 family)
+{
+ const struct nf_flowtable_type *type;
+
+ type = __nft_flowtable_type_get(family);
+ if (type != NULL && try_module_get(type->owner))
+ return type;
+
+#ifdef CONFIG_MODULES
+ if (type == NULL) {
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ request_module("nf-flowtable-%u", family);
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ if (__nft_flowtable_type_get(family))
+ return ERR_PTR(-EAGAIN);
+ }
+#endif
+ return ERR_PTR(-ENOENT);
+}
+
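+/*
+ * Walk all flow tables in this netns under RCU, passing the core
+ * nf_flowtable part to the callback. Used by the flow_offload
+ * expression to flush entries when a device goes down.
+ */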
+void nft_flow_table_iterate(struct net *net,
+ void (*iter)(struct nf_flowtable *flowtable, void *data),
+ void *data)
+{
+ struct nft_flowtable *flowtable;
+ const struct nft_table *table;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+ iter(&flowtable->data, data);
+ }
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nft_flow_table_iterate);
+
+static void nft_unregister_flowtable_net_hooks(struct net *net,
+ struct nft_flowtable *flowtable)
+{
+ int i;
+
+ for (i = 0; i < flowtable->ops_len; i++) {
+ if (!flowtable->ops[i].dev)
+ continue;
+
+ nf_unregister_net_hook(net, &flowtable->ops[i]);
+ }
+}
+
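+/*
+ * NFT_MSG_NEWFLOWTABLE: create a flow table in an existing table,
+ * register one ingress hook per device, kick off the gc work and queue
+ * the transaction; the new flow table only becomes active once the
+ * batch is committed (nft_clear() in nf_tables_commit()).
+ */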
+static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[],
+ struct netlink_ext_ack *extack)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ const struct nf_flowtable_type *type;
+ u8 genmask = nft_genmask_next(net);
+ int family = nfmsg->nfgen_family;
+ struct nft_flowtable *flowtable;
+ struct nft_table *table;
+ struct nft_ctx ctx;
+ int err, i, k;
+
+ if (!nla[NFTA_FLOWTABLE_TABLE] ||
+ !nla[NFTA_FLOWTABLE_NAME] ||
+ !nla[NFTA_FLOWTABLE_HOOK])
+ return -EINVAL;
+
+ table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
+ family, genmask);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+ genmask);
+ if (IS_ERR(flowtable)) {
+ err = PTR_ERR(flowtable);
+ if (err != -ENOENT)
+ return err;
+ } else {
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+
+ return 0;
+ }
+
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
+
+ flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL);
+ if (!flowtable)
+ return -ENOMEM;
+
+ flowtable->table = table;
+ flowtable->handle = nf_tables_alloc_handle(table);
+
+ flowtable->name = nla_strdup(nla[NFTA_FLOWTABLE_NAME], GFP_KERNEL);
+ if (!flowtable->name) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ type = nft_flowtable_type_get(family);
+ if (IS_ERR(type)) {
+ err = PTR_ERR(type);
+ goto err2;
+ }
+
+ flowtable->data.type = type;
+ err = rhashtable_init(&flowtable->data.rhashtable, type->params);
+ if (err < 0)
+ goto err3;
+
+ err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
+ flowtable);
+ if (err < 0)
+ goto err3;
+
+ for (i = 0; i < flowtable->ops_len; i++) {
+ err = nf_register_net_hook(net, &flowtable->ops[i]);
+ if (err < 0)
+ goto err4;
+ }
+
+ err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+ if (err < 0)
+ goto err5;
+
+ INIT_DEFERRABLE_WORK(&flowtable->data.gc_work, type->gc);
+ queue_delayed_work(system_power_efficient_wq,
+ &flowtable->data.gc_work, HZ);
+
+ list_add_tail_rcu(&flowtable->list, &table->flowtables);
+ table->use++;
+
+ return 0;
+err5:
+ i = flowtable->ops_len;
+err4:
+ for (k = i - 1; k >= 0; k--)
+ nf_unregister_net_hook(net, &flowtable->ops[k]);
+
+ kfree(flowtable->ops);
+err3:
+ module_put(type->owner);
+err2:
+ kfree(flowtable->name);
+err1:
+ kfree(flowtable);
+ return err;
+}
+
+static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[],
+ struct netlink_ext_ack *extack)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ u8 genmask = nft_genmask_next(net);
+ int family = nfmsg->nfgen_family;
+ struct nft_flowtable *flowtable;
+ struct nft_table *table;
+ struct nft_ctx ctx;
+
+ if (!nla[NFTA_FLOWTABLE_TABLE] ||
+ (!nla[NFTA_FLOWTABLE_NAME] && !nla[NFTA_FLOWTABLE_HANDLE]))
+ return -EINVAL;
+
+ table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
+ family, genmask);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ if (nla[NFTA_FLOWTABLE_HANDLE])
+ flowtable = nf_tables_flowtable_lookup_byhandle(table,
+ nla[NFTA_FLOWTABLE_HANDLE],
+ genmask);
+ else
+ flowtable = nf_tables_flowtable_lookup(table,
+ nla[NFTA_FLOWTABLE_NAME],
+ genmask);
+ if (IS_ERR(flowtable))
+ return PTR_ERR(flowtable);
+ if (flowtable->use > 0)
+ return -EBUSY;
+
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
+
+ return nft_delflowtable(&ctx, flowtable);
+}
+
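+/*
+ * Fill one flow table message: table, name, use count and handle, plus
+ * a nested NFTA_FLOWTABLE_HOOK attribute carrying the hook number,
+ * priority and the names of the devices that are still registered.
+ */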
+static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+ u32 portid, u32 seq, int event,
+ u32 flags, int family,
+ struct nft_flowtable *flowtable)
+{
+ struct nlattr *nest, *nest_devs;
+ struct nfgenmsg *nfmsg;
+ struct nlmsghdr *nlh;
+ int i;
+
+ event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+ if (nlh == NULL)
+ goto nla_put_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = family;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
+
+ if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) ||
+ nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
+ nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
+ nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle),
+ NFTA_FLOWTABLE_PAD))
+ goto nla_put_failure;
+
+ nest = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK);
+ if (!nest)
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_NUM, htonl(flowtable->hooknum)) ||
+ nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->priority)))
+ goto nla_put_failure;
+
+ nest_devs = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK_DEVS);
+ if (!nest_devs)
+ goto nla_put_failure;
+
+ for (i = 0; i < flowtable->ops_len; i++) {
+ if (flowtable->ops[i].dev &&
+ nla_put_string(skb, NFTA_DEVICE_NAME,
+ flowtable->ops[i].dev->name))
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, nest_devs);
+ nla_nest_end(skb, nest);
+
+ nlmsg_end(skb, nlh);
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, nlh);
+ return -1;
+}
+
+struct nft_flowtable_filter {
+ char *table;
+};
+
+static int nf_tables_dump_flowtable(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ struct nft_flowtable_filter *filter = cb->data;
+ unsigned int idx = 0, s_idx = cb->args[0];
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+ struct nft_flowtable *flowtable;
+ const struct nft_table *table;
+
+ rcu_read_lock();
+ cb->seq = net->nft.base_seq;
+
+ list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ if (family != NFPROTO_UNSPEC && family != table->family)
+ continue;
+
+ list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+ if (!nft_is_active(net, flowtable))
+ goto cont;
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (filter && filter->table[0] &&
+ strcmp(filter->table, table->name))
+ goto cont;
+
+ if (nf_tables_fill_flowtable_info(skb, net, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWFLOWTABLE,
+ NLM_F_MULTI | NLM_F_APPEND,
+ table->family, flowtable) < 0)
+ goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+ idx++;
+ }
+ }
+done:
+ rcu_read_unlock();
+
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
+{
+ struct nft_flowtable_filter *filter = cb->data;
+
+ if (!filter)
+ return 0;
+
+ kfree(filter->table);
+ kfree(filter);
+
+ return 0;
+}
+
+static struct nft_flowtable_filter *
+nft_flowtable_filter_alloc(const struct nlattr * const nla[])
+{
+ struct nft_flowtable_filter *filter;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return ERR_PTR(-ENOMEM);
+
+ if (nla[NFTA_FLOWTABLE_TABLE]) {
+ filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
+ GFP_KERNEL);
+ if (!filter->table) {
+ kfree(filter);
+ return ERR_PTR(-ENOMEM);
+ }
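+/*
+ * Resolve the dst entries for both directions of the connection: the
+ * skb's own dst is reused for @dir, while the route for the reply
+ * direction is looked up towards the other tuple's destination.
+ */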
+ }
+ return filter;
+}
+
+static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
+ struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[],
+ struct netlink_ext_ack *extack)
+{
+ const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+ u8 genmask = nft_genmask_cur(net);
+ int family = nfmsg->nfgen_family;
+ struct nft_flowtable *flowtable;
+ const struct nft_table *table;
+ struct sk_buff *skb2;
+ int err;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = nf_tables_dump_flowtable,
+ .done = nf_tables_dump_flowtable_done,
+ };
+
+ if (nla[NFTA_FLOWTABLE_TABLE]) {
+ struct nft_flowtable_filter *filter;
+
+ filter = nft_flowtable_filter_alloc(nla);
+ if (IS_ERR(filter))
+ return PTR_ERR(filter);
+
+ c.data = filter;
+ }
+ return netlink_dump_start(nlsk, skb, nlh, &c);
+ }
+
+ if (!nla[NFTA_FLOWTABLE_NAME])
+ return -EINVAL;
+
+ table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
+ family, genmask);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
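+/*
+ * Try to push an established TCP/UDP conntrack entry onto the flow
+ * table. IPS_OFFLOAD_BIT guards against offloading the same conntrack
+ * twice and is cleared again if route lookup or insertion fails; on
+ * skip or failure the verdict is NFT_BREAK, so evaluation of this rule
+ * stops without dropping the packet.
+ */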
+
+ flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+ genmask);
+ if (IS_ERR(flowtable))
+ return PTR_ERR(flowtable);
+
+ skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb2)
+ return -ENOMEM;
+
+ err = nf_tables_fill_flowtable_info(skb2, net, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq,
+ NFT_MSG_NEWFLOWTABLE, 0, family,
+ flowtable);
+ if (err < 0)
+ goto err;
+
+ return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+err:
+ kfree_skb(skb2);
+ return err;
+}
+
+static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
+ struct nft_flowtable *flowtable,
+ int event)
+{
+ struct sk_buff *skb;
+ int err;
+
+ if (!ctx->report &&
+ !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+ return;
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb == NULL)
+ goto err;
+
+ err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
+ ctx->seq, event, 0,
+ ctx->family, flowtable);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto err;
+ }
+
+ nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+ ctx->report, GFP_KERNEL);
+ return;
+err:
+ nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
+}
+
+static void nft_flowtable_destroy(void *ptr, void *arg)
+{
+ kfree(ptr);
+}
+
+static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
+{
+ cancel_delayed_work_sync(&flowtable->data.gc_work);
+ kfree(flowtable->name);
+ rhashtable_free_and_destroy(&flowtable->data.rhashtable,
+ nft_flowtable_destroy, NULL);
+ module_put(flowtable->data.type->owner);
}
static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@@ -4878,6 +5443,46 @@ nla_put_failure:
return -EMSGSIZE;
}
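+/*
+ * On NETDEV_UNREGISTER, drop the ingress hook that was attached to the
+ * vanishing device and clear ops[].dev, so dumps and the final hook
+ * unregistration skip this slot.
+ */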
+static void nft_flowtable_event(unsigned long event, struct net_device *dev,
+ struct nft_flowtable *flowtable)
+{
+ int i;
+
+ for (i = 0; i < flowtable->ops_len; i++) {
+ if (flowtable->ops[i].dev != dev)
+ continue;
+
+ nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
+ flowtable->ops[i].dev = NULL;
+ break;
+ }
+}
+
+static int nf_tables_flowtable_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct nft_flowtable *flowtable;
+ struct nft_table *table;
+
+ if (event != NETDEV_UNREGISTER)
+ return 0;
+
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_for_each_entry(table, &dev_net(dev)->nft.tables, list) {
+ list_for_each_entry(flowtable, &table->flowtables, list) {
+ nft_flowtable_event(event, dev, flowtable);
+ }
+ }
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nf_tables_flowtable_notifier = {
+ .notifier_call = nf_tables_flowtable_event,
+};
+
static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb,
int event)
{
@@ -5030,6 +5635,21 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,
},
+ [NFT_MSG_NEWFLOWTABLE] = {
+ .call_batch = nf_tables_newflowtable,
+ .attr_count = NFTA_FLOWTABLE_MAX,
+ .policy = nft_flowtable_policy,
+ },
+ [NFT_MSG_GETFLOWTABLE] = {
+ .call = nf_tables_getflowtable,
+ .attr_count = NFTA_FLOWTABLE_MAX,
+ .policy = nft_flowtable_policy,
+ },
+ [NFT_MSG_DELFLOWTABLE] = {
+ .call_batch = nf_tables_delflowtable,
+ .attr_count = NFTA_FLOWTABLE_MAX,
+ .policy = nft_flowtable_policy,
+ },
};
static void nft_chain_commit_update(struct nft_trans *trans)
@@ -5075,6 +5695,9 @@ static void nf_tables_commit_release(struct nft_trans *trans)
case NFT_MSG_DELOBJ:
nft_obj_destroy(nft_trans_obj(trans));
break;
+ case NFT_MSG_DELFLOWTABLE:
+ nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+ break;
}
kfree(trans);
}
@@ -5101,7 +5724,6 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
if (nft_trans_table_update(trans)) {
if (!nft_trans_table_enable(trans)) {
nf_tables_table_disable(net,
- trans->ctx.afi,
trans->ctx.table);
trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
}
@@ -5127,10 +5749,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
case NFT_MSG_DELCHAIN:
list_del_rcu(&trans->ctx.chain->list);
nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
- nf_tables_unregister_hooks(trans->ctx.net,
- trans->ctx.table,
- trans->ctx.chain,
- trans->ctx.afi->nops);
+ nf_tables_unregister_hook(trans->ctx.net,
+ trans->ctx.table,
+ trans->ctx.chain);
break;
case NFT_MSG_NEWRULE:
nft_clear(trans->ctx.net, nft_trans_rule(trans));
@@ -5150,7 +5771,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
/* This avoids hitting -EBUSY when deleting the table
* from the transaction.
*/
- if (nft_trans_set(trans)->flags & NFT_SET_ANONYMOUS &&
+ if (nft_set_is_anonymous(nft_trans_set(trans)) &&
!list_empty(&nft_trans_set(trans)->bindings))
trans->ctx.table->use--;
@@ -5193,6 +5814,21 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans),
NFT_MSG_DELOBJ);
break;
+ case NFT_MSG_NEWFLOWTABLE:
+ nft_clear(net, nft_trans_flowtable(trans));
+ nf_tables_flowtable_notify(&trans->ctx,
+ nft_trans_flowtable(trans),
+ NFT_MSG_NEWFLOWTABLE);
+ nft_trans_destroy(trans);
+ break;
+ case NFT_MSG_DELFLOWTABLE:
+ list_del_rcu(&nft_trans_flowtable(trans)->list);
+ nf_tables_flowtable_notify(&trans->ctx,
+ nft_trans_flowtable(trans),
+ NFT_MSG_DELFLOWTABLE);
+ nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans));
+ break;
}
}
@@ -5230,6 +5866,9 @@ static void nf_tables_abort_release(struct nft_trans *trans)
case NFT_MSG_NEWOBJ:
nft_obj_destroy(nft_trans_obj(trans));
break;
+ case NFT_MSG_NEWFLOWTABLE:
+ nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+ break;
}
kfree(trans);
}
@@ -5246,7 +5885,6 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
if (nft_trans_table_update(trans)) {
if (nft_trans_table_enable(trans)) {
nf_tables_table_disable(net,
- trans->ctx.afi,
trans->ctx.table);
trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
}
@@ -5267,10 +5905,9 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
} else {
trans->ctx.table->use--;
list_del_rcu(&trans->ctx.chain->list);
- nf_tables_unregister_hooks(trans->ctx.net,
- trans->ctx.table,
- trans->ctx.chain,
- trans->ctx.afi->nops);
+ nf_tables_unregister_hook(trans->ctx.net,
+ trans->ctx.table,
+ trans->ctx.chain);
}
break;
case NFT_MSG_DELCHAIN:
@@ -5320,6 +5957,17 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
nft_clear(trans->ctx.net, nft_trans_obj(trans));
nft_trans_destroy(trans);
break;
+ case NFT_MSG_NEWFLOWTABLE:
+ trans->ctx.table->use--;
+ list_del_rcu(&nft_trans_flowtable(trans)->list);
+ nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans));
+ break;
+ case NFT_MSG_DELFLOWTABLE:
+ trans->ctx.table->use++;
+ nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
+ nft_trans_destroy(trans);
+ break;
}
}
@@ -5371,7 +6019,7 @@ int nft_chain_validate_hooks(const struct nft_chain *chain,
if (nft_is_base_chain(chain)) {
basechain = nft_base_chain(chain);
- if ((1 << basechain->ops[0].hooknum) & hook_flags)
+ if ((1 << basechain->ops.hooknum) & hook_flags)
return 0;
return -EOPNOTSUPP;
@@ -5839,28 +6487,13 @@ int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
}
EXPORT_SYMBOL_GPL(nft_data_dump);
-static int __net_init nf_tables_init_net(struct net *net)
-{
- INIT_LIST_HEAD(&net->nft.af_info);
- INIT_LIST_HEAD(&net->nft.commit_list);
- net->nft.base_seq = 1;
- return 0;
-}
-
-static void __net_exit nf_tables_exit_net(struct net *net)
-{
- WARN_ON_ONCE(!list_empty(&net->nft.af_info));
- WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
-}
-
int __nft_release_basechain(struct nft_ctx *ctx)
{
struct nft_rule *rule, *nr;
BUG_ON(!nft_is_base_chain(ctx->chain));
- nf_tables_unregister_hooks(ctx->net, ctx->chain->table, ctx->chain,
- ctx->afi->nops);
+ nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
list_del(&rule->list);
ctx->chain->use--;
@@ -5874,9 +6507,9 @@ int __nft_release_basechain(struct nft_ctx *ctx)
}
EXPORT_SYMBOL_GPL(__nft_release_basechain);
-/* Called by nft_unregister_afinfo() from __net_exit path, nfnl_lock is held. */
-static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
+static void __nft_release_tables(struct net *net)
{
+ struct nft_flowtable *flowtable, *nf;
struct nft_table *table, *nt;
struct nft_chain *chain, *nc;
struct nft_object *obj, *ne;
@@ -5884,13 +6517,16 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
struct nft_set *set, *ns;
struct nft_ctx ctx = {
.net = net,
- .afi = afi,
};
- list_for_each_entry_safe(table, nt, &afi->tables, list) {
+ list_for_each_entry_safe(table, nt, &net->nft.tables, list) {
+ ctx.family = table->family;
+
list_for_each_entry(chain, &table->chains, list)
- nf_tables_unregister_hooks(net, table, chain,
- afi->nops);
+ nf_tables_unregister_hook(net, table, chain);
+ list_for_each_entry(flowtable, &table->flowtables, list)
+ nf_unregister_net_hooks(net, flowtable->ops,
+ flowtable->ops_len);
/* No packets are walking on these chains anymore. */
ctx.table = table;
list_for_each_entry(chain, &table->chains, list) {
@@ -5901,6 +6537,11 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
nf_tables_rule_destroy(&ctx, rule);
}
}
+ list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
+ list_del(&flowtable->list);
+ table->use--;
+ nf_tables_flowtable_destroy(flowtable);
+ }
list_for_each_entry_safe(set, ns, &table->sets, list) {
list_del(&set->list);
table->use--;
@@ -5921,6 +6562,21 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
}
}
+static int __net_init nf_tables_init_net(struct net *net)
+{
+ INIT_LIST_HEAD(&net->nft.tables);
+ INIT_LIST_HEAD(&net->nft.commit_list);
+ net->nft.base_seq = 1;
+ return 0;
+}
+
+static void __net_exit nf_tables_exit_net(struct net *net)
+{
+ __nft_release_tables(net);
+ WARN_ON_ONCE(!list_empty(&net->nft.tables));
+ WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+}
+
static struct pernet_operations nf_tables_net_ops = {
.init = nf_tables_init_net,
.exit = nf_tables_exit_net,
@@ -5945,7 +6601,8 @@ static int __init nf_tables_module_init(void)
if (err < 0)
goto err3;
- pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n");
+ register_netdevice_notifier(&nf_tables_flowtable_notifier);
+
return register_pernet_subsys(&nf_tables_net_ops);
err3:
nf_tables_core_module_exit();
@@ -5959,6 +6616,7 @@ static void __exit nf_tables_module_exit(void)
{
unregister_pernet_subsys(&nf_tables_net_ops);
nfnetlink_subsys_unregister(&nf_tables_subsys);
+ unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
rcu_barrier();
nf_tables_core_module_exit();
kfree(info);
diff --git a/net/netfilter/nf_tables_inet.c b/net/netfilter/nf_tables_inet.c
index f713cc205669..e30c7da09d0d 100644
--- a/net/netfilter/nf_tables_inet.c
+++ b/net/netfilter/nf_tables_inet.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_tables.h>
@@ -16,56 +17,27 @@
#include <net/netfilter/nf_tables_ipv6.h>
#include <net/ip.h>
-static void nft_inet_hook_ops_init(struct nf_hook_ops *ops, unsigned int n)
+static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
{
- struct nft_af_info *afi;
-
- if (n == 1)
- afi = &nft_af_ipv4;
- else
- afi = &nft_af_ipv6;
-
- ops->pf = afi->family;
- if (afi->hooks[ops->hooknum])
- ops->hook = afi->hooks[ops->hooknum];
-}
-
-static struct nft_af_info nft_af_inet __read_mostly = {
- .family = NFPROTO_INET,
- .nhooks = NF_INET_NUMHOOKS,
- .owner = THIS_MODULE,
- .nops = 2,
- .hook_ops_init = nft_inet_hook_ops_init,
-};
-
-static int __net_init nf_tables_inet_init_net(struct net *net)
-{
- net->nft.inet = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
- if (net->nft.inet == NULL)
- return -ENOMEM;
- memcpy(net->nft.inet, &nft_af_inet, sizeof(nft_af_inet));
-
- if (nft_register_afinfo(net, net->nft.inet) < 0)
- goto err;
-
- return 0;
-
-err:
- kfree(net->nft.inet);
- return -ENOMEM;
-}
-
-static void __net_exit nf_tables_inet_exit_net(struct net *net)
-{
- nft_unregister_afinfo(net, net->nft.inet);
- kfree(net->nft.inet);
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo(&pkt, skb, state);
+
+ switch (state->pf) {
+ case NFPROTO_IPV4:
+ nft_set_pktinfo_ipv4(&pkt, skb);
+ break;
+ case NFPROTO_IPV6:
+ nft_set_pktinfo_ipv6(&pkt, skb);
+ break;
+ default:
+ break;
+ }
+
+ return nft_do_chain(&pkt, priv);
}
-static struct pernet_operations nf_tables_inet_net_ops = {
- .init = nf_tables_inet_init_net,
- .exit = nf_tables_inet_exit_net,
-};
-
static const struct nf_chain_type filter_inet = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
@@ -76,26 +48,22 @@ static const struct nf_chain_type filter_inet = {
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
+ .hooks = {
+ [NF_INET_LOCAL_IN] = nft_do_chain_inet,
+ [NF_INET_LOCAL_OUT] = nft_do_chain_inet,
+ [NF_INET_FORWARD] = nft_do_chain_inet,
+ [NF_INET_PRE_ROUTING] = nft_do_chain_inet,
+ [NF_INET_POST_ROUTING] = nft_do_chain_inet,
+ },
};
static int __init nf_tables_inet_init(void)
{
- int ret;
-
- ret = nft_register_chain_type(&filter_inet);
- if (ret < 0)
- return ret;
-
- ret = register_pernet_subsys(&nf_tables_inet_net_ops);
- if (ret < 0)
- nft_unregister_chain_type(&filter_inet);
-
- return ret;
+ return nft_register_chain_type(&filter_inet);
}
static void __exit nf_tables_inet_exit(void)
{
- unregister_pernet_subsys(&nf_tables_inet_net_ops);
nft_unregister_chain_type(&filter_inet);
}
@@ -104,4 +72,4 @@ module_exit(nf_tables_inet_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_FAMILY(1);
+MODULE_ALIAS_NFT_CHAIN(1, "filter");
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
index 403432988313..4041fafca934 100644
--- a/net/netfilter/nf_tables_netdev.c
+++ b/net/netfilter/nf_tables_netdev.c
@@ -21,66 +21,32 @@ nft_do_chain_netdev(void *priv, struct sk_buff *skb,
{
struct nft_pktinfo pkt;
+ nft_set_pktinfo(&pkt, skb, state);
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
+ nft_set_pktinfo_ipv4_validate(&pkt, skb);
break;
case htons(ETH_P_IPV6):
- nft_set_pktinfo_ipv6_validate(&pkt, skb, state);
+ nft_set_pktinfo_ipv6_validate(&pkt, skb);
break;
default:
- nft_set_pktinfo_unspec(&pkt, skb, state);
+ nft_set_pktinfo_unspec(&pkt, skb);
break;
}
return nft_do_chain(&pkt, priv);
}
-static struct nft_af_info nft_af_netdev __read_mostly = {
- .family = NFPROTO_NETDEV,
- .nhooks = NF_NETDEV_NUMHOOKS,
- .owner = THIS_MODULE,
- .flags = NFT_AF_NEEDS_DEV,
- .nops = 1,
- .hooks = {
- [NF_NETDEV_INGRESS] = nft_do_chain_netdev,
- },
-};
-
-static int nf_tables_netdev_init_net(struct net *net)
-{
- net->nft.netdev = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
- if (net->nft.netdev == NULL)
- return -ENOMEM;
-
- memcpy(net->nft.netdev, &nft_af_netdev, sizeof(nft_af_netdev));
-
- if (nft_register_afinfo(net, net->nft.netdev) < 0)
- goto err;
-
- return 0;
-err:
- kfree(net->nft.netdev);
- return -ENOMEM;
-}
-
-static void nf_tables_netdev_exit_net(struct net *net)
-{
- nft_unregister_afinfo(net, net->nft.netdev);
- kfree(net->nft.netdev);
-}
-
-static struct pernet_operations nf_tables_netdev_net_ops = {
- .init = nf_tables_netdev_init_net,
- .exit = nf_tables_netdev_exit_net,
-};
-
static const struct nf_chain_type nft_filter_chain_netdev = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_NETDEV,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_NETDEV_INGRESS),
+ .hooks = {
+ [NF_NETDEV_INGRESS] = nft_do_chain_netdev,
+ },
};
static void nft_netdev_event(unsigned long event, struct net_device *dev,
@@ -96,7 +62,7 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
__nft_release_basechain(ctx);
break;
case NETDEV_CHANGENAME:
- if (dev->ifindex != basechain->ops[0].dev->ifindex)
+ if (dev->ifindex != basechain->ops.dev->ifindex)
return;
strncpy(basechain->dev_name, dev->name, IFNAMSIZ);
@@ -108,7 +74,6 @@ static int nf_tables_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_chain *chain, *nr;
struct nft_ctx ctx = {
@@ -120,20 +85,18 @@ static int nf_tables_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_for_each_entry(afi, &dev_net(dev)->nft.af_info, list) {
- ctx.afi = afi;
- if (afi->family != NFPROTO_NETDEV)
+ list_for_each_entry(table, &ctx.net->nft.tables, list) {
+ if (table->family != NFPROTO_NETDEV)
continue;
- list_for_each_entry(table, &afi->tables, list) {
- ctx.table = table;
- list_for_each_entry_safe(chain, nr, &table->chains, list) {
- if (!nft_is_base_chain(chain))
- continue;
+ ctx.family = table->family;
+ ctx.table = table;
+ list_for_each_entry_safe(chain, nr, &table->chains, list) {
+ if (!nft_is_base_chain(chain))
+ continue;
- ctx.chain = chain;
- nft_netdev_event(event, dev, &ctx);
- }
+ ctx.chain = chain;
+ nft_netdev_event(event, dev, &ctx);
}
}
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
@@ -153,27 +116,21 @@ static int __init nf_tables_netdev_init(void)
if (ret)
return ret;
- ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
- if (ret)
- goto err1;
-
ret = register_netdevice_notifier(&nf_tables_netdev_notifier);
if (ret)
- goto err2;
+ goto err_register_netdevice_notifier;
return 0;
-err2:
- unregister_pernet_subsys(&nf_tables_netdev_net_ops);
-err1:
+err_register_netdevice_notifier:
nft_unregister_chain_type(&nft_filter_chain_netdev);
+
return ret;
}
static void __exit nf_tables_netdev_exit(void)
{
unregister_netdevice_notifier(&nf_tables_netdev_notifier);
- unregister_pernet_subsys(&nf_tables_netdev_net_ops);
nft_unregister_chain_type(&nft_filter_chain_netdev);
}
@@ -182,4 +139,4 @@ module_exit(nf_tables_netdev_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
-MODULE_ALIAS_NFT_FAMILY(5); /* NFPROTO_NETDEV */
+MODULE_ALIAS_NFT_CHAIN(5, "filter"); /* NFPROTO_NETDEV */
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 733d3e4a30d8..03ead8a9e90c 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -37,8 +37,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
rcu_dereference_protected(table[(id)].subsys, \
lockdep_nfnl_is_held((id)))
-static char __initdata nfversion[] = "0.30";
-
static struct {
struct mutex mutex;
const struct nfnetlink_subsystem __rcu *subsys;
@@ -580,13 +578,11 @@ static int __init nfnetlink_init(void)
for (i=0; i<NFNL_SUBSYS_COUNT; i++)
mutex_init(&table[i].mutex);
- pr_info("Netfilter messages via NETLINK v%s.\n", nfversion);
return register_pernet_subsys(&nfnetlink_net_ops);
}
static void __exit nfnetlink_exit(void)
{
- pr_info("Removing netfilter NETLINK layer.\n");
unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index c45e6d4358ab..88d427f9f9e6 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -527,7 +527,6 @@ static int __init nfnl_acct_init(void)
goto err_out;
}
- pr_info("nfnl_acct: registering with nfnetlink.\n");
ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
if (ret < 0) {
pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
@@ -543,7 +542,6 @@ err_out:
static void __exit nfnl_acct_exit(void)
{
- pr_info("nfnl_acct: unregistering from nfnetlink.\n");
nfnetlink_subsys_unregister(&nfnl_acct_subsys);
unregister_pernet_subsys(&nfnl_acct_ops);
}
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 32b1c0b44e79..95b04702a655 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -615,8 +615,6 @@ err_out:
static void __exit cttimeout_exit(void)
{
- pr_info("cttimeout: unregistering from nfnetlink.\n");
-
nfnetlink_subsys_unregister(&cttimeout_subsys);
unregister_pernet_subsys(&cttimeout_ops);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e955bec0acc6..7b46aa4c478d 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1054,7 +1054,6 @@ static int nful_open(struct inode *inode, struct file *file)
}
static const struct file_operations nful_file_ops = {
- .owner = THIS_MODULE,
.open = nful_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index c09b36755ed7..8bba23160a68 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -941,23 +941,18 @@ static struct notifier_block nfqnl_dev_notifier = {
.notifier_call = nfqnl_rcv_dev_event,
};
-static unsigned int nfqnl_nf_hook_drop(struct net *net)
+static void nfqnl_nf_hook_drop(struct net *net)
{
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
- unsigned int instances = 0;
int i;
for (i = 0; i < INSTANCE_BUCKETS; i++) {
struct nfqnl_instance *inst;
struct hlist_head *head = &q->instance_table[i];
- hlist_for_each_entry_rcu(inst, head, hlist) {
+ hlist_for_each_entry_rcu(inst, head, hlist)
nfqnl_flush(inst, NULL, 0);
- instances++;
- }
}
-
- return instances;
}
static int
@@ -1482,7 +1477,6 @@ static int nfqnl_open(struct inode *inode, struct file *file)
}
static const struct file_operations nfqnl_file_ops = {
- .owner = THIS_MODULE,
.open = nfqnl_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index c2945eb3397c..fa90a8402845 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -44,6 +44,7 @@ static void nft_cmp_eval(const struct nft_expr *expr,
case NFT_CMP_LT:
if (d == 0)
goto mismatch;
+ /* fall through */
case NFT_CMP_LTE:
if (d > 0)
goto mismatch;
@@ -51,6 +52,7 @@ static void nft_cmp_eval(const struct nft_expr *expr,
case NFT_CMP_GT:
if (d == 0)
goto mismatch;
+ /* fall through */
case NFT_CMP_GTE:
if (d < 0)
goto mismatch;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index b89f4f65b2a0..8e23726b9081 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -144,7 +144,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
{
par->net = ctx->net;
par->table = ctx->table->name;
- switch (ctx->afi->family) {
+ switch (ctx->family) {
case AF_INET:
entry->e4.ip.proto = proto;
entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
@@ -169,13 +169,13 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops[0];
+ const struct nf_hook_ops *ops = &basechain->ops;
par->hook_mask = 1 << ops->hooknum;
} else {
par->hook_mask = 0;
}
- par->family = ctx->afi->family;
+ par->family = ctx->family;
par->nft_compat = true;
}
@@ -267,7 +267,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
par.net = ctx->net;
par.target = target;
par.targinfo = info;
- par.family = ctx->afi->family;
+ par.family = ctx->family;
if (par.target->destroy != NULL)
par.target->destroy(&par);
@@ -302,7 +302,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops[0];
+ const struct nf_hook_ops *ops = &basechain->ops;
hook_mask = 1 << ops->hooknum;
if (target->hooks && !(hook_mask & target->hooks))
@@ -358,7 +358,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
{
par->net = ctx->net;
par->table = ctx->table->name;
- switch (ctx->afi->family) {
+ switch (ctx->family) {
case AF_INET:
entry->e4.ip.proto = proto;
entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
@@ -383,13 +383,13 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops[0];
+ const struct nf_hook_ops *ops = &basechain->ops;
par->hook_mask = 1 << ops->hooknum;
} else {
par->hook_mask = 0;
}
- par->family = ctx->afi->family;
+ par->family = ctx->family;
par->nft_compat = true;
}
@@ -446,7 +446,7 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
par.net = ctx->net;
par.match = match;
par.matchinfo = info;
- par.family = ctx->afi->family;
+ par.family = ctx->family;
if (par.match->destroy != NULL)
par.match->destroy(&par);
@@ -481,7 +481,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
- const struct nf_hook_ops *ops = &basechain->ops[0];
+ const struct nf_hook_ops *ops = &basechain->ops;
hook_mask = 1 << ops->hooknum;
if (match->hooks && !(hook_mask & match->hooks))
@@ -648,7 +648,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
mt_name = nla_data(tb[NFTA_MATCH_NAME]);
rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
- family = ctx->afi->family;
+ family = ctx->family;
/* Re-use the existing match if it's already loaded. */
list_for_each_entry(nft_match, &nft_match_list, head) {
@@ -733,7 +733,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
tg_name = nla_data(tb[NFTA_TARGET_NAME]);
rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
- family = ctx->afi->family;
+ family = ctx->family;
/* Re-use the existing target if it's already loaded. */
list_for_each_entry(nft_target, &nft_target_list, head) {
@@ -812,8 +812,6 @@ static int __init nft_compat_module_init(void)
goto err_target;
}
- pr_info("nf_tables_compat: (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>\n");
-
return ret;
err_target:
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 2647b895f4b0..6ab274b14484 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -405,7 +405,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
if (tb[NFTA_CT_DIRECTION] == NULL)
return -EINVAL;
- switch (ctx->afi->family) {
+ switch (ctx->family) {
case NFPROTO_IPV4:
len = FIELD_SIZEOF(struct nf_conntrack_tuple,
src.u3.ip);
@@ -456,7 +456,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
if (err < 0)
return err;
- err = nf_ct_netns_get(ctx->net, ctx->afi->family);
+ err = nf_ct_netns_get(ctx->net, ctx->family);
if (err < 0)
return err;
@@ -550,7 +550,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
if (err < 0)
goto err1;
- err = nf_ct_netns_get(ctx->net, ctx->afi->family);
+ err = nf_ct_netns_get(ctx->net, ctx->family);
if (err < 0)
goto err1;
@@ -564,7 +564,7 @@ err1:
static void nft_ct_get_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
- nf_ct_netns_put(ctx->net, ctx->afi->family);
+ nf_ct_netns_put(ctx->net, ctx->family);
}
static void nft_ct_set_destroy(const struct nft_ctx *ctx,
@@ -573,7 +573,7 @@ static void nft_ct_set_destroy(const struct nft_ctx *ctx,
struct nft_ct *priv = nft_expr_priv(expr);
__nft_ct_set_destroy(ctx, priv);
- nf_ct_netns_put(ctx->net, ctx->afi->family);
+ nf_ct_netns_put(ctx->net, ctx->family);
}
static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -734,7 +734,7 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
struct nft_ct_helper_obj *priv = nft_obj_data(obj);
struct nf_conntrack_helper *help4, *help6;
char name[NF_CT_HELPER_NAME_LEN];
- int family = ctx->afi->family;
+ int family = ctx->family;
if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO])
return -EINVAL;
@@ -753,14 +753,14 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
switch (family) {
case NFPROTO_IPV4:
- if (ctx->afi->family == NFPROTO_IPV6)
+ if (ctx->family == NFPROTO_IPV6)
return -EINVAL;
help4 = nf_conntrack_helper_try_module_get(name, family,
priv->l4proto);
break;
case NFPROTO_IPV6:
- if (ctx->afi->family == NFPROTO_IPV4)
+ if (ctx->family == NFPROTO_IPV4)
return -EINVAL;
help6 = nf_conntrack_helper_try_module_get(name, family,
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 66221ad891a9..fc83e29d6634 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -164,7 +164,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
}
priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
- err = nft_validate_register_load(priv->sreg_key, set->klen);;
+ err = nft_validate_register_load(priv->sreg_key, set->klen);
if (err < 0)
return err;
@@ -184,7 +184,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (tb[NFTA_DYNSET_EXPR] != NULL) {
if (!(set->flags & NFT_SET_EVAL))
return -EINVAL;
- if (!(set->flags & NFT_SET_ANONYMOUS))
+ if (!nft_set_is_anonymous(set))
return -EOPNOTSUPP;
priv->expr = nft_expr_init(ctx, tb[NFTA_DYNSET_EXPR]);
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
new file mode 100644
index 000000000000..4503b8dcf9c0
--- /dev/null
+++ b/net/netfilter/nft_flow_offload.c
@@ -0,0 +1,264 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/ip.h> /* for ipv4 options. */
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <net/netfilter/nf_flow_table.h>
+
+struct nft_flow_offload {
+ struct nft_flowtable *flowtable;
+};
+
+static int nft_flow_route(const struct nft_pktinfo *pkt,
+ const struct nf_conn *ct,
+ struct nf_flow_route *route,
+ enum ip_conntrack_dir dir)
+{
+ struct dst_entry *this_dst = skb_dst(pkt->skb);
+ struct dst_entry *other_dst = NULL;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof(fl));
+ switch (nft_pf(pkt)) {
+ case NFPROTO_IPV4:
+ fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+ break;
+ case NFPROTO_IPV6:
+ fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+ break;
+ }
+
+ nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
+ if (!other_dst)
+ return -ENOENT;
+
+ route->tuple[dir].dst = this_dst;
+ route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
+ route->tuple[!dir].dst = other_dst;
+ route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
+
+ return 0;
+}
+
+static bool nft_flow_offload_skip(struct sk_buff *skb)
+{
+ struct ip_options *opt = &(IPCB(skb)->opt);
+
+ if (unlikely(opt->optlen))
+ return true;
+ if (skb_sec_path(skb))
+ return true;
+
+ return false;
+}
+
+static void nft_flow_offload_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_flow_offload *priv = nft_expr_priv(expr);
+ struct nf_flowtable *flowtable = &priv->flowtable->data;
+ enum ip_conntrack_info ctinfo;
+ struct nf_flow_route route;
+ struct flow_offload *flow;
+ enum ip_conntrack_dir dir;
+ struct nf_conn *ct;
+ int ret;
+
+ if (nft_flow_offload_skip(pkt->skb))
+ goto out;
+
+ ct = nf_ct_get(pkt->skb, &ctinfo);
+ if (!ct)
+ goto out;
+
+ switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ break;
+ default:
+ goto out;
+ }
+
+ if (test_bit(IPS_HELPER_BIT, &ct->status))
+ goto out;
+
+ if (ctinfo == IP_CT_NEW ||
+ ctinfo == IP_CT_RELATED)
+ goto out;
+
+ if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+ goto out;
+
+ dir = CTINFO2DIR(ctinfo);
+ if (nft_flow_route(pkt, ct, &route, dir) < 0)
+ goto err_flow_route;
+
+ flow = flow_offload_alloc(ct, &route);
+ if (!flow)
+ goto err_flow_alloc;
+
+ ret = flow_offload_add(flowtable, flow);
+ if (ret < 0)
+ goto err_flow_add;
+
+ return;
+
+err_flow_add:
+ flow_offload_free(flow);
+err_flow_alloc:
+ dst_release(route.tuple[!dir].dst);
+err_flow_route:
+ clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+out:
+ regs->verdict.code = NFT_BREAK;
+}
+
+static int nft_flow_offload_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ unsigned int hook_mask = (1 << NF_INET_FORWARD);
+
+ return nft_chain_validate_hooks(ctx->chain, hook_mask);
+}
+
+static int nft_flow_offload_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_flow_offload *priv = nft_expr_priv(expr);
+ u8 genmask = nft_genmask_next(ctx->net);
+ struct nft_flowtable *flowtable;
+
+ if (!tb[NFTA_FLOW_TABLE_NAME])
+ return -EINVAL;
+
+ flowtable = nf_tables_flowtable_lookup(ctx->table,
+ tb[NFTA_FLOW_TABLE_NAME],
+ genmask);
+ if (IS_ERR(flowtable))
+ return PTR_ERR(flowtable);
+
+ priv->flowtable = flowtable;
+ flowtable->use++;
+
+ return nf_ct_netns_get(ctx->net, ctx->family);
+}
+
+static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_flow_offload *priv = nft_expr_priv(expr);
+
+ priv->flowtable->use--;
+ nf_ct_netns_put(ctx->net, ctx->family);
+}
+
+static int nft_flow_offload_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ struct nft_flow_offload *priv = nft_expr_priv(expr);
+
+ if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_flow_offload_type;
+static const struct nft_expr_ops nft_flow_offload_ops = {
+ .type = &nft_flow_offload_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_flow_offload)),
+ .eval = nft_flow_offload_eval,
+ .init = nft_flow_offload_init,
+ .destroy = nft_flow_offload_destroy,
+ .validate = nft_flow_offload_validate,
+ .dump = nft_flow_offload_dump,
+};
+
+static struct nft_expr_type nft_flow_offload_type __read_mostly = {
+ .name = "flow_offload",
+ .ops = &nft_flow_offload_ops,
+ .maxattr = NFTA_FLOW_MAX,
+ .owner = THIS_MODULE,
+};
+
+static void flow_offload_iterate_cleanup(struct flow_offload *flow, void *data)
+{
+ struct net_device *dev = data;
+
+ if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
+ return;
+
+ flow_offload_dead(flow);
+}
+
+static void nft_flow_offload_iterate_cleanup(struct nf_flowtable *flowtable,
+ void *data)
+{
+ nf_flow_table_iterate(flowtable, flow_offload_iterate_cleanup, data);
+}
+
+static int flow_offload_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (event != NETDEV_DOWN)
+ return NOTIFY_DONE;
+
+ nft_flow_table_iterate(dev_net(dev), nft_flow_offload_iterate_cleanup, dev);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block flow_offload_netdev_notifier = {
+ .notifier_call = flow_offload_netdev_event,
+};
+
+static int __init nft_flow_offload_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&flow_offload_netdev_notifier);
+
+ err = nft_register_expr(&nft_flow_offload_type);
+ if (err < 0)
+ goto register_expr;
+
+ return 0;
+
+register_expr:
+ unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+ return err;
+}
+
+static void __exit nft_flow_offload_module_exit(void)
+{
+ struct net *net;
+
+ nft_unregister_expr(&nft_flow_offload_type);
+ unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+ rtnl_lock();
+ for_each_net(net)
+ nft_flow_table_iterate(net, nft_flow_offload_iterate_cleanup, NULL);
+ rtnl_unlock();
+}
+
+module_init(nft_flow_offload_module_init);
+module_exit(nft_flow_offload_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_EXPR("flow_offload");
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 6f6e64423643..a27be36dc0af 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -112,7 +112,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
break;
}
- err = nf_logger_find_get(ctx->afi->family, li->type);
+ err = nf_logger_find_get(ctx->family, li->type);
if (err < 0)
goto err1;
@@ -133,7 +133,7 @@ static void nft_log_destroy(const struct nft_ctx *ctx,
if (priv->prefix != nft_log_null_prefix)
kfree(priv->prefix);
- nf_logger_put(ctx->afi->family, li->type);
+ nf_logger_put(ctx->family, li->type);
}
static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index 6ac03d4266c9..9d8655bc1bea 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -73,7 +73,7 @@ int nft_masq_init(const struct nft_ctx *ctx,
}
}
- return nf_ct_netns_get(ctx->net, ctx->afi->family);
+ return nf_ct_netns_get(ctx->net, ctx->family);
}
EXPORT_SYMBOL_GPL(nft_masq_init);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 5a60eb23a7ed..8fb91940e2e7 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -210,6 +210,11 @@ void nft_meta_get_eval(const struct nft_expr *expr,
*dest = prandom_u32_state(state);
break;
}
+#ifdef CONFIG_XFRM
+ case NFT_META_SECPATH:
+ nft_reg_store8(dest, !!skb->sp);
+ break;
+#endif
default:
WARN_ON(1);
goto err;
@@ -308,6 +313,11 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
prandom_init_once(&nft_prandom_state);
len = sizeof(u32);
break;
+#ifdef CONFIG_XFRM
+ case NFT_META_SECPATH:
+ len = sizeof(u8);
+ break;
+#endif
default:
return -EOPNOTSUPP;
}
@@ -318,6 +328,38 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
}
EXPORT_SYMBOL_GPL(nft_meta_get_init);
+static int nft_meta_get_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+#ifdef CONFIG_XFRM
+ const struct nft_meta *priv = nft_expr_priv(expr);
+ unsigned int hooks;
+
+ if (priv->key != NFT_META_SECPATH)
+ return 0;
+
+ switch (ctx->family) {
+ case NFPROTO_NETDEV:
+ hooks = 1 << NF_NETDEV_INGRESS;
+ break;
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ case NFPROTO_INET:
+ hooks = (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return nft_chain_validate_hooks(ctx->chain, hooks);
+#else
+ return 0;
+#endif
+}
+
int nft_meta_set_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
@@ -328,7 +370,7 @@ int nft_meta_set_validate(const struct nft_ctx *ctx,
if (priv->key != NFT_META_PKTTYPE)
return 0;
- switch (ctx->afi->family) {
+ switch (ctx->family) {
case NFPROTO_BRIDGE:
hooks = 1 << NF_BR_PRE_ROUTING;
break;
@@ -434,6 +476,7 @@ static const struct nft_expr_ops nft_meta_get_ops = {
.eval = nft_meta_get_eval,
.init = nft_meta_get_init,
.dump = nft_meta_get_dump,
+ .validate = nft_meta_get_validate,
};
static const struct nft_expr_ops nft_meta_set_ops = {
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index ed548d06b6dd..1f36954c2ba9 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -142,7 +142,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return -EINVAL;
family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
- if (family != ctx->afi->family)
+ if (family != ctx->family)
return -EOPNOTSUPP;
switch (family) {
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 1e66538bf0ff..c64cbe78dee7 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -75,7 +75,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
return -EINVAL;
}
- return nf_ct_netns_get(ctx->net, ctx->afi->family);
+ return nf_ct_netns_get(ctx->net, ctx->family);
}
EXPORT_SYMBOL_GPL(nft_redir_init);
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index a6b7d05aeacf..11a2071b6dd4 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -27,7 +27,7 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skbdst)
{
u32 minlen = sizeof(struct ipv6hdr), mtu = dst_mtu(skbdst);
const struct sk_buff *skb = pkt->skb;
- const struct nf_afinfo *ai;
+ struct dst_entry *dst = NULL;
struct flowi fl;
memset(&fl, 0, sizeof(fl));
@@ -43,15 +43,10 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skbdst)
break;
}
- ai = nf_get_afinfo(nft_pf(pkt));
- if (ai) {
- struct dst_entry *dst = NULL;
-
- ai->route(nft_net(pkt), &dst, &fl, false);
- if (dst) {
- mtu = min(mtu, dst_mtu(dst));
- dst_release(dst);
- }
+ nf_route(nft_net(pkt), &dst, &fl, false, nft_pf(pkt));
+ if (dst) {
+ mtu = min(mtu, dst_mtu(dst));
+ dst_release(dst);
}
if (mtu <= minlen || mtu > 0xffff)
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index f8166c1d5430..3f1624ee056f 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -251,11 +251,7 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
if (err)
return;
- err = rhashtable_walk_start(&hti);
- if (err && err != -EAGAIN) {
- iter->err = err;
- goto out;
- }
+ rhashtable_walk_start(&hti);
while ((he = rhashtable_walk_next(&hti))) {
if (IS_ERR(he)) {
@@ -306,9 +302,7 @@ static void nft_rhash_gc(struct work_struct *work)
if (err)
goto schedule;
- err = rhashtable_walk_start(&hti);
- if (err && err != -EAGAIN)
- goto out;
+ rhashtable_walk_start(&hti);
while ((he = rhashtable_walk_next(&hti))) {
if (IS_ERR(he)) {
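Both hunks drop the error check because rhashtable_walk_start() no longer returns a value as of this series; resize events now surface only as ERR_PTR(-EAGAIN) from rhashtable_walk_next(). An illustrative sketch of the resulting walk idiom (generic, not taken from the patch):

	rhashtable_walk_enter(&ht, &hti);	/* ht: caller's rhashtable */
	rhashtable_walk_start(&hti);

	while ((obj = rhashtable_walk_next(&hti)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized under us */
			break;			/* real error */
		}
		/* ... visit obj ... */
	}

	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);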
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
new file mode 100644
index 000000000000..0b660c568156
--- /dev/null
+++ b/net/netfilter/utils.c
@@ -0,0 +1,90 @@
+#include <linux/kernel.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_queue.h>
+
+__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol,
+ unsigned short family)
+{
+ const struct nf_ipv6_ops *v6ops;
+ __sum16 csum = 0;
+
+ switch (family) {
+ case AF_INET:
+ csum = nf_ip_checksum(skb, hook, dataoff, protocol);
+ break;
+ case AF_INET6:
+ v6ops = rcu_dereference(nf_ipv6_ops);
+ if (v6ops)
+ csum = v6ops->checksum(skb, hook, dataoff, protocol);
+ break;
+ }
+
+ return csum;
+}
+EXPORT_SYMBOL_GPL(nf_checksum);
+
+__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol, unsigned short family)
+{
+ const struct nf_ipv6_ops *v6ops;
+ __sum16 csum = 0;
+
+ switch (family) {
+ case AF_INET:
+ csum = nf_ip_checksum_partial(skb, hook, dataoff, len,
+ protocol);
+ break;
+ case AF_INET6:
+ v6ops = rcu_dereference(nf_ipv6_ops);
+ if (v6ops)
+ csum = v6ops->checksum_partial(skb, hook, dataoff, len,
+ protocol);
+ break;
+ }
+
+ return csum;
+}
+EXPORT_SYMBOL_GPL(nf_checksum_partial);
+
+int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict, unsigned short family)
+{
+ const struct nf_ipv6_ops *v6ops;
+ int ret = 0;
+
+ switch (family) {
+ case AF_INET:
+ ret = nf_ip_route(net, dst, fl, strict);
+ break;
+ case AF_INET6:
+ v6ops = rcu_dereference(nf_ipv6_ops);
+ if (v6ops)
+ ret = v6ops->route(net, dst, fl, strict);
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_route);
+
+int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
+{
+ const struct nf_ipv6_ops *v6ops;
+ int ret = 0;
+
+ switch (entry->state.pf) {
+ case AF_INET:
+ ret = nf_ip_reroute(skb, entry);
+ break;
+ case AF_INET6:
+ v6ops = rcu_dereference(nf_ipv6_ops);
+ if (v6ops)
+ ret = v6ops->reroute(skb, entry);
+ break;
+ }
+ return ret;
+}
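The helpers in this new file replace the old nf_afinfo indirection: callers pass the family, and dispatch to the IPv4 or IPv6 implementation happens internally. A hypothetical caller (the hook state and transport-header offset thoff are assumptions, not from the patch):

	/* Illustrative only: family-agnostic L4 checksum validation. */
	__sum16 csum;

	csum = nf_checksum(skb, state->hook, thoff, IPPROTO_TCP, state->pf);
	if (csum)
		return NF_DROP;		/* checksum did not verify */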
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 55802e97f906..0b56bf05c169 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1027,7 +1027,7 @@ void xt_free_table_info(struct xt_table_info *info)
}
EXPORT_SYMBOL(xt_free_table_info);
-/* Find table by name, grabs mutex & ref. Returns NULL on error. */
+/* Find table by name, grabs mutex & ref. Returns ERR_PTR on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name)
{
@@ -1043,17 +1043,17 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
/* Table doesn't exist in this netns, re-try init */
list_for_each_entry(t, &init_net.xt.tables[af], list) {
+ int err;
+
if (strcmp(t->name, name))
continue;
- if (!try_module_get(t->me)) {
- mutex_unlock(&xt[af].mutex);
- return NULL;
- }
-
+ if (!try_module_get(t->me))
+ goto out;
mutex_unlock(&xt[af].mutex);
- if (t->table_init(net) != 0) {
+ err = t->table_init(net);
+ if (err < 0) {
module_put(t->me);
- return NULL;
+ return ERR_PTR(err);
}
found = t;
@@ -1073,10 +1073,28 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
module_put(found->me);
out:
mutex_unlock(&xt[af].mutex);
- return NULL;
+ return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
+struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
+ const char *name)
+{
+ struct xt_table *t = xt_find_table_lock(net, af, name);
+
+#ifdef CONFIG_MODULES
+ if (IS_ERR(t)) {
+ int err = request_module("%stable_%s", xt_prefix[af], name);
+ if (err < 0)
+ return ERR_PTR(err);
+ t = xt_find_table_lock(net, af, name);
+ }
+#endif
+
+ return t;
+}
+EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
+
void xt_table_unlock(struct xt_table *table)
{
mutex_unlock(&xt[table->af].mutex);
@@ -1344,7 +1362,6 @@ static int xt_table_open(struct inode *inode, struct file *file)
}
static const struct file_operations xt_table_ops = {
- .owner = THIS_MODULE,
.open = xt_table_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1397,7 +1414,7 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
trav->curr = trav->curr->next;
if (trav->curr != trav->head)
break;
- /* fallthru, _stop will unlock */
+ /* fall through */
default:
return NULL;
}
@@ -1480,7 +1497,6 @@ static int xt_match_open(struct inode *inode, struct file *file)
}
static const struct file_operations xt_match_ops = {
- .owner = THIS_MODULE,
.open = xt_match_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1533,7 +1549,6 @@ static int xt_target_open(struct inode *inode, struct file *file)
}
static const struct file_operations xt_target_ops = {
- .owner = THIS_MODULE,
.open = xt_target_open,
.read = seq_read,
.llseek = seq_lseek,
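Since xt_find_table_lock() now reports failure as an ERR_PTR() instead of NULL, callers have to switch from NULL checks to IS_ERR(). A sketch of the adjusted call pattern (not a hunk from this patch):

	struct xt_table *t;

	t = xt_request_find_table_lock(net, AF_INET, "filter");
	if (IS_ERR(t))
		return PTR_ERR(t);	/* previously: if (!t) return -ENOENT; */
	/* ... operate on the table ... */
	xt_table_unlock(t);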
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 9dae4d665965..99bb8e410f22 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -48,7 +48,6 @@ static u_int32_t tcpmss_reverse_mtu(struct net *net,
unsigned int family)
{
struct flowi fl;
- const struct nf_afinfo *ai;
struct rtable *rt = NULL;
u_int32_t mtu = ~0U;
@@ -62,10 +61,8 @@ static u_int32_t tcpmss_reverse_mtu(struct net *net,
memset(fl6, 0, sizeof(*fl6));
fl6->daddr = ipv6_hdr(skb)->saddr;
}
- ai = nf_get_afinfo(family);
- if (ai != NULL)
- ai->route(net, (struct dst_entry **)&rt, &fl, false);
+ nf_route(net, (struct dst_entry **)&rt, &fl, false, family);
if (rt != NULL) {
mtu = dst_mtu(&rt->dst);
dst_release(&rt->dst);
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 3b2be2ae6987..911a7c0da504 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -36,7 +36,7 @@ MODULE_ALIAS("ip6t_addrtype");
static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
const struct in6_addr *addr, u16 mask)
{
- const struct nf_afinfo *afinfo;
+ const struct nf_ipv6_ops *v6ops;
struct flowi6 flow;
struct rt6_info *rt;
u32 ret = 0;
@@ -47,17 +47,14 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
if (dev)
flow.flowi6_oif = dev->ifindex;
- afinfo = nf_get_afinfo(NFPROTO_IPV6);
- if (afinfo != NULL) {
- const struct nf_ipv6_ops *v6ops;
-
+ v6ops = nf_get_ipv6_ops();
+ if (v6ops) {
if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
- v6ops = nf_get_ipv6_ops();
- if (v6ops && v6ops->chk_addr(net, addr, dev, true))
+ if (v6ops->chk_addr(net, addr, dev, true))
ret = XT_ADDRTYPE_LOCAL;
}
- route_err = afinfo->route(net, (struct dst_entry **)&rt,
- flowi6_to_flowi(&flow), false);
+ route_err = v6ops->route(net, (struct dst_entry **)&rt,
+ flowi6_to_flowi(&flow), false);
} else {
route_err = 1;
}
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 1f7fbd3c7e5a..06b090d8e901 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -55,21 +55,11 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
{
- mm_segment_t oldfs = get_fs();
- int retval, fd;
-
if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
return -EINVAL;
- set_fs(KERNEL_DS);
- fd = bpf_obj_get_user(path, 0);
- set_fs(oldfs);
- if (fd < 0)
- return fd;
-
- retval = __bpf_mt_check_fd(fd, ret);
- sys_close(fd);
- return retval;
+ *ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
+ return PTR_ERR_OR_ZERO(*ret);
}
static int bpf_mt_check(const struct xt_mtchk_param *par)
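The rewritten helper resolves a pinned program directly through the bpf core instead of obtaining a user-space fd under KERNEL_DS and closing it again. Sketch of the new lookup, with a hypothetical pin path:

	struct bpf_prog *prog;

	prog = bpf_prog_get_type_path("/sys/fs/bpf/my_filter",	/* hypothetical path */
				      BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog))
		return PTR_ERR(prog);
	/* ... use prog, then drop the reference ... */
	bpf_prog_put(prog);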
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index a6214f235333..b1b17b9353e1 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -12,292 +12,30 @@
* GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au).
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/jhash.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/rbtree.h>
+
#include <linux/module.h>
-#include <linux/random.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connlimit.h>
+
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
-
-#define CONNLIMIT_SLOTS 256U
-
-#ifdef CONFIG_LOCKDEP
-#define CONNLIMIT_LOCK_SLOTS 8U
-#else
-#define CONNLIMIT_LOCK_SLOTS 256U
-#endif
-
-#define CONNLIMIT_GC_MAX_NODES 8
-
-/* we will save the tuples of all connections we care about */
-struct xt_connlimit_conn {
- struct hlist_node node;
- struct nf_conntrack_tuple tuple;
-};
-
-struct xt_connlimit_rb {
- struct rb_node node;
- struct hlist_head hhead; /* connections/hosts in same subnet */
- union nf_inet_addr addr; /* search key */
-};
-
-static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;
-
-struct xt_connlimit_data {
- struct rb_root climit_root[CONNLIMIT_SLOTS];
-};
-
-static u_int32_t connlimit_rnd __read_mostly;
-static struct kmem_cache *connlimit_rb_cachep __read_mostly;
-static struct kmem_cache *connlimit_conn_cachep __read_mostly;
-
-static inline unsigned int connlimit_iphash(__be32 addr)
-{
- return jhash_1word((__force __u32)addr,
- connlimit_rnd) % CONNLIMIT_SLOTS;
-}
-
-static inline unsigned int
-connlimit_iphash6(const union nf_inet_addr *addr)
-{
- return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6),
- connlimit_rnd) % CONNLIMIT_SLOTS;
-}
-
-static inline bool already_closed(const struct nf_conn *conn)
-{
- if (nf_ct_protonum(conn) == IPPROTO_TCP)
- return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
- conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
- else
- return 0;
-}
-
-static int
-same_source(const union nf_inet_addr *addr,
- const union nf_inet_addr *u3, u_int8_t family)
-{
- if (family == NFPROTO_IPV4)
- return ntohl(addr->ip) - ntohl(u3->ip);
-
- return memcmp(addr->ip6, u3->ip6, sizeof(addr->ip6));
-}
-
-static bool add_hlist(struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const union nf_inet_addr *addr)
-{
- struct xt_connlimit_conn *conn;
-
- conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
- if (conn == NULL)
- return false;
- conn->tuple = *tuple;
- hlist_add_head(&conn->node, head);
- return true;
-}
-
-static unsigned int check_hlist(struct net *net,
- struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone,
- bool *addit)
-{
- const struct nf_conntrack_tuple_hash *found;
- struct xt_connlimit_conn *conn;
- struct hlist_node *n;
- struct nf_conn *found_ct;
- unsigned int length = 0;
-
- *addit = true;
-
- /* check the saved connections */
- hlist_for_each_entry_safe(conn, n, head, node) {
- found = nf_conntrack_find_get(net, zone, &conn->tuple);
- if (found == NULL) {
- hlist_del(&conn->node);
- kmem_cache_free(connlimit_conn_cachep, conn);
- continue;
- }
-
- found_ct = nf_ct_tuplehash_to_ctrack(found);
-
- if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
- /*
- * Just to be sure we have it only once in the list.
- * We should not see tuples twice unless someone hooks
- * this into a table without "-p tcp --syn".
- */
- *addit = false;
- } else if (already_closed(found_ct)) {
- /*
- * we do not care about connections which are
- * closed already -> ditch it
- */
- nf_ct_put(found_ct);
- hlist_del(&conn->node);
- kmem_cache_free(connlimit_conn_cachep, conn);
- continue;
- }
-
- nf_ct_put(found_ct);
- length++;
- }
-
- return length;
-}
-
-static void tree_nodes_free(struct rb_root *root,
- struct xt_connlimit_rb *gc_nodes[],
- unsigned int gc_count)
-{
- struct xt_connlimit_rb *rbconn;
-
- while (gc_count) {
- rbconn = gc_nodes[--gc_count];
- rb_erase(&rbconn->node, root);
- kmem_cache_free(connlimit_rb_cachep, rbconn);
- }
-}
-
-static unsigned int
-count_tree(struct net *net, struct rb_root *root,
- const struct nf_conntrack_tuple *tuple,
- const union nf_inet_addr *addr,
- u8 family, const struct nf_conntrack_zone *zone)
-{
- struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
- struct rb_node **rbnode, *parent;
- struct xt_connlimit_rb *rbconn;
- struct xt_connlimit_conn *conn;
- unsigned int gc_count;
- bool no_gc = false;
-
- restart:
- gc_count = 0;
- parent = NULL;
- rbnode = &(root->rb_node);
- while (*rbnode) {
- int diff;
- bool addit;
-
- rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);
-
- parent = *rbnode;
- diff = same_source(addr, &rbconn->addr, family);
- if (diff < 0) {
- rbnode = &((*rbnode)->rb_left);
- } else if (diff > 0) {
- rbnode = &((*rbnode)->rb_right);
- } else {
- /* same source network -> be counted! */
- unsigned int count;
- count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
-
- tree_nodes_free(root, gc_nodes, gc_count);
- if (!addit)
- return count;
-
- if (!add_hlist(&rbconn->hhead, tuple, addr))
- return 0; /* hotdrop */
-
- return count + 1;
- }
-
- if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
- continue;
-
- /* only used for GC on hhead, retval and 'addit' ignored */
- check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
- if (hlist_empty(&rbconn->hhead))
- gc_nodes[gc_count++] = rbconn;
- }
-
- if (gc_count) {
- no_gc = true;
- tree_nodes_free(root, gc_nodes, gc_count);
- /* tree_node_free before new allocation permits
- * allocator to re-use newly free'd object.
- *
- * This is a rare event; in most cases we will find
- * existing node to re-use. (or gc_count is 0).
- */
- goto restart;
- }
-
- /* no match, need to insert new node */
- rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
- if (rbconn == NULL)
- return 0;
-
- conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
- if (conn == NULL) {
- kmem_cache_free(connlimit_rb_cachep, rbconn);
- return 0;
- }
-
- conn->tuple = *tuple;
- rbconn->addr = *addr;
-
- INIT_HLIST_HEAD(&rbconn->hhead);
- hlist_add_head(&conn->node, &rbconn->hhead);
-
- rb_link_node(&rbconn->node, parent, rbnode);
- rb_insert_color(&rbconn->node, root);
- return 1;
-}
-
-static int count_them(struct net *net,
- struct xt_connlimit_data *data,
- const struct nf_conntrack_tuple *tuple,
- const union nf_inet_addr *addr,
- u_int8_t family,
- const struct nf_conntrack_zone *zone)
-{
- struct rb_root *root;
- int count;
- u32 hash;
-
- if (family == NFPROTO_IPV6)
- hash = connlimit_iphash6(addr);
- else
- hash = connlimit_iphash(addr->ip);
- root = &data->climit_root[hash];
-
- spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
-
- count = count_tree(net, root, tuple, addr, family, zone);
-
- spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
-
- return count;
-}
+#include <net/netfilter/nf_conntrack_count.h>
static bool
connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = xt_net(par);
const struct xt_connlimit_info *info = par->matchinfo;
- union nf_inet_addr addr;
struct nf_conntrack_tuple tuple;
const struct nf_conntrack_tuple *tuple_ptr = &tuple;
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int connections;
+ u32 key[5];
ct = nf_ct_get(skb, &ctinfo);
if (ct != NULL) {
@@ -310,6 +48,7 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
if (xt_family(par) == NFPROTO_IPV6) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
+ union nf_inet_addr addr;
unsigned int i;
memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
@@ -317,22 +56,24 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
for (i = 0; i < ARRAY_SIZE(addr.ip6); ++i)
addr.ip6[i] &= info->mask.ip6[i];
+ memcpy(key, &addr, sizeof(addr.ip6));
+ key[4] = zone->id;
} else {
const struct iphdr *iph = ip_hdr(skb);
- addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
+ key[0] = (info->flags & XT_CONNLIMIT_DADDR) ?
iph->daddr : iph->saddr;
- addr.ip &= info->mask.ip;
+ key[0] &= info->mask.ip;
+ key[1] = zone->id;
}
- connections = count_them(net, info->data, tuple_ptr, &addr,
- xt_family(par), zone);
+ connections = nf_conncount_count(net, info->data, key,
+ xt_family(par), tuple_ptr, zone);
if (connections == 0)
/* kmalloc failed, drop it entirely */
goto hotdrop;
- return (connections > info->limit) ^
- !!(info->flags & XT_CONNLIMIT_INVERT);
+ return (connections > info->limit) ^ !!(info->flags & XT_CONNLIMIT_INVERT);
hotdrop:
par->hotdrop = true;
@@ -342,61 +83,27 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
struct xt_connlimit_info *info = par->matchinfo;
- unsigned int i;
- int ret;
+ unsigned int keylen;
- net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd));
-
- ret = nf_ct_netns_get(par->net, par->family);
- if (ret < 0) {
- pr_info("cannot load conntrack support for "
- "address family %u\n", par->family);
- return ret;
- }
+ keylen = sizeof(u32);
+ if (par->family == NFPROTO_IPV6)
+ keylen += sizeof(struct in6_addr);
+ else
+ keylen += sizeof(struct in_addr);
/* init private data */
- info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
- if (info->data == NULL) {
- nf_ct_netns_put(par->net, par->family);
- return -ENOMEM;
- }
-
- for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
- info->data->climit_root[i] = RB_ROOT;
+ info->data = nf_conncount_init(par->net, par->family, keylen);
+ if (IS_ERR(info->data))
+ return PTR_ERR(info->data);
return 0;
}
-static void destroy_tree(struct rb_root *r)
-{
- struct xt_connlimit_conn *conn;
- struct xt_connlimit_rb *rbconn;
- struct hlist_node *n;
- struct rb_node *node;
-
- while ((node = rb_first(r)) != NULL) {
- rbconn = rb_entry(node, struct xt_connlimit_rb, node);
-
- rb_erase(node, r);
-
- hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
- kmem_cache_free(connlimit_conn_cachep, conn);
-
- kmem_cache_free(connlimit_rb_cachep, rbconn);
- }
-}
-
static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_connlimit_info *info = par->matchinfo;
- unsigned int i;
-
- nf_ct_netns_put(par->net, par->family);
-
- for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
- destroy_tree(&info->data->climit_root[i]);
- kfree(info->data);
+ nf_conncount_destroy(par->net, par->family, info->data);
}
static struct xt_match connlimit_mt_reg __read_mostly = {
@@ -413,40 +120,12 @@ static struct xt_match connlimit_mt_reg __read_mostly = {
static int __init connlimit_mt_init(void)
{
- int ret, i;
-
- BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
- BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);
-
- for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
- spin_lock_init(&xt_connlimit_locks[i]);
-
- connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
- sizeof(struct xt_connlimit_conn),
- 0, 0, NULL);
- if (!connlimit_conn_cachep)
- return -ENOMEM;
-
- connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
- sizeof(struct xt_connlimit_rb),
- 0, 0, NULL);
- if (!connlimit_rb_cachep) {
- kmem_cache_destroy(connlimit_conn_cachep);
- return -ENOMEM;
- }
- ret = xt_register_match(&connlimit_mt_reg);
- if (ret != 0) {
- kmem_cache_destroy(connlimit_conn_cachep);
- kmem_cache_destroy(connlimit_rb_cachep);
- }
- return ret;
+ return xt_register_match(&connlimit_mt_reg);
}
static void __exit connlimit_mt_exit(void)
{
xt_unregister_match(&connlimit_mt_reg);
- kmem_cache_destroy(connlimit_conn_cachep);
- kmem_cache_destroy(connlimit_rb_cachep);
}
module_init(connlimit_mt_init);
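The rbtree/hlist machinery removed above now lives behind the shared nf_conncount API; xt_connlimit keeps only the key construction. A minimal lifecycle sketch using the signatures visible in this patch (masked_saddr and zone_id are placeholder values):

	struct nf_conncount_data *data;
	u32 key[2] = { masked_saddr, zone_id };	/* IPv4: address + zone */

	data = nf_conncount_init(net, NFPROTO_IPV4, sizeof(key));
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* per packet: */
	connections = nf_conncount_count(net, data, key, NFPROTO_IPV4,
					 tuple, zone);

	/* teardown: */
	nf_conncount_destroy(net, NFPROTO_IPV4, data);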
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 5da8746f7b88..ca6847403ca2 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -353,7 +353,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
static bool select_all(const struct xt_hashlimit_htable *ht,
const struct dsthash_ent *he)
{
- return 1;
+ return true;
}
static bool select_gc(const struct xt_hashlimit_htable *ht,
@@ -1266,7 +1266,6 @@ static int dl_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations dl_file_ops_v2 = {
- .owner = THIS_MODULE,
.open = dl_proc_open_v2,
.read = seq_read,
.llseek = seq_lseek,
@@ -1274,7 +1273,6 @@ static const struct file_operations dl_file_ops_v2 = {
};
static const struct file_operations dl_file_ops_v1 = {
- .owner = THIS_MODULE,
.open = dl_proc_open_v1,
.read = seq_read,
.llseek = seq_lseek,
@@ -1282,7 +1280,6 @@ static const struct file_operations dl_file_ops_v1 = {
};
static const struct file_operations dl_file_ops = {
- .owner = THIS_MODULE,
.open = dl_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
index 000e70377f85..7ca64a50db04 100644
--- a/net/netfilter/xt_ipcomp.c
+++ b/net/netfilter/xt_ipcomp.c
@@ -58,7 +58,7 @@ static bool comp_mt(const struct sk_buff *skb, struct xt_action_param *par)
*/
pr_debug("Dropping evil IPComp tinygram.\n");
par->hotdrop = true;
- return 0;
+ return false;
}
return spi_match(compinfo->spis[0], compinfo->spis[1],
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 2b4ab189bba7..5639fb03bdd9 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -93,7 +93,8 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
if (dst->xfrm == NULL)
return -1;
- for (i = 0; dst && dst->xfrm; dst = dst->child, i++) {
+ for (i = 0; dst && dst->xfrm;
+ dst = ((struct xfrm_dst *)dst)->child, i++) {
pos = strict ? i : 0;
if (pos >= info->len)
return 0;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 64285702afd5..16b6b11ee83f 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -39,13 +39,17 @@ match_set(ip_set_id_t index, const struct sk_buff *skb,
return inv;
}
-#define ADT_OPT(n, f, d, fs, cfs, t) \
-struct ip_set_adt_opt n = { \
- .family = f, \
- .dim = d, \
- .flags = fs, \
- .cmdflags = cfs, \
- .ext.timeout = t, \
+#define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo) \
+struct ip_set_adt_opt n = { \
+ .family = f, \
+ .dim = d, \
+ .flags = fs, \
+ .cmdflags = cfs, \
+ .ext.timeout = t, \
+ .ext.packets = p, \
+ .ext.bytes = b, \
+ .ext.packets_op = po, \
+ .ext.bytes_op = bo, \
}
/* Revision 0 interface: backward compatible with netfilter/iptables */
@@ -56,7 +60,8 @@ set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
const struct xt_set_info_match_v0 *info = par->matchinfo;
ADT_OPT(opt, xt_family(par), info->match_set.u.compat.dim,
- info->match_set.u.compat.flags, 0, UINT_MAX);
+ info->match_set.u.compat.flags, 0, UINT_MAX,
+ 0, 0, 0, 0);
return match_set(info->match_set.index, skb, par, &opt,
info->match_set.u.compat.flags & IPSET_INV_MATCH);
@@ -119,7 +124,8 @@ set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
const struct xt_set_info_match_v1 *info = par->matchinfo;
ADT_OPT(opt, xt_family(par), info->match_set.dim,
- info->match_set.flags, 0, UINT_MAX);
+ info->match_set.flags, 0, UINT_MAX,
+ 0, 0, 0, 0);
if (opt.flags & IPSET_RETURN_NOMATCH)
opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
@@ -161,45 +167,21 @@ set_match_v1_destroy(const struct xt_mtdtor_param *par)
/* Revision 3 match */
static bool
-match_counter0(u64 counter, const struct ip_set_counter_match0 *info)
-{
- switch (info->op) {
- case IPSET_COUNTER_NONE:
- return true;
- case IPSET_COUNTER_EQ:
- return counter == info->value;
- case IPSET_COUNTER_NE:
- return counter != info->value;
- case IPSET_COUNTER_LT:
- return counter < info->value;
- case IPSET_COUNTER_GT:
- return counter > info->value;
- }
- return false;
-}
-
-static bool
set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v3 *info = par->matchinfo;
- int ret;
ADT_OPT(opt, xt_family(par), info->match_set.dim,
- info->match_set.flags, info->flags, UINT_MAX);
+ info->match_set.flags, info->flags, UINT_MAX,
+ info->packets.value, info->bytes.value,
+ info->packets.op, info->bytes.op);
if (info->packets.op != IPSET_COUNTER_NONE ||
info->bytes.op != IPSET_COUNTER_NONE)
opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
- ret = match_set(info->match_set.index, skb, par, &opt,
- info->match_set.flags & IPSET_INV_MATCH);
-
- if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
- return ret;
-
- if (!match_counter0(opt.ext.packets, &info->packets))
- return false;
- return match_counter0(opt.ext.bytes, &info->bytes);
+ return match_set(info->match_set.index, skb, par, &opt,
+ info->match_set.flags & IPSET_INV_MATCH);
}
#define set_match_v3_checkentry set_match_v1_checkentry
@@ -208,45 +190,21 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
/* Revision 4 match */
static bool
-match_counter(u64 counter, const struct ip_set_counter_match *info)
-{
- switch (info->op) {
- case IPSET_COUNTER_NONE:
- return true;
- case IPSET_COUNTER_EQ:
- return counter == info->value;
- case IPSET_COUNTER_NE:
- return counter != info->value;
- case IPSET_COUNTER_LT:
- return counter < info->value;
- case IPSET_COUNTER_GT:
- return counter > info->value;
- }
- return false;
-}
-
-static bool
set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v4 *info = par->matchinfo;
- int ret;
ADT_OPT(opt, xt_family(par), info->match_set.dim,
- info->match_set.flags, info->flags, UINT_MAX);
+ info->match_set.flags, info->flags, UINT_MAX,
+ info->packets.value, info->bytes.value,
+ info->packets.op, info->bytes.op);
if (info->packets.op != IPSET_COUNTER_NONE ||
info->bytes.op != IPSET_COUNTER_NONE)
opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
- ret = match_set(info->match_set.index, skb, par, &opt,
- info->match_set.flags & IPSET_INV_MATCH);
-
- if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
- return ret;
-
- if (!match_counter(opt.ext.packets, &info->packets))
- return false;
- return match_counter(opt.ext.bytes, &info->bytes);
+ return match_set(info->match_set.index, skb, par, &opt,
+ info->match_set.flags & IPSET_INV_MATCH);
}
#define set_match_v4_checkentry set_match_v1_checkentry
@@ -260,9 +218,11 @@ set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
const struct xt_set_info_target_v0 *info = par->targinfo;
ADT_OPT(add_opt, xt_family(par), info->add_set.u.compat.dim,
- info->add_set.u.compat.flags, 0, UINT_MAX);
+ info->add_set.u.compat.flags, 0, UINT_MAX,
+ 0, 0, 0, 0);
ADT_OPT(del_opt, xt_family(par), info->del_set.u.compat.dim,
- info->del_set.u.compat.flags, 0, UINT_MAX);
+ info->del_set.u.compat.flags, 0, UINT_MAX,
+ 0, 0, 0, 0);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
@@ -333,9 +293,11 @@ set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
const struct xt_set_info_target_v1 *info = par->targinfo;
ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
- info->add_set.flags, 0, UINT_MAX);
+ info->add_set.flags, 0, UINT_MAX,
+ 0, 0, 0, 0);
ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
- info->del_set.flags, 0, UINT_MAX);
+ info->del_set.flags, 0, UINT_MAX,
+ 0, 0, 0, 0);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
@@ -402,9 +364,11 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
const struct xt_set_info_target_v2 *info = par->targinfo;
ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
- info->add_set.flags, info->flags, info->timeout);
+ info->add_set.flags, info->flags, info->timeout,
+ 0, 0, 0, 0);
ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
- info->del_set.flags, 0, UINT_MAX);
+ info->del_set.flags, 0, UINT_MAX,
+ 0, 0, 0, 0);
/* Normalize to fit into jiffies */
if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
@@ -432,11 +396,14 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
int ret;
ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
- info->add_set.flags, info->flags, info->timeout);
+ info->add_set.flags, info->flags, info->timeout,
+ 0, 0, 0, 0);
ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
- info->del_set.flags, 0, UINT_MAX);
+ info->del_set.flags, 0, UINT_MAX,
+ 0, 0, 0, 0);
ADT_OPT(map_opt, xt_family(par), info->map_set.dim,
- info->map_set.flags, 0, UINT_MAX);
+ info->map_set.flags, 0, UINT_MAX,
+ 0, 0, 0, 0);
/* Normalize to fit into jiffies */
if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 79cc1bf36e4a..2ad445c1d27c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -65,6 +65,7 @@
#include <linux/net_namespace.h>
#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
@@ -145,8 +146,6 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
static BLOCKING_NOTIFIER_HEAD(netlink_chain);
-static DEFINE_SPINLOCK(netlink_tap_lock);
-static struct list_head netlink_tap_all __read_mostly;
static const struct rhashtable_params netlink_rhashtable_params;
@@ -173,14 +172,24 @@ static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
return new;
}
+static unsigned int netlink_tap_net_id;
+
+struct netlink_tap_net {
+ struct list_head netlink_tap_all;
+ struct mutex netlink_tap_lock;
+};
+
int netlink_add_tap(struct netlink_tap *nt)
{
+ struct net *net = dev_net(nt->dev);
+ struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
+
if (unlikely(nt->dev->type != ARPHRD_NETLINK))
return -EINVAL;
- spin_lock(&netlink_tap_lock);
- list_add_rcu(&nt->list, &netlink_tap_all);
- spin_unlock(&netlink_tap_lock);
+ mutex_lock(&nn->netlink_tap_lock);
+ list_add_rcu(&nt->list, &nn->netlink_tap_all);
+ mutex_unlock(&nn->netlink_tap_lock);
__module_get(nt->module);
@@ -190,12 +199,14 @@ EXPORT_SYMBOL_GPL(netlink_add_tap);
static int __netlink_remove_tap(struct netlink_tap *nt)
{
+ struct net *net = dev_net(nt->dev);
+ struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
bool found = false;
struct netlink_tap *tmp;
- spin_lock(&netlink_tap_lock);
+ mutex_lock(&nn->netlink_tap_lock);
- list_for_each_entry(tmp, &netlink_tap_all, list) {
+ list_for_each_entry(tmp, &nn->netlink_tap_all, list) {
if (nt == tmp) {
list_del_rcu(&nt->list);
found = true;
@@ -205,7 +216,7 @@ static int __netlink_remove_tap(struct netlink_tap *nt)
pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
- spin_unlock(&netlink_tap_lock);
+ mutex_unlock(&nn->netlink_tap_lock);
if (found)
module_put(nt->module);
@@ -224,6 +235,26 @@ int netlink_remove_tap(struct netlink_tap *nt)
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
+static __net_init int netlink_tap_init_net(struct net *net)
+{
+ struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
+
+ INIT_LIST_HEAD(&nn->netlink_tap_all);
+ mutex_init(&nn->netlink_tap_lock);
+ return 0;
+}
+
+static void __net_exit netlink_tap_exit_net(struct net *net)
+{
+}
+
+static struct pernet_operations netlink_tap_net_ops = {
+ .init = netlink_tap_init_net,
+ .exit = netlink_tap_exit_net,
+ .id = &netlink_tap_net_id,
+ .size = sizeof(struct netlink_tap_net),
+};
+
static bool netlink_filter_tap(const struct sk_buff *skb)
{
struct sock *sk = skb->sk;
@@ -277,7 +308,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
return ret;
}
-static void __netlink_deliver_tap(struct sk_buff *skb)
+static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
{
int ret;
struct netlink_tap *tmp;
@@ -285,19 +316,21 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
if (!netlink_filter_tap(skb))
return;
- list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
+ list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) {
ret = __netlink_deliver_tap_skb(skb, tmp->dev);
if (unlikely(ret))
break;
}
}
-static void netlink_deliver_tap(struct sk_buff *skb)
+static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
{
+ struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
+
rcu_read_lock();
- if (unlikely(!list_empty(&netlink_tap_all)))
- __netlink_deliver_tap(skb);
+ if (unlikely(!list_empty(&nn->netlink_tap_all)))
+ __netlink_deliver_tap(skb, nn);
rcu_read_unlock();
}
@@ -306,7 +339,7 @@ static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
struct sk_buff *skb)
{
if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
- netlink_deliver_tap(skb);
+ netlink_deliver_tap(sock_net(dst), skb);
}
static void netlink_overrun(struct sock *sk)
@@ -1216,7 +1249,7 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
int len = skb->len;
- netlink_deliver_tap(skb);
+ netlink_deliver_tap(sock_net(sk), skb);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
@@ -2384,13 +2417,14 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
struct nlmsghdr *,
struct netlink_ext_ack *))
{
- struct netlink_ext_ack extack = {};
+ struct netlink_ext_ack extack;
struct nlmsghdr *nlh;
int err;
while (skb->len >= nlmsg_total_size(0)) {
int msglen;
+ memset(&extack, 0, sizeof(extack));
nlh = nlmsg_hdr(skb);
err = 0;
@@ -2481,8 +2515,9 @@ static int netlink_walk_start(struct nl_seq_iter *iter)
return err;
}
- err = rhashtable_walk_start(&iter->hti);
- return err == -EAGAIN ? 0 : err;
+ rhashtable_walk_start(&iter->hti);
+
+ return 0;
}
static void netlink_walk_stop(struct nl_seq_iter *iter)
@@ -2603,7 +2638,6 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations netlink_seq_fops = {
- .owner = THIS_MODULE,
.open = netlink_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2733,12 +2767,11 @@ static int __init netlink_proto_init(void)
}
}
- INIT_LIST_HEAD(&netlink_tap_all);
-
netlink_add_usersock_entry();
sock_register(&netlink_family_ops);
register_pernet_subsys(&netlink_net_ops);
+ register_pernet_subsys(&netlink_tap_net_ops);
/* The netlink device handler may be needed early. */
rtnetlink_init();
out:
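With this conversion each network namespace keeps its own tap list, keyed by the namespace owning the tap's ARPHRD_NETLINK device, and the global spinlock becomes a per-netns mutex. Registration is unchanged at the API level; a hypothetical user (monitor_dev is an assumed netlink monitor device):

	static struct netlink_tap my_tap = {
		.module = THIS_MODULE,
	};

	my_tap.dev = monitor_dev;		/* assumed ARPHRD_NETLINK device */
	err = netlink_add_tap(&my_tap);		/* joins dev_net(monitor_dev)'s list */

	/* later, on teardown: */
	netlink_remove_tap(&my_tap);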
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 8faa20b4d457..7dda33b9b784 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -115,11 +115,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
if (!s_num)
rhashtable_walk_enter(&tbl->hash, hti);
- ret = rhashtable_walk_start(hti);
- if (ret == -EAGAIN)
- ret = 0;
- if (ret)
- goto stop;
+ rhashtable_walk_start(hti);
while ((nlsk = rhashtable_walk_next(hti))) {
if (IS_ERR(nlsk)) {
@@ -146,8 +142,8 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
}
}
-stop:
rhashtable_walk_stop(hti);
+
if (ret)
goto done;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 7ed9d4422a73..9ba30c63be3d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1344,7 +1344,6 @@ static int nr_info_open(struct inode *inode, struct file *file)
}
static const struct file_operations nr_info_fops = {
- .owner = THIS_MODULE,
.open = nr_info_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 75e6ba970fde..b5a7dcb30991 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -901,7 +901,6 @@ static int nr_node_info_open(struct inode *inode, struct file *file)
}
const struct file_operations nr_nodes_fops = {
- .owner = THIS_MODULE,
.open = nr_node_info_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -968,7 +967,6 @@ static int nr_neigh_info_open(struct inode *inode, struct file *file)
}
const struct file_operations nr_neigh_fops = {
- .owner = THIS_MODULE,
.open = nr_neigh_info_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index fb7afcaa3004..985909f105eb 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -531,7 +531,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
return 0;
}
-static inline unsigned int llcp_accept_poll(struct sock *parent)
+static inline __poll_t llcp_accept_poll(struct sock *parent)
{
struct nfc_llcp_sock *llcp_sock, *parent_sock;
struct sock *sk;
@@ -549,11 +549,11 @@ static inline unsigned int llcp_accept_poll(struct sock *parent)
return 0;
}
-static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
+static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
pr_debug("%p\n", sk);
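These signature changes are part of the tree-wide switch to the __poll_t annotation, which lets sparse check that poll masks are built only from the EPOLL* bit constants. The shape of a converted handler, in sketch form:

	static __poll_t example_poll(struct file *file, struct socket *sock,
				     poll_table *wait)
	{
		struct sock *sk = sock->sk;
		__poll_t mask = 0;

		poll_wait(file, sk_sleep(sk), wait);
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}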
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index 8d104c1db628..a66f102c6c01 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -305,7 +305,7 @@ static ssize_t nci_uart_tty_write(struct tty_struct *tty, struct file *file,
return 0;
}
-static unsigned int nci_uart_tty_poll(struct tty_struct *tty,
+static __poll_t nci_uart_tty_poll(struct tty_struct *tty,
struct file *filp, poll_table *wait)
{
return 0;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index b27c5c6d9cab..62f36cc938ca 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1266,14 +1266,14 @@ static int parse_nat(const struct nlattr *attr,
/* Do not allow flags if no type is given. */
if (info->range.flags) {
OVS_NLERR(log,
- "NAT flags may be given only when NAT range (SRC or DST) is also specified.\n"
+ "NAT flags may be given only when NAT range (SRC or DST) is also specified."
);
return -EINVAL;
}
info->nat = OVS_CT_NAT; /* NAT existing connections. */
} else if (!info->commit) {
OVS_NLERR(log,
- "NAT attributes may be specified only when CT COMMIT flag is also specified.\n"
+ "NAT attributes may be specified only when CT COMMIT flag is also specified."
);
return -EINVAL;
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index f039064ce922..56b8e7167790 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -56,12 +56,12 @@
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
- struct timespec cur_ts;
+ struct timespec64 cur_ts;
u64 cur_ms, idle_ms;
- ktime_get_ts(&cur_ts);
+ ktime_get_ts64(&cur_ts);
idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
- cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+ cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
cur_ts.tv_nsec / NSEC_PER_MSEC;
return cur_ms - idle_ms;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 624ea74353dd..7322aa1e382e 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -330,12 +330,12 @@ size_t ovs_tun_key_attr_size(void)
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
+ nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
- /* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
+ /* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS and
+ * OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS are mutually exclusive with
* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
*/
+ nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
- + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_DST */
- + nla_total_size(4); /* OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS */
+ + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
}
static size_t ovs_nsh_key_attr_size(void)
@@ -402,7 +402,7 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
.next = ovs_vxlan_ext_key_lens },
[OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
[OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) },
- [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = sizeof(u32) },
+ [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = OVS_ATTR_VARIABLE },
};
static const struct ovs_len_tbl
@@ -634,30 +634,30 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
return 0;
}
-static int erspan_tun_opt_from_nlattr(const struct nlattr *attr,
+static int erspan_tun_opt_from_nlattr(const struct nlattr *a,
struct sw_flow_match *match, bool is_mask,
bool log)
{
unsigned long opt_key_offset;
- struct erspan_metadata opts;
- BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
-
- memset(&opts, 0, sizeof(opts));
- opts.index = nla_get_be32(attr);
+ BUILD_BUG_ON(sizeof(struct erspan_metadata) >
+ sizeof(match->key->tun_opts));
- /* Index has only 20-bit */
- if (ntohl(opts.index) & ~INDEX_MASK) {
- OVS_NLERR(log, "ERSPAN index number %x too large.",
- ntohl(opts.index));
+ if (nla_len(a) > sizeof(match->key->tun_opts)) {
+ OVS_NLERR(log, "ERSPAN option length err (len %d, max %zu).",
+ nla_len(a), sizeof(match->key->tun_opts));
return -EINVAL;
}
- SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), is_mask);
- opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
- SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
- is_mask);
+ if (!is_mask)
+ SW_FLOW_KEY_PUT(match, tun_opts_len,
+ sizeof(struct erspan_metadata), false);
+ else
+ SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
+ opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
+ SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
+ nla_len(a), is_mask);
return 0;
}
@@ -774,7 +774,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
return -EINVAL;
}
- err = erspan_tun_opt_from_nlattr(a, match, is_mask, log);
+ err = erspan_tun_opt_from_nlattr(a, match, is_mask,
+ log);
if (err)
return err;
@@ -906,8 +907,8 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
return -EMSGSIZE;
else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
- nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
- ((struct erspan_metadata *)tun_opts)->index))
+ nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
+ swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
}
@@ -2501,7 +2502,7 @@ static int validate_geneve_opts(struct sw_flow_key *key)
option = (struct geneve_opt *)((u8 *)option + len);
opts_len -= len;
- };
+ }
key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
@@ -2536,7 +2537,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
break;
}
- };
+ }
start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
if (start < 0)
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 3fbfc78991ac..04b94281a30b 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -488,7 +488,7 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
long long int max_bucket_size;
band = &meter->bands[i];
- max_bucket_size = (band->burst_size + band->rate) * 1000;
+ max_bucket_size = (band->burst_size + band->rate) * 1000LL;
band->bucket += delta_ms * band->rate;
if (band->bucket > max_bucket_size)
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 04a3128adcf0..bb95c43aae76 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -16,7 +16,6 @@
* 02110-1301, USA
*/
-#include <linux/hardirq.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -126,18 +125,12 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
}
-static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
-{
- dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
-}
-
static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_open = internal_dev_open,
.ndo_stop = internal_dev_stop,
.ndo_start_xmit = internal_dev_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = internal_get_stats,
- .ndo_set_rx_headroom = internal_set_rx_headroom,
};
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -154,7 +147,7 @@ static void do_setup(struct net_device *netdev)
netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
- IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
+ IFF_NO_QUEUE;
netdev->needs_free_netdev = true;
netdev->priv_destructor = internal_dev_destructor;
netdev->ethtool_ops = &internal_dev_ethtool_ops;
@@ -195,7 +188,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
err = -ENOMEM;
goto error_free_netdev;
}
- vport->dev->needed_headroom = vport->dp->max_headroom;
dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
internal_dev = internal_dev_priv(vport->dev);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index da215e5c1399..1d1483007e46 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
struct sk_buff *orig_skb = skb;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
+ bool again = false;
if (unlikely(!netif_running(dev) ||
!netif_carrier_ok(dev)))
goto drop;
- skb = validate_xmit_skb_list(skb, dev);
+ skb = validate_xmit_skb_list(skb, dev, &again);
if (skb != orig_skb)
goto drop;
@@ -4073,12 +4074,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
return 0;
}
-static unsigned int packet_poll(struct file *file, struct socket *sock,
+static __poll_t packet_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- unsigned int mask = datagram_poll(file, sock, wait);
+ __poll_t mask = datagram_poll(file, sock, wait);
spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->rx_ring.pg_vec) {
@@ -4530,7 +4531,6 @@ static int packet_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations packet_seq_fops = {
- .owner = THIS_MODULE,
.open = packet_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index da754fc926e7..871eaf2cb85e 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -299,16 +299,21 @@ out:
int __init phonet_netlink_register(void)
{
- int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit,
- NULL, 0);
+ int err = rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWADDR,
+ addr_doit, NULL, 0);
if (err)
return err;
- /* Further __rtnl_register() cannot fail */
- __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, 0);
- __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, 0);
- __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, 0);
- __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, 0);
- __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, 0);
+ /* Further rtnl_register_module() cannot fail */
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELADDR,
+ addr_doit, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETADDR,
+ NULL, getaddr_dumpit, 0);
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWROUTE,
+ route_doit, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELROUTE,
+ route_doit, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETROUTE,
+ NULL, route_dumpit, 0);
return 0;
}
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 1b050dd17393..08f6751d2030 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -341,12 +341,12 @@ static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
return 0;
}
-static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
+static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct pep_sock *pn = pep_sk(sk);
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, sk_sleep(sk), wait);
@@ -635,7 +635,6 @@ static int pn_sock_open(struct inode *inode, struct file *file)
}
const struct file_operations pn_sock_seq_fops = {
- .owner = THIS_MODULE,
.open = pn_sock_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -818,7 +817,6 @@ static int pn_res_open(struct inode *inode, struct file *file)
}
const struct file_operations pn_res_seq_fops = {
- .owner = THIS_MODULE,
.open = pn_res_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 77ab05e23001..5fb3929e3d7d 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -1116,9 +1116,13 @@ static int __init qrtr_proto_init(void)
return rc;
}
- rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
+ rc = rtnl_register_module(THIS_MODULE, PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
+ if (rc) {
+ sock_unregister(qrtr_family.family);
+ proto_unregister(&qrtr_proto);
+ }
- return 0;
+ return rc;
}
postcore_initcall(qrtr_proto_init);
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index b405f77d664c..88aa8ad0f5b6 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -152,12 +152,12 @@ static int rds_getname(struct socket *sock, struct sockaddr *uaddr,
* to send to a congested destination, the system call may still fail (and
* return ENOBUFS).
*/
-static unsigned int rds_poll(struct file *file, struct socket *sock,
+static __poll_t rds_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 75d43dc8e96b..5aa3a64aa4f0 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
rs, &addr, (int)ntohs(*port));
break;
} else {
+ rs->rs_bound_addr = 0;
rds_sock_put(rs);
ret = -ENOMEM;
break;
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 8398fee7c866..8d19fd25dce3 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -219,7 +219,11 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
spin_lock_irqsave(&rds_cong_lock, flags);
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
- if (!test_and_set_bit(0, &conn->c_map_queued)) {
+ struct rds_conn_path *cp = &conn->c_path[0];
+
+ rcu_read_lock();
+ if (!test_and_set_bit(0, &conn->c_map_queued) &&
+ !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
rds_stats_inc(s_cong_update_queued);
/* We cannot inline the call to rds_send_xmit() here
* for two reasons (both pertaining to a TCP transport):
@@ -235,9 +239,9 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
* therefore trigger warnings.
* Defer the xmit to rds_send_worker() instead.
*/
- queue_delayed_work(rds_wq,
- &conn->c_path[0].cp_send_w, 0);
+ queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
}
+ rcu_read_unlock();
}
spin_unlock_irqrestore(&rds_cong_lock, flags);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 7ee2d5d68b78..b10c0ef36d8d 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -230,8 +230,8 @@ static struct rds_connection *__rds_conn_create(struct net *net,
rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
conn, &laddr, &faddr,
- trans->t_name ? trans->t_name : "[unknown]",
- is_outgoing ? "(outgoing)" : "");
+ strnlen(trans->t_name, sizeof(trans->t_name)) ? trans->t_name :
+ "[unknown]", is_outgoing ? "(outgoing)" : "");
/*
* Since we ran without holding the conn lock, someone could
@@ -382,10 +382,13 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
struct rds_message *rm, *rtmp;
+ set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
+
if (!cp->cp_transport_data)
return;
/* make sure lingering queued work won't try to ref the conn */
+ synchronize_rcu();
cancel_delayed_work_sync(&cp->cp_send_w);
cancel_delayed_work_sync(&cp->cp_recv_w);
@@ -403,6 +406,11 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
if (cp->cp_xmit_rm)
rds_message_put(cp->cp_xmit_rm);
+ WARN_ON(delayed_work_pending(&cp->cp_send_w));
+ WARN_ON(delayed_work_pending(&cp->cp_recv_w));
+ WARN_ON(delayed_work_pending(&cp->cp_conn_w));
+ WARN_ON(work_pending(&cp->cp_down_w));
+
cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}
@@ -424,7 +432,6 @@ void rds_conn_destroy(struct rds_connection *conn)
"%pI4\n", conn, &conn->c_laddr,
&conn->c_faddr);
- conn->c_destroy_in_prog = 1;
/* Ensure conn will not be scheduled for reconnect */
spin_lock_irq(&rds_conn_lock);
hlist_del_init_rcu(&conn->c_hash_node);
@@ -445,7 +452,6 @@ void rds_conn_destroy(struct rds_connection *conn)
*/
rds_cong_remove_conn(conn);
- put_net(conn->c_net);
kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
@@ -684,10 +690,13 @@ void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
{
atomic_set(&cp->cp_state, RDS_CONN_ERROR);
- if (!destroy && cp->cp_conn->c_destroy_in_prog)
+ rcu_read_lock();
+ if (!destroy && test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ rcu_read_unlock();
return;
-
+ }
queue_work(rds_wq, &cp->cp_down_w);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);
@@ -704,9 +713,15 @@ EXPORT_SYMBOL_GPL(rds_conn_drop);
*/
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
+ rcu_read_lock();
+ if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ rcu_read_unlock();
+ return;
+ }
if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
!test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
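The RDS hunks in this series share one pattern: every site that queues deferred work first tests RDS_DESTROY_PENDING under rcu_read_lock(), while the destroy path sets the bit and then calls synchronize_rcu(), so no new work can slip in once cancel_delayed_work_sync() runs. Reduced to its essentials:

	/* queueing side */
	rcu_read_lock();
	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
	rcu_read_unlock();

	/* destroy side */
	set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
	synchronize_rcu();			/* drain readers of the bit */
	cancel_delayed_work_sync(&cp->cp_send_w);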
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 36dd2099048a..b2a5067b4afe 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -301,13 +301,11 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
- struct rdma_dev_addr *dev_addr;
ic = conn->c_transport_data;
- dev_addr = &ic->i_cm_id->route.addr.dev_addr;
- rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
- rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
+ rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
+ (union ib_gid *)&iinfo->dst_gid);
rds_ibdev = ic->rds_ibdev;
iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index bc2f1e0977d6..634cfcb7bba6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -525,6 +525,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
+ if (args->nr_local == 0)
+ return -EINVAL;
+
/* figure out the number of pages in the vector */
for (i = 0; i < args->nr_local; i++) {
if (copy_from_user(&vec, &local_vec[i],
@@ -874,6 +877,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
err:
if (page)
put_page(page);
+ rm->atomic.op_active = 0;
kfree(rm->atomic.op_notifier);
return ret;
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c349c71babff..374ae83b60d4 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -88,6 +88,7 @@ enum {
#define RDS_RECONNECT_PENDING 1
#define RDS_IN_XMIT 2
#define RDS_RECV_REFILL 3
+#define RDS_DESTROY_PENDING 4
/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS 8
@@ -139,8 +140,7 @@ struct rds_connection {
__be32 c_faddr;
unsigned int c_loopback:1,
c_ping_triggered:1,
- c_destroy_in_prog:1,
- c_pad_to_32:29;
+ c_pad_to_32:30;
int c_npaths;
struct rds_connection *c_passive;
struct rds_transport *c_trans;
@@ -150,7 +150,7 @@ struct rds_connection {
/* Protocol version */
unsigned int c_version;
- struct net *c_net;
+ possible_net_t c_net;
struct list_head c_map_item;
unsigned long c_map_queued;
@@ -165,13 +165,13 @@ struct rds_connection {
static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
- return conn->c_net;
+ return read_pnet(&conn->c_net);
}
static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
- conn->c_net = get_net(net);
+ write_pnet(&conn->c_net, net);
}
#define RDS_FLAG_CONG_BITMAP 0x01
diff --git a/net/rds/send.c b/net/rds/send.c
index f72466c63f0c..d3e32d1f3c7d 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -162,6 +162,12 @@ restart:
goto out;
}
+ if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ release_in_xmit(cp);
+ ret = -ENETUNREACH; /* don't requeue send work */
+ goto out;
+ }
+
/*
* we record the send generation after doing the xmit acquire.
* if someone else manages to jump in and do some work, we'll use
@@ -437,7 +443,12 @@ over_batch:
!list_empty(&cp->cp_send_queue)) && !raced) {
if (batch_count < send_batch_count)
goto restart;
- queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ rcu_read_lock();
+ if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ ret = -ENETUNREACH;
+ else
+ queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ rcu_read_unlock();
} else if (raced) {
rds_stats_inc(s_send_lock_queue_raced);
}
@@ -1151,6 +1162,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
else
cpath = &conn->c_path[0];
+ if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
rds_conn_path_connect_if_down(cpath);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
@@ -1190,9 +1206,17 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
rds_stats_inc(s_send_queued);
ret = rds_send_xmit(cpath);
- if (ret == -ENOMEM || ret == -EAGAIN)
- queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
-
+ if (ret == -ENOMEM || ret == -EAGAIN) {
+ ret = 0;
+ rcu_read_lock();
+ if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags))
+ ret = -ENETUNREACH;
+ else
+ queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
+ rcu_read_unlock();
+ }
+ if (ret)
+ goto out;
rds_message_put(rm);
return payload_len;
@@ -1270,7 +1294,10 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport,
rds_stats_inc(s_send_pong);
/* schedule the send work on rds_wq */
- queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ rcu_read_lock();
+ if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ rcu_read_unlock();
rds_message_put(rm);
return 0;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 6b7ee71f40c6..9920d2f84eff 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -90,9 +90,10 @@ void rds_tcp_nonagle(struct socket *sock)
sizeof(val));
}
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
{
- return tcp_sk(tc->t_sock->sk)->snd_nxt;
+ /* seq# of the last byte of data in tcp send buffer */
+ return tcp_sk(tc->t_sock->sk)->write_seq;
}
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
@@ -270,16 +271,33 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
return -EADDRNOTAVAIL;
}
+static void rds_tcp_conn_free(void *arg)
+{
+ struct rds_tcp_connection *tc = arg;
+ unsigned long flags;
+
+ rdsdebug("freeing tc %p\n", tc);
+
+ spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+ if (!tc->t_tcp_node_detached)
+ list_del(&tc->t_tcp_node);
+ spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
+ kmem_cache_free(rds_tcp_conn_slab, tc);
+}
+
static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
struct rds_tcp_connection *tc;
- int i;
+ int i, j;
+ int ret = 0;
for (i = 0; i < RDS_MPATH_WORKERS; i++) {
tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
- if (!tc)
- return -ENOMEM;
-
+ if (!tc) {
+ ret = -ENOMEM;
+ break;
+ }
mutex_init(&tc->t_conn_path_lock);
tc->t_sock = NULL;
tc->t_tinc = NULL;
@@ -290,26 +308,17 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
tc->t_cpath = &conn->c_path[i];
spin_lock_irq(&rds_tcp_conn_lock);
+ tc->t_tcp_node_detached = false;
list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
spin_unlock_irq(&rds_tcp_conn_lock);
rdsdebug("rds_conn_path [%d] tc %p\n", i,
conn->c_path[i].cp_transport_data);
}
-
- return 0;
-}
-
-static void rds_tcp_conn_free(void *arg)
-{
- struct rds_tcp_connection *tc = arg;
- unsigned long flags;
- rdsdebug("freeing tc %p\n", tc);
-
- spin_lock_irqsave(&rds_tcp_conn_lock, flags);
- list_del(&tc->t_tcp_node);
- spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
-
- kmem_cache_free(rds_tcp_conn_slab, tc);
+ if (ret) {
+ for (j = 0; j < i; j++)
+ rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
+ }
+ return ret;
}
static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
@@ -495,27 +504,6 @@ static struct pernet_operations rds_tcp_net_ops = {
.size = sizeof(struct rds_tcp_net),
};
-/* explicitly send a RST on each socket, thereby releasing any socket refcnts
- * that may otherwise hold up netns deletion.
- */
-static void rds_tcp_conn_paths_destroy(struct rds_connection *conn)
-{
- struct rds_conn_path *cp;
- struct rds_tcp_connection *tc;
- int i;
- struct sock *sk;
-
- for (i = 0; i < RDS_MPATH_WORKERS; i++) {
- cp = &conn->c_path[i];
- tc = cp->cp_transport_data;
- if (!tc->t_sock)
- continue;
- sk = tc->t_sock->sk;
- sk->sk_prot->disconnect(sk, 0);
- tcp_done(sk);
- }
-}
-
static void rds_tcp_kill_sock(struct net *net)
{
struct rds_tcp_connection *tc, *_tc;
@@ -527,18 +515,20 @@ static void rds_tcp_kill_sock(struct net *net)
rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
- struct net *c_net = tc->t_cpath->cp_conn->c_net;
+ struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
if (net != c_net || !tc->t_sock)
continue;
- if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
+ if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
list_move_tail(&tc->t_tcp_node, &tmp_list);
+ } else {
+ list_del(&tc->t_tcp_node);
+ tc->t_tcp_node_detached = true;
+ }
}
spin_unlock_irq(&rds_tcp_conn_lock);
- list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
- rds_tcp_conn_paths_destroy(tc->t_cpath->cp_conn);
+ list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
rds_conn_destroy(tc->t_cpath->cp_conn);
- }
}
void *rds_tcp_listen_sock_def_readable(struct net *net)
@@ -586,7 +576,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
- struct net *c_net = tc->t_cpath->cp_conn->c_net;
+ struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
if (net != c_net || !tc->t_sock)
continue;
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 1aafbf7c3011..c6fa080e9b6d 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -12,6 +12,7 @@ struct rds_tcp_incoming {
struct rds_tcp_connection {
struct list_head t_tcp_node;
+ bool t_tcp_node_detached;
struct rds_conn_path *t_cpath;
/* t_conn_path_lock synchronizes the connection establishment between
* rds_tcp_accept_one and rds_tcp_conn_path_connect
@@ -54,7 +55,7 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
void rds_tcp_restore_callbacks(struct socket *sock,
struct rds_tcp_connection *tc);
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
extern struct rds_transport rds_tcp_transport;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 46f74dad0e16..534c67aeb20f 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -170,7 +170,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
cp->cp_conn, tc, sock);
if (sock) {
- if (cp->cp_conn->c_destroy_in_prog)
+ if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
rds_tcp_set_linger(sock);
sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
lock_sock(sock->sk);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e006ef8e6d40..dd707b9e73e5 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -321,8 +321,12 @@ void rds_tcp_data_ready(struct sock *sk)
ready = tc->t_orig_data_ready;
rds_tcp_stats_inc(s_tcp_data_ready_calls);
- if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM)
- queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+ if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
+ rcu_read_lock();
+ if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+ rcu_read_unlock();
+ }
out:
read_unlock_bh(&sk->sk_callback_lock);
ready(sk);
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index dc860d1bb608..16f65744d984 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -86,7 +86,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
* m_ack_seq is set to the sequence number of the last byte of
* header and data. see rds_tcp_is_acked().
*/
- tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
+ tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
rm->m_ack_seq = tc->t_last_sent_nxt +
sizeof(struct rds_header) +
be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
@@ -98,7 +98,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
- rm, rds_tcp_snd_nxt(tc),
+ rm, rds_tcp_write_seq(tc),
(unsigned long long)rm->m_ack_seq);
}
@@ -202,8 +202,11 @@ void rds_tcp_write_space(struct sock *sk)
tc->t_last_seen_una = rds_tcp_snd_una(tc);
rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
- if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+ rcu_read_lock();
+ if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
+ !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+ rcu_read_unlock();
out:
read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index f121daa402c8..eb76db1360b0 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -87,8 +87,12 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
cp->cp_reconnect_jiffies = 0;
set_bit(0, &cp->cp_conn->c_map_queued);
- queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
- queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+ rcu_read_lock();
+ if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+ queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+ }
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_connect_path_complete);
@@ -133,7 +137,10 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
if (cp->cp_reconnect_jiffies == 0) {
cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
- queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+ rcu_read_lock();
+ if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+ rcu_read_unlock();
return;
}
@@ -141,8 +148,11 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
conn, &conn->c_laddr, &conn->c_faddr);
- queue_delayed_work(rds_wq, &cp->cp_conn_w,
- rand % cp->cp_reconnect_jiffies);
+ rcu_read_lock();
+ if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ queue_delayed_work(rds_wq, &cp->cp_conn_w,
+ rand % cp->cp_reconnect_jiffies);
+ rcu_read_unlock();
cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
rds_sysctl_reconnect_max_jiffies);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 2064c3a35ef8..124c77e9d058 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1139,10 +1139,10 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
-static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
{
struct rfkill_data *data = file->private_data;
- unsigned int res = POLLOUT | POLLWRNORM;
+ __poll_t res = POLLOUT | POLLWRNORM;
poll_wait(file, &data->read_wait, wait);
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 6a5c4992cf61..083bd251406f 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1461,7 +1461,6 @@ static int rose_info_open(struct inode *inode, struct file *file)
}
static const struct file_operations rose_info_fops = {
- .owner = THIS_MODULE,
.open = rose_info_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 8ca3124df83f..178619ddab68 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -1156,7 +1156,6 @@ static int rose_nodes_open(struct inode *inode, struct file *file)
}
const struct file_operations rose_nodes_fops = {
- .owner = THIS_MODULE,
.open = rose_nodes_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1240,7 +1239,6 @@ static int rose_neigh_open(struct inode *inode, struct file *file)
}
const struct file_operations rose_neigh_fops = {
- .owner = THIS_MODULE,
.open = rose_neigh_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1326,7 +1324,6 @@ static int rose_route_open(struct inode *inode, struct file *file)
}
const struct file_operations rose_routes_fops = {
- .owner = THIS_MODULE,
.open = rose_route_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index dcd818fa837e..21ad6a3a465c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -729,12 +729,12 @@ static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
/*
* permit an RxRPC socket to be polled
*/
-static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
+static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct rxrpc_sock *rx = rxrpc_sk(sk);
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 7421656963a9..f79f260c6ddc 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -125,7 +125,6 @@ static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
}
const struct file_operations rxrpc_call_seq_fops = {
- .owner = THIS_MODULE,
.open = rxrpc_call_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -217,7 +216,6 @@ static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
}
const struct file_operations rxrpc_connection_seq_fops = {
- .owner = THIS_MODULE,
.open = rxrpc_connection_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index c03d86a7775e..f24a6ae6819a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -857,17 +857,14 @@ config NET_ACT_TUNNEL_KEY
config NET_IFE_SKBMARK
tristate "Support to encoding decoding skb mark on IFE action"
depends on NET_ACT_IFE
- ---help---
config NET_IFE_SKBPRIO
tristate "Support to encoding decoding skb prio on IFE action"
depends on NET_ACT_IFE
- ---help---
config NET_IFE_SKBTCINDEX
tristate "Support to encoding decoding skb tcindex on IFE action"
depends on NET_ACT_IFE
- ---help---
config NET_CLS_IND
bool "Incoming device classification"
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 4d33a50a8a6d..52622a3d2517 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -99,7 +99,7 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
p->tcfa_refcnt--;
if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
if (p->ops->cleanup)
- p->ops->cleanup(p, bind);
+ p->ops->cleanup(p);
tcf_idr_remove(p->idrinfo, p);
ret = ACT_P_DELETED;
}
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 5ef8ce8c83d4..b3f2c15affa7 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -357,7 +357,7 @@ out:
return ret;
}
-static void tcf_bpf_cleanup(struct tc_action *act, int bind)
+static void tcf_bpf_cleanup(struct tc_action *act)
{
struct tcf_bpf_cfg tmp;
@@ -401,16 +401,14 @@ static __net_init int bpf_init_net(struct net *net)
return tc_action_net_init(tn, &act_bpf_ops);
}
-static void __net_exit bpf_exit_net(struct net *net)
+static void __net_exit bpf_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, bpf_net_id);
}
static struct pernet_operations bpf_net_ops = {
.init = bpf_init_net,
- .exit = bpf_exit_net,
+ .exit_batch = bpf_exit_net,
.id = &bpf_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 10b7a8855a6c..2b15ba84e0c8 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -209,16 +209,14 @@ static __net_init int connmark_init_net(struct net *net)
return tc_action_net_init(tn, &act_connmark_ops);
}
-static void __net_exit connmark_exit_net(struct net *net)
+static void __net_exit connmark_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, connmark_net_id);
}
static struct pernet_operations connmark_net_ops = {
.init = connmark_init_net,
- .exit = connmark_exit_net,
+ .exit_batch = connmark_exit_net,
.id = &connmark_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index d836f998117b..b7ba9b06b147 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -49,6 +49,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
int bind)
{
struct tc_action_net *tn = net_generic(net, csum_net_id);
+ struct tcf_csum_params *params_old, *params_new;
struct nlattr *tb[TCA_CSUM_MAX + 1];
struct tc_csum *parm;
struct tcf_csum *p;
@@ -67,7 +68,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
if (!tcf_idr_check(tn, parm->index, a, bind)) {
ret = tcf_idr_create(tn, parm->index, est, a,
- &act_csum_ops, bind, false);
+ &act_csum_ops, bind, true);
if (ret)
return ret;
ret = ACT_P_CREATED;
@@ -80,10 +81,21 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
}
p = to_tcf_csum(*a);
- spin_lock_bh(&p->tcf_lock);
- p->tcf_action = parm->action;
- p->update_flags = parm->update_flags;
- spin_unlock_bh(&p->tcf_lock);
+ ASSERT_RTNL();
+
+ params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+ if (unlikely(!params_new)) {
+ if (ret == ACT_P_CREATED)
+ tcf_idr_release(*a, bind);
+ return -ENOMEM;
+ }
+ params_old = rtnl_dereference(p->params);
+
+ params_new->action = parm->action;
+ params_new->update_flags = parm->update_flags;
+ rcu_assign_pointer(p->params, params_new);
+ if (params_old)
+ kfree_rcu(params_old, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -539,19 +551,21 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_csum *p = to_tcf_csum(a);
- int action;
+ struct tcf_csum_params *params;
u32 update_flags;
+ int action;
+
+ rcu_read_lock();
+ params = rcu_dereference(p->params);
- spin_lock(&p->tcf_lock);
tcf_lastuse_update(&p->tcf_tm);
- bstats_update(&p->tcf_bstats, skb);
- action = p->tcf_action;
- update_flags = p->update_flags;
- spin_unlock(&p->tcf_lock);
+ bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
+ action = params->action;
if (unlikely(action == TC_ACT_SHOT))
- goto drop;
+ goto drop_stats;
+ update_flags = params->update_flags;
switch (tc_skb_protocol(skb)) {
case cpu_to_be16(ETH_P_IP):
if (!tcf_csum_ipv4(skb, update_flags))
@@ -563,13 +577,16 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
break;
}
+unlock:
+ rcu_read_unlock();
return action;
drop:
- spin_lock(&p->tcf_lock);
- p->tcf_qstats.drops++;
- spin_unlock(&p->tcf_lock);
- return TC_ACT_SHOT;
+ action = TC_ACT_SHOT;
+
+drop_stats:
+ qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
+ goto unlock;
}
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
@@ -577,15 +594,18 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_csum *p = to_tcf_csum(a);
+ struct tcf_csum_params *params;
struct tc_csum opt = {
- .update_flags = p->update_flags,
.index = p->tcf_index,
- .action = p->tcf_action,
.refcnt = p->tcf_refcnt - ref,
.bindcnt = p->tcf_bindcnt - bind,
};
struct tcf_t t;
+ params = rtnl_dereference(p->params);
+ opt.action = params->action;
+ opt.update_flags = params->update_flags;
+
if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -600,6 +620,15 @@ nla_put_failure:
return -1;
}
+static void tcf_csum_cleanup(struct tc_action *a)
+{
+ struct tcf_csum *p = to_tcf_csum(a);
+ struct tcf_csum_params *params;
+
+ params = rcu_dereference_protected(p->params, 1);
+ kfree_rcu(params, rcu);
+}
+
static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
struct netlink_callback *cb, int type,
const struct tc_action_ops *ops)
@@ -623,6 +652,7 @@ static struct tc_action_ops act_csum_ops = {
.act = tcf_csum,
.dump = tcf_csum_dump,
.init = tcf_csum_init,
+ .cleanup = tcf_csum_cleanup,
.walk = tcf_csum_walker,
.lookup = tcf_csum_search,
.size = sizeof(struct tcf_csum),
@@ -635,16 +665,14 @@ static __net_init int csum_init_net(struct net *net)
return tc_action_net_init(tn, &act_csum_ops);
}
-static void __net_exit csum_exit_net(struct net *net)
+static void __net_exit csum_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, csum_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, csum_net_id);
}
static struct pernet_operations csum_net_ops = {
.init = csum_init_net,
- .exit = csum_exit_net,
+ .exit_batch = csum_exit_net,
.id = &csum_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e29a48ef7fc3..b56986d41c87 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -159,7 +159,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
if (action == TC_ACT_SHOT)
this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
- tm->lastuse = lastuse;
+ tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
@@ -235,16 +235,14 @@ static __net_init int gact_init_net(struct net *net)
return tc_action_net_init(tn, &act_gact_ops);
}
-static void __net_exit gact_exit_net(struct net *net)
+static void __net_exit gact_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, gact_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, gact_net_id);
}
static struct pernet_operations gact_net_ops = {
.init = gact_init_net,
- .exit = gact_exit_net,
+ .exit_batch = gact_exit_net,
.id = &gact_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 3007cb1310ea..5954e992685a 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -387,7 +387,7 @@ out_nlmsg_trim:
}
/* under ife->tcf_lock */
-static void _tcf_ife_cleanup(struct tc_action *a, int bind)
+static void _tcf_ife_cleanup(struct tc_action *a)
{
struct tcf_ife_info *ife = to_ife(a);
struct tcf_meta_info *e, *n;
@@ -405,13 +405,13 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
}
}
-static void tcf_ife_cleanup(struct tc_action *a, int bind)
+static void tcf_ife_cleanup(struct tc_action *a)
{
struct tcf_ife_info *ife = to_ife(a);
struct tcf_ife_params *p;
spin_lock_bh(&ife->tcf_lock);
- _tcf_ife_cleanup(a, bind);
+ _tcf_ife_cleanup(a);
spin_unlock_bh(&ife->tcf_lock);
p = rcu_dereference_protected(ife->params, 1);
@@ -546,7 +546,7 @@ metadata_parse_err:
if (exists)
tcf_idr_release(*a, bind);
if (ret == ACT_P_CREATED)
- _tcf_ife_cleanup(*a, bind);
+ _tcf_ife_cleanup(*a);
if (exists)
spin_unlock_bh(&ife->tcf_lock);
@@ -567,7 +567,7 @@ metadata_parse_err:
err = use_all_metadata(ife);
if (err) {
if (ret == ACT_P_CREATED)
- _tcf_ife_cleanup(*a, bind);
+ _tcf_ife_cleanup(*a);
if (exists)
spin_unlock_bh(&ife->tcf_lock);
@@ -858,16 +858,14 @@ static __net_init int ife_init_net(struct net *net)
return tc_action_net_init(tn, &act_ife_ops);
}
-static void __net_exit ife_exit_net(struct net *net)
+static void __net_exit ife_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, ife_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, ife_net_id);
}
static struct pernet_operations ife_net_ops = {
.init = ife_init_net,
- .exit = ife_exit_net,
+ .exit_batch = ife_exit_net,
.id = &ife_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d9e399a7e3d5..06e380ae0928 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -77,7 +77,7 @@ static void ipt_destroy_target(struct xt_entry_target *t)
module_put(par.target->me);
}
-static void tcf_ipt_release(struct tc_action *a, int bind)
+static void tcf_ipt_release(struct tc_action *a)
{
struct tcf_ipt *ipt = to_ipt(a);
ipt_destroy_target(ipt->tcfi_t);
@@ -337,16 +337,14 @@ static __net_init int ipt_init_net(struct net *net)
return tc_action_net_init(tn, &act_ipt_ops);
}
-static void __net_exit ipt_exit_net(struct net *net)
+static void __net_exit ipt_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, ipt_net_id);
}
static struct pernet_operations ipt_net_ops = {
.init = ipt_init_net,
- .exit = ipt_exit_net,
+ .exit_batch = ipt_exit_net,
.id = &ipt_net_id,
.size = sizeof(struct tc_action_net),
};
@@ -387,16 +385,14 @@ static __net_init int xt_init_net(struct net *net)
return tc_action_net_init(tn, &act_xt_ops);
}
-static void __net_exit xt_exit_net(struct net *net)
+static void __net_exit xt_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, xt_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, xt_net_id);
}
static struct pernet_operations xt_net_ops = {
.init = xt_init_net,
- .exit = xt_exit_net,
+ .exit_batch = xt_exit_net,
.id = &xt_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 8b3e59388480..e6ff88f72900 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -29,7 +29,6 @@
#include <net/tc_act/tc_mirred.h>
static LIST_HEAD(mirred_list);
-static DEFINE_SPINLOCK(mirred_list_lock);
static bool tcf_mirred_is_act_redirect(int action)
{
@@ -50,18 +49,15 @@ static bool tcf_mirred_act_wants_ingress(int action)
}
}
-static void tcf_mirred_release(struct tc_action *a, int bind)
+static void tcf_mirred_release(struct tc_action *a)
{
struct tcf_mirred *m = to_mirred(a);
struct net_device *dev;
- /* We could be called either in a RCU callback or with RTNL lock held. */
- spin_lock_bh(&mirred_list_lock);
list_del(&m->tcfm_list);
- dev = rcu_dereference_protected(m->tcfm_dev, 1);
+ dev = rtnl_dereference(m->tcfm_dev);
if (dev)
dev_put(dev);
- spin_unlock_bh(&mirred_list_lock);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -139,8 +135,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
m->tcf_action = parm->action;
m->tcfm_eaction = parm->eaction;
if (dev != NULL) {
- m->tcfm_ifindex = parm->ifindex;
- m->net = net;
if (ret != ACT_P_CREATED)
dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
dev_hold(dev);
@@ -149,9 +143,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
}
if (ret == ACT_P_CREATED) {
- spin_lock_bh(&mirred_list_lock);
list_add(&m->tcfm_list, &mirred_list);
- spin_unlock_bh(&mirred_list_lock);
tcf_idr_insert(tn, *a);
}
@@ -239,7 +231,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
struct tcf_t *tm = &m->tcf_tm;
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
- tm->lastuse = lastuse;
+ tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
@@ -247,13 +239,14 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_mirred *m = to_mirred(a);
+ struct net_device *dev = rtnl_dereference(m->tcfm_dev);
struct tc_mirred opt = {
.index = m->tcf_index,
.action = m->tcf_action,
.refcnt = m->tcf_refcnt - ref,
.bindcnt = m->tcf_bindcnt - bind,
.eaction = m->tcfm_eaction,
- .ifindex = m->tcfm_ifindex,
+ .ifindex = dev ? dev->ifindex : 0,
};
struct tcf_t t;
@@ -294,7 +287,6 @@ static int mirred_device_event(struct notifier_block *unused,
ASSERT_RTNL();
if (event == NETDEV_UNREGISTER) {
- spin_lock_bh(&mirred_list_lock);
list_for_each_entry(m, &mirred_list, tcfm_list) {
if (rcu_access_pointer(m->tcfm_dev) == dev) {
dev_put(dev);
@@ -304,7 +296,6 @@ static int mirred_device_event(struct notifier_block *unused,
RCU_INIT_POINTER(m->tcfm_dev, NULL);
}
}
- spin_unlock_bh(&mirred_list_lock);
}
return NOTIFY_DONE;
@@ -318,7 +309,7 @@ static struct net_device *tcf_mirred_get_dev(const struct tc_action *a)
{
struct tcf_mirred *m = to_mirred(a);
- return __dev_get_by_index(m->net, m->tcfm_ifindex);
+ return rtnl_dereference(m->tcfm_dev);
}
static struct tc_action_ops act_mirred_ops = {
@@ -343,16 +334,14 @@ static __net_init int mirred_init_net(struct net *net)
return tc_action_net_init(tn, &act_mirred_ops);
}
-static void __net_exit mirred_exit_net(struct net *net)
+static void __net_exit mirred_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, mirred_net_id);
}
static struct pernet_operations mirred_net_ops = {
.init = mirred_init_net,
- .exit = mirred_exit_net,
+ .exit_batch = mirred_exit_net,
.id = &mirred_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index c365d01b99c8..98c6a4b2f523 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -310,16 +310,14 @@ static __net_init int nat_init_net(struct net *net)
return tc_action_net_init(tn, &act_nat_ops);
}
-static void __net_exit nat_exit_net(struct net *net)
+static void __net_exit nat_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, nat_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, nat_net_id);
}
static struct pernet_operations nat_net_ops = {
.init = nat_init_net,
- .exit = nat_exit_net,
+ .exit_batch = nat_exit_net,
.id = &nat_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 491fe5deb09e..349beaffb29e 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -216,7 +216,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
return ret;
}
-static void tcf_pedit_cleanup(struct tc_action *a, int bind)
+static void tcf_pedit_cleanup(struct tc_action *a)
{
struct tcf_pedit *p = to_pedit(a);
struct tc_pedit_key *keys = p->tcfp_keys;
@@ -453,16 +453,14 @@ static __net_init int pedit_init_net(struct net *net)
return tc_action_net_init(tn, &act_pedit_ops);
}
-static void __net_exit pedit_exit_net(struct net *net)
+static void __net_exit pedit_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, pedit_net_id);
}
static struct pernet_operations pedit_net_ops = {
.init = pedit_init_net,
- .exit = pedit_exit_net,
+ .exit_batch = pedit_exit_net,
.id = &pedit_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 3bb2ebf9e9ae..95d3c9097b25 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -118,13 +118,13 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
police = to_police(*a);
if (parm->rate.rate) {
err = -ENOMEM;
- R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]);
+ R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
if (R_tab == NULL)
goto failure;
if (parm->peakrate.rate) {
P_tab = qdisc_get_rtab(&parm->peakrate,
- tb[TCA_POLICE_PEAKRATE]);
+ tb[TCA_POLICE_PEAKRATE], NULL);
if (P_tab == NULL)
goto failure;
}
@@ -334,16 +334,14 @@ static __net_init int police_init_net(struct net *net)
return tc_action_net_init(tn, &act_police_ops);
}
-static void __net_exit police_exit_net(struct net *net)
+static void __net_exit police_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, police_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, police_net_id);
}
static struct pernet_operations police_net_ops = {
.init = police_init_net,
- .exit = police_exit_net,
+ .exit_batch = police_exit_net,
.id = &police_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 9438969290a6..1ba0df238756 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -96,7 +96,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
return ret;
}
-static void tcf_sample_cleanup(struct tc_action *a, int bind)
+static void tcf_sample_cleanup(struct tc_action *a)
{
struct tcf_sample *s = to_sample(a);
struct psample_group *psample_group;
@@ -236,16 +236,14 @@ static __net_init int sample_init_net(struct net *net)
return tc_action_net_init(tn, &act_sample_ops);
}
-static void __net_exit sample_exit_net(struct net *net)
+static void __net_exit sample_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, sample_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, sample_net_id);
}
static struct pernet_operations sample_net_ops = {
.init = sample_init_net,
- .exit = sample_exit_net,
+ .exit_batch = sample_exit_net,
.id = &sample_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e7b57e5071a3..425eac11f6da 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
return d->tcf_action;
}
-static void tcf_simp_release(struct tc_action *a, int bind)
+static void tcf_simp_release(struct tc_action *a)
{
struct tcf_defact *d = to_defact(a);
kfree(d->tcfd_defdata);
@@ -204,16 +204,14 @@ static __net_init int simp_init_net(struct net *net)
return tc_action_net_init(tn, &act_simp_ops);
}
-static void __net_exit simp_exit_net(struct net *net)
+static void __net_exit simp_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, simp_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, simp_net_id);
}
static struct pernet_operations simp_net_ops = {
.init = simp_init_net,
- .exit = simp_exit_net,
+ .exit_batch = simp_exit_net,
.id = &simp_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 59949d61f20d..5a3f691bb545 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -241,16 +241,14 @@ static __net_init int skbedit_init_net(struct net *net)
return tc_action_net_init(tn, &act_skbedit_ops);
}
-static void __net_exit skbedit_exit_net(struct net *net)
+static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, skbedit_net_id);
}
static struct pernet_operations skbedit_net_ops = {
.init = skbedit_init_net,
- .exit = skbedit_exit_net,
+ .exit_batch = skbedit_exit_net,
.id = &skbedit_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index b642ad3d39dd..fa975262dbac 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -184,7 +184,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
return ret;
}
-static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
+static void tcf_skbmod_cleanup(struct tc_action *a)
{
struct tcf_skbmod *d = to_skbmod(a);
struct tcf_skbmod_params *p;
@@ -266,16 +266,14 @@ static __net_init int skbmod_init_net(struct net *net)
return tc_action_net_init(tn, &act_skbmod_ops);
}
-static void __net_exit skbmod_exit_net(struct net *net)
+static void __net_exit skbmod_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, skbmod_net_id);
}
static struct pernet_operations skbmod_net_ops = {
.init = skbmod_init_net,
- .exit = skbmod_exit_net,
+ .exit_batch = skbmod_exit_net,
.id = &skbmod_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 30c96274c638..0e23aac09ad6 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -201,7 +201,7 @@ err_out:
return ret;
}
-static void tunnel_key_release(struct tc_action *a, int bind)
+static void tunnel_key_release(struct tc_action *a)
{
struct tcf_tunnel_key *t = to_tunnel_key(a);
struct tcf_tunnel_key_params *params;
@@ -325,16 +325,14 @@ static __net_init int tunnel_key_init_net(struct net *net)
return tc_action_net_init(tn, &act_tunnel_key_ops);
}
-static void __net_exit tunnel_key_exit_net(struct net *net)
+static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, tunnel_key_net_id);
}
static struct pernet_operations tunnel_key_net_ops = {
.init = tunnel_key_init_net,
- .exit = tunnel_key_exit_net,
+ .exit_batch = tunnel_key_exit_net,
.id = &tunnel_key_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 97f717a13ad5..e1a1b3f3983a 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -219,7 +219,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
return ret;
}
-static void tcf_vlan_cleanup(struct tc_action *a, int bind)
+static void tcf_vlan_cleanup(struct tc_action *a)
{
struct tcf_vlan *v = to_vlan(a);
struct tcf_vlan_params *p;
@@ -301,16 +301,14 @@ static __net_init int vlan_init_net(struct net *net)
return tc_action_net_init(tn, &act_vlan_ops);
}
-static void __net_exit vlan_exit_net(struct net *net)
+static void __net_exit vlan_exit_net(struct list_head *net_list)
{
- struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
- tc_action_net_exit(tn);
+ tc_action_net_exit(net_list, vlan_net_id);
}
static struct pernet_operations vlan_net_ops = {
.init = vlan_init_net,
- .exit = vlan_exit_net,
+ .exit_batch = vlan_exit_net,
.id = &vlan_net_id,
.size = sizeof(struct tc_action_net),
};
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b9d63d2246e6..bcb4ccb5f894 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
+#include <linux/idr.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -121,8 +122,8 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
}
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
- u32 prio, u32 parent, struct Qdisc *q,
- struct tcf_chain *chain)
+ u32 prio, struct tcf_chain *chain,
+ struct netlink_ext_ack *extack)
{
struct tcf_proto *tp;
int err;
@@ -148,6 +149,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
module_put(tp->ops->owner);
err = -EAGAIN;
} else {
+ NL_SET_ERR_MSG(extack, "TC classifier not found");
err = -ENOENT;
}
goto errout;
@@ -156,8 +158,6 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
tp->classify = tp->ops->classify;
tp->protocol = protocol;
tp->prio = prio;
- tp->classid = parent;
- tp->q = q;
tp->chain = chain;
err = tp->ops->init(tp);
@@ -172,13 +172,20 @@ errout:
return ERR_PTR(err);
}
-static void tcf_proto_destroy(struct tcf_proto *tp)
+static void tcf_proto_destroy(struct tcf_proto *tp,
+ struct netlink_ext_ack *extack)
{
- tp->ops->destroy(tp);
+ tp->ops->destroy(tp, extack);
module_put(tp->ops->owner);
kfree_rcu(tp, rcu);
}
+struct tcf_filter_chain_list_item {
+ struct list_head list;
+ tcf_chain_head_change_t *chain_head_change;
+ void *chain_head_change_priv;
+};
+
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
u32 chain_index)
{
@@ -187,6 +194,7 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (!chain)
return NULL;
+ INIT_LIST_HEAD(&chain->filter_chain_list);
list_add_tail(&chain->list, &block->chain_list);
chain->block = block;
chain->index = chain_index;
@@ -194,12 +202,19 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
return chain;
}
+static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
+ struct tcf_proto *tp_head)
+{
+ if (item->chain_head_change)
+ item->chain_head_change(tp_head, item->chain_head_change_priv);
+}
static void tcf_chain_head_change(struct tcf_chain *chain,
struct tcf_proto *tp_head)
{
- if (chain->chain_head_change)
- chain->chain_head_change(tp_head,
- chain->chain_head_change_priv);
+ struct tcf_filter_chain_list_item *item;
+
+ list_for_each_entry(item, &chain->filter_chain_list, list)
+ tcf_chain_head_change_item(item, tp_head);
}
static void tcf_chain_flush(struct tcf_chain *chain)
@@ -209,7 +224,7 @@ static void tcf_chain_flush(struct tcf_chain *chain)
tcf_chain_head_change(chain, NULL);
while (tp) {
RCU_INIT_POINTER(chain->filter_chain, tp->next);
- tcf_proto_destroy(tp);
+ tcf_proto_destroy(tp, NULL);
tp = rtnl_dereference(chain->filter_chain);
tcf_chain_put(chain);
}
@@ -217,8 +232,12 @@ static void tcf_chain_flush(struct tcf_chain *chain)
static void tcf_chain_destroy(struct tcf_chain *chain)
{
+ struct tcf_block *block = chain->block;
+
list_del(&chain->list);
kfree(chain);
+ if (list_empty(&block->chain_list))
+ kfree(block);
}
static void tcf_chain_hold(struct tcf_chain *chain)
@@ -249,62 +268,300 @@ void tcf_chain_put(struct tcf_chain *chain)
}
EXPORT_SYMBOL(tcf_chain_put);
-static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q,
- struct tcf_block_ext_info *ei,
- enum tc_block_command command)
+static bool tcf_block_offload_in_use(struct tcf_block *block)
+{
+ return block->offloadcnt;
+}
+
+static int tcf_block_offload_cmd(struct tcf_block *block,
+ struct net_device *dev,
+ struct tcf_block_ext_info *ei,
+ enum tc_block_command command)
{
- struct net_device *dev = q->dev_queue->dev;
struct tc_block_offload bo = {};
- if (!dev->netdev_ops->ndo_setup_tc)
- return;
bo.command = command;
bo.binder_type = ei->binder_type;
bo.block = block;
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+ return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}
-static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
- struct tcf_block_ext_info *ei)
+static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
+ struct tcf_block_ext_info *ei)
{
- tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND);
+ struct net_device *dev = q->dev_queue->dev;
+ int err;
+
+ if (!dev->netdev_ops->ndo_setup_tc)
+ goto no_offload_dev_inc;
+
+ /* If the tc offload feature is disabled and the block we try to bind
+ * to already has some offloaded filters, refuse the bind.
+ */
+ if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
+ return -EOPNOTSUPP;
+
+ err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
+ if (err == -EOPNOTSUPP)
+ goto no_offload_dev_inc;
+ return err;
+
+no_offload_dev_inc:
+ if (tcf_block_offload_in_use(block))
+ return -EOPNOTSUPP;
+ block->nooffloaddevcnt++;
+ return 0;
}
static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei)
{
- tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND);
+ struct net_device *dev = q->dev_queue->dev;
+ int err;
+
+ if (!dev->netdev_ops->ndo_setup_tc)
+ goto no_offload_dev_dec;
+ err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
+ if (err == -EOPNOTSUPP)
+ goto no_offload_dev_dec;
+ return;
+
+no_offload_dev_dec:
+ WARN_ON(block->nooffloaddevcnt-- == 0);
}
-int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
- struct tcf_block_ext_info *ei)
+static int
+tcf_chain_head_change_cb_add(struct tcf_chain *chain,
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack)
+{
+ struct tcf_filter_chain_list_item *item;
+
+ item = kmalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
+ return -ENOMEM;
+ }
+ item->chain_head_change = ei->chain_head_change;
+ item->chain_head_change_priv = ei->chain_head_change_priv;
+ if (chain->filter_chain)
+ tcf_chain_head_change_item(item, chain->filter_chain);
+ list_add(&item->list, &chain->filter_chain_list);
+ return 0;
+}
+
+static void
+tcf_chain_head_change_cb_del(struct tcf_chain *chain,
+ struct tcf_block_ext_info *ei)
+{
+ struct tcf_filter_chain_list_item *item;
+
+ list_for_each_entry(item, &chain->filter_chain_list, list) {
+ if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
+ (item->chain_head_change == ei->chain_head_change &&
+ item->chain_head_change_priv == ei->chain_head_change_priv)) {
+ tcf_chain_head_change_item(item, NULL);
+ list_del(&item->list);
+ kfree(item);
+ return;
+ }
+ }
+ WARN_ON(1);
+}
+
+struct tcf_net {
+ struct idr idr;
+};
+
+static unsigned int tcf_net_id;
+
+static int tcf_block_insert(struct tcf_block *block, struct net *net,
+ u32 block_index, struct netlink_ext_ack *extack)
+{
+ struct tcf_net *tn = net_generic(net, tcf_net_id);
+ int err;
+
+ err = idr_alloc_ext(&tn->idr, block, NULL, block_index,
+ block_index + 1, GFP_KERNEL);
+ if (err)
+ return err;
+ block->index = block_index;
+ return 0;
+}
+
+static void tcf_block_remove(struct tcf_block *block, struct net *net)
+{
+ struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+ idr_remove_ext(&tn->idr, block->index);
+}
+
+static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
+ struct netlink_ext_ack *extack)
{
- struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ struct tcf_block *block;
struct tcf_chain *chain;
int err;
- if (!block)
- return -ENOMEM;
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block) {
+ NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
+ return ERR_PTR(-ENOMEM);
+ }
INIT_LIST_HEAD(&block->chain_list);
INIT_LIST_HEAD(&block->cb_list);
+ INIT_LIST_HEAD(&block->owner_list);
/* Create chain 0 by default; it always has to be present. */
chain = tcf_chain_create(block, 0);
if (!chain) {
+ NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
err = -ENOMEM;
goto err_chain_create;
}
- WARN_ON(!ei->chain_head_change);
- chain->chain_head_change = ei->chain_head_change;
- chain->chain_head_change_priv = ei->chain_head_change_priv;
block->net = qdisc_net(q);
+ block->refcnt = 1;
+ block->net = net;
block->q = q;
- tcf_block_offload_bind(block, q, ei);
- *p_block = block;
- return 0;
+ return block;
err_chain_create:
kfree(block);
+ return ERR_PTR(err);
+}
+
+static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
+{
+ struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+ return idr_find_ext(&tn->idr, block_index);
+}
+
+static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
+{
+ return list_first_entry(&block->chain_list, struct tcf_chain, list);
+}
+
+struct tcf_block_owner_item {
+ struct list_head list;
+ struct Qdisc *q;
+ enum tcf_block_binder_type binder_type;
+};
+
+static void
+tcf_block_owner_netif_keep_dst(struct tcf_block *block,
+ struct Qdisc *q,
+ enum tcf_block_binder_type binder_type)
+{
+ if (block->keep_dst &&
+ binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ netif_keep_dst(qdisc_dev(q));
+}
+
+void tcf_block_netif_keep_dst(struct tcf_block *block)
+{
+ struct tcf_block_owner_item *item;
+
+ block->keep_dst = true;
+ list_for_each_entry(item, &block->owner_list, list)
+ tcf_block_owner_netif_keep_dst(block, item->q,
+ item->binder_type);
+}
+EXPORT_SYMBOL(tcf_block_netif_keep_dst);
+
+static int tcf_block_owner_add(struct tcf_block *block,
+ struct Qdisc *q,
+ enum tcf_block_binder_type binder_type)
+{
+ struct tcf_block_owner_item *item;
+
+ item = kmalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->q = q;
+ item->binder_type = binder_type;
+ list_add(&item->list, &block->owner_list);
+ return 0;
+}
+
+static void tcf_block_owner_del(struct tcf_block *block,
+ struct Qdisc *q,
+ enum tcf_block_binder_type binder_type)
+{
+ struct tcf_block_owner_item *item;
+
+ list_for_each_entry(item, &block->owner_list, list) {
+ if (item->q == q && item->binder_type == binder_type) {
+ list_del(&item->list);
+ kfree(item);
+ return;
+ }
+ }
+ WARN_ON(1);
+}
+
+int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = qdisc_net(q);
+ struct tcf_block *block = NULL;
+ bool created = false;
+ int err;
+
+ if (ei->block_index) {
+ /* a nonzero block_index means a shared block is requested */
+ block = tcf_block_lookup(net, ei->block_index);
+ if (block)
+ block->refcnt++;
+ }
+
+ if (!block) {
+ block = tcf_block_create(net, q, extack);
+ if (IS_ERR(block))
+ return PTR_ERR(block);
+ created = true;
+ if (ei->block_index) {
+ err = tcf_block_insert(block, net,
+ ei->block_index, extack);
+ if (err)
+ goto err_block_insert;
+ }
+ }
+
+ err = tcf_block_owner_add(block, q, ei->binder_type);
+ if (err)
+ goto err_block_owner_add;
+
+ tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
+
+ err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
+ ei, extack);
+ if (err)
+ goto err_chain_head_change_cb_add;
+
+ err = tcf_block_offload_bind(block, q, ei);
+ if (err)
+ goto err_block_offload_bind;
+
+ *p_block = block;
+ return 0;
+
+err_block_offload_bind:
+ tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
+err_chain_head_change_cb_add:
+ tcf_block_owner_del(block, q, ei->binder_type);
+err_block_owner_add:
+ if (created) {
+ if (tcf_block_shared(block))
+ tcf_block_remove(block, net);
+err_block_insert:
+ kfree(tcf_block_chain_zero(block));
+ kfree(block);
+ } else {
+ block->refcnt--;
+ }
return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);
@@ -317,7 +574,8 @@ static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
}
int tcf_block_get(struct tcf_block **p_block,
- struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct netlink_ext_ack *extack)
{
struct tcf_block_ext_info ei = {
.chain_head_change = tcf_chain_head_change_dflt,
@@ -325,53 +583,47 @@ int tcf_block_get(struct tcf_block **p_block,
};
WARN_ON(!p_filter_chain);
- return tcf_block_get_ext(p_block, q, &ei);
+ return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
-static void tcf_block_put_final(struct work_struct *work)
-{
- struct tcf_block *block = container_of(work, struct tcf_block, work);
- struct tcf_chain *chain, *tmp;
-
- rtnl_lock();
-
- /* At this point, all the chains should have refcnt == 1. */
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
- tcf_chain_put(chain);
- rtnl_unlock();
- kfree(block);
-}
-
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
* actions should all be removed after flushing.
*/
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei)
{
- struct tcf_chain *chain;
+ struct tcf_chain *chain, *tmp;
if (!block)
return;
- /* Hold a refcnt for all chains, except 0, so that they don't disappear
- * while we are iterating.
- */
- list_for_each_entry(chain, &block->chain_list, list)
- if (chain->index)
+ tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
+ tcf_block_owner_del(block, q, ei->binder_type);
+
+ if (--block->refcnt == 0) {
+ if (tcf_block_shared(block))
+ tcf_block_remove(block, block->net);
+
+ /* Hold a refcnt for all chains, so that they don't disappear
+ * while we are iterating.
+ */
+ list_for_each_entry(chain, &block->chain_list, list)
tcf_chain_hold(chain);
- list_for_each_entry(chain, &block->chain_list, list)
- tcf_chain_flush(chain);
+ list_for_each_entry(chain, &block->chain_list, list)
+ tcf_chain_flush(chain);
+ }
tcf_block_offload_unbind(block, q, ei);
- INIT_WORK(&block->work, tcf_block_put_final);
- /* Wait for existing RCU callbacks to cool down, make sure their works
- * have been queued before this. We can not flush pending works here
- * because we are holding the RTNL lock.
- */
- rcu_barrier();
- tcf_queue_work(&block->work);
+ if (block->refcnt == 0) {
+ /* At this point, all the chains should have refcnt >= 1. */
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ tcf_chain_put(chain);
+
+ /* Finally, put chain 0 and allow block to be freed. */
+ tcf_chain_put(tcf_block_chain_zero(block));
+ }
}
EXPORT_SYMBOL(tcf_block_put_ext);
@@ -429,9 +681,16 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
{
struct tcf_block_cb *block_cb;
+ /* At this point, playback of previous block cb calls is not supported,
+ * so refuse to register to a block that already has some offloaded
+ * filters present.
+ */
+ if (tcf_block_offload_in_use(block))
+ return ERR_PTR(-EOPNOTSUPP);
+
block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
if (!block_cb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
block_cb->cb = cb;
block_cb->cb_ident = cb_ident;
block_cb->cb_priv = cb_priv;
@@ -447,7 +706,7 @@ int tcf_block_cb_register(struct tcf_block *block,
struct tcf_block_cb *block_cb;
block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
- return block_cb ? 0 : -ENOMEM;
+ return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
}
EXPORT_SYMBOL(tcf_block_cb_register);
@@ -477,6 +736,10 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
int ok_count = 0;
int err;
+ /* Make sure all netdevs sharing this block are offload-capable. */
+ if (block->nooffloaddevcnt && err_stop)
+ return -EOPNOTSUPP;
+
list_for_each_entry(block_cb, &block->cb_list, list) {
err = block_cb->cb(type, type_data, block_cb->cb_priv);
if (err) {
@@ -530,8 +793,9 @@ reclassify:
#ifdef CONFIG_NET_CLS_ACT
reset:
if (unlikely(limit++ >= max_reclassify_loop)) {
- net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
- tp->q->ops->id, tp->prio & 0xffff,
+ net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
+ tp->chain->block->index,
+ tp->prio & 0xffff,
ntohs(tp->protocol));
return TC_ACT_SHOT;
}
@@ -604,8 +868,9 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
}
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
- struct tcf_proto *tp, struct Qdisc *q, u32 parent,
- void *fh, u32 portid, u32 seq, u16 flags, int event)
+ struct tcf_proto *tp, struct tcf_block *block,
+ struct Qdisc *q, u32 parent, void *fh,
+ u32 portid, u32 seq, u16 flags, int event)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
@@ -618,8 +883,13 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
- tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
- tcm->tcm_parent = parent;
+ if (q) {
+ tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
+ tcm->tcm_parent = parent;
+ } else {
+ tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
+ tcm->tcm_block_index = block->index;
+ }
tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
goto nla_put_failure;
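
The q/block branch above fixes the netlink addressing convention for shared blocks: tcm_ifindex carries the TCM_IFINDEX_MAGIC_BLOCK sentinel and tcm_block_index (a uapi alias overlaying tcm_parent) carries the block number. A sketch of the request a userspace tool would build, with block index 22 as an arbitrary example:

	struct {
		struct nlmsghdr nlh;
		struct tcmsg tcm;
	} req = {
		.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
		.nlh.nlmsg_type = RTM_GETTFILTER,
		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.tcm.tcm_family = AF_UNSPEC,
		.tcm.tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK,
		.tcm.tcm_block_index = 22,
	};

In tc(8) terms this corresponds to something like "tc filter show block 22", assuming matching iproute2 support.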
@@ -642,8 +912,8 @@ nla_put_failure:
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct tcf_proto *tp,
- struct Qdisc *q, u32 parent,
- void *fh, int event, bool unicast)
+ struct tcf_block *block, struct Qdisc *q,
+ u32 parent, void *fh, int event, bool unicast)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -652,8 +922,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
if (!skb)
return -ENOBUFS;
- if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
- n->nlmsg_flags, event) <= 0) {
+ if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+ n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -667,8 +937,9 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct tcf_proto *tp,
- struct Qdisc *q, u32 parent,
- void *fh, bool unicast, bool *last)
+ struct tcf_block *block, struct Qdisc *q,
+ u32 parent, void *fh, bool unicast, bool *last,
+ struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -678,13 +949,14 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
if (!skb)
return -ENOBUFS;
- if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
- n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
+ if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+ n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
+ NL_SET_ERR_MSG(extack, "Failed to build del event notification");
kfree_skb(skb);
return -EINVAL;
}
- err = tp->ops->delete(tp, fh, last);
+ err = tp->ops->delete(tp, fh, last, extack);
if (err) {
kfree_skb(skb);
return err;
@@ -693,20 +965,24 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
if (unicast)
return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
- return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
- n->nlmsg_flags & NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
+ if (err < 0)
+ NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
+ return err;
}
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
- struct Qdisc *q, u32 parent,
- struct nlmsghdr *n,
+ struct tcf_block *block, struct Qdisc *q,
+ u32 parent, struct nlmsghdr *n,
struct tcf_chain *chain, int event)
{
struct tcf_proto *tp;
for (tp = rtnl_dereference(chain->filter_chain);
tp; tp = rtnl_dereference(tp->next))
- tfilter_notify(net, oskb, n, tp, q, parent, 0, event, false);
+ tfilter_notify(net, oskb, n, tp, block,
+ q, parent, 0, event, false);
}
/* Add/change/delete/get a filter node */
@@ -722,13 +998,11 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
bool prio_allocate;
u32 parent;
u32 chain_index;
- struct net_device *dev;
- struct Qdisc *q;
+ struct Qdisc *q = NULL;
struct tcf_chain_info chain_info;
struct tcf_chain *chain = NULL;
struct tcf_block *block;
struct tcf_proto *tp;
- const struct Qdisc_class_ops *cops;
unsigned long cl;
void *fh;
int err;
@@ -755,8 +1029,10 @@ replay:
if (prio == 0) {
switch (n->nlmsg_type) {
case RTM_DELTFILTER:
- if (protocol || t->tcm_handle || tca[TCA_KIND])
+ if (protocol || t->tcm_handle || tca[TCA_KIND]) {
+ NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
return -ENOENT;
+ }
break;
case RTM_NEWTFILTER:
/* If no priority is provided by the user,
@@ -769,63 +1045,91 @@ replay:
}
/* fall-through */
default:
+ NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
return -ENOENT;
}
}
/* Find head of filter chain. */
- /* Find link */
- dev = __dev_get_by_index(net, t->tcm_ifindex);
- if (dev == NULL)
- return -ENODEV;
-
- /* Find qdisc */
- if (!parent) {
- q = dev->qdisc;
- parent = q->handle;
+ if (t->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+ block = tcf_block_lookup(net, t->tcm_block_index);
+ if (!block) {
+ NL_SET_ERR_MSG(extack, "Block of given index was not found");
+ err = -EINVAL;
+ goto errout;
+ }
} else {
- q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
- if (q == NULL)
- return -EINVAL;
- }
+ const struct Qdisc_class_ops *cops;
+ struct net_device *dev;
- /* Is it classful? */
- cops = q->ops->cl_ops;
- if (!cops)
- return -EINVAL;
+ /* Find link */
+ dev = __dev_get_by_index(net, t->tcm_ifindex);
+ if (!dev)
+ return -ENODEV;
- if (!cops->tcf_block)
- return -EOPNOTSUPP;
+ /* Find qdisc */
+ if (!parent) {
+ q = dev->qdisc;
+ parent = q->handle;
+ } else {
+ q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
+ if (!q) {
+ NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
+ return -EINVAL;
+ }
+ }
- /* Do we search for filter, attached to class? */
- if (TC_H_MIN(parent)) {
- cl = cops->find(q, parent);
- if (cl == 0)
- return -ENOENT;
- }
+ /* Is it classful? */
+ cops = q->ops->cl_ops;
+ if (!cops) {
+ NL_SET_ERR_MSG(extack, "Qdisc not classful");
+ return -EINVAL;
+ }
- /* And the last stroke */
- block = cops->tcf_block(q, cl);
- if (!block) {
- err = -EINVAL;
- goto errout;
+ if (!cops->tcf_block) {
+ NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
+ return -EOPNOTSUPP;
+ }
+
+ /* Do we search for a filter attached to a class? */
+ if (TC_H_MIN(parent)) {
+ cl = cops->find(q, parent);
+ if (cl == 0) {
+ NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
+ return -ENOENT;
+ }
+ }
+
+ /* And the last stroke */
+ block = cops->tcf_block(q, cl, extack);
+ if (!block) {
+ err = -EINVAL;
+ goto errout;
+ }
+ if (tcf_block_shared(block)) {
+ NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
}
chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
if (chain_index > TC_ACT_EXT_VAL_MASK) {
+ NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
goto errout;
}
chain = tcf_chain_get(block, chain_index,
n->nlmsg_type == RTM_NEWTFILTER);
if (!chain) {
+ NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
goto errout;
}
if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
- tfilter_notify_chain(net, skb, q, parent, n,
+ tfilter_notify_chain(net, skb, block, q, parent, n,
chain, RTM_DELTFILTER);
tcf_chain_flush(chain);
err = 0;
@@ -835,6 +1139,7 @@ replay:
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
prio, prio_allocate);
if (IS_ERR(tp)) {
+ NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
err = PTR_ERR(tp);
goto errout;
}
@@ -843,12 +1148,14 @@ replay:
/* Proto-tcf does not exist, create new one */
if (tca[TCA_KIND] == NULL || !protocol) {
+ NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
err = -EINVAL;
goto errout;
}
if (n->nlmsg_type != RTM_NEWTFILTER ||
!(n->nlmsg_flags & NLM_F_CREATE)) {
+ NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
err = -ENOENT;
goto errout;
}
@@ -857,13 +1164,14 @@ replay:
prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
- protocol, prio, parent, q, chain);
+ protocol, prio, chain, extack);
if (IS_ERR(tp)) {
err = PTR_ERR(tp);
goto errout;
}
tp_created = 1;
} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
+ NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
err = -EINVAL;
goto errout;
}
@@ -873,15 +1181,16 @@ replay:
if (!fh) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
tcf_chain_tp_remove(chain, &chain_info, tp);
- tfilter_notify(net, skb, n, tp, q, parent, fh,
+ tfilter_notify(net, skb, n, tp, block, q, parent, fh,
RTM_DELTFILTER, false);
- tcf_proto_destroy(tp);
+ tcf_proto_destroy(tp, extack);
err = 0;
goto errout;
}
if (n->nlmsg_type != RTM_NEWTFILTER ||
!(n->nlmsg_flags & NLM_F_CREATE)) {
+ NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
err = -ENOENT;
goto errout;
}
@@ -892,41 +1201,47 @@ replay:
case RTM_NEWTFILTER:
if (n->nlmsg_flags & NLM_F_EXCL) {
if (tp_created)
- tcf_proto_destroy(tp);
+ tcf_proto_destroy(tp, NULL);
+ NL_SET_ERR_MSG(extack, "Filter already exists");
err = -EEXIST;
goto errout;
}
break;
case RTM_DELTFILTER:
- err = tfilter_del_notify(net, skb, n, tp, q, parent,
- fh, false, &last);
+ err = tfilter_del_notify(net, skb, n, tp, block,
+ q, parent, fh, false, &last,
+ extack);
if (err)
goto errout;
if (last) {
tcf_chain_tp_remove(chain, &chain_info, tp);
- tcf_proto_destroy(tp);
+ tcf_proto_destroy(tp, extack);
}
goto errout;
case RTM_GETTFILTER:
- err = tfilter_notify(net, skb, n, tp, q, parent, fh,
- RTM_NEWTFILTER, true);
+ err = tfilter_notify(net, skb, n, tp, block, q, parent,
+ fh, RTM_NEWTFILTER, true);
+ if (err < 0)
+ NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
goto errout;
default:
+ NL_SET_ERR_MSG(extack, "Invalid netlink message type");
err = -EINVAL;
goto errout;
}
}
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
- n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
+ n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
+ extack);
if (err == 0) {
if (tp_created)
tcf_chain_tp_insert(chain, &chain_info, tp);
- tfilter_notify(net, skb, n, tp, q, parent, fh,
+ tfilter_notify(net, skb, n, tp, block, q, parent, fh,
RTM_NEWTFILTER, false);
} else {
if (tp_created)
- tcf_proto_destroy(tp);
+ tcf_proto_destroy(tp, NULL);
}
errout:
@@ -942,6 +1257,7 @@ struct tcf_dump_args {
struct tcf_walker w;
struct sk_buff *skb;
struct netlink_callback *cb;
+ struct tcf_block *block;
struct Qdisc *q;
u32 parent;
};
@@ -951,7 +1267,7 @@ static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
struct tcf_dump_args *a = (void *)arg;
struct net *net = sock_net(a->skb->sk);
- return tcf_fill_node(net, a->skb, tp, a->q, a->parent,
+ return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
n, NETLINK_CB(a->cb->skb).portid,
a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWTFILTER);
@@ -962,6 +1278,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
long index_start, long *p_index)
{
struct net *net = sock_net(skb->sk);
+ struct tcf_block *block = chain->block;
struct tcmsg *tcm = nlmsg_data(cb->nlh);
struct tcf_dump_args arg;
struct tcf_proto *tp;
@@ -980,7 +1297,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
if (cb->args[1] == 0) {
- if (tcf_fill_node(net, skb, tp, q, parent, 0,
+ if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWTFILTER) <= 0)
@@ -993,6 +1310,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
arg.w.fn = tcf_node_dump;
arg.skb = skb;
arg.cb = cb;
+ arg.block = block;
arg.q = q;
arg.parent = parent;
arg.w.stop = 0;
@@ -1011,13 +1329,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
- struct net_device *dev;
- struct Qdisc *q;
+ struct Qdisc *q = NULL;
struct tcf_block *block;
struct tcf_chain *chain;
struct tcmsg *tcm = nlmsg_data(cb->nlh);
- unsigned long cl = 0;
- const struct Qdisc_class_ops *cops;
long index_start;
long index;
u32 parent;
@@ -1030,32 +1345,51 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
- dev = __dev_get_by_index(net, tcm->tcm_ifindex);
- if (!dev)
- return skb->len;
-
- parent = tcm->tcm_parent;
- if (!parent) {
- q = dev->qdisc;
- parent = q->handle;
+ if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+ block = tcf_block_lookup(net, tcm->tcm_block_index);
+ if (!block)
+ goto out;
+ /* If we work with a block index, q is NULL and the parent value
+ * is never used in the following code. The check in tcf_fill_node
+ * prevents that. However, the compiler does not see that far, so
+ * set parent to zero to silence the warning about parent being
+ * uninitialized.
+ */
+ parent = 0;
} else {
- q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
- }
- if (!q)
- goto out;
- cops = q->ops->cl_ops;
- if (!cops)
- goto out;
- if (!cops->tcf_block)
- goto out;
- if (TC_H_MIN(tcm->tcm_parent)) {
- cl = cops->find(q, tcm->tcm_parent);
- if (cl == 0)
+ const struct Qdisc_class_ops *cops;
+ struct net_device *dev;
+ unsigned long cl = 0;
+
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
+ return skb->len;
+
+ parent = tcm->tcm_parent;
+ if (!parent) {
+ q = dev->qdisc;
+ parent = q->handle;
+ } else {
+ q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
+ }
+ if (!q)
+ goto out;
+ cops = q->ops->cl_ops;
+ if (!cops)
+ goto out;
+ if (!cops->tcf_block)
+ goto out;
+ if (TC_H_MIN(tcm->tcm_parent)) {
+ cl = cops->find(q, tcm->tcm_parent);
+ if (cl == 0)
+ goto out;
+ }
+ block = cops->tcf_block(q, cl, NULL);
+ if (!block)
goto out;
+ if (tcf_block_shared(block))
+ q = NULL;
}
- block = cops->tcf_block(q, cl);
- if (!block)
- goto out;
index_start = cb->args[0];
index = 0;
@@ -1090,7 +1424,8 @@ void tcf_exts_destroy(struct tcf_exts *exts)
EXPORT_SYMBOL(tcf_exts_destroy);
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
- struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
+ struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
+ struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
{
@@ -1123,8 +1458,10 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
}
#else
if ((exts->action && tb[exts->action]) ||
- (exts->police && tb[exts->police]))
+ (exts->police && tb[exts->police])) {
+ NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
return -EOPNOTSUPP;
+ }
#endif
return 0;
@@ -1258,18 +1595,50 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
}
EXPORT_SYMBOL(tc_setup_cb_call);
+static __net_init int tcf_net_init(struct net *net)
+{
+ struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+ idr_init(&tn->idr);
+ return 0;
+}
+
+static void __net_exit tcf_net_exit(struct net *net)
+{
+ struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+ idr_destroy(&tn->idr);
+}
+
+static struct pernet_operations tcf_net_ops = {
+ .init = tcf_net_init,
+ .exit = tcf_net_exit,
+ .id = &tcf_net_id,
+ .size = sizeof(struct tcf_net),
+};
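
These pernet ops give every namespace its own IDR of shared blocks; that IDR is what tcf_block_lookup() in the hunks above searches. A minimal sketch of the lookup helper this code assumes, added earlier in the patch (the exact accessor, idr_find() versus the _ext variants used elsewhere in this diff, may differ by kernel version):

	static struct tcf_block *tcf_block_lookup(struct net *net,
						  u32 block_index)
	{
		struct tcf_net *tn = net_generic(net, tcf_net_id);

		return idr_find(&tn->idr, block_index);
	}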
+
static int __init tc_filter_init(void)
{
+ int err;
+
tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
if (!tc_filter_wq)
return -ENOMEM;
+ err = register_pernet_subsys(&tcf_net_ops);
+ if (err)
+ goto err_register_pernet_subsys;
+
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
tc_dump_tfilter, 0);
return 0;
+
+err_register_pernet_subsys:
+ destroy_workqueue(tc_filter_wq);
+ return err;
}
subsys_initcall(tc_filter_init);
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 5f169ded347e..d333f5c5101d 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -112,7 +112,7 @@ static void basic_delete_filter(struct rcu_head *head)
tcf_queue_work(&f->work);
}
-static void basic_destroy(struct tcf_proto *tp)
+static void basic_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f, *n;
@@ -130,7 +130,8 @@ static void basic_destroy(struct tcf_proto *tp)
kfree_rcu(head, rcu);
}
-static int basic_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int basic_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f = arg;
@@ -152,11 +153,12 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
static int basic_set_parms(struct net *net, struct tcf_proto *tp,
struct basic_filter *f, unsigned long base,
struct nlattr **tb,
- struct nlattr *est, bool ovr)
+ struct nlattr *est, bool ovr,
+ struct netlink_ext_ack *extack)
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
+ err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
if (err < 0)
return err;
@@ -175,7 +177,8 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
static int basic_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
- struct nlattr **tca, void **arg, bool ovr)
+ struct nlattr **tca, void **arg, bool ovr,
+ struct netlink_ext_ack *extack)
{
int err;
struct basic_head *head = rtnl_dereference(tp->root);
@@ -221,7 +224,8 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
fnew->handle = idr_index;
}
- err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr);
+ err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr,
+ extack);
if (err < 0) {
if (!fold)
idr_remove_ext(&head->handle_idr, fnew->handle);
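
cls_basic shows the mechanical pattern every classifier below repeats: ->destroy, ->delete, ->change and their parms helpers each gain a struct netlink_ext_ack * that is threaded down to tcf_exts_validate() and friends so validation failures can carry a message. A hedged sketch of the resulting op shape (the foo_* types and refcnt check are hypothetical):

static int foo_delete(struct tcf_proto *tp, void *arg, bool *last,
		      struct netlink_ext_ack *extack)
{
	struct foo_head *head = rtnl_dereference(tp->root);
	struct foo_filter *f = arg;

	if (f->refcnt > 1) {
		NL_SET_ERR_MSG(extack, "Filter is still referenced");
		return -EBUSY;
	}
	list_del_rcu(&f->link);
	*last = list_empty(&head->flist);
	return 0;
}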
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 8d78e7f4ecc3..8e5326bc6440 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -147,7 +147,8 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
}
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
- struct cls_bpf_prog *oldprog)
+ struct cls_bpf_prog *oldprog,
+ struct netlink_ext_ack *extack)
{
struct tcf_block *block = tp->chain->block;
struct tc_cls_bpf_offload cls_bpf = {};
@@ -158,22 +159,25 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
skip_sw = prog && tc_skip_sw(prog->gen_flags);
obj = prog ?: oldprog;
- tc_cls_common_offload_init(&cls_bpf.common, tp);
+ tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
+ extack);
cls_bpf.command = TC_CLSBPF_OFFLOAD;
cls_bpf.exts = &obj->exts;
cls_bpf.prog = prog ? prog->filter : NULL;
cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
cls_bpf.name = obj->bpf_name;
cls_bpf.exts_integrated = obj->exts_integrated;
- cls_bpf.gen_flags = obj->gen_flags;
+
+ if (oldprog)
+ tcf_block_offload_dec(block, &oldprog->gen_flags);
err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
if (prog) {
if (err < 0) {
- cls_bpf_offload_cmd(tp, oldprog, prog);
+ cls_bpf_offload_cmd(tp, oldprog, prog, extack);
return err;
} else if (err > 0) {
- prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
+ tcf_block_offload_inc(block, &prog->gen_flags);
}
}
@@ -183,10 +187,18 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
return 0;
}
+static u32 cls_bpf_flags(u32 flags)
+{
+ return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
+}
+
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
- struct cls_bpf_prog *oldprog)
+ struct cls_bpf_prog *oldprog,
+ struct netlink_ext_ack *extack)
{
- if (prog && oldprog && prog->gen_flags != oldprog->gen_flags)
+ if (prog && oldprog &&
+ cls_bpf_flags(prog->gen_flags) !=
+ cls_bpf_flags(oldprog->gen_flags))
return -EINVAL;
if (prog && tc_skip_hw(prog->gen_flags))
@@ -196,15 +208,16 @@ static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
if (!prog && !oldprog)
return 0;
- return cls_bpf_offload_cmd(tp, prog, oldprog);
+ return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}
static void cls_bpf_stop_offload(struct tcf_proto *tp,
- struct cls_bpf_prog *prog)
+ struct cls_bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
int err;
- err = cls_bpf_offload_cmd(tp, NULL, prog);
+ err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
if (err)
pr_err("Stopping hardware offload failed: %d\n", err);
}
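
cls_bpf (like flower, matchall and u32 further down) replaces direct TCA_CLS_FLAGS_IN_HW manipulation with tcf_block_offload_inc()/dec(). A sketch of what these helpers are assumed to do, given the offloadcnt accounting this series adds to struct tcf_block:

	static inline void tcf_block_offload_inc(struct tcf_block *block,
						 u32 *flags)
	{
		if (*flags & TCA_CLS_FLAGS_IN_HW)
			return;
		*flags |= TCA_CLS_FLAGS_IN_HW;
		block->offloadcnt++;	/* once per filter, not per callback */
	}

	static inline void tcf_block_offload_dec(struct tcf_block *block,
						 u32 *flags)
	{
		if (!(*flags & TCA_CLS_FLAGS_IN_HW))
			return;
		*flags &= ~TCA_CLS_FLAGS_IN_HW;
		block->offloadcnt--;
	}

Keeping the count on the block itself is what lets tcf_block_offload_in_use() above refuse late callback registration.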
@@ -215,13 +228,12 @@ static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
struct tcf_block *block = tp->chain->block;
struct tc_cls_bpf_offload cls_bpf = {};
- tc_cls_common_offload_init(&cls_bpf.common, tp);
+ tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
cls_bpf.command = TC_CLSBPF_STATS;
cls_bpf.exts = &prog->exts;
cls_bpf.prog = prog->filter;
cls_bpf.name = prog->bpf_name;
cls_bpf.exts_integrated = prog->exts_integrated;
- cls_bpf.gen_flags = prog->gen_flags;
tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
}
@@ -278,12 +290,13 @@ static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
tcf_queue_work(&prog->work);
}
-static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
+static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
idr_remove_ext(&head->handle_idr, prog->handle);
- cls_bpf_stop_offload(tp, prog);
+ cls_bpf_stop_offload(tp, prog, extack);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
if (tcf_exts_get_net(&prog->exts))
@@ -292,22 +305,24 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
__cls_bpf_delete_prog(prog);
}
-static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
- __cls_bpf_delete(tp, arg);
+ __cls_bpf_delete(tp, arg, extack);
*last = list_empty(&head->plist);
return 0;
}
-static void cls_bpf_destroy(struct tcf_proto *tp)
+static void cls_bpf_destroy(struct tcf_proto *tp,
+ struct netlink_ext_ack *extack)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *prog, *tmp;
list_for_each_entry_safe(prog, tmp, &head->plist, link)
- __cls_bpf_delete(tp, prog);
+ __cls_bpf_delete(tp, prog, extack);
idr_destroy(&head->handle_idr);
kfree_rcu(head, rcu);
@@ -392,15 +407,16 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
prog->bpf_name = name;
prog->filter = fp;
- if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
- netif_keep_dst(qdisc_dev(tp->q));
+ if (fp->dst_needed)
+ tcf_block_netif_keep_dst(tp->chain->block);
return 0;
}
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
struct cls_bpf_prog *prog, unsigned long base,
- struct nlattr **tb, struct nlattr *est, bool ovr)
+ struct nlattr **tb, struct nlattr *est, bool ovr,
+ struct netlink_ext_ack *extack)
{
bool is_bpf, is_ebpf, have_exts = false;
u32 gen_flags = 0;
@@ -411,7 +427,7 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
return -EINVAL;
- ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
+ ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, extack);
if (ret < 0)
return ret;
@@ -449,7 +465,7 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr)
+ void **arg, bool ovr, struct netlink_ext_ack *extack)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *oldprog = *arg;
@@ -497,11 +513,12 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
prog->handle = handle;
}
- ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
+ ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
+ extack);
if (ret < 0)
goto errout_idr;
- ret = cls_bpf_offload(tp, prog, oldprog);
+ ret = cls_bpf_offload(tp, prog, oldprog, extack);
if (ret)
goto errout_parms;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 309d5899265f..762da5c0cf5e 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -91,7 +91,8 @@ static void cls_cgroup_destroy_rcu(struct rcu_head *root)
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr)
+ void **arg, bool ovr,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_CGROUP_MAX + 1];
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
@@ -121,7 +122,8 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
goto errout;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr,
+ extack);
if (err < 0)
goto errout;
@@ -141,7 +143,8 @@ errout:
return err;
}
-static void cls_cgroup_destroy(struct tcf_proto *tp)
+static void cls_cgroup_destroy(struct tcf_proto *tp,
+ struct netlink_ext_ack *extack)
{
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
@@ -154,7 +157,8 @@ static void cls_cgroup_destroy(struct tcf_proto *tp)
}
}
-static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 25c2a888e1f0..cd5fe383afdd 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -401,7 +401,7 @@ static void flow_destroy_filter(struct rcu_head *head)
static int flow_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr)
+ void **arg, bool ovr, struct netlink_ext_ack *extack)
{
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *fold, *fnew;
@@ -454,7 +454,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
goto err2;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
+ extack);
if (err < 0)
goto err2;
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);
- netif_keep_dst(qdisc_dev(tp->q));
+ tcf_block_netif_keep_dst(tp->chain->block);
if (tb[TCA_FLOW_KEYS]) {
fnew->keymask = keymask;
@@ -574,7 +575,8 @@ err1:
return err;
}
-static int flow_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f = arg;
@@ -598,7 +600,7 @@ static int flow_init(struct tcf_proto *tp)
return 0;
}
-static void flow_destroy(struct tcf_proto *tp)
+static void flow_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f, *next;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 543a3e875d05..dc9acaafc0a8 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -166,6 +166,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
* so do it rather here.
*/
skb_key.basic.n_proto = skb->protocol;
+ skb_flow_dissect_tunnel_info(skb, &head->dissector, &skb_key);
skb_flow_dissect(skb, &head->dissector, &skb_key, 0);
fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
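
The added skb_flow_dissect_tunnel_info() call (a helper from the flow-dissector patch earlier in this series) populates the enc_* members of skb_key from the skb's tunnel metadata before the generic dissection runs, so flower's TCA_FLOWER_KEY_ENC_* matches can be evaluated on the software datapath as well, rather than only after hardware decap offload.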
@@ -217,30 +218,33 @@ static void fl_destroy_filter(struct rcu_head *head)
tcf_queue_work(&f->work);
}
-static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
+static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
+ struct netlink_ext_ack *extack)
{
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = tp->chain->block;
- tc_cls_common_offload_init(&cls_flower.common, tp);
+ tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
cls_flower.command = TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long) f;
tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
&cls_flower, false);
+ tcf_block_offload_dec(block, &f->flags);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
struct flow_dissector *dissector,
struct fl_flow_key *mask,
- struct cls_fl_filter *f)
+ struct cls_fl_filter *f,
+ struct netlink_ext_ack *extack)
{
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = tp->chain->block;
bool skip_sw = tc_skip_sw(f->flags);
int err;
- tc_cls_common_offload_init(&cls_flower.common, tp);
+ tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
cls_flower.command = TC_CLSFLOWER_REPLACE;
cls_flower.cookie = (unsigned long) f;
cls_flower.dissector = dissector;
@@ -252,10 +256,10 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
&cls_flower, skip_sw);
if (err < 0) {
- fl_hw_destroy_filter(tp, f);
+ fl_hw_destroy_filter(tp, f, NULL);
return err;
} else if (err > 0) {
- f->flags |= TCA_CLS_FLAGS_IN_HW;
+ tcf_block_offload_inc(block, &f->flags);
}
if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
@@ -269,7 +273,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = tp->chain->block;
- tc_cls_common_offload_init(&cls_flower.common, tp);
+ tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
cls_flower.command = TC_CLSFLOWER_STATS;
cls_flower.cookie = (unsigned long) f;
cls_flower.exts = &f->exts;
@@ -279,14 +283,15 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
&cls_flower, false);
}
-static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
+static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
+ struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
idr_remove_ext(&head->handle_idr, f->handle);
list_del_rcu(&f->list);
if (!tc_skip_hw(f->flags))
- fl_hw_destroy_filter(tp, f);
+ fl_hw_destroy_filter(tp, f, extack);
tcf_unbind_filter(tp, &f->res);
if (tcf_exts_get_net(&f->exts))
call_rcu(&f->rcu, fl_destroy_filter);
@@ -312,13 +317,13 @@ static void fl_destroy_rcu(struct rcu_head *rcu)
schedule_work(&head->work);
}
-static void fl_destroy(struct tcf_proto *tp)
+static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f, *next;
list_for_each_entry_safe(f, next, &head->filters, list)
- __fl_delete(tp, f);
+ __fl_delete(tp, f, extack);
idr_destroy(&head->handle_idr);
__module_get(THIS_MODULE);
@@ -524,13 +529,14 @@ static void fl_set_key_ip(struct nlattr **tb,
}
static int fl_set_key(struct net *net, struct nlattr **tb,
- struct fl_flow_key *key, struct fl_flow_key *mask)
+ struct fl_flow_key *key, struct fl_flow_key *mask,
+ struct netlink_ext_ack *extack)
{
__be16 ethertype;
int ret = 0;
#ifdef CONFIG_NET_CLS_IND
if (tb[TCA_FLOWER_INDEV]) {
- int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
+ int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
if (err < 0)
return err;
key->indev_ifindex = err;
@@ -825,11 +831,12 @@ static int fl_check_assign_mask(struct cls_fl_head *head,
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
struct cls_fl_filter *f, struct fl_flow_mask *mask,
unsigned long base, struct nlattr **tb,
- struct nlattr *est, bool ovr)
+ struct nlattr *est, bool ovr,
+ struct netlink_ext_ack *extack)
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
+ err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
if (err < 0)
return err;
@@ -838,7 +845,7 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
tcf_bind_filter(tp, &f->res, base);
}
- err = fl_set_key(net, tb, &f->key, &mask->key);
+ err = fl_set_key(net, tb, &f->key, &mask->key, extack);
if (err)
return err;
@@ -851,7 +858,7 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
static int fl_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr)
+ void **arg, bool ovr, struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *fold = *arg;
@@ -914,7 +921,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
}
- err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
+ err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
+ extack);
if (err)
goto errout_idr;
@@ -938,7 +946,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
err = fl_hw_replace_filter(tp,
&head->dissector,
&mask.key,
- fnew);
+ fnew,
+ extack);
if (err)
goto errout_idr;
}
@@ -951,7 +960,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
rhashtable_remove_fast(&head->ht, &fold->ht_node,
head->ht_params);
if (!tc_skip_hw(fold->flags))
- fl_hw_destroy_filter(tp, fold);
+ fl_hw_destroy_filter(tp, fold, NULL);
}
*arg = fnew;
@@ -981,7 +990,8 @@ errout_tb:
return err;
}
-static int fl_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f = arg;
@@ -989,7 +999,7 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last)
if (!tc_skip_sw(f->flags))
rhashtable_remove_fast(&head->ht, &f->ht_node,
head->ht_params);
- __fl_delete(tp, f);
+ __fl_delete(tp, f, extack);
*last = list_empty(&head->filters);
return 0;
}
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 20f0de1a960a..8b207723fbc2 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -149,7 +149,7 @@ static void fw_delete_filter(struct rcu_head *head)
tcf_queue_work(&f->work);
}
-static void fw_destroy(struct tcf_proto *tp)
+static void fw_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f;
@@ -172,7 +172,8 @@ static void fw_destroy(struct tcf_proto *tp)
kfree_rcu(head, rcu);
}
-static int fw_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int fw_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = arg;
@@ -218,13 +219,15 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
static int fw_set_parms(struct net *net, struct tcf_proto *tp,
struct fw_filter *f, struct nlattr **tb,
- struct nlattr **tca, unsigned long base, bool ovr)
+ struct nlattr **tca, unsigned long base, bool ovr,
+ struct netlink_ext_ack *extack)
{
struct fw_head *head = rtnl_dereference(tp->root);
u32 mask;
int err;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr,
+ extack);
if (err < 0)
return err;
@@ -236,7 +239,7 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
#ifdef CONFIG_NET_CLS_IND
if (tb[TCA_FW_INDEV]) {
int ret;
- ret = tcf_change_indev(net, tb[TCA_FW_INDEV]);
+ ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
if (ret < 0)
return ret;
f->ifindex = ret;
@@ -257,7 +260,7 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
static int fw_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca, void **arg,
- bool ovr)
+ bool ovr, struct netlink_ext_ack *extack)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = *arg;
@@ -296,7 +299,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
return err;
}
- err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr);
+ err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr, extack);
if (err < 0) {
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
@@ -345,7 +348,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
f->id = handle;
f->tp = tp;
- err = fw_set_parms(net, tp, f, tb, tca, base, ovr);
+ err = fw_set_parms(net, tp, f, tb, tca, base, ovr, extack);
if (err < 0)
goto errout;
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 66d4e0099158..2ba721a590a7 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -71,28 +71,31 @@ static void mall_destroy_rcu(struct rcu_head *rcu)
static void mall_destroy_hw_filter(struct tcf_proto *tp,
struct cls_mall_head *head,
- unsigned long cookie)
+ unsigned long cookie,
+ struct netlink_ext_ack *extack)
{
struct tc_cls_matchall_offload cls_mall = {};
struct tcf_block *block = tp->chain->block;
- tc_cls_common_offload_init(&cls_mall.common, tp);
+ tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
cls_mall.command = TC_CLSMATCHALL_DESTROY;
cls_mall.cookie = cookie;
tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+ tcf_block_offload_dec(block, &head->flags);
}
static int mall_replace_hw_filter(struct tcf_proto *tp,
struct cls_mall_head *head,
- unsigned long cookie)
+ unsigned long cookie,
+ struct netlink_ext_ack *extack)
{
struct tc_cls_matchall_offload cls_mall = {};
struct tcf_block *block = tp->chain->block;
bool skip_sw = tc_skip_sw(head->flags);
int err;
- tc_cls_common_offload_init(&cls_mall.common, tp);
+ tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
cls_mall.command = TC_CLSMATCHALL_REPLACE;
cls_mall.exts = &head->exts;
cls_mall.cookie = cookie;
@@ -100,10 +103,10 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL,
&cls_mall, skip_sw);
if (err < 0) {
- mall_destroy_hw_filter(tp, head, cookie);
+ mall_destroy_hw_filter(tp, head, cookie, NULL);
return err;
} else if (err > 0) {
- head->flags |= TCA_CLS_FLAGS_IN_HW;
+ tcf_block_offload_inc(block, &head->flags);
}
if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
@@ -112,7 +115,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
return 0;
}
-static void mall_destroy(struct tcf_proto *tp)
+static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
@@ -120,7 +123,7 @@ static void mall_destroy(struct tcf_proto *tp)
return;
if (!tc_skip_hw(head->flags))
- mall_destroy_hw_filter(tp, head, (unsigned long) head);
+ mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);
if (tcf_exts_get_net(&head->exts))
call_rcu(&head->rcu, mall_destroy_rcu);
@@ -141,11 +144,12 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
struct cls_mall_head *head,
unsigned long base, struct nlattr **tb,
- struct nlattr *est, bool ovr)
+ struct nlattr *est, bool ovr,
+ struct netlink_ext_ack *extack)
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr);
+ err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, extack);
if (err < 0)
return err;
@@ -159,7 +163,7 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
static int mall_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr)
+ void **arg, bool ovr, struct netlink_ext_ack *extack)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
struct nlattr *tb[TCA_MATCHALL_MAX + 1];
@@ -197,12 +201,14 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
new->handle = handle;
new->flags = flags;
- err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
+ err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
+ extack);
if (err)
goto err_set_parms;
if (!tc_skip_hw(new->flags)) {
- err = mall_replace_hw_filter(tp, new, (unsigned long) new);
+ err = mall_replace_hw_filter(tp, new, (unsigned long)new,
+ extack);
if (err)
goto err_replace_hw_filter;
}
@@ -222,7 +228,8 @@ err_exts_init:
return err;
}
-static int mall_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index ac9a5b8825b9..21a03a8ee029 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -281,7 +281,7 @@ static void route4_delete_filter(struct rcu_head *head)
tcf_queue_work(&f->work);
}
-static void route4_destroy(struct tcf_proto *tp)
+static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
int h1, h2;
@@ -316,7 +316,8 @@ static void route4_destroy(struct tcf_proto *tp)
kfree_rcu(head, rcu);
}
-static int route4_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter *f = arg;
@@ -389,7 +390,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
unsigned long base, struct route4_filter *f,
u32 handle, struct route4_head *head,
struct nlattr **tb, struct nlattr *est, int new,
- bool ovr)
+ bool ovr, struct netlink_ext_ack *extack)
{
u32 id = 0, to = 0, nhandle = 0x8000;
struct route4_filter *fp;
@@ -397,7 +398,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
struct route4_bucket *b;
int err;
- err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
+ err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
if (err < 0)
return err;
@@ -471,7 +472,8 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
static int route4_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
- struct nlattr **tca, void **arg, bool ovr)
+ struct nlattr **tca, void **arg, bool ovr,
+ struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter __rcu **fp;
@@ -515,7 +517,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
}
err = route4_set_parms(net, tp, base, f, handle, head, tb,
- tca[TCA_RATE], new, ovr);
+ tca[TCA_RATE], new, ovr, extack);
if (err < 0)
goto errout;
@@ -527,7 +529,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
if (f->handle < f1->handle)
break;
- netif_keep_dst(qdisc_dev(tp->q));
+ tcf_block_netif_keep_dst(tp->chain->block);
rcu_assign_pointer(f->next, f1);
rcu_assign_pointer(*fp, f);
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index cf325625c99d..4f1297657c27 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -322,7 +322,7 @@ static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
__rsvp_delete_filter(f);
}
-static void rsvp_destroy(struct tcf_proto *tp)
+static void rsvp_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct rsvp_head *data = rtnl_dereference(tp->root);
int h1, h2;
@@ -350,7 +350,8 @@ static void rsvp_destroy(struct tcf_proto *tp)
kfree_rcu(data, rcu);
}
-static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
struct rsvp_head *head = rtnl_dereference(tp->root);
struct rsvp_filter *nfp, *f = arg;
@@ -486,7 +487,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle,
struct nlattr **tca,
- void **arg, bool ovr)
+ void **arg, bool ovr, struct netlink_ext_ack *extack)
{
struct rsvp_head *data = rtnl_dereference(tp->root);
struct rsvp_filter *f, *nfp;
@@ -511,7 +512,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
err = tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
if (err < 0)
return err;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, extack);
if (err < 0)
goto errout2;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 67467ae24c97..b49cc990a000 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -193,7 +193,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head)
tcf_queue_work(&f->work);
}
-static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = arg;
@@ -246,7 +247,7 @@ static int tcindex_destroy_element(struct tcf_proto *tp,
{
bool last;
- return tcindex_delete(tp, arg, &last);
+ return tcindex_delete(tp, arg, &last, NULL);
}
static void __tcindex_destroy(struct rcu_head *head)
@@ -322,7 +323,7 @@ static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
u32 handle, struct tcindex_data *p,
struct tcindex_filter_result *r, struct nlattr **tb,
- struct nlattr *est, bool ovr)
+ struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
struct tcindex_filter_result new_filter_result, *old_r = r;
struct tcindex_filter_result cr;
@@ -334,7 +335,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
if (err < 0)
return err;
- err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ err = tcf_exts_validate(net, tp, tb, est, &e, ovr, extack);
if (err < 0)
goto errout;
@@ -520,7 +521,8 @@ errout:
static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
- struct nlattr **tca, void **arg, bool ovr)
+ struct nlattr **tca, void **arg, bool ovr,
+ struct netlink_ext_ack *extack)
{
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_TCINDEX_MAX + 1];
@@ -540,7 +542,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
return err;
return tcindex_set_parms(net, tp, base, handle, p, r, tb,
- tca[TCA_RATE], ovr);
+ tca[TCA_RATE], ovr, extack);
}
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
@@ -579,7 +581,8 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
}
}
-static void tcindex_destroy(struct tcf_proto *tp)
+static void tcindex_destroy(struct tcf_proto *tp,
+ struct netlink_ext_ack *extack)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcf_walker walker;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 507859cdd1cb..60c892c36a60 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -87,6 +87,7 @@ struct tc_u_hnode {
unsigned int divisor;
struct idr handle_idr;
struct rcu_head rcu;
+ u32 flags;
/* The 'ht' field MUST be the last field in structure to allow for
* more entries allocated at end of structure.
*/
@@ -486,12 +487,13 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
return 0;
}
-static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
+static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
+ struct netlink_ext_ack *extack)
{
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
- tc_cls_common_offload_init(&cls_u32.common, tp);
+ tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
cls_u32.command = TC_CLSU32_DELETE_HNODE;
cls_u32.hnode.divisor = h->divisor;
cls_u32.hnode.handle = h->handle;
@@ -501,7 +503,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
}
static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
- u32 flags)
+ u32 flags, struct netlink_ext_ack *extack)
{
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
@@ -509,7 +511,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
bool offloaded = false;
int err;
- tc_cls_common_offload_init(&cls_u32.common, tp);
+ tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
cls_u32.command = TC_CLSU32_NEW_HNODE;
cls_u32.hnode.divisor = h->divisor;
cls_u32.hnode.handle = h->handle;
@@ -517,7 +519,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
if (err < 0) {
- u32_clear_hw_hnode(tp, h);
+ u32_clear_hw_hnode(tp, h, NULL);
return err;
} else if (err > 0) {
offloaded = true;
@@ -529,27 +531,29 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
return 0;
}
-static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
+static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+ struct netlink_ext_ack *extack)
{
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
- tc_cls_common_offload_init(&cls_u32.common, tp);
+ tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
cls_u32.command = TC_CLSU32_DELETE_KNODE;
- cls_u32.knode.handle = handle;
+ cls_u32.knode.handle = n->handle;
tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
+ tcf_block_offload_dec(block, &n->flags);
}
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
- u32 flags)
+ u32 flags, struct netlink_ext_ack *extack)
{
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
bool skip_sw = tc_skip_sw(flags);
int err;
- tc_cls_common_offload_init(&cls_u32.common, tp);
+ tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
cls_u32.command = TC_CLSU32_REPLACE_KNODE;
cls_u32.knode.handle = n->handle;
cls_u32.knode.fshift = n->fshift;
@@ -567,10 +571,10 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
if (err < 0) {
- u32_remove_hw_knode(tp, n->handle);
+ u32_remove_hw_knode(tp, n, NULL);
return err;
} else if (err > 0) {
- n->flags |= TCA_CLS_FLAGS_IN_HW;
+ tcf_block_offload_inc(block, &n->flags);
}
if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
@@ -579,7 +583,8 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
return 0;
}
-static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
+static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
+ struct netlink_ext_ack *extack)
{
struct tc_u_knode *n;
unsigned int h;
@@ -589,7 +594,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
RCU_INIT_POINTER(ht->ht[h],
rtnl_dereference(n->next));
tcf_unbind_filter(tp, &n->res);
- u32_remove_hw_knode(tp, n->handle);
+ u32_remove_hw_knode(tp, n, extack);
idr_remove_ext(&ht->handle_idr, n->handle);
if (tcf_exts_get_net(&n->exts))
call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
@@ -599,7 +604,8 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
}
}
-static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
+static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
+ struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode __rcu **hn;
@@ -607,14 +613,14 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
WARN_ON(ht->refcnt);
- u32_clear_hnode(tp, ht);
+ u32_clear_hnode(tp, ht, extack);
hn = &tp_c->hlist;
for (phn = rtnl_dereference(*hn);
phn;
hn = &phn->next, phn = rtnl_dereference(*hn)) {
if (phn == ht) {
- u32_clear_hw_hnode(tp, ht);
+ u32_clear_hw_hnode(tp, ht, extack);
idr_destroy(&ht->handle_idr);
idr_remove_ext(&tp_c->handle_idr, ht->handle);
RCU_INIT_POINTER(*hn, ht->next);
@@ -637,7 +643,7 @@ static bool ht_empty(struct tc_u_hnode *ht)
return true;
}
-static void u32_destroy(struct tcf_proto *tp)
+static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
@@ -645,7 +651,7 @@ static void u32_destroy(struct tcf_proto *tp)
WARN_ON(root_ht == NULL);
if (root_ht && --root_ht->refcnt == 0)
- u32_destroy_hnode(tp, root_ht);
+ u32_destroy_hnode(tp, root_ht, extack);
if (--tp_c->refcnt == 0) {
struct tc_u_hnode *ht;
@@ -656,7 +662,7 @@ static void u32_destroy(struct tcf_proto *tp)
ht;
ht = rtnl_dereference(ht->next)) {
ht->refcnt--;
- u32_clear_hnode(tp, ht);
+ u32_clear_hnode(tp, ht, extack);
}
while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
@@ -671,7 +677,8 @@ static void u32_destroy(struct tcf_proto *tp)
tp->data = NULL;
}
-static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
+static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
+ struct netlink_ext_ack *extack)
{
struct tc_u_hnode *ht = arg;
struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
@@ -682,18 +689,21 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
goto out;
if (TC_U32_KEY(ht->handle)) {
- u32_remove_hw_knode(tp, ht->handle);
+ u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
goto out;
}
- if (root_ht == ht)
+ if (root_ht == ht) {
+ NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
return -EINVAL;
+ }
if (ht->refcnt == 1) {
ht->refcnt--;
- u32_destroy_hnode(tp, ht);
+ u32_destroy_hnode(tp, ht, extack);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
return -EBUSY;
}
@@ -764,11 +774,12 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
unsigned long base, struct tc_u_hnode *ht,
struct tc_u_knode *n, struct nlattr **tb,
- struct nlattr *est, bool ovr)
+ struct nlattr *est, bool ovr,
+ struct netlink_ext_ack *extack)
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr);
+ err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
if (err < 0)
return err;
@@ -776,14 +787,18 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
struct tc_u_hnode *ht_down = NULL, *ht_old;
- if (TC_U32_KEY(handle))
+ if (TC_U32_KEY(handle)) {
+ NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
return -EINVAL;
+ }
if (handle) {
ht_down = u32_lookup_ht(ht->tp_c, handle);
- if (ht_down == NULL)
+ if (!ht_down) {
+ NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
return -EINVAL;
+ }
ht_down->refcnt++;
}
@@ -801,7 +816,7 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
#ifdef CONFIG_NET_CLS_IND
if (tb[TCA_U32_INDEV]) {
int ret;
- ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
+ ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
if (ret < 0)
return -EINVAL;
n->ifindex = ret;
@@ -892,7 +907,8 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
static int u32_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
- struct nlattr **tca, void **arg, bool ovr)
+ struct nlattr **tca, void **arg, bool ovr,
+ struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
@@ -906,28 +922,40 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
size_t size;
#endif
- if (opt == NULL)
- return handle ? -EINVAL : 0;
+ if (!opt) {
+ if (handle) {
+ NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ }
- err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, NULL);
+ err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, extack);
if (err < 0)
return err;
if (tb[TCA_U32_FLAGS]) {
flags = nla_get_u32(tb[TCA_U32_FLAGS]);
- if (!tc_flags_valid(flags))
+ if (!tc_flags_valid(flags)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
return -EINVAL;
+ }
}
n = *arg;
if (n) {
struct tc_u_knode *new;
- if (TC_U32_KEY(n->handle) == 0)
+ if (TC_U32_KEY(n->handle) == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
return -EINVAL;
+ }
- if (n->flags != flags)
+ if (n->flags != flags) {
+ NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
return -EINVAL;
+ }
new = u32_init_knode(tp, n);
if (!new)
@@ -935,14 +963,14 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
err = u32_set_parms(net, tp, base,
rtnl_dereference(n->ht_up), new, tb,
- tca[TCA_RATE], ovr);
+ tca[TCA_RATE], ovr, extack);
if (err) {
u32_destroy_key(tp, new, false);
return err;
}
- err = u32_replace_hw_knode(tp, new, flags);
+ err = u32_replace_hw_knode(tp, new, flags, extack);
if (err) {
u32_destroy_key(tp, new, false);
return err;
@@ -961,10 +989,14 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (tb[TCA_U32_DIVISOR]) {
unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
- if (--divisor > 0x100)
+ if (--divisor > 0x100) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
return -EINVAL;
- if (TC_U32_KEY(handle))
+ }
+ if (TC_U32_KEY(handle)) {
+ NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
return -EINVAL;
+ }
ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
if (ht == NULL)
return -ENOBUFS;
@@ -988,8 +1020,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
ht->handle = handle;
ht->prio = tp->prio;
idr_init(&ht->handle_idr);
+ ht->flags = flags;
- err = u32_replace_hw_hnode(tp, ht, flags);
+ err = u32_replace_hw_hnode(tp, ht, flags, extack);
if (err) {
idr_remove_ext(&tp_c->handle_idr, handle);
kfree(ht);
@@ -1010,20 +1043,26 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
htid = ht->handle;
} else {
ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
- if (ht == NULL)
+ if (!ht) {
+ NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
return -EINVAL;
+ }
}
} else {
ht = rtnl_dereference(tp->root);
htid = ht->handle;
}
- if (ht->divisor < TC_U32_HASH(htid))
+ if (ht->divisor < TC_U32_HASH(htid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
return -EINVAL;
+ }
if (handle) {
- if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
+ if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
return -EINVAL;
+ }
handle = htid | TC_U32_NODE(handle);
err = idr_alloc_ext(&ht->handle_idr, NULL, NULL,
handle, handle + 1,
@@ -1034,6 +1073,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
handle = gen_new_kid(ht, htid);
if (tb[TCA_U32_SEL] == NULL) {
+ NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
err = -EINVAL;
goto erridr;
}
@@ -1082,12 +1122,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
}
#endif
- err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
+ err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
+ extack);
if (err == 0) {
struct tc_u_knode __rcu **ins;
struct tc_u_knode *pins;
- err = u32_replace_hw_knode(tp, n, flags);
+ err = u32_replace_hw_knode(tp, n, flags, extack);
if (err)
goto errhw;
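The cls_u32 hunks above follow one pattern throughout: thread a struct netlink_ext_ack pointer into each validation path so every bare -EINVAL gains a human-readable reason that netlink can carry back to userspace. A minimal userspace model of that pattern, assuming simplified stand-in types (ext_ack and SET_ERR_MSG here are illustrative, not the kernel's definitions):

#include <errno.h>
#include <stdio.h>

struct ext_ack { const char *msg; };

#define SET_ERR_MSG(ack, m) do { if (ack) (ack)->msg = (m); } while (0)

/* Mirrors the divisor check in u32_change() above. */
static int validate_divisor(unsigned int divisor, struct ext_ack *ack)
{
	if (--divisor > 0x100) {
		SET_ERR_MSG(ack, "Exceeded maximum 256 hash buckets");
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct ext_ack ack = { NULL };

	if (validate_divisor(1000, &ack) < 0)
		fprintf(stderr, "error: %s\n", ack.msg);
	return 0;
}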
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index df3110d69585..07c10bac06a0 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -51,7 +51,7 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
return 0;
- return !memcmp(ptr + nbyte->hdr.off, nbyte->pattern, nbyte->hdr.len);
+ return !memcmp(ptr, nbyte->pattern, nbyte->hdr.len);
}
static struct tcf_ematch_ops em_nbyte_ops = {
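The em_nbyte change above is a one-line off-by-offset fix: earlier in em_nbyte_match() the pointer has already been advanced by hdr.off before the tcf_valid_offset() bounds check, so adding hdr.off again in the memcmp() compared bytes past the intended window. A self-contained sketch of the bug with an illustrative buffer (not the kernel structures):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char pkt[32] = "HEADERpayload"; /* padded so the buggy read stays in bounds */
	const char pattern[] = "payload";
	size_t off = 6, len = strlen(pattern);
	const char *ptr = pkt + off;          /* offset applied once, as in the fixed code */

	printf("fixed: %d\n", !memcmp(ptr, pattern, len));        /* 1: match */
	printf("buggy: %d\n", !memcmp(ptr + off, pattern, len));  /* 0: offset applied twice */
	return 0;
}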
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0f1eab99ff4e..d512f49ee83c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -393,13 +393,16 @@ static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
static struct qdisc_rate_table *qdisc_rtab_list;
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
- struct nlattr *tab)
+ struct nlattr *tab,
+ struct netlink_ext_ack *extack)
{
struct qdisc_rate_table *rtab;
if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
- nla_len(tab) != TC_RTAB_SIZE)
+ nla_len(tab) != TC_RTAB_SIZE) {
+ NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
return NULL;
+ }
for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
@@ -418,6 +421,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
+ } else {
+ NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
}
return rtab;
}
@@ -449,7 +454,8 @@ static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
[TCA_STAB_DATA] = { .type = NLA_BINARY },
};
-static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
+static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_STAB_MAX + 1];
struct qdisc_size_table *stab;
@@ -458,23 +464,29 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
u16 *tab = NULL;
int err;
- err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
+ err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
if (err < 0)
return ERR_PTR(err);
- if (!tb[TCA_STAB_BASE])
+ if (!tb[TCA_STAB_BASE]) {
+ NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
return ERR_PTR(-EINVAL);
+ }
s = nla_data(tb[TCA_STAB_BASE]);
if (s->tsize > 0) {
- if (!tb[TCA_STAB_DATA])
+ if (!tb[TCA_STAB_DATA]) {
+ NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
return ERR_PTR(-EINVAL);
+ }
tab = nla_data(tb[TCA_STAB_DATA]);
tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
}
- if (tsize != s->tsize || (!tab && tsize > 0))
+ if (tsize != s->tsize || (!tab && tsize > 0)) {
+ NL_SET_ERR_MSG(extack, "Invalid size of size table");
return ERR_PTR(-EINVAL);
+ }
list_for_each_entry(stab, &qdisc_stab_list, list) {
if (memcmp(&stab->szopts, s, sizeof(*s)))
@@ -669,7 +681,7 @@ int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
unsigned int size = 4;
clhash->hash = qdisc_class_hash_alloc(size);
- if (clhash->hash == NULL)
+ if (!clhash->hash)
return -ENOMEM;
clhash->hashsize = size;
clhash->hashmask = size - 1;
@@ -779,6 +791,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
unsigned char *b = skb_tail_pointer(skb);
struct gnet_dump d;
struct qdisc_size_table *stab;
+ u32 block_index;
__u32 qlen;
cond_resched();
@@ -795,11 +808,23 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
tcm->tcm_info = refcount_read(&q->refcnt);
if (nla_put_string(skb, TCA_KIND, q->ops->id))
goto nla_put_failure;
- if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
- goto nla_put_failure;
+ if (q->ops->ingress_block_get) {
+ block_index = q->ops->ingress_block_get(q);
+ if (block_index &&
+ nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
+ goto nla_put_failure;
+ }
+ if (q->ops->egress_block_get) {
+ block_index = q->ops->egress_block_get(q);
+ if (block_index &&
+ nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
+ goto nla_put_failure;
+ }
if (q->ops->dump && q->ops->dump(q, skb) < 0)
goto nla_put_failure;
- qlen = q->q.qlen;
+ if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
+ goto nla_put_failure;
+ qlen = qdisc_qlen_sum(q);
stab = rtnl_dereference(q->stab);
if (stab && qdisc_dump_stab(skb, stab) < 0)
@@ -898,7 +923,8 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
- struct Qdisc *new, struct Qdisc *old)
+ struct Qdisc *new, struct Qdisc *old,
+ struct netlink_ext_ack *extack)
{
struct Qdisc *q = old;
struct net *net = dev_net(dev);
@@ -913,8 +939,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
(new && new->flags & TCQ_F_INGRESS)) {
num_q = 1;
ingress = 1;
- if (!dev_ingress_queue(dev))
+ if (!dev_ingress_queue(dev)) {
+ NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
return -ENOENT;
+ }
}
if (dev->flags & IFF_UP)
@@ -956,14 +984,22 @@ skip:
} else {
const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
+ /* Only support running class lockless if parent is lockless */
+ if (new && (new->flags & TCQ_F_NOLOCK) &&
+ parent && !(parent->flags & TCQ_F_NOLOCK))
+ new->flags &= ~TCQ_F_NOLOCK;
+
err = -EOPNOTSUPP;
if (cops && cops->graft) {
unsigned long cl = cops->find(parent, classid);
- if (cl)
- err = cops->graft(parent, cl, new, &old);
- else
+ if (cl) {
+ err = cops->graft(parent, cl, new, &old,
+ extack);
+ } else {
+ NL_SET_ERR_MSG(extack, "Specified class not found");
err = -ENOENT;
+ }
}
if (!err)
notify_and_destroy(net, skb, n, classid, old, new);
@@ -971,6 +1007,40 @@ skip:
return err;
}
+static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
+ struct netlink_ext_ack *extack)
+{
+ u32 block_index;
+
+ if (tca[TCA_INGRESS_BLOCK]) {
+ block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
+
+ if (!block_index) {
+ NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
+ return -EINVAL;
+ }
+ if (!sch->ops->ingress_block_set) {
+ NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
+ return -EOPNOTSUPP;
+ }
+ sch->ops->ingress_block_set(sch, block_index);
+ }
+ if (tca[TCA_EGRESS_BLOCK]) {
+ block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
+
+ if (!block_index) {
+ NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
+ return -EINVAL;
+ }
+ if (!sch->ops->egress_block_set) {
+ NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
+ return -EOPNOTSUPP;
+ }
+ sch->ops->egress_block_set(sch, block_index);
+ }
+ return 0;
+}
+
/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
@@ -984,7 +1054,8 @@ static struct lock_class_key qdisc_rx_lock;
static struct Qdisc *qdisc_create(struct net_device *dev,
struct netdev_queue *dev_queue,
struct Qdisc *p, u32 parent, u32 handle,
- struct nlattr **tca, int *errp)
+ struct nlattr **tca, int *errp,
+ struct netlink_ext_ack *extack)
{
int err;
struct nlattr *kind = tca[TCA_KIND];
@@ -1022,10 +1093,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
#endif
err = -ENOENT;
- if (ops == NULL)
+ if (!ops) {
+ NL_SET_ERR_MSG(extack, "Specified qdisc not found");
goto err_out;
+ }
- sch = qdisc_alloc(dev_queue, ops);
+ sch = qdisc_alloc(dev_queue, ops, extack);
if (IS_ERR(sch)) {
err = PTR_ERR(sch);
goto err_out2;
@@ -1062,60 +1135,63 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
}
- if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
- if (qdisc_is_percpu_stats(sch)) {
- sch->cpu_bstats =
- netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
- if (!sch->cpu_bstats)
- goto err_out4;
+ err = qdisc_block_indexes_set(sch, tca, extack);
+ if (err)
+ goto err_out3;
- sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
- if (!sch->cpu_qstats)
- goto err_out4;
+ if (ops->init) {
+ err = ops->init(sch, tca[TCA_OPTIONS], extack);
+ if (err != 0)
+ goto err_out5;
+ }
+
+ if (tca[TCA_STAB]) {
+ stab = qdisc_get_stab(tca[TCA_STAB], extack);
+ if (IS_ERR(stab)) {
+ err = PTR_ERR(stab);
+ goto err_out4;
}
+ rcu_assign_pointer(sch->stab, stab);
+ }
+ if (tca[TCA_RATE]) {
+ seqcount_t *running;
- if (tca[TCA_STAB]) {
- stab = qdisc_get_stab(tca[TCA_STAB]);
- if (IS_ERR(stab)) {
- err = PTR_ERR(stab);
- goto err_out4;
- }
- rcu_assign_pointer(sch->stab, stab);
+ err = -EOPNOTSUPP;
+ if (sch->flags & TCQ_F_MQROOT) {
+ NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
+ goto err_out4;
}
- if (tca[TCA_RATE]) {
- seqcount_t *running;
-
- err = -EOPNOTSUPP;
- if (sch->flags & TCQ_F_MQROOT)
- goto err_out4;
-
- if ((sch->parent != TC_H_ROOT) &&
- !(sch->flags & TCQ_F_INGRESS) &&
- (!p || !(p->flags & TCQ_F_MQROOT)))
- running = qdisc_root_sleeping_running(sch);
- else
- running = &sch->running;
-
- err = gen_new_estimator(&sch->bstats,
- sch->cpu_bstats,
- &sch->rate_est,
- NULL,
- running,
- tca[TCA_RATE]);
- if (err)
- goto err_out4;
+
+ if (sch->parent != TC_H_ROOT &&
+ !(sch->flags & TCQ_F_INGRESS) &&
+ (!p || !(p->flags & TCQ_F_MQROOT)))
+ running = qdisc_root_sleeping_running(sch);
+ else
+ running = &sch->running;
+
+ err = gen_new_estimator(&sch->bstats,
+ sch->cpu_bstats,
+ &sch->rate_est,
+ NULL,
+ running,
+ tca[TCA_RATE]);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
+ goto err_out4;
}
+ }
- qdisc_hash_add(sch, false);
+ qdisc_hash_add(sch, false);
- return sch;
- }
+ return sch;
+
+err_out5:
/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
if (ops->destroy)
ops->destroy(sch);
err_out3:
dev_put(dev);
- kfree((char *) sch - sch->padded);
+ qdisc_free(sch);
err_out2:
module_put(ops->owner);
err_out:
@@ -1123,8 +1199,6 @@ err_out:
return NULL;
err_out4:
- free_percpu(sch->cpu_bstats);
- free_percpu(sch->cpu_qstats);
/*
* Any broken qdiscs that would require a ops->reset() here?
* The qdisc was never in action so it shouldn't be necessary.
@@ -1135,21 +1209,28 @@ err_out4:
goto err_out3;
}
-static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
+static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
+ struct netlink_ext_ack *extack)
{
struct qdisc_size_table *ostab, *stab = NULL;
int err = 0;
if (tca[TCA_OPTIONS]) {
- if (sch->ops->change == NULL)
+ if (!sch->ops->change) {
+ NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
return -EINVAL;
- err = sch->ops->change(sch, tca[TCA_OPTIONS]);
+ }
+ if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
+ NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
+ return -EOPNOTSUPP;
+ }
+ err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
if (err)
return err;
}
if (tca[TCA_STAB]) {
- stab = qdisc_get_stab(tca[TCA_STAB]);
+ stab = qdisc_get_stab(tca[TCA_STAB], extack);
if (IS_ERR(stab))
return PTR_ERR(stab);
}
@@ -1247,8 +1328,10 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
if (clid != TC_H_ROOT) {
if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
p = qdisc_lookup(dev, TC_H_MAJ(clid));
- if (!p)
+ if (!p) {
+ NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
return -ENOENT;
+ }
q = qdisc_leaf(p, clid);
} else if (dev_ingress_queue(dev)) {
q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1256,26 +1339,38 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
} else {
q = dev->qdisc;
}
- if (!q)
+ if (!q) {
+ NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
return -ENOENT;
+ }
- if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
+ if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
+ NL_SET_ERR_MSG(extack, "Invalid handle");
return -EINVAL;
+ }
} else {
q = qdisc_lookup(dev, tcm->tcm_handle);
- if (!q)
+ if (!q) {
+ NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
return -ENOENT;
+ }
}
- if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+ if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+ NL_SET_ERR_MSG(extack, "Invalid qdisc name");
return -EINVAL;
+ }
if (n->nlmsg_type == RTM_DELQDISC) {
- if (!clid)
+ if (!clid) {
+ NL_SET_ERR_MSG(extack, "Classid cannot be zero");
return -EINVAL;
- if (q->handle == 0)
+ }
+ if (q->handle == 0) {
+ NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
return -ENOENT;
- err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+ }
+ err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
if (err != 0)
return err;
} else {
@@ -1321,8 +1416,10 @@ replay:
if (clid != TC_H_ROOT) {
if (clid != TC_H_INGRESS) {
p = qdisc_lookup(dev, TC_H_MAJ(clid));
- if (!p)
+ if (!p) {
+ NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
return -ENOENT;
+ }
q = qdisc_leaf(p, clid);
} else if (dev_ingress_queue_create(dev)) {
q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1337,20 +1434,31 @@ replay:
if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
if (tcm->tcm_handle) {
- if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
+ if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
+ NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
return -EEXIST;
- if (TC_H_MIN(tcm->tcm_handle))
+ }
+ if (TC_H_MIN(tcm->tcm_handle)) {
+ NL_SET_ERR_MSG(extack, "Invalid minor handle");
return -EINVAL;
+ }
q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q)
goto create_n_graft;
- if (n->nlmsg_flags & NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL) {
+ NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
return -EEXIST;
- if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+ }
+ if (tca[TCA_KIND] &&
+ nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+ NL_SET_ERR_MSG(extack, "Invalid qdisc name");
return -EINVAL;
+ }
if (q == p ||
- (p && check_loop(q, p, 0)))
+ (p && check_loop(q, p, 0))) {
+ NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
return -ELOOP;
+ }
qdisc_refcount_inc(q);
goto graft;
} else {
@@ -1385,33 +1493,45 @@ replay:
}
}
} else {
- if (!tcm->tcm_handle)
+ if (!tcm->tcm_handle) {
+ NL_SET_ERR_MSG(extack, "Handle cannot be zero");
return -EINVAL;
+ }
q = qdisc_lookup(dev, tcm->tcm_handle);
}
/* Change qdisc parameters */
- if (q == NULL)
+ if (!q) {
+ NL_SET_ERR_MSG(extack, "Specified qdisc not found");
return -ENOENT;
- if (n->nlmsg_flags & NLM_F_EXCL)
+ }
+ if (n->nlmsg_flags & NLM_F_EXCL) {
+ NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
return -EEXIST;
- if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+ }
+ if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+ NL_SET_ERR_MSG(extack, "Invalid qdisc name");
return -EINVAL;
- err = qdisc_change(q, tca);
+ }
+ err = qdisc_change(q, tca, extack);
if (err == 0)
qdisc_notify(net, skb, n, clid, NULL, q);
return err;
create_n_graft:
- if (!(n->nlmsg_flags & NLM_F_CREATE))
+ if (!(n->nlmsg_flags & NLM_F_CREATE)) {
+ NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
return -ENOENT;
+ }
if (clid == TC_H_INGRESS) {
- if (dev_ingress_queue(dev))
+ if (dev_ingress_queue(dev)) {
q = qdisc_create(dev, dev_ingress_queue(dev), p,
tcm->tcm_parent, tcm->tcm_parent,
- tca, &err);
- else
+ tca, &err, extack);
+ } else {
+ NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
err = -ENOENT;
+ }
} else {
struct netdev_queue *dev_queue;
@@ -1424,7 +1544,7 @@ create_n_graft:
q = qdisc_create(dev, dev_queue, p,
tcm->tcm_parent, tcm->tcm_handle,
- tca, &err);
+ tca, &err, extack);
}
if (q == NULL) {
if (err == -EAGAIN)
@@ -1433,7 +1553,7 @@ create_n_graft:
}
graft:
- err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+ err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
if (err) {
if (q)
qdisc_destroy(q);
@@ -1685,7 +1805,7 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
cl = cops->find(q, portid);
if (!cl)
return;
- block = cops->tcf_block(q, cl);
+ block = cops->tcf_block(q, cl, NULL);
if (!block)
return;
list_for_each_entry(chain, &block->chain_list, list) {
@@ -1829,10 +1949,15 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
}
}
+ if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
+ NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
+ return -EOPNOTSUPP;
+ }
+
new_cl = cl;
err = -EOPNOTSUPP;
if (cops->change)
- err = cops->change(q, clid, portid, tca, &new_cl);
+ err = cops->change(q, clid, portid, tca, &new_cl, extack);
if (err == 0) {
tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
/* We just create a new class, need to do reverse binding. */
@@ -1968,7 +2093,6 @@ static int psched_open(struct inode *inode, struct file *file)
}
static const struct file_operations psched_fops = {
- .owner = THIS_MODULE,
.open = psched_open,
.read = seq_read,
.llseek = seq_lseek,
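Two things stand out in the sch_api.c changes: qdisc_get_rtab()/qdisc_get_stab() now explain their failures, and the new qdisc_block_indexes_set() gates block sharing on the presence of optional ops callbacks, reporting -EOPNOTSUPP through extack when the qdisc lacks the setter. A hedged, simplified userspace rendering of that optional-callback dispatch (struct sched and the other names are stand-ins, not kernel API):

#include <errno.h>
#include <stdio.h>

struct sched;                           /* stand-in for struct Qdisc */

struct sched_ops {
	void (*ingress_block_set)(struct sched *s, unsigned int idx);
};

static int set_ingress_block(struct sched *s, const struct sched_ops *ops,
			     unsigned int idx, const char **errmsg)
{
	if (!idx) {
		*errmsg = "Ingress block index cannot be 0";
		return -EINVAL;
	}
	if (!ops->ingress_block_set) {  /* feature not implemented by this qdisc */
		*errmsg = "Ingress block sharing is not supported";
		return -EOPNOTSUPP;
	}
	ops->ingress_block_set(s, idx);
	return 0;
}

int main(void)
{
	struct sched_ops no_sharing = { NULL };
	const char *errmsg = NULL;

	if (set_ingress_block(NULL, &no_sharing, 5, &errmsg) < 0)
		fprintf(stderr, "error: %s\n", errmsg);
	return 0;
}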
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2dbd249c0b2f..cd49afca9617 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -82,7 +82,8 @@ static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
}
static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old)
+ struct Qdisc *new, struct Qdisc **old,
+ struct netlink_ext_ack *extack)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
struct atm_flow_data *flow = (struct atm_flow_data *)arg;
@@ -191,7 +192,8 @@ static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
};
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct nlattr **tca, unsigned long *arg)
+ struct nlattr **tca, unsigned long *arg,
+ struct netlink_ext_ack *extack)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
@@ -281,13 +283,15 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
goto err_out;
}
- error = tcf_block_get(&flow->block, &flow->filter_list, sch);
+ error = tcf_block_get(&flow->block, &flow->filter_list, sch,
+ extack);
if (error) {
kfree(flow);
goto err_out;
}
- flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+ flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+ extack);
if (!flow->q)
flow->q = &noop_qdisc;
pr_debug("atm_tc_change: qdisc %p\n", flow->q);
@@ -356,7 +360,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
struct atm_flow_data *flow = (struct atm_flow_data *)cl;
@@ -531,7 +536,8 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
return p->link.q->ops->peek(p->link.q);
}
-static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
+static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
int err;
@@ -541,12 +547,13 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
INIT_LIST_HEAD(&p->link.list);
list_add(&p->link.list, &p->flows);
p->link.q = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, sch->handle);
+ &pfifo_qdisc_ops, sch->handle, extack);
if (!p->link.q)
p->link.q = &noop_qdisc;
pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
- err = tcf_block_get(&p->link.block, &p->link.filter_list, sch);
+ err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
+ extack);
if (err)
return err;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 525eb3a6d625..f42025d53cfe 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1132,7 +1132,8 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
[TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
};
-static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CBQ_MAX + 1];
@@ -1143,22 +1144,27 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
q->delay_timer.function = cbq_undelay;
- if (!opt)
+ if (!opt) {
+ NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
return -EINVAL;
+ }
- err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+ err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
if (err < 0)
return err;
- if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
+ if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
+ NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
return -EINVAL;
+ }
r = nla_data(tb[TCA_CBQ_RATE]);
- if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
+ q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
+ if (!q->link.R_tab)
return -EINVAL;
- err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+ err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
if (err)
goto put_rtab;
@@ -1170,7 +1176,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
q->link.common.classid = sch->handle;
q->link.qdisc = sch;
q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- sch->handle);
+ sch->handle, NULL);
if (!q->link.q)
q->link.q = &noop_qdisc;
else
@@ -1369,13 +1375,13 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
}
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct cbq_class *cl = (struct cbq_class *)arg;
if (new == NULL) {
- new = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, cl->common.classid);
+ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ cl->common.classid, extack);
if (new == NULL)
return -ENOBUFS;
}
@@ -1450,7 +1456,7 @@ static void cbq_destroy(struct Qdisc *sch)
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
- unsigned long *arg)
+ unsigned long *arg, struct netlink_ext_ack *extack)
{
int err;
struct cbq_sched_data *q = qdisc_priv(sch);
@@ -1460,29 +1466,37 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
struct cbq_class *parent;
struct qdisc_rate_table *rtab = NULL;
- if (opt == NULL)
+ if (!opt) {
+ NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
return -EINVAL;
+ }
- err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+ err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
if (err < 0)
return err;
- if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
+ if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
+ NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
return -EOPNOTSUPP;
+ }
if (cl) {
/* Check parent */
if (parentid) {
if (cl->tparent &&
- cl->tparent->common.classid != parentid)
+ cl->tparent->common.classid != parentid) {
+ NL_SET_ERR_MSG(extack, "Invalid parent id");
return -EINVAL;
- if (!cl->tparent && parentid != TC_H_ROOT)
+ }
+ if (!cl->tparent && parentid != TC_H_ROOT) {
+ NL_SET_ERR_MSG(extack, "Parent must be root");
return -EINVAL;
+ }
}
if (tb[TCA_CBQ_RATE]) {
rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
- tb[TCA_CBQ_RTAB]);
+ tb[TCA_CBQ_RTAB], extack);
if (rtab == NULL)
return -EINVAL;
}
@@ -1494,6 +1508,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
qdisc_put_rtab(rtab);
return err;
}
@@ -1532,19 +1547,23 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (parentid == TC_H_ROOT)
return -EINVAL;
- if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
- tb[TCA_CBQ_LSSOPT] == NULL)
+ if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
+ NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
return -EINVAL;
+ }
- rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
+ rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
+ extack);
if (rtab == NULL)
return -EINVAL;
if (classid) {
err = -EINVAL;
if (TC_H_MAJ(classid ^ sch->handle) ||
- cbq_class_lookup(q, classid))
+ cbq_class_lookup(q, classid)) {
+ NL_SET_ERR_MSG(extack, "Specified class not found");
goto failure;
+ }
} else {
int i;
classid = TC_H_MAKE(sch->handle, 0x8000);
@@ -1556,8 +1575,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
break;
}
err = -ENOSR;
- if (i >= 0x8000)
+ if (i >= 0x8000) {
+ NL_SET_ERR_MSG(extack, "Unable to generate classid");
goto failure;
+ }
classid = classid|q->hgenerator;
}
@@ -1565,8 +1586,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (parentid) {
parent = cbq_class_lookup(q, parentid);
err = -EINVAL;
- if (parent == NULL)
+ if (!parent) {
+ NL_SET_ERR_MSG(extack, "Failed to find parentid");
goto failure;
+ }
}
err = -ENOBUFS;
@@ -1574,7 +1597,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (cl == NULL)
goto failure;
- err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+ err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) {
kfree(cl);
return err;
@@ -1586,6 +1609,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
+ NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
tcf_block_put(cl->block);
kfree(cl);
goto failure;
@@ -1594,7 +1618,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cl->R_tab = rtab;
rtab = NULL;
- cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+ cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+ NULL);
if (!cl->q)
cl->q = &noop_qdisc;
else
@@ -1678,7 +1703,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
return 0;
}
-static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
+ struct netlink_ext_ack *extack)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 7a72980c1509..cdd96b9a27bc 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -219,14 +219,17 @@ static void cbs_disable_offload(struct net_device *dev,
}
static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
- const struct tc_cbs_qopt *opt)
+ const struct tc_cbs_qopt *opt,
+ struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct tc_cbs_qopt_offload cbs = { };
int err;
- if (!ops->ndo_setup_tc)
+ if (!ops->ndo_setup_tc) {
+ NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload");
return -EOPNOTSUPP;
+ }
cbs.queue = q->queue;
@@ -237,8 +240,10 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
cbs.sendslope = opt->sendslope;
err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
- if (err < 0)
+ if (err < 0) {
+ NL_SET_ERR_MSG(extack, "Specified device failed to setup cbs hardware offload");
return err;
+ }
q->enqueue = cbs_enqueue_offload;
q->dequeue = cbs_dequeue_offload;
@@ -246,7 +251,8 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
return 0;
}
-static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct cbs_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
@@ -254,12 +260,14 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
struct tc_cbs_qopt *qopt;
int err;
- err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, NULL);
+ err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, extack);
if (err < 0)
return err;
- if (!tb[TCA_CBS_PARMS])
+ if (!tb[TCA_CBS_PARMS]) {
+ NL_SET_ERR_MSG(extack, "Missing CBS parameter which are mandatory");
return -EINVAL;
+ }
qopt = nla_data(tb[TCA_CBS_PARMS]);
@@ -276,7 +284,7 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
cbs_disable_offload(dev, q);
} else {
- err = cbs_enable_offload(dev, q, qopt);
+ err = cbs_enable_offload(dev, q, qopt, extack);
if (err < 0)
return err;
}
@@ -291,13 +299,16 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct cbs_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
- if (!opt)
+ if (!opt) {
+ NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
return -EINVAL;
+ }
q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
@@ -306,7 +317,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
qdisc_watchdog_init(&q->watchdog, sch);
- return cbs_change(sch, opt);
+ return cbs_change(sch, opt, extack);
}
static void cbs_destroy(struct Qdisc *sch)
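Beyond the extack plumbing, cbs_enable_offload() above shows the usual offload idiom: probe the driver's ndo_setup_tc hook, hand it a filled-in parameter block, and only on success switch the qdisc's enqueue/dequeue function pointers to the offloaded variants. A small illustrative model of that pointer swap (all names hypothetical):

#include <stdio.h>

struct q;
typedef int (*enqueue_fn)(struct q *);

static int enqueue_sw(struct q *q)      { (void)q; puts("software path"); return 0; }
static int enqueue_offload(struct q *q) { (void)q; puts("offloaded path"); return 0; }

struct q {
	enqueue_fn enqueue;             /* switched only after the driver accepts */
};

static int enable_offload(struct q *q, int driver_supports_it)
{
	if (!driver_supports_it)
		return -1;              /* keep the software path untouched */
	q->enqueue = enqueue_offload;
	return 0;
}

int main(void)
{
	struct q q = { enqueue_sw };

	enable_offload(&q, 1);
	return q.enqueue(&q);
}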
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 531250fceb9e..eafc0d17d174 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -344,7 +344,8 @@ static void choke_free(void *addr)
kvfree(addr);
}
-static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+static int choke_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct choke_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CHOKE_MAX + 1];
@@ -431,9 +432,10 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+static int choke_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
- return choke_change(sch, opt);
+ return choke_change(sch, opt, extack);
}
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index c518a1efcb9d..17cd81f84b5d 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -130,7 +130,8 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
};
-static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int codel_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CODEL_MAX + 1];
@@ -184,7 +185,8 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int codel_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct codel_sched_data *q = qdisc_priv(sch);
@@ -196,7 +198,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
q->params.mtu = psched_mtu(qdisc_dev(sch));
if (opt) {
- int err = codel_change(sch, opt);
+ int err = codel_change(sch, opt, extack);
if (err)
return err;
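codel (like fq, choke and cbs above) funnels creation and reconfiguration through a single handler: *_init() applies defaults, then calls *_change() with the same attributes and extack, so the two paths cannot drift apart. A hedged sketch of that split using made-up config types:

#include <errno.h>
#include <stdio.h>

struct cfg { unsigned int limit; };

static int cfg_change(struct cfg *c, const unsigned int *opt_limit,
		      const char **errmsg)
{
	if (opt_limit && *opt_limit == 0) {
		*errmsg = "limit cannot be zero";
		return -EINVAL;
	}
	if (opt_limit)
		c->limit = *opt_limit;
	return 0;
}

static int cfg_init(struct cfg *c, const unsigned int *opt_limit,
		    const char **errmsg)
{
	c->limit = 1000;                /* defaults first, as codel_init() does */
	if (opt_limit)                  /* then reuse the change handler */
		return cfg_change(c, opt_limit, errmsg);
	return 0;
}

int main(void)
{
	struct cfg c;
	const char *errmsg = NULL;
	unsigned int zero = 0;

	if (cfg_init(&c, &zero, &errmsg) < 0)
		fprintf(stderr, "error: %s\n", errmsg);
	return 0;
}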
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 5bbcef3dcd8c..e0b0cf8a9939 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -64,7 +64,8 @@ static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
};
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
- struct nlattr **tca, unsigned long *arg)
+ struct nlattr **tca, unsigned long *arg,
+ struct netlink_ext_ack *extack)
{
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl = (struct drr_class *)*arg;
@@ -73,17 +74,21 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
u32 quantum;
int err;
- if (!opt)
+ if (!opt) {
+ NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
return -EINVAL;
+ }
- err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL);
+ err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, extack);
if (err < 0)
return err;
if (tb[TCA_DRR_QUANTUM]) {
quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
- if (quantum == 0)
+ if (quantum == 0) {
+ NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
return -EINVAL;
+ }
} else
quantum = psched_mtu(qdisc_dev(sch));
@@ -94,8 +99,10 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
NULL,
qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to replace estimator");
return err;
+ }
}
sch_tree_lock(sch);
@@ -113,7 +120,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->common.classid = classid;
cl->quantum = quantum;
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, classid);
+ &pfifo_qdisc_ops, classid,
+ NULL);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
else
@@ -125,6 +133,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
+ NL_SET_ERR_MSG(extack, "Failed to replace estimator");
qdisc_destroy(cl->qdisc);
kfree(cl);
return err;
@@ -172,12 +181,15 @@ static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
return (unsigned long)drr_find_class(sch, classid);
}
-static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct drr_sched *q = qdisc_priv(sch);
- if (cl)
+ if (cl) {
+ NL_SET_ERR_MSG(extack, "DRR classid must be zero");
return NULL;
+ }
return q->block;
}
@@ -201,13 +213,14 @@ static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
}
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old)
+ struct Qdisc *new, struct Qdisc **old,
+ struct netlink_ext_ack *extack)
{
struct drr_class *cl = (struct drr_class *)arg;
if (new == NULL) {
- new = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, cl->common.classid);
+ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ cl->common.classid, NULL);
if (new == NULL)
new = &noop_qdisc;
}
@@ -408,12 +421,13 @@ out:
return NULL;
}
-static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct drr_sched *q = qdisc_priv(sch);
int err;
- err = tcf_block_get(&q->block, &q->filter_list, sch);
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
err = qdisc_class_hash_init(&q->clhash);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index fb4fb71c68cf..049714c57075 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -61,7 +61,8 @@ static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
/* ------------------------- Class/flow operations ------------------------- */
static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old)
+ struct Qdisc *new, struct Qdisc **old,
+ struct netlink_ext_ack *extack)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
@@ -70,7 +71,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
if (new == NULL) {
new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- sch->handle);
+ sch->handle, NULL);
if (new == NULL)
new = &noop_qdisc;
}
@@ -112,7 +113,8 @@ static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
};
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct nlattr **tca, unsigned long *arg)
+ struct nlattr **tca, unsigned long *arg,
+ struct netlink_ext_ack *extack)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
struct nlattr *opt = tca[TCA_OPTIONS];
@@ -184,7 +186,8 @@ ignore:
}
}
-static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
@@ -330,7 +333,8 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
return p->q->ops->peek(p->q);
}
-static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
+static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
struct nlattr *tb[TCA_DSMARK_MAX + 1];
@@ -344,7 +348,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
if (!opt)
goto errout;
- err = tcf_block_get(&p->block, &p->filter_list, sch);
+ err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
if (err)
return err;
@@ -377,7 +381,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
p->default_index = default_index;
p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
- p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
+ p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
+ NULL);
if (p->q == NULL)
p->q = &noop_qdisc;
else
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 1e37247656f8..24893d3b5d22 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -55,7 +55,8 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_CN;
}
-static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
+static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
bool bypass;
bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
@@ -157,7 +158,7 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
- ret = q->ops->change(q, nla);
+ ret = q->ops->change(q, nla, NULL);
kfree(nla);
}
return ret;
@@ -165,12 +166,14 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
EXPORT_SYMBOL(fifo_set_limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
- unsigned int limit)
+ unsigned int limit,
+ struct netlink_ext_ack *extack)
{
struct Qdisc *q;
int err = -ENOMEM;
- q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
+ q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
+ extack);
if (q) {
err = fifo_set_limit(q, limit);
if (err < 0) {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 263d16e3219e..a366e4c9413a 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -685,7 +685,8 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
};
-static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct fq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_MAX + 1];
@@ -788,7 +789,8 @@ static void fq_destroy(struct Qdisc *sch)
qdisc_watchdog_cancel(&q->watchdog);
}
-static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct fq_sched_data *q = qdisc_priv(sch);
int err;
@@ -811,7 +813,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
qdisc_watchdog_init(&q->watchdog, sch);
if (opt)
- err = fq_change(sch, opt);
+ err = fq_change(sch, opt, extack);
else
err = fq_resize(sch, q->fq_trees_log);
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 0305d791ea94..22fa13cf5d8b 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -377,7 +377,8 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};
-static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
@@ -458,7 +459,8 @@ static void fq_codel_destroy(struct Qdisc *sch)
kvfree(q->flows);
}
-static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
int i;
@@ -477,12 +479,12 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
q->cparams.mtu = psched_mtu(qdisc_dev(sch));
if (opt) {
- int err = fq_codel_change(sch, opt);
+ int err = fq_codel_change(sch, opt, extack);
if (err)
return err;
}
- err = tcf_block_get(&q->block, &q->filter_list, sch);
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
@@ -595,7 +597,8 @@ static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}
-static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 661c7144b53a..190570f21b20 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -26,11 +26,13 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
+#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
+#include <net/xfrm.h>
/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
@@ -47,17 +49,115 @@ EXPORT_SYMBOL(default_qdisc_ops);
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
-static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+{
+ const struct netdev_queue *txq = q->dev_queue;
+ spinlock_t *lock = NULL;
+ struct sk_buff *skb;
+
+ if (q->flags & TCQ_F_NOLOCK) {
+ lock = qdisc_lock(q);
+ spin_lock(lock);
+ }
+
+ skb = skb_peek(&q->skb_bad_txq);
+ if (skb) {
+ /* check the reason of requeuing without tx lock first */
+ txq = skb_get_tx_queue(txq->dev, skb);
+ if (!netif_xmit_frozen_or_stopped(txq)) {
+ skb = __skb_dequeue(&q->skb_bad_txq);
+ if (qdisc_is_percpu_stats(q)) {
+ qdisc_qstats_cpu_backlog_dec(q, skb);
+ qdisc_qstats_cpu_qlen_dec(q);
+ } else {
+ qdisc_qstats_backlog_dec(q, skb);
+ q->q.qlen--;
+ }
+ } else {
+ skb = NULL;
+ }
+ }
+
+ if (lock)
+ spin_unlock(lock);
+
+ return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
+{
+ struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
+
+ if (unlikely(skb))
+ skb = __skb_dequeue_bad_txq(q);
+
+ return skb;
+}
+
+static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
+ struct sk_buff *skb)
+{
+ spinlock_t *lock = NULL;
+
+ if (q->flags & TCQ_F_NOLOCK) {
+ lock = qdisc_lock(q);
+ spin_lock(lock);
+ }
+
+ __skb_queue_tail(&q->skb_bad_txq, skb);
+
+ if (lock)
+ spin_unlock(lock);
+}
+
+static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
- q->gso_skb = skb;
- q->qstats.requeues++;
- qdisc_qstats_backlog_inc(q, skb);
- q->q.qlen++; /* it's still part of the queue */
+ while (skb) {
+ struct sk_buff *next = skb->next;
+
+ __skb_queue_tail(&q->gso_skb, skb);
+ q->qstats.requeues++;
+ qdisc_qstats_backlog_inc(q, skb);
+ q->q.qlen++; /* it's still part of the queue */
+
+ skb = next;
+ }
+ __netif_schedule(q);
+
+ return 0;
+}
+
+static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
+{
+ spinlock_t *lock = qdisc_lock(q);
+
+ spin_lock(lock);
+ while (skb) {
+ struct sk_buff *next = skb->next;
+
+ __skb_queue_tail(&q->gso_skb, skb);
+
+ qdisc_qstats_cpu_requeues_inc(q);
+ qdisc_qstats_cpu_backlog_inc(q, skb);
+ qdisc_qstats_cpu_qlen_inc(q);
+
+ skb = next;
+ }
+ spin_unlock(lock);
+
__netif_schedule(q);
return 0;
}
+static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+{
+ if (q->flags & TCQ_F_NOLOCK)
+ return dev_requeue_skb_locked(skb, q);
+ else
+ return __dev_requeue_skb(skb, q);
+}
+
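/* Aside: dev_requeue_skb() above now dispatches on TCQ_F_NOLOCK.
 * Lockless qdiscs take the qdisc spinlock themselves, only around the
 * shared requeue list; locked qdiscs rely on the serialization the
 * caller already provides. A hedged pthreads model of that
 * conditional-lock idiom (names illustrative, not kernel API):
 */

#include <pthread.h>
#include <stdbool.h>

struct queue {
	bool nolock;                    /* TCQ_F_NOLOCK stand-in */
	pthread_mutex_t lock;
	int qlen;
};

static void requeue_one(struct queue *q)
{
	pthread_mutex_t *lock = NULL;

	if (q->nolock) {                /* otherwise the caller serializes us */
		lock = &q->lock;
		pthread_mutex_lock(lock);
	}

	q->qlen++;                      /* touch state shared across CPUs */

	if (lock)
		pthread_mutex_unlock(lock);
}

int main(void)
{
	struct queue q = { true, PTHREAD_MUTEX_INITIALIZER, 0 };

	requeue_one(&q);
	return q.qlen == 1 ? 0 : 1;
}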
static void try_bulk_dequeue_skb(struct Qdisc *q,
struct sk_buff *skb,
const struct netdev_queue *txq,
@@ -95,9 +195,15 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
if (!nskb)
break;
if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
- q->skb_bad_txq = nskb;
- qdisc_qstats_backlog_inc(q, nskb);
- q->q.qlen++;
+ qdisc_enqueue_skb_bad_txq(q, nskb);
+
+ if (qdisc_is_percpu_stats(q)) {
+ qdisc_qstats_cpu_backlog_inc(q, nskb);
+ qdisc_qstats_cpu_qlen_inc(q);
+ } else {
+ qdisc_qstats_backlog_inc(q, nskb);
+ q->q.qlen++;
+ }
break;
}
skb->next = nskb;
@@ -113,40 +219,62 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
int *packets)
{
- struct sk_buff *skb = q->gso_skb;
const struct netdev_queue *txq = q->dev_queue;
+ struct sk_buff *skb = NULL;
*packets = 1;
- if (unlikely(skb)) {
+ if (unlikely(!skb_queue_empty(&q->gso_skb))) {
+ spinlock_t *lock = NULL;
+
+ if (q->flags & TCQ_F_NOLOCK) {
+ lock = qdisc_lock(q);
+ spin_lock(lock);
+ }
+
+ skb = skb_peek(&q->gso_skb);
+
+ /* skb may be null if another cpu pulls gso_skb off in between
+ * empty check and lock.
+ */
+ if (!skb) {
+ if (lock)
+ spin_unlock(lock);
+ goto validate;
+ }
+
/* skb in gso_skb were already validated */
*validate = false;
+ if (xfrm_offload(skb))
+ *validate = true;
/* check the reason of requeuing without tx lock first */
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
- q->gso_skb = NULL;
- qdisc_qstats_backlog_dec(q, skb);
- q->q.qlen--;
- } else
+ skb = __skb_dequeue(&q->gso_skb);
+ if (qdisc_is_percpu_stats(q)) {
+ qdisc_qstats_cpu_backlog_dec(q, skb);
+ qdisc_qstats_cpu_qlen_dec(q);
+ } else {
+ qdisc_qstats_backlog_dec(q, skb);
+ q->q.qlen--;
+ }
+ } else {
skb = NULL;
- goto trace;
- }
- *validate = true;
- skb = q->skb_bad_txq;
- if (unlikely(skb)) {
- /* check the reason of requeuing without tx lock first */
- txq = skb_get_tx_queue(txq->dev, skb);
- if (!netif_xmit_frozen_or_stopped(txq)) {
- q->skb_bad_txq = NULL;
- qdisc_qstats_backlog_dec(q, skb);
- q->q.qlen--;
- goto bulk;
}
- skb = NULL;
+ if (lock)
+ spin_unlock(lock);
goto trace;
}
- if (!(q->flags & TCQ_F_ONETXQUEUE) ||
- !netif_xmit_frozen_or_stopped(txq))
- skb = q->dequeue(q);
+validate:
+ *validate = true;
+
+ if ((q->flags & TCQ_F_ONETXQUEUE) &&
+ netif_xmit_frozen_or_stopped(txq))
+ return skb;
+
+ skb = qdisc_dequeue_skb_bad_txq(q);
+ if (unlikely(skb))
+ goto bulk;
+ skb = q->dequeue(q);
if (skb) {
bulk:
if (qdisc_may_bulk(q))
@@ -165,21 +293,33 @@ trace:
* only one CPU can execute this function.
*
* Returns to the caller:
- * 0 - queue is empty or throttled.
- * >0 - queue is not empty.
+ * false - hardware queue frozen; caller should back off
+ * true - feel free to send more pkts
*/
-int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
- struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock, bool validate)
+bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev, struct netdev_queue *txq,
+ spinlock_t *root_lock, bool validate)
{
int ret = NETDEV_TX_BUSY;
+ bool again = false;
/* And release qdisc */
- spin_unlock(root_lock);
+ if (root_lock)
+ spin_unlock(root_lock);
/* Note that we validate skb (GSO, checksum, ...) outside of locks */
if (validate)
- skb = validate_xmit_skb_list(skb, dev);
+ skb = validate_xmit_skb_list(skb, dev, &again);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+ if (unlikely(again)) {
+ if (root_lock)
+ spin_lock(root_lock);
+
+ dev_requeue_skb(skb, q);
+ return false;
+ }
+#endif
if (likely(skb)) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
@@ -188,27 +328,28 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
HARD_TX_UNLOCK(dev, txq);
} else {
- spin_lock(root_lock);
- return qdisc_qlen(q);
+ if (root_lock)
+ spin_lock(root_lock);
+ return true;
}
- spin_lock(root_lock);
- if (dev_xmit_complete(ret)) {
- /* Driver sent out skb successfully or skb was consumed */
- ret = qdisc_qlen(q);
- } else {
+ if (root_lock)
+ spin_lock(root_lock);
+
+ if (!dev_xmit_complete(ret)) {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely(ret != NETDEV_TX_BUSY))
net_warn_ratelimited("BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen);
- ret = dev_requeue_skb(skb, q);
+ dev_requeue_skb(skb, q);
+ return false;
}
if (ret && netif_xmit_frozen_or_stopped(txq))
- ret = 0;
+ return false;
- return ret;
+ return true;
}
/*
@@ -230,20 +371,22 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
* >0 - queue is not empty.
*
*/
-static inline int qdisc_restart(struct Qdisc *q, int *packets)
+static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
+ spinlock_t *root_lock = NULL;
struct netdev_queue *txq;
struct net_device *dev;
- spinlock_t *root_lock;
struct sk_buff *skb;
bool validate;
/* Dequeue packet */
skb = dequeue_skb(q, &validate, packets);
if (unlikely(!skb))
- return 0;
+ return false;
+
+ if (!(q->flags & TCQ_F_NOLOCK))
+ root_lock = qdisc_lock(q);
- root_lock = qdisc_lock(q);
dev = qdisc_dev(q);
txq = skb_get_tx_queue(dev, skb);
@@ -267,8 +410,6 @@ void __qdisc_run(struct Qdisc *q)
break;
}
}
-
- qdisc_run_end(q);
}
unsigned long dev_trans_start(struct net_device *dev)
@@ -369,7 +510,7 @@ void netif_carrier_on(struct net_device *dev)
if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
if (dev->reg_state == NETREG_UNINITIALIZED)
return;
- atomic_inc(&dev->carrier_changes);
+ atomic_inc(&dev->carrier_up_count);
linkwatch_fire_event(dev);
if (netif_running(dev))
__netdev_watchdog_up(dev);
@@ -388,7 +529,7 @@ void netif_carrier_off(struct net_device *dev)
if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
if (dev->reg_state == NETREG_UNINITIALIZED)
return;
- atomic_inc(&dev->carrier_changes);
+ atomic_inc(&dev->carrier_down_count);
linkwatch_fire_event(dev);
}
}
@@ -437,7 +578,8 @@ struct Qdisc noop_qdisc = {
};
EXPORT_SYMBOL(noop_qdisc);
-static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
/* register_qdisc() assigns a default of noop_enqueue if unset,
* but __dev_queue_xmit() treats noqueue only as such
@@ -468,93 +610,99 @@ static const u8 prio2band[TC_PRIO_MAX + 1] = {
/*
* Private data for a pfifo_fast scheduler containing:
- * - queues for the three band
- * - bitmap indicating which of the bands contain skbs
+ * - rings for priority bands
*/
struct pfifo_fast_priv {
- u32 bitmap;
- struct qdisc_skb_head q[PFIFO_FAST_BANDS];
+ struct skb_array q[PFIFO_FAST_BANDS];
};
-/*
- * Convert a bitmap to the first band number where an skb is queued, where:
- * bitmap=0 means there are no skbs on any band.
- * bitmap=1 means there is an skb on band 0.
- * bitmap=7 means there are skbs on all 3 bands, etc.
- */
-static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
-
-static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
- int band)
+static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
+ int band)
{
- return priv->q + band;
+ return &priv->q[band];
}
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
- if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
- int band = prio2band[skb->priority & TC_PRIO_MAX];
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct qdisc_skb_head *list = band2list(priv, band);
-
- priv->bitmap |= (1 << band);
- qdisc->q.qlen++;
- return __qdisc_enqueue_tail(skb, qdisc, list);
- }
+ int band = prio2band[skb->priority & TC_PRIO_MAX];
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+ struct skb_array *q = band2list(priv, band);
+ int err;
- return qdisc_drop(skb, qdisc, to_free);
+ err = skb_array_produce(q, skb);
+
+ if (unlikely(err))
+ return qdisc_drop_cpu(skb, qdisc, to_free);
+
+ qdisc_qstats_cpu_qlen_inc(qdisc);
+ qdisc_qstats_cpu_backlog_inc(qdisc, skb);
+ return NET_XMIT_SUCCESS;
}
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- int band = bitmap2band[priv->bitmap];
+ struct sk_buff *skb = NULL;
+ int band;
- if (likely(band >= 0)) {
- struct qdisc_skb_head *qh = band2list(priv, band);
- struct sk_buff *skb = __qdisc_dequeue_head(qh);
+ for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+ struct skb_array *q = band2list(priv, band);
- if (likely(skb != NULL)) {
- qdisc_qstats_backlog_dec(qdisc, skb);
- qdisc_bstats_update(qdisc, skb);
- }
+ if (__skb_array_empty(q))
+ continue;
- qdisc->q.qlen--;
- if (qh->qlen == 0)
- priv->bitmap &= ~(1 << band);
-
- return skb;
+ skb = skb_array_consume_bh(q);
+ }
+ if (likely(skb)) {
+ qdisc_qstats_cpu_backlog_dec(qdisc, skb);
+ qdisc_bstats_cpu_update(qdisc, skb);
+ qdisc_qstats_cpu_qlen_dec(qdisc);
}
- return NULL;
+ return skb;
}
static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- int band = bitmap2band[priv->bitmap];
+ struct sk_buff *skb = NULL;
+ int band;
- if (band >= 0) {
- struct qdisc_skb_head *qh = band2list(priv, band);
+ for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+ struct skb_array *q = band2list(priv, band);
- return qh->head;
+ skb = __skb_array_peek(q);
}
- return NULL;
+ return skb;
}
static void pfifo_fast_reset(struct Qdisc *qdisc)
{
- int prio;
+ int i, band;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
- __qdisc_reset_queue(band2list(priv, prio));
+ for (band = 0; band < PFIFO_FAST_BANDS; band++) {
+ struct skb_array *q = band2list(priv, band);
+ struct sk_buff *skb;
- priv->bitmap = 0;
- qdisc->qstats.backlog = 0;
- qdisc->q.qlen = 0;
+ /* A NULL ring is possible when the destroy path runs because
+ * skb_array_init() failed in pfifo_fast_init().
+ */
+ if (!q->ring.queue)
+ continue;
+
+ while ((skb = skb_array_consume_bh(q)) != NULL)
+ kfree_skb(skb);
+ }
+
+ for_each_possible_cpu(i) {
+ struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+
+ q->backlog = 0;
+ q->qlen = 0;
+ }
}
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
@@ -570,19 +718,68 @@ nla_put_failure:
return -1;
}
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
- int prio;
+ unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+ int prio;
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
- qdisc_skb_head_init(band2list(priv, prio));
+ /* guard against zero-length rings */
+ if (!qlen)
+ return -EINVAL;
+
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+ struct skb_array *q = band2list(priv, prio);
+ int err;
+
+ err = skb_array_init(q, qlen, GFP_KERNEL);
+ if (err)
+ return -ENOMEM;
+ }
/* Can by-pass the queue discipline */
qdisc->flags |= TCQ_F_CAN_BYPASS;
return 0;
}
+static void pfifo_fast_destroy(struct Qdisc *sch)
+{
+ struct pfifo_fast_priv *priv = qdisc_priv(sch);
+ int prio;
+
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+ struct skb_array *q = band2list(priv, prio);
+
+ /* A NULL ring is possible when the destroy path runs after a
+ * failed skb_array_init() in pfifo_fast_init().
+ */
+ if (!q->ring.queue)
+ continue;
+ /* Destroy the ring, but there is no need to kfree_skb() here:
+ * pfifo_fast_reset() has already drained it.
+ */
+ ptr_ring_cleanup(&q->ring, NULL);
+ }
+}
+
+static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
+ unsigned int new_len)
+{
+ struct pfifo_fast_priv *priv = qdisc_priv(sch);
+ struct skb_array *bands[PFIFO_FAST_BANDS];
+ int prio;
+
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+ struct skb_array *q = band2list(priv, prio);
+
+ bands[prio] = q;
+ }
+
+ return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
+ GFP_KERNEL);
+}
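
Resizing every band with a single skb_array_resize_multiple() call is deliberate: the multi-ring helper is expected to allocate all replacement rings before swapping any of them in, so an allocation failure leaves the old rings usable (a hedged reading of the ptr_ring resize helpers; the loop above only collects the per-band pointers). Illustrative call shape, names hypothetical:

	struct skb_array a, b, c;	/* assume already initialized */
	struct skb_array *rings[] = { &a, &b, &c };

	if (skb_array_resize_multiple(rings, ARRAY_SIZE(rings),
				      new_len, GFP_KERNEL))
		pr_warn("resize failed, old rings remain valid\n");
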
+
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.id = "pfifo_fast",
.priv_size = sizeof(struct pfifo_fast_priv),
@@ -590,9 +787,12 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.dequeue = pfifo_fast_dequeue,
.peek = pfifo_fast_peek,
.init = pfifo_fast_init,
+ .destroy = pfifo_fast_destroy,
.reset = pfifo_fast_reset,
.dump = pfifo_fast_dump,
+ .change_tx_queue_len = pfifo_fast_change_tx_queue_len,
.owner = THIS_MODULE,
+ .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);
@@ -600,7 +800,8 @@ static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- const struct Qdisc_ops *ops)
+ const struct Qdisc_ops *ops,
+ struct netlink_ext_ack *extack)
{
void *p;
struct Qdisc *sch;
@@ -609,6 +810,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
struct net_device *dev;
if (!dev_queue) {
+ NL_SET_ERR_MSG(extack, "No device queue given");
err = -EINVAL;
goto errout;
}
@@ -630,9 +832,24 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
sch->padded = (char *) sch - (char *) p;
}
+ __skb_queue_head_init(&sch->gso_skb);
+ __skb_queue_head_init(&sch->skb_bad_txq);
qdisc_skb_head_init(&sch->q);
spin_lock_init(&sch->q.lock);
+ if (ops->static_flags & TCQ_F_CPUSTATS) {
+ sch->cpu_bstats =
+ netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+ if (!sch->cpu_bstats)
+ goto errout1;
+
+ sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+ if (!sch->cpu_qstats) {
+ free_percpu(sch->cpu_bstats);
+ goto errout1;
+ }
+ }
+
spin_lock_init(&sch->busylock);
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
@@ -642,6 +859,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
dev->qdisc_running_key ?: &qdisc_running_key);
sch->ops = ops;
+ sch->flags = ops->static_flags;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
@@ -649,27 +867,32 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
refcount_set(&sch->refcnt, 1);
return sch;
+errout1:
+ kfree(p);
errout:
return ERR_PTR(err);
}
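
Since sch->flags is seeded from ops->static_flags just above, a qdisc now opts into lockless operation and per-CPU statistics declaratively, and qdisc_alloc() provisions cpu_bstats/cpu_qstats on its behalf. A hedged sketch of the minimal ops declaration this enables (the handlers are hypothetical; any real enqueue/dequeue must use the _cpu stats helpers, as pfifo_fast does above):

	static struct Qdisc_ops demo_qdisc_ops __read_mostly = {
		.id		= "demo",	/* hypothetical qdisc */
		.priv_size	= 0,
		.enqueue	= demo_enqueue,
		.dequeue	= demo_dequeue,
		.static_flags	= TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
		.owner		= THIS_MODULE,
	};
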
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
- unsigned int parentid)
+ unsigned int parentid,
+ struct netlink_ext_ack *extack)
{
struct Qdisc *sch;
- if (!try_module_get(ops->owner))
+ if (!try_module_get(ops->owner)) {
+ NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
return NULL;
+ }
- sch = qdisc_alloc(dev_queue, ops);
+ sch = qdisc_alloc(dev_queue, ops, extack);
if (IS_ERR(sch)) {
module_put(ops->owner);
return NULL;
}
sch->parent = parentid;
- if (!ops->init || ops->init(sch, NULL) == 0)
+ if (!ops->init || ops->init(sch, NULL, extack) == 0)
return sch;
qdisc_destroy(sch);
@@ -682,23 +905,27 @@ EXPORT_SYMBOL(qdisc_create_dflt);
void qdisc_reset(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
+ struct sk_buff *skb, *tmp;
if (ops->reset)
ops->reset(qdisc);
- kfree_skb(qdisc->skb_bad_txq);
- qdisc->skb_bad_txq = NULL;
+ skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
+ __skb_unlink(skb, &qdisc->gso_skb);
+ kfree_skb_list(skb);
+ }
- if (qdisc->gso_skb) {
- kfree_skb_list(qdisc->gso_skb);
- qdisc->gso_skb = NULL;
+ skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
+ __skb_unlink(skb, &qdisc->skb_bad_txq);
+ kfree_skb_list(skb);
}
+
qdisc->q.qlen = 0;
qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);
-static void qdisc_free(struct Qdisc *qdisc)
+void qdisc_free(struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc)) {
free_percpu(qdisc->cpu_bstats);
@@ -711,6 +938,7 @@ static void qdisc_free(struct Qdisc *qdisc)
void qdisc_destroy(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
+ struct sk_buff *skb, *tmp;
if (qdisc->flags & TCQ_F_BUILTIN ||
!refcount_dec_and_test(&qdisc->refcnt))
@@ -730,8 +958,16 @@ void qdisc_destroy(struct Qdisc *qdisc)
module_put(ops->owner);
dev_put(qdisc_dev(qdisc));
- kfree_skb_list(qdisc->gso_skb);
- kfree_skb(qdisc->skb_bad_txq);
+ skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
+ __skb_unlink(skb, &qdisc->gso_skb);
+ kfree_skb_list(skb);
+ }
+
+ skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
+ __skb_unlink(skb, &qdisc->skb_bad_txq);
+ kfree_skb_list(skb);
+ }
+
qdisc_free(qdisc);
}
EXPORT_SYMBOL(qdisc_destroy);
@@ -746,10 +982,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
root_lock = qdisc_lock(oqdisc);
spin_lock_bh(root_lock);
- /* Prune old scheduler */
- if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
- qdisc_reset(oqdisc);
-
/* ... and graft new one */
if (qdisc == NULL)
qdisc = &noop_qdisc;
@@ -772,7 +1004,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
if (dev->priv_flags & IFF_NO_QUEUE)
ops = &noqueue_qdisc_ops;
- qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
+ qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
if (!qdisc) {
netdev_info(dev, "activation failed\n");
return;
@@ -795,7 +1027,7 @@ static void attach_default_qdiscs(struct net_device *dev)
dev->qdisc = txq->qdisc_sleeping;
qdisc_refcount_inc(dev->qdisc);
} else {
- qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
+ qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
if (qdisc) {
dev->qdisc = qdisc;
qdisc->ops->attach(qdisc);
@@ -885,14 +1117,18 @@ static bool some_qdisc_is_busy(struct net_device *dev)
dev_queue = netdev_get_tx_queue(dev, i);
q = dev_queue->qdisc_sleeping;
- root_lock = qdisc_lock(q);
- spin_lock_bh(root_lock);
+ if (q->flags & TCQ_F_NOLOCK) {
+ val = test_bit(__QDISC_STATE_SCHED, &q->state);
+ } else {
+ root_lock = qdisc_lock(q);
+ spin_lock_bh(root_lock);
- val = (qdisc_is_running(q) ||
- test_bit(__QDISC_STATE_SCHED, &q->state));
+ val = (qdisc_is_running(q) ||
+ test_bit(__QDISC_STATE_SCHED, &q->state));
- spin_unlock_bh(root_lock);
+ spin_unlock_bh(root_lock);
+ }
if (val)
return true;
@@ -900,6 +1136,16 @@ static bool some_qdisc_is_busy(struct net_device *dev)
return false;
}
+static void dev_qdisc_reset(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *none)
+{
+ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+ if (qdisc)
+ qdisc_reset(qdisc);
+}
+
/**
* dev_deactivate_many - deactivate transmissions on several devices
* @head: list of devices to deactivate
@@ -910,7 +1156,6 @@ static bool some_qdisc_is_busy(struct net_device *dev)
void dev_deactivate_many(struct list_head *head)
{
struct net_device *dev;
- bool sync_needed = false;
list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -920,20 +1165,25 @@ void dev_deactivate_many(struct list_head *head)
&noop_qdisc);
dev_watchdog_down(dev);
- sync_needed |= !dev->dismantle;
}
/* Wait for outstanding qdisc-less dev_queue_xmit calls.
* This is avoided if all devices are in dismantle phase :
* Caller will call synchronize_net() for us
*/
- if (sync_needed)
- synchronize_net();
+ synchronize_net();
/* Wait for outstanding qdisc_run calls. */
- list_for_each_entry(dev, head, close_list)
+ list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev))
yield();
+ /* The new qdisc is assigned at this point, so we can safely
+ * unwind stale skb lists and qdisc statistics.
+ */
+ netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
+ if (dev_ingress_queue(dev))
+ dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
+ }
}
void dev_deactivate(struct net_device *dev)
@@ -946,6 +1196,39 @@ void dev_deactivate(struct net_device *dev)
}
EXPORT_SYMBOL(dev_deactivate);
+static int qdisc_change_tx_queue_len(struct net_device *dev,
+ struct netdev_queue *dev_queue)
+{
+ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+ const struct Qdisc_ops *ops = qdisc->ops;
+
+ if (ops->change_tx_queue_len)
+ return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
+ return 0;
+}
+
+int dev_qdisc_change_tx_queue_len(struct net_device *dev)
+{
+ bool up = dev->flags & IFF_UP;
+ unsigned int i;
+ int ret = 0;
+
+ if (up)
+ dev_deactivate(dev);
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
+
+ /* TODO: revert changes on a partial failure */
+ if (ret)
+ break;
+ }
+
+ if (up)
+ dev_activate(dev);
+ return ret;
+}
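
The expected caller of dev_qdisc_change_tx_queue_len() is the core tx_queue_len setter, which is not part of this hunk; a hedged sketch of that calling convention, with a best-effort rollback since the loop above stops at the first failing queue:

	/* Hypothetical caller: commit a new tx_queue_len, then resize rings. */
	static int demo_set_tx_queue_len(struct net_device *dev, unsigned int len)
	{
		unsigned int old_len = dev->tx_queue_len;
		int err;

		dev->tx_queue_len = len;
		err = dev_qdisc_change_tx_queue_len(dev);
		if (err)
			dev->tx_queue_len = old_len;	/* best-effort rollback */
		return err;
	}
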
+
static void dev_init_scheduler_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_qdisc)
@@ -954,6 +1237,8 @@ static void dev_init_scheduler_queue(struct net_device *dev,
rcu_assign_pointer(dev_queue->qdisc, qdisc);
dev_queue->qdisc_sleeping = qdisc;
+ __skb_queue_head_init(&qdisc->gso_skb);
+ __skb_queue_head_init(&qdisc->skb_bad_txq);
}
void dev_init_scheduler(struct net_device *dev)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index bc30f9186ac6..cbe4831f46f4 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -306,12 +306,13 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
struct tc_gred_sopt *sopt;
int i;
- if (dps == NULL)
+ if (!dps)
return -EINVAL;
sopt = nla_data(dps);
- if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
+ if (sopt->DPs > MAX_DPs || sopt->DPs == 0 ||
+ sopt->def_DP >= sopt->DPs)
return -EINVAL;
sch_tree_lock(sch);
@@ -391,7 +392,8 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
[TCA_GRED_LIMIT] = { .type = NLA_U32 },
};
-static int gred_change(struct Qdisc *sch, struct nlattr *opt)
+static int gred_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_qopt *ctl;
@@ -465,12 +467,13 @@ errout:
return err;
}
-static int gred_init(struct Qdisc *sch, struct nlattr *opt)
+static int gred_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_GRED_MAX + 1];
int err;
- if (opt == NULL)
+ if (!opt)
return -EINVAL;
err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d04068a97d81..3ae9877ea205 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -921,7 +921,8 @@ static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
- struct nlattr **tca, unsigned long *arg)
+ struct nlattr **tca, unsigned long *arg,
+ struct netlink_ext_ack *extack)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)*arg;
@@ -1033,7 +1034,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL)
return -ENOBUFS;
- err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+ err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) {
kfree(cl);
return err;
@@ -1061,8 +1062,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->cl_common.classid = classid;
cl->sched = q;
cl->cl_parent = parent;
- cl->qdisc = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, classid);
+ cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ classid, NULL);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
else
@@ -1176,7 +1177,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1184,7 +1185,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
return -EINVAL;
if (new == NULL) {
new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- cl->cl_common.classid);
+ cl->cl_common.classid, NULL);
if (new == NULL)
new = &noop_qdisc;
}
@@ -1246,7 +1247,8 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
cl->filter_cnt--;
}
-static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
+ struct netlink_ext_ack *extack)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1388,7 +1390,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
}
static int
-hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct tc_hfsc_qopt *qopt;
@@ -1396,7 +1399,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
qdisc_watchdog_init(&q->watchdog, sch);
- if (opt == NULL || nla_len(opt) < sizeof(*qopt))
+ if (!opt || nla_len(opt) < sizeof(*qopt))
return -EINVAL;
qopt = nla_data(opt);
@@ -1406,14 +1409,14 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
return err;
q->eligible = RB_ROOT;
- err = tcf_block_get(&q->root.block, &q->root.filter_list, sch);
+ err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
if (err)
return err;
q->root.cl_common.classid = sch->handle;
q->root.sched = q;
q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- sch->handle);
+ sch->handle, NULL);
if (q->root.qdisc == NULL)
q->root.qdisc = &noop_qdisc;
else
@@ -1429,7 +1432,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
}
static int
-hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct tc_hfsc_qopt *qopt;
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 73a53c08091b..bce2632212d3 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -504,7 +504,8 @@ static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
[TCA_HHF_NON_HH_WEIGHT] = { .type = NLA_U32 },
};
-static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HHF_MAX + 1];
@@ -571,7 +572,8 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct hhf_sched_data *q = qdisc_priv(sch);
int i;
@@ -589,7 +591,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
q->hhf_non_hh_weight = 2;
if (opt) {
- int err = hhf_change(sch, opt);
+ int err = hhf_change(sch, opt, extack);
if (err)
return err;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index fa0380730ff0..1ea9846cc6ce 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1017,7 +1017,8 @@ static void htb_work_func(struct work_struct *work)
rcu_read_unlock();
}
-static int htb_init(struct Qdisc *sch, struct nlattr *opt)
+static int htb_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct htb_sched *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HTB_MAX + 1];
@@ -1031,7 +1032,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
if (!opt)
return -EINVAL;
- err = tcf_block_get(&q->block, &q->filter_list, sch);
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
@@ -1171,7 +1172,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct htb_class *cl = (struct htb_class *)arg;
@@ -1179,7 +1180,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
return -EINVAL;
if (new == NULL &&
(new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- cl->common.classid)) == NULL)
+ cl->common.classid, extack)) == NULL)
return -ENOBUFS;
*old = qdisc_replace(sch, new, &cl->un.leaf.q);
@@ -1289,7 +1290,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
if (!cl->level && htb_parent_last_child(cl)) {
new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- cl->parent->common.classid);
+ cl->parent->common.classid,
+ NULL);
last_child = 1;
}
@@ -1326,7 +1328,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
static int htb_change_class(struct Qdisc *sch, u32 classid,
u32 parentid, struct nlattr **tca,
- unsigned long *arg)
+ unsigned long *arg, struct netlink_ext_ack *extack)
{
int err = -EINVAL;
struct htb_sched *q = qdisc_priv(sch);
@@ -1356,10 +1358,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* Keeping backward compatible with rate_table based iproute2 tc */
if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
- qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));
+ qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
+ NULL));
if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
- qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));
+ qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
+ NULL));
if (!cl) { /* new class */
struct Qdisc *new_q;
@@ -1394,7 +1398,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!cl)
goto failure;
- err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+ err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) {
kfree(cl);
goto failure;
@@ -1423,8 +1427,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
* so that can't be used inside of sch_tree_lock
* -- thanks to Karlis Peisenieks
*/
- new_q = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, classid);
+ new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ classid, NULL);
sch_tree_lock(sch);
if (parent && !parent->level) {
unsigned int qlen = parent->un.leaf.q->q.qlen;
@@ -1524,7 +1528,8 @@ failure:
return err;
}
-static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
+ struct netlink_ext_ack *extack)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index fc1286f499c1..ce3f55259d0d 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -48,7 +48,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}
-static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct ingress_sched_data *q = qdisc_priv(sch);
@@ -60,13 +61,27 @@ static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
struct mini_Qdisc_pair *miniqp = priv;
mini_qdisc_pair_swap(miniqp, tp_head);
+}
+
+static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
+{
+ struct ingress_sched_data *q = qdisc_priv(sch);
+
+ q->block_info.block_index = block_index;
+}
+
+static u32 ingress_ingress_block_get(struct Qdisc *sch)
+{
+ struct ingress_sched_data *q = qdisc_priv(sch);
+
+ return q->block_info.block_index;
}
-static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
+static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct ingress_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
- int err;
net_inc_ingress_queue();
@@ -76,13 +91,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
q->block_info.chain_head_change = clsact_chain_head_change;
q->block_info.chain_head_change_priv = &q->miniqp;
- err = tcf_block_get_ext(&q->block, sch, &q->block_info);
- if (err)
- return err;
-
- sch->flags |= TCQ_F_CPUSTATS;
-
- return 0;
+ return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
}
static void ingress_destroy(struct Qdisc *sch)
@@ -118,13 +127,16 @@ static const struct Qdisc_class_ops ingress_class_ops = {
};
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
- .cl_ops = &ingress_class_ops,
- .id = "ingress",
- .priv_size = sizeof(struct ingress_sched_data),
- .init = ingress_init,
- .destroy = ingress_destroy,
- .dump = ingress_dump,
- .owner = THIS_MODULE,
+ .cl_ops = &ingress_class_ops,
+ .id = "ingress",
+ .priv_size = sizeof(struct ingress_sched_data),
+ .static_flags = TCQ_F_CPUSTATS,
+ .init = ingress_init,
+ .destroy = ingress_destroy,
+ .dump = ingress_dump,
+ .ingress_block_set = ingress_ingress_block_set,
+ .ingress_block_get = ingress_ingress_block_get,
+ .owner = THIS_MODULE,
};
struct clsact_sched_data {
@@ -153,7 +165,8 @@ static unsigned long clsact_bind_filter(struct Qdisc *sch,
return clsact_find(sch, classid);
}
-static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct clsact_sched_data *q = qdisc_priv(sch);
@@ -167,7 +180,36 @@ static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
}
}
-static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
+static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
+{
+ struct clsact_sched_data *q = qdisc_priv(sch);
+
+ q->ingress_block_info.block_index = block_index;
+}
+
+static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
+{
+ struct clsact_sched_data *q = qdisc_priv(sch);
+
+ q->egress_block_info.block_index = block_index;
+}
+
+static u32 clsact_ingress_block_get(struct Qdisc *sch)
+{
+ struct clsact_sched_data *q = qdisc_priv(sch);
+
+ return q->ingress_block_info.block_index;
+}
+
+static u32 clsact_egress_block_get(struct Qdisc *sch)
+{
+ struct clsact_sched_data *q = qdisc_priv(sch);
+
+ return q->egress_block_info.block_index;
+}
+
+static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct clsact_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
@@ -182,7 +224,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
q->ingress_block_info.chain_head_change = clsact_chain_head_change;
q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
- err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info);
+ err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
+ extack);
if (err)
return err;
@@ -192,13 +235,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
q->egress_block_info.chain_head_change = clsact_chain_head_change;
q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
- err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
- if (err)
- return err;
-
- sch->flags |= TCQ_F_CPUSTATS;
-
- return 0;
+ return tcf_block_get_ext(&q->egress_block, sch,
+ &q->egress_block_info, extack);
}
static void clsact_destroy(struct Qdisc *sch)
@@ -222,13 +259,18 @@ static const struct Qdisc_class_ops clsact_class_ops = {
};
static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
- .cl_ops = &clsact_class_ops,
- .id = "clsact",
- .priv_size = sizeof(struct clsact_sched_data),
- .init = clsact_init,
- .destroy = clsact_destroy,
- .dump = ingress_dump,
- .owner = THIS_MODULE,
+ .cl_ops = &clsact_class_ops,
+ .id = "clsact",
+ .priv_size = sizeof(struct clsact_sched_data),
+ .static_flags = TCQ_F_CPUSTATS,
+ .init = clsact_init,
+ .destroy = clsact_destroy,
+ .dump = ingress_dump,
+ .ingress_block_set = clsact_ingress_block_set,
+ .egress_block_set = clsact_egress_block_set,
+ .ingress_block_get = clsact_ingress_block_get,
+ .egress_block_get = clsact_egress_block_get,
+ .owner = THIS_MODULE,
};
static int __init ingress_module_init(void)
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 213b586a06a0..f062a18e9162 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -17,6 +17,7 @@
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/sch_generic.h>
struct mq_sched {
struct Qdisc **qdiscs;
@@ -35,7 +36,8 @@ static void mq_destroy(struct Qdisc *sch)
kfree(priv->qdiscs);
}
-static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+static int mq_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = qdisc_dev(sch);
struct mq_sched *priv = qdisc_priv(sch);
@@ -59,7 +61,8 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
dev_queue = netdev_get_tx_queue(dev, ntx);
qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
TC_H_MAKE(TC_H_MAJ(sch->handle),
- TC_H_MIN(ntx + 1)));
+ TC_H_MIN(ntx + 1)),
+ extack);
if (!qdisc)
return -ENOMEM;
priv->qdiscs[ntx] = qdisc;
@@ -97,23 +100,42 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
struct net_device *dev = qdisc_dev(sch);
struct Qdisc *qdisc;
unsigned int ntx;
+ __u32 qlen = 0;
sch->q.qlen = 0;
memset(&sch->bstats, 0, sizeof(sch->bstats));
memset(&sch->qstats, 0, sizeof(sch->qstats));
+ /* MQ supports lockless qdiscs. However, statistics accounting needs
+ * to handle all, none, or a mix of locked and unlocked child
+ * qdiscs. Per-CPU stats are added to the counters in-band; locked
+ * qdisc totals are added at the end.
+ */
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
spin_lock_bh(qdisc_lock(qdisc));
- sch->q.qlen += qdisc->q.qlen;
- sch->bstats.bytes += qdisc->bstats.bytes;
- sch->bstats.packets += qdisc->bstats.packets;
- sch->qstats.backlog += qdisc->qstats.backlog;
- sch->qstats.drops += qdisc->qstats.drops;
- sch->qstats.requeues += qdisc->qstats.requeues;
- sch->qstats.overlimits += qdisc->qstats.overlimits;
+
+ if (qdisc_is_percpu_stats(qdisc)) {
+ qlen = qdisc_qlen_sum(qdisc);
+ __gnet_stats_copy_basic(NULL, &sch->bstats,
+ qdisc->cpu_bstats,
+ &qdisc->bstats);
+ __gnet_stats_copy_queue(&sch->qstats,
+ qdisc->cpu_qstats,
+ &qdisc->qstats, qlen);
+ } else {
+ sch->q.qlen += qdisc->q.qlen;
+ sch->bstats.bytes += qdisc->bstats.bytes;
+ sch->bstats.packets += qdisc->bstats.packets;
+ sch->qstats.backlog += qdisc->qstats.backlog;
+ sch->qstats.drops += qdisc->qstats.drops;
+ sch->qstats.requeues += qdisc->qstats.requeues;
+ sch->qstats.overlimits += qdisc->qstats.overlimits;
+ }
+
spin_unlock_bh(qdisc_lock(qdisc));
}
+
return 0;
}
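
qdisc_qlen_sum(), used above for the per-CPU case, has to fold the per-CPU queue lengths together because no single counter is authoritative under TCQ_F_NOLOCK. A plausible shape for such a helper, assuming the per-CPU qstats provisioned in qdisc_alloc() (sketch, not the verbatim header definition):

	static inline __u32 demo_qlen_sum(const struct Qdisc *q)
	{
		__u32 qlen = 0;
		int cpu;

		if (!qdisc_is_percpu_stats(q))
			return q->q.qlen;

		for_each_possible_cpu(cpu)
			qlen += per_cpu_ptr(q->cpu_qstats, cpu)->qlen;

		return qlen;	/* a snapshot; producers may race with us */
	}
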
@@ -134,7 +156,7 @@ static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
}
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
struct net_device *dev = qdisc_dev(sch);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b85885a9d8a1..0e9d761cdd80 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -132,7 +132,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
return 0;
}
-static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = qdisc_dev(sch);
struct mqprio_sched *priv = qdisc_priv(sch);
@@ -229,7 +230,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
qdisc = qdisc_create_dflt(dev_queue,
get_default_qdisc_ops(dev, i),
TC_H_MAKE(TC_H_MAJ(sch->handle),
- TC_H_MIN(i + 1)));
+ TC_H_MIN(i + 1)), extack);
if (!qdisc)
return -ENOMEM;
@@ -319,7 +320,7 @@ static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
}
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct net_device *dev = qdisc_dev(sch);
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
@@ -388,22 +389,40 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
struct tc_mqprio_qopt opt = { 0 };
struct Qdisc *qdisc;
- unsigned int i;
+ unsigned int ntx, tc;
sch->q.qlen = 0;
memset(&sch->bstats, 0, sizeof(sch->bstats));
memset(&sch->qstats, 0, sizeof(sch->qstats));
- for (i = 0; i < dev->num_tx_queues; i++) {
- qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+ /* MQ supports lockless qdiscs. However, statistics accounting needs
+ * to handle all, none, or a mix of locked and unlocked child
+ * qdiscs. Per-CPU stats are added to the counters in-band; locked
+ * qdisc totals are added at the end.
+ */
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
spin_lock_bh(qdisc_lock(qdisc));
- sch->q.qlen += qdisc->q.qlen;
- sch->bstats.bytes += qdisc->bstats.bytes;
- sch->bstats.packets += qdisc->bstats.packets;
- sch->qstats.backlog += qdisc->qstats.backlog;
- sch->qstats.drops += qdisc->qstats.drops;
- sch->qstats.requeues += qdisc->qstats.requeues;
- sch->qstats.overlimits += qdisc->qstats.overlimits;
+
+ if (qdisc_is_percpu_stats(qdisc)) {
+ __u32 qlen = qdisc_qlen_sum(qdisc);
+
+ __gnet_stats_copy_basic(NULL, &sch->bstats,
+ qdisc->cpu_bstats,
+ &qdisc->bstats);
+ __gnet_stats_copy_queue(&sch->qstats,
+ qdisc->cpu_qstats,
+ &qdisc->qstats, qlen);
+ } else {
+ sch->q.qlen += qdisc->q.qlen;
+ sch->bstats.bytes += qdisc->bstats.bytes;
+ sch->bstats.packets += qdisc->bstats.packets;
+ sch->qstats.backlog += qdisc->qstats.backlog;
+ sch->qstats.drops += qdisc->qstats.drops;
+ sch->qstats.requeues += qdisc->qstats.requeues;
+ sch->qstats.overlimits += qdisc->qstats.overlimits;
+ }
+
spin_unlock_bh(qdisc_lock(qdisc));
}
@@ -411,9 +430,9 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
opt.hw = priv->hw_offload;
- for (i = 0; i < netdev_get_num_tc(dev); i++) {
- opt.count[i] = dev->tc_to_txq[i].count;
- opt.offset[i] = dev->tc_to_txq[i].offset;
+ for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
+ opt.count[tc] = dev->tc_to_txq[tc].count;
+ opt.offset[tc] = dev->tc_to_txq[tc].offset;
}
if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
@@ -495,7 +514,6 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
if (cl >= TC_H_MIN_PRIORITY) {
int i;
__u32 qlen = 0;
- struct Qdisc *qdisc;
struct gnet_stats_queue qstats = {0};
struct gnet_stats_basic_packed bstats = {0};
struct net_device *dev = qdisc_dev(sch);
@@ -511,18 +529,26 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
for (i = tc.offset; i < tc.offset + tc.count; i++) {
struct netdev_queue *q = netdev_get_tx_queue(dev, i);
+ struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+ struct gnet_stats_queue __percpu *cpu_qstats = NULL;
- qdisc = rtnl_dereference(q->qdisc);
spin_lock_bh(qdisc_lock(qdisc));
- qlen += qdisc->q.qlen;
- bstats.bytes += qdisc->bstats.bytes;
- bstats.packets += qdisc->bstats.packets;
- qstats.backlog += qdisc->qstats.backlog;
- qstats.drops += qdisc->qstats.drops;
- qstats.requeues += qdisc->qstats.requeues;
- qstats.overlimits += qdisc->qstats.overlimits;
+ if (qdisc_is_percpu_stats(qdisc)) {
+ cpu_bstats = qdisc->cpu_bstats;
+ cpu_qstats = qdisc->cpu_qstats;
+ }
+
+ qlen += qdisc_qlen_sum(qdisc);
+ __gnet_stats_copy_basic(NULL, &bstats,
+ cpu_bstats, &qdisc->bstats);
+ __gnet_stats_copy_queue(&qstats,
+ cpu_qstats,
+ &qdisc->qstats,
+ qlen);
spin_unlock_bh(qdisc_lock(qdisc));
}
+
/* Reclaim root sleeping lock before completing stats */
if (d->lock)
spin_lock_bh(d->lock);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 012216386c0b..1da7ea8de0ad 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -180,7 +180,8 @@ multiq_destroy(struct Qdisc *sch)
kfree(q->queues);
}
-static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct multiq_sched_data *q = qdisc_priv(sch);
struct tc_multiq_qopt *qopt;
@@ -215,7 +216,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
child = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops,
TC_H_MAKE(sch->handle,
- i + 1));
+ i + 1), extack);
if (child) {
sch_tree_lock(sch);
old = q->queues[i];
@@ -236,17 +237,18 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct multiq_sched_data *q = qdisc_priv(sch);
int i, err;
q->queues = NULL;
- if (opt == NULL)
+ if (!opt)
return -EINVAL;
- err = tcf_block_get(&q->block, &q->filter_list, sch);
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
@@ -258,7 +260,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < q->max_bands; i++)
q->queues[i] = &noop_qdisc;
- return multiq_tune(sch, opt);
+ return multiq_tune(sch, opt, extack);
}
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -281,7 +283,7 @@ nla_put_failure:
}
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct multiq_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
@@ -369,7 +371,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
-static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct multiq_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index dd70924cbcdf..7bbc13b8ca47 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -893,7 +893,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
}
/* Parse netlink message to set options */
-static int netem_change(struct Qdisc *sch, struct nlattr *opt)
+static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct netem_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_NETEM_MAX + 1];
@@ -984,7 +985,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
return ret;
}
-static int netem_init(struct Qdisc *sch, struct nlattr *opt)
+static int netem_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct netem_sched_data *q = qdisc_priv(sch);
int ret;
@@ -995,7 +997,7 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
return -EINVAL;
q->loss_model = CLG_RANDOM;
- ret = netem_change(sch, opt);
+ ret = netem_change(sch, opt, extack);
if (ret)
pr_info("netem: change failed\n");
return ret;
@@ -1157,7 +1159,7 @@ static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct netem_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 776c694c77c7..18d30bb86881 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -181,7 +181,8 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
};
-static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+static int pie_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_PIE_MAX + 1];
@@ -439,7 +440,8 @@ static void pie_timer(struct timer_list *t)
}
-static int pie_init(struct Qdisc *sch, struct nlattr *opt)
+static int pie_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct pie_sched_data *q = qdisc_priv(sch);
@@ -451,7 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
timer_setup(&q->adapt_timer, pie_timer, 0);
if (opt) {
- int err = pie_change(sch, opt);
+ int err = pie_change(sch, opt, extack);
if (err)
return err;
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 1c6cbab3e7b9..5619d2eb17b6 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -123,7 +123,8 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
return qdisc_dequeue_head(sch);
}
-static int plug_init(struct Qdisc *sch, struct nlattr *opt)
+static int plug_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct plug_sched_data *q = qdisc_priv(sch);
@@ -158,7 +159,8 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
* command is received (just act as a pass-thru queue).
* TCQ_PLUG_LIMIT: Increase/decrease queue size
*/
-static int plug_change(struct Qdisc *sch, struct nlattr *opt)
+static int plug_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct plug_sched_data *q = qdisc_priv(sch);
struct tc_plug_qopt *msg;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2c79559a0d31..efbf51f35778 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -142,6 +142,31 @@ prio_reset(struct Qdisc *sch)
sch->q.qlen = 0;
}
+static int prio_offload(struct Qdisc *sch, bool enable)
+{
+ struct prio_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct tc_prio_qopt_offload opt = {
+ .handle = sch->handle,
+ .parent = sch->parent,
+ };
+
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return -EOPNOTSUPP;
+
+ if (enable) {
+ opt.command = TC_PRIO_REPLACE;
+ opt.replace_params.bands = q->bands;
+ memcpy(&opt.replace_params.priomap, q->prio2band,
+ TC_PRIO_MAX + 1);
+ opt.replace_params.qstats = &sch->qstats;
+ } else {
+ opt.command = TC_PRIO_DESTROY;
+ }
+
+ return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
+}
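
On the driver side, TC_SETUP_QDISC_PRIO arrives through ndo_setup_tc() with opt->command selecting replace, destroy, or (per prio_dump_offload() below) a stats pull. A hedged skeleton of such a handler; the driver function is hypothetical and would be dispatched from the driver's ndo_setup_tc:

	static int demo_setup_tc_prio(struct net_device *dev,
				      struct tc_prio_qopt_offload *opt)
	{
		switch (opt->command) {
		case TC_PRIO_REPLACE:
			/* program opt->replace_params.bands/.priomap into HW */
			return 0;
		case TC_PRIO_DESTROY:
			/* tear down the hardware scheduler state */
			return 0;
		case TC_PRIO_STATS:
			/* fill opt->stats.bstats/.qstats from HW counters */
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}
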
+
static void
prio_destroy(struct Qdisc *sch)
{
@@ -149,11 +174,13 @@ prio_destroy(struct Qdisc *sch)
struct prio_sched_data *q = qdisc_priv(sch);
tcf_block_put(q->block);
+ prio_offload(sch, false);
for (prio = 0; prio < q->bands; prio++)
qdisc_destroy(q->queues[prio]);
}
-static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct prio_sched_data *q = qdisc_priv(sch);
struct Qdisc *queues[TCQ_PRIO_BANDS];
@@ -175,7 +202,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
/* Before commit, make sure we can allocate all new qdiscs */
for (i = oldbands; i < qopt->bands; i++) {
queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- TC_H_MAKE(sch->handle, i + 1));
+ TC_H_MAKE(sch->handle, i + 1),
+ extack);
if (!queues[i]) {
while (i > oldbands)
qdisc_destroy(queues[--i]);
@@ -202,10 +230,12 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
}
sch_tree_unlock(sch);
+ prio_offload(sch, true);
return 0;
}
-static int prio_init(struct Qdisc *sch, struct nlattr *opt)
+static int prio_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct prio_sched_data *q = qdisc_priv(sch);
int err;
@@ -213,11 +243,42 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
if (!opt)
return -EINVAL;
- err = tcf_block_get(&q->block, &q->filter_list, sch);
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
- return prio_tune(sch, opt);
+ return prio_tune(sch, opt, extack);
+}
+
+static int prio_dump_offload(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct tc_prio_qopt_offload hw_stats = {
+ .command = TC_PRIO_STATS,
+ .handle = sch->handle,
+ .parent = sch->parent,
+ {
+ .stats = {
+ .bstats = &sch->bstats,
+ .qstats = &sch->qstats,
+ },
+ },
+ };
+ int err;
+
+ sch->flags &= ~TCQ_F_OFFLOADED;
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return 0;
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
+ &hw_stats);
+ if (err == -EOPNOTSUPP)
+ return 0;
+
+ if (!err)
+ sch->flags |= TCQ_F_OFFLOADED;
+
+ return err;
}
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -225,10 +286,15 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct prio_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb);
struct tc_prio_qopt opt;
+ int err;
opt.bands = q->bands;
memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
+ err = prio_dump_offload(sch);
+ if (err)
+ goto nla_put_failure;
+
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -240,7 +306,7 @@ nla_put_failure:
}
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct prio_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
@@ -327,7 +393,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
-static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct prio_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 6962b37a3ad3..bb1a9c11fc54 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -402,7 +402,8 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
}
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
- struct nlattr **tca, unsigned long *arg)
+ struct nlattr **tca, unsigned long *arg,
+ struct netlink_ext_ack *extack)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl = (struct qfq_class *)*arg;
@@ -479,8 +480,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->common.classid = classid;
cl->deficit = lmax;
- cl->qdisc = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, classid);
+ cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ classid, NULL);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
@@ -564,7 +565,8 @@ static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
return (unsigned long)qfq_find_class(sch, classid);
}
-static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct qfq_sched *q = qdisc_priv(sch);
@@ -593,13 +595,14 @@ static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
}
static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old)
+ struct Qdisc *new, struct Qdisc **old,
+ struct netlink_ext_ack *extack)
{
struct qfq_class *cl = (struct qfq_class *)arg;
if (new == NULL) {
- new = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, cl->common.classid);
+ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ cl->common.classid, NULL);
if (new == NULL)
new = &noop_qdisc;
}
@@ -1413,14 +1416,15 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
qfq_deactivate_class(q, cl);
}
-static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_group *grp;
int i, j, err;
u32 max_cl_shift, maxbudg_shift, max_classes;
- err = tcf_block_get(&q->block, &q->filter_list, sch);
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index f0747eb87dc4..16644b3d2362 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -157,7 +157,6 @@ static int red_offload(struct Qdisc *sch, bool enable)
.handle = sch->handle,
.parent = sch->parent,
};
- int err;
if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
return -EOPNOTSUPP;
@@ -168,18 +167,12 @@ static int red_offload(struct Qdisc *sch, bool enable)
opt.set.max = q->parms.qth_max >> q->parms.Wlog;
opt.set.probability = q->parms.max_P;
opt.set.is_ecn = red_use_ecn(q);
+ opt.set.qstats = &sch->qstats;
} else {
opt.command = TC_RED_DESTROY;
}
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
-
- if (!err && enable)
- sch->flags |= TCQ_F_OFFLOADED;
- else
- sch->flags &= ~TCQ_F_OFFLOADED;
-
- return err;
+ return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}
static void red_destroy(struct Qdisc *sch)
@@ -197,7 +190,8 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
[TCA_RED_MAX_P] = { .type = NLA_U32 },
};
-static int red_change(struct Qdisc *sch, struct nlattr *opt)
+static int red_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct red_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_RED_MAX + 1];
@@ -224,7 +218,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
return -EINVAL;
if (ctl->limit > 0) {
- child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
+ child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
+ extack);
if (IS_ERR(child))
return PTR_ERR(child);
}
@@ -272,14 +267,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
spin_unlock(root_lock);
}
-static int red_init(struct Qdisc *sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct red_sched_data *q = qdisc_priv(sch);
q->qdisc = &noop_qdisc;
q->sch = sch;
timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
- return red_change(sch, opt);
+ return red_change(sch, opt, extack);
}
static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
@@ -294,12 +290,22 @@ static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
.stats.qstats = &sch->qstats,
},
};
+ int err;
- if (!(sch->flags & TCQ_F_OFFLOADED))
+ sch->flags &= ~TCQ_F_OFFLOADED;
+
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
return 0;
- return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
- &hw_stats);
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+ &hw_stats);
+ if (err == -EOPNOTSUPP)
+ return 0;
+
+ if (!err)
+ sch->flags |= TCQ_F_OFFLOADED;
+
+ return err;
}
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -317,7 +323,6 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
};
int err;
- sch->qstats.backlog = q->qdisc->qstats.backlog;
err = red_dump_offload_stats(sch, &opt);
if (err)
goto nla_put_failure;
@@ -339,32 +344,24 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct red_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
- struct tc_red_xstats st = {
- .early = q->stats.prob_drop + q->stats.forced_drop,
- .pdrop = q->stats.pdrop,
- .other = q->stats.other,
- .marked = q->stats.prob_mark + q->stats.forced_mark,
- };
+ struct tc_red_xstats st = {0};
if (sch->flags & TCQ_F_OFFLOADED) {
- struct red_stats hw_stats = {0};
struct tc_red_qopt_offload hw_stats_request = {
.command = TC_RED_XSTATS,
.handle = sch->handle,
.parent = sch->parent,
{
- .xstats = &hw_stats,
+ .xstats = &q->stats,
},
};
- if (!dev->netdev_ops->ndo_setup_tc(dev,
- TC_SETUP_QDISC_RED,
- &hw_stats_request)) {
- st.early += hw_stats.prob_drop + hw_stats.forced_drop;
- st.pdrop += hw_stats.pdrop;
- st.other += hw_stats.other;
- st.marked += hw_stats.prob_mark + hw_stats.forced_mark;
- }
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+ &hw_stats_request);
}
+ st.early = q->stats.prob_drop + q->stats.forced_drop;
+ st.pdrop = q->stats.pdrop;
+ st.other = q->stats.other;
+ st.marked = q->stats.prob_mark + q->stats.forced_mark;
return gnet_stats_copy_app(d, &st, sizeof(st));
}
@@ -380,7 +377,7 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
}
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct red_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0678debdd856..7cbdad8419b7 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -488,7 +488,8 @@ static const struct tc_sfb_qopt sfb_default_ops = {
.penalty_burst = 20,
};
-static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct sfb_sched_data *q = qdisc_priv(sch);
struct Qdisc *child;
@@ -512,7 +513,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
if (limit == 0)
limit = qdisc_dev(sch)->tx_queue_len;
- child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
+ child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -549,17 +550,18 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct sfb_sched_data *q = qdisc_priv(sch);
int err;
- err = tcf_block_get(&q->block, &q->filter_list, sch);
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
q->qdisc = &noop_qdisc;
- return sfb_change(sch, opt);
+ return sfb_change(sch, opt, extack);
}
static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -615,7 +617,7 @@ static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
}
static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct sfb_sched_data *q = qdisc_priv(sch);
@@ -643,7 +645,8 @@ static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
}
static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
- struct nlattr **tca, unsigned long *arg)
+ struct nlattr **tca, unsigned long *arg,
+ struct netlink_ext_ack *extack)
{
return -ENOSYS;
}
@@ -665,7 +668,8 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct sfb_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 930e5bd26d3d..2f2678197760 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -721,7 +721,8 @@ static void sfq_destroy(struct Qdisc *sch)
kfree(q->red_parms);
}
-static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct sfq_sched_data *q = qdisc_priv(sch);
int i;
@@ -730,7 +731,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
q->sch = sch;
timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
- err = tcf_block_get(&q->block, &q->filter_list, sch);
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
return err;
@@ -836,7 +837,8 @@ static void sfq_unbind(struct Qdisc *q, unsigned long cl)
{
}
-static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
{
struct sfq_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 120f4f365967..83e76d046993 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -302,7 +302,8 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
[TCA_TBF_PBURST] = { .type = NLA_U32 },
};
-static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
int err;
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -326,11 +327,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
qopt = nla_data(tb[TCA_TBF_PARMS]);
if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
- tb[TCA_TBF_RTAB]));
+ tb[TCA_TBF_RTAB],
+ NULL));
if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
- tb[TCA_TBF_PTAB]));
+ tb[TCA_TBF_PTAB],
+ NULL));
buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
@@ -383,7 +386,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
if (err)
goto done;
} else if (qopt->limit > 0) {
- child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+ child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
+ extack);
if (IS_ERR(child)) {
err = PTR_ERR(child);
goto done;
@@ -421,19 +425,20 @@ done:
return err;
}
-static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct tbf_sched_data *q = qdisc_priv(sch);
qdisc_watchdog_init(&q->watchdog, sch);
q->qdisc = &noop_qdisc;
- if (opt == NULL)
+ if (!opt)
return -EINVAL;
q->t_c = ktime_get_ns();
- return tbf_change(sch, opt);
+ return tbf_change(sch, opt, extack);
}
static void tbf_destroy(struct Qdisc *sch)
@@ -494,7 +499,7 @@ static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
}
static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct tbf_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 9fe6b427afed..93f04cf5cac1 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -167,7 +167,8 @@ teql_destroy(struct Qdisc *sch)
}
}
-static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
+static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = qdisc_dev(sch);
struct teql_master *m = (struct teql_master *)sch->ops;
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index d9c04dc1b3f3..c740b189d4ba 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -37,18 +37,6 @@ menuconfig IP_SCTP
if IP_SCTP
-config NET_SCTPPROBE
- tristate "SCTP: Association probing"
- depends on PROC_FS && KPROBES
- ---help---
- This module allows for capturing the changes to SCTP association
- state in response to incoming packets. It is used for debugging
- SCTP congestion control algorithms. If you don't understand
- what was just said, you don't need it: say N.
-
- To compile this code as a module, choose M here: the
- module will be called sctp_probe.
-
config SCTP_DBG_OBJCNT
bool "SCTP: Debug object counts"
depends on PROC_FS
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 1ca84a288443..6776582ec449 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -4,7 +4,6 @@
#
obj-$(CONFIG_IP_SCTP) += sctp.o
-obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
obj-$(CONFIG_INET_SCTP_DIAG) += sctp_diag.o
sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
@@ -14,9 +13,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o debug.o stream.o auth.o \
offload.o stream_sched.o stream_sched_prio.o \
- stream_sched_rr.o
-
-sctp_probe-y := probe.o
+ stream_sched_rr.o stream_interleave.o
sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o
sctp-$(CONFIG_PROC_FS) += proc.o
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 69394f4d6091..837806dd5799 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -861,7 +861,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
0, spc_state, error, GFP_ATOMIC);
if (event)
- sctp_ulpq_tail_event(&asoc->ulpq, event);
+ asoc->stream.si->enqueue_event(&asoc->ulpq, event);
}
/* Select new active and retran paths. */
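This is the first of many call sites converted to go through asoc->stream.si, the stream-interleave operations table this series introduces: one instance implements the classic DATA/SSN/FWD-TSN behaviour, the other the RFC 8260 I-DATA/MID/I-FORWARD-TSN behaviour, so the data path stops branching on chunk type by hand. A sketch of the table, assembled from the members exercised in this diff (signatures inferred from the call sites, so treat them as approximate):

	struct sctp_stream_interleave {
		__u16	data_chunk_len;
		__u16	ftsn_chunk_len;
		/* DATA process */
		struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
						    const struct sctp_sndrcvinfo *sinfo,
						    int len, __u8 flags, gfp_t gfp);
		void	(*assign_number)(struct sctp_chunk *chunk);
		bool	(*validate_data)(struct sctp_chunk *chunk);
		int	(*ulpevent_data)(struct sctp_ulpq *ulpq,
					 struct sctp_chunk *chunk, gfp_t gfp);
		int	(*enqueue_event)(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event);
		void	(*renege_events)(struct sctp_ulpq *ulpq,
					 struct sctp_chunk *chunk, gfp_t gfp);
		void	(*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
		void	(*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
		/* (I-)FORWARD-TSN process */
		void	(*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
		bool	(*validate_ftsn)(struct sctp_chunk *chunk);
		void	(*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn);
		void	(*handle_ftsn)(struct sctp_ulpq *ulpq,
				       struct sctp_chunk *chunk);
	};

sctp_stream_interleave_init(), called from sctp_stream_init() later in this patch, presumably selects the instance according to whether I-DATA was negotiated.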
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 7f8baa48e7c2..991a530c6b31 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -124,7 +124,7 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
error, GFP_ATOMIC);
if (ev)
- sctp_ulpq_tail_event(&asoc->ulpq, ev);
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}
sctp_chunk_put(chunk);
@@ -191,7 +191,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
*/
max_data = asoc->pathmtu -
sctp_sk(asoc->base.sk)->pf->af->net_header_len -
- sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
+ sizeof(struct sctphdr) - sctp_datachk_len(&asoc->stream);
max_data = SCTP_TRUNC4(max_data);
/* If the peer requested that we authenticate DATA chunks
@@ -264,8 +264,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
frag |= SCTP_DATA_SACK_IMM;
}
- chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag,
- 0, GFP_KERNEL);
+ chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag,
+ GFP_KERNEL);
if (!chunk) {
err = -ENOMEM;
goto errout;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index ee1e601a0b11..8b3146816519 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -232,7 +232,7 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
{
ep->base.dead = true;
- ep->base.sk->sk_state = SCTP_SS_CLOSED;
+ inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
/* Unlink this endpoint, so we can't find it again! */
sctp_unhash_endpoint(ep);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 621b5ca3fd1c..141c9c466ec1 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -399,20 +399,24 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
return;
}
- if (t->param_flags & SPP_PMTUD_ENABLE) {
- /* Update transports view of the MTU */
- sctp_transport_update_pmtu(t, pmtu);
-
- /* Update association pmtu. */
- sctp_assoc_sync_pmtu(asoc);
- }
+ if (!(t->param_flags & SPP_PMTUD_ENABLE))
+ /* We can't allow retransmitting in such a case, as the
+ * retransmission would be sized just as before, and thus we
+ * would get another ICMP and retransmit again.
+ */
+ return;
- /* Retransmit with the new pmtu setting.
- * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
- * Needed will never be sent, but if a message was sent before
- * PMTU discovery was disabled that was larger than the PMTU, it
- * would not be fragmented, so it must be re-transmitted fragmented.
+ /* Update the transport's view of the MTU. Return if no update was needed.
+ * If an update wasn't needed/possible, it also doesn't make sense to
+ * try to retransmit now.
*/
+ if (!sctp_transport_update_pmtu(t, pmtu))
+ return;
+
+ /* Update association pmtu. */
+ sctp_assoc_sync_pmtu(asoc);
+
+ /* Retransmit with the new pmtu setting. */
sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
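The rewritten handler now bails out early both when PMTU discovery is disabled on the transport (retransmitting would only reproduce the oversized packet and draw another ICMP) and when the reported value changed nothing. The latter presumes the companion change that makes the update helper report whether it did anything, i.e. a contract roughly like:

	/* Assumed post-series signature: returns true only if
	 * t->pathmtu was actually changed by the new value. */
	bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);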
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 3b18085e3b10..5d4c15bf66d2 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -826,6 +826,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp)
case AF_INET:
if (!__ipv6_only_sock(sctp_opt2sk(sp)))
return 1;
+ /* fallthru */
default:
return 0;
}
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 275925b93b29..35bc7106d182 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct sctphdr *sh;
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
+ goto out;
+
sh = sctp_hdr(skb);
if (!pskb_may_pull(skb, sizeof(*sh)))
goto out;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 4a865cd06d76..01a26ee051e3 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -313,6 +313,7 @@ static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
/* We believe that this chunk is OK to add to the packet */
switch (chunk->chunk_hdr->type) {
case SCTP_CID_DATA:
+ case SCTP_CID_I_DATA:
/* Account for the data being in the packet */
sctp_packet_append_data(packet, chunk);
/* Disallow SACK bundling after DATA. */
@@ -724,7 +725,7 @@ static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
* or delay in hopes of bundling a full sized packet.
*/
if (chunk->skb->len + q->out_qlen > transport->pathmtu -
- packet->overhead - sizeof(struct sctp_data_chunk) - 4)
+ packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
/* Enough data queued to fill a packet */
return SCTP_XMIT_OK;
@@ -759,7 +760,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
asoc->peer.rwnd = rwnd;
sctp_chunk_assign_tsn(chunk);
- sctp_chunk_assign_ssn(chunk);
+ asoc->stream.si->assign_number(chunk);
}
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
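The fixed sizeof(struct sctp_data_chunk) arithmetic becomes sctp_datachk_len(&stream), which presumably just returns the data_chunk_len of whichever interleave table the stream uses: the I-DATA header is four bytes larger than DATA's because it carries a 32-bit message identifier, and its PPID slot doubles as the fragment sequence number on non-first fragments. A sketch of the two wire headers per RFC 8260 (illustrative; the 4-byte chunk header sits on top of each):

	struct sctp_datahdr {		/* DATA: 12-byte header */
		__be32 tsn;
		__be16 stream;
		__be16 ssn;
		__u32 ppid;
	};

	struct sctp_idatahdr {		/* I-DATA: 16-byte header */
		__be32 tsn;
		__be16 stream;
		__be16 reserved;
		__be32 mid;		/* message identifier */
		union {
			__u32 ppid;	/* first fragment */
			__be32 fsn;	/* later fragments */
		};
	};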
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 7d67feeeffc1..f211b3db6a35 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -67,8 +67,6 @@ static void sctp_mark_missing(struct sctp_outq *q,
__u32 highest_new_tsn,
int count_of_newacks);
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
-
static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
/* Add data to the front of the queue. */
@@ -591,7 +589,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
* following the procedures outlined in C1 - C5.
*/
if (reason == SCTP_RTXR_T3_RTX)
- sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+ q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);
/* Flush the queues only on timeout, since fast_rtx is only
* triggered during sack processing and the queue
@@ -918,9 +916,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
break;
case SCTP_CID_ABORT:
- if (sctp_test_T_bit(chunk)) {
+ if (sctp_test_T_bit(chunk))
packet->vtag = asoc->c.my_vtag;
- }
+ /* fallthru */
/* The following chunks are "response" chunks, i.e.
* they are generated in response to something we
* received. If we are sending these, then we can
@@ -942,6 +940,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
case SCTP_CID_ECN_ECNE:
case SCTP_CID_ASCONF:
case SCTP_CID_FWD_TSN:
+ case SCTP_CID_I_FWD_TSN:
case SCTP_CID_RECONF:
status = sctp_packet_transmit_chunk(packet, chunk,
one_packet, gfp);
@@ -956,7 +955,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
* sender MUST assure that at least one T3-rtx
* timer is running.
*/
- if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+ if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
+ chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
sctp_transport_reset_t3_rtx(transport);
transport->last_time_sent = jiffies;
}
@@ -1372,7 +1372,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
asoc->peer.rwnd = sack_a_rwnd;
- sctp_generate_fwdtsn(q, sack_ctsn);
+ asoc->stream.si->generate_ftsn(q, sack_ctsn);
pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
@@ -1795,7 +1795,7 @@ static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
}
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
struct sctp_association *asoc = q->asoc;
struct sctp_chunk *ftsn_chunk = NULL;
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
deleted file mode 100644
index 1280f85a598d..000000000000
--- a/net/sctp/probe.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * sctp_probe - Observe the SCTP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * Modified for SCTP from Stephen Hemminger's code
- * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/sctp.h>
-#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/kfifo.h>
-#include <linux/time.h>
-#include <net/net_namespace.h>
-
-#include <net/sctp/sctp.h>
-#include <net/sctp/sm.h>
-
-MODULE_SOFTDEP("pre: sctp");
-MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
-MODULE_DESCRIPTION("SCTP snooper");
-MODULE_LICENSE("GPL");
-
-static int port __read_mostly = 0;
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-static unsigned int fwmark __read_mostly = 0;
-MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
-module_param(fwmark, uint, 0);
-
-static int bufsize __read_mostly = 64 * 1024;
-MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
-module_param(bufsize, int, 0);
-
-static int full __read_mostly = 1;
-MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
-module_param(full, int, 0);
-
-static const char procname[] = "sctpprobe";
-
-static struct {
- struct kfifo fifo;
- spinlock_t lock;
- wait_queue_head_t wait;
- struct timespec64 tstart;
-} sctpw;
-
-static __printf(1, 2) void printl(const char *fmt, ...)
-{
- va_list args;
- int len;
- char tbuf[256];
-
- va_start(args, fmt);
- len = vscnprintf(tbuf, sizeof(tbuf), fmt, args);
- va_end(args);
-
- kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
- wake_up(&sctpw.wait);
-}
-
-static int sctpprobe_open(struct inode *inode, struct file *file)
-{
- kfifo_reset(&sctpw.fifo);
- ktime_get_ts64(&sctpw.tstart);
-
- return 0;
-}
-
-static ssize_t sctpprobe_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- int error = 0, cnt = 0;
- unsigned char *tbuf;
-
- if (!buf)
- return -EINVAL;
-
- if (len == 0)
- return 0;
-
- tbuf = vmalloc(len);
- if (!tbuf)
- return -ENOMEM;
-
- error = wait_event_interruptible(sctpw.wait,
- kfifo_len(&sctpw.fifo) != 0);
- if (error)
- goto out_free;
-
- cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
- error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
-
-out_free:
- vfree(tbuf);
-
- return error ? error : cnt;
-}
-
-static const struct file_operations sctpprobe_fops = {
- .owner = THIS_MODULE,
- .open = sctpprobe_open,
- .read = sctpprobe_read,
- .llseek = noop_llseek,
-};
-
-static enum sctp_disposition jsctp_sf_eat_sack(
- struct net *net,
- const struct sctp_endpoint *ep,
- const struct sctp_association *asoc,
- const union sctp_subtype type,
- void *arg,
- struct sctp_cmd_seq *commands)
-{
- struct sctp_chunk *chunk = arg;
- struct sk_buff *skb = chunk->skb;
- struct sctp_transport *sp;
- static __u32 lcwnd = 0;
- struct timespec64 now;
-
- sp = asoc->peer.primary_path;
-
- if (((port == 0 && fwmark == 0) ||
- asoc->peer.port == port ||
- ep->base.bind_addr.port == port ||
- (fwmark > 0 && skb->mark == fwmark)) &&
- (full || sp->cwnd != lcwnd)) {
- lcwnd = sp->cwnd;
-
- ktime_get_ts64(&now);
- now = timespec64_sub(now, sctpw.tstart);
-
- printl("%lu.%06lu ", (unsigned long) now.tv_sec,
- (unsigned long) now.tv_nsec / NSEC_PER_USEC);
-
- printl("%p %5d %5d %5d %8d %5d ", asoc,
- ep->base.bind_addr.port, asoc->peer.port,
- asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);
-
- list_for_each_entry(sp, &asoc->peer.transport_addr_list,
- transports) {
- if (sp == asoc->peer.primary_path)
- printl("*");
-
- printl("%pISc %2u %8u %8u %8u %8u %8u ",
- &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
- sp->flight_size, sp->partial_bytes_acked,
- sp->pathmtu);
- }
- printl("\n");
- }
-
- jprobe_return();
- return 0;
-}
-
-static struct jprobe sctp_recv_probe = {
- .kp = {
- .symbol_name = "sctp_sf_eat_sack_6_2",
- },
- .entry = jsctp_sf_eat_sack,
-};
-
-static __init int sctp_setup_jprobe(void)
-{
- int ret = register_jprobe(&sctp_recv_probe);
-
- if (ret) {
- if (request_module("sctp"))
- goto out;
- ret = register_jprobe(&sctp_recv_probe);
- }
-
-out:
- return ret;
-}
-
-static __init int sctpprobe_init(void)
-{
- int ret = -ENOMEM;
-
- /* Warning: if the function signature of sctp_sf_eat_sack_6_2,
- * has been changed, you also have to change the signature of
- * jsctp_sf_eat_sack, otherwise you end up right here!
- */
- BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2,
- jsctp_sf_eat_sack) == 0);
-
- init_waitqueue_head(&sctpw.wait);
- spin_lock_init(&sctpw.lock);
- if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
- return ret;
-
- if (!proc_create(procname, S_IRUSR, init_net.proc_net,
- &sctpprobe_fops))
- goto free_kfifo;
-
- ret = sctp_setup_jprobe();
- if (ret)
- goto remove_proc;
-
- pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
- port, fwmark, bufsize);
- return 0;
-
-remove_proc:
- remove_proc_entry(procname, init_net.proc_net);
-free_kfifo:
- kfifo_free(&sctpw.fifo);
- return ret;
-}
-
-static __exit void sctpprobe_exit(void)
-{
- kfifo_free(&sctpw.fifo);
- remove_proc_entry(procname, init_net.proc_net);
- unregister_jprobe(&sctp_recv_probe);
-}
-
-module_init(sctpprobe_init);
-module_exit(sctpprobe_exit);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 26b4be6b4172..537545ebcb0e 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -95,7 +95,6 @@ static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations sctp_snmp_seq_fops = {
- .owner = THIS_MODULE,
.open = sctp_snmp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -288,12 +287,8 @@ struct sctp_ht_iter {
static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
{
struct sctp_ht_iter *iter = seq->private;
- int err = sctp_transport_walk_start(&iter->hti);
- if (err) {
- iter->start_fail = 1;
- return ERR_PTR(err);
- }
+ sctp_transport_walk_start(&iter->hti);
iter->start_fail = 0;
return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9bf575f2e8ed..793b05ec692b 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
struct sctp_inithdr init;
union sctp_params addrs;
struct sctp_sock *sp;
- __u8 extensions[4];
+ __u8 extensions[5];
size_t chunksize;
__be16 types[2];
int num_ext = 0;
@@ -278,6 +278,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
if (sp->adaptation_ind)
chunksize += sizeof(aiparam);
+ if (sp->strm_interleave) {
+ extensions[num_ext] = SCTP_CID_I_DATA;
+ num_ext += 1;
+ }
+
chunksize += vparam_len;
/* Account for AUTH related parameters */
@@ -392,7 +397,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
struct sctp_inithdr initack;
union sctp_params addrs;
struct sctp_sock *sp;
- __u8 extensions[4];
+ __u8 extensions[5];
size_t chunksize;
int num_ext = 0;
int cookie_len;
@@ -442,6 +447,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
if (sp->adaptation_ind)
chunksize += sizeof(aiparam);
+ if (asoc->intl_enable) {
+ extensions[num_ext] = SCTP_CID_I_DATA;
+ num_ext += 1;
+ }
+
if (asoc->peer.auth_capable) {
auth_random = (struct sctp_paramhdr *)asoc->c.auth_random;
chunksize += ntohs(auth_random->length);
@@ -711,38 +721,31 @@ nodata:
/* Make a DATA chunk for the given association from the provided
* parameters. However, do not populate the data payload.
*/
-struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
- int data_len, __u8 flags, __u16 ssn,
- gfp_t gfp)
+ int len, __u8 flags, gfp_t gfp)
{
struct sctp_chunk *retval;
struct sctp_datahdr dp;
- int chunk_len;
/* We assign the TSN as LATE as possible, not here when
* creating the chunk.
*/
- dp.tsn = 0;
+ memset(&dp, 0, sizeof(dp));
+ dp.ppid = sinfo->sinfo_ppid;
dp.stream = htons(sinfo->sinfo_stream);
- dp.ppid = sinfo->sinfo_ppid;
/* Set the flags for an unordered send. */
- if (sinfo->sinfo_flags & SCTP_UNORDERED) {
+ if (sinfo->sinfo_flags & SCTP_UNORDERED)
flags |= SCTP_DATA_UNORDERED;
- dp.ssn = 0;
- } else
- dp.ssn = htons(ssn);
- chunk_len = sizeof(dp) + data_len;
- retval = sctp_make_data(asoc, flags, chunk_len, gfp);
+ retval = sctp_make_data(asoc, flags, sizeof(dp) + len, gfp);
if (!retval)
- goto nodata;
+ return NULL;
retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
-nodata:
return retval;
}
@@ -1273,7 +1276,6 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
struct sctp_authhdr auth_hdr;
struct sctp_hmac *hmac_desc;
struct sctp_chunk *retval;
- __u8 *hmac;
/* Get the first hmac that the peer told us to use */
hmac_desc = sctp_auth_asoc_get_hmac(asoc);
@@ -1292,7 +1294,7 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(auth_hdr),
&auth_hdr);
- hmac = skb_put_zero(retval->skb, hmac_desc->hmac_len);
+ skb_put_zero(retval->skb, hmac_desc->hmac_len);
/* Adjust the chunk header to include the empty MAC */
retval->chunk_hdr->length =
@@ -1415,6 +1417,12 @@ static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen, gfp);
}
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+ __u8 flags, int paylen, gfp_t gfp)
+{
+ return _sctp_make_chunk(asoc, SCTP_CID_I_DATA, flags, paylen, gfp);
+}
+
static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
__u8 type, __u8 flags, int paylen,
gfp_t gfp)
@@ -2032,6 +2040,10 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
if (net->sctp.addip_enable)
asoc->peer.asconf_capable = 1;
break;
+ case SCTP_CID_I_DATA:
+ if (sctp_sk(asoc->base.sk)->strm_interleave)
+ asoc->intl_enable = 1;
+ break;
default:
break;
}
@@ -3523,6 +3535,30 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
return retval;
}
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_ifwdtsn_skip *skiplist)
+{
+ struct sctp_chunk *retval = NULL;
+ struct sctp_ifwdtsn_hdr ftsn_hdr;
+ size_t hint;
+
+ hint = (nstreams + 1) * sizeof(__u32);
+
+ retval = sctp_make_control(asoc, SCTP_CID_I_FWD_TSN, 0, hint,
+ GFP_ATOMIC);
+ if (!retval)
+ return NULL;
+
+ ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
+ retval->subh.ifwdtsn_hdr =
+ sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
+
+ sctp_addto_chunk(retval, nstreams * sizeof(skiplist[0]), skiplist);
+
+ return retval;
+}
+
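sctp_make_ifwdtsn() is the I-FORWARD-TSN counterpart of sctp_make_fwdtsn() just above: the new cumulative TSN is followed by per-stream skip entries that carry a 32-bit MID instead of a 16-bit SSN, plus a flags byte distinguishing ordered from unordered messages. The wire formats, roughly (a sketch per RFC 8260; SCTP_FTSN_U_BIT marks an unordered entry):

	struct sctp_ifwdtsn_hdr {
		__be32 new_cum_tsn;
		/* followed by the skip entries */
	};

	struct sctp_ifwdtsn_skip {
		__be16 stream;
		__u8 reserved;
		__u8 flags;	/* SCTP_FTSN_U_BIT => unordered */
		__be32 mid;
	};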
/* RE-CONFIG 3.1 (RE-CONFIG chunk)
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index df94d77401e7..b71e7fb0a20a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -632,7 +632,7 @@ static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
struct sctp_chunk *abort;
/* Cancel any partial delivery in progress. */
- sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+ asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);
if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
@@ -878,12 +878,12 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
* successfully completed a connect() call.
*/
if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
- sk->sk_state = SCTP_SS_ESTABLISHED;
+ inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
sctp_sstate(sk, ESTABLISHED)) {
- sk->sk_state = SCTP_SS_CLOSING;
+ inet_sk_set_state(sk, SCTP_SS_CLOSING);
sk->sk_shutdown |= RCV_SHUTDOWN;
}
}
@@ -972,7 +972,7 @@ static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
if (!ev)
return;
- sctp_ulpq_tail_event(&asoc->ulpq, ev);
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
switch (err_hdr->cause) {
case SCTP_ERROR_UNKNOWN_CHUNK:
@@ -1007,18 +1007,6 @@ static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
}
}
-/* Process variable FWDTSN chunk information. */
-static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
- struct sctp_chunk *chunk)
-{
- struct sctp_fwdtsn_skip *skip;
-
- /* Walk through all the skipped SSNs */
- sctp_walk_fwdtsn(skip, chunk) {
- sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
- }
-}
-
/* Helper function to remove the association non-primary peer
* transports.
*/
@@ -1058,7 +1046,7 @@ static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (ev)
- sctp_ulpq_tail_event(&asoc->ulpq, ev);
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}
/* Helper function to generate an adaptation indication event */
@@ -1070,7 +1058,7 @@ static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
if (ev)
- sctp_ulpq_tail_event(&asoc->ulpq, ev);
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}
@@ -1368,18 +1356,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
case SCTP_CMD_REPORT_FWDTSN:
- /* Move the Cumulative TSN Ack ahead. */
- sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
-
- /* purge the fragmentation queue */
- sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
-
- /* Abort any in progress partial delivery. */
- sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+ asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
break;
case SCTP_CMD_PROCESS_FWDTSN:
- sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
+ asoc->stream.si->handle_ftsn(&asoc->ulpq,
+ cmd->obj.chunk);
break;
case SCTP_CMD_GEN_SACK:
@@ -1483,8 +1465,9 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
__func__, cmd->obj.chunk, &asoc->ulpq);
- sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
- GFP_ATOMIC);
+ asoc->stream.si->ulpevent_data(&asoc->ulpq,
+ cmd->obj.chunk,
+ GFP_ATOMIC);
break;
case SCTP_CMD_EVENT_ULP:
@@ -1492,7 +1475,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
__func__, cmd->obj.ulpevent, &asoc->ulpq);
- sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
+ asoc->stream.si->enqueue_event(&asoc->ulpq,
+ cmd->obj.ulpevent);
break;
case SCTP_CMD_REPLY:
@@ -1729,12 +1713,13 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
case SCTP_CMD_PART_DELIVER:
- sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
+ asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
break;
case SCTP_CMD_RENEGE:
- sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
- GFP_ATOMIC);
+ asoc->stream.si->renege_events(&asoc->ulpq,
+ cmd->obj.chunk,
+ GFP_ATOMIC);
break;
case SCTP_CMD_SETUP_T4:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8f8ccded13e4..eb7905ffe5f2 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -59,6 +59,9 @@
#include <net/sctp/sm.h>
#include <net/sctp/structs.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/sctp.h>
+
static struct sctp_packet *sctp_abort_pkt_new(
struct net *net,
const struct sctp_endpoint *ep,
@@ -3013,7 +3016,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
- if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
+ if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
@@ -3034,7 +3037,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr,
- sizeof(struct sctp_datahdr));
+ sctp_datahdr_len(&asoc->stream));
default:
BUG();
}
@@ -3133,7 +3136,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
- if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
+ if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
@@ -3150,7 +3153,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr,
- sizeof(struct sctp_datahdr));
+ sctp_datahdr_len(&asoc->stream));
default:
BUG();
}
@@ -3219,6 +3222,8 @@ enum sctp_disposition sctp_sf_eat_sack_6_2(struct net *net,
struct sctp_sackhdr *sackh;
__u32 ctsn;
+ trace_sctp_probe(ep, asoc, chunk);
+
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
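This tracepoint is the functional replacement for the sctp_probe module deleted above: every inbound SACK processed by sctp_sf_eat_sack_6_2() now fires sctp:sctp_probe, so the kind of cwnd/rwnd/flight data the module logged through /proc/net/sctpprobe can be collected with the standard tracing tooling (enabling the event under tracefs, perf, etc.), with no dependence on the function's signature staying stable. The CREATE_TRACE_POINTS definition above is what instantiates the trace_sctp_probe() symbol declared in trace/events/sctp.h.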
@@ -3957,7 +3962,6 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
{
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_chunk *chunk = arg;
- struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
@@ -3971,7 +3975,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the FORWARD_TSN chunk has valid length. */
- if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
+ if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
@@ -3990,14 +3994,11 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
- /* Silently discard the chunk if stream-id is not valid */
- sctp_walk_fwdtsn(skip, chunk) {
- if (ntohs(skip->stream) >= asoc->stream.incnt)
- goto discard_noforce;
- }
+ if (!asoc->stream.si->validate_ftsn(chunk))
+ goto discard_noforce;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
- if (len > sizeof(struct sctp_fwdtsn_hdr))
+ if (len > sctp_ftsnhdr_len(&asoc->stream))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
@@ -4028,7 +4029,6 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
{
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_chunk *chunk = arg;
- struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
@@ -4042,7 +4042,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the FORWARD_TSN chunk has a valid length. */
- if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
+ if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
@@ -4061,14 +4061,11 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;
- /* Silently discard the chunk if stream-id is not valid */
- sctp_walk_fwdtsn(skip, chunk) {
- if (ntohs(skip->stream) >= asoc->stream.incnt)
- goto gen_shutdown;
- }
+ if (!asoc->stream.si->validate_ftsn(chunk))
+ goto gen_shutdown;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
- if (len > sizeof(struct sctp_fwdtsn_hdr))
+ if (len > sctp_ftsnhdr_len(&asoc->stream))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
@@ -6244,14 +6241,12 @@ static int sctp_eat_data(const struct sctp_association *asoc,
struct sctp_chunk *err;
enum sctp_verb deliver;
size_t datalen;
- u8 ordered = 0;
- u16 ssn, sid;
__u32 tsn;
int tmp;
data_hdr = (struct sctp_datahdr *)chunk->skb->data;
chunk->subh.data_hdr = data_hdr;
- skb_pull(chunk->skb, sizeof(*data_hdr));
+ skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream));
tsn = ntohl(data_hdr->tsn);
pr_debug("%s: TSN 0x%x\n", __func__, tsn);
@@ -6299,7 +6294,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* Actually, allow a little bit of overflow (up to a MTU).
*/
datalen = ntohs(chunk->chunk_hdr->length);
- datalen -= sizeof(struct sctp_data_chunk);
+ datalen -= sctp_datachk_len(&asoc->stream);
deliver = SCTP_CMD_CHUNK_ULP;
@@ -6394,7 +6389,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
if (chunk->asoc)
chunk->asoc->stats.iodchunks++;
- ordered = 1;
}
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
@@ -6405,8 +6399,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
* and discard the DATA chunk.
*/
- sid = ntohs(data_hdr->stream);
- if (sid >= asoc->stream.incnt) {
+ if (ntohs(data_hdr->stream) >= asoc->stream.incnt) {
/* Mark tsn as received even though we drop it */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
@@ -6427,8 +6420,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* SSN is smaller then the next expected one. If it is, it wrapped
* and is invalid.
*/
- ssn = ntohs(data_hdr->ssn);
- if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->stream, in, sid)))
+ if (!asoc->stream.si->validate_data(chunk))
return SCTP_IERROR_PROTO_VIOLATION;
/* Send the data up to the user. Note: Schedule the
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 79b6bee5b768..691d9dc620e3 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -985,11 +985,14 @@ static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
if (state > SCTP_STATE_MAX)
return &bug;
+ if (cid == SCTP_CID_I_DATA)
+ cid = SCTP_CID_DATA;
+
if (cid <= SCTP_CID_BASE_MAX)
return &chunk_event_table[cid][state];
if (net->sctp.prsctp_enable) {
- if (cid == SCTP_CID_FWD_TSN)
+ if (cid == SCTP_CID_FWD_TSN || cid == SCTP_CID_I_FWD_TSN)
return &prsctp_chunk_event_table[0][state];
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b4fb6e4886d2..356e387f82e7 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -85,7 +85,7 @@
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
- size_t msg_len, struct sock **orig_sk);
+ size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -201,6 +201,22 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
cb(chunk);
}
+static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
+ void (*cb)(struct sk_buff *, struct sock *))
+
+{
+ struct sk_buff *skb, *tmp;
+
+ sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
+ cb(skb, sk);
+
+ sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
+ cb(skb, sk);
+
+ sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
+ cb(skb, sk);
+}
+
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
@@ -335,16 +351,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
if (len < sizeof (struct sockaddr))
return NULL;
+ if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+ return NULL;
+
/* V4 mapped address are really of AF_INET family */
if (addr->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
- if (!opt->pf->af_supported(AF_INET, opt))
- return NULL;
- } else {
- /* Does this PF support this AF? */
- if (!opt->pf->af_supported(addr->sa.sa_family, opt))
- return NULL;
- }
+ ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+ !opt->pf->af_supported(AF_INET, opt))
+ return NULL;
/* If we get this far, af is valid. */
af = sctp_get_af_specific(addr->sa.sa_family);
@@ -970,13 +984,6 @@ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
* This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
* from userspace.
*
- * We don't use copy_from_user() for optimization: we first do the
- * sanity checks (buffer size -fast- and access check-healthy
- * pointer); if all of those succeed, then we can alloc the memory
- * (expensive operation) needed to copy the data to kernel. Then we do
- * the copying without checking the user space area
- * (__copy_from_user()).
- *
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
* it.
*
@@ -1006,25 +1013,15 @@ static int sctp_setsockopt_bindx(struct sock *sk,
if (unlikely(addrs_size <= 0))
return -EINVAL;
- /* Check the user passed a healthy pointer. */
- if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
- return -EFAULT;
-
- /* Alloc space for the address array in kernel memory. */
- kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
- if (unlikely(!kaddrs))
- return -ENOMEM;
-
- if (__copy_from_user(kaddrs, addrs, addrs_size)) {
- kfree(kaddrs);
- return -EFAULT;
- }
+ kaddrs = vmemdup_user(addrs, addrs_size);
+ if (unlikely(IS_ERR(kaddrs)))
+ return PTR_ERR(kaddrs);
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
if (walk_size + sizeof(sa_family_t) > addrs_size) {
- kfree(kaddrs);
+ kvfree(kaddrs);
return -EINVAL;
}
@@ -1035,7 +1032,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
- kfree(kaddrs);
+ kvfree(kaddrs);
return -EINVAL;
}
addrcnt++;
@@ -1065,7 +1062,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
}
out:
- kfree(kaddrs);
+ kvfree(kaddrs);
return err;
}
@@ -1323,13 +1320,6 @@ out_free:
* land and invoking either sctp_connectx(). This is used for tunneling
* the sctp_connectx() request through sctp_setsockopt() from userspace.
*
- * We don't use copy_from_user() for optimization: we first do the
- * sanity checks (buffer size -fast- and access check-healthy
- * pointer); if all of those succeed, then we can alloc the memory
- * (expensive operation) needed to copy the data to kernel. Then we do
- * the copying without checking the user space area
- * (__copy_from_user()).
- *
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
* it.
*
@@ -1345,7 +1335,6 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
sctp_assoc_t *assoc_id)
{
struct sockaddr *kaddrs;
- gfp_t gfp = GFP_KERNEL;
int err = 0;
pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
@@ -1354,24 +1343,12 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
if (unlikely(addrs_size <= 0))
return -EINVAL;
- /* Check the user passed a healthy pointer. */
- if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
- return -EFAULT;
-
- /* Alloc space for the address array in kernel memory. */
- if (sk->sk_socket->file)
- gfp = GFP_USER | __GFP_NOWARN;
- kaddrs = kmalloc(addrs_size, gfp);
- if (unlikely(!kaddrs))
- return -ENOMEM;
-
- if (__copy_from_user(kaddrs, addrs, addrs_size)) {
- err = -EFAULT;
- } else {
- err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
- }
+ kaddrs = vmemdup_user(addrs, addrs_size);
+ if (unlikely(IS_ERR(kaddrs)))
+ return PTR_ERR(kaddrs);
- kfree(kaddrs);
+ err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
+ kvfree(kaddrs);
return err;
}
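Both the bindx and connectx tunnels now pull the user address array in through vmemdup_user(), replacing the open-coded access_ok()/kmalloc()/__copy_from_user() sequence the deleted comments described. The helper returns an ERR_PTR (never NULL) and may satisfy large requests from vmalloc, which dictates the calling pattern seen in these hunks:

	void *kaddrs;

	kaddrs = vmemdup_user(addrs, addrs_size);
	if (IS_ERR(kaddrs))		/* ERR_PTR on failure, never NULL */
		return PTR_ERR(kaddrs);

	/* ... walk/use the copied addresses ... */

	kvfree(kaddrs);			/* not kfree(): may be vmalloc'ed */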
@@ -1528,7 +1505,7 @@ static void sctp_close(struct sock *sk, long timeout)
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk->sk_shutdown = SHUTDOWN_MASK;
- sk->sk_state = SCTP_SS_CLOSING;
+ inet_sk_set_state(sk, SCTP_SS_CLOSING);
ep = sctp_sk(sk)->ep;
@@ -1554,6 +1531,7 @@ static void sctp_close(struct sock *sk, long timeout)
if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
!skb_queue_empty(&asoc->ulpq.reasm) ||
+ !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
(sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
struct sctp_chunk *chunk;
@@ -1883,8 +1861,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
*/
if (sinit) {
if (sinit->sinit_num_ostreams) {
- asoc->c.sinit_num_ostreams =
- sinit->sinit_num_ostreams;
+ __u16 outcnt = sinit->sinit_num_ostreams;
+
+ asoc->c.sinit_num_ostreams = outcnt;
+ /* outcnt has been changed, so re-init stream */
+ err = sctp_stream_init(&asoc->stream, outcnt, 0,
+ GFP_KERNEL);
+ if (err)
+ goto out_free;
}
if (sinit->sinit_max_instreams) {
asoc->c.sinit_max_instreams =
@@ -1971,7 +1955,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
if (!sctp_wspace(asoc)) {
/* sk can be changed by peel off when waiting for buf. */
- err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
+ err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
if (err) {
if (err == -ESRCH) {
/* asoc is already dead. */
@@ -2002,7 +1986,20 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
if (err < 0)
goto out_free;
- wait_connect = true;
+ /* If stream interleave is enabled, wait_connect has to be
+ * done earlier than data enqueue, as it needs to make data
+ * or idata according to asoc->intl_enable, which is set
+ * after the connection is done.
+ */
+ if (sctp_sk(asoc->base.sk)->strm_interleave) {
+ timeo = sock_sndtimeo(sk, 0);
+ err = sctp_wait_for_connect(asoc, &timeo);
+ if (err)
+ goto out_unlock;
+ } else {
+ wait_connect = true;
+ }
+
pr_debug("%s: we associated primitively\n", __func__);
}
@@ -2277,11 +2274,11 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
event = sctp_ulpevent_make_sender_dry_event(asoc,
- GFP_ATOMIC);
+ GFP_USER | __GFP_NOWARN);
if (!event)
return -ENOMEM;
- sctp_ulpq_tail_event(&asoc->ulpq, event);
+ asoc->stream.si->enqueue_event(&asoc->ulpq, event);
}
}
@@ -3180,7 +3177,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
if (val == 0) {
val = asoc->pathmtu - sp->pf->af->net_header_len;
val -= sizeof(struct sctphdr) +
- sizeof(struct sctp_data_chunk);
+ sctp_datachk_len(&asoc->stream);
}
asoc->user_frag = val;
asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
@@ -3350,7 +3347,10 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk,
if (get_user(val, (int __user *)optval))
return -EFAULT;
- sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
+ sctp_sk(sk)->frag_interleave = !!val;
+
+ if (!sctp_sk(sk)->frag_interleave)
+ sctp_sk(sk)->strm_interleave = 0;
return 0;
}
@@ -3498,6 +3498,8 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
if (optlen < sizeof(struct sctp_hmacalgo))
return -EINVAL;
+ optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
+ SCTP_AUTH_NUM_HMACS * sizeof(u16));
hmacs = memdup_user(optval, optlen);
if (IS_ERR(hmacs))
@@ -3536,6 +3538,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
+ /* authkey->sca_keylength is u16, so optlen can't be bigger than
+ * this.
+ */
+ optlen = min_t(unsigned int, optlen, USHRT_MAX +
+ sizeof(struct sctp_authkey));
authkey = memdup_user(optval, optlen);
if (IS_ERR(authkey))
@@ -3893,6 +3900,9 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
if (optlen < sizeof(*params))
return -EINVAL;
+ /* srs_number_streams is u16, so optlen can't be bigger than this. */
+ optlen = min_t(unsigned int, optlen, USHRT_MAX +
+ sizeof(__u16) * sizeof(*params));
params = memdup_user(optval, optlen);
if (IS_ERR(params))
@@ -4023,6 +4033,40 @@ out:
return retval;
}
+static int sctp_setsockopt_interleaving_supported(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct net *net = sock_net(sk);
+ struct sctp_assoc_value params;
+ int retval = -EINVAL;
+
+ if (optlen < sizeof(params))
+ goto out;
+
+ optlen = sizeof(params);
+ if (copy_from_user(&params, optval, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (params.assoc_id)
+ goto out;
+
+ if (!net->sctp.intl_enable || !sp->frag_interleave) {
+ retval = -EPERM;
+ goto out;
+ }
+
+ sp->strm_interleave = !!params.assoc_value;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
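Note the two gates: the sysctl behind net->sctp.intl_enable must be on and the socket must already have fragment interleaving enabled, otherwise the option fails with EPERM; and per the SCTP_FRAGMENT_INTERLEAVE hunk above, turning fragment interleaving back off also clears strm_interleave. A hypothetical userspace sequence (assuming the usual lksctp headers and an already-created SCTP socket fd):

	#include <netinet/sctp.h>

	int level = 2;				/* full interleaving */
	struct sctp_assoc_value av = {
		.assoc_id	= 0,		/* only 0 is accepted here */
		.assoc_value	= 1,
	};

	setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
		   &level, sizeof(level));
	setsockopt(fd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
		   &av, sizeof(av));		/* EPERM unless both gates are open */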
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4210,6 +4254,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_STREAM_SCHEDULER_VALUE:
retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
break;
+ case SCTP_INTERLEAVING_SUPPORTED:
+ retval = sctp_setsockopt_interleaving_supported(sk, optval,
+ optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -4586,7 +4634,7 @@ static void sctp_shutdown(struct sock *sk, int how)
if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
struct sctp_association *asoc;
- sk->sk_state = SCTP_SS_CLOSING;
+ inet_sk_set_state(sk, SCTP_SS_CLOSING);
asoc = list_entry(ep->asocs.next,
struct sctp_association, asocs);
sctp_primitive_SHUTDOWN(net, asoc, NULL);
@@ -4680,20 +4728,11 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
/* use callback to avoid exporting the core structure */
-int sctp_transport_walk_start(struct rhashtable_iter *iter)
+void sctp_transport_walk_start(struct rhashtable_iter *iter)
{
- int err;
-
rhltable_walk_enter(&sctp_transport_hashtable, iter);
- err = rhashtable_walk_start(iter);
- if (err && err != -EAGAIN) {
- rhashtable_walk_stop(iter);
- rhashtable_walk_exit(iter);
- return err;
- }
-
- return 0;
+ rhashtable_walk_start(iter);
}
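sctp_transport_walk_start() can lose its return value because rhashtable_walk_start() itself became unable to fail in this kernel generation (callers that still want the old -EAGAIN indication use rhashtable_walk_start_check()). The iteration skeleton the proc and diag users rely on is then simply:

	struct rhashtable_iter hti;

	rhltable_walk_enter(&sctp_transport_hashtable, &hti);
	rhashtable_walk_start(&hti);	/* void now, nothing to check */

	/* ... loop on rhashtable_walk_next(&hti), tolerating -EAGAIN ... */

	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);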
void sctp_transport_walk_stop(struct rhashtable_iter *iter)
@@ -4787,9 +4826,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
int ret;
again:
- ret = sctp_transport_walk_start(&hti);
- if (ret)
- return ret;
+ ret = 0;
+ sctp_transport_walk_start(&hti);
tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
@@ -5015,7 +5053,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
len = sizeof(int);
if (put_user(len, optlen))
return -EFAULT;
- if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
+ if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
return -EFAULT;
return 0;
}
@@ -5645,6 +5683,9 @@ copy_getaddrs:
err = -EFAULT;
goto out;
}
+ /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
+ * but we can't change it anymore.
+ */
if (put_user(bytes_copied, optlen))
err = -EFAULT;
out:
@@ -6081,7 +6122,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
- if (copy_from_user(&params, optval, sizeof(params)))
+ if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
return -EINVAL;
@@ -6251,7 +6292,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
if (len < sizeof(struct sctp_authkeyid))
return -EINVAL;
- if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
+
+ len = sizeof(struct sctp_authkeyid);
+ if (copy_from_user(&val, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
@@ -6263,7 +6306,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
else
val.scact_keynumber = ep->active_key_id;
- len = sizeof(struct sctp_authkeyid);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
@@ -6289,7 +6331,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
- if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+ if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
to = p->gauth_chunks;
@@ -6334,7 +6376,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
- if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+ if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
to = p->gauth_chunks;
@@ -6984,6 +7026,47 @@ out:
return retval;
}
+static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (asoc) {
+ params.assoc_value = asoc->intl_enable;
+ } else if (!params.assoc_id) {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ params.assoc_value = sp->strm_interleave;
+ } else {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -7174,6 +7257,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_scheduler_value(sk, len, optval,
optlen);
break;
+ case SCTP_INTERLEAVING_SUPPORTED:
+ retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
+ optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -7408,13 +7495,13 @@ static int sctp_listen_start(struct sock *sk, int backlog)
* sockets.
*
*/
- sk->sk_state = SCTP_SS_LISTENING;
+ inet_sk_set_state(sk, SCTP_SS_LISTENING);
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk))
return -EAGAIN;
} else {
if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
- sk->sk_state = SCTP_SS_CLOSED;
+ inet_sk_set_state(sk, SCTP_SS_CLOSED);
return -EADDRINUSE;
}
}
@@ -7500,11 +7587,11 @@ out:
* here, again, by modeling the current TCP/UDP code. We don't have
* a good way to test with it yet.
*/
-unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct sctp_sock *sp = sctp_sk(sk);
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, sk_sleep(sk), wait);
@@ -8002,12 +8089,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
- size_t msg_len, struct sock **orig_sk)
+ size_t msg_len)
{
struct sock *sk = asoc->base.sk;
- int err = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
+ int err = 0;
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
*timeo_p, msg_len);
@@ -8036,17 +8123,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
lock_sock(sk);
- if (sk != asoc->base.sk) {
- release_sock(sk);
- sk = asoc->base.sk;
- lock_sock(sk);
- }
+ if (sk != asoc->base.sk)
+ goto do_error;
*timeo_p = current_timeo;
}
out:
- *orig_sk = sk;
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */
@@ -8411,11 +8494,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
}
- sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
- sctp_skb_set_owner_r_frag(skb, newsk);
-
- sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
- sctp_skb_set_owner_r_frag(skb, newsk);
+ sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);
/* Set the type of socket to indicate that it is peeled off from the
* original UDP-style socket or created with the accept() call on a
@@ -8441,10 +8520,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* is called, set RCV_SHUTDOWN flag.
*/
if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
- newsk->sk_state = SCTP_SS_CLOSED;
+ inet_sk_set_state(newsk, SCTP_SS_CLOSED);
newsk->sk_shutdown |= RCV_SHUTDOWN;
} else {
- newsk->sk_state = SCTP_SS_ESTABLISHED;
+ inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
}
release_sock(newsk);
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 76ea66be0bbe..cedf672487f9 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -156,9 +156,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
sctp_stream_outq_migrate(stream, NULL, outcnt);
sched->sched_all(stream);
- i = sctp_stream_alloc_out(stream, outcnt, gfp);
- if (i)
- return i;
+ ret = sctp_stream_alloc_out(stream, outcnt, gfp);
+ if (ret)
+ goto out;
stream->outcnt = outcnt;
for (i = 0; i < stream->outcnt; i++)
@@ -167,22 +167,21 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
sched->init(stream);
in:
+ sctp_stream_interleave_init(stream);
if (!incnt)
goto out;
- i = sctp_stream_alloc_in(stream, incnt, gfp);
- if (i) {
- ret = -ENOMEM;
- goto free;
+ ret = sctp_stream_alloc_in(stream, incnt, gfp);
+ if (ret) {
+ sched->free(stream);
+ kfree(stream->out);
+ stream->out = NULL;
+ stream->outcnt = 0;
+ goto out;
}
stream->incnt = incnt;
- goto out;
-free:
- sched->free(stream);
- kfree(stream->out);
- stream->out = NULL;
out:
return ret;
}
@@ -215,11 +214,13 @@ void sctp_stream_clear(struct sctp_stream *stream)
{
int i;
- for (i = 0; i < stream->outcnt; i++)
- stream->out[i].ssn = 0;
+ for (i = 0; i < stream->outcnt; i++) {
+ stream->out[i].mid = 0;
+ stream->out[i].mid_uo = 0;
+ }
for (i = 0; i < stream->incnt; i++)
- stream->in[i].ssn = 0;
+ stream->in[i].mid = 0;
}
void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
@@ -606,10 +607,10 @@ struct sctp_chunk *sctp_process_strreset_outreq(
}
for (i = 0; i < nums; i++)
- stream->in[ntohs(str_p[i])].ssn = 0;
+ stream->in[ntohs(str_p[i])].mid = 0;
} else {
for (i = 0; i < stream->incnt; i++)
- stream->in[i].ssn = 0;
+ stream->in[i].mid = 0;
}
result = SCTP_STRRESET_PERFORMED;
@@ -753,8 +754,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
* performed.
*/
max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
- sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
- sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+ asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen);
/* G1: Compute an appropriate value for the Receiver's Next TSN -- the
* TSN that the peer should use to send the next DATA chunk. The
@@ -783,10 +783,12 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
/* G5: The next expected and outgoing SSNs MUST be reset to 0 for all
* incoming and outgoing streams.
*/
- for (i = 0; i < stream->outcnt; i++)
- stream->out[i].ssn = 0;
+ for (i = 0; i < stream->outcnt; i++) {
+ stream->out[i].mid = 0;
+ stream->out[i].mid_uo = 0;
+ }
for (i = 0; i < stream->incnt; i++)
- stream->in[i].ssn = 0;
+ stream->in[i].mid = 0;
result = SCTP_STRRESET_PERFORMED;
@@ -976,11 +978,15 @@ struct sctp_chunk *sctp_process_strreset_resp(
if (result == SCTP_STRRESET_PERFORMED) {
if (nums) {
- for (i = 0; i < nums; i++)
- stream->out[ntohs(str_p[i])].ssn = 0;
+ for (i = 0; i < nums; i++) {
+ stream->out[ntohs(str_p[i])].mid = 0;
+ stream->out[ntohs(str_p[i])].mid_uo = 0;
+ }
} else {
- for (i = 0; i < stream->outcnt; i++)
- stream->out[i].ssn = 0;
+ for (i = 0; i < stream->outcnt; i++) {
+ stream->out[i].mid = 0;
+ stream->out[i].mid_uo = 0;
+ }
}
flags = SCTP_STREAM_RESET_OUTGOING_SSN;
@@ -1023,8 +1029,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
&asoc->peer.tsn_map);
LIST_HEAD(temp);
- sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
- sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+ asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn);
sctp_tsnmap_init(&asoc->peer.tsn_map,
SCTP_TSN_MAP_INITIAL,
@@ -1042,10 +1047,12 @@ struct sctp_chunk *sctp_process_strreset_resp(
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
- for (i = 0; i < stream->outcnt; i++)
- stream->out[i].ssn = 0;
+ for (i = 0; i < stream->outcnt; i++) {
+ stream->out[i].mid = 0;
+ stream->out[i].mid_uo = 0;
+ }
for (i = 0; i < stream->incnt; i++)
- stream->in[i].ssn = 0;
+ stream->in[i].mid = 0;
}
for (i = 0; i < stream->outcnt; i++)
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
new file mode 100644
index 000000000000..8c7cf8f08711
--- /dev/null
+++ b/net/sctp/stream_interleave.c
@@ -0,0 +1,1334 @@
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions manipulate sctp stream queue/scheduling.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Xin Long <lucien.xin@gmail.com>
+ */
+
+#include <net/busy_poll.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/ulpevent.h>
+#include <linux/sctp.h>
+
+static struct sctp_chunk *sctp_make_idatafrag_empty(
+ const struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, __u8 flags, gfp_t gfp)
+{
+ struct sctp_chunk *retval;
+ struct sctp_idatahdr dp;
+
+ memset(&dp, 0, sizeof(dp));
+ dp.stream = htons(sinfo->sinfo_stream);
+
+ if (sinfo->sinfo_flags & SCTP_UNORDERED)
+ flags |= SCTP_DATA_UNORDERED;
+
+ retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
+ if (!retval)
+ return NULL;
+
+ retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
+ memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
+
+ return retval;
+}
+
+static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
+{
+ struct sctp_stream *stream;
+ struct sctp_chunk *lchunk;
+ __u32 cfsn = 0;
+ __u16 sid;
+
+ if (chunk->has_mid)
+ return;
+
+ sid = sctp_chunk_stream_no(chunk);
+ stream = &chunk->asoc->stream;
+
+ list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
+ struct sctp_idatahdr *hdr;
+ __u32 mid;
+
+ lchunk->has_mid = 1;
+
+ hdr = lchunk->subh.idata_hdr;
+
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
+ hdr->ppid = lchunk->sinfo.sinfo_ppid;
+ else
+ hdr->fsn = htonl(cfsn++);
+
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+ mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+ sctp_mid_uo_next(stream, out, sid) :
+ sctp_mid_uo_peek(stream, out, sid);
+ } else {
+ mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+ sctp_mid_next(stream, out, sid) :
+ sctp_mid_peek(stream, out, sid);
+ }
+ hdr->mid = htonl(mid);
+ }
+}
+
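The walk above encodes the I-DATA fragmentation rules: the first fragment reuses the FSN slot to carry the PPID, each later fragment takes the next fragment sequence number, and only the fragment flagged SCTP_DATA_LAST_FRAG consumes a message ID, while earlier fragments merely peek so that every piece of one message carries the same MID. Unordered messages draw from the separate mid_uo counter. The peek/next helpers are presumably trivial accessors in the style of the existing sctp_ssn_peek()/sctp_ssn_next():

	/* sketch, mirroring the SSN helpers */
	#define sctp_mid_peek(stream, type, sid)	((stream)->type[sid].mid)
	#define sctp_mid_next(stream, type, sid)	((stream)->type[sid].mid++)
	#define sctp_mid_uo_peek(stream, type, sid)	((stream)->type[sid].mid_uo)
	#define sctp_mid_uo_next(stream, type, sid)	((stream)->type[sid].mid_uo++)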
+static bool sctp_validate_data(struct sctp_chunk *chunk)
+{
+ const struct sctp_stream *stream;
+ __u16 sid, ssn;
+
+ if (chunk->chunk_hdr->type != SCTP_CID_DATA)
+ return false;
+
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ return true;
+
+ stream = &chunk->asoc->stream;
+ sid = sctp_chunk_stream_no(chunk);
+ ssn = ntohs(chunk->subh.data_hdr->ssn);
+
+ return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
+}
+
+static bool sctp_validate_idata(struct sctp_chunk *chunk)
+{
+ struct sctp_stream *stream;
+ __u32 mid;
+ __u16 sid;
+
+ if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
+ return false;
+
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ return true;
+
+ stream = &chunk->asoc->stream;
+ sid = sctp_chunk_stream_no(chunk);
+ mid = ntohl(chunk->subh.idata_hdr->mid);
+
+ return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
+}
+
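sctp_validate_idata() is the MID-based twin of the SSN check that sctp_eat_data() used to do inline: an ordered chunk is a protocol violation if its MID has already been passed on that inbound stream. MID_lt is presumably the 32-bit analogue of the 16-bit SSN_lt serial-number comparison, i.e. something like:

	/* serial arithmetic, sketch */
	#define SSN_lt(a, b)	((__s16)((__u16)(a) - (__u16)(b)) < 0)
	#define MID_lt(a, b)	((__s32)((__u32)(a) - (__u32)(b)) < 0)

so that comparisons stay correct across counter wraparound.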
+static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *cevent;
+ struct sk_buff *pos;
+
+ pos = skb_peek_tail(&ulpq->reasm);
+ if (!pos) {
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ return;
+ }
+
+ cevent = sctp_skb2event(pos);
+
+ if (event->stream == cevent->stream &&
+ event->mid == cevent->mid &&
+ (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
+ (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+ event->fsn > cevent->fsn))) {
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ return;
+ }
+
+ if ((event->stream == cevent->stream &&
+ MID_lt(cevent->mid, event->mid)) ||
+ event->stream > cevent->stream) {
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ return;
+ }
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ cevent = sctp_skb2event(pos);
+
+ if (event->stream < cevent->stream ||
+ (event->stream == cevent->stream &&
+ MID_lt(event->mid, cevent->mid)))
+ break;
+
+ if (event->stream == cevent->stream &&
+ event->mid == cevent->mid &&
+ !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+ (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
+ event->fsn < cevent->fsn))
+ break;
+ }
+
+ __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
+}
+
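+/* With partial delivery in progress on a stream, pull the next run of
+ * consecutive fragments (by FSN) of the message currently being delivered
+ * and hand it up as one reassembled event.  MSG_EOR is set and pd mode is
+ * left only once the run ends with the last fragment.
+ */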
+static struct sctp_ulpevent *sctp_intl_retrieve_partial(
+ struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff *first_frag = NULL;
+ struct sk_buff *last_frag = NULL;
+ struct sctp_ulpevent *retval;
+ struct sctp_stream_in *sin;
+ struct sk_buff *pos;
+ __u32 next_fsn = 0;
+ int is_last = 0;
+
+ sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ if (cevent->stream < event->stream)
+ continue;
+
+ if (cevent->stream > event->stream ||
+ cevent->mid != sin->mid)
+ break;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ goto out;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag) {
+ if (cevent->fsn == sin->fsn) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = cevent->fsn + 1;
+ }
+ } else if (cevent->fsn == next_fsn) {
+ last_frag = pos;
+ next_fsn++;
+ } else {
+ goto out;
+ }
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag) {
+ if (cevent->fsn == sin->fsn) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = 0;
+ is_last = 1;
+ }
+ } else if (cevent->fsn == next_fsn) {
+ last_frag = pos;
+ next_fsn = 0;
+ is_last = 1;
+ }
+ goto out;
+ default:
+ goto out;
+ }
+ }
+
+out:
+ if (!first_frag)
+ return NULL;
+
+ retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+ &ulpq->reasm, first_frag,
+ last_frag);
+ if (retval) {
+ sin->fsn = next_fsn;
+ if (is_last) {
+ retval->msg_flags |= MSG_EOR;
+ sin->pd_mode = 0;
+ }
+ }
+
+ return retval;
+}
+
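+/* Look for a complete message matching this event's stream and MID.  If
+ * none is complete yet but the fragments queued for the stream's next
+ * expected MID already exceed the partial delivery point, deliver that
+ * prefix and switch the stream into partial delivery mode.
+ */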
+static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
+ struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_association *asoc = ulpq->asoc;
+ struct sk_buff *pos, *first_frag = NULL;
+ struct sctp_ulpevent *retval = NULL;
+ struct sk_buff *pd_first = NULL;
+ struct sk_buff *pd_last = NULL;
+ struct sctp_stream_in *sin;
+ __u32 next_fsn = 0;
+ __u32 pd_point = 0;
+ __u32 pd_len = 0;
+ __u32 mid = 0;
+
+ sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ if (cevent->stream < event->stream)
+ continue;
+ if (cevent->stream > event->stream)
+ break;
+
+ if (MID_lt(cevent->mid, event->mid))
+ continue;
+ if (MID_lt(event->mid, cevent->mid))
+ break;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (cevent->mid == sin->mid) {
+ pd_first = pos;
+ pd_last = pos;
+ pd_len = pos->len;
+ }
+
+ first_frag = pos;
+ next_fsn = 0;
+ mid = cevent->mid;
+ break;
+
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (first_frag && cevent->mid == mid &&
+ cevent->fsn == next_fsn) {
+ next_fsn++;
+ if (pd_first) {
+ pd_last = pos;
+ pd_len += pos->len;
+ }
+ } else {
+ first_frag = NULL;
+ }
+ break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag && cevent->mid == mid &&
+ cevent->fsn == next_fsn)
+ goto found;
+ else
+ first_frag = NULL;
+ break;
+ }
+ }
+
+ if (!pd_first)
+ goto out;
+
+ pd_point = sctp_sk(asoc->base.sk)->pd_point;
+ if (pd_point && pd_point <= pd_len) {
+ retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+ &ulpq->reasm,
+ pd_first, pd_last);
+ if (retval) {
+ sin->fsn = next_fsn;
+ sin->pd_mode = 1;
+ }
+ }
+ goto out;
+
+found:
+ retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+ &ulpq->reasm,
+ first_frag, pos);
+ if (retval)
+ retval->msg_flags |= MSG_EOR;
+
+out:
+ return retval;
+}
+
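+/* Reassembly entry point for ordered I-DATA: unfragmented events pass
+ * straight through with MSG_EOR set, fragments are queued and a retrieval
+ * is attempted, continuing partial delivery first when it is in progress.
+ */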
+static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *retval = NULL;
+ struct sctp_stream_in *sin;
+
+ if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+ event->msg_flags |= MSG_EOR;
+ return event;
+ }
+
+ sctp_intl_store_reasm(ulpq, event);
+
+ sin = sctp_stream_in(ulpq->asoc, event->stream);
+ if (sin->pd_mode && event->mid == sin->mid &&
+ event->fsn == sin->fsn)
+ retval = sctp_intl_retrieve_partial(ulpq, event);
+
+ if (!retval)
+ retval = sctp_intl_retrieve_reassembled(ulpq, event);
+
+ return retval;
+}
+
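+/* Park an out-of-order message in the lobby, sorted by stream id and MID. */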
+static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *cevent;
+ struct sk_buff *pos;
+
+ pos = skb_peek_tail(&ulpq->lobby);
+ if (!pos) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ cevent = (struct sctp_ulpevent *)pos->cb;
+ if (event->stream == cevent->stream &&
+ MID_lt(cevent->mid, event->mid)) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ if (event->stream > cevent->stream) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ skb_queue_walk(&ulpq->lobby, pos) {
+ cevent = (struct sctp_ulpevent *)pos->cb;
+
+ if (cevent->stream > event->stream)
+ break;
+
+ if (cevent->stream == event->stream &&
+ MID_lt(event->mid, cevent->mid))
+ break;
+ }
+
+ __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
+}
+
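+/* Pull every lobby event on this stream whose MID has become the next one
+ * in sequence and append it to the event list being delivered.
+ */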
+static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff_head *event_list;
+ struct sctp_stream *stream;
+ struct sk_buff *pos, *tmp;
+ __u16 sid = event->stream;
+
+ stream = &ulpq->asoc->stream;
+ event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
+
+ sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+ struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
+
+ if (cevent->stream > sid)
+ break;
+
+ if (cevent->stream < sid)
+ continue;
+
+ if (cevent->mid != sctp_mid_peek(stream, in, sid))
+ break;
+
+ sctp_mid_next(stream, in, sid);
+
+ __skb_unlink(pos, &ulpq->lobby);
+
+ __skb_queue_tail(event_list, pos);
+ }
+}
+
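+/* Deliver an ordered message if its MID is the next expected one on its
+ * stream, then release any consecutive successors from the lobby; otherwise
+ * park it there.
+ */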
+static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_stream *stream;
+ __u16 sid;
+
+ stream = &ulpq->asoc->stream;
+ sid = event->stream;
+
+ if (event->mid != sctp_mid_peek(stream, in, sid)) {
+ sctp_intl_store_ordered(ulpq, event);
+ return NULL;
+ }
+
+ sctp_mid_next(stream, in, sid);
+
+ sctp_intl_retrieve_ordered(ulpq, event);
+
+ return event;
+}
+
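+/* Move an event (or a batched list of events) to the socket receive queue
+ * and wake the reader.  Events are dropped instead if the socket is shut
+ * down for reading or the event type is not subscribed.  Returns 1 on
+ * delivery, 0 when the event(s) had to be freed.
+ */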
+static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff *skb = sctp_event2skb(event);
+ struct sock *sk = ulpq->asoc->base.sk;
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sk_buff_head *skb_list;
+
+ skb_list = (struct sk_buff_head *)skb->prev;
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN &&
+ (sk->sk_shutdown & SEND_SHUTDOWN ||
+ !sctp_ulpevent_is_notification(event)))
+ goto out_free;
+
+ if (!sctp_ulpevent_is_notification(event)) {
+ sk_mark_napi_id(sk, skb);
+ sk_incoming_cpu_update(sk);
+ }
+
+ if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
+ goto out_free;
+
+ if (skb_list)
+ skb_queue_splice_tail_init(skb_list,
+ &sk->sk_receive_queue);
+ else
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+
+ if (!sp->data_ready_signalled) {
+ sp->data_ready_signalled = 1;
+ sk->sk_data_ready(sk);
+ }
+
+ return 1;
+
+out_free:
+ if (skb_list)
+ sctp_queue_purge_ulpevents(skb_list);
+ else
+ sctp_ulpevent_free(event);
+
+ return 0;
+}
+
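+/* The _uo helpers below mirror their ordered counterparts for unordered
+ * I-DATA, working on the reasm_uo queue and the per-stream mid_uo/fsn_uo
+ * state instead.
+ */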
+static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *cevent;
+ struct sk_buff *pos;
+
+ pos = skb_peek_tail(&ulpq->reasm_uo);
+ if (!pos) {
+ __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+ return;
+ }
+
+ cevent = sctp_skb2event(pos);
+
+ if (event->stream == cevent->stream &&
+ event->mid == cevent->mid &&
+ (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
+ (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+ event->fsn > cevent->fsn))) {
+ __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+ return;
+ }
+
+ if ((event->stream == cevent->stream &&
+ MID_lt(cevent->mid, event->mid)) ||
+ event->stream > cevent->stream) {
+ __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+ return;
+ }
+
+ skb_queue_walk(&ulpq->reasm_uo, pos) {
+ cevent = sctp_skb2event(pos);
+
+ if (event->stream < cevent->stream ||
+ (event->stream == cevent->stream &&
+ MID_lt(event->mid, cevent->mid)))
+ break;
+
+ if (event->stream == cevent->stream &&
+ event->mid == cevent->mid &&
+ !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+ (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
+ event->fsn < cevent->fsn))
+ break;
+ }
+
+ __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
+ struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff *first_frag = NULL;
+ struct sk_buff *last_frag = NULL;
+ struct sctp_ulpevent *retval;
+ struct sctp_stream_in *sin;
+ struct sk_buff *pos;
+ __u32 next_fsn = 0;
+ int is_last = 0;
+
+ sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+ skb_queue_walk(&ulpq->reasm_uo, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ if (cevent->stream < event->stream)
+ continue;
+ if (cevent->stream > event->stream)
+ break;
+
+ if (MID_lt(cevent->mid, sin->mid_uo))
+ continue;
+ if (MID_lt(sin->mid_uo, cevent->mid))
+ break;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ goto out;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag) {
+ if (cevent->fsn == sin->fsn_uo) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = cevent->fsn + 1;
+ }
+ } else if (cevent->fsn == next_fsn) {
+ last_frag = pos;
+ next_fsn++;
+ } else {
+ goto out;
+ }
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag) {
+ if (cevent->fsn == sin->fsn_uo) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = 0;
+ is_last = 1;
+ }
+ } else if (cevent->fsn == next_fsn) {
+ last_frag = pos;
+ next_fsn = 0;
+ is_last = 1;
+ }
+ goto out;
+ default:
+ goto out;
+ }
+ }
+
+out:
+ if (!first_frag)
+ return NULL;
+
+ retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+ &ulpq->reasm_uo, first_frag,
+ last_frag);
+ if (retval) {
+ sin->fsn_uo = next_fsn;
+ if (is_last) {
+ retval->msg_flags |= MSG_EOR;
+ sin->pd_mode_uo = 0;
+ }
+ }
+
+ return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
+ struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_association *asoc = ulpq->asoc;
+ struct sk_buff *pos, *first_frag = NULL;
+ struct sctp_ulpevent *retval = NULL;
+ struct sk_buff *pd_first = NULL;
+ struct sk_buff *pd_last = NULL;
+ struct sctp_stream_in *sin;
+ __u32 next_fsn = 0;
+ __u32 pd_point = 0;
+ __u32 pd_len = 0;
+ __u32 mid = 0;
+
+ sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+ skb_queue_walk(&ulpq->reasm_uo, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ if (cevent->stream < event->stream)
+ continue;
+ if (cevent->stream > event->stream)
+ break;
+
+ if (MID_lt(cevent->mid, event->mid))
+ continue;
+ if (MID_lt(event->mid, cevent->mid))
+ break;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (!sin->pd_mode_uo) {
+ sin->mid_uo = cevent->mid;
+ pd_first = pos;
+ pd_last = pos;
+ pd_len = pos->len;
+ }
+
+ first_frag = pos;
+ next_fsn = 0;
+ mid = cevent->mid;
+ break;
+
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (first_frag && cevent->mid == mid &&
+ cevent->fsn == next_fsn) {
+ next_fsn++;
+ if (pd_first) {
+ pd_last = pos;
+ pd_len += pos->len;
+ }
+ } else {
+ first_frag = NULL;
+ }
+ break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag && cevent->mid == mid &&
+ cevent->fsn == next_fsn)
+ goto found;
+ else
+ first_frag = NULL;
+ break;
+ }
+ }
+
+ if (!pd_first)
+ goto out;
+
+ pd_point = sctp_sk(asoc->base.sk)->pd_point;
+ if (pd_point && pd_point <= pd_len) {
+ retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+ &ulpq->reasm_uo,
+ pd_first, pd_last);
+ if (retval) {
+ sin->fsn_uo = next_fsn;
+ sin->pd_mode_uo = 1;
+ }
+ }
+ goto out;
+
+found:
+ retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+ &ulpq->reasm_uo,
+ first_frag, pos);
+ if (retval)
+ retval->msg_flags |= MSG_EOR;
+
+out:
+ return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *retval = NULL;
+ struct sctp_stream_in *sin;
+
+ if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+ event->msg_flags |= MSG_EOR;
+ return event;
+ }
+
+ sctp_intl_store_reasm_uo(ulpq, event);
+
+ sin = sctp_stream_in(ulpq->asoc, event->stream);
+ if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
+ event->fsn == sin->fsn_uo)
+ retval = sctp_intl_retrieve_partial_uo(ulpq, event);
+
+ if (!retval)
+ retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
+
+ return retval;
+}
+
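+/* Forced partial delivery for unordered data: find the first stream not
+ * already in unordered pd mode that has an initial run of fragments queued,
+ * deliver that run and switch the stream into unordered pd mode.
+ */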
+static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
+{
+ struct sctp_stream_in *csin, *sin = NULL;
+ struct sk_buff *first_frag = NULL;
+ struct sk_buff *last_frag = NULL;
+ struct sctp_ulpevent *retval;
+ struct sk_buff *pos;
+ __u32 next_fsn = 0;
+ __u16 sid = 0;
+
+ skb_queue_walk(&ulpq->reasm_uo, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ csin = sctp_stream_in(ulpq->asoc, cevent->stream);
+ if (csin->pd_mode_uo)
+ continue;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (first_frag)
+ goto out;
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = 0;
+ sin = csin;
+ sid = cevent->stream;
+ sin->mid_uo = cevent->mid;
+ break;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag)
+ break;
+ if (cevent->stream == sid &&
+ cevent->mid == sin->mid_uo &&
+ cevent->fsn == next_fsn) {
+ next_fsn++;
+ last_frag = pos;
+ } else {
+ goto out;
+ }
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag)
+ goto out;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!first_frag)
+ return NULL;
+
+out:
+ retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+ &ulpq->reasm_uo, first_frag,
+ last_frag);
+ if (retval) {
+ sin->fsn_uo = next_fsn;
+ sin->pd_mode_uo = 1;
+ }
+
+ return retval;
+}
+
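+/* Receive path for an I-DATA chunk: build the event, pick up the MID plus
+ * PPID or FSN from the I-DATA header, reassemble, order if required, and
+ * enqueue.  Returns 1 if a complete message reached the socket, 0 if data
+ * is still pending, or -ENOMEM.
+ */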
+static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sk_buff_head temp;
+ int event_eor = 0;
+
+ event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
+ if (!event)
+ return -ENOMEM;
+
+ event->mid = ntohl(chunk->subh.idata_hdr->mid);
+ if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
+ event->ppid = chunk->subh.idata_hdr->ppid;
+ else
+ event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
+
+ if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
+ event = sctp_intl_reasm(ulpq, event);
+ if (event && event->msg_flags & MSG_EOR) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+
+ event = sctp_intl_order(ulpq, event);
+ }
+ } else {
+ event = sctp_intl_reasm_uo(ulpq, event);
+ }
+
+ if (event) {
+ event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
+ sctp_enqueue_event(ulpq, event);
+ }
+
+ return event_eor;
+}
+
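+/* Ordered counterpart of sctp_intl_retrieve_first_uo(): only a run whose
+ * MID is the stream's next expected one may be force-delivered, so stream
+ * order is preserved.
+ */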
+static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
+{
+ struct sctp_stream_in *csin, *sin = NULL;
+ struct sk_buff *first_frag = NULL;
+ struct sk_buff *last_frag = NULL;
+ struct sctp_ulpevent *retval;
+ struct sk_buff *pos;
+ __u32 next_fsn = 0;
+ __u16 sid = 0;
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ csin = sctp_stream_in(ulpq->asoc, cevent->stream);
+ if (csin->pd_mode)
+ continue;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (first_frag)
+ goto out;
+ if (cevent->mid == csin->mid) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = 0;
+ sin = csin;
+ sid = cevent->stream;
+ }
+ break;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag)
+ break;
+ if (cevent->stream == sid &&
+ cevent->mid == sin->mid &&
+ cevent->fsn == next_fsn) {
+ next_fsn++;
+ last_frag = pos;
+ } else {
+ goto out;
+ }
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag)
+ goto out;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!first_frag)
+ return NULL;
+
+out:
+ retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+ &ulpq->reasm, first_frag,
+ last_frag);
+ if (retval) {
+ sin->fsn = next_fsn;
+ sin->pd_mode = 1;
+ }
+
+ return retval;
+}
+
+static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+
+ if (!skb_queue_empty(&ulpq->reasm)) {
+ do {
+ event = sctp_intl_retrieve_first(ulpq);
+ if (event)
+ sctp_enqueue_event(ulpq, event);
+ } while (event);
+ }
+
+ if (!skb_queue_empty(&ulpq->reasm_uo)) {
+ do {
+ event = sctp_intl_retrieve_first_uo(ulpq);
+ if (event)
+ sctp_enqueue_event(ulpq, event);
+ } while (event);
+ }
+}
+
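+/* Renege queued-but-undelivered data to make room for an incoming chunk:
+ * free events from the lobby first, then from both reassembly queues.  If
+ * enough was freed, process the chunk, forcing partial delivery when no
+ * complete message could be handed up.
+ */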
+static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+ gfp_t gfp)
+{
+ struct sctp_association *asoc = ulpq->asoc;
+ __u32 freed = 0;
+ __u16 needed;
+
+ if (chunk) {
+ needed = ntohs(chunk->chunk_hdr->length);
+ needed -= sizeof(struct sctp_idata_chunk);
+ } else {
+ needed = SCTP_DEFAULT_MAXWINDOW;
+ }
+
+ if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
+ freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
+ if (freed < needed)
+ freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
+ needed);
+ if (freed < needed)
+ freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
+ needed);
+ }
+
+ if (chunk && freed >= needed)
+ if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
+ sctp_intl_start_pd(ulpq, gfp);
+
+ sk_mem_reclaim(asoc->base.sk);
+}
+
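+/* Queue an SCTP_PARTIAL_DELIVERY_ABORTED notification for one stream, if
+ * the application subscribed to partial delivery events.
+ */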
+static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
+ __u32 mid, __u16 flags, gfp_t gfp)
+{
+ struct sock *sk = ulpq->asoc->base.sk;
+ struct sctp_ulpevent *ev = NULL;
+
+ if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
+ &sctp_sk(sk)->subscribe))
+ return;
+
+ ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
+ sid, mid, flags, gfp);
+ if (ev) {
+ __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
+
+ if (!sctp_sk(sk)->data_ready_signalled) {
+ sctp_sk(sk)->data_ready_signalled = 1;
+ sk->sk_data_ready(sk);
+ }
+ }
+}
+
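+/* After a MID skip, flush every lobby event on the stream whose MID now
+ * lies below the expected one, pick up the event matching the expected MID
+ * itself if queued, and deliver the batch together with any in-sequence
+ * successors.
+ */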
+static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
+{
+ struct sctp_stream *stream = &ulpq->asoc->stream;
+ struct sctp_ulpevent *cevent, *event = NULL;
+ struct sk_buff_head *lobby = &ulpq->lobby;
+ struct sk_buff *pos, *tmp;
+ struct sk_buff_head temp;
+ __u16 csid;
+ __u32 cmid;
+
+ skb_queue_head_init(&temp);
+ sctp_skb_for_each(pos, lobby, tmp) {
+ cevent = (struct sctp_ulpevent *)pos->cb;
+ csid = cevent->stream;
+ cmid = cevent->mid;
+
+ if (csid > sid)
+ break;
+
+ if (csid < sid)
+ continue;
+
+ if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
+ break;
+
+ __skb_unlink(pos, lobby);
+ if (!event)
+ event = sctp_skb2event(pos);
+
+ __skb_queue_tail(&temp, pos);
+ }
+
+ if (!event && pos != (struct sk_buff *)lobby) {
+ cevent = (struct sctp_ulpevent *)pos->cb;
+ csid = cevent->stream;
+ cmid = cevent->mid;
+
+ if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
+ sctp_mid_next(stream, in, csid);
+ __skb_unlink(pos, lobby);
+ __skb_queue_tail(&temp, pos);
+ event = sctp_skb2event(pos);
+ }
+ }
+
+ if (event) {
+ sctp_intl_retrieve_ordered(ulpq, event);
+ sctp_enqueue_event(ulpq, event);
+ }
+}
+
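+/* Abort partial delivery on every stream, for both the ordered and the
+ * unordered flavor, and flush all queues.
+ */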
+static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+ struct sctp_stream *stream = &ulpq->asoc->stream;
+ __u16 sid;
+
+ for (sid = 0; sid < stream->incnt; sid++) {
+ struct sctp_stream_in *sin = &stream->in[sid];
+ __u32 mid;
+
+ if (sin->pd_mode_uo) {
+ sin->pd_mode_uo = 0;
+
+ mid = sin->mid_uo;
+ sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
+ }
+
+ if (sin->pd_mode) {
+ sin->pd_mode = 0;
+
+ mid = sin->mid;
+ sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
+ sctp_mid_skip(stream, in, sid, mid);
+
+ sctp_intl_reap_ordered(ulpq, sid);
+ }
+ }
+
+ /* intl abort pd happens only when all data needs to be cleaned */
+ sctp_ulpq_flush(ulpq);
+}
+
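+/* Find the skip-list slot for a (stream, flags) pair; returns nskips when
+ * no entry matches yet, i.e. the position for a new entry.
+ */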
+static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
+ int nskips, __be16 stream, __u8 flags)
+{
+ int i;
+
+ for (i = 0; i < nskips; i++)
+ if (skiplist[i].stream == stream &&
+ skiplist[i].flags == flags)
+ return i;
+
+ return i;
+}
+
+#define SCTP_FTSN_U_BIT 0x1
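+/* Build an I-FORWARD-TSN chunk from the abandoned-chunk list: advance the
+ * advanced peer ack point over abandoned TSNs and collect at most ten
+ * (stream, flags, mid) skip entries, one per stream and unordered flag.
+ */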
+static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
+{
+ struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
+ struct sctp_association *asoc = q->asoc;
+ struct sctp_chunk *ftsn_chunk = NULL;
+ struct list_head *lchunk, *temp;
+ int nskips = 0, skip_pos;
+ struct sctp_chunk *chunk;
+ __u32 tsn;
+
+ if (!asoc->peer.prsctp_capable)
+ return;
+
+ if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
+ asoc->adv_peer_ack_point = ctsn;
+
+ list_for_each_safe(lchunk, temp, &q->abandoned) {
+ chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
+ tsn = ntohl(chunk->subh.data_hdr->tsn);
+
+ if (TSN_lte(tsn, ctsn)) {
+ list_del_init(lchunk);
+ sctp_chunk_free(chunk);
+ } else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
+ __be16 sid = chunk->subh.idata_hdr->stream;
+ __be32 mid = chunk->subh.idata_hdr->mid;
+ __u8 flags = 0;
+
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ flags |= SCTP_FTSN_U_BIT;
+
+ asoc->adv_peer_ack_point = tsn;
+ skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
+ sid, flags);
+ ftsn_skip_arr[skip_pos].stream = sid;
+ ftsn_skip_arr[skip_pos].reserved = 0;
+ ftsn_skip_arr[skip_pos].flags = flags;
+ ftsn_skip_arr[skip_pos].mid = mid;
+ if (skip_pos == nskips)
+ nskips++;
+ if (nskips == 10)
+ break;
+ } else {
+ break;
+ }
+ }
+
+ if (asoc->adv_peer_ack_point > ctsn)
+ ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
+ nskips, &ftsn_skip_arr[0]);
+
+ if (ftsn_chunk) {
+ list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
+ SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
+ }
+}
+
+#define _sctp_walk_ifwdtsn(pos, chunk, end) \
+ for (pos = chunk->subh.ifwdtsn_hdr->skip; \
+ (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
+
+#define sctp_walk_ifwdtsn(pos, ch) \
+ _sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
+ sizeof(struct sctp_ifwdtsn_chunk))
+
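+/* A FORWARD-TSN chunk is valid only if every skip entry references an
+ * existing incoming stream; sctp_validate_iftsn() below applies the same
+ * rule to I-FORWARD-TSN.
+ */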
+static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
+{
+ struct sctp_fwdtsn_skip *skip;
+ __u16 incnt;
+
+ if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
+ return false;
+
+ incnt = chunk->asoc->stream.incnt;
+ sctp_walk_fwdtsn(skip, chunk)
+ if (ntohs(skip->stream) >= incnt)
+ return false;
+
+ return true;
+}
+
+static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
+{
+ struct sctp_ifwdtsn_skip *skip;
+ __u16 incnt;
+
+ if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
+ return false;
+
+ incnt = chunk->asoc->stream.incnt;
+ sctp_walk_ifwdtsn(skip, chunk)
+ if (ntohs(skip->stream) >= incnt)
+ return false;
+
+ return true;
+}
+
+static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+ /* Move the Cumulative TSN Ack ahead. */
+ sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
+ /* purge the fragmentation queue */
+ sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
+ /* Abort any in progress partial delivery. */
+ sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
+}
+
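+/* Drop every queued fragment, ordered or unordered, whose TSN lies at or
+ * below the given forward TSN.
+ */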
+static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+ struct sk_buff *pos, *tmp;
+
+ skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+ struct sctp_ulpevent *event = sctp_skb2event(pos);
+ __u32 tsn = event->tsn;
+
+ if (TSN_lte(tsn, ftsn)) {
+ __skb_unlink(pos, &ulpq->reasm);
+ sctp_ulpevent_free(event);
+ }
+ }
+
+ skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
+ struct sctp_ulpevent *event = sctp_skb2event(pos);
+ __u32 tsn = event->tsn;
+
+ if (TSN_lte(tsn, ftsn)) {
+ __skb_unlink(pos, &ulpq->reasm_uo);
+ sctp_ulpevent_free(event);
+ }
+ }
+}
+
+static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+ /* Move the Cumulative TSN Ack ahead. */
+ sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
+ /* purge the fragmentation queue */
+ sctp_intl_reasm_flushtsn(ulpq, ftsn);
+ /* abort only when it's for all data */
+ if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
+ sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
+}
+
+static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
+{
+ struct sctp_fwdtsn_skip *skip;
+
+ /* Walk through all the skipped SSNs */
+ sctp_walk_fwdtsn(skip, chunk)
+ sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
+}
+
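+/* Process one skip entry of an I-FORWARD-TSN: abort any partial delivery
+ * the skipped MID would strand, advance the expected MID on ordered
+ * streams, and release whatever in the lobby has become deliverable.
+ */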
+static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
+ __u8 flags)
+{
+ struct sctp_stream_in *sin = sctp_stream_in(ulpq->asoc, sid);
+ struct sctp_stream *stream = &ulpq->asoc->stream;
+
+ if (flags & SCTP_FTSN_U_BIT) {
+ if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
+ sin->pd_mode_uo = 0;
+ sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
+ GFP_ATOMIC);
+ }
+ return;
+ }
+
+ if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
+ return;
+
+ if (sin->pd_mode) {
+ sin->pd_mode = 0;
+ sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
+ }
+
+ sctp_mid_skip(stream, in, sid, mid);
+
+ sctp_intl_reap_ordered(ulpq, sid);
+}
+
+static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
+{
+ struct sctp_ifwdtsn_skip *skip;
+
+ /* Walk through all the skipped MIDs and abort stream pd if possible */
+ sctp_walk_ifwdtsn(skip, chunk)
+ sctp_intl_skip(ulpq, ntohs(skip->stream),
+ ntohl(skip->mid), skip->flags);
+}
+
+static struct sctp_stream_interleave sctp_stream_interleave_0 = {
+ .data_chunk_len = sizeof(struct sctp_data_chunk),
+ .ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
+ /* DATA process functions */
+ .make_datafrag = sctp_make_datafrag_empty,
+ .assign_number = sctp_chunk_assign_ssn,
+ .validate_data = sctp_validate_data,
+ .ulpevent_data = sctp_ulpq_tail_data,
+ .enqueue_event = sctp_ulpq_tail_event,
+ .renege_events = sctp_ulpq_renege,
+ .start_pd = sctp_ulpq_partial_delivery,
+ .abort_pd = sctp_ulpq_abort_pd,
+ /* FORWARD-TSN process functions */
+ .generate_ftsn = sctp_generate_fwdtsn,
+ .validate_ftsn = sctp_validate_fwdtsn,
+ .report_ftsn = sctp_report_fwdtsn,
+ .handle_ftsn = sctp_handle_fwdtsn,
+};
+
+static struct sctp_stream_interleave sctp_stream_interleave_1 = {
+ .data_chunk_len = sizeof(struct sctp_idata_chunk),
+ .ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
+ /* I-DATA process functions */
+ .make_datafrag = sctp_make_idatafrag_empty,
+ .assign_number = sctp_chunk_assign_mid,
+ .validate_data = sctp_validate_idata,
+ .ulpevent_data = sctp_ulpevent_idata,
+ .enqueue_event = sctp_enqueue_event,
+ .renege_events = sctp_renege_events,
+ .start_pd = sctp_intl_start_pd,
+ .abort_pd = sctp_intl_abort_pd,
+ /* I-FORWARD-TSN process functions */
+ .generate_ftsn = sctp_generate_iftsn,
+ .validate_ftsn = sctp_validate_iftsn,
+ .report_ftsn = sctp_report_iftsn,
+ .handle_ftsn = sctp_handle_iftsn,
+};
+
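+/* Pick the operation set for the association: I-DATA based when stream
+ * interleaving (intl_enable) is on, classic DATA based otherwise.
+ */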
+void sctp_stream_interleave_init(struct sctp_stream *stream)
+{
+ struct sctp_association *asoc;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
+ : &sctp_stream_interleave_0;
+}
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index d8c162a4089c..f5fcd425232a 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -242,7 +242,8 @@ int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
{
- if (!list_is_last(&ch->frag_list, &ch->msg->chunks)) {
+ if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
+ !q->asoc->intl_enable) {
struct sctp_stream_out *sout;
__u16 sid;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index ef7ca44d6e6a..33ca5b73cdb3 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -289,6 +289,13 @@ static struct ctl_table sctp_net_table[] = {
.proc_handler = proc_sctp_do_auth,
},
{
+ .procname = "intl_enable",
+ .data = &init_net.sctp.intl_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "addr_scope_policy",
.data = &init_net.sctp.scope_policy,
.maxlen = sizeof(int),
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 1e5a22430cf5..47f82bd794d9 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -248,28 +248,37 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
-void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst = sctp_transport_dst_check(t);
+ bool change = true;
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
- pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
- __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
- /* Use default minimum segment size and disable
- * pmtu discovery on this transport.
- */
- t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
- } else {
- t->pathmtu = pmtu;
+ pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
+ __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
+ /* Use default minimum segment instead */
+ pmtu = SCTP_DEFAULT_MINSEGMENT;
}
+ pmtu = SCTP_TRUNC4(pmtu);
if (dst) {
dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
dst = sctp_transport_dst_check(t);
}
- if (!dst)
+ if (!dst) {
t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
+ dst = t->dst;
+ }
+
+ if (dst) {
+ /* Re-fetch, as underlying layers may have a higher minimum size */
+ pmtu = SCTP_TRUNC4(dst_mtu(dst));
+ change = t->pathmtu != pmtu;
+ }
+ t->pathmtu = pmtu;
+
+ return change;
}
/* Caches the dst entry and source address for a transport's destination
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 5447228bf1a0..84207ad33e8e 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -443,8 +443,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
goto fail;
/* Pull off the common chunk header and DATA header. */
- skb_pull(skb, sizeof(struct sctp_data_chunk));
- len -= sizeof(struct sctp_data_chunk);
+ skb_pull(skb, sctp_datachk_len(&asoc->stream));
+ len -= sctp_datachk_len(&asoc->stream);
/* Embed the event fields inside the cloned skb. */
event = sctp_skb2event(skb);
@@ -705,8 +705,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
sctp_ulpevent_receive_data(event, asoc);
event->stream = ntohs(chunk->subh.data_hdr->stream);
- event->ssn = ntohs(chunk->subh.data_hdr->ssn);
- event->ppid = chunk->subh.data_hdr->ppid;
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
event->flags |= SCTP_UNORDERED;
event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
@@ -732,8 +730,9 @@ fail:
* various events.
*/
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
- const struct sctp_association *asoc, __u32 indication,
- gfp_t gfp)
+ const struct sctp_association *asoc,
+ __u32 indication, __u32 sid, __u32 seq,
+ __u32 flags, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_pdapi_event *pd;
@@ -754,7 +753,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
* Currently unused.
*/
pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
- pd->pdapi_flags = 0;
+ pd->pdapi_flags = flags;
+ pd->pdapi_stream = sid;
+ pd->pdapi_seq = seq;
/* pdapi_length: 32 bits (unsigned integer)
*
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index e36ec5dd64c6..0b427100b0d4 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -60,6 +60,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
ulpq->asoc = asoc;
skb_queue_head_init(&ulpq->reasm);
+ skb_queue_head_init(&ulpq->reasm_uo);
skb_queue_head_init(&ulpq->lobby);
ulpq->pd_mode = 0;
@@ -83,6 +84,10 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
sctp_ulpevent_free(event);
}
+ while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_free(event);
+ }
}
/* Dispose of a ulpqueue. */
@@ -104,6 +109,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
if (!event)
return -ENOMEM;
+ event->ssn = ntohs(chunk->subh.data_hdr->ssn);
+ event->ppid = chunk->subh.data_hdr->ppid;
+
/* Do reassembly if needed. */
event = sctp_ulpq_reasm(ulpq, event);
@@ -328,9 +336,10 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
* payload was fragmented on the way and ip had to reassemble them.
* We add the rest of skb's to the first skb's fraglist.
*/
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
- struct sk_buff_head *queue, struct sk_buff *f_frag,
- struct sk_buff *l_frag)
+struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+ struct sk_buff_head *queue,
+ struct sk_buff *f_frag,
+ struct sk_buff *l_frag)
{
struct sk_buff *pos;
struct sk_buff *new = NULL;
@@ -853,7 +862,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
struct sctp_stream *stream;
/* Check if this message needs ordering. */
- if (SCTP_DATA_UNORDERED & event->msg_flags)
+ if (event->msg_flags & SCTP_DATA_UNORDERED)
return event;
/* Note: The stream ID must be verified before this routine. */
@@ -974,8 +983,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
sctp_ulpq_reap_ordered(ulpq, sid);
}
-static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
- struct sk_buff_head *list, __u16 needed)
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
+ __u16 needed)
{
__u16 freed = 0;
__u32 tsn, last_tsn;
@@ -1132,7 +1141,7 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
&sctp_sk(sk)->subscribe))
ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
SCTP_PARTIAL_DELIVERY_ABORTED,
- gfp);
+ 0, 0, 0, gfp);
if (ev)
__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 6451c5013e06..3583c8ab1bae 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -115,7 +115,6 @@ static int smc_release(struct socket *sock)
goto out;
smc = smc_sk(sk);
- sock_hold(sk);
if (sk->sk_state == SMC_LISTEN)
/* smc_close_non_accepted() is called and acquires
* sock lock for child sockets again
@@ -124,10 +123,7 @@ static int smc_release(struct socket *sock)
else
lock_sock(sk);
- if (smc->use_fallback) {
- sk->sk_state = SMC_CLOSED;
- sk->sk_state_change(sk);
- } else {
+ if (!smc->use_fallback) {
rc = smc_close_active(smc);
sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -136,20 +132,21 @@ static int smc_release(struct socket *sock)
sock_release(smc->clcsock);
smc->clcsock = NULL;
}
+ if (smc->use_fallback) {
+ sock_put(sk); /* passive closing */
+ sk->sk_state = SMC_CLOSED;
+ sk->sk_state_change(sk);
+ }
/* detach socket */
sock_orphan(sk);
sock->sk = NULL;
- if (smc->use_fallback) {
- schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
- } else if (sk->sk_state == SMC_CLOSED) {
+ if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
smc_conn_free(&smc->conn);
- schedule_delayed_work(&smc->sock_put_work,
- SMC_CLOSE_SOCK_PUT_DELAY);
- }
release_sock(sk);
- sock_put(sk);
+ sk->sk_prot->unhash(sk);
+ sock_put(sk); /* final sock_put */
out:
return rc;
}
@@ -181,7 +178,6 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
INIT_LIST_HEAD(&smc->accept_q);
spin_lock_init(&smc->accept_q_lock);
- INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
sk->sk_prot->hash(sk);
sk_refcnt_debug_inc(sk);
@@ -377,6 +373,15 @@ static void smc_link_save_peer_info(struct smc_link *link,
link->peer_mtu = clc->qp_mtu;
}
+static void smc_lgr_forget(struct smc_link_group *lgr)
+{
+ spin_lock_bh(&smc_lgr_list.lock);
+ /* do not use this link group for new connections */
+ if (!list_empty(&lgr->list))
+ list_del_init(&lgr->list);
+ spin_unlock_bh(&smc_lgr_list.lock);
+}
+
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc)
{
@@ -390,6 +395,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
int rc = 0;
u8 ibport;
+ sock_hold(&smc->sk); /* sock put in passive closing */
+
if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
/* peer has not signalled SMC-capability */
smc->use_fallback = true;
@@ -513,6 +520,8 @@ out_connected:
return rc ? rc : local_contact;
decline_rdma_unlock:
+ if (local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
smc_conn_free(&smc->conn);
decline_rdma:
@@ -520,15 +529,19 @@ decline_rdma:
smc->use_fallback = true;
if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
rc = smc_clc_send_decline(smc, reason_code);
- if (rc < sizeof(struct smc_clc_msg_decline))
+ if (rc < 0)
goto out_err;
}
goto out_connected;
out_err_unlock:
+ if (local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
smc_conn_free(&smc->conn);
out_err:
+ if (smc->sk.sk_state == SMC_INIT)
+ sock_put(&smc->sk); /* passive closing */
return rc;
}
@@ -581,40 +594,33 @@ out_err:
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
- struct sock *sk = &lsmc->sk;
- struct socket *new_clcsock;
+ struct socket *new_clcsock = NULL;
+ struct sock *lsk = &lsmc->sk;
struct sock *new_sk;
int rc;
- release_sock(&lsmc->sk);
- new_sk = smc_sock_alloc(sock_net(sk), NULL);
+ release_sock(lsk);
+ new_sk = smc_sock_alloc(sock_net(lsk), NULL);
if (!new_sk) {
rc = -ENOMEM;
- lsmc->sk.sk_err = ENOMEM;
+ lsk->sk_err = ENOMEM;
*new_smc = NULL;
- lock_sock(&lsmc->sk);
+ lock_sock(lsk);
goto out;
}
*new_smc = smc_sk(new_sk);
rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
- lock_sock(&lsmc->sk);
- if (rc < 0) {
- lsmc->sk.sk_err = -rc;
- new_sk->sk_state = SMC_CLOSED;
- sock_set_flag(new_sk, SOCK_DEAD);
- sk->sk_prot->unhash(new_sk);
- sock_put(new_sk);
- *new_smc = NULL;
- goto out;
- }
- if (lsmc->sk.sk_state == SMC_CLOSED) {
+ lock_sock(lsk);
+ if (rc < 0)
+ lsk->sk_err = -rc;
+ if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
if (new_clcsock)
sock_release(new_clcsock);
new_sk->sk_state = SMC_CLOSED;
sock_set_flag(new_sk, SOCK_DEAD);
- sk->sk_prot->unhash(new_sk);
- sock_put(new_sk);
+ new_sk->sk_prot->unhash(new_sk);
+ sock_put(new_sk); /* final */
*new_smc = NULL;
goto out;
}
@@ -631,7 +637,7 @@ static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
struct smc_sock *par = smc_sk(parent);
- sock_hold(sk);
+ sock_hold(sk); /* sock_put in smc_accept_unlink() */
spin_lock(&par->accept_q_lock);
list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
spin_unlock(&par->accept_q_lock);
@@ -647,7 +653,7 @@ static void smc_accept_unlink(struct sock *sk)
list_del_init(&smc_sk(sk)->accept_q);
spin_unlock(&par->accept_q_lock);
sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
- sock_put(sk);
+ sock_put(sk); /* sock_hold in smc_accept_enqueue */
}
/* remove a sock from the accept queue to bind it to a new socket created
@@ -664,8 +670,12 @@ struct sock *smc_accept_dequeue(struct sock *parent,
smc_accept_unlink(new_sk);
if (new_sk->sk_state == SMC_CLOSED) {
+ if (isk->clcsock) {
+ sock_release(isk->clcsock);
+ isk->clcsock = NULL;
+ }
new_sk->sk_prot->unhash(new_sk);
- sock_put(new_sk);
+ sock_put(new_sk); /* final */
continue;
}
if (new_sock)
@@ -680,14 +690,11 @@ void smc_close_non_accepted(struct sock *sk)
{
struct smc_sock *smc = smc_sk(sk);
- sock_hold(sk);
lock_sock(sk);
if (!sk->sk_lingertime)
/* wait for peer closing */
sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
- if (smc->use_fallback) {
- sk->sk_state = SMC_CLOSED;
- } else {
+ if (!smc->use_fallback) {
smc_close_active(smc);
sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -700,14 +707,15 @@ void smc_close_non_accepted(struct sock *sk)
sock_release(tcp);
}
if (smc->use_fallback) {
- schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
- } else if (sk->sk_state == SMC_CLOSED) {
- smc_conn_free(&smc->conn);
- schedule_delayed_work(&smc->sock_put_work,
- SMC_CLOSE_SOCK_PUT_DELAY);
+ sock_put(sk); /* passive closing */
+ sk->sk_state = SMC_CLOSED;
+ } else {
+ if (sk->sk_state == SMC_CLOSED)
+ smc_conn_free(&smc->conn);
}
release_sock(sk);
- sock_put(sk);
+ sk->sk_prot->unhash(sk);
+ sock_put(sk); /* final sock_put */
}
static int smc_serv_conf_first_link(struct smc_sock *smc)
@@ -751,14 +759,16 @@ static void smc_listen_work(struct work_struct *work)
{
struct smc_sock *new_smc = container_of(work, struct smc_sock,
smc_listen_work);
+ struct smc_clc_msg_proposal_prefix *pclc_prfx;
struct socket *newclcsock = new_smc->clcsock;
struct smc_sock *lsmc = new_smc->listen_smc;
struct smc_clc_msg_accept_confirm cclc;
int local_contact = SMC_REUSE_CONTACT;
struct sock *newsmcsk = &new_smc->sk;
- struct smc_clc_msg_proposal pclc;
+ struct smc_clc_msg_proposal *pclc;
struct smc_ib_device *smcibdev;
struct sockaddr_in peeraddr;
+ u8 buf[SMC_CLC_MAX_LEN];
struct smc_link *link;
int reason_code = 0;
int rc = 0, len;
@@ -775,7 +785,7 @@ static void smc_listen_work(struct work_struct *work)
/* do inband token exchange -
*wait for and receive SMC Proposal CLC message
*/
- reason_code = smc_clc_wait_msg(new_smc, &pclc, sizeof(pclc),
+ reason_code = smc_clc_wait_msg(new_smc, &buf, sizeof(buf),
SMC_CLC_PROPOSAL);
if (reason_code < 0)
goto out_err;
@@ -804,8 +814,11 @@ static void smc_listen_work(struct work_struct *work)
reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
goto decline_rdma;
}
- if ((pclc.outgoing_subnet != subnet) ||
- (pclc.prefix_len != prefix_len)) {
+
+ pclc = (struct smc_clc_msg_proposal *)&buf;
+ pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (pclc_prfx->outgoing_subnet != subnet ||
+ pclc_prfx->prefix_len != prefix_len) {
reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
goto decline_rdma;
}
@@ -816,7 +829,7 @@ static void smc_listen_work(struct work_struct *work)
/* allocate connection / link group */
mutex_lock(&smc_create_lgr_pending);
local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr,
- smcibdev, ibport, &pclc.lcl, 0);
+ smcibdev, ibport, &pclc->lcl, 0);
if (local_contact < 0) {
rc = local_contact;
if (rc == -ENOMEM)
@@ -879,11 +892,9 @@ static void smc_listen_work(struct work_struct *work)
}
/* QP confirmation over RoCE fabric */
reason_code = smc_serv_conf_first_link(new_smc);
- if (reason_code < 0) {
+ if (reason_code < 0)
/* peer is not aware of a problem */
- rc = reason_code;
goto out_err_unlock;
- }
if (reason_code > 0)
goto decline_rdma_unlock;
}
@@ -910,21 +921,26 @@ enqueue:
return;
decline_rdma_unlock:
+ if (local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(new_smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
decline_rdma:
/* RDMA setup failed, switch back to TCP */
smc_conn_free(&new_smc->conn);
new_smc->use_fallback = true;
if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
- rc = smc_clc_send_decline(new_smc, reason_code);
- if (rc < sizeof(struct smc_clc_msg_decline))
+ if (smc_clc_send_decline(new_smc, reason_code) < 0)
goto out_err;
}
goto out_connected;
out_err_unlock:
+ if (local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(new_smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
out_err:
+ if (newsmcsk->sk_state == SMC_INIT)
+ sock_put(&new_smc->sk); /* passive closing */
newsmcsk->sk_state = SMC_CLOSED;
smc_conn_free(&new_smc->conn);
goto enqueue; /* queue new sock with sk_err set */
@@ -934,11 +950,12 @@ static void smc_tcp_listen_work(struct work_struct *work)
{
struct smc_sock *lsmc = container_of(work, struct smc_sock,
tcp_listen_work);
+ struct sock *lsk = &lsmc->sk;
struct smc_sock *new_smc;
int rc = 0;
- lock_sock(&lsmc->sk);
- while (lsmc->sk.sk_state == SMC_LISTEN) {
+ lock_sock(lsk);
+ while (lsk->sk_state == SMC_LISTEN) {
rc = smc_clcsock_accept(lsmc, &new_smc);
if (rc)
goto out;
@@ -947,15 +964,25 @@ static void smc_tcp_listen_work(struct work_struct *work)
new_smc->listen_smc = lsmc;
new_smc->use_fallback = false; /* assume rdma capability first */
- sock_hold(&lsmc->sk); /* sock_put in smc_listen_work */
+ sock_hold(lsk); /* sock_put in smc_listen_work */
INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
smc_copy_sock_settings_to_smc(new_smc);
- schedule_work(&new_smc->smc_listen_work);
+ sock_hold(&new_smc->sk); /* sock_put in passive closing */
+ if (!schedule_work(&new_smc->smc_listen_work))
+ sock_put(&new_smc->sk);
}
out:
- release_sock(&lsmc->sk);
- lsmc->sk.sk_data_ready(&lsmc->sk); /* no more listening, wake accept */
+ if (lsmc->clcsock) {
+ sock_release(lsmc->clcsock);
+ lsmc->clcsock = NULL;
+ }
+ release_sock(lsk);
+ /* no more listening, wake up smc_close_wait_listen_clcsock and
+ * accept
+ */
+ lsk->sk_state_change(lsk);
+ sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}
static int smc_listen(struct socket *sock, int backlog)
@@ -989,7 +1016,9 @@ static int smc_listen(struct socket *sock, int backlog)
sk->sk_ack_backlog = 0;
sk->sk_state = SMC_LISTEN;
INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
- schedule_work(&smc->tcp_listen_work);
+ sock_hold(sk); /* sock_hold in tcp_listen_worker */
+ if (!schedule_work(&smc->tcp_listen_work))
+ sock_put(sk);
out:
release_sock(sk);
@@ -1006,6 +1035,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
int rc = 0;
lsmc = smc_sk(sk);
+ sock_hold(sk); /* sock_put below */
lock_sock(sk);
if (lsmc->sk.sk_state != SMC_LISTEN) {
@@ -1040,6 +1070,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
out:
release_sock(sk);
+ sock_put(sk); /* sock_hold above */
return rc;
}
@@ -1107,36 +1138,36 @@ out:
return rc;
}
-static unsigned int smc_accept_poll(struct sock *parent)
+static __poll_t smc_accept_poll(struct sock *parent)
{
- struct smc_sock *isk;
- struct sock *sk;
+ struct smc_sock *isk = smc_sk(parent);
+ int mask = 0;
- lock_sock(parent);
- list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) {
- sk = (struct sock *)isk;
+ spin_lock(&isk->accept_q_lock);
+ if (!list_empty(&isk->accept_q))
+ mask = POLLIN | POLLRDNORM;
+ spin_unlock(&isk->accept_q_lock);
- if (sk->sk_state == SMC_ACTIVE) {
- release_sock(parent);
- return POLLIN | POLLRDNORM;
- }
- }
- release_sock(parent);
-
- return 0;
+ return mask;
}
-static unsigned int smc_poll(struct file *file, struct socket *sock,
+static __poll_t smc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct smc_sock *smc;
int rc;
+ if (!sk)
+ return POLLNVAL;
+
smc = smc_sk(sock->sk);
+ sock_hold(sk);
+ lock_sock(sk);
if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
/* delegate to CLC child sock */
+ release_sock(sk);
mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
/* if non-blocking connect finished ... */
lock_sock(sk);
@@ -1148,37 +1179,43 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
rc = smc_connect_rdma(smc);
if (rc < 0)
mask |= POLLERR;
- else
- /* success cases including fallback */
- mask |= POLLOUT | POLLWRNORM;
+ /* success cases including fallback */
+ mask |= POLLOUT | POLLWRNORM;
}
}
- release_sock(sk);
} else {
- sock_poll_wait(file, sk_sleep(sk), wait);
- if (sk->sk_state == SMC_LISTEN)
- /* woken up by sk_data_ready in smc_listen_work() */
- mask |= smc_accept_poll(sk);
+ if (sk->sk_state != SMC_CLOSED) {
+ release_sock(sk);
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ lock_sock(sk);
+ }
if (sk->sk_err)
mask |= POLLERR;
- if (atomic_read(&smc->conn.sndbuf_space) ||
- (sk->sk_shutdown & SEND_SHUTDOWN)) {
- mask |= POLLOUT | POLLWRNORM;
- } else {
- sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- }
- if (atomic_read(&smc->conn.bytes_to_rcv))
- mask |= POLLIN | POLLRDNORM;
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
(sk->sk_state == SMC_CLOSED))
mask |= POLLHUP;
- if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLIN | POLLRDNORM | POLLRDHUP;
- if (sk->sk_state == SMC_APPCLOSEWAIT1)
- mask |= POLLIN;
+ if (sk->sk_state == SMC_LISTEN) {
+ /* woken up by sk_data_ready in smc_listen_work() */
+ mask = smc_accept_poll(sk);
+ } else {
+ if (atomic_read(&smc->conn.sndbuf_space) ||
+ sk->sk_shutdown & SEND_SHUTDOWN) {
+ mask |= POLLOUT | POLLWRNORM;
+ } else {
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ }
+ if (atomic_read(&smc->conn.bytes_to_rcv))
+ mask |= POLLIN | POLLRDNORM;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+ if (sk->sk_state == SMC_APPCLOSEWAIT1)
+ mask |= POLLIN;
+ }
}
+ release_sock(sk);
+ sock_put(sk);
return mask;
}
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 0bee9d16cf29..9518986c97b1 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -178,7 +178,6 @@ struct smc_sock { /* smc sock container */
struct work_struct smc_listen_work;/* prepare new accept socket */
struct list_head accept_q; /* sockets to be accepted */
spinlock_t accept_q_lock; /* protects accept_q */
- struct delayed_work sock_put_work; /* final socket freeing */
bool use_fallback; /* fallback to tcp */
u8 wait_close_tx_prepared : 1;
/* shutdown wr or close
@@ -253,12 +252,12 @@ static inline int smc_uncompress_bufsize(u8 compressed)
static inline bool using_ipsec(struct smc_sock *smc)
{
return (smc->clcsock->sk->sk_policy[0] ||
- smc->clcsock->sk->sk_policy[1]) ? 1 : 0;
+ smc->clcsock->sk->sk_policy[1]) ? true : false;
}
#else
static inline bool using_ipsec(struct smc_sock *smc)
{
- return 0;
+ return false;
}
#endif
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 87f7bede6eab..3cd086e5bd28 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -57,9 +57,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
cdcpend->conn);
}
smc_tx_sndbuf_nonfull(smc);
- if (smc->sk.sk_state != SMC_ACTIVE)
- /* wake up smc_close_wait_tx_pends() */
- smc->sk.sk_state_change(&smc->sk);
bh_unlock_sock(&smc->sk);
}
@@ -68,9 +65,14 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
struct smc_cdc_tx_pend **pend)
{
struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+ int rc;
- return smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
- (struct smc_wr_tx_pend_priv **)pend);
+ rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
+ (struct smc_wr_tx_pend_priv **)pend);
+ if (!conn->alert_token_local)
+ /* abnormal termination */
+ rc = -EPIPE;
+ return rc;
}
static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
@@ -155,14 +157,6 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
(unsigned long)conn);
}
-bool smc_cdc_tx_has_pending(struct smc_connection *conn)
-{
- struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
-
- return smc_wr_tx_has_pending(link, SMC_CDC_MSG_TYPE,
- smc_cdc_tx_filter, (unsigned long)conn);
-}
-
/********************************* receive ***********************************/
static inline bool smc_cdc_before(u16 seq1, u16 seq2)
@@ -213,6 +207,17 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
/* guarantee 0 <= bytes_to_rcv <= rmbe_size */
smp_mb__after_atomic();
smc->sk.sk_data_ready(&smc->sk);
+ } else if ((conn->local_rx_ctrl.prod_flags.write_blocked) ||
+ (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req)) {
+ smc->sk.sk_data_ready(&smc->sk);
+ }
+
+ /* piggy backed tx info */
+ /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
+ if (diff_cons && smc_tx_prepared_sends(conn)) {
+ smc_tx_sndbuf_nonempty(conn);
+ /* trigger socket release if connection closed */
+ smc_close_wake_tx_prepared(smc);
}
if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
@@ -224,25 +229,10 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
if (smc->clcsock && smc->clcsock->sk)
smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
sock_set_flag(&smc->sk, SOCK_DONE);
- schedule_work(&conn->close_work);
+ sock_hold(&smc->sk); /* sock_put in close_work */
+ if (!schedule_work(&conn->close_work))
+ sock_put(&smc->sk);
}
-
- /* piggy backed tx info */
- /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
- if (diff_cons && smc_tx_prepared_sends(conn)) {
- smc_tx_sndbuf_nonempty(conn);
- /* trigger socket release if connection closed */
- smc_close_wake_tx_prepared(smc);
- }
-
- /* socket connected but not accepted */
- if (!smc->sk.sk_socket)
- return;
-
- /* data available */
- if ((conn->local_rx_ctrl.prod_flags.write_blocked) ||
- (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req))
- smc_tx_consumer_update(conn);
}
/* called under tasklet context */
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 149ceda1b088..ab240b37ad11 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -214,7 +214,6 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
struct smc_cdc_tx_pend *pend);
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
-bool smc_cdc_tx_has_pending(struct smc_connection *conn);
int smc_cdc_init(void) __init;
#endif /* SMC_CDC_H */
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 1800e16b2a02..8ac51583a063 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -22,6 +22,54 @@
#include "smc_clc.h"
#include "smc_ib.h"
+/* check if received message has a correct header length and contains valid
+ * leading and trailing eyecatchers
+ */
+static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
+{
+ struct smc_clc_msg_proposal_prefix *pclc_prfx;
+ struct smc_clc_msg_accept_confirm *clc;
+ struct smc_clc_msg_proposal *pclc;
+ struct smc_clc_msg_decline *dclc;
+ struct smc_clc_msg_trail *trl;
+
+ if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+ return false;
+ switch (clcm->type) {
+ case SMC_CLC_PROPOSAL:
+ pclc = (struct smc_clc_msg_proposal *)clcm;
+ pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (ntohs(pclc->hdr.length) !=
+ sizeof(*pclc) + ntohs(pclc->iparea_offset) +
+ sizeof(*pclc_prfx) +
+ pclc_prfx->ipv6_prefixes_cnt *
+ sizeof(struct smc_clc_ipv6_prefix) +
+ sizeof(*trl))
+ return false;
+ trl = (struct smc_clc_msg_trail *)
+ ((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
+ break;
+ case SMC_CLC_ACCEPT:
+ case SMC_CLC_CONFIRM:
+ clc = (struct smc_clc_msg_accept_confirm *)clcm;
+ if (ntohs(clc->hdr.length) != sizeof(*clc))
+ return false;
+ trl = &clc->trl;
+ break;
+ case SMC_CLC_DECLINE:
+ dclc = (struct smc_clc_msg_decline *)clcm;
+ if (ntohs(dclc->hdr.length) != sizeof(*dclc))
+ return false;
+ trl = &dclc->trl;
+ break;
+ default:
+ return false;
+ }
+ if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+ return false;
+ return true;
+}
+
/* Wait for data on the tcp-socket, analyze received data
* Returns:
* 0 if success and it was not a decline that we received.
@@ -35,7 +83,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
struct smc_clc_msg_hdr *clcm = buf;
struct msghdr msg = {NULL, 0};
int reason_code = 0;
- struct kvec vec;
+ struct kvec vec = {buf, buflen};
int len, datlen;
int krflags;
@@ -43,12 +91,15 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
* so we don't consume any subsequent CLC message or payload data
* in the TCP byte stream
*/
- vec.iov_base = buf;
- vec.iov_len = buflen;
+ /*
+ * Caller must make sure that buflen is no less than
+ * sizeof(struct smc_clc_msg_hdr)
+ */
krflags = MSG_PEEK | MSG_WAITALL;
smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
- len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1,
- sizeof(struct smc_clc_msg_hdr), krflags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1,
+ sizeof(struct smc_clc_msg_hdr));
+ len = sock_recvmsg(smc->clcsock, &msg, krflags);
if (signal_pending(current)) {
reason_code = -EINTR;
clc_sk->sk_err = EINTR;
@@ -72,9 +123,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
datlen = ntohs(clcm->length);
if ((len < sizeof(struct smc_clc_msg_hdr)) ||
- (datlen < sizeof(struct smc_clc_msg_decline)) ||
- (datlen > sizeof(struct smc_clc_msg_accept_confirm)) ||
- memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) ||
+ (datlen > buflen) ||
((clcm->type != SMC_CLC_DECLINE) &&
(clcm->type != expected_type))) {
smc->sk.sk_err = EPROTO;
@@ -83,13 +132,12 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
/* receive the complete CLC message */
- vec.iov_base = buf;
- vec.iov_len = buflen;
memset(&msg, 0, sizeof(struct msghdr));
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen);
krflags = MSG_WAITALL;
smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
- len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1, datlen, krflags);
- if (len < datlen) {
+ len = sock_recvmsg(smc->clcsock, &msg, krflags);
+ if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
smc->sk.sk_err = EPROTO;
reason_code = -EPROTO;
goto out;
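Both receive passes now prime msg.msg_iter once with iov_iter_kvec() and call sock_recvmsg() directly instead of kernel_recvmsg(): the first pass uses MSG_PEEK so the header stays queued, the second consumes the full, now length-validated message from the start. A condensed sketch of the pattern, using the 4.15-era iov_iter_kvec() signature seen in the hunk (kernel context assumed, not a standalone program):

static int peek_then_read(struct socket *sock, void *buf, size_t buflen,
                          size_t hdrlen)
{
    struct kvec vec = {buf, buflen};
    struct msghdr msg = {NULL, 0};
    int len;

    /* pass 1: MSG_PEEK leaves the bytes queued on the socket */
    iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, hdrlen);
    len = sock_recvmsg(sock, &msg, MSG_PEEK | MSG_WAITALL);
    if (len < (int)hdrlen)
        return -EPROTO;

    /* ... validate the header, derive the real length <= buflen ... */

    /* pass 2: consume the whole message from the start */
    memset(&msg, 0, sizeof(msg));
    iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen);
    return sock_recvmsg(sock, &msg, MSG_WAITALL);
}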
@@ -133,7 +181,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
smc->sk.sk_err = EPROTO;
if (len < 0)
smc->sk.sk_err = -len;
- return len;
+ return sock_error(&smc->sk);
}
/* send CLC PROPOSAL message across internal TCP socket */
@@ -141,33 +189,43 @@ int smc_clc_send_proposal(struct smc_sock *smc,
struct smc_ib_device *smcibdev,
u8 ibport)
{
+ struct smc_clc_msg_proposal_prefix pclc_prfx;
struct smc_clc_msg_proposal pclc;
+ struct smc_clc_msg_trail trl;
int reason_code = 0;
+ struct kvec vec[3];
struct msghdr msg;
- struct kvec vec;
- int len, rc;
+ int len, plen, rc;
/* send SMC Proposal CLC message */
+ plen = sizeof(pclc) + sizeof(pclc_prfx) + sizeof(trl);
memset(&pclc, 0, sizeof(pclc));
memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
pclc.hdr.type = SMC_CLC_PROPOSAL;
- pclc.hdr.length = htons(sizeof(pclc));
+ pclc.hdr.length = htons(plen);
pclc.hdr.version = SMC_CLC_V1; /* SMC version */
memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
+ pclc.iparea_offset = htons(0);
+ memset(&pclc_prfx, 0, sizeof(pclc_prfx));
/* determine subnet and mask from internal TCP socket */
- rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc.outgoing_subnet,
- &pclc.prefix_len);
+ rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc_prfx.outgoing_subnet,
+ &pclc_prfx.prefix_len);
if (rc)
return SMC_CLC_DECL_CNFERR; /* configuration error */
- memcpy(pclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+ pclc_prfx.ipv6_prefixes_cnt = 0;
+ memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
memset(&msg, 0, sizeof(msg));
- vec.iov_base = &pclc;
- vec.iov_len = sizeof(pclc);
+ vec[0].iov_base = &pclc;
+ vec[0].iov_len = sizeof(pclc);
+ vec[1].iov_base = &pclc_prfx;
+ vec[1].iov_len = sizeof(pclc_prfx);
+ vec[2].iov_base = &trl;
+ vec[2].iov_len = sizeof(trl);
/* due to the few bytes needed for clc-handshake this cannot block */
- len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(pclc));
+ len = kernel_sendmsg(smc->clcsock, &msg, vec, 3, plen);
if (len < sizeof(pclc)) {
if (len >= 0) {
reason_code = -ENETUNREACH;
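The proposal now goes out as three gathered segments - fixed header, prefix area, trailer - in a single kernel_sendmsg() call, so the variable-length wire format never needs one contiguous buffer. A userspace analogue with writev() (struct layouts and the ASCII "SMCR" magic are stand-ins; the real eyecatcher is EBCDIC):

#include <arpa/inet.h>
#include <string.h>
#include <sys/uio.h>

struct hdr { char eye[4]; unsigned char type; unsigned short length; };
struct pfx { unsigned int subnet; unsigned char plen, rsvd[2], v6cnt; };
struct trl { char eye[4]; };

static ssize_t send_proposal(int fd)
{
    struct hdr h; struct pfx p; struct trl t;
    struct iovec vec[3] = {
        { &h, sizeof(h) }, { &p, sizeof(p) }, { &t, sizeof(t) },
    };

    memset(&h, 0, sizeof(h));
    memset(&p, 0, sizeof(p));               /* no prefixes advertised */
    memcpy(h.eye, "SMCR", 4);
    memcpy(t.eye, "SMCR", 4);
    h.length = htons(sizeof(h) + sizeof(p) + sizeof(t));
    return writev(fd, vec, 3);              /* one call, three segments */
}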
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 12a9af1539a2..c145a0f36a68 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -44,7 +44,7 @@ struct smc_clc_msg_hdr { /* header1 of clc messages */
#if defined(__BIG_ENDIAN_BITFIELD)
u8 version : 4,
flag : 1,
- rsvd : 3;
+ rsvd : 3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u8 rsvd : 3,
flag : 1,
@@ -62,17 +62,31 @@ struct smc_clc_msg_local { /* header2 of clc messages */
u8 mac[6]; /* mac of ib_device port */
};
-struct smc_clc_msg_proposal { /* clc proposal message */
- struct smc_clc_msg_hdr hdr;
- struct smc_clc_msg_local lcl;
- __be16 iparea_offset; /* offset to IP address information area */
+struct smc_clc_ipv6_prefix {
+ u8 prefix[4];
+ u8 prefix_len;
+} __packed;
+
+struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/
__be32 outgoing_subnet; /* subnet mask */
u8 prefix_len; /* number of significant bits in mask */
u8 reserved[2];
u8 ipv6_prefixes_cnt; /* number of IPv6 prefixes in prefix array */
- struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
} __aligned(4);
+struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
+ struct smc_clc_msg_hdr hdr;
+ struct smc_clc_msg_local lcl;
+ __be16 iparea_offset; /* offset to IP address information area */
+} __aligned(4);
+
+#define SMC_CLC_PROPOSAL_MAX_OFFSET 0x28
+#define SMC_CLC_PROPOSAL_MAX_PREFIX (8 * sizeof(struct smc_clc_ipv6_prefix))
+#define SMC_CLC_MAX_LEN (sizeof(struct smc_clc_msg_proposal) + \
+ SMC_CLC_PROPOSAL_MAX_OFFSET + \
+ SMC_CLC_PROPOSAL_MAX_PREFIX + \
+ sizeof(struct smc_clc_msg_trail))
+
struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */
struct smc_clc_msg_hdr hdr;
struct smc_clc_msg_local lcl;
@@ -102,6 +116,14 @@ struct smc_clc_msg_decline { /* clc decline message */
struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
} __aligned(4);
+/* determine start of the prefix area within the proposal message */
+static inline struct smc_clc_msg_proposal_prefix *
+smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
+{
+ return (struct smc_clc_msg_proposal_prefix *)
+ ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
+}
+
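Since hdr.length is now variable, a receiver that may get a proposal should size its buffer for the largest legal message; a hypothetical caller, assuming smc_clc_wait_msg() keeps the (smc, buf, buflen, expected_type) parameter order used in the hunks above:

static int recv_any_clc(struct smc_sock *smc)
{
    u8 buf[SMC_CLC_MAX_LEN];    /* header + max iparea + prefixes + trailer */

    return smc_clc_wait_msg(smc, buf, sizeof(buf), SMC_CLC_PROPOSAL);
}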
struct smc_sock;
struct smc_ib_device;
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 48615d2ac4aa..e339c0186dcf 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -19,7 +19,7 @@
#include "smc_cdc.h"
#include "smc_close.h"
-#define SMC_CLOSE_WAIT_TX_PENDS_TIME (5 * HZ)
+#define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ)
static void smc_close_cleanup_listen(struct sock *parent)
{
@@ -30,23 +30,24 @@ static void smc_close_cleanup_listen(struct sock *parent)
smc_close_non_accepted(sk);
}
-static void smc_close_wait_tx_pends(struct smc_sock *smc)
+static void smc_close_wait_listen_clcsock(struct smc_sock *smc)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = &smc->sk;
signed long timeout;
- timeout = SMC_CLOSE_WAIT_TX_PENDS_TIME;
+ timeout = SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME;
add_wait_queue(sk_sleep(sk), &wait);
- while (!signal_pending(current) && timeout) {
- int rc;
-
- rc = sk_wait_event(sk, &timeout,
- !smc_cdc_tx_has_pending(&smc->conn),
- &wait);
- if (rc)
+ do {
+ release_sock(sk);
+ if (smc->clcsock)
+ timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE,
+ timeout);
+ sched_annotate_sleep();
+ lock_sock(sk);
+ if (!smc->clcsock)
break;
- }
+ } while (timeout);
remove_wait_queue(sk_sleep(sk), &wait);
}
@@ -111,58 +112,63 @@ static int smc_close_abort(struct smc_connection *conn)
}
/* terminate smc socket abnormally - active abort
- * RDMA communication no longer possible
+ * link group is terminated, i.e. RDMA communication no longer possible
*/
-void smc_close_active_abort(struct smc_sock *smc)
+static void smc_close_active_abort(struct smc_sock *smc)
{
+ struct sock *sk = &smc->sk;
+
struct smc_cdc_conn_state_flags *txflags =
&smc->conn.local_tx_ctrl.conn_state_flags;
- smc->sk.sk_err = ECONNABORTED;
+ sk->sk_err = ECONNABORTED;
if (smc->clcsock && smc->clcsock->sk) {
smc->clcsock->sk->sk_err = ECONNABORTED;
smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
}
- switch (smc->sk.sk_state) {
+ switch (sk->sk_state) {
case SMC_INIT:
case SMC_ACTIVE:
- smc->sk.sk_state = SMC_PEERABORTWAIT;
+ sk->sk_state = SMC_PEERABORTWAIT;
+ release_sock(sk);
+ cancel_delayed_work_sync(&smc->conn.tx_work);
+ lock_sock(sk);
+ sock_put(sk); /* passive closing */
break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
- txflags->peer_conn_abort = 1;
- sock_release(smc->clcsock);
if (!smc_cdc_rxed_any_close(&smc->conn))
- smc->sk.sk_state = SMC_PEERABORTWAIT;
+ sk->sk_state = SMC_PEERABORTWAIT;
else
- smc->sk.sk_state = SMC_CLOSED;
+ sk->sk_state = SMC_CLOSED;
+ release_sock(sk);
+ cancel_delayed_work_sync(&smc->conn.tx_work);
+ lock_sock(sk);
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
if (!txflags->peer_conn_closed) {
- smc->sk.sk_state = SMC_PEERABORTWAIT;
- txflags->peer_conn_abort = 1;
- sock_release(smc->clcsock);
+ /* just SHUTDOWN_SEND done */
+ sk->sk_state = SMC_PEERABORTWAIT;
} else {
- smc->sk.sk_state = SMC_CLOSED;
+ sk->sk_state = SMC_CLOSED;
}
+ sock_put(sk); /* passive closing */
break;
case SMC_PROCESSABORT:
case SMC_APPFINCLOSEWAIT:
- if (!txflags->peer_conn_closed) {
- txflags->peer_conn_abort = 1;
- sock_release(smc->clcsock);
- }
- smc->sk.sk_state = SMC_CLOSED;
+ sk->sk_state = SMC_CLOSED;
break;
case SMC_PEERFINCLOSEWAIT:
+ sock_put(sk); /* passive closing */
+ break;
case SMC_PEERABORTWAIT:
case SMC_CLOSED:
break;
}
- sock_set_flag(&smc->sk, SOCK_DEAD);
- smc->sk.sk_state_change(&smc->sk);
+ sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_state_change(sk);
}
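The release_sock()/lock_sock() bracket around cancel_delayed_work_sync() above is load-bearing: the synchronous cancel waits for a running tx_work instance, and that worker takes lock_sock() itself, so cancelling while still holding the lock could deadlock. The pattern in isolation:

/* cancel a work item that itself takes lock_sock(): drop the lock for
 * the synchronous cancel, then retake it
 */
release_sock(sk);
cancel_delayed_work_sync(&smc->conn.tx_work);   /* may wait for worker */
lock_sock(sk);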
static inline bool smc_close_sent_any_close(struct smc_connection *conn)
@@ -185,13 +191,11 @@ int smc_close_active(struct smc_sock *smc)
0 : sock_flag(sk, SOCK_LINGER) ?
sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
-again:
old_state = sk->sk_state;
- switch (old_state) {
+again:
+ switch (sk->sk_state) {
case SMC_INIT:
sk->sk_state = SMC_CLOSED;
- if (smc->smc_listen_work.func)
- cancel_work_sync(&smc->smc_listen_work);
break;
case SMC_LISTEN:
sk->sk_state = SMC_CLOSED;
@@ -200,11 +204,9 @@ again:
rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
/* wake up kernel_accept of smc_tcp_listen_worker */
smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
+ smc_close_wait_listen_clcsock(smc);
}
- release_sock(sk);
smc_close_cleanup_listen(sk);
- cancel_work_sync(&smc->smc_listen_work);
- lock_sock(sk);
break;
case SMC_ACTIVE:
smc_close_stream_wait(smc, timeout);
@@ -214,6 +216,8 @@ again:
if (sk->sk_state == SMC_ACTIVE) {
/* send close request */
rc = smc_close_final(conn);
+ if (rc)
+ break;
sk->sk_state = SMC_PEERCLOSEWAIT1;
} else {
/* peer event has changed the state */
@@ -226,9 +230,10 @@ again:
!smc_close_sent_any_close(conn)) {
/* just shutdown wr done, send close request */
rc = smc_close_final(conn);
+ if (rc)
+ break;
}
sk->sk_state = SMC_CLOSED;
- smc_close_wait_tx_pends(smc);
break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
@@ -237,19 +242,21 @@ again:
release_sock(sk);
cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
- if (sk->sk_err != ECONNABORTED) {
- /* confirm close from peer */
- rc = smc_close_final(conn);
- if (rc)
- break;
- }
- if (smc_cdc_rxed_any_close(conn))
+ if (sk->sk_state != SMC_APPCLOSEWAIT1 &&
+ sk->sk_state != SMC_APPCLOSEWAIT2)
+ goto again;
+ /* confirm close from peer */
+ rc = smc_close_final(conn);
+ if (rc)
+ break;
+ if (smc_cdc_rxed_any_close(conn)) {
/* peer has closed the socket already */
sk->sk_state = SMC_CLOSED;
- else
+ sock_put(sk); /* postponed passive closing */
+ } else {
/* peer has just issued a shutdown write */
sk->sk_state = SMC_PEERFINCLOSEWAIT;
- smc_close_wait_tx_pends(smc);
+ }
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
@@ -257,6 +264,8 @@ again:
!smc_close_sent_any_close(conn)) {
/* just shutdown wr done, send close request */
rc = smc_close_final(conn);
+ if (rc)
+ break;
}
/* peer sending PeerConnectionClosed will cause transition */
break;
@@ -264,12 +273,8 @@ again:
/* peer sending PeerConnectionClosed will cause transition */
break;
case SMC_PROCESSABORT:
- release_sock(sk);
- cancel_delayed_work_sync(&conn->tx_work);
- lock_sock(sk);
smc_close_abort(conn);
sk->sk_state = SMC_CLOSED;
- smc_close_wait_tx_pends(smc);
break;
case SMC_PEERABORTWAIT:
case SMC_CLOSED:
@@ -278,7 +283,7 @@ again:
}
if (old_state != sk->sk_state)
- sk->sk_state_change(&smc->sk);
+ sk->sk_state_change(sk);
return rc;
}
@@ -289,37 +294,42 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
struct sock *sk = &smc->sk;
switch (sk->sk_state) {
+ case SMC_INIT:
case SMC_ACTIVE:
- case SMC_APPFINCLOSEWAIT:
case SMC_APPCLOSEWAIT1:
- case SMC_APPCLOSEWAIT2:
- smc_close_abort(&smc->conn);
+ sk->sk_state = SMC_PROCESSABORT;
+ sock_put(sk); /* passive closing */
+ break;
+ case SMC_APPFINCLOSEWAIT:
sk->sk_state = SMC_PROCESSABORT;
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
if (txflags->peer_done_writing &&
- !smc_close_sent_any_close(&smc->conn)) {
+ !smc_close_sent_any_close(&smc->conn))
/* just shutdown, but not yet closed locally */
- smc_close_abort(&smc->conn);
sk->sk_state = SMC_PROCESSABORT;
- } else {
+ else
sk->sk_state = SMC_CLOSED;
- }
+ sock_put(sk); /* passive closing */
break;
+ case SMC_APPCLOSEWAIT2:
case SMC_PEERFINCLOSEWAIT:
+ sk->sk_state = SMC_CLOSED;
+ sock_put(sk); /* passive closing */
+ break;
case SMC_PEERABORTWAIT:
sk->sk_state = SMC_CLOSED;
break;
- case SMC_INIT:
case SMC_PROCESSABORT:
/* nothing to do, add tracing in future patch */
break;
}
}
-/* Some kind of closing has been received: peer_conn_closed, peer_conn_abort,
- * or peer_done_writing.
+/* Either some kind of closing has been received (peer_conn_closed,
+ * peer_conn_abort, or peer_done_writing), or the link group of the
+ * connection has terminated abnormally.
*/
static void smc_close_passive_work(struct work_struct *work)
{
@@ -331,7 +341,7 @@ static void smc_close_passive_work(struct work_struct *work)
struct sock *sk = &smc->sk;
int old_state;
- lock_sock(&smc->sk);
+ lock_sock(sk);
old_state = sk->sk_state;
if (!conn->alert_token_local) {
@@ -340,23 +350,32 @@ static void smc_close_passive_work(struct work_struct *work)
goto wakeup;
}
- rxflags = &smc->conn.local_rx_ctrl.conn_state_flags;
+ rxflags = &conn->local_rx_ctrl.conn_state_flags;
if (rxflags->peer_conn_abort) {
+ /* peer has not received all data */
smc_close_passive_abort_received(smc);
+ release_sock(&smc->sk);
+ cancel_delayed_work_sync(&conn->tx_work);
+ lock_sock(&smc->sk);
goto wakeup;
}
switch (sk->sk_state) {
case SMC_INIT:
- if (atomic_read(&smc->conn.bytes_to_rcv) ||
+ if (atomic_read(&conn->bytes_to_rcv) ||
(rxflags->peer_done_writing &&
- !smc_cdc_rxed_any_close(conn)))
+ !smc_cdc_rxed_any_close(conn))) {
sk->sk_state = SMC_APPCLOSEWAIT1;
- else
+ } else {
sk->sk_state = SMC_CLOSED;
+ sock_put(sk); /* passive closing */
+ }
break;
case SMC_ACTIVE:
sk->sk_state = SMC_APPCLOSEWAIT1;
+ /* postpone sock_put() for passive closing to cover
+ * received SEND_SHUTDOWN as well
+ */
break;
case SMC_PEERCLOSEWAIT1:
if (rxflags->peer_done_writing)
@@ -364,8 +383,7 @@ static void smc_close_passive_work(struct work_struct *work)
/* fall through */
/* to check for closing */
case SMC_PEERCLOSEWAIT2:
- case SMC_PEERFINCLOSEWAIT:
- if (!smc_cdc_rxed_any_close(&smc->conn))
+ if (!smc_cdc_rxed_any_close(conn))
break;
if (sock_flag(sk, SOCK_DEAD) &&
smc_close_sent_any_close(conn)) {
@@ -375,9 +393,20 @@ static void smc_close_passive_work(struct work_struct *work)
/* just shutdown, but not yet closed locally */
sk->sk_state = SMC_APPFINCLOSEWAIT;
}
+ sock_put(sk); /* passive closing */
+ break;
+ case SMC_PEERFINCLOSEWAIT:
+ if (smc_cdc_rxed_any_close(conn)) {
+ sk->sk_state = SMC_CLOSED;
+ sock_put(sk); /* passive closing */
+ }
break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
+ /* postpone sock_put() for passive closing to cover
+ * received SEND_SHUTDOWN as well
+ */
+ break;
case SMC_APPFINCLOSEWAIT:
case SMC_PEERABORTWAIT:
case SMC_PROCESSABORT:
@@ -393,23 +422,11 @@ wakeup:
if (old_state != sk->sk_state) {
sk->sk_state_change(sk);
if ((sk->sk_state == SMC_CLOSED) &&
- (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
- smc_conn_free(&smc->conn);
- schedule_delayed_work(&smc->sock_put_work,
- SMC_CLOSE_SOCK_PUT_DELAY);
- }
+ (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket))
+ smc_conn_free(conn);
}
- release_sock(&smc->sk);
-}
-
-void smc_close_sock_put_work(struct work_struct *work)
-{
- struct smc_sock *smc = container_of(to_delayed_work(work),
- struct smc_sock,
- sock_put_work);
-
- smc->sk.sk_prot->unhash(&smc->sk);
- sock_put(&smc->sk);
+ release_sock(sk);
+ sock_put(sk); /* sock_hold done by schedulers of close_work */
}
int smc_close_shutdown_write(struct smc_sock *smc)
@@ -424,20 +441,21 @@ int smc_close_shutdown_write(struct smc_sock *smc)
0 : sock_flag(sk, SOCK_LINGER) ?
sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
-again:
old_state = sk->sk_state;
- switch (old_state) {
+again:
+ switch (sk->sk_state) {
case SMC_ACTIVE:
smc_close_stream_wait(smc, timeout);
release_sock(sk);
cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
+ if (sk->sk_state != SMC_ACTIVE)
+ goto again;
/* send close wr request */
rc = smc_close_wr(conn);
- if (sk->sk_state == SMC_ACTIVE)
- sk->sk_state = SMC_PEERCLOSEWAIT1;
- else
- goto again;
+ if (rc)
+ break;
+ sk->sk_state = SMC_PEERCLOSEWAIT1;
break;
case SMC_APPCLOSEWAIT1:
/* passive close */
@@ -446,8 +464,12 @@ again:
release_sock(sk);
cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
+ if (sk->sk_state != SMC_APPCLOSEWAIT1)
+ goto again;
/* confirm close from peer */
rc = smc_close_wr(conn);
+ if (rc)
+ break;
sk->sk_state = SMC_APPCLOSEWAIT2;
break;
case SMC_APPCLOSEWAIT2:
@@ -462,7 +484,7 @@ again:
}
if (old_state != sk->sk_state)
- sk->sk_state_change(&smc->sk);
+ sk->sk_state_change(sk);
return rc;
}
diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h
index ed82506b1b0a..19eb6a211c23 100644
--- a/net/smc/smc_close.h
+++ b/net/smc/smc_close.h
@@ -20,9 +20,7 @@
#define SMC_CLOSE_SOCK_PUT_DELAY HZ
void smc_close_wake_tx_prepared(struct smc_sock *smc);
-void smc_close_active_abort(struct smc_sock *smc);
int smc_close_active(struct smc_sock *smc);
-void smc_close_sock_put_work(struct work_struct *work);
int smc_close_shutdown_write(struct smc_sock *smc);
void smc_close_init(struct smc_sock *smc);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 94f21116dac5..2424c7100aaf 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -128,6 +128,8 @@ static void smc_lgr_free_work(struct work_struct *work)
bool conns;
spin_lock_bh(&smc_lgr_list.lock);
+ if (list_empty(&lgr->list))
+ goto free;
read_lock_bh(&lgr->conns_lock);
conns = RB_EMPTY_ROOT(&lgr->conns_all);
read_unlock_bh(&lgr->conns_lock);
@@ -136,6 +138,7 @@ static void smc_lgr_free_work(struct work_struct *work)
return;
}
list_del_init(&lgr->list); /* remove from smc_lgr_list */
+free:
spin_unlock_bh(&smc_lgr_list.lock);
smc_lgr_free(lgr);
}
@@ -231,9 +234,7 @@ static void smc_buf_unuse(struct smc_connection *conn)
/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
- struct smc_link_group *lgr = conn->lgr;
-
- if (!lgr)
+ if (!conn->lgr)
return;
smc_cdc_tx_dismiss_slots(conn);
smc_lgr_unregister_conn(conn);
@@ -327,13 +328,17 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
while (node) {
conn = rb_entry(node, struct smc_connection, alert_node);
smc = container_of(conn, struct smc_sock, conn);
- sock_hold(&smc->sk);
+ sock_hold(&smc->sk); /* sock_put in close work */
+ conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
__smc_lgr_unregister_conn(conn);
- schedule_work(&conn->close_work);
- sock_put(&smc->sk);
+ write_unlock_bh(&lgr->conns_lock);
+ if (!schedule_work(&conn->close_work))
+ sock_put(&smc->sk);
+ write_lock_bh(&lgr->conns_lock);
node = rb_first(&lgr->conns_all);
}
write_unlock_bh(&lgr->conns_lock);
+ wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
}
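smc_lgr_terminate() now drops conns_lock across schedule_work() and keeps the extra socket reference only if the work was really queued; schedule_work() returns false when the item is already pending, in which case the hold is returned immediately so the refcount stays balanced:

sock_hold(&smc->sk);                    /* reference for close_work */
if (!schedule_work(&conn->close_work))
    sock_put(&smc->sk);                 /* already queued: drop the ref */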
/* Determine vlan of internal TCP socket.
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index d2d01cf70224..427b91c1c964 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -86,7 +86,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
goto errout;
- if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) && smc->conn.lgr) {
+ if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
+ smc->conn.alert_token_local) {
struct smc_connection *conn = &smc->conn;
struct smc_diag_conninfo cinfo = {
.token = conn->alert_token_local,
@@ -124,7 +125,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
goto errout;
}
- if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr) {
+ if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr &&
+ !list_empty(&smc->conn.lgr->list)) {
struct smc_diag_lgrinfo linfo = {
.role = smc->conn.lgr->role,
.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 90f1a7f9085c..2a8957bd6d38 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -141,6 +141,17 @@ out:
return rc;
}
+static void smc_ib_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
+{
+ struct smc_link_group *lgr, *l;
+
+ list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
+ if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
+ lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
+ smc_lgr_terminate(lgr);
+ }
+}
+
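Termination unlinks link groups while the list is being walked, hence the _safe iterator, which fetches the next pointer before the loop body runs. Generic form (foo, some_list and should_drop() are placeholders):

struct foo *cur, *tmp;

list_for_each_entry_safe(cur, tmp, &some_list, node) {
    if (should_drop(cur))               /* may unlink the current entry */
        list_del_init(&cur->node);
}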
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
@@ -151,6 +162,8 @@ static void smc_ib_port_event_work(struct work_struct *work)
for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
smc_ib_remember_port_attr(smcibdev, port_idx + 1);
clear_bit(port_idx, &smcibdev->port_event_mask);
+ if (!smc_ib_port_active(smcibdev, port_idx + 1))
+ smc_ib_port_terminate(smcibdev, port_idx + 1);
}
}
@@ -165,15 +178,7 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
switch (ibevent->event) {
case IB_EVENT_PORT_ERR:
- port_idx = ibevent->element.port_num - 1;
- set_bit(port_idx, &smcibdev->port_event_mask);
- schedule_work(&smcibdev->port_event_work);
- /* fall through */
case IB_EVENT_DEVICE_FATAL:
- /* tbd in follow-on patch:
- * abnormal close of corresponding connections
- */
- break;
case IB_EVENT_PORT_ACTIVE:
port_idx = ibevent->element.port_num - 1;
set_bit(port_idx, &smcibdev->port_event_mask);
@@ -186,7 +191,8 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
- ib_dealloc_pd(lnk->roce_pd);
+ if (lnk->roce_pd)
+ ib_dealloc_pd(lnk->roce_pd);
lnk->roce_pd = NULL;
}
@@ -203,14 +209,18 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
+ struct smc_ib_device *smcibdev =
+ (struct smc_ib_device *)ibevent->device;
+ u8 port_idx;
+
switch (ibevent->event) {
case IB_EVENT_DEVICE_FATAL:
case IB_EVENT_GID_CHANGE:
case IB_EVENT_PORT_ERR:
case IB_EVENT_QP_ACCESS_ERR:
- /* tbd in follow-on patch:
- * abnormal close of corresponding connections
- */
+ port_idx = ibevent->element.port_num - 1;
+ set_bit(port_idx, &smcibdev->port_event_mask);
+ schedule_work(&smcibdev->port_event_work);
break;
default:
break;
@@ -219,7 +229,8 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
- ib_destroy_qp(lnk->roce_qp);
+ if (lnk->roce_qp)
+ ib_destroy_qp(lnk->roce_qp);
lnk->roce_qp = NULL;
}
@@ -462,6 +473,7 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
if (!smcibdev->initialized)
return;
+ smcibdev->initialized = 0;
smc_wr_remove_dev(smcibdev);
ib_unregister_event_handler(&smcibdev->event_handler);
ib_destroy_cq(smcibdev->roce_cq_recv);
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index cbf58637ee14..9dc392ca06bf 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -65,7 +65,6 @@ static int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
rc = sk_wait_event(sk, timeo,
sk->sk_err ||
sk->sk_shutdown & RCV_SHUTDOWN ||
- sock_flag(sk, SOCK_DONE) ||
atomic_read(&conn->bytes_to_rcv) ||
smc_cdc_rxed_any_close_or_senddone(conn),
&wait);
@@ -116,7 +115,7 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
if (read_done) {
if (sk->sk_err ||
sk->sk_state == SMC_CLOSED ||
- (sk->sk_shutdown & RCV_SHUTDOWN) ||
+ sk->sk_shutdown & RCV_SHUTDOWN ||
!timeo ||
signal_pending(current) ||
smc_cdc_rxed_any_close_or_senddone(conn) ||
@@ -124,8 +123,6 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
peer_conn_abort)
break;
} else {
- if (sock_flag(sk, SOCK_DONE))
- break;
if (sk->sk_err) {
read_done = sock_error(sk);
break;
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index c48dc2d5fd3a..838bce20c361 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -86,7 +86,7 @@ static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
rc = -EPIPE;
break;
}
- if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
+ if (smc_cdc_rxed_any_close(conn)) {
rc = -ECONNRESET;
break;
}
@@ -104,14 +104,12 @@ static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
if (atomic_read(&conn->sndbuf_space))
break; /* at least 1 byte of free space available */
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- sk->sk_write_pending++;
sk_wait_event(sk, &timeo,
sk->sk_err ||
(sk->sk_shutdown & SEND_SHUTDOWN) ||
- smc_cdc_rxed_any_close_or_senddone(conn) ||
+ smc_cdc_rxed_any_close(conn) ||
atomic_read(&conn->sndbuf_space),
&wait);
- sk->sk_write_pending--;
}
remove_wait_queue(sk_sleep(sk), &wait);
return rc;
@@ -250,8 +248,10 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
peer_rmbe_offset;
rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
- if (rc)
+ if (rc) {
conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+ smc_lgr_terminate(lgr);
+ }
return rc;
}
@@ -408,8 +408,9 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
goto out_unlock;
}
rc = 0;
- schedule_delayed_work(&conn->tx_work,
- SMC_TX_WORK_DELAY);
+ if (conn->alert_token_local) /* connection healthy */
+ schedule_delayed_work(&conn->tx_work,
+ SMC_TX_WORK_DELAY);
}
goto out_unlock;
}
@@ -440,19 +441,24 @@ static void smc_tx_work(struct work_struct *work)
int rc;
lock_sock(&smc->sk);
+ if (smc->sk.sk_err ||
+ !conn->alert_token_local ||
+ conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
+ goto out;
+
rc = smc_tx_sndbuf_nonempty(conn);
if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
!atomic_read(&conn->bytes_to_rcv))
conn->local_rx_ctrl.prod_flags.write_blocked = 0;
+
+out:
release_sock(&smc->sk);
}
void smc_tx_consumer_update(struct smc_connection *conn)
{
union smc_host_cursor cfed, cons;
- struct smc_cdc_tx_pend *pend;
- struct smc_wr_buf *wr_buf;
- int to_confirm, rc;
+ int to_confirm;
smc_curs_write(&cons,
smc_curs_read(&conn->local_tx_ctrl.cons, conn),
@@ -466,10 +472,8 @@ void smc_tx_consumer_update(struct smc_connection *conn)
((to_confirm > conn->rmbe_update_limit) &&
((to_confirm > (conn->rmbe_size / 2)) ||
conn->local_rx_ctrl.prod_flags.write_blocked))) {
- rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
- if (!rc)
- rc = smc_cdc_msg_send(conn, wr_buf, pend);
- if (rc < 0) {
+ if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
+ conn->alert_token_local) { /* connection healthy */
schedule_delayed_work(&conn->tx_work,
SMC_TX_WORK_DELAY);
return;
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index de4537f66832..1b8af23e6e2b 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -122,6 +122,7 @@ static void smc_wr_tx_tasklet_fn(unsigned long data)
again:
polled++;
do {
+ memset(&wc, 0, sizeof(wc));
rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
if (polled == 1) {
ib_req_notify_cq(dev->roce_cq_send,
@@ -173,9 +174,9 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
struct smc_wr_tx_pend_priv **wr_pend_priv)
{
struct smc_wr_tx_pend *wr_pend;
+ u32 idx = link->wr_tx_cnt;
struct ib_send_wr *wr_ib;
u64 wr_id;
- u32 idx;
int rc;
*wr_buf = NULL;
@@ -185,21 +186,20 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
if (rc)
return rc;
} else {
- rc = wait_event_interruptible_timeout(
+ struct smc_link_group *lgr;
+
+ lgr = container_of(link, struct smc_link_group,
+ lnk[SMC_SINGLE_LINK]);
+ rc = wait_event_timeout(
link->wr_tx_wait,
+ list_empty(&lgr->list) || /* lgr terminated */
(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
SMC_WR_TX_WAIT_FREE_SLOT_TIME);
if (!rc) {
/* timeout - terminate connections */
- struct smc_link_group *lgr;
-
- lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
smc_lgr_terminate(lgr);
return -EPIPE;
}
- if (rc == -ERESTARTSYS)
- return -EINTR;
if (idx == link->wr_tx_cnt)
return -EPIPE;
}
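Two behavioral changes hide in this hunk: the wait is no longer interruptible (the -ERESTARTSYS/-EINTR path is gone, so a signal cannot abort a waiter mid-handshake), and the wake condition also fires once the link group has been unlinked, complementing the explicit wake_up() that smc_lgr_terminate() now issues. Waiter-side sketch (have_free_slot() stands in for the slot-index probe):

struct smc_link_group *lgr = container_of(link, struct smc_link_group,
                                          lnk[SMC_SINGLE_LINK]);

rc = wait_event_timeout(link->wr_tx_wait,
                        list_empty(&lgr->list) ||   /* lgr terminated */
                        have_free_slot(link),       /* placeholder */
                        SMC_WR_TX_WAIT_FREE_SLOT_TIME);
if (!rc)
    smc_lgr_terminate(lgr);     /* timeout: tear the group down */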
@@ -249,8 +249,14 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
pend = container_of(priv, struct smc_wr_tx_pend, priv);
rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
&failed_wr);
- if (rc)
+ if (rc) {
+ struct smc_link_group *lgr =
+ container_of(link, struct smc_link_group,
+ lnk[SMC_SINGLE_LINK]);
+
smc_wr_tx_put_slot(link, priv);
+ smc_lgr_terminate(lgr);
+ }
return rc;
}
@@ -300,18 +306,18 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
return rc;
}
-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type,
+void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
smc_wr_tx_filter filter,
smc_wr_tx_dismisser dismisser,
unsigned long data)
{
struct smc_wr_tx_pend_priv *tx_pend;
- struct smc_wr_rx_hdr *wr_rx;
+ struct smc_wr_rx_hdr *wr_tx;
int i;
for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
- wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i];
- if (wr_rx->type != wr_rx_hdr_type)
+ wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
+ if (wr_tx->type != wr_tx_hdr_type)
continue;
tx_pend = &link->wr_tx_pends[i].priv;
if (filter(tx_pend, data))
@@ -319,24 +325,6 @@ void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type,
}
}
-bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type,
- smc_wr_tx_filter filter, unsigned long data)
-{
- struct smc_wr_tx_pend_priv *tx_pend;
- struct smc_wr_rx_hdr *wr_rx;
- int i;
-
- for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
- wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i];
- if (wr_rx->type != wr_rx_hdr_type)
- continue;
- tx_pend = &link->wr_tx_pends[i].priv;
- if (filter(tx_pend, data))
- return true;
- }
- return false;
-}
-
/****************************** receive queue ********************************/
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 2acf12b06063..ef0c3494c9cb 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -93,8 +93,6 @@ int smc_wr_tx_put_slot(struct smc_link *link,
int smc_wr_tx_send(struct smc_link *link,
struct smc_wr_tx_pend_priv *wr_pend_priv);
void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
-bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type,
- smc_wr_tx_filter filter, unsigned long data);
void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
smc_wr_tx_filter filter,
smc_wr_tx_dismisser dismisser,
diff --git a/net/socket.c b/net/socket.c
index 05f361faec45..a93c99b518ca 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -118,7 +118,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);
static int sock_close(struct inode *inode, struct file *file);
-static unsigned int sock_poll(struct file *file,
+static __poll_t sock_poll(struct file *file,
struct poll_table_struct *wait);
static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
@@ -163,12 +163,6 @@ static DEFINE_SPINLOCK(net_family_lock);
static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
/*
- * Statistics counters of the socket lists
- */
-
-static DEFINE_PER_CPU(int, sockets_in_use);
-
-/*
* Support routines.
* Move socket addresses back and forth across the kernel/user
* divide and look after the messy bits.
@@ -436,8 +430,10 @@ static int sock_map_fd(struct socket *sock, int flags)
{
struct file *newfile;
int fd = get_unused_fd_flags(flags);
- if (unlikely(fd < 0))
+ if (unlikely(fd < 0)) {
+ sock_release(sock);
return fd;
+ }
newfile = sock_alloc_file(sock, flags, NULL);
if (likely(!IS_ERR(newfile))) {
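The fd-allocation branch used to return without freeing the socket it had been handed, leaking it. Only this branch needed the fix, on the assumption (consistent with this kernel series) that sock_alloc_file() already releases the socket itself when it fails:

int fd = get_unused_fd_flags(flags);

if (unlikely(fd < 0)) {
    sock_release(sock);         /* we still own the socket here */
    return fd;
}
/* later failure path: sock_alloc_file() is assumed to consume the
 * socket on error, so no second release is needed in this caller
 */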
@@ -578,7 +574,6 @@ struct socket *sock_alloc(void)
inode->i_gid = current_fsgid();
inode->i_op = &sockfs_inode_ops;
- this_cpu_add(sockets_in_use, 1);
return sock;
}
EXPORT_SYMBOL(sock_alloc);
@@ -605,7 +600,6 @@ void sock_release(struct socket *sock)
if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
pr_err("%s: fasync list not empty!\n", __func__);
- this_cpu_sub(sockets_in_use, 1);
if (!sock->file) {
iput(SOCK_INODE(sock));
return;
@@ -967,9 +961,28 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
* If this ioctl is unknown try to hand it down
* to the NIC driver.
*/
- if (err == -ENOIOCTLCMD)
- err = dev_ioctl(net, cmd, argp);
+ if (err != -ENOIOCTLCMD)
+ return err;
+ if (cmd == SIOCGIFCONF) {
+ struct ifconf ifc;
+ if (copy_from_user(&ifc, argp, sizeof(struct ifconf)))
+ return -EFAULT;
+ rtnl_lock();
+ err = dev_ifconf(net, &ifc, sizeof(struct ifreq));
+ rtnl_unlock();
+ if (!err && copy_to_user(argp, &ifc, sizeof(struct ifconf)))
+ err = -EFAULT;
+ } else {
+ struct ifreq ifr;
+ bool need_copyout;
+ if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+ return -EFAULT;
+ err = dev_ioctl(net, cmd, &ifr, &need_copyout);
+ if (!err && need_copyout)
+ if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+ return -EFAULT;
+ }
return err;
}
@@ -994,12 +1007,19 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
sock = file->private_data;
sk = sock->sk;
net = sock_net(sk);
- if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
- err = dev_ioctl(net, cmd, argp);
+ if (unlikely(cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))) {
+ struct ifreq ifr;
+ bool need_copyout;
+ if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+ return -EFAULT;
+ err = dev_ioctl(net, cmd, &ifr, &need_copyout);
+ if (!err && need_copyout)
+ if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+ return -EFAULT;
} else
#ifdef CONFIG_WEXT_CORE
if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
- err = dev_ioctl(net, cmd, argp);
+ err = wext_handle_ioctl(net, cmd, argp);
} else
#endif
switch (cmd) {
@@ -1095,9 +1115,9 @@ out_release:
EXPORT_SYMBOL(sock_create_lite);
/* No kernel lock held - perfect */
-static unsigned int sock_poll(struct file *file, poll_table *wait)
+static __poll_t sock_poll(struct file *file, poll_table *wait)
{
- unsigned int busy_flag = 0;
+ __poll_t busy_flag = 0;
struct socket *sock;
/*
@@ -2622,17 +2642,8 @@ core_initcall(sock_init); /* early initcall */
#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{
- int cpu;
- int counter = 0;
-
- for_each_possible_cpu(cpu)
- counter += per_cpu(sockets_in_use, cpu);
-
- /* It can be negative, by the way. 8) */
- if (counter < 0)
- counter = 0;
-
- seq_printf(seq, "sockets: used %d\n", counter);
+ seq_printf(seq, "sockets: used %d\n",
+ sock_inuse_get(seq->private));
}
#endif /* CONFIG_PROC_FS */
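The global per-cpu sockets_in_use counter is gone; /proc/net/sockstat now asks sock_inuse_get() for a per-network-namespace count, which also retires the clamp the old summing loop needed for its possible negative total. A sketch of what such a helper plausibly looks like (the net->core.sock_inuse field name is an assumption from this era's per-net accounting):

static int sock_inuse_get_like(struct net *net)
{
    int cpu, count = 0;

    /* per-cpu deltas are unsynchronized; sum them on demand */
    for_each_possible_cpu(cpu)
        count += *per_cpu_ptr(net->core.sock_inuse, cpu);
    return count;
}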
@@ -2669,89 +2680,25 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
return err;
}
-static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32)
-{
- struct ifreq __user *uifr;
- int err;
-
- uifr = compat_alloc_user_space(sizeof(struct ifreq));
- if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
- return -EFAULT;
-
- err = dev_ioctl(net, SIOCGIFNAME, uifr);
- if (err)
- return err;
-
- if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq)))
- return -EFAULT;
-
- return 0;
-}
-
-static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
+static int compat_dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
{
struct compat_ifconf ifc32;
struct ifconf ifc;
- struct ifconf __user *uifc;
- struct compat_ifreq __user *ifr32;
- struct ifreq __user *ifr;
- unsigned int i, j;
int err;
if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
return -EFAULT;
- memset(&ifc, 0, sizeof(ifc));
- if (ifc32.ifcbuf == 0) {
- ifc32.ifc_len = 0;
- ifc.ifc_len = 0;
- ifc.ifc_req = NULL;
- uifc = compat_alloc_user_space(sizeof(struct ifconf));
- } else {
- size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) *
- sizeof(struct ifreq);
- uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
- ifc.ifc_len = len;
- ifr = ifc.ifc_req = (void __user *)(uifc + 1);
- ifr32 = compat_ptr(ifc32.ifcbuf);
- for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) {
- if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq)))
- return -EFAULT;
- ifr++;
- ifr32++;
- }
- }
- if (copy_to_user(uifc, &ifc, sizeof(struct ifconf)))
- return -EFAULT;
+ ifc.ifc_len = ifc32.ifc_len;
+ ifc.ifc_req = compat_ptr(ifc32.ifcbuf);
- err = dev_ioctl(net, SIOCGIFCONF, uifc);
+ rtnl_lock();
+ err = dev_ifconf(net, &ifc, sizeof(struct compat_ifreq));
+ rtnl_unlock();
if (err)
return err;
- if (copy_from_user(&ifc, uifc, sizeof(struct ifconf)))
- return -EFAULT;
-
- ifr = ifc.ifc_req;
- ifr32 = compat_ptr(ifc32.ifcbuf);
- for (i = 0, j = 0;
- i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
- i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) {
- if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq)))
- return -EFAULT;
- ifr32++;
- ifr++;
- }
-
- if (ifc32.ifcbuf == 0) {
- /* Translate from 64-bit structure multiple to
- * a 32-bit one.
- */
- i = ifc.ifc_len;
- i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq));
- ifc32.ifc_len = i;
- } else {
- ifc32.ifc_len = i;
- }
+ ifc32.ifc_len = ifc.ifc_len;
if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf)))
return -EFAULT;
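The compat path no longer builds a translated copy via compat_alloc_user_space(); it hands the 32-bit user buffer straight to dev_ifconf() and passes the per-entry size, so the core iterator walks the buffer in compat-sized strides. A hypothetical, simplified stride-aware fill loop to illustrate the idea (not the real dev_ifconf()):

static int ifconf_fill(char __user *buf, size_t buflen, size_t stride)
{
    size_t done = 0;

    while (buf && done + stride <= buflen) {
        /* format one interface record at buf + done;
         * break when no more interfaces remain ...
         */
        done += stride;
    }
    return done;        /* caller stores this back into ifc_len */
}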
@@ -2762,9 +2709,9 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
{
struct compat_ethtool_rxnfc __user *compat_rxnfc;
bool convert_in = false, convert_out = false;
- size_t buf_size = ALIGN(sizeof(struct ifreq), 8);
- struct ethtool_rxnfc __user *rxnfc;
- struct ifreq __user *ifr;
+ size_t buf_size = 0;
+ struct ethtool_rxnfc __user *rxnfc = NULL;
+ struct ifreq ifr;
u32 rule_cnt = 0, actual_rule_cnt;
u32 ethcmd;
u32 data;
@@ -2801,18 +2748,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
case ETHTOOL_SRXCLSRLDEL:
buf_size += sizeof(struct ethtool_rxnfc);
convert_in = true;
+ rxnfc = compat_alloc_user_space(buf_size);
break;
}
- ifr = compat_alloc_user_space(buf_size);
- rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
-
- if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+ if (copy_from_user(&ifr.ifr_name, &ifr32->ifr_name, IFNAMSIZ))
return -EFAULT;
- if (put_user(convert_in ? rxnfc : compat_ptr(data),
- &ifr->ifr_ifru.ifru_data))
- return -EFAULT;
+ ifr.ifr_data = convert_in ? rxnfc : (void __user *)compat_rxnfc;
if (convert_in) {
/* We expect there to be holes between fs.m_ext and
@@ -2840,7 +2783,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
return -EFAULT;
}
- ret = dev_ioctl(net, SIOCETHTOOL, ifr);
+ ret = dev_ioctl(net, SIOCETHTOOL, &ifr, NULL);
if (ret)
return ret;
@@ -2881,113 +2824,43 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
{
- void __user *uptr;
compat_uptr_t uptr32;
- struct ifreq __user *uifr;
+ struct ifreq ifr;
+ void __user *saved;
+ int err;
- uifr = compat_alloc_user_space(sizeof(*uifr));
- if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
+ if (copy_from_user(&ifr, uifr32, sizeof(struct compat_ifreq)))
return -EFAULT;
if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu))
return -EFAULT;
- uptr = compat_ptr(uptr32);
-
- if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc))
- return -EFAULT;
-
- return dev_ioctl(net, SIOCWANDEV, uifr);
-}
-
-static int bond_ioctl(struct net *net, unsigned int cmd,
- struct compat_ifreq __user *ifr32)
-{
- struct ifreq kifr;
- mm_segment_t old_fs;
- int err;
+ saved = ifr.ifr_settings.ifs_ifsu.raw_hdlc;
+ ifr.ifr_settings.ifs_ifsu.raw_hdlc = compat_ptr(uptr32);
- switch (cmd) {
- case SIOCBONDENSLAVE:
- case SIOCBONDRELEASE:
- case SIOCBONDSETHWADDR:
- case SIOCBONDCHANGEACTIVE:
- if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq)))
- return -EFAULT;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = dev_ioctl(net, cmd,
- (struct ifreq __user __force *) &kifr);
- set_fs(old_fs);
-
- return err;
- default:
- return -ENOIOCTLCMD;
+ err = dev_ioctl(net, SIOCWANDEV, &ifr, NULL);
+ if (!err) {
+ ifr.ifr_settings.ifs_ifsu.raw_hdlc = saved;
+ if (copy_to_user(uifr32, &ifr, sizeof(struct compat_ifreq)))
+ err = -EFAULT;
}
+ return err;
}
/* Handle ioctls that use ifreq::ifr_data and just need struct ifreq converted */
static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
struct compat_ifreq __user *u_ifreq32)
{
- struct ifreq __user *u_ifreq64;
- char tmp_buf[IFNAMSIZ];
- void __user *data64;
+ struct ifreq ifreq;
u32 data32;
- if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
- IFNAMSIZ))
- return -EFAULT;
- if (get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
- return -EFAULT;
- data64 = compat_ptr(data32);
-
- u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));
-
- if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
- IFNAMSIZ))
+ if (copy_from_user(ifreq.ifr_name, u_ifreq32->ifr_name, IFNAMSIZ))
return -EFAULT;
- if (put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
+ if (get_user(data32, &u_ifreq32->ifr_data))
return -EFAULT;
+ ifreq.ifr_data = compat_ptr(data32);
- return dev_ioctl(net, cmd, u_ifreq64);
-}
-
-static int dev_ifsioc(struct net *net, struct socket *sock,
- unsigned int cmd, struct compat_ifreq __user *uifr32)
-{
- struct ifreq __user *uifr;
- int err;
-
- uifr = compat_alloc_user_space(sizeof(*uifr));
- if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
- return -EFAULT;
-
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
-
- if (!err) {
- switch (cmd) {
- case SIOCGIFFLAGS:
- case SIOCGIFMETRIC:
- case SIOCGIFMTU:
- case SIOCGIFMEM:
- case SIOCGIFHWADDR:
- case SIOCGIFINDEX:
- case SIOCGIFADDR:
- case SIOCGIFBRDADDR:
- case SIOCGIFDSTADDR:
- case SIOCGIFNETMASK:
- case SIOCGIFPFLAGS:
- case SIOCGIFTXQLEN:
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
- err = -EFAULT;
- break;
- }
- }
- return err;
+ return dev_ioctl(net, cmd, &ifreq, NULL);
}
static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
@@ -2995,7 +2868,6 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
{
struct ifreq ifr;
struct compat_ifmap __user *uifmap32;
- mm_segment_t old_fs;
int err;
uifmap32 = &uifr32->ifr_ifru.ifru_map;
@@ -3009,10 +2881,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
if (err)
return -EFAULT;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
- set_fs(old_fs);
+ err = dev_ioctl(net, cmd, &ifr, NULL);
if (cmd == SIOCGIFMAP && !err) {
err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
@@ -3145,10 +3014,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCSIFBR:
case SIOCGIFBR:
return old_bridge_ioctl(argp);
- case SIOCGIFNAME:
- return dev_ifname32(net, argp);
case SIOCGIFCONF:
- return dev_ifconf(net, argp);
+ return compat_dev_ifconf(net, argp);
case SIOCETHTOOL:
return ethtool_ioctl(net, argp);
case SIOCWANDEV:
@@ -3156,11 +3023,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCGIFMAP:
case SIOCSIFMAP:
return compat_sioc_ifmap(net, cmd, argp);
- case SIOCBONDENSLAVE:
- case SIOCBONDRELEASE:
- case SIOCBONDSETHWADDR:
- case SIOCBONDCHANGEACTIVE:
- return bond_ioctl(net, cmd, argp);
case SIOCADDRT:
case SIOCDELRT:
return routing_ioctl(net, sock, cmd, argp);
@@ -3220,12 +3082,15 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- return dev_ifsioc(net, sock, cmd, argp);
-
case SIOCSARP:
case SIOCGARP:
case SIOCDARP:
case SIOCATMARK:
+ case SIOCBONDENSLAVE:
+ case SIOCBONDRELEASE:
+ case SIOCBONDSETHWADDR:
+ case SIOCBONDCHANGEACTIVE:
+ case SIOCGIFNAME:
return sock_do_ioctl(net, sock, cmd, arg);
}
@@ -3380,19 +3245,6 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
}
EXPORT_SYMBOL(kernel_sendpage_locked);
-int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
-{
- mm_segment_t oldfs = get_fs();
- int err;
-
- set_fs(KERNEL_DS);
- err = sock->ops->ioctl(sock, cmd, arg);
- set_fs(oldfs);
-
- return err;
-}
-EXPORT_SYMBOL(kernel_sock_ioctl);
-
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
return sock->ops->shutdown(sock, how);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e68943895be4..aa36dad32db1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -930,10 +930,10 @@ out:
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
-static unsigned int cache_poll(struct file *filp, poll_table *wait,
+static __poll_t cache_poll(struct file *filp, poll_table *wait,
struct cache_detail *cd)
{
- unsigned int mask;
+ __poll_t mask;
struct cache_reader *rp = filp->private_data;
struct cache_queue *cq;
@@ -1501,7 +1501,7 @@ static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
return cache_write(filp, buf, count, ppos, cd);
}
-static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
+static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
struct cache_detail *cd = PDE_DATA(file_inode(filp));
@@ -1720,7 +1720,7 @@ static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
return cache_write(filp, buf, count, ppos, cd);
}
-static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
+static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
struct cache_detail *cd = RPC_I(file_inode(filp))->private;
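These hunks are part of the tree-wide conversion of poll handlers to __poll_t, a sparse-checked bitwise type; behavior is unchanged, but mixing poll masks with plain integers now warns at build time. Illustrative handler (my_waitq and my_data_ready are placeholders):

typedef unsigned __bitwise __poll_t;    /* as in <linux/types.h> */

static __poll_t my_poll(struct file *filp, poll_table *wait)
{
    __poll_t mask = 0;

    poll_wait(filp, &my_waitq, wait);
    if (my_data_ready)                  /* placeholder predicate */
        mask |= POLLIN | POLLRDNORM;    /* __poll_t constants */
    return mask;
}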
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index e2a4184f3c5d..6e432ecd7f99 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1376,22 +1376,6 @@ rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize
EXPORT_SYMBOL_GPL(rpc_setbufsize);
/**
- * rpc_protocol - Get transport protocol number for an RPC client
- * @clnt: RPC client to query
- *
- */
-int rpc_protocol(struct rpc_clnt *clnt)
-{
- int protocol;
-
- rcu_read_lock();
- protocol = rcu_dereference(clnt->cl_xprt)->prot;
- rcu_read_unlock();
- return protocol;
-}
-EXPORT_SYMBOL_GPL(rpc_protocol);
-
-/**
* rpc_net_ns - Get the network namespace for this RPC client
* @clnt: RPC client to query
*
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 7803f3b6aa53..5c4330325787 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -340,12 +340,12 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of
return res;
}
-static unsigned int
+static __poll_t
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
struct inode *inode = file_inode(filp);
struct rpc_inode *rpci = RPC_I(inode);
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
poll_wait(filp, &rpci->waitq, wait);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b1b49edd7c4d..896691afbb1a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -755,22 +755,20 @@ static void __rpc_execute(struct rpc_task *task)
void (*do_action)(struct rpc_task *);
/*
- * Execute any pending callback first.
+ * Perform the next FSM step or a pending callback.
+ *
+ * tk_action may be NULL if the task has been killed.
+ * In particular, note that rpc_killall_tasks may
+ * do this at any time, so beware when dereferencing.
*/
- do_action = task->tk_callback;
- task->tk_callback = NULL;
- if (do_action == NULL) {
- /*
- * Perform the next FSM step.
- * tk_action may be NULL if the task has been killed.
- * In particular, note that rpc_killall_tasks may
- * do this at any time, so beware when dereferencing.
- */
- do_action = task->tk_action;
- if (do_action == NULL)
- break;
+ do_action = task->tk_action;
+ if (task->tk_callback) {
+ do_action = task->tk_callback;
+ task->tk_callback = NULL;
}
- trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
+ if (!do_action)
+ break;
+ trace_rpc_task_run_action(task->tk_client, task, do_action);
do_action(task);
/*
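The restructured loop selects the function once - a pending tk_callback wins over tk_action - and, notably, traces do_action rather than task->tk_action, so the tracepoint no longer misreports which routine ran when a callback fired. The selection in isolation (fields as in struct rpc_task):

/* inside __rpc_execute()'s dispatch loop */
do_action = task->tk_action;
if (task->tk_callback) {        /* a pending callback wins */
    do_action = task->tk_callback;
    task->tk_callback = NULL;
}
if (!do_action)
    break;                      /* task was killed */
trace_rpc_task_run_action(task->tk_client, task, do_action);
do_action(task);                /* trace now matches what runs */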
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index ff8e06cd067e..5570719e4787 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -338,8 +338,8 @@ static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr,
rqstp->rq_xprt_hlen = 0;
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
- msg.msg_flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nr, buflen);
+ len = sock_recvmsg(svsk->sk_sock, &msg, msg.msg_flags);
/* If we read a full record, then assume there may be more
* data to read (stream based sockets only!)
*/
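Same conversion as in smc_clc.c earlier in this series: kernel_recvmsg() was essentially a wrapper that primed msg_iter before calling sock_recvmsg() (plus a set_fs() dance it still carried at the time), so open-coding it is a small sketch:

static int kernel_recvmsg_like(struct socket *sock, struct msghdr *msg,
                               struct kvec *vec, size_t num, size_t size,
                               int flags)
{
    iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
    return sock_recvmsg(sock, msg, flags);
}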
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 33b74fd84051..2436fd1125fc 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -940,8 +940,8 @@ static void xprt_timer(struct rpc_task *task)
if (task->tk_status != -ETIMEDOUT)
return;
- dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
+ trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
if (!req->rq_reply_bytes_recvd) {
if (xprt->ops->timer)
xprt->ops->timer(xprt, task);
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 8b818bb3518a..ed1a4a3065ee 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -43,7 +43,6 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
req = rpcrdma_create_req(r_xprt);
if (IS_ERR(req))
return PTR_ERR(req);
- __set_bit(RPCRDMA_REQ_F_BACKCHANNEL, &req->rl_flags);
rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
DMA_TO_DEVICE, GFP_KERNEL);
@@ -74,21 +73,13 @@ out_fail:
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
unsigned int count)
{
- struct rpcrdma_rep *rep;
int rc = 0;
while (count--) {
- rep = rpcrdma_create_rep(r_xprt);
- if (IS_ERR(rep)) {
- pr_err("RPC: %s: reply buffer alloc failed\n",
- __func__);
- rc = PTR_ERR(rep);
+ rc = rpcrdma_create_rep(r_xprt);
+ if (rc)
break;
- }
-
- rpcrdma_recv_buffer_put(rep);
}
-
return rc;
}
@@ -129,6 +120,7 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
rqst->rq_xprt = &r_xprt->rx_xprt;
INIT_LIST_HEAD(&rqst->rq_list);
INIT_LIST_HEAD(&rqst->rq_bc_list);
+ __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
goto out_free;
@@ -148,7 +140,7 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
buffer->rb_bc_srv_max_requests = reqs;
request_module("svcrdma");
-
+ trace_xprtrdma_cb_setup(r_xprt, reqs);
return 0;
out_free:
@@ -196,13 +188,7 @@ size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
return maxmsg - RPCRDMA_HDRLEN_MIN;
}
-/**
- * rpcrdma_bc_marshal_reply - Send backwards direction reply
- * @rqst: buffer containing RPC reply data
- *
- * Returns zero on success.
- */
-int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
+static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
@@ -226,7 +212,46 @@ int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
&rqst->rq_snd_buf, rpcrdma_noch))
return -EIO;
+
+ trace_xprtrdma_cb_reply(rqst);
+ return 0;
+}
+
+/**
+ * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
+ * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
+ *
+ * Caller holds the transport's write lock.
+ *
+ * Returns:
+ * %0 if the RPC message has been sent
+ * %-ENOTCONN if the caller should reconnect and call again
+ * %-EIO if a permanent error occurred and the request was not
+ * sent. Do not try to send this message again.
+ */
+int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
+{
+ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
+ struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
+ int rc;
+
+ if (!xprt_connected(rqst->rq_xprt))
+ goto drop_connection;
+
+ rc = rpcrdma_bc_marshal_reply(rqst);
+ if (rc < 0)
+ goto failed_marshal;
+
+ if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
+ goto drop_connection;
return 0;
+
+failed_marshal:
+ if (rc != -ENOTCONN)
+ return rc;
+drop_connection:
+ xprt_disconnect_done(rqst->rq_xprt);
+ return -ENOTCONN;
}
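The kernel-doc above fixes a three-way contract; a hypothetical caller honoring it:

static void bc_reply(struct rpc_rqst *rqst)
{
    int rc = xprt_rdma_bc_send_reply(rqst);

    if (rc == -ENOTCONN) {
        /* transport dropped: reconnect, then call again */
    } else if (rc < 0) {
        /* -EIO: permanent error, never retransmit this reply */
    }
}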
/**
@@ -262,11 +287,6 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
dprintk("RPC: %s: freeing rqst %p (req %p)\n",
__func__, rqst, rpcr_to_rdmar(rqst));
- smp_mb__before_atomic();
- WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
- clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
- smp_mb__after_atomic();
-
spin_lock_bh(&xprt->bc_pa_lock);
list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
spin_unlock_bh(&xprt->bc_pa_lock);
@@ -274,7 +294,7 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
/**
* rpcrdma_bc_receive_call - Handle a backward direction call
- * @xprt: transport receiving the call
+ * @r_xprt: transport receiving the call
* @rep: receive buffer containing the call
*
* Operational assumptions:
@@ -313,7 +333,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
struct rpc_rqst, rq_bc_pa_list);
list_del(&rqst->rq_bc_pa_list);
spin_unlock(&xprt->bc_pa_lock);
- dprintk("RPC: %s: using rqst %p\n", __func__, rqst);
/* Prepare rqst */
rqst->rq_reply_bytes_recvd = 0;
@@ -321,7 +340,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
rqst->rq_xid = *p;
rqst->rq_private_buf.len = size;
- set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
buf = &rqst->rq_rcv_buf;
memset(buf, 0, sizeof(*buf));
@@ -335,12 +353,8 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
* the Upper Layer is done decoding it.
*/
req = rpcr_to_rdmar(rqst);
- dprintk("RPC: %s: attaching rep %p to req %p\n",
- __func__, rep, req);
req->rl_reply = rep;
-
- /* Defeat the retransmit detection logic in send_request */
- req->rl_connect_cookie = 0;
+ trace_xprtrdma_cb_call(rqst);
/* Queue rqst for ULP's callback service */
bc_serv = xprt->bc_serv;
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 29fc84c7ff98..d5f95bb39300 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2015, 2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*/
@@ -47,7 +47,7 @@ fmr_is_supported(struct rpcrdma_ia *ia)
}
static int
-fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
+fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
static struct ib_fmr_attr fmr_attr = {
.max_pages = RPCRDMA_MAX_FMR_SGES,
@@ -55,106 +55,108 @@ fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
.page_shift = PAGE_SHIFT
};
- mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
+ mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
sizeof(u64), GFP_KERNEL);
- if (!mw->fmr.fm_physaddrs)
+ if (!mr->fmr.fm_physaddrs)
goto out_free;
- mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
- sizeof(*mw->mw_sg), GFP_KERNEL);
- if (!mw->mw_sg)
+ mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
+ sizeof(*mr->mr_sg), GFP_KERNEL);
+ if (!mr->mr_sg)
goto out_free;
- sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);
+ sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);
- mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
+ mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
&fmr_attr);
- if (IS_ERR(mw->fmr.fm_mr))
+ if (IS_ERR(mr->fmr.fm_mr))
goto out_fmr_err;
return 0;
out_fmr_err:
dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
- PTR_ERR(mw->fmr.fm_mr));
+ PTR_ERR(mr->fmr.fm_mr));
out_free:
- kfree(mw->mw_sg);
- kfree(mw->fmr.fm_physaddrs);
+ kfree(mr->mr_sg);
+ kfree(mr->fmr.fm_physaddrs);
return -ENOMEM;
}
static int
-__fmr_unmap(struct rpcrdma_mw *mw)
+__fmr_unmap(struct rpcrdma_mr *mr)
{
LIST_HEAD(l);
int rc;
- list_add(&mw->fmr.fm_mr->list, &l);
+ list_add(&mr->fmr.fm_mr->list, &l);
rc = ib_unmap_fmr(&l);
- list_del(&mw->fmr.fm_mr->list);
+ list_del(&mr->fmr.fm_mr->list);
return rc;
}
static void
-fmr_op_release_mr(struct rpcrdma_mw *r)
+fmr_op_release_mr(struct rpcrdma_mr *mr)
{
LIST_HEAD(unmap_list);
int rc;
/* Ensure MW is not on any rl_registered list */
- if (!list_empty(&r->mw_list))
- list_del(&r->mw_list);
+ if (!list_empty(&mr->mr_list))
+ list_del(&mr->mr_list);
- kfree(r->fmr.fm_physaddrs);
- kfree(r->mw_sg);
+ kfree(mr->fmr.fm_physaddrs);
+ kfree(mr->mr_sg);
/* In case this one was left mapped, try to unmap it
* to prevent dealloc_fmr from failing with EBUSY
*/
- rc = __fmr_unmap(r);
+ rc = __fmr_unmap(mr);
if (rc)
pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
- r, rc);
+ mr, rc);
- rc = ib_dealloc_fmr(r->fmr.fm_mr);
+ rc = ib_dealloc_fmr(mr->fmr.fm_mr);
if (rc)
pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
- r, rc);
+ mr, rc);
- kfree(r);
+ kfree(mr);
}
/* Reset of a single FMR.
*/
static void
-fmr_op_recover_mr(struct rpcrdma_mw *mw)
+fmr_op_recover_mr(struct rpcrdma_mr *mr)
{
- struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
int rc;
/* ORDER: invalidate first */
- rc = __fmr_unmap(mw);
-
- /* ORDER: then DMA unmap */
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ rc = __fmr_unmap(mr);
if (rc)
goto out_release;
- rpcrdma_put_mw(r_xprt, mw);
+ /* ORDER: then DMA unmap */
+ rpcrdma_mr_unmap_and_put(mr);
+
r_xprt->rx_stats.mrs_recovered++;
return;
out_release:
- pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
+ pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
r_xprt->rx_stats.mrs_orphaned++;
- spin_lock(&r_xprt->rx_buf.rb_mwlock);
- list_del(&mw->mw_all);
- spin_unlock(&r_xprt->rx_buf.rb_mwlock);
+ trace_xprtrdma_dma_unmap(mr);
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+ mr->mr_sg, mr->mr_nents, mr->mr_dir);
+
+ spin_lock(&r_xprt->rx_buf.rb_mrlock);
+ list_del(&mr->mr_all);
+ spin_unlock(&r_xprt->rx_buf.rb_mrlock);
- fmr_op_release_mr(mw);
+ fmr_op_release_mr(mr);
}
static int
@@ -180,15 +182,15 @@ fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
*/
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing, struct rpcrdma_mw **out)
+ int nsegs, bool writing, struct rpcrdma_mr **out)
{
struct rpcrdma_mr_seg *seg1 = seg;
int len, pageoff, i, rc;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
u64 *dma_pages;
- mw = rpcrdma_get_mw(r_xprt);
- if (!mw)
+ mr = rpcrdma_mr_get(r_xprt);
+ if (!mr)
return ERR_PTR(-ENOBUFS);
pageoff = offset_in_page(seg1->mr_offset);
@@ -199,12 +201,12 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
nsegs = RPCRDMA_MAX_FMR_SGES;
for (i = 0; i < nsegs;) {
if (seg->mr_page)
- sg_set_page(&mw->mw_sg[i],
+ sg_set_page(&mr->mr_sg[i],
seg->mr_page,
seg->mr_len,
offset_in_page(seg->mr_offset));
else
- sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
+ sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
seg->mr_len);
len += seg->mr_len;
++seg;
@@ -214,40 +216,38 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
- mw->mw_dir = rpcrdma_data_dir(writing);
+ mr->mr_dir = rpcrdma_data_dir(writing);
- mw->mw_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
- mw->mw_sg, i, mw->mw_dir);
- if (!mw->mw_nents)
+ mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
+ mr->mr_sg, i, mr->mr_dir);
+ if (!mr->mr_nents)
goto out_dmamap_err;
- for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
- dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
- rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
+ for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
+ dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
+ rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents,
dma_pages[0]);
if (rc)
goto out_maperr;
- mw->mw_handle = mw->fmr.fm_mr->rkey;
- mw->mw_length = len;
- mw->mw_offset = dma_pages[0] + pageoff;
+ mr->mr_handle = mr->fmr.fm_mr->rkey;
+ mr->mr_length = len;
+ mr->mr_offset = dma_pages[0] + pageoff;
- *out = mw;
+ *out = mr;
return seg;
out_dmamap_err:
pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
- mw->mw_sg, i);
- rpcrdma_put_mw(r_xprt, mw);
+ mr->mr_sg, i);
+ rpcrdma_mr_put(mr);
return ERR_PTR(-EIO);
out_maperr:
pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
len, (unsigned long long)dma_pages[0],
- pageoff, mw->mw_nents, rc);
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
- rpcrdma_put_mw(r_xprt, mw);
+ pageoff, mr->mr_nents, rc);
+ rpcrdma_mr_unmap_and_put(mr);
return ERR_PTR(-EIO);
}
@@ -256,13 +256,13 @@ out_maperr:
* Sleeps until it is safe for the host CPU to access the
* previously mapped memory regions.
*
- * Caller ensures that @mws is not empty before the call. This
+ * Caller ensures that @mrs is not empty before the call. This
* function empties the list.
*/
static void
-fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
+fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
LIST_HEAD(unmap_list);
int rc;
@@ -271,10 +271,11 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
* ib_unmap_fmr() is slow, so use a single call instead
* of one call per mapped FMR.
*/
- list_for_each_entry(mw, mws, mw_list) {
+ list_for_each_entry(mr, mrs, mr_list) {
dprintk("RPC: %s: unmapping fmr %p\n",
- __func__, &mw->fmr);
- list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
+ __func__, &mr->fmr);
+ trace_xprtrdma_localinv(mr);
+ list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
}
r_xprt->rx_stats.local_inv_needed++;
rc = ib_unmap_fmr(&unmap_list);
@@ -284,14 +285,10 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
/* ORDER: Now DMA unmap all of the req's MRs, and return
* them to the free MR list.
*/
- while (!list_empty(mws)) {
- mw = rpcrdma_pop_mw(mws);
- dprintk("RPC: %s: DMA unmapping fmr %p\n",
- __func__, &mw->fmr);
- list_del(&mw->fmr.fm_mr->list);
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
- rpcrdma_put_mw(r_xprt, mw);
+ while (!list_empty(mrs)) {
+ mr = rpcrdma_mr_pop(mrs);
+ list_del(&mr->fmr.fm_mr->list);
+ rpcrdma_mr_unmap_and_put(mr);
}
return;
@@ -299,10 +296,10 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
out_reset:
pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);
- while (!list_empty(mws)) {
- mw = rpcrdma_pop_mw(mws);
- list_del(&mw->fmr.fm_mr->list);
- fmr_op_recover_mr(mw);
+ while (!list_empty(mrs)) {
+ mr = rpcrdma_mr_pop(mrs);
+ list_del(&mr->fmr.fm_mr->list);
+ fmr_op_recover_mr(mr);
}
}
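
The batching idiom in fmr_op_unmap_sync() above is worth calling out: each
struct ib_fmr embeds a list head, so many mappings can be collected and then
invalidated with a single (slow) ib_unmap_fmr() verb instead of one call per
FMR. A minimal sketch of that idiom, standalone and with hypothetical
fmr1/fmr2 arguments:

#include <rdma/ib_verbs.h>

/* Sketch: unmap two previously mapped FMRs with one verb call,
 * as fmr_op_unmap_sync() does for a whole request's MRs.
 */
static int example_unmap_two_fmrs(struct ib_fmr *fmr1, struct ib_fmr *fmr2)
{
	LIST_HEAD(unmap_list);
	int rc;

	list_add_tail(&fmr1->list, &unmap_list);
	list_add_tail(&fmr2->list, &unmap_list);

	rc = ib_unmap_fmr(&unmap_list);	/* one call, not one per FMR */

	list_del(&fmr1->list);
	list_del(&fmr2->list);
	return rc;
}
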
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 773e66e10a15..90f688f19783 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2015, 2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*/
/* Lightweight memory registration using Fast Registration Work
- * Requests (FRWR). Also referred to sometimes as FRMR mode.
+ * Requests (FRWR).
*
* FRWR features ordered asynchronous registration and deregistration
* of arbitrarily sized memory regions. This is the fastest and safest
@@ -15,9 +15,9 @@
/* Normal operation
*
* A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
- * Work Request (frmr_op_map). When the RDMA operation is finished, this
+ * Work Request (frwr_op_map). When the RDMA operation is finished, this
* Memory Region is invalidated using a LOCAL_INV Work Request
- * (frmr_op_unmap).
+ * (frwr_op_unmap_sync).
*
* Typically these Work Requests are not signaled, and neither are RDMA
* SEND Work Requests (with the exception of signaling occasionally to
@@ -26,7 +26,7 @@
*
* As an optimization, frwr_op_unmap marks MRs INVALID before the
* LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
- * rb_mws immediately so that no work (like managing a linked list
+ * rb_mrs immediately so that no work (like managing a linked list
* under a spinlock) is needed in the completion upcall.
*
* But this means that frwr_op_map() can occasionally encounter an MR
@@ -60,7 +60,7 @@
* When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
* with ib_dereg_mr and then are re-initialized. Because MR recovery
* allocates fresh resources, it is deferred to a workqueue, and the
- * recovered MRs are placed back on the rb_mws list when recovery is
+ * recovered MRs are placed back on the rb_mrs list when recovery is
* complete. frwr_op_map allocates another MR for the current RPC while
* the broken MR is reset.
*
@@ -96,26 +96,26 @@ out_not_supported:
}
static int
-frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
+frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
- unsigned int depth = ia->ri_max_frmr_depth;
- struct rpcrdma_frmr *f = &r->frmr;
+ unsigned int depth = ia->ri_max_frwr_depth;
+ struct rpcrdma_frwr *frwr = &mr->frwr;
int rc;
- f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
- if (IS_ERR(f->fr_mr))
+ frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
+ if (IS_ERR(frwr->fr_mr))
goto out_mr_err;
- r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
- if (!r->mw_sg)
+ mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
+ if (!mr->mr_sg)
goto out_list_err;
- sg_init_table(r->mw_sg, depth);
- init_completion(&f->fr_linv_done);
+ sg_init_table(mr->mr_sg, depth);
+ init_completion(&frwr->fr_linv_done);
return 0;
out_mr_err:
- rc = PTR_ERR(f->fr_mr);
+ rc = PTR_ERR(frwr->fr_mr);
dprintk("RPC: %s: ib_alloc_mr status %i\n",
__func__, rc);
return rc;
@@ -124,83 +124,85 @@ out_list_err:
rc = -ENOMEM;
dprintk("RPC: %s: sg allocation failure\n",
__func__);
- ib_dereg_mr(f->fr_mr);
+ ib_dereg_mr(frwr->fr_mr);
return rc;
}
static void
-frwr_op_release_mr(struct rpcrdma_mw *r)
+frwr_op_release_mr(struct rpcrdma_mr *mr)
{
int rc;
- /* Ensure MW is not on any rl_registered list */
- if (!list_empty(&r->mw_list))
- list_del(&r->mw_list);
+ /* Ensure MR is not on any rl_registered list */
+ if (!list_empty(&mr->mr_list))
+ list_del(&mr->mr_list);
- rc = ib_dereg_mr(r->frmr.fr_mr);
+ rc = ib_dereg_mr(mr->frwr.fr_mr);
if (rc)
pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
- r, rc);
- kfree(r->mw_sg);
- kfree(r);
+ mr, rc);
+ kfree(mr->mr_sg);
+ kfree(mr);
}
static int
-__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
+__frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
- struct rpcrdma_frmr *f = &r->frmr;
+ struct rpcrdma_frwr *frwr = &mr->frwr;
int rc;
- rc = ib_dereg_mr(f->fr_mr);
+ rc = ib_dereg_mr(frwr->fr_mr);
if (rc) {
pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
- rc, r);
+ rc, mr);
return rc;
}
- f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
- ia->ri_max_frmr_depth);
- if (IS_ERR(f->fr_mr)) {
+ frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
+ ia->ri_max_frwr_depth);
+ if (IS_ERR(frwr->fr_mr)) {
pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
- PTR_ERR(f->fr_mr), r);
- return PTR_ERR(f->fr_mr);
+ PTR_ERR(frwr->fr_mr), mr);
+ return PTR_ERR(frwr->fr_mr);
}
- dprintk("RPC: %s: recovered FRMR %p\n", __func__, f);
- f->fr_state = FRMR_IS_INVALID;
+ dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr);
+ frwr->fr_state = FRWR_IS_INVALID;
return 0;
}
-/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
+/* Reset of a single FRWR. Generate a fresh rkey by replacing the MR.
*/
static void
-frwr_op_recover_mr(struct rpcrdma_mw *mw)
+frwr_op_recover_mr(struct rpcrdma_mr *mr)
{
- enum rpcrdma_frmr_state state = mw->frmr.fr_state;
- struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ enum rpcrdma_frwr_state state = mr->frwr.fr_state;
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
int rc;
- rc = __frwr_reset_mr(ia, mw);
- if (state != FRMR_FLUSHED_LI)
+ rc = __frwr_mr_reset(ia, mr);
+ if (state != FRWR_FLUSHED_LI) {
+ trace_xprtrdma_dma_unmap(mr);
ib_dma_unmap_sg(ia->ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ mr->mr_sg, mr->mr_nents, mr->mr_dir);
+ }
if (rc)
goto out_release;
- rpcrdma_put_mw(r_xprt, mw);
+ rpcrdma_mr_put(mr);
r_xprt->rx_stats.mrs_recovered++;
return;
out_release:
- pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
+ pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mr);
r_xprt->rx_stats.mrs_orphaned++;
- spin_lock(&r_xprt->rx_buf.rb_mwlock);
- list_del(&mw->mw_all);
- spin_unlock(&r_xprt->rx_buf.rb_mwlock);
+ spin_lock(&r_xprt->rx_buf.rb_mrlock);
+ list_del(&mr->mr_all);
+ spin_unlock(&r_xprt->rx_buf.rb_mrlock);
- frwr_op_release_mr(mw);
+ frwr_op_release_mr(mr);
}
static int
@@ -214,31 +216,31 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;
- ia->ri_max_frmr_depth =
+ ia->ri_max_frwr_depth =
min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
attrs->max_fast_reg_page_list_len);
dprintk("RPC: %s: device's max FR page list len = %u\n",
- __func__, ia->ri_max_frmr_depth);
-
- /* Add room for frmr register and invalidate WRs.
- * 1. FRMR reg WR for head
- * 2. FRMR invalidate WR for head
- * 3. N FRMR reg WRs for pagelist
- * 4. N FRMR invalidate WRs for pagelist
- * 5. FRMR reg WR for tail
- * 6. FRMR invalidate WR for tail
+ __func__, ia->ri_max_frwr_depth);
+
+ /* Add room for frwr register and invalidate WRs.
+ * 1. FRWR reg WR for head
+ * 2. FRWR invalidate WR for head
+ * 3. N FRWR reg WRs for pagelist
+ * 4. N FRWR invalidate WRs for pagelist
+ * 5. FRWR reg WR for tail
+ * 6. FRWR invalidate WR for tail
* 7. The RDMA_SEND WR
*/
depth = 7;
- /* Calculate N if the device max FRMR depth is smaller than
+ /* Calculate N if the device max FRWR depth is smaller than
* RPCRDMA_MAX_DATA_SEGS.
*/
- if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
- delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
+ if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
+ delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
do {
- depth += 2; /* FRMR reg + invalidate */
- delta -= ia->ri_max_frmr_depth;
+ depth += 2; /* FRWR reg + invalidate */
+ delta -= ia->ri_max_frwr_depth;
} while (delta > 0);
}
@@ -252,7 +254,7 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
}
ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
- ia->ri_max_frmr_depth);
+ ia->ri_max_frwr_depth);
return 0;
}
@@ -265,7 +267,7 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
- RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
+ RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
}
static void
@@ -286,16 +288,16 @@ __frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rpcrdma_frmr *frmr;
- struct ib_cqe *cqe;
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct rpcrdma_frwr *frwr =
+ container_of(cqe, struct rpcrdma_frwr, fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
if (wc->status != IB_WC_SUCCESS) {
- cqe = wc->wr_cqe;
- frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- frmr->fr_state = FRMR_FLUSHED_FR;
+ frwr->fr_state = FRWR_FLUSHED_FR;
__frwr_sendcompletion_flush(wc, "fastreg");
}
+ trace_xprtrdma_wc_fastreg(wc, frwr);
}
/**
@@ -307,16 +309,16 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rpcrdma_frmr *frmr;
- struct ib_cqe *cqe;
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
+ fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
if (wc->status != IB_WC_SUCCESS) {
- cqe = wc->wr_cqe;
- frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- frmr->fr_state = FRMR_FLUSHED_LI;
+ frwr->fr_state = FRWR_FLUSHED_LI;
__frwr_sendcompletion_flush(wc, "localinv");
}
+ trace_xprtrdma_wc_li(wc, frwr);
}
/**
@@ -329,17 +331,17 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rpcrdma_frmr *frmr;
- struct ib_cqe *cqe;
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
+ fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
- cqe = wc->wr_cqe;
- frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
if (wc->status != IB_WC_SUCCESS) {
- frmr->fr_state = FRMR_FLUSHED_LI;
+ frwr->fr_state = FRWR_FLUSHED_LI;
__frwr_sendcompletion_flush(wc, "localinv");
}
- complete(&frmr->fr_linv_done);
+ complete(&frwr->fr_linv_done);
+ trace_xprtrdma_wc_li_wake(wc, frwr);
}
/* Post a REG_MR Work Request to register a memory region
@@ -347,41 +349,39 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
*/
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing, struct rpcrdma_mw **out)
+ int nsegs, bool writing, struct rpcrdma_mr **out)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
- struct rpcrdma_mw *mw;
- struct rpcrdma_frmr *frmr;
- struct ib_mr *mr;
+ struct rpcrdma_frwr *frwr;
+ struct rpcrdma_mr *mr;
+ struct ib_mr *ibmr;
struct ib_reg_wr *reg_wr;
struct ib_send_wr *bad_wr;
int rc, i, n;
u8 key;
- mw = NULL;
+ mr = NULL;
do {
- if (mw)
- rpcrdma_defer_mr_recovery(mw);
- mw = rpcrdma_get_mw(r_xprt);
- if (!mw)
+ if (mr)
+ rpcrdma_mr_defer_recovery(mr);
+ mr = rpcrdma_mr_get(r_xprt);
+ if (!mr)
return ERR_PTR(-ENOBUFS);
- } while (mw->frmr.fr_state != FRMR_IS_INVALID);
- frmr = &mw->frmr;
- frmr->fr_state = FRMR_IS_VALID;
- mr = frmr->fr_mr;
- reg_wr = &frmr->fr_regwr;
-
- if (nsegs > ia->ri_max_frmr_depth)
- nsegs = ia->ri_max_frmr_depth;
+ } while (mr->frwr.fr_state != FRWR_IS_INVALID);
+ frwr = &mr->frwr;
+ frwr->fr_state = FRWR_IS_VALID;
+
+ if (nsegs > ia->ri_max_frwr_depth)
+ nsegs = ia->ri_max_frwr_depth;
for (i = 0; i < nsegs;) {
if (seg->mr_page)
- sg_set_page(&mw->mw_sg[i],
+ sg_set_page(&mr->mr_sg[i],
seg->mr_page,
seg->mr_len,
offset_in_page(seg->mr_offset));
else
- sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
+ sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
seg->mr_len);
++seg;
@@ -392,30 +392,29 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
- mw->mw_dir = rpcrdma_data_dir(writing);
+ mr->mr_dir = rpcrdma_data_dir(writing);
- mw->mw_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg, i, mw->mw_dir);
- if (!mw->mw_nents)
+ mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
+ if (!mr->mr_nents)
goto out_dmamap_err;
- n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
- if (unlikely(n != mw->mw_nents))
+ ibmr = frwr->fr_mr;
+ n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
+ if (unlikely(n != mr->mr_nents))
goto out_mapmr_err;
- dprintk("RPC: %s: Using frmr %p to map %u segments (%llu bytes)\n",
- __func__, frmr, mw->mw_nents, mr->length);
-
- key = (u8)(mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(mr, ++key);
+ key = (u8)(ibmr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(ibmr, ++key);
+ reg_wr = &frwr->fr_regwr;
reg_wr->wr.next = NULL;
reg_wr->wr.opcode = IB_WR_REG_MR;
- frmr->fr_cqe.done = frwr_wc_fastreg;
- reg_wr->wr.wr_cqe = &frmr->fr_cqe;
+ frwr->fr_cqe.done = frwr_wc_fastreg;
+ reg_wr->wr.wr_cqe = &frwr->fr_cqe;
reg_wr->wr.num_sge = 0;
reg_wr->wr.send_flags = 0;
- reg_wr->mr = mr;
- reg_wr->key = mr->rkey;
+ reg_wr->mr = ibmr;
+ reg_wr->key = ibmr->rkey;
reg_wr->access = writing ?
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
IB_ACCESS_REMOTE_READ;
@@ -424,47 +423,64 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
if (rc)
goto out_senderr;
- mw->mw_handle = mr->rkey;
- mw->mw_length = mr->length;
- mw->mw_offset = mr->iova;
+ mr->mr_handle = ibmr->rkey;
+ mr->mr_length = ibmr->length;
+ mr->mr_offset = ibmr->iova;
- *out = mw;
+ *out = mr;
return seg;
out_dmamap_err:
pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
- mw->mw_sg, i);
- frmr->fr_state = FRMR_IS_INVALID;
- rpcrdma_put_mw(r_xprt, mw);
+ mr->mr_sg, i);
+ frwr->fr_state = FRWR_IS_INVALID;
+ rpcrdma_mr_put(mr);
return ERR_PTR(-EIO);
out_mapmr_err:
pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
- frmr->fr_mr, n, mw->mw_nents);
- rpcrdma_defer_mr_recovery(mw);
+ frwr->fr_mr, n, mr->mr_nents);
+ rpcrdma_mr_defer_recovery(mr);
return ERR_PTR(-EIO);
out_senderr:
- pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
- rpcrdma_defer_mr_recovery(mw);
+ pr_err("rpcrdma: FRWR registration ib_post_send returned %i\n", rc);
+ rpcrdma_mr_defer_recovery(mr);
return ERR_PTR(-ENOTCONN);
}
+/* Handle a remotely invalidated mr on the @mrs list
+ */
+static void
+frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
+{
+ struct rpcrdma_mr *mr;
+
+ list_for_each_entry(mr, mrs, mr_list)
+ if (mr->mr_handle == rep->rr_inv_rkey) {
+ list_del(&mr->mr_list);
+ trace_xprtrdma_remoteinv(mr);
+ mr->frwr.fr_state = FRWR_IS_INVALID;
+ rpcrdma_mr_unmap_and_put(mr);
+ break; /* only one invalidated MR per RPC */
+ }
+}
+
/* Invalidate all memory regions that were registered for "req".
*
* Sleeps until it is safe for the host CPU to access the
* previously mapped memory regions.
*
- * Caller ensures that @mws is not empty before the call. This
+ * Caller ensures that @mrs is not empty before the call. This
* function empties the list.
*/
static void
-frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
+frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
struct ib_send_wr *first, **prev, *last, *bad_wr;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_frmr *f;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_frwr *frwr;
+ struct rpcrdma_mr *mr;
int count, rc;
/* ORDER: Invalidate all of the MRs first
@@ -472,31 +488,27 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
* Chain the LOCAL_INV Work Requests and post them with
* a single ib_post_send() call.
*/
- f = NULL;
+ frwr = NULL;
count = 0;
prev = &first;
- list_for_each_entry(mw, mws, mw_list) {
- mw->frmr.fr_state = FRMR_IS_INVALID;
+ list_for_each_entry(mr, mrs, mr_list) {
+ mr->frwr.fr_state = FRWR_IS_INVALID;
- if (mw->mw_flags & RPCRDMA_MW_F_RI)
- continue;
+ frwr = &mr->frwr;
+ trace_xprtrdma_localinv(mr);
- f = &mw->frmr;
- dprintk("RPC: %s: invalidating frmr %p\n",
- __func__, f);
-
- f->fr_cqe.done = frwr_wc_localinv;
- last = &f->fr_invwr;
+ frwr->fr_cqe.done = frwr_wc_localinv;
+ last = &frwr->fr_invwr;
memset(last, 0, sizeof(*last));
- last->wr_cqe = &f->fr_cqe;
+ last->wr_cqe = &frwr->fr_cqe;
last->opcode = IB_WR_LOCAL_INV;
- last->ex.invalidate_rkey = mw->mw_handle;
+ last->ex.invalidate_rkey = mr->mr_handle;
count++;
*prev = last;
prev = &last->next;
}
- if (!f)
+ if (!frwr)
goto unmap;
/* Strong send queue ordering guarantees that when the
@@ -504,8 +516,8 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
* are complete.
*/
last->send_flags = IB_SEND_SIGNALED;
- f->fr_cqe.done = frwr_wc_localinv_wake;
- reinit_completion(&f->fr_linv_done);
+ frwr->fr_cqe.done = frwr_wc_localinv_wake;
+ reinit_completion(&frwr->fr_linv_done);
/* Transport disconnect drains the receive CQ before it
* replaces the QP. The RPC reply handler won't call us
@@ -515,36 +527,32 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
bad_wr = NULL;
rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
if (bad_wr != first)
- wait_for_completion(&f->fr_linv_done);
+ wait_for_completion(&frwr->fr_linv_done);
if (rc)
goto reset_mrs;
/* ORDER: Now DMA unmap all of the MRs, and return
- * them to the free MW list.
+ * them to the free MR list.
*/
unmap:
- while (!list_empty(mws)) {
- mw = rpcrdma_pop_mw(mws);
- dprintk("RPC: %s: DMA unmapping frmr %p\n",
- __func__, &mw->frmr);
- ib_dma_unmap_sg(ia->ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
- rpcrdma_put_mw(r_xprt, mw);
+ while (!list_empty(mrs)) {
+ mr = rpcrdma_mr_pop(mrs);
+ rpcrdma_mr_unmap_and_put(mr);
}
return;
reset_mrs:
- pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
+ pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);
/* Find and reset the MRs in the LOCAL_INV WRs that did not
* get posted.
*/
while (bad_wr) {
- f = container_of(bad_wr, struct rpcrdma_frmr,
- fr_invwr);
- mw = container_of(f, struct rpcrdma_mw, frmr);
+ frwr = container_of(bad_wr, struct rpcrdma_frwr,
+ fr_invwr);
+ mr = container_of(frwr, struct rpcrdma_mr, frwr);
- __frwr_reset_mr(ia, mw);
+ __frwr_mr_reset(ia, mr);
bad_wr = bad_wr->next;
}
@@ -553,6 +561,7 @@ reset_mrs:
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
.ro_map = frwr_op_map,
+ .ro_reminv = frwr_op_reminv,
.ro_unmap_sync = frwr_op_unmap_sync,
.ro_recover_mr = frwr_op_recover_mr,
.ro_open = frwr_op_open,
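
The LOCAL_INV chaining in frwr_op_unmap_sync() relies on send queue
ordering: only the final Work Request is signaled, and its completion
implies that every earlier unsignaled invalidation has also finished. A
minimal sketch of that pattern, assuming a caller-provided wrs[] array and
an ib_cqe whose .done handler wakes the waiter (all names hypothetical):

/* Sketch: chain n LOCAL_INV WRs, signal only the last one. */
static int example_post_linv_chain(struct ib_qp *qp, struct ib_send_wr *wrs,
				   const u32 *rkeys, unsigned int n,
				   struct ib_cqe *last_cqe)
{
	struct ib_send_wr *first = NULL, **prev = &first, *bad_wr;
	unsigned int i;

	if (!n)
		return -EINVAL;

	for (i = 0; i < n; i++) {
		memset(&wrs[i], 0, sizeof(wrs[i]));
		wrs[i].opcode = IB_WR_LOCAL_INV;
		wrs[i].ex.invalidate_rkey = rkeys[i];
		*prev = &wrs[i];
		prev = &wrs[i].next;
	}

	/* Strong send queue ordering: the signaled completion of
	 * wrs[n - 1] means all earlier LOCAL_INVs are complete too.
	 */
	wrs[n - 1].send_flags = IB_SEND_SIGNALED;
	wrs[n - 1].wr_cqe = last_cqe;

	return ib_post_send(qp, first, &bad_wr);
}
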
diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c
index 560712bd9fa2..a762d192372b 100644
--- a/net/sunrpc/xprtrdma/module.c
+++ b/net/sunrpc/xprtrdma/module.c
@@ -1,18 +1,20 @@
/*
- * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2015, 2017 Oracle. All rights reserved.
*/
/* rpcrdma.ko module initialization
*/
+#include <linux/types.h>
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sunrpc/svc_rdma.h>
-#include "xprt_rdma.h"
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_TRANS
-#endif
+#include <asm/swab.h>
+
+#define CREATE_TRACE_POINTS
+#include "xprt_rdma.h"
MODULE_AUTHOR("Open Grid Computing and Network Appliance, Inc.");
MODULE_DESCRIPTION("RPC/RDMA Transport");
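
The module.c hunk above uses the standard kernel tracepoint pattern:
exactly one compilation unit defines CREATE_TRACE_POINTS before including
the header that declares the trace events, which expands each TRACE_EVENT()
declaration into a real tracepoint definition. A sketch with a hypothetical
header name:

/* In exactly one .c file of the module (module.c here): */
#define CREATE_TRACE_POINTS
#include <trace/events/example_subsys.h>	/* hypothetical trace header */

/* Every other .c file includes the same header without defining
 * CREATE_TRACE_POINTS, and simply calls trace_<event_name>(...).
 */
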
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index a3f2ab283aeb..162e5dd82466 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -292,15 +292,15 @@ encode_item_not_present(struct xdr_stream *xdr)
}
static void
-xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
+xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
- *iptr++ = cpu_to_be32(mw->mw_handle);
- *iptr++ = cpu_to_be32(mw->mw_length);
- xdr_encode_hyper(iptr, mw->mw_offset);
+ *iptr++ = cpu_to_be32(mr->mr_handle);
+ *iptr++ = cpu_to_be32(mr->mr_length);
+ xdr_encode_hyper(iptr, mr->mr_offset);
}
static int
-encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
+encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
__be32 *p;
@@ -308,12 +308,12 @@ encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
if (unlikely(!p))
return -EMSGSIZE;
- xdr_encode_rdma_segment(p, mw);
+ xdr_encode_rdma_segment(p, mr);
return 0;
}
static int
-encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
+encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
u32 position)
{
__be32 *p;
@@ -324,7 +324,7 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
*p++ = xdr_one; /* Item present */
*p++ = cpu_to_be32(position);
- xdr_encode_rdma_segment(p, mw);
+ xdr_encode_rdma_segment(p, mr);
return 0;
}
@@ -348,7 +348,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
unsigned int pos;
int nsegs;
@@ -363,21 +363,17 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
do {
seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
- false, &mw);
+ false, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
- rpcrdma_push_mw(mw, &req->rl_registered);
+ rpcrdma_mr_push(mr, &req->rl_registered);
- if (encode_read_segment(xdr, mw, pos) < 0)
+ if (encode_read_segment(xdr, mr, pos) < 0)
return -EMSGSIZE;
- dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
- rqst->rq_task->tk_pid, __func__, pos,
- mw->mw_length, (unsigned long long)mw->mw_offset,
- mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
-
+ trace_xprtrdma_read_chunk(rqst->rq_task, pos, mr, nsegs);
r_xprt->rx_stats.read_chunk_count++;
- nsegs -= mw->mw_nents;
+ nsegs -= mr->mr_nents;
} while (nsegs);
return 0;
@@ -404,7 +400,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
int nsegs, nchunks;
__be32 *segcount;
@@ -425,23 +421,19 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
- true, &mw);
+ true, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
- rpcrdma_push_mw(mw, &req->rl_registered);
+ rpcrdma_mr_push(mr, &req->rl_registered);
- if (encode_rdma_segment(xdr, mw) < 0)
+ if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
- dprintk("RPC: %5u %s: %u@0x016%llx:0x%08x (%s)\n",
- rqst->rq_task->tk_pid, __func__,
- mw->mw_length, (unsigned long long)mw->mw_offset,
- mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
-
+ trace_xprtrdma_write_chunk(rqst->rq_task, mr, nsegs);
r_xprt->rx_stats.write_chunk_count++;
- r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+ r_xprt->rx_stats.total_rdma_request += mr->mr_length;
nchunks++;
- nsegs -= mw->mw_nents;
+ nsegs -= mr->mr_nents;
} while (nsegs);
/* Update count of segments in this Write chunk */
@@ -468,7 +460,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
int nsegs, nchunks;
__be32 *segcount;
@@ -487,23 +479,19 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
- true, &mw);
+ true, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
- rpcrdma_push_mw(mw, &req->rl_registered);
+ rpcrdma_mr_push(mr, &req->rl_registered);
- if (encode_rdma_segment(xdr, mw) < 0)
+ if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
- dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
- rqst->rq_task->tk_pid, __func__,
- mw->mw_length, (unsigned long long)mw->mw_offset,
- mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
-
+ trace_xprtrdma_reply_chunk(rqst->rq_task, mr, nsegs);
r_xprt->rx_stats.reply_chunk_count++;
- r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+ r_xprt->rx_stats.total_rdma_request += mr->mr_length;
nchunks++;
- nsegs -= mw->mw_nents;
+ nsegs -= mr->mr_nents;
} while (nsegs);
/* Update count of segments in the Reply chunk */
@@ -524,9 +512,6 @@ rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
struct ib_sge *sge;
unsigned int count;
- dprintk("RPC: %s: unmapping %u sges for sc=%p\n",
- __func__, sc->sc_unmap_count, sc);
-
/* The first two SGEs contain the transport header and
* the inline buffer. These are always left mapped so
* they can be cheaply re-used.
@@ -754,11 +739,6 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
__be32 *p;
int ret;
-#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
- return rpcrdma_bc_marshal_reply(rqst);
-#endif
-
rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
xdr_init_encode(xdr, &req->rl_hdrbuf,
req->rl_rdmabuf->rg_base);
@@ -821,6 +801,17 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
rtype = rpcrdma_areadch;
}
+ /* If this is a retransmit, discard previously registered
+ * chunks. Very likely the connection has been replaced,
+ * so these registrations are invalid and unusable.
+ */
+ while (unlikely(!list_empty(&req->rl_registered))) {
+ struct rpcrdma_mr *mr;
+
+ mr = rpcrdma_mr_pop(&req->rl_registered);
+ rpcrdma_mr_defer_recovery(mr);
+ }
+
/* This implementation supports the following combinations
* of chunk lists in one RPC-over-RDMA Call message:
*
@@ -868,10 +859,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
if (ret)
goto out_err;
- dprintk("RPC: %5u %s: %s/%s: hdrlen %u rpclen\n",
- rqst->rq_task->tk_pid, __func__,
- transfertypes[rtype], transfertypes[wtype],
- xdr_stream_pos(xdr));
+ trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);
ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
&rqst->rq_snd_buf, rtype);
@@ -926,8 +914,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
curlen = rqst->rq_rcv_buf.head[0].iov_len;
if (curlen > copy_len)
curlen = copy_len;
- dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
- __func__, srcp, copy_len, curlen);
+ trace_xprtrdma_fixup(rqst, copy_len, curlen);
srcp += curlen;
copy_len -= curlen;
@@ -947,9 +934,8 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
if (curlen > pagelist_len)
curlen = pagelist_len;
- dprintk("RPC: %s: page %d"
- " srcp 0x%p len %d curlen %d\n",
- __func__, i, srcp, copy_len, curlen);
+ trace_xprtrdma_fixup_pg(rqst, i, srcp,
+ copy_len, curlen);
destp = kmap_atomic(ppages[i]);
memcpy(destp + page_base, srcp, curlen);
flush_dcache_page(ppages[i]);
@@ -984,24 +970,6 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
return fixup_copy_count;
}
-/* Caller must guarantee @rep remains stable during this call.
- */
-static void
-rpcrdma_mark_remote_invalidation(struct list_head *mws,
- struct rpcrdma_rep *rep)
-{
- struct rpcrdma_mw *mw;
-
- if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
- return;
-
- list_for_each_entry(mw, mws, mw_list)
- if (mw->mw_handle == rep->rr_inv_rkey) {
- mw->mw_flags = RPCRDMA_MW_F_RI;
- break; /* only one invalidated MR per RPC */
- }
-}
-
/* By convention, backchannel calls arrive via rdma_msg type
* messages, and never populate the chunk lists. This makes
* the RPC/RDMA header small and fixed in size, so it is
@@ -1058,26 +1026,19 @@ out_short:
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
+ u32 handle;
+ u64 offset;
__be32 *p;
p = xdr_inline_decode(xdr, 4 * sizeof(*p));
if (unlikely(!p))
return -EIO;
- ifdebug(FACILITY) {
- u64 offset;
- u32 handle;
-
- handle = be32_to_cpup(p++);
- *length = be32_to_cpup(p++);
- xdr_decode_hyper(p, &offset);
- dprintk("RPC: %s: segment %u@0x%016llx:0x%08x\n",
- __func__, *length, (unsigned long long)offset,
- handle);
- } else {
- *length = be32_to_cpup(p + 1);
- }
+ handle = be32_to_cpup(p++);
+ *length = be32_to_cpup(p++);
+ xdr_decode_hyper(p, &offset);
+ trace_xprtrdma_decode_seg(handle, *length, offset);
return 0;
}
@@ -1098,8 +1059,6 @@ static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
*length += seglength;
}
- dprintk("RPC: %s: segcount=%u, %u bytes\n",
- __func__, be32_to_cpup(p), *length);
return 0;
}
@@ -1296,8 +1255,7 @@ out:
* being marshaled.
*/
out_badheader:
- dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
- rqst->rq_task->tk_pid, __func__, be32_to_cpu(rep->rr_proc));
+ trace_xprtrdma_reply_hdr(rep);
r_xprt->rx_stats.bad_reply_count++;
status = -EIO;
goto out;
@@ -1339,9 +1297,12 @@ void rpcrdma_deferred_completion(struct work_struct *work)
struct rpcrdma_rep *rep =
container_of(work, struct rpcrdma_rep, rr_work);
struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
+ struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
- rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
- rpcrdma_release_rqst(rep->rr_rxprt, req);
+ trace_xprtrdma_defer_cmp(rep);
+ if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
+ r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
+ rpcrdma_release_rqst(r_xprt, req);
rpcrdma_complete_rqst(rep);
}
@@ -1360,8 +1321,6 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
u32 credits;
__be32 *p;
- dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
-
if (rep->rr_hdrbuf.head[0].iov_len == 0)
goto out_badstatus;
@@ -1405,8 +1364,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
rep->rr_rqst = rqst;
clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
- dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
- __func__, rep, req, be32_to_cpu(rep->rr_xid));
+ trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
return;
@@ -1420,8 +1378,7 @@ out_badstatus:
return;
out_badversion:
- dprintk("RPC: %s: invalid version %d\n",
- __func__, be32_to_cpu(rep->rr_vers));
+ trace_xprtrdma_reply_vers(rep);
goto repost;
/* The RPC transaction has already been terminated, or the header
@@ -1429,12 +1386,11 @@ out_badversion:
*/
out_norqst:
spin_unlock(&xprt->recv_lock);
- dprintk("RPC: %s: no match for incoming xid 0x%08x\n",
- __func__, be32_to_cpu(rep->rr_xid));
+ trace_xprtrdma_reply_rqst(rep);
goto repost;
out_shortreply:
- dprintk("RPC: %s: short/invalid reply\n", __func__);
+ trace_xprtrdma_reply_short(rep);
/* If no pending RPC transaction was matched, post a replacement
* receive buffer before returning.
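
One detail from the rpc_rdma.c changes above: decode_rdma_segment() now
always decodes all three segment fields (handle, length, and the 64-bit
offset) so they can feed a tracepoint, instead of decoding only the length
outside of debug builds. A minimal restatement of that decode step,
returning the fields to a hypothetical caller:

/* Sketch: pull one RDMA segment off an XDR stream. */
static int example_decode_segment(struct xdr_stream *xdr,
				  u32 *handle, u32 *length, u64 *offset)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, offset);
	return 0;
}
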
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 6ee1ad8978f3..4b1ecfe979cf 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -67,8 +67,7 @@
static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
-static unsigned int xprt_rdma_inline_write_padding;
-unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
+unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR;
int xprt_rdma_pad_optimize;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
@@ -81,6 +80,7 @@ static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;
+static unsigned int dummy;
static struct ctl_table_header *sunrpc_table_header;
@@ -114,7 +114,7 @@ static struct ctl_table xr_tunables_table[] = {
},
{
.procname = "rdma_inline_write_padding",
- .data = &xprt_rdma_inline_write_padding,
+ .data = &dummy,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -259,13 +259,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
xprt_clear_connected(xprt);
- dprintk("RPC: %s: %sconnect\n", __func__,
- r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
if (rc)
xprt_wake_pending_tasks(xprt, rc);
- dprintk("RPC: %s: exit\n", __func__);
xprt_clear_connecting(xprt);
}
@@ -275,7 +272,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
rx_xprt);
- pr_info("rpcrdma: injecting transport disconnect on xprt=%p\n", xprt);
+ trace_xprtrdma_inject_dsc(r_xprt);
rdma_disconnect(r_xprt->rx_ia.ri_id);
}
@@ -295,7 +292,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- dprintk("RPC: %s: called\n", __func__);
+ trace_xprtrdma_destroy(r_xprt);
cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
@@ -306,11 +303,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
rpcrdma_ia_close(&r_xprt->rx_ia);
xprt_rdma_free_addresses(xprt);
-
xprt_free(xprt);
- dprintk("RPC: %s: returning\n", __func__);
-
module_put(THIS_MODULE);
}
@@ -361,9 +355,7 @@ xprt_setup_rdma(struct xprt_create *args)
/*
* Set up RDMA-specific connect data.
*/
-
- sap = (struct sockaddr *)&cdata.addr;
- memcpy(sap, args->dstaddr, args->addrlen);
+ sap = args->dstaddr;
/* Ensure xprt->addr holds valid server TCP (not RDMA)
* address, for any side protocols which peek at it */
@@ -373,6 +365,7 @@ xprt_setup_rdma(struct xprt_create *args)
if (rpc_get_port(sap))
xprt_set_bound(xprt);
+ xprt_rdma_format_addresses(xprt, sap);
cdata.max_requests = xprt->max_reqs;
@@ -387,8 +380,6 @@ xprt_setup_rdma(struct xprt_create *args)
if (cdata.inline_rsize > cdata.rsize)
cdata.inline_rsize = cdata.rsize;
- cdata.padding = xprt_rdma_inline_write_padding;
-
/*
* Create new transport instance, which includes initialized
* o ia
@@ -398,7 +389,7 @@ xprt_setup_rdma(struct xprt_create *args)
new_xprt = rpcx_to_rdmax(xprt);
- rc = rpcrdma_ia_open(new_xprt, sap);
+ rc = rpcrdma_ia_open(new_xprt);
if (rc)
goto out1;
@@ -407,31 +398,19 @@ xprt_setup_rdma(struct xprt_create *args)
*/
new_xprt->rx_data = cdata;
new_ep = &new_xprt->rx_ep;
- new_ep->rep_remote_addr = cdata.addr;
rc = rpcrdma_ep_create(&new_xprt->rx_ep,
&new_xprt->rx_ia, &new_xprt->rx_data);
if (rc)
goto out2;
- /*
- * Allocate pre-registered send and receive buffers for headers and
- * any inline data. Also specify any padding which will be provided
- * from a preregistered zero buffer.
- */
rc = rpcrdma_buffer_create(new_xprt);
if (rc)
goto out3;
- /*
- * Register a callback for connection events. This is necessary because
- * connection loss notification is async. We also catch connection loss
- * when reaping receives.
- */
INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
xprt_rdma_connect_worker);
- xprt_rdma_format_addresses(xprt, sap);
xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
if (xprt->max_payload == 0)
goto out4;
@@ -445,16 +424,19 @@ xprt_setup_rdma(struct xprt_create *args)
dprintk("RPC: %s: %s:%s\n", __func__,
xprt->address_strings[RPC_DISPLAY_ADDR],
xprt->address_strings[RPC_DISPLAY_PORT]);
+ trace_xprtrdma_create(new_xprt);
return xprt;
out4:
- xprt_rdma_free_addresses(xprt);
- rc = -EINVAL;
+ rpcrdma_buffer_destroy(&new_xprt->rx_buf);
+ rc = -ENODEV;
out3:
rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
+ trace_xprtrdma_destroy(new_xprt);
+ xprt_rdma_free_addresses(xprt);
xprt_free(xprt);
return ERR_PTR(rc);
}
@@ -488,16 +470,34 @@ xprt_rdma_close(struct rpc_xprt *xprt)
rpcrdma_ep_disconnect(ep, ia);
}
+/**
+ * xprt_rdma_set_port - update server port with rpcbind result
+ * @xprt: controlling RPC transport
+ * @port: new port value
+ *
+ * Transport connect status is unchanged.
+ */
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
- struct sockaddr_in *sap;
+ struct sockaddr *sap = (struct sockaddr *)&xprt->addr;
+ char buf[8];
- sap = (struct sockaddr_in *)&xprt->addr;
- sap->sin_port = htons(port);
- sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
- sap->sin_port = htons(port);
- dprintk("RPC: %s: %u\n", __func__, port);
+ dprintk("RPC: %s: setting port for xprt %p (%s:%s) to %u\n",
+ __func__, xprt,
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT],
+ port);
+
+ rpc_set_port(sap, port);
+
+ kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
+ snprintf(buf, sizeof(buf), "%u", port);
+ xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
+
+ kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
+ snprintf(buf, sizeof(buf), "%4hx", port);
+ xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}
/**
@@ -516,8 +516,6 @@ xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
static void
xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
- dprintk("RPC: %5u %s: xprt = %p\n", task->tk_pid, __func__, xprt);
-
xprt_force_disconnect(xprt);
}
@@ -640,7 +638,7 @@ xprt_rdma_allocate(struct rpc_task *task)
req = rpcrdma_buffer_get(&r_xprt->rx_buf);
if (req == NULL)
- return -ENOMEM;
+ goto out_get;
flags = RPCRDMA_DEF_GFP;
if (RPC_IS_SWAPPER(task))
@@ -653,19 +651,18 @@ xprt_rdma_allocate(struct rpc_task *task)
if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
goto out_fail;
- dprintk("RPC: %5u %s: send size = %zd, recv size = %zd, req = %p\n",
- task->tk_pid, __func__, rqst->rq_callsize,
- rqst->rq_rcvsize, req);
-
req->rl_cpu = smp_processor_id();
req->rl_connect_cookie = 0; /* our reserved value */
rpcrdma_set_xprtdata(rqst, req);
rqst->rq_buffer = req->rl_sendbuf->rg_base;
rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
+ trace_xprtrdma_allocate(task, req);
return 0;
out_fail:
rpcrdma_buffer_put(req);
+out_get:
+ trace_xprtrdma_allocate(task, NULL);
return -ENOMEM;
}
@@ -682,13 +679,9 @@ xprt_rdma_free(struct rpc_task *task)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
- if (test_bit(RPCRDMA_REQ_F_BACKCHANNEL, &req->rl_flags))
- return;
-
- dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);
-
if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
rpcrdma_release_rqst(r_xprt, req);
+ trace_xprtrdma_rpc_done(task, req);
rpcrdma_buffer_put(req);
}
@@ -698,22 +691,12 @@ xprt_rdma_free(struct rpc_task *task)
*
* Caller holds the transport's write lock.
*
- * Return values:
- * 0: The request has been sent
- * ENOTCONN: Caller needs to invoke connect logic then call again
- * ENOBUFS: Call again later to send the request
- * EIO: A permanent error occurred. The request was not sent,
- * and don't try it again
- *
- * send_request invokes the meat of RPC RDMA. It must do the following:
- *
- * 1. Marshal the RPC request into an RPC RDMA request, which means
- * putting a header in front of data, and creating IOVs for RDMA
- * from those in the request.
- * 2. In marshaling, detect opportunities for RDMA, and use them.
- * 3. Post a recv message to set up asynch completion, then send
- * the request (rpcrdma_ep_post).
- * 4. No partial sends are possible in the RPC-RDMA protocol (as in UDP).
+ * Returns:
+ * %0 if the RPC message has been sent
+ * %-ENOTCONN if the caller should reconnect and call again
+ * %-ENOBUFS if the caller should call again later
+ * %-EIO if a permanent error occurred and the request was not
+ * sent. Do not try to send this message again.
*/
static int
xprt_rdma_send_request(struct rpc_task *task)
@@ -724,14 +707,14 @@ xprt_rdma_send_request(struct rpc_task *task)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
int rc = 0;
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ if (unlikely(!rqst->rq_buffer))
+ return xprt_rdma_bc_send_reply(rqst);
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+
if (!xprt_connected(xprt))
goto drop_connection;
- /* On retransmit, remove any previously registered chunks */
- if (unlikely(!list_empty(&req->rl_registered)))
- r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
- &req->rl_registered);
-
rc = rpcrdma_marshal_req(r_xprt, rqst);
if (rc < 0)
goto failed_marshal;
@@ -744,7 +727,7 @@ xprt_rdma_send_request(struct rpc_task *task)
goto drop_connection;
req->rl_connect_cookie = xprt->connect_cookie;
- set_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
+ __set_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
goto drop_connection;
@@ -904,8 +887,7 @@ int xprt_rdma_init(void)
"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
xprt_rdma_slot_table_entries,
xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
- dprintk("\tPadding %d\n\tMemreg %d\n",
- xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);
+ dprintk("\tPadding 0\n\tMemreg %d\n", xprt_rdma_memreg_strategy);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
if (!sunrpc_table_header)
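
The rewritten xprt_rdma_set_port() above now handles any address family and
refreshes the transport's printable port strings. The core moves are
rpc_set_port() on a generic sockaddr plus re-allocating the display string.
A minimal sketch, with 'strp' standing in for one hypothetical caller-owned
string slot:

/* Sketch: update the port and refresh one printable string. */
static void example_set_port(struct sockaddr *sap, u16 port, char **strp)
{
	char buf[8];

	rpc_set_port(sap, port);

	kfree(*strp);
	snprintf(buf, sizeof(buf), "%u", port);
	*strp = kstrdup(buf, GFP_KERNEL);
}
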
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 8607c029c0dd..f4eb63e8e689 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -71,8 +71,8 @@
/*
* internal functions
*/
-static void rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt);
-static void rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf);
+static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
+static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);
struct workqueue_struct *rpcrdma_receive_wq __read_mostly;
@@ -108,7 +108,10 @@ static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
struct rpcrdma_ep *ep = context;
+ struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
+ rx_ep);
+ trace_xprtrdma_qp_error(r_xprt, event);
pr_err("rpcrdma: %s on device %s ep %p\n",
ib_event_msg(event->event), event->device->name, context);
@@ -133,6 +136,7 @@ rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
+ trace_xprtrdma_wc_send(sc, wc);
if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
ib_wc_status_msg(wc->status),
@@ -155,13 +159,11 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
rr_cqe);
/* WARNING: Only wr_id and status are reliable at this point */
+ trace_xprtrdma_wc_receive(rep, wc);
if (wc->status != IB_WC_SUCCESS)
goto out_fail;
/* status == SUCCESS means all fields in wc are trustworthy */
- dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
- __func__, rep, wc->byte_len);
-
rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
rep->rr_wc_flags = wc->wc_flags;
rep->rr_inv_rkey = wc->ex.invalidate_rkey;
@@ -192,7 +194,6 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
unsigned int rsize, wsize;
/* Default settings for RPC-over-RDMA Version One */
- r_xprt->rx_ia.ri_reminv_expected = false;
r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
wsize = RPCRDMA_V1_DEF_INLINE_SIZE;
@@ -200,7 +201,6 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
if (pmsg &&
pmsg->cp_magic == rpcrdma_cmp_magic &&
pmsg->cp_version == RPCRDMA_CMP_VERSION) {
- r_xprt->rx_ia.ri_reminv_expected = true;
r_xprt->rx_ia.ri_implicit_roundup = true;
rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
@@ -221,11 +221,9 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
struct rpcrdma_xprt *xprt = id->context;
struct rpcrdma_ia *ia = &xprt->rx_ia;
struct rpcrdma_ep *ep = &xprt->rx_ep;
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
- struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
-#endif
int connstate = 0;
+ trace_xprtrdma_conn_upcall(xprt, event);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
case RDMA_CM_EVENT_ROUTE_RESOLVED:
@@ -234,21 +232,17 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
break;
case RDMA_CM_EVENT_ADDR_ERROR:
ia->ri_async_rc = -EHOSTUNREACH;
- dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
- __func__, ep);
complete(&ia->ri_done);
break;
case RDMA_CM_EVENT_ROUTE_ERROR:
ia->ri_async_rc = -ENETUNREACH;
- dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
- __func__, ep);
complete(&ia->ri_done);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
- pr_info("rpcrdma: removing device %s for %pIS:%u\n",
+ pr_info("rpcrdma: removing device %s for %s:%s\n",
ia->ri_device->name,
- sap, rpc_get_port(sap));
+ rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
#endif
set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
ep->rep_connected = -ENODEV;
@@ -271,8 +265,8 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
connstate = -ENETDOWN;
goto connected;
case RDMA_CM_EVENT_REJECTED:
- dprintk("rpcrdma: connection to %pIS:%u rejected: %s\n",
- sap, rpc_get_port(sap),
+ dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
+ rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
rdma_reject_msg(id, event->status));
connstate = -ECONNREFUSED;
if (event->status == IB_CM_REJ_STALE_CONN)
@@ -287,8 +281,9 @@ connected:
wake_up_all(&ep->rep_connect_wait);
/*FALLTHROUGH*/
default:
- dprintk("RPC: %s: %pIS:%u on %s/%s (ep 0x%p): %s\n",
- __func__, sap, rpc_get_port(sap),
+ dprintk("RPC: %s: %s:%s on %s/%s (ep 0x%p): %s\n",
+ __func__,
+ rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
ia->ri_device->name, ia->ri_ops->ro_displayname,
ep, rdma_event_msg(event->event));
break;
@@ -298,13 +293,14 @@ connected:
}
static struct rdma_cm_id *
-rpcrdma_create_id(struct rpcrdma_xprt *xprt,
- struct rpcrdma_ia *ia, struct sockaddr *addr)
+rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
struct rdma_cm_id *id;
int rc;
+ trace_xprtrdma_conn_start(xprt);
+
init_completion(&ia->ri_done);
init_completion(&ia->ri_remove_done);
@@ -318,7 +314,9 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
}
ia->ri_async_rc = -ETIMEDOUT;
- rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
+ rc = rdma_resolve_addr(id, NULL,
+ (struct sockaddr *)&xprt->rx_xprt.addr,
+ RDMA_RESOLVE_TIMEOUT);
if (rc) {
dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
__func__, rc);
@@ -326,8 +324,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
}
rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
if (rc < 0) {
- dprintk("RPC: %s: wait() exited: %i\n",
- __func__, rc);
+ trace_xprtrdma_conn_tout(xprt);
goto out;
}
@@ -344,8 +341,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
}
rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
if (rc < 0) {
- dprintk("RPC: %s: wait() exited: %i\n",
- __func__, rc);
+ trace_xprtrdma_conn_tout(xprt);
goto out;
}
rc = ia->ri_async_rc;
@@ -365,19 +361,18 @@ out:
/**
* rpcrdma_ia_open - Open and initialize an Interface Adapter.
- * @xprt: controlling transport
- * @addr: IP address of remote peer
+ * @xprt: transport with IA to (re)initialize
*
* Returns 0 on success, negative errno if an appropriate
* Interface Adapter could not be found and opened.
*/
int
-rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr)
+rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
struct rpcrdma_ia *ia = &xprt->rx_ia;
int rc;
- ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
+ ia->ri_id = rpcrdma_create_id(xprt, ia);
if (IS_ERR(ia->ri_id)) {
rc = PTR_ERR(ia->ri_id);
goto out_err;
@@ -392,7 +387,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr)
}
switch (xprt_rdma_memreg_strategy) {
- case RPCRDMA_FRMR:
+ case RPCRDMA_FRWR:
if (frwr_is_supported(ia)) {
ia->ri_ops = &rpcrdma_frwr_memreg_ops;
break;
@@ -462,10 +457,12 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
}
- rpcrdma_destroy_mrs(buf);
+ rpcrdma_mrs_destroy(buf);
/* Allow waiters to continue */
complete(&ia->ri_remove_done);
+
+ trace_xprtrdma_remove(r_xprt);
}
/**
@@ -476,7 +473,6 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
- dprintk("RPC: %s: entering\n", __func__);
if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
if (ia->ri_id->qp)
rdma_destroy_qp(ia->ri_id);
@@ -630,9 +626,6 @@ out1:
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
- dprintk("RPC: %s: entering, connected is %d\n",
- __func__, ep->rep_connected);
-
cancel_delayed_work_sync(&ep->rep_connect_worker);
if (ia->ri_id->qp) {
@@ -653,13 +646,12 @@ static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
- struct sockaddr *sap = (struct sockaddr *)&r_xprt->rx_data.addr;
int rc, err;
- pr_info("%s: r_xprt = %p\n", __func__, r_xprt);
+ trace_xprtrdma_reinsert(r_xprt);
rc = -EHOSTUNREACH;
- if (rpcrdma_ia_open(r_xprt, sap))
+ if (rpcrdma_ia_open(r_xprt))
goto out1;
rc = -ENOMEM;
@@ -676,7 +668,7 @@ rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
goto out3;
}
- rpcrdma_create_mrs(r_xprt);
+ rpcrdma_mrs_create(r_xprt);
return 0;
out3:
@@ -691,16 +683,15 @@ static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
struct rpcrdma_ia *ia)
{
- struct sockaddr *sap = (struct sockaddr *)&r_xprt->rx_data.addr;
struct rdma_cm_id *id, *old;
int err, rc;
- dprintk("RPC: %s: reconnecting...\n", __func__);
+ trace_xprtrdma_reconnect(r_xprt);
rpcrdma_ep_disconnect(ep, ia);
rc = -EHOSTUNREACH;
- id = rpcrdma_create_id(r_xprt, ia, sap);
+ id = rpcrdma_create_id(r_xprt, ia);
if (IS_ERR(id))
goto out;
@@ -817,16 +808,14 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
int rc;
rc = rdma_disconnect(ia->ri_id);
- if (!rc) {
+ if (!rc)
/* returns without wait if not connected */
wait_event_interruptible(ep->rep_connect_wait,
ep->rep_connected != 1);
- dprintk("RPC: %s: after wait, %sconnected\n", __func__,
- (ep->rep_connected == 1) ? "still " : "dis");
- } else {
- dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
+ else
ep->rep_connected = rc;
- }
+ trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
+ rx_ep), rc);
ib_drain_qp(ia->ri_id->qp);
}
@@ -998,15 +987,15 @@ rpcrdma_mr_recovery_worker(struct work_struct *work)
{
struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
rb_recovery_worker.work);
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
spin_lock(&buf->rb_recovery_lock);
while (!list_empty(&buf->rb_stale_mrs)) {
- mw = rpcrdma_pop_mw(&buf->rb_stale_mrs);
+ mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
spin_unlock(&buf->rb_recovery_lock);
- dprintk("RPC: %s: recovering MR %p\n", __func__, mw);
- mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);
+ trace_xprtrdma_recover_mr(mr);
+ mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);
spin_lock(&buf->rb_recovery_lock);
}
@@ -1014,20 +1003,20 @@ rpcrdma_mr_recovery_worker(struct work_struct *work)
}
void
-rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
+rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
{
- struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
spin_lock(&buf->rb_recovery_lock);
- rpcrdma_push_mw(mw, &buf->rb_stale_mrs);
+ rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
spin_unlock(&buf->rb_recovery_lock);
schedule_delayed_work(&buf->rb_recovery_worker, 0);
}
static void
-rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
+rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
@@ -1036,32 +1025,32 @@ rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
LIST_HEAD(all);
for (count = 0; count < 32; count++) {
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
int rc;
- mw = kzalloc(sizeof(*mw), GFP_KERNEL);
- if (!mw)
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
break;
- rc = ia->ri_ops->ro_init_mr(ia, mw);
+ rc = ia->ri_ops->ro_init_mr(ia, mr);
if (rc) {
- kfree(mw);
+ kfree(mr);
break;
}
- mw->mw_xprt = r_xprt;
+ mr->mr_xprt = r_xprt;
- list_add(&mw->mw_list, &free);
- list_add(&mw->mw_all, &all);
+ list_add(&mr->mr_list, &free);
+ list_add(&mr->mr_all, &all);
}
- spin_lock(&buf->rb_mwlock);
- list_splice(&free, &buf->rb_mws);
+ spin_lock(&buf->rb_mrlock);
+ list_splice(&free, &buf->rb_mrs);
list_splice(&all, &buf->rb_all);
r_xprt->rx_stats.mrs_allocated += count;
- spin_unlock(&buf->rb_mwlock);
+ spin_unlock(&buf->rb_mrlock);
- dprintk("RPC: %s: created %u MRs\n", __func__, count);
+ trace_xprtrdma_createmrs(r_xprt, count);
}
static void
@@ -1072,7 +1061,7 @@ rpcrdma_mr_refresh_worker(struct work_struct *work)
struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
rx_buf);
- rpcrdma_create_mrs(r_xprt);
+ rpcrdma_mrs_create(r_xprt);
}
struct rpcrdma_req *
@@ -1093,10 +1082,17 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
return req;
}
-struct rpcrdma_rep *
+/**
+ * rpcrdma_create_rep - Allocate an rpcrdma_rep object
+ * @r_xprt: controlling transport
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_rep *rep;
int rc;
@@ -1121,12 +1117,18 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
rep->rr_recv_wr.num_sge = 1;
- return rep;
+
+ spin_lock(&buf->rb_lock);
+ list_add(&rep->rr_list, &buf->rb_recv_bufs);
+ spin_unlock(&buf->rb_lock);
+ return 0;
out_free:
kfree(rep);
out:
- return ERR_PTR(rc);
+ dprintk("RPC: %s: reply buffer %d alloc failed\n",
+ __func__, rc);
+ return rc;
}
int
@@ -1137,10 +1139,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
buf->rb_max_requests = r_xprt->rx_data.max_requests;
buf->rb_bc_srv_max_requests = 0;
- spin_lock_init(&buf->rb_mwlock);
+ spin_lock_init(&buf->rb_mrlock);
spin_lock_init(&buf->rb_lock);
spin_lock_init(&buf->rb_recovery_lock);
- INIT_LIST_HEAD(&buf->rb_mws);
+ INIT_LIST_HEAD(&buf->rb_mrs);
INIT_LIST_HEAD(&buf->rb_all);
INIT_LIST_HEAD(&buf->rb_stale_mrs);
INIT_DELAYED_WORK(&buf->rb_refresh_worker,
@@ -1148,7 +1150,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
INIT_DELAYED_WORK(&buf->rb_recovery_worker,
rpcrdma_mr_recovery_worker);
- rpcrdma_create_mrs(r_xprt);
+ rpcrdma_mrs_create(r_xprt);
INIT_LIST_HEAD(&buf->rb_send_bufs);
INIT_LIST_HEAD(&buf->rb_allreqs);
@@ -1167,17 +1169,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
}
INIT_LIST_HEAD(&buf->rb_recv_bufs);
- for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
- struct rpcrdma_rep *rep;
-
- rep = rpcrdma_create_rep(r_xprt);
- if (IS_ERR(rep)) {
- dprintk("RPC: %s: reply buffer %d alloc failed\n",
- __func__, i);
- rc = PTR_ERR(rep);
+ for (i = 0; i <= buf->rb_max_requests; i++) {
+ rc = rpcrdma_create_rep(r_xprt);
+ if (rc)
goto out;
- }
- list_add(&rep->rr_list, &buf->rb_recv_bufs);
}
rc = rpcrdma_sendctxs_create(r_xprt);
@@ -1229,26 +1224,26 @@ rpcrdma_destroy_req(struct rpcrdma_req *req)
}
static void
-rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
+rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
rx_buf);
struct rpcrdma_ia *ia = rdmab_to_ia(buf);
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
unsigned int count;
count = 0;
- spin_lock(&buf->rb_mwlock);
+ spin_lock(&buf->rb_mrlock);
while (!list_empty(&buf->rb_all)) {
- mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
- list_del(&mw->mw_all);
+ mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
+ list_del(&mr->mr_all);
- spin_unlock(&buf->rb_mwlock);
- ia->ri_ops->ro_release_mr(mw);
+ spin_unlock(&buf->rb_mrlock);
+ ia->ri_ops->ro_release_mr(mr);
count++;
- spin_lock(&buf->rb_mwlock);
+ spin_lock(&buf->rb_mrlock);
}
- spin_unlock(&buf->rb_mwlock);
+ spin_unlock(&buf->rb_mrlock);
r_xprt->rx_stats.mrs_allocated = 0;
dprintk("RPC: %s: released %u MRs\n", __func__, count);
@@ -1285,27 +1280,33 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
spin_unlock(&buf->rb_reqslock);
buf->rb_recv_count = 0;
- rpcrdma_destroy_mrs(buf);
+ rpcrdma_mrs_destroy(buf);
}
-struct rpcrdma_mw *
-rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
+/**
+ * rpcrdma_mr_get - Allocate an rpcrdma_mr object
+ * @r_xprt: controlling transport
+ *
+ * Returns an initialized rpcrdma_mr or NULL if no free
+ * rpcrdma_mr objects are available.
+ */
+struct rpcrdma_mr *
+rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct rpcrdma_mw *mw = NULL;
+ struct rpcrdma_mr *mr = NULL;
- spin_lock(&buf->rb_mwlock);
- if (!list_empty(&buf->rb_mws))
- mw = rpcrdma_pop_mw(&buf->rb_mws);
- spin_unlock(&buf->rb_mwlock);
+ spin_lock(&buf->rb_mrlock);
+ if (!list_empty(&buf->rb_mrs))
+ mr = rpcrdma_mr_pop(&buf->rb_mrs);
+ spin_unlock(&buf->rb_mrlock);
- if (!mw)
- goto out_nomws;
- mw->mw_flags = 0;
- return mw;
+ if (!mr)
+ goto out_nomrs;
+ return mr;
-out_nomws:
- dprintk("RPC: %s: no MWs available\n", __func__);
+out_nomrs:
+ trace_xprtrdma_nomrs(r_xprt);
if (r_xprt->rx_ep.rep_connected != -ENODEV)
schedule_delayed_work(&buf->rb_refresh_worker, 0);
@@ -1315,14 +1316,39 @@ out_nomws:
return NULL;
}
+static void
+__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
+{
+ spin_lock(&buf->rb_mrlock);
+ rpcrdma_mr_push(mr, &buf->rb_mrs);
+ spin_unlock(&buf->rb_mrlock);
+}
+
+/**
+ * rpcrdma_mr_put - Release an rpcrdma_mr object
+ * @mr: object to release
+ *
+ */
void
-rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
+rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
- struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ __rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
+}
+
+/**
+ * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
+ * @mr: object to release
+ *
+ */
+void
+rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
+{
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
- spin_lock(&buf->rb_mwlock);
- rpcrdma_push_mw(mw, &buf->rb_mws);
- spin_unlock(&buf->rb_mwlock);
+ trace_xprtrdma_dma_unmap(mr);
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+ mr->mr_sg, mr->mr_nents, mr->mr_dir);
+ __rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}
static struct rpcrdma_rep *
@@ -1359,11 +1385,11 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
req = rpcrdma_buffer_get_req_locked(buffers);
req->rl_reply = rpcrdma_buffer_get_rep(buffers);
spin_unlock(&buffers->rb_lock);
+
return req;
out_reqbuf:
spin_unlock(&buffers->rb_lock);
- pr_warn("RPC: %s: out of request buffers\n", __func__);
return NULL;
}
@@ -1519,9 +1545,6 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
req->rl_reply = NULL;
}
- dprintk("RPC: %s: posting %d s/g entries\n",
- __func__, send_wr->num_sge);
-
if (!ep->rep_send_count ||
test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
send_wr->send_flags |= IB_SEND_SIGNALED;
@@ -1530,14 +1553,12 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
send_wr->send_flags &= ~IB_SEND_SIGNALED;
--ep->rep_send_count;
}
+
rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
+ trace_xprtrdma_post_send(req, rc);
if (rc)
- goto out_postsend_err;
+ return -ENOTCONN;
return 0;
-
-out_postsend_err:
- pr_err("rpcrdma: RDMA Send ib_post_send returned %i\n", rc);
- return -ENOTCONN;
}
int
@@ -1550,23 +1571,20 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
goto out_map;
rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
+ trace_xprtrdma_post_recv(rep, rc);
if (rc)
- goto out_postrecv;
+ return -ENOTCONN;
return 0;
out_map:
pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
return -EIO;
-
-out_postrecv:
- pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
- return -ENOTCONN;
}
/**
* rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
* @r_xprt: transport associated with these backchannel resources
- * @min_reqs: minimum number of incoming requests expected
+ * @count: minimum number of incoming requests expected
*
* Returns zero if all requested buffers were posted, or a negative errno.
*/
@@ -1594,7 +1612,7 @@ rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
out_reqbuf:
spin_unlock(&buffers->rb_lock);
- pr_warn("%s: no extra receive buffers\n", __func__);
+ trace_xprtrdma_noreps(r_xprt);
return -ENOMEM;
out_rc:
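The broader pattern in these verbs.c hunks is replacing dprintk() instrumentation with static tracepoints such as trace_xprtrdma_reinsert(), trace_xprtrdma_reconnect() and trace_xprtrdma_disconnect(). As a hedged sketch only, a tracepoint of that shape is declared with the kernel's TRACE_EVENT machinery roughly as follows; the real definitions live in include/trace/events/rpcrdma.h and may record different fields:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H

#include <linux/tracepoint.h>

TRACE_EVENT(xprtrdma_disconnect,
	TP_PROTO(const struct rpcrdma_xprt *r_xprt, int rc),
	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, rc)
		__field(int, connect_status)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep.rep_connected;
	),

	TP_printk("r_xprt=%p rc=%d connection status=%d",
		__entry->r_xprt, __entry->rc, __entry->connect_status)
);

#endif /* _TRACE_RPCRDMA_H */

#include <trace/define_trace.h>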
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 1342f743f1c4..69883a960a3f 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -73,11 +73,10 @@ struct rpcrdma_ia {
struct completion ri_remove_done;
int ri_async_rc;
unsigned int ri_max_segs;
- unsigned int ri_max_frmr_depth;
+ unsigned int ri_max_frwr_depth;
unsigned int ri_max_inline_write;
unsigned int ri_max_inline_read;
unsigned int ri_max_send_sges;
- bool ri_reminv_expected;
bool ri_implicit_roundup;
enum ib_mr_type ri_mrtype;
unsigned long ri_flags;
@@ -101,7 +100,6 @@ struct rpcrdma_ep {
wait_queue_head_t rep_connect_wait;
struct rpcrdma_connect_private rep_cm_private;
struct rdma_conn_param rep_remote_cma;
- struct sockaddr_storage rep_remote_addr;
struct delayed_work rep_connect_worker;
};
@@ -232,29 +230,29 @@ enum {
};
/*
- * struct rpcrdma_mw - external memory region metadata
+ * struct rpcrdma_mr - external memory region metadata
*
* An external memory region is any buffer or page that is registered
* on the fly (ie, not pre-registered).
*
- * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
+ * Each rpcrdma_buffer has a list of free MWs anchored in rb_mrs. During
* call_allocate, rpcrdma_buffer_get() assigns one to each segment in
* an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
* track of registration metadata while each RPC is pending.
* rpcrdma_deregister_external() uses this metadata to unmap and
* release these resources when an RPC is complete.
*/
-enum rpcrdma_frmr_state {
- FRMR_IS_INVALID, /* ready to be used */
- FRMR_IS_VALID, /* in use */
- FRMR_FLUSHED_FR, /* flushed FASTREG WR */
- FRMR_FLUSHED_LI, /* flushed LOCALINV WR */
+enum rpcrdma_frwr_state {
+ FRWR_IS_INVALID, /* ready to be used */
+ FRWR_IS_VALID, /* in use */
+ FRWR_FLUSHED_FR, /* flushed FASTREG WR */
+ FRWR_FLUSHED_LI, /* flushed LOCALINV WR */
};
-struct rpcrdma_frmr {
+struct rpcrdma_frwr {
struct ib_mr *fr_mr;
struct ib_cqe fr_cqe;
- enum rpcrdma_frmr_state fr_state;
+ enum rpcrdma_frwr_state fr_state;
struct completion fr_linv_done;
union {
struct ib_reg_wr fr_regwr;
@@ -267,26 +265,20 @@ struct rpcrdma_fmr {
u64 *fm_physaddrs;
};
-struct rpcrdma_mw {
- struct list_head mw_list;
- struct scatterlist *mw_sg;
- int mw_nents;
- enum dma_data_direction mw_dir;
- unsigned long mw_flags;
+struct rpcrdma_mr {
+ struct list_head mr_list;
+ struct scatterlist *mr_sg;
+ int mr_nents;
+ enum dma_data_direction mr_dir;
union {
struct rpcrdma_fmr fmr;
- struct rpcrdma_frmr frmr;
+ struct rpcrdma_frwr frwr;
};
- struct rpcrdma_xprt *mw_xprt;
- u32 mw_handle;
- u32 mw_length;
- u64 mw_offset;
- struct list_head mw_all;
-};
-
-/* mw_flags */
-enum {
- RPCRDMA_MW_F_RI = 1,
+ struct rpcrdma_xprt *mr_xprt;
+ u32 mr_handle;
+ u32 mr_length;
+ u64 mr_offset;
+ struct list_head mr_all;
};
/*
@@ -362,8 +354,7 @@ struct rpcrdma_req {
/* rl_flags */
enum {
- RPCRDMA_REQ_F_BACKCHANNEL = 0,
- RPCRDMA_REQ_F_PENDING,
+ RPCRDMA_REQ_F_PENDING = 0,
RPCRDMA_REQ_F_TX_RESOURCES,
};
@@ -374,25 +365,25 @@ rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
}
static inline struct rpcrdma_req *
-rpcr_to_rdmar(struct rpc_rqst *rqst)
+rpcr_to_rdmar(const struct rpc_rqst *rqst)
{
return rqst->rq_xprtdata;
}
static inline void
-rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list)
+rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
- list_add_tail(&mw->mw_list, list);
+ list_add_tail(&mr->mr_list, list);
}
-static inline struct rpcrdma_mw *
-rpcrdma_pop_mw(struct list_head *list)
+static inline struct rpcrdma_mr *
+rpcrdma_mr_pop(struct list_head *list)
{
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
- mw = list_first_entry(list, struct rpcrdma_mw, mw_list);
- list_del(&mw->mw_list);
- return mw;
+ mr = list_first_entry(list, struct rpcrdma_mr, mr_list);
+ list_del(&mr->mr_list);
+ return mr;
}
/*
@@ -402,8 +393,8 @@ rpcrdma_pop_mw(struct list_head *list)
* One of these is associated with a transport instance
*/
struct rpcrdma_buffer {
- spinlock_t rb_mwlock; /* protect rb_mws list */
- struct list_head rb_mws;
+ spinlock_t rb_mrlock; /* protect rb_mrs list */
+ struct list_head rb_mrs;
struct list_head rb_all;
unsigned long rb_sc_head;
@@ -438,13 +429,11 @@ struct rpcrdma_buffer {
* This data should be set with mount options
*/
struct rpcrdma_create_data_internal {
- struct sockaddr_storage addr; /* RDMA server address */
unsigned int max_requests; /* max requests (slots) in flight */
unsigned int rsize; /* mount rsize - max read hdr+data */
unsigned int wsize; /* mount wsize - max write hdr+data */
unsigned int inline_rsize; /* max non-rdma read data payload */
unsigned int inline_wsize; /* max non-rdma write data payload */
- unsigned int padding; /* non-rdma write header padding */
};
/*
@@ -484,17 +473,19 @@ struct rpcrdma_memreg_ops {
struct rpcrdma_mr_seg *
(*ro_map)(struct rpcrdma_xprt *,
struct rpcrdma_mr_seg *, int, bool,
- struct rpcrdma_mw **);
+ struct rpcrdma_mr **);
+ void (*ro_reminv)(struct rpcrdma_rep *rep,
+ struct list_head *mrs);
void (*ro_unmap_sync)(struct rpcrdma_xprt *,
struct list_head *);
- void (*ro_recover_mr)(struct rpcrdma_mw *);
+ void (*ro_recover_mr)(struct rpcrdma_mr *mr);
int (*ro_open)(struct rpcrdma_ia *,
struct rpcrdma_ep *,
struct rpcrdma_create_data_internal *);
size_t (*ro_maxpages)(struct rpcrdma_xprt *);
int (*ro_init_mr)(struct rpcrdma_ia *,
- struct rpcrdma_mw *);
- void (*ro_release_mr)(struct rpcrdma_mw *);
+ struct rpcrdma_mr *);
+ void (*ro_release_mr)(struct rpcrdma_mr *mr);
const char *ro_displayname;
const int ro_send_w_inv_ok;
};
@@ -525,6 +516,18 @@ struct rpcrdma_xprt {
#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
+static inline const char *
+rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt)
+{
+ return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR];
+}
+
+static inline const char *
+rpcrdma_portstr(const struct rpcrdma_xprt *r_xprt)
+{
+ return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_PORT];
+}
+
/* Setting this to 0 ensures interoperability with early servers.
* Setting this to 1 enhances certain unaligned read/write performance.
* Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
@@ -538,7 +541,7 @@ extern unsigned int xprt_rdma_memreg_strategy;
/*
* Interface Adapter calls - xprtrdma/verbs.c
*/
-int rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr);
+int rpcrdma_ia_open(struct rpcrdma_xprt *xprt);
void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
void rpcrdma_ia_close(struct rpcrdma_ia *);
bool frwr_is_supported(struct rpcrdma_ia *);
@@ -564,22 +567,23 @@ int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_rep *);
* Buffer calls - xprtrdma/verbs.c
*/
struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
-struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
void rpcrdma_destroy_req(struct rpcrdma_req *);
+int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
-struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
-void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
+struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
+void rpcrdma_mr_put(struct rpcrdma_mr *mr);
+void rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr);
+void rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr);
+
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
-void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);
-
struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
gfp_t);
bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
@@ -663,7 +667,7 @@ int xprt_rdma_bc_up(struct svc_serv *, struct net *);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
-int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
+int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
@@ -671,3 +675,5 @@ void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
extern struct xprt_class xprt_rdma_bc;
#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */
+
+#include <trace/events/rpcrdma.h>
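Because the MW-to-MR rename touches every consumer of the pool, a sketch of the intended caller-side lifecycle may help. This is illustrative only: the registration step is elided, and example_register_segment() is a hypothetical helper, not part of the patch.

static int example_register_segment(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mr *mr;

	mr = rpcrdma_mr_get(r_xprt);	/* NULL when the pool is empty */
	if (!mr)
		return -EAGAIN;		/* refresh worker already kicked */

	/* ... map a scatterlist into mr->mr_sg, post a FastReg WR ... */

	rpcrdma_mr_unmap_and_put(mr);	/* DMA-unmap, then back onto rb_mrs */
	return 0;
}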
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6d0cc3b8f932..18803021f242 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -52,6 +52,8 @@
#include "sunrpc.h"
+#define RPC_TCP_READ_CHUNK_SZ (3*512*1024)
+
static void xs_close(struct rpc_xprt *xprt);
static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
struct socket *sock);
@@ -1003,6 +1005,7 @@ static void xs_local_data_receive(struct sock_xprt *transport)
struct sock *sk;
int err;
+restart:
mutex_lock(&transport->recv_mutex);
sk = transport->inet;
if (sk == NULL)
@@ -1016,6 +1019,11 @@ static void xs_local_data_receive(struct sock_xprt *transport)
}
if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
break;
+ if (need_resched()) {
+ mutex_unlock(&transport->recv_mutex);
+ cond_resched();
+ goto restart;
+ }
}
out:
mutex_unlock(&transport->recv_mutex);
@@ -1094,6 +1102,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
struct sock *sk;
int err;
+restart:
mutex_lock(&transport->recv_mutex);
sk = transport->inet;
if (sk == NULL)
@@ -1107,6 +1116,11 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
}
if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
break;
+ if (need_resched()) {
+ mutex_unlock(&transport->recv_mutex);
+ cond_resched();
+ goto restart;
+ }
}
out:
mutex_unlock(&transport->recv_mutex);
@@ -1479,6 +1493,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
.offset = offset,
.count = len,
};
+ size_t ret;
dprintk("RPC: xs_tcp_data_recv started\n");
do {
@@ -1507,9 +1522,14 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
/* Skip over any trailing bytes on short reads */
xs_tcp_read_discard(transport, &desc);
} while (desc.count);
+ ret = len - desc.count;
+ if (ret < rd_desc->count)
+ rd_desc->count -= ret;
+ else
+ rd_desc->count = 0;
trace_xs_tcp_data_recv(transport);
dprintk("RPC: xs_tcp_data_recv done\n");
- return len - desc.count;
+ return ret;
}
static void xs_tcp_data_receive(struct sock_xprt *transport)
@@ -1517,30 +1537,34 @@ static void xs_tcp_data_receive(struct sock_xprt *transport)
struct rpc_xprt *xprt = &transport->xprt;
struct sock *sk;
read_descriptor_t rd_desc = {
- .count = 2*1024*1024,
.arg.data = xprt,
};
unsigned long total = 0;
- int loop;
int read = 0;
+restart:
mutex_lock(&transport->recv_mutex);
sk = transport->inet;
if (sk == NULL)
goto out;
/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
- for (loop = 0; loop < 64; loop++) {
+ for (;;) {
+ rd_desc.count = RPC_TCP_READ_CHUNK_SZ;
lock_sock(sk);
read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
- if (read <= 0) {
+ if (rd_desc.count != 0 || read < 0) {
clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
release_sock(sk);
break;
}
release_sock(sk);
total += read;
- rd_desc.count = 65536;
+ if (need_resched()) {
+ mutex_unlock(&transport->recv_mutex);
+ cond_resched();
+ goto restart;
+ }
}
if (test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
queue_work(xprtiod_workqueue, &transport->recv_worker);
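The xprtsock hunks bound each receive pass to RPC_TCP_READ_CHUNK_SZ and voluntarily reschedule between chunks, so a flood of inbound data cannot starve other work while recv_mutex is held. A self-contained userspace analogue of that loop shape, with pthread_mutex_lock() and sched_yield() standing in for mutex_lock() and cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

#define CHUNK 64

static pthread_mutex_t recv_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pending = 1000;		/* fake work-queue depth */

static bool consume_one(void)
{
	return pending-- > 0;		/* false once drained */
}

static void drain_queue(void)
{
restart:
	pthread_mutex_lock(&recv_mutex);
	for (int i = 0; i < CHUNK; i++) {
		if (!consume_one())
			goto out;
	}
	/* Chunk used up but work may remain: drop lock, yield, restart */
	pthread_mutex_unlock(&recv_mutex);
	sched_yield();
	goto restart;
out:
	pthread_mutex_unlock(&recv_mutex);
}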
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 329325bd553e..37892b3909af 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,7 +1,7 @@
/*
* net/tipc/bcast.c: TIPC broadcast code
*
- * Copyright (c) 2004-2006, 2014-2016, Ericsson AB
+ * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
@@ -42,8 +42,8 @@
#include "link.h"
#include "name_table.h"
-#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
-#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
+#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
+#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
const char tipc_bclink_name[] = "broadcast-link";
@@ -74,6 +74,10 @@ static struct tipc_bc_base *tipc_bc_base(struct net *net)
return tipc_net(net)->bcbase;
}
+/* tipc_bcast_get_mtu(): get the MTU currently used by broadcast link
+ * Note: the MTU is decremented to give room for a tunnel header, in
+ * case the message needs to be sent as replicast
+ */
int tipc_bcast_get_mtu(struct net *net)
{
return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
@@ -515,7 +519,7 @@ int tipc_bcast_init(struct net *net)
spin_lock_init(&tipc_net(net)->bclock);
if (!tipc_link_bc_create(net, 0, 0,
- U16_MAX,
+ FB_MTU,
BCLINK_WIN_DEFAULT,
0,
&bb->inputq,
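To make the tipc_bcast_get_mtu() comment concrete: the reported MTU reserves INT_H_SIZE bytes so a tunnel header can be prepended if the message must be re-sent as replicast. A toy calculation, where the 1500-byte bearer MTU is an assumed example:

#include <stdio.h>

#define INT_H_SIZE 40	/* TIPC internal header size, bytes */

int main(void)
{
	int link_mtu = 1500;			/* assumed bearer MTU */
	int bcast_mtu = link_mtu - INT_H_SIZE;	/* room for tunnel header */

	printf("usable broadcast payload MTU: %d\n", bcast_mtu);
	return 0;
}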
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 964342689f2c..20b21af2ff14 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -49,7 +49,6 @@
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
-#include <asm/hardirq.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/list.h>
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 8e12ab55346b..122162a31816 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -49,8 +49,6 @@
#define ADV_ACTIVE (ADV_UNIT * 12)
enum mbr_state {
- MBR_QUARANTINED,
- MBR_DISCOVERED,
MBR_JOINING,
MBR_PUBLISHED,
MBR_JOINED,
@@ -64,8 +62,7 @@ enum mbr_state {
struct tipc_member {
struct rb_node tree_node;
struct list_head list;
- struct list_head congested;
- struct sk_buff *event_msg;
+ struct list_head small_win;
struct sk_buff_head deferredq;
struct tipc_group *group;
u32 node;
@@ -77,21 +74,18 @@ struct tipc_member {
u16 bc_rcv_nxt;
u16 bc_syncpt;
u16 bc_acked;
- bool usr_pending;
};
struct tipc_group {
struct rb_root members;
- struct list_head congested;
+ struct list_head small_win;
struct list_head pending;
struct list_head active;
- struct list_head reclaiming;
struct tipc_nlist dests;
struct net *net;
int subid;
u32 type;
u32 instance;
- u32 domain;
u32 scope;
u32 portid;
u16 member_cnt;
@@ -99,6 +93,7 @@ struct tipc_group {
u16 max_active;
u16 bc_snd_nxt;
u16 bc_ackers;
+ bool *open;
bool loopback;
bool events;
};
@@ -106,10 +101,21 @@ struct tipc_group {
static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
int mtyp, struct sk_buff_head *xmitq);
+static void tipc_group_open(struct tipc_member *m, bool *wakeup)
+{
+ *wakeup = false;
+ if (list_empty(&m->small_win))
+ return;
+ list_del_init(&m->small_win);
+ *m->group->open = true;
+ *wakeup = true;
+}
+
static void tipc_group_decr_active(struct tipc_group *grp,
struct tipc_member *m)
{
- if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING)
+ if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
+ m->state == MBR_REMITTED)
grp->active_cnt--;
}
@@ -136,14 +142,14 @@ u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
return grp->bc_snd_nxt;
}
-static bool tipc_group_is_enabled(struct tipc_member *m)
+static bool tipc_group_is_receiver(struct tipc_member *m)
{
- return m->state != MBR_QUARANTINED && m->state != MBR_LEAVING;
+ return m && m->state != MBR_JOINING && m->state != MBR_LEAVING;
}
-static bool tipc_group_is_receiver(struct tipc_member *m)
+static bool tipc_group_is_sender(struct tipc_member *m)
{
- return m && m->state >= MBR_JOINED;
+ return m && m->state != MBR_JOINING && m->state != MBR_PUBLISHED;
}
u32 tipc_group_exclude(struct tipc_group *grp)
@@ -159,8 +165,11 @@ int tipc_group_size(struct tipc_group *grp)
}
struct tipc_group *tipc_group_create(struct net *net, u32 portid,
- struct tipc_group_req *mreq)
+ struct tipc_group_req *mreq,
+ bool *group_is_open)
{
+ u32 filter = TIPC_SUB_PORTS | TIPC_SUB_NO_STATUS;
+ bool global = mreq->scope != TIPC_NODE_SCOPE;
struct tipc_group *grp;
u32 type = mreq->type;
@@ -168,25 +177,41 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid,
if (!grp)
return NULL;
tipc_nlist_init(&grp->dests, tipc_own_addr(net));
- INIT_LIST_HEAD(&grp->congested);
+ INIT_LIST_HEAD(&grp->small_win);
INIT_LIST_HEAD(&grp->active);
INIT_LIST_HEAD(&grp->pending);
- INIT_LIST_HEAD(&grp->reclaiming);
grp->members = RB_ROOT;
grp->net = net;
grp->portid = portid;
- grp->domain = addr_domain(net, mreq->scope);
grp->type = type;
grp->instance = mreq->instance;
grp->scope = mreq->scope;
grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
- if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, &grp->subid))
+ grp->open = group_is_open;
+ filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
+ if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
+ filter, &grp->subid))
return grp;
kfree(grp);
return NULL;
}
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
+{
+ struct rb_root *tree = &grp->members;
+ struct tipc_member *m, *tmp;
+ struct sk_buff_head xmitq;
+
+ skb_queue_head_init(&xmitq);
+ rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
+ tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
+ tipc_group_update_member(m, 0);
+ }
+ tipc_node_distr_xmit(net, &xmitq);
+ *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
+}
+
void tipc_group_delete(struct net *net, struct tipc_group *grp)
{
struct rb_root *tree = &grp->members;
@@ -232,7 +257,7 @@ static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp,
struct tipc_member *m;
m = tipc_group_find_member(grp, node, port);
- if (m && tipc_group_is_enabled(m))
+ if (m && tipc_group_is_receiver(m))
return m;
return NULL;
}
@@ -277,7 +302,7 @@ static void tipc_group_add_to_tree(struct tipc_group *grp,
static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
u32 node, u32 port,
- int state)
+ u32 instance, int state)
{
struct tipc_member *m;
@@ -285,11 +310,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
if (!m)
return NULL;
INIT_LIST_HEAD(&m->list);
- INIT_LIST_HEAD(&m->congested);
+ INIT_LIST_HEAD(&m->small_win);
__skb_queue_head_init(&m->deferredq);
m->group = grp;
m->node = node;
m->port = port;
+ m->instance = instance;
m->bc_acked = grp->bc_snd_nxt - 1;
grp->member_cnt++;
tipc_group_add_to_tree(grp, m);
@@ -298,9 +324,10 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
return m;
}
-void tipc_group_add_member(struct tipc_group *grp, u32 node, u32 port)
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+ u32 port, u32 instance)
{
- tipc_group_create_member(grp, node, port, MBR_DISCOVERED);
+ tipc_group_create_member(grp, node, port, instance, MBR_PUBLISHED);
}
static void tipc_group_delete_member(struct tipc_group *grp,
@@ -314,7 +341,7 @@ static void tipc_group_delete_member(struct tipc_group *grp,
grp->bc_ackers--;
list_del_init(&m->list);
- list_del_init(&m->congested);
+ list_del_init(&m->small_win);
tipc_group_decr_active(grp, m);
/* If last member on a node, remove node from dest list */
@@ -343,7 +370,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
struct tipc_group *grp = m->group;
struct tipc_member *_m, *tmp;
- if (!tipc_group_is_enabled(m))
+ if (!tipc_group_is_receiver(m))
return;
m->window -= len;
@@ -351,16 +378,14 @@ void tipc_group_update_member(struct tipc_member *m, int len)
if (m->window >= ADV_IDLE)
return;
- list_del_init(&m->congested);
+ list_del_init(&m->small_win);
- /* Sort member into congested members' list */
- list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
- if (m->window > _m->window)
- continue;
- list_add_tail(&m->congested, &_m->congested);
- return;
+ /* Sort member into small_window members' list */
+ list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
+ if (_m->window > m->window)
+ break;
}
- list_add_tail(&m->congested, &grp->congested);
+ list_add_tail(&m->small_win, &_m->small_win);
}
void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
@@ -372,7 +397,7 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
for (n = rb_first(&grp->members); n; n = rb_next(n)) {
m = container_of(n, struct tipc_member, tree_node);
- if (tipc_group_is_enabled(m)) {
+ if (tipc_group_is_receiver(m)) {
tipc_group_update_member(m, len);
m->bc_acked = prev;
ackers++;
@@ -393,20 +418,20 @@ bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
int adv, state;
m = tipc_group_find_dest(grp, dnode, dport);
- *mbr = m;
- if (!m)
+ if (!tipc_group_is_receiver(m)) {
+ *mbr = NULL;
return false;
- if (m->usr_pending)
- return true;
+ }
+ *mbr = m;
+
if (m->window >= len)
return false;
- m->usr_pending = true;
+
+ *grp->open = false;
/* If not fully advertised, do it now to prevent mutual blocking */
adv = m->advertised;
state = m->state;
- if (state < MBR_JOINED)
- return true;
if (state == MBR_JOINED && adv == ADV_IDLE)
return true;
if (state == MBR_ACTIVE && adv == ADV_ACTIVE)
@@ -424,13 +449,14 @@ bool tipc_group_bc_cong(struct tipc_group *grp, int len)
struct tipc_member *m = NULL;
/* If prev bcast was replicast, reject until all receivers have acked */
- if (grp->bc_ackers)
+ if (grp->bc_ackers) {
+ *grp->open = false;
return true;
-
- if (list_empty(&grp->congested))
+ }
+ if (list_empty(&grp->small_win))
return false;
- m = list_first_entry(&grp->congested, struct tipc_member, congested);
+ m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
if (m->window >= len)
return false;
@@ -485,7 +511,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
goto drop;
m = tipc_group_find_member(grp, node, port);
- if (!tipc_group_is_receiver(m))
+ if (!tipc_group_is_sender(m))
goto drop;
if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
@@ -562,7 +588,7 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
int max_active = grp->max_active;
int reclaim_limit = max_active * 3 / 4;
int active_cnt = grp->active_cnt;
- struct tipc_member *m, *rm;
+ struct tipc_member *m, *rm, *pm;
m = tipc_group_find_member(grp, node, port);
if (!m)
@@ -572,24 +598,34 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
switch (m->state) {
case MBR_JOINED:
- /* Reclaim advertised space from least active member */
- if (!list_empty(active) && active_cnt >= reclaim_limit) {
+ /* First, decide if member can go active */
+ if (active_cnt <= max_active) {
+ m->state = MBR_ACTIVE;
+ list_add_tail(&m->list, active);
+ grp->active_cnt++;
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ } else {
+ m->state = MBR_PENDING;
+ list_add_tail(&m->list, &grp->pending);
+ }
+
+ if (active_cnt < reclaim_limit)
+ break;
+
+ /* Reclaim from oldest active member, if possible */
+ if (!list_empty(active)) {
rm = list_first_entry(active, struct tipc_member, list);
rm->state = MBR_RECLAIMING;
- list_move_tail(&rm->list, &grp->reclaiming);
+ list_del_init(&rm->list);
tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq);
- }
- /* If max active, become pending and wait for reclaimed space */
- if (active_cnt >= max_active) {
- m->state = MBR_PENDING;
- list_add_tail(&m->list, &grp->pending);
break;
}
- /* Otherwise become active */
- m->state = MBR_ACTIVE;
- list_add_tail(&m->list, &grp->active);
- grp->active_cnt++;
- /* Fall through */
+ /* Nobody to reclaim from; revert oldest pending to JOINED */
+ pm = list_first_entry(&grp->pending, struct tipc_member, list);
+ list_del_init(&pm->list);
+ pm->state = MBR_JOINED;
+ tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
+ break;
case MBR_ACTIVE:
if (!list_is_last(&m->list, &grp->active))
list_move_tail(&m->list, &grp->active);
@@ -601,13 +637,23 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
if (m->advertised > ADV_IDLE)
break;
m->state = MBR_JOINED;
+ grp->active_cnt--;
if (m->advertised < ADV_IDLE) {
pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
}
+
+ if (list_empty(&grp->pending))
+ return;
+
+ /* Set oldest pending member to active and advertise */
+ pm = list_first_entry(&grp->pending, struct tipc_member, list);
+ pm->state = MBR_ACTIVE;
+ list_move_tail(&pm->list, &grp->active);
+ grp->active_cnt++;
+ tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
break;
case MBR_RECLAIMING:
- case MBR_DISCOVERED:
case MBR_JOINING:
case MBR_LEAVING:
default:
@@ -615,6 +661,40 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
}
}
+static void tipc_group_create_event(struct tipc_group *grp,
+ struct tipc_member *m,
+ u32 event, u16 seqno,
+ struct sk_buff_head *inputq)
+{
+ u32 dnode = tipc_own_addr(grp->net);
+ struct tipc_event evt;
+ struct sk_buff *skb;
+ struct tipc_msg *hdr;
+
+ evt.event = event;
+ evt.found_lower = m->instance;
+ evt.found_upper = m->instance;
+ evt.port.ref = m->port;
+ evt.port.node = m->node;
+ evt.s.seq.type = grp->type;
+ evt.s.seq.lower = m->instance;
+ evt.s.seq.upper = m->instance;
+
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_GRP_MEMBER_EVT,
+ GROUP_H_SIZE, sizeof(evt), dnode, m->node,
+ grp->portid, m->port, 0);
+ if (!skb)
+ return;
+
+ hdr = buf_msg(skb);
+ msg_set_nametype(hdr, grp->type);
+ msg_set_grp_evt(hdr, event);
+ msg_set_dest_droppable(hdr, true);
+ msg_set_grp_bc_seqno(hdr, seqno);
+ memcpy(msg_data(hdr), &evt, sizeof(evt));
+ TIPC_SKB_CB(skb)->orig_member = m->instance;
+ __skb_queue_tail(inputq, skb);
+}
+
static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
int mtyp, struct sk_buff_head *xmitq)
{
@@ -660,107 +740,95 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
u32 node = msg_orignode(hdr);
u32 port = msg_origport(hdr);
struct tipc_member *m, *pm;
- struct tipc_msg *ehdr;
u16 remitted, in_flight;
if (!grp)
return;
+ if (grp->scope == TIPC_NODE_SCOPE && node != tipc_own_addr(grp->net))
+ return;
+
m = tipc_group_find_member(grp, node, port);
switch (msg_type(hdr)) {
case GRP_JOIN_MSG:
if (!m)
m = tipc_group_create_member(grp, node, port,
- MBR_QUARANTINED);
+ 0, MBR_JOINING);
if (!m)
return;
m->bc_syncpt = msg_grp_bc_syncpt(hdr);
m->bc_rcv_nxt = m->bc_syncpt;
m->window += msg_adv_win(hdr);
- /* Wait until PUBLISH event is received */
- if (m->state == MBR_DISCOVERED) {
- m->state = MBR_JOINING;
- } else if (m->state == MBR_PUBLISHED) {
- m->state = MBR_JOINED;
- *usr_wakeup = true;
- m->usr_pending = false;
- tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
- ehdr = buf_msg(m->event_msg);
- msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
- __skb_queue_tail(inputq, m->event_msg);
- }
- list_del_init(&m->congested);
+ /* Wait until PUBLISH event is received if necessary */
+ if (m->state != MBR_PUBLISHED)
+ return;
+
+ /* Member can be taken into service */
+ m->state = MBR_JOINED;
+ tipc_group_open(m, usr_wakeup);
tipc_group_update_member(m, 0);
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+ m->bc_syncpt, inputq);
return;
case GRP_LEAVE_MSG:
if (!m)
return;
m->bc_syncpt = msg_grp_bc_syncpt(hdr);
list_del_init(&m->list);
- list_del_init(&m->congested);
- *usr_wakeup = true;
-
- /* Wait until WITHDRAW event is received */
- if (m->state != MBR_LEAVING) {
- tipc_group_decr_active(grp, m);
- m->state = MBR_LEAVING;
- return;
- }
- /* Otherwise deliver already received WITHDRAW event */
- ehdr = buf_msg(m->event_msg);
- msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
- __skb_queue_tail(inputq, m->event_msg);
+ tipc_group_open(m, usr_wakeup);
+ tipc_group_decr_active(grp, m);
+ m->state = MBR_LEAVING;
+ tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+ m->bc_syncpt, inputq);
return;
case GRP_ADV_MSG:
if (!m)
return;
m->window += msg_adv_win(hdr);
- *usr_wakeup = m->usr_pending;
- m->usr_pending = false;
- list_del_init(&m->congested);
+ tipc_group_open(m, usr_wakeup);
return;
case GRP_ACK_MSG:
if (!m)
return;
m->bc_acked = msg_grp_bc_acked(hdr);
if (--grp->bc_ackers)
- break;
+ return;
+ list_del_init(&m->small_win);
+ *m->group->open = true;
*usr_wakeup = true;
- m->usr_pending = false;
+ tipc_group_update_member(m, 0);
return;
case GRP_RECLAIM_MSG:
if (!m)
return;
- *usr_wakeup = m->usr_pending;
- m->usr_pending = false;
tipc_group_proto_xmit(grp, m, GRP_REMIT_MSG, xmitq);
m->window = ADV_IDLE;
+ tipc_group_open(m, usr_wakeup);
return;
case GRP_REMIT_MSG:
if (!m || m->state != MBR_RECLAIMING)
return;
- list_del_init(&m->list);
- grp->active_cnt--;
remitted = msg_grp_remitted(hdr);
/* Messages preceding the REMIT still in receive queue */
if (m->advertised > remitted) {
m->state = MBR_REMITTED;
in_flight = m->advertised - remitted;
+ m->advertised = ADV_IDLE + in_flight;
+ return;
}
- /* All messages preceding the REMIT have been read */
- if (m->advertised <= remitted) {
- m->state = MBR_JOINED;
- in_flight = 0;
- }
- /* ..and the REMIT overtaken by more messages => re-advertise */
+ /* This should never happen */
if (m->advertised < remitted)
- tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ pr_warn_ratelimited("Unexpected REMIT msg\n");
- m->advertised = ADV_IDLE + in_flight;
+ /* All messages preceding the REMIT have been read */
+ m->state = MBR_JOINED;
+ grp->active_cnt--;
+ m->advertised = ADV_IDLE;
/* Set oldest pending member to active and advertise */
if (list_empty(&grp->pending))
@@ -782,11 +850,10 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
void tipc_group_member_evt(struct tipc_group *grp,
bool *usr_wakeup,
int *sk_rcvbuf,
- struct sk_buff *skb,
+ struct tipc_msg *hdr,
struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
- struct tipc_msg *hdr = buf_msg(skb);
struct tipc_event *evt = (void *)msg_data(hdr);
u32 instance = evt->found_lower;
u32 node = evt->port.node;
@@ -794,89 +861,59 @@ void tipc_group_member_evt(struct tipc_group *grp,
int event = evt->event;
struct tipc_member *m;
struct net *net;
- bool node_up;
u32 self;
if (!grp)
- goto drop;
+ return;
net = grp->net;
self = tipc_own_addr(net);
if (!grp->loopback && node == self && port == grp->portid)
- goto drop;
-
- /* Convert message before delivery to user */
- msg_set_hdr_sz(hdr, GROUP_H_SIZE);
- msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
- msg_set_type(hdr, TIPC_GRP_MEMBER_EVT);
- msg_set_origport(hdr, port);
- msg_set_orignode(hdr, node);
- msg_set_nametype(hdr, grp->type);
- msg_set_grp_evt(hdr, event);
+ return;
m = tipc_group_find_member(grp, node, port);
- if (event == TIPC_PUBLISHED) {
- if (!m)
- m = tipc_group_create_member(grp, node, port,
- MBR_DISCOVERED);
- if (!m)
- goto drop;
-
- /* Hold back event if JOIN message not yet received */
- if (m->state == MBR_DISCOVERED) {
- m->event_msg = skb;
- m->state = MBR_PUBLISHED;
- } else {
- msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
- __skb_queue_tail(inputq, skb);
- m->state = MBR_JOINED;
- *usr_wakeup = true;
- m->usr_pending = false;
+ switch (event) {
+ case TIPC_PUBLISHED:
+ /* Send and wait for arrival of JOIN message if necessary */
+ if (!m) {
+ m = tipc_group_create_member(grp, node, port, instance,
+ MBR_PUBLISHED);
+ if (!m)
+ break;
+ tipc_group_update_member(m, 0);
+ tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
+ break;
}
+
+ if (m->state != MBR_JOINING)
+ break;
+
+ /* Member can be taken into service */
m->instance = instance;
- TIPC_SKB_CB(skb)->orig_member = m->instance;
+ m->state = MBR_JOINED;
+ tipc_group_open(m, usr_wakeup);
+ tipc_group_update_member(m, 0);
tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
- if (m->window < ADV_IDLE)
- tipc_group_update_member(m, 0);
- else
- list_del_init(&m->congested);
- } else if (event == TIPC_WITHDRAWN) {
+ tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+ m->bc_syncpt, inputq);
+ break;
+ case TIPC_WITHDRAWN:
if (!m)
- goto drop;
-
- TIPC_SKB_CB(skb)->orig_member = m->instance;
+ break;
- *usr_wakeup = true;
- m->usr_pending = false;
- node_up = tipc_node_is_up(net, node);
- m->event_msg = NULL;
-
- if (node_up) {
- /* Hold back event if a LEAVE msg should be expected */
- if (m->state != MBR_LEAVING) {
- m->event_msg = skb;
- tipc_group_decr_active(grp, m);
- m->state = MBR_LEAVING;
- } else {
- msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
- __skb_queue_tail(inputq, skb);
- }
- } else {
- if (m->state != MBR_LEAVING) {
- tipc_group_decr_active(grp, m);
- m->state = MBR_LEAVING;
- msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
- } else {
- msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
- }
- __skb_queue_tail(inputq, skb);
- }
+ tipc_group_decr_active(grp, m);
+ m->state = MBR_LEAVING;
list_del_init(&m->list);
- list_del_init(&m->congested);
+ tipc_group_open(m, usr_wakeup);
+
+ /* Only send event if no LEAVE message can be expected */
+ if (!tipc_node_is_up(net, node))
+ tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+ m->bc_rcv_nxt, inputq);
+ break;
+ default:
+ break;
}
*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
- return;
-drop:
- kfree_skb(skb);
}
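The group rework keeps members on grp->small_win sorted by ascending send window, so the most flow-blocked member is always found first (see tipc_group_update_member() above). A minimal userspace sketch of that sorted insert, with a hand-rolled singly linked list standing in for list_head:

#include <stdio.h>

struct member {
	int window;
	struct member *next;
};

/* Insert so the list stays sorted by ascending window */
static void small_win_insert(struct member **head, struct member *m)
{
	struct member **p = head;

	while (*p && (*p)->window <= m->window)
		p = &(*p)->next;
	m->next = *p;
	*p = m;
}

int main(void)
{
	struct member a = { .window = 30 }, b = { .window = 10 }, c = { .window = 20 };
	struct member *head = NULL;

	small_win_insert(&head, &a);
	small_win_insert(&head, &b);
	small_win_insert(&head, &c);
	for (struct member *m = head; m; m = m->next)
		printf("window=%d\n", m->window);	/* 10, 20, 30 */
	return 0;
}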
diff --git a/net/tipc/group.h b/net/tipc/group.h
index d525e1cd7de5..5996af6e9f1d 100644
--- a/net/tipc/group.h
+++ b/net/tipc/group.h
@@ -43,9 +43,12 @@ struct tipc_member;
struct tipc_msg;
struct tipc_group *tipc_group_create(struct net *net, u32 portid,
- struct tipc_group_req *mreq);
+ struct tipc_group_req *mreq,
+ bool *group_is_open);
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcv_buf);
void tipc_group_delete(struct net *net, struct tipc_group *grp);
-void tipc_group_add_member(struct tipc_group *grp, u32 node, u32 port);
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+ u32 port, u32 instance);
struct tipc_nlist *tipc_group_dests(struct tipc_group *grp);
void tipc_group_self(struct tipc_group *grp, struct tipc_name_seq *seq,
int *scope);
@@ -54,7 +57,7 @@ void tipc_group_filter_msg(struct tipc_group *grp,
struct sk_buff_head *inputq,
struct sk_buff_head *xmitq);
void tipc_group_member_evt(struct tipc_group *grp, bool *wakeup,
- int *sk_rcvbuf, struct sk_buff *skb,
+ int *sk_rcvbuf, struct tipc_msg *hdr,
struct sk_buff_head *inputq,
struct sk_buff_head *xmitq);
void tipc_group_proto_rcv(struct tipc_group *grp, bool *wakeup,
@@ -69,5 +72,4 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
u32 port, struct sk_buff_head *xmitq);
u16 tipc_group_bc_snd_nxt(struct tipc_group *grp);
void tipc_group_update_member(struct tipc_member *m, int len);
-int tipc_group_size(struct tipc_group *grp);
#endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6bce0b1117bd..2d6b2aed30e0 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -483,7 +483,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
/**
* tipc_link_bc_create - create new link to be used for broadcast
* @n: pointer to associated node
- * @mtu: mtu to be used
+ * @mtu: mtu to be used initially if no peers
* @window: send window to be used
* @inputq: queue to put messages ready for delivery
* @namedq: queue to put binding table update messages ready for delivery
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b0d07b35909d..55d8ba92291d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -251,20 +251,23 @@ bool tipc_msg_validate(struct sk_buff **_skb)
* @pktmax: Max packet size that can be used
* @list: Buffer or chain of buffers to be returned to caller
*
+ * Note that the recursive call we are making here is safe, since it can
+ * logically go only one further level down.
+ *
* Returns message data size or errno: -ENOMEM, -EFAULT
*/
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
- int offset, int dsz, int pktmax, struct sk_buff_head *list)
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+ int dsz, int pktmax, struct sk_buff_head *list)
{
int mhsz = msg_hdr_sz(mhdr);
+ struct tipc_msg pkthdr;
int msz = mhsz + dsz;
- int pktno = 1;
- int pktsz;
int pktrem = pktmax;
- int drem = dsz;
- struct tipc_msg pkthdr;
struct sk_buff *skb;
+ int drem = dsz;
+ int pktno = 1;
char *pktpos;
+ int pktsz;
int rc;
msg_set_size(mhdr, msz);
@@ -272,8 +275,18 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
/* No fragmentation needed? */
if (likely(msz <= pktmax)) {
skb = tipc_buf_acquire(msz, GFP_KERNEL);
- if (unlikely(!skb))
+
+ /* Fall back to smaller MTU if node local message */
+ if (unlikely(!skb)) {
+ if (pktmax != MAX_MSG_SIZE)
+ return -ENOMEM;
+ rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
+ if (rc != dsz)
+ return rc;
+ if (tipc_msg_assemble(list))
+ return dsz;
return -ENOMEM;
+ }
skb_orphan(skb);
__skb_queue_tail(list, skb);
skb_copy_to_linear_data(skb, mhdr, mhsz);
@@ -589,6 +602,30 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
return true;
}
+/* tipc_msg_assemble() - assemble chain of fragments into one message
+ */
+bool tipc_msg_assemble(struct sk_buff_head *list)
+{
+ struct sk_buff *skb, *tmp = NULL;
+
+ if (skb_queue_len(list) == 1)
+ return true;
+
+ while ((skb = __skb_dequeue(list))) {
+ skb->next = NULL;
+ if (tipc_buf_append(&tmp, &skb)) {
+ __skb_queue_tail(list, skb);
+ return true;
+ }
+ if (!tmp)
+ break;
+ }
+ __skb_queue_purge(list);
+ __skb_queue_head_init(list);
+ pr_warn("Failed do assemble buffer\n");
+ return false;
+}
+
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
* reassemble the clones into one message
*/
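The new path in tipc_msg_build() retries a failed large allocation by rebuilding the message as FB_MTU-sized fragments and reassembling them with tipc_msg_assemble(). A rough userspace sketch of the same allocate-big-else-fragment idea; names and error cleanup are simplified:

#include <stdlib.h>
#include <string.h>

#define FB_MTU 3744

struct frag {
	struct frag *next;
	size_t len;
	char data[];
};

static struct frag *build_chain(const char *src, size_t len, size_t mtu)
{
	struct frag *head = NULL, **tail = &head;

	for (size_t off = 0; off < len; off += mtu) {
		size_t n = len - off < mtu ? len - off : mtu;
		struct frag *f = malloc(sizeof(*f) + n);

		if (!f)
			return NULL;	/* cleanup of earlier frags elided */
		f->next = NULL;
		f->len = n;
		memcpy(f->data, src + off, n);
		*tail = f;
		tail = &f->next;
	}
	return head;
}

static struct frag *msg_build(const char *src, size_t len)
{
	struct frag *one = build_chain(src, len, len);	/* one big buffer */

	if (one)
		return one;
	return build_chain(src, len, FB_MTU);	/* fragment fallback */
}

int main(void)
{
	char payload[10000] = { 0 };
	struct frag *msg = msg_build(payload, sizeof(payload));

	return msg ? 0 : 1;
}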
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 3e4384c222f7..b4ba1b4f9ae7 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -98,7 +98,7 @@ struct plist;
#define MAX_H_SIZE 60 /* Largest possible TIPC header size */
#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
-
+#define FB_MTU 3744
#define TIPC_MEDIA_INFO_OFFSET 5
struct tipc_skb_cb {
@@ -943,6 +943,7 @@ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
+bool tipc_msg_assemble(struct sk_buff_head *list);
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
struct sk_buff_head *cpy);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index b3829bcf63c7..ed0457cc99d6 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -328,7 +328,8 @@ static struct publication *tipc_nameseq_insert_publ(struct net *net,
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
TIPC_PUBLISHED, publ->ref,
- publ->node, created_subseq);
+ publ->node, publ->scope,
+ created_subseq);
}
return publ;
}
@@ -398,19 +399,21 @@ found:
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
TIPC_WITHDRAWN, publ->ref,
- publ->node, removed_subseq);
+ publ->node, publ->scope,
+ removed_subseq);
}
return publ;
}
/**
- * tipc_nameseq_subscribe - attach a subscription, and issue
- * the prescribed number of events if there is any sub-
+ * tipc_nameseq_subscribe - attach a subscription, and optionally
+ * issue the prescribed number of events if there is any sub-
* sequence overlapping with the requested sequence
*/
static void tipc_nameseq_subscribe(struct name_seq *nseq,
- struct tipc_subscription *s)
+ struct tipc_subscription *s,
+ bool status)
{
struct sub_seq *sseq = nseq->sseqs;
struct tipc_name_seq ns;
@@ -420,7 +423,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
tipc_subscrp_get(s);
list_add(&s->nameseq_list, &nseq->subscriptions);
- if (!sseq)
+ if (!status || !sseq)
return;
while (sseq != &nseq->sseqs[nseq->first_free]) {
@@ -434,6 +437,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
sseq->upper,
TIPC_PUBLISHED,
crs->ref, crs->node,
+ crs->scope,
must_report);
must_report = 0;
}
@@ -597,7 +601,7 @@ not_found:
return ref;
}
-bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
+bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 scope,
struct list_head *dsts, int *dstcnt, u32 exclude,
bool all)
{
@@ -607,9 +611,6 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
struct name_seq *seq;
struct sub_seq *sseq;
- if (!tipc_in_scope(domain, self))
- return false;
-
*dstcnt = 0;
rcu_read_lock();
seq = nametbl_find_seq(net, type);
@@ -620,7 +621,7 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
if (likely(sseq)) {
info = sseq->info;
list_for_each_entry(publ, &info->zone_list, zone_list) {
- if (!tipc_in_scope(domain, publ->node))
+ if (publ->scope != scope)
continue;
if (publ->ref == exclude && publ->node == self)
continue;
@@ -638,13 +639,14 @@ exit:
return !list_empty(dsts);
}
-int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
- u32 limit, struct list_head *dports)
+int tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
+ u32 scope, bool exact, struct list_head *dports)
{
- struct name_seq *seq;
- struct sub_seq *sseq;
struct sub_seq *sseq_stop;
struct name_info *info;
+ struct publication *p;
+ struct name_seq *seq;
+ struct sub_seq *sseq;
int res = 0;
rcu_read_lock();
@@ -656,15 +658,12 @@ int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
sseq_stop = seq->sseqs + seq->first_free;
for (; sseq != sseq_stop; sseq++) {
- struct publication *publ;
-
if (sseq->lower > upper)
break;
-
info = sseq->info;
- list_for_each_entry(publ, &info->node_list, node_list) {
- if (publ->scope <= limit)
- tipc_dest_push(dports, 0, publ->ref);
+ list_for_each_entry(p, &info->node_list, node_list) {
+ if (p->scope == scope || (!exact && p->scope < scope))
+ tipc_dest_push(dports, 0, p->ref);
}
if (info->cluster_list_size != info->node_list_size)
@@ -681,8 +680,7 @@ exit:
* - Determines if any node local ports overlap
*/
void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
- u32 upper, u32 domain,
- struct tipc_nlist *nodes)
+ u32 upper, struct tipc_nlist *nodes)
{
struct sub_seq *sseq, *stop;
struct publication *publ;
@@ -700,8 +698,7 @@ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
for (; sseq != stop && sseq->lower <= upper; sseq++) {
info = sseq->info;
list_for_each_entry(publ, &info->zone_list, zone_list) {
- if (tipc_in_scope(domain, publ->node))
- tipc_nlist_add(nodes, publ->node);
+ tipc_nlist_add(nodes, publ->node);
}
}
spin_unlock_bh(&seq->lock);
@@ -712,7 +709,7 @@ exit:
/* tipc_nametbl_build_group - build list of communication group members
*/
void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
- u32 type, u32 domain)
+ u32 type, u32 scope)
{
struct sub_seq *sseq, *stop;
struct name_info *info;
@@ -730,9 +727,9 @@ void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
for (; sseq != stop; sseq++) {
info = sseq->info;
list_for_each_entry(p, &info->zone_list, zone_list) {
- if (!tipc_in_scope(domain, p->node))
+ if (p->scope != scope)
continue;
- tipc_group_add_member(grp, p->node, p->ref);
+ tipc_group_add_member(grp, p->node, p->ref, p->lower);
}
}
spin_unlock_bh(&seq->lock);
@@ -811,7 +808,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
/**
* tipc_nametbl_subscribe - add a subscription object to the name table
*/
-void tipc_nametbl_subscribe(struct tipc_subscription *s)
+void tipc_nametbl_subscribe(struct tipc_subscription *s, bool status)
{
struct tipc_net *tn = net_generic(s->net, tipc_net_id);
u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
@@ -825,7 +822,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
if (seq) {
spin_lock_bh(&seq->lock);
- tipc_nameseq_subscribe(seq, s);
+ tipc_nameseq_subscribe(seq, s, status);
spin_unlock_bh(&seq->lock);
} else {
tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
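tipc_nametbl_mc_lookup() now matches publications by scope instead of network domain: an exact lookup accepts only the requested scope, while a non-exact one also accepts numerically smaller scope values (wider visibility in TIPC's encoding). A tiny illustration, using the scope constants as defined in TIPC's uapi header:

#include <stdbool.h>
#include <stdio.h>

enum { TIPC_ZONE_SCOPE = 1, TIPC_CLUSTER_SCOPE = 2, TIPC_NODE_SCOPE = 3 };

static bool scope_match(int pub_scope, int req_scope, bool exact)
{
	return pub_scope == req_scope || (!exact && pub_scope < req_scope);
}

int main(void)
{
	/* Non-exact node-scope lookup also sees cluster publications */
	printf("%d\n", scope_match(TIPC_CLUSTER_SCOPE, TIPC_NODE_SCOPE, false)); /* 1 */
	printf("%d\n", scope_match(TIPC_CLUSTER_SCOPE, TIPC_NODE_SCOPE, true));  /* 0 */
	return 0;
}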
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 71926e429446..f56e7cb3d436 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -100,13 +100,12 @@ struct name_table {
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
-int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
- u32 limit, struct list_head *dports);
+int tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
+ u32 scope, bool exact, struct list_head *dports);
void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
u32 type, u32 domain);
void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
- u32 upper, u32 domain,
- struct tipc_nlist *nodes);
+ u32 upper, struct tipc_nlist *nodes);
bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
struct list_head *dsts, int *dstcnt, u32 exclude,
bool all);
@@ -121,7 +120,7 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
u32 lower, u32 node, u32 ref,
u32 key);
-void tipc_nametbl_subscribe(struct tipc_subscription *s);
+void tipc_nametbl_subscribe(struct tipc_subscription *s, bool status);
void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
int tipc_nametbl_init(struct net *net);
void tipc_nametbl_stop(struct net *net);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 507017fe0f1b..9036d8756e73 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1880,36 +1880,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
if (strcmp(name, tipc_bclink_name) == 0) {
err = tipc_nl_add_bc_link(net, &msg);
- if (err) {
- nlmsg_free(msg.skb);
- return err;
- }
+ if (err)
+ goto err_free;
} else {
int bearer_id;
struct tipc_node *node;
struct tipc_link *link;
node = tipc_node_find_by_name(net, name, &bearer_id);
- if (!node)
- return -EINVAL;
+ if (!node) {
+ err = -EINVAL;
+ goto err_free;
+ }
tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (!link) {
tipc_node_read_unlock(node);
- nlmsg_free(msg.skb);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
err = __tipc_nl_add_link(net, &msg, link, 0);
tipc_node_read_unlock(node);
- if (err) {
- nlmsg_free(msg.skb);
- return err;
- }
+ if (err)
+ goto err_free;
}
return genlmsg_reply(msg.skb, info);
+
+err_free:
+ nlmsg_free(msg.skb);
+ return err;
}
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
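The tipc_nl_node_get_link() hunk above folds three duplicated nlmsg_free()-then-return sequences into a single err_free label. The same single-exit cleanup pattern in a generic, self-contained form; error codes and the buffer size are placeholders:

#include <stdlib.h>

struct reply { char *skb; };

static int get_link_info(struct reply *msg, int want_bcast)
{
	int err;

	msg->skb = malloc(512);
	if (!msg->skb)
		return -12;		/* -ENOMEM */

	err = want_bcast ? 0 : -22;	/* pretend lookup result (-EINVAL) */
	if (err)
		goto err_free;

	/* ... fill in and send the reply ... */
	return 0;

err_free:
	free(msg->skb);			/* one cleanup site for all paths */
	msg->skb = NULL;
	return err;
}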
diff --git a/net/tipc/server.c b/net/tipc/server.c
index d60c30342327..df0c563c90cd 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -132,10 +132,11 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
spin_lock_bh(&s->idr_lock);
con = idr_find(&s->conn_idr, conid);
- if (con && test_bit(CF_CONNECTED, &con->flags))
- conn_get(con);
- else
- con = NULL;
+ if (con) {
+ if (!test_bit(CF_CONNECTED, &con->flags) ||
+ !kref_get_unless_zero(&con->kref))
+ con = NULL;
+ }
spin_unlock_bh(&s->idr_lock);
return con;
}
@@ -183,35 +184,28 @@ static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
write_unlock_bh(&sk->sk_callback_lock);
}
-static void tipc_unregister_callbacks(struct tipc_conn *con)
-{
- struct sock *sk = con->sock->sk;
-
- write_lock_bh(&sk->sk_callback_lock);
- sk->sk_user_data = NULL;
- write_unlock_bh(&sk->sk_callback_lock);
-}
-
static void tipc_close_conn(struct tipc_conn *con)
{
struct tipc_server *s = con->server;
+ struct sock *sk = con->sock->sk;
+ bool disconnect = false;
- if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
- if (con->sock)
- tipc_unregister_callbacks(con);
-
+ write_lock_bh(&sk->sk_callback_lock);
+ disconnect = test_and_clear_bit(CF_CONNECTED, &con->flags);
+ if (disconnect) {
+ sk->sk_user_data = NULL;
if (con->conid)
s->tipc_conn_release(con->conid, con->usr_data);
-
- /* We shouldn't flush pending works as we may be in the
- * thread. In fact the races with pending rx/tx work structs
- * are harmless for us here as we have already deleted this
- * connection from server connection list.
- */
- if (con->sock)
- kernel_sock_shutdown(con->sock, SHUT_RDWR);
- conn_put(con);
}
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ /* Handle concurrent calls from sending and receiving threads */
+ if (!disconnect)
+ return;
+
+ /* Don't flush pending works, -just let them expire */
+ kernel_sock_shutdown(con->sock, SHUT_RDWR);
+ conn_put(con);
}
static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
@@ -248,9 +242,10 @@ static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
static int tipc_receive_from_sock(struct tipc_conn *con)
{
- struct msghdr msg = {};
struct tipc_server *s = con->server;
+ struct sock *sk = con->sock->sk;
struct sockaddr_tipc addr;
+ struct msghdr msg = {};
struct kvec iov;
void *buf;
int ret;
@@ -264,19 +259,22 @@ static int tipc_receive_from_sock(struct tipc_conn *con)
iov.iov_base = buf;
iov.iov_len = s->max_rcvbuf_size;
msg.msg_name = &addr;
- ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
- MSG_DONTWAIT);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
+ ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret <= 0) {
kmem_cache_free(s->rcvbuf_cache, buf);
goto out_close;
}
- s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
- con->usr_data, buf, ret);
-
+ read_lock_bh(&sk->sk_callback_lock);
+ if (test_bit(CF_CONNECTED, &con->flags))
+ ret = s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid,
+ &addr, con->usr_data, buf, ret);
+ read_unlock_bh(&sk->sk_callback_lock);
kmem_cache_free(s->rcvbuf_cache, buf);
-
- return 0;
+ if (ret < 0)
+ tipc_conn_terminate(s, con->conid);
+ return ret;
out_close:
if (ret != -EWOULDBLOCK)
@@ -489,8 +487,8 @@ void tipc_conn_terminate(struct tipc_server *s, int conid)
}
}
-bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
- u32 lower, u32 upper, int *conid)
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+ u32 upper, u32 filter, int *conid)
{
struct tipc_subscriber *scbr;
struct tipc_subscr sub;
@@ -501,7 +499,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
sub.seq.lower = lower;
sub.seq.upper = upper;
sub.timeout = TIPC_WAIT_FOREVER;
- sub.filter = TIPC_SUB_PORTS;
+ sub.filter = filter;
*(u32 *)&sub.usr_handle = port;
con = tipc_alloc_conn(tipc_topsrv(net));
@@ -525,11 +523,17 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
{
struct tipc_conn *con;
+ struct tipc_server *srv;
con = tipc_conn_lookup(tipc_topsrv(net), conid);
if (!con)
return;
- tipc_close_conn(con);
+
+ test_and_clear_bit(CF_CONNECTED, &con->flags);
+ srv = con->server;
+ if (con->conid)
+ srv->tipc_conn_release(con->conid, con->usr_data);
+ conn_put(con);
conn_put(con);
}
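Editor's note: tipc_conn_lookup() above now takes a reference only via kref_get_unless_zero(), so a connection whose last reference has already dropped can no longer be resurrected by a concurrent lookup. Below is a plain C11-atomics stand-in for that idiom, a sketch rather than the kernel's kref implementation; the conn structure and table are hypothetical.

/* Take a reference only if the object is still live (refcnt != 0). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct conn {
	atomic_int refcnt;	/* 0 means teardown has started */
	bool connected;
};

static bool conn_get_unless_zero(struct conn *c)
{
	int old = atomic_load(&c->refcnt);

	while (old != 0) {
		/* On failure, old is reloaded and the liveness test reruns. */
		if (atomic_compare_exchange_weak(&c->refcnt, &old, old + 1))
			return true;
	}
	return false;	/* never resurrect a dying object */
}

static struct conn *conn_lookup(struct conn *c)
{
	/* Mirrors the hunk: disconnected or dying objects are invisible. */
	if (c && (!c->connected || !conn_get_unless_zero(c)))
		c = NULL;
	return c;
}

int main(void)
{
	struct conn live = { .refcnt = 1, .connected = true };
	struct conn dead = { .refcnt = 0, .connected = true };

	printf("live: %s\n", conn_lookup(&live) ? "found" : "miss");
	printf("dead: %s\n", conn_lookup(&dead) ? "found" : "miss");
	return 0;
}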
diff --git a/net/tipc/server.h b/net/tipc/server.h
index 2113c9192633..64df7513cd70 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -41,6 +41,9 @@
#include <net/net_namespace.h>
#define TIPC_SERVER_NAME_LEN 32
+#define TIPC_SUB_CLUSTER_SCOPE 0x20
+#define TIPC_SUB_NODE_SCOPE 0x40
+#define TIPC_SUB_NO_STATUS 0x80
/**
* struct tipc_server - TIPC server structure
@@ -71,9 +74,9 @@ struct tipc_server {
int max_rcvbuf_size;
void *(*tipc_conn_new)(int conid);
void (*tipc_conn_release)(int conid, void *usr_data);
- void (*tipc_conn_recvmsg)(struct net *net, int conid,
- struct sockaddr_tipc *addr, void *usr_data,
- void *buf, size_t len);
+ int (*tipc_conn_recvmsg)(struct net *net, int conid,
+ struct sockaddr_tipc *addr, void *usr_data,
+ void *buf, size_t len);
struct sockaddr_tipc *saddr;
char name[TIPC_SERVER_NAME_LEN];
int imp;
@@ -83,8 +86,8 @@ struct tipc_server {
int tipc_conn_sendmsg(struct tipc_server *s, int conid,
struct sockaddr_tipc *addr, void *data, size_t len);
-bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
- u32 lower, u32 upper, int *conid);
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+ u32 upper, u32 filter, int *conid);
void tipc_topsrv_kern_unsubscr(struct net *net, int conid);
/**
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3b4084480377..163f3a547501 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -116,6 +116,7 @@ struct tipc_sock {
struct tipc_mc_method mc_method;
struct rcu_head rcu;
struct tipc_group *group;
+ bool group_is_open;
};
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
@@ -710,13 +711,12 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
-static unsigned int tipc_poll(struct file *file, struct socket *sock,
+static __poll_t tipc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_group *grp = tsk->group;
- u32 revents = 0;
+ __poll_t revents = 0;
sock_poll_wait(file, sk_sleep(sk), wait);
@@ -736,9 +736,8 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
revents |= POLLIN | POLLRDNORM;
break;
case TIPC_OPEN:
- if (!grp || tipc_group_size(grp))
- if (!tsk->cong_link_cnt)
- revents |= POLLOUT;
+ if (tsk->group_is_open && !tsk->cong_link_cnt)
+ revents |= POLLOUT;
if (!tipc_sk_type_connectionless(sk))
break;
if (skb_queue_empty(&sk->sk_receive_queue))
@@ -772,7 +771,6 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
struct net *net = sock_net(sk);
int mtu = tipc_bcast_get_mtu(net);
struct tipc_mc_method *method = &tsk->mc_method;
- u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
struct sk_buff_head pkts;
struct tipc_nlist dsts;
int rc;
@@ -788,7 +786,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
/* Lookup destination nodes */
tipc_nlist_init(&dsts, tipc_own_addr(net));
tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
- seq->upper, domain, &dsts);
+ seq->upper, &dsts);
if (!dsts.local && !dsts.remote)
return -EHOSTUNREACH;
@@ -928,21 +926,22 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
struct list_head *cong_links = &tsk->cong_links;
int blks = tsk_blocks(GROUP_H_SIZE + dlen);
struct tipc_group *grp = tsk->group;
+ struct tipc_msg *hdr = &tsk->phdr;
struct tipc_member *first = NULL;
struct tipc_member *mbr = NULL;
struct net *net = sock_net(sk);
u32 node, port, exclude;
- u32 type, inst, domain;
struct list_head dsts;
+ u32 type, inst, scope;
int lookups = 0;
int dstcnt, rc;
bool cong;
INIT_LIST_HEAD(&dsts);
- type = dest->addr.name.name.type;
+ type = msg_nametype(hdr);
inst = dest->addr.name.name.instance;
- domain = addr_domain(net, dest->scope);
+ scope = msg_lookup_scope(hdr);
exclude = tipc_group_exclude(grp);
while (++lookups < 4) {
@@ -950,7 +949,7 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
/* Look for a non-congested destination member, if any */
while (1) {
- if (!tipc_nametbl_lookup(net, type, inst, domain, &dsts,
+ if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
&dstcnt, exclude, false))
return -EHOSTUNREACH;
tipc_dest_pop(&dsts, &node, &port);
@@ -1079,22 +1078,23 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
{
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
- struct tipc_name_seq *seq = &dest->addr.nameseq;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_group *grp = tsk->group;
+ struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
- u32 domain, exclude, dstcnt;
+ u32 type, inst, scope, exclude;
struct list_head dsts;
+ u32 dstcnt;
INIT_LIST_HEAD(&dsts);
- if (seq->lower != seq->upper)
- return -ENOTSUPP;
-
- domain = addr_domain(net, dest->scope);
+ type = msg_nametype(hdr);
+ inst = dest->addr.name.name.instance;
+ scope = msg_lookup_scope(hdr);
exclude = tipc_group_exclude(grp);
- if (!tipc_nametbl_lookup(net, seq->type, seq->lower, domain,
- &dsts, &dstcnt, exclude, true))
+
+ if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
+ &dstcnt, exclude, true))
return -EHOSTUNREACH;
if (dstcnt == 1) {
@@ -1116,24 +1116,29 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
struct sk_buff_head *inputq)
{
- u32 scope = TIPC_CLUSTER_SCOPE;
u32 self = tipc_own_addr(net);
+ u32 type, lower, upper, scope;
struct sk_buff *skb, *_skb;
- u32 lower = 0, upper = ~0;
- struct sk_buff_head tmpq;
u32 portid, oport, onode;
+ struct sk_buff_head tmpq;
struct list_head dports;
- struct tipc_msg *msg;
- int user, mtyp, hsz;
+ struct tipc_msg *hdr;
+ int user, mtyp, hlen;
+ bool exact;
__skb_queue_head_init(&tmpq);
INIT_LIST_HEAD(&dports);
skb = tipc_skb_peek(arrvq, &inputq->lock);
for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
- msg = buf_msg(skb);
- user = msg_user(msg);
- mtyp = msg_type(msg);
+ hdr = buf_msg(skb);
+ user = msg_user(hdr);
+ mtyp = msg_type(hdr);
+ hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
+ oport = msg_origport(hdr);
+ onode = msg_orignode(hdr);
+ type = msg_nametype(hdr);
+
if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
@@ -1144,21 +1149,31 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
spin_unlock_bh(&inputq->lock);
continue;
}
- hsz = skb_headroom(skb) + msg_hdr_sz(msg);
- oport = msg_origport(msg);
- onode = msg_orignode(msg);
- if (onode == self)
- scope = TIPC_NODE_SCOPE;
-
- /* Create destination port list and message clones: */
- if (!msg_in_group(msg)) {
- lower = msg_namelower(msg);
- upper = msg_nameupper(msg);
+
+ /* Group messages require exact scope match */
+ if (msg_in_group(hdr)) {
+ lower = 0;
+ upper = ~0;
+ scope = msg_lookup_scope(hdr);
+ exact = true;
+ } else {
+ /* TIPC_NODE_SCOPE means "any scope" in this context */
+ if (onode == self)
+ scope = TIPC_NODE_SCOPE;
+ else
+ scope = TIPC_CLUSTER_SCOPE;
+ exact = false;
+ lower = msg_namelower(hdr);
+ upper = msg_nameupper(hdr);
}
- tipc_nametbl_mc_translate(net, msg_nametype(msg), lower, upper,
- scope, &dports);
+
+ /* Create destination port list: */
+ tipc_nametbl_mc_lookup(net, type, lower, upper,
+ scope, exact, &dports);
+
+ /* Clone message per destination */
while (tipc_dest_pop(&dports, NULL, &portid)) {
- _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
+ _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
if (_skb) {
msg_set_destport(buf_msg(_skb), portid);
__skb_queue_tail(&tmpq, _skb);
@@ -1933,8 +1948,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
break;
case TOP_SRV:
tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
- skb, inputq, xmitq);
- skb = NULL;
+ hdr, inputq, xmitq);
break;
default:
break;
@@ -2640,9 +2654,7 @@ void tipc_sk_reinit(struct net *net)
rhashtable_walk_enter(&tn->sk_rht, &iter);
do {
- tsk = ERR_PTR(rhashtable_walk_start(&iter));
- if (IS_ERR(tsk))
- goto walk_stop;
+ rhashtable_walk_start(&iter);
while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
spin_lock_bh(&tsk->sk.sk_lock.slock);
@@ -2651,7 +2663,7 @@ void tipc_sk_reinit(struct net *net)
msg_set_orignode(msg, tn->own_addr);
spin_unlock_bh(&tsk->sk.sk_lock.slock);
}
-walk_stop:
+
rhashtable_walk_stop(&iter);
} while (tsk == ERR_PTR(-EAGAIN));
}
@@ -2734,7 +2746,6 @@ void tipc_sk_rht_destroy(struct net *net)
static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
{
struct net *net = sock_net(&tsk->sk);
- u32 domain = addr_domain(net, mreq->scope);
struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = &tsk->phdr;
struct tipc_name_seq seq;
@@ -2742,9 +2753,11 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
if (mreq->type < TIPC_RESERVED_TYPES)
return -EACCES;
+ if (mreq->scope > TIPC_NODE_SCOPE)
+ return -EINVAL;
if (grp)
return -EACCES;
- grp = tipc_group_create(net, tsk->portid, mreq);
+ grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
if (!grp)
return -ENOMEM;
tsk->group = grp;
@@ -2754,16 +2767,17 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
seq.type = mreq->type;
seq.lower = mreq->instance;
seq.upper = seq.lower;
- tipc_nametbl_build_group(net, grp, mreq->type, domain);
+ tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
rc = tipc_sk_publish(tsk, mreq->scope, &seq);
if (rc) {
tipc_group_delete(net, grp);
tsk->group = NULL;
+ return rc;
}
-
- /* Eliminate any risk that a broadcast overtakes the sent JOIN */
+ /* Eliminate any risk that a broadcast overtakes sent JOINs */
tsk->mc_method.rcast = true;
tsk->mc_method.mandatory = true;
+ tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
return rc;
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 251065dfd8df..68e26470c516 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -118,15 +118,19 @@ void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
u32 found_upper, u32 event, u32 port_ref,
- u32 node, int must)
+ u32 node, u32 scope, int must)
{
+ u32 filter = htohl(sub->evt.s.filter, sub->swap);
struct tipc_name_seq seq;
tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
return;
- if (!must &&
- !(htohl(sub->evt.s.filter, sub->swap) & TIPC_SUB_PORTS))
+ if (!must && !(filter & TIPC_SUB_PORTS))
+ return;
+ if (filter & TIPC_SUB_CLUSTER_SCOPE && scope == TIPC_NODE_SCOPE)
+ return;
+ if (filter & TIPC_SUB_NODE_SCOPE && scope != TIPC_NODE_SCOPE)
return;
tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
@@ -285,21 +289,21 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
return sub;
}
-static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
- struct tipc_subscriber *subscriber, int swap)
+static int tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
+ struct tipc_subscriber *subscriber, int swap,
+ bool status)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_subscription *sub = NULL;
u32 timeout;
sub = tipc_subscrp_create(net, s, swap);
if (!sub)
- return tipc_conn_terminate(tn->topsrv, subscriber->conid);
+ return -1;
spin_lock_bh(&subscriber->lock);
list_add(&sub->subscrp_list, &subscriber->subscrp_list);
sub->subscriber = subscriber;
- tipc_nametbl_subscribe(sub);
+ tipc_nametbl_subscribe(sub, status);
tipc_subscrb_get(subscriber);
spin_unlock_bh(&subscriber->lock);
@@ -308,6 +312,7 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
if (timeout != TIPC_WAIT_FOREVER)
mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+ return 0;
}
/* Handle one termination request for the subscriber */
@@ -317,12 +322,13 @@ static void tipc_subscrb_release_cb(int conid, void *usr_data)
}
/* Handle one request to create a new subscription for the subscriber */
-static void tipc_subscrb_rcv_cb(struct net *net, int conid,
- struct sockaddr_tipc *addr, void *usr_data,
- void *buf, size_t len)
+static int tipc_subscrb_rcv_cb(struct net *net, int conid,
+ struct sockaddr_tipc *addr, void *usr_data,
+ void *buf, size_t len)
{
struct tipc_subscriber *subscriber = usr_data;
struct tipc_subscr *s = (struct tipc_subscr *)buf;
+ bool status;
int swap;
/* Determine subscriber's endianness */
@@ -332,10 +338,11 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
/* Detect & process a subscription cancellation request */
if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
- return tipc_subscrp_cancel(s, subscriber);
+ tipc_subscrp_cancel(s, subscriber);
+ return 0;
}
-
- tipc_subscrp_subscribe(net, s, subscriber, swap);
+ status = !(s->filter & htohl(TIPC_SUB_NO_STATUS, swap));
+ return tipc_subscrp_subscribe(net, s, subscriber, swap, status);
}
/* Handle one request to establish a new subscriber */
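Editor's note: the subscr.c hunks above test subscription flags through htohl(value, swap), where swap records whether the peer used the opposite byte order, so the same expression works for both endiannesses. A small sketch of that conditional-swap helper, assuming a hypothetical 32-bit flag word; only the idea is taken from the patch.

/* Convert between host order and the subscriber's order in one step. */
#include <stdint.h>
#include <stdio.h>

#define SUB_NO_STATUS 0x80u

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00u) |
	       ((v << 8) & 0xff0000u) | (v << 24);
}

static uint32_t htohl(uint32_t v, int swap)
{
	return swap ? bswap32(v) : v;
}

int main(void)
{
	int peer_swapped = 1;			/* peer uses the opposite byte order */
	uint32_t raw = bswap32(SUB_NO_STATUS);	/* flag word as it arrives off the wire */
	uint32_t filter = htohl(raw, peer_swapped);

	printf("NO_STATUS set: %s\n", (filter & SUB_NO_STATUS) ? "yes" : "no");
	return 0;
}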
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ee52957dc952..f3edca775d9f 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -71,7 +71,7 @@ int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
u32 found_upper);
void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
u32 found_lower, u32 found_upper, u32 event,
- u32 port_ref, u32 node, int must);
+ u32 port_ref, u32 node, u32 scope, int must);
void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
struct tipc_name_seq *out);
u32 tipc_subscrp_convert_seq_type(u32 type, int swap);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e07ee3ae0023..736719c8314e 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -367,8 +367,10 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
crypto_info = &ctx->crypto_send;
/* Currently we don't support setting crypto info more than once */

- if (TLS_CRYPTO_INFO_READY(crypto_info))
+ if (TLS_CRYPTO_INFO_READY(crypto_info)) {
+ rc = -EBUSY;
goto out;
+ }
rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
if (rc) {
@@ -386,7 +388,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
case TLS_CIPHER_AES_GCM_128: {
if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
rc = -EINVAL;
- goto out;
+ goto err_crypto_info;
}
rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
optlen - sizeof(*crypto_info));
@@ -398,7 +400,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
}
default:
rc = -EINVAL;
- goto out;
+ goto err_crypto_info;
}
/* currently SW is default, we will have ethtool in future */
@@ -454,6 +456,15 @@ static int tls_init(struct sock *sk)
struct tls_context *ctx;
int rc = 0;
+ /* The TLS ulp is currently supported only for TCP sockets
+ * in ESTABLISHED state.
+ * Supporting sockets in LISTEN state will require us
+ * to modify the accept implementation to clone rather than
+ * share the ulp context.
+ */
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTSUPP;
+
/* allocate tls context */
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 73d19210dd49..f26376e954ae 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -214,7 +214,11 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
data_len, tls_ctx->iv);
- rc = crypto_aead_encrypt(aead_req);
+
+ aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &ctx->async_wait);
+
+ rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);
ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;
@@ -391,7 +395,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
while (msg_data_left(msg)) {
if (sk->sk_err) {
- ret = sk->sk_err;
+ ret = -sk->sk_err;
goto send_end;
}
@@ -544,7 +548,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
size_t copy, required_size;
if (sk->sk_err) {
- ret = sk->sk_err;
+ ret = -sk->sk_err;
goto sendpage_end;
}
@@ -577,6 +581,8 @@ alloc_payload:
get_page(page);
sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
sg_set_page(sg, page, copy, offset);
+ sg_unmark_end(sg);
+
ctx->sg_plaintext_num_elem++;
sk_mem_charge(sk, copy);
@@ -663,6 +669,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
goto out;
}
+ crypto_init_wait(&sw_ctx->async_wait);
+
ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;
crypto_info = &ctx->crypto_send;
@@ -681,18 +689,17 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
}
default:
rc = -EINVAL;
- goto out;
+ goto free_priv;
}
ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
ctx->tag_size = tag_size;
ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
ctx->iv_size = iv_size;
- ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
- GFP_KERNEL);
+ ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL);
if (!ctx->iv) {
rc = -ENOMEM;
- goto out;
+ goto free_priv;
}
memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
@@ -740,7 +747,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
if (!rc)
- goto out;
+ return 0;
free_aead:
crypto_free_aead(sw_ctx->aead_send);
@@ -751,6 +758,9 @@ free_rec_seq:
free_iv:
kfree(ctx->iv);
ctx->iv = NULL;
+free_priv:
+ kfree(ctx->priv_ctx);
+ ctx->priv_ctx = NULL;
out:
return rc;
}
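Editor's note: tls_do_encryption() above now arms a completion callback and wraps the AEAD call in crypto_wait_req(), which turns an -EINPROGRESS result from an async engine into a synchronous wait. A rough pthreads stand-in for that wait helper follows; the crypto_wait_req()/crypto_req_done() semantics are paraphrased, everything else here is hypothetical.

/* Block until an asynchronous operation reports completion. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct crypto_wait {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int err;
};

static void crypto_req_done(struct crypto_wait *w, int err)
{
	pthread_mutex_lock(&w->lock);
	w->done = 1;
	w->err = err;		/* final status reported by the engine */
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

/* Sync results return immediately; -EINPROGRESS waits for the callback. */
static int crypto_wait_req(int ret, struct crypto_wait *w)
{
	if (ret != -EINPROGRESS)
		return ret;

	pthread_mutex_lock(&w->lock);
	while (!w->done)
		pthread_cond_wait(&w->cond, &w->lock);
	ret = w->err;
	pthread_mutex_unlock(&w->lock);
	return ret;
}

static struct crypto_wait wait_obj = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0
};

static void *engine(void *arg)
{
	(void)arg;
	crypto_req_done(&wait_obj, 0);	/* pretend the hardware finished OK */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, engine, NULL);
	printf("encrypt rc=%d\n", crypto_wait_req(-EINPROGRESS, &wait_obj));
	pthread_join(t, NULL);
	return 0;
}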
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index a9ee634f3c42..0214acbd6bff 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -367,7 +367,7 @@ static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int
/* relaying can only happen while the wq still exists */
u_sleep = sk_sleep(&u->sk);
if (u_sleep)
- wake_up_interruptible_poll(u_sleep, key);
+ wake_up_interruptible_poll(u_sleep, key_to_poll(key));
return 0;
}
@@ -638,8 +638,8 @@ static int unix_stream_connect(struct socket *, struct sockaddr *,
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
-static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
-static unsigned int unix_dgram_poll(struct file *, struct socket *,
+static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
+static __poll_t unix_dgram_poll(struct file *, struct socket *,
poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
@@ -2640,10 +2640,10 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return err;
}
-static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
+static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
@@ -2675,11 +2675,12 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
return mask;
}
-static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
+static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk, *other;
- unsigned int mask, writable;
+ unsigned int writable;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
@@ -2869,7 +2870,6 @@ static int unix_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations unix_seq_fops = {
- .owner = THIS_MODULE,
.open = unix_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 5d28abf87fbf..9d95e773f4c8 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -850,11 +850,11 @@ static int vsock_shutdown(struct socket *sock, int mode)
return err;
}
-static unsigned int vsock_poll(struct file *file, struct socket *sock,
+static __poll_t vsock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk;
- unsigned int mask;
+ __poll_t mask;
struct vsock_sock *vsk;
sk = sock->sk;
@@ -951,7 +951,7 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
* POLLOUT|POLLWRNORM when peer is closed and nothing to read,
* but local send is not shutdown.
*/
- if (sk->sk_state == TCP_CLOSE) {
+ if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
mask |= POLLOUT | POLLWRNORM;
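Editor's note: several hunks above (tipc_poll, unix_poll, unix_dgram_poll, vsock_poll) convert poll handlers from unsigned int to __poll_t, a sparse-annotated bitwise type that keeps poll masks from mixing with ordinary integers. A stand-in sketch of the mask-building shape; the typedef and flag values below are illustrative, not the kernel's.

/* Build an event mask from socket state using a dedicated mask type. */
#include <stdio.h>

typedef unsigned int poll_t;		/* kernel: __bitwise __poll_t */

#define XPOLLIN  ((poll_t)0x0001)
#define XPOLLOUT ((poll_t)0x0004)

static poll_t sock_poll(int readable, int writable)
{
	poll_t mask = 0;

	if (readable)
		mask |= XPOLLIN;
	if (writable)
		mask |= XPOLLOUT;
	return mask;
}

int main(void)
{
	printf("mask=0x%x\n", sock_poll(1, 1));
	return 0;
}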
diff --git a/net/wireless/core.c b/net/wireless/core.c
index fdde0d98fde1..a6f3cac8c640 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
if (rv)
goto use_default_name;
} else {
+ int rv;
+
use_default_name:
/* NOTE: This is *probably* safe w/out holding rtnl because of
* the restrictions on phy names. Probably this call could
@@ -446,7 +448,11 @@ use_default_name:
* phyX. But we might want to add some locking and check return
* value, and use a different name if this one exists?
*/
- dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
+ rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
+ if (rv < 0) {
+ kfree(rdev);
+ return NULL;
+ }
}
INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d2f7e8b8a097..eaff636169c2 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -507,8 +507,6 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
-#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
-
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
#else
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 413d4f4e6334..a1d10993d08a 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -126,6 +126,11 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
wdev->ibss_fixed = params->channel_fixed;
wdev->ibss_dfs_possible = params->userspace_handles_dfs;
wdev->chandef = params->chandef;
+ if (connkeys) {
+ params->wep_keys = connkeys->params;
+ params->wep_tx_key = connkeys->def;
+ }
+
#ifdef CONFIG_CFG80211_WEXT
wdev->wext.ibss.chandef = params->chandef;
#endif
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index e7c64a8dce54..bbb9907bfa86 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -692,7 +692,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
return rdev_mgmt_tx(rdev, wdev, params, cookie);
}
-bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
+bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
const u8 *buf, size_t len, u32 flags)
{
struct wiphy *wiphy = wdev->wiphy;
@@ -708,7 +708,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
u16 stype;
- trace_cfg80211_rx_mgmt(wdev, freq, sig_mbm);
+ trace_cfg80211_rx_mgmt(wdev, freq, sig_dbm);
stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
if (!(stypes->rx & BIT(stype))) {
@@ -735,7 +735,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
/* Indicate the received Action frame to user space */
if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
- freq, sig_mbm,
+ freq, sig_dbm,
buf, len, flags, GFP_ATOMIC))
continue;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 213d0c498c97..ab0c687d0c44 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -734,11 +734,12 @@ struct key_parse {
bool def_uni, def_multi;
};
-static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
+static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key,
+ struct key_parse *k)
{
struct nlattr *tb[NL80211_KEY_MAX + 1];
int err = nla_parse_nested(tb, NL80211_KEY_MAX, key,
- nl80211_key_policy, NULL);
+ nl80211_key_policy, info->extack);
if (err)
return err;
@@ -771,7 +772,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
if (tb[NL80211_KEY_TYPE]) {
k->type = nla_get_u32(tb[NL80211_KEY_TYPE]);
if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
- return -EINVAL;
+ return genl_err_attr(info, -EINVAL,
+ tb[NL80211_KEY_TYPE]);
}
if (tb[NL80211_KEY_DEFAULT_TYPES]) {
@@ -779,7 +781,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
tb[NL80211_KEY_DEFAULT_TYPES],
- nl80211_key_default_policy, NULL);
+ nl80211_key_default_policy,
+ info->extack);
if (err)
return err;
@@ -820,8 +823,10 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
- if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
+ if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) {
+ GENL_SET_ERR_MSG(info, "key type out of range");
return -EINVAL;
+ }
}
if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
@@ -850,31 +855,42 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
k->type = -1;
if (info->attrs[NL80211_ATTR_KEY])
- err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k);
+ err = nl80211_parse_key_new(info, info->attrs[NL80211_ATTR_KEY], k);
else
err = nl80211_parse_key_old(info, k);
if (err)
return err;
- if (k->def && k->defmgmt)
+ if (k->def && k->defmgmt) {
+ GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid");
return -EINVAL;
+ }
if (k->defmgmt) {
- if (k->def_uni || !k->def_multi)
+ if (k->def_uni || !k->def_multi) {
+ GENL_SET_ERR_MSG(info, "defmgmt key must be mcast");
return -EINVAL;
+ }
}
if (k->idx != -1) {
if (k->defmgmt) {
- if (k->idx < 4 || k->idx > 5)
+ if (k->idx < 4 || k->idx > 5) {
+ GENL_SET_ERR_MSG(info,
+ "defmgmt key idx not 4 or 5");
return -EINVAL;
+ }
} else if (k->def) {
- if (k->idx < 0 || k->idx > 3)
+ if (k->idx < 0 || k->idx > 3) {
+ GENL_SET_ERR_MSG(info, "def key idx not 0-3");
return -EINVAL;
+ }
} else {
- if (k->idx < 0 || k->idx > 5)
+ if (k->idx < 0 || k->idx > 5) {
+ GENL_SET_ERR_MSG(info, "key idx not 0-5");
return -EINVAL;
+ }
}
}
@@ -883,8 +899,9 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
static struct cfg80211_cached_keys *
nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
- struct nlattr *keys, bool *no_ht)
+ struct genl_info *info, bool *no_ht)
{
+ struct nlattr *keys = info->attrs[NL80211_ATTR_KEYS];
struct key_parse parse;
struct nlattr *key;
struct cfg80211_cached_keys *result;
@@ -909,17 +926,22 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
memset(&parse, 0, sizeof(parse));
parse.idx = -1;
- err = nl80211_parse_key_new(key, &parse);
+ err = nl80211_parse_key_new(info, key, &parse);
if (err)
goto error;
err = -EINVAL;
if (!parse.p.key)
goto error;
- if (parse.idx < 0 || parse.idx > 3)
+ if (parse.idx < 0 || parse.idx > 3) {
+ GENL_SET_ERR_MSG(info, "key index out of range [0-3]");
goto error;
+ }
if (parse.def) {
- if (def)
+ if (def) {
+ GENL_SET_ERR_MSG(info,
+ "only one key can be default");
goto error;
+ }
def = 1;
result->def = parse.idx;
if (!parse.def_uni || !parse.def_multi)
@@ -932,6 +954,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
goto error;
if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 &&
parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) {
+ GENL_SET_ERR_MSG(info, "connect key must be WEP");
err = -EINVAL;
goto error;
}
@@ -947,6 +970,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
if (result->def < 0) {
err = -EINVAL;
+ GENL_SET_ERR_MSG(info, "need a default/TX key");
goto error;
}
@@ -2618,12 +2642,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
const u8 *ssid_ie;
if (!wdev->current_bss)
break;
+ rcu_read_lock();
ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
WLAN_EID_SSID);
- if (!ssid_ie)
- break;
- if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
- goto nla_put_failure_locked;
+ if (ssid_ie &&
+ nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
+ goto nla_put_failure_rcu_locked;
+ rcu_read_unlock();
break;
}
default:
@@ -2635,6 +2660,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
genlmsg_end(msg, hdr);
return 0;
+ nla_put_failure_rcu_locked:
+ rcu_read_unlock();
nla_put_failure_locked:
wdev_unlock(wdev);
nla_put_failure:
@@ -7817,6 +7844,11 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
intbss->ts_boottime, NL80211_BSS_PAD))
goto nla_put_failure;
+ if (!nl80211_put_signal(msg, intbss->pub.chains,
+ intbss->pub.chain_signal,
+ NL80211_BSS_CHAIN_SIGNAL))
+ goto nla_put_failure;
+
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
@@ -8613,9 +8645,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
bool no_ht = false;
- connkeys = nl80211_parse_connkeys(rdev,
- info->attrs[NL80211_ATTR_KEYS],
- &no_ht);
+ connkeys = nl80211_parse_connkeys(rdev, info, &no_ht);
if (IS_ERR(connkeys))
return PTR_ERR(connkeys);
@@ -9019,8 +9049,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
}
if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
- connkeys = nl80211_parse_connkeys(rdev,
- info->attrs[NL80211_ATTR_KEYS], NULL);
+ connkeys = nl80211_parse_connkeys(rdev, info, NULL);
if (IS_ERR(connkeys))
return PTR_ERR(connkeys);
}
@@ -9806,7 +9835,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
*/
if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss &&
rdev->ops->get_station) {
- struct station_info sinfo;
+ struct station_info sinfo = {};
u8 *mac_addr;
mac_addr = wdev->current_bss->pub.bssid;
@@ -11361,7 +11390,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
break;
case NL80211_NAN_FUNC_FOLLOW_UP:
if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
- !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) {
+ !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
+ !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
err = -EINVAL;
goto out;
}
@@ -13944,7 +13974,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
- (from_ap && reason &&
+ (reason &&
nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
(from_ap &&
nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
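Editor's note: the nl80211 key-parsing hunks above attach extended-ACK strings (GENL_SET_ERR_MSG) to -EINVAL returns so user space learns *why* a key was rejected, not just that it was. A tiny sketch of the same "error code plus reason string" shape using an out-parameter; the function and messages are hypothetical.

/* Return an errno and, on failure, a human-readable reason. */
#include <errno.h>
#include <stdio.h>

static int parse_key_idx(int idx, const char **reason)
{
	if (idx < 0 || idx > 5) {
		*reason = "key idx not 0-5";
		return -EINVAL;
	}
	*reason = NULL;
	return 0;
}

int main(void)
{
	const char *reason;
	int err = parse_key_idx(7, &reason);

	if (err)
		fprintf(stderr, "rejected (%d): %s\n", err, reason);
	return 0;
}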
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 78e71b0390be..7b42f0bacfd8 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1769,8 +1769,7 @@ static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx,
if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS)
return;
- chan_before.center_freq = chan->center_freq;
- chan_before.flags = chan->flags;
+ chan_before = *chan;
if (chan->flags & IEEE80211_CHAN_NO_IR) {
chan->flags &= ~IEEE80211_CHAN_NO_IR;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index f6c5fe482506..d36c3eb7b931 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -981,6 +981,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
found->ts = tmp->ts;
found->ts_boottime = tmp->ts_boottime;
found->parent_tsf = tmp->parent_tsf;
+ found->pub.chains = tmp->pub.chains;
+ memcpy(found->pub.chain_signal, tmp->pub.chain_signal,
+ IEEE80211_MAX_CHAINS);
ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
} else {
struct cfg80211_internal_bss *new;
@@ -1233,6 +1236,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
tmp.ts_boottime = data->boottime_ns;
tmp.parent_tsf = data->parent_tsf;
+ tmp.pub.chains = data->chains;
+ memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index f3353fe5b35b..bcfedd39e7a3 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2544,20 +2544,20 @@ DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta,
);
TRACE_EVENT(cfg80211_rx_mgmt,
- TP_PROTO(struct wireless_dev *wdev, int freq, int sig_mbm),
- TP_ARGS(wdev, freq, sig_mbm),
+ TP_PROTO(struct wireless_dev *wdev, int freq, int sig_dbm),
+ TP_ARGS(wdev, freq, sig_dbm),
TP_STRUCT__entry(
WDEV_ENTRY
__field(int, freq)
- __field(int, sig_mbm)
+ __field(int, sig_dbm)
),
TP_fast_assign(
WDEV_ASSIGN;
__entry->freq = freq;
- __entry->sig_mbm = sig_mbm;
+ __entry->sig_dbm = sig_dbm;
),
- TP_printk(WDEV_PR_FMT ", freq: %d, sig mbm: %d",
- WDEV_PR_ARG, __entry->freq, __entry->sig_mbm)
+ TP_printk(WDEV_PR_FMT ", freq: %d, sig dbm: %d",
+ WDEV_PR_ARG, __entry->freq, __entry->sig_dbm)
);
TRACE_EVENT(cfg80211_mgmt_tx_status,
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 7ca04a7de85a..05186a47878f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1254,8 +1254,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- /* we are under RTNL - globally locked - so can use a static struct */
- static struct station_info sinfo;
+ struct station_info sinfo = {};
u8 addr[ETH_ALEN];
int err;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 6cdb054484d6..9efbfc753347 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -1035,18 +1035,23 @@ static int ioctl_standard_call(struct net_device * dev,
}
-int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
- void __user *arg)
+int wext_handle_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
struct iw_request_info info = { .cmd = cmd, .flags = 0 };
+ struct iwreq iwr;
int ret;
- ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
+ if (copy_from_user(&iwr, arg, sizeof(iwr)))
+ return -EFAULT;
+
+ iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
+
+ ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
ioctl_standard_call,
ioctl_private_call);
if (ret >= 0 &&
IW_IS_GET(cmd) &&
- copy_to_user(arg, iwr, sizeof(struct iwreq)))
+ copy_to_user(arg, &iwr, sizeof(struct iwreq)))
return -EFAULT;
return ret;
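Editor's note: wext_handle_ioctl() above now copies the whole struct iwreq from user space into a local copy, forces NUL termination of the embedded interface name, and copies the result back only for GET commands. A userspace sketch of that copy-in / sanitize / copy-out shape; do_ioctl(), handle_request() and struct req are hypothetical stand-ins for the kernel pattern.

/* Never operate on caller-owned memory directly; snapshot it first. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct req {
	char name[16];
	int value;
};

static int handle_request(struct req *r)
{
	r->value = (int)strlen(r->name);	/* fill in the reply */
	return 0;
}

static int do_ioctl(void *user_arg)
{
	struct req r;
	int ret;

	/* Copy in: the caller cannot mutate our working copy mid-call. */
	memcpy(&r, user_arg, sizeof(r));

	/* Sanitize: an unterminated name must not run past the buffer. */
	r.name[sizeof(r.name) - 1] = '\0';

	ret = handle_request(&r);
	if (ret >= 0)
		memcpy(user_arg, &r, sizeof(r));	/* copy out on success */
	return ret;
}

int main(void)
{
	struct req r;
	int ret;

	memset(r.name, 'A', sizeof(r.name));	/* deliberately unterminated */
	r.value = 0;
	ret = do_ioctl(&r);
	printf("ret=%d value=%d\n", ret, r.value);
	return 0;
}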
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index e98a01c1034f..5511f989ef47 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -133,7 +133,6 @@ static int seq_open_wireless(struct inode *inode, struct file *file)
}
static const struct file_operations wireless_seq_fops = {
- .owner = THIS_MODULE,
.open = seq_open_wireless,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 30e5746085b8..8e70291e586a 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -23,32 +23,114 @@
#include <linux/notifier.h>
#ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
int err;
+ unsigned long flags;
struct xfrm_state *x;
+ struct sk_buff *skb2;
+ struct softnet_data *sd;
+ netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
- if (skb_is_gso(skb))
- return 0;
+ if (!xo)
+ return skb;
- if (xo) {
- x = skb->sp->xvec[skb->sp->len - 1];
- if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
- return 0;
+ if (!(features & NETIF_F_HW_ESP))
+ esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+ x = skb->sp->xvec[skb->sp->len - 1];
+ if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ return skb;
+ local_irq_save(flags);
+ sd = this_cpu_ptr(&softnet_data);
+ err = !skb_queue_empty(&sd->xfrm_backlog);
+ local_irq_restore(flags);
+
+ if (err) {
+ *again = true;
+ return skb;
+ }
+
+ if (skb_is_gso(skb)) {
+ struct net_device *dev = skb->dev;
+
+ if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+ struct sk_buff *segs;
+
+ /* Packet got rerouted, fixup features and segment it. */
+ esp_features = esp_features & ~(NETIF_F_HW_ESP
+ | NETIF_F_GSO_ESP);
+
+ segs = skb_gso_segment(skb, esp_features);
+ if (IS_ERR(segs)) {
+ kfree_skb(skb);
+ atomic_long_inc(&dev->tx_dropped);
+ return NULL;
+ } else {
+ consume_skb(skb);
+ skb = segs;
+ }
+ }
+ }
+
+ if (!skb->next) {
x->outer_mode->xmit(x, skb);
- err = x->type_offload->xmit(x, skb, features);
+ xo->flags |= XFRM_DEV_RESUME;
+
+ err = x->type_offload->xmit(x, skb, esp_features);
if (err) {
+ if (err == -EINPROGRESS)
+ return NULL;
+
XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
- return err;
+ kfree_skb(skb);
+ return NULL;
}
skb_push(skb, skb->data - skb_mac_header(skb));
+
+ return skb;
}
- return 0;
+ skb2 = skb;
+
+ do {
+ struct sk_buff *nskb = skb2->next;
+ skb2->next = NULL;
+
+ xo = xfrm_offload(skb2);
+ xo->flags |= XFRM_DEV_RESUME;
+
+ x->outer_mode->xmit(x, skb2);
+
+ err = x->type_offload->xmit(x, skb2, esp_features);
+ if (!err) {
+ skb2->next = nskb;
+ } else if (err != -EINPROGRESS) {
+ XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+ skb2->next = nskb;
+ kfree_skb_list(skb2);
+ return NULL;
+ } else {
+ if (skb == skb2)
+ skb = nskb;
+
+ if (!skb)
+ return NULL;
+
+ goto skip_push;
+ }
+
+ skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+skip_push:
+ skb2 = nskb;
+ } while (skb2);
+
+ return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
@@ -65,9 +147,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
if (!x->type_offload)
return -EINVAL;
- /* We don't yet support UDP encapsulation, TFC padding and ESN. */
- if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
- return 0;
+ /* We don't yet support UDP encapsulation and TFC padding. */
+ if (x->encap || x->tfcpad)
+ return -EINVAL;
dev = dev_get_by_index(net, xuo->ifindex);
if (!dev) {
@@ -96,12 +178,20 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
return 0;
}
+ if (x->props.flags & XFRM_STATE_ESN &&
+ !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
+ xso->dev = NULL;
+ dev_put(dev);
+ return -EINVAL;
+ }
+
xso->dev = dev;
xso->num_exthdrs = 1;
xso->flags = xuo->flags;
err = dev->xfrmdev_ops->xdo_dev_state_add(x);
if (err) {
+ xso->dev = NULL;
dev_put(dev);
return err;
}
@@ -120,8 +210,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
if (!x->type_offload || x->encap)
return false;
- if ((x->xso.offload_handle && (dev == dst->path->dev)) &&
- !dst->child->xfrm && x->type->get_mtu) {
+ if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
+ (!xdst->child->xfrm && x->type->get_mtu)) {
mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
if (skb->len <= mtu)
@@ -140,19 +230,82 @@ ok:
return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+
+void xfrm_dev_resume(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ int ret = NETDEV_TX_BUSY;
+ struct netdev_queue *txq;
+ struct softnet_data *sd;
+ unsigned long flags;
+
+ rcu_read_lock();
+ txq = netdev_pick_tx(dev, skb, NULL);
+
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ if (!netif_xmit_frozen_or_stopped(txq))
+ skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ HARD_TX_UNLOCK(dev, txq);
+
+ if (!dev_xmit_complete(ret)) {
+ local_irq_save(flags);
+ sd = this_cpu_ptr(&softnet_data);
+ skb_queue_tail(&sd->xfrm_backlog, skb);
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_resume);
+
+void xfrm_dev_backlog(struct softnet_data *sd)
+{
+ struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(xfrm_backlog))
+ return;
+
+ __skb_queue_head_init(&list);
+
+ spin_lock(&xfrm_backlog->lock);
+ skb_queue_splice_init(xfrm_backlog, &list);
+ spin_unlock(&xfrm_backlog->lock);
+
+ while (!skb_queue_empty(&list)) {
+ skb = __skb_dequeue(&list);
+ xfrm_dev_resume(skb);
+ }
+
+}
#endif
-static int xfrm_dev_register(struct net_device *dev)
+static int xfrm_api_check(struct net_device *dev)
{
- if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
- return NOTIFY_BAD;
+#ifdef CONFIG_XFRM_OFFLOAD
if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
!(dev->features & NETIF_F_HW_ESP))
return NOTIFY_BAD;
+ if ((dev->features & NETIF_F_HW_ESP) &&
+ (!(dev->xfrmdev_ops &&
+ dev->xfrmdev_ops->xdo_dev_state_add &&
+ dev->xfrmdev_ops->xdo_dev_state_delete)))
+ return NOTIFY_BAD;
+#else
+ if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
+ return NOTIFY_BAD;
+#endif
+
return NOTIFY_DONE;
}
+static int xfrm_dev_register(struct net_device *dev)
+{
+ return xfrm_api_check(dev);
+}
+
static int xfrm_dev_unregister(struct net_device *dev)
{
xfrm_policy_cache_flush();
@@ -161,16 +314,7 @@ static int xfrm_dev_unregister(struct net_device *dev)
static int xfrm_dev_feat_change(struct net_device *dev)
{
- if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
- return NOTIFY_BAD;
- else if (!(dev->features & NETIF_F_HW_ESP))
- dev->xfrmdev_ops = NULL;
-
- if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
- !(dev->features & NETIF_F_HW_ESP))
- return NOTIFY_BAD;
-
- return NOTIFY_DONE;
+ return xfrm_api_check(dev);
}
static int xfrm_dev_down(struct net_device *dev)
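Editor's note: xfrm_dev_backlog() above splices the per-CPU backlog onto a private list while holding the queue lock, then resumes each packet with the lock dropped. A pthreads sketch of that splice-and-drain idiom using a singly linked list; the node type and process() are hypothetical, only the locking shape mirrors the patch.

/* Steal the whole queue under the lock, then drain it lock-free. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct node { int id; struct node *next; };

static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *backlog;

static void enqueue(struct node *n)
{
	pthread_mutex_lock(&backlog_lock);
	n->next = backlog;
	backlog = n;
	pthread_mutex_unlock(&backlog_lock);
}

static void process(struct node *n)
{
	printf("resumed %d\n", n->id);
}

static void drain_backlog(void)
{
	struct node *list;

	/* Hold the lock only long enough to detach every entry. */
	pthread_mutex_lock(&backlog_lock);
	list = backlog;
	backlog = NULL;
	pthread_mutex_unlock(&backlog_lock);

	while (list) {			/* drain without the lock held */
		struct node *n = list;

		list = n->next;
		process(n);
	}
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL };

	enqueue(&a);
	enqueue(&b);
	drain_backlog();
	return 0;
}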
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 3f6f6f8c9fa5..1472c0857975 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -257,7 +257,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
if (xo && (xo->flags & CRYPTO_DONE)) {
crypto_done = true;
- x = xfrm_input_state(skb);
family = XFRM_SPI_SKB_CB(skb)->family;
if (!(xo->status & CRYPTO_SUCCESS)) {
@@ -518,7 +517,7 @@ int xfrm_trans_queue(struct sk_buff *skb,
return -ENOBUFS;
XFRM_TRANS_SKB_CB(skb)->finish = finish;
- skb_queue_tail(&trans->queue, skb);
+ __skb_queue_tail(&trans->queue, skb);
tasklet_schedule(&trans->tasklet);
return 0;
}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 73ad8c8ef344..23468672a767 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -44,7 +44,7 @@ static int xfrm_skb_check_space(struct sk_buff *skb)
static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
- struct dst_entry *child = dst_clone(skb_dst(skb)->child);
+ struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));
skb_dst_drop(skb);
return child;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 70aa5cb0c659..7a23078132cf 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -54,7 +54,7 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
static struct kmem_cache *xfrm_dst_cache __read_mostly;
static __read_mostly seqcount_t xfrm_policy_hash_generation;
-static void xfrm_init_pmtu(struct dst_entry *dst);
+static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);
@@ -609,7 +609,8 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
- if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+ if (policy->walk.dead ||
+ xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
/* skip socket policies */
continue;
}
@@ -974,8 +975,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
}
if (!cnt)
err = -ESRCH;
- else
- xfrm_policy_cache_flush();
out:
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return err;
@@ -1257,7 +1256,7 @@ EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
- struct net *net = xp_net(pol);
+ struct net *net = sock_net(sk);
struct xfrm_policy *old_pol;
#ifdef CONFIG_XFRM_SUB_POLICY
@@ -1544,7 +1543,9 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
*/
static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
- struct xfrm_state **xfrm, int nx,
+ struct xfrm_state **xfrm,
+ struct xfrm_dst **bundle,
+ int nx,
const struct flowi *fl,
struct dst_entry *dst)
{
@@ -1552,8 +1553,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
unsigned long now = jiffies;
struct net_device *dev;
struct xfrm_mode *inner_mode;
- struct dst_entry *dst_prev = NULL;
- struct dst_entry *dst0 = NULL;
+ struct xfrm_dst *xdst_prev = NULL;
+ struct xfrm_dst *xdst0 = NULL;
int i = 0;
int err;
int header_len = 0;
@@ -1579,13 +1580,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
goto put_states;
}
- if (!dst_prev)
- dst0 = dst1;
+ bundle[i] = xdst;
+ if (!xdst_prev)
+ xdst0 = xdst;
else
/* Ref count is taken during xfrm_alloc_dst()
* No need to do dst_clone() on dst1
*/
- dst_prev->child = dst1;
+ xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
if (xfrm[i]->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(xfrm[i],
@@ -1622,8 +1624,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst1->input = dst_discard;
dst1->output = inner_mode->afinfo->output;
- dst1->next = dst_prev;
- dst_prev = dst1;
+ xdst_prev = xdst;
header_len += xfrm[i]->props.header_len;
if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
@@ -1631,40 +1632,39 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
trailer_len += xfrm[i]->props.trailer_len;
}
- dst_prev->child = dst;
- dst0->path = dst;
+ xfrm_dst_set_child(xdst_prev, dst);
+ xdst0->path = dst;
err = -ENODEV;
dev = dst->dev;
if (!dev)
goto free_dst;
- xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
- xfrm_init_pmtu(dst_prev);
-
- for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
+ xfrm_init_path(xdst0, dst, nfheader_len);
+ xfrm_init_pmtu(bundle, nx);
- err = xfrm_fill_dst(xdst, dev, fl);
+ for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
+ xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
+ err = xfrm_fill_dst(xdst_prev, dev, fl);
if (err)
goto free_dst;
- dst_prev->header_len = header_len;
- dst_prev->trailer_len = trailer_len;
- header_len -= xdst->u.dst.xfrm->props.header_len;
- trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
+ xdst_prev->u.dst.header_len = header_len;
+ xdst_prev->u.dst.trailer_len = trailer_len;
+ header_len -= xdst_prev->u.dst.xfrm->props.header_len;
+ trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
}
out:
- return dst0;
+ return &xdst0->u.dst;
put_states:
for (; i < nx; i++)
xfrm_state_put(xfrm[i]);
free_dst:
- if (dst0)
- dst_release_immediate(dst0);
- dst0 = ERR_PTR(err);
+ if (xdst0)
+ dst_release_immediate(&xdst0->u.dst);
+ xdst0 = ERR_PTR(err);
goto out;
}
@@ -1743,6 +1743,8 @@ void xfrm_policy_cache_flush(void)
bool found = 0;
int cpu;
+ might_sleep();
+
local_bh_disable();
rcu_read_lock();
for_each_possible_cpu(cpu) {
@@ -1806,7 +1808,7 @@ static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
for (i = 0; i < num; i++) {
if (!dst || dst->xfrm != xfrm[i])
return false;
- dst = dst->child;
+ dst = xfrm_dst_child(dst);
}
return xfrm_bundle_ok(xdst);
@@ -1819,6 +1821,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
{
struct net *net = xp_net(pols[0]);
struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
+ struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
struct xfrm_dst *xdst, *old;
struct dst_entry *dst;
int err;
@@ -1847,7 +1850,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
old = xdst;
- dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
+ dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
if (IS_ERR(dst)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
return ERR_CAST(dst);
@@ -1887,8 +1890,8 @@ static void xfrm_policy_queue_process(struct timer_list *t)
xfrm_decode_session(skb, &fl, dst->ops->family);
spin_unlock(&pq->hold_queue.lock);
- dst_hold(dst->path);
- dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
+ dst_hold(xfrm_dst_path(dst));
+ dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0);
if (IS_ERR(dst))
goto purge_queue;
@@ -1917,8 +1920,8 @@ static void xfrm_policy_queue_process(struct timer_list *t)
skb = __skb_dequeue(&list);
xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
- dst_hold(skb_dst(skb)->path);
- dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
+ dst_hold(xfrm_dst_path(skb_dst(skb)));
+ dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
if (IS_ERR(dst)) {
kfree_skb(skb);
continue;
@@ -2019,8 +2022,8 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
dst1->output = xdst_queue_output;
dst_hold(dst);
- dst1->child = dst;
- dst1->path = dst;
+ xfrm_dst_set_child(xdst, dst);
+ xdst->path = dst;
xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
@@ -2062,8 +2065,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
if (num_xfrms <= 0)
goto make_dummy_bundle;
+ local_bh_disable();
xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
- xflo->dst_orig);
+ xflo->dst_orig);
+ local_bh_enable();
+
if (IS_ERR(xdst)) {
err = PTR_ERR(xdst);
if (err != -EAGAIN)
@@ -2150,9 +2156,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
goto no_transform;
}
+ local_bh_disable();
xdst = xfrm_resolve_and_create_bundle(
pols, num_pols, fl,
family, dst_orig);
+ local_bh_enable();
+
if (IS_ERR(xdst)) {
xfrm_pols_put(pols, num_pols);
err = PTR_ERR(xdst);
@@ -2583,7 +2592,7 @@ static int stale_bundle(struct dst_entry *dst)
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
- while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
+ while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
dst->dev = dev_net(dev)->loopback_dev;
dev_hold(dst->dev);
dev_put(dev);
@@ -2607,13 +2616,15 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
return dst;
}
-static void xfrm_init_pmtu(struct dst_entry *dst)
+static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
- do {
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ while (nr--) {
+ struct xfrm_dst *xdst = bundle[nr];
u32 pmtu, route_mtu_cached;
+ struct dst_entry *dst;
- pmtu = dst_mtu(dst->child);
+ dst = &xdst->u.dst;
+ pmtu = dst_mtu(xfrm_dst_child(dst));
xdst->child_mtu_cached = pmtu;
pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
@@ -2625,7 +2636,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
pmtu = route_mtu_cached;
dst_metric_set(dst, RTAX_MTU, pmtu);
- } while ((dst = dst->next));
+ }
}
/* Check that the bundle accepts the flow and its components are
@@ -2634,19 +2645,20 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
+ struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
struct dst_entry *dst = &first->u.dst;
- struct xfrm_dst *last;
+ struct xfrm_dst *xdst;
+ int start_from, nr;
u32 mtu;
- if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
+ if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
(dst->dev && !netif_running(dst->dev)))
return 0;
if (dst->flags & DST_XFRM_QUEUE)
return 1;
- last = NULL;
-
+ start_from = nr = 0;
do {
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -2658,9 +2670,11 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
return 0;
- mtu = dst_mtu(dst->child);
+ bundle[nr++] = xdst;
+
+ mtu = dst_mtu(xfrm_dst_child(dst));
if (xdst->child_mtu_cached != mtu) {
- last = xdst;
+ start_from = nr;
xdst->child_mtu_cached = mtu;
}
@@ -2668,30 +2682,30 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
return 0;
mtu = dst_mtu(xdst->route);
if (xdst->route_mtu_cached != mtu) {
- last = xdst;
+ start_from = nr;
xdst->route_mtu_cached = mtu;
}
- dst = dst->child;
+ dst = xfrm_dst_child(dst);
} while (dst->xfrm);
- if (likely(!last))
+ if (likely(!start_from))
return 1;
- mtu = last->child_mtu_cached;
- for (;;) {
- dst = &last->u.dst;
+ xdst = bundle[start_from - 1];
+ mtu = xdst->child_mtu_cached;
+ while (start_from--) {
+ dst = &xdst->u.dst;
mtu = xfrm_state_mtu(dst->xfrm, mtu);
- if (mtu > last->route_mtu_cached)
- mtu = last->route_mtu_cached;
+ if (mtu > xdst->route_mtu_cached)
+ mtu = xdst->route_mtu_cached;
dst_metric_set(dst, RTAX_MTU, mtu);
-
- if (last == first)
+ if (!start_from)
break;
- last = (struct xfrm_dst *)last->u.dst.next;
- last->child_mtu_cached = mtu;
+ xdst = bundle[start_from - 1];
+ xdst->child_mtu_cached = mtu;
}
return 1;
@@ -2699,22 +2713,20 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
- return dst_metric_advmss(dst->path);
+ return dst_metric_advmss(xfrm_dst_path(dst));
}
static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
- return mtu ? : dst_mtu(dst->path);
+ return mtu ? : dst_mtu(xfrm_dst_path(dst));
}
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
const void *daddr)
{
- const struct dst_entry *path = dst->path;
-
- for (; dst != path; dst = dst->child) {
+ while (dst->xfrm) {
const struct xfrm_state *xfrm = dst->xfrm;
if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
@@ -2723,6 +2735,8 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
daddr = xfrm->coaddr;
else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
daddr = &xfrm->id.daddr;
+
+ dst = xfrm_dst_child(dst);
}
return daddr;
}
@@ -2731,7 +2745,7 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
- const struct dst_entry *path = dst->path;
+ const struct dst_entry *path = xfrm_dst_path(dst);
if (!skb)
daddr = xfrm_get_dst_nexthop(dst, daddr);
@@ -2740,7 +2754,7 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
- const struct dst_entry *path = dst->path;
+ const struct dst_entry *path = xfrm_dst_path(dst);
daddr = xfrm_get_dst_nexthop(dst, daddr);
path->ops->confirm_neigh(path, daddr);
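Note on the xfrm_policy.c rework above: the old code walked the bundle through dst->child and dst->next pointers; the new code records every level in an on-stack bundle[] array on the way down and, when any cached MTU changed, replays the MTU calculation backwards from the deepest changed entry (start_from). Below is a minimal userspace sketch of that backward pass, with simplified stand-in types and without the xfrm_state_mtu() step:

#include <stdio.h>

#define MAX_DEPTH 6

struct node {
	unsigned int child_mtu_cached;	/* MTU reported by inner dst */
	unsigned int route_mtu;		/* stand-in for dst_mtu(route) */
	unsigned int mtu;		/* stand-in for the RTAX_MTU metric */
};

/* Walk the recorded bundle backwards from the deepest entry whose
 * cached MTU changed, clamping against each route MTU and updating the
 * parent's cache on the way out.
 */
static void recalc_mtu(struct node **bundle, int start_from,
		       unsigned int mtu)
{
	while (start_from--) {
		struct node *n = bundle[start_from];

		if (mtu > n->route_mtu)
			mtu = n->route_mtu;
		n->mtu = mtu;
		if (!start_from)
			break;
		bundle[start_from - 1]->child_mtu_cached = mtu;
	}
}

int main(void)
{
	struct node outer = { 1500, 1500, 0 }, inner = { 1400, 1500, 0 };
	struct node *bundle[MAX_DEPTH] = { &outer, &inner };

	/* entry 1 (the deepest) saw its child MTU change to 1400 */
	recalc_mtu(bundle, 2, inner.child_mtu_cached);
	printf("outer mtu=%u inner mtu=%u\n", outer.mtu, inner.mtu);
	return 0;
}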
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index ba2b539879bc..6d5f85f4e672 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -71,7 +71,6 @@ static int xfrm_statistics_seq_open(struct inode *inode, struct file *file)
}
static const struct file_operations xfrm_statistics_seq_fops = {
- .owner = THIS_MODULE,
.open = xfrm_statistics_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 8b23c5bcf8e8..1d38c6acf8af 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -551,6 +551,8 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
bitnr = replay_esn->replay_window - (diff - pos);
}
+ xfrm_dev_state_advance_esn(x);
+
nr = bitnr >> 5;
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] |= (1U << bitnr);
@@ -666,7 +668,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
if (unlikely(oseq < replay_esn->oseq)) {
XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
xo->seq.hi = oseq_hi;
-
+ replay_esn->oseq_hi = oseq_hi;
if (replay_esn->oseq_hi == 0) {
replay_esn->oseq--;
replay_esn->oseq_hi--;
@@ -678,7 +680,6 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
}
replay_esn->oseq = oseq;
- replay_esn->oseq_hi = oseq_hi;
if (xfrm_aevent_is_on(net))
x->repl->notify(x, XFRM_REPLAY_UPDATE);
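Note on the xfrm_replay.c hunk above: the output-overflow fix stores the incremented oseq_hi into replay_esn before the wrap check, so the check observes the updated high half rather than a stale one. A hedged sketch of how the two 32-bit halves compose into the 64-bit ESN (field names simplified):

#include <stdint.h>
#include <stdio.h>

/* The extended sequence number is carried as two 32-bit halves:
 * oseq (low) and oseq_hi (high).
 */
static uint64_t esn64(uint32_t oseq, uint32_t oseq_hi)
{
	return ((uint64_t)oseq_hi << 32) | oseq;
}

int main(void)
{
	uint32_t oseq = 0, oseq_hi = 0;

	/* The low half just wrapped: the fix stores the bumped high
	 * half first, so the following overflow test sees the fresh
	 * value instead of a stale one.
	 */
	oseq_hi++;
	if (oseq_hi == 0)
		puts("64-bit ESN overflow: sequence space exhausted");
	printf("esn=%llu\n", (unsigned long long)esn64(oseq, oseq_hi));
	return 0;
}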
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 500b3391f474..54e21f19d722 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -313,13 +313,14 @@ retry:
if ((type && !try_module_get(type->owner)))
type = NULL;
+ rcu_read_unlock();
+
if (!type && try_load) {
request_module("xfrm-offload-%d-%d", family, proto);
- try_load = 0;
+ try_load = false;
goto retry;
}
- rcu_read_unlock();
return type;
}
@@ -1534,8 +1535,12 @@ out:
err = -EINVAL;
spin_lock_bh(&x1->lock);
if (likely(x1->km.state == XFRM_STATE_VALID)) {
- if (x->encap && x1->encap)
+ if (x->encap && x1->encap &&
+ x->encap->encap_type == x1->encap->encap_type)
memcpy(x1->encap, x->encap, sizeof(*x1->encap));
+ else if (x->encap || x1->encap)
+ goto fail;
+
if (x->coaddr && x1->coaddr) {
memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
}
@@ -1552,6 +1557,8 @@ out:
x->km.state = XFRM_STATE_DEAD;
__xfrm_state_put(x);
}
+
+fail:
spin_unlock_bh(&x1->lock);
xfrm_state_put(x1);
@@ -2049,6 +2056,13 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
struct xfrm_mgr *km;
struct xfrm_policy *pol = NULL;
+ if (!optval && !optlen) {
+ xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
+ xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
+ __sk_dst_reset(sk);
+ return 0;
+ }
+
if (optlen <= 0 || optlen > PAGE_SIZE)
return -EMSGSIZE;
@@ -2265,8 +2279,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
goto error;
}
- x->km.state = XFRM_STATE_VALID;
-
error:
return err;
}
@@ -2275,7 +2287,13 @@ EXPORT_SYMBOL(__xfrm_init_state);
int xfrm_init_state(struct xfrm_state *x)
{
- return __xfrm_init_state(x, true, false);
+ int err;
+
+ err = __xfrm_init_state(x, true, false);
+ if (!err)
+ x->km.state = XFRM_STATE_VALID;
+
+ return err;
}
EXPORT_SYMBOL(xfrm_init_state);
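Note on the xfrm_state.c retry fix above: request_module() may sleep, which is illegal inside an RCU read-side critical section, so the unlock now happens before the retry path. A self-contained userspace sketch of the same unlock-then-load-then-retry shape; lookup(), load_module() and the no-op RCU calls are stand-ins, not kernel APIs:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct handler { int id; };

/* Userspace stand-ins: the real code holds rcu_read_lock() across the
 * table lookup, and request_module() may sleep.
 */
static void rcu_read_lock(void) { }
static void rcu_read_unlock(void) { }

static bool module_loaded;
static struct handler the_handler = { .id = 50 };

static struct handler *lookup(int proto)
{
	return (module_loaded && proto == the_handler.id) ?
		&the_handler : NULL;
}

static void load_module(int proto)
{
	(void)proto;
	module_loaded = true;	/* pretend modprobe succeeded */
}

static struct handler *get_handler(int proto)
{
	struct handler *h;
	bool try_load = true;

retry:
	rcu_read_lock();
	h = lookup(proto);
	rcu_read_unlock();	/* drop the read lock before any sleep */

	if (!h && try_load) {
		load_module(proto);	/* sleeping call, now legal */
		try_load = false;	/* at most one load attempt */
		goto retry;
	}
	return h;
}

int main(void)
{
	printf("handler %sfound\n", get_handler(50) ? "" : "not ");
	return 0;
}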
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index bdb48e5dba04..7f52b8eb177d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -598,13 +598,6 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
goto error;
}
- if (attrs[XFRMA_OFFLOAD_DEV]) {
- err = xfrm_dev_state_add(net, x,
- nla_data(attrs[XFRMA_OFFLOAD_DEV]));
- if (err)
- goto error;
- }
-
if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
attrs[XFRMA_REPLAY_ESN_VAL])))
goto error;
@@ -620,6 +613,14 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
/* override default values from above */
xfrm_update_ae_params(x, attrs, 0);
+ /* configure the hardware if offload is requested */
+ if (attrs[XFRMA_OFFLOAD_DEV]) {
+ err = xfrm_dev_state_add(net, x,
+ nla_data(attrs[XFRMA_OFFLOAD_DEV]));
+ if (err)
+ goto error;
+ }
+
return x;
error:
@@ -662,6 +663,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
+ if (x->km.state == XFRM_STATE_VOID)
+ x->km.state = XFRM_STATE_VALID;
+
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.event = nlh->nlmsg_type;
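Note on the xfrm_user.c reordering above: hardware offload is now configured only after all SA parameters (including the replay/AE values) are final, and the state is flipped to XFRM_STATE_VALID as the last step in xfrm_add_sa(). A schematic sketch of that two-phase publish pattern; the types and field names here are illustrative only:

#include <stdio.h>

enum sa_state { SA_VOID, SA_VALID };

struct sa {
	int params_final;
	int offload_configured;
	enum sa_state state;
};

/* Two-phase bring-up: finish all software parameters, then configure
 * the hardware (which must see the final parameters), and only then
 * publish the state as VALID so nothing observes a half-built SA.
 */
static void sa_bringup(struct sa *x, int want_offload)
{
	x->params_final = 1;
	if (want_offload)
		x->offload_configured = 1;
	x->state = SA_VALID;	/* publish last */
}

int main(void)
{
	struct sa x = { 0, 0, SA_VOID };

	sa_bringup(&x, 1);
	printf("state=%s offload=%d\n",
	       x.state == SA_VALID ? "VALID" : "VOID",
	       x.offload_configured);
	return 0;
}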
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index adeaa1302f34..64335bb94f9f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -12,6 +12,7 @@ hostprogs-y += tracex3
hostprogs-y += tracex4
hostprogs-y += tracex5
hostprogs-y += tracex6
+hostprogs-y += tracex7
hostprogs-y += test_probe_write_user
hostprogs-y += trace_output
hostprogs-y += lathist
@@ -40,6 +41,7 @@ hostprogs-y += xdp_redirect
hostprogs-y += xdp_redirect_map
hostprogs-y += xdp_redirect_cpu
hostprogs-y += xdp_monitor
+hostprogs-y += xdp_rxq_info
hostprogs-y += syscall_tp
# Libbpf dependencies
@@ -58,6 +60,7 @@ tracex3-objs := bpf_load.o $(LIBBPF) tracex3_user.o
tracex4-objs := bpf_load.o $(LIBBPF) tracex4_user.o
tracex5-objs := bpf_load.o $(LIBBPF) tracex5_user.o
tracex6-objs := bpf_load.o $(LIBBPF) tracex6_user.o
+tracex7-objs := bpf_load.o $(LIBBPF) tracex7_user.o
load_sock_ops-objs := bpf_load.o $(LIBBPF) load_sock_ops.o
test_probe_write_user-objs := bpf_load.o $(LIBBPF) test_probe_write_user_user.o
trace_output-objs := bpf_load.o $(LIBBPF) trace_output_user.o
@@ -88,6 +91,7 @@ xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o
xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o
xdp_redirect_cpu-objs := bpf_load.o $(LIBBPF) xdp_redirect_cpu_user.o
xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o
+xdp_rxq_info-objs := bpf_load.o $(LIBBPF) xdp_rxq_info_user.o
syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o
# Tell kbuild to always build the programs
@@ -101,6 +105,7 @@ always += tracex3_kern.o
always += tracex4_kern.o
always += tracex5_kern.o
always += tracex6_kern.o
+always += tracex7_kern.o
always += sock_flags_kern.o
always += test_probe_write_user_kern.o
always += trace_output_kern.o
@@ -136,6 +141,8 @@ always += xdp_redirect_kern.o
always += xdp_redirect_map_kern.o
always += xdp_redirect_cpu_kern.o
always += xdp_monitor_kern.o
+always += xdp_rxq_info_kern.o
+always += xdp2skb_meta_kern.o
always += syscall_tp_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
@@ -155,6 +162,7 @@ HOSTLOADLIBES_tracex3 += -lelf
HOSTLOADLIBES_tracex4 += -lelf -lrt
HOSTLOADLIBES_tracex5 += -lelf
HOSTLOADLIBES_tracex6 += -lelf
+HOSTLOADLIBES_tracex7 += -lelf
HOSTLOADLIBES_test_cgrp2_sock2 += -lelf
HOSTLOADLIBES_load_sock_ops += -lelf
HOSTLOADLIBES_test_probe_write_user += -lelf
@@ -178,6 +186,7 @@ HOSTLOADLIBES_xdp_redirect += -lelf
HOSTLOADLIBES_xdp_redirect_map += -lelf
HOSTLOADLIBES_xdp_redirect_cpu += -lelf
HOSTLOADLIBES_xdp_monitor += -lelf
+HOSTLOADLIBES_xdp_rxq_info += -lelf
HOSTLOADLIBES_syscall_tp += -lelf
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
@@ -192,13 +201,16 @@ CLANG_ARCH_ARGS = -target $(ARCH)
endif
# Trick to allow make to be run from this directory
-all:
+all: $(LIBBPF)
$(MAKE) -C ../../ $(CURDIR)/
clean:
$(MAKE) -C ../../ M=$(CURDIR) clean
@rm -f *~
+$(LIBBPF): FORCE
+ $(MAKE) -C $(dir $@) $(notdir $@)
+
$(obj)/syscall_nrs.s: $(src)/syscall_nrs.c
$(call if_changed_dep,cc_s_c)
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
index 370b749f5ee6..f6bbf8f50da3 100644
--- a/samples/bpf/tcbpf2_kern.c
+++ b/samples/bpf/tcbpf2_kern.c
@@ -35,12 +35,22 @@ struct geneve_opt {
u8 opt_data[8]; /* hard-coded to 8 byte */
};
+struct erspan_md2 {
+ __be32 timestamp;
+ __be16 sgt;
+ __be16 flags;
+};
+
struct vxlan_metadata {
u32 gbp;
};
struct erspan_metadata {
- __be32 index;
+ union {
+ __be32 index;
+ struct erspan_md2 md2;
+ } u;
+ int version;
};
SEC("gre_set_tunnel")
@@ -81,6 +91,49 @@ int _gre_get_tunnel(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("ip6gretap_set_tunnel")
+int _ip6gretap_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key;
+ int ret;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv6[3] = _htonl(0x11); /* ::11 */
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+ key.tunnel_label = 0xabcde;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ip6gretap_get_tunnel")
+int _ip6gretap_get_tunnel(struct __sk_buff *skb)
+{
+ char fmt[] = "key %d remote ip6 ::%x label %x\n";
+ struct bpf_tunnel_key key;
+ int ret;
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt),
+ key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
+
+ return TC_ACT_OK;
+}
+
SEC("erspan_set_tunnel")
int _erspan_set_tunnel(struct __sk_buff *skb)
{
@@ -100,7 +153,18 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
return TC_ACT_SHOT;
}
- md.index = htonl(123);
+ __builtin_memset(&md, 0, sizeof(md));
+#ifdef ERSPAN_V1
+ md.version = 1;
+ md.u.index = htonl(123);
+#else
+ u8 direction = 1;
+ u16 hwid = 7;
+
+ md.version = 2;
+ md.u.md2.flags = htons((direction << 3) | (hwid << 4));
+#endif
+
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
ERROR(ret);
@@ -113,7 +177,7 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
SEC("erspan_get_tunnel")
int _erspan_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip 0x%x erspan index 0x%x\n";
+ char fmt[] = "key %d remote ip 0x%x erspan version %d\n";
struct bpf_tunnel_key key;
struct erspan_metadata md;
u32 index;
@@ -131,9 +195,105 @@ int _erspan_get_tunnel(struct __sk_buff *skb)
return TC_ACT_SHOT;
}
- index = bpf_ntohl(md.index);
bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, index);
+ key.tunnel_id, key.remote_ipv4, md.version);
+
+#ifdef ERSPAN_V1
+ char fmt2[] = "\tindex %x\n";
+
+ index = bpf_ntohl(md.u.index);
+ bpf_trace_printk(fmt2, sizeof(fmt2), index);
+#else
+ char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
+
+ bpf_trace_printk(fmt2, sizeof(fmt2),
+ (ntohs(md.u.md2.flags) >> 3) & 0x1,
+ (ntohs(md.u.md2.flags) >> 4) & 0x3f,
+ bpf_ntohl(md.u.md2.timestamp));
+#endif
+
+ return TC_ACT_OK;
+}
+
+SEC("ip4ip6erspan_set_tunnel")
+int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key;
+ struct erspan_metadata md;
+ int ret;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv6[3] = _htonl(0x11);
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ __builtin_memset(&md, 0, sizeof(md));
+
+#ifdef ERSPAN_V1
+ md.u.index = htonl(123);
+ md.version = 1;
+#else
+ u8 direction = 0;
+ u16 hwid = 17;
+
+ md.version = 2;
+ md.u.md2.flags = htons((direction << 3) | (hwid << 4));
+#endif
+
+ ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ip4ip6erspan_get_tunnel")
+int _ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
+{
+ char fmt[] = "ip6erspan get key %d remote ip6 ::%x erspan version %d\n";
+ struct bpf_tunnel_key key;
+ struct erspan_metadata md;
+ u32 index;
+ int ret;
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt),
+ key.tunnel_id, key.remote_ipv6[3], md.version);
+
+#ifdef ERSPAN_V1
+ char fmt2[] = "\tindex %x\n";
+
+ index = bpf_ntohl(md.u.index);
+ bpf_trace_printk(fmt2, sizeof(fmt2), index);
+#else
+ char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
+
+ bpf_trace_printk(fmt2, sizeof(fmt2),
+ (ntohs(md.u.md2.flags) >> 3) & 0x1,
+ (ntohs(md.u.md2.flags) >> 4) & 0x3f,
+ bpf_ntohl(md.u.md2.timestamp));
+#endif
return TC_ACT_OK;
}
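The ERSPAN v2 paths above pack direction and hwid into the 16-bit md2 flags word as (direction << 3) | (hwid << 4), and the _get_tunnel programs decode with the matching shifts and masks. A standalone round-trip of that packing in host byte order (the BPF programs additionally byte-swap with htons()/ntohs()):

#include <stdint.h>
#include <stdio.h>

/* ERSPAN v2 metadata flags layout used by the samples: bit 3 carries
 * the direction, bits 4-9 carry the 6-bit hwid.
 */
static uint16_t erspan2_pack(uint8_t direction, uint8_t hwid)
{
	return (uint16_t)((direction << 3) | (hwid << 4));
}

int main(void)
{
	uint16_t flags = erspan2_pack(1, 7);

	printf("direction=%u hwid=%u\n",
	       (flags >> 3) & 0x1, (flags >> 4) & 0x3f);
	return 0;
}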
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index 3e8232cc04a8..1af412ec6007 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -78,7 +78,8 @@ static int test_foo_bar(void)
if (join_cgroup(FOO))
goto err;
- if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
+ if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo");
goto err;
}
@@ -97,7 +98,8 @@ static int test_foo_bar(void)
printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
assert(system(PING_CMD) != 0);
- if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+ if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo/bar");
goto err;
}
@@ -114,7 +116,8 @@ static int test_foo_bar(void)
"This ping in cgroup /foo/bar should fail...\n");
assert(system(PING_CMD) != 0);
- if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+ if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo/bar");
goto err;
}
@@ -128,7 +131,8 @@ static int test_foo_bar(void)
"This ping in cgroup /foo/bar should pass...\n");
assert(system(PING_CMD) == 0);
- if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+ if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo/bar");
goto err;
}
@@ -161,13 +165,15 @@ static int test_foo_bar(void)
goto err;
}
- if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+ if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE)) {
errno = 0;
log_err("Unexpected success attaching overridable prog to /foo/bar");
goto err;
}
- if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
+ if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE)) {
errno = 0;
log_err("Unexpected success attaching overridable prog to /foo");
goto err;
@@ -273,27 +279,33 @@ static int test_multiprog(void)
if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
goto err;
- if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+ if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog to cg1");
goto err;
}
- if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+ if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_MULTI)) {
log_err("Unexpected success attaching the same prog to cg1");
goto err;
}
- if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+ if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog2 to cg1");
goto err;
}
- if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, 1)) {
+ if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to cg2");
goto err;
}
- if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, 2)) {
+ if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog to cg3");
goto err;
}
- if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, 1)) {
+ if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
+ BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to cg4");
goto err;
}
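The test updates above replace the raw attach-flag values 1 and 2 with the named BPF_F_ALLOW_OVERRIDE and BPF_F_ALLOW_MULTI. A toy model of what the two flags mean for effective-program resolution down a cgroup path; this mirrors the cases test_multiprog() exercises and is not the kernel's actual algorithm:

#include <stdio.h>

/* Values from uapi/linux/bpf.h, repeated here so the sketch is
 * self-contained.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)

struct cg { const char *name; unsigned int flags; };

int main(void)
{
	struct cg path[] = {
		{ "/cg1",         BPF_F_ALLOW_MULTI },
		{ "/cg1/cg2",     BPF_F_ALLOW_OVERRIDE },
		{ "/cg1/cg2/cg3", BPF_F_ALLOW_MULTI },
	};
	int i, n = sizeof(path) / sizeof(path[0]);

	for (i = 0; i < n; i++)
		printf("%-13s %s\n", path[i].name,
		       path[i].flags & BPF_F_ALLOW_MULTI ?
		       "prog adds to the effective set" :
		       "prog may be overridden by a descendant");
	return 0;
}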
diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh
new file mode 100755
index 000000000000..e68b9ee6814b
--- /dev/null
+++ b/samples/bpf/test_override_return.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+rm -f testfile.img
+dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
+DEVICE=$(losetup --show -f testfile.img)
+mkfs.btrfs -f $DEVICE
+mkdir tmpmnt
+./tracex7 $DEVICE
+if [ $? -eq 0 ]
+then
+ echo "SUCCESS!"
+else
+ echo "FAILED!"
+fi
+losetup -d $DEVICE
diff --git a/samples/bpf/test_tunnel_bpf.sh b/samples/bpf/test_tunnel_bpf.sh
index 312e1722a39f..ae7f7c38309b 100755
--- a/samples/bpf/test_tunnel_bpf.sh
+++ b/samples/bpf/test_tunnel_bpf.sh
@@ -33,10 +33,43 @@ function add_gre_tunnel {
ip addr add dev $DEV 10.1.1.200/24
}
-function add_erspan_tunnel {
+function add_ip6gretap_tunnel {
+
+ # assign ipv6 address
+ ip netns exec at_ns0 ip addr add ::11/96 dev veth0
+ ip netns exec at_ns0 ip link set dev veth0 up
+ ip addr add dev veth1 ::22/96
+ ip link set dev veth1 up
+
# in namespace
ip netns exec at_ns0 \
- ip link add dev $DEV_NS type $TYPE seq key 2 local 172.16.1.100 remote 172.16.1.200 erspan 123
+ ip link add dev $DEV_NS type $TYPE flowlabel 0xbcdef key 2 \
+ local ::11 remote ::22
+
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns0 ip addr add dev $DEV_NS fc80::100/96
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE external
+ ip addr add dev $DEV 10.1.1.200/24
+ ip addr add dev $DEV fc80::200/96
+ ip link set dev $DEV up
+}
+
+function add_erspan_tunnel {
+ # in namespace
+ if [ "$1" == "v1" ]; then
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE seq key 2 \
+ local 172.16.1.100 remote 172.16.1.200 \
+ erspan_ver 1 erspan 123
+ else
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE seq key 2 \
+ local 172.16.1.100 remote 172.16.1.200 \
+ erspan_ver 2 erspan_dir 1 erspan_hwid 3
+ fi
ip netns exec at_ns0 ip link set dev $DEV_NS up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
@@ -46,6 +79,35 @@ function add_erspan_tunnel {
ip addr add dev $DEV 10.1.1.200/24
}
+function add_ip6erspan_tunnel {
+
+ # assign ipv6 address
+ ip netns exec at_ns0 ip addr add ::11/96 dev veth0
+ ip netns exec at_ns0 ip link set dev veth0 up
+ ip addr add dev veth1 ::22/96
+ ip link set dev veth1 up
+
+ # in namespace
+ if [ "$1" == "v1" ]; then
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE seq key 2 \
+ local ::11 remote ::22 \
+ erspan_ver 1 erspan 123
+ else
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE seq key 2 \
+ local ::11 remote ::22 \
+ erspan_ver 2 erspan_dir 1 erspan_hwid 7
+ fi
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE external
+ ip addr add dev $DEV 10.1.1.200/24
+ ip link set dev $DEV up
+}
+
function add_vxlan_tunnel {
# Set static ARP entry here because iptables set-mark works
# on L3 packet, as a result not applying to ARP packets,
@@ -113,18 +175,65 @@ function test_gre {
cleanup
}
+function test_ip6gre {
+ TYPE=ip6gre
+ DEV_NS=ip6gre00
+ DEV=ip6gre11
+ config_device
+ # reuse the ip6gretap function
+ add_ip6gretap_tunnel
+ attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel
+ # underlay
+ ping6 -c 4 ::11
+ # overlay: ipv4 over ipv6
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ping -c 1 10.1.1.100
+ # overlay: ipv6 over ipv6
+ ip netns exec at_ns0 ping6 -c 1 fc80::200
+ cleanup
+}
+
+function test_ip6gretap {
+ TYPE=ip6gretap
+ DEV_NS=ip6gretap00
+ DEV=ip6gretap11
+ config_device
+ add_ip6gretap_tunnel
+ attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel
+ # underlay
+ ping6 -c 4 ::11
+ # overlay: ipv4 over ipv6
+ ip netns exec at_ns0 ping -i .2 -c 1 10.1.1.200
+ ping -c 1 10.1.1.100
+ # overlay: ipv6 over ipv6
+ ip netns exec at_ns0 ping6 -c 1 fc80::200
+ cleanup
+}
+
function test_erspan {
TYPE=erspan
DEV_NS=erspan00
DEV=erspan11
config_device
- add_erspan_tunnel
+ add_erspan_tunnel $1
attach_bpf $DEV erspan_set_tunnel erspan_get_tunnel
ping -c 1 10.1.1.100
ip netns exec at_ns0 ping -c 1 10.1.1.200
cleanup
}
+function test_ip6erspan {
+ TYPE=ip6erspan
+ DEV_NS=ip6erspan00
+ DEV=ip6erspan11
+ config_device
+ add_ip6erspan_tunnel $1
+ attach_bpf $DEV ip4ip6erspan_set_tunnel ip4ip6erspan_get_tunnel
+ ping6 -c 3 ::11
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
+}
+
function test_vxlan {
TYPE=vxlan
DEV_NS=vxlan00
@@ -175,9 +284,12 @@ function cleanup {
ip link del veth1
ip link del ipip11
ip link del gretap11
+ ip link del ip6gre11
+ ip link del ip6gretap11
ip link del vxlan11
ip link del geneve11
ip link del erspan11
+ ip link del ip6erspan11
pkill tcpdump
pkill cat
set -ex
@@ -187,8 +299,16 @@ trap cleanup 0 2 3 6 9
cleanup
echo "Testing GRE tunnel..."
test_gre
+echo "Testing IP6GRE tunnel..."
+test_ip6gre
+echo "Testing IP6GRETAP tunnel..."
+test_ip6gretap
echo "Testing ERSPAN tunnel..."
-test_erspan
+test_erspan v1
+test_erspan v2
+echo "Testing IP6ERSPAN tunnel..."
+test_ip6erspan v1
+test_ip6erspan v2
echo "Testing VXLAN tunnel..."
test_vxlan
echo "Testing GENEVE tunnel..."
diff --git a/samples/bpf/tracex7_kern.c b/samples/bpf/tracex7_kern.c
new file mode 100644
index 000000000000..1ab308a43e0f
--- /dev/null
+++ b/samples/bpf/tracex7_kern.c
@@ -0,0 +1,16 @@
+#include <uapi/linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+SEC("kprobe/open_ctree")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ unsigned long rc = -12; /* -ENOMEM */
+
+ bpf_override_return(ctx, rc);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
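tracex7_kern.c depends on BPF error injection: with CONFIG_BPF_KPROBE_OVERRIDE enabled and the target function whitelisted for error injection, bpf_override_return() makes the probed open_ctree() return the injected value without running its body. A schematic userspace analogy of that behavior (in the real kernel the override happens at the kprobe trap):

#include <stdio.h>

/* Toy model: when an override is armed for a function, the call site
 * returns the injected error and the real body never runs. The kernel
 * does this at the kprobe trap via bpf_override_return(); this is
 * only a userspace analogy.
 */
static long override_rc;
static int override_armed;

static long real_open_ctree(void)
{
	return 0;	/* success path of the real function */
}

static long call_open_ctree(void)
{
	if (override_armed)
		return override_rc;	/* body skipped entirely */
	return real_open_ctree();
}

int main(void)
{
	printf("normal: %ld\n", call_open_ctree());
	override_armed = 1;
	override_rc = -12;	/* -ENOMEM, matching tracex7_kern.c */
	printf("injected: %ld\n", call_open_ctree());
	return 0;
}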
diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
new file mode 100644
index 000000000000..8a52ac492e8b
--- /dev/null
+++ b/samples/bpf/tracex7_user.c
@@ -0,0 +1,28 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <linux/bpf.h>
+#include <unistd.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+int main(int argc, char **argv)
+{
+ FILE *f;
+ char filename[256];
+ char command[256];
+ int ret;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ /* The mount is expected to fail: the kprobe program overrides
+  * open_ctree() to return -ENOMEM, so a zero exit status from the
+  * mount command means the override did not take effect.
+  */
+ snprintf(command, 256, "mount %s tmpmnt/", argv[1]);
+ f = popen(command, "r");
+ ret = pclose(f);
+
+ return ret ? 0 : 1;
+}
diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh
new file mode 100755
index 000000000000..b9c9549c4c27
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta.sh
@@ -0,0 +1,220 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+#
+# Bash-shell example on using iproute2 tools 'tc' and 'ip' to load
+# eBPF programs, both for XDP and clsbpf. Shell script function
+# wrappers and even long options parsing is illustrated, for ease of
+# use.
+#
+# Related to sample/bpf/xdp2skb_meta_kern.c, which contains BPF-progs
+# that need to collaborate between XDP and TC hooks. Thus, it is
+# convenient that the same tool load both programs that need to work
+# together.
+#
+BPF_FILE=xdp2skb_meta_kern.o
+DIR=$(dirname $0)
+
+export TC=/usr/sbin/tc
+export IP=/usr/sbin/ip
+
+function usage() {
+ echo ""
+ echo "Usage: $0 [-vfh] --dev ethX"
+ echo " -d | --dev : Network device (required)"
+ echo " --flush : Cleanup flush TC and XDP progs"
+ echo " --list : (\$LIST) List TC and XDP progs"
+ echo " -v | --verbose : (\$VERBOSE) Verbose"
+ echo " --dry-run : (\$DRYRUN) Dry-run only (echo commands)"
+ echo ""
+}
+
+## -- General shell logging cmds --
+function err() {
+ local exitcode=$1
+ shift
+ echo "ERROR: $@" >&2
+ exit $exitcode
+}
+
+function info() {
+ if [[ -n "$VERBOSE" ]]; then
+ echo "# $@"
+ fi
+}
+
+## -- Helper function calls --
+
+# Wrapper call for TC and IP
+# - Will display the offending command on failure
+function _call_cmd() {
+ local cmd="$1"
+ local allow_fail="$2"
+ shift 2
+ if [[ -n "$VERBOSE" ]]; then
+ echo "$(basename $cmd) $@"
+ fi
+ if [[ -n "$DRYRUN" ]]; then
+ return
+ fi
+ $cmd "$@"
+ local status=$?
+ if (( $status != 0 )); then
+ if [[ "$allow_fail" == "" ]]; then
+ err 2 "Exec error($status) occurred cmd: \"$cmd $@\""
+ fi
+ fi
+}
+function call_tc() {
+ _call_cmd "$TC" "" "$@"
+}
+function call_tc_allow_fail() {
+ _call_cmd "$TC" "allow_fail" "$@"
+}
+function call_ip() {
+ _call_cmd "$IP" "" "$@"
+}
+
+## --- Parse command line arguments / parameters ---
+# Using external program "getopt" to get --long-options
+OPTIONS=$(getopt -o vfhd: \
+ --long verbose,flush,help,list,dev:,dry-run -- "$@")
+if (( $? != 0 )); then
+ err 4 "Error calling getopt"
+fi
+eval set -- "$OPTIONS"
+
+unset DEV
+unset FLUSH
+while true; do
+ case "$1" in
+ -d | --dev ) # device
+ DEV=$2
+ info "Device set to: DEV=$DEV" >&2
+ shift 2
+ ;;
+ -v | --verbose)
+ VERBOSE=yes
+ # info "Verbose mode: VERBOSE=$VERBOSE" >&2
+ shift
+ ;;
+ --dry-run )
+ DRYRUN=yes
+ VERBOSE=yes
+ info "Dry-run mode: enable VERBOSE and don't call TC+IP" >&2
+ shift
+ ;;
+ -f | --flush )
+ FLUSH=yes
+ shift
+ ;;
+ --list )
+ LIST=yes
+ shift
+ ;;
+ -- )
+ shift
+ break
+ ;;
+ -h | --help )
+ usage;
+ exit 0
+ ;;
+ * )
+ shift
+ break
+ ;;
+ esac
+done
+
+FILE="$DIR/$BPF_FILE"
+if [[ ! -e $FILE ]]; then
+ err 3 "Missing BPF object file ($FILE)"
+fi
+
+if [[ -z $DEV ]]; then
+ usage
+ err 2 "Please specify network device -- required option --dev"
+fi
+
+## -- Function calls --
+
+function list_tc()
+{
+ local device="$1"
+ shift
+ info "Listing current TC ingress rules"
+ call_tc filter show dev $device ingress
+}
+
+function list_xdp()
+{
+ local device="$1"
+ shift
+ info "Listing current XDP device($device) setting"
+ call_ip link show dev $device | grep --color=auto xdp
+}
+
+function flush_tc()
+{
+ local device="$1"
+ shift
+ info "Flush TC on device: $device"
+ call_tc_allow_fail filter del dev $device ingress
+ call_tc_allow_fail qdisc del dev $device clsact
+}
+
+function flush_xdp()
+{
+ local device="$1"
+ shift
+ info "Flush XDP on device: $device"
+ call_ip link set dev $device xdp off
+}
+
+function attach_tc_mark()
+{
+ local device="$1"
+ local file="$2"
+ local prog="tc_mark"
+ shift 2
+
+ # Re-attach clsact to clear/flush existing role
+ call_tc_allow_fail qdisc del dev $device clsact 2> /dev/null
+ call_tc qdisc add dev $device clsact
+
+ # Attach BPF prog
+ call_tc filter add dev $device ingress \
+ prio 1 handle 1 bpf da obj $file sec $prog
+}
+
+function attach_xdp_mark()
+{
+ local device="$1"
+ local file="$2"
+ local prog="xdp_mark"
+ shift 2
+
+ # Remove XDP prog in-case it's already loaded
+ # TODO: Need ip-link option to override/replace existing XDP prog
+ flush_xdp $device
+
+ # Attach XDP/BPF prog
+ call_ip link set dev $device xdp obj $file sec $prog
+}
+
+if [[ -n $FLUSH ]]; then
+ flush_tc $DEV
+ flush_xdp $DEV
+ exit 0
+fi
+
+if [[ -n $LIST ]]; then
+ list_tc $DEV
+ list_xdp $DEV
+ exit 0
+fi
+
+attach_tc_mark $DEV $FILE
+attach_xdp_mark $DEV $FILE
diff --git a/samples/bpf/xdp2skb_meta_kern.c b/samples/bpf/xdp2skb_meta_kern.c
new file mode 100644
index 000000000000..0c12048ac79f
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta_kern.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ * Example of how to transfer info from XDP to SKB, e.g. skb->mark
+ * -----------------------------------------------------------
+ * This uses the XDP data_meta infrastructure, and is a cooperation
+ * between two bpf-programs (1) XDP and (2) clsact at TC-ingress hook.
+ *
+ * Notice: This example does not use the BPF C-loader (bpf_load.c),
+ * but instead relies on the iproute2 TC tool for loading BPF-objects.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/pkt_cls.h>
+
+#include "bpf_helpers.h"
+
+/*
+ * This struct is stored in the XDP 'data_meta' area, which is located
+ * just in front of the raw packet payload data. The meaning is
+ * specific to these two BPF programs that use it as a communication
+ * channel. XDP adjusts/increases the area via a bpf-helper, and TC uses
+ * boundary checks to see if data has been provided.
+ *
+ * The struct must be 4 byte aligned, which here is enforced by the
+ * struct __attribute__((aligned(4))).
+ */
+struct meta_info {
+ __u32 mark;
+} __attribute__((aligned(4)));
+
+SEC("xdp_mark")
+int _xdp_mark(struct xdp_md *ctx)
+{
+ struct meta_info *meta;
+ void *data, *data_end;
+ int ret;
+
+ /* Reserve space in front of the data pointer for our meta info.
+ * (Notice drivers not supporting data_meta will fail here!)
+ */
+ ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));
+ if (ret < 0)
+ return XDP_ABORTED;
+
+ /* Notice: Kernel-side verifier requires that loading of
+ * ctx->data MUST happen _after_ helper bpf_xdp_adjust_meta(),
+ * as pkt-data pointers are invalidated. Helpers that require
+ * this are determined/marked by bpf_helper_changes_pkt_data()
+ */
+ data = (void *)(unsigned long)ctx->data;
+
+ /* Check data_meta have room for meta_info struct */
+ meta = (void *)(unsigned long)ctx->data_meta;
+ if (meta + 1 > data)
+ return XDP_ABORTED;
+
+ meta->mark = 42;
+
+ return XDP_PASS;
+}
+
+SEC("tc_mark")
+int _tc_mark(struct __sk_buff *ctx)
+{
+ void *data = (void *)(unsigned long)ctx->data;
+ void *data_end = (void *)(unsigned long)ctx->data_end;
+ void *data_meta = (void *)(unsigned long)ctx->data_meta;
+ struct meta_info *meta = data_meta;
+
+ /* Check XDP gave us some data_meta */
+ if (meta + 1 > data) {
+ ctx->mark = 41;
+ /* Accept the packet anyway; mark 41 signals missing data_meta */
+ return TC_ACT_OK;
+ }
+
+ /* Hint: See func tc_cls_act_is_valid_access() for BPF_WRITE access */
+ ctx->mark = meta->mark; /* Transfer XDP-mark to SKB-mark */
+
+ return TC_ACT_OK;
+}
+
+/* Manually attaching these programs:
+export DEV=ixgbe2
+export FILE=xdp2skb_meta_kern.o
+
+# via TC command
+tc qdisc del dev $DEV clsact 2> /dev/null
+tc qdisc add dev $DEV clsact
+tc filter add dev $DEV ingress prio 1 handle 1 bpf da obj $FILE sec tc_mark
+tc filter show dev $DEV ingress
+
+# XDP via IP command:
+ip link set dev $DEV xdp off
+ip link set dev $DEV xdp obj $FILE sec xdp_mark
+
+# Use iptable to "see" if SKBs are marked
+iptables -I INPUT -p icmp -m mark --mark 41 # == 0x29
+iptables -I INPUT -p icmp -m mark --mark 42 # == 0x2a
+
+# Hint: catch XDP_ABORTED errors via
+perf record -e xdp:*
+perf script
+
+*/
diff --git a/samples/bpf/xdp_monitor_kern.c b/samples/bpf/xdp_monitor_kern.c
index 2fe2f761a0d0..211db8ded0de 100644
--- a/samples/bpf/xdp_monitor_kern.c
+++ b/samples/bpf/xdp_monitor_kern.c
@@ -1,6 +1,7 @@
-/* XDP monitor tool, based on tracepoints
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
*
- * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * XDP monitor tool, based on tracepoints
*/
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
@@ -104,7 +105,7 @@ struct xdp_exception_ctx {
SEC("tracepoint/xdp/xdp_exception")
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
{
- u64 *cnt;;
+ u64 *cnt;
u32 key;
key = ctx->act;
@@ -118,3 +119,92 @@ int trace_xdp_exception(struct xdp_exception_ctx *ctx)
return 0;
}
+
+/* Common stats data record shared with _user.c */
+struct datarec {
+ u64 processed;
+ u64 dropped;
+ u64 info;
+};
+#define MAX_CPUS 64
+
+struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(struct datarec),
+ .max_entries = MAX_CPUS,
+};
+
+struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(struct datarec),
+ .max_entries = 1,
+};
+
+/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
+ * Code in: kernel/include/trace/events/xdp.h
+ */
+struct cpumap_enqueue_ctx {
+ u64 __pad; // First 8 bytes are not accessible by bpf code
+ int map_id; // offset:8; size:4; signed:1;
+ u32 act; // offset:12; size:4; signed:0;
+ int cpu; // offset:16; size:4; signed:1;
+ unsigned int drops; // offset:20; size:4; signed:0;
+ unsigned int processed; // offset:24; size:4; signed:0;
+ int to_cpu; // offset:28; size:4; signed:1;
+};
+
+SEC("tracepoint/xdp/xdp_cpumap_enqueue")
+int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
+{
+ u32 to_cpu = ctx->to_cpu;
+ struct datarec *rec;
+
+ if (to_cpu >= MAX_CPUS)
+ return 1;
+
+ rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
+ if (!rec)
+ return 0;
+ rec->processed += ctx->processed;
+ rec->dropped += ctx->drops;
+
+ /* Record bulk events, then userspace can calc average bulk size */
+ if (ctx->processed > 0)
+ rec->info += 1;
+
+ return 0;
+}
+
+/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
+ * Code in: kernel/include/trace/events/xdp.h
+ */
+struct cpumap_kthread_ctx {
+ u64 __pad; // First 8 bytes are not accessible by bpf code
+ int map_id; // offset:8; size:4; signed:1;
+ u32 act; // offset:12; size:4; signed:0;
+ int cpu; // offset:16; size:4; signed:1;
+ unsigned int drops; // offset:20; size:4; signed:0;
+ unsigned int processed; // offset:24; size:4; signed:0;
+ int sched; // offset:28; size:4; signed:1;
+};
+
+SEC("tracepoint/xdp/xdp_cpumap_kthread")
+int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
+{
+ struct datarec *rec;
+ u32 key = 0;
+
+ rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
+ if (!rec)
+ return 0;
+ rec->processed += ctx->processed;
+ rec->dropped += ctx->drops;
+
+ /* Count times kthread yielded CPU via schedule call */
+ if (ctx->sched)
+ rec->info++;
+
+ return 0;
+}
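The per-CPU maps added above are read back in the _user.c counterpart: a single bpf_map_lookup_elem() on a BPF_MAP_TYPE_PERCPU_ARRAY returns one value slot per possible CPU, which the caller must sum itself. A self-contained sketch of that summing step, with fake_lookup() standing in for the real map lookup:

#include <stdio.h>
#include <string.h>

#define NR_CPUS 4	/* bpf_num_possible_cpus() in the real tool */

struct datarec { unsigned long long processed, dropped; };

/* Stand-in for bpf_map_lookup_elem() on a PERCPU_ARRAY map fd: the
 * kernel copies out one struct per possible CPU for the given key.
 */
static int fake_lookup(unsigned int key, struct datarec *values)
{
	int i;

	(void)key;
	for (i = 0; i < NR_CPUS; i++) {
		values[i].processed = 100 * (i + 1);
		values[i].dropped = i;
	}
	return 0;
}

int main(void)
{
	struct datarec values[NR_CPUS], total;
	int i;

	memset(&total, 0, sizeof(total));
	if (fake_lookup(0, values))
		return 1;
	/* userspace must sum the per-CPU slots itself */
	for (i = 0; i < NR_CPUS; i++) {
		total.processed += values[i].processed;
		total.dropped += values[i].dropped;
	}
	printf("processed=%llu dropped=%llu\n",
	       total.processed, total.dropped);
	return 0;
}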
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
index eaba165b3549..eec14520d513 100644
--- a/samples/bpf/xdp_monitor_user.c
+++ b/samples/bpf/xdp_monitor_user.c
@@ -1,4 +1,5 @@
-/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
*/
static const char *__doc__=
"XDP monitor tool, based on tracepoints\n"
@@ -40,6 +41,9 @@ static const struct option long_options[] = {
{0, 0, NULL, 0 }
};
+/* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
+#define EXIT_FAIL_MEM 5
+
static void usage(char *argv[])
{
int i;
@@ -108,23 +112,93 @@ static const char *action2str(int action)
return NULL;
}
+/* Common stats data record shared with _kern.c */
+struct datarec {
+ __u64 processed;
+ __u64 dropped;
+ __u64 info;
+};
+#define MAX_CPUS 64
+
+/* Userspace structs for collection of stats from maps */
struct record {
- __u64 counter;
__u64 timestamp;
+ struct datarec total;
+ struct datarec *cpu;
+};
+struct u64rec {
+ __u64 processed;
+};
+struct record_u64 {
+ /* record for _kern side __u64 values */
+ __u64 timestamp;
+ struct u64rec total;
+ struct u64rec *cpu;
};
struct stats_record {
- struct record xdp_redir[REDIR_RES_MAX];
- struct record xdp_exception[XDP_ACTION_MAX];
+ struct record_u64 xdp_redirect[REDIR_RES_MAX];
+ struct record_u64 xdp_exception[XDP_ACTION_MAX];
+ struct record xdp_cpumap_kthread;
+ struct record xdp_cpumap_enqueue[MAX_CPUS];
};
-static void stats_print_headers(bool err_only)
+static bool map_collect_record(int fd, __u32 key, struct record *rec)
{
- if (err_only)
- printf("\n%s\n", __doc_err_only__);
+ /* For percpu maps, userspace gets a value per possible CPU */
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct datarec values[nr_cpus];
+ __u64 sum_processed = 0;
+ __u64 sum_dropped = 0;
+ __u64 sum_info = 0;
+ int i;
+
+ if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
+ fprintf(stderr,
+ "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
+ return false;
+ }
+ /* Get time as close as possible to reading map contents */
+ rec->timestamp = gettime();
- printf("%-14s %-11s %-10s %-18s %-9s\n",
- "ACTION", "result", "pps ", "pps-human-readable", "measure-period");
+ /* Record and sum values from each CPU */
+ for (i = 0; i < nr_cpus; i++) {
+ rec->cpu[i].processed = values[i].processed;
+ sum_processed += values[i].processed;
+ rec->cpu[i].dropped = values[i].dropped;
+ sum_dropped += values[i].dropped;
+ rec->cpu[i].info = values[i].info;
+ sum_info += values[i].info;
+ }
+ rec->total.processed = sum_processed;
+ rec->total.dropped = sum_dropped;
+ rec->total.info = sum_info;
+ return true;
+}
+
+static bool map_collect_record_u64(int fd, __u32 key, struct record_u64 *rec)
+{
+ /* For percpu maps, userspace gets a value per possible CPU */
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct u64rec values[nr_cpus];
+ __u64 sum_total = 0;
+ int i;
+
+ if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
+ fprintf(stderr,
+ "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
+ return false;
+ }
+ /* Get time as close as possible to reading map contents */
+ rec->timestamp = gettime();
+
+ /* Record and sum values from each CPU */
+ for (i = 0; i < nr_cpus; i++) {
+ rec->cpu[i].processed = values[i].processed;
+ sum_total += values[i].processed;
+ }
+ rec->total.processed = sum_total;
+ return true;
}
static double calc_period(struct record *r, struct record *p)
@@ -139,77 +213,203 @@ static double calc_period(struct record *r, struct record *p)
return period_;
}
-static double calc_pps(struct record *r, struct record *p, double period)
+static double calc_period_u64(struct record_u64 *r, struct record_u64 *p)
+{
+ double period_ = 0;
+ __u64 period = 0;
+
+ period = r->timestamp - p->timestamp;
+ if (period > 0)
+ period_ = ((double) period / NANOSEC_PER_SEC);
+
+ return period_;
+}
+
+static double calc_pps(struct datarec *r, struct datarec *p, double period)
+{
+ __u64 packets = 0;
+ double pps = 0;
+
+ if (period > 0) {
+ packets = r->processed - p->processed;
+ pps = packets / period;
+ }
+ return pps;
+}
+
+static double calc_pps_u64(struct u64rec *r, struct u64rec *p, double period)
+{
+ __u64 packets = 0;
+ double pps = 0;
+
+ if (period > 0) {
+ packets = r->processed - p->processed;
+ pps = packets / period;
+ }
+ return pps;
+}
+
+static double calc_drop(struct datarec *r, struct datarec *p, double period)
+{
+ __u64 packets = 0;
+ double pps = 0;
+
+ if (period > 0) {
+ packets = r->dropped - p->dropped;
+ pps = packets / period;
+ }
+ return pps;
+}
+
+static double calc_info(struct datarec *r, struct datarec *p, double period)
{
__u64 packets = 0;
double pps = 0;
if (period > 0) {
- packets = r->counter - p->counter;
+ packets = r->info - p->info;
pps = packets / period;
}
return pps;
}
-static void stats_print(struct stats_record *rec,
- struct stats_record *prev,
+static void stats_print(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
bool err_only)
{
- double period = 0, pps = 0;
- struct record *r, *p;
- int i = 0;
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ int rec_i = 0, i, to_cpu;
+ double t = 0, pps = 0;
- char *fmt = "%-14s %-11s %-10.0f %'-18.0f %f\n";
+ /* Header */
+ printf("%-15s %-7s %-12s %-12s %-9s\n",
+ "XDP-event", "CPU:to", "pps", "drop-pps", "extra-info");
/* tracepoint: xdp:xdp_redirect_* */
if (err_only)
- i = REDIR_ERROR;
-
- for (; i < REDIR_RES_MAX; i++) {
- r = &rec->xdp_redir[i];
- p = &prev->xdp_redir[i];
-
- if (p->timestamp) {
- period = calc_period(r, p);
- pps = calc_pps(r, p, period);
+ rec_i = REDIR_ERROR;
+
+ for (; rec_i < REDIR_RES_MAX; rec_i++) {
+ struct record_u64 *rec, *prev;
+ char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
+ char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";
+
+ rec = &stats_rec->xdp_redirect[rec_i];
+ prev = &stats_prev->xdp_redirect[rec_i];
+ t = calc_period_u64(rec, prev);
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct u64rec *r = &rec->cpu[i];
+ struct u64rec *p = &prev->cpu[i];
+
+ pps = calc_pps_u64(r, p, t);
+ if (pps > 0)
+ printf(fmt1, "XDP_REDIRECT", i,
+ rec_i ? 0.0: pps, rec_i ? pps : 0.0,
+ err2str(rec_i));
}
- printf(fmt, "XDP_REDIRECT", err2str(i), pps, pps, period);
+ pps = calc_pps_u64(&rec->total, &prev->total, t);
+ printf(fmt2, "XDP_REDIRECT", "total",
+ rec_i ? 0.0: pps, rec_i ? pps : 0.0, err2str(rec_i));
}
/* tracepoint: xdp:xdp_exception */
- for (i = 0; i < XDP_ACTION_MAX; i++) {
- r = &rec->xdp_exception[i];
- p = &prev->xdp_exception[i];
- if (p->timestamp) {
- period = calc_period(r, p);
- pps = calc_pps(r, p, period);
+ for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) {
+ struct record_u64 *rec, *prev;
+ char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
+ char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";
+
+ rec = &stats_rec->xdp_exception[rec_i];
+ prev = &stats_prev->xdp_exception[rec_i];
+ t = calc_period_u64(rec, prev);
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct u64rec *r = &rec->cpu[i];
+ struct u64rec *p = &prev->cpu[i];
+
+ pps = calc_pps_u64(r, p, t);
+ if (pps > 0)
+ printf(fmt1, "Exception", i,
+ 0.0, pps, action2str(rec_i));
}
+ pps = calc_pps_u64(&rec->total, &prev->total, t);
if (pps > 0)
- printf(fmt, action2str(i), "Exception",
- pps, pps, period);
+ printf(fmt2, "Exception", "total",
+ 0.0, pps, action2str(rec_i));
}
- printf("\n");
-}
-static __u64 get_key32_value64_percpu(int fd, __u32 key)
-{
- /* For percpu maps, userspace gets a value per possible CPU */
- unsigned int nr_cpus = bpf_num_possible_cpus();
- __u64 values[nr_cpus];
- __u64 sum = 0;
- int i;
-
- if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
- fprintf(stderr,
- "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
- return 0;
+ /* cpumap enqueue stats */
+ for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
+ char *fmt1 = "%-15s %3d:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
+ char *fmt2 = "%-15s %3s:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
+ struct record *rec, *prev;
+ char *info_str = "";
+ double drop, info;
+
+ rec = &stats_rec->xdp_cpumap_enqueue[to_cpu];
+ prev = &stats_prev->xdp_cpumap_enqueue[to_cpu];
+ t = calc_period(rec, prev);
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+
+ pps = calc_pps(r, p, t);
+ drop = calc_drop(r, p, t);
+ info = calc_info(r, p, t);
+ if (info > 0) {
+ info_str = "bulk-average";
+ info = pps / info; /* calc average bulk size */
+ }
+ if (pps > 0)
+ printf(fmt1, "cpumap-enqueue",
+ i, to_cpu, pps, drop, info, info_str);
+ }
+ pps = calc_pps(&rec->total, &prev->total, t);
+ if (pps > 0) {
+ drop = calc_drop(&rec->total, &prev->total, t);
+ info = calc_info(&rec->total, &prev->total, t);
+ if (info > 0) {
+ info_str = "bulk-average";
+ info = pps / info; /* calc average bulk size */
+ }
+ printf(fmt2, "cpumap-enqueue",
+ "sum", to_cpu, pps, drop, info, info_str);
+ }
}
- /* Sum values from each CPU */
- for (i = 0; i < nr_cpus; i++) {
- sum += values[i];
+ /* cpumap kthread stats */
+ {
+ char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.0f %s\n";
+ char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.0f %s\n";
+ struct record *rec, *prev;
+ double drop, info;
+ char *i_str = "";
+
+ rec = &stats_rec->xdp_cpumap_kthread;
+ prev = &stats_prev->xdp_cpumap_kthread;
+ t = calc_period(rec, prev);
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+
+ pps = calc_pps(r, p, t);
+ drop = calc_drop(r, p, t);
+ info = calc_info(r, p, t);
+ if (info > 0)
+ i_str = "sched";
+ if (pps > 0)
+ printf(fmt1, "cpumap-kthread",
+ i, pps, drop, info, i_str);
+ }
+ pps = calc_pps(&rec->total, &prev->total, t);
+ drop = calc_drop(&rec->total, &prev->total, t);
+ info = calc_info(&rec->total, &prev->total, t);
+ if (info > 0)
+ i_str = "sched-sum";
+ printf(fmt2, "cpumap-kthread", "total", pps, drop, info, i_str);
}
- return sum;
+
+ printf("\n");
}
static bool stats_collect(struct stats_record *rec)
@@ -222,25 +422,109 @@ static bool stats_collect(struct stats_record *rec)
*/
fd = map_data[0].fd; /* map0: redirect_err_cnt */
- for (i = 0; i < REDIR_RES_MAX; i++) {
- rec->xdp_redir[i].timestamp = gettime();
- rec->xdp_redir[i].counter = get_key32_value64_percpu(fd, i);
- }
+ for (i = 0; i < REDIR_RES_MAX; i++)
+ map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);
fd = map_data[1].fd; /* map1: exception_cnt */
for (i = 0; i < XDP_ACTION_MAX; i++) {
- rec->xdp_exception[i].timestamp = gettime();
- rec->xdp_exception[i].counter = get_key32_value64_percpu(fd, i);
+ map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
}
+ fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
+ for (i = 0; i < MAX_CPUS; i++)
+ map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);
+
+ fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
+ map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);
+
return true;
}
+static void *alloc_rec_per_cpu(int record_size)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ void *array;
+ size_t size;
+
+ size = record_size * nr_cpus;
+ array = malloc(size);
+ if (!array) {
+ fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+ exit(EXIT_FAIL_MEM);
+ }
+ memset(array, 0, size); /* only zero after the NULL check */
+ return array;
+}
+
+static struct stats_record *alloc_stats_record(void)
+{
+ struct stats_record *rec;
+ int rec_sz;
+ int i;
+
+ /* Alloc main stats_record structure */
+ rec = malloc(sizeof(*rec));
+ if (!rec) {
+ fprintf(stderr, "Mem alloc error\n");
+ exit(EXIT_FAIL_MEM);
+ }
+ memset(rec, 0, sizeof(*rec));
+
+ /* Alloc stats stored per CPU for each record */
+ rec_sz = sizeof(struct u64rec);
+ for (i = 0; i < REDIR_RES_MAX; i++)
+ rec->xdp_redirect[i].cpu = alloc_rec_per_cpu(rec_sz);
+
+ for (i = 0; i < XDP_ACTION_MAX; i++)
+ rec->xdp_exception[i].cpu = alloc_rec_per_cpu(rec_sz);
+
+ rec_sz = sizeof(struct datarec);
+ rec->xdp_cpumap_kthread.cpu = alloc_rec_per_cpu(rec_sz);
+
+ for (i = 0; i < MAX_CPUS; i++)
+ rec->xdp_cpumap_enqueue[i].cpu = alloc_rec_per_cpu(rec_sz);
+
+ return rec;
+}
+
+static void free_stats_record(struct stats_record *r)
+{
+ int i;
+
+ for (i = 0; i < REDIR_RES_MAX; i++)
+ free(r->xdp_redirect[i].cpu);
+
+ for (i = 0; i < XDP_ACTION_MAX; i++)
+ free(r->xdp_exception[i].cpu);
+
+ free(r->xdp_cpumap_kthread.cpu);
+
+ for (i = 0; i < MAX_CPUS; i++)
+ free(r->xdp_cpumap_enqueue[i].cpu);
+
+ free(r);
+}
+
+/* Pointer swap trick */
+static inline void swap(struct stats_record **a, struct stats_record **b)
+{
+ struct stats_record *tmp;
+
+ tmp = *a;
+ *a = *b;
+ *b = tmp;
+}
+
static void stats_poll(int interval, bool err_only)
{
- struct stats_record rec, prev;
+ struct stats_record *rec, *prev;
- memset(&rec, 0, sizeof(rec));
+ rec = alloc_stats_record();
+ prev = alloc_stats_record();
+ stats_collect(rec);
+
+ if (err_only)
+ printf("\n%s\n", __doc_err_only__);
/* Trick to pretty printf with thousands separators use %' */
setlocale(LC_NUMERIC, "en_US");
@@ -258,13 +542,15 @@ static void stats_poll(int interval, bool err_only)
fflush(stdout);
while (1) {
- memcpy(&prev, &rec, sizeof(rec));
- stats_collect(&rec);
- stats_print_headers(err_only);
- stats_print(&rec, &prev, err_only);
+ swap(&prev, &rec);
+ stats_collect(rec);
+ stats_print(rec, prev, err_only);
fflush(stdout);
sleep(interval);
}
+
+ free_stats_record(rec);
+ free_stats_record(prev);
}
static void print_bpf_prog_info(void)
diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
new file mode 100644
index 000000000000..3fd209291653
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_kern.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ * Example of how to extract XDP RX-queue info
+ */
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* Config shared with userspace
+ *
+ * Userspace sets up the ifindex in config_map, so the program can
+ * verify ctx->ingress_ifindex against the configured ifindex.
+ */
+struct config {
+ __u32 action;
+ int ifindex;
+};
+struct bpf_map_def SEC("maps") config_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct config),
+ .max_entries = 1,
+};
+
+/* Common stats data record (shared with userspace) */
+struct datarec {
+ __u64 processed;
+ __u64 issue;
+};
+
+struct bpf_map_def SEC("maps") stats_global_map = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(struct datarec),
+ .max_entries = 1,
+};
+
+#define MAX_RXQs 64
+
+/* Stats per rx_queue_index (per CPU) */
+struct bpf_map_def SEC("maps") rx_queue_index_map = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(struct datarec),
+ .max_entries = MAX_RXQs + 1,
+};
+
+SEC("xdp_prog0")
+int xdp_prognum0(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct datarec *rec, *rxq_rec;
+ int ingress_ifindex;
+ struct config *config;
+ u32 key = 0;
+
+ /* Global stats record */
+ rec = bpf_map_lookup_elem(&stats_global_map, &key);
+ if (!rec)
+ return XDP_ABORTED;
+ rec->processed++;
+
+ /* Accessing ctx->ingress_ifindex causes the kernel to rewrite the
+  * BPF instructions to access xdp_rxq->dev->ifindex
+  */
+ ingress_ifindex = ctx->ingress_ifindex;
+
+ config = bpf_map_lookup_elem(&config_map, &key);
+ if (!config)
+ return XDP_ABORTED;
+
+ /* Simple test: check ctx provided ifindex is as expected */
+ if (ingress_ifindex != config->ifindex) {
+ /* count this error case */
+ rec->issue++;
+ return XDP_ABORTED;
+ }
+
+ /* Update stats per rx_queue_index. Handle if rx_queue_index
+ * is larger than stats map can contain info for.
+ */
+ key = ctx->rx_queue_index;
+ if (key >= MAX_RXQs)
+ key = MAX_RXQs;
+ rxq_rec = bpf_map_lookup_elem(&rx_queue_index_map, &key);
+ if (!rxq_rec)
+ return XDP_ABORTED;
+ rxq_rec->processed++;
+ if (key == MAX_RXQs)
+ rxq_rec->issue++;
+
+ return config->action;
+}
+
+char _license[] SEC("license") = "GPL";
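xdp_rxq_info_kern.c sizes rx_queue_index_map at MAX_RXQs + 1 and clamps any larger rx_queue_index into the extra last slot, so out-of-range queues are still counted (and flagged via the issue counter) instead of aborting silently. The clamp in isolation:

#include <stdio.h>

#define MAX_RXQs 64

/* Clamp a queue index into the stats map: indexes >= MAX_RXQs all
 * land in the extra last slot, which doubles as an overflow counter.
 */
static unsigned int rxq_slot(unsigned int rx_queue_index)
{
	return rx_queue_index >= MAX_RXQs ? MAX_RXQs : rx_queue_index;
}

int main(void)
{
	printf("rxq 3 -> slot %u, rxq 200 -> slot %u\n",
	       rxq_slot(3), rxq_slot(200));
	return 0;
}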
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
new file mode 100644
index 000000000000..32430e8b3a6a
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ */
+static const char *__doc__ = " XDP RX-queue info extract example\n\n"
+ "Monitor how many packets per sec (pps) are received\n"
+ "per NIC RX queue index and which CPU processed the packet\n"
+ ;
+
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <locale.h>
+#include <sys/resource.h>
+#include <getopt.h>
+#include <net/if.h>
+#include <time.h>
+
+#include <arpa/inet.h>
+#include <linux/if_link.h>
+
+#include "libbpf.h"
+#include "bpf_load.h"
+#include "bpf_util.h"
+
+static int ifindex = -1;
+static char ifname_buf[IF_NAMESIZE];
+static char *ifname;
+
+static __u32 xdp_flags;
+
+/* Exit return codes */
+#define EXIT_OK 0
+#define EXIT_FAIL 1
+#define EXIT_FAIL_OPTION 2
+#define EXIT_FAIL_XDP 3
+#define EXIT_FAIL_BPF 4
+#define EXIT_FAIL_MEM 5
+
+static const struct option long_options[] = {
+ {"help", no_argument, NULL, 'h' },
+ {"dev", required_argument, NULL, 'd' },
+ {"skb-mode", no_argument, NULL, 'S' },
+ {"sec", required_argument, NULL, 's' },
+ {"no-separators", no_argument, NULL, 'z' },
+ {"action", required_argument, NULL, 'a' },
+ {0, 0, NULL, 0 }
+};
+
+static void int_exit(int sig)
+{
+ fprintf(stderr,
+ "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
+ ifindex, ifname);
+ if (ifindex > -1)
+ set_link_xdp_fd(ifindex, -1, xdp_flags);
+ exit(EXIT_OK);
+}
+
+struct config {
+ __u32 action;
+ int ifindex;
+};
+#define XDP_ACTION_MAX (XDP_TX + 1)
+#define XDP_ACTION_MAX_STRLEN 11
+static const char *xdp_action_names[XDP_ACTION_MAX] = {
+ [XDP_ABORTED] = "XDP_ABORTED",
+ [XDP_DROP] = "XDP_DROP",
+ [XDP_PASS] = "XDP_PASS",
+ [XDP_TX] = "XDP_TX",
+};
+
+static const char *action2str(int action)
+{
+ if (action < XDP_ACTION_MAX)
+ return xdp_action_names[action];
+ return NULL;
+}
+
+static int parse_xdp_action(char *action_str)
+{
+ size_t maxlen;
+ __u64 action = -1;
+ int i;
+
+ for (i = 0; i < XDP_ACTION_MAX; i++) {
+ maxlen = XDP_ACTION_MAX_STRLEN;
+ if (strncmp(xdp_action_names[i], action_str, maxlen) == 0) {
+ action = i;
+ break;
+ }
+ }
+ return action;
+}
+
+static void list_xdp_actions(void)
+{
+ int i;
+
+ printf("Available XDP --action <options>\n");
+ for (i = 0; i < XDP_ACTION_MAX; i++)
+ printf("\t%s\n", xdp_action_names[i]);
+ printf("\n");
+}
+
+static void usage(char *argv[])
+{
+ int i;
+
+ printf("\nDOCUMENTATION:\n%s\n", __doc__);
+ printf(" Usage: %s (options-see-below)\n", argv[0]);
+ printf(" Listing options:\n");
+ for (i = 0; long_options[i].name != 0; i++) {
+ printf(" --%-12s", long_options[i].name);
+ if (long_options[i].flag != NULL)
+ printf(" flag (internal value:%d)",
+ *long_options[i].flag);
+ else
+ printf(" short-option: -%c",
+ long_options[i].val);
+ printf("\n");
+ }
+ printf("\n");
+ list_xdp_actions();
+}
+
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+static __u64 gettime(void)
+{
+ struct timespec t;
+ int res;
+
+ res = clock_gettime(CLOCK_MONOTONIC, &t);
+ if (res < 0) {
+ fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
+ exit(EXIT_FAIL);
+ }
+ return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
+}
+
+/* Common stats data record shared with _kern.c */
+struct datarec {
+ __u64 processed;
+ __u64 issue;
+};
+struct record {
+ __u64 timestamp;
+ struct datarec total;
+ struct datarec *cpu;
+};
+struct stats_record {
+ struct record stats;
+ struct record *rxq;
+};
+
+static struct datarec *alloc_record_per_cpu(void)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct datarec *array;
+ size_t size;
+
+ size = sizeof(struct datarec) * nr_cpus;
+ array = malloc(size);
+ if (!array) {
+ fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+ exit(EXIT_FAIL_MEM);
+ }
+ memset(array, 0, size); /* only zero after the NULL check */
+ return array;
+}
+
+static struct record *alloc_record_per_rxq(void)
+{
+ unsigned int nr_rxqs = map_data[2].def.max_entries;
+ struct record *array;
+ size_t size;
+
+ size = sizeof(struct record) * nr_rxqs;
+ array = malloc(size);
+ if (!array) {
+ fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
+ exit(EXIT_FAIL_MEM);
+ }
+ memset(array, 0, size);
+ return array;
+}
+
+static struct stats_record *alloc_stats_record(void)
+{
+ unsigned int nr_rxqs = map_data[2].def.max_entries;
+ struct stats_record *rec;
+ int i;
+
+ rec = malloc(sizeof(*rec));
+ if (!rec) {
+ fprintf(stderr, "Mem alloc error\n");
+ exit(EXIT_FAIL_MEM);
+ }
+ memset(rec, 0, sizeof(*rec));
+ rec->rxq = alloc_record_per_rxq();
+ for (i = 0; i < nr_rxqs; i++)
+ rec->rxq[i].cpu = alloc_record_per_cpu();
+
+ rec->stats.cpu = alloc_record_per_cpu();
+ return rec;
+}
+
+static void free_stats_record(struct stats_record *r)
+{
+ unsigned int nr_rxqs = map_data[2].def.max_entries;
+ int i;
+
+ for (i = 0; i < nr_rxqs; i++)
+ free(r->rxq[i].cpu);
+
+ free(r->rxq);
+ free(r->stats.cpu);
+ free(r);
+}
+
+static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
+{
+ /* For percpu maps, userspace gets a value per possible CPU */
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct datarec values[nr_cpus];
+ __u64 sum_processed = 0;
+ __u64 sum_issue = 0;
+ int i;
+
+ if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
+ fprintf(stderr,
+ "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
+ return false;
+ }
+ /* Get time as close as possible to reading map contents */
+ rec->timestamp = gettime();
+
+ /* Record and sum values from each CPU */
+ for (i = 0; i < nr_cpus; i++) {
+ rec->cpu[i].processed = values[i].processed;
+ sum_processed += values[i].processed;
+ rec->cpu[i].issue = values[i].issue;
+ sum_issue += values[i].issue;
+ }
+ rec->total.processed = sum_processed;
+ rec->total.issue = sum_issue;
+ return true;
+}
+
+static void stats_collect(struct stats_record *rec)
+{
+ int fd, i, max_rxqs;
+
+ fd = map_data[1].fd; /* map: stats_global_map */
+ map_collect_percpu(fd, 0, &rec->stats);
+
+ fd = map_data[2].fd; /* map: rx_queue_index_map */
+ max_rxqs = map_data[2].def.max_entries;
+ for (i = 0; i < max_rxqs; i++)
+ map_collect_percpu(fd, i, &rec->rxq[i]);
+}
+
+static double calc_period(struct record *r, struct record *p)
+{
+ double period_ = 0;
+ __u64 period = 0;
+
+ period = r->timestamp - p->timestamp;
+ if (period > 0)
+ period_ = ((double) period / NANOSEC_PER_SEC);
+
+ return period_;
+}
+
+static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
+{
+ __u64 packets = 0;
+ __u64 pps = 0;
+
+ if (period_ > 0) {
+ packets = r->processed - p->processed;
+ pps = packets / period_;
+ }
+ return pps;
+}
+
+static __u64 calc_errs_pps(struct datarec *r,
+ struct datarec *p, double period_)
+{
+ __u64 packets = 0;
+ __u64 pps = 0;
+
+ if (period_ > 0) {
+ packets = r->issue - p->issue;
+ pps = packets / period_;
+ }
+ return pps;
+}
+
+static void stats_print(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ int action)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ unsigned int nr_rxqs = map_data[2].def.max_entries;
+ double pps = 0, err = 0;
+ struct record *rec, *prev;
+ double t;
+ int rxq;
+ int i;
+
+ /* Header */
+ printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n",
+ ifname, ifindex, action2str(action));
+
+ /* stats_global_map */
+ {
+ char *fmt_rx = "%-15s %-7d %'-11.0f %'-10.0f %s\n";
+ char *fm2_rx = "%-15s %-7s %'-11.0f\n";
+ char *errstr = "";
+
+ printf("%-15s %-7s %-11s %-11s\n",
+ "XDP stats", "CPU", "pps", "issue-pps");
+
+ rec = &stats_rec->stats;
+ prev = &stats_prev->stats;
+ t = calc_period(rec, prev);
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+
+ pps = calc_pps (r, p, t);
+ err = calc_errs_pps(r, p, t);
+ if (err > 0)
+ errstr = "invalid-ifindex";
+ if (pps > 0)
+ printf(fmt_rx, "XDP-RX CPU",
+ i, pps, err, errstr);
+ }
+ pps = calc_pps (&rec->total, &prev->total, t);
+ err = calc_errs_pps(&rec->total, &prev->total, t);
+ printf(fm2_rx, "XDP-RX CPU", "total", pps, err);
+ }
+
+ /* rx_queue_index_map */
+ printf("\n%-15s %-7s %-11s %-11s\n",
+ "RXQ stats", "RXQ:CPU", "pps", "issue-pps");
+
+ for (rxq = 0; rxq < nr_rxqs; rxq++) {
+ char *fmt_rx = "%-15s %3d:%-3d %'-11.0f %'-10.0f %s\n";
+ char *fm2_rx = "%-15s %3d:%-3s %'-11.0f\n";
+ char *errstr = "";
+ int rxq_ = rxq;
+
+ /* Last RXQ in map catches overflows; shown as rxq -1 */
+ if (rxq_ == nr_rxqs - 1)
+ rxq_ = -1;
+
+ rec = &stats_rec->rxq[rxq];
+ prev = &stats_prev->rxq[rxq];
+ t = calc_period(rec, prev);
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+
+ pps = calc_pps (r, p, t);
+ err = calc_errs_pps(r, p, t);
+ if (err > 0) {
+ if (rxq_ == -1)
+ errstr = "map-overflow-RXQ";
+ else
+ errstr = "err";
+ }
+ if (pps > 0)
+ printf(fmt_rx, "rx_queue_index",
+ rxq_, i, pps, err, errstr);
+ }
+ pps = calc_pps (&rec->total, &prev->total, t);
+ err = calc_errs_pps(&rec->total, &prev->total, t);
+ if (pps || err)
+ printf(fm2_rx, "rx_queue_index", rxq_, "sum", pps, err);
+ }
+}
+
+
+/* Pointer swap trick */
+static inline void swap(struct stats_record **a, struct stats_record **b)
+{
+ struct stats_record *tmp;
+
+ tmp = *a;
+ *a = *b;
+ *b = tmp;
+}
+
+static void stats_poll(int interval, int action)
+{
+ struct stats_record *record, *prev;
+
+ record = alloc_stats_record();
+ prev = alloc_stats_record();
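+ /* Take an initial sample so the first printed interval has a baseline */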
+ stats_collect(record);
+
+ while (1) {
+ swap(&prev, &record);
+ stats_collect(record);
+ stats_print(record, prev, action);
+ sleep(interval);
+ }
+
+ free_stats_record(record);
+ free_stats_record(prev);
+}
+
+
+int main(int argc, char **argv)
+{
+ struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+ bool use_separators = true;
+ struct config cfg = { 0 };
+ char filename[256];
+ int longindex = 0;
+ int interval = 2;
+ __u32 key = 0;
+ int opt, err;
+
+ char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 };
+ int action = XDP_PASS; /* Default action */
+ char *action_str = NULL;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ perror("setrlimit(RLIMIT_MEMLOCK)");
+ return 1;
+ }
+
+ if (load_bpf_file(filename)) {
+ fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf);
+ return EXIT_FAIL;
+ }
+
+ if (!prog_fd[0]) {
+ fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
+ return EXIT_FAIL;
+ }
+
+ /* Parse command line args */
+ while ((opt = getopt_long(argc, argv, "hSd:",
+ long_options, &longindex)) != -1) {
+ switch (opt) {
+ case 'd':
+ if (strlen(optarg) >= IF_NAMESIZE) {
+ fprintf(stderr, "ERR: --dev name too long\n");
+ goto error;
+ }
+ ifname = (char *)&ifname_buf;
+ strncpy(ifname, optarg, IF_NAMESIZE);
+ ifindex = if_nametoindex(ifname);
+ if (ifindex == 0) {
+ fprintf(stderr,
+ "ERR: --dev name unknown err(%d):%s\n",
+ errno, strerror(errno));
+ goto error;
+ }
+ break;
+ case 's':
+ interval = atoi(optarg);
+ break;
+ case 'S':
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ case 'z':
+ use_separators = false;
+ break;
+ case 'a':
+ action_str = (char *)&action_str_buf;
+ strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN);
+ break;
+ case 'h':
+ error:
+ default:
+ usage(argv);
+ return EXIT_FAIL_OPTION;
+ }
+ }
+ /* Required option */
+ if (ifindex == -1) {
+ fprintf(stderr, "ERR: required option --dev missing\n");
+ usage(argv);
+ return EXIT_FAIL_OPTION;
+ }
+ cfg.ifindex = ifindex;
+
+ /* Parse action string */
+ if (action_str) {
+ action = parse_xdp_action(action_str);
+ if (action < 0) {
+ fprintf(stderr, "ERR: Invalid XDP --action: %s\n",
+ action_str);
+ list_xdp_actions();
+ return EXIT_FAIL_OPTION;
+ }
+ }
+ cfg.action = action;
+
+ /* Trick to make printf pretty-print thousands separators via %' */
+ if (use_separators)
+ setlocale(LC_NUMERIC, "en_US");
+
+ /* User side: store the ifindex in config_map */
+ err = bpf_map_update_elem(map_fd[0], &key, &cfg, 0);
+ if (err) {
+ fprintf(stderr, "Store config failed (err:%d)\n", err);
+ exit(EXIT_FAIL_BPF);
+ }
+
+ /* Remove XDP program when program is interrupted */
+ signal(SIGINT, int_exit);
+
+ if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) {
+ fprintf(stderr, "link set xdp fd failed\n");
+ return EXIT_FAIL_XDP;
+ }
+
+ stats_poll(interval, action);
+ return EXIT_OK;
+}
diff --git a/samples/kobject/kobject-example.c b/samples/kobject/kobject-example.c
index 2e0740f06cd7..9e383fdbaa00 100644
--- a/samples/kobject/kobject-example.c
+++ b/samples/kobject/kobject-example.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Sample kobject implementation
*
* Copyright (C) 2004-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2007 Novell Inc.
- *
- * Released under the GPL version 2 only.
- *
*/
#include <linux/kobject.h>
#include <linux/string.h>
diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
index a55bff52bde3..401328fd687d 100644
--- a/samples/kobject/kset-example.c
+++ b/samples/kobject/kset-example.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Sample kset and ktype implementation
*
* Copyright (C) 2004-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2007 Novell Inc.
- *
- * Released under the GPL version 2 only.
- *
*/
#include <linux/kobject.h>
#include <linux/string.h>
diff --git a/samples/livepatch/livepatch-callbacks-demo.c b/samples/livepatch/livepatch-callbacks-demo.c
index 3d115bd68442..72f9e6d1387b 100644
--- a/samples/livepatch/livepatch-callbacks-demo.c
+++ b/samples/livepatch/livepatch-callbacks-demo.c
@@ -197,21 +197,6 @@ static int livepatch_callbacks_demo_init(void)
{
int ret;
- if (!klp_have_reliable_stack() && !patch.immediate) {
- /*
- * WARNING: Be very careful when using 'patch.immediate' in
- * your patches. It's ok to use it for simple patches like
- * this, but for more complex patches which change function
- * semantics, locking semantics, or data structures, it may not
- * be safe. Use of this option will also prevent removal of
- * the patch.
- *
- * See Documentation/livepatch/livepatch.txt for more details.
- */
- patch.immediate = true;
- pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
- }
-
ret = klp_register_patch(&patch);
if (ret)
return ret;
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
index 84795223f15f..2d554dd930e2 100644
--- a/samples/livepatch/livepatch-sample.c
+++ b/samples/livepatch/livepatch-sample.c
@@ -71,21 +71,6 @@ static int livepatch_init(void)
{
int ret;
- if (!klp_have_reliable_stack() && !patch.immediate) {
- /*
- * WARNING: Be very careful when using 'patch.immediate' in
- * your patches. It's ok to use it for simple patches like
- * this, but for more complex patches which change function
- * semantics, locking semantics, or data structures, it may not
- * be safe. Use of this option will also prevent removal of
- * the patch.
- *
- * See Documentation/livepatch/livepatch.txt for more details.
- */
- patch.immediate = true;
- pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
- }
-
ret = klp_register_patch(&patch);
if (ret)
return ret;
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
index fbe0a1f3d99b..830c55514f9f 100644
--- a/samples/livepatch/livepatch-shadow-fix1.c
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -133,21 +133,6 @@ static int livepatch_shadow_fix1_init(void)
{
int ret;
- if (!klp_have_reliable_stack() && !patch.immediate) {
- /*
- * WARNING: Be very careful when using 'patch.immediate' in
- * your patches. It's ok to use it for simple patches like
- * this, but for more complex patches which change function
- * semantics, locking semantics, or data structures, it may not
- * be safe. Use of this option will also prevent removal of
- * the patch.
- *
- * See Documentation/livepatch/livepatch.txt for more details.
- */
- patch.immediate = true;
- pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
- }
-
ret = klp_register_patch(&patch);
if (ret)
return ret;
diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c
index 53c1794bdc5f..ff9948f0ec00 100644
--- a/samples/livepatch/livepatch-shadow-fix2.c
+++ b/samples/livepatch/livepatch-shadow-fix2.c
@@ -128,21 +128,6 @@ static int livepatch_shadow_fix2_init(void)
{
int ret;
- if (!klp_have_reliable_stack() && !patch.immediate) {
- /*
- * WARNING: Be very careful when using 'patch.immediate' in
- * your patches. It's ok to use it for simple patches like
- * this, but for more complex patches which change function
- * semantics, locking semantics, or data structures, it may not
- * be safe. Use of this option will also prevent removal of
- * the patch.
- *
- * See Documentation/livepatch/livepatch.txt for more details.
- */
- patch.immediate = true;
- pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
- }
-
ret = klp_register_patch(&patch);
if (ret)
return ret;
diff --git a/samples/sockmap/sockmap_user.c b/samples/sockmap/sockmap_user.c
index 7cc9d228216f..7c25c0c112bc 100644
--- a/samples/sockmap/sockmap_user.c
+++ b/samples/sockmap/sockmap_user.c
@@ -23,8 +23,11 @@
#include <stdbool.h>
#include <signal.h>
#include <fcntl.h>
+#include <sys/wait.h>
+#include <time.h>
#include <sys/time.h>
+#include <sys/resource.h>
#include <sys/types.h>
#include <linux/netlink.h>
@@ -35,6 +38,8 @@
#include <assert.h>
#include <libgen.h>
+#include <getopt.h>
+
#include "../bpf/bpf_load.h"
#include "../bpf/bpf_util.h"
#include "../bpf/libbpf.h"
@@ -46,15 +51,42 @@ void running_handler(int a);
#define S1_PORT 10000
#define S2_PORT 10001
-static int sockmap_test_sockets(int rate, int dot)
+/* global sockets */
+int s1, s2, c1, c2, p1, p2;
+
+static const struct option long_options[] = {
+ {"help", no_argument, NULL, 'h' },
+ {"cgroup", required_argument, NULL, 'c' },
+ {"rate", required_argument, NULL, 'r' },
+ {"verbose", no_argument, NULL, 'v' },
+ {"iov_count", required_argument, NULL, 'i' },
+ {"length", required_argument, NULL, 'l' },
+ {"test", required_argument, NULL, 't' },
+ {0, 0, NULL, 0 }
+};
+
+static void usage(char *argv[])
+{
+ int i;
+
+ printf(" Usage: %s --cgroup <cgroup_path>\n", argv[0]);
+ printf(" options:\n");
+ for (i = 0; long_options[i].name != 0; i++) {
+ printf(" --%-12s", long_options[i].name);
+ if (long_options[i].flag != NULL)
+ printf(" flag (internal value:%d)\n",
+ *long_options[i].flag);
+ else
+ printf(" -%c\n", long_options[i].val);
+ }
+ printf("\n");
+}
+
+static int sockmap_init_sockets(void)
{
- int i, sc, err, max_fd, one = 1;
- int s1, s2, c1, c2, p1, p2;
+ int i, err, one = 1;
struct sockaddr_in addr;
- struct timeval timeout;
- char buf[1024] = {0};
int *fds[4] = {&s1, &s2, &c1, &c2};
- fd_set w;
s1 = s2 = p1 = p2 = c1 = c2 = 0;
@@ -63,8 +95,7 @@ static int sockmap_test_sockets(int rate, int dot)
*fds[i] = socket(AF_INET, SOCK_STREAM, 0);
if (*fds[i] < 0) {
perror("socket s1 failed()");
- err = *fds[i];
- goto out;
+ return errno;
}
}
@@ -74,16 +105,16 @@ static int sockmap_test_sockets(int rate, int dot)
(char *)&one, sizeof(one));
if (err) {
perror("setsockopt failed()");
- goto out;
+ return errno;
}
}
/* Non-blocking sockets */
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 2; i++) {
err = ioctl(*fds[i], FIONBIO, (char *)&one);
if (err < 0) {
perror("ioctl s1 failed()");
- goto out;
+ return errno;
}
}
@@ -96,14 +127,14 @@ static int sockmap_test_sockets(int rate, int dot)
err = bind(s1, (struct sockaddr *)&addr, sizeof(addr));
if (err < 0) {
perror("bind s1 failed()\n");
- goto out;
+ return errno;
}
addr.sin_port = htons(S2_PORT);
err = bind(s2, (struct sockaddr *)&addr, sizeof(addr));
if (err < 0) {
perror("bind s2 failed()\n");
- goto out;
+ return errno;
}
/* Listen server sockets */
@@ -111,14 +142,14 @@ static int sockmap_test_sockets(int rate, int dot)
err = listen(s1, 32);
if (err < 0) {
perror("listen s1 failed()\n");
- goto out;
+ return errno;
}
addr.sin_port = htons(S2_PORT);
err = listen(s2, 32);
if (err < 0) {
perror("listen s1 failed()\n");
- goto out;
+ return errno;
}
/* Initiate Connect */
@@ -126,46 +157,232 @@ static int sockmap_test_sockets(int rate, int dot)
err = connect(c1, (struct sockaddr *)&addr, sizeof(addr));
if (err < 0 && errno != EINPROGRESS) {
perror("connect c1 failed()\n");
- goto out;
+ return errno;
}
addr.sin_port = htons(S2_PORT);
err = connect(c2, (struct sockaddr *)&addr, sizeof(addr));
if (err < 0 && errno != EINPROGRESS) {
perror("connect c2 failed()\n");
- goto out;
+ return errno;
+ } else if (err < 0) {
+ err = 0;
}
/* Accept Connections */
p1 = accept(s1, NULL, NULL);
if (p1 < 0) {
perror("accept s1 failed()\n");
- goto out;
+ return errno;
}
p2 = accept(s2, NULL, NULL);
if (p2 < 0) {
perror("accept s1 failed()\n");
- goto out;
+ return errno;
}
- max_fd = p2;
- timeout.tv_sec = 10;
- timeout.tv_usec = 0;
-
printf("connected sockets: c1 <-> p1, c2 <-> p2\n");
printf("cgroups binding: c1(%i) <-> s1(%i) - - - c2(%i) <-> s2(%i)\n",
c1, s1, c2, s2);
+ return 0;
+}
+
+struct msg_stats {
+ size_t bytes_sent;
+ size_t bytes_recvd;
+ struct timespec start;
+ struct timespec end;
+};
+
+static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
+ struct msg_stats *s, bool tx)
+{
+ struct msghdr msg = {0};
+ int err, i, flags = MSG_NOSIGNAL;
+ struct iovec *iov;
+
+ iov = calloc(iov_count, sizeof(struct iovec));
+ if (!iov)
+ return errno;
+
+ for (i = 0; i < iov_count; i++) {
+ char *d = calloc(iov_length, sizeof(char));
+
+ if (!d) {
+ fprintf(stderr, "iov_count %i/%i OOM\n", i, iov_count);
+ goto out_errno;
+ }
+ iov[i].iov_base = d;
+ iov[i].iov_len = iov_length;
+ }
+
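+ /* One msghdr is reused for every sendmsg()/recvmsg() call below */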
+ msg.msg_iov = iov;
+ msg.msg_iovlen = iov_count;
+
+ if (tx) {
+ clock_gettime(CLOCK_MONOTONIC, &s->start);
+ for (i = 0; i < cnt; i++) {
+ int sent = sendmsg(fd, &msg, flags);
+
+ if (sent < 0) {
+ perror("send loop error:");
+ goto out_errno;
+ }
+ s->bytes_sent += sent;
+ }
+ clock_gettime(CLOCK_MONOTONIC, &s->end);
+ } else {
+ int slct, recv, max_fd = fd;
+ struct timeval timeout;
+ float total_bytes;
+ fd_set w;
+
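+ /* Expect iov_count * iov_length bytes per sendmsg(), cnt times */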
+ total_bytes = (float)iov_count * (float)iov_length * (float)cnt;
+ err = clock_gettime(CLOCK_MONOTONIC, &s->start);
+ if (err < 0)
+ perror("recv start time: ");
+ while (s->bytes_recvd < total_bytes) {
+ timeout.tv_sec = 1;
+ timeout.tv_usec = 0;
+
+ /* FD sets */
+ FD_ZERO(&w);
+ FD_SET(fd, &w);
+
+ slct = select(max_fd + 1, &w, NULL, NULL, &timeout);
+ if (slct == -1) {
+ perror("select()");
+ clock_gettime(CLOCK_MONOTONIC, &s->end);
+ goto out_errno;
+ } else if (!slct) {
+ fprintf(stderr, "unexpected timeout\n");
+ errno = EIO;
+ clock_gettime(CLOCK_MONOTONIC, &s->end);
+ goto out_errno;
+ }
+
+ recv = recvmsg(fd, &msg, flags);
+ if (recv < 0) {
+ if (errno != EWOULDBLOCK) {
+ clock_gettime(CLOCK_MONOTONIC, &s->end);
+ perror("recv failed()\n");
+ goto out_errno;
+ }
+ }
+
+ s->bytes_recvd += recv;
+ }
+ clock_gettime(CLOCK_MONOTONIC, &s->end);
+ }
+
+ for (i = 0; i < iov_count; i++)
+ free(iov[i].iov_base);
+ free(iov);
+ return 0;
+out_errno:
+ for (i = 0; i < iov_count; i++)
+ free(iov[i].iov_base);
+ free(iov);
+ return errno;
+}
+
+static float giga = 1000000000;
+
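+/* Whole-second resolution is enough here; callers skip zero-length intervals */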
+static inline float sentBps(struct msg_stats s)
+{
+ return s.bytes_sent / (s.end.tv_sec - s.start.tv_sec);
+}
+
+static inline float recvdBps(struct msg_stats s)
+{
+ return s.bytes_recvd / (s.end.tv_sec - s.start.tv_sec);
+}
+
+static int sendmsg_test(int iov_count, int iov_buf, int cnt,
+ int verbose, bool base)
+{
+ float sent_Bps = 0, recvd_Bps = 0;
+ int rx_fd, txpid, rxpid, err = 0;
+ struct msg_stats s = {0};
+ int status;
+
+ errno = 0;
+
+ if (base)
+ rx_fd = p1;
+ else
+ rx_fd = p2;
+
+ rxpid = fork();
+ if (rxpid == 0) {
+ err = msg_loop(rx_fd, iov_count, iov_buf, cnt, &s, false);
+ if (err)
+ fprintf(stderr,
+ "msg_loop_rx: iov_count %i iov_buf %i cnt %i err %i\n",
+ iov_count, iov_buf, cnt, err);
+ shutdown(p2, SHUT_RDWR);
+ shutdown(p1, SHUT_RDWR);
+ if (s.end.tv_sec - s.start.tv_sec) {
+ sent_Bps = sentBps(s);
+ recvd_Bps = recvdBps(s);
+ }
+ fprintf(stdout,
+ "rx_sendmsg: TX: %zuB %fB/s %fGB/s RX: %zuB %fB/s %fGB/s\n",
+ s.bytes_sent, sent_Bps, sent_Bps/giga,
+ s.bytes_recvd, recvd_Bps, recvd_Bps/giga);
+ exit(1);
+ } else if (rxpid == -1) {
+ perror("msg_loop_rx: ");
+ return errno;
+ }
+
+ txpid = fork();
+ if (txpid == 0) {
+ err = msg_loop(c1, iov_count, iov_buf, cnt, &s, true);
+ if (err)
+ fprintf(stderr,
+ "msg_loop_tx: iov_count %i iov_buf %i cnt %i err %i\n",
+ iov_count, iov_buf, cnt, err);
+ shutdown(c1, SHUT_RDWR);
+ if (s.end.tv_sec - s.start.tv_sec) {
+ sent_Bps = sentBps(s);
+ recvd_Bps = recvdBps(s);
+ }
+ fprintf(stdout,
+ "tx_sendmsg: TX: %zuB %fB/s %f GB/s RX: %zuB %fB/s %fGB/s\n",
+ s.bytes_sent, sent_Bps, sent_Bps/giga,
+ s.bytes_recvd, recvd_Bps, recvd_Bps/giga);
+ exit(1);
+ } else if (txpid == -1) {
+ perror("msg_loop_tx: ");
+ return errno;
+ }
+
+ assert(waitpid(rxpid, &status, 0) == rxpid);
+ assert(waitpid(txpid, &status, 0) == txpid);
+ return err;
+}
+
+static int forever_ping_pong(int rate, int verbose)
+{
+ struct timeval timeout;
+ char buf[1024] = {0};
+ int sc;
+
+ timeout.tv_sec = 10;
+ timeout.tv_usec = 0;
/* Ping/Pong data from client to server */
sc = send(c1, buf, sizeof(buf), 0);
if (sc < 0) {
perror("send failed()\n");
- goto out;
+ return sc;
}
do {
- int s, rc, i;
+ int s, rc, i, max_fd = p2;
+ fd_set w;
/* FD sets */
FD_ZERO(&w);
@@ -193,7 +410,7 @@ static int sockmap_test_sockets(int rate, int dot)
if (rc < 0) {
if (errno != EWOULDBLOCK) {
perror("recv failed()\n");
- break;
+ return rc;
}
}
@@ -205,35 +422,92 @@ static int sockmap_test_sockets(int rate, int dot)
sc = send(i, buf, rc, 0);
if (sc < 0) {
perror("send failed()\n");
- break;
+ return sc;
}
}
- sleep(rate);
- if (dot) {
+
+ if (rate)
+ sleep(rate);
+
+ if (verbose) {
printf(".");
fflush(stdout);
}
} while (running);
-out:
- close(s1);
- close(s2);
- close(p1);
- close(p2);
- close(c1);
- close(c2);
- return err;
+ return 0;
}
+enum {
+ PING_PONG,
+ SENDMSG,
+ BASE,
+};
+
int main(int argc, char **argv)
{
- int rate = 1, dot = 1;
+ int iov_count = 1, length = 1024, rate = 1, verbose = 0;
+ struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+ int opt, longindex, err, cg_fd = 0;
+ int test = PING_PONG;
char filename[256];
- int err, cg_fd;
- char *cg_path;
- cg_path = argv[argc - 1];
+ while ((opt = getopt_long(argc, argv, "hvc:r:i:l:t:",
+ long_options, &longindex)) != -1) {
+ switch (opt) {
+ /* Cgroup configuration */
+ case 'c':
+ cg_fd = open(optarg, O_DIRECTORY | O_RDONLY);
+ if (cg_fd < 0) {
+ fprintf(stderr,
+ "ERROR: (%i) open cg path failed: %s\n",
+ cg_fd, optarg);
+ return cg_fd;
+ }
+ break;
+ case 'r':
+ rate = atoi(optarg);
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ case 'i':
+ iov_count = atoi(optarg);
+ break;
+ case 'l':
+ length = atoi(optarg);
+ break;
+ case 't':
+ if (strcmp(optarg, "ping") == 0) {
+ test = PING_PONG;
+ } else if (strcmp(optarg, "sendmsg") == 0) {
+ test = SENDMSG;
+ } else if (strcmp(optarg, "base") == 0) {
+ test = BASE;
+ } else {
+ usage(argv);
+ return -1;
+ }
+ break;
+ case 'h':
+ default:
+ usage(argv);
+ return -1;
+ }
+ }
+
+ if (!cg_fd) {
+ fprintf(stderr, "%s requires cgroup option: --cgroup <path>\n",
+ argv[0]);
+ return -1;
+ }
+
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ perror("setrlimit(RLIMIT_MEMLOCK)");
+ return 1;
+ }
+
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
running = 1;
@@ -241,20 +515,16 @@ int main(int argc, char **argv)
/* catch SIGINT */
signal(SIGINT, running_handler);
+ /* If base test, skip BPF setup */
+ if (test == BASE)
+ goto run;
+
if (load_bpf_file(filename)) {
fprintf(stderr, "load_bpf_file: (%s) %s\n",
filename, strerror(errno));
return 1;
}
- /* Cgroup configuration */
- cg_fd = open(cg_path, O_DIRECTORY, O_RDONLY);
- if (cg_fd < 0) {
- fprintf(stderr, "ERROR: (%i) open cg path failed: %s\n",
- cg_fd, cg_path);
- return cg_fd;
- }
-
/* Attach programs to sockmap */
err = bpf_prog_attach(prog_fd[0], map_fd[0],
BPF_SK_SKB_STREAM_PARSER, 0);
@@ -280,12 +550,30 @@ int main(int argc, char **argv)
return err;
}
- err = sockmap_test_sockets(rate, dot);
+run:
+ err = sockmap_init_sockets();
if (err) {
fprintf(stderr, "ERROR: test socket failed: %d\n", err);
- return err;
+ goto out;
}
- return 0;
+
+ if (test == PING_PONG)
+ err = forever_ping_pong(rate, verbose);
+ else if (test == SENDMSG)
+ err = sendmsg_test(iov_count, length, rate, verbose, false);
+ else if (test == BASE)
+ err = sendmsg_test(iov_count, length, rate, verbose, true);
+ else
+ fprintf(stderr, "unknown test\n");
+out:
+ close(s1);
+ close(s2);
+ close(p1);
+ close(p2);
+ close(c1);
+ close(c2);
+ close(cg_fd);
+ return err;
}
void running_handler(int a)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index cb8997ed0149..47cddf32aeba 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -265,12 +265,18 @@ else
objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
endif
+ifdef CONFIG_MODVERSIONS
+objtool_o = $(@D)/.tmp_$(@F)
+else
+objtool_o = $(@)
+endif
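+# With CONFIG_MODVERSIONS the compiler output is post-processed from a
+# .tmp_ file into $(@), so objtool must inspect the .tmp_ object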
+
# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
# 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
# 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
cmd_objtool = $(if $(patsubst y%,, \
$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
- $(__objtool_obj) $(objtool_args) "$(@)";)
+ $(__objtool_obj) $(objtool_args) "$(objtool_o)";)
objtool_obj = $(if $(patsubst y%,, \
$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
$(__objtool_obj))
@@ -286,16 +292,16 @@ objtool_dep = $(objtool_obj) \
define rule_cc_o_c
$(call echo-cmd,checksrc) $(cmd_checksrc) \
$(call cmd_and_fixdep,cc_o_c) \
- $(cmd_modversions_c) \
$(cmd_checkdoc) \
$(call echo-cmd,objtool) $(cmd_objtool) \
+ $(cmd_modversions_c) \
$(call echo-cmd,record_mcount) $(cmd_record_mcount)
endef
define rule_as_o_S
$(call cmd_and_fixdep,as_o_S) \
- $(cmd_modversions_S) \
- $(call echo-cmd,objtool) $(cmd_objtool)
+ $(call echo-cmd,objtool) $(cmd_objtool) \
+ $(cmd_modversions_S)
endef
# List module undefined symbols (or empty line if not enabled)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 31031f10fe56..ba03f17ff662 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5586,6 +5586,12 @@ sub process {
}
}
+# check for smp_read_barrier_depends and read_barrier_depends
+ if (!$file && $line =~ /\b(smp_|)read_barrier_depends\s*\(/) {
+ WARN("READ_BARRIER_DEPENDS",
+ "$1read_barrier_depends should only be used in READ_ONCE or DEC Alpha code\n" . $herecurr);
+ }
+
# check of hardware specific defines
if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
CHK("ARCH_DEFINES",
diff --git a/scripts/decodecode b/scripts/decodecode
index 438120da1361..9cef558528aa 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -21,12 +21,24 @@ trap cleanup EXIT
T=`mktemp` || die "cannot create temp file"
code=
+cont=
while read i ; do
case "$i" in
*Code:*)
code=$i
+ cont=yes
+ ;;
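+# 'Code:' lines may wrap; keep collecting hexdump continuation lines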
+*)
+ [ -n "$cont" ] && {
+ xdump="$(echo $i | grep '^[[:xdigit:]<>[:space:]]\+$')"
+ if [ -n "$xdump" ]; then
+ code="$code $xdump"
+ else
+ cont=
+ fi
+ }
;;
esac
@@ -59,6 +71,14 @@ disas() {
${CROSS_COMPILE}strip $1.o
fi
+ if [ "$ARCH" = "arm64" ]; then
+ if [ $width -eq 4 ]; then
+ type=inst
+ fi
+
+ ${CROSS_COMPILE}strip $1.o
+ fi
+
${CROSS_COMPILE}objdump $OBJDUMPFLAGS -S $1.o | \
grep -v "/tmp\|Disassembly\|\.text\|^$" > $1.dis 2>&1
}
diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
index 1bf949c43b76..f6ab3ccf698f 100644
--- a/scripts/gdb/linux/tasks.py
+++ b/scripts/gdb/linux/tasks.py
@@ -96,6 +96,8 @@ def get_thread_info(task):
thread_info_addr = task.address + ia64_task_size
thread_info = thread_info_addr.cast(thread_info_ptr_type)
else:
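+ # With CONFIG_THREAD_INFO_IN_TASK, thread_info is embedded as
+ # the first member of task_struct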
+ if task.type.fields()[0].type == thread_info_type.get_type():
+ return task['thread_info']
thread_info = task['stack'].cast(thread_info_ptr_type)
return thread_info.dereference()
diff --git a/scripts/genksyms/.gitignore b/scripts/genksyms/.gitignore
index 86dc07a01b43..e7836b47f060 100644
--- a/scripts/genksyms/.gitignore
+++ b/scripts/genksyms/.gitignore
@@ -1,4 +1,3 @@
-*.hash.c
*.lex.c
*.tab.c
*.tab.h
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index cbf4996dd9c1..8cee597d33a5 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -893,7 +893,10 @@ static enum string_value_kind expr_parse_string(const char *str,
switch (type) {
case S_BOOLEAN:
case S_TRISTATE:
- return k_string;
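+ /* Order tristate values numerically: n < m < y */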
+ val->s = !strcmp(str, "n") ? 0 :
+ !strcmp(str, "m") ? 1 :
+ !strcmp(str, "y") ? 2 : -1;
+ return k_signed;
case S_INT:
val->s = strtoll(str, &tail, 10);
kind = k_signed;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index df0f045a9a89..fee8952037b1 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -48,16 +48,11 @@ Read C language source or header FILEs, extract embedded documentation comments,
and print formatted documentation to standard output.
The documentation comments are identified by "/**" opening comment mark. See
-Documentation/kernel-doc-nano-HOWTO.txt for the documentation comment syntax.
+Documentation/doc-guide/kernel-doc.rst for the documentation comment syntax.
Output format selection (mutually exclusive):
- -docbook Output DocBook format.
- -html Output HTML format.
- -html5 Output HTML5 format.
- -list Output symbol list format. This is for use by docproc.
-man Output troff manual page format. This is the default.
-rst Output reStructuredText format.
- -text Output plain text format.
-none Do not output documentation, only warnings.
Output selection (mutually exclusive):
@@ -216,7 +211,7 @@ my $anon_struct_union = 0;
my $type_constant = '\b``([^\`]+)``\b';
my $type_constant2 = '\%([-_\w]+)';
my $type_func = '(\w+)\(\)';
-my $type_param = '\@(\w+(\.\.\.)?)';
+my $type_param = '\@(\w*(\.\w+)*(\.\.\.)?)';
my $type_fp_param = '\@(\w+)\(\)'; # Special RST handling for func ptr params
my $type_env = '(\$\w+)';
my $type_enum = '\&(enum\s*([_\w]+))';
@@ -225,84 +220,11 @@ my $type_typedef = '\&(typedef\s*([_\w]+))';
my $type_union = '\&(union\s*([_\w]+))';
my $type_member = '\&([_\w]+)(\.|->)([_\w]+)';
my $type_fallback = '\&([_\w]+)';
-my $type_enum_xml = '\&amp;(enum\s*([_\w]+))';
-my $type_struct_xml = '\&amp;(struct\s*([_\w]+))';
-my $type_typedef_xml = '\&amp;(typedef\s*([_\w]+))';
-my $type_union_xml = '\&amp;(union\s*([_\w]+))';
-my $type_member_xml = '\&amp;([_\w]+)(\.|-\&gt;)([_\w]+)';
-my $type_fallback_xml = '\&amp([_\w]+)';
my $type_member_func = $type_member . '\(\)';
# Output conversion substitutions.
# One for each output format
-# these work fairly well
-my @highlights_html = (
- [$type_constant, "<i>\$1</i>"],
- [$type_constant2, "<i>\$1</i>"],
- [$type_func, "<b>\$1</b>"],
- [$type_enum_xml, "<i>\$1</i>"],
- [$type_struct_xml, "<i>\$1</i>"],
- [$type_typedef_xml, "<i>\$1</i>"],
- [$type_union_xml, "<i>\$1</i>"],
- [$type_env, "<b><i>\$1</i></b>"],
- [$type_param, "<tt><b>\$1</b></tt>"],
- [$type_member_xml, "<tt><i>\$1</i>\$2\$3</tt>"],
- [$type_fallback_xml, "<i>\$1</i>"]
- );
-my $local_lt = "\\\\\\\\lt:";
-my $local_gt = "\\\\\\\\gt:";
-my $blankline_html = $local_lt . "p" . $local_gt; # was "<p>"
-
-# html version 5
-my @highlights_html5 = (
- [$type_constant, "<span class=\"const\">\$1</span>"],
- [$type_constant2, "<span class=\"const\">\$1</span>"],
- [$type_func, "<span class=\"func\">\$1</span>"],
- [$type_enum_xml, "<span class=\"enum\">\$1</span>"],
- [$type_struct_xml, "<span class=\"struct\">\$1</span>"],
- [$type_typedef_xml, "<span class=\"typedef\">\$1</span>"],
- [$type_union_xml, "<span class=\"union\">\$1</span>"],
- [$type_env, "<span class=\"env\">\$1</span>"],
- [$type_param, "<span class=\"param\">\$1</span>]"],
- [$type_member_xml, "<span class=\"literal\"><span class=\"struct\">\$1</span>\$2<span class=\"member\">\$3</span></span>"],
- [$type_fallback_xml, "<span class=\"struct\">\$1</span>"]
- );
-my $blankline_html5 = $local_lt . "br /" . $local_gt;
-
-# XML, docbook format
-my @highlights_xml = (
- ["([^=])\\\"([^\\\"<]+)\\\"", "\$1<quote>\$2</quote>"],
- [$type_constant, "<constant>\$1</constant>"],
- [$type_constant2, "<constant>\$1</constant>"],
- [$type_enum_xml, "<type>\$1</type>"],
- [$type_struct_xml, "<structname>\$1</structname>"],
- [$type_typedef_xml, "<type>\$1</type>"],
- [$type_union_xml, "<structname>\$1</structname>"],
- [$type_param, "<parameter>\$1</parameter>"],
- [$type_func, "<function>\$1</function>"],
- [$type_env, "<envar>\$1</envar>"],
- [$type_member_xml, "<literal><structname>\$1</structname>\$2<structfield>\$3</structfield></literal>"],
- [$type_fallback_xml, "<structname>\$1</structname>"]
- );
-my $blankline_xml = $local_lt . "/para" . $local_gt . $local_lt . "para" . $local_gt . "\n";
-
-# gnome, docbook format
-my @highlights_gnome = (
- [$type_constant, "<replaceable class=\"option\">\$1</replaceable>"],
- [$type_constant2, "<replaceable class=\"option\">\$1</replaceable>"],
- [$type_func, "<function>\$1</function>"],
- [$type_enum, "<type>\$1</type>"],
- [$type_struct, "<structname>\$1</structname>"],
- [$type_typedef, "<type>\$1</type>"],
- [$type_union, "<structname>\$1</structname>"],
- [$type_env, "<envar>\$1</envar>"],
- [$type_param, "<parameter>\$1</parameter>" ],
- [$type_member, "<literal><structname>\$1</structname>\$2<structfield>\$3</structfield></literal>"],
- [$type_fallback, "<structname>\$1</structname>"]
- );
-my $blankline_gnome = "</para><para>\n";
-
# these are pretty rough
my @highlights_man = (
[$type_constant, "\$1"],
@@ -318,21 +240,6 @@ my @highlights_man = (
);
my $blankline_man = "";
-# text-mode
-my @highlights_text = (
- [$type_constant, "\$1"],
- [$type_constant2, "\$1"],
- [$type_func, "\$1"],
- [$type_enum, "\$1"],
- [$type_struct, "\$1"],
- [$type_typedef, "\$1"],
- [$type_union, "\$1"],
- [$type_param, "\$1"],
- [$type_member, "\$1\$2\$3"],
- [$type_fallback, "\$1"]
- );
-my $blankline_text = "";
-
# rst-mode
my @highlights_rst = (
[$type_constant, "``\$1``"],
@@ -352,21 +259,6 @@ my @highlights_rst = (
);
my $blankline_rst = "\n";
-# list mode
-my @highlights_list = (
- [$type_constant, "\$1"],
- [$type_constant2, "\$1"],
- [$type_func, "\$1"],
- [$type_enum, "\$1"],
- [$type_struct, "\$1"],
- [$type_typedef, "\$1"],
- [$type_union, "\$1"],
- [$type_param, "\$1"],
- [$type_member, "\$1"],
- [$type_fallback, "\$1"]
- );
-my $blankline_list = "";
-
# read arguments
if ($#ARGV == -1) {
usage();
@@ -376,12 +268,12 @@ my $kernelversion;
my $dohighlight = "";
my $verbose = 0;
-my $output_mode = "man";
+my $output_mode = "rst";
my $output_preformatted = 0;
my $no_doc_sections = 0;
my $enable_lineno = 0;
-my @highlights = @highlights_man;
-my $blankline = $blankline_man;
+my @highlights = @highlights_rst;
+my $blankline = $blankline_rst;
my $modulename = "Kernel API";
use constant {
@@ -499,71 +391,51 @@ my $undescribed = "-- undescribed --";
reset_state();
-while ($ARGV[0] =~ m/^-(.*)/) {
- my $cmd = shift @ARGV;
- if ($cmd eq "-html") {
- $output_mode = "html";
- @highlights = @highlights_html;
- $blankline = $blankline_html;
- } elsif ($cmd eq "-html5") {
- $output_mode = "html5";
- @highlights = @highlights_html5;
- $blankline = $blankline_html5;
- } elsif ($cmd eq "-man") {
+while ($ARGV[0] =~ m/^--?(.*)/) {
+ my $cmd = $1;
+ shift @ARGV;
+ if ($cmd eq "man") {
$output_mode = "man";
@highlights = @highlights_man;
$blankline = $blankline_man;
- } elsif ($cmd eq "-text") {
- $output_mode = "text";
- @highlights = @highlights_text;
- $blankline = $blankline_text;
- } elsif ($cmd eq "-rst") {
+ } elsif ($cmd eq "rst") {
$output_mode = "rst";
@highlights = @highlights_rst;
$blankline = $blankline_rst;
- } elsif ($cmd eq "-docbook") {
- $output_mode = "xml";
- @highlights = @highlights_xml;
- $blankline = $blankline_xml;
- } elsif ($cmd eq "-list") {
- $output_mode = "list";
- @highlights = @highlights_list;
- $blankline = $blankline_list;
- } elsif ($cmd eq "-gnome") {
- $output_mode = "gnome";
- @highlights = @highlights_gnome;
- $blankline = $blankline_gnome;
- } elsif ($cmd eq "-none") {
+ } elsif ($cmd eq "none") {
$output_mode = "none";
- } elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document
+ } elsif ($cmd eq "module") { # not needed for XML, inherits from calling document
$modulename = shift @ARGV;
- } elsif ($cmd eq "-function") { # to only output specific functions
+ } elsif ($cmd eq "function") { # to only output specific functions
$output_selection = OUTPUT_INCLUDE;
$function = shift @ARGV;
$function_table{$function} = 1;
- } elsif ($cmd eq "-nofunction") { # output all except specific functions
+ } elsif ($cmd eq "nofunction") { # output all except specific functions
$output_selection = OUTPUT_EXCLUDE;
$function = shift @ARGV;
$function_table{$function} = 1;
- } elsif ($cmd eq "-export") { # only exported symbols
+ } elsif ($cmd eq "export") { # only exported symbols
$output_selection = OUTPUT_EXPORTED;
%function_table = ();
- } elsif ($cmd eq "-internal") { # only non-exported symbols
+ } elsif ($cmd eq "internal") { # only non-exported symbols
$output_selection = OUTPUT_INTERNAL;
%function_table = ();
- } elsif ($cmd eq "-export-file") {
+ } elsif ($cmd eq "export-file") {
my $file = shift @ARGV;
push(@export_file_list, $file);
- } elsif ($cmd eq "-v") {
+ } elsif ($cmd eq "v") {
$verbose = 1;
- } elsif (($cmd eq "-h") || ($cmd eq "--help")) {
+ } elsif (($cmd eq "h") || ($cmd eq "help")) {
usage();
- } elsif ($cmd eq '-no-doc-sections') {
+ } elsif ($cmd eq 'no-doc-sections') {
$no_doc_sections = 1;
- } elsif ($cmd eq '-enable-lineno') {
+ } elsif ($cmd eq 'enable-lineno') {
$enable_lineno = 1;
- } elsif ($cmd eq '-show-not-found') {
+ } elsif ($cmd eq 'show-not-found') {
$show_not_found = 1;
+ } else {
+ # Unknown argument
+ usage();
}
}
@@ -670,22 +542,11 @@ sub output_highlight {
# confess "output_highlight got called with no args?\n";
# }
- if ($output_mode eq "html" || $output_mode eq "html5" ||
- $output_mode eq "xml") {
- $contents = local_unescape($contents);
- # convert data read & converted thru xml_escape() into &xyz; format:
- $contents =~ s/\\\\\\/\&/g;
- }
# print STDERR "contents b4:$contents\n";
eval $dohighlight;
die $@ if $@;
# print STDERR "contents af:$contents\n";
-# strip whitespaces when generating html5
- if ($output_mode eq "html5") {
- $contents =~ s/^\s+//;
- $contents =~ s/\s+$//;
- }
foreach $line (split "\n", $contents) {
if (! $output_preformatted) {
$line =~ s/^\s*//;
@@ -706,817 +567,6 @@ sub output_highlight {
}
}
-# output sections in html
-sub output_section_html(%) {
- my %args = %{$_[0]};
- my $section;
-
- foreach $section (@{$args{'sectionlist'}}) {
- print "<h3>$section</h3>\n";
- print "<blockquote>\n";
- output_highlight($args{'sections'}{$section});
- print "</blockquote>\n";
- }
-}
-
-# output enum in html
-sub output_enum_html(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $count;
- print "<h2>enum " . $args{'enum'} . "</h2>\n";
-
- print "<b>enum " . $args{'enum'} . "</b> {<br>\n";
- $count = 0;
- foreach $parameter (@{$args{'parameterlist'}}) {
- print " <b>" . $parameter . "</b>";
- if ($count != $#{$args{'parameterlist'}}) {
- $count++;
- print ",\n";
- }
- print "<br>";
- }
- print "};<br>\n";
-
- print "<h3>Constants</h3>\n";
- print "<dl>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- print "<dt><b>" . $parameter . "</b>\n";
- print "<dd>";
- output_highlight($args{'parameterdescs'}{$parameter});
- }
- print "</dl>\n";
- output_section_html(@_);
- print "<hr>\n";
-}
-
-# output typedef in html
-sub output_typedef_html(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $count;
- print "<h2>typedef " . $args{'typedef'} . "</h2>\n";
-
- print "<b>typedef " . $args{'typedef'} . "</b>\n";
- output_section_html(@_);
- print "<hr>\n";
-}
-
-# output struct in html
-sub output_struct_html(%) {
- my %args = %{$_[0]};
- my ($parameter);
-
- print "<h2>" . $args{'type'} . " " . $args{'struct'} . " - " . $args{'purpose'} . "</h2>\n";
- print "<b>" . $args{'type'} . " " . $args{'struct'} . "</b> {<br>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- if ($parameter =~ /^#/) {
- print "$parameter<br>\n";
- next;
- }
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print "&nbsp; &nbsp; <i>$1</i><b>$parameter</b>) <i>($2)</i>;<br>\n";
- } elsif ($type =~ m/^(.*?)\s*(:.*)/) {
- # bitfield
- print "&nbsp; &nbsp; <i>$1</i> <b>$parameter</b>$2;<br>\n";
- } else {
- print "&nbsp; &nbsp; <i>$type</i> <b>$parameter</b>;<br>\n";
- }
- }
- print "};<br>\n";
-
- print "<h3>Members</h3>\n";
- print "<dl>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- ($parameter =~ /^#/) && next;
-
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- print "<dt><b>" . $parameter . "</b>\n";
- print "<dd>";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- }
- print "</dl>\n";
- output_section_html(@_);
- print "<hr>\n";
-}
-
-# output function in html
-sub output_function_html(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
-
- print "<h2>" . $args{'function'} . " - " . $args{'purpose'} . "</h2>\n";
- print "<i>" . $args{'functiontype'} . "</i>\n";
- print "<b>" . $args{'function'} . "</b>\n";
- print "(";
- $count = 0;
- foreach $parameter (@{$args{'parameterlist'}}) {
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print "<i>$1</i><b>$parameter</b>) <i>($2)</i>";
- } else {
- print "<i>" . $type . "</i> <b>" . $parameter . "</b>";
- }
- if ($count != $#{$args{'parameterlist'}}) {
- $count++;
- print ",\n";
- }
- }
- print ")\n";
-
- print "<h3>Arguments</h3>\n";
- print "<dl>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- print "<dt><b>" . $parameter . "</b>\n";
- print "<dd>";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- }
- print "</dl>\n";
- output_section_html(@_);
- print "<hr>\n";
-}
-
-# output DOC: block header in html
-sub output_blockhead_html(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
-
- foreach $section (@{$args{'sectionlist'}}) {
- print "<h3>$section</h3>\n";
- print "<ul>\n";
- output_highlight($args{'sections'}{$section});
- print "</ul>\n";
- }
- print "<hr>\n";
-}
-
-# output sections in html5
-sub output_section_html5(%) {
- my %args = %{$_[0]};
- my $section;
-
- foreach $section (@{$args{'sectionlist'}}) {
- print "<section>\n";
- print "<h1>$section</h1>\n";
- print "<p>\n";
- output_highlight($args{'sections'}{$section});
- print "</p>\n";
- print "</section>\n";
- }
-}
-
-# output enum in html5
-sub output_enum_html5(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $count;
- my $html5id;
-
- $html5id = $args{'enum'};
- $html5id =~ s/[^a-zA-Z0-9\-]+/_/g;
- print "<article class=\"enum\" id=\"enum:". $html5id . "\">";
- print "<h1>enum " . $args{'enum'} . "</h1>\n";
- print "<ol class=\"code\">\n";
- print "<li>";
- print "<span class=\"keyword\">enum</span> ";
- print "<span class=\"identifier\">" . $args{'enum'} . "</span> {";
- print "</li>\n";
- $count = 0;
- foreach $parameter (@{$args{'parameterlist'}}) {
- print "<li class=\"indent\">";
- print "<span class=\"param\">" . $parameter . "</span>";
- if ($count != $#{$args{'parameterlist'}}) {
- $count++;
- print ",";
- }
- print "</li>\n";
- }
- print "<li>};</li>\n";
- print "</ol>\n";
-
- print "<section>\n";
- print "<h1>Constants</h1>\n";
- print "<dl>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- print "<dt>" . $parameter . "</dt>\n";
- print "<dd>";
- output_highlight($args{'parameterdescs'}{$parameter});
- print "</dd>\n";
- }
- print "</dl>\n";
- print "</section>\n";
- output_section_html5(@_);
- print "</article>\n";
-}
-
-# output typedef in html5
-sub output_typedef_html5(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $count;
- my $html5id;
-
- $html5id = $args{'typedef'};
- $html5id =~ s/[^a-zA-Z0-9\-]+/_/g;
- print "<article class=\"typedef\" id=\"typedef:" . $html5id . "\">\n";
- print "<h1>typedef " . $args{'typedef'} . "</h1>\n";
-
- print "<ol class=\"code\">\n";
- print "<li>";
- print "<span class=\"keyword\">typedef</span> ";
- print "<span class=\"identifier\">" . $args{'typedef'} . "</span>";
- print "</li>\n";
- print "</ol>\n";
- output_section_html5(@_);
- print "</article>\n";
-}
-
-# output struct in html5
-sub output_struct_html5(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $html5id;
-
- $html5id = $args{'struct'};
- $html5id =~ s/[^a-zA-Z0-9\-]+/_/g;
- print "<article class=\"struct\" id=\"struct:" . $html5id . "\">\n";
- print "<hgroup>\n";
- print "<h1>" . $args{'type'} . " " . $args{'struct'} . "</h1>";
- print "<h2>". $args{'purpose'} . "</h2>\n";
- print "</hgroup>\n";
- print "<ol class=\"code\">\n";
- print "<li>";
- print "<span class=\"type\">" . $args{'type'} . "</span> ";
- print "<span class=\"identifier\">" . $args{'struct'} . "</span> {";
- print "</li>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- print "<li class=\"indent\">";
- if ($parameter =~ /^#/) {
- print "<span class=\"param\">" . $parameter ."</span>\n";
- print "</li>\n";
- next;
- }
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print "<span class=\"type\">$1</span> ";
- print "<span class=\"param\">$parameter</span>";
- print "<span class=\"type\">)</span> ";
- print "(<span class=\"args\">$2</span>);";
- } elsif ($type =~ m/^(.*?)\s*(:.*)/) {
- # bitfield
- print "<span class=\"type\">$1</span> ";
- print "<span class=\"param\">$parameter</span>";
- print "<span class=\"bits\">$2</span>;";
- } else {
- print "<span class=\"type\">$type</span> ";
- print "<span class=\"param\">$parameter</span>;";
- }
- print "</li>\n";
- }
- print "<li>};</li>\n";
- print "</ol>\n";
-
- print "<section>\n";
- print "<h1>Members</h1>\n";
- print "<dl>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- ($parameter =~ /^#/) && next;
-
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- print "<dt>" . $parameter . "</dt>\n";
- print "<dd>";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- print "</dd>\n";
- }
- print "</dl>\n";
- print "</section>\n";
- output_section_html5(@_);
- print "</article>\n";
-}
-
-# output function in html5
-sub output_function_html5(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
- my $html5id;
-
- $html5id = $args{'function'};
- $html5id =~ s/[^a-zA-Z0-9\-]+/_/g;
- print "<article class=\"function\" id=\"func:". $html5id . "\">\n";
- print "<hgroup>\n";
- print "<h1>" . $args{'function'} . "</h1>";
- print "<h2>" . $args{'purpose'} . "</h2>\n";
- print "</hgroup>\n";
- print "<ol class=\"code\">\n";
- print "<li>";
- print "<span class=\"type\">" . $args{'functiontype'} . "</span> ";
- print "<span class=\"identifier\">" . $args{'function'} . "</span> (";
- print "</li>";
- $count = 0;
- foreach $parameter (@{$args{'parameterlist'}}) {
- print "<li class=\"indent\">";
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print "<span class=\"type\">$1</span> ";
- print "<span class=\"param\">$parameter</span>";
- print "<span class=\"type\">)</span> ";
- print "(<span class=\"args\">$2</span>)";
- } else {
- print "<span class=\"type\">$type</span> ";
- print "<span class=\"param\">$parameter</span>";
- }
- if ($count != $#{$args{'parameterlist'}}) {
- $count++;
- print ",";
- }
- print "</li>\n";
- }
- print "<li>)</li>\n";
- print "</ol>\n";
-
- print "<section>\n";
- print "<h1>Arguments</h1>\n";
- print "<p>\n";
- print "<dl>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- print "<dt>" . $parameter . "</dt>\n";
- print "<dd>";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- print "</dd>\n";
- }
- print "</dl>\n";
- print "</section>\n";
- output_section_html5(@_);
- print "</article>\n";
-}
-
-# output DOC: block header in html5
-sub output_blockhead_html5(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
- my $html5id;
-
- foreach $section (@{$args{'sectionlist'}}) {
- $html5id = $section;
- $html5id =~ s/[^a-zA-Z0-9\-]+/_/g;
- print "<article class=\"doc\" id=\"doc:". $html5id . "\">\n";
- print "<h1>$section</h1>\n";
- print "<p>\n";
- output_highlight($args{'sections'}{$section});
- print "</p>\n";
- }
- print "</article>\n";
-}
-
-sub output_section_xml(%) {
- my %args = %{$_[0]};
- my $section;
- # print out each section
- $lineprefix=" ";
- foreach $section (@{$args{'sectionlist'}}) {
- print "<refsect1>\n";
- print "<title>$section</title>\n";
- if ($section =~ m/EXAMPLE/i) {
- print "<informalexample><programlisting>\n";
- $output_preformatted = 1;
- } else {
- print "<para>\n";
- }
- output_highlight($args{'sections'}{$section});
- $output_preformatted = 0;
- if ($section =~ m/EXAMPLE/i) {
- print "</programlisting></informalexample>\n";
- } else {
- print "</para>\n";
- }
- print "</refsect1>\n";
- }
-}
-
-# output function in XML DocBook
-sub output_function_xml(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
- my $id;
-
- $id = "API-" . $args{'function'};
- $id =~ s/[^A-Za-z0-9]/-/g;
-
- print "<refentry id=\"$id\">\n";
- print "<refentryinfo>\n";
- print " <title>LINUX</title>\n";
- print " <productname>Kernel Hackers Manual</productname>\n";
- print " <date>$man_date</date>\n";
- print "</refentryinfo>\n";
- print "<refmeta>\n";
- print " <refentrytitle><phrase>" . $args{'function'} . "</phrase></refentrytitle>\n";
- print " <manvolnum>9</manvolnum>\n";
- print " <refmiscinfo class=\"version\">" . $kernelversion . "</refmiscinfo>\n";
- print "</refmeta>\n";
- print "<refnamediv>\n";
- print " <refname>" . $args{'function'} . "</refname>\n";
- print " <refpurpose>\n";
- print " ";
- output_highlight ($args{'purpose'});
- print " </refpurpose>\n";
- print "</refnamediv>\n";
-
- print "<refsynopsisdiv>\n";
- print " <title>Synopsis</title>\n";
- print " <funcsynopsis><funcprototype>\n";
- print " <funcdef>" . $args{'functiontype'} . " ";
- print "<function>" . $args{'function'} . " </function></funcdef>\n";
-
- $count = 0;
- if ($#{$args{'parameterlist'}} >= 0) {
- foreach $parameter (@{$args{'parameterlist'}}) {
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print " <paramdef>$1<parameter>$parameter</parameter>)\n";
- print " <funcparams>$2</funcparams></paramdef>\n";
- } else {
- print " <paramdef>" . $type;
- print " <parameter>$parameter</parameter></paramdef>\n";
- }
- }
- } else {
- print " <void/>\n";
- }
- print " </funcprototype></funcsynopsis>\n";
- print "</refsynopsisdiv>\n";
-
- # print parameters
- print "<refsect1>\n <title>Arguments</title>\n";
- if ($#{$args{'parameterlist'}} >= 0) {
- print " <variablelist>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
- $type = $args{'parametertypes'}{$parameter};
-
- print " <varlistentry>\n <term><parameter>$type $parameter</parameter></term>\n";
- print " <listitem>\n <para>\n";
- $lineprefix=" ";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- print " </para>\n </listitem>\n </varlistentry>\n";
- }
- print " </variablelist>\n";
- } else {
- print " <para>\n None\n </para>\n";
- }
- print "</refsect1>\n";
-
- output_section_xml(@_);
- print "</refentry>\n\n";
-}
-
-# output struct in XML DocBook
-sub output_struct_xml(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $id;
-
- $id = "API-struct-" . $args{'struct'};
- $id =~ s/[^A-Za-z0-9]/-/g;
-
- print "<refentry id=\"$id\">\n";
- print "<refentryinfo>\n";
- print " <title>LINUX</title>\n";
- print " <productname>Kernel Hackers Manual</productname>\n";
- print " <date>$man_date</date>\n";
- print "</refentryinfo>\n";
- print "<refmeta>\n";
- print " <refentrytitle><phrase>" . $args{'type'} . " " . $args{'struct'} . "</phrase></refentrytitle>\n";
- print " <manvolnum>9</manvolnum>\n";
- print " <refmiscinfo class=\"version\">" . $kernelversion . "</refmiscinfo>\n";
- print "</refmeta>\n";
- print "<refnamediv>\n";
- print " <refname>" . $args{'type'} . " " . $args{'struct'} . "</refname>\n";
- print " <refpurpose>\n";
- print " ";
- output_highlight ($args{'purpose'});
- print " </refpurpose>\n";
- print "</refnamediv>\n";
-
- print "<refsynopsisdiv>\n";
- print " <title>Synopsis</title>\n";
- print " <programlisting>\n";
- print $args{'type'} . " " . $args{'struct'} . " {\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- if ($parameter =~ /^#/) {
- my $prm = $parameter;
- # convert data read & converted thru xml_escape() into &xyz; format:
- # This allows us to have #define macros interspersed in a struct.
- $prm =~ s/\\\\\\/\&/g;
- print "$prm\n";
- next;
- }
-
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- defined($args{'parameterdescs'}{$parameter_name}) || next;
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print " $1 $parameter) ($2);\n";
- } elsif ($type =~ m/^(.*?)\s*(:.*)/) {
- # bitfield
- print " $1 $parameter$2;\n";
- } else {
- print " " . $type . " " . $parameter . ";\n";
- }
- }
- print "};";
- print " </programlisting>\n";
- print "</refsynopsisdiv>\n";
-
- print " <refsect1>\n";
- print " <title>Members</title>\n";
-
- if ($#{$args{'parameterlist'}} >= 0) {
- print " <variablelist>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- ($parameter =~ /^#/) && next;
-
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- defined($args{'parameterdescs'}{$parameter_name}) || next;
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- $type = $args{'parametertypes'}{$parameter};
- print " <varlistentry>";
- print " <term><literal>$type $parameter</literal></term>\n";
- print " <listitem><para>\n";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- print " </para></listitem>\n";
- print " </varlistentry>\n";
- }
- print " </variablelist>\n";
- } else {
- print " <para>\n None\n </para>\n";
- }
- print " </refsect1>\n";
-
- output_section_xml(@_);
-
- print "</refentry>\n\n";
-}
-
-# output enum in XML DocBook
-sub output_enum_xml(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
- my $id;
-
- $id = "API-enum-" . $args{'enum'};
- $id =~ s/[^A-Za-z0-9]/-/g;
-
- print "<refentry id=\"$id\">\n";
- print "<refentryinfo>\n";
- print " <title>LINUX</title>\n";
- print " <productname>Kernel Hackers Manual</productname>\n";
- print " <date>$man_date</date>\n";
- print "</refentryinfo>\n";
- print "<refmeta>\n";
- print " <refentrytitle><phrase>enum " . $args{'enum'} . "</phrase></refentrytitle>\n";
- print " <manvolnum>9</manvolnum>\n";
- print " <refmiscinfo class=\"version\">" . $kernelversion . "</refmiscinfo>\n";
- print "</refmeta>\n";
- print "<refnamediv>\n";
- print " <refname>enum " . $args{'enum'} . "</refname>\n";
- print " <refpurpose>\n";
- print " ";
- output_highlight ($args{'purpose'});
- print " </refpurpose>\n";
- print "</refnamediv>\n";
-
- print "<refsynopsisdiv>\n";
- print " <title>Synopsis</title>\n";
- print " <programlisting>\n";
- print "enum " . $args{'enum'} . " {\n";
- $count = 0;
- foreach $parameter (@{$args{'parameterlist'}}) {
- print " $parameter";
- if ($count != $#{$args{'parameterlist'}}) {
- $count++;
- print ",";
- }
- print "\n";
- }
- print "};";
- print " </programlisting>\n";
- print "</refsynopsisdiv>\n";
-
- print "<refsect1>\n";
- print " <title>Constants</title>\n";
- print " <variablelist>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- print " <varlistentry>";
- print " <term>$parameter</term>\n";
- print " <listitem><para>\n";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- print " </para></listitem>\n";
- print " </varlistentry>\n";
- }
- print " </variablelist>\n";
- print "</refsect1>\n";
-
- output_section_xml(@_);
-
- print "</refentry>\n\n";
-}
-
-# output typedef in XML DocBook
-sub output_typedef_xml(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $id;
-
- $id = "API-typedef-" . $args{'typedef'};
- $id =~ s/[^A-Za-z0-9]/-/g;
-
- print "<refentry id=\"$id\">\n";
- print "<refentryinfo>\n";
- print " <title>LINUX</title>\n";
- print " <productname>Kernel Hackers Manual</productname>\n";
- print " <date>$man_date</date>\n";
- print "</refentryinfo>\n";
- print "<refmeta>\n";
- print " <refentrytitle><phrase>typedef " . $args{'typedef'} . "</phrase></refentrytitle>\n";
- print " <manvolnum>9</manvolnum>\n";
- print "</refmeta>\n";
- print "<refnamediv>\n";
- print " <refname>typedef " . $args{'typedef'} . "</refname>\n";
- print " <refpurpose>\n";
- print " ";
- output_highlight ($args{'purpose'});
- print " </refpurpose>\n";
- print "</refnamediv>\n";
-
- print "<refsynopsisdiv>\n";
- print " <title>Synopsis</title>\n";
- print " <synopsis>typedef " . $args{'typedef'} . ";</synopsis>\n";
- print "</refsynopsisdiv>\n";
-
- output_section_xml(@_);
-
- print "</refentry>\n\n";
-}
-
-# output in XML DocBook
-sub output_blockhead_xml(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
-
- my $id = $args{'module'};
- $id =~ s/[^A-Za-z0-9]/-/g;
-
- # print out each section
- $lineprefix=" ";
- foreach $section (@{$args{'sectionlist'}}) {
- if (!$args{'content-only'}) {
- print "<refsect1>\n <title>$section</title>\n";
- }
- if ($section =~ m/EXAMPLE/i) {
- print "<example><para>\n";
- $output_preformatted = 1;
- } else {
- print "<para>\n";
- }
- output_highlight($args{'sections'}{$section});
- $output_preformatted = 0;
- if ($section =~ m/EXAMPLE/i) {
- print "</para></example>\n";
- } else {
- print "</para>";
- }
- if (!$args{'content-only'}) {
- print "\n</refsect1>\n";
- }
- }
-
- print "\n\n";
-}
-
-# output in XML DocBook
-sub output_function_gnome {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
- my $id;
-
- $id = $args{'module'} . "-" . $args{'function'};
- $id =~ s/[^A-Za-z0-9]/-/g;
-
- print "<sect2>\n";
- print " <title id=\"$id\">" . $args{'function'} . "</title>\n";
-
- print " <funcsynopsis>\n";
- print " <funcdef>" . $args{'functiontype'} . " ";
- print "<function>" . $args{'function'} . " ";
- print "</function></funcdef>\n";
-
- $count = 0;
- if ($#{$args{'parameterlist'}} >= 0) {
- foreach $parameter (@{$args{'parameterlist'}}) {
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print " <paramdef>$1 <parameter>$parameter</parameter>)\n";
- print " <funcparams>$2</funcparams></paramdef>\n";
- } else {
- print " <paramdef>" . $type;
- print " <parameter>$parameter</parameter></paramdef>\n";
- }
- }
- } else {
- print " <void>\n";
- }
- print " </funcsynopsis>\n";
- if ($#{$args{'parameterlist'}} >= 0) {
- print " <informaltable pgwide=\"1\" frame=\"none\" role=\"params\">\n";
- print "<tgroup cols=\"2\">\n";
- print "<colspec colwidth=\"2*\">\n";
- print "<colspec colwidth=\"8*\">\n";
- print "<tbody>\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- print " <row><entry align=\"right\"><parameter>$parameter</parameter></entry>\n";
- print " <entry>\n";
- $lineprefix=" ";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- print " </entry></row>\n";
- }
- print " </tbody></tgroup></informaltable>\n";
- } else {
- print " <para>\n None\n </para>\n";
- }
-
- # print out each section
- $lineprefix=" ";
- foreach $section (@{$args{'sectionlist'}}) {
- print "<simplesect>\n <title>$section</title>\n";
- if ($section =~ m/EXAMPLE/i) {
- print "<example><programlisting>\n";
- $output_preformatted = 1;
- } else {
- }
- print "<para>\n";
- output_highlight($args{'sections'}{$section});
- $output_preformatted = 0;
- print "</para>\n";
- if ($section =~ m/EXAMPLE/i) {
- print "</programlisting></example>\n";
- } else {
- }
- print " </simplesect>\n";
- }
-
- print "</sect2>\n\n";
-}
-
##
# output function in man
sub output_function_man(%) {
@@ -1620,32 +670,12 @@ sub output_struct_man(%) {
print ".SH NAME\n";
print $args{'type'} . " " . $args{'struct'} . " \\- " . $args{'purpose'} . "\n";
+ my $declaration = $args{'definition'};
+ $declaration =~ s/\t/ /g;
+ $declaration =~ s/\n/"\n.br\n.BI \"/g;
print ".SH SYNOPSIS\n";
print $args{'type'} . " " . $args{'struct'} . " {\n.br\n";
-
- foreach my $parameter (@{$args{'parameterlist'}}) {
- if ($parameter =~ /^#/) {
- print ".BI \"$parameter\"\n.br\n";
- next;
- }
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print ".BI \" " . $1 . "\" " . $parameter . " \") (" . $2 . ")" . "\"\n;\n";
- } elsif ($type =~ m/^(.*?)\s*(:.*)/) {
- # bitfield
- print ".BI \" " . $1 . "\ \" " . $parameter . $2 . " \"" . "\"\n;\n";
- } else {
- $type =~ s/([^\*])$/$1 /;
- print ".BI \" " . $type . "\" " . $parameter . " \"" . "\"\n;\n";
- }
- print "\n.br\n";
- }
- print "};\n.br\n";
+ print ".BI \"$declaration\n};\n.br\n\n";
print ".SH Members\n";
foreach $parameter (@{$args{'parameterlist'}}) {
@@ -1695,161 +725,6 @@ sub output_blockhead_man(%) {
}
##
-# output in text
-sub output_function_text(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $start;
-
- print "Name:\n\n";
- print $args{'function'} . " - " . $args{'purpose'} . "\n";
-
- print "\nSynopsis:\n\n";
- if ($args{'functiontype'} ne "") {
- $start = $args{'functiontype'} . " " . $args{'function'} . " (";
- } else {
- $start = $args{'function'} . " (";
- }
- print $start;
-
- my $count = 0;
- foreach my $parameter (@{$args{'parameterlist'}}) {
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print $1 . $parameter . ") (" . $2;
- } else {
- print $type . " " . $parameter;
- }
- if ($count != $#{$args{'parameterlist'}}) {
- $count++;
- print ",\n";
- print " " x length($start);
- } else {
- print ");\n\n";
- }
- }
-
- print "Arguments:\n\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- print $parameter . "\n\t" . $args{'parameterdescs'}{$parameter_name} . "\n";
- }
- output_section_text(@_);
-}
-
-#output sections in text
-sub output_section_text(%) {
- my %args = %{$_[0]};
- my $section;
-
- print "\n";
- foreach $section (@{$args{'sectionlist'}}) {
- print "$section:\n\n";
- output_highlight($args{'sections'}{$section});
- }
- print "\n\n";
-}
-
-# output enum in text
-sub output_enum_text(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $count;
- print "Enum:\n\n";
-
- print "enum " . $args{'enum'} . " - " . $args{'purpose'} . "\n\n";
- print "enum " . $args{'enum'} . " {\n";
- $count = 0;
- foreach $parameter (@{$args{'parameterlist'}}) {
- print "\t$parameter";
- if ($count != $#{$args{'parameterlist'}}) {
- $count++;
- print ",";
- }
- print "\n";
- }
- print "};\n\n";
-
- print "Constants:\n\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- print "$parameter\n\t";
- print $args{'parameterdescs'}{$parameter} . "\n";
- }
-
- output_section_text(@_);
-}
-
-# output typedef in text
-sub output_typedef_text(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $count;
- print "Typedef:\n\n";
-
- print "typedef " . $args{'typedef'} . " - " . $args{'purpose'} . "\n";
- output_section_text(@_);
-}
-
-# output struct as text
-sub output_struct_text(%) {
- my %args = %{$_[0]};
- my ($parameter);
-
- print $args{'type'} . " " . $args{'struct'} . " - " . $args{'purpose'} . "\n\n";
- print $args{'type'} . " " . $args{'struct'} . " {\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- if ($parameter =~ /^#/) {
- print "$parameter\n";
- next;
- }
-
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print "\t$1 $parameter) ($2);\n";
- } elsif ($type =~ m/^(.*?)\s*(:.*)/) {
- # bitfield
- print "\t$1 $parameter$2;\n";
- } else {
- print "\t" . $type . " " . $parameter . ";\n";
- }
- }
- print "};\n\n";
-
- print "Members:\n\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- ($parameter =~ /^#/) && next;
-
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- print "$parameter\n\t";
- print $args{'parameterdescs'}{$parameter_name} . "\n";
- }
- print "\n";
- output_section_text(@_);
-}
-
-sub output_blockhead_text(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
-
- foreach $section (@{$args{'sectionlist'}}) {
- print " $section:\n";
- print " -> ";
- output_highlight($args{'sections'}{$section});
- }
-}
-
-##
# output in restructured text
#
@@ -2038,29 +913,9 @@ sub output_struct_rst(%) {
print "**Definition**\n\n";
print "::\n\n";
- print " " . $args{'type'} . " " . $args{'struct'} . " {\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- if ($parameter =~ /^#/) {
- print " " . "$parameter\n";
- next;
- }
-
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
- # pointer-to-function
- print " $1 $parameter) ($2);\n";
- } elsif ($type =~ m/^(.*?)\s*(:.*)/) {
- # bitfield
- print " $1 $parameter$2;\n";
- } else {
- print " " . $type . " " . $parameter . ";\n";
- }
- }
- print " };\n\n";
+ my $declaration = $args{'definition'};
+ $declaration =~ s/\t/ /g;
+ print " " . $args{'type'} . " " . $args{'struct'} . " {\n$declaration };\n\n";
print "**Members**\n\n";
$lineprefix = " ";
@@ -2083,44 +938,6 @@ sub output_struct_rst(%) {
output_section_rst(@_);
}
-
-## list mode output functions
-
-sub output_function_list(%) {
- my %args = %{$_[0]};
-
- print $args{'function'} . "\n";
-}
-
-# output enum in list
-sub output_enum_list(%) {
- my %args = %{$_[0]};
- print $args{'enum'} . "\n";
-}
-
-# output typedef in list
-sub output_typedef_list(%) {
- my %args = %{$_[0]};
- print $args{'typedef'} . "\n";
-}
-
-# output struct as list
-sub output_struct_list(%) {
- my %args = %{$_[0]};
-
- print $args{'struct'} . "\n";
-}
-
-sub output_blockhead_list(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
-
- foreach $section (@{$args{'sectionlist'}}) {
- print "DOC: $section\n";
- }
-}
-
-
## none mode output functions
sub output_function_none(%) {
@@ -2186,39 +1003,128 @@ sub dump_union($$) {
sub dump_struct($$) {
my $x = shift;
my $file = shift;
- my $nested;
if ($x =~ /(struct|union)\s+(\w+)\s*{(.*)}/) {
my $decl_type = $1;
$declaration_name = $2;
my $members = $3;
- # ignore embedded structs or unions
- $members =~ s/({.*})//g;
- $nested = $1;
-
# ignore members marked private:
$members =~ s/\/\*\s*private:.*?\/\*\s*public:.*?\*\///gosi;
$members =~ s/\/\*\s*private:.*//gosi;
# strip comments:
$members =~ s/\/\*.*?\*\///gos;
- $nested =~ s/\/\*.*?\*\///gos;
# strip attributes
$members =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
$members =~ s/__aligned\s*\([^;]*\)//gos;
$members =~ s/\s*CRYPTO_MINALIGN_ATTR//gos;
# replace DECLARE_BITMAP
- $members =~ s/DECLARE_BITMAP\s*\(([^,)]+), ([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos;
+ $members =~ s/DECLARE_BITMAP\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos;
# replace DECLARE_HASHTABLE
- $members =~ s/DECLARE_HASHTABLE\s*\(([^,)]+), ([^,)]+)\)/unsigned long $1\[1 << (($2) - 1)\]/gos;
-
- create_parameterlist($members, ';', $file);
- check_sections($file, $declaration_name, $decl_type, $sectcheck, $struct_actual, $nested);
-
+ $members =~ s/DECLARE_HASHTABLE\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[1 << (($2) - 1)\]/gos;
+ # replace DECLARE_KFIFO
+ $members =~ s/DECLARE_KFIFO\s*\(([^,)]+),\s*([^,)]+),\s*([^,)]+)\)/$2 \*$1/gos;
+ # replace DECLARE_KFIFO_PTR
+ $members =~ s/DECLARE_KFIFO_PTR\s*\(([^,)]+),\s*([^,)]+)\)/$2 \*$1/gos;
+
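For illustration, a hypothetical struct showing what these substitutions rewrite; the right-hand forms are kernel-doc's simplified stand-ins for its parser, not the real C macro expansions:

struct demo {
	DECLARE_BITMAP(mask, 64);	/* -> unsigned long mask[BITS_TO_LONGS(64)]; */
	DECLARE_HASHTABLE(ht, 4);	/* -> unsigned long ht[1 << ((4) - 1)]; */
	DECLARE_KFIFO(fifo, u8, 128);	/* -> u8 *fifo; */
	DECLARE_KFIFO_PTR(pfifo, u32);	/* -> u32 *pfifo; */
};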
+ my $declaration = $members;
+
+ # Split out each nested struct/union member as its own entry
+ while ($members =~ m/(struct|union)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;/) {
+ my $newmember;
+ my $maintype = $1;
+ my $ids = $4;
+ my $content = $3;
+ foreach my $id(split /,/, $ids) {
+ $newmember .= "$maintype $id; ";
+
+ $id =~ s/[:\[].*//;
+ $id =~ s/^\s*\**(\S+)\s*/$1/;
+ foreach my $arg (split /;/, $content) {
+ next if ($arg =~ m/^\s*$/);
+ if ($arg =~ m/^([^\(]+\(\*?\s*)([\w\.]*)(\s*\).*)/) {
+ # pointer-to-function
+ my $type = $1;
+ my $name = $2;
+ my $extra = $3;
+ next if (!$name);
+ if ($id =~ m/^\s*$/) {
+ # anonymous struct/union
+ $newmember .= "$type$name$extra; ";
+ } else {
+ $newmember .= "$type$id.$name$extra; ";
+ }
+ } else {
+ my $type;
+ my $names;
+ $arg =~ s/^\s+//;
+ $arg =~ s/\s+$//;
+ # Strip bitfield widths
+ $arg =~ s/:\s*\d+\s*//g;
+ # Strip array sizes
+ $arg =~ s/\[\S+\]//g;
+ # The type may span multiple words, and one
+ # declaration can define several IDs, e.g.:
+ # const struct foo, *bar, foobar
+ # so normalize the spaces around commas first,
+ # then match just the comma-separated names.
+ $arg =~ s/\s*,\s*/,/g;
+ if ($arg =~ m/(.*)\s+([\S+,]+)/) {
+ $type = $1;
+ $names = $2;
+ } else {
+ $newmember .= "$arg; ";
+ next;
+ }
+ foreach my $name (split /,/, $names) {
+ $name =~ s/^\s*\**(\S+)\s*/$1/;
+ next if (($name =~ m/^\s*$/));
+ if ($id =~ m/^\s*$/) {
+ # anonymous struct/union
+ $newmember .= "$type $name; ";
+ } else {
+ $newmember .= "$type $id.$name; ";
+ }
+ }
+ }
+ }
+ }
+ $members =~ s/(struct|union)([^\{\};]+)\{([^\{\}]*)}([^\{\}\;]*)\;/$newmember/;
+ }
+
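A worked example of the flattening, using a hypothetical struct (names are illustrative only):

struct outer {
	struct {
		int x;
		int y;
	} pos, alt;
};

/*
 * After one pass of the loop above, $members reads roughly:
 *   "struct pos; struct alt; int pos.x; int pos.y; int alt.x; int alt.y; "
 * so every nested field becomes a dotted top-level entry that
 * create_parameterlist() can describe individually.
 */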
+ # Ignore other nested elements, like enums
+ $members =~ s/({[^\{\}]*})//g;
+
+ create_parameterlist($members, ';', $file, $declaration_name);
+ check_sections($file, $declaration_name, $decl_type, $sectcheck, $struct_actual);
+
+ # Adjust declaration for better display
+ $declaration =~ s/([{;])/$1\n/g;
+ $declaration =~ s/}\s+;/};/g;
+ # Put each inline enum value on its own line
+ do {} while ($declaration =~ s/(enum\s+{[^}]+),([^\n])/$1,\n$2/);
+
+ my @def_args = split /\n/, $declaration;
+ my $level = 1;
+ $declaration = "";
+ foreach my $clause (@def_args) {
+ $clause =~ s/^\s+//;
+ $clause =~ s/\s+$//;
+ $clause =~ s/\s+/ /;
+ next if (!$clause);
+ $level-- if ($clause =~ m/(})/ && $level > 1);
+ if (!($clause =~ m/^\s*#/)) {
+ $declaration .= "\t" x $level;
+ }
+ $declaration .= "\t" . $clause . "\n";
+ $level++ if ($clause =~ m/({)/ && !($clause =~m/}/));
+ }
output_declaration($declaration_name,
'struct',
{'struct' => $declaration_name,
'module' => $modulename,
+ 'definition' => $declaration,
'parameterlist' => \@parameterlist,
'parameterdescs' => \%parameterdescs,
'parametertypes' => \%parametertypes,
@@ -2234,6 +1140,44 @@ sub dump_struct($$) {
}
}
+
+sub show_warnings($$) {
+ my $functype = shift;
+ my $name = shift;
+
+ return 1 if ($output_selection == OUTPUT_ALL);
+
+ if ($output_selection == OUTPUT_EXPORTED) {
+ if (defined($function_table{$name})) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ if ($output_selection == OUTPUT_INTERNAL) {
+ if (!($functype eq "function" && defined($function_table{$name}))) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ if ($output_selection == OUTPUT_INCLUDE) {
+ if (defined($function_table{$name})) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ if ($output_selection == OUTPUT_EXCLUDE) {
+ if (!defined($function_table{$name})) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ die("Please add the new output type at show_warnings()");
+}
+
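In C terms, the new filter reduces to roughly this sketch (simplified and illustrative; "listed" stands for presence in %function_table):

#include <stdbool.h>

enum { OUTPUT_ALL, OUTPUT_INCLUDE, OUTPUT_EXPORTED, OUTPUT_INTERNAL, OUTPUT_EXCLUDE };

static bool show_warnings(int sel, bool is_function, bool listed)
{
	switch (sel) {
	case OUTPUT_ALL:
		return true;
	case OUTPUT_EXPORTED:	/* warn only for exported symbols */
	case OUTPUT_INCLUDE:	/* warn only for requested symbols */
		return listed;
	case OUTPUT_INTERNAL:	/* warn for everything but exported functions */
		return !(is_function && listed);
	case OUTPUT_EXCLUDE:	/* warn for everything not excluded */
		return !listed;
	}
	return false;		/* the Perl version die()s on unknown modes */
}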
sub dump_enum($$) {
my $x = shift;
my $file = shift;
@@ -2254,16 +1198,18 @@ sub dump_enum($$) {
push @parameterlist, $arg;
if (!$parameterdescs{$arg}) {
$parameterdescs{$arg} = $undescribed;
- print STDERR "${file}:$.: warning: Enum value '$arg' ".
- "not described in enum '$declaration_name'\n";
+ if (show_warnings("enum", $declaration_name)) {
+ print STDERR "${file}:$.: warning: Enum value '$arg' not described in enum '$declaration_name'\n";
+ }
}
$_members{$arg} = 1;
}
while (my ($k, $v) = each %parameterdescs) {
if (!exists($_members{$k})) {
- print STDERR "${file}:$.: warning: Excess enum value " .
- "'$k' description in '$declaration_name'\n";
+ if (show_warnings("enum", $declaration_name)) {
+ print STDERR "${file}:$.: warning: Excess enum value '$k' description in '$declaration_name'\n";
+ }
}
}
@@ -2299,7 +1245,7 @@ sub dump_typedef($$) {
$declaration_name = $2;
my $args = $3;
- create_parameterlist($args, ',', $file);
+ create_parameterlist($args, ',', $file, $declaration_name);
output_declaration($declaration_name,
'function',
@@ -2348,10 +1294,11 @@ sub save_struct_actual($) {
$struct_actual = $struct_actual . $actual . " ";
}
-sub create_parameterlist($$$) {
+sub create_parameterlist($$$$) {
my $args = shift;
my $splitter = shift;
my $file = shift;
+ my $declaration_name = shift;
my $type;
my $param;
@@ -2376,12 +1323,12 @@ sub create_parameterlist($$$) {
} elsif ($arg =~ m/\(.+\)\s*\(/) {
# pointer-to-function
$arg =~ tr/#/,/;
- $arg =~ m/[^\(]+\(\*?\s*(\w*)\s*\)/;
+ $arg =~ m/[^\(]+\(\*?\s*([\w\.]*)\s*\)/;
$param = $1;
$type = $arg;
$type =~ s/([^\(]+\(\*?)\s*$param/$1/;
save_struct_actual($param);
- push_parameter($param, $type, $file);
+ push_parameter($param, $type, $file, $declaration_name);
} elsif ($arg) {
$arg =~ s/\s*:\s*/:/g;
$arg =~ s/\s*\[/\[/g;
@@ -2406,27 +1353,28 @@ sub create_parameterlist($$$) {
foreach $param (@args) {
if ($param =~ m/^(\*+)\s*(.*)/) {
save_struct_actual($2);
- push_parameter($2, "$type $1", $file);
+ push_parameter($2, "$type $1", $file, $declaration_name);
}
elsif ($param =~ m/(.*?):(\d+)/) {
if ($type ne "") { # skip unnamed bit-fields
save_struct_actual($1);
- push_parameter($1, "$type:$2", $file)
+ push_parameter($1, "$type:$2", $file, $declaration_name)
}
}
else {
save_struct_actual($param);
- push_parameter($param, $type, $file);
+ push_parameter($param, $type, $file, $declaration_name);
}
}
}
}
}
-sub push_parameter($$$) {
+sub push_parameter($$$$) {
my $param = shift;
my $type = shift;
my $file = shift;
+ my $declaration_name = shift;
if (($anon_struct_union == 1) && ($type eq "") &&
($param eq "}")) {
@@ -2463,21 +1411,15 @@ sub push_parameter($$$) {
# warn if parameter has no description
# (but ignore ones starting with # as these are not parameters
# but inline preprocessor statements);
- # also ignore unnamed structs/unions;
- if (!$anon_struct_union) {
+ # Note: It will also ignore void params and unnamed structs/unions
if (!defined $parameterdescs{$param} && $param !~ /^#/) {
+ $parameterdescs{$param} = $undescribed;
- $parameterdescs{$param} = $undescribed;
-
- if (($type eq 'function') || ($type eq 'enum')) {
- print STDERR "${file}:$.: warning: Function parameter ".
- "or member '$param' not " .
- "described in '$declaration_name'\n";
- }
- print STDERR "${file}:$.: warning:" .
- " No description found for parameter '$param'\n";
- ++$warnings;
- }
+ if (show_warnings($type, $declaration_name)) {
+ print STDERR
+ "${file}:$.: warning: Function parameter or member '$param' not described in '$declaration_name'\n";
+ ++$warnings;
+ }
}
$param = xml_escape($param);
@@ -2496,8 +1438,8 @@ sub push_parameter($$$) {
$parametertypes{$param} = $type;
}
-sub check_sections($$$$$$) {
- my ($file, $decl_name, $decl_type, $sectcheck, $prmscheck, $nested) = @_;
+sub check_sections($$$$$) {
+ my ($file, $decl_name, $decl_type, $sectcheck, $prmscheck) = @_;
my @sects = split ' ', $sectcheck;
my @prms = split ' ', $prmscheck;
my $err;
@@ -2531,14 +1473,6 @@ sub check_sections($$$$$$) {
"'$sects[$sx]' " .
"description in '$decl_name'\n";
++$warnings;
- } else {
- if ($nested !~ m/\Q$sects[$sx]\E/) {
- print STDERR "${file}:$.: warning: " .
- "Excess $decl_type member " .
- "'$sects[$sx]' " .
- "description in '$decl_name'\n";
- ++$warnings;
- }
}
}
}
@@ -2642,14 +1576,14 @@ sub dump_function($$) {
$declaration_name = $2;
my $args = $3;
- create_parameterlist($args, ',', $file);
+ create_parameterlist($args, ',', $file, $declaration_name);
} else {
print STDERR "${file}:$.: warning: cannot understand function prototype: '$prototype'\n";
return;
}
my $prms = join " ", @parameterlist;
- check_sections($file, $declaration_name, "function", $sectcheck, $prms, "");
+ check_sections($file, $declaration_name, "function", $sectcheck, $prms);
# This check emits a lot of warnings at the moment, because many
# functions don't have a 'Return' doc section. So until the number
@@ -2718,7 +1652,7 @@ sub tracepoint_munge($) {
sub syscall_munge() {
my $void = 0;
- $prototype =~ s@[\r\n\t]+@ @gos; # strip newlines/CR's/tabs
+ $prototype =~ s@[\r\n]+@ @gos; # strip newlines/CR's
## if ($prototype =~ m/SYSCALL_DEFINE0\s*\(\s*(a-zA-Z0-9_)*\s*\)/) {
if ($prototype =~ m/SYSCALL_DEFINE0/) {
$void = 1;
@@ -2823,7 +1757,7 @@ sub process_proto_type($$) {
# just before actual output; (this is done by local_unescape())
sub xml_escape($) {
my $text = shift;
- if (($output_mode eq "text") || ($output_mode eq "man")) {
+ if ($output_mode eq "man") {
return $text;
}
$text =~ s/\&/\\\\\\amp;/g;
@@ -2835,7 +1769,7 @@ sub xml_escape($) {
# xml_unescape: reverse the effects of xml_escape
sub xml_unescape($) {
my $text = shift;
- if (($output_mode eq "text") || ($output_mode eq "man")) {
+ if ($output_mode eq "man") {
return $text;
}
$text =~ s/\\\\\\amp;/\&/g;
@@ -2848,7 +1782,7 @@ sub xml_unescape($) {
# local escape strings look like: '\\\\mnemonic:' (that's 4 backslashes)
sub local_unescape($) {
my $text = shift;
- if (($output_mode eq "text") || ($output_mode eq "man")) {
+ if ($output_mode eq "man") {
return $text;
}
$text =~ s/\\\\\\\\lt:/</g;
@@ -2917,6 +1851,8 @@ sub process_file($) {
while (s/\\\s*$//) {
$_ .= <IN>;
}
+ # Expand tabs to spaces against 8-column tab stops
+ while ($_ =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {};
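The substitution expands each run of tabs to the next 8-column tab stop. An equivalent C sketch (illustrative; assumes a single input line and a sufficiently large output buffer):

#include <stddef.h>

static void expand_tabs(const char *in, char *out)
{
	size_t col = 0;	/* current output column */

	for (; *in; in++) {
		if (*in == '\t') {
			do
				*out++ = ' ';	/* pad to the next multiple of 8 */
			while (++col % 8);
		} else {
			*out++ = *in;
			col++;
		}
	}
	*out = '\0';
}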
if ($state == STATE_NORMAL) {
if (/$doc_start/o) {
$state = STATE_NAME; # next line is always the function name
@@ -3016,8 +1952,7 @@ sub process_file($) {
$in_purpose = 0;
$contents = $newcontents;
$new_start_line = $.;
- while ((substr($contents, 0, 1) eq " ") ||
- substr($contents, 0, 1) eq "\t") {
+ while (substr($contents, 0, 1) eq " ") {
$contents = substr($contents, 1);
}
if ($contents ne "") {
@@ -3086,8 +2021,7 @@ sub process_file($) {
$contents = $2;
$new_start_line = $.;
if ($contents ne "") {
- while ((substr($contents, 0, 1) eq " ") ||
- substr($contents, 0, 1) eq "\t") {
+ while (substr($contents, 0, 1) eq " ") {
$contents = substr($contents, 1);
}
$contents .= "\n";
@@ -3170,34 +2104,6 @@ sub process_file($) {
if (($output_selection == OUTPUT_INCLUDE) && ($show_not_found == 1)) {
print STDERR " Was looking for '$_'.\n" for keys %function_table;
}
- if ($output_mode eq "xml") {
- # The template wants at least one RefEntry here; make one.
- print "<refentry>\n";
- print " <refnamediv>\n";
- print " <refname>\n";
- print " ${orig_file}\n";
- print " </refname>\n";
- print " <refpurpose>\n";
- print " Document generation inconsistency\n";
- print " </refpurpose>\n";
- print " </refnamediv>\n";
- print " <refsect1>\n";
- print " <title>\n";
- print " Oops\n";
- print " </title>\n";
- print " <warning>\n";
- print " <para>\n";
- print " The template for this document tried to insert\n";
- print " the structured comment from the file\n";
- print " <filename>${orig_file}</filename> at this point,\n";
- print " but none was found.\n";
- print " This dummy section is inserted to allow\n";
- print " generation to continue.\n";
- print " </para>\n";
- print " </warning>\n";
- print " </refsect1>\n";
- print "</refentry>\n";
- }
}
}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index f51cf977c65b..6510536c06df 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -2165,6 +2165,14 @@ static void add_intree_flag(struct buffer *b, int is_intree)
buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
}
+/* Cannot check for assembler code; RETPOLINE only reflects C compiler support */
+static void add_retpoline(struct buffer *b)
+{
+ buf_printf(b, "\n#ifdef RETPOLINE\n");
+ buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
+ buf_printf(b, "#endif\n");
+}
+
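The fragment below is what add_retpoline() appends to the generated <module>.mod.c, so modinfo reports "retpoline: Y" for modules built with a retpoline-aware compiler:

#ifdef RETPOLINE
MODULE_INFO(retpoline, "Y");
#endif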
static void add_staging_flag(struct buffer *b, const char *name)
{
static const char *staging_dir = "drivers/staging";
@@ -2506,6 +2514,7 @@ int main(int argc, char **argv)
err |= check_modname_len(mod);
add_header(&buf, mod);
add_intree_flag(&buf, !external_module);
+ add_retpoline(&buf);
add_staging_flag(&buf, mod->name);
err |= add_versions(&buf, mod);
add_depends(&buf, mod, modules);
diff --git a/scripts/tags.sh b/scripts/tags.sh
index d23dcbf17457..78e546ff689c 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -77,7 +77,7 @@ find_include_sources()
find_other_sources()
{
find ${tree}* $ignore \
- \( -name include -o -name arch -o -name '.tmp_*' \) -prune -o \
+ \( -path ${tree}include -o -path ${tree}arch -o -name '.tmp_*' \) -prune -o \
-name "$1" -not -type l -print;
}
diff --git a/security/Kconfig b/security/Kconfig
index 5ea8914817f6..3709db95027f 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -56,13 +56,14 @@ config SECURITY_NETWORK
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
+ default y
depends on X86_64 && !UML
help
This feature reduces the number of hardware side channels by
ensuring that the majority of kernel addresses are not mapped
into userspace.
- See Documentation/x86/pagetable-isolation.txt for more details.
+ See Documentation/x86/pti.txt for more details.
config SECURITY_INFINIBAND
bool "Infiniband Security Hooks"
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index d4fa04d91439..4d202b73a0e1 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -571,10 +571,10 @@ static int ns_revision_open(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int ns_revision_poll(struct file *file, poll_table *pt)
+static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
{
struct aa_revision *rev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (rev) {
mutex_lock_nested(&rev->ns->lock, rev->ns->level);
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 04ba9d0718ea..6a54d2ffa840 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -330,10 +330,7 @@ static struct aa_profile *__attach_match(const char *name,
continue;
if (profile->xmatch) {
- if (profile->xmatch_len == len) {
- conflict = true;
- continue;
- } else if (profile->xmatch_len > len) {
+ if (profile->xmatch_len >= len) {
unsigned int state;
u32 perm;
@@ -342,6 +339,10 @@ static struct aa_profile *__attach_match(const char *name,
perm = dfa_user_allow(profile->xmatch, state);
/* any accepting state means a valid match. */
if (perm & MAY_EXEC) {
+ if (profile->xmatch_len == len) {
+ conflict = true;
+ continue;
+ }
candidate = profile;
len = profile->xmatch_len;
conflict = false;
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index 2b27bb79aec4..d7b7e7115160 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -133,6 +133,9 @@ extern struct aa_perms allperms;
#define xcheck_labels_profiles(L1, L2, FN, args...) \
xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
+#define xcheck_labels(L1, L2, P, FN1, FN2) \
+ xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
+
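Expansion sketch for the aa_may_ptrace() call in ipc.c further down: each profile on either side of the ptrace relationship is checked, and the two per-side results are combined by xcheck():

xcheck(fn_for_each(tracer, profile,
		   profile_tracer_perm(profile, tracee, request, &sa)),
       fn_for_each(tracee, profile,
		   profile_tracee_perm(profile, tracer, xrequest, &sa)));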
void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 7ca0032e7ba9..b40678f3c1d5 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -64,40 +64,48 @@ static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
FLAGS_NONE, GFP_ATOMIC);
}
+/* assumes check for PROFILE_MEDIATES is already done */
/* TODO: conditionals */
static int profile_ptrace_perm(struct aa_profile *profile,
- struct aa_profile *peer, u32 request,
- struct common_audit_data *sa)
+ struct aa_label *peer, u32 request,
+ struct common_audit_data *sa)
{
struct aa_perms perms = { };
- /* need because of peer in cross check */
- if (profile_unconfined(profile) ||
- !PROFILE_MEDIATES(profile, AA_CLASS_PTRACE))
- return 0;
-
- aad(sa)->peer = &peer->label;
- aa_profile_match_label(profile, &peer->label, AA_CLASS_PTRACE, request,
+ aad(sa)->peer = peer;
+ aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
&perms);
aa_apply_modes_to_perms(profile, &perms);
return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
}
-static int cross_ptrace_perm(struct aa_profile *tracer,
- struct aa_profile *tracee, u32 request,
- struct common_audit_data *sa)
+static int profile_tracee_perm(struct aa_profile *tracee,
+ struct aa_label *tracer, u32 request,
+ struct common_audit_data *sa)
{
+ if (profile_unconfined(tracee) || unconfined(tracer) ||
+ !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE))
+ return 0;
+
+ return profile_ptrace_perm(tracee, tracer, request, sa);
+}
+
+static int profile_tracer_perm(struct aa_profile *tracer,
+ struct aa_label *tracee, u32 request,
+ struct common_audit_data *sa)
+{
+ if (profile_unconfined(tracer))
+ return 0;
+
if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
- return xcheck(profile_ptrace_perm(tracer, tracee, request, sa),
- profile_ptrace_perm(tracee, tracer,
- request << PTRACE_PERM_SHIFT,
- sa));
- /* policy uses the old style capability check for ptrace */
- if (profile_unconfined(tracer) || tracer == tracee)
+ return profile_ptrace_perm(tracer, tracee, request, sa);
+
+ /* profile uses the old style capability check for ptrace */
+ if (&tracer->label == tracee)
return 0;
aad(sa)->label = &tracer->label;
- aad(sa)->peer = &tracee->label;
+ aad(sa)->peer = tracee;
aad(sa)->request = 0;
aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1);
@@ -115,10 +123,13 @@ static int cross_ptrace_perm(struct aa_profile *tracer,
int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
u32 request)
{
+ struct aa_profile *profile;
+ u32 xrequest = request << PTRACE_PERM_SHIFT;
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
- return xcheck_labels_profiles(tracer, tracee, cross_ptrace_perm,
- request, &sa);
+ return xcheck_labels(tracer, tracee, profile,
+ profile_tracer_perm(profile, tracee, request, &sa),
+ profile_tracee_perm(profile, tracer, xrequest, &sa));
}
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index ed9b4d0f9f7e..8c558cbce930 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -329,6 +329,9 @@ static int match_mnt_path_str(struct aa_profile *profile,
AA_BUG(!mntpath);
AA_BUG(!buffer);
+ if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+ return 0;
+
error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
&mntpnt, &info, profile->disconnected);
if (error)
@@ -380,6 +383,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
AA_BUG(!profile);
AA_BUG(devpath && !devbuffer);
+ if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+ return 0;
+
if (devpath) {
error = aa_path_name(devpath, path_flags(profile, devpath),
devbuffer, &devname, &info,
@@ -558,6 +564,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path,
AA_BUG(!profile);
AA_BUG(!path);
+ if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+ return 0;
+
error = aa_path_name(path, path_flags(profile, path), buffer, &name,
&info, profile->disconnected);
if (error)
@@ -613,7 +622,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
AA_BUG(!new_path);
AA_BUG(!old_path);
- if (profile_unconfined(profile))
+ if (profile_unconfined(profile) ||
+ !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
return aa_get_newest_label(&profile->label);
error = aa_path_name(old_path, path_flags(profile, old_path),
diff --git a/security/commoncap.c b/security/commoncap.c
index 4f8e09340956..48620c93d697 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -348,21 +348,18 @@ static __u32 sansflags(__u32 m)
return m & ~VFS_CAP_FLAGS_EFFECTIVE;
}
-static bool is_v2header(size_t size, __le32 magic)
+static bool is_v2header(size_t size, const struct vfs_cap_data *cap)
{
- __u32 m = le32_to_cpu(magic);
if (size != XATTR_CAPS_SZ_2)
return false;
- return sansflags(m) == VFS_CAP_REVISION_2;
+ return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2;
}
-static bool is_v3header(size_t size, __le32 magic)
+static bool is_v3header(size_t size, const struct vfs_cap_data *cap)
{
- __u32 m = le32_to_cpu(magic);
-
if (size != XATTR_CAPS_SZ_3)
return false;
- return sansflags(m) == VFS_CAP_REVISION_3;
+ return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3;
}
/*
@@ -405,7 +402,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
fs_ns = inode->i_sb->s_user_ns;
cap = (struct vfs_cap_data *) tmpbuf;
- if (is_v2header((size_t) ret, cap->magic_etc)) {
+ if (is_v2header((size_t) ret, cap)) {
/* If this is sizeof(vfs_cap_data) then we're ok with the
* on-disk value, so return that. */
if (alloc)
@@ -413,7 +410,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
else
kfree(tmpbuf);
return ret;
- } else if (!is_v3header((size_t) ret, cap->magic_etc)) {
+ } else if (!is_v3header((size_t) ret, cap)) {
kfree(tmpbuf);
return -EINVAL;
}
@@ -470,9 +467,9 @@ static kuid_t rootid_from_xattr(const void *value, size_t size,
return make_kuid(task_ns, rootid);
}
-static bool validheader(size_t size, __le32 magic)
+static bool validheader(size_t size, const struct vfs_cap_data *cap)
{
- return is_v2header(size, magic) || is_v3header(size, magic);
+ return is_v2header(size, cap) || is_v3header(size, cap);
}
/*
@@ -495,7 +492,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
if (!*ivalue)
return -EINVAL;
- if (!validheader(size, cap->magic_etc))
+ if (!validheader(size, cap))
return -EINVAL;
if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP))
return -EPERM;
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index 241aca315b0c..04825393facb 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -23,9 +23,12 @@
#define EVM_INIT_HMAC 0x0001
#define EVM_INIT_X509 0x0002
-#define EVM_SETUP 0x80000000 /* userland has signaled key load */
+#define EVM_ALLOW_METADATA_WRITES 0x0004
+#define EVM_SETUP_COMPLETE 0x80000000 /* userland has signaled key load */
-#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP)
+#define EVM_KEY_MASK (EVM_INIT_HMAC | EVM_INIT_X509)
+#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP_COMPLETE | \
+ EVM_ALLOW_METADATA_WRITES)
extern int evm_initialized;
extern char *evm_hmac;
@@ -51,7 +54,7 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
size_t req_xattr_value_len, char *digest);
int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
const char *req_xattr_value,
- size_t req_xattr_value_len, char *digest);
+ size_t req_xattr_value_len, char type, char *digest);
int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
char *hmac_val);
int evm_init_secfs(void);
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index bcd64baf8788..691f3e09154c 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -138,7 +138,7 @@ out:
* protection.)
*/
static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
- char *digest)
+ char type, char *digest)
{
struct h_misc {
unsigned long ino;
@@ -149,8 +149,13 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
} hmac_misc;
memset(&hmac_misc, 0, sizeof(hmac_misc));
- hmac_misc.ino = inode->i_ino;
- hmac_misc.generation = inode->i_generation;
+ /* Don't include the inode or generation number in portable
+ * signatures
+ */
+ if (type != EVM_XATTR_PORTABLE_DIGSIG) {
+ hmac_misc.ino = inode->i_ino;
+ hmac_misc.generation = inode->i_generation;
+ }
/* The hmac uid and gid must be encoded in the initial user
* namespace (not the filesystem's user namespace) as encoding
* them in the filesystem's user namespace allows an attack
@@ -163,7 +168,8 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
hmac_misc.mode = inode->i_mode;
crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
- if (evm_hmac_attrs & EVM_ATTR_FSUUID)
+ if ((evm_hmac_attrs & EVM_ATTR_FSUUID) &&
+ type != EVM_XATTR_PORTABLE_DIGSIG)
crypto_shash_update(desc, &inode->i_sb->s_uuid.b[0],
sizeof(inode->i_sb->s_uuid));
crypto_shash_final(desc, digest);
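Sketch of the resulting coverage difference (illustrative, reusing the local struct h_misc from above): portable signatures leave the filesystem-specific fields zeroed so the digest stays stable when the file is copied elsewhere:

struct h_misc portable = {
	/* .ino and .generation stay 0: filesystem-specific */
	.uid  = from_kuid(&init_user_ns, inode->i_uid),
	.gid  = from_kgid(&init_user_ns, inode->i_gid),
	.mode = inode->i_mode,
};
/* the fs UUID update is likewise skipped for EVM_XATTR_PORTABLE_DIGSIG */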
@@ -189,6 +195,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
char *xattr_value = NULL;
int error;
int size;
+ bool ima_present = false;
if (!(inode->i_opflags & IOP_XATTR))
return -EOPNOTSUPP;
@@ -199,11 +206,18 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
error = -ENODATA;
for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ bool is_ima = false;
+
+ if (strcmp(*xattrname, XATTR_NAME_IMA) == 0)
+ is_ima = true;
+
if ((req_xattr_name && req_xattr_value)
&& !strcmp(*xattrname, req_xattr_name)) {
error = 0;
crypto_shash_update(desc, (const u8 *)req_xattr_value,
req_xattr_value_len);
+ if (is_ima)
+ ima_present = true;
continue;
}
size = vfs_getxattr_alloc(dentry, *xattrname,
@@ -218,9 +232,14 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
error = 0;
xattr_size = size;
crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
+ if (is_ima)
+ ima_present = true;
}
- hmac_add_misc(desc, inode, digest);
+ hmac_add_misc(desc, inode, type, digest);
+ /* Portable EVM signatures must include an IMA hash */
+ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
+ return -EPERM;
out:
kfree(xattr_value);
kfree(desc);
@@ -232,17 +251,45 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
char *digest)
{
return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
- req_xattr_value_len, EVM_XATTR_HMAC, digest);
+ req_xattr_value_len, EVM_XATTR_HMAC, digest);
}
int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
const char *req_xattr_value, size_t req_xattr_value_len,
- char *digest)
+ char type, char *digest)
{
return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
- req_xattr_value_len, IMA_XATTR_DIGEST, digest);
+ req_xattr_value_len, type, digest);
+}
+
+static int evm_is_immutable(struct dentry *dentry, struct inode *inode)
+{
+ const struct evm_ima_xattr_data *xattr_data = NULL;
+ struct integrity_iint_cache *iint;
+ int rc = 0;
+
+ iint = integrity_iint_find(inode);
+ if (iint && (iint->flags & EVM_IMMUTABLE_DIGSIG))
+ return 1;
+
+ /* Do this the hard way */
+ rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0,
+ GFP_NOFS);
+ if (rc <= 0) {
+ if (rc == -ENODATA)
+ return 0;
+ return rc;
+ }
+ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG)
+ rc = 1;
+ else
+ rc = 0;
+
+ kfree(xattr_data);
+ return rc;
}
+
/*
* Calculate the hmac and update security.evm xattr
*
@@ -255,6 +302,16 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
struct evm_ima_xattr_data xattr_data;
int rc = 0;
+ /*
+ * Don't permit any transformation of the EVM xattr if the signature
+ * is of an immutable type
+ */
+ rc = evm_is_immutable(dentry, inode);
+ if (rc < 0)
+ return rc;
+ if (rc)
+ return -EPERM;
+
rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
xattr_value_len, xattr_data.digest);
if (rc == 0) {
@@ -280,7 +337,7 @@ int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
}
crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
- hmac_add_misc(desc, inode, hmac_val);
+ hmac_add_misc(desc, inode, EVM_XATTR_HMAC, hmac_val);
kfree(desc);
return 0;
}
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 9826c02e2db8..a8d502827270 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -31,7 +31,7 @@
int evm_initialized;
static char *integrity_status_msg[] = {
- "pass", "fail", "no_label", "no_xattrs", "unknown"
+ "pass", "pass_immutable", "fail", "no_label", "no_xattrs", "unknown"
};
char *evm_hmac = "hmac(sha1)";
char *evm_hash = "sha1";
@@ -76,6 +76,11 @@ static void __init evm_init_config(void)
pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs);
}
+static bool evm_key_loaded(void)
+{
+ return (bool)(evm_initialized & EVM_KEY_MASK);
+}
+
static int evm_find_protected_xattrs(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
@@ -123,7 +128,8 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
enum integrity_status evm_status = INTEGRITY_PASS;
int rc, xattr_len;
- if (iint && iint->evm_status == INTEGRITY_PASS)
+ if (iint && (iint->evm_status == INTEGRITY_PASS ||
+ iint->evm_status == INTEGRITY_PASS_IMMUTABLE))
return iint->evm_status;
/* if status is not PASS, try to check again - against -ENOMEM */
@@ -164,22 +170,26 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
rc = -EINVAL;
break;
case EVM_IMA_XATTR_DIGSIG:
+ case EVM_XATTR_PORTABLE_DIGSIG:
rc = evm_calc_hash(dentry, xattr_name, xattr_value,
- xattr_value_len, calc.digest);
+ xattr_value_len, xattr_data->type,
+ calc.digest);
if (rc)
break;
rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
(const char *)xattr_data, xattr_len,
calc.digest, sizeof(calc.digest));
if (!rc) {
- /* Replace RSA with HMAC if not mounted readonly and
- * not immutable
- */
- if (!IS_RDONLY(d_backing_inode(dentry)) &&
- !IS_IMMUTABLE(d_backing_inode(dentry)))
+ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG) {
+ if (iint)
+ iint->flags |= EVM_IMMUTABLE_DIGSIG;
+ evm_status = INTEGRITY_PASS_IMMUTABLE;
+ } else if (!IS_RDONLY(d_backing_inode(dentry)) &&
+ !IS_IMMUTABLE(d_backing_inode(dentry))) {
evm_update_evmxattr(dentry, xattr_name,
xattr_value,
xattr_value_len);
+ }
}
break;
default:
@@ -241,7 +251,7 @@ enum integrity_status evm_verifyxattr(struct dentry *dentry,
void *xattr_value, size_t xattr_value_len,
struct integrity_iint_cache *iint)
{
- if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
return INTEGRITY_UNKNOWN;
if (!iint) {
@@ -265,7 +275,7 @@ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
- if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode)
+ if (!evm_key_loaded() || !S_ISREG(inode->i_mode) || evm_fixmode)
return 0;
return evm_verify_hmac(dentry, NULL, NULL, 0, NULL);
}
@@ -280,7 +290,7 @@ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
* affect security.evm. An interesting side effect of writing posix xattr
* acls is that they modify the i_mode, which is included in security.evm.
* For posix xattr acls only, permit security.evm, even if it currently
- * doesn't exist, to be updated.
+ * doesn't exist, to be updated unless the EVM signature is immutable.
*/
static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len)
@@ -299,6 +309,7 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
return 0;
goto out;
}
+
evm_status = evm_verify_current_integrity(dentry);
if (evm_status == INTEGRITY_NOXATTRS) {
struct integrity_iint_cache *iint;
@@ -345,10 +356,17 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
{
const struct evm_ima_xattr_data *xattr_data = xattr_value;
+ /* Policy permits modification of the protected xattrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
if (!xattr_value_len)
return -EINVAL;
- if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
+ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG &&
+ xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG)
return -EPERM;
}
return evm_protect_xattr(dentry, xattr_name, xattr_value,
@@ -365,6 +383,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
*/
int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
{
+ /* Policy permits modification of the protected xattrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
return evm_protect_xattr(dentry, xattr_name, NULL, 0);
}
@@ -393,8 +417,8 @@ static void evm_reset_status(struct inode *inode)
void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len)
{
- if (!evm_initialized || (!evm_protected_xattr(xattr_name)
- && !posix_xattr_acl(xattr_name)))
+ if (!evm_key_loaded() || (!evm_protected_xattr(xattr_name)
+ && !posix_xattr_acl(xattr_name)))
return;
evm_reset_status(dentry->d_inode);
@@ -414,7 +438,7 @@ void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
*/
void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
{
- if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
return;
evm_reset_status(dentry->d_inode);
@@ -425,12 +449,21 @@ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
/**
* evm_inode_setattr - prevent updating an invalid EVM extended attribute
* @dentry: pointer to the affected dentry
+ *
+ * Permit update of file attributes when files have a valid EVM signature,
+ * except when they carry an immutable portable signature.
*/
int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
enum integrity_status evm_status;
+ /* Policy permits modification of the protected attrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
return 0;
evm_status = evm_verify_current_integrity(dentry);
@@ -456,7 +489,7 @@ int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
*/
void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
{
- if (!evm_initialized)
+ if (!evm_key_loaded())
return;
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
@@ -473,7 +506,7 @@ int evm_inode_init_security(struct inode *inode,
struct evm_ima_xattr_data *xattr_data;
int rc;
- if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name))
+ if (!evm_key_loaded() || !evm_protected_xattr(lsm_xattr->name))
return 0;
xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index 319cf16d6603..feba03bbedae 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -40,7 +40,7 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
if (*ppos != 0)
return 0;
- sprintf(temp, "%d", (evm_initialized & ~EVM_SETUP));
+ sprintf(temp, "%d", (evm_initialized & ~EVM_SETUP_COMPLETE));
rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
return rc;
@@ -63,7 +63,7 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
{
int i, ret;
- if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP))
+ if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE))
return -EPERM;
ret = kstrtoint_from_user(buf, count, 0, &i);
@@ -75,16 +75,30 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
if (!i || (i & ~EVM_INIT_MASK) != 0)
return -EINVAL;
+ /* Don't allow a request to freshly enable metadata writes if
+ * keys are loaded.
+ */
+ if ((i & EVM_ALLOW_METADATA_WRITES) &&
+ ((evm_initialized & EVM_KEY_MASK) != 0) &&
+ !(evm_initialized & EVM_ALLOW_METADATA_WRITES))
+ return -EPERM;
+
if (i & EVM_INIT_HMAC) {
ret = evm_init_key();
if (ret != 0)
return ret;
/* Forbid further writes after the symmetric key is loaded */
- i |= EVM_SETUP;
+ i |= EVM_SETUP_COMPLETE;
}
evm_initialized |= i;
+ /* Don't allow protected metadata modification if a symmetric key
+ * is loaded
+ */
+ if (evm_initialized & EVM_INIT_HMAC)
+ evm_initialized &= ~(EVM_ALLOW_METADATA_WRITES);
+
return count;
}
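For context, the evm_initialized bits userspace can request through <securityfs>/evm, restated from evm.h earlier in this patch (the numeric example is illustrative):

#define EVM_INIT_HMAC			0x0001	/* load the HMAC key */
#define EVM_INIT_X509			0x0002	/* load an X.509 public key */
#define EVM_ALLOW_METADATA_WRITES	0x0004	/* permit protected-metadata writes */
#define EVM_SETUP_COMPLETE		0x80000000 /* refuse further writes */

/*
 * e.g. writing 6 requests X.509 verification plus metadata writes; once
 * an HMAC key is loaded, EVM_ALLOW_METADATA_WRITES is cleared again and
 * EVM_SETUP_COMPLETE locks the interface.
 */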
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index c84e05866052..fc38ca08dbb5 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -153,14 +153,12 @@ static void init_once(void *foo)
struct integrity_iint_cache *iint = foo;
memset(iint, 0, sizeof(*iint));
- iint->version = 0;
- iint->flags = 0UL;
iint->ima_file_status = INTEGRITY_UNKNOWN;
iint->ima_mmap_status = INTEGRITY_UNKNOWN;
iint->ima_bprm_status = INTEGRITY_UNKNOWN;
iint->ima_read_status = INTEGRITY_UNKNOWN;
iint->evm_status = INTEGRITY_UNKNOWN;
- iint->measured_pcrs = 0;
+ mutex_init(&iint->mutex);
}
static int __init integrity_iintcache_init(void)
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index c7e8db0ea4c0..08fe405338e1 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -18,6 +18,7 @@
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/evm.h>
+#include <linux/iversion.h>
#include "ima.h"
@@ -174,7 +175,7 @@ err_out:
*/
int ima_get_action(struct inode *inode, int mask, enum ima_hooks func, int *pcr)
{
- int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE;
+ int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE | IMA_HASH;
flags &= ima_policy_flag;
@@ -215,7 +216,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
* which do not support i_version, support is limited to an initial
* measurement/appraisal/audit.
*/
- i_version = file_inode(file)->i_version;
+ i_version = inode_query_iversion(inode);
hash.hdr.algo = algo;
/* Initialize hash digest to 0's in case of failure */
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 65fbcf3c32c7..f2803a40ff82 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -46,14 +46,15 @@ bool is_ima_appraise_enabled(void)
/*
* ima_must_appraise - set appraise flag
*
- * Return 1 to appraise
+ * Return 1 to appraise or hash
*/
int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
{
if (!ima_appraise)
return 0;
- return ima_match_policy(inode, func, mask, IMA_APPRAISE, NULL);
+ return ima_match_policy(inode, func, mask, IMA_APPRAISE | IMA_HASH,
+ NULL);
}
static int ima_fix_xattr(struct dentry *dentry,
@@ -223,13 +224,16 @@ int ima_appraise_measurement(enum ima_hooks func,
if (opened & FILE_CREATED)
iint->flags |= IMA_NEW_FILE;
if ((iint->flags & IMA_NEW_FILE) &&
- !(iint->flags & IMA_DIGSIG_REQUIRED))
+ (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
+ (inode->i_size == 0)))
status = INTEGRITY_PASS;
goto out;
}
status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
- if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) {
+ if ((status != INTEGRITY_PASS) &&
+ (status != INTEGRITY_PASS_IMMUTABLE) &&
+ (status != INTEGRITY_UNKNOWN)) {
if ((status == INTEGRITY_NOLABEL)
|| (status == INTEGRITY_NOXATTRS))
cause = "missing-HMAC";
@@ -248,6 +252,7 @@ int ima_appraise_measurement(enum ima_hooks func,
status = INTEGRITY_FAIL;
break;
}
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
if (xattr_len - sizeof(xattr_value->type) - hash_start >=
iint->ima_hash->length)
/* xattr length may be longer. md5 hash in previous
@@ -266,7 +271,7 @@ int ima_appraise_measurement(enum ima_hooks func,
status = INTEGRITY_PASS;
break;
case EVM_IMA_XATTR_DIGSIG:
- iint->flags |= IMA_DIGSIG;
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
(const char *)xattr_value, rc,
iint->ima_hash->digest,
@@ -317,17 +322,20 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
int rc = 0;
/* do not collect and update hash for digital signatures */
- if (iint->flags & IMA_DIGSIG)
+ if (test_bit(IMA_DIGSIG, &iint->atomic_flags))
return;
- if (iint->ima_file_status != INTEGRITY_PASS)
+ if ((iint->ima_file_status != INTEGRITY_PASS) &&
+ !(iint->flags & IMA_HASH))
return;
rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo);
if (rc < 0)
return;
+ inode_lock(file_inode(file));
ima_fix_xattr(dentry, iint);
+ inode_unlock(file_inode(file));
}
/**
@@ -343,23 +351,21 @@ void ima_inode_post_setattr(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
struct integrity_iint_cache *iint;
- int must_appraise;
+ int action;
if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)
|| !(inode->i_opflags & IOP_XATTR))
return;
- must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
+ action = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
+ if (!action)
+ __vfs_removexattr(dentry, XATTR_NAME_IMA);
iint = integrity_iint_find(inode);
if (iint) {
- iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
- IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
- IMA_ACTION_RULE_FLAGS);
- if (must_appraise)
- iint->flags |= IMA_APPRAISE;
+ set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
+ if (!action)
+ clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
}
- if (!must_appraise)
- __vfs_removexattr(dentry, XATTR_NAME_IMA);
}
/*
@@ -388,12 +394,12 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig)
iint = integrity_iint_find(inode);
if (!iint)
return;
-
- iint->flags &= ~IMA_DONE_MASK;
iint->measured_pcrs = 0;
+ set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags);
if (digsig)
- iint->flags |= IMA_DIGSIG;
- return;
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ else
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
}
int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 9057b163c378..205bc69361ea 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -632,7 +632,7 @@ static void __init ima_pcrread(int idx, u8 *pcr)
if (!ima_used_chip)
return;
- if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
+ if (tpm_pcr_read(NULL, idx, pcr) != 0)
pr_err("Error Communicating to TPM chip\n");
}
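The tpm_pcr_read() API now takes a struct tpm_chip pointer instead of a chip number; NULL selects the default chip. A minimal caller sketch (TPM_DIGEST_SIZE buffer assumed):

u8 digest[TPM_DIGEST_SIZE];

if (tpm_pcr_read(NULL, 0, digest) != 0)	/* NULL => default TPM chip */
	pr_err("Error Communicating to TPM chip\n");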
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 2967d497a665..29b72cd2502e 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -110,7 +110,7 @@ int __init ima_init(void)
int rc;
ima_used_chip = 0;
- rc = tpm_pcr_read(TPM_ANY_NUM, 0, pcr_i);
+ rc = tpm_pcr_read(NULL, 0, pcr_i);
if (rc == 0)
ima_used_chip = 1;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 770654694efc..061425dd6400 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/ima.h>
+#include <linux/iversion.h>
#include "ima.h"
@@ -84,10 +85,10 @@ static void ima_rdwr_violation_check(struct file *file,
struct integrity_iint_cache *iint,
int must_measure,
char **pathbuf,
- const char **pathname)
+ const char **pathname,
+ char *filename)
{
struct inode *inode = file_inode(file);
- char filename[NAME_MAX];
fmode_t mode = file->f_mode;
bool send_tomtou = false, send_writers = false;
@@ -96,10 +97,13 @@ static void ima_rdwr_violation_check(struct file *file,
if (!iint)
iint = integrity_iint_find(inode);
/* IMA_MEASURE is set from reader side */
- if (iint && (iint->flags & IMA_MEASURE))
+ if (iint && test_bit(IMA_MUST_MEASURE,
+ &iint->atomic_flags))
send_tomtou = true;
}
} else {
+ if (must_measure)
+ set_bit(IMA_MUST_MEASURE, &iint->atomic_flags);
if ((atomic_read(&inode->i_writecount) > 0) && must_measure)
send_writers = true;
}
@@ -121,21 +125,25 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
struct inode *inode, struct file *file)
{
fmode_t mode = file->f_mode;
+ bool update;
if (!(mode & FMODE_WRITE))
return;
- inode_lock(inode);
+ mutex_lock(&iint->mutex);
if (atomic_read(&inode->i_writecount) == 1) {
- if ((iint->version != inode->i_version) ||
+ update = test_and_clear_bit(IMA_UPDATE_XATTR,
+ &iint->atomic_flags);
+ if (!IS_I_VERSION(inode) ||
+ inode_cmp_iversion(inode, iint->version) ||
(iint->flags & IMA_NEW_FILE)) {
iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
iint->measured_pcrs = 0;
- if (iint->flags & IMA_APPRAISE)
+ if (update)
ima_update_xattr(iint, file);
}
}
- inode_unlock(inode);
+ mutex_unlock(&iint->mutex);
}
/**
@@ -168,7 +176,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
char *pathbuf = NULL;
char filename[NAME_MAX];
const char *pathname = NULL;
- int rc = -ENOMEM, action, must_appraise;
+ int rc = 0, action, must_appraise = 0;
int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
struct evm_ima_xattr_data *xattr_value = NULL;
int xattr_len = 0;
@@ -199,17 +207,31 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
if (action) {
iint = integrity_inode_get(inode);
if (!iint)
- goto out;
+ rc = -ENOMEM;
}
- if (violation_check) {
+ if (!rc && violation_check)
ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
- &pathbuf, &pathname);
- if (!action) {
- rc = 0;
- goto out_free;
- }
- }
+ &pathbuf, &pathname, filename);
+
+ inode_unlock(inode);
+
+ if (rc)
+ goto out;
+ if (!action)
+ goto out;
+
+ mutex_lock(&iint->mutex);
+
+ if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
+ /* reset appraisal flags if ima_inode_post_setattr was called */
+ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+ IMA_ACTION_FLAGS);
+
+ if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags))
+ /* reset all flags if ima_inode_setxattr was called */
+ iint->flags &= ~IMA_DONE_MASK;
/* Determine if already appraised/measured based on bitmask
* (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
@@ -223,11 +245,23 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
if ((action & IMA_MEASURE) && (iint->measured_pcrs & (0x1 << pcr)))
action ^= IMA_MEASURE;
+ /* a "hash" rule only sets the digital-signature and update flags */
+ if ((action & IMA_HASH) &&
+ !(test_bit(IMA_DIGSIG, &iint->atomic_flags))) {
+ xattr_len = ima_read_xattr(file_dentry(file), &xattr_value);
+ if ((xattr_value && xattr_len > 2) &&
+ (xattr_value->type == EVM_IMA_XATTR_DIGSIG))
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ iint->flags |= IMA_HASHED;
+ action ^= IMA_HASH;
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
+
/* Nothing to do, just return existing appraised status */
if (!action) {
if (must_appraise)
rc = ima_get_cache_status(iint, func);
- goto out_digsig;
+ goto out_locked;
}
template_desc = ima_template_desc_current();
@@ -240,7 +274,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
rc = ima_collect_measurement(iint, file, buf, size, hash_algo);
if (rc != 0 && rc != -EBADF && rc != -EINVAL)
- goto out_digsig;
+ goto out_locked;
if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
pathname = ima_d_path(&file->f_path, &pathbuf, filename);
@@ -248,26 +282,32 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
if (action & IMA_MEASURE)
ima_store_measurement(iint, file, pathname,
xattr_value, xattr_len, pcr);
- if (rc == 0 && (action & IMA_APPRAISE_SUBMASK))
+ if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
+ inode_lock(inode);
rc = ima_appraise_measurement(func, iint, file, pathname,
xattr_value, xattr_len, opened);
+ inode_unlock(inode);
+ }
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO))
rc = 0;
-out_digsig:
- if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG) &&
+out_locked:
+ if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) &&
!(iint->flags & IMA_NEW_FILE))
rc = -EACCES;
+ mutex_unlock(&iint->mutex);
kfree(xattr_value);
-out_free:
+out:
if (pathbuf)
__putname(pathbuf);
-out:
- inode_unlock(inode);
- if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
- return -EACCES;
+ if (must_appraise) {
+ if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE))
+ return -EACCES;
+ if (file->f_mode & FMODE_WRITE)
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
return 0;
}
@@ -366,8 +406,10 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
if (!file && read_id == READING_MODULE) {
if (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES) &&
- (ima_appraise & IMA_APPRAISE_ENFORCE))
+ (ima_appraise & IMA_APPRAISE_ENFORCE)) {
+ pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
return 0; /* We rely on module signature checking */
}
return 0;
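The net effect of the ima_main.c changes above: process_measurement() drops inode_lock() before doing any measurement work and serializes on the new per-iint mutex instead, while cross-path hints travel through atomic bit operations on iint->atomic_flags. A minimal sketch of that hand-off pattern, assuming the IMA_UPDATE_XATTR bit and iint layout from this patch (the helper names here are illustrative only):

	/* Hint side: note that an xattr update is owed; no lock needed. */
	static void hint_xattr_update(struct integrity_iint_cache *iint)
	{
		set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
	}

	/* Consumer side: take the hint exactly once, holding iint->mutex
	 * only for the non-atomic flags/version bookkeeping around it. */
	static bool take_xattr_update(struct integrity_iint_cache *iint)
	{
		bool update;

		mutex_lock(&iint->mutex);
		update = test_and_clear_bit(IMA_UPDATE_XATTR,
					    &iint->atomic_flags);
		mutex_unlock(&iint->mutex);
		return update;
	}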
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index ee4613fa5840..915f5572c6ff 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -40,6 +40,8 @@
#define APPRAISE 0x0004 /* same as IMA_APPRAISE */
#define DONT_APPRAISE 0x0008
#define AUDIT 0x0040
+#define HASH 0x0100
+#define DONT_HASH 0x0200
#define INVALID_PCR(a) (((a) < 0) || \
(a) >= (FIELD_SIZEOF(struct integrity_iint_cache, measured_pcrs) * 8))
@@ -380,8 +382,10 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
action |= entry->flags & IMA_ACTION_FLAGS;
action |= entry->action & IMA_DO_MASK;
- if (entry->action & IMA_APPRAISE)
+ if (entry->action & IMA_APPRAISE) {
action |= get_subaction(entry, func);
+ action ^= IMA_HASH;
+ }
if (entry->action & IMA_DO_MASK)
actmask &= ~(entry->action | entry->action << 1);
@@ -521,7 +525,7 @@ enum {
Opt_err = -1,
Opt_measure = 1, Opt_dont_measure,
Opt_appraise, Opt_dont_appraise,
- Opt_audit,
+ Opt_audit, Opt_hash, Opt_dont_hash,
Opt_obj_user, Opt_obj_role, Opt_obj_type,
Opt_subj_user, Opt_subj_role, Opt_subj_type,
Opt_func, Opt_mask, Opt_fsmagic,
@@ -538,6 +542,8 @@ static match_table_t policy_tokens = {
{Opt_appraise, "appraise"},
{Opt_dont_appraise, "dont_appraise"},
{Opt_audit, "audit"},
+ {Opt_hash, "hash"},
+ {Opt_dont_hash, "dont_hash"},
{Opt_obj_user, "obj_user=%s"},
{Opt_obj_role, "obj_role=%s"},
{Opt_obj_type, "obj_type=%s"},
@@ -671,6 +677,22 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
entry->action = AUDIT;
break;
+ case Opt_hash:
+ ima_log_string(ab, "action", "hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = HASH;
+ break;
+ case Opt_dont_hash:
+ ima_log_string(ab, "action", "dont_hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_HASH;
+ break;
case Opt_func:
ima_log_string(ab, "func", args[0].from);
@@ -743,7 +765,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
case Opt_fsuuid:
ima_log_string(ab, "fsuuid", args[0].from);
- if (uuid_is_null(&entry->fsuuid)) {
+ if (!uuid_is_null(&entry->fsuuid)) {
result = -EINVAL;
break;
}
@@ -1040,6 +1062,10 @@ int ima_policy_show(struct seq_file *m, void *v)
seq_puts(m, pt(Opt_dont_appraise));
if (entry->action & AUDIT)
seq_puts(m, pt(Opt_audit));
+ if (entry->action & HASH)
+ seq_puts(m, pt(Opt_hash));
+ if (entry->action & DONT_HASH)
+ seq_puts(m, pt(Opt_dont_hash));
seq_puts(m, " ");
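For context, the two new tokens plug into the existing one-action-per-rule policy grammar read through securityfs. Illustrative rules only (not part of this patch; the exact qualifiers depend on the policy being loaded):

	hash func=FILE_CHECK mask=MAY_READ
	dont_hash fsmagic=0x9fa0

The first asks IMA to write a hash xattr for files opened for read via FILE_CHECK; the second exempts procfs (fsmagic 0x9fa0) from hashing.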
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index a02a86d51102..418f35e38015 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -145,7 +145,7 @@ static int ima_pcr_extend(const u8 *hash, int pcr)
if (!ima_used_chip)
return result;
- result = tpm_pcr_extend(TPM_ANY_NUM, pcr, hash);
+ result = tpm_pcr_extend(NULL, pcr, hash);
if (result != 0)
pr_err("Error Communicating to TPM chip, result: %d\n", result);
return result;
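Every TPM_ANY_NUM call site in this series (here and in security/keys/trusted.c below) is converted the same way: the TPM core interfaces now take a struct tpm_chip pointer, and NULL selects the system default chip in place of the old numeric chip id. The calling convention, sketched (the wrapper function is illustrative):

	#include <linux/tpm.h>

	static int extend_default_chip(int pcr, const u8 *digest)
	{
		/* NULL = default chip; callers that have looked up a
		 * specific chip pass it explicitly instead. */
		return tpm_pcr_extend(NULL, pcr, digest);
	}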
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 7412d0291ab9..30db39b23804 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -377,8 +377,7 @@ int ima_restore_measurement_list(loff_t size, void *buf)
break;
if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) {
- pr_err("attempting to restore a template name \
- that is too long\n");
+ pr_err("attempting to restore a template name that is too long\n");
ret = -EINVAL;
break;
}
@@ -389,8 +388,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
template_name[hdr[HDR_TEMPLATE_NAME].len] = 0;
if (strcmp(template_name, "ima") == 0) {
- pr_err("attempting to restore an unsupported \
- template \"%s\" failed\n", template_name);
+ pr_err("attempting to restore an unsupported template \"%s\" failed\n",
+ template_name);
ret = -EINVAL;
break;
}
@@ -410,8 +409,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
&(template_desc->fields),
&(template_desc->num_fields));
if (ret < 0) {
- pr_err("attempting to restore the template fmt \"%s\" \
- failed\n", template_desc->fmt);
+ pr_err("attempting to restore the template fmt \"%s\" failed\n",
+ template_desc->fmt);
ret = -EINVAL;
break;
}
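The pr_err() rewrites above fix real output, not just style: a string literal continued with a trailing backslash keeps the next source line's leading whitespace inside the literal, so the old messages printed a run of tabs mid-sentence. Compare:

	/* Before: the continuation indentation is part of the string. */
	pr_err("attempting to restore a template name \
		that is too long\n");

	/* After: a single literal with no embedded indentation (adjacent
	 * literals would also be concatenated by the compiler). */
	pr_err("attempting to restore a template name that is too long\n");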
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index e1bf040fb110..50a8e3365df7 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -25,39 +25,50 @@
#define IMA_COLLECTED 0x00000020
#define IMA_AUDIT 0x00000040
#define IMA_AUDITED 0x00000080
+#define IMA_HASH 0x00000100
+#define IMA_HASHED 0x00000200
/* iint cache flags */
#define IMA_ACTION_FLAGS 0xff000000
#define IMA_ACTION_RULE_FLAGS 0x06000000
-#define IMA_DIGSIG 0x01000000
-#define IMA_DIGSIG_REQUIRED 0x02000000
-#define IMA_PERMIT_DIRECTIO 0x04000000
-#define IMA_NEW_FILE 0x08000000
+#define IMA_DIGSIG_REQUIRED 0x01000000
+#define IMA_PERMIT_DIRECTIO 0x02000000
+#define IMA_NEW_FILE 0x04000000
+#define EVM_IMMUTABLE_DIGSIG 0x08000000
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
- IMA_APPRAISE_SUBMASK)
+ IMA_HASH | IMA_APPRAISE_SUBMASK)
#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
- IMA_COLLECTED | IMA_APPRAISED_SUBMASK)
+ IMA_HASHED | IMA_COLLECTED | \
+ IMA_APPRAISED_SUBMASK)
/* iint subaction appraise cache flags */
-#define IMA_FILE_APPRAISE 0x00000100
-#define IMA_FILE_APPRAISED 0x00000200
-#define IMA_MMAP_APPRAISE 0x00000400
-#define IMA_MMAP_APPRAISED 0x00000800
-#define IMA_BPRM_APPRAISE 0x00001000
-#define IMA_BPRM_APPRAISED 0x00002000
-#define IMA_READ_APPRAISE 0x00004000
-#define IMA_READ_APPRAISED 0x00008000
+#define IMA_FILE_APPRAISE 0x00001000
+#define IMA_FILE_APPRAISED 0x00002000
+#define IMA_MMAP_APPRAISE 0x00004000
+#define IMA_MMAP_APPRAISED 0x00008000
+#define IMA_BPRM_APPRAISE 0x00010000
+#define IMA_BPRM_APPRAISED 0x00020000
+#define IMA_READ_APPRAISE 0x00040000
+#define IMA_READ_APPRAISED 0x00080000
#define IMA_APPRAISE_SUBMASK (IMA_FILE_APPRAISE | IMA_MMAP_APPRAISE | \
IMA_BPRM_APPRAISE | IMA_READ_APPRAISE)
#define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
IMA_BPRM_APPRAISED | IMA_READ_APPRAISED)
+/* iint cache atomic_flags */
+#define IMA_CHANGE_XATTR 0
+#define IMA_UPDATE_XATTR 1
+#define IMA_CHANGE_ATTR 2
+#define IMA_DIGSIG 3
+#define IMA_MUST_MEASURE 4
+
enum evm_ima_xattr_type {
IMA_XATTR_DIGEST = 0x01,
EVM_XATTR_HMAC,
EVM_IMA_XATTR_DIGSIG,
IMA_XATTR_DIGEST_NG,
+ EVM_XATTR_PORTABLE_DIGSIG,
IMA_XATTR_LAST
};
@@ -100,10 +111,12 @@ struct signature_v2_hdr {
/* integrity data associated with an inode */
struct integrity_iint_cache {
struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct mutex mutex; /* protects: version, flags, digest */
struct inode *inode; /* back pointer to inode in question */
u64 version; /* track inode changes */
unsigned long flags;
unsigned long measured_pcrs;
+ unsigned long atomic_flags;
enum integrity_status ima_file_status:4;
enum integrity_status ima_mmap_status:4;
enum integrity_status ima_bprm_status:4;
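Note the representation change: in iint->flags, IMA_DIGSIG was a bit mask (0x01000000), whereas in iint->atomic_flags it is a bit number (3), because set_bit()/test_bit() take bit indices rather than masks. The two idioms, sketched against the struct above (function names illustrative):

	/* Mask test on the mutex-protected word; caller holds iint->mutex. */
	static bool digsig_required(const struct integrity_iint_cache *iint)
	{
		return iint->flags & IMA_DIGSIG_REQUIRED;
	}

	/* Bit-number test on the lock-free word; no mutex required. */
	static bool digsig_seen(struct integrity_iint_cache *iint)
	{
		return test_bit(IMA_DIGSIG, &iint->atomic_flags);
	}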
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d0bccebbd3b5..41bcf57e96f2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -713,7 +713,6 @@ descend_to_keyring:
* doesn't contain any keyring pointers.
*/
shortcut = assoc_array_ptr_to_shortcut(ptr);
- smp_read_barrier_depends();
if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
goto not_this_keyring;
@@ -723,8 +722,6 @@ descend_to_keyring:
}
node = assoc_array_ptr_to_node(ptr);
- smp_read_barrier_depends();
-
ptr = node->slots[0];
if (!assoc_array_ptr_is_meta(ptr))
goto begin_node;
@@ -736,7 +733,6 @@ descend_to_node:
kdebug("descend");
if (assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
- smp_read_barrier_depends();
ptr = READ_ONCE(shortcut->next_node);
BUG_ON(!assoc_array_ptr_is_node(ptr));
}
@@ -744,7 +740,6 @@ descend_to_node:
begin_node:
kdebug("begin_node");
- smp_read_barrier_depends();
slot = 0;
ascend_to_node:
/* Go through the slots in a node */
@@ -792,14 +787,12 @@ ascend_to_node:
if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
- smp_read_barrier_depends();
ptr = READ_ONCE(shortcut->back_pointer);
slot = shortcut->parent_slot;
}
if (!ptr)
goto not_this_keyring;
node = assoc_array_ptr_to_node(ptr);
- smp_read_barrier_depends();
slot++;
/* If we've ascended to the root (zero backpointer), we must have just
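The smp_read_barrier_depends() deletions in keyring.c are safe because, as of this kernel, READ_ONCE() (and therefore rcu_dereference()) already provides the address-dependency ordering the explicit barrier used to supply, including on Alpha. The reader sequence shrinks accordingly:

	/* Before */
	ptr = READ_ONCE(shortcut->next_node);
	smp_read_barrier_depends();		/* order dependent loads */
	node = assoc_array_ptr_to_node(ptr);

	/* After: the dependency barrier is implied by READ_ONCE(). */
	ptr = READ_ONCE(shortcut->next_node);
	node = assoc_array_ptr_to_node(ptr);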
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 98aa89ff7bfd..423776682025 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -355,13 +355,12 @@ out:
* For key specific tpm requests, we will generate and send our
* own TPM command packets using the drivers send function.
*/
-static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd,
- size_t buflen)
+static int trusted_tpm_send(unsigned char *cmd, size_t buflen)
{
int rc;
dump_tpm_buf(cmd);
- rc = tpm_send(chip_num, cmd, buflen);
+ rc = tpm_send(NULL, cmd, buflen);
dump_tpm_buf(cmd);
if (rc > 0)
/* Can't return positive return codes values to keyctl */
@@ -382,10 +381,10 @@ static int pcrlock(const int pcrnum)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE);
+ ret = tpm_get_random(NULL, hash, SHA1_DIGEST_SIZE);
if (ret != SHA1_DIGEST_SIZE)
return ret;
- return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? -EINVAL : 0;
+ return tpm_pcr_extend(NULL, pcrnum, hash) ? -EINVAL : 0;
}
/*
@@ -398,7 +397,7 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
unsigned char ononce[TPM_NONCE_SIZE];
int ret;
- ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE);
+ ret = tpm_get_random(NULL, ononce, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE)
return ret;
@@ -410,7 +409,7 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
store32(tb, handle);
storebytes(tb, ononce, TPM_NONCE_SIZE);
- ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
return ret;
@@ -434,7 +433,7 @@ static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
store16(tb, TPM_TAG_RQU_COMMAND);
store32(tb, TPM_OIAP_SIZE);
store32(tb, TPM_ORD_OIAP);
- ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
return ret;
@@ -493,7 +492,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
if (ret < 0)
goto out;
- ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE);
+ ret = tpm_get_random(NULL, td->nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE)
goto out;
ordinal = htonl(TPM_ORD_SEAL);
@@ -542,7 +541,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
store8(tb, cont);
storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
- ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
goto out;
@@ -603,7 +602,7 @@ static int tpm_unseal(struct tpm_buf *tb,
ordinal = htonl(TPM_ORD_UNSEAL);
keyhndl = htonl(SRKHANDLE);
- ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE);
+ ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE) {
pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
return ret;
@@ -635,7 +634,7 @@ static int tpm_unseal(struct tpm_buf *tb,
store8(tb, cont);
storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
- ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
pr_info("trusted_key: authhmac failed (%d)\n", ret);
return ret;
@@ -748,7 +747,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
int i;
int tpm2;
- tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+ tpm2 = tpm_is_tpm2(NULL);
if (tpm2 < 0)
return tpm2;
@@ -917,7 +916,7 @@ static struct trusted_key_options *trusted_options_alloc(void)
struct trusted_key_options *options;
int tpm2;
- tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+ tpm2 = tpm_is_tpm2(NULL);
if (tpm2 < 0)
return NULL;
@@ -967,7 +966,7 @@ static int trusted_instantiate(struct key *key,
size_t key_len;
int tpm2;
- tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+ tpm2 = tpm_is_tpm2(NULL);
if (tpm2 < 0)
return tpm2;
@@ -1008,7 +1007,7 @@ static int trusted_instantiate(struct key *key,
switch (key_cmd) {
case Opt_load:
if (tpm2)
- ret = tpm_unseal_trusted(TPM_ANY_NUM, payload, options);
+ ret = tpm_unseal_trusted(NULL, payload, options);
else
ret = key_unseal(payload, options);
dump_payload(payload);
@@ -1018,13 +1017,13 @@ static int trusted_instantiate(struct key *key,
break;
case Opt_new:
key_len = payload->key_len;
- ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len);
+ ret = tpm_get_random(NULL, payload->key, key_len);
if (ret != key_len) {
pr_info("trusted_key: key_create failed (%d)\n", ret);
goto out;
}
if (tpm2)
- ret = tpm_seal_trusted(TPM_ANY_NUM, payload, options);
+ ret = tpm_seal_trusted(NULL, payload, options);
else
ret = key_seal(payload, options);
if (ret < 0)
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index 75686d53df07..e77a5e307955 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -19,8 +19,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index aaba6677ee2e..2c297b995b16 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -22,8 +22,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 33cfe5d3d6cb..8900ea5cbabf 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -867,6 +867,9 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
int index;
int rc;
+ if (!ss_initialized)
+ return 0;
+
read_lock(&policy_rwlock);
rc = -EINVAL;
@@ -1413,27 +1416,25 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
if (!scontext_len)
return -EINVAL;
+ /* Copy the string to allow changes and ensure a NUL terminator */
+ scontext2 = kmemdup_nul(scontext, scontext_len, gfp_flags);
+ if (!scontext2)
+ return -ENOMEM;
+
if (!ss_initialized) {
int i;
for (i = 1; i < SECINITSID_NUM; i++) {
- if (!strcmp(initial_sid_to_string[i], scontext)) {
+ if (!strcmp(initial_sid_to_string[i], scontext2)) {
*sid = i;
- return 0;
+ goto out;
}
}
*sid = SECINITSID_KERNEL;
- return 0;
+ goto out;
}
*sid = SECSID_NULL;
- /* Copy the string so that we can modify the copy as we parse it. */
- scontext2 = kmalloc(scontext_len + 1, gfp_flags);
- if (!scontext2)
- return -ENOMEM;
- memcpy(scontext2, scontext, scontext_len);
- scontext2[scontext_len] = 0;
-
if (force) {
/* Save another copy for storing in uninterpreted form */
rc = -ENOMEM;
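kmemdup_nul(s, len, gfp) allocates len + 1 bytes, copies len bytes, and NUL-terminates, which is exactly the triple the old code open-coded; the services.c hunk relies on nothing more. Equivalence sketch (helper names illustrative):

	#include <linux/slab.h>
	#include <linux/string.h>

	static char *dup_old(const char *s, size_t len, gfp_t gfp)
	{
		char *p = kmalloc(len + 1, gfp);

		if (!p)
			return NULL;
		memcpy(p, s, len);
		p[len] = '\0';
		return p;
	}

	static char *dup_new(const char *s, size_t len, gfp_t gfp)
	{
		return kmemdup_nul(s, len, gfp);	/* same result */
	}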
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 56e354fcdfc6..928188902901 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -452,7 +452,7 @@ int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
if (dst) {
struct dst_entry *iter;
- for (iter = dst; iter != NULL; iter = iter->child) {
+ for (iter = dst; iter != NULL; iter = xfrm_dst_child(iter)) {
struct xfrm_state *x = iter->xfrm;
if (x && selinux_authorizable_xfrm(x))
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 6a71fc7831ab..f7db791fb566 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -321,6 +321,7 @@ struct smack_known *smk_import_entry(const char *, int);
void smk_insert_entry(struct smack_known *skp);
struct smack_known *smk_find_entry(const char *);
bool smack_privileged(int cap);
+bool smack_privileged_cred(int cap, const struct cred *cred);
void smk_destroy_label_list(struct list_head *list);
/*
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 1a3004189447..9a4c0ad46518 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -623,26 +623,24 @@ struct smack_known *smack_from_secid(const u32 secid)
LIST_HEAD(smack_onlycap_list);
DEFINE_MUTEX(smack_onlycap_lock);
-/*
+/**
+ * smack_privileged_cred - are all privilege requirements met by cred
+ * @cap: The requested capability
+ * @cred: the credential to use
+ *
* Is the task privileged and allowed to be privileged
* by the onlycap rule.
*
* Returns true if the task is allowed to be privileged, false if it's not.
*/
-bool smack_privileged(int cap)
+bool smack_privileged_cred(int cap, const struct cred *cred)
{
- struct smack_known *skp = smk_of_current();
+ struct task_smack *tsp = cred->security;
+ struct smack_known *skp = tsp->smk_task;
struct smack_known_list_elem *sklep;
int rc;
- /*
- * All kernel tasks are privileged
- */
- if (unlikely(current->flags & PF_KTHREAD))
- return true;
-
- rc = cap_capable(current_cred(), &init_user_ns, cap,
- SECURITY_CAP_AUDIT);
+ rc = cap_capable(cred, &init_user_ns, cap, SECURITY_CAP_AUDIT);
if (rc)
return false;
@@ -662,3 +660,23 @@ bool smack_privileged(int cap)
return false;
}
+
+/**
+ * smack_privileged - are all privilege requirements met
+ * @cap: The requested capability
+ *
+ * Is the task privileged and allowed to be privileged
+ * by the onlycap rule.
+ *
+ * Returns true if the task is allowed to be privileged, false if it's not.
+ */
+bool smack_privileged(int cap)
+{
+ /*
+ * All kernel tasks are privileged
+ */
+ if (unlikely(current->flags & PF_KTHREAD))
+ return true;
+
+ return smack_privileged_cred(cap, current_cred());
+}
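Splitting out a cred-based helper matters for hooks such as key_permission, where the credentials under test are passed in and need not belong to current. Illustrative use, assuming a cred supplied by the hook (as in the smack_lsm.c hunk below):

	/* Privileged callers (CAP_MAC_OVERRIDE, subject to the onlycap
	 * rule) bypass the label-based key check. */
	static bool key_access_is_privileged(const struct cred *cred)
	{
		return smack_privileged_cred(CAP_MAC_OVERRIDE, cred);
	}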
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 14cc7940b36d..03fdecba93bb 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -2866,12 +2866,16 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
#endif
#ifdef SMACK_IPV6_SECMARK_LABELING
struct smack_known *rsp;
- struct socket_smack *ssp = sock->sk->sk_security;
+ struct socket_smack *ssp;
#endif
if (sock->sk == NULL)
return 0;
+#ifdef SMACK_IPV6_SECMARK_LABELING
+ ssp = sock->sk->sk_security;
+#endif
+
switch (sock->sk->sk_family) {
case PF_INET:
if (addrlen < sizeof(struct sockaddr_in))
@@ -4365,6 +4369,10 @@ static int smack_key_permission(key_ref_t key_ref,
*/
if (tkp == NULL)
return -EACCES;
+
+ if (smack_privileged_cred(CAP_MAC_OVERRIDE, cred))
+ return 0;
+
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
ad.a.u.key_struct.key = keyp->serial;
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index 0f73fe30e37a..558e3076d38c 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -458,7 +458,7 @@ void tomoyo_read_log(struct tomoyo_io_buffer *head)
*
* Returns POLLIN | POLLRDNORM when ready to read an audit log.
*/
-unsigned int tomoyo_poll_log(struct file *file, poll_table *wait)
+__poll_t tomoyo_poll_log(struct file *file, poll_table *wait)
{
if (tomoyo_log_count)
return POLLIN | POLLRDNORM;
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 25eed4b0b0e8..70c73bf66c88 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -2120,7 +2120,7 @@ static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
*
* Waits for access requests which violated policy in enforcing mode.
*/
-static unsigned int tomoyo_poll_query(struct file *file, poll_table *wait)
+static __poll_t tomoyo_poll_query(struct file *file, poll_table *wait)
{
if (!list_empty(&tomoyo_query_list))
return POLLIN | POLLRDNORM;
@@ -2453,7 +2453,7 @@ int tomoyo_open_control(const u8 type, struct file *file)
* Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
* POLLOUT | POLLWRNORM otherwise.
*/
-unsigned int tomoyo_poll_control(struct file *file, poll_table *wait)
+__poll_t tomoyo_poll_control(struct file *file, poll_table *wait)
{
struct tomoyo_io_buffer *head = file->private_data;
if (head->poll)
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 7adccdd8e36d..539bcdd30bb8 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -789,7 +789,7 @@ struct tomoyo_acl_param {
struct tomoyo_io_buffer {
void (*read) (struct tomoyo_io_buffer *);
int (*write) (struct tomoyo_io_buffer *);
- unsigned int (*poll) (struct file *file, poll_table *wait);
+ __poll_t (*poll) (struct file *file, poll_table *wait);
/* Exclusive lock for this structure. */
struct mutex io_sem;
char __user *read_user_buf;
@@ -981,8 +981,8 @@ int tomoyo_path_number_perm(const u8 operation, const struct path *path,
unsigned long number);
int tomoyo_path_perm(const u8 operation, const struct path *path,
const char *target);
-unsigned int tomoyo_poll_control(struct file *file, poll_table *wait);
-unsigned int tomoyo_poll_log(struct file *file, poll_table *wait);
+__poll_t tomoyo_poll_control(struct file *file, poll_table *wait);
+__poll_t tomoyo_poll_log(struct file *file, poll_table *wait);
int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
int addr_len);
int tomoyo_socket_connect_permission(struct socket *sock,
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index 49393c2a3f8b..fb9bf99deb35 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -157,7 +157,7 @@ static int tomoyo_release(struct inode *inode, struct file *file)
* Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
* POLLOUT | POLLWRNORM otherwise.
*/
-static unsigned int tomoyo_poll(struct file *file, poll_table *wait)
+static __poll_t tomoyo_poll(struct file *file, poll_table *wait)
{
return tomoyo_poll_control(file, wait);
}
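The __poll_t conversions across tomoyo (and the sound hunks below) are type hygiene only: poll methods return the same POLL* mask bits, but the dedicated, sparse-checked type keeps them from being mixed with plain integers. The canonical shape of a converted method, with example_waitqueue and data_ready() standing in for driver state:

	#include <linux/poll.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_waitqueue);
	static bool data_ready(void);	/* driver-specific readiness test */

	static __poll_t example_poll(struct file *file, poll_table *wait)
	{
		__poll_t mask = 0;

		poll_wait(file, &example_waitqueue, wait);
		if (data_ready())
			mask |= POLLIN | POLLRDNORM;
		return mask;
	}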
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 4490a699030b..a12b9555e910 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -396,7 +396,7 @@ static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
return -ENXIO;
}
-static inline int snd_compr_get_poll(struct snd_compr_stream *stream)
+static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
{
if (stream->direction == SND_COMPRESS_PLAYBACK)
return POLLOUT | POLLWRNORM;
@@ -404,12 +404,12 @@ static inline int snd_compr_get_poll(struct snd_compr_stream *stream)
return POLLIN | POLLRDNORM;
}
-static unsigned int snd_compr_poll(struct file *f, poll_table *wait)
+static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
{
struct snd_compr_file *data = f->private_data;
struct snd_compr_stream *stream;
size_t avail;
- int retval = 0;
+ __poll_t retval = 0;
if (snd_BUG_ON(!data))
return POLLERR;
diff --git a/sound/core/control.c b/sound/core/control.c
index 56b3e2d49c82..50fa16022f1f 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/time.h>
+#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <sound/core.h>
#include <sound/minors.h>
@@ -1129,7 +1130,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
if (size > 1024 * 128) /* sane value */
return -EINVAL;
- container = memdup_user(buf, size);
+ container = vmemdup_user(buf, size);
if (IS_ERR(container))
return PTR_ERR(container);
@@ -1137,7 +1138,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
if (!change)
change = memcmp(ue->tlv_data, container, size) != 0;
if (!change) {
- kfree(container);
+ kvfree(container);
return 0;
}
@@ -1148,7 +1149,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
mask = SNDRV_CTL_EVENT_MASK_INFO;
}
- kfree(ue->tlv_data);
+ kvfree(ue->tlv_data);
ue->tlv_data = container;
ue->tlv_data_size = size;
@@ -1197,7 +1198,7 @@ static int snd_ctl_elem_init_enum_names(struct user_element *ue)
if (ue->info.value.enumerated.names_length > 64 * 1024)
return -EINVAL;
- names = memdup_user((const void __user *)user_ptrval,
+ names = vmemdup_user((const void __user *)user_ptrval,
ue->info.value.enumerated.names_length);
if (IS_ERR(names))
return PTR_ERR(names);
@@ -1208,7 +1209,7 @@ static int snd_ctl_elem_init_enum_names(struct user_element *ue)
for (i = 0; i < ue->info.value.enumerated.items; ++i) {
name_len = strnlen(p, buf_len);
if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
- kfree(names);
+ kvfree(names);
return -EINVAL;
}
p += name_len + 1;
@@ -1225,8 +1226,8 @@ static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol)
{
struct user_element *ue = kcontrol->private_data;
- kfree(ue->tlv_data);
- kfree(ue->priv_data);
+ kvfree(ue->tlv_data);
+ kvfree(ue->priv_data);
kfree(ue);
}
@@ -1666,9 +1667,9 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
return result > 0 ? result : err;
}
-static unsigned int snd_ctl_poll(struct file *file, poll_table * wait)
+static __poll_t snd_ctl_poll(struct file *file, poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
struct snd_ctl_file *ctl;
ctl = file->private_data;
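The control.c changes pair vmemdup_user() with kvfree(): the duplicated buffer may come back kmalloc- or vmalloc-backed (these TLV and enum-name blobs can be large), so every free of such a buffer must become kvfree(), as the hunks above do consistently. Sketch, with size assumed already validated by the caller:

	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	static void *copy_blob(const void __user *buf, size_t size)
	{
		void *p = vmemdup_user(buf, size);

		if (IS_ERR(p))		/* ERR_PTR, not NULL, on failure */
			return p;
		return p;		/* later freed with kvfree(p) */
	}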
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index 8faae3d1455d..26e71cf05f1e 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -177,7 +177,7 @@ static int snd_hwdep_release(struct inode *inode, struct file * file)
return err;
}
-static unsigned int snd_hwdep_poll(struct file * file, poll_table * wait)
+static __poll_t snd_hwdep_poll(struct file * file, poll_table * wait)
{
struct snd_hwdep *hw = file->private_data;
if (hw->ops.poll)
@@ -233,8 +233,6 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
/* check whether the dsp was already loaded */
if (hw->dsp_loaded & (1 << info.index))
return -EBUSY;
- if (!access_ok(VERIFY_READ, info.image, info.length))
- return -EFAULT;
err = hw->ops.dsp_load(hw, &info);
if (err < 0)
return err;
diff --git a/sound/core/info.c b/sound/core/info.c
index bcf6a48cc70d..aa86f3f8e056 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -203,11 +203,11 @@ static ssize_t snd_info_entry_write(struct file *file, const char __user *buffer
return size;
}
-static unsigned int snd_info_entry_poll(struct file *file, poll_table *wait)
+static __poll_t snd_info_entry_poll(struct file *file, poll_table *wait)
{
struct snd_info_private_data *data = file->private_data;
struct snd_info_entry *entry = data->entry;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (entry->c.ops->poll)
return entry->c.ops->poll(entry,
diff --git a/sound/core/init.c b/sound/core/init.c
index 168ae03d3a1c..8753440c3a6e 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -344,7 +344,7 @@ static int snd_disconnect_release(struct inode *inode, struct file *file)
panic("%s(%p, %p) failed!", __func__, inode, file);
}
-static unsigned int snd_disconnect_poll(struct file * file, poll_table * wait)
+static __poll_t snd_disconnect_poll(struct file * file, poll_table * wait)
{
return POLLERR | POLLNVAL;
}
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index e49f448ee04f..3ebba9c7f86e 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -186,7 +186,7 @@ static int _snd_pcm_hw_param_mask(struct snd_pcm_hw_params *params,
{
int changed;
changed = snd_mask_refine(hw_param_mask(params, var), val);
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -233,7 +233,7 @@ static int _snd_pcm_hw_param_min(struct snd_pcm_hw_params *params,
val, open);
else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -294,7 +294,7 @@ static int _snd_pcm_hw_param_max(struct snd_pcm_hw_params *params,
val, open);
else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -455,7 +455,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm,
v = snd_pcm_hw_param_last(pcm, params, var, dir);
else
v = snd_pcm_hw_param_first(pcm, params, var, dir);
- snd_BUG_ON(v < 0);
return v;
}
@@ -500,7 +499,7 @@ static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
}
} else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -540,7 +539,7 @@ static int _snd_pcm_hw_param_setinteger(struct snd_pcm_hw_params *params,
{
int changed;
changed = snd_interval_setinteger(hw_param_interval(params, var));
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -843,7 +842,7 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
if (!(mutex_trylock(&runtime->oss.params_lock)))
return -EAGAIN;
} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
- return -EINTR;
+ return -ERESTARTSYS;
sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
params = kmalloc(sizeof(*params), GFP_KERNEL);
sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
@@ -1335,8 +1334,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
return tmp;
- mutex_lock(&runtime->oss.params_lock);
while (bytes > 0) {
+ if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+ tmp = -ERESTARTSYS;
+ break;
+ }
if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
tmp = bytes;
if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1380,14 +1382,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
xfer += tmp;
if ((substream->f_flags & O_NONBLOCK) != 0 &&
tmp != runtime->oss.period_bytes)
- break;
+ tmp = -EAGAIN;
}
- }
- mutex_unlock(&runtime->oss.params_lock);
- return xfer;
-
err:
- mutex_unlock(&runtime->oss.params_lock);
+ mutex_unlock(&runtime->oss.params_lock);
+ if (tmp < 0)
+ break;
+ if (signal_pending(current)) {
+ tmp = -ERESTARTSYS;
+ break;
+ }
+ tmp = 0;
+ }
return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}
@@ -1435,8 +1441,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
return tmp;
- mutex_lock(&runtime->oss.params_lock);
while (bytes > 0) {
+ if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+ tmp = -ERESTARTSYS;
+ break;
+ }
if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
if (runtime->oss.buffer_used == 0) {
tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1467,12 +1476,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
bytes -= tmp;
xfer += tmp;
}
- }
- mutex_unlock(&runtime->oss.params_lock);
- return xfer;
-
err:
- mutex_unlock(&runtime->oss.params_lock);
+ mutex_unlock(&runtime->oss.params_lock);
+ if (tmp < 0)
+ break;
+ if (signal_pending(current)) {
+ tmp = -ERESTARTSYS;
+ break;
+ }
+ tmp = 0;
+ }
return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}
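Both OSS transfer loops above converge on one shape: take the params mutex per chunk, interruptibly, rather than across the whole transfer, and check for pending signals between chunks, so a blocked transfer neither pins the lock nor ignores interruption. Skeleton only, with transfer_one_chunk() as an illustrative stand-in for the real copy:

	#include <linux/mutex.h>
	#include <linux/sched/signal.h>

	static ssize_t transfer_one_chunk(void);	/* stub */

	static ssize_t chunked_transfer(struct mutex *lock, size_t bytes)
	{
		ssize_t xfer = 0, tmp = 0;

		while (bytes > 0) {
			if (mutex_lock_interruptible(lock)) {
				tmp = -ERESTARTSYS;
				break;
			}
			tmp = transfer_one_chunk();
			mutex_unlock(lock);
			if (tmp <= 0)
				break;	/* error, or no progress made */
			if (signal_pending(current)) {
				tmp = -ERESTARTSYS;
				break;
			}
			bytes -= tmp;
			xfer += tmp;
		}
		return xfer > 0 ? xfer : tmp;
	}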
@@ -2673,10 +2686,10 @@ static int snd_pcm_oss_capture_ready(struct snd_pcm_substream *substream)
runtime->oss.period_frames;
}
-static unsigned int snd_pcm_oss_poll(struct file *file, poll_table * wait)
+static __poll_t snd_pcm_oss_poll(struct file *file, poll_table * wait)
{
struct snd_pcm_oss_file *pcm_oss_file;
- unsigned int mask;
+ __poll_t mask;
struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL;
pcm_oss_file = file->private_data;
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index cadc93792868..85a56af104bd 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -592,18 +592,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
snd_pcm_sframes_t frames = size;
plugin = snd_pcm_plug_first(plug);
- while (plugin && frames > 0) {
+ while (plugin) {
+ if (frames <= 0)
+ return frames;
if ((next = plugin->next) != NULL) {
snd_pcm_sframes_t frames1 = frames;
- if (plugin->dst_frames)
+ if (plugin->dst_frames) {
frames1 = plugin->dst_frames(plugin, frames);
+ if (frames1 <= 0)
+ return frames1;
+ }
if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
return err;
}
if (err != frames1) {
frames = err;
- if (plugin->src_frames)
+ if (plugin->src_frames) {
frames = plugin->src_frames(plugin, frames1);
+ if (frames <= 0)
+ return frames;
+ }
}
} else
dst_channels = NULL;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 10e7ef7a8804..a83152e7d387 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -560,7 +560,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
{
u_int64_t n = (u_int64_t) a * b;
if (c == 0) {
- snd_BUG_ON(!n);
*r = 0;
return UINT_MAX;
}
@@ -1603,7 +1602,7 @@ static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
changed = snd_interval_refine_first(hw_param_interval(params, var));
else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -1632,7 +1631,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
return changed;
if (params->rmask) {
int err = snd_pcm_hw_refine(pcm, params);
- if (snd_BUG_ON(err < 0))
+ if (err < 0)
return err;
}
return snd_pcm_hw_param_value(params, var, dir);
@@ -1649,7 +1648,7 @@ static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
changed = snd_interval_refine_last(hw_param_interval(params, var));
else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -1678,7 +1677,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
return changed;
if (params->rmask) {
int err = snd_pcm_hw_refine(pcm, params);
- if (snd_BUG_ON(err < 0))
+ if (err < 0)
return err;
}
return snd_pcm_hw_param_value(params, var, dir);
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index 9be81025372f..c4eb561d2008 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -163,13 +163,30 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
.width = 32, .phys = 32, .le = 0, .signd = 0,
.silence = { 0x69, 0x69, 0x69, 0x69 },
},
- /* FIXME: the following three formats are not defined properly yet */
+ /* FIXME: the following two formats are not defined properly yet */
[SNDRV_PCM_FORMAT_MPEG] = {
.le = -1, .signd = -1,
},
[SNDRV_PCM_FORMAT_GSM] = {
.le = -1, .signd = -1,
},
+ [SNDRV_PCM_FORMAT_S20_LE] = {
+ .width = 20, .phys = 32, .le = 1, .signd = 1,
+ .silence = {},
+ },
+ [SNDRV_PCM_FORMAT_S20_BE] = {
+ .width = 20, .phys = 32, .le = 0, .signd = 1,
+ .silence = {},
+ },
+ [SNDRV_PCM_FORMAT_U20_LE] = {
+ .width = 20, .phys = 32, .le = 1, .signd = 0,
+ .silence = { 0x00, 0x00, 0x08, 0x00 },
+ },
+ [SNDRV_PCM_FORMAT_U20_BE] = {
+ .width = 20, .phys = 32, .le = 0, .signd = 0,
+ .silence = { 0x00, 0x08, 0x00, 0x00 },
+ },
+ /* FIXME: the following format is not defined properly yet */
[SNDRV_PCM_FORMAT_SPECIAL] = {
.le = -1, .signd = -1,
},
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a4d92e46c459..51104df924e1 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2580,7 +2580,7 @@ static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
return ret < 0 ? ret : frames;
}
-/* decrease the appl_ptr; returns the processed frames or a negative error */
+/* decrease the appl_ptr; returns the processed frames or zero for error */
static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
snd_pcm_uframes_t frames,
snd_pcm_sframes_t avail)
@@ -2597,7 +2597,12 @@ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
if (appl_ptr < 0)
appl_ptr += runtime->boundary;
ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
- return ret < 0 ? ret : frames;
+ /* NOTE: we return zero for errors because PulseAudio gets depressed
+ * upon receiving an error from the rewind ioctl and stops processing
+ * entirely. Returning zero means that no rewind was done, so
+ * it is not strictly wrong to answer like that.
+ */

+ return ret < 0 ? 0 : frames;
}
static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
@@ -3130,12 +3135,12 @@ static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
return result;
}
-static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
+static __poll_t snd_pcm_playback_poll(struct file *file, poll_table * wait)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
- unsigned int mask;
+ __poll_t mask;
snd_pcm_uframes_t avail;
pcm_file = file->private_data;
@@ -3169,12 +3174,12 @@ static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
return mask;
}
-static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
+static __poll_t snd_pcm_capture_poll(struct file *file, poll_table * wait)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
- unsigned int mask;
+ __poll_t mask;
snd_pcm_uframes_t avail;
pcm_file = file->private_data;
@@ -3441,7 +3446,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
- struct snd_pcm_runtime *runtime = substream->runtime;;
+ struct snd_pcm_runtime *runtime = substream->runtime;
area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index f055ca10bbc1..fae21311723f 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1366,11 +1366,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
return result;
}
-static unsigned int snd_rawmidi_poll(struct file *file, poll_table * wait)
+static __poll_t snd_rawmidi_poll(struct file *file, poll_table * wait)
{
struct snd_rawmidi_file *rfile;
struct snd_rawmidi_runtime *runtime;
- unsigned int mask;
+ __poll_t mask;
rfile = file->private_data;
if (rfile->input != NULL) {
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
index 8cdf489df80e..5f64d0d88320 100644
--- a/sound/core/seq/oss/seq_oss.c
+++ b/sound/core/seq/oss/seq_oss.c
@@ -59,7 +59,7 @@ static int odev_release(struct inode *inode, struct file *file);
static ssize_t odev_read(struct file *file, char __user *buf, size_t count, loff_t *offset);
static ssize_t odev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset);
static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-static unsigned int odev_poll(struct file *file, poll_table * wait);
+static __poll_t odev_poll(struct file *file, poll_table * wait);
/*
@@ -197,7 +197,7 @@ static long odev_ioctl_compat(struct file *file, unsigned int cmd,
#define odev_ioctl_compat NULL
#endif
-static unsigned int
+static __poll_t
odev_poll(struct file *file, poll_table * wait)
{
struct seq_oss_devinfo *dp;
diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
index afa007c0cc2d..2d0e9eaf13aa 100644
--- a/sound/core/seq/oss/seq_oss_device.h
+++ b/sound/core/seq/oss/seq_oss_device.h
@@ -124,7 +124,7 @@ void snd_seq_oss_release(struct seq_oss_devinfo *dp);
int snd_seq_oss_ioctl(struct seq_oss_devinfo *dp, unsigned int cmd, unsigned long arg);
int snd_seq_oss_read(struct seq_oss_devinfo *dev, char __user *buf, int count);
int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int count, struct file *opt);
-unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
+__poll_t snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
diff --git a/sound/core/seq/oss/seq_oss_rw.c b/sound/core/seq/oss/seq_oss_rw.c
index 6a7b6aceeca9..c538e78ca310 100644
--- a/sound/core/seq/oss/seq_oss_rw.c
+++ b/sound/core/seq/oss/seq_oss_rw.c
@@ -196,10 +196,10 @@ insert_queue(struct seq_oss_devinfo *dp, union evrec *rec, struct file *opt)
* select / poll
*/
-unsigned int
+__poll_t
snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
/* input */
if (dp->readq && is_read_mode(dp->file_mode)) {
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 6e22eea72654..b611deef81f5 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
rwlock_init(&client->ports_lock);
mutex_init(&client->ports_mutex);
INIT_LIST_HEAD(&client->ports_list_head);
+ mutex_init(&client->ioctl_mutex);
/* find free slot in the client table */
spin_lock_irqsave(&clients_lock, flags);
@@ -1086,10 +1087,10 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
/*
* handle polling
*/
-static unsigned int snd_seq_poll(struct file *file, poll_table * wait)
+static __poll_t snd_seq_poll(struct file *file, poll_table * wait)
{
struct snd_seq_client *client = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
/* check client structures are in place */
if (snd_BUG_ON(!client))
@@ -2130,7 +2131,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
}
+ mutex_lock(&client->ioctl_mutex);
err = handler->func(client, &buf);
+ mutex_unlock(&client->ioctl_mutex);
if (err >= 0) {
/* Some commands includes a bug in 'dir' field. */
if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h
index c6614254ef8a..0611e1e0ed5b 100644
--- a/sound/core/seq/seq_clientmgr.h
+++ b/sound/core/seq/seq_clientmgr.h
@@ -61,6 +61,7 @@ struct snd_seq_client {
struct list_head ports_list_head;
rwlock_t ports_lock;
struct mutex ports_mutex;
+ struct mutex ioctl_mutex;
int convert32; /* convert 32->64bit */
/* output pool */
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 79e0c5604ef8..0428e9061b47 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -497,9 +497,7 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
return -EPERM;
}
- result = snd_seq_timer_set_tempo(q->timer, info->tempo);
- if (result >= 0)
- result = snd_seq_timer_set_ppq(q->timer, info->ppq);
+ result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq);
if (result >= 0 && info->skew_base > 0)
result = snd_seq_timer_set_skew(q->timer, info->skew_value,
info->skew_base);
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index b80985fbc334..23167578231f 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -191,14 +191,15 @@ int snd_seq_timer_set_tempo(struct snd_seq_timer * tmr, int tempo)
return 0;
}
-/* set current ppq */
-int snd_seq_timer_set_ppq(struct snd_seq_timer * tmr, int ppq)
+/* set current tempo and ppq in one shot */
+int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq)
{
+ int changed;
unsigned long flags;
if (snd_BUG_ON(!tmr))
return -EINVAL;
- if (ppq <= 0)
+ if (tempo <= 0 || ppq <= 0)
return -EINVAL;
spin_lock_irqsave(&tmr->lock, flags);
if (tmr->running && (ppq != tmr->ppq)) {
@@ -208,9 +209,11 @@ int snd_seq_timer_set_ppq(struct snd_seq_timer * tmr, int ppq)
pr_debug("ALSA: seq: cannot change ppq of a running timer\n");
return -EBUSY;
}
-
+ changed = (tempo != tmr->tempo) || (ppq != tmr->ppq);
+ tmr->tempo = tempo;
tmr->ppq = ppq;
- snd_seq_timer_set_tick_resolution(tmr);
+ if (changed)
+ snd_seq_timer_set_tick_resolution(tmr);
spin_unlock_irqrestore(&tmr->lock, flags);
return 0;
}
diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
index 9506b661fe5b..62f390671096 100644
--- a/sound/core/seq/seq_timer.h
+++ b/sound/core/seq/seq_timer.h
@@ -131,7 +131,7 @@ int snd_seq_timer_stop(struct snd_seq_timer *tmr);
int snd_seq_timer_start(struct snd_seq_timer *tmr);
int snd_seq_timer_continue(struct snd_seq_timer *tmr);
int snd_seq_timer_set_tempo(struct snd_seq_timer *tmr, int tempo);
-int snd_seq_timer_set_ppq(struct snd_seq_timer *tmr, int ppq);
+int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
diff --git a/sound/core/timer.c b/sound/core/timer.c
index ee09dace8bb1..da05e314917f 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -2072,9 +2072,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
return result > 0 ? result : err;
}
-static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
+static __poll_t snd_timer_user_poll(struct file *file, poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
struct snd_timer_user *tu;
tu = file->private_data;
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index afac886ffa28..0333143a1fa7 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -39,6 +39,7 @@
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
+#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/initval.h>
@@ -305,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static void params_change_substream(struct loopback_pcm *dpcm,
- struct snd_pcm_runtime *runtime)
-{
- struct snd_pcm_runtime *dst_runtime;
-
- if (dpcm == NULL || dpcm->substream == NULL)
- return;
- dst_runtime = dpcm->substream->runtime;
- if (dst_runtime == NULL)
- return;
- dst_runtime->hw = dpcm->cable->hw;
-}
-
static void params_change(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -329,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream)
cable->hw.rate_max = runtime->rate;
cable->hw.channels_min = runtime->channels;
cable->hw.channels_max = runtime->channels;
- params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK],
- runtime);
- params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE],
- runtime);
}
static int loopback_prepare(struct snd_pcm_substream *substream)
@@ -620,26 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream)
static int rule_format(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
+ struct loopback_pcm *dpcm = rule->private;
+ struct loopback_cable *cable = dpcm->cable;
+ struct snd_mask m;
- struct snd_pcm_hardware *hw = rule->private;
- struct snd_mask *maskp = hw_param_mask(params, rule->var);
-
- maskp->bits[0] &= (u_int32_t)hw->formats;
- maskp->bits[1] &= (u_int32_t)(hw->formats >> 32);
- memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
- if (! maskp->bits[0] && ! maskp->bits[1])
- return -EINVAL;
- return 0;
+ snd_mask_none(&m);
+ mutex_lock(&dpcm->loopback->cable_lock);
+ m.bits[0] = (u_int32_t)cable->hw.formats;
+ m.bits[1] = (u_int32_t)(cable->hw.formats >> 32);
+ mutex_unlock(&dpcm->loopback->cable_lock);
+ return snd_mask_refine(hw_param_mask(params, rule->var), &m);
}
static int rule_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
- struct snd_pcm_hardware *hw = rule->private;
+ struct loopback_pcm *dpcm = rule->private;
+ struct loopback_cable *cable = dpcm->cable;
struct snd_interval t;
- t.min = hw->rate_min;
- t.max = hw->rate_max;
+ mutex_lock(&dpcm->loopback->cable_lock);
+ t.min = cable->hw.rate_min;
+ t.max = cable->hw.rate_max;
+ mutex_unlock(&dpcm->loopback->cable_lock);
t.openmin = t.openmax = 0;
t.integer = 0;
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
@@ -648,22 +635,44 @@ static int rule_rate(struct snd_pcm_hw_params *params,
static int rule_channels(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
- struct snd_pcm_hardware *hw = rule->private;
+ struct loopback_pcm *dpcm = rule->private;
+ struct loopback_cable *cable = dpcm->cable;
struct snd_interval t;
- t.min = hw->channels_min;
- t.max = hw->channels_max;
+ mutex_lock(&dpcm->loopback->cable_lock);
+ t.min = cable->hw.channels_min;
+ t.max = cable->hw.channels_max;
+ mutex_unlock(&dpcm->loopback->cable_lock);
t.openmin = t.openmax = 0;
t.integer = 0;
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
+static void free_cable(struct snd_pcm_substream *substream)
+{
+ struct loopback *loopback = substream->private_data;
+ int dev = get_cable_index(substream);
+ struct loopback_cable *cable;
+
+ cable = loopback->cables[substream->number][dev];
+ if (!cable)
+ return;
+ if (cable->streams[!substream->stream]) {
+ /* other stream is still alive */
+ cable->streams[substream->stream] = NULL;
+ } else {
+ /* free the cable */
+ loopback->cables[substream->number][dev] = NULL;
+ kfree(cable);
+ }
+}
+
static int loopback_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct loopback *loopback = substream->private_data;
struct loopback_pcm *dpcm;
- struct loopback_cable *cable;
+ struct loopback_cable *cable = NULL;
int err = 0;
int dev = get_cable_index(substream);
@@ -681,7 +690,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
if (!cable) {
cable = kzalloc(sizeof(*cable), GFP_KERNEL);
if (!cable) {
- kfree(dpcm);
err = -ENOMEM;
goto unlock;
}
@@ -699,19 +707,19 @@ static int loopback_open(struct snd_pcm_substream *substream)
/* are cached -> they do not reflect the actual state */
err = snd_pcm_hw_rule_add(runtime, 0,
SNDRV_PCM_HW_PARAM_FORMAT,
- rule_format, &runtime->hw,
+ rule_format, dpcm,
SNDRV_PCM_HW_PARAM_FORMAT, -1);
if (err < 0)
goto unlock;
err = snd_pcm_hw_rule_add(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
- rule_rate, &runtime->hw,
+ rule_rate, dpcm,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
goto unlock;
err = snd_pcm_hw_rule_add(runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
- rule_channels, &runtime->hw,
+ rule_channels, dpcm,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
if (err < 0)
goto unlock;
@@ -723,6 +731,10 @@ static int loopback_open(struct snd_pcm_substream *substream)
else
runtime->hw = cable->hw;
unlock:
+ if (err < 0) {
+ free_cable(substream);
+ kfree(dpcm);
+ }
mutex_unlock(&loopback->cable_lock);
return err;
}
@@ -731,20 +743,10 @@ static int loopback_close(struct snd_pcm_substream *substream)
{
struct loopback *loopback = substream->private_data;
struct loopback_pcm *dpcm = substream->runtime->private_data;
- struct loopback_cable *cable;
- int dev = get_cable_index(substream);
loopback_timer_stop(dpcm);
mutex_lock(&loopback->cable_lock);
- cable = loopback->cables[substream->number][dev];
- if (cable->streams[!substream->stream]) {
- /* other stream is still alive */
- cable->streams[substream->stream] = NULL;
- } else {
- /* free the cable */
- loopback->cables[substream->number][dev] = NULL;
- kfree(cable);
- }
+ free_cable(substream);
mutex_unlock(&loopback->cable_lock);
return 0;
}
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 7b2b1f766b00..8fb9a54fe8ba 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -375,17 +375,9 @@ struct dummy_hrtimer_pcm {
ktime_t period_time;
atomic_t running;
struct hrtimer timer;
- struct tasklet_struct tasklet;
struct snd_pcm_substream *substream;
};
-static void dummy_hrtimer_pcm_elapsed(unsigned long priv)
-{
- struct dummy_hrtimer_pcm *dpcm = (struct dummy_hrtimer_pcm *)priv;
- if (atomic_read(&dpcm->running))
- snd_pcm_period_elapsed(dpcm->substream);
-}
-
static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
{
struct dummy_hrtimer_pcm *dpcm;
@@ -393,7 +385,14 @@ static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer);
if (!atomic_read(&dpcm->running))
return HRTIMER_NORESTART;
- tasklet_schedule(&dpcm->tasklet);
+ /*
+ * In cases of XRUN and draining, this calls .trigger to stop PCM
+ * substream.
+ */
+ snd_pcm_period_elapsed(dpcm->substream);
+ if (!atomic_read(&dpcm->running))
+ return HRTIMER_NORESTART;
+
hrtimer_forward_now(timer, dpcm->period_time);
return HRTIMER_RESTART;
}
@@ -403,7 +402,7 @@ static int dummy_hrtimer_start(struct snd_pcm_substream *substream)
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer);
- hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL);
+ hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL_SOFT);
atomic_set(&dpcm->running, 1);
return 0;
}
@@ -413,14 +412,14 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
atomic_set(&dpcm->running, 0);
- hrtimer_cancel(&dpcm->timer);
+ if (!hrtimer_callback_running(&dpcm->timer))
+ hrtimer_cancel(&dpcm->timer);
return 0;
}
static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
{
hrtimer_cancel(&dpcm->timer);
- tasklet_kill(&dpcm->tasklet);
}
static snd_pcm_uframes_t
@@ -465,12 +464,10 @@ static int dummy_hrtimer_create(struct snd_pcm_substream *substream)
if (!dpcm)
return -ENOMEM;
substream->runtime->private_data = dpcm;
- hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dpcm->timer.function = dummy_hrtimer_callback;
dpcm->substream = substream;
atomic_set(&dpcm->running, 0);
- tasklet_init(&dpcm->tasklet, dummy_hrtimer_pcm_elapsed,
- (unsigned long)dpcm);
return 0;
}
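The dummy-driver rework relies on HRTIMER_MODE_REL_SOFT, new in this kernel: the callback then runs in softirq context instead of hard-IRQ context, so snd_pcm_period_elapsed() can be called inline and the extra bounce through a tasklet disappears. The resulting shape, with period and do_period_work() as illustrative stand-ins:

	#include <linux/hrtimer.h>

	static ktime_t period;			/* set at prepare time */
	static void do_period_work(void);	/* stub for the real work */

	static enum hrtimer_restart tick(struct hrtimer *t)
	{
		do_period_work();		/* softirq context: safe */
		hrtimer_forward_now(t, period);
		return HRTIMER_RESTART;
	}

	static void tick_setup(struct hrtimer *t)
	{
		hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
		t->function = tick;
	}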
@@ -830,7 +827,7 @@ static int snd_dummy_capsrc_put(struct snd_kcontrol *kcontrol, struct snd_ctl_el
static int snd_dummy_iobox_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *info)
{
- const char *const names[] = { "None", "CD Player" };
+ static const char *const names[] = { "None", "CD Player" };
return snd_ctl_enum_info(info, 1, 2, names);
}
diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
index 2b367c21b80c..83e791810c52 100644
--- a/sound/firewire/bebob/bebob_hwdep.c
+++ b/sound/firewire/bebob/bebob_hwdep.c
@@ -53,11 +53,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int
+static __poll_t
hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
{
struct snd_bebob *bebob = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &bebob->hwdep_wait, wait);
diff --git a/sound/firewire/dice/dice-hwdep.c b/sound/firewire/dice/dice-hwdep.c
index a4dc02a86f12..7a8af0f91c96 100644
--- a/sound/firewire/dice/dice-hwdep.c
+++ b/sound/firewire/dice/dice-hwdep.c
@@ -52,11 +52,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_dice *dice = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &dice->hwdep_wait, wait);
diff --git a/sound/firewire/digi00x/digi00x-hwdep.c b/sound/firewire/digi00x/digi00x-hwdep.c
index 463c6b8e864d..a084c2a834db 100644
--- a/sound/firewire/digi00x/digi00x-hwdep.c
+++ b/sound/firewire/digi00x/digi00x-hwdep.c
@@ -60,11 +60,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_dg00x *dg00x = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &dg00x->hwdep_wait, wait);
diff --git a/sound/firewire/fireface/ff-hwdep.c b/sound/firewire/fireface/ff-hwdep.c
index 3ee04b054585..68e273fa5d23 100644
--- a/sound/firewire/fireface/ff-hwdep.c
+++ b/sound/firewire/fireface/ff-hwdep.c
@@ -52,11 +52,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_ff *ff = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &ff->hwdep_wait, wait);
diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index a3a3a16f5e08..e0eff9328ee1 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -184,11 +184,11 @@ end:
return count;
}
-static unsigned int
+static __poll_t
hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
{
struct snd_efw *efw = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &efw->hwdep_wait, wait);
diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
index b87ccb69d597..7b6a086866e7 100644
--- a/sound/firewire/motu/motu-hwdep.c
+++ b/sound/firewire/motu/motu-hwdep.c
@@ -59,11 +59,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_motu *motu = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &motu->hwdep_wait, wait);
diff --git a/sound/firewire/oxfw/oxfw-hwdep.c b/sound/firewire/oxfw/oxfw-hwdep.c
index ff2687ad0460..6c1828aff672 100644
--- a/sound/firewire/oxfw/oxfw-hwdep.c
+++ b/sound/firewire/oxfw/oxfw-hwdep.c
@@ -52,11 +52,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_oxfw *oxfw = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &oxfw->hwdep_wait, wait);
diff --git a/sound/firewire/tascam/tascam-hwdep.c b/sound/firewire/tascam/tascam-hwdep.c
index 8c4437d0051d..37b21647b471 100644
--- a/sound/firewire/tascam/tascam-hwdep.c
+++ b/sound/firewire/tascam/tascam-hwdep.c
@@ -50,11 +50,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_tscm *tscm = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &tscm->hwdep_wait, wait);
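
The string of firewire hwdep hunks above (and the dmasound one further below) belong to the tree-wide annotation of poll handlers with the bitwise __poll_t type, which lets sparse catch poll bits being mixed with plain integers. The shape each converted handler ends up with, sketched for a hypothetical driver and using the EPOLL* constants that pair with __poll_t:

static __poll_t my_hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
			      poll_table *wait)
{
	struct my_dev *dev = hwdep->private_data;
	__poll_t events;

	poll_wait(file, &dev->hwdep_wait, wait);

	spin_lock_irq(&dev->lock);
	if (dev->pending)
		events = EPOLLIN | EPOLLRDNORM;
	else
		events = 0;
	spin_unlock_irq(&dev->lock);

	return events;
}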
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 31b510c5ca0b..0daf31383084 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -146,7 +146,7 @@ int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *ebus, int addr)
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev)
return -ENOMEM;
- hdev = &edev->hdac;
+ hdev = &edev->hdev;
edev->ebus = ebus;
snprintf(name, sizeof(name), "ehdaudio%dD%d", ebus->idx, addr);
diff --git a/sound/isa/gus/gus_dma.c b/sound/isa/gus/gus_dma.c
index 36c27c832360..7f95f452f106 100644
--- a/sound/isa/gus/gus_dma.c
+++ b/sound/isa/gus/gus_dma.c
@@ -201,10 +201,9 @@ int snd_gf1_dma_transfer_block(struct snd_gus_card * gus,
struct snd_gf1_dma_block *block;
block = kmalloc(sizeof(*block), atomic ? GFP_ATOMIC : GFP_KERNEL);
- if (block == NULL) {
- snd_printk(KERN_ERR "gf1: DMA transfer failure; not enough memory\n");
+ if (!block)
return -ENOMEM;
- }
+
*block = *__block;
block->next = NULL;
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index 37d378a26a50..c8904e732aaa 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -814,7 +814,7 @@ static int hal2_create(struct snd_card *card, struct snd_hal2 **rchip)
struct hpc3_regs *hpc3 = hpc3c0;
int err;
- hal2 = kzalloc(sizeof(struct snd_hal2), GFP_KERNEL);
+ hal2 = kzalloc(sizeof(*hal2), GFP_KERNEL);
if (!hal2)
return -ENOMEM;
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 71c942162c25..9fb68b35de5a 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -840,7 +840,7 @@ static int snd_sgio2audio_create(struct snd_card *card,
if (!(readq(&mace->perif.audio.control) & AUDIO_CONTROL_CODEC_PRESENT))
return -ENOENT;
- chip = kzalloc(sizeof(struct snd_sgio2audio), GFP_KERNEL);
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c
index fb3bbceb1fef..6b57f8aac1b7 100644
--- a/sound/oss/dmasound/dmasound_core.c
+++ b/sound/oss/dmasound/dmasound_core.c
@@ -670,9 +670,9 @@ static ssize_t sq_write(struct file *file, const char __user *src, size_t uLeft,
return uUsed < 0? uUsed: uWritten;
}
-static unsigned int sq_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t sq_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int retVal;
if (write_sq.locked == 0) {
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 7f3b5ed81995..f7a492c382d9 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -88,7 +88,6 @@ config SND_HDA_PATCH_LOADER
config SND_HDA_CODEC_REALTEK
tristate "Build Realtek HD-audio codec support"
select SND_HDA_GENERIC
- select INPUT
help
Say Y or M here to include Realtek HD-audio codec support in
snd-hda-intel driver, such as ALC880.
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 80bbadc83721..d6e079f4ec09 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
/*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
/* codec SSID */
+ SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8fd2d9c62c96..23475888192b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3154,11 +3154,13 @@ static void alc256_shutup(struct hda_codec *codec)
if (hp_pin_sense)
msleep(85);
+ /* 3k pull low control for Headset jack. */
+ /* NOTE: call this before clearing the pin, otherwise codec stalls */
+ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+
snd_hda_codec_write(codec, hp_pin, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
- alc_update_coef_idx(codec, 0x46, 0, 3 << 12); /* 3k pull low control for Headset jack. */
-
if (hp_pin_sense)
msleep(100);
@@ -3166,6 +3168,93 @@ static void alc256_shutup(struct hda_codec *codec)
snd_hda_shutup_pins(codec);
}
+static void alc225_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp1_pin_sense, hp2_pin_sense;
+
+ if (!hp_pin)
+ return;
+
+ msleep(30);
+
+ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(2);
+
+ alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x16, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(85);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x16, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(100);
+
+ alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+	alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
+}
+
+static void alc225_shutup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp1_pin_sense, hp2_pin_sense;
+
+ if (!hp_pin) {
+ alc269_shutup(codec);
+ return;
+ }
+
+ /* 3k pull low control for Headset jack. */
+ alc_update_coef_idx(codec, 0x4a, 0, 3 << 10);
+
+ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(2);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x16, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(85);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x16, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(100);
+
+ alc_auto_setup_eapd(codec, false);
+ snd_hda_shutup_pins(codec);
+}
+
static void alc_default_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -3723,6 +3812,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
}
}
+#if IS_REACHABLE(CONFIG_INPUT)
static void gpio2_mic_hotkey_event(struct hda_codec *codec,
struct hda_jack_callback *event)
{
@@ -3855,6 +3945,10 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
spec->kb_dev = NULL;
}
}
+#else /* !CONFIG_INPUT */
+#define alc280_fixup_hp_gpio2_mic_hotkey NULL
+#define alc233_fixup_lenovo_line2_mic_hotkey NULL
+#endif /* !CONFIG_INPUT */
static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
@@ -3994,8 +4088,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0668);
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
break;
@@ -4117,8 +4214,11 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
alc_process_coef_fw(codec, coef0688);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
@@ -4189,8 +4289,11 @@ static void alc_headset_mode_default(struct hda_codec *codec)
};
switch (codec->core.vendor_id) {
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
@@ -4332,8 +4435,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0688);
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
val = alc_read_coef_idx(codec, 0x45);
if (val & (1 << 9))
@@ -4436,8 +4542,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0688);
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
break;
@@ -4566,9 +4675,18 @@ static void alc_determine_headset_type(struct hda_codec *codec)
val = alc_read_coef_idx(codec, 0xbe);
is_ctia = (val & 0x1c02) == 0x1c02;
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ msleep(80);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000);
val = alc_read_coef_idx(codec, 0x45);
@@ -4588,6 +4706,12 @@ static void alc_determine_headset_type(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6);
alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4);
alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ msleep(80);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
break;
case 0x10ec0867:
is_ctia = true;
@@ -6196,6 +6320,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+ SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6919,16 +7044,17 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0285:
case 0x10ec0289:
spec->codec_variant = ALC269_TYPE_ALC215;
+ spec->shutup = alc225_shutup;
+ spec->init_hook = alc225_init;
spec->gen.mixer_nid = 0;
break;
case 0x10ec0225:
case 0x10ec0295:
- spec->codec_variant = ALC269_TYPE_ALC225;
- spec->gen.mixer_nid = 0; /* no loopback on ALC225 ALC295 */
- break;
case 0x10ec0299:
spec->codec_variant = ALC269_TYPE_ALC225;
- spec->gen.mixer_nid = 0; /* no loopback on ALC299 */
+ spec->shutup = alc225_shutup;
+ spec->init_hook = alc225_init;
+ spec->gen.mixer_nid = 0; /* no loopback on ALC225, ALC295 and ALC299 */
break;
case 0x10ec0234:
case 0x10ec0274:
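
Taken together, the Kconfig hunk (dropping the forced `select INPUT`) and the IS_REACHABLE(CONFIG_INPUT) guard above let the Realtek hotkey fixups degrade gracefully: IS_REACHABLE() is true when the input core is built in, or when it is modular and the referencing code is modular too; otherwise the fixup hooks compile to NULL, which the fixup table tolerates as the #defines above rely on. In outline, with a hypothetical fixup name:

#if IS_REACHABLE(CONFIG_INPUT)
static void my_mic_hotkey_fixup(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	/* may safely register an input device and report key events */
}
#else
/* with a NULL hook, the fixup-table entry becomes a no-op */
#define my_mic_hotkey_fixup NULL
#endif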
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index 2697402b5195..8dabd4d0211d 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -965,13 +965,32 @@ static int prodigy_hd2_add_controls(struct snd_ice1712 *ice)
return 0;
}
+static void wm8766_init(struct snd_ice1712 *ice)
+{
+ static unsigned short wm8766_inits[] = {
+ WM8766_RESET, 0x0000,
+ WM8766_DAC_CTRL, 0x0120,
+ WM8766_INT_CTRL, 0x0022, /* I2S Normal Mode, 24 bit */
+ WM8766_DAC_CTRL2, 0x0001,
+ WM8766_DAC_CTRL3, 0x0080,
+ WM8766_LDA1, 0x0100,
+ WM8766_LDA2, 0x0100,
+ WM8766_LDA3, 0x0100,
+ WM8766_RDA1, 0x0100,
+ WM8766_RDA2, 0x0100,
+ WM8766_RDA3, 0x0100,
+ WM8766_MUTE1, 0x0000,
+ WM8766_MUTE2, 0x0000,
+ };
+ unsigned int i;
-/*
- * initialize the chip
- */
-static int prodigy_hifi_init(struct snd_ice1712 *ice)
+ for (i = 0; i < ARRAY_SIZE(wm8766_inits); i += 2)
+ wm8766_spi_write(ice, wm8766_inits[i], wm8766_inits[i + 1]);
+}
+
+static void wm8776_init(struct snd_ice1712 *ice)
{
- static unsigned short wm_inits[] = {
+ static unsigned short wm8776_inits[] = {
/* These come first to reduce init pop noise */
WM_ADC_MUX, 0x0003, /* ADC mute */
/* 0x00c0 replaced by 0x0003 */
@@ -982,7 +1001,76 @@ static int prodigy_hifi_init(struct snd_ice1712 *ice)
WM_POWERDOWN, 0x0008, /* All power-up except HP */
WM_RESET, 0x0000, /* reset */
};
- static unsigned short wm_inits2[] = {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm8776_inits); i += 2)
+ wm_put(ice, wm8776_inits[i], wm8776_inits[i + 1]);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int prodigy_hifi_resume(struct snd_ice1712 *ice)
+{
+ static unsigned short wm8776_reinit_registers[] = {
+ WM_MASTER_CTRL,
+ WM_DAC_INT,
+ WM_ADC_INT,
+ WM_OUT_MUX,
+ WM_HP_ATTEN_L,
+ WM_HP_ATTEN_R,
+ WM_PHASE_SWAP,
+ WM_DAC_CTRL2,
+ WM_ADC_ATTEN_L,
+ WM_ADC_ATTEN_R,
+ WM_ALC_CTRL1,
+ WM_ALC_CTRL2,
+ WM_ALC_CTRL3,
+ WM_NOISE_GATE,
+ WM_ADC_MUX,
+ /* no DAC attenuation here */
+ };
+ struct prodigy_hifi_spec *spec = ice->spec;
+ int i, ch;
+
+ mutex_lock(&ice->gpio_mutex);
+
+ /* reinitialize WM8776 and re-apply old register values */
+ wm8776_init(ice);
+ schedule_timeout_uninterruptible(1);
+ for (i = 0; i < ARRAY_SIZE(wm8776_reinit_registers); i++)
+ wm_put(ice, wm8776_reinit_registers[i],
+ wm_get(ice, wm8776_reinit_registers[i]));
+
+ /* reinitialize WM8766 and re-apply volumes for all DACs */
+ wm8766_init(ice);
+ for (ch = 0; ch < 2; ch++) {
+ wm_set_vol(ice, WM_DAC_ATTEN_L + ch,
+ spec->vol[2 + ch], spec->master[ch]);
+
+ wm8766_set_vol(ice, WM8766_LDA1 + ch,
+ spec->vol[0 + ch], spec->master[ch]);
+
+ wm8766_set_vol(ice, WM8766_LDA2 + ch,
+ spec->vol[4 + ch], spec->master[ch]);
+
+ wm8766_set_vol(ice, WM8766_LDA3 + ch,
+ spec->vol[6 + ch], spec->master[ch]);
+ }
+
+ /* unmute WM8776 DAC */
+ wm_put(ice, WM_DAC_MUTE, 0x00);
+ wm_put(ice, WM_DAC_CTRL1, 0x90);
+
+ mutex_unlock(&ice->gpio_mutex);
+ return 0;
+}
+#endif
+
+/*
+ * initialize the chip
+ */
+static int prodigy_hifi_init(struct snd_ice1712 *ice)
+{
+ static unsigned short wm8776_defaults[] = {
WM_MASTER_CTRL, 0x0022, /* 256fs, slave mode */
WM_DAC_INT, 0x0022, /* I2S, normal polarity, 24bit */
WM_ADC_INT, 0x0022, /* I2S, normal polarity, 24bit */
@@ -1010,22 +1098,6 @@ static int prodigy_hifi_init(struct snd_ice1712 *ice)
WM_DAC_MUTE, 0x0000, /* DAC unmute */
WM_ADC_MUX, 0x0003, /* ADC unmute, both CD/Line On */
};
- static unsigned short wm8766_inits[] = {
- WM8766_RESET, 0x0000,
- WM8766_DAC_CTRL, 0x0120,
- WM8766_INT_CTRL, 0x0022, /* I2S Normal Mode, 24 bit */
- WM8766_DAC_CTRL2, 0x0001,
- WM8766_DAC_CTRL3, 0x0080,
- WM8766_LDA1, 0x0100,
- WM8766_LDA2, 0x0100,
- WM8766_LDA3, 0x0100,
- WM8766_RDA1, 0x0100,
- WM8766_RDA2, 0x0100,
- WM8766_RDA3, 0x0100,
- WM8766_MUTE1, 0x0000,
- WM8766_MUTE2, 0x0000,
- };
-
struct prodigy_hifi_spec *spec;
unsigned int i;
@@ -1052,16 +1124,17 @@ static int prodigy_hifi_init(struct snd_ice1712 *ice)
ice->spec = spec;
/* initialize WM8776 codec */
- for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2)
- wm_put(ice, wm_inits[i], wm_inits[i+1]);
+ wm8776_init(ice);
schedule_timeout_uninterruptible(1);
- for (i = 0; i < ARRAY_SIZE(wm_inits2); i += 2)
- wm_put(ice, wm_inits2[i], wm_inits2[i+1]);
+ for (i = 0; i < ARRAY_SIZE(wm8776_defaults); i += 2)
+ wm_put(ice, wm8776_defaults[i], wm8776_defaults[i + 1]);
- /* initialize WM8766 codec */
- for (i = 0; i < ARRAY_SIZE(wm8766_inits); i += 2)
- wm8766_spi_write(ice, wm8766_inits[i], wm8766_inits[i+1]);
+ wm8766_init(ice);
+#ifdef CONFIG_PM_SLEEP
+ ice->pm_resume = &prodigy_hifi_resume;
+ ice->pm_suspend_enabled = 1;
+#endif
return 0;
}
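
The prodigy_hifi rework above is mostly mechanical: the flat {register, value} init tables move into wm8776_init()/wm8766_init() helpers so the new CONFIG_PM_SLEEP resume path can reinitialize both codecs and then re-apply the cached register values and volumes. The table idiom itself, sketched with hypothetical names:

static void my_dac_write(struct snd_ice1712 *ice,
			 unsigned short reg, unsigned short val);

static const unsigned short my_dac_inits[] = {
	/* register,   value */
	MY_DAC_RESET,  0x0000,
	MY_DAC_CTRL,   0x0120,	/* I2S, 24 bit */
	MY_DAC_MUTE,   0x0000,
};

static void my_dac_init(struct snd_ice1712 *ice)
{
	unsigned int i;

	/* entries are {reg, value} pairs, hence the stride of 2 */
	for (i = 0; i < ARRAY_SIZE(my_dac_inits); i += 2)
		my_dac_write(ice, my_dac_inits[i], my_dac_inits[i + 1]);
}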
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index c7b007164c99..4206ba44d8bb 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2348,7 +2348,6 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
err = request_firmware(&dsp_code, "korg/k1212.dsp", &pci->dev);
if (err < 0) {
- release_firmware(dsp_code);
snd_printk(KERN_ERR "firmware not available\n");
snd_korg1212_free(korg1212);
return err;
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index d22758165496..84c3582f3982 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -71,6 +71,7 @@ source "sound/soc/stm/Kconfig"
source "sound/soc/sunxi/Kconfig"
source "sound/soc/tegra/Kconfig"
source "sound/soc/txx9/Kconfig"
+source "sound/soc/uniphier/Kconfig"
source "sound/soc/ux500/Kconfig"
source "sound/soc/xtensa/Kconfig"
source "sound/soc/zte/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 5327f4d6c668..74cd1858d38b 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_SND_SOC) += stm/
obj-$(CONFIG_SND_SOC) += sunxi/
obj-$(CONFIG_SND_SOC) += tegra/
obj-$(CONFIG_SND_SOC) += txx9/
+obj-$(CONFIG_SND_SOC) += uniphier/
obj-$(CONFIG_SND_SOC) += ux500/
obj-$(CONFIG_SND_SOC) += xtensa/
obj-$(CONFIG_SND_SOC) += zte/
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index b5e41df6bb3a..c33a512283a4 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -850,6 +850,9 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
+ if (!rtd)
+ return -EINVAL;
+
buffersize = frames_to_bytes(runtime, runtime->buffer_size);
bytescount = acp_get_byte_count(rtd->acp_mmio, substream->stream);
@@ -875,6 +878,8 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
+ if (!rtd)
+ return -EINVAL;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
config_acp_dma_channel(rtd->acp_mmio, SYSRAM_TO_ACP_CH_NUM,
PLAYBACK_START_DMA_DESCR_CH12,
@@ -1091,7 +1096,11 @@ static int acp_audio_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, audio_drv_data);
/* Initialize the ACP */
- acp_init(audio_drv_data->acp_mmio, audio_drv_data->asic_type);
+ status = acp_init(audio_drv_data->acp_mmio, audio_drv_data->asic_type);
+ if (status) {
+ dev_err(&pdev->dev, "ACP Init failed status:%d\n", status);
+ return status;
+ }
status = snd_soc_register_platform(&pdev->dev, &acp_asoc_platform);
if (status != 0) {
@@ -1108,9 +1117,12 @@ static int acp_audio_probe(struct platform_device *pdev)
static int acp_audio_remove(struct platform_device *pdev)
{
+ int status;
struct audio_drv_data *adata = dev_get_drvdata(&pdev->dev);
- acp_deinit(adata->acp_mmio);
+ status = acp_deinit(adata->acp_mmio);
+ if (status)
+ dev_err(&pdev->dev, "ACP Deinit failed status:%d\n", status);
snd_soc_unregister_platform(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1120,9 +1132,14 @@ static int acp_audio_remove(struct platform_device *pdev)
static int acp_pcm_resume(struct device *dev)
{
u16 bank;
+ int status;
struct audio_drv_data *adata = dev_get_drvdata(dev);
- acp_init(adata->acp_mmio, adata->asic_type);
+ status = acp_init(adata->acp_mmio, adata->asic_type);
+ if (status) {
+ dev_err(dev, "ACP Init failed status:%d\n", status);
+ return status;
+ }
if (adata->play_stream && adata->play_stream->runtime) {
/* For Stoney, Memory gating is disabled,i.e SRAM Banks
@@ -1154,18 +1171,26 @@ static int acp_pcm_resume(struct device *dev)
static int acp_pcm_runtime_suspend(struct device *dev)
{
+ int status;
struct audio_drv_data *adata = dev_get_drvdata(dev);
- acp_deinit(adata->acp_mmio);
+ status = acp_deinit(adata->acp_mmio);
+ if (status)
+ dev_err(dev, "ACP Deinit failed status:%d\n", status);
acp_reg_write(0, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
return 0;
}
static int acp_pcm_runtime_resume(struct device *dev)
{
+ int status;
struct audio_drv_data *adata = dev_get_drvdata(dev);
- acp_init(adata->acp_mmio, adata->asic_type);
+ status = acp_init(adata->acp_mmio, adata->asic_type);
+ if (status) {
+ dev_err(dev, "ACP Init failed status:%d\n", status);
+ return status;
+ }
acp_reg_write(1, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
return 0;
}
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 8445edd06737..ebabed69f0e6 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -308,15 +308,9 @@ static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
return regcache_sync(dd->regmap);
}
-static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
-{
- return dev_get_regmap(dev, NULL);
-}
-
static struct snd_soc_codec_driver soc_codec_dev_classd = {
.probe = atmel_classd_codec_probe,
.resume = atmel_classd_codec_resume,
- .get_regmap = atmel_classd_codec_get_remap,
.component_driver = {
.controls = atmel_classd_snd_controls,
.num_controls = ARRAY_SIZE(atmel_classd_snd_controls),
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index 29a97d52e8ad..66d6c52e7761 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -91,8 +91,8 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
do {
mutex_lock(&ctx->lock);
- tmo = 5;
- while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+ tmo = 6;
+ while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
udelay(21); /* wait an ac97 frame time */
if (!tmo) {
pr_debug("ac97rd timeout #1\n");
@@ -105,7 +105,7 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
* poll, Forrest, poll...
*/
tmo = 0x10000;
- while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+ while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
asm volatile ("nop");
data = RD(ctx, AC97_CMDRESP);
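
The au1x hunk above fixes a subtle timeout-detection bug: with post-decrement (`tmo--`) the loop exits with tmo == -1 once the budget is spent, so the following `if (!tmo)` never fires; pre-decrement with a budget one larger exits at exactly 0 on timeout. In isolation (my_ready() is a stand-in for the hardware poll):

	int tmo = 6;	/* budget of 5 polls, +1 for the pre-decrement */

	while (!my_ready(ctx) && --tmo)
		udelay(21);
	if (!tmo)	/* reaches exactly 0 only when the budget ran out */
		return -ETIMEDOUT;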
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index 2e449d7173fc..d5f73a8ab893 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -130,6 +130,7 @@ struct bcm2835_i2s_dev {
struct regmap *i2s_regmap;
struct clk *clk;
bool clk_prepared;
+ int clk_rate;
};
static void bcm2835_i2s_start_clock(struct bcm2835_i2s_dev *dev)
@@ -419,10 +420,19 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
}
/* Clock should only be set up here if CPU is clock master */
- if (bit_clock_master) {
- ret = clk_set_rate(dev->clk, bclk_rate);
- if (ret)
- return ret;
+ if (bit_clock_master &&
+ (!dev->clk_prepared || dev->clk_rate != bclk_rate)) {
+ if (dev->clk_prepared)
+ bcm2835_i2s_stop_clock(dev);
+
+ if (dev->clk_rate != bclk_rate) {
+ ret = clk_set_rate(dev->clk, bclk_rate);
+ if (ret)
+ return ret;
+ dev->clk_rate = bclk_rate;
+ }
+
+ bcm2835_i2s_start_clock(dev);
}
/* Setup the frame format */
@@ -618,8 +628,6 @@ static int bcm2835_i2s_prepare(struct snd_pcm_substream *substream,
struct bcm2835_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
uint32_t cs_reg;
- bcm2835_i2s_start_clock(dev);
-
/*
* Clear both FIFOs if the one that should be started
* is not empty at the moment. This should only happen
diff --git a/sound/soc/cirrus/ep93xx-ac97.c b/sound/soc/cirrus/ep93xx-ac97.c
index bbf7a9266a99..cd5a939ad608 100644
--- a/sound/soc/cirrus/ep93xx-ac97.c
+++ b/sound/soc/cirrus/ep93xx-ac97.c
@@ -365,7 +365,7 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
{
struct ep93xx_ac97_info *info;
struct resource *res;
- unsigned int irq;
+ int irq;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -378,8 +378,8 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
return PTR_ERR(info->regs);
irq = platform_get_irq(pdev, 0);
- if (!irq)
- return -ENODEV;
+ if (irq <= 0)
+ return irq < 0 ? irq : -ENODEV;
ret = devm_request_irq(&pdev->dev, irq, ep93xx_ac97_interrupt,
IRQF_TRIGGER_HIGH, pdev->name, info);
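
The ep93xx hunk above fixes a classic platform_get_irq() misuse: the call reports failure with a negative errno (and the driver additionally treats 0 as "no IRQ"), so the result must be kept in a signed int rather than an unsigned one that can never be negative. The idiom, as used in the hunk:

	int irq;	/* must be signed to hold a negative errno */

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq < 0 ? irq : -ENODEV;	/* 0 is treated as "no IRQ" */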
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 848c5fe49bc7..be8ea723dff9 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1319,6 +1319,7 @@ static int pm860x_probe(struct snd_soc_codec *codec)
int i, ret;
pm860x->codec = codec;
+ snd_soc_codec_init_regmap(codec, pm860x->regmap);
for (i = 0; i < 4; i++) {
ret = request_threaded_irq(pm860x->irq[i], NULL,
@@ -1348,18 +1349,10 @@ static int pm860x_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct regmap *pm860x_get_regmap(struct device *dev)
-{
- struct pm860x_priv *pm860x = dev_get_drvdata(dev);
-
- return pm860x->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_pm860x = {
.probe = pm860x_probe,
.remove = pm860x_remove,
.set_bias_level = pm860x_set_bias_level,
- .get_regmap = pm860x_get_regmap,
.component_driver = {
.controls = pm860x_snd_controls,
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index a42ddbc93f3d..2b331f7266ab 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -95,6 +95,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX98925 if I2C
select SND_SOC_MAX98926 if I2C
select SND_SOC_MAX98927 if I2C
+ select SND_SOC_MAX98373 if I2C
select SND_SOC_MAX9850 if I2C
select SND_SOC_MAX9860 if I2C
select SND_SOC_MAX9768 if I2C
@@ -109,6 +110,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_PCM1681 if I2C
select SND_SOC_PCM179X_I2C if I2C
select SND_SOC_PCM179X_SPI if SPI_MASTER
+ select SND_SOC_PCM186X_I2C if I2C
+ select SND_SOC_PCM186X_SPI if SPI_MASTER
select SND_SOC_PCM3008
select SND_SOC_PCM3168A_I2C if I2C
select SND_SOC_PCM3168A_SPI if SPI_MASTER
@@ -133,7 +136,6 @@ config SND_SOC_ALL_CODECS
select SND_SOC_SGTL5000 if I2C
select SND_SOC_SI476X if MFD_SI476X_CORE
select SND_SOC_SIRF_AUDIO_CODEC
- select SND_SOC_SN95031 if INTEL_SCU_IPC
select SND_SOC_SPDIF
select SND_SOC_SSM2518 if I2C
select SND_SOC_SSM2602_SPI if SPI_MASTER
@@ -148,6 +150,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_TAS5086 if I2C
select SND_SOC_TAS571X if I2C
select SND_SOC_TAS5720 if I2C
+ select SND_SOC_TAS6424 if I2C
select SND_SOC_TFA9879 if I2C
select SND_SOC_TLV320AIC23_I2C if I2C
select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
@@ -158,6 +161,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_TLV320AIC3X if I2C
select SND_SOC_TPA6130A2 if I2C
select SND_SOC_TLV320DAC33 if I2C
+ select SND_SOC_TSCS42XX if I2C
select SND_SOC_TS3A227E if I2C
select SND_SOC_TWL4030 if TWL4030_CORE
select SND_SOC_TWL6040 if TWL6040_CORE
@@ -623,6 +627,10 @@ config SND_SOC_MAX98927
tristate "Maxim Integrated MAX98927 Speaker Amplifier"
depends on I2C
+config SND_SOC_MAX98373
+ tristate "Maxim Integrated MAX98373 Speaker Amplifier"
+ depends on I2C
+
config SND_SOC_MAX9850
tristate
@@ -661,6 +669,21 @@ config SND_SOC_PCM179X_SPI
Enable support for Texas Instruments PCM179x CODEC.
Select this if your PCM179x is connected via an SPI bus.
+config SND_SOC_PCM186X
+ tristate
+
+config SND_SOC_PCM186X_I2C
+ tristate "Texas Instruments PCM186x CODECs - I2C"
+ depends on I2C
+ select SND_SOC_PCM186X
+ select REGMAP_I2C
+
+config SND_SOC_PCM186X_SPI
+ tristate "Texas Instruments PCM186x CODECs - SPI"
+ depends on SPI_MASTER
+ select SND_SOC_PCM186X
+ select REGMAP_SPI
+
config SND_SOC_PCM3008
tristate
@@ -818,9 +841,6 @@ config SND_SOC_SIRF_AUDIO_CODEC
tristate "SiRF SoC internal audio codec"
select REGMAP_MMIO
-config SND_SOC_SN95031
- tristate
-
config SND_SOC_SPDIF
tristate "S/PDIF CODEC"
@@ -883,6 +903,13 @@ config SND_SOC_TAS5720
Enable support for Texas Instruments TAS5720L/M high-efficiency mono
Class-D audio power amplifiers.
+config SND_SOC_TAS6424
+ tristate "Texas Instruments TAS6424 Quad-Channel Audio amplifier"
+ depends on I2C
+ help
+ Enable support for Texas Instruments TAS6424 high-efficiency
+ digital input quad-channel Class-D audio power amplifiers.
+
config SND_SOC_TFA9879
tristate "NXP Semiconductors TFA9879 amplifier"
depends on I2C
@@ -913,12 +940,12 @@ config SND_SOC_TLV320AIC32X4
tristate
config SND_SOC_TLV320AIC32X4_I2C
- tristate
+ tristate "Texas Instruments TLV320AIC32x4 audio CODECs - I2C"
depends on I2C
select SND_SOC_TLV320AIC32X4
config SND_SOC_TLV320AIC32X4_SPI
- tristate
+ tristate "Texas Instruments TLV320AIC32x4 audio CODECs - SPI"
depends on SPI_MASTER
select SND_SOC_TLV320AIC32X4
@@ -933,6 +960,13 @@ config SND_SOC_TS3A227E
tristate "TI Headset/Mic detect and keypress chip"
depends on I2C
+config SND_SOC_TSCS42XX
+ tristate "Tempo Semiconductor TSCS42xx CODEC"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Add support for Tempo Semiconductor's TSCS42xx audio CODEC.
+
config SND_SOC_TWL4030
select MFD_TWL4030_AUDIO
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 0001069ce2a7..da1571336f1e 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -90,6 +90,7 @@ snd-soc-max9867-objs := max9867.o
snd-soc-max98925-objs := max98925.o
snd-soc-max98926-objs := max98926.o
snd-soc-max98927-objs := max98927.o
+snd-soc-max98373-objs := max98373.o
snd-soc-max9850-objs := max9850.o
snd-soc-max9860-objs := max9860.o
snd-soc-mc13783-objs := mc13783.o
@@ -105,6 +106,9 @@ snd-soc-pcm1681-objs := pcm1681.o
snd-soc-pcm179x-codec-objs := pcm179x.o
snd-soc-pcm179x-i2c-objs := pcm179x-i2c.o
snd-soc-pcm179x-spi-objs := pcm179x-spi.o
+snd-soc-pcm186x-objs := pcm186x.o
+snd-soc-pcm186x-i2c-objs := pcm186x-i2c.o
+snd-soc-pcm186x-spi-objs := pcm186x-spi.o
snd-soc-pcm3008-objs := pcm3008.o
snd-soc-pcm3168a-objs := pcm3168a.o
snd-soc-pcm3168a-i2c-objs := pcm3168a-i2c.o
@@ -140,7 +144,6 @@ snd-soc-sigmadsp-i2c-objs := sigmadsp-i2c.o
snd-soc-sigmadsp-regmap-objs := sigmadsp-regmap.o
snd-soc-si476x-objs := si476x.o
snd-soc-sirf-audio-codec-objs := sirf-audio-codec.o
-snd-soc-sn95031-objs := sn95031.o
snd-soc-spdif-tx-objs := spdif_transmitter.o
snd-soc-spdif-rx-objs := spdif_receiver.o
snd-soc-ssm2518-objs := ssm2518.o
@@ -156,6 +159,7 @@ snd-soc-sti-sas-objs := sti-sas.o
snd-soc-tas5086-objs := tas5086.o
snd-soc-tas571x-objs := tas571x.o
snd-soc-tas5720-objs := tas5720.o
+snd-soc-tas6424-objs := tas6424.o
snd-soc-tfa9879-objs := tfa9879.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -167,6 +171,7 @@ snd-soc-tlv320aic32x4-i2c-objs := tlv320aic32x4-i2c.o
snd-soc-tlv320aic32x4-spi-objs := tlv320aic32x4-spi.o
snd-soc-tlv320aic3x-objs := tlv320aic3x.o
snd-soc-tlv320dac33-objs := tlv320dac33.o
+snd-soc-tscs42xx-objs := tscs42xx.o
snd-soc-ts3a227e-objs := ts3a227e.o
snd-soc-twl4030-objs := twl4030.o
snd-soc-twl6040-objs := twl6040.o
@@ -330,6 +335,7 @@ obj-$(CONFIG_SND_SOC_MAX9867) += snd-soc-max9867.o
obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o
obj-$(CONFIG_SND_SOC_MAX98927) += snd-soc-max98927.o
+obj-$(CONFIG_SND_SOC_MAX98373) += snd-soc-max98373.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
@@ -345,6 +351,9 @@ obj-$(CONFIG_SND_SOC_PCM1681) += snd-soc-pcm1681.o
obj-$(CONFIG_SND_SOC_PCM179X) += snd-soc-pcm179x-codec.o
obj-$(CONFIG_SND_SOC_PCM179X_I2C) += snd-soc-pcm179x-i2c.o
obj-$(CONFIG_SND_SOC_PCM179X_SPI) += snd-soc-pcm179x-spi.o
+obj-$(CONFIG_SND_SOC_PCM186X) += snd-soc-pcm186x.o
+obj-$(CONFIG_SND_SOC_PCM186X_I2C) += snd-soc-pcm186x-i2c.o
+obj-$(CONFIG_SND_SOC_PCM186X_SPI) += snd-soc-pcm186x-spi.o
obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
obj-$(CONFIG_SND_SOC_PCM3168A) += snd-soc-pcm3168a.o
obj-$(CONFIG_SND_SOC_PCM3168A_I2C) += snd-soc-pcm3168a-i2c.o
@@ -395,6 +404,7 @@ obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o
obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o
obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o
obj-$(CONFIG_SND_SOC_TAS5720) += snd-soc-tas5720.o
+obj-$(CONFIG_SND_SOC_TAS6424) += snd-soc-tas6424.o
obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o
obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o
@@ -406,6 +416,7 @@ obj-$(CONFIG_SND_SOC_TLV320AIC32X4_I2C) += snd-soc-tlv320aic32x4-i2c.o
obj-$(CONFIG_SND_SOC_TLV320AIC32X4_SPI) += snd-soc-tlv320aic32x4-spi.o
obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o
+obj-$(CONFIG_SND_SOC_TSCS42XX) += snd-soc-tscs42xx.o
obj-$(CONFIG_SND_SOC_TS3A227E) += snd-soc-ts3a227e.o
obj-$(CONFIG_SND_SOC_TWL4030) += snd-soc-twl4030.o
obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 6ed2cc374768..3bf93652bb31 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -121,17 +121,19 @@ static struct snd_soc_dai_driver cq93vc_dai = {
.ops = &cq93vc_dai_ops,
};
-static struct regmap *cq93vc_get_regmap(struct device *dev)
+static int cq93vc_probe(struct snd_soc_component *component)
{
- struct davinci_vc *davinci_vc = dev->platform_data;
+ struct davinci_vc *davinci_vc = component->dev->platform_data;
- return davinci_vc->regmap;
+ snd_soc_component_init_regmap(component, davinci_vc->regmap);
+
+ return 0;
}
static const struct snd_soc_codec_driver soc_codec_dev_cq93vc = {
.set_bias_level = cq93vc_set_bias_level,
- .get_regmap = cq93vc_get_regmap,
.component_driver = {
+ .probe = cq93vc_probe,
.controls = cq93vc_snd_controls,
.num_controls = ARRAY_SIZE(cq93vc_snd_controls),
},
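
Several codec conversions in this series (88pm860x above, cq93vc here, cs47l24 below) retire the .get_regmap callback in favour of handing the regmap to the core during probe via snd_soc_codec_init_regmap()/snd_soc_component_init_regmap(). A sketch of the component-level form, assuming a device whose regmap arrives through platform data:

static int my_codec_probe(struct snd_soc_component *component)
{
	struct my_mfd *mfd = component->dev->platform_data;

	/* attach the MFD-owned regmap instead of a .get_regmap callback */
	snd_soc_component_init_regmap(component, mfd->regmap);

	return 0;
}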
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 7e9806206648..bc3a72e4c4ed 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -355,13 +355,9 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
unsigned int devid = 0;
unsigned int reg;
-
- cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs35l32_private),
- GFP_KERNEL);
- if (!cs35l32) {
- dev_err(&i2c_client->dev, "could not allocate codec\n");
+ cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l32), GFP_KERNEL);
+ if (!cs35l32)
return -ENOMEM;
- }
i2c_set_clientdata(i2c_client, cs35l32);
@@ -375,13 +371,11 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs35l32->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs35l32_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev, "could not allocate pdata\n");
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
ret = cs35l32_handle_of_data(i2c_client,
&cs35l32->pdata);
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
index 1e05026bedca..0600d5264c4c 100644
--- a/sound/soc/codecs/cs35l34.c
+++ b/sound/soc/codecs/cs35l34.c
@@ -1004,13 +1004,9 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
unsigned int devid = 0;
unsigned int reg;
- cs35l34 = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs35l34_private),
- GFP_KERNEL);
- if (!cs35l34) {
- dev_err(&i2c_client->dev, "could not allocate codec\n");
+ cs35l34 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l34), GFP_KERNEL);
+ if (!cs35l34)
return -ENOMEM;
- }
i2c_set_clientdata(i2c_client, cs35l34);
cs35l34->regmap = devm_regmap_init_i2c(i2c_client, &cs35l34_regmap);
@@ -1044,14 +1040,11 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs35l34->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs35l34_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev,
- "could not allocate pdata\n");
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
ret = cs35l34_handle_of_data(i2c_client, pdata);
if (ret != 0)
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 0d9c4a57301b..9731e5dff291 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -1100,8 +1100,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
unsigned int reg;
u32 val32;
- cs42l52 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l52_private),
- GFP_KERNEL);
+ cs42l52 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l52), GFP_KERNEL);
if (cs42l52 == NULL)
return -ENOMEM;
cs42l52->dev = &i2c_client->dev;
@@ -1115,13 +1114,11 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs42l52->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs42l52_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev, "could not allocate pdata\n");
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
if (of_property_read_bool(i2c_client->dev.of_node,
"cirrus,mica-differential-cfg"))
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index cb6ca85f1536..fd7b8d32c2b2 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1190,9 +1190,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
unsigned int alpha_rev, metal_rev;
unsigned int reg;
- cs42l56 = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs42l56_private),
- GFP_KERNEL);
+ cs42l56 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l56), GFP_KERNEL);
if (cs42l56 == NULL)
return -ENOMEM;
cs42l56->dev = &i2c_client->dev;
@@ -1207,14 +1205,11 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs42l56->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs42l56_platform_data),
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev,
- "could not allocate pdata\n");
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
ret = cs42l56_handle_of_data(i2c_client,
&cs42l56->pdata);
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 3df2c473ab88..aebaa97490b6 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1289,8 +1289,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
unsigned int reg;
u32 val32;
- cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l73_private),
- GFP_KERNEL);
+ cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l73), GFP_KERNEL);
if (!cs42l73)
return -ENOMEM;
@@ -1304,13 +1303,11 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs42l73->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs42l73_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev, "could not allocate pdata\n");
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
if (of_property_read_u32(i2c_client->dev.of_node,
"chgfreq", &val32) >= 0)
@@ -1358,7 +1355,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
ret = regmap_read(cs42l73->regmap, CS42L73_REVID, &reg);
if (ret < 0) {
dev_err(&i2c_client->dev, "Get Revision ID failed\n");
- return ret;;
+ return ret;
}
dev_info(&i2c_client->dev,
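
The cs35l32/cs35l34/cs42l5x/cs42l73 hunks (and the da7213/da7218 ones below) all apply the same two cleanups: allocate with sizeof(*ptr) so the size always tracks the variable's type, and drop the dev_err() on allocation failure, since the allocator already warns. The resulting idiom:

	chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;	/* no message needed: k*alloc already complains */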
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 94c0209977d0..be2750680838 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1120,9 +1120,11 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
int ret;
- priv->core.arizona->dapm = dapm;
+ arizona->dapm = dapm;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
ret = arizona_init_spk(codec);
if (ret < 0)
@@ -1175,17 +1177,9 @@ static unsigned int cs47l24_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_4L,
};
-static struct regmap *cs47l24_get_regmap(struct device *dev)
-{
- struct cs47l24_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_cs47l24 = {
.probe = cs47l24_codec_probe,
.remove = cs47l24_codec_remove,
- .get_regmap = cs47l24_get_regmap,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 46b1fbb66eba..95bb10ba80dc 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -26,8 +26,9 @@
struct cx20442_priv {
- void *control_data;
+ struct tty_struct *tty;
struct regulator *por;
+ u8 reg_cache;
};
#define CX20442_PM 0x0
@@ -89,14 +90,14 @@ static const struct snd_soc_dapm_route cx20442_audio_map[] = {
};
static unsigned int cx20442_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
+ unsigned int reg)
{
- u8 *reg_cache = codec->reg_cache;
+ struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
- if (reg >= codec->driver->reg_cache_size)
+ if (reg >= 1)
return -EINVAL;
- return reg_cache[reg];
+ return cx20442->reg_cache;
}
enum v253_vls {
@@ -156,20 +157,19 @@ static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
- u8 *reg_cache = codec->reg_cache;
int vls, vsp, old, len;
char buf[18];
- if (reg >= codec->driver->reg_cache_size)
+ if (reg >= 1)
return -EINVAL;
- /* hw_write and control_data pointers required for talking to the modem
+ /* tty and write pointers required for talking to the modem
* are expected to be set by the line discipline initialization code */
- if (!codec->hw_write || !cx20442->control_data)
+ if (!cx20442->tty || !cx20442->tty->ops->write)
return -EIO;
- old = reg_cache[reg];
- reg_cache[reg] = value;
+ old = cx20442->reg_cache;
+ cx20442->reg_cache = value;
vls = cx20442_pm_to_v253_vls(value);
if (vls < 0)
@@ -194,13 +194,12 @@ static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
return -ENOMEM;
dev_dbg(codec->dev, "%s: %s\n", __func__, buf);
- if (codec->hw_write(cx20442->control_data, buf, len) != len)
+ if (cx20442->tty->ops->write(cx20442->tty, buf, len) != len)
return -EIO;
return 0;
}
-
/*
 * Line discipline related code
*
@@ -252,8 +251,7 @@ static void v253_close(struct tty_struct *tty)
cx20442 = snd_soc_codec_get_drvdata(codec);
/* Prevent the codec driver from further accessing the modem */
- codec->hw_write = NULL;
- cx20442->control_data = NULL;
+ cx20442->tty = NULL;
codec->component.card->pop_time = 0;
}
@@ -276,12 +274,11 @@ static void v253_receive(struct tty_struct *tty,
cx20442 = snd_soc_codec_get_drvdata(codec);
- if (!cx20442->control_data) {
+ if (!cx20442->tty) {
/* First modem response, complete setup procedure */
/* Set up codec driver access to modem controls */
- cx20442->control_data = tty;
- codec->hw_write = (hw_write_t)tty->ops->write;
+ cx20442->tty = tty;
codec->component.card->pop_time = 1;
}
}
@@ -367,10 +364,9 @@ static int cx20442_codec_probe(struct snd_soc_codec *codec)
cx20442->por = regulator_get(codec->dev, "POR");
if (IS_ERR(cx20442->por))
dev_warn(codec->dev, "failed to get the regulator");
- cx20442->control_data = NULL;
+ cx20442->tty = NULL;
snd_soc_codec_set_drvdata(codec, cx20442);
- codec->hw_write = NULL;
codec->component.card->pop_time = 0;
return 0;
@@ -381,8 +377,8 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
{
struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
- if (cx20442->control_data) {
- struct tty_struct *tty = cx20442->control_data;
+ if (cx20442->tty) {
+ struct tty_struct *tty = cx20442->tty;
tty_hangup(tty);
}
@@ -396,17 +392,13 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
return 0;
}
-static const u8 cx20442_reg;
-
static const struct snd_soc_codec_driver cx20442_codec_dev = {
.probe = cx20442_codec_probe,
.remove = cx20442_codec_remove,
.set_bias_level = cx20442_set_bias_level,
- .reg_cache_default = &cx20442_reg,
- .reg_cache_size = 1,
- .reg_word_size = sizeof(u8),
.read = cx20442_read_reg_cache,
.write = cx20442_write,
+
.component_driver = {
.dapm_widgets = cx20442_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(cx20442_dapm_widgets),
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 41d9b1da27c2..b2b4e90fc02a 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1654,10 +1654,8 @@ static struct da7213_platform_data
u32 fw_val32;
pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_warn(codec->dev, "Failed to allocate memory for pdata\n");
+ if (!pdata)
return NULL;
- }
if (device_property_read_u32(dev, "dlg,micbias1-lvl", &fw_val32) >= 0)
pdata->micbias1_lvl = da7213_of_micbias_lvl(codec, fw_val32);
@@ -1855,8 +1853,7 @@ static int da7213_i2c_probe(struct i2c_client *i2c,
struct da7213_priv *da7213;
int ret;
- da7213 = devm_kzalloc(&i2c->dev, sizeof(struct da7213_priv),
- GFP_KERNEL);
+ da7213 = devm_kzalloc(&i2c->dev, sizeof(*da7213), GFP_KERNEL);
if (!da7213)
return -ENOMEM;
diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
index 56564ce90cb6..96c644a15b11 100644
--- a/sound/soc/codecs/da7218.c
+++ b/sound/soc/codecs/da7218.c
@@ -2455,10 +2455,8 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
u32 of_val32;
pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_warn(codec->dev, "Failed to allocate memory for pdata\n");
+ if (!pdata)
return NULL;
- }
if (of_property_read_u32(np, "dlg,micbias1-lvl-millivolt", &of_val32) >= 0)
pdata->micbias1_lvl = da7218_of_micbias_lvl(codec, of_val32);
@@ -2527,8 +2525,6 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
hpldet_pdata = devm_kzalloc(codec->dev, sizeof(*hpldet_pdata),
GFP_KERNEL);
if (!hpldet_pdata) {
- dev_warn(codec->dev,
- "Failed to allocate memory for hpldet pdata\n");
of_node_put(hpldet_np);
return pdata;
}
@@ -3273,8 +3269,7 @@ static int da7218_i2c_probe(struct i2c_client *i2c,
struct da7218_priv *da7218;
int ret;
- da7218 = devm_kzalloc(&i2c->dev, sizeof(struct da7218_priv),
- GFP_KERNEL);
+ da7218 = devm_kzalloc(&i2c->dev, sizeof(*da7218), GFP_KERNEL);
if (!da7218)
return -ENOMEM;
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index b88a1ee66f80..c88f974ebe3e 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -107,8 +107,30 @@ static const struct snd_soc_codec_driver soc_dmic = {
static int dmic_dev_probe(struct platform_device *pdev)
{
+ int err;
+ u32 chans;
+ struct snd_soc_dai_driver *dai_drv = &dmic_dai;
+
+ if (pdev->dev.of_node) {
+ err = of_property_read_u32(pdev->dev.of_node, "num-channels", &chans);
+ if (err && (err != -ENOENT))
+ return err;
+
+ if (!err) {
+ if (chans < 1 || chans > 8)
+ return -EINVAL;
+
+ dai_drv = devm_kzalloc(&pdev->dev, sizeof(*dai_drv), GFP_KERNEL);
+ if (!dai_drv)
+ return -ENOMEM;
+
+ memcpy(dai_drv, &dmic_dai, sizeof(*dai_drv));
+ dai_drv->capture.channels_max = chans;
+ }
+ }
+
return snd_soc_register_codec(&pdev->dev,
- &soc_dmic, &dmic_dai, 1);
+ &soc_dmic, dai_drv, 1);
}
static int dmic_dev_remove(struct platform_device *pdev)
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index f3b4f4dfae6a..dba6f4c5074a 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -136,8 +136,11 @@ struct hdac_hdmi_priv {
struct mutex pin_mutex;
struct hdac_chmap chmap;
struct hdac_hdmi_drv_data *drv_data;
+ struct snd_soc_dai_driver *dai_drv;
};
+#define hdev_to_hdmi_priv(_hdev) ((to_ehdac_device(_hdev))->private_data)
+
static struct hdac_hdmi_pcm *
hdac_hdmi_get_pcm_from_cvt(struct hdac_hdmi_priv *hdmi,
struct hdac_hdmi_cvt *cvt)
@@ -169,7 +172,7 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
* ports.
*/
if (pcm->jack_event == 0) {
- dev_dbg(&edev->hdac.dev,
+ dev_dbg(&edev->hdev.dev,
"jack report for pcm=%d\n",
pcm->pcm_id);
snd_soc_jack_report(pcm->jack, SND_JACK_AVOUT,
@@ -195,18 +198,18 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
/*
 * Get the number of devices that can be connected to a port on the Pin widget.
*/
-static int hdac_hdmi_get_port_len(struct hdac_ext_device *hdac, hda_nid_t nid)
+static int hdac_hdmi_get_port_len(struct hdac_ext_device *edev, hda_nid_t nid)
{
unsigned int caps;
unsigned int type, param;
- caps = get_wcaps(&hdac->hdac, nid);
+ caps = get_wcaps(&edev->hdev, nid);
type = get_wcaps_type(caps);
if (!(caps & AC_WCAP_DIGITAL) || (type != AC_WID_PIN))
return 0;
- param = snd_hdac_read_parm_uncached(&hdac->hdac, nid,
+ param = snd_hdac_read_parm_uncached(&edev->hdev, nid,
AC_PAR_DEVLIST_LEN);
if (param == -1)
return param;
@@ -219,10 +222,10 @@ static int hdac_hdmi_get_port_len(struct hdac_ext_device *hdac, hda_nid_t nid)
* id selected on the pin. Return 0 means the first port entry
* is selected or MST is not supported.
*/
-static int hdac_hdmi_port_select_get(struct hdac_ext_device *hdac,
+static int hdac_hdmi_port_select_get(struct hdac_ext_device *edev,
struct hdac_hdmi_port *port)
{
- return snd_hdac_codec_read(&hdac->hdac, port->pin->nid,
+ return snd_hdac_codec_read(&edev->hdev, port->pin->nid,
0, AC_VERB_GET_DEVICE_SEL, 0);
}
@@ -230,7 +233,7 @@ static int hdac_hdmi_port_select_get(struct hdac_ext_device *hdac,
* Sets the selected port entry for the configuring Pin widget verb.
* returns error if port set is not equal to port get otherwise success
*/
-static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
+static int hdac_hdmi_port_select_set(struct hdac_ext_device *edev,
struct hdac_hdmi_port *port)
{
int num_ports;
@@ -239,7 +242,7 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
return 0;
/* AC_PAR_DEVLIST_LEN is 0 based. */
- num_ports = hdac_hdmi_get_port_len(hdac, port->pin->nid);
+ num_ports = hdac_hdmi_get_port_len(edev, port->pin->nid);
if (num_ports < 0)
return -EIO;
@@ -250,13 +253,13 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
if (num_ports + 1 < port->id)
return 0;
- snd_hdac_codec_write(&hdac->hdac, port->pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
AC_VERB_SET_DEVICE_SEL, port->id);
- if (port->id != hdac_hdmi_port_select_get(hdac, port))
+ if (port->id != hdac_hdmi_port_select_get(edev, port))
return -EIO;
- dev_dbg(&hdac->hdac.dev, "Selected the port=%d\n", port->id);
+ dev_dbg(&edev->hdev.dev, "Selected the port=%d\n", port->id);
return 0;
}
@@ -276,9 +279,9 @@ static struct hdac_hdmi_pcm *get_hdmi_pcm_from_id(struct hdac_hdmi_priv *hdmi,
static inline struct hdac_ext_device *to_hda_ext_device(struct device *dev)
{
- struct hdac_device *hdac = dev_to_hdac_dev(dev);
+ struct hdac_device *hdev = dev_to_hdac_dev(dev);
- return to_ehdac_device(hdac);
+ return to_ehdac_device(hdev);
}
static unsigned int sad_format(const u8 *sad)
@@ -321,14 +324,14 @@ format_constraint:
}
static void
-hdac_hdmi_set_dip_index(struct hdac_ext_device *hdac, hda_nid_t pin_nid,
+hdac_hdmi_set_dip_index(struct hdac_ext_device *edev, hda_nid_t pin_nid,
int packet_index, int byte_index)
{
int val;
val = (packet_index << 5) | (byte_index & 0x1f);
- snd_hdac_codec_write(&hdac->hdac, pin_nid, 0,
+ snd_hdac_codec_write(&edev->hdev, pin_nid, 0,
AC_VERB_SET_HDMI_DIP_INDEX, val);
}
@@ -344,14 +347,14 @@ struct dp_audio_infoframe {
u8 LFEPBL01_LSV36_DM_INH7;
};
-static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
+static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *edev,
struct hdac_hdmi_pcm *pcm, struct hdac_hdmi_port *port)
{
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
struct hdmi_audio_infoframe frame;
struct hdac_hdmi_pin *pin = port->pin;
struct dp_audio_infoframe dp_ai;
- struct hdac_hdmi_priv *hdmi = hdac->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_cvt *cvt = pcm->cvt;
u8 *dip;
int ret;
@@ -360,11 +363,11 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
u8 conn_type;
int channels, ca;
- ca = snd_hdac_channel_allocation(&hdac->hdac, port->eld.info.spk_alloc,
+ ca = snd_hdac_channel_allocation(&edev->hdev, port->eld.info.spk_alloc,
pcm->channels, pcm->chmap_set, true, pcm->chmap);
channels = snd_hdac_get_active_channels(ca);
- hdmi->chmap.ops.set_channel_count(&hdac->hdac, cvt->nid, channels);
+ hdmi->chmap.ops.set_channel_count(&edev->hdev, cvt->nid, channels);
snd_hdac_setup_channel_mapping(&hdmi->chmap, pin->nid, false, ca,
pcm->channels, pcm->chmap, pcm->chmap_set);
@@ -397,32 +400,32 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
break;
default:
- dev_err(&hdac->hdac.dev, "Invalid connection type: %d\n",
+ dev_err(&edev->hdev.dev, "Invalid connection type: %d\n",
conn_type);
return -EIO;
}
/* stop infoframe transmission */
- hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
- snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+ hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
+ snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_DISABLE);
/* Fill infoframe. Index auto-incremented */
- hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
+ hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
if (conn_type == DRM_ELD_CONN_TYPE_HDMI) {
for (i = 0; i < sizeof(buffer); i++)
- snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_DATA, buffer[i]);
} else {
for (i = 0; i < sizeof(dp_ai); i++)
- snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_DATA, dip[i]);
}
/* Start infoframe */
- hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
- snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+ hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
+ snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_BEST);
return 0;
@@ -433,11 +436,11 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
int slots, int slot_width)
{
struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_pcm *pcm;
- dev_dbg(&edev->hdac.dev, "%s: strm_tag: %d\n", __func__, tx_mask);
+ dev_dbg(&edev->hdev.dev, "%s: strm_tag: %d\n", __func__, tx_mask);
dai_map = &hdmi->dai_map[dai->id];
@@ -452,8 +455,8 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hparams, struct snd_soc_dai *dai)
{
- struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdac->private_data;
+ struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_port *port;
struct hdac_hdmi_pcm *pcm;
@@ -466,7 +469,7 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
return -ENODEV;
if ((!port->eld.monitor_present) || (!port->eld.eld_valid)) {
- dev_err(&hdac->hdac.dev,
+ dev_err(&edev->hdev.dev,
"device is not configured for this pin:port%d:%d\n",
port->pin->nid, port->id);
return -ENODEV;
@@ -486,28 +489,28 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *hdac,
+static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *edev,
struct hdac_hdmi_pin *pin,
struct hdac_hdmi_port *port)
{
- if (!(get_wcaps(&hdac->hdac, pin->nid) & AC_WCAP_CONN_LIST)) {
- dev_warn(&hdac->hdac.dev,
+ if (!(get_wcaps(&edev->hdev, pin->nid) & AC_WCAP_CONN_LIST)) {
+ dev_warn(&edev->hdev.dev,
"HDMI: pin %d wcaps %#x does not support connection list\n",
- pin->nid, get_wcaps(&hdac->hdac, pin->nid));
+ pin->nid, get_wcaps(&edev->hdev, pin->nid));
return -EINVAL;
}
- if (hdac_hdmi_port_select_set(hdac, port) < 0)
+ if (hdac_hdmi_port_select_set(edev, port) < 0)
return -EIO;
- port->num_mux_nids = snd_hdac_get_connections(&hdac->hdac, pin->nid,
+ port->num_mux_nids = snd_hdac_get_connections(&edev->hdev, pin->nid,
port->mux_nids, HDA_MAX_CONNECTIONS);
if (port->num_mux_nids == 0)
- dev_warn(&hdac->hdac.dev,
+ dev_warn(&edev->hdev.dev,
"No connections found for pin:port %d:%d\n",
pin->nid, port->id);
- dev_dbg(&hdac->hdac.dev, "num_mux_nids %d for pin:port %d:%d\n",
+ dev_dbg(&edev->hdev.dev, "num_mux_nids %d for pin:port %d:%d\n",
port->num_mux_nids, pin->nid, port->id);
return port->num_mux_nids;
@@ -565,8 +568,8 @@ static struct hdac_hdmi_port *hdac_hdmi_get_port_from_cvt(
static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdac->private_data;
+ struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_cvt *cvt;
struct hdac_hdmi_port *port;
@@ -575,7 +578,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
dai_map = &hdmi->dai_map[dai->id];
cvt = dai_map->cvt;
- port = hdac_hdmi_get_port_from_cvt(hdac, hdmi, cvt);
+ port = hdac_hdmi_get_port_from_cvt(edev, hdmi, cvt);
/*
* To make PA and other userland happy.
@@ -586,7 +589,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
if ((!port->eld.monitor_present) ||
(!port->eld.eld_valid)) {
- dev_warn(&hdac->hdac.dev,
+ dev_warn(&edev->hdev.dev,
"Failed: present?:%d ELD valid?:%d pin:port: %d:%d\n",
port->eld.monitor_present, port->eld.eld_valid,
port->pin->nid, port->id);
@@ -608,8 +611,8 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
static void hdac_hdmi_pcm_close(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdac->private_data;
+ struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_pcm *pcm;
@@ -630,14 +633,13 @@ static void hdac_hdmi_pcm_close(struct snd_pcm_substream *substream,
}
static int
-hdac_hdmi_query_cvt_params(struct hdac_device *hdac, struct hdac_hdmi_cvt *cvt)
+hdac_hdmi_query_cvt_params(struct hdac_device *hdev, struct hdac_hdmi_cvt *cvt)
{
unsigned int chans;
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
int err;
- chans = get_wcaps(hdac, cvt->nid);
+ chans = get_wcaps(hdev, cvt->nid);
chans = get_wcaps_channels(chans);
cvt->params.channels_min = 2;
@@ -646,12 +648,12 @@ hdac_hdmi_query_cvt_params(struct hdac_device *hdac, struct hdac_hdmi_cvt *cvt)
if (chans > hdmi->chmap.channels_max)
hdmi->chmap.channels_max = chans;
- err = snd_hdac_query_supported_pcm(hdac, cvt->nid,
+ err = snd_hdac_query_supported_pcm(hdev, cvt->nid,
&cvt->params.rates,
&cvt->params.formats,
&cvt->params.maxbps);
if (err < 0)
- dev_err(&hdac->dev,
+ dev_err(&hdev->dev,
"Failed to query pcm params for nid %d: %d\n",
cvt->nid, err);
@@ -696,7 +698,7 @@ static void hdac_hdmi_fill_route(struct snd_soc_dapm_route *route,
static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
struct hdac_hdmi_port *port)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm = NULL;
struct hdac_hdmi_port *p;
@@ -716,9 +718,9 @@ static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
static void hdac_hdmi_set_power_state(struct hdac_ext_device *edev,
hda_nid_t nid, unsigned int pwr_state)
{
- if (get_wcaps(&edev->hdac, nid) & AC_WCAP_POWER) {
- if (!snd_hdac_check_power_state(&edev->hdac, nid, pwr_state))
- snd_hdac_codec_write(&edev->hdac, nid, 0,
+ if (get_wcaps(&edev->hdev, nid) & AC_WCAP_POWER) {
+ if (!snd_hdac_check_power_state(&edev->hdev, nid, pwr_state))
+ snd_hdac_codec_write(&edev->hdev, nid, 0,
AC_VERB_SET_POWER_STATE, pwr_state);
}
}
@@ -726,8 +728,8 @@ static void hdac_hdmi_set_power_state(struct hdac_ext_device *edev,
static void hdac_hdmi_set_amp(struct hdac_ext_device *edev,
hda_nid_t nid, int val)
{
- if (get_wcaps(&edev->hdac, nid) & AC_WCAP_OUT_AMP)
- snd_hdac_codec_write(&edev->hdac, nid, 0,
+ if (get_wcaps(&edev->hdev, nid) & AC_WCAP_OUT_AMP)
+ snd_hdac_codec_write(&edev->hdev, nid, 0,
AC_VERB_SET_AMP_GAIN_MUTE, val);
}
@@ -739,7 +741,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
struct hdac_hdmi_pcm *pcm;
- dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+ dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
__func__, w->name, event);
pcm = hdac_hdmi_get_pcm(edev, port);
@@ -755,7 +757,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D0);
/* Enable out path for this pin widget */
- snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_UNMUTE);
@@ -766,7 +768,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_MUTE);
/* Disable out path for this pin widget */
- snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D3);
@@ -782,10 +784,10 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
{
struct hdac_hdmi_cvt *cvt = w->priv;
struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm;
- dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+ dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
__func__, w->name, event);
pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, cvt);
@@ -797,23 +799,23 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D0);
/* Enable transmission */
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_DIGI_CONVERT_1, 1);
/* Category Code (CC) to zero */
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_DIGI_CONVERT_2, 0);
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_CHANNEL_STREAMID, pcm->stream_tag);
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_STREAM_FORMAT, pcm->format);
break;
case SND_SOC_DAPM_POST_PMD:
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_CHANNEL_STREAMID, 0);
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_STREAM_FORMAT, 0);
hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D3);
@@ -831,7 +833,7 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
int mux_idx;
- dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+ dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
__func__, w->name, event);
if (!kc)
@@ -844,7 +846,7 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
return -EIO;
if (mux_idx > 0) {
- snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
AC_VERB_SET_CONNECT_SEL, (mux_idx - 1));
}
@@ -864,7 +866,7 @@ static int hdac_hdmi_set_pin_port_mux(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_context *dapm = w->dapm;
struct hdac_hdmi_port *port = w->priv;
struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm = NULL;
const char *cvt_name = e->texts[ucontrol->value.enumerated.item[0]];
@@ -922,7 +924,7 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
struct snd_soc_dapm_widget *widget,
const char *widget_name)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin = port->pin;
struct snd_kcontrol_new *kc;
struct hdac_hdmi_cvt *cvt;
@@ -934,17 +936,17 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
int i = 0;
int num_items = hdmi->num_cvt + 1;
- kc = devm_kzalloc(&edev->hdac.dev, sizeof(*kc), GFP_KERNEL);
+ kc = devm_kzalloc(&edev->hdev.dev, sizeof(*kc), GFP_KERNEL);
if (!kc)
return -ENOMEM;
- se = devm_kzalloc(&edev->hdac.dev, sizeof(*se), GFP_KERNEL);
+ se = devm_kzalloc(&edev->hdev.dev, sizeof(*se), GFP_KERNEL);
if (!se)
return -ENOMEM;
snprintf(kc_name, NAME_SIZE, "Pin %d port %d Input",
pin->nid, port->id);
- kc->name = devm_kstrdup(&edev->hdac.dev, kc_name, GFP_KERNEL);
+ kc->name = devm_kstrdup(&edev->hdev.dev, kc_name, GFP_KERNEL);
if (!kc->name)
return -ENOMEM;
@@ -962,24 +964,24 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
se->mask = roundup_pow_of_two(se->items) - 1;
sprintf(mux_items, "NONE");
- items[i] = devm_kstrdup(&edev->hdac.dev, mux_items, GFP_KERNEL);
+ items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
if (!items[i])
return -ENOMEM;
list_for_each_entry(cvt, &hdmi->cvt_list, head) {
i++;
sprintf(mux_items, "cvt %d", cvt->nid);
- items[i] = devm_kstrdup(&edev->hdac.dev, mux_items, GFP_KERNEL);
+ items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
if (!items[i])
return -ENOMEM;
}
- se->texts = devm_kmemdup(&edev->hdac.dev, items,
+ se->texts = devm_kmemdup(&edev->hdev.dev, items,
(num_items * sizeof(char *)), GFP_KERNEL);
if (!se->texts)
return -ENOMEM;
- return hdac_hdmi_fill_widget_info(&edev->hdac.dev, widget,
+ return hdac_hdmi_fill_widget_info(&edev->hdev.dev, widget,
snd_soc_dapm_mux, port, widget_name, NULL, kc, 1,
hdac_hdmi_pin_mux_widget_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_REG);
@@ -990,7 +992,7 @@ static void hdac_hdmi_add_pinmux_cvt_route(struct hdac_ext_device *edev,
struct snd_soc_dapm_widget *widgets,
struct snd_soc_dapm_route *route, int rindex)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
const struct snd_kcontrol_new *kc;
struct soc_enum *se;
int mux_index = hdmi->num_cvt + hdmi->num_ports;
@@ -1033,8 +1035,8 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
struct snd_soc_dapm_widget *widgets;
struct snd_soc_dapm_route *route;
struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
- struct snd_soc_dai_driver *dai_drv = dapm->component->dai_drv;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct snd_soc_dai_driver *dai_drv = hdmi->dai_drv;
char widget_name[NAME_SIZE];
struct hdac_hdmi_cvt *cvt;
struct hdac_hdmi_pin *pin;
@@ -1134,7 +1136,7 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_cvt *cvt;
int dai_id = 0;
@@ -1150,7 +1152,7 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
dai_id++;
if (dai_id == HDA_MAX_CVTS) {
- dev_warn(&edev->hdac.dev,
+ dev_warn(&edev->hdev.dev,
"Max dais supported: %d\n", dai_id);
break;
}
@@ -1161,7 +1163,7 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_cvt *cvt;
char name[NAME_SIZE];
@@ -1176,7 +1178,7 @@ static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
list_add_tail(&cvt->head, &hdmi->cvt_list);
hdmi->num_cvt++;
- return hdac_hdmi_query_cvt_params(&edev->hdac, cvt);
+ return hdac_hdmi_query_cvt_params(&edev->hdev, cvt);
}
static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
@@ -1188,7 +1190,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
>> DRM_ELD_VER_SHIFT;
if (ver != ELD_VER_CEA_861D && ver != ELD_VER_PARTIAL) {
- dev_err(&edev->hdac.dev, "HDMI: Unknown ELD version %d\n", ver);
+ dev_err(&edev->hdev.dev, "HDMI: Unknown ELD version %d\n", ver);
return -EINVAL;
}
@@ -1196,7 +1198,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
if (mnl > ELD_MAX_MNL) {
- dev_err(&edev->hdac.dev, "HDMI: MNL Invalid %d\n", mnl);
+ dev_err(&edev->hdev.dev, "HDMI: MNL Invalid %d\n", mnl);
return -EINVAL;
}
@@ -1209,7 +1211,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
struct hdac_hdmi_port *port)
{
struct hdac_ext_device *edev = pin->edev;
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm;
int size = 0;
int port_id = -1;
@@ -1227,7 +1229,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
if (pin->mst_capable)
port_id = port->id;
- size = snd_hdac_acomp_get_eld(&edev->hdac, pin->nid, port_id,
+ size = snd_hdac_acomp_get_eld(&edev->hdev, pin->nid, port_id,
&port->eld.monitor_present,
port->eld.eld_buffer,
ELD_MAX_SIZE);
@@ -1250,7 +1252,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
if (!port->eld.monitor_present || !port->eld.eld_valid) {
- dev_err(&edev->hdac.dev, "%s: disconnect for pin:port %d:%d\n",
+ dev_err(&edev->hdev.dev, "%s: disconnect for pin:port %d:%d\n",
__func__, pin->nid, port->id);
/*
@@ -1304,7 +1306,7 @@ static int hdac_hdmi_add_ports(struct hdac_hdmi_priv *hdmi,
static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin;
int ret;
@@ -1333,40 +1335,38 @@ static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
#define INTEL_EN_DP12 0x02 /* enable DP 1.2 features */
#define INTEL_EN_ALL_PIN_CVTS 0x01 /* enable 2nd & 3rd pins and converters */
-static void hdac_hdmi_skl_enable_all_pins(struct hdac_device *hdac)
+static void hdac_hdmi_skl_enable_all_pins(struct hdac_device *hdev)
{
unsigned int vendor_param;
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
unsigned int vendor_nid = hdmi->drv_data->vendor_nid;
- vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+ vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
INTEL_GET_VENDOR_VERB, 0);
if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS)
return;
vendor_param |= INTEL_EN_ALL_PIN_CVTS;
- vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+ vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
INTEL_SET_VENDOR_VERB, vendor_param);
if (vendor_param == -1)
return;
}
-static void hdac_hdmi_skl_enable_dp12(struct hdac_device *hdac)
+static void hdac_hdmi_skl_enable_dp12(struct hdac_device *hdev)
{
unsigned int vendor_param;
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
unsigned int vendor_nid = hdmi->drv_data->vendor_nid;
- vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+ vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
INTEL_GET_VENDOR_VERB, 0);
if (vendor_param == -1 || vendor_param & INTEL_EN_DP12)
return;
/* enable DP1.2 mode */
vendor_param |= INTEL_EN_DP12;
- vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+ vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
INTEL_SET_VENDOR_VERB, vendor_param);
if (vendor_param == -1)
return;
@@ -1384,7 +1384,7 @@ static const struct snd_soc_dai_ops hdmi_dai_ops = {
* Each converter can support a stream independently. So a dai is created
* based on the number of converters queried.
*/
-static int hdac_hdmi_create_dais(struct hdac_device *hdac,
+static int hdac_hdmi_create_dais(struct hdac_device *hdev,
struct snd_soc_dai_driver **dais,
struct hdac_hdmi_priv *hdmi, int num_dais)
{
@@ -1397,20 +1397,20 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
u64 formats;
int ret;
- hdmi_dais = devm_kzalloc(&hdac->dev,
+ hdmi_dais = devm_kzalloc(&hdev->dev,
(sizeof(*hdmi_dais) * num_dais),
GFP_KERNEL);
if (!hdmi_dais)
return -ENOMEM;
list_for_each_entry(cvt, &hdmi->cvt_list, head) {
- ret = snd_hdac_query_supported_pcm(hdac, cvt->nid,
+ ret = snd_hdac_query_supported_pcm(hdev, cvt->nid,
&rates, &formats, &bps);
if (ret)
return ret;
sprintf(dai_name, "intel-hdmi-hifi%d", i+1);
- hdmi_dais[i].name = devm_kstrdup(&hdac->dev,
+ hdmi_dais[i].name = devm_kstrdup(&hdev->dev,
dai_name, GFP_KERNEL);
if (!hdmi_dais[i].name)
@@ -1418,7 +1418,7 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
snprintf(name, sizeof(name), "hifi%d", i+1);
hdmi_dais[i].playback.stream_name =
- devm_kstrdup(&hdac->dev, name, GFP_KERNEL);
+ devm_kstrdup(&hdev->dev, name, GFP_KERNEL);
if (!hdmi_dais[i].playback.stream_name)
return -ENOMEM;
@@ -1438,6 +1438,7 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
}
*dais = hdmi_dais;
+ hdmi->dai_drv = hdmi_dais;
return 0;
}
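The `hdmi->dai_drv = hdmi_dais` assignment above pairs with the create_fill_widget_route_map() hunk earlier, which now reads the DAI driver table from the private data instead of dapm->component->dai_drv. The struct change itself is not visible in this section; a minimal sketch of what it implies, with surrounding members elided, is:

/* Sketch only: struct hdac_hdmi_priv is assumed to gain a cached DAI
 * driver pointer in this patch (its definition is outside this hunk).
 */
struct hdac_hdmi_priv {
	/* ...existing members (pin/cvt/pcm lists, chmap, ...)... */
	struct snd_soc_dai_driver *dai_drv;	/* set in hdac_hdmi_create_dais() */
};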
@@ -1451,29 +1452,26 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
{
hda_nid_t nid;
int i, num_nodes;
- struct hdac_device *hdac = &edev->hdac;
- struct hdac_hdmi_priv *hdmi = edev->private_data;
struct hdac_hdmi_cvt *temp_cvt, *cvt_next;
struct hdac_hdmi_pin *temp_pin, *pin_next;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_device *hdev = &edev->hdev;
int ret;
- hdac_hdmi_skl_enable_all_pins(hdac);
- hdac_hdmi_skl_enable_dp12(hdac);
+ hdac_hdmi_skl_enable_all_pins(hdev);
+ hdac_hdmi_skl_enable_dp12(hdev);
- num_nodes = snd_hdac_get_sub_nodes(hdac, hdac->afg, &nid);
+ num_nodes = snd_hdac_get_sub_nodes(hdev, hdev->afg, &nid);
if (!nid || num_nodes <= 0) {
- dev_warn(&hdac->dev, "HDMI: failed to get afg sub nodes\n");
+ dev_warn(&hdev->dev, "HDMI: failed to get afg sub nodes\n");
return -EINVAL;
}
- hdac->num_nodes = num_nodes;
- hdac->start_nid = nid;
-
- for (i = 0; i < hdac->num_nodes; i++, nid++) {
+ for (i = 0; i < num_nodes; i++, nid++) {
unsigned int caps;
unsigned int type;
- caps = get_wcaps(hdac, nid);
+ caps = get_wcaps(hdev, nid);
type = get_wcaps_type(caps);
if (!(caps & AC_WCAP_DIGITAL))
@@ -1495,16 +1493,14 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
}
}
- hdac->end_nid = nid;
-
if (!hdmi->num_pin || !hdmi->num_cvt) {
ret = -EIO;
goto free_widgets;
}
- ret = hdac_hdmi_create_dais(hdac, dais, hdmi, hdmi->num_cvt);
+ ret = hdac_hdmi_create_dais(hdev, dais, hdmi, hdmi->num_cvt);
if (ret) {
- dev_err(&hdac->dev, "Failed to create dais with err: %d\n",
+ dev_err(&hdev->dev, "Failed to create dais with err: %d\n",
ret);
goto free_widgets;
}
@@ -1537,7 +1533,7 @@ free_widgets:
static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
{
struct hdac_ext_device *edev = aptr;
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin = NULL;
struct hdac_hdmi_port *hport = NULL;
struct snd_soc_codec *codec = edev->scodec;
@@ -1546,7 +1542,7 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
/* Don't know how this mapping is derived */
hda_nid_t pin_nid = port + 0x04;
- dev_dbg(&edev->hdac.dev, "%s: for pin:%d port=%d\n", __func__,
+ dev_dbg(&edev->hdev.dev, "%s: for pin:%d port=%d\n", __func__,
pin_nid, pipe);
/*
@@ -1559,7 +1555,7 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
SNDRV_CTL_POWER_D0)
return;
- if (atomic_read(&edev->hdac.in_pm))
+ if (atomic_read(&edev->hdev.in_pm))
return;
list_for_each_entry(pin, &hdmi->pin_list, head) {
@@ -1614,7 +1610,7 @@ static int create_fill_jack_kcontrols(struct snd_soc_card *card,
char *name;
int i = 0, j;
struct snd_soc_codec *codec = edev->scodec;
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
kc = devm_kcalloc(codec->dev, hdmi->num_ports,
sizeof(*kc), GFP_KERNEL);
@@ -1652,7 +1648,7 @@ int hdac_hdmi_jack_port_init(struct snd_soc_codec *codec,
struct snd_soc_dapm_context *dapm)
{
struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin;
struct snd_soc_dapm_widget *widgets;
struct snd_soc_dapm_route *route;
@@ -1728,7 +1724,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
{
struct snd_soc_codec *codec = dai->codec;
struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm;
struct snd_pcm *snd_pcm;
int err;
@@ -1750,7 +1746,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
if (snd_pcm) {
err = snd_hdac_add_chmap_ctls(snd_pcm, device, &hdmi->chmap);
if (err < 0) {
- dev_err(&edev->hdac.dev,
+ dev_err(&edev->hdev.dev,
"chmap control add failed with err: %d for pcm: %d\n",
err, device);
kfree(pcm);
@@ -1791,7 +1787,7 @@ static void hdac_hdmi_present_sense_all_pins(struct hdac_ext_device *edev,
static int hdmi_codec_probe(struct snd_soc_codec *codec)
{
struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct snd_soc_dapm_context *dapm =
snd_soc_component_get_dapm(&codec->component);
struct hdac_ext_link *hlink = NULL;
@@ -1803,9 +1799,9 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
* hold the ref while we probe, also no need to drop the ref on
* exit, we call pm_runtime_suspend() so that will do for us
*/
- hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+ hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
if (!hlink) {
- dev_err(&edev->hdac.dev, "hdac link not found\n");
+ dev_err(&edev->hdev.dev, "hdac link not found\n");
return -EIO;
}
@@ -1818,7 +1814,7 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
aops.audio_ptr = edev;
ret = snd_hdac_i915_register_notifier(&aops);
if (ret < 0) {
- dev_err(&edev->hdac.dev, "notifier register failed: err: %d\n",
+ dev_err(&edev->hdev.dev, "notifier register failed: err: %d\n",
ret);
return ret;
}
@@ -1831,9 +1827,9 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
* hdac_device core already sets the state to active and calls
* get_noresume. So enable runtime and set the device to suspend.
*/
- pm_runtime_enable(&edev->hdac.dev);
- pm_runtime_put(&edev->hdac.dev);
- pm_runtime_suspend(&edev->hdac.dev);
+ pm_runtime_enable(&edev->hdev.dev);
+ pm_runtime_put(&edev->hdev.dev);
+ pm_runtime_suspend(&edev->hdev.dev);
return 0;
}
@@ -1842,7 +1838,7 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
{
struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
- pm_runtime_disable(&edev->hdac.dev);
+ pm_runtime_disable(&edev->hdev.dev);
return 0;
}
@@ -1850,9 +1846,9 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
static int hdmi_codec_prepare(struct device *dev)
{
struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_device *hdac = &edev->hdac;
+ struct hdac_device *hdev = &edev->hdev;
- pm_runtime_get_sync(&edev->hdac.dev);
+ pm_runtime_get_sync(&edev->hdev.dev);
/*
* Power down afg.
@@ -1861,7 +1857,7 @@ static int hdmi_codec_prepare(struct device *dev)
* is received. So setting power state is ensured without using loop
* to read the state.
*/
- snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D3);
return 0;
@@ -1870,15 +1866,15 @@ static int hdmi_codec_prepare(struct device *dev)
static void hdmi_codec_complete(struct device *dev)
{
struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
- struct hdac_device *hdac = &edev->hdac;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_device *hdev = &edev->hdev;
/* Power up afg */
- snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D0);
- hdac_hdmi_skl_enable_all_pins(&edev->hdac);
- hdac_hdmi_skl_enable_dp12(&edev->hdac);
+ hdac_hdmi_skl_enable_all_pins(&edev->hdev);
+ hdac_hdmi_skl_enable_dp12(&edev->hdev);
/*
* As the ELD notify callback request is not entertained while the
@@ -1888,7 +1884,7 @@ static void hdmi_codec_complete(struct device *dev)
*/
hdac_hdmi_present_sense_all_pins(edev, hdmi, false);
- pm_runtime_put_sync(&edev->hdac.dev);
+ pm_runtime_put_sync(&edev->hdev.dev);
}
#else
#define hdmi_codec_prepare NULL
@@ -1901,21 +1897,20 @@ static const struct snd_soc_codec_driver hdmi_hda_codec = {
.idle_bias_off = true,
};
-static void hdac_hdmi_get_chmap(struct hdac_device *hdac, int pcm_idx,
+static void hdac_hdmi_get_chmap(struct hdac_device *hdev, int pcm_idx,
unsigned char *chmap)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
memcpy(chmap, pcm->chmap, ARRAY_SIZE(pcm->chmap));
}
-static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
+static void hdac_hdmi_set_chmap(struct hdac_device *hdev, int pcm_idx,
unsigned char *chmap, int prepared)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_ext_device *edev = to_ehdac_device(hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
struct hdac_hdmi_port *port;
@@ -1934,10 +1929,9 @@ static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
mutex_unlock(&pcm->lock);
}
-static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
+static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdev, int pcm_idx)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
if (!pcm)
@@ -1949,10 +1943,9 @@ static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
return true;
}
-static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx)
+static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdev, int pcm_idx)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
struct hdac_hdmi_port *port;
@@ -1983,30 +1976,30 @@ static struct hdac_hdmi_drv_data intel_drv_data = {
static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
{
- struct hdac_device *codec = &edev->hdac;
+ struct hdac_device *hdev = &edev->hdev;
struct hdac_hdmi_priv *hdmi_priv;
struct snd_soc_dai_driver *hdmi_dais = NULL;
struct hdac_ext_link *hlink = NULL;
int num_dais = 0;
int ret = 0;
- struct hdac_driver *hdrv = drv_to_hdac_driver(codec->dev.driver);
- const struct hda_device_id *hdac_id = hdac_get_device_id(codec, hdrv);
+ struct hdac_driver *hdrv = drv_to_hdac_driver(hdev->dev.driver);
+ const struct hda_device_id *hdac_id = hdac_get_device_id(hdev, hdrv);
/* hold the ref while we probe */
- hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+ hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
if (!hlink) {
- dev_err(&edev->hdac.dev, "hdac link not found\n");
+ dev_err(&edev->hdev.dev, "hdac link not found\n");
return -EIO;
}
snd_hdac_ext_bus_link_get(edev->ebus, hlink);
- hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
+ hdmi_priv = devm_kzalloc(&hdev->dev, sizeof(*hdmi_priv), GFP_KERNEL);
if (hdmi_priv == NULL)
return -ENOMEM;
edev->private_data = hdmi_priv;
- snd_hdac_register_chmap_ops(codec, &hdmi_priv->chmap);
+ snd_hdac_register_chmap_ops(hdev, &hdmi_priv->chmap);
hdmi_priv->chmap.ops.get_chmap = hdac_hdmi_get_chmap;
hdmi_priv->chmap.ops.set_chmap = hdac_hdmi_set_chmap;
hdmi_priv->chmap.ops.is_pcm_attached = is_hdac_hdmi_pcm_attached;
@@ -2021,7 +2014,7 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
else
hdmi_priv->drv_data = &intel_drv_data;
- dev_set_drvdata(&codec->dev, edev);
+ dev_set_drvdata(&hdev->dev, edev);
INIT_LIST_HEAD(&hdmi_priv->pin_list);
INIT_LIST_HEAD(&hdmi_priv->cvt_list);
@@ -2032,9 +2025,9 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
* Turned off in the runtime_suspend during the first explicit
* pm_runtime_suspend call.
*/
- ret = snd_hdac_display_power(edev->hdac.bus, true);
+ ret = snd_hdac_display_power(edev->hdev.bus, true);
if (ret < 0) {
- dev_err(&edev->hdac.dev,
+ dev_err(&edev->hdev.dev,
"Cannot turn on display power on i915 err: %d\n",
ret);
return ret;
@@ -2042,13 +2035,14 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
ret = hdac_hdmi_parse_and_map_nid(edev, &hdmi_dais, &num_dais);
if (ret < 0) {
- dev_err(&codec->dev,
+ dev_err(&hdev->dev,
"Failed in parse and map nid with err: %d\n", ret);
return ret;
}
+ snd_hdac_refresh_widgets(hdev, true);
/* ASoC specific initialization */
- ret = snd_soc_register_codec(&codec->dev, &hdmi_hda_codec,
+ ret = snd_soc_register_codec(&hdev->dev, &hdmi_hda_codec,
hdmi_dais, num_dais);
snd_hdac_ext_bus_link_put(edev->ebus, hlink);
@@ -2058,14 +2052,14 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin, *pin_next;
struct hdac_hdmi_cvt *cvt, *cvt_next;
struct hdac_hdmi_pcm *pcm, *pcm_next;
struct hdac_hdmi_port *port, *port_next;
int i;
- snd_soc_unregister_codec(&edev->hdac.dev);
+ snd_soc_unregister_codec(&edev->hdev.dev);
list_for_each_entry_safe(pcm, pcm_next, &hdmi->pcm_list, head) {
pcm->cvt = NULL;
@@ -2101,8 +2095,8 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
static int hdac_hdmi_runtime_suspend(struct device *dev)
{
struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_device *hdac = &edev->hdac;
- struct hdac_bus *bus = hdac->bus;
+ struct hdac_device *hdev = &edev->hdev;
+ struct hdac_bus *bus = hdev->bus;
struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
struct hdac_ext_link *hlink = NULL;
int err;
@@ -2120,7 +2114,7 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
* is received. So setting power state is ensured without using loop
* to read the state.
*/
- snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D3);
err = snd_hdac_display_power(bus, false);
if (err < 0) {
@@ -2142,8 +2136,8 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
static int hdac_hdmi_runtime_resume(struct device *dev)
{
struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_device *hdac = &edev->hdac;
- struct hdac_bus *bus = hdac->bus;
+ struct hdac_device *hdev = &edev->hdev;
+ struct hdac_bus *bus = hdev->bus;
struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
struct hdac_ext_link *hlink = NULL;
int err;
@@ -2168,11 +2162,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
return err;
}
- hdac_hdmi_skl_enable_all_pins(&edev->hdac);
- hdac_hdmi_skl_enable_dp12(&edev->hdac);
+ hdac_hdmi_skl_enable_all_pins(&edev->hdev);
+ hdac_hdmi_skl_enable_dp12(&edev->hdev);
/* Power up afg */
- snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D0);
return 0;
@@ -2192,6 +2186,8 @@ static const struct hda_device_id hdmi_list[] = {
HDA_CODEC_EXT_ENTRY(0x80862809, 0x100000, "Skylake HDMI", 0),
HDA_CODEC_EXT_ENTRY(0x8086280a, 0x100000, "Broxton HDMI", 0),
HDA_CODEC_EXT_ENTRY(0x8086280b, 0x100000, "Kabylake HDMI", 0),
+ HDA_CODEC_EXT_ENTRY(0x8086280c, 0x100000, "Cannonlake HDMI",
+ &intel_glk_drv_data),
HDA_CODEC_EXT_ENTRY(0x8086280d, 0x100000, "Geminilake HDMI",
&intel_glk_drv_data),
{}
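The rename hunks above consistently swap `edev->private_data` dereferences for `hdev_to_hdmi_priv()`. The helper's definition is introduced outside this section; since hdac_hdmi_dev_probe() still stores the pointer in `edev->private_data`, a plausible minimal form is:

/* Sketch, not the patch's literal definition: recover the driver data
 * from a bare hdac_device via the extended-device wrapper.
 */
static inline struct hdac_hdmi_priv *hdev_to_hdmi_priv(struct hdac_device *hdev)
{
	return to_ehdac_device(hdev)->private_data;
}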
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
new file mode 100644
index 000000000000..31b0864583e8
--- /dev/null
+++ b/sound/soc/codecs/max98373.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, Maxim Integrated */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/tlv.h>
+#include "max98373.h"
+
+static struct reg_default max98373_reg[] = {
+ {MAX98373_R2000_SW_RESET, 0x00},
+ {MAX98373_R2001_INT_RAW1, 0x00},
+ {MAX98373_R2002_INT_RAW2, 0x00},
+ {MAX98373_R2003_INT_RAW3, 0x00},
+ {MAX98373_R2004_INT_STATE1, 0x00},
+ {MAX98373_R2005_INT_STATE2, 0x00},
+ {MAX98373_R2006_INT_STATE3, 0x00},
+ {MAX98373_R2007_INT_FLAG1, 0x00},
+ {MAX98373_R2008_INT_FLAG2, 0x00},
+ {MAX98373_R2009_INT_FLAG3, 0x00},
+ {MAX98373_R200A_INT_EN1, 0x00},
+ {MAX98373_R200B_INT_EN2, 0x00},
+ {MAX98373_R200C_INT_EN3, 0x00},
+ {MAX98373_R200D_INT_FLAG_CLR1, 0x00},
+ {MAX98373_R200E_INT_FLAG_CLR2, 0x00},
+ {MAX98373_R200F_INT_FLAG_CLR3, 0x00},
+ {MAX98373_R2010_IRQ_CTRL, 0x00},
+ {MAX98373_R2014_THERM_WARN_THRESH, 0x10},
+ {MAX98373_R2015_THERM_SHDN_THRESH, 0x27},
+ {MAX98373_R2016_THERM_HYSTERESIS, 0x01},
+ {MAX98373_R2017_THERM_FOLDBACK_SET, 0xC0},
+ {MAX98373_R2018_THERM_FOLDBACK_EN, 0x00},
+ {MAX98373_R201E_PIN_DRIVE_STRENGTH, 0x55},
+ {MAX98373_R2020_PCM_TX_HIZ_EN_1, 0xFE},
+ {MAX98373_R2021_PCM_TX_HIZ_EN_2, 0xFF},
+ {MAX98373_R2022_PCM_TX_SRC_1, 0x00},
+ {MAX98373_R2023_PCM_TX_SRC_2, 0x00},
+ {MAX98373_R2024_PCM_DATA_FMT_CFG, 0xC0},
+ {MAX98373_R2025_AUDIO_IF_MODE, 0x00},
+ {MAX98373_R2026_PCM_CLOCK_RATIO, 0x04},
+ {MAX98373_R2027_PCM_SR_SETUP_1, 0x08},
+ {MAX98373_R2028_PCM_SR_SETUP_2, 0x88},
+ {MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1, 0x00},
+ {MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2, 0x00},
+ {MAX98373_R202B_PCM_RX_EN, 0x00},
+ {MAX98373_R202C_PCM_TX_EN, 0x00},
+ {MAX98373_R202E_ICC_RX_CH_EN_1, 0x00},
+ {MAX98373_R202F_ICC_RX_CH_EN_2, 0x00},
+ {MAX98373_R2030_ICC_TX_HIZ_EN_1, 0xFF},
+ {MAX98373_R2031_ICC_TX_HIZ_EN_2, 0xFF},
+ {MAX98373_R2032_ICC_LINK_EN_CFG, 0x30},
+ {MAX98373_R2034_ICC_TX_CNTL, 0x00},
+ {MAX98373_R2035_ICC_TX_EN, 0x00},
+ {MAX98373_R2036_SOUNDWIRE_CTRL, 0x05},
+ {MAX98373_R203D_AMP_DIG_VOL_CTRL, 0x00},
+ {MAX98373_R203E_AMP_PATH_GAIN, 0x08},
+ {MAX98373_R203F_AMP_DSP_CFG, 0x02},
+ {MAX98373_R2040_TONE_GEN_CFG, 0x00},
+ {MAX98373_R2041_AMP_CFG, 0x03},
+ {MAX98373_R2042_AMP_EDGE_RATE_CFG, 0x00},
+ {MAX98373_R2043_AMP_EN, 0x00},
+ {MAX98373_R2046_IV_SENSE_ADC_DSP_CFG, 0x04},
+ {MAX98373_R2047_IV_SENSE_ADC_EN, 0x00},
+ {MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0x00},
+ {MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG, 0x00},
+ {MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG, 0x00},
+ {MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0x00},
+ {MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0x00},
+ {MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0x00},
+ {MAX98373_R2090_BDE_LVL_HOLD, 0x00},
+ {MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0x00},
+ {MAX98373_R2092_BDE_CLIPPER_MODE, 0x00},
+ {MAX98373_R2097_BDE_L1_THRESH, 0x00},
+ {MAX98373_R2098_BDE_L2_THRESH, 0x00},
+ {MAX98373_R2099_BDE_L3_THRESH, 0x00},
+ {MAX98373_R209A_BDE_L4_THRESH, 0x00},
+ {MAX98373_R209B_BDE_THRESH_HYST, 0x00},
+ {MAX98373_R20A8_BDE_L1_CFG_1, 0x00},
+ {MAX98373_R20A9_BDE_L1_CFG_2, 0x00},
+ {MAX98373_R20AA_BDE_L1_CFG_3, 0x00},
+ {MAX98373_R20AB_BDE_L2_CFG_1, 0x00},
+ {MAX98373_R20AC_BDE_L2_CFG_2, 0x00},
+ {MAX98373_R20AD_BDE_L2_CFG_3, 0x00},
+ {MAX98373_R20AE_BDE_L3_CFG_1, 0x00},
+ {MAX98373_R20AF_BDE_L3_CFG_2, 0x00},
+ {MAX98373_R20B0_BDE_L3_CFG_3, 0x00},
+ {MAX98373_R20B1_BDE_L4_CFG_1, 0x00},
+ {MAX98373_R20B2_BDE_L4_CFG_2, 0x00},
+ {MAX98373_R20B3_BDE_L4_CFG_3, 0x00},
+ {MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE, 0x00},
+ {MAX98373_R20B5_BDE_EN, 0x00},
+ {MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0x00},
+ {MAX98373_R20D1_DHT_CFG, 0x01},
+ {MAX98373_R20D2_DHT_ATTACK_CFG, 0x02},
+ {MAX98373_R20D3_DHT_RELEASE_CFG, 0x03},
+ {MAX98373_R20D4_DHT_EN, 0x00},
+ {MAX98373_R20E0_LIMITER_THRESH_CFG, 0x00},
+ {MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0x00},
+ {MAX98373_R20E2_LIMITER_EN, 0x00},
+ {MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG, 0x00},
+ {MAX98373_R20FF_GLOBAL_SHDN, 0x00},
+ {MAX98373_R21FF_REV_ID, 0x42},
+};
+
+static int max98373_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ unsigned int format = 0;
+ unsigned int invert = 0;
+
+ dev_dbg(codec->dev, "%s: fmt 0x%08X\n", __func__, fmt);
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ invert = MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE;
+ break;
+ default:
+ dev_err(codec->dev, "DAI invert mode unsupported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2026_PCM_CLOCK_RATIO,
+ MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE,
+ invert);
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ format = MAX98373_PCM_FORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ format = MAX98373_PCM_FORMAT_LJ;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ format = MAX98373_PCM_FORMAT_TDM_MODE1;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ format = MAX98373_PCM_FORMAT_TDM_MODE0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_MODE_CFG_FORMAT_MASK,
+ format << MAX98373_PCM_MODE_CFG_FORMAT_SHIFT);
+
+ return 0;
+}
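For context, a hedged sketch of how a machine driver would typically drive this callback; the wrapper name below is hypothetical and not part of this patch:

/* Illustrative only: select I2S framing with normal clock polarity on
 * the codec DAI; max98373_dai_set_fmt() then programs
 * MAX98373_R2024_PCM_DATA_FMT_CFG and MAX98373_R2026_PCM_CLOCK_RATIO.
 */
static int sketch_link_init(struct snd_soc_pcm_runtime *rtd)
{
	return snd_soc_dai_set_fmt(rtd->codec_dai,
				   SND_SOC_DAIFMT_I2S |
				   SND_SOC_DAIFMT_NB_NF |
				   SND_SOC_DAIFMT_CBS_CFS);
}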
+
+/* BCLKs per LRCLK */
+static const int bclk_sel_table[] = {
+ 32, 48, 64, 96, 128, 192, 256, 384, 512, 320,
+};
+
+static int max98373_get_bclk_sel(int bclk)
+{
+ int i;
+ /* match BCLKs per LRCLK */
+ for (i = 0; i < ARRAY_SIZE(bclk_sel_table); i++) {
+ if (bclk_sel_table[i] == bclk)
+ return i + 2;
+ }
+ return 0;
+}
+
+static int max98373_set_clock(struct snd_soc_codec *codec,
+ struct snd_pcm_hw_params *params)
+{
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ /* BCLK/LRCLK ratio calculation */
+ int blr_clk_ratio = params_channels(params) * max98373->ch_size;
+ int value;
+
+ if (!max98373->tdm_mode) {
+ /* BCLK configuration */
+ value = max98373_get_bclk_sel(blr_clk_ratio);
+ if (!value) {
+ dev_err(codec->dev, "format unsupported %d\n",
+ params_format(params));
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2026_PCM_CLOCK_RATIO,
+ MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+ value);
+ }
+ return 0;
+}
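A worked example of the encoding; the numbers follow directly from bclk_sel_table[] and the `i + 2` return in max98373_get_bclk_sel():

/*
 * 2 ch x 16-bit -> 32 BCLKs/LRCLK  -> table index 0 -> BSEL = 2
 * 2 ch x 32-bit -> 64 BCLKs/LRCLK  -> table index 2 -> BSEL = 4
 * 8 ch x 32-bit -> 256 BCLKs/LRCLK -> table index 6 -> BSEL = 8
 *
 * A ratio not in the table (e.g. 5 ch x 16-bit = 80) yields 0 and the
 * caller fails with -EINVAL.
 */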
+
+static int max98373_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ unsigned int sampling_rate = 0;
+ unsigned int chan_sz = 0;
+
+ /* pcm mode configuration */
+ switch (snd_pcm_format_width(params_format(params))) {
+ case 16:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+ break;
+ case 24:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+ break;
+ case 32:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+ break;
+ default:
+ dev_err(codec->dev, "format unsupported %d\n",
+ params_format(params));
+ goto err;
+ }
+
+ max98373->ch_size = snd_pcm_format_width(params_format(params));
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+ dev_dbg(codec->dev, "format supported %d",
+ params_format(params));
+
+ /* sampling rate configuration */
+ switch (params_rate(params)) {
+ case 8000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_8000;
+ break;
+ case 11025:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_11025;
+ break;
+ case 12000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_12000;
+ break;
+ case 16000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_16000;
+ break;
+ case 22050:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_22050;
+ break;
+ case 24000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_24000;
+ break;
+ case 32000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_32000;
+ break;
+ case 44100:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_44100;
+ break;
+ case 48000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_48000;
+ break;
+ default:
+ dev_err(codec->dev, "rate %d not supported\n",
+ params_rate(params));
+ goto err;
+ }
+
+ /* set DAI_SR to correct LRCLK frequency */
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2027_PCM_SR_SETUP_1,
+ MAX98373_PCM_SR_SET1_SR_MASK,
+ sampling_rate);
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2028_PCM_SR_SETUP_2,
+ MAX98373_PCM_SR_SET2_SR_MASK,
+ sampling_rate << MAX98373_PCM_SR_SET2_SR_SHIFT);
+
+ /* set sampling rate of IV */
+ if (max98373->interleave_mode &&
+ sampling_rate > MAX98373_PCM_SR_SET1_SR_16000)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2028_PCM_SR_SETUP_2,
+ MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+ sampling_rate - 3);
+ else
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2028_PCM_SR_SETUP_2,
+ MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+ sampling_rate);
+
+ return max98373_set_clock(codec, params);
+err:
+ return -EINVAL;
+}
+
+static int max98373_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ int bsel = 0;
+ unsigned int chan_sz = 0;
+ unsigned int mask;
+ int x, slot_found;
+
+ if (!tx_mask && !rx_mask && !slots && !slot_width)
+ max98373->tdm_mode = false;
+ else
+ max98373->tdm_mode = true;
+
+ /* BCLK configuration */
+ bsel = max98373_get_bclk_sel(slots * slot_width);
+ if (bsel == 0) {
+ dev_err(codec->dev, "BCLK %d not supported\n",
+ slots * slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2026_PCM_CLOCK_RATIO,
+ MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+ bsel);
+
+ /* Channel size configuration */
+ switch (slot_width) {
+ case 16:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+ break;
+ case 24:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+ break;
+ case 32:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+ break;
+ default:
+ dev_err(codec->dev, "format unsupported %d\n",
+ slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+ /* Rx slot configuration */
+ slot_found = 0;
+ mask = rx_mask;
+ for (x = 0; x < 16; x++, mask >>= 1) {
+ if (mask & 0x1) {
+ if (slot_found == 0)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+ MAX98373_PCM_TO_SPK_CH0_SRC_MASK, x);
+ else
+ regmap_write(max98373->regmap,
+ MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+ x);
+ slot_found++;
+ if (slot_found > 1)
+ break;
+ }
+ }
+
+ /* Tx slot Hi-Z configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ ~tx_mask & 0xFF);
+ regmap_write(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ (~tx_mask & 0xFF00) >> 8);
+
+ return 0;
+}
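A worked example of the mask handling, with illustrative values:

/*
 * snd_soc_dai_set_tdm_slot(dai, 0x0003, 0x0030, 8, 32);
 *
 * - slots * slot_width = 256 -> BSEL = 8 (see bclk_sel_table[])
 * - rx_mask 0x0030: slot 4 feeds mono-mix CH0, slot 5 feeds CH1
 * - tx_mask 0x0003: ~0x0003 = 0xfffc, so TX_HIZ_EN_1 = 0xfc and
 *   TX_HIZ_EN_2 = 0xff, i.e. only slots 0 and 1 actively drive DOUT
 */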
+
+#define MAX98373_RATES SNDRV_PCM_RATE_8000_96000
+
+#define MAX98373_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops max98373_dai_ops = {
+ .set_fmt = max98373_dai_set_fmt,
+ .hw_params = max98373_dai_hw_params,
+ .set_tdm_slot = max98373_dai_tdm_slot,
+};
+
+static int max98373_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R20FF_GLOBAL_SHDN,
+ MAX98373_GLOBAL_EN_MASK, 1);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R20FF_GLOBAL_SHDN,
+ MAX98373_GLOBAL_EN_MASK, 0);
+ max98373->tdm_mode = false;
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static const char * const max98373_switch_text[] = {
+ "Left", "Right", "LeftRight"};
+
+static const struct soc_enum dai_sel_enum =
+ SOC_ENUM_SINGLE(MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+ MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT,
+ 3, max98373_switch_text);
+
+static const struct snd_kcontrol_new max98373_dai_controls =
+ SOC_DAPM_ENUM("DAI Sel", dai_sel_enum);
+
+static const struct snd_kcontrol_new max98373_vi_control =
+ SOC_DAPM_SINGLE("Switch", MAX98373_R202C_PCM_TX_EN, 0, 1, 0);
+
+static const struct snd_kcontrol_new max98373_spkfb_control =
+ SOC_DAPM_SINGLE("Switch", MAX98373_R2043_AMP_EN, 1, 1, 0);
+
+static const struct snd_soc_dapm_widget max98373_dapm_widgets[] = {
+SND_SOC_DAPM_DAC_E("Amp Enable", "HiFi Playback",
+ MAX98373_R202B_PCM_RX_EN, 0, 0, max98373_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_MUX("DAI Sel Mux", SND_SOC_NOPM, 0, 0,
+ &max98373_dai_controls),
+SND_SOC_DAPM_OUTPUT("BE_OUT"),
+SND_SOC_DAPM_AIF_OUT("Voltage Sense", "HiFi Capture", 0,
+ MAX98373_R2047_IV_SENSE_ADC_EN, 0, 0),
+SND_SOC_DAPM_AIF_OUT("Current Sense", "HiFi Capture", 0,
+ MAX98373_R2047_IV_SENSE_ADC_EN, 1, 0),
+SND_SOC_DAPM_AIF_OUT("Speaker FB Sense", "HiFi Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_SWITCH("VI Sense", SND_SOC_NOPM, 0, 0,
+ &max98373_vi_control),
+SND_SOC_DAPM_SWITCH("SpkFB Sense", SND_SOC_NOPM, 0, 0,
+ &max98373_spkfb_control),
+SND_SOC_DAPM_SIGGEN("VMON"),
+SND_SOC_DAPM_SIGGEN("IMON"),
+SND_SOC_DAPM_SIGGEN("FBMON"),
+};
+
+static DECLARE_TLV_DB_SCALE(max98373_digital_tlv, 0, -50, 0);
+static const DECLARE_TLV_DB_RANGE(max98373_spk_tlv,
+ 0, 8, TLV_DB_SCALE_ITEM(0, 50, 0),
+ 9, 10, TLV_DB_SCALE_ITEM(500, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_spkgain_max_tlv,
+ 0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_step_size_tlv,
+ 0, 1, TLV_DB_SCALE_ITEM(25, 25, 0),
+ 2, 4, TLV_DB_SCALE_ITEM(100, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_spkgain_min_tlv,
+ 0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_rotation_point_tlv,
+ 0, 1, TLV_DB_SCALE_ITEM(-50, -50, 0),
+ 2, 7, TLV_DB_SCALE_ITEM(-200, -100, 0),
+ 8, 9, TLV_DB_SCALE_ITEM(-1000, -200, 0),
+ 10, 11, TLV_DB_SCALE_ITEM(-1500, -300, 0),
+ 12, 13, TLV_DB_SCALE_ITEM(-2000, -200, 0),
+ 14, 15, TLV_DB_SCALE_ITEM(-2500, -500, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_limiter_thresh_tlv,
+ 0, 15, TLV_DB_SCALE_ITEM(0, -100, 0),
+);
+
+static const DECLARE_TLV_DB_RANGE(max98373_bde_gain_tlv,
+ 0, 60, TLV_DB_SCALE_ITEM(0, -25, 0),
+);
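The TLV values above are in 0.01 dB units; two worked examples:

/*
 * max98373_digital_tlv: starts at 0 dB and falls 0.5 dB per step, so
 * the 0x7F maximum of "Digital Volume" maps to -63.5 dB.
 * max98373_spk_tlv: register values 0..8 span 0..+4 dB in 0.5 dB
 * steps, then values 9 and 10 give +5 dB and +6 dB.
 */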
+
+static bool max98373_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98373_R2001_INT_RAW1 ... MAX98373_R200C_INT_EN3:
+ case MAX98373_R2010_IRQ_CTRL:
+ case MAX98373_R2014_THERM_WARN_THRESH
+ ... MAX98373_R2018_THERM_FOLDBACK_EN:
+ case MAX98373_R201E_PIN_DRIVE_STRENGTH
+ ... MAX98373_R2036_SOUNDWIRE_CTRL:
+ case MAX98373_R203D_AMP_DIG_VOL_CTRL ... MAX98373_R2043_AMP_EN:
+ case MAX98373_R2046_IV_SENSE_ADC_DSP_CFG
+ ... MAX98373_R2047_IV_SENSE_ADC_EN:
+ case MAX98373_R2051_MEAS_ADC_SAMPLING_RATE
+ ... MAX98373_R2056_MEAS_ADC_PVDD_CH_EN:
+ case MAX98373_R2090_BDE_LVL_HOLD ... MAX98373_R2092_BDE_CLIPPER_MODE:
+ case MAX98373_R2097_BDE_L1_THRESH
+ ... MAX98373_R209B_BDE_THRESH_HYST:
+ case MAX98373_R20A8_BDE_L1_CFG_1 ... MAX98373_R20B3_BDE_L4_CFG_3:
+ case MAX98373_R20B5_BDE_EN ... MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+ case MAX98373_R20D1_DHT_CFG ... MAX98373_R20D4_DHT_EN:
+ case MAX98373_R20E0_LIMITER_THRESH_CFG ... MAX98373_R20E2_LIMITER_EN:
+ case MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG
+ ... MAX98373_R20FF_GLOBAL_SHDN:
+ case MAX98373_R21FF_REV_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+ case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
+ case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
+ case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+ case MAX98373_R21FF_REV_ID:
+ return true;
+ default:
+ return false;
+ }
+}
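A hedged sketch of how these predicates and the reg_default table are typically wired into a regmap_config; the field values below (register/value widths, cache type) are illustrative assumptions, and the driver's actual config lies outside this section:

/* Sketch only, not the driver's actual configuration. */
static const struct regmap_config max98373_sketch_regmap = {
	.reg_bits = 16,			/* 0x2000-style register addresses */
	.val_bits = 8,
	.max_register = MAX98373_R21FF_REV_ID,
	.reg_defaults = max98373_reg,
	.num_reg_defaults = ARRAY_SIZE(max98373_reg),
	.readable_reg = max98373_readable_register,
	.volatile_reg = max98373_volatile_reg,
	.cache_type = REGCACHE_RBTREE,
};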
+
+static const char * const max98373_output_voltage_lvl_text[] = {
+ "5.43V", "6.09V", "6.83V", "7.67V", "8.60V",
+ "9.65V", "10.83V", "12.15V", "13.63V", "15.29V"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_out_volt_enum,
+ MAX98373_R203E_AMP_PATH_GAIN, 0,
+ max98373_output_voltage_lvl_text);
+
+static const char * const max98373_dht_attack_rate_text[] = {
+ "17.5us", "35us", "70us", "140us",
+ "280us", "560us", "1120us", "2240us"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_attack_rate_enum,
+ MAX98373_R20D2_DHT_ATTACK_CFG, 0,
+ max98373_dht_attack_rate_text);
+
+static const char * const max98373_dht_release_rate_text[] = {
+ "45ms", "225ms", "450ms", "1150ms",
+ "2250ms", "3100ms", "4500ms", "6750ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_release_rate_enum,
+ MAX98373_R20D3_DHT_RELEASE_CFG, 0,
+ max98373_dht_release_rate_text);
+
+static const char * const max98373_limiter_attack_rate_text[] = {
+ "10us", "20us", "40us", "80us",
+ "160us", "320us", "640us", "1.28ms",
+ "2.56ms", "5.12ms", "10.24ms", "20.48ms",
+ "40.96ms", "81.92ms", "16.384ms", "32.768ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_attack_rate_enum,
+ MAX98373_R20E1_LIMITER_ATK_REL_RATES, 4,
+ max98373_limiter_attack_rate_text);
+
+static const char * const max98373_limiter_release_rate_text[] = {
+ "40us", "80us", "160us", "320us",
+ "640us", "1.28ms", "2.56ms", "5.120ms",
+ "10.24ms", "20.48ms", "40.96ms", "81.92ms",
+ "163.84ms", "327.68ms", "655.36ms", "1310.72ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_release_rate_enum,
+ MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0,
+ max98373_limiter_release_rate_text);
+
+static const char * const max98373_ADC_samplerate_text[] = {
+ "333kHz", "192kHz", "64kHz", "48kHz"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_adc_samplerate_enum,
+ MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0,
+ max98373_ADC_samplerate_text);
+
+static const struct snd_kcontrol_new max98373_snd_controls[] = {
+SOC_SINGLE("Digital Vol Sel Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Volume Location Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Up Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Down Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT, 1, 0),
+SOC_SINGLE("CLK Monitor Switch", MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG,
+ MAX98373_CLOCK_MON_SHIFT, 1, 0),
+SOC_SINGLE("Dither Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_DITH_SHIFT, 1, 0),
+SOC_SINGLE("DC Blocker Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_DCBLK_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Digital Volume", MAX98373_R203D_AMP_DIG_VOL_CTRL,
+ 0, 0x7F, 0, max98373_digital_tlv),
+SOC_SINGLE_TLV("Speaker Volume", MAX98373_R203E_AMP_PATH_GAIN,
+ MAX98373_SPK_DIGI_GAIN_SHIFT, 10, 0, max98373_spk_tlv),
+SOC_SINGLE_TLV("FS Max Volume", MAX98373_R203E_AMP_PATH_GAIN,
+ MAX98373_FS_GAIN_MAX_SHIFT, 9, 0, max98373_spkgain_max_tlv),
+SOC_ENUM("Output Voltage", max98373_out_volt_enum),
+/* Dynamic Headroom Tracking */
+SOC_SINGLE("DHT Switch", MAX98373_R20D4_DHT_EN,
+ MAX98373_DHT_EN_SHIFT, 1, 0),
+SOC_SINGLE_TLV("DHT Min Volume", MAX98373_R20D1_DHT_CFG,
+ MAX98373_DHT_SPK_GAIN_MIN_SHIFT, 9, 0, max98373_dht_spkgain_min_tlv),
+SOC_SINGLE_TLV("DHT Rot Pnt Volume", MAX98373_R20D1_DHT_CFG,
+ MAX98373_DHT_ROT_PNT_SHIFT, 15, 0, max98373_dht_rotation_point_tlv),
+SOC_SINGLE_TLV("DHT Attack Step Volume", MAX98373_R20D2_DHT_ATTACK_CFG,
+ MAX98373_DHT_ATTACK_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_SINGLE_TLV("DHT Release Step Volume", MAX98373_R20D3_DHT_RELEASE_CFG,
+ MAX98373_DHT_RELEASE_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_ENUM("DHT Attack Rate", max98373_dht_attack_rate_enum),
+SOC_ENUM("DHT Release Rate", max98373_dht_release_rate_enum),
+/* ADC configuration */
+SOC_SINGLE("ADC PVDD CH Switch", MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0, 1, 0),
+SOC_SINGLE("ADC PVDD FLT Switch", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+ MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC TEMP FLT Switch", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+ MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC PVDD FLT Coeff", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+ 0, 0x3, 0),
+SOC_SINGLE("ADC TEMP FLT Coeff", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+ 0, 0x3, 0),
+SOC_ENUM("ADC SampleRate", max98373_adc_samplerate_enum),
+/* Brownout Detection Engine */
+SOC_SINGLE("BDE Switch", MAX98373_R20B5_BDE_EN, MAX98373_BDE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Mute Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+ MAX98373_LVL4_MUTE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Hold Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+ MAX98373_LVL4_HOLD_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL1 Thresh", MAX98373_R2097_BDE_L1_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL2 Thresh", MAX98373_R2098_BDE_L2_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL3 Thresh", MAX98373_R2099_BDE_L3_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL4 Thresh", MAX98373_R209A_BDE_L4_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0),
+SOC_SINGLE("BDE Clip Mode Switch", MAX98373_R2092_BDE_CLIPPER_MODE, 0, 1, 0),
+SOC_SINGLE("BDE Thresh Hysteresis", MAX98373_R209B_BDE_THRESH_HYST, 0, 0xFF, 0),
+SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
+SOC_SINGLE("BDE Attack Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 4, 0xF, 0),
+SOC_SINGLE("BDE Release Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0, 0xF, 0),
+SOC_SINGLE_TLV("BDE LVL1 Clip Thresh Volume", MAX98373_R20A9_BDE_L1_CFG_2,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Thresh Volume", MAX98373_R20AC_BDE_L2_CFG_2,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Thresh Volume", MAX98373_R20AF_BDE_L3_CFG_2,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Thresh Volume", MAX98373_R20B2_BDE_L4_CFG_2,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Clip Reduction Volume", MAX98373_R20AA_BDE_L1_CFG_3,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Reduction Volume", MAX98373_R20AD_BDE_L2_CFG_3,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Reduction Volume", MAX98373_R20B0_BDE_L3_CFG_3,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Reduction Volume", MAX98373_R20B3_BDE_L4_CFG_3,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Limiter Thresh Volume", MAX98373_R20A8_BDE_L1_CFG_1,
+ 0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Limiter Thresh Volume", MAX98373_R20AB_BDE_L2_CFG_1,
+ 0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Limiter Thresh Volume", MAX98373_R20AE_BDE_L3_CFG_1,
+ 0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Limiter Thresh Volume", MAX98373_R20B1_BDE_L4_CFG_1,
+ 0, 0xF, 0, max98373_limiter_thresh_tlv),
+/* Limiter */
+SOC_SINGLE("Limiter Switch", MAX98373_R20E2_LIMITER_EN,
+ MAX98373_LIMITER_EN_SHIFT, 1, 0),
+SOC_SINGLE("Limiter Src Switch", MAX98373_R20E0_LIMITER_THRESH_CFG,
+ MAX98373_LIMITER_THRESH_SRC_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Limiter Thresh Volume", MAX98373_R20E0_LIMITER_THRESH_CFG,
+ MAX98373_LIMITER_THRESH_SHIFT, 15, 0, max98373_limiter_thresh_tlv),
+SOC_ENUM("Limiter Attack Rate", max98373_limiter_attack_rate_enum),
+SOC_ENUM("Limiter Release Rate", max98373_limiter_release_rate_enum),
+};
+
+static const struct snd_soc_dapm_route max98373_audio_map[] = {
+ /* Playback */
+ {"DAI Sel Mux", "Left", "Amp Enable"},
+ {"DAI Sel Mux", "Right", "Amp Enable"},
+ {"DAI Sel Mux", "LeftRight", "Amp Enable"},
+ {"BE_OUT", NULL, "DAI Sel Mux"},
+ /* Capture */
+ { "VI Sense", "Switch", "VMON" },
+ { "VI Sense", "Switch", "IMON" },
+ { "SpkFB Sense", "Switch", "FBMON" },
+ { "Voltage Sense", NULL, "VI Sense" },
+ { "Current Sense", NULL, "VI Sense" },
+ { "Speaker FB Sense", NULL, "SpkFB Sense" },
+};
+
+static struct snd_soc_dai_driver max98373_dai[] = {
+ {
+ .name = "max98373-aif1",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98373_RATES,
+ .formats = MAX98373_FORMATS,
+ },
+ .capture = {
+ .stream_name = "HiFi Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98373_RATES,
+ .formats = MAX98373_FORMATS,
+ },
+ .ops = &max98373_dai_ops,
+ }
+};
+
+static int max98373_probe(struct snd_soc_codec *codec)
+{
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
+ codec->control_data = max98373->regmap;
+
+ /* Software Reset */
+ regmap_write(max98373->regmap,
+ MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+
+ /* IV default slot configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ 0xFF);
+ regmap_write(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ 0xFF);
+ /* L/R mix configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+ 0x80);
+ regmap_write(max98373->regmap,
+ MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+ 0x1);
+ /* Set initial volume (0dB) */
+ regmap_write(max98373->regmap,
+ MAX98373_R203D_AMP_DIG_VOL_CTRL,
+ 0x00);
+ regmap_write(max98373->regmap,
+ MAX98373_R203E_AMP_PATH_GAIN,
+ 0x00);
+ /* Enable DC blocker and dither */
+ regmap_write(max98373->regmap,
+ MAX98373_R203F_AMP_DSP_CFG,
+ 0x3);
+ /* Enable IMON VMON DC blocker */
+ regmap_write(max98373->regmap,
+ MAX98373_R2046_IV_SENSE_ADC_DSP_CFG,
+ 0x7);
+ /* voltage/current slot configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2022_PCM_TX_SRC_1,
+ (max98373->i_slot << MAX98373_PCM_TX_CH_SRC_A_I_SHIFT |
+ max98373->v_slot) & 0xFF);
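+ /* Take the configured V/I sense slots out of Hi-Z:
+ * HIZ_EN_1 covers slots 0-7, HIZ_EN_2 covers slots 8-15.
+ */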
+ if (max98373->v_slot < 8)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ 1 << max98373->v_slot, 0);
+ else
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ 1 << (max98373->v_slot - 8), 0);
+
+ if (max98373->i_slot < 8)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ 1 << max98373->i_slot, 0);
+ else
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ 1 << (max98373->i_slot - 8), 0);
+
+ /* speaker feedback slot configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2023_PCM_TX_SRC_2,
+ max98373->spkfb_slot & 0xFF);
+
+ /* Set interleave mode */
+ if (max98373->interleave_mode)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_TX_CH_INTERLEAVE_MASK,
+ MAX98373_PCM_TX_CH_INTERLEAVE_MASK);
+
+ /* Speaker enable */
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2043_AMP_EN,
+ MAX98373_SPK_EN_MASK, 1);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max98373_suspend(struct device *dev)
+{
+ struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
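+ /* Route writes to the cache and mark it dirty so that resume
+ * replays the full register configuration.
+ */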
+ regcache_cache_only(max98373->regmap, true);
+ regcache_mark_dirty(max98373->regmap);
+ return 0;
+}
+
+static int max98373_resume(struct device *dev)
+{
+ struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
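+ /* Reset the device before syncing the cached registers back */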
+ regmap_write(max98373->regmap,
+ MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ regcache_cache_only(max98373->regmap, false);
+ regcache_sync(max98373->regmap);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops max98373_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(max98373_suspend, max98373_resume)
+};
+
+static const struct snd_soc_codec_driver soc_codec_dev_max98373 = {
+ .probe = max98373_probe,
+ .component_driver = {
+ .controls = max98373_snd_controls,
+ .num_controls = ARRAY_SIZE(max98373_snd_controls),
+ .dapm_widgets = max98373_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98373_dapm_widgets),
+ .dapm_routes = max98373_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98373_audio_map),
+ },
+};
+
+static const struct regmap_config max98373_regmap = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = MAX98373_R21FF_REV_ID,
+ .reg_defaults = max98373_reg,
+ .num_reg_defaults = ARRAY_SIZE(max98373_reg),
+ .readable_reg = max98373_readable_register,
+ .volatile_reg = max98373_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static void max98373_slot_config(struct i2c_client *i2c,
+ struct max98373_priv *max98373)
+{
+ int value;
+ struct device *dev = &i2c->dev;
+
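+ /* TDM slot assignments for voltage/current/feedback sense;
+ * fall back to slots 0/1/2 when the properties are absent.
+ */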
+ if (!device_property_read_u32(dev, "maxim,vmon-slot-no", &value))
+ max98373->v_slot = value & 0xF;
+ else
+ max98373->v_slot = 0;
+
+ if (!device_property_read_u32(dev, "maxim,imon-slot-no", &value))
+ max98373->i_slot = value & 0xF;
+ else
+ max98373->i_slot = 1;
+
+ if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
+ max98373->spkfb_slot = value & 0xF;
+ else
+ max98373->spkfb_slot = 2;
+}
+
+static int max98373_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ int reg = 0;
+ struct max98373_priv *max98373;
+
+ max98373 = devm_kzalloc(&i2c->dev, sizeof(*max98373), GFP_KERNEL);
+ if (!max98373)
+ return -ENOMEM;
+ i2c_set_clientdata(i2c, max98373);
+
+ /* update interleave mode info */
+ if (device_property_read_bool(&i2c->dev, "maxim,interleave_mode"))
+ max98373->interleave_mode = 1;
+ else
+ max98373->interleave_mode = 0;
+
+ /* regmap initialization */
+ max98373->regmap = devm_regmap_init_i2c(i2c, &max98373_regmap);
+ if (IS_ERR(max98373->regmap)) {
+ ret = PTR_ERR(max98373->regmap);
+ dev_err(&i2c->dev,
+ "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ /* Check Revision ID */
+ ret = regmap_read(max98373->regmap,
+ MAX98373_R21FF_REV_ID, &reg);
+ if (ret < 0) {
+ dev_err(&i2c->dev,
+ "Failed to read: 0x%02X\n", MAX98373_R21FF_REV_ID);
+ return ret;
+ }
+ dev_info(&i2c->dev, "MAX98373 revision ID: 0x%02X\n", reg);
+
+ /* voltage/current slot configuration */
+ max98373_slot_config(i2c, max98373);
+
+ /* codec registration */
+ ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98373,
+ max98373_dai, ARRAY_SIZE(max98373_dai));
+ if (ret < 0)
+ dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
+
+ return ret;
+}
+
+static int max98373_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id max98373_i2c_id[] = {
+ { "max98373", 0},
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max98373_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id max98373_of_match[] = {
+ { .compatible = "maxim,max98373", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max98373_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id max98373_acpi_match[] = {
+ { "MX98373", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, max98373_acpi_match);
+#endif
+
+static struct i2c_driver max98373_i2c_driver = {
+ .driver = {
+ .name = "max98373",
+ .of_match_table = of_match_ptr(max98373_of_match),
+ .acpi_match_table = ACPI_PTR(max98373_acpi_match),
+ .pm = &max98373_pm,
+ },
+ .probe = max98373_i2c_probe,
+ .remove = max98373_i2c_remove,
+ .id_table = max98373_i2c_id,
+};
+
+module_i2c_driver(max98373_i2c_driver);
+
+MODULE_DESCRIPTION("ALSA SoC MAX98373 driver");
+MODULE_AUTHOR("Ryan Lee <ryans.lee@maximintegrated.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
new file mode 100644
index 000000000000..d0b359d0cf8c
--- /dev/null
+++ b/sound/soc/codecs/max98373.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, Maxim Integrated */
+#ifndef _MAX98373_H
+#define _MAX98373_H
+
+#define MAX98373_R2000_SW_RESET 0x2000
+#define MAX98373_R2001_INT_RAW1 0x2001
+#define MAX98373_R2002_INT_RAW2 0x2002
+#define MAX98373_R2003_INT_RAW3 0x2003
+#define MAX98373_R2004_INT_STATE1 0x2004
+#define MAX98373_R2005_INT_STATE2 0x2005
+#define MAX98373_R2006_INT_STATE3 0x2006
+#define MAX98373_R2007_INT_FLAG1 0x2007
+#define MAX98373_R2008_INT_FLAG2 0x2008
+#define MAX98373_R2009_INT_FLAG3 0x2009
+#define MAX98373_R200A_INT_EN1 0x200A
+#define MAX98373_R200B_INT_EN2 0x200B
+#define MAX98373_R200C_INT_EN3 0x200C
+#define MAX98373_R200D_INT_FLAG_CLR1 0x200D
+#define MAX98373_R200E_INT_FLAG_CLR2 0x200E
+#define MAX98373_R200F_INT_FLAG_CLR3 0x200F
+#define MAX98373_R2010_IRQ_CTRL 0x2010
+#define MAX98373_R2014_THERM_WARN_THRESH 0x2014
+#define MAX98373_R2015_THERM_SHDN_THRESH 0x2015
+#define MAX98373_R2016_THERM_HYSTERESIS 0x2016
+#define MAX98373_R2017_THERM_FOLDBACK_SET 0x2017
+#define MAX98373_R2018_THERM_FOLDBACK_EN 0x2018
+#define MAX98373_R201E_PIN_DRIVE_STRENGTH 0x201E
+#define MAX98373_R2020_PCM_TX_HIZ_EN_1 0x2020
+#define MAX98373_R2021_PCM_TX_HIZ_EN_2 0x2021
+#define MAX98373_R2022_PCM_TX_SRC_1 0x2022
+#define MAX98373_R2023_PCM_TX_SRC_2 0x2023
+#define MAX98373_R2024_PCM_DATA_FMT_CFG 0x2024
+#define MAX98373_R2025_AUDIO_IF_MODE 0x2025
+#define MAX98373_R2026_PCM_CLOCK_RATIO 0x2026
+#define MAX98373_R2027_PCM_SR_SETUP_1 0x2027
+#define MAX98373_R2028_PCM_SR_SETUP_2 0x2028
+#define MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 0x2029
+#define MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2 0x202A
+#define MAX98373_R202B_PCM_RX_EN 0x202B
+#define MAX98373_R202C_PCM_TX_EN 0x202C
+#define MAX98373_R202E_ICC_RX_CH_EN_1 0x202E
+#define MAX98373_R202F_ICC_RX_CH_EN_2 0x202F
+#define MAX98373_R2030_ICC_TX_HIZ_EN_1 0x2030
+#define MAX98373_R2031_ICC_TX_HIZ_EN_2 0x2031
+#define MAX98373_R2032_ICC_LINK_EN_CFG 0x2032
+#define MAX98373_R2034_ICC_TX_CNTL 0x2034
+#define MAX98373_R2035_ICC_TX_EN 0x2035
+#define MAX98373_R2036_SOUNDWIRE_CTRL 0x2036
+#define MAX98373_R203D_AMP_DIG_VOL_CTRL 0x203D
+#define MAX98373_R203E_AMP_PATH_GAIN 0x203E
+#define MAX98373_R203F_AMP_DSP_CFG 0x203F
+#define MAX98373_R2040_TONE_GEN_CFG 0x2040
+#define MAX98373_R2041_AMP_CFG 0x2041
+#define MAX98373_R2042_AMP_EDGE_RATE_CFG 0x2042
+#define MAX98373_R2043_AMP_EN 0x2043
+#define MAX98373_R2046_IV_SENSE_ADC_DSP_CFG 0x2046
+#define MAX98373_R2047_IV_SENSE_ADC_EN 0x2047
+#define MAX98373_R2051_MEAS_ADC_SAMPLING_RATE 0x2051
+#define MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG 0x2052
+#define MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG 0x2053
+#define MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK 0x2054
+#define MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK 0x2055
+#define MAX98373_R2056_MEAS_ADC_PVDD_CH_EN 0x2056
+#define MAX98373_R2090_BDE_LVL_HOLD 0x2090
+#define MAX98373_R2091_BDE_GAIN_ATK_REL_RATE 0x2091
+#define MAX98373_R2092_BDE_CLIPPER_MODE 0x2092
+#define MAX98373_R2097_BDE_L1_THRESH 0x2097
+#define MAX98373_R2098_BDE_L2_THRESH 0x2098
+#define MAX98373_R2099_BDE_L3_THRESH 0x2099
+#define MAX98373_R209A_BDE_L4_THRESH 0x209A
+#define MAX98373_R209B_BDE_THRESH_HYST 0x209B
+#define MAX98373_R20A8_BDE_L1_CFG_1 0x20A8
+#define MAX98373_R20A9_BDE_L1_CFG_2 0x20A9
+#define MAX98373_R20AA_BDE_L1_CFG_3 0x20AA
+#define MAX98373_R20AB_BDE_L2_CFG_1 0x20AB
+#define MAX98373_R20AC_BDE_L2_CFG_2 0x20AC
+#define MAX98373_R20AD_BDE_L2_CFG_3 0x20AD
+#define MAX98373_R20AE_BDE_L3_CFG_1 0x20AE
+#define MAX98373_R20AF_BDE_L3_CFG_2 0x20AF
+#define MAX98373_R20B0_BDE_L3_CFG_3 0x20B0
+#define MAX98373_R20B1_BDE_L4_CFG_1 0x20B1
+#define MAX98373_R20B2_BDE_L4_CFG_2 0x20B2
+#define MAX98373_R20B3_BDE_L4_CFG_3 0x20B3
+#define MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE 0x20B4
+#define MAX98373_R20B5_BDE_EN 0x20B5
+#define MAX98373_R20B6_BDE_CUR_STATE_READBACK 0x20B6
+#define MAX98373_R20D1_DHT_CFG 0x20D1
+#define MAX98373_R20D2_DHT_ATTACK_CFG 0x20D2
+#define MAX98373_R20D3_DHT_RELEASE_CFG 0x20D3
+#define MAX98373_R20D4_DHT_EN 0x20D4
+#define MAX98373_R20E0_LIMITER_THRESH_CFG 0x20E0
+#define MAX98373_R20E1_LIMITER_ATK_REL_RATES 0x20E1
+#define MAX98373_R20E2_LIMITER_EN 0x20E2
+#define MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG 0x20FE
+#define MAX98373_R20FF_GLOBAL_SHDN 0x20FF
+#define MAX98373_R21FF_REV_ID 0x21FF
+
+/* MAX98373_R2022_PCM_TX_SRC_1 */
+#define MAX98373_PCM_TX_CH_SRC_A_V_SHIFT (0)
+#define MAX98373_PCM_TX_CH_SRC_A_I_SHIFT (4)
+
+/* MAX98373_R2024_PCM_DATA_FMT_CFG */
+#define MAX98373_PCM_MODE_CFG_FORMAT_MASK (0x7 << 3)
+#define MAX98373_PCM_MODE_CFG_FORMAT_SHIFT (3)
+#define MAX98373_PCM_TX_CH_INTERLEAVE_MASK (0x1 << 2)
+#define MAX98373_PCM_FORMAT_I2S (0x0 << 0)
+#define MAX98373_PCM_FORMAT_LJ (0x1 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE0 (0x3 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE1 (0x4 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE2 (0x5 << 0)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_MASK (0x3 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_16 (0x1 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_24 (0x2 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_32 (0x3 << 6)
+
+/* MAX98373_R2026_PCM_CLOCK_RATIO */
+#define MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE (0x1 << 4)
+#define MAX98373_PCM_CLK_SETUP_BSEL_MASK (0xF << 0)
+
+/* MAX98373_R2027_PCM_SR_SETUP_1 */
+#define MAX98373_PCM_SR_SET1_SR_MASK (0xF << 0)
+#define MAX98373_PCM_SR_SET1_SR_8000 (0x0 << 0)
+#define MAX98373_PCM_SR_SET1_SR_11025 (0x1 << 0)
+#define MAX98373_PCM_SR_SET1_SR_12000 (0x2 << 0)
+#define MAX98373_PCM_SR_SET1_SR_16000 (0x3 << 0)
+#define MAX98373_PCM_SR_SET1_SR_22050 (0x4 << 0)
+#define MAX98373_PCM_SR_SET1_SR_24000 (0x5 << 0)
+#define MAX98373_PCM_SR_SET1_SR_32000 (0x6 << 0)
+#define MAX98373_PCM_SR_SET1_SR_44100 (0x7 << 0)
+#define MAX98373_PCM_SR_SET1_SR_48000 (0x8 << 0)
+
+/* MAX98373_R2028_PCM_SR_SETUP_2 */
+#define MAX98373_PCM_SR_SET2_SR_MASK (0xF << 4)
+#define MAX98373_PCM_SR_SET2_SR_SHIFT (4)
+#define MAX98373_PCM_SR_SET2_IVADC_SR_MASK (0xF << 0)
+
+/* MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 */
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_MASK (0x3 << 6)
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT (6)
+#define MAX98373_PCM_TO_SPK_CH0_SRC_MASK (0xF << 0)
+
+/* MAX98373_R203E_AMP_PATH_GAIN */
+#define MAX98373_SPK_DIGI_GAIN_MASK (0xF << 4)
+#define MAX98373_SPK_DIGI_GAIN_SHIFT (4)
+#define MAX98373_FS_GAIN_MAX_MASK (0xF << 0)
+#define MAX98373_FS_GAIN_MAX_SHIFT (0)
+
+/* MAX98373_R203F_AMP_DSP_CFG */
+#define MAX98373_AMP_DSP_CFG_DCBLK_SHIFT (0)
+#define MAX98373_AMP_DSP_CFG_DITH_SHIFT (1)
+#define MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT (2)
+#define MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT (3)
+#define MAX98373_AMP_DSP_CFG_DAC_INV_SHIFT (5)
+#define MAX98373_AMP_VOL_SEL_SHIFT (7)
+
+/* MAX98373_R2043_AMP_EN */
+#define MAX98373_SPKFB_EN_MASK (0x1 << 1)
+#define MAX98373_SPK_EN_MASK (0x1 << 0)
+#define MAX98373_SPKFB_EN_SHIFT (1)
+
+/* MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG */
+#define MAX98373_FLT_EN_SHIFT (4)
+
+/* MAX98373_R20B2_BDE_L4_CFG_2 */
+#define MAX98373_LVL4_MUTE_EN_SHIFT (7)
+#define MAX98373_LVL4_HOLD_EN_SHIFT (6)
+
+/* MAX98373_R20B5_BDE_EN */
+#define MAX98373_BDE_EN_SHIFT (0)
+
+/* MAX98373_R20D1_DHT_CFG */
+#define MAX98373_DHT_SPK_GAIN_MIN_SHIFT (4)
+#define MAX98373_DHT_ROT_PNT_SHIFT (0)
+
+/* MAX98373_R20D2_DHT_ATTACK_CFG */
+#define MAX98373_DHT_ATTACK_STEP_SHIFT (3)
+#define MAX98373_DHT_ATTACK_RATE_SHIFT (0)
+
+/* MAX98373_R20D3_DHT_RELEASE_CFG */
+#define MAX98373_DHT_RELEASE_STEP_SHIFT (3)
+#define MAX98373_DHT_RELEASE_RATE_SHIFT (0)
+
+/* MAX98373_R20D4_DHT_EN */
+#define MAX98373_DHT_EN_SHIFT (0)
+
+/* MAX98373_R20E0_LIMITER_THRESH_CFG */
+#define MAX98373_LIMITER_THRESH_SHIFT (2)
+#define MAX98373_LIMITER_THRESH_SRC_SHIFT (0)
+
+/* MAX98373_R20E2_LIMITER_EN */
+#define MAX98373_LIMITER_EN_SHIFT (0)
+
+/* MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG */
+#define MAX98373_CLOCK_MON_SHIFT (0)
+
+/* MAX98373_R20FF_GLOBAL_SHDN */
+#define MAX98373_GLOBAL_EN_MASK (0x1 << 0)
+
+/* MAX98373_R2000_SW_RESET */
+#define MAX98373_SOFT_RESET (0x1 << 0)
+
+struct max98373_priv {
+ struct regmap *regmap;
+ unsigned int v_slot;
+ unsigned int i_slot;
+ unsigned int spkfb_slot;
+ bool interleave_mode;
+ unsigned int ch_size;
+ bool tdm_mode;
+};
+#endif
diff --git a/sound/soc/codecs/max98926.c b/sound/soc/codecs/max98926.c
index 03d07bf4d942..7b1d1b0fa879 100644
--- a/sound/soc/codecs/max98926.c
+++ b/sound/soc/codecs/max98926.c
@@ -490,7 +490,7 @@ static int max98926_probe(struct snd_soc_codec *codec)
struct max98926_priv *max98926 = snd_soc_codec_get_drvdata(codec);
max98926->codec = codec;
- codec->control_data = max98926->regmap;
+
/* Hi-Z all the slots */
regmap_write(max98926->regmap, MAX98926_DOUT_HIZ_CFG4, 0xF0);
return 0;
diff --git a/sound/soc/codecs/max98927.c b/sound/soc/codecs/max98927.c
index a1d39353719d..f701fdc81175 100644
--- a/sound/soc/codecs/max98927.c
+++ b/sound/soc/codecs/max98927.c
@@ -682,7 +682,6 @@ static int max98927_probe(struct snd_soc_codec *codec)
struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
max98927->codec = codec;
- codec->control_data = max98927->regmap;
/* Software Reset */
regmap_write(max98927->regmap,
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 4fd8d1dc4eef..be7a45f05bbf 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -610,6 +610,9 @@ static int mc13783_probe(struct snd_soc_codec *codec)
{
struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
+ snd_soc_codec_init_regmap(codec,
+ dev_get_regmap(codec->dev->parent, NULL));
+
/* these are the reset values */
mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX0, 0x25893);
mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX1, 0x00d35A);
@@ -728,15 +731,9 @@ static struct snd_soc_dai_driver mc13783_dai_sync[] = {
}
};
-static struct regmap *mc13783_get_regmap(struct device *dev)
-{
- return dev_get_regmap(dev->parent, NULL);
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_mc13783 = {
.probe = mc13783_probe,
.remove = mc13783_remove,
- .get_regmap = mc13783_get_regmap,
.component_driver = {
.controls = mc13783_control_list,
.num_controls = ARRAY_SIZE(mc13783_control_list),
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index 066ea2f4ce7b..44062bb7bf2f 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -712,6 +712,8 @@ static int pm8916_wcd_analog_probe(struct snd_soc_codec *codec)
return err;
}
+ snd_soc_codec_init_regmap(codec,
+ dev_get_regmap(codec->dev->parent, NULL));
snd_soc_codec_set_drvdata(codec, priv);
priv->pmic_rev = snd_soc_read(codec, CDC_D_REVISION1);
priv->codec_version = snd_soc_read(codec, CDC_D_PERPH_SUBTYPE);
@@ -943,11 +945,6 @@ static int pm8916_wcd_analog_set_jack(struct snd_soc_codec *codec,
return 0;
}
-static struct regmap *pm8916_get_regmap(struct device *dev)
-{
- return dev_get_regmap(dev->parent, NULL);
-}
-
static irqreturn_t mbhc_btn_release_irq_handler(int irq, void *arg)
{
struct pm8916_wcd_analog_priv *priv = arg;
@@ -1082,7 +1079,6 @@ static const struct snd_soc_codec_driver pm8916_wcd_analog = {
.probe = pm8916_wcd_analog_probe,
.remove = pm8916_wcd_analog_remove,
.set_jack = pm8916_wcd_analog_set_jack,
- .get_regmap = pm8916_get_regmap,
.component_driver = {
.controls = pm8916_wcd_analog_snd_controls,
.num_controls = ARRAY_SIZE(pm8916_wcd_analog_snd_controls),
diff --git a/sound/soc/codecs/nau8540.c b/sound/soc/codecs/nau8540.c
index f9c9933acffb..b08fb7e243c3 100644
--- a/sound/soc/codecs/nau8540.c
+++ b/sound/soc/codecs/nau8540.c
@@ -233,6 +233,41 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new digital_ch1_mux =
SOC_DAPM_ENUM("Digital CH1 Select", digital_ch1_enum);
+static int adc_power_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
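+ /* Give the ADC time to power up and settle before
+ * un-tristating the digital output pads.
+ */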
+ msleep(300);
+ /* DO12 and DO34 pad output enable */
+ regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
+ NAU8540_I2S_DO12_TRI, 0);
+ regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
+ NAU8540_I2S_DO34_TRI, 0);
+ } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
+ NAU8540_I2S_DO12_TRI, NAU8540_I2S_DO12_TRI);
+ regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
+ NAU8540_I2S_DO34_TRI, NAU8540_I2S_DO34_TRI);
+ }
+ return 0;
+}
+
+static int aiftx_power_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
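+ /* Pulse the software reset once the capture stream stops */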
+ regmap_write(nau8540->regmap, NAU8540_REG_RST, 0x0001);
+ regmap_write(nau8540->regmap, NAU8540_REG_RST, 0x0000);
+ }
+ return 0;
+}
+
static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("MICBIAS2", NAU8540_REG_MIC_BIAS, 11, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS1", NAU8540_REG_MIC_BIAS, 10, 0, NULL, 0),
@@ -247,14 +282,18 @@ static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
SND_SOC_DAPM_PGA("Frontend PGA3", NAU8540_REG_PWR, 14, 0, NULL, 0),
SND_SOC_DAPM_PGA("Frontend PGA4", NAU8540_REG_PWR, 15, 0, NULL, 0),
- SND_SOC_DAPM_ADC("ADC1", NULL,
- NAU8540_REG_POWER_MANAGEMENT, 0, 0),
- SND_SOC_DAPM_ADC("ADC2", NULL,
- NAU8540_REG_POWER_MANAGEMENT, 1, 0),
- SND_SOC_DAPM_ADC("ADC3", NULL,
- NAU8540_REG_POWER_MANAGEMENT, 2, 0),
- SND_SOC_DAPM_ADC("ADC4", NULL,
- NAU8540_REG_POWER_MANAGEMENT, 3, 0),
+ SND_SOC_DAPM_ADC_E("ADC1", NULL,
+ NAU8540_REG_POWER_MANAGEMENT, 0, 0, adc_power_control,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_ADC_E("ADC2", NULL,
+ NAU8540_REG_POWER_MANAGEMENT, 1, 0, adc_power_control,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_ADC_E("ADC3", NULL,
+ NAU8540_REG_POWER_MANAGEMENT, 2, 0, adc_power_control,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_ADC_E("ADC4", NULL,
+ NAU8540_REG_POWER_MANAGEMENT, 3, 0, adc_power_control,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_PGA("ADC CH1", NAU8540_REG_ANALOG_PWR, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("ADC CH2", NAU8540_REG_ANALOG_PWR, 1, 0, NULL, 0),
@@ -270,7 +309,8 @@ static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
SND_SOC_DAPM_MUX("Digital CH1 Mux",
SND_SOC_NOPM, 0, 0, &digital_ch1_mux),
- SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT_E("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0,
+ aiftx_power_control, SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route nau8540_dapm_routes[] = {
@@ -575,7 +615,8 @@ static void nau8540_fll_apply(struct regmap *regmap,
NAU8540_CLK_SRC_MASK | NAU8540_CLK_MCLK_SRC_MASK,
NAU8540_CLK_SRC_MCLK | fll_param->mclk_src);
regmap_update_bits(regmap, NAU8540_REG_FLL1,
- NAU8540_FLL_RATIO_MASK, fll_param->ratio);
+ NAU8540_FLL_RATIO_MASK | NAU8540_ICTRL_LATCH_MASK,
+ fll_param->ratio | (0x6 << NAU8540_ICTRL_LATCH_SFT));
/* FLL 16-bit fractional input */
regmap_write(regmap, NAU8540_REG_FLL2, fll_param->fll_frac);
/* FLL 10-bit integer input */
@@ -596,13 +637,14 @@ static void nau8540_fll_apply(struct regmap *regmap,
NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
NAU8540_FLL_FTR_SW_FILTER);
regmap_update_bits(regmap, NAU8540_REG_FLL6,
- NAU8540_SDM_EN, NAU8540_SDM_EN);
+ NAU8540_SDM_EN | NAU8540_CUTOFF500,
+ NAU8540_SDM_EN | NAU8540_CUTOFF500);
} else {
regmap_update_bits(regmap, NAU8540_REG_FLL5,
NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
NAU8540_FLL_FTR_SW_MASK, NAU8540_FLL_FTR_SW_ACCU);
- regmap_update_bits(regmap,
- NAU8540_REG_FLL6, NAU8540_SDM_EN, 0);
+ regmap_update_bits(regmap, NAU8540_REG_FLL6,
+ NAU8540_SDM_EN | NAU8540_CUTOFF500, 0);
}
}
@@ -617,17 +659,22 @@ static int nau8540_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
switch (pll_id) {
case NAU8540_CLK_FLL_MCLK:
regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
- NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_MCLK);
+ NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+ NAU8540_FLL_CLK_SRC_MCLK | 0);
break;
case NAU8540_CLK_FLL_BLK:
regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
- NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_BLK);
+ NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+ NAU8540_FLL_CLK_SRC_BLK |
+ (0xf << NAU8540_GAIN_ERR_SFT));
break;
case NAU8540_CLK_FLL_FS:
regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
- NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_FS);
+ NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+ NAU8540_FLL_CLK_SRC_FS |
+ (0xf << NAU8540_GAIN_ERR_SFT));
break;
default:
@@ -710,9 +757,24 @@ static void nau8540_init_regs(struct nau8540 *nau8540)
regmap_update_bits(regmap, NAU8540_REG_CLOCK_CTRL,
NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN,
NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN);
- /* ADC OSR selection, CLK_ADC = Fs * OSR */
+ /* ADC OSR selection, CLK_ADC = Fs * OSR;
+ * Channel time alignment enable.
+ */
regmap_update_bits(regmap, NAU8540_REG_ADC_SAMPLE_RATE,
- NAU8540_ADC_OSR_MASK, NAU8540_ADC_OSR_64);
+ NAU8540_CH_SYNC | NAU8540_ADC_OSR_MASK,
+ NAU8540_CH_SYNC | NAU8540_ADC_OSR_64);
+ /* PGA input mode selection */
+ regmap_update_bits(regmap, NAU8540_REG_FEPGA1,
+ NAU8540_FEPGA1_MODCH2_SHT | NAU8540_FEPGA1_MODCH1_SHT,
+ NAU8540_FEPGA1_MODCH2_SHT | NAU8540_FEPGA1_MODCH1_SHT);
+ regmap_update_bits(regmap, NAU8540_REG_FEPGA2,
+ NAU8540_FEPGA2_MODCH4_SHT | NAU8540_FEPGA2_MODCH3_SHT,
+ NAU8540_FEPGA2_MODCH4_SHT | NAU8540_FEPGA2_MODCH3_SHT);
+ /* DO12 and DO34 pad output disable */
+ regmap_update_bits(regmap, NAU8540_REG_PCM_CTRL1,
+ NAU8540_I2S_DO12_TRI, NAU8540_I2S_DO12_TRI);
+ regmap_update_bits(regmap, NAU8540_REG_PCM_CTRL2,
+ NAU8540_I2S_DO34_TRI, NAU8540_I2S_DO34_TRI);
}
static int __maybe_unused nau8540_suspend(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/nau8540.h b/sound/soc/codecs/nau8540.h
index 5db5b224944d..732b490edf81 100644
--- a/sound/soc/codecs/nau8540.h
+++ b/sound/soc/codecs/nau8540.h
@@ -100,9 +100,13 @@
#define NAU8540_CLK_MCLK_SRC_MASK 0xf
/* FLL1 (0x04) */
+#define NAU8540_ICTRL_LATCH_SFT 10
+#define NAU8540_ICTRL_LATCH_MASK (0x7 << NAU8540_ICTRL_LATCH_SFT)
#define NAU8540_FLL_RATIO_MASK 0x7f
/* FLL3 (0x06) */
+#define NAU8540_GAIN_ERR_SFT 12
+#define NAU8540_GAIN_ERR_MASK (0xf << NAU8540_GAIN_ERR_SFT)
#define NAU8540_FLL_CLK_SRC_SFT 10
#define NAU8540_FLL_CLK_SRC_MASK (0x3 << NAU8540_FLL_CLK_SRC_SFT)
#define NAU8540_FLL_CLK_SRC_MCLK (0 << NAU8540_FLL_CLK_SRC_SFT)
@@ -127,6 +131,7 @@
/* FLL6 (0x9) */
#define NAU8540_DCO_EN (0x1 << 15)
#define NAU8540_SDM_EN (0x1 << 14)
+#define NAU8540_CUTOFF500 (0x1 << 13)
/* PCM_CTRL0 (0x10) */
#define NAU8540_I2S_BP_SFT 7
@@ -146,6 +151,7 @@
#define NAU8540_I2S_DF_PCM_AB 0x3
/* PCM_CTRL1 (0x11) */
+#define NAU8540_I2S_DO12_TRI (0x1 << 15)
#define NAU8540_I2S_LRC_DIV_SFT 12
#define NAU8540_I2S_LRC_DIV_MASK (0x3 << NAU8540_I2S_LRC_DIV_SFT)
#define NAU8540_I2S_DO12_OE (0x1 << 4)
@@ -156,6 +162,7 @@
#define NAU8540_I2S_BLK_DIV_MASK 0x7
/* PCM_CTRL1 (0x12) */
+#define NAU8540_I2S_DO34_TRI (0x1 << 15)
#define NAU8540_I2S_DO34_OE (0x1 << 11)
#define NAU8540_I2S_TSLOT_L_MASK 0x3ff
@@ -165,6 +172,7 @@
#define NAU8540_TDM_TX_MASK 0xf
/* ADC_SAMPLE_RATE (0x3A) */
+#define NAU8540_CH_SYNC (0x1 << 14)
#define NAU8540_ADC_OSR_MASK 0x3
#define NAU8540_ADC_OSR_256 0x3
#define NAU8540_ADC_OSR_128 0x2
@@ -183,6 +191,18 @@
#define NAU8540_PRECHARGE_DIS (0x1 << 13)
#define NAU8540_GLOBAL_BIAS_EN (0x1 << 12)
+/* FEPGA1 (0x69) */
+#define NAU8540_FEPGA1_MODCH2_SHT_SFT 7
+#define NAU8540_FEPGA1_MODCH2_SHT (0x1 << NAU8540_FEPGA1_MODCH2_SHT_SFT)
+#define NAU8540_FEPGA1_MODCH1_SHT_SFT 3
+#define NAU8540_FEPGA1_MODCH1_SHT (0x1 << NAU8540_FEPGA1_MODCH1_SHT_SFT)
+
+/* FEPGA2 (0x6A) */
+#define NAU8540_FEPGA2_MODCH4_SHT_SFT 7
+#define NAU8540_FEPGA2_MODCH4_SHT (0x1 << NAU8540_FEPGA2_MODCH4_SHT_SFT)
+#define NAU8540_FEPGA2_MODCH3_SHT_SFT 3
+#define NAU8540_FEPGA2_MODCH3_SHT (0x1 << NAU8540_FEPGA2_MODCH3_SHT_SFT)
+
/* System Clock Source */
enum {
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 0240759f951c..088e0cef4cb8 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -43,7 +43,7 @@ static bool nau8824_is_jack_inserted(struct nau8824 *nau8824);
/* the parameter threshold of FLL */
#define NAU_FREF_MAX 13500000
-#define NAU_FVCO_MAX 124000000
+#define NAU_FVCO_MAX 100000000
#define NAU_FVCO_MIN 90000000
/* scaling for mclk from sysclk_src output */
@@ -811,7 +811,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
NAU8824_JD_SLEEP_MODE, NAU8824_JD_SLEEP_MODE);
/* Close clock for jack type detection at manual mode */
- nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
+ if (dapm->bias_level < SND_SOC_BIAS_PREPARE)
+ nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
}
static void nau8824_jdet_work(struct work_struct *work)
@@ -843,6 +844,11 @@ static void nau8824_jdet_work(struct work_struct *work)
event_mask |= SND_JACK_HEADSET;
snd_soc_jack_report(nau8824->jack, event, event_mask);
+ /* Enable the short key press and release interrupts. */
+ regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
+ NAU8824_IRQ_KEY_RELEASE_DIS |
+ NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
+
nau8824_sema_release(nau8824);
}
@@ -850,15 +856,15 @@ static void nau8824_setup_auto_irq(struct nau8824 *nau8824)
{
struct regmap *regmap = nau8824->regmap;
- /* Enable jack ejection, short key press and release interruption. */
+ /* Enable the jack ejection interrupt. */
regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING_1,
NAU8824_IRQ_INSERT_EN | NAU8824_IRQ_EJECT_EN,
NAU8824_IRQ_EJECT_EN);
regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
- NAU8824_IRQ_EJECT_DIS | NAU8824_IRQ_KEY_RELEASE_DIS |
- NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
+ NAU8824_IRQ_EJECT_DIS, 0);
/* Enable internal VCO needed for interruptions */
- nau8824_config_sysclk(nau8824, NAU8824_CLK_INTERNAL, 0);
+ if (nau8824->dapm->bias_level < SND_SOC_BIAS_PREPARE)
+ nau8824_config_sysclk(nau8824, NAU8824_CLK_INTERNAL, 0);
regmap_update_bits(regmap, NAU8824_REG_ENA_CTRL,
NAU8824_JD_SLEEP_MODE, 0);
}
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index e853a6dfd33b..a1b697b6fb64 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -194,10 +194,10 @@ static const struct reg_default nau8825_reg_defaults[] = {
/* register backup table when cross talk detection */
static struct reg_default nau8825_xtalk_baktab[] = {
- { NAU8825_REG_ADC_DGAIN_CTRL, 0 },
+ { NAU8825_REG_ADC_DGAIN_CTRL, 0x00cf },
{ NAU8825_REG_HSVOL_CTRL, 0 },
- { NAU8825_REG_DACL_CTRL, 0 },
- { NAU8825_REG_DACR_CTRL, 0 },
+ { NAU8825_REG_DACL_CTRL, 0x00cf },
+ { NAU8825_REG_DACR_CTRL, 0x02cf },
};
static const unsigned short logtable[256] = {
@@ -245,13 +245,14 @@ static const unsigned short logtable[256] = {
* tasks are allowed to acquire the semaphore, calling this function will
* put the task to sleep. If the semaphore is not released within the
* specified number of jiffies, this function returns.
- * Acquires the semaphore without jiffies. If no more tasks are allowed
- * to acquire the semaphore, calling this function will put the task to
- * sleep until the semaphore is released.
* If the semaphore is not released within the specified number of jiffies,
- * this function returns -ETIME.
- * If the sleep is interrupted by a signal, this function will return -EINTR.
- * It returns 0 if the semaphore was acquired successfully.
+ * this function returns -ETIME. If the sleep is interrupted by a signal,
+ * this function will return -EINTR. It returns 0 if the semaphore was
+ * acquired successfully.
+ *
+ * Acquires the semaphore without jiffies. Tries to acquire the semaphore
+ * atomically. Returns 0 if the semaphore has been acquired successfully,
+ * or 1 if it cannot be acquired.
*/
static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
{
@@ -262,8 +263,8 @@ static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
if (ret < 0)
dev_warn(nau8825->dev, "Acquire semaphore timeout\n");
} else {
- ret = down_interruptible(&nau8825->xtalk_sem);
- if (ret < 0)
+ ret = down_trylock(&nau8825->xtalk_sem);
+ if (ret)
dev_warn(nau8825->dev, "Acquire semaphore fail\n");
}
@@ -454,22 +455,32 @@ static void nau8825_xtalk_backup(struct nau8825 *nau8825)
{
int i;
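+ /* Back up only once; the restore path clears this flag */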
+ if (nau8825->xtalk_baktab_initialized)
+ return;
+
/* Backup some register values to backup table */
for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++)
regmap_read(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
&nau8825_xtalk_baktab[i].def);
+
+ nau8825->xtalk_baktab_initialized = true;
}
-static void nau8825_xtalk_restore(struct nau8825 *nau8825)
+static void nau8825_xtalk_restore(struct nau8825 *nau8825, bool cause_cancel)
{
int i, volume;
+ if (!nau8825->xtalk_baktab_initialized)
+ return;
+
/* Restore register values from backup table; When the driver restores
- * the headphone volumem, it needs recover to original level gradually
- * with 3dB per step for less pop noise.
+ * the headphone volume in XTALK_DONE state, it needs to recover to the
+ * original level gradually, 3dB per step, for less pop noise.
+ * Otherwise, the restore should be done ASAP.
*/
for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++) {
- if (nau8825_xtalk_baktab[i].reg == NAU8825_REG_HSVOL_CTRL) {
+ if (!cause_cancel && nau8825_xtalk_baktab[i].reg ==
+ NAU8825_REG_HSVOL_CTRL) {
/* Ramping up the volume change to reduce pop noise */
volume = nau8825_xtalk_baktab[i].def &
NAU8825_HPR_VOL_MASK;
@@ -479,6 +490,8 @@ static void nau8825_xtalk_restore(struct nau8825 *nau8825)
regmap_write(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
nau8825_xtalk_baktab[i].def);
}
+
+ nau8825->xtalk_baktab_initialized = false;
}
static void nau8825_xtalk_prepare_dac(struct nau8825 *nau8825)
@@ -644,7 +657,7 @@ static void nau8825_xtalk_clean_adc(struct nau8825 *nau8825)
NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_MASK, 0);
}
-static void nau8825_xtalk_clean(struct nau8825 *nau8825)
+static void nau8825_xtalk_clean(struct nau8825 *nau8825, bool cause_cancel)
{
/* Enable internal VCO needed for interruptions */
nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
@@ -660,7 +673,7 @@ static void nau8825_xtalk_clean(struct nau8825 *nau8825)
NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
/* Restore value of specific register for cross talk */
- nau8825_xtalk_restore(nau8825);
+ nau8825_xtalk_restore(nau8825, cause_cancel);
}
static void nau8825_xtalk_imm_start(struct nau8825 *nau8825, int vol)
@@ -779,7 +792,7 @@ static void nau8825_xtalk_measure(struct nau8825 *nau8825)
dev_dbg(nau8825->dev, "cross talk sidetone: %x\n", sidetone);
regmap_write(nau8825->regmap, NAU8825_REG_DAC_DGAIN_CTRL,
(sidetone << 8) | sidetone);
- nau8825_xtalk_clean(nau8825);
+ nau8825_xtalk_clean(nau8825, false);
nau8825->xtalk_state = NAU8825_XTALK_DONE;
break;
default:
@@ -815,13 +828,14 @@ static void nau8825_xtalk_work(struct work_struct *work)
static void nau8825_xtalk_cancel(struct nau8825 *nau8825)
{
- /* If the xtalk_protect is true, that means the process is still
- * on going. The driver forces to cancel the cross talk task and
+ /* If crosstalk is enabled and the process is ongoing,
+ * the driver forces the crosstalk task to be cancelled and
* restores the configuration to original status.
*/
- if (nau8825->xtalk_protect) {
+ if (nau8825->xtalk_enable && nau8825->xtalk_state !=
+ NAU8825_XTALK_DONE) {
cancel_work_sync(&nau8825->xtalk_work);
- nau8825_xtalk_clean(nau8825);
+ nau8825_xtalk_clean(nau8825, true);
}
/* Reset parameters for cross talk suppression function */
nau8825_sema_reset(nau8825);
@@ -1246,8 +1260,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
regmap_read(nau8825->regmap, NAU8825_REG_DAC_CTRL1, &osr);
osr &= NAU8825_DAC_OVERSAMPLE_MASK;
if (nau8825_clock_check(nau8825, substream->stream,
- params_rate(params), osr))
+ params_rate(params), osr)) {
+ nau8825_sema_release(nau8825);
return -EINVAL;
+ }
regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
NAU8825_CLK_DAC_SRC_MASK,
osr_dac_sel[osr].clk_src << NAU8825_CLK_DAC_SRC_SFT);
@@ -1255,8 +1271,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
regmap_read(nau8825->regmap, NAU8825_REG_ADC_RATE, &osr);
osr &= NAU8825_ADC_SYNC_DOWN_MASK;
if (nau8825_clock_check(nau8825, substream->stream,
- params_rate(params), osr))
+ params_rate(params), osr)) {
+ nau8825_sema_release(nau8825);
return -EINVAL;
+ }
regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
NAU8825_CLK_ADC_SRC_MASK,
osr_adc_sel[osr].clk_src << NAU8825_CLK_ADC_SRC_SFT);
@@ -1273,8 +1291,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
bclk_div = 1;
else if (bclk_fs <= 128)
bclk_div = 0;
- else
+ else {
+ nau8825_sema_release(nau8825);
return -EINVAL;
+ }
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
NAU8825_I2S_LRC_DIV_MASK | NAU8825_I2S_BLK_DIV_MASK,
((bclk_div + 1) << NAU8825_I2S_LRC_DIV_SFT) | bclk_div);
@@ -1294,6 +1314,7 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
val_len |= NAU8825_I2S_DL_32;
break;
default:
+ nau8825_sema_release(nau8825);
return -EINVAL;
}
@@ -1312,8 +1333,6 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
unsigned int ctrl1_val = 0, ctrl2_val = 0;
- nau8825_sema_acquire(nau8825, 3 * HZ);
-
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
ctrl2_val |= NAU8825_I2S_MS_MASTER;
@@ -1355,6 +1374,8 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
return -EINVAL;
}
+ nau8825_sema_acquire(nau8825, 3 * HZ);
+
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
NAU8825_I2S_DL_MASK | NAU8825_I2S_DF_MASK |
NAU8825_I2S_BP_MASK | NAU8825_I2S_PCMB_MASK,
@@ -1687,7 +1708,7 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
} else if (active_irq & NAU8825_HEADSET_COMPLETION_IRQ) {
if (nau8825_is_jack_inserted(regmap)) {
event |= nau8825_jack_insert(nau8825);
- if (!nau8825->xtalk_bypass && !nau8825->high_imped) {
+ if (nau8825->xtalk_enable && !nau8825->high_imped) {
/* Apply the cross talk suppression in the
* headset without high impedance.
*/
@@ -1701,12 +1722,15 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
int ret;
nau8825->xtalk_protect = true;
ret = nau8825_sema_acquire(nau8825, 0);
- if (ret < 0)
+ if (ret)
nau8825->xtalk_protect = false;
}
/* Startup cross talk detection process */
- nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
- schedule_work(&nau8825->xtalk_work);
+ if (nau8825->xtalk_protect) {
+ nau8825->xtalk_state =
+ NAU8825_XTALK_PREPARE;
+ schedule_work(&nau8825->xtalk_work);
+ }
} else {
/* The cross talk suppression shouldn't apply
* in the headset with high impedance. Thus,
@@ -1733,7 +1757,9 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
nau8825->xtalk_event_mask = event_mask;
}
} else if (active_irq & NAU8825_IMPEDANCE_MEAS_IRQ) {
- schedule_work(&nau8825->xtalk_work);
+ /* crosstalk detection enabled and process ongoing */
+ if (nau8825->xtalk_enable && nau8825->xtalk_protect)
+ schedule_work(&nau8825->xtalk_work);
clear_irq = NAU8825_IMPEDANCE_MEAS_IRQ;
} else if ((active_irq & NAU8825_JACK_INSERTION_IRQ_MASK) ==
NAU8825_JACK_INSERTION_DETECTED) {
@@ -2382,7 +2408,7 @@ static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
regcache_sync(nau8825->regmap);
nau8825->xtalk_protect = true;
ret = nau8825_sema_acquire(nau8825, 0);
- if (ret < 0)
+ if (ret)
nau8825->xtalk_protect = false;
enable_irq(nau8825->irq);
@@ -2441,8 +2467,8 @@ static void nau8825_print_device_properties(struct nau8825 *nau8825)
nau8825->jack_insert_debounce);
dev_dbg(dev, "jack-eject-debounce: %d\n",
nau8825->jack_eject_debounce);
- dev_dbg(dev, "crosstalk-bypass: %d\n",
- nau8825->xtalk_bypass);
+ dev_dbg(dev, "crosstalk-enable: %d\n",
+ nau8825->xtalk_enable);
}
static int nau8825_read_device_properties(struct device *dev,
@@ -2507,8 +2533,8 @@ static int nau8825_read_device_properties(struct device *dev,
&nau8825->jack_eject_debounce);
if (ret)
nau8825->jack_eject_debounce = 0;
- nau8825->xtalk_bypass = device_property_read_bool(dev,
- "nuvoton,crosstalk-bypass");
+ nau8825->xtalk_enable = device_property_read_bool(dev,
+ "nuvoton,crosstalk-enable");
nau8825->mclk = devm_clk_get(dev, "mclk");
if (PTR_ERR(nau8825->mclk) == -EPROBE_DEFER) {
@@ -2569,6 +2595,7 @@ static int nau8825_i2c_probe(struct i2c_client *i2c,
*/
nau8825->xtalk_state = NAU8825_XTALK_DONE;
nau8825->xtalk_protect = false;
+ nau8825->xtalk_baktab_initialized = false;
sema_init(&nau8825->xtalk_sem, 1);
INIT_WORK(&nau8825->xtalk_work, nau8825_xtalk_work);
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 8aee5c8647ae..f7e732125882 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -476,7 +476,8 @@ struct nau8825 {
int xtalk_event_mask;
bool xtalk_protect;
int imp_rms[NAU8825_XTALK_IMM];
- int xtalk_bypass;
+ int xtalk_enable;
+ bool xtalk_baktab_initialized; /* true once the backup table is populated */
};
int nau8825_enable_jack_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/pcm186x-i2c.c b/sound/soc/codecs/pcm186x-i2c.c
new file mode 100644
index 000000000000..543621232d60
--- /dev/null
+++ b/sound/soc/codecs/pcm186x-i2c.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC - I2C
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ * Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+
+#include "pcm186x.h"
+
+static const struct of_device_id pcm186x_of_match[] = {
+ { .compatible = "ti,pcm1862", .data = (void *)PCM1862 },
+ { .compatible = "ti,pcm1863", .data = (void *)PCM1863 },
+ { .compatible = "ti,pcm1864", .data = (void *)PCM1864 },
+ { .compatible = "ti,pcm1865", .data = (void *)PCM1865 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pcm186x_of_match);
+
+static int pcm186x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ const enum pcm186x_type type = (enum pcm186x_type)id->driver_data;
+ int irq = i2c->irq;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(i2c, &pcm186x_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return pcm186x_probe(&i2c->dev, type, irq, regmap);
+}
+
+static int pcm186x_i2c_remove(struct i2c_client *i2c)
+{
+ pcm186x_remove(&i2c->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id pcm186x_i2c_id[] = {
+ { "pcm1862", PCM1862 },
+ { "pcm1863", PCM1863 },
+ { "pcm1864", PCM1864 },
+ { "pcm1865", PCM1865 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pcm186x_i2c_id);
+
+static struct i2c_driver pcm186x_i2c_driver = {
+ .probe = pcm186x_i2c_probe,
+ .remove = pcm186x_i2c_remove,
+ .id_table = pcm186x_i2c_id,
+ .driver = {
+ .name = "pcm186x",
+ .of_match_table = pcm186x_of_match,
+ },
+};
+module_i2c_driver(pcm186x_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC I2C Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x-spi.c b/sound/soc/codecs/pcm186x-spi.c
new file mode 100644
index 000000000000..2366f8e4d4d4
--- /dev/null
+++ b/sound/soc/codecs/pcm186x-spi.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC - SPI
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ * Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "pcm186x.h"
+
+static const struct of_device_id pcm186x_of_match[] = {
+ { .compatible = "ti,pcm1862", .data = (void *)PCM1862 },
+ { .compatible = "ti,pcm1863", .data = (void *)PCM1863 },
+ { .compatible = "ti,pcm1864", .data = (void *)PCM1864 },
+ { .compatible = "ti,pcm1865", .data = (void *)PCM1865 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pcm186x_of_match);
+
+static int pcm186x_spi_probe(struct spi_device *spi)
+{
+ const enum pcm186x_type type =
+ (enum pcm186x_type)spi_get_device_id(spi)->driver_data;
+ int irq = spi->irq;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &pcm186x_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return pcm186x_probe(&spi->dev, type, irq, regmap);
+}
+
+static int pcm186x_spi_remove(struct spi_device *spi)
+{
+ pcm186x_remove(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id pcm186x_spi_id[] = {
+ { "pcm1862", PCM1862 },
+ { "pcm1863", PCM1863 },
+ { "pcm1864", PCM1864 },
+ { "pcm1865", PCM1865 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, pcm186x_spi_id);
+
+static struct spi_driver pcm186x_spi_driver = {
+ .probe = pcm186x_spi_probe,
+ .remove = pcm186x_spi_remove,
+ .id_table = pcm186x_spi_id,
+ .driver = {
+ .name = "pcm186x",
+ .of_match_table = pcm186x_of_match,
+ },
+};
+module_spi_driver(pcm186x_spi_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC SPI Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
new file mode 100644
index 000000000000..cdb51427facc
--- /dev/null
+++ b/sound/soc/codecs/pcm186x.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ * Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "pcm186x.h"
+
+static const char * const pcm186x_supply_names[] = {
+ "avdd", /* Analog power supply. Connect to 3.3-V supply. */
+ "dvdd", /* Digital power supply. Connect to 3.3-V supply. */
+ "iovdd", /* I/O power supply. Connect to 3.3-V or 1.8-V. */
+};
+#define PCM186x_NUM_SUPPLIES ARRAY_SIZE(pcm186x_supply_names)
+
+struct pcm186x_priv {
+ struct regmap *regmap;
+ struct regulator_bulk_data supplies[PCM186x_NUM_SUPPLIES];
+ unsigned int sysclk;
+ unsigned int tdm_offset;
+ bool is_tdm_mode;
+ bool is_master_mode;
+};
+
+/* -12 dB min, 0.5 dB steps; DECLARE_TLV_DB_SCALE() takes (min, step, mute) */
+static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);
+
+static const struct snd_kcontrol_new pcm1863_snd_controls[] = {
+ SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L,
+ PCM186X_PGA_VAL_CH1_R, 0, -24, 80, 7, 0,
+ pcm186x_pga_tlv),
+};
+
+static const struct snd_kcontrol_new pcm1865_snd_controls[] = {
+ SOC_DOUBLE_R_S_TLV("ADC1 Capture Volume", PCM186X_PGA_VAL_CH1_L,
+ PCM186X_PGA_VAL_CH1_R, 0, -24, 80, 7, 0,
+ pcm186x_pga_tlv),
+ SOC_DOUBLE_R_S_TLV("ADC2 Capture Volume", PCM186X_PGA_VAL_CH2_L,
+ PCM186X_PGA_VAL_CH2_R, 0, -24, 80, 7, 0,
+ pcm186x_pga_tlv),
+};
+
+static const unsigned int pcm186x_adc_input_channel_sel_value[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x20, 0x30
+};
+
+static const char * const pcm186x_adcl_input_channel_sel_text[] = {
+ "No Select",
+ "VINL1[SE]", /* Default for ADC1L */
+ "VINL2[SE]", /* Default for ADC2L */
+ "VINL2[SE] + VINL1[SE]",
+ "VINL3[SE]",
+ "VINL3[SE] + VINL1[SE]",
+ "VINL3[SE] + VINL2[SE]",
+ "VINL3[SE] + VINL2[SE] + VINL1[SE]",
+ "VINL4[SE]",
+ "VINL4[SE] + VINL1[SE]",
+ "VINL4[SE] + VINL2[SE]",
+ "VINL4[SE] + VINL2[SE] + VINL1[SE]",
+ "VINL4[SE] + VINL3[SE]",
+ "VINL4[SE] + VINL3[SE] + VINL1[SE]",
+ "VINL4[SE] + VINL3[SE] + VINL2[SE]",
+ "VINL4[SE] + VINL3[SE] + VINL2[SE] + VINL1[SE]",
+ "{VIN1P, VIN1M}[DIFF]",
+ "{VIN4P, VIN4M}[DIFF]",
+ "{VIN1P, VIN1M}[DIFF] + {VIN4P, VIN4M}[DIFF]"
+};
+
+static const char * const pcm186x_adcr_input_channel_sel_text[] = {
+ "No Select",
+ "VINR1[SE]", /* Default for ADC1R */
+ "VINR2[SE]", /* Default for ADC2R */
+ "VINR2[SE] + VINR1[SE]",
+ "VINR3[SE]",
+ "VINR3[SE] + VINR1[SE]",
+ "VINR3[SE] + VINR2[SE]",
+ "VINR3[SE] + VINR2[SE] + VINR1[SE]",
+ "VINR4[SE]",
+ "VINR4[SE] + VINR1[SE]",
+ "VINR4[SE] + VINR2[SE]",
+ "VINR4[SE] + VINR2[SE] + VINR1[SE]",
+ "VINR4[SE] + VINR3[SE]",
+ "VINR4[SE] + VINR3[SE] + VINR1[SE]",
+ "VINR4[SE] + VINR3[SE] + VINR2[SE]",
+ "VINR4[SE] + VINR3[SE] + VINR2[SE] + VINR1[SE]",
+ "{VIN2P, VIN2M}[DIFF]",
+ "{VIN3P, VIN3M}[DIFF]",
+ "{VIN2P, VIN2M}[DIFF] + {VIN3P, VIN3M}[DIFF]"
+};
+
+static const struct soc_enum pcm186x_adc_input_channel_sel[] = {
+ SOC_VALUE_ENUM_SINGLE(PCM186X_ADC1_INPUT_SEL_L, 0,
+ PCM186X_ADC_INPUT_SEL_MASK,
+ ARRAY_SIZE(pcm186x_adcl_input_channel_sel_text),
+ pcm186x_adcl_input_channel_sel_text,
+ pcm186x_adc_input_channel_sel_value),
+ SOC_VALUE_ENUM_SINGLE(PCM186X_ADC1_INPUT_SEL_R, 0,
+ PCM186X_ADC_INPUT_SEL_MASK,
+ ARRAY_SIZE(pcm186x_adcr_input_channel_sel_text),
+ pcm186x_adcr_input_channel_sel_text,
+ pcm186x_adc_input_channel_sel_value),
+ SOC_VALUE_ENUM_SINGLE(PCM186X_ADC2_INPUT_SEL_L, 0,
+ PCM186X_ADC_INPUT_SEL_MASK,
+ ARRAY_SIZE(pcm186x_adcl_input_channel_sel_text),
+ pcm186x_adcl_input_channel_sel_text,
+ pcm186x_adc_input_channel_sel_value),
+ SOC_VALUE_ENUM_SINGLE(PCM186X_ADC2_INPUT_SEL_R, 0,
+ PCM186X_ADC_INPUT_SEL_MASK,
+ ARRAY_SIZE(pcm186x_adcr_input_channel_sel_text),
+ pcm186x_adcr_input_channel_sel_text,
+ pcm186x_adc_input_channel_sel_value),
+};
+
+static const struct snd_kcontrol_new pcm186x_adc_mux_controls[] = {
+ SOC_DAPM_ENUM("ADC1 Left Input", pcm186x_adc_input_channel_sel[0]),
+ SOC_DAPM_ENUM("ADC1 Right Input", pcm186x_adc_input_channel_sel[1]),
+ SOC_DAPM_ENUM("ADC2 Left Input", pcm186x_adc_input_channel_sel[2]),
+ SOC_DAPM_ENUM("ADC2 Right Input", pcm186x_adc_input_channel_sel[3]),
+};
+
+static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("VINL1"),
+ SND_SOC_DAPM_INPUT("VINR1"),
+ SND_SOC_DAPM_INPUT("VINL2"),
+ SND_SOC_DAPM_INPUT("VINR2"),
+ SND_SOC_DAPM_INPUT("VINL3"),
+ SND_SOC_DAPM_INPUT("VINR3"),
+ SND_SOC_DAPM_INPUT("VINL4"),
+ SND_SOC_DAPM_INPUT("VINR4"),
+
+ SND_SOC_DAPM_MUX("ADC Left Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[0]),
+ SND_SOC_DAPM_MUX("ADC Right Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[1]),
+
+ /*
+ * Put the codec into SLEEP mode when not in use, allowing the
+ * Energysense mechanism to operate.
+ */
+ SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("VINL1"),
+ SND_SOC_DAPM_INPUT("VINR1"),
+ SND_SOC_DAPM_INPUT("VINL2"),
+ SND_SOC_DAPM_INPUT("VINR2"),
+ SND_SOC_DAPM_INPUT("VINL3"),
+ SND_SOC_DAPM_INPUT("VINR3"),
+ SND_SOC_DAPM_INPUT("VINL4"),
+ SND_SOC_DAPM_INPUT("VINR4"),
+
+ SND_SOC_DAPM_MUX("ADC1 Left Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[0]),
+ SND_SOC_DAPM_MUX("ADC1 Right Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[1]),
+ SND_SOC_DAPM_MUX("ADC2 Left Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[2]),
+ SND_SOC_DAPM_MUX("ADC2 Right Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[3]),
+
+ /*
+ * Put the codec into SLEEP mode when not in use, allowing the
+ * Energysense mechanism to operate.
+ */
+ SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 0),
+ SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 0),
+};
+
+static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
+ { "ADC Left Capture Source", NULL, "VINL1" },
+ { "ADC Left Capture Source", NULL, "VINR1" },
+ { "ADC Left Capture Source", NULL, "VINL2" },
+ { "ADC Left Capture Source", NULL, "VINR2" },
+ { "ADC Left Capture Source", NULL, "VINL3" },
+ { "ADC Left Capture Source", NULL, "VINR3" },
+ { "ADC Left Capture Source", NULL, "VINL4" },
+ { "ADC Left Capture Source", NULL, "VINR4" },
+
+ { "ADC", NULL, "ADC Left Capture Source" },
+
+ { "ADC Right Capture Source", NULL, "VINL1" },
+ { "ADC Right Capture Source", NULL, "VINR1" },
+ { "ADC Right Capture Source", NULL, "VINL2" },
+ { "ADC Right Capture Source", NULL, "VINR2" },
+ { "ADC Right Capture Source", NULL, "VINL3" },
+ { "ADC Right Capture Source", NULL, "VINR3" },
+ { "ADC Right Capture Source", NULL, "VINL4" },
+ { "ADC Right Capture Source", NULL, "VINR4" },
+
+ { "ADC", NULL, "ADC Right Capture Source" },
+};
+
+static const struct snd_soc_dapm_route pcm1865_dapm_routes[] = {
+ { "ADC1 Left Capture Source", NULL, "VINL1" },
+ { "ADC1 Left Capture Source", NULL, "VINR1" },
+ { "ADC1 Left Capture Source", NULL, "VINL2" },
+ { "ADC1 Left Capture Source", NULL, "VINR2" },
+ { "ADC1 Left Capture Source", NULL, "VINL3" },
+ { "ADC1 Left Capture Source", NULL, "VINR3" },
+ { "ADC1 Left Capture Source", NULL, "VINL4" },
+ { "ADC1 Left Capture Source", NULL, "VINR4" },
+
+ { "ADC1", NULL, "ADC1 Left Capture Source" },
+
+ { "ADC1 Right Capture Source", NULL, "VINL1" },
+ { "ADC1 Right Capture Source", NULL, "VINR1" },
+ { "ADC1 Right Capture Source", NULL, "VINL2" },
+ { "ADC1 Right Capture Source", NULL, "VINR2" },
+ { "ADC1 Right Capture Source", NULL, "VINL3" },
+ { "ADC1 Right Capture Source", NULL, "VINR3" },
+ { "ADC1 Right Capture Source", NULL, "VINL4" },
+ { "ADC1 Right Capture Source", NULL, "VINR4" },
+
+ { "ADC1", NULL, "ADC1 Right Capture Source" },
+
+ { "ADC2 Left Capture Source", NULL, "VINL1" },
+ { "ADC2 Left Capture Source", NULL, "VINR1" },
+ { "ADC2 Left Capture Source", NULL, "VINL2" },
+ { "ADC2 Left Capture Source", NULL, "VINR2" },
+ { "ADC2 Left Capture Source", NULL, "VINL3" },
+ { "ADC2 Left Capture Source", NULL, "VINR3" },
+ { "ADC2 Left Capture Source", NULL, "VINL4" },
+ { "ADC2 Left Capture Source", NULL, "VINR4" },
+
+ { "ADC2", NULL, "ADC2 Left Capture Source" },
+
+ { "ADC2 Right Capture Source", NULL, "VINL1" },
+ { "ADC2 Right Capture Source", NULL, "VINR1" },
+ { "ADC2 Right Capture Source", NULL, "VINL2" },
+ { "ADC2 Right Capture Source", NULL, "VINR2" },
+ { "ADC2 Right Capture Source", NULL, "VINL3" },
+ { "ADC2 Right Capture Source", NULL, "VINR3" },
+ { "ADC2 Right Capture Source", NULL, "VINL4" },
+ { "ADC2 Right Capture Source", NULL, "VINR4" },
+
+ { "ADC2", NULL, "ADC2 Right Capture Source" },
+};
+
+static int pcm186x_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int rate = params_rate(params);
+ unsigned int format = params_format(params);
+ unsigned int width = params_width(params);
+ unsigned int channels = params_channels(params);
+ unsigned int div_lrck;
+ unsigned int div_bck;
+ u8 tdm_tx_sel = 0;
+ u8 pcm_cfg = 0;
+
+ dev_dbg(codec->dev, "%s() rate=%u format=0x%x width=%u channels=%u\n",
+ __func__, rate, format, width, channels);
+
+ switch (width) {
+ case 16:
+ pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_16 <<
+ PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+ PCM186X_PCM_CFG_TX_WLEN_16 <<
+ PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+ break;
+ case 20:
+ pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_20 <<
+ PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+ PCM186X_PCM_CFG_TX_WLEN_20 <<
+ PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+ break;
+ case 24:
+ pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_24 <<
+ PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+ PCM186X_PCM_CFG_TX_WLEN_24 <<
+ PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+ break;
+ case 32:
+ pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_32 <<
+ PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+ PCM186X_PCM_CFG_TX_WLEN_32 <<
+ PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+ PCM186X_PCM_CFG_RX_WLEN_MASK |
+ PCM186X_PCM_CFG_TX_WLEN_MASK,
+ pcm_cfg);
+
+ div_lrck = width * channels;
+
+ if (priv->is_tdm_mode) {
+ /* Select TDM transmission data */
+ switch (channels) {
+ case 2:
+ tdm_tx_sel = PCM186X_TDM_TX_SEL_2CH;
+ break;
+ case 4:
+ tdm_tx_sel = PCM186X_TDM_TX_SEL_4CH;
+ break;
+ case 6:
+ tdm_tx_sel = PCM186X_TDM_TX_SEL_6CH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, PCM186X_TDM_TX_SEL,
+ PCM186X_TDM_TX_SEL_MASK, tdm_tx_sel);
+
+ /* In DSP/TDM mode, the LRCLK divider must be 256 */
+ div_lrck = 256;
+
+ /* Configure 1/256 duty cycle for LRCK */
+ snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+ PCM186X_PCM_CFG_TDM_LRCK_MODE,
+ PCM186X_PCM_CFG_TDM_LRCK_MODE);
+ }
+
+ /* Only configure clock dividers in master mode. */
+ if (priv->is_master_mode) {
+ div_bck = priv->sysclk / (div_lrck * rate);
+
+ dev_dbg(codec->dev,
+ "%s() master_clk=%u div_bck=%u div_lrck=%u\n",
+ __func__, priv->sysclk, div_bck, div_lrck);
+
+ snd_soc_write(codec, PCM186X_BCK_DIV, div_bck - 1);
+ snd_soc_write(codec, PCM186X_LRK_DIV, div_lrck - 1);
+ }
+
+ return 0;
+}
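
A quick worked example of the divider arithmetic in pcm186x_hw_params() above (illustrative only, not part of the patch): with the codec as bus master, SCKI = 24.576 MHz and a 48 kHz, 2-channel, 32-bit stream:

    /* Editorial sketch of the clock-divider math above; the numbers are
     * an assumed example configuration, not from this patch. */
    unsigned int sysclk = 24576000;                     /* SCKI via set_sysclk() */
    unsigned int rate = 48000, width = 32, channels = 2;
    unsigned int div_lrck = width * channels;           /* 64: LRCK = BCK / 64  */
    unsigned int div_bck = sysclk / (div_lrck * rate);  /* 8:  BCK  = SCKI / 8  */
    /* The registers take divider-minus-one: BCK_DIV = 7, LRK_DIV = 63. */
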
+
+static int pcm186x_set_fmt(struct snd_soc_dai *dai, unsigned int format)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ u8 clk_ctrl = 0;
+ u8 pcm_cfg = 0;
+
+ dev_dbg(codec->dev, "%s() format=0x%x\n", __func__, format);
+
+ /* set master/slave audio interface */
+ switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ if (!priv->sysclk) {
+ dev_err(codec->dev, "operating in master mode requires sysclock to be configured\n");
+ return -EINVAL;
+ }
+ clk_ctrl |= PCM186X_CLK_CTRL_MST_MODE;
+ priv->is_master_mode = true;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ priv->is_master_mode = false;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+ return -EINVAL;
+ }
+
+ /* set interface polarity */
+ switch (format & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ dev_err(codec->dev, "Inverted DAI clocks not supported\n");
+ return -EINVAL;
+ }
+
+ /* set interface format */
+ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ pcm_cfg = PCM186X_PCM_CFG_FMT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ pcm_cfg = PCM186X_PCM_CFG_FMT_LEFTJ;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ priv->tdm_offset += 1;
+ /*
+ * Fall through: DSP_A uses the same basic config as DSP_B,
+ * except the TDM output must be shifted by one BCK cycle.
+ */
+ case SND_SOC_DAIFMT_DSP_B:
+ priv->is_tdm_mode = true;
+ pcm_cfg = PCM186X_PCM_CFG_FMT_TDM;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI format\n");
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, PCM186X_CLK_CTRL,
+ PCM186X_CLK_CTRL_MST_MODE, clk_ctrl);
+
+ snd_soc_write(codec, PCM186X_TDM_TX_OFFSET, priv->tdm_offset);
+
+ snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+ PCM186X_PCM_CFG_FMT_MASK, pcm_cfg);
+
+ return 0;
+}
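
For illustration, the format word handled above would typically be assembled by a machine driver like this (a hypothetical caller, not from this patch; note that master mode additionally requires snd_soc_dai_set_sysclk() to have been called first, as checked above):

    /* Hypothetical machine-driver call: codec masters BCLK/LRCLK, normal
     * clock polarity, I2S data -- the CBM_CFM + NB_NF + I2S branches. */
    ret = snd_soc_dai_set_fmt(codec_dai,
                              SND_SOC_DAIFMT_I2S |
                              SND_SOC_DAIFMT_NB_NF |
                              SND_SOC_DAIFMT_CBM_CFM);
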
+
+static int pcm186x_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int first_slot, last_slot, tdm_offset;
+
+ dev_dbg(codec->dev,
+ "%s() tx_mask=0x%x rx_mask=0x%x slots=%d slot_width=%d\n",
+ __func__, tx_mask, rx_mask, slots, slot_width);
+
+ if (!tx_mask) {
+ dev_err(codec->dev, "tdm tx mask must not be 0\n");
+ return -EINVAL;
+ }
+
+ first_slot = __ffs(tx_mask);
+ last_slot = __fls(tx_mask);
+
+ if (last_slot - first_slot != hweight32(tx_mask) - 1) {
+ dev_err(codec->dev, "tdm tx mask must be contiguous\n");
+ return -EINVAL;
+ }
+
+ tdm_offset = first_slot * slot_width;
+
+ if (tdm_offset > 255) {
+ dev_err(codec->dev, "tdm tx slot selection out of bounds\n");
+ return -EINVAL;
+ }
+
+ priv->tdm_offset = tdm_offset;
+
+ return 0;
+}
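
A worked example of the slot-mask checks above (illustrative, with assumed values): for tx_mask = 0x0c and 32-bit slots,

    /* Editorial example of the contiguity test in pcm186x_set_tdm_slot(). */
    unsigned int tx_mask = 0x0c, slot_width = 32;
    unsigned int first_slot = __ffs(tx_mask);          /* 2 */
    unsigned int last_slot = __fls(tx_mask);           /* 3 */
    /* 3 - 2 == hweight32(0x0c) - 1, so the mask is contiguous and the
     * transmit offset becomes 2 * 32 = 64 BCK cycles. */
    unsigned int tdm_offset = first_slot * slot_width;
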
+
+static int pcm186x_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "%s() clk_id=%d freq=%u dir=%d\n",
+ __func__, clk_id, freq, dir);
+
+ priv->sysclk = freq;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops pcm186x_dai_ops = {
+ .set_sysclk = pcm186x_set_dai_sysclk,
+ .set_tdm_slot = pcm186x_set_tdm_slot,
+ .set_fmt = pcm186x_set_fmt,
+ .hw_params = pcm186x_hw_params,
+};
+
+static struct snd_soc_dai_driver pcm1863_dai = {
+ .name = "pcm1863-aif",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = PCM186X_RATES,
+ .formats = PCM186X_FORMATS,
+ },
+ .ops = &pcm186x_dai_ops,
+};
+
+static struct snd_soc_dai_driver pcm1865_dai = {
+ .name = "pcm1865-aif",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = PCM186X_RATES,
+ .formats = PCM186X_FORMATS,
+ },
+ .ops = &pcm186x_dai_ops,
+};
+
+static int pcm186x_power_on(struct snd_soc_codec *codec)
+{
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret)
+ return ret;
+
+ regcache_cache_only(priv->regmap, false);
+ ret = regcache_sync(priv->regmap);
+ if (ret) {
+ dev_err(codec->dev, "Failed to restore cache\n");
+ regcache_cache_only(priv->regmap, true);
+ regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ return ret;
+ }
+
+ snd_soc_update_bits(codec, PCM186X_POWER_CTRL,
+ PCM186X_PWR_CTRL_PWRDN, 0);
+
+ return 0;
+}
+
+static int pcm186x_power_off(struct snd_soc_codec *codec)
+{
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ snd_soc_update_bits(codec, PCM186X_POWER_CTRL,
+ PCM186X_PWR_CTRL_PWRDN, PCM186X_PWR_CTRL_PWRDN);
+
+ regcache_cache_only(priv->regmap, true);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret)
+ return ret;
+
+ return 0;
+}
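
The power_on/power_off pair above follows the usual regmap cache discipline; a condensed sketch of the invariant (assuming the pcm186x_priv fields from this patch):

    /* Editorial sketch of the cache discipline used above. */
    static int example_power_cycle(struct pcm186x_priv *priv)
    {
            int ret;

            /* Going down: park the map in cache-only mode first, so any
             * writes issued while the supplies are off land in the cache. */
            regcache_cache_only(priv->regmap, true);
            regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);

            /* Coming up: re-enable supplies, leave cache-only mode, then
             * replay the cached register state into the chip. */
            ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
                                        priv->supplies);
            if (ret)
                    return ret;
            regcache_cache_only(priv->regmap, false);
            return regcache_sync(priv->regmap);
    }
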
+
+static int pcm186x_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ dev_dbg(codec->dev, "## %s: %d -> %d\n", __func__,
+ snd_soc_codec_get_bias_level(codec), level);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+ pcm186x_power_on(codec);
+ break;
+ case SND_SOC_BIAS_OFF:
+ pcm186x_power_off(codec);
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1863 = {
+ .set_bias_level = pcm186x_set_bias_level,
+
+ .component_driver = {
+ .controls = pcm1863_snd_controls,
+ .num_controls = ARRAY_SIZE(pcm1863_snd_controls),
+ .dapm_widgets = pcm1863_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm1863_dapm_widgets),
+ .dapm_routes = pcm1863_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm1863_dapm_routes),
+ },
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1865 = {
+ .set_bias_level = pcm186x_set_bias_level,
+ .suspend_bias_off = true,
+
+ .component_driver = {
+ .controls = pcm1865_snd_controls,
+ .num_controls = ARRAY_SIZE(pcm1865_snd_controls),
+ .dapm_widgets = pcm1865_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm1865_dapm_widgets),
+ .dapm_routes = pcm1865_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm1865_dapm_routes),
+ },
+};
+
+static bool pcm186x_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PCM186X_PAGE:
+ case PCM186X_DEVICE_STATUS:
+ case PCM186X_FSAMPLE_STATUS:
+ case PCM186X_DIV_STATUS:
+ case PCM186X_CLK_STATUS:
+ case PCM186X_SUPPLY_STATUS:
+ case PCM186X_MMAP_STAT_CTRL:
+ case PCM186X_MMAP_ADDRESS:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_range_cfg pcm186x_range = {
+ .name = "Pages",
+ .range_max = PCM186X_MAX_REGISTER,
+ .selector_reg = PCM186X_PAGE,
+ .selector_mask = 0xff,
+ .window_len = PCM186X_PAGE_LEN,
+};
+
+const struct regmap_config pcm186x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .volatile_reg = pcm186x_volatile,
+
+ .ranges = &pcm186x_range,
+ .num_ranges = 1,
+
+ .max_register = PCM186X_MAX_REGISTER,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(pcm186x_regmap);
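
A note on the paged register map set up above: with a regmap_range_cfg, a register's virtual address encodes page * PCM186X_PAGE_LEN + offset, and regmap writes the selector register itself before accessing anything outside the current window. For example (illustrative):

    /* Editorial example: PCM186X_MMAP_STAT_CTRL is PCM186X_PAGE_BASE(1) + 1,
     * i.e. virtual address 0x101.  regmap first writes 1 to PCM186X_PAGE
     * (the selector_reg) and then reads offset 0x01 of the 256-register page. */
    unsigned int val;
    ret = regmap_read(priv->regmap, PCM186X_MMAP_STAT_CTRL, &val);
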
+
+int pcm186x_probe(struct device *dev, enum pcm186x_type type, int irq,
+ struct regmap *regmap)
+{
+ struct pcm186x_priv *priv;
+ int i, ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct pcm186x_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->regmap = regmap;
+
+ for (i = 0; i < ARRAY_SIZE(priv->supplies); i++)
+ priv->supplies[i].supply = pcm186x_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret) {
+ dev_err(dev, "failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret) {
+ dev_err(dev, "failed enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset device registers for a consistent power-on-like state */
+ ret = regmap_write(regmap, PCM186X_PAGE, PCM186X_RESET);
+ if (ret) {
+ dev_err(dev, "failed to write device: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret) {
+ dev_err(dev, "failed disable supplies: %d\n", ret);
+ return ret;
+ }
+
+ switch (type) {
+ case PCM1865:
+ case PCM1864:
+ ret = snd_soc_register_codec(dev, &soc_codec_dev_pcm1865,
+ &pcm1865_dai, 1);
+ break;
+ case PCM1863:
+ case PCM1862:
+ default:
+ ret = snd_soc_register_codec(dev, &soc_codec_dev_pcm1863,
+ &pcm1863_dai, 1);
+ }
+ if (ret) {
+ dev_err(dev, "failed to register CODEC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcm186x_probe);
+
+int pcm186x_remove(struct device *dev)
+{
+ snd_soc_unregister_codec(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcm186x_remove);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x.h b/sound/soc/codecs/pcm186x.h
new file mode 100644
index 000000000000..b630111bb3c4
--- /dev/null
+++ b/sound/soc/codecs/pcm186x.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments PCM186x Universal Audio ADC
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ * Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#ifndef _PCM186X_H_
+#define _PCM186X_H_
+
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+enum pcm186x_type {
+ PCM1862,
+ PCM1863,
+ PCM1864,
+ PCM1865,
+};
+
+#define PCM186X_RATES SNDRV_PCM_RATE_8000_192000
+#define PCM186X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+#define PCM186X_PAGE_LEN 0x0100
+#define PCM186X_PAGE_BASE(n) (PCM186X_PAGE_LEN * (n))
+
+/* The page selection register address is the same on all pages */
+#define PCM186X_PAGE 0
+
+/* Register Definitions - Page 0 */
+#define PCM186X_PGA_VAL_CH1_L (PCM186X_PAGE_BASE(0) + 1)
+#define PCM186X_PGA_VAL_CH1_R (PCM186X_PAGE_BASE(0) + 2)
+#define PCM186X_PGA_VAL_CH2_L (PCM186X_PAGE_BASE(0) + 3)
+#define PCM186X_PGA_VAL_CH2_R (PCM186X_PAGE_BASE(0) + 4)
+#define PCM186X_PGA_CTRL (PCM186X_PAGE_BASE(0) + 5)
+#define PCM186X_ADC1_INPUT_SEL_L (PCM186X_PAGE_BASE(0) + 6)
+#define PCM186X_ADC1_INPUT_SEL_R (PCM186X_PAGE_BASE(0) + 7)
+#define PCM186X_ADC2_INPUT_SEL_L (PCM186X_PAGE_BASE(0) + 8)
+#define PCM186X_ADC2_INPUT_SEL_R (PCM186X_PAGE_BASE(0) + 9)
+#define PCM186X_AUXADC_INPUT_SEL (PCM186X_PAGE_BASE(0) + 10)
+#define PCM186X_PCM_CFG (PCM186X_PAGE_BASE(0) + 11)
+#define PCM186X_TDM_TX_SEL (PCM186X_PAGE_BASE(0) + 12)
+#define PCM186X_TDM_TX_OFFSET (PCM186X_PAGE_BASE(0) + 13)
+#define PCM186X_TDM_RX_OFFSET (PCM186X_PAGE_BASE(0) + 14)
+#define PCM186X_DPGA_VAL_CH1_L (PCM186X_PAGE_BASE(0) + 15)
+#define PCM186X_GPIO1_0_CTRL (PCM186X_PAGE_BASE(0) + 16)
+#define PCM186X_GPIO3_2_CTRL (PCM186X_PAGE_BASE(0) + 17)
+#define PCM186X_GPIO1_0_DIR_CTRL (PCM186X_PAGE_BASE(0) + 18)
+#define PCM186X_GPIO3_2_DIR_CTRL (PCM186X_PAGE_BASE(0) + 19)
+#define PCM186X_GPIO_IN_OUT (PCM186X_PAGE_BASE(0) + 20)
+#define PCM186X_GPIO_PULL_CTRL (PCM186X_PAGE_BASE(0) + 21)
+#define PCM186X_DPGA_VAL_CH1_R (PCM186X_PAGE_BASE(0) + 22)
+#define PCM186X_DPGA_VAL_CH2_L (PCM186X_PAGE_BASE(0) + 23)
+#define PCM186X_DPGA_VAL_CH2_R (PCM186X_PAGE_BASE(0) + 24)
+#define PCM186X_DPGA_GAIN_CTRL (PCM186X_PAGE_BASE(0) + 25)
+#define PCM186X_DPGA_MIC_CTRL (PCM186X_PAGE_BASE(0) + 26)
+#define PCM186X_DIN_RESAMP_CTRL (PCM186X_PAGE_BASE(0) + 27)
+#define PCM186X_CLK_CTRL (PCM186X_PAGE_BASE(0) + 32)
+#define PCM186X_DSP1_CLK_DIV (PCM186X_PAGE_BASE(0) + 33)
+#define PCM186X_DSP2_CLK_DIV (PCM186X_PAGE_BASE(0) + 34)
+#define PCM186X_ADC_CLK_DIV (PCM186X_PAGE_BASE(0) + 35)
+#define PCM186X_PLL_SCK_DIV (PCM186X_PAGE_BASE(0) + 37)
+#define PCM186X_BCK_DIV (PCM186X_PAGE_BASE(0) + 38)
+#define PCM186X_LRK_DIV (PCM186X_PAGE_BASE(0) + 39)
+#define PCM186X_PLL_CTRL (PCM186X_PAGE_BASE(0) + 40)
+#define PCM186X_PLL_P_DIV (PCM186X_PAGE_BASE(0) + 41)
+#define PCM186X_PLL_R_DIV (PCM186X_PAGE_BASE(0) + 42)
+#define PCM186X_PLL_J_DIV (PCM186X_PAGE_BASE(0) + 43)
+#define PCM186X_PLL_D_DIV_LSB (PCM186X_PAGE_BASE(0) + 44)
+#define PCM186X_PLL_D_DIV_MSB (PCM186X_PAGE_BASE(0) + 45)
+#define PCM186X_SIGDET_MODE (PCM186X_PAGE_BASE(0) + 48)
+#define PCM186X_SIGDET_MASK (PCM186X_PAGE_BASE(0) + 49)
+#define PCM186X_SIGDET_STAT (PCM186X_PAGE_BASE(0) + 50)
+#define PCM186X_SIGDET_LOSS_TIME (PCM186X_PAGE_BASE(0) + 52)
+#define PCM186X_SIGDET_SCAN_TIME (PCM186X_PAGE_BASE(0) + 53)
+#define PCM186X_SIGDET_INT_INTVL (PCM186X_PAGE_BASE(0) + 54)
+#define PCM186X_SIGDET_DC_REF_CH1_L (PCM186X_PAGE_BASE(0) + 64)
+#define PCM186X_SIGDET_DC_DIFF_CH1_L (PCM186X_PAGE_BASE(0) + 65)
+#define PCM186X_SIGDET_DC_LEV_CH1_L (PCM186X_PAGE_BASE(0) + 66)
+#define PCM186X_SIGDET_DC_REF_CH1_R (PCM186X_PAGE_BASE(0) + 67)
+#define PCM186X_SIGDET_DC_DIFF_CH1_R (PCM186X_PAGE_BASE(0) + 68)
+#define PCM186X_SIGDET_DC_LEV_CH1_R (PCM186X_PAGE_BASE(0) + 69)
+#define PCM186X_SIGDET_DC_REF_CH2_L (PCM186X_PAGE_BASE(0) + 70)
+#define PCM186X_SIGDET_DC_DIFF_CH2_L (PCM186X_PAGE_BASE(0) + 71)
+#define PCM186X_SIGDET_DC_LEV_CH2_L (PCM186X_PAGE_BASE(0) + 72)
+#define PCM186X_SIGDET_DC_REF_CH2_R (PCM186X_PAGE_BASE(0) + 73)
+#define PCM186X_SIGDET_DC_DIFF_CH2_R (PCM186X_PAGE_BASE(0) + 74)
+#define PCM186X_SIGDET_DC_LEV_CH2_R (PCM186X_PAGE_BASE(0) + 75)
+#define PCM186X_SIGDET_DC_REF_CH3_L (PCM186X_PAGE_BASE(0) + 76)
+#define PCM186X_SIGDET_DC_DIFF_CH3_L (PCM186X_PAGE_BASE(0) + 77)
+#define PCM186X_SIGDET_DC_LEV_CH3_L (PCM186X_PAGE_BASE(0) + 78)
+#define PCM186X_SIGDET_DC_REF_CH3_R (PCM186X_PAGE_BASE(0) + 79)
+#define PCM186X_SIGDET_DC_DIFF_CH3_R (PCM186X_PAGE_BASE(0) + 80)
+#define PCM186X_SIGDET_DC_LEV_CH3_R (PCM186X_PAGE_BASE(0) + 81)
+#define PCM186X_SIGDET_DC_REF_CH4_L (PCM186X_PAGE_BASE(0) + 82)
+#define PCM186X_SIGDET_DC_DIFF_CH4_L (PCM186X_PAGE_BASE(0) + 83)
+#define PCM186X_SIGDET_DC_LEV_CH4_L (PCM186X_PAGE_BASE(0) + 84)
+#define PCM186X_SIGDET_DC_REF_CH4_R (PCM186X_PAGE_BASE(0) + 85)
+#define PCM186X_SIGDET_DC_DIFF_CH4_R (PCM186X_PAGE_BASE(0) + 86)
+#define PCM186X_SIGDET_DC_LEV_CH4_R (PCM186X_PAGE_BASE(0) + 87)
+#define PCM186X_AUXADC_DATA_CTRL (PCM186X_PAGE_BASE(0) + 88)
+#define PCM186X_AUXADC_DATA_LSB (PCM186X_PAGE_BASE(0) + 89)
+#define PCM186X_AUXADC_DATA_MSB (PCM186X_PAGE_BASE(0) + 90)
+#define PCM186X_INT_ENABLE (PCM186X_PAGE_BASE(0) + 96)
+#define PCM186X_INT_FLAG (PCM186X_PAGE_BASE(0) + 97)
+#define PCM186X_INT_POL_WIDTH (PCM186X_PAGE_BASE(0) + 98)
+#define PCM186X_POWER_CTRL (PCM186X_PAGE_BASE(0) + 112)
+#define PCM186X_FILTER_MUTE_CTRL (PCM186X_PAGE_BASE(0) + 113)
+#define PCM186X_DEVICE_STATUS (PCM186X_PAGE_BASE(0) + 114)
+#define PCM186X_FSAMPLE_STATUS (PCM186X_PAGE_BASE(0) + 115)
+#define PCM186X_DIV_STATUS (PCM186X_PAGE_BASE(0) + 116)
+#define PCM186X_CLK_STATUS (PCM186X_PAGE_BASE(0) + 117)
+#define PCM186X_SUPPLY_STATUS (PCM186X_PAGE_BASE(0) + 120)
+
+/* Register Definitions - Page 1 */
+#define PCM186X_MMAP_STAT_CTRL (PCM186X_PAGE_BASE(1) + 1)
+#define PCM186X_MMAP_ADDRESS (PCM186X_PAGE_BASE(1) + 2)
+#define PCM186X_MEM_WDATA0 (PCM186X_PAGE_BASE(1) + 4)
+#define PCM186X_MEM_WDATA1 (PCM186X_PAGE_BASE(1) + 5)
+#define PCM186X_MEM_WDATA2 (PCM186X_PAGE_BASE(1) + 6)
+#define PCM186X_MEM_WDATA3 (PCM186X_PAGE_BASE(1) + 7)
+#define PCM186X_MEM_RDATA0 (PCM186X_PAGE_BASE(1) + 8)
+#define PCM186X_MEM_RDATA1 (PCM186X_PAGE_BASE(1) + 9)
+#define PCM186X_MEM_RDATA2 (PCM186X_PAGE_BASE(1) + 10)
+#define PCM186X_MEM_RDATA3 (PCM186X_PAGE_BASE(1) + 11)
+
+/* Register Definitions - Page 3 */
+#define PCM186X_OSC_PWR_DOWN_CTRL (PCM186X_PAGE_BASE(3) + 18)
+#define PCM186X_MIC_BIAS_CTRL (PCM186X_PAGE_BASE(3) + 21)
+
+/* Register Definitions - Page 253 */
+#define PCM186X_CURR_TRIM_CTRL (PCM186X_PAGE_BASE(253) + 20)
+
+#define PCM186X_MAX_REGISTER PCM186X_CURR_TRIM_CTRL
+
+/* PCM186X_PAGE */
+#define PCM186X_RESET 0xff
+
+/* PCM186X_ADCX_INPUT_SEL_X */
+#define PCM186X_ADC_INPUT_SEL_POL BIT(7)
+#define PCM186X_ADC_INPUT_SEL_MASK GENMASK(5, 0)
+
+/* PCM186X_PCM_CFG */
+#define PCM186X_PCM_CFG_RX_WLEN_MASK GENMASK(7, 6)
+#define PCM186X_PCM_CFG_RX_WLEN_SHIFT 6
+#define PCM186X_PCM_CFG_RX_WLEN_32 0x00
+#define PCM186X_PCM_CFG_RX_WLEN_24 0x01
+#define PCM186X_PCM_CFG_RX_WLEN_20 0x02
+#define PCM186X_PCM_CFG_RX_WLEN_16 0x03
+#define PCM186X_PCM_CFG_TDM_LRCK_MODE BIT(4)
+#define PCM186X_PCM_CFG_TX_WLEN_MASK GENMASK(3, 2)
+#define PCM186X_PCM_CFG_TX_WLEN_SHIFT 2
+#define PCM186X_PCM_CFG_TX_WLEN_32 0x00
+#define PCM186X_PCM_CFG_TX_WLEN_24 0x01
+#define PCM186X_PCM_CFG_TX_WLEN_20 0x02
+#define PCM186X_PCM_CFG_TX_WLEN_16 0x03
+#define PCM186X_PCM_CFG_FMT_MASK GENMASK(1, 0)
+#define PCM186X_PCM_CFG_FMT_SHIFT 0
+#define PCM186X_PCM_CFG_FMT_I2S 0x00
+#define PCM186X_PCM_CFG_FMT_LEFTJ 0x01
+#define PCM186X_PCM_CFG_FMT_RIGHTJ 0x02
+#define PCM186X_PCM_CFG_FMT_TDM 0x03
+
+/* PCM186X_TDM_TX_SEL */
+#define PCM186X_TDM_TX_SEL_2CH 0x00
+#define PCM186X_TDM_TX_SEL_4CH 0x01
+#define PCM186X_TDM_TX_SEL_6CH 0x02
+#define PCM186X_TDM_TX_SEL_MASK 0x03
+
+/* PCM186X_CLK_CTRL */
+#define PCM186X_CLK_CTRL_SCK_XI_SEL1 BIT(7)
+#define PCM186X_CLK_CTRL_SCK_XI_SEL0 BIT(6)
+#define PCM186X_CLK_CTRL_SCK_SRC_PLL BIT(5)
+#define PCM186X_CLK_CTRL_MST_MODE BIT(4)
+#define PCM186X_CLK_CTRL_ADC_SRC_PLL BIT(3)
+#define PCM186X_CLK_CTRL_DSP2_SRC_PLL BIT(2)
+#define PCM186X_CLK_CTRL_DSP1_SRC_PLL BIT(1)
+#define PCM186X_CLK_CTRL_CLKDET_EN BIT(0)
+
+/* PCM186X_PLL_CTRL */
+#define PCM186X_PLL_CTRL_LOCK BIT(4)
+#define PCM186X_PLL_CTRL_REF_SEL BIT(1)
+#define PCM186X_PLL_CTRL_EN BIT(0)
+
+/* PCM186X_POWER_CTRL */
+#define PCM186X_PWR_CTRL_PWRDN BIT(2)
+#define PCM186X_PWR_CTRL_SLEEP BIT(1)
+#define PCM186X_PWR_CTRL_STBY BIT(0)
+
+/* PCM186X_CLK_STATUS */
+#define PCM186X_CLK_STATUS_LRCKHLT BIT(6)
+#define PCM186X_CLK_STATUS_BCKHLT BIT(5)
+#define PCM186X_CLK_STATUS_SCKHLT BIT(4)
+#define PCM186X_CLK_STATUS_LRCKERR BIT(2)
+#define PCM186X_CLK_STATUS_BCKERR BIT(1)
+#define PCM186X_CLK_STATUS_SCKERR BIT(0)
+
+/* PCM186X_SUPPLY_STATUS */
+#define PCM186X_SUPPLY_STATUS_DVDD BIT(2)
+#define PCM186X_SUPPLY_STATUS_AVDD BIT(1)
+#define PCM186X_SUPPLY_STATUS_LDO BIT(0)
+
+/* PCM186X_MMAP_STAT_CTRL */
+#define PCM186X_MMAP_STAT_DONE BIT(4)
+#define PCM186X_MMAP_STAT_BUSY BIT(2)
+#define PCM186X_MMAP_STAT_R_REQ BIT(1)
+#define PCM186X_MMAP_STAT_W_REQ BIT(0)
+
+extern const struct regmap_config pcm186x_regmap;
+
+int pcm186x_probe(struct device *dev, enum pcm186x_type type, int irq,
+ struct regmap *regmap);
+int pcm186x_remove(struct device *dev);
+
+#endif /* _PCM186X_H_ */
diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c
index 25c63510ae15..7cdd2dc4fd79 100644
--- a/sound/soc/codecs/pcm512x-spi.c
+++ b/sound/soc/codecs/pcm512x-spi.c
@@ -70,3 +70,7 @@ static struct spi_driver pcm512x_spi_driver = {
};
module_spi_driver(pcm512x_spi_driver);
+
+MODULE_DESCRIPTION("ASoC PCM512x codec driver - SPI");
+MODULE_AUTHOR("Mark Brown <broonie@kernel.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 974a9040651d..7ef3b5476bcc 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/gcd.h>
#include "rl6231.h"
/**
@@ -106,6 +107,25 @@ static const struct pll_calc_map pll_preset_table[] = {
{19200000, 24576000, 3, 30, 3, false},
};
+static unsigned int find_best_div(unsigned int in,
+ unsigned int max, unsigned int div)
+{
+ unsigned int d;
+
+ if (in <= max)
+ return 1;
+
+ d = in / max;
+ if (in % max)
+ d++;
+
+ while (div % d != 0)
+ d++;
+
+ return d;
+}
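
In words: find_best_div() returns the smallest d >= ceil(in / max) that also divides div evenly, so dividing both clocks by d preserves their exact ratio while bringing in / d under max. A worked example (the max value here is hypothetical):

    /* gcd(19200000, 24576000) = 768000; with max = 8000000:
     * ceil(19200000 / 8000000) = 3 and 768000 % 3 == 0, so d = 3 and
     * f_in = 19200000 / 3 = 6400000 <= max. */
    unsigned int d = find_best_div(19200000, 8000000, 768000);  /* 3 */
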
+
/**
 * rl6231_pll_calc - Calculate PLL M/N/K code.
* @freq_in: external clock provided to codec.
@@ -120,9 +140,11 @@ int rl6231_pll_calc(const unsigned int freq_in,
const unsigned int freq_out, struct rl6231_pll_code *pll_code)
{
int max_n = RL6231_PLL_N_MAX, max_m = RL6231_PLL_M_MAX;
- int i, k, red, n_t, pll_out, in_t, out_t;
- int n = 0, m = 0, m_t = 0;
- int red_t = abs(freq_out - freq_in);
+ int i, k, n_t;
+ int k_t, min_k, max_k, n = 0, m = 0, m_t = 0;
+ unsigned int red, pll_out, in_t, out_t, div, div_t;
+ unsigned int red_t = abs(freq_out - freq_in);
+ unsigned int f_in, f_out, f_max;
bool bypass = false;
if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in)
@@ -140,39 +162,52 @@ int rl6231_pll_calc(const unsigned int freq_in,
}
}
- k = 100000000 / freq_out - 2;
- if (k > RL6231_PLL_K_MAX)
- k = RL6231_PLL_K_MAX;
- for (n_t = 0; n_t <= max_n; n_t++) {
- in_t = freq_in / (k + 2);
- pll_out = freq_out / (n_t + 2);
- if (in_t < 0)
- continue;
- if (in_t == pll_out) {
- bypass = true;
- n = n_t;
- goto code_find;
- }
- red = abs(in_t - pll_out);
- if (red < red_t) {
- bypass = true;
- n = n_t;
- m = m_t;
- if (red == 0)
+ min_k = 80000000 / freq_out - 2;
+ max_k = 150000000 / freq_out - 2;
+ if (max_k > RL6231_PLL_K_MAX)
+ max_k = RL6231_PLL_K_MAX;
+ if (min_k > RL6231_PLL_K_MAX)
+ min_k = max_k = RL6231_PLL_K_MAX;
+ div_t = gcd(freq_in, freq_out);
+ f_max = 0xffffffff / RL6231_PLL_N_MAX;
+ div = find_best_div(freq_in, f_max, div_t);
+ f_in = freq_in / div;
+ f_out = freq_out / div;
+ k = min_k;
+ for (k_t = min_k; k_t <= max_k; k_t++) {
+ for (n_t = 0; n_t <= max_n; n_t++) {
+ in_t = f_in * (n_t + 2);
+ pll_out = f_out * (k_t + 2);
+ if (in_t == pll_out) {
+ bypass = true;
+ n = n_t;
+ k = k_t;
goto code_find;
- red_t = red;
- }
- for (m_t = 0; m_t <= max_m; m_t++) {
- out_t = in_t / (m_t + 2);
- red = abs(out_t - pll_out);
+ }
+ out_t = in_t / (k_t + 2);
+ red = abs(f_out - out_t);
if (red < red_t) {
- bypass = false;
+ bypass = true;
n = n_t;
- m = m_t;
+ m = 0;
+ k = k_t;
if (red == 0)
goto code_find;
red_t = red;
}
+ for (m_t = 0; m_t <= max_m; m_t++) {
+ out_t = in_t / ((m_t + 2) * (k_t + 2));
+ red = abs(f_out - out_t);
+ if (red < red_t) {
+ bypass = false;
+ n = n_t;
+ m = m_t;
+ k = k_t;
+ if (red == 0)
+ goto code_find;
+ red_t = red;
+ }
+ }
}
}
pr_debug("Only get approximation about PLL\n");
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 64bf26cec20d..2144edca97b0 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -381,6 +381,7 @@ int rt5514_spi_burst_read(unsigned int addr, u8 *rxbuf, size_t len)
return true;
}
+EXPORT_SYMBOL_GPL(rt5514_spi_burst_read);
/**
* rt5514_spi_burst_write - Write data to SPI by rt5514 address.
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 2dd6e9f990a4..198df016802f 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -295,6 +295,33 @@ static int rt5514_dsp_voice_wake_up_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int rt5514_calibration(struct rt5514_priv *rt5514, bool on)
+{
+ if (on) {
+ regmap_write(rt5514->regmap, RT5514_ANA_CTRL_PLL3, 0x0000000a);
+ regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL, 0xf,
+ 0xa);
+ regmap_update_bits(rt5514->regmap, RT5514_PWR_ANA1, 0x301,
+ 0x301);
+ regmap_write(rt5514->regmap, RT5514_PLL3_CALIB_CTRL4,
+ 0x80000000 | rt5514->pll3_cal_value);
+ regmap_write(rt5514->regmap, RT5514_PLL3_CALIB_CTRL1,
+ 0x8bb80800);
+ regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+ 0xc0000000, 0x80000000);
+ regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+ 0xc0000000, 0xc0000000);
+ } else {
+ regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+ 0xc0000000, 0x40000000);
+ regmap_update_bits(rt5514->regmap, RT5514_PWR_ANA1, 0x301, 0);
+ regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL, 0xf,
+ 0x4);
+ }
+
+ return 0;
+}
+
static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -302,6 +329,7 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
struct snd_soc_codec *codec = rt5514->codec;
const struct firmware *fw = NULL;
+ u8 buf[8] = {0};
if (ucontrol->value.integer.value[0] == rt5514->dsp_enabled)
return 0;
@@ -310,6 +338,35 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
rt5514->dsp_enabled = ucontrol->value.integer.value[0];
if (rt5514->dsp_enabled) {
+ if (rt5514->pdata.dsp_calib_clk_name &&
+ !IS_ERR(rt5514->dsp_calib_clk)) {
+ if (clk_set_rate(rt5514->dsp_calib_clk,
+ rt5514->pdata.dsp_calib_clk_rate))
+ dev_err(codec->dev,
+ "Can't set rate for mclk");
+
+ if (clk_prepare_enable(rt5514->dsp_calib_clk))
+ dev_err(codec->dev,
+ "Can't enable dsp_calib_clk");
+
+ rt5514_calibration(rt5514, true);
+
+ msleep(20);
+#if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI)
+ rt5514_spi_burst_read(RT5514_PLL3_CALIB_CTRL6 |
+ RT5514_DSP_MAPPING,
+ (u8 *)&buf, sizeof(buf));
+#else
+ dev_err(codec->dev, "There is no SPI driver for"
+ " loading the firmware\n");
+#endif
+ rt5514->pll3_cal_value = buf[0] | buf[1] << 8 |
+ buf[2] << 16 | buf[3] << 24;
+
+ rt5514_calibration(rt5514, false);
+ clk_disable_unprepare(rt5514->dsp_calib_clk);
+ }
+
rt5514_enable_dsp_prepare(rt5514);
request_firmware(&fw, RT5514_FIRMWARE1, codec->dev);
@@ -341,6 +398,20 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
/* DSP run */
regmap_write(rt5514->i2c_regmap, 0x18002f00,
0x00055148);
+
+ if (rt5514->pdata.dsp_calib_clk_name &&
+ !IS_ERR(rt5514->dsp_calib_clk)) {
+ msleep(20);
+
+ regmap_write(rt5514->i2c_regmap, 0x1800211c,
+ rt5514->pll3_cal_value);
+ regmap_write(rt5514->i2c_regmap, 0x18002124,
+ 0x00220012);
+ regmap_write(rt5514->i2c_regmap, 0x18002124,
+ 0x80220042);
+ regmap_write(rt5514->i2c_regmap, 0x18002124,
+ 0xe0220042);
+ }
} else {
regmap_multi_reg_write(rt5514->i2c_regmap,
rt5514_i2c_patch, ARRAY_SIZE(rt5514_i2c_patch));
@@ -1024,12 +1095,22 @@ static int rt5514_set_bias_level(struct snd_soc_codec *codec,
static int rt5514_probe(struct snd_soc_codec *codec)
{
struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+ struct platform_device *pdev = container_of(codec->dev,
+ struct platform_device, dev);
rt5514->mclk = devm_clk_get(codec->dev, "mclk");
if (PTR_ERR(rt5514->mclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
+ if (rt5514->pdata.dsp_calib_clk_name) {
+ rt5514->dsp_calib_clk = devm_clk_get(&pdev->dev,
+ rt5514->pdata.dsp_calib_clk_name);
+ if (PTR_ERR(rt5514->dsp_calib_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ }
+
rt5514->codec = codec;
+ rt5514->pll3_cal_value = 0x0078b000;
return 0;
}
@@ -1147,6 +1228,10 @@ static int rt5514_parse_dp(struct rt5514_priv *rt5514, struct device *dev)
{
device_property_read_u32(dev, "realtek,dmic-init-delay-ms",
&rt5514->pdata.dmic_init_delay);
+ device_property_read_string(dev, "realtek,dsp-calib-clk-name",
+ &rt5514->pdata.dsp_calib_clk_name);
+ device_property_read_u32(dev, "realtek,dsp-calib-clk-rate",
+ &rt5514->pdata.dsp_calib_clk_rate);
return 0;
}
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
index 2dc40e6d8b3f..f0f3400ce6b1 100644
--- a/sound/soc/codecs/rt5514.h
+++ b/sound/soc/codecs/rt5514.h
@@ -34,7 +34,9 @@
#define RT5514_CLK_CTRL1 0x2104
#define RT5514_CLK_CTRL2 0x2108
#define RT5514_PLL3_CALIB_CTRL1 0x2110
+#define RT5514_PLL3_CALIB_CTRL4 0x2120
#define RT5514_PLL3_CALIB_CTRL5 0x2124
+#define RT5514_PLL3_CALIB_CTRL6 0x2128
#define RT5514_DELAY_BUF_CTRL1 0x2140
#define RT5514_DELAY_BUF_CTRL3 0x2148
#define RT5514_ASRC_IN_CTRL1 0x2180
@@ -272,7 +274,7 @@ struct rt5514_priv {
struct rt5514_platform_data pdata;
struct snd_soc_codec *codec;
struct regmap *i2c_regmap, *regmap;
- struct clk *mclk;
+ struct clk *mclk, *dsp_calib_clk;
int sysclk;
int sysclk_src;
int lrck;
@@ -281,6 +283,7 @@ struct rt5514_priv {
int pll_in;
int pll_out;
int dsp_enabled;
+ unsigned int pll3_cal_value;
};
#endif /* __RT5514_H__ */
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index edc152c8a1fe..8f140c8b93ac 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -1943,6 +1943,56 @@ static int rt5650_hp_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int rt5645_set_micbias1_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+ RT5645_MICBIAS1_POW_CTRL_SEL_MASK,
+ RT5645_MICBIAS1_POW_CTRL_SEL_M);
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+ RT5645_MICBIAS1_POW_CTRL_SEL_MASK,
+ RT5645_MICBIAS1_POW_CTRL_SEL_A);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5645_set_micbias2_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+ RT5645_MICBIAS2_POW_CTRL_SEL_MASK,
+ RT5645_MICBIAS2_POW_CTRL_SEL_M);
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+ RT5645_MICBIAS2_POW_CTRL_SEL_MASK,
+ RT5645_MICBIAS2_POW_CTRL_SEL_A);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("LDO2", RT5645_PWR_MIXER,
RT5645_PWR_LDO2_BIT, 0, NULL, 0),
@@ -1980,10 +2030,12 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
/* Input Side */
/* micbias */
- SND_SOC_DAPM_MICBIAS("micbias1", RT5645_PWR_ANLG2,
- RT5645_PWR_MB1_BIT, 0),
- SND_SOC_DAPM_MICBIAS("micbias2", RT5645_PWR_ANLG2,
- RT5645_PWR_MB2_BIT, 0),
+ SND_SOC_DAPM_SUPPLY("micbias1", RT5645_PWR_ANLG2,
+ RT5645_PWR_MB1_BIT, 0, rt5645_set_micbias1_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("micbias2", RT5645_PWR_ANLG2,
+ RT5645_PWR_MB2_BIT, 0, rt5645_set_micbias2_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
/* Input Lines */
SND_SOC_DAPM_INPUT("DMIC L1"),
SND_SOC_DAPM_INPUT("DMIC R1"),
@@ -3394,6 +3446,9 @@ static int rt5645_probe(struct snd_soc_codec *codec)
snd_soc_dapm_sync(dapm);
}
+ if (rt5645->pdata.long_name)
+ codec->component.card->long_name = rt5645->pdata.long_name;
+
rt5645->eq_param = devm_kzalloc(codec->dev,
RT5645_HWEQ_NUM * sizeof(struct rt5645_eq_param_s), GFP_KERNEL);
@@ -3570,63 +3625,74 @@ static const struct acpi_device_id rt5645_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match);
#endif
-static const struct rt5645_platform_data general_platform_data = {
+static const struct rt5645_platform_data intel_braswell_platform_data = {
.dmic1_data_pin = RT5645_DMIC1_DISABLE,
.dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
.jd_mode = 3,
};
-static const struct dmi_system_id dmi_platform_intel_braswell[] = {
+static const struct rt5645_platform_data buddy_platform_data = {
+ .dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
+ .dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
+ .jd_mode = 3,
+ .level_trigger_irq = true,
+};
+
+static const struct rt5645_platform_data gpd_win_platform_data = {
+ .jd_mode = 3,
+ .inv_jd1_1 = true,
+ .long_name = "gpd-win-pocket-rt5645",
+	/*
+	 * The GPD Pocket has a differential mic; for the GPD Win this
+	 * setting does not matter.
+	 */
+ .in2_diff = true,
+};
+
+static const struct rt5645_platform_data asus_t100ha_platform_data = {
+ .dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
+ .dmic2_data_pin = RT5645_DMIC2_DISABLE,
+ .jd_mode = 3,
+ .inv_jd1_1 = true,
+};
+
+static const struct rt5645_platform_data jd_mode3_platform_data = {
+ .jd_mode = 3,
+};
+
+static const struct dmi_system_id dmi_platform_data[] = {
+ {
+ .ident = "Chrome Buddy",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Buddy"),
+ },
+ .driver_data = (void *)&buddy_platform_data,
+ },
{
.ident = "Intel Strago",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Strago"),
},
+ .driver_data = (void *)&intel_braswell_platform_data,
},
{
.ident = "Google Chrome",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
},
+ .driver_data = (void *)&intel_braswell_platform_data,
},
{
.ident = "Google Setzer",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
},
+ .driver_data = (void *)&intel_braswell_platform_data,
},
{
.ident = "Microsoft Surface 3",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
},
+ .driver_data = (void *)&intel_braswell_platform_data,
},
- { }
-};
-
-static const struct rt5645_platform_data buddy_platform_data = {
- .dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
- .dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
- .jd_mode = 3,
- .level_trigger_irq = true,
-};
-
-static const struct dmi_system_id dmi_platform_intel_broadwell[] = {
- {
- .ident = "Chrome Buddy",
- .matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Buddy"),
- },
- },
- { }
-};
-
-static const struct rt5645_platform_data gpd_win_platform_data = {
- .jd_mode = 3,
- .inv_jd1_1 = true,
-};
-
-static const struct dmi_system_id dmi_platform_gpd_win[] = {
{
/*
* Match for the GPDwin which unfortunately uses somewhat
@@ -3637,46 +3703,38 @@ static const struct dmi_system_id dmi_platform_gpd_win[] = {
* the same default product_name. Also the GPDwin is the
* only device to have both board_ and product_name not set.
*/
- .ident = "GPD Win",
+ .ident = "GPD Win / Pocket",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_BOARD_SERIAL, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
},
+ .driver_data = (void *)&gpd_win_platform_data,
},
- {}
-};
-
-static const struct rt5645_platform_data general_platform_data2 = {
- .dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
- .dmic2_data_pin = RT5645_DMIC2_DISABLE,
- .jd_mode = 3,
- .inv_jd1_1 = true,
-};
-
-static const struct dmi_system_id dmi_platform_asus_t100ha[] = {
{
.ident = "ASUS T100HAN",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
+ .driver_data = (void *)&asus_t100ha_platform_data,
},
- { }
-};
-
-static const struct rt5645_platform_data minix_z83_4_platform_data = {
- .jd_mode = 3,
-};
-
-static const struct dmi_system_id dmi_platform_minix_z83_4[] = {
{
.ident = "MINIX Z83-4",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
},
+ .driver_data = (void *)&jd_mode3_platform_data,
+ },
+ {
+ .ident = "Teclast X80 Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X80 Pro"),
+ },
+ .driver_data = (void *)&jd_mode3_platform_data,
},
{ }
};
@@ -3684,9 +3742,9 @@ static const struct dmi_system_id dmi_platform_minix_z83_4[] = {
static bool rt5645_check_dp(struct device *dev)
{
if (device_property_present(dev, "realtek,in2-differential") ||
- device_property_present(dev, "realtek,dmic1-data-pin") ||
- device_property_present(dev, "realtek,dmic2-data-pin") ||
- device_property_present(dev, "realtek,jd-mode"))
+ device_property_present(dev, "realtek,dmic1-data-pin") ||
+ device_property_present(dev, "realtek,dmic2-data-pin") ||
+ device_property_present(dev, "realtek,jd-mode"))
return true;
return false;
@@ -3710,6 +3768,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct rt5645_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ const struct dmi_system_id *dmi_data;
struct rt5645_priv *rt5645;
int ret, i;
unsigned int val;
@@ -3723,20 +3782,18 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
rt5645->i2c = i2c;
i2c_set_clientdata(i2c, rt5645);
+ dmi_data = dmi_first_match(dmi_platform_data);
+ if (dmi_data) {
+ dev_info(&i2c->dev, "Detected %s platform\n", dmi_data->ident);
+ pdata = dmi_data->driver_data;
+ }
+
if (pdata)
rt5645->pdata = *pdata;
- else if (dmi_check_system(dmi_platform_intel_broadwell))
- rt5645->pdata = buddy_platform_data;
else if (rt5645_check_dp(&i2c->dev))
rt5645_parse_dt(rt5645, &i2c->dev);
- else if (dmi_check_system(dmi_platform_intel_braswell))
- rt5645->pdata = general_platform_data;
- else if (dmi_check_system(dmi_platform_gpd_win))
- rt5645->pdata = gpd_win_platform_data;
- else if (dmi_check_system(dmi_platform_asus_t100ha))
- rt5645->pdata = general_platform_data2;
- else if (dmi_check_system(dmi_platform_minix_z83_4))
- rt5645->pdata = minix_z83_4_platform_data;
+ else
+ rt5645->pdata = jd_mode3_platform_data;
if (quirk != -1) {
rt5645->pdata.in2_diff = QUIRK_IN2_DIFF(quirk);
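
The table consolidation above also simplifies the probe-time lookup: instead of one dmi_check_system() call per quirk table, a single dmi_first_match() returns the matching entry, whose driver_data carries that board's platform data (sketch below, mirroring the probe changes in this patch):

    /* Editorial sketch of the consolidated DMI lookup. */
    const struct dmi_system_id *dmi_data = dmi_first_match(dmi_platform_data);

    if (dmi_data) {
            dev_info(&i2c->dev, "Detected %s platform\n", dmi_data->ident);
            pdata = dmi_data->driver_data;  /* per-board quirks */
    }
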
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index cfc5f97549eb..940325b28c29 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2117,6 +2117,12 @@ enum {
#define RT5645_RXDC_SRC_STO (0x0 << 7)
#define RT5645_RXDC_SRC_MONO (0x1 << 7)
#define RT5645_RXDC_SRC_SFT (7)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_MASK (0x1 << 5)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_A (0x0 << 5)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_M (0x1 << 5)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_MASK (0x1 << 4)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_A (0x0 << 4)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_M (0x1 << 4)
#define RT5645_RXDP2_SEL_MASK (0x1 << 3)
#define RT5645_RXDP2_SEL_IF2 (0x0 << 3)
#define RT5645_RXDP2_SEL_ADC (0x1 << 3)
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index f2bb4feba3b6..633cdcfc933d 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1332,10 +1332,13 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
sgtl5000->mclk = devm_clk_get(&client->dev, NULL);
if (IS_ERR(sgtl5000->mclk)) {
ret = PTR_ERR(sgtl5000->mclk);
- dev_err(&client->dev, "Failed to get mclock: %d\n", ret);
/* Defer the probe to see if the clk will be provided later */
if (ret == -ENOENT)
ret = -EPROBE_DEFER;
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev, "Failed to get mclock: %d\n",
+ ret);
goto disable_regs;
}
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 354dc0d64f11..7b91ee267b4e 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -231,14 +231,17 @@ static struct snd_soc_dai_driver si476x_dai = {
.ops = &si476x_dai_ops,
};
-static struct regmap *si476x_get_regmap(struct device *dev)
+static int si476x_probe(struct snd_soc_component *component)
{
- return dev_get_regmap(dev->parent, NULL);
+ snd_soc_component_init_regmap(component,
+ dev_get_regmap(component->dev->parent, NULL));
+
+ return 0;
}
static const struct snd_soc_codec_driver soc_codec_dev_si476x = {
- .get_regmap = si476x_get_regmap,
.component_driver = {
+ .probe = si476x_probe,
.dapm_widgets = si476x_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(si476x_dapm_widgets),
.dapm_routes = si476x_dapm_routes,
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
deleted file mode 100644
index 887923e68849..000000000000
--- a/sound/soc/codecs/sn95031.c
+++ /dev/null
@@ -1,936 +0,0 @@
-/*
- * sn95031.c - TI sn95031 Codec driver
- *
- * Copyright (C) 2010 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * Author: Harsha Priya <priya.harsha@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include <asm/intel_scu_ipc.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/initval.h>
-#include <sound/tlv.h>
-#include <sound/jack.h>
-#include "sn95031.h"
-
-#define SN95031_RATES (SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_44100)
-#define SN95031_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
-
-/* adc helper functions */
-
-/* enables mic bias voltage */
-static void sn95031_enable_mic_bias(struct snd_soc_codec *codec)
-{
- snd_soc_write(codec, SN95031_VAUD, BIT(2)|BIT(1)|BIT(0));
- snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(2), BIT(2));
-}
-
-/* Enable/Disable the ADC depending on the argument */
-static void configure_adc(struct snd_soc_codec *sn95031_codec, int val)
-{
- int value = snd_soc_read(sn95031_codec, SN95031_ADC1CNTL1);
-
- if (val) {
- /* Enable and start the ADC */
- value |= (SN95031_ADC_ENBL | SN95031_ADC_START);
- value &= (~SN95031_ADC_NO_LOOP);
- } else {
- /* Just stop the ADC */
- value &= (~SN95031_ADC_START);
- }
- snd_soc_write(sn95031_codec, SN95031_ADC1CNTL1, value);
-}
-
-/*
- * finds an empty channel for conversion
- * If the ADC is not enabled then start using 0th channel
- * itself. Otherwise find an empty channel by looking for a
- * channel in which the stopbit is set to 1. returns the index
- * of the first free channel if succeeds or an error code.
- *
- * Context: can sleep
- *
- */
-static int find_free_channel(struct snd_soc_codec *sn95031_codec)
-{
- int i, value;
-
- /* check whether ADC is enabled */
- value = snd_soc_read(sn95031_codec, SN95031_ADC1CNTL1);
-
- if ((value & SN95031_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < SN95031_ADC_CHANLS_MAX; i++) {
- value = snd_soc_read(sn95031_codec,
- SN95031_ADC_CHNL_START_ADDR + i);
- if (value & SN95031_STOPBIT_MASK)
- break;
- }
- return (i == SN95031_ADC_CHANLS_MAX) ? (-EINVAL) : i;
-}
-
-/* Initialize the ADC for reading micbias values. Can sleep. */
-static int sn95031_initialize_adc(struct snd_soc_codec *sn95031_codec)
-{
- int base_addr, chnl_addr;
- int value;
- int channel_index;
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel(sn95031_codec);
- if (channel_index < 0) {
- pr_err("No free ADC channels");
- return channel_index;
- }
-
- base_addr = SN95031_ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == SN95031_ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- value = snd_soc_read(sn95031_codec, base_addr);
- /* Set the stop bit to zero */
- snd_soc_write(sn95031_codec, base_addr, value & 0xEF);
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- /* Since this is the last channel, set the stop bit
- to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
- snd_soc_write(sn95031_codec, base_addr,
- SN95031_AUDIO_DETECT_CODE | 0x10);
-
- chnl_addr = SN95031_ADC_DATA_START_ADDR + 2 * channel_index;
- pr_debug("mid_initialize : %x", chnl_addr);
- configure_adc(sn95031_codec, 1);
- return chnl_addr;
-}
-
-
-/* reads the ADC registers and gets the mic bias value in mV. */
-static unsigned int sn95031_get_mic_bias(struct snd_soc_codec *codec)
-{
- u16 adc_adr = sn95031_initialize_adc(codec);
- u16 adc_val1, adc_val2;
- unsigned int mic_bias;
-
- sn95031_enable_mic_bias(codec);
-
- /* Enable the sound card for conversion before reading */
- snd_soc_write(codec, SN95031_ADC1CNTL3, 0x05);
- /* Re-toggle the RRDATARD bit */
- snd_soc_write(codec, SN95031_ADC1CNTL3, 0x04);
-
- /* Read the higher bits of data */
- msleep(1000);
- adc_val1 = snd_soc_read(codec, adc_adr);
- adc_adr++;
- adc_val2 = snd_soc_read(codec, adc_adr);
-
- /* Adding lower two bits to the higher bits */
- mic_bias = (adc_val1 << 2) + (adc_val2 & 3);
- mic_bias = (mic_bias * SN95031_ADC_ONE_LSB_MULTIPLIER) / 1000;
- pr_debug("mic bias = %dmV\n", mic_bias);
- return mic_bias;
-}
-/*end - adc helper functions */
-
-static int sn95031_read(void *ctx, unsigned int reg, unsigned int *val)
-{
- u8 value = 0;
- int ret;
-
- ret = intel_scu_ipc_ioread8(reg, &value);
- if (ret == 0)
- *val = value;
-
- return ret;
-}
-
-static int sn95031_write(void *ctx, unsigned int reg, unsigned int value)
-{
- return intel_scu_ipc_iowrite8(reg, value);
-}
-
-static const struct regmap_config sn95031_regmap = {
- .reg_read = sn95031_read,
- .reg_write = sn95031_write,
-};
-
-static int sn95031_set_vaud_bias(struct snd_soc_codec *codec,
- enum snd_soc_bias_level level)
-{
- switch (level) {
- case SND_SOC_BIAS_ON:
- break;
-
- case SND_SOC_BIAS_PREPARE:
- if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_STANDBY) {
- pr_debug("vaud_bias powering up pll\n");
- /* power up the pll */
- snd_soc_write(codec, SN95031_AUDPLLCTRL, BIT(5));
- /* enable pcm 2 */
- snd_soc_update_bits(codec, SN95031_PCM2C2,
- BIT(0), BIT(0));
- }
- break;
-
- case SND_SOC_BIAS_STANDBY:
- switch (snd_soc_codec_get_bias_level(codec)) {
- case SND_SOC_BIAS_OFF:
- pr_debug("vaud_bias power up rail\n");
- /* power up the rail */
- snd_soc_write(codec, SN95031_VAUD,
- BIT(2)|BIT(1)|BIT(0));
- msleep(1);
- break;
- case SND_SOC_BIAS_PREPARE:
- /* turn off pcm */
- pr_debug("vaud_bias power dn pcm\n");
- snd_soc_update_bits(codec, SN95031_PCM2C2, BIT(0), 0);
- snd_soc_write(codec, SN95031_AUDPLLCTRL, 0);
- break;
- default:
- break;
- }
- break;
-
-
- case SND_SOC_BIAS_OFF:
- pr_debug("vaud_bias _OFF doing rail shutdown\n");
- snd_soc_write(codec, SN95031_VAUD, BIT(3));
- break;
- }
-
- return 0;
-}
-
-static int sn95031_vhs_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- pr_debug("VHS SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
- /* power up the rail */
- snd_soc_write(codec, SN95031_VHSP, 0x3D);
- snd_soc_write(codec, SN95031_VHSN, 0x3F);
- msleep(1);
- } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
- pr_debug("VHS SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
- snd_soc_write(codec, SN95031_VHSP, 0xC4);
- snd_soc_write(codec, SN95031_VHSN, 0x04);
- }
- return 0;
-}
-
-static int sn95031_vihf_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- pr_debug("VIHF SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
- /* power up the rail */
- snd_soc_write(codec, SN95031_VIHF, 0x27);
- msleep(1);
- } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
- pr_debug("VIHF SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
- snd_soc_write(codec, SN95031_VIHF, 0x24);
- }
- return 0;
-}
-
-static int sn95031_dmic12_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
-
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- ldo = BIT(5)|BIT(4);
- clk_dir = BIT(0);
- data_dir = BIT(7);
- }
- /* program DMIC LDO, clock and set clock */
- snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
- snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(0), clk_dir);
- snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(7), data_dir);
- return 0;
-}
-
-static int sn95031_dmic34_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
-
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- ldo = BIT(5)|BIT(4);
- clk_dir = BIT(2);
- data_dir = BIT(1);
- }
- /* program DMIC LDO, clock and set clock */
- snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
- snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(2), clk_dir);
- snd_soc_update_bits(codec, SN95031_DMICBUF45, BIT(1), data_dir);
- return 0;
-}
-
-static int sn95031_dmic56_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- unsigned int ldo = 0;
-
- if (SND_SOC_DAPM_EVENT_ON(event))
- ldo = BIT(7)|BIT(6);
-
- /* program DMIC LDO */
- snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(7)|BIT(6), ldo);
- return 0;
-}
-
-/* mux controls */
-static const char *sn95031_mic_texts[] = { "AMIC", "LineIn" };
-
-static SOC_ENUM_SINGLE_DECL(sn95031_micl_enum,
- SN95031_ADCCONFIG, 1, sn95031_mic_texts);
-
-static const struct snd_kcontrol_new sn95031_micl_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_micl_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_micr_enum,
- SN95031_ADCCONFIG, 3, sn95031_mic_texts);
-
-static const struct snd_kcontrol_new sn95031_micr_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_micr_enum);
-
-static const char *sn95031_input_texts[] = { "DMIC1", "DMIC2", "DMIC3",
- "DMIC4", "DMIC5", "DMIC6",
- "ADC Left", "ADC Right" };
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input1_enum,
- SN95031_AUDIOMUX12, 0, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input1_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_input1_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input2_enum,
- SN95031_AUDIOMUX12, 4, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input2_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_input2_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input3_enum,
- SN95031_AUDIOMUX34, 0, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input3_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_input3_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input4_enum,
- SN95031_AUDIOMUX34, 4, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input4_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_input4_enum);
-
-/* capture path controls */
-
-static const char *sn95031_micmode_text[] = {"Single Ended", "Differential"};
-
-/* 0dB to 30dB in 10dB steps */
-static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 10, 0);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_micmode1_enum,
- SN95031_MICAMP1, 1, sn95031_micmode_text);
-static SOC_ENUM_SINGLE_DECL(sn95031_micmode2_enum,
- SN95031_MICAMP2, 1, sn95031_micmode_text);
-
-static const char *sn95031_dmic_cfg_text[] = {"GPO", "DMIC"};
-
-static SOC_ENUM_SINGLE_DECL(sn95031_dmic12_cfg_enum,
- SN95031_DMICMUX, 0, sn95031_dmic_cfg_text);
-static SOC_ENUM_SINGLE_DECL(sn95031_dmic34_cfg_enum,
- SN95031_DMICMUX, 1, sn95031_dmic_cfg_text);
-static SOC_ENUM_SINGLE_DECL(sn95031_dmic56_cfg_enum,
- SN95031_DMICMUX, 2, sn95031_dmic_cfg_text);
-
-static const struct snd_kcontrol_new sn95031_snd_controls[] = {
- SOC_ENUM("Mic1Mode Capture Route", sn95031_micmode1_enum),
- SOC_ENUM("Mic2Mode Capture Route", sn95031_micmode2_enum),
- SOC_ENUM("DMIC12 Capture Route", sn95031_dmic12_cfg_enum),
- SOC_ENUM("DMIC34 Capture Route", sn95031_dmic34_cfg_enum),
- SOC_ENUM("DMIC56 Capture Route", sn95031_dmic56_cfg_enum),
- SOC_SINGLE_TLV("Mic1 Capture Volume", SN95031_MICAMP1,
- 2, 4, 0, mic_tlv),
- SOC_SINGLE_TLV("Mic2 Capture Volume", SN95031_MICAMP2,
- 2, 4, 0, mic_tlv),
-};
-
-/* DAPM widgets */
-static const struct snd_soc_dapm_widget sn95031_dapm_widgets[] = {
-
- /* all end points mic, hs etc */
- SND_SOC_DAPM_OUTPUT("HPOUTL"),
- SND_SOC_DAPM_OUTPUT("HPOUTR"),
- SND_SOC_DAPM_OUTPUT("EPOUT"),
- SND_SOC_DAPM_OUTPUT("IHFOUTL"),
- SND_SOC_DAPM_OUTPUT("IHFOUTR"),
- SND_SOC_DAPM_OUTPUT("LINEOUTL"),
- SND_SOC_DAPM_OUTPUT("LINEOUTR"),
- SND_SOC_DAPM_OUTPUT("VIB1OUT"),
- SND_SOC_DAPM_OUTPUT("VIB2OUT"),
-
- SND_SOC_DAPM_INPUT("AMIC1"), /* headset mic */
- SND_SOC_DAPM_INPUT("AMIC2"),
- SND_SOC_DAPM_INPUT("DMIC1"),
- SND_SOC_DAPM_INPUT("DMIC2"),
- SND_SOC_DAPM_INPUT("DMIC3"),
- SND_SOC_DAPM_INPUT("DMIC4"),
- SND_SOC_DAPM_INPUT("DMIC5"),
- SND_SOC_DAPM_INPUT("DMIC6"),
- SND_SOC_DAPM_INPUT("LINEINL"),
- SND_SOC_DAPM_INPUT("LINEINR"),
-
- SND_SOC_DAPM_MICBIAS("AMIC1Bias", SN95031_MICBIAS, 2, 0),
- SND_SOC_DAPM_MICBIAS("AMIC2Bias", SN95031_MICBIAS, 3, 0),
- SND_SOC_DAPM_MICBIAS("DMIC12Bias", SN95031_DMICMUX, 3, 0),
- SND_SOC_DAPM_MICBIAS("DMIC34Bias", SN95031_DMICMUX, 4, 0),
- SND_SOC_DAPM_MICBIAS("DMIC56Bias", SN95031_DMICMUX, 5, 0),
-
- SND_SOC_DAPM_SUPPLY("DMIC12supply", SN95031_DMICLK, 0, 0,
- sn95031_dmic12_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("DMIC34supply", SN95031_DMICLK, 1, 0,
- sn95031_dmic34_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("DMIC56supply", SN95031_DMICLK, 2, 0,
- sn95031_dmic56_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_AIF_OUT("PCM_Out", "Capture", 0,
- SND_SOC_NOPM, 0, 0),
-
- SND_SOC_DAPM_SUPPLY("Headset Rail", SND_SOC_NOPM, 0, 0,
- sn95031_vhs_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("Speaker Rail", SND_SOC_NOPM, 0, 0,
- sn95031_vihf_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- /* playback path driver enables */
- SND_SOC_DAPM_PGA("Headset Left Playback",
- SN95031_DRIVEREN, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Headset Right Playback",
- SN95031_DRIVEREN, 1, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Speaker Left Playback",
- SN95031_DRIVEREN, 2, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Speaker Right Playback",
- SN95031_DRIVEREN, 3, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Vibra1 Playback",
- SN95031_DRIVEREN, 4, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Vibra2 Playback",
- SN95031_DRIVEREN, 5, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Earpiece Playback",
- SN95031_DRIVEREN, 6, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Lineout Left Playback",
- SN95031_LOCTL, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Lineout Right Playback",
- SN95031_LOCTL, 4, 0, NULL, 0),
-
- /* playback path filter enable */
- SND_SOC_DAPM_PGA("Headset Left Filter",
- SN95031_HSEPRXCTRL, 4, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Headset Right Filter",
- SN95031_HSEPRXCTRL, 5, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Speaker Left Filter",
- SN95031_IHFRXCTRL, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Speaker Right Filter",
- SN95031_IHFRXCTRL, 1, 0, NULL, 0),
-
- /* DACs */
- SND_SOC_DAPM_DAC("HSDAC Left", "Headset",
- SN95031_DACCONFIG, 0, 0),
- SND_SOC_DAPM_DAC("HSDAC Right", "Headset",
- SN95031_DACCONFIG, 1, 0),
- SND_SOC_DAPM_DAC("IHFDAC Left", "Speaker",
- SN95031_DACCONFIG, 2, 0),
- SND_SOC_DAPM_DAC("IHFDAC Right", "Speaker",
- SN95031_DACCONFIG, 3, 0),
- SND_SOC_DAPM_DAC("Vibra1 DAC", "Vibra1",
- SN95031_VIB1C5, 1, 0),
- SND_SOC_DAPM_DAC("Vibra2 DAC", "Vibra2",
- SN95031_VIB2C5, 1, 0),
-
- /* capture widgets */
- SND_SOC_DAPM_PGA("LineIn Enable Left", SN95031_MICAMP1,
- 7, 0, NULL, 0),
- SND_SOC_DAPM_PGA("LineIn Enable Right", SN95031_MICAMP2,
- 7, 0, NULL, 0),
-
- SND_SOC_DAPM_PGA("MIC1 Enable", SN95031_MICAMP1, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("MIC2 Enable", SN95031_MICAMP2, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("TX1 Enable", SN95031_AUDIOTXEN, 2, 0, NULL, 0),
- SND_SOC_DAPM_PGA("TX2 Enable", SN95031_AUDIOTXEN, 3, 0, NULL, 0),
- SND_SOC_DAPM_PGA("TX3 Enable", SN95031_AUDIOTXEN, 4, 0, NULL, 0),
- SND_SOC_DAPM_PGA("TX4 Enable", SN95031_AUDIOTXEN, 5, 0, NULL, 0),
-
- /* ADCs have a NULL stream as they will be turned on by the TX path */
- SND_SOC_DAPM_ADC("ADC Left", NULL,
- SN95031_ADCCONFIG, 0, 0),
- SND_SOC_DAPM_ADC("ADC Right", NULL,
- SN95031_ADCCONFIG, 2, 0),
-
- SND_SOC_DAPM_MUX("Mic_InputL Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_micl_mux_control),
- SND_SOC_DAPM_MUX("Mic_InputR Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_micr_mux_control),
-
- SND_SOC_DAPM_MUX("Txpath1 Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_input1_mux_control),
- SND_SOC_DAPM_MUX("Txpath2 Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_input2_mux_control),
- SND_SOC_DAPM_MUX("Txpath3 Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_input3_mux_control),
- SND_SOC_DAPM_MUX("Txpath4 Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_input4_mux_control),
-
-};
-
-static const struct snd_soc_dapm_route sn95031_audio_map[] = {
- /* headset and earpiece map */
- { "HPOUTL", NULL, "Headset Rail"},
- { "HPOUTR", NULL, "Headset Rail"},
- { "HPOUTL", NULL, "Headset Left Playback" },
- { "HPOUTR", NULL, "Headset Right Playback" },
- { "EPOUT", NULL, "Earpiece Playback" },
- { "Headset Left Playback", NULL, "Headset Left Filter"},
- { "Headset Right Playback", NULL, "Headset Right Filter"},
- { "Earpiece Playback", NULL, "Headset Left Filter"},
- { "Headset Left Filter", NULL, "HSDAC Left"},
- { "Headset Right Filter", NULL, "HSDAC Right"},
-
- /* speaker map */
- { "IHFOUTL", NULL, "Speaker Rail"},
- { "IHFOUTR", NULL, "Speaker Rail"},
- { "IHFOUTL", NULL, "Speaker Left Playback"},
- { "IHFOUTR", NULL, "Speaker Right Playback"},
- { "Speaker Left Playback", NULL, "Speaker Left Filter"},
- { "Speaker Right Playback", NULL, "Speaker Right Filter"},
- { "Speaker Left Filter", NULL, "IHFDAC Left"},
- { "Speaker Right Filter", NULL, "IHFDAC Right"},
-
- /* vibra map */
- { "VIB1OUT", NULL, "Vibra1 Playback"},
- { "Vibra1 Playback", NULL, "Vibra1 DAC"},
-
- { "VIB2OUT", NULL, "Vibra2 Playback"},
- { "Vibra2 Playback", NULL, "Vibra2 DAC"},
-
- /* lineout */
- { "LINEOUTL", NULL, "Lineout Left Playback"},
- { "LINEOUTR", NULL, "Lineout Right Playback"},
- { "Lineout Left Playback", NULL, "Headset Left Filter"},
- { "Lineout Left Playback", NULL, "Speaker Left Filter"},
- { "Lineout Left Playback", NULL, "Vibra1 DAC"},
- { "Lineout Right Playback", NULL, "Headset Right Filter"},
- { "Lineout Right Playback", NULL, "Speaker Right Filter"},
- { "Lineout Right Playback", NULL, "Vibra2 DAC"},
-
- /* Headset (AMIC1) mic */
- { "AMIC1Bias", NULL, "AMIC1"},
- { "MIC1 Enable", NULL, "AMIC1Bias"},
- { "Mic_InputL Capture Route", "AMIC", "MIC1 Enable"},
-
- /* AMIC2 */
- { "AMIC2Bias", NULL, "AMIC2"},
- { "MIC2 Enable", NULL, "AMIC2Bias"},
- { "Mic_InputR Capture Route", "AMIC", "MIC2 Enable"},
-
-
- /* Linein */
- { "LineIn Enable Left", NULL, "LINEINL"},
- { "LineIn Enable Right", NULL, "LINEINR"},
- { "Mic_InputL Capture Route", "LineIn", "LineIn Enable Left"},
- { "Mic_InputR Capture Route", "LineIn", "LineIn Enable Right"},
-
- /* ADC connection */
- { "ADC Left", NULL, "Mic_InputL Capture Route"},
- { "ADC Right", NULL, "Mic_InputR Capture Route"},
-
- /*DMIC connections */
- { "DMIC1", NULL, "DMIC12supply"},
- { "DMIC2", NULL, "DMIC12supply"},
- { "DMIC3", NULL, "DMIC34supply"},
- { "DMIC4", NULL, "DMIC34supply"},
- { "DMIC5", NULL, "DMIC56supply"},
- { "DMIC6", NULL, "DMIC56supply"},
-
- { "DMIC12Bias", NULL, "DMIC1"},
- { "DMIC12Bias", NULL, "DMIC2"},
- { "DMIC34Bias", NULL, "DMIC3"},
- { "DMIC34Bias", NULL, "DMIC4"},
- { "DMIC56Bias", NULL, "DMIC5"},
- { "DMIC56Bias", NULL, "DMIC6"},
-
- /*TX path inputs*/
- { "Txpath1 Capture Route", "ADC Left", "ADC Left"},
- { "Txpath2 Capture Route", "ADC Left", "ADC Left"},
- { "Txpath3 Capture Route", "ADC Left", "ADC Left"},
- { "Txpath4 Capture Route", "ADC Left", "ADC Left"},
- { "Txpath1 Capture Route", "ADC Right", "ADC Right"},
- { "Txpath2 Capture Route", "ADC Right", "ADC Right"},
- { "Txpath3 Capture Route", "ADC Right", "ADC Right"},
- { "Txpath4 Capture Route", "ADC Right", "ADC Right"},
- { "Txpath1 Capture Route", "DMIC1", "DMIC1"},
- { "Txpath2 Capture Route", "DMIC1", "DMIC1"},
- { "Txpath3 Capture Route", "DMIC1", "DMIC1"},
- { "Txpath4 Capture Route", "DMIC1", "DMIC1"},
- { "Txpath1 Capture Route", "DMIC2", "DMIC2"},
- { "Txpath2 Capture Route", "DMIC2", "DMIC2"},
- { "Txpath3 Capture Route", "DMIC2", "DMIC2"},
- { "Txpath4 Capture Route", "DMIC2", "DMIC2"},
- { "Txpath1 Capture Route", "DMIC3", "DMIC3"},
- { "Txpath2 Capture Route", "DMIC3", "DMIC3"},
- { "Txpath3 Capture Route", "DMIC3", "DMIC3"},
- { "Txpath4 Capture Route", "DMIC3", "DMIC3"},
- { "Txpath1 Capture Route", "DMIC4", "DMIC4"},
- { "Txpath2 Capture Route", "DMIC4", "DMIC4"},
- { "Txpath3 Capture Route", "DMIC4", "DMIC4"},
- { "Txpath4 Capture Route", "DMIC4", "DMIC4"},
- { "Txpath1 Capture Route", "DMIC5", "DMIC5"},
- { "Txpath2 Capture Route", "DMIC5", "DMIC5"},
- { "Txpath3 Capture Route", "DMIC5", "DMIC5"},
- { "Txpath4 Capture Route", "DMIC5", "DMIC5"},
- { "Txpath1 Capture Route", "DMIC6", "DMIC6"},
- { "Txpath2 Capture Route", "DMIC6", "DMIC6"},
- { "Txpath3 Capture Route", "DMIC6", "DMIC6"},
- { "Txpath4 Capture Route", "DMIC6", "DMIC6"},
-
- /* tx path */
- { "TX1 Enable", NULL, "Txpath1 Capture Route"},
- { "TX2 Enable", NULL, "Txpath2 Capture Route"},
- { "TX3 Enable", NULL, "Txpath3 Capture Route"},
- { "TX4 Enable", NULL, "Txpath4 Capture Route"},
- { "PCM_Out", NULL, "TX1 Enable"},
- { "PCM_Out", NULL, "TX2 Enable"},
- { "PCM_Out", NULL, "TX3 Enable"},
- { "PCM_Out", NULL, "TX4 Enable"},
-
-};
-
-/* speaker and headset mutes, to avoid audio pops and clicks */
-static int sn95031_pcm_hs_mute(struct snd_soc_dai *dai, int mute)
-{
- snd_soc_update_bits(dai->codec,
- SN95031_HSLVOLCTRL, BIT(7), (!mute << 7));
- snd_soc_update_bits(dai->codec,
- SN95031_HSRVOLCTRL, BIT(7), (!mute << 7));
- return 0;
-}
-
-static int sn95031_pcm_spkr_mute(struct snd_soc_dai *dai, int mute)
-{
- snd_soc_update_bits(dai->codec,
- SN95031_IHFLVOLCTRL, BIT(7), (!mute << 7));
- snd_soc_update_bits(dai->codec,
- SN95031_IHFRVOLCTRL, BIT(7), (!mute << 7));
- return 0;
-}
-
-static int sn95031_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
-{
- unsigned int format, rate;
-
- switch (params_width(params)) {
- case 16:
- format = BIT(4)|BIT(5);
- break;
-
- case 24:
- format = 0;
- break;
- default:
- return -EINVAL;
- }
- snd_soc_update_bits(dai->codec, SN95031_PCM2C2,
- BIT(4)|BIT(5), format);
-
- switch (params_rate(params)) {
- case 48000:
- pr_debug("RATE_48000\n");
- rate = 0;
- break;
-
- case 44100:
- pr_debug("RATE_44100\n");
- rate = BIT(7);
- break;
-
- default:
- pr_err("ERR rate %d\n", params_rate(params));
- return -EINVAL;
- }
- snd_soc_update_bits(dai->codec, SN95031_PCM1C1, BIT(7), rate);
-
- return 0;
-}
-
-/* Codec DAI section */
-static const struct snd_soc_dai_ops sn95031_headset_dai_ops = {
- .digital_mute = sn95031_pcm_hs_mute,
- .hw_params = sn95031_pcm_hw_params,
-};
-
-static const struct snd_soc_dai_ops sn95031_speaker_dai_ops = {
- .digital_mute = sn95031_pcm_spkr_mute,
- .hw_params = sn95031_pcm_hw_params,
-};
-
-static const struct snd_soc_dai_ops sn95031_vib1_dai_ops = {
- .hw_params = sn95031_pcm_hw_params,
-};
-
-static const struct snd_soc_dai_ops sn95031_vib2_dai_ops = {
- .hw_params = sn95031_pcm_hw_params,
-};
-
-static struct snd_soc_dai_driver sn95031_dais[] = {
-{
- .name = "SN95031 Headset",
- .playback = {
- .stream_name = "Headset",
- .channels_min = 2,
- .channels_max = 2,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .capture = {
- .stream_name = "Capture",
- .channels_min = 1,
- .channels_max = 5,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .ops = &sn95031_headset_dai_ops,
-},
-{ .name = "SN95031 Speaker",
- .playback = {
- .stream_name = "Speaker",
- .channels_min = 2,
- .channels_max = 2,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .ops = &sn95031_speaker_dai_ops,
-},
-{ .name = "SN95031 Vibra1",
- .playback = {
- .stream_name = "Vibra1",
- .channels_min = 1,
- .channels_max = 1,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .ops = &sn95031_vib1_dai_ops,
-},
-{ .name = "SN95031 Vibra2",
- .playback = {
- .stream_name = "Vibra2",
- .channels_min = 1,
- .channels_max = 1,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .ops = &sn95031_vib2_dai_ops,
-},
-};
-
-static inline void sn95031_disable_jack_btn(struct snd_soc_codec *codec)
-{
- snd_soc_write(codec, SN95031_BTNCTRL2, 0x00);
-}
-
-static inline void sn95031_enable_jack_btn(struct snd_soc_codec *codec)
-{
- snd_soc_write(codec, SN95031_BTNCTRL1, 0x77);
- snd_soc_write(codec, SN95031_BTNCTRL2, 0x01);
-}
-
-static int sn95031_get_headset_state(struct snd_soc_codec *codec,
- struct snd_soc_jack *mfld_jack)
-{
- int micbias = sn95031_get_mic_bias(codec);
-
- int jack_type = snd_soc_jack_get_type(mfld_jack, micbias);
-
- pr_debug("jack type detected = %d\n", jack_type);
- if (jack_type == SND_JACK_HEADSET)
- sn95031_enable_jack_btn(codec);
- return jack_type;
-}
-
-void sn95031_jack_detection(struct snd_soc_codec *codec,
- struct mfld_jack_data *jack_data)
-{
- unsigned int status;
- unsigned int mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_HEADSET;
-
- pr_debug("interrupt id read in sram = 0x%x\n", jack_data->intr_id);
- if (jack_data->intr_id & 0x1) {
- pr_debug("short_push detected\n");
- status = SND_JACK_HEADSET | SND_JACK_BTN_0;
- } else if (jack_data->intr_id & 0x2) {
- pr_debug("long_push detected\n");
- status = SND_JACK_HEADSET | SND_JACK_BTN_1;
- } else if (jack_data->intr_id & 0x4) {
- pr_debug("headset or headphones inserted\n");
- status = sn95031_get_headset_state(codec, jack_data->mfld_jack);
- } else if (jack_data->intr_id & 0x8) {
- pr_debug("headset or headphones removed\n");
- status = 0;
- sn95031_disable_jack_btn(codec);
- } else {
- pr_err("unidentified interrupt\n");
- return;
- }
-
- snd_soc_jack_report(jack_data->mfld_jack, status, mask);
- /* button pressed and released, so send an explicit button release */
- if ((status & SND_JACK_BTN_0) | (status & SND_JACK_BTN_1))
- snd_soc_jack_report(jack_data->mfld_jack,
- SND_JACK_HEADSET, mask);
-}
-EXPORT_SYMBOL_GPL(sn95031_jack_detection);
-
-/* codec registration */
-static int sn95031_codec_probe(struct snd_soc_codec *codec)
-{
- pr_debug("codec_probe called\n");
-
- /* PCM interface config
- * This sets the pcm rx slot configuration to max 6 slots
- * for max 4 dais (2 stereo and 2 mono)
- */
- snd_soc_write(codec, SN95031_PCM2RXSLOT01, 0x10);
- snd_soc_write(codec, SN95031_PCM2RXSLOT23, 0x32);
- snd_soc_write(codec, SN95031_PCM2RXSLOT45, 0x54);
- snd_soc_write(codec, SN95031_PCM2TXSLOT01, 0x10);
- snd_soc_write(codec, SN95031_PCM2TXSLOT23, 0x32);
- /* pcm port setting
- * This sets the pcm port to slave and the clock to 19.2 MHz, which
- * can support 6 slots; the sampling rate is set per stream in hw_params
- */
- snd_soc_write(codec, SN95031_PCM1C1, 0x00);
- snd_soc_write(codec, SN95031_PCM2C1, 0x01);
- snd_soc_write(codec, SN95031_PCM2C2, 0x0A);
- snd_soc_write(codec, SN95031_HSMIXER, BIT(0)|BIT(4));
- /* vendor vibra workaround: the vibras are muted by a
- * custom register, so unmute them
- */
- snd_soc_write(codec, SN95031_SSR5, 0x80);
- snd_soc_write(codec, SN95031_SSR6, 0x80);
- snd_soc_write(codec, SN95031_VIB1C5, 0x00);
- snd_soc_write(codec, SN95031_VIB2C5, 0x00);
- /* configure vibras for pcm port */
- snd_soc_write(codec, SN95031_VIB1C3, 0x00);
- snd_soc_write(codec, SN95031_VIB2C3, 0x00);
-
- /* soft mute ramp time */
- snd_soc_write(codec, SN95031_SOFTMUTE, 0x3);
- /* fix the initial volume at 1 dB;
- * the default is +9 dB, and
- * 1 dB gives optimal swing on the DAC and amps
- */
- snd_soc_write(codec, SN95031_HSLVOLCTRL, 0x08);
- snd_soc_write(codec, SN95031_HSRVOLCTRL, 0x08);
- snd_soc_write(codec, SN95031_IHFLVOLCTRL, 0x08);
- snd_soc_write(codec, SN95031_IHFRVOLCTRL, 0x08);
- /* dac mode and lineout workaround */
- snd_soc_write(codec, SN95031_SSR2, 0x10);
- snd_soc_write(codec, SN95031_SSR3, 0x40);
-
- return 0;
-}
-
-static const struct snd_soc_codec_driver sn95031_codec = {
- .probe = sn95031_codec_probe,
- .set_bias_level = sn95031_set_vaud_bias,
- .idle_bias_off = true,
-
- .component_driver = {
- .controls = sn95031_snd_controls,
- .num_controls = ARRAY_SIZE(sn95031_snd_controls),
- .dapm_widgets = sn95031_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(sn95031_dapm_widgets),
- .dapm_routes = sn95031_audio_map,
- .num_dapm_routes = ARRAY_SIZE(sn95031_audio_map),
- },
-};
-
-static int sn95031_device_probe(struct platform_device *pdev)
-{
- struct regmap *regmap;
-
- pr_debug("codec device probe called for %s\n", dev_name(&pdev->dev));
-
- regmap = devm_regmap_init(&pdev->dev, NULL, NULL, &sn95031_regmap);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- return snd_soc_register_codec(&pdev->dev, &sn95031_codec,
- sn95031_dais, ARRAY_SIZE(sn95031_dais));
-}
-
-static int sn95031_device_remove(struct platform_device *pdev)
-{
- pr_debug("codec device remove called\n");
- snd_soc_unregister_codec(&pdev->dev);
- return 0;
-}
-
-static struct platform_driver sn95031_codec_driver = {
- .driver = {
- .name = "sn95031",
- },
- .probe = sn95031_device_probe,
- .remove = sn95031_device_remove,
-};
-
-module_platform_driver(sn95031_codec_driver);
-
-MODULE_DESCRIPTION("ASoC TI SN95031 codec driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sn95031");
diff --git a/sound/soc/codecs/sn95031.h b/sound/soc/codecs/sn95031.h
deleted file mode 100644
index 7651fe4e6a45..000000000000
--- a/sound/soc/codecs/sn95031.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * sn95031.h - TI sn95031 Codec driver
- *
- * Copyright (C) 2010 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * Author: Harsha Priya <priya.harsha@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#ifndef _SN95031_H
-#define _SN95031_H
-
-/*register map*/
-#define SN95031_VAUD 0xDB
-#define SN95031_VHSP 0xDC
-#define SN95031_VHSN 0xDD
-#define SN95031_VIHF 0xC9
-
-#define SN95031_AUDPLLCTRL 0x240
-#define SN95031_DMICBUF0123 0x241
-#define SN95031_DMICBUF45 0x242
-#define SN95031_DMICGPO 0x244
-#define SN95031_DMICMUX 0x245
-#define SN95031_DMICLK 0x246
-#define SN95031_MICBIAS 0x247
-#define SN95031_ADCCONFIG 0x248
-#define SN95031_MICAMP1 0x249
-#define SN95031_MICAMP2 0x24A
-#define SN95031_NOISEMUX 0x24B
-#define SN95031_AUDIOMUX12 0x24C
-#define SN95031_AUDIOMUX34 0x24D
-#define SN95031_AUDIOSINC 0x24E
-#define SN95031_AUDIOTXEN 0x24F
-#define SN95031_HSEPRXCTRL 0x250
-#define SN95031_IHFRXCTRL 0x251
-#define SN95031_HSMIXER 0x256
-#define SN95031_DACCONFIG 0x257
-#define SN95031_SOFTMUTE 0x258
-#define SN95031_HSLVOLCTRL 0x259
-#define SN95031_HSRVOLCTRL 0x25A
-#define SN95031_IHFLVOLCTRL 0x25B
-#define SN95031_IHFRVOLCTRL 0x25C
-#define SN95031_DRIVEREN 0x25D
-#define SN95031_LOCTL 0x25E
-#define SN95031_VIB1C1 0x25F
-#define SN95031_VIB1C2 0x260
-#define SN95031_VIB1C3 0x261
-#define SN95031_VIB1SPIPCM1 0x262
-#define SN95031_VIB1SPIPCM2 0x263
-#define SN95031_VIB1C5 0x264
-#define SN95031_VIB2C1 0x265
-#define SN95031_VIB2C2 0x266
-#define SN95031_VIB2C3 0x267
-#define SN95031_VIB2SPIPCM1 0x268
-#define SN95031_VIB2SPIPCM2 0x269
-#define SN95031_VIB2C5 0x26A
-#define SN95031_BTNCTRL1 0x26B
-#define SN95031_BTNCTRL2 0x26C
-#define SN95031_PCM1TXSLOT01 0x26D
-#define SN95031_PCM1TXSLOT23 0x26E
-#define SN95031_PCM1TXSLOT45 0x26F
-#define SN95031_PCM1RXSLOT0_3 0x270
-#define SN95031_PCM1RXSLOT45 0x271
-#define SN95031_PCM2TXSLOT01 0x272
-#define SN95031_PCM2TXSLOT23 0x273
-#define SN95031_PCM2TXSLOT45 0x274
-#define SN95031_PCM2RXSLOT01 0x275
-#define SN95031_PCM2RXSLOT23 0x276
-#define SN95031_PCM2RXSLOT45 0x277
-#define SN95031_PCM1C1 0x278
-#define SN95031_PCM1C2 0x279
-#define SN95031_PCM1C3 0x27A
-#define SN95031_PCM2C1 0x27B
-#define SN95031_PCM2C2 0x27C
-/*end codec register defn*/
-
-/* vendor defns; these are not part of avp */
-#define SN95031_SSR2 0x381
-#define SN95031_SSR3 0x382
-#define SN95031_SSR5 0x384
-#define SN95031_SSR6 0x385
-
-/* ADC registers */
-
-#define SN95031_ADC1CNTL1 0x1C0
-#define SN95031_ADC_ENBL 0x10
-#define SN95031_ADC_START 0x08
-#define SN95031_ADC1CNTL3 0x1C2
-#define SN95031_ADCTHERM_ENBL 0x04
-#define SN95031_ADCRRDATA_ENBL 0x05
-#define SN95031_STOPBIT_MASK 16
-#define SN95031_ADCTHERM_MASK 4
-#define SN95031_ADC_CHANLS_MAX 15 /* Number of ADC channels */
-#define SN95031_ADC_LOOP_MAX (SN95031_ADC_CHANLS_MAX - 1)
-#define SN95031_ADC_NO_LOOP 0x07
-#define SN95031_AUDIO_GPIO_CTRL 0x070
-
-/* ADC channel code values */
-#define SN95031_AUDIO_DETECT_CODE 0x06
-
-/* ADC base addresses */
-#define SN95031_ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
-#define SN95031_ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
-/* multiplier to convert to mV */
-#define SN95031_ADC_ONE_LSB_MULTIPLIER 2346
-
-
-struct mfld_jack_data {
- int intr_id;
- int micbias_vol;
- struct snd_soc_jack *mfld_jack;
-};
-
-extern void sn95031_jack_detection(struct snd_soc_codec *codec,
- struct mfld_jack_data *jack_data);
-
-#endif
diff --git a/sound/soc/codecs/spdif_receiver.c b/sound/soc/codecs/spdif_receiver.c
index 7acd05140a81..c8fd6367f6c0 100644
--- a/sound/soc/codecs/spdif_receiver.c
+++ b/sound/soc/codecs/spdif_receiver.c
@@ -34,10 +34,11 @@ static const struct snd_soc_dapm_route dir_routes[] = {
#define STUB_RATES SNDRV_PCM_RATE_8000_192000
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S20_3LE | \
- SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
-static const struct snd_soc_codec_driver soc_codec_spdif_dir = {
+static struct snd_soc_codec_driver soc_codec_spdif_dir = {
.component_driver = {
.dapm_widgets = dir_widgets,
.num_dapm_widgets = ARRAY_SIZE(dir_widgets),
diff --git a/sound/soc/codecs/spdif_transmitter.c b/sound/soc/codecs/spdif_transmitter.c
index 063a64ff82d3..037aa1d45559 100644
--- a/sound/soc/codecs/spdif_transmitter.c
+++ b/sound/soc/codecs/spdif_transmitter.c
@@ -27,7 +27,8 @@
#define STUB_RATES SNDRV_PCM_RATE_8000_192000
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S20_3LE | \
- SNDRV_PCM_FMTBIT_S24_LE)
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dapm_widget dit_widgets[] = {
SND_SOC_DAPM_OUTPUT("spdif-out"),
@@ -37,7 +38,7 @@ static const struct snd_soc_dapm_route dit_routes[] = {
{ "spdif-out", NULL, "Playback" },
};
-static const struct snd_soc_codec_driver soc_codec_spdif_dit = {
+static struct snd_soc_codec_driver soc_codec_spdif_dit = {
.component_driver = {
.dapm_widgets = dit_widgets,
.num_dapm_widgets = ARRAY_SIZE(dit_widgets),
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
index a736a2a6976c..f3006f301fe8 100644
--- a/sound/soc/codecs/tas5720.c
+++ b/sound/soc/codecs/tas5720.c
@@ -36,6 +36,11 @@
/* Define how often to check (and clear) the fault status register (in ms) */
#define TAS5720_FAULT_CHECK_INTERVAL 200
+enum tas572x_type {
+ TAS5720,
+ TAS5722,
+};
+
static const char * const tas5720_supply_names[] = {
"dvdd", /* Digital power supply. Connect to 3.3-V supply. */
"pvdd", /* Class-D amp and analog power supply (connected). */
@@ -47,6 +52,7 @@ struct tas5720_data {
struct snd_soc_codec *codec;
struct regmap *regmap;
struct i2c_client *tas5720_client;
+ enum tas572x_type devtype;
struct regulator_bulk_data supplies[TAS5720_NUM_SUPPLIES];
struct delayed_work fault_check_work;
unsigned int last_fault;
@@ -264,7 +270,7 @@ out:
static int tas5720_codec_probe(struct snd_soc_codec *codec)
{
struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
- unsigned int device_id;
+ unsigned int device_id, expected_device_id;
int ret;
tas5720->codec = codec;
@@ -276,6 +282,11 @@ static int tas5720_codec_probe(struct snd_soc_codec *codec)
return ret;
}
+ /*
+ * Take a liberal approach to checking the device ID so the driver
+ * can be used even if the device ID does not match; however, issue
+ * a warning if there is a mismatch.
+ */
ret = regmap_read(tas5720->regmap, TAS5720_DEVICE_ID_REG, &device_id);
if (ret < 0) {
dev_err(codec->dev, "failed to read device ID register: %d\n",
@@ -283,13 +294,22 @@ static int tas5720_codec_probe(struct snd_soc_codec *codec)
goto probe_fail;
}
- if (device_id != TAS5720_DEVICE_ID) {
- dev_err(codec->dev, "wrong device ID. expected: %u read: %u\n",
- TAS5720_DEVICE_ID, device_id);
- ret = -ENODEV;
- goto probe_fail;
+ switch (tas5720->devtype) {
+ case TAS5720:
+ expected_device_id = TAS5720_DEVICE_ID;
+ break;
+ case TAS5722:
+ expected_device_id = TAS5722_DEVICE_ID;
+ break;
+ default:
+ dev_err(codec->dev, "unexpected private driver data\n");
+ return -EINVAL;
}
+ if (device_id != expected_device_id)
+ dev_warn(codec->dev, "wrong device ID. expected: %u read: %u\n",
+ expected_device_id, device_id);
+
/* Set device to mute */
ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
TAS5720_MUTE, TAS5720_MUTE);
@@ -446,6 +466,15 @@ static const struct regmap_config tas5720_regmap_config = {
.volatile_reg = tas5720_is_volatile_reg,
};
+static const struct regmap_config tas5722_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = TAS5722_MAX_REG,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = tas5720_is_volatile_reg,
+};
+
/*
* DAC analog gain. There are four discrete values to select from, ranging
* from 19.2 dB to 26.3 dB.
@@ -544,6 +573,7 @@ static int tas5720_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct tas5720_data *data;
+ const struct regmap_config *regmap_config;
int ret;
int i;
@@ -552,7 +582,20 @@ static int tas5720_probe(struct i2c_client *client,
return -ENOMEM;
data->tas5720_client = client;
- data->regmap = devm_regmap_init_i2c(client, &tas5720_regmap_config);
+ data->devtype = id->driver_data;
+
+ switch (id->driver_data) {
+ case TAS5720:
+ regmap_config = &tas5720_regmap_config;
+ break;
+ case TAS5722:
+ regmap_config = &tas5722_regmap_config;
+ break;
+ default:
+ dev_err(dev, "unexpected private driver data\n");
+ return -EINVAL;
+ }
+ data->regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(data->regmap)) {
ret = PTR_ERR(data->regmap);
dev_err(dev, "failed to allocate register map: %d\n", ret);
@@ -592,7 +635,8 @@ static int tas5720_remove(struct i2c_client *client)
}
static const struct i2c_device_id tas5720_id[] = {
- { "tas5720", 0 },
+ { "tas5720", TAS5720 },
+ { "tas5722", TAS5722 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tas5720_id);
@@ -600,6 +644,7 @@ MODULE_DEVICE_TABLE(i2c, tas5720_id);
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id tas5720_of_match[] = {
{ .compatible = "ti,tas5720", },
+ { .compatible = "ti,tas5722", },
{ },
};
MODULE_DEVICE_TABLE(of, tas5720_of_match);
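
The TAS5722 support above hinges on the driver_data field of the matching i2c_device_id entry: tas5720_probe() stores it as devtype and switches on it to pick the regmap configuration, and tas5720_codec_probe() reuses it to pick the expected device ID. A minimal sketch of that dispatch pattern, with hypothetical mydev names standing in for the real symbols:

enum mydev_type { MYDEV_A, MYDEV_B };

static const struct i2c_device_id mydev_id[] = {
	{ "mydev-a", MYDEV_A },	/* driver_data carries the variant */
	{ "mydev-b", MYDEV_B },
	{ }
};

static int mydev_probe(struct i2c_client *client,
		       const struct i2c_device_id *id)
{
	const struct regmap_config *cfg;

	switch (id->driver_data) {
	case MYDEV_A:
		cfg = &mydev_a_regmap_config;	/* assumed to exist */
		break;
	case MYDEV_B:
		cfg = &mydev_b_regmap_config;	/* assumed to exist */
		break;
	default:
		return -EINVAL;
	}

	return PTR_ERR_OR_ZERO(devm_regmap_init_i2c(client, cfg));
}
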
diff --git a/sound/soc/codecs/tas5720.h b/sound/soc/codecs/tas5720.h
index 3d077c779b12..1dda3095961d 100644
--- a/sound/soc/codecs/tas5720.h
+++ b/sound/soc/codecs/tas5720.h
@@ -30,8 +30,14 @@
#define TAS5720_DIGITAL_CLIP1_REG 0x11
#define TAS5720_MAX_REG TAS5720_DIGITAL_CLIP1_REG
+/* Additional TAS5722-specific Registers */
+#define TAS5722_DIGITAL_CTRL2_REG 0x13
+#define TAS5722_ANALOG_CTRL2_REG 0x14
+#define TAS5722_MAX_REG TAS5722_ANALOG_CTRL2_REG
+
/* TAS5720_DEVICE_ID_REG */
#define TAS5720_DEVICE_ID 0x01
+#define TAS5722_DEVICE_ID 0x12
/* TAS5720_POWER_CTRL_REG */
#define TAS5720_DIG_CLIP_MASK GENMASK(7, 2)
@@ -51,6 +57,7 @@
#define TAS5720_SAIF_FORMAT_MASK GENMASK(2, 0)
/* TAS5720_DIGITAL_CTRL2_REG */
+#define TAS5722_VOL_RAMP_RATE BIT(6)
#define TAS5720_MUTE BIT(4)
#define TAS5720_TDM_SLOT_SEL_MASK GENMASK(2, 0)
@@ -87,4 +94,28 @@
#define TAS5720_CLIP1_MASK GENMASK(7, 2)
#define TAS5720_CLIP1_SHIFT (0x2)
+/* TAS5722_DIGITAL_CTRL2_REG */
+#define TAS5722_HPF_3_7HZ (0x0 << 5)
+#define TAS5722_HPF_7_4HZ (0x1 << 5)
+#define TAS5722_HPF_14_9HZ (0x2 << 5)
+#define TAS5722_HPF_29_7HZ (0x3 << 5)
+#define TAS5722_HPF_59_4HZ (0x4 << 5)
+#define TAS5722_HPF_118_4HZ (0x5 << 5)
+#define TAS5722_HPF_235_0HZ (0x6 << 5)
+#define TAS5722_HPF_463_2HZ (0x7 << 5)
+#define TAS5722_HPF_MASK GENMASK(7, 5)
+#define TAS5722_AUTO_SLEEP_OFF (0x0 << 3)
+#define TAS5722_AUTO_SLEEP_1024LR (0x1 << 3)
+#define TAS5722_AUTO_SLEEP_65536LR (0x2 << 3)
+#define TAS5722_AUTO_SLEEP_262144LR (0x3 << 3)
+#define TAS5722_AUTO_SLEEP_MASK GENMASK(4, 3)
+#define TAS5722_TDM_SLOT_16B BIT(2)
+#define TAS5722_MCLK_PIN_CFG BIT(1)
+#define TAS5722_VOL_CONTROL_LSB BIT(0)
+
+/* TAS5722_ANALOG_CTRL2_REG */
+#define TAS5722_FAULTZ_PU BIT(3)
+#define TAS5722_VREG_LVL BIT(2)
+#define TAS5722_PWR_TUNE BIT(0)
+
#endif /* __TAS5720_H__ */
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
new file mode 100644
index 000000000000..49b87f6e85bf
--- /dev/null
+++ b/sound/soc/codecs/tas6424.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ALSA SoC Texas Instruments TAS6424 Quad-Channel Audio Amplifier
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tas6424.h"
+
+/* Define how often to check (and clear) the fault status register (in ms) */
+#define TAS6424_FAULT_CHECK_INTERVAL 200
+
+static const char * const tas6424_supply_names[] = {
+ "dvdd", /* Digital power supply. Connect to 3.3-V supply. */
+ "vbat", /* Supply used for higher voltage analog circuits. */
+ "pvdd", /* Class-D amp output FETs supply. */
+};
+#define TAS6424_NUM_SUPPLIES ARRAY_SIZE(tas6424_supply_names)
+
+struct tas6424_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_bulk_data supplies[TAS6424_NUM_SUPPLIES];
+ struct delayed_work fault_check_work;
+ unsigned int last_fault1;
+ unsigned int last_fault2;
+ unsigned int last_warn;
+};
+
+/*
+ * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB steps. Note that
+ * setting the gain below -100 dB (register value <0x7) is effectively a MUTE
+ * as per device datasheet.
+ */
+static DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0);
+
+static const struct snd_kcontrol_new tas6424_snd_controls[] = {
+ SOC_SINGLE_TLV("Speaker Driver CH1 Playback Volume",
+ TAS6424_CH1_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+ SOC_SINGLE_TLV("Speaker Driver CH2 Playback Volume",
+ TAS6424_CH2_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+ SOC_SINGLE_TLV("Speaker Driver CH3 Playback Volume",
+ TAS6424_CH3_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+ SOC_SINGLE_TLV("Speaker Driver CH4 Playback Volume",
+ TAS6424_CH4_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+};
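
The dac_tlv scale above is linear in 0.5 dB steps from a -103.5 dB floor, so register 0x00 maps to -103.5 dB, the 0xcf power-on default (see tas6424_reg_defaults below) maps to exactly 0 dB, and 0xff maps to +24 dB. A standalone sanity check of that arithmetic (plain user-space C, not driver code):

#include <stdio.h>

/* DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0): min and step in 0.01 dB */
static double tas6424_reg_to_db(unsigned int val)
{
	return (-10350 + (int)val * 50) / 100.0;
}

int main(void)
{
	printf("0x00 -> %6.1f dB\n", tas6424_reg_to_db(0x00)); /* -103.5 */
	printf("0xcf -> %6.1f dB\n", tas6424_reg_to_db(0xcf)); /*    0.0 */
	printf("0xff -> %6.1f dB\n", tas6424_reg_to_db(0xff)); /*  +24.0 */
	return 0;
}
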
+
+static int tas6424_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "%s() event=0x%0x\n", __func__, event);
+
+ if (event & SND_SOC_DAPM_POST_PMU) {
+ /* Observe codec shutdown-to-active time */
+ msleep(12);
+
+ /* Turn on TAS6424 periodic fault checking/handling */
+ tas6424->last_fault1 = 0;
+ tas6424->last_fault2 = 0;
+ tas6424->last_warn = 0;
+ schedule_delayed_work(&tas6424->fault_check_work,
+ msecs_to_jiffies(TAS6424_FAULT_CHECK_INTERVAL));
+ } else if (event & SND_SOC_DAPM_PRE_PMD) {
+ /* Disable TAS6424 periodic fault checking/handling */
+ cancel_delayed_work_sync(&tas6424->fault_check_work);
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget tas6424_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas6424_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_OUTPUT("OUT")
+};
+
+static const struct snd_soc_dapm_route tas6424_audio_map[] = {
+ { "DAC", NULL, "DAC IN" },
+ { "OUT", NULL, "DAC" },
+};
+
+static int tas6424_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int rate = params_rate(params);
+ unsigned int width = params_width(params);
+ u8 sap_ctrl = 0;
+
+ dev_dbg(codec->dev, "%s() rate=%u width=%u\n", __func__, rate, width);
+
+ switch (rate) {
+ case 44100:
+ sap_ctrl |= TAS6424_SAP_RATE_44100;
+ break;
+ case 48000:
+ sap_ctrl |= TAS6424_SAP_RATE_48000;
+ break;
+ case 96000:
+ sap_ctrl |= TAS6424_SAP_RATE_96000;
+ break;
+ default:
+ dev_err(codec->dev, "unsupported sample rate: %u\n", rate);
+ return -EINVAL;
+ }
+
+ switch (width) {
+ case 16:
+ sap_ctrl |= TAS6424_SAP_TDM_SLOT_SZ_16;
+ break;
+ case 24:
+ break;
+ default:
+ dev_err(codec->dev, "unsupported sample width: %u\n", width);
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, TAS6424_SAP_CTRL,
+ TAS6424_SAP_RATE_MASK |
+ TAS6424_SAP_TDM_SLOT_SZ_16,
+ sap_ctrl);
+
+ return 0;
+}
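
hw_params() above ORs a 2-bit rate field (bits 7:6) and, for 16-bit streams, the TDM slot-size bit into sap_ctrl; with the tas6424.h definitions further down, a 48 kHz/16-bit stream therefore programs (0x01 << 6) | BIT(4) = 0x50 into the masked bits. A quick standalone check of that composition (constants copied from the header):

#include <stdio.h>

#define BIT(n)				(1u << (n))
#define TAS6424_SAP_RATE_48000		(0x01 << 6)
#define TAS6424_SAP_TDM_SLOT_SZ_16	BIT(4)

int main(void)
{
	/* 48 kHz, 16-bit: rate field in bits 7:6, slot-size flag in bit 4 */
	unsigned int sap = TAS6424_SAP_RATE_48000 | TAS6424_SAP_TDM_SLOT_SZ_16;

	printf("sap_ctrl bits = 0x%02x\n", sap);	/* prints 0x50 */
	return 0;
}
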
+
+static int tas6424_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u8 serial_format = 0;
+
+ dev_dbg(codec->dev, "%s() fmt=0x%0x\n", __func__, fmt);
+
+ /* clock masters */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+ return -EINVAL;
+ }
+
+ /* signal polarity */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI clock signal polarity\n");
+ return -EINVAL;
+ }
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ serial_format |= TAS6424_SAP_I2S;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ serial_format |= TAS6424_SAP_DSP;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ /*
+ * We can use the fact that the TAS6424 does not care about the
+ * LRCLK duty cycle during TDM to receive DSP_B formatted data
+ * in LEFTJ mode (no delaying of the 1st data bit).
+ */
+ serial_format |= TAS6424_SAP_LEFTJ;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ serial_format |= TAS6424_SAP_LEFTJ;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI interface format\n");
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, TAS6424_SAP_CTRL,
+ TAS6424_SAP_FMT_MASK, serial_format);
+
+ return 0;
+}
+
+static int tas6424_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int first_slot, last_slot;
+ bool sap_tdm_slot_last;
+
+ dev_dbg(codec->dev, "%s() tx_mask=%d rx_mask=%d\n", __func__,
+ tx_mask, rx_mask);
+
+ if (!tx_mask || !rx_mask)
+ return 0; /* nothing needed to disable TDM mode */
+
+ /*
+ * Determine the first and last slots being requested so we can more
+ * easily enforce certain constraints, as the TAS6424's TDM interface
+ * is not fully configurable.
+ */
+ first_slot = __ffs(tx_mask);
+ last_slot = __fls(rx_mask);
+
+ if (last_slot - first_slot != 4) {
+ dev_err(codec->dev, "tdm mask must cover 4 contiguous slots\n");
+ return -EINVAL;
+ }
+
+ switch (first_slot) {
+ case 0:
+ sap_tdm_slot_last = false;
+ break;
+ case 4:
+ sap_tdm_slot_last = true;
+ break;
+ default:
+ dev_err(codec->dev, "tdm mask must start at slot 0 or 4\n");
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, TAS6424_SAP_CTRL, TAS6424_SAP_TDM_SLOT_LAST,
+ sap_tdm_slot_last ? TAS6424_SAP_TDM_SLOT_LAST : 0);
+
+ return 0;
+}
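
The slot logic above relies on the kernel's __ffs()/__fls() helpers, which return the index of the lowest and highest set bit, so a mask covering slots 4-7 (0xf0) yields a first slot of 4 and selects the TAS6424_SAP_TDM_SLOT_LAST placement. A user-space illustration with GCC builtins standing in for the kernel helpers (a sketch, not driver code):

#include <stdio.h>

/* Stand-ins for the kernel's __ffs()/__fls(); undefined for a zero mask. */
static unsigned int my_ffs(unsigned int mask) { return __builtin_ctz(mask); }
static unsigned int my_fls(unsigned int mask) { return 31 - __builtin_clz(mask); }

int main(void)
{
	unsigned int mask = 0xf0;	/* slots 4..7 requested */

	printf("first slot = %u, last slot = %u\n",
	       my_ffs(mask), my_fls(mask));	/* first slot = 4, last slot = 7 */
	return 0;
}
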
+
+static int tas6424_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int val;
+
+ dev_dbg(codec->dev, "%s() mute=%d\n", __func__, mute);
+
+ if (mute)
+ val = TAS6424_ALL_STATE_MUTE;
+ else
+ val = TAS6424_ALL_STATE_PLAY;
+
+ snd_soc_write(codec, TAS6424_CH_STATE_CTRL, val);
+
+ return 0;
+}
+
+static int tas6424_power_off(struct snd_soc_codec *codec)
+{
+ struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ snd_soc_write(codec, TAS6424_CH_STATE_CTRL, TAS6424_ALL_STATE_HIZ);
+
+ regcache_cache_only(tas6424->regmap, true);
+ regcache_mark_dirty(tas6424->regmap);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tas6424_power_on(struct snd_soc_codec *codec)
+{
+ struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(tas6424->regmap, false);
+
+ ret = regcache_sync(tas6424->regmap);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to sync regcache: %d\n", ret);
+ return ret;
+ }
+
+ snd_soc_write(codec, TAS6424_CH_STATE_CTRL, TAS6424_ALL_STATE_MUTE);
+
+ /* any time we come out of HIZ, the output channels automatically run DC
+ * load diagnostics; wait here until this completes
+ */
+ msleep(230);
+
+ return 0;
+}
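
tas6424_power_off()/_power_on() above follow the usual regmap cache sequence for a part that loses register state with its supplies: switch the cache to cache-only mode and mark it dirty before power-down, then leave cache-only mode and let regcache_sync() replay the cached values after power-up. A condensed sketch of that idiom (error handling elided; not the driver's exact code):

/* Sketch only: the regmap power-cycle idiom used by the driver above. */
static void power_cycle(struct regmap *map,
			struct regulator_bulk_data *supplies, int n)
{
	regcache_cache_only(map, true);		/* stop touching the bus */
	regcache_mark_dirty(map);		/* everything must be re-sent */
	regulator_bulk_disable(n, supplies);

	regulator_bulk_enable(n, supplies);
	regcache_cache_only(map, false);	/* hardware reachable again */
	regcache_sync(map);			/* replay cached registers */
}
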
+
+static int tas6424_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ dev_dbg(codec->dev, "%s() level=%d\n", __func__, level);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+ tas6424_power_on(codec);
+ break;
+ case SND_SOC_BIAS_OFF:
+ tas6424_power_off(codec);
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tas6424 = {
+ .set_bias_level = tas6424_set_bias_level,
+ .idle_bias_off = true,
+
+ .component_driver = {
+ .controls = tas6424_snd_controls,
+ .num_controls = ARRAY_SIZE(tas6424_snd_controls),
+ .dapm_widgets = tas6424_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas6424_dapm_widgets),
+ .dapm_routes = tas6424_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas6424_audio_map),
+ },
+};
+
+static struct snd_soc_dai_ops tas6424_speaker_dai_ops = {
+ .hw_params = tas6424_hw_params,
+ .set_fmt = tas6424_set_dai_fmt,
+ .set_tdm_slot = tas6424_set_dai_tdm_slot,
+ .digital_mute = tas6424_mute,
+};
+
+static struct snd_soc_dai_driver tas6424_dai[] = {
+ {
+ .name = "tas6424-amplifier",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = TAS6424_RATES,
+ .formats = TAS6424_FORMATS,
+ },
+ .ops = &tas6424_speaker_dai_ops,
+ },
+};
+
+static void tas6424_fault_check_work(struct work_struct *work)
+{
+ struct tas6424_data *tas6424 = container_of(work, struct tas6424_data,
+ fault_check_work.work);
+ struct device *dev = tas6424->dev;
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT1, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to read FAULT1 register: %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * Ignore any clock faults as there is no clean way to check for them.
+ * We would need to start checking for those faults *after* the SAIF
+ * stream has been set up, and stop checking *before* the stream is
+ * stopped to avoid any false positives. However, there are no
+ * appropriate hooks to monitor these events.
+ */
+ reg &= TAS6424_FAULT_PVDD_OV |
+ TAS6424_FAULT_VBAT_OV |
+ TAS6424_FAULT_PVDD_UV |
+ TAS6424_FAULT_VBAT_UV;
+
+ if (reg)
+ goto check_global_fault2_reg;
+
+ /*
+ * Only flag errors once for a given occurrence. This is needed as
+ * the TAS6424 takes time to clear the fault condition internally,
+ * during which we don't want to bombard the system with the same
+ * error message over and over.
+ */
+ if ((reg & TAS6424_FAULT_PVDD_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_OV))
+ dev_crit(dev, "experienced a PVDD overvoltage fault\n");
+
+ if ((reg & TAS6424_FAULT_VBAT_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_VBAT_OV))
+ dev_crit(dev, "experienced a VBAT overvoltage fault\n");
+
+ if ((reg & TAS6424_FAULT_PVDD_UV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_UV))
+ dev_crit(dev, "experienced a PVDD undervoltage fault\n");
+
+ if ((reg & TAS6424_FAULT_VBAT_UV) && !(tas6424->last_fault1 & TAS6424_FAULT_VBAT_UV))
+ dev_crit(dev, "experienced a VBAT undervoltage fault\n");
+
+ /* Store current fault1 value so we can detect any changes next time */
+ tas6424->last_fault1 = reg;
+
+check_global_fault2_reg:
+ ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT2, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to read FAULT2 register: %d\n", ret);
+ goto out;
+ }
+
+ reg &= TAS6424_FAULT_OTSD |
+ TAS6424_FAULT_OTSD_CH1 |
+ TAS6424_FAULT_OTSD_CH2 |
+ TAS6424_FAULT_OTSD_CH3 |
+ TAS6424_FAULT_OTSD_CH4;
+
+ if (!reg)
+ goto check_warn_reg;
+
+ if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
+ dev_crit(dev, "experienced a global overtemp shutdown\n");
+
+ if ((reg & TAS6424_FAULT_OTSD_CH1) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH1))
+ dev_crit(dev, "experienced an overtemp shutdown on CH1\n");
+
+ if ((reg & TAS6424_FAULT_OTSD_CH2) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH2))
+ dev_crit(dev, "experienced an overtemp shutdown on CH2\n");
+
+ if ((reg & TAS6424_FAULT_OTSD_CH3) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH3))
+ dev_crit(dev, "experienced an overtemp shutdown on CH3\n");
+
+ if ((reg & TAS6424_FAULT_OTSD_CH4) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH4))
+ dev_crit(dev, "experienced an overtemp shutdown on CH4\n");
+
+ /* Store current fault2 value so we can detect any changes next time */
+ tas6424->last_fault2 = reg;
+
+check_warn_reg:
+ ret = regmap_read(tas6424->regmap, TAS6424_WARN, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to read WARN register: %d\n", ret);
+ goto out;
+ }
+
+ reg &= TAS6424_WARN_VDD_UV |
+ TAS6424_WARN_VDD_POR |
+ TAS6424_WARN_VDD_OTW |
+ TAS6424_WARN_VDD_OTW_CH1 |
+ TAS6424_WARN_VDD_OTW_CH2 |
+ TAS6424_WARN_VDD_OTW_CH3 |
+ TAS6424_WARN_VDD_OTW_CH4;
+
+ if (!reg)
+ goto out;
+
+ if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
+ dev_warn(dev, "experienced a VDD under voltage condition\n");
+
+ if ((reg & TAS6424_WARN_VDD_POR) && !(tas6424->last_warn & TAS6424_WARN_VDD_POR))
+ dev_warn(dev, "experienced a VDD POR condition\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW))
+ dev_warn(dev, "experienced a global overtemp warning\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW_CH1) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH1))
+ dev_warn(dev, "experienced an overtemp warning on CH1\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW_CH2) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH2))
+ dev_warn(dev, "experienced an overtemp warning on CH2\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW_CH3) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH3))
+ dev_warn(dev, "experienced an overtemp warning on CH3\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW_CH4) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH4))
+ dev_warn(dev, "experienced an overtemp warning on CH4\n");
+
+ /* Store current warn value so we can detect any changes next time */
+ tas6424->last_warn = reg;
+
+ /* Clear any faults by toggling the CLEAR_FAULT control bit */
+ ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
+ TAS6424_CLEAR_FAULT, TAS6424_CLEAR_FAULT);
+ if (ret < 0)
+ dev_err(dev, "failed to write MISC_CTRL3 register: %d\n", ret);
+
+ ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
+ TAS6424_CLEAR_FAULT, 0);
+ if (ret < 0)
+ dev_err(dev, "failed to write MISC_CTRL3 register: %d\n", ret);
+
+out:
+ /* Schedule the next fault check at the specified interval */
+ schedule_delayed_work(&tas6424->fault_check_work,
+ msecs_to_jiffies(TAS6424_FAULT_CHECK_INTERVAL));
+}
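
The per-bit tests in the worker above amount to rising-edge detection against the stored last_* snapshot: a persistent fault is logged once when it first appears, not on every 200 ms poll. The pattern reduces to a small helper (a distilled sketch, not the driver's exact code):

/* Return only the bits that newly appeared since the previous poll. */
static unsigned int rising_bits(unsigned int reg, unsigned int *last)
{
	unsigned int rising = reg & ~*last;	/* set now, clear before */

	*last = reg;				/* snapshot for next time */
	return rising;
}
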
+
+static const struct reg_default tas6424_reg_defaults[] = {
+ { TAS6424_MODE_CTRL, 0x00 },
+ { TAS6424_MISC_CTRL1, 0x32 },
+ { TAS6424_MISC_CTRL2, 0x62 },
+ { TAS6424_SAP_CTRL, 0x04 },
+ { TAS6424_CH_STATE_CTRL, 0x55 },
+ { TAS6424_CH1_VOL_CTRL, 0xcf },
+ { TAS6424_CH2_VOL_CTRL, 0xcf },
+ { TAS6424_CH3_VOL_CTRL, 0xcf },
+ { TAS6424_CH4_VOL_CTRL, 0xcf },
+ { TAS6424_DC_DIAG_CTRL1, 0x00 },
+ { TAS6424_DC_DIAG_CTRL2, 0x11 },
+ { TAS6424_DC_DIAG_CTRL3, 0x11 },
+ { TAS6424_PIN_CTRL, 0xff },
+ { TAS6424_AC_DIAG_CTRL1, 0x00 },
+ { TAS6424_MISC_CTRL3, 0x00 },
+ { TAS6424_CLIP_CTRL, 0x01 },
+ { TAS6424_CLIP_WINDOW, 0x14 },
+ { TAS6424_CLIP_WARN, 0x00 },
+ { TAS6424_CBC_STAT, 0x00 },
+ { TAS6424_MISC_CTRL4, 0x40 },
+};
+
+static bool tas6424_is_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TAS6424_MODE_CTRL:
+ case TAS6424_MISC_CTRL1:
+ case TAS6424_MISC_CTRL2:
+ case TAS6424_SAP_CTRL:
+ case TAS6424_CH_STATE_CTRL:
+ case TAS6424_CH1_VOL_CTRL:
+ case TAS6424_CH2_VOL_CTRL:
+ case TAS6424_CH3_VOL_CTRL:
+ case TAS6424_CH4_VOL_CTRL:
+ case TAS6424_DC_DIAG_CTRL1:
+ case TAS6424_DC_DIAG_CTRL2:
+ case TAS6424_DC_DIAG_CTRL3:
+ case TAS6424_PIN_CTRL:
+ case TAS6424_AC_DIAG_CTRL1:
+ case TAS6424_MISC_CTRL3:
+ case TAS6424_CLIP_CTRL:
+ case TAS6424_CLIP_WINDOW:
+ case TAS6424_CLIP_WARN:
+ case TAS6424_CBC_STAT:
+ case TAS6424_MISC_CTRL4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tas6424_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TAS6424_DC_LOAD_DIAG_REP12:
+ case TAS6424_DC_LOAD_DIAG_REP34:
+ case TAS6424_DC_LOAD_DIAG_REPLO:
+ case TAS6424_CHANNEL_STATE:
+ case TAS6424_CHANNEL_FAULT:
+ case TAS6424_GLOB_FAULT1:
+ case TAS6424_GLOB_FAULT2:
+ case TAS6424_WARN:
+ case TAS6424_AC_LOAD_DIAG_REP1:
+ case TAS6424_AC_LOAD_DIAG_REP2:
+ case TAS6424_AC_LOAD_DIAG_REP3:
+ case TAS6424_AC_LOAD_DIAG_REP4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tas6424_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .writeable_reg = tas6424_is_writable_reg,
+ .volatile_reg = tas6424_is_volatile_reg,
+
+ .max_register = TAS6424_MAX,
+ .reg_defaults = tas6424_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tas6424_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tas6424_of_ids[] = {
+ { .compatible = "ti,tas6424", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tas6424_of_ids);
+#endif
+
+static int tas6424_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tas6424_data *tas6424;
+ int ret;
+ int i;
+
+ tas6424 = devm_kzalloc(dev, sizeof(*tas6424), GFP_KERNEL);
+ if (!tas6424)
+ return -ENOMEM;
+ dev_set_drvdata(dev, tas6424);
+
+ tas6424->dev = dev;
+
+ tas6424->regmap = devm_regmap_init_i2c(client, &tas6424_regmap_config);
+ if (IS_ERR(tas6424->regmap)) {
+ ret = PTR_ERR(tas6424->regmap);
+ dev_err(dev, "unable to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tas6424->supplies); i++)
+ tas6424->supplies[i].supply = tas6424_supply_names[i];
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret) {
+ dev_err(dev, "unable to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret) {
+ dev_err(dev, "unable to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset device to establish well-defined startup state */
+ ret = regmap_update_bits(tas6424->regmap, TAS6424_MODE_CTRL,
+ TAS6424_RESET, TAS6424_RESET);
+ if (ret) {
+ dev_err(dev, "unable to reset device: %d\n", ret);
+ return ret;
+ }
+
+ INIT_DELAYED_WORK(&tas6424->fault_check_work, tas6424_fault_check_work);
+
+ ret = snd_soc_register_codec(dev, &soc_codec_dev_tas6424,
+ tas6424_dai, ARRAY_SIZE(tas6424_dai));
+ if (ret < 0) {
+ dev_err(dev, "unable to register codec: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tas6424_i2c_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct tas6424_data *tas6424 = dev_get_drvdata(dev);
+ int ret;
+
+ snd_soc_unregister_codec(dev);
+
+ cancel_delayed_work_sync(&tas6424->fault_check_work);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret < 0) {
+ dev_err(dev, "unable to disable supplies: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id tas6424_i2c_ids[] = {
+ { "tas6424", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tas6424_i2c_ids);
+
+static struct i2c_driver tas6424_i2c_driver = {
+ .driver = {
+ .name = "tas6424",
+ .of_match_table = of_match_ptr(tas6424_of_ids),
+ },
+ .probe = tas6424_i2c_probe,
+ .remove = tas6424_i2c_remove,
+ .id_table = tas6424_i2c_ids,
+};
+module_i2c_driver(tas6424_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TAS6424 Audio amplifier driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tas6424.h b/sound/soc/codecs/tas6424.h
new file mode 100644
index 000000000000..430588328a06
--- /dev/null
+++ b/sound/soc/codecs/tas6424.h
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ALSA SoC Texas Instruments TAS6424 Quad-Channel Audio Amplifier
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#ifndef __TAS6424_H__
+#define __TAS6424_H__
+
+#define TAS6424_RATES (SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_96000)
+
+#define TAS6424_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+/* Register Address Map */
+#define TAS6424_MODE_CTRL 0x00
+#define TAS6424_MISC_CTRL1 0x01
+#define TAS6424_MISC_CTRL2 0x02
+#define TAS6424_SAP_CTRL 0x03
+#define TAS6424_CH_STATE_CTRL 0x04
+#define TAS6424_CH1_VOL_CTRL 0x05
+#define TAS6424_CH2_VOL_CTRL 0x06
+#define TAS6424_CH3_VOL_CTRL 0x07
+#define TAS6424_CH4_VOL_CTRL 0x08
+#define TAS6424_DC_DIAG_CTRL1 0x09
+#define TAS6424_DC_DIAG_CTRL2 0x0a
+#define TAS6424_DC_DIAG_CTRL3 0x0b
+#define TAS6424_DC_LOAD_DIAG_REP12 0x0c
+#define TAS6424_DC_LOAD_DIAG_REP34 0x0d
+#define TAS6424_DC_LOAD_DIAG_REPLO 0x0e
+#define TAS6424_CHANNEL_STATE 0x0f
+#define TAS6424_CHANNEL_FAULT 0x10
+#define TAS6424_GLOB_FAULT1 0x11
+#define TAS6424_GLOB_FAULT2 0x12
+#define TAS6424_WARN 0x13
+#define TAS6424_PIN_CTRL 0x14
+#define TAS6424_AC_DIAG_CTRL1 0x15
+#define TAS6424_AC_DIAG_CTRL2 0x16
+#define TAS6424_AC_LOAD_DIAG_REP1 0x17
+#define TAS6424_AC_LOAD_DIAG_REP2 0x18
+#define TAS6424_AC_LOAD_DIAG_REP3 0x19
+#define TAS6424_AC_LOAD_DIAG_REP4 0x1a
+#define TAS6424_MISC_CTRL3 0x21
+#define TAS6424_CLIP_CTRL 0x22
+#define TAS6424_CLIP_WINDOW 0x23
+#define TAS6424_CLIP_WARN 0x24
+#define TAS6424_CBC_STAT 0x25
+#define TAS6424_MISC_CTRL4 0x26
+#define TAS6424_MAX TAS6424_MISC_CTRL4
+
+/* TAS6424_MODE_CTRL_REG */
+#define TAS6424_RESET BIT(7)
+
+/* TAS6424_SAP_CTRL_REG */
+#define TAS6424_SAP_RATE_MASK GENMASK(7, 6)
+#define TAS6424_SAP_RATE_44100 (0x00 << 6)
+#define TAS6424_SAP_RATE_48000 (0x01 << 6)
+#define TAS6424_SAP_RATE_96000 (0x02 << 6)
+#define TAS6424_SAP_TDM_SLOT_LAST BIT(5)
+#define TAS6424_SAP_TDM_SLOT_SZ_16 BIT(4)
+#define TAS6424_SAP_TDM_SLOT_SWAP BIT(3)
+#define TAS6424_SAP_FMT_MASK GENMASK(2, 0)
+#define TAS6424_SAP_RIGHTJ_24 (0x00 << 0)
+#define TAS6424_SAP_RIGHTJ_20 (0x01 << 0)
+#define TAS6424_SAP_RIGHTJ_18 (0x02 << 0)
+#define TAS6424_SAP_RIGHTJ_16 (0x03 << 0)
+#define TAS6424_SAP_I2S (0x04 << 0)
+#define TAS6424_SAP_LEFTJ (0x05 << 0)
+#define TAS6424_SAP_DSP (0x06 << 0)
+
+/* TAS6424_CH_STATE_CTRL_REG */
+#define TAS6424_CH1_STATE_MASK GENMASK(7, 6)
+#define TAS6424_CH1_STATE_PLAY (0x00 << 6)
+#define TAS6424_CH1_STATE_HIZ (0x01 << 6)
+#define TAS6424_CH1_STATE_MUTE (0x02 << 6)
+#define TAS6424_CH1_STATE_DIAG (0x03 << 6)
+#define TAS6424_CH2_STATE_MASK GENMASK(5, 4)
+#define TAS6424_CH2_STATE_PLAY (0x00 << 4)
+#define TAS6424_CH2_STATE_HIZ (0x01 << 4)
+#define TAS6424_CH2_STATE_MUTE (0x02 << 4)
+#define TAS6424_CH2_STATE_DIAG (0x03 << 4)
+#define TAS6424_CH3_STATE_MASK GENMASK(3, 2)
+#define TAS6424_CH3_STATE_PLAY (0x00 << 2)
+#define TAS6424_CH3_STATE_HIZ (0x01 << 2)
+#define TAS6424_CH3_STATE_MUTE (0x02 << 2)
+#define TAS6424_CH3_STATE_DIAG (0x03 << 2)
+#define TAS6424_CH4_STATE_MASK GENMASK(1, 0)
+#define TAS6424_CH4_STATE_PLAY (0x00 << 0)
+#define TAS6424_CH4_STATE_HIZ (0x01 << 0)
+#define TAS6424_CH4_STATE_MUTE (0x02 << 0)
+#define TAS6424_CH4_STATE_DIAG (0x03 << 0)
+#define TAS6424_ALL_STATE_PLAY (TAS6424_CH1_STATE_PLAY | \
+ TAS6424_CH2_STATE_PLAY | \
+ TAS6424_CH3_STATE_PLAY | \
+ TAS6424_CH4_STATE_PLAY)
+#define TAS6424_ALL_STATE_HIZ (TAS6424_CH1_STATE_HIZ | \
+ TAS6424_CH2_STATE_HIZ | \
+ TAS6424_CH3_STATE_HIZ | \
+ TAS6424_CH4_STATE_HIZ)
+#define TAS6424_ALL_STATE_MUTE (TAS6424_CH1_STATE_MUTE | \
+ TAS6424_CH2_STATE_MUTE | \
+ TAS6424_CH3_STATE_MUTE | \
+ TAS6424_CH4_STATE_MUTE)
+#define TAS6424_ALL_STATE_DIAG (TAS6424_CH1_STATE_DIAG | \
+ TAS6424_CH2_STATE_DIAG | \
+ TAS6424_CH3_STATE_DIAG | \
+ TAS6424_CH4_STATE_DIAG)
+
+/* TAS6424_GLOB_FAULT1_REG */
+#define TAS6424_FAULT_CLOCK BIT(4)
+#define TAS6424_FAULT_PVDD_OV BIT(3)
+#define TAS6424_FAULT_VBAT_OV BIT(2)
+#define TAS6424_FAULT_PVDD_UV BIT(1)
+#define TAS6424_FAULT_VBAT_UV BIT(0)
+
+/* TAS6424_GLOB_FAULT2_REG */
+#define TAS6424_FAULT_OTSD BIT(4)
+#define TAS6424_FAULT_OTSD_CH1 BIT(3)
+#define TAS6424_FAULT_OTSD_CH2 BIT(2)
+#define TAS6424_FAULT_OTSD_CH3 BIT(1)
+#define TAS6424_FAULT_OTSD_CH4 BIT(0)
+
+/* TAS6424_WARN_REG */
+#define TAS6424_WARN_VDD_UV BIT(6)
+#define TAS6424_WARN_VDD_POR BIT(5)
+#define TAS6424_WARN_VDD_OTW BIT(4)
+#define TAS6424_WARN_VDD_OTW_CH1 BIT(3)
+#define TAS6424_WARN_VDD_OTW_CH2 BIT(2)
+#define TAS6424_WARN_VDD_OTW_CH3 BIT(1)
+#define TAS6424_WARN_VDD_OTW_CH4 BIT(0)
+
+/* TAS6424_MISC_CTRL3_REG */
+#define TAS6424_CLEAR_FAULT BIT(7)
+#define TAS6424_PBTL_CH_SEL BIT(6)
+#define TAS6424_MASK_CBC_WARN BIT(5)
+#define TAS6424_MASK_VDD_UV BIT(4)
+#define TAS6424_OTSD_AUTO_RECOVERY BIT(3)
+
+#endif /* __TAS6424_H__ */
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index f8dd67ca0744..e7ca764b5729 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -316,6 +316,7 @@ static const struct of_device_id tfa9879_of_match[] = {
{ .compatible = "nxp,tfa9879", },
{ }
};
+MODULE_DEVICE_TABLE(of, tfa9879_of_match);
static struct i2c_driver tfa9879_i2c_driver = {
.driver = {
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index e2862372c26e..858cb8be445f 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * ALSA SoC TLV320AIC31XX codec driver
+ * ALSA SoC TLV320AIC31xx CODEC Driver
*
- * Copyright (C) 2014 Texas Instruments, Inc.
- *
- * Author: Jyri Sarha <jsarha@ti.com>
+ * Copyright (C) 2014-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Jyri Sarha <jsarha@ti.com>
*
* Based on ground work by: Ajit Kulkarni <x0175765@ti.com>
*
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED AS IS AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- *
- * The TLV320AIC31xx series of audio codec is a low-power, highly integrated
- * high performance codec which provides a stereo DAC, a mono ADC,
+ * The TLV320AIC31xx series of audio codecs are low-power, highly integrated
+ * high-performance codecs which provide a stereo DAC, a mono ADC,
* and mono/stereo Class-D speaker driver.
*/
@@ -26,7 +18,7 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/acpi.h>
#include <linux/of.h>
@@ -144,8 +136,7 @@ static const struct regmap_config aic31xx_i2c_regmap = {
.max_register = 12 * 128,
};
-#define AIC31XX_NUM_SUPPLIES 6
-static const char * const aic31xx_supply_names[AIC31XX_NUM_SUPPLIES] = {
+static const char * const aic31xx_supply_names[] = {
"HPVDD",
"SPRVDD",
"SPLVDD",
@@ -154,6 +145,8 @@ static const char * const aic31xx_supply_names[AIC31XX_NUM_SUPPLIES] = {
"DVDD",
};
+#define AIC31XX_NUM_SUPPLIES ARRAY_SIZE(aic31xx_supply_names)
+
struct aic31xx_disable_nb {
struct notifier_block nb;
struct aic31xx_priv *aic31xx;
@@ -164,6 +157,9 @@ struct aic31xx_priv {
u8 i2c_regs_status;
struct device *dev;
struct regmap *regmap;
+ enum aic31xx_type codec_type;
+ struct gpio_desc *gpio_reset;
+ int micbias_vg;
struct aic31xx_pdata pdata;
struct regulator_bulk_data supplies[AIC31XX_NUM_SUPPLIES];
struct aic31xx_disable_nb disable_nb[AIC31XX_NUM_SUPPLIES];
@@ -185,7 +181,7 @@ struct aic31xx_rate_divs {
u8 madc;
};
-/* ADC dividers can be disabled by cofiguring them to 0 */
+/* ADC dividers can be disabled by configuring them to 0 */
static const struct aic31xx_rate_divs aic31xx_divs[] = {
/* mclk/p rate pll: j d dosr ndac mdac aors nadc madc */
/* 8k rate */
@@ -456,7 +452,7 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
/* change mic bias voltage to user defined */
snd_soc_update_bits(codec, AIC31XX_MICBIAS,
AIC31XX_MICBIAS_MASK,
- aic31xx->pdata.micbias_vg <<
+ aic31xx->micbias_vg <<
AIC31XX_MICBIAS_SHIFT);
dev_dbg(codec->dev, "%s: turned on\n", __func__);
break;
@@ -679,14 +675,14 @@ static int aic31xx_add_controls(struct snd_soc_codec *codec)
int ret = 0;
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
- if (!(aic31xx->pdata.codec_type & DAC31XX_BIT))
+ if (!(aic31xx->codec_type & DAC31XX_BIT))
ret = snd_soc_add_codec_controls(
codec, aic31xx_snd_controls,
ARRAY_SIZE(aic31xx_snd_controls));
if (ret)
return ret;
- if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT)
+ if (aic31xx->codec_type & AIC31XX_STEREO_CLASS_D_BIT)
ret = snd_soc_add_codec_controls(
codec, aic311x_snd_controls,
ARRAY_SIZE(aic311x_snd_controls));
@@ -704,7 +700,7 @@ static int aic31xx_add_widgets(struct snd_soc_codec *codec)
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
int ret = 0;
- if (aic31xx->pdata.codec_type & DAC31XX_BIT) {
+ if (aic31xx->codec_type & DAC31XX_BIT) {
ret = snd_soc_dapm_new_controls(
dapm, dac31xx_dapm_widgets,
ARRAY_SIZE(dac31xx_dapm_widgets));
@@ -728,7 +724,7 @@ static int aic31xx_add_widgets(struct snd_soc_codec *codec)
return ret;
}
- if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT) {
+ if (aic31xx->codec_type & AIC31XX_STEREO_CLASS_D_BIT) {
ret = snd_soc_dapm_new_controls(
dapm, aic311x_dapm_widgets,
ARRAY_SIZE(aic311x_dapm_widgets));
@@ -760,11 +756,17 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
int bclk_score = snd_soc_params_to_frame_size(params);
- int mclk_p = aic31xx->sysclk / aic31xx->p_div;
+ int mclk_p;
int bclk_n = 0;
int match = -1;
int i;
+ if (!aic31xx->sysclk || !aic31xx->p_div) {
+ dev_err(codec->dev, "Master clock not supplied\n");
+ return -EINVAL;
+ }
+ mclk_p = aic31xx->sysclk / aic31xx->p_div;
+
/* Use PLL as CODEC_CLKIN and DAC_CLK as BDIV_CLKIN */
snd_soc_update_bits(codec, AIC31XX_CLKMUX,
AIC31XX_CODEC_CLKIN_MASK, AIC31XX_CODEC_CLKIN_PLL);
@@ -840,11 +842,17 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
dev_dbg(codec->dev,
"pll %d.%04d/%d dosr %d n %d m %d aosr %d n %d m %d bclk_n %d\n",
- aic31xx_divs[i].pll_j, aic31xx_divs[i].pll_d,
- aic31xx->p_div, aic31xx_divs[i].dosr,
- aic31xx_divs[i].ndac, aic31xx_divs[i].mdac,
- aic31xx_divs[i].aosr, aic31xx_divs[i].nadc,
- aic31xx_divs[i].madc, bclk_n);
+ aic31xx_divs[i].pll_j,
+ aic31xx_divs[i].pll_d,
+ aic31xx->p_div,
+ aic31xx_divs[i].dosr,
+ aic31xx_divs[i].ndac,
+ aic31xx_divs[i].mdac,
+ aic31xx_divs[i].aosr,
+ aic31xx_divs[i].nadc,
+ aic31xx_divs[i].madc,
+ bclk_n
+ );
return 0;
}
@@ -919,8 +927,28 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
case SND_SOC_DAIFMT_CBM_CFM:
iface_reg1 |= AIC31XX_BCLK_MASTER | AIC31XX_WCLK_MASTER;
break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ iface_reg1 |= AIC31XX_WCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ iface_reg1 |= AIC31XX_BCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+ return -EINVAL;
+ }
+
+ /* signal polarity */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ iface_reg2 |= AIC31XX_BCLKINV_MASK;
+ break;
default:
- dev_alert(codec->dev, "Invalid DAI master/slave interface\n");
+ dev_err(codec->dev, "Invalid DAI clock signal polarity\n");
return -EINVAL;
}
@@ -931,16 +959,12 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
case SND_SOC_DAIFMT_DSP_A:
dsp_a_val = 0x1; /* fall through */
case SND_SOC_DAIFMT_DSP_B:
- /* NOTE: BCLKINV bit value 1 equas NB and 0 equals IB */
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- iface_reg2 |= AIC31XX_BCLKINV_MASK;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- break;
- default:
- return -EINVAL;
- }
+ /*
+ * NOTE: This CODEC samples on the falling edge of BCLK in
+	 * DSP mode; this is inverted compared to what most DAIs
+	 * expect, so we invert for this mode.
+ */
+ iface_reg2 ^= AIC31XX_BCLKINV_MASK;
iface_reg1 |= (AIC31XX_DSP_MODE <<
AIC31XX_IFACE1_DATATYPE_SHIFT);
break;
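An editorial sketch of the XOR in the hunk above: the INV_MASK switch
added earlier may already have set AIC31XX_BCLKINV_MASK in iface_reg2,
and the XOR flips that choice exactly once for DSP modes, compensating
for the codec's falling-edge BCLK sampling there:

	/* requested NB (bit clear) ^ MASK -> bit set   (compensated) */
	/* requested IB (bit set)   ^ MASK -> bit clear               */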
@@ -981,8 +1005,9 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
dev_dbg(codec->dev, "## %s: clk_id = %d, freq = %d, dir = %d\n",
__func__, clk_id, freq, dir);
- for (i = 1; freq/i > 20000000 && i < 8; i++)
- ;
+ for (i = 1; i < 8; i++)
+ if (freq / i <= 20000000)
+ break;
if (freq/i > 20000000) {
dev_err(aic31xx->dev, "%s: Too high mclk frequency %u\n",
__func__, freq);
@@ -990,9 +1015,9 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
}
aic31xx->p_div = i;
- for (i = 0; i < ARRAY_SIZE(aic31xx_divs) &&
- aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++)
- ;
+ for (i = 0; i < ARRAY_SIZE(aic31xx_divs); i++)
+ if (aic31xx_divs[i].mclk_p == freq / aic31xx->p_div)
+ break;
if (i == ARRAY_SIZE(aic31xx_divs)) {
dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n",
__func__, freq);
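A worked example for the two searches above (editorial; it assumes a
12288000 entry in the aic31xx_divs[] mclk/p column, as the table's
comment suggests): for freq == 24576000 the first loop stops at i == 2,
since 24576000/1 > 20 MHz but 24576000/2 == 12288000 <= 20 MHz, and the
second loop then looks for mclk_p == 12288000.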
@@ -1004,6 +1029,7 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
clk_id << AIC31XX_PLL_CLKIN_SHIFT);
aic31xx->sysclk = freq;
+
return 0;
}
@@ -1019,8 +1045,8 @@ static int aic31xx_regulator_event(struct notifier_block *nb,
	 * Put the codec into reset, as at least one of the
	 * supplies was disabled.
*/
- if (gpio_is_valid(aic31xx->pdata.gpio_reset))
- gpio_set_value(aic31xx->pdata.gpio_reset, 0);
+ if (aic31xx->gpio_reset)
+ gpiod_set_value(aic31xx->gpio_reset, 1);
regcache_mark_dirty(aic31xx->regmap);
dev_dbg(aic31xx->dev, "## %s: DISABLE received\n", __func__);
@@ -1029,6 +1055,22 @@ static int aic31xx_regulator_event(struct notifier_block *nb,
return 0;
}
+static int aic31xx_reset(struct aic31xx_priv *aic31xx)
+{
+ int ret = 0;
+
+ if (aic31xx->gpio_reset) {
+ gpiod_set_value(aic31xx->gpio_reset, 1);
+ ndelay(10); /* At least 10ns */
+ gpiod_set_value(aic31xx->gpio_reset, 0);
+ } else {
+ ret = regmap_write(aic31xx->regmap, AIC31XX_RESET, 1);
+ }
+ mdelay(1); /* At least 1ms */
+
+ return ret;
+}
+
static void aic31xx_clk_on(struct snd_soc_codec *codec)
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
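The reset handling above switches both the API and the polarity, so a
minimal standalone sketch may help (assert_reset_legacy() and
assert_reset_gpiod() are invented names; only the two GPIO headers are
assumed):

	#include <linux/gpio.h>			/* legacy integer API */
	#include <linux/gpio/consumer.h>	/* descriptor API */

	static void assert_reset_legacy(int gpio)
	{
		/* value is the raw line level: drive low to assert an
		 * active-low reset, hence the old "..., 0" calls */
		gpio_set_value(gpio, 0);
	}

	static void assert_reset_gpiod(struct gpio_desc *desc)
	{
		/* value is logical: 1 == "asserted"; gpiolib applies
		 * the active-low flag declared in the device tree,
		 * hence the new "..., 1" calls */
		gpiod_set_value(desc, 1);
	}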
@@ -1065,20 +1107,22 @@ static void aic31xx_clk_off(struct snd_soc_codec *codec)
static int aic31xx_power_on(struct snd_soc_codec *codec)
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
+ int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(aic31xx->supplies),
aic31xx->supplies);
if (ret)
return ret;
- if (gpio_is_valid(aic31xx->pdata.gpio_reset)) {
- gpio_set_value(aic31xx->pdata.gpio_reset, 1);
- udelay(100);
- }
regcache_cache_only(aic31xx->regmap, false);
+
+ /* Reset device registers for a consistent power-on like state */
+ ret = aic31xx_reset(aic31xx);
+ if (ret < 0)
+ dev_err(aic31xx->dev, "Could not reset device: %d\n", ret);
+
ret = regcache_sync(aic31xx->regmap);
- if (ret != 0) {
+ if (ret) {
dev_err(codec->dev,
"Failed to restore cache: %d\n", ret);
regcache_cache_only(aic31xx->regmap, true);
@@ -1086,19 +1130,17 @@ static int aic31xx_power_on(struct snd_soc_codec *codec)
aic31xx->supplies);
return ret;
}
+
return 0;
}
-static int aic31xx_power_off(struct snd_soc_codec *codec)
+static void aic31xx_power_off(struct snd_soc_codec *codec)
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
regcache_cache_only(aic31xx->regmap, true);
- ret = regulator_bulk_disable(ARRAY_SIZE(aic31xx->supplies),
- aic31xx->supplies);
-
- return ret;
+ regulator_bulk_disable(ARRAY_SIZE(aic31xx->supplies),
+ aic31xx->supplies);
}
static int aic31xx_set_bias_level(struct snd_soc_codec *codec,
@@ -1137,14 +1179,11 @@ static int aic31xx_set_bias_level(struct snd_soc_codec *codec,
static int aic31xx_codec_probe(struct snd_soc_codec *codec)
{
- int ret = 0;
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
- int i;
+ int i, ret;
dev_dbg(aic31xx->dev, "## %s\n", __func__);
- aic31xx = snd_soc_codec_get_drvdata(codec);
-
aic31xx->codec = codec;
for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++) {
@@ -1169,8 +1208,10 @@ static int aic31xx_codec_probe(struct snd_soc_codec *codec)
return ret;
ret = aic31xx_add_widgets(codec);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
static int aic31xx_codec_remove(struct snd_soc_codec *codec)
@@ -1258,89 +1299,31 @@ static const struct of_device_id tlv320aic31xx_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, tlv320aic31xx_of_match);
-
-static void aic31xx_pdata_from_of(struct aic31xx_priv *aic31xx)
-{
- struct device_node *np = aic31xx->dev->of_node;
- unsigned int value = MICBIAS_2_0V;
- int ret;
-
- of_property_read_u32(np, "ai31xx-micbias-vg", &value);
- switch (value) {
- case MICBIAS_2_0V:
- case MICBIAS_2_5V:
- case MICBIAS_AVDDV:
- aic31xx->pdata.micbias_vg = value;
- break;
- default:
- dev_err(aic31xx->dev,
- "Bad ai31xx-micbias-vg value %d DT\n",
- value);
- aic31xx->pdata.micbias_vg = MICBIAS_2_0V;
- }
-
- ret = of_get_named_gpio(np, "gpio-reset", 0);
- if (ret > 0)
- aic31xx->pdata.gpio_reset = ret;
-}
-#else /* CONFIG_OF */
-static void aic31xx_pdata_from_of(struct aic31xx_priv *aic31xx)
-{
-}
#endif /* CONFIG_OF */
-static int aic31xx_device_init(struct aic31xx_priv *aic31xx)
-{
- int ret, i;
-
- dev_set_drvdata(aic31xx->dev, aic31xx);
-
- if (dev_get_platdata(aic31xx->dev))
- memcpy(&aic31xx->pdata, dev_get_platdata(aic31xx->dev),
- sizeof(aic31xx->pdata));
- else if (aic31xx->dev->of_node)
- aic31xx_pdata_from_of(aic31xx);
-
- if (aic31xx->pdata.gpio_reset) {
- ret = devm_gpio_request_one(aic31xx->dev,
- aic31xx->pdata.gpio_reset,
- GPIOF_OUT_INIT_HIGH,
- "aic31xx-reset-pin");
- if (ret < 0) {
- dev_err(aic31xx->dev, "not able to acquire gpio\n");
- return ret;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
- aic31xx->supplies[i].supply = aic31xx_supply_names[i];
-
- ret = devm_regulator_bulk_get(aic31xx->dev,
- ARRAY_SIZE(aic31xx->supplies),
- aic31xx->supplies);
- if (ret != 0)
- dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
-
- return ret;
-}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id aic31xx_acpi_match[] = {
+ { "10TI3100", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
+#endif
static int aic31xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct aic31xx_priv *aic31xx;
- int ret;
- const struct regmap_config *regmap_config;
+ unsigned int micbias_value = MICBIAS_2_0V;
+ int i, ret;
dev_dbg(&i2c->dev, "## %s: %s codec_type = %d\n", __func__,
- id->name, (int) id->driver_data);
-
- regmap_config = &aic31xx_i2c_regmap;
+ id->name, (int)id->driver_data);
aic31xx = devm_kzalloc(&i2c->dev, sizeof(*aic31xx), GFP_KERNEL);
- if (aic31xx == NULL)
+ if (!aic31xx)
return -ENOMEM;
- aic31xx->regmap = devm_regmap_init_i2c(i2c, regmap_config);
+ aic31xx->regmap = devm_regmap_init_i2c(i2c, &aic31xx_i2c_regmap);
if (IS_ERR(aic31xx->regmap)) {
ret = PTR_ERR(aic31xx->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
@@ -1349,13 +1332,49 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
}
aic31xx->dev = &i2c->dev;
- aic31xx->pdata.codec_type = id->driver_data;
+ aic31xx->codec_type = id->driver_data;
- ret = aic31xx_device_init(aic31xx);
- if (ret)
+ dev_set_drvdata(aic31xx->dev, aic31xx);
+
+ fwnode_property_read_u32(aic31xx->dev->fwnode, "ai31xx-micbias-vg",
+ &micbias_value);
+ switch (micbias_value) {
+ case MICBIAS_2_0V:
+ case MICBIAS_2_5V:
+ case MICBIAS_AVDDV:
+ aic31xx->micbias_vg = micbias_value;
+ break;
+ default:
+ dev_err(aic31xx->dev, "Bad ai31xx-micbias-vg value %d\n",
+ micbias_value);
+ aic31xx->micbias_vg = MICBIAS_2_0V;
+ }
+
+ if (dev_get_platdata(aic31xx->dev)) {
+ memcpy(&aic31xx->pdata, dev_get_platdata(aic31xx->dev), sizeof(aic31xx->pdata));
+ aic31xx->codec_type = aic31xx->pdata.codec_type;
+ aic31xx->micbias_vg = aic31xx->pdata.micbias_vg;
+ }
+
+ aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(aic31xx->gpio_reset)) {
+ dev_err(aic31xx->dev, "not able to acquire gpio\n");
+ return PTR_ERR(aic31xx->gpio_reset);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
+ aic31xx->supplies[i].supply = aic31xx_supply_names[i];
+
+ ret = devm_regulator_bulk_get(aic31xx->dev,
+ ARRAY_SIZE(aic31xx->supplies),
+ aic31xx->supplies);
+ if (ret) {
+ dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
return ret;
+ }
- if (aic31xx->pdata.codec_type & DAC31XX_BIT)
+ if (aic31xx->codec_type & DAC31XX_BIT)
return snd_soc_register_codec(&i2c->dev,
&soc_codec_driver_aic31xx,
dac31xx_dai_driver,
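An editorial note on devm_gpiod_get_optional() above: the consumer name
"reset" is resolved against the "reset-gpios" (or "reset-gpio")
firmware property, so the old hand-parsed "gpio-reset" name is gone,
and NULL -- not an error -- is returned when no such property exists.
That is why a plain "if (aic31xx->gpio_reset)" test suffices in the
reset and regulator paths above.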
@@ -1386,14 +1405,6 @@ static const struct i2c_device_id aic31xx_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id aic31xx_acpi_match[] = {
- { "10TI3100", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
-#endif
-
static struct i2c_driver aic31xx_i2c_driver = {
.driver = {
.name = "tlv320aic31xx-codec",
@@ -1404,9 +1415,8 @@ static struct i2c_driver aic31xx_i2c_driver = {
.remove = aic31xx_i2c_remove,
.id_table = aic31xx_i2c_id,
};
-
module_i2c_driver(aic31xx_i2c_driver);
-MODULE_DESCRIPTION("ASoC TLV320AIC3111 codec driver");
-MODULE_AUTHOR("Jyri Sarha");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("ASoC TLV320AIC31xx CODEC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 1ff3edb7bbb6..15ac7cba86fe 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -1,36 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * ALSA SoC TLV320AIC31XX codec driver
- *
- * Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ * ALSA SoC TLV320AIC31xx CODEC Driver Definitions
*
+ * Copyright (C) 2014-2017 Texas Instruments Incorporated - http://www.ti.com/
*/
+
#ifndef _TLV320AIC31XX_H
#define _TLV320AIC31XX_H
#define AIC31XX_RATES SNDRV_PCM_RATE_8000_192000
-#define AIC31XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
- | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE \
- | SNDRV_PCM_FMTBIT_S32_LE)
-
+#define AIC31XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
-#define AIC31XX_STEREO_CLASS_D_BIT 0x1
-#define AIC31XX_MINIDSP_BIT 0x2
-#define DAC31XX_BIT 0x4
+#define AIC31XX_STEREO_CLASS_D_BIT BIT(1)
+#define AIC31XX_MINIDSP_BIT BIT(2)
+#define DAC31XX_BIT BIT(3)
enum aic31xx_type {
AIC3100 = 0,
AIC3110 = AIC31XX_STEREO_CLASS_D_BIT,
AIC3120 = AIC31XX_MINIDSP_BIT,
- AIC3111 = (AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT),
+ AIC3111 = AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT,
DAC3100 = DAC31XX_BIT,
DAC3101 = DAC31XX_BIT | AIC31XX_STEREO_CLASS_D_BIT,
};
@@ -43,222 +37,167 @@ struct aic31xx_pdata {
#define AIC31XX_REG(page, reg) ((page * 128) + reg)
-/* Page Control Register */
-#define AIC31XX_PAGECTL AIC31XX_REG(0, 0)
+#define AIC31XX_PAGECTL AIC31XX_REG(0, 0) /* Page Control Register */
/* Page 0 Registers */
-/* Software reset register */
-#define AIC31XX_RESET AIC31XX_REG(0, 1)
-/* OT FLAG register */
-#define AIC31XX_OT_FLAG AIC31XX_REG(0, 3)
-/* Clock clock Gen muxing, Multiplexers*/
-#define AIC31XX_CLKMUX AIC31XX_REG(0, 4)
-/* PLL P and R-VAL register */
-#define AIC31XX_PLLPR AIC31XX_REG(0, 5)
-/* PLL J-VAL register */
-#define AIC31XX_PLLJ AIC31XX_REG(0, 6)
-/* PLL D-VAL MSB register */
-#define AIC31XX_PLLDMSB AIC31XX_REG(0, 7)
-/* PLL D-VAL LSB register */
-#define AIC31XX_PLLDLSB AIC31XX_REG(0, 8)
-/* DAC NDAC_VAL register*/
-#define AIC31XX_NDAC AIC31XX_REG(0, 11)
-/* DAC MDAC_VAL register */
-#define AIC31XX_MDAC AIC31XX_REG(0, 12)
-/* DAC OSR setting register 1, MSB value */
-#define AIC31XX_DOSRMSB AIC31XX_REG(0, 13)
-/* DAC OSR setting register 2, LSB value */
-#define AIC31XX_DOSRLSB AIC31XX_REG(0, 14)
+#define AIC31XX_RESET AIC31XX_REG(0, 1) /* Software reset register */
+#define AIC31XX_OT_FLAG AIC31XX_REG(0, 3) /* OT FLAG register */
+#define AIC31XX_CLKMUX AIC31XX_REG(0, 4) /* Clock Gen muxing, Multiplexers */
+#define AIC31XX_PLLPR AIC31XX_REG(0, 5) /* PLL P and R-VAL register */
+#define AIC31XX_PLLJ AIC31XX_REG(0, 6) /* PLL J-VAL register */
+#define AIC31XX_PLLDMSB AIC31XX_REG(0, 7) /* PLL D-VAL MSB register */
+#define AIC31XX_PLLDLSB AIC31XX_REG(0, 8) /* PLL D-VAL LSB register */
+#define AIC31XX_NDAC AIC31XX_REG(0, 11) /* DAC NDAC_VAL register*/
+#define AIC31XX_MDAC AIC31XX_REG(0, 12) /* DAC MDAC_VAL register */
+#define AIC31XX_DOSRMSB AIC31XX_REG(0, 13) /* DAC OSR setting register 1, MSB value */
+#define AIC31XX_DOSRLSB AIC31XX_REG(0, 14) /* DAC OSR setting register 2, LSB value */
#define AIC31XX_MINI_DSP_INPOL AIC31XX_REG(0, 16)
-/* Clock setting register 8, PLL */
-#define AIC31XX_NADC AIC31XX_REG(0, 18)
-/* Clock setting register 9, PLL */
-#define AIC31XX_MADC AIC31XX_REG(0, 19)
-/* ADC Oversampling (AOSR) Register */
-#define AIC31XX_AOSR AIC31XX_REG(0, 20)
-/* Clock setting register 9, Multiplexers */
-#define AIC31XX_CLKOUTMUX AIC31XX_REG(0, 25)
-/* Clock setting register 10, CLOCKOUT M divider value */
-#define AIC31XX_CLKOUTMVAL AIC31XX_REG(0, 26)
-/* Audio Interface Setting Register 1 */
-#define AIC31XX_IFACE1 AIC31XX_REG(0, 27)
-/* Audio Data Slot Offset Programming */
-#define AIC31XX_DATA_OFFSET AIC31XX_REG(0, 28)
-/* Audio Interface Setting Register 2 */
-#define AIC31XX_IFACE2 AIC31XX_REG(0, 29)
-/* Clock setting register 11, BCLK N Divider */
-#define AIC31XX_BCLKN AIC31XX_REG(0, 30)
-/* Audio Interface Setting Register 3, Secondary Audio Interface */
-#define AIC31XX_IFACESEC1 AIC31XX_REG(0, 31)
-/* Audio Interface Setting Register 4 */
-#define AIC31XX_IFACESEC2 AIC31XX_REG(0, 32)
-/* Audio Interface Setting Register 5 */
-#define AIC31XX_IFACESEC3 AIC31XX_REG(0, 33)
-/* I2C Bus Condition */
-#define AIC31XX_I2C AIC31XX_REG(0, 34)
-/* ADC FLAG */
-#define AIC31XX_ADCFLAG AIC31XX_REG(0, 36)
-/* DAC Flag Registers */
-#define AIC31XX_DACFLAG1 AIC31XX_REG(0, 37)
+#define AIC31XX_NADC AIC31XX_REG(0, 18) /* Clock setting register 8, PLL */
+#define AIC31XX_MADC AIC31XX_REG(0, 19) /* Clock setting register 9, PLL */
+#define AIC31XX_AOSR AIC31XX_REG(0, 20) /* ADC Oversampling (AOSR) Register */
+#define AIC31XX_CLKOUTMUX AIC31XX_REG(0, 25) /* Clock setting register 9, Multiplexers */
+#define AIC31XX_CLKOUTMVAL AIC31XX_REG(0, 26) /* Clock setting register 10, CLOCKOUT M divider value */
+#define AIC31XX_IFACE1 AIC31XX_REG(0, 27) /* Audio Interface Setting Register 1 */
+#define AIC31XX_DATA_OFFSET AIC31XX_REG(0, 28) /* Audio Data Slot Offset Programming */
+#define AIC31XX_IFACE2 AIC31XX_REG(0, 29) /* Audio Interface Setting Register 2 */
+#define AIC31XX_BCLKN AIC31XX_REG(0, 30) /* Clock setting register 11, BCLK N Divider */
+#define AIC31XX_IFACESEC1 AIC31XX_REG(0, 31) /* Audio Interface Setting Register 3, Secondary Audio Interface */
+#define AIC31XX_IFACESEC2 AIC31XX_REG(0, 32) /* Audio Interface Setting Register 4 */
+#define AIC31XX_IFACESEC3 AIC31XX_REG(0, 33) /* Audio Interface Setting Register 5 */
+#define AIC31XX_I2C AIC31XX_REG(0, 34) /* I2C Bus Condition */
+#define AIC31XX_ADCFLAG AIC31XX_REG(0, 36) /* ADC FLAG */
+#define AIC31XX_DACFLAG1 AIC31XX_REG(0, 37) /* DAC Flag Registers */
#define AIC31XX_DACFLAG2 AIC31XX_REG(0, 38)
-/* Sticky Interrupt flag (overflow) */
-#define AIC31XX_OFFLAG AIC31XX_REG(0, 39)
-/* Sticy DAC Interrupt flags */
-#define AIC31XX_INTRDACFLAG AIC31XX_REG(0, 44)
-/* Sticy ADC Interrupt flags */
-#define AIC31XX_INTRADCFLAG AIC31XX_REG(0, 45)
-/* DAC Interrupt flags 2 */
-#define AIC31XX_INTRDACFLAG2 AIC31XX_REG(0, 46)
-/* ADC Interrupt flags 2 */
-#define AIC31XX_INTRADCFLAG2 AIC31XX_REG(0, 47)
-/* INT1 interrupt control */
-#define AIC31XX_INT1CTRL AIC31XX_REG(0, 48)
-/* INT2 interrupt control */
-#define AIC31XX_INT2CTRL AIC31XX_REG(0, 49)
-/* GPIO1 control */
-#define AIC31XX_GPIO1 AIC31XX_REG(0, 51)
-
+#define AIC31XX_OFFLAG AIC31XX_REG(0, 39) /* Sticky Interrupt flag (overflow) */
+#define AIC31XX_INTRDACFLAG AIC31XX_REG(0, 44) /* Sticky DAC Interrupt flags */
+#define AIC31XX_INTRADCFLAG AIC31XX_REG(0, 45) /* Sticky ADC Interrupt flags */
+#define AIC31XX_INTRDACFLAG2 AIC31XX_REG(0, 46) /* DAC Interrupt flags 2 */
+#define AIC31XX_INTRADCFLAG2 AIC31XX_REG(0, 47) /* ADC Interrupt flags 2 */
+#define AIC31XX_INT1CTRL AIC31XX_REG(0, 48) /* INT1 interrupt control */
+#define AIC31XX_INT2CTRL AIC31XX_REG(0, 49) /* INT2 interrupt control */
+#define AIC31XX_GPIO1 AIC31XX_REG(0, 51) /* GPIO1 control */
#define AIC31XX_DACPRB AIC31XX_REG(0, 60)
-/* ADC Instruction Set Register */
-#define AIC31XX_ADCPRB AIC31XX_REG(0, 61)
-/* DAC channel setup register */
-#define AIC31XX_DACSETUP AIC31XX_REG(0, 63)
-/* DAC Mute and volume control register */
-#define AIC31XX_DACMUTE AIC31XX_REG(0, 64)
-/* Left DAC channel digital volume control */
-#define AIC31XX_LDACVOL AIC31XX_REG(0, 65)
-/* Right DAC channel digital volume control */
-#define AIC31XX_RDACVOL AIC31XX_REG(0, 66)
-/* Headset detection */
-#define AIC31XX_HSDETECT AIC31XX_REG(0, 67)
-/* ADC Digital Mic */
-#define AIC31XX_ADCSETUP AIC31XX_REG(0, 81)
-/* ADC Digital Volume Control Fine Adjust */
-#define AIC31XX_ADCFGA AIC31XX_REG(0, 82)
-/* ADC Digital Volume Control Coarse Adjust */
-#define AIC31XX_ADCVOL AIC31XX_REG(0, 83)
-
+#define AIC31XX_ADCPRB AIC31XX_REG(0, 61) /* ADC Instruction Set Register */
+#define AIC31XX_DACSETUP AIC31XX_REG(0, 63) /* DAC channel setup register */
+#define AIC31XX_DACMUTE AIC31XX_REG(0, 64) /* DAC Mute and volume control register */
+#define AIC31XX_LDACVOL AIC31XX_REG(0, 65) /* Left DAC channel digital volume control */
+#define AIC31XX_RDACVOL AIC31XX_REG(0, 66) /* Right DAC channel digital volume control */
+#define AIC31XX_HSDETECT AIC31XX_REG(0, 67) /* Headset detection */
+#define AIC31XX_ADCSETUP AIC31XX_REG(0, 81) /* ADC Digital Mic */
+#define AIC31XX_ADCFGA AIC31XX_REG(0, 82) /* ADC Digital Volume Control Fine Adjust */
+#define AIC31XX_ADCVOL AIC31XX_REG(0, 83) /* ADC Digital Volume Control Coarse Adjust */
/* Page 1 Registers */
-/* Headphone drivers */
-#define AIC31XX_HPDRIVER AIC31XX_REG(1, 31)
-/* Class-D Speakear Amplifier */
-#define AIC31XX_SPKAMP AIC31XX_REG(1, 32)
-/* HP Output Drivers POP Removal Settings */
-#define AIC31XX_HPPOP AIC31XX_REG(1, 33)
-/* Output Driver PGA Ramp-Down Period Control */
-#define AIC31XX_SPPGARAMP AIC31XX_REG(1, 34)
-/* DAC_L and DAC_R Output Mixer Routing */
-#define AIC31XX_DACMIXERROUTE AIC31XX_REG(1, 35)
-/* Left Analog Vol to HPL */
-#define AIC31XX_LANALOGHPL AIC31XX_REG(1, 36)
-/* Right Analog Vol to HPR */
-#define AIC31XX_RANALOGHPR AIC31XX_REG(1, 37)
-/* Left Analog Vol to SPL */
-#define AIC31XX_LANALOGSPL AIC31XX_REG(1, 38)
-/* Right Analog Vol to SPR */
-#define AIC31XX_RANALOGSPR AIC31XX_REG(1, 39)
-/* HPL Driver */
-#define AIC31XX_HPLGAIN AIC31XX_REG(1, 40)
-/* HPR Driver */
-#define AIC31XX_HPRGAIN AIC31XX_REG(1, 41)
-/* SPL Driver */
-#define AIC31XX_SPLGAIN AIC31XX_REG(1, 42)
-/* SPR Driver */
-#define AIC31XX_SPRGAIN AIC31XX_REG(1, 43)
-/* HP Driver Control */
-#define AIC31XX_HPCONTROL AIC31XX_REG(1, 44)
-/* MIC Bias Control */
-#define AIC31XX_MICBIAS AIC31XX_REG(1, 46)
-/* MIC PGA*/
-#define AIC31XX_MICPGA AIC31XX_REG(1, 47)
-/* Delta-Sigma Mono ADC Channel Fine-Gain Input Selection for P-Terminal */
-#define AIC31XX_MICPGAPI AIC31XX_REG(1, 48)
-/* ADC Input Selection for M-Terminal */
-#define AIC31XX_MICPGAMI AIC31XX_REG(1, 49)
-/* Input CM Settings */
-#define AIC31XX_MICPGACM AIC31XX_REG(1, 50)
-
-/* Bits, masks and shifts */
+#define AIC31XX_HPDRIVER AIC31XX_REG(1, 31) /* Headphone drivers */
+#define AIC31XX_SPKAMP AIC31XX_REG(1, 32) /* Class-D Speaker Amplifier */
+#define AIC31XX_HPPOP AIC31XX_REG(1, 33) /* HP Output Drivers POP Removal Settings */
+#define AIC31XX_SPPGARAMP AIC31XX_REG(1, 34) /* Output Driver PGA Ramp-Down Period Control */
+#define AIC31XX_DACMIXERROUTE AIC31XX_REG(1, 35) /* DAC_L and DAC_R Output Mixer Routing */
+#define AIC31XX_LANALOGHPL AIC31XX_REG(1, 36) /* Left Analog Vol to HPL */
+#define AIC31XX_RANALOGHPR AIC31XX_REG(1, 37) /* Right Analog Vol to HPR */
+#define AIC31XX_LANALOGSPL AIC31XX_REG(1, 38) /* Left Analog Vol to SPL */
+#define AIC31XX_RANALOGSPR AIC31XX_REG(1, 39) /* Right Analog Vol to SPR */
+#define AIC31XX_HPLGAIN AIC31XX_REG(1, 40) /* HPL Driver */
+#define AIC31XX_HPRGAIN AIC31XX_REG(1, 41) /* HPR Driver */
+#define AIC31XX_SPLGAIN AIC31XX_REG(1, 42) /* SPL Driver */
+#define AIC31XX_SPRGAIN AIC31XX_REG(1, 43) /* SPR Driver */
+#define AIC31XX_HPCONTROL AIC31XX_REG(1, 44) /* HP Driver Control */
+#define AIC31XX_MICBIAS AIC31XX_REG(1, 46) /* MIC Bias Control */
+#define AIC31XX_MICPGA AIC31XX_REG(1, 47) /* MIC PGA*/
+#define AIC31XX_MICPGAPI AIC31XX_REG(1, 48) /* Delta-Sigma Mono ADC Channel Fine-Gain Input Selection for P-Terminal */
+#define AIC31XX_MICPGAMI AIC31XX_REG(1, 49) /* ADC Input Selection for M-Terminal */
+#define AIC31XX_MICPGACM AIC31XX_REG(1, 50) /* Input CM Settings */
+
+/* Bits, masks, and shifts */
/* AIC31XX_CLKMUX */
-#define AIC31XX_PLL_CLKIN_MASK 0x0c
-#define AIC31XX_PLL_CLKIN_SHIFT 2
-#define AIC31XX_PLL_CLKIN_MCLK 0
-#define AIC31XX_CODEC_CLKIN_MASK 0x03
-#define AIC31XX_CODEC_CLKIN_SHIFT 0
-#define AIC31XX_CODEC_CLKIN_PLL 3
-#define AIC31XX_CODEC_CLKIN_BCLK 1
-
-/* AIC31XX_PLLPR, AIC31XX_NDAC, AIC31XX_MDAC, AIC31XX_NADC, AIC31XX_MADC,
- AIC31XX_BCLKN */
-#define AIC31XX_PLL_MASK 0x7f
-#define AIC31XX_PM_MASK 0x80
+#define AIC31XX_PLL_CLKIN_MASK GENMASK(3, 2)
+#define AIC31XX_PLL_CLKIN_SHIFT (2)
+#define AIC31XX_PLL_CLKIN_MCLK 0x00
+#define AIC31XX_PLL_CLKIN_BCKL 0x01
+#define AIC31XX_PLL_CLKIN_GPIO1 0x02
+#define AIC31XX_PLL_CLKIN_DIN 0x03
+#define AIC31XX_CODEC_CLKIN_MASK GENMASK(1, 0)
+#define AIC31XX_CODEC_CLKIN_SHIFT (0)
+#define AIC31XX_CODEC_CLKIN_MCLK 0x00
+#define AIC31XX_CODEC_CLKIN_BCLK 0x01
+#define AIC31XX_CODEC_CLKIN_GPIO1 0x02
+#define AIC31XX_CODEC_CLKIN_PLL 0x03
+
+/* AIC31XX_PLLPR */
+/* AIC31XX_NDAC */
+/* AIC31XX_MDAC */
+/* AIC31XX_NADC */
+/* AIC31XX_MADC */
+/* AIC31XX_BCLKN */
+#define AIC31XX_PLL_MASK GENMASK(6, 0)
+#define AIC31XX_PM_MASK BIT(7)
/* AIC31XX_IFACE1 */
-#define AIC31XX_WORD_LEN_16BITS 0x00
-#define AIC31XX_WORD_LEN_20BITS 0x01
-#define AIC31XX_WORD_LEN_24BITS 0x02
-#define AIC31XX_WORD_LEN_32BITS 0x03
-#define AIC31XX_IFACE1_DATALEN_MASK 0x30
-#define AIC31XX_IFACE1_DATALEN_SHIFT (4)
-#define AIC31XX_IFACE1_DATATYPE_MASK 0xC0
+#define AIC31XX_IFACE1_DATATYPE_MASK GENMASK(7, 6)
#define AIC31XX_IFACE1_DATATYPE_SHIFT (6)
#define AIC31XX_I2S_MODE 0x00
#define AIC31XX_DSP_MODE 0x01
#define AIC31XX_RIGHT_JUSTIFIED_MODE 0x02
#define AIC31XX_LEFT_JUSTIFIED_MODE 0x03
-#define AIC31XX_IFACE1_MASTER_MASK 0x0C
-#define AIC31XX_BCLK_MASTER 0x08
-#define AIC31XX_WCLK_MASTER 0x04
+#define AIC31XX_IFACE1_DATALEN_MASK GENMASK(5, 4)
+#define AIC31XX_IFACE1_DATALEN_SHIFT (4)
+#define AIC31XX_WORD_LEN_16BITS 0x00
+#define AIC31XX_WORD_LEN_20BITS 0x01
+#define AIC31XX_WORD_LEN_24BITS 0x02
+#define AIC31XX_WORD_LEN_32BITS 0x03
+#define AIC31XX_IFACE1_MASTER_MASK GENMASK(3, 2)
+#define AIC31XX_BCLK_MASTER BIT(2)
+#define AIC31XX_WCLK_MASTER BIT(3)
/* AIC31XX_DATA_OFFSET */
-#define AIC31XX_DATA_OFFSET_MASK 0xFF
+#define AIC31XX_DATA_OFFSET_MASK GENMASK(7, 0)
/* AIC31XX_IFACE2 */
-#define AIC31XX_BCLKINV_MASK 0x08
-#define AIC31XX_BDIVCLK_MASK 0x03
+#define AIC31XX_BCLKINV_MASK BIT(3)
+#define AIC31XX_BDIVCLK_MASK GENMASK(1, 0)
#define AIC31XX_DAC2BCLK 0x00
#define AIC31XX_DACMOD2BCLK 0x01
#define AIC31XX_ADC2BCLK 0x02
#define AIC31XX_ADCMOD2BCLK 0x03
/* AIC31XX_ADCFLAG */
-#define AIC31XX_ADCPWRSTATUS_MASK 0x40
+#define AIC31XX_ADCPWRSTATUS_MASK BIT(6)
/* AIC31XX_DACFLAG1 */
-#define AIC31XX_LDACPWRSTATUS_MASK 0x80
-#define AIC31XX_RDACPWRSTATUS_MASK 0x08
-#define AIC31XX_HPLDRVPWRSTATUS_MASK 0x20
-#define AIC31XX_HPRDRVPWRSTATUS_MASK 0x02
-#define AIC31XX_SPLDRVPWRSTATUS_MASK 0x10
-#define AIC31XX_SPRDRVPWRSTATUS_MASK 0x01
+#define AIC31XX_LDACPWRSTATUS_MASK BIT(7)
+#define AIC31XX_HPLDRVPWRSTATUS_MASK BIT(5)
+#define AIC31XX_SPLDRVPWRSTATUS_MASK BIT(4)
+#define AIC31XX_RDACPWRSTATUS_MASK BIT(3)
+#define AIC31XX_HPRDRVPWRSTATUS_MASK BIT(1)
+#define AIC31XX_SPRDRVPWRSTATUS_MASK BIT(0)
/* AIC31XX_INTRDACFLAG */
-#define AIC31XX_HPSCDETECT_MASK 0x80
-#define AIC31XX_BUTTONPRESS_MASK 0x20
-#define AIC31XX_HSPLUG_MASK 0x10
-#define AIC31XX_LDRCTHRES_MASK 0x08
-#define AIC31XX_RDRCTHRES_MASK 0x04
-#define AIC31XX_DACSINT_MASK 0x02
-#define AIC31XX_DACAINT_MASK 0x01
+#define AIC31XX_HPLSCDETECT BIT(7)
+#define AIC31XX_HPRSCDETECT BIT(6)
+#define AIC31XX_BUTTONPRESS BIT(5)
+#define AIC31XX_HSPLUG BIT(4)
+#define AIC31XX_LDRCTHRES BIT(3)
+#define AIC31XX_RDRCTHRES BIT(2)
+#define AIC31XX_DACSINT BIT(1)
+#define AIC31XX_DACAINT BIT(0)
/* AIC31XX_INT1CTRL */
-#define AIC31XX_HSPLUGDET_MASK 0x80
-#define AIC31XX_BUTTONPRESSDET_MASK 0x40
-#define AIC31XX_DRCTHRES_MASK 0x20
-#define AIC31XX_AGCNOISE_MASK 0x10
-#define AIC31XX_OC_MASK 0x08
-#define AIC31XX_ENGINE_MASK 0x04
+#define AIC31XX_HSPLUGDET BIT(7)
+#define AIC31XX_BUTTONPRESSDET BIT(6)
+#define AIC31XX_DRCTHRES BIT(5)
+#define AIC31XX_AGCNOISE BIT(4)
+#define AIC31XX_SC BIT(3)
+#define AIC31XX_ENGINE BIT(2)
/* AIC31XX_DACSETUP */
-#define AIC31XX_SOFTSTEP_MASK 0x03
+#define AIC31XX_SOFTSTEP_MASK GENMASK(1, 0)
/* AIC31XX_DACMUTE */
-#define AIC31XX_DACMUTE_MASK 0x0C
+#define AIC31XX_DACMUTE_MASK GENMASK(3, 2)
/* AIC31XX_MICBIAS */
-#define AIC31XX_MICBIAS_MASK 0x03
-#define AIC31XX_MICBIAS_SHIFT 0
+#define AIC31XX_MICBIAS_MASK GENMASK(1, 0)
+#define AIC31XX_MICBIAS_SHIFT 0
#endif /* _TLV320AIC31XX_H */
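An editorial sketch of the BIT()/GENMASK() notation adopted in this
header, simplified from <linux/bits.h> (the in-kernel definitions
differ in detail but yield the same values):

	#define BIT(nr)		(1UL << (nr))
	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

	/* Spot checks against the old literals:
	 *   BIT(3)        == 0x08  (AIC31XX_BCLKINV_MASK)
	 *   GENMASK(3, 2) == 0x0c  (AIC31XX_DACMUTE_MASK)
	 *   GENMASK(6, 0) == 0x7f  (AIC31XX_PLL_MASK)
	 * so the conversion is purely notational: a field's bit
	 * positions become explicit while every value stays the same.
	 */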
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index e694f5f04eb9..fea019343c3b 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -281,34 +281,34 @@ static const struct snd_kcontrol_new aic32x4_snd_controls[] = {
static const struct aic32x4_rate_divs aic32x4_divs[] = {
/* 8k rate */
- {AIC32X4_FREQ_12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
- {AIC32X4_FREQ_24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
- {AIC32X4_FREQ_25000000, 8000, 2, 7, 3728, 768, 15, 1, 64, 45, 4, 24},
+ {12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
+ {24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
+ {25000000, 8000, 2, 7, 3728, 768, 15, 1, 64, 45, 4, 24},
/* 11.025k rate */
- {AIC32X4_FREQ_12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
- {AIC32X4_FREQ_24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
+ {12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
+ {24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
/* 16k rate */
- {AIC32X4_FREQ_12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
- {AIC32X4_FREQ_24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
- {AIC32X4_FREQ_25000000, 16000, 2, 7, 3728, 384, 15, 1, 64, 18, 5, 12},
+ {12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
+ {24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
+ {25000000, 16000, 2, 7, 3728, 384, 15, 1, 64, 18, 5, 12},
/* 22.05k rate */
- {AIC32X4_FREQ_12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
- {AIC32X4_FREQ_24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
- {AIC32X4_FREQ_25000000, 22050, 2, 7, 2253, 256, 16, 1, 64, 16, 4, 8},
+ {12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
+ {24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
+ {25000000, 22050, 2, 7, 2253, 256, 16, 1, 64, 16, 4, 8},
/* 32k rate */
- {AIC32X4_FREQ_12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
- {AIC32X4_FREQ_24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
+ {12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
+ {24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
/* 44.1k rate */
- {AIC32X4_FREQ_12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
- {AIC32X4_FREQ_24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
- {AIC32X4_FREQ_25000000, 44100, 2, 7, 2253, 128, 8, 2, 64, 8, 4, 4},
+ {12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
+ {24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
+ {25000000, 44100, 2, 7, 2253, 128, 8, 2, 64, 8, 4, 4},
/* 48k rate */
- {AIC32X4_FREQ_12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
- {AIC32X4_FREQ_24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
- {AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
+ {12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
+ {24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
+ {25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
/* 96k rate */
- {AIC32X4_FREQ_25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
+ {25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
};
static const struct snd_kcontrol_new hpl_output_mixer_controls[] = {
@@ -601,9 +601,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
- case AIC32X4_FREQ_12000000:
- case AIC32X4_FREQ_24000000:
- case AIC32X4_FREQ_25000000:
+ case 12000000:
+ case 24000000:
+ case 25000000:
aic32x4->sysclk = freq;
return 0;
}
@@ -614,16 +614,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- u8 iface_reg_1;
- u8 iface_reg_2;
- u8 iface_reg_3;
-
- iface_reg_1 = snd_soc_read(codec, AIC32X4_IFACE1);
- iface_reg_1 = iface_reg_1 & ~(3 << 6 | 3 << 2);
- iface_reg_2 = snd_soc_read(codec, AIC32X4_IFACE2);
- iface_reg_2 = 0;
- iface_reg_3 = snd_soc_read(codec, AIC32X4_IFACE3);
- iface_reg_3 = iface_reg_3 & ~(1 << 3);
+ u8 iface_reg_1 = 0;
+ u8 iface_reg_2 = 0;
+ u8 iface_reg_3 = 0;
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -641,30 +634,37 @@ static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
case SND_SOC_DAIFMT_I2S:
break;
case SND_SOC_DAIFMT_DSP_A:
- iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
- iface_reg_3 |= (1 << 3); /* invert bit clock */
+ iface_reg_1 |= (AIC32X4_DSP_MODE <<
+ AIC32X4_IFACE1_DATATYPE_SHIFT);
+ iface_reg_3 |= AIC32X4_BCLKINV_MASK; /* invert bit clock */
iface_reg_2 = 0x01; /* add offset 1 */
break;
case SND_SOC_DAIFMT_DSP_B:
- iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
- iface_reg_3 |= (1 << 3); /* invert bit clock */
+ iface_reg_1 |= (AIC32X4_DSP_MODE <<
+ AIC32X4_IFACE1_DATATYPE_SHIFT);
+ iface_reg_3 |= AIC32X4_BCLKINV_MASK; /* invert bit clock */
break;
case SND_SOC_DAIFMT_RIGHT_J:
- iface_reg_1 |=
- (AIC32X4_RIGHT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
+ iface_reg_1 |= (AIC32X4_RIGHT_JUSTIFIED_MODE <<
+ AIC32X4_IFACE1_DATATYPE_SHIFT);
break;
case SND_SOC_DAIFMT_LEFT_J:
- iface_reg_1 |=
- (AIC32X4_LEFT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
+ iface_reg_1 |= (AIC32X4_LEFT_JUSTIFIED_MODE <<
+ AIC32X4_IFACE1_DATATYPE_SHIFT);
break;
default:
printk(KERN_ERR "aic32x4: invalid DAI interface format\n");
return -EINVAL;
}
- snd_soc_write(codec, AIC32X4_IFACE1, iface_reg_1);
- snd_soc_write(codec, AIC32X4_IFACE2, iface_reg_2);
- snd_soc_write(codec, AIC32X4_IFACE3, iface_reg_3);
+ snd_soc_update_bits(codec, AIC32X4_IFACE1,
+ AIC32X4_IFACE1_DATATYPE_MASK |
+ AIC32X4_IFACE1_MASTER_MASK, iface_reg_1);
+ snd_soc_update_bits(codec, AIC32X4_IFACE2,
+ AIC32X4_DATA_OFFSET_MASK, iface_reg_2);
+ snd_soc_update_bits(codec, AIC32X4_IFACE3,
+ AIC32X4_BCLKINV_MASK, iface_reg_3);
+
return 0;
}
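A behavioural sketch of snd_soc_update_bits() (not the real
implementation, which goes through regmap), showing why the hunk above
can drop the manual snd_soc_read()/snd_soc_write() pairs:

	static int update_bits_sketch(struct snd_soc_codec *codec,
				      unsigned int reg,
				      unsigned int mask, unsigned int val)
	{
		unsigned int old = snd_soc_read(codec, reg);
		unsigned int new = (old & ~mask) | (val & mask);

		/* only the masked field changes, so bits owned by other
		 * paths (e.g. DATALEN written in hw_params) survive,
		 * and a no-op update skips the bus write entirely */
		if (new == old)
			return 0;
		return snd_soc_write(codec, reg, new);
	}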
@@ -674,7 +674,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_codec *codec = dai->codec;
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
- u8 data;
+ u8 iface1_reg = 0;
+ u8 dacsetup_reg = 0;
int i;
i = aic32x4_get_divs(aic32x4->sysclk, params_rate(params));
@@ -683,82 +684,88 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
return i;
}
- /* Use PLL as CODEC_CLKIN and DAC_MOD_CLK as BDIV_CLKIN */
- snd_soc_write(codec, AIC32X4_CLKMUX, AIC32X4_PLLCLKIN);
- snd_soc_write(codec, AIC32X4_IFACE3, AIC32X4_DACMOD2BCLK);
+ /* MCLK as PLL_CLKIN */
+ snd_soc_update_bits(codec, AIC32X4_CLKMUX, AIC32X4_PLL_CLKIN_MASK,
+ AIC32X4_PLL_CLKIN_MCLK << AIC32X4_PLL_CLKIN_SHIFT);
+ /* PLL as CODEC_CLKIN */
+ snd_soc_update_bits(codec, AIC32X4_CLKMUX, AIC32X4_CODEC_CLKIN_MASK,
+ AIC32X4_CODEC_CLKIN_PLL << AIC32X4_CODEC_CLKIN_SHIFT);
+ /* DAC_MOD_CLK as BDIV_CLKIN */
+ snd_soc_update_bits(codec, AIC32X4_IFACE3, AIC32X4_BDIVCLK_MASK,
+ AIC32X4_DACMOD2BCLK << AIC32X4_BDIVCLK_SHIFT);
+
+ /* We will fix R value to 1 and will make P & J=K.D as variable */
+ snd_soc_update_bits(codec, AIC32X4_PLLPR, AIC32X4_PLL_R_MASK, 0x01);
- /* We will fix R value to 1 and will make P & J=K.D as varialble */
- data = snd_soc_read(codec, AIC32X4_PLLPR);
- data &= ~(7 << 4);
- snd_soc_write(codec, AIC32X4_PLLPR,
- (data | (aic32x4_divs[i].p_val << 4) | 0x01));
+ /* PLL P value */
+ snd_soc_update_bits(codec, AIC32X4_PLLPR, AIC32X4_PLL_P_MASK,
+ aic32x4_divs[i].p_val << AIC32X4_PLL_P_SHIFT);
+ /* PLL J value */
snd_soc_write(codec, AIC32X4_PLLJ, aic32x4_divs[i].pll_j);
+ /* PLL D value */
snd_soc_write(codec, AIC32X4_PLLDMSB, (aic32x4_divs[i].pll_d >> 8));
- snd_soc_write(codec, AIC32X4_PLLDLSB,
- (aic32x4_divs[i].pll_d & 0xff));
+ snd_soc_write(codec, AIC32X4_PLLDLSB, (aic32x4_divs[i].pll_d & 0xff));
/* NDAC divider value */
- data = snd_soc_read(codec, AIC32X4_NDAC);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_NDAC, data | aic32x4_divs[i].ndac);
+ snd_soc_update_bits(codec, AIC32X4_NDAC,
+ AIC32X4_NDAC_MASK, aic32x4_divs[i].ndac);
/* MDAC divider value */
- data = snd_soc_read(codec, AIC32X4_MDAC);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_MDAC, data | aic32x4_divs[i].mdac);
+ snd_soc_update_bits(codec, AIC32X4_MDAC,
+ AIC32X4_MDAC_MASK, aic32x4_divs[i].mdac);
/* DOSR MSB & LSB values */
snd_soc_write(codec, AIC32X4_DOSRMSB, aic32x4_divs[i].dosr >> 8);
- snd_soc_write(codec, AIC32X4_DOSRLSB,
- (aic32x4_divs[i].dosr & 0xff));
+ snd_soc_write(codec, AIC32X4_DOSRLSB, (aic32x4_divs[i].dosr & 0xff));
/* NADC divider value */
- data = snd_soc_read(codec, AIC32X4_NADC);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_NADC, data | aic32x4_divs[i].nadc);
+ snd_soc_update_bits(codec, AIC32X4_NADC,
+ AIC32X4_NADC_MASK, aic32x4_divs[i].nadc);
/* MADC divider value */
- data = snd_soc_read(codec, AIC32X4_MADC);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_MADC, data | aic32x4_divs[i].madc);
+ snd_soc_update_bits(codec, AIC32X4_MADC,
+ AIC32X4_MADC_MASK, aic32x4_divs[i].madc);
/* AOSR value */
snd_soc_write(codec, AIC32X4_AOSR, aic32x4_divs[i].aosr);
/* BCLK N divider */
- data = snd_soc_read(codec, AIC32X4_BCLKN);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_BCLKN, data | aic32x4_divs[i].blck_N);
+ snd_soc_update_bits(codec, AIC32X4_BCLKN,
+ AIC32X4_BCLK_MASK, aic32x4_divs[i].blck_N);
- data = snd_soc_read(codec, AIC32X4_IFACE1);
- data = data & ~(3 << 4);
switch (params_width(params)) {
case 16:
+ iface1_reg |= (AIC32X4_WORD_LEN_16BITS <<
+ AIC32X4_IFACE1_DATALEN_SHIFT);
break;
case 20:
- data |= (AIC32X4_WORD_LEN_20BITS << AIC32X4_DOSRMSB_SHIFT);
+ iface1_reg |= (AIC32X4_WORD_LEN_20BITS <<
+ AIC32X4_IFACE1_DATALEN_SHIFT);
break;
case 24:
- data |= (AIC32X4_WORD_LEN_24BITS << AIC32X4_DOSRMSB_SHIFT);
+ iface1_reg |= (AIC32X4_WORD_LEN_24BITS <<
+ AIC32X4_IFACE1_DATALEN_SHIFT);
break;
case 32:
- data |= (AIC32X4_WORD_LEN_32BITS << AIC32X4_DOSRMSB_SHIFT);
+ iface1_reg |= (AIC32X4_WORD_LEN_32BITS <<
+ AIC32X4_IFACE1_DATALEN_SHIFT);
break;
}
- snd_soc_write(codec, AIC32X4_IFACE1, data);
+ snd_soc_update_bits(codec, AIC32X4_IFACE1,
+ AIC32X4_IFACE1_DATALEN_MASK, iface1_reg);
if (params_channels(params) == 1) {
- data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
+ dacsetup_reg = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
} else {
if (aic32x4->swapdacs)
- data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
+ dacsetup_reg = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
else
- data = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
+ dacsetup_reg = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
}
- snd_soc_update_bits(codec, AIC32X4_DACSETUP, AIC32X4_DAC_CHAN_MASK,
- data);
+ snd_soc_update_bits(codec, AIC32X4_DACSETUP,
+ AIC32X4_DAC_CHAN_MASK, dacsetup_reg);
return 0;
}
@@ -766,13 +773,10 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
static int aic32x4_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u8 dac_reg;
- dac_reg = snd_soc_read(codec, AIC32X4_DACMUTE) & ~AIC32X4_MUTEON;
- if (mute)
- snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg | AIC32X4_MUTEON);
- else
- snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg);
+ snd_soc_update_bits(codec, AIC32X4_DACMUTE,
+ AIC32X4_MUTEON, mute ? AIC32X4_MUTEON : 0);
+
return 0;
}
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index da7cec482bcb..e9df49edbf19 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -19,141 +19,189 @@ int aic32x4_remove(struct device *dev);
/* tlv320aic32x4 register space (in decimal to match datasheet) */
-#define AIC32X4_PAGE1 128
-
-#define AIC32X4_PSEL 0
-#define AIC32X4_RESET 1
-#define AIC32X4_CLKMUX 4
-#define AIC32X4_PLLPR 5
-#define AIC32X4_PLLJ 6
-#define AIC32X4_PLLDMSB 7
-#define AIC32X4_PLLDLSB 8
-#define AIC32X4_NDAC 11
-#define AIC32X4_MDAC 12
-#define AIC32X4_DOSRMSB 13
-#define AIC32X4_DOSRLSB 14
-#define AIC32X4_NADC 18
-#define AIC32X4_MADC 19
-#define AIC32X4_AOSR 20
-#define AIC32X4_CLKMUX2 25
-#define AIC32X4_CLKOUTM 26
-#define AIC32X4_IFACE1 27
-#define AIC32X4_IFACE2 28
-#define AIC32X4_IFACE3 29
-#define AIC32X4_BCLKN 30
-#define AIC32X4_IFACE4 31
-#define AIC32X4_IFACE5 32
-#define AIC32X4_IFACE6 33
-#define AIC32X4_GPIOCTL 52
-#define AIC32X4_DOUTCTL 53
-#define AIC32X4_DINCTL 54
-#define AIC32X4_MISOCTL 55
-#define AIC32X4_SCLKCTL 56
-#define AIC32X4_DACSPB 60
-#define AIC32X4_ADCSPB 61
-#define AIC32X4_DACSETUP 63
-#define AIC32X4_DACMUTE 64
-#define AIC32X4_LDACVOL 65
-#define AIC32X4_RDACVOL 66
-#define AIC32X4_ADCSETUP 81
-#define AIC32X4_ADCFGA 82
-#define AIC32X4_LADCVOL 83
-#define AIC32X4_RADCVOL 84
-#define AIC32X4_LAGC1 86
-#define AIC32X4_LAGC2 87
-#define AIC32X4_LAGC3 88
-#define AIC32X4_LAGC4 89
-#define AIC32X4_LAGC5 90
-#define AIC32X4_LAGC6 91
-#define AIC32X4_LAGC7 92
-#define AIC32X4_RAGC1 94
-#define AIC32X4_RAGC2 95
-#define AIC32X4_RAGC3 96
-#define AIC32X4_RAGC4 97
-#define AIC32X4_RAGC5 98
-#define AIC32X4_RAGC6 99
-#define AIC32X4_RAGC7 100
-#define AIC32X4_PWRCFG (AIC32X4_PAGE1 + 1)
-#define AIC32X4_LDOCTL (AIC32X4_PAGE1 + 2)
-#define AIC32X4_OUTPWRCTL (AIC32X4_PAGE1 + 9)
-#define AIC32X4_CMMODE (AIC32X4_PAGE1 + 10)
-#define AIC32X4_HPLROUTE (AIC32X4_PAGE1 + 12)
-#define AIC32X4_HPRROUTE (AIC32X4_PAGE1 + 13)
-#define AIC32X4_LOLROUTE (AIC32X4_PAGE1 + 14)
-#define AIC32X4_LORROUTE (AIC32X4_PAGE1 + 15)
-#define AIC32X4_HPLGAIN (AIC32X4_PAGE1 + 16)
-#define AIC32X4_HPRGAIN (AIC32X4_PAGE1 + 17)
-#define AIC32X4_LOLGAIN (AIC32X4_PAGE1 + 18)
-#define AIC32X4_LORGAIN (AIC32X4_PAGE1 + 19)
-#define AIC32X4_HEADSTART (AIC32X4_PAGE1 + 20)
-#define AIC32X4_MICBIAS (AIC32X4_PAGE1 + 51)
-#define AIC32X4_LMICPGAPIN (AIC32X4_PAGE1 + 52)
-#define AIC32X4_LMICPGANIN (AIC32X4_PAGE1 + 54)
-#define AIC32X4_RMICPGAPIN (AIC32X4_PAGE1 + 55)
-#define AIC32X4_RMICPGANIN (AIC32X4_PAGE1 + 57)
-#define AIC32X4_FLOATINGINPUT (AIC32X4_PAGE1 + 58)
-#define AIC32X4_LMICPGAVOL (AIC32X4_PAGE1 + 59)
-#define AIC32X4_RMICPGAVOL (AIC32X4_PAGE1 + 60)
-
-#define AIC32X4_FREQ_12000000 12000000
-#define AIC32X4_FREQ_24000000 24000000
-#define AIC32X4_FREQ_25000000 25000000
-
-#define AIC32X4_WORD_LEN_16BITS 0x00
-#define AIC32X4_WORD_LEN_20BITS 0x01
-#define AIC32X4_WORD_LEN_24BITS 0x02
-#define AIC32X4_WORD_LEN_32BITS 0x03
-
-#define AIC32X4_LADC_EN (1 << 7)
-#define AIC32X4_RADC_EN (1 << 6)
-
-#define AIC32X4_I2S_MODE 0x00
-#define AIC32X4_DSP_MODE 0x01
-#define AIC32X4_RIGHT_JUSTIFIED_MODE 0x02
-#define AIC32X4_LEFT_JUSTIFIED_MODE 0x03
-
-#define AIC32X4_AVDDWEAKDISABLE 0x08
-#define AIC32X4_LDOCTLEN 0x01
-
-#define AIC32X4_LDOIN_18_36 0x01
-#define AIC32X4_LDOIN2HP 0x02
-
-#define AIC32X4_DACSPBLOCK_MASK 0x1f
-#define AIC32X4_ADCSPBLOCK_MASK 0x1f
-
-#define AIC32X4_PLLJ_SHIFT 6
-#define AIC32X4_DOSRMSB_SHIFT 4
-
-#define AIC32X4_PLLCLKIN 0x03
-
-#define AIC32X4_MICBIAS_LDOIN 0x08
+#define AIC32X4_REG(page, reg) ((page * 128) + reg)
+
+#define AIC32X4_PSEL AIC32X4_REG(0, 0)
+
+#define AIC32X4_RESET AIC32X4_REG(0, 1)
+#define AIC32X4_CLKMUX AIC32X4_REG(0, 4)
+#define AIC32X4_PLLPR AIC32X4_REG(0, 5)
+#define AIC32X4_PLLJ AIC32X4_REG(0, 6)
+#define AIC32X4_PLLDMSB AIC32X4_REG(0, 7)
+#define AIC32X4_PLLDLSB AIC32X4_REG(0, 8)
+#define AIC32X4_NDAC AIC32X4_REG(0, 11)
+#define AIC32X4_MDAC AIC32X4_REG(0, 12)
+#define AIC32X4_DOSRMSB AIC32X4_REG(0, 13)
+#define AIC32X4_DOSRLSB AIC32X4_REG(0, 14)
+#define AIC32X4_NADC AIC32X4_REG(0, 18)
+#define AIC32X4_MADC AIC32X4_REG(0, 19)
+#define AIC32X4_AOSR AIC32X4_REG(0, 20)
+#define AIC32X4_CLKMUX2 AIC32X4_REG(0, 25)
+#define AIC32X4_CLKOUTM AIC32X4_REG(0, 26)
+#define AIC32X4_IFACE1 AIC32X4_REG(0, 27)
+#define AIC32X4_IFACE2 AIC32X4_REG(0, 28)
+#define AIC32X4_IFACE3 AIC32X4_REG(0, 29)
+#define AIC32X4_BCLKN AIC32X4_REG(0, 30)
+#define AIC32X4_IFACE4 AIC32X4_REG(0, 31)
+#define AIC32X4_IFACE5 AIC32X4_REG(0, 32)
+#define AIC32X4_IFACE6 AIC32X4_REG(0, 33)
+#define AIC32X4_GPIOCTL AIC32X4_REG(0, 52)
+#define AIC32X4_DOUTCTL AIC32X4_REG(0, 53)
+#define AIC32X4_DINCTL AIC32X4_REG(0, 54)
+#define AIC32X4_MISOCTL AIC32X4_REG(0, 55)
+#define AIC32X4_SCLKCTL AIC32X4_REG(0, 56)
+#define AIC32X4_DACSPB AIC32X4_REG(0, 60)
+#define AIC32X4_ADCSPB AIC32X4_REG(0, 61)
+#define AIC32X4_DACSETUP AIC32X4_REG(0, 63)
+#define AIC32X4_DACMUTE AIC32X4_REG(0, 64)
+#define AIC32X4_LDACVOL AIC32X4_REG(0, 65)
+#define AIC32X4_RDACVOL AIC32X4_REG(0, 66)
+#define AIC32X4_ADCSETUP AIC32X4_REG(0, 81)
+#define AIC32X4_ADCFGA AIC32X4_REG(0, 82)
+#define AIC32X4_LADCVOL AIC32X4_REG(0, 83)
+#define AIC32X4_RADCVOL AIC32X4_REG(0, 84)
+#define AIC32X4_LAGC1 AIC32X4_REG(0, 86)
+#define AIC32X4_LAGC2 AIC32X4_REG(0, 87)
+#define AIC32X4_LAGC3 AIC32X4_REG(0, 88)
+#define AIC32X4_LAGC4 AIC32X4_REG(0, 89)
+#define AIC32X4_LAGC5 AIC32X4_REG(0, 90)
+#define AIC32X4_LAGC6 AIC32X4_REG(0, 91)
+#define AIC32X4_LAGC7 AIC32X4_REG(0, 92)
+#define AIC32X4_RAGC1 AIC32X4_REG(0, 94)
+#define AIC32X4_RAGC2 AIC32X4_REG(0, 95)
+#define AIC32X4_RAGC3 AIC32X4_REG(0, 96)
+#define AIC32X4_RAGC4 AIC32X4_REG(0, 97)
+#define AIC32X4_RAGC5 AIC32X4_REG(0, 98)
+#define AIC32X4_RAGC6 AIC32X4_REG(0, 99)
+#define AIC32X4_RAGC7 AIC32X4_REG(0, 100)
+
+#define AIC32X4_PWRCFG AIC32X4_REG(1, 1)
+#define AIC32X4_LDOCTL AIC32X4_REG(1, 2)
+#define AIC32X4_OUTPWRCTL AIC32X4_REG(1, 9)
+#define AIC32X4_CMMODE AIC32X4_REG(1, 10)
+#define AIC32X4_HPLROUTE AIC32X4_REG(1, 12)
+#define AIC32X4_HPRROUTE AIC32X4_REG(1, 13)
+#define AIC32X4_LOLROUTE AIC32X4_REG(1, 14)
+#define AIC32X4_LORROUTE AIC32X4_REG(1, 15)
+#define AIC32X4_HPLGAIN AIC32X4_REG(1, 16)
+#define AIC32X4_HPRGAIN AIC32X4_REG(1, 17)
+#define AIC32X4_LOLGAIN AIC32X4_REG(1, 18)
+#define AIC32X4_LORGAIN AIC32X4_REG(1, 19)
+#define AIC32X4_HEADSTART AIC32X4_REG(1, 20)
+#define AIC32X4_MICBIAS AIC32X4_REG(1, 51)
+#define AIC32X4_LMICPGAPIN AIC32X4_REG(1, 52)
+#define AIC32X4_LMICPGANIN AIC32X4_REG(1, 54)
+#define AIC32X4_RMICPGAPIN AIC32X4_REG(1, 55)
+#define AIC32X4_RMICPGANIN AIC32X4_REG(1, 57)
+#define AIC32X4_FLOATINGINPUT AIC32X4_REG(1, 58)
+#define AIC32X4_LMICPGAVOL AIC32X4_REG(1, 59)
+#define AIC32X4_RMICPGAVOL AIC32X4_REG(1, 60)
+
+/* Bits, masks, and shifts */
+
+/* AIC32X4_CLKMUX */
+#define AIC32X4_PLL_CLKIN_MASK GENMASK(3, 2)
+#define AIC32X4_PLL_CLKIN_SHIFT (2)
+#define AIC32X4_PLL_CLKIN_MCLK (0x00)
+#define AIC32X4_PLL_CLKIN_BCKL (0x01)
+#define AIC32X4_PLL_CLKIN_GPIO1 (0x02)
+#define AIC32X4_PLL_CLKIN_DIN (0x03)
+#define AIC32X4_CODEC_CLKIN_MASK GENMASK(1, 0)
+#define AIC32X4_CODEC_CLKIN_SHIFT (0)
+#define AIC32X4_CODEC_CLKIN_MCLK (0x00)
+#define AIC32X4_CODEC_CLKIN_BCLK (0x01)
+#define AIC32X4_CODEC_CLKIN_GPIO1 (0x02)
+#define AIC32X4_CODEC_CLKIN_PLL (0x03)
+
+/* AIC32X4_PLLPR */
+#define AIC32X4_PLLEN BIT(7)
+#define AIC32X4_PLL_P_MASK GENMASK(6, 4)
+#define AIC32X4_PLL_P_SHIFT (4)
+#define AIC32X4_PLL_R_MASK GENMASK(3, 0)
+
+/* AIC32X4_NDAC */
+#define AIC32X4_NDACEN BIT(7)
+#define AIC32X4_NDAC_MASK GENMASK(6, 0)
+
+/* AIC32X4_MDAC */
+#define AIC32X4_MDACEN BIT(7)
+#define AIC32X4_MDAC_MASK GENMASK(6, 0)
+
+/* AIC32X4_NADC */
+#define AIC32X4_NADCEN BIT(7)
+#define AIC32X4_NADC_MASK GENMASK(6, 0)
+
+/* AIC32X4_MADC */
+#define AIC32X4_MADCEN BIT(7)
+#define AIC32X4_MADC_MASK GENMASK(6, 0)
+
+/* AIC32X4_BCLKN */
+#define AIC32X4_BCLKEN BIT(7)
+#define AIC32X4_BCLK_MASK GENMASK(6, 0)
+
+/* AIC32X4_IFACE1 */
+#define AIC32X4_IFACE1_DATATYPE_MASK GENMASK(7, 6)
+#define AIC32X4_IFACE1_DATATYPE_SHIFT (6)
+#define AIC32X4_I2S_MODE (0x00)
+#define AIC32X4_DSP_MODE (0x01)
+#define AIC32X4_RIGHT_JUSTIFIED_MODE (0x02)
+#define AIC32X4_LEFT_JUSTIFIED_MODE (0x03)
+#define AIC32X4_IFACE1_DATALEN_MASK GENMASK(5, 4)
+#define AIC32X4_IFACE1_DATALEN_SHIFT (4)
+#define AIC32X4_WORD_LEN_16BITS (0x00)
+#define AIC32X4_WORD_LEN_20BITS (0x01)
+#define AIC32X4_WORD_LEN_24BITS (0x02)
+#define AIC32X4_WORD_LEN_32BITS (0x03)
+#define AIC32X4_IFACE1_MASTER_MASK GENMASK(3, 2)
+#define AIC32X4_BCLKMASTER BIT(2)
+#define AIC32X4_WCLKMASTER BIT(3)
+
+/* AIC32X4_IFACE2 */
+#define AIC32X4_DATA_OFFSET_MASK GENMASK(7, 0)
+
+/* AIC32X4_IFACE3 */
+#define AIC32X4_BCLKINV_MASK BIT(3)
+#define AIC32X4_BDIVCLK_MASK GENMASK(1, 0)
+#define AIC32X4_BDIVCLK_SHIFT (0)
+#define AIC32X4_DAC2BCLK (0x00)
+#define AIC32X4_DACMOD2BCLK (0x01)
+#define AIC32X4_ADC2BCLK (0x02)
+#define AIC32X4_ADCMOD2BCLK (0x03)
+
+/* AIC32X4_DACSETUP */
+#define AIC32X4_DAC_CHAN_MASK GENMASK(5, 2)
+#define AIC32X4_LDAC2RCHN BIT(5)
+#define AIC32X4_LDAC2LCHN BIT(4)
+#define AIC32X4_RDAC2LCHN BIT(3)
+#define AIC32X4_RDAC2RCHN BIT(2)
+
+/* AIC32X4_DACMUTE */
+#define AIC32X4_MUTEON 0x0C
+
+/* AIC32X4_ADCSETUP */
+#define AIC32X4_LADC_EN BIT(7)
+#define AIC32X4_RADC_EN BIT(6)
+
+/* AIC32X4_PWRCFG */
+#define AIC32X4_AVDDWEAKDISABLE BIT(3)
+
+/* AIC32X4_LDOCTL */
+#define AIC32X4_LDOCTLEN BIT(0)
+
+/* AIC32X4_CMMODE */
+#define AIC32X4_LDOIN_18_36 BIT(0)
+#define AIC32X4_LDOIN2HP BIT(1)
+
+/* AIC32X4_MICBIAS */
+#define AIC32X4_MICBIAS_LDOIN BIT(3)
#define AIC32X4_MICBIAS_2075V 0x60
+/* AIC32X4_LMICPGANIN */
#define AIC32X4_LMICPGANIN_IN2R_10K 0x10
#define AIC32X4_LMICPGANIN_CM1L_10K 0x40
+
+/* AIC32X4_RMICPGANIN */
#define AIC32X4_RMICPGANIN_IN1L_10K 0x10
#define AIC32X4_RMICPGANIN_CM1R_10K 0x40
-#define AIC32X4_LMICPGAVOL_NOGAIN 0x80
-#define AIC32X4_RMICPGAVOL_NOGAIN 0x80
-
-#define AIC32X4_BCLKMASTER 0x08
-#define AIC32X4_WCLKMASTER 0x04
-#define AIC32X4_PLLEN (0x01 << 7)
-#define AIC32X4_NDACEN (0x01 << 7)
-#define AIC32X4_MDACEN (0x01 << 7)
-#define AIC32X4_NADCEN (0x01 << 7)
-#define AIC32X4_MADCEN (0x01 << 7)
-#define AIC32X4_BCLKEN (0x01 << 7)
-#define AIC32X4_DACEN (0x03 << 6)
-#define AIC32X4_RDAC2LCHN (0x02 << 2)
-#define AIC32X4_LDAC2RCHN (0x02 << 4)
-#define AIC32X4_LDAC2LCHN (0x01 << 4)
-#define AIC32X4_RDAC2RCHN (0x01 << 2)
-#define AIC32X4_DAC_CHAN_MASK 0x3c
-
-#define AIC32X4_SSTEP2WCLK 0x01
-#define AIC32X4_MUTEON 0x0C
-#define AIC32X4_DACMOD2BCLK 0x01
-
#endif /* _TLV320AIC32X4_H */
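An editorial note on the AIC32X4_REG(page, reg) scheme above: the
codec's register map is paged, 128 registers per page, and the macro
flattens a (page, reg) pair into the single linear address regmap
works with:

	/* page 1, register 51:  1 * 128 + 51 == 179 == AIC32X4_MICBIAS,
	 * matching the old "(AIC32X4_PAGE1 + 51)" arithmetic exactly */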
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 06f92571eba4..b751cad545da 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1804,11 +1804,18 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
if (!ai3x_setup)
return -ENOMEM;
- ret = of_get_named_gpio(np, "gpio-reset", 0);
- if (ret >= 0)
+ ret = of_get_named_gpio(np, "reset-gpios", 0);
+ if (ret >= 0) {
aic3x->gpio_reset = ret;
- else
- aic3x->gpio_reset = -1;
+ } else {
+ ret = of_get_named_gpio(np, "gpio-reset", 0);
+ if (ret > 0) {
+ dev_warn(&i2c->dev, "Using deprecated property \"gpio-reset\", please update your DT\n");
+ aic3x->gpio_reset = ret;
+ } else {
+ aic3x->gpio_reset = -1;
+ }
+ }
if (of_property_read_u32_array(np, "ai3x-gpio-func",
ai3x_setup->gpio_func, 2) >= 0) {
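An editorial note on the fallback above: this is the usual flow for
deprecating a devicetree property -- the canonical "reset-gpios" name
is tried first, and the legacy "gpio-reset" is still honoured, with a
dev_warn() nudge, so existing device trees keep probing unchanged.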
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index 5b94a151539c..8c71d2f876ff 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -106,6 +106,7 @@ struct tlv320dac33_priv {
int mode1_latency; /* latency caused by the i2c writes in
* us */
u8 burst_bclkdiv; /* BCLK divider value in burst mode */
+ u8 *reg_cache;
unsigned int burst_rate; /* Interface speed in Burst modes */
int keep_bclk; /* Keep the BCLK continuously running
@@ -121,7 +122,7 @@ struct tlv320dac33_priv {
unsigned int uthr;
enum dac33_state state;
- void *control_data;
+ struct i2c_client *i2c;
};
static const u8 dac33_reg[DAC33_CACHEREGNUM] = {
@@ -173,7 +174,8 @@ static const u8 dac33_reg[DAC33_CACHEREGNUM] = {
static inline unsigned int dac33_read_reg_cache(struct snd_soc_codec *codec,
unsigned reg)
{
- u8 *cache = codec->reg_cache;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+ u8 *cache = dac33->reg_cache;
if (reg >= DAC33_CACHEREGNUM)
return 0;
@@ -183,7 +185,8 @@ static inline unsigned int dac33_read_reg_cache(struct snd_soc_codec *codec,
static inline void dac33_write_reg_cache(struct snd_soc_codec *codec,
u8 reg, u8 value)
{
- u8 *cache = codec->reg_cache;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+ u8 *cache = dac33->reg_cache;
if (reg >= DAC33_CACHEREGNUM)
return;
@@ -200,7 +203,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
/* If powered off, return the cached value */
if (dac33->chip_power) {
- val = i2c_smbus_read_byte_data(codec->control_data, value[0]);
+ val = i2c_smbus_read_byte_data(dac33->i2c, value[0]);
if (val < 0) {
dev_err(codec->dev, "Read failed (%d)\n", val);
value[0] = dac33_read_reg_cache(codec, reg);
@@ -233,7 +236,7 @@ static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
dac33_write_reg_cache(codec, data[0], data[1]);
if (dac33->chip_power) {
- ret = codec->hw_write(codec->control_data, data, 2);
+ ret = i2c_master_send(dac33->i2c, data, 2);
if (ret != 2)
dev_err(codec->dev, "Write failed (%d)\n", ret);
else
@@ -244,7 +247,7 @@ static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
}
static int dac33_write_locked(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
+ unsigned int value)
{
struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
int ret;
@@ -280,7 +283,7 @@ static int dac33_write16(struct snd_soc_codec *codec, unsigned int reg,
if (dac33->chip_power) {
/* We need to set autoincrement mode for 16 bit writes */
data[0] |= DAC33_I2C_ADDR_AUTOINC;
- ret = codec->hw_write(codec->control_data, data, 3);
+ ret = i2c_master_send(dac33->i2c, data, 3);
if (ret != 3)
dev_err(codec->dev, "Write failed (%d)\n", ret);
else
@@ -1379,8 +1382,6 @@ static int dac33_soc_probe(struct snd_soc_codec *codec)
struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
- codec->control_data = dac33->control_data;
- codec->hw_write = (hw_write_t) i2c_master_send;
dac33->codec = codec;
/* Read the tlv320dac33 ID registers */
@@ -1438,9 +1439,7 @@ static const struct snd_soc_codec_driver soc_codec_dev_tlv320dac33 = {
.write = dac33_write_locked,
.set_bias_level = dac33_set_bias_level,
.idle_bias_off = true,
- .reg_cache_size = ARRAY_SIZE(dac33_reg),
- .reg_word_size = sizeof(u8),
- .reg_cache_default = dac33_reg,
+
.probe = dac33_soc_probe,
.remove = dac33_soc_remove,
@@ -1499,7 +1498,14 @@ static int dac33_i2c_probe(struct i2c_client *client,
if (dac33 == NULL)
return -ENOMEM;
- dac33->control_data = client;
+ dac33->reg_cache = devm_kmemdup(&client->dev,
+ dac33_reg,
+ ARRAY_SIZE(dac33_reg) * sizeof(u8),
+ GFP_KERNEL);
+ if (!dac33->reg_cache)
+ return -ENOMEM;
+
+ dac33->i2c = client;
mutex_init(&dac33->mutex);
spin_lock_init(&dac33->lock);
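A sketch of the pattern adopted above, now that the ASoC core's legacy
reg_cache_* fields are gone: the driver owns its register cache, seeded
from the ROM default table at probe (sizeof(dac33_reg) is equivalent to
the ARRAY_SIZE(...) * sizeof(u8) spelled out in the hunk):

	dac33->reg_cache = devm_kmemdup(&client->dev, dac33_reg,
					sizeof(dac33_reg), GFP_KERNEL);

	/* dac33_read()/dac33_write() keep this buffer current and fall
	 * back to it whenever dac33->chip_power is false */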
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 738e04b09116..1271e7e1fc78 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -241,7 +241,7 @@ int ts3a227e_enable_jack_detect(struct snd_soc_component *component,
{
struct ts3a227e *ts3a227e = snd_soc_component_get_drvdata(component);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/codecs/tscs42xx.c b/sound/soc/codecs/tscs42xx.c
new file mode 100644
index 000000000000..e7661d0315e6
--- /dev/null
+++ b/sound/soc/codecs/tscs42xx.c
@@ -0,0 +1,1456 @@
+// SPDX-License-Identifier: GPL-2.0
+// tscs42xx.c -- TSCS42xx ALSA SoC Audio driver
+// Copyright 2017 Tempo Semiconductor, Inc.
+// Author: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tscs42xx.h"
+
+#define COEFF_SIZE 3
+#define BIQUAD_COEFF_COUNT 5
+#define BIQUAD_SIZE (COEFF_SIZE * BIQUAD_COEFF_COUNT)
+
+#define COEFF_RAM_MAX_ADDR 0xcd
+#define COEFF_RAM_COEFF_COUNT (COEFF_RAM_MAX_ADDR + 1)
+#define COEFF_RAM_SIZE (COEFF_SIZE * COEFF_RAM_COEFF_COUNT)
+
+struct tscs42xx {
+
+ int bclk_ratio;
+ int samplerate;
+ unsigned int blrcm;
+ struct mutex audio_params_lock;
+
+ u8 coeff_ram[COEFF_RAM_SIZE];
+ bool coeff_ram_synced;
+ struct mutex coeff_ram_lock;
+
+ struct mutex pll_lock;
+
+ struct regmap *regmap;
+
+ struct device *dev;
+};
+
+struct coeff_ram_ctl {
+ unsigned int addr;
+ struct soc_bytes_ext bytes_ext;
+};
+
+static bool tscs42xx_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case R_DACCRWRL:
+ case R_DACCRWRM:
+ case R_DACCRWRH:
+ case R_DACCRRDL:
+ case R_DACCRRDM:
+ case R_DACCRRDH:
+ case R_DACCRSTAT:
+ case R_DACCRADDR:
+ case R_PLLCTL0:
+ return true;
+ default:
+ return false;
+	}
+}
+
+static bool tscs42xx_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case R_DACCRWRL:
+ case R_DACCRWRM:
+ case R_DACCRWRH:
+ case R_DACCRRDL:
+ case R_DACCRRDM:
+ case R_DACCRRDH:
+ return true;
+ default:
+ return false;
+	}
+}
+
+static const struct regmap_config tscs42xx_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .volatile_reg = tscs42xx_volatile,
+ .precious_reg = tscs42xx_precious,
+ .max_register = R_DACMBCREL3H,
+
+ .cache_type = REGCACHE_RBTREE,
+ .can_multi_write = true,
+};
+
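+/*
+ * Poll the PLL lock status in R_PLLCTL0, sleeping 20 ms between reads.
+ * Returns true as soon as a non-zero (locked) status is seen.
+ */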
+#define MAX_PLL_LOCK_20MS_WAITS 1
+static bool plls_locked(struct snd_soc_codec *codec)
+{
+ int ret;
+ int count = MAX_PLL_LOCK_20MS_WAITS;
+
+ do {
+ ret = snd_soc_read(codec, R_PLLCTL0);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to read PLL lock status (%d)\n", ret);
+ return false;
+ } else if (ret > 0) {
+ return true;
+ }
+ msleep(20);
+ } while (count--);
+
+ return false;
+}
+
+static int sample_rate_to_pll_freq_out(int sample_rate)
+{
+ switch (sample_rate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ return 112896000;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 96000:
+ return 122880000;
+ default:
+ return -EINVAL;
+ }
+}
+
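+/*
+ * Write coeff_cnt coefficients to the DAC coefficient RAM starting at addr.
+ * Each transfer waits for R_DACCRSTAT to report idle, programs the target
+ * address in R_DACCRADDR, then bulk-writes the three coefficient bytes
+ * through R_DACCRWRL/M/H.
+ */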
+#define DACCRSTAT_MAX_TRYS 10
+static int write_coeff_ram(struct snd_soc_codec *codec, u8 *coeff_ram,
+ unsigned int addr, unsigned int coeff_cnt)
+{
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ int cnt;
+ int trys;
+ int ret;
+
+ for (cnt = 0; cnt < coeff_cnt; cnt++, addr++) {
+
+ for (trys = 0; trys < DACCRSTAT_MAX_TRYS; trys++) {
+ ret = snd_soc_read(codec, R_DACCRSTAT);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to read stat (%d)\n", ret);
+ return ret;
+ }
+ if (!ret)
+ break;
+ }
+
+ if (trys == DACCRSTAT_MAX_TRYS) {
+ ret = -EIO;
+ dev_err(codec->dev,
+ "dac coefficient write error (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(tscs42xx->regmap, R_DACCRADDR, addr);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to write dac ram address (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regmap_bulk_write(tscs42xx->regmap, R_DACCRWRL,
+ &coeff_ram[addr * COEFF_SIZE],
+ COEFF_SIZE);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to write dac ram (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
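+/*
+ * Power up the PLL that serves the current sample-rate family: PLL1 for
+ * 48 kHz multiples (122.88 MHz), PLL2 for 44.1 kHz multiples (112.896 MHz),
+ * then wait for lock.
+ */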
+static int power_up_audio_plls(struct snd_soc_codec *codec)
+{
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ int freq_out;
+ int ret;
+ unsigned int mask;
+ unsigned int val;
+
+ freq_out = sample_rate_to_pll_freq_out(tscs42xx->samplerate);
+ switch (freq_out) {
+ case 122880000: /* 48k */
+ mask = RM_PLLCTL1C_PDB_PLL1;
+ val = RV_PLLCTL1C_PDB_PLL1_ENABLE;
+ break;
+ case 112896000: /* 44.1k */
+ mask = RM_PLLCTL1C_PDB_PLL2;
+ val = RV_PLLCTL1C_PDB_PLL2_ENABLE;
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(codec->dev, "Unrecognized PLL output freq (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&tscs42xx->pll_lock);
+
+ ret = snd_soc_update_bits(codec, R_PLLCTL1C, mask, val);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to turn PLL on (%d)\n", ret);
+ goto exit;
+ }
+
+ if (!plls_locked(codec)) {
+ dev_err(codec->dev, "Failed to lock plls\n");
+ ret = -ENOMSG;
+ goto exit;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&tscs42xx->pll_lock);
+
+ return ret;
+}
+
+static int power_down_audio_plls(struct snd_soc_codec *codec)
+{
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ mutex_lock(&tscs42xx->pll_lock);
+
+ ret = snd_soc_update_bits(codec, R_PLLCTL1C,
+ RM_PLLCTL1C_PDB_PLL1,
+ RV_PLLCTL1C_PDB_PLL1_DISABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to turn PLL off (%d)\n", ret);
+ goto exit;
+ }
+ ret = snd_soc_update_bits(codec, R_PLLCTL1C,
+ RM_PLLCTL1C_PDB_PLL2,
+ RV_PLLCTL1C_PDB_PLL2_DISABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to turn PLL off (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&tscs42xx->pll_lock);
+
+ return ret;
+}
+
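+/*
+ * The coefficient RAM is shadowed in coeff_ram[]. Reads come straight from
+ * the shadow; writes update the shadow and, if the PLLs are locked, are
+ * flushed to the device immediately. Otherwise the shadow is marked dirty
+ * and synced the next time the DAC powers up (see dac_event()).
+ */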
+static int coeff_ram_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ struct coeff_ram_ctl *ctl =
+ (struct coeff_ram_ctl *)kcontrol->private_value;
+ struct soc_bytes_ext *params = &ctl->bytes_ext;
+
+ mutex_lock(&tscs42xx->coeff_ram_lock);
+
+ memcpy(ucontrol->value.bytes.data,
+ &tscs42xx->coeff_ram[ctl->addr * COEFF_SIZE], params->max);
+
+ mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+ return 0;
+}
+
+static int coeff_ram_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ struct coeff_ram_ctl *ctl =
+ (struct coeff_ram_ctl *)kcontrol->private_value;
+ struct soc_bytes_ext *params = &ctl->bytes_ext;
+ unsigned int coeff_cnt = params->max / COEFF_SIZE;
+ int ret;
+
+ mutex_lock(&tscs42xx->coeff_ram_lock);
+
+ tscs42xx->coeff_ram_synced = false;
+
+ memcpy(&tscs42xx->coeff_ram[ctl->addr * COEFF_SIZE],
+ ucontrol->value.bytes.data, params->max);
+
+ mutex_lock(&tscs42xx->pll_lock);
+
+ if (plls_locked(codec)) {
+ ret = write_coeff_ram(codec, tscs42xx->coeff_ram,
+ ctl->addr, coeff_cnt);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to flush coeff ram cache (%d)\n", ret);
+ goto exit;
+ }
+ tscs42xx->coeff_ram_synced = true;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&tscs42xx->pll_lock);
+
+ mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+ return ret;
+}
+
+/* Input L Capture Route */
+static char const * const input_select_text[] = {
+ "Line 1", "Line 2", "Line 3", "D2S"
+};
+
+static const struct soc_enum left_input_select_enum =
+SOC_ENUM_SINGLE(R_INSELL, FB_INSELL, ARRAY_SIZE(input_select_text),
+ input_select_text);
+
+static const struct snd_kcontrol_new left_input_select =
+SOC_DAPM_ENUM("LEFT_INPUT_SELECT_ENUM", left_input_select_enum);
+
+/* Input R Capture Route */
+static const struct soc_enum right_input_select_enum =
+SOC_ENUM_SINGLE(R_INSELR, FB_INSELR, ARRAY_SIZE(input_select_text),
+ input_select_text);
+
+static const struct snd_kcontrol_new right_input_select =
+SOC_DAPM_ENUM("RIGHT_INPUT_SELECT_ENUM", right_input_select_enum);
+
+/* Input Channel Mapping */
+static char const * const ch_map_select_text[] = {
+ "Normal", "Left to Right", "Right to Left", "Swap"
+};
+
+static const struct soc_enum ch_map_select_enum =
+SOC_ENUM_SINGLE(R_AIC2, FB_AIC2_ADCDSEL, ARRAY_SIZE(ch_map_select_text),
+ ch_map_select_text);
+
+static int dapm_vref_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ msleep(20);
+ return 0;
+}
+
+static int dapm_micb_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ msleep(20);
+ return 0;
+}
+
+static int pll_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ int ret;
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ ret = power_up_audio_plls(codec);
+ else
+ ret = power_down_audio_plls(codec);
+
+ return ret;
+}
+
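+/* Flush the coefficient RAM shadow to the device on DAC power-up */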
+static int dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ mutex_lock(&tscs42xx->coeff_ram_lock);
+
+	if (!tscs42xx->coeff_ram_synced) {
+ ret = write_coeff_ram(codec, tscs42xx->coeff_ram, 0x00,
+ COEFF_RAM_COEFF_COUNT);
+ if (ret < 0)
+ goto exit;
+ tscs42xx->coeff_ram_synced = true;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget tscs42xx_dapm_widgets[] = {
+ /* Vref */
+ SND_SOC_DAPM_SUPPLY_S("Vref", 1, R_PWRM2, FB_PWRM2_VREF, 0,
+ dapm_vref_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+ /* PLL */
+ SND_SOC_DAPM_SUPPLY("PLL", SND_SOC_NOPM, 0, 0, pll_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* Headphone */
+ SND_SOC_DAPM_DAC_E("DAC L", "HiFi Playback", R_PWRM2, FB_PWRM2_HPL, 0,
+ dac_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_DAC_E("DAC R", "HiFi Playback", R_PWRM2, FB_PWRM2_HPR, 0,
+ dac_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_OUTPUT("Headphone L"),
+ SND_SOC_DAPM_OUTPUT("Headphone R"),
+
+ /* Speaker */
+ SND_SOC_DAPM_DAC_E("ClassD L", "HiFi Playback",
+ R_PWRM2, FB_PWRM2_SPKL, 0,
+ dac_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_DAC_E("ClassD R", "HiFi Playback",
+ R_PWRM2, FB_PWRM2_SPKR, 0,
+ dac_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_OUTPUT("Speaker L"),
+ SND_SOC_DAPM_OUTPUT("Speaker R"),
+
+ /* Capture */
+ SND_SOC_DAPM_PGA("Analog In PGA L", R_PWRM1, FB_PWRM1_PGAL, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Analog In PGA R", R_PWRM1, FB_PWRM1_PGAR, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Analog Boost L", R_PWRM1, FB_PWRM1_BSTL, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Analog Boost R", R_PWRM1, FB_PWRM1_BSTR, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("ADC Mute", R_CNVRTR0, FB_CNVRTR0_HPOR, true, NULL, 0),
+ SND_SOC_DAPM_ADC("ADC L", "HiFi Capture", R_PWRM1, FB_PWRM1_ADCL, 0),
+ SND_SOC_DAPM_ADC("ADC R", "HiFi Capture", R_PWRM1, FB_PWRM1_ADCR, 0),
+
+ /* Capture Input */
+ SND_SOC_DAPM_MUX("Input L Capture Route", R_PWRM2,
+ FB_PWRM2_INSELL, 0, &left_input_select),
+ SND_SOC_DAPM_MUX("Input R Capture Route", R_PWRM2,
+ FB_PWRM2_INSELR, 0, &right_input_select),
+
+ /* Digital Mic */
+ SND_SOC_DAPM_SUPPLY_S("Digital Mic Enable", 2, R_DMICCTL,
+ FB_DMICCTL_DMICEN, 0, NULL,
+ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+ /* Analog Mic */
+ SND_SOC_DAPM_SUPPLY_S("Mic Bias", 2, R_PWRM1, FB_PWRM1_MICB,
+ 0, dapm_micb_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+ /* Line In */
+ SND_SOC_DAPM_INPUT("Line In 1 L"),
+ SND_SOC_DAPM_INPUT("Line In 1 R"),
+ SND_SOC_DAPM_INPUT("Line In 2 L"),
+ SND_SOC_DAPM_INPUT("Line In 2 R"),
+ SND_SOC_DAPM_INPUT("Line In 3 L"),
+ SND_SOC_DAPM_INPUT("Line In 3 R"),
+};
+
+static const struct snd_soc_dapm_route tscs42xx_intercon[] = {
+ {"DAC L", NULL, "PLL"},
+ {"DAC R", NULL, "PLL"},
+ {"DAC L", NULL, "Vref"},
+ {"DAC R", NULL, "Vref"},
+ {"Headphone L", NULL, "DAC L"},
+ {"Headphone R", NULL, "DAC R"},
+
+ {"ClassD L", NULL, "PLL"},
+ {"ClassD R", NULL, "PLL"},
+ {"ClassD L", NULL, "Vref"},
+ {"ClassD R", NULL, "Vref"},
+ {"Speaker L", NULL, "ClassD L"},
+ {"Speaker R", NULL, "ClassD R"},
+
+ {"Input L Capture Route", NULL, "Vref"},
+ {"Input R Capture Route", NULL, "Vref"},
+
+ {"Mic Bias", NULL, "Vref"},
+
+ {"Input L Capture Route", "Line 1", "Line In 1 L"},
+ {"Input R Capture Route", "Line 1", "Line In 1 R"},
+ {"Input L Capture Route", "Line 2", "Line In 2 L"},
+ {"Input R Capture Route", "Line 2", "Line In 2 R"},
+ {"Input L Capture Route", "Line 3", "Line In 3 L"},
+ {"Input R Capture Route", "Line 3", "Line In 3 R"},
+
+ {"Analog In PGA L", NULL, "Input L Capture Route"},
+ {"Analog In PGA R", NULL, "Input R Capture Route"},
+ {"Analog Boost L", NULL, "Analog In PGA L"},
+ {"Analog Boost R", NULL, "Analog In PGA R"},
+ {"ADC Mute", NULL, "Analog Boost L"},
+ {"ADC Mute", NULL, "Analog Boost R"},
+ {"ADC L", NULL, "PLL"},
+ {"ADC R", NULL, "PLL"},
+ {"ADC L", NULL, "ADC Mute"},
+ {"ADC R", NULL, "ADC Mute"},
+};
+
+/************
+ * CONTROLS *
+ ************/
+
+static char const * const eq_band_enable_text[] = {
+ "Prescale only",
+ "Band1",
+ "Band1:2",
+ "Band1:3",
+ "Band1:4",
+ "Band1:5",
+ "Band1:6",
+};
+
+static char const * const level_detection_text[] = {
+ "Average",
+ "Peak",
+};
+
+static char const * const level_detection_window_text[] = {
+ "512 Samples",
+ "64 Samples",
+};
+
+static char const * const compressor_ratio_text[] = {
+ "Reserved", "1.5:1", "2:1", "3:1", "4:1", "5:1", "6:1",
+ "7:1", "8:1", "9:1", "10:1", "11:1", "12:1", "13:1", "14:1",
+ "15:1", "16:1", "17:1", "18:1", "19:1", "20:1",
+};
+
+static DECLARE_TLV_DB_SCALE(hpvol_scale, -8850, 75, 0);
+static DECLARE_TLV_DB_SCALE(spkvol_scale, -7725, 75, 0);
+static DECLARE_TLV_DB_SCALE(dacvol_scale, -9563, 38, 0);
+static DECLARE_TLV_DB_SCALE(adcvol_scale, -7125, 38, 0);
+static DECLARE_TLV_DB_SCALE(invol_scale, -1725, 75, 0);
+static DECLARE_TLV_DB_SCALE(mic_boost_scale, 0, 1000, 0);
+static DECLARE_TLV_DB_MINMAX(mugain_scale, 0, 4650);
+static DECLARE_TLV_DB_MINMAX(compth_scale, -9562, 0);
+
+static const struct soc_enum eq1_band_enable_enum =
+ SOC_ENUM_SINGLE(R_CONFIG1, FB_CONFIG1_EQ1_BE,
+ ARRAY_SIZE(eq_band_enable_text), eq_band_enable_text);
+
+static const struct soc_enum eq2_band_enable_enum =
+ SOC_ENUM_SINGLE(R_CONFIG1, FB_CONFIG1_EQ2_BE,
+ ARRAY_SIZE(eq_band_enable_text), eq_band_enable_text);
+
+static const struct soc_enum cle_level_detection_enum =
+ SOC_ENUM_SINGLE(R_CLECTL, FB_CLECTL_LVL_MODE,
+ ARRAY_SIZE(level_detection_text),
+ level_detection_text);
+
+static const struct soc_enum cle_level_detection_window_enum =
+ SOC_ENUM_SINGLE(R_CLECTL, FB_CLECTL_WINDOWSEL,
+ ARRAY_SIZE(level_detection_window_text),
+ level_detection_window_text);
+
+static const struct soc_enum mbc_level_detection_enums[] = {
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE1,
+ ARRAY_SIZE(level_detection_text),
+ level_detection_text),
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE2,
+ ARRAY_SIZE(level_detection_text),
+ level_detection_text),
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE3,
+ ARRAY_SIZE(level_detection_text),
+ level_detection_text),
+};
+
+static const struct soc_enum mbc_level_detection_window_enums[] = {
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL1,
+ ARRAY_SIZE(level_detection_window_text),
+ level_detection_window_text),
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL2,
+ ARRAY_SIZE(level_detection_window_text),
+ level_detection_window_text),
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL3,
+ ARRAY_SIZE(level_detection_window_text),
+ level_detection_window_text),
+};
+
+static const struct soc_enum compressor_ratio_enum =
+ SOC_ENUM_SINGLE(R_CMPRAT, FB_CMPRAT,
+ ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc1_compressor_ratio_enum =
+ SOC_ENUM_SINGLE(R_DACMBCRAT1, FB_DACMBCRAT1_RATIO,
+ ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc2_compressor_ratio_enum =
+ SOC_ENUM_SINGLE(R_DACMBCRAT2, FB_DACMBCRAT2_RATIO,
+ ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc3_compressor_ratio_enum =
+ SOC_ENUM_SINGLE(R_DACMBCRAT3, FB_DACMBCRAT3_RATIO,
+ ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
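+/*
+ * Info callback shared by the coefficient RAM bytes controls; the byte
+ * count comes from the soc_bytes_ext embedded in the control's
+ * coeff_ram_ctl.
+ */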
+static int bytes_info_ext(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *ucontrol)
+{
+ struct coeff_ram_ctl *ctl =
+ (struct coeff_ram_ctl *)kcontrol->private_value;
+ struct soc_bytes_ext *params = &ctl->bytes_ext;
+
+ ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ ucontrol->count = params->max;
+
+ return 0;
+}
+
+#define COEFF_RAM_CTL(xname, xcount, xaddr) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = bytes_info_ext, \
+ .get = coeff_ram_get, .put = coeff_ram_put, \
+ .private_value = (unsigned long)&(struct coeff_ram_ctl) { \
+ .addr = xaddr, \
+ .bytes_ext = {.max = xcount, }, \
+ } \
+}
+
+static const struct snd_kcontrol_new tscs42xx_snd_controls[] = {
+ /* Volumes */
+ SOC_DOUBLE_R_TLV("Headphone Playback Volume", R_HPVOLL, R_HPVOLR,
+ FB_HPVOLL, 0x7F, 0, hpvol_scale),
+ SOC_DOUBLE_R_TLV("Speaker Playback Volume", R_SPKVOLL, R_SPKVOLR,
+ FB_SPKVOLL, 0x7F, 0, spkvol_scale),
+ SOC_DOUBLE_R_TLV("Master Playback Volume", R_DACVOLL, R_DACVOLR,
+ FB_DACVOLL, 0xFF, 0, dacvol_scale),
+ SOC_DOUBLE_R_TLV("PCM Capture Volume", R_ADCVOLL, R_ADCVOLR,
+ FB_ADCVOLL, 0xFF, 0, adcvol_scale),
+ SOC_DOUBLE_R_TLV("Master Capture Volume", R_INVOLL, R_INVOLR,
+ FB_INVOLL, 0x3F, 0, invol_scale),
+
+ /* INSEL */
+ SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", R_INSELL, R_INSELR,
+ FB_INSELL_MICBSTL, FV_INSELL_MICBSTL_30DB,
+ 0, mic_boost_scale),
+
+ /* Input Channel Map */
+ SOC_ENUM("Input Channel Map", ch_map_select_enum),
+
+ /* Coefficient Ram */
+ COEFF_RAM_CTL("Cascade1L BiQuad1", BIQUAD_SIZE, 0x00),
+ COEFF_RAM_CTL("Cascade1L BiQuad2", BIQUAD_SIZE, 0x05),
+ COEFF_RAM_CTL("Cascade1L BiQuad3", BIQUAD_SIZE, 0x0a),
+ COEFF_RAM_CTL("Cascade1L BiQuad4", BIQUAD_SIZE, 0x0f),
+ COEFF_RAM_CTL("Cascade1L BiQuad5", BIQUAD_SIZE, 0x14),
+ COEFF_RAM_CTL("Cascade1L BiQuad6", BIQUAD_SIZE, 0x19),
+
+ COEFF_RAM_CTL("Cascade1R BiQuad1", BIQUAD_SIZE, 0x20),
+ COEFF_RAM_CTL("Cascade1R BiQuad2", BIQUAD_SIZE, 0x25),
+ COEFF_RAM_CTL("Cascade1R BiQuad3", BIQUAD_SIZE, 0x2a),
+ COEFF_RAM_CTL("Cascade1R BiQuad4", BIQUAD_SIZE, 0x2f),
+ COEFF_RAM_CTL("Cascade1R BiQuad5", BIQUAD_SIZE, 0x34),
+ COEFF_RAM_CTL("Cascade1R BiQuad6", BIQUAD_SIZE, 0x39),
+
+ COEFF_RAM_CTL("Cascade1L Prescale", COEFF_SIZE, 0x1f),
+ COEFF_RAM_CTL("Cascade1R Prescale", COEFF_SIZE, 0x3f),
+
+ COEFF_RAM_CTL("Cascade2L BiQuad1", BIQUAD_SIZE, 0x40),
+ COEFF_RAM_CTL("Cascade2L BiQuad2", BIQUAD_SIZE, 0x45),
+ COEFF_RAM_CTL("Cascade2L BiQuad3", BIQUAD_SIZE, 0x4a),
+ COEFF_RAM_CTL("Cascade2L BiQuad4", BIQUAD_SIZE, 0x4f),
+ COEFF_RAM_CTL("Cascade2L BiQuad5", BIQUAD_SIZE, 0x54),
+ COEFF_RAM_CTL("Cascade2L BiQuad6", BIQUAD_SIZE, 0x59),
+
+ COEFF_RAM_CTL("Cascade2R BiQuad1", BIQUAD_SIZE, 0x60),
+ COEFF_RAM_CTL("Cascade2R BiQuad2", BIQUAD_SIZE, 0x65),
+ COEFF_RAM_CTL("Cascade2R BiQuad3", BIQUAD_SIZE, 0x6a),
+ COEFF_RAM_CTL("Cascade2R BiQuad4", BIQUAD_SIZE, 0x6f),
+ COEFF_RAM_CTL("Cascade2R BiQuad5", BIQUAD_SIZE, 0x74),
+ COEFF_RAM_CTL("Cascade2R BiQuad6", BIQUAD_SIZE, 0x79),
+
+ COEFF_RAM_CTL("Cascade2L Prescale", COEFF_SIZE, 0x5f),
+ COEFF_RAM_CTL("Cascade2R Prescale", COEFF_SIZE, 0x7f),
+
+ COEFF_RAM_CTL("Bass Extraction BiQuad1", BIQUAD_SIZE, 0x80),
+ COEFF_RAM_CTL("Bass Extraction BiQuad2", BIQUAD_SIZE, 0x85),
+
+ COEFF_RAM_CTL("Bass Non Linear Function 1", COEFF_SIZE, 0x8a),
+ COEFF_RAM_CTL("Bass Non Linear Function 2", COEFF_SIZE, 0x8b),
+
+ COEFF_RAM_CTL("Bass Limiter BiQuad", BIQUAD_SIZE, 0x8c),
+
+ COEFF_RAM_CTL("Bass Cut Off BiQuad", BIQUAD_SIZE, 0x91),
+
+ COEFF_RAM_CTL("Bass Mix", COEFF_SIZE, 0x96),
+
+ COEFF_RAM_CTL("Treb Extraction BiQuad1", BIQUAD_SIZE, 0x97),
+ COEFF_RAM_CTL("Treb Extraction BiQuad2", BIQUAD_SIZE, 0x9c),
+
+ COEFF_RAM_CTL("Treb Non Linear Function 1", COEFF_SIZE, 0xa1),
+ COEFF_RAM_CTL("Treb Non Linear Function 2", COEFF_SIZE, 0xa2),
+
+ COEFF_RAM_CTL("Treb Limiter BiQuad", BIQUAD_SIZE, 0xa3),
+
+ COEFF_RAM_CTL("Treb Cut Off BiQuad", BIQUAD_SIZE, 0xa8),
+
+ COEFF_RAM_CTL("Treb Mix", COEFF_SIZE, 0xad),
+
+ COEFF_RAM_CTL("3D", COEFF_SIZE, 0xae),
+
+ COEFF_RAM_CTL("3D Mix", COEFF_SIZE, 0xaf),
+
+ COEFF_RAM_CTL("MBC1 BiQuad1", BIQUAD_SIZE, 0xb0),
+ COEFF_RAM_CTL("MBC1 BiQuad2", BIQUAD_SIZE, 0xb5),
+
+ COEFF_RAM_CTL("MBC2 BiQuad1", BIQUAD_SIZE, 0xba),
+ COEFF_RAM_CTL("MBC2 BiQuad2", BIQUAD_SIZE, 0xbf),
+
+ COEFF_RAM_CTL("MBC3 BiQuad1", BIQUAD_SIZE, 0xc4),
+ COEFF_RAM_CTL("MBC3 BiQuad2", BIQUAD_SIZE, 0xc9),
+
+ /* EQ */
+ SOC_SINGLE("EQ1 Switch", R_CONFIG1, FB_CONFIG1_EQ1_EN, 1, 0),
+ SOC_SINGLE("EQ2 Switch", R_CONFIG1, FB_CONFIG1_EQ2_EN, 1, 0),
+ SOC_ENUM("EQ1 Band Enable", eq1_band_enable_enum),
+ SOC_ENUM("EQ2 Band Enable", eq2_band_enable_enum),
+
+ /* CLE */
+ SOC_ENUM("CLE Level Detect",
+ cle_level_detection_enum),
+ SOC_ENUM("CLE Level Detect Win",
+ cle_level_detection_window_enum),
+ SOC_SINGLE("Expander Switch",
+ R_CLECTL, FB_CLECTL_EXP_EN, 1, 0),
+ SOC_SINGLE("Limiter Switch",
+ R_CLECTL, FB_CLECTL_LIMIT_EN, 1, 0),
+ SOC_SINGLE("Comp Switch",
+ R_CLECTL, FB_CLECTL_COMP_EN, 1, 0),
+ SOC_SINGLE_TLV("CLE Make-Up Gain Playback Volume",
+ R_MUGAIN, FB_MUGAIN_CLEMUG, 0x1f, 0, mugain_scale),
+ SOC_SINGLE_TLV("Comp Thresh Playback Volume",
+ R_COMPTH, FB_COMPTH, 0xff, 0, compth_scale),
+ SOC_ENUM("Comp Ratio", compressor_ratio_enum),
+ SND_SOC_BYTES("Comp Atk Time", R_CATKTCL, 2),
+
+ /* Effects */
+ SOC_SINGLE("3D Switch", R_FXCTL, FB_FXCTL_3DEN, 1, 0),
+ SOC_SINGLE("Treble Switch", R_FXCTL, FB_FXCTL_TEEN, 1, 0),
+ SOC_SINGLE("Treble Bypass Switch", R_FXCTL, FB_FXCTL_TNLFBYPASS, 1, 0),
+ SOC_SINGLE("Bass Switch", R_FXCTL, FB_FXCTL_BEEN, 1, 0),
+ SOC_SINGLE("Bass Bypass Switch", R_FXCTL, FB_FXCTL_BNLFBYPASS, 1, 0),
+
+ /* MBC */
+ SOC_SINGLE("MBC Band1 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN1, 1, 0),
+ SOC_SINGLE("MBC Band2 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN2, 1, 0),
+ SOC_SINGLE("MBC Band3 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN3, 1, 0),
+ SOC_ENUM("MBC Band1 Level Detect",
+ mbc_level_detection_enums[0]),
+ SOC_ENUM("MBC Band2 Level Detect",
+ mbc_level_detection_enums[1]),
+ SOC_ENUM("MBC Band3 Level Detect",
+ mbc_level_detection_enums[2]),
+ SOC_ENUM("MBC Band1 Level Detect Win",
+ mbc_level_detection_window_enums[0]),
+ SOC_ENUM("MBC Band2 Level Detect Win",
+ mbc_level_detection_window_enums[1]),
+ SOC_ENUM("MBC Band3 Level Detect Win",
+ mbc_level_detection_window_enums[2]),
+
+ SOC_SINGLE("MBC1 Phase Invert Switch",
+ R_DACMBCMUG1, FB_DACMBCMUG1_PHASE, 1, 0),
+ SOC_SINGLE_TLV("DAC MBC1 Make-Up Gain Playback Volume",
+ R_DACMBCMUG1, FB_DACMBCMUG1_MUGAIN, 0x1f, 0, mugain_scale),
+ SOC_SINGLE_TLV("DAC MBC1 Comp Thresh Playback Volume",
+ R_DACMBCTHR1, FB_DACMBCTHR1_THRESH, 0xff, 0, compth_scale),
+ SOC_ENUM("DAC MBC1 Comp Ratio",
+ dac_mbc1_compressor_ratio_enum),
+ SND_SOC_BYTES("DAC MBC1 Comp Atk Time", R_DACMBCATK1L, 2),
+ SND_SOC_BYTES("DAC MBC1 Comp Rel Time Const",
+ R_DACMBCREL1L, 2),
+
+ SOC_SINGLE("MBC2 Phase Invert Switch",
+ R_DACMBCMUG2, FB_DACMBCMUG2_PHASE, 1, 0),
+ SOC_SINGLE_TLV("DAC MBC2 Make-Up Gain Playback Volume",
+ R_DACMBCMUG2, FB_DACMBCMUG2_MUGAIN, 0x1f, 0, mugain_scale),
+ SOC_SINGLE_TLV("DAC MBC2 Comp Thresh Playback Volume",
+ R_DACMBCTHR2, FB_DACMBCTHR2_THRESH, 0xff, 0, compth_scale),
+ SOC_ENUM("DAC MBC2 Comp Ratio",
+ dac_mbc2_compressor_ratio_enum),
+ SND_SOC_BYTES("DAC MBC2 Comp Atk Time", R_DACMBCATK2L, 2),
+ SND_SOC_BYTES("DAC MBC2 Comp Rel Time Const",
+ R_DACMBCREL2L, 2),
+
+ SOC_SINGLE("MBC3 Phase Invert Switch",
+ R_DACMBCMUG3, FB_DACMBCMUG3_PHASE, 1, 0),
+ SOC_SINGLE_TLV("DAC MBC3 Make-Up Gain Playback Volume",
+ R_DACMBCMUG3, FB_DACMBCMUG3_MUGAIN, 0x1f, 0, mugain_scale),
+ SOC_SINGLE_TLV("DAC MBC3 Comp Thresh Playback Volume",
+ R_DACMBCTHR3, FB_DACMBCTHR3_THRESH, 0xff, 0, compth_scale),
+ SOC_ENUM("DAC MBC3 Comp Ratio",
+ dac_mbc3_compressor_ratio_enum),
+ SND_SOC_BYTES("DAC MBC3 Comp Atk Time", R_DACMBCATK3L, 2),
+ SND_SOC_BYTES("DAC MBC3 Comp Rel Time Const",
+ R_DACMBCREL3L, 2),
+};
+
+static int setup_sample_format(struct snd_soc_codec *codec,
+ snd_pcm_format_t format)
+{
+ unsigned int width;
+ int ret;
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ width = RV_AIC1_WL_16;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ width = RV_AIC1_WL_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ width = RV_AIC1_WL_24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ width = RV_AIC1_WL_32;
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(codec->dev, "Unsupported format width (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_AIC1, RM_AIC1_WL, width);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set sample width (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
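+/*
+ * Each supported rate is programmed as a base rate (32 k, 44.1 k or 48 k)
+ * plus a multiplier (0.25x, 0.5x, 1x or 2x). The DAC and ADC share the bit
+ * and frame clocks, so both R_DACSR and R_ADCSR are updated.
+ */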
+static int setup_sample_rate(struct snd_soc_codec *codec, unsigned int rate)
+{
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ unsigned int br, bm;
+ int ret;
+
+ switch (rate) {
+ case 8000:
+ br = RV_DACSR_DBR_32;
+ bm = RV_DACSR_DBM_PT25;
+ break;
+ case 16000:
+ br = RV_DACSR_DBR_32;
+ bm = RV_DACSR_DBM_PT5;
+ break;
+ case 24000:
+ br = RV_DACSR_DBR_48;
+ bm = RV_DACSR_DBM_PT5;
+ break;
+ case 32000:
+ br = RV_DACSR_DBR_32;
+ bm = RV_DACSR_DBM_1;
+ break;
+ case 48000:
+ br = RV_DACSR_DBR_48;
+ bm = RV_DACSR_DBM_1;
+ break;
+ case 96000:
+ br = RV_DACSR_DBR_48;
+ bm = RV_DACSR_DBM_2;
+ break;
+ case 11025:
+ br = RV_DACSR_DBR_44_1;
+ bm = RV_DACSR_DBM_PT25;
+ break;
+ case 22050:
+ br = RV_DACSR_DBR_44_1;
+ bm = RV_DACSR_DBM_PT5;
+ break;
+ case 44100:
+ br = RV_DACSR_DBR_44_1;
+ bm = RV_DACSR_DBM_1;
+ break;
+ case 88200:
+ br = RV_DACSR_DBR_44_1;
+ bm = RV_DACSR_DBM_2;
+ break;
+ default:
+ dev_err(codec->dev, "Unsupported sample rate %d\n", rate);
+ return -EINVAL;
+ }
+
+ /* DAC and ADC share bit and frame clock */
+ ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBR, br);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBM, bm);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_ADCSR, RM_DACSR_DBR, br);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_ADCSR, RM_DACSR_DBM, bm);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&tscs42xx->audio_params_lock);
+
+ tscs42xx->samplerate = rate;
+
+ mutex_unlock(&tscs42xx->audio_params_lock);
+
+ return 0;
+}
+
+struct reg_setting {
+ unsigned int addr;
+ unsigned int val;
+ unsigned int mask;
+};
+
+#define PLL_REG_SETTINGS_COUNT 13
+struct pll_ctl {
+ int input_freq;
+ struct reg_setting settings[PLL_REG_SETTINGS_COUNT];
+};
+
+#define PLL_CTL(f, rt, rd, r1b_l, r9, ra, rb, \
+ rc, r12, r1b_h, re, rf, r10, r11) \
+ { \
+ .input_freq = f, \
+ .settings = { \
+ {R_TIMEBASE, rt, 0xFF}, \
+ {R_PLLCTLD, rd, 0xFF}, \
+ {R_PLLCTL1B, r1b_l, 0x0F}, \
+ {R_PLLCTL9, r9, 0xFF}, \
+ {R_PLLCTLA, ra, 0xFF}, \
+ {R_PLLCTLB, rb, 0xFF}, \
+ {R_PLLCTLC, rc, 0xFF}, \
+ {R_PLLCTL12, r12, 0xFF}, \
+ {R_PLLCTL1B, r1b_h, 0xF0}, \
+ {R_PLLCTLE, re, 0xFF}, \
+ {R_PLLCTLF, rf, 0xFF}, \
+ {R_PLLCTL10, r10, 0xFF}, \
+ {R_PLLCTL11, r11, 0xFF}, \
+ }, \
+ }
+
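+/* Register settings for each supported PLL reference frequency */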
+static const struct pll_ctl pll_ctls[] = {
+ PLL_CTL(1411200, 0x05,
+ 0x39, 0x04, 0x07, 0x02, 0xC3, 0x04,
+ 0x1B, 0x10, 0x03, 0x03, 0xD0, 0x02),
+ PLL_CTL(1536000, 0x05,
+ 0x1A, 0x04, 0x02, 0x03, 0xE0, 0x01,
+ 0x1A, 0x10, 0x02, 0x03, 0xB9, 0x01),
+ PLL_CTL(2822400, 0x0A,
+ 0x23, 0x04, 0x07, 0x04, 0xC3, 0x04,
+ 0x22, 0x10, 0x05, 0x03, 0x58, 0x02),
+ PLL_CTL(3072000, 0x0B,
+ 0x22, 0x04, 0x07, 0x03, 0x48, 0x03,
+ 0x1A, 0x10, 0x04, 0x03, 0xB9, 0x01),
+ PLL_CTL(5644800, 0x15,
+ 0x23, 0x04, 0x0E, 0x04, 0xC3, 0x04,
+ 0x1A, 0x10, 0x08, 0x03, 0xE0, 0x01),
+ PLL_CTL(6144000, 0x17,
+ 0x1A, 0x04, 0x08, 0x03, 0xE0, 0x01,
+ 0x1A, 0x10, 0x08, 0x03, 0xB9, 0x01),
+ PLL_CTL(12000000, 0x2E,
+ 0x1B, 0x04, 0x19, 0x03, 0x00, 0x03,
+ 0x2A, 0x10, 0x19, 0x05, 0x98, 0x04),
+ PLL_CTL(19200000, 0x4A,
+ 0x13, 0x04, 0x14, 0x03, 0x80, 0x01,
+ 0x1A, 0x10, 0x19, 0x03, 0xB9, 0x01),
+ PLL_CTL(22000000, 0x55,
+ 0x2A, 0x04, 0x37, 0x05, 0x00, 0x06,
+ 0x22, 0x10, 0x26, 0x03, 0x49, 0x02),
+ PLL_CTL(22579200, 0x57,
+ 0x22, 0x04, 0x31, 0x03, 0x20, 0x03,
+ 0x1A, 0x10, 0x1D, 0x03, 0xB3, 0x01),
+ PLL_CTL(24000000, 0x5D,
+ 0x13, 0x04, 0x19, 0x03, 0x80, 0x01,
+ 0x1B, 0x10, 0x19, 0x05, 0x4C, 0x02),
+ PLL_CTL(24576000, 0x5F,
+ 0x13, 0x04, 0x1D, 0x03, 0xB3, 0x01,
+ 0x22, 0x10, 0x40, 0x03, 0x72, 0x03),
+ PLL_CTL(27000000, 0x68,
+ 0x22, 0x04, 0x4B, 0x03, 0x00, 0x04,
+ 0x2A, 0x10, 0x7D, 0x03, 0x20, 0x06),
+ PLL_CTL(36000000, 0x8C,
+ 0x1B, 0x04, 0x4B, 0x03, 0x00, 0x03,
+ 0x2A, 0x10, 0x7D, 0x03, 0x98, 0x04),
+ PLL_CTL(25000000, 0x61,
+ 0x1B, 0x04, 0x37, 0x03, 0x2B, 0x03,
+ 0x1A, 0x10, 0x2A, 0x03, 0x39, 0x02),
+ PLL_CTL(26000000, 0x65,
+ 0x23, 0x04, 0x41, 0x05, 0x00, 0x06,
+ 0x1A, 0x10, 0x26, 0x03, 0xEF, 0x01),
+ PLL_CTL(12288000, 0x2F,
+ 0x1A, 0x04, 0x12, 0x03, 0x1C, 0x02,
+ 0x22, 0x10, 0x20, 0x03, 0x72, 0x03),
+ PLL_CTL(40000000, 0x9B,
+ 0x22, 0x08, 0x7D, 0x03, 0x80, 0x04,
+ 0x23, 0x10, 0x7D, 0x05, 0xE4, 0x06),
+ PLL_CTL(512000, 0x01,
+ 0x22, 0x04, 0x01, 0x03, 0xD0, 0x02,
+ 0x1B, 0x10, 0x01, 0x04, 0x72, 0x03),
+ PLL_CTL(705600, 0x02,
+ 0x22, 0x04, 0x02, 0x03, 0x15, 0x04,
+ 0x22, 0x10, 0x01, 0x04, 0x80, 0x02),
+ PLL_CTL(1024000, 0x03,
+ 0x22, 0x04, 0x02, 0x03, 0xD0, 0x02,
+ 0x1B, 0x10, 0x02, 0x04, 0x72, 0x03),
+ PLL_CTL(2048000, 0x07,
+ 0x22, 0x04, 0x04, 0x03, 0xD0, 0x02,
+ 0x1B, 0x10, 0x04, 0x04, 0x72, 0x03),
+ PLL_CTL(2400000, 0x08,
+ 0x22, 0x04, 0x05, 0x03, 0x00, 0x03,
+ 0x23, 0x10, 0x05, 0x05, 0x98, 0x04),
+};
+
+static const struct pll_ctl *get_pll_ctl(int input_freq)
+{
+ int i;
+ const struct pll_ctl *pll_ctl = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(pll_ctls); ++i)
+ if (input_freq == pll_ctls[i].input_freq) {
+ pll_ctl = &pll_ctls[i];
+ break;
+ }
+
+ return pll_ctl;
+}
+
+static int set_pll_ctl_from_input_freq(struct snd_soc_codec *codec,
+ const int input_freq)
+{
+ int ret;
+ int i;
+ const struct pll_ctl *pll_ctl;
+
+ pll_ctl = get_pll_ctl(input_freq);
+ if (!pll_ctl) {
+ ret = -EINVAL;
+ dev_err(codec->dev, "No PLL input entry for %d (%d)\n",
+ input_freq, ret);
+ return ret;
+ }
+
+ for (i = 0; i < PLL_REG_SETTINGS_COUNT; ++i) {
+ ret = snd_soc_update_bits(codec,
+ pll_ctl->settings[i].addr,
+ pll_ctl->settings[i].mask,
+ pll_ctl->settings[i].val);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set pll ctl (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int tscs42xx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *codec_dai)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int ret;
+
+ ret = setup_sample_format(codec, params_format(params));
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to setup sample format (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = setup_sample_rate(codec, params_rate(params));
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to setup sample rate (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int dac_mute(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_update_bits(codec, R_CNVRTR1, RM_CNVRTR1_DACMU,
+ RV_CNVRTR1_DACMU_ENABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to mute DAC (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int dac_unmute(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_update_bits(codec, R_CNVRTR1, RM_CNVRTR1_DACMU,
+ RV_CNVRTR1_DACMU_DISABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to unmute DAC (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int adc_mute(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_update_bits(codec, R_CNVRTR0, RM_CNVRTR0_ADCMU,
+ RV_CNVRTR0_ADCMU_ENABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to mute ADC (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int adc_unmute(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_update_bits(codec, R_CNVRTR0, RM_CNVRTR0_ADCMU,
+ RV_CNVRTR0_ADCMU_DISABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to unmute ADC (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tscs42xx_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int ret;
+
+ if (mute)
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = dac_mute(codec);
+ else
+ ret = adc_mute(codec);
+ else
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = dac_unmute(codec);
+ else
+ ret = adc_unmute(codec);
+
+ return ret;
+}
+
+static int tscs42xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int ret;
+
+ /* Slave mode not supported since it needs always-on frame clock */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ ret = snd_soc_update_bits(codec, R_AIC1, RM_AIC1_MS,
+ RV_AIC1_MS_MASTER);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to set codec DAI master (%d)\n", ret);
+ return ret;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(codec->dev, "Unsupported format (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tscs42xx_set_dai_bclk_ratio(struct snd_soc_dai *codec_dai,
+ unsigned int ratio)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ unsigned int value;
+ int ret = 0;
+
+ switch (ratio) {
+ case 32:
+ value = RV_DACSR_DBCM_32;
+ break;
+ case 40:
+ value = RV_DACSR_DBCM_40;
+ break;
+ case 64:
+ value = RV_DACSR_DBCM_64;
+ break;
+ default:
+		dev_err(codec->dev, "Unsupported bclk ratio %u\n", ratio);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBCM, value);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set DAC BCLK ratio (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_ADCSR, RM_ADCSR_ABCM, value);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set ADC BCLK ratio (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&tscs42xx->audio_params_lock);
+
+ tscs42xx->bclk_ratio = ratio;
+
+ mutex_unlock(&tscs42xx->audio_params_lock);
+
+ return 0;
+}
+
+static int tscs42xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int ret;
+
+ switch (clk_id) {
+ case TSCS42XX_PLL_SRC_XTAL:
+ case TSCS42XX_PLL_SRC_MCLK1:
+ ret = snd_soc_write(codec, R_PLLREFSEL,
+ RV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 |
+ RV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to set pll reference input (%d)\n",
+ ret);
+ return ret;
+ }
+ break;
+ case TSCS42XX_PLL_SRC_MCLK2:
+ ret = snd_soc_write(codec, R_PLLREFSEL,
+ RV_PLLREFSEL_PLL1_REF_SEL_MCLK2 |
+ RV_PLLREFSEL_PLL2_REF_SEL_MCLK2);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to set PLL reference (%d)\n", ret);
+ return ret;
+ }
+ break;
+ default:
+ dev_err(codec->dev, "pll src is unsupported\n");
+ return -EINVAL;
+ }
+
+ ret = set_pll_ctl_from_input_freq(codec, freq);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to setup PLL input freq (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops tscs42xx_dai_ops = {
+ .hw_params = tscs42xx_hw_params,
+ .mute_stream = tscs42xx_mute_stream,
+ .set_fmt = tscs42xx_set_dai_fmt,
+ .set_bclk_ratio = tscs42xx_set_dai_bclk_ratio,
+ .set_sysclk = tscs42xx_set_dai_sysclk,
+};
+
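+/*
+ * Read the 16-bit device ID from R_DEVIDH/R_DEVIDL and check it against
+ * the known TSCS42xx part IDs. Returns true/false, or a negative errno on
+ * read failure.
+ */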
+static int part_is_valid(struct tscs42xx *tscs42xx)
+{
+ int val;
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_read(tscs42xx->regmap, R_DEVIDH, &reg);
+ if (ret < 0)
+ return ret;
+
+ val = reg << 8;
+ ret = regmap_read(tscs42xx->regmap, R_DEVIDL, &reg);
+ if (ret < 0)
+ return ret;
+
+ val |= reg;
+
+ switch (val) {
+ case 0x4A74:
+ case 0x4A73:
+ return true;
+ default:
+ return false;
+	}
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tscs42xx = {
+ .component_driver = {
+ .dapm_widgets = tscs42xx_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tscs42xx_dapm_widgets),
+ .dapm_routes = tscs42xx_intercon,
+ .num_dapm_routes = ARRAY_SIZE(tscs42xx_intercon),
+ .controls = tscs42xx_snd_controls,
+ .num_controls = ARRAY_SIZE(tscs42xx_snd_controls),
+ },
+};
+
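+/*
+ * Seed the shadow coefficient RAM: for each filter stage listed in
+ * norm_addrs[], set the high byte of its first coefficient to 0x40,
+ * presumably unity gain in the DSP's fixed-point format, so unconfigured
+ * stages pass audio through.
+ */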
+static inline void init_coeff_ram_cache(struct tscs42xx *tscs42xx)
+{
+ const u8 norm_addrs[] = { 0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1f,
+ 0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3f, 0x40, 0x45, 0x4a,
+ 0x4f, 0x54, 0x59, 0x5f, 0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79,
+ 0x7f, 0x80, 0x85, 0x8c, 0x91, 0x96, 0x97, 0x9c, 0xa3, 0xa8,
+ 0xad, 0xaf, 0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, };
+ u8 *coeff_ram = tscs42xx->coeff_ram;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(norm_addrs); i++)
+ coeff_ram[((norm_addrs[i] + 1) * COEFF_SIZE) - 1] = 0x40;
+}
+
+#define TSCS42XX_RATES SNDRV_PCM_RATE_8000_96000
+
+#define TSCS42XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
+ | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver tscs42xx_dai = {
+ .name = "tscs42xx-HiFi",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = TSCS42XX_RATES,
+ .formats = TSCS42XX_FORMATS,},
+ .capture = {
+ .stream_name = "HiFi Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = TSCS42XX_RATES,
+ .formats = TSCS42XX_FORMATS,},
+ .ops = &tscs42xx_dai_ops,
+ .symmetric_rates = 1,
+ .symmetric_channels = 1,
+ .symmetric_samplebits = 1,
+};
+
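+/*
+ * Register patch applied after reset; selects the shared DAC BCLK/LRCLK
+ * mode (RV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED).
+ */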
+static const struct reg_sequence tscs42xx_patch[] = {
+ { R_AIC2, RV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED },
+};
+
+static int tscs42xx_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tscs42xx *tscs42xx;
+ int ret = 0;
+
+ tscs42xx = devm_kzalloc(&i2c->dev, sizeof(*tscs42xx), GFP_KERNEL);
+ if (!tscs42xx) {
+ ret = -ENOMEM;
+ dev_err(&i2c->dev,
+ "Failed to allocate memory for data (%d)\n", ret);
+ return ret;
+ }
+ i2c_set_clientdata(i2c, tscs42xx);
+ tscs42xx->dev = &i2c->dev;
+
+ tscs42xx->regmap = devm_regmap_init_i2c(i2c, &tscs42xx_regmap);
+ if (IS_ERR(tscs42xx->regmap)) {
+ ret = PTR_ERR(tscs42xx->regmap);
+ dev_err(tscs42xx->dev, "Failed to allocate regmap (%d)\n", ret);
+ return ret;
+ }
+
+ init_coeff_ram_cache(tscs42xx);
+
+ ret = part_is_valid(tscs42xx);
+ if (ret <= 0) {
+ dev_err(tscs42xx->dev, "No valid part (%d)\n", ret);
+ ret = -ENODEV;
+ return ret;
+ }
+
+ ret = regmap_write(tscs42xx->regmap, R_RESET, RV_RESET_ENABLE);
+ if (ret < 0) {
+ dev_err(tscs42xx->dev, "Failed to reset device (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regmap_register_patch(tscs42xx->regmap, tscs42xx_patch,
+ ARRAY_SIZE(tscs42xx_patch));
+ if (ret < 0) {
+ dev_err(tscs42xx->dev, "Failed to apply patch (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_init(&tscs42xx->audio_params_lock);
+ mutex_init(&tscs42xx->coeff_ram_lock);
+ mutex_init(&tscs42xx->pll_lock);
+
+ ret = snd_soc_register_codec(tscs42xx->dev, &soc_codec_dev_tscs42xx,
+ &tscs42xx_dai, 1);
+ if (ret) {
+ dev_err(tscs42xx->dev, "Failed to register codec (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tscs42xx_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id tscs42xx_i2c_id[] = {
+ { "tscs42A1", 0 },
+ { "tscs42A2", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tscs42xx_i2c_id);
+
+static const struct of_device_id tscs42xx_of_match[] = {
+ { .compatible = "tempo,tscs42A1", },
+ { .compatible = "tempo,tscs42A2", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tscs42xx_of_match);
+
+static struct i2c_driver tscs42xx_i2c_driver = {
+ .driver = {
+ .name = "tscs42xx",
+ .owner = THIS_MODULE,
+ .of_match_table = tscs42xx_of_match,
+ },
+ .probe = tscs42xx_i2c_probe,
+ .remove = tscs42xx_i2c_remove,
+ .id_table = tscs42xx_i2c_id,
+};
+
+module_i2c_driver(tscs42xx_i2c_driver);
+
+MODULE_AUTHOR("Tempo Semiconductor <steven.eckhoff.opensource@gmail.com>");
+MODULE_DESCRIPTION("ASoC TSCS42xx driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tscs42xx.h b/sound/soc/codecs/tscs42xx.h
new file mode 100644
index 000000000000..d4a30bcbf64b
--- /dev/null
+++ b/sound/soc/codecs/tscs42xx.h
@@ -0,0 +1,2693 @@
+// SPDX-License-Identifier: GPL-2.0
+// tscs42xx.h -- TSCS42xx ALSA SoC Audio driver
+// Copyright 2017 Tempo Semiconductor, Inc.
+// Author: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+
+#ifndef __WOOKIE_H__
+#define __WOOKIE_H__
+
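+/* PLL reference clock sources, passed as clk_id to set_sysclk() */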
+enum {
+ TSCS42XX_PLL_SRC_NONE,
+ TSCS42XX_PLL_SRC_XTAL,
+ TSCS42XX_PLL_SRC_MCLK1,
+ TSCS42XX_PLL_SRC_MCLK2,
+};
+
+#define R_HPVOLL 0x0
+#define R_HPVOLR 0x1
+#define R_SPKVOLL 0x2
+#define R_SPKVOLR 0x3
+#define R_DACVOLL 0x4
+#define R_DACVOLR 0x5
+#define R_ADCVOLL 0x6
+#define R_ADCVOLR 0x7
+#define R_INVOLL 0x8
+#define R_INVOLR 0x9
+#define R_INMODE 0x0B
+#define R_INSELL 0x0C
+#define R_INSELR 0x0D
+#define R_AIC1 0x13
+#define R_AIC2 0x14
+#define R_CNVRTR0 0x16
+#define R_ADCSR 0x17
+#define R_CNVRTR1 0x18
+#define R_DACSR 0x19
+#define R_PWRM1 0x1A
+#define R_PWRM2 0x1B
+#define R_CONFIG0 0x1F
+#define R_CONFIG1 0x20
+#define R_DMICCTL 0x24
+#define R_CLECTL 0x25
+#define R_MUGAIN 0x26
+#define R_COMPTH 0x27
+#define R_CMPRAT 0x28
+#define R_CATKTCL 0x29
+#define R_CATKTCH 0x2A
+#define R_CRELTCL 0x2B
+#define R_CRELTCH 0x2C
+#define R_LIMTH 0x2D
+#define R_LIMTGT 0x2E
+#define R_LATKTCL 0x2F
+#define R_LATKTCH 0x30
+#define R_LRELTCL 0x31
+#define R_LRELTCH 0x32
+#define R_EXPTH 0x33
+#define R_EXPRAT 0x34
+#define R_XATKTCL 0x35
+#define R_XATKTCH 0x36
+#define R_XRELTCL 0x37
+#define R_XRELTCH 0x38
+#define R_FXCTL 0x39
+#define R_DACCRWRL 0x3A
+#define R_DACCRWRM 0x3B
+#define R_DACCRWRH 0x3C
+#define R_DACCRRDL 0x3D
+#define R_DACCRRDM 0x3E
+#define R_DACCRRDH 0x3F
+#define R_DACCRADDR 0x40
+#define R_DCOFSEL 0x41
+#define R_PLLCTL9 0x4E
+#define R_PLLCTLA 0x4F
+#define R_PLLCTLB 0x50
+#define R_PLLCTLC 0x51
+#define R_PLLCTLD 0x52
+#define R_PLLCTLE 0x53
+#define R_PLLCTLF 0x54
+#define R_PLLCTL10 0x55
+#define R_PLLCTL11 0x56
+#define R_PLLCTL12 0x57
+#define R_PLLCTL1B 0x60
+#define R_PLLCTL1C 0x61
+#define R_TIMEBASE 0x77
+#define R_DEVIDL 0x7D
+#define R_DEVIDH 0x7E
+#define R_RESET 0x80
+#define R_DACCRSTAT 0x8A
+#define R_PLLCTL0 0x8E
+#define R_PLLREFSEL 0x8F
+#define R_DACMBCEN 0xC7
+#define R_DACMBCCTL 0xC8
+#define R_DACMBCMUG1 0xC9
+#define R_DACMBCTHR1 0xCA
+#define R_DACMBCRAT1 0xCB
+#define R_DACMBCATK1L 0xCC
+#define R_DACMBCATK1H 0xCD
+#define R_DACMBCREL1L 0xCE
+#define R_DACMBCREL1H 0xCF
+#define R_DACMBCMUG2 0xD0
+#define R_DACMBCTHR2 0xD1
+#define R_DACMBCRAT2 0xD2
+#define R_DACMBCATK2L 0xD3
+#define R_DACMBCATK2H 0xD4
+#define R_DACMBCREL2L 0xD5
+#define R_DACMBCREL2H 0xD6
+#define R_DACMBCMUG3 0xD7
+#define R_DACMBCTHR3 0xD8
+#define R_DACMBCRAT3 0xD9
+#define R_DACMBCATK3L 0xDA
+#define R_DACMBCATK3H 0xDB
+#define R_DACMBCREL3L 0xDC
+#define R_DACMBCREL3H 0xDD
+
+/* Helpers: RM() positions a field mask, RV() a field value, at bit offset b */
+#define RM(m, b) ((m)<<(b))
+#define RV(v, b) ((v)<<(b))
+
+/****************************
+ * R_HPVOLL (0x0) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_HPVOLL 0
+
+/* Field Masks */
+#define FM_HPVOLL 0X7F
+
+/* Field Values */
+#define FV_HPVOLL_P6DB 0x7F
+#define FV_HPVOLL_N88PT5DB 0x1
+#define FV_HPVOLL_MUTE 0x0
+
+/* Register Masks */
+#define RM_HPVOLL RM(FM_HPVOLL, FB_HPVOLL)
+
+/* Register Values */
+#define RV_HPVOLL_P6DB RV(FV_HPVOLL_P6DB, FB_HPVOLL)
+#define RV_HPVOLL_N88PT5DB RV(FV_HPVOLL_N88PT5DB, FB_HPVOLL)
+#define RV_HPVOLL_MUTE RV(FV_HPVOLL_MUTE, FB_HPVOLL)
+
+/****************************
+ * R_HPVOLR (0x1) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_HPVOLR 0
+
+/* Field Masks */
+#define FM_HPVOLR 0X7F
+
+/* Field Values */
+#define FV_HPVOLR_P6DB 0x7F
+#define FV_HPVOLR_N88PT5DB 0x1
+#define FV_HPVOLR_MUTE 0x0
+
+/* Register Masks */
+#define RM_HPVOLR RM(FM_HPVOLR, FB_HPVOLR)
+
+/* Register Values */
+#define RV_HPVOLR_P6DB RV(FV_HPVOLR_P6DB, FB_HPVOLR)
+#define RV_HPVOLR_N88PT5DB RV(FV_HPVOLR_N88PT5DB, FB_HPVOLR)
+#define RV_HPVOLR_MUTE RV(FV_HPVOLR_MUTE, FB_HPVOLR)
+
+/*****************************
+ * R_SPKVOLL (0x2) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_SPKVOLL 0
+
+/* Field Masks */
+#define FM_SPKVOLL 0X7F
+
+/* Field Values */
+#define FV_SPKVOLL_P12DB 0x7F
+#define FV_SPKVOLL_N77PT25DB 0x8
+#define FV_SPKVOLL_MUTE 0x0
+
+/* Register Masks */
+#define RM_SPKVOLL RM(FM_SPKVOLL, FB_SPKVOLL)
+
+/* Register Values */
+#define RV_SPKVOLL_P12DB RV(FV_SPKVOLL_P12DB, FB_SPKVOLL)
+#define RV_SPKVOLL_N77PT25DB \
+ RV(FV_SPKVOLL_N77PT25DB, FB_SPKVOLL)
+
+#define RV_SPKVOLL_MUTE RV(FV_SPKVOLL_MUTE, FB_SPKVOLL)
+
+/*****************************
+ * R_SPKVOLR (0x3) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_SPKVOLR 0
+
+/* Field Masks */
+#define FM_SPKVOLR 0X7F
+
+/* Field Values */
+#define FV_SPKVOLR_P12DB 0x7F
+#define FV_SPKVOLR_N77PT25DB 0x8
+#define FV_SPKVOLR_MUTE 0x0
+
+/* Register Masks */
+#define RM_SPKVOLR RM(FM_SPKVOLR, FB_SPKVOLR)
+
+/* Register Values */
+#define RV_SPKVOLR_P12DB RV(FV_SPKVOLR_P12DB, FB_SPKVOLR)
+#define RV_SPKVOLR_N77PT25DB \
+ RV(FV_SPKVOLR_N77PT25DB, FB_SPKVOLR)
+
+#define RV_SPKVOLR_MUTE RV(FV_SPKVOLR_MUTE, FB_SPKVOLR)
+
+/*****************************
+ * R_DACVOLL (0x4) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DACVOLL 0
+
+/* Field Masks */
+#define FM_DACVOLL 0XFF
+
+/* Field Values */
+#define FV_DACVOLL_0DB 0xFF
+#define FV_DACVOLL_N95PT625DB 0x1
+#define FV_DACVOLL_MUTE 0x0
+
+/* Register Masks */
+#define RM_DACVOLL RM(FM_DACVOLL, FB_DACVOLL)
+
+/* Register Values */
+#define RV_DACVOLL_0DB RV(FV_DACVOLL_0DB, FB_DACVOLL)
+#define RV_DACVOLL_N95PT625DB \
+ RV(FV_DACVOLL_N95PT625DB, FB_DACVOLL)
+
+#define RV_DACVOLL_MUTE RV(FV_DACVOLL_MUTE, FB_DACVOLL)
+
+/*****************************
+ * R_DACVOLR (0x5) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DACVOLR 0
+
+/* Field Masks */
+#define FM_DACVOLR 0XFF
+
+/* Field Values */
+#define FV_DACVOLR_0DB 0xFF
+#define FV_DACVOLR_N95PT625DB 0x1
+#define FV_DACVOLR_MUTE 0x0
+
+/* Register Masks */
+#define RM_DACVOLR RM(FM_DACVOLR, FB_DACVOLR)
+
+/* Register Values */
+#define RV_DACVOLR_0DB RV(FV_DACVOLR_0DB, FB_DACVOLR)
+#define RV_DACVOLR_N95PT625DB \
+ RV(FV_DACVOLR_N95PT625DB, FB_DACVOLR)
+
+#define RV_DACVOLR_MUTE RV(FV_DACVOLR_MUTE, FB_DACVOLR)
+
+/*****************************
+ * R_ADCVOLL (0x6) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_ADCVOLL 0
+
+/* Field Masks */
+#define FM_ADCVOLL 0XFF
+
+/* Field Values */
+#define FV_ADCVOLL_P24DB 0xFF
+#define FV_ADCVOLL_N71PT25DB 0x1
+#define FV_ADCVOLL_MUTE 0x0
+
+/* Register Masks */
+#define RM_ADCVOLL RM(FM_ADCVOLL, FB_ADCVOLL)
+
+/* Register Values */
+#define RV_ADCVOLL_P24DB RV(FV_ADCVOLL_P24DB, FB_ADCVOLL)
+#define RV_ADCVOLL_N71PT25DB \
+ RV(FV_ADCVOLL_N71PT25DB, FB_ADCVOLL)
+
+#define RV_ADCVOLL_MUTE RV(FV_ADCVOLL_MUTE, FB_ADCVOLL)
+
+/*****************************
+ * R_ADCVOLR (0x7) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_ADCVOLR 0
+
+/* Field Masks */
+#define FM_ADCVOLR 0XFF
+
+/* Field Values */
+#define FV_ADCVOLR_P24DB 0xFF
+#define FV_ADCVOLR_N71PT25DB 0x1
+#define FV_ADCVOLR_MUTE 0x0
+
+/* Register Masks */
+#define RM_ADCVOLR RM(FM_ADCVOLR, FB_ADCVOLR)
+
+/* Register Values */
+#define RV_ADCVOLR_P24DB RV(FV_ADCVOLR_P24DB, FB_ADCVOLR)
+#define RV_ADCVOLR_N71PT25DB \
+ RV(FV_ADCVOLR_N71PT25DB, FB_ADCVOLR)
+
+#define RV_ADCVOLR_MUTE RV(FV_ADCVOLR_MUTE, FB_ADCVOLR)
+
+/****************************
+ * R_INVOLL (0x8) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_INVOLL_INMUTEL 7
+#define FB_INVOLL_IZCL 6
+#define FB_INVOLL 0
+
+/* Field Masks */
+#define FM_INVOLL_INMUTEL 0X1
+#define FM_INVOLL_IZCL 0X1
+#define FM_INVOLL 0X3F
+
+/* Field Values */
+#define FV_INVOLL_INMUTEL_ENABLE 0x1
+#define FV_INVOLL_INMUTEL_DISABLE 0x0
+#define FV_INVOLL_IZCL_ENABLE 0x1
+#define FV_INVOLL_IZCL_DISABLE 0x0
+#define FV_INVOLL_P30DB 0x3F
+#define FV_INVOLL_N17PT25DB 0x0
+
+/* Register Masks */
+#define RM_INVOLL_INMUTEL \
+ RM(FM_INVOLL_INMUTEL, FB_INVOLL_INMUTEL)
+
+#define RM_INVOLL_IZCL RM(FM_INVOLL_IZCL, FB_INVOLL_IZCL)
+#define RM_INVOLL RM(FM_INVOLL, FB_INVOLL)
+
+/* Register Values */
+#define RV_INVOLL_INMUTEL_ENABLE \
+ RV(FV_INVOLL_INMUTEL_ENABLE, FB_INVOLL_INMUTEL)
+
+#define RV_INVOLL_INMUTEL_DISABLE \
+ RV(FV_INVOLL_INMUTEL_DISABLE, FB_INVOLL_INMUTEL)
+
+#define RV_INVOLL_IZCL_ENABLE \
+ RV(FV_INVOLL_IZCL_ENABLE, FB_INVOLL_IZCL)
+
+#define RV_INVOLL_IZCL_DISABLE \
+ RV(FV_INVOLL_IZCL_DISABLE, FB_INVOLL_IZCL)
+
+#define RV_INVOLL_P30DB RV(FV_INVOLL_P30DB, FB_INVOLL)
+#define RV_INVOLL_N17PT25DB RV(FV_INVOLL_N17PT25DB, FB_INVOLL)
+
+/****************************
+ * R_INVOLR (0x9) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_INVOLR_INMUTER 7
+#define FB_INVOLR_IZCR 6
+#define FB_INVOLR 0
+
+/* Field Masks */
+#define FM_INVOLR_INMUTER 0X1
+#define FM_INVOLR_IZCR 0X1
+#define FM_INVOLR 0X3F
+
+/* Field Values */
+#define FV_INVOLR_INMUTER_ENABLE 0x1
+#define FV_INVOLR_INMUTER_DISABLE 0x0
+#define FV_INVOLR_IZCR_ENABLE 0x1
+#define FV_INVOLR_IZCR_DISABLE 0x0
+#define FV_INVOLR_P30DB 0x3F
+#define FV_INVOLR_N17PT25DB 0x0
+
+/* Register Masks */
+#define RM_INVOLR_INMUTER \
+ RM(FM_INVOLR_INMUTER, FB_INVOLR_INMUTER)
+
+#define RM_INVOLR_IZCR RM(FM_INVOLR_IZCR, FB_INVOLR_IZCR)
+#define RM_INVOLR RM(FM_INVOLR, FB_INVOLR)
+
+/* Register Values */
+#define RV_INVOLR_INMUTER_ENABLE \
+ RV(FV_INVOLR_INMUTER_ENABLE, FB_INVOLR_INMUTER)
+
+#define RV_INVOLR_INMUTER_DISABLE \
+ RV(FV_INVOLR_INMUTER_DISABLE, FB_INVOLR_INMUTER)
+
+#define RV_INVOLR_IZCR_ENABLE \
+ RV(FV_INVOLR_IZCR_ENABLE, FB_INVOLR_IZCR)
+
+#define RV_INVOLR_IZCR_DISABLE \
+ RV(FV_INVOLR_IZCR_DISABLE, FB_INVOLR_IZCR)
+
+#define RV_INVOLR_P30DB RV(FV_INVOLR_P30DB, FB_INVOLR)
+#define RV_INVOLR_N17PT25DB RV(FV_INVOLR_N17PT25DB, FB_INVOLR)
+
+/*****************************
+ * R_INMODE (0x0B) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INMODE_DS 0
+
+/* Field Masks */
+#define FM_INMODE_DS 0X1
+
+/* Field Values */
+#define FV_INMODE_DS_LRIN1 0x0
+#define FV_INMODE_DS_LRIN2 0x1
+
+/* Register Masks */
+#define RM_INMODE_DS RM(FM_INMODE_DS, FB_INMODE_DS)
+
+/* Register Values */
+#define RV_INMODE_DS_LRIN1 \
+ RV(FV_INMODE_DS_LRIN1, FB_INMODE_DS)
+
+#define RV_INMODE_DS_LRIN2 \
+ RV(FV_INMODE_DS_LRIN2, FB_INMODE_DS)
+
+
+/*****************************
+ * R_INSELL (0x0C) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INSELL 6
+#define FB_INSELL_MICBSTL 4
+
+/* Field Masks */
+#define FM_INSELL 0X3
+#define FM_INSELL_MICBSTL 0X3
+
+/* Field Values */
+#define FV_INSELL_IN1 0x0
+#define FV_INSELL_IN2 0x1
+#define FV_INSELL_IN3 0x2
+#define FV_INSELL_D2S 0x3
+#define FV_INSELL_MICBSTL_OFF 0x0
+#define FV_INSELL_MICBSTL_10DB 0x1
+#define FV_INSELL_MICBSTL_20DB 0x2
+#define FV_INSELL_MICBSTL_30DB 0x3
+
+/* Register Masks */
+#define RM_INSELL RM(FM_INSELL, FB_INSELL)
+#define RM_INSELL_MICBSTL \
+ RM(FM_INSELL_MICBSTL, FB_INSELL_MICBSTL)
+
+
+/* Register Values */
+#define RV_INSELL_IN1 RV(FV_INSELL_IN1, FB_INSELL)
+#define RV_INSELL_IN2 RV(FV_INSELL_IN2, FB_INSELL)
+#define RV_INSELL_IN3 RV(FV_INSELL_IN3, FB_INSELL)
+#define RV_INSELL_D2S RV(FV_INSELL_D2S, FB_INSELL)
+#define RV_INSELL_MICBSTL_OFF \
+ RV(FV_INSELL_MICBSTL_OFF, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_10DB \
+ RV(FV_INSELL_MICBSTL_10DB, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_20DB \
+ RV(FV_INSELL_MICBSTL_20DB, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_30DB \
+ RV(FV_INSELL_MICBSTL_30DB, FB_INSELL_MICBSTL)
+
+
+/*****************************
+ * R_INSELR (0x0D) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INSELR 6
+#define FB_INSELR_MICBSTR 4
+
+/* Field Masks */
+#define FM_INSELR 0X3
+#define FM_INSELR_MICBSTR 0X3
+
+/* Field Values */
+#define FV_INSELR_IN1 0x0
+#define FV_INSELR_IN2 0x1
+#define FV_INSELR_IN3 0x2
+#define FV_INSELR_D2S 0x3
+#define FV_INSELR_MICBSTR_OFF 0x0
+#define FV_INSELR_MICBSTR_10DB 0x1
+#define FV_INSELR_MICBSTR_20DB 0x2
+#define FV_INSELR_MICBSTR_30DB 0x3
+
+/* Register Masks */
+#define RM_INSELR RM(FM_INSELR, FB_INSELR)
+#define RM_INSELR_MICBSTR \
+ RM(FM_INSELR_MICBSTR, FB_INSELR_MICBSTR)
+
+
+/* Register Values */
+#define RV_INSELR_IN1 RV(FV_INSELR_IN1, FB_INSELR)
+#define RV_INSELR_IN2 RV(FV_INSELR_IN2, FB_INSELR)
+#define RV_INSELR_IN3 RV(FV_INSELR_IN3, FB_INSELR)
+#define RV_INSELR_D2S RV(FV_INSELR_D2S, FB_INSELR)
+#define RV_INSELR_MICBSTR_OFF \
+ RV(FV_INSELR_MICBSTR_OFF, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_10DB \
+ RV(FV_INSELR_MICBSTR_10DB, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_20DB \
+ RV(FV_INSELR_MICBSTR_20DB, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_30DB \
+ RV(FV_INSELR_MICBSTR_30DB, FB_INSELR_MICBSTR)
+
+
+/***************************
+ * R_AIC1 (0x13) *
+ ***************************/
+
+/* Field Offsets */
+#define FB_AIC1_BCLKINV 6
+#define FB_AIC1_MS 5
+#define FB_AIC1_LRP 4
+#define FB_AIC1_WL 2
+#define FB_AIC1_FORMAT 0
+
+/* Field Masks */
+#define FM_AIC1_BCLKINV 0X1
+#define FM_AIC1_MS 0X1
+#define FM_AIC1_LRP 0X1
+#define FM_AIC1_WL 0X3
+#define FM_AIC1_FORMAT 0X3
+
+/* Field Values */
+#define FV_AIC1_BCLKINV_ENABLE 0x1
+#define FV_AIC1_BCLKINV_DISABLE 0x0
+#define FV_AIC1_MS_MASTER 0x1
+#define FV_AIC1_MS_SLAVE 0x0
+#define FV_AIC1_LRP_INVERT 0x1
+#define FV_AIC1_LRP_NORMAL 0x0
+#define FV_AIC1_WL_16 0x0
+#define FV_AIC1_WL_20 0x1
+#define FV_AIC1_WL_24 0x2
+#define FV_AIC1_WL_32 0x3
+#define FV_AIC1_FORMAT_RIGHT 0x0
+#define FV_AIC1_FORMAT_LEFT 0x1
+#define FV_AIC1_FORMAT_I2S 0x2
+
+/* Register Masks */
+#define RM_AIC1_BCLKINV \
+ RM(FM_AIC1_BCLKINV, FB_AIC1_BCLKINV)
+
+#define RM_AIC1_MS RM(FM_AIC1_MS, FB_AIC1_MS)
+#define RM_AIC1_LRP RM(FM_AIC1_LRP, FB_AIC1_LRP)
+#define RM_AIC1_WL RM(FM_AIC1_WL, FB_AIC1_WL)
+#define RM_AIC1_FORMAT RM(FM_AIC1_FORMAT, FB_AIC1_FORMAT)
+
+/* Register Values */
+#define RV_AIC1_BCLKINV_ENABLE \
+ RV(FV_AIC1_BCLKINV_ENABLE, FB_AIC1_BCLKINV)
+
+#define RV_AIC1_BCLKINV_DISABLE \
+ RV(FV_AIC1_BCLKINV_DISABLE, FB_AIC1_BCLKINV)
+
+#define RV_AIC1_MS_MASTER RV(FV_AIC1_MS_MASTER, FB_AIC1_MS)
+#define RV_AIC1_MS_SLAVE RV(FV_AIC1_MS_SLAVE, FB_AIC1_MS)
+#define RV_AIC1_LRP_INVERT \
+ RV(FV_AIC1_LRP_INVERT, FB_AIC1_LRP)
+
+#define RV_AIC1_LRP_NORMAL \
+ RV(FV_AIC1_LRP_NORMAL, FB_AIC1_LRP)
+
+#define RV_AIC1_WL_16 RV(FV_AIC1_WL_16, FB_AIC1_WL)
+#define RV_AIC1_WL_20 RV(FV_AIC1_WL_20, FB_AIC1_WL)
+#define RV_AIC1_WL_24 RV(FV_AIC1_WL_24, FB_AIC1_WL)
+#define RV_AIC1_WL_32 RV(FV_AIC1_WL_32, FB_AIC1_WL)
+#define RV_AIC1_FORMAT_RIGHT \
+ RV(FV_AIC1_FORMAT_RIGHT, FB_AIC1_FORMAT)
+
+#define RV_AIC1_FORMAT_LEFT \
+ RV(FV_AIC1_FORMAT_LEFT, FB_AIC1_FORMAT)
+
+#define RV_AIC1_FORMAT_I2S \
+ RV(FV_AIC1_FORMAT_I2S, FB_AIC1_FORMAT)
+
+
+/***************************
+ * R_AIC2 (0x14) *
+ ***************************/
+
+/* Field Offsets */
+#define FB_AIC2_DACDSEL 6
+#define FB_AIC2_ADCDSEL 4
+#define FB_AIC2_TRI 3
+#define FB_AIC2_BLRCM 0
+
+/* Field Masks */
+#define FM_AIC2_DACDSEL 0X3
+#define FM_AIC2_ADCDSEL 0X3
+#define FM_AIC2_TRI 0X1
+#define FM_AIC2_BLRCM 0X7
+
+/* Field Values */
+#define FV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED 0x3
+
+/* Register Masks */
+#define RM_AIC2_DACDSEL \
+ RM(FM_AIC2_DACDSEL, FB_AIC2_DACDSEL)
+
+#define RM_AIC2_ADCDSEL \
+ RM(FM_AIC2_ADCDSEL, FB_AIC2_ADCDSEL)
+
+#define RM_AIC2_TRI RM(FM_AIC2_TRI, FB_AIC2_TRI)
+#define RM_AIC2_BLRCM RM(FM_AIC2_BLRCM, FB_AIC2_BLRCM)
+
+/* Register Values */
+#define RV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED \
+ RV(FV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED, FB_AIC2_BLRCM)
+
+
+/******************************
+ * R_CNVRTR0 (0x16) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CNVRTR0_ADCPOLR 7
+#define FB_CNVRTR0_ADCPOLL 6
+#define FB_CNVRTR0_AMONOMIX 4
+#define FB_CNVRTR0_ADCMU 3
+#define FB_CNVRTR0_HPOR 2
+#define FB_CNVRTR0_ADCHPDR 1
+#define FB_CNVRTR0_ADCHPDL 0
+
+/* Field Masks */
+#define FM_CNVRTR0_ADCPOLR 0X1
+#define FM_CNVRTR0_ADCPOLL 0X1
+#define FM_CNVRTR0_AMONOMIX 0X3
+#define FM_CNVRTR0_ADCMU 0X1
+#define FM_CNVRTR0_HPOR 0X1
+#define FM_CNVRTR0_ADCHPDR 0X1
+#define FM_CNVRTR0_ADCHPDL 0X1
+
+/* Field Values */
+#define FV_CNVRTR0_ADCPOLR_INVERT 0x1
+#define FV_CNVRTR0_ADCPOLR_NORMAL 0x0
+#define FV_CNVRTR0_ADCPOLL_INVERT 0x1
+#define FV_CNVRTR0_ADCPOLL_NORMAL 0x0
+#define FV_CNVRTR0_ADCMU_ENABLE 0x1
+#define FV_CNVRTR0_ADCMU_DISABLE 0x0
+#define FV_CNVRTR0_ADCHPDR_ENABLE 0x1
+#define FV_CNVRTR0_ADCHPDR_DISABLE 0x0
+#define FV_CNVRTR0_ADCHPDL_ENABLE 0x1
+#define FV_CNVRTR0_ADCHPDL_DISABLE 0x0
+
+/* Register Masks */
+#define RM_CNVRTR0_ADCPOLR \
+ RM(FM_CNVRTR0_ADCPOLR, FB_CNVRTR0_ADCPOLR)
+
+#define RM_CNVRTR0_ADCPOLL \
+ RM(FM_CNVRTR0_ADCPOLL, FB_CNVRTR0_ADCPOLL)
+
+#define RM_CNVRTR0_AMONOMIX \
+ RM(FM_CNVRTR0_AMONOMIX, FB_CNVRTR0_AMONOMIX)
+
+#define RM_CNVRTR0_ADCMU \
+ RM(FM_CNVRTR0_ADCMU, FB_CNVRTR0_ADCMU)
+
+#define RM_CNVRTR0_HPOR \
+ RM(FM_CNVRTR0_HPOR, FB_CNVRTR0_HPOR)
+
+#define RM_CNVRTR0_ADCHPDR \
+ RM(FM_CNVRTR0_ADCHPDR, FB_CNVRTR0_ADCHPDR)
+
+#define RM_CNVRTR0_ADCHPDL \
+ RM(FM_CNVRTR0_ADCHPDL, FB_CNVRTR0_ADCHPDL)
+
+
+/* Register Values */
+#define RV_CNVRTR0_ADCPOLR_INVERT \
+ RV(FV_CNVRTR0_ADCPOLR_INVERT, FB_CNVRTR0_ADCPOLR)
+
+#define RV_CNVRTR0_ADCPOLR_NORMAL \
+ RV(FV_CNVRTR0_ADCPOLR_NORMAL, FB_CNVRTR0_ADCPOLR)
+
+#define RV_CNVRTR0_ADCPOLL_INVERT \
+ RV(FV_CNVRTR0_ADCPOLL_INVERT, FB_CNVRTR0_ADCPOLL)
+
+#define RV_CNVRTR0_ADCPOLL_NORMAL \
+ RV(FV_CNVRTR0_ADCPOLL_NORMAL, FB_CNVRTR0_ADCPOLL)
+
+#define RV_CNVRTR0_ADCMU_ENABLE \
+ RV(FV_CNVRTR0_ADCMU_ENABLE, FB_CNVRTR0_ADCMU)
+
+#define RV_CNVRTR0_ADCMU_DISABLE \
+ RV(FV_CNVRTR0_ADCMU_DISABLE, FB_CNVRTR0_ADCMU)
+
+#define RV_CNVRTR0_ADCHPDR_ENABLE \
+ RV(FV_CNVRTR0_ADCHPDR_ENABLE, FB_CNVRTR0_ADCHPDR)
+
+#define RV_CNVRTR0_ADCHPDR_DISABLE \
+ RV(FV_CNVRTR0_ADCHPDR_DISABLE, FB_CNVRTR0_ADCHPDR)
+
+#define RV_CNVRTR0_ADCHPDL_ENABLE \
+ RV(FV_CNVRTR0_ADCHPDL_ENABLE, FB_CNVRTR0_ADCHPDL)
+
+#define RV_CNVRTR0_ADCHPDL_DISABLE \
+ RV(FV_CNVRTR0_ADCHPDL_DISABLE, FB_CNVRTR0_ADCHPDL)
+
+
+/****************************
+ * R_ADCSR (0x17) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_ADCSR_ABCM 6
+#define FB_ADCSR_ABR 3
+#define FB_ADCSR_ABM 0
+
+/* Field Masks */
+#define FM_ADCSR_ABCM 0X3
+#define FM_ADCSR_ABR 0X3
+#define FM_ADCSR_ABM 0X7
+
+/* Field Values */
+#define FV_ADCSR_ABCM_AUTO 0x0
+#define FV_ADCSR_ABCM_32 0x1
+#define FV_ADCSR_ABCM_40 0x2
+#define FV_ADCSR_ABCM_64 0x3
+#define FV_ADCSR_ABR_32 0x0
+#define FV_ADCSR_ABR_44_1 0x1
+#define FV_ADCSR_ABR_48 0x2
+#define FV_ADCSR_ABM_PT25 0x0
+#define FV_ADCSR_ABM_PT5 0x1
+#define FV_ADCSR_ABM_1 0x2
+#define FV_ADCSR_ABM_2 0x3
+
+/* Register Masks */
+#define RM_ADCSR_ABCM RM(FM_ADCSR_ABCM, FB_ADCSR_ABCM)
+#define RM_ADCSR_ABR RM(FM_ADCSR_ABR, FB_ADCSR_ABR)
+#define RM_ADCSR_ABM RM(FM_ADCSR_ABM, FB_ADCSR_ABM)
+
+/* Register Values */
+#define RV_ADCSR_ABCM_AUTO \
+ RV(FV_ADCSR_ABCM_AUTO, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_32 \
+ RV(FV_ADCSR_ABCM_32, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_40 \
+ RV(FV_ADCSR_ABCM_40, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_64 \
+ RV(FV_ADCSR_ABCM_64, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABR_32 RV(FV_ADCSR_ABR_32, FB_ADCSR_ABR)
+#define RV_ADCSR_ABR_44_1 \
+ RV(FV_ADCSR_ABR_44_1, FB_ADCSR_ABR)
+
+#define RV_ADCSR_ABR_48 RV(FV_ADCSR_ABR_48, FB_ADCSR_ABR)
+#define RV_ADCSR_ABM_PT25 \
+ RV(FV_ADCSR_ABM_PT25, FB_ADCSR_ABM)
+
+#define RV_ADCSR_ABM_PT5 RV(FV_ADCSR_ABM_PT5, FB_ADCSR_ABM)
+#define RV_ADCSR_ABM_1 RV(FV_ADCSR_ABM_1, FB_ADCSR_ABM)
+#define RV_ADCSR_ABM_2 RV(FV_ADCSR_ABM_2, FB_ADCSR_ABM)
+
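+/*
+ * Illustrative use (assumed, not from this patch): the base-rate and
+ * multiplier fields are normally programmed together, e.g. 44.1 kHz
+ * with a 1x multiplier:
+ *
+ *	snd_soc_update_bits(codec, R_ADCSR,
+ *			    RM_ADCSR_ABR | RM_ADCSR_ABM,
+ *			    RV_ADCSR_ABR_44_1 | RV_ADCSR_ABM_1);
+ */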
+/******************************
+ * R_CNVRTR1 (0x18) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CNVRTR1_DACPOLR 7
+#define FB_CNVRTR1_DACPOLL 6
+#define FB_CNVRTR1_DMONOMIX 4
+#define FB_CNVRTR1_DACMU 3
+#define FB_CNVRTR1_DEEMPH 2
+#define FB_CNVRTR1_DACDITH 0
+
+/* Field Masks */
+#define FM_CNVRTR1_DACPOLR 0X1
+#define FM_CNVRTR1_DACPOLL 0X1
+#define FM_CNVRTR1_DMONOMIX 0X3
+#define FM_CNVRTR1_DACMU 0X1
+#define FM_CNVRTR1_DEEMPH 0X1
+#define FM_CNVRTR1_DACDITH 0X3
+
+/* Field Values */
+#define FV_CNVRTR1_DACPOLR_INVERT 0x1
+#define FV_CNVRTR1_DACPOLR_NORMAL 0x0
+#define FV_CNVRTR1_DACPOLL_INVERT 0x1
+#define FV_CNVRTR1_DACPOLL_NORMAL 0x0
+#define FV_CNVRTR1_DMONOMIX_ENABLE 0x1
+#define FV_CNVRTR1_DMONOMIX_DISABLE 0x0
+#define FV_CNVRTR1_DACMU_ENABLE 0x1
+#define FV_CNVRTR1_DACMU_DISABLE 0x0
+
+/* Register Masks */
+#define RM_CNVRTR1_DACPOLR \
+ RM(FM_CNVRTR1_DACPOLR, FB_CNVRTR1_DACPOLR)
+
+#define RM_CNVRTR1_DACPOLL \
+ RM(FM_CNVRTR1_DACPOLL, FB_CNVRTR1_DACPOLL)
+
+#define RM_CNVRTR1_DMONOMIX \
+ RM(FM_CNVRTR1_DMONOMIX, FB_CNVRTR1_DMONOMIX)
+
+#define RM_CNVRTR1_DACMU \
+ RM(FM_CNVRTR1_DACMU, FB_CNVRTR1_DACMU)
+
+#define RM_CNVRTR1_DEEMPH \
+ RM(FM_CNVRTR1_DEEMPH, FB_CNVRTR1_DEEMPH)
+
+#define RM_CNVRTR1_DACDITH \
+ RM(FM_CNVRTR1_DACDITH, FB_CNVRTR1_DACDITH)
+
+
+/* Register Values */
+#define RV_CNVRTR1_DACPOLR_INVERT \
+ RV(FV_CNVRTR1_DACPOLR_INVERT, FB_CNVRTR1_DACPOLR)
+
+#define RV_CNVRTR1_DACPOLR_NORMAL \
+ RV(FV_CNVRTR1_DACPOLR_NORMAL, FB_CNVRTR1_DACPOLR)
+
+#define RV_CNVRTR1_DACPOLL_INVERT \
+ RV(FV_CNVRTR1_DACPOLL_INVERT, FB_CNVRTR1_DACPOLL)
+
+#define RV_CNVRTR1_DACPOLL_NORMAL \
+ RV(FV_CNVRTR1_DACPOLL_NORMAL, FB_CNVRTR1_DACPOLL)
+
+#define RV_CNVRTR1_DMONOMIX_ENABLE \
+ RV(FV_CNVRTR1_DMONOMIX_ENABLE, FB_CNVRTR1_DMONOMIX)
+
+#define RV_CNVRTR1_DMONOMIX_DISABLE \
+ RV(FV_CNVRTR1_DMONOMIX_DISABLE, FB_CNVRTR1_DMONOMIX)
+
+#define RV_CNVRTR1_DACMU_ENABLE \
+ RV(FV_CNVRTR1_DACMU_ENABLE, FB_CNVRTR1_DACMU)
+
+#define RV_CNVRTR1_DACMU_DISABLE \
+ RV(FV_CNVRTR1_DACMU_DISABLE, FB_CNVRTR1_DACMU)
+
+
+/****************************
+ * R_DACSR (0x19) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_DACSR_DBCM 6
+#define FB_DACSR_DBR 3
+#define FB_DACSR_DBM 0
+
+/* Field Masks */
+#define FM_DACSR_DBCM 0X3
+#define FM_DACSR_DBR 0X3
+#define FM_DACSR_DBM 0X7
+
+/* Field Values */
+#define FV_DACSR_DBCM_AUTO 0x0
+#define FV_DACSR_DBCM_32 0x1
+#define FV_DACSR_DBCM_40 0x2
+#define FV_DACSR_DBCM_64 0x3
+#define FV_DACSR_DBR_32 0x0
+#define FV_DACSR_DBR_44_1 0x1
+#define FV_DACSR_DBR_48 0x2
+#define FV_DACSR_DBM_PT25 0x0
+#define FV_DACSR_DBM_PT5 0x1
+#define FV_DACSR_DBM_1 0x2
+#define FV_DACSR_DBM_2 0x3
+
+/* Register Masks */
+#define RM_DACSR_DBCM RM(FM_DACSR_DBCM, FB_DACSR_DBCM)
+#define RM_DACSR_DBR RM(FM_DACSR_DBR, FB_DACSR_DBR)
+#define RM_DACSR_DBM RM(FM_DACSR_DBM, FB_DACSR_DBM)
+
+/* Register Values */
+#define RV_DACSR_DBCM_AUTO \
+ RV(FV_DACSR_DBCM_AUTO, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_32 \
+ RV(FV_DACSR_DBCM_32, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_40 \
+ RV(FV_DACSR_DBCM_40, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_64 \
+ RV(FV_DACSR_DBCM_64, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBR_32 RV(FV_DACSR_DBR_32, FB_DACSR_DBR)
+#define RV_DACSR_DBR_44_1 \
+ RV(FV_DACSR_DBR_44_1, FB_DACSR_DBR)
+
+#define RV_DACSR_DBR_48 RV(FV_DACSR_DBR_48, FB_DACSR_DBR)
+#define RV_DACSR_DBM_PT25 \
+ RV(FV_DACSR_DBM_PT25, FB_DACSR_DBM)
+
+#define RV_DACSR_DBM_PT5 RV(FV_DACSR_DBM_PT5, FB_DACSR_DBM)
+#define RV_DACSR_DBM_1 RV(FV_DACSR_DBM_1, FB_DACSR_DBM)
+#define RV_DACSR_DBM_2 RV(FV_DACSR_DBM_2, FB_DACSR_DBM)
+
+/****************************
+ * R_PWRM1 (0x1A) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_PWRM1_BSTL 7
+#define FB_PWRM1_BSTR 6
+#define FB_PWRM1_PGAL 5
+#define FB_PWRM1_PGAR 4
+#define FB_PWRM1_ADCL 3
+#define FB_PWRM1_ADCR 2
+#define FB_PWRM1_MICB 1
+#define FB_PWRM1_DIGENB 0
+
+/* Field Masks */
+#define FM_PWRM1_BSTL 0X1
+#define FM_PWRM1_BSTR 0X1
+#define FM_PWRM1_PGAL 0X1
+#define FM_PWRM1_PGAR 0X1
+#define FM_PWRM1_ADCL 0X1
+#define FM_PWRM1_ADCR 0X1
+#define FM_PWRM1_MICB 0X1
+#define FM_PWRM1_DIGENB 0X1
+
+/* Field Values */
+#define FV_PWRM1_BSTL_ENABLE 0x1
+#define FV_PWRM1_BSTL_DISABLE 0x0
+#define FV_PWRM1_BSTR_ENABLE 0x1
+#define FV_PWRM1_BSTR_DISABLE 0x0
+#define FV_PWRM1_PGAL_ENABLE 0x1
+#define FV_PWRM1_PGAL_DISABLE 0x0
+#define FV_PWRM1_PGAR_ENABLE 0x1
+#define FV_PWRM1_PGAR_DISABLE 0x0
+#define FV_PWRM1_ADCL_ENABLE 0x1
+#define FV_PWRM1_ADCL_DISABLE 0x0
+#define FV_PWRM1_ADCR_ENABLE 0x1
+#define FV_PWRM1_ADCR_DISABLE 0x0
+#define FV_PWRM1_MICB_ENABLE 0x1
+#define FV_PWRM1_MICB_DISABLE 0x0
+#define FV_PWRM1_DIGENB_DISABLE 0x1
+#define FV_PWRM1_DIGENB_ENABLE 0x0
+
+/* Register Masks */
+#define RM_PWRM1_BSTL RM(FM_PWRM1_BSTL, FB_PWRM1_BSTL)
+#define RM_PWRM1_BSTR RM(FM_PWRM1_BSTR, FB_PWRM1_BSTR)
+#define RM_PWRM1_PGAL RM(FM_PWRM1_PGAL, FB_PWRM1_PGAL)
+#define RM_PWRM1_PGAR RM(FM_PWRM1_PGAR, FB_PWRM1_PGAR)
+#define RM_PWRM1_ADCL RM(FM_PWRM1_ADCL, FB_PWRM1_ADCL)
+#define RM_PWRM1_ADCR RM(FM_PWRM1_ADCR, FB_PWRM1_ADCR)
+#define RM_PWRM1_MICB RM(FM_PWRM1_MICB, FB_PWRM1_MICB)
+#define RM_PWRM1_DIGENB \
+ RM(FM_PWRM1_DIGENB, FB_PWRM1_DIGENB)
+
+
+/* Register Values */
+#define RV_PWRM1_BSTL_ENABLE \
+ RV(FV_PWRM1_BSTL_ENABLE, FB_PWRM1_BSTL)
+
+#define RV_PWRM1_BSTL_DISABLE \
+ RV(FV_PWRM1_BSTL_DISABLE, FB_PWRM1_BSTL)
+
+#define RV_PWRM1_BSTR_ENABLE \
+ RV(FV_PWRM1_BSTR_ENABLE, FB_PWRM1_BSTR)
+
+#define RV_PWRM1_BSTR_DISABLE \
+ RV(FV_PWRM1_BSTR_DISABLE, FB_PWRM1_BSTR)
+
+#define RV_PWRM1_PGAL_ENABLE \
+ RV(FV_PWRM1_PGAL_ENABLE, FB_PWRM1_PGAL)
+
+#define RV_PWRM1_PGAL_DISABLE \
+ RV(FV_PWRM1_PGAL_DISABLE, FB_PWRM1_PGAL)
+
+#define RV_PWRM1_PGAR_ENABLE \
+ RV(FV_PWRM1_PGAR_ENABLE, FB_PWRM1_PGAR)
+
+#define RV_PWRM1_PGAR_DISABLE \
+ RV(FV_PWRM1_PGAR_DISABLE, FB_PWRM1_PGAR)
+
+#define RV_PWRM1_ADCL_ENABLE \
+ RV(FV_PWRM1_ADCL_ENABLE, FB_PWRM1_ADCL)
+
+#define RV_PWRM1_ADCL_DISABLE \
+ RV(FV_PWRM1_ADCL_DISABLE, FB_PWRM1_ADCL)
+
+#define RV_PWRM1_ADCR_ENABLE \
+ RV(FV_PWRM1_ADCR_ENABLE, FB_PWRM1_ADCR)
+
+#define RV_PWRM1_ADCR_DISABLE \
+ RV(FV_PWRM1_ADCR_DISABLE, FB_PWRM1_ADCR)
+
+#define RV_PWRM1_MICB_ENABLE \
+ RV(FV_PWRM1_MICB_ENABLE, FB_PWRM1_MICB)
+
+#define RV_PWRM1_MICB_DISABLE \
+ RV(FV_PWRM1_MICB_DISABLE, FB_PWRM1_MICB)
+
+#define RV_PWRM1_DIGENB_DISABLE \
+ RV(FV_PWRM1_DIGENB_DISABLE, FB_PWRM1_DIGENB)
+
+#define RV_PWRM1_DIGENB_ENABLE \
+ RV(FV_PWRM1_DIGENB_ENABLE, FB_PWRM1_DIGENB)
+
+
+/****************************
+ * R_PWRM2 (0x1B) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_PWRM2_D2S 7
+#define FB_PWRM2_HPL 6
+#define FB_PWRM2_HPR 5
+#define FB_PWRM2_SPKL 4
+#define FB_PWRM2_SPKR 3
+#define FB_PWRM2_INSELL 2
+#define FB_PWRM2_INSELR 1
+#define FB_PWRM2_VREF 0
+
+/* Field Masks */
+#define FM_PWRM2_D2S 0X1
+#define FM_PWRM2_HPL 0X1
+#define FM_PWRM2_HPR 0X1
+#define FM_PWRM2_SPKL 0X1
+#define FM_PWRM2_SPKR 0X1
+#define FM_PWRM2_INSELL 0X1
+#define FM_PWRM2_INSELR 0X1
+#define FM_PWRM2_VREF 0X1
+
+/* Field Values */
+#define FV_PWRM2_D2S_ENABLE 0x1
+#define FV_PWRM2_D2S_DISABLE 0x0
+#define FV_PWRM2_HPL_ENABLE 0x1
+#define FV_PWRM2_HPL_DISABLE 0x0
+#define FV_PWRM2_HPR_ENABLE 0x1
+#define FV_PWRM2_HPR_DISABLE 0x0
+#define FV_PWRM2_SPKL_ENABLE 0x1
+#define FV_PWRM2_SPKL_DISABLE 0x0
+#define FV_PWRM2_SPKR_ENABLE 0x1
+#define FV_PWRM2_SPKR_DISABLE 0x0
+#define FV_PWRM2_INSELL_ENABLE 0x1
+#define FV_PWRM2_INSELL_DISABLE 0x0
+#define FV_PWRM2_INSELR_ENABLE 0x1
+#define FV_PWRM2_INSELR_DISABLE 0x0
+#define FV_PWRM2_VREF_ENABLE 0x1
+#define FV_PWRM2_VREF_DISABLE 0x0
+
+/* Register Masks */
+#define RM_PWRM2_D2S RM(FM_PWRM2_D2S, FB_PWRM2_D2S)
+#define RM_PWRM2_HPL RM(FM_PWRM2_HPL, FB_PWRM2_HPL)
+#define RM_PWRM2_HPR RM(FM_PWRM2_HPR, FB_PWRM2_HPR)
+#define RM_PWRM2_SPKL RM(FM_PWRM2_SPKL, FB_PWRM2_SPKL)
+#define RM_PWRM2_SPKR RM(FM_PWRM2_SPKR, FB_PWRM2_SPKR)
+#define RM_PWRM2_INSELL \
+ RM(FM_PWRM2_INSELL, FB_PWRM2_INSELL)
+
+#define RM_PWRM2_INSELR \
+ RM(FM_PWRM2_INSELR, FB_PWRM2_INSELR)
+
+#define RM_PWRM2_VREF RM(FM_PWRM2_VREF, FB_PWRM2_VREF)
+
+/* Register Values */
+#define RV_PWRM2_D2S_ENABLE \
+ RV(FV_PWRM2_D2S_ENABLE, FB_PWRM2_D2S)
+
+#define RV_PWRM2_D2S_DISABLE \
+ RV(FV_PWRM2_D2S_DISABLE, FB_PWRM2_D2S)
+
+#define RV_PWRM2_HPL_ENABLE \
+ RV(FV_PWRM2_HPL_ENABLE, FB_PWRM2_HPL)
+
+#define RV_PWRM2_HPL_DISABLE \
+ RV(FV_PWRM2_HPL_DISABLE, FB_PWRM2_HPL)
+
+#define RV_PWRM2_HPR_ENABLE \
+ RV(FV_PWRM2_HPR_ENABLE, FB_PWRM2_HPR)
+
+#define RV_PWRM2_HPR_DISABLE \
+ RV(FV_PWRM2_HPR_DISABLE, FB_PWRM2_HPR)
+
+#define RV_PWRM2_SPKL_ENABLE \
+ RV(FV_PWRM2_SPKL_ENABLE, FB_PWRM2_SPKL)
+
+#define RV_PWRM2_SPKL_DISABLE \
+ RV(FV_PWRM2_SPKL_DISABLE, FB_PWRM2_SPKL)
+
+#define RV_PWRM2_SPKR_ENABLE \
+ RV(FV_PWRM2_SPKR_ENABLE, FB_PWRM2_SPKR)
+
+#define RV_PWRM2_SPKR_DISABLE \
+ RV(FV_PWRM2_SPKR_DISABLE, FB_PWRM2_SPKR)
+
+#define RV_PWRM2_INSELL_ENABLE \
+ RV(FV_PWRM2_INSELL_ENABLE, FB_PWRM2_INSELL)
+
+#define RV_PWRM2_INSELL_DISABLE \
+ RV(FV_PWRM2_INSELL_DISABLE, FB_PWRM2_INSELL)
+
+#define RV_PWRM2_INSELR_ENABLE \
+ RV(FV_PWRM2_INSELR_ENABLE, FB_PWRM2_INSELR)
+
+#define RV_PWRM2_INSELR_DISABLE \
+ RV(FV_PWRM2_INSELR_DISABLE, FB_PWRM2_INSELR)
+
+#define RV_PWRM2_VREF_ENABLE \
+ RV(FV_PWRM2_VREF_ENABLE, FB_PWRM2_VREF)
+
+#define RV_PWRM2_VREF_DISABLE \
+ RV(FV_PWRM2_VREF_DISABLE, FB_PWRM2_VREF)
+
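+/*
+ * Illustrative sketch (assumed usage, not part of this patch): the
+ * per-block power bits are typically raised together, e.g. bringing
+ * up both headphone outputs along with the reference:
+ *
+ *	snd_soc_update_bits(codec, R_PWRM2,
+ *			    RM_PWRM2_HPL | RM_PWRM2_HPR | RM_PWRM2_VREF,
+ *			    RV_PWRM2_HPL_ENABLE | RV_PWRM2_HPR_ENABLE |
+ *			    RV_PWRM2_VREF_ENABLE);
+ */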
+
+/******************************
+ * R_CONFIG0 (0x1F) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CONFIG0_ASDM 6
+#define FB_CONFIG0_DSDM 4
+#define FB_CONFIG0_DC_BYPASS 1
+#define FB_CONFIG0_SD_FORCE_ON 0
+
+/* Field Masks */
+#define FM_CONFIG0_ASDM 0X3
+#define FM_CONFIG0_DSDM 0X3
+#define FM_CONFIG0_DC_BYPASS 0X1
+#define FM_CONFIG0_SD_FORCE_ON 0X1
+
+/* Field Values */
+#define FV_CONFIG0_ASDM_HALF 0x1
+#define FV_CONFIG0_ASDM_FULL 0x2
+#define FV_CONFIG0_ASDM_AUTO 0x3
+#define FV_CONFIG0_DSDM_HALF 0x1
+#define FV_CONFIG0_DSDM_FULL 0x2
+#define FV_CONFIG0_DSDM_AUTO 0x3
+#define FV_CONFIG0_DC_BYPASS_ENABLE 0x1
+#define FV_CONFIG0_DC_BYPASS_DISABLE 0x0
+#define FV_CONFIG0_SD_FORCE_ON_ENABLE 0x1
+#define FV_CONFIG0_SD_FORCE_ON_DISABLE 0x0
+
+/* Register Masks */
+#define RM_CONFIG0_ASDM \
+ RM(FM_CONFIG0_ASDM, FB_CONFIG0_ASDM)
+
+#define RM_CONFIG0_DSDM \
+ RM(FM_CONFIG0_DSDM, FB_CONFIG0_DSDM)
+
+#define RM_CONFIG0_DC_BYPASS \
+ RM(FM_CONFIG0_DC_BYPASS, FB_CONFIG0_DC_BYPASS)
+
+#define RM_CONFIG0_SD_FORCE_ON \
+ RM(FM_CONFIG0_SD_FORCE_ON, FB_CONFIG0_SD_FORCE_ON)
+
+
+/* Register Values */
+#define RV_CONFIG0_ASDM_HALF \
+ RV(FV_CONFIG0_ASDM_HALF, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_ASDM_FULL \
+ RV(FV_CONFIG0_ASDM_FULL, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_ASDM_AUTO \
+ RV(FV_CONFIG0_ASDM_AUTO, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_DSDM_HALF \
+ RV(FV_CONFIG0_DSDM_HALF, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DSDM_FULL \
+ RV(FV_CONFIG0_DSDM_FULL, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DSDM_AUTO \
+ RV(FV_CONFIG0_DSDM_AUTO, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DC_BYPASS_ENABLE \
+ RV(FV_CONFIG0_DC_BYPASS_ENABLE, FB_CONFIG0_DC_BYPASS)
+
+#define RV_CONFIG0_DC_BYPASS_DISABLE \
+ RV(FV_CONFIG0_DC_BYPASS_DISABLE, FB_CONFIG0_DC_BYPASS)
+
+#define RV_CONFIG0_SD_FORCE_ON_ENABLE \
+ RV(FV_CONFIG0_SD_FORCE_ON_ENABLE, FB_CONFIG0_SD_FORCE_ON)
+
+#define RV_CONFIG0_SD_FORCE_ON_DISABLE \
+ RV(FV_CONFIG0_SD_FORCE_ON_DISABLE, FB_CONFIG0_SD_FORCE_ON)
+
+
+/******************************
+ * R_CONFIG1 (0x20) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CONFIG1_EQ2_EN 7
+#define FB_CONFIG1_EQ2_BE 4
+#define FB_CONFIG1_EQ1_EN 3
+#define FB_CONFIG1_EQ1_BE 0
+
+/* Field Masks */
+#define FM_CONFIG1_EQ2_EN 0X1
+#define FM_CONFIG1_EQ2_BE 0X7
+#define FM_CONFIG1_EQ1_EN 0X1
+#define FM_CONFIG1_EQ1_BE 0X7
+
+/* Field Values */
+#define FV_CONFIG1_EQ2_EN_ENABLE 0x1
+#define FV_CONFIG1_EQ2_EN_DISABLE 0x0
+#define FV_CONFIG1_EQ2_BE_PRE 0x0
+#define FV_CONFIG1_EQ2_BE_PRE_EQ_0 0x1
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_1 0x2
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_2 0x3
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_3 0x4
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_4 0x5
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_5 0x6
+#define FV_CONFIG1_EQ1_EN_ENABLE 0x1
+#define FV_CONFIG1_EQ1_EN_DISABLE 0x0
+#define FV_CONFIG1_EQ1_BE_PRE 0x0
+#define FV_CONFIG1_EQ1_BE_PRE_EQ_0 0x1
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_1 0x2
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_2 0x3
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_3 0x4
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_4 0x5
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_5 0x6
+
+/* Register Masks */
+#define RM_CONFIG1_EQ2_EN \
+ RM(FM_CONFIG1_EQ2_EN, FB_CONFIG1_EQ2_EN)
+
+#define RM_CONFIG1_EQ2_BE \
+ RM(FM_CONFIG1_EQ2_BE, FB_CONFIG1_EQ2_BE)
+
+#define RM_CONFIG1_EQ1_EN \
+ RM(FM_CONFIG1_EQ1_EN, FB_CONFIG1_EQ1_EN)
+
+#define RM_CONFIG1_EQ1_BE \
+ RM(FM_CONFIG1_EQ1_BE, FB_CONFIG1_EQ1_BE)
+
+
+/* Register Values */
+#define RV_CONFIG1_EQ2_EN_ENABLE \
+ RV(FV_CONFIG1_EQ2_EN_ENABLE, FB_CONFIG1_EQ2_EN)
+
+#define RV_CONFIG1_EQ2_EN_DISABLE \
+ RV(FV_CONFIG1_EQ2_EN_DISABLE, FB_CONFIG1_EQ2_EN)
+
+#define RV_CONFIG1_EQ2_BE_PRE \
+ RV(FV_CONFIG1_EQ2_BE_PRE, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ_0 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ_0, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_1 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_1, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_2 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_2, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_3 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_3, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_4 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_4, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_5 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_5, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ1_EN_ENABLE \
+ RV(FV_CONFIG1_EQ1_EN_ENABLE, FB_CONFIG1_EQ1_EN)
+
+#define RV_CONFIG1_EQ1_EN_DISABLE \
+ RV(FV_CONFIG1_EQ1_EN_DISABLE, FB_CONFIG1_EQ1_EN)
+
+#define RV_CONFIG1_EQ1_BE_PRE \
+ RV(FV_CONFIG1_EQ1_BE_PRE, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ_0 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ_0, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_1 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_1, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_2 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_2, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_3 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_3, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_4 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_4, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_5 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_5, FB_CONFIG1_EQ1_BE)
+
+
+/******************************
+ * R_DMICCTL (0x24) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_DMICCTL_DMICEN 7
+#define FB_DMICCTL_DMONO 4
+#define FB_DMICCTL_DMPHADJ 2
+#define FB_DMICCTL_DMRATE 0
+
+/* Field Masks */
+#define FM_DMICCTL_DMICEN 0X1
+#define FM_DMICCTL_DMONO 0X1
+#define FM_DMICCTL_DMPHADJ 0X3
+#define FM_DMICCTL_DMRATE 0X3
+
+/* Field Values */
+#define FV_DMICCTL_DMICEN_ENABLE 0x1
+#define FV_DMICCTL_DMICEN_DISABLE 0x0
+#define FV_DMICCTL_DMONO_STEREO 0x0
+#define FV_DMICCTL_DMONO_MONO 0x1
+
+/* Register Masks */
+#define RM_DMICCTL_DMICEN \
+ RM(FM_DMICCTL_DMICEN, FB_DMICCTL_DMICEN)
+
+#define RM_DMICCTL_DMONO \
+ RM(FM_DMICCTL_DMONO, FB_DMICCTL_DMONO)
+
+#define RM_DMICCTL_DMPHADJ \
+ RM(FM_DMICCTL_DMPHADJ, FB_DMICCTL_DMPHADJ)
+
+#define RM_DMICCTL_DMRATE \
+ RM(FM_DMICCTL_DMRATE, FB_DMICCTL_DMRATE)
+
+
+/* Register Values */
+#define RV_DMICCTL_DMICEN_ENABLE \
+ RV(FV_DMICCTL_DMICEN_ENABLE, FB_DMICCTL_DMICEN)
+
+#define RV_DMICCTL_DMICEN_DISABLE \
+ RV(FV_DMICCTL_DMICEN_DISABLE, FB_DMICCTL_DMICEN)
+
+#define RV_DMICCTL_DMONO_STEREO \
+ RV(FV_DMICCTL_DMONO_STEREO, FB_DMICCTL_DMONO)
+
+#define RV_DMICCTL_DMONO_MONO \
+ RV(FV_DMICCTL_DMONO_MONO, FB_DMICCTL_DMONO)
+
+
+/*****************************
+ * R_CLECTL (0x25) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_CLECTL_LVL_MODE 4
+#define FB_CLECTL_WINDOWSEL 3
+#define FB_CLECTL_EXP_EN 2
+#define FB_CLECTL_LIMIT_EN 1
+#define FB_CLECTL_COMP_EN 0
+
+/* Field Masks */
+#define FM_CLECTL_LVL_MODE 0X1
+#define FM_CLECTL_WINDOWSEL 0X1
+#define FM_CLECTL_EXP_EN 0X1
+#define FM_CLECTL_LIMIT_EN 0X1
+#define FM_CLECTL_COMP_EN 0X1
+
+/* Field Values */
+#define FV_CLECTL_LVL_MODE_AVG 0x0
+#define FV_CLECTL_LVL_MODE_PEAK 0x1
+#define FV_CLECTL_WINDOWSEL_512 0x0
+#define FV_CLECTL_WINDOWSEL_64 0x1
+#define FV_CLECTL_EXP_EN_ENABLE 0x1
+#define FV_CLECTL_EXP_EN_DISABLE 0x0
+#define FV_CLECTL_LIMIT_EN_ENABLE 0x1
+#define FV_CLECTL_LIMIT_EN_DISABLE 0x0
+#define FV_CLECTL_COMP_EN_ENABLE 0x1
+#define FV_CLECTL_COMP_EN_DISABLE 0x0
+
+/* Register Masks */
+#define RM_CLECTL_LVL_MODE \
+ RM(FM_CLECTL_LVL_MODE, FB_CLECTL_LVL_MODE)
+
+#define RM_CLECTL_WINDOWSEL \
+ RM(FM_CLECTL_WINDOWSEL, FB_CLECTL_WINDOWSEL)
+
+#define RM_CLECTL_EXP_EN \
+ RM(FM_CLECTL_EXP_EN, FB_CLECTL_EXP_EN)
+
+#define RM_CLECTL_LIMIT_EN \
+ RM(FM_CLECTL_LIMIT_EN, FB_CLECTL_LIMIT_EN)
+
+#define RM_CLECTL_COMP_EN \
+ RM(FM_CLECTL_COMP_EN, FB_CLECTL_COMP_EN)
+
+
+/* Register Values */
+#define RV_CLECTL_LVL_MODE_AVG \
+ RV(FV_CLECTL_LVL_MODE_AVG, FB_CLECTL_LVL_MODE)
+
+#define RV_CLECTL_LVL_MODE_PEAK \
+ RV(FV_CLECTL_LVL_MODE_PEAK, FB_CLECTL_LVL_MODE)
+
+#define RV_CLECTL_WINDOWSEL_512 \
+ RV(FV_CLECTL_WINDOWSEL_512, FB_CLECTL_WINDOWSEL)
+
+#define RV_CLECTL_WINDOWSEL_64 \
+ RV(FV_CLECTL_WINDOWSEL_64, FB_CLECTL_WINDOWSEL)
+
+#define RV_CLECTL_EXP_EN_ENABLE \
+ RV(FV_CLECTL_EXP_EN_ENABLE, FB_CLECTL_EXP_EN)
+
+#define RV_CLECTL_EXP_EN_DISABLE \
+ RV(FV_CLECTL_EXP_EN_DISABLE, FB_CLECTL_EXP_EN)
+
+#define RV_CLECTL_LIMIT_EN_ENABLE \
+ RV(FV_CLECTL_LIMIT_EN_ENABLE, FB_CLECTL_LIMIT_EN)
+
+#define RV_CLECTL_LIMIT_EN_DISABLE \
+ RV(FV_CLECTL_LIMIT_EN_DISABLE, FB_CLECTL_LIMIT_EN)
+
+#define RV_CLECTL_COMP_EN_ENABLE \
+ RV(FV_CLECTL_COMP_EN_ENABLE, FB_CLECTL_COMP_EN)
+
+#define RV_CLECTL_COMP_EN_DISABLE \
+ RV(FV_CLECTL_COMP_EN_DISABLE, FB_CLECTL_COMP_EN)
+
+
+/*****************************
+ * R_MUGAIN (0x26) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_MUGAIN_CLEMUG 0
+
+/* Field Masks */
+#define FM_MUGAIN_CLEMUG 0X1F
+
+/* Field Values */
+#define FV_MUGAIN_CLEMUG_46PT5DB 0x1F
+#define FV_MUGAIN_CLEMUG_0DB 0x0
+
+/* Register Masks */
+#define RM_MUGAIN_CLEMUG \
+ RM(FM_MUGAIN_CLEMUG, FB_MUGAIN_CLEMUG)
+
+
+/* Register Values */
+#define RV_MUGAIN_CLEMUG_46PT5DB \
+ RV(FV_MUGAIN_CLEMUG_46PT5DB, FB_MUGAIN_CLEMUG)
+
+#define RV_MUGAIN_CLEMUG_0DB \
+ RV(FV_MUGAIN_CLEMUG_0DB, FB_MUGAIN_CLEMUG)
+
+
+/*****************************
+ * R_COMPTH (0x27) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_COMPTH 0
+
+/* Field Masks */
+#define FM_COMPTH 0XFF
+
+/* Field Values */
+#define FV_COMPTH_0DB 0xFF
+#define FV_COMPTH_N95PT625DB 0x0
+
+/* Register Masks */
+#define RM_COMPTH RM(FM_COMPTH, FB_COMPTH)
+
+/* Register Values */
+#define RV_COMPTH_0DB RV(FV_COMPTH_0DB, FB_COMPTH)
+#define RV_COMPTH_N95PT625DB \
+ RV(FV_COMPTH_N95PT625DB, FB_COMPTH)
+
+
+/*****************************
+ * R_CMPRAT (0x28) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_CMPRAT 0
+
+/* Field Masks */
+#define FM_CMPRAT 0X1F
+
+/* Register Masks */
+#define RM_CMPRAT RM(FM_CMPRAT, FB_CMPRAT)
+
+/******************************
+ * R_CATKTCL (0x29) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CATKTCL 0
+
+/* Field Masks */
+#define FM_CATKTCL 0XFF
+
+/* Register Masks */
+#define RM_CATKTCL RM(FM_CATKTCL, FB_CATKTCL)
+
+/******************************
+ * R_CATKTCH (0x2A) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CATKTCH 0
+
+/* Field Masks */
+#define FM_CATKTCH 0XFF
+
+/* Register Masks */
+#define RM_CATKTCH RM(FM_CATKTCH, FB_CATKTCH)
+
+/******************************
+ * R_CRELTCL (0x2B) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CRELTCL 0
+
+/* Field Masks */
+#define FM_CRELTCL 0XFF
+
+/* Register Masks */
+#define RM_CRELTCL RM(FM_CRELTCL, FB_CRELTCL)
+
+/******************************
+ * R_CRELTCH (0x2C) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CRELTCH 0
+
+/* Field Masks */
+#define FM_CRELTCH 0XFF
+
+/* Register Masks */
+#define RM_CRELTCH RM(FM_CRELTCH, FB_CRELTCH)
+
+/****************************
+ * R_LIMTH (0x2D) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_LIMTH 0
+
+/* Field Masks */
+#define FM_LIMTH 0XFF
+
+/* Field Values */
+#define FV_LIMTH_0DB 0xFF
+#define FV_LIMTH_N95PT625DB 0x0
+
+/* Register Masks */
+#define RM_LIMTH RM(FM_LIMTH, FB_LIMTH)
+
+/* Register Values */
+#define RV_LIMTH_0DB RV(FV_LIMTH_0DB, FB_LIMTH)
+#define RV_LIMTH_N95PT625DB RV(FV_LIMTH_N95PT625DB, FB_LIMTH)
+
+/*****************************
+ * R_LIMTGT (0x2E) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_LIMTGT 0
+
+/* Field Masks */
+#define FM_LIMTGT 0XFF
+
+/* Field Values */
+#define FV_LIMTGT_0DB 0xFF
+#define FV_LIMTGT_N95PT625DB 0x0
+
+/* Register Masks */
+#define RM_LIMTGT RM(FM_LIMTGT, FB_LIMTGT)
+
+/* Register Values */
+#define RV_LIMTGT_0DB RV(FV_LIMTGT_0DB, FB_LIMTGT)
+#define RV_LIMTGT_N95PT625DB \
+ RV(FV_LIMTGT_N95PT625DB, FB_LIMTGT)
+
+
+/******************************
+ * R_LATKTCL (0x2F) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LATKTCL 0
+
+/* Field Masks */
+#define FM_LATKTCL 0XFF
+
+/* Register Masks */
+#define RM_LATKTCL RM(FM_LATKTCL, FB_LATKTCL)
+
+/******************************
+ * R_LATKTCH (0x30) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LATKTCH 0
+
+/* Field Masks */
+#define FM_LATKTCH 0XFF
+
+/* Register Masks */
+#define RM_LATKTCH RM(FM_LATKTCH, FB_LATKTCH)
+
+/******************************
+ * R_LRELTCL (0x31) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LRELTCL 0
+
+/* Field Masks */
+#define FM_LRELTCL 0XFF
+
+/* Register Masks */
+#define RM_LRELTCL RM(FM_LRELTCL, FB_LRELTCL)
+
+/******************************
+ * R_LRELTCH (0x32) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LRELTCH 0
+
+/* Field Masks */
+#define FM_LRELTCH 0XFF
+
+/* Register Masks */
+#define RM_LRELTCH RM(FM_LRELTCH, FB_LRELTCH)
+
+/****************************
+ * R_EXPTH (0x33) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_EXPTH 0
+
+/* Field Masks */
+#define FM_EXPTH 0XFF
+
+/* Field Values */
+#define FV_EXPTH_0DB 0xFF
+#define FV_EXPTH_N95PT625DB 0x0
+
+/* Register Masks */
+#define RM_EXPTH RM(FM_EXPTH, FB_EXPTH)
+
+/* Register Values */
+#define RV_EXPTH_0DB RV(FV_EXPTH_0DB, FB_EXPTH)
+#define RV_EXPTH_N95PT625DB RV(FV_EXPTH_N95PT625DB, FB_EXPTH)
+
+/*****************************
+ * R_EXPRAT (0x34) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_EXPRAT 0
+
+/* Field Masks */
+#define FM_EXPRAT 0X7
+
+/* Register Masks */
+#define RM_EXPRAT RM(FM_EXPRAT, FB_EXPRAT)
+
+/******************************
+ * R_XATKTCL (0x35) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XATKTCL 0
+
+/* Field Masks */
+#define FM_XATKTCL 0XFF
+
+/* Register Masks */
+#define RM_XATKTCL RM(FM_XATKTCL, FB_XATKTCL)
+
+/******************************
+ * R_XATKTCH (0x36) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XATKTCH 0
+
+/* Field Masks */
+#define FM_XATKTCH 0XFF
+
+/* Register Masks */
+#define RM_XATKTCH RM(FM_XATKTCH, FB_XATKTCH)
+
+/******************************
+ * R_XRELTCL (0x37) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XRELTCL 0
+
+/* Field Masks */
+#define FM_XRELTCL 0XFF
+
+/* Register Masks */
+#define RM_XRELTCL RM(FM_XRELTCL, FB_XRELTCL)
+
+/******************************
+ * R_XRELTCH (0x38) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XRELTCH 0
+
+/* Field Masks */
+#define FM_XRELTCH 0XFF
+
+/* Register Masks */
+#define RM_XRELTCH RM(FM_XRELTCH, FB_XRELTCH)
+
+/****************************
+ * R_FXCTL (0x39) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_FXCTL_3DEN 4
+#define FB_FXCTL_TEEN 3
+#define FB_FXCTL_TNLFBYPASS 2
+#define FB_FXCTL_BEEN 1
+#define FB_FXCTL_BNLFBYPASS 0
+
+/* Field Masks */
+#define FM_FXCTL_3DEN 0X1
+#define FM_FXCTL_TEEN 0X1
+#define FM_FXCTL_TNLFBYPASS 0X1
+#define FM_FXCTL_BEEN 0X1
+#define FM_FXCTL_BNLFBYPASS 0X1
+
+/* Field Values */
+#define FV_FXCTL_3DEN_ENABLE 0x1
+#define FV_FXCTL_3DEN_DISABLE 0x0
+#define FV_FXCTL_TEEN_ENABLE 0x1
+#define FV_FXCTL_TEEN_DISABLE 0x0
+#define FV_FXCTL_TNLFBYPASS_ENABLE 0x1
+#define FV_FXCTL_TNLFBYPASS_DISABLE 0x0
+#define FV_FXCTL_BEEN_ENABLE 0x1
+#define FV_FXCTL_BEEN_DISABLE 0x0
+#define FV_FXCTL_BNLFBYPASS_ENABLE 0x1
+#define FV_FXCTL_BNLFBYPASS_DISABLE 0x0
+
+/* Register Masks */
+#define RM_FXCTL_3DEN RM(FM_FXCTL_3DEN, FB_FXCTL_3DEN)
+#define RM_FXCTL_TEEN RM(FM_FXCTL_TEEN, FB_FXCTL_TEEN)
+#define RM_FXCTL_TNLFBYPASS \
+ RM(FM_FXCTL_TNLFBYPASS, FB_FXCTL_TNLFBYPASS)
+
+#define RM_FXCTL_BEEN RM(FM_FXCTL_BEEN, FB_FXCTL_BEEN)
+#define RM_FXCTL_BNLFBYPASS \
+ RM(FM_FXCTL_BNLFBYPASS, FB_FXCTL_BNLFBYPASS)
+
+
+/* Register Values */
+#define RV_FXCTL_3DEN_ENABLE \
+ RV(FV_FXCTL_3DEN_ENABLE, FB_FXCTL_3DEN)
+
+#define RV_FXCTL_3DEN_DISABLE \
+ RV(FV_FXCTL_3DEN_DISABLE, FB_FXCTL_3DEN)
+
+#define RV_FXCTL_TEEN_ENABLE \
+ RV(FV_FXCTL_TEEN_ENABLE, FB_FXCTL_TEEN)
+
+#define RV_FXCTL_TEEN_DISABLE \
+ RV(FV_FXCTL_TEEN_DISABLE, FB_FXCTL_TEEN)
+
+#define RV_FXCTL_TNLFBYPASS_ENABLE \
+ RV(FV_FXCTL_TNLFBYPASS_ENABLE, FB_FXCTL_TNLFBYPASS)
+
+#define RV_FXCTL_TNLFBYPASS_DISABLE \
+ RV(FV_FXCTL_TNLFBYPASS_DISABLE, FB_FXCTL_TNLFBYPASS)
+
+#define RV_FXCTL_BEEN_ENABLE \
+ RV(FV_FXCTL_BEEN_ENABLE, FB_FXCTL_BEEN)
+
+#define RV_FXCTL_BEEN_DISABLE \
+ RV(FV_FXCTL_BEEN_DISABLE, FB_FXCTL_BEEN)
+
+#define RV_FXCTL_BNLFBYPASS_ENABLE \
+ RV(FV_FXCTL_BNLFBYPASS_ENABLE, FB_FXCTL_BNLFBYPASS)
+
+#define RV_FXCTL_BNLFBYPASS_DISABLE \
+ RV(FV_FXCTL_BNLFBYPASS_DISABLE, FB_FXCTL_BNLFBYPASS)
+
+
+/*******************************
+ * R_DACCRWRL (0x3A) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRL_DACCRWDL 0
+
+/* Field Masks */
+#define FM_DACCRWRL_DACCRWDL 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRL_DACCRWDL \
+ RM(FM_DACCRWRL_DACCRWDL, FB_DACCRWRL_DACCRWDL)
+
+
+/*******************************
+ * R_DACCRWRM (0x3B) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRM_DACCRWDM 0
+
+/* Field Masks */
+#define FM_DACCRWRM_DACCRWDM 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRM_DACCRWDM \
+ RM(FM_DACCRWRM_DACCRWDM, FB_DACCRWRM_DACCRWDM)
+
+
+/*******************************
+ * R_DACCRWRH (0x3C) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRH_DACCRWDH 0
+
+/* Field Masks */
+#define FM_DACCRWRH_DACCRWDH 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRH_DACCRWDH \
+ RM(FM_DACCRWRH_DACCRWDH, FB_DACCRWRH_DACCRWDH)
+
+
+/*******************************
+ * R_DACCRRDL (0x3D) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDL 0
+
+/* Field Masks */
+#define FM_DACCRRDL 0XFF
+
+/* Register Masks */
+#define RM_DACCRRDL RM(FM_DACCRRDL, FB_DACCRRDL)
+
+/*******************************
+ * R_DACCRRDM (0x3E) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDM 0
+
+/* Field Masks */
+#define FM_DACCRRDM 0XFF
+
+/* Register Masks */
+#define RM_DACCRRDM RM(FM_DACCRRDM, FB_DACCRRDM)
+
+/*******************************
+ * R_DACCRRDH (0x3F) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDH 0
+
+/* Field Masks */
+#define FM_DACCRRDH 0XFF
+
+/* Register Masks */
+#define RM_DACCRRDH RM(FM_DACCRRDH, FB_DACCRRDH)
+
+/********************************
+ * R_DACCRADDR (0x40) *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACCRADDR_DACCRADD 0
+
+/* Field Masks */
+#define FM_DACCRADDR_DACCRADD 0XFF
+
+/* Register Masks */
+#define RM_DACCRADDR_DACCRADD \
+ RM(FM_DACCRADDR_DACCRADD, FB_DACCRADDR_DACCRADD)
+
+
+/******************************
+ * R_DCOFSEL (0x41) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_DCOFSEL_DC_COEF_SEL 0
+
+/* Field Masks */
+#define FM_DCOFSEL_DC_COEF_SEL 0X7
+
+/* Field Values */
+#define FV_DCOFSEL_DC_COEF_SEL_2_N8 0x0
+#define FV_DCOFSEL_DC_COEF_SEL_2_N9 0x1
+#define FV_DCOFSEL_DC_COEF_SEL_2_N10 0x2
+#define FV_DCOFSEL_DC_COEF_SEL_2_N11 0x3
+#define FV_DCOFSEL_DC_COEF_SEL_2_N12 0x4
+#define FV_DCOFSEL_DC_COEF_SEL_2_N13 0x5
+#define FV_DCOFSEL_DC_COEF_SEL_2_N14 0x6
+#define FV_DCOFSEL_DC_COEF_SEL_2_N15 0x7
+
+/* Register Masks */
+#define RM_DCOFSEL_DC_COEF_SEL \
+ RM(FM_DCOFSEL_DC_COEF_SEL, FB_DCOFSEL_DC_COEF_SEL)
+
+
+/* Register Values */
+#define RV_DCOFSEL_DC_COEF_SEL_2_N8 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N8, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N9 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N9, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N10 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N10, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N11 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N11, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N12 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N12, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N13 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N13, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N14 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N14, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N15 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N15, FB_DCOFSEL_DC_COEF_SEL)
+
+
+/******************************
+ * R_PLLCTL9 (0x4E) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL9_REFDIV_PLL1 0
+
+/* Field Masks */
+#define FM_PLLCTL9_REFDIV_PLL1 0XFF
+
+/* Register Masks */
+#define RM_PLLCTL9_REFDIV_PLL1 \
+ RM(FM_PLLCTL9_REFDIV_PLL1, FB_PLLCTL9_REFDIV_PLL1)
+
+
+/******************************
+ * R_PLLCTLA (0x4F) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLA_OUTDIV_PLL1 0
+
+/* Field Masks */
+#define FM_PLLCTLA_OUTDIV_PLL1 0XFF
+
+/* Register Masks */
+#define RM_PLLCTLA_OUTDIV_PLL1 \
+ RM(FM_PLLCTLA_OUTDIV_PLL1, FB_PLLCTLA_OUTDIV_PLL1)
+
+
+/******************************
+ * R_PLLCTLB (0x50) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLB_FBDIV_PLL1L 0
+
+/* Field Masks */
+#define FM_PLLCTLB_FBDIV_PLL1L 0XFF
+
+/* Register Masks */
+#define RM_PLLCTLB_FBDIV_PLL1L \
+ RM(FM_PLLCTLB_FBDIV_PLL1L, FB_PLLCTLB_FBDIV_PLL1L)
+
+
+/******************************
+ * R_PLLCTLC (0x51) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLC_FBDIV_PLL1H 0
+
+/* Field Masks */
+#define FM_PLLCTLC_FBDIV_PLL1H 0X7
+
+/* Register Masks */
+#define RM_PLLCTLC_FBDIV_PLL1H \
+ RM(FM_PLLCTLC_FBDIV_PLL1H, FB_PLLCTLC_FBDIV_PLL1H)
+
+
+/******************************
+ * R_PLLCTLD (0x52) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLD_RZ_PLL1 3
+#define FB_PLLCTLD_CP_PLL1 0
+
+/* Field Masks */
+#define FM_PLLCTLD_RZ_PLL1 0X7
+#define FM_PLLCTLD_CP_PLL1 0X7
+
+/* Register Masks */
+#define RM_PLLCTLD_RZ_PLL1 \
+ RM(FM_PLLCTLD_RZ_PLL1, FB_PLLCTLD_RZ_PLL1)
+
+#define RM_PLLCTLD_CP_PLL1 \
+ RM(FM_PLLCTLD_CP_PLL1, FB_PLLCTLD_CP_PLL1)
+
+
+/******************************
+ * R_PLLCTLE (0x53) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLE_REFDIV_PLL2 0
+
+/* Field Masks */
+#define FM_PLLCTLE_REFDIV_PLL2 0XFF
+
+/* Register Masks */
+#define RM_PLLCTLE_REFDIV_PLL2 \
+ RM(FM_PLLCTLE_REFDIV_PLL2, FB_PLLCTLE_REFDIV_PLL2)
+
+
+/******************************
+ * R_PLLCTLF (0x54) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLF_OUTDIV_PLL2 0
+
+/* Field Masks */
+#define FM_PLLCTLF_OUTDIV_PLL2 0XFF
+
+/* Register Masks */
+#define RM_PLLCTLF_OUTDIV_PLL2 \
+ RM(FM_PLLCTLF_OUTDIV_PLL2, FB_PLLCTLF_OUTDIV_PLL2)
+
+
+/*******************************
+ * R_PLLCTL10 (0x55) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL10_FBDIV_PLL2L 0
+
+/* Field Masks */
+#define FM_PLLCTL10_FBDIV_PLL2L 0XFF
+
+/* Register Masks */
+#define RM_PLLCTL10_FBDIV_PLL2L \
+ RM(FM_PLLCTL10_FBDIV_PLL2L, FB_PLLCTL10_FBDIV_PLL2L)
+
+
+/*******************************
+ * R_PLLCTL11 (0x56) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL11_FBDIV_PLL2H 0
+
+/* Field Masks */
+#define FM_PLLCTL11_FBDIV_PLL2H 0X7
+
+/* Register Masks */
+#define RM_PLLCTL11_FBDIV_PLL2H \
+ RM(FM_PLLCTL11_FBDIV_PLL2H, FB_PLLCTL11_FBDIV_PLL2H)
+
+
+/*******************************
+ * R_PLLCTL12 (0x57) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL12_RZ_PLL2 3
+#define FB_PLLCTL12_CP_PLL2 0
+
+/* Field Masks */
+#define FM_PLLCTL12_RZ_PLL2 0X7
+#define FM_PLLCTL12_CP_PLL2 0X7
+
+/* Register Masks */
+#define RM_PLLCTL12_RZ_PLL2 \
+ RM(FM_PLLCTL12_RZ_PLL2, FB_PLLCTL12_RZ_PLL2)
+
+#define RM_PLLCTL12_CP_PLL2 \
+ RM(FM_PLLCTL12_CP_PLL2, FB_PLLCTL12_CP_PLL2)
+
+
+/*******************************
+ * R_PLLCTL1B (0x60) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL1B_VCOI_PLL2 4
+#define FB_PLLCTL1B_VCOI_PLL1 2
+
+/* Field Masks */
+#define FM_PLLCTL1B_VCOI_PLL2 0X3
+#define FM_PLLCTL1B_VCOI_PLL1 0X3
+
+/* Register Masks */
+#define RM_PLLCTL1B_VCOI_PLL2 \
+ RM(FM_PLLCTL1B_VCOI_PLL2, FB_PLLCTL1B_VCOI_PLL2)
+
+#define RM_PLLCTL1B_VCOI_PLL1 \
+ RM(FM_PLLCTL1B_VCOI_PLL1, FB_PLLCTL1B_VCOI_PLL1)
+
+
+/*******************************
+ * R_PLLCTL1C (0x61) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL1C_PDB_PLL2 2
+#define FB_PLLCTL1C_PDB_PLL1 1
+
+/* Field Masks */
+#define FM_PLLCTL1C_PDB_PLL2 0X1
+#define FM_PLLCTL1C_PDB_PLL1 0X1
+
+/* Field Values */
+#define FV_PLLCTL1C_PDB_PLL2_ENABLE 0x1
+#define FV_PLLCTL1C_PDB_PLL2_DISABLE 0x0
+#define FV_PLLCTL1C_PDB_PLL1_ENABLE 0x1
+#define FV_PLLCTL1C_PDB_PLL1_DISABLE 0x0
+
+/* Register Masks */
+#define RM_PLLCTL1C_PDB_PLL2 \
+ RM(FM_PLLCTL1C_PDB_PLL2, FB_PLLCTL1C_PDB_PLL2)
+
+#define RM_PLLCTL1C_PDB_PLL1 \
+ RM(FM_PLLCTL1C_PDB_PLL1, FB_PLLCTL1C_PDB_PLL1)
+
+
+/* Register Values */
+#define RV_PLLCTL1C_PDB_PLL2_ENABLE \
+ RV(FV_PLLCTL1C_PDB_PLL2_ENABLE, FB_PLLCTL1C_PDB_PLL2)
+
+#define RV_PLLCTL1C_PDB_PLL2_DISABLE \
+ RV(FV_PLLCTL1C_PDB_PLL2_DISABLE, FB_PLLCTL1C_PDB_PLL2)
+
+#define RV_PLLCTL1C_PDB_PLL1_ENABLE \
+ RV(FV_PLLCTL1C_PDB_PLL1_ENABLE, FB_PLLCTL1C_PDB_PLL1)
+
+#define RV_PLLCTL1C_PDB_PLL1_DISABLE \
+ RV(FV_PLLCTL1C_PDB_PLL1_DISABLE, FB_PLLCTL1C_PDB_PLL1)
+
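+/*
+ * Illustrative sketch (assumption): PDB reads as an active-high
+ * "power down bar", so powering a PLL up or down toggles one bit:
+ *
+ *	snd_soc_update_bits(codec, R_PLLCTL1C, RM_PLLCTL1C_PDB_PLL1,
+ *			    RV_PLLCTL1C_PDB_PLL1_ENABLE);
+ */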
+
+/*******************************
+ * R_TIMEBASE (0x77) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_TIMEBASE_DIVIDER 0
+
+/* Field Masks */
+#define FM_TIMEBASE_DIVIDER 0XFF
+
+/* Register Masks */
+#define RM_TIMEBASE_DIVIDER \
+ RM(FM_TIMEBASE_DIVIDER, FB_TIMEBASE_DIVIDER)
+
+
+/*****************************
+ * R_DEVIDL (0x7D) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DEVIDL_DIDL 0
+
+/* Field Masks */
+#define FM_DEVIDL_DIDL 0XFF
+
+/* Register Masks */
+#define RM_DEVIDL_DIDL RM(FM_DEVIDL_DIDL, FB_DEVIDL_DIDL)
+
+/*****************************
+ * R_DEVIDH (0x7E) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DEVIDH_DIDH 0
+
+/* Field Masks */
+#define FM_DEVIDH_DIDH 0XFF
+
+/* Register Masks */
+#define RM_DEVIDH_DIDH RM(FM_DEVIDH_DIDH, FB_DEVIDH_DIDH)
+
+/****************************
+ * R_RESET (0x80) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_RESET 0
+
+/* Field Masks */
+#define FM_RESET 0XFF
+
+/* Field Values */
+#define FV_RESET_ENABLE 0x85
+
+/* Register Masks */
+#define RM_RESET RM(FM_RESET, FB_RESET)
+
+/* Register Values */
+#define RV_RESET_ENABLE RV(FV_RESET_ENABLE, FB_RESET)
+
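+/*
+ * Illustrative sketch (not from this patch): RESET takes a magic key
+ * (0x85) rather than a bit flag, so a full register write is the
+ * natural fit:
+ *
+ *	snd_soc_write(codec, R_RESET, RV_RESET_ENABLE);
+ */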
+/********************************
+ * R_DACCRSTAT (0x8A) *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACCRSTAT_DACCR_BUSY 7
+
+/* Field Masks */
+#define FM_DACCRSTAT_DACCR_BUSY 0X1
+
+/* Register Masks */
+#define RM_DACCRSTAT_DACCR_BUSY \
+ RM(FM_DACCRSTAT_DACCR_BUSY, FB_DACCRSTAT_DACCR_BUSY)
+
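+/*
+ * Illustrative sketch (assumed usage): DACCR_BUSY gates access to the
+ * coefficient RAM ports (R_DACCRADDR, R_DACCRWRL/M/H above), so a
+ * driver would poll it before each transfer, e.g.
+ *
+ *	do {
+ *		val = snd_soc_read(codec, R_DACCRSTAT);
+ *	} while (val & RM_DACCRSTAT_DACCR_BUSY);
+ */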
+
+/******************************
+ * R_PLLCTL0 (0x8E) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL0_PLL2_LOCK 1
+#define FB_PLLCTL0_PLL1_LOCK 0
+
+/* Field Masks */
+#define FM_PLLCTL0_PLL2_LOCK 0X1
+#define FM_PLLCTL0_PLL1_LOCK 0X1
+
+/* Register Masks */
+#define RM_PLLCTL0_PLL2_LOCK \
+ RM(FM_PLLCTL0_PLL2_LOCK, FB_PLLCTL0_PLL2_LOCK)
+
+#define RM_PLLCTL0_PLL1_LOCK \
+ RM(FM_PLLCTL0_PLL1_LOCK, FB_PLLCTL0_PLL1_LOCK)
+
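+/*
+ * Illustrative sketch (assumption): the lock bits are read-only
+ * status, typically checked after programming the PLL dividers:
+ *
+ *	locked = snd_soc_read(codec, R_PLLCTL0) & RM_PLLCTL0_PLL1_LOCK;
+ */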
+
+/********************************
+ * R_PLLREFSEL (0x8F) *
+ ********************************/
+
+/* Field Offsets */
+#define FB_PLLREFSEL_PLL2_REF_SEL 4
+#define FB_PLLREFSEL_PLL1_REF_SEL 0
+
+/* Field Masks */
+#define FM_PLLREFSEL_PLL2_REF_SEL 0X7
+#define FM_PLLREFSEL_PLL1_REF_SEL 0X7
+
+/* Field Values */
+#define FV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1 0x0
+#define FV_PLLREFSEL_PLL2_REF_SEL_MCLK2 0x1
+#define FV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 0x0
+#define FV_PLLREFSEL_PLL1_REF_SEL_MCLK2 0x1
+
+/* Register Masks */
+#define RM_PLLREFSEL_PLL2_REF_SEL \
+ RM(FM_PLLREFSEL_PLL2_REF_SEL, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RM_PLLREFSEL_PLL1_REF_SEL \
+ RM(FM_PLLREFSEL_PLL1_REF_SEL, FB_PLLREFSEL_PLL1_REF_SEL)
+
+
+/* Register Values */
+#define RV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1 \
+ RV(FV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RV_PLLREFSEL_PLL2_REF_SEL_MCLK2 \
+ RV(FV_PLLREFSEL_PLL2_REF_SEL_MCLK2, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 \
+ RV(FV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1, FB_PLLREFSEL_PLL1_REF_SEL)
+
+#define RV_PLLREFSEL_PLL1_REF_SEL_MCLK2 \
+ RV(FV_PLLREFSEL_PLL1_REF_SEL_MCLK2, FB_PLLREFSEL_PLL1_REF_SEL)
+
+
+/*******************************
+ * R_DACMBCEN (0xC7) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACMBCEN_MBCEN3 2
+#define FB_DACMBCEN_MBCEN2 1
+#define FB_DACMBCEN_MBCEN1 0
+
+/* Field Masks */
+#define FM_DACMBCEN_MBCEN3 0X1
+#define FM_DACMBCEN_MBCEN2 0X1
+#define FM_DACMBCEN_MBCEN1 0X1
+
+/* Register Masks */
+#define RM_DACMBCEN_MBCEN3 \
+ RM(FM_DACMBCEN_MBCEN3, FB_DACMBCEN_MBCEN3)
+
+#define RM_DACMBCEN_MBCEN2 \
+ RM(FM_DACMBCEN_MBCEN2, FB_DACMBCEN_MBCEN2)
+
+#define RM_DACMBCEN_MBCEN1 \
+ RM(FM_DACMBCEN_MBCEN1, FB_DACMBCEN_MBCEN1)
+
+
+/********************************
+ * R_DACMBCCTL (0xC8) *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACMBCCTL_LVLMODE3 5
+#define FB_DACMBCCTL_WINSEL3 4
+#define FB_DACMBCCTL_LVLMODE2 3
+#define FB_DACMBCCTL_WINSEL2 2
+#define FB_DACMBCCTL_LVLMODE1 1
+#define FB_DACMBCCTL_WINSEL1 0
+
+/* Field Masks */
+#define FM_DACMBCCTL_LVLMODE3 0X1
+#define FM_DACMBCCTL_WINSEL3 0X1
+#define FM_DACMBCCTL_LVLMODE2 0X1
+#define FM_DACMBCCTL_WINSEL2 0X1
+#define FM_DACMBCCTL_LVLMODE1 0X1
+#define FM_DACMBCCTL_WINSEL1 0X1
+
+/* Register Masks */
+#define RM_DACMBCCTL_LVLMODE3 \
+ RM(FM_DACMBCCTL_LVLMODE3, FB_DACMBCCTL_LVLMODE3)
+
+#define RM_DACMBCCTL_WINSEL3 \
+ RM(FM_DACMBCCTL_WINSEL3, FB_DACMBCCTL_WINSEL3)
+
+#define RM_DACMBCCTL_LVLMODE2 \
+ RM(FM_DACMBCCTL_LVLMODE2, FB_DACMBCCTL_LVLMODE2)
+
+#define RM_DACMBCCTL_WINSEL2 \
+ RM(FM_DACMBCCTL_WINSEL2, FB_DACMBCCTL_WINSEL2)
+
+#define RM_DACMBCCTL_LVLMODE1 \
+ RM(FM_DACMBCCTL_LVLMODE1, FB_DACMBCCTL_LVLMODE1)
+
+#define RM_DACMBCCTL_WINSEL1 \
+ RM(FM_DACMBCCTL_WINSEL1, FB_DACMBCCTL_WINSEL1)
+
+
+/*********************************
+ * R_DACMBCMUG1 (0xC9) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG1_PHASE 5
+#define FB_DACMBCMUG1_MUGAIN 0
+
+/* Field Masks */
+#define FM_DACMBCMUG1_PHASE 0X1
+#define FM_DACMBCMUG1_MUGAIN 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG1_PHASE \
+ RM(FM_DACMBCMUG1_PHASE, FB_DACMBCMUG1_PHASE)
+
+#define RM_DACMBCMUG1_MUGAIN \
+ RM(FM_DACMBCMUG1_MUGAIN, FB_DACMBCMUG1_MUGAIN)
+
+
+/*********************************
+ * R_DACMBCTHR1 (0xCA) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR1_THRESH 0
+
+/* Field Masks */
+#define FM_DACMBCTHR1_THRESH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR1_THRESH \
+ RM(FM_DACMBCTHR1_THRESH, FB_DACMBCTHR1_THRESH)
+
+
+/*********************************
+ * R_DACMBCRAT1 (0xCB) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT1_RATIO 0
+
+/* Field Masks */
+#define FM_DACMBCRAT1_RATIO 0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT1_RATIO \
+ RM(FM_DACMBCRAT1_RATIO, FB_DACMBCRAT1_RATIO)
+
+
+/**********************************
+ * R_DACMBCATK1L (0xCC) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK1L_TCATKL 0
+
+/* Field Masks */
+#define FM_DACMBCATK1L_TCATKL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK1L_TCATKL \
+ RM(FM_DACMBCATK1L_TCATKL, FB_DACMBCATK1L_TCATKL)
+
+
+/**********************************
+ * R_DACMBCATK1H (0xCD) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK1H_TCATKH 0
+
+/* Field Masks */
+#define FM_DACMBCATK1H_TCATKH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK1H_TCATKH \
+ RM(FM_DACMBCATK1H_TCATKH, FB_DACMBCATK1H_TCATKH)
+
+
+/**********************************
+ * R_DACMBCREL1L (0xCE) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL1L_TCRELL 0
+
+/* Field Masks */
+#define FM_DACMBCREL1L_TCRELL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL1L_TCRELL \
+ RM(FM_DACMBCREL1L_TCRELL, FB_DACMBCREL1L_TCRELL)
+
+
+/**********************************
+ * R_DACMBCREL1H (0xCF) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL1H_TCRELH 0
+
+/* Field Masks */
+#define FM_DACMBCREL1H_TCRELH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL1H_TCRELH \
+ RM(FM_DACMBCREL1H_TCRELH, FB_DACMBCREL1H_TCRELH)
+
+
+/*********************************
+ * R_DACMBCMUG2 (0xD0) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG2_PHASE 5
+#define FB_DACMBCMUG2_MUGAIN 0
+
+/* Field Masks */
+#define FM_DACMBCMUG2_PHASE 0X1
+#define FM_DACMBCMUG2_MUGAIN 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG2_PHASE \
+ RM(FM_DACMBCMUG2_PHASE, FB_DACMBCMUG2_PHASE)
+
+#define RM_DACMBCMUG2_MUGAIN \
+ RM(FM_DACMBCMUG2_MUGAIN, FB_DACMBCMUG2_MUGAIN)
+
+
+/*********************************
+ * R_DACMBCTHR2 (0xD1) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR2_THRESH 0
+
+/* Field Masks */
+#define FM_DACMBCTHR2_THRESH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR2_THRESH \
+ RM(FM_DACMBCTHR2_THRESH, FB_DACMBCTHR2_THRESH)
+
+
+/*********************************
+ * R_DACMBCRAT2 (0xD2) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT2_RATIO 0
+
+/* Field Masks */
+#define FM_DACMBCRAT2_RATIO 0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT2_RATIO \
+ RM(FM_DACMBCRAT2_RATIO, FB_DACMBCRAT2_RATIO)
+
+
+/**********************************
+ * R_DACMBCATK2L (0xD3) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK2L_TCATKL 0
+
+/* Field Masks */
+#define FM_DACMBCATK2L_TCATKL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK2L_TCATKL \
+ RM(FM_DACMBCATK2L_TCATKL, FB_DACMBCATK2L_TCATKL)
+
+
+/**********************************
+ * R_DACMBCATK2H (0xD4) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK2H_TCATKH 0
+
+/* Field Masks */
+#define FM_DACMBCATK2H_TCATKH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK2H_TCATKH \
+ RM(FM_DACMBCATK2H_TCATKH, FB_DACMBCATK2H_TCATKH)
+
+
+/**********************************
+ * R_DACMBCREL2L (0xD5) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL2L_TCRELL 0
+
+/* Field Masks */
+#define FM_DACMBCREL2L_TCRELL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL2L_TCRELL \
+ RM(FM_DACMBCREL2L_TCRELL, FB_DACMBCREL2L_TCRELL)
+
+
+/**********************************
+ * R_DACMBCREL2H (0xD6) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL2H_TCRELH 0
+
+/* Field Masks */
+#define FM_DACMBCREL2H_TCRELH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL2H_TCRELH \
+ RM(FM_DACMBCREL2H_TCRELH, FB_DACMBCREL2H_TCRELH)
+
+
+/*********************************
+ * R_DACMBCMUG3 (0xD7) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG3_PHASE 5
+#define FB_DACMBCMUG3_MUGAIN 0
+
+/* Field Masks */
+#define FM_DACMBCMUG3_PHASE 0X1
+#define FM_DACMBCMUG3_MUGAIN 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG3_PHASE \
+ RM(FM_DACMBCMUG3_PHASE, FB_DACMBCMUG3_PHASE)
+
+#define RM_DACMBCMUG3_MUGAIN \
+ RM(FM_DACMBCMUG3_MUGAIN, FB_DACMBCMUG3_MUGAIN)
+
+
+/*********************************
+ * R_DACMBCTHR3 (0xD8) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR3_THRESH 0
+
+/* Field Masks */
+#define FM_DACMBCTHR3_THRESH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR3_THRESH \
+ RM(FM_DACMBCTHR3_THRESH, FB_DACMBCTHR3_THRESH)
+
+
+/*********************************
+ * R_DACMBCRAT3 (0xD9) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT3_RATIO 0
+
+/* Field Masks */
+#define FM_DACMBCRAT3_RATIO 0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT3_RATIO \
+ RM(FM_DACMBCRAT3_RATIO, FB_DACMBCRAT3_RATIO)
+
+
+/**********************************
+ * R_DACMBCATK3L (0xDA) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK3L_TCATKL 0
+
+/* Field Masks */
+#define FM_DACMBCATK3L_TCATKL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK3L_TCATKL \
+ RM(FM_DACMBCATK3L_TCATKL, FB_DACMBCATK3L_TCATKL)
+
+
+/**********************************
+ * R_DACMBCATK3H (0xDB) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK3H_TCATKH 0
+
+/* Field Masks */
+#define FM_DACMBCATK3H_TCATKH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK3H_TCATKH \
+ RM(FM_DACMBCATK3H_TCATKH, FB_DACMBCATK3H_TCATKH)
+
+
+/**********************************
+ * R_DACMBCREL3L (0xDC) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL3L_TCRELL 0
+
+/* Field Masks */
+#define FM_DACMBCREL3L_TCRELL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL3L_TCRELL \
+ RM(FM_DACMBCREL3L_TCRELL, FB_DACMBCREL3L_TCRELL)
+
+
+/**********************************
+ * R_DACMBCREL3H (0xDD) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL3H_TCRELH 0
+
+/* Field Masks */
+#define FM_DACMBCREL3H_TCRELH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL3H_TCRELH \
+ RM(FM_DACMBCREL3H_TCRELH, FB_DACMBCREL3H_TCRELH)
+
+
+#endif /* __WOOKIE_H__ */
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index cfe72b9d4356..8798182959c1 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -240,7 +240,6 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
sizeof(struct twl4030_codec_data),
GFP_KERNEL);
if (!pdata) {
- dev_err(codec->dev, "Can not allocate memory\n");
of_node_put(twl4030_codec_node);
return NULL;
}
@@ -851,14 +850,14 @@ static int snd_soc_get_volsw_twl4030(struct snd_kcontrol *kcontrol,
int mask = (1 << fls(max)) - 1;
ucontrol->value.integer.value[0] =
- (snd_soc_read(codec, reg) >> shift) & mask;
+ (twl4030_read(codec, reg) >> shift) & mask;
if (ucontrol->value.integer.value[0])
ucontrol->value.integer.value[0] =
max + 1 - ucontrol->value.integer.value[0];
if (shift != rshift) {
ucontrol->value.integer.value[1] =
- (snd_soc_read(codec, reg) >> rshift) & mask;
+ (twl4030_read(codec, reg) >> rshift) & mask;
if (ucontrol->value.integer.value[1])
ucontrol->value.integer.value[1] =
max + 1 - ucontrol->value.integer.value[1];
@@ -909,9 +908,9 @@ static int snd_soc_get_volsw_r2_twl4030(struct snd_kcontrol *kcontrol,
int mask = (1<<fls(max))-1;
ucontrol->value.integer.value[0] =
- (snd_soc_read(codec, reg) >> shift) & mask;
+ (twl4030_read(codec, reg) >> shift) & mask;
ucontrol->value.integer.value[1] =
- (snd_soc_read(codec, reg2) >> shift) & mask;
+ (twl4030_read(codec, reg2) >> shift) & mask;
if (ucontrol->value.integer.value[0])
ucontrol->value.integer.value[0] =
@@ -2196,8 +2195,6 @@ static int twl4030_soc_remove(struct snd_soc_codec *codec)
static const struct snd_soc_codec_driver soc_codec_dev_twl4030 = {
.probe = twl4030_soc_probe,
.remove = twl4030_soc_remove,
- .read = twl4030_read,
- .write = twl4030_write,
.set_bias_level = twl4030_set_bias_level,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 1773ff84ee3b..3b895b4b451c 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -106,10 +106,12 @@ static const struct snd_pcm_hw_constraint_list sysclk_constraints[] = {
{ .count = ARRAY_SIZE(hp_rates), .list = hp_rates, },
};
+#define to_twl6040(codec) dev_get_drvdata((codec)->dev->parent)
+
static unsigned int twl6040_read(struct snd_soc_codec *codec, unsigned int reg)
{
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
u8 value;
if (reg >= TWL6040_CACHEREGNUM)
@@ -171,7 +173,7 @@ static inline void twl6040_update_dl12_cache(struct snd_soc_codec *codec,
static int twl6040_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int value)
{
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
if (reg >= TWL6040_CACHEREGNUM)
return -EIO;
@@ -541,7 +543,7 @@ int twl6040_get_dl1_gain(struct snd_soc_codec *codec)
if (snd_soc_dapm_get_pin_status(dapm, "HSOR") ||
snd_soc_dapm_get_pin_status(dapm, "HSOL")) {
- u8 val = snd_soc_read(codec, TWL6040_REG_HSLCTL);
+ u8 val = twl6040_read(codec, TWL6040_REG_HSLCTL);
if (val & TWL6040_HSDACMODE)
/* HSDACL in LP mode */
return -8; /* -8dB */
@@ -572,7 +574,7 @@ EXPORT_SYMBOL_GPL(twl6040_get_trim_value);
int twl6040_get_hs_step_size(struct snd_soc_codec *codec)
{
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
if (twl6040_get_revid(twl6040) < TWL6040_REV_ES1_3)
/* For ES under ES_1.3 HS step is 2 mV */
@@ -830,7 +832,7 @@ static const struct snd_soc_dapm_route intercon[] = {
static int twl6040_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
int ret = 0;
@@ -922,7 +924,7 @@ static int twl6040_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
int ret;
@@ -964,7 +966,7 @@ static int twl6040_set_dai_sysclk(struct snd_soc_dai *codec_dai,
static void twl6040_mute_path(struct snd_soc_codec *codec, enum twl6040_dai_id id,
int mute)
{
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
int hslctl, hsrctl, earctl;
int hflctl, hfrctl;
@@ -1108,7 +1110,6 @@ static struct snd_soc_dai_driver twl6040_dai[] = {
static int twl6040_probe(struct snd_soc_codec *codec)
{
struct twl6040_data *priv;
- struct twl6040 *twl6040 = dev_get_drvdata(codec->dev->parent);
struct platform_device *pdev = to_platform_device(codec->dev);
int ret = 0;
@@ -1119,7 +1120,6 @@ static int twl6040_probe(struct snd_soc_codec *codec)
snd_soc_codec_set_drvdata(codec, priv);
priv->codec = codec;
- codec->control_data = twl6040;
priv->plug_irq = platform_get_irq(pdev, 0);
if (priv->plug_irq < 0) {
@@ -1158,8 +1158,6 @@ static int twl6040_remove(struct snd_soc_codec *codec)
static const struct snd_soc_codec_driver soc_codec_dev_twl6040 = {
.probe = twl6040_probe,
.remove = twl6040_remove,
- .read = twl6040_read,
- .write = twl6040_write,
.set_bias_level = twl6040_set_bias_level,
.suspend_bias_off = true,
.ignore_pmdown_time = true,
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 926c81ae8185..c73e6a192224 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -37,7 +37,8 @@ struct uda1380_priv {
struct snd_soc_codec *codec;
unsigned int dac_clk;
struct work_struct work;
- void *control_data;
+ struct i2c_client *i2c;
+ u16 *reg_cache;
};
/*
@@ -63,7 +64,9 @@ static unsigned long uda1380_cache_dirty;
static inline unsigned int uda1380_read_reg_cache(struct snd_soc_codec *codec,
unsigned int reg)
{
- u16 *cache = codec->reg_cache;
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
+ u16 *cache = uda1380->reg_cache;
+
if (reg == UDA1380_RESET)
return 0;
if (reg >= UDA1380_CACHEREGNUM)
@@ -77,7 +80,8 @@ static inline unsigned int uda1380_read_reg_cache(struct snd_soc_codec *codec,
static inline void uda1380_write_reg_cache(struct snd_soc_codec *codec,
u16 reg, unsigned int value)
{
- u16 *cache = codec->reg_cache;
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
+ u16 *cache = uda1380->reg_cache;
if (reg >= UDA1380_CACHEREGNUM)
return;
@@ -92,6 +96,7 @@ static inline void uda1380_write_reg_cache(struct snd_soc_codec *codec,
static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
u8 data[3];
/* data is
@@ -111,10 +116,10 @@ static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
if (!snd_soc_codec_is_active(codec) && (reg >= UDA1380_MVOL))
return 0;
pr_debug("uda1380: hw write %x val %x\n", reg, value);
- if (codec->hw_write(codec->control_data, data, 3) == 3) {
+ if (i2c_master_send(uda1380->i2c, data, 3) == 3) {
unsigned int val;
- i2c_master_send(codec->control_data, data, 1);
- i2c_master_recv(codec->control_data, data, 2);
+ i2c_master_send(uda1380->i2c, data, 1);
+ i2c_master_recv(uda1380->i2c, data, 2);
val = (data[0]<<8) | data[1];
if (val != value) {
pr_debug("uda1380: READ BACK VAL %x\n",
@@ -130,16 +135,17 @@ static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
static void uda1380_sync_cache(struct snd_soc_codec *codec)
{
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
int reg;
u8 data[3];
- u16 *cache = codec->reg_cache;
+ u16 *cache = uda1380->reg_cache;
/* Sync reg_cache with the hardware */
for (reg = 0; reg < UDA1380_MVOL; reg++) {
data[0] = reg;
data[1] = (cache[reg] & 0xff00) >> 8;
data[2] = cache[reg] & 0x00ff;
- if (codec->hw_write(codec->control_data, data, 3) != 3)
+ if (i2c_master_send(uda1380->i2c, data, 3) != 3)
dev_err(codec->dev, "%s: write to reg 0x%x failed\n",
__func__, reg);
}
@@ -148,6 +154,7 @@ static void uda1380_sync_cache(struct snd_soc_codec *codec)
static int uda1380_reset(struct snd_soc_codec *codec)
{
struct uda1380_platform_data *pdata = codec->dev->platform_data;
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
if (gpio_is_valid(pdata->gpio_reset)) {
gpio_set_value(pdata->gpio_reset, 1);
@@ -160,7 +167,7 @@ static int uda1380_reset(struct snd_soc_codec *codec)
data[1] = 0;
data[2] = 0;
- if (codec->hw_write(codec->control_data, data, 3) != 3) {
+ if (i2c_master_send(uda1380->i2c, data, 3) != 3) {
dev_err(codec->dev, "%s: failed\n", __func__);
return -EIO;
}
@@ -695,9 +702,6 @@ static int uda1380_probe(struct snd_soc_codec *codec)
uda1380->codec = codec;
- codec->hw_write = (hw_write_t)i2c_master_send;
- codec->control_data = uda1380->control_data;
-
if (!gpio_is_valid(pdata->gpio_power)) {
ret = uda1380_reset(codec);
if (ret)
@@ -727,11 +731,6 @@ static const struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
.set_bias_level = uda1380_set_bias_level,
.suspend_bias_off = true,
- .reg_cache_size = ARRAY_SIZE(uda1380_reg),
- .reg_word_size = sizeof(u16),
- .reg_cache_default = uda1380_reg,
- .reg_cache_step = 1,
-
.component_driver = {
.controls = uda1380_snd_controls,
.num_controls = ARRAY_SIZE(uda1380_snd_controls),
@@ -771,8 +770,15 @@ static int uda1380_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ uda1380->reg_cache = devm_kmemdup(&i2c->dev,
+ uda1380_reg,
+ ARRAY_SIZE(uda1380_reg) * sizeof(u16),
+ GFP_KERNEL);
+ if (!uda1380->reg_cache)
+ return -ENOMEM;
+
i2c_set_clientdata(i2c, uda1380);
- uda1380->control_data = i2c;
+ uda1380->i2c = i2c;
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_uda1380, uda1380_dai, ARRAY_SIZE(uda1380_dai));
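The uda1380 hunks above replace the legacy codec-level plumbing (codec->control_data, codec->hw_write, and the reg_cache_* driver fields removed further up) with state the driver owns itself. A minimal sketch of the same pattern, using hypothetical mydrv_* names and assuming a u16-per-register default table (not the driver's exact code):

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>

static const u16 mydrv_reg_defaults[8];	/* placeholder defaults */

struct mydrv_priv {
	struct i2c_client *i2c;
	u16 *reg_cache;
};

static int mydrv_i2c_probe(struct i2c_client *i2c,
			   const struct i2c_device_id *id)
{
	struct mydrv_priv *priv;

	priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Private copy of the register defaults, freed with the device */
	priv->reg_cache = devm_kmemdup(&i2c->dev, mydrv_reg_defaults,
				       sizeof(mydrv_reg_defaults),
				       GFP_KERNEL);
	if (!priv->reg_cache)
		return -ENOMEM;

	priv->i2c = i2c;
	i2c_set_clientdata(i2c, priv);
	return 0;
}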
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 4f5f5710b569..0147d2fb7b0a 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -655,11 +655,8 @@ static int wm0010_boot(struct snd_soc_codec *codec)
ret = -ENOMEM;
len = pll_rec.length + 8;
out = kzalloc(len, GFP_KERNEL | GFP_DMA);
- if (!out) {
- dev_err(codec->dev,
- "Failed to allocate RX buffer\n");
+ if (!out)
goto abort;
- }
img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
if (!img_swap)
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 23cde3a0dc11..abfa052c07d8 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -13,7 +13,7 @@
* 'wm2000_anc.bin' by default (overridable via platform data) at
* runtime and is expected to be in flat binary format. This is
* generated by Wolfson configuration tools and includes
- * system-specific callibration information. If supplied as a
+ * system-specific calibration information. If supplied as a
* sequence of ASCII-encoded hexadecimal bytes this can be converted
* into a flat binary with a command such as this on the command line:
*
@@ -826,8 +826,7 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
int reg;
u16 id;
- wm2000 = devm_kzalloc(&i2c->dev, sizeof(struct wm2000_priv),
- GFP_KERNEL);
+ wm2000 = devm_kzalloc(&i2c->dev, sizeof(*wm2000), GFP_KERNEL);
if (!wm2000)
return -ENOMEM;
@@ -902,7 +901,6 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
wm2000->anc_download_size,
GFP_KERNEL);
if (wm2000->anc_download == NULL) {
- dev_err(&i2c->dev, "Out of memory\n");
ret = -ENOMEM;
goto err_supplies;
}
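Both the wm0010 and wm2000 hunks drop the dev_err() print on allocation failure, and wm2000 (like wm8903 further down) switches sizeof(struct foo) to sizeof(*ptr). The kernel allocators already log a warning with a backtrace on failure, so the extra print is noise, and deriving the size from the pointer keeps the allocation correct if the variable's type ever changes. The resulting idiom, sketched with placeholder names:

struct foo_priv *foo;

foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
if (!foo)
	return -ENOMEM;	/* no print: the allocator already warned */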
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index d83dab57a1d1..5c2f5727244d 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -98,6 +98,8 @@ struct wm2200_priv {
int rev;
int sysclk;
+
+ unsigned int symmetric_rates:1;
};
#define WM2200_DSP_RANGE_BASE (WM2200_MAX_REGISTER + 1)
@@ -1550,7 +1552,7 @@ static const struct snd_soc_dapm_route wm2200_dapm_routes[] = {
static int wm2200_probe(struct snd_soc_codec *codec)
{
- struct wm2200_priv *wm2200 = dev_get_drvdata(codec->dev);
+ struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
int ret;
wm2200->codec = codec;
@@ -1758,7 +1760,7 @@ static int wm2200_hw_params(struct snd_pcm_substream *substream,
lrclk = bclk_rates[bclk] / params_rate(params);
dev_dbg(codec->dev, "Setting %dHz LRCLK\n", bclk_rates[bclk] / lrclk);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
- dai->symmetric_rates)
+ wm2200->symmetric_rates)
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_7,
WM2200_AIF1RX_BCPF_MASK, lrclk);
else
@@ -2059,13 +2061,14 @@ static int wm2200_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
static int wm2200_dai_probe(struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
+ struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
unsigned int val = 0;
int ret;
ret = snd_soc_read(codec, WM2200_GPIO_CTRL_1);
if (ret >= 0) {
if ((ret & WM2200_GP1_FN_MASK) != 0) {
- dai->symmetric_rates = true;
+ wm2200->symmetric_rates = true;
val = WM2200_AIF1TX_LRCLK_SRC;
}
} else {
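The wm2200 hunks stop writing dai->symmetric_rates at runtime and keep the flag in the driver's private structure, so hw_params() consults driver state instead of mutating the shared DAI object. A sketch of the pattern with hypothetical names (the predicate is a placeholder for whatever probe-time detection applies):

struct mydrv_priv {
	unsigned int symmetric_rates:1;
};

static int mydrv_dai_probe(struct snd_soc_dai *dai)
{
	struct mydrv_priv *priv = snd_soc_codec_get_drvdata(dai->codec);

	if (mydrv_lrclk_is_shared(dai))	/* placeholder predicate */
		priv->symmetric_rates = true;

	return 0;
}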
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 4f0481d3c7a7..fc066caa1918 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1935,8 +1935,11 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
int ret;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
+
ret = wm_adsp2_codec_probe(&priv->core.adsp[0], codec);
if (ret)
return ret;
@@ -1989,17 +1992,9 @@ static unsigned int wm5102_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_5R,
};
-static struct regmap *wm5102_get_regmap(struct device *dev)
-{
- struct wm5102_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
.probe = wm5102_codec_probe,
.remove = wm5102_codec_remove,
- .get_regmap = wm5102_get_regmap,
.idle_bias_off = true,
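This wm5102 hunk, and the wm5110/wm8350/wm8400/wm8994/wm8997/wm8998 hunks that follow, all retire the .get_regmap callback in favour of an explicit snd_soc_codec_init_regmap() call from the codec-level probe. A minimal sketch, assuming the regmap was already created by the MFD parent and stashed in driver data (mydrv_* names are hypothetical):

static int mydrv_codec_probe(struct snd_soc_codec *codec)
{
	struct mydrv_priv *priv = snd_soc_codec_get_drvdata(codec);

	/* Hand the externally created regmap to the codec core */
	snd_soc_codec_init_regmap(codec, priv->regmap);

	return 0;
}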
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 6ed1e1f9ce51..fb0cf9c61f48 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2280,9 +2280,11 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
int i, ret;
- priv->core.arizona->dapm = dapm;
+ arizona->dapm = dapm;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
ret = arizona_init_spk(codec);
if (ret < 0)
@@ -2344,17 +2346,9 @@ static unsigned int wm5110_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_6R,
};
-static struct regmap *wm5110_get_regmap(struct device *dev)
-{
- struct wm5110_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
.probe = wm5110_codec_probe,
.remove = wm5110_codec_remove,
- .get_regmap = wm5110_get_regmap,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 2efc5b41ad0f..fc79c6725d06 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1472,6 +1472,8 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec)
GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
+
+ snd_soc_codec_init_regmap(codec, wm8350->regmap);
snd_soc_codec_set_drvdata(codec, priv);
priv->wm8350 = wm8350;
@@ -1580,17 +1582,9 @@ static int wm8350_codec_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct regmap *wm8350_get_regmap(struct device *dev)
-{
- struct wm8350 *wm8350 = dev_get_platdata(dev);
-
- return wm8350->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8350 = {
.probe = wm8350_codec_probe,
.remove = wm8350_codec_remove,
- .get_regmap = wm8350_get_regmap,
.set_bias_level = wm8350_set_bias_level,
.suspend_bias_off = true,
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 6c59fb933bd6..a36adf881bca 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1285,6 +1285,7 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
if (priv == NULL)
return -ENOMEM;
+ snd_soc_codec_init_regmap(codec, wm8400->regmap);
snd_soc_codec_set_drvdata(codec, priv);
priv->wm8400 = wm8400;
@@ -1325,17 +1326,9 @@ static int wm8400_codec_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct regmap *wm8400_get_regmap(struct device *dev)
-{
- struct wm8400 *wm8400 = dev_get_platdata(dev);
-
- return wm8400->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
.probe = wm8400_codec_probe,
.remove = wm8400_codec_remove,
- .get_regmap = wm8400_get_regmap,
.set_bias_level = wm8400_set_bias_level,
.suspend_bias_off = true,
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 237eeb9a8b97..cba90f21161f 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1995,8 +1995,7 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
unsigned int val, irq_pol;
int ret, i;
- wm8903 = devm_kzalloc(&i2c->dev, sizeof(struct wm8903_priv),
- GFP_KERNEL);
+ wm8903 = devm_kzalloc(&i2c->dev, sizeof(*wm8903), GFP_KERNEL);
if (wm8903 == NULL)
return -ENOMEM;
@@ -2017,13 +2016,10 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
if (pdata) {
wm8903->pdata = pdata;
} else {
- wm8903->pdata = devm_kzalloc(&i2c->dev,
- sizeof(struct wm8903_platform_data),
- GFP_KERNEL);
- if (wm8903->pdata == NULL) {
- dev_err(&i2c->dev, "Failed to allocate pdata\n");
+ wm8903->pdata = devm_kzalloc(&i2c->dev, sizeof(*wm8903->pdata),
+ GFP_KERNEL);
+ if (!wm8903->pdata)
return -ENOMEM;
- }
if (i2c->irq) {
ret = wm8903_set_pdata_irq_trigger(i2c, wm8903->pdata);
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index f91b49e1ece3..21ffd6403173 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3993,6 +3993,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
unsigned int reg;
int ret, i;
+ snd_soc_codec_init_regmap(codec, control->regmap);
+
wm8994->hubs.codec = codec;
mutex_init(&wm8994->accdet_lock);
@@ -4434,19 +4436,11 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct regmap *wm8994_get_regmap(struct device *dev)
-{
- struct wm8994 *control = dev_get_drvdata(dev->parent);
-
- return control->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8994 = {
.probe = wm8994_codec_probe,
.remove = wm8994_codec_remove,
.suspend = wm8994_codec_suspend,
.resume = wm8994_codec_resume,
- .get_regmap = wm8994_get_regmap,
.set_bias_level = wm8994_set_bias_level,
};
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 77f512767273..cac9b3e7e15d 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1062,8 +1062,11 @@ static int wm8997_codec_probe(struct snd_soc_codec *codec)
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct wm8997_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
int ret;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
+
ret = arizona_init_spk(codec);
if (ret < 0)
return ret;
@@ -1095,17 +1098,9 @@ static unsigned int wm8997_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_5R,
};
-static struct regmap *wm8997_get_regmap(struct device *dev)
-{
- struct wm8997_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8997 = {
.probe = wm8997_codec_probe,
.remove = wm8997_codec_remove,
- .get_regmap = wm8997_get_regmap,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 2d211dbe7422..1288e1f67dcf 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1275,9 +1275,11 @@ static int wm8998_codec_probe(struct snd_soc_codec *codec)
struct wm8998_priv *priv = snd_soc_codec_get_drvdata(codec);
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
+ struct arizona *arizona = priv->core.arizona;
int ret;
- priv->core.arizona->dapm = dapm;
+ arizona->dapm = dapm;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
ret = arizona_init_spk(codec);
if (ret < 0)
@@ -1313,17 +1315,9 @@ static unsigned int wm8998_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_5R,
};
-static struct regmap *wm8998_get_regmap(struct device *dev)
-{
- struct wm8998_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8998 = {
.probe = wm8998_codec_probe,
.remove = wm8998_codec_remove,
- .get_regmap = wm8998_get_regmap,
.idle_bias_off = true,
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 804c6f2bcf21..03ba218160ca 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1242,6 +1242,20 @@ static int davinci_mcasp_hw_rule_format(struct snd_pcm_hw_params *params,
return snd_mask_refine(fmt, &nfmt);
}
+static int davinci_mcasp_hw_rule_min_periodsize(
+ struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
+{
+ struct snd_interval *period_size = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+ struct snd_interval frames;
+
+ snd_interval_any(&frames);
+ frames.min = 64;
+ frames.integer = 1;
+
+ return snd_interval_refine(period_size, &frames);
+}
+
static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
@@ -1333,6 +1347,11 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
return ret;
}
+ snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ davinci_mcasp_hw_rule_min_periodsize, NULL,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+
return 0;
}
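The added rule clamps the period size to at least 64 frames. For a fixed bound like this, snd_pcm_hw_constraint_minmax() could arguably express the same constraint without a callback; a sketch only, since the rule form used above leaves room for device-dependent math later:

/* Sketch: fixed lower bound via the generic minmax helper */
ret = snd_pcm_hw_constraint_minmax(substream->runtime,
				   SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				   64, UINT_MAX);
if (ret < 0)
	return ret;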
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 84ef6385736c..191426a6d9ad 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -29,7 +29,6 @@
#include "../codecs/tlv320aic23.h"
#include "imx-ssi.h"
-#include "fsl_ssi.h"
#include "imx-audmux.h"
#define CODEC_CLOCK 12000000
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 1225e0399de8..989be518c4ed 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -442,8 +442,8 @@ static int fsl_asoc_card_late_probe(struct snd_soc_card *card)
if (fsl_asoc_card_is_ac97(priv)) {
#if IS_ENABLED(CONFIG_SND_AC97_CODEC)
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_ac97 *ac97 = snd_soc_component_get_drvdata(component);
/*
* Use slots 3/4 for S/PDIF so SSI won't try to enable
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index 52c27a358933..2c5856ac5bc3 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -57,7 +57,7 @@
#define REG_ASRDOC 0x74
#define REG_ASRDI(i) (REG_ASRDIA + (i << 3))
#define REG_ASRDO(i) (REG_ASRDOA + (i << 3))
-#define REG_ASRDx(x, i) (x == IN ? REG_ASRDI(i) : REG_ASRDO(i))
+#define REG_ASRDx(x, i) ((x) == IN ? REG_ASRDI(i) : REG_ASRDO(i))
#define REG_ASRIDRHA 0x80
#define REG_ASRIDRLA 0x84
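The REG_ASRDx() fix adds parentheses around the macro parameter because '==' binds tighter than '^' and most other arithmetic and bitwise operators, so an expression argument silently evaluates something else in the unparenthesized form. A standalone demonstration:

#include <stdio.h>

#define IN	1
#define BAD(x)	(x == IN ? 'A' : 'B')
#define GOOD(x)	((x) == IN ? 'A' : 'B')

int main(void)
{
	int dir = 2;

	/*
	 * '==' binds tighter than '^':
	 *   BAD(dir ^ 1)  parses as (dir ^ (1 == IN)) -> 2 ^ 1 -> true  -> 'A'
	 *   GOOD(dir ^ 1) parses as ((dir ^ 1) == IN) -> 3 == 1 -> false -> 'B'
	 */
	printf("%c %c\n", BAD(dir ^ 1), GOOD(dir ^ 1));	/* prints: A B */
	return 0;
}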
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 0c11f434a374..8c2981b70f64 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -913,8 +913,8 @@ static int fsl_soc_dma_probe(struct platform_device *pdev)
dma->dai.pcm_free = fsl_dma_free_dma_buffers;
/* Store the SSI-specific information that we need */
- dma->ssi_stx_phys = res.start + CCSR_SSI_STX0;
- dma->ssi_srx_phys = res.start + CCSR_SSI_SRX0;
+ dma->ssi_stx_phys = res.start + REG_SSI_STX0;
+ dma->ssi_srx_phys = res.start + REG_SSI_SRX0;
iprop = of_get_property(ssi_np, "fsl,fifo-depth", NULL);
if (iprop)
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 424bafaf51ef..aecd00f7929d 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -69,21 +69,35 @@
* samples will be written to STX properly.
*/
#ifdef __BIG_ENDIAN
-#define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \
- SNDRV_PCM_FMTBIT_S18_3BE | SNDRV_PCM_FMTBIT_S20_3BE | \
- SNDRV_PCM_FMTBIT_S24_3BE | SNDRV_PCM_FMTBIT_S24_BE)
+#define FSLSSI_I2S_FORMATS \
+ (SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_S16_BE | \
+ SNDRV_PCM_FMTBIT_S18_3BE | \
+ SNDRV_PCM_FMTBIT_S20_3BE | \
+ SNDRV_PCM_FMTBIT_S24_3BE | \
+ SNDRV_PCM_FMTBIT_S24_BE)
#else
-#define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \
- SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S20_3LE | \
- SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+#define FSLSSI_I2S_FORMATS \
+ (SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S18_3LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
#endif
-#define FSLSSI_SIER_DBG_RX_FLAGS (CCSR_SSI_SIER_RFF0_EN | \
- CCSR_SSI_SIER_RLS_EN | CCSR_SSI_SIER_RFS_EN | \
- CCSR_SSI_SIER_ROE0_EN | CCSR_SSI_SIER_RFRC_EN)
-#define FSLSSI_SIER_DBG_TX_FLAGS (CCSR_SSI_SIER_TFE0_EN | \
- CCSR_SSI_SIER_TLS_EN | CCSR_SSI_SIER_TFS_EN | \
- CCSR_SSI_SIER_TUE0_EN | CCSR_SSI_SIER_TFRC_EN)
+#define FSLSSI_SIER_DBG_RX_FLAGS \
+ (SSI_SIER_RFF0_EN | \
+ SSI_SIER_RLS_EN | \
+ SSI_SIER_RFS_EN | \
+ SSI_SIER_ROE0_EN | \
+ SSI_SIER_RFRC_EN)
+#define FSLSSI_SIER_DBG_TX_FLAGS \
+ (SSI_SIER_TFE0_EN | \
+ SSI_SIER_TLS_EN | \
+ SSI_SIER_TFS_EN | \
+ SSI_SIER_TUE0_EN | \
+ SSI_SIER_TFRC_EN)
enum fsl_ssi_type {
FSL_SSI_MCP8610,
@@ -92,23 +106,18 @@ enum fsl_ssi_type {
FSL_SSI_MX51,
};
-struct fsl_ssi_reg_val {
+struct fsl_ssi_regvals {
u32 sier;
u32 srcr;
u32 stcr;
u32 scr;
};
-struct fsl_ssi_rxtx_reg_val {
- struct fsl_ssi_reg_val rx;
- struct fsl_ssi_reg_val tx;
-};
-
static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CCSR_SSI_SACCEN:
- case CCSR_SSI_SACCDIS:
+ case REG_SSI_SACCEN:
+ case REG_SSI_SACCDIS:
return false;
default:
return true;
@@ -118,18 +127,18 @@ static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CCSR_SSI_STX0:
- case CCSR_SSI_STX1:
- case CCSR_SSI_SRX0:
- case CCSR_SSI_SRX1:
- case CCSR_SSI_SISR:
- case CCSR_SSI_SFCSR:
- case CCSR_SSI_SACNT:
- case CCSR_SSI_SACADD:
- case CCSR_SSI_SACDAT:
- case CCSR_SSI_SATAG:
- case CCSR_SSI_SACCST:
- case CCSR_SSI_SOR:
+ case REG_SSI_STX0:
+ case REG_SSI_STX1:
+ case REG_SSI_SRX0:
+ case REG_SSI_SRX1:
+ case REG_SSI_SISR:
+ case REG_SSI_SFCSR:
+ case REG_SSI_SACNT:
+ case REG_SSI_SACADD:
+ case REG_SSI_SACDAT:
+ case REG_SSI_SATAG:
+ case REG_SSI_SACCST:
+ case REG_SSI_SOR:
return true;
default:
return false;
@@ -139,12 +148,12 @@ static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg)
static bool fsl_ssi_precious_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CCSR_SSI_SRX0:
- case CCSR_SSI_SRX1:
- case CCSR_SSI_SISR:
- case CCSR_SSI_SACADD:
- case CCSR_SSI_SACDAT:
- case CCSR_SSI_SATAG:
+ case REG_SSI_SRX0:
+ case REG_SSI_SRX1:
+ case REG_SSI_SISR:
+ case REG_SSI_SACADD:
+ case REG_SSI_SACDAT:
+ case REG_SSI_SATAG:
return true;
default:
return false;
@@ -154,9 +163,9 @@ static bool fsl_ssi_precious_reg(struct device *dev, unsigned int reg)
static bool fsl_ssi_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CCSR_SSI_SRX0:
- case CCSR_SSI_SRX1:
- case CCSR_SSI_SACCST:
+ case REG_SSI_SRX0:
+ case REG_SSI_SRX1:
+ case REG_SSI_SACCST:
return false;
default:
return true;
@@ -164,12 +173,12 @@ static bool fsl_ssi_writeable_reg(struct device *dev, unsigned int reg)
}
static const struct regmap_config fsl_ssi_regconfig = {
- .max_register = CCSR_SSI_SACCDIS,
+ .max_register = REG_SSI_SACCDIS,
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.val_format_endian = REGMAP_ENDIAN_NATIVE,
- .num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1,
+ .num_reg_defaults_raw = REG_SSI_SACCDIS / sizeof(uint32_t) + 1,
.readable_reg = fsl_ssi_readable_reg,
.volatile_reg = fsl_ssi_volatile_reg,
.precious_reg = fsl_ssi_precious_reg,
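The renamed constants feed the same regmap_config as before; the three predicates touched in the hunks above have distinct jobs, summarized below (summary only, not driver code):

/*
 * readable_reg -> false: reads refused (SACCEN/SACCDIS are
 *                 write-only trigger registers)
 * volatile_reg -> true:  never served from the register cache
 * precious_reg -> true:  never read implicitly (e.g. via debugfs),
 *                 because reading has side effects such as a FIFO
 *                 pop or clear-on-read status
 */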
@@ -185,78 +194,79 @@ struct fsl_ssi_soc_data {
};
/**
- * fsl_ssi_private: per-SSI private data
+ * fsl_ssi: per-SSI private data
*
- * @reg: Pointer to the regmap registers
+ * @regs: Pointer to the regmap registers
* @irq: IRQ of this SSI
* @cpu_dai_drv: CPU DAI driver for this device
*
* @dai_fmt: DAI configuration this device is currently used with
- * @i2s_mode: i2s and network mode configuration of the device. Is used to
- * switch between normal and i2s/network mode
- * mode depending on the number of channels
+ * @i2s_net: I2S and Network mode configurations of SCR register
* @use_dma: DMA is used or FIQ with stream filter
- * @use_dual_fifo: DMA with support for both FIFOs used
- * @fifo_deph: Depth of the SSI FIFOs
- * @slot_width: width of each DAI slot
- * @slots: number of slots
- * @rxtx_reg_val: Specific register settings for receive/transmit configuration
+ * @use_dual_fifo: DMA with support for dual FIFO mode
+ * @has_ipg_clk_name: If "ipg" is in the clock name list of device tree
+ * @fifo_depth: Depth of the SSI FIFOs
+ * @slot_width: Width of each DAI slot
+ * @slots: Number of slots
+ * @regvals: Specific RX/TX register settings
*
- * @clk: SSI clock
- * @baudclk: SSI baud clock for master mode
+ * @clk: Clock source to access register
+ * @baudclk: Clock source to generate bit and frame-sync clocks
* @baudclk_streams: Active streams that are using baudclk
*
+ * @regcache_sfcsr: Cache sfcsr register value during suspend and resume
+ * @regcache_sacnt: Cache sacnt register value during suspend and resume
+ *
* @dma_params_tx: DMA transmit parameters
* @dma_params_rx: DMA receive parameters
* @ssi_phys: physical address of the SSI registers
*
* @fiq_params: FIQ stream filtering parameters
*
- * @pdev: Pointer to pdev used for deprecated fsl-ssi sound card
+ * @pdev: Pointer to pdev when using fsl-ssi as sound card (ppc only)
+ * TODO: Should be replaced with simple-sound-card
*
* @dbg_stats: Debugging statistics
*
* @soc: SoC specific data
+ * @dev: Pointer to &pdev->dev
*
- * @fifo_watermark: the FIFO watermark setting. Notifies DMA when
- * there are @fifo_watermark or fewer words in TX fifo or
- * @fifo_watermark or more empty words in RX fifo.
- * @dma_maxburst: max number of words to transfer in one go. So far,
- * this is always the same as fifo_watermark.
+ * @fifo_watermark: The FIFO watermark setting. Notifies DMA when there are
+ * @fifo_watermark or fewer words in TX fifo or
+ * @fifo_watermark or more empty words in RX fifo.
+ * @dma_maxburst: Max number of words to transfer in one go. So far,
+ * this is always the same as fifo_watermark.
+ *
+ * @ac97_reg_lock: Mutex lock to serialize AC97 register access operations
*/
-struct fsl_ssi_private {
+struct fsl_ssi {
struct regmap *regs;
int irq;
struct snd_soc_dai_driver cpu_dai_drv;
unsigned int dai_fmt;
- u8 i2s_mode;
+ u8 i2s_net;
bool use_dma;
bool use_dual_fifo;
bool has_ipg_clk_name;
unsigned int fifo_depth;
unsigned int slot_width;
unsigned int slots;
- struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
+ struct fsl_ssi_regvals regvals[2];
struct clk *clk;
struct clk *baudclk;
unsigned int baudclk_streams;
- /* regcache for volatile regs */
u32 regcache_sfcsr;
u32 regcache_sacnt;
- /* DMA params */
struct snd_dmaengine_dai_dma_data dma_params_tx;
struct snd_dmaengine_dai_dma_data dma_params_rx;
dma_addr_t ssi_phys;
- /* params for non-dma FIQ stream filtered mode */
struct imx_pcm_fiq_params fiq_params;
- /* Used when using fsl-ssi as sound-card. This is only used by ppc and
- * should be replaced with simple-sound-card. */
struct platform_device *pdev;
struct fsl_ssi_dbg dbg_stats;
@@ -271,27 +281,27 @@ struct fsl_ssi_private {
};
/*
- * imx51 and later SoCs have a slightly different IP that allows the
- * SSI configuration while the SSI unit is running.
- *
- * More important, it is necessary on those SoCs to configure the
- * sperate TX/RX DMA bits just before starting the stream
- * (fsl_ssi_trigger). The SDMA unit has to be configured before fsl_ssi
- * sends any DMA requests to the SDMA unit, otherwise it is not defined
- * how the SDMA unit handles the DMA request.
+ * SoC specific data
*
- * SDMA units are present on devices starting at imx35 but the imx35
- * reference manual states that the DMA bits should not be changed
- * while the SSI unit is running (SSIEN). So we support the necessary
- * online configuration of fsl-ssi starting at imx51.
+ * Notes:
+ * 1) SSI in earlier SoCs has critical bits in control registers that
+ * cannot be changed after SSI starts running -- a software reset
+ * (set SSIEN to 0) is required to change their values, so an
+ * offline_config flag is set for these SoCs.
+ * 2) SDMA is available since imx35. However, imx35 does not support
+ * changing the DMA bits while SSI is running, so it sets
+ * offline_config as well.
+ * 3) imx51 and later versions support register configuration while
+ * SSI is running (SSIEN); for these versions, DMA needs to be
+ * configured before SSI sends a DMA request to avoid an undefined
+ * DMA request on the SDMA side.
*/
static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = {
.imx = false,
.offline_config = true,
- .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
- CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
- CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+ .sisr_write_mask = SSI_SISR_RFRC | SSI_SISR_TFRC |
+ SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+ SSI_SISR_TUE0 | SSI_SISR_TUE1,
};
static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
@@ -304,16 +314,16 @@ static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
static struct fsl_ssi_soc_data fsl_ssi_imx35 = {
.imx = true,
.offline_config = true,
- .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
- CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
- CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+ .sisr_write_mask = SSI_SISR_RFRC | SSI_SISR_TFRC |
+ SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+ SSI_SISR_TUE0 | SSI_SISR_TUE1,
};
static struct fsl_ssi_soc_data fsl_ssi_imx51 = {
.imx = true,
.offline_config = false,
- .sisr_write_mask = CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
- CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+ .sisr_write_mask = SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+ SSI_SISR_TUE0 | SSI_SISR_TUE1,
};
static const struct of_device_id fsl_ssi_ids[] = {
@@ -325,108 +335,86 @@ static const struct of_device_id fsl_ssi_ids[] = {
};
MODULE_DEVICE_TABLE(of, fsl_ssi_ids);
-static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_ac97(struct fsl_ssi *ssi)
{
- return (ssi_private->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+ return (ssi->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
SND_SOC_DAIFMT_AC97;
}
-static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_i2s_master(struct fsl_ssi *ssi)
{
- return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+ return (ssi->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
SND_SOC_DAIFMT_CBS_CFS;
}
-static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi *ssi)
{
- return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+ return (ssi->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
SND_SOC_DAIFMT_CBM_CFS;
}
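The renamed helpers all follow one shape: dai_fmt packs several independent fields, so each query masks its own field before comparing. An illustrative combined query in the same style (not part of the driver):

static bool fsl_ssi_is_i2s_slave(struct fsl_ssi *ssi)
{
	return (ssi->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
	       SND_SOC_DAIFMT_I2S &&
	       (ssi->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
	       SND_SOC_DAIFMT_CBM_CFM;
}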
+
/**
- * fsl_ssi_isr: SSI interrupt handler
- *
- * Although it's possible to use the interrupt handler to send and receive
- * data to/from the SSI, we use the DMA instead. Programming is more
- * complicated, but the performance is much better.
- *
- * This interrupt handler is used only to gather statistics.
- *
- * @irq: IRQ of the SSI device
- * @dev_id: pointer to the ssi_private structure for this SSI device
+ * Interrupt handler to gather statistics
*/
static irqreturn_t fsl_ssi_isr(int irq, void *dev_id)
{
- struct fsl_ssi_private *ssi_private = dev_id;
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = dev_id;
+ struct regmap *regs = ssi->regs;
__be32 sisr;
__be32 sisr2;
- /* We got an interrupt, so read the status register to see what we
- were interrupted for. We mask it with the Interrupt Enable register
- so that we only check for events that we're interested in.
- */
- regmap_read(regs, CCSR_SSI_SISR, &sisr);
+ regmap_read(regs, REG_SSI_SISR, &sisr);
- sisr2 = sisr & ssi_private->soc->sisr_write_mask;
+ sisr2 = sisr & ssi->soc->sisr_write_mask;
/* Clear the bits that we set */
if (sisr2)
- regmap_write(regs, CCSR_SSI_SISR, sisr2);
+ regmap_write(regs, REG_SSI_SISR, sisr2);
- fsl_ssi_dbg_isr(&ssi_private->dbg_stats, sisr);
+ fsl_ssi_dbg_isr(&ssi->dbg_stats, sisr);
return IRQ_HANDLED;
}
-/*
- * Enable/Disable all rx/tx config flags at once.
+/**
+ * Enable or disable all rx/tx config flags at once
*/
-static void fsl_ssi_rxtx_config(struct fsl_ssi_private *ssi_private,
- bool enable)
+static void fsl_ssi_rxtx_config(struct fsl_ssi *ssi, bool enable)
{
- struct regmap *regs = ssi_private->regs;
- struct fsl_ssi_rxtx_reg_val *vals = &ssi_private->rxtx_reg_val;
+ struct regmap *regs = ssi->regs;
+ struct fsl_ssi_regvals *vals = ssi->regvals;
if (enable) {
- regmap_update_bits(regs, CCSR_SSI_SIER,
- vals->rx.sier | vals->tx.sier,
- vals->rx.sier | vals->tx.sier);
- regmap_update_bits(regs, CCSR_SSI_SRCR,
- vals->rx.srcr | vals->tx.srcr,
- vals->rx.srcr | vals->tx.srcr);
- regmap_update_bits(regs, CCSR_SSI_STCR,
- vals->rx.stcr | vals->tx.stcr,
- vals->rx.stcr | vals->tx.stcr);
+ regmap_update_bits(regs, REG_SSI_SIER,
+ vals[RX].sier | vals[TX].sier,
+ vals[RX].sier | vals[TX].sier);
+ regmap_update_bits(regs, REG_SSI_SRCR,
+ vals[RX].srcr | vals[TX].srcr,
+ vals[RX].srcr | vals[TX].srcr);
+ regmap_update_bits(regs, REG_SSI_STCR,
+ vals[RX].stcr | vals[TX].stcr,
+ vals[RX].stcr | vals[TX].stcr);
} else {
- regmap_update_bits(regs, CCSR_SSI_SRCR,
- vals->rx.srcr | vals->tx.srcr, 0);
- regmap_update_bits(regs, CCSR_SSI_STCR,
- vals->rx.stcr | vals->tx.stcr, 0);
- regmap_update_bits(regs, CCSR_SSI_SIER,
- vals->rx.sier | vals->tx.sier, 0);
+ regmap_update_bits(regs, REG_SSI_SRCR,
+ vals[RX].srcr | vals[TX].srcr, 0);
+ regmap_update_bits(regs, REG_SSI_STCR,
+ vals[RX].stcr | vals[TX].stcr, 0);
+ regmap_update_bits(regs, REG_SSI_SIER,
+ vals[RX].sier | vals[TX].sier, 0);
}
}
-/*
- * Clear RX or TX FIFO to remove samples from the previous
- * stream session which may be still present in the FIFO and
- * may introduce bad samples and/or channel slipping.
- *
- * Note: The SOR is not documented in recent IMX datasheet, but
- * is described in IMX51 reference manual at section 56.3.3.15.
+/**
+ * Clear remaining data in the FIFO to avoid dirty data or channel slipping
*/
-static void fsl_ssi_fifo_clear(struct fsl_ssi_private *ssi_private,
- bool is_rx)
+static void fsl_ssi_fifo_clear(struct fsl_ssi *ssi, bool is_rx)
{
- if (is_rx) {
- regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
- CCSR_SSI_SOR_RX_CLR, CCSR_SSI_SOR_RX_CLR);
- } else {
- regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
- CCSR_SSI_SOR_TX_CLR, CCSR_SSI_SOR_TX_CLR);
- }
+ bool tx = !is_rx;
+
+ regmap_update_bits(ssi->regs, REG_SSI_SOR,
+ SSI_SOR_xX_CLR(tx), SSI_SOR_xX_CLR(tx));
}
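fsl_ssi_fifo_clear() now folds the RX/TX branch into one parameterized macro. The header is not part of this hunk; a plausible definition consistent with the call site (an assumption -- the authoritative one lives in fsl_ssi.h):

/* Assumed shape; bit positions illustrative */
#define SSI_SOR_TX_CLR		0x00000010
#define SSI_SOR_RX_CLR		0x00000020
#define SSI_SOR_xX_CLR(tx)	((tx) ? SSI_SOR_TX_CLR : SSI_SOR_RX_CLR)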
-/*
+/**
* Calculate the bits that have to be disabled for the current stream that is
* getting disabled. This keeps the bits enabled that are necessary for the
* second stream to work if 'stream_active' is true.
@@ -446,261 +434,239 @@ static void fsl_ssi_fifo_clear(struct fsl_ssi_private *ssi_private,
((vals_disable) & \
((vals_disable) ^ ((vals_stream) * (u32)!!(stream_active))))
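The fsl_ssi_disable_val() arithmetic is dense enough to warrant a worked expansion. With stream_active == 0 the XOR term vanishes and every requested bit is cleared; with stream_active == 1 the shared bits drop out and only the bits unique to the closing stream are cleared:

/*
 * result = disable & (disable ^ (stream * !!active))
 *
 * active == 0:  disable & (disable ^ 0)      == disable
 * active == 1:  disable & (disable ^ stream) == disable & ~stream
 *
 * e.g. disable = 0b0110, stream = 0b0100, active = 1:
 *      0b0110 & (0b0110 ^ 0b0100) = 0b0110 & 0b0010 = 0b0010
 */
static inline u32 disable_val_sketch(u32 disable, u32 stream, bool active)
{
	return active ? (disable & ~stream) : disable;	/* equivalent form */
}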
-/*
- * Enable/Disable a ssi configuration. You have to pass either
- * ssi_private->rxtx_reg_val.rx or tx as vals parameter.
+/**
+ * Enable or disable SSI configuration.
*/
-static void fsl_ssi_config(struct fsl_ssi_private *ssi_private, bool enable,
- struct fsl_ssi_reg_val *vals)
+static void fsl_ssi_config(struct fsl_ssi *ssi, bool enable,
+ struct fsl_ssi_regvals *vals)
{
- struct regmap *regs = ssi_private->regs;
- struct fsl_ssi_reg_val *avals;
+ struct regmap *regs = ssi->regs;
+ struct fsl_ssi_regvals *avals;
int nr_active_streams;
- u32 scr_val;
+ u32 scr;
int keep_active;
- regmap_read(regs, CCSR_SSI_SCR, &scr_val);
+ regmap_read(regs, REG_SSI_SCR, &scr);
- nr_active_streams = !!(scr_val & CCSR_SSI_SCR_TE) +
- !!(scr_val & CCSR_SSI_SCR_RE);
+ nr_active_streams = !!(scr & SSI_SCR_TE) + !!(scr & SSI_SCR_RE);
if (nr_active_streams - 1 > 0)
keep_active = 1;
else
keep_active = 0;
- /* Find the other direction values rx or tx which we do not want to
- * modify */
- if (&ssi_private->rxtx_reg_val.rx == vals)
- avals = &ssi_private->rxtx_reg_val.tx;
+ /* Get the opposite direction to keep its values untouched */
+ if (&ssi->regvals[RX] == vals)
+ avals = &ssi->regvals[TX];
else
- avals = &ssi_private->rxtx_reg_val.rx;
+ avals = &ssi->regvals[RX];
- /* If vals should be disabled, start with disabling the unit */
if (!enable) {
+ /*
+ * To keep the other stream safe, exclude shared bits between
+ * both streams, and get safe bits to disable current stream
+ */
u32 scr = fsl_ssi_disable_val(vals->scr, avals->scr,
- keep_active);
- regmap_update_bits(regs, CCSR_SSI_SCR, scr, 0);
+ keep_active);
+ /* Safely disable SCR register for the stream */
+ regmap_update_bits(regs, REG_SSI_SCR, scr, 0);
}
/*
- * We are running on a SoC which does not support online SSI
- * reconfiguration, so we have to enable all necessary flags at once
- * even if we do not use them later (capture and playback configuration)
+ * For cases where online configuration is not supported,
+ * 1) Enable all necessary bits of both streams when 1st stream starts
+ * even if the opposite stream will not start
+ * 2) Disable all remaining bits of both streams when last stream ends
*/
- if (ssi_private->soc->offline_config) {
- if ((enable && !nr_active_streams) ||
- (!enable && !keep_active))
- fsl_ssi_rxtx_config(ssi_private, enable);
+ if (ssi->soc->offline_config) {
+ if ((enable && !nr_active_streams) || (!enable && !keep_active))
+ fsl_ssi_rxtx_config(ssi, enable);
goto config_done;
}
- /*
- * Configure single direction units while the SSI unit is running
- * (online configuration)
- */
+ /* Online configure single direction while SSI is running */
if (enable) {
- fsl_ssi_fifo_clear(ssi_private, vals->scr & CCSR_SSI_SCR_RE);
+ fsl_ssi_fifo_clear(ssi, vals->scr & SSI_SCR_RE);
- regmap_update_bits(regs, CCSR_SSI_SRCR, vals->srcr, vals->srcr);
- regmap_update_bits(regs, CCSR_SSI_STCR, vals->stcr, vals->stcr);
- regmap_update_bits(regs, CCSR_SSI_SIER, vals->sier, vals->sier);
+ regmap_update_bits(regs, REG_SSI_SRCR, vals->srcr, vals->srcr);
+ regmap_update_bits(regs, REG_SSI_STCR, vals->stcr, vals->stcr);
+ regmap_update_bits(regs, REG_SSI_SIER, vals->sier, vals->sier);
} else {
u32 sier;
u32 srcr;
u32 stcr;
/*
- * Disabling the necessary flags for one of rx/tx while the
- * other stream is active is a little bit more difficult. We
- * have to disable only those flags that differ between both
- * streams (rx XOR tx) and that are set in the stream that is
- * disabled now. Otherwise we could alter flags of the other
- * stream
+ * To keep the other stream safe, exclude shared bits between
+ * both streams, and get safe bits to disable current stream
*/
-
- /* These assignments are simply vals without bits set in avals*/
sier = fsl_ssi_disable_val(vals->sier, avals->sier,
- keep_active);
+ keep_active);
srcr = fsl_ssi_disable_val(vals->srcr, avals->srcr,
- keep_active);
+ keep_active);
stcr = fsl_ssi_disable_val(vals->stcr, avals->stcr,
- keep_active);
+ keep_active);
- regmap_update_bits(regs, CCSR_SSI_SRCR, srcr, 0);
- regmap_update_bits(regs, CCSR_SSI_STCR, stcr, 0);
- regmap_update_bits(regs, CCSR_SSI_SIER, sier, 0);
+ /* Safely disable other control registers for the stream */
+ regmap_update_bits(regs, REG_SSI_SRCR, srcr, 0);
+ regmap_update_bits(regs, REG_SSI_STCR, stcr, 0);
+ regmap_update_bits(regs, REG_SSI_SIER, sier, 0);
}
config_done:
/* Enabling of subunits is done after configuration */
if (enable) {
- if (ssi_private->use_dma && (vals->scr & CCSR_SSI_SCR_TE)) {
- /*
- * Be sure the Tx FIFO is filled when TE is set.
- * Otherwise, there are some chances to start the
- * playback with some void samples inserted first,
- * generating a channel slip.
- *
- * First, SSIEN must be set, to let the FIFO be filled.
- *
- * Notes:
- * - Limit this fix to the DMA case until FIQ cases can
- * be tested.
- * - Limit the length of the busy loop to not lock the
- * system too long, even if 1-2 loops are sufficient
- * in general.
- */
+ /*
+ * Start DMA before setting TE to avoid FIFO underrun
+ * which may cause a channel slip or a channel swap
+ *
+ * TODO: FIQ cases might also need this upon testing
+ */
+ if (ssi->use_dma && (vals->scr & SSI_SCR_TE)) {
int i;
int max_loop = 100;
- regmap_update_bits(regs, CCSR_SSI_SCR,
- CCSR_SSI_SCR_SSIEN, CCSR_SSI_SCR_SSIEN);
+
+ /* Enable SSI first to send TX DMA request */
+ regmap_update_bits(regs, REG_SSI_SCR,
+ SSI_SCR_SSIEN, SSI_SCR_SSIEN);
+
+ /* Busy wait until TX FIFO not empty -- DMA working */
for (i = 0; i < max_loop; i++) {
u32 sfcsr;
- regmap_read(regs, CCSR_SSI_SFCSR, &sfcsr);
- if (CCSR_SSI_SFCSR_TFCNT0(sfcsr))
+ regmap_read(regs, REG_SSI_SFCSR, &sfcsr);
+ if (SSI_SFCSR_TFCNT0(sfcsr))
break;
}
if (i == max_loop) {
- dev_err(ssi_private->dev,
+ dev_err(ssi->dev,
"Timeout waiting TX FIFO filling\n");
}
}
- regmap_update_bits(regs, CCSR_SSI_SCR, vals->scr, vals->scr);
+ /* Enable all remaining bits */
+ regmap_update_bits(regs, REG_SSI_SCR, vals->scr, vals->scr);
}
}
+static void fsl_ssi_rx_config(struct fsl_ssi *ssi, bool enable)
+{
+ fsl_ssi_config(ssi, enable, &ssi->regvals[RX]);
+}
-static void fsl_ssi_rx_config(struct fsl_ssi_private *ssi_private, bool enable)
+static void fsl_ssi_tx_ac97_saccst_setup(struct fsl_ssi *ssi)
{
- fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.rx);
+ struct regmap *regs = ssi->regs;
+
+ /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
+ if (!ssi->soc->imx21regs) {
+ /* Disable all channel slots */
+ regmap_write(regs, REG_SSI_SACCDIS, 0xff);
+ /* Enable slots 3 & 4 -- PCM Playback Left & Right channels */
+ regmap_write(regs, REG_SSI_SACCEN, 0x300);
+ }
}
-static void fsl_ssi_tx_config(struct fsl_ssi_private *ssi_private, bool enable)
+static void fsl_ssi_tx_config(struct fsl_ssi *ssi, bool enable)
{
- fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.tx);
+ /*
+ * SACCST might be modified via AC Link by a CODEC if it sends
+ * extra bits in its SLOTREQ requests, which will accidentally
+ * send valid data to slots other than normal playback slots.
+ *
+ * To be safe, configure SACCST right before TX starts.
+ */
+ if (enable && fsl_ssi_is_ac97(ssi))
+ fsl_ssi_tx_ac97_saccst_setup(ssi);
+
+ fsl_ssi_config(ssi, enable, &ssi->regvals[TX]);
}
-/*
- * Setup rx/tx register values used to enable/disable the streams. These will
- * be used later in fsl_ssi_config to setup the streams without the need to
- * check for all different SSI modes.
+/**
+ * Cache critical bits of SIER, SRCR, STCR and SCR to later set them safely
*/
-static void fsl_ssi_setup_reg_vals(struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_setup_regvals(struct fsl_ssi *ssi)
{
- struct fsl_ssi_rxtx_reg_val *reg = &ssi_private->rxtx_reg_val;
-
- reg->rx.sier = CCSR_SSI_SIER_RFF0_EN;
- reg->rx.srcr = CCSR_SSI_SRCR_RFEN0;
- reg->rx.scr = 0;
- reg->tx.sier = CCSR_SSI_SIER_TFE0_EN;
- reg->tx.stcr = CCSR_SSI_STCR_TFEN0;
- reg->tx.scr = 0;
-
- if (!fsl_ssi_is_ac97(ssi_private)) {
- reg->rx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE;
- reg->rx.sier |= CCSR_SSI_SIER_RFF0_EN;
- reg->tx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE;
- reg->tx.sier |= CCSR_SSI_SIER_TFE0_EN;
+ struct fsl_ssi_regvals *vals = ssi->regvals;
+
+ vals[RX].sier = SSI_SIER_RFF0_EN;
+ vals[RX].srcr = SSI_SRCR_RFEN0;
+ vals[RX].scr = 0;
+ vals[TX].sier = SSI_SIER_TFE0_EN;
+ vals[TX].stcr = SSI_STCR_TFEN0;
+ vals[TX].scr = 0;
+
+ /* AC97 has already enabled SSIEN, RE and TE, so ignore them */
+ if (!fsl_ssi_is_ac97(ssi)) {
+ vals[RX].scr = SSI_SCR_SSIEN | SSI_SCR_RE;
+ vals[TX].scr = SSI_SCR_SSIEN | SSI_SCR_TE;
}
- if (ssi_private->use_dma) {
- reg->rx.sier |= CCSR_SSI_SIER_RDMAE;
- reg->tx.sier |= CCSR_SSI_SIER_TDMAE;
+ if (ssi->use_dma) {
+ vals[RX].sier |= SSI_SIER_RDMAE;
+ vals[TX].sier |= SSI_SIER_TDMAE;
} else {
- reg->rx.sier |= CCSR_SSI_SIER_RIE;
- reg->tx.sier |= CCSR_SSI_SIER_TIE;
+ vals[RX].sier |= SSI_SIER_RIE;
+ vals[TX].sier |= SSI_SIER_TIE;
}
- reg->rx.sier |= FSLSSI_SIER_DBG_RX_FLAGS;
- reg->tx.sier |= FSLSSI_SIER_DBG_TX_FLAGS;
+ vals[RX].sier |= FSLSSI_SIER_DBG_RX_FLAGS;
+ vals[TX].sier |= FSLSSI_SIER_DBG_TX_FLAGS;
}
-static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_setup_ac97(struct fsl_ssi *ssi)
{
- struct regmap *regs = ssi_private->regs;
-
- /*
- * Setup the clock control register
- */
- regmap_write(regs, CCSR_SSI_STCCR,
- CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
- regmap_write(regs, CCSR_SSI_SRCCR,
- CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
+ struct regmap *regs = ssi->regs;
- /*
- * Enable AC97 mode and startup the SSI
- */
- regmap_write(regs, CCSR_SSI_SACNT,
- CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
+ /* Setup the clock control register */
+ regmap_write(regs, REG_SSI_STCCR, SSI_SxCCR_WL(17) | SSI_SxCCR_DC(13));
+ regmap_write(regs, REG_SSI_SRCCR, SSI_SxCCR_WL(17) | SSI_SxCCR_DC(13));
- /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
- if (!ssi_private->soc->imx21regs) {
- regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
- regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
- }
+ /* Enable AC97 mode and startup the SSI */
+ regmap_write(regs, REG_SSI_SACNT, SSI_SACNT_AC97EN | SSI_SACNT_FV);
- /*
- * Enable SSI, Transmit and Receive. AC97 has to communicate with the
- * codec before a stream is started.
- */
- regmap_update_bits(regs, CCSR_SSI_SCR,
- CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE,
- CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE);
+ /* AC97 has to communicate with codec before starting a stream */
+ regmap_update_bits(regs, REG_SSI_SCR,
+ SSI_SCR_SSIEN | SSI_SCR_TE | SSI_SCR_RE,
+ SSI_SCR_SSIEN | SSI_SCR_TE | SSI_SCR_RE);
- regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_WAIT(3));
+ regmap_write(regs, REG_SSI_SOR, SSI_SOR_WAIT(3));
}
-/**
- * fsl_ssi_startup: create a new substream
- *
- * This is the first function called when a stream is opened.
- *
- * If this is the first stream open, then grab the IRQ and program most of
- * the SSI registers.
- */
static int fsl_ssi_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi_private *ssi_private =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
int ret;
- ret = clk_prepare_enable(ssi_private->clk);
+ ret = clk_prepare_enable(ssi->clk);
if (ret)
return ret;
- /* When using dual fifo mode, it is safer to ensure an even period
+ /*
+ * When using dual fifo mode, it is safer to ensure an even period
* size. If the period size comes out odd, then since DMA always
* starts its task from fifo0, fifo1 would be neglected at the end of
* each period, while SSI would still access fifo1 with invalid data.
*/
- if (ssi_private->use_dual_fifo)
+ if (ssi->use_dual_fifo)
snd_pcm_hw_constraint_step(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
return 0;
}
-/**
- * fsl_ssi_shutdown: shutdown the SSI
- *
- */
static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi_private *ssi_private =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
-
- clk_disable_unprepare(ssi_private->clk);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ clk_disable_unprepare(ssi->clk);
}
/**
- * fsl_ssi_set_bclk - configure Digital Audio Interface bit clock
+ * Configure Digital Audio Interface bit clock
*
* Note: This function can be only called when using SSI as DAI master
*
@@ -709,12 +675,13 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
* (In 2-channel I2S Master mode, slot_width is fixed 32)
*/
static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
- struct snd_soc_dai *cpu_dai,
- struct snd_pcm_hw_params *hw_params)
+ struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *hw_params)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
- struct regmap *regs = ssi_private->regs;
- int synchronous = ssi_private->cpu_dai_drv.symmetric_rates, ret;
+ bool tx2, tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+ struct regmap *regs = ssi->regs;
+ int synchronous = ssi->cpu_dai_drv.symmetric_rates, ret;
u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
unsigned long clkrate, baudrate, tmprate;
unsigned int slots = params_channels(hw_params);
@@ -724,29 +691,29 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
bool baudclk_is_used;
/* Override slots and slot_width if being specifically set... */
- if (ssi_private->slots)
- slots = ssi_private->slots;
+ if (ssi->slots)
+ slots = ssi->slots;
/* ...but keep 32 bits if slots is 2 -- I2S Master mode */
- if (ssi_private->slot_width && slots != 2)
- slot_width = ssi_private->slot_width;
+ if (ssi->slot_width && slots != 2)
+ slot_width = ssi->slot_width;
/* Generate bit clock based on the slot number and slot width */
freq = slots * slot_width * params_rate(hw_params);
/* Don't apply it to any non-baudclk circumstance */
- if (IS_ERR(ssi_private->baudclk))
+ if (IS_ERR(ssi->baudclk))
return -EINVAL;
/*
* Hardware limitation: The bclk rate must be
* never greater than 1/5 IPG clock rate
*/
- if (freq * 5 > clk_get_rate(ssi_private->clk)) {
- dev_err(cpu_dai->dev, "bitclk > ipgclk/5\n");
+ if (freq * 5 > clk_get_rate(ssi->clk)) {
+ dev_err(dai->dev, "bitclk > ipgclk / 5\n");
return -EINVAL;
}
- baudclk_is_used = ssi_private->baudclk_streams & ~(BIT(substream->stream));
+ baudclk_is_used = ssi->baudclk_streams & ~(BIT(substream->stream));
/* It should be already enough to divide clock by setting pm alone */
psr = 0;
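Worked numbers for the bit clock derivation above, with illustrative values:

/*
 * Example (illustrative values): slots = 2, slot_width = 32,
 * rate = 48000  =>  freq = 2 * 32 * 48000 = 3072000 Hz.
 * The ipg/5 ceiling then requires clk_get_rate(ssi->clk) >=
 * 5 * 3072000 = 15360000 Hz for this stream to be accepted.
 */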
@@ -758,9 +725,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
tmprate = freq * factor * (i + 1);
if (baudclk_is_used)
- clkrate = clk_get_rate(ssi_private->baudclk);
+ clkrate = clk_get_rate(ssi->baudclk);
else
- clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
+ clkrate = clk_round_rate(ssi->baudclk, tmprate);
clkrate /= factor;
afreq = clkrate / (i + 1);
@@ -791,24 +758,22 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
/* No proper pm found if it is still remaining the initial value */
if (pm == 999) {
- dev_err(cpu_dai->dev, "failed to handle the required sysclk\n");
+ dev_err(dai->dev, "failed to handle the required sysclk\n");
return -EINVAL;
}
- stccr = CCSR_SSI_SxCCR_PM(pm + 1) | (div2 ? CCSR_SSI_SxCCR_DIV2 : 0) |
- (psr ? CCSR_SSI_SxCCR_PSR : 0);
- mask = CCSR_SSI_SxCCR_PM_MASK | CCSR_SSI_SxCCR_DIV2 |
- CCSR_SSI_SxCCR_PSR;
+ stccr = SSI_SxCCR_PM(pm + 1) | (div2 ? SSI_SxCCR_DIV2 : 0) |
+ (psr ? SSI_SxCCR_PSR : 0);
+ mask = SSI_SxCCR_PM_MASK | SSI_SxCCR_DIV2 | SSI_SxCCR_PSR;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || synchronous)
- regmap_update_bits(regs, CCSR_SSI_STCCR, mask, stccr);
- else
- regmap_update_bits(regs, CCSR_SSI_SRCCR, mask, stccr);
+ /* STCCR is used for RX in synchronous mode */
+ tx2 = tx || synchronous;
+ regmap_update_bits(regs, REG_SSI_SxCCR(tx2), mask, stccr);
if (!baudclk_is_used) {
- ret = clk_set_rate(ssi_private->baudclk, baudrate);
+ ret = clk_set_rate(ssi->baudclk, baudrate);
if (ret) {
- dev_err(cpu_dai->dev, "failed to set baudclk rate\n");
+ dev_err(dai->dev, "failed to set baudclk rate\n");
return -EINVAL;
}
}
@@ -817,185 +782,165 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
}
/**
- * fsl_ssi_hw_params - program the sample size
- *
- * Most of the SSI registers have been programmed in the startup function,
- * but the word length must be programmed here. Unfortunately, programming
- * the SxCCR.WL bits requires the SSI to be temporarily disabled. This can
- * cause a problem with supporting simultaneous playback and capture. If
- * the SSI is already playing a stream, then that stream may be temporarily
- * stopped when you start capture.
+ * Configure SSI based on PCM hardware parameters
*
- * Note: The SxCCR.DC and SxCCR.PM bits are only used if the SSI is the
- * clock master.
+ * Notes:
+ * 1) SxCCR.WL bits are critical bits that require SSI to be temporarily
+ * disabled on offline_config SoCs. Even for online configurable SoCs
+ * running in synchronous mode (both TX and RX use STCCR), it is not
+ * safe to re-configure them once both streams are running.
+ * 2) SxCCR.PM, SxCCR.DIV2 and SxCCR.PSR bits will be configured in the
+ * fsl_ssi_set_bclk() if SSI is the DAI clock master.
*/
static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *cpu_dai)
+ struct snd_pcm_hw_params *hw_params,
+ struct snd_soc_dai *dai)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
- struct regmap *regs = ssi_private->regs;
+ bool tx2, tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+ struct regmap *regs = ssi->regs;
unsigned int channels = params_channels(hw_params);
unsigned int sample_size = params_width(hw_params);
- u32 wl = CCSR_SSI_SxCCR_WL(sample_size);
+ u32 wl = SSI_SxCCR_WL(sample_size);
int ret;
- u32 scr_val;
+ u32 scr;
int enabled;
- regmap_read(regs, CCSR_SSI_SCR, &scr_val);
- enabled = scr_val & CCSR_SSI_SCR_SSIEN;
+ regmap_read(regs, REG_SSI_SCR, &scr);
+ enabled = scr & SSI_SCR_SSIEN;
/*
- * If we're in synchronous mode, and the SSI is already enabled,
- * then STCCR is already set properly.
+ * SSI is properly configured if it is enabled and running in
+ * the synchronous mode; note that AC97 mode is an exception
+ * that should set separate configurations for STCCR and SRCCR
+ * despite running in the synchronous mode.
*/
- if (enabled && ssi_private->cpu_dai_drv.symmetric_rates)
+ if (enabled && ssi->cpu_dai_drv.symmetric_rates)
return 0;
- if (fsl_ssi_is_i2s_master(ssi_private)) {
- ret = fsl_ssi_set_bclk(substream, cpu_dai, hw_params);
+ if (fsl_ssi_is_i2s_master(ssi)) {
+ ret = fsl_ssi_set_bclk(substream, dai, hw_params);
if (ret)
return ret;
/* Do not enable the clock if it is already enabled */
- if (!(ssi_private->baudclk_streams & BIT(substream->stream))) {
- ret = clk_prepare_enable(ssi_private->baudclk);
+ if (!(ssi->baudclk_streams & BIT(substream->stream))) {
+ ret = clk_prepare_enable(ssi->baudclk);
if (ret)
return ret;
- ssi_private->baudclk_streams |= BIT(substream->stream);
+ ssi->baudclk_streams |= BIT(substream->stream);
}
}
- if (!fsl_ssi_is_ac97(ssi_private)) {
- u8 i2smode;
- /*
- * Switch to normal net mode in order to have a frame sync
- * signal every 32 bits instead of 16 bits
- */
- if (fsl_ssi_is_i2s_cbm_cfs(ssi_private) && sample_size == 16)
- i2smode = CCSR_SSI_SCR_I2S_MODE_NORMAL |
- CCSR_SSI_SCR_NET;
+ if (!fsl_ssi_is_ac97(ssi)) {
+ u8 i2s_net;
+ /* Normal + Network mode to send 16-bit data in 32-bit frames */
+ if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16)
+ i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET;
else
- i2smode = ssi_private->i2s_mode;
+ i2s_net = ssi->i2s_net;
- regmap_update_bits(regs, CCSR_SSI_SCR,
- CCSR_SSI_SCR_NET | CCSR_SSI_SCR_I2S_MODE_MASK,
- channels == 1 ? 0 : i2smode);
+ regmap_update_bits(regs, REG_SSI_SCR,
+ SSI_SCR_I2S_NET_MASK,
+ channels == 1 ? 0 : i2s_net);
}
- /*
- * FIXME: The documentation says that SxCCR[WL] should not be
- * modified while the SSI is enabled. The only time this can
- * happen is if we're trying to do simultaneous playback and
- * capture in asynchronous mode. Unfortunately, I have been enable
- * to get that to work at all on the P1022DS. Therefore, we don't
- * bother to disable/enable the SSI when setting SxCCR[WL], because
- * the SSI will stop anyway. Maybe one day, this will get fixed.
- */
-
/* In synchronous mode, the SSI uses STCCR for capture */
- if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ||
- ssi_private->cpu_dai_drv.symmetric_rates)
- regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_WL_MASK,
- wl);
- else
- regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_WL_MASK,
- wl);
+ tx2 = tx || ssi->cpu_dai_drv.symmetric_rates;
+ regmap_update_bits(regs, REG_SSI_SxCCR(tx2), SSI_SxCCR_WL_MASK, wl);
return 0;
}
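Above, tx2 folds the "playback, or synchronous mode" decision into a boolean index so one regmap call covers both STCCR and SRCCR. The indexed macro is not shown in this diff; a plausible shape (assumption -- see fsl_ssi.h for the real definition):

#define REG_SSI_SxCCR(tx)	((tx) ? REG_SSI_STCCR : REG_SSI_SRCCR)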
static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_dai *cpu_dai)
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi_private *ssi_private =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
- if (fsl_ssi_is_i2s_master(ssi_private) &&
- ssi_private->baudclk_streams & BIT(substream->stream)) {
- clk_disable_unprepare(ssi_private->baudclk);
- ssi_private->baudclk_streams &= ~BIT(substream->stream);
+ if (fsl_ssi_is_i2s_master(ssi) &&
+ ssi->baudclk_streams & BIT(substream->stream)) {
+ clk_disable_unprepare(ssi->baudclk);
+ ssi->baudclk_streams &= ~BIT(substream->stream);
}
return 0;
}
static int _fsl_ssi_set_dai_fmt(struct device *dev,
- struct fsl_ssi_private *ssi_private,
- unsigned int fmt)
+ struct fsl_ssi *ssi, unsigned int fmt)
{
- struct regmap *regs = ssi_private->regs;
+ struct regmap *regs = ssi->regs;
u32 strcr = 0, stcr, srcr, scr, mask;
u8 wm;
- ssi_private->dai_fmt = fmt;
+ ssi->dai_fmt = fmt;
- if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) {
- dev_err(dev, "baudclk is missing which is necessary for master mode\n");
+ if (fsl_ssi_is_i2s_master(ssi) && IS_ERR(ssi->baudclk)) {
+ dev_err(dev, "missing baudclk for master mode\n");
return -EINVAL;
}
- fsl_ssi_setup_reg_vals(ssi_private);
+ fsl_ssi_setup_regvals(ssi);
- regmap_read(regs, CCSR_SSI_SCR, &scr);
- scr &= ~(CCSR_SSI_SCR_SYN | CCSR_SSI_SCR_I2S_MODE_MASK);
- scr |= CCSR_SSI_SCR_SYNC_TX_FS;
+ regmap_read(regs, REG_SSI_SCR, &scr);
+ scr &= ~(SSI_SCR_SYN | SSI_SCR_I2S_MODE_MASK);
+ /* Synchronize frame sync clock for TE to avoid data slipping */
+ scr |= SSI_SCR_SYNC_TX_FS;
- mask = CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR |
- CCSR_SSI_STCR_TSCKP | CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TFSL |
- CCSR_SSI_STCR_TEFS;
- regmap_read(regs, CCSR_SSI_STCR, &stcr);
- regmap_read(regs, CCSR_SSI_SRCR, &srcr);
+ mask = SSI_STCR_TXBIT0 | SSI_STCR_TFDIR | SSI_STCR_TXDIR |
+ SSI_STCR_TSCKP | SSI_STCR_TFSI | SSI_STCR_TFSL | SSI_STCR_TEFS;
+ regmap_read(regs, REG_SSI_STCR, &stcr);
+ regmap_read(regs, REG_SSI_SRCR, &srcr);
stcr &= ~mask;
srcr &= ~mask;
- ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
+ /* Use Network mode as default */
+ ssi->i2s_net = SSI_SCR_NET;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- regmap_update_bits(regs, CCSR_SSI_STCCR,
- CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(2));
- regmap_update_bits(regs, CCSR_SSI_SRCCR,
- CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(2));
+ regmap_update_bits(regs, REG_SSI_STCCR,
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+ regmap_update_bits(regs, REG_SSI_SRCCR,
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFS:
case SND_SOC_DAIFMT_CBS_CFS:
- ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
+ ssi->i2s_net |= SSI_SCR_I2S_MODE_MASTER;
break;
case SND_SOC_DAIFMT_CBM_CFM:
- ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
+ ssi->i2s_net |= SSI_SCR_I2S_MODE_SLAVE;
break;
default:
return -EINVAL;
}
/* Data on rising edge of bclk, frame low, 1clk before data */
- strcr |= CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TSCKP |
- CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS;
+ strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP |
+ SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
break;
case SND_SOC_DAIFMT_LEFT_J:
/* Data on rising edge of bclk, frame high */
- strcr |= CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TSCKP;
+ strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP;
break;
case SND_SOC_DAIFMT_DSP_A:
/* Data on rising edge of bclk, frame high, 1clk before data */
- strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
- CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS;
+ strcr |= SSI_STCR_TFSL | SSI_STCR_TSCKP |
+ SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
break;
case SND_SOC_DAIFMT_DSP_B:
/* Data on rising edge of bclk, frame high */
- strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
- CCSR_SSI_STCR_TXBIT0;
+ strcr |= SSI_STCR_TFSL | SSI_STCR_TSCKP | SSI_STCR_TXBIT0;
break;
case SND_SOC_DAIFMT_AC97:
- ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_NORMAL;
+ /* Data on falling edge of bclk, frame high, 1clk before data */
+ ssi->i2s_net |= SSI_SCR_I2S_MODE_NORMAL;
break;
default:
return -EINVAL;
}
- scr |= ssi_private->i2s_mode;
+ scr |= ssi->i2s_net;
/* DAI clock inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -1004,16 +949,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
break;
case SND_SOC_DAIFMT_IB_NF:
/* Invert bit clock */
- strcr ^= CCSR_SSI_STCR_TSCKP;
+ strcr ^= SSI_STCR_TSCKP;
break;
case SND_SOC_DAIFMT_NB_IF:
/* Invert frame clock */
- strcr ^= CCSR_SSI_STCR_TFSI;
+ strcr ^= SSI_STCR_TFSI;
break;
case SND_SOC_DAIFMT_IB_IF:
/* Invert both clocks */
- strcr ^= CCSR_SSI_STCR_TSCKP;
- strcr ^= CCSR_SSI_STCR_TFSI;
+ strcr ^= SSI_STCR_TSCKP;
+ strcr ^= SSI_STCR_TFSI;
break;
default:
return -EINVAL;
@@ -1022,123 +967,122 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
/* DAI clock master masks */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- strcr |= CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR;
- scr |= CCSR_SSI_SCR_SYS_CLK_EN;
+ /* Output bit and frame sync clocks */
+ strcr |= SSI_STCR_TFDIR | SSI_STCR_TXDIR;
+ scr |= SSI_SCR_SYS_CLK_EN;
break;
case SND_SOC_DAIFMT_CBM_CFM:
- scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+ /* Input bit or frame sync clocks */
+ scr &= ~SSI_SCR_SYS_CLK_EN;
break;
case SND_SOC_DAIFMT_CBM_CFS:
- strcr &= ~CCSR_SSI_STCR_TXDIR;
- strcr |= CCSR_SSI_STCR_TFDIR;
- scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+ /* Input bit clock but output frame sync clock */
+ strcr &= ~SSI_STCR_TXDIR;
+ strcr |= SSI_STCR_TFDIR;
+ scr &= ~SSI_SCR_SYS_CLK_EN;
break;
default:
- if (!fsl_ssi_is_ac97(ssi_private))
+ if (!fsl_ssi_is_ac97(ssi))
return -EINVAL;
}
stcr |= strcr;
srcr |= strcr;
- if (ssi_private->cpu_dai_drv.symmetric_rates
- || fsl_ssi_is_ac97(ssi_private)) {
- /* Need to clear RXDIR when using SYNC or AC97 mode */
- srcr &= ~CCSR_SSI_SRCR_RXDIR;
- scr |= CCSR_SSI_SCR_SYN;
+ /* Set SYN mode and clear RXDIR bit when using SYN or AC97 mode */
+ if (ssi->cpu_dai_drv.symmetric_rates || fsl_ssi_is_ac97(ssi)) {
+ srcr &= ~SSI_SRCR_RXDIR;
+ scr |= SSI_SCR_SYN;
}
- regmap_write(regs, CCSR_SSI_STCR, stcr);
- regmap_write(regs, CCSR_SSI_SRCR, srcr);
- regmap_write(regs, CCSR_SSI_SCR, scr);
+ regmap_write(regs, REG_SSI_STCR, stcr);
+ regmap_write(regs, REG_SSI_SRCR, srcr);
+ regmap_write(regs, REG_SSI_SCR, scr);
- wm = ssi_private->fifo_watermark;
+ wm = ssi->fifo_watermark;
- regmap_write(regs, CCSR_SSI_SFCSR,
- CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
- CCSR_SSI_SFCSR_TFWM1(wm) | CCSR_SSI_SFCSR_RFWM1(wm));
+ regmap_write(regs, REG_SSI_SFCSR,
+ SSI_SFCSR_TFWM0(wm) | SSI_SFCSR_RFWM0(wm) |
+ SSI_SFCSR_TFWM1(wm) | SSI_SFCSR_RFWM1(wm));
- if (ssi_private->use_dual_fifo) {
- regmap_update_bits(regs, CCSR_SSI_SRCR, CCSR_SSI_SRCR_RFEN1,
- CCSR_SSI_SRCR_RFEN1);
- regmap_update_bits(regs, CCSR_SSI_STCR, CCSR_SSI_STCR_TFEN1,
- CCSR_SSI_STCR_TFEN1);
- regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_TCH_EN,
- CCSR_SSI_SCR_TCH_EN);
+ if (ssi->use_dual_fifo) {
+ regmap_update_bits(regs, REG_SSI_SRCR,
+ SSI_SRCR_RFEN1, SSI_SRCR_RFEN1);
+ regmap_update_bits(regs, REG_SSI_STCR,
+ SSI_STCR_TFEN1, SSI_STCR_TFEN1);
+ regmap_update_bits(regs, REG_SSI_SCR,
+ SSI_SCR_TCH_EN, SSI_SCR_TCH_EN);
}
if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97)
- fsl_ssi_setup_ac97(ssi_private);
+ fsl_ssi_setup_ac97(ssi);
return 0;
-
}
/**
- * fsl_ssi_set_dai_fmt - configure Digital Audio Interface Format.
+ * Configure Digital Audio Interface (DAI) Format
*/
-static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
- return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt);
+ /* AC97 configured DAIFMT earlier in the probe() */
+ if (fsl_ssi_is_ac97(ssi))
+ return 0;
+
+ return _fsl_ssi_set_dai_fmt(dai->dev, ssi, fmt);
}
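/*
 * Illustrative usage (not part of this patch): a machine driver would
 * normally reach the handler above via the ASoC core, for example:
 *
 *	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
 *				  SND_SOC_DAIFMT_NB_NF |
 *				  SND_SOC_DAIFMT_CBS_CFS);
 *
 * which ends up in fsl_ssi_set_dai_fmt() and, per the check above,
 * becomes a no-op in AC97 mode.
 */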
/**
- * fsl_ssi_set_dai_tdm_slot - set TDM slot number
- *
- * Note: This function can be only called when using SSI as DAI master
+ * Set TDM slot number and slot width
*/
-static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
- u32 rx_mask, int slots, int slot_width)
+static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
+ u32 rx_mask, int slots, int slot_width)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+ struct regmap *regs = ssi->regs;
u32 val;
/* The word length should be 8, 10, 12, 16, 18, 20, 22 or 24 */
if (slot_width & 1 || slot_width < 8 || slot_width > 24) {
- dev_err(cpu_dai->dev, "invalid slot width: %d\n", slot_width);
+ dev_err(dai->dev, "invalid slot width: %d\n", slot_width);
return -EINVAL;
}
/* The slot number should be >= 2 if using Network mode or I2S mode */
- regmap_read(regs, CCSR_SSI_SCR, &val);
- val &= CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_NET;
+ regmap_read(regs, REG_SSI_SCR, &val);
+ val &= SSI_SCR_I2S_MODE_MASK | SSI_SCR_NET;
if (val && slots < 2) {
- dev_err(cpu_dai->dev, "slot number should be >= 2 in I2S or NET\n");
+ dev_err(dai->dev, "slot number should be >= 2 in I2S or NET\n");
return -EINVAL;
}
- regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(slots));
- regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(slots));
+ regmap_update_bits(regs, REG_SSI_STCCR,
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
+ regmap_update_bits(regs, REG_SSI_SRCCR,
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
- /* The register SxMSKs needs SSI to provide essential clock due to
- * hardware design. So we here temporarily enable SSI to set them.
- */
- regmap_read(regs, CCSR_SSI_SCR, &val);
- val &= CCSR_SSI_SCR_SSIEN;
- regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN,
- CCSR_SSI_SCR_SSIEN);
+ /* Save SSIEN bit of the SCR register */
+ regmap_read(regs, REG_SSI_SCR, &val);
+ val &= SSI_SCR_SSIEN;
+ /* Temporarily enable SSI to allow SxMSKs to be configurable */
+ regmap_update_bits(regs, REG_SSI_SCR, SSI_SCR_SSIEN, SSI_SCR_SSIEN);
- regmap_write(regs, CCSR_SSI_STMSK, ~tx_mask);
- regmap_write(regs, CCSR_SSI_SRMSK, ~rx_mask);
+ regmap_write(regs, REG_SSI_STMSK, ~tx_mask);
+ regmap_write(regs, REG_SSI_SRMSK, ~rx_mask);
- regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN, val);
+ /* Restore the value of SSIEN bit */
+ regmap_update_bits(regs, REG_SSI_SCR, SSI_SCR_SSIEN, val);
- ssi_private->slot_width = slot_width;
- ssi_private->slots = slots;
+ ssi->slot_width = slot_width;
+ ssi->slots = slots;
return 0;
}
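/*
 * Illustrative usage (not part of this patch): requesting 2 slots of
 * 16 bits with both time slots active in each direction would pass
 * the checks above:
 *
 *	ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 16);
 *
 * Since the driver writes ~tx_mask/~rx_mask into STMSK/SRMSK, a set
 * bit in the caller's mask means that time slot carries data.
 */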
/**
- * fsl_ssi_trigger: start and stop the DMA transfer.
- *
- * This function is called by ALSA to start, stop, pause, and resume the DMA
- * transfer of data.
+ * Start or stop SSI and corresponding DMA transaction.
*
* The DMA channel is in external master start and pause mode, which
* means the SSI completely controls the flow of data.
@@ -1147,37 +1091,38 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai);
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct regmap *regs = ssi->regs;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- fsl_ssi_tx_config(ssi_private, true);
+ fsl_ssi_tx_config(ssi, true);
else
- fsl_ssi_rx_config(ssi_private, true);
+ fsl_ssi_rx_config(ssi, true);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- fsl_ssi_tx_config(ssi_private, false);
+ fsl_ssi_tx_config(ssi, false);
else
- fsl_ssi_rx_config(ssi_private, false);
+ fsl_ssi_rx_config(ssi, false);
break;
default:
return -EINVAL;
}
- if (fsl_ssi_is_ac97(ssi_private)) {
+ /* Clear corresponding FIFO */
+ if (fsl_ssi_is_ac97(ssi)) {
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_TX_CLR);
+ regmap_write(regs, REG_SSI_SOR, SSI_SOR_TX_CLR);
else
- regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_RX_CLR);
+ regmap_write(regs, REG_SSI_SOR, SSI_SOR_RX_CLR);
}
return 0;
@@ -1185,27 +1130,26 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
- if (ssi_private->soc->imx && ssi_private->use_dma) {
- dai->playback_dma_data = &ssi_private->dma_params_tx;
- dai->capture_dma_data = &ssi_private->dma_params_rx;
+ if (ssi->soc->imx && ssi->use_dma) {
+ dai->playback_dma_data = &ssi->dma_params_tx;
+ dai->capture_dma_data = &ssi->dma_params_rx;
}
return 0;
}
static const struct snd_soc_dai_ops fsl_ssi_dai_ops = {
- .startup = fsl_ssi_startup,
- .shutdown = fsl_ssi_shutdown,
- .hw_params = fsl_ssi_hw_params,
- .hw_free = fsl_ssi_hw_free,
- .set_fmt = fsl_ssi_set_dai_fmt,
- .set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
- .trigger = fsl_ssi_trigger,
+ .startup = fsl_ssi_startup,
+ .shutdown = fsl_ssi_shutdown,
+ .hw_params = fsl_ssi_hw_params,
+ .hw_free = fsl_ssi_hw_free,
+ .set_fmt = fsl_ssi_set_dai_fmt,
+ .set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
+ .trigger = fsl_ssi_trigger,
};
-/* Template for the CPU dai driver structure */
static struct snd_soc_dai_driver fsl_ssi_dai_template = {
.probe = fsl_ssi_dai_probe,
.playback = {
@@ -1226,7 +1170,7 @@ static struct snd_soc_dai_driver fsl_ssi_dai_template = {
};
static const struct snd_soc_component_driver fsl_ssi_component = {
- .name = "fsl-ssi",
+ .name = "fsl-ssi",
};
static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
@@ -1237,23 +1181,23 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
},
.capture = {
.stream_name = "AC97 Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ /* 16-bit capture is broken (errata ERR003778) */
+ .formats = SNDRV_PCM_FMTBIT_S20,
},
.ops = &fsl_ssi_dai_ops,
};
-
-static struct fsl_ssi_private *fsl_ac97_data;
+static struct fsl_ssi *fsl_ac97_data;
static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
- unsigned short val)
+ unsigned short val)
{
struct regmap *regs = fsl_ac97_data->regs;
unsigned int lreg;
@@ -1273,13 +1217,13 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
}
lreg = reg << 12;
- regmap_write(regs, CCSR_SSI_SACADD, lreg);
+ regmap_write(regs, REG_SSI_SACADD, lreg);
lval = val << 4;
- regmap_write(regs, CCSR_SSI_SACDAT, lval);
+ regmap_write(regs, REG_SSI_SACDAT, lval);
- regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
- CCSR_SSI_SACNT_WR);
+ regmap_update_bits(regs, REG_SSI_SACNT,
+ SSI_SACNT_RDWR_MASK, SSI_SACNT_WR);
udelay(100);
clk_disable_unprepare(fsl_ac97_data->clk);
@@ -1289,10 +1233,9 @@ ret_unlock:
}
static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
- unsigned short reg)
+ unsigned short reg)
{
struct regmap *regs = fsl_ac97_data->regs;
-
unsigned short val = 0;
u32 reg_val;
unsigned int lreg;
@@ -1302,19 +1245,18 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
ret = clk_prepare_enable(fsl_ac97_data->clk);
if (ret) {
- pr_err("ac97 read clk_prepare_enable failed: %d\n",
- ret);
+ pr_err("ac97 read clk_prepare_enable failed: %d\n", ret);
goto ret_unlock;
}
lreg = (reg & 0x7f) << 12;
- regmap_write(regs, CCSR_SSI_SACADD, lreg);
- regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
- CCSR_SSI_SACNT_RD);
+ regmap_write(regs, REG_SSI_SACADD, lreg);
+ regmap_update_bits(regs, REG_SSI_SACNT,
+ SSI_SACNT_RDWR_MASK, SSI_SACNT_RD);
udelay(100);
- regmap_read(regs, CCSR_SSI_SACDAT, &reg_val);
+ regmap_read(regs, REG_SSI_SACDAT, &reg_val);
val = (reg_val >> 4) & 0xffff;
clk_disable_unprepare(fsl_ac97_data->clk);
@@ -1325,8 +1267,8 @@ ret_unlock:
}
static struct snd_ac97_bus_ops fsl_ssi_ac97_ops = {
- .read = fsl_ssi_ac97_read,
- .write = fsl_ssi_ac97_write,
+ .read = fsl_ssi_ac97_read,
+ .write = fsl_ssi_ac97_write,
};
/**
@@ -1341,70 +1283,67 @@ static void make_lowercase(char *s)
}
static int fsl_ssi_imx_probe(struct platform_device *pdev,
- struct fsl_ssi_private *ssi_private, void __iomem *iomem)
+ struct fsl_ssi *ssi, void __iomem *iomem)
{
struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
u32 dmas[4];
int ret;
- if (ssi_private->has_ipg_clk_name)
- ssi_private->clk = devm_clk_get(&pdev->dev, "ipg");
+ /* Backward compatibility for a DT without an ipg clock name assigned */
+ if (ssi->has_ipg_clk_name)
+ ssi->clk = devm_clk_get(dev, "ipg");
else
- ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ssi_private->clk)) {
- ret = PTR_ERR(ssi_private->clk);
- dev_err(&pdev->dev, "could not get clock: %d\n", ret);
+ ssi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ssi->clk)) {
+ ret = PTR_ERR(ssi->clk);
+ dev_err(dev, "failed to get clock: %d\n", ret);
return ret;
}
- if (!ssi_private->has_ipg_clk_name) {
- ret = clk_prepare_enable(ssi_private->clk);
+ /* Enable the clock since regmap will not handle it in this case */
+ if (!ssi->has_ipg_clk_name) {
+ ret = clk_prepare_enable(ssi->clk);
if (ret) {
- dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+ dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
return ret;
}
}
- /* For those SLAVE implementations, we ignore non-baudclk cases
- * and, instead, abandon MASTER mode that needs baud clock.
- */
- ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
- if (IS_ERR(ssi_private->baudclk))
- dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
- PTR_ERR(ssi_private->baudclk));
+ /* Do not error out for slave cases that live without a baud clock */
+ ssi->baudclk = devm_clk_get(dev, "baud");
+ if (IS_ERR(ssi->baudclk))
+ dev_dbg(dev, "failed to get baud clock: %ld\n",
+ PTR_ERR(ssi->baudclk));
- ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
- ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
- ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
- ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
+ ssi->dma_params_tx.maxburst = ssi->dma_maxburst;
+ ssi->dma_params_rx.maxburst = ssi->dma_maxburst;
+ ssi->dma_params_tx.addr = ssi->ssi_phys + REG_SSI_STX0;
+ ssi->dma_params_rx.addr = ssi->ssi_phys + REG_SSI_SRX0;
+ /* Set to dual FIFO mode according to the SDMA script */
ret = of_property_read_u32_array(np, "dmas", dmas, 4);
- if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
- ssi_private->use_dual_fifo = true;
- /* When using dual fifo mode, we need to keep watermark
- * as even numbers due to dma script limitation.
+ if (ssi->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+ ssi->use_dual_fifo = true;
+ /*
+ * Use even numbers to avoid channel swap due to SDMA
+ * script design
*/
- ssi_private->dma_params_tx.maxburst &= ~0x1;
- ssi_private->dma_params_rx.maxburst &= ~0x1;
+ ssi->dma_params_tx.maxburst &= ~0x1;
+ ssi->dma_params_rx.maxburst &= ~0x1;
}
- if (!ssi_private->use_dma) {
-
+ if (!ssi->use_dma) {
/*
- * Some boards use an incompatible codec. To get it
- * working, we are using imx-fiq-pcm-audio, that
- * can handle those codecs. DMA is not possible in this
- * situation.
+ * Some boards use an incompatible codec. Use imx-fiq-pcm-audio
+ * to get it working, as DMA is not possible in this situation.
*/
+ ssi->fiq_params.irq = ssi->irq;
+ ssi->fiq_params.base = iomem;
+ ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
+ ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
- ssi_private->fiq_params.irq = ssi_private->irq;
- ssi_private->fiq_params.base = iomem;
- ssi_private->fiq_params.dma_params_rx =
- &ssi_private->dma_params_rx;
- ssi_private->fiq_params.dma_params_tx =
- &ssi_private->dma_params_tx;
-
- ret = imx_pcm_fiq_init(pdev, &ssi_private->fiq_params);
+ ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
if (ret)
goto error_pcm;
} else {
@@ -1416,26 +1355,26 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
return 0;
error_pcm:
+ if (!ssi->has_ipg_clk_name)
+ clk_disable_unprepare(ssi->clk);
- if (!ssi_private->has_ipg_clk_name)
- clk_disable_unprepare(ssi_private->clk);
return ret;
}
-static void fsl_ssi_imx_clean(struct platform_device *pdev,
- struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_imx_clean(struct platform_device *pdev, struct fsl_ssi *ssi)
{
- if (!ssi_private->use_dma)
+ if (!ssi->use_dma)
imx_pcm_fiq_exit(pdev);
- if (!ssi_private->has_ipg_clk_name)
- clk_disable_unprepare(ssi_private->clk);
+ if (!ssi->has_ipg_clk_name)
+ clk_disable_unprepare(ssi->clk);
}
static int fsl_ssi_probe(struct platform_device *pdev)
{
- struct fsl_ssi_private *ssi_private;
+ struct fsl_ssi *ssi;
int ret = 0;
struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
const char *p, *sprop;
const uint32_t *iprop;
@@ -1444,185 +1383,159 @@ static int fsl_ssi_probe(struct platform_device *pdev)
char name[64];
struct regmap_config regconfig = fsl_ssi_regconfig;
- of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
+ of_id = of_match_device(fsl_ssi_ids, dev);
if (!of_id || !of_id->data)
return -EINVAL;
- ssi_private = devm_kzalloc(&pdev->dev, sizeof(*ssi_private),
- GFP_KERNEL);
- if (!ssi_private)
+ ssi = devm_kzalloc(dev, sizeof(*ssi), GFP_KERNEL);
+ if (!ssi)
return -ENOMEM;
- ssi_private->soc = of_id->data;
- ssi_private->dev = &pdev->dev;
+ ssi->soc = of_id->data;
+ ssi->dev = dev;
+ /* Check if being used in AC97 mode */
sprop = of_get_property(np, "fsl,mode", NULL);
if (sprop) {
if (!strcmp(sprop, "ac97-slave"))
- ssi_private->dai_fmt = SND_SOC_DAIFMT_AC97;
+ ssi->dai_fmt = SND_SOC_DAIFMT_AC97;
}
- ssi_private->use_dma = !of_property_read_bool(np,
- "fsl,fiq-stream-filter");
-
- if (fsl_ssi_is_ac97(ssi_private)) {
- memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_ac97_dai,
- sizeof(fsl_ssi_ac97_dai));
+ /* Select DMA or FIQ */
+ ssi->use_dma = !of_property_read_bool(np, "fsl,fiq-stream-filter");
- fsl_ac97_data = ssi_private;
+ if (fsl_ssi_is_ac97(ssi)) {
+ memcpy(&ssi->cpu_dai_drv, &fsl_ssi_ac97_dai,
+ sizeof(fsl_ssi_ac97_dai));
+ fsl_ac97_data = ssi;
} else {
- /* Initialize this copy of the CPU DAI driver structure */
- memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
+ memcpy(&ssi->cpu_dai_drv, &fsl_ssi_dai_template,
sizeof(fsl_ssi_dai_template));
}
- ssi_private->cpu_dai_drv.name = dev_name(&pdev->dev);
+ ssi->cpu_dai_drv.name = dev_name(dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(&pdev->dev, res);
+ iomem = devm_ioremap_resource(dev, res);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
- ssi_private->ssi_phys = res->start;
+ ssi->ssi_phys = res->start;
- if (ssi_private->soc->imx21regs) {
- /*
- * According to datasheet imx21-class SSI
- * don't have SACC{ST,EN,DIS} regs.
- */
- regconfig.max_register = CCSR_SSI_SRMSK;
+ if (ssi->soc->imx21regs) {
+ /* No SACC{ST,EN,DIS} regs in imx21-class SSI */
+ regconfig.max_register = REG_SSI_SRMSK;
regconfig.num_reg_defaults_raw =
- CCSR_SSI_SRMSK / sizeof(uint32_t) + 1;
+ REG_SSI_SRMSK / sizeof(uint32_t) + 1;
}
ret = of_property_match_string(np, "clock-names", "ipg");
if (ret < 0) {
- ssi_private->has_ipg_clk_name = false;
- ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
- &regconfig);
+ ssi->has_ipg_clk_name = false;
+ ssi->regs = devm_regmap_init_mmio(dev, iomem, &regconfig);
} else {
- ssi_private->has_ipg_clk_name = true;
- ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
- "ipg", iomem, &regconfig);
+ ssi->has_ipg_clk_name = true;
+ ssi->regs = devm_regmap_init_mmio_clk(dev, "ipg", iomem,
+ &regconfig);
}
- if (IS_ERR(ssi_private->regs)) {
- dev_err(&pdev->dev, "Failed to init register map\n");
- return PTR_ERR(ssi_private->regs);
+ if (IS_ERR(ssi->regs)) {
+ dev_err(dev, "failed to init register map\n");
+ return PTR_ERR(ssi->regs);
}
- ssi_private->irq = platform_get_irq(pdev, 0);
- if (ssi_private->irq < 0) {
- dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
- return ssi_private->irq;
+ ssi->irq = platform_get_irq(pdev, 0);
+ if (ssi->irq < 0) {
+ dev_err(dev, "no irq for node %s\n", pdev->name);
+ return ssi->irq;
}
- /* Are the RX and the TX clocks locked? */
+ /* Set software limitations for synchronous mode */
if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) {
- if (!fsl_ssi_is_ac97(ssi_private))
- ssi_private->cpu_dai_drv.symmetric_rates = 1;
+ if (!fsl_ssi_is_ac97(ssi)) {
+ ssi->cpu_dai_drv.symmetric_rates = 1;
+ ssi->cpu_dai_drv.symmetric_samplebits = 1;
+ }
- ssi_private->cpu_dai_drv.symmetric_channels = 1;
- ssi_private->cpu_dai_drv.symmetric_samplebits = 1;
+ ssi->cpu_dai_drv.symmetric_channels = 1;
}
- /* Determine the FIFO depth. */
+ /* Fetch FIFO depth; set to 8 for older DTs without this property */
iprop = of_get_property(np, "fsl,fifo-depth", NULL);
if (iprop)
- ssi_private->fifo_depth = be32_to_cpup(iprop);
+ ssi->fifo_depth = be32_to_cpup(iprop);
else
- /* Older 8610 DTs didn't have the fifo-depth property */
- ssi_private->fifo_depth = 8;
+ ssi->fifo_depth = 8;
/*
- * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
- * use FIFO 1 but set the watermark appropriately nontheless.
- * We program the transmit water to signal a DMA transfer
- * if there are N elements left in the FIFO. For chips with 15-deep
- * FIFOs, set watermark to 8. This allows the SSI to operate at a
- * high data rate without channel slipping. Behavior is unchanged
- * for the older chips with a fifo depth of only 8. A value of 4
- * might be appropriate for the older chips, but is left at
- * fifo_depth-2 until sombody has a chance to test.
+ * Configure TX and RX DMA watermarks -- when to send a DMA request
*
- * We set the watermark on the same level as the DMA burstsize. For
- * fiq it is probably better to use the biggest possible watermark
- * size.
+ * Values should be tested to avoid FIFO under/overrun. Set maxburst
+ * to fifo_watermark to maximize each DMA transaction and reduce overhead.
*/
- switch (ssi_private->fifo_depth) {
+ switch (ssi->fifo_depth) {
case 15:
/*
- * 2 samples is not enough when running at high data
- * rates (like 48kHz @ 16 bits/channel, 16 channels)
- * 8 seems to split things evenly and leave enough time
- * for the DMA to fill the FIFO before it's over/under
- * run.
+ * Set to 8 as a balanced configuration -- when the TX FIFO has 8
+ * empty slots, send a DMA request to fill these 8 slots. The
+ * remaining 7 filled slots should allow the DMA to finish the
+ * transaction before the TX FIFO underruns; the same applies to RX.
+ *
+ * Tested with cases running at 48kHz @ 16 bits x 16 channels
*/
- ssi_private->fifo_watermark = 8;
- ssi_private->dma_maxburst = 8;
+ ssi->fifo_watermark = 8;
+ ssi->dma_maxburst = 8;
break;
case 8:
default:
- /*
- * maintain old behavior for older chips.
- * Keeping it the same because I don't have an older
- * board to test with.
- * I suspect this could be changed to be something to
- * leave some more space in the fifo.
- */
- ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
- ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
+ /* Safely use old watermark configurations for older chips */
+ ssi->fifo_watermark = ssi->fifo_depth - 2;
+ ssi->dma_maxburst = ssi->fifo_depth - 2;
break;
}
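/*
 * Sketch of the policy above (illustrative, not part of this patch):
 *
 *	watermark = (fifo_depth == 15) ? 8 : fifo_depth - 2;
 *	maxburst  = watermark;
 *
 * A 15-deep FIFO raises a DMA request once 8 slots are empty, leaving
 * 7 filled slots of headroom; older 8-deep FIFOs keep the legacy
 * depth - 2 setting.
 */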
- dev_set_drvdata(&pdev->dev, ssi_private);
+ dev_set_drvdata(dev, ssi);
- if (ssi_private->soc->imx) {
- ret = fsl_ssi_imx_probe(pdev, ssi_private, iomem);
+ if (ssi->soc->imx) {
+ ret = fsl_ssi_imx_probe(pdev, ssi, iomem);
if (ret)
return ret;
}
- if (fsl_ssi_is_ac97(ssi_private)) {
- mutex_init(&ssi_private->ac97_reg_lock);
+ if (fsl_ssi_is_ac97(ssi)) {
+ mutex_init(&ssi->ac97_reg_lock);
ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
if (ret) {
- dev_err(&pdev->dev, "could not set AC'97 ops\n");
+ dev_err(dev, "failed to set AC'97 ops\n");
goto error_ac97_ops;
}
}
- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
- &ssi_private->cpu_dai_drv, 1);
+ ret = devm_snd_soc_register_component(dev, &fsl_ssi_component,
+ &ssi->cpu_dai_drv, 1);
if (ret) {
- dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+ dev_err(dev, "failed to register DAI: %d\n", ret);
goto error_asoc_register;
}
- if (ssi_private->use_dma) {
- ret = devm_request_irq(&pdev->dev, ssi_private->irq,
- fsl_ssi_isr, 0, dev_name(&pdev->dev),
- ssi_private);
+ if (ssi->use_dma) {
+ ret = devm_request_irq(dev, ssi->irq, fsl_ssi_isr, 0,
+ dev_name(dev), ssi);
if (ret < 0) {
- dev_err(&pdev->dev, "could not claim irq %u\n",
- ssi_private->irq);
+ dev_err(dev, "failed to claim irq %u\n", ssi->irq);
goto error_asoc_register;
}
}
- ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev);
+ ret = fsl_ssi_debugfs_create(&ssi->dbg_stats, dev);
if (ret)
goto error_asoc_register;
- /*
- * If codec-handle property is missing from SSI node, we assume
- * that the machine driver uses new binding which does not require
- * SSI driver to trigger machine driver's probe.
- */
+ /* Bypass it if using newer DT bindings of ASoC machine drivers */
if (!of_get_property(np, "codec-handle", NULL))
goto done;
- /* Trigger the machine driver's probe function. The platform driver
- * name of the machine driver is taken from /compatible property of the
- * device tree. We also pass the address of the CPU DAI driver
- * structure.
+ /*
+ * For backward compatibility with older bindings, manually trigger
+ * the machine driver's probe(). Use the /compatible property,
+ * including the address of the CPU DAI driver structure, as the
+ * machine driver's name.
*/
sprop = of_get_property(of_find_node_by_path("/"), "compatible", NULL);
/* Sometimes the compatible name has a "fsl," prefix, so we strip it. */
@@ -1632,34 +1545,31 @@ static int fsl_ssi_probe(struct platform_device *pdev)
snprintf(name, sizeof(name), "snd-soc-%s", sprop);
make_lowercase(name);
- ssi_private->pdev =
- platform_device_register_data(&pdev->dev, name, 0, NULL, 0);
- if (IS_ERR(ssi_private->pdev)) {
- ret = PTR_ERR(ssi_private->pdev);
- dev_err(&pdev->dev, "failed to register platform: %d\n", ret);
+ ssi->pdev = platform_device_register_data(dev, name, 0, NULL, 0);
+ if (IS_ERR(ssi->pdev)) {
+ ret = PTR_ERR(ssi->pdev);
+ dev_err(dev, "failed to register platform: %d\n", ret);
goto error_sound_card;
}
done:
- if (ssi_private->dai_fmt)
- _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
- ssi_private->dai_fmt);
+ if (ssi->dai_fmt)
+ _fsl_ssi_set_dai_fmt(dev, ssi, ssi->dai_fmt);
- if (fsl_ssi_is_ac97(ssi_private)) {
+ if (fsl_ssi_is_ac97(ssi)) {
u32 ssi_idx;
ret = of_property_read_u32(np, "cell-index", &ssi_idx);
if (ret) {
- dev_err(&pdev->dev, "cannot get SSI index property\n");
+ dev_err(dev, "failed to get SSI index property\n");
goto error_sound_card;
}
- ssi_private->pdev =
- platform_device_register_data(NULL,
- "ac97-codec", ssi_idx, NULL, 0);
- if (IS_ERR(ssi_private->pdev)) {
- ret = PTR_ERR(ssi_private->pdev);
- dev_err(&pdev->dev,
+ ssi->pdev = platform_device_register_data(NULL, "ac97-codec",
+ ssi_idx, NULL, 0);
+ if (IS_ERR(ssi->pdev)) {
+ ret = PTR_ERR(ssi->pdev);
+ dev_err(dev,
"failed to register AC97 codec platform: %d\n",
ret);
goto error_sound_card;
@@ -1669,37 +1579,35 @@ done:
return 0;
error_sound_card:
- fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
-
+ fsl_ssi_debugfs_remove(&ssi->dbg_stats);
error_asoc_register:
- if (fsl_ssi_is_ac97(ssi_private))
+ if (fsl_ssi_is_ac97(ssi))
snd_soc_set_ac97_ops(NULL);
-
error_ac97_ops:
- if (fsl_ssi_is_ac97(ssi_private))
- mutex_destroy(&ssi_private->ac97_reg_lock);
+ if (fsl_ssi_is_ac97(ssi))
+ mutex_destroy(&ssi->ac97_reg_lock);
- if (ssi_private->soc->imx)
- fsl_ssi_imx_clean(pdev, ssi_private);
+ if (ssi->soc->imx)
+ fsl_ssi_imx_clean(pdev, ssi);
return ret;
}
static int fsl_ssi_remove(struct platform_device *pdev)
{
- struct fsl_ssi_private *ssi_private = dev_get_drvdata(&pdev->dev);
+ struct fsl_ssi *ssi = dev_get_drvdata(&pdev->dev);
- fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
+ fsl_ssi_debugfs_remove(&ssi->dbg_stats);
- if (ssi_private->pdev)
- platform_device_unregister(ssi_private->pdev);
+ if (ssi->pdev)
+ platform_device_unregister(ssi->pdev);
- if (ssi_private->soc->imx)
- fsl_ssi_imx_clean(pdev, ssi_private);
+ if (ssi->soc->imx)
+ fsl_ssi_imx_clean(pdev, ssi);
- if (fsl_ssi_is_ac97(ssi_private)) {
+ if (fsl_ssi_is_ac97(ssi)) {
snd_soc_set_ac97_ops(NULL);
- mutex_destroy(&ssi_private->ac97_reg_lock);
+ mutex_destroy(&ssi->ac97_reg_lock);
}
return 0;
@@ -1708,13 +1616,11 @@ static int fsl_ssi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int fsl_ssi_suspend(struct device *dev)
{
- struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev);
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = dev_get_drvdata(dev);
+ struct regmap *regs = ssi->regs;
- regmap_read(regs, CCSR_SSI_SFCSR,
- &ssi_private->regcache_sfcsr);
- regmap_read(regs, CCSR_SSI_SACNT,
- &ssi_private->regcache_sacnt);
+ regmap_read(regs, REG_SSI_SFCSR, &ssi->regcache_sfcsr);
+ regmap_read(regs, REG_SSI_SACNT, &ssi->regcache_sacnt);
regcache_cache_only(regs, true);
regcache_mark_dirty(regs);
@@ -1724,17 +1630,16 @@ static int fsl_ssi_suspend(struct device *dev)
static int fsl_ssi_resume(struct device *dev)
{
- struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev);
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = dev_get_drvdata(dev);
+ struct regmap *regs = ssi->regs;
regcache_cache_only(regs, false);
- regmap_update_bits(regs, CCSR_SSI_SFCSR,
- CCSR_SSI_SFCSR_RFWM1_MASK | CCSR_SSI_SFCSR_TFWM1_MASK |
- CCSR_SSI_SFCSR_RFWM0_MASK | CCSR_SSI_SFCSR_TFWM0_MASK,
- ssi_private->regcache_sfcsr);
- regmap_write(regs, CCSR_SSI_SACNT,
- ssi_private->regcache_sacnt);
+ regmap_update_bits(regs, REG_SSI_SFCSR,
+ SSI_SFCSR_RFWM1_MASK | SSI_SFCSR_TFWM1_MASK |
+ SSI_SFCSR_RFWM0_MASK | SSI_SFCSR_TFWM0_MASK,
+ ssi->regcache_sfcsr);
+ regmap_write(regs, REG_SSI_SACNT, ssi->regcache_sacnt);
return regcache_sync(regs);
}
diff --git a/sound/soc/fsl/fsl_ssi.h b/sound/soc/fsl/fsl_ssi.h
index 506510540d0a..de2fdc5db726 100644
--- a/sound/soc/fsl/fsl_ssi.h
+++ b/sound/soc/fsl/fsl_ssi.h
@@ -1,5 +1,5 @@
/*
- * fsl_ssi.h - ALSA SSI interface for the Freescale MPC8610 SoC
+ * fsl_ssi.h - ALSA SSI interface for the Freescale MPC8610 and i.MX SoC
*
* Author: Timur Tabi <timur@freescale.com>
*
@@ -12,198 +12,261 @@
#ifndef _MPC8610_I2S_H
#define _MPC8610_I2S_H
-/* SSI registers */
-#define CCSR_SSI_STX0 0x00
-#define CCSR_SSI_STX1 0x04
-#define CCSR_SSI_SRX0 0x08
-#define CCSR_SSI_SRX1 0x0c
-#define CCSR_SSI_SCR 0x10
-#define CCSR_SSI_SISR 0x14
-#define CCSR_SSI_SIER 0x18
-#define CCSR_SSI_STCR 0x1c
-#define CCSR_SSI_SRCR 0x20
-#define CCSR_SSI_STCCR 0x24
-#define CCSR_SSI_SRCCR 0x28
-#define CCSR_SSI_SFCSR 0x2c
-#define CCSR_SSI_STR 0x30
-#define CCSR_SSI_SOR 0x34
-#define CCSR_SSI_SACNT 0x38
-#define CCSR_SSI_SACADD 0x3c
-#define CCSR_SSI_SACDAT 0x40
-#define CCSR_SSI_SATAG 0x44
-#define CCSR_SSI_STMSK 0x48
-#define CCSR_SSI_SRMSK 0x4c
-#define CCSR_SSI_SACCST 0x50
-#define CCSR_SSI_SACCEN 0x54
-#define CCSR_SSI_SACCDIS 0x58
+#define RX 0
+#define TX 1
-#define CCSR_SSI_SCR_SYNC_TX_FS 0x00001000
-#define CCSR_SSI_SCR_RFR_CLK_DIS 0x00000800
-#define CCSR_SSI_SCR_TFR_CLK_DIS 0x00000400
-#define CCSR_SSI_SCR_TCH_EN 0x00000100
-#define CCSR_SSI_SCR_SYS_CLK_EN 0x00000080
-#define CCSR_SSI_SCR_I2S_MODE_MASK 0x00000060
-#define CCSR_SSI_SCR_I2S_MODE_NORMAL 0x00000000
-#define CCSR_SSI_SCR_I2S_MODE_MASTER 0x00000020
-#define CCSR_SSI_SCR_I2S_MODE_SLAVE 0x00000040
-#define CCSR_SSI_SCR_SYN 0x00000010
-#define CCSR_SSI_SCR_NET 0x00000008
-#define CCSR_SSI_SCR_RE 0x00000004
-#define CCSR_SSI_SCR_TE 0x00000002
-#define CCSR_SSI_SCR_SSIEN 0x00000001
+/* -- SSI Register Map -- */
-#define CCSR_SSI_SISR_RFRC 0x01000000
-#define CCSR_SSI_SISR_TFRC 0x00800000
-#define CCSR_SSI_SISR_CMDAU 0x00040000
-#define CCSR_SSI_SISR_CMDDU 0x00020000
-#define CCSR_SSI_SISR_RXT 0x00010000
-#define CCSR_SSI_SISR_RDR1 0x00008000
-#define CCSR_SSI_SISR_RDR0 0x00004000
-#define CCSR_SSI_SISR_TDE1 0x00002000
-#define CCSR_SSI_SISR_TDE0 0x00001000
-#define CCSR_SSI_SISR_ROE1 0x00000800
-#define CCSR_SSI_SISR_ROE0 0x00000400
-#define CCSR_SSI_SISR_TUE1 0x00000200
-#define CCSR_SSI_SISR_TUE0 0x00000100
-#define CCSR_SSI_SISR_TFS 0x00000080
-#define CCSR_SSI_SISR_RFS 0x00000040
-#define CCSR_SSI_SISR_TLS 0x00000020
-#define CCSR_SSI_SISR_RLS 0x00000010
-#define CCSR_SSI_SISR_RFF1 0x00000008
-#define CCSR_SSI_SISR_RFF0 0x00000004
-#define CCSR_SSI_SISR_TFE1 0x00000002
-#define CCSR_SSI_SISR_TFE0 0x00000001
+/* SSI Transmit Data Register 0 */
+#define REG_SSI_STX0 0x00
+/* SSI Transmit Data Register 1 */
+#define REG_SSI_STX1 0x04
+/* SSI Receive Data Register 0 */
+#define REG_SSI_SRX0 0x08
+/* SSI Receive Data Register 1 */
+#define REG_SSI_SRX1 0x0c
+/* SSI Control Register */
+#define REG_SSI_SCR 0x10
+/* SSI Interrupt Status Register */
+#define REG_SSI_SISR 0x14
+/* SSI Interrupt Enable Register */
+#define REG_SSI_SIER 0x18
+/* SSI Transmit Configuration Register */
+#define REG_SSI_STCR 0x1c
+/* SSI Receive Configuration Register */
+#define REG_SSI_SRCR 0x20
+#define REG_SSI_SxCR(tx) ((tx) ? REG_SSI_STCR : REG_SSI_SRCR)
+/* SSI Transmit Clock Control Register */
+#define REG_SSI_STCCR 0x24
+/* SSI Receive Clock Control Register */
+#define REG_SSI_SRCCR 0x28
+#define REG_SSI_SxCCR(tx) ((tx) ? REG_SSI_STCCR : REG_SSI_SRCCR)
+/* SSI FIFO Control/Status Register */
+#define REG_SSI_SFCSR 0x2c
+/*
+ * SSI Test Register (Intended for debugging purposes only)
+ *
+ * Note: STR is not documented in recent IMX datasheets, but
+ * is described in the IMX51 reference manual at section 56.3.3.14
+ */
+#define REG_SSI_STR 0x30
+/*
+ * SSI Option Register (Intended for internal use only)
+ *
+ * Note: SOR is not documented in recent IMX datasheets, but
+ * is described in the IMX51 reference manual at section 56.3.3.15
+ */
+#define REG_SSI_SOR 0x34
+/* SSI AC97 Control Register */
+#define REG_SSI_SACNT 0x38
+/* SSI AC97 Command Address Register */
+#define REG_SSI_SACADD 0x3c
+/* SSI AC97 Command Data Register */
+#define REG_SSI_SACDAT 0x40
+/* SSI AC97 Tag Register */
+#define REG_SSI_SATAG 0x44
+/* SSI Transmit Time Slot Mask Register */
+#define REG_SSI_STMSK 0x48
+/* SSI Receive Time Slot Mask Register */
+#define REG_SSI_SRMSK 0x4c
+#define REG_SSI_SxMSK(tx) ((tx) ? REG_SSI_STMSK : REG_SSI_SRMSK)
+/*
+ * SSI AC97 Channel Status Register
+ *
+ * The status can be changed by:
+ * 1) Writing a '1' bit at some position in SACCEN sets the relevant bit in SACCST
+ * 2) Writing a '1' bit at some position in SACCDIS clears the relevant bit
+ * 3) Receiving a '1' in the SLOTREQ bit from the external CODEC via the AC Link
+ */
+#define REG_SSI_SACCST 0x50
+/* SSI AC97 Channel Enable Register -- Set bits in SACCST */
+#define REG_SSI_SACCEN 0x54
+/* SSI AC97 Channel Disable Register -- Clear bits in SACCST */
+#define REG_SSI_SACCDIS 0x58
+
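/*
 * Usage sketch (illustrative): the (tx) selector macros above let one
 * code path serve both directions; active_slots_mask is a hypothetical
 * caller-provided bitmap of time slots that carry data:
 *
 *	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
 *	regmap_write(ssi->regs, REG_SSI_SxMSK(tx), ~active_slots_mask);
 */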
+/* -- SSI Register Field Maps -- */
-#define CCSR_SSI_SIER_RFRC_EN 0x01000000
-#define CCSR_SSI_SIER_TFRC_EN 0x00800000
-#define CCSR_SSI_SIER_RDMAE 0x00400000
-#define CCSR_SSI_SIER_RIE 0x00200000
-#define CCSR_SSI_SIER_TDMAE 0x00100000
-#define CCSR_SSI_SIER_TIE 0x00080000
-#define CCSR_SSI_SIER_CMDAU_EN 0x00040000
-#define CCSR_SSI_SIER_CMDDU_EN 0x00020000
-#define CCSR_SSI_SIER_RXT_EN 0x00010000
-#define CCSR_SSI_SIER_RDR1_EN 0x00008000
-#define CCSR_SSI_SIER_RDR0_EN 0x00004000
-#define CCSR_SSI_SIER_TDE1_EN 0x00002000
-#define CCSR_SSI_SIER_TDE0_EN 0x00001000
-#define CCSR_SSI_SIER_ROE1_EN 0x00000800
-#define CCSR_SSI_SIER_ROE0_EN 0x00000400
-#define CCSR_SSI_SIER_TUE1_EN 0x00000200
-#define CCSR_SSI_SIER_TUE0_EN 0x00000100
-#define CCSR_SSI_SIER_TFS_EN 0x00000080
-#define CCSR_SSI_SIER_RFS_EN 0x00000040
-#define CCSR_SSI_SIER_TLS_EN 0x00000020
-#define CCSR_SSI_SIER_RLS_EN 0x00000010
-#define CCSR_SSI_SIER_RFF1_EN 0x00000008
-#define CCSR_SSI_SIER_RFF0_EN 0x00000004
-#define CCSR_SSI_SIER_TFE1_EN 0x00000002
-#define CCSR_SSI_SIER_TFE0_EN 0x00000001
+/* SSI Control Register -- REG_SSI_SCR 0x10 */
+#define SSI_SCR_SYNC_TX_FS 0x00001000
+#define SSI_SCR_RFR_CLK_DIS 0x00000800
+#define SSI_SCR_TFR_CLK_DIS 0x00000400
+#define SSI_SCR_TCH_EN 0x00000100
+#define SSI_SCR_SYS_CLK_EN 0x00000080
+#define SSI_SCR_I2S_MODE_MASK 0x00000060
+#define SSI_SCR_I2S_MODE_NORMAL 0x00000000
+#define SSI_SCR_I2S_MODE_MASTER 0x00000020
+#define SSI_SCR_I2S_MODE_SLAVE 0x00000040
+#define SSI_SCR_SYN 0x00000010
+#define SSI_SCR_NET 0x00000008
+#define SSI_SCR_I2S_NET_MASK (SSI_SCR_NET | SSI_SCR_I2S_MODE_MASK)
+#define SSI_SCR_RE 0x00000004
+#define SSI_SCR_TE 0x00000002
+#define SSI_SCR_SSIEN 0x00000001
-#define CCSR_SSI_STCR_TXBIT0 0x00000200
-#define CCSR_SSI_STCR_TFEN1 0x00000100
-#define CCSR_SSI_STCR_TFEN0 0x00000080
-#define CCSR_SSI_STCR_TFDIR 0x00000040
-#define CCSR_SSI_STCR_TXDIR 0x00000020
-#define CCSR_SSI_STCR_TSHFD 0x00000010
-#define CCSR_SSI_STCR_TSCKP 0x00000008
-#define CCSR_SSI_STCR_TFSI 0x00000004
-#define CCSR_SSI_STCR_TFSL 0x00000002
-#define CCSR_SSI_STCR_TEFS 0x00000001
+/* SSI Interrupt Status Register -- REG_SSI_SISR 0x14 */
+#define SSI_SISR_RFRC 0x01000000
+#define SSI_SISR_TFRC 0x00800000
+#define SSI_SISR_CMDAU 0x00040000
+#define SSI_SISR_CMDDU 0x00020000
+#define SSI_SISR_RXT 0x00010000
+#define SSI_SISR_RDR1 0x00008000
+#define SSI_SISR_RDR0 0x00004000
+#define SSI_SISR_TDE1 0x00002000
+#define SSI_SISR_TDE0 0x00001000
+#define SSI_SISR_ROE1 0x00000800
+#define SSI_SISR_ROE0 0x00000400
+#define SSI_SISR_TUE1 0x00000200
+#define SSI_SISR_TUE0 0x00000100
+#define SSI_SISR_TFS 0x00000080
+#define SSI_SISR_RFS 0x00000040
+#define SSI_SISR_TLS 0x00000020
+#define SSI_SISR_RLS 0x00000010
+#define SSI_SISR_RFF1 0x00000008
+#define SSI_SISR_RFF0 0x00000004
+#define SSI_SISR_TFE1 0x00000002
+#define SSI_SISR_TFE0 0x00000001
-#define CCSR_SSI_SRCR_RXEXT 0x00000400
-#define CCSR_SSI_SRCR_RXBIT0 0x00000200
-#define CCSR_SSI_SRCR_RFEN1 0x00000100
-#define CCSR_SSI_SRCR_RFEN0 0x00000080
-#define CCSR_SSI_SRCR_RFDIR 0x00000040
-#define CCSR_SSI_SRCR_RXDIR 0x00000020
-#define CCSR_SSI_SRCR_RSHFD 0x00000010
-#define CCSR_SSI_SRCR_RSCKP 0x00000008
-#define CCSR_SSI_SRCR_RFSI 0x00000004
-#define CCSR_SSI_SRCR_RFSL 0x00000002
-#define CCSR_SSI_SRCR_REFS 0x00000001
+/* SSI Interrupt Enable Register -- REG_SSI_SIER 0x18 */
+#define SSI_SIER_RFRC_EN 0x01000000
+#define SSI_SIER_TFRC_EN 0x00800000
+#define SSI_SIER_RDMAE 0x00400000
+#define SSI_SIER_RIE 0x00200000
+#define SSI_SIER_TDMAE 0x00100000
+#define SSI_SIER_TIE 0x00080000
+#define SSI_SIER_CMDAU_EN 0x00040000
+#define SSI_SIER_CMDDU_EN 0x00020000
+#define SSI_SIER_RXT_EN 0x00010000
+#define SSI_SIER_RDR1_EN 0x00008000
+#define SSI_SIER_RDR0_EN 0x00004000
+#define SSI_SIER_TDE1_EN 0x00002000
+#define SSI_SIER_TDE0_EN 0x00001000
+#define SSI_SIER_ROE1_EN 0x00000800
+#define SSI_SIER_ROE0_EN 0x00000400
+#define SSI_SIER_TUE1_EN 0x00000200
+#define SSI_SIER_TUE0_EN 0x00000100
+#define SSI_SIER_TFS_EN 0x00000080
+#define SSI_SIER_RFS_EN 0x00000040
+#define SSI_SIER_TLS_EN 0x00000020
+#define SSI_SIER_RLS_EN 0x00000010
+#define SSI_SIER_RFF1_EN 0x00000008
+#define SSI_SIER_RFF0_EN 0x00000004
+#define SSI_SIER_TFE1_EN 0x00000002
+#define SSI_SIER_TFE0_EN 0x00000001
-/* STCCR and SRCCR */
-#define CCSR_SSI_SxCCR_DIV2_SHIFT 18
-#define CCSR_SSI_SxCCR_DIV2 0x00040000
-#define CCSR_SSI_SxCCR_PSR_SHIFT 17
-#define CCSR_SSI_SxCCR_PSR 0x00020000
-#define CCSR_SSI_SxCCR_WL_SHIFT 13
-#define CCSR_SSI_SxCCR_WL_MASK 0x0001E000
-#define CCSR_SSI_SxCCR_WL(x) \
- (((((x) / 2) - 1) << CCSR_SSI_SxCCR_WL_SHIFT) & CCSR_SSI_SxCCR_WL_MASK)
-#define CCSR_SSI_SxCCR_DC_SHIFT 8
-#define CCSR_SSI_SxCCR_DC_MASK 0x00001F00
-#define CCSR_SSI_SxCCR_DC(x) \
- ((((x) - 1) << CCSR_SSI_SxCCR_DC_SHIFT) & CCSR_SSI_SxCCR_DC_MASK)
-#define CCSR_SSI_SxCCR_PM_SHIFT 0
-#define CCSR_SSI_SxCCR_PM_MASK 0x000000FF
-#define CCSR_SSI_SxCCR_PM(x) \
- ((((x) - 1) << CCSR_SSI_SxCCR_PM_SHIFT) & CCSR_SSI_SxCCR_PM_MASK)
+/* SSI Transmit Configuration Register -- REG_SSI_STCR 0x1C */
+#define SSI_STCR_TXBIT0 0x00000200
+#define SSI_STCR_TFEN1 0x00000100
+#define SSI_STCR_TFEN0 0x00000080
+#define SSI_STCR_TFDIR 0x00000040
+#define SSI_STCR_TXDIR 0x00000020
+#define SSI_STCR_TSHFD 0x00000010
+#define SSI_STCR_TSCKP 0x00000008
+#define SSI_STCR_TFSI 0x00000004
+#define SSI_STCR_TFSL 0x00000002
+#define SSI_STCR_TEFS 0x00000001
+
+/* SSI Receive Configuration Register -- REG_SSI_SRCR 0x20 */
+#define SSI_SRCR_RXEXT 0x00000400
+#define SSI_SRCR_RXBIT0 0x00000200
+#define SSI_SRCR_RFEN1 0x00000100
+#define SSI_SRCR_RFEN0 0x00000080
+#define SSI_SRCR_RFDIR 0x00000040
+#define SSI_SRCR_RXDIR 0x00000020
+#define SSI_SRCR_RSHFD 0x00000010
+#define SSI_SRCR_RSCKP 0x00000008
+#define SSI_SRCR_RFSI 0x00000004
+#define SSI_SRCR_RFSL 0x00000002
+#define SSI_SRCR_REFS 0x00000001
/*
- * The xFCNT bits are read-only, and the xFWM bits are read/write. Use the
- * CCSR_SSI_SFCSR_xFCNTy() macros to read the FIFO counters, and use the
- * CCSR_SSI_SFCSR_xFWMy() macros to set the watermarks.
+ * SSI Transmit Clock Control Register -- REG_SSI_STCCR 0x24
+ * SSI Receive Clock Control Register -- REG_SSI_SRCCR 0x28
+ */
+#define SSI_SxCCR_DIV2_SHIFT 18
+#define SSI_SxCCR_DIV2 0x00040000
+#define SSI_SxCCR_PSR_SHIFT 17
+#define SSI_SxCCR_PSR 0x00020000
+#define SSI_SxCCR_WL_SHIFT 13
+#define SSI_SxCCR_WL_MASK 0x0001E000
+#define SSI_SxCCR_WL(x) \
+ (((((x) / 2) - 1) << SSI_SxCCR_WL_SHIFT) & SSI_SxCCR_WL_MASK)
+#define SSI_SxCCR_DC_SHIFT 8
+#define SSI_SxCCR_DC_MASK 0x00001F00
+#define SSI_SxCCR_DC(x) \
+ ((((x) - 1) << SSI_SxCCR_DC_SHIFT) & SSI_SxCCR_DC_MASK)
+#define SSI_SxCCR_PM_SHIFT 0
+#define SSI_SxCCR_PM_MASK 0x000000FF
+#define SSI_SxCCR_PM(x) \
+ ((((x) - 1) << SSI_SxCCR_PM_SHIFT) & SSI_SxCCR_PM_MASK)
+
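/*
 * Worked example (illustrative): WL encodes half the word length
 * minus one, while DC and PM encode the value minus one, so an I2S
 * stereo stream with 16-bit words and a prescaler modulus of 4 gives:
 *
 *	SSI_SxCCR_WL(16) = ((16 / 2 - 1) << 13) & 0x0001E000 = 0x0000E000
 *	SSI_SxCCR_DC(2)  = ((2 - 1) << 8) & 0x00001F00 = 0x00000100
 *	SSI_SxCCR_PM(4)  = ((4 - 1) << 0) & 0x000000FF = 0x00000003
 */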
+/*
+ * SSI FIFO Control/Status Register -- REG_SSI_SFCSR 0x2c
+ *
+ * Tx or Rx FIFO Counter -- SSI_SFCSR_xFCNTy Read-Only
+ * Tx or Rx FIFO Watermarks -- SSI_SFCSR_xFWMy Read/Write
*/
-#define CCSR_SSI_SFCSR_RFCNT1_SHIFT 28
-#define CCSR_SSI_SFCSR_RFCNT1_MASK 0xF0000000
-#define CCSR_SSI_SFCSR_RFCNT1(x) \
- (((x) & CCSR_SSI_SFCSR_RFCNT1_MASK) >> CCSR_SSI_SFCSR_RFCNT1_SHIFT)
-#define CCSR_SSI_SFCSR_TFCNT1_SHIFT 24
-#define CCSR_SSI_SFCSR_TFCNT1_MASK 0x0F000000
-#define CCSR_SSI_SFCSR_TFCNT1(x) \
- (((x) & CCSR_SSI_SFCSR_TFCNT1_MASK) >> CCSR_SSI_SFCSR_TFCNT1_SHIFT)
-#define CCSR_SSI_SFCSR_RFWM1_SHIFT 20
-#define CCSR_SSI_SFCSR_RFWM1_MASK 0x00F00000
-#define CCSR_SSI_SFCSR_RFWM1(x) \
- (((x) << CCSR_SSI_SFCSR_RFWM1_SHIFT) & CCSR_SSI_SFCSR_RFWM1_MASK)
-#define CCSR_SSI_SFCSR_TFWM1_SHIFT 16
-#define CCSR_SSI_SFCSR_TFWM1_MASK 0x000F0000
-#define CCSR_SSI_SFCSR_TFWM1(x) \
- (((x) << CCSR_SSI_SFCSR_TFWM1_SHIFT) & CCSR_SSI_SFCSR_TFWM1_MASK)
-#define CCSR_SSI_SFCSR_RFCNT0_SHIFT 12
-#define CCSR_SSI_SFCSR_RFCNT0_MASK 0x0000F000
-#define CCSR_SSI_SFCSR_RFCNT0(x) \
- (((x) & CCSR_SSI_SFCSR_RFCNT0_MASK) >> CCSR_SSI_SFCSR_RFCNT0_SHIFT)
-#define CCSR_SSI_SFCSR_TFCNT0_SHIFT 8
-#define CCSR_SSI_SFCSR_TFCNT0_MASK 0x00000F00
-#define CCSR_SSI_SFCSR_TFCNT0(x) \
- (((x) & CCSR_SSI_SFCSR_TFCNT0_MASK) >> CCSR_SSI_SFCSR_TFCNT0_SHIFT)
-#define CCSR_SSI_SFCSR_RFWM0_SHIFT 4
-#define CCSR_SSI_SFCSR_RFWM0_MASK 0x000000F0
-#define CCSR_SSI_SFCSR_RFWM0(x) \
- (((x) << CCSR_SSI_SFCSR_RFWM0_SHIFT) & CCSR_SSI_SFCSR_RFWM0_MASK)
-#define CCSR_SSI_SFCSR_TFWM0_SHIFT 0
-#define CCSR_SSI_SFCSR_TFWM0_MASK 0x0000000F
-#define CCSR_SSI_SFCSR_TFWM0(x) \
- (((x) << CCSR_SSI_SFCSR_TFWM0_SHIFT) & CCSR_SSI_SFCSR_TFWM0_MASK)
+#define SSI_SFCSR_RFCNT1_SHIFT 28
+#define SSI_SFCSR_RFCNT1_MASK 0xF0000000
+#define SSI_SFCSR_RFCNT1(x) \
+ (((x) & SSI_SFCSR_RFCNT1_MASK) >> SSI_SFCSR_RFCNT1_SHIFT)
+#define SSI_SFCSR_TFCNT1_SHIFT 24
+#define SSI_SFCSR_TFCNT1_MASK 0x0F000000
+#define SSI_SFCSR_TFCNT1(x) \
+ (((x) & SSI_SFCSR_TFCNT1_MASK) >> SSI_SFCSR_TFCNT1_SHIFT)
+#define SSI_SFCSR_RFWM1_SHIFT 20
+#define SSI_SFCSR_RFWM1_MASK 0x00F00000
+#define SSI_SFCSR_RFWM1(x) \
+ (((x) << SSI_SFCSR_RFWM1_SHIFT) & SSI_SFCSR_RFWM1_MASK)
+#define SSI_SFCSR_TFWM1_SHIFT 16
+#define SSI_SFCSR_TFWM1_MASK 0x000F0000
+#define SSI_SFCSR_TFWM1(x) \
+ (((x) << SSI_SFCSR_TFWM1_SHIFT) & SSI_SFCSR_TFWM1_MASK)
+#define SSI_SFCSR_RFCNT0_SHIFT 12
+#define SSI_SFCSR_RFCNT0_MASK 0x0000F000
+#define SSI_SFCSR_RFCNT0(x) \
+ (((x) & SSI_SFCSR_RFCNT0_MASK) >> SSI_SFCSR_RFCNT0_SHIFT)
+#define SSI_SFCSR_TFCNT0_SHIFT 8
+#define SSI_SFCSR_TFCNT0_MASK 0x00000F00
+#define SSI_SFCSR_TFCNT0(x) \
+ (((x) & SSI_SFCSR_TFCNT0_MASK) >> SSI_SFCSR_TFCNT0_SHIFT)
+#define SSI_SFCSR_RFWM0_SHIFT 4
+#define SSI_SFCSR_RFWM0_MASK 0x000000F0
+#define SSI_SFCSR_RFWM0(x) \
+ (((x) << SSI_SFCSR_RFWM0_SHIFT) & SSI_SFCSR_RFWM0_MASK)
+#define SSI_SFCSR_TFWM0_SHIFT 0
+#define SSI_SFCSR_TFWM0_MASK 0x0000000F
+#define SSI_SFCSR_TFWM0(x) \
+ (((x) << SSI_SFCSR_TFWM0_SHIFT) & SSI_SFCSR_TFWM0_MASK)
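/*
 * Worked example (illustrative): the watermark of 8 that fsl_ssi.c
 * programs for both FIFOs in both directions composes to:
 *
 *	SSI_SFCSR_TFWM0(8) | SSI_SFCSR_RFWM0(8) |
 *	SSI_SFCSR_TFWM1(8) | SSI_SFCSR_RFWM1(8) == 0x00880088
 */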
-#define CCSR_SSI_STR_TEST 0x00008000
-#define CCSR_SSI_STR_RCK2TCK 0x00004000
-#define CCSR_SSI_STR_RFS2TFS 0x00002000
-#define CCSR_SSI_STR_RXSTATE(x) (((x) >> 8) & 0x1F)
-#define CCSR_SSI_STR_TXD2RXD 0x00000080
-#define CCSR_SSI_STR_TCK2RCK 0x00000040
-#define CCSR_SSI_STR_TFS2RFS 0x00000020
-#define CCSR_SSI_STR_TXSTATE(x) ((x) & 0x1F)
+/* SSI Test Register -- REG_SSI_STR 0x30 */
+#define SSI_STR_TEST 0x00008000
+#define SSI_STR_RCK2TCK 0x00004000
+#define SSI_STR_RFS2TFS 0x00002000
+#define SSI_STR_RXSTATE(x) (((x) >> 8) & 0x1F)
+#define SSI_STR_TXD2RXD 0x00000080
+#define SSI_STR_TCK2RCK 0x00000040
+#define SSI_STR_TFS2RFS 0x00000020
+#define SSI_STR_TXSTATE(x) ((x) & 0x1F)
-#define CCSR_SSI_SOR_CLKOFF 0x00000040
-#define CCSR_SSI_SOR_RX_CLR 0x00000020
-#define CCSR_SSI_SOR_TX_CLR 0x00000010
-#define CCSR_SSI_SOR_INIT 0x00000008
-#define CCSR_SSI_SOR_WAIT_SHIFT 1
-#define CCSR_SSI_SOR_WAIT_MASK 0x00000006
-#define CCSR_SSI_SOR_WAIT(x) (((x) & 3) << CCSR_SSI_SOR_WAIT_SHIFT)
-#define CCSR_SSI_SOR_SYNRST 0x00000001
+/* SSI Option Register -- REG_SSI_SOR 0x34 */
+#define SSI_SOR_CLKOFF 0x00000040
+#define SSI_SOR_RX_CLR 0x00000020
+#define SSI_SOR_TX_CLR 0x00000010
+#define SSI_SOR_xX_CLR(tx) ((tx) ? SSI_SOR_TX_CLR : SSI_SOR_RX_CLR)
+#define SSI_SOR_INIT 0x00000008
+#define SSI_SOR_WAIT_SHIFT 1
+#define SSI_SOR_WAIT_MASK 0x00000006
+#define SSI_SOR_WAIT(x) (((x) & 3) << SSI_SOR_WAIT_SHIFT)
+#define SSI_SOR_SYNRST 0x00000001
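/*
 * Usage sketch (illustrative): the xX_CLR selector mirrors the AC97
 * FIFO clearing done in the trigger path, e.g. for a playback (TX)
 * stream:
 *
 *	regmap_write(regs, REG_SSI_SOR, SSI_SOR_xX_CLR(true));
 */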
-#define CCSR_SSI_SACNT_FRDIV(x) (((x) & 0x3f) << 5)
-#define CCSR_SSI_SACNT_WR 0x00000010
-#define CCSR_SSI_SACNT_RD 0x00000008
-#define CCSR_SSI_SACNT_RDWR_MASK 0x00000018
-#define CCSR_SSI_SACNT_TIF 0x00000004
-#define CCSR_SSI_SACNT_FV 0x00000002
-#define CCSR_SSI_SACNT_AC97EN 0x00000001
+/* SSI AC97 Control Register -- REG_SSI_SACNT 0x38 */
+#define SSI_SACNT_FRDIV(x) (((x) & 0x3f) << 5)
+#define SSI_SACNT_WR 0x00000010
+#define SSI_SACNT_RD 0x00000008
+#define SSI_SACNT_RDWR_MASK 0x00000018
+#define SSI_SACNT_TIF 0x00000004
+#define SSI_SACNT_FV 0x00000002
+#define SSI_SACNT_AC97EN 0x00000001
struct device;
@@ -255,7 +318,7 @@ static inline void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *stats, u32 sisr)
}
static inline int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg,
- struct device *dev)
+ struct device *dev)
{
return 0;
}
diff --git a/sound/soc/fsl/fsl_ssi_dbg.c b/sound/soc/fsl/fsl_ssi_dbg.c
index 5469ffbc0253..7aac63e2c561 100644
--- a/sound/soc/fsl/fsl_ssi_dbg.c
+++ b/sound/soc/fsl/fsl_ssi_dbg.c
@@ -18,86 +18,86 @@
void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *dbg, u32 sisr)
{
- if (sisr & CCSR_SSI_SISR_RFRC)
+ if (sisr & SSI_SISR_RFRC)
dbg->stats.rfrc++;
- if (sisr & CCSR_SSI_SISR_TFRC)
+ if (sisr & SSI_SISR_TFRC)
dbg->stats.tfrc++;
- if (sisr & CCSR_SSI_SISR_CMDAU)
+ if (sisr & SSI_SISR_CMDAU)
dbg->stats.cmdau++;
- if (sisr & CCSR_SSI_SISR_CMDDU)
+ if (sisr & SSI_SISR_CMDDU)
dbg->stats.cmddu++;
- if (sisr & CCSR_SSI_SISR_RXT)
+ if (sisr & SSI_SISR_RXT)
dbg->stats.rxt++;
- if (sisr & CCSR_SSI_SISR_RDR1)
+ if (sisr & SSI_SISR_RDR1)
dbg->stats.rdr1++;
- if (sisr & CCSR_SSI_SISR_RDR0)
+ if (sisr & SSI_SISR_RDR0)
dbg->stats.rdr0++;
- if (sisr & CCSR_SSI_SISR_TDE1)
+ if (sisr & SSI_SISR_TDE1)
dbg->stats.tde1++;
- if (sisr & CCSR_SSI_SISR_TDE0)
+ if (sisr & SSI_SISR_TDE0)
dbg->stats.tde0++;
- if (sisr & CCSR_SSI_SISR_ROE1)
+ if (sisr & SSI_SISR_ROE1)
dbg->stats.roe1++;
- if (sisr & CCSR_SSI_SISR_ROE0)
+ if (sisr & SSI_SISR_ROE0)
dbg->stats.roe0++;
- if (sisr & CCSR_SSI_SISR_TUE1)
+ if (sisr & SSI_SISR_TUE1)
dbg->stats.tue1++;
- if (sisr & CCSR_SSI_SISR_TUE0)
+ if (sisr & SSI_SISR_TUE0)
dbg->stats.tue0++;
- if (sisr & CCSR_SSI_SISR_TFS)
+ if (sisr & SSI_SISR_TFS)
dbg->stats.tfs++;
- if (sisr & CCSR_SSI_SISR_RFS)
+ if (sisr & SSI_SISR_RFS)
dbg->stats.rfs++;
- if (sisr & CCSR_SSI_SISR_TLS)
+ if (sisr & SSI_SISR_TLS)
dbg->stats.tls++;
- if (sisr & CCSR_SSI_SISR_RLS)
+ if (sisr & SSI_SISR_RLS)
dbg->stats.rls++;
- if (sisr & CCSR_SSI_SISR_RFF1)
+ if (sisr & SSI_SISR_RFF1)
dbg->stats.rff1++;
- if (sisr & CCSR_SSI_SISR_RFF0)
+ if (sisr & SSI_SISR_RFF0)
dbg->stats.rff0++;
- if (sisr & CCSR_SSI_SISR_TFE1)
+ if (sisr & SSI_SISR_TFE1)
dbg->stats.tfe1++;
- if (sisr & CCSR_SSI_SISR_TFE0)
+ if (sisr & SSI_SISR_TFE0)
dbg->stats.tfe0++;
}
-/* Show the statistics of a flag only if its interrupt is enabled. The
- * compiler will optimze this code to a no-op if the interrupt is not
- * enabled.
+/**
+ * Show the statistics of a flag only if its interrupt is enabled
+ *
+ * Compilers will optimize it to a no-op if the interrupt is disabled
*/
#define SIER_SHOW(flag, name) \
do { \
- if (CCSR_SSI_SIER_##flag) \
+ if (SSI_SIER_##flag) \
seq_printf(s, #name "=%u\n", ssi_dbg->stats.name); \
} while (0)
/**
- * fsl_sysfs_ssi_show: display SSI statistics
+ * Display the statistics for the current SSI device
*
- * Display the statistics for the current SSI device. To avoid confusion,
- * we only show those counts that are enabled.
+ * To avoid confusion, only show those counts that are enabled
*/
static int fsl_ssi_stats_show(struct seq_file *s, void *unused)
{
@@ -147,7 +147,8 @@ int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg, struct device *dev)
return -ENOMEM;
ssi_dbg->dbg_stats = debugfs_create_file("stats", S_IRUGO,
- ssi_dbg->dbg_dir, ssi_dbg, &fsl_ssi_stats_ops);
+ ssi_dbg->dbg_dir, ssi_dbg,
+ &fsl_ssi_stats_ops);
if (!ssi_dbg->dbg_stats) {
debugfs_remove(ssi_dbg->dbg_dir);
return -ENOMEM;
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
index 0c8f86d4020e..07a57209e055 100644
--- a/sound/soc/hisilicon/hi6210-i2s.c
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -36,7 +36,6 @@
#include <linux/of_irq.h>
#include <linux/mfd/syscon.h>
#include <linux/reset-controller.h>
-#include <linux/clk.h>
#include "hi6210-i2s.h"
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 7b49d04e3c60..f2c9e8c5970a 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -1,71 +1,122 @@
+config SND_SOC_INTEL_SST_TOPLEVEL
+ bool "Intel ASoC SST drivers"
+ default y
+ depends on X86 || COMPILE_TEST
+ select SND_SOC_INTEL_MACH
+ help
+ Intel ASoC SST Platform Drivers. If you have an Intel machine that
+ has an audio controller with a DSP and I2S or DMIC port, then
+ enable this option by saying Y
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Intel SST drivers.
+
+if SND_SOC_INTEL_SST_TOPLEVEL
+
config SND_SST_IPC
tristate
+ # This option controls the IPC core for HiFi2 platforms
config SND_SST_IPC_PCI
tristate
select SND_SST_IPC
+ # This option controls the PCI-based IPC for HiFi2 platforms
+ # (Medfield, Merrifield).
config SND_SST_IPC_ACPI
tristate
select SND_SST_IPC
- select SND_SOC_INTEL_SST
- select IOSF_MBI
+ # This option controls the ACPI-based IPC for HiFi2 platforms
+ # (Baytrail, Cherrytrail)
-config SND_SOC_INTEL_COMMON
+config SND_SOC_INTEL_SST_ACPI
tristate
+ # This option controls ACPI-based probing on
+ # Haswell/Broadwell/Baytrail legacy and will be set
+ # when these platforms are enabled
config SND_SOC_INTEL_SST
tristate
- select SND_SOC_INTEL_SST_ACPI if ACPI
config SND_SOC_INTEL_SST_FIRMWARE
tristate
select DW_DMAC_CORE
-
-config SND_SOC_INTEL_SST_ACPI
- tristate
-
-config SND_SOC_ACPI_INTEL_MATCH
- tristate
- select SND_SOC_ACPI if ACPI
-
-config SND_SOC_INTEL_SST_TOPLEVEL
- tristate "Intel ASoC SST drivers"
- depends on X86 || COMPILE_TEST
- select SND_SOC_INTEL_MACH
- select SND_SOC_INTEL_COMMON
- help
- Intel ASoC Audio Drivers. If you have a Intel machine that
- has audio controller with a DSP and I2S or DMIC port, then
- enable this option by saying Y or M
- If unsure select "N".
+ # This option controls firmware download on
+ # Haswell/Broadwell/Baytrail legacy and will be set
+ # when these platforms are enabled
config SND_SOC_INTEL_HASWELL
- tristate "Intel ASoC SST driver for Haswell/Broadwell"
- depends on SND_SOC_INTEL_SST_TOPLEVEL && SND_DMA_SGBUF
- depends on DMADEVICES
+ tristate "Haswell/Broadwell Platforms"
+ depends on SND_DMA_SGBUF
+ depends on DMADEVICES && ACPI
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_ACPI
select SND_SOC_INTEL_SST_FIRMWARE
+ select SND_SOC_ACPI_INTEL_MATCH
+ help
+ If you have an Intel Haswell or Broadwell platform connected to
+ an I2S codec, then enable this option by saying Y or m. This is
+ typically used for Chromebooks. This is a recommended option.
config SND_SOC_INTEL_BAYTRAIL
- tristate "Intel ASoC SST driver for Baytrail (legacy)"
- depends on SND_SOC_INTEL_SST_TOPLEVEL
- depends on DMADEVICES
+ tristate "Baytrail (legacy) Platforms"
+ depends on DMADEVICES && ACPI
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_ACPI
select SND_SOC_INTEL_SST_FIRMWARE
+ select SND_SOC_ACPI_INTEL_MATCH
+ help
+ If you have an Intel Baytrail platform connected to an I2S codec,
+ then enable this option by saying Y or m. This was typically used
+ for Baytrail Chromebooks but this option is now deprecated and is
+ not recommended; use SND_SST_ATOM_HIFI2_PLATFORM instead.
+
+config SND_SST_ATOM_HIFI2_PLATFORM_PCI
+ tristate "PCI HiFi2 (Medfield, Merrifield) Platforms"
+ depends on X86 && PCI
+ select SND_SST_IPC_PCI
+ select SND_SOC_COMPRESS
+ help
+ If you have an Intel Medfield or Merrifield/Edison platform, then
+ enable this option by saying Y or m. Distros will typically not
+ enable this option: Medfield devices are not available to
+ developers and, while Merrifield/Edison can run a mainline kernel with
+ limited functionality, it will require a firmware file which
+ is not in the standard firmware tree.
config SND_SST_ATOM_HIFI2_PLATFORM
- tristate "Intel ASoC SST driver for HiFi2 platforms (*field, *trail)"
- depends on SND_SOC_INTEL_SST_TOPLEVEL && X86
+ tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
+ depends on X86 && ACPI
+ select SND_SST_IPC_ACPI
select SND_SOC_COMPRESS
+ select SND_SOC_ACPI_INTEL_MATCH
+ select IOSF_MBI
+ help
+ If you have an Intel Baytrail or Cherrytrail platform with an I2S
+ codec, then enable this option by saying Y or m. This is a
+ recommended option.
config SND_SOC_INTEL_SKYLAKE
- tristate "Intel ASoC SST driver for SKL/BXT/KBL/GLK/CNL"
- depends on SND_SOC_INTEL_SST_TOPLEVEL && PCI && ACPI
+ tristate "SKL/BXT/KBL/GLK/CNL... Platforms"
+ depends on PCI && ACPI
select SND_HDA_EXT_CORE
select SND_HDA_DSP_LOADER
select SND_SOC_TOPOLOGY
select SND_SOC_INTEL_SST
+ select SND_SOC_ACPI_INTEL_MATCH
+ help
+ If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
+ GeminiLake or CannonLake platform with the DSP enabled in the BIOS,
+ then enable this option by saying Y or m.
+
+config SND_SOC_ACPI_INTEL_MATCH
+ tristate
+ select SND_SOC_ACPI if ACPI
+ # This option controls the compilation of ACPI matching tables and
+ # helpers and is not meant to be selected by the user.
+
+endif ## SND_SOC_INTEL_SST_TOPLEVEL
# ASoC codec drivers
source "sound/soc/intel/boards/Kconfig"
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index b973d457e834..8160520fd74c 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Core support
-obj-$(CONFIG_SND_SOC_INTEL_COMMON) += common/
+obj-$(CONFIG_SND_SOC) += common/
# Platform Support
obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 32d6e02e2104..6cd481bec275 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -236,6 +236,9 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
/* Find the IRQ */
ctx->irq_num = platform_get_irq(pdev,
ctx->pdata->res_info->acpi_ipc_irq_index);
+ if (ctx->irq_num <= 0)
+ return ctx->irq_num < 0 ? ctx->irq_num : -EIO;
+
return 0;
}
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index 65e257b17a7e..7ee6aeb7e0af 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -220,10 +220,10 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
sst_free_block(sst_drv_ctx, block);
out:
test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
- return 0;
+ return ret;
}
-/*
+/**
* sst_pause_stream - Send msg for a pausing stream
* @str_id: stream ID
*
@@ -261,7 +261,7 @@ int sst_pause_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
}
} else {
retval = -EBADRQC;
- dev_dbg(sst_drv_ctx->dev, "SST DBG:BADRQC for stream\n ");
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:BADRQC for stream\n");
}
return retval;
@@ -284,7 +284,7 @@ int sst_resume_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
if (!str_info)
return -EINVAL;
if (str_info->status == STREAM_RUNNING)
- return 0;
+ return 0;
if (str_info->status == STREAM_PAUSED) {
retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id,
IPC_CMD, IPC_IA_RESUME_STREAM_MRFLD,
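
Two small fixes ride along in this hunk: sst_send_byte_stream_mrfld() now propagates its real return value instead of hard-coding 0, and switching the comment opener from /* to /** opts the block into kernel-doc. A minimal sketch of the layout a /** opener expects (hypothetical function):

/**
 * example_pause_stream - send a pause message for a stream
 * @str_id: stream ID
 *
 * Return: 0 on success, a negative errno on failure.
 */
int example_pause_stream(int str_id);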
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 6f754708a48c..d4e103615f51 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -1,183 +1,183 @@
-config SND_SOC_INTEL_MACH
- tristate "Intel Audio machine drivers"
+menuconfig SND_SOC_INTEL_MACH
+ bool "Intel Machine drivers"
depends on SND_SOC_INTEL_SST_TOPLEVEL
- select SND_SOC_ACPI_INTEL_MATCH if ACPI
+ help
+ Intel ASoC Machine Drivers. If you have an Intel machine that
+ has an audio controller with a DSP and I2S or DMIC port, then
+ enable this option by saying Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Intel ASoC machine drivers.
if SND_SOC_INTEL_MACH
-config SND_MFLD_MACHINE
- tristate "SOC Machine Audio driver for Intel Medfield MID platform"
- depends on INTEL_SCU_IPC
- select SND_SOC_SN95031
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_PCI
- help
- This adds support for ASoC machine driver for Intel(R) MID Medfield platform
- used as alsa device in audio substem in Intel(R) MID devices
- Say Y if you have such a device.
- If unsure select "N".
+if SND_SOC_INTEL_HASWELL
config SND_SOC_INTEL_HASWELL_MACH
- tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
+ tristate "Haswell Lynxpoint"
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
- depends on SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
help
This adds support for the Lynxpoint Audio DSP on Intel(R) Haswell
- Ultrabook platforms.
- Say Y if you have such a device.
+ Ultrabook platforms. This is a recommended option.
+ Say Y or m if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_BDW_RT5677_MACH
- tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
- depends on X86_INTEL_LPSS && GPIOLIB && I2C
- depends on SND_SOC_INTEL_HASWELL
+ tristate "Broadwell with RT5677 codec"
+ depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM && GPIOLIB
select SND_SOC_RT5677
help
This adds support for Intel Broadwell platform based boards with
- the RT5677 audio codec.
+ the RT5677 audio codec. This is a recommended option.
+ Say Y or m if you have such a device.
+ If unsure select "N".
config SND_SOC_INTEL_BROADWELL_MACH
- tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
+ tristate "Broadwell Wildcatpoint"
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
- depends on SND_SOC_INTEL_HASWELL
select SND_SOC_RT286
help
This adds support for the Wildcatpoint Audio DSP on Intel(R) Broadwell
Ultrabook platforms.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
+endif ## SND_SOC_INTEL_HASWELL
+
+if SND_SOC_INTEL_BAYTRAIL
config SND_SOC_INTEL_BYT_MAX98090_MACH
- tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
+ tristate "Baytrail with MAX98090 codec"
depends on X86_INTEL_LPSS && I2C
- depends on SND_SST_IPC_ACPI = n
- depends on SND_SOC_INTEL_BAYTRAIL
select SND_SOC_MAX98090
help
This adds audio driver for Intel Baytrail platform based boards
- with the MAX98090 audio codec.
+ with the MAX98090 audio codec. This driver is deprecated, use
+ SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH instead for better
+ functionality.
config SND_SOC_INTEL_BYT_RT5640_MACH
- tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
+ tristate "Baytrail with RT5640 codec"
depends on X86_INTEL_LPSS && I2C
- depends on SND_SST_IPC_ACPI = n
- depends on SND_SOC_INTEL_BAYTRAIL
select SND_SOC_RT5640
help
This adds audio driver for Intel Baytrail platform based boards
with the RT5640 audio codec. This driver is deprecated, use
SND_SOC_INTEL_BYTCR_RT5640_MACH instead for better functionality.
+endif ## SND_SOC_INTEL_BAYTRAIL
+
+if SND_SST_ATOM_HIFI2_PLATFORM
+
config SND_SOC_INTEL_BYTCR_RT5640_MACH
- tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5640 codec"
- depends on X86 && I2C && ACPI
+ tristate "Baytrail and Baytrail-CR with RT5640 codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_RT5640
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
- This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
- platforms with RT5640 audio codec.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
+ platforms with RT5640 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_BYTCR_RT5651_MACH
- tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5651 codec"
- depends on X86 && I2C && ACPI
+ tristate "Baytrail and Baytrail-CR with RT5651 codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_RT5651
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
- This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
- platforms with RT5651 audio codec.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
+ platforms with RT5651 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
- tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
+ tristate "Cherrytrail & Braswell with RT5672 codec"
depends on X86_INTEL_LPSS && I2C && ACPI
- select SND_SOC_RT5670
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
+ select SND_SOC_ACPI
+ select SND_SOC_RT5670
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with RT5672 audio codec.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
- tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645/5650 codec"
+ tristate "Cherrytrail & Braswell with RT5645/5650 codec"
depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_RT5645
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with RT5645/5650 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
- tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with MAX98090 & TI codec"
+ tristate "Cherrytrail & Braswell with MAX98090 & TI codec"
depends on X86_INTEL_LPSS && I2C && ACPI
select SND_SOC_MAX98090
select SND_SOC_TS3A227E
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with the MAX98090 audio codec; it can also support the TI jack chip as an aux device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BYT_CHT_DA7213_MACH
- tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with DA7212/7213 codec"
+ tristate "Baytrail & Cherrytrail with DA7212/7213 codec"
depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_DA7213
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Baytrail & CherryTrail
platforms with DA7212/7213 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BYT_CHT_ES8316_MACH
- tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with ES8316 codec"
+ tristate "Baytrail & Cherrytrail with ES8316 codec"
depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_ES8316
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Baytrail &
Cherrytrail platforms with ES8316 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH
- tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
+ tristate "Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
depends on X86_INTEL_LPSS && I2C && ACPI
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for the MinnowBoard Max or
Up boards and provides access to I2S signals on the Low-Speed
- connector
+ connector. This is not a recommended option outside of these cases.
+ It is not intended to be enabled by distros by default.
+ Say Y or m if you have such a device.
+
If unsure select "N".
+endif ## SND_SST_ATOM_HIFI2_PLATFORM
+
+if SND_SOC_INTEL_SKYLAKE
+
config SND_SOC_INTEL_SKL_RT286_MACH
- tristate "ASoC Audio driver for SKL with RT286 I2S mode"
- depends on X86 && ACPI && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "SKL with RT286 I2S mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT286
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
help
This adds support for ASoC machine driver for Skylake platforms
with RT286 I2S audio codec.
- Say Y if you have such a device.
+ Say Y or m if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
- tristate "ASoC Audio driver for SKL with NAU88L25 and SSM4567 in I2S Mode"
- depends on X86_INTEL_LPSS && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "SKL with NAU88L25 and SSM4567 in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_NAU8825
select SND_SOC_SSM4567
select SND_SOC_DMIC
@@ -185,13 +185,12 @@ config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for NAU88L25 + SSM4567.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
- tristate "ASoC Audio driver for SKL with NAU88L25 and MAX98357A in I2S Mode"
- depends on X86_INTEL_LPSS && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "SKL with NAU88L25 and MAX98357A in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_NAU8825
select SND_SOC_MAX98357A
select SND_SOC_DMIC
@@ -199,13 +198,12 @@ config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for NAU88L25 + MAX98357A.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
- tristate "ASoC Audio driver for Broxton with DA7219 and MAX98357A in I2S Mode"
- depends on X86 && ACPI && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "Broxton with DA7219 and MAX98357A in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_DA7219
select SND_SOC_MAX98357A
select SND_SOC_DMIC
@@ -214,13 +212,12 @@ config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
help
This adds support for ASoC machine driver for Broxton-P platforms
with DA7219 + MAX98357A I2S audio codec.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BXT_RT298_MACH
- tristate "ASoC Audio driver for Broxton with RT298 I2S mode"
- depends on X86 && ACPI && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "Broxton with RT298 I2S mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT298
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
@@ -228,14 +225,12 @@ config SND_SOC_INTEL_BXT_RT298_MACH
help
This adds support for ASoC machine driver for Broxton platforms
with RT298 I2S audio codec.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
- tristate "ASoC Audio driver for KBL with RT5663 and MAX98927 in I2S Mode"
- depends on X86_INTEL_LPSS && I2C
- select SND_SOC_INTEL_SST
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "KBL with RT5663 and MAX98927 in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT5663
select SND_SOC_MAX98927
select SND_SOC_DMIC
@@ -243,14 +238,13 @@ config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for RT5663 + MAX98927.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
- tristate "ASoC Audio driver for KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
- depends on X86_INTEL_LPSS && I2C && SPI
- select SND_SOC_INTEL_SST
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
+ depends on SPI
select SND_SOC_RT5663
select SND_SOC_RT5514
select SND_SOC_RT5514_SPI
@@ -259,7 +253,8 @@ config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for RT5663 + RT5514 + MAX98927.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
+endif ## SND_SOC_INTEL_SKYLAKE
-endif
+endif ## SND_SOC_INTEL_MACH
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index c4d82ad41bd7..2179dedb28ad 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -219,7 +219,7 @@ static struct snd_soc_card bytcht_da7213_card = {
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
-static char codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char codec_name[SND_ACPI_I2C_ID_LEN];
static int bytcht_da7213_probe(struct platform_device *pdev)
{
@@ -243,7 +243,7 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(codec_name, sizeof(codec_name),
"%s%s", "i2c-", i2c_name);
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 8088396717e3..305e7f4fe55a 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -232,15 +232,39 @@ static struct snd_soc_card byt_cht_es8316_card = {
.fully_routed = true,
};
+static char codec_name[SND_ACPI_I2C_ID_LEN];
+
static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
{
- int ret = 0;
struct byt_cht_es8316_private *priv;
+ struct snd_soc_acpi_mach *mach;
+ const char *i2c_name = NULL;
+ int dai_index = 0;
+ int i;
+ int ret = 0;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
if (!priv)
return -ENOMEM;
+ mach = (&pdev->dev)->platform_data;
+ /* fix index of codec dai */
+ for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
+ if (!strcmp(byt_cht_es8316_dais[i].codec_name,
+ "i2c-ESSX8316:00")) {
+ dai_index = i;
+ break;
+ }
+ }
+
+ /* fixup codec name based on HID */
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
+ if (i2c_name) {
+ snprintf(codec_name, sizeof(codec_name),
+ "%s%s", "i2c-", i2c_name);
+ byt_cht_es8316_dais[dai_index].codec_name = codec_name;
+ }
+
/* register the soc card */
byt_cht_es8316_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&byt_cht_es8316_card, priv);
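
This HID-to-name fixup recurs across the machine drivers in this patch: acpi_dev_get_first_match_name() resolves the ACPI HID to a device instance name (for example "ESSX8316:00"), which is then prefixed with "i2c-" to form the codec component name. A condensed sketch of the shared pattern (helper name hypothetical, buffer as in the drivers above):

static char codec_name[SND_ACPI_I2C_ID_LEN];

static void example_fixup_codec_name(const char *hid)
{
	/* first ACPI device instance matching this HID */
	const char *name = acpi_dev_get_first_match_name(hid, NULL, -1);

	if (name)
		snprintf(codec_name, sizeof(codec_name), "i2c-%s", name);
}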
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index f2c0fc415e52..b6a1cfeec830 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -713,7 +713,7 @@ static struct snd_soc_card byt_rt5640_card = {
.fully_routed = true,
};
-static char byt_rt5640_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char byt_rt5640_codec_name[SND_ACPI_I2C_ID_LEN];
static char byt_rt5640_codec_aif_name[12]; /* = "rt5640-aif[1|2]" */
static char byt_rt5640_cpu_dai_name[10]; /* = "ssp[0|2]-port" */
@@ -762,7 +762,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(byt_rt5640_codec_name, sizeof(byt_rt5640_codec_name),
"%s%s", "i2c-", i2c_name);
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index d955836c6870..456526a93dd5 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -38,6 +38,8 @@ enum {
BYT_RT5651_DMIC_MAP,
BYT_RT5651_IN1_MAP,
BYT_RT5651_IN2_MAP,
+ BYT_RT5651_IN1_IN2_MAP,
+ BYT_RT5651_IN3_MAP,
};
#define BYT_RT5651_MAP(quirk) ((quirk) & GENMASK(7, 0))
@@ -62,6 +64,8 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk IN1_MAP enabled");
if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN2_MAP)
dev_info(dev, "quirk IN2_MAP enabled");
+ if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN3_MAP)
+ dev_info(dev, "quirk IN3_MAP enabled");
if (byt_rt5651_quirk & BYT_RT5651_DMIC_EN)
dev_info(dev, "quirk DMIC enabled");
if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN)
@@ -127,6 +131,7 @@ static const struct snd_soc_dapm_widget byt_rt5651_widgets[] = {
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Internal Mic", NULL),
SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_LINE("Line In", NULL),
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMD),
@@ -138,6 +143,7 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
{"Headset Mic", NULL, "Platform Clock"},
{"Internal Mic", NULL, "Platform Clock"},
{"Speaker", NULL, "Platform Clock"},
+ {"Line In", NULL, "Platform Clock"},
{"AIF1 Playback", NULL, "ssp2 Tx"},
{"ssp2 Tx", NULL, "codec_out0"},
@@ -151,6 +157,9 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
{"Headphone", NULL, "HPOR"},
{"Speaker", NULL, "LOUTL"},
{"Speaker", NULL, "LOUTR"},
+ {"IN2P", NULL, "Line In"},
+ {"IN2N", NULL, "Line In"},
+
};
static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic_map[] = {
@@ -171,11 +180,25 @@ static const struct snd_soc_dapm_route byt_rt5651_intmic_in2_map[] = {
{"IN2P", NULL, "Internal Mic"},
};
+static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_in2_map[] = {
+ {"Internal Mic", NULL, "micbias1"},
+ {"IN1P", NULL, "Internal Mic"},
+ {"IN2P", NULL, "Internal Mic"},
+ {"IN3P", NULL, "Headset Mic"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5651_intmic_in3_map[] = {
+ {"Internal Mic", NULL, "micbias1"},
+ {"IN3P", NULL, "Headset Mic"},
+ {"IN1P", NULL, "Internal Mic"},
+};
+
static const struct snd_kcontrol_new byt_rt5651_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Internal Mic"),
SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Line In"),
};
static struct snd_soc_jack_pin bytcr_jack_pins[] = {
@@ -247,8 +270,16 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
},
- .driver_data = (void *)(BYT_RT5651_DMIC_MAP |
- BYT_RT5651_DMIC_EN),
+ .driver_data = (void *)(BYT_RT5651_IN3_MAP),
+ },
+ {
+ .callback = byt_rt5651_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ADI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Turbot"),
+ },
+ .driver_data = (void *)(BYT_RT5651_MCLK_EN |
+ BYT_RT5651_IN3_MAP),
},
{
.callback = byt_rt5651_quirk_cb,
@@ -256,7 +287,8 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "KIANO"),
DMI_MATCH(DMI_PRODUCT_NAME, "KIANO SlimNote 14.2"),
},
- .driver_data = (void *)(BYT_RT5651_IN2_MAP),
+ .driver_data = (void *)(BYT_RT5651_MCLK_EN |
+ BYT_RT5651_IN1_IN2_MAP),
},
{}
};
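
The quirk-table entries above select input maps per board via DMI matching: at probe time the table is walked once and the matching entry's driver_data becomes the active quirk mask. A reduced sketch of that mechanism (table contents illustrative):

static unsigned long example_quirk;

static int example_quirk_cb(const struct dmi_system_id *id)
{
	example_quirk = (unsigned long)id->driver_data;
	return 1;
}

static const struct dmi_system_id example_quirk_table[] = {
	{
		.callback = example_quirk_cb,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ADI"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Turbot"),
		},
		.driver_data = (void *)(BYT_RT5651_MCLK_EN |
					BYT_RT5651_IN3_MAP),
	},
	{}
};

/* in probe: dmi_check_system(example_quirk_table); */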
@@ -281,6 +313,14 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
custom_map = byt_rt5651_intmic_in2_map;
num_routes = ARRAY_SIZE(byt_rt5651_intmic_in2_map);
break;
+ case BYT_RT5651_IN1_IN2_MAP:
+ custom_map = byt_rt5651_intmic_in1_in2_map;
+ num_routes = ARRAY_SIZE(byt_rt5651_intmic_in1_in2_map);
+ break;
+ case BYT_RT5651_IN3_MAP:
+ custom_map = byt_rt5651_intmic_in3_map;
+ num_routes = ARRAY_SIZE(byt_rt5651_intmic_in3_map);
+ break;
default:
custom_map = byt_rt5651_intmic_dmic_map;
num_routes = ARRAY_SIZE(byt_rt5651_intmic_dmic_map);
@@ -469,7 +509,7 @@ static struct snd_soc_card byt_rt5651_card = {
.fully_routed = true,
};
-static char byt_rt5651_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char byt_rt5651_codec_name[SND_ACPI_I2C_ID_LEN];
static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
{
@@ -499,7 +539,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
"%s%s", "i2c-", i2c_name);
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 18d129caa974..31641aab62cd 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -49,7 +49,7 @@ struct cht_acpi_card {
struct cht_mc_private {
struct snd_soc_jack jack;
struct cht_acpi_card *acpi_card;
- char codec_name[16];
+ char codec_name[SND_ACPI_I2C_ID_LEN];
struct clk *mclk;
};
@@ -118,6 +118,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
@@ -128,6 +129,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
{"IN1N", NULL, "Headset Mic"},
{"DMIC L1", NULL, "Int Mic"},
{"DMIC R1", NULL, "Int Mic"},
+ {"IN2P", NULL, "Int Analog Mic"},
+ {"IN2N", NULL, "Int Analog Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
{"Ext Spk", NULL, "SPOL"},
@@ -135,6 +138,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
{"Headphone", NULL, "Platform Clock"},
{"Headset Mic", NULL, "Platform Clock"},
{"Int Mic", NULL, "Platform Clock"},
+ {"Int Analog Mic", NULL, "Platform Clock"},
+ {"Int Analog Mic", NULL, "micbias1"},
+ {"Int Analog Mic", NULL, "micbias2"},
{"Ext Spk", NULL, "Platform Clock"},
};
@@ -189,6 +195,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
SOC_DAPM_PIN_SWITCH("Ext Spk"),
};
@@ -499,7 +506,7 @@ static struct cht_acpi_card snd_soc_cards[] = {
{"10EC5650", CODEC_TYPE_RT5650, &snd_soc_card_chtrt5650},
};
-static char cht_rt5645_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char cht_rt5645_codec_name[SND_ACPI_I2C_ID_LEN];
static char cht_rt5645_codec_aif_name[12]; /* = "rt5645-aif[1|2]" */
static char cht_rt5645_cpu_dai_name[10]; /* = "ssp[0|2]-port" */
@@ -566,7 +573,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(cht_rt5645_codec_name, sizeof(cht_rt5645_codec_name),
"%s%s", "i2c-", i2c_name);
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index f8f21eee9b2d..c14a52d2f714 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -35,7 +35,7 @@
struct cht_mc_private {
struct snd_soc_jack headset;
- char codec_name[16];
+ char codec_name[SND_ACPI_I2C_ID_LEN];
struct clk *mclk;
};
@@ -396,7 +396,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* fixup codec name based on HID */
if (mach) {
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(drv->codec_name, sizeof(drv->codec_name),
"i2c-%s", i2c_name);
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index 5e1ea0371c90..3c5160779204 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -76,7 +76,7 @@ static int haswell_rt5640_hw_params(struct snd_pcm_substream *substream,
}
/* set correct codec filter for DAI format and clock config */
- snd_soc_update_bits(rtd->codec, 0x83, 0xffff, 0x8000);
+ snd_soc_component_update_bits(codec_dai->component, 0x83, 0xffff, 0x8000);
return ret;
}
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 6dcad0a8a0d0..bf7014ca486f 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -225,7 +225,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
}
jack = &ctx->kabylake_headset;
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 271ae3c2c535..90ea98f01c4c 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -195,7 +195,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
}
jack = &ctx->kabylake_headset;
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
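
Both Kabylake machine drivers make the same remap: BTN_0 now reports KEY_PLAYPAUSE rather than KEY_MEDIA, matching what desktop userspace expects from a headset's primary button. In isolation, the mapping block looks like this (sketch; the jack is assumed to have been created with snd_soc_card_jack_new()):

static void example_map_headset_buttons(struct snd_jack *jack)
{
	snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
	snd_jack_set_key(jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
	snd_jack_set_key(jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
	snd_jack_set_key(jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
}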
diff --git a/sound/soc/intel/boards/mfld_machine.c b/sound/soc/intel/boards/mfld_machine.c
deleted file mode 100644
index 6f44acfb4aae..000000000000
--- a/sound/soc/intel/boards/mfld_machine.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * mfld_machine.c - ASoc Machine driver for Intel Medfield MID platform
- *
- * Copyright (C) 2010 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * Author: Harsha Priya <priya.harsha@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include "../codecs/sn95031.h"
-
-#define MID_MONO 1
-#define MID_STEREO 2
-#define MID_MAX_CAP 5
-#define MFLD_JACK_INSERT 0x04
-
-enum soc_mic_bias_zones {
- MFLD_MV_START = 0,
- /* mic bias volutage range for Headphones*/
- MFLD_MV_HP = 400,
- /* mic bias volutage range for American Headset*/
- MFLD_MV_AM_HS = 650,
- /* mic bias volutage range for Headset*/
- MFLD_MV_HS = 2000,
- MFLD_MV_UNDEFINED,
-};
-
-static unsigned int hs_switch;
-static unsigned int lo_dac;
-static struct snd_soc_codec *mfld_codec;
-
-struct mfld_mc_private {
- void __iomem *int_base;
- u8 interrupt_status;
-};
-
-struct snd_soc_jack mfld_jack;
-
-/*Headset jack detection DAPM pins */
-static struct snd_soc_jack_pin mfld_jack_pins[] = {
- {
- .pin = "Headphones",
- .mask = SND_JACK_HEADPHONE,
- },
- {
- .pin = "AMIC1",
- .mask = SND_JACK_MICROPHONE,
- },
-};
-
-/* jack detection voltage zones */
-static struct snd_soc_jack_zone mfld_zones[] = {
- {MFLD_MV_START, MFLD_MV_AM_HS, SND_JACK_HEADPHONE},
- {MFLD_MV_AM_HS, MFLD_MV_HS, SND_JACK_HEADSET},
-};
-
-/* sound card controls */
-static const char * const headset_switch_text[] = {"Earpiece", "Headset"};
-
-static const char * const lo_text[] = {"Vibra", "Headset", "IHF", "None"};
-
-static const struct soc_enum headset_enum =
- SOC_ENUM_SINGLE_EXT(2, headset_switch_text);
-
-static const struct soc_enum lo_enum =
- SOC_ENUM_SINGLE_EXT(4, lo_text);
-
-static int headset_get_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.enumerated.item[0] = hs_switch;
- return 0;
-}
-
-static int headset_set_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_context *dapm = &card->dapm;
-
- if (ucontrol->value.enumerated.item[0] == hs_switch)
- return 0;
-
- snd_soc_dapm_mutex_lock(dapm);
-
- if (ucontrol->value.enumerated.item[0]) {
- pr_debug("hs_set HS path\n");
- snd_soc_dapm_enable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
- } else {
- pr_debug("hs_set EP path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_enable_pin_unlocked(dapm, "EPOUT");
- }
-
- snd_soc_dapm_sync_unlocked(dapm);
-
- snd_soc_dapm_mutex_unlock(dapm);
-
- hs_switch = ucontrol->value.enumerated.item[0];
-
- return 0;
-}
-
-static void lo_enable_out_pins(struct snd_soc_dapm_context *dapm)
-{
- snd_soc_dapm_enable_pin_unlocked(dapm, "IHFOUTL");
- snd_soc_dapm_enable_pin_unlocked(dapm, "IHFOUTR");
- snd_soc_dapm_enable_pin_unlocked(dapm, "LINEOUTL");
- snd_soc_dapm_enable_pin_unlocked(dapm, "LINEOUTR");
- snd_soc_dapm_enable_pin_unlocked(dapm, "VIB1OUT");
- snd_soc_dapm_enable_pin_unlocked(dapm, "VIB2OUT");
- if (hs_switch) {
- snd_soc_dapm_enable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
- } else {
- snd_soc_dapm_disable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_enable_pin_unlocked(dapm, "EPOUT");
- }
-}
-
-static int lo_get_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.enumerated.item[0] = lo_dac;
- return 0;
-}
-
-static int lo_set_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_context *dapm = &card->dapm;
-
- if (ucontrol->value.enumerated.item[0] == lo_dac)
- return 0;
-
- snd_soc_dapm_mutex_lock(dapm);
-
- /* we dont want to work with last state of lineout so just enable all
- * pins and then disable pins not required
- */
- lo_enable_out_pins(dapm);
-
- switch (ucontrol->value.enumerated.item[0]) {
- case 0:
- pr_debug("set vibra path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "VIB1OUT");
- snd_soc_dapm_disable_pin_unlocked(dapm, "VIB2OUT");
- snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0);
- break;
-
- case 1:
- pr_debug("set hs path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
- snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0x22);
- break;
-
- case 2:
- pr_debug("set spkr path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "IHFOUTL");
- snd_soc_dapm_disable_pin_unlocked(dapm, "IHFOUTR");
- snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0x44);
- break;
-
- case 3:
- pr_debug("set null path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "LINEOUTL");
- snd_soc_dapm_disable_pin_unlocked(dapm, "LINEOUTR");
- snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0x66);
- break;
- }
-
- snd_soc_dapm_sync_unlocked(dapm);
-
- snd_soc_dapm_mutex_unlock(dapm);
-
- lo_dac = ucontrol->value.enumerated.item[0];
- return 0;
-}
-
-static const struct snd_kcontrol_new mfld_snd_controls[] = {
- SOC_ENUM_EXT("Playback Switch", headset_enum,
- headset_get_switch, headset_set_switch),
- SOC_ENUM_EXT("Lineout Mux", lo_enum,
- lo_get_switch, lo_set_switch),
-};
-
-static const struct snd_soc_dapm_widget mfld_widgets[] = {
- SND_SOC_DAPM_HP("Headphones", NULL),
- SND_SOC_DAPM_MIC("Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mfld_map[] = {
- {"Headphones", NULL, "HPOUTR"},
- {"Headphones", NULL, "HPOUTL"},
- {"Mic", NULL, "AMIC1"},
-};
-
-static void mfld_jack_check(unsigned int intr_status)
-{
- struct mfld_jack_data jack_data;
-
- if (!mfld_codec)
- return;
-
- jack_data.mfld_jack = &mfld_jack;
- jack_data.intr_id = intr_status;
-
- sn95031_jack_detection(mfld_codec, &jack_data);
- /* TODO: add american headset detection post gpiolib support */
-}
-
-static int mfld_init(struct snd_soc_pcm_runtime *runtime)
-{
- struct snd_soc_dapm_context *dapm = &runtime->card->dapm;
- int ret_val;
-
- /* default is earpiece pin, userspace sets it explcitly */
- snd_soc_dapm_disable_pin(dapm, "Headphones");
- /* default is lineout NC, userspace sets it explcitly */
- snd_soc_dapm_disable_pin(dapm, "LINEOUTL");
- snd_soc_dapm_disable_pin(dapm, "LINEOUTR");
- lo_dac = 3;
- hs_switch = 0;
- /* we dont use linein in this so set to NC */
- snd_soc_dapm_disable_pin(dapm, "LINEINL");
- snd_soc_dapm_disable_pin(dapm, "LINEINR");
-
- /* Headset and button jack detection */
- ret_val = snd_soc_card_jack_new(runtime->card,
- "Intel(R) MID Audio Jack", SND_JACK_HEADSET |
- SND_JACK_BTN_0 | SND_JACK_BTN_1, &mfld_jack,
- mfld_jack_pins, ARRAY_SIZE(mfld_jack_pins));
- if (ret_val) {
- pr_err("jack creation failed\n");
- return ret_val;
- }
-
- ret_val = snd_soc_jack_add_zones(&mfld_jack,
- ARRAY_SIZE(mfld_zones), mfld_zones);
- if (ret_val) {
- pr_err("adding jack zones failed\n");
- return ret_val;
- }
-
- mfld_codec = runtime->codec;
-
- /* we want to check if anything is inserted at boot,
- * so send a fake event to codec and it will read adc
- * to find if anything is there or not */
- mfld_jack_check(MFLD_JACK_INSERT);
- return ret_val;
-}
-
-static struct snd_soc_dai_link mfld_msic_dailink[] = {
- {
- .name = "Medfield Headset",
- .stream_name = "Headset",
- .cpu_dai_name = "Headset-cpu-dai",
- .codec_dai_name = "SN95031 Headset",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = mfld_init,
- },
- {
- .name = "Medfield Speaker",
- .stream_name = "Speaker",
- .cpu_dai_name = "Speaker-cpu-dai",
- .codec_dai_name = "SN95031 Speaker",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
- {
- .name = "Medfield Vibra",
- .stream_name = "Vibra1",
- .cpu_dai_name = "Vibra1-cpu-dai",
- .codec_dai_name = "SN95031 Vibra1",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
- {
- .name = "Medfield Haptics",
- .stream_name = "Vibra2",
- .cpu_dai_name = "Vibra2-cpu-dai",
- .codec_dai_name = "SN95031 Vibra2",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
- {
- .name = "Medfield Compress",
- .stream_name = "Speaker",
- .cpu_dai_name = "Compress-cpu-dai",
- .codec_dai_name = "SN95031 Speaker",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
-};
-
-/* SoC card */
-static struct snd_soc_card snd_soc_card_mfld = {
- .name = "medfield_audio",
- .owner = THIS_MODULE,
- .dai_link = mfld_msic_dailink,
- .num_links = ARRAY_SIZE(mfld_msic_dailink),
-
- .controls = mfld_snd_controls,
- .num_controls = ARRAY_SIZE(mfld_snd_controls),
- .dapm_widgets = mfld_widgets,
- .num_dapm_widgets = ARRAY_SIZE(mfld_widgets),
- .dapm_routes = mfld_map,
- .num_dapm_routes = ARRAY_SIZE(mfld_map),
-};
-
-static irqreturn_t snd_mfld_jack_intr_handler(int irq, void *dev)
-{
- struct mfld_mc_private *mc_private = (struct mfld_mc_private *) dev;
-
- memcpy_fromio(&mc_private->interrupt_status,
- ((void *)(mc_private->int_base)),
- sizeof(u8));
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t snd_mfld_jack_detection(int irq, void *data)
-{
- struct mfld_mc_private *mc_drv_ctx = (struct mfld_mc_private *) data;
-
- mfld_jack_check(mc_drv_ctx->interrupt_status);
-
- return IRQ_HANDLED;
-}
-
-static int snd_mfld_mc_probe(struct platform_device *pdev)
-{
- int ret_val = 0, irq;
- struct mfld_mc_private *mc_drv_ctx;
- struct resource *irq_mem;
-
- pr_debug("snd_mfld_mc_probe called\n");
-
- /* retrive the irq number */
- irq = platform_get_irq(pdev, 0);
-
- /* audio interrupt base of SRAM location where
- * interrupts are stored by System FW */
- mc_drv_ctx = devm_kzalloc(&pdev->dev, sizeof(*mc_drv_ctx), GFP_ATOMIC);
- if (!mc_drv_ctx)
- return -ENOMEM;
-
- irq_mem = platform_get_resource_byname(
- pdev, IORESOURCE_MEM, "IRQ_BASE");
- if (!irq_mem) {
- pr_err("no mem resource given\n");
- return -ENODEV;
- }
- mc_drv_ctx->int_base = devm_ioremap_nocache(&pdev->dev, irq_mem->start,
- resource_size(irq_mem));
- if (!mc_drv_ctx->int_base) {
- pr_err("Mapping of cache failed\n");
- return -ENOMEM;
- }
- /* register for interrupt */
- ret_val = devm_request_threaded_irq(&pdev->dev, irq,
- snd_mfld_jack_intr_handler,
- snd_mfld_jack_detection,
- IRQF_SHARED, pdev->dev.driver->name, mc_drv_ctx);
- if (ret_val) {
- pr_err("cannot register IRQ\n");
- return ret_val;
- }
- /* register the soc card */
- snd_soc_card_mfld.dev = &pdev->dev;
- ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_mfld);
- if (ret_val) {
- pr_debug("snd_soc_register_card failed %d\n", ret_val);
- return ret_val;
- }
- platform_set_drvdata(pdev, mc_drv_ctx);
- pr_debug("successfully exited probe\n");
- return 0;
-}
-
-static struct platform_driver snd_mfld_mc_driver = {
- .driver = {
- .name = "msic_audio",
- },
- .probe = snd_mfld_mc_probe,
-};
-
-module_platform_driver(snd_mfld_mc_driver);
-
-MODULE_DESCRIPTION("ASoC Intel(R) MID Machine driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:msic-audio");
diff --git a/sound/soc/intel/common/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index 11c0805393ff..fd82f4b1d4a0 100644
--- a/sound/soc/intel/common/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
@@ -269,7 +269,7 @@ int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
*/
timeout = jiffies + msecs_to_jiffies(time);
- while (((sst_dsp_shim_read_unlocked(ctx, offset) & mask) != target)
+ while ((((reg = sst_dsp_shim_read_unlocked(ctx, offset)) & mask) != target)
&& time_before(jiffies, timeout)) {
k++;
if (k > 10)
@@ -278,8 +278,6 @@ int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
usleep_range(s, 2*s);
}
- reg = sst_dsp_shim_read_unlocked(ctx, offset);
-
if ((reg & mask) == target) {
dev_dbg(ctx->dev, "FW Poll Status: reg=%#x %s successful\n",
reg, operation);
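
Folding the read into the loop condition means reg always holds the value the loop actually tested, so the success check after the loop cannot race with a post-timeout re-read. Reduced to its shape (read helper and error code hypothetical):

static int example_register_poll(void __iomem *base, u32 offset, u32 mask,
				 u32 target, unsigned long timeout)
{
	u32 reg;

	/* reg is assigned on every iteration, including the last one tested */
	while (((reg = readl(base + offset)) & mask) != target &&
	       time_before(jiffies, timeout))
		usleep_range(100, 200);

	return (reg & mask) == target ? 0 : -ETIMEDOUT;
}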
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 4524211960e4..440bca7afbf1 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -595,7 +595,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
skl->d0i3.state = SKL_DSP_D0I3_NONE;
- return 0;
+ return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c
index 387de388ce29..245df1067ba8 100644
--- a/sound/soc/intel/skylake/cnl-sst.c
+++ b/sound/soc/intel/skylake/cnl-sst.c
@@ -458,7 +458,7 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
cnl->boot_complete = false;
init_waitqueue_head(&cnl->boot_wait);
- return 0;
+ return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_init);
diff --git a/sound/soc/intel/skylake/skl-i2s.h b/sound/soc/intel/skylake/skl-i2s.h
new file mode 100644
index 000000000000..dcf819bc688f
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-i2s.h
@@ -0,0 +1,64 @@
+/*
+ * skl-i2s.h - i2s blob mapping
+ *
+ * Copyright (C) 2017 Intel Corp
+ * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SOUND_SOC_SKL_I2S_H
+#define __SOUND_SOC_SKL_I2S_H
+
+#define SKL_I2S_MAX_TIME_SLOTS 8
+#define SKL_MCLK_DIV_CLK_SRC_MASK GENMASK(17, 16)
+
+#define SKL_MNDSS_DIV_CLK_SRC_MASK GENMASK(21, 20)
+#define SKL_SHIFT(x) (ffs(x) - 1)
+#define SKL_MCLK_DIV_RATIO_MASK GENMASK(11, 0)
+
+struct skl_i2s_config {
+ u32 ssc0;
+ u32 ssc1;
+ u32 sscto;
+ u32 sspsp;
+ u32 sstsa;
+ u32 ssrsa;
+ u32 ssc2;
+ u32 sspsp2;
+ u32 ssc3;
+ u32 ssioc;
+} __packed;
+
+struct skl_i2s_config_mclk {
+ u32 mdivctrl;
+ u32 mdivr;
+};
+
+/**
+ * struct skl_i2s_config_blob_legacy - Structure defines I2S Gateway
+ * configuration legacy blob
+ *
+ * @gtw_attr: Gateway attribute for the I2S Gateway
+ * @tdm_ts_group: TDM slot mapping against channels in the Gateway.
+ * @i2s_cfg: I2S HW registers
+ * @mclk: MCLK clock source and divider values
+ */
+struct skl_i2s_config_blob_legacy {
+ u32 gtw_attr;
+ u32 tdm_ts_group[SKL_I2S_MAX_TIME_SLOTS];
+ struct skl_i2s_config i2s_cfg;
+ struct skl_i2s_config_mclk mclk;
+};
+
+#endif /* __SOUND_SOC_SKL_I2S_H */
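
The header pairs each GENMASK() field mask with SKL_SHIFT(), which uses ffs() to locate the mask's lowest set bit, so field extraction never hard-codes a bit position twice. For instance, extracting the MN divider clock source from mdivctrl (sketch using the macros defined above):

static inline u8 example_mndss_clk_src(u32 mdivctrl)
{
	return (mdivctrl & SKL_MNDSS_DIV_CLK_SRC_MASK) >>
		SKL_SHIFT(SKL_MNDSS_DIV_CLK_SRC_MASK);
}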
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 61b5bfa79d13..8cbf080c38b3 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -55,6 +55,19 @@ static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
return 0;
}
+#define SKL_ASTATE_PARAM_ID 4
+
+void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data)
+{
+ struct skl_ipc_large_config_msg msg = {0};
+
+ msg.large_param_id = SKL_ASTATE_PARAM_ID;
+ msg.param_data_size = (cnt * sizeof(struct skl_astate_param) +
+ sizeof(cnt));
+
+ skl_ipc_set_large_config(&ctx->ipc, &msg, data);
+}
+
#define NOTIFICATION_PARAM_ID 3
#define NOTIFICATION_MASK 0xf
@@ -404,11 +417,20 @@ int skl_resume_dsp(struct skl *skl)
if (skl->skl_sst->is_first_boot == true)
return 0;
+ /* disable dynamic clock gating during fw and lib download */
+ ctx->enable_miscbdcge(ctx->dev, false);
+
ret = skl_dsp_wake(ctx->dsp);
+ ctx->enable_miscbdcge(ctx->dev, true);
if (ret < 0)
return ret;
skl_dsp_enable_notification(skl->skl_sst, false);
+
+ if (skl->cfg.astate_cfg != NULL) {
+ skl_dsp_set_astate_cfg(skl->skl_sst, skl->cfg.astate_cfg->count,
+ skl->cfg.astate_cfg);
+ }
return ret;
}
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 3eaac41090ca..3b1d2b828c1b 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -19,6 +19,7 @@
*/
#include <linux/pci.h>
#include "skl.h"
+#include "skl-i2s.h"
#define NHLT_ACPI_HEADER_SIG "NHLT"
@@ -43,7 +44,8 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
if (obj && obj->type == ACPI_TYPE_BUFFER) {
nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
- nhlt_table = (struct nhlt_acpi_table *)
+ if (nhlt_ptr->length)
+ nhlt_table = (struct nhlt_acpi_table *)
memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
MEMREMAP_WB);
ACPI_FREE(obj);
@@ -276,3 +278,157 @@ void skl_nhlt_remove_sysfs(struct skl *skl)
sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
}
+
+/*
+ * Queries NHLT for all the fmt configuration for a particular endpoint and
+ * stores all possible rates supported in a rate table for the corresponding
+ * sclk/sclkfs.
+ */
+static void skl_get_ssp_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks,
+ struct nhlt_fmt *fmt, u8 id)
+{
+ struct skl_i2s_config_blob_legacy *i2s_config;
+ struct skl_clk_parent_src *parent;
+ struct skl_ssp_clk *sclk, *sclkfs;
+ struct nhlt_fmt_cfg *fmt_cfg;
+ struct wav_fmt_ext *wav_fmt;
+ unsigned long rate = 0;
+ bool present = false;
+ int rate_index = 0;
+ u16 channels, bps;
+ u8 clk_src;
+ int i, j;
+ u32 fs;
+
+ sclk = &ssp_clks[SKL_SCLK_OFS];
+ sclkfs = &ssp_clks[SKL_SCLKFS_OFS];
+
+ if (fmt->fmt_count == 0)
+ return;
+
+ for (i = 0; i < fmt->fmt_count; i++) {
+ fmt_cfg = &fmt->fmt_config[i];
+ wav_fmt = &fmt_cfg->fmt_ext;
+
+ channels = wav_fmt->fmt.channels;
+ bps = wav_fmt->fmt.bits_per_sample;
+ fs = wav_fmt->fmt.samples_per_sec;
+
+ /*
+ * In case of TDM configuration on an SSP, there can
+ * be more than one blob in which channel masks are
+ * different for each usecase for a specific rate and bps.
+ * But the sclk rate will be generated for the total
+ * number of channels used for that endpoint.
+ *
+ * So for the given fs and bps, choose blob which has
+ * the superset of all channels for that endpoint and
+ * derive the rate.
+ */
+ for (j = i; j < fmt->fmt_count; j++) {
+ fmt_cfg = &fmt->fmt_config[j];
+ wav_fmt = &fmt_cfg->fmt_ext;
+ if ((fs == wav_fmt->fmt.samples_per_sec) &&
+ (bps == wav_fmt->fmt.bits_per_sample))
+ channels = max_t(u16, channels,
+ wav_fmt->fmt.channels);
+ }
+
+ rate = channels * bps * fs;
+
+ /* check if the rate is added already to the given SSP's sclk */
+ for (j = 0; (j < SKL_MAX_CLK_RATES) &&
+ (sclk[id].rate_cfg[j].rate != 0); j++) {
+ if (sclk[id].rate_cfg[j].rate == rate) {
+ present = true;
+ break;
+ }
+ }
+
+ /* Fill rate and parent for sclk/sclkfs */
+ if (!present) {
+ /* MCLK Divider Source Select */
+ i2s_config = (struct skl_i2s_config_blob_legacy *)
+ fmt->fmt_config[0].config.caps;
+ clk_src = ((i2s_config->mclk.mdivctrl)
+ & SKL_MNDSS_DIV_CLK_SRC_MASK) >>
+ SKL_SHIFT(SKL_MNDSS_DIV_CLK_SRC_MASK);
+
+ parent = skl_get_parent_clk(clk_src);
+
+ /*
+ * Do not copy the config data if there is no parent
+ * clock available for this clock source select
+ */
+ if (!parent)
+ continue;
+
+ sclk[id].rate_cfg[rate_index].rate = rate;
+ sclk[id].rate_cfg[rate_index].config = fmt_cfg;
+ sclkfs[id].rate_cfg[rate_index].rate = rate;
+ sclkfs[id].rate_cfg[rate_index].config = fmt_cfg;
+ sclk[id].parent_name = parent->name;
+ sclkfs[id].parent_name = parent->name;
+
+ rate_index++;
+ }
+ }
+}
+
+static void skl_get_mclk(struct skl *skl, struct skl_ssp_clk *mclk,
+ struct nhlt_fmt *fmt, u8 id)
+{
+ struct skl_i2s_config_blob_legacy *i2s_config;
+ struct nhlt_specific_cfg *fmt_cfg;
+ struct skl_clk_parent_src *parent;
+ u32 clkdiv, div_ratio;
+ u8 clk_src;
+
+ fmt_cfg = &fmt->fmt_config[0].config;
+ i2s_config = (struct skl_i2s_config_blob_legacy *)fmt_cfg->caps;
+
+ /* MCLK Divider Source Select */
+ clk_src = ((i2s_config->mclk.mdivctrl) & SKL_MCLK_DIV_CLK_SRC_MASK) >>
+ SKL_SHIFT(SKL_MCLK_DIV_CLK_SRC_MASK);
+
+ clkdiv = i2s_config->mclk.mdivr & SKL_MCLK_DIV_RATIO_MASK;
+
+ /* bypass divider */
+ div_ratio = 1;
+
+ if (clkdiv != SKL_MCLK_DIV_RATIO_MASK)
+ /* Divider is 2 + clkdiv */
+ div_ratio = clkdiv + 2;
+
+ /* Calculate MCLK rate from source using div value */
+ parent = skl_get_parent_clk(clk_src);
+ if (!parent)
+ return;
+
+ mclk[id].rate_cfg[0].rate = parent->rate/div_ratio;
+ mclk[id].rate_cfg[0].config = &fmt->fmt_config[0];
+ mclk[id].parent_name = parent->name;
+}
+
+void skl_get_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks)
+{
+ struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+ struct nhlt_endpoint *epnt;
+ struct nhlt_fmt *fmt;
+ int i;
+ u8 id;
+
+ epnt = (struct nhlt_endpoint *)nhlt->desc;
+ for (i = 0; i < nhlt->endpoint_count; i++) {
+ if (epnt->linktype == NHLT_LINK_SSP) {
+ id = epnt->virtual_bus_id;
+
+ fmt = (struct nhlt_fmt *)(epnt->config.caps
+ + epnt->config.size);
+
+ skl_get_ssp_clks(skl, ssp_clks, fmt, id);
+ skl_get_mclk(skl, ssp_clks, fmt, id);
+ }
+ epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+ }
+}
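
skl_get_mclk() treats an all-ones divider field as bypass (ratio 1) and otherwise uses clkdiv + 2; with a hypothetical 24 MHz parent and clkdiv = 1, the ratio is 3 and MCLK comes out at 8 MHz. The arithmetic, isolated (sketch):

static unsigned long example_mclk_rate(unsigned long parent_rate, u32 mdivr)
{
	u32 clkdiv = mdivr & SKL_MCLK_DIV_RATIO_MASK;
	u32 div_ratio = 1;		/* all-ones field means bypass */

	if (clkdiv != SKL_MCLK_DIV_RATIO_MASK)
		div_ratio = clkdiv + 2;	/* hardware divider is 2 + clkdiv */

	return parent_rate / div_ratio;
}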
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 1dd97479e0c0..e46828533826 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -537,7 +537,7 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
- link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+ link = snd_hdac_ext_bus_get_link(ebus, codec_dai->component->name);
if (!link)
return -EINVAL;
@@ -620,7 +620,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
link_dev->link_prepared = 0;
- link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+ link = snd_hdac_ext_bus_get_link(ebus, rtd->codec_dai->component->name);
if (!link)
return -EINVAL;
@@ -1343,7 +1343,11 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
return -EIO;
}
+ /* disable dynamic clock gating during fw and lib download */
+ skl->skl_sst->enable_miscbdcge(platform->dev, false);
+
ret = ops->init_fw(platform->dev, skl->skl_sst);
+ skl->skl_sst->enable_miscbdcge(platform->dev, true);
if (ret < 0) {
dev_err(platform->dev, "Failed to boot first fw: %d\n", ret);
return ret;
@@ -1351,6 +1355,12 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
skl_populate_modules(skl);
skl->skl_sst->update_d0i3c = skl_update_d0i3c;
skl_dsp_enable_notification(skl->skl_sst, false);
+
+ if (skl->cfg.astate_cfg != NULL) {
+ skl_dsp_set_astate_cfg(skl->skl_sst,
+ skl->cfg.astate_cfg->count,
+ skl->cfg.astate_cfg);
+ }
}
pm_runtime_mark_last_busy(platform->dev);
pm_runtime_put_autosuspend(platform->dev);
diff --git a/sound/soc/intel/skylake/skl-ssp-clk.h b/sound/soc/intel/skylake/skl-ssp-clk.h
new file mode 100644
index 000000000000..c9ea84004260
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-ssp-clk.h
@@ -0,0 +1,79 @@
+/*
+ * skl-ssp-clk.h - Skylake ssp clock information and ipc structure
+ *
+ * Copyright (C) 2017 Intel Corp
+ * Author: Jaikrishna Nemallapudi <jaikrishnax.nemallapudi@intel.com>
+ * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef SOUND_SOC_SKL_SSP_CLK_H
+#define SOUND_SOC_SKL_SSP_CLK_H
+
+#define SKL_MAX_SSP 6
+/* xtal/cardinal/pll, parent of ssp clocks and mclk */
+#define SKL_MAX_CLK_SRC 3
+#define SKL_MAX_SSP_CLK_TYPES 3 /* mclk, sclk, sclkfs */
+
+#define SKL_MAX_CLK_CNT (SKL_MAX_SSP * SKL_MAX_SSP_CLK_TYPES)
+
+/* Max number of configurations supported for each clock */
+#define SKL_MAX_CLK_RATES 10
+
+#define SKL_SCLK_OFS SKL_MAX_SSP
+#define SKL_SCLKFS_OFS (SKL_SCLK_OFS + SKL_MAX_SSP)
+
+enum skl_clk_type {
+ SKL_MCLK,
+ SKL_SCLK,
+ SKL_SCLK_FS,
+};
+
+enum skl_clk_src_type {
+ SKL_XTAL,
+ SKL_CARDINAL,
+ SKL_PLL,
+};
+
+struct skl_clk_parent_src {
+ u8 clk_id;
+ const char *name;
+ unsigned long rate;
+ const char *parent_name;
+};
+
+struct skl_clk_rate_cfg_table {
+ unsigned long rate;
+ void *config;
+};
+
+/*
+ * rate for mclk will be in rates[0]. For sclk and sclkfs, rates[] store
+ * all possible clocks ssp can generate for that platform.
+ */
+struct skl_ssp_clk {
+ const char *name;
+ const char *parent_name;
+ struct skl_clk_rate_cfg_table rate_cfg[SKL_MAX_CLK_RATES];
+};
+
+struct skl_clk_pdata {
+ struct skl_clk_parent_src *parent_clks;
+ int num_clks;
+ struct skl_ssp_clk *ssp_clks;
+ void *pvt_data;
+};
+
+#endif /* SOUND_SOC_SKL_SSP_CLK_H */
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index 19ee1d4f3bdf..71e31ad0bb3f 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -435,16 +435,22 @@ struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
return NULL;
}
+ return sst;
+}
+
+int skl_dsp_acquire_irq(struct sst_dsp *sst)
+{
+ struct sst_dsp_device *sst_dev = sst->sst_dev;
+ int ret;
+
/* Register the ISR */
ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
- if (ret) {
+ if (ret)
dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
sst->irq);
- return NULL;
- }
- return sst;
+ return ret;
}
void skl_dsp_free(struct sst_dsp *dsp)
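
With the ISR registration split out, skl_dsp_ctx_init() only builds the context, and each platform init (the bxt/cnl/skl paths changed elsewhere in this patch) ends with return skl_dsp_acquire_irq(sst), so the shared IRQ cannot fire before platform setup is complete. The intended ordering as a sketch (setup step elided):

static int example_dsp_init(struct device *dev,
			    struct sst_dsp_device *sst_dev, int irq)
{
	struct sst_dsp *sst = skl_dsp_ctx_init(dev, sst_dev, irq);

	if (!sst)
		return -ENODEV;

	/* ... platform-specific setup that must precede any ISR activity ... */

	return skl_dsp_acquire_irq(sst);
}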
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index eba20d37ba8c..12fc9a73dc8a 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -206,6 +206,7 @@ int skl_cldma_wait_interruptible(struct sst_dsp *ctx);
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
struct sst_dsp_device *sst_dev, int irq);
+int skl_dsp_acquire_irq(struct sst_dsp *sst);
bool is_skl_dsp_running(struct sst_dsp *ctx);
unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx);
@@ -251,6 +252,9 @@ void skl_freeup_uuid_list(struct skl_sst *ctx);
int skl_dsp_strip_extended_manifest(struct firmware *fw);
void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable);
+
+void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data);
+
int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name,
struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp,
struct sst_dsp_device *skl_dev);
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index 8ff89280d9fd..2ae405617876 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -178,7 +178,8 @@ static inline int skl_pvtid_128(struct uuid_module *module)
* skl_get_pvt_id: generate a private id for use as module id
*
* @ctx: driver context
- * @mconfig: module configuration data
+ * @uuid_mod: module's uuid
+ * @instance_id: module's instance id
*
* This generates a 128 bit private unique id for a module TYPE so that
* module instance is unique
@@ -208,7 +209,8 @@ EXPORT_SYMBOL_GPL(skl_get_pvt_id);
* skl_put_pvt_id: free up the private id allocated
*
* @ctx: driver context
- * @mconfig: module configuration data
+ * @uuid_mod: module's uuid
+ * @pvt_id: module pvt id
*
* This frees a 128 bit private unique id previously generated
*/
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index a436abf2fe3f..5a7e41b65ef3 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -569,7 +569,7 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
sst->fw_ops = skl_fw_ops;
- return 0;
+ return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 81923da18ac2..73af6e19ebbd 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -190,7 +190,6 @@ skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
u8 res_idx = mconfig->res_idx;
struct skl_module_res *res = &mconfig->module->resources[res_idx];
- res = &mconfig->module->resources[res_idx];
skl->resource.mcps -= res->cps;
}
@@ -3056,11 +3055,13 @@ static int skl_tplg_get_int_tkn(struct device *dev,
struct snd_soc_tplg_vendor_value_elem *tkn_elem,
struct skl *skl)
{
- int tkn_count = 0, ret;
+ int tkn_count = 0, ret, size;
static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
struct skl_module_res *res = NULL;
struct skl_module_iface *fmt = NULL;
struct skl_module *mod = NULL;
+ static struct skl_astate_param *astate_table;
+ static int astate_cfg_idx, count;
int i;
if (skl->modules) {
@@ -3093,6 +3094,46 @@ static int skl_tplg_get_int_tkn(struct device *dev,
mod_idx = tkn_elem->value;
break;
+ case SKL_TKN_U32_ASTATE_COUNT:
+ if (astate_table != NULL) {
+ dev_err(dev, "More than one entry for A-State count");
+ return -EINVAL;
+ }
+
+ if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
+ dev_err(dev, "Invalid A-State count %d\n",
+ tkn_elem->value);
+ return -EINVAL;
+ }
+
+ size = tkn_elem->value * sizeof(struct skl_astate_param) +
+ sizeof(count);
+ skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!skl->cfg.astate_cfg)
+ return -ENOMEM;
+
+ astate_table = skl->cfg.astate_cfg->astate_table;
+ count = skl->cfg.astate_cfg->count = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_ASTATE_IDX:
+ if (tkn_elem->value >= count) {
+ dev_err(dev, "Invalid A-State index %d\n",
+ tkn_elem->value);
+ return -EINVAL;
+ }
+
+ astate_cfg_idx = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_ASTATE_KCPS:
+ astate_table[astate_cfg_idx].kcps = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_ASTATE_CLK_SRC:
+ astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
+ break;
+
case SKL_TKN_U8_IN_PIN_TYPE:
case SKL_TKN_U8_OUT_PIN_TYPE:
case SKL_TKN_U8_IN_QUEUE_COUNT:
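The A-state table above is allocated as a single block: a u32 count followed by count skl_astate_param entries, which is why the size computation adds sizeof(count) to the array size. A stand-alone sketch of the same layout (dev and count are assumed):

    struct skl_astate_config *cfg;
    size_t size = count * sizeof(struct skl_astate_param) + sizeof(u32);

    cfg = devm_kzalloc(dev, size, GFP_KERNEL);  /* header + flexible array */
    if (!cfg)
        return -ENOMEM;
    cfg->count = count;

Newer kernels would express the same computation with the struct_size() helper from <linux/overflow.h>, e.g. devm_kzalloc(dev, struct_size(cfg, astate_table, count), GFP_KERNEL), which also guards against multiplication overflow.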
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 31d8634e8aa1..32ce64c6b2dc 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -355,6 +355,7 @@ static int skl_resume(struct device *dev)
if (ebus->cmd_dma_state)
snd_hdac_bus_init_cmd_io(&ebus->bus);
+ ret = 0;
} else {
ret = _skl_resume(ebus);
@@ -435,19 +436,51 @@ static int skl_free(struct hdac_ext_bus *ebus)
return 0;
}
-static int skl_machine_device_register(struct skl *skl, void *driver_data)
+/*
+ * Each SSP exposes 3 clocks (mclk/sclk/sclkfs); e.g. the clocks for
+ * ssp0 are named "ssp0_mclk", "ssp0_sclk" and "ssp0_sclkfs".
+ * Skylake and later platforms have 6 SSPs, so 18 clocks are created.
+ */
+static struct skl_ssp_clk skl_ssp_clks[] = {
+ {.name = "ssp0_mclk"}, {.name = "ssp1_mclk"}, {.name = "ssp2_mclk"},
+ {.name = "ssp3_mclk"}, {.name = "ssp4_mclk"}, {.name = "ssp5_mclk"},
+ {.name = "ssp0_sclk"}, {.name = "ssp1_sclk"}, {.name = "ssp2_sclk"},
+ {.name = "ssp3_sclk"}, {.name = "ssp4_sclk"}, {.name = "ssp5_sclk"},
+ {.name = "ssp0_sclkfs"}, {.name = "ssp1_sclkfs"},
+ {.name = "ssp2_sclkfs"},
+ {.name = "ssp3_sclkfs"}, {.name = "ssp4_sclkfs"},
+ {.name = "ssp5_sclkfs"},
+};
+
+static int skl_find_machine(struct skl *skl, void *driver_data)
{
- struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
- struct platform_device *pdev;
struct snd_soc_acpi_mach *mach = driver_data;
- int ret;
+ struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+ struct skl_machine_pdata *pdata;
mach = snd_soc_acpi_find_machine(mach);
if (mach == NULL) {
dev_err(bus->dev, "No matching machine driver found\n");
return -ENODEV;
}
+
+ skl->mach = mach;
skl->fw_name = mach->fw_filename;
+ pdata = skl->mach->pdata;
+
+ if (mach->pdata)
+ skl->use_tplg_pcm = pdata->use_tplg_pcm;
+
+ return 0;
+}
+
+static int skl_machine_device_register(struct skl *skl)
+{
+ struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+ struct snd_soc_acpi_mach *mach = skl->mach;
+ struct platform_device *pdev;
+ int ret;
pdev = platform_device_alloc(mach->drv_name, -1);
if (pdev == NULL) {
@@ -462,11 +495,8 @@ static int skl_machine_device_register(struct skl *skl, void *driver_data)
return -EIO;
}
- if (mach->pdata) {
- skl->use_tplg_pcm =
- ((struct skl_machine_pdata *)mach->pdata)->use_tplg_pcm;
+ if (mach->pdata)
dev_set_drvdata(&pdev->dev, mach->pdata);
- }
skl->i2s_dev = pdev;
@@ -509,6 +539,74 @@ static void skl_dmic_device_unregister(struct skl *skl)
platform_device_unregister(skl->dmic_dev);
}
+static struct skl_clk_parent_src skl_clk_src[] = {
+ { .clk_id = SKL_XTAL, .name = "xtal" },
+ { .clk_id = SKL_CARDINAL, .name = "cardinal", .rate = 24576000 },
+ { .clk_id = SKL_PLL, .name = "pll", .rate = 96000000 },
+};
+
+struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(skl_clk_src); i++) {
+ if (skl_clk_src[i].clk_id == clk_id)
+ return &skl_clk_src[i];
+ }
+
+ return NULL;
+}
+
+static void init_skl_xtal_rate(int pci_id)
+{
+ switch (pci_id) {
+ case 0x9d70:
+ case 0x9d71:
+ skl_clk_src[0].rate = 24000000;
+ return;
+
+ default:
+ skl_clk_src[0].rate = 19200000;
+ return;
+ }
+}
+
+static int skl_clock_device_register(struct skl *skl)
+{
+ struct platform_device_info pdevinfo = {NULL};
+ struct skl_clk_pdata *clk_pdata;
+
+ clk_pdata = devm_kzalloc(&skl->pci->dev, sizeof(*clk_pdata),
+ GFP_KERNEL);
+ if (!clk_pdata)
+ return -ENOMEM;
+
+ init_skl_xtal_rate(skl->pci->device);
+
+ clk_pdata->parent_clks = skl_clk_src;
+ clk_pdata->ssp_clks = skl_ssp_clks;
+ clk_pdata->num_clks = ARRAY_SIZE(skl_ssp_clks);
+
+ /* Query NHLT to fill in the rates and parents */
+ skl_get_clks(skl, clk_pdata->ssp_clks);
+ clk_pdata->pvt_data = skl;
+
+ /* Register Platform device */
+ pdevinfo.parent = &skl->pci->dev;
+ pdevinfo.id = -1;
+ pdevinfo.name = "skl-ssp-clk";
+ pdevinfo.data = clk_pdata;
+ pdevinfo.size_data = sizeof(*clk_pdata);
+ skl->clk_dev = platform_device_register_full(&pdevinfo);
+ return PTR_ERR_OR_ZERO(skl->clk_dev);
+}
+
+static void skl_clock_device_unregister(struct skl *skl)
+{
+ if (skl->clk_dev)
+ platform_device_unregister(skl->clk_dev);
+}
+
/*
* Probe the given codec address
*/
@@ -615,18 +713,30 @@ static void skl_probe_work(struct work_struct *work)
/* create codec instances */
skl_codec_create(ebus);
+ /* register platform dai and controls */
+ err = skl_platform_register(bus->dev);
+ if (err < 0) {
+ dev_err(bus->dev, "platform register failed: %d\n", err);
+ return;
+ }
+
+ if (bus->ppcap) {
+ err = skl_machine_device_register(skl);
+ if (err < 0) {
+ dev_err(bus->dev, "machine register failed: %d\n", err);
+ goto out_err;
+ }
+ }
+
if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
err = snd_hdac_display_power(bus, false);
if (err < 0) {
dev_err(bus->dev, "Cannot turn off display power on i915\n");
+ skl_machine_device_unregister(skl);
return;
}
}
- /* register platform dai and controls */
- err = skl_platform_register(bus->dev);
- if (err < 0)
- return;
/*
* we are done probing so decrement link counts
*/
@@ -791,18 +901,21 @@ static int skl_probe(struct pci_dev *pci,
/* check if dsp is there */
if (bus->ppcap) {
- err = skl_machine_device_register(skl,
- (void *)pci_id->driver_data);
+ /* create device for dsp clk */
+ err = skl_clock_device_register(skl);
+ if (err < 0)
+ goto out_clk_free;
+
+ err = skl_find_machine(skl, (void *)pci_id->driver_data);
if (err < 0)
goto out_nhlt_free;
err = skl_init_dsp(skl);
if (err < 0) {
dev_dbg(bus->dev, "error failed to register dsp\n");
- goto out_mach_free;
+ goto out_nhlt_free;
}
skl->skl_sst->enable_miscbdcge = skl_enable_miscbdcge;
-
}
if (bus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(ebus);
@@ -820,8 +933,8 @@ static int skl_probe(struct pci_dev *pci,
out_dsp_free:
skl_free_dsp(skl);
-out_mach_free:
- skl_machine_device_unregister(skl);
+out_clk_free:
+ skl_clock_device_unregister(skl);
out_nhlt_free:
skl_nhlt_free(skl->nhlt);
out_free:
@@ -872,6 +985,7 @@ static void skl_remove(struct pci_dev *pci)
skl_free_dsp(skl);
skl_machine_device_unregister(skl);
skl_dmic_device_unregister(skl);
+ skl_clock_device_unregister(skl);
skl_nhlt_remove_sysfs(skl);
skl_nhlt_free(skl->nhlt);
skl_free(ebus);
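The skl_ssp_clks[] table and the "skl-ssp-clk" platform device mean the clocks themselves are registered by a separate provider driver that receives skl_clk_pdata. A hedged sketch of what a consumer lookup might look like; only the clock name comes from the table, the device pointer, rate and error handling here are assumptions:

    struct clk *mclk = clk_get(dev, "ssp0_mclk");

    if (IS_ERR(mclk))
        return PTR_ERR(mclk);

    /* usable rates are constrained by the rate_cfg[] filled from NHLT */
    clk_set_rate(mclk, 24576000);
    clk_prepare_enable(mclk);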
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index e00cde8200dd..f411579bc713 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -25,9 +25,12 @@
#include <sound/hdaudio_ext.h>
#include <sound/soc.h>
#include "skl-nhlt.h"
+#include "skl-ssp-clk.h"
#define SKL_SUSPEND_DELAY 2000
+#define SKL_MAX_ASTATE_CFG 3
+
#define AZX_PCIREG_PGCTL 0x44
#define AZX_PGCTL_LSRMD_MASK (1 << 4)
#define AZX_PCIREG_CGCTL 0x48
@@ -45,6 +48,20 @@ struct skl_dsp_resource {
struct skl_debug;
+struct skl_astate_param {
+ u32 kcps;
+ u32 clk_src;
+};
+
+struct skl_astate_config {
+ u32 count;
+ struct skl_astate_param astate_table[0];
+};
+
+struct skl_fw_config {
+ struct skl_astate_config *astate_cfg;
+};
+
struct skl {
struct hdac_ext_bus ebus;
struct pci_dev *pci;
@@ -52,6 +69,7 @@ struct skl {
unsigned int init_done:1; /* delayed init status */
struct platform_device *dmic_dev;
struct platform_device *i2s_dev;
+ struct platform_device *clk_dev;
struct snd_soc_platform *platform;
struct snd_soc_dai_driver *dais;
@@ -75,6 +93,8 @@ struct skl {
u8 nr_modules;
struct skl_module **modules;
bool use_tplg_pcm;
+ struct skl_fw_config cfg;
+ struct snd_soc_acpi_mach *mach;
};
#define skl_to_ebus(s) (&(s)->ebus)
@@ -125,6 +145,8 @@ const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id);
void skl_update_d0i3c(struct device *dev, bool enable);
int skl_nhlt_create_sysfs(struct skl *skl);
void skl_nhlt_remove_sysfs(struct skl *skl);
+void skl_get_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks);
+struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id);
struct skl_module_cfg;
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
index affa7fb25dd9..949fc3a1d025 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
@@ -14,451 +14,285 @@
* GNU General Public License for more details.
*/
-#include <sound/soc.h>
-#include <linux/regmap.h>
-#include <linux/pm_runtime.h>
-
#include "mt2701-afe-common.h"
#include "mt2701-afe-clock-ctrl.h"
-static const char *aud_clks[MT2701_CLOCK_NUM] = {
- [MT2701_AUD_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
- [MT2701_AUD_AUD_MUX1_SEL] = "top_audio_mux1_sel",
- [MT2701_AUD_AUD_MUX2_SEL] = "top_audio_mux2_sel",
- [MT2701_AUD_AUD_MUX1_DIV] = "top_audio_mux1_div",
- [MT2701_AUD_AUD_MUX2_DIV] = "top_audio_mux2_div",
- [MT2701_AUD_AUD_48K_TIMING] = "top_audio_48k_timing",
- [MT2701_AUD_AUD_44K_TIMING] = "top_audio_44k_timing",
- [MT2701_AUD_AUDPLL_MUX_SEL] = "top_audpll_mux_sel",
- [MT2701_AUD_APLL_SEL] = "top_apll_sel",
- [MT2701_AUD_AUD1PLL_98M] = "top_aud1_pll_98M",
- [MT2701_AUD_AUD2PLL_90M] = "top_aud2_pll_90M",
- [MT2701_AUD_HADDS2PLL_98M] = "top_hadds2_pll_98M",
- [MT2701_AUD_HADDS2PLL_294M] = "top_hadds2_pll_294M",
- [MT2701_AUD_AUDPLL] = "top_audpll",
- [MT2701_AUD_AUDPLL_D4] = "top_audpll_d4",
- [MT2701_AUD_AUDPLL_D8] = "top_audpll_d8",
- [MT2701_AUD_AUDPLL_D16] = "top_audpll_d16",
- [MT2701_AUD_AUDPLL_D24] = "top_audpll_d24",
- [MT2701_AUD_AUDINTBUS] = "top_audintbus_sel",
- [MT2701_AUD_CLK_26M] = "clk_26m",
- [MT2701_AUD_SYSPLL1_D4] = "top_syspll1_d4",
- [MT2701_AUD_AUD_K1_SRC_SEL] = "top_aud_k1_src_sel",
- [MT2701_AUD_AUD_K2_SRC_SEL] = "top_aud_k2_src_sel",
- [MT2701_AUD_AUD_K3_SRC_SEL] = "top_aud_k3_src_sel",
- [MT2701_AUD_AUD_K4_SRC_SEL] = "top_aud_k4_src_sel",
- [MT2701_AUD_AUD_K5_SRC_SEL] = "top_aud_k5_src_sel",
- [MT2701_AUD_AUD_K6_SRC_SEL] = "top_aud_k6_src_sel",
- [MT2701_AUD_AUD_K1_SRC_DIV] = "top_aud_k1_src_div",
- [MT2701_AUD_AUD_K2_SRC_DIV] = "top_aud_k2_src_div",
- [MT2701_AUD_AUD_K3_SRC_DIV] = "top_aud_k3_src_div",
- [MT2701_AUD_AUD_K4_SRC_DIV] = "top_aud_k4_src_div",
- [MT2701_AUD_AUD_K5_SRC_DIV] = "top_aud_k5_src_div",
- [MT2701_AUD_AUD_K6_SRC_DIV] = "top_aud_k6_src_div",
- [MT2701_AUD_AUD_I2S1_MCLK] = "top_aud_i2s1_mclk",
- [MT2701_AUD_AUD_I2S2_MCLK] = "top_aud_i2s2_mclk",
- [MT2701_AUD_AUD_I2S3_MCLK] = "top_aud_i2s3_mclk",
- [MT2701_AUD_AUD_I2S4_MCLK] = "top_aud_i2s4_mclk",
- [MT2701_AUD_AUD_I2S5_MCLK] = "top_aud_i2s5_mclk",
- [MT2701_AUD_AUD_I2S6_MCLK] = "top_aud_i2s6_mclk",
- [MT2701_AUD_ASM_M_SEL] = "top_asm_m_sel",
- [MT2701_AUD_ASM_H_SEL] = "top_asm_h_sel",
- [MT2701_AUD_UNIVPLL2_D4] = "top_univpll2_d4",
- [MT2701_AUD_UNIVPLL2_D2] = "top_univpll2_d2",
- [MT2701_AUD_SYSPLL_D5] = "top_syspll_d5",
+static const char *const base_clks[] = {
+ [MT2701_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
+ [MT2701_TOP_AUD_MCLK_SRC0] = "top_audio_mux1_sel",
+ [MT2701_TOP_AUD_MCLK_SRC1] = "top_audio_mux2_sel",
+ [MT2701_TOP_AUD_A1SYS] = "top_audio_a1sys_hp",
+ [MT2701_TOP_AUD_A2SYS] = "top_audio_a2sys_hp",
+ [MT2701_AUDSYS_AFE] = "audio_afe_pd",
+ [MT2701_AUDSYS_AFE_CONN] = "audio_afe_conn_pd",
+ [MT2701_AUDSYS_A1SYS] = "audio_a1sys_pd",
+ [MT2701_AUDSYS_A2SYS] = "audio_a2sys_pd",
};
int mt2701_init_clock(struct mtk_base_afe *afe)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int i = 0;
-
- for (i = 0; i < MT2701_CLOCK_NUM; i++) {
- afe_priv->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
- if (IS_ERR(afe_priv->clocks[i])) {
- dev_warn(afe->dev, "%s devm_clk_get %s fail\n",
- __func__, aud_clks[i]);
- return PTR_ERR(aud_clks[i]);
+ int i;
+
+ for (i = 0; i < MT2701_BASE_CLK_NUM; i++) {
+ afe_priv->base_ck[i] = devm_clk_get(afe->dev, base_clks[i]);
+ if (IS_ERR(afe_priv->base_ck[i])) {
+ dev_err(afe->dev, "failed to get %s\n", base_clks[i]);
+ return PTR_ERR(afe_priv->base_ck[i]);
+ }
+ }
+
+ /* Get I2S related clocks */
+ for (i = 0; i < MT2701_I2S_NUM; i++) {
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i];
+ char name[13];
+
+ snprintf(name, sizeof(name), "i2s%d_src_sel", i);
+ i2s_path->sel_ck = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->sel_ck)) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->sel_ck);
+ }
+
+ snprintf(name, sizeof(name), "i2s%d_src_div", i);
+ i2s_path->div_ck = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->div_ck)) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->div_ck);
+ }
+
+ snprintf(name, sizeof(name), "i2s%d_mclk_en", i);
+ i2s_path->mclk_ck = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->mclk_ck)) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->mclk_ck);
+ }
+
+ snprintf(name, sizeof(name), "i2so%d_hop_ck", i);
+ i2s_path->hop_ck[I2S_OUT] = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->hop_ck[I2S_OUT])) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->hop_ck[I2S_OUT]);
+ }
+
+ snprintf(name, sizeof(name), "i2si%d_hop_ck", i);
+ i2s_path->hop_ck[I2S_IN] = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->hop_ck[I2S_IN])) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->hop_ck[I2S_IN]);
+ }
+
+ snprintf(name, sizeof(name), "asrc%d_out_ck", i);
+ i2s_path->asrco_ck = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->asrco_ck)) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->asrco_ck);
}
}
+ /* Some platforms may support BT path */
+ afe_priv->mrgif_ck = devm_clk_get(afe->dev, "audio_mrgif_pd");
+ if (IS_ERR(afe_priv->mrgif_ck)) {
+ if (PTR_ERR(afe_priv->mrgif_ck) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ afe_priv->mrgif_ck = NULL;
+ }
+
return 0;
}
-int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_i2s(struct mtk_base_afe *afe, int id, int dir)
{
- int ret = 0;
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
+ int ret;
- ret = mt2701_turn_on_a1sys_clock(afe);
+ ret = clk_prepare_enable(i2s_path->asrco_ck);
if (ret) {
- dev_err(afe->dev, "%s turn_on_a1sys_clock fail %d\n",
- __func__, ret);
+ dev_err(afe->dev, "failed to enable ASRC clock %d\n", ret);
return ret;
}
- ret = mt2701_turn_on_a2sys_clock(afe);
+ ret = clk_prepare_enable(i2s_path->hop_ck[dir]);
if (ret) {
- dev_err(afe->dev, "%s turn_on_a2sys_clock fail %d\n",
- __func__, ret);
- mt2701_turn_off_a1sys_clock(afe);
- return ret;
+ dev_err(afe->dev, "failed to enable I2S clock %d\n", ret);
+ goto err_hop_ck;
}
- ret = mt2701_turn_on_afe_clock(afe);
- if (ret) {
- dev_err(afe->dev, "%s turn_on_afe_clock fail %d\n",
- __func__, ret);
- mt2701_turn_off_a1sys_clock(afe);
- mt2701_turn_off_a2sys_clock(afe);
- return ret;
- }
+ return 0;
- regmap_update_bits(afe->regmap, ASYS_TOP_CON,
- AUDIO_TOP_CON0_A1SYS_A2SYS_ON,
- AUDIO_TOP_CON0_A1SYS_A2SYS_ON);
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- AFE_DAC_CON0_AFE_ON,
- AFE_DAC_CON0_AFE_ON);
- regmap_write(afe->regmap, PWR2_TOP_CON,
- PWR2_TOP_CON_INIT_VAL);
- regmap_write(afe->regmap, PWR1_ASM_CON1,
- PWR1_ASM_CON1_INIT_VAL);
- regmap_write(afe->regmap, PWR2_ASM_CON1,
- PWR2_ASM_CON1_INIT_VAL);
+err_hop_ck:
+ clk_disable_unprepare(i2s_path->asrco_ck);
- return 0;
+ return ret;
}
-void mt2701_afe_disable_clock(struct mtk_base_afe *afe)
+void mt2701_afe_disable_i2s(struct mtk_base_afe *afe, int id, int dir)
{
- mt2701_turn_off_afe_clock(afe);
- mt2701_turn_off_a1sys_clock(afe);
- mt2701_turn_off_a2sys_clock(afe);
- regmap_update_bits(afe->regmap, ASYS_TOP_CON,
- AUDIO_TOP_CON0_A1SYS_A2SYS_ON, 0);
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- AFE_DAC_CON0_AFE_ON, 0);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
+
+ clk_disable_unprepare(i2s_path->hop_ck[dir]);
+ clk_disable_unprepare(i2s_path->asrco_ck);
}
-int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_mclk(struct mtk_base_afe *afe, int id)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int ret = 0;
-
- /* Set Mux */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
- goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
- }
-
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL],
- afe_priv->clocks[MT2701_AUD_AUD1PLL_98M]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUD_MUX1_SEL],
- aud_clks[MT2701_AUD_AUD1PLL_98M], ret);
- goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
- }
-
- /* Set Divider */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__,
- aud_clks[MT2701_AUD_AUD_MUX1_DIV],
- ret);
- goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
- }
-
- ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV],
- MT2701_AUD_AUD_MUX1_DIV_RATE);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUD_MUX1_DIV],
- MT2701_AUD_AUD_MUX1_DIV_RATE, ret);
- goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
- }
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
- /* Enable clock gate */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_48K_TIMING], ret);
- goto A1SYS_CLK_AUD_48K_ERR;
- }
+ return clk_prepare_enable(i2s_path->mclk_ck);
+}
- /* Enable infra audio */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
- goto A1SYS_CLK_INFRA_ERR;
- }
+void mt2701_afe_disable_mclk(struct mtk_base_afe *afe, int id)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
- return 0;
+ clk_disable_unprepare(i2s_path->mclk_ck);
+}
-A1SYS_CLK_INFRA_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-A1SYS_CLK_AUD_48K_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
-A1SYS_CLK_AUD_MUX1_DIV_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
-A1SYS_CLK_AUD_MUX1_SEL_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+int mt2701_enable_btmrg_clk(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
- return ret;
+ return clk_prepare_enable(afe_priv->mrgif_ck);
}
-void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe)
+void mt2701_disable_btmrg_clk(struct mtk_base_afe *afe)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+ clk_disable_unprepare(afe_priv->mrgif_ck);
}
-int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe)
+static int mt2701_afe_enable_audsys(struct mtk_base_afe *afe)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int ret = 0;
+ int ret;
- /* Set Mux */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
- goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
- }
+ /* Enable infra clock gate */
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
+ if (ret)
+ return ret;
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL],
- afe_priv->clocks[MT2701_AUD_AUD2PLL_90M]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUD_MUX2_SEL],
- aud_clks[MT2701_AUD_AUD2PLL_90M], ret);
- goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
- }
+ /* Enable top a1sys clock gate */
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+ if (ret)
+ goto err_a1sys;
- /* Set Divider */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_MUX2_DIV], ret);
- goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
- }
+ /* Enable top a2sys clock gate */
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+ if (ret)
+ goto err_a2sys;
- ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV],
- MT2701_AUD_AUD_MUX2_DIV_RATE);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUD_MUX2_DIV],
- MT2701_AUD_AUD_MUX2_DIV_RATE, ret);
- goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
- }
+ /* Internal clock gates */
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+ if (ret)
+ goto err_afe;
- /* Enable clock gate */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_44K_TIMING], ret);
- goto A2SYS_CLK_AUD_44K_ERR;
- }
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+ if (ret)
+ goto err_audio_a1sys;
- /* Enable infra audio */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
- goto A2SYS_CLK_INFRA_ERR;
- }
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+ if (ret)
+ goto err_audio_a2sys;
+
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_AFE_CONN]);
+ if (ret)
+ goto err_afe_conn;
return 0;
-A2SYS_CLK_INFRA_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-A2SYS_CLK_AUD_44K_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
-A2SYS_CLK_AUD_MUX2_DIV_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
-A2SYS_CLK_AUD_MUX2_SEL_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+err_afe_conn:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+err_audio_a2sys:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+err_audio_a1sys:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+err_afe:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+err_a2sys:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+err_a1sys:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
return ret;
}
-void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe)
+static void mt2701_afe_disable_audsys(struct mtk_base_afe *afe)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE_CONN]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
}
-int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
{
- struct mt2701_afe_private *afe_priv = afe->platform_priv;
int ret;
- /* enable INFRA_SYS */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
- goto AFE_AUD_INFRA_ERR;
- }
-
- /* Set MT2701_AUD_AUDINTBUS to MT2701_AUD_SYSPLL1_D4 */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUDINTBUS], ret);
- goto AFE_AUD_AUDINTBUS_ERR;
- }
-
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUDINTBUS],
- afe_priv->clocks[MT2701_AUD_SYSPLL1_D4]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUDINTBUS],
- aud_clks[MT2701_AUD_SYSPLL1_D4], ret);
- goto AFE_AUD_AUDINTBUS_ERR;
- }
-
- /* Set MT2701_AUD_ASM_H_SEL to MT2701_AUD_UNIVPLL2_D2 */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+ /* Enable audio system */
+ ret = mt2701_afe_enable_audsys(afe);
if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_ASM_H_SEL], ret);
- goto AFE_AUD_ASM_H_ERR;
- }
-
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_H_SEL],
- afe_priv->clocks[MT2701_AUD_UNIVPLL2_D2]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_ASM_H_SEL],
- aud_clks[MT2701_AUD_UNIVPLL2_D2], ret);
- goto AFE_AUD_ASM_H_ERR;
- }
-
- /* Set MT2701_AUD_ASM_M_SEL to MT2701_AUD_UNIVPLL2_D4 */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_ASM_M_SEL], ret);
- goto AFE_AUD_ASM_M_ERR;
+ dev_err(afe->dev, "failed to enable audio system %d\n", ret);
+ return ret;
}
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_M_SEL],
- afe_priv->clocks[MT2701_AUD_UNIVPLL2_D4]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_ASM_M_SEL],
- aud_clks[MT2701_AUD_UNIVPLL2_D4], ret);
- goto AFE_AUD_ASM_M_ERR;
- }
+ regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+ ASYS_TOP_CON_ASYS_TIMING_ON,
+ ASYS_TOP_CON_ASYS_TIMING_ON);
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+ AFE_DAC_CON0_AFE_ON,
+ AFE_DAC_CON0_AFE_ON);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUDIO_TOP_CON0_PDN_AFE, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUDIO_TOP_CON0_PDN_APLL_CK, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_A1SYS, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_A2SYS, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_AFE_CONN, 0);
+ /* Configure ASRC */
+ regmap_write(afe->regmap, PWR1_ASM_CON1, PWR1_ASM_CON1_INIT_VAL);
+ regmap_write(afe->regmap, PWR2_ASM_CON1, PWR2_ASM_CON1_INIT_VAL);
return 0;
-
-AFE_AUD_ASM_M_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-AFE_AUD_ASM_H_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
-AFE_AUD_AUDINTBUS_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
-AFE_AUD_INFRA_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-
- return ret;
}
-void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe)
+int mt2701_afe_disable_clock(struct mtk_base_afe *afe)
{
- struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+ ASYS_TOP_CON_ASYS_TIMING_ON, 0);
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+ AFE_DAC_CON0_AFE_ON, 0);
+
+ mt2701_afe_disable_audsys(afe);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUDIO_TOP_CON0_PDN_AFE, AUDIO_TOP_CON0_PDN_AFE);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUDIO_TOP_CON0_PDN_APLL_CK,
- AUDIO_TOP_CON0_PDN_APLL_CK);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_A1SYS,
- AUDIO_TOP_CON4_PDN_A1SYS);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_A2SYS,
- AUDIO_TOP_CON4_PDN_A2SYS);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_AFE_CONN,
- AUDIO_TOP_CON4_PDN_AFE_CONN);
+ return 0;
}
void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
int mclk)
{
- struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ struct mt2701_afe_private *priv = afe->platform_priv;
+ struct mt2701_i2s_path *i2s_path = &priv->i2s_path[id];
int ret;
- int aud_src_div_id = MT2701_AUD_AUD_K1_SRC_DIV + id;
- int aud_src_clk_id = MT2701_AUD_AUD_K1_SRC_SEL + id;
- /* Set MCLK Kx_SRC_SEL(domain) */
- ret = clk_prepare_enable(afe_priv->clocks[aud_src_clk_id]);
- if (ret)
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[aud_src_clk_id], ret);
-
- if (domain == 0) {
- ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
- afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
- if (ret)
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
- __func__, aud_clks[aud_src_clk_id],
- aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
- } else {
- ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
- afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
- if (ret)
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
- __func__, aud_clks[aud_src_clk_id],
- aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
- }
- clk_disable_unprepare(afe_priv->clocks[aud_src_clk_id]);
+ /* Set mclk source */
+ if (domain == 0)
+ ret = clk_set_parent(i2s_path->sel_ck,
+ priv->base_ck[MT2701_TOP_AUD_MCLK_SRC0]);
+ else
+ ret = clk_set_parent(i2s_path->sel_ck,
+ priv->base_ck[MT2701_TOP_AUD_MCLK_SRC1]);
- /* Set MCLK Kx_SRC_DIV(divider) */
- ret = clk_prepare_enable(afe_priv->clocks[aud_src_div_id]);
if (ret)
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[aud_src_div_id], ret);
+ dev_err(afe->dev, "failed to set domain%d mclk source %d\n",
+ domain, ret);
- ret = clk_set_rate(afe_priv->clocks[aud_src_div_id], mclk);
+ /* Set mclk divider */
+ ret = clk_set_rate(i2s_path->div_ck, mclk);
if (ret)
- dev_err(afe->dev, "%s clk_set_rate %s-%d fail %d\n", __func__,
- aud_clks[aud_src_div_id], mclk, ret);
- clk_disable_unprepare(afe_priv->clocks[aud_src_div_id]);
+ dev_err(afe->dev, "failed to set mclk divider %d\n", ret);
}
-
-MODULE_DESCRIPTION("MT2701 afe clock control");
-MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
index 6497d570cf09..15417d9d6597 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
@@ -21,16 +21,15 @@ struct mtk_base_afe;
int mt2701_init_clock(struct mtk_base_afe *afe);
int mt2701_afe_enable_clock(struct mtk_base_afe *afe);
-void mt2701_afe_disable_clock(struct mtk_base_afe *afe);
+int mt2701_afe_disable_clock(struct mtk_base_afe *afe);
-int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe);
+int mt2701_afe_enable_i2s(struct mtk_base_afe *afe, int id, int dir);
+void mt2701_afe_disable_i2s(struct mtk_base_afe *afe, int id, int dir);
+int mt2701_afe_enable_mclk(struct mtk_base_afe *afe, int id);
+void mt2701_afe_disable_mclk(struct mtk_base_afe *afe, int id);
-int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe);
-
-int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe);
+int mt2701_enable_btmrg_clk(struct mtk_base_afe *afe);
+void mt2701_disable_btmrg_clk(struct mtk_base_afe *afe);
void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
int mclk);
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-common.h b/sound/soc/mediatek/mt2701/mt2701-afe-common.h
index c19430e98adf..ae8ddeacfbfe 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-common.h
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-common.h
@@ -16,6 +16,7 @@
#ifndef _MT_2701_AFE_COMMON_H_
#define _MT_2701_AFE_COMMON_H_
+
#include <sound/soc.h>
#include <linux/clk.h>
#include <linux/regmap.h>
@@ -25,16 +26,7 @@
#define MT2701_STREAM_DIR_NUM (SNDRV_PCM_STREAM_LAST + 1)
#define MT2701_PLL_DOMAIN_0_RATE 98304000
#define MT2701_PLL_DOMAIN_1_RATE 90316800
-#define MT2701_AUD_AUD_MUX1_DIV_RATE (MT2701_PLL_DOMAIN_0_RATE / 2)
-#define MT2701_AUD_AUD_MUX2_DIV_RATE (MT2701_PLL_DOMAIN_1_RATE / 2)
-
-enum {
- MT2701_I2S_1,
- MT2701_I2S_2,
- MT2701_I2S_3,
- MT2701_I2S_4,
- MT2701_I2S_NUM,
-};
+#define MT2701_I2S_NUM 4
enum {
MT2701_MEMIF_DL1,
@@ -62,60 +54,23 @@ enum {
};
enum {
- MT2701_IRQ_ASYS_START,
- MT2701_IRQ_ASYS_IRQ1 = MT2701_IRQ_ASYS_START,
+ MT2701_IRQ_ASYS_IRQ1,
MT2701_IRQ_ASYS_IRQ2,
MT2701_IRQ_ASYS_IRQ3,
MT2701_IRQ_ASYS_END,
};
-/* 2701 clock def */
-enum audio_system_clock_type {
- MT2701_AUD_INFRA_SYS_AUDIO,
- MT2701_AUD_AUD_MUX1_SEL,
- MT2701_AUD_AUD_MUX2_SEL,
- MT2701_AUD_AUD_MUX1_DIV,
- MT2701_AUD_AUD_MUX2_DIV,
- MT2701_AUD_AUD_48K_TIMING,
- MT2701_AUD_AUD_44K_TIMING,
- MT2701_AUD_AUDPLL_MUX_SEL,
- MT2701_AUD_APLL_SEL,
- MT2701_AUD_AUD1PLL_98M,
- MT2701_AUD_AUD2PLL_90M,
- MT2701_AUD_HADDS2PLL_98M,
- MT2701_AUD_HADDS2PLL_294M,
- MT2701_AUD_AUDPLL,
- MT2701_AUD_AUDPLL_D4,
- MT2701_AUD_AUDPLL_D8,
- MT2701_AUD_AUDPLL_D16,
- MT2701_AUD_AUDPLL_D24,
- MT2701_AUD_AUDINTBUS,
- MT2701_AUD_CLK_26M,
- MT2701_AUD_SYSPLL1_D4,
- MT2701_AUD_AUD_K1_SRC_SEL,
- MT2701_AUD_AUD_K2_SRC_SEL,
- MT2701_AUD_AUD_K3_SRC_SEL,
- MT2701_AUD_AUD_K4_SRC_SEL,
- MT2701_AUD_AUD_K5_SRC_SEL,
- MT2701_AUD_AUD_K6_SRC_SEL,
- MT2701_AUD_AUD_K1_SRC_DIV,
- MT2701_AUD_AUD_K2_SRC_DIV,
- MT2701_AUD_AUD_K3_SRC_DIV,
- MT2701_AUD_AUD_K4_SRC_DIV,
- MT2701_AUD_AUD_K5_SRC_DIV,
- MT2701_AUD_AUD_K6_SRC_DIV,
- MT2701_AUD_AUD_I2S1_MCLK,
- MT2701_AUD_AUD_I2S2_MCLK,
- MT2701_AUD_AUD_I2S3_MCLK,
- MT2701_AUD_AUD_I2S4_MCLK,
- MT2701_AUD_AUD_I2S5_MCLK,
- MT2701_AUD_AUD_I2S6_MCLK,
- MT2701_AUD_ASM_M_SEL,
- MT2701_AUD_ASM_H_SEL,
- MT2701_AUD_UNIVPLL2_D4,
- MT2701_AUD_UNIVPLL2_D2,
- MT2701_AUD_SYSPLL_D5,
- MT2701_CLOCK_NUM
+enum audio_base_clock {
+ MT2701_INFRA_SYS_AUDIO,
+ MT2701_TOP_AUD_MCLK_SRC0,
+ MT2701_TOP_AUD_MCLK_SRC1,
+ MT2701_TOP_AUD_A1SYS,
+ MT2701_TOP_AUD_A2SYS,
+ MT2701_AUDSYS_AFE,
+ MT2701_AUDSYS_AFE_CONN,
+ MT2701_AUDSYS_A1SYS,
+ MT2701_AUDSYS_A2SYS,
+ MT2701_BASE_CLK_NUM,
};
static const unsigned int mt2701_afe_backup_list[] = {
@@ -139,12 +94,8 @@ static const unsigned int mt2701_afe_backup_list[] = {
AFE_MEMIF_PBUF_SIZE,
};
-struct snd_pcm_substream;
-struct mtk_base_irq_data;
-
struct mt2701_i2s_data {
int i2s_ctrl_reg;
- int i2s_pwn_shift;
int i2s_asrc_fs_shift;
int i2s_asrc_fs_mask;
};
@@ -160,12 +111,18 @@ struct mt2701_i2s_path {
int mclk_rate;
int on[I2S_DIR_NUM];
int occupied[I2S_DIR_NUM];
- const struct mt2701_i2s_data *i2s_data[2];
+ const struct mt2701_i2s_data *i2s_data[I2S_DIR_NUM];
+ struct clk *hop_ck[I2S_DIR_NUM];
+ struct clk *sel_ck;
+ struct clk *div_ck;
+ struct clk *mclk_ck;
+ struct clk *asrco_ck;
};
struct mt2701_afe_private {
- struct clk *clocks[MT2701_CLOCK_NUM];
struct mt2701_i2s_path i2s_path[MT2701_I2S_NUM];
+ struct clk *base_ck[MT2701_BASE_CLK_NUM];
+ struct clk *mrgif_ck;
bool mrg_enable[MT2701_STREAM_DIR_NUM];
};
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
index 8fda182f849b..5bc4e00a4a29 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
@@ -17,19 +17,16 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
-#include <sound/soc.h>
#include "mt2701-afe-common.h"
-
#include "mt2701-afe-clock-ctrl.h"
#include "../common/mtk-afe-platform-driver.h"
#include "../common/mtk-afe-fe-dai.h"
-#define AFE_IRQ_STATUS_BITS 0xff
-
static const struct snd_pcm_hardware mt2701_afe_hardware = {
.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED
| SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID,
@@ -97,40 +94,26 @@ static int mt2701_afe_i2s_startup(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mt2701_afe_private *afe_priv = afe->platform_priv;
int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
- int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
- int ret = 0;
if (i2s_num < 0)
return i2s_num;
- /* enable mclk */
- ret = clk_prepare_enable(afe_priv->clocks[clk_num]);
- if (ret)
- dev_err(afe->dev, "Failed to enable mclk for I2S: %d\n",
- i2s_num);
-
- return ret;
+ return mt2701_afe_enable_mclk(afe, i2s_num);
}
static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai,
+ int i2s_num,
int dir_invert)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
- struct mt2701_i2s_path *i2s_path;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num];
const struct mt2701_i2s_data *i2s_data;
int stream_dir = substream->stream;
- if (i2s_num < 0)
- return i2s_num;
-
- i2s_path = &afe_priv->i2s_path[i2s_num];
-
if (dir_invert) {
if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
stream_dir = SNDRV_PCM_STREAM_CAPTURE;
@@ -151,9 +134,9 @@ static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
/* disable i2s */
regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
ASYS_I2S_CON_I2S_EN, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- 1 << i2s_data->i2s_pwn_shift,
- 1 << i2s_data->i2s_pwn_shift);
+
+ mt2701_afe_disable_i2s(afe, i2s_num, stream_dir);
+
return 0;
}
@@ -165,7 +148,6 @@ static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
struct mt2701_afe_private *afe_priv = afe->platform_priv;
int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
struct mt2701_i2s_path *i2s_path;
- int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
if (i2s_num < 0)
return;
@@ -177,37 +159,32 @@ static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
else
goto I2S_UNSTART;
- mt2701_afe_i2s_path_shutdown(substream, dai, 0);
+ mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 0);
/* need to disable i2s-out path when disable i2s-in */
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- mt2701_afe_i2s_path_shutdown(substream, dai, 1);
+ mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 1);
I2S_UNSTART:
/* disable mclk */
- clk_disable_unprepare(afe_priv->clocks[clk_num]);
+ mt2701_afe_disable_mclk(afe, i2s_num);
}
static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai,
+ int i2s_num,
int dir_invert)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
- struct mt2701_i2s_path *i2s_path;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num];
const struct mt2701_i2s_data *i2s_data;
struct snd_pcm_runtime * const runtime = substream->runtime;
int reg, fs, w_len = 1; /* now we support bck 64bits only */
int stream_dir = substream->stream;
unsigned int mask = 0, val = 0;
- if (i2s_num < 0)
- return i2s_num;
-
- i2s_path = &afe_priv->i2s_path[i2s_num];
-
if (dir_invert) {
if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
stream_dir = SNDRV_PCM_STREAM_CAPTURE;
@@ -251,9 +228,7 @@ static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
fs << i2s_data->i2s_asrc_fs_shift);
/* enable i2s */
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- 1 << i2s_data->i2s_pwn_shift,
- 0 << i2s_data->i2s_pwn_shift);
+ mt2701_afe_enable_i2s(afe, i2s_num, stream_dir);
/* reset i2s hw status before enable */
regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
@@ -300,13 +275,13 @@ static int mt2701_afe_i2s_prepare(struct snd_pcm_substream *substream,
mt2701_mclk_configuration(afe, i2s_num, clk_domain, mclk_rate);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- mt2701_i2s_path_prepare_enable(substream, dai, 0);
+ mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0);
} else {
/* need to enable i2s-out path when enable i2s-in */
/* prepare for another direction "out" */
- mt2701_i2s_path_prepare_enable(substream, dai, 1);
+ mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 1);
/* prepare for "in" */
- mt2701_i2s_path_prepare_enable(substream, dai, 0);
+ mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0);
}
return 0;
@@ -339,9 +314,11 @@ static int mt2701_btmrg_startup(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret;
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_MRGIF, 0);
+ ret = mt2701_enable_btmrg_clk(afe);
+ if (ret)
+ return ret;
afe_priv->mrg_enable[substream->stream] = 1;
return 0;
@@ -406,9 +383,7 @@ static void mt2701_btmrg_shutdown(struct snd_pcm_substream *substream,
AFE_MRGIF_CON_MRG_EN, 0);
regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
AFE_MRGIF_CON_MRG_I2S_EN, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_MRGIF,
- AUDIO_TOP_CON4_PDN_MRGIF);
+ mt2701_disable_btmrg_clk(afe);
}
afe_priv->mrg_enable[substream->stream] = 0;
}
@@ -574,7 +549,6 @@ static const struct snd_soc_dai_ops mt2701_single_memif_dai_ops = {
.hw_free = mtk_afe_fe_hw_free,
.prepare = mtk_afe_fe_prepare,
.trigger = mtk_afe_fe_trigger,
-
};
static const struct snd_soc_dai_ops mt2701_dlm_memif_dai_ops = {
@@ -915,31 +889,6 @@ static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s4[] = {
PWR2_TOP_CON, 19, 1, 0),
};
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc0[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc0 out Switch", AUDIO_TOP_CON4, 14, 1,
- 1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc1[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc1 out Switch", AUDIO_TOP_CON4, 15, 1,
- 1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc2[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc2 out Switch", PWR2_TOP_CON, 6, 1,
- 1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc3[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc3 out Switch", PWR2_TOP_CON, 7, 1,
- 1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc4[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc4 out Switch", PWR2_TOP_CON, 8, 1,
- 1),
-};
-
static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
/* inter-connections */
SND_SOC_DAPM_MIXER("I00", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -999,19 +948,6 @@ static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
SND_SOC_DAPM_MIXER("I18I19", SND_SOC_NOPM, 0, 0,
mt2701_afe_multi_ch_out_i2s3,
ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s3)),
-
- SND_SOC_DAPM_MIXER("ASRC_O0", SND_SOC_NOPM, 0, 0,
- mt2701_afe_multi_ch_out_asrc0,
- ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc0)),
- SND_SOC_DAPM_MIXER("ASRC_O1", SND_SOC_NOPM, 0, 0,
- mt2701_afe_multi_ch_out_asrc1,
- ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc1)),
- SND_SOC_DAPM_MIXER("ASRC_O2", SND_SOC_NOPM, 0, 0,
- mt2701_afe_multi_ch_out_asrc2,
- ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc2)),
- SND_SOC_DAPM_MIXER("ASRC_O3", SND_SOC_NOPM, 0, 0,
- mt2701_afe_multi_ch_out_asrc3,
- ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc3)),
};
static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
@@ -1021,7 +957,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
{"I2S0 Playback", NULL, "O15"},
{"I2S0 Playback", NULL, "O16"},
-
{"I2S1 Playback", NULL, "O17"},
{"I2S1 Playback", NULL, "O18"},
{"I2S2 Playback", NULL, "O19"},
@@ -1038,7 +973,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
{"I00", NULL, "I2S0 Capture"},
{"I01", NULL, "I2S0 Capture"},
-
{"I02", NULL, "I2S1 Capture"},
{"I03", NULL, "I2S1 Capture"},
/* I02,03 link to UL2, also need to open I2S0 */
@@ -1046,15 +980,10 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
{"I26", NULL, "BT Capture"},
- {"ASRC_O0", "Asrc0 out Switch", "DLM"},
- {"ASRC_O1", "Asrc1 out Switch", "DLM"},
- {"ASRC_O2", "Asrc2 out Switch", "DLM"},
- {"ASRC_O3", "Asrc3 out Switch", "DLM"},
-
- {"I12I13", "Multich I2S0 Out Switch", "ASRC_O0"},
- {"I14I15", "Multich I2S1 Out Switch", "ASRC_O1"},
- {"I16I17", "Multich I2S2 Out Switch", "ASRC_O2"},
- {"I18I19", "Multich I2S3 Out Switch", "ASRC_O3"},
+ {"I12I13", "Multich I2S0 Out Switch", "DLM"},
+ {"I14I15", "Multich I2S1 Out Switch", "DLM"},
+ {"I16I17", "Multich I2S2 Out Switch", "DLM"},
+ {"I18I19", "Multich I2S3 Out Switch", "DLM"},
{ "I12", NULL, "I12I13" },
{ "I13", NULL, "I12I13" },
@@ -1079,7 +1008,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
{ "O21", "I18 Switch", "I18" },
{ "O22", "I19 Switch", "I19" },
{ "O31", "I35 Switch", "I35" },
-
};
static const struct snd_soc_component_driver mt2701_afe_pcm_dai_component = {
@@ -1386,14 +1314,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
{
{
.i2s_ctrl_reg = ASYS_I2SO1_CON,
- .i2s_pwn_shift = 6,
.i2s_asrc_fs_shift = 0,
.i2s_asrc_fs_mask = 0x1f,
},
{
.i2s_ctrl_reg = ASYS_I2SIN1_CON,
- .i2s_pwn_shift = 0,
.i2s_asrc_fs_shift = 0,
.i2s_asrc_fs_mask = 0x1f,
@@ -1402,14 +1328,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
{
{
.i2s_ctrl_reg = ASYS_I2SO2_CON,
- .i2s_pwn_shift = 7,
.i2s_asrc_fs_shift = 5,
.i2s_asrc_fs_mask = 0x1f,
},
{
.i2s_ctrl_reg = ASYS_I2SIN2_CON,
- .i2s_pwn_shift = 1,
.i2s_asrc_fs_shift = 5,
.i2s_asrc_fs_mask = 0x1f,
@@ -1418,14 +1342,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
{
{
.i2s_ctrl_reg = ASYS_I2SO3_CON,
- .i2s_pwn_shift = 8,
.i2s_asrc_fs_shift = 10,
.i2s_asrc_fs_mask = 0x1f,
},
{
.i2s_ctrl_reg = ASYS_I2SIN3_CON,
- .i2s_pwn_shift = 2,
.i2s_asrc_fs_shift = 10,
.i2s_asrc_fs_mask = 0x1f,
@@ -1434,14 +1356,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
{
{
.i2s_ctrl_reg = ASYS_I2SO4_CON,
- .i2s_pwn_shift = 9,
.i2s_asrc_fs_shift = 15,
.i2s_asrc_fs_mask = 0x1f,
},
{
.i2s_ctrl_reg = ASYS_I2SIN4_CON,
- .i2s_pwn_shift = 3,
.i2s_asrc_fs_shift = 15,
.i2s_asrc_fs_mask = 0x1f,
@@ -1449,14 +1369,6 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
},
};
-static const struct regmap_config mt2701_afe_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = AFE_END_ADDR,
- .cache_type = REGCACHE_NONE,
-};
-
static irqreturn_t mt2701_asys_isr(int irq_id, void *dev)
{
int id;
@@ -1483,8 +1395,7 @@ static int mt2701_afe_runtime_suspend(struct device *dev)
{
struct mtk_base_afe *afe = dev_get_drvdata(dev);
- mt2701_afe_disable_clock(afe);
- return 0;
+ return mt2701_afe_disable_clock(afe);
}
static int mt2701_afe_runtime_resume(struct device *dev)
@@ -1496,21 +1407,22 @@ static int mt2701_afe_runtime_resume(struct device *dev)
static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
{
+ struct snd_soc_component *component;
struct mtk_base_afe *afe;
struct mt2701_afe_private *afe_priv;
- struct resource *res;
struct device *dev;
int i, irq_id, ret;
afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
if (!afe)
return -ENOMEM;
+
afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
GFP_KERNEL);
if (!afe->platform_priv)
return -ENOMEM;
- afe_priv = afe->platform_priv;
+ afe_priv = afe->platform_priv;
afe->dev = &pdev->dev;
dev = afe->dev;
@@ -1527,17 +1439,11 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
-
- if (IS_ERR(afe->base_addr))
- return PTR_ERR(afe->base_addr);
-
- afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
- &mt2701_afe_regmap_config);
- if (IS_ERR(afe->regmap))
+ afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(afe->regmap)) {
+ dev_err(dev, "could not get regmap from parent\n");
return PTR_ERR(afe->regmap);
+ }
mutex_init(&afe->irq_alloc_lock);
@@ -1545,7 +1451,6 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
afe->memif_size = MT2701_MEMIF_NUM;
afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
GFP_KERNEL);
-
if (!afe->memif)
return -ENOMEM;
@@ -1558,7 +1463,6 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
afe->irqs_size = MT2701_IRQ_ASYS_END;
afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
GFP_KERNEL);
-
if (!afe->irqs)
return -ENOMEM;
@@ -1573,10 +1477,15 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
= &mt2701_i2s_data[i][I2S_IN];
}
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ component->regmap = afe->regmap;
+
afe->mtk_afe_hardware = &mt2701_afe_hardware;
afe->memif_fs = mt2701_memif_fs;
afe->irq_fs = mt2701_irq_fs;
-
afe->reg_back_up_list = mt2701_afe_backup_list;
afe->reg_back_up_list_num = ARRAY_SIZE(mt2701_afe_backup_list);
afe->runtime_resume = mt2701_afe_runtime_resume;
@@ -1586,59 +1495,58 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
ret = mt2701_init_clock(afe);
if (ret) {
dev_err(dev, "init clock error\n");
- return ret;
+ goto err_init_clock;
}
platform_set_drvdata(pdev, afe);
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev))
- goto err_pm_disable;
- pm_runtime_get_sync(&pdev->dev);
- ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform);
+ pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev)) {
+ ret = mt2701_afe_runtime_resume(dev);
+ if (ret)
+ goto err_pm_disable;
+ }
+ pm_runtime_get_sync(dev);
+
+ ret = snd_soc_register_platform(dev, &mtk_afe_pcm_platform);
if (ret) {
dev_warn(dev, "err_platform\n");
goto err_platform;
}
- ret = snd_soc_register_component(&pdev->dev,
- &mt2701_afe_pcm_dai_component,
- mt2701_afe_pcm_dais,
- ARRAY_SIZE(mt2701_afe_pcm_dais));
+ ret = snd_soc_add_component(dev, component,
+ &mt2701_afe_pcm_dai_component,
+ mt2701_afe_pcm_dais,
+ ARRAY_SIZE(mt2701_afe_pcm_dais));
if (ret) {
dev_warn(dev, "err_dai_component\n");
goto err_dai_component;
}
- mt2701_afe_runtime_resume(&pdev->dev);
-
return 0;
err_dai_component:
- snd_soc_unregister_component(&pdev->dev);
-
+ snd_soc_unregister_platform(dev);
err_platform:
- snd_soc_unregister_platform(&pdev->dev);
-
+ pm_runtime_put_sync(dev);
err_pm_disable:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
+err_init_clock:
+ kfree(component);
return ret;
}
static int mt2701_afe_pcm_dev_remove(struct platform_device *pdev)
{
- struct mtk_base_afe *afe = platform_get_drvdata(pdev);
-
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
mt2701_afe_runtime_suspend(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
snd_soc_unregister_platform(&pdev->dev);
- /* disable afe clock */
- mt2701_afe_disable_clock(afe);
+
return 0;
}
@@ -1670,4 +1578,3 @@ module_platform_driver(mt2701_afe_pcm_driver);
MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 2701");
MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
MODULE_LICENSE("GPL v2");
-
diff --git a/sound/soc/mediatek/mt2701/mt2701-reg.h b/sound/soc/mediatek/mt2701/mt2701-reg.h
index bb62b1c55957..18e676974f22 100644
--- a/sound/soc/mediatek/mt2701/mt2701-reg.h
+++ b/sound/soc/mediatek/mt2701/mt2701-reg.h
@@ -17,17 +17,6 @@
#ifndef _MT2701_REG_H_
#define _MT2701_REG_H_
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/pm_runtime.h>
-#include <sound/soc.h>
-#include "mt2701-afe-common.h"
-
-/*****************************************************************************
- * R E G I S T E R D E F I N I T I O N
- *****************************************************************************/
#define AUDIO_TOP_CON0 0x0000
#define AUDIO_TOP_CON4 0x0010
#define AUDIO_TOP_CON5 0x0014
@@ -109,18 +98,6 @@
#define AFE_DAI_BASE 0x1370
#define AFE_DAI_CUR 0x137c
-/* AUDIO_TOP_CON0 (0x0000) */
-#define AUDIO_TOP_CON0_A1SYS_A2SYS_ON (0x3 << 0)
-#define AUDIO_TOP_CON0_PDN_AFE (0x1 << 2)
-#define AUDIO_TOP_CON0_PDN_APLL_CK (0x1 << 23)
-
-/* AUDIO_TOP_CON4 (0x0010) */
-#define AUDIO_TOP_CON4_I2SO1_PWN (0x1 << 6)
-#define AUDIO_TOP_CON4_PDN_A1SYS (0x1 << 21)
-#define AUDIO_TOP_CON4_PDN_A2SYS (0x1 << 22)
-#define AUDIO_TOP_CON4_PDN_AFE_CONN (0x1 << 23)
-#define AUDIO_TOP_CON4_PDN_MRGIF (0x1 << 25)
-
/* AFE_DAIBT_CON0 (0x001c) */
#define AFE_DAIBT_CON0_DAIBT_EN (0x1 << 0)
#define AFE_DAIBT_CON0_BT_FUNC_EN (0x1 << 1)
@@ -137,22 +114,8 @@
#define AFE_MRGIF_CON_I2S_MODE_MASK (0xf << 20)
#define AFE_MRGIF_CON_I2S_MODE_32K (0x4 << 20)
-/* ASYS_I2SO1_CON (0x061c) */
-#define ASYS_I2SO1_CON_FS (0x1f << 8)
-#define ASYS_I2SO1_CON_FS_SET(x) ((x) << 8)
-#define ASYS_I2SO1_CON_MULTI_CH (0x1 << 16)
-#define ASYS_I2SO1_CON_SIDEGEN (0x1 << 30)
-#define ASYS_I2SO1_CON_I2S_EN (0x1 << 0)
-/* 0:EIAJ 1:I2S */
-#define ASYS_I2SO1_CON_I2S_MODE (0x1 << 3)
-#define ASYS_I2SO1_CON_WIDE_MODE (0x1 << 1)
-#define ASYS_I2SO1_CON_WIDE_MODE_SET(x) ((x) << 1)
-
-/* PWR2_TOP_CON (0x0634) */
-#define PWR2_TOP_CON_INIT_VAL (0xffe1ffff)
-
-/* ASYS_IRQ_CLR (0x07c0) */
-#define ASYS_IRQ_CLR_ALL (0xffffffff)
+/* ASYS_TOP_CON (0x0600) */
+#define ASYS_TOP_CON_ASYS_TIMING_ON (0x3 << 0)
/* PWR2_ASM_CON1 (0x1070) */
#define PWR2_ASM_CON1_INIT_VAL (0x492492)
@@ -182,5 +145,4 @@
#define ASYS_I2S_CON_WIDE_MODE_SET(x) ((x) << 1)
#define ASYS_I2S_IN_PHASE_FIX (0x1 << 31)
-#define AFE_END_ADDR 0x15e0
#endif
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 8a643a35d3d4..c7f7f8add5d9 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -1083,7 +1083,7 @@ static int mt8173_afe_init_audio_clk(struct mtk_base_afe *afe)
static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
{
int ret, i;
- unsigned int irq_id;
+ int irq_id;
struct mtk_base_afe *afe;
struct mt8173_afe_private *afe_priv;
struct resource *res;
@@ -1105,9 +1105,9 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
afe->dev = &pdev->dev;
irq_id = platform_get_irq(pdev, 0);
- if (!irq_id) {
+ if (irq_id <= 0) {
dev_err(afe->dev, "np %s no irq\n", afe->dev->of_node->name);
- return -ENXIO;
+ return irq_id < 0 ? irq_id : -ENXIO;
}
ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
0, "Afe_ISR_Handle", (void *)afe);
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
index 99c15219dbc8..5a9a5482976e 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
@@ -37,8 +37,6 @@ static const struct snd_soc_dapm_route mt8173_rt5650_rt5514_routes[] = {
{"Sub DMIC1R", NULL, "Int Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
- {"Headset Mic", NULL, "micbias1"},
- {"Headset Mic", NULL, "micbias2"},
{"IN1P", NULL, "Headset Mic"},
{"IN1N", NULL, "Headset Mic"},
};
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
index 42de84ca8c84..b7248085ca04 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
@@ -40,8 +40,6 @@ static const struct snd_soc_dapm_route mt8173_rt5650_rt5676_routes[] = {
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
{"Headphone", NULL, "Sub AIF2TX"}, /* IF2 ADC to 5650 */
- {"Headset Mic", NULL, "micbias1"},
- {"Headset Mic", NULL, "micbias2"},
{"IN1P", NULL, "Headset Mic"},
{"IN1N", NULL, "Headset Mic"},
{"Sub AIF2RX", NULL, "Headset Mic"}, /* IF2 DAC from 5650 */
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index e69c141d8ed4..40ebefd625c1 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -51,8 +51,6 @@ static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
{"DMIC R1", NULL, "Int Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
- {"Headset Mic", NULL, "micbias1"},
- {"Headset Mic", NULL, "micbias2"},
{"IN1P", NULL, "Headset Mic"},
{"IN1N", NULL, "Headset Mic"},
};
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 2ed3240cc682..2b3f2408301a 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -93,6 +93,14 @@ static struct snd_soc_dai_link mxs_sgtl5000_dai[] = {
},
};
+static const struct snd_soc_dapm_widget mxs_sgtl5000_dapm_widgets[] = {
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_LINE("Line In Jack", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_SPK("Line Out Jack", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+};
+
static struct snd_soc_card mxs_sgtl5000 = {
.name = "mxs_sgtl5000",
.owner = THIS_MODULE,
@@ -141,10 +149,23 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev)
card->dev = &pdev->dev;
+ if (of_find_property(np, "audio-routing", NULL)) {
+ card->dapm_widgets = mxs_sgtl5000_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(mxs_sgtl5000_dapm_widgets);
+
+ ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse audio-routing (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
- ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ ret);
return ret;
}
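
The registration change above follows a widespread convention: -EPROBE_DEFER is an expected, transient outcome and must not be logged as an error, or the log fills with noise while dependencies come up. A hedged sketch of the idiom with a hypothetical helper name (much later kernels add dev_err_probe() for exactly this):

static int foo_register_card(struct device *dev, struct snd_soc_card *card)
{
	int ret = devm_snd_soc_register_card(dev, card);

	/* deferral is normal: probe will simply be retried later */
	if (ret && ret != -EPROBE_DEFER)
		dev_err(dev, "snd_soc_register_card failed (%d)\n", ret);

	return ret;
}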
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index b6615affe571..81b09d740ed9 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97,
/* polling the AC_R_FINISH */
while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH)
- && timeout--)
+ && --timeout)
mdelay(1);
if (!timeout) {
@@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
/* polling the AC_W_FINISH */
while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH)
- && timeout--)
+ && --timeout)
mdelay(1);
if (!timeout)
@@ -345,11 +345,10 @@ static int nuc900_ac97_drvprobe(struct platform_device *pdev)
goto out;
}
- nuc900_audio->irq_num = platform_get_irq(pdev, 0);
- if (!nuc900_audio->irq_num) {
- ret = -EBUSY;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto out;
- }
+ nuc900_audio->irq_num = ret;
nuc900_ac97_data = nuc900_audio;
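
Both nuc900-ac97.c polling hunks fix the same off-by-one: with the post-decrement form the counter ends at -1 when the loop expires, so the `if (!timeout)` check after the loop can never detect the timeout. A standalone demonstration (plain C, not driver code):

#include <stdio.h>
#include <stdbool.h>

static int poll_with(int timeout, bool post_decrement)
{
	bool done = false;	/* models hardware that never signals */

	if (post_decrement)
		while (!done && timeout--)
			;	/* mdelay(1) in the driver */
	else
		while (!done && --timeout)
			;	/* mdelay(1) in the driver */

	return timeout;
}

int main(void)
{
	/* old code: counter ends at -1, so `if (!timeout)` never fires */
	printf("timeout-- leaves %d\n", poll_with(100, true));
	/* fixed code: counter ends at 0 and the timeout is detected */
	printf("--timeout leaves %d\n", poll_with(100, false));
	return 0;
}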
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index d40219678700..cb72c1e57da0 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -105,7 +105,7 @@ static int ams_delta_set_audio_mode(struct snd_kcontrol *kcontrol,
int pin, changed = 0;
/* Refuse any mode changes if we are not able to control the codec. */
- if (!cx20442_codec->hw_write)
+ if (!cx20442_codec->component.card->pop_time)
return -EUNATCH;
if (ucontrol->value.enumerated.item[0] >= control->items)
@@ -345,7 +345,7 @@ static void cx81801_receive(struct tty_struct *tty,
if (!codec)
return;
- if (!codec->hw_write) {
+ if (!codec->component.card->pop_time) {
/* First modem response, complete setup procedure */
/* Initialize timer used for config pulse generation */
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 7a54e3083203..79d4dc785e5c 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -854,7 +854,7 @@ unlock:
return size;
}
-static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store);
+static DEVICE_ATTR_RW(dma_op_mode);
static const struct attribute *additional_attrs[] = {
&dev_attr_max_tx_thres.attr,
@@ -923,7 +923,7 @@ out:
return size;
}
-static DEVICE_ATTR(st_taps, 0644, st_taps_show, st_taps_store);
+static DEVICE_ATTR_RW(st_taps);
static const struct attribute *sidetone_attrs[] = {
&dev_attr_st_taps.attr,
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index d49adc822a11..704428735e3c 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -43,7 +43,7 @@ struct apq8016_sbc_data {
static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
struct snd_soc_dai_link *dai_link = rtd->dai_link;
struct snd_soc_card *card = rtd->card;
struct apq8016_sbc_data *pdata = snd_soc_card_get_drvdata(card);
@@ -92,7 +92,7 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
jack = pdata->jack.jack;
- snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
snd_jack_set_key(jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
snd_jack_set_key(jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
@@ -102,15 +102,15 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
for (i = 0 ; i < dai_link->num_codecs; i++) {
struct snd_soc_dai *dai = rtd->codec_dais[i];
- codec = dai->codec;
+ component = dai->component;
/* Set default mclk for internal codec */
- rval = snd_soc_codec_set_sysclk(codec, 0, 0, DEFAULT_MCLK_RATE,
+ rval = snd_soc_component_set_sysclk(component, 0, 0, DEFAULT_MCLK_RATE,
SND_SOC_CLOCK_IN);
if (rval != 0 && rval != -ENOTSUPP) {
dev_warn(card->dev, "Failed to set mclk: %d\n", rval);
return rval;
}
- rval = snd_soc_codec_set_jack(codec, &pdata->jack, NULL);
+ rval = snd_soc_component_set_jack(component, &pdata->jack, NULL);
if (rval != 0 && rval != -ENOTSUPP) {
dev_warn(card->dev, "Failed to set jack: %d\n", rval);
return rval;
diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
index d64fbbd50544..fa6cd1de828b 100644
--- a/sound/soc/rockchip/rk3399_gru_sound.c
+++ b/sound/soc/rockchip/rk3399_gru_sound.c
@@ -206,7 +206,8 @@ static int rockchip_sound_da7219_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- snd_jack_set_key(rockchip_sound_jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(
+ rockchip_sound_jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(
rockchip_sound_jack.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
snd_jack_set_key(
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 908211e1d6fc..950823d69e9c 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -328,6 +328,7 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
val |= I2S_CHN_4;
break;
case 2:
+ case 1:
val |= I2S_CHN_2;
break;
default:
@@ -460,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
},
.capture = {
.stream_name = "Capture",
- .channels_min = 2,
+ .channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = (SNDRV_PCM_FMTBIT_S8 |
@@ -504,6 +505,7 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg)
case I2S_INTCR:
case I2S_XFER:
case I2S_CLR:
+ case I2S_TXDR:
case I2S_RXDR:
case I2S_FIFOLR:
case I2S_INTSR:
@@ -518,6 +520,9 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
switch (reg) {
case I2S_INTSR:
case I2S_CLR:
+ case I2S_FIFOLR:
+ case I2S_TXDR:
+ case I2S_RXDR:
return true;
default:
return false;
@@ -527,6 +532,8 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
static bool rockchip_i2s_precious_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case I2S_RXDR:
+ return true;
default:
return false;
}
@@ -654,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
}
if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
- if (val >= 2 && val <= 8)
+ if (val >= 1 && val <= 8)
soc_dai->capture.channels_max = val;
}
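
The regmap flag changes above matter for correctness with a cached register map: volatile registers are always read from hardware instead of the regcache, and precious registers are never read speculatively (for example by debugfs), because reading I2S_RXDR pops a sample from the FIFO. A hedged sketch of the callbacks, reusing the driver's register names under hypothetical foo_* wrappers (regmap geometry assumed):

static bool foo_i2s_volatile_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case I2S_TXDR:		/* data registers must hit hardware */
	case I2S_RXDR:
	case I2S_FIFOLR:	/* FIFO level changes behind our back */
		return true;
	default:
		return false;
	}
}

static bool foo_i2s_precious_reg(struct device *dev, unsigned int reg)
{
	/* a read of RXDR consumes data, so nothing may read it casually */
	return reg == I2S_RXDR;
}

static const struct regmap_config foo_i2s_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.volatile_reg = foo_i2s_volatile_reg,
	.precious_reg = foo_i2s_precious_reg,
};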
diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c
index 34deba461ae1..0e66cd8ef2f9 100644
--- a/sound/soc/samsung/bells.c
+++ b/sound/soc/samsung/bells.c
@@ -60,13 +60,13 @@ static int bells_set_bias_level(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai *codec_dai;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
struct bells_drvdata *bells = card->drvdata;
int ret;
rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
codec_dai = rtd->codec_dai;
- codec = codec_dai->codec;
+ component = codec_dai->component;
if (dapm->dev != codec_dai->dev)
return 0;
@@ -76,7 +76,7 @@ static int bells_set_bias_level(struct snd_soc_card *card,
if (dapm->bias_level != SND_SOC_BIAS_STANDBY)
break;
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL1,
+ ret = snd_soc_component_set_pll(component, WM5102_FLL1,
ARIZONA_FLL_SRC_MCLK1,
MCLK_RATE,
bells->sysclk_rate);
@@ -84,7 +84,7 @@ static int bells_set_bias_level(struct snd_soc_card *card,
pr_err("Failed to start FLL: %d\n", ret);
if (bells->asyncclk_rate) {
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
+ ret = snd_soc_component_set_pll(component, WM5102_FLL2,
ARIZONA_FLL_SRC_AIF2BCLK,
BCLK2_RATE,
bells->asyncclk_rate);
@@ -106,27 +106,27 @@ static int bells_set_bias_level_post(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai *codec_dai;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
struct bells_drvdata *bells = card->drvdata;
int ret;
rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
codec_dai = rtd->codec_dai;
- codec = codec_dai->codec;
+ component = codec_dai->component;
if (dapm->dev != codec_dai->dev)
return 0;
switch (level) {
case SND_SOC_BIAS_STANDBY:
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL1, 0, 0, 0);
+ ret = snd_soc_component_set_pll(component, WM5102_FLL1, 0, 0, 0);
if (ret < 0) {
pr_err("Failed to stop FLL: %d\n", ret);
return ret;
}
if (bells->asyncclk_rate) {
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
+ ret = snd_soc_component_set_pll(component, WM5102_FLL2,
0, 0, 0);
if (ret < 0) {
pr_err("Failed to stop FLL: %d\n", ret);
@@ -148,8 +148,8 @@ static int bells_late_probe(struct snd_soc_card *card)
{
struct bells_drvdata *bells = card->drvdata;
struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_codec *wm0010;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *wm0010;
+ struct snd_soc_component *component;
struct snd_soc_dai *aif1_dai;
struct snd_soc_dai *aif2_dai;
struct snd_soc_dai *aif3_dai;
@@ -157,22 +157,22 @@ static int bells_late_probe(struct snd_soc_card *card)
int ret;
rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_AP_DSP].name);
- wm0010 = rtd->codec;
+ wm0010 = rtd->codec_dai->component;
rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
- codec = rtd->codec;
+ component = rtd->codec_dai->component;
aif1_dai = rtd->codec_dai;
- ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
+ ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_SYSCLK,
ARIZONA_CLK_SRC_FLL1,
bells->sysclk_rate,
SND_SOC_CLOCK_IN);
if (ret != 0) {
- dev_err(codec->dev, "Failed to set SYSCLK: %d\n", ret);
+ dev_err(component->dev, "Failed to set SYSCLK: %d\n", ret);
return ret;
}
- ret = snd_soc_codec_set_sysclk(wm0010, 0, 0, SYS_MCLK_RATE, 0);
+ ret = snd_soc_component_set_sysclk(wm0010, 0, 0, SYS_MCLK_RATE, 0);
if (ret != 0) {
dev_err(wm0010->dev, "Failed to set WM0010 clock: %d\n", ret);
return ret;
@@ -182,20 +182,20 @@ static int bells_late_probe(struct snd_soc_card *card)
if (ret != 0)
dev_err(aif1_dai->dev, "Failed to set AIF1 clock: %d\n", ret);
- ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_OPCLK, 0,
+ ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_OPCLK, 0,
SYS_MCLK_RATE, SND_SOC_CLOCK_OUT);
if (ret != 0)
- dev_err(codec->dev, "Failed to set OPCLK: %d\n", ret);
+ dev_err(component->dev, "Failed to set OPCLK: %d\n", ret);
if (card->num_rtd == DAI_CODEC_CP)
return 0;
- ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_ASYNCCLK,
+ ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_ASYNCCLK,
ARIZONA_CLK_SRC_FLL2,
bells->asyncclk_rate,
SND_SOC_CLOCK_IN);
if (ret != 0) {
- dev_err(codec->dev, "Failed to set ASYNCCLK: %d\n", ret);
+ dev_err(component->dev, "Failed to set ASYNCCLK: %d\n", ret);
return ret;
}
@@ -221,7 +221,7 @@ static int bells_late_probe(struct snd_soc_card *card)
return ret;
}
- ret = snd_soc_codec_set_sysclk(wm9081_dai->codec, WM9081_SYSCLK_MCLK,
+ ret = snd_soc_component_set_sysclk(wm9081_dai->component, WM9081_SYSCLK_MCLK,
0, SYS_MCLK_RATE, 0);
if (ret != 0) {
dev_err(wm9081_dai->dev, "Failed to set MCLK: %d\n", ret);
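
The whole bells.c hunk is a mechanical migration from the deprecated snd_soc_codec API to snd_soc_component: the component handle hangs off each DAI, and snd_soc_codec_set_pll()/snd_soc_codec_set_sysclk() map one-to-one onto their component counterparts. A minimal sketch of the pattern (hypothetical helper name, constants taken from the diff):

static int bells_start_fll1(struct snd_soc_dai *codec_dai,
			    unsigned int target_rate)
{
	/* was: struct snd_soc_codec *codec = codec_dai->codec; */
	struct snd_soc_component *component = codec_dai->component;

	return snd_soc_component_set_pll(component, WM5102_FLL1,
					 ARIZONA_FLL_SRC_MCLK1,
					 MCLK_RATE, target_rate);
}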
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f12a88a21dfa..64d5ecb86528 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -197,16 +197,27 @@ int rsnd_io_is_working(struct rsnd_dai_stream *io)
return 0;
}
-int rsnd_runtime_channel_original(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_original_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
- return runtime->channels;
+ /*
+ * params is non-NULL only while hw_params is being refined;
+ * see
+ * __rsnd_soc_hw_rule_rate()
+ * __rsnd_soc_hw_rule_channels()
+ */
+ if (params)
+ return params_channels(params);
+ else
+ return runtime->channels;
}
-int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params)
{
- int chan = rsnd_runtime_channel_original(io);
+ int chan = rsnd_runtime_channel_original_with_params(io, params);
struct rsnd_mod *ctu_mod = rsnd_io_to_mod_ctu(io);
if (ctu_mod) {
@@ -219,12 +230,13 @@ int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io)
return chan;
}
-int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params)
{
struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
int chan = rsnd_io_is_play(io) ?
- rsnd_runtime_channel_after_ctu(io) :
- rsnd_runtime_channel_original(io);
+ rsnd_runtime_channel_after_ctu_with_params(io, params) :
+ rsnd_runtime_channel_original_with_params(io, params);
/* Use Multi SSI */
if (rsnd_runtime_is_ssi_multi(io))
@@ -262,10 +274,10 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
struct device *dev = rsnd_priv_to_dev(priv);
- switch (runtime->sample_bits) {
+ switch (snd_pcm_format_width(runtime->format)) {
case 16:
return 8 << 16;
- case 32:
+ case 24:
return 0 << 16;
}
@@ -282,11 +294,12 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
struct rsnd_mod *target;
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
- u32 val = 0x76543210;
- u32 mask = ~0;
/*
- * *Hardware* L/R and *Software* L/R are inverted.
+ * *Hardware* L/R and *Software* L/R are inverted for 16bit data.
+ * 31..16 15...0
+ * HW: [L ch] [R ch]
+ * SW: [R ch] [L ch]
* We need to care about inversion timing to control
* Playback/Capture correctly.
* The point is [DVC] needs *Hardware* L/R, [MEM] needs *Software* L/R
@@ -313,27 +326,13 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
target = cmd ? cmd : ssiu;
}
- mask <<= runtime->channels * 4;
- val = val & mask;
-
- switch (runtime->sample_bits) {
- case 16:
- val |= 0x67452301 & ~mask;
- break;
- case 32:
- val |= 0x76543210 & ~mask;
- break;
- }
-
- /*
- * exchange channeles on SRC if possible,
- * otherwise, R/L volume settings on DVC
- * changes inverted channels
- */
- if (mod == target)
- return val;
- else
+ /* Non target mod or 24bit data needs normal DALIGN */
+ if ((snd_pcm_format_width(runtime->format) != 16) ||
+ (mod != target))
return 0x76543210;
+ /* Target mod needs inverted DALIGN when 16bit */
+ else
+ return 0x67452301;
}
u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
@@ -363,12 +362,8 @@ u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
* HW 24bit data is located as 0x******00
*
*/
- switch (runtime->sample_bits) {
- case 16:
+ if (snd_pcm_format_width(runtime->format) == 16)
return 0;
- case 32:
- break;
- }
for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
tmod = rsnd_io_to_mod(io, mods[i]);
@@ -616,8 +611,6 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
- rsnd_dai_stream_init(io, substream);
-
ret = rsnd_dai_call(init, io, priv);
if (ret < 0)
goto dai_trigger_end;
@@ -639,7 +632,6 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
ret |= rsnd_dai_call(quit, io, priv);
- rsnd_dai_stream_quit(io);
break;
default:
ret = -EINVAL;
@@ -784,8 +776,9 @@ static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
return snd_interval_refine(iv, &p);
}
-static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
+static int __rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule,
+ int is_play)
{
struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
@@ -793,25 +786,37 @@ static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai = rule->private;
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+ struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
/*
* possible sampling rate limitation is same as
* 2ch if it supports multi ssi
+ * and same as 8ch if TDM 6ch (see rsnd_ssi_config_init())
*/
ic = *ic_;
- if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
- ic.min = 2;
- ic.max = 2;
- }
+ ic.min =
+ ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
return rsnd_soc_hw_rule(priv, rsnd_soc_hw_rate_list,
ARRAY_SIZE(rsnd_soc_hw_rate_list),
&ic, ir);
}
+static int rsnd_soc_hw_rule_rate_playback(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ return __rsnd_soc_hw_rule_rate(params, rule, 1);
+}
+
+static int rsnd_soc_hw_rule_rate_capture(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ return __rsnd_soc_hw_rule_rate(params, rule, 0);
+}
-static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
+static int __rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule,
+ int is_play)
{
struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
@@ -819,22 +824,34 @@ static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai = rule->private;
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+ struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
/*
* possible sampling rate limitation is same as
* 2ch if it supports multi ssi
+ * and same as 8ch if TDM 6ch (see rsnd_ssi_config_init())
*/
ic = *ic_;
- if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
- ic.min = 2;
- ic.max = 2;
- }
+ ic.min =
+ ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
return rsnd_soc_hw_rule(priv, rsnd_soc_hw_channels_list,
ARRAY_SIZE(rsnd_soc_hw_channels_list),
ir, &ic);
}
+static int rsnd_soc_hw_rule_channels_playback(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ return __rsnd_soc_hw_rule_channels(params, rule, 1);
+}
+
+static int rsnd_soc_hw_rule_channels_capture(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ return __rsnd_soc_hw_rule_channels(params, rule, 0);
+}
+
static const struct snd_pcm_hardware rsnd_pcm_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP |
@@ -859,6 +876,8 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
int ret;
int i;
+ rsnd_dai_stream_init(io, substream);
+
/*
* Channel Limitation
* It depends on Platform design
@@ -886,11 +905,17 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
* It depends on Clock Master Mode
*/
if (rsnd_rdai_is_clk_master(rdai)) {
+ int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- rsnd_soc_hw_rule_rate, dai,
+ is_play ? rsnd_soc_hw_rule_rate_playback :
+ rsnd_soc_hw_rule_rate_capture,
+ dai,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- rsnd_soc_hw_rule_channels, dai,
+ is_play ? rsnd_soc_hw_rule_channels_playback :
+ rsnd_soc_hw_rule_channels_capture,
+ dai,
SNDRV_PCM_HW_PARAM_RATE, -1);
}
@@ -915,6 +940,8 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
* call rsnd_dai_call without spinlock
*/
rsnd_dai_call(nolock_stop, io, priv);
+
+ rsnd_dai_stream_quit(io);
}
static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
@@ -990,7 +1017,7 @@ of_node_compatible:
static void __rsnd_dai_probe(struct rsnd_priv *priv,
struct device_node *dai_np,
- int dai_i, int is_graph)
+ int dai_i)
{
struct device_node *playback, *capture;
struct rsnd_dai_stream *io_playback;
@@ -1089,13 +1116,13 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
dai_i = 0;
if (is_graph) {
for_each_endpoint_of_node(dai_node, dai_np) {
- __rsnd_dai_probe(priv, dai_np, dai_i, is_graph);
+ __rsnd_dai_probe(priv, dai_np, dai_i);
rsnd_ssi_parse_hdmi_connection(priv, dai_np, dai_i);
dai_i++;
}
} else {
for_each_child_of_node(dai_node, dai_np)
- __rsnd_dai_probe(priv, dai_np, dai_i++, is_graph);
+ __rsnd_dai_probe(priv, dai_np, dai_i++);
}
return 0;
@@ -1496,6 +1523,8 @@ static int rsnd_remove(struct platform_device *pdev)
};
int ret = 0, i;
+ snd_soc_disconnect_sync(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
for_each_rsnd_dai(rdai, priv, i) {
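
The rewritten rsnd_get_dalign() above reduces to a two-way choice, and the new comment pins down why: for 16-bit data the hardware and software disagree on which half-word holds the left channel, so the target module gets the swapped DALIGN value 0x67543210-style mapping (0x67452301) while everything else keeps the identity 0x76543210. A standalone C illustration of the half-word exchange the comment describes (plain user-space code, not driver code):

#include <stdio.h>
#include <stdint.h>

/* SW layout: [R ch][L ch]; HW layout: [L ch][R ch] (16 bits each) */
static uint32_t swap_lr16(uint32_t w)
{
	return (w << 16) | (w >> 16);
}

int main(void)
{
	uint32_t left = 0x1111, right = 0x2222;
	uint32_t sw_word = (right << 16) | left;	/* 0x22221111 */

	printf("sw=0x%08x hw=0x%08x\n", sw_word, swap_lr16(sw_word));
	return 0;
}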
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 4d750bdf8e24..41de23417c4a 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -71,25 +71,7 @@ static struct rsnd_mod mem = {
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
- bool elapsed = false;
- unsigned long flags;
-
- /*
- * Renesas sound Gen1 needs 1 DMAC,
- * Gen2 needs 2 DMAC.
- * In Gen2 case, it are Audio-DMAC, and Audio-DMAC-peri-peri.
- * But, Audio-DMAC-peri-peri doesn't have interrupt,
- * and this driver is assuming that here.
- */
- spin_lock_irqsave(&priv->lock, flags);
-
if (rsnd_io_is_working(io))
- elapsed = true;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (elapsed)
rsnd_dai_period_elapsed(io);
}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 57cd2bc773c2..ad6523595b0a 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -399,9 +399,18 @@ void rsnd_parse_connect_common(struct rsnd_dai *rdai,
struct device_node *playback,
struct device_node *capture);
-int rsnd_runtime_channel_original(struct rsnd_dai_stream *io);
-int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io);
-int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io);
+#define rsnd_runtime_channel_original(io) \
+ rsnd_runtime_channel_original_with_params(io, NULL)
+int rsnd_runtime_channel_original_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params);
+#define rsnd_runtime_channel_after_ctu(io) \
+ rsnd_runtime_channel_after_ctu_with_params(io, NULL)
+int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params);
+#define rsnd_runtime_channel_for_ssi(io) \
+ rsnd_runtime_channel_for_ssi_with_params(io, NULL)
+int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params);
int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io);
int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io);
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index cbf3bf312d23..97a9db892a8f 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -79,8 +79,8 @@ struct rsnd_ssi {
int irq;
unsigned int usrcnt;
+ /* for PIO */
int byte_pos;
- int period_pos;
int byte_per_period;
int next_period_byte;
};
@@ -371,11 +371,11 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
if (rsnd_io_is_play(io))
cr_own |= TRMD;
- switch (runtime->sample_bits) {
+ switch (snd_pcm_format_width(runtime->format)) {
case 16:
cr_own |= DWL_16;
break;
- case 32:
+ case 24:
cr_own |= DWL_24;
break;
}
@@ -414,63 +414,6 @@ static void rsnd_ssi_register_setup(struct rsnd_mod *mod)
ssi->cr_en);
}
-static void rsnd_ssi_pointer_init(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io)
-{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-
- ssi->byte_pos = 0;
- ssi->period_pos = 0;
- ssi->byte_per_period = runtime->period_size *
- runtime->channels *
- samples_to_bytes(runtime, 1);
- ssi->next_period_byte = ssi->byte_per_period;
-}
-
-static int rsnd_ssi_pointer_offset(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- int additional)
-{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
- int pos = ssi->byte_pos + additional;
-
- pos %= (runtime->periods * ssi->byte_per_period);
-
- return pos;
-}
-
-static bool rsnd_ssi_pointer_update(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- int byte)
-{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- bool ret = false;
- int byte_pos;
-
- byte_pos = ssi->byte_pos + byte;
-
- if (byte_pos >= ssi->next_period_byte) {
- struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-
- ssi->period_pos++;
- ssi->next_period_byte += ssi->byte_per_period;
-
- if (ssi->period_pos >= runtime->periods) {
- byte_pos = 0;
- ssi->period_pos = 0;
- ssi->next_period_byte = ssi->byte_per_period;
- }
-
- ret = true;
- }
-
- WRITE_ONCE(ssi->byte_pos, byte_pos);
-
- return ret;
-}
-
/*
* SSI mod common functions
*/
@@ -484,8 +427,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
if (!rsnd_ssi_is_run_mods(mod, io))
return 0;
- rsnd_ssi_pointer_init(mod, io);
-
ssi->usrcnt++;
rsnd_mod_power_on(mod);
@@ -656,6 +597,8 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod,
return 0;
}
+static bool rsnd_ssi_pio_interrupt(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io);
static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
@@ -674,30 +617,8 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
status = rsnd_ssi_status_get(mod);
/* PIO only */
- if (!is_dma && (status & DIRQ)) {
- struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
- u32 *buf = (u32 *)(runtime->dma_area +
- rsnd_ssi_pointer_offset(mod, io, 0));
- int shift = 0;
-
- switch (runtime->sample_bits) {
- case 32:
- shift = 8;
- break;
- }
-
- /*
- * 8/16/32 data can be assesse to TDR/RDR register
- * directly as 32bit data
- * see rsnd_ssi_init()
- */
- if (rsnd_io_is_play(io))
- rsnd_mod_write(mod, SSITDR, (*buf) << shift);
- else
- *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
-
- elapsed = rsnd_ssi_pointer_update(mod, io, sizeof(*buf));
- }
+ if (!is_dma && (status & DIRQ))
+ elapsed = rsnd_ssi_pio_interrupt(mod, io);
/* DMA only */
if (is_dma && (status & (UIRQ | OIRQ)))
@@ -835,7 +756,71 @@ static int rsnd_ssi_common_remove(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_ssi_pointer(struct rsnd_mod *mod,
+/*
+ * SSI PIO functions
+ */
+static bool rsnd_ssi_pio_interrupt(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io)
+{
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ u32 *buf = (u32 *)(runtime->dma_area + ssi->byte_pos);
+ int shift = 0;
+ int byte_pos;
+ bool elapsed = false;
+
+ if (snd_pcm_format_width(runtime->format) == 24)
+ shift = 8;
+
+ /*
+ * 8/16/32 bit data can be accessed in the TDR/RDR register
+ * directly as 32bit data
+ * see rsnd_ssi_init()
+ */
+ if (rsnd_io_is_play(io))
+ rsnd_mod_write(mod, SSITDR, (*buf) << shift);
+ else
+ *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
+
+ byte_pos = ssi->byte_pos + sizeof(*buf);
+
+ if (byte_pos >= ssi->next_period_byte) {
+ int period_pos = byte_pos / ssi->byte_per_period;
+
+ if (period_pos >= runtime->periods) {
+ byte_pos = 0;
+ period_pos = 0;
+ }
+
+ ssi->next_period_byte = (period_pos + 1) * ssi->byte_per_period;
+
+ elapsed = true;
+ }
+
+ WRITE_ONCE(ssi->byte_pos, byte_pos);
+
+ return elapsed;
+}
+
+static int rsnd_ssi_pio_init(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+{
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+ if (!rsnd_ssi_is_parent(mod, io)) {
+ ssi->byte_pos = 0;
+ ssi->byte_per_period = runtime->period_size *
+ runtime->channels *
+ samples_to_bytes(runtime, 1);
+ ssi->next_period_byte = ssi->byte_per_period;
+ }
+
+ return rsnd_ssi_init(mod, io, priv);
+}
+
+static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
snd_pcm_uframes_t *pointer)
{
@@ -851,12 +836,12 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.name = SSI_NAME,
.probe = rsnd_ssi_common_probe,
.remove = rsnd_ssi_common_remove,
- .init = rsnd_ssi_init,
+ .init = rsnd_ssi_pio_init,
.quit = rsnd_ssi_quit,
.start = rsnd_ssi_start,
.stop = rsnd_ssi_stop,
.irq = rsnd_ssi_irq,
- .pointer= rsnd_ssi_pointer,
+ .pointer = rsnd_ssi_pio_pointer,
.pcm_new = rsnd_ssi_pcm_new,
.hw_params = rsnd_ssi_hw_params,
};
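
The PIO refactoring above moves all position bookkeeping into rsnd_ssi_pio_interrupt() and drops the separate period_pos counter by deriving it from byte_pos. A standalone model of that arithmetic (plain C, not driver code) showing when a period elapses and where the buffer wraps:

#include <stdio.h>
#include <stdbool.h>

struct pio_pos {
	int byte_pos;		/* current position in the DMA area */
	int byte_per_period;
	int next_period_byte;	/* boundary that triggers an elapse */
	int periods;
};

static bool pio_advance(struct pio_pos *p, int bytes)
{
	int byte_pos = p->byte_pos + bytes;
	bool elapsed = false;

	if (byte_pos >= p->next_period_byte) {
		int period_pos = byte_pos / p->byte_per_period;

		if (period_pos >= p->periods) {	/* wrap the whole buffer */
			byte_pos = 0;
			period_pos = 0;
		}
		p->next_period_byte = (period_pos + 1) * p->byte_per_period;
		elapsed = true;
	}
	p->byte_pos = byte_pos;
	return elapsed;
}

int main(void)
{
	struct pio_pos p = { 0, 8, 8, 2 };	/* 2 periods of 8 bytes */
	int i;

	for (i = 0; i < 6; i++) {
		bool elapsed = pio_advance(&p, 4);	/* one u32 sample */

		printf("byte_pos=%d elapsed=%d\n", p.byte_pos, elapsed);
	}
	return 0;
}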
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index f21df28bc28e..3d7e1ff79139 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -16,79 +16,16 @@
#include <sound/soc-acpi.h>
-static acpi_status snd_soc_acpi_find_name(acpi_handle handle, u32 level,
- void *context, void **ret)
-{
- struct acpi_device *adev;
- const char *name = NULL;
-
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
-
- if (adev->status.present && adev->status.functional) {
- name = acpi_dev_name(adev);
- *(const char **)ret = name;
- return AE_CTRL_TERMINATE;
- }
-
- return AE_OK;
-}
-
-const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
-{
- const char *name = NULL;
- acpi_status status;
-
- status = acpi_get_devices(hid, snd_soc_acpi_find_name, NULL,
- (void **)&name);
-
- if (ACPI_FAILURE(status) || name[0] == '\0')
- return NULL;
-
- return name;
-}
-EXPORT_SYMBOL_GPL(snd_soc_acpi_find_name_from_hid);
-
-static acpi_status snd_soc_acpi_mach_match(acpi_handle handle, u32 level,
- void *context, void **ret)
-{
- unsigned long long sta;
- acpi_status status;
-
- *(bool *)context = true;
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
- *(bool *)context = false;
-
- return AE_OK;
-}
-
-bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN])
-{
- acpi_status status;
- bool found = false;
-
- status = acpi_get_devices(hid, snd_soc_acpi_mach_match, &found, NULL);
-
- if (ACPI_FAILURE(status))
- return false;
-
- return found;
-}
-EXPORT_SYMBOL_GPL(snd_soc_acpi_check_hid);
-
struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
{
struct snd_soc_acpi_mach *mach;
for (mach = machines; mach->id[0]; mach++) {
- if (snd_soc_acpi_check_hid(mach->id) == true) {
- if (mach->machine_quirk == NULL)
- return mach;
-
- if (mach->machine_quirk(mach) != NULL)
- return mach;
+ if (acpi_dev_present(mach->id, NULL, -1)) {
+ if (mach->machine_quirk)
+ mach = mach->machine_quirk(mach);
+ return mach;
}
}
return NULL;
@@ -163,7 +100,7 @@ struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
return mach;
for (i = 0; i < codec_list->num_codecs; i++) {
- if (snd_soc_acpi_check_hid(codec_list->codecs[i]) != true)
+ if (!acpi_dev_present(codec_list->codecs[i], NULL, -1))
return NULL;
}
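
The deleted helpers existed only to answer "is a device with this _HID present and enabled?", which acpi_dev_present() already answers while honouring the device status bits; NULL matches any unique ID and -1 any hardware revision. A sketch of the resulting lookup (hypothetical function name, mirroring the diff's quirk handling):

static struct snd_soc_acpi_mach *
foo_find_machine(struct snd_soc_acpi_mach *machines)
{
	struct snd_soc_acpi_mach *mach;

	for (mach = machines; mach->id[0]; mach++)
		if (acpi_dev_present(mach->id, NULL, -1))
			return mach->machine_quirk ?
			       mach->machine_quirk(mach) : mach;

	return NULL;
}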
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index d9b1e6417fb9..81232f4ab614 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -1096,7 +1096,6 @@ static struct snd_compr_ops soc_compr_dyn_ops = {
*/
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
{
- struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
@@ -1199,8 +1198,9 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
ret = snd_compress_new(rtd->card->snd_card, num, direction,
new_name, compr);
if (ret < 0) {
+ component = rtd->codec_dai->component;
pr_err("compress asoc: can't create compress for codec %s\n",
- codec->component.name);
+ component->name);
goto compr_err;
}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index c0edac80df34..88efc84f3e7b 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -173,7 +173,7 @@ static ssize_t codec_reg_show(struct device *dev,
return soc_codec_reg_show(rtd->codec, buf, PAGE_SIZE, 0);
}
-static DEVICE_ATTR(codec_reg, 0444, codec_reg_show, NULL);
+static DEVICE_ATTR_RO(codec_reg);
static ssize_t pmdown_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -213,7 +213,7 @@ static umode_t soc_dev_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_pmdown_time.attr)
return attr->mode; /* always visible */
- return rtd->codec ? attr->mode : 0; /* enabled only with codec */
+ return rtd->num_codecs ? attr->mode : 0; /* enabled only with codec */
}
static const struct attribute_group soc_dapm_dev_group = {
@@ -349,120 +349,84 @@ static void soc_init_codec_debugfs(struct snd_soc_component *component)
"ASoC: Failed to create codec register debugfs file\n");
}
-static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int codec_list_seq_show(struct seq_file *m, void *v)
{
- char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- ssize_t len, ret = 0;
struct snd_soc_codec *codec;
- if (!buf)
- return -ENOMEM;
-
mutex_lock(&client_mutex);
- list_for_each_entry(codec, &codec_list, list) {
- len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
- codec->component.name);
- if (len >= 0)
- ret += len;
- if (ret > PAGE_SIZE) {
- ret = PAGE_SIZE;
- break;
- }
- }
+ list_for_each_entry(codec, &codec_list, list)
+ seq_printf(m, "%s\n", codec->component.name);
mutex_unlock(&client_mutex);
- if (ret >= 0)
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
-
- kfree(buf);
+ return 0;
+}
- return ret;
+static int codec_list_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, codec_list_seq_show, NULL);
}
static const struct file_operations codec_list_fops = {
- .read = codec_list_read_file,
- .llseek = default_llseek,/* read accesses f_pos */
+ .open = codec_list_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int dai_list_seq_show(struct seq_file *m, void *v)
{
- char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- ssize_t len, ret = 0;
struct snd_soc_component *component;
struct snd_soc_dai *dai;
- if (!buf)
- return -ENOMEM;
-
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list) {
- list_for_each_entry(dai, &component->dai_list, list) {
- len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
- dai->name);
- if (len >= 0)
- ret += len;
- if (ret > PAGE_SIZE) {
- ret = PAGE_SIZE;
- break;
- }
- }
- }
+ list_for_each_entry(component, &component_list, list)
+ list_for_each_entry(dai, &component->dai_list, list)
+ seq_printf(m, "%s\n", dai->name);
mutex_unlock(&client_mutex);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
-
- kfree(buf);
+ return 0;
+}
- return ret;
+static int dai_list_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dai_list_seq_show, NULL);
}
static const struct file_operations dai_list_fops = {
- .read = dai_list_read_file,
- .llseek = default_llseek,/* read accesses f_pos */
+ .open = dai_list_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-static ssize_t platform_list_read_file(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+static int platform_list_seq_show(struct seq_file *m, void *v)
{
- char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- ssize_t len, ret = 0;
struct snd_soc_platform *platform;
- if (!buf)
- return -ENOMEM;
-
mutex_lock(&client_mutex);
- list_for_each_entry(platform, &platform_list, list) {
- len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
- platform->component.name);
- if (len >= 0)
- ret += len;
- if (ret > PAGE_SIZE) {
- ret = PAGE_SIZE;
- break;
- }
- }
+ list_for_each_entry(platform, &platform_list, list)
+ seq_printf(m, "%s\n", platform->component.name);
mutex_unlock(&client_mutex);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
-
- kfree(buf);
+ return 0;
+}
- return ret;
+static int platform_list_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, platform_list_seq_show, NULL);
}
static const struct file_operations platform_list_fops = {
- .read = platform_list_read_file,
- .llseek = default_llseek,/* read accesses f_pos */
+ .open = platform_list_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void soc_init_card_debugfs(struct snd_soc_card *card)
@@ -491,7 +455,6 @@ static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
debugfs_remove_recursive(card->debugfs_card_root);
}
-
static void snd_soc_debugfs_init(void)
{
snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL);
@@ -598,6 +561,7 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
return NULL;
}
+EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
const char *dai_link, int stream)
@@ -1392,6 +1356,17 @@ static int soc_init_dai_link(struct snd_soc_card *card,
return 0;
}
+void snd_soc_disconnect_sync(struct device *dev)
+{
+ struct snd_soc_component *component = snd_soc_lookup_component(dev, NULL);
+
+ if (!component || !component->card)
+ return;
+
+ snd_card_disconnect_sync(component->card->snd_card);
+}
+EXPORT_SYMBOL_GPL(snd_soc_disconnect_sync);
+
/**
* snd_soc_add_dai_link - Add a DAI link dynamically
* @card: The ASoC card to which the DAI link is added
@@ -1945,7 +1920,9 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
}
/* Flip the polarity for the "CPU" end of a CODEC<->CODEC link */
- if (cpu_dai->codec) {
+ /* the component which has non_legacy_dai_naming is Codec */
+ if (cpu_dai->codec ||
+ cpu_dai->component->driver->non_legacy_dai_naming) {
unsigned int inv_dai_fmt;
inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_MASTER_MASK;
@@ -3149,7 +3126,7 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
if (!dai->driver->ops)
dai->driver->ops = &null_dai_ops;
- list_add(&dai->list, &component->dai_list);
+ list_add_tail(&dai->list, &component->dai_list);
component->num_dai++;
dev_dbg(dev, "ASoC: Registered DAI '%s'\n", dai->name);
@@ -3176,8 +3153,6 @@ static int snd_soc_register_dais(struct snd_soc_component *component,
dev_dbg(dev, "ASoC: dai register %s #%zu\n", dev_name(dev), count);
- component->dai_drv = dai_drv;
-
for (i = 0; i < count; i++) {
dai = soc_add_dai(component, dai_drv + i,
@@ -4354,6 +4329,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
args,
dai_name);
} else {
+ struct snd_soc_dai *dai;
int id = -1;
switch (args->args_count) {
@@ -4375,7 +4351,14 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
ret = 0;
- *dai_name = pos->dai_drv[id].name;
+ /* find target DAI */
+ list_for_each_entry(dai, &pos->dai_list, list) {
+ if (id == 0)
+ break;
+ id--;
+ }
+
+ *dai_name = dai->driver->name;
if (!*dai_name)
*dai_name = pos->name;
}
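
Among the soc-core.c changes above, the debugfs conversion is the most reusable pattern: single_open() plus seq_printf() replaces a hand-rolled kmalloc(PAGE_SIZE) buffer, which both simplifies the code and removes the silent truncation at one page. A minimal sketch with hypothetical example_* names:

#include <linux/fs.h>
#include <linux/seq_file.h>

static int example_list_seq_show(struct seq_file *m, void *v)
{
	seq_puts(m, "entry\n");		/* one line per list element */
	return 0;
}

static int example_list_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_list_seq_show, NULL);
}

static const struct file_operations example_list_fops = {
	.open = example_list_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};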
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index a10b21cfc31e..d1977ced895f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2364,7 +2364,7 @@ static ssize_t dapm_widget_show(struct device *dev,
return count;
}
-static DEVICE_ATTR(dapm_widget, 0444, dapm_widget_show, NULL);
+static DEVICE_ATTR_RO(dapm_widget);
struct attribute *soc_dapm_dev_attrs[] = {
&dev_attr_dapm_widget.attr,
diff --git a/sound/soc/soc-io.c b/sound/soc/soc-io.c
index 20340ade20a7..2bc1c4c17896 100644
--- a/sound/soc/soc-io.c
+++ b/sound/soc/soc-io.c
@@ -34,6 +34,10 @@ int snd_soc_component_read(struct snd_soc_component *component,
ret = regmap_read(component->regmap, reg, val);
else if (component->read)
ret = component->read(component, reg, val);
+ else if (component->driver->read) {
+ *val = component->driver->read(component, reg);
+ ret = 0;
+ }
else
ret = -EIO;
@@ -70,6 +74,8 @@ int snd_soc_component_write(struct snd_soc_component *component,
return regmap_write(component->regmap, reg, val);
else if (component->write)
return component->write(component, reg, val);
+ else if (component->driver->write)
+ return component->driver->write(component, reg, val);
else
return -EIO;
}
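
The two soc-io.c hunks extend the I/O dispatch with a third fallback: legacy driver-level read/write callbacks. Note the asymmetry the new branch handles: driver->read() returns the value itself, so it has no way to report an error. A condensed sketch of the resulting read path (hypothetical function name):

static int foo_component_read(struct snd_soc_component *c,
			      unsigned int reg, unsigned int *val)
{
	if (c->regmap)
		return regmap_read(c->regmap, reg, val);
	if (c->read)
		return c->read(c, reg, val);
	if (c->driver->read) {
		/* legacy callback returns the value, not an error code */
		*val = c->driver->read(c, reg);
		return 0;
	}
	return -EIO;
}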
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 500f98c730b9..7144a51ddfa9 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -378,7 +378,7 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
unsigned int val;
int ret;
@@ -423,7 +423,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
int err = 0;
unsigned int val, val_mask, val2 = 0;
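
The soc-ops.c fix changes only the type of mask. One concrete way the signed version goes wrong: when min + max occupies all 32 bits, fls() returns 32 and (1 << 31) - 1 overflows a signed int, whereas unsigned arithmetic yields the intended mask. A standalone demonstration (fls() is open-coded here; the kernel provides it):

#include <stdio.h>

static int fls(unsigned int x)	/* open-coded; the kernel provides this */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int sum = 0x80000000u;	/* min + max using all 32 bits */
	unsigned int mask = (1u << (fls(sum) - 1)) - 1;

	/* signed: (1 << 31) overflows int; unsigned: 0x7fffffff */
	printf("fls=%d mask=0x%08x\n", fls(sum), mask);
	return 0;
}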
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index e30aacbcfc29..bcd3da2739e2 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -288,7 +288,7 @@ static const struct snd_soc_platform_driver dummy_platform = {
.ops = &dummy_dma_ops,
};
-static struct snd_soc_codec_driver dummy_codec;
+static const struct snd_soc_codec_driver dummy_codec;
#define STUB_RATES SNDRV_PCM_RATE_8000_192000
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
diff --git a/sound/soc/stm/Kconfig b/sound/soc/stm/Kconfig
index 3398e6c57f37..3ad881fc40a1 100644
--- a/sound/soc/stm/Kconfig
+++ b/sound/soc/stm/Kconfig
@@ -28,4 +28,16 @@ config SND_SOC_STM32_SPDIFRX
help
Say Y if you want to enable S/PDIF capture for STM32
+config SND_SOC_STM32_DFSDM
+ tristate "SoC Audio support for STM32 DFSDM"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on SND_SOC
+ depends on STM32_DFSDM_ADC
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ select SND_SOC_DMIC
+ select IIO_BUFFER_CB
+ help
+ Select this option to enable the STM32 Digital Filter
+ for Sigma Delta Modulators (DFSDM) driver used
+ in various STM32 series for digital microphone capture.
endmenu
diff --git a/sound/soc/stm/Makefile b/sound/soc/stm/Makefile
index 5b7f0fab0bd6..3143c0b47042 100644
--- a/sound/soc/stm/Makefile
+++ b/sound/soc/stm/Makefile
@@ -13,3 +13,6 @@ obj-$(CONFIG_SND_SOC_STM32_I2S) += snd-soc-stm32-i2s.o
# SPDIFRX
snd-soc-stm32-spdifrx-objs := stm32_spdifrx.o
obj-$(CONFIG_SND_SOC_STM32_SPDIFRX) += snd-soc-stm32-spdifrx.o
+
+# DFSDM
+obj-$(CONFIG_SND_SOC_STM32_DFSDM) += stm32_adfsdm.o
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
new file mode 100644
index 000000000000..7306e3eca9e1
--- /dev/null
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is part of the STM32 DFSDM ASoC DAI driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ * Olivier Moysan <olivier.moysan@st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/adc/stm32-dfsdm-adc.h>
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#define STM32_ADFSDM_DRV_NAME "stm32-adfsdm"
+
+#define DFSDM_MAX_PERIOD_SIZE (PAGE_SIZE / 2)
+#define DFSDM_MAX_PERIODS 6
+
+struct stm32_adfsdm_priv {
+ struct snd_soc_dai_driver dai_drv;
+ struct snd_pcm_substream *substream;
+ struct device *dev;
+
+ /* IIO */
+ struct iio_channel *iio_ch;
+ struct iio_cb_buffer *iio_cb;
+ bool iio_active;
+
+ /* PCM buffer */
+ unsigned char *pcm_buff;
+ unsigned int pos;
+};
+
+static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+
+ .rate_min = 8000,
+ .rate_max = 32000,
+
+ .channels_min = 1,
+ .channels_max = 1,
+
+ .periods_min = 2,
+ .periods_max = DFSDM_MAX_PERIODS,
+
+ .period_bytes_max = DFSDM_MAX_PERIOD_SIZE,
+ .buffer_bytes_max = DFSDM_MAX_PERIODS * DFSDM_MAX_PERIOD_SIZE
+};
+
+static void stm32_adfsdm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ if (priv->iio_active) {
+ iio_channel_stop_all_cb(priv->iio_cb);
+ priv->iio_active = false;
+ }
+}
+
+static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = iio_write_channel_attribute(priv->iio_ch,
+ substream->runtime->rate, 0,
+ IIO_CHAN_INFO_SAMP_FREQ);
+ if (ret < 0) {
+ dev_err(dai->dev, "%s: Failed to set %d sampling rate\n",
+ __func__, substream->runtime->rate);
+ return ret;
+ }
+
+ if (!priv->iio_active) {
+ ret = iio_channel_start_all_cb(priv->iio_cb);
+ if (!ret)
+ priv->iio_active = true;
+ else
+ dev_err(dai->dev, "%s: IIO channel start failed (%d)\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+static int stm32_adfsdm_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+ ssize_t size;
+ char str_freq[10];
+
+ dev_dbg(dai->dev, "%s: Enter for freq %d\n", __func__, freq);
+
+ /* Set IIO frequency if CODEC is master as clock comes from SPI_IN */
+
+ snprintf(str_freq, sizeof(str_freq), "%d\n", freq);
+ size = iio_write_channel_ext_info(priv->iio_ch, "spi_clk_freq",
+ str_freq, sizeof(str_freq));
+ if (size != sizeof(str_freq)) {
+ dev_err(dai->dev, "%s: Failed to set SPI clock\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct snd_soc_dai_ops stm32_adfsdm_dai_ops = {
+ .shutdown = stm32_adfsdm_shutdown,
+ .prepare = stm32_adfsdm_dai_prepare,
+ .set_sysclk = stm32_adfsdm_set_sysclk,
+};
+
+static const struct snd_soc_dai_driver stm32_adfsdm_dai = {
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 1,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_32000),
+ },
+ .ops = &stm32_adfsdm_dai_ops,
+};
+
+static const struct snd_soc_component_driver stm32_adfsdm_dai_component = {
+ .name = "stm32_dfsdm_audio",
+};
+
+static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
+{
+ struct stm32_adfsdm_priv *priv = private;
+ struct snd_soc_pcm_runtime *rtd = priv->substream->private_data;
+ u8 *pcm_buff = priv->pcm_buff;
+ u8 *src_buff = (u8 *)data;
+ unsigned int buff_size = snd_pcm_lib_buffer_bytes(priv->substream);
+ unsigned int period_size = snd_pcm_lib_period_bytes(priv->substream);
+ unsigned int old_pos = priv->pos;
+ unsigned int cur_size = size;
+
+ dev_dbg(rtd->dev, "%s: buff_add :%p, pos = %d, size = %zu\n",
+ __func__, &pcm_buff[priv->pos], priv->pos, size);
+
+ if ((priv->pos + size) > buff_size) {
+ memcpy(&pcm_buff[priv->pos], src_buff, buff_size - priv->pos);
+ cur_size -= buff_size - priv->pos;
+ priv->pos = 0;
+ }
+
+ memcpy(&pcm_buff[priv->pos], &src_buff[size - cur_size], cur_size);
+ priv->pos = (priv->pos + cur_size) % buff_size;
+
+ if (cur_size != size || (old_pos && (old_pos % period_size < size)))
+ snd_pcm_period_elapsed(priv->substream);
+
+ return 0;
+}
+
+static int stm32_adfsdm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ priv->pos = 0;
+ return stm32_dfsdm_get_buff_cb(priv->iio_ch->indio_dev,
+ stm32_afsdm_pcm_cb, priv);
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ return stm32_dfsdm_release_buff_cb(priv->iio_ch->indio_dev);
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_adfsdm_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ int ret;
+
+ ret = snd_soc_set_runtime_hwparams(substream, &stm32_adfsdm_pcm_hw);
+ if (!ret)
+ priv->substream = substream;
+
+ return ret;
+}
+
+static int stm32_adfsdm_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ snd_pcm_lib_free_pages(substream);
+ priv->substream = NULL;
+
+ return 0;
+}
+
+static snd_pcm_uframes_t stm32_adfsdm_pcm_pointer(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ return bytes_to_frames(substream->runtime, priv->pos);
+}
+
+static int stm32_adfsdm_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ int ret;
+
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ if (ret < 0)
+ return ret;
+ priv->pcm_buff = substream->runtime->dma_area;
+
+ return iio_channel_cb_set_buffer_watermark(priv->iio_cb,
+ params_period_size(params));
+}
+
+static int stm32_adfsdm_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ snd_pcm_lib_free_pages(substream);
+
+ return 0;
+}
+
+static struct snd_pcm_ops stm32_adfsdm_pcm_ops = {
+ .open = stm32_adfsdm_pcm_open,
+ .close = stm32_adfsdm_pcm_close,
+ .hw_params = stm32_adfsdm_pcm_hw_params,
+ .hw_free = stm32_adfsdm_pcm_hw_free,
+ .trigger = stm32_adfsdm_trigger,
+ .pointer = stm32_adfsdm_pcm_pointer,
+};
+
+static int stm32_adfsdm_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm *pcm = rtd->pcm;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ unsigned int size = DFSDM_MAX_PERIODS * DFSDM_MAX_PERIOD_SIZE;
+
+ return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ priv->dev, size, size);
+}
+
+static void stm32_adfsdm_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_soc_pcm_runtime *rtd;
+ struct stm32_adfsdm_priv *priv;
+
+ substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (substream) {
+ rtd = substream->private_data;
+ priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+ }
+}
+
+static struct snd_soc_platform_driver stm32_adfsdm_soc_platform = {
+ .ops = &stm32_adfsdm_pcm_ops,
+ .pcm_new = stm32_adfsdm_pcm_new,
+ .pcm_free = stm32_adfsdm_pcm_free,
+};
+
+static const struct of_device_id stm32_adfsdm_of_match[] = {
+ {.compatible = "st,stm32h7-dfsdm-dai"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_adfsdm_of_match);
+
+static int stm32_adfsdm_probe(struct platform_device *pdev)
+{
+ struct stm32_adfsdm_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->dai_drv = stm32_adfsdm_dai;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &stm32_adfsdm_dai_component,
+ &priv->dai_drv, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Associate iio channel */
+ priv->iio_ch = devm_iio_channel_get_all(&pdev->dev);
+ if (IS_ERR(priv->iio_ch))
+ return PTR_ERR(priv->iio_ch);
+
+ priv->iio_cb = iio_channel_get_all_cb(&pdev->dev, NULL, NULL);
+ if (IS_ERR(priv->iio_cb))
+ return PTR_ERR(priv->iio_cb);
+
+ ret = devm_snd_soc_register_platform(&pdev->dev,
+ &stm32_adfsdm_soc_platform);
+ if (ret < 0)
+ dev_err(&pdev->dev, "%s: Failed to register PCM platform\n",
+ __func__);
+
+ return ret;
+}
+
+static struct platform_driver stm32_adfsdm_driver = {
+ .driver = {
+ .name = STM32_ADFSDM_DRV_NAME,
+ .of_match_table = stm32_adfsdm_of_match,
+ },
+ .probe = stm32_adfsdm_probe,
+};
+
+module_platform_driver(stm32_adfsdm_driver);
+
+MODULE_DESCRIPTION("stm32 DFSDM DAI driver");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" STM32_ADFSDM_DRV_NAME);
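
stm32_afsdm_pcm_cb() above is a classic ring-buffer fill: a block arriving from the IIO callback may straddle the end of the ALSA buffer, in which case the copy is split and the write position wraps to zero. A standalone model of that copy (plain C, not driver code; BUFF_SIZE stands in for snd_pcm_lib_buffer_bytes()):

#include <stdio.h>
#include <string.h>

#define BUFF_SIZE 8

static unsigned int pos;
static char pcm_buff[BUFF_SIZE];

static void pcm_cb(const char *src, size_t size)
{
	size_t cur_size = size;

	if (pos + size > BUFF_SIZE) {	/* block straddles the end */
		memcpy(&pcm_buff[pos], src, BUFF_SIZE - pos);
		cur_size -= BUFF_SIZE - pos;
		pos = 0;
	}
	memcpy(&pcm_buff[pos], &src[size - cur_size], cur_size);
	pos = (pos + cur_size) % BUFF_SIZE;
}

int main(void)
{
	pcm_cb("abcdef", 6);
	pcm_cb("ghij", 4);	/* "gh" fills the tail, "ij" wraps */
	printf("buffer=%.8s pos=%u\n", pcm_buff, pos);	/* ijcdefgh, 2 */
	return 0;
}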
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index d6f71a3406e9..d743b7dd52fb 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -28,16 +28,6 @@
#include "stm32_sai.h"
-static LIST_HEAD(sync_providers);
-static DEFINE_MUTEX(sync_mutex);
-
-struct sync_provider {
- struct list_head link;
- struct device_node *node;
- int (*sync_conf)(void *data, int synco);
- void *data;
-};
-
static const struct stm32_sai_conf stm32_sai_conf_f4 = {
.version = SAI_STM32F4,
};
@@ -70,9 +60,8 @@ static int stm32_sai_sync_conf_client(struct stm32_sai_data *sai, int synci)
return 0;
}
-static int stm32_sai_sync_conf_provider(void *data, int synco)
+static int stm32_sai_sync_conf_provider(struct stm32_sai_data *sai, int synco)
{
- struct stm32_sai_data *sai = (struct stm32_sai_data *)data;
u32 prev_synco;
int ret;
@@ -103,83 +92,42 @@ static int stm32_sai_sync_conf_provider(void *data, int synco)
return 0;
}
-static int stm32_sai_set_sync_provider(struct device_node *np, int synco)
+static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
+ struct device_node *np_provider,
+ int synco, int synci)
{
- struct sync_provider *provider;
+ struct platform_device *pdev = of_find_device_by_node(np_provider);
+ struct stm32_sai_data *sai_provider;
int ret;
- mutex_lock(&sync_mutex);
- list_for_each_entry(provider, &sync_providers, link) {
- if (provider->node == np) {
- ret = provider->sync_conf(provider->data, synco);
- mutex_unlock(&sync_mutex);
- return ret;
- }
+ if (!pdev) {
+ dev_err(&sai_client->pdev->dev,
+ "Device not found for node %s\n", np_provider->name);
+ return -ENODEV;
}
- mutex_unlock(&sync_mutex);
- /* SAI sync provider not found */
- return -ENODEV;
-}
-
-static int stm32_sai_set_sync(struct stm32_sai_data *sai,
- struct device_node *np_provider,
- int synco, int synci)
-{
- int ret;
+ sai_provider = platform_get_drvdata(pdev);
+ if (!sai_provider) {
+ dev_err(&sai_client->pdev->dev,
+ "SAI sync provider data not found\n");
+ return -EINVAL;
+ }
/* Configure sync client */
- stm32_sai_sync_conf_client(sai, synci);
+ ret = stm32_sai_sync_conf_client(sai_client, synci);
+ if (ret < 0)
+ return ret;
/* Configure sync provider */
- ret = stm32_sai_set_sync_provider(np_provider, synco);
-
- return ret;
-}
-
-static int stm32_sai_sync_add_provider(struct platform_device *pdev,
- void *data)
-{
- struct sync_provider *sp;
-
- sp = devm_kzalloc(&pdev->dev, sizeof(*sp), GFP_KERNEL);
- if (!sp)
- return -ENOMEM;
-
- sp->node = of_node_get(pdev->dev.of_node);
- sp->data = data;
- sp->sync_conf = &stm32_sai_sync_conf_provider;
-
- mutex_lock(&sync_mutex);
- list_add(&sp->link, &sync_providers);
- mutex_unlock(&sync_mutex);
-
- return 0;
-}
-
-static void stm32_sai_sync_del_provider(struct device_node *np)
-{
- struct sync_provider *sp;
-
- mutex_lock(&sync_mutex);
- list_for_each_entry(sp, &sync_providers, link) {
- if (sp->node == np) {
- list_del(&sp->link);
- of_node_put(sp->node);
- break;
- }
- }
- mutex_unlock(&sync_mutex);
+ return stm32_sai_sync_conf_provider(sai_provider, synco);
}
static int stm32_sai_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct stm32_sai_data *sai;
struct reset_control *rst;
struct resource *res;
const struct of_device_id *of_id;
- int ret;
sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
if (!sai)
@@ -231,28 +179,11 @@ static int stm32_sai_probe(struct platform_device *pdev)
reset_control_deassert(rst);
}
- ret = stm32_sai_sync_add_provider(pdev, sai);
- if (ret < 0)
- return ret;
- sai->set_sync = &stm32_sai_set_sync;
-
sai->pdev = pdev;
+ sai->set_sync = &stm32_sai_set_sync;
platform_set_drvdata(pdev, sai);
- ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
- if (ret < 0)
- stm32_sai_sync_del_provider(np);
-
- return ret;
-}
-
-static int stm32_sai_remove(struct platform_device *pdev)
-{
- of_platform_depopulate(&pdev->dev);
-
- stm32_sai_sync_del_provider(pdev->dev.of_node);
-
- return 0;
+ return devm_of_platform_populate(&pdev->dev);
}
MODULE_DEVICE_TABLE(of, stm32_sai_ids);
@@ -263,7 +194,6 @@ static struct platform_driver stm32_sai_driver = {
.of_match_table = stm32_sai_ids,
},
.probe = stm32_sai_probe,
- .remove = stm32_sai_remove,
};
module_platform_driver(stm32_sai_driver);
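
The refactor above drops the driver-private provider list: a sync client now resolves its provider directly from the device-tree phandle with of_find_device_by_node() and reads the provider's drvdata. A minimal sketch of that lookup, with a hypothetical foo_data type; note that of_find_device_by_node() takes a device reference, which a complete driver would eventually drop with put_device().

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

struct foo_data;			/* hypothetical provider state */

static struct foo_data *foo_from_of_node(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	struct foo_data *data;

	if (!pdev)
		return ERR_PTR(-ENODEV);	/* provider not probed yet */

	/* Valid only once the provider's probe set its drvdata */
	data = platform_get_drvdata(pdev);
	if (!data)
		return ERR_PTR(-EINVAL);

	return data;
}
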
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 5da4efe7a550..886281673972 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -590,12 +590,28 @@ static int sun4i_codec_hw_params(struct snd_pcm_substream *substream,
hwrate);
}
+
+static unsigned int sun4i_codec_src_rates[] = {
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000,
+ 44100, 48000, 96000, 192000
+};
+
+
+static struct snd_pcm_hw_constraint_list sun4i_codec_constraints = {
+ .count = ARRAY_SIZE(sun4i_codec_src_rates),
+ .list = sun4i_codec_src_rates,
+};
+
+
static int sun4i_codec_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct sun4i_codec *scodec = snd_soc_card_get_drvdata(rtd->card);
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &sun4i_codec_constraints);
+
/*
* Stop issuing DRQ when we have room for less than 16 samples
* in our TX FIFO
@@ -633,9 +649,7 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
.channels_max = 2,
.rate_min = 8000,
.rate_max = 192000,
- .rates = SNDRV_PCM_RATE_8000_48000 |
- SNDRV_PCM_RATE_96000 |
- SNDRV_PCM_RATE_192000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.sig_bits = 24,
@@ -645,11 +659,8 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
.channels_min = 1,
.channels_max = 2,
.rate_min = 8000,
- .rate_max = 192000,
- .rates = SNDRV_PCM_RATE_8000_48000 |
- SNDRV_PCM_RATE_96000 |
- SNDRV_PCM_RATE_192000 |
- SNDRV_PCM_RATE_KNOT,
+ .rate_max = 48000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.sig_bits = 24,
@@ -1128,7 +1139,7 @@ static const struct snd_soc_component_driver sun4i_codec_component = {
.name = "sun4i-codec",
};
-#define SUN4I_CODEC_RATES SNDRV_PCM_RATE_8000_192000
+#define SUN4I_CODEC_RATES SNDRV_PCM_RATE_CONTINUOUS
#define SUN4I_CODEC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
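
The codec change above follows a common ASoC pattern: advertise SNDRV_PCM_RATE_CONTINUOUS in the DAI driver, then narrow the runtime to the rates the hardware actually supports via a constraint list applied in the startup() callback. A minimal sketch with hypothetical names:

#include <sound/pcm.h>
#include <sound/soc.h>

static const unsigned int foo_rates[] = { 8000, 16000, 32000, 48000 };

static const struct snd_pcm_hw_constraint_list foo_rate_constraints = {
	.count = ARRAY_SIZE(foo_rates),
	.list = foo_rates,
};

static int foo_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	/* Clamp the CONTINUOUS rate range to the supported set */
	return snd_pcm_hw_constraint_list(substream->runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  &foo_rate_constraints);
}
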
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 04f92583a969..dca1143c1150 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -269,10 +269,11 @@ static bool sun4i_i2s_oversample_is_valid(unsigned int oversample)
return false;
}
-static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
+static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
unsigned int rate,
unsigned int word_size)
{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int oversample_rate, clk_rate;
int bclk_div, mclk_div;
int ret;
@@ -300,6 +301,7 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
break;
default:
+ dev_err(dai->dev, "Unsupported sample rate: %u\n", rate);
return -EINVAL;
}
@@ -308,18 +310,25 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
return ret;
oversample_rate = i2s->mclk_freq / rate;
- if (!sun4i_i2s_oversample_is_valid(oversample_rate))
+ if (!sun4i_i2s_oversample_is_valid(oversample_rate)) {
+ dev_err(dai->dev, "Unsupported oversample rate: %d\n",
+ oversample_rate);
return -EINVAL;
+ }
bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
word_size);
- if (bclk_div < 0)
+ if (bclk_div < 0) {
+ dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
return -EINVAL;
+ }
mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
clk_rate, rate);
- if (mclk_div < 0)
+ if (mclk_div < 0) {
+ dev_err(dai->dev, "Unsupported MCLK divider: %d\n", mclk_div);
return -EINVAL;
+ }
/* Adjust the clock division values if needed */
bclk_div += i2s->variant->bclk_offset;
@@ -349,8 +358,11 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
u32 width;
channels = params_channels(params);
- if (channels != 2)
+ if (channels != 2) {
+ dev_err(dai->dev, "Unsupported number of channels: %d\n",
+ channels);
return -EINVAL;
+ }
if (i2s->variant->has_chcfg) {
regmap_update_bits(i2s->regmap, SUN8I_I2S_CHAN_CFG_REG,
@@ -382,6 +394,8 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
default:
+ dev_err(dai->dev, "Unsupported physical sample width: %d\n",
+ params_physical_width(params));
return -EINVAL;
}
i2s->playback_dma_data.addr_width = width;
@@ -393,6 +407,8 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
break;
default:
+ dev_err(dai->dev, "Unsupported sample width: %d\n",
+ params_width(params));
return -EINVAL;
}
@@ -401,7 +417,7 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
regmap_field_write(i2s->field_fmt_sr,
sr + i2s->variant->fmt_offset);
- return sun4i_i2s_set_clk_rate(i2s, params_rate(params),
+ return sun4i_i2s_set_clk_rate(dai, params_rate(params),
params_width(params));
}
@@ -426,6 +442,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
val = SUN4I_I2S_FMT0_FMT_RIGHT_J;
break;
default:
+ dev_err(dai->dev, "Unsupported format: %d\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
return -EINVAL;
}
@@ -464,6 +482,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_NB_NF:
break;
default:
+ dev_err(dai->dev, "Unsupported clock polarity: %d\n",
+ fmt & SND_SOC_DAIFMT_INV_MASK);
return -EINVAL;
}
@@ -482,6 +502,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
val = SUN4I_I2S_CTRL_MODE_SLAVE;
break;
default:
+ dev_err(dai->dev, "Unsupported slave setting: %d\n",
+ fmt & SND_SOC_DAIFMT_MASTER_MASK);
return -EINVAL;
}
regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
@@ -504,6 +526,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
val = 0;
break;
default:
+ dev_err(dai->dev, "Unsupported slave setting: %d\n",
+ fmt & SND_SOC_DAIFMT_MASTER_MASK);
return -EINVAL;
}
regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
@@ -897,6 +921,23 @@ static const struct sun4i_i2s_quirks sun6i_a31_i2s_quirks = {
.field_rxchansel = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
};
+static const struct sun4i_i2s_quirks sun8i_a83t_i2s_quirks = {
+ .has_reset = true,
+ .reg_offset_txdata = SUN8I_I2S_FIFO_TX_REG,
+ .sun4i_i2s_regmap = &sun4i_i2s_regmap_config,
+ .field_clkdiv_mclk_en = REG_FIELD(SUN4I_I2S_CLK_DIV_REG, 7, 7),
+ .field_fmt_wss = REG_FIELD(SUN4I_I2S_FMT0_REG, 2, 3),
+ .field_fmt_sr = REG_FIELD(SUN4I_I2S_FMT0_REG, 4, 5),
+ .field_fmt_bclk = REG_FIELD(SUN4I_I2S_FMT0_REG, 6, 6),
+ .field_fmt_lrclk = REG_FIELD(SUN4I_I2S_FMT0_REG, 7, 7),
+ .has_slave_select_bit = true,
+ .field_fmt_mode = REG_FIELD(SUN4I_I2S_FMT0_REG, 0, 1),
+ .field_txchanmap = REG_FIELD(SUN4I_I2S_TX_CHAN_MAP_REG, 0, 31),
+ .field_rxchanmap = REG_FIELD(SUN4I_I2S_RX_CHAN_MAP_REG, 0, 31),
+ .field_txchansel = REG_FIELD(SUN4I_I2S_TX_CHAN_SEL_REG, 0, 2),
+ .field_rxchansel = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
+};
+
static const struct sun4i_i2s_quirks sun8i_h3_i2s_quirks = {
.has_reset = true,
.reg_offset_txdata = SUN8I_I2S_FIFO_TX_REG,
@@ -1121,6 +1162,10 @@ static const struct of_device_id sun4i_i2s_match[] = {
.data = &sun6i_a31_i2s_quirks,
},
{
+ .compatible = "allwinner,sun8i-a83t-i2s",
+ .data = &sun8i_a83t_i2s_quirks,
+ },
+ {
.compatible = "allwinner,sun8i-h3-i2s",
.data = &sun8i_h3_i2s_quirks,
},
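
The new sun8i-a83t entry above follows the driver's variant scheme: each SoC describes its register layout with REG_FIELD() descriptors, and probe turns them into regmap_field handles so the rest of the code stays layout independent. A minimal sketch of that pattern, with hypothetical register and field names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

#define FOO_FMT_REG	0x04

struct foo_quirks {
	struct reg_field field_fmt_sr;	/* sample-rate bits, per variant */
};

static const struct foo_quirks foo_v1_quirks = {
	.field_fmt_sr = REG_FIELD(FOO_FMT_REG, 4, 5),
};

static int foo_init_fields(struct device *dev, struct regmap *map,
			   const struct foo_quirks *quirks,
			   struct regmap_field **fmt_sr)
{
	/* Bind the variant's field description to this device's regmap */
	*fmt_sr = devm_regmap_field_alloc(dev, map, quirks->field_fmt_sr);
	if (IS_ERR(*fmt_sr))
		return PTR_ERR(*fmt_sr);

	/* Writes now hit the right bits regardless of the variant */
	return regmap_field_write(*fmt_sr, 0x1);
}
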
diff --git a/sound/soc/uniphier/Kconfig b/sound/soc/uniphier/Kconfig
new file mode 100644
index 000000000000..02886a457eaf
--- /dev/null
+++ b/sound/soc/uniphier/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+config SND_SOC_UNIPHIER
+ tristate "ASoC support for UniPhier"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST)
+ help
+ Say Y or M if you want to add support for the Socionext
+ UniPhier SoC audio interfaces. You will also need to select the
+ audio interfaces to support below.
+ If unsure select "N".
+
+config SND_SOC_UNIPHIER_EVEA_CODEC
+ tristate "UniPhier SoC internal audio codec"
+ depends on SND_SOC_UNIPHIER
+ select REGMAP_MMIO
+ help
+ This adds the codec driver for the Socionext UniPhier LD11/20 SoC
+ internal DAC. The driver supports Line In / Out and Headphone.
+ Select Y if you use such a device.
+ If unsure, select "N".
diff --git a/sound/soc/uniphier/Makefile b/sound/soc/uniphier/Makefile
new file mode 100644
index 000000000000..3be00d72f5e5
--- /dev/null
+++ b/sound/soc/uniphier/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+snd-soc-uniphier-evea-objs := evea.o
+obj-$(CONFIG_SND_SOC_UNIPHIER_EVEA_CODEC) += snd-soc-uniphier-evea.o
diff --git a/sound/soc/uniphier/evea.c b/sound/soc/uniphier/evea.c
new file mode 100644
index 000000000000..0cc9efff1d9a
--- /dev/null
+++ b/sound/soc/uniphier/evea.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Socionext UniPhier EVEA ADC/DAC codec driver.
+ *
+ * Copyright (c) 2016-2017 Socionext Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#define DRV_NAME "evea"
+#define EVEA_RATES SNDRV_PCM_RATE_48000
+#define EVEA_FORMATS SNDRV_PCM_FMTBIT_S32_LE
+
+#define AADCPOW(n) (0x0078 + 0x04 * (n))
+#define AADCPOW_AADC_POWD BIT(0)
+#define AHPOUTPOW 0x0098
+#define AHPOUTPOW_HP_ON BIT(4)
+#define ALINEPOW 0x009c
+#define ALINEPOW_LIN2_POWD BIT(3)
+#define ALINEPOW_LIN1_POWD BIT(4)
+#define ALO1OUTPOW 0x00a8
+#define ALO1OUTPOW_LO1_ON BIT(4)
+#define ALO2OUTPOW 0x00ac
+#define ALO2OUTPOW_ADAC2_MUTE BIT(0)
+#define ALO2OUTPOW_LO2_ON BIT(4)
+#define AANAPOW 0x00b8
+#define AANAPOW_A_POWD BIT(4)
+#define ADACSEQ1(n) (0x0144 + 0x40 * (n))
+#define ADACSEQ1_MMUTE BIT(1)
+#define ADACSEQ2(n) (0x0160 + 0x40 * (n))
+#define ADACSEQ2_ADACIN_FIX BIT(0)
+#define ADAC1ODC 0x0200
+#define ADAC1ODC_HP_DIS_RES_MASK GENMASK(2, 1)
+#define ADAC1ODC_HP_DIS_RES_OFF (0x0 << 1)
+#define ADAC1ODC_HP_DIS_RES_ON (0x3 << 1)
+#define ADAC1ODC_ADAC_RAMPCLT_MASK GENMASK(8, 7)
+#define ADAC1ODC_ADAC_RAMPCLT_NORMAL (0x0 << 7)
+#define ADAC1ODC_ADAC_RAMPCLT_REDUCE (0x1 << 7)
+
+struct evea_priv {
+ struct clk *clk, *clk_exiv;
+ struct reset_control *rst, *rst_exiv, *rst_adamv;
+ struct regmap *regmap;
+
+ int switch_lin;
+ int switch_lo;
+ int switch_hp;
+};
+
+static const struct snd_soc_dapm_widget evea_widgets[] = {
+ SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_INPUT("LIN1_LP"),
+ SND_SOC_DAPM_INPUT("LIN1_RP"),
+ SND_SOC_DAPM_INPUT("LIN2_LP"),
+ SND_SOC_DAPM_INPUT("LIN2_RP"),
+ SND_SOC_DAPM_INPUT("LIN3_LP"),
+ SND_SOC_DAPM_INPUT("LIN3_RP"),
+
+ SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUTPUT("HP1_L"),
+ SND_SOC_DAPM_OUTPUT("HP1_R"),
+ SND_SOC_DAPM_OUTPUT("LO2_L"),
+ SND_SOC_DAPM_OUTPUT("LO2_R"),
+};
+
+static const struct snd_soc_dapm_route evea_routes[] = {
+ { "ADC", NULL, "LIN1_LP" },
+ { "ADC", NULL, "LIN1_RP" },
+ { "ADC", NULL, "LIN2_LP" },
+ { "ADC", NULL, "LIN2_RP" },
+ { "ADC", NULL, "LIN3_LP" },
+ { "ADC", NULL, "LIN3_RP" },
+
+ { "HP1_L", NULL, "DAC" },
+ { "HP1_R", NULL, "DAC" },
+ { "LO2_L", NULL, "DAC" },
+ { "LO2_R", NULL, "DAC" },
+};
+
+static void evea_set_power_state_on(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ regmap_update_bits(map, AANAPOW, AANAPOW_A_POWD,
+ AANAPOW_A_POWD);
+
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+ ADAC1ODC_HP_DIS_RES_ON);
+
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_ADAC_RAMPCLT_MASK,
+ ADAC1ODC_ADAC_RAMPCLT_REDUCE);
+
+ regmap_update_bits(map, ADACSEQ2(0), ADACSEQ2_ADACIN_FIX, 0);
+ regmap_update_bits(map, ADACSEQ2(1), ADACSEQ2_ADACIN_FIX, 0);
+ regmap_update_bits(map, ADACSEQ2(2), ADACSEQ2_ADACIN_FIX, 0);
+}
+
+static void evea_set_power_state_off(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+ ADAC1ODC_HP_DIS_RES_ON);
+
+ regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+ regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+ regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+
+ regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
+ regmap_update_bits(map, ALO2OUTPOW, ALO2OUTPOW_LO2_ON, 0);
+ regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
+}
+
+static int evea_update_switch_lin(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ if (evea->switch_lin) {
+ regmap_update_bits(map, ALINEPOW,
+ ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD,
+ ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD);
+
+ regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD,
+ AADCPOW_AADC_POWD);
+ regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD,
+ AADCPOW_AADC_POWD);
+ } else {
+ regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD, 0);
+ regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD, 0);
+
+ regmap_update_bits(map, ALINEPOW,
+ ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD, 0);
+ }
+
+ return 0;
+}
+
+static int evea_update_switch_lo(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ if (evea->switch_lo) {
+ regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE, 0);
+ regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE, 0);
+
+ regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON,
+ ALO1OUTPOW_LO1_ON);
+ regmap_update_bits(map, ALO2OUTPOW,
+ ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
+ ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON);
+ } else {
+ regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+ regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+
+ regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
+ regmap_update_bits(map, ALO2OUTPOW,
+ ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
+ 0);
+ }
+
+ return 0;
+}
+
+static int evea_update_switch_hp(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ if (evea->switch_hp) {
+ regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE, 0);
+
+ regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON,
+ AHPOUTPOW_HP_ON);
+
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+ ADAC1ODC_HP_DIS_RES_OFF);
+ } else {
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+ ADAC1ODC_HP_DIS_RES_ON);
+
+ regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+
+ regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
+ }
+
+ return 0;
+}
+
+static void evea_update_switch_all(struct evea_priv *evea)
+{
+ evea_update_switch_lin(evea);
+ evea_update_switch_lo(evea);
+ evea_update_switch_hp(evea);
+}
+
+static int evea_get_switch_lin(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = evea->switch_lin;
+
+ return 0;
+}
+
+static int evea_set_switch_lin(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ if (evea->switch_lin == ucontrol->value.integer.value[0])
+ return 0;
+
+ evea->switch_lin = ucontrol->value.integer.value[0];
+
+ return evea_update_switch_lin(evea);
+}
+
+static int evea_get_switch_lo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = evea->switch_lo;
+
+ return 0;
+}
+
+static int evea_set_switch_lo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ if (evea->switch_lo == ucontrol->value.integer.value[0])
+ return 0;
+
+ evea->switch_lo = ucontrol->value.integer.value[0];
+
+ return evea_update_switch_lo(evea);
+}
+
+static int evea_get_switch_hp(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = evea->switch_hp;
+
+ return 0;
+}
+
+static int evea_set_switch_hp(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ if (evea->switch_hp == ucontrol->value.integer.value[0])
+ return 0;
+
+ evea->switch_hp = ucontrol->value.integer.value[0];
+
+ return evea_update_switch_hp(evea);
+}
+
+static const struct snd_kcontrol_new evea_controls[] = {
+ SOC_SINGLE_BOOL_EXT("Line Capture Switch", 0,
+ evea_get_switch_lin, evea_set_switch_lin),
+ SOC_SINGLE_BOOL_EXT("Line Playback Switch", 0,
+ evea_get_switch_lo, evea_set_switch_lo),
+ SOC_SINGLE_BOOL_EXT("Headphone Playback Switch", 0,
+ evea_get_switch_hp, evea_set_switch_hp),
+};
+
+static int evea_codec_probe(struct snd_soc_codec *codec)
+{
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ evea->switch_lin = 1;
+ evea->switch_lo = 1;
+ evea->switch_hp = 1;
+
+ evea_set_power_state_on(evea);
+ evea_update_switch_all(evea);
+
+ return 0;
+}
+
+static int evea_codec_suspend(struct snd_soc_codec *codec)
+{
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ evea_set_power_state_off(evea);
+
+ reset_control_assert(evea->rst_adamv);
+ reset_control_assert(evea->rst_exiv);
+ reset_control_assert(evea->rst);
+
+ clk_disable_unprepare(evea->clk_exiv);
+ clk_disable_unprepare(evea->clk);
+
+ return 0;
+}
+
+static int evea_codec_resume(struct snd_soc_codec *codec)
+{
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ ret = clk_prepare_enable(evea->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(evea->clk_exiv);
+ if (ret)
+ goto err_out_clock;
+
+ ret = reset_control_deassert(evea->rst);
+ if (ret)
+ goto err_out_clock_exiv;
+
+ ret = reset_control_deassert(evea->rst_exiv);
+ if (ret)
+ goto err_out_reset;
+
+ ret = reset_control_deassert(evea->rst_adamv);
+ if (ret)
+ goto err_out_reset_exiv;
+
+ evea_set_power_state_on(evea);
+ evea_update_switch_all(evea);
+
+ return 0;
+
+err_out_reset_exiv:
+ reset_control_assert(evea->rst_exiv);
+
+err_out_reset:
+ reset_control_assert(evea->rst);
+
+err_out_clock_exiv:
+ clk_disable_unprepare(evea->clk_exiv);
+
+err_out_clock:
+ clk_disable_unprepare(evea->clk);
+
+ return ret;
+}
+
+static struct snd_soc_codec_driver soc_codec_evea = {
+ .probe = evea_codec_probe,
+ .suspend = evea_codec_suspend,
+ .resume = evea_codec_resume,
+
+ .component_driver = {
+ .dapm_widgets = evea_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(evea_widgets),
+ .dapm_routes = evea_routes,
+ .num_dapm_routes = ARRAY_SIZE(evea_routes),
+ .controls = evea_controls,
+ .num_controls = ARRAY_SIZE(evea_controls),
+ },
+};
+
+static struct snd_soc_dai_driver soc_dai_evea[] = {
+ {
+ .name = DRV_NAME "-line1",
+ .playback = {
+ .stream_name = "Line Out 1",
+ .formats = EVEA_FORMATS,
+ .rates = EVEA_RATES,
+ .channels_min = 2,
+ .channels_max = 2,
+ },
+ .capture = {
+ .stream_name = "Line In 1",
+ .formats = EVEA_FORMATS,
+ .rates = EVEA_RATES,
+ .channels_min = 2,
+ .channels_max = 2,
+ },
+ },
+ {
+ .name = DRV_NAME "-hp1",
+ .playback = {
+ .stream_name = "Headphone 1",
+ .formats = EVEA_FORMATS,
+ .rates = EVEA_RATES,
+ .channels_min = 2,
+ .channels_max = 2,
+ },
+ },
+ {
+ .name = DRV_NAME "-lo2",
+ .playback = {
+ .stream_name = "Line Out 2",
+ .formats = EVEA_FORMATS,
+ .rates = EVEA_RATES,
+ .channels_min = 2,
+ .channels_max = 2,
+ },
+ },
+};
+
+static const struct regmap_config evea_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xffc,
+ .cache_type = REGCACHE_NONE,
+};
+
+static int evea_probe(struct platform_device *pdev)
+{
+ struct evea_priv *evea;
+ struct resource *res;
+ void __iomem *preg;
+ int ret;
+
+ evea = devm_kzalloc(&pdev->dev, sizeof(struct evea_priv), GFP_KERNEL);
+ if (!evea)
+ return -ENOMEM;
+
+ evea->clk = devm_clk_get(&pdev->dev, "evea");
+ if (IS_ERR(evea->clk))
+ return PTR_ERR(evea->clk);
+
+ evea->clk_exiv = devm_clk_get(&pdev->dev, "exiv");
+ if (IS_ERR(evea->clk_exiv))
+ return PTR_ERR(evea->clk_exiv);
+
+ evea->rst = devm_reset_control_get_shared(&pdev->dev, "evea");
+ if (IS_ERR(evea->rst))
+ return PTR_ERR(evea->rst);
+
+ evea->rst_exiv = devm_reset_control_get_shared(&pdev->dev, "exiv");
+ if (IS_ERR(evea->rst_exiv))
+ return PTR_ERR(evea->rst_exiv);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ preg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(preg))
+ return PTR_ERR(preg);
+
+ evea->regmap = devm_regmap_init_mmio(&pdev->dev, preg,
+ &evea_regmap_config);
+ if (IS_ERR(evea->regmap))
+ return PTR_ERR(evea->regmap);
+
+ ret = clk_prepare_enable(evea->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(evea->clk_exiv);
+ if (ret)
+ goto err_out_clock;
+
+ ret = reset_control_deassert(evea->rst);
+ if (ret)
+ goto err_out_clock_exiv;
+
+ ret = reset_control_deassert(evea->rst_exiv);
+ if (ret)
+ goto err_out_reset;
+
+ /* ADAMV will hang up if the EXIV reset is asserted */
+ evea->rst_adamv = devm_reset_control_get_shared(&pdev->dev, "adamv");
+ if (IS_ERR(evea->rst_adamv)) {
+ ret = PTR_ERR(evea->rst_adamv);
+ goto err_out_reset_exiv;
+ }
+
+ ret = reset_control_deassert(evea->rst_adamv);
+ if (ret)
+ goto err_out_reset_exiv;
+
+ platform_set_drvdata(pdev, evea);
+
+ ret = snd_soc_register_codec(&pdev->dev, &soc_codec_evea,
+ soc_dai_evea, ARRAY_SIZE(soc_dai_evea));
+ if (ret)
+ goto err_out_reset_adamv;
+
+ return 0;
+
+err_out_reset_adamv:
+ reset_control_assert(evea->rst_adamv);
+
+err_out_reset_exiv:
+ reset_control_assert(evea->rst_exiv);
+
+err_out_reset:
+ reset_control_assert(evea->rst);
+
+err_out_clock_exiv:
+ clk_disable_unprepare(evea->clk_exiv);
+
+err_out_clock:
+ clk_disable_unprepare(evea->clk);
+
+ return ret;
+}
+
+static int evea_remove(struct platform_device *pdev)
+{
+ struct evea_priv *evea = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_codec(&pdev->dev);
+
+ reset_control_assert(evea->rst_adamv);
+ reset_control_assert(evea->rst_exiv);
+ reset_control_assert(evea->rst);
+
+ clk_disable_unprepare(evea->clk_exiv);
+ clk_disable_unprepare(evea->clk);
+
+ return 0;
+}
+
+static const struct of_device_id evea_of_match[] = {
+ { .compatible = "socionext,uniphier-evea", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, evea_of_match);
+
+static struct platform_driver evea_codec_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(evea_of_match),
+ },
+ .probe = evea_probe,
+ .remove = evea_remove,
+};
+module_platform_driver(evea_codec_driver);
+
+MODULE_AUTHOR("Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier EVEA codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index 070a6880980e..c60a57797640 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -163,3 +163,7 @@ static struct platform_driver snd_soc_mop500_driver = {
};
module_platform_driver(snd_soc_mop500_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ASoC MOP500 board driver");
+MODULE_AUTHOR("Ola Lilja");
diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
index f12c01dddc8d..d35ba7700f46 100644
--- a/sound/soc/ux500/ux500_pcm.c
+++ b/sound/soc/ux500/ux500_pcm.c
@@ -165,3 +165,8 @@ int ux500_pcm_unregister_platform(struct platform_device *pdev)
return 0;
}
EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform);
+
+MODULE_AUTHOR("Ola Lilja");
+MODULE_AUTHOR("Roger Nilsson");
+MODULE_DESCRIPTION("ASoC UX500 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 99b73c675743..b4efb22db561 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -119,13 +119,6 @@ struct sound_unit
char name[32];
};
-#ifdef CONFIG_SOUND_MSNDCLAS
-extern int msnd_classic_init(void);
-#endif
-#ifdef CONFIG_SOUND_MSNDPIN
-extern int msnd_pinnacle_init(void);
-#endif
-
/*
* By default, OSS sound_core claims full legacy minor range (0-255)
* of SOUND_MAJOR to trap open attempts to any sound minor and
@@ -452,26 +445,6 @@ int register_sound_mixer(const struct file_operations *fops, int dev)
EXPORT_SYMBOL(register_sound_mixer);
-/**
- * register_sound_midi - register a midi device
- * @fops: File operations for the driver
- * @dev: Unit number to allocate
- *
- * Allocate a midi device. Unit is the number of the midi device requested.
- * Pass -1 to request the next free midi unit.
- *
- * Return: On success, the allocated number is returned. On failure,
- * a negative error code is returned.
- */
-
-int register_sound_midi(const struct file_operations *fops, int dev)
-{
- return sound_insert_unit(&chains[2], fops, dev, 2, 130,
- "midi", S_IRUSR | S_IWUSR, NULL);
-}
-
-EXPORT_SYMBOL(register_sound_midi);
-
/*
* DSP's are registered as a triple. Register only one and cheat
* in open - see below.
@@ -533,21 +506,6 @@ void unregister_sound_mixer(int unit)
EXPORT_SYMBOL(unregister_sound_mixer);
/**
- * unregister_sound_midi - unregister a midi device
- * @unit: unit number to allocate
- *
- * Release a sound device that was allocated with register_sound_midi().
- * The unit passed is the return value from the register function.
- */
-
-void unregister_sound_midi(int unit)
-{
- sound_remove_unit(&chains[2], unit);
-}
-
-EXPORT_SYMBOL(unregister_sound_midi);
-
-/**
* unregister_sound_dsp - unregister a DSP device
* @unit: unit number to allocate
*
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 23d1d23aefec..8018d56cfecc 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -585,15 +585,24 @@ static int usb_audio_probe(struct usb_interface *intf,
* now look for an empty slot and create a new card instance
*/
for (i = 0; i < SNDRV_CARDS; i++)
- if (enable[i] && ! usb_chip[i] &&
+ if (!usb_chip[i] &&
(vid[i] == -1 || vid[i] == USB_ID_VENDOR(id)) &&
(pid[i] == -1 || pid[i] == USB_ID_PRODUCT(id))) {
- err = snd_usb_audio_create(intf, dev, i, quirk,
- id, &chip);
- if (err < 0)
+ if (enable[i]) {
+ err = snd_usb_audio_create(intf, dev, i, quirk,
+ id, &chip);
+ if (err < 0)
+ goto __error;
+ chip->pm_intf = intf;
+ break;
+ } else if (vid[i] != -1 || pid[i] != -1) {
+ dev_info(&dev->dev,
+ "device (%04x:%04x) is disabled\n",
+ USB_ID_VENDOR(id),
+ USB_ID_PRODUCT(id));
+ err = -ENOENT;
goto __error;
- chip->pm_intf = intf;
- break;
+ }
}
if (!chip) {
dev_err(&dev->dev, "no available usb audio device\n");
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 2b4ceda36291..9afb8ab524c7 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -656,10 +656,14 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
unsigned char *name, int maxlen, int term_only)
{
struct iterm_name_combo *names;
+ int len;
- if (iterm->name)
- return snd_usb_copy_string_desc(state, iterm->name,
+ if (iterm->name) {
+ len = snd_usb_copy_string_desc(state, iterm->name,
name, maxlen);
+ if (len)
+ return len;
+ }
/* virtual type - not a real terminal */
if (iterm->type >> 16) {
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index e1e7ce9ab217..05ccc7fdcc09 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -27,6 +27,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/hid.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
@@ -239,7 +240,7 @@ static long snd_usb_sbrc_hwdep_read(struct snd_hwdep *hw, char __user *buf,
return err < 0 ? err : count;
}
-static unsigned int snd_usb_sbrc_hwdep_poll(struct snd_hwdep *hw, struct file *file,
+static __poll_t snd_usb_sbrc_hwdep_poll(struct snd_hwdep *hw, struct file *file,
poll_table *wait)
{
struct usb_mixer_interface *mixer = hw->private_data;
@@ -1721,6 +1722,83 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
return 0;
}
+/* Creative Sound Blaster E1 */
+
+static int snd_soundblaster_e1_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = kcontrol->private_value;
+ return 0;
+}
+
+static int snd_soundblaster_e1_switch_update(struct usb_mixer_interface *mixer,
+ unsigned char state)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ int err;
+ unsigned char buff[2];
+
+ buff[0] = 0x02;
+ buff[1] = state ? 0x02 : 0x00;
+
+ err = snd_usb_lock_shutdown(chip);
+ if (err < 0)
+ return err;
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), HID_REQ_SET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ 0x0202, 3, buff, 2);
+ snd_usb_unlock_shutdown(chip);
+ return err;
+}
+
+static int snd_soundblaster_e1_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ unsigned char value = !!ucontrol->value.integer.value[0];
+ int err;
+
+ if (kcontrol->private_value == value)
+ return 0;
+ kcontrol->private_value = value;
+ err = snd_soundblaster_e1_switch_update(list->mixer, value);
+ return err < 0 ? err : 1;
+}
+
+static int snd_soundblaster_e1_switch_resume(struct usb_mixer_elem_list *list)
+{
+ return snd_soundblaster_e1_switch_update(list->mixer,
+ list->kctl->private_value);
+}
+
+static int snd_soundblaster_e1_switch_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const texts[2] = {
+ "Mic", "Aux"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
+}
+
+static struct snd_kcontrol_new snd_soundblaster_e1_input_switch = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Input Source",
+ .info = snd_soundblaster_e1_switch_info,
+ .get = snd_soundblaster_e1_switch_get,
+ .put = snd_soundblaster_e1_switch_put,
+ .private_value = 0,
+};
+
+static int snd_soundblaster_e1_switch_create(struct usb_mixer_interface *mixer)
+{
+ return add_single_ctl_with_resume(mixer, 0,
+ snd_soundblaster_e1_switch_resume,
+ &snd_soundblaster_e1_input_switch,
+ NULL);
+}
+
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
{
int err = 0;
@@ -1802,6 +1880,10 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x1235, 0x800c): /* Focusrite Scarlett 18i20 */
err = snd_scarlett_controls_create(mixer);
break;
+
+ case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
+ err = snd_soundblaster_e1_switch_create(mixer);
+ break;
}
return err;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 8a59d4782a0f..50252046b01d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3277,4 +3277,52 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
},
+{
+ /*
+ * Nura's first-gen headphones use Cambridge Silicon Radio's vendor
+ * ID, but the product ID appears to be specific to Nura.
+ * The capture interface does not work at all (even on Windows),
+ * and only the 48 kHz sample rate works for the playback interface.
+ */
+ USB_DEVICE(0x0a12, 0x1243),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ /* Capture */
+ {
+ .ifnum = 1,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ /* Playback */
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 2,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+ UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x03,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ }
+ }
+},
+
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 159da1f3924e..e2be10d17118 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -271,12 +271,12 @@ out:
return err;
}
-static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw,
+static __poll_t usb_stream_hwdep_poll(struct snd_hwdep *hw,
struct file *file, poll_table *wait)
{
struct us122l *us122l = hw->private_data;
unsigned *polled;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &us122l->sk.sleep, wait);
@@ -378,7 +378,7 @@ out:
static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
unsigned cmd, unsigned long arg)
{
- struct usb_stream_config *cfg;
+ struct usb_stream_config cfg;
struct us122l *us122l = hw->private_data;
struct usb_stream *s;
unsigned min_period_frames;
@@ -388,24 +388,21 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS)
return -ENOTTY;
- cfg = memdup_user((void *)arg, sizeof(*cfg));
- if (IS_ERR(cfg))
- return PTR_ERR(cfg);
+ if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg)))
+ return -EFAULT;
+
+ if (cfg.version != USB_STREAM_INTERFACE_VERSION)
+ return -ENXIO;
- if (cfg->version != USB_STREAM_INTERFACE_VERSION) {
- err = -ENXIO;
- goto free;
- }
high_speed = us122l->dev->speed == USB_SPEED_HIGH;
- if ((cfg->sample_rate != 44100 && cfg->sample_rate != 48000 &&
+ if ((cfg.sample_rate != 44100 && cfg.sample_rate != 48000 &&
(!high_speed ||
- (cfg->sample_rate != 88200 && cfg->sample_rate != 96000))) ||
- cfg->frame_size != 6 ||
- cfg->period_frames > 0x3000) {
- err = -EINVAL;
- goto free;
- }
- switch (cfg->sample_rate) {
+ (cfg.sample_rate != 88200 && cfg.sample_rate != 96000))) ||
+ cfg.frame_size != 6 ||
+ cfg.period_frames > 0x3000)
+ return -EINVAL;
+
+ switch (cfg.sample_rate) {
case 44100:
min_period_frames = 48;
break;
@@ -418,10 +415,8 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
}
if (!high_speed)
min_period_frames <<= 1;
- if (cfg->period_frames < min_period_frames) {
- err = -EINVAL;
- goto free;
- }
+ if (cfg.period_frames < min_period_frames)
+ return -EINVAL;
snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
@@ -430,24 +425,22 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
if (!us122l->master)
us122l->master = file;
else if (us122l->master != file) {
- if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
+ if (!s || memcmp(&cfg, &s->cfg, sizeof(cfg))) {
err = -EIO;
goto unlock;
}
us122l->slave = file;
}
- if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
+ if (!s || memcmp(&cfg, &s->cfg, sizeof(cfg)) ||
s->state == usb_stream_xrun) {
us122l_stop(us122l);
- if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
+ if (!us122l_start(us122l, cfg.sample_rate, cfg.period_frames))
err = -EIO;
else
err = 1;
}
unlock:
mutex_unlock(&us122l->mutex);
-free:
- kfree(cfg);
wake_up_all(&us122l->sk.sleep);
return err;
}
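
The ioctl rework above swaps memdup_user()/kfree() for a plain copy_from_user() into an on-stack copy, which removes the error-unwind label entirely for a small fixed-size argument. A minimal sketch of that shape, with a hypothetical parameter struct and ioctl number:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

struct foo_params {
	unsigned int version;
	unsigned int sample_rate;
};

#define FOO_INTERFACE_VERSION	1
#define FOO_IOCTL_SET_PARAMS	_IOW('f', 0, struct foo_params)

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct foo_params p;	/* small and fixed size: fine on the stack */

	if (cmd != FOO_IOCTL_SET_PARAMS)
		return -ENOTTY;

	if (copy_from_user(&p, (void __user *)arg, sizeof(p)))
		return -EFAULT;

	if (p.version != FOO_INTERFACE_VERSION)
		return -ENXIO;	/* no allocation, so no cleanup needed */

	return 0;
}
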
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index f4b3cda412fc..07d15bae75bc 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -86,9 +86,9 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v
return 0;
}
-static unsigned int snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait)
+static __poll_t snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct usX2Ydev *us428 = hw->private_data;
struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem;
if (us428->chip_status & USX2Y_STAT_CHIP_HUP)
@@ -198,24 +198,22 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
struct snd_hwdep_dsp_image *dsp)
{
struct usX2Ydev *priv = hw->private_data;
- int lret, err = -EINVAL;
- snd_printdd( "dsp_load %s\n", dsp->name);
+ struct usb_device *dev = priv->dev;
+ int lret, err;
+ char *buf;
- if (access_ok(VERIFY_READ, dsp->image, dsp->length)) {
- struct usb_device* dev = priv->dev;
- char *buf;
+ snd_printdd( "dsp_load %s\n", dsp->name);
- buf = memdup_user(dsp->image, dsp->length);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
+ buf = memdup_user(dsp->image, dsp->length);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
- err = usb_set_interface(dev, 0, 1);
- if (err)
- snd_printk(KERN_ERR "usb_set_interface error \n");
- else
- err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
- kfree(buf);
- }
+ err = usb_set_interface(dev, 0, 1);
+ if (err)
+ snd_printk(KERN_ERR "usb_set_interface error \n");
+ else
+ err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
+ kfree(buf);
if (err)
return err;
if (dsp->index == 1) {
diff --git a/tools/arch/alpha/include/uapi/asm/errno.h b/tools/arch/alpha/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..3d265f6babaf
--- /dev/null
+++ b/tools/arch/alpha/include/uapi/asm/errno.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ALPHA_ERRNO_H
+#define _ALPHA_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#undef EAGAIN /* 11 in errno-base.h */
+
+#define EDEADLK 11 /* Resource deadlock would occur */
+
+#define EAGAIN 35 /* Try again */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Cannot assign requested address */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Network dropped connection because of reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Transport endpoint is already connected */
+#define ENOTCONN 57 /* Transport endpoint is not connected */
+#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 59 /* Too many references: cannot splice */
+#define ETIMEDOUT 60 /* Connection timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+#define ELOOP 62 /* Too many symbolic links encountered */
+#define ENAMETOOLONG 63 /* File name too long */
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#define ENOTEMPTY 66 /* Directory not empty */
+
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale file handle */
+#define EREMOTE 71 /* Object is remote */
+
+#define ENOLCK 77 /* No record locks available */
+#define ENOSYS 78 /* Function not implemented */
+
+#define ENOMSG 80 /* No message of desired type */
+#define EIDRM 81 /* Identifier removed */
+#define ENOSR 82 /* Out of streams resources */
+#define ETIME 83 /* Timer expired */
+#define EBADMSG 84 /* Not a data message */
+#define EPROTO 85 /* Protocol error */
+#define ENODATA 86 /* No data available */
+#define ENOSTR 87 /* Device not a stream */
+
+#define ENOPKG 92 /* Package not installed */
+
+#define EILSEQ 116 /* Illegal byte sequence */
+
+/* The following are just random noise.. */
+#define ECHRNG 88 /* Channel number out of range */
+#define EL2NSYNC 89 /* Level 2 not synchronized */
+#define EL3HLT 90 /* Level 3 halted */
+#define EL3RST 91 /* Level 3 reset */
+
+#define ELNRNG 93 /* Link number out of range */
+#define EUNATCH 94 /* Protocol driver not attached */
+#define ENOCSI 95 /* No CSI structure available */
+#define EL2HLT 96 /* Level 2 halted */
+#define EBADE 97 /* Invalid exchange */
+#define EBADR 98 /* Invalid request descriptor */
+#define EXFULL 99 /* Exchange full */
+#define ENOANO 100 /* No anode */
+#define EBADRQC 101 /* Invalid request code */
+#define EBADSLT 102 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 104 /* Bad font file format */
+#define ENONET 105 /* Machine is not on the network */
+#define ENOLINK 106 /* Link has been severed */
+#define EADV 107 /* Advertise error */
+#define ESRMNT 108 /* Srmount error */
+#define ECOMM 109 /* Communication error on send */
+#define EMULTIHOP 110 /* Multihop attempted */
+#define EDOTDOT 111 /* RFS specific error */
+#define EOVERFLOW 112 /* Value too large for defined data type */
+#define ENOTUNIQ 113 /* Name not unique on network */
+#define EBADFD 114 /* File descriptor in bad state */
+#define EREMCHG 115 /* Remote address changed */
+
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+
+#define ELIBACC 122 /* Can not access a needed shared library */
+#define ELIBBAD 123 /* Accessing a corrupted shared library */
+#define ELIBSCN 124 /* .lib section in a.out corrupted */
+#define ELIBMAX 125 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 126 /* Cannot exec a shared library directly */
+#define ERESTART 127 /* Interrupted system call should be restarted */
+#define ESTRPIPE 128 /* Streams pipe error */
+
+#define ENOMEDIUM 129 /* No medium found */
+#define EMEDIUMTYPE 130 /* Wrong medium type */
+#define ECANCELED 131 /* Operation Cancelled */
+#define ENOKEY 132 /* Required key not available */
+#define EKEYEXPIRED 133 /* Key has expired */
+#define EKEYREVOKED 134 /* Key has been revoked */
+#define EKEYREJECTED 135 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 136 /* Owner died */
+#define ENOTRECOVERABLE 137 /* State not recoverable */
+
+#define ERFKILL 138 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 139 /* Memory page has hardware error */
+
+#endif
diff --git a/tools/arch/mips/include/asm/errno.h b/tools/arch/mips/include/asm/errno.h
new file mode 100644
index 000000000000..21d91cdfe3c9
--- /dev/null
+++ b/tools/arch/mips/include/asm/errno.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle
+ */
+#ifndef _ASM_ERRNO_H
+#define _ASM_ERRNO_H
+
+#include <uapi/asm/errno.h>
+
+
+/* The biggest error number defined here or in <linux/errno.h>. */
+#define EMAXERRNO 1133
+
+#endif /* _ASM_ERRNO_H */
diff --git a/tools/arch/mips/include/uapi/asm/errno.h b/tools/arch/mips/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..2fb714e2d6d8
--- /dev/null
+++ b/tools/arch/mips/include/uapi/asm/errno.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle
+ */
+#ifndef _UAPI_ASM_ERRNO_H
+#define _UAPI_ASM_ERRNO_H
+
+/*
+ * These error numbers are intended to be MIPS ABI compatible
+ */
+
+#include <asm-generic/errno-base.h>
+
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define ENOLCK 46 /* No record locks available */
+#define EBADE 50 /* Invalid exchange */
+#define EBADR 51 /* Invalid request descriptor */
+#define EXFULL 52 /* Exchange full */
+#define ENOANO 53 /* No anode */
+#define EBADRQC 54 /* Invalid request code */
+#define EBADSLT 55 /* Invalid slot */
+#define EDEADLOCK 56 /* File locking deadlock error */
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EDOTDOT 73 /* RFS specific error */
+#define EMULTIHOP 74 /* Multihop attempted */
+#define EBADMSG 77 /* Not a data message */
+#define ENAMETOOLONG 78 /* File name too long */
+#define EOVERFLOW 79 /* Value too large for defined data type */
+#define ENOTUNIQ 80 /* Name not unique on network */
+#define EBADFD 81 /* File descriptor in bad state */
+#define EREMCHG 82 /* Remote address changed */
+#define ELIBACC 83 /* Can not access a needed shared library */
+#define ELIBBAD 84 /* Accessing a corrupted shared library */
+#define ELIBSCN 85 /* .lib section in a.out corrupted */
+#define ELIBMAX 86 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 87 /* Cannot exec a shared library directly */
+#define EILSEQ 88 /* Illegal byte sequence */
+#define ENOSYS 89 /* Function not implemented */
+#define ELOOP 90 /* Too many symbolic links encountered */
+#define ERESTART 91 /* Interrupted system call should be restarted */
+#define ESTRPIPE 92 /* Streams pipe error */
+#define ENOTEMPTY 93 /* Directory not empty */
+#define EUSERS 94 /* Too many users */
+#define ENOTSOCK 95 /* Socket operation on non-socket */
+#define EDESTADDRREQ 96 /* Destination address required */
+#define EMSGSIZE 97 /* Message too long */
+#define EPROTOTYPE 98 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 99 /* Protocol not available */
+#define EPROTONOSUPPORT 120 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 121 /* Socket type not supported */
+#define EOPNOTSUPP 122 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 123 /* Protocol family not supported */
+#define EAFNOSUPPORT 124 /* Address family not supported by protocol */
+#define EADDRINUSE 125 /* Address already in use */
+#define EADDRNOTAVAIL 126 /* Cannot assign requested address */
+#define ENETDOWN 127 /* Network is down */
+#define ENETUNREACH 128 /* Network is unreachable */
+#define ENETRESET 129 /* Network dropped connection because of reset */
+#define ECONNABORTED 130 /* Software caused connection abort */
+#define ECONNRESET 131 /* Connection reset by peer */
+#define ENOBUFS 132 /* No buffer space available */
+#define EISCONN 133 /* Transport endpoint is already connected */
+#define ENOTCONN 134 /* Transport endpoint is not connected */
+#define EUCLEAN 135 /* Structure needs cleaning */
+#define ENOTNAM 137 /* Not a XENIX named type file */
+#define ENAVAIL 138 /* No XENIX semaphores available */
+#define EISNAM 139 /* Is a named type file */
+#define EREMOTEIO 140 /* Remote I/O error */
+#define EINIT 141 /* Reserved */
+#define EREMDEV 142 /* Error 142 */
+#define ESHUTDOWN 143 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 144 /* Too many references: cannot splice */
+#define ETIMEDOUT 145 /* Connection timed out */
+#define ECONNREFUSED 146 /* Connection refused */
+#define EHOSTDOWN 147 /* Host is down */
+#define EHOSTUNREACH 148 /* No route to host */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EALREADY 149 /* Operation already in progress */
+#define EINPROGRESS 150 /* Operation now in progress */
+#define ESTALE 151 /* Stale file handle */
+#define ECANCELED 158 /* AIO operation canceled */
+
+/*
+ * These errors are Linux extensions.
+ */
+#define ENOMEDIUM 159 /* No medium found */
+#define EMEDIUMTYPE 160 /* Wrong medium type */
+#define ENOKEY 161 /* Required key not available */
+#define EKEYEXPIRED 162 /* Key has expired */
+#define EKEYREVOKED 163 /* Key has been revoked */
+#define EKEYREJECTED 164 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 165 /* Owner died */
+#define ENOTRECOVERABLE 166 /* State not recoverable */
+
+#define ERFKILL 167 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 168 /* Memory page has hardware error */
+
+#define EDQUOT 1133 /* Quota exceeded */
+
+
+#endif /* _UAPI_ASM_ERRNO_H */
diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..fc0df353ff0d
--- /dev/null
+++ b/tools/arch/parisc/include/uapi/asm/errno.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_ERRNO_H
+#define _PARISC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define EDEADLOCK EDEADLK
+#define ENOLCK 46 /* No record locks available */
+#define EILSEQ 47 /* Illegal byte sequence */
+
+#define ENONET 50 /* Machine is not on the network */
+#define ENODATA 51 /* No data available */
+#define ETIME 52 /* Timer expired */
+#define ENOSR 53 /* Out of streams resources */
+#define ENOSTR 54 /* Device not a stream */
+#define ENOPKG 55 /* Package not installed */
+
+#define ENOLINK 57 /* Link has been severed */
+#define EADV 58 /* Advertise error */
+#define ESRMNT 59 /* Srmount error */
+#define ECOMM 60 /* Communication error on send */
+#define EPROTO 61 /* Protocol error */
+
+#define EMULTIHOP 64 /* Multihop attempted */
+
+#define EDOTDOT 66 /* RFS specific error */
+#define EBADMSG 67 /* Not a data message */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale file handle */
+#define EREMOTE 71 /* Object is remote */
+#define EOVERFLOW 72 /* Value too large for defined data type */
+
+/* these errnos are defined by Linux but not HPUX. */
+
+#define EBADE 160 /* Invalid exchange */
+#define EBADR 161 /* Invalid request descriptor */
+#define EXFULL 162 /* Exchange full */
+#define ENOANO 163 /* No anode */
+#define EBADRQC 164 /* Invalid request code */
+#define EBADSLT 165 /* Invalid slot */
+#define EBFONT 166 /* Bad font file format */
+#define ENOTUNIQ 167 /* Name not unique on network */
+#define EBADFD 168 /* File descriptor in bad state */
+#define EREMCHG 169 /* Remote address changed */
+#define ELIBACC 170 /* Can not access a needed shared library */
+#define ELIBBAD 171 /* Accessing a corrupted shared library */
+#define ELIBSCN 172 /* .lib section in a.out corrupted */
+#define ELIBMAX 173 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 174 /* Cannot exec a shared library directly */
+#define ERESTART 175 /* Interrupted system call should be restarted */
+#define ESTRPIPE 176 /* Streams pipe error */
+#define EUCLEAN 177 /* Structure needs cleaning */
+#define ENOTNAM 178 /* Not a XENIX named type file */
+#define ENAVAIL 179 /* No XENIX semaphores available */
+#define EISNAM 180 /* Is a named type file */
+#define EREMOTEIO 181 /* Remote I/O error */
+#define ENOMEDIUM 182 /* No medium found */
+#define EMEDIUMTYPE 183 /* Wrong medium type */
+#define ENOKEY 184 /* Required key not available */
+#define EKEYEXPIRED 185 /* Key has expired */
+#define EKEYREVOKED 186 /* Key has been revoked */
+#define EKEYREJECTED 187 /* Key was rejected by service */
+
+/* We now return you to your regularly scheduled HPUX. */
+
+#define ENOSYM 215 /* symbol does not exist in executable */
+#define ENOTSOCK 216 /* Socket operation on non-socket */
+#define EDESTADDRREQ 217 /* Destination address required */
+#define EMSGSIZE 218 /* Message too long */
+#define EPROTOTYPE 219 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 220 /* Protocol not available */
+#define EPROTONOSUPPORT 221 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 222 /* Socket type not supported */
+#define EOPNOTSUPP 223 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 224 /* Protocol family not supported */
+#define EAFNOSUPPORT 225 /* Address family not supported by protocol */
+#define EADDRINUSE 226 /* Address already in use */
+#define EADDRNOTAVAIL 227 /* Cannot assign requested address */
+#define ENETDOWN 228 /* Network is down */
+#define ENETUNREACH 229 /* Network is unreachable */
+#define ENETRESET 230 /* Network dropped connection because of reset */
+#define ECONNABORTED 231 /* Software caused connection abort */
+#define ECONNRESET 232 /* Connection reset by peer */
+#define ENOBUFS 233 /* No buffer space available */
+#define EISCONN 234 /* Transport endpoint is already connected */
+#define ENOTCONN 235 /* Transport endpoint is not connected */
+#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 237 /* Too many references: cannot splice */
+#define ETIMEDOUT 238 /* Connection timed out */
+#define ECONNREFUSED 239 /* Connection refused */
+#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+#define EREMOTERELEASE 240 /* Remote peer released connection */
+#define EHOSTDOWN 241 /* Host is down */
+#define EHOSTUNREACH 242 /* No route to host */
+
+#define EALREADY 244 /* Operation already in progress */
+#define EINPROGRESS 245 /* Operation now in progress */
+#define EWOULDBLOCK EAGAIN /* Operation would block (Not HPUX compliant) */
+#define ENOTEMPTY 247 /* Directory not empty */
+#define ENAMETOOLONG 248 /* File name too long */
+#define ELOOP 249 /* Too many symbolic links encountered */
+#define ENOSYS 251 /* Function not implemented */
+
+#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
+#define ECANCELLED 253 /* aio request was canceled before completion (POSIX.4 / HPUX) */
+#define ECANCELED ECANCELLED /* SUSv3 and Solaris want one 'L' */
+
+/* for robust mutexes */
+#define EOWNERDEAD 254 /* Owner died */
+#define ENOTRECOVERABLE 255 /* State not recoverable */
+
+#define ERFKILL 256 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 257 /* Memory page has hardware error */
+
+#endif
diff --git a/tools/arch/powerpc/include/uapi/asm/errno.h b/tools/arch/powerpc/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..cc79856896a1
--- /dev/null
+++ b/tools/arch/powerpc/include/uapi/asm/errno.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_ERRNO_H
+#define _ASM_POWERPC_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#undef EDEADLOCK
+#define EDEADLOCK 58 /* File locking deadlock error */
+
+#endif /* _ASM_POWERPC_ERRNO_H */
diff --git a/tools/arch/s390/include/uapi/asm/unistd.h b/tools/arch/s390/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..725120939051
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/unistd.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/unistd.h"
+ */
+
+#ifndef _UAPI_ASM_S390_UNISTD_H_
+#define _UAPI_ASM_S390_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_restart_syscall 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_brk 45
+#define __NR_signal 48
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_setpgid 57
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_symlink 83
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_lookup_dcookie 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_getdents 141
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_getpmsg 188
+#define __NR_putpmsg 189
+#define __NR_vfork 190
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+#define __NR_getdents64 220
+#define __NR_readahead 222
+#define __NR_setxattr 224
+#define __NR_lsetxattr 225
+#define __NR_fsetxattr 226
+#define __NR_getxattr 227
+#define __NR_lgetxattr 228
+#define __NR_fgetxattr 229
+#define __NR_listxattr 230
+#define __NR_llistxattr 231
+#define __NR_flistxattr 232
+#define __NR_removexattr 233
+#define __NR_lremovexattr 234
+#define __NR_fremovexattr 235
+#define __NR_gettid 236
+#define __NR_tkill 237
+#define __NR_futex 238
+#define __NR_sched_setaffinity 239
+#define __NR_sched_getaffinity 240
+#define __NR_tgkill 241
+/* Number 242 is reserved for tux */
+#define __NR_io_setup 243
+#define __NR_io_destroy 244
+#define __NR_io_getevents 245
+#define __NR_io_submit 246
+#define __NR_io_cancel 247
+#define __NR_exit_group 248
+#define __NR_epoll_create 249
+#define __NR_epoll_ctl 250
+#define __NR_epoll_wait 251
+#define __NR_set_tid_address 252
+#define __NR_fadvise64 253
+#define __NR_timer_create 254
+#define __NR_timer_settime 255
+#define __NR_timer_gettime 256
+#define __NR_timer_getoverrun 257
+#define __NR_timer_delete 258
+#define __NR_clock_settime 259
+#define __NR_clock_gettime 260
+#define __NR_clock_getres 261
+#define __NR_clock_nanosleep 262
+/* Number 263 is reserved for vserver */
+#define __NR_statfs64 265
+#define __NR_fstatfs64 266
+#define __NR_remap_file_pages 267
+#define __NR_mbind 268
+#define __NR_get_mempolicy 269
+#define __NR_set_mempolicy 270
+#define __NR_mq_open 271
+#define __NR_mq_unlink 272
+#define __NR_mq_timedsend 273
+#define __NR_mq_timedreceive 274
+#define __NR_mq_notify 275
+#define __NR_mq_getsetattr 276
+#define __NR_kexec_load 277
+#define __NR_add_key 278
+#define __NR_request_key 279
+#define __NR_keyctl 280
+#define __NR_waitid 281
+#define __NR_ioprio_set 282
+#define __NR_ioprio_get 283
+#define __NR_inotify_init 284
+#define __NR_inotify_add_watch 285
+#define __NR_inotify_rm_watch 286
+#define __NR_migrate_pages 287
+#define __NR_openat 288
+#define __NR_mkdirat 289
+#define __NR_mknodat 290
+#define __NR_fchownat 291
+#define __NR_futimesat 292
+#define __NR_unlinkat 294
+#define __NR_renameat 295
+#define __NR_linkat 296
+#define __NR_symlinkat 297
+#define __NR_readlinkat 298
+#define __NR_fchmodat 299
+#define __NR_faccessat 300
+#define __NR_pselect6 301
+#define __NR_ppoll 302
+#define __NR_unshare 303
+#define __NR_set_robust_list 304
+#define __NR_get_robust_list 305
+#define __NR_splice 306
+#define __NR_sync_file_range 307
+#define __NR_tee 308
+#define __NR_vmsplice 309
+#define __NR_move_pages 310
+#define __NR_getcpu 311
+#define __NR_epoll_pwait 312
+#define __NR_utimes 313
+#define __NR_fallocate 314
+#define __NR_utimensat 315
+#define __NR_signalfd 316
+#define __NR_timerfd 317
+#define __NR_eventfd 318
+#define __NR_timerfd_create 319
+#define __NR_timerfd_settime 320
+#define __NR_timerfd_gettime 321
+#define __NR_signalfd4 322
+#define __NR_eventfd2 323
+#define __NR_inotify_init1 324
+#define __NR_pipe2 325
+#define __NR_dup3 326
+#define __NR_epoll_create1 327
+#define __NR_preadv 328
+#define __NR_pwritev 329
+#define __NR_rt_tgsigqueueinfo 330
+#define __NR_perf_event_open 331
+#define __NR_fanotify_init 332
+#define __NR_fanotify_mark 333
+#define __NR_prlimit64 334
+#define __NR_name_to_handle_at 335
+#define __NR_open_by_handle_at 336
+#define __NR_clock_adjtime 337
+#define __NR_syncfs 338
+#define __NR_setns 339
+#define __NR_process_vm_readv 340
+#define __NR_process_vm_writev 341
+#define __NR_s390_runtime_instr 342
+#define __NR_kcmp 343
+#define __NR_finit_module 344
+#define __NR_sched_setattr 345
+#define __NR_sched_getattr 346
+#define __NR_renameat2 347
+#define __NR_seccomp 348
+#define __NR_getrandom 349
+#define __NR_memfd_create 350
+#define __NR_bpf 351
+#define __NR_s390_pci_mmio_write 352
+#define __NR_s390_pci_mmio_read 353
+#define __NR_execveat 354
+#define __NR_userfaultfd 355
+#define __NR_membarrier 356
+#define __NR_recvmmsg 357
+#define __NR_sendmmsg 358
+#define __NR_socket 359
+#define __NR_socketpair 360
+#define __NR_bind 361
+#define __NR_connect 362
+#define __NR_listen 363
+#define __NR_accept4 364
+#define __NR_getsockopt 365
+#define __NR_setsockopt 366
+#define __NR_getsockname 367
+#define __NR_getpeername 368
+#define __NR_sendto 369
+#define __NR_sendmsg 370
+#define __NR_recvfrom 371
+#define __NR_recvmsg 372
+#define __NR_shutdown 373
+#define __NR_mlock2 374
+#define __NR_copy_file_range 375
+#define __NR_preadv2 376
+#define __NR_pwritev2 377
+#define __NR_s390_guarded_storage 378
+#define __NR_statx 379
+#define __NR_s390_sthyi 380
+#define NR_syscalls 381
+
+/*
+ * There are some system calls that are not present on 64 bit, some
+ * have a different name although they do the same (e.g. __NR_chown32
+ * is __NR_chown on 64 bit).
+ */
+#ifndef __s390x__
+
+#define __NR_time 13
+#define __NR_lchown 16
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_getrlimit 76
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_fchown 95
+#define __NR_ioperm 101
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR__newselect 142
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_chown 182
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_fcntl64 221
+#define __NR_sendfile64 223
+#define __NR_fadvise64_64 264
+#define __NR_fstatat64 293
+
+#else
+
+#define __NR_select 142
+#define __NR_getrlimit 191 /* SuS compliant getrlimit */
+#define __NR_lchown 198
+#define __NR_getuid 199
+#define __NR_getgid 200
+#define __NR_geteuid 201
+#define __NR_getegid 202
+#define __NR_setreuid 203
+#define __NR_setregid 204
+#define __NR_getgroups 205
+#define __NR_setgroups 206
+#define __NR_fchown 207
+#define __NR_setresuid 208
+#define __NR_getresuid 209
+#define __NR_setresgid 210
+#define __NR_getresgid 211
+#define __NR_chown 212
+#define __NR_setuid 213
+#define __NR_setgid 214
+#define __NR_setfsuid 215
+#define __NR_setfsgid 216
+#define __NR_newfstatat 293
+
+#endif
+
+#endif /* _UAPI_ASM_S390_UNISTD_H_ */
diff --git a/tools/arch/sparc/include/uapi/asm/errno.h b/tools/arch/sparc/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..81a732b902ee
--- /dev/null
+++ b/tools/arch/sparc/include/uapi/asm/errno.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _SPARC_ERRNO_H
+#define _SPARC_ERRNO_H
+
+/* These match the SunOS error numbering scheme. */
+
+#include <asm-generic/errno-base.h>
+
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Op not supported on transport endpoint */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Cannot assign requested address */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Net dropped connection because of reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Transport endpoint is already connected */
+#define ENOTCONN 57 /* Transport endpoint is not connected */
+#define ESHUTDOWN 58 /* No send after transport endpoint shutdown */
+#define ETOOMANYREFS 59 /* Too many references: cannot splice */
+#define ETIMEDOUT 60 /* Connection timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+#define ELOOP 62 /* Too many symbolic links encountered */
+#define ENAMETOOLONG 63 /* File name too long */
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#define ENOTEMPTY 66 /* Directory not empty */
+#define EPROCLIM 67 /* SUNOS: Too many processes */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale file handle */
+#define EREMOTE 71 /* Object is remote */
+#define ENOSTR 72 /* Device not a stream */
+#define ETIME 73 /* Timer expired */
+#define ENOSR 74 /* Out of streams resources */
+#define ENOMSG 75 /* No message of desired type */
+#define EBADMSG 76 /* Not a data message */
+#define EIDRM 77 /* Identifier removed */
+#define EDEADLK 78 /* Resource deadlock would occur */
+#define ENOLCK 79 /* No record locks available */
+#define ENONET 80 /* Machine is not on the network */
+#define ERREMOTE 81 /* SunOS: Too many lvls of remote in path */
+#define ENOLINK 82 /* Link has been severed */
+#define EADV 83 /* Advertise error */
+#define ESRMNT 84 /* Srmount error */
+#define ECOMM 85 /* Communication error on send */
+#define EPROTO 86 /* Protocol error */
+#define EMULTIHOP 87 /* Multihop attempted */
+#define EDOTDOT 88 /* RFS specific error */
+#define EREMCHG 89 /* Remote address changed */
+#define ENOSYS 90 /* Function not implemented */
+
+/* The rest have no SunOS equivalent. */
+#define ESTRPIPE 91 /* Streams pipe error */
+#define EOVERFLOW 92 /* Value too large for defined data type */
+#define EBADFD 93 /* File descriptor in bad state */
+#define ECHRNG 94 /* Channel number out of range */
+#define EL2NSYNC 95 /* Level 2 not synchronized */
+#define EL3HLT 96 /* Level 3 halted */
+#define EL3RST 97 /* Level 3 reset */
+#define ELNRNG 98 /* Link number out of range */
+#define EUNATCH 99 /* Protocol driver not attached */
+#define ENOCSI 100 /* No CSI structure available */
+#define EL2HLT 101 /* Level 2 halted */
+#define EBADE 102 /* Invalid exchange */
+#define EBADR 103 /* Invalid request descriptor */
+#define EXFULL 104 /* Exchange full */
+#define ENOANO 105 /* No anode */
+#define EBADRQC 106 /* Invalid request code */
+#define EBADSLT 107 /* Invalid slot */
+#define EDEADLOCK 108 /* File locking deadlock error */
+#define EBFONT 109 /* Bad font file format */
+#define ELIBEXEC 110 /* Cannot exec a shared library directly */
+#define ENODATA 111 /* No data available */
+#define ELIBBAD 112 /* Accessing a corrupted shared library */
+#define ENOPKG 113 /* Package not installed */
+#define ELIBACC 114 /* Can not access a needed shared library */
+#define ENOTUNIQ 115 /* Name not unique on network */
+#define ERESTART 116 /* Interrupted syscall should be restarted */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EILSEQ 122 /* Illegal byte sequence */
+#define ELIBMAX 123 /* Atmpt to link in too many shared libs */
+#define ELIBSCN 124 /* .lib section in a.out corrupted */
+
+#define ENOMEDIUM 125 /* No medium found */
+#define EMEDIUMTYPE 126 /* Wrong medium type */
+#define ECANCELED 127 /* Operation Cancelled */
+#define ENOKEY 128 /* Required key not available */
+#define EKEYEXPIRED 129 /* Key has expired */
+#define EKEYREVOKED 130 /* Key has been revoked */
+#define EKEYREJECTED 131 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 132 /* Owner died */
+#define ENOTRECOVERABLE 133 /* State not recoverable */
+
+#define ERFKILL 134 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 135 /* Memory page has hardware error */
+
+#endif
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 800104c8a3ed..21ac898df2d8 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -197,11 +197,12 @@
#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
-
+#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
@@ -340,5 +341,6 @@
#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 14d6d5007314..b027633e7300 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -50,6 +50,12 @@
# define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31))
#endif
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define DISABLE_PTI 0
+#else
+# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -60,7 +66,7 @@
#define DISABLED_MASK4 (DISABLE_PCID)
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
-#define DISABLED_MASK7 0
+#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_MPX)
#define DISABLED_MASK10 0
diff --git a/tools/arch/x86/include/uapi/asm/errno.h b/tools/arch/x86/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..4c82b503d92f
--- /dev/null
+++ b/tools/arch/x86/include/uapi/asm/errno.h
@@ -0,0 +1 @@
+#include <asm-generic/errno.h>
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 07a6697466ef..c8ec0ae16bf0 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -9,6 +9,35 @@ MAKE = make
CFLAGS += -Wall -O2
CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+FEATURE_USER = .bpf
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
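+# Probing libbfd is comparatively slow, so feature detection is skipped
+# when every requested goal is a clean target.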
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+ check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
%.yacc.c: %.y
$(YACC) -o $@ -d $<
diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
index 75bf526a0168..58c2bab4ef6e 100644
--- a/tools/bpf/bpf_jit_disasm.c
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -72,7 +72,14 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
disassemble_init_for_target(&info);
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
disassemble = disassembler(bfdf);
+#endif
assert(disassemble);
do {
@@ -165,7 +172,8 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
{
char *ptr, *pptr, *tmp;
off_t off = 0;
- int ret, flen, proglen, pass, ulen = 0;
+ unsigned int proglen;
+ int ret, flen, pass, ulen = 0;
regmatch_t pmatch[1];
unsigned long base;
regex_t regex;
@@ -192,7 +200,7 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
}
ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
- ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
+ ret = sscanf(ptr, "flen=%d proglen=%u pass=%d image=%lx",
&flen, &proglen, &pass, &base);
if (ret != 4) {
regfree(&regex);
@@ -232,7 +240,7 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
}
assert(ulen == proglen);
- printf("%d bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
+ printf("%u bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
proglen, pass, flen);
printf("%lx + <x>:\n", base);
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index 37292bb5ce60..c462a928e03d 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -3,12 +3,16 @@ include ../../../scripts/utilities.mak
INSTALL ?= install
RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
-# Make the path relative to DESTDIR, not prefix
-ifndef DESTDIR
-prefix ?= /usr/local
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
endif
-mandir ?= $(prefix)/share/man
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
man8dir = $(mandir)/man8
MAN8_RST = $(wildcard *.rst)
@@ -20,15 +24,21 @@ man: man8
man8: $(DOC_MAN8)
$(OUTPUT)%.8: %.rst
- rst2man $< > $@
+ $(QUIET_GEN)rst2man $< > $@
clean:
- $(call QUIET_CLEAN, Documentation) $(RM) $(DOC_MAN8)
+ $(call QUIET_CLEAN, Documentation)
+ $(Q)$(RM) $(DOC_MAN8)
install: man
- $(call QUIET_INSTALL, Documentation-man) \
- $(INSTALL) -d -m 755 $(DESTDIR)$(man8dir); \
- $(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir);
+ $(call QUIET_INSTALL, Documentation-man)
+ $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
+ $(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
+
+uninstall:
+ $(call QUIET_UNINST, Documentation-man)
+ $(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
+ $(Q)$(RMDIR) $(DESTDIR)$(man8dir)
-.PHONY: man man8 clean install
+.PHONY: man man8 clean install uninstall
.DEFAULT_GOAL := man
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
new file mode 100644
index 000000000000..2fe2a1bdbe3e
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -0,0 +1,118 @@
+================
+bpftool-cgroup
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF progs
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **cgroup** *COMMAND*
+
+ *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
+
+ *COMMANDS* :=
+ { **show** | **list** | **attach** | **detach** | **help** }
+
+CGROUP COMMANDS
+===============
+
+| **bpftool** **cgroup { show | list }** *CGROUP*
+| **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+| **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+| **bpftool** **cgroup help**
+|
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+| *ATTACH_TYPE* := { *ingress* | *egress* | *sock_create* | *sock_ops* | *device* }
+| *ATTACH_FLAGS* := { *multi* | *override* }
+
+DESCRIPTION
+===========
+ **bpftool cgroup { show | list }** *CGROUP*
+ List all programs attached to the cgroup *CGROUP*.
+
+ Output will start with program ID followed by attach type,
+ attach flags and program name.
+
+ **bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+ Attach program *PROG* to the cgroup *CGROUP* with attach type
+ *ATTACH_TYPE* and optional *ATTACH_FLAGS*.
+
+	*ATTACH_FLAGS* can be one of: **override**, if a sub-cgroup installs
+	some bpf program, the program in this cgroup yields to the sub-cgroup
+	program; **multi**, if a sub-cgroup installs some bpf program,
+	that cgroup program gets run in addition to the program in this
+	cgroup.
+
+	Only one program is allowed to be attached to a cgroup with
+	no attach flags or the **override** flag. Attaching another
+	program will release the old program and attach the new one.
+
+ Multiple programs are allowed to be attached to a cgroup with
+ **multi**. They are executed in FIFO order (those that were
+ attached first, run first).
+
+ Non-default *ATTACH_FLAGS* are supported by kernel version 4.14
+ and later.
+
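+	As a purely hypothetical illustration of the FIFO ordering above:
+	if *prog1* is attached with **multi** before *prog2*, then *prog1*
+	runs first and *prog2* second each time the hook fires.
+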
+	*ATTACH_TYPE* can be one of:
+ **ingress** ingress path of the inet socket (since 4.10);
+ **egress** egress path of the inet socket (since 4.10);
+ **sock_create** opening of an inet socket (since 4.10);
+ **sock_ops** various socket operations (since 4.12);
+ **device** device access (since 4.15).
+
+ **bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+ Detach *PROG* from the cgroup *CGROUP* and attach type
+ *ATTACH_TYPE*.
+
+	**bpftool cgroup help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -v, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+ -f, --bpffs
+ Show file names of pinned programs.
+
+EXAMPLES
+========
+|
+| **# mount -t bpf none /sys/fs/bpf/**
+| **# mkdir /sys/fs/cgroup/test.slice**
+| **# bpftool prog load ./device_cgroup.o /sys/fs/bpf/prog**
+| **# bpftool cgroup attach /sys/fs/cgroup/test.slice/ device id 1 multi**
+
+**# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+ ID AttachType AttachFlags Name
+    1        device          multi           bpf_prog1
+
+|
+| **# bpftool cgroup detach /sys/fs/cgroup/test.slice/ device id 1**
+| **# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+ ID AttachType AttachFlags Name
+
+SEE ALSO
+========
+ **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 9f51a268eb06..0ab32b312aec 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -15,13 +15,13 @@ SYNOPSIS
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
*COMMANDS* :=
- { **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+ { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
| **pin** | **help** }
MAP COMMANDS
=============
-| **bpftool** **map show** [*MAP*]
+| **bpftool** **map { show | list }** [*MAP*]
| **bpftool** **map dump** *MAP*
| **bpftool** **map update** *MAP* **key** *BYTES* **value** *VALUE* [*UPDATE_FLAGS*]
| **bpftool** **map lookup** *MAP* **key** *BYTES*
@@ -36,7 +36,7 @@ MAP COMMANDS
DESCRIPTION
===========
- **bpftool map show** [*MAP*]
+ **bpftool map { show | list }** [*MAP*]
Show information about loaded maps. If *MAP* is specified
show information only about given map, otherwise list all
maps currently loaded on the system.
@@ -128,4 +128,4 @@ EXAMPLES
SEE ALSO
========
- **bpftool**\ (8), **bpftool-prog**\ (8)
+ **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 36e8d1c3c40d..e4ceee7f2dff 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -15,22 +15,23 @@ SYNOPSIS
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
*COMMANDS* :=
- { **show** | **dump xlated** | **dump jited** | **pin** | **help** }
+ { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | **help** }
MAP COMMANDS
=============
-| **bpftool** **prog show** [*PROG*]
+| **bpftool** **prog { show | list }** [*PROG*]
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog pin** *PROG* *FILE*
+| **bpftool** **prog load** *OBJ* *FILE*
| **bpftool** **prog help**
|
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
DESCRIPTION
===========
- **bpftool prog show** [*PROG*]
+ **bpftool prog { show | list }** [*PROG*]
Show information about loaded programs. If *PROG* is
specified show information only about given program, otherwise
list all programs currently loaded on the system.
@@ -57,6 +58,11 @@ DESCRIPTION
Note: *FILE* must be located in *bpffs* mount.
+ **bpftool prog load** *OBJ* *FILE*
+ Load bpf program from binary *OBJ* and pin as *FILE*.
+
+ Note: *FILE* must be located in *bpffs* mount.
+
**bpftool prog help**
Print short help message.
@@ -126,8 +132,10 @@ EXAMPLES
|
| **# mount -t bpf none /sys/fs/bpf/**
| **# bpftool prog pin id 10 /sys/fs/bpf/prog**
+| **# bpftool prog load ./my_prog.o /sys/fs/bpf/prog2**
| **# ls -l /sys/fs/bpf/**
| -rw------- 1 root root 0 Jul 22 01:43 prog
+| -rw------- 1 root root 0 Jul 22 01:44 prog2
**# bpftool prog dum jited pinned /sys/fs/bpf/prog opcodes**
@@ -147,4 +155,4 @@ EXAMPLES
SEE ALSO
========
- **bpftool**\ (8), **bpftool-map**\ (8)
+ **bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 926c03d5a8da..20689a321ffe 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -16,17 +16,19 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** }
+ *OBJECT* := { **map** | **program** | **cgroup** }
*OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
| { **-j** | **--json** } [{ **-p** | **--pretty** }] }
*MAP-COMMANDS* :=
- { **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+ { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
| **pin** | **help** }
- *PROG-COMMANDS* := { **show** | **dump jited** | **dump xlated** | **pin**
- | **help** }
+ *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin**
+ | **load** | **help** }
+
+ *CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
DESCRIPTION
===========
@@ -53,4 +55,4 @@ OPTIONS
SEE ALSO
========
- **bpftool-map**\ (8), **bpftool-prog**\ (8)
+ **bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index ec3052c0b004..26901ec87361 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -1,25 +1,10 @@
include ../../scripts/Makefile.include
-
include ../../scripts/utilities.mak
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
-ifneq ($(objtree),)
-#$(info Determined 'objtree' to be $(objtree))
-endif
-
-ifneq ($(OUTPUT),)
-#$(info Determined 'OUTPUT' to be $(OUTPUT))
-# Adding $(OUTPUT) as a directory to look for source files,
-# because use generated output files as sources dependency
-# for flex/bison parsers.
-VPATH += $(OUTPUT)
-export VPATH
endif
ifeq ($(V),1)
@@ -28,16 +13,18 @@ else
Q = @
endif
-BPF_DIR = $(srctree)/tools/lib/bpf/
+BPF_DIR = $(srctree)/tools/lib/bpf/
ifneq ($(OUTPUT),)
- BPF_PATH=$(OUTPUT)
+ BPF_PATH = $(OUTPUT)
else
- BPF_PATH=$(BPF_DIR)
+ BPF_PATH = $(BPF_DIR)
endif
LIBBPF = $(BPF_PATH)libbpf.a
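+# Version bpftool in lockstep with the kernel tree it ships in, via the
+# top-level "kernelversion" make target.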
+BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
+
$(LIBBPF): FORCE
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
@@ -45,22 +32,50 @@ $(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
-prefix = /usr/local
+prefix ?= /usr/local
bash_compdir ?= /usr/share/bash-completion/completions
CC = gcc
CFLAGS += -O2
CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow
-CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
+CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
+CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
+INSTALL ?= install
+RM ?= rm -f
+
+FEATURE_USER = .bpftool
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+ check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
include $(wildcard *.d)
all: $(OUTPUT)bpftool
-SRCS=$(wildcard *.c)
-OBJS=$(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
+SRCS = $(wildcard *.c)
+OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
@@ -73,21 +88,34 @@ $(OUTPUT)%.o: %.c
clean: $(LIBBPF)-clean
$(call QUIET_CLEAN, bpftool)
- $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+ $(Q)$(RM) $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+
+install: $(OUTPUT)bpftool
+ $(call QUIET_INSTALL, bpftool)
+ $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin
+ $(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool
+ $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir)
+ $(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir)
-install:
- install -m 0755 -d $(prefix)/sbin
- install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
- install -m 0755 -d $(bash_compdir)
- install -m 0644 bash-completion/bpftool $(bash_compdir)
+uninstall:
+ $(call QUIET_UNINST, bpftool)
+ $(Q)$(RM) $(DESTDIR)$(prefix)/sbin/bpftool
+ $(Q)$(RM) $(DESTDIR)$(bash_compdir)/bpftool
doc:
- $(Q)$(MAKE) -C Documentation/
+ $(call descend,Documentation)
+
+doc-clean:
+ $(call descend,Documentation,clean)
doc-install:
- $(Q)$(MAKE) -C Documentation/ install
+ $(call descend,Documentation,install)
+
+doc-uninstall:
+ $(call descend,Documentation,uninstall)
FORCE:
-.PHONY: all clean FORCE install doc doc-install
+.PHONY: all FORCE clean install uninstall
+.PHONY: doc doc-clean doc-install doc-uninstall
.DEFAULT_GOAL := all
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 7febee05c8e7..0137866bb8f6 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -197,7 +197,7 @@ _bpftool()
local PROG_TYPE='id pinned tag'
case $command in
- show)
+ show|list)
[[ $prev != "$command" ]] && return 0
COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
return 0
@@ -232,7 +232,7 @@ _bpftool()
;;
*)
[[ $prev == $object ]] && \
- COMPREPLY=( $( compgen -W 'dump help pin show' -- \
+ COMPREPLY=( $( compgen -W 'dump help pin show list' -- \
"$cur" ) )
;;
esac
@@ -240,7 +240,7 @@ _bpftool()
map)
local MAP_TYPE='id pinned'
case $command in
- show|dump)
+ show|list|dump)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
@@ -343,7 +343,7 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'delete dump getnext help \
- lookup pin show update' -- "$cur" ) )
+ lookup pin show list update' -- "$cur" ) )
;;
esac
;;
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
new file mode 100644
index 000000000000..cae32a61cb18
--- /dev/null
+++ b/tools/bpf/bpftool/cgroup.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (C) 2017 Facebook
+// Author: Roman Gushchin <guro@fb.com>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <bpf.h>
+
+#include "main.h"
+
+#define HELP_SPEC_ATTACH_FLAGS \
+ "ATTACH_FLAGS := { multi | override }"
+
+#define HELP_SPEC_ATTACH_TYPES \
+ "ATTACH_TYPE := { ingress | egress | sock_create | sock_ops | device }"
+
+static const char * const attach_type_strings[] = {
+ [BPF_CGROUP_INET_INGRESS] = "ingress",
+ [BPF_CGROUP_INET_EGRESS] = "egress",
+ [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
+ [BPF_CGROUP_SOCK_OPS] = "sock_ops",
+ [BPF_CGROUP_DEVICE] = "device",
+ [__MAX_BPF_ATTACH_TYPE] = NULL,
+};
+
+static enum bpf_attach_type parse_attach_type(const char *str)
+{
+ enum bpf_attach_type type;
+
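+	/* is_prefix() accepts abbreviations, e.g. "ing" selects "ingress". */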
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ if (attach_type_strings[type] &&
+ is_prefix(str, attach_type_strings[type]))
+ return type;
+ }
+
+ return __MAX_BPF_ATTACH_TYPE;
+}
+
+static int show_bpf_prog(int id, const char *attach_type_str,
+ const char *attach_flags_str)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int prog_fd;
+
+ prog_fd = bpf_prog_get_fd_by_id(id);
+ if (prog_fd < 0)
+ return -1;
+
+ if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+ close(prog_fd);
+ return -1;
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "id", info.id);
+ jsonw_string_field(json_wtr, "attach_type",
+ attach_type_str);
+ jsonw_string_field(json_wtr, "attach_flags",
+ attach_flags_str);
+ jsonw_string_field(json_wtr, "name", info.name);
+ jsonw_end_object(json_wtr);
+ } else {
+ printf("%-8u %-15s %-15s %-15s\n", info.id,
+ attach_type_str,
+ attach_flags_str,
+ info.name);
+ }
+
+ close(prog_fd);
+ return 0;
+}
+
+static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+{
+ __u32 prog_ids[1024] = {0};
+ char *attach_flags_str;
+ __u32 prog_cnt, iter;
+ __u32 attach_flags;
+ char buf[32];
+ int ret;
+
+ prog_cnt = ARRAY_SIZE(prog_ids);
+ ret = bpf_prog_query(cgroup_fd, type, 0, &attach_flags, prog_ids,
+ &prog_cnt);
+ if (ret)
+ return ret;
+
+ if (prog_cnt == 0)
+ return 0;
+
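+	/* attach_flags applies to the (cgroup, type) pair, not per program. */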
+ switch (attach_flags) {
+ case BPF_F_ALLOW_MULTI:
+ attach_flags_str = "multi";
+ break;
+ case BPF_F_ALLOW_OVERRIDE:
+ attach_flags_str = "override";
+ break;
+ case 0:
+ attach_flags_str = "";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
+ attach_flags_str = buf;
+ }
+
+ for (iter = 0; iter < prog_cnt; iter++)
+ show_bpf_prog(prog_ids[iter], attach_type_strings[type],
+ attach_flags_str);
+
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ enum bpf_attach_type type;
+ int cgroup_fd;
+ int ret = -1;
+
+ if (argc < 1) {
+ p_err("too few parameters for cgroup show");
+ goto exit;
+ } else if (argc > 1) {
+ p_err("too many parameters for cgroup show");
+ goto exit;
+ }
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+		p_err("can't open cgroup %s", argv[0]);
+ goto exit;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ else
+ printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
+ "AttachFlags", "Name");
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ /*
+		 * Not all attach types may be supported, so it's expected
+		 * that some requests will fail.
+		 * If we were able to show at least one attach type,
+		 * return 0.
+ */
+ if (show_attached_bpf_progs(cgroup_fd, type) == 0)
+ ret = 0;
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ close(cgroup_fd);
+exit:
+ return ret;
+}
+
+static int do_attach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int cgroup_fd, prog_fd;
+ int attach_flags = 0;
+ int ret = -1;
+ int i;
+
+ if (argc < 4) {
+ p_err("too few parameters for cgroup attach");
+ goto exit;
+ }
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+		p_err("can't open cgroup %s", argv[0]);
+ goto exit;
+ }
+
+ attach_type = parse_attach_type(argv[1]);
+ if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach type");
+ goto exit_cgroup;
+ }
+
+ argc -= 2;
+ argv = &argv[2];
+ prog_fd = prog_parse_fd(&argc, &argv);
+ if (prog_fd < 0)
+ goto exit_cgroup;
+
+ for (i = 0; i < argc; i++) {
+ if (is_prefix(argv[i], "multi")) {
+ attach_flags |= BPF_F_ALLOW_MULTI;
+ } else if (is_prefix(argv[i], "override")) {
+ attach_flags |= BPF_F_ALLOW_OVERRIDE;
+ } else {
+ p_err("unknown option: %s", argv[i]);
+			goto exit_prog;
+ }
+ }
+
+ if (bpf_prog_attach(prog_fd, cgroup_fd, attach_type, attach_flags)) {
+ p_err("failed to attach program");
+ goto exit_prog;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ ret = 0;
+
+exit_prog:
+ close(prog_fd);
+exit_cgroup:
+ close(cgroup_fd);
+exit:
+ return ret;
+}
+
+static int do_detach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int prog_fd, cgroup_fd;
+ int ret = -1;
+
+ if (argc < 4) {
+ p_err("too few parameters for cgroup detach");
+ goto exit;
+ }
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+		p_err("can't open cgroup %s", argv[0]);
+ goto exit;
+ }
+
+ attach_type = parse_attach_type(argv[1]);
+ if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach type");
+ goto exit_cgroup;
+ }
+
+ argc -= 2;
+ argv = &argv[2];
+ prog_fd = prog_parse_fd(&argc, &argv);
+ if (prog_fd < 0)
+ goto exit_cgroup;
+
+ if (bpf_prog_detach2(prog_fd, cgroup_fd, attach_type)) {
+ p_err("failed to detach program");
+ goto exit_prog;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ ret = 0;
+
+exit_prog:
+ close(prog_fd);
+exit_cgroup:
+ close(cgroup_fd);
+exit:
+ return ret;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s %s { show | list } CGROUP\n"
+ " %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
+ " %s %s detach CGROUP ATTACH_TYPE PROG\n"
+ " %s %s help\n"
+ "\n"
+ " " HELP_SPEC_ATTACH_TYPES "\n"
+ " " HELP_SPEC_ATTACH_FLAGS "\n"
+ " " HELP_SPEC_PROGRAM "\n"
+ " " HELP_SPEC_OPTIONS "\n"
+ "",
+ bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2], bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "attach", do_attach },
+ { "detach", do_detach },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_cgroup(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 2bd3b280e6dd..0b482c0070e0 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -34,6 +34,7 @@
/* Author: Jakub Kicinski <kubakici@wp.pl> */
#include <errno.h>
+#include <fcntl.h>
#include <fts.h>
#include <libgen.h>
#include <mntent.h>
@@ -44,7 +45,9 @@
#include <unistd.h>
#include <linux/limits.h>
#include <linux/magic.h>
+#include <net/if.h>
#include <sys/mount.h>
+#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
@@ -163,13 +166,49 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
return fd;
}
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+int do_pin_fd(int fd, const char *name)
{
char err_str[ERR_MAX_LEN];
- unsigned int id;
- char *endptr;
char *file;
char *dir;
+ int err = 0;
+
+ err = bpf_obj_pin(fd, name);
+ if (!err)
+ goto out;
+
+	file = malloc(strlen(name) + 1);
+	if (!file) {
+		p_err("mem alloc failed");
+		return -1;
+	}
+	strcpy(file, name);
+	dir = dirname(file);
+
+ if (errno != EPERM || is_bpffs(dir)) {
+ p_err("can't pin the object (%s): %s", name, strerror(errno));
+ goto out_free;
+ }
+
+ /* Attempt to mount bpffs, then retry pinning. */
+ err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
+ if (!err) {
+ err = bpf_obj_pin(fd, name);
+ if (err)
+ p_err("can't pin the object (%s): %s", name,
+ strerror(errno));
+ } else {
+ err_str[ERR_MAX_LEN - 1] = '\0';
+ p_err("can't mount BPF file system to pin the object (%s): %s",
+ name, err_str);
+ }
+
+out_free:
+ free(file);
+out:
+ return err;
+}
+
+int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+{
+ unsigned int id;
+ char *endptr;
int err;
int fd;
@@ -195,35 +234,8 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
return -1;
}
- err = bpf_obj_pin(fd, *argv);
- if (!err)
- goto out_close;
-
- file = malloc(strlen(*argv) + 1);
- strcpy(file, *argv);
- dir = dirname(file);
-
- if (errno != EPERM || is_bpffs(dir)) {
- p_err("can't pin the object (%s): %s", *argv, strerror(errno));
- goto out_free;
- }
-
- /* Attempt to mount bpffs, then retry pinning. */
- err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
- if (!err) {
- err = bpf_obj_pin(fd, *argv);
- if (err)
- p_err("can't pin the object (%s): %s", *argv,
- strerror(errno));
- } else {
- err_str[ERR_MAX_LEN - 1] = '\0';
- p_err("can't mount BPF file system to pin the object (%s): %s",
- *argv, err_str);
- }
+ err = do_pin_fd(fd, *argv);
-out_free:
- free(file);
-out_close:
close(fd);
return err;
}
@@ -403,3 +415,124 @@ void delete_pinned_obj_table(struct pinned_obj_table *tab)
free(obj);
}
}
+
+static char *
+ifindex_to_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, char *buf)
+{
+ struct stat st;
+ int err;
+
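+	/* An ifindex is only meaningful inside its own netns; check ours. */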
+ err = stat("/proc/self/ns/net", &st);
+ if (err) {
+		p_err("Can't stat /proc/self/ns/net: %s", strerror(errno));
+ return NULL;
+ }
+
+ if (st.st_dev != ns_dev || st.st_ino != ns_ino)
+ return NULL;
+
+ return if_indextoname(ifindex, buf);
+}
+
+static int read_sysfs_hex_int(char *path)
+{
+ char vendor_id_buf[8];
+ int len;
+ int fd;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ p_err("Can't open %s: %s", path, strerror(errno));
+ return -1;
+ }
+
+ len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
+ close(fd);
+ if (len < 0) {
+ p_err("Can't read %s: %s", path, strerror(errno));
+ return -1;
+ }
+ if (len >= (int)sizeof(vendor_id_buf)) {
+ p_err("Value in %s too long", path);
+ return -1;
+ }
+
+ vendor_id_buf[len] = 0;
+
+ return strtol(vendor_id_buf, NULL, 0);
+}
+
+static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
+{
+ char full_path[64];
+
+ snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
+ devname, entry_name);
+
+ return read_sysfs_hex_int(full_path);
+}
+
+const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino)
+{
+ char devname[IF_NAMESIZE];
+ int vendor_id;
+ int device_id;
+
+ if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
+ p_err("Can't get net device name for ifindex %d: %s", ifindex,
+ strerror(errno));
+ return NULL;
+ }
+
+ vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
+ if (vendor_id < 0) {
+ p_err("Can't get device vendor id for %s", devname);
+ return NULL;
+ }
+
+ switch (vendor_id) {
+ case 0x19ee:
+ device_id = read_sysfs_netdev_hex_int(devname, "device");
+ if (device_id != 0x4000 &&
+ device_id != 0x6000 &&
+ device_id != 0x6003)
+ p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
+ return "NFP-6xxx";
+ default:
+ p_err("Can't get bfd arch name for device vendor id 0x%04x",
+ vendor_id);
+ return NULL;
+ }
+}
+
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+ char name[IF_NAMESIZE];
+
+ if (!ifindex)
+ return;
+
+ printf(" dev ");
+ if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+ printf("%s", name);
+ else
+ printf("ifindex %u ns_dev %llu ns_ino %llu",
+ ifindex, ns_dev, ns_inode);
+}
+
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+ char name[IF_NAMESIZE];
+
+ if (!ifindex)
+ return;
+
+ jsonw_name(json_wtr, "dev");
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "ifindex", ifindex);
+ jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
+ jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
+ if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+ jsonw_string_field(json_wtr, "ifname", name);
+ jsonw_end_object(json_wtr);
+}
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index 1551d3918d4c..87439320ef70 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -76,7 +76,8 @@ static int fprintf_json(void *out, const char *fmt, ...)
return 0;
}
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
+void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch)
{
disassembler_ftype disassemble;
struct disassemble_info info;
@@ -100,6 +101,19 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
else
init_disassemble_info(&info, stdout,
(fprintf_ftype) fprintf);
+
+ /* Update architecture info for offload. */
+ if (arch) {
+ const bfd_arch_info_type *inf = bfd_scan_arch(arch);
+
+ if (inf) {
+ bfdf->arch_info = inf;
+ } else {
+			p_err("No libbfd support for %s", arch);
+ return;
+ }
+ }
+
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
info.buffer = image;
@@ -107,7 +121,14 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
disassemble_init_for_target(&info);
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
disassemble = disassembler(bfdf);
+#endif
assert(disassemble);
if (json_output)
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index d294bc8168be..3a0396d87c42 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -38,7 +38,6 @@
#include <errno.h>
#include <getopt.h>
#include <linux/bpf.h>
-#include <linux/version.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -85,7 +84,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map }\n"
+ " OBJECT := { prog | map | cgroup }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@@ -95,21 +94,13 @@ static int do_help(int argc, char **argv)
static int do_version(int argc, char **argv)
{
- unsigned int version[3];
-
- version[0] = LINUX_VERSION_CODE >> 16;
- version[1] = LINUX_VERSION_CODE >> 8 & 0xf;
- version[2] = LINUX_VERSION_CODE & 0xf;
-
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "version");
- jsonw_printf(json_wtr, "\"%u.%u.%u\"",
- version[0], version[1], version[2]);
+ jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
jsonw_end_object(json_wtr);
} else {
- printf("%s v%u.%u.%u\n", bin_name,
- version[0], version[1], version[2]);
+ printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
}
return 0;
}
@@ -173,6 +164,7 @@ static const struct cmd cmds[] = {
{ "batch", do_batch },
{ "prog", do_prog },
{ "map", do_map },
+ { "cgroup", do_cgroup },
{ "version", do_version },
{ 0 }
};
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index bff330b49791..b8e9584d6246 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -96,6 +96,8 @@ struct pinned_obj {
int build_pinned_obj_table(struct pinned_obj_table *table,
enum bpf_obj_type type);
void delete_pinned_obj_table(struct pinned_obj_table *tab);
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
struct cmd {
const char *cmd;
@@ -111,13 +113,18 @@ char *get_fdinfo(int fd, const char *key);
int open_obj_pinned(char *path);
int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
+int do_pin_fd(int fd, const char *name);
int do_prog(int argc, char **arg);
int do_map(int argc, char **arg);
+int do_cgroup(int argc, char **arg);
int prog_parse_fd(int *argc, char ***argv);
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes);
+void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch);
void print_hex_data_json(uint8_t *data, size_t len);
+const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino);
+
#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index a8c3a33dd185..f95fa67bb498 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -66,6 +66,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
[BPF_MAP_TYPE_DEVMAP] = "devmap",
[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
+ [BPF_MAP_TYPE_CPUMAP] = "cpumap",
};
static unsigned int get_possible_cpus(void)
@@ -428,6 +429,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
jsonw_name(json_wtr, "flags");
jsonw_printf(json_wtr, "%#x", info->map_flags);
+
+ print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
jsonw_uint_field(json_wtr, "bytes_key", info->key_size);
jsonw_uint_field(json_wtr, "bytes_value", info->value_size);
jsonw_uint_field(json_wtr, "max_entries", info->max_entries);
@@ -469,7 +473,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
if (*info->name)
printf("name %s ", info->name);
- printf("flags 0x%x\n", info->map_flags);
+ printf("flags 0x%x", info->map_flags);
+ print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
+ printf("\n");
printf("\tkey %uB value %uB max_entries %u",
info->key_size, info->value_size, info->max_entries);
@@ -861,7 +867,7 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s show [MAP]\n"
+ "Usage: %s %s { show | list } [MAP]\n"
" %s %s dump MAP\n"
" %s %s update MAP key BYTES value VALUE [UPDATE_FLAGS]\n"
" %s %s lookup MAP key BYTES\n"
@@ -885,6 +891,7 @@ static int do_help(int argc, char **argv)
static const struct cmd cmds[] = {
{ "show", do_show },
+ { "list", do_show },
{ "help", do_help },
{ "dump", do_dump },
{ "update", do_update },
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index dded77345bfb..e8e2baaf93c2 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -45,6 +45,7 @@
#include <sys/stat.h>
#include <bpf.h>
+#include <libbpf.h>
#include "main.h"
#include "disasm.h"
@@ -65,6 +66,7 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
[BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
[BPF_PROG_TYPE_SK_SKB] = "sk_skb",
+ [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
};
static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
@@ -229,6 +231,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
info->tag[0], info->tag[1], info->tag[2], info->tag[3],
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
+ print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
if (info->load_time) {
char buf[32];
@@ -286,6 +290,7 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
printf("tag ");
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
+ print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
printf("\n");
if (info->load_time) {
@@ -402,6 +407,88 @@ static int do_show(int argc, char **argv)
return err;
}
+#define SYM_MAX_NAME 256
+
+struct kernel_sym {
+ unsigned long address;
+ char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+ unsigned long address_call_base;
+ struct kernel_sym *sym_mapping;
+ __u32 sym_count;
+ char scratch_buff[SYM_MAX_NAME];
+};
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+ return ((struct kernel_sym *)sym_a)->address -
+ ((struct kernel_sym *)sym_b)->address;
+}
+
+static void kernel_syms_load(struct dump_data *dd)
+{
+ struct kernel_sym *sym;
+ char buff[256];
+ void *tmp, *address;
+ FILE *fp;
+
+ fp = fopen("/proc/kallsyms", "r");
+ if (!fp)
+ return;
+
+ while (!feof(fp)) {
+ if (!fgets(buff, sizeof(buff), fp))
+ break;
+ tmp = realloc(dd->sym_mapping,
+ (dd->sym_count + 1) *
+ sizeof(*dd->sym_mapping));
+ if (!tmp) {
+out:
+ free(dd->sym_mapping);
+ dd->sym_mapping = NULL;
+ fclose(fp);
+ return;
+ }
+ dd->sym_mapping = tmp;
+ sym = &dd->sym_mapping[dd->sym_count];
+ if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+ continue;
+ sym->address = (unsigned long)address;
+ if (!strcmp(sym->name, "__bpf_call_base")) {
+ dd->address_call_base = sym->address;
+ /* sysctl kernel.kptr_restrict was set */
+ if (!sym->address)
+ goto out;
+ }
+ if (sym->address)
+ dd->sym_count++;
+ }
+
+ fclose(fp);
+
+ qsort(dd->sym_mapping, dd->sym_count,
+ sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+static void kernel_syms_destroy(struct dump_data *dd)
+{
+ free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+ unsigned long key)
+{
+ struct kernel_sym sym = {
+ .address = key,
+ };
+
+ return dd->sym_mapping ?
+ bsearch(&sym, dd->sym_mapping, dd->sym_count,
+ sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
{
va_list args;
@@ -411,8 +498,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
va_end(args);
}
-static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
+static const char *print_call_pcrel(struct dump_data *dd,
+ struct kernel_sym *sym,
+ unsigned long address,
+ const struct bpf_insn *insn)
+{
+ if (sym)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%+d#%s", insn->off, sym->name);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%+d#0x%lx", insn->off, address);
+ return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+ struct kernel_sym *sym,
+ unsigned long address)
+{
+ if (sym)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%s", sym->name);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "0x%lx", address);
+ return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+ const struct bpf_insn *insn)
+{
+ struct dump_data *dd = private_data;
+ unsigned long address = dd->address_call_base + insn->imm;
+ struct kernel_sym *sym;
+
+ sym = kernel_syms_search(dd, address);
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ return print_call_pcrel(dd, sym, address, insn);
+ else
+ return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+ const struct bpf_insn *insn,
+ __u64 full_imm)
+{
+ struct dump_data *dd = private_data;
+
+ if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[id:%u]", insn->imm);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "0x%llx", (unsigned long long)full_imm);
+ return dd->scratch_buff;
+}
+
+static void dump_xlated_plain(struct dump_data *dd, void *buf,
+ unsigned int len, bool opcodes)
{
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
struct bpf_insn *insn = buf;
bool double_insn = false;
unsigned int i;
@@ -426,7 +576,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
printf("% 4d: ", i);
- print_bpf_insn(print_insn, NULL, insn + i, true);
+ print_bpf_insn(&cbs, NULL, insn + i, true);
if (opcodes) {
printf(" ");
@@ -455,8 +605,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
va_end(args);
}
-static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_json(struct dump_data *dd, void *buf,
+ unsigned int len, bool opcodes)
{
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn_json,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
struct bpf_insn *insn = buf;
bool double_insn = false;
unsigned int i;
@@ -471,7 +628,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "disasm");
- print_bpf_insn(print_insn_json, NULL, insn + i, true);
+ print_bpf_insn(&cbs, NULL, insn + i, true);
if (opcodes) {
jsonw_name(json_wtr, "opcodes");
@@ -506,6 +663,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
static int do_dump(int argc, char **argv)
{
struct bpf_prog_info info = {};
+ struct dump_data dd = {};
__u32 len = sizeof(info);
unsigned int buf_size;
char *filepath = NULL;
@@ -593,6 +751,14 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
+ if ((member_len == &info.jited_prog_len &&
+ info.jited_prog_insns == 0) ||
+ (member_len == &info.xlated_prog_len &&
+ info.xlated_prog_insns == 0)) {
+ p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+ goto err_free;
+ }
+
if (filepath) {
fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) {
@@ -609,17 +775,29 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
} else {
- if (member_len == &info.jited_prog_len)
- disasm_print_insn(buf, *member_len, opcodes);
- else
+ if (member_len == &info.jited_prog_len) {
+ const char *name = NULL;
+
+ if (info.ifindex) {
+ name = ifindex_to_bfd_name_ns(info.ifindex,
+ info.netns_dev,
+ info.netns_ino);
+ if (!name)
+ goto err_free;
+ }
+
+ disasm_print_insn(buf, *member_len, opcodes, name);
+ } else {
+ kernel_syms_load(&dd);
if (json_output)
- dump_xlated_json(buf, *member_len, opcodes);
+ dump_xlated_json(&dd, buf, *member_len, opcodes);
else
- dump_xlated_plain(buf, *member_len, opcodes);
+ dump_xlated_plain(&dd, buf, *member_len, opcodes);
+ kernel_syms_destroy(&dd);
+ }
}
free(buf);
-
return 0;
err_free:
@@ -637,6 +815,30 @@ static int do_pin(int argc, char **argv)
return err;
}
+static int do_load(int argc, char **argv)
+{
+ struct bpf_object *obj;
+ int prog_fd;
+
+ if (argc != 2)
+ usage();
+
+ if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
+ p_err("failed to load program");
+ return -1;
+ }
+
+ if (do_pin_fd(prog_fd, argv[1])) {
+ p_err("failed to pin program");
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ return 0;
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -645,26 +847,29 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s show [PROG]\n"
+ "Usage: %s %s { show | list } [PROG]\n"
" %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
" %s %s dump jited PROG [{ file FILE | opcodes }]\n"
" %s %s pin PROG FILE\n"
+ " %s %s load OBJ FILE\n"
" %s %s help\n"
"\n"
" " HELP_SPEC_PROGRAM "\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
{ "show", do_show },
+ { "list", do_show },
{ "help", do_help },
{ "dump", do_dump },
{ "pin", do_pin },
+ { "load", do_load },
{ 0 }
};
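
Taken together, the prog changes above let bpftool both load and inspect programs without a separate loader. A typical session might look like the following; paths are illustrative and assume a bpffs mount at /sys/fs/bpf:

	# bpftool prog load sample.o /sys/fs/bpf/sample
	# bpftool prog list
	# bpftool prog dump xlated pinned /sys/fs/bpf/sample

With the kallsyms support added in do_dump(), helper and pseudo calls in the xlated dump are printed by symbol name whenever kernel.kptr_restrict permits reading /proc/kallsyms; otherwise the dump errors out as shown above.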
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index c71a05b9c984..c378f003b007 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -56,6 +56,7 @@ FEATURE_TESTS_BASIC := \
libunwind-arm \
libunwind-aarch64 \
pthread-attr-setaffinity-np \
+ pthread-barrier \
stackprotector-all \
timerfd \
libdw-dwarf-unwind \
@@ -65,7 +66,8 @@ FEATURE_TESTS_BASIC := \
bpf \
sched_getcpu \
sdt \
- setns
+ setns \
+ libopencsd
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 96982640fbf8..0a490cb15149 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -13,6 +13,7 @@ FILES= \
test-hello.bin \
test-libaudit.bin \
test-libbfd.bin \
+ test-disassembler-four-args.bin \
test-liberty.bin \
test-liberty-z.bin \
test-cplus-demangle.bin \
@@ -37,6 +38,7 @@ FILES= \
test-libunwind-debug-frame-arm.bin \
test-libunwind-debug-frame-aarch64.bin \
test-pthread-attr-setaffinity-np.bin \
+ test-pthread-barrier.bin \
test-stackprotector-all.bin \
test-timerfd.bin \
test-libdw-dwarf-unwind.bin \
@@ -51,7 +53,8 @@ FILES= \
test-cxx.bin \
test-jvmti.bin \
test-sched_getcpu.bin \
- test-setns.bin
+ test-setns.bin \
+ test-libopencsd.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@@ -79,6 +82,9 @@ $(OUTPUT)test-hello.bin:
$(OUTPUT)test-pthread-attr-setaffinity-np.bin:
$(BUILD) -D_GNU_SOURCE -lpthread
+$(OUTPUT)test-pthread-barrier.bin:
+ $(BUILD) -lpthread
+
$(OUTPUT)test-stackprotector-all.bin:
$(BUILD) -fstack-protector-all
@@ -100,6 +106,10 @@ $(OUTPUT)test-sched_getcpu.bin:
$(OUTPUT)test-setns.bin:
$(BUILD)
+$(OUTPUT)test-libopencsd.bin:
+ $(BUILD) # -lopencsd_c_api -lopencsd provided by
+ # $(FEATURE_CHECK_LDFLAGS-libopencsd)
+
DWARFLIBS := -ldw
ifeq ($(findstring -static,${LDFLAGS}),-static)
DWARFLIBS += -lelf -lebl -lz -llzma -lbz2
@@ -188,6 +198,9 @@ $(OUTPUT)test-libpython-version.bin:
$(OUTPUT)test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
+$(OUTPUT)test-disassembler-four-args.bin:
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
+
$(OUTPUT)test-liberty.bin:
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 4112702e4aed..8dc20a61341f 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -118,6 +118,10 @@
# include "test-pthread-attr-setaffinity-np.c"
#undef main
+#define main main_test_pthread_barrier
+# include "test-pthread-barrier.c"
+#undef main
+
#define main main_test_sched_getcpu
# include "test-sched_getcpu.c"
#undef main
@@ -158,6 +162,10 @@
# include "test-setns.c"
#undef main
+#define main main_test_libopencsd
+# include "test-libopencsd.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -187,6 +195,7 @@ int main(int argc, char *argv[])
main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
main_test_pthread_attr_setaffinity_np();
+ main_test_pthread_barrier();
main_test_lzma();
main_test_get_cpuid();
main_test_bpf();
@@ -194,6 +203,7 @@ int main(int argc, char *argv[])
main_test_sched_getcpu();
main_test_sdt();
main_test_setns();
+ main_test_libopencsd();
return 0;
}
diff --git a/tools/build/feature/test-disassembler-four-args.c b/tools/build/feature/test-disassembler-four-args.c
new file mode 100644
index 000000000000..45ce65cfddf0
--- /dev/null
+++ b/tools/build/feature/test-disassembler-four-args.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bfd.h>
+#include <dis-asm.h>
+
+int main(void)
+{
+ bfd *abfd = bfd_openr(NULL, NULL);
+
+ disassembler(bfd_get_arch(abfd),
+ bfd_big_endian(abfd),
+ bfd_get_mach(abfd),
+ abfd);
+
+ return 0;
+}
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
new file mode 100644
index 000000000000..5ff1246e6194
--- /dev/null
+++ b/tools/build/feature/test-libopencsd.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <opencsd/c_api/opencsd_c_api.h>
+
+int main(void)
+{
+ (void)ocsd_get_version();
+ return 0;
+}
diff --git a/tools/build/feature/test-pthread-barrier.c b/tools/build/feature/test-pthread-barrier.c
new file mode 100644
index 000000000000..0558d9334d97
--- /dev/null
+++ b/tools/build/feature/test-pthread-barrier.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdint.h>
+#include <pthread.h>
+
+int main(void)
+{
+ pthread_barrier_t barrier;
+
+ pthread_barrier_init(&barrier, NULL, 1);
+ pthread_barrier_wait(&barrier);
+ return pthread_barrier_destroy(&barrier);
+}
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
index 1c14c2595158..dac4d4131d9b 100644
--- a/tools/gpio/gpio-event-mon.c
+++ b/tools/gpio/gpio-event-mon.c
@@ -14,6 +14,7 @@
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
+#include <stdint.h>
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
@@ -23,12 +24,13 @@
#include <getopt.h>
#include <inttypes.h>
#include <sys/ioctl.h>
+#include <sys/types.h>
#include <linux/gpio.h>
int monitor_device(const char *device_name,
unsigned int line,
- u_int32_t handleflags,
- u_int32_t eventflags,
+ uint32_t handleflags,
+ uint32_t eventflags,
unsigned int loops)
{
struct gpioevent_request req;
@@ -145,8 +147,8 @@ int main(int argc, char **argv)
const char *device_name = NULL;
unsigned int line = -1;
unsigned int loops = 0;
- u_int32_t handleflags = GPIOHANDLE_REQUEST_INPUT;
- u_int32_t eventflags = 0;
+ uint32_t handleflags = GPIOHANDLE_REQUEST_INPUT;
+ uint32_t eventflags = 0;
int c;
while ((c = getopt(argc, argv, "c:n:o:dsrf?")) != -1) {
diff --git a/tools/include/uapi/asm-generic/errno-base.h b/tools/include/uapi/asm-generic/errno-base.h
new file mode 100644
index 000000000000..9653140bff92
--- /dev/null
+++ b/tools/include/uapi/asm-generic/errno-base.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_GENERIC_ERRNO_BASE_H
+#define _ASM_GENERIC_ERRNO_BASE_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+
+#endif
diff --git a/tools/include/uapi/asm-generic/errno.h b/tools/include/uapi/asm-generic/errno.h
new file mode 100644
index 000000000000..cf9c51ac49f9
--- /dev/null
+++ b/tools/include/uapi/asm-generic/errno.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_GENERIC_ERRNO_H
+#define _ASM_GENERIC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+
+/*
+ * This error code is special: arch syscall entry code will return
+ * -ENOSYS if users try to call a syscall that doesn't exist. To keep
+ * failures of syscalls that really do exist distinguishable from
+ * failures due to attempts to use a nonexistent syscall, syscall
+ * implementations should refrain from returning -ENOSYS.
+ */
+#define ENOSYS 38 /* Invalid system call number */
+
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+#define ECANCELED 125 /* Operation Canceled */
+#define ENOKEY 126 /* Required key not available */
+#define EKEYEXPIRED 127 /* Key has expired */
+#define EKEYREVOKED 128 /* Key has been revoked */
+#define EKEYREJECTED 129 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 130 /* Owner died */
+#define ENOTRECOVERABLE 131 /* State not recoverable */
+
+#define ERFKILL 132 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 133 /* Memory page has hardware error */
+
+#endif
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4c223ab30293..db6bdc375126 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -17,7 +17,7 @@
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
-#define BPF_DW 0x18 /* double word */
+#define BPF_DW 0x18 /* double word (64-bit) */
#define BPF_XADD 0xc0 /* exclusive add */
/* alu/jmp fields */
@@ -197,8 +197,14 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL 1
+
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
@@ -239,6 +245,7 @@ union bpf_attr {
* BPF_F_NUMA_NODE is set).
*/
char map_name[BPF_OBJ_NAME_LEN];
+ __u32 map_ifindex; /* ifindex of netdev to create on */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -635,6 +642,14 @@ union bpf_attr {
* @optlen: length of optval in bytes
* Return: 0 or negative error
*
+ * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
+ * Set callback flags for sock_ops
+ * @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
+ * @flags: flags value
+ * Return: 0 for no error
+ * -EINVAL if there is no full tcp socket
+ * otherwise, the unsupported bits in flags are returned
+ *
* int bpf_skb_adjust_room(skb, len_diff, mode, flags)
* Grow or shrink room in sk_buff.
* @skb: pointer to skb
@@ -677,6 +692,10 @@ union bpf_attr {
* @buf: buf to fill
* @buf_size: size of the buf
* Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ * @pt_regs: pointer to struct pt_regs
+ * @rc: the return value to set
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -736,7 +755,9 @@ union bpf_attr {
FN(xdp_adjust_meta), \
FN(perf_event_read_value), \
FN(perf_prog_read_value), \
- FN(getsockopt),
+ FN(getsockopt), \
+ FN(override_return), \
+ FN(sock_ops_cb_flags_set),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -888,6 +909,9 @@ struct xdp_md {
__u32 data;
__u32 data_end;
__u32 data_meta;
+ /* The accesses below go through struct xdp_rxq_info */
+ __u32 ingress_ifindex; /* rxq->dev->ifindex */
+ __u32 rx_queue_index; /* rxq->queue_index */
};
enum sk_action {
@@ -910,6 +934,9 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u64 netns_dev;
+ __u64 netns_ino;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -920,6 +947,9 @@ struct bpf_map_info {
__u32 max_entries;
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u64 netns_dev;
+ __u64 netns_ino;
} __attribute__((aligned(8)));
/* User bpf_sock_ops struct to access socket values and specify request ops
@@ -931,8 +961,9 @@ struct bpf_map_info {
struct bpf_sock_ops {
__u32 op;
union {
- __u32 reply;
- __u32 replylong[4];
+ __u32 args[4]; /* Optionally passed to bpf program */
+ __u32 reply; /* Returned by bpf program */
+ __u32 replylong[4]; /* Optionally returned by bpf prog */
};
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -941,8 +972,45 @@ struct bpf_sock_ops {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
+ __u32 is_fullsock; /* Some TCP fields are only valid if
+ * there is a full socket. If not, the
+ * fields read as zero.
+ */
+ __u32 snd_cwnd;
+ __u32 srtt_us; /* Averaged RTT << 3 in usecs */
+ __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
+ __u32 state;
+ __u32 rtt_min;
+ __u32 snd_ssthresh;
+ __u32 rcv_nxt;
+ __u32 snd_nxt;
+ __u32 snd_una;
+ __u32 mss_cache;
+ __u32 ecn_flags;
+ __u32 rate_delivered;
+ __u32 rate_interval_us;
+ __u32 packets_out;
+ __u32 retrans_out;
+ __u32 total_retrans;
+ __u32 segs_in;
+ __u32 data_segs_in;
+ __u32 segs_out;
+ __u32 data_segs_out;
+ __u32 lost_out;
+ __u32 sacked_out;
+ __u32 sk_txhash;
+ __u64 bytes_received;
+ __u64 bytes_acked;
};
+/* Definitions for bpf_sock_ops_cb_flags */
+#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
+#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
+#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
+#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
+ * supported cb flags
+ */
+
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
*/
@@ -976,6 +1044,43 @@ enum {
* a congestion threshold. RTTs above
* this indicate congestion
*/
+ BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
+ * Arg1: value of icsk_retransmits
+ * Arg2: value of icsk_rto
+ * Arg3: whether RTO has expired
+ */
+ BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
+ * Arg1: sequence number of 1st byte
+ * Arg2: # segments
+ * Arg3: return value of
+ * tcp_transmit_skb (0 => success)
+ */
+ BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
+ * Arg1: old_state
+ * Arg2: new_state
+ */
+};
+
+/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
+ * changes between the TCP and BPF versions. Ideally this should never happen.
+ * If it does, we need to add code to convert them before calling
+ * the BPF sock_ops function.
+ */
+enum {
+ BPF_TCP_ESTABLISHED = 1,
+ BPF_TCP_SYN_SENT,
+ BPF_TCP_SYN_RECV,
+ BPF_TCP_FIN_WAIT1,
+ BPF_TCP_FIN_WAIT2,
+ BPF_TCP_TIME_WAIT,
+ BPF_TCP_CLOSE,
+ BPF_TCP_CLOSE_WAIT,
+ BPF_TCP_LAST_ACK,
+ BPF_TCP_LISTEN,
+ BPF_TCP_CLOSING, /* Now a valid state */
+ BPF_TCP_NEW_SYN_RECV,
+
+ BPF_TCP_MAX_STATES /* Leave at the end! */
};
#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
@@ -995,7 +1100,8 @@ struct bpf_perf_event_value {
#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
struct bpf_cgroup_dev_ctx {
- __u32 access_type; /* (access << 16) | type */
+ /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+ __u32 access_type;
__u32 major;
__u32 minor;
};
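
The new sock_ops fields and callbacks combine roughly as follows. This is a minimal sketch, assuming the SEC() macro and a bpf_sock_ops_cb_flags_set() helper wrapper as provided by the kernel's samples/bpf headers:

	SEC("sockops")
	int watch_rtos(struct bpf_sock_ops *skops)
	{
		switch (skops->op) {
		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
			/* opt this connection in to RTO/retransmit callbacks */
			bpf_sock_ops_cb_flags_set(skops,
						  BPF_SOCK_OPS_RTO_CB_FLAG |
						  BPF_SOCK_OPS_RETRANS_CB_FLAG);
			break;
		case BPF_SOCK_OPS_RTO_CB:
			/* args[0]=icsk_retransmits, args[1]=icsk_rto,
			 * args[2]=whether the RTO has expired
			 */
			break;
		}
		return 1;
	}

Since the cb flags default to zero, existing sock_ops programs keep their old behavior unless they explicitly opt in.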
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index b9a4953018ed..e0739a1aa4b2 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -418,6 +418,27 @@ struct perf_event_attr {
__u16 __reserved_2; /* align to __u64 */
};
+/*
+ * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
+ * to query bpf programs attached to the same perf tracepoint
+ * as the given perf event.
+ */
+struct perf_event_query_bpf {
+ /*
+ * The below ids array length
+ */
+ __u32 ids_len;
+ /*
+ * Set by the kernel to indicate the number of
+ * available programs
+ */
+ __u32 prog_cnt;
+ /*
+ * User provided buffer to store program ids
+ */
+ __u32 ids[0];
+};
+
#define perf_flags(attr) (*(&(attr)->read_format + 1))
/*
@@ -433,6 +454,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -612,9 +634,12 @@ struct perf_event_mmap_page {
*/
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
- * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
- * different events so can reuse the same bit position.
- * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ * Following PERF_RECORD_MISC_* are used on different
+ * events, so can reuse the same bit position:
+ *
+ * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events
+ * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event
+ * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
@@ -864,6 +889,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u32 pid;
* u32 tid;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_ITRACE_START = 12,
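
The kernel reports the total number of attached programs in prog_cnt even when the supplied ids[] array is too small, so callers can detect truncation and retry with a bigger array. A minimal query sketch (pfd assumed to be an open perf event fd for a tracepoint):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	static void query_attached_progs(int pfd)
	{
		__u32 i, ids_len = 16;
		struct perf_event_query_bpf *query;

		query = calloc(1, sizeof(*query) + ids_len * sizeof(__u32));
		if (!query)
			return;
		query->ids_len = ids_len;
		if (ioctl(pfd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
			for (i = 0; i < query->prog_cnt; i++)
				printf("bpf prog id %u\n", query->ids[i]);
		free(query);
	}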
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 4555304dc18e..83714ca1f22b 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -93,7 +93,6 @@ export prefix libdir src obj
# Shell quotes
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
LIB_FILE = libbpf.a libbpf.so
@@ -150,7 +149,7 @@ CMD_TARGETS = $(LIB_FILE)
TARGETS = $(CMD_TARGETS)
-all: fixdep $(VERSION_FILES) all_cmd
+all: fixdep all_cmd
all_cmd: $(CMD_TARGETS)
@@ -169,21 +168,11 @@ $(OUTPUT)libbpf.so: $(BPF_IN)
$(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
-define update_dir
- (echo $1 > $@.tmp; \
- if [ -r $@ ] && cmp -s $@ $@.tmp; then \
- rm -f $@.tmp; \
- else \
- echo ' UPDATE $@'; \
- mv -f $@.tmp $@; \
- fi);
-endef
-
define do_install
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
fi; \
- $(INSTALL) $1 '$(DESTDIR_SQ)$2'
+ $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
endef
install_lib: all_cmd
@@ -192,7 +181,8 @@ install_lib: all_cmd
install_headers:
$(call QUIET_INSTALL, headers) \
- $(call do_install,bpf.h,$(prefix)/include/bpf,644)
+ $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
+ $(call do_install,libbpf.h,$(prefix)/include/bpf,644);
install: install_lib
@@ -203,7 +193,7 @@ config-clean:
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean:
- $(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
+ $(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so .*.d .*.cmd \
$(RM) LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
@@ -213,10 +203,10 @@ PHONY += force elfdep bpfdep
force:
elfdep:
- @if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit -1 ; fi
+ @if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit 1 ; fi
bpfdep:
- @if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit -1 ; fi
+ @if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit 1 ; fi
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 6534889e2b2f..9f44c196931e 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -40,7 +40,7 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
__u32 map_flags);
/* Recommend log buffer size */
-#define BPF_LOG_BUF_SIZE 65536
+#define BPF_LOG_BUF_SIZE (256 * 1024)
int bpf_load_program_name(enum bpf_prog_type type, const char *name,
const struct bpf_insn *insns,
size_t insns_cnt, const char *license,
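
Verifier logs grew substantially with bpf-to-bpf calls, hence the 4x bump. At 256 KiB the buffer is too large to keep on the stack; a caller sketch, assuming insns/insns_cnt were prepared elsewhere and the usual trailing kern_version/log arguments of bpf_load_program_name():

	char *log_buf = malloc(BPF_LOG_BUF_SIZE);
	int fd;

	if (!log_buf)
		return -1;
	fd = bpf_load_program_name(BPF_PROG_TYPE_XDP, "my_prog", insns,
				   insns_cnt, "GPL", 0, log_buf,
				   BPF_LOG_BUF_SIZE);
	if (fd < 0)
		fprintf(stderr, "verifier said:\n%s\n", log_buf);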
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5aa45f89da93..30c776375118 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -174,12 +174,19 @@ struct bpf_program {
char *name;
char *section_name;
struct bpf_insn *insns;
- size_t insns_cnt;
+ size_t insns_cnt, main_prog_cnt;
enum bpf_prog_type type;
- struct {
+ struct reloc_desc {
+ enum {
+ RELO_LD64,
+ RELO_CALL,
+ } type;
int insn_idx;
- int map_idx;
+ union {
+ int map_idx;
+ int text_off;
+ };
} *reloc_desc;
int nr_reloc;
@@ -234,6 +241,7 @@ struct bpf_object {
} *reloc;
int nr_reloc;
int maps_shndx;
+ int text_shndx;
} efile;
/*
* All loaded bpf_object is linked in a list, which is
@@ -375,9 +383,13 @@ bpf_object__init_prog_names(struct bpf_object *obj)
size_t pi, si;
for (pi = 0; pi < obj->nr_programs; pi++) {
- char *name = NULL;
+ const char *name = NULL;
prog = &obj->programs[pi];
+ if (prog->idx == obj->efile.text_shndx) {
+ name = ".text";
+ goto skip_search;
+ }
for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
si++) {
@@ -387,6 +399,8 @@ bpf_object__init_prog_names(struct bpf_object *obj)
continue;
if (sym.st_shndx != prog->idx)
continue;
+ if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
+ continue;
name = elf_strptr(obj->efile.elf,
obj->efile.strtabidx,
@@ -403,7 +417,7 @@ bpf_object__init_prog_names(struct bpf_object *obj)
prog->section_name);
return -EINVAL;
}
-
+skip_search:
prog->name = strdup(name);
if (!prog->name) {
pr_warning("failed to allocate memory for prog sym %s\n",
@@ -793,6 +807,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if ((sh.sh_type == SHT_PROGBITS) &&
(sh.sh_flags & SHF_EXECINSTR) &&
(data->d_size > 0)) {
+ if (strcmp(name, ".text") == 0)
+ obj->efile.text_shndx = idx;
err = bpf_object__add_program(obj, data->d_buf,
data->d_size, name, idx);
if (err) {
@@ -854,11 +870,14 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
}
static int
-bpf_program__collect_reloc(struct bpf_program *prog,
- size_t nr_maps, GElf_Shdr *shdr,
- Elf_Data *data, Elf_Data *symbols,
- int maps_shndx, struct bpf_map *maps)
+bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
+ Elf_Data *data, struct bpf_object *obj)
{
+ Elf_Data *symbols = obj->efile.symbols;
+ int text_shndx = obj->efile.text_shndx;
+ int maps_shndx = obj->efile.maps_shndx;
+ struct bpf_map *maps = obj->maps;
+ size_t nr_maps = obj->nr_maps;
int i, nrels;
pr_debug("collecting relocating info for: '%s'\n",
@@ -891,8 +910,11 @@ bpf_program__collect_reloc(struct bpf_program *prog,
GELF_R_SYM(rel.r_info));
return -LIBBPF_ERRNO__FORMAT;
}
+ pr_debug("relo for %lld value %lld name %d\n",
+ (long long) (rel.r_info >> 32),
+ (long long) sym.st_value, sym.st_name);
- if (sym.st_shndx != maps_shndx) {
+ if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
prog->section_name, sym.st_shndx);
return -LIBBPF_ERRNO__RELOC;
@@ -901,6 +923,17 @@ bpf_program__collect_reloc(struct bpf_program *prog,
insn_idx = rel.r_offset / sizeof(struct bpf_insn);
pr_debug("relocation: insn_idx=%u\n", insn_idx);
+ if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
+ if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
+ pr_warning("incorrect bpf_call opcode\n");
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ prog->reloc_desc[i].type = RELO_CALL;
+ prog->reloc_desc[i].insn_idx = insn_idx;
+ prog->reloc_desc[i].text_off = sym.st_value;
+ continue;
+ }
+
if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
insn_idx, insns[insn_idx].code);
@@ -922,6 +955,7 @@ bpf_program__collect_reloc(struct bpf_program *prog,
return -LIBBPF_ERRNO__RELOC;
}
+ prog->reloc_desc[i].type = RELO_LD64;
prog->reloc_desc[i].insn_idx = insn_idx;
prog->reloc_desc[i].map_idx = map_idx;
}
@@ -961,27 +995,76 @@ bpf_object__create_maps(struct bpf_object *obj)
}
static int
+bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
+ struct reloc_desc *relo)
+{
+ struct bpf_insn *insn, *new_insn;
+ struct bpf_program *text;
+ size_t new_cnt;
+
+ if (relo->type != RELO_CALL)
+ return -LIBBPF_ERRNO__RELOC;
+
+ if (prog->idx == obj->efile.text_shndx) {
+ pr_warning("relo in .text insn %d into off %d\n",
+ relo->insn_idx, relo->text_off);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ if (prog->main_prog_cnt == 0) {
+ text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
+ if (!text) {
+ pr_warning("no .text section found yet relo into text exist\n");
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ new_cnt = prog->insns_cnt + text->insns_cnt;
+ new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
+ if (!new_insn) {
+ pr_warning("oom in prog realloc\n");
+ return -ENOMEM;
+ }
+ memcpy(new_insn + prog->insns_cnt, text->insns,
+ text->insns_cnt * sizeof(*insn));
+ prog->insns = new_insn;
+ prog->main_prog_cnt = prog->insns_cnt;
+ prog->insns_cnt = new_cnt;
+ pr_debug("added %zd insn from %s to prog %s\n",
+ text->insns_cnt, text->section_name, prog->section_name);
+ }
+ insn = &prog->insns[relo->insn_idx];
+ insn->imm += prog->main_prog_cnt - relo->insn_idx;
+ return 0;
+}
+
+static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
- int i;
+ int i, err;
if (!prog || !prog->reloc_desc)
return 0;
for (i = 0; i < prog->nr_reloc; i++) {
- int insn_idx, map_idx;
- struct bpf_insn *insns = prog->insns;
+ if (prog->reloc_desc[i].type == RELO_LD64) {
+ struct bpf_insn *insns = prog->insns;
+ int insn_idx, map_idx;
- insn_idx = prog->reloc_desc[i].insn_idx;
- map_idx = prog->reloc_desc[i].map_idx;
+ insn_idx = prog->reloc_desc[i].insn_idx;
+ map_idx = prog->reloc_desc[i].map_idx;
- if (insn_idx >= (int)prog->insns_cnt) {
- pr_warning("relocation out of range: '%s'\n",
- prog->section_name);
- return -LIBBPF_ERRNO__RELOC;
+ if (insn_idx >= (int)prog->insns_cnt) {
+ pr_warning("relocation out of range: '%s'\n",
+ prog->section_name);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+ insns[insn_idx].imm = obj->maps[map_idx].fd;
+ } else {
+ err = bpf_program__reloc_text(prog, obj,
+ &prog->reloc_desc[i]);
+ if (err)
+ return err;
}
- insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
- insns[insn_idx].imm = obj->maps[map_idx].fd;
}
zfree(&prog->reloc_desc);
@@ -1024,7 +1107,6 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
Elf_Data *data = obj->efile.reloc[i].data;
int idx = shdr->sh_info;
struct bpf_program *prog;
- size_t nr_maps = obj->nr_maps;
if (shdr->sh_type != SHT_REL) {
pr_warning("internal error at %d\n", __LINE__);
@@ -1038,11 +1120,9 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
return -LIBBPF_ERRNO__RELOC;
}
- err = bpf_program__collect_reloc(prog, nr_maps,
+ err = bpf_program__collect_reloc(prog,
shdr, data,
- obj->efile.symbols,
- obj->efile.maps_shndx,
- obj->maps);
+ obj);
if (err)
return err;
}
@@ -1195,6 +1275,8 @@ bpf_object__load_progs(struct bpf_object *obj)
int err;
for (i = 0; i < obj->nr_programs; i++) {
+ if (obj->programs[i].idx == obj->efile.text_shndx)
+ continue;
err = bpf_program__load(&obj->programs[i],
obj->license,
obj->kern_version);
@@ -1721,6 +1803,45 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
+#define BPF_PROG_SEC(string, type) { string, sizeof(string) - 1, type }
+static const struct {
+ const char *sec;
+ size_t len;
+ enum bpf_prog_type prog_type;
+} section_names[] = {
+ BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
+ BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
+ BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
+ BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
+ BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
+ BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK),
+ BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE),
+ BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
+ BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
+};
+#undef BPF_PROG_SEC
+
+static enum bpf_prog_type bpf_program__guess_type(struct bpf_program *prog)
+{
+ int i;
+
+ if (!prog->section_name)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(section_names); i++)
+ if (strncmp(prog->section_name, section_names[i].sec,
+ section_names[i].len) == 0)
+ return section_names[i].prog_type;
+
+err:
+ pr_warning("failed to guess program type based on section name %s\n",
+ prog->section_name);
+
+ return BPF_PROG_TYPE_UNSPEC;
+}
+
int bpf_map__fd(struct bpf_map *map)
{
return map ? map->fd : -EINVAL;
@@ -1818,7 +1939,7 @@ long libbpf_get_error(const void *ptr)
int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
- struct bpf_program *prog;
+ struct bpf_program *prog, *first_prog = NULL;
struct bpf_object *obj;
int err;
@@ -1826,13 +1947,30 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
if (IS_ERR(obj))
return -ENOENT;
- prog = bpf_program__next(NULL, obj);
- if (!prog) {
+ bpf_object__for_each_program(prog, obj) {
+ /*
+ * If type is not specified, try to guess it based on
+ * section name.
+ */
+ if (type == BPF_PROG_TYPE_UNSPEC) {
+ type = bpf_program__guess_type(prog);
+ if (type == BPF_PROG_TYPE_UNSPEC) {
+ bpf_object__close(obj);
+ return -EINVAL;
+ }
+ }
+
+ bpf_program__set_type(prog, type);
+ if (prog->idx != obj->efile.text_shndx && !first_prog)
+ first_prog = prog;
+ }
+
+ if (!first_prog) {
+ pr_warning("object file doesn't contain bpf program\n");
bpf_object__close(obj);
return -ENOENT;
}
- bpf_program__set_type(prog, type);
err = bpf_object__load(obj);
if (err) {
bpf_object__close(obj);
@@ -1840,6 +1978,6 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
}
*pobj = obj;
- *prog_fd = bpf_program__fd(prog);
+ *prog_fd = bpf_program__fd(first_prog);
return 0;
}
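
With the section_names[] table above, callers can pass BPF_PROG_TYPE_UNSPEC and let libbpf infer the type from each program's ELF section name; a minimal sketch (object path hypothetical):

	struct bpf_object *obj;
	int prog_fd;

	/* type is guessed per program from its section, e.g. "xdp" */
	if (bpf_prog_load("prog.o", BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
		fprintf(stderr, "failed to load prog.o\n");
		return -1;
	}
	/* prog_fd refers to the first non-.text program in the object */

This is exactly what the new bpftool "prog load" command relies on.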
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 7ce724fc0544..e5f2acbb70cc 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1094,7 +1094,7 @@ static enum event_type __read_token(char **tok)
if (strcmp(*tok, "LOCAL_PR_FMT") == 0) {
free(*tok);
*tok = NULL;
- return force_token("\"\%s\" ", tok);
+ return force_token("\"%s\" ", tok);
} else if (strcmp(*tok, "STA_PR_FMT") == 0) {
free(*tok);
*tok = NULL;
@@ -3970,6 +3970,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
val &= ~fval;
}
}
+ if (val) {
+ if (print && arg->flags.delim)
+ trace_seq_puts(s, arg->flags.delim);
+ trace_seq_printf(s, "0x%llx", val);
+ }
break;
case PRINT_SYMBOL:
val = eval_num_arg(data, size, event, arg->symbol.field);
@@ -3980,6 +3985,8 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
break;
}
}
+ if (!flag)
+ trace_seq_printf(s, "0x%llx", val);
break;
case PRINT_HEX:
case PRINT_HEX_STR:
@@ -4293,6 +4300,26 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
goto process_again;
case 'p':
ls = 1;
+ if (isalnum(ptr[1])) {
+ ptr++;
+ /* Check for special pointers */
+ switch (*ptr) {
+ case 's':
+ case 'S':
+ case 'f':
+ case 'F':
+ break;
+ default:
+ /*
+ * Older kernels do not process
+ * dereferenced pointers.
+ * Only process if the pointer
+ * value is printable.
+ */
+ if (isprint(*(char *)bptr))
+ goto process_string;
+ }
+ }
/* fall through */
case 'd':
case 'u':
@@ -4345,6 +4372,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
break;
case 's':
+ process_string:
arg = alloc_arg();
if (!arg) {
do_warning_event(event, "%s(%d): not enough memory!",
@@ -4949,21 +4977,27 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
else
ls = 2;
- if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
- *(ptr+1) == 'S' || *(ptr+1) == 's') {
+ if (isalnum(ptr[1]))
ptr++;
+
+ if (arg->type == PRINT_BSTRING) {
+ trace_seq_puts(s, arg->string.string);
+ break;
+ }
+
+ if (*ptr == 'F' || *ptr == 'f' ||
+ *ptr == 'S' || *ptr == 's') {
show_func = *ptr;
- } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
- print_mac_arg(s, *(ptr+1), data, size, event, arg);
- ptr++;
+ } else if (*ptr == 'M' || *ptr == 'm') {
+ print_mac_arg(s, *ptr, data, size, event, arg);
arg = arg->next;
break;
- } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
+ } else if (*ptr == 'I' || *ptr == 'i') {
int n;
- n = print_ip_arg(s, ptr+1, data, size, event, arg);
+ n = print_ip_arg(s, ptr, data, size, event, arg);
if (n > 0) {
- ptr += n;
+ ptr += n - 1;
arg = arg->next;
break;
}
@@ -5532,8 +5566,14 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
event = pevent_find_event_by_record(pevent, record);
if (!event) {
- do_warning("ug! no event found for type %d",
- trace_parse_common_type(pevent, record->data));
+ int i;
+ int type = trace_parse_common_type(pevent, record->data);
+
+ do_warning("ug! no event found for type %d", type);
+ trace_seq_printf(s, "[UNKNOWN TYPE %d]", type);
+ for (i = 0; i < record->size; i++)
+ trace_seq_printf(s, " %02x",
+ ((unsigned char *)record->data)[i]);
return;
}
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index a16756ae3526..d542cb60ca1a 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -120,12 +120,12 @@ char **traceevent_plugin_list_options(void)
for (op = reg->options; op->name; op++) {
char *alias = op->plugin_alias ? op->plugin_alias : op->file;
char **temp = list;
+ int ret;
- name = malloc(strlen(op->name) + strlen(alias) + 2);
- if (!name)
+ ret = asprintf(&name, "%s:%s", alias, op->name);
+ if (ret < 0)
goto err;
- sprintf(name, "%s:%s", alias, op->name);
list = realloc(list, count + 2);
if (!list) {
list = temp;
@@ -290,17 +290,14 @@ load_plugin(struct pevent *pevent, const char *path,
const char *alias;
char *plugin;
void *handle;
+ int ret;
- plugin = malloc(strlen(path) + strlen(file) + 2);
- if (!plugin) {
+ ret = asprintf(&plugin, "%s/%s", path, file);
+ if (ret < 0) {
warning("could not allocate plugin memory\n");
return;
}
- strcpy(plugin, path);
- strcat(plugin, "/");
- strcat(plugin, file);
-
handle = dlopen(plugin, RTLD_NOW | RTLD_GLOBAL);
if (!handle) {
warning("could not load plugin '%s'\n%s\n",
@@ -391,6 +388,7 @@ load_plugins(struct pevent *pevent, const char *suffix,
char *home;
char *path;
char *envdir;
+ int ret;
if (pevent->flags & PEVENT_DISABLE_PLUGINS)
return;
@@ -421,16 +419,12 @@ load_plugins(struct pevent *pevent, const char *suffix,
if (!home)
return;
- path = malloc(strlen(home) + strlen(LOCAL_PLUGIN_DIR) + 2);
- if (!path) {
+ ret = asprintf(&path, "%s/%s", home, LOCAL_PLUGIN_DIR);
+ if (ret < 0) {
warning("could not allocate plugin memory\n");
return;
}
- strcpy(path, home);
- strcat(path, "/");
- strcat(path, LOCAL_PLUGIN_DIR);
-
load_plugins_dir(pevent, suffix, path, load_plugin, data);
free(path);
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
index c94e3641b046..ca424b157e46 100644
--- a/tools/lib/traceevent/kbuffer-parse.c
+++ b/tools/lib/traceevent/kbuffer-parse.c
@@ -24,8 +24,8 @@
#include "kbuffer.h"
-#define MISSING_EVENTS (1 << 31)
-#define MISSING_STORED (1 << 30)
+#define MISSING_EVENTS (1UL << 31)
+#define MISSING_STORED (1UL << 30)
#define COMMIT_MASK ((1 << 27) - 1)
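
The UL suffix matters because these flags are tested against unsigned long values: a plain (1 << 31) is a signed int that becomes INT_MIN and sign-extends when widened. A tiny demonstration of the difference on an LP64 host:

	#include <stdio.h>

	int main(void)
	{
		unsigned long a = 1 << 31;   /* sign-extended: ffffffff80000000 */
		unsigned long b = 1UL << 31; /* intended bit:   80000000 */

		printf("%lx %lx\n", a, b);
		return 0;
	}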
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 315df0a70265..431e8b309f6e 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -287,12 +287,10 @@ find_event(struct pevent *pevent, struct event_list **events,
sys_name = NULL;
}
- reg = malloc(strlen(event_name) + 3);
- if (reg == NULL)
+ ret = asprintf(&reg, "^%s$", event_name);
+ if (ret < 0)
return PEVENT_ERRNO__MEM_ALLOC_FAILED;
- sprintf(reg, "^%s$", event_name);
-
ret = regcomp(&ereg, reg, REG_ICASE|REG_NOSUB);
free(reg);
@@ -300,13 +298,12 @@ find_event(struct pevent *pevent, struct event_list **events,
return PEVENT_ERRNO__INVALID_EVENT_NAME;
if (sys_name) {
- reg = malloc(strlen(sys_name) + 3);
- if (reg == NULL) {
+ ret = asprintf(&reg, "^%s$", sys_name);
+ if (ret < 0) {
regfree(&ereg);
return PEVENT_ERRNO__MEM_ALLOC_FAILED;
}
- sprintf(reg, "^%s$", sys_name);
ret = regcomp(&sreg, reg, REG_ICASE|REG_NOSUB);
free(reg);
if (ret) {
@@ -1634,6 +1631,7 @@ int pevent_filter_clear_trivial(struct event_filter *filter,
case FILTER_TRIVIAL_FALSE:
if (filter_type->filter->boolean.value)
continue;
+ break;
case FILTER_TRIVIAL_TRUE:
if (!filter_type->filter->boolean.value)
continue;
@@ -1879,17 +1877,25 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
struct pevent *pevent;
unsigned long long addr;
const char *val = NULL;
+ unsigned int size;
char hex[64];
/* If the field is not a string convert it */
if (arg->str.field->flags & FIELD_IS_STRING) {
val = record->data + arg->str.field->offset;
+ size = arg->str.field->size;
+
+ if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
+ addr = *(unsigned int *)val;
+ val = record->data + (addr & 0xffff);
+ size = addr >> 16;
+ }
/*
* We need to copy the data since we can't be sure the field
* is null terminated.
*/
- if (*(val + arg->str.field->size - 1)) {
+ if (*(val + size - 1)) {
/* copy it */
- memcpy(arg->str.buffer, val, arg->str.field->size);
+ memcpy(arg->str.buffer, val, size);
/* the buffer is already NULL terminated */
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index ae0272f9a091..e6acc281dd37 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -46,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE
@$(MAKE) $(build)=objtool
$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
- @./sync-check.sh
+ @$(CONFIG_SHELL) ./sync-check.sh
$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 9b341584eb1b..f40d46e24bcc 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -428,6 +428,40 @@ static void add_ignores(struct objtool_file *file)
}
/*
+ * FIXME: For now, just ignore any alternatives which add retpolines. This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+ struct section *sec;
+ struct rela *rela;
+ struct instruction *insn;
+
+ sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ if (rela->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (!insn) {
+ WARN("bad .discard.nospec entry");
+ return -1;
+ }
+
+ insn->ignore_alts = true;
+ }
+
+ return 0;
+}
+
+/*
* Find the destination instructions for all jumps.
*/
static int add_jump_destinations(struct objtool_file *file)
@@ -456,6 +490,13 @@ static int add_jump_destinations(struct objtool_file *file)
} else if (rela->sym->sec->idx) {
dest_sec = rela->sym->sec;
dest_off = rela->sym->sym.st_value + rela->addend + 4;
+ } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+ /*
+ * Retpoline jumps are really dynamic jumps in
+ * disguise, so convert them accordingly.
+ */
+ insn->type = INSN_JUMP_DYNAMIC;
+ continue;
} else {
/* sibling call */
insn->jump_dest = 0;
@@ -502,11 +543,18 @@ static int add_call_destinations(struct objtool_file *file)
dest_off = insn->offset + insn->len + insn->immediate;
insn->call_dest = find_symbol_by_offset(insn->sec,
dest_off);
+ /*
+ * FIXME: Thanks to retpolines, it's now considered
+ * normal for a function to call within itself. So
+ * disable this warning for now.
+ */
+#if 0
if (!insn->call_dest) {
WARN_FUNC("can't find call dest symbol at offset 0x%lx",
insn->sec, insn->offset, dest_off);
return -1;
}
+#endif
} else if (rela->sym->type == STT_SECTION) {
insn->call_dest = find_symbol_by_offset(rela->sym->sec,
rela->addend+4);
@@ -671,12 +719,6 @@ static int add_special_section_alts(struct objtool_file *file)
return ret;
list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
- alt = malloc(sizeof(*alt));
- if (!alt) {
- WARN("malloc failed");
- ret = -1;
- goto out;
- }
orig_insn = find_insn(file, special_alt->orig_sec,
special_alt->orig_off);
@@ -687,6 +729,10 @@ static int add_special_section_alts(struct objtool_file *file)
goto out;
}
+ /* Ignore retpoline alternatives. */
+ if (orig_insn->ignore_alts)
+ continue;
+
new_insn = NULL;
if (!special_alt->group || special_alt->new_len) {
new_insn = find_insn(file, special_alt->new_sec,
@@ -712,6 +758,13 @@ static int add_special_section_alts(struct objtool_file *file)
goto out;
}
+ alt = malloc(sizeof(*alt));
+ if (!alt) {
+ WARN("malloc failed");
+ ret = -1;
+ goto out;
+ }
+
alt->insn = new_insn;
list_add_tail(&alt->list, &orig_insn->alts);
@@ -1028,6 +1081,10 @@ static int decode_sections(struct objtool_file *file)
add_ignores(file);
+ ret = add_nospec_ignores(file);
+ if (ret)
+ return ret;
+
ret = add_jump_destinations(file);
if (ret)
return ret;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index 47d9ea70a83d..dbadb304a410 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -44,7 +44,7 @@ struct instruction {
unsigned int len;
unsigned char type;
unsigned long immediate;
- bool alt_group, visited, dead_end, ignore, hint, save, restore;
+ bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
struct symbol *call_dest;
struct instruction *jump_dest;
struct list_head alts;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 24460155c82c..c1c338661699 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -26,6 +26,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <errno.h>
#include "elf.h"
#include "warn.h"
@@ -358,7 +359,8 @@ struct elf *elf_open(const char *name, int flags)
elf->fd = open(name, flags);
if (elf->fd == -1) {
- perror("open");
+ fprintf(stderr, "objtool: Can't open '%s': %s\n",
+ name, strerror(errno));
goto err;
}
diff --git a/tools/perf/Build b/tools/perf/Build
index b48ca40fccf9..e5232d567611 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -25,7 +25,7 @@ perf-y += builtin-data.o
perf-y += builtin-version.o
perf-y += builtin-c2c.o
-perf-$(CONFIG_AUDIT) += builtin-trace.o
+perf-$(CONFIG_TRACE) += builtin-trace.o
perf-$(CONFIG_LIBELF) += builtin-probe.o
perf-y += bench/
@@ -50,6 +50,6 @@ libperf-y += util/
libperf-y += arch/
libperf-y += ui/
libperf-y += scripts/
-libperf-$(CONFIG_AUDIT) += trace/beauty/
+libperf-$(CONFIG_TRACE) += trace/beauty/
gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index 84681007f80f..73c2650bd0db 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -24,6 +24,9 @@ OPTIONS
-a::
--add=::
Add specified file to the cache.
+-f::
+--force::
+ Don't complain, do it.
-k::
--kcore::
Add specified kcore file to the cache. For the current host that is
diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt
index 6f7200fb85cf..c0a66400a960 100644
--- a/tools/perf/Documentation/perf-evlist.txt
+++ b/tools/perf/Documentation/perf-evlist.txt
@@ -20,6 +20,10 @@ OPTIONS
--input=::
Input file name. (default: perf.data unless stdin is a fifo)
+-f::
+--force::
+ Don't complain, do it.
+
-F::
--freq=::
Show just the sample frequency used for each event.
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index 87b2588d1cbd..a64d6588470e 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -60,6 +60,10 @@ include::itrace.txt[]
found in the jitdumps files captured in the input perf.data file. Use this option
if you are monitoring environment using JIT runtimes, such as Java, DART or V8.
+-f::
+--force::
+ Don't complain, do it.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index ab25be28c9dc..74d774592196 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -42,6 +42,10 @@ COMMON OPTIONS
--dump-raw-trace::
Dump raw trace in ASCII.
+-f::
+--force::
+	Don't complain, do it.
+
REPORT OPTIONS
--------------
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index d7e4869905f1..b6866a05edd2 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -170,7 +170,7 @@ Probe points are defined by following syntax.
or,
sdt_PROVIDER:SDTEVENT
-'EVENT' specifies the name of new event, if omitted, it will be set the name of the probed function. You can also specify a group name by 'GROUP', if omitted, set 'probe' is used for kprobe and 'probe_<bin>' is used for uprobe.
+'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function, and for return probes, a "\_\_return" suffix is automatically added to the function name. You can also specify a group name by 'GROUP'; if omitted, 'probe' is used for kprobe and 'probe_<bin>' is used for uprobe.
Note that using an existing group name can conflict with other events. In particular, using a group name reserved for kernel modules can hide embedded events in the
modules.
'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, ':RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. And ';PTN' means lazy matching pattern (see LAZY MATCHING). Note that ';PTN' must be the end of the probe point definition. In addition, '@SRC' specifies a source file which has that function.
@@ -182,6 +182,14 @@ Note that before using the SDT event, the target binary (on which SDT events are
For details of the SDT, see below.
https://sourceware.org/gdb/onlinedocs/gdb/Static-Probe-Points.html
+ESCAPED CHARACTER
+-----------------
+
+In the probe syntax, '=', '@', '+', ':' and ';' are treated as special characters. You can use a backslash ('\') to escape them.
+This is useful if you need to probe a specific versioned symbol, such as one with a @GLIBC_... suffix, or to specify a source file whose name includes special characters.
+Note that a single backslash is usually consumed by the shell, so you may need to pass a double backslash (\\) or wrap the argument in single quotes (\'AAA\@BBB').
+See the EXAMPLES section for how this is used.
+
PROBE ARGUMENT
--------------
Each probe argument follows below syntax.
@@ -277,6 +285,14 @@ Add a USDT probe to a target process running in a different mount namespace
./perf probe --target-ns <target pid> -x /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.121-0.b13.el7_3.x86_64/jre/lib/amd64/server/libjvm.so %sdt_hotspot:thread__sleep__end
+Add a probe on a specific versioned symbol by backslash escape
+
+ ./perf probe -x /lib64/libc-2.25.so 'malloc_get_state\@GLIBC_2.2.5'
+
+Add a probe in a source file using special characters by backslash escape
+
+ ./perf probe -x /opt/test/a.out 'foo\+bar.c:4'
+
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 5a626ef666c2..3eea6de35a38 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -430,6 +430,9 @@ Configure all used events to run in user space.
--timestamp-filename
Append timestamp to output file name.
+--timestamp-boundary::
+Record timestamp boundary (time of first/last samples).
+
--switch-output[=mode]::
Generate multiple perf.data files, timestamp prefixed, switching to a new one
based on 'mode' value:
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index ddde2b54af57..907e505b6309 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -402,6 +402,26 @@ OPTIONS
stop time is not given (i.e, time string is 'x.y,') then analysis goes
to end of file.
+	Also supports time percentages with multiple time ranges. The time string is
+	'a%/n,b%/m,...' or 'a%-b%,c%-d%,...'.
+
+ For example:
+ Select the second 10% time slice:
+
+ perf report --time 10%/2
+
+ Select from 0% to 10% time slice:
+
+ perf report --time 0%-10%
+
+ Select the first and second 10% time slices:
+
+ perf report --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices:
+
+ perf report --time 0%-10%,30%-40%
+
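+
+As a rough illustration of the arithmetic behind the 'a%/n' form above, independent of perf's actual time-utils parser (a simplified sketch):
+
+    #include <stdio.h>
+
+    /* "a%/n": slice n (1-based) of width a percent, e.g. 10%/2 -> [10%, 20%) */
+    static void percent_slice(double a, int n, double *start, double *end)
+    {
+            *start = a * (n - 1);
+            *end   = a * n;
+    }
+
+    int main(void)
+    {
+            double s, e;
+
+            percent_slice(10.0, 2, &s, &e);
+            printf("selected window: %.0f%% - %.0f%%\n", s, e); /* 10% - 20% */
+            return 0;
+    }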
--itrace::
Options for decoding instruction tracing data. The options are:
@@ -437,8 +457,23 @@ include::itrace.txt[]
will be printed. Each entry is function name or file/line. Enabled by
default, disable with --no-inline.
+--mmaps::
+ Show --tasks output plus mmap information in a format similar to
+ /proc/<PID>/maps.
+
+	Please note that not all mmaps are stored; options affecting which ones
+	are stored include 'perf record --data', for instance.
+
+--stats::
+	Display overall event statistics without any further processing
+	(like those shown at the end of 'perf report -D' output).
+
+--tasks::
+	Display monitored tasks stored in perf data, showing pid/tid/ppid
+	plus the command string, aligned to distinguish parent and child tasks.
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
--------
-linkperf:perf-stat[1], linkperf:perf-annotate[1]
+linkperf:perf-stat[1], linkperf:perf-annotate[1], linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 55b67338548e..c7e50f263887 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -74,6 +74,10 @@ OPTIONS
--dump-raw-trace=::
Display verbose dump of the sched data.
+-f::
+--force::
+ Don't complain, do it.
+
OPTIONS for 'perf sched map'
----------------------------
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 2811fcf684cb..7730c1d2b5d3 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -117,7 +117,7 @@ OPTIONS
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
- brstackoff, callindent, insn, insnlen, synth, phys_addr.
+ brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@@ -217,6 +217,32 @@ OPTIONS
The brstackoff field will print an offset into a specific dso/binary.
+ With the metric option perf script can compute metrics for
+ sampling periods, similar to perf stat. This requires
+ specifying a group with multiple metrics with the :S option
+ for perf record. perf will sample on the first event, and
+ compute metrics for all the events in the group. Please note
+ that the metric computed is averaged over the whole sampling
+ period, not just for the sample point.
+
+	For sample events it is possible to display the misc field with the
+	-F +misc option; the following letters are displayed for each bit:
+
+ PERF_RECORD_MISC_KERNEL K
+ PERF_RECORD_MISC_USER U
+ PERF_RECORD_MISC_HYPERVISOR H
+ PERF_RECORD_MISC_GUEST_KERNEL G
+ PERF_RECORD_MISC_GUEST_USER g
+ PERF_RECORD_MISC_MMAP_DATA* M
+ PERF_RECORD_MISC_COMM_EXEC E
+ PERF_RECORD_MISC_SWITCH_OUT S
+
+ $ perf script -F +misc ...
+ sched-messaging 1414 K 28690.636582: 4590 cycles ...
+ sched-messaging 1407 U 28690.636600: 325620 cycles ...
+ sched-messaging 1414 K 28690.636608: 19473 cycles ...
+ misc field ___________/
+
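+
+A hedged sketch of how the first five letters above can be derived: K/U/H/G/g come from the cpumode value masked by PERF_RECORD_MISC_CPUMODE_MASK rather than from independent bits (simplified illustration, not perf's actual code):
+
+    #include <linux/perf_event.h>
+
+    static char misc_cpumode_letter(unsigned short misc)
+    {
+            switch (misc & PERF_RECORD_MISC_CPUMODE_MASK) {
+            case PERF_RECORD_MISC_KERNEL:           return 'K';
+            case PERF_RECORD_MISC_USER:             return 'U';
+            case PERF_RECORD_MISC_HYPERVISOR:       return 'H';
+            case PERF_RECORD_MISC_GUEST_KERNEL:     return 'G';
+            case PERF_RECORD_MISC_GUEST_USER:       return 'g';
+            default:                                return ' ';
+            }
+    }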
-k::
--vmlinux=<file>::
vmlinux pathname
@@ -274,6 +300,9 @@ OPTIONS
Display context switch events i.e. events of type PERF_RECORD_SWITCH or
PERF_RECORD_SWITCH_CPU_WIDE.
+--show-lost-events
+ Display lost events i.e. events of type PERF_RECORD_LOST.
+
--demangle::
Demangle symbol names to human readable form. It's enabled by default,
disable with --no-demangle.
@@ -321,6 +350,22 @@ include::itrace.txt[]
stop time is not given (i.e, time string is 'x.y,') then analysis goes
to end of file.
+	Also supports time percentages with multiple time ranges. The time string is
+	'a%/n,b%/m,...' or 'a%-b%,c%-d%,...'.
+
+ For example:
+ Select the second 10% time slice:
+ perf script --time 10%/2
+
+ Select from 0% to 10% time slice:
+ perf script --time 0%-10%
+
+ Select the first and second 10% time slices:
+ perf script --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices:
+ perf script --time 0%-10%,30%-40%
+
--max-blocks::
Set the maximum number of program blocks to print with brstackasm for
each sample.
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index df98d1c82688..ef0c7565bd5c 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -50,7 +50,9 @@ TIMECHART OPTIONS
-p::
--process::
Select the processes to display, by name or PID
-
+-f::
+--force::
+ Don't complain, do it.
--symfs=<directory>::
Look for files with symbols relative to this directory.
-n::
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 4353262bc462..8a32cc77bead 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -268,6 +268,12 @@ INTERACTIVE PROMPTING KEYS
[S]::
Stop annotation, return to full profile display.
+[K]::
+ Hide kernel symbols.
+
+[U]::
+ Hide user symbols.
+
[z]::
Toggle event count zeroing across display updates.
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index d53bea6bd571..33a88e984e66 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -86,18 +86,18 @@ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-
In per-thread mode with inheritance mode on (default), events are captured only when
the thread executes on the designated CPUs. Default is to monitor all CPUs.
---duration:
+--duration::
Show only events that had a duration greater than N.M ms.
---sched:
+--sched::
Accrue thread runtime and provide a summary at the end of the session.
--i
---input
+-i::
+--input::
Process events from a given perf data file.
--T
---time
+-T::
+--time::
Print full timestamp rather time relative to first sample.
--comm::
@@ -117,6 +117,10 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
Show tool stats such as the number of times fd->pathname was discovered through
hooking the open syscall return + vfs_getname or via reading /proc/pid/fd, etc.
+-f::
+--force::
+ Don't complain, do it.
+
-F=[all|min|maj]::
--pf=[all|min|maj]::
Trace pagefaults. Optionally, you can specify whether you want minor,
@@ -159,6 +163,10 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
Implies '--call-graph dwarf' when --call-graph not present on the
command line, on systems where DWARF unwinding was built in.
+--print-sample::
+ Print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info for the
+ raw_syscalls:sys_{enter,exit} tracepoints, for debugging.
+
--proc-map-timeout::
When processing pre-existing threads' /proc/XXX/mmap files, it may take a long time,
because the files may be huge. A timeout is needed in such cases.
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index e90c59c6d815..f7d85e89a98a 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -238,6 +238,33 @@ struct auxtrace_index {
struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
+ HEADER_STAT = 19,
+
+This is merely a flag signifying that the data section contains data
+recorded from perf stat record.
+
+ HEADER_CACHE = 20,
+
+Description of the cache hierarchy. Based on the Linux sysfs format
+in /sys/devices/system/cpu/cpu*/cache/
+
+ u32 version Currently always 1
+ u32 number_of_cache_levels
+
+struct {
+ u32 level;
+ u32 line_size;
+ u32 sets;
+ u32 ways;
+ struct perf_header_string type;
+ struct perf_header_string size;
+ struct perf_header_string map;
+}[number_of_cache_levels];
+
+ HEADER_SAMPLE_TIME = 21,
+
+Two uint64_t for the time of first sample and the time of last sample.
+
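+
+A minimal sketch of consuming the fixed-width prefix of the HEADER_CACHE section described above; the variable-length perf_header_string fields that follow each entry are left to real code (assumption: the stream is already positioned at the section):
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    static int read_cache_header_prefix(FILE *f, uint32_t *version,
+                                        uint32_t *nr_levels)
+    {
+            if (fread(version, sizeof(*version), 1, f) != 1 ||
+                fread(nr_levels, sizeof(*nr_levels), 1, f) != 1)
+                    return -1;
+            return *version == 1 ? 0 : -1;  /* version is currently always 1 */
+    }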
Other bits are reserved and should be ignored for now.
HEADER_FEAT_BITS = 256,
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index db0ca3063eae..849599f39c5e 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -32,3 +32,5 @@ Order by the overhead of source file name and line number: perf report -s srclin
System-wide collection from all CPUs: perf record -a
Show current config key-value pairs: perf config --list
Show user configuration overrides: perf config --user --list
+To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node`
+To report cacheline events from previous recording: perf c2c report
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 0294bfb6c5f8..0dfdaa9fa81e 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -22,6 +22,7 @@ include $(srctree)/tools/scripts/Makefile.arch
$(call detected_var,SRCARCH)
NO_PERF_REGS := 1
+NO_SYSCALL_TABLE := 1
# Additional ARCH settings for ppc
ifeq ($(SRCARCH),powerpc)
@@ -33,7 +34,8 @@ endif
ifeq ($(SRCARCH),x86)
$(call detected,CONFIG_X86)
ifeq (${IS_64_BIT}, 1)
- CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
+ NO_SYSCALL_TABLE := 0
+ CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -I$(OUTPUT)arch/x86/include/generated
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
$(call detected,CONFIG_X86_64)
@@ -55,12 +57,18 @@ endif
ifeq ($(ARCH),s390)
NO_PERF_REGS := 0
+ NO_SYSCALL_TABLE := 0
+ CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated
endif
ifeq ($(NO_PERF_REGS),0)
$(call detected,CONFIG_PERF_REGS)
endif
+ifneq ($(NO_SYSCALL_TABLE),1)
+ CFLAGS += -DHAVE_SYSCALL_TABLE
+endif
+
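+
+Source code then keys off the -DHAVE_SYSCALL_TABLE added above; schematically, the shape consumers take is the generated-table lookup versus the libaudit fallback (a sketch, not a verbatim excerpt):
+
+    /* sketch: how consumers select between the two syscall-name sources */
+    const char *name;
+    #ifdef HAVE_SYSCALL_TABLE
+            name = syscalltbl__name(tbl, id);           /* generated per-arch table */
+    #else
+            name = audit_syscall_to_name(id, machine);  /* libaudit fallback */
+    #endif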
# So far there's only x86 and arm libdw unwind support merged in perf.
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
@@ -97,6 +105,16 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+ifdef CSINCLUDES
+ LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
+endif
+OPENCSDLIBS := -lopencsd_c_api -lopencsd
+ifdef CSLIBS
+ LIBOPENCSD_LDFLAGS := -L$(CSLIBS)
+endif
+FEATURE_CHECK_CFLAGS-libopencsd := $(LIBOPENCSD_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libopencsd := $(LIBOPENCSD_LDFLAGS) $(OPENCSDLIBS)
+
ifeq ($(NO_PERF_REGS),0)
CFLAGS += -DHAVE_PERF_REGS_SUPPORT
endif
@@ -265,6 +283,10 @@ ifeq ($(feature-pthread-attr-setaffinity-np), 1)
CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP
endif
+ifeq ($(feature-pthread-barrier), 1)
+ CFLAGS += -DHAVE_PTHREAD_BARRIER
+endif
+
ifndef NO_BIONIC
$(call feature_check,bionic)
ifeq ($(feature-bionic), 1)
@@ -341,6 +363,21 @@ ifeq ($(feature-setns), 1)
$(call detected,CONFIG_SETNS)
endif
+ifndef NO_CORESIGHT
+ ifeq ($(feature-libopencsd), 1)
+ CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
+ LDFLAGS += $(LIBOPENCSD_LDFLAGS)
+ EXTLIBS += $(OPENCSDLIBS)
+ $(call detected,CONFIG_LIBOPENCSD)
+ ifdef CSTRACE_RAW
+ CFLAGS += -DCS_DEBUG_RAW
+ ifeq (${CSTRACE_RAW}, packed)
+ CFLAGS += -DCS_RAW_PACKED
+ endif
+ endif
+ endif
+endif
+
ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBELF_SUPPORT
EXTLIBS += -lelf
@@ -519,14 +556,18 @@ ifndef NO_LIBUNWIND
EXTLIBS += $(EXTLIBS_LIBUNWIND)
endif
-ifndef NO_LIBAUDIT
- ifneq ($(feature-libaudit), 1)
- msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
- NO_LIBAUDIT := 1
- else
- CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
- EXTLIBS += -laudit
- $(call detected,CONFIG_AUDIT)
+ifeq ($(NO_SYSCALL_TABLE),0)
+ $(call detected,CONFIG_TRACE)
+else
+ ifndef NO_LIBAUDIT
+ ifneq ($(feature-libaudit), 1)
+ msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
+ NO_LIBAUDIT := 1
+ else
+ CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
+ EXTLIBS += -laudit
+ $(call detected,CONFIG_TRACE)
+ endif
endif
endif
@@ -768,7 +809,7 @@ else
NO_PERF_READ_VDSOX32 := 1
endif
-ifdef LIBBABELTRACE
+ifndef NO_LIBBABELTRACE
$(call feature_check,libbabeltrace)
ifeq ($(feature-libbabeltrace), 1)
CFLAGS += -DHAVE_LIBBABELTRACE_SUPPORT $(LIBBABELTRACE_CFLAGS)
@@ -935,6 +976,10 @@ define print_var_code
endef
ifeq ($(VF),1)
+	# Display EXTRA features which are detected manually
+	# from here with a feature_check call and thus cannot
+	# be part of the global state output.
+ $(foreach feat,$(FEATURE_TESTS_EXTRA),$(call feature_print_status,$(feat),))
$(call print_var,prefix)
$(call print_var,bindir)
$(call print_var,libdir)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 68cf1360a3f3..9b0351d3ce34 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -77,7 +77,7 @@ include ../scripts/utilities.mak
#
# Define NO_ZLIB if you do not want to support compressed kernel modules
#
-# Define LIBBABELTRACE if you DO want libbabeltrace support
+# Define NO_LIBBABELTRACE if you do not want libbabeltrace support
# for CTF data format.
#
# Define NO_LZMA if you do not want to support compressed (xz) kernel modules
@@ -98,6 +98,8 @@ include ../scripts/utilities.mak
# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
# llvm-config is not in $PATH.
+# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
+
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
LC_COLLATE=C
@@ -462,6 +464,13 @@ prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh
$(prctl_option_array): $(prctl_hdr_dir)/prctl.h $(prctl_option_tbl)
$(Q)$(SHELL) '$(prctl_option_tbl)' $(prctl_hdr_dir) > $@
+arch_errno_name_array := $(beauty_outdir)/arch_errno_name_array.c
+arch_errno_hdr_dir := $(srctree)/tools
+arch_errno_tbl := $(srctree)/tools/perf/trace/beauty/arch_errno_names.sh
+
+$(arch_errno_name_array): $(arch_errno_tbl)
+ $(Q)$(SHELL) '$(arch_errno_tbl)' $(CC) $(arch_errno_hdr_dir) > $@
+
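+
+The rule above generates arch_errno_name_array.c, a per-architecture errno-number-to-name mapping; a hedged sketch of the kind of function such a generator produces (name and entries here are hypothetical, the real output is derived from each arch's errno.h):
+
+    static const char *errno_to_name__x86_64(int err)
+    {
+            switch (err) {
+            case 1:  return "EPERM";    /* hypothetical sample entries */
+            case 2:  return "ENOENT";
+            default: return "(unknown)";
+            }
+    }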
all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
@@ -565,7 +574,8 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(vhost_virtio_ioctl_array) \
$(madvise_behavior_array) \
$(perf_ioctl_array) \
- $(prctl_option_array)
+ $(prctl_option_array) \
+ $(arch_errno_name_array)
$(OUTPUT)%.o: %.c prepare FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -847,7 +857,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(kcmp_type_array) \
$(OUTPUT)$(vhost_virtio_ioctl_array) \
$(OUTPUT)$(perf_ioctl_array) \
- $(OUTPUT)$(prctl_option_array)
+ $(OUTPUT)$(prctl_option_array) \
+ $(OUTPUT)$(arch_errno_name_array)
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
#
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index 8edf2cb71564..2323581b157d 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -22,6 +22,42 @@
#include "../../util/evlist.h"
#include "../../util/pmu.h"
#include "cs-etm.h"
+#include "arm-spe.h"
+
+static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
+{
+ struct perf_pmu **arm_spe_pmus = NULL;
+ int ret, i, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ /* arm_spe_xxxxxxxxx\0 */
+ char arm_spe_pmu_name[sizeof(ARM_SPE_PMU_NAME) + 10];
+
+ arm_spe_pmus = zalloc(sizeof(struct perf_pmu *) * nr_cpus);
+ if (!arm_spe_pmus) {
+ pr_err("spes alloc failed\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ ret = sprintf(arm_spe_pmu_name, "%s%d", ARM_SPE_PMU_NAME, i);
+ if (ret < 0) {
+ pr_err("sprintf failed\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ arm_spe_pmus[*nr_spes] = perf_pmu__find(arm_spe_pmu_name);
+ if (arm_spe_pmus[*nr_spes]) {
+ pr_debug2("%s %d: arm_spe_pmu %d type %d name %s\n",
+ __func__, __LINE__, *nr_spes,
+ arm_spe_pmus[*nr_spes]->type,
+ arm_spe_pmus[*nr_spes]->name);
+ (*nr_spes)++;
+ }
+ }
+
+ return arm_spe_pmus;
+}
struct auxtrace_record
*auxtrace_record__init(struct perf_evlist *evlist, int *err)
@@ -29,22 +65,51 @@ struct auxtrace_record
struct perf_pmu *cs_etm_pmu;
struct perf_evsel *evsel;
bool found_etm = false;
+ bool found_spe = false;
+ static struct perf_pmu **arm_spe_pmus = NULL;
+ static int nr_spes = 0;
+ int i;
+
+ if (!evlist)
+ return NULL;
cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
- if (evlist) {
- evlist__for_each_entry(evlist, evsel) {
- if (cs_etm_pmu &&
- evsel->attr.type == cs_etm_pmu->type)
- found_etm = true;
+ if (!arm_spe_pmus)
+ arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (cs_etm_pmu &&
+ evsel->attr.type == cs_etm_pmu->type)
+ found_etm = true;
+
+ if (!nr_spes)
+ continue;
+
+ for (i = 0; i < nr_spes; i++) {
+ if (evsel->attr.type == arm_spe_pmus[i]->type) {
+ found_spe = true;
+ break;
+ }
}
}
+ if (found_etm && found_spe) {
+ pr_err("Concurrent ARM Coresight ETM and SPE operation not currently supported\n");
+ *err = -EOPNOTSUPP;
+ return NULL;
+ }
+
if (found_etm)
return cs_etm_record_init(err);
+#if defined(__aarch64__)
+ if (found_spe)
+ return arm_spe_recording_init(err, arm_spe_pmus[i]);
+#endif
+
/*
- * Clear 'err' even if we haven't found a cs_etm event - that way perf
+ * Clear 'err' even if we haven't found an event - that way perf
* record can still be used even if tracers aren't present. The NULL
* return value will take care of telling the infrastructure HW tracing
* isn't available.
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index 98d67399a0d6..ac4dffc807b8 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -20,6 +20,7 @@
#include <linux/perf_event.h>
#include "cs-etm.h"
+#include "arm-spe.h"
#include "../../util/pmu.h"
struct perf_event_attr
@@ -30,7 +31,12 @@ struct perf_event_attr
/* add ETM default config here */
pmu->selectable = true;
pmu->set_drv_config = cs_etm_set_drv_config;
+#if defined(__aarch64__)
+ } else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
+ return arm_spe_pmu_default_config(pmu);
+#endif
}
+
#endif
return NULL;
}
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index cef6fb38d17e..c0b8dfef98ba 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,6 +1,9 @@
+libperf-y += header.o
+libperf-y += sym-handling.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
- ../../arm/util/cs-etm.o
+ ../../arm/util/cs-etm.o \
+ arm-spe.o
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
new file mode 100644
index 000000000000..1120e39c1b00
--- /dev/null
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <time.h>
+
+#include "../../util/cpumap.h"
+#include "../../util/evsel.h"
+#include "../../util/evlist.h"
+#include "../../util/session.h"
+#include "../../util/util.h"
+#include "../../util/pmu.h"
+#include "../../util/debug.h"
+#include "../../util/auxtrace.h"
+#include "../../util/arm-spe.h"
+
+#define KiB(x) ((x) * 1024)
+#define MiB(x) ((x) * 1024 * 1024)
+
+struct arm_spe_recording {
+ struct auxtrace_record itr;
+ struct perf_pmu *arm_spe_pmu;
+ struct perf_evlist *evlist;
+};
+
+static size_t
+arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return ARM_SPE_AUXTRACE_PRIV_SIZE;
+}
+
+static int arm_spe_info_fill(struct auxtrace_record *itr,
+ struct perf_session *session,
+ struct auxtrace_info_event *auxtrace_info,
+ size_t priv_size)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
+
+ if (priv_size != ARM_SPE_AUXTRACE_PRIV_SIZE)
+ return -EINVAL;
+
+ if (!session->evlist->nr_mmaps)
+ return -EINVAL;
+
+ auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
+ auxtrace_info->priv[ARM_SPE_PMU_TYPE] = arm_spe_pmu->type;
+
+ return 0;
+}
+
+static int arm_spe_recording_options(struct auxtrace_record *itr,
+ struct perf_evlist *evlist,
+ struct record_opts *opts)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
+ struct perf_evsel *evsel, *arm_spe_evsel = NULL;
+ bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
+ struct perf_evsel *tracking_evsel;
+ int err;
+
+ sper->evlist = evlist;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->attr.type == arm_spe_pmu->type) {
+ if (arm_spe_evsel) {
+ pr_err("There may be only one " ARM_SPE_PMU_NAME "x event\n");
+ return -EINVAL;
+ }
+ evsel->attr.freq = 0;
+ evsel->attr.sample_period = 1;
+ arm_spe_evsel = evsel;
+ opts->full_auxtrace = true;
+ }
+ }
+
+ if (!opts->full_auxtrace)
+ return 0;
+
+ /* We are in full trace mode but '-m,xyz' wasn't specified */
+ if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(4) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ }
+
+ /* Validate auxtrace_mmap_pages */
+ if (opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
+ size_t min_sz = KiB(8);
+
+ if (sz < min_sz || !is_power_of_2(sz)) {
+ pr_err("Invalid mmap size for ARM SPE: must be at least %zuKiB and a power of 2\n",
+ min_sz / 1024);
+ return -EINVAL;
+ }
+ }
+
+
+ /*
+ * To obtain the auxtrace buffer file descriptor, the auxtrace event
+ * must come first.
+ */
+ perf_evlist__to_front(evlist, arm_spe_evsel);
+
+ perf_evsel__set_sample_bit(arm_spe_evsel, CPU);
+ perf_evsel__set_sample_bit(arm_spe_evsel, TIME);
+ perf_evsel__set_sample_bit(arm_spe_evsel, TID);
+
+ /* Add dummy event to keep tracking */
+ err = parse_events(evlist, "dummy:u", NULL);
+ if (err)
+ return err;
+
+ tracking_evsel = perf_evlist__last(evlist);
+ perf_evlist__set_tracking_event(evlist, tracking_evsel);
+
+ tracking_evsel->attr.freq = 0;
+ tracking_evsel->attr.sample_period = 1;
+ perf_evsel__set_sample_bit(tracking_evsel, TIME);
+ perf_evsel__set_sample_bit(tracking_evsel, CPU);
+ perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
+
+ return 0;
+}
+
+static u64 arm_spe_reference(struct auxtrace_record *itr __maybe_unused)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
+
+ return ts.tv_sec ^ ts.tv_nsec;
+}
+
+static void arm_spe_recording_free(struct auxtrace_record *itr)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+
+ free(sper);
+}
+
+static int arm_spe_read_finish(struct auxtrace_record *itr, int idx)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct perf_evsel *evsel;
+
+ evlist__for_each_entry(sper->evlist, evsel) {
+ if (evsel->attr.type == sper->arm_spe_pmu->type)
+ return perf_evlist__enable_event_idx(sper->evlist,
+ evsel, idx);
+ }
+ return -EINVAL;
+}
+
+struct auxtrace_record *arm_spe_recording_init(int *err,
+ struct perf_pmu *arm_spe_pmu)
+{
+ struct arm_spe_recording *sper;
+
+ if (!arm_spe_pmu) {
+ *err = -ENODEV;
+ return NULL;
+ }
+
+ sper = zalloc(sizeof(struct arm_spe_recording));
+ if (!sper) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ sper->arm_spe_pmu = arm_spe_pmu;
+ sper->itr.recording_options = arm_spe_recording_options;
+ sper->itr.info_priv_size = arm_spe_info_priv_size;
+ sper->itr.info_fill = arm_spe_info_fill;
+ sper->itr.free = arm_spe_recording_free;
+ sper->itr.reference = arm_spe_reference;
+ sper->itr.read_finish = arm_spe_read_finish;
+ sper->itr.alignment = 0;
+
+ return &sper->itr;
+}
+
+struct perf_event_attr
+*arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu)
+{
+ struct perf_event_attr *attr;
+
+ attr = zalloc(sizeof(struct perf_event_attr));
+ if (!attr) {
+ pr_err("arm_spe default config cannot allocate a perf_event_attr\n");
+ return NULL;
+ }
+
+ /*
+	 * If the kernel driver doesn't advertise a minimum,
+	 * use the maximum allowed by PMSIDR_EL1.INTERVAL.
+ */
+ if (perf_pmu__scan_file(arm_spe_pmu, "caps/min_interval", "%llu",
+ &attr->sample_period) != 1) {
+ pr_debug("arm_spe driver doesn't advertise a min. interval. Using 4096\n");
+ attr->sample_period = 4096;
+ }
+
+ arm_spe_pmu->selectable = true;
+ arm_spe_pmu->is_uncore = false;
+
+ return attr;
+}
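
The auxtrace buffer validation in arm_spe_recording_options() above reduces to "at least 8KiB and a power of two"; a standalone restatement of the check (sysconf stands in for perf's cached page_size global):

    #include <unistd.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool valid_spe_buf_pages(size_t pages)
    {
            size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
            size_t sz = pages * page_size, min_sz = 8 * 1024;

            /* power of two: exactly one bit set */
            return sz >= min_sz && (sz & (sz - 1)) == 0;
    }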
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
new file mode 100644
index 000000000000..534cd2507d83
--- /dev/null
+++ b/tools/perf/arch/arm64/util/header.c
@@ -0,0 +1,65 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <api/fs/fs.h>
+#include "header.h"
+
+#define MIDR "/regs/identification/midr_el1"
+#define MIDR_SIZE 19
+#define MIDR_REVISION_MASK 0xf
+#define MIDR_VARIANT_SHIFT 20
+#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
+
+char *get_cpuid_str(struct perf_pmu *pmu)
+{
+ char *buf = NULL;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+ int cpu;
+ u64 midr = 0;
+ struct cpu_map *cpus;
+ FILE *file;
+
+ if (!sysfs || !pmu || !pmu->cpus)
+ return NULL;
+
+ buf = malloc(MIDR_SIZE);
+ if (!buf)
+ return NULL;
+
+ /* read midr from list of cpus mapped to this pmu */
+ cpus = cpu_map__get(pmu->cpus);
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
+ sysfs, cpus->map[cpu]);
+
+ file = fopen(path, "r");
+ if (!file) {
+ pr_debug("fopen failed for file %s\n", path);
+ continue;
+ }
+
+ if (!fgets(buf, MIDR_SIZE, file)) {
+ fclose(file);
+ continue;
+ }
+ fclose(file);
+
+ /* Ignore/clear Variant[23:20] and
+ * Revision[3:0] of MIDR
+ */
+ midr = strtoul(buf, NULL, 16);
+ midr &= (~(MIDR_VARIANT_MASK | MIDR_REVISION_MASK));
+ scnprintf(buf, MIDR_SIZE, "0x%016lx", midr);
+		/* got the MIDR, break out of the loop */
+ break;
+ }
+
+ if (!midr) {
+ pr_err("failed to get cpuid string for PMU %s\n", pmu->name);
+ free(buf);
+ buf = NULL;
+ }
+
+ cpu_map__put(cpus);
+ return buf;
+}
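
To make the masking in get_cpuid_str() concrete: clearing Variant[23:20] and Revision[3:0] normalizes, say, a Cortex-A57 r1p3 MIDR of 0x411fd073 to 0x410fd070 (self-contained sketch; the example value is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t midr = 0x411fd073;                /* e.g. Cortex-A57 r1p3 */
            uint64_t mask = (0xfULL << 20) | 0xfULL;   /* Variant | Revision */

            printf("0x%016llx\n",
                   (unsigned long long)(midr & ~mask)); /* -> 0x...0410fd070 */
            return 0;
    }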
diff --git a/tools/perf/arch/arm64/util/sym-handling.c b/tools/perf/arch/arm64/util/sym-handling.c
new file mode 100644
index 000000000000..0051b1ee8450
--- /dev/null
+++ b/tools/perf/arch/arm64/util/sym-handling.c
@@ -0,0 +1,22 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#include "debug.h"
+#include "symbol.h"
+#include "map.h"
+#include "probe-event.h"
+#include "probe-file.h"
+
+#ifdef HAVE_LIBELF_SUPPORT
+bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
+{
+ return ehdr.e_type == ET_EXEC ||
+ ehdr.e_type == ET_REL ||
+ ehdr.e_type == ET_DYN;
+}
+#endif
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 8c0cfeb55f8e..c6f373508a4f 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -1,12 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
-#include <sys/utsname.h>
#include "common.h"
+#include "../util/env.h"
#include "../util/util.h"
#include "../util/debug.h"
-#include "sane_ctype.h"
-
const char *const arm_triplets[] = {
"arm-eabi-",
"arm-linux-androideabi-",
@@ -120,55 +118,19 @@ static int lookup_triplets(const char *const *triplets, const char *name)
return -1;
}
-/*
- * Return architecture name in a normalized form.
- * The conversion logic comes from the Makefile.
- */
-const char *normalize_arch(char *arch)
-{
- if (!strcmp(arch, "x86_64"))
- return "x86";
- if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
- return "x86";
- if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
- return "sparc";
- if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
- return "arm64";
- if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
- return "arm";
- if (!strncmp(arch, "s390", 4))
- return "s390";
- if (!strncmp(arch, "parisc", 6))
- return "parisc";
- if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
- return "powerpc";
- if (!strncmp(arch, "mips", 4))
- return "mips";
- if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
- return "sh";
-
- return arch;
-}
-
static int perf_env__lookup_binutils_path(struct perf_env *env,
const char *name, const char **path)
{
int idx;
- const char *arch, *cross_env;
- struct utsname uts;
+ const char *arch = perf_env__arch(env), *cross_env;
const char *const *path_list;
char *buf = NULL;
- arch = normalize_arch(env->arch);
-
- if (uname(&uts) < 0)
- goto out;
-
/*
* We don't need to try to find objdump path for native system.
* Just use default binutils path (e.g.: "objdump").
*/
- if (!strcmp(normalize_arch(uts.machine), arch))
+ if (!strcmp(perf_env__arch(NULL), arch))
goto out;
cross_env = getenv("CROSS_COMPILE");
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index a1546509ad24..2d875baa92e6 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -7,6 +7,5 @@
extern const char *objdump_path;
int perf_env__lookup_objdump(struct perf_env *env);
-const char *normalize_arch(char *arch);
#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 7a4cf80c207a..0b242664f5ea 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -35,7 +35,7 @@ get_cpuid(char *buffer, size_t sz)
}
char *
-get_cpuid_str(void)
+get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
char *bufp;
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index 9c4e23d8c8ce..53d83d7e6a09 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -64,6 +64,14 @@ int arch__compare_symbol_names_n(const char *namea, const char *nameb,
return strncmp(namea, nameb, n);
}
+
+const char *arch__normalize_symbol_name(const char *name)
+{
+ /* Skip over initial dot */
+ if (name && *name == '.')
+ name++;
+ return name;
+}
#endif
#if defined(_CALL_ELF) && _CALL_ELF == 2
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 09ba923debe8..48228de415d0 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -3,3 +3,24 @@ PERF_HAVE_DWARF_REGS := 1
endif
HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+
+#
+# Syscall table generation for perf
+#
+
+out := $(OUTPUT)arch/s390/include/generated/asm
+header := $(out)/syscalls_64.c
+sysdef := $(srctree)/tools/arch/s390/include/uapi/asm/unistd.h
+sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls/
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sysdef) $(systbl)
+ $(Q)$(SHELL) '$(systbl)' '$(CC)' $(sysdef) > $@
+
+clean::
+ $(call QUIET_CLEAN, s390) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index e0e466c650df..8c72b44444cb 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -18,7 +18,8 @@ static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *na
if (!strcmp(name, "br"))
ops = &ret_ops;
- arch__associate_ins_ops(arch, name, ops);
+ if (ops)
+ arch__associate_ins_ops(arch, name, ops);
return ops;
}
diff --git a/tools/perf/arch/s390/entry/syscalls/mksyscalltbl b/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
new file mode 100755
index 000000000000..7fa0d0abd419
--- /dev/null
+++ b/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf
+#
+#
+# Copyright IBM Corp. 2017
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+#
+
+gcc=$1
+input=$2
+
+if ! test -r $input; then
+ echo "Could not read input file" >&2
+ exit 1
+fi
+
+create_table()
+{
+ local max_nr
+
+ echo 'static const char *syscalltbl_s390_64[] = {'
+ while read sc nr; do
+ printf '\t[%d] = "%s",\n' $nr $sc
+ max_nr=$nr
+ done
+ echo '};'
+ echo "#define SYSCALLTBL_S390_64_MAX_ID $max_nr"
+}
+
+
+$gcc -m64 -E -dM -x c $input \
+ |sed -ne 's/^#define __NR_//p' \
+ |sort -t' ' -k2 -nu \
+ |create_table
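
The script above emits a C array indexed by syscall number plus a MAX_ID define; a hedged sketch of the generated shape with a bounds-checked lookup (the sample entries are illustrative, real values come from the preprocessed unistd.h):

    #include <stdio.h>

    static const char *syscalltbl_s390_64[] = {
            [1] = "exit",
            [2] = "fork",
            [3] = "read",   /* ...one entry per __NR_* the compiler saw... */
    };
    #define SYSCALLTBL_S390_64_MAX_ID 3

    static const char *sc_name(int id)
    {
            if (id < 0 || id > SYSCALLTBL_S390_64_MAX_ID)
                    return NULL;
            return syscalltbl_s390_64[id];  /* may be NULL for holes */
    }

    int main(void)
    {
            printf("%s\n", sc_name(3));     /* read */
            return 0;
    }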
diff --git a/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index b59678e8c1e2..06abe8108b33 100644
--- a/tools/perf/arch/x86/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -84,7 +84,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
CHECK__(perf_evlist__open(evlist));
- CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));
+ CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
pc = evlist->mmap[0].base;
ret = perf_read_tsc_conversion(pc, &tc);
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index 33027c5e6f92..fb0d71afee8b 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -66,11 +66,11 @@ get_cpuid(char *buffer, size_t sz)
}
char *
-get_cpuid_str(void)
+get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
char *buf = malloc(128);
- if (__get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
+ if (buf && __get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
free(buf);
return NULL;
}
diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
index 9c917f80c906..05920e3edf7a 100644
--- a/tools/perf/arch/x86/util/unwind-libunwind.c
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-#ifndef REMOTE_UNWIND_LIBUNWIND
#include <errno.h>
+#ifndef REMOTE_UNWIND_LIBUNWIND
#include <libunwind.h>
#include "perf_regs.h"
#include "../../util/unwind.h"
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index 58ae6ed8f38b..9aa3a674829b 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -24,9 +24,9 @@
#include <subcmd/parse-options.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
-#include <sys/time.h>
static unsigned int nthreads = 0;
static unsigned int nsecs = 10;
@@ -118,11 +118,12 @@ static void print_summary(void)
int bench_futex_hash(int argc, const char **argv)
{
int ret = 0;
- cpu_set_t cpu;
+ cpu_set_t cpuset;
struct sigaction act;
- unsigned int i, ncpus;
+ unsigned int i;
pthread_attr_t thread_attr;
struct worker *worker = NULL;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
if (argc) {
@@ -130,14 +131,16 @@ int bench_futex_hash(int argc, const char **argv)
exit(EXIT_FAILURE);
}
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ goto errmem;
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads) /* default to the number of CPUs */
- nthreads = ncpus;
+ nthreads = cpu->nr;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -163,10 +166,10 @@ int bench_futex_hash(int argc, const char **argv)
if (!worker[i].futex)
goto errmem;
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu);
+ ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
if (ret)
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
@@ -217,6 +220,7 @@ int bench_futex_hash(int argc, const char **argv)
print_summary();
free(worker);
+ free(cpu);
return ret;
errmem:
err(EXIT_FAILURE, "calloc");
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 08653ae8a8c4..8e9c4753e304 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -15,6 +15,7 @@
#include <errno.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -32,7 +33,7 @@ static struct worker *worker;
static unsigned int nsecs = 10;
static bool silent = false, multi = false;
static bool done = false, fshared = false;
-static unsigned int ncpus, nthreads = 0;
+static unsigned int nthreads = 0;
static int futex_flag = 0;
struct timeval start, end, runtime;
static pthread_mutex_t thread_lock;
@@ -113,9 +114,10 @@ static void *workerfn(void *arg)
return NULL;
}
-static void create_threads(struct worker *w, pthread_attr_t thread_attr)
+static void create_threads(struct worker *w, pthread_attr_t thread_attr,
+ struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nthreads;
@@ -130,10 +132,10 @@ static void create_threads(struct worker *w, pthread_attr_t thread_attr)
} else
worker[i].futex = &global_futex;
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
@@ -147,19 +149,22 @@ int bench_futex_lock_pi(int argc, const char **argv)
unsigned int i;
struct sigaction act;
pthread_attr_t thread_attr;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
if (argc)
goto err;
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "calloc");
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads)
- nthreads = ncpus;
+ nthreads = cpu->nr;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -180,7 +185,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
pthread_attr_init(&thread_attr);
gettimeofday(&start, NULL);
- create_threads(worker, thread_attr);
+ create_threads(worker, thread_attr, cpu);
pthread_attr_destroy(&thread_attr);
pthread_mutex_lock(&thread_lock);
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index 1058c194608a..fc692efa0c05 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -40,7 +41,7 @@ static bool done = false, silent = false, fshared = false;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats requeuetime_stats, requeued_stats;
-static unsigned int ncpus, threads_starting, nthreads = 0;
+static unsigned int threads_starting, nthreads = 0;
static int futex_flag = 0;
static const struct option options[] = {
@@ -83,19 +84,19 @@ static void *workerfn(void *arg __maybe_unused)
}
static void block_threads(pthread_t *w,
- pthread_attr_t thread_attr)
+ pthread_attr_t thread_attr, struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nthreads;
/* create and block all threads */
for (i = 0; i < nthreads; i++) {
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
@@ -116,19 +117,22 @@ int bench_futex_requeue(int argc, const char **argv)
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
if (argc)
goto err;
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "cpu_map__new");
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads)
- nthreads = ncpus;
+ nthreads = cpu->nr;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -156,7 +160,7 @@ int bench_futex_requeue(int argc, const char **argv)
struct timeval start, end, runtime;
/* create, launch & block all threads */
- block_threads(worker, thread_attr);
+ block_threads(worker, thread_attr, cpu);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index b4732dad9f89..69d8fdc87315 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -7,7 +7,17 @@
* for each individual thread to service its share of work. Ultimately
* it can be used to measure futex_wake() changes.
*/
+#include "bench.h"
+#include <linux/compiler.h>
+#include "../util/debug.h"
+#ifndef HAVE_PTHREAD_BARRIER
+int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
+{
+ pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
+ return 0;
+}
+#else /* HAVE_PTHREAD_BARRIER */
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
@@ -15,12 +25,11 @@
#include <signal.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
-#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
-#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -42,8 +51,9 @@ static bool done = false, silent = false, fshared = false;
static unsigned int nblocked_threads = 0, nwaking_threads = 0;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
+static pthread_barrier_t barrier;
static struct stats waketime_stats, wakeup_stats;
-static unsigned int ncpus, threads_starting;
+static unsigned int threads_starting;
static int futex_flag = 0;
static const struct option options[] = {
@@ -64,6 +74,8 @@ static void *waking_workerfn(void *arg)
struct thread_data *waker = (struct thread_data *) arg;
struct timeval start, end;
+ pthread_barrier_wait(&barrier);
+
gettimeofday(&start, NULL);
waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
@@ -84,6 +96,8 @@ static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
+ pthread_barrier_init(&barrier, NULL, nwaking_threads + 1);
+
/* create and block all threads */
for (i = 0; i < nwaking_threads; i++) {
/*
@@ -96,9 +110,13 @@ static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
err(EXIT_FAILURE, "pthread_create");
}
+ pthread_barrier_wait(&barrier);
+
for (i = 0; i < nwaking_threads; i++)
if (pthread_join(td[i].worker, NULL))
err(EXIT_FAILURE, "pthread_join");
+
+ pthread_barrier_destroy(&barrier);
}
static void *blocked_workerfn(void *arg __maybe_unused)
@@ -119,19 +137,20 @@ static void *blocked_workerfn(void *arg __maybe_unused)
return NULL;
}
-static void block_threads(pthread_t *w, pthread_attr_t thread_attr)
+static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
+ struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nblocked_threads;
/* create and block all threads */
for (i = 0; i < nblocked_threads; i++) {
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
@@ -205,6 +224,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
struct sigaction act;
pthread_attr_t thread_attr;
struct thread_data *waking_worker;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options,
bench_futex_wake_parallel_usage, 0);
@@ -217,9 +237,12 @@ int bench_futex_wake_parallel(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "calloc");
+
if (!nblocked_threads)
- nblocked_threads = ncpus;
+ nblocked_threads = cpu->nr;
/* some sanity checks */
if (nwaking_threads > nblocked_threads || !nwaking_threads)
@@ -259,7 +282,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
err(EXIT_FAILURE, "calloc");
/* create, launch & block all threads */
- block_threads(blocked_worker, thread_attr);
+ block_threads(blocked_worker, thread_attr, cpu);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
@@ -297,3 +320,4 @@ int bench_futex_wake_parallel(int argc, const char **argv)
free(blocked_worker);
return ret;
}
+#endif /* HAVE_PTHREAD_BARRIER */
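
The pthread_barrier added above lets all waker threads begin timing simultaneously: each blocks in pthread_barrier_wait() and the parent, as the (nwakers+1)-th arrival, releases them at once. A self-contained miniature of the pattern (compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    #define NWAKERS 4
    static pthread_barrier_t barrier;

    static void *waker(void *arg)
    {
            pthread_barrier_wait(&barrier);   /* all wakers released together */
            printf("waker %ld running\n", (long)arg);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NWAKERS];
            long i;

            pthread_barrier_init(&barrier, NULL, NWAKERS + 1);
            for (i = 0; i < NWAKERS; i++)
                    pthread_create(&t[i], NULL, waker, (void *)i);
            pthread_barrier_wait(&barrier);   /* parent is the last arrival */
            for (i = 0; i < NWAKERS; i++)
                    pthread_join(t[i], NULL);
            pthread_barrier_destroy(&barrier);
            return 0;
    }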
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 8c5c0b6b5c97..e8181ad7d088 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -89,19 +90,19 @@ static void print_summary(void)
}
static void block_threads(pthread_t *w,
- pthread_attr_t thread_attr)
+ pthread_attr_t thread_attr, struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nthreads;
/* create and block all threads */
for (i = 0; i < nthreads; i++) {
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
@@ -122,6 +123,7 @@ int bench_futex_wake(int argc, const char **argv)
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0);
if (argc) {
@@ -129,7 +131,9 @@ int bench_futex_wake(int argc, const char **argv)
exit(EXIT_FAILURE);
}
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "calloc");
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
@@ -161,7 +165,7 @@ int bench_futex_wake(int argc, const char **argv)
struct timeval start, end, runtime;
/* create, launch & block all threads */
- block_threads(worker, thread_attr);
+ block_threads(worker, thread_attr, cpu);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 3d354ba6e9c5..41db2cba77eb 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -325,8 +325,8 @@ int cmd_buildid_cache(int argc, const char **argv)
"file", "kcore file to add"),
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
"file(s) to remove"),
- OPT_STRING('p', "purge", &purge_name_list_str, "path list",
- "path(s) to remove (remove old caches too)"),
+ OPT_STRING('p', "purge", &purge_name_list_str, "file list",
+ "file(s) to remove (remove old caches too)"),
OPT_STRING('M', "missing", &missing_filename, "file",
"to find missing build ids in the cache"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 17855c4626a0..c0815a37fdb5 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -27,13 +27,10 @@
#include "sort.h"
#include "tool.h"
#include "data.h"
-#include "sort.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
-#include <asm/bug.h>
#include "ui/browsers/hists.h"
-#include "evlist.h"
#include "thread.h"
struct c2c_hists {
@@ -2224,9 +2221,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
struct hist_browser *browser;
int key = -1;
const char help[] =
- " ENTER Togle callchains (if present) \n"
- " n Togle Node details info \n"
- " s Togle full lenght of symbol and source line columns \n"
+ " ENTER Toggle callchains (if present) \n"
+ " n Toggle Node details info \n"
+ " s Toggle full length of symbol and source line columns \n"
" q Return back to cacheline list \n";
/* Display compact version first. */
@@ -2303,7 +2300,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
int key = -1;
const char help[] =
" d Display cacheline details \n"
- " ENTER Togle callchains (if present) \n"
+ " ENTER Toggle callchains (if present) \n"
" q Quit \n";
browser = perf_c2c_browser__new(hists);
@@ -2393,9 +2390,10 @@ static int setup_callchain(struct perf_evlist *evlist)
enum perf_call_graph_mode mode = CALLCHAIN_NONE;
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER))
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
mode = CALLCHAIN_DWARF;
- else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
mode = CALLCHAIN_LBR;
else if (sample_type & PERF_SAMPLE_CALLCHAIN)
mode = CALLCHAIN_FP;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index a0f7ed2b869b..4aca13f23b9d 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -439,7 +439,7 @@ int cmd_help(int argc, const char **argv)
#ifdef HAVE_LIBELF_SUPPORT
"probe",
#endif
-#ifdef HAVE_LIBAUDIT_SUPPORT
+#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)
"trace",
#endif
NULL };
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 16a28547ca86..40fe919bbcf3 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -536,8 +536,7 @@ found:
sample_sw.period = sample->period;
sample_sw.time = sample->time;
perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
- evsel->attr.read_format, &sample_sw,
- false);
+ evsel->attr.read_format, &sample_sw);
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 0c36f2ac6a0e..55d919dc5bc6 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -26,6 +26,9 @@
#include <sys/timerfd.h>
#endif
#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include <linux/kernel.h>
#include <linux/time64.h>
@@ -741,20 +744,20 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
u64 *mmap_time)
{
union perf_event *event;
- struct perf_sample sample;
+ u64 timestamp;
s64 n = 0;
int err;
*mmap_time = ULLONG_MAX;
while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
- err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
+ err = perf_evlist__parse_sample_timestamp(kvm->evlist, event, &timestamp);
if (err) {
perf_evlist__mmap_consume(kvm->evlist, idx);
pr_err("Failed to parse sample\n");
return -1;
}
- err = perf_session__queue_event(kvm->session, event, &sample, 0);
+ err = perf_session__queue_event(kvm->session, event, timestamp, 0);
/*
* FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
@@ -768,7 +771,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
/* save time stamp of our first sample for this mmap */
if (n == 0)
- *mmap_time = sample.time;
+ *mmap_time = timestamp;
/* limit events per mmap handled all at once */
n++;
@@ -1044,7 +1047,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
goto out;
}
- if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) {
+ if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
ui__error("Failed to mmap the events: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
perf_evlist__close(evlist);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 003255910c05..65681a1a292a 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -51,7 +51,6 @@
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
-#include <asm/bug.h>
#include <linux/time64.h>
struct switch_output {
@@ -79,6 +78,7 @@ struct record {
bool no_buildid_cache_set;
bool buildid_all;
bool timestamp_filename;
+ bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
};
@@ -301,7 +301,7 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
- if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
+ if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode) < 0) {
if (errno == EPERM) {
@@ -372,6 +372,8 @@ try_again:
ui__error("%s\n", msg);
goto out;
}
+
+ pos->supported = true;
}
if (perf_evlist__apply_filters(evlist, &pos)) {
@@ -408,8 +410,15 @@ static int process_sample_event(struct perf_tool *tool,
{
struct record *rec = container_of(tool, struct record, tool);
- rec->samples++;
+ if (rec->evlist->first_sample_time == 0)
+ rec->evlist->first_sample_time = sample->time;
+
+ rec->evlist->last_sample_time = sample->time;
+ if (rec->buildid_all)
+ return 0;
+
+ rec->samples++;
return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
@@ -434,9 +443,11 @@ static int process_buildids(struct record *rec)
/*
* If --buildid-all is given, it marks all DSO regardless of hits,
- * so no need to process samples.
+ * so no need to process samples. But if timestamp_boundary is enabled,
+ * it still needs to walk all samples to get the timestamps of
+ * first/last samples.
*/
- if (rec->buildid_all)
+ if (rec->buildid_all && !rec->timestamp_boundary)
rec->tool.sample = NULL;
return perf_session__process_events(session);
@@ -477,7 +488,7 @@ static struct perf_event_header finished_round_event = {
};
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
- bool backward)
+ bool overwrite)
{
u64 bytes_written = rec->bytes_written;
int i;
@@ -487,18 +498,18 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
if (!evlist)
return 0;
- maps = backward ? evlist->backward_mmap : evlist->mmap;
+ maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
if (!maps)
return 0;
- if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
+ if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
return 0;
for (i = 0; i < evlist->nr_mmaps; i++) {
struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
if (maps[i].base) {
- if (perf_mmap__push(&maps[i], evlist->overwrite, backward, rec, record__pushfn) != 0) {
+ if (perf_mmap__push(&maps[i], overwrite, rec, record__pushfn) != 0) {
rc = -1;
goto out;
}
@@ -518,7 +529,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
if (bytes_written != rec->bytes_written)
rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
- if (backward)
+ if (overwrite)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
return rc;
@@ -690,8 +701,8 @@ perf_evlist__pick_pc(struct perf_evlist *evlist)
if (evlist) {
if (evlist->mmap && evlist->mmap[0].base)
return evlist->mmap[0].base;
- if (evlist->backward_mmap && evlist->backward_mmap[0].base)
- return evlist->backward_mmap[0].base;
+ if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
+ return evlist->overwrite_mmap[0].base;
}
return NULL;
}
@@ -784,6 +795,28 @@ static int record__synthesize(struct record *rec, bool tail)
perf_event__synthesize_guest_os, tool);
}
+ err = perf_event__synthesize_extra_attr(&rec->tool,
+ rec->evlist,
+ process_synthesized_event,
+ data->is_pipe);
+ if (err)
+ goto out;
+
+ err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
+ process_synthesized_event,
+ NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
+ process_synthesized_event, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize cpu map.\n");
+ return err;
+ }
+
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address,
opts->proc_map_timeout, 1);
@@ -1598,6 +1631,8 @@ static struct option __record_options[] = {
"Record build-id of all DSOs regardless of hits"),
OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
"append timestamp to output filename"),
+ OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
+ "Record timestamp boundary (time of first/last samples)"),
OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
&record.switch_output.set, "signal,size,time",
"Switch output when receive SIGUSR2 or cross size,time threshold",
@@ -1781,8 +1816,8 @@ int cmd_record(int argc, const char **argv)
goto out;
}
- /* Enable ignoring missing threads when -u option is defined. */
- rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
+ /* Enable ignoring missing threads when -u/-p option is defined. */
+ rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
err = -ENOMEM;
if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
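
The new --timestamp-boundary option boils down to remembering the first and last PERF_RECORD_SAMPLE times even when --buildid-all would otherwise skip sample processing; the --time percent selections in report/script below use that window as their reference. A minimal sketch of the bookkeeping:

#include <stdint.h>

struct sample_bounds {
	uint64_t first;	/* 0 until the first sample is seen */
	uint64_t last;
};

static void bounds_update(struct sample_bounds *b, uint64_t t)
{
	if (b->first == 0)
		b->first = t;
	b->last = t;	/* samples are processed in time order */
}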
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index af5dd038195e..42a52dcc41cd 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -15,6 +15,7 @@
#include "util/color.h"
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/err.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/values.h"
@@ -51,6 +52,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
+#include <linux/mman.h>
struct report {
struct perf_tool tool;
@@ -60,6 +62,9 @@ struct report {
bool show_threads;
bool inverted_callchain;
bool mem_mode;
+ bool stats_mode;
+ bool tasks_mode;
+ bool mmaps_mode;
bool header;
bool header_only;
bool nonany_branch_mode;
@@ -69,7 +74,9 @@ struct report {
const char *cpu_list;
const char *symbol_filter_str;
const char *time_str;
- struct perf_time_interval ptime;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
float min_percent;
u64 nr_entries;
u64 queue_size;
@@ -162,12 +169,28 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
struct hist_entry *he = iter->he;
struct report *rep = arg;
struct branch_info *bi;
+ struct perf_sample *sample = iter->sample;
+ struct perf_evsel *evsel = iter->evsel;
+ int err;
+
+ if (!ui__has_annotation())
+ return 0;
+
+ hist__account_cycles(sample->branch_stack, al, sample,
+ rep->nonany_branch_mode);
bi = he->branch_info;
+ err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx);
+ if (err)
+ goto out;
+
+ err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx);
+
branch_type_count(&rep->brtype_stat, &bi->flags,
bi->from.addr, bi->to.addr);
- return 0;
+out:
+ return err;
}
static int process_sample_event(struct perf_tool *tool,
@@ -186,8 +209,10 @@ static int process_sample_event(struct perf_tool *tool,
};
int ret = 0;
- if (perf_time__skip_sample(&rep->ptime, sample->time))
+ if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
+ sample->time)) {
return 0;
+ }
if (machine__resolve(machine, &al, sample) < 0) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -312,9 +337,10 @@ static int report__setup_sample_type(struct report *rep)
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER))
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
callchain_param.record_mode = CALLCHAIN_DWARF;
- else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
callchain_param.record_mode = CALLCHAIN_LBR;
else
callchain_param.record_mode = CALLCHAIN_FP;
@@ -377,6 +403,9 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
if (evname != NULL)
ret += fprintf(fp, " of event '%s'", evname);
+ if (rep->time_str)
+ ret += fprintf(fp, " (time slices: %s)", rep->time_str);
+
if (symbol_conf.show_ref_callgraph &&
strstr(evname, "call-graph=no")) {
ret += fprintf(fp, ", show reference callgraph");
@@ -567,6 +596,174 @@ static void report__output_resort(struct report *rep)
ui_progress__finish();
}
+static void stats_setup(struct report *rep)
+{
+ memset(&rep->tool, 0, sizeof(rep->tool));
+ rep->tool.no_warn = true;
+}
+
+static int stats_print(struct report *rep)
+{
+ struct perf_session *session = rep->session;
+
+ perf_session__fprintf_nr_events(session, stdout);
+ return 0;
+}
+
+static void tasks_setup(struct report *rep)
+{
+ memset(&rep->tool, 0, sizeof(rep->tool));
+ if (rep->mmaps_mode) {
+ rep->tool.mmap = perf_event__process_mmap;
+ rep->tool.mmap2 = perf_event__process_mmap2;
+ }
+ rep->tool.comm = perf_event__process_comm;
+ rep->tool.exit = perf_event__process_exit;
+ rep->tool.fork = perf_event__process_fork;
+ rep->tool.no_warn = true;
+}
+
+struct task {
+ struct thread *thread;
+ struct list_head list;
+ struct list_head children;
+};
+
+static struct task *tasks_list(struct task *task, struct machine *machine)
+{
+ struct thread *parent_thread, *thread = task->thread;
+ struct task *parent_task;
+
+ /* Already listed. */
+ if (!list_empty(&task->list))
+ return NULL;
+
+ /* Last one in the chain. */
+ if (thread->ppid == -1)
+ return task;
+
+ parent_thread = machine__find_thread(machine, -1, thread->ppid);
+ if (!parent_thread)
+ return ERR_PTR(-ENOENT);
+
+ parent_task = thread__priv(parent_thread);
+ list_add_tail(&task->list, &parent_task->children);
+ return tasks_list(parent_task, machine);
+}
+
+static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
+{
+ size_t printed = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+
+ printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
+ indent, "", map->start, map->end,
+ map->prot & PROT_READ ? 'r' : '-',
+ map->prot & PROT_WRITE ? 'w' : '-',
+ map->prot & PROT_EXEC ? 'x' : '-',
+ map->flags & MAP_SHARED ? 's' : 'p',
+ map->pgoff,
+ map->ino, map->dso->name);
+ }
+
+ return printed;
+}
+
+static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp)
+{
+ int printed = 0, i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ printed += maps__fprintf_task(&mg->maps[i], indent, fp);
+ return printed;
+}
+
+static void task__print_level(struct task *task, FILE *fp, int level)
+{
+ struct thread *thread = task->thread;
+ struct task *child;
+ int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
+ thread->pid_, thread->tid, thread->ppid,
+ level, "");
+
+ fprintf(fp, "%s\n", thread__comm_str(thread));
+
+ map_groups__fprintf_task(thread->mg, comm_indent, fp);
+
+ if (!list_empty(&task->children)) {
+ list_for_each_entry(child, &task->children, list)
+ task__print_level(child, fp, level + 1);
+ }
+}
+
+static int tasks_print(struct report *rep, FILE *fp)
+{
+ struct perf_session *session = rep->session;
+ struct machine *machine = &session->machines.host;
+ struct task *tasks, *task;
+ unsigned int nr = 0, itask = 0, i;
+ struct rb_node *nd;
+ LIST_HEAD(list);
+
+ /*
+ * No locking needed while accessing machine->threads,
+ * because --tasks is a single-threaded command.
+ */
+
+ /* Count all the threads. */
+ for (i = 0; i < THREADS__TABLE_SIZE; i++)
+ nr += machine->threads[i].nr;
+
+ tasks = malloc(sizeof(*tasks) * nr);
+ if (!tasks)
+ return -ENOMEM;
+
+ for (i = 0; i < THREADS__TABLE_SIZE; i++) {
+ struct threads *threads = &machine->threads[i];
+
+ for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ task = tasks + itask++;
+
+ task->thread = rb_entry(nd, struct thread, rb_node);
+ INIT_LIST_HEAD(&task->children);
+ INIT_LIST_HEAD(&task->list);
+ thread__set_priv(task->thread, task);
+ }
+ }
+
+ /*
+ * Iterate every task down to the unprocessed parent
+ * and link all in task children list. Task with no
+ * parent is added into 'list'.
+ */
+ for (itask = 0; itask < nr; itask++) {
+ task = tasks + itask;
+
+ if (!list_empty(&task->list))
+ continue;
+
+ task = tasks_list(task, machine);
+ if (IS_ERR(task)) {
+ pr_err("Error: failed to process tasks\n");
+ free(tasks);
+ return PTR_ERR(task);
+ }
+
+ if (task)
+ list_add_tail(&task->list, &list);
+ }
+
+ fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");
+
+ list_for_each_entry(task, &list, list)
+ task__print_level(task, fp, 0);
+
+ free(tasks);
+ return 0;
+}
+
static int __cmd_report(struct report *rep)
{
int ret;
@@ -598,12 +795,24 @@ static int __cmd_report(struct report *rep)
return ret;
}
+ if (rep->stats_mode)
+ stats_setup(rep);
+
+ if (rep->tasks_mode)
+ tasks_setup(rep);
+
ret = perf_session__process_events(session);
if (ret) {
ui__error("failed to process sample\n");
return ret;
}
+ if (rep->stats_mode)
+ return stats_print(rep);
+
+ if (rep->tasks_mode)
+ return tasks_print(rep, stdout);
+
report__warn_kptr_restrict(rep);
evlist__for_each_entry(session->evlist, pos)
@@ -760,6 +969,9 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
+ OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
+ OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
+ OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
@@ -907,6 +1119,9 @@ int cmd_report(int argc, const char **argv)
report.symbol_filter_str = argv[0];
}
+ if (report.mmaps_mode)
+ report.tasks_mode = true;
+
if (quiet)
perf_quiet_option();
@@ -921,13 +1136,6 @@ int cmd_report(int argc, const char **argv)
return -EINVAL;
}
- if (report.use_stdio)
- use_browser = 0;
- else if (report.use_tui)
- use_browser = 1;
- else if (report.use_gtk)
- use_browser = 2;
-
if (report.inverted_callchain)
callchain_param.order = ORDER_CALLER;
if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
@@ -1014,6 +1222,13 @@ repeat:
perf_hpp_list.need_collapse = true;
}
+ if (report.use_stdio)
+ use_browser = 0;
+ else if (report.use_tui)
+ use_browser = 1;
+ else if (report.use_gtk)
+ use_browser = 2;
+
/* Force tty output for header output and per-thread stat. */
if (report.header || report.header_only || report.show_threads)
use_browser = 0;
@@ -1021,6 +1236,12 @@ repeat:
report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
if (report.show_full_info)
report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
+ if (report.stats_mode || report.tasks_mode)
+ use_browser = 0;
+ if (report.stats_mode && report.tasks_mode) {
+ pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
+ goto error;
+ }
if (strcmp(input_name, "-") != 0)
setup_browser(true);
@@ -1043,7 +1264,8 @@ repeat:
ret = 0;
goto error;
}
- } else if (use_browser == 0 && !quiet) {
+ } else if (use_browser == 0 && !quiet &&
+ !report.stats_mode && !report.tasks_mode) {
fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
stdout);
}
@@ -1077,9 +1299,36 @@ repeat:
if (symbol__init(&session->header.env) < 0)
goto error;
- if (perf_time__parse_str(&report.ptime, report.time_str) != 0) {
- pr_err("Invalid time string\n");
- return -EINVAL;
+ report.ptime_range = perf_time__range_alloc(report.time_str,
+ &report.range_size);
+ if (!report.ptime_range) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (perf_time__parse_str(report.ptime_range, report.time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ report.range_num = perf_time__percent_parse_str(
+ report.ptime_range, report.range_size,
+ report.time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (report.range_num < 0) {
+ pr_err("Invalid time string\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ report.range_num = 1;
}
sort__setup_elide(stdout);
@@ -1092,6 +1341,8 @@ repeat:
ret = 0;
error:
+ zfree(&report.ptime_range);
+
perf_session__delete(session);
return ret;
}
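
With the first/last sample times available, --time can now take multiple ranges and percent slices (e.g. "10%/2" for the second 10% slice, going by the perf_time__percent_parse_str() name; the exact syntax is an assumption here). The slice arithmetic reduces to:

#include <stdint.h>

struct time_range { uint64_t start, end; };

/* nth is 1-based; e.g. percent=10, nth=2 -> the second 10% slice */
static struct time_range nth_percent_slice(uint64_t first, uint64_t last,
					   unsigned int percent, unsigned int nth)
{
	uint64_t step = (last - first) / 100 * percent;
	struct time_range r;

	r.start = first + step * (nth - 1);
	r.end   = r.start + step;
	return r;
}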
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 9b43bda45a41..ab19a6ee4093 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -22,9 +22,11 @@
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/stat.h"
+#include "util/color.h"
#include "util/string2.h"
#include "util/thread-stack.h"
#include "util/time-utils.h"
+#include "util/path.h"
#include "print_binary.h"
#include <linux/bitmap.h>
#include <linux/kernel.h>
@@ -40,6 +42,7 @@
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include <unistd.h>
#include "sane_ctype.h"
@@ -90,6 +93,8 @@ enum perf_output_field {
PERF_OUTPUT_SYNTH = 1U << 25,
PERF_OUTPUT_PHYS_ADDR = 1U << 26,
PERF_OUTPUT_UREGS = 1U << 27,
+ PERF_OUTPUT_METRIC = 1U << 28,
+ PERF_OUTPUT_MISC = 1U << 29,
};
struct output_option {
@@ -124,6 +129,8 @@ struct output_option {
{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
{.str = "synth", .field = PERF_OUTPUT_SYNTH},
{.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR},
+ {.str = "metric", .field = PERF_OUTPUT_METRIC},
+ {.str = "misc", .field = PERF_OUTPUT_MISC},
};
enum {
@@ -215,12 +222,20 @@ struct perf_evsel_script {
char *filename;
FILE *fp;
u64 samples;
+ /* For metric output */
+ u64 val;
+ int gnum;
};
+static inline struct perf_evsel_script *evsel_script(struct perf_evsel *evsel)
+{
+ return (struct perf_evsel_script *)evsel->priv;
+}
+
static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel,
struct perf_data *data)
{
- struct perf_evsel_script *es = malloc(sizeof(*es));
+ struct perf_evsel_script *es = zalloc(sizeof(*es));
if (es != NULL) {
if (asprintf(&es->filename, "%s.%s.dump", data->file.path, perf_evsel__name(evsel)) < 0)
@@ -228,7 +243,6 @@ static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel
es->fp = fopen(es->filename, "w");
if (es->fp == NULL)
goto out_free_filename;
- es->samples = 0;
}
return es;
@@ -423,11 +437,6 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
PERF_OUTPUT_CPU, allow_user_set))
return -EINVAL;
- if (PRINT_FIELD(PERIOD) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_PERIOD, "PERIOD",
- PERF_OUTPUT_PERIOD))
- return -EINVAL;
-
if (PRINT_FIELD(IREGS) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS",
PERF_OUTPUT_IREGS))
@@ -588,7 +597,8 @@ static int perf_sample__fprintf_uregs(struct perf_sample *sample,
static int perf_sample__fprintf_start(struct perf_sample *sample,
struct thread *thread,
- struct perf_evsel *evsel, FILE *fp)
+ struct perf_evsel *evsel,
+ u32 type, FILE *fp)
{
struct perf_event_attr *attr = &evsel->attr;
unsigned long secs;
@@ -618,6 +628,47 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
printed += fprintf(fp, "[%03d] ", sample->cpu);
}
+ if (PRINT_FIELD(MISC)) {
+ int ret = 0;
+
+ #define has(m) \
+ (sample->misc & PERF_RECORD_MISC_##m) == PERF_RECORD_MISC_##m
+
+ if (has(KERNEL))
+ ret += fprintf(fp, "K");
+ if (has(USER))
+ ret += fprintf(fp, "U");
+ if (has(HYPERVISOR))
+ ret += fprintf(fp, "H");
+ if (has(GUEST_KERNEL))
+ ret += fprintf(fp, "G");
+ if (has(GUEST_USER))
+ ret += fprintf(fp, "g");
+
+ switch (type) {
+ case PERF_RECORD_MMAP:
+ case PERF_RECORD_MMAP2:
+ if (has(MMAP_DATA))
+ ret += fprintf(fp, "M");
+ break;
+ case PERF_RECORD_COMM:
+ if (has(COMM_EXEC))
+ ret += fprintf(fp, "E");
+ break;
+ case PERF_RECORD_SWITCH:
+ case PERF_RECORD_SWITCH_CPU_WIDE:
+ if (has(SWITCH_OUT))
+ ret += fprintf(fp, "S");
+ default:
+ break;
+ }
+
+ #undef has
+
+ ret += fprintf(fp, "%*s", 6 - ret, " ");
+ printed += ret;
+ }
+
if (PRINT_FIELD(TIME)) {
nsecs = sample->time;
secs = nsecs / NSEC_PER_SEC;
@@ -1437,13 +1488,16 @@ struct perf_script {
bool show_mmap_events;
bool show_switch_events;
bool show_namespace_events;
+ bool show_lost_events;
bool allocated;
bool per_event_dump;
struct cpu_map *cpus;
struct thread_map *threads;
int name_width;
const char *time_str;
- struct perf_time_interval ptime;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
};
static int perf_evlist__max_name_len(struct perf_evlist *evlist)
@@ -1477,6 +1531,88 @@ static int data_src__fprintf(u64 data_src, FILE *fp)
return fprintf(fp, "%-*s", maxlen, out);
}
+struct metric_ctx {
+ struct perf_sample *sample;
+ struct thread *thread;
+ struct perf_evsel *evsel;
+ FILE *fp;
+};
+
+static void script_print_metric(void *ctx, const char *color,
+ const char *fmt,
+ const char *unit, double val)
+{
+ struct metric_ctx *mctx = ctx;
+
+ if (!fmt)
+ return;
+ perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+ PERF_RECORD_SAMPLE, mctx->fp);
+ fputs("\tmetric: ", mctx->fp);
+ if (color)
+ color_fprintf(mctx->fp, color, fmt, val);
+ else
+ fprintf(mctx->fp, fmt, val);
+ fprintf(mctx->fp, " %s\n", unit);
+}
+
+static void script_new_line(void *ctx)
+{
+ struct metric_ctx *mctx = ctx;
+
+ perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+ PERF_RECORD_SAMPLE, mctx->fp);
+ fputs("\tmetric: ", mctx->fp);
+}
+
+static void perf_sample__fprint_metric(struct perf_script *script,
+ struct thread *thread,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ FILE *fp)
+{
+ struct perf_stat_output_ctx ctx = {
+ .print_metric = script_print_metric,
+ .new_line = script_new_line,
+ .ctx = &(struct metric_ctx) {
+ .sample = sample,
+ .thread = thread,
+ .evsel = evsel,
+ .fp = fp,
+ },
+ .force_header = false,
+ };
+ struct perf_evsel *ev2;
+ static bool init;
+ u64 val;
+
+ if (!init) {
+ perf_stat__init_shadow_stats();
+ init = true;
+ }
+ if (!evsel->stats)
+ perf_evlist__alloc_stats(script->session->evlist, false);
+ if (evsel_script(evsel->leader)->gnum++ == 0)
+ perf_stat__reset_shadow_stats();
+ val = sample->period * evsel->scale;
+ perf_stat__update_shadow_stats(evsel,
+ val,
+ sample->cpu,
+ &rt_stat);
+ evsel_script(evsel)->val = val;
+ if (evsel_script(evsel->leader)->gnum == evsel->leader->nr_members) {
+ for_each_group_member (ev2, evsel->leader) {
+ perf_stat__print_shadow_stats(ev2,
+ evsel_script(ev2)->val,
+ sample->cpu,
+ &ctx,
+ NULL,
+ &rt_stat);
+ }
+ evsel_script(evsel->leader)->gnum = 0;
+ }
+}
+
static void process_event(struct perf_script *script,
struct perf_sample *sample, struct perf_evsel *evsel,
struct addr_location *al,
@@ -1493,7 +1629,8 @@ static void process_event(struct perf_script *script,
++es->samples;
- perf_sample__fprintf_start(sample, thread, evsel, fp);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_SAMPLE, fp);
if (PRINT_FIELD(PERIOD))
fprintf(fp, "%10" PRIu64 " ", sample->period);
@@ -1564,6 +1701,9 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(PHYS_ADDR))
fprintf(fp, "%16" PRIx64, sample->phys_addr);
fprintf(fp, "\n");
+
+ if (PRINT_FIELD(METRIC))
+ perf_sample__fprint_metric(script, thread, evsel, sample, fp);
}
static struct scripting_ops *scripting_ops;
@@ -1643,8 +1783,10 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct addr_location al;
- if (perf_time__skip_sample(&scr->ptime, sample->time))
+ if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
+ sample->time)) {
return 0;
+ }
if (debug_mode) {
if (sample->time < last_timestamp) {
@@ -1737,7 +1879,8 @@ static int process_comm_event(struct perf_tool *tool,
sample->tid = event->comm.tid;
sample->pid = event->comm.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_COMM, stdout);
perf_event__fprintf(event, stdout);
ret = 0;
out:
@@ -1772,7 +1915,8 @@ static int process_namespaces_event(struct perf_tool *tool,
sample->tid = event->namespaces.tid;
sample->pid = event->namespaces.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_NAMESPACES, stdout);
perf_event__fprintf(event, stdout);
ret = 0;
out:
@@ -1805,7 +1949,8 @@ static int process_fork_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_FORK, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
@@ -1834,7 +1979,8 @@ static int process_exit_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_EXIT, stdout);
perf_event__fprintf(event, stdout);
if (perf_event__process_exit(tool, event, sample, machine) < 0)
@@ -1869,7 +2015,8 @@ static int process_mmap_event(struct perf_tool *tool,
sample->tid = event->mmap.tid;
sample->pid = event->mmap.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1900,7 +2047,8 @@ static int process_mmap2_event(struct perf_tool *tool,
sample->tid = event->mmap2.tid;
sample->pid = event->mmap2.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP2, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1926,7 +2074,31 @@ static int process_switch_event(struct perf_tool *tool,
return -1;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_SWITCH, stdout);
+ perf_event__fprintf(event, stdout);
+ thread__put(thread);
+ return 0;
+}
+
+static int
+process_lost_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+ struct perf_session *session = script->session;
+ struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+ struct thread *thread;
+
+ thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid);
+ if (thread == NULL)
+ return -1;
+
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_LOST, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -2026,6 +2198,8 @@ static int __cmd_script(struct perf_script *script)
script->tool.context_switch = process_switch_event;
if (script->show_namespace_events)
script->tool.namespaces = process_namespaces_event;
+ if (script->show_lost_events)
+ script->tool.lost = process_lost_event;
if (perf_script__setup_per_event_dump(script)) {
pr_err("Couldn't create the per event dump files\n");
@@ -2311,19 +2485,6 @@ out:
return rc;
}
-/* Helper function for filesystems that return a dent->d_type DT_UNKNOWN */
-static int is_directory(const char *base_path, const struct dirent *dent)
-{
- char path[PATH_MAX];
- struct stat st;
-
- sprintf(path, "%s/%s", base_path, dent->d_name);
- if (stat(path, &st))
- return 0;
-
- return S_ISDIR(st.st_mode);
-}
-
#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
while ((lang_dirent = readdir(scripts_dir)) != NULL) \
if ((lang_dirent->d_type == DT_DIR || \
@@ -2758,9 +2919,10 @@ static void script__setup_sample_type(struct perf_script *script)
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER))
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
callchain_param.record_mode = CALLCHAIN_DWARF;
- else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
callchain_param.record_mode = CALLCHAIN_LBR;
else
callchain_param.record_mode = CALLCHAIN_FP;
@@ -2975,6 +3137,8 @@ int cmd_script(int argc, const char **argv)
"Show context switch events (if recorded)"),
OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events,
"Show namespace events (if recorded)"),
+ OPT_BOOLEAN('\0', "show-lost-events", &script.show_lost_events,
+ "Show lost events (if recorded)"),
OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
"Dump trace output to files named by the monitored events"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
@@ -3281,18 +3445,46 @@ int cmd_script(int argc, const char **argv)
if (err < 0)
goto out_delete;
- /* needs to be parsed after looking up reference time */
- if (perf_time__parse_str(&script.ptime, script.time_str) != 0) {
- pr_err("Invalid time string\n");
- err = -EINVAL;
+ script.ptime_range = perf_time__range_alloc(script.time_str,
+ &script.range_size);
+ if (!script.ptime_range) {
+ err = -ENOMEM;
goto out_delete;
}
+ /* needs to be parsed after looking up reference time */
+ if (perf_time__parse_str(script.ptime_range, script.time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ err = -EINVAL;
+ goto out_delete;
+ }
+
+ script.range_num = perf_time__percent_parse_str(
+ script.ptime_range, script.range_size,
+ script.time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (script.range_num < 0) {
+ pr_err("Invalid time string\n");
+ err = -EINVAL;
+ goto out_delete;
+ }
+ } else {
+ script.range_num = 1;
+ }
+
err = __cmd_script(&script);
flush_scripting();
out_delete:
+ zfree(&script.ptime_range);
+
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
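
A note on the new 'misc' output field: the has() macro above bit-tests the cpumode values, so a GUEST_USER sample (cpumode 5) also matches KERNEL (1) and GUEST_KERNEL (4), and several letters get printed for it. A stricter decode would switch on the masked cpumode instead:

#include <stdio.h>
#include <linux/perf_event.h>

/* Decode the cpumode exclusively via the uapi mask. */
static void print_cpumode_letter(unsigned short misc, FILE *fp)
{
	switch (misc & PERF_RECORD_MISC_CPUMODE_MASK) {
	case PERF_RECORD_MISC_KERNEL:		fputc('K', fp); break;
	case PERF_RECORD_MISC_USER:		fputc('U', fp); break;
	case PERF_RECORD_MISC_HYPERVISOR:	fputc('H', fp); break;
	case PERF_RECORD_MISC_GUEST_KERNEL:	fputc('G', fp); break;
	case PERF_RECORD_MISC_GUEST_USER:	fputc('g', fp); break;
	}
}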
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 59af5a8419e2..98bf9d32f222 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -63,7 +63,6 @@
#include "util/group.h"
#include "util/session.h"
#include "util/tool.h"
-#include "util/group.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "asm/bug.h"
@@ -214,8 +213,13 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a,
static void perf_stat__reset_stats(void)
{
+ int i;
+
perf_evlist__reset_stats(evsel_list);
perf_stat__reset_shadow_stats();
+
+ for (i = 0; i < stat_config.stats_num; i++)
+ perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
}
static int create_perf_stat_counter(struct perf_evsel *evsel)
@@ -272,7 +276,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
attr->enable_on_exec = 1;
}
- if (target__has_cpu(&target))
+ if (target__has_cpu(&target) && !target__has_per_thread(&target))
return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
return perf_evsel__open_per_thread(evsel, evsel_list->threads);
@@ -335,7 +339,7 @@ static int read_counter(struct perf_evsel *counter)
int nthreads = thread_map__nr(evsel_list->threads);
int ncpus, cpu, thread;
- if (target__has_cpu(&target))
+ if (target__has_cpu(&target) && !target__has_per_thread(&target))
ncpus = perf_evsel__nr_cpus(counter);
else
ncpus = 1;
@@ -458,19 +462,8 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf
workload_exec_errno = info->si_value.sival_int;
}
-static bool has_unit(struct perf_evsel *counter)
-{
- return counter->unit && *counter->unit;
-}
-
-static bool has_scale(struct perf_evsel *counter)
-{
- return counter->scale != 1;
-}
-
static int perf_stat_synthesize_config(bool is_pipe)
{
- struct perf_evsel *counter;
int err;
if (is_pipe) {
@@ -482,53 +475,10 @@ static int perf_stat_synthesize_config(bool is_pipe)
}
}
- /*
- * Synthesize other events stuff not carried within
- * attr event - unit, scale, name
- */
- evlist__for_each_entry(evsel_list, counter) {
- if (!counter->supported)
- continue;
-
- /*
- * Synthesize unit and scale only if it's defined.
- */
- if (has_unit(counter)) {
- err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel unit.\n");
- return err;
- }
- }
-
- if (has_scale(counter)) {
- err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel scale.\n");
- return err;
- }
- }
-
- if (counter->own_cpus) {
- err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel scale.\n");
- return err;
- }
- }
-
- /*
- * Name is needed only for pipe output,
- * perf.data carries event names.
- */
- if (is_pipe) {
- err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel name.\n");
- return err;
- }
- }
- }
+ err = perf_event__synthesize_extra_attr(NULL,
+ evsel_list,
+ process_synthesized_event,
+ is_pipe);
err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
process_synthesized_event,
@@ -1151,7 +1101,8 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
}
static void printout(int id, int nr, struct perf_evsel *counter, double uval,
- char *prefix, u64 run, u64 ena, double noise)
+ char *prefix, u64 run, u64 ena, double noise,
+ struct runtime_stat *st)
{
struct perf_stat_output_ctx out;
struct outstate os = {
@@ -1244,7 +1195,7 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
perf_stat__print_shadow_stats(counter, uval,
first_shadow_cpu(counter, id),
- &out, &metric_events);
+ &out, &metric_events, st);
if (!csv_output && !metric_only) {
print_noise(counter, noise);
print_running(run, ena);
@@ -1268,7 +1219,8 @@ static void aggr_update_shadow(void)
val += perf_counts(counter->counts, cpu, 0)->val;
}
perf_stat__update_shadow_stats(counter, val,
- first_shadow_cpu(counter, id));
+ first_shadow_cpu(counter, id),
+ &rt_stat);
}
}
}
@@ -1388,7 +1340,8 @@ static void print_aggr(char *prefix)
fprintf(output, "%s", prefix);
uval = val * counter->scale;
- printout(id, nr, counter, uval, prefix, run, ena, 1.0);
+ printout(id, nr, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
if (!metric_only)
fputc('\n', output);
}
@@ -1397,13 +1350,24 @@ static void print_aggr(char *prefix)
}
}
-static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
+static int cmp_val(const void *a, const void *b)
{
- FILE *output = stat_config.output;
- int nthreads = thread_map__nr(counter->threads);
- int ncpus = cpu_map__nr(counter->cpus);
- int cpu, thread;
+ return ((struct perf_aggr_thread_value *)b)->val -
+ ((struct perf_aggr_thread_value *)a)->val;
+}
+
+static struct perf_aggr_thread_value *sort_aggr_thread(
+ struct perf_evsel *counter,
+ int nthreads, int ncpus,
+ int *ret)
+{
+ int cpu, thread, i = 0;
double uval;
+ struct perf_aggr_thread_value *buf;
+
+ buf = calloc(nthreads, sizeof(struct perf_aggr_thread_value));
+ if (!buf)
+ return NULL;
for (thread = 0; thread < nthreads; thread++) {
u64 ena = 0, run = 0, val = 0;
@@ -1414,13 +1378,63 @@ static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
run += perf_counts(counter->counts, cpu, thread)->run;
}
+ uval = val * counter->scale;
+
+ /*
+ * Skip value 0 when enabling --per-thread globally,
+ * otherwise the output would be flooded with zeroes.
+ */
+ if (uval == 0.0 && target__has_per_thread(&target))
+ continue;
+
+ buf[i].counter = counter;
+ buf[i].id = thread;
+ buf[i].uval = uval;
+ buf[i].val = val;
+ buf[i].run = run;
+ buf[i].ena = ena;
+ i++;
+ }
+
+ qsort(buf, i, sizeof(struct perf_aggr_thread_value), cmp_val);
+
+ if (ret)
+ *ret = i;
+
+ return buf;
+}
+
+static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
+{
+ FILE *output = stat_config.output;
+ int nthreads = thread_map__nr(counter->threads);
+ int ncpus = cpu_map__nr(counter->cpus);
+ int thread, sorted_threads, id;
+ struct perf_aggr_thread_value *buf;
+
+ buf = sort_aggr_thread(counter, nthreads, ncpus, &sorted_threads);
+ if (!buf) {
+ perror("cannot sort aggr thread");
+ return;
+ }
+
+ for (thread = 0; thread < sorted_threads; thread++) {
if (prefix)
fprintf(output, "%s", prefix);
- uval = val * counter->scale;
- printout(thread, 0, counter, uval, prefix, run, ena, 1.0);
+ id = buf[thread].id;
+ if (stat_config.stats)
+ printout(id, 0, buf[thread].counter, buf[thread].uval,
+ prefix, buf[thread].run, buf[thread].ena, 1.0,
+ &stat_config.stats[id]);
+ else
+ printout(id, 0, buf[thread].counter, buf[thread].uval,
+ prefix, buf[thread].run, buf[thread].ena, 1.0,
+ &rt_stat);
fputc('\n', output);
}
+
+ free(buf);
}
struct caggr_data {
@@ -1455,7 +1469,8 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s", prefix);
uval = cd.avg * counter->scale;
- printout(-1, 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled, cd.avg);
+ printout(-1, 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled,
+ cd.avg, &rt_stat);
if (!metric_only)
fprintf(output, "\n");
}
@@ -1494,7 +1509,8 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s", prefix);
uval = val * counter->scale;
- printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
+ printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
fputc('\n', output);
}
@@ -1526,7 +1542,8 @@ static void print_no_aggr_metric(char *prefix)
run = perf_counts(counter->counts, cpu, 0)->run;
uval = val * counter->scale;
- printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
+ printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
}
fputc('\n', stat_config.output);
}
@@ -1582,7 +1599,8 @@ static void print_metric_headers(const char *prefix, bool no_indent)
perf_stat__print_shadow_stats(counter, 0,
0,
&out,
- &metric_events);
+ &metric_events,
+ &rt_stat);
}
fputc('\n', stat_config.output);
}
@@ -2541,6 +2559,35 @@ int process_cpu_map_event(struct perf_tool *tool,
return set_maps(st);
}
+static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
+{
+ int i;
+
+ config->stats = calloc(nthreads, sizeof(struct runtime_stat));
+ if (!config->stats)
+ return -1;
+
+ config->stats_num = nthreads;
+
+ for (i = 0; i < nthreads; i++)
+ runtime_stat__init(&config->stats[i]);
+
+ return 0;
+}
+
+static void runtime_stat_delete(struct perf_stat_config *config)
+{
+ int i;
+
+ if (!config->stats)
+ return;
+
+ for (i = 0; i < config->stats_num; i++)
+ runtime_stat__exit(&config->stats[i]);
+
+ free(config->stats);
+}
+
static const char * const stat_report_usage[] = {
"perf stat report [<options>]",
NULL,
@@ -2750,12 +2797,16 @@ int cmd_stat(int argc, const char **argv)
run_count = 1;
}
- if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
- fprintf(stderr, "The --per-thread option is only available "
- "when monitoring via -p -t options.\n");
- parse_options_usage(NULL, stat_options, "p", 1);
- parse_options_usage(NULL, stat_options, "t", 1);
- goto out;
+ if ((stat_config.aggr_mode == AGGR_THREAD) &&
+ !target__has_task(&target)) {
+ if (!target.system_wide || target.cpu_list) {
+ fprintf(stderr, "The --per-thread option is only "
+ "available when monitoring via -p -t -a "
+ "options or only --per-thread.\n");
+ parse_options_usage(NULL, stat_options, "p", 1);
+ parse_options_usage(NULL, stat_options, "t", 1);
+ goto out;
+ }
}
/*
@@ -2779,6 +2830,9 @@ int cmd_stat(int argc, const char **argv)
target__validate(&target);
+ if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
+ target.per_thread = true;
+
if (perf_evlist__create_maps(evsel_list, &target) < 0) {
if (target__has_task(&target)) {
pr_err("Problems finding threads of monitor\n");
@@ -2796,8 +2850,15 @@ int cmd_stat(int argc, const char **argv)
* Initialize thread_map with comm names,
* so we could print it out on output.
*/
- if (stat_config.aggr_mode == AGGR_THREAD)
+ if (stat_config.aggr_mode == AGGR_THREAD) {
thread_map__read_comms(evsel_list->threads);
+ if (target.system_wide) {
+ if (runtime_stat_new(&stat_config,
+ thread_map__nr(evsel_list->threads))) {
+ goto out;
+ }
+ }
+ }
if (interval && interval < 100) {
if (interval < 10) {
@@ -2887,5 +2948,8 @@ out:
sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
perf_evlist__delete(evsel_list);
+
+ runtime_stat_delete(&stat_config);
+
return status;
}
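
One aside on cmp_val() above: returning the difference of two u64 values as int can overflow and truncate, misordering large run counts; a comparison that cannot overflow is:

#include <stdint.h>

/* qsort-style result, descending order, safe for any pair of u64s */
static int cmp_u64_desc(uint64_t a, uint64_t b)
{
	return (a < b) - (a > b);
}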
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 9e0d2645ae13..c6ccda52117d 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -99,6 +99,7 @@ static void perf_top__resize(struct perf_top *top)
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct symbol *sym;
struct annotation *notes;
struct map *map;
@@ -137,7 +138,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__disassemble(sym, map, NULL, 0, NULL, NULL);
+ err = symbol__annotate(sym, map, evsel, 0, NULL);
if (err == 0) {
out_assign:
top->sym_filter_entry = he;
@@ -229,6 +230,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
static void perf_top__show_details(struct perf_top *top)
{
struct hist_entry *he = top->sym_filter_entry;
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct annotation *notes;
struct symbol *symbol;
int more;
@@ -241,6 +243,8 @@ static void perf_top__show_details(struct perf_top *top)
pthread_mutex_lock(&notes->lock);
+ symbol__calc_percent(symbol, evsel);
+
if (notes->src == NULL)
goto out_unlock;
@@ -412,7 +416,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
fprintf(stdout, "\t[S] stop annotation.\n");
fprintf(stdout,
- "\t[K] hide kernel_symbols symbols. \t(%s)\n",
+ "\t[K] hide kernel symbols. \t(%s)\n",
top->hide_kernel_symbols ? "yes" : "no");
fprintf(stdout,
"\t[U] hide user symbols. \t(%s)\n",
@@ -903,7 +907,7 @@ try_again:
}
}
- if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
+ if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
ui__error("Failed to mmap with %d (%s)\n",
errno, str_error_r(errno, msg, sizeof(msg)));
goto out_err;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 84debdbad327..17d11deeb88d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -21,6 +21,7 @@
#include "builtin.h"
#include "util/color.h"
#include "util/debug.h"
+#include "util/env.h"
#include "util/event.h"
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
@@ -45,18 +46,17 @@
#include <errno.h>
#include <inttypes.h>
-#include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
-#include <linux/audit.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
+#include <fcntl.h>
#include "sane_ctype.h"
@@ -111,6 +111,7 @@ struct trace {
bool summary;
bool summary_only;
bool show_comm;
+ bool print_sample;
bool show_tool_stats;
bool trace_syscalls;
bool kernel_syscallchains;
@@ -545,9 +546,10 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
{ .scnprintf = SCA_STRARRAY, \
.parm = &strarray__##array, }
+#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
-#include "trace/beauty/flock.c"
#include "trace/beauty/futex_op.c"
+#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
@@ -610,7 +612,8 @@ static struct syscall_fmt {
{ .name = "fstat", .alias = "newfstat", },
{ .name = "fstatat", .alias = "newfstatat", },
{ .name = "futex",
- .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ }, }, },
+ .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
+ [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
{ .name = "futimesat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "getitimer",
@@ -622,6 +625,7 @@ static struct syscall_fmt {
.arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
{ .name = "getrlimit",
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
+ { .name = "gettid", .errpid = true, },
{ .name = "ioctl",
.arg = {
#if defined(__i386__) || defined(__x86_64__)
@@ -819,7 +823,7 @@ static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
size_t printed = fprintf(fp, "(");
if (!calculated)
- printed += fprintf(fp, " ? ");
+ printed += fprintf(fp, " ");
else if (duration >= 1.0)
printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
else if (duration >= 0.01)
@@ -1554,10 +1558,9 @@ static void thread__update_stats(struct thread_trace *ttrace,
update_stats(stats, duration);
}
-static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
+static int trace__printf_interrupted_entry(struct trace *trace)
{
struct thread_trace *ttrace;
- u64 duration;
size_t printed;
if (trace->current == NULL)
@@ -1568,15 +1571,30 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp
if (!ttrace->entry_pending)
return 0;
- duration = sample->time - ttrace->entry_time;
-
- printed = trace__fprintf_entry_head(trace, trace->current, duration, true, ttrace->entry_time, trace->output);
+ printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
ttrace->entry_pending = false;
return printed;
}
+static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct thread *thread)
+{
+ int printed = 0;
+
+ if (trace->print_sample) {
+ double ts = (double)sample->time / NSEC_PER_MSEC;
+
+ printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
+ perf_evsel__name(evsel), ts,
+ thread__comm_str(thread),
+ sample->pid, sample->tid, sample->cpu);
+ }
+
+ return printed;
+}
+
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -1597,6 +1615,8 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (ttrace == NULL)
goto out_put;
+ trace__fprintf_sample(trace, evsel, sample, thread);
+
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
if (ttrace->entry_str == NULL) {
@@ -1606,7 +1626,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
}
if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
- trace__printf_interrupted_entry(trace, sample);
+ trace__printf_interrupted_entry(trace);
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
@@ -1643,7 +1663,7 @@ static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evse
struct addr_location al;
if (machine__resolve(trace->host, &al, sample) < 0 ||
- thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
+ thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, evsel->attr.sample_max_stack))
return -1;
return 0;
@@ -1659,6 +1679,14 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
}
+static const char *errno_to_name(struct perf_evsel *evsel, int err)
+{
+ struct perf_env *env = perf_evsel__env(evsel);
+ const char *arch_name = perf_env__arch(env);
+
+ return arch_syscalls__strerrno(arch_name, err);
+}
+
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -1679,6 +1707,8 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (ttrace == NULL)
goto out_put;
+ trace__fprintf_sample(trace, evsel, sample, thread);
+
if (trace->summary)
thread__update_stats(ttrace, id, sample);
@@ -1729,7 +1759,7 @@ signed_print:
errno_print: {
char bf[STRERR_BUFSIZE];
const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
- *e = audit_errno_to_name(-ret);
+ *e = errno_to_name(evsel, -ret);
fprintf(trace->output, ") = -1 %s %s", e, emsg);
}
@@ -1910,7 +1940,7 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
}
}
- trace__printf_interrupted_entry(trace, sample);
+ trace__printf_interrupted_entry(trace);
trace__fprintf_tstamp(trace, sample->time, trace->output);
if (trace->trace_syscalls)
@@ -2221,6 +2251,9 @@ static int trace__add_syscall_newtp(struct trace *trace)
if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
goto out_delete_sys_exit;
+ perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
+ perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
+
perf_evlist__add(evlist, sys_enter);
perf_evlist__add(evlist, sys_exit);
@@ -2317,6 +2350,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
if (pgfault_maj == NULL)
goto out_error_mem;
+ perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
perf_evlist__add(evlist, pgfault_maj);
}
@@ -2324,6 +2358,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
if (pgfault_min == NULL)
goto out_error_mem;
+ perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
perf_evlist__add(evlist, pgfault_min);
}
@@ -2344,45 +2379,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_delete_evlist;
}
- perf_evlist__config(evlist, &trace->opts, NULL);
-
- if (callchain_param.enabled) {
- bool use_identifier = false;
-
- if (trace->syscalls.events.sys_exit) {
- perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
- &trace->opts, &callchain_param);
- use_identifier = true;
- }
-
- if (pgfault_maj) {
- perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
- use_identifier = true;
- }
-
- if (pgfault_min) {
- perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
- use_identifier = true;
- }
-
- if (use_identifier) {
- /*
- * Now we have evsels with different sample_ids, use
- * PERF_SAMPLE_IDENTIFIER to map from sample to evsel
- * from a fixed position in each ring buffer record.
- *
- * As of this the changeset introducing this comment, this
- * isn't strictly needed, as the fields that can come before
- * PERF_SAMPLE_ID are all used, but we'll probably disable
- * some of those for things like copying the payload of
- * pointer syscall arguments, and for vfs_getname we don't
- * need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
- * here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
- */
- perf_evlist__set_sample_bit(evlist, IDENTIFIER);
- perf_evlist__reset_sample_bit(evlist, ID);
- }
- }
+ perf_evlist__config(evlist, &trace->opts, &callchain_param);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
@@ -2437,7 +2434,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_apply_filters;
- err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
+ err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
@@ -2455,6 +2452,18 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
evlist->threads->nr > 1 ||
perf_evlist__first(evlist)->attr.inherit;
+
+ /*
+ * Now that evsel->attr has already been used to ask the kernel to set up
+ * the events, let's reuse evsel->attr.sample_max_stack as the limit in
+ * trace__resolve_callchain(), allowing per-event max-stack settings
+ * to override an explicitly set --max-stack global setting.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) &&
+ evsel->attr.sample_max_stack == 0)
+ evsel->attr.sample_max_stack = trace->max_stack;
+ }
again:
before = trace->nr_events;
@@ -3046,6 +3055,8 @@ int cmd_trace(int argc, const char **argv)
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
+ OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
+ "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
@@ -3097,8 +3108,9 @@ int cmd_trace(int argc, const char **argv)
}
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
+ if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
+ }
#endif
if (callchain_param.enabled) {
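
errno_to_name() above replaces audit_errno_to_name(), using per-arch tables generated from the uapi errno headers (the ones newly listed in check-headers.sh below) and selected by the arch recorded in the perf.data header. What one generated lookup boils down to, with illustrative values only:

/* Sketch; the real tables are generated at build time, per arch. */
static const char *errno_name_generic(int err)
{
	switch (err) {
	case 1:  return "EPERM";
	case 2:  return "ENOENT";
	case 11: return "EAGAIN";	/* 35 on alpha, hence per-arch tables */
	default: return "(unknown)";
	}
}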
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 3e64f10b6d66..51abdb0a4047 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -33,21 +33,30 @@ arch/s390/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm_perf.h
arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sie.h
+arch/s390/include/uapi/asm/unistd.h
arch/arm/include/uapi/asm/kvm.h
arch/arm64/include/uapi/asm/kvm.h
+arch/alpha/include/uapi/asm/errno.h
+arch/mips/include/asm/errno.h
+arch/mips/include/uapi/asm/errno.h
+arch/parisc/include/uapi/asm/errno.h
+arch/powerpc/include/uapi/asm/errno.h
+arch/sparc/include/uapi/asm/errno.h
+arch/x86/include/uapi/asm/errno.h
include/asm-generic/bitops/arch_hweight.h
include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
include/asm-generic/bitops/fls64.h
include/linux/coresight-pmu.h
+include/uapi/asm-generic/errno.h
+include/uapi/asm-generic/errno-base.h
include/uapi/asm-generic/ioctls.h
include/uapi/asm-generic/mman-common.h
'
check () {
file=$1
- opts="--ignore-blank-lines --ignore-space-change"
shift
while [ -n "$*" ]; do
diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
index 345f5d6e9ed5..fdf75d45efff 100644
--- a/tools/perf/perf-completion.sh
+++ b/tools/perf/perf-completion.sh
@@ -162,8 +162,37 @@ __perf_main ()
# List possible events for -e option
elif [[ $prev == @("-e"|"--event") &&
$prev_skip_opts == @(record|stat|top) ]]; then
- evts=$($cmd list --raw-dump)
- __perfcomp_colon "$evts" "$cur"
+
+ local cur1=${COMP_WORDS[COMP_CWORD]}
+ local raw_evts=$($cmd list --raw-dump)
+ local arr s tmp result
+
+ if [[ "$cur1" == */* && ${cur1#*/} =~ ^[A-Z] ]]; then
+ OLD_IFS="$IFS"
+ IFS=" "
+ arr=($raw_evts)
+ IFS="$OLD_IFS"
+
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == *cpu/* ]]; then
+ tmp=${s#*cpu/}
+ result=$result" ""cpu/"${tmp^^}
+ else
+ result=$result" "$s
+ fi
+ done
+
+ evts=${result}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ else
+ evts=${raw_evts}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ fi
+
+ if [[ "$cur1" == , ]]; then
+ __perfcomp_colon "$evts" ""
+ else
+ __perfcomp_colon "$evts" "$cur1"
+ fi
else
# List subcommands for perf commands
if [[ $prev_skip_opts == @(kvm|kmem|mem|lock|sched|
@@ -246,11 +275,21 @@ fi
type perf &>/dev/null &&
_perf()
{
+ if [[ "$COMP_WORDBREAKS" != *,* ]]; then
+ COMP_WORDBREAKS="${COMP_WORDBREAKS},"
+ export COMP_WORDBREAKS
+ fi
+
+ if [[ "$COMP_WORDBREAKS" == *:* ]]; then
+ COMP_WORDBREAKS="${COMP_WORDBREAKS/:/}"
+ export COMP_WORDBREAKS
+ fi
+
local cur words cword prev
if [ $preload_get_comp_words_by_ref = "true" ]; then
- _get_comp_words_by_ref -n =: cur words cword prev
+ _get_comp_words_by_ref -n =:, cur words cword prev
else
- __perf_get_comp_words_by_ref -n =: cur words cword prev
+ __perf_get_comp_words_by_ref -n =:, cur words cword prev
fi
__perf_main
} &&
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 62b13518bc6e..1b3fc8ec0fa2 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -73,7 +73,7 @@ static struct cmd_struct commands[] = {
{ "lock", cmd_lock, 0 },
{ "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 },
-#ifdef HAVE_LIBAUDIT_SUPPORT
+#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)
{ "trace", cmd_trace, 0 },
#endif
{ "inject", cmd_inject, 0 },
@@ -485,7 +485,7 @@ int main(int argc, const char **argv)
argv[0] = cmd;
}
if (strstarts(cmd, "trace")) {
-#ifdef HAVE_LIBAUDIT_SUPPORT
+#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)
setup_path();
argv[0] = "trace";
return cmd_trace(argc, argv);
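Both perf.c hunks relax the same guard: 'perf trace' is now registered when either libaudit or the generated syscall table (HAVE_SYSCALL_TABLE) is available, so architectures without libaudit keep the command. From the shell nothing changes; a couple of hedged usage examples (workloads illustrative, and tracing typically needs sufficient privileges):

    perf trace -e openat -- cat /etc/hostname
    perf trace --max-stack 8 --call-graph dwarf -- sleep 1

The second invocation also exercises the builtin-trace.c change at the top of this patch, where DWARF callchains are now set up whenever --min-stack/--max-stack are given without an explicit callchain mode, not only when syscalls are being traced.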
diff --git a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json
new file mode 100644
index 000000000000..2db45c40ebc7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json
@@ -0,0 +1,62 @@
+[
+ {
+ "PublicDescription": "Attributable Level 1 data cache access, read",
+ "EventCode": "0x40",
+ "EventName": "l1d_cache_rd",
+ "BriefDescription": "L1D cache read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache access, write ",
+ "EventCode": "0x41",
+ "EventName": "l1d_cache_wr",
+ "BriefDescription": "L1D cache write",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache refill, read",
+ "EventCode": "0x42",
+ "EventName": "l1d_cache_refill_rd",
+ "BriefDescription": "L1D cache refill read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache refill, write",
+ "EventCode": "0x43",
+ "EventName": "l1d_cache_refill_wr",
+ "BriefDescription": "L1D refill write",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data TLB refill, read",
+ "EventCode": "0x4C",
+ "EventName": "l1d_tlb_refill_rd",
+ "BriefDescription": "L1D tlb refill read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data TLB refill, write",
+ "EventCode": "0x4D",
+ "EventName": "l1d_tlb_refill_wr",
+ "BriefDescription": "L1D tlb refill write",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data or unified TLB access, read",
+ "EventCode": "0x4E",
+ "EventName": "l1d_tlb_rd",
+ "BriefDescription": "L1D tlb read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data or unified TLB access, write",
+ "EventCode": "0x4F",
+ "EventName": "l1d_tlb_wr",
+ "BriefDescription": "L1D tlb write",
+ },
+ {
+ "PublicDescription": "Bus access read",
+ "EventCode": "0x60",
+ "EventName": "bus_access_rd",
+ "BriefDescription": "Bus access read",
+ },
+ {
+ "PublicDescription": "Bus access write",
+ "EventCode": "0x61",
+ "EventName": "bus_access_wr",
+ "BriefDescription": "Bus access write",
+ }
+]
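With the JSON above plus the mapfile entry that follows, these implementation-defined events become named aliases on ThunderX2. A hedged smoke test on such a box (raw fallback shown for comparison; rNN takes the EventCode):

    perf list | grep l1d_cache_rd
    perf stat -e l1d_cache_rd,l1d_cache_wr -- sleep 1
    perf stat -e r40 -- sleep 1     # same counter by raw EventCode 0x40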
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
new file mode 100644
index 000000000000..219d6756134e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -0,0 +1,15 @@
+# Format:
+# MIDR,Version,JSON/file/pathname,Type
+#
+# where
+# MIDR Processor version
+# Variant[23:20] and Revision[3:0] should be zero.
+# Version could be used to track the version of the JSON file,
+# but it is currently unused.
+# JSON/file/pathname is the path to the JSON file, relative
+# to tools/perf/pmu-events/arch/arm64/.
+# Type is core, uncore, etc.
+#
+#
+#Family-model,Version,Filename,EventType
+0x00000000420f5160,v1,cavium,core
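Per the comment block, matching masks out Variant[23:20] and Revision[3:0] before comparing against the first column. A hypothetical shell rendering of that lookup, assuming the arm64 sysfs MIDR file is present:

    midr=$(cat /sys/devices/system/cpu/cpu0/regs/identification/midr_el1)
    masked=$(printf '0x%016x' $(( midr & ~0xf0000f )))
    grep "^$masked," tools/perf/pmu-events/arch/arm64/mapfile.csv

On a ThunderX2 the masked value comes out as 0x00000000420f5160, selecting the cavium file added above.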
diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
index a0f3a11ca19f..229150e7ab7d 100644
--- a/tools/perf/pmu-events/arch/powerpc/mapfile.csv
+++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
@@ -13,13 +13,5 @@
#
# Power8 entries
-004b0000,1,power8,core
-004b0201,1,power8,core
-004c0000,1,power8,core
-004d0000,1,power8,core
-004d0100,1,power8,core
-004d0200,1,power8,core
-004c0100,1,power8,core
-004e0100,1,power9,core
-004e0200,1,power9,core
-004e1200,1,power9,core
+004[bcd][[:xdigit:]]{4},1,power8,core
+004e[[:xdigit:]]{4},1,power9,core
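The ten literal PVR rows collapse into two POSIX extended regular expressions, so future Power8/Power9 revisions match without new entries. A quick spot-check against a few of the removed literals:

    for pvr in 004b0201 004c0100 004d0200; do
      echo "$pvr" | grep -Eq '^004[bcd][[:xdigit:]]{4}$' && echo "$pvr -> power8"
    done
    echo 004e1200 | grep -Eq '^004e[[:xdigit:]]{4}$' && echo '004e1200 -> power9'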
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/cache.json b/tools/perf/pmu-events/arch/powerpc/power9/cache.json
index 18f6645f2897..7945c5196c43 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/cache.json
@@ -125,11 +125,6 @@
"BriefDescription": "Finish stall because the NTF instruction was a larx waiting to be satisfied"
},
{,
- "EventCode": "0x3006C",
- "EventName": "PM_RUN_CYC_SMT2_MODE",
- "BriefDescription": "Cycles in which this thread's run latch is set and the core is in SMT2 mode"
- },
- {,
"EventCode": "0x1C058",
"EventName": "PM_DTLB_MISS_16G",
"BriefDescription": "Data TLB Miss page size 16G"
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/frontend.json b/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
index c63a919eda98..bd8361b5fd6a 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
@@ -1,10 +1,5 @@
[
{,
- "EventCode": "0x3E15C",
- "EventName": "PM_MRK_L2_TM_ST_ABORT_SISTER",
- "BriefDescription": "TM marked store abort for this thread"
- },
- {,
"EventCode": "0x25044",
"EventName": "PM_IPTEG_FROM_L31_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a instruction side request"
@@ -369,4 +364,4 @@
"EventName": "PM_IPTEG_FROM_L31_ECO_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a instruction side request"
}
-]
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/marked.json b/tools/perf/pmu-events/arch/powerpc/power9/marked.json
index b9df54fb37e3..22f9f32060a8 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/marked.json
@@ -1,10 +1,5 @@
[
{,
- "EventCode": "0x3C052",
- "EventName": "PM_DATA_SYS_PUMP_MPRED",
- "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load"
- },
- {,
"EventCode": "0x3013E",
"EventName": "PM_MRK_STALL_CMPLU_CYC",
"BriefDescription": "Number of cycles the marked instruction is experiencing a stall while it is next to complete (NTC)"
@@ -255,6 +250,11 @@
"BriefDescription": "A Page Directory Entry was reloaded to a level 1 page walk cache from the core's L3 data cache"
},
{,
+ "EventCode": "0x3C052",
+ "EventName": "PM_DATA_SYS_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load"
+ },
+ {,
"EventCode": "0x4D142",
"EventName": "PM_MRK_DATA_FROM_L3",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a marked load"
@@ -435,21 +435,6 @@
"BriefDescription": "ITLB Reloaded. Counts 1 per ITLB miss for HPT but multiple for radix depending on number of levels traveresed"
},
{,
- "EventCode": "0x2D024",
- "EventName": "PM_RADIX_PWC_L2_HIT",
- "BriefDescription": "A radix translation attempt missed in the TLB but hit on both the first and second levels of page walk cache."
- },
- {,
- "EventCode": "0x3F056",
- "EventName": "PM_RADIX_PWC_L3_HIT",
- "BriefDescription": "A radix translation attempt missed in the TLB but hit on the first, second, and third levels of page walk cache."
- },
- {,
- "EventCode": "0x4E014",
- "EventName": "PM_TM_TX_PASS_RUN_INST",
- "BriefDescription": "Run instructions spent in successful transactions"
- },
- {,
"EventCode": "0x1E044",
"EventName": "PM_DPTEG_FROM_L3_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
@@ -644,4 +629,4 @@
"EventName": "PM_MRK_BR_MPRED_CMPL",
"BriefDescription": "Marked Branch Mispredicted"
}
-]
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
index 54cc3be00fc2..5ce312973f1e 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
@@ -80,6 +80,11 @@
"BriefDescription": "A radix translation attempt missed in the TLB and all levels of page walk cache."
},
{,
+ "EventCode": "0x26882",
+ "EventName": "PM_L2_DC_INV",
+ "BriefDescription": "D-cache invalidates sent over the reload bus to the core"
+ },
+ {,
"EventCode": "0x24048",
"EventName": "PM_INST_FROM_LMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to an instruction fetch (not prefetch)"
@@ -95,11 +100,6 @@
"BriefDescription": "Number of TM transactions that passed"
},
{,
- "EventCode": "0xD1A0",
- "EventName": "PM_MRK_LSU_FLUSH_LHS",
- "BriefDescription": "Effective Address alias flush : no EA match but Real Address match. If the data has not yet been returned for this load, the instruction will just be rejected, but if it has returned data, it will be flushed"
- },
- {,
"EventCode": "0xF088",
"EventName": "PM_LSU0_STORE_REJECT",
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
@@ -127,7 +127,7 @@
{,
"EventCode": "0xD08C",
"EventName": "PM_LSU2_LDMX_FIN",
- "BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])"
+ "BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])."
},
{,
"EventCode": "0x300F8",
@@ -205,11 +205,6 @@
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's ECO L3 on the same chip due to a marked load"
},
{,
- "EventCode": "0xF0B4",
- "EventName": "PM_DC_PREF_CONS_ALLOC",
- "BriefDescription": "Prefetch stream allocated in the conservative phase by either the hardware prefetch mechanism or software prefetch"
- },
- {,
"EventCode": "0xF894",
"EventName": "PM_LSU3_L1_CAM_CANCEL",
"BriefDescription": "ls3 l1 tm cam cancel"
@@ -220,21 +215,11 @@
"BriefDescription": "Dispatch Flush: TLBIE"
},
{,
- "EventCode": "0xD1A4",
- "EventName": "PM_MRK_LSU_FLUSH_SAO",
- "BriefDescription": "A load-hit-load condition with Strong Address Ordering will have address compare disabled and flush"
- },
- {,
"EventCode": "0x4E11E",
"EventName": "PM_MRK_DATA_FROM_DMEM_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group (Distant) due to a marked load"
},
{,
- "EventCode": "0x5894",
- "EventName": "PM_LWSYNC",
- "BriefDescription": "Lwsync instruction decoded and transferred"
- },
- {,
"EventCode": "0x14156",
"EventName": "PM_MRK_DATA_FROM_L2_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 due to a marked load"
@@ -245,11 +230,6 @@
"BriefDescription": "Read clearing SC"
},
{,
- "EventCode": "0x50A0",
- "EventName": "PM_HWSYNC",
- "BriefDescription": "Hwsync instruction decoded and transferred"
- },
- {,
"EventCode": "0x168B0",
"EventName": "PM_L3_P1_NODE_PUMP",
"BriefDescription": "L3 PF sent with nodal scope port 1, counts even retried requests"
@@ -265,6 +245,11 @@
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a marked load"
},
{,
+ "EventCode": "0x468AE",
+ "EventName": "PM_L3_P3_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 3 (memory only), every retry counted"
+ },
+ {,
"EventCode": "0x460A8",
"EventName": "PM_SN_HIT",
"BriefDescription": "Any port snooper hit L3. Up to 4 can happen in a cycle but we only count 1"
@@ -280,11 +265,6 @@
"BriefDescription": "Prefetch stream allocated by the hardware prefetch mechanism"
},
{,
- "EventCode": "0xF0BC",
- "EventName": "PM_LS2_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0xD0AC",
"EventName": "PM_SRQ_SYNC_CYC",
"BriefDescription": "A sync is in the S2Q (edge detect to count)"
@@ -380,26 +360,11 @@
"BriefDescription": "Cycles in which this thread's run latch is set and the core is in SMT4 mode"
},
{,
- "EventCode": "0x5088",
- "EventName": "PM_DECODE_FUSION_OP_PRESERV",
- "BriefDescription": "Destructive op operand preservation"
- },
- {,
"EventCode": "0x1D14E",
"EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC",
"BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load"
},
{,
- "EventCode": "0x509C",
- "EventName": "PM_FORCED_NOP",
- "BriefDescription": "Instruction was forced to execute as a nop because it was found to behave like a nop (have no effect) at decode time"
- },
- {,
- "EventCode": "0xC098",
- "EventName": "PM_LS2_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x20058",
"EventName": "PM_DARQ1_10_12_ENTRIES",
"BriefDescription": "Cycles in which 10 or more DARQ1 entries (out of 12) are in use"
@@ -435,11 +400,6 @@
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
},
{,
- "EventCode": "0x4505E",
- "EventName": "PM_FLOP_CMPL",
- "BriefDescription": "Floating Point Operation Finished"
- },
- {,
"EventCode": "0x1D144",
"EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a marked load"
@@ -480,14 +440,9 @@
"BriefDescription": "XL-form branch was mispredicted due to the predicted target address missing from EAT. The EAT forces a mispredict in this case since there is no predicated target to validate. This is a rare case that may occur when the EAT is full and a branch is issued"
},
{,
- "EventCode": "0xC094",
- "EventName": "PM_LS0_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
- "EventCode": "0xF8BC",
- "EventName": "PM_LS3_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+ "EventCode": "0x460AE",
+ "EventName": "PM_L3_P2_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 2 (memory only), every retry counted"
},
{,
"EventCode": "0x58B0",
@@ -505,11 +460,6 @@
"BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)"
},
{,
- "EventCode": "0xD998",
- "EventName": "PM_MRK_LSU_FLUSH_EMSH",
- "BriefDescription": "An ERAT miss was detected after a set-p hit. Erat tracker indicates fail due to tlbmiss and the instruction gets flushed because the instruction was working on the wrong address"
- },
- {,
"EventCode": "0xF8A0",
"EventName": "PM_NON_DATA_STORE",
"BriefDescription": "All ops that drain from s2q to L2 and contain no data"
@@ -525,11 +475,6 @@
"BriefDescription": "Unconditional Branch Completed. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was covenrted to a Resolve."
},
{,
- "EventCode": "0x1F056",
- "EventName": "PM_RADIX_PWC_L1_HIT",
- "BriefDescription": "A radix translation attempt missed in the TLB and only the first level page walk cache was a hit."
- },
- {,
"EventCode": "0xF8A8",
"EventName": "PM_DC_PREF_FUZZY_CONF",
"BriefDescription": "A demand load referenced a line in an active fuzzy prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software.Fuzzy stream confirm (out of order effects, or pf cant keep up)"
@@ -545,6 +490,11 @@
"BriefDescription": "Load tm L1 miss"
},
{,
+ "EventCode": "0xC880",
+ "EventName": "PM_LS1_LD_VECTOR_FIN",
+ "BriefDescription": ""
+ },
+ {,
"EventCode": "0x2894",
"EventName": "PM_TM_OUTER_TEND",
"BriefDescription": "Completion time outer tend"
@@ -565,21 +515,11 @@
"BriefDescription": "Marked derat reload (miss) for any page size"
},
{,
- "EventCode": "0x160A0",
- "EventName": "PM_L3_PF_MISS_L3",
- "BriefDescription": "L3 PF missed in L3"
- },
- {,
"EventCode": "0x1C04A",
"EventName": "PM_DATA_FROM_RL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load"
},
{,
- "EventCode": "0xD99C",
- "EventName": "PM_MRK_LSU_FLUSH_UE",
- "BriefDescription": "Correctable ECC error on reload data, reported at critical data forward time"
- },
- {,
"EventCode": "0x268B0",
"EventName": "PM_L3_P1_GRP_PUMP",
"BriefDescription": "L3 PF sent with grp scope port 1, counts even retried requests"
@@ -630,11 +570,6 @@
"BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding"
},
{,
- "EventCode": "0x5884",
- "EventName": "PM_DECODE_LANES_NOT_AVAIL",
- "BriefDescription": "Decode has something to transmit but dispatch lanes are not available"
- },
- {,
"EventCode": "0x3C042",
"EventName": "PM_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a demand load"
@@ -690,9 +625,9 @@
"BriefDescription": "False LHS match detected"
},
{,
- "EventCode": "0xD9A4",
- "EventName": "PM_MRK_LSU_FLUSH_LARX_STCX",
- "BriefDescription": "A larx is flushed because an older larx has an LMQ reservation for the same thread. A stcx is flushed because an older stcx is in the LMQ. The flush happens when the older larx/stcx relaunches"
+ "EventCode": "0xF0B0",
+ "EventName": "PM_L3_LD_PREF",
+ "BriefDescription": "L3 load prefetch, sourced from a hardware or software stream, was sent to the nest"
},
{,
"EventCode": "0x4D012",
@@ -715,9 +650,9 @@
"BriefDescription": "All successful Ld/St dispatches for this thread that were an L2 miss (excludes i_l2mru_tch_reqs)"
},
{,
- "EventCode": "0xF8B8",
- "EventName": "PM_LS1_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+ "EventCode": "0x160A0",
+ "EventName": "PM_L3_PF_MISS_L3",
+ "BriefDescription": "L3 PF missed in L3"
},
{,
"EventCode": "0x408C",
@@ -765,11 +700,6 @@
"BriefDescription": "Completion time nested tend"
},
{,
- "EventCode": "0x36084",
- "EventName": "PM_L2_RCST_DISP",
- "BriefDescription": "All D-side store dispatch attempts for this thread"
- },
- {,
"EventCode": "0x368A0",
"EventName": "PM_L3_PF_OFF_CHIP_CACHE",
"BriefDescription": "L3 PF from Off chip cache"
@@ -830,11 +760,6 @@
"BriefDescription": "Rotating sample of 16 snoop valids"
},
{,
- "EventCode": "0x16084",
- "EventName": "PM_L2_RCLD_DISP",
- "BriefDescription": "All I-or-D side load dispatch attempts for this thread (excludes i_l2mru_tch_reqs)"
- },
- {,
"EventCode": "0x1608C",
"EventName": "PM_RC0_BUSY",
"BriefDescription": "RC mach 0 Busy. Used by PMU to sample ave RC lifetime (mach0 used as sample point)"
@@ -842,7 +767,7 @@
{,
"EventCode": "0x36082",
"EventName": "PM_L2_LD_DISP",
- "BriefDescription": "All successful I-or-D side load dispatches for this thread (excludes i_l2mru_tch_reqs)."
+ "BriefDescription": "All successful I-or-D side load dispatches for this thread (excludes i_l2mru_tch_reqs)"
},
{,
"EventCode": "0xF8B0",
@@ -905,11 +830,6 @@
"BriefDescription": "Instruction prefetch requests"
},
{,
- "EventCode": "0xC898",
- "EventName": "PM_LS3_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x488C",
"EventName": "PM_IC_PREF_WRITE",
"BriefDescription": "Instruction prefetch written into IL1"
@@ -1017,7 +937,7 @@
{,
"EventCode": "0x3E05E",
"EventName": "PM_L3_CO_MEPF",
- "BriefDescription": "L3 castouts in Mepf state for this thread"
+ "BriefDescription": "L3 CO of line in Mep state (includes casthrough to memory). The Mepf state indicates that a line was brought in to satisfy an L3 prefetch request"
},
{,
"EventCode": "0x460A2",
@@ -1205,11 +1125,6 @@
"BriefDescription": "Non transactional conflict from LSU, gets reported to TEXASR"
},
{,
- "EventCode": "0xD198",
- "EventName": "PM_MRK_LSU_FLUSH_ATOMIC",
- "BriefDescription": "Quad-word loads (lq) are considered atomic because they always span at least 2 slices. If a snoop or store from another thread changes the data the load is accessing between the 2 or 3 pieces of the lq instruction, the lq will be flushed"
- },
- {,
"EventCode": "0x201E0",
"EventName": "PM_MRK_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load"
@@ -1295,11 +1210,6 @@
"BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF; CR; XVF (XER/VSCR/FPSCR)"
},
{,
- "EventCode": "0xC894",
- "EventName": "PM_LS1_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x360A2",
"EventName": "PM_L3_L2_CO_HIT",
"BriefDescription": "L2 CO hits"
@@ -1325,11 +1235,6 @@
"BriefDescription": "L2 Castouts - Shared (Tx,Sx)"
},
{,
- "EventCode": "0xD884",
- "EventName": "PM_LSU3_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0x26092",
"EventName": "PM_L2_LD_MISS_64B",
"BriefDescription": "All successful D-side load dispatches that were an L2 miss (NOT Sx,Tx,Mx) for this thread and the RC calculated the request should be for 64B(i.e., M=1)"
@@ -1362,12 +1267,12 @@
{,
"EventCode": "0xD8A8",
"EventName": "PM_ISLB_MISS",
- "BriefDescription": "Instruction SLB miss - Total of all segment sizes"
+ "BriefDescription": "Instruction SLB Miss - Total of all segment sizes"
},
{,
- "EventCode": "0xD19C",
- "EventName": "PM_MRK_LSU_FLUSH_RELAUNCH_MISS",
- "BriefDescription": "If a load that has already returned data and has to relaunch for any reason then gets a miss (erat, setp, data cache), it will often be flushed at relaunch time because the data might be inconsistent"
+ "EventCode": "0x368AE",
+ "EventName": "PM_L3_P1_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 1 (memory only), every retry counted"
},
{,
"EventCode": "0x260A2",
@@ -1385,6 +1290,11 @@
"BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tbegin. This is a short delay, and it includes ROT"
},
{,
+ "EventCode": "0xC084",
+ "EventName": "PM_LS2_LD_VECTOR_FIN",
+ "BriefDescription": ""
+ },
+ {,
"EventCode": "0x1608E",
"EventName": "PM_ST_CAUSED_FAIL",
"BriefDescription": "Non-TM Store caused any thread to fail"
@@ -1410,11 +1320,6 @@
"BriefDescription": "Continuous 16 cycle (2to1) window where this signals rotates thru sampling each CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running"
},
{,
- "EventCode": "0xD084",
- "EventName": "PM_LSU2_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0x48B8",
"EventName": "PM_BR_MPRED_TAKEN_TA",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Target Address Prediction from the Count Cache or Link Stack. Only XL-form branches that resolved Taken set this event."
@@ -1450,29 +1355,24 @@
"BriefDescription": "A demand load referenced a line in an active strided prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software."
},
{,
+ "EventCode": "0x36084",
+ "EventName": "PM_L2_RCST_DISP",
+ "BriefDescription": "All D-side store dispatch attempts for this thread"
+ },
+ {,
"EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
"BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
},
{,
- "EventCode": "0x5090",
- "EventName": "PM_SHL_ST_DISABLE",
- "BriefDescription": "Store-Hit-Load Table Read Hit with entry Disabled (entry was disabled due to the entry shown to not prevent the flush)"
- },
- {,
"EventCode": "0x201E8",
"EventName": "PM_THRESH_EXC_512",
"BriefDescription": "Threshold counter exceeded a value of 512"
},
{,
- "EventCode": "0x5084",
- "EventName": "PM_DECODE_FUSION_EXT_ADD",
- "BriefDescription": "32-bit extended addition"
- },
- {,
"EventCode": "0x36080",
"EventName": "PM_L2_INST",
- "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)."
+ "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)"
},
{,
"EventCode": "0x3504C",
@@ -1555,21 +1455,11 @@
"BriefDescription": "Memory Read With Intent to Modify for this thread"
},
{,
- "EventCode": "0x26882",
- "EventName": "PM_L2_DC_INV",
- "BriefDescription": "D-cache invalidates sent over the reload bus to the core"
- },
- {,
"EventCode": "0xC090",
"EventName": "PM_LSU_STCX",
"BriefDescription": "STCX sent to nest, i.e. total"
},
{,
- "EventCode": "0xD080",
- "EventName": "PM_LSU0_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0x2C120",
"EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a marked load"
@@ -1610,11 +1500,6 @@
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a instruction side request"
},
{,
- "EventCode": "0xD9A0",
- "EventName": "PM_MRK_LSU_FLUSH_LHL_SHL",
- "BriefDescription": "The instruction was flushed because of a sequential load/store consistency. If a load or store hits on an older load that has either been snooped (for loads) or has stale data (for stores)."
- },
- {,
"EventCode": "0x35042",
"EventName": "PM_IPTEG_FROM_L3_DISP_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a instruction side request"
@@ -1692,7 +1577,7 @@
{,
"EventCode": "0x2001A",
"EventName": "PM_NTC_ALL_FIN",
- "BriefDescription": "Cycles after all instructions have finished to group completed"
+ "BriefDescription": "Cycles after instruction finished to instruction completed."
},
{,
"EventCode": "0x3005A",
@@ -1710,6 +1595,11 @@
"BriefDescription": "ls1 l1 tm cam cancel"
},
{,
+ "EventCode": "0x268AE",
+ "EventName": "PM_L3_P3_PF_RTY",
+ "BriefDescription": "L3 PF received retry port 3, every retry counted"
+ },
+ {,
"EventCode": "0xE884",
"EventName": "PM_LS1_ERAT_MISS_PREF",
"BriefDescription": "LS1 Erat miss due to prefetch"
@@ -1742,7 +1632,7 @@
{,
"EventCode": "0x160B6",
"EventName": "PM_L3_WI0_BUSY",
- "BriefDescription": "Rotating sample of 8 WI valid"
+ "BriefDescription": "Rotating sample of 8 WI valid (duplicate)"
},
{,
"EventCode": "0x368AC",
@@ -1790,9 +1680,9 @@
"BriefDescription": "L2 guess system (VGS or RNS) and guess was correct (ie data beyond-group)"
},
{,
- "EventCode": "0x589C",
- "EventName": "PM_PTESYNC",
- "BriefDescription": "ptesync instruction counted when the instruction is decoded and transmitted"
+ "EventCode": "0x260AE",
+ "EventName": "PM_L3_P2_PF_RTY",
+ "BriefDescription": "L3 PF received retry port 2, every retry counted"
},
{,
"EventCode": "0x26086",
@@ -1825,6 +1715,11 @@
"BriefDescription": "Store-Hit-Load Table Read Hit with entry Enabled"
},
{,
+ "EventCode": "0x46882",
+ "EventName": "PM_L2_ST_HIT",
+ "BriefDescription": "All successful D-side store dispatches for this thread that were L2 hits"
+ },
+ {,
"EventCode": "0x360AC",
"EventName": "PM_L3_SN0_BUSY",
"BriefDescription": "Lifetime, sample of snooper machine 0 valid"
@@ -1845,11 +1740,6 @@
"BriefDescription": "All successful D-Side Store dispatches that were an L2 miss for this thread"
},
{,
- "EventCode": "0xF8B4",
- "EventName": "PM_DC_PREF_XCONS_ALLOC",
- "BriefDescription": "Prefetch stream allocated in the Ultra conservative phase by either the hardware prefetch mechanism or software prefetch"
- },
- {,
"EventCode": "0x35048",
"EventName": "PM_IPTEG_FROM_DL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request"
@@ -1970,11 +1860,6 @@
"BriefDescription": "Cycles thread running at priority level 2 or 3"
},
{,
- "EventCode": "0x10134",
- "EventName": "PM_MRK_ST_DONE_L2",
- "BriefDescription": "marked store completed in L2 ( RC machine done)"
- },
- {,
"EventCode": "0x368B2",
"EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
"BriefDescription": "Initial scope=group (GS or NNS) but data from local node. Prediction too high"
@@ -2005,11 +1890,6 @@
"BriefDescription": "L2 guess grp (GS or NNS) and guess was not correct (ie data on-chip OR beyond-group)"
},
{,
- "EventCode": "0x368AE",
- "EventName": "PM_L3_P1_CO_RTY",
- "BriefDescription": "L3 CO received retry port 1 (memory only), every retry counted"
- },
- {,
"EventCode": "0xC0AC",
"EventName": "PM_LSU_FLUSH_EMSH",
"BriefDescription": "An ERAT miss was detected after a set-p hit. Erat tracker indicates fail due to tlbmiss and the instruction gets flushed because the instruction was working on the wrong address"
@@ -2035,11 +1915,6 @@
"BriefDescription": "RC requests that were on group (aka nodel) pump attempts"
},
{,
- "EventCode": "0xF0B0",
- "EventName": "PM_L3_LD_PREF",
- "BriefDescription": "L3 load prefetch, sourced from a hardware or software stream, was sent to the nest"
- },
- {,
"EventCode": "0x16080",
"EventName": "PM_L2_LD",
"BriefDescription": "All successful D-side Load dispatches for this thread (L2 miss + L2 hits)"
@@ -2050,6 +1925,11 @@
"BriefDescription": "Math flop instruction completed"
},
{,
+ "EventCode": "0xC080",
+ "EventName": "PM_LS0_LD_VECTOR_FIN",
+ "BriefDescription": ""
+ },
+ {,
"EventCode": "0x368B0",
"EventName": "PM_L3_P1_SYS_PUMP",
"BriefDescription": "L3 PF sent with sys scope port 1, counts even retried requests"
@@ -2120,11 +2000,6 @@
"BriefDescription": "Conditional Branch Completed in which the HW correctly predicted the direction as taken. Counted at completion time"
},
{,
- "EventCode": "0xF0B8",
- "EventName": "PM_LS0_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x20132",
"EventName": "PM_MRK_DFU_FIN",
"BriefDescription": "Decimal Unit marked Instruction Finish"
@@ -2140,6 +2015,11 @@
"BriefDescription": "Effective Address alias flush : no EA match but Real Address match. If the data has not yet been returned for this load, the instruction will just be rejected, but if it has returned data, it will be flushed"
},
{,
+ "EventCode": "0x16084",
+ "EventName": "PM_L2_RCLD_DISP",
+ "BriefDescription": "All I-or-D side load dispatch attempts for this thread (excludes i_l2mru_tch_reqs)"
+ },
+ {,
"EventCode": "0x3F150",
"EventName": "PM_MRK_ST_DRAIN_TO_L2DISP_CYC",
"BriefDescription": "cycles to drain st from core to L2"
@@ -2225,11 +2105,6 @@
"BriefDescription": "Prefetch Canceled due to page boundary"
},
{,
- "EventCode": "0xF09C",
- "EventName": "PM_SLB_TABLEWALK_CYC",
- "BriefDescription": "Cycles when a tablewalk is pending on this thread on the SLB table"
- },
- {,
"EventCode": "0x460AA",
"EventName": "PM_L3_P0_CO_L31",
"BriefDescription": "L3 CO to L3.1 (LCO) port 0 with or without data"
@@ -2247,10 +2122,10 @@
{,
"EventCode": "0x46082",
"EventName": "PM_L2_ST_DISP",
- "BriefDescription": "All successful D-side store dispatches for this thread "
+ "BriefDescription": "All successful D-side store dispatches for this thread (L2 miss + L2 hits)"
},
{,
- "EventCode": "0x4609E",
+ "EventCode": "0x36880",
"EventName": "PM_L2_INST_MISS",
"BriefDescription": "All successful I-side dispatches that were an L2 miss for this thread (excludes i_l2mru_tch reqs)"
},
@@ -2340,9 +2215,9 @@
"BriefDescription": "All ISU rejects"
},
{,
- "EventCode": "0x46882",
- "EventName": "PM_L2_ST_HIT",
- "BriefDescription": "All successful D-side store dispatches for this thread that were L2 hits"
+ "EventCode": "0xC884",
+ "EventName": "PM_LS3_LD_VECTOR_FIN",
+ "BriefDescription": ""
},
{,
"EventCode": "0x360A8",
@@ -2360,11 +2235,6 @@
"BriefDescription": "Asserts when a i=1 store op is sent to the nest. No record of issue pipe (LS0/LS1) is maintained so this is for both pipes. Probably don't need separate LS0 and LS1"
},
{,
- "EventCode": "0xD880",
- "EventName": "PM_LSU1_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0xD0B8",
"EventName": "PM_LSU_LMQ_FULL_CYC",
"BriefDescription": "Counts the number of cycles the LMQ is full"
@@ -2389,4 +2259,4 @@
"EventName": "PM_L3_PF_USAGE",
"BriefDescription": "Rotating sample of 32 PF actives"
}
-]
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
index bc2db636dabf..5af1abbe82c4 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
@@ -125,6 +125,11 @@
"BriefDescription": "Overflow from counter 5"
},
{,
+ "EventCode": "0x4505E",
+ "EventName": "PM_FLOP_CMPL",
+ "BriefDescription": "Floating Point Operation Finished"
+ },
+ {,
"EventCode": "0x2C018",
"EventName": "PM_CMPLU_STALL_DMISS_L21_L31",
"BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)"
@@ -390,11 +395,6 @@
"BriefDescription": "Ict empty for this thread due to branch mispred"
},
{,
- "EventCode": "0x3405E",
- "EventName": "PM_IFETCH_THROTTLE",
- "BriefDescription": "Cycles in which Instruction fetch throttle was active."
- },
- {,
"EventCode": "0x1F148",
"EventName": "PM_MRK_DPTEG_FROM_ON_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a marked data side request.. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
@@ -422,7 +422,7 @@
{,
"EventCode": "0xD0A8",
"EventName": "PM_DSLB_MISS",
- "BriefDescription": "Data SLB Miss - Total of all segment sizes"
+ "BriefDescription": "gate_and(sd_pc_c0_comp_valid AND sd_pc_c0_comp_thread(0:1)=tid,sd_pc_c0_comp_ppc_count(0:3)) + gate_and(sd_pc_c1_comp_valid AND sd_pc_c1_comp_thread(0:1)=tid,sd_pc_c1_comp_ppc_count(0:3))"
},
{,
"EventCode": "0x4C058",
@@ -549,4 +549,4 @@
"EventName": "PM_MRK_DATA_FROM_L21_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L2 on the same chip due to a marked load"
}
-]
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pmc.json b/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
index 3ef8a10aac86..d0b89f930567 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
@@ -119,4 +119,4 @@
"EventName": "PM_1FLOP_CMPL",
"BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation completed"
}
-]
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/translation.json b/tools/perf/pmu-events/arch/powerpc/power9/translation.json
index 8c0f12024afa..bc8e03d7a6b0 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/translation.json
@@ -90,11 +90,6 @@
"BriefDescription": "stcx failed"
},
{,
- "EventCode": "0x20112",
- "EventName": "PM_MRK_NTF_FIN",
- "BriefDescription": "Marked next to finish instruction finished"
- },
- {,
"EventCode": "0x300F0",
"EventName": "PM_ST_MISS_L1",
"BriefDescription": "Store Missed L1"
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index 73688a9dab2a..bba3152ec54a 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -10,13 +10,30 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x27",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests that miss L2 cache.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -30,6 +47,43 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -70,6 +124,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe7",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -80,6 +143,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All L2 requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
"EventCode": "0x27",
"Counter": "0,1,2,3",
@@ -131,6 +203,27 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
@@ -152,7 +245,30 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -174,26 +290,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "UMask": "0x4",
"Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "UMask": "0x8",
"Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -209,18 +325,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
"EventCode": "0x63",
"Counter": "0,1,2,3",
@@ -261,7 +365,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"EventCode": "0xB0",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -281,152 +385,161 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x21",
"Errata": "BDM35",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"Errata": "BDM35",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
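
The MEM_LOAD_UOPS_RETIRED entries above all carry "PEBS": "1" and "Data_LA": "1": the event supports Precise Event Based Sampling, and a PEBS record includes the data linear address of the sampled load. As a minimal sketch of how such an event can be opened as a precise sampling event through perf_event_open(2): the raw encoding (event 0xD1, umask 0x08 for L1_MISS) and the period come from the records above, while the function name and everything else is illustrative.

/* Sketch: sample MEM_LOAD_UOPS_RETIRED.L1_MISS (event 0xD1, umask 0x08)
 * precisely via PEBS. The x86 raw encoding is (umask << 8) | event. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static int open_l1_miss_pebs(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x08d1;		/* umask 0x08, event 0xD1 */
	attr.sample_period = 100003;	/* SampleAfterValue from the record */
	attr.precise_ip = 2;		/* request PEBS (zero-skid) samples */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR; /* Data_LA */
	attr.disabled = 1;

	/* Measure the given process on any CPU. */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}
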
@@ -438,84 +551,83 @@
"Errata": "BDM100, BDE70",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"SampleAfterValue": "100007",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
"UMask": "0x1",
"Errata": "BDE70, BDM100",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100007",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -659,119 +771,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "All L2 requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010001 ",
"Counter": "0,1,2,3",
@@ -784,6 +784,7 @@
"CounterHTOff": "0,1,2,3"
},
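
The OFFCORE_RESPONSE events that follow (event codes 0xB7 and 0xBB) are matrix events: the event select and umask only route the count, while the listed MSRValue must be programmed into the matching MSR_OFFCORE_RSP register to choose which transactions are counted. Through perf_event_open(2) that auxiliary value is passed in config1. A sketch using the ANY_RESPONSE record above; the function name is illustrative.

/* Sketch: count OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE.
 * config selects event 0xB7/umask 0x01 (offcore response 0);
 * config1 carries the MSRValue from the JSON record. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_offcore_any_response(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;		/* umask 0x01, event 0xB7 */
	attr.config1 = 0x0000010001ULL;	/* MSRValue: request + response bits */
	attr.disabled = 1;

	/* Count for the calling thread on any CPU. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
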
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020001 ",
"Counter": "0,1,2,3",
@@ -796,6 +797,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020001 ",
"Counter": "0,1,2,3",
@@ -808,6 +810,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020001 ",
"Counter": "0,1,2,3",
@@ -820,6 +823,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020001 ",
"Counter": "0,1,2,3",
@@ -832,6 +836,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020001 ",
"Counter": "0,1,2,3",
@@ -844,6 +849,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020001 ",
"Counter": "0,1,2,3",
@@ -856,6 +862,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0001 ",
"Counter": "0,1,2,3",
@@ -868,6 +875,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0001 ",
"Counter": "0,1,2,3",
@@ -880,6 +888,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0001 ",
"Counter": "0,1,2,3",
@@ -892,6 +901,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0001 ",
"Counter": "0,1,2,3",
@@ -904,6 +914,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0001 ",
"Counter": "0,1,2,3",
@@ -916,6 +927,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0001 ",
"Counter": "0,1,2,3",
@@ -928,6 +940,7 @@
"CounterHTOff": "0,1,2,3"
},
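
The MSRValue strings are bit masks built from three fields: the request type in the low bits, supplier information (for example the four L3-hit cache states) in the middle bits, and the snoop response in the high bits. The sketch below shows how the values in these records decompose; the bit positions are inferred from the MSRValues themselves and the usual offcore response layout, so verify them against the Intel SDM for a specific part.

/* Sketch of the offcore response mask fields behind the MSRValues above. */
#define OCR_DMND_DATA_RD	(1ULL << 0)	/* request: demand data read */
#define OCR_ANY_RESPONSE	(1ULL << 16)	/* response: any */
#define OCR_L3_HIT_STATES	(0xfULL << 18)	/* supplier: L3 hit M/E/S/F */
#define OCR_SNP_ANY		(0x7fULL << 31)	/* snoop: all snoop outcomes */

/* 0x0000010001: demand data reads, any response type. */
_Static_assert((OCR_DMND_DATA_RD | OCR_ANY_RESPONSE) == 0x0000010001ULL,
	       "any-response mask matches the table");
/* 0x3f803c0001: demand data reads that hit in the L3, any snoop outcome. */
_Static_assert((OCR_DMND_DATA_RD | OCR_L3_HIT_STATES | OCR_SNP_ANY)
	       == 0x3f803c0001ULL, "L3-hit mask matches the table");
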
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010002 ",
"Counter": "0,1,2,3",
@@ -940,6 +953,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0002 ",
"Counter": "0,1,2,3",
@@ -952,6 +966,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0002 ",
"Counter": "0,1,2,3",
@@ -964,6 +979,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0002 ",
"Counter": "0,1,2,3",
@@ -976,6 +992,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0002 ",
"Counter": "0,1,2,3",
@@ -988,6 +1005,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0002 ",
"Counter": "0,1,2,3",
@@ -1000,6 +1018,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0002 ",
"Counter": "0,1,2,3",
@@ -1012,6 +1031,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010004 ",
"Counter": "0,1,2,3",
@@ -1024,6 +1044,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020004 ",
"Counter": "0,1,2,3",
@@ -1036,6 +1057,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020004 ",
"Counter": "0,1,2,3",
@@ -1048,6 +1070,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020004 ",
"Counter": "0,1,2,3",
@@ -1060,6 +1083,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020004 ",
"Counter": "0,1,2,3",
@@ -1072,6 +1096,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020004 ",
"Counter": "0,1,2,3",
@@ -1084,6 +1109,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020004 ",
"Counter": "0,1,2,3",
@@ -1096,6 +1122,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0004 ",
"Counter": "0,1,2,3",
@@ -1108,6 +1135,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0004 ",
"Counter": "0,1,2,3",
@@ -1120,6 +1148,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0004 ",
"Counter": "0,1,2,3",
@@ -1132,6 +1161,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0004 ",
"Counter": "0,1,2,3",
@@ -1144,6 +1174,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0004 ",
"Counter": "0,1,2,3",
@@ -1156,6 +1187,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0004 ",
"Counter": "0,1,2,3",
@@ -1168,6 +1200,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010008 ",
"Counter": "0,1,2,3",
@@ -1180,6 +1213,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020008 ",
"Counter": "0,1,2,3",
@@ -1192,6 +1226,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020008 ",
"Counter": "0,1,2,3",
@@ -1204,6 +1239,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020008 ",
"Counter": "0,1,2,3",
@@ -1216,6 +1252,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020008 ",
"Counter": "0,1,2,3",
@@ -1228,6 +1265,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020008 ",
"Counter": "0,1,2,3",
@@ -1240,6 +1278,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020008 ",
"Counter": "0,1,2,3",
@@ -1252,6 +1291,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0008 ",
"Counter": "0,1,2,3",
@@ -1264,6 +1304,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0008 ",
"Counter": "0,1,2,3",
@@ -1276,6 +1317,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0008 ",
"Counter": "0,1,2,3",
@@ -1288,6 +1330,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0008 ",
"Counter": "0,1,2,3",
@@ -1300,6 +1343,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0008 ",
"Counter": "0,1,2,3",
@@ -1312,6 +1356,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0008 ",
"Counter": "0,1,2,3",
@@ -1324,6 +1369,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010010 ",
"Counter": "0,1,2,3",
@@ -1336,6 +1382,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020010 ",
"Counter": "0,1,2,3",
@@ -1348,6 +1395,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020010 ",
"Counter": "0,1,2,3",
@@ -1360,6 +1408,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020010 ",
"Counter": "0,1,2,3",
@@ -1372,6 +1421,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020010 ",
"Counter": "0,1,2,3",
@@ -1384,6 +1434,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020010 ",
"Counter": "0,1,2,3",
@@ -1396,6 +1447,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020010 ",
"Counter": "0,1,2,3",
@@ -1408,6 +1460,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0010 ",
"Counter": "0,1,2,3",
@@ -1420,6 +1473,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0010 ",
"Counter": "0,1,2,3",
@@ -1432,6 +1486,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0010 ",
"Counter": "0,1,2,3",
@@ -1444,6 +1499,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0010 ",
"Counter": "0,1,2,3",
@@ -1456,6 +1512,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0010 ",
"Counter": "0,1,2,3",
@@ -1468,6 +1525,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0010 ",
"Counter": "0,1,2,3",
@@ -1480,6 +1538,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010020 ",
"Counter": "0,1,2,3",
@@ -1492,6 +1551,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020020 ",
"Counter": "0,1,2,3",
@@ -1504,6 +1564,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020020 ",
"Counter": "0,1,2,3",
@@ -1516,6 +1577,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020020 ",
"Counter": "0,1,2,3",
@@ -1528,6 +1590,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020020 ",
"Counter": "0,1,2,3",
@@ -1540,6 +1603,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020020 ",
"Counter": "0,1,2,3",
@@ -1552,6 +1616,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020020 ",
"Counter": "0,1,2,3",
@@ -1564,6 +1629,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0020 ",
"Counter": "0,1,2,3",
@@ -1576,6 +1642,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0020 ",
"Counter": "0,1,2,3",
@@ -1588,6 +1655,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0020 ",
"Counter": "0,1,2,3",
@@ -1600,6 +1668,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0020 ",
"Counter": "0,1,2,3",
@@ -1612,6 +1681,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0020 ",
"Counter": "0,1,2,3",
@@ -1624,6 +1694,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0020 ",
"Counter": "0,1,2,3",
@@ -1636,6 +1707,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010040 ",
"Counter": "0,1,2,3",
@@ -1648,6 +1720,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020040 ",
"Counter": "0,1,2,3",
@@ -1660,6 +1733,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020040 ",
"Counter": "0,1,2,3",
@@ -1672,6 +1746,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020040 ",
"Counter": "0,1,2,3",
@@ -1684,6 +1759,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020040 ",
"Counter": "0,1,2,3",
@@ -1696,6 +1772,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020040 ",
"Counter": "0,1,2,3",
@@ -1708,6 +1785,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020040 ",
"Counter": "0,1,2,3",
@@ -1720,6 +1798,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0040 ",
"Counter": "0,1,2,3",
@@ -1732,6 +1811,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0040 ",
"Counter": "0,1,2,3",
@@ -1744,6 +1824,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0040 ",
"Counter": "0,1,2,3",
@@ -1756,6 +1837,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0040 ",
"Counter": "0,1,2,3",
@@ -1768,6 +1850,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0040 ",
"Counter": "0,1,2,3",
@@ -1780,6 +1863,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0040 ",
"Counter": "0,1,2,3",
@@ -1792,6 +1876,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010080 ",
"Counter": "0,1,2,3",
@@ -1804,6 +1889,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020080 ",
"Counter": "0,1,2,3",
@@ -1816,6 +1902,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020080 ",
"Counter": "0,1,2,3",
@@ -1828,6 +1915,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020080 ",
"Counter": "0,1,2,3",
@@ -1840,6 +1928,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020080 ",
"Counter": "0,1,2,3",
@@ -1852,6 +1941,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020080 ",
"Counter": "0,1,2,3",
@@ -1864,6 +1954,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020080 ",
"Counter": "0,1,2,3",
@@ -1876,6 +1967,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0080 ",
"Counter": "0,1,2,3",
@@ -1888,6 +1980,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0080 ",
"Counter": "0,1,2,3",
@@ -1900,6 +1993,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0080 ",
"Counter": "0,1,2,3",
@@ -1912,6 +2006,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0080 ",
"Counter": "0,1,2,3",
@@ -1924,6 +2019,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0080 ",
"Counter": "0,1,2,3",
@@ -1936,6 +2032,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0080 ",
"Counter": "0,1,2,3",
@@ -1948,6 +2045,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010100 ",
"Counter": "0,1,2,3",
@@ -1960,6 +2058,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020100 ",
"Counter": "0,1,2,3",
@@ -1972,6 +2071,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020100 ",
"Counter": "0,1,2,3",
@@ -1984,6 +2084,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020100 ",
"Counter": "0,1,2,3",
@@ -1996,6 +2097,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020100 ",
"Counter": "0,1,2,3",
@@ -2008,6 +2110,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020100 ",
"Counter": "0,1,2,3",
@@ -2020,6 +2123,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020100 ",
"Counter": "0,1,2,3",
@@ -2032,6 +2136,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0100 ",
"Counter": "0,1,2,3",
@@ -2044,6 +2149,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0100 ",
"Counter": "0,1,2,3",
@@ -2056,6 +2162,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0100 ",
"Counter": "0,1,2,3",
@@ -2068,6 +2175,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0100 ",
"Counter": "0,1,2,3",
@@ -2080,6 +2188,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0100 ",
"Counter": "0,1,2,3",
@@ -2092,6 +2201,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0100 ",
"Counter": "0,1,2,3",
@@ -2104,6 +2214,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010200 ",
"Counter": "0,1,2,3",
@@ -2116,6 +2227,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020200 ",
"Counter": "0,1,2,3",
@@ -2128,6 +2240,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020200 ",
"Counter": "0,1,2,3",
@@ -2140,6 +2253,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020200 ",
"Counter": "0,1,2,3",
@@ -2152,6 +2266,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020200 ",
"Counter": "0,1,2,3",
@@ -2164,6 +2279,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020200 ",
"Counter": "0,1,2,3",
@@ -2176,6 +2292,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020200 ",
"Counter": "0,1,2,3",
@@ -2188,6 +2305,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0200 ",
"Counter": "0,1,2,3",
@@ -2200,6 +2318,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0200 ",
"Counter": "0,1,2,3",
@@ -2212,6 +2331,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0200 ",
"Counter": "0,1,2,3",
@@ -2224,6 +2344,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0200 ",
"Counter": "0,1,2,3",
@@ -2236,6 +2357,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0200 ",
"Counter": "0,1,2,3",
@@ -2248,6 +2370,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0200 ",
"Counter": "0,1,2,3",
@@ -2260,6 +2383,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000018000 ",
"Counter": "0,1,2,3",
@@ -2272,6 +2396,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080028000 ",
"Counter": "0,1,2,3",
@@ -2284,6 +2409,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100028000 ",
"Counter": "0,1,2,3",
@@ -2296,6 +2422,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200028000 ",
"Counter": "0,1,2,3",
@@ -2308,6 +2435,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400028000 ",
"Counter": "0,1,2,3",
@@ -2320,6 +2448,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000028000 ",
"Counter": "0,1,2,3",
@@ -2332,6 +2461,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80028000 ",
"Counter": "0,1,2,3",
@@ -2344,6 +2474,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c8000 ",
"Counter": "0,1,2,3",
@@ -2356,6 +2487,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c8000 ",
"Counter": "0,1,2,3",
@@ -2368,6 +2500,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c8000 ",
"Counter": "0,1,2,3",
@@ -2380,6 +2513,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c8000 ",
"Counter": "0,1,2,3",
@@ -2392,6 +2526,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c8000 ",
"Counter": "0,1,2,3",
@@ -2404,6 +2539,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c8000 ",
"Counter": "0,1,2,3",
@@ -2416,6 +2552,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010090 ",
"Counter": "0,1,2,3",
@@ -2428,6 +2565,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020090 ",
"Counter": "0,1,2,3",
@@ -2440,6 +2578,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020090 ",
"Counter": "0,1,2,3",
@@ -2452,6 +2591,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020090 ",
"Counter": "0,1,2,3",
@@ -2464,6 +2604,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020090 ",
"Counter": "0,1,2,3",
@@ -2476,6 +2617,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020090 ",
"Counter": "0,1,2,3",
@@ -2488,6 +2630,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020090 ",
"Counter": "0,1,2,3",
@@ -2500,6 +2643,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0090 ",
"Counter": "0,1,2,3",
@@ -2512,6 +2656,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0090 ",
"Counter": "0,1,2,3",
@@ -2524,6 +2669,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0090 ",
"Counter": "0,1,2,3",
@@ -2536,6 +2682,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0090 ",
"Counter": "0,1,2,3",
@@ -2548,6 +2695,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0090 ",
"Counter": "0,1,2,3",
@@ -2560,6 +2708,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0090 ",
"Counter": "0,1,2,3",
@@ -2572,6 +2721,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010120 ",
"Counter": "0,1,2,3",
@@ -2584,6 +2734,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020120 ",
"Counter": "0,1,2,3",
@@ -2596,6 +2747,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020120 ",
"Counter": "0,1,2,3",
@@ -2608,6 +2760,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020120 ",
"Counter": "0,1,2,3",
@@ -2620,6 +2773,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020120 ",
"Counter": "0,1,2,3",
@@ -2632,6 +2786,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020120 ",
"Counter": "0,1,2,3",
@@ -2644,6 +2799,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020120 ",
"Counter": "0,1,2,3",
@@ -2656,6 +2812,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0120 ",
"Counter": "0,1,2,3",
@@ -2668,6 +2825,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0120 ",
"Counter": "0,1,2,3",
@@ -2680,6 +2838,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0120 ",
"Counter": "0,1,2,3",
@@ -2692,6 +2851,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0120 ",
"Counter": "0,1,2,3",
@@ -2704,6 +2864,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0120 ",
"Counter": "0,1,2,3",
@@ -2716,6 +2877,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0120 ",
"Counter": "0,1,2,3",
@@ -2728,6 +2890,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010240 ",
"Counter": "0,1,2,3",
@@ -2740,6 +2903,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020240 ",
"Counter": "0,1,2,3",
@@ -2752,6 +2916,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020240 ",
"Counter": "0,1,2,3",
@@ -2764,6 +2929,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020240 ",
"Counter": "0,1,2,3",
@@ -2776,6 +2942,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020240 ",
"Counter": "0,1,2,3",
@@ -2788,6 +2955,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020240 ",
"Counter": "0,1,2,3",
@@ -2800,6 +2968,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020240 ",
"Counter": "0,1,2,3",
@@ -2812,6 +2981,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0240 ",
"Counter": "0,1,2,3",
@@ -2824,6 +2994,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0240 ",
"Counter": "0,1,2,3",
@@ -2836,6 +3007,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0240 ",
"Counter": "0,1,2,3",
@@ -2848,6 +3020,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0240 ",
"Counter": "0,1,2,3",
@@ -2860,6 +3033,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0240 ",
"Counter": "0,1,2,3",
@@ -2872,6 +3046,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0240 ",
"Counter": "0,1,2,3",
@@ -2884,6 +3059,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010091 ",
"Counter": "0,1,2,3",
@@ -2896,6 +3072,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020091 ",
"Counter": "0,1,2,3",
@@ -2908,6 +3085,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020091 ",
"Counter": "0,1,2,3",
@@ -2920,6 +3098,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020091 ",
"Counter": "0,1,2,3",
@@ -2932,6 +3111,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020091 ",
"Counter": "0,1,2,3",
@@ -2944,6 +3124,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020091 ",
"Counter": "0,1,2,3",
@@ -2956,6 +3137,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020091 ",
"Counter": "0,1,2,3",
@@ -2968,6 +3150,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0091 ",
"Counter": "0,1,2,3",
@@ -2980,6 +3163,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0091 ",
"Counter": "0,1,2,3",
@@ -2992,6 +3176,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0091 ",
"Counter": "0,1,2,3",
@@ -3004,6 +3189,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0091 ",
"Counter": "0,1,2,3",
@@ -3016,6 +3202,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0091 ",
"Counter": "0,1,2,3",
@@ -3028,6 +3215,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0091 ",
"Counter": "0,1,2,3",
@@ -3040,6 +3228,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010122 ",
"Counter": "0,1,2,3",
@@ -3052,6 +3241,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020122 ",
"Counter": "0,1,2,3",
@@ -3064,6 +3254,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020122 ",
"Counter": "0,1,2,3",
@@ -3076,6 +3267,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020122 ",
"Counter": "0,1,2,3",
@@ -3088,6 +3280,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020122 ",
"Counter": "0,1,2,3",
@@ -3100,6 +3293,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020122 ",
"Counter": "0,1,2,3",
@@ -3112,6 +3306,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020122 ",
"Counter": "0,1,2,3",
@@ -3124,6 +3319,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0122 ",
"Counter": "0,1,2,3",
@@ -3136,6 +3332,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0122 ",
"Counter": "0,1,2,3",
@@ -3148,6 +3345,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0122 ",
"Counter": "0,1,2,3",
@@ -3160,6 +3358,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0122 ",
"Counter": "0,1,2,3",
@@ -3172,6 +3371,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0122 ",
"Counter": "0,1,2,3",
@@ -3184,6 +3384,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0122 ",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
index 102bfb808199..689d478dae93 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
@@ -1,6 +1,6 @@
[
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -11,7 +11,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -22,7 +22,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -32,7 +31,6 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -42,7 +40,15 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -52,7 +58,6 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -62,7 +67,6 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -72,7 +76,43 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x15",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2a",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -82,7 +122,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -92,7 +132,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -102,7 +142,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -121,51 +161,5 @@
"BriefDescription": "Cycles with any input/output SSE or FP assist",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xc7",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x2a",
- "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
- "SampleAfterValue": "2000005",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x15",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
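
The FP_ARITH_INST_RETIRED umasks above compose by OR (SCALAR's 0x3 is scalar double | scalar single; PACKED's 0x3c is the four packed flavors), and each count carries a fixed computations-per-count weight (the 256-bit packed single description above spells out 8 per count). A rough sketch of the usual FLOP-count arithmetic, assuming the standard Broadwell weights (scalar = 1, 128-bit packed double = 2, 128-bit packed single = 4, 256-bit packed double = 4, 256-bit packed single = 8); the function and parameter names are illustrative only, with counter values assumed to have been read elsewhere:

/* Illustrative only: fold per-umask FP_ARITH_INST_RETIRED counts into a
 * total floating-point operation count using each event's per-count weight. */
static unsigned long long fp_ops_total(unsigned long long scalar_double,
				       unsigned long long scalar_single,
				       unsigned long long pk128_double,
				       unsigned long long pk128_single,
				       unsigned long long pk256_double,
				       unsigned long long pk256_single)
{
	return scalar_double + scalar_single +
	       2 * pk128_double + 4 * pk128_single +
	       4 * pk256_double + 8 * pk256_single;
}

No extra factor is needed for FMA: as the descriptions above note, FM(N)ADD/SUB instructions already count twice.
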
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
index b0cdf1f097a0..7142c76d7f11 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
@@ -10,7 +10,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -20,80 +20,49 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -104,7 +73,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -116,7 +85,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x18",
@@ -127,7 +96,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x18",
@@ -138,7 +107,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x24",
@@ -149,7 +128,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x24",
@@ -160,7 +139,39 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x3c",
@@ -200,7 +211,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -263,7 +274,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -271,16 +282,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
index ff5416d29d0d..c9154cebbdf0 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
@@ -90,7 +90,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
"EventCode": "0x5d",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -170,13 +169,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
"EventCode": "0xc8",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "HLE_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE abort was triggered",
+ "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -251,13 +250,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
"EventCode": "0xc9",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "RTM_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times RTM abort was triggered",
+ "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -431,6 +430,7 @@
"CounterHTOff": "3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020001 ",
"Counter": "0,1,2,3",
@@ -443,6 +443,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0001 ",
"Counter": "0,1,2,3",
@@ -455,6 +456,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000001 ",
"Counter": "0,1,2,3",
@@ -467,6 +469,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000001 ",
"Counter": "0,1,2,3",
@@ -479,6 +482,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000001 ",
"Counter": "0,1,2,3",
@@ -491,6 +495,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000001 ",
"Counter": "0,1,2,3",
@@ -503,6 +508,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000001 ",
"Counter": "0,1,2,3",
@@ -515,6 +521,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000001 ",
"Counter": "0,1,2,3",
@@ -527,6 +534,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000001 ",
"Counter": "0,1,2,3",
@@ -539,6 +547,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000001 ",
"Counter": "0,1,2,3",
@@ -551,6 +560,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000001 ",
"Counter": "0,1,2,3",
@@ -563,6 +573,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000001 ",
"Counter": "0,1,2,3",
@@ -575,6 +586,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000001 ",
"Counter": "0,1,2,3",
@@ -587,6 +599,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0002 ",
"Counter": "0,1,2,3",
@@ -599,6 +612,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000002 ",
"Counter": "0,1,2,3",
@@ -611,6 +625,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000002 ",
"Counter": "0,1,2,3",
@@ -623,6 +638,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000002 ",
"Counter": "0,1,2,3",
@@ -635,6 +651,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000002 ",
"Counter": "0,1,2,3",
@@ -647,6 +664,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000002 ",
"Counter": "0,1,2,3",
@@ -659,6 +677,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020004 ",
"Counter": "0,1,2,3",
@@ -671,6 +690,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0004 ",
"Counter": "0,1,2,3",
@@ -683,6 +703,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000004 ",
"Counter": "0,1,2,3",
@@ -695,6 +716,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000004 ",
"Counter": "0,1,2,3",
@@ -707,6 +729,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000004 ",
"Counter": "0,1,2,3",
@@ -719,6 +742,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000004 ",
"Counter": "0,1,2,3",
@@ -731,6 +755,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000004 ",
"Counter": "0,1,2,3",
@@ -743,6 +768,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000004 ",
"Counter": "0,1,2,3",
@@ -755,6 +781,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000004 ",
"Counter": "0,1,2,3",
@@ -767,6 +794,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000004 ",
"Counter": "0,1,2,3",
@@ -779,6 +807,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000004 ",
"Counter": "0,1,2,3",
@@ -791,6 +820,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000004 ",
"Counter": "0,1,2,3",
@@ -803,6 +833,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000004 ",
"Counter": "0,1,2,3",
@@ -815,6 +846,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020008 ",
"Counter": "0,1,2,3",
@@ -827,6 +859,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0008 ",
"Counter": "0,1,2,3",
@@ -839,6 +872,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000008 ",
"Counter": "0,1,2,3",
@@ -851,6 +885,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000008 ",
"Counter": "0,1,2,3",
@@ -863,6 +898,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000008 ",
"Counter": "0,1,2,3",
@@ -875,6 +911,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000008 ",
"Counter": "0,1,2,3",
@@ -887,6 +924,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000008 ",
"Counter": "0,1,2,3",
@@ -899,6 +937,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000008 ",
"Counter": "0,1,2,3",
@@ -911,6 +950,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000008 ",
"Counter": "0,1,2,3",
@@ -923,6 +963,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000008 ",
"Counter": "0,1,2,3",
@@ -935,6 +976,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000008 ",
"Counter": "0,1,2,3",
@@ -947,6 +989,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000008 ",
"Counter": "0,1,2,3",
@@ -959,6 +1002,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000008 ",
"Counter": "0,1,2,3",
@@ -971,6 +1015,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020010 ",
"Counter": "0,1,2,3",
@@ -983,6 +1028,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0010 ",
"Counter": "0,1,2,3",
@@ -995,6 +1041,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000010 ",
"Counter": "0,1,2,3",
@@ -1007,6 +1054,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000010 ",
"Counter": "0,1,2,3",
@@ -1019,6 +1067,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000010 ",
"Counter": "0,1,2,3",
@@ -1031,6 +1080,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000010 ",
"Counter": "0,1,2,3",
@@ -1043,6 +1093,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000010 ",
"Counter": "0,1,2,3",
@@ -1055,6 +1106,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000010 ",
"Counter": "0,1,2,3",
@@ -1067,6 +1119,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000010 ",
"Counter": "0,1,2,3",
@@ -1079,6 +1132,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000010 ",
"Counter": "0,1,2,3",
@@ -1091,6 +1145,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000010 ",
"Counter": "0,1,2,3",
@@ -1103,6 +1158,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000010 ",
"Counter": "0,1,2,3",
@@ -1115,6 +1171,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000010 ",
"Counter": "0,1,2,3",
@@ -1127,6 +1184,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020020 ",
"Counter": "0,1,2,3",
@@ -1139,6 +1197,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0020 ",
"Counter": "0,1,2,3",
@@ -1151,6 +1210,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000020 ",
"Counter": "0,1,2,3",
@@ -1163,6 +1223,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000020 ",
"Counter": "0,1,2,3",
@@ -1175,6 +1236,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000020 ",
"Counter": "0,1,2,3",
@@ -1187,6 +1249,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000020 ",
"Counter": "0,1,2,3",
@@ -1199,6 +1262,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000020 ",
"Counter": "0,1,2,3",
@@ -1211,6 +1275,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000020 ",
"Counter": "0,1,2,3",
@@ -1223,6 +1288,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000020 ",
"Counter": "0,1,2,3",
@@ -1235,6 +1301,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000020 ",
"Counter": "0,1,2,3",
@@ -1247,6 +1314,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000020 ",
"Counter": "0,1,2,3",
@@ -1259,6 +1327,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000020 ",
"Counter": "0,1,2,3",
@@ -1271,6 +1340,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000020 ",
"Counter": "0,1,2,3",
@@ -1283,6 +1353,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020040 ",
"Counter": "0,1,2,3",
@@ -1295,6 +1366,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0040 ",
"Counter": "0,1,2,3",
@@ -1307,6 +1379,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000040 ",
"Counter": "0,1,2,3",
@@ -1319,6 +1392,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000040 ",
"Counter": "0,1,2,3",
@@ -1331,6 +1405,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000040 ",
"Counter": "0,1,2,3",
@@ -1343,6 +1418,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000040 ",
"Counter": "0,1,2,3",
@@ -1355,6 +1431,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000040 ",
"Counter": "0,1,2,3",
@@ -1367,6 +1444,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000040 ",
"Counter": "0,1,2,3",
@@ -1379,6 +1457,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000040 ",
"Counter": "0,1,2,3",
@@ -1391,6 +1470,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000040 ",
"Counter": "0,1,2,3",
@@ -1403,6 +1483,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000040 ",
"Counter": "0,1,2,3",
@@ -1415,6 +1496,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000040 ",
"Counter": "0,1,2,3",
@@ -1427,6 +1509,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000040 ",
"Counter": "0,1,2,3",
@@ -1439,6 +1522,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020080 ",
"Counter": "0,1,2,3",
@@ -1451,6 +1535,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0080 ",
"Counter": "0,1,2,3",
@@ -1463,6 +1548,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000080 ",
"Counter": "0,1,2,3",
@@ -1475,6 +1561,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000080 ",
"Counter": "0,1,2,3",
@@ -1487,6 +1574,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000080 ",
"Counter": "0,1,2,3",
@@ -1499,6 +1587,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000080 ",
"Counter": "0,1,2,3",
@@ -1511,6 +1600,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000080 ",
"Counter": "0,1,2,3",
@@ -1523,6 +1613,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000080 ",
"Counter": "0,1,2,3",
@@ -1535,6 +1626,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000080 ",
"Counter": "0,1,2,3",
@@ -1547,6 +1639,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000080 ",
"Counter": "0,1,2,3",
@@ -1559,6 +1652,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000080 ",
"Counter": "0,1,2,3",
@@ -1571,6 +1665,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000080 ",
"Counter": "0,1,2,3",
@@ -1583,6 +1678,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000080 ",
"Counter": "0,1,2,3",
@@ -1595,6 +1691,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020100 ",
"Counter": "0,1,2,3",
@@ -1607,6 +1704,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0100 ",
"Counter": "0,1,2,3",
@@ -1619,6 +1717,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000100 ",
"Counter": "0,1,2,3",
@@ -1631,6 +1730,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000100 ",
"Counter": "0,1,2,3",
@@ -1643,6 +1743,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000100 ",
"Counter": "0,1,2,3",
@@ -1655,6 +1756,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000100 ",
"Counter": "0,1,2,3",
@@ -1667,6 +1769,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000100 ",
"Counter": "0,1,2,3",
@@ -1679,6 +1782,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000100 ",
"Counter": "0,1,2,3",
@@ -1691,6 +1795,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000100 ",
"Counter": "0,1,2,3",
@@ -1703,6 +1808,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000100 ",
"Counter": "0,1,2,3",
@@ -1715,6 +1821,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000100 ",
"Counter": "0,1,2,3",
@@ -1727,6 +1834,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000100 ",
"Counter": "0,1,2,3",
@@ -1739,6 +1847,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000100 ",
"Counter": "0,1,2,3",
@@ -1751,6 +1860,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020200 ",
"Counter": "0,1,2,3",
@@ -1763,6 +1873,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0200 ",
"Counter": "0,1,2,3",
@@ -1775,6 +1886,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000200 ",
"Counter": "0,1,2,3",
@@ -1787,6 +1899,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000200 ",
"Counter": "0,1,2,3",
@@ -1799,6 +1912,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000200 ",
"Counter": "0,1,2,3",
@@ -1811,6 +1925,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000200 ",
"Counter": "0,1,2,3",
@@ -1823,6 +1938,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000200 ",
"Counter": "0,1,2,3",
@@ -1835,6 +1951,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000200 ",
"Counter": "0,1,2,3",
@@ -1847,6 +1964,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000200 ",
"Counter": "0,1,2,3",
@@ -1859,6 +1977,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000200 ",
"Counter": "0,1,2,3",
@@ -1871,6 +1990,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000200 ",
"Counter": "0,1,2,3",
@@ -1883,6 +2003,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000200 ",
"Counter": "0,1,2,3",
@@ -1895,6 +2016,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000200 ",
"Counter": "0,1,2,3",
@@ -1907,6 +2029,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000028000 ",
"Counter": "0,1,2,3",
@@ -1919,6 +2042,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c8000 ",
"Counter": "0,1,2,3",
@@ -1931,6 +2055,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084008000 ",
"Counter": "0,1,2,3",
@@ -1943,6 +2068,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104008000 ",
"Counter": "0,1,2,3",
@@ -1955,6 +2081,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204008000 ",
"Counter": "0,1,2,3",
@@ -1967,6 +2094,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404008000 ",
"Counter": "0,1,2,3",
@@ -1979,6 +2107,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004008000 ",
"Counter": "0,1,2,3",
@@ -1991,6 +2120,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004008000 ",
"Counter": "0,1,2,3",
@@ -2003,6 +2133,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84008000 ",
"Counter": "0,1,2,3",
@@ -2015,6 +2146,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc008000 ",
"Counter": "0,1,2,3",
@@ -2027,6 +2159,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c008000 ",
"Counter": "0,1,2,3",
@@ -2039,6 +2172,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c008000 ",
"Counter": "0,1,2,3",
@@ -2051,6 +2185,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c008000 ",
"Counter": "0,1,2,3",
@@ -2063,6 +2198,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020090 ",
"Counter": "0,1,2,3",
@@ -2075,6 +2211,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0090 ",
"Counter": "0,1,2,3",
@@ -2087,6 +2224,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000090 ",
"Counter": "0,1,2,3",
@@ -2099,6 +2237,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000090 ",
"Counter": "0,1,2,3",
@@ -2111,6 +2250,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000090 ",
"Counter": "0,1,2,3",
@@ -2123,6 +2263,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000090 ",
"Counter": "0,1,2,3",
@@ -2135,6 +2276,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000090 ",
"Counter": "0,1,2,3",
@@ -2147,6 +2289,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000090 ",
"Counter": "0,1,2,3",
@@ -2159,6 +2302,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000090 ",
"Counter": "0,1,2,3",
@@ -2171,6 +2315,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000090 ",
"Counter": "0,1,2,3",
@@ -2183,6 +2328,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000090 ",
"Counter": "0,1,2,3",
@@ -2195,6 +2341,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000090 ",
"Counter": "0,1,2,3",
@@ -2207,6 +2354,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000090 ",
"Counter": "0,1,2,3",
@@ -2219,6 +2367,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020120 ",
"Counter": "0,1,2,3",
@@ -2231,6 +2380,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0120 ",
"Counter": "0,1,2,3",
@@ -2243,6 +2393,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000120 ",
"Counter": "0,1,2,3",
@@ -2255,6 +2406,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000120 ",
"Counter": "0,1,2,3",
@@ -2267,6 +2419,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000120 ",
"Counter": "0,1,2,3",
@@ -2279,6 +2432,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000120 ",
"Counter": "0,1,2,3",
@@ -2291,6 +2445,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000120 ",
"Counter": "0,1,2,3",
@@ -2303,6 +2458,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000120 ",
"Counter": "0,1,2,3",
@@ -2315,6 +2471,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000120 ",
"Counter": "0,1,2,3",
@@ -2327,6 +2484,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000120 ",
"Counter": "0,1,2,3",
@@ -2339,6 +2497,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000120 ",
"Counter": "0,1,2,3",
@@ -2351,6 +2510,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000120 ",
"Counter": "0,1,2,3",
@@ -2363,6 +2523,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000120 ",
"Counter": "0,1,2,3",
@@ -2375,6 +2536,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020240 ",
"Counter": "0,1,2,3",
@@ -2387,6 +2549,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0240 ",
"Counter": "0,1,2,3",
@@ -2399,6 +2562,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000240 ",
"Counter": "0,1,2,3",
@@ -2411,6 +2575,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000240 ",
"Counter": "0,1,2,3",
@@ -2423,6 +2588,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000240 ",
"Counter": "0,1,2,3",
@@ -2435,6 +2601,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000240 ",
"Counter": "0,1,2,3",
@@ -2447,6 +2614,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000240 ",
"Counter": "0,1,2,3",
@@ -2459,6 +2627,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000240 ",
"Counter": "0,1,2,3",
@@ -2471,6 +2640,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000240 ",
"Counter": "0,1,2,3",
@@ -2483,6 +2653,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000240 ",
"Counter": "0,1,2,3",
@@ -2495,6 +2666,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000240 ",
"Counter": "0,1,2,3",
@@ -2507,6 +2679,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000240 ",
"Counter": "0,1,2,3",
@@ -2519,6 +2692,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000240 ",
"Counter": "0,1,2,3",
@@ -2531,6 +2705,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020091 ",
"Counter": "0,1,2,3",
@@ -2543,6 +2718,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0091 ",
"Counter": "0,1,2,3",
@@ -2555,6 +2731,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000091 ",
"Counter": "0,1,2,3",
@@ -2567,6 +2744,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000091 ",
"Counter": "0,1,2,3",
@@ -2579,6 +2757,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000091 ",
"Counter": "0,1,2,3",
@@ -2591,6 +2770,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000091 ",
"Counter": "0,1,2,3",
@@ -2603,6 +2783,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000091 ",
"Counter": "0,1,2,3",
@@ -2615,6 +2796,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000091 ",
"Counter": "0,1,2,3",
@@ -2627,6 +2809,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000091 ",
"Counter": "0,1,2,3",
@@ -2639,6 +2822,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000091 ",
"Counter": "0,1,2,3",
@@ -2651,6 +2835,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000091 ",
"Counter": "0,1,2,3",
@@ -2663,6 +2848,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000091 ",
"Counter": "0,1,2,3",
@@ -2675,6 +2861,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000091 ",
"Counter": "0,1,2,3",
@@ -2687,6 +2874,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020122 ",
"Counter": "0,1,2,3",
@@ -2699,6 +2887,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0122 ",
"Counter": "0,1,2,3",
@@ -2711,6 +2900,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000122 ",
"Counter": "0,1,2,3",
@@ -2723,6 +2913,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000122 ",
"Counter": "0,1,2,3",
@@ -2735,6 +2926,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000122 ",
"Counter": "0,1,2,3",
@@ -2747,6 +2939,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000122 ",
"Counter": "0,1,2,3",
@@ -2759,6 +2952,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000122 ",
"Counter": "0,1,2,3",
@@ -2771,6 +2965,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000122 ",
"Counter": "0,1,2,3",
@@ -2783,6 +2978,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000122 ",
"Counter": "0,1,2,3",
@@ -2795,6 +2991,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000122 ",
"Counter": "0,1,2,3",
@@ -2807,6 +3004,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000122 ",
"Counter": "0,1,2,3",
@@ -2819,6 +3017,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000122 ",
"Counter": "0,1,2,3",
@@ -2831,6 +3030,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000122 ",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/other.json b/tools/perf/pmu-events/arch/x86/broadwell/other.json
index edf14f0d0eaf..4f829c5febbe 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
"EventCode": "0x5C",
"Counter": "0,1,2,3",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index 78913ae87703..97c5d0784c6c 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -2,32 +2,42 @@
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
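The first hunk above renumbers the three architectural fixed counters from 1-based to 0-based naming, matching IA32_FIXED_CTR0-2: INST_RETIRED.ANY on fixed counter 0, CPU_CLK_UNHALTED.THREAD on fixed counter 1, and CPU_CLK_UNHALTED.REF_TSC on fixed counter 2. The first two back perf's generic instructions and cycles aliases, so their classic ratio can be read with a counter group; a hedged sketch, not part of this patch:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* One hardware event in a group; the leader is created with group_fd == -1. */
static int open_hw(unsigned long long config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = config;
	attr.disabled = (group_fd == -1);
	attr.read_format = PERF_FORMAT_GROUP;
	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
	struct { unsigned long long nr, val[2]; } data;
	int cyc = open_hw(PERF_COUNT_HW_CPU_CYCLES, -1);    /* fixed counter 1 */
	int ins = open_hw(PERF_COUNT_HW_INSTRUCTIONS, cyc); /* fixed counter 0 */

	if (cyc < 0 || ins < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(cyc, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	for (volatile int i = 0; i < 10000000; i++)
		; /* workload placeholder */
	ioctl(cyc, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
	read(cyc, &data, sizeof(data)); /* { nr, cycles, instructions } */
	printf("IPC: %.2f\n", (double)data.val[1] / (double)data.val[0]);
	return 0;
}

Because both events live on fixed counters, all four (eight with Hyperthreading disabled) programmable counters stay free for events such as the 0xB7/0xBB encodings earlier in this commit.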
{
"PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
@@ -59,27 +69,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"EventCode": "0x0D",
"Counter": "0,1,2,3",
"UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
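The new *_ANY variants added here and below set AnyThread=1, which perf encodes as the any bit of the raw PERFEVTSEL value alongside the event, umask, and cmask fields. A sketch composing the raw config for INT_MISC.RECOVERY_CYCLES_ANY from the JSON fields above; the bit positions (event[7:0], umask[15:8], any at bit 21, cmask[31:24]) are assumed from the x86 perf event format:

#include <stdint.h>
#include <stdio.h>

static uint64_t raw_config(uint8_t event, uint8_t umask, int any, uint8_t cmask)
{
	return (uint64_t)event | (uint64_t)umask << 8 |
	       (uint64_t)(any != 0) << 21 | (uint64_t)cmask << 24;
}

int main(void)
{
	/* INT_MISC.RECOVERY_CYCLES_ANY: EventCode 0x0D, UMask 0x3,
	 * AnyThread 1, CounterMask 1 -> r120030d, usable (if the layout
	 * holds) as: perf stat -e r120030d */
	printf("r%llx\n", (unsigned long long)raw_config(0x0D, 0x3, 1, 1));
	return 0;
}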
{
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -90,6 +111,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -118,18 +151,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
"PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
"EventCode": "0x14",
"Counter": "0,1,2,3",
@@ -140,6 +161,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
@@ -150,6 +191,36 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -159,6 +230,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
"EventCode": "0x4c",
"Counter": "0,1,2,3",
@@ -225,6 +305,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
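RS_EVENTS.EMPTY_END above is a derived encoding rather than a new hardware condition: with CounterMask=1 and Invert=1 the counter is active on cycles where fewer than one EMPTY event occurs (the RS holds at least one uop), and EdgeDetect=1 restricts counting to the transitions into that state, i.e. the moments an RS-empty period ends. With the bit layout assumed in the earlier sketch (edge at bit 18, inv at bit 23), the raw value works out to:

	event 0x5E | umask 0x1<<8 | edge 1<<18 | inv 1<<23 | cmask 1<<24
	  = 0x5e + 0x100 + 0x40000 + 0x800000 + 0x1000000
	  = 0x184015e    (usable, if the layout holds, as: perf stat -e r184015e)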
+ {
"PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"EventCode": "0x87",
"Counter": "0,1,2,3",
@@ -405,6 +497,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
"EventCode": "0x89",
"Counter": "0,1,2,3",
@@ -435,6 +536,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "EventCode": "0xA0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -445,6 +556,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -455,6 +586,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -465,6 +616,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -475,6 +646,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -485,6 +676,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -495,6 +706,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -505,6 +736,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -515,6 +766,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"EventCode": "0xA2",
"Counter": "0,1,2,3",
@@ -566,15 +837,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
@@ -588,17 +858,37 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
@@ -610,6 +900,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
@@ -621,6 +921,37 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
"EventCode": "0xA3",
"Counter": "2",
@@ -632,7 +963,16 @@
"CounterHTOff": "2"
},
{
- "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -642,6 +982,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops to be executed per-thread each cycle.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -652,6 +1012,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Number of uops executed from any thread.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -662,36 +1074,64 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "BDM61",
- "EventName": "INST_RETIRED.ANY_P",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "INST_RETIRED.X87",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "BDM61",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -707,6 +1147,16 @@
"CounterHTOff": "1"
},
{
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -717,29 +1167,18 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops.",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"Data_LA": "1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -763,6 +1202,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -773,6 +1223,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -793,50 +1254,73 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDW98",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -847,17 +1331,17 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This event counts far branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -868,29 +1352,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDW98",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
@@ -902,13 +1363,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_MISP_RETIRED.RET",
- "SampleAfterValue": "100007",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -923,164 +1384,36 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
+ "EventCode": "0xCC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1090,328 +1423,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "EventCode": "0xA0",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
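
For reference, the fields in these event tables map directly onto the x86 PERFEVTSEL register layout that perf exposes as PERF_TYPE_RAW: EventCode lands in bits 0-7 of the config value, UMask in bits 8-15, EdgeDetect in bit 18, AnyThread in bit 21, Invert in bit 23, and CounterMask in bits 24-31. The sketch below is our illustration, not part of the patch (error handling abbreviated); it counts one of the events added above, CYCLE_ACTIVITY.STALLS_MEM_ANY (EventCode 0xA3, UMask 0x6, CounterMask 6), for the calling thread via perf_event_open(2):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr = { 0 };
	uint64_t count = 0;
	int fd;

	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* EventCode | UMask << 8 | CounterMask << 24, per the JSON fields */
	attr.config = 0xa3 | (0x06ULL << 8) | (6ULL << 24);
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0 /* self */, -1 /* any CPU */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) != (ssize_t)sizeof(count))
		count = 0;
	printf("CYCLE_ACTIVITY.STALLS_MEM_ANY: %llu\n",
	       (unsigned long long)count);
	close(fd);
	return 0;
}

Once perf picks up this JSON, the same counter should be reachable symbolically as -e cycle_activity.stalls_mem_any, or explicitly as -e cpu/event=0xa3,umask=0x6,cmask=6/.
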
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
index 4301e6fbc5eb..2a015e4c7e21 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
@@ -44,6 +44,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
@@ -73,6 +83,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -117,6 +136,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -146,6 +175,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
"EventCode": "0x4F",
"Counter": "0,1,2,3",
@@ -200,6 +238,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
@@ -229,6 +277,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
"EventCode": "0xAE",
"Counter": "0,1,2,3",
@@ -251,61 +308,61 @@
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x21",
+ "UMask": "0x12",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x12",
+ "UMask": "0x14",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x22",
+ "UMask": "0x18",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x14",
+ "UMask": "0x21",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x24",
+ "UMask": "0x22",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x18",
+ "UMask": "0x24",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -327,62 +384,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "Errata": "BDM69",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "Errata": "BDM69",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "Errata": "BDM69",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
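
The TLB events above follow the same encoding; for example DTLB_LOAD_MISSES.WALK_COMPLETED (EventCode 0x08, UMask 0xE, no CounterMask) collapses to the raw event re08. A small sketch of that field-to-config mapping (the helper name and hard-coded values are ours, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Build a PERF_TYPE_RAW config from the JSON fields, following the
 * PERFEVTSEL bit layout noted earlier. */
static uint64_t raw_config(uint8_t event, uint8_t umask, unsigned edge,
                           unsigned any, unsigned inv, uint8_t cmask)
{
	return (uint64_t)event | ((uint64_t)umask << 8) |
	       ((uint64_t)edge << 18) | ((uint64_t)any << 21) |
	       ((uint64_t)inv << 23) | ((uint64_t)cmask << 24);
}

int main(void)
{
	/* DTLB_LOAD_MISSES.WALK_COMPLETED: EventCode 0x08, UMask 0xe */
	printf("perf stat -e r%llx -a -- sleep 1\n",
	       (unsigned long long)raw_config(0x08, 0x0e, 0, 0, 0, 0));
	return 0;
}
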
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index 36fe398029b9..bf243fe2a0ec 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -11,11 +11,28 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache.",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -31,6 +48,43 @@
},
{
"EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0x50",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
@@ -71,6 +125,15 @@
},
{
"EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0xf8",
"BriefDescription": "Requests from L2 hardware prefetchers",
"Counter": "0,1,2,3",
@@ -80,6 +143,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x27",
"UMask": "0x50",
"BriefDescription": "Not rejected writebacks that hit L2 cache",
@@ -131,6 +203,27 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x51",
"UMask": "0x1",
"BriefDescription": "L1D data line replacements",
@@ -153,12 +246,35 @@
},
{
"EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
"UMask": "0x2",
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -175,24 +291,24 @@
},
{
"EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -209,18 +325,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
@@ -266,7 +370,7 @@
"BriefDescription": "Demand and prefetch data reads",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -281,26 +385,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -308,37 +421,37 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -346,24 +459,24 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -371,69 +484,69 @@
{
"EventCode": "0xD1",
"UMask": "0x1",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -445,77 +558,112 @@
{
"EventCode": "0xD1",
"UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "BDE70, BDM100",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Errata": "BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x10",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
+ "Errata": "BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x20",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
+ "Errata": "BDE70",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -657,118 +805,5 @@
"PublicDescription": "This event counts the number of split locks in the super queue.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "BDM76",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
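
The EventCode/UMask pairs in the file above map directly onto raw hardware event encodings: for PERF_TYPE_RAW on these parts, the low byte of the perf config is the event code and the next byte is the umask. Below is a minimal sketch of counting L2_RQSTS.REFERENCES (EventCode 0x24, UMask 0xff, listed above) via perf_event_open(2); it assumes an x86 Linux host, and the workload placeholder and reduced error handling are illustrative only.

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
    	struct perf_event_attr attr;
    	uint64_t count;
    	int fd;

    	memset(&attr, 0, sizeof(attr));
    	attr.type = PERF_TYPE_RAW;
    	attr.size = sizeof(attr);
    	/* L2_RQSTS.REFERENCES: UMask 0xff in bits 15:8, EventCode 0x24 in bits 7:0 */
    	attr.config = (0xffULL << 8) | 0x24;
    	attr.disabled = 1;
    	attr.exclude_kernel = 1;

    	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    	if (fd < 0) {
    		perror("perf_event_open");
    		return 1;
    	}

    	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
    	/* ... run the workload to be measured here ... */
    	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
    		printf("L2_RQSTS.REFERENCES: %llu\n", (unsigned long long)count);
    	close(fd);
    	return 0;
    }
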
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
index 4ae1ea24f22f..d7b9d9c9c518 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
@@ -6,7 +6,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -17,7 +17,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -25,7 +25,6 @@
"EventCode": "0xC7",
"UMask": "0x1",
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
@@ -35,7 +34,6 @@
"EventCode": "0xC7",
"UMask": "0x2",
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000003",
@@ -43,9 +41,17 @@
},
{
"EventCode": "0xC7",
+ "UMask": "0x3",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
"UMask": "0x4",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -55,7 +61,6 @@
"EventCode": "0xC7",
"UMask": "0x8",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -65,19 +70,54 @@
"EventCode": "0xC7",
"UMask": "0x10",
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC7",
+ "UMask": "0x15",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "UMask": "0x20",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2a",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3c",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xCA",
"UMask": "0x2",
"BriefDescription": "Number of X87 assists due to output value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -87,7 +127,7 @@
"BriefDescription": "Number of X87 assists due to input value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -97,7 +137,7 @@
"BriefDescription": "Number of SIMD FP assists due to Output values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -107,7 +147,7 @@
"BriefDescription": "Number of SIMD FP assists due to input values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -121,51 +161,5 @@
"PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xc7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
- "SampleAfterValue": "2000005",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "CounterHTOff": "0,1,2,3"
}
]
\ No newline at end of file
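
The FP_ARITH_INST_RETIRED descriptions above state how many computations each count represents: 1 for scalar, 2 for 128-bit packed double, 4 for 128-bit packed single and for 256-bit packed double, and 8 for 256-bit packed single. Those weights are all a FLOP estimate needs; the small helper below applies exactly that assumption (the function name is illustrative, not part of the patch).

    #include <stdint.h>

    /*
     * FLOP total from FP_ARITH_INST_RETIRED.* counts, using the weights
     * stated in the event descriptions above: scalar = 1, 128-bit packed
     * double = 2, 128-bit packed single = 4, 256-bit packed double = 4,
     * 256-bit packed single = 8.
     */
    static inline uint64_t fp_arith_flops(uint64_t scalar,
    				      uint64_t pd128, uint64_t ps128,
    				      uint64_t pd256, uint64_t ps256)
    {
    	return scalar + 2 * pd128 + 4 * ps128 + 4 * pd256 + 8 * ps256;
    }
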
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
index 06bf0a40e568..72781e1e3362 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
@@ -15,80 +15,49 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
+ "EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -99,7 +68,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -111,7 +80,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_OCCUR",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -122,7 +91,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -133,7 +102,17 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -144,7 +123,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -155,7 +134,39 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -165,7 +176,7 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_ALL_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -205,7 +216,7 @@
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -268,18 +279,7 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
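The DSB2MITE_SWITCHES.PENALTY_CYCLES event above feeds a simple derived
metric: the share of unhalted cycles lost to DSB-to-MITE switch penalties.
A minimal sketch in Python, assuming the two counts were already collected
(for example with "perf stat -e dsb2mite_switches.penalty_cycles,cycles";
perf lists these JSON event names in lower case) and using made-up readings:

# Sketch: share of cycles spent in DSB-to-MITE switch penalties.
def dsb2mite_penalty_share(penalty_cycles: int, total_cycles: int) -> float:
    """Fraction of unhalted cycles lost to DSB-to-MITE switch penalties."""
    return penalty_cycles / total_cycles if total_cycles else 0.0

# Hypothetical counter readings, for illustration only.
print(f"{dsb2mite_penalty_share(1_200_000, 150_000_000):.2%}")  # -> 0.80%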
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
index cfa1e5876ec3..e44f73c24ac8 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
@@ -95,7 +95,6 @@
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
"Counter": "0,1,2,3",
"EventName": "TX_EXEC.MISC1",
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -171,11 +170,11 @@
{
"EventCode": "0xc8",
"UMask": "0x4",
- "BriefDescription": "Number of times HLE abort was triggered",
+ "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -252,11 +251,11 @@
{
"EventCode": "0xc9",
"UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered",
+ "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
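The HLE_RETIRED.ABORTED and RTM_RETIRED.ABORTED events above are
PEBS-capable, but even as plain counts they are useful when paired with the
companion HLE_RETIRED.START / RTM_RETIRED.START events defined elsewhere in
this file. A minimal sketch of the derived abort rate, with made-up counts:

def abort_rate(aborted: int, started: int) -> float:
    """Fraction of transactional regions that ended in an abort."""
    return aborted / started if started else 0.0

# Hypothetical counts; in practice collect them with something like
# "perf stat -e rtm_retired.start,rtm_retired.aborted -- <workload>".
print(f"RTM abort rate: {abort_rate(4_200, 95_000):.1%}")  # -> 4.4%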
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/other.json b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
index 718fcb1db2ee..4475249ea9da 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
- "EventName": "CPL_CYCLES.RING123",
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EdgeDetect": "1",
"EventCode": "0x5C",
"UMask": "0x1",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x63",
"UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index 02b4e1035f2d..920c89da9111 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -3,31 +3,41 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -60,22 +70,33 @@
},
{
"EventCode": "0x0D",
- "UMask": "0x8",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x0D",
"UMask": "0x3",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
"CounterMask": "1",
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -90,6 +111,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x10",
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
@@ -118,18 +151,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0x14",
"UMask": "0x1",
"BriefDescription": "Cycles when divider is busy executing divide operations",
@@ -141,6 +162,26 @@
},
{
"EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
"UMask": "0x1",
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
"Counter": "0,1,2,3",
@@ -150,6 +191,36 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"UMask": "0x2",
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
@@ -159,6 +230,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4c",
"UMask": "0x1",
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
@@ -225,6 +305,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
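RS_EVENTS.EMPTY_END stacks three modifiers: inverting the RS-empty condition
with CounterMask=1 counts not-empty cycles, and EdgeDetect then counts the
transitions into them, i.e. the ends of empty periods. The same encoding can
be requested as a raw event. A minimal sketch of the IA32_PERFEVTSELx-style
config encoding, assuming the standard core-PMU bit layout (event, umask,
edge, inv, cmask):

def raw_config(event: int, umask: int, cmask: int = 0,
               inv: bool = False, edge: bool = False) -> int:
    """Encode an Intel core-PMU raw event selector."""
    cfg = event | (umask << 8) | (cmask << 24)
    if edge:
        cfg |= 1 << 18   # edge detect
    if inv:
        cfg |= 1 << 23   # invert the counter-mask comparison
    return cfg

# RS_EVENTS.EMPTY_END: event 0x5E, umask 0x1, cmask=1, inv=1, edge=1,
# i.e. roughly "perf stat -e cpu/event=0x5e,umask=0x1,cmask=1,inv=1,edge=1/".
print(hex(raw_config(0x5E, 0x1, cmask=1, inv=True, edge=True)))  # 0x184015e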
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -406,6 +498,15 @@
},
{
"EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
"UMask": "0xc1",
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
"Counter": "0,1,2,3",
@@ -435,6 +536,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA0",
+ "UMask": "0x3",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xA1",
"UMask": "0x1",
"BriefDescription": "Cycles per thread when uops are executed in port 0",
@@ -446,6 +557,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
@@ -456,6 +587,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
@@ -466,6 +617,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
@@ -476,6 +647,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
@@ -486,6 +677,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
@@ -496,6 +707,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
@@ -506,6 +737,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
@@ -515,6 +766,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xA2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
@@ -567,14 +838,13 @@
},
{
"EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA3",
@@ -589,8 +859,18 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x4",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"Counter": "0,1,2,3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"CounterMask": "4",
@@ -600,6 +880,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x5",
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
@@ -611,6 +901,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x6",
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
"Counter": "0,1,2,3",
@@ -622,6 +922,37 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "CounterMask": "8",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0xc",
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"Counter": "2",
@@ -632,12 +963,41 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "CounterMask": "12",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"UMask": "0x1",
"BriefDescription": "Number of Uops delivered by the LSD.",
"Counter": "0,1,2,3",
"EventName": "LSD.UOPS",
- "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
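Several of the entries above are one underlying event filtered by
CounterMask: the PMU increments the raw event every cycle, and a non-zero
mask turns the counter into "cycles in which the per-cycle count was >= mask".
That is why CYCLE_ACTIVITY.STALLS_TOTAL is event 0xA3/umask 0x4 with cmask 4,
and LSD.CYCLES_4_UOPS is LSD.UOPS with cmask 4. A minimal sketch of the
relation, using a made-up trace of uops delivered per cycle:

# A cmask-N cycles count is the number of cycles where the per-cycle
# increment was >= N, which is what the hardware comparison computes.
uops_per_cycle = [4, 4, 0, 3, 4, 1, 0, 4]  # hypothetical trace

def cycles_ge(trace: list, threshold: int) -> int:
    return sum(1 for u in trace if u >= threshold)

print(cycles_ge(uops_per_cycle, 1))  # LSD.CYCLES_ACTIVE analogue -> 6
print(cycles_ge(uops_per_cycle, 4))  # LSD.CYCLES_4_UOPS analogue -> 4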
@@ -652,6 +1012,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xB1",
"UMask": "0x2",
"BriefDescription": "Number of uops executed on the core.",
@@ -662,35 +1074,63 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "BDM61",
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "BDM61",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -707,6 +1147,16 @@
"CounterHTOff": "1"
},
{
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"UMask": "0x40",
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
@@ -718,23 +1168,12 @@
{
"EventCode": "0xC2",
"UMask": "0x1",
- "BriefDescription": "Actually retired uops.",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -746,7 +1185,7 @@
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -763,6 +1202,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x1",
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
@@ -773,6 +1223,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x4",
"BriefDescription": "Self-modifying code (SMC) detected.",
@@ -794,44 +1255,67 @@
},
{
"EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,18 +1325,18 @@
"BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
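Most BR_INST_RETIRED.* and BR_MISP_RETIRED.* entries in this hunk are being
relabelled as precise (PEBS) events, and ALL_BRANCHES_PEBS carries "PEBS": "2",
which marks an event intended to be used only with PEBS. With perf tooling,
precision is requested through the :p/:pp/:ppp event modifiers. A minimal
sketch that assembles such a sampling command; the event name and workload
here are illustrative assumptions:

import shlex

def record_precise(event: str, workload: list, precision: int = 2) -> list:
    """Build a perf record command sampling an event with PEBS precision."""
    return ["perf", "record", "-e", f"{event}:{'p' * precision}", "--"] + list(workload)

cmd = record_precise("br_misp_retired.conditional", ["sleep", "1"])
print(shlex.join(cmd))  # perf record -e br_misp_retired.conditional:pp -- sleep 1
# Run with subprocess.run(cmd) if perf and the PMU are available.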
@@ -863,34 +1347,11 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"Errata": "BDW98",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This event counts far branch instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "BDW98",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC5",
"UMask": "0x0",
"BriefDescription": "All mispredicted macro branch instructions retired.",
@@ -902,13 +1363,13 @@
},
{
"EventCode": "0xC5",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
- "SampleAfterValue": "100007",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -923,164 +1384,36 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
+ "EventCode": "0xC5",
"UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1090,328 +1423,5 @@
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA0",
- "UMask": "0x3",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "Counter": "0,1,2,3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
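The fields in these entries map one-to-one onto Intel's PERFEVTSEL register layout, which perf exposes as a raw event: EventCode occupies config bits 0-7, UMask bits 8-15, Edge bit 18, AnyThread bit 21, Invert bit 23, and CounterMask bits 24-31. Below is a minimal sketch, not part of the patch, of programming one of these events by hand; it assumes a Linux host with perf_event_open(2), and the event choice (UOPS_EXECUTED.CORE_CYCLES_GE_2, EventCode 0xB1/UMask 0x2/CounterMask 2) is illustrative only.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* EventCode 0xb1 | UMask 0x2 << 8 | CounterMask 2 << 24 */
	attr.config = 0xb1 | (0x2ULL << 8) | (2ULL << 24);
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles with >= 2 uops executed: %" PRIu64 "\n", count);
	close(fd);
	return 0;
}

The symbolic names defined in these JSON files exist so that perf stat -e uops_executed.core_cycles_ge_2 resolves to the same encoding without spelling out the raw bits.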
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
index 5ce8b67ba076..7d79c707c6d1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
@@ -45,6 +45,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -73,6 +83,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x49",
"UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
@@ -118,6 +137,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -146,6 +175,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4F",
"UMask": "0x10",
"BriefDescription": "Cycle count for an Extended Page table walk.",
@@ -201,6 +239,16 @@
},
{
"EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -229,6 +277,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xAE",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
@@ -250,60 +307,60 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x12",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x22",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x14",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x18",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
@@ -327,62 +384,5 @@
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
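The WALK_COMPLETED and STLB_HIT pairs added above are natural companions: one counts TLB misses that had to walk the page tables to completion, the other counts misses absorbed by the second-level TLB. A sketch, again assuming Linux perf_event_open(2), of reading both as a single perf event group so the two counts cover exactly the same interval; the raw encodings are taken from the JSON entries (EventCode 0x08, UMask 0xe and 0x60).

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_raw(uint64_t config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = config;
	attr.read_format = PERF_FORMAT_GROUP;
	attr.disabled = (group_fd == -1);	/* only the group leader starts disabled */
	attr.exclude_kernel = 1;
	return syscall(SYS_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
	uint64_t buf[3];	/* { nr, walk_completed, stlb_hit } */
	int leader, member;

	leader = open_raw(0x08 | (0xeULL << 8), -1);	  /* DTLB_LOAD_MISSES.WALK_COMPLETED */
	member = open_raw(0x08 | (0x60ULL << 8), leader); /* DTLB_LOAD_MISSES.STLB_HIT */
	if (leader < 0 || member < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);
	if (read(leader, buf, sizeof(buf)) == sizeof(buf))
		printf("completed walks %" PRIu64 ", STLB hits %" PRIu64 "\n",
		       buf[1], buf[2]);
	return 0;
}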
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index d1d043829b95..bf0c51272068 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -11,11 +11,28 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache.",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -31,6 +48,43 @@
},
{
"EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0x50",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
@@ -71,6 +125,15 @@
},
{
"EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0xf8",
"BriefDescription": "Requests from L2 hardware prefetchers",
"Counter": "0,1,2,3",
@@ -80,6 +143,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x27",
"UMask": "0x50",
"BriefDescription": "Not rejected writebacks that hit L2 cache",
@@ -131,6 +203,27 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x51",
"UMask": "0x1",
"BriefDescription": "L1D data line replacements",
@@ -153,12 +246,35 @@
},
{
"EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
"UMask": "0x2",
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -175,24 +291,24 @@
},
{
"EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -209,18 +325,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
@@ -266,7 +370,7 @@
"BriefDescription": "Demand and prefetch data reads",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -281,26 +385,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -308,37 +421,37 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -346,24 +459,24 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -371,69 +484,69 @@
{
"EventCode": "0xD1",
"UMask": "0x1",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -445,84 +558,83 @@
{
"EventCode": "0xD1",
"UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "BDE70, BDM100",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -534,7 +646,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -546,7 +658,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -695,119 +807,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "BDM76",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
@@ -816,6 +815,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -828,6 +828,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -840,6 +841,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -852,6 +854,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -864,6 +867,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -876,6 +880,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -888,6 +893,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -900,6 +906,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -912,6 +919,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -924,6 +932,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -936,6 +945,20 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3",
+ "MSRValue": "0x3f803c0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
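As the descriptions added above explain, OFFCORE_RESPONSE is matrix-programmed: event select 0xB7 (or 0xBB) with umask 0x1 picks one of the two offcore-response counters, and the MSRValue attribute mask is written to the matching dedicated MSR (0x1a6 or 0x1a7), which perf plumbs through as config1 (the offcore_rsp format field). A hedged sketch, not part of the patch, using the DEMAND_RFO.LLC_HIT.ANY_RESPONSE encoding from this file:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0xb7 | (0x01ULL << 8);	/* OFFCORE_RESPONSE_0 */
	attr.config1 = 0x3f803c0002ULL;		/* MSRValue -> MSR 0x1a6 (offcore_rsp) */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("demand RFOs hitting L3: %" PRIu64 "\n", count);
	close(fd);
	return 0;
}

With the sysfs format files in place, the equivalent command line is perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x3f803c0002/.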
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
index 4ae1ea24f22f..d7b9d9c9c518 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
@@ -6,7 +6,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -17,7 +17,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -25,7 +25,6 @@
"EventCode": "0xC7",
"UMask": "0x1",
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
@@ -35,7 +34,6 @@
"EventCode": "0xC7",
"UMask": "0x2",
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000003",
@@ -43,9 +41,17 @@
},
{
"EventCode": "0xC7",
+ "UMask": "0x3",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
"UMask": "0x4",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -55,7 +61,6 @@
"EventCode": "0xC7",
"UMask": "0x8",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -65,19 +70,54 @@
"EventCode": "0xC7",
"UMask": "0x10",
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC7",
+ "UMask": "0x15",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "UMask": "0x20",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2a",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3c",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xCA",
"UMask": "0x2",
"BriefDescription": "Number of X87 assists due to output value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -87,7 +127,7 @@
"BriefDescription": "Number of X87 assists due to input value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -97,7 +137,7 @@
"BriefDescription": "Number of SIMD FP assists due to Output values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -107,7 +147,7 @@
"BriefDescription": "Number of SIMD FP assists due to input values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -121,51 +161,5 @@
"PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xc7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
- "SampleAfterValue": "2000005",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
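The FP_ARITH_INST_RETIRED entries above are what FLOPS estimates are built from: each count represents the number of computations stated in its BriefDescription (1 per scalar op, 2/4 per 128-bit packed double/single, 4/8 per 256-bit packed double/single), and the combined umasks are simply ORs of the component ones (0x15 = scalar double + both packed-double umasks, 0x2a = the single-precision ones, 0x3c = all packed). A minimal sketch of that arithmetic, assuming counts have already been collected for the per-width sub-events (the scalar sub-events are defined earlier in this file; the numbers below are placeholders, not measurements):

  # Sketch: turn FP_ARITH_INST_RETIRED counts into a FLOP estimate using
  # the per-count widths stated in the BriefDescriptions above.
  WIDTH = {
      "SCALAR_DOUBLE": 1, "SCALAR_SINGLE": 1,
      "128B_PACKED_DOUBLE": 2, "128B_PACKED_SINGLE": 4,
      "256B_PACKED_DOUBLE": 4, "256B_PACKED_SINGLE": 8,
  }
  counts = {name: 1_000_000 for name in WIDTH}  # placeholder measurements
  flops = sum(WIDTH[name] * n for name, n in counts.items())
  # DPP and FM(N)ADD/SUB retire one count per two operations per element,
  # so this is a lower bound for FMA-heavy code, as the descriptions note.
  print(f"approx. {flops:,} floating-point operations")

The aggregate DOUBLE/SINGLE/PACKED events cannot be weighted this way, since one count may stand for different widths; that is why the per-width sub-events exist.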
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
index 06bf0a40e568..72781e1e3362 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
@@ -15,80 +15,49 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
+ "EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -99,7 +68,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -111,7 +80,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_OCCUR",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -122,7 +91,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -133,7 +102,17 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -144,7 +123,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -155,7 +134,39 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -165,7 +176,7 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_ALL_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -205,7 +216,7 @@
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -268,18 +279,7 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
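Worth noting how the rearranged 0x79 entries relate: IDQ.MS_UOPS, IDQ.MS_CYCLES and IDQ.MS_SWITCHES all use umask 0x30 and differ only in CounterMask and EdgeDetect. A cmask of 1 turns the uop count into "cycles with at least one uop delivered", and edge detect on top of that counts 0-to-1 transitions, i.e. switches to the Microcode Sequencer. A small illustrative sketch, assuming the standard Intel PERFEVTSELx bit layout (event select in bits 0-7, umask 8-15, edge 18, any-thread 21, invert 23, cmask 24-31); this is not how the jevents build actually emits its tables:

  # Sketch: fold the JSON fields above into a raw x86 PERFEVTSEL config,
  # usable as a raw perf event (e.g. "perf stat -e rNNNNNNN").
  def raw_config(ev):
      return (int(ev.get("EventCode", "0").split(",")[0], 16)
              | int(ev.get("UMask", "0"), 16) << 8
              | int(ev.get("EdgeDetect", "0")) << 18
              | int(ev.get("AnyThread", "0")) << 21
              | int(ev.get("Invert", "0")) << 23
              | int(ev.get("CounterMask", "0")) << 24)

  ms_uops     = {"EventCode": "0x79", "UMask": "0x30"}
  ms_cycles   = {"EventCode": "0x79", "UMask": "0x30", "CounterMask": "1"}
  ms_switches = {"EventCode": "0x79", "UMask": "0x30", "CounterMask": "1",
                 "EdgeDetect": "1"}
  for name, ev in [("IDQ.MS_UOPS", ms_uops), ("IDQ.MS_CYCLES", ms_cycles),
                   ("IDQ.MS_SWITCHES", ms_switches)]:
      print(f"{name:16s} -> r{raw_config(ev):x}")

The same trick appears elsewhere in this patch, e.g. UOPS_ISSUED.STALL_CYCLES, which combines cmask=1 with Invert to count cycles in which zero uops are issued.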
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
index 1204ea8ff30d..d79a5cfea44b 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
@@ -95,7 +95,6 @@
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
"Counter": "0,1,2,3",
"EventName": "TX_EXEC.MISC1",
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -171,11 +170,11 @@
{
"EventCode": "0xc8",
"UMask": "0x4",
- "BriefDescription": "Number of times HLE abort was triggered",
+ "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -252,11 +251,11 @@
{
"EventCode": "0xc9",
"UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered",
+ "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -439,6 +438,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -451,6 +451,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -463,6 +464,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -475,6 +477,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -487,6 +490,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -499,6 +503,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -511,6 +516,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -523,6 +529,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -535,6 +542,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -547,6 +555,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -559,6 +568,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -571,6 +581,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -583,6 +594,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -595,6 +607,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -607,6 +620,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -619,6 +633,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -631,6 +646,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -643,6 +659,20 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
+ "MSRValue": "0x3fbfc00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
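All of the OFFCORE_RESPONSE descriptions added above repeat the same caveat because these events genuinely are special: the request/response attributes do not fit in the usual event/umask pair, so the MSRValue is written to a dedicated MSR (0x1a6 or 0x1a7, per the MSRIndex field) paired with event 0xB7 or 0xBB. In perf, that auxiliary value is carried in attr.config1, exposed as the core PMU's offcore_rsp format attribute. A hedged sketch using the DEMAND_RFO.LLC_MISS.ANY_RESPONSE entry added by this hunk:

  # Sketch: an offcore-response event needs two pieces of programming:
  # the ordinary event/umask in attr.config, and the request/response
  # matrix value (MSRValue) in attr.config1, which the kernel writes to
  # MSR 0x1a6 or 0x1a7 on our behalf.
  event = {
      "EventCode": "0xB7, 0xBB",   # either offcore counter/MSR pair
      "UMask": "0x1",
      "MSRValue": "0x3fbfc00002",  # demand RFOs that miss in the L3
  }
  config  = (int(event["EventCode"].split(",")[0], 16)
             | int(event["UMask"], 16) << 8)
  config1 = int(event["MSRValue"], 16)
  print(f"attr.config=0x{config:x} attr.config1=0x{config1:x}")
  # Equivalent command line, via the core PMU's sysfs format attribute:
  print(f"perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp={event['MSRValue']}/")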
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/other.json b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
index 718fcb1db2ee..4475249ea9da 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
- "EventName": "CPL_CYCLES.RING123",
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EdgeDetect": "1",
"EventCode": "0x5C",
"UMask": "0x1",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x63",
"UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index 02b4e1035f2d..920c89da9111 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -3,31 +3,41 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -60,22 +70,33 @@
},
{
"EventCode": "0x0D",
- "UMask": "0x8",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x0D",
"UMask": "0x3",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
"CounterMask": "1",
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -90,6 +111,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x10",
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
@@ -118,18 +151,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0x14",
"UMask": "0x1",
"BriefDescription": "Cycles when divider is busy executing divide operations",
@@ -141,6 +162,26 @@
},
{
"EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
"UMask": "0x1",
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
"Counter": "0,1,2,3",
@@ -150,6 +191,36 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"UMask": "0x2",
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
@@ -159,6 +230,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4c",
"UMask": "0x1",
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
@@ -225,6 +305,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -406,6 +498,15 @@
},
{
"EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
"UMask": "0xc1",
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
"Counter": "0,1,2,3",
@@ -435,6 +536,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA0",
+ "UMask": "0x3",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xA1",
"UMask": "0x1",
"BriefDescription": "Cycles per thread when uops are executed in port 0",
@@ -446,6 +557,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
@@ -456,6 +587,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
@@ -466,6 +617,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
@@ -476,6 +647,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
@@ -486,6 +677,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
@@ -496,6 +707,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
@@ -506,6 +737,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
@@ -515,6 +766,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xA2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
@@ -567,14 +838,13 @@
},
{
"EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA3",
@@ -589,8 +859,18 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x4",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"Counter": "0,1,2,3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"CounterMask": "4",
@@ -600,6 +880,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x5",
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
@@ -611,6 +901,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x6",
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
"Counter": "0,1,2,3",
@@ -622,6 +922,37 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "CounterMask": "8",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0xc",
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"Counter": "2",
@@ -632,12 +963,41 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "CounterMask": "12",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"UMask": "0x1",
"BriefDescription": "Number of Uops delivered by the LSD.",
"Counter": "0,1,2,3",
"EventName": "LSD.UOPS",
- "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -652,6 +1012,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xB1",
"UMask": "0x2",
"BriefDescription": "Number of uops executed on the core.",
@@ -662,35 +1074,63 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "BDM61",
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "BDM61",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -707,6 +1147,16 @@
"CounterHTOff": "1"
},
{
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"UMask": "0x40",
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
@@ -718,23 +1168,12 @@
{
"EventCode": "0xC2",
"UMask": "0x1",
- "BriefDescription": "Actually retired uops.",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -746,7 +1185,7 @@
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -763,6 +1202,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x1",
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
@@ -773,6 +1223,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x4",
"BriefDescription": "Self-modifying code (SMC) detected.",
@@ -794,44 +1255,67 @@
},
{
"EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,18 +1325,18 @@
"BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -863,34 +1347,11 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"Errata": "BDW98",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This event counts far branch instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "BDW98",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC5",
"UMask": "0x0",
"BriefDescription": "All mispredicted macro branch instructions retired.",
@@ -902,13 +1363,13 @@
},
{
"EventCode": "0xC5",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
- "SampleAfterValue": "100007",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -923,164 +1384,36 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
+ "EventCode": "0xC5",
"UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1090,328 +1423,5 @@
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA0",
- "UMask": "0x3",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "Counter": "0,1,2,3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
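For reference, each entry above maps onto the x86 core PMU raw-event encoding that perf programs into IA32_PERFEVTSELx: config = EventCode | (UMask << 8) | (EdgeDetect << 18) | (AnyThread << 21) | (Invert << 23) | (CounterMask << 24). Below is a minimal sketch of counting one of these events by hand with perf_event_open(2); the event choice (CYCLE_ACTIVITY.STALLS_TOTAL) and the layout above are assumptions for illustration, not part of this patch.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* glibc provides no wrapper for perf_event_open; call it directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* CYCLE_ACTIVITY.STALLS_TOTAL: EventCode 0xA3, UMask 0x4,
	 * CounterMask 4 -> raw config 0x40004a3 under the layout above. */
	attr.config = 0xa3 | (0x4 << 8) | (4ULL << 24);
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return 1;
	printf("stall cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

Once this JSON ships, perf resolves the same encoding by name, e.g. perf stat -e cycle_activity.stalls_total -- <workload>.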
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
index 5ce8b67ba076..7d79c707c6d1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
@@ -45,6 +45,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -73,6 +83,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x49",
"UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
@@ -118,6 +137,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -146,6 +175,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4F",
"UMask": "0x10",
"BriefDescription": "Cycle count for an Extended Page table walk.",
@@ -201,6 +239,16 @@
},
{
"EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -229,6 +277,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xAE",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
@@ -250,60 +307,60 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x12",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x22",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x14",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x18",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
@@ -327,62 +384,5 @@
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
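The TLB entries added above carry no CounterMask/Invert/AnyThread modifiers, so their raw encodings reduce to EventCode | (UMask << 8). A hedged helper sketch follows; the rXXXX raw-event syntax for perf stat is standard, but the specific event picks (and the assumption that the full PERFEVTSEL bit layout applies) are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Assemble an x86 raw perf event config from the JSON fields, assuming
 * the standard PERFEVTSEL bit layout used by the core PMU. */
static uint64_t raw_config(uint64_t event, uint64_t umask, uint64_t cmask,
			   int inv, int edge, int any)
{
	return event | (umask << 8) | ((uint64_t)edge << 18) |
	       ((uint64_t)any << 21) | ((uint64_t)inv << 23) | (cmask << 24);
}

int main(void)
{
	/* DTLB_LOAD_MISSES.WALK_COMPLETED: EventCode 0x08, UMask 0xe -> re08 */
	printf("dtlb_load_misses.walk_completed: r%llx\n",
	       (unsigned long long)raw_config(0x08, 0xe, 0, 0, 0, 0));
	/* ITLB_MISSES.STLB_HIT: EventCode 0x85, UMask 0x60 -> r6085 */
	printf("itlb_misses.stlb_hit: r%llx\n",
	       (unsigned long long)raw_config(0x85, 0x60, 0, 0, 0, 0));
	return 0;
}

On a Broadwell part these could be counted as perf stat -e re08,r6085 -- <workload>, or by symbolic name once the table is installed.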
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
index 4e02e1e5e70d..f8bbe087b0f8 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
@@ -1,6 +1,26 @@
[
{
"CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache request misses"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
"EventCode": "0x30",
"Counter": "0,1,2,3",
@@ -11,120 +31,119 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
+ "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
"EventCode": "0x31",
"Counter": "0,1,2,3",
"UMask": "0x0",
"EventName": "CORE_REJECT_L2Q.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests rejected by the L2Q "
+ "BriefDescription": "Requests rejected by the L2Q"
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
- "EventCode": "0x2E",
+ "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
+ "EventCode": "0x51",
"Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "UMask": "0x1",
+ "EventName": "DL1.DIRTY_EVICTION",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache requests"
+ "BriefDescription": "L1 Cache evictions for dirty data"
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
- "EventCode": "0x2E",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
+ "EventCode": "0x86",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
+ "UMask": "0x2",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache request misses"
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts cycles that an ICache miss is outstanding, and instruction fetch is stalled. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes, while an Icache miss outstanding. Note this event is not the same as cycles to retrieve an instruction due to an Icache miss. Rather, it is the part of the Instruction Cache (ICache) miss time where no bytes are available for the decoder.",
- "EventCode": "0x86",
+ "EventCode": "0xB7",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where code-fetch is stalled and an ICache miss is outstanding. This is not the same as an ICache Miss."
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of load uops retired.",
+ "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired (Precise event capable)"
+ "BriefDescription": "Locked load uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of store uops retired.",
+ "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired (Precise event capable)"
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
+ "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x83",
- "EventName": "MEM_UOPS_RETIRED.ALL",
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired (Precise event capable)"
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts locked memory uops retired. This includes \"regular\" locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
+ "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "UMask": "0x43",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Locked load uops retired (Precise event capable)"
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
+ "PublicDescription": "Counts the number of load uops retired.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Load uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
+ "PublicDescription": "Counts the number of store uops retired.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Store uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
+ "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x43",
- "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "UMask": "0x83",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Memory uops retired (Precise event capable)"
},
{
"PEBS": "2",
@@ -140,24 +159,24 @@
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
+ "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
+ "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
},
{
"PEBS": "2",
@@ -205,24 +224,20 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DL1.DIRTY_EVICTION",
- "SampleAfterValue": "200003",
- "BriefDescription": "L1 Cache evictions for dirty data"
- },
- {
- "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
+ "MSRValue": "0x40000032b7 ",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
+ "MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x36000032b7 ",
"Counter": "0,1,2,3",
@@ -234,6 +249,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x10000032b7 ",
"Counter": "0,1,2,3",
@@ -245,6 +262,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x04000032b7 ",
"Counter": "0,1,2,3",
@@ -256,6 +275,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x02000032b7 ",
"Counter": "0,1,2,3",
@@ -267,6 +288,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x00000432b7 ",
"Counter": "0,1,2,3",
@@ -278,6 +301,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x00000132b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000022 ",
"Counter": "0,1,2,3",
@@ -289,6 +340,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000022 ",
"Counter": "0,1,2,3",
@@ -300,6 +353,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000022 ",
"Counter": "0,1,2,3",
@@ -311,6 +366,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000022 ",
"Counter": "0,1,2,3",
@@ -322,6 +379,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040022 ",
"Counter": "0,1,2,3",
@@ -333,6 +392,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600003091",
"Counter": "0,1,2,3",
@@ -344,6 +431,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000003091",
"Counter": "0,1,2,3",
@@ -355,6 +444,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400003091",
"Counter": "0,1,2,3",
@@ -366,6 +457,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200003091",
"Counter": "0,1,2,3",
@@ -377,6 +470,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000043091",
"Counter": "0,1,2,3",
@@ -388,6 +483,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600003010 ",
"Counter": "0,1,2,3",
@@ -399,6 +522,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000003010 ",
"Counter": "0,1,2,3",
@@ -410,6 +535,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400003010 ",
"Counter": "0,1,2,3",
@@ -421,6 +548,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200003010 ",
"Counter": "0,1,2,3",
@@ -432,6 +561,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000043010 ",
"Counter": "0,1,2,3",
@@ -443,6 +574,47 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000008000 ",
"Counter": "0,1,2,3",
@@ -454,6 +626,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400008000 ",
"Counter": "0,1,2,3",
@@ -465,6 +639,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200008000 ",
"Counter": "0,1,2,3",
@@ -476,6 +652,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000048000 ",
"Counter": "0,1,2,3",
@@ -487,6 +665,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000018000 ",
"Counter": "0,1,2,3",
@@ -498,6 +678,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600004800 ",
"Counter": "0,1,2,3",
@@ -509,6 +704,47 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000044800 ",
"Counter": "0,1,2,3",
@@ -520,6 +756,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000014800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600004000 ",
"Counter": "0,1,2,3",
@@ -531,6 +795,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000004000 ",
"Counter": "0,1,2,3",
@@ -542,6 +808,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400004000 ",
"Counter": "0,1,2,3",
@@ -553,6 +821,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200004000 ",
"Counter": "0,1,2,3",
@@ -564,6 +834,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000044000 ",
"Counter": "0,1,2,3",
@@ -575,6 +847,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000014000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600002000 ",
"Counter": "0,1,2,3",
@@ -586,6 +886,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000002000 ",
"Counter": "0,1,2,3",
@@ -597,6 +899,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400002000 ",
"Counter": "0,1,2,3",
@@ -608,6 +912,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200002000 ",
"Counter": "0,1,2,3",
@@ -619,6 +925,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000042000 ",
"Counter": "0,1,2,3",
@@ -630,6 +938,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000012000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600001000 ",
"Counter": "0,1,2,3",
@@ -641,6 +977,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000001000 ",
"Counter": "0,1,2,3",
@@ -652,6 +990,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400001000 ",
"Counter": "0,1,2,3",
@@ -663,6 +1003,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200001000 ",
"Counter": "0,1,2,3",
@@ -674,6 +1016,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000041000 ",
"Counter": "0,1,2,3",
@@ -685,6 +1029,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000011000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000800 ",
"Counter": "0,1,2,3",
@@ -696,6 +1068,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000800 ",
"Counter": "0,1,2,3",
@@ -707,6 +1081,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000800 ",
"Counter": "0,1,2,3",
@@ -718,6 +1094,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000800 ",
"Counter": "0,1,2,3",
@@ -729,6 +1107,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040800 ",
"Counter": "0,1,2,3",
@@ -740,6 +1120,99 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010400 ",
"Counter": "0,1,2,3",
@@ -751,6 +1224,112 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000100 ",
"Counter": "0,1,2,3",
@@ -762,6 +1341,86 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000080 ",
"Counter": "0,1,2,3",
@@ -773,6 +1432,86 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000020 ",
"Counter": "0,1,2,3",
@@ -784,6 +1523,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000020 ",
"Counter": "0,1,2,3",
@@ -795,6 +1536,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000020 ",
"Counter": "0,1,2,3",
@@ -806,6 +1549,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000020 ",
"Counter": "0,1,2,3",
@@ -817,6 +1562,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040020 ",
"Counter": "0,1,2,3",
@@ -828,6 +1575,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000010 ",
"Counter": "0,1,2,3",
@@ -839,6 +1614,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000010 ",
"Counter": "0,1,2,3",
@@ -850,6 +1627,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000010 ",
"Counter": "0,1,2,3",
@@ -861,6 +1640,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000010 ",
"Counter": "0,1,2,3",
@@ -872,6 +1653,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040010 ",
"Counter": "0,1,2,3",
@@ -883,6 +1666,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000008 ",
"Counter": "0,1,2,3",
@@ -894,6 +1705,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000008 ",
"Counter": "0,1,2,3",
@@ -905,6 +1718,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000008 ",
"Counter": "0,1,2,3",
@@ -916,6 +1731,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000008 ",
"Counter": "0,1,2,3",
@@ -927,6 +1744,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040008 ",
"Counter": "0,1,2,3",
@@ -938,6 +1757,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000004 ",
"Counter": "0,1,2,3",
@@ -949,6 +1783,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000004 ",
"Counter": "0,1,2,3",
@@ -960,6 +1796,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000004 ",
"Counter": "0,1,2,3",
@@ -971,6 +1822,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000004 ",
"Counter": "0,1,2,3",
@@ -982,6 +1835,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040004 ",
"Counter": "0,1,2,3",
@@ -993,6 +1848,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000002 ",
"Counter": "0,1,2,3",
@@ -1004,6 +1874,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000002 ",
"Counter": "0,1,2,3",
@@ -1015,6 +1887,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000002 ",
"Counter": "0,1,2,3",
@@ -1026,6 +1900,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000002 ",
"Counter": "0,1,2,3",
@@ -1037,6 +1913,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000002 ",
"Counter": "0,1,2,3",
@@ -1048,6 +1926,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040002 ",
"Counter": "0,1,2,3",
@@ -1059,6 +1939,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000001 ",
"Counter": "0,1,2,3",
@@ -1070,6 +1965,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000001 ",
"Counter": "0,1,2,3",
@@ -1081,6 +1978,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000001 ",
"Counter": "0,1,2,3",
@@ -1092,6 +1991,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000001 ",
"Counter": "0,1,2,3",
@@ -1103,6 +2004,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000001 ",
"Counter": "0,1,2,3",
@@ -1114,6 +2017,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040001 ",
"Counter": "0,1,2,3",
@@ -1123,5 +2028,18 @@
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
"Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
}
]
\ No newline at end of file
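
All of the OFFCORE_RESPONSE entries in this file share event code 0xB7 and umask 0x1; what distinguishes them is the MSRValue that the kernel programs into MSR_OFFCORE_RESP0 (0x1a6) or MSR_OFFCORE_RESP1 (0x1a7) to select the request type and response. With the perf_event_open(2) interface, the event/umask pair goes in attr.config and the offcore response value in attr.config1. The following is a minimal sketch in C, assuming a Goldmont part and using the OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE encoding from the file above (MSRValue 0x0000010001); counter availability and permissions depend on the CPU and kernel configuration:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static char buf[1 << 20];

int main(void)
{
	struct perf_event_attr attr;
	unsigned long i, sum = 0;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0xb7 | (0x1 << 8);	/* EventCode 0xB7, UMask 0x1 */
	attr.config1 = 0x0000010001ULL;		/* MSRValue: DEMAND_DATA_RD.ANY_RESPONSE */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	memset(buf, 1, sizeof(buf));		/* dirty the buffer ...  */
	for (i = 0; i < sizeof(buf); i += 64)
		sum += buf[i];			/* ... then read it back */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %llu (sum %lu)\n",
		       (unsigned long long)count, sum);
	close(fd);
	return 0;
}

The same skeleton covers any other entry in the file by substituting the MSRValue of interest into attr.config1.
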
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
index ac8b0d365a19..690cebd12a94 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
@@ -1,15 +1,5 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "SampleAfterValue": "200003",
- "BriefDescription": "Machine clears due to memory ordering issue"
- },
- {
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
@@ -30,5 +20,275 @@
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"SampleAfterValue": "200003",
"BriefDescription": "Store uops that split a page (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved as another core is in the process of modifying the data.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Machine clears due to memory ordering issue"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x20000032b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address.",
+ "Offcore": "1"
}
]
\ No newline at end of file
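
The MSRValue encodings in this file decompose into independent bit fields: the low word selects the request type (bit 0 = demand data read, bit 5 = L2 prefetcher RFO, and so on, matching the low word of each entry), while the upper bits qualify the response (0x0000010000 = any response, 0x0000040000 = L2 hit, and 0x2000000000 = the non-DRAM qualifier used throughout this file). A short C sketch that rebuilds one of the values above from named bits; the bit positions are inferred from the entries themselves rather than taken from an architectural reference:

#include <stdint.h>
#include <stdio.h>

/* Bit positions inferred from the MSRValue fields in this file;
 * treat them as illustrative, not as an authoritative MSR layout. */
#define REQ_DEMAND_DATA_RD	(1ULL << 0)	/* low word: request type */
#define RESP_ANY		(1ULL << 16)	/* 0x0000010000           */
#define RESP_L2_HIT		(1ULL << 18)	/* 0x0000040000           */
#define RESP_NON_DRAM		(1ULL << 37)	/* 0x2000000000           */

int main(void)
{
	/* Rebuild OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM. */
	uint64_t msrval = REQ_DEMAND_DATA_RD | RESP_NON_DRAM;

	printf("0x%010llx\n", (unsigned long long)msrval);	/* 0x2000000001 */
	return 0;
}
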
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/other.json b/tools/perf/pmu-events/arch/x86/goldmont/other.json
index df25ca9542f1..959cadd7cb0e 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/other.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/other.json
@@ -1,23 +1,23 @@
[
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
- "EventCode": "0xCA",
+ "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "EventCode": "0x86",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "UMask": "0x0",
+ "EventName": "FETCH_STALL.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ "BriefDescription": "Cycles code-fetch stalled due to any reason."
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
- "EventCode": "0xCA",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+ "EventCode": "0x86",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "UMask": "0x1",
+ "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle to recover"
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss."
},
{
"CollectPEBSRecord": "1",
@@ -30,14 +30,44 @@
"BriefDescription": "Unfilled issue slots per cycle"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle to recover"
+ },
+ {
"CollectPEBSRecord": "2",
"PublicDescription": "Counts hardware interrupts received by the processor.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "HW_INTERRUPTS.RECEIVED",
+ "SampleAfterValue": "203",
+ "BriefDescription": "Hardware interrupts received"
+ },
+ {
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HW_INTERRUPTS.MASKED",
"SampleAfterValue": "200003",
- "BriefDescription": "Hardware interrupts received (Precise event capable)"
+ "BriefDescription": "Cycles hardware interrupts are masked"
},
{
"CollectPEBSRecord": "2",
@@ -47,6 +77,6 @@
"UMask": "0x4",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles pending interrupts are masked (Precise event capable)"
+ "BriefDescription": "Cycles pending interrupts are masked"
}
]
\ No newline at end of file
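
HW_INTERRUPTS.MASKED counts core cycles with EFLAGS.IF clear, so dividing it by unhalted core cycles over the same window gives the fraction of time the thread ran with interrupts masked. The sketch below uses a perf counter group so both counts come from a single read; it is illustrative only, and since interrupts are masked in kernel context the events must be allowed to count kernel cycles (perf_event_paranoid permitting):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Open one raw Goldmont event on the current thread, in 'group'. */
static int open_raw(uint64_t config, int group)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = config;
	attr.read_format = PERF_FORMAT_GROUP;
	attr.disabled = (group == -1);		/* only the leader starts disabled */
	/* Interrupts are masked in kernel context, so do not set
	 * exclude_kernel; this needs perf_event_paranoid <= 1. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, group, 0);
}

int main(void)
{
	struct { uint64_t nr, val[2]; } data;
	int cyc, masked;

	cyc = open_raw(0x003c, -1);		/* CPU_CLK_UNHALTED.CORE_P */
	masked = open_raw(0x02cb, cyc);		/* HW_INTERRUPTS.MASKED    */
	if (cyc < 0 || masked < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(cyc, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	usleep(100000);				/* sample a 100 ms window  */
	ioctl(cyc, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

	if (read(cyc, &data, sizeof(data)) > 0 && data.val[0])
		printf("masked fraction: %.4f\n",
		       (double)data.val[1] / (double)data.val[0]);
	return 0;
}
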
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
index 07f00041f56f..254788af8ab6 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -1,168 +1,136 @@
[
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired branch instructions (Precise event capable)"
- },
- {
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "EventName": "BR_INST_RETIRED.JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 0",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Fixed event)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xfe",
- "EventName": "BR_INST_RETIRED.TAKEN_JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted (Fixed event)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near CALL branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xf9",
- "EventName": "BR_INST_RETIRED.CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired near call instructions (Precise event capable)"
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near relative CALL branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xfd",
- "EventName": "BR_INST_RETIRED.REL_CALL",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near relative call instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect CALL branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xfb",
- "EventName": "BR_INST_RETIRED.IND_CALL",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near return branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xf7",
- "EventName": "BR_INST_RETIRED.RETURN",
+ "UMask": "0x4",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near return instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xeb",
- "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.UTLB_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
+ "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xbf",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "UMask": "0x10",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired far branch instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked (Precise event capable)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
+ "EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
- },
- {
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "EventName": "BR_MISP_RETIRED.JCC",
+ "EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
+ "BriefDescription": "Uops issued to the back end per cycle"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xfe",
- "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Reference cycles when core is not halted. This event uses a programmable general purpose performance counter.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xfb",
- "EventName": "BR_MISP_RETIRED.IND_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
+ "EventCode": "0x9C",
"Counter": "0,1,2,3",
- "UMask": "0xf7",
- "EventName": "BR_MISP_RETIRED.RETURN",
+ "UMask": "0x0",
+ "EventName": "UOPS_NOT_DELIVERED.ANY",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
+ "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
},
{
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
+ "EventCode": "0xC0",
"Counter": "0,1,2,3",
- "UMask": "0xeb",
- "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)"
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Precise event capable)"
},
{
"PEBS": "2",
@@ -187,8 +155,40 @@
"BriefDescription": "MS uops retired (Precise event capable)"
},
{
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of floating point divide uops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Floating point divide uops retired. (Precise Event Capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of integer divide uops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Integer divide uops retired. (Precise Event Capable)"
+ },
+ {
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel? architecture processors.",
+ "PublicDescription": "Counts machine clears for any reason.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "MACHINE_CLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All machine clears"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -217,217 +217,239 @@
"BriefDescription": "Machine clears due to memory disambiguation"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears for any reason.",
- "EventCode": "0xC3",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EventName": "MACHINE_CLEARS.ALL",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "All machine clears"
+ "BriefDescription": "Retired branch instructions (Precise event capable)"
},
{
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
- "EventCode": "0xC0",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Precise event capable)"
+ "UMask": "0x7e",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
},
{
+ "PEBS": "2",
"CollectPEBSRecord": "1",
- "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
- "EventCode": "0x9C",
+ "PublicDescription": "Counts the number of taken branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "UOPS_NOT_DELIVERED.ANY",
+ "UMask": "0x80",
+ "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
+ "BriefDescription": "Retired taken branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
- "EventCode": "0x0E",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "UOPS_ISSUED.ANY",
+ "UMask": "0xbf",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops issued to the back end per cycle"
+ "BriefDescription": "Retired far branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles if either divide unit is busy.",
- "EventCode": "0xCD",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CYCLES_DIV_BUSY.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a divider is busy"
+ "UMask": "0xeb",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the integer divide unit is busy.",
- "EventCode": "0xCD",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near return branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLES_DIV_BUSY.IDIV",
+ "UMask": "0xf7",
+ "EventName": "BR_INST_RETIRED.RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles the integer divide unit is busy"
+ "BriefDescription": "Retired near return instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
- "EventCode": "0xCD",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near CALL branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLES_DIV_BUSY.FPDIV",
+ "UMask": "0xf9",
+ "EventName": "BR_INST_RETIRED.CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles the FP divide unit is busy"
+ "BriefDescription": "Retired near call instructions (Precise event capable)"
},
{
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Fixed event)"
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfb",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
},
{
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted (Fixed event)"
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near relative CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfd",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near relative call instructions (Precise event capable)"
},
{
- "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 3",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfe",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
- "EventCode": "0x3C",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.CORE_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted"
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
- "EventCode": "0x3C",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted"
+ "UMask": "0x7e",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
- "EventCode": "0xE6",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BACLEARS.ALL",
+ "UMask": "0xeb",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for any branch type"
+ "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on return instructions.",
- "EventCode": "0xE6",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BACLEARS.RETURN",
+ "UMask": "0xf7",
+ "EventName": "BR_MISP_RETIRED.RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for return branch"
+ "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
- "EventCode": "0xE6",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BACLEARS.COND",
+ "UMask": "0xfb",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for conditional branch"
+ "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
- "EventCode": "0x03",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "UMask": "0xfe",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked (Precise event capable)"
+ "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles if either divide unit is busy.",
+ "EventCode": "0xCD",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.UTLB_MISS",
+ "UMask": "0x0",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a divider is busy"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the integer divide unit is busy.",
+ "EventCode": "0xCD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLES_DIV_BUSY.IDIV",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
+ "BriefDescription": "Cycles the integer divide unit is busy"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
+ "EventCode": "0xCD",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "EventName": "CYCLES_DIV_BUSY.FPDIV",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
+ "BriefDescription": "Cycles the FP divide unit is busy"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
+ "EventCode": "0xE6",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "EventName": "BACLEARS.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
+ "BriefDescription": "BACLEARs asserted for any branch type"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts BACLEARS on return instructions.",
+ "EventCode": "0xE6",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "LD_BLOCKS.4K_ALIAS",
+ "UMask": "0x8",
+ "EventName": "BACLEARS.RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
+ "BriefDescription": "BACLEARs asserted for return branch"
},
{
- "PEBS": "2",
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of taken branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
+ "EventCode": "0xE6",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
+ "UMask": "0x10",
+ "EventName": "BACLEARS.COND",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired taken branch instructions (Precise event capable)"
+ "BriefDescription": "BACLEARs asserted for conditional branch"
}
] \ No newline at end of file
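
These tables are what give perf its symbolic event names once they are compiled into the tool. As a rough sketch of what an entry boils down to at the syscall level — assuming a Goldmont part and the usual x86 raw-event encoding of attr.config = (UMask << 8) | EventCode — the UOPS_NOT_DELIVERED.ANY entry above could be counted directly with perf_event_open(2):

/*
 * Minimal sketch, not part of the patch: count Goldmont
 * UOPS_NOT_DELIVERED.ANY (EventCode 0x9C, UMask 0x0 per the JSON
 * entry above) as a raw hardware event on the calling thread.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x00ULL << 8) | 0x9C;	/* UMask 0x0, EventCode 0x9C */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	int fd = syscall(SYS_perf_event_open, &attr, 0 /* self */, -1 /* any CPU */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under test would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	uint64_t count = 0;
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("uops not delivered: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

With these tables built into perf, `perf stat -e uops_not_delivered.any` should resolve to the same encoding; the raw spelling would be something like `perf stat -e r9c`.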
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
index 3202c4478836..9805198d3f5f 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
@@ -1,6 +1,36 @@
[
{
"CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of D-side page-walks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of I-side pagewalks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of page-walks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
"EventCode": "0x81",
"Counter": "0,1,2,3",
@@ -41,35 +71,5 @@
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"SampleAfterValue": "200003",
"BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Duration of D-side page-walks in cycles"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Duration of I-side pagewalks in cycles"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Duration of page-walks in cycles"
}
] \ No newline at end of file
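
Note how the three PAGE_WALKS entries being moved share EventCode 0x05 and differ only in UMask: the combined event's UMask 0x3 is the bitwise union of the D-side (0x1) and I-side (0x2) bits. A small sketch of how the raw configs line up, under the same (UMask << 8) | EventCode encoding assumed above:

#include <stdint.h>
#include <assert.h>

/* x86 raw perf encoding: attr.config = (umask << 8) | event_select. */
static uint64_t raw_event(uint8_t event, uint8_t umask)
{
	return ((uint64_t)umask << 8) | event;
}

int main(void)
{
	uint64_t d_side = raw_event(0x05, 0x1);	/* PAGE_WALKS.D_SIDE_CYCLES */
	uint64_t i_side = raw_event(0x05, 0x2);	/* PAGE_WALKS.I_SIDE_CYCLES */
	uint64_t all    = raw_event(0x05, 0x3);	/* PAGE_WALKS.CYCLES        */

	/* UMask 0x3 covers walks from both sources, so the combined
	 * config is the OR of the two single-source configs. */
	assert(all == (d_side | i_side));
	return 0;
}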
diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json
index bfb5ebf48c54..da4d6ddd4f92 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json
@@ -11,14 +11,34 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x27",
"Errata": "HSD78",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "Demand requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -32,6 +52,48 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "All requests that missed L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -73,6 +135,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Demand requests to L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe7",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -83,6 +156,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "All requests to L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All L2 requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Not rejected writebacks that hit L2 cache.",
"EventCode": "0x27",
"Counter": "0,1,2,3",
@@ -124,6 +208,27 @@
},
{
"EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
@@ -133,13 +238,13 @@
},
{
"EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
"CounterMask": "1",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
@@ -163,6 +268,28 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Offcore outstanding Demand code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
@@ -185,46 +312,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "UMask": "0x4",
"Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78, HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "UMask": "0x8",
"Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -289,6 +405,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PEBS": "1",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
@@ -296,7 +421,7 @@
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -308,7 +433,7 @@
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
@@ -321,31 +446,33 @@
"Errata": "HSD76, HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
@@ -358,19 +485,20 @@
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts all store uops retired. This is a precise event.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x82",
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
@@ -401,20 +529,20 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops missed L1 cache as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -427,20 +555,18 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"Errata": "HSD29, HSM30",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -477,25 +603,27 @@
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -513,14 +641,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
"UMask": "0x1",
"Errata": "HSD74, HSD29, HSD25, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100003",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -665,6 +792,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "",
"EventCode": "0xf4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -674,131 +802,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand requests that miss L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand requests to L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "All requests that missed L2.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "All requests to L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "All L2 requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78, HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c8fff",
"Counter": "0,1,2,3",
@@ -811,6 +815,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c07f7",
"Counter": "0,1,2,3",
@@ -823,6 +828,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c07f7",
"Counter": "0,1,2,3",
@@ -835,6 +841,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0244",
"Counter": "0,1,2,3",
@@ -847,6 +854,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0122",
"Counter": "0,1,2,3",
@@ -859,6 +867,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0122",
"Counter": "0,1,2,3",
@@ -871,6 +880,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0091",
"Counter": "0,1,2,3",
@@ -883,6 +893,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0091",
"Counter": "0,1,2,3",
@@ -895,6 +906,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0200",
"Counter": "0,1,2,3",
@@ -907,6 +919,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0100",
"Counter": "0,1,2,3",
@@ -919,6 +932,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0080",
"Counter": "0,1,2,3",
@@ -931,6 +945,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0040",
"Counter": "0,1,2,3",
@@ -943,6 +958,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0020",
"Counter": "0,1,2,3",
@@ -955,6 +971,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0010",
"Counter": "0,1,2,3",
@@ -967,6 +984,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0004",
"Counter": "0,1,2,3",
@@ -979,6 +997,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0004",
"Counter": "0,1,2,3",
@@ -991,6 +1010,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0002",
"Counter": "0,1,2,3",
@@ -1003,6 +1023,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0002",
"Counter": "0,1,2,3",
@@ -1015,6 +1036,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0001",
"Counter": "0,1,2,3",
@@ -1027,6 +1049,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0001",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
index 1732fa49c6d2..f9843e5a9b42 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
@@ -20,6 +20,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
+ "EventCode": "0xC6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "AVX_INSTS.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of X87 FP assists due to output values.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
@@ -69,15 +79,5 @@
"BriefDescription": "Cycles with any input/output SSE or FP assist",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
- "EventCode": "0xC6",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "AVX_INSTS.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/frontend.json b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
index 57a1ce46971f..c0a5bedcc15c 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
@@ -21,74 +21,43 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -135,6 +104,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -157,6 +136,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops delivered to IDQ from any path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -195,6 +206,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event count the number of undelivered (unallocated) uops from the Front-end to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. The Front-end can allocate up to 4 uops per cycle so this event can increment 0-4 times per cycle depending on the number of unallocated uops. This event is counted on a per-core basis.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
@@ -270,25 +290,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
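The frontend.json hunks above reorder the IDQ.* entries by UMask without altering their encodings. The DSB, MITE and MS delivery counts defined in this file are commonly combined into a DSB-coverage ratio; the sketch below assumes that conventional formula (it is not defined by this patch) and uses made-up counter readings.

    def dsb_coverage(dsb_uops, mite_uops, ms_uops):
        # Fraction of IDQ uops delivered from the Decode Stream Buffer,
        # from IDQ.DSB_UOPS, IDQ.MITE_UOPS and IDQ.MS_UOPS readings.
        delivered = dsb_uops + mite_uops + ms_uops
        return dsb_uops / delivered if delivered else 0.0

    # Invented readings for illustration only.
    print("%.1f%%" % (100 * dsb_coverage(8_000_000, 1_500_000, 500_000)))  # 80.0%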
diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json
index aab981b42339..e5f9fa6655b3 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json
@@ -401,6 +401,7 @@
"CounterHTOff": "3"
},
{
+ "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc08fff",
"Counter": "0,1,2,3",
@@ -413,6 +414,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01004007f7",
"Counter": "0,1,2,3",
@@ -425,6 +427,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc007f7",
"Counter": "0,1,2,3",
@@ -437,6 +440,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400244",
"Counter": "0,1,2,3",
@@ -449,6 +453,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00244",
"Counter": "0,1,2,3",
@@ -461,6 +466,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400122",
"Counter": "0,1,2,3",
@@ -473,6 +479,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00122",
"Counter": "0,1,2,3",
@@ -485,6 +492,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400091",
"Counter": "0,1,2,3",
@@ -497,6 +505,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00091",
"Counter": "0,1,2,3",
@@ -509,6 +518,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00200",
"Counter": "0,1,2,3",
@@ -521,6 +531,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00100",
"Counter": "0,1,2,3",
@@ -533,6 +544,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00080",
"Counter": "0,1,2,3",
@@ -545,6 +557,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00040",
"Counter": "0,1,2,3",
@@ -557,6 +570,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00020",
"Counter": "0,1,2,3",
@@ -569,6 +583,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00010",
"Counter": "0,1,2,3",
@@ -581,6 +596,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400004",
"Counter": "0,1,2,3",
@@ -593,6 +609,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00004",
"Counter": "0,1,2,3",
@@ -605,6 +622,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400002",
"Counter": "0,1,2,3",
@@ -617,6 +635,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00002",
"Counter": "0,1,2,3",
@@ -629,6 +648,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400001",
"Counter": "0,1,2,3",
@@ -641,6 +661,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00001",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/other.json b/tools/perf/pmu-events/arch/x86/haswell/other.json
index 85d6a14baf9d..8a4d898d76c1 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x5C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -31,6 +21,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
index 0099848607ad..a4dcfce4a512 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
@@ -2,33 +2,43 @@
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"Errata": "HSD140, HSD143",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
@@ -67,7 +77,19 @@
"UMask": "0x3",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -82,6 +104,29 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -112,35 +157,32 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x14",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ARITH.DIVIDER_UOPS",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -154,6 +196,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -163,6 +237,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
"EventCode": "0x4c",
"Counter": "0,1,2,3",
@@ -233,6 +316,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
"EventCode": "0x87",
"Counter": "0,1,2,3",
@@ -409,6 +504,15 @@
{
"EventCode": "0x89",
"Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
"UMask": "0xc1",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -445,6 +549,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -455,6 +579,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -465,6 +609,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -475,6 +638,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -485,6 +667,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -495,6 +697,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -505,6 +727,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -515,6 +757,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles allocation is stalled due to resource related reason.",
"EventCode": "0xA2",
"Counter": "0,1,2,3",
@@ -566,17 +827,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
"PublicDescription": "Cycles with pending memory loads. Set Cmask=2 to count cycle.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
@@ -594,7 +844,7 @@
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
@@ -621,6 +871,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"EventCode": "0xA3",
"Counter": "2",
@@ -642,14 +903,23 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
- "EventCode": "0xB1",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -665,24 +935,127 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of instructions at retirement.",
- "EventCode": "0xC0",
+ "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "HSD11, HSD140",
- "EventName": "INST_RETIRED.ANY_P",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of uops executed on the core.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "INST_RETIRED.X87",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instructions at retirement.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "HSD11, HSD140",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -698,6 +1071,16 @@
"CounterHTOff": "1"
},
{
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
@@ -709,7 +1092,6 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -720,17 +1102,6 @@
"Data_LA": "1"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -765,6 +1136,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -774,6 +1155,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -793,8 +1185,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -814,18 +1215,27 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Branch instructions at retirement.",
+ "PEBS": "1",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
"BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of near return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -846,7 +1256,6 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of near taken branches retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -866,14 +1275,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "EventCode": "0xC4",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
@@ -886,16 +1295,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "2",
"PublicDescription": "This event counts all mispredicted branch instructions retired. This is a precise event.",
"EventCode": "0xC5",
@@ -903,121 +1302,11 @@
"UMask": "0x4",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "1",
- "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -1027,51 +1316,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
+ "EventCode": "0xCC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
@@ -1082,248 +1334,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
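A note on the fields shuffled above: EventCode, UMask, CounterMask, Invert, EdgeDetect and AnyThread map onto the x86 PERFEVTSEL layout that perf raw events expose. Below is a minimal Python sketch of that encoding, assuming the conventional bit positions (event select in bits 0-7, umask in 8-15, edge in 18, any-thread in 21, invert in 23, cmask in 24-31); verify against the SDM before relying on it.

    # Sketch: encode a pmu-events JSON entry as a perf raw event value.
    # The bit positions are the conventional x86 PERFEVTSEL layout and are
    # an assumption here; check the SDM / perf docs for your CPU.
    def raw_config(event):
        cfg = int(event["EventCode"].split(",")[0], 16)   # bits 0-7
        cfg |= int(event["UMask"], 16) << 8               # bits 8-15
        if event.get("EdgeDetect") == "1":
            cfg |= 1 << 18                                # edge detect
        if event.get("AnyThread") == "1":
            cfg |= 1 << 21                                # any-thread
        if event.get("Invert") == "1":
            cfg |= 1 << 23                                # invert cmask
        cfg |= int(event.get("CounterMask", "0")) << 24   # bits 24-31
        return cfg

    # LSD.CYCLES_4_UOPS from the hunk above: EventCode 0xA8, UMask 0x1,
    # CounterMask 4.
    print("perf stat -e r%x" % raw_config(
        {"EventCode": "0xA8", "UMask": "0x1", "CounterMask": "4"}))
    # prints: perf stat -e r40001a8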
diff --git a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
index ce80a08d0f08..777b500a5c9f 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
@@ -39,6 +39,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
@@ -69,6 +79,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
@@ -118,6 +138,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -148,6 +178,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -206,6 +246,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Completed page walks in ITLB of any page size.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
@@ -236,6 +286,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
"EventCode": "0xae",
"Counter": "0,1,2,3",
@@ -256,41 +316,45 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "UMask": "0x12",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "BriefDescription": "Number of DTLB page walker hits in the L2",
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "UMask": "0x14",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of DTLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "UMask": "0x18",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "BriefDescription": "Number of DTLB page walker hits in Memory",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "UMask": "0x21",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L2",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
"CounterHTOff": "0,1,2,3"
},
{
@@ -304,43 +368,43 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "UMask": "0x24",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "UMask": "0x28",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x14",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "UMask": "0x41",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "UMask": "0x42",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -355,41 +419,37 @@
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "UMask": "0x48",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of DTLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "UMask": "0x81",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in Memory",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of ITLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x28",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "UMask": "0x82",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x48",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "UMask": "0x84",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -420,65 +480,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Completed page walks in ITLB of any page size.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "ITLB misses that hit STLB. No page walk.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
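Most of the churn in this file is a re-sort: the entries added in the hunks above replace identical copies removed from the tail. A quick duplicate scan over the resulting JSON is one way to check that nothing was doubled or dropped; the sketch below assumes the tree layout shown in the diff headers.

    # Sketch: flag duplicate event definitions in a pmu-events JSON file.
    # The path is illustrative; point it at any file touched by this patch.
    import json
    from collections import Counter

    def find_duplicates(path):
        with open(path) as f:
            events = json.load(f)
        keys = Counter((e.get("EventCode"), e.get("UMask"), e.get("EventName"))
                       for e in events)
        return [key for key, count in keys.items() if count > 1]

    for dup in find_duplicates(
            "tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json"):
        print("duplicate entry:", dup)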
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
index f1bae0817a6f..b2fbd617306a 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
@@ -12,12 +12,32 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"Errata": "HSD78",
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -33,6 +53,48 @@
},
{
"EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "Errata": "HSD78",
+ "PublicDescription": "All requests that missed L2.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0x50",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
@@ -74,6 +136,17 @@
},
{
"EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0xf8",
"BriefDescription": "Requests from L2 hardware prefetchers",
"Counter": "0,1,2,3",
@@ -83,6 +156,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "Errata": "HSD78",
+ "PublicDescription": "All requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x27",
"UMask": "0x50",
"BriefDescription": "Not rejected writebacks that hit L2 cache",
@@ -124,6 +208,27 @@
},
{
"EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
"UMask": "0x2",
"BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
"Counter": "0,1,2,3",
@@ -133,13 +238,13 @@
},
{
"EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"CounterMask": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x51",
@@ -164,6 +269,28 @@
},
{
"EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "HSD78, HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
"UMask": "0x2",
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
@@ -186,23 +313,23 @@
},
{
"EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
"Errata": "HSD62, HSD61",
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "Errata": "HSD62, HSD61",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -218,17 +345,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
@@ -289,9 +405,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -303,20 +428,20 @@
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"Errata": "HSD29, HSM30",
- "SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
+ "SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -328,32 +453,34 @@
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"Errata": "HSD29, HSM30",
+ "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"Errata": "HSD29, HSM30",
- "SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
+ "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
+ "SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -365,14 +492,15 @@
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"Errata": "HSD29, HSM30",
- "SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
+ "PublicDescription": "This event counts all store uops retired. This is a precise event.",
+ "SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
@@ -402,13 +530,13 @@
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -421,20 +549,19 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"Errata": "HSM30",
- "PublicDescription": "Retired load uops missed L1 cache as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"Errata": "HSD29, HSM30",
- "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -447,7 +574,6 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -478,24 +604,26 @@
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
@@ -514,20 +642,19 @@
{
"EventCode": "0xD3",
"UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "HSD74, HSD29, HSD25, HSM30",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -539,7 +666,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -551,7 +678,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -706,135 +833,11 @@
"BriefDescription": "Split locks in SQ",
"Counter": "0,1,2,3",
"EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "Errata": "HSD78",
- "PublicDescription": "Demand requests that miss L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "Errata": "HSD78",
- "PublicDescription": "Demand requests to L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "Errata": "HSD78",
- "PublicDescription": "All requests that missed L2.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "Errata": "HSD78",
- "PublicDescription": "All requests to L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "HSD78, HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
@@ -843,6 +846,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -855,6 +859,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -867,6 +872,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -879,6 +885,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -891,6 +898,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -903,6 +911,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -915,6 +924,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -927,6 +937,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -939,6 +950,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -951,6 +963,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -963,6 +976,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -975,6 +989,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -987,6 +1002,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -999,6 +1015,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1011,6 +1028,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1023,6 +1041,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1035,6 +1054,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1047,6 +1067,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1059,6 +1080,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1071,6 +1093,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
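The OFFCORE_RESPONSE entries above are the only ones in this file that take a second programming value, the request/response mask written to MSR 0x1a6 or 0x1a7 (the MSRIndex field). Assuming a perf build that exposes that value through the offcore_rsp format attribute (config1), as typical x86 setups do, an event string can be put together as in this sketch; the mask value is purely illustrative.

    # Sketch: assemble a perf event string for an OFFCORE_RESPONSE event.
    # "offcore_rsp" as the config1 format attribute is an assumption about
    # the perf build in use, and 0x10003c0001 is an illustrative mask.
    def offcore_event(event_code, umask, offcore_rsp):
        return "cpu/event=%#x,umask=%#x,offcore_rsp=%#x/" % (
            event_code, umask, offcore_rsp)

    print(offcore_event(0xB7, 0x1, 0x10003C0001))
    # prints: cpu/event=0xb7,umask=0x1,offcore_rsp=0x10003c0001/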
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
index 6282aed6e090..bc08cc1f2f7e 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
@@ -20,6 +20,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC6",
+ "UMask": "0x7",
+ "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "Counter": "0,1,2,3",
+ "EventName": "AVX_INSTS.ALL",
+ "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xCA",
"UMask": "0x2",
"BriefDescription": "Number of X87 assists due to output value.",
@@ -69,15 +79,5 @@
"PublicDescription": "Cycles with any input/output SSE* or FP assists.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC6",
- "UMask": "0x7",
- "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
- "Counter": "0,1,2,3",
- "EventName": "AVX_INSTS.ALL",
- "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
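As elsewhere in the patch, the AVX_INSTS.ALL move follows the (EventCode, UMask) ordering the files converge on. The sketch below reproduces that key; treating multi-code entries such as "0xB7, 0xBB" as sorting by their first code mirrors the observed layout rather than any documented rule.

    # Sketch: re-sort a pmu-events file by (EventCode, UMask), the order
    # these hunks establish. The path is illustrative.
    import json

    def sort_key(event):
        code = int(event["EventCode"].split(",")[0], 16)
        return (code, int(event.get("UMask", "0x0"), 16))

    with open("tools/perf/pmu-events/arch/x86/haswellx/floating-point.json") as f:
        events = json.load(f)
    for e in sorted(events, key=sort_key):
        print(e["EventName"])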
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
index 2d0c7aac1e61..a4d9f1fcf940 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
@@ -22,72 +22,41 @@
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
+ "EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -136,6 +105,16 @@
},
{
"EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
"UMask": "0x24",
"BriefDescription": "Cycles MITE is delivering 4 Uops",
"Counter": "0,1,2,3",
@@ -158,6 +137,38 @@
},
{
"EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
"UMask": "0x3c",
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
@@ -195,6 +206,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x80",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x9C",
"UMask": "0x1",
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
@@ -270,25 +290,5 @@
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x80",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
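
The frontend entries touched above are consumed by perf's jevents build, which folds each JSON field into a raw x86 event encoding following the Intel IA32_PERFEVTSELx bit layout. A minimal Python sketch of that mapping, applied to the IDQ.MS_SWITCHES entry added above (the helper and the standalone dict are illustrative, not part of this patch):

# Sketch: fold pmu-events JSON fields into a raw PERFEVTSEL-style config,
# the same value perf builds for "cpu/event=0x79,umask=0x30,edge=1,cmask=1/".
def raw_config(event):
    cfg = int(event["EventCode"], 16) & 0xff                  # bits 0-7: event select
    cfg |= (int(event["UMask"], 16) & 0xff) << 8              # bits 8-15: unit mask
    cfg |= int(event.get("EdgeDetect", "0")) << 18            # bit 18: edge detect
    cfg |= int(event.get("AnyThread", "0")) << 21             # bit 21: any-thread
    cfg |= int(event.get("Invert", "0")) << 23                # bit 23: invert cmask
    cfg |= (int(event.get("CounterMask", "0")) & 0xff) << 24  # bits 24-31: cmask
    return cfg

ms_switches = {
    "EventCode": "0x79",
    "UMask": "0x30",
    "EdgeDetect": "1",
    "CounterMask": "1",
}
print(hex(raw_config(ms_switches)))  # 0x1043079

The Edge=1/Cmask=1 pairing turns the level condition "MS is busy" into a count of discrete 0-to-1 transitions, which is why IDQ.MS_SWITCHES carries both fields.
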
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
index 0886cc000d22..56b0f24b8029 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
@@ -409,6 +409,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -421,6 +422,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -433,6 +435,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -445,6 +448,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -457,6 +461,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -469,6 +474,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -481,6 +487,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -493,6 +500,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -505,6 +513,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -517,6 +526,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -529,6 +539,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -541,6 +552,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -553,6 +565,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -565,6 +578,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -577,6 +591,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -589,6 +604,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -601,6 +617,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -613,6 +630,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -625,6 +643,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -637,6 +656,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -649,6 +669,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -661,6 +682,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -673,6 +695,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -685,6 +708,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -697,6 +721,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -709,6 +734,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -721,6 +747,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -733,6 +760,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
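
The OFFCORE_RESPONSE.* entries documented above differ from ordinary events in that the MSRIndex field names the auxiliary MSR pair (0x1a6/0x1a7) holding the request/response filter. A short Python sketch of how a tool might pick those MSR-backed events out of the file (the path is an assumption about the checkout layout):

import json

# List the offcore-response events, which need one of the two
# dedicated filter MSRs in addition to a general-purpose counter.
with open("tools/perf/pmu-events/arch/x86/haswellx/memory.json") as f:
    events = json.load(f)

for ev in events:
    if "0x1a6" in ev.get("MSRIndex", ""):
        print(ev["EventName"], "uses MSR(s)", ev["MSRIndex"])
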
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/other.json b/tools/perf/pmu-events/arch/x86/haswellx/other.json
index 4e1b6ce96ca3..800e65df31bc 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
- "EventName": "CPL_CYCLES.RING123",
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EdgeDetect": "1",
"EventCode": "0x5C",
"UMask": "0x1",
@@ -31,6 +21,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x63",
"UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
index c3a163d34bd7..8a18bfe9e3e4 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
@@ -3,32 +3,42 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"Errata": "HSD140, HSD143",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -63,7 +73,7 @@
{
"EventCode": "0x0D",
"UMask": "0x3",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"CounterMask": "1",
@@ -72,6 +82,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0D",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x1",
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
@@ -82,6 +104,29 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x10",
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
@@ -112,34 +157,31 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "EventCode": "0x14",
+ "UMask": "0x2",
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
+ "EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
- "AnyThread": "1",
- "CounterMask": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x14",
- "UMask": "0x2",
- "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "0,1,2,3",
- "EventName": "ARITH.DIVIDER_UOPS",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -154,6 +196,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"UMask": "0x2",
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
@@ -163,6 +237,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4c",
"UMask": "0x1",
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
@@ -233,6 +316,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -408,6 +503,15 @@
},
{
"EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
"UMask": "0xc1",
"BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"Counter": "0,1,2,3",
@@ -446,6 +550,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
@@ -456,6 +580,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
@@ -466,6 +610,25 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
@@ -476,6 +639,25 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
@@ -486,6 +668,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
@@ -496,6 +698,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
@@ -506,6 +728,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
@@ -515,6 +757,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xA2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
@@ -567,17 +828,6 @@
},
{
"EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
"UMask": "0x2",
"BriefDescription": "Cycles with pending memory loads.",
"Counter": "0,1,2,3",
@@ -590,7 +840,7 @@
{
"EventCode": "0xA3",
"UMask": "0x4",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"Counter": "0,1,2,3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"CounterMask": "4",
@@ -622,6 +872,17 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0xc",
"BriefDescription": "Execution stalls due to L1 data cache misses",
"Counter": "2",
@@ -642,13 +903,22 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE",
- "Errata": "HSD30, HSM31",
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -665,23 +935,126 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "HSD11, HSD140",
- "PublicDescription": "Number of instructions at retirement.",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "Errata": "HSD144, HSD30, HSM31",
+ "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "Errata": "HSD144, HSD30, HSM31",
+ "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "Errata": "HSD144, HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "Errata": "HSD30, HSM31",
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "CounterMask": "1",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "HSD11, HSD140",
+ "PublicDescription": "Number of instructions at retirement.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -698,6 +1071,16 @@
"CounterHTOff": "1"
},
{
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"UMask": "0x40",
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
@@ -715,18 +1098,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -765,6 +1136,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x1",
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
@@ -774,6 +1155,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x4",
"BriefDescription": "Self-modifying code (SMC) detected.",
@@ -794,12 +1186,21 @@
},
{
"EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Branch instructions at retirement.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x1",
"BriefDescription": "Conditional branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -815,13 +1216,23 @@
},
{
"EventCode": "0xC4",
- "UMask": "0x0",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x4",
"BriefDescription": "All (macro) branch instructions retired.",
+ "PEBS": "2",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Branch instructions at retirement.",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC4",
@@ -830,7 +1241,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts the number of near return instructions retired.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -851,7 +1261,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near taken branches retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -866,14 +1275,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired.",
- "PEBS": "2",
+ "EventCode": "0xC5",
+ "UMask": "0x0",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
"SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
@@ -887,18 +1296,8 @@
},
{
"EventCode": "0xC5",
- "UMask": "0x0",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC5",
"UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"PEBS": "2",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
@@ -907,171 +1306,24 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC5",
"UMask": "0x20",
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
- "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1082,248 +1334,5 @@
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
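Editor's illustration, not part of the patch: each JSON entry above compiles (via perf's jevents step) into an event perf can use by name, and its fields map directly onto the standard x86 `cpu//` format terms (`event`, `umask`, `cmask`, `inv`, `edge`, `any`). A minimal sketch of that mapping, using one entry copied from the hunks above:

```python
# Sketch: map a pmu-events JSON entry onto perf's "cpu/event=...,umask=.../"
# syntax. Field names come from the entries in this diff; the term names are
# perf's standard x86 format terms. Not part of the patch.
import json

entry = json.loads("""
{
  "EventCode": "0x3C",
  "UMask": "0x2",
  "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
  "SampleAfterValue": "2000003",
  "CounterHTOff": "0,1,2,3,4,5,6,7"
}
""")

def to_perf_event(e):
    # "EventCode" may list several codes ("0xB7, 0xBB"); take the first.
    code = int(e["EventCode"].split(",")[0], 16)
    terms = [f"event={code:#x}", f"umask={int(e['UMask'], 16):#x}"]
    # Optional fields map one-to-one onto perf format terms.
    if int(e.get("CounterMask", "0")):
        terms.append(f"cmask={int(e['CounterMask'])}")
    if e.get("Invert") == "1":
        terms.append("inv")
    if e.get("EdgeDetect") == "1":
        terms.append("edge")
    if e.get("AnyThread") == "1":
        terms.append("any")
    return "cpu/" + ",".join(terms) + "/"

# Prints: cpu_clk_unhalted.one_thread_active -> cpu/event=0x3c,umask=0x2/
print(entry["EventName"].lower(), "->", to_perf_event(entry))
```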
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
index 9c00f8ef6a07..168df552b1a8 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
@@ -40,6 +40,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -70,6 +80,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x80",
"BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
"Counter": "0,1,2,3",
@@ -119,6 +139,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -149,6 +179,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x80",
"BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
"Counter": "0,1,2,3",
@@ -207,6 +247,16 @@
},
{
"EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in ITLB of any page size.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -236,6 +286,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xae",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
@@ -257,39 +317,43 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
- "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x41",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x81",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of DTLB page walker loads from memory.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x12",
- "BriefDescription": "Number of DTLB page walker hits in the L2",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
- "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -305,41 +369,41 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x42",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x82",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "UMask": "0x28",
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x14",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "UMask": "0x41",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
- "Errata": "HSD25",
- "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "UMask": "0x42",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
- "Errata": "HSD25",
- "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -354,41 +418,37 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x84",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "UMask": "0x48",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x18",
- "BriefDescription": "Number of DTLB page walker hits in Memory",
+ "UMask": "0x81",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
- "Errata": "HSD25",
- "PublicDescription": "Number of DTLB page walker loads from memory.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x28",
- "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "UMask": "0x82",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
- "Errata": "HSD25",
- "PublicDescription": "Number of ITLB page walker loads from memory.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x48",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "UMask": "0x84",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -420,65 +480,5 @@
"PublicDescription": "Count number of STLB flush attempts.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks in ITLB of any page size.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "PublicDescription": "ITLB misses that hit STLB. No page walk.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
index f1ee6d4853c5..999a01bc6467 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -10,6 +10,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "RFO requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -30,6 +40,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -50,6 +70,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -70,36 +100,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 store RFO requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 code requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Counts all L2 HW prefetcher requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -219,6 +219,29 @@
"CounterHTOff": "2"
},
{
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of lines brought into the L1 data cache.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
@@ -239,76 +262,87 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -379,7 +413,7 @@
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -389,7 +423,7 @@
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -399,7 +433,7 @@
"UMask": "0x21",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -409,7 +443,7 @@
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -419,7 +453,7 @@
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -429,7 +463,7 @@
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -439,67 +473,61 @@
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L1 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L2 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source followed an L1 miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources following L1 data-cache miss",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops that missed L2, excluding unknown sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source is LLC miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -510,61 +538,56 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. ",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache LLC hit and cross-core snoop missed.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package LLC hit and cross-core snoop hits.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache with HitM responses.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -753,50 +776,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0244",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
index de72b84b3536..efaa949ead31 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
@@ -20,76 +20,45 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -138,6 +107,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -160,6 +139,39 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops delivered to IDQ from any path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -206,7 +218,7 @@
"UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled ",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"CounterHTOff": "0,1,2,3"
},
{
@@ -289,17 +301,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
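Editor's illustration, not part of the patch: many hunks in these files only re-sort entries into EventCode order — the same objects removed at the tail of a file reappear verbatim earlier in it (compare the DTLB/ITLB additions and removals in virtual-memory.json above). A quick sketch for checking whether two revisions of one of these files differ only in entry order; `old.json` and `new.json` are placeholder paths:

```python
# Sketch: report whether two revisions of a pmu-events JSON file hold the
# same set of event entries, i.e. the diff between them is a pure reorder.
# "old.json"/"new.json" are placeholders for the two file revisions.
import json

def entry_set(path):
    with open(path) as f:
        events = json.load(f)
    # Serialize each entry with sorted keys so key order inside an
    # entry is irrelevant; sort the list so file order is irrelevant.
    return sorted(json.dumps(e, sort_keys=True) for e in events)

old, new = entry_set("old.json"), entry_set("new.json")
print("pure reorder" if old == new else "content changed")
```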
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
index e1c6a1d4a4d5..a74d54f56192 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
@@ -39,18 +39,6 @@
},
{
"PEBS": "2",
- "EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
- "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
- "PEBS": "2",
"PublicDescription": "Loads with latency value being above 4.",
"EventCode": "0xCD",
"MSRValue": "0x4",
@@ -162,6 +150,18 @@
"CounterHTOff": "3"
},
{
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x300400244",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/other.json b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
index 9c2dd0511a32..4eb83ee40412 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
"EventCode": "0x5C",
"Counter": "0,1,2,3",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
index 2145c28193f7..0afbfd95ea30 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
@@ -1,30 +1,41 @@
[
{
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
@@ -78,6 +89,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -175,6 +197,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
@@ -187,6 +220,36 @@
{
"EventCode": "0x3C",
"Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -194,6 +257,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
"EventCode": "0x4C",
"Counter": "0,1,2,3",
@@ -216,37 +288,37 @@
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -260,6 +332,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -498,118 +582,118 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "UMask": "0xc",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "UMask": "0x30",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -662,15 +746,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
@@ -684,13 +767,33 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Total execution stalls.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
@@ -708,6 +811,16 @@
{
"EventCode": "0xA3",
"Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
"UMask": "0x6",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
"SampleAfterValue": "2000003",
@@ -716,6 +829,37 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"EventCode": "0xA3",
"Counter": "2",
@@ -727,6 +871,16 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -747,6 +901,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -757,6 +922,61 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -767,15 +987,59 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"EventCode": "0xB1",
- "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Number of instructions at retirement.",
@@ -809,24 +1073,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of micro-ops retired, Use cmask=1 and invert to count active cycles or stalled cycles.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Counts the number of retirement slots used each cycle.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used. ",
+ "BriefDescription": "Retired uops.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -864,6 +1116,27 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of self-modifying-code machine clears detected.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -880,50 +1153,67 @@
"UMask": "0x20",
"EventName": "MACHINE_CLEARS.MASKMOV",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0. ",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired. ",
+ "BriefDescription": "Conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Direct and indirect near call instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired. ",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Branch instructions at retirement.",
+ "PEBS": "1",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
"BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of near return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired. ",
+ "BriefDescription": "Return instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -933,18 +1223,17 @@
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired. ",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Number of near taken branches retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired. ",
+ "BriefDescription": "Taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -954,28 +1243,7 @@
"UMask": "0x40",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired. ",
+ "BriefDescription": "Far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -990,13 +1258,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Mispredicted taken branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1010,6 +1277,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Count cases of saving new LBR records by hardware.",
"EventCode": "0xCC",
"Counter": "0,1,2,3",
@@ -1028,280 +1305,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
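
perf's jevents build step compiles the JSON entries above into C event tables; at runtime the numeric fields land in the x86 PERFEVTSELx register (event select in bits 0-7, umask in bits 8-15, edge detect in bit 18, invert in bit 23, counter mask in bits 24-31). A minimal Python sketch of that packing, assuming the Intel SDM bit positions; the encode() helper and the hard-coded path are illustrative, not part of the perf tooling:

import json

def encode(event):
    # Pack one JSON event entry into a raw PERFEVTSEL-style config value
    # (bit positions per the Intel SDM; assumed, not taken from perf code).
    config = int(event["EventCode"].split(",")[0], 16)   # event select, bits 0-7
    config |= int(event["UMask"], 16) << 8               # unit mask, bits 8-15
    config |= int(event.get("EdgeDetect", "0")) << 18    # edge detect, bit 18
    config |= int(event.get("Invert", "0")) << 23        # invert, bit 23
    config |= int(event.get("CounterMask", "0")) << 24   # cmask, bits 24-31
    return config

with open("tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json") as f:
    for ev in json.load(f):
        if ev["EventName"] == "RS_EVENTS.EMPTY_END":
            print(f"{ev['EventName']}: config=0x{encode(ev):x}")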
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
index f036f5398906..f243551b4d12 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
@@ -1,5 +1,35 @@
[
{
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x88",
@@ -146,35 +176,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
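
The three DTLB_LOAD_MISSES entries moved to the top of this file combine into a useful derived metric: WALK_COMPLETED counts finished page walks and WALK_DURATION counts the cycles the page miss handler (PMH) spent walking, so their ratio is the average walk cost. A hedged sketch; the event names come from the file above, while the counter values are placeholders for what perf stat would report:

walks_completed = 9_500      # DTLB_LOAD_MISSES.WALK_COMPLETED (placeholder count)
walk_duration = 1_250_000    # DTLB_LOAD_MISSES.WALK_DURATION, PMH-busy cycles (placeholder)
cycles = 40_000_000          # unhalted core cycles over the same window (placeholder)

print(f"avg cycles per completed walk: {walk_duration / walks_completed:.1f}")
print(f"PMH busy on demand-load walks: {100.0 * walk_duration / cycles:.2f}% of cycles")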
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/cache.json b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
index ff27a620edd8..6dad3ad6b102 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
@@ -10,6 +10,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "RFO requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -30,6 +40,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -50,6 +70,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -70,36 +100,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 store RFO requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 code requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Counts all L2 HW prefetcher requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -219,6 +219,29 @@
"CounterHTOff": "2"
},
{
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of lines brought into the L1 data cache.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
@@ -239,76 +262,87 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -379,7 +413,7 @@
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -389,7 +423,7 @@
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -399,7 +433,7 @@
"UMask": "0x21",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -409,7 +443,7 @@
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -419,7 +453,7 @@
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -429,7 +463,7 @@
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -439,67 +473,61 @@
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L1 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L2 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source followed an L1 miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources following L1 data-cache miss",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops that missed L2, excluding unknown sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source is LLC miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -510,67 +538,61 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. ",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache LLC hit and cross-core snoop missed.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package LLC hit and cross-core snoop hits.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache with HitM responses.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "UMask": "0x3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
+ "BriefDescription": "Retired load uops whose data source was local DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
"CounterHTOff": "0,1,2,3"
},
{
@@ -780,40 +802,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x4003c0091",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
index de72b84b3536..efaa949ead31 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
@@ -20,76 +20,45 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -138,6 +107,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -160,6 +139,39 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops delivered to IDQ from any path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -206,7 +218,7 @@
"UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled ",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"CounterHTOff": "0,1,2,3"
},
{
@@ -289,17 +301,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
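
The attributes in these event records map one-to-one onto bit fields of the x86 IA32_PERFEVTSELx MSR, which is also the encoding PERF_TYPE_RAW expects: EventCode occupies bits 0-7, UMask bits 8-15, EdgeDetect bit 18, AnyThread bit 21, Invert bit 23 and CounterMask bits 24-31. As a minimal sketch (error handling trimmed; the event values are taken from the IDQ.MS_SWITCHES record above, everything else is illustrative), such an event can be counted by hand:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

/* Pack the JSON fields into the IA32_PERFEVTSELx layout consumed by
 * PERF_TYPE_RAW on x86. */
static uint64_t x86_raw(uint64_t event, uint64_t umask, uint64_t edge,
			uint64_t inv, uint64_t cmask)
{
	return (event & 0xff) | ((umask & 0xff) << 8) |
	       (edge << 18) | (inv << 23) | ((cmask & 0xff) << 24);
}

int main(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_RAW,
		.size		= sizeof(attr),
		/* IDQ.MS_SWITCHES from the JSON above:
		 * EventCode 0x79, UMask 0x30, EdgeDetect=1, CounterMask=1 */
		.config		= x86_raw(0x79, 0x30, 1, 0, 1),
		.disabled	= 1,
		.exclude_kernel	= 1,
	};
	int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);

	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under test ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	uint64_t count;
	read(fd, &count, sizeof(count));
	printf("IDQ.MS_SWITCHES: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

From the shell, the perf tool accepts the same fields symbolically, e.g. perf stat -e cpu/event=0x79,umask=0x30,edge,cmask=1/ -- <workload>.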
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/memory.json b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
index 437d98f3e344..3a7b86af8816 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
@@ -30,18 +30,6 @@
},
{
"PEBS": "2",
- "EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
- "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
- "PEBS": "2",
"PublicDescription": "Loads with latency value being above 4.",
"EventCode": "0xCD",
"MSRValue": "0x4",
@@ -153,6 +141,18 @@
"CounterHTOff": "3"
},
{
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00244",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/other.json b/tools/perf/pmu-events/arch/x86/ivytown/other.json
index 9c2dd0511a32..4eb83ee40412 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
"EventCode": "0x5C",
"Counter": "0,1,2,3",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
index 2145c28193f7..0afbfd95ea30 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
@@ -1,30 +1,41 @@
[
{
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
@@ -78,6 +89,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -175,6 +197,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
@@ -187,6 +220,36 @@
{
"EventCode": "0x3C",
"Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -194,6 +257,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
"EventCode": "0x4C",
"Counter": "0,1,2,3",
@@ -216,37 +288,37 @@
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -260,6 +332,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -498,118 +582,118 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "UMask": "0xc",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "UMask": "0x30",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -662,15 +746,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
@@ -684,13 +767,33 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Total execution stalls.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
@@ -708,6 +811,16 @@
{
"EventCode": "0xA3",
"Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
"UMask": "0x6",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
"SampleAfterValue": "2000003",
@@ -716,6 +829,37 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"EventCode": "0xA3",
"Counter": "2",
@@ -727,6 +871,16 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -747,6 +901,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -757,6 +922,61 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -767,15 +987,59 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"EventCode": "0xB1",
- "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Number of instructions at retirement.",
@@ -809,24 +1073,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of micro-ops retired, Use cmask=1 and invert to count active cycles or stalled cycles.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Counts the number of retirement slots used each cycle.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used. ",
+ "BriefDescription": "Retired uops.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -864,6 +1116,27 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of self-modifying-code machine clears detected.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -880,50 +1153,67 @@
"UMask": "0x20",
"EventName": "MACHINE_CLEARS.MASKMOV",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0. ",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired. ",
+ "BriefDescription": "Conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Direct and indirect near call instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired. ",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Branch instructions at retirement.",
+ "PEBS": "1",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
"BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of near return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired. ",
+ "BriefDescription": "Return instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -933,18 +1223,17 @@
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired. ",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Number of near taken branches retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired. ",
+ "BriefDescription": "Taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -954,28 +1243,7 @@
"UMask": "0x40",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired. ",
+ "BriefDescription": "Far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -990,13 +1258,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Mispredicted taken branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1010,6 +1277,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Count cases of saving new LBR records by hardware.",
"EventCode": "0xCC",
"Counter": "0,1,2,3",
@@ -1028,280 +1305,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
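
The fixed-counter renumbering above is a zero-indexing fix: INST_RETIRED.ANY lives on fixed counter 0, CPU_CLK_UNHALTED.THREAD on fixed counter 1 and CPU_CLK_UNHALTED.REF_TSC on fixed counter 2. Those three counters back perf's generic hardware events, so a basic IPC reading needs no raw encodings at all. A minimal sketch (error handling omitted; the busy-loop workload is illustrative, and whether the kernel actually schedules these onto the fixed counters is the PMU driver's decision):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

/* Open a generic hardware event on the calling thread; the group leader
 * starts disabled so the whole group begins counting at the same instant. */
static int open_ev(uint64_t config, int group_fd)
{
	struct perf_event_attr a = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(a),
		.config		= config,
		.disabled	= group_fd == -1,
		.read_format	= PERF_FORMAT_GROUP,
	};
	return syscall(SYS_perf_event_open, &a, 0, -1, group_fd, 0);
}

int main(void)
{
	int lead = open_ev(PERF_COUNT_HW_INSTRUCTIONS, -1);	/* fixed ctr 0 */

	open_ev(PERF_COUNT_HW_CPU_CYCLES, lead);		/* fixed ctr 1 */
	open_ev(PERF_COUNT_HW_REF_CPU_CYCLES, lead);		/* fixed ctr 2 */

	ioctl(lead, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	for (volatile long i = 0; i < 10000000; i++)
		;						/* workload */
	ioctl(lead, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

	/* PERF_FORMAT_GROUP: leader value first, then members in open order. */
	struct { uint64_t nr, val[3]; } r;
	read(lead, &r, sizeof(r));
	printf("IPC = %.2f (%llu instructions, %llu cycles)\n",
	       (double)r.val[0] / (double)r.val[1],
	       (unsigned long long)r.val[0], (unsigned long long)r.val[1]);
	return 0;
}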
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
index c8de548b78fa..4645e9d3f460 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
@@ -1,5 +1,15 @@
[
{
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x82",
@@ -9,6 +19,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x84",
@@ -18,6 +38,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x88",
@@ -164,35 +194,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index fe1a2c47cabf..93656f2fd53a 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -23,10 +23,7 @@ GenuineIntel-6-1E,v2,nehalemep,core
GenuineIntel-6-1F,v2,nehalemep,core
GenuineIntel-6-1A,v2,nehalemep,core
GenuineIntel-6-2E,v2,nehalemex,core
-GenuineIntel-6-4E,v24,skylake,core
-GenuineIntel-6-5E,v24,skylake,core
-GenuineIntel-6-8E,v24,skylake,core
-GenuineIntel-6-9E,v24,skylake,core
+GenuineIntel-6-[4589]E,v24,skylake,core
GenuineIntel-6-37,v13,silvermont,core
GenuineIntel-6-4D,v13,silvermont,core
GenuineIntel-6-4C,v13,silvermont,core
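
perf treats the first column of mapfile.csv as a regular expression matched against the running CPU's vendor-family-model identifier, which is why the four Skylake client rows can be folded into a single bracket expression without changing which event tables are selected. A small self-contained illustration of that match (the explicit anchoring and the sample identifiers are assumptions for clarity, not a claim about perf's own matcher):

#include <regex.h>
#include <stdio.h>

int main(void)
{
	const char *pat = "^GenuineIntel-6-[4589]E$";
	const char *ids[] = {
		"GenuineIntel-6-4E", "GenuineIntel-6-5E",
		"GenuineIntel-6-8E", "GenuineIntel-6-9E",
		"GenuineIntel-6-55",	/* Skylake server: must NOT match */
	};
	regex_t re;

	if (regcomp(&re, pat, REG_EXTENDED | REG_NOSUB))
		return 1;
	for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("%-18s -> %s\n", ids[i],
		       regexec(&re, ids[i], 0, NULL, 0) ? "no match" : "match");
	regfree(&re);
	return 0;
}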
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
index 0bd1bc5302de..82be7d1b8b81 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
@@ -36,12 +36,13 @@
"BriefDescription": "L2 cache request misses"
},
{
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events. \r\n",
"EventCode": "0x86",
"Counter": "0,1",
"UMask": "0x4",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles the NIP stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses."
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
},
{
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json
index 0551a9ba865d..54bfe9e4045c 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json
@@ -1,442 +1,6 @@
[
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that miss the STLB.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that miss the STLB.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions with locked access.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load instructions.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store instructions.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L1 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L1 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions missed L2 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand\n from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss outstandings duration in cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts both cacheable and noncachaeble code read requests.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, and so on.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Any memory transaction that reached the SQ.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "Errata": "SKL057",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "Errata": "SKL057",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of cache line split locks sent to the uncore.",
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of cache line split locks sent to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
"UMask": "0x21",
@@ -446,122 +10,123 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "RFO requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe1",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
+ "BriefDescription": "L2 cache misses when fetching instructions",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe2",
- "EventName": "L2_RQSTS.ALL_RFO",
+ "UMask": "0x27",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
+ "BriefDescription": "Demand requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the total number of L2 code requests.",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe4",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "UMask": "0x38",
+ "EventName": "L2_RQSTS.PF_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "PublicDescription": "All requests that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xf8",
- "EventName": "L2_RQSTS.ALL_PF",
+ "UMask": "0x3f",
+ "EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "BriefDescription": "All requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
+ "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x38",
- "EventName": "L2_RQSTS.PF_MISS",
+ "UMask": "0x41",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xd8",
- "EventName": "L2_RQSTS.PF_HIT",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "BriefDescription": "RFO requests that hit L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "RFO requests that hit L2 cache.",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "RFO requests that miss L2 cache.",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "UMask": "0xd8",
+ "EventName": "L2_RQSTS.PF_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "UMask": "0xe1",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "BriefDescription": "Demand Data Read requests",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "L2 cache misses when fetching instructions.",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "UMask": "0xe2",
+ "EventName": "L2_RQSTS.ALL_RFO",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
+ "BriefDescription": "RFO requests to L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Demand requests that miss L2 cache.",
+ "PublicDescription": "Counts the total number of L2 code requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x27",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "UMask": "0xe4",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache",
+ "BriefDescription": "L2 code requests",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -575,13 +140,13 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "All requests that miss L2 cache.",
+ "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "L2_RQSTS.MISS",
+ "UMask": "0xf8",
+ "EventName": "L2_RQSTS.ALL_PF",
"SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -595,62 +160,45 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_OUT.SILENT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_OUT.NON_SILENT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
- "EventCode": "0xF2",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "EventCode": "0x2E",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.USELESS_PREF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "UMask": "0x41",
+ "Errata": "SKL057",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "EventCode": "0x2E",
"Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "L2_LINES_IN.ALL",
+ "UMask": "0x4f",
+ "Errata": "SKL057",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
+ "BriefDescription": "L1D miss outstandings duration in cycles",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -666,3121 +214,483 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
+ "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.USELESS_HWPF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000018000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400002 ",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "EventCode": "0x51",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400002 ",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400002 ",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0002 ",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0002 ",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0002 ",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0002 ",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0002 ",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0002 ",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0002 ",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand and prefetch data reads",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100002 ",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x80",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100002 ",
+ "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
+ "EventCode": "0xB2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100002 ",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions that miss the STLB.",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x11",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions that miss the STLB. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired store instructions that miss the STLB.",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x12",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired store instructions that miss the STLB. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x21",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions with locked access. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x41",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x42",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x81",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load instructions. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080002 ",
+ "PEBS": "1",
+ "PublicDescription": "All retired store instructions.",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x82",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store instructions. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040002 ",
+ "PEBS": "1",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020002 ",
+ "PEBS": "1",
+ "EventCode": "0xD4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020002 ",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "EventCode": "0xF0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020002 ",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "EventCode": "0xF1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x1f",
+ "EventName": "L2_LINES_IN.ALL",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "L2 cache lines filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020002 ",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L2_LINES_OUT.SILENT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020002 ",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020002 ",
+ "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.USELESS_PREF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020002 ",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010002 ",
+ "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
+ "EventCode": "0xF4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of cache line split locks sent to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc0400001 ",
"Counter": "0,1,2,3",
@@ -3793,6 +703,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000400001 ",
"Counter": "0,1,2,3",
@@ -3805,6 +716,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400400001 ",
"Counter": "0,1,2,3",
@@ -3817,6 +729,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200400001 ",
"Counter": "0,1,2,3",
@@ -3829,6 +742,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400001 ",
"Counter": "0,1,2,3",
@@ -3841,6 +755,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080400001 ",
"Counter": "0,1,2,3",
@@ -3853,18 +768,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc01c0001 ",
"Counter": "0,1,2,3",
@@ -3877,6 +781,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10001c0001 ",
"Counter": "0,1,2,3",
@@ -3889,6 +794,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04001c0001 ",
"Counter": "0,1,2,3",
@@ -3901,6 +807,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02001c0001 ",
"Counter": "0,1,2,3",
@@ -3913,6 +820,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01001c0001 ",
"Counter": "0,1,2,3",
@@ -3925,6 +833,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00801c0001 ",
"Counter": "0,1,2,3",
@@ -3937,270 +846,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc0020001 ",
"Counter": "0,1,2,3",
@@ -4213,6 +859,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020001 ",
"Counter": "0,1,2,3",
@@ -4225,6 +872,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020001 ",
"Counter": "0,1,2,3",
@@ -4237,6 +885,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020001 ",
"Counter": "0,1,2,3",
@@ -4249,6 +898,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020001 ",
"Counter": "0,1,2,3",
@@ -4261,6 +911,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020001 ",
"Counter": "0,1,2,3",
@@ -4273,18 +924,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010001 ",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
index 3c6b59af5d54..213dd6230cf2 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
@@ -27,13 +27,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -55,7 +54,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1e",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
index e697dbd63e6e..578dff5bd823 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -1,62 +1,81 @@
[
{
- "EventCode": "0x80",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "ICACHE_16B.IFDATA_STALL",
+ "EventName": "IDQ.MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x83",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x83",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x83",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -66,95 +85,99 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "CounterMask": "4",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
+ "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "EventCode": "0x80",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "UMask": "0x4",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
+ "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "CounterMask": "4",
+ "UMask": "0x1",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
+ "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x83",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread\n\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions)\n \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -164,7 +187,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -175,7 +198,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -186,6 +209,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -196,6 +220,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -217,7 +242,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -228,6 +253,7 @@
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
"EventCode": "0xC6",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
@@ -235,7 +261,7 @@
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -248,7 +274,7 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -261,12 +287,13 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
"EventCode": "0xC6",
"MSRValue": "0x14",
"Counter": "0,1,2,3",
@@ -274,12 +301,13 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"EventCode": "0xC6",
"MSRValue": "0x15",
"Counter": "0,1,2,3",
@@ -287,7 +315,7 @@
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -300,7 +328,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -313,7 +341,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -326,34 +354,13 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
"EventCode": "0xC6",
"MSRValue": "0x400806",
"Counter": "0,1,2,3",
@@ -367,6 +374,7 @@
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
"EventCode": "0xC6",
"MSRValue": "0x401006",
"Counter": "0,1,2,3",
@@ -374,12 +382,13 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
"EventCode": "0xC6",
"MSRValue": "0x402006",
"Counter": "0,1,2,3",
@@ -387,7 +396,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -400,7 +409,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -413,7 +422,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -426,7 +435,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -439,12 +448,13 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
"EventCode": "0xC6",
"MSRValue": "0x100206",
"Counter": "0,1,2,3",
@@ -452,7 +462,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
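
Comparing the MSRValue encodings in this family suggests the MSR_PEBS_FRONTEND (0x3F7) field layout: bits 7:0 are the sub-event select (0x06 for the bubble/latency family), bits 19:8 the bubble-length threshold in cycles, and bits 22:20 the bubble width, so LATENCY_GE_16 = 0x401006 is a 16-cycle threshold at width 4 (all issue slots empty) while LATENCY_GE_2_BUBBLES_GE_1 = 0x100206 is a 2-cycle threshold at width 1. A sketch of that decomposition (the macro names are illustrative, inferred from the values in this file):

    /* Field packing for MSR_PEBS_FRONTEND (0x3F7), inferred from the
     * MSRValues above. */
    #define FE_EVTSEL(sel)     ((unsigned long long)(sel)  & 0xff)          /* bits 7:0   */
    #define FE_BUBBLE_LEN(len) (((unsigned long long)(len) & 0xfff) << 8)   /* bits 19:8  */
    #define FE_BUBBLE_WIDTH(w) (((unsigned long long)(w)   & 0x7)   << 20)  /* bits 22:20 */

    /* LATENCY_GE_16: a >= 16-cycle window in which no uops were delivered. */
    static const unsigned long long fe_latency_ge_16 =
            FE_EVTSEL(0x06) | FE_BUBBLE_LEN(16) | FE_BUBBLE_WIDTH(4);   /* == 0x401006 */
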
@@ -465,7 +475,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
}
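
Several entries in this file (for instance the IDQ.MS_SWITCHES definition moved out above) combine EventCode/UMask with EdgeDetect and CounterMask. On x86 these occupy fixed positions in the architectural IA32_PERFEVTSELx layout, which is also what a PERF_TYPE_RAW config expects; a minimal packing sketch (the helper name is illustrative):

    /* Bits per the architectural IA32_PERFEVTSELx layout:
     * 7:0 event select, 15:8 unit mask, 18 edge detect, 31:24 counter mask. */
    #define X86_RAW_CONFIG(event, umask, edge, cmask)          \
            ((unsigned long long)(event)         |             \
             ((unsigned long long)(umask) << 8)  |             \
             ((unsigned long long)(edge)  << 18) |             \
             ((unsigned long long)(cmask) << 24))

    /* IDQ.MS_SWITCHES: EventCode 0x79, UMask 0x30, EdgeDetect 1, CounterMask 1.
     * Edge detection over "cmask >= 1" turns "cycles the MS delivers uops"
     * into "number of DSB/MITE -> Microcode Sequencer switches". */
    static const unsigned long long idq_ms_switches =
            X86_RAW_CONFIG(0x79, 0x30, 1, 1);   /* == 0x1043079 */
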
diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json
index d7fd5b06825b..3bd8b712c889 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json
@@ -1,6 +1,74 @@
[
{
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "TX_MEM.ABORT_CAPACITY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
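
The TX_MEM.* entries added above all share EventCode 0x54, with one UMask bit per abort cause (0x1 conflict, 0x2 capacity, 0x4 store to an elided lock, and so on), so each cause can be counted as its own raw event. A minimal sketch; whether UMask bits may additionally be OR-ed to sum several causes in one counter is an assumption, not something this file states:

    /* One raw perf config per TSX/HLE abort cause (EventCode 0x54). */
    #define TX_MEM_EVENT(umask) \
            (0x54ull | ((unsigned long long)(umask) << 8))

    static const unsigned long long tx_abort_conflict  = TX_MEM_EVENT(0x1);
    static const unsigned long long tx_abort_capacity  = TX_MEM_EVENT(0x2);
    static const unsigned long long tx_abort_hle_store = TX_MEM_EVENT(0x4);
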
+ {
"EventCode": "0x5d",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -10,7 +78,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
"EventCode": "0x5d",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -50,7 +118,77 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "SKL089",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
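
MACHINE_CLEARS.MEMORY_ORDERING (EventCode 0xC3, UMask 0x2, errata SKL089) is a plain counting event, so it can be read around a region of interest to attribute memory-ordering flushes. A minimal counting sketch, reusing the perf_event_open() wrapper and headers from the earlier FRONTEND_RETIRED sketch plus <sys/ioctl.h>:

    /* Count memory-ordering machine clears across one call to fn(). */
    static long long count_memory_ordering_clears(void (*fn)(void))
    {
            struct perf_event_attr attr = {
                    .type     = PERF_TYPE_RAW,
                    .size     = sizeof(attr),
                    .config   = 0xc3 | (0x2 << 8), /* MACHINE_CLEARS.MEMORY_ORDERING */
                    .disabled = 1,
            };
            long long count = -1;
            int fd = perf_event_open(&attr, 0, -1, -1, 0);

            if (fd < 0)
                    return -1;
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            fn();
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            if (read(fd, &count, sizeof(count)) != sizeof(count))
                    count = -1;
            close(fd);
            return count;
    }
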
+ {
+ "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -71,7 +209,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered. (PEBS)",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -99,13 +237,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -128,7 +265,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
"EventCode": "0xC9",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -149,7 +286,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times RTM abort was triggered.",
+ "PublicDescription": "Number of times RTM abort was triggered. (PEBS)",
"EventCode": "0xC9",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -208,17 +345,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "SKL089",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "2",
"PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
@@ -331,1718 +457,7 @@
"CounterHTOff": "0,1,2,3"
},
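
The OFFCORE_RESPONSE.* entries removed below are all variations of one mechanism: EventCode 0xB7 or 0xBB selects OFFCORE_RESPONSE_0/1, and the MSRValue is written to the matching MSR (MSRIndex 0x1A6/0x1A7) to pick the request type in the low bits (e.g. 0x8000 for OTHER) and the supplier/snoop qualifiers in the upper bits. As with the frontend events, perf carries the MSR payload in config1. A minimal sketch, taking UMask 0x1 from the entries themselves:

    /* OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP as a raw perf config pair. */
    struct raw_offcore {
            unsigned long long config;   /* goes to perf_event_attr.config  */
            unsigned long long config1;  /* goes to perf_event_attr.config1 */
    };

    static const struct raw_offcore other_l3_miss_any_snoop = {
            .config  = 0xb7 | (0x1 << 8),  /* OFFCORE_RESPONSE_0, UMask 0x1 */
            .config1 = 0x3ffc008000ull,    /* MSRValue for MSR 0x1A6 */
    };
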
{
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "TX_MEM.ABORT_CAPACITY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests who miss L3 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3ffc000001 ",
"Counter": "0,1,2,3",
@@ -2055,18 +470,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x103c000001 ",
"Counter": "0,1,2,3",
@@ -2079,6 +483,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000001 ",
"Counter": "0,1,2,3",
@@ -2091,6 +496,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000001 ",
"Counter": "0,1,2,3",
@@ -2103,6 +509,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000001 ",
"Counter": "0,1,2,3",
@@ -2115,6 +522,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000001 ",
"Counter": "0,1,2,3",
@@ -2127,18 +535,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc4000001 ",
"Counter": "0,1,2,3",
@@ -2151,18 +548,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000001 ",
"Counter": "0,1,2,3",
@@ -2175,6 +561,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000001 ",
"Counter": "0,1,2,3",
@@ -2187,6 +574,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000001 ",
"Counter": "0,1,2,3",
@@ -2199,6 +587,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000001 ",
"Counter": "0,1,2,3",
@@ -2211,6 +600,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000001 ",
"Counter": "0,1,2,3",
@@ -2221,89 +611,5 @@
"BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
}
]
\ No newline at end of file
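The OFFCORE_RESPONSE entries above all share event codes 0xB7/0xBB, with MSRIndex 0x1a6/0x1a7 naming the dedicated filter MSR and MSRValue selecting the request, supplier, and snoop bits. As a minimal sketch of how one such event could be counted from user space (assuming perf's x86 RAW encoding, where config holds (UMask << 8) | EventCode and config1 carries the offcore response MSR value; the MSRValue below is taken from the DEMAND_DATA_RD & L3_MISS & ANY_SNOOP entry in this file):

/*
 * Sketch: count OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP via
 * perf_event_open(). Assumes the x86 RAW encoding where attr.config is
 * (UMask << 8) | EventCode and attr.config1 holds the offcore response
 * filter that the kernel writes to MSR 0x1a6/0x1a7.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;		/* EventCode 0xB7, UMask 0x1 */
	attr.config1 = 0x3ffc000001ULL;	/* MSRValue: DEMAND_DATA_RD & L3_MISS & ANY_SNOOP */
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("offcore responses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

The perf tool equivalent would be along the lines of perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x3ffc000001/ on processors whose core PMU exposes the offcore_rsp format attribute.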
diff --git a/tools/perf/pmu-events/arch/x86/skylake/other.json b/tools/perf/pmu-events/arch/x86/skylake/other.json
index cfdc323acc82..84a316d380ac 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/other.json
@@ -1,11 +1,47 @@
[
{
- "PublicDescription": "This event counts the number of hardware interruptions received by the processor.",
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "HW_INTERRUPTS.RECEIVED",
- "SampleAfterValue": "100003",
+ "SampleAfterValue": "203",
"BriefDescription": "Number of hardware interrupts received by the processor.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
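The new SW_PREFETCH_ACCESS.* events count executed software prefetch instructions by flavor. A minimal sketch of code that would drive them (the mapping of _mm_prefetch hints to the UMasks above follows the instruction each hint compiles to; PREFETCHW, UMask 0x8, is typically only emitted by __builtin_prefetch(p, 1) when building with -mprfchw):

/*
 * Illustrative loop whose software prefetches land in the
 * SW_PREFETCH_ACCESS.* counts: _MM_HINT_NTA compiles to PREFETCHNTA
 * (UMask 0x1), _MM_HINT_T0 to PREFETCHT0 (0x2), and _MM_HINT_T1 /
 * _MM_HINT_T2 to PREFETCHT1/PREFETCHT2 (both counted by T1_T2, 0x4).
 */
#include <xmmintrin.h>
#include <stddef.h>

void prefetch_sweep(const char *buf, size_t len)
{
	for (size_t i = 0; i + 64 <= len; i += 64) {
		_mm_prefetch(buf + i, _MM_HINT_NTA);	/* SW_PREFETCH_ACCESS.NTA */
		_mm_prefetch(buf + i, _MM_HINT_T0);	/* SW_PREFETCH_ACCESS.T0 */
		_mm_prefetch(buf + i, _MM_HINT_T1);	/* SW_PREFETCH_ACCESS.T1_T2 */
	}
}

Once the aliases are built into perf, the counts could be read with something like perf stat -e sw_prefetch_access.nta,sw_prefetch_access.t0,sw_prefetch_access.t1_t2 ./a.out.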
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
index 0f7adb809be3..bc6d2afbcd8a 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -1,80 +1,92 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
+ "PublicDescription": "Counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations,c. preceding lock RMW operations are not forwarded,d. store has the no-forward bit set (uncacheable/page-split/masked stores),e. all-blocking stores are used (mostly, fences and port I/O), and others.The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xE6",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BACLEARS.ANY",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "EventCode": "0x07",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "LSD.UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
- "EventCode": "0x87",
+ "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"EventCode": "0x0D",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -87,33 +99,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
+ "EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
+ "UMask": "0x2",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -126,361 +140,318 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
- "EventCode": "0x5E",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
- "EventCode": "0xCC",
+ "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "UMask": "0x0",
+ "EdgeDetect": "1",
+ "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of machine clears (nukes) of any type.",
- "EventCode": "0xC3",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type. ",
- "CounterMask": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
- "EventCode": "0xC3",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "EventCode": "0xC0",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "SKL091, SKL044",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
- "EventCode": "0xC0",
- "Counter": "1",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
"UMask": "0x1",
- "Errata": "SKL091, SKL044",
- "EventName": "INST_RETIRED.PREC_DIST",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "CounterHTOff": "1"
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
- "EventCode": "0xC2",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
- "EventCode": "0xC2",
- "Invert": "1",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
- "EventCode": "0xC2",
- "Invert": "1",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "EventCode": "0x4C",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "EventCode": "0x5E",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
+ "EventCode": "0x5E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "EventCode": "0x87",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
+ "UMask": "0x4",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
+ "UMask": "0x8",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. ",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "UMask": "0x10",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "UMask": "0x20",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
- "EventCode": "0xB1",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.THREAD",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of uops executed from any thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_EXECUTED.X87",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "CounterMask": "1",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "UMask": "0x10",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "UMask": "0x14",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -490,6 +461,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -499,6 +471,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -508,6 +481,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -517,6 +491,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -535,212 +510,196 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xA1",
+ "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xA1",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "EventCode": "0xA1",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "EventCode": "0xA1",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "EventCode": "0xA1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventCode": "0xB1",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xA1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xA1",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "EventCode": "0xA1",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Number of uops executed from any thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
+ "BriefDescription": "Number of uops executed on the core.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap)\n\n - store forwarding is impossible due to u-arch limitations\n\n - preceding lock RMW operations are not forwarded\n\n - store has the no-forward bit set (uncacheable/page-split/masked stores)\n\n - all-blocking stores are used (mostly, fences and port I/O)\n\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
- "EventCode": "0x03",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "EventCode": "0x07",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xB1",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "EventName": "UOPS_EXECUTED.X87",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "16",
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventCode": "0xC0",
"Counter": "0,1,2,3",
- "UMask": "0x14",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "UMask": "0x0",
+ "Errata": "SKL091, SKL044",
+ "EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "20",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "PEBS": "2",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "Errata": "SKL091, SKL044",
+ "EventName": "INST_RETIRED.PREC_DIST",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "CounterHTOff": "1"
},
{
"PEBS": "2",
@@ -757,183 +716,235 @@
"CounterHTOff": "0,2,3"
},
{
- "EventCode": "0x14",
+ "EventCode": "0xC1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.DIVIDER_ACTIVE",
+ "UMask": "0x3f",
+ "EventName": "OTHER_ASSISTS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the retirement slots used.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
- "CounterMask": "1",
+ "BriefDescription": "Retirement slots used.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Cycles without actually retired uops.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC1",
+ "EventCode": "0xC3",
"Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "OTHER_ASSISTS.ANY",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register.\r\nFor more information, refer to ?Mixing Intel AVX and Intel SSE Code? section of the Optimization Guide.",
- "EventCode": "0x0E",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "EventCode": "0xC3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
+ "PublicDescription": "Counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0D",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "UMask": "0x2",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
+ "UMask": "0x8",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
+ "UMask": "0x10",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
+ "UMask": "0x20",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterMask": "1",
+ "UMask": "0x40",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
- "EventCode": "0x3C",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EdgeDetect": "1",
- "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
- "CounterMask": "1",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
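
The fields in these event tables map one-to-one onto the raw x86 PERFEVTSEL encoding that perf accepts as PERF_TYPE_RAW: EventCode fills bits 0-7, UMask bits 8-15, EdgeDetect bit 18, Invert bit 23, and CounterMask (CMASK) bits 24-31. The sketch below programs one of the events above, CYCLE_ACTIVITY.STALLS_MEM_ANY (EventCode 0xA3, UMask 0x14, CounterMask 20), through perf_event_open(2) on a Linux x86 host; the skl_raw_config helper and the surrounding scaffolding are illustrative, not part of the kernel or perf API.

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Pack the JSON fields into the raw PERFEVTSEL layout understood by
 * PERF_TYPE_RAW on x86: event select in bits 0-7, unit mask in bits
 * 8-15, edge detect in bit 18, invert in bit 23, cmask in bits 24-31. */
static uint64_t skl_raw_config(uint8_t event, uint8_t umask,
			       uint8_t cmask, int inv, int edge)
{
	return (uint64_t)event | ((uint64_t)umask << 8) |
	       ((uint64_t)edge << 18) | ((uint64_t)inv << 23) |
	       ((uint64_t)cmask << 24);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* CYCLE_ACTIVITY.STALLS_MEM_ANY, from the table above. */
	attr.config = skl_raw_config(0xa3, 0x14, 20, 0, 0);
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload under measurement here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("STALLS_MEM_ANY cycles: %llu\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}

This is roughly the encoding perf stat builds from these JSON files when given cpu/event=0xa3,umask=0x14,cmask=20/ on the command line.
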
diff --git a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
index 02f32cbf6789..2bcba7daca14 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
@@ -1,83 +1,6 @@
[
{
- "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "EPT.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
- "SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "ITLB_MISSES.WALK_PENDING",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -87,45 +10,68 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -135,7 +81,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -145,45 +91,68 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"SampleAfterValue": "100003",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -193,73 +162,77 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
- "EventCode": "0xBD",
+ "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
+ "EventCode": "0x4F",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TLB_FLUSH.DTLB_THREAD",
- "SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
- "EventCode": "0xBD",
+ "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "TLB_FLUSH.STLB_ANY",
- "SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"SampleAfterValue": "100003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "UMask": "0x8",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "UMask": "0xe",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
- "CounterMask": "1",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
- "CounterMask": "1",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -268,5 +241,44 @@
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "STLB flush attempts",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
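
The WALK_PENDING/WALK_ACTIVE/WALK_COMPLETED trio above composes into a simple derived metric: WALK_PENDING adds one per cycle for each busy page-miss handler and WALK_COMPLETED counts finished walks, so their ratio approximates the average page-walk duration in cycles. A minimal sketch of that arithmetic, assuming the two counts were collected together (for example with the raw encoding sketched after the previous file); the sample numbers are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Average demand-load page-walk duration in core cycles, derived from
 * DTLB_LOAD_MISSES.WALK_PENDING (EventCode 0x08, UMask 0x10) and
 * DTLB_LOAD_MISSES.WALK_COMPLETED (EventCode 0x08, UMask 0xe). */
static double avg_load_walk_cycles(uint64_t walk_pending,
				   uint64_t walk_completed)
{
	return walk_completed ? (double)walk_pending / walk_completed : 0.0;
}

int main(void)
{
	/* Hypothetical counts read back from the two events. */
	uint64_t pending = 420000, completed = 14000;

	printf("average page-walk duration: %.1f cycles\n",
	       avg_load_walk_cycles(pending, completed));
	return 0;
}
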
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
index b5bc742b6fbc..5c9940866acd 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
@@ -265,7 +265,7 @@
{
"EventCode": "0x60",
"UMask": "0x2",
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle. ",
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
@@ -398,22 +398,24 @@
{
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load instructions that miss the STLB.",
+ "BriefDescription": "Retired load instructions that miss the STLB. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PublicDescription": "Retired load instructions that miss the STLB.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store instructions that miss the STLB.",
+ "BriefDescription": "Retired store instructions that miss the STLB. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PublicDescription": "Retired store instructions that miss the STLB.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -421,7 +423,7 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load instructions with locked access.",
+ "BriefDescription": "Retired load instructions with locked access. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -432,24 +434,22 @@
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -457,7 +457,7 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load instructions.",
+ "BriefDescription": "All retired load instructions. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -468,11 +468,12 @@
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store instructions.",
+ "BriefDescription": "All retired store instructions. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PublicDescription": "All retired store instructions.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -485,7 +486,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -509,7 +510,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. ",
+ "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -545,7 +546,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. ",
+ "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -557,7 +558,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. ",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -616,7 +617,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -639,7 +639,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
- "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -648,9 +647,9 @@
"UMask": "0x8",
"BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
"Data_LA": "1",
+ "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
- "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -697,7 +696,7 @@
{
"EventCode": "0xF2",
"UMask": "0x2",
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped ",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
"Counter": "0,1,2,3",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
@@ -742,7 +741,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -755,7 +754,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -768,7 +767,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -781,7 +780,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -794,7 +793,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -807,7 +806,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -820,7 +819,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -833,7 +832,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -846,7 +845,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -859,7 +858,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -872,7 +871,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -885,7 +884,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -898,7 +897,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -911,7 +910,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -924,7 +923,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -937,7 +936,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -950,7 +949,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -963,7 +962,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -976,7 +975,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -989,7 +988,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1002,7 +1001,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1015,7 +1014,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1028,7 +1027,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1041,7 +1040,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1054,7 +1053,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1067,7 +1066,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1080,7 +1079,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1093,7 +1092,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1106,7 +1105,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1119,7 +1118,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1132,7 +1131,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1145,7 +1144,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1158,7 +1157,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1171,7 +1170,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1184,7 +1183,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1197,7 +1196,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1210,7 +1209,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1223,7 +1222,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1236,7 +1235,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1249,7 +1248,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1262,7 +1261,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1275,7 +1274,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1288,7 +1287,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1301,7 +1300,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1314,7 +1313,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1327,7 +1326,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1340,7 +1339,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1353,7 +1352,85 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that have any response type.",
+ "MSRValue": "0x0000018000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "MSRValue": "0x04003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "OTHER & L3_HIT & SNOOP_HIT_WITH_FWD",
+ "MSRValue": "0x08003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "MSRValue": "0x10003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3.",
+ "MSRValue": "0x3f803c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1366,7 +1443,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1379,7 +1456,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1392,7 +1469,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1405,7 +1482,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1418,7 +1495,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1431,7 +1508,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1444,7 +1521,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1457,7 +1534,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1470,7 +1547,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1483,7 +1560,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1496,7 +1573,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1509,7 +1586,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1522,7 +1599,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1535,7 +1612,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1548,7 +1625,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1561,7 +1638,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1574,7 +1651,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1587,7 +1664,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1600,7 +1677,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1613,7 +1690,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1626,7 +1703,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1639,7 +1716,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1652,7 +1729,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1665,7 +1742,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
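The PublicDescription boilerplate in these entries is terse; concretely, on Intel CPUs the kernel pairs the offcore-response event select with one of the OFFCORE_RSP MSRs (0x1a6/0x1a7), and user space supplies the MSR value through perf_event_attr.config1. A minimal sketch follows, assuming the commonly documented raw encoding 0x01b7 for OFFCORE_RESPONSE_0 and a made-up config1 value; it illustrates the mechanism and is not code from this patch.

/*
 * Sketch only: counting one offcore response event via perf_event_open(2).
 * The 0x01b7 event encoding and the config1 request/response bits below
 * are illustrative assumptions, not values taken from this patch.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;
        long long count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x01b7;           /* offcore-response event select + umask (assumed) */
        attr.config1 = 0x10003c0001ULL; /* OFFCORE_RSP request/response bits (example value) */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... run the workload being measured ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        read(fd, &count, sizeof(count));
        printf("offcore responses: %lld\n", count);
        close(fd);
        return 0;
}

In practice the same events are more conveniently requested by the names these JSON files define, e.g. perf stat -e offcore_response.all_rfo.l3_hit.any_snoop.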
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
index 1c09a328df36..286ed1a37ec9 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
@@ -29,10 +29,9 @@
{
"EventCode": "0xC7",
"UMask": "0x8",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
- "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
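Per the description above, each FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE count stands for 4 single-precision computations, with DPP and FM(N)ADD/SUB already counted twice by the PMU. A small sketch of the resulting FLOP arithmetic (the count and time window are placeholder values):

/*
 * Sketch: converting a raw FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE count
 * into floating-point operations, per the description above (4 lanes per
 * 128-bit instruction). The doubling for DPP/FMA is already folded into
 * the count by the PMU, so no extra factor is applied here.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long count = 2000003; /* example raw event count */
        double seconds = 1.0;               /* example measurement window */
        double flops = 4.0 * (double)count; /* 4 computations per count */

        printf("%.3f GFLOP/s\n", flops / seconds / 1e9);
        return 0;
}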
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
index 40abc0852cd6..403a4f89e9b2 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
@@ -182,7 +182,7 @@
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -247,20 +247,20 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. ",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -268,7 +268,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x12",
"Counter": "0,1,2,3",
@@ -281,7 +281,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x13",
"Counter": "0,1,2,3",
@@ -294,7 +294,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x14",
"Counter": "0,1,2,3",
@@ -308,13 +308,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x15",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. ",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -322,7 +322,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x400206",
"Counter": "0,1,2,3",
@@ -335,7 +335,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x200206",
"Counter": "0,1,2,3",
@@ -348,7 +348,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x400406",
"Counter": "0,1,2,3",
@@ -367,7 +367,7 @@
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -375,13 +375,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x401006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -389,13 +389,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x402006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -403,7 +403,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x404006",
"Counter": "0,1,2,3",
@@ -416,7 +416,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x408006",
"Counter": "0,1,2,3",
@@ -429,7 +429,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x410006",
"Counter": "0,1,2,3",
@@ -442,7 +442,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x420006",
"Counter": "0,1,2,3",
@@ -455,13 +455,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x100206",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -469,7 +469,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x300206",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
index ca22a22c1abd..e7f1aa31226d 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
@@ -214,7 +214,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered. (PEBS)",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -239,10 +239,9 @@
{
"EventCode": "0xC8",
"UMask": "0x20",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
- "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -292,7 +291,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered.",
+ "PublicDescription": "Number of times RTM abort was triggered. (PEBS)",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
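The "(PEBS)" annotations mark precise events; with perf_event_open, precise (PEBS) sampling is requested via attr.precise_ip. A sketch follows, assuming the raw encoding 0x04c9 for RTM_RETIRED.ABORTED (event 0xC9, umask 0x4) - an assumption, since the EventCode lines fall outside this hunk.

/*
 * Sketch: requesting precise (PEBS) samples for an abort event such as
 * RTM_RETIRED.ABORTED. attr.precise_ip > 0 asks the kernel to use PEBS;
 * the 0x04c9 encoding is assumed, not taken from this patch.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int open_rtm_abort_sampler(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x04c9;         /* RTM_RETIRED.ABORTED (assumed encoding) */
        attr.sample_period = 2000003; /* SampleAfterValue from the table above */
        attr.sample_type = PERF_SAMPLE_IP;
        attr.precise_ip = 2;          /* request PEBS: IP sampled at the event */
        attr.disabled = 1;

        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}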
@@ -466,7 +465,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss in the L3. ",
+ "PublicDescription": "Counts demand data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -479,7 +478,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -492,7 +491,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -505,7 +504,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -518,7 +517,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -531,7 +530,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -544,7 +543,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -557,7 +556,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -570,7 +569,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -583,7 +582,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -596,7 +595,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -609,7 +608,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -622,7 +621,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss in the L3. ",
+ "PublicDescription": "Counts all demand code reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -635,7 +634,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -648,7 +647,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -661,7 +660,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -674,7 +673,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -687,7 +686,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -700,7 +699,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -713,7 +712,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -726,7 +725,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -739,7 +738,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -752,7 +751,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -765,7 +764,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -778,7 +777,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -791,7 +790,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -804,7 +803,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -817,7 +816,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -830,7 +829,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -843,7 +842,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -856,7 +855,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -869,7 +868,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -882,7 +881,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -895,7 +894,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -908,7 +907,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -921,7 +920,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -934,7 +933,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -947,7 +946,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -960,7 +959,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -973,7 +972,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -986,7 +985,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -999,7 +998,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1012,7 +1011,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1025,7 +1024,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1038,7 +1037,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1051,7 +1050,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1064,7 +1063,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1077,7 +1076,85 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss in the L3.",
+ "MSRValue": "0x3fbc008000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache.",
+ "MSRValue": "0x083fc08000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache.",
+ "MSRValue": "0x103fc08000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram.",
+ "MSRValue": "0x063fc08000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram.",
+ "MSRValue": "0x063b808000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local dram.",
+ "MSRValue": "0x0604008000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1090,7 +1167,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1103,7 +1180,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1116,7 +1193,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1129,7 +1206,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1142,7 +1219,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1155,7 +1232,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1168,7 +1245,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss in the L3. ",
+ "PublicDescription": "Counts prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1181,7 +1258,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1194,7 +1271,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1207,7 +1284,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1220,7 +1297,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1233,7 +1310,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1246,7 +1323,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1259,7 +1336,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1272,7 +1349,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1285,7 +1362,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1298,7 +1375,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1311,7 +1388,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1324,7 +1401,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1337,7 +1414,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1350,7 +1427,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1363,7 +1440,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1376,7 +1453,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1389,8 +1466,8 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
-]
+] \ No newline at end of file
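Aside: every OFFCORE_RESPONSE.* entry above pairs event 0xB7/0xBB with a per-event MSRValue destined for MSR 0x1a6/0x1a7. As a sanity check — a sketch assuming a Skylake-X machine and a perf built with these tables; the symbolic event and the raw encoding built from the entry's MSRValue should report the same count:

    # Symbolic form, resolved through the JSON added above.
    perf stat -e OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP -- sleep 1
    # Raw form: event/umask from the entry, the MSRValue fed through the cpu
    # PMU's offcore_rsp format attribute (config1), where sysfs exposes it.
    perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x3fbc008000/ -- sleep 1
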
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/other.json b/tools/perf/pmu-events/arch/x86/skylakex/other.json
index 70243b0b0586..778a541463eb 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/other.json
@@ -40,6 +40,42 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x32",
+ "UMask": "0x1",
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x2",
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x4",
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x8",
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xCB",
"UMask": "0x1",
"BriefDescription": "Number of hardware interrupts received by the processor.",
@@ -50,6 +86,62 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xEF",
+ "UMask": "0x1",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x2",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x4",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x8",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x10",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x20",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x40",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xFE",
"UMask": "0x2",
"BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
@@ -69,4 +161,4 @@
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
-]
+] \ No newline at end of file
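Aside: the new SW_PREFETCH_ACCESS.* events split software prefetches by flavor. A minimal sketch, assuming Skylake-X and a perf carrying the table above (./workload is a placeholder for the binary under study):

    # Which prefetch hints does the workload actually execute?
    perf stat -e sw_prefetch_access.nta,sw_prefetch_access.t0 \
              -e sw_prefetch_access.t1_t2,sw_prefetch_access.prefetchw \
              -- ./workload
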
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index 0895d1e52a4a..f99f7ae27820 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -3,41 +3,41 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"AnyThread": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -126,7 +126,7 @@
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"Counter": "0,1,2,3",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -762,11 +762,10 @@
"EdgeDetect": "1",
"EventCode": "0xC3",
"UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type. ",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"Counter": "0,1,2,3",
"EventName": "MACHINE_CLEARS.COUNT",
"CounterMask": "1",
- "PublicDescription": "Number of machine clears (nukes) of any type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -799,7 +798,7 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -811,14 +810,14 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. ",
+ "BriefDescription": "All (macro) branch instructions retired.",
"PEBS": "2",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
@@ -835,7 +834,7 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -858,19 +857,19 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x40",
- "BriefDescription": "Far branch instructions retired.",
+ "BriefDescription": "Counts the number of far branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -891,7 +890,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -902,14 +901,14 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"PEBS": "2",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
@@ -920,10 +919,11 @@
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
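Aside: the hunks above renumber the three fixed counters to 0-2 and flip several BR_INST_RETIRED.* descriptions to PEBS-precise. A quick sketch of both, assuming Skylake-X (./workload is a placeholder):

    # The fixed counters back the generic events:
    perf stat -e instructions,cycles,ref-cycles -- ./workload
    # Events flagged "PEBS" above can be sampled precisely with :p
    perf record -e br_inst_retired.near_taken:p -- ./workload
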
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
index 70750dab7ead..7f466c97e485 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
@@ -12,30 +12,30 @@
{
"EventCode": "0x08",
"UMask": "0x2",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts demand data loads that caused a completed page walk (4K page size). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x4",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts demand data loads that caused a completed page walk (2M and 4M page sizes). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x8",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -52,17 +52,17 @@
{
"EventCode": "0x08",
"UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture. ",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
"CounterMask": "1",
@@ -93,30 +93,30 @@
{
"EventCode": "0x49",
"UMask": "0x2",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts demand data stores that caused a completed page walk (4K page size). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x4",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts demand data stores that caused a completed page walk (2M and 4M page sizes). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x8",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -133,17 +133,17 @@
{
"EventCode": "0x49",
"UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture. ",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
"CounterMask": "1",
@@ -197,7 +197,7 @@
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts completed page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -224,10 +224,10 @@
{
"EventCode": "0x85",
"UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture. ",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
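Aside: the reworded page-walk events compose naturally into a TLB-pressure check. A sketch assuming Skylake-X; WALK_PENDING counts one per busy page-miss handler per cycle, so its ratio to cycles roughly approximates the fraction of time spent walking:

    perf stat -e dtlb_load_misses.walk_pending,cycles \
              -e dtlb_load_misses.walk_completed_4k \
              -e dtlb_load_misses.walk_completed_2m_4m \
              -- ./workload
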
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 9eb7047bafe4..b578aa26e375 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -116,6 +116,43 @@ static void fixdesc(char *s)
*e = 0;
}
+/* Add escapes for '\' so they are proper C strings. */
+static char *fixregex(char *s)
+{
+ int len = 0;
+ int esc_count = 0;
+ char *fixed = NULL;
+ char *p, *q;
+
+ /* Count the number of '\' in string */
+ for (p = s; *p; p++) {
+ ++len;
+ if (*p == '\\')
+ ++esc_count;
+ }
+
+ if (esc_count == 0)
+ return s;
+
+ /* allocate space for a new string */
+ fixed = (char *) malloc(len + 1);
+ if (!fixed)
+ return NULL;
+
+ /* copy over the characters */
+ q = fixed;
+ for (p = s; *p; p++) {
+ if (*p == '\\') {
+ *q = '\\';
+ ++q;
+ }
+ *q = *p;
+ ++q;
+ }
+ *q = '\0';
+ return fixed;
+}
+
static struct msrmap {
const char *num;
const char *pname;
@@ -648,7 +685,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
}
line[strlen(line)-1] = '\0';
- cpuid = strtok_r(p, ",", &save);
+ cpuid = fixregex(strtok_r(p, ",", &save));
version = strtok_r(NULL, ",", &save);
fname = strtok_r(NULL, ",", &save);
type = strtok_r(NULL, ",", &save);
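Aside: the cpuid field of the mapfile is a regex that jevents pastes into a generated C string, so any '\' it carries must be doubled — that is all fixregex() does. To see the result (paths assume an in-tree build):

    # jevents runs during the perf build and emits pmu-events.c
    make -C tools/perf >/dev/null
    grep -n 'GenuineIntel-6-55' tools/perf/pmu-events/pmu-events.c
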
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-record b/tools/perf/scripts/python/bin/mem-phys-addr-record
new file mode 100644
index 000000000000..5a875122a904
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-record
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+#
+# Profiling physical memory by all retired load instructions/uops event
+# MEM_INST_RETIRED.ALL_LOADS or MEM_UOPS_RETIRED.ALL_LOADS
+#
+
+load=`perf list | grep mem_inst_retired.all_loads`
+if [ -z "$load" ]; then
+ load=`perf list | grep mem_uops_retired.all_loads`
+fi
+if [ -z "$load" ]; then
+ echo "There is no event to count all retired load instructions/uops."
+ exit 1
+fi
+
+arg=$(echo $load | tr -d ' ')
+arg="$arg:P"
+perf record --phys-data -e $arg $@
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-report b/tools/perf/scripts/python/bin/mem-phys-addr-report
new file mode 100644
index 000000000000..3f2b847e2eab
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: resolve physical address samples
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/mem-phys-addr.py
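Aside: the two wrappers are meant to be driven through perf script's record/report aliases. A usage sketch, assuming a CPU exposing mem_inst_retired.all_loads (or the uops variant) and privileges for --phys-data; ./workload is a placeholder:

    perf script record mem-phys-addr -- ./workload
    perf script report mem-phys-addr
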
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
new file mode 100644
index 000000000000..ebee2c5ae496
--- /dev/null
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -0,0 +1,95 @@
+# mem-phys-addr.py: Resolve physical address samples
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2018, Intel Corporation.
+
+from __future__ import division
+import os
+import sys
+import struct
+import re
+import bisect
+import collections
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+#physical address ranges for System RAM
+system_ram = []
+#physical address ranges for Persistent Memory
+pmem = []
+#file object for proc iomem
+f = None
+#Count for each type of memory
+load_mem_type_cnt = collections.Counter()
+#perf event name
+event_name = None
+
+def parse_iomem():
+ global f
+ f = open('/proc/iomem', 'r')
+ for i, j in enumerate(f):
+ m = re.split('-|:',j,2)
+ if m[2].strip() == 'System RAM':
+ system_ram.append(long(m[0], 16))
+ system_ram.append(long(m[1], 16))
+ if m[2].strip() == 'Persistent Memory':
+ pmem.append(long(m[0], 16))
+ pmem.append(long(m[1], 16))
+
+def print_memory_type():
+ print "Event: %s" % (event_name)
+ print "%-40s %10s %10s\n" % ("Memory type", "count", "percentage"),
+ print "%-40s %10s %10s\n" % ("----------------------------------------", \
+ "-----------", "-----------"),
+ total = sum(load_mem_type_cnt.values())
+ for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
+ key = lambda(k, v): (v, k), reverse = True):
+ print "%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
+
+def trace_begin():
+ parse_iomem()
+
+def trace_end():
+ print_memory_type()
+ f.close()
+
+def is_system_ram(phys_addr):
+ #/proc/iomem is sorted
+ position = bisect.bisect(system_ram, phys_addr)
+ if position % 2 == 0:
+ return False
+ return True
+
+def is_persistent_mem(phys_addr):
+ position = bisect.bisect(pmem, phys_addr)
+ if position % 2 == 0:
+ return False
+ return True
+
+def find_memory_type(phys_addr):
+ if phys_addr == 0:
+ return "N/A"
+ if is_system_ram(phys_addr):
+ return "System RAM"
+
+ if is_persistent_mem(phys_addr):
+ return "Persistent Memory"
+
+ #slow path, search all
+ f.seek(0, 0)
+ for j in f:
+ m = re.split('-|:',j,2)
+ if long(m[0], 16) <= phys_addr <= long(m[1], 16):
+ return m[2]
+ return "N/A"
+
+def process_event(param_dict):
+ name = param_dict["ev_name"]
+ sample = param_dict["sample"]
+ phys_addr = sample["phys_addr"]
+
+ global event_name
+ if event_name == None:
+ event_name = name
+ load_mem_type_cnt[find_memory_type(phys_addr)] += 1
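The fast paths above depend on /proc/iomem being sorted: each range contributes its start and end to a flat list, so an address lies inside some range exactly when bisect lands at an odd position. A self-contained C sketch of the same test (hypothetical helper, with a linear scan standing in for the binary search):

	#include <stdbool.h>

	/* b[] holds {start0, end0, start1, end1, ...}, sorted ascending. */
	static bool in_ranges(const unsigned long long *b, int n,
			      unsigned long long addr)
	{
		int pos = 0;

		/*
		 * '<=' mirrors Python's bisect.bisect(), which inserts to
		 * the right of equal elements; an address equal to a range
		 * end falls through here, just as in the script, where the
		 * inclusive slow path in find_memory_type() catches it.
		 */
		while (pos < n && b[pos] <= addr)
			pos++;
		return pos & 1;	/* odd position => inside a range */
	}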
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 0e1367f90af5..97f64ad7fa08 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -124,6 +124,12 @@ static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu,
WRITE_ASS(exclude_guest, "d");
WRITE_ASS(exclude_callchain_kernel, "d");
WRITE_ASS(exclude_callchain_user, "d");
+ WRITE_ASS(mmap2, "d");
+ WRITE_ASS(comm_exec, "d");
+ WRITE_ASS(context_switch, "d");
+ WRITE_ASS(write_backward, "d");
+ WRITE_ASS(namespaces, "d");
+ WRITE_ASS(use_clockid, "d");
WRITE_ASS(wakeup_events, PRIu32);
WRITE_ASS(bp_type, PRIu32);
WRITE_ASS(config1, "llu");
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 71b9a0b613d2..4035d43523c3 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -33,8 +33,8 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
for (i = 0; i < evlist->nr_mmaps; i++) {
union perf_event *event;
- perf_mmap__read_catchup(&evlist->backward_mmap[i]);
- while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
+ perf_mmap__read_catchup(&evlist->overwrite_mmap[i]);
+ while ((event = perf_mmap__read_backward(&evlist->overwrite_mmap[i])) != NULL) {
const u32 type = event->header.type;
switch (type) {
@@ -59,7 +59,7 @@ static int do_test(struct perf_evlist *evlist, int mmap_pages,
int err;
char sbuf[STRERR_BUFSIZE];
- err = perf_evlist__mmap(evlist, mmap_pages, true);
+ err = perf_evlist__mmap(evlist, mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index 335b695f4970..a467615c5a0e 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -296,7 +296,7 @@ bool test__bp_signal_is_supported(void)
* instruction breakpoint using the perf event interface.
* Once it's there we can release this.
*/
-#ifdef __powerpc__
+#if defined(__powerpc__) || defined(__s390x__)
return false;
#else
return true;
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
index 268e5f8e4aa2..e4123c1b0e88 100644
--- a/tools/perf/tests/bpf-script-example.c
+++ b/tools/perf/tests/bpf-script-example.c
@@ -31,8 +31,8 @@ struct bpf_map_def SEC("maps") flip_table = {
.max_entries = 1,
};
-SEC("func=SyS_epoll_wait")
-int bpf_func__SyS_epoll_wait(void *ctx)
+SEC("func=SyS_epoll_pwait")
+int bpf_func__SyS_epoll_pwait(void *ctx)
{
int ind =0;
int *flag = bpf_map_lookup_elem(&flip_table, &ind);
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 34c22cdf4d5d..e8399beca62b 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -3,6 +3,7 @@
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
@@ -19,13 +20,13 @@
#ifdef HAVE_LIBBPF_SUPPORT
-static int epoll_wait_loop(void)
+static int epoll_pwait_loop(void)
{
int i;
/* Should fail NR_ITERS times */
for (i = 0; i < NR_ITERS; i++)
- epoll_wait(-(i + 1), NULL, 0, 0);
+ epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
return 0;
}
@@ -63,46 +64,41 @@ static struct {
bool pin;
} bpf_testcase_table[] = {
{
- LLVM_TESTCASE_BASE,
- "Basic BPF filtering",
- "[basic_bpf_test]",
- "fix 'perf test LLVM' first",
- "load bpf object failed",
- &epoll_wait_loop,
- (NR_ITERS + 1) / 2,
- false,
+ .prog_id = LLVM_TESTCASE_BASE,
+ .desc = "Basic BPF filtering",
+ .name = "[basic_bpf_test]",
+ .msg_compile_fail = "fix 'perf test LLVM' first",
+ .msg_load_fail = "load bpf object failed",
+ .target_func = &epoll_pwait_loop,
+ .expect_result = (NR_ITERS + 1) / 2,
},
{
- LLVM_TESTCASE_BASE,
- "BPF pinning",
- "[bpf_pinning]",
- "fix kbuild first",
- "check your vmlinux setting?",
- &epoll_wait_loop,
- (NR_ITERS + 1) / 2,
- true,
+ .prog_id = LLVM_TESTCASE_BASE,
+ .desc = "BPF pinning",
+ .name = "[bpf_pinning]",
+ .msg_compile_fail = "fix kbuild first",
+ .msg_load_fail = "check your vmlinux setting?",
+ .target_func = &epoll_pwait_loop,
+ .expect_result = (NR_ITERS + 1) / 2,
+ .pin = true,
},
#ifdef HAVE_BPF_PROLOGUE
{
- LLVM_TESTCASE_BPF_PROLOGUE,
- "BPF prologue generation",
- "[bpf_prologue_test]",
- "fix kbuild first",
- "check your vmlinux setting?",
- &llseek_loop,
- (NR_ITERS + 1) / 4,
- false,
+ .prog_id = LLVM_TESTCASE_BPF_PROLOGUE,
+ .desc = "BPF prologue generation",
+ .name = "[bpf_prologue_test]",
+ .msg_compile_fail = "fix kbuild first",
+ .msg_load_fail = "check your vmlinux setting?",
+ .target_func = &llseek_loop,
+ .expect_result = (NR_ITERS + 1) / 4,
},
#endif
{
- LLVM_TESTCASE_BPF_RELOCATION,
- "BPF relocation checker",
- "[bpf_relocation_test]",
- "fix 'perf test LLVM' first",
- "libbpf error when dealing with relocation",
- NULL,
- 0,
- false,
+ .prog_id = LLVM_TESTCASE_BPF_RELOCATION,
+ .desc = "BPF relocation checker",
+ .name = "[bpf_relocation_test]",
+ .msg_compile_fail = "fix 'perf test LLVM' first",
+ .msg_load_fail = "libbpf error when dealing with relocation",
},
};
@@ -167,7 +163,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+ err = perf_evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
@@ -190,7 +186,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
}
if (count != expect) {
- pr_debug("BPF filter result incorrect\n");
+ pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
goto out_delete_evlist;
}
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 766573e236e4..fafa014240cd 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -411,9 +411,9 @@ static const char *shell_test__description(char *description, size_t size,
return description ? trim(description + 1) : NULL;
}
-#define for_each_shell_test(dir, ent) \
+#define for_each_shell_test(dir, base, ent) \
while ((ent = readdir(dir)) != NULL) \
- if (ent->d_type == DT_REG && ent->d_name[0] != '.')
+ if (!is_directory(base, ent))
static const char *shell_tests__dir(char *path, size_t size)
{
@@ -452,7 +452,7 @@ static int shell_tests__max_desc_width(void)
if (!dir)
return -1;
- for_each_shell_test(dir, ent) {
+ for_each_shell_test(dir, path, ent) {
char bf[256];
const char *desc = shell_test__description(bf, sizeof(bf), path, ent->d_name);
@@ -504,7 +504,7 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width)
if (!dir)
return -1;
- for_each_shell_test(dir, ent) {
+ for_each_shell_test(dir, st.dir, ent) {
int curr = i++;
char desc[256];
struct test test = {
@@ -614,7 +614,7 @@ static int perf_test__list_shell(int argc, const char **argv, int i)
if (!dir)
return -1;
- for_each_shell_test(dir, ent) {
+ for_each_shell_test(dir, path, ent) {
int curr = i++;
char bf[256];
struct test t = {
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index fcc8984bc329..3bf7b145b826 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -639,7 +639,7 @@ static int do_test_code_reading(bool try_kcore)
break;
}
- ret = perf_evlist__mmap(evlist, UINT_MAX, false);
+ ret = perf_evlist__mmap(evlist, UINT_MAX);
if (ret < 0) {
pr_debug("perf_evlist__mmap failed\n");
goto out_put;
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index ac40e05bcab4..260418969120 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -173,6 +173,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
}
callchain_param.record_mode = CALLCHAIN_DWARF;
+ dwarf_callchain_users = true;
if (init_live_machine(machine)) {
pr_err("Could not init machine\n");
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index 842d33637a18..c46530918938 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -95,7 +95,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
goto out_err;
}
- CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));
+ CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
/*
* First, test that a 'comm' event can be found when the event is
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 5a8bf318f8a7..c0e971da965c 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -94,7 +94,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
expected_nr_events[i] = 1 + rand() % 127;
}
- if (perf_evlist__mmap(evlist, 128, true) < 0) {
+ if (perf_evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index d9619d265314..43519267b93b 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include "perf.h"
#include "evlist.h"
#include "evsel.h"
@@ -64,7 +67,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ err = perf_evlist__mmap(evlist, UINT_MAX);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index f0679613bd18..18b06444f230 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -13,7 +13,6 @@
#include <unistd.h>
#include <linux/kernel.h>
#include <linux/hw_breakpoint.h>
-#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index c34904d37705..0afafab85238 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -141,7 +141,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
* fds in the same CPU to be injected in the same mmap ring buffer
* (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
*/
- err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+ err = perf_evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 3ec6302b6498..0e2d00d69e6e 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -248,7 +248,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
event->header.size = sz;
err = perf_event__synthesize_sample(event, sample_type, read_format,
- &sample, false);
+ &sample);
if (err) {
pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
"perf_event__synthesize_sample", sample_type, err);
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 2a9ef080efd0..55ad9793d544 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -17,10 +17,9 @@ skip_if_no_perf_probe || exit 2
file=$(mktemp /tmp/temporary_file.XXXXX)
trace_open_vfs_getname() {
- test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
-
- perf trace -e ${svc:-open} touch $file 2>&1 | \
- egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
+ evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
+ perf trace -e $evts touch $file 2>&1 | \
+ egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 725a196991a8..f6c72f915d48 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -78,7 +78,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, 128, true);
+ err = perf_evlist__mmap(evlist, 128);
if (err < 0) {
pr_debug("failed to mmap event: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 7d3f4bf9534f..33e00295a972 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -449,7 +449,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
goto out;
}
- err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ err = perf_evlist__mmap(evlist, UINT_MAX);
if (err) {
pr_debug("perf_evlist__mmap failed!\n");
goto out_err;
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 89c8e1604ca7..01b62b81751b 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -101,7 +101,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
goto out_delete_evlist;
}
- if (perf_evlist__mmap(evlist, 128, true) < 0) {
+ if (perf_evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index dbcb6a19b375..4de1939b58ba 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -105,7 +105,7 @@ int test__thread_map_remove(struct test *test __maybe_unused, int subtest __mayb
TEST_ASSERT_VAL("failed to allocate map string",
asprintf(&str, "%d,%d", getpid(), getppid()) >= 0);
- threads = thread_map__new_str(str, NULL, 0);
+ threads = thread_map__new_str(str, NULL, 0, false);
TEST_ASSERT_VAL("failed to allocate thread_map",
threads);
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 066bbf0f4a74..66330d4b739b 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -1,5 +1,6 @@
libperf-y += clone.o
libperf-y += fcntl.o
+libperf-y += flock.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
libperf-y += ioctl.o
endif
diff --git a/tools/perf/trace/beauty/arch_errno_names.c b/tools/perf/trace/beauty/arch_errno_names.c
new file mode 100644
index 000000000000..ede031c3a9e0
--- /dev/null
+++ b/tools/perf/trace/beauty/arch_errno_names.c
@@ -0,0 +1 @@
+#include "trace/beauty/generated/arch_errno_name_array.c"
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
new file mode 100755
index 000000000000..22c9fc900c84
--- /dev/null
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -0,0 +1,100 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate C file mapping errno codes to errno names.
+#
+# Copyright IBM Corp. 2018
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+
+gcc="$1"
+toolsdir="$2"
+include_path="-I$toolsdir/include/uapi"
+
+arch_string()
+{
+ echo "$1" |sed -e 'y/- /__/' |tr '[[:upper:]]' '[[:lower:]]'
+}
+
+asm_errno_file()
+{
+ local arch="$1"
+ local header
+
+ header="$toolsdir/arch/$arch/include/uapi/asm/errno.h"
+ if test -r "$header"; then
+ echo "$header"
+ else
+ echo "$toolsdir/include/uapi/asm-generic/errno.h"
+ fi
+}
+
+create_errno_lookup_func()
+{
+ local arch=$(arch_string "$1")
+ local nr name
+
+ cat <<EoFuncBegin
+static const char *errno_to_name__$arch(int err)
+{
+ switch (err) {
+EoFuncBegin
+
+ while read name nr; do
+ printf '\tcase %d: return "%s";\n' $nr $name
+ done
+
+ cat <<EoFuncEnd
+ default:
+ return "(unknown)";
+ }
+}
+
+EoFuncEnd
+}
+
+process_arch()
+{
+ local arch="$1"
+ local asm_errno=$(asm_errno_file "$arch")
+
+ $gcc $include_path -E -dM -x c $asm_errno \
+ |grep -hE '^#define[[:blank:]]+(E[^[:blank:]]+)[[:blank:]]+([[:digit:]]+).*' \
+ |awk '{ print $2","$3; }' \
+ |sort -t, -k2 -nu \
+ |IFS=, create_errno_lookup_func "$arch"
+}
+
+create_arch_errno_table_func()
+{
+ local archlist="$1"
+ local default="$2"
+ local arch
+
+ printf 'const char *arch_syscalls__strerrno(const char *arch, int err)\n'
+ printf '{\n'
+ for arch in $archlist; do
+ printf '\tif (!strcmp(arch, "%s"))\n' $(arch_string "$arch")
+ printf '\t\treturn errno_to_name__%s(err);\n' $(arch_string "$arch")
+ done
+ printf '\treturn errno_to_name__%s(err);\n' $(arch_string "$default")
+ printf '}\n'
+}
+
+cat <<EoHEADER
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <string.h>
+
+EoHEADER
+
+# Create list of architectures and ignore those that do not appear
+# in tools/perf/arch
+archlist=""
+for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do
+ test -d arch/$arch && archlist="$archlist $arch"
+done
+
+for arch in x86 $archlist generic; do
+ process_arch "$arch"
+done
+create_arch_errno_table_func "x86 $archlist" "generic"
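For orientation, the generated arch_errno_name_array.c contains one switch per architecture plus the dispatcher emitted by create_arch_errno_table_func(). A sketch of its shape, trimmed to two codes (the real tables are derived from each arch's asm/errno.h):

	/* SPDX-License-Identifier: GPL-2.0 */

	#include <string.h>

	static const char *errno_to_name__x86(int err)
	{
		switch (err) {
		case 1: return "EPERM";
		case 2: return "ENOENT";
		default:
			return "(unknown)";
		}
	}

	/* ...one such function per architecture, ending with: */
	static const char *errno_to_name__generic(int err)
	{
		switch (err) {
		case 1: return "EPERM";
		default:
			return "(unknown)";
		}
	}

	const char *arch_syscalls__strerrno(const char *arch, int err)
	{
		if (!strcmp(arch, "x86"))
			return errno_to_name__x86(err);
		/* per-arch branches elided; "generic" is the fallback */
		return errno_to_name__generic(err);
	}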
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index a6dfd04beaee..984a504d335c 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -79,6 +79,9 @@ size_t syscall_arg__scnprintf_fcntl_cmd(char *bf, size_t size, struct syscall_ar
size_t syscall_arg__scnprintf_fcntl_arg(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_FCNTL_ARG syscall_arg__scnprintf_fcntl_arg
+size_t syscall_arg__scnprintf_flock(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_FLOCK syscall_arg__scnprintf_flock
+
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_IOCTL_CMD syscall_arg__scnprintf_ioctl_cmd
@@ -114,4 +117,6 @@ size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size);
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg));
+const char *arch_syscalls__strerrno(const char *arch, int err);
+
#endif /* _PERF_TRACE_BEAUTY_H */
diff --git a/tools/perf/trace/beauty/flock.c b/tools/perf/trace/beauty/flock.c
index f9707f57566c..c4ff6ad30b06 100644
--- a/tools/perf/trace/beauty/flock.c
+++ b/tools/perf/trace/beauty/flock.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-#include <fcntl.h>
+
+#include "trace/beauty/beauty.h"
+#include <linux/kernel.h>
+#include <uapi/linux/fcntl.h>
#ifndef LOCK_MAND
#define LOCK_MAND 32
@@ -17,8 +20,7 @@
#define LOCK_RW 192
#endif
-static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
- struct syscall_arg *arg)
+size_t syscall_arg__scnprintf_flock(char *bf, size_t size, struct syscall_arg *arg)
{
int printed = 0, op = arg->val;
@@ -45,5 +47,3 @@ static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
return printed;
}
-
-#define SCA_FLOCK syscall_arg__scnprintf_flock
diff --git a/tools/perf/trace/beauty/futex_val3.c b/tools/perf/trace/beauty/futex_val3.c
new file mode 100644
index 000000000000..26f6b3253511
--- /dev/null
+++ b/tools/perf/trace/beauty/futex_val3.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/futex.h>
+
+#ifndef FUTEX_BITSET_MATCH_ANY
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+#endif
+
+static size_t syscall_arg__scnprintf_futex_val3(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned int bitset = arg->val;
+
+ if (bitset == FUTEX_BITSET_MATCH_ANY)
+ return scnprintf(bf, size, "MATCH_ANY");
+
+ return scnprintf(bf, size, "%#xd", bitset);
+}
+
+#define SCA_FUTEX_VAL3 syscall_arg__scnprintf_futex_val3
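What the new beautifier yields, assuming arg->val carries the raw val3 word (FUTEX_BITSET_MATCH_ANY is 0xffffffff):

	/*
	 * Sketch of the mapping, not captured from a real perf trace run:
	 *
	 *   bitset == 0xffffffff  ->  "MATCH_ANY"
	 *   bitset == 0x3         ->  "0x3"
	 */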
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 8f7f59d1a2b5..286427975112 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -25,16 +25,10 @@ struct disasm_line_samples {
#define IPC_WIDTH 6
#define CYCLES_WIDTH 6
-struct browser_disasm_line {
- struct rb_node rb_node;
- u32 idx;
- int idx_asm;
- int jump_sources;
- /*
- * actual length of this array is saved on the nr_events field
- * of the struct annotate_browser
- */
- struct disasm_line_samples samples[1];
+struct browser_line {
+ u32 idx;
+ int idx_asm;
+ int jump_sources;
};
static struct annotate_browser_opt {
@@ -53,39 +47,43 @@ static struct annotate_browser_opt {
struct arch;
struct annotate_browser {
- struct ui_browser b;
- struct rb_root entries;
- struct rb_node *curr_hot;
- struct disasm_line *selection;
- struct disasm_line **offsets;
- struct arch *arch;
- int nr_events;
- u64 start;
- int nr_asm_entries;
- int nr_entries;
- int max_jump_sources;
- int nr_jumps;
- bool searching_backwards;
- bool have_cycles;
- u8 addr_width;
- u8 jumps_width;
- u8 target_width;
- u8 min_addr_width;
- u8 max_addr_width;
- char search_bf[128];
+ struct ui_browser b;
+ struct rb_root entries;
+ struct rb_node *curr_hot;
+ struct annotation_line *selection;
+ struct annotation_line **offsets;
+ struct arch *arch;
+ int nr_events;
+ u64 start;
+ int nr_asm_entries;
+ int nr_entries;
+ int max_jump_sources;
+ int nr_jumps;
+ bool searching_backwards;
+ bool have_cycles;
+ u8 addr_width;
+ u8 jumps_width;
+ u8 target_width;
+ u8 min_addr_width;
+ u8 max_addr_width;
+ char search_bf[128];
};
-static inline struct browser_disasm_line *disasm_line__browser(struct disasm_line *dl)
+static inline struct browser_line *browser_line(struct annotation_line *al)
{
- return (struct browser_disasm_line *)(dl + 1);
+ void *ptr = al;
+
+ ptr = container_of(al, struct disasm_line, al);
+ return ptr - sizeof(struct browser_line);
}
static bool disasm_line__filter(struct ui_browser *browser __maybe_unused,
void *entry)
{
if (annotate_browser__opts.hide_src_code) {
- struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
- return dl->offset == -1;
+ struct annotation_line *al = list_entry(entry, struct annotation_line, node);
+
+ return al->offset == -1;
}
return false;
@@ -120,11 +118,37 @@ static int annotate_browser__cycles_width(struct annotate_browser *ab)
return ab->have_cycles ? IPC_WIDTH + CYCLES_WIDTH : 0;
}
+static void disasm_line__write(struct disasm_line *dl, struct ui_browser *browser,
+ char *bf, size_t size)
+{
+ if (dl->ins.ops && dl->ins.ops->scnprintf) {
+ if (ins__is_jump(&dl->ins)) {
+ bool fwd = dl->ops.target.offset > dl->al.offset;
+
+ ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
+ SLSMG_UARROW_CHAR);
+ SLsmg_write_char(' ');
+ } else if (ins__is_call(&dl->ins)) {
+ ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
+ SLsmg_write_char(' ');
+ } else if (ins__is_ret(&dl->ins)) {
+ ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
+ SLsmg_write_char(' ');
+ } else {
+ ui_browser__write_nstring(browser, " ", 2);
+ }
+ } else {
+ ui_browser__write_nstring(browser, " ", 2);
+ }
+
+ disasm_line__scnprintf(dl, bf, size, !annotate_browser__opts.use_offset);
+}
+
static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
- struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
- struct browser_disasm_line *bdl = disasm_line__browser(dl);
+ struct annotation_line *al = list_entry(entry, struct annotation_line, node);
+ struct browser_line *bl = browser_line(al);
bool current_entry = ui_browser__is_current_entry(browser, row);
bool change_color = (!annotate_browser__opts.hide_src_code &&
(!current_entry || (browser->use_navkeypressed &&
@@ -137,32 +161,32 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
bool show_title = false;
for (i = 0; i < ab->nr_events; i++) {
- if (bdl->samples[i].percent > percent_max)
- percent_max = bdl->samples[i].percent;
+ if (al->samples[i].percent > percent_max)
+ percent_max = al->samples[i].percent;
}
- if ((row == 0) && (dl->offset == -1 || percent_max == 0.0)) {
+ if ((row == 0) && (al->offset == -1 || percent_max == 0.0)) {
if (ab->have_cycles) {
- if (dl->ipc == 0.0 && dl->cycles == 0)
+ if (al->ipc == 0.0 && al->cycles == 0)
show_title = true;
} else
show_title = true;
}
- if (dl->offset != -1 && percent_max != 0.0) {
+ if (al->offset != -1 && percent_max != 0.0) {
for (i = 0; i < ab->nr_events; i++) {
ui_browser__set_percent_color(browser,
- bdl->samples[i].percent,
+ al->samples[i].percent,
current_entry);
if (annotate_browser__opts.show_total_period) {
ui_browser__printf(browser, "%11" PRIu64 " ",
- bdl->samples[i].he.period);
+ al->samples[i].he.period);
} else if (annotate_browser__opts.show_nr_samples) {
ui_browser__printf(browser, "%6" PRIu64 " ",
- bdl->samples[i].he.nr_samples);
+ al->samples[i].he.nr_samples);
} else {
ui_browser__printf(browser, "%6.2f ",
- bdl->samples[i].percent);
+ al->samples[i].percent);
}
}
} else {
@@ -177,16 +201,16 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
}
}
if (ab->have_cycles) {
- if (dl->ipc)
- ui_browser__printf(browser, "%*.2f ", IPC_WIDTH - 1, dl->ipc);
+ if (al->ipc)
+ ui_browser__printf(browser, "%*.2f ", IPC_WIDTH - 1, al->ipc);
else if (!show_title)
ui_browser__write_nstring(browser, " ", IPC_WIDTH);
else
ui_browser__printf(browser, "%*s ", IPC_WIDTH - 1, "IPC");
- if (dl->cycles)
+ if (al->cycles)
ui_browser__printf(browser, "%*" PRIu64 " ",
- CYCLES_WIDTH - 1, dl->cycles);
+ CYCLES_WIDTH - 1, al->cycles);
else if (!show_title)
ui_browser__write_nstring(browser, " ", CYCLES_WIDTH);
else
@@ -199,19 +223,19 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
if (!browser->navkeypressed)
width += 1;
- if (!*dl->line)
+ if (!*al->line)
ui_browser__write_nstring(browser, " ", width - pcnt_width - cycles_width);
- else if (dl->offset == -1) {
- if (dl->line_nr && annotate_browser__opts.show_linenr)
+ else if (al->offset == -1) {
+ if (al->line_nr && annotate_browser__opts.show_linenr)
printed = scnprintf(bf, sizeof(bf), "%-*d ",
- ab->addr_width + 1, dl->line_nr);
+ ab->addr_width + 1, al->line_nr);
else
printed = scnprintf(bf, sizeof(bf), "%*s ",
ab->addr_width, " ");
ui_browser__write_nstring(browser, bf, printed);
- ui_browser__write_nstring(browser, dl->line, width - printed - pcnt_width - cycles_width + 1);
+ ui_browser__write_nstring(browser, al->line, width - printed - pcnt_width - cycles_width + 1);
} else {
- u64 addr = dl->offset;
+ u64 addr = al->offset;
int color = -1;
if (!annotate_browser__opts.use_offset)
@@ -220,13 +244,13 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
if (!annotate_browser__opts.use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else {
- if (bdl->jump_sources) {
+ if (bl->jump_sources) {
if (annotate_browser__opts.show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
ab->jumps_width,
- bdl->jump_sources);
- prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
+ bl->jump_sources);
+ prev = annotate_browser__set_jumps_percent_color(ab, bl->jump_sources,
current_entry);
ui_browser__write_nstring(browser, bf, printed);
ui_browser__set_color(browser, prev);
@@ -245,32 +269,14 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
ui_browser__write_nstring(browser, bf, printed);
if (change_color)
ui_browser__set_color(browser, color);
- if (dl->ins.ops && dl->ins.ops->scnprintf) {
- if (ins__is_jump(&dl->ins)) {
- bool fwd = dl->ops.target.offset > dl->offset;
-
- ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
- SLSMG_UARROW_CHAR);
- SLsmg_write_char(' ');
- } else if (ins__is_call(&dl->ins)) {
- ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
- SLsmg_write_char(' ');
- } else if (ins__is_ret(&dl->ins)) {
- ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
- SLsmg_write_char(' ');
- } else {
- ui_browser__write_nstring(browser, " ", 2);
- }
- } else {
- ui_browser__write_nstring(browser, " ", 2);
- }
- disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
+ disasm_line__write(disasm_line(al), browser, bf, sizeof(bf));
+
ui_browser__write_nstring(browser, bf, width - pcnt_width - cycles_width - 3 - printed);
}
if (current_entry)
- ab->selection = dl;
+ ab->selection = al;
}
static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sym)
@@ -286,7 +292,7 @@ static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sy
static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
{
- struct disasm_line *pos = list_prev_entry(cursor, node);
+ struct disasm_line *pos = list_prev_entry(cursor, al.node);
const char *name;
if (!pos)
@@ -306,8 +312,9 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
static void annotate_browser__draw_current_jump(struct ui_browser *browser)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
- struct disasm_line *cursor = ab->selection, *target;
- struct browser_disasm_line *btarget, *bcursor;
+ struct disasm_line *cursor = disasm_line(ab->selection);
+ struct annotation_line *target;
+ struct browser_line *btarget, *bcursor;
unsigned int from, to;
struct map_symbol *ms = ab->b.priv;
struct symbol *sym = ms->sym;
@@ -321,11 +328,9 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
return;
target = ab->offsets[cursor->ops.target.offset];
- if (!target)
- return;
- bcursor = disasm_line__browser(cursor);
- btarget = disasm_line__browser(target);
+ bcursor = browser_line(&cursor->al);
+ btarget = browser_line(target);
if (annotate_browser__opts.hide_src_code) {
from = bcursor->idx_asm;
@@ -361,12 +366,11 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
return ret;
}
-static int disasm__cmp(struct browser_disasm_line *a,
- struct browser_disasm_line *b, int nr_pcnt)
+static int disasm__cmp(struct annotation_line *a, struct annotation_line *b)
{
int i;
- for (i = 0; i < nr_pcnt; i++) {
+ for (i = 0; i < a->samples_nr; i++) {
if (a->samples[i].percent == b->samples[i].percent)
continue;
return a->samples[i].percent < b->samples[i].percent;
@@ -374,28 +378,27 @@ static int disasm__cmp(struct browser_disasm_line *a,
return 0;
}
-static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl,
- int nr_events)
+static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
- struct browser_disasm_line *l;
+ struct annotation_line *l;
while (*p != NULL) {
parent = *p;
- l = rb_entry(parent, struct browser_disasm_line, rb_node);
+ l = rb_entry(parent, struct annotation_line, rb_node);
- if (disasm__cmp(bdl, l, nr_events))
+ if (disasm__cmp(al, l))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
- rb_link_node(&bdl->rb_node, parent, p);
- rb_insert_color(&bdl->rb_node, root);
+ rb_link_node(&al->rb_node, parent, p);
+ rb_insert_color(&al->rb_node, root);
}
static void annotate_browser__set_top(struct annotate_browser *browser,
- struct disasm_line *pos, u32 idx)
+ struct annotation_line *pos, u32 idx)
{
unsigned back;
@@ -404,7 +407,7 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
browser->b.top_idx = browser->b.index = idx;
while (browser->b.top_idx != 0 && back != 0) {
- pos = list_entry(pos->node.prev, struct disasm_line, node);
+ pos = list_entry(pos->node.prev, struct annotation_line, node);
if (disasm_line__filter(&browser->b, &pos->node))
continue;
@@ -420,12 +423,13 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
static void annotate_browser__set_rb_top(struct annotate_browser *browser,
struct rb_node *nd)
{
- struct browser_disasm_line *bpos;
- struct disasm_line *pos;
+ struct browser_line *bpos;
+ struct annotation_line *pos;
u32 idx;
- bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
- pos = ((struct disasm_line *)bpos) - 1;
+ pos = rb_entry(nd, struct annotation_line, rb_node);
+ bpos = browser_line(pos);
+
idx = bpos->idx;
if (annotate_browser__opts.hide_src_code)
idx = bpos->idx_asm;
@@ -439,46 +443,35 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *pos, *next;
- s64 len = symbol__size(sym);
+ struct disasm_line *pos;
browser->entries = RB_ROOT;
pthread_mutex_lock(&notes->lock);
- list_for_each_entry(pos, &notes->src->source, node) {
- struct browser_disasm_line *bpos = disasm_line__browser(pos);
- const char *path = NULL;
+ symbol__calc_percent(sym, evsel);
+
+ list_for_each_entry(pos, &notes->src->source, al.node) {
double max_percent = 0.0;
int i;
- if (pos->offset == -1) {
- RB_CLEAR_NODE(&bpos->rb_node);
+ if (pos->al.offset == -1) {
+ RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
- next = disasm__get_next_ip_line(&notes->src->source, pos);
-
- for (i = 0; i < browser->nr_events; i++) {
- struct sym_hist_entry sample;
-
- bpos->samples[i].percent = disasm__calc_percent(notes,
- evsel->idx + i,
- pos->offset,
- next ? next->offset : len,
- &path, &sample);
- bpos->samples[i].he = sample;
+ for (i = 0; i < pos->al.samples_nr; i++) {
+ struct annotation_data *sample = &pos->al.samples[i];
- if (max_percent < bpos->samples[i].percent)
- max_percent = bpos->samples[i].percent;
+ if (max_percent < sample->percent)
+ max_percent = sample->percent;
}
- if (max_percent < 0.01 && pos->ipc == 0) {
- RB_CLEAR_NODE(&bpos->rb_node);
+ if (max_percent < 0.01 && pos->al.ipc == 0) {
+ RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
- disasm_rb_tree__insert(&browser->entries, bpos,
- browser->nr_events);
+ disasm_rb_tree__insert(&browser->entries, &pos->al);
}
pthread_mutex_unlock(&notes->lock);
@@ -487,38 +480,38 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
static bool annotate_browser__toggle_source(struct annotate_browser *browser)
{
- struct disasm_line *dl;
- struct browser_disasm_line *bdl;
+ struct annotation_line *al;
+ struct browser_line *bl;
off_t offset = browser->b.index - browser->b.top_idx;
browser->b.seek(&browser->b, offset, SEEK_CUR);
- dl = list_entry(browser->b.top, struct disasm_line, node);
- bdl = disasm_line__browser(dl);
+ al = list_entry(browser->b.top, struct annotation_line, node);
+ bl = browser_line(al);
if (annotate_browser__opts.hide_src_code) {
- if (bdl->idx_asm < offset)
- offset = bdl->idx;
+ if (bl->idx_asm < offset)
+ offset = bl->idx;
browser->b.nr_entries = browser->nr_entries;
annotate_browser__opts.hide_src_code = false;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
- browser->b.top_idx = bdl->idx - offset;
- browser->b.index = bdl->idx;
+ browser->b.top_idx = bl->idx - offset;
+ browser->b.index = bl->idx;
} else {
- if (bdl->idx_asm < 0) {
+ if (bl->idx_asm < 0) {
ui_helpline__puts("Only available for assembly lines.");
browser->b.seek(&browser->b, -offset, SEEK_CUR);
return false;
}
- if (bdl->idx_asm < offset)
- offset = bdl->idx_asm;
+ if (bl->idx_asm < offset)
+ offset = bl->idx_asm;
browser->b.nr_entries = browser->nr_asm_entries;
annotate_browser__opts.hide_src_code = true;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
- browser->b.top_idx = bdl->idx_asm - offset;
- browser->b.index = bdl->idx_asm;
+ browser->b.top_idx = bl->idx_asm - offset;
+ browser->b.index = bl->idx_asm;
}
return true;
@@ -543,7 +536,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
struct hist_browser_timer *hbt)
{
struct map_symbol *ms = browser->b.priv;
- struct disasm_line *dl = browser->selection;
+ struct disasm_line *dl = disasm_line(browser->selection);
struct annotation *notes;
struct addr_map_symbol target = {
.map = ms->map,
@@ -589,10 +582,10 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows
struct disasm_line *pos;
*idx = 0;
- list_for_each_entry(pos, &notes->src->source, node) {
- if (pos->offset == offset)
+ list_for_each_entry(pos, &notes->src->source, al.node) {
+ if (pos->al.offset == offset)
return pos;
- if (!disasm_line__filter(&browser->b, &pos->node))
+ if (!disasm_line__filter(&browser->b, &pos->al.node))
++*idx;
}
@@ -601,7 +594,7 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows
static bool annotate_browser__jump(struct annotate_browser *browser)
{
- struct disasm_line *dl = browser->selection;
+ struct disasm_line *dl = disasm_line(browser->selection);
u64 offset;
s64 idx;
@@ -615,29 +608,29 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
return true;
}
- annotate_browser__set_top(browser, dl, idx);
+ annotate_browser__set_top(browser, &dl->al, idx);
return true;
}
static
-struct disasm_line *annotate_browser__find_string(struct annotate_browser *browser,
+struct annotation_line *annotate_browser__find_string(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *pos = browser->selection;
+ struct annotation_line *al = browser->selection;
*idx = browser->b.index;
- list_for_each_entry_continue(pos, &notes->src->source, node) {
- if (disasm_line__filter(&browser->b, &pos->node))
+ list_for_each_entry_continue(al, &notes->src->source, node) {
+ if (disasm_line__filter(&browser->b, &al->node))
continue;
++*idx;
- if (pos->line && strstr(pos->line, s) != NULL)
- return pos;
+ if (al->line && strstr(al->line, s) != NULL)
+ return al;
}
return NULL;
@@ -645,38 +638,38 @@ struct disasm_line *annotate_browser__find_string(struct annotate_browser *brows
static bool __annotate_browser__search(struct annotate_browser *browser)
{
- struct disasm_line *dl;
+ struct annotation_line *al;
s64 idx;
- dl = annotate_browser__find_string(browser, browser->search_bf, &idx);
- if (dl == NULL) {
+ al = annotate_browser__find_string(browser, browser->search_bf, &idx);
+ if (al == NULL) {
ui_helpline__puts("String not found!");
return false;
}
- annotate_browser__set_top(browser, dl, idx);
+ annotate_browser__set_top(browser, al, idx);
browser->searching_backwards = false;
return true;
}
static
-struct disasm_line *annotate_browser__find_string_reverse(struct annotate_browser *browser,
+struct annotation_line *annotate_browser__find_string_reverse(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *pos = browser->selection;
+ struct annotation_line *al = browser->selection;
*idx = browser->b.index;
- list_for_each_entry_continue_reverse(pos, &notes->src->source, node) {
- if (disasm_line__filter(&browser->b, &pos->node))
+ list_for_each_entry_continue_reverse(al, &notes->src->source, node) {
+ if (disasm_line__filter(&browser->b, &al->node))
continue;
--*idx;
- if (pos->line && strstr(pos->line, s) != NULL)
- return pos;
+ if (al->line && strstr(al->line, s) != NULL)
+ return al;
}
return NULL;
@@ -684,16 +677,16 @@ struct disasm_line *annotate_browser__find_string_reverse(struct annotate_browse
static bool __annotate_browser__search_reverse(struct annotate_browser *browser)
{
- struct disasm_line *dl;
+ struct annotation_line *al;
s64 idx;
- dl = annotate_browser__find_string_reverse(browser, browser->search_bf, &idx);
- if (dl == NULL) {
+ al = annotate_browser__find_string_reverse(browser, browser->search_bf, &idx);
+ if (al == NULL) {
ui_helpline__puts("String not found!");
return false;
}
- annotate_browser__set_top(browser, dl, idx);
+ annotate_browser__set_top(browser, al, idx);
browser->searching_backwards = true;
return true;
}
@@ -899,13 +892,16 @@ show_help:
continue;
case K_ENTER:
case K_RIGHT:
+ {
+ struct disasm_line *dl = disasm_line(browser->selection);
+
if (browser->selection == NULL)
ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
else if (browser->selection->offset == -1)
ui_helpline__puts("Actions are only available for assembly lines.");
- else if (!browser->selection->ins.ops)
+ else if (!dl->ins.ops)
goto show_sup_ins;
- else if (ins__is_ret(&browser->selection->ins))
+ else if (ins__is_ret(&dl->ins))
goto out;
else if (!(annotate_browser__jump(browser) ||
annotate_browser__callq(browser, evsel, hbt))) {
@@ -913,6 +909,7 @@ show_sup_ins:
ui_helpline__puts("Actions are only available for function call/return & jump/branch instructions.");
}
continue;
+ }
case 't':
if (annotate_browser__opts.show_total_period) {
annotate_browser__opts.show_total_period = false;
@@ -990,10 +987,10 @@ static void count_and_fill(struct annotate_browser *browser, u64 start, u64 end,
return;
for (offset = start; offset <= end; offset++) {
- struct disasm_line *dl = browser->offsets[offset];
+ struct annotation_line *al = browser->offsets[offset];
- if (dl)
- dl->ipc = ipc;
+ if (al)
+ al->ipc = ipc;
}
}
}
@@ -1018,13 +1015,13 @@ static void annotate__compute_ipc(struct annotate_browser *browser, size_t size,
ch = &notes->src->cycles_hist[offset];
if (ch && ch->cycles) {
- struct disasm_line *dl;
+ struct annotation_line *al;
if (ch->have_start)
count_and_fill(browser, ch->start, offset, ch);
- dl = browser->offsets[offset];
- if (dl && ch->num_aggr)
- dl->cycles = ch->cycles_aggr / ch->num_aggr;
+ al = browser->offsets[offset];
+ if (al && ch->num_aggr)
+ al->cycles = ch->cycles_aggr / ch->num_aggr;
browser->have_cycles = true;
}
}
@@ -1043,23 +1040,27 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser
return;
for (offset = 0; offset < size; ++offset) {
- struct disasm_line *dl = browser->offsets[offset], *dlt;
- struct browser_disasm_line *bdlt;
+ struct annotation_line *al = browser->offsets[offset];
+ struct disasm_line *dl;
+ struct browser_line *blt;
+
+ dl = disasm_line(al);
if (!disasm_line__is_valid_jump(dl, sym))
continue;
- dlt = browser->offsets[dl->ops.target.offset];
+ al = browser->offsets[dl->ops.target.offset];
+
/*
* FIXME: Oops, no jump target? Buggy disassembler? Or do we
* have to adjust to the previous offset?
*/
- if (dlt == NULL)
+ if (al == NULL)
continue;
- bdlt = disasm_line__browser(dlt);
- if (++bdlt->jump_sources > browser->max_jump_sources)
- browser->max_jump_sources = bdlt->jump_sources;
+ blt = browser_line(al);
+ if (++blt->jump_sources > browser->max_jump_sources)
+ browser->max_jump_sources = blt->jump_sources;
++browser->nr_jumps;
}
@@ -1078,7 +1079,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
- struct disasm_line *pos, *n;
+ struct annotation_line *al;
struct annotation *notes;
size_t size;
struct map_symbol ms = {
@@ -1097,7 +1098,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
};
int ret = -1, err;
int nr_pcnt = 1;
- size_t sizeof_bdl = sizeof(struct browser_disasm_line);
if (sym == NULL)
return -1;
@@ -1107,21 +1107,16 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
- browser.offsets = zalloc(size * sizeof(struct disasm_line *));
+ browser.offsets = zalloc(size * sizeof(struct annotation_line *));
if (browser.offsets == NULL) {
ui__error("Not enough memory!");
return -1;
}
- if (perf_evsel__is_group_event(evsel)) {
+ if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->nr_members;
- sizeof_bdl += sizeof(struct disasm_line_samples) *
- (nr_pcnt - 1);
- }
- err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
- sizeof_bdl, &browser.arch,
- perf_evsel__env_cpuid(evsel));
+ err = symbol__annotate(sym, map, evsel, sizeof(struct browser_line), &browser.arch);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
@@ -1129,20 +1124,22 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
goto out_free_offsets;
}
+ symbol__calc_percent(sym, evsel);
+
ui_helpline__push("Press ESC to exit");
notes = symbol__annotation(sym);
browser.start = map__rip_2objdump(map, sym->start);
- list_for_each_entry(pos, &notes->src->source, node) {
- struct browser_disasm_line *bpos;
- size_t line_len = strlen(pos->line);
+ list_for_each_entry(al, &notes->src->source, node) {
+ struct browser_line *bpos;
+ size_t line_len = strlen(al->line);
if (browser.b.width < line_len)
browser.b.width = line_len;
- bpos = disasm_line__browser(pos);
+ bpos = browser_line(al);
bpos->idx = browser.nr_entries++;
- if (pos->offset != -1) {
+ if (al->offset != -1) {
bpos->idx_asm = browser.nr_asm_entries++;
/*
* FIXME: short term bandaid to cope with assembly
@@ -1151,8 +1148,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
*
* E.g. copy_user_generic_unrolled
*/
- if (pos->offset < (s64)size)
- browser.offsets[pos->offset] = pos;
+ if (al->offset < (s64)size)
+ browser.offsets[al->offset] = al;
} else
bpos->idx_asm = -1;
}
@@ -1174,10 +1171,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
annotate_browser__update_addr_width(&browser);
ret = annotate_browser__run(&browser, evsel, hbt);
- list_for_each_entry_safe(pos, n, &notes->src->source, node) {
- list_del(&pos->node);
- disasm_line__free(pos);
- }
+
+ annotated_source__purge(notes->src);
out_free_offsets:
free(browser.offsets);
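The browser_line() helper near the top of this file leans on the allocation layout now set up in util/annotate.c: symbol__annotate() is asked for sizeof(struct browser_line) of private space, which lands in front of each disasm_line, whose last member is the annotation_line that the list and rb-tree code passes around. The recovery, compressed into a sketch:

	/*
	 *   [ browser_line | disasm_line { ..., al } ]
	 *                                       ^callers hold '&...->al'
	 */
	struct disasm_line *dl = container_of(al, struct disasm_line, al);
	struct browser_line *bl = (void *)dl - sizeof(struct browser_line);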
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index fc7a2e105bfd..aeeaf15029f0 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -31,14 +31,14 @@ static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
strcpy(buf, "");
- if (dl->offset == (s64) -1)
+ if (dl->al.offset == (s64) -1)
return 0;
symhist = annotation__histogram(symbol__annotation(sym), evidx);
- if (!symbol_conf.event_group && !symhist->addr[dl->offset].nr_samples)
+ if (!symbol_conf.event_group && !symhist->addr[dl->al.offset].nr_samples)
return 0;
- percent = 100.0 * symhist->addr[dl->offset].nr_samples / symhist->nr_samples;
+ percent = 100.0 * symhist->addr[dl->al.offset].nr_samples / symhist->nr_samples;
markup = perf_gtk__get_percent_color(percent);
if (markup)
@@ -57,16 +57,16 @@ static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym,
strcpy(buf, "");
- if (dl->offset == (s64) -1)
+ if (dl->al.offset == (s64) -1)
return 0;
- return scnprintf(buf, size, "%"PRIx64, start + dl->offset);
+ return scnprintf(buf, size, "%"PRIx64, start + dl->al.offset);
}
static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
{
int ret = 0;
- char *line = g_markup_escape_text(dl->line, -1);
+ char *line = g_markup_escape_text(dl->al.line, -1);
const char *markup = "<span fgcolor='gray'>";
strcpy(buf, "");
@@ -74,7 +74,7 @@ static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
if (!line)
return 0;
- if (dl->offset != (s64) -1)
+ if (dl->al.offset != (s64) -1)
markup = NULL;
if (markup)
@@ -119,7 +119,7 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
g_object_unref(GTK_TREE_MODEL(store));
- list_for_each_entry(pos, &notes->src->source, node) {
+ list_for_each_entry(pos, &notes->src->source, al.node) {
GtkTreeIter iter;
int ret = 0;
@@ -148,8 +148,8 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
gtk_container_add(GTK_CONTAINER(window), view);
- list_for_each_entry_safe(pos, n, &notes->src->source, node) {
- list_del(&pos->node);
+ list_for_each_entry_safe(pos, n, &notes->src->source, al.node) {
+ list_del(&pos->al.node);
disasm_line__free(pos);
}
@@ -169,8 +169,7 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
- err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
- 0, NULL, NULL);
+ err = symbol__annotate(sym, map, evsel, 0, NULL);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
@@ -178,6 +177,8 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
return -1;
}
+ symbol__calc_percent(sym, evsel);
+
if (perf_gtk__is_active_context(pgctx)) {
window = pgctx->main_window;
notebook = pgctx->notebook;
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index a3de7916fe63..ea0a452550b0 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -44,7 +44,7 @@ libperf-y += machine.o
libperf-y += map.o
libperf-y += pstack.o
libperf-y += session.o
-libperf-$(CONFIG_AUDIT) += syscalltbl.o
+libperf-$(CONFIG_TRACE) += syscalltbl.o
libperf-y += ordered-events.o
libperf-y += namespaces.o
libperf-y += comm.o
@@ -86,6 +86,14 @@ libperf-$(CONFIG_AUXTRACE) += auxtrace.o
libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
libperf-$(CONFIG_AUXTRACE) += intel-pt.o
libperf-$(CONFIG_AUXTRACE) += intel-bts.o
+libperf-$(CONFIG_AUXTRACE) += arm-spe.o
+libperf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
+
+ifdef CONFIG_LIBOPENCSD
+libperf-$(CONFIG_AUXTRACE) += cs-etm.o
+libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
+endif
+
libperf-y += parse-branch-options.o
libperf-y += dump-insn.o
libperf-y += parse-regs-options.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 3369c7830260..28b233c3dcbe 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -26,7 +26,6 @@
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <sys/utsname.h>
#include "sane_ctype.h"
@@ -322,6 +321,8 @@ static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
return 0;
*addrp = strtoull(comment, &endptr, 16);
+ if (endptr == comment)
+ return 0;
name = strchr(endptr, '<');
if (name == NULL)
return -1;
@@ -435,8 +436,8 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *m
return 0;
comment = ltrim(comment);
- comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
- comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+ comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
@@ -480,7 +481,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
return 0;
comment = ltrim(comment);
- comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
}
@@ -878,32 +879,99 @@ out_free_name:
return -1;
}
-static struct disasm_line *disasm_line__new(s64 offset, char *line,
- size_t privsize, int line_nr,
- struct arch *arch,
- struct map *map)
+struct annotate_args {
+ size_t privsize;
+ struct arch *arch;
+ struct map *map;
+ struct perf_evsel *evsel;
+ s64 offset;
+ char *line;
+ int line_nr;
+};
+
+static void annotation_line__delete(struct annotation_line *al)
{
- struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
+ void *ptr = (void *) al - al->privsize;
+
+ free_srcline(al->path);
+ zfree(&al->line);
+ free(ptr);
+}
+
+/*
+ * Allocating the annotation line data with the following
+ * structure:
+ *
+ * --------------------------------------
+ * private space | struct annotation_line
+ * --------------------------------------
+ *
+ * Size of the private space is stored in 'struct annotation_line'.
+ *
+ */
+static struct annotation_line *
+annotation_line__new(struct annotate_args *args, size_t privsize)
+{
+ struct annotation_line *al;
+ struct perf_evsel *evsel = args->evsel;
+ size_t size = privsize + sizeof(*al);
+ int nr = 1;
+
+ if (perf_evsel__is_group_event(evsel))
+ nr = evsel->nr_members;
+
+ size += sizeof(al->samples[0]) * nr;
+
+ al = zalloc(size);
+ if (al) {
+ al = (void *) al + privsize;
+ al->privsize = privsize;
+ al->offset = args->offset;
+ al->line = strdup(args->line);
+ al->line_nr = args->line_nr;
+ al->samples_nr = nr;
+ }
+
+ return al;
+}
+
+/*
+ * Allocating the disasm annotation line data with
+ * the following structure:
+ *
+ * ------------------------------------------------------------
+ * privsize space | struct disasm_line | struct annotation_line
+ * ------------------------------------------------------------
+ *
+ * We keep the 'struct annotation_line' member as the last member
+ * of 'struct disasm_line' for easy access.
+ *
+ */
+static struct disasm_line *disasm_line__new(struct annotate_args *args)
+{
+ struct disasm_line *dl = NULL;
+ struct annotation_line *al;
+ size_t privsize = args->privsize + offsetof(struct disasm_line, al);
+
+ al = annotation_line__new(args, privsize);
+ if (al != NULL) {
+ dl = disasm_line(al);
- if (dl != NULL) {
- dl->offset = offset;
- dl->line = strdup(line);
- dl->line_nr = line_nr;
- if (dl->line == NULL)
+ if (dl->al.line == NULL)
goto out_delete;
- if (offset != -1) {
- if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
+ if (args->offset != -1) {
+ if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
goto out_free_line;
- disasm_line__init_ins(dl, arch, map);
+ disasm_line__init_ins(dl, args->arch, args->map);
}
}
return dl;
out_free_line:
- zfree(&dl->line);
+ zfree(&dl->al.line);
out_delete:
free(dl);
return NULL;
@@ -911,14 +979,13 @@ out_delete:
void disasm_line__free(struct disasm_line *dl)
{
- zfree(&dl->line);
if (dl->ins.ops && dl->ins.ops->free)
dl->ins.ops->free(&dl->ops);
else
ins__delete(&dl->ops);
free((void *)dl->ins.name);
dl->ins.name = NULL;
- free(dl);
+ annotation_line__delete(&dl->al);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
@@ -929,12 +996,13 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r
return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}
-static void disasm__add(struct list_head *head, struct disasm_line *line)
+static void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
- list_add_tail(&line->node, head);
+ list_add_tail(&al->node, head);
}
-struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
+struct annotation_line *
+annotation_line__next(struct annotation_line *pos, struct list_head *head)
{
list_for_each_entry_continue(pos, head, node)
if (pos->offset >= 0)
@@ -943,50 +1011,6 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa
return NULL;
}
-double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
- s64 end, const char **path, struct sym_hist_entry *sample)
-{
- struct source_line *src_line = notes->src->lines;
- double percent = 0.0;
-
- sample->nr_samples = sample->period = 0;
-
- if (src_line) {
- size_t sizeof_src_line = sizeof(*src_line) +
- sizeof(src_line->samples) * (src_line->nr_pcnt - 1);
-
- while (offset < end) {
- src_line = (void *)notes->src->lines +
- (sizeof_src_line * offset);
-
- if (*path == NULL)
- *path = src_line->path;
-
- percent += src_line->samples[evidx].percent;
- sample->nr_samples += src_line->samples[evidx].nr;
- offset++;
- }
- } else {
- struct sym_hist *h = annotation__histogram(notes, evidx);
- unsigned int hits = 0;
- u64 period = 0;
-
- while (offset < end) {
- hits += h->addr[offset].nr_samples;
- period += h->addr[offset].period;
- ++offset;
- }
-
- if (h->nr_samples) {
- sample->period = period;
- sample->nr_samples = hits;
- percent = 100.0 * hits / h->nr_samples;
- }
- }
-
- return percent;
-}
-
static const char *annotate__address_color(struct block_range *br)
{
double cov = block_range__coverage(br);
@@ -1069,50 +1093,39 @@ static void annotate__branch_printf(struct block_range *br, u64 addr)
}
}
+static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
+{
+ s64 offset = dl->al.offset;
+ const u64 addr = start + offset;
+ struct block_range *br;
+
+ br = block_range__find(addr);
+ color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
+ color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
+ annotate__branch_printf(br, addr);
+ return 0;
+}
-static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
- struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
- int max_lines, struct disasm_line *queue)
+static int
+annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
+ struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
+ int max_lines, struct annotation_line *queue, int addr_fmt_width)
{
+ struct disasm_line *dl = container_of(al, struct disasm_line, al);
static const char *prev_line;
static const char *prev_color;
- if (dl->offset != -1) {
- const char *path = NULL;
- double percent, max_percent = 0.0;
- double *ppercents = &percent;
- struct sym_hist_entry sample;
- struct sym_hist_entry *psamples = &sample;
+ if (al->offset != -1) {
+ double max_percent = 0.0;
int i, nr_percent = 1;
const char *color;
struct annotation *notes = symbol__annotation(sym);
- s64 offset = dl->offset;
- const u64 addr = start + offset;
- struct disasm_line *next;
- struct block_range *br;
-
- next = disasm__get_next_ip_line(&notes->src->source, dl);
-
- if (perf_evsel__is_group_event(evsel)) {
- nr_percent = evsel->nr_members;
- ppercents = calloc(nr_percent, sizeof(double));
- psamples = calloc(nr_percent, sizeof(struct sym_hist_entry));
- if (ppercents == NULL || psamples == NULL) {
- return -1;
- }
- }
- for (i = 0; i < nr_percent; i++) {
- percent = disasm__calc_percent(notes,
- notes->src->lines ? i : evsel->idx + i,
- offset,
- next ? next->offset : (s64) len,
- &path, &sample);
-
- ppercents[i] = percent;
- psamples[i] = sample;
- if (percent > max_percent)
- max_percent = percent;
+ for (i = 0; i < al->samples_nr; i++) {
+ struct annotation_data *sample = &al->samples[i];
+
+ if (sample->percent > max_percent)
+ max_percent = sample->percent;
}
if (max_percent < min_pcnt)
@@ -1123,10 +1136,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
if (queue != NULL) {
list_for_each_entry_from(queue, &notes->src->source, node) {
- if (queue == dl)
+ if (queue == al)
break;
- disasm_line__print(queue, sym, start, evsel, len,
- 0, 0, 1, NULL);
+ annotation_line__print(queue, sym, start, evsel, len,
+ 0, 0, 1, NULL, addr_fmt_width);
}
}
@@ -1137,44 +1150,34 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
* the same color as the percentage. Don't print it
* twice for close colored addr with the same filename:line
*/
- if (path) {
- if (!prev_line || strcmp(prev_line, path)
+ if (al->path) {
+ if (!prev_line || strcmp(prev_line, al->path)
|| color != prev_color) {
- color_fprintf(stdout, color, " %s", path);
- prev_line = path;
+ color_fprintf(stdout, color, " %s", al->path);
+ prev_line = al->path;
prev_color = color;
}
}
for (i = 0; i < nr_percent; i++) {
- percent = ppercents[i];
- sample = psamples[i];
- color = get_percent_color(percent);
+ struct annotation_data *sample = &al->samples[i];
+
+ color = get_percent_color(sample->percent);
if (symbol_conf.show_total_period)
color_fprintf(stdout, color, " %11" PRIu64,
- sample.period);
+ sample->he.period);
else if (symbol_conf.show_nr_samples)
color_fprintf(stdout, color, " %7" PRIu64,
- sample.nr_samples);
+ sample->he.nr_samples);
else
- color_fprintf(stdout, color, " %7.2f", percent);
+ color_fprintf(stdout, color, " %7.2f", sample->percent);
}
- printf(" : ");
+ printf(" : ");
- br = block_range__find(addr);
- color_fprintf(stdout, annotate__address_color(br), " %" PRIx64 ":", addr);
- color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
- annotate__branch_printf(br, addr);
+ disasm_line__print(dl, start, addr_fmt_width);
printf("\n");
-
- if (ppercents != &percent)
- free(ppercents);
-
- if (psamples != &sample)
- free(psamples);
-
} else if (max_lines && printed >= max_lines)
return 1;
else {
@@ -1186,10 +1189,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
if (perf_evsel__is_group_event(evsel))
width *= evsel->nr_members;
- if (!*dl->line)
+ if (!*al->line)
printf(" %*s:\n", width, " ");
else
- printf(" %*s: %s\n", width, " ", dl->line);
+ printf(" %*s: %*s %s\n", width, " ", addr_fmt_width, " ", al->line);
}
return 0;
@@ -1215,11 +1218,11 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
* means that it's not a disassembly line so should be treated differently.
* The ops.raw part will be parsed further according to the type of the instruction.
*/
-static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
- struct arch *arch,
- FILE *file, size_t privsize,
+static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
+ struct annotate_args *args,
int *line_nr)
{
+ struct map *map = args->map;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *dl;
char *line = NULL, *parsed_line, *tmp, *tmp2;
@@ -1263,7 +1266,11 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
parsed_line = tmp2 + 1;
}
- dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
+ args->offset = offset;
+ args->line = parsed_line;
+ args->line_nr = *line_nr;
+
+ dl = disasm_line__new(args);
free(line);
(*line_nr)++;
@@ -1288,7 +1295,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
dl->ops.target.name = strdup(target.sym->name);
}
- disasm__add(&notes->src->source, dl);
+ annotation_line__add(&dl->al, &notes->src->source);
return 0;
}
@@ -1305,19 +1312,19 @@ static void delete_last_nop(struct symbol *sym)
struct disasm_line *dl;
while (!list_empty(list)) {
- dl = list_entry(list->prev, struct disasm_line, node);
+ dl = list_entry(list->prev, struct disasm_line, al.node);
if (dl->ins.ops) {
if (dl->ins.ops != &nop_ops)
return;
} else {
- if (!strstr(dl->line, " nop ") &&
- !strstr(dl->line, " nopl ") &&
- !strstr(dl->line, " nopw "))
+ if (!strstr(dl->al.line, " nop ") &&
+ !strstr(dl->al.line, " nopl ") &&
+ !strstr(dl->al.line, " nopw "))
return;
}
- list_del(&dl->node);
+ list_del(&dl->al.node);
disasm_line__free(dl);
}
}
@@ -1412,25 +1419,11 @@ fallback:
return 0;
}
-static const char *annotate__norm_arch(const char *arch_name)
-{
- struct utsname uts;
-
- if (!arch_name) { /* Assume we are annotating locally. */
- if (uname(&uts) < 0)
- return NULL;
- arch_name = uts.machine;
- }
- return normalize_arch((char *)arch_name);
-}
-
-int symbol__disassemble(struct symbol *sym, struct map *map,
- const char *arch_name, size_t privsize,
- struct arch **parch, char *cpuid)
+static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
+ struct map *map = args->map;
struct dso *dso = map->dso;
char command[PATH_MAX * 2];
- struct arch *arch = NULL;
FILE *file;
char symfs_filename[PATH_MAX];
struct kcore_extract kce;
@@ -1444,25 +1437,6 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
if (err)
return err;
- arch_name = annotate__norm_arch(arch_name);
- if (!arch_name)
- return -1;
-
- arch = arch__find(arch_name);
- if (arch == NULL)
- return -ENOTSUP;
-
- if (parch)
- *parch = arch;
-
- if (arch->init) {
- err = arch->init(arch, cpuid);
- if (err) {
- pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
- return err;
- }
- }
-
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
symfs_filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
@@ -1546,8 +1520,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
* can associate it with the instructions till the next one.
* See disasm_line__new() and struct disasm_line::line_nr.
*/
- if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
- &lineno) < 0)
+ if (symbol__parse_objdump_line(sym, file, args, &lineno) < 0)
break;
nline++;
}
@@ -1580,21 +1553,110 @@ out_close_stdout:
goto out_remove_tmp;
}
-static void insert_source_line(struct rb_root *root, struct source_line *src_line)
+static void calc_percent(struct sym_hist *hist,
+ struct annotation_data *sample,
+ s64 offset, s64 end)
+{
+ unsigned int hits = 0;
+ u64 period = 0;
+
+ while (offset < end) {
+ hits += hist->addr[offset].nr_samples;
+ period += hist->addr[offset].period;
+ ++offset;
+ }
+
+ if (hist->nr_samples) {
+ sample->he.period = period;
+ sample->he.nr_samples = hits;
+ sample->percent = 100.0 * hits / hist->nr_samples;
+ }
+}
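
calc_percent() only ever divides by the symbol-wide sample count, so the result is easy to check by hand. A tiny sketch with made-up histogram values (not taken from any real profile):

	#include <assert.h>

	int main(void)
	{
		/* Hypothetical hits for one instruction range [offset, end). */
		unsigned int hits = 2 + 3 + 5;		/* three addresses summed */
		unsigned int symbol_samples = 20;	/* hist->nr_samples */
		double percent = 100.0 * hits / symbol_samples;

		assert(percent == 50.0);
		return 0;
	}
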
+
+static void annotation__calc_percent(struct annotation *notes,
+ struct perf_evsel *evsel, s64 len)
+{
+ struct annotation_line *al, *next;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ s64 end;
+ int i;
+
+ if (al->offset == -1)
+ continue;
+
+ next = annotation_line__next(al, &notes->src->source);
+ end = next ? next->offset : len;
+
+ for (i = 0; i < al->samples_nr; i++) {
+ struct annotation_data *sample;
+ struct sym_hist *hist;
+
+ hist = annotation__histogram(notes, evsel->idx + i);
+ sample = &al->samples[i];
+
+ calc_percent(hist, sample, al->offset, end);
+ }
+ }
+}
+
+void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel)
+{
+ struct annotation *notes = symbol__annotation(sym);
+
+ annotation__calc_percent(notes, evsel, symbol__size(sym));
+}
+
+int symbol__annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, size_t privsize,
+ struct arch **parch)
{
- struct source_line *iter;
+ struct annotate_args args = {
+ .privsize = privsize,
+ .map = map,
+ .evsel = evsel,
+ };
+ struct perf_env *env = perf_evsel__env(evsel);
+ const char *arch_name = perf_env__arch(env);
+ struct arch *arch;
+ int err;
+
+ if (!arch_name)
+ return -1;
+
+ args.arch = arch = arch__find(arch_name);
+ if (arch == NULL)
+ return -ENOTSUP;
+
+ if (parch)
+ *parch = arch;
+
+ if (arch->init) {
+ err = arch->init(arch, env ? env->cpuid : NULL);
+ if (err) {
+ pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
+ return err;
+ }
+ }
+
+ return symbol__disassemble(sym, &args);
+}
+
+static void insert_source_line(struct rb_root *root, struct annotation_line *al)
+{
+ struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
int i, ret;
while (*p != NULL) {
parent = *p;
- iter = rb_entry(parent, struct source_line, node);
+ iter = rb_entry(parent, struct annotation_line, rb_node);
- ret = strcmp(iter->path, src_line->path);
+ ret = strcmp(iter->path, al->path);
if (ret == 0) {
- for (i = 0; i < src_line->nr_pcnt; i++)
- iter->samples[i].percent_sum += src_line->samples[i].percent;
+ for (i = 0; i < al->samples_nr; i++)
+ iter->samples[i].percent_sum += al->samples[i].percent;
return;
}
@@ -1604,18 +1666,18 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
p = &(*p)->rb_right;
}
- for (i = 0; i < src_line->nr_pcnt; i++)
- src_line->samples[i].percent_sum = src_line->samples[i].percent;
+ for (i = 0; i < al->samples_nr; i++)
+ al->samples[i].percent_sum = al->samples[i].percent;
- rb_link_node(&src_line->node, parent, p);
- rb_insert_color(&src_line->node, root);
+ rb_link_node(&al->rb_node, parent, p);
+ rb_insert_color(&al->rb_node, root);
}
-static int cmp_source_line(struct source_line *a, struct source_line *b)
+static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
{
int i;
- for (i = 0; i < a->nr_pcnt; i++) {
+ for (i = 0; i < a->samples_nr; i++) {
if (a->samples[i].percent_sum == b->samples[i].percent_sum)
continue;
return a->samples[i].percent_sum > b->samples[i].percent_sum;
@@ -1624,135 +1686,47 @@ static int cmp_source_line(struct source_line *a, struct source_line *b)
return 0;
}
-static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
+static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
{
- struct source_line *iter;
+ struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
while (*p != NULL) {
parent = *p;
- iter = rb_entry(parent, struct source_line, node);
+ iter = rb_entry(parent, struct annotation_line, rb_node);
- if (cmp_source_line(src_line, iter))
+ if (cmp_source_line(al, iter))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
- rb_link_node(&src_line->node, parent, p);
- rb_insert_color(&src_line->node, root);
+ rb_link_node(&al->rb_node, parent, p);
+ rb_insert_color(&al->rb_node, root);
}
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
- struct source_line *src_line;
+ struct annotation_line *al;
struct rb_node *node;
node = rb_first(src_root);
while (node) {
struct rb_node *next;
- src_line = rb_entry(node, struct source_line, node);
+ al = rb_entry(node, struct annotation_line, rb_node);
next = rb_next(node);
rb_erase(node, src_root);
- __resort_source_line(dest_root, src_line);
+ __resort_source_line(dest_root, al);
node = next;
}
}
-static void symbol__free_source_line(struct symbol *sym, int len)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct source_line *src_line = notes->src->lines;
- size_t sizeof_src_line;
- int i;
-
- sizeof_src_line = sizeof(*src_line) +
- (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));
-
- for (i = 0; i < len; i++) {
- free_srcline(src_line->path);
- src_line = (void *)src_line + sizeof_src_line;
- }
-
- zfree(&notes->src->lines);
-}
-
-/* Get the filename:line for the colored entries */
-static int symbol__get_source_line(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel,
- struct rb_root *root, int len)
-{
- u64 start;
- int i, k;
- int evidx = evsel->idx;
- struct source_line *src_line;
- struct annotation *notes = symbol__annotation(sym);
- struct sym_hist *h = annotation__histogram(notes, evidx);
- struct rb_root tmp_root = RB_ROOT;
- int nr_pcnt = 1;
- u64 nr_samples = h->nr_samples;
- size_t sizeof_src_line = sizeof(struct source_line);
-
- if (perf_evsel__is_group_event(evsel)) {
- for (i = 1; i < evsel->nr_members; i++) {
- h = annotation__histogram(notes, evidx + i);
- nr_samples += h->nr_samples;
- }
- nr_pcnt = evsel->nr_members;
- sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
- }
-
- if (!nr_samples)
- return 0;
-
- src_line = notes->src->lines = calloc(len, sizeof_src_line);
- if (!notes->src->lines)
- return -1;
-
- start = map__rip_2objdump(map, sym->start);
-
- for (i = 0; i < len; i++) {
- u64 offset;
- double percent_max = 0.0;
-
- src_line->nr_pcnt = nr_pcnt;
-
- for (k = 0; k < nr_pcnt; k++) {
- double percent = 0.0;
-
- h = annotation__histogram(notes, evidx + k);
- nr_samples = h->addr[i].nr_samples;
- if (h->nr_samples)
- percent = 100.0 * nr_samples / h->nr_samples;
-
- if (percent > percent_max)
- percent_max = percent;
- src_line->samples[k].percent = percent;
- src_line->samples[k].nr = nr_samples;
- }
-
- if (percent_max <= 0.5)
- goto next;
-
- offset = start + i;
- src_line->path = get_srcline(map->dso, offset, NULL,
- false, true);
- insert_source_line(&tmp_root, src_line);
-
- next:
- src_line = (void *)src_line + sizeof_src_line;
- }
-
- resort_source_line(root, &tmp_root);
- return 0;
-}
-
static void print_summary(struct rb_root *root, const char *filename)
{
- struct source_line *src_line;
+ struct annotation_line *al;
struct rb_node *node;
printf("\nSorted summary for file %s\n", filename);
@@ -1770,9 +1744,9 @@ static void print_summary(struct rb_root *root, const char *filename)
char *path;
int i;
- src_line = rb_entry(node, struct source_line, node);
- for (i = 0; i < src_line->nr_pcnt; i++) {
- percent = src_line->samples[i].percent_sum;
+ al = rb_entry(node, struct annotation_line, rb_node);
+ for (i = 0; i < al->samples_nr; i++) {
+ percent = al->samples[i].percent_sum;
color = get_percent_color(percent);
color_fprintf(stdout, color, " %7.2f", percent);
@@ -1780,7 +1754,7 @@ static void print_summary(struct rb_root *root, const char *filename)
percent_max = percent;
}
- path = src_line->path;
+ path = al->path;
color = get_percent_color(percent_max);
color_fprintf(stdout, color, " %s\n", path);
@@ -1801,6 +1775,19 @@ static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}
+static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
+{
+ char bf[32];
+ struct annotation_line *line;
+
+ list_for_each_entry_reverse(line, lines, node) {
+ if (line->offset != -1)
+ return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
+ }
+
+ return 0;
+}
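
annotated_source__addr_fmt_width() walks the list backwards because the last line with a valid offset has the highest address, and that address's printed width bounds every other address in the listing. A quick check with made-up numbers:

	#include <assert.h>
	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		char bf[32];
		uint64_t start = 0x400000, last_offset = 0x1f3;
		int width = snprintf(bf, sizeof(bf), "%" PRIx64,
				     start + last_offset);

		assert(width == 6);	/* "4001f3" */
		return 0;
	}
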
+
int symbol__annotate_printf(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool full_paths,
int min_pcnt, int max_lines, int context)
@@ -1811,9 +1798,9 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
const char *evsel_name = perf_evsel__name(evsel);
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->idx);
- struct disasm_line *pos, *queue = NULL;
+ struct annotation_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
- int printed = 2, queue_len = 0;
+ int printed = 2, queue_len = 0, addr_fmt_width;
int more = 0;
u64 len;
int width = symbol_conf.show_total_period ? 12 : 8;
@@ -1844,15 +1831,21 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
if (verbose > 0)
symbol__annotate_hits(sym, evsel);
+ addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
+
list_for_each_entry(pos, &notes->src->source, node) {
+ int err;
+
if (context && queue == NULL) {
queue = pos;
queue_len = 0;
}
- switch (disasm_line__print(pos, sym, start, evsel, len,
- min_pcnt, printed, max_lines,
- queue)) {
+ err = annotation_line__print(pos, sym, start, evsel, len,
+ min_pcnt, printed, max_lines,
+ queue, addr_fmt_width);
+
+ switch (err) {
case 0:
++printed;
if (context) {
@@ -1907,13 +1900,13 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
}
}
-void disasm__purge(struct list_head *head)
+void annotated_source__purge(struct annotated_source *as)
{
- struct disasm_line *pos, *n;
+ struct annotation_line *al, *n;
- list_for_each_entry_safe(pos, n, head, node) {
- list_del(&pos->node);
- disasm_line__free(pos);
+ list_for_each_entry_safe(al, n, &as->source, node) {
+ list_del(&al->node);
+ disasm_line__free(disasm_line(al));
}
}
@@ -1921,10 +1914,10 @@ static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
size_t printed;
- if (dl->offset == -1)
- return fprintf(fp, "%s\n", dl->line);
+ if (dl->al.offset == -1)
+ return fprintf(fp, "%s\n", dl->al.line);
- printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);
+ printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
if (dl->ops.raw[0] != '\0') {
printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
@@ -1939,38 +1932,73 @@ size_t disasm__fprintf(struct list_head *head, FILE *fp)
struct disasm_line *pos;
size_t printed = 0;
- list_for_each_entry(pos, head, node)
+ list_for_each_entry(pos, head, al.node)
printed += disasm_line__fprintf(pos, fp);
return printed;
}
+static void annotation__calc_lines(struct annotation *notes, struct map *map,
+ struct rb_root *root, u64 start)
+{
+ struct annotation_line *al;
+ struct rb_root tmp_root = RB_ROOT;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ double percent_max = 0.0;
+ int i;
+
+ for (i = 0; i < al->samples_nr; i++) {
+ struct annotation_data *sample;
+
+ sample = &al->samples[i];
+
+ if (sample->percent > percent_max)
+ percent_max = sample->percent;
+ }
+
+ if (percent_max <= 0.5)
+ continue;
+
+ al->path = get_srcline(map->dso, start + al->offset, NULL,
+ false, true, start + al->offset);
+ insert_source_line(&tmp_root, al);
+ }
+
+ resort_source_line(root, &tmp_root);
+}
+
+static void symbol__calc_lines(struct symbol *sym, struct map *map,
+ struct rb_root *root)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ u64 start = map__rip_2objdump(map, sym->start);
+
+ annotation__calc_lines(notes, map, root, start);
+}
+
int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool print_lines,
bool full_paths, int min_pcnt, int max_lines)
{
struct dso *dso = map->dso;
struct rb_root source_line = RB_ROOT;
- u64 len;
- if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
- 0, NULL, NULL) < 0)
+ if (symbol__annotate(sym, map, evsel, 0, NULL) < 0)
return -1;
- len = symbol__size(sym);
+ symbol__calc_percent(sym, evsel);
if (print_lines) {
srcline_full_filename = full_paths;
- symbol__get_source_line(sym, map, evsel, &source_line, len);
+ symbol__calc_lines(sym, map, &source_line);
print_summary(&source_line, dso->long_name);
}
symbol__annotate_printf(sym, map, evsel, full_paths,
min_pcnt, max_lines, 0);
- if (print_lines)
- symbol__free_source_line(sym, len);
- disasm__purge(&symbol__annotation(sym)->src->source);
+ annotated_source__purge(symbol__annotation(sym)->src);
return 0;
}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index f6ba3560de5e..ce427445671f 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -59,33 +59,55 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
struct annotation;
+struct sym_hist_entry {
+ u64 nr_samples;
+ u64 period;
+};
+
+struct annotation_data {
+ double percent;
+ double percent_sum;
+ struct sym_hist_entry he;
+};
+
+struct annotation_line {
+ struct list_head node;
+ struct rb_node rb_node;
+ s64 offset;
+ char *line;
+ int line_nr;
+ float ipc;
+ u64 cycles;
+ size_t privsize;
+ char *path;
+ int samples_nr;
+ struct annotation_data samples[0];
+};
+
struct disasm_line {
- struct list_head node;
- s64 offset;
- char *line;
- struct ins ins;
- int line_nr;
- float ipc;
- u64 cycles;
- struct ins_operands ops;
+ struct ins ins;
+ struct ins_operands ops;
+
+ /* This needs to be at the end. */
+ struct annotation_line al;
};
+static inline struct disasm_line *disasm_line(struct annotation_line *al)
+{
+ return al ? container_of(al, struct disasm_line, al) : NULL;
+}
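
Because 'al' is embedded in 'struct disasm_line', the helper can recover the enclosing structure with container_of(). A standalone sketch of the round trip, using a userspace container_of and trimmed-down structs (illustrative only):

	#include <assert.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct annotation_line { int offset; };
	struct disasm_line {
		int ins;
		struct annotation_line al;	/* must stay the last member */
	};

	int main(void)
	{
		struct disasm_line dl;

		/* &dl -> &dl.al -> back to &dl again. */
		assert(container_of(&dl.al, struct disasm_line, al) == &dl);
		return 0;
	}
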
+
static inline bool disasm_line__has_offset(const struct disasm_line *dl)
{
return dl->ops.target.offset_avail;
}
-struct sym_hist_entry {
- u64 nr_samples;
- u64 period;
-};
-
void disasm_line__free(struct disasm_line *dl);
-struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
+struct annotation_line *
+annotation_line__next(struct annotation_line *pos, struct list_head *head);
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
-double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
- s64 end, const char **path, struct sym_hist_entry *sample);
+void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel);
struct sym_hist {
u64 nr_samples;
@@ -104,19 +126,6 @@ struct cyc_hist {
u16 reset;
};
-struct source_line_samples {
- double percent;
- double percent_sum;
- u64 nr;
-};
-
-struct source_line {
- struct rb_node node;
- char *path;
- int nr_pcnt;
- struct source_line_samples samples[1];
-};
-
/** struct annotated_source - symbols with hits have this attached as in sannotation
*
* @histogram: Array of addr hit histograms per event being monitored
@@ -132,7 +141,6 @@ struct source_line {
*/
struct annotated_source {
struct list_head source;
- struct source_line *lines;
int nr_histograms;
size_t sizeof_sym_hist;
struct cyc_hist *cycles_hist;
@@ -169,9 +177,9 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *samp
int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
-int symbol__disassemble(struct symbol *sym, struct map *map,
- const char *arch_name, size_t privsize,
- struct arch **parch, char *cpuid);
+int symbol__annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, size_t privsize,
+ struct arch **parch);
enum symbol_disassemble_errno {
SYMBOL_ANNOTATE_ERRNO__SUCCESS = 0,
@@ -198,7 +206,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
int min_pcnt, int max_lines, int context);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
-void disasm__purge(struct list_head *head);
+void annotated_source__purge(struct annotated_source *as);
bool ui__has_annotation(void);
diff --git a/tools/perf/util/arm-spe-pkt-decoder.c b/tools/perf/util/arm-spe-pkt-decoder.c
new file mode 100644
index 000000000000..b94001b756c7
--- /dev/null
+++ b/tools/perf/util/arm-spe-pkt-decoder.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#include "arm-spe-pkt-decoder.h"
+
+#define BIT(n) (1ULL << (n))
+
+#define NS_FLAG BIT(63)
+#define EL_FLAG (BIT(62) | BIT(61))
+
+#define SPE_HEADER0_PAD 0x0
+#define SPE_HEADER0_END 0x1
+#define SPE_HEADER0_ADDRESS 0x30 /* address packet (short) */
+#define SPE_HEADER0_ADDRESS_MASK 0x38
+#define SPE_HEADER0_COUNTER 0x18 /* counter packet (short) */
+#define SPE_HEADER0_COUNTER_MASK 0x38
+#define SPE_HEADER0_TIMESTAMP 0x71
+#define SPE_HEADER0_EVENTS 0x2
+#define SPE_HEADER0_EVENTS_MASK 0xf
+#define SPE_HEADER0_SOURCE 0x3
+#define SPE_HEADER0_SOURCE_MASK 0xf
+#define SPE_HEADER0_CONTEXT 0x24
+#define SPE_HEADER0_CONTEXT_MASK 0x3c
+#define SPE_HEADER0_OP_TYPE 0x8
+#define SPE_HEADER0_OP_TYPE_MASK 0x3c
+#define SPE_HEADER1_ALIGNMENT 0x0
+#define SPE_HEADER1_ADDRESS 0xb0 /* address packet (extended) */
+#define SPE_HEADER1_ADDRESS_MASK 0xf8
+#define SPE_HEADER1_COUNTER 0x98 /* counter packet (extended) */
+#define SPE_HEADER1_COUNTER_MASK 0xf8
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define le16_to_cpu bswap_16
+#define le32_to_cpu bswap_32
+#define le64_to_cpu bswap_64
+#define memcpy_le64(d, s, n) do { \
+ memcpy((d), (s), (n)); \
+ *(d) = le64_to_cpu(*(d)); \
+} while (0)
+#else
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define memcpy_le64 memcpy
+#endif
+
+static const char * const arm_spe_packet_name[] = {
+ [ARM_SPE_PAD] = "PAD",
+ [ARM_SPE_END] = "END",
+ [ARM_SPE_TIMESTAMP] = "TS",
+ [ARM_SPE_ADDRESS] = "ADDR",
+ [ARM_SPE_COUNTER] = "LAT",
+ [ARM_SPE_CONTEXT] = "CONTEXT",
+ [ARM_SPE_OP_TYPE] = "OP-TYPE",
+ [ARM_SPE_EVENTS] = "EVENTS",
+ [ARM_SPE_DATA_SOURCE] = "DATA-SOURCE",
+};
+
+const char *arm_spe_pkt_name(enum arm_spe_pkt_type type)
+{
+ return arm_spe_packet_name[type];
+}
+
+/*
+ * Return the ARM SPE payload size from its encoding in
+ * bits 5:4 of the header byte:
+ * 00 : byte (1)
+ * 01 : halfword (2)
+ * 10 : word (4)
+ * 11 : doubleword (8)
+ */
+static int payloadlen(unsigned char byte)
+{
+ return 1 << ((byte & 0x30) >> 4);
+}
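
A few handpicked header bytes make the shift-and-mask easy to verify (the byte values are arbitrary examples, not taken from a trace):

	#include <assert.h>

	static int payloadlen(unsigned char byte)
	{
		return 1 << ((byte & 0x30) >> 4);
	}

	int main(void)
	{
		assert(payloadlen(0x71) == 8);	/* bits 5:4 = 0b11, doubleword */
		assert(payloadlen(0x62) == 4);	/* bits 5:4 = 0b10, word */
		assert(payloadlen(0x42) == 1);	/* bits 5:4 = 0b00, byte */
		return 0;
	}
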
+
+static int arm_spe_get_payload(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ size_t payload_len = payloadlen(buf[0]);
+
+ if (len < 1 + payload_len)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ buf++;
+
+ switch (payload_len) {
+ case 1: packet->payload = *(uint8_t *)buf; break;
+ case 2: packet->payload = le16_to_cpu(*(uint16_t *)buf); break;
+ case 4: packet->payload = le32_to_cpu(*(uint32_t *)buf); break;
+ case 8: packet->payload = le64_to_cpu(*(uint64_t *)buf); break;
+ default: return ARM_SPE_BAD_PACKET;
+ }
+
+ return 1 + payload_len;
+}
+
+static int arm_spe_get_pad(struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_PAD;
+ return 1;
+}
+
+static int arm_spe_get_alignment(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ unsigned int alignment = 1 << ((buf[0] & 0xf) + 1);
+
+ if (len < alignment)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ packet->type = ARM_SPE_PAD;
+ return alignment - (((uintptr_t)buf) & (alignment - 1));
+}
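
The return value is the number of bytes to skip so the decoder lands on an 'alignment'-aligned address; note that it yields 'alignment' (a full skip), not 0, when 'buf' is already aligned. A sketch of the arithmetic with made-up addresses:

	#include <assert.h>
	#include <stdint.h>

	static unsigned int align_skip(uintptr_t addr, unsigned int alignment)
	{
		return alignment - (addr & (alignment - 1));
	}

	int main(void)
	{
		assert(align_skip(0x1005, 8) == 3);	/* 0x1005 + 3 == 0x1008 */
		assert(align_skip(0x1000, 8) == 8);	/* already aligned: full skip */
		return 0;
	}
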
+
+static int arm_spe_get_end(struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_END;
+ return 1;
+}
+
+static int arm_spe_get_timestamp(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_TIMESTAMP;
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_events(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ int ret = arm_spe_get_payload(buf, len, packet);
+
+ packet->type = ARM_SPE_EVENTS;
+
+ /*
+ * We use the packet index to identify events with fewer
+ * comparisons in arm_spe_pkt_desc(): e.g., the LLC-ACCESS,
+ * LLC-REFILL, and REMOTE-ACCESS events are identified only
+ * when index > 1.
+ */
+ packet->index = ret - 1;
+
+ return ret;
+}
+
+static int arm_spe_get_data_source(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_DATA_SOURCE;
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_context(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_CONTEXT;
+ packet->index = buf[0] & 0x3;
+
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_op_type(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_OP_TYPE;
+ packet->index = buf[0] & 0x3;
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_counter(const unsigned char *buf, size_t len,
+ const unsigned char ext_hdr, struct arm_spe_pkt *packet)
+{
+ if (len < 2)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ packet->type = ARM_SPE_COUNTER;
+ if (ext_hdr)
+ packet->index = ((buf[0] & 0x3) << 3) | (buf[1] & 0x7);
+ else
+ packet->index = buf[0] & 0x7;
+
+ packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
+
+ return 1 + ext_hdr + 2;
+}
+
+static int arm_spe_get_addr(const unsigned char *buf, size_t len,
+ const unsigned char ext_hdr, struct arm_spe_pkt *packet)
+{
+ if (len < 8)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ packet->type = ARM_SPE_ADDRESS;
+ if (ext_hdr)
+ packet->index = ((buf[0] & 0x3) << 3) | (buf[1] & 0x7);
+ else
+ packet->index = buf[0] & 0x7;
+
+ memcpy_le64(&packet->payload, buf + 1, 8);
+
+ return 1 + ext_hdr + 8;
+}
+
+static int arm_spe_do_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ unsigned int byte;
+
+ memset(packet, 0, sizeof(struct arm_spe_pkt));
+
+ if (!len)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ byte = buf[0];
+ if (byte == SPE_HEADER0_PAD)
+ return arm_spe_get_pad(packet);
+ else if (byte == SPE_HEADER0_END) /* no timestamp at end of record */
+ return arm_spe_get_end(packet);
+ else if (byte & 0xc0 /* 0y11xxxxxx */) {
+ if (byte & 0x80) {
+ if ((byte & SPE_HEADER0_ADDRESS_MASK) == SPE_HEADER0_ADDRESS)
+ return arm_spe_get_addr(buf, len, 0, packet);
+ if ((byte & SPE_HEADER0_COUNTER_MASK) == SPE_HEADER0_COUNTER)
+ return arm_spe_get_counter(buf, len, 0, packet);
+ } else
+ if (byte == SPE_HEADER0_TIMESTAMP)
+ return arm_spe_get_timestamp(buf, len, packet);
+ else if ((byte & SPE_HEADER0_EVENTS_MASK) == SPE_HEADER0_EVENTS)
+ return arm_spe_get_events(buf, len, packet);
+ else if ((byte & SPE_HEADER0_SOURCE_MASK) == SPE_HEADER0_SOURCE)
+ return arm_spe_get_data_source(buf, len, packet);
+ else if ((byte & SPE_HEADER0_CONTEXT_MASK) == SPE_HEADER0_CONTEXT)
+ return arm_spe_get_context(buf, len, packet);
+ else if ((byte & SPE_HEADER0_OP_TYPE_MASK) == SPE_HEADER0_OP_TYPE)
+ return arm_spe_get_op_type(buf, len, packet);
+ } else if ((byte & 0xe0) == 0x20 /* 0y001xxxxx */) {
+ /* 16-bit header */
+ byte = buf[1];
+ if (byte == SPE_HEADER1_ALIGNMENT)
+ return arm_spe_get_alignment(buf, len, packet);
+ else if ((byte & SPE_HEADER1_ADDRESS_MASK) == SPE_HEADER1_ADDRESS)
+ return arm_spe_get_addr(buf, len, 1, packet);
+ else if ((byte & SPE_HEADER1_COUNTER_MASK) == SPE_HEADER1_COUNTER)
+ return arm_spe_get_counter(buf, len, 1, packet);
+ }
+
+ return ARM_SPE_BAD_PACKET;
+}
+
+int arm_spe_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ int ret;
+
+ ret = arm_spe_do_get_packet(buf, len, packet);
+ /*
+ * Put multiple consecutive PADs on the same line, up to
+ * the fixed-width output format of 16 bytes per line.
+ */
+ if (ret > 0 && packet->type == ARM_SPE_PAD) {
+ while (ret < 16 && len > (size_t)ret && !buf[ret])
+ ret += 1;
+ }
+ return ret;
+}
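
Assuming the decoder above is compiled in, a handcrafted buffer shows the dispatch end to end: header byte 0x71 selects the timestamp handler, and bits 5:4 of the same byte encode an 8-byte payload, so the packet consumes 9 bytes. The buffer contents below are made up for illustration:

	#include <assert.h>
	#include "arm-spe-pkt-decoder.h"

	int main(void)
	{
		/* 0x71 = TIMESTAMP header; payload size 1 << 3 == 8 bytes. */
		unsigned char buf[9] = { 0x71, 1, 0, 0, 0, 0, 0, 0, 0 };
		struct arm_spe_pkt packet;
		int ret = arm_spe_get_packet(buf, sizeof(buf), &packet);

		assert(ret == 9);			/* header + payload */
		assert(packet.type == ARM_SPE_TIMESTAMP);
		assert(packet.payload == 1);		/* little-endian */
		return 0;
	}
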
+
+int arm_spe_pkt_desc(const struct arm_spe_pkt *packet, char *buf,
+ size_t buf_len)
+{
+ int ret, ns, el, idx = packet->index;
+ unsigned long long payload = packet->payload;
+ const char *name = arm_spe_pkt_name(packet->type);
+
+ switch (packet->type) {
+ case ARM_SPE_BAD:
+ case ARM_SPE_PAD:
+ case ARM_SPE_END:
+ return snprintf(buf, buf_len, "%s", name);
+ case ARM_SPE_EVENTS: {
+ size_t blen = buf_len;
+
+ ret = snprintf(buf, buf_len, "EV");
+ buf += ret;
+ blen -= ret;
+ if (payload & 0x1) {
+ ret = snprintf(buf, buf_len, " EXCEPTION-GEN");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x2) {
+ ret = snprintf(buf, buf_len, " RETIRED");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x4) {
+ ret = snprintf(buf, buf_len, " L1D-ACCESS");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x8) {
+ ret = snprintf(buf, buf_len, " L1D-REFILL");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x10) {
+ ret = snprintf(buf, buf_len, " TLB-ACCESS");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x20) {
+ ret = snprintf(buf, buf_len, " TLB-REFILL");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x40) {
+ ret = snprintf(buf, buf_len, " NOT-TAKEN");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x80) {
+ ret = snprintf(buf, buf_len, " MISPRED");
+ buf += ret;
+ blen -= ret;
+ }
+ if (idx > 1) {
+ if (payload & 0x100) {
+ ret = snprintf(buf, buf_len, " LLC-ACCESS");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x200) {
+ ret = snprintf(buf, buf_len, " LLC-REFILL");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x400) {
+ ret = snprintf(buf, buf_len, " REMOTE-ACCESS");
+ buf += ret;
+ blen -= ret;
+ }
+ }
+ if (ret < 0)
+ return ret;
+ return buf_len - blen;
+ }
+ case ARM_SPE_OP_TYPE:
+ switch (idx) {
+ case 0: return snprintf(buf, buf_len, "%s", payload & 0x1 ?
+ "COND-SELECT" : "INSN-OTHER");
+ case 1: {
+ size_t blen = buf_len;
+
+ if (payload & 0x1)
+ ret = snprintf(buf, buf_len, "ST");
+ else
+ ret = snprintf(buf, buf_len, "LD");
+ buf += ret;
+ blen -= ret;
+ if (payload & 0x2) {
+ if (payload & 0x4) {
+ ret = snprintf(buf, buf_len, " AT");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x8) {
+ ret = snprintf(buf, buf_len, " EXCL");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x10) {
+ ret = snprintf(buf, buf_len, " AR");
+ buf += ret;
+ blen -= ret;
+ }
+ } else if (payload & 0x4) {
+ ret = snprintf(buf, buf_len, " SIMD-FP");
+ buf += ret;
+ blen -= ret;
+ }
+ if (ret < 0)
+ return ret;
+ return buf_len - blen;
+ }
+ case 2: {
+ size_t blen = buf_len;
+
+ ret = snprintf(buf, buf_len, "B");
+ buf += ret;
+ blen -= ret;
+ if (payload & 0x1) {
+ ret = snprintf(buf, buf_len, " COND");
+ buf += ret;
+ blen -= ret;
+ }
+ if (payload & 0x2) {
+ ret = snprintf(buf, buf_len, " IND");
+ buf += ret;
+ blen -= ret;
+ }
+ if (ret < 0)
+ return ret;
+ return buf_len - blen;
+ }
+ default: return 0;
+ }
+ case ARM_SPE_DATA_SOURCE:
+ case ARM_SPE_TIMESTAMP:
+ return snprintf(buf, buf_len, "%s %lld", name, payload);
+ case ARM_SPE_ADDRESS:
+ switch (idx) {
+ case 0:
+ case 1: ns = !!(packet->payload & NS_FLAG);
+ el = (packet->payload & EL_FLAG) >> 61;
+ payload &= ~(0xffULL << 56);
+ return snprintf(buf, buf_len, "%s 0x%llx el%d ns=%d",
+ (idx == 1) ? "TGT" : "PC", payload, el, ns);
+ case 2: return snprintf(buf, buf_len, "VA 0x%llx", payload);
+ case 3: ns = !!(packet->payload & NS_FLAG);
+ payload &= ~(0xffULL << 56);
+ return snprintf(buf, buf_len, "PA 0x%llx ns=%d",
+ payload, ns);
+ default: return 0;
+ }
+ case ARM_SPE_CONTEXT:
+ return snprintf(buf, buf_len, "%s 0x%lx el%d", name,
+ (unsigned long)payload, idx + 1);
+ case ARM_SPE_COUNTER: {
+ size_t blen = buf_len;
+
+ ret = snprintf(buf, buf_len, "%s %d ", name,
+ (unsigned short)payload);
+ buf += ret;
+ blen -= ret;
+ switch (idx) {
+ case 0: ret = snprintf(buf, buf_len, "TOT"); break;
+ case 1: ret = snprintf(buf, buf_len, "ISSUE"); break;
+ case 2: ret = snprintf(buf, buf_len, "XLAT"); break;
+ default: ret = 0;
+ }
+ if (ret < 0)
+ return ret;
+ blen -= ret;
+ return buf_len - blen;
+ }
+ default:
+ break;
+ }
+
+ return snprintf(buf, buf_len, "%s 0x%llx (%d)",
+ name, payload, packet->index);
+}
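
The ADDRESS case above packs metadata into the top byte of the payload: bit 63 is the NS flag, bits 62:61 the exception level, and the low 56 bits the address itself. A quick standalone check with a hypothetical payload value:

	#include <assert.h>
	#include <stdint.h>

	#define BIT(n)	(1ULL << (n))
	#define NS_FLAG	BIT(63)
	#define EL_FLAG	(BIT(62) | BIT(61))

	int main(void)
	{
		/* Hypothetical payload: NS=1, EL2, address 0x400123. */
		uint64_t payload = NS_FLAG | (2ULL << 61) | 0x400123;

		assert(!!(payload & NS_FLAG) == 1);
		assert(((payload & EL_FLAG) >> 61) == 2);
		assert((payload & ~(0xffULL << 56)) == 0x400123);
		return 0;
	}
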
diff --git a/tools/perf/util/arm-spe-pkt-decoder.h b/tools/perf/util/arm-spe-pkt-decoder.h
new file mode 100644
index 000000000000..d786ef65113f
--- /dev/null
+++ b/tools/perf/util/arm-spe-pkt-decoder.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#ifndef INCLUDE__ARM_SPE_PKT_DECODER_H__
+#define INCLUDE__ARM_SPE_PKT_DECODER_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define ARM_SPE_PKT_DESC_MAX 256
+
+#define ARM_SPE_NEED_MORE_BYTES -1
+#define ARM_SPE_BAD_PACKET -2
+
+enum arm_spe_pkt_type {
+ ARM_SPE_BAD,
+ ARM_SPE_PAD,
+ ARM_SPE_END,
+ ARM_SPE_TIMESTAMP,
+ ARM_SPE_ADDRESS,
+ ARM_SPE_COUNTER,
+ ARM_SPE_CONTEXT,
+ ARM_SPE_OP_TYPE,
+ ARM_SPE_EVENTS,
+ ARM_SPE_DATA_SOURCE,
+};
+
+struct arm_spe_pkt {
+ enum arm_spe_pkt_type type;
+ unsigned char index;
+ uint64_t payload;
+};
+
+const char *arm_spe_pkt_name(enum arm_spe_pkt_type);
+
+int arm_spe_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet);
+
+int arm_spe_pkt_desc(const struct arm_spe_pkt *packet, char *buf, size_t len);
+#endif
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
new file mode 100644
index 000000000000..6067267cc76c
--- /dev/null
+++ b/tools/perf/util/arm-spe.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#include <endian.h>
+#include <errno.h>
+#include <byteswap.h>
+#include <inttypes.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+
+#include "cpumap.h"
+#include "color.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "machine.h"
+#include "session.h"
+#include "util.h"
+#include "thread.h"
+#include "debug.h"
+#include "auxtrace.h"
+#include "arm-spe.h"
+#include "arm-spe-pkt-decoder.h"
+
+struct arm_spe {
+ struct auxtrace auxtrace;
+ struct auxtrace_queues queues;
+ struct auxtrace_heap heap;
+ u32 auxtrace_type;
+ struct perf_session *session;
+ struct machine *machine;
+ u32 pmu_type;
+};
+
+struct arm_spe_queue {
+ struct arm_spe *spe;
+ unsigned int queue_nr;
+ struct auxtrace_buffer *buffer;
+ bool on_heap;
+ bool done;
+ pid_t pid;
+ pid_t tid;
+ int cpu;
+};
+
+static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
+ unsigned char *buf, size_t len)
+{
+ struct arm_spe_pkt packet;
+ size_t pos = 0;
+ int ret, pkt_len, i;
+ char desc[ARM_SPE_PKT_DESC_MAX];
+ const char *color = PERF_COLOR_BLUE;
+
+ color_fprintf(stdout, color,
+ ". ... ARM SPE data: size %zu bytes\n",
+ len);
+
+ while (len) {
+ ret = arm_spe_get_packet(buf, len, &packet);
+ if (ret > 0)
+ pkt_len = ret;
+ else
+ pkt_len = 1;
+ printf(".");
+ color_fprintf(stdout, color, " %08x: ", pos);
+ for (i = 0; i < pkt_len; i++)
+ color_fprintf(stdout, color, " %02x", buf[i]);
+ for (; i < 16; i++)
+ color_fprintf(stdout, color, " ");
+ if (ret > 0) {
+ ret = arm_spe_pkt_desc(&packet, desc,
+ ARM_SPE_PKT_DESC_MAX);
+ if (ret > 0)
+ color_fprintf(stdout, color, " %s\n", desc);
+ } else {
+ color_fprintf(stdout, color, " Bad packet!\n");
+ }
+ pos += pkt_len;
+ buf += pkt_len;
+ len -= pkt_len;
+ }
+}
+
+static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
+ size_t len)
+{
+ printf(".\n");
+ arm_spe_dump(spe, buf, len);
+}
+
+static int arm_spe_process_event(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_tool *tool __maybe_unused)
+{
+ return 0;
+}
+
+static int arm_spe_process_auxtrace_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_tool *tool __maybe_unused)
+{
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+ struct auxtrace_buffer *buffer;
+ off_t data_offset;
+ int fd = perf_data__fd(session->data);
+ int err;
+
+ if (perf_data__is_pipe(session->data)) {
+ data_offset = 0;
+ } else {
+ data_offset = lseek(fd, 0, SEEK_CUR);
+ if (data_offset == -1)
+ return -errno;
+ }
+
+ err = auxtrace_queues__add_event(&spe->queues, session, event,
+ data_offset, &buffer);
+ if (err)
+ return err;
+
+ /* Dump here, now that we have copied a piped trace out of the pipe */
+ if (dump_trace) {
+ if (auxtrace_buffer__get_data(buffer, fd)) {
+ arm_spe_dump_event(spe, buffer->data,
+ buffer->size);
+ auxtrace_buffer__put_data(buffer);
+ }
+ }
+
+ return 0;
+}
+
+static int arm_spe_flush(struct perf_session *session __maybe_unused,
+ struct perf_tool *tool __maybe_unused)
+{
+ return 0;
+}
+
+static void arm_spe_free_queue(void *priv)
+{
+ struct arm_spe_queue *speq = priv;
+
+ if (!speq)
+ return;
+ free(speq);
+}
+
+static void arm_spe_free_events(struct perf_session *session)
+{
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+ struct auxtrace_queues *queues = &spe->queues;
+ unsigned int i;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ arm_spe_free_queue(queues->queue_array[i].priv);
+ queues->queue_array[i].priv = NULL;
+ }
+ auxtrace_queues__free(queues);
+}
+
+static void arm_spe_free(struct perf_session *session)
+{
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+
+ auxtrace_heap__free(&spe->heap);
+ arm_spe_free_events(session);
+ session->auxtrace = NULL;
+ free(spe);
+}
+
+static const char * const arm_spe_info_fmts[] = {
+ [ARM_SPE_PMU_TYPE] = " PMU Type %"PRId64"\n",
+};
+
+static void arm_spe_print_info(u64 *arr)
+{
+ if (!dump_trace)
+ return;
+
+ fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
+}
+
+int arm_spe_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session)
+{
+ struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
+ size_t min_sz = sizeof(u64) * ARM_SPE_PMU_TYPE;
+ struct arm_spe *spe;
+ int err;
+
+ if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
+ min_sz)
+ return -EINVAL;
+
+ spe = zalloc(sizeof(struct arm_spe));
+ if (!spe)
+ return -ENOMEM;
+
+ err = auxtrace_queues__init(&spe->queues);
+ if (err)
+ goto err_free;
+
+ spe->session = session;
+ spe->machine = &session->machines.host; /* No kvm support */
+ spe->auxtrace_type = auxtrace_info->type;
+ spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
+
+ spe->auxtrace.process_event = arm_spe_process_event;
+ spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
+ spe->auxtrace.flush_events = arm_spe_flush;
+ spe->auxtrace.free_events = arm_spe_free_events;
+ spe->auxtrace.free = arm_spe_free;
+ session->auxtrace = &spe->auxtrace;
+
+ arm_spe_print_info(&auxtrace_info->priv[0]);
+
+ return 0;
+
+err_free:
+ free(spe);
+ return err;
+}
diff --git a/tools/perf/util/arm-spe.h b/tools/perf/util/arm-spe.h
new file mode 100644
index 000000000000..98d3235781c3
--- /dev/null
+++ b/tools/perf/util/arm-spe.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#ifndef INCLUDE__PERF_ARM_SPE_H__
+#define INCLUDE__PERF_ARM_SPE_H__
+
+#define ARM_SPE_PMU_NAME "arm_spe_"
+
+enum {
+ ARM_SPE_PMU_TYPE,
+ ARM_SPE_PER_CPU_MMAPS,
+ ARM_SPE_AUXTRACE_PRIV_MAX,
+};
+
+#define ARM_SPE_AUXTRACE_PRIV_SIZE (ARM_SPE_AUXTRACE_PRIV_MAX * sizeof(u64))
+
+union perf_event;
+struct perf_session;
+struct perf_pmu;
+
+struct auxtrace_record *arm_spe_recording_init(int *err,
+ struct perf_pmu *arm_spe_pmu);
+
+int arm_spe_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session);
+
+struct perf_event_attr *arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu);
+#endif
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index a33491416400..9faf3b5367db 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -31,9 +31,6 @@
#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
-#include <string.h>
-#include <limits.h>
-#include <errno.h>
#include <linux/list.h>
#include "../perf.h"
@@ -55,8 +52,10 @@
#include "debug.h"
#include <subcmd/parse-options.h>
+#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
+#include "arm-spe.h"
#include "sane_ctype.h"
#include "symbol/kallsyms.h"
@@ -913,7 +912,10 @@ int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
return intel_pt_process_auxtrace_info(event, session);
case PERF_AUXTRACE_INTEL_BTS:
return intel_bts_process_auxtrace_info(event, session);
+ case PERF_AUXTRACE_ARM_SPE:
+ return arm_spe_process_auxtrace_info(event, session);
case PERF_AUXTRACE_CS_ETM:
+ return cs_etm__process_auxtrace_info(event, session);
case PERF_AUXTRACE_UNKNOWN:
default:
return -EINVAL;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index d19e11b68de7..453c148d2158 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -43,6 +43,7 @@ enum auxtrace_type {
PERF_AUXTRACE_INTEL_PT,
PERF_AUXTRACE_INTEL_BTS,
PERF_AUXTRACE_CS_ETM,
+ PERF_AUXTRACE_ARM_SPE,
};
enum itrace_period_type {
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 72c107fcbc5a..af7ad814b2c3 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -94,7 +94,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
perf_clang__cleanup();
if (err) {
- pr_warning("bpf: builtin compilation failed: %d, try external compiler\n", err);
+ pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
if (err)
return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
@@ -1533,7 +1533,7 @@ int bpf__apply_obj_config(void)
(strcmp("__bpf_stdout__", \
bpf_map__name(pos)) == 0))
-int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+int bpf__setup_stdout(struct perf_evlist *evlist)
{
struct bpf_map_priv *tmpl_priv = NULL;
struct bpf_object *obj, *tmp;
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 082505d08d72..32ef7bdca1cf 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -37,6 +37,15 @@ struct callchain_param callchain_param = {
CALLCHAIN_PARAM_DEFAULT
};
+/*
+ * Are there any events using DWARF callchains?
+ *
+ * I.e.
+ *
+ * -e cycles/call-graph=dwarf/
+ */
+bool dwarf_callchain_users;
+
struct callchain_param callchain_param_default = {
CALLCHAIN_PARAM_DEFAULT
};
@@ -265,6 +274,7 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
ret = 0;
param->record_mode = CALLCHAIN_DWARF;
param->dump_size = default_stack_dump_size;
+ dwarf_callchain_users = true;
tok = strtok_r(NULL, ",", &saveptr);
if (tok) {
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index b79ef2478a57..154560b1eb65 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -89,6 +89,8 @@ enum chain_value {
CCVAL_COUNT,
};
+extern bool dwarf_callchain_users;
+
struct callchain_param {
bool enabled;
enum perf_call_graph_mode record_mode;
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index d9ffc1e6eb39..984f69144f87 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -6,6 +6,9 @@
#include "cgroup.h"
#include "evlist.h"
#include <linux/stringify.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
int nr_cgroups;
diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build
new file mode 100644
index 000000000000..bc22c39c727f
--- /dev/null
+++ b/tools/perf/util/cs-etm-decoder/Build
@@ -0,0 +1 @@
+libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
new file mode 100644
index 000000000000..1fb01849f1c7
--- /dev/null
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -0,0 +1,513 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(C) 2015-2018 Linaro Limited.
+ *
+ * Author: Tor Jeremiassen <tor@ti.com>
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <stdlib.h>
+#include <opencsd/c_api/opencsd_c_api.h>
+#include <opencsd/etmv4/trc_pkt_types_etmv4.h>
+#include <opencsd/ocsd_if_types.h>
+
+#include "cs-etm.h"
+#include "cs-etm-decoder.h"
+#include "intlist.h"
+#include "util.h"
+
+#define MAX_BUFFER 1024
+
+/* use raw logging */
+#ifdef CS_DEBUG_RAW
+#define CS_LOG_RAW_FRAMES
+#ifdef CS_RAW_PACKED
+#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
+ OCSD_DFRMTR_PACKED_RAW_OUT)
+#else
+#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
+#endif
+#endif
+
+struct cs_etm_decoder {
+ void *data;
+ void (*packet_printer)(const char *msg);
+ bool trace_on;
+ dcd_tree_handle_t dcd_tree;
+ cs_etm_mem_cb_type mem_access;
+ ocsd_datapath_resp_t prev_return;
+ u32 packet_count;
+ u32 head;
+ u32 tail;
+ struct cs_etm_packet packet_buffer[MAX_BUFFER];
+};
+
+static u32
+cs_etm_decoder__mem_access(const void *context,
+ const ocsd_vaddr_t address,
+ const ocsd_mem_space_acc_t mem_space __maybe_unused,
+ const u32 req_size,
+ u8 *buffer)
+{
+ struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
+
+ return decoder->mem_access(decoder->data,
+ address,
+ req_size,
+ buffer);
+}
+
+int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
+ u64 start, u64 end,
+ cs_etm_mem_cb_type cb_func)
+{
+ decoder->mem_access = cb_func;
+
+ if (ocsd_dt_add_callback_mem_acc(decoder->dcd_tree, start, end,
+ OCSD_MEM_SPACE_ANY,
+ cs_etm_decoder__mem_access, decoder))
+ return -1;
+
+ return 0;
+}
+
+int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
+{
+ ocsd_datapath_resp_t dp_ret;
+
+ dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
+ 0, 0, NULL, NULL);
+ if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
+ return -1;
+
+ return 0;
+}
+
+int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
+ struct cs_etm_packet *packet)
+{
+ if (!decoder || !packet)
+ return -EINVAL;
+
+ /* Nothing to do, might as well just return */
+ if (decoder->packet_count == 0)
+ return 0;
+
+ *packet = decoder->packet_buffer[decoder->head];
+
+ decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1);
+
+ decoder->packet_count--;
+
+ return 1;
+}
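
'head' and 'tail' wrap with '& (MAX_BUFFER - 1)', which is only correct because MAX_BUFFER (1024) is a power of two; the mask then behaves like a cheap modulo. A minimal demonstration, including a counterexample size:

	#include <assert.h>

	#define MAX_BUFFER 1024		/* must be a power of two */

	int main(void)
	{
		unsigned int head = MAX_BUFFER - 1;

		/* Advancing past the last slot wraps to slot 0, branch-free. */
		head = (head + 1) & (MAX_BUFFER - 1);
		assert(head == 0);

		/* The trick breaks for non-power-of-two sizes, e.g. 1000. */
		assert(((999 + 1) & (1000 - 1)) != 0);
		return 0;
	}
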
+
+static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
+ ocsd_etmv4_cfg *config)
+{
+ config->reg_configr = params->etmv4.reg_configr;
+ config->reg_traceidr = params->etmv4.reg_traceidr;
+ config->reg_idr0 = params->etmv4.reg_idr0;
+ config->reg_idr1 = params->etmv4.reg_idr1;
+ config->reg_idr2 = params->etmv4.reg_idr2;
+ config->reg_idr8 = params->etmv4.reg_idr8;
+ config->reg_idr9 = 0;
+ config->reg_idr10 = 0;
+ config->reg_idr11 = 0;
+ config->reg_idr12 = 0;
+ config->reg_idr13 = 0;
+ config->arch_ver = ARCH_V8;
+ config->core_prof = profile_CortexA;
+}
+
+static void cs_etm_decoder__print_str_cb(const void *p_context,
+ const char *msg,
+ const int str_len)
+{
+ if (p_context && str_len)
+ ((struct cs_etm_decoder *)p_context)->packet_printer(msg);
+}
+
+static int
+cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_decoder *decoder)
+{
+ int ret = 0;
+
+ if (d_params->packet_printer == NULL)
+ return -1;
+
+ decoder->packet_printer = d_params->packet_printer;
+
+ /*
+ * Set up a library default logger to process any printers
+ * (packet/raw frame) we add later.
+ */
+ ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
+ if (ret != 0)
+ return -1;
+
+ /* no stdout / err / file output */
+ ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
+ if (ret != 0)
+ return -1;
+
+ /*
+ * Set the string CB for the default logger, passes strings to
+ * perf print logger.
+ */
+ ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
+ (void *)decoder,
+ cs_etm_decoder__print_str_cb);
+ if (ret != 0)
+ return -1;
+
+ return 0;
+}
+
+#ifdef CS_LOG_RAW_FRAMES
+static void
+cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_decoder *decoder)
+{
+ /* Only log these during a --dump operation */
+ if (d_params->operation == CS_ETM_OPERATION_PRINT) {
+ /*
+ * Set up a library default logger to process the
+ * raw frame printer we add later.
+ */
+ ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
+
+ /* no stdout / err / file output */
+ ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
+
+ /*
+ * Set the string CB for the default logger; it passes
+ * strings to the perf print logger.
+ */
+ ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
+ (void *)decoder,
+ cs_etm_decoder__print_str_cb);
+
+ /* use the built in library printer for the raw frames */
+ ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
+ CS_RAW_DEBUG_FLAGS);
+ }
+}
+#else
+static void
+cs_etm_decoder__init_raw_frame_logging(
+ struct cs_etm_decoder_params *d_params __maybe_unused,
+ struct cs_etm_decoder *decoder __maybe_unused)
+{
+}
+#endif
+
+static int cs_etm_decoder__create_packet_printer(struct cs_etm_decoder *decoder,
+ const char *decoder_name,
+ void *trace_config)
+{
+ u8 csid;
+
+ if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder_name,
+ OCSD_CREATE_FLG_PACKET_PROC,
+ trace_config, &csid))
+ return -1;
+
+ if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
+ return -1;
+
+ return 0;
+}
+
+static int
+cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
+ struct cs_etm_decoder *decoder)
+{
+ const char *decoder_name;
+ ocsd_etmv4_cfg trace_config_etmv4;
+ void *trace_config;
+
+ switch (t_params->protocol) {
+ case CS_ETM_PROTO_ETMV4i:
+ cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
+ decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
+ trace_config = &trace_config_etmv4;
+ break;
+ default:
+ return -1;
+ }
+
+ return cs_etm_decoder__create_packet_printer(decoder,
+ decoder_name,
+ trace_config);
+}
+
+static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
+{
+ int i;
+
+ decoder->head = 0;
+ decoder->tail = 0;
+ decoder->packet_count = 0;
+ for (i = 0; i < MAX_BUFFER; i++) {
+ decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[i].exc = false;
+ decoder->packet_buffer[i].exc_ret = false;
+ decoder->packet_buffer[i].cpu = INT_MIN;
+ }
+}
+
+static ocsd_datapath_resp_t
+cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
+ const ocsd_generic_trace_elem *elem,
+ const u8 trace_chan_id,
+ enum cs_etm_sample_type sample_type)
+{
+ u32 et = 0;
+ struct int_node *inode = NULL;
+
+ if (decoder->packet_count >= MAX_BUFFER - 1)
+ return OCSD_RESP_FATAL_SYS_ERR;
+
+ /* Search the RB tree for the cpu associated with this traceID */
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return OCSD_RESP_FATAL_SYS_ERR;
+
+ et = decoder->tail;
+ decoder->packet_buffer[et].sample_type = sample_type;
+ decoder->packet_buffer[et].start_addr = elem->st_addr;
+ decoder->packet_buffer[et].end_addr = elem->en_addr;
+ decoder->packet_buffer[et].exc = false;
+ decoder->packet_buffer[et].exc_ret = false;
+ decoder->packet_buffer[et].cpu = *((int *)inode->priv);
+
+ /* Wrap around if need be - relies on MAX_BUFFER being a power of 2 */
+ et = (et + 1) & (MAX_BUFFER - 1);
+
+ decoder->tail = et;
+ decoder->packet_count++;
+
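+ /*
+ * Returning OCSD_RESP_WAIT makes the library pause the data path
+ * until the queued packets have been consumed and flushed.
+ */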
+ if (decoder->packet_count == MAX_BUFFER - 1)
+ return OCSD_RESP_WAIT;
+
+ return OCSD_RESP_CONT;
+}
+
+static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
+ const void *context,
+ const ocsd_trc_index_t indx __maybe_unused,
+ const u8 trace_chan_id __maybe_unused,
+ const ocsd_generic_trace_elem *elem)
+{
+ ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
+ struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
+
+ switch (elem->elem_type) {
+ case OCSD_GEN_TRC_ELEM_UNKNOWN:
+ break;
+ case OCSD_GEN_TRC_ELEM_NO_SYNC:
+ decoder->trace_on = false;
+ break;
+ case OCSD_GEN_TRC_ELEM_TRACE_ON:
+ decoder->trace_on = true;
+ break;
+ case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
+ resp = cs_etm_decoder__buffer_packet(decoder, elem,
+ trace_chan_id,
+ CS_ETM_RANGE);
+ break;
+ case OCSD_GEN_TRC_ELEM_EXCEPTION:
+ decoder->packet_buffer[decoder->tail].exc = true;
+ break;
+ case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
+ decoder->packet_buffer[decoder->tail].exc_ret = true;
+ break;
+ case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
+ case OCSD_GEN_TRC_ELEM_EO_TRACE:
+ case OCSD_GEN_TRC_ELEM_ADDR_NACC:
+ case OCSD_GEN_TRC_ELEM_TIMESTAMP:
+ case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
+ case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
+ case OCSD_GEN_TRC_ELEM_EVENT:
+ case OCSD_GEN_TRC_ELEM_SWTRACE:
+ case OCSD_GEN_TRC_ELEM_CUSTOM:
+ default:
+ break;
+ }
+
+ return resp;
+}
+
+static int cs_etm_decoder__create_etm_packet_decoder(
+ struct cs_etm_trace_params *t_params,
+ struct cs_etm_decoder *decoder)
+{
+ const char *decoder_name;
+ ocsd_etmv4_cfg trace_config_etmv4;
+ void *trace_config;
+ u8 csid;
+
+ switch (t_params->protocol) {
+ case CS_ETM_PROTO_ETMV4i:
+ cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
+ decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
+ trace_config = &trace_config_etmv4;
+ break;
+ default:
+ return -1;
+ }
+
+ if (ocsd_dt_create_decoder(decoder->dcd_tree,
+ decoder_name,
+ OCSD_CREATE_FLG_FULL_DECODER,
+ trace_config, &csid))
+ return -1;
+
+ if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
+ cs_etm_decoder__gen_trace_elem_printer,
+ decoder))
+ return -1;
+
+ return 0;
+}
+
+static int
+cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_trace_params *t_params,
+ struct cs_etm_decoder *decoder)
+{
+ if (d_params->operation == CS_ETM_OPERATION_PRINT)
+ return cs_etm_decoder__create_etm_packet_printer(t_params,
+ decoder);
+ else if (d_params->operation == CS_ETM_OPERATION_DECODE)
+ return cs_etm_decoder__create_etm_packet_decoder(t_params,
+ decoder);
+
+ return -1;
+}
+
+struct cs_etm_decoder *
+cs_etm_decoder__new(int num_cpu, struct cs_etm_decoder_params *d_params,
+ struct cs_etm_trace_params t_params[])
+{
+ struct cs_etm_decoder *decoder;
+ ocsd_dcd_tree_src_t format;
+ u32 flags;
+ int i, ret;
+
+ if ((!t_params) || (!d_params))
+ return NULL;
+
+ decoder = zalloc(sizeof(*decoder));
+
+ if (!decoder)
+ return NULL;
+
+ decoder->data = d_params->data;
+ decoder->prev_return = OCSD_RESP_CONT;
+ cs_etm_decoder__clear_buffer(decoder);
+ format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
+ OCSD_TRC_SRC_SINGLE);
+ flags = 0;
+ flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
+ flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
+ flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);
+
+ /*
+ * Drivers may add barrier frames when used with perf, set up to
+ * handle this. Barriers consist of a FSYNC packet repeated 4 times.
+ */
+ flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;
+
+ /* Create decode tree for the data source */
+ decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);
+
+ if (decoder->dcd_tree == 0)
+ goto err_free_decoder;
+
+ /* init library print logging support */
+ ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
+ if (ret != 0)
+ goto err_free_decoder_tree;
+
+ /* init raw frame logging if required */
+ cs_etm_decoder__init_raw_frame_logging(d_params, decoder);
+
+ for (i = 0; i < num_cpu; i++) {
+ ret = cs_etm_decoder__create_etm_decoder(d_params,
+ &t_params[i],
+ decoder);
+ if (ret != 0)
+ goto err_free_decoder_tree;
+ }
+
+ return decoder;
+
+err_free_decoder_tree:
+ ocsd_destroy_dcd_tree(decoder->dcd_tree);
+err_free_decoder:
+ free(decoder);
+ return NULL;
+}
+
+int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
+ u64 indx, const u8 *buf,
+ size_t len, size_t *consumed)
+{
+ int ret = 0;
+ ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
+ ocsd_datapath_resp_t prev_return = decoder->prev_return;
+ size_t processed = 0;
+ u32 count;
+
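+ /*
+ * Drive the library data path as a small state machine: flush if
+ * the previous call asked us to wait, otherwise feed in more data.
+ */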
+ while (processed < len) {
+ if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
+ cur = ocsd_dt_process_data(decoder->dcd_tree,
+ OCSD_OP_FLUSH,
+ 0,
+ 0,
+ NULL,
+ NULL);
+ } else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
+ cur = ocsd_dt_process_data(decoder->dcd_tree,
+ OCSD_OP_DATA,
+ indx + processed,
+ len - processed,
+ &buf[processed],
+ &count);
+ processed += count;
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+
+ /*
+ * Return to the input code if the packet buffer is full.
+ * Flushing will get done once the packet buffer has been
+ * processed.
+ */
+ if (OCSD_DATA_RESP_IS_WAIT(cur))
+ break;
+
+ prev_return = cur;
+ }
+
+ decoder->prev_return = cur;
+ *consumed = processed;
+
+ return ret;
+}
+
+void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ ocsd_destroy_dcd_tree(decoder->dcd_tree);
+ decoder->dcd_tree = NULL;
+ free(decoder);
+}
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
new file mode 100644
index 000000000000..3d2e6205d186
--- /dev/null
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -0,0 +1,105 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(C) 2015-2018 Linaro Limited.
+ *
+ * Author: Tor Jeremiassen <tor@ti.com>
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ */
+
+#ifndef INCLUDE__CS_ETM_DECODER_H__
+#define INCLUDE__CS_ETM_DECODER_H__
+
+#include <linux/types.h>
+#include <stdio.h>
+
+struct cs_etm_decoder;
+
+struct cs_etm_buffer {
+ const unsigned char *buf;
+ size_t len;
+ u64 offset;
+ u64 ref_timestamp;
+};
+
+enum cs_etm_sample_type {
+ CS_ETM_RANGE = 1 << 0,
+};
+
+struct cs_etm_packet {
+ enum cs_etm_sample_type sample_type;
+ u64 start_addr;
+ u64 end_addr;
+ u8 exc;
+ u8 exc_ret;
+ int cpu;
+};
+
+struct cs_etm_queue;
+
+typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u64,
+ size_t, u8 *);
+
+struct cs_etmv4_trace_params {
+ u32 reg_idr0;
+ u32 reg_idr1;
+ u32 reg_idr2;
+ u32 reg_idr8;
+ u32 reg_configr;
+ u32 reg_traceidr;
+};
+
+struct cs_etm_trace_params {
+ int protocol;
+ union {
+ struct cs_etmv4_trace_params etmv4;
+ };
+};
+
+struct cs_etm_decoder_params {
+ int operation;
+ void (*packet_printer)(const char *msg);
+ cs_etm_mem_cb_type mem_acc_cb;
+ u8 formatted;
+ u8 fsyncs;
+ u8 hsyncs;
+ u8 frame_aligned;
+ void *data;
+};
+
+/*
+ * The following enums are indexed starting with 1 to align with the
+ * open source coresight trace decoder library.
+ */
+enum {
+ CS_ETM_PROTO_ETMV3 = 1,
+ CS_ETM_PROTO_ETMV4i,
+ CS_ETM_PROTO_ETMV4d,
+};
+
+enum {
+ CS_ETM_OPERATION_PRINT = 1,
+ CS_ETM_OPERATION_DECODE,
+};
+
+int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
+ u64 indx, const u8 *buf,
+ size_t len, size_t *consumed);
+
+struct cs_etm_decoder *
+cs_etm_decoder__new(int num_cpu,
+ struct cs_etm_decoder_params *d_params,
+ struct cs_etm_trace_params t_params[]);
+
+void cs_etm_decoder__free(struct cs_etm_decoder *decoder);
+
+int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
+ u64 start, u64 end,
+ cs_etm_mem_cb_type cb_func);
+
+int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
+ struct cs_etm_packet *packet);
+
+int cs_etm_decoder__reset(struct cs_etm_decoder *decoder);
+
+#endif /* INCLUDE__CS_ETM_DECODER_H__ */
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
new file mode 100644
index 000000000000..b9f0a53dfa65
--- /dev/null
+++ b/tools/perf/util/cs-etm.c
@@ -0,0 +1,1023 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(C) 2015-2018 Linaro Limited.
+ *
+ * Author: Tor Jeremiassen <tor@ti.com>
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+
+#include <stdlib.h>
+
+#include "auxtrace.h"
+#include "color.h"
+#include "cs-etm.h"
+#include "cs-etm-decoder/cs-etm-decoder.h"
+#include "debug.h"
+#include "evlist.h"
+#include "intlist.h"
+#include "machine.h"
+#include "map.h"
+#include "perf.h"
+#include "thread.h"
+#include "thread_map.h"
+#include "thread-stack.h"
+#include "util.h"
+
+#define MAX_TIMESTAMP (~0ULL)
+
+struct cs_etm_auxtrace {
+ struct auxtrace auxtrace;
+ struct auxtrace_queues queues;
+ struct auxtrace_heap heap;
+ struct itrace_synth_opts synth_opts;
+ struct perf_session *session;
+ struct machine *machine;
+ struct thread *unknown_thread;
+
+ u8 timeless_decoding;
+ u8 snapshot_mode;
+ u8 data_queued;
+ u8 sample_branches;
+
+ int num_cpu;
+ u32 auxtrace_type;
+ u64 branches_sample_type;
+ u64 branches_id;
+ u64 **metadata;
+ u64 kernel_start;
+ unsigned int pmu_type;
+};
+
+struct cs_etm_queue {
+ struct cs_etm_auxtrace *etm;
+ struct thread *thread;
+ struct cs_etm_decoder *decoder;
+ struct auxtrace_buffer *buffer;
+ const struct cs_etm_state *state;
+ union perf_event *event_buf;
+ unsigned int queue_nr;
+ pid_t pid, tid;
+ int cpu;
+ u64 time;
+ u64 timestamp;
+ u64 offset;
+};
+
+static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
+static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
+ pid_t tid, u64 time_);
+
+static void cs_etm__packet_dump(const char *pkt_string)
+{
+ const char *color = PERF_COLOR_BLUE;
+ int len = strlen(pkt_string);
+
+ if (len && (pkt_string[len-1] == '\n'))
+ color_fprintf(stdout, color, " %s", pkt_string);
+ else
+ color_fprintf(stdout, color, " %s\n", pkt_string);
+
+ fflush(stdout);
+}
+
+static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
+ struct auxtrace_buffer *buffer)
+{
+ int i, ret;
+ const char *color = PERF_COLOR_BLUE;
+ struct cs_etm_decoder_params d_params;
+ struct cs_etm_trace_params *t_params;
+ struct cs_etm_decoder *decoder;
+ size_t buffer_used = 0;
+
+ fprintf(stdout, "\n");
+ color_fprintf(stdout, color,
+ ". ... CoreSight ETM Trace data: size %zu bytes\n",
+ buffer->size);
+
+ /* Use metadata to fill in trace parameters for trace decoder */
+ t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
+ if (!t_params)
+ return;
+ for (i = 0; i < etm->num_cpu; i++) {
+ t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
+ t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
+ t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
+ t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
+ t_params[i].etmv4.reg_configr =
+ etm->metadata[i][CS_ETMV4_TRCCONFIGR];
+ t_params[i].etmv4.reg_traceidr =
+ etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+ }
+
+ /* Set decoder parameters to simply print the trace packets */
+ d_params.packet_printer = cs_etm__packet_dump;
+ d_params.operation = CS_ETM_OPERATION_PRINT;
+ d_params.formatted = true;
+ d_params.fsyncs = false;
+ d_params.hsyncs = false;
+ d_params.frame_aligned = true;
+
+ decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
+
+ zfree(&t_params);
+
+ if (!decoder)
+ return;
+ do {
+ size_t consumed;
+
+ ret = cs_etm_decoder__process_data_block(
+ decoder, buffer->offset,
+ &((u8 *)buffer->data)[buffer_used],
+ buffer->size - buffer_used, &consumed);
+ if (ret)
+ break;
+
+ buffer_used += consumed;
+ } while (buffer_used < buffer->size);
+
+ cs_etm_decoder__free(decoder);
+}
+
+static int cs_etm__flush_events(struct perf_session *session,
+ struct perf_tool *tool)
+{
+ int ret;
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ if (dump_trace)
+ return 0;
+
+ if (!tool->ordered_events)
+ return -EINVAL;
+
+ if (!etm->timeless_decoding)
+ return -EINVAL;
+
+ ret = cs_etm__update_queues(etm);
+
+ if (ret < 0)
+ return ret;
+
+ return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
+}
+
+static void cs_etm__free_queue(void *priv)
+{
+ struct cs_etm_queue *etmq = priv;
+
+ free(etmq);
+}
+
+static void cs_etm__free_events(struct perf_session *session)
+{
+ unsigned int i;
+ struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ struct auxtrace_queues *queues = &aux->queues;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ cs_etm__free_queue(queues->queue_array[i].priv);
+ queues->queue_array[i].priv = NULL;
+ }
+
+ auxtrace_queues__free(queues);
+}
+
+static void cs_etm__free(struct perf_session *session)
+{
+ int i;
+ struct int_node *inode, *tmp;
+ struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ cs_etm__free_events(session);
+ session->auxtrace = NULL;
+
+ /* First remove all traceID/CPU# nodes for the RB tree */
+ intlist__for_each_entry_safe(inode, tmp, traceid_list)
+ intlist__remove(traceid_list, inode);
+ /* Then the RB tree itself */
+ intlist__delete(traceid_list);
+
+ for (i = 0; i < aux->num_cpu; i++)
+ zfree(&aux->metadata[i]);
+
+ zfree(&aux->metadata);
+ zfree(&aux);
+}
+
+static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
+ size_t size, u8 *buffer)
+{
+ u8 cpumode;
+ u64 offset;
+ int len;
+ struct thread *thread;
+ struct machine *machine;
+ struct addr_location al;
+
+ if (!etmq)
+ return -1;
+
+ machine = etmq->etm->machine;
+ if (address >= etmq->etm->kernel_start)
+ cpumode = PERF_RECORD_MISC_KERNEL;
+ else
+ cpumode = PERF_RECORD_MISC_USER;
+
+ thread = etmq->thread;
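+ /*
+ * Without a thread context only kernel addresses can be resolved,
+ * in which case the catch-all "unknown" thread is used.
+ */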
+ if (!thread) {
+ if (cpumode != PERF_RECORD_MISC_KERNEL)
+ return -EINVAL;
+ thread = etmq->etm->unknown_thread;
+ }
+
+ thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address, &al);
+
+ if (!al.map || !al.map->dso)
+ return 0;
+
+ if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
+ dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
+ return 0;
+
+ offset = al.map->map_ip(al.map, address);
+
+ map__load(al.map);
+
+ len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);
+
+ if (len <= 0)
+ return 0;
+
+ return len;
+}
+
+static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
+ unsigned int queue_nr)
+{
+ int i;
+ struct cs_etm_decoder_params d_params;
+ struct cs_etm_trace_params *t_params;
+ struct cs_etm_queue *etmq;
+
+ etmq = zalloc(sizeof(*etmq));
+ if (!etmq)
+ return NULL;
+
+ etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
+ if (!etmq->event_buf)
+ goto out_free;
+
+ etmq->etm = etm;
+ etmq->queue_nr = queue_nr;
+ etmq->pid = -1;
+ etmq->tid = -1;
+ etmq->cpu = -1;
+
+ /* Use metadata to fill in trace parameters for trace decoder */
+ t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
+
+ if (!t_params)
+ goto out_free;
+
+ for (i = 0; i < etm->num_cpu; i++) {
+ t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
+ t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
+ t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
+ t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
+ t_params[i].etmv4.reg_configr =
+ etm->metadata[i][CS_ETMV4_TRCCONFIGR];
+ t_params[i].etmv4.reg_traceidr =
+ etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+ }
+
+ /* Set decoder parameters to decode the trace packets */
+ d_params.packet_printer = cs_etm__packet_dump;
+ d_params.operation = CS_ETM_OPERATION_DECODE;
+ d_params.formatted = true;
+ d_params.fsyncs = false;
+ d_params.hsyncs = false;
+ d_params.frame_aligned = true;
+ d_params.data = etmq;
+
+ etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
+
+ zfree(&t_params);
+
+ if (!etmq->decoder)
+ goto out_free;
+
+ /*
+ * Register a function to handle all memory accesses required by
+ * the trace decoder library.
+ */
+ if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
+ 0x0L, ((u64) -1L),
+ cs_etm__mem_access))
+ goto out_free_decoder;
+
+ etmq->offset = 0;
+
+ return etmq;
+
+out_free_decoder:
+ cs_etm_decoder__free(etmq->decoder);
+out_free:
+ zfree(&etmq->event_buf);
+ free(etmq);
+
+ return NULL;
+}
+
+static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
+ struct auxtrace_queue *queue,
+ unsigned int queue_nr)
+{
+ struct cs_etm_queue *etmq = queue->priv;
+
+ if (list_empty(&queue->head) || etmq)
+ return 0;
+
+ etmq = cs_etm__alloc_queue(etm, queue_nr);
+
+ if (!etmq)
+ return -ENOMEM;
+
+ queue->priv = etmq;
+
+ if (queue->cpu != -1)
+ etmq->cpu = queue->cpu;
+
+ etmq->tid = queue->tid;
+
+ return 0;
+}
+
+static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < etm->queues.nr_queues; i++) {
+ ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
+{
+ if (etm->queues.new_data) {
+ etm->queues.new_data = false;
+ return cs_etm__setup_queues(etm);
+ }
+
+ return 0;
+}
+
+static int
+cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
+{
+ struct auxtrace_buffer *aux_buffer = etmq->buffer;
+ struct auxtrace_buffer *old_buffer = aux_buffer;
+ struct auxtrace_queue *queue;
+
+ queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
+
+ aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
+
+ /* If no more data, drop the previous auxtrace_buffer and return */
+ if (!aux_buffer) {
+ if (old_buffer)
+ auxtrace_buffer__drop_data(old_buffer);
+ buff->len = 0;
+ return 0;
+ }
+
+ etmq->buffer = aux_buffer;
+
+ /* If the aux_buffer doesn't have data associated, try to load it */
+ if (!aux_buffer->data) {
+ /* get the file desc associated with the perf data file */
+ int fd = perf_data__fd(etmq->etm->session->data);
+
+ aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
+ if (!aux_buffer->data)
+ return -ENOMEM;
+ }
+
+ /* If a previous buffer exists, drop its data now that the new one is loaded */
+ if (old_buffer)
+ auxtrace_buffer__drop_data(old_buffer);
+
+ buff->offset = aux_buffer->offset;
+ buff->len = aux_buffer->size;
+ buff->buf = aux_buffer->data;
+
+ buff->ref_timestamp = aux_buffer->reference;
+
+ return buff->len;
+}
+
+static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
+ struct auxtrace_queue *queue)
+{
+ struct cs_etm_queue *etmq = queue->priv;
+
+ /* CPU-wide tracing isn't supported yet */
+ if (queue->tid == -1)
+ return;
+
+ if ((!etmq->thread) && (etmq->tid != -1))
+ etmq->thread = machine__find_thread(etm->machine, -1,
+ etmq->tid);
+
+ if (etmq->thread) {
+ etmq->pid = etmq->thread->pid_;
+ if (queue->cpu == -1)
+ etmq->cpu = etmq->thread->cpu;
+ }
+}
+
+/*
+ * A CoreSight ETM packet encodes an instruction range between a branch
+ * target and the next taken branch. Generate a branch sample accordingly.
+ */
+static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
+ struct cs_etm_packet *packet)
+{
+ int ret = 0;
+ struct cs_etm_auxtrace *etm = etmq->etm;
+ struct perf_sample sample = {.ip = 0,};
+ union perf_event *event = etmq->event_buf;
+ u64 start_addr = packet->start_addr;
+ u64 end_addr = packet->end_addr;
+
+ event->sample.header.type = PERF_RECORD_SAMPLE;
+ event->sample.header.misc = PERF_RECORD_MISC_USER;
+ event->sample.header.size = sizeof(struct perf_event_header);
+
+ sample.ip = start_addr;
+ sample.pid = etmq->pid;
+ sample.tid = etmq->tid;
+ sample.addr = end_addr;
+ sample.id = etmq->etm->branches_id;
+ sample.stream_id = etmq->etm->branches_id;
+ sample.period = 1;
+ sample.cpu = packet->cpu;
+ sample.flags = 0;
+ sample.cpumode = PERF_RECORD_MISC_USER;
+
+ ret = perf_session__deliver_synth_event(etm->session, event, &sample);
+
+ if (ret)
+ pr_err(
+ "CS ETM Trace: failed to deliver instruction event, error %d\n",
+ ret);
+
+ return ret;
+}
+
+struct cs_etm_synth {
+ struct perf_tool dummy_tool;
+ struct perf_session *session;
+};
+
+static int cs_etm__event_synth(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct cs_etm_synth *cs_etm_synth =
+ container_of(tool, struct cs_etm_synth, dummy_tool);
+
+ return perf_session__deliver_synth_event(cs_etm_synth->session,
+ event, NULL);
+}
+
+static int cs_etm__synth_event(struct perf_session *session,
+ struct perf_event_attr *attr, u64 id)
+{
+ struct cs_etm_synth cs_etm_synth;
+
+ memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
+ cs_etm_synth.session = session;
+
+ return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
+ &id, cs_etm__event_synth);
+}
+
+static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
+ struct perf_session *session)
+{
+ struct perf_evlist *evlist = session->evlist;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr;
+ bool found = false;
+ u64 id;
+ int err;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->attr.type == etm->pmu_type) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("No selected events with CoreSight Trace data\n");
+ return 0;
+ }
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+ attr.size = sizeof(struct perf_event_attr);
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
+ attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
+ PERF_SAMPLE_PERIOD;
+ if (etm->timeless_decoding)
+ attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
+ else
+ attr.sample_type |= PERF_SAMPLE_TIME;
+
+ attr.exclude_user = evsel->attr.exclude_user;
+ attr.exclude_kernel = evsel->attr.exclude_kernel;
+ attr.exclude_hv = evsel->attr.exclude_hv;
+ attr.exclude_host = evsel->attr.exclude_host;
+ attr.exclude_guest = evsel->attr.exclude_guest;
+ attr.sample_id_all = evsel->attr.sample_id_all;
+ attr.read_format = evsel->attr.read_format;
+
+ /* create new id val to be a fixed offset from evsel id */
+ id = evsel->id[0] + 1000000000;
+
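+ /* an id of 0 would be indistinguishable from "no id", avoid it */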
+ if (!id)
+ id = 1;
+
+ if (etm->synth_opts.branches) {
+ attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
+ attr.sample_period = 1;
+ attr.sample_type |= PERF_SAMPLE_ADDR;
+ err = cs_etm__synth_event(session, &attr, id);
+ if (err)
+ return err;
+ etm->sample_branches = true;
+ etm->branches_sample_type = attr.sample_type;
+ etm->branches_id = id;
+ }
+
+ return 0;
+}
+
+static int cs_etm__sample(struct cs_etm_queue *etmq)
+{
+ int ret;
+ struct cs_etm_packet packet;
+
+ while (1) {
+ ret = cs_etm_decoder__get_packet(etmq->decoder, &packet);
+ if (ret <= 0)
+ return ret;
+
+ /*
+ * If the packet contains an instruction range, generate an
+ * instruction sequence event.
+ */
+ if (packet.sample_type & CS_ETM_RANGE)
+ cs_etm__synth_branch_sample(etmq, &packet);
+ }
+
+ return 0;
+}
+
+static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
+{
+ struct cs_etm_auxtrace *etm = etmq->etm;
+ struct cs_etm_buffer buffer;
+ size_t buffer_used, processed;
+ int err = 0;
+
+ if (!etm->kernel_start)
+ etm->kernel_start = machine__kernel_start(etm->machine);
+
+ /* Go through each buffer in the queue and decode them one by one */
+more:
+ buffer_used = 0;
+ memset(&buffer, 0, sizeof(buffer));
+ err = cs_etm__get_trace(&buffer, etmq);
+ if (err <= 0)
+ return err;
+ /*
+ * We cannot assume consecutive blocks in the data file are contiguous;
+ * reset the decoder to force a re-sync.
+ */
+ err = cs_etm_decoder__reset(etmq->decoder);
+ if (err != 0)
+ return err;
+
+ /* Run trace decoder until buffer consumed or end of trace */
+ do {
+ processed = 0;
+
+ err = cs_etm_decoder__process_data_block(
+ etmq->decoder,
+ etmq->offset,
+ &buffer.buf[buffer_used],
+ buffer.len - buffer_used,
+ &processed);
+
+ if (err)
+ return err;
+
+ etmq->offset += processed;
+ buffer_used += processed;
+
+ /*
+ * There is nothing to be done about an error here; carry on and
+ * hope the next chunk decodes cleanly.
+ */
+ err = cs_etm__sample(etmq);
+ } while (buffer.len > buffer_used);
+
+ goto more;
+
+ return err;
+}
+
+static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
+ pid_t tid, u64 time_)
+{
+ unsigned int i;
+ struct auxtrace_queues *queues = &etm->queues;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ struct auxtrace_queue *queue = &etm->queues.queue_array[i];
+ struct cs_etm_queue *etmq = queue->priv;
+
+ if (etmq && ((tid == -1) || (etmq->tid == tid))) {
+ etmq->time = time_;
+ cs_etm__set_pid_tid_cpu(etm, queue);
+ cs_etm__run_decoder(etmq);
+ }
+ }
+
+ return 0;
+}
+
+static int cs_etm__process_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool)
+{
+ int err = 0;
+ u64 timestamp;
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+
+ if (dump_trace)
+ return 0;
+
+ if (!tool->ordered_events) {
+ pr_err("CoreSight ETM Trace requires ordered events\n");
+ return -EINVAL;
+ }
+
+ if (!etm->timeless_decoding)
+ return -EINVAL;
+
+ if (sample->time && (sample->time != (u64) -1))
+ timestamp = sample->time;
+ else
+ timestamp = 0;
+
+ if (timestamp || etm->timeless_decoding) {
+ err = cs_etm__update_queues(etm);
+ if (err)
+ return err;
+ }
+
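+ /*
+ * A PERF_RECORD_EXIT event signals a traced thread is going away;
+ * decode everything still queued for that thread before it does.
+ */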
+ if (event->header.type == PERF_RECORD_EXIT)
+ return cs_etm__process_timeless_queues(etm,
+ event->fork.tid,
+ sample->time);
+
+ return 0;
+}
+
+static int cs_etm__process_auxtrace_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_tool *tool __maybe_unused)
+{
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ if (!etm->data_queued) {
+ struct auxtrace_buffer *buffer;
+ off_t data_offset;
+ int fd = perf_data__fd(session->data);
+ bool is_pipe = perf_data__is_pipe(session->data);
+ int err;
+
+ if (is_pipe)
+ data_offset = 0;
+ else {
+ data_offset = lseek(fd, 0, SEEK_CUR);
+ if (data_offset == -1)
+ return -errno;
+ }
+
+ err = auxtrace_queues__add_event(&etm->queues, session,
+ event, data_offset, &buffer);
+ if (err)
+ return err;
+
+ if (dump_trace)
+ if (auxtrace_buffer__get_data(buffer, fd)) {
+ cs_etm__dump_event(etm, buffer);
+ auxtrace_buffer__put_data(buffer);
+ }
+ }
+
+ return 0;
+}
+
+static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
+{
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = etm->session->evlist;
+ bool timeless_decoding = true;
+
+ /*
+ * Cycle through the list of events and clear the timeless flag if
+ * any of them has the time bit set.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
+ timeless_decoding = false;
+ }
+
+ return timeless_decoding;
+}
+
+static const char * const cs_etm_global_header_fmts[] = {
+ [CS_HEADER_VERSION_0] = " Header version %llx\n",
+ [CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
+ [CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
+};
+
+static const char * const cs_etm_priv_fmts[] = {
+ [CS_ETM_MAGIC] = " Magic number %llx\n",
+ [CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETM_ETMCR] = " ETMCR %llx\n",
+ [CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
+ [CS_ETM_ETMCCER] = " ETMCCER %llx\n",
+ [CS_ETM_ETMIDR] = " ETMIDR %llx\n",
+};
+
+static const char * const cs_etmv4_priv_fmts[] = {
+ [CS_ETM_MAGIC] = " Magic number %llx\n",
+ [CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
+ [CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
+ [CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
+ [CS_ETMV4_TRCIDR1] = " TRCIDR1 %llx\n",
+ [CS_ETMV4_TRCIDR2] = " TRCIDR2 %llx\n",
+ [CS_ETMV4_TRCIDR8] = " TRCIDR8 %llx\n",
+ [CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
+};
+
+static void cs_etm__print_auxtrace_info(u64 *val, int num)
+{
+ int i, j, cpu = 0;
+
+ for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
+ fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+
+ for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
+ if (val[i] == __perf_cs_etmv3_magic)
+ for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
+ fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
+ else if (val[i] == __perf_cs_etmv4_magic)
+ for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
+ fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
+ else
+ /* failure.. return */
+ return;
+ }
+}
+
+int cs_etm__process_auxtrace_info(union perf_event *event,
+ struct perf_session *session)
+{
+ struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
+ struct cs_etm_auxtrace *etm = NULL;
+ struct int_node *inode;
+ unsigned int pmu_type;
+ int event_header_size = sizeof(struct perf_event_header);
+ int info_header_size;
+ int total_size = auxtrace_info->header.size;
+ int priv_size = 0;
+ int num_cpu;
+ int err = 0, idx = -1;
+ int i, j, k;
+ u64 *ptr, *hdr = NULL;
+ u64 **metadata = NULL;
+
+ /*
+ * sizeof(auxtrace_info_event::type) +
+ * sizeof(auxtrace_info_event::reserved) == 8
+ */
+ info_header_size = 8;
+
+ if (total_size < (event_header_size + info_header_size))
+ return -EINVAL;
+
+ priv_size = total_size - event_header_size - info_header_size;
+
+ /* First the global part */
+ ptr = (u64 *) auxtrace_info->priv;
+
+ /* Look for version '0' of the header */
+ if (ptr[0] != 0)
+ return -EINVAL;
+
+ hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
+ if (!hdr)
+ return -ENOMEM;
+
+ /* Extract header information - see cs-etm.h for format */
+ for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
+ hdr[i] = ptr[i];
+ num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
+ pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
+ 0xffffffff);
+
+ /*
+ * Create an RB tree for traceID-CPU# tuples. Since the conversion
+ * has to be done for each packet that gets decoded, anything faster
+ * than a sequential array lookup is worth the effort.
+ */
+ traceid_list = intlist__new(NULL);
+ if (!traceid_list) {
+ err = -ENOMEM;
+ goto err_free_hdr;
+ }
+
+ metadata = zalloc(sizeof(*metadata) * num_cpu);
+ if (!metadata) {
+ err = -ENOMEM;
+ goto err_free_traceid_list;
+ }
+
+ /*
+ * The metadata is stored in the auxtrace_info section and encodes
+ * the configuration of the ARM Embedded Trace Macrocell which is
+ * required by the trace decoder to properly decode the trace due
+ * to its highly compressed nature.
+ */
+ for (j = 0; j < num_cpu; j++) {
+ if (ptr[i] == __perf_cs_etmv3_magic) {
+ metadata[j] = zalloc(sizeof(*metadata[j]) *
+ CS_ETM_PRIV_MAX);
+ if (!metadata[j]) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+ for (k = 0; k < CS_ETM_PRIV_MAX; k++)
+ metadata[j][k] = ptr[i + k];
+
+ /* The traceID is our handle */
+ idx = metadata[j][CS_ETM_ETMTRACEIDR];
+ i += CS_ETM_PRIV_MAX;
+ } else if (ptr[i] == __perf_cs_etmv4_magic) {
+ metadata[j] = zalloc(sizeof(*metadata[j]) *
+ CS_ETMV4_PRIV_MAX);
+ if (!metadata[j]) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+ for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
+ metadata[j][k] = ptr[i + k];
+
+ /* The traceID is our handle */
+ idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
+ i += CS_ETMV4_PRIV_MAX;
+ }
+
+ /* Get an RB node for this CPU */
+ inode = intlist__findnew(traceid_list, idx);
+
+ /* Something went wrong, no need to continue */
+ if (!inode) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+
+ /*
+ * The node for that CPU should not already be taken;
+ * back out if it is.
+ */
+ if (inode->priv) {
+ err = -EINVAL;
+ goto err_free_metadata;
+ }
+ /* All good, associate the traceID with the CPU# */
+ inode->priv = &metadata[j][CS_ETM_CPU];
+ }
+
+ /*
+ * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
+ * CS_ETMV4_PRIV_MAX mark how many double words are in the
+ * global metadata, and each cpu's metadata respectively.
+ * The following tests if the correct number of double words was
+ * present in the auxtrace info section.
+ */
+ if (i * 8 != priv_size) {
+ err = -EINVAL;
+ goto err_free_metadata;
+ }
+
+ etm = zalloc(sizeof(*etm));
+
+ if (!etm) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+
+ err = auxtrace_queues__init(&etm->queues);
+ if (err)
+ goto err_free_etm;
+
+ etm->session = session;
+ etm->machine = &session->machines.host;
+
+ etm->num_cpu = num_cpu;
+ etm->pmu_type = pmu_type;
+ etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
+ etm->metadata = metadata;
+ etm->auxtrace_type = auxtrace_info->type;
+ etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
+
+ etm->auxtrace.process_event = cs_etm__process_event;
+ etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
+ etm->auxtrace.flush_events = cs_etm__flush_events;
+ etm->auxtrace.free_events = cs_etm__free_events;
+ etm->auxtrace.free = cs_etm__free;
+ session->auxtrace = &etm->auxtrace;
+
+ if (dump_trace) {
+ cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
+ return 0;
+ }
+
+ if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
+ etm->synth_opts = *session->itrace_synth_opts;
+ } else {
+ itrace_synth_opts__set_default(&etm->synth_opts);
+ etm->synth_opts.callchain = false;
+ }
+
+ err = cs_etm__synth_events(etm, session);
+ if (err)
+ goto err_free_queues;
+
+ err = auxtrace_queues__process_index(&etm->queues, session);
+ if (err)
+ goto err_free_queues;
+
+ etm->data_queued = etm->queues.populated;
+
+ return 0;
+
+err_free_queues:
+ auxtrace_queues__free(&etm->queues);
+ session->auxtrace = NULL;
+err_free_etm:
+ zfree(&etm);
+err_free_metadata:
+ /* No need to check @metadata[j], free(NULL) is supported */
+ for (j = 0; j < num_cpu; j++)
+ free(metadata[j]);
+ zfree(&metadata);
+err_free_traceid_list:
+ intlist__delete(traceid_list);
+err_free_hdr:
+ zfree(&hdr);
+
+ return err;
+}
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 3cc6bc3263fe..5864d5dca616 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -18,6 +18,9 @@
#ifndef INCLUDE__UTIL_PERF_CS_ETM_H__
#define INCLUDE__UTIL_PERF_CS_ETM_H__
+#include "util/event.h"
+#include "util/session.h"
+
/* Versioning header in case things need to change in the future. That way
* decoding of old snapshot is still possible.
*/
@@ -61,6 +64,9 @@ enum {
CS_ETMV4_PRIV_MAX,
};
+/* RB tree for quick conversion between traceID and CPUs */
+struct intlist *traceid_list;
+
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
@@ -71,4 +77,16 @@ static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
#define CS_ETMV3_PRIV_SIZE (CS_ETM_PRIV_MAX * sizeof(u64))
#define CS_ETMV4_PRIV_SIZE (CS_ETMV4_PRIV_MAX * sizeof(u64))
+#ifdef HAVE_CSTRACE_SUPPORT
+int cs_etm__process_auxtrace_info(union perf_event *event,
+ struct perf_session *session);
+#else
+static inline int
+cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
+ struct perf_session *session __maybe_unused)
+{
+ return -1;
+}
+#endif
+
#endif
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 48094fde0a68..d8cfc19ddb10 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -12,16 +12,6 @@
#include "util.h"
#include "debug.h"
-#ifndef O_CLOEXEC
-#ifdef __sparc__
-#define O_CLOEXEC 0x400000
-#elif defined(__alpha__) || defined(__hppa__)
-#define O_CLOEXEC 010000000
-#else
-#define O_CLOEXEC 02000000
-#endif
-#endif
-
static bool check_pipe(struct perf_data *data)
{
struct stat st;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index d5b6f7f5baff..36ef45b2e89d 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -446,7 +446,7 @@ static int do_open(char *name)
char sbuf[STRERR_BUFSIZE];
do {
- fd = open(name, O_RDONLY);
+ fd = open(name, O_RDONLY|O_CLOEXEC);
if (fd >= 0)
return fd;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 6276b340f893..6d311868d850 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "env.h"
+#include "sane_ctype.h"
#include "util.h"
#include <errno.h>
+#include <sys/utsname.h>
struct perf_env perf_env;
@@ -93,3 +95,48 @@ void cpu_cache_level__free(struct cpu_cache_level *cache)
free(cache->map);
free(cache->size);
}
+
+/*
+ * Return architecture name in a normalized form.
+ * The conversion logic comes from the Makefile.
+ */
+static const char *normalize_arch(char *arch)
+{
+ if (!strcmp(arch, "x86_64"))
+ return "x86";
+ if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
+ return "x86";
+ if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
+ return "sparc";
+ if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
+ return "arm64";
+ if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
+ return "arm";
+ if (!strncmp(arch, "s390", 4))
+ return "s390";
+ if (!strncmp(arch, "parisc", 6))
+ return "parisc";
+ if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
+ return "powerpc";
+ if (!strncmp(arch, "mips", 4))
+ return "mips";
+ if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
+ return "sh";
+
+ return arch;
+}
+
+const char *perf_env__arch(struct perf_env *env)
+{
+ struct utsname uts;
+ char *arch_name;
+
+ if (!env) { /* Assume local operation */
+ if (uname(&uts) < 0)
+ return NULL;
+ arch_name = uts.machine;
+ } else
+ arch_name = env->arch;
+
+ return normalize_arch(arch_name);
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 1eb35b190b34..bf970f57dce0 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -65,4 +65,6 @@ int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
int perf_env__read_cpu_topology_map(struct perf_env *env);
void cpu_cache_level__free(struct cpu_cache_level *cache);
+
+const char *perf_env__arch(struct perf_env *env);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 97a8ef9980db..44e603c27944 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1435,6 +1435,11 @@ size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
event->context_switch.next_prev_tid);
}
+static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
+}
+
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
@@ -1467,6 +1472,9 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
case PERF_RECORD_SWITCH_CPU_WIDE:
ret += perf_event__fprintf_switch(event, fp);
break;
+ case PERF_RECORD_LOST:
+ ret += perf_event__fprintf_lost(event, fp);
+ break;
default:
ret += fprintf(fp, "\n");
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 1ae95efbfb95..0f794744919c 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -205,6 +205,7 @@ struct perf_sample {
u32 flags;
u16 insn_len;
u8 cpumode;
+ u16 misc;
char insn[MAX_INSN];
void *raw_data;
struct ip_callchain *callchain;
@@ -774,8 +775,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
u64 read_format,
- const struct perf_sample *sample,
- bool swapped);
+ const struct perf_sample *sample);
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
union perf_event *event, pid_t pid,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index b62e523a7035..ac35cd214feb 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,6 +25,7 @@
#include "parse-events.h"
#include <subcmd/parse-options.h>
+#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -125,7 +126,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
void perf_evlist__exit(struct perf_evlist *evlist)
{
zfree(&evlist->mmap);
- zfree(&evlist->backward_mmap);
+ zfree(&evlist->overwrite_mmap);
fdarray__exit(&evlist->pollfd);
}
@@ -675,11 +676,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
int i;
- if (!evlist->backward_mmap)
+ if (!evlist->overwrite_mmap)
return 0;
for (i = 0; i < evlist->nr_mmaps; i++) {
- int fd = evlist->backward_mmap[i].fd;
+ int fd = evlist->overwrite_mmap[i].fd;
int err;
if (fd < 0)
@@ -711,7 +712,7 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int
* No need for read-write ring buffer: kernel stop outputting when
* it hit md->prev (perf_mmap__consume()).
*/
- return perf_mmap__read_forward(md, evlist->overwrite);
+ return perf_mmap__read_forward(md);
}
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
@@ -738,7 +739,7 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
- perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
+ perf_mmap__consume(&evlist->mmap[idx], false);
}
static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
@@ -749,16 +750,16 @@ static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
for (i = 0; i < evlist->nr_mmaps; i++)
perf_mmap__munmap(&evlist->mmap[i]);
- if (evlist->backward_mmap)
+ if (evlist->overwrite_mmap)
for (i = 0; i < evlist->nr_mmaps; i++)
- perf_mmap__munmap(&evlist->backward_mmap[i]);
+ perf_mmap__munmap(&evlist->overwrite_mmap[i]);
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
perf_evlist__munmap_nofree(evlist);
zfree(&evlist->mmap);
- zfree(&evlist->backward_mmap);
+ zfree(&evlist->overwrite_mmap);
}
static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
@@ -800,7 +801,7 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
struct mmap_params *mp, int cpu_idx,
- int thread, int *_output, int *_output_backward)
+ int thread, int *_output, int *_output_overwrite)
{
struct perf_evsel *evsel;
int revent;
@@ -812,18 +813,20 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
int fd;
int cpu;
+ mp->prot = PROT_READ | PROT_WRITE;
if (evsel->attr.write_backward) {
- output = _output_backward;
- maps = evlist->backward_mmap;
+ output = _output_overwrite;
+ maps = evlist->overwrite_mmap;
if (!maps) {
maps = perf_evlist__alloc_mmap(evlist);
if (!maps)
return -1;
- evlist->backward_mmap = maps;
+ evlist->overwrite_mmap = maps;
if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
}
+ mp->prot &= ~PROT_WRITE;
}
if (evsel->system_wide && thread)
@@ -884,14 +887,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
pr_debug2("perf event ring buffer mmapped per cpu\n");
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
- int output_backward = -1;
+ int output_overwrite = -1;
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
true);
for (thread = 0; thread < nr_threads; thread++) {
if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
- thread, &output, &output_backward))
+ thread, &output, &output_overwrite))
goto out_unmap;
}
}
@@ -912,13 +915,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
pr_debug2("perf event ring buffer mmapped per thread\n");
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
- int output_backward = -1;
+ int output_overwrite = -1;
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
false);
if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
- &output, &output_backward))
+ &output, &output_overwrite))
goto out_unmap;
}
@@ -1052,15 +1055,18 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
* Return: %0 on success, negative error code otherwise.
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite, unsigned int auxtrace_pages,
+ unsigned int auxtrace_pages,
bool auxtrace_overwrite)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
const struct thread_map *threads = evlist->threads;
- struct mmap_params mp = {
- .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
- };
+ /*
+ * Delay setting mp.prot: it is set just before each perf_mmap__mmap()
+ * call because its value depends on the evsel's write_backward flag.
+ * Hence &mp must not be passed around as a const pointer.
+ */
+ struct mmap_params mp;
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist);
@@ -1070,7 +1076,6 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
- evlist->overwrite = overwrite;
evlist->mmap_len = perf_evlist__mmap_size(pages);
pr_debug("mmap size %zuB\n", evlist->mmap_len);
mp.mask = evlist->mmap_len - page_size - 1;
@@ -1091,10 +1096,9 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
return perf_evlist__mmap_per_cpu(evlist, &mp);
}
-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite)
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1102,7 +1106,8 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
struct cpu_map *cpus;
struct thread_map *threads;
- threads = thread_map__new_str(target->pid, target->tid, target->uid);
+ threads = thread_map__new_str(target->pid, target->tid, target->uid,
+ target->per_thread);
if (!threads)
return -1;
@@ -1582,6 +1587,17 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even
return perf_evsel__parse_sample(evsel, event, sample);
}
+int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
+ union perf_event *event,
+ u64 *timestamp)
+{
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (!evsel)
+ return -EFAULT;
+ return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
+}
+
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
struct perf_evsel *evsel;
@@ -1739,13 +1755,13 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
RESUME,
} action = NONE;
- if (!evlist->backward_mmap)
+ if (!evlist->overwrite_mmap)
return;
switch (old_state) {
case BKW_MMAP_NOTREADY: {
if (state != BKW_MMAP_RUNNING)
- goto state_err;;
+ goto state_err;
break;
}
case BKW_MMAP_RUNNING: {
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 491f69542920..75f8e0ad5d76 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -7,7 +7,6 @@
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
-#include <fcntl.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
@@ -31,7 +30,6 @@ struct perf_evlist {
int nr_entries;
int nr_groups;
int nr_mmaps;
- bool overwrite;
bool enabled;
bool has_user_cpus;
size_t mmap_len;
@@ -45,12 +43,14 @@ struct perf_evlist {
} workload;
struct fdarray pollfd;
struct perf_mmap *mmap;
- struct perf_mmap *backward_mmap;
+ struct perf_mmap *overwrite_mmap;
struct thread_map *threads;
struct cpu_map *cpus;
struct perf_evsel *selected;
struct events_stats stats;
struct perf_env *env;
+ u64 first_sample_time;
+ u64 last_sample_time;
};
struct perf_evsel_str_handler {
@@ -169,10 +169,9 @@ int perf_evlist__parse_mmap_pages(const struct option *opt,
unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite, unsigned int auxtrace_pages,
+ unsigned int auxtrace_pages,
bool auxtrace_overwrite);
-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite);
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
size_t perf_evlist__mmap_size(unsigned long pages);
@@ -205,6 +204,10 @@ u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
struct perf_sample *sample);
+int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
+ union perf_event *event,
+ u64 *timestamp);
+
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d5fbcf8c7aa7..66fa45198a11 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -36,6 +36,7 @@
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
+#include "memswap.h"
#include "util/parse-branch-options.h"
#include "sane_ctype.h"
@@ -650,9 +651,9 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
return ret;
}
-void perf_evsel__config_callchain(struct perf_evsel *evsel,
- struct record_opts *opts,
- struct callchain_param *param)
+static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
+ struct record_opts *opts,
+ struct callchain_param *param)
{
bool function = perf_evsel__is_function_event(evsel);
struct perf_event_attr *attr = &evsel->attr;
@@ -698,6 +699,14 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
}
}
+void perf_evsel__config_callchain(struct perf_evsel *evsel,
+ struct record_opts *opts,
+ struct callchain_param *param)
+{
+ if (param->enabled)
+ return __perf_evsel__config_callchain(evsel, opts, param);
+}
+
static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
struct callchain_param *param)
@@ -717,19 +726,19 @@ perf_evsel__reset_callgraph(struct perf_evsel *evsel,
}
static void apply_config_terms(struct perf_evsel *evsel,
- struct record_opts *opts)
+ struct record_opts *opts, bool track)
{
struct perf_evsel_config_term *term;
struct list_head *config_terms = &evsel->config_terms;
struct perf_event_attr *attr = &evsel->attr;
- struct callchain_param param;
+ /* callgraph default */
+ struct callchain_param param = {
+ .record_mode = callchain_param.record_mode,
+ };
u32 dump_size = 0;
int max_stack = 0;
const char *callgraph_buf = NULL;
- /* callgraph default */
- param.record_mode = callchain_param.record_mode;
-
list_for_each_entry(term, config_terms, list) {
switch (term->type) {
case PERF_EVSEL__CONFIG_TERM_PERIOD:
@@ -779,6 +788,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
attr->write_backward = term->val.overwrite ? 1 : 0;
break;
+ case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
+ break;
default:
break;
}
@@ -786,6 +797,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
/* User explicitly set per-event callgraph, clear the old setting and reset. */
if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
+ bool sample_address = false;
+
if (max_stack) {
param.max_stack = max_stack;
if (callgraph_buf == NULL)
@@ -805,6 +818,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
evsel->name);
return;
}
+ if (param.record_mode == CALLCHAIN_DWARF)
+ sample_address = true;
}
}
if (dump_size > 0) {
@@ -817,8 +832,14 @@ static void apply_config_terms(struct perf_evsel *evsel,
perf_evsel__reset_callgraph(evsel, &callchain_param);
/* set perf-event callgraph */
- if (param.enabled)
+ if (param.enabled) {
+ if (sample_address) {
+ perf_evsel__set_sample_bit(evsel, ADDR);
+ perf_evsel__set_sample_bit(evsel, DATA_SRC);
+ evsel->attr.mmap_data = track;
+ }
perf_evsel__config_callchain(evsel, opts, &param);
+ }
}
}
@@ -1049,7 +1070,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
* Apply event specific term settings,
* it overloads any global configuration.
*/
- apply_config_terms(evsel, opts);
+ apply_config_terms(evsel, opts, track);
evsel->ignore_missing_thread = opts->ignore_missing_thread;
}
@@ -1574,6 +1595,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(use_clockid, p_unsigned);
PRINT_ATTRf(context_switch, p_unsigned);
PRINT_ATTRf(write_backward, p_unsigned);
+ PRINT_ATTRf(namespaces, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
@@ -1596,10 +1618,46 @@ static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
return fprintf(fp, " %-32s %s\n", name, val);
}
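+/*
+ * Close the hole left in the fd matrix when a thread disappears by
+ * shifting the remaining per-thread fds down one slot.
+ */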
+static void perf_evsel__remove_fd(struct perf_evsel *pos,
+ int nr_cpus, int nr_threads,
+ int thread_idx)
+{
+ for (int cpu = 0; cpu < nr_cpus; cpu++)
+ for (int thread = thread_idx; thread < nr_threads - 1; thread++)
+ FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
+}
+
+static int update_fds(struct perf_evsel *evsel,
+ int nr_cpus, int cpu_idx,
+ int nr_threads, int thread_idx)
+{
+ struct perf_evsel *pos;
+
+ if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
+ return -EINVAL;
+
+ evlist__for_each_entry(evsel->evlist, pos) {
+ nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
+
+ perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
+
+ /*
+ * Since the fds for the next evsel have not been created yet,
+ * there is no need to iterate over the whole event list.
+ */
+ if (pos == evsel)
+ break;
+ }
+ return 0;
+}
+
static bool ignore_missing_thread(struct perf_evsel *evsel,
+ int nr_cpus, int cpu,
struct thread_map *threads,
int thread, int err)
{
+ pid_t ignore_pid = thread_map__pid(threads, thread);
+
if (!evsel->ignore_missing_thread)
return false;
@@ -1615,11 +1673,18 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
if (threads->nr == 1)
return false;
+ /*
+ * The fds for the missing thread must be removed first because
+ * thread_map__remove() will decrease threads->nr.
+ */
+ if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
+ return false;
+
if (thread_map__remove(threads, thread))
return false;
pr_warning("WARNING: Ignored open failure for pid %d\n",
- thread_map__pid(threads, thread));
+ ignore_pid);
return true;
}
@@ -1724,7 +1789,7 @@ retry_open:
if (fd < 0) {
err = -errno;
- if (ignore_missing_thread(evsel, threads, thread, err)) {
+ if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
/*
* We just removed 1 thread, so take a step
* back on thread index and lower the upper
@@ -1960,6 +2025,20 @@ static inline bool overflow(const void *endp, u16 max_size, const void *offset,
#define OVERFLOW_CHECK_u64(offset) \
OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
+static int
+perf_event__check_size(union perf_event *event, unsigned int sample_size)
+{
+ /*
+ * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+ * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
+ * check the format does not go past the end of the event.
+ */
+ if (sample_size + sizeof(event->header) > event->header.size)
+ return -EFAULT;
+
+ return 0;
+}
+
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
@@ -1981,6 +2060,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
data->stream_id = data->id = data->time = -1ULL;
data->period = evsel->attr.sample_period;
data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ data->misc = event->header.misc;
+ data->id = -1ULL;
+ data->data_src = PERF_MEM_DATA_SRC_NONE;
if (event->header.type != PERF_RECORD_SAMPLE) {
if (!evsel->attr.sample_id_all)
@@ -1990,15 +2072,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array = event->sample.array;
- /*
- * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
- * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
- * check the format does not go past the end of the event.
- */
- if (evsel->sample_size + sizeof(event->header) > event->header.size)
+ if (perf_event__check_size(event, evsel->sample_size))
return -EFAULT;
- data->id = -1ULL;
if (type & PERF_SAMPLE_IDENTIFIER) {
data->id = *array;
array++;
@@ -2028,7 +2104,6 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
- data->addr = 0;
if (type & PERF_SAMPLE_ADDR) {
data->addr = *array;
array++;
@@ -2120,14 +2195,27 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
if (type & PERF_SAMPLE_RAW) {
OVERFLOW_CHECK_u64(array);
u.val64 = *array;
- if (WARN_ONCE(swapped,
- "Endianness of raw data not corrected!\n")) {
- /* undo swap of u64, then swap on individual u32s */
+
+ /*
+ * Undo the swap of the u64, then swap the individual u32s to
+ * get the size of the raw area, and then undo the swap of the
+ * raw data itself. The pevent interface handles endianness by
+ * itself.
+ */
+ if (swapped) {
u.val64 = bswap_64(u.val64);
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
}
data->raw_size = u.val32[0];
+
+ /*
+ * The raw data is aligned to 64 bits, including the
+ * u32 size, so it's safe to use mem_bswap_64().
+ */
+ if (swapped)
+ mem_bswap_64((void *) array, data->raw_size);
+
array = (void *)array + sizeof(u32);
OVERFLOW_CHECK(array, data->raw_size, max_size);
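/*
 * Sketch of the helper relied on above (an assumption about
 * mem_bswap_64() from util/memswap.c): it byte-swaps a region as an
 * array of u64s, which is why the 64-bit alignment of the raw area
 * matters.
 */
static void sketch_mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		m++;
	}
}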
@@ -2192,14 +2280,12 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
- data->data_src = PERF_MEM_DATA_SRC_NONE;
if (type & PERF_SAMPLE_DATA_SRC) {
OVERFLOW_CHECK_u64(array);
data->data_src = *array;
array++;
}
- data->transaction = 0;
if (type & PERF_SAMPLE_TRANSACTION) {
OVERFLOW_CHECK_u64(array);
data->transaction = *array;
@@ -2232,6 +2318,50 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
return 0;
}
+int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
+ union perf_event *event,
+ u64 *timestamp)
+{
+ u64 type = evsel->attr.sample_type;
+ const u64 *array;
+
+ if (!(type & PERF_SAMPLE_TIME))
+ return -1;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ struct perf_sample data = {
+ .time = -1ULL,
+ };
+
+ if (!evsel->attr.sample_id_all)
+ return -1;
+ if (perf_evsel__parse_id_sample(evsel, event, &data))
+ return -1;
+
+ *timestamp = data.time;
+ return 0;
+ }
+
+ array = event->sample.array;
+
+ if (perf_event__check_size(event, evsel->sample_size))
+ return -EFAULT;
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ array++;
+
+ if (type & PERF_SAMPLE_IP)
+ array++;
+
+ if (type & PERF_SAMPLE_TID)
+ array++;
+
+ if (type & PERF_SAMPLE_TIME)
+ *timestamp = *array;
+
+ return 0;
+}
+
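/*
 * Usage sketch (hypothetical caller): fetch just the timestamp for
 * ordering, without the cost of a full perf_evsel__parse_sample().
 */
u64 timestamp = -1ULL;

if (!perf_evsel__parse_sample_timestamp(evsel, event, &timestamp))
	pr_debug("event time: %" PRIu64 "\n", timestamp);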
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
u64 read_format)
{
@@ -2342,8 +2472,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
int perf_event__synthesize_sample(union perf_event *event, u64 type,
u64 read_format,
- const struct perf_sample *sample,
- bool swapped)
+ const struct perf_sample *sample)
{
u64 *array;
size_t sz;
@@ -2368,15 +2497,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_TID) {
u.val32[0] = sample->pid;
u.val32[1] = sample->tid;
- if (swapped) {
- /*
- * Inverse of what is done in perf_evsel__parse_sample
- */
- u.val32[0] = bswap_32(u.val32[0]);
- u.val32[1] = bswap_32(u.val32[1]);
- u.val64 = bswap_64(u.val64);
- }
-
*array = u.val64;
array++;
}
@@ -2403,13 +2523,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_CPU) {
u.val32[0] = sample->cpu;
- if (swapped) {
- /*
- * Inverse of what is done in perf_evsel__parse_sample
- */
- u.val32[0] = bswap_32(u.val32[0]);
- u.val64 = bswap_64(u.val64);
- }
+ u.val32[1] = 0;
*array = u.val64;
array++;
}
@@ -2456,15 +2570,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_RAW) {
u.val32[0] = sample->raw_size;
- if (WARN_ONCE(swapped,
- "Endianness of raw data not corrected!\n")) {
- /*
- * Inverse of what is done in perf_evsel__parse_sample
- */
- u.val32[0] = bswap_32(u.val32[0]);
- u.val32[1] = bswap_32(u.val32[1]);
- u.val64 = bswap_64(u.val64);
- }
*array = u.val64;
array = (void *)array + sizeof(u32);
@@ -2743,8 +2848,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
break;
case EOPNOTSUPP:
if (evsel->attr.sample_period != 0)
- return scnprintf(msg, size, "%s",
- "PMU Hardware doesn't support sampling/overflow-interrupts.");
+ return scnprintf(msg, size,
+ "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
+ perf_evsel__name(evsel));
if (evsel->attr.precise_ip)
return scnprintf(msg, size, "%s",
"\'precise\' request may not be supported. Try removing 'p' modifier.");
@@ -2781,16 +2887,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
perf_evsel__name(evsel));
}
-char *perf_evsel__env_arch(struct perf_evsel *evsel)
-{
- if (evsel && evsel->evlist && evsel->evlist->env)
- return evsel->evlist->env->arch;
- return NULL;
-}
-
-char *perf_evsel__env_cpuid(struct perf_evsel *evsel)
+struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
{
- if (evsel && evsel->evlist && evsel->evlist->env)
- return evsel->evlist->env->cpuid;
+ if (evsel && evsel->evlist)
+ return evsel->evlist->env;
return NULL;
}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 157f49e8a772..846e41644525 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -38,7 +38,7 @@ struct cgroup_sel;
* It is allocated within event parsing and attached to
* perf_evsel::config_terms list head.
*/
-enum {
+enum term_type {
PERF_EVSEL__CONFIG_TERM_PERIOD,
PERF_EVSEL__CONFIG_TERM_FREQ,
PERF_EVSEL__CONFIG_TERM_TIME,
@@ -49,12 +49,11 @@ enum {
PERF_EVSEL__CONFIG_TERM_OVERWRITE,
PERF_EVSEL__CONFIG_TERM_DRV_CFG,
PERF_EVSEL__CONFIG_TERM_BRANCH,
- PERF_EVSEL__CONFIG_TERM_MAX,
};
struct perf_evsel_config_term {
struct list_head list;
- int type;
+ enum term_type type;
union {
u64 period;
u64 freq;
@@ -339,6 +338,10 @@ static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *sample);
+int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
+ union perf_event *event,
+ u64 *timestamp);
+
static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
return list_entry(evsel->node.next, struct perf_evsel, node);
@@ -443,7 +446,6 @@ typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv);
-char *perf_evsel__env_arch(struct perf_evsel *evsel);
-char *perf_evsel__env_cpuid(struct perf_evsel *evsel);
+struct perf_env *perf_evsel__env(struct perf_evsel *evsel);
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
index 9bbcec4e3365..ff17920a5ebc 100755
--- a/tools/perf/util/generate-cmdlist.sh
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -38,7 +38,7 @@ do
done
echo "#endif /* HAVE_LIBELF_SUPPORT */"
-echo "#ifdef HAVE_LIBAUDIT_SUPPORT"
+echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)"
sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt |
sort |
while read cmd
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 7c0e9d587bfa..a326e0d8b5b6 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -15,9 +15,8 @@
#include <linux/bitops.h>
#include <linux/stringify.h>
#include <sys/stat.h>
-#include <sys/types.h>
#include <sys/utsname.h>
-#include <unistd.h>
+#include <linux/time64.h>
#include "evlist.h"
#include "evsel.h"
@@ -37,6 +36,7 @@
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
+#include "time-utils.h"
#include "sane_ctype.h"
@@ -1182,6 +1182,20 @@ static int write_stat(struct feat_fd *ff __maybe_unused,
return 0;
}
+static int write_sample_time(struct feat_fd *ff,
+ struct perf_evlist *evlist)
+{
+ int ret;
+
+ ret = do_write(ff, &evlist->first_sample_time,
+ sizeof(evlist->first_sample_time));
+ if (ret < 0)
+ return ret;
+
+ return do_write(ff, &evlist->last_sample_time,
+ sizeof(evlist->last_sample_time));
+}
+
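/*
 * On-disk sketch of the HEADER_SAMPLE_TIME feature section written
 * above: two consecutive u64 values (byte order is handled by the
 * do_write()/do_read_u64() helpers).
 *
 *	u64 first_sample_time;
 *	u64 last_sample_time;
 */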
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -1507,6 +1521,28 @@ static void print_group_desc(struct feat_fd *ff, FILE *fp)
}
}
+static void print_sample_time(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_session *session;
+ char time_buf[32];
+ double d;
+
+ session = container_of(ff->ph, struct perf_session, header);
+
+ timestamp__scnprintf_usec(session->evlist->first_sample_time,
+ time_buf, sizeof(time_buf));
+ fprintf(fp, "# time of first sample : %s\n", time_buf);
+
+ timestamp__scnprintf_usec(session->evlist->last_sample_time,
+ time_buf, sizeof(time_buf));
+ fprintf(fp, "# time of last sample : %s\n", time_buf);
+
+ d = (double)(session->evlist->last_sample_time -
+ session->evlist->first_sample_time) / NSEC_PER_MSEC;
+
+ fprintf(fp, "# sample duration : %10.3f ms\n", d);
+}
+
static int __event_process_build_id(struct build_id_event *bev,
char *filename,
struct perf_session *session)
@@ -2148,6 +2184,27 @@ out_free_caches:
return -1;
}
+static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct perf_session *session;
+ u64 first_sample_time, last_sample_time;
+ int ret;
+
+ session = container_of(ff->ph, struct perf_session, header);
+
+ ret = do_read_u64(ff, &first_sample_time);
+ if (ret)
+ return -1;
+
+ ret = do_read_u64(ff, &last_sample_time);
+ if (ret)
+ return -1;
+
+ session->evlist->first_sample_time = first_sample_time;
+ session->evlist->last_sample_time = last_sample_time;
+ return 0;
+}
+
struct feature_ops {
int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2205,6 +2262,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPN(AUXTRACE, auxtrace, false),
FEAT_OPN(STAT, stat, false),
FEAT_OPN(CACHE, cache, true),
+ FEAT_OPR(SAMPLE_TIME, sample_time, false),
};
struct header_print_data {
@@ -3258,6 +3316,74 @@ int perf_event__synthesize_attrs(struct perf_tool *tool,
return err;
}
+static bool has_unit(struct perf_evsel *counter)
+{
+ return counter->unit && *counter->unit;
+}
+
+static bool has_scale(struct perf_evsel *counter)
+{
+ return counter->scale != 1;
+}
+
+int perf_event__synthesize_extra_attr(struct perf_tool *tool,
+ struct perf_evlist *evsel_list,
+ perf_event__handler_t process,
+ bool is_pipe)
+{
+ struct perf_evsel *counter;
+ int err;
+
+ /*
+ * Synthesize the other event attributes that are not carried
+ * within the attr event: unit, scale, name.
+ */
+ evlist__for_each_entry(evsel_list, counter) {
+ if (!counter->supported)
+ continue;
+
+ /*
+ * Synthesize unit and scale only if they are defined.
+ */
+ if (has_unit(counter)) {
+ err = perf_event__synthesize_event_update_unit(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel unit.\n");
+ return err;
+ }
+ }
+
+ if (has_scale(counter)) {
+ err = perf_event__synthesize_event_update_scale(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel counter.\n");
+ return err;
+ }
+ }
+
+ if (counter->own_cpus) {
+ err = perf_event__synthesize_event_update_cpus(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel cpus.\n");
+ return err;
+ }
+ }
+
+ /*
+ * Name is needed only for pipe output,
+ * perf.data carries event names.
+ */
+ if (is_pipe) {
+ err = perf_event__synthesize_event_update_name(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel name.\n");
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
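/*
 * Call-site sketch (hypothetical; the real callers would live in the
 * builtin-record/builtin-stat synthesis paths, and 'rec' is a made-up
 * record-session struct):
 */
err = perf_event__synthesize_extra_attr(&rec->tool, rec->evlist,
					process_synthesized_event,
					perf_data__is_pipe(&rec->data));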
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_evlist **pevlist)
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 29ccbfdf8724..f28aaaa3a440 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "event.h"
#include "env.h"
+#include "pmu.h"
enum {
HEADER_RESERVED = 0, /* always cleared */
@@ -34,6 +35,7 @@ enum {
HEADER_AUXTRACE,
HEADER_STAT,
HEADER_CACHE,
+ HEADER_SAMPLE_TIME,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -107,6 +109,11 @@ int perf_event__synthesize_features(struct perf_tool *tool,
struct perf_evlist *evlist,
perf_event__handler_t process);
+int perf_event__synthesize_extra_attr(struct perf_tool *tool,
+ struct perf_evlist *evsel_list,
+ perf_event__handler_t process,
+ bool is_pipe);
+
int perf_event__process_feature(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
@@ -166,5 +173,5 @@ int write_padded(struct feat_fd *fd, const void *bf,
*/
int get_cpuid(char *buffer, size_t sz);
-char *get_cpuid_str(void);
+char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused);
#endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 5325e65f9711..72db2744876d 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -67,7 +67,6 @@ struct intel_bts {
u64 branches_sample_type;
u64 branches_id;
size_t branches_event_size;
- bool synth_needs_swap;
unsigned long num_events;
};
@@ -303,8 +302,7 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
event.sample.header.size = bts->branches_event_size;
ret = perf_event__synthesize_sample(&event,
bts->branches_sample_type,
- 0, &sample,
- bts->synth_needs_swap);
+ 0, &sample);
if (ret)
return ret;
}
@@ -841,8 +839,6 @@ static int intel_bts_synth_events(struct intel_bts *bts,
__perf_evsel__sample_size(attr.sample_type);
}
- bts->synth_needs_swap = evsel->needs_swap;
-
return 0;
}
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 10e0814bb8d2..1b704fbea9de 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -11,15 +11,21 @@ $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_table
$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/intel-pt-insn-decoder.c util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
@(diff -I 2>&1 | grep -q 'option requires an argument' && \
- test -d ../../kernel -a -d ../../tools -a -d ../perf && (( \
- diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
- diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
- diff -B util/intel-pt-decoder/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
- || echo "Warning: Intel PT: x86 instruction decoder differs from kernel" >&2 )) || true
+ test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+ ((diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder C file at 'tools/perf/util/intel-pt-decoder/insn.c' differs from latest version at 'arch/x86/lib/insn.c'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder C file at 'tools/perf/util/intel-pt-decoder/inat.c' differs from latest version at 'arch/x86/lib/inat.c'" >&2)) && \
+ ((diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder map file at 'tools/perf/util/intel-pt-decoder/x86-opcode-map.txt' differs from latest version at 'arch/x86/lib/x86-opcode-map.txt'" >&2)) && \
+ ((diff -B util/intel-pt-decoder/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder script at 'tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk' differs from latest version at 'arch/x86/tools/gen-insn-attr-x86.awk'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/insn.h ../../arch/x86/include/asm/insn.h >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder header at 'tools/perf/util/intel-pt-decoder/insn.h' differs from latest version at 'arch/x86/include/asm/insn.h'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/inat.h ../../arch/x86/include/asm/inat.h >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder header at 'tools/perf/util/intel-pt-decoder/inat.h' differs from latest version at 'arch/x86/include/asm/inat.h'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder header at 'tools/perf/util/intel-pt-decoder/inat_types.h' differs from latest version at 'arch/x86/include/asm/inat_types.h'" >&2)))) || true
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 23f9ba676df0..3773d9c54f45 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -104,8 +104,6 @@ struct intel_pt {
u64 pwrx_id;
u64 cbr_id;
- bool synth_needs_swap;
-
u64 tsc_bit;
u64 mtc_bit;
u64 mtc_freq_bits;
@@ -1101,11 +1099,10 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt,
}
static int intel_pt_inject_event(union perf_event *event,
- struct perf_sample *sample, u64 type,
- bool swapped)
+ struct perf_sample *sample, u64 type)
{
event->header.size = perf_event__sample_event_size(sample, type, 0);
- return perf_event__synthesize_sample(event, type, 0, sample, swapped);
+ return perf_event__synthesize_sample(event, type, 0, sample);
}
static inline int intel_pt_opt_inject(struct intel_pt *pt,
@@ -1115,7 +1112,7 @@ static inline int intel_pt_opt_inject(struct intel_pt *pt,
if (!pt->synth_opts.inject)
return 0;
- return intel_pt_inject_event(event, sample, type, pt->synth_needs_swap);
+ return intel_pt_inject_event(event, sample, type);
}
static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
@@ -2329,8 +2326,6 @@ static int intel_pt_synth_events(struct intel_pt *pt,
id += 1;
}
- pt->synth_needs_swap = evsel->needs_swap;
-
return 0;
}
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 270f3223c6df..b05a67464c03 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1726,7 +1726,7 @@ static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
bool show_addr = callchain_param.key == CCKEY_ADDRESS;
srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
- sym, show_sym, show_addr);
+ sym, show_sym, show_addr, ip);
srcline__tree_insert(&map->dso->srclines, ip, srcline);
}
@@ -2204,7 +2204,7 @@ int thread__resolve_callchain(struct thread *thread,
{
int ret = 0;
- callchain_cursor_reset(&callchain_cursor);
+ callchain_cursor_reset(cursor);
if (callchain_param.order == ORDER_CALLEE) {
ret = thread__resolve_callchain_sample(thread, cursor,
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6d40efd74402..8fe57031e1a8 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -419,7 +419,7 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
if (map && map->dso) {
srcline = get_srcline(map->dso,
map__rip_2objdump(map, addr), NULL,
- true, true);
+ true, true, addr);
if (srcline != SRCLINE_UNKNOWN)
ret = fprintf(fp, "%s%s", prefix, srcline);
free_srcline(srcline);
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 0ddd9c199227..1ddc3d1d0147 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -20,12 +20,10 @@
#include "pmu.h"
#include "expr.h"
#include "rblist.h"
-#include "pmu.h"
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include "pmu-events/pmu-events.h"
-#include "strbuf.h"
#include "strlist.h"
#include <assert.h>
#include <ctype.h>
@@ -38,6 +36,10 @@ struct metric_event *metricgroup__lookup(struct rblist *metric_events,
struct metric_event me = {
.evsel = evsel
};
+
+ if (!metric_events)
+ return NULL;
+
nd = rblist__find(metric_events, &me);
if (nd)
return container_of(nd, struct metric_event, nd);
@@ -270,7 +272,7 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
bool raw)
{
- struct pmu_events_map *map = perf_pmu__find_map();
+ struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
int i;
struct rblist groups;
@@ -368,7 +370,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
static int metricgroup__add_metric(const char *metric, struct strbuf *events,
struct list_head *group_list)
{
- struct pmu_events_map *map = perf_pmu__find_map();
+ struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
int ret = -EINVAL;
int i, j;
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 9fe5f9c7d577..05076e683938 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -21,33 +21,13 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
}
-/* When check_messup is true, 'end' must points to a good entry */
+/* 'end' must point to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
+static union perf_event *perf_mmap__read(struct perf_mmap *map,
u64 start, u64 end, u64 *prev)
{
unsigned char *data = map->base + page_size;
union perf_event *event = NULL;
int diff = end - start;
- if (check_messup) {
- /*
- * If we're further behind than half the buffer, there's a chance
- * the writer will bite our tail and mess up the samples under us.
- *
- * If we somehow ended up ahead of the 'end', we got messed up.
- *
- * In either case, truncate and restart at 'end'.
- */
- if (diff > map->mask / 2 || diff < 0) {
- fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
-
- /*
- * 'end' points to a known good entry, start there.
- */
- start = end;
- diff = 0;
- }
- }
-
if (diff >= (int)sizeof(event->header)) {
size_t size;
@@ -89,7 +69,7 @@ broken_event:
return event;
}
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
u64 head;
u64 old = map->prev;
@@ -102,7 +82,7 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_mess
head = perf_mmap__read_head(map);
- return perf_mmap__read(map, check_messup, old, head, &map->prev);
+ return perf_mmap__read(map, old, head, &map->prev);
}
union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
@@ -138,7 +118,7 @@ union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
else
end = head + map->mask + 1;
- return perf_mmap__read(map, false, start, end, &map->prev);
+ return perf_mmap__read(map, start, end, &map->prev);
}
void perf_mmap__read_catchup(struct perf_mmap *map)
@@ -254,18 +234,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
return 0;
}
-static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
struct perf_event_header *pheader;
u64 evt_head = head;
int size = mask + 1;
- pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
+ pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
pheader = (struct perf_event_header *)(buf + (head & mask));
*start = head;
while (true) {
if (evt_head - head >= (unsigned int)size) {
- pr_debug("Finished reading backward ring buffer: rewind\n");
+ pr_debug("Finished reading overwrite ring buffer: rewind\n");
if (evt_head - head > (unsigned int)size)
evt_head -= pheader->size;
*end = evt_head;
@@ -275,7 +255,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
pheader = (struct perf_event_header *)(buf + (evt_head & mask));
if (pheader->size == 0) {
- pr_debug("Finished reading backward ring buffer: get start\n");
+ pr_debug("Finished reading overwrite ring buffer: get start\n");
*end = evt_head;
return 0;
}
@@ -287,19 +267,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
return -1;
}
-static int rb_find_range(void *data, int mask, u64 head, u64 old,
- u64 *start, u64 *end, bool backward)
-{
- if (!backward) {
- *start = old;
- *end = head;
- return 0;
- }
-
- return backward_rb_find_range(data, mask, head, start, end);
-}
-
-int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
void *to, int push(void *to, void *buf, size_t size))
{
u64 head = perf_mmap__read_head(md);
@@ -310,19 +278,28 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
void *buf;
int rc = 0;
- if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
- return -1;
+ start = overwrite ? head : old;
+ end = overwrite ? old : head;
if (start == end)
return 0;
size = end - start;
if (size > (unsigned long)(md->mask) + 1) {
- WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+ if (!overwrite) {
+ WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
- md->prev = head;
- perf_mmap__consume(md, overwrite || backward);
- return 0;
+ md->prev = head;
+ perf_mmap__consume(md, overwrite);
+ return 0;
+ }
+
+ /*
+ * The backward ring buffer is full. We still have a chance to read
+ * most of the data from it.
+ */
+ if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
+ return -1;
}
if ((start & md->mask) + size != (end & md->mask)) {
@@ -346,7 +323,7 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
}
md->prev = head;
- perf_mmap__consume(md, overwrite || backward);
+ perf_mmap__consume(md, overwrite);
out:
return rc;
}
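/*
 * Reading-window sketch with made-up offsets (mask = 0xffff): for a
 * forward map the unread bytes are [old, head); for an overwrite map
 * the kernel writes backwards, so the window flips to [head, old).
 *
 *	forward:   old = 0x1000, head = 0x1800 -> push [0x1000, 0x1800)
 *	overwrite: head = 0x1000, old = 0x1800 -> push [0x1000, 0x1800)
 */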
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index 3a5cb5a6e94a..e43d7b55a55f 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -86,10 +86,10 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
pc->data_tail = tail;
}
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
-int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
void *to, int push(void *to, void *buf, size_t size));
size_t perf_mmap__mmap_len(struct perf_mmap *map);
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 8e09fd2d842f..bad9e0296e9a 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -157,9 +157,8 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
}
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
- struct perf_sample *sample, u64 file_offset)
+ u64 timestamp, u64 file_offset)
{
- u64 timestamp = sample->time;
struct ordered_event *oevent;
if (!timestamp || timestamp == ~0ULL)
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 96e5292d88e2..8c7a2948593e 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -45,7 +45,7 @@ struct ordered_events {
};
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
- struct perf_sample *sample, u64 file_offset);
+ u64 timestamp, u64 file_offset);
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 170316795a18..34589c427e52 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -4,6 +4,9 @@
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include <sys/param.h>
#include "term.h"
#include "../perf.h"
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index 933f5c6bffb4..ca56ba2dd3da 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <dirent.h>
#include <unistd.h>
static char bad_path[] = "/bad-path/";
@@ -77,3 +78,16 @@ bool is_regular_file(const char *file)
return S_ISREG(st.st_mode);
}
+
+/* Helper function for filesystems that return dent->d_type as DT_UNKNOWN */
+bool is_directory(const char *base_path, const struct dirent *dent)
+{
+ char path[PATH_MAX];
+ struct stat st;
+
+ sprintf(path, "%s/%s", base_path, dent->d_name);
+ if (stat(path, &st))
+ return false;
+
+ return S_ISDIR(st.st_mode);
+}
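/*
 * Usage sketch (hypothetical readdir() loop): fall back to stat() only
 * when the filesystem reports DT_UNKNOWN.
 */
DIR *dir = opendir(base_path);
struct dirent *dent;

while ((dent = readdir(dir)) != NULL) {
	if (dent->d_type == DT_DIR ||
	    (dent->d_type == DT_UNKNOWN && is_directory(base_path, dent)))
		printf("dir: %s\n", dent->d_name);
}
closedir(dir);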
diff --git a/tools/perf/util/path.h b/tools/perf/util/path.h
index 14a254ada7eb..f014f905df50 100644
--- a/tools/perf/util/path.h
+++ b/tools/perf/util/path.h
@@ -2,9 +2,12 @@
#ifndef _PERF_PATH_H
#define _PERF_PATH_H
+struct dirent;
+
int path__join(char *bf, size_t size, const char *path1, const char *path2);
int path__join3(char *bf, size_t size, const char *path1, const char *path2, const char *path3);
bool is_regular_file(const char *file);
+bool is_directory(const char *base_path, const struct dirent *dent);
#endif /* _PERF_PATH_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 80fb1593913a..57e38fdf0b34 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -12,6 +12,7 @@
#include <dirent.h>
#include <api/fs/fs.h>
#include <locale.h>
+#include <regex.h>
#include "util.h"
#include "pmu.h"
#include "parse-events.h"
@@ -537,17 +538,45 @@ static bool pmu_is_uncore(const char *name)
}
/*
+ * Core PMU devices may have a name other than "cpu" in sysfs on some
+ * platforms. Look for known sysfs files to identify such a core device.
+ */
+static int is_pmu_core(const char *name)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return 0;
+
+ /* Look for cpu sysfs (x86 and others) */
+ scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu", sysfs);
+ if ((stat(path, &st) == 0) &&
+ (strncmp(name, "cpu", strlen("cpu")) == 0))
+ return 1;
+
+ /* Look for cpu sysfs (specific to arm) */
+ scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus",
+ sysfs, name);
+ if (stat(path, &st) == 0)
+ return 1;
+
+ return 0;
+}
+
+/*
* Return the CPU id as a raw string.
*
* Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
*/
-char * __weak get_cpuid_str(void)
+char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
return NULL;
}
-static char *perf_pmu__getcpuid(void)
+static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
{
char *cpuid;
static bool printed;
@@ -556,7 +585,7 @@ static char *perf_pmu__getcpuid(void)
if (cpuid)
cpuid = strdup(cpuid);
if (!cpuid)
- cpuid = get_cpuid_str();
+ cpuid = get_cpuid_str(pmu);
if (!cpuid)
return NULL;
@@ -567,22 +596,45 @@ static char *perf_pmu__getcpuid(void)
return cpuid;
}
-struct pmu_events_map *perf_pmu__find_map(void)
+struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
{
struct pmu_events_map *map;
- char *cpuid = perf_pmu__getcpuid();
+ char *cpuid = perf_pmu__getcpuid(pmu);
int i;
+ /*
+ * On some platforms which use a cpus map, cpuid can be NULL for
+ * PMUs other than core PMUs.
+ */
+ if (!cpuid)
+ return NULL;
+
i = 0;
for (;;) {
+ regex_t re;
+ regmatch_t pmatch[1];
+ int match;
+
map = &pmu_events_map[i++];
if (!map->table) {
map = NULL;
break;
}
- if (!strcmp(map->cpuid, cpuid))
+ if (regcomp(&re, map->cpuid, REG_EXTENDED) != 0) {
+ /* Warn that the cpuid pattern from the mapfile failed to compile. */
+ pr_info("Invalid regular expression %s\n", map->cpuid);
break;
+ }
+
+ match = !regexec(&re, cpuid, 1, pmatch, 0);
+ regfree(&re);
+ if (match) {
+ size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
+
+ /* Verify the entire string matched. */
+ if (match_len == strlen(cpuid))
+ break;
+ }
}
free(cpuid);
return map;
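/*
 * Standalone demonstration of the full-match rule above (editorial
 * sketch): a mapfile cpuid pattern only counts as a hit when the match
 * covers the entire cpuid string.
 */
static bool sketch_cpuid_matches(const char *pattern, const char *cpuid)
{
	regex_t re;
	regmatch_t m[1];
	bool ok = false;

	if (regcomp(&re, pattern, REG_EXTENDED))
		return false;
	if (!regexec(&re, cpuid, 1, m, 0))
		ok = (size_t)(m[0].rm_eo - m[0].rm_so) == strlen(cpuid);
	regfree(&re);
	return ok;
}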
@@ -593,13 +645,14 @@ struct pmu_events_map *perf_pmu__find_map(void)
* to the current running CPU. Then, add all PMU events from that table
* as aliases.
*/
-static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
+static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
{
int i;
struct pmu_events_map *map;
struct pmu_event *pe;
+ const char *name = pmu->name;
- map = perf_pmu__find_map();
+ map = perf_pmu__find_map(pmu);
if (!map)
return;
@@ -608,7 +661,6 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
*/
i = 0;
while (1) {
- const char *pname;
pe = &map->table[i++];
if (!pe->name) {
@@ -617,9 +669,13 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
break;
}
- pname = pe->pmu ? pe->pmu : "cpu";
- if (strncmp(pname, name, strlen(pname)))
- continue;
+ if (!is_pmu_core(name)) {
+ /* check for uncore devices */
+ if (pe->pmu == NULL)
+ continue;
+ if (strncmp(pe->pmu, name, strlen(pe->pmu)))
+ continue;
+ }
/* need type casts to override 'const' */
__perf_pmu__new_alias(head, NULL, (char *)pe->name,
@@ -661,21 +717,20 @@ static struct perf_pmu *pmu_lookup(const char *name)
if (pmu_aliases(name, &aliases))
return NULL;
- pmu_add_cpu_aliases(&aliases, name);
pmu = zalloc(sizeof(*pmu));
if (!pmu)
return NULL;
pmu->cpus = pmu_cpumask(name);
-
+ pmu->name = strdup(name);
+ pmu->type = type;
pmu->is_uncore = pmu_is_uncore(name);
+ pmu_add_cpu_aliases(&aliases, pmu);
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
list_splice(&format, &pmu->format);
list_splice(&aliases, &pmu->aliases);
- pmu->name = strdup(name);
- pmu->type = type;
list_add_tail(&pmu->list, &pmus);
pmu->default_config = perf_pmu__get_default_config(pmu);
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 27c75e635866..76fecec7b3f9 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -92,6 +92,6 @@ int perf_pmu__test(void);
struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
-struct pmu_events_map *perf_pmu__find_map(void);
+struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
#endif /* __PMU_H */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index b7aaf9b2294d..e1dbc9821617 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1325,27 +1325,30 @@ static int parse_perf_probe_event_name(char **arg, struct perf_probe_event *pev)
{
char *ptr;
- ptr = strchr(*arg, ':');
+ ptr = strpbrk_esc(*arg, ":");
if (ptr) {
*ptr = '\0';
if (!pev->sdt && !is_c_func_name(*arg))
goto ng_name;
- pev->group = strdup(*arg);
+ pev->group = strdup_esc(*arg);
if (!pev->group)
return -ENOMEM;
*arg = ptr + 1;
} else
pev->group = NULL;
- if (!pev->sdt && !is_c_func_name(*arg)) {
+
+ pev->event = strdup_esc(*arg);
+ if (pev->event == NULL)
+ return -ENOMEM;
+
+ if (!pev->sdt && !is_c_func_name(pev->event)) {
+ zfree(&pev->event);
ng_name:
+ zfree(&pev->group);
semantic_error("%s is bad for event name - it must "
"follow the C symbol-naming rule.\n", *arg);
return -EINVAL;
}
- pev->event = strdup(*arg);
- if (pev->event == NULL)
- return -ENOMEM;
-
return 0;
}
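/*
 * Sketch of the escape-aware helper used above (an assumption about
 * strpbrk_esc(), added to util/string.c by the companion change): like
 * strpbrk(), but delimiters preceded by a backslash are skipped, so
 * "grp\:x:event" splits at the second ':' only.
 */
static char *sketch_strpbrk_esc(char *str, const char *stopset)
{
	char *ptr;

	for (;;) {
		ptr = strpbrk(str, stopset);
		if (!ptr || ptr == str || *(ptr - 1) != '\\')
			return ptr;	/* no hit, or an unescaped hit */
		str = ptr + 1;		/* escaped: keep scanning */
	}
}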
@@ -1373,7 +1376,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
arg++;
}
- ptr = strpbrk(arg, ";=@+%");
+ ptr = strpbrk_esc(arg, ";=@+%");
if (pev->sdt) {
if (ptr) {
if (*ptr != '@') {
@@ -1387,7 +1390,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
pev->target = build_id_cache__origname(tmp);
free(tmp);
} else
- pev->target = strdup(ptr + 1);
+ pev->target = strdup_esc(ptr + 1);
if (!pev->target)
return -ENOMEM;
*ptr = '\0';
@@ -1421,13 +1424,14 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
*
* Otherwise, we consider arg to be a function specification.
*/
- if (!strpbrk(arg, "+@%") && (ptr = strpbrk(arg, ";:")) != NULL) {
+ if (!strpbrk_esc(arg, "+@%")) {
+ ptr = strpbrk_esc(arg, ";:");
/* This is a file spec if it includes a '.' before ; or : */
- if (memchr(arg, '.', ptr - arg))
+ if (ptr && memchr(arg, '.', ptr - arg))
file_spec = true;
}
- ptr = strpbrk(arg, ";:+@%");
+ ptr = strpbrk_esc(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
@@ -1436,7 +1440,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
if (arg[0] == '\0')
tmp = NULL;
else {
- tmp = strdup(arg);
+ tmp = strdup_esc(arg);
if (tmp == NULL)
return -ENOMEM;
}
@@ -1469,12 +1473,12 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
arg = ptr;
c = nc;
if (c == ';') { /* Lazy pattern must be the last part */
- pp->lazy_line = strdup(arg);
+ pp->lazy_line = strdup(arg); /* leave escapes as-is */
if (pp->lazy_line == NULL)
return -ENOMEM;
break;
}
- ptr = strpbrk(arg, ";:+@%");
+ ptr = strpbrk_esc(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
@@ -1501,7 +1505,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
semantic_error("SRC@SRC is not allowed.\n");
return -EINVAL;
}
- pp->file = strdup(arg);
+ pp->file = strdup_esc(arg);
if (pp->file == NULL)
return -ENOMEM;
break;
@@ -2573,7 +2577,8 @@ int show_perf_probe_events(struct strfilter *filter)
}
static int get_new_event_name(char *buf, size_t len, const char *base,
- struct strlist *namelist, bool allow_suffix)
+ struct strlist *namelist, bool ret_event,
+ bool allow_suffix)
{
int i, ret;
char *p, *nbase;
@@ -2584,13 +2589,13 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
if (!nbase)
return -ENOMEM;
- /* Cut off the dot suffixes (e.g. .const, .isra)*/
- p = strchr(nbase, '.');
+ /* Cut off the dot suffixes (e.g. .const, .isra) and version suffixes */
+ p = strpbrk(nbase, ".@");
if (p && p != nbase)
*p = '\0';
/* Try no suffix number */
- ret = e_snprintf(buf, len, "%s", nbase);
+ ret = e_snprintf(buf, len, "%s%s", nbase, ret_event ? "__return" : "");
if (ret < 0) {
pr_debug("snprintf() failed: %d\n", ret);
goto out;
@@ -2625,6 +2630,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
out:
free(nbase);
+
+ /* Final validation */
+ if (ret >= 0 && !is_c_func_name(buf)) {
+ pr_warning("Internal error: \"%s\" is an invalid event name.\n",
+ buf);
+ ret = -EINVAL;
+ }
+
return ret;
}
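/*
 * Naming sketch (illustrative): with base "vfs_read.isra.1" the dot
 * suffix is cut, so a return probe would first try "vfs_read__return"
 * and, if that name is already taken in namelist, numbered suffixes
 * are assumed to be tried until a free name is found.
 */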
@@ -2681,8 +2694,8 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
group = PERFPROBE_GROUP;
/* Get an unused new event name */
- ret = get_new_event_name(buf, 64, event,
- namelist, allow_suffix);
+ ret = get_new_event_name(buf, 64, event, namelist,
+ tev->point.retprobe, allow_suffix);
if (ret < 0)
return ret;
@@ -2792,16 +2805,40 @@ static int find_probe_functions(struct map *map, char *name,
int found = 0;
struct symbol *sym;
struct rb_node *tmp;
+ const char *norm, *ver;
+ char *buf = NULL;
+ bool cut_version = true;
if (map__load(map) < 0)
return 0;
+ /* If the user gives a version, don't cut off the version from the symbols */
+ if (strchr(name, '@'))
+ cut_version = false;
+
map__for_each_symbol(map, sym, tmp) {
- if (strglobmatch(sym->name, name)) {
+ norm = arch__normalize_symbol_name(sym->name);
+ if (!norm)
+ continue;
+
+ if (cut_version) {
+ /* We don't care whether it is the default symbol or not */
+ ver = strchr(norm, '@');
+ if (ver) {
+ buf = strndup(norm, ver - norm);
+ if (!buf)
+ return -ENOMEM;
+ norm = buf;
+ }
+ }
+
+ if (strglobmatch(norm, name)) {
found++;
if (syms && found < probe_conf.max_probes)
syms[found - 1] = sym;
}
+ if (buf)
+ zfree(&buf);
}
return found;
@@ -2847,7 +2884,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
* same name but different addresses, this lists all the symbols.
*/
num_matched_functions = find_probe_functions(map, pp->function, syms);
- if (num_matched_functions == 0) {
+ if (num_matched_functions <= 0) {
pr_err("Failed to find symbol %s in %s\n", pp->function,
pev->target ? : "kernel");
ret = -ENOENT;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index b4f2f06722a7..7aa0ea64544e 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,6 +10,7 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
+util/memswap.c
util/mmap.c
util/namespaces.c
../lib/bitmap.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 8e49d9cafcfc..b1e999bd21ef 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -864,7 +864,7 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
&pages, &overwrite))
return NULL;
- if (perf_evlist__mmap(evlist, pages, overwrite) < 0) {
+ if (perf_evlist__mmap(evlist, pages) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
index 0dfe27d99458..0efc3258c648 100644
--- a/tools/perf/util/rblist.c
+++ b/tools/perf/util/rblist.c
@@ -101,16 +101,21 @@ void rblist__init(struct rblist *rblist)
return;
}
+void rblist__exit(struct rblist *rblist)
+{
+ struct rb_node *pos, *next = rb_first(&rblist->entries);
+
+ while (next) {
+ pos = next;
+ next = rb_next(pos);
+ rblist__remove_node(rblist, pos);
+ }
+}
+
void rblist__delete(struct rblist *rblist)
{
if (rblist != NULL) {
- struct rb_node *pos, *next = rb_first(&rblist->entries);
-
- while (next) {
- pos = next;
- next = rb_next(pos);
- rblist__remove_node(rblist, pos);
- }
+ rblist__exit(rblist);
free(rblist);
}
}
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
index 4c8638a22571..76df15c27f5f 100644
--- a/tools/perf/util/rblist.h
+++ b/tools/perf/util/rblist.h
@@ -29,6 +29,7 @@ struct rblist {
};
void rblist__init(struct rblist *rblist);
+void rblist__exit(struct rblist *rblist);
void rblist__delete(struct rblist *rblist);
int rblist__add_node(struct rblist *rblist, const void *new_entry);
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index c7187f067d31..ea070883c593 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -43,7 +43,6 @@
#include "../db-export.h"
#include "../thread-stack.h"
#include "../trace-event.h"
-#include "../machine.h"
#include "../call-path.h"
#include "thread_map.h"
#include "cpumap.h"
@@ -500,6 +499,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
PyLong_FromUnsignedLongLong(sample->time));
pydict_set_item_string_decref(dict_sample, "period",
PyLong_FromUnsignedLongLong(sample->period));
+ pydict_set_item_string_decref(dict_sample, "phys_addr",
+ PyLong_FromUnsignedLongLong(sample->phys_addr));
set_sample_read_in_dict(dict_sample, sample, evsel);
pydict_set_item_string_decref(dict, "sample", dict_sample);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5c412310f266..c71ced7db152 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -27,7 +27,6 @@
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
- struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset);
@@ -107,17 +106,10 @@ static void perf_session__set_comm_exec(struct perf_session *session)
static int ordered_events__deliver_event(struct ordered_events *oe,
struct ordered_event *event)
{
- struct perf_sample sample;
struct perf_session *session = container_of(oe, struct perf_session,
ordered_events);
- int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);
-
- if (ret) {
- pr_err("Can't parse sample, err = %d\n", ret);
- return ret;
- }
- return perf_session__deliver_event(session, event->event, &sample,
+ return perf_session__deliver_event(session, event->event,
session->tool, event->file_offset);
}
@@ -873,9 +865,9 @@ static int process_finished_round(struct perf_tool *tool __maybe_unused,
}
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
- struct perf_sample *sample, u64 file_offset)
+ u64 timestamp, u64 file_offset)
{
- return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
+ return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
@@ -1328,20 +1320,26 @@ static int machines__deliver_event(struct machines *machines,
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
- struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset)
{
+ struct perf_sample sample;
int ret;
- ret = auxtrace__process_event(session, event, sample, tool);
+ ret = perf_evlist__parse_sample(session->evlist, event, &sample);
+ if (ret) {
+ pr_err("Can't parse sample, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = auxtrace__process_event(session, event, &sample, tool);
if (ret < 0)
return ret;
if (ret > 0)
return 0;
return machines__deliver_event(&session->machines, session->evlist,
- event, sample, tool, file_offset);
+ event, &sample, tool, file_offset);
}
static s64 perf_session__process_user_event(struct perf_session *session,
@@ -1350,10 +1348,11 @@ static s64 perf_session__process_user_event(struct perf_session *session,
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
+ struct perf_sample sample = { .time = 0, };
int fd = perf_data__fd(session->data);
int err;
- dump_event(session->evlist, event, file_offset, NULL);
+ dump_event(session->evlist, event, file_offset, &sample);
/* These events are processed right away */
switch (event->header.type) {
@@ -1495,7 +1494,6 @@ static s64 perf_session__process_event(struct perf_session *session,
{
struct perf_evlist *evlist = session->evlist;
struct perf_tool *tool = session->tool;
- struct perf_sample sample;
int ret;
if (session->header.needs_swap)
@@ -1509,21 +1507,19 @@ static s64 perf_session__process_event(struct perf_session *session,
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
return perf_session__process_user_event(session, event, file_offset);
- /*
- * For all kernel events we get the sample data
- */
- ret = perf_evlist__parse_sample(evlist, event, &sample);
- if (ret)
- return ret;
-
if (tool->ordered_events) {
- ret = perf_session__queue_event(session, event, &sample, file_offset);
+ u64 timestamp = -1ULL;
+
+ ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
+ if (ret && ret != -1)
+ return ret;
+
+ ret = perf_session__queue_event(session, event, timestamp, file_offset);
if (ret != -ETIME)
return ret;
}
- return perf_session__deliver_event(session, event, &sample, tool,
- file_offset);
+ return perf_session__deliver_event(session, event, tool, file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
@@ -1777,7 +1773,8 @@ done:
err = perf_session__flush_thread_stacks(session);
out_err:
free(buf);
- perf_session__warn_about_errors(session);
+ if (!tool->no_warn)
+ perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
auxtrace__free_events(session);
return err;
@@ -1933,7 +1930,8 @@ out:
err = perf_session__flush_thread_stacks(session);
out_err:
ui_progress__finish();
- perf_session__warn_about_errors(session);
+ if (!tool->no_warn)
+ perf_session__warn_about_errors(session);
/*
* We may switching perf.data output, make ordered_events
* reusable.
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index da1434a7c120..da40b4b380ca 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -53,7 +53,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
int perf_session__process_events(struct perf_session *session);
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
- struct perf_sample *sample, u64 file_offset);
+ u64 timestamp, u64 file_offset);
void perf_tool__fill_defaults(struct perf_tool *tool);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index a00eacdf02ed..2da4d0456a03 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -336,7 +336,7 @@ char *hist_entry__get_srcline(struct hist_entry *he)
return SRCLINE_UNKNOWN;
return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
- he->ms.sym, true, true);
+ he->ms.sym, true, true, he->ip);
}
static int64_t
@@ -380,7 +380,8 @@ sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
left->branch_info->from.al_addr),
left->branch_info->from.sym,
- true, true);
+ true, true,
+ left->branch_info->from.al_addr);
}
if (!right->branch_info->srcline_from) {
struct map *map = right->branch_info->from.map;
@@ -391,7 +392,8 @@ sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
right->branch_info->from.al_addr),
right->branch_info->from.sym,
- true, true);
+ true, true,
+ right->branch_info->from.al_addr);
}
return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}
@@ -423,7 +425,8 @@ sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
left->branch_info->to.al_addr),
left->branch_info->from.sym,
- true, true);
+ true, true,
+ left->branch_info->to.al_addr);
}
if (!right->branch_info->srcline_to) {
struct map *map = right->branch_info->to.map;
@@ -434,7 +437,8 @@ sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
right->branch_info->to.al_addr),
right->branch_info->to.sym,
- true, true);
+ true, true,
+ right->branch_info->to.al_addr);
}
return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}
@@ -465,7 +469,7 @@ static char *hist_entry__get_srcfile(struct hist_entry *e)
return no_srcfile;
sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
- e->ms.sym, false, true, true);
+ e->ms.sym, false, true, true, e->ip);
if (!strcmp(sf, SRCLINE_UNKNOWN))
return no_srcfile;
p = strchr(sf, ':');
@@ -2883,10 +2887,10 @@ static int setup_output_list(struct perf_hpp_list *list, char *str)
tok; tok = strtok_r(NULL, ", ", &tmp)) {
ret = output_field_add(list, tok);
if (ret == -EINVAL) {
- pr_err("Invalid --fields key: `%s'", tok);
+ ui__error("Invalid --fields key: `%s'", tok);
break;
} else if (ret == -ESRCH) {
- pr_err("Unknown --fields key: `%s'", tok);
+ ui__error("Unknown --fields key: `%s'", tok);
break;
}
}
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index d19f05c56de6..3c21fd059b64 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -496,7 +496,8 @@ out:
#define A2L_FAIL_LIMIT 123
char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr, bool unwind_inlines)
+ bool show_sym, bool show_addr, bool unwind_inlines,
+ u64 ip)
{
char *file = NULL;
unsigned line = 0;
@@ -536,7 +537,7 @@ out:
if (sym) {
if (asprintf(&srcline, "%s+%" PRIu64, show_sym ? sym->name : "",
- addr - sym->start) < 0)
+ ip - sym->start) < 0)
return SRCLINE_UNKNOWN;
} else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso->short_name, addr) < 0)
return SRCLINE_UNKNOWN;
@@ -550,9 +551,9 @@ void free_srcline(char *srcline)
}
char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr)
+ bool show_sym, bool show_addr, u64 ip)
{
- return __get_srcline(dso, addr, sym, show_sym, show_addr, false);
+ return __get_srcline(dso, addr, sym, show_sym, show_addr, false, ip);
}
struct srcline_node {
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 847b7086182c..b2bb5502fd62 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -11,9 +11,10 @@ struct symbol;
extern bool srcline_full_filename;
char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr);
+ bool show_sym, bool show_addr, u64 ip);
char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr, bool unwind_inlines);
+ bool show_sym, bool show_addr, bool unwind_inlines,
+ u64 ip);
void free_srcline(char *srcline);
/* insert the srcline into the DSO, which will take ownership */
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 855e35cbb1dc..594d14a02b67 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -9,17 +9,6 @@
#include "expr.h"
#include "metricgroup.h"
-enum {
- CTX_BIT_USER = 1 << 0,
- CTX_BIT_KERNEL = 1 << 1,
- CTX_BIT_HV = 1 << 2,
- CTX_BIT_HOST = 1 << 3,
- CTX_BIT_IDLE = 1 << 4,
- CTX_BIT_MAX = 1 << 5,
-};
-
-#define NUM_CTX CTX_BIT_MAX
-
/*
* AGGR_GLOBAL: Use CPU 0
* AGGR_SOCKET: Use first CPU of socket
@@ -27,36 +16,18 @@ enum {
* AGGR_NONE: Use matching CPU
* AGGR_THREAD: Not supported?
*/
-static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
-static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_total_slots[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_smi_num_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_aperf_stats[NUM_CTX][MAX_NR_CPUS];
-static struct rblist runtime_saved_values;
static bool have_frontend_stalled;
+struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;
struct saved_value {
struct rb_node rb_node;
struct perf_evsel *evsel;
+ enum stat_type type;
+ int ctx;
int cpu;
+ struct runtime_stat *stat;
struct stats stats;
};
@@ -69,6 +40,30 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
if (a->cpu != b->cpu)
return a->cpu - b->cpu;
+
+ /*
+ * Previously the rbtree was used to link generic metrics.
+ * The keys were evsel/cpu. Now the rbtree is extended to support
+ * per-thread shadow stats. In the shadow stats case, the keys
+ * are cpu/type/ctx/stat (evsel is NULL). In the generic metrics
+ * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
+ */
+ if (a->type != b->type)
+ return a->type - b->type;
+
+ if (a->ctx != b->ctx)
+ return a->ctx - b->ctx;
+
+ if (a->evsel == NULL && b->evsel == NULL) {
+ if (a->stat == b->stat)
+ return 0;
+
+ if ((char *)a->stat < (char *)b->stat)
+ return -1;
+
+ return 1;
+ }
+
if (a->evsel == b->evsel)
return 0;
if ((char *)a->evsel < (char *)b->evsel)
@@ -87,34 +82,66 @@ static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
return &nd->rb_node;
}
+static void saved_value_delete(struct rblist *rblist __maybe_unused,
+ struct rb_node *rb_node)
+{
+ struct saved_value *v;
+
+ BUG_ON(!rb_node);
+ v = container_of(rb_node, struct saved_value, rb_node);
+ free(v);
+}
+
static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
int cpu,
- bool create)
+ bool create,
+ enum stat_type type,
+ int ctx,
+ struct runtime_stat *st)
{
+ struct rblist *rblist;
struct rb_node *nd;
struct saved_value dm = {
.cpu = cpu,
.evsel = evsel,
+ .type = type,
+ .ctx = ctx,
+ .stat = st,
};
- nd = rblist__find(&runtime_saved_values, &dm);
+
+ rblist = &st->value_list;
+
+ nd = rblist__find(rblist, &dm);
if (nd)
return container_of(nd, struct saved_value, rb_node);
if (create) {
- rblist__add_node(&runtime_saved_values, &dm);
- nd = rblist__find(&runtime_saved_values, &dm);
+ rblist__add_node(rblist, &dm);
+ nd = rblist__find(rblist, &dm);
if (nd)
return container_of(nd, struct saved_value, rb_node);
}
return NULL;
}
+void runtime_stat__init(struct runtime_stat *st)
+{
+ struct rblist *rblist = &st->value_list;
+
+ rblist__init(rblist);
+ rblist->node_cmp = saved_value_cmp;
+ rblist->node_new = saved_value_new;
+ rblist->node_delete = saved_value_delete;
+}
+
+void runtime_stat__exit(struct runtime_stat *st)
+{
+ rblist__exit(&st->value_list);
+}
+
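/*
 * Lifecycle sketch (hypothetical per-thread user): instead of the
 * global rt_stat, a caller may own a private runtime_stat; 'counter',
 * 'count' and 'cpu' are made-up locals.
 */
struct runtime_stat st;

runtime_stat__init(&st);
perf_stat__update_shadow_stats(counter, count, cpu, &st);
/* ... print / aggregate ... */
runtime_stat__exit(&st);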
void perf_stat__init_shadow_stats(void)
{
have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
- rblist__init(&runtime_saved_values);
- runtime_saved_values.node_cmp = saved_value_cmp;
- runtime_saved_values.node_new = saved_value_new;
- /* No delete for now */
+ runtime_stat__init(&rt_stat);
}
static int evsel_context(struct perf_evsel *evsel)
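
A runtime_stat is just an rblist of saved_value nodes: runtime_stat__init() wires the compare/new/delete callbacks, runtime_stat__exit() tears the tree down. A hedged usage sketch for the per-thread case this series enables (declarations from util/stat.h assumed, error handling elided, thread count illustrative):

	struct runtime_stat *stats;
	int i, nthreads = 4;			/* illustrative */

	stats = calloc(nthreads, sizeof(*stats));
	for (i = 0; i < nthreads; i++)
		runtime_stat__init(&stats[i]);

	/* ... perf_stat__update_shadow_stats(evsel, val, 0, &stats[thread])
	 *     while processing counter values ... */

	for (i = 0; i < nthreads; i++)
		runtime_stat__exit(&stats[i]);
	free(stats);
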
@@ -135,36 +162,13 @@ static int evsel_context(struct perf_evsel *evsel)
return ctx;
}
-void perf_stat__reset_shadow_stats(void)
+static void reset_stat(struct runtime_stat *st)
{
+ struct rblist *rblist;
struct rb_node *pos, *next;
- memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
- memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
- memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
- memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
- memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
- memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
- memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
- memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
- memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
- memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
- memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
- memset(runtime_cycles_in_tx_stats, 0,
- sizeof(runtime_cycles_in_tx_stats));
- memset(runtime_transaction_stats, 0,
- sizeof(runtime_transaction_stats));
- memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
- memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
- memset(runtime_topdown_total_slots, 0, sizeof(runtime_topdown_total_slots));
- memset(runtime_topdown_slots_retired, 0, sizeof(runtime_topdown_slots_retired));
- memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
- memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
- memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));
- memset(runtime_smi_num_stats, 0, sizeof(runtime_smi_num_stats));
- memset(runtime_aperf_stats, 0, sizeof(runtime_aperf_stats));
-
- next = rb_first(&runtime_saved_values.entries);
+ rblist = &st->value_list;
+ next = rb_first(&rblist->entries);
while (next) {
pos = next;
next = rb_next(pos);
@@ -174,13 +178,35 @@ void perf_stat__reset_shadow_stats(void)
}
}
+void perf_stat__reset_shadow_stats(void)
+{
+ reset_stat(&rt_stat);
+ memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
+}
+
+void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
+{
+ reset_stat(st);
+}
+
+static void update_runtime_stat(struct runtime_stat *st,
+ enum stat_type type,
+ int ctx, int cpu, u64 count)
+{
+ struct saved_value *v = saved_value_lookup(NULL, cpu, true,
+ type, ctx, st);
+
+ if (v)
+ update_stats(&v->stats, count);
+}
+
/*
* Update various tracking values we maintain to print
* more semantic information such as miss/hit ratios,
* instruction rates, etc:
*/
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
- int cpu)
+ int cpu, struct runtime_stat *st)
{
int ctx = evsel_context(counter);
@@ -188,50 +214,58 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
- update_stats(&runtime_nsecs_stats[cpu], count);
+ update_runtime_stat(st, STAT_NSECS, 0, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
- update_stats(&runtime_cycles_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
- update_stats(&runtime_transaction_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, ELISION_START))
- update_stats(&runtime_elision_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
- update_stats(&runtime_topdown_total_slots[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
- update_stats(&runtime_topdown_slots_issued[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
- update_stats(&runtime_topdown_slots_retired[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
- update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
- update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
+ ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
- update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
+ ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
- update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
+ ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- update_stats(&runtime_branches_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
- update_stats(&runtime_cacherefs_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
- update_stats(&runtime_l1_dcache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
- update_stats(&runtime_l1_icache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
- update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
- update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
- update_stats(&runtime_itlb_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, SMI_NUM))
- update_stats(&runtime_smi_num_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, APERF))
- update_stats(&runtime_aperf_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_APERF, ctx, cpu, count);
if (counter->collect_stat) {
- struct saved_value *v = saved_value_lookup(counter, cpu, true);
+ struct saved_value *v = saved_value_lookup(counter, cpu, true,
+ STAT_NONE, 0, st);
update_stats(&v->stats, count);
}
}
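
Each branch above folds the count into one (type, ctx, cpu) node, creating it on first use. The folding itself is perf's update_stats(), which keeps a Welford-style running mean plus a sum of squared deviations; a simplified standalone sketch of that arithmetic (field names mirror struct stats):

	struct stats_sketch {
		double n;
		double mean;
		double M2;	/* sum of squared deviations from the mean */
	};

	static void update_stats_sketch(struct stats_sketch *s, unsigned long long val)
	{
		double delta;

		s->n += 1.0;
		delta = (double)val - s->mean;
		s->mean += delta / s->n;
		s->M2 += delta * ((double)val - s->mean);
	}
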
@@ -352,15 +386,40 @@ void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
}
}
+static double runtime_stat_avg(struct runtime_stat *st,
+ enum stat_type type, int ctx, int cpu)
+{
+ struct saved_value *v;
+
+ v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+ if (!v)
+ return 0.0;
+
+ return avg_stats(&v->stats);
+}
+
+static double runtime_stat_n(struct runtime_stat *st,
+ enum stat_type type, int ctx, int cpu)
+{
+ struct saved_value *v;
+
+ v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+ if (!v)
+ return 0.0;
+
+ return v->stats.n;
+}
+
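
On the read side the lookup does not create nodes: both helpers return 0.0 for a key that was never updated, so every printer below guards its division. The recurring pattern, as a hedged sketch (st, ctx and cpu as in the printers):

	double avg = 1e6;	/* illustrative counter average */
	double total, ratio = 0.0;

	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
	if (total)		/* 0.0 means "never updated", so never divide */
		ratio = avg / total * 100.0;
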
static void print_stalled_cycles_frontend(int cpu,
struct perf_evsel *evsel, double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -376,13 +435,14 @@ static void print_stalled_cycles_frontend(int cpu,
static void print_stalled_cycles_backend(int cpu,
struct perf_evsel *evsel, double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -395,13 +455,14 @@ static void print_stalled_cycles_backend(int cpu,
static void print_branch_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_branches_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -414,13 +475,15 @@ static void print_branch_misses(int cpu,
static void print_l1_dcache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -433,13 +496,15 @@ static void print_l1_dcache_misses(int cpu,
static void print_l1_icache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -451,13 +516,14 @@ static void print_l1_icache_misses(int cpu,
static void print_dtlb_cache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -469,13 +535,14 @@ static void print_dtlb_cache_misses(int cpu,
static void print_itlb_cache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -487,13 +554,14 @@ static void print_itlb_cache_misses(int cpu,
static void print_ll_cache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -551,68 +619,72 @@ static double sanitize_val(double x)
return x;
}
-static double td_total_slots(int ctx, int cpu)
+static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
{
- return avg_stats(&runtime_topdown_total_slots[ctx][cpu]);
+ return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
}
-static double td_bad_spec(int ctx, int cpu)
+static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
{
double bad_spec = 0;
double total_slots;
double total;
- total = avg_stats(&runtime_topdown_slots_issued[ctx][cpu]) -
- avg_stats(&runtime_topdown_slots_retired[ctx][cpu]) +
- avg_stats(&runtime_topdown_recovery_bubbles[ctx][cpu]);
- total_slots = td_total_slots(ctx, cpu);
+ total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
+ runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
+ runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);
+
+ total_slots = td_total_slots(ctx, cpu, st);
if (total_slots)
bad_spec = total / total_slots;
return sanitize_val(bad_spec);
}
-static double td_retiring(int ctx, int cpu)
+static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
{
double retiring = 0;
- double total_slots = td_total_slots(ctx, cpu);
- double ret_slots = avg_stats(&runtime_topdown_slots_retired[ctx][cpu]);
+ double total_slots = td_total_slots(ctx, cpu, st);
+ double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
+ ctx, cpu);
if (total_slots)
retiring = ret_slots / total_slots;
return retiring;
}
-static double td_fe_bound(int ctx, int cpu)
+static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
{
double fe_bound = 0;
- double total_slots = td_total_slots(ctx, cpu);
- double fetch_bub = avg_stats(&runtime_topdown_fetch_bubbles[ctx][cpu]);
+ double total_slots = td_total_slots(ctx, cpu, st);
+ double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
+ ctx, cpu);
if (total_slots)
fe_bound = fetch_bub / total_slots;
return fe_bound;
}
-static double td_be_bound(int ctx, int cpu)
+static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
{
- double sum = (td_fe_bound(ctx, cpu) +
- td_bad_spec(ctx, cpu) +
- td_retiring(ctx, cpu));
+ double sum = (td_fe_bound(ctx, cpu, st) +
+ td_bad_spec(ctx, cpu, st) +
+ td_retiring(ctx, cpu, st));
if (sum == 0)
return 0;
return sanitize_val(1.0 - sum);
}
static void print_smi_cost(int cpu, struct perf_evsel *evsel,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double smi_num, aperf, cycles, cost = 0.0;
int ctx = evsel_context(evsel);
const char *color = NULL;
- smi_num = avg_stats(&runtime_smi_num_stats[ctx][cpu]);
- aperf = avg_stats(&runtime_aperf_stats[ctx][cpu]);
- cycles = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
+ aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
+ cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if ((cycles == 0) || (aperf == 0))
return;
@@ -632,7 +704,8 @@ static void generic_metric(const char *metric_expr,
const char *metric_name,
double avg,
int cpu,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
print_metric_t print_metric = out->print_metric;
struct parse_ctx pctx;
@@ -651,7 +724,8 @@ static void generic_metric(const char *metric_expr,
stats = &walltime_nsecs_stats;
scale = 1e-9;
} else {
- v = saved_value_lookup(metric_events[i], cpu, false);
+ v = saved_value_lookup(metric_events[i], cpu, false,
+ STAT_NONE, 0, st);
if (!v)
break;
stats = &v->stats;
@@ -679,7 +753,8 @@ static void generic_metric(const char *metric_expr,
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
double avg, int cpu,
struct perf_stat_output_ctx *out,
- struct rblist *metric_events)
+ struct rblist *metric_events,
+ struct runtime_stat *st)
{
void *ctxp = out->ctx;
print_metric_t print_metric = out->print_metric;
@@ -690,7 +765,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
int num = 1;
if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+
if (total) {
ratio = avg / total;
print_metric(ctxp, NULL, "%7.2f ",
@@ -698,8 +774,13 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
} else {
print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
}
- total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
- total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));
+
+ total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
+ ctx, cpu);
+
+ total = max(total, runtime_stat_avg(st,
+ STAT_STALLED_CYCLES_BACK,
+ ctx, cpu));
if (total && avg) {
out->new_line(ctxp);
@@ -712,8 +793,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
"stalled cycles per insn", 0);
}
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
- if (runtime_branches_stats[ctx][cpu].n != 0)
- print_branch_misses(cpu, evsel, avg, out);
+ if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
+ print_branch_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all branches", 0);
} else if (
@@ -721,8 +802,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
- print_l1_dcache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
+ print_l1_dcache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
} else if (
@@ -730,8 +812,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_l1_icache_stats[ctx][cpu].n != 0)
- print_l1_icache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
+ print_l1_icache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
} else if (
@@ -739,8 +822,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
- print_dtlb_cache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
+ print_dtlb_cache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
} else if (
@@ -748,8 +832,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
- print_itlb_cache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
+ print_itlb_cache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
} else if (
@@ -757,27 +842,28 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_ll_cache_stats[ctx][cpu].n != 0)
- print_ll_cache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
+ print_ll_cache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
- total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
if (total)
ratio = avg * 100 / total;
- if (runtime_cacherefs_stats[ctx][cpu].n != 0)
+ if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
print_metric(ctxp, NULL, "%8.3f %%",
"of all cache refs", ratio);
else
print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
- print_stalled_cycles_frontend(cpu, evsel, avg, out);
+ print_stalled_cycles_frontend(cpu, evsel, avg, out, st);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
- print_stalled_cycles_backend(cpu, evsel, avg, out);
+ print_stalled_cycles_backend(cpu, evsel, avg, out, st);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
- total = avg_stats(&runtime_nsecs_stats[cpu]);
+ total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
if (total) {
ratio = avg / total;
@@ -786,7 +872,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
print_metric(ctxp, NULL, NULL, "Ghz", 0);
}
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+
if (total)
print_metric(ctxp, NULL,
"%7.2f%%", "transactional cycles",
@@ -795,8 +882,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
print_metric(ctxp, NULL, NULL, "transactional cycles",
0);
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
- total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+ total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);
+
if (total2 < avg)
total2 = avg;
if (total)
@@ -805,19 +893,21 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
else
print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
- total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
+ ctx, cpu);
if (avg)
ratio = total / avg;
- if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
+ if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
print_metric(ctxp, NULL, "%8.0f",
"cycles / transaction", ratio);
else
print_metric(ctxp, NULL, NULL, "cycles / transaction",
- 0);
+ 0);
} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
- total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
+ ctx, cpu);
if (avg)
ratio = total / avg;
@@ -831,28 +921,28 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
else
print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
- double fe_bound = td_fe_bound(ctx, cpu);
+ double fe_bound = td_fe_bound(ctx, cpu, st);
if (fe_bound > 0.2)
color = PERF_COLOR_RED;
print_metric(ctxp, color, "%8.1f%%", "frontend bound",
fe_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
- double retiring = td_retiring(ctx, cpu);
+ double retiring = td_retiring(ctx, cpu, st);
if (retiring > 0.7)
color = PERF_COLOR_GREEN;
print_metric(ctxp, color, "%8.1f%%", "retiring",
retiring * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
- double bad_spec = td_bad_spec(ctx, cpu);
+ double bad_spec = td_bad_spec(ctx, cpu, st);
if (bad_spec > 0.1)
color = PERF_COLOR_RED;
print_metric(ctxp, color, "%8.1f%%", "bad speculation",
bad_spec * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
- double be_bound = td_be_bound(ctx, cpu);
+ double be_bound = td_be_bound(ctx, cpu, st);
const char *name = "backend bound";
static int have_recovery_bubbles = -1;
@@ -865,19 +955,19 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
if (be_bound > 0.2)
color = PERF_COLOR_RED;
- if (td_total_slots(ctx, cpu) > 0)
+ if (td_total_slots(ctx, cpu, st) > 0)
print_metric(ctxp, color, "%8.1f%%", name,
be_bound * 100.);
else
print_metric(ctxp, NULL, NULL, name, 0);
} else if (evsel->metric_expr) {
generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
- evsel->metric_name, avg, cpu, out);
- } else if (runtime_nsecs_stats[cpu].n != 0) {
+ evsel->metric_name, avg, cpu, out, st);
+ } else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
char unit = 'M';
char unit_buf[10];
- total = avg_stats(&runtime_nsecs_stats[cpu]);
+ total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
if (total)
ratio = 1000.0 * avg / total;
@@ -888,7 +978,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
- print_smi_cost(cpu, evsel, out);
+ print_smi_cost(cpu, evsel, out, st);
} else {
num = 0;
}
@@ -901,7 +991,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
out->new_line(ctxp);
generic_metric(mexp->metric_expr, mexp->metric_events,
evsel->name, mexp->metric_name,
- avg, cpu, out);
+ avg, cpu, out, st);
}
}
if (num == 0)
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 151e9efd7286..32235657c1ac 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -278,9 +278,16 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
perf_evsel__compute_deltas(evsel, cpu, thread, count);
perf_counts_values__scale(count, config->scale, NULL);
if (config->aggr_mode == AGGR_NONE)
- perf_stat__update_shadow_stats(evsel, count->val, cpu);
- if (config->aggr_mode == AGGR_THREAD)
- perf_stat__update_shadow_stats(evsel, count->val, 0);
+ perf_stat__update_shadow_stats(evsel, count->val, cpu,
+ &rt_stat);
+ if (config->aggr_mode == AGGR_THREAD) {
+ if (config->stats)
+ perf_stat__update_shadow_stats(evsel,
+ count->val, 0, &config->stats[thread]);
+ else
+ perf_stat__update_shadow_stats(evsel,
+ count->val, 0, &rt_stat);
+ }
break;
case AGGR_GLOBAL:
aggr->val += count->val;
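
With AGGR_THREAD the update is steered into the per-thread array when the caller allocated one; thread indexes config->stats, which callers size to the thread map (config->stats_num). An equivalent hedged sketch, with a bounds check added here purely for illustration (the patch leaves sizing to the caller):

	struct runtime_stat *st = &rt_stat;	/* default: global shadow stats */

	if (config->aggr_mode == AGGR_THREAD &&
	    config->stats && thread < config->stats_num)
		st = &config->stats[thread];

	perf_stat__update_shadow_stats(evsel, count->val, 0, st);
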
@@ -362,7 +369,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
/*
* Save the full runtime - to allow normalization during printout:
*/
- perf_stat__update_shadow_stats(counter, *count, 0);
+ perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);
return 0;
}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index eefca5c981fd..dbc6f7134f61 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <stdio.h>
#include "xyarray.h"
+#include "rblist.h"
struct stats
{
@@ -43,11 +44,54 @@ enum aggr_mode {
AGGR_UNSET,
};
+enum {
+ CTX_BIT_USER = 1 << 0,
+ CTX_BIT_KERNEL = 1 << 1,
+ CTX_BIT_HV = 1 << 2,
+ CTX_BIT_HOST = 1 << 3,
+ CTX_BIT_IDLE = 1 << 4,
+ CTX_BIT_MAX = 1 << 5,
+};
+
+#define NUM_CTX CTX_BIT_MAX
+
+enum stat_type {
+ STAT_NONE = 0,
+ STAT_NSECS,
+ STAT_CYCLES,
+ STAT_STALLED_CYCLES_FRONT,
+ STAT_STALLED_CYCLES_BACK,
+ STAT_BRANCHES,
+ STAT_CACHEREFS,
+ STAT_L1_DCACHE,
+ STAT_L1_ICACHE,
+ STAT_LL_CACHE,
+ STAT_ITLB_CACHE,
+ STAT_DTLB_CACHE,
+ STAT_CYCLES_IN_TX,
+ STAT_TRANSACTION,
+ STAT_ELISION,
+ STAT_TOPDOWN_TOTAL_SLOTS,
+ STAT_TOPDOWN_SLOTS_ISSUED,
+ STAT_TOPDOWN_SLOTS_RETIRED,
+ STAT_TOPDOWN_FETCH_BUBBLES,
+ STAT_TOPDOWN_RECOVERY_BUBBLES,
+ STAT_SMI_NUM,
+ STAT_APERF,
+ STAT_MAX
+};
+
+struct runtime_stat {
+ struct rblist value_list;
+};
+
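
The ctx index used throughout the shadow-stats code is composed from these bits according to the event's exclude_* attributes; a hedged sketch mirroring evsel_context() in stat-shadow.c (the bare exclude_* flags stand in for evsel->attr fields):

	int ctx = 0;

	if (exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (exclude_user)
		ctx |= CTX_BIT_USER;
	if (exclude_hv)
		ctx |= CTX_BIT_HV;
	/* ... likewise CTX_BIT_HOST and CTX_BIT_IDLE; NUM_CTX (== CTX_BIT_MAX)
	 * bounds any table indexed by ctx. */
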
struct perf_stat_config {
enum aggr_mode aggr_mode;
bool scale;
FILE *output;
unsigned int interval;
+ struct runtime_stat *stats;
+ int stats_num;
};
void update_stats(struct stats *stats, u64 val);
@@ -67,6 +111,15 @@ static inline void init_stats(struct stats *stats)
struct perf_evsel;
struct perf_evlist;
+struct perf_aggr_thread_value {
+ struct perf_evsel *counter;
+ int id;
+ double uval;
+ u64 val;
+ u64 run;
+ u64 ena;
+};
+
bool __perf_evsel_stat__is(struct perf_evsel *evsel,
enum perf_stat_evsel_id id);
@@ -75,16 +128,20 @@ bool __perf_evsel_stat__is(struct perf_evsel *evsel,
void perf_stat_evsel_id_init(struct perf_evsel *evsel);
+extern struct runtime_stat rt_stat;
extern struct stats walltime_nsecs_stats;
typedef void (*print_metric_t)(void *ctx, const char *color, const char *unit,
const char *fmt, double val);
typedef void (*new_line_t )(void *ctx);
+void runtime_stat__init(struct runtime_stat *st);
+void runtime_stat__exit(struct runtime_stat *st);
void perf_stat__init_shadow_stats(void);
void perf_stat__reset_shadow_stats(void);
+void perf_stat__reset_shadow_per_stat(struct runtime_stat *st);
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
- int cpu);
+ int cpu, struct runtime_stat *st);
struct perf_stat_output_ctx {
void *ctx;
print_metric_t print_metric;
@@ -92,11 +149,11 @@ struct perf_stat_output_ctx {
bool force_header;
};
-struct rblist;
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
double avg, int cpu,
struct perf_stat_output_ctx *out,
- struct rblist *metric_events);
+ struct rblist *metric_events,
+ struct runtime_stat *st);
void perf_stat__collect_metric_expr(struct perf_evlist *);
int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index aaa08ee8c717..d8bfd0c4d2cb 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -396,3 +396,49 @@ out_err_overflow:
free(expr);
return NULL;
}
+
+/* Like strpbrk(), but does not stop at a character right after a backslash (escaped) */
+char *strpbrk_esc(char *str, const char *stopset)
+{
+ char *ptr;
+
+ do {
+ ptr = strpbrk(str, stopset);
+ if (ptr == str ||
+ (ptr == str + 1 && *(ptr - 1) != '\\'))
+ break;
+ str = ptr + 1;
+ } while (ptr && *(ptr - 1) == '\\' && *(ptr - 2) != '\\');
+
+ return ptr;
+}
+
+/* Like strdup(), but does not copy single (escaping) backslashes */
+char *strdup_esc(const char *str)
+{
+ char *s, *d, *p, *ret = strdup(str);
+
+ if (!ret)
+ return NULL;
+
+ d = strchr(ret, '\\');
+ if (!d)
+ return ret;
+
+ s = d + 1;
+ do {
+ if (*s == '\0') {
+ *d = '\0';
+ break;
+ }
+ p = strchr(s + 1, '\\');
+ if (p) {
+ memmove(d, s, p - s);
+ d += p - s;
+ s = p + 1;
+ } else
+ memmove(d, s, strlen(s) + 1);
+ } while (p);
+
+ return ret;
+}
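
Illustrative usage of the two helpers (not part of the patch; assumes the functions above are linked in and declared as in util/string2.h):

	#include <stdio.h>
	#include <stdlib.h>

	char *strpbrk_esc(char *str, const char *stopset);
	char *strdup_esc(const char *str);

	int main(void)
	{
		char buf[] = "a\\;b;c";			/* the file contains: a\;b;c */
		char *sep = strpbrk_esc(buf, ";");	/* skips the escaped ';', stops before 'c' */
		char *clean = strdup_esc("a\\;b");	/* escaping backslash dropped: "a;b" */

		printf("%s %s\n", sep, clean);		/* prints: ;c a;b */
		free(clean);
		return 0;
	}
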
diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
index ee14ca5451ab..4c68a09b97e8 100644
--- a/tools/perf/util/string2.h
+++ b/tools/perf/util/string2.h
@@ -39,5 +39,7 @@ static inline char *asprintf_expr_not_in_ints(const char *var, size_t nints, int
return asprintf_expr_inout_ints(var, false, nints, ints);
}
+char *strpbrk_esc(char *str, const char *stopset);
+char *strdup_esc(const char *str);
#endif /* PERF_STRING_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1b67a8639dfe..cc065d4bfafc 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -94,6 +94,11 @@ static int prefix_underscores_count(const char *str)
return tail - str;
}
+const char * __weak arch__normalize_symbol_name(const char *name)
+{
+ return name;
+}
+
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
return strcmp(namea, nameb);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index a4f0075b4e5c..0563f33c1eb3 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -349,6 +349,7 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
void arch__sym_update(struct symbol *s, GElf_Sym *sym);
#endif
+const char *arch__normalize_symbol_name(const char *name);
#define SYMBOL_A 0
#define SYMBOL_B 1
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 6eea7cff3d4e..303bdb84ab5a 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -26,6 +26,10 @@
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_x86_64_MAX_ID;
static const char **syscalltbl_native = syscalltbl_x86_64;
+#elif defined(__s390x__)
+#include <asm/syscalls_64.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_S390_64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_s390_64;
#endif
struct syscall {
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index 446aa7a56f25..6ef01a83b24e 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -64,6 +64,11 @@ static inline bool target__none(struct target *target)
return !target__has_task(target) && !target__has_cpu(target);
}
+static inline bool target__has_per_thread(struct target *target)
+{
+ return target->system_wide && target->per_thread;
+}
+
static inline bool target__uses_dummy_map(struct target *target)
{
bool use_dummy = false;
@@ -73,6 +78,8 @@ static inline bool target__uses_dummy_map(struct target *target)
else if (target__has_task(target) ||
(!target__has_cpu(target) && !target->uses_mmap))
use_dummy = true;
+ else if (target__has_per_thread(target))
+ use_dummy = true;
return use_dummy;
}
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index be0d5a736dea..3e1038f6491c 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -92,7 +92,7 @@ struct thread_map *thread_map__new_by_tid(pid_t tid)
return threads;
}
-struct thread_map *thread_map__new_by_uid(uid_t uid)
+static struct thread_map *__thread_map__new_all_cpus(uid_t uid)
{
DIR *proc;
int max_threads = 32, items, i;
@@ -113,7 +113,6 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
while ((dirent = readdir(proc)) != NULL) {
char *end;
bool grow = false;
- struct stat st;
pid_t pid = strtol(dirent->d_name, &end, 10);
if (*end) /* only interested in proper numerical dirents */
@@ -121,11 +120,12 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
- if (stat(path, &st) != 0)
- continue;
+ if (uid != UINT_MAX) {
+ struct stat st;
- if (st.st_uid != uid)
- continue;
+ if (stat(path, &st) != 0 || st.st_uid != uid)
+ continue;
+ }
snprintf(path, sizeof(path), "/proc/%d/task", pid);
items = scandir(path, &namelist, filter, NULL);
@@ -178,6 +178,16 @@ out_free_closedir:
goto out_closedir;
}
+struct thread_map *thread_map__new_all_cpus(void)
+{
+ return __thread_map__new_all_cpus(UINT_MAX);
+}
+
+struct thread_map *thread_map__new_by_uid(uid_t uid)
+{
+ return __thread_map__new_all_cpus(uid);
+}
+
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
if (pid != -1)
@@ -313,7 +323,7 @@ out_free_threads:
}
struct thread_map *thread_map__new_str(const char *pid, const char *tid,
- uid_t uid)
+ uid_t uid, bool per_thread)
{
if (pid)
return thread_map__new_by_pid_str(pid);
@@ -321,6 +331,9 @@ struct thread_map *thread_map__new_str(const char *pid, const char *tid,
if (!tid && uid != UINT_MAX)
return thread_map__new_by_uid(uid);
+ if (per_thread)
+ return thread_map__new_all_cpus();
+
return thread_map__new_by_tid_str(tid);
}
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index f15803985435..0a806b99e73c 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -23,6 +23,7 @@ struct thread_map *thread_map__new_dummy(void);
struct thread_map *thread_map__new_by_pid(pid_t pid);
struct thread_map *thread_map__new_by_tid(pid_t tid);
struct thread_map *thread_map__new_by_uid(uid_t uid);
+struct thread_map *thread_map__new_all_cpus(void);
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid);
struct thread_map *thread_map__new_event(struct thread_map_event *event);
@@ -30,7 +31,7 @@ struct thread_map *thread_map__get(struct thread_map *map);
void thread_map__put(struct thread_map *map);
struct thread_map *thread_map__new_str(const char *pid,
- const char *tid, uid_t uid);
+ const char *tid, uid_t uid, bool per_thread);
struct thread_map *thread_map__new_by_tid_str(const char *tid_str);
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 81927d027417..6193b46050a5 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -6,6 +6,7 @@
#include <time.h>
#include <errno.h>
#include <inttypes.h>
+#include <math.h>
#include "perf.h"
#include "debug.h"
@@ -60,11 +61,10 @@ static int parse_timestr_sec_nsec(struct perf_time_interval *ptime,
return 0;
}
-int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
+static int split_start_end(char **start, char **end, const char *ostr, char ch)
{
char *start_str, *end_str;
char *d, *str;
- int rc = 0;
if (ostr == NULL || *ostr == '\0')
return 0;
@@ -74,25 +74,35 @@ int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
if (str == NULL)
return -ENOMEM;
- ptime->start = 0;
- ptime->end = 0;
-
- /* str has the format: <start>,<stop>
- * variations: <start>,
- * ,<stop>
- * ,
- */
start_str = str;
- d = strchr(start_str, ',');
+ d = strchr(start_str, ch);
if (d) {
*d = '\0';
++d;
}
end_str = d;
+ *start = start_str;
+ *end = end_str;
+
+ return 0;
+}
+
+int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
+{
+ char *start_str = NULL, *end_str;
+ int rc;
+
+ rc = split_start_end(&start_str, &end_str, ostr, ',');
+ if (rc || !start_str)
+ return rc;
+
+ ptime->start = 0;
+ ptime->end = 0;
+
rc = parse_timestr_sec_nsec(ptime, start_str, end_str);
- free(str);
+ free(start_str);
/* make sure end time is after start time if it was given */
if (rc == 0 && ptime->end && ptime->end < ptime->start)
@@ -104,6 +114,245 @@ int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
return rc;
}
+static int parse_percent(double *pcnt, char *str)
+{
+ char *c, *endptr;
+ double d;
+
+ c = strchr(str, '%');
+ if (c)
+ *c = '\0';
+ else
+ return -1;
+
+ d = strtod(str, &endptr);
+ if (endptr != str + strlen(str))
+ return -1;
+
+ *pcnt = d / 100.0;
+ return 0;
+}
+
+static int percent_slash_split(char *str, struct perf_time_interval *ptime,
+ u64 start, u64 end)
+{
+ char *p, *end_str;
+ double pcnt, start_pcnt, end_pcnt;
+ u64 total = end - start;
+ int i;
+
+ /*
+ * Example:
+ * 10%/2: select the second 10% slice
+ */
+
+ /* We can modify this string since the original one is copied */
+ p = strchr(str, '/');
+ if (!p)
+ return -1;
+
+ *p = '\0';
+ if (parse_percent(&pcnt, str) < 0)
+ return -1;
+
+ p++;
+ i = (int)strtol(p, &end_str, 10);
+ if (*end_str)
+ return -1;
+
+ if (pcnt <= 0.0)
+ return -1;
+
+ start_pcnt = pcnt * (i - 1);
+ end_pcnt = pcnt * i;
+
+ if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
+ end_pcnt < 0.0 || end_pcnt > 1.0) {
+ return -1;
+ }
+
+ ptime->start = start + round(start_pcnt * total);
+ ptime->end = start + round(end_pcnt * total);
+
+ return 0;
+}
+
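
Worked numbers for the slice arithmetic above: "10%/2" yields pcnt = 0.10 and i = 2, so the selected window is [start + 0.1 * total, start + 0.2 * total]. A standalone check with an illustrative 100-second session:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 0, end = 100000000000ULL;	/* ns */
		unsigned long long total = end - start;
		double pcnt = 0.10;	/* "10%" */
		int i = 2;		/* "/2"  */

		printf("slice: [%.0f, %.0f] ns\n",
		       start + round(pcnt * (i - 1) * total),
		       start + round(pcnt * i * total));	/* [10s, 20s] */
		return 0;
	}
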
+static int percent_dash_split(char *str, struct perf_time_interval *ptime,
+ u64 start, u64 end)
+{
+ char *start_str = NULL, *end_str;
+ double start_pcnt, end_pcnt;
+ u64 total = end - start;
+ int ret;
+
+ /*
+ * Example: 0%-10%
+ */
+
+ ret = split_start_end(&start_str, &end_str, str, '-');
+ if (ret || !start_str)
+ return ret;
+
+ if ((parse_percent(&start_pcnt, start_str) != 0) ||
+ (parse_percent(&end_pcnt, end_str) != 0)) {
+ free(start_str);
+ return -1;
+ }
+
+ free(start_str);
+
+ if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
+ end_pcnt < 0.0 || end_pcnt > 1.0 ||
+ start_pcnt > end_pcnt) {
+ return -1;
+ }
+
+ ptime->start = start + round(start_pcnt * total);
+ ptime->end = start + round(end_pcnt * total);
+
+ return 0;
+}
+
+typedef int (*time_percent_split)(char *, struct perf_time_interval *,
+ u64 start, u64 end);
+
+static int percent_comma_split(struct perf_time_interval *ptime_buf, int num,
+ const char *ostr, u64 start, u64 end,
+ time_percent_split func)
+{
+ char *str, *p1, *p2;
+ int len, ret, i = 0;
+
+ str = strdup(ostr);
+ if (str == NULL)
+ return -ENOMEM;
+
+ len = strlen(str);
+ p1 = str;
+
+ while (p1 < str + len) {
+ if (i >= num) {
+ free(str);
+ return -1;
+ }
+
+ p2 = strchr(p1, ',');
+ if (p2)
+ *p2 = '\0';
+
+ ret = (func)(p1, &ptime_buf[i], start, end);
+ if (ret < 0) {
+ free(str);
+ return -1;
+ }
+
+ pr_debug("start time %d: %" PRIu64 ", ", i, ptime_buf[i].start);
+ pr_debug("end time %d: %" PRIu64 "\n", i, ptime_buf[i].end);
+
+ i++;
+
+ if (p2)
+ p1 = p2 + 1;
+ else
+ break;
+ }
+
+ free(str);
+ return i;
+}
+
+static int one_percent_convert(struct perf_time_interval *ptime_buf,
+ const char *ostr, u64 start, u64 end, char *c)
+{
+ char *str;
+ int len = strlen(ostr), ret;
+
+ /*
+ * c points to '%'.
+ * '%' should be the last character
+ */
+ if (ostr + len - 1 != c)
+ return -1;
+
+ /*
+ * Construct a string like "xx%/1"
+ */
+ str = malloc(len + 3);
+ if (str == NULL)
+ return -ENOMEM;
+
+ memcpy(str, ostr, len);
+ strcpy(str + len, "/1");
+
+ ret = percent_slash_split(str, ptime_buf, start, end);
+ if (ret == 0)
+ ret = 1;
+
+ free(str);
+ return ret;
+}
+
+int perf_time__percent_parse_str(struct perf_time_interval *ptime_buf, int num,
+ const char *ostr, u64 start, u64 end)
+{
+ char *c;
+
+ /*
+ * ostr example:
+ * 10%/2,10%/3: select the second 10% slice and the third 10% slice
+ * 0%-10%,30%-40%: multiple time ranges
+ * 50%: a single slice (equivalent to 50%/1)
+ */
+
+ memset(ptime_buf, 0, sizeof(*ptime_buf) * num);
+
+ c = strchr(ostr, '/');
+ if (c) {
+ return percent_comma_split(ptime_buf, num, ostr, start,
+ end, percent_slash_split);
+ }
+
+ c = strchr(ostr, '-');
+ if (c) {
+ return percent_comma_split(ptime_buf, num, ostr, start,
+ end, percent_dash_split);
+ }
+
+ c = strchr(ostr, '%');
+ if (c)
+ return one_percent_convert(ptime_buf, ostr, start, end, c);
+
+ return -1;
+}
+
+struct perf_time_interval *perf_time__range_alloc(const char *ostr, int *size)
+{
+ const char *p1, *p2;
+ int i = 1;
+ struct perf_time_interval *ptime;
+
+ /*
+ * Allocate at least one time range.
+ */
+ if (!ostr)
+ goto alloc;
+
+ p1 = ostr;
+ while (p1 < ostr + strlen(ostr)) {
+ p2 = strchr(p1, ',');
+ if (!p2)
+ break;
+
+ p1 = p2 + 1;
+ i++;
+ }
+
+alloc:
+ *size = i;
+ ptime = calloc(i, sizeof(*ptime));
+ return ptime;
+}
+
bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp)
{
/* if time is not set don't drop sample */
@@ -119,6 +368,34 @@ bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp)
return false;
}
+bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
+ int num, u64 timestamp)
+{
+ struct perf_time_interval *ptime;
+ int i;
+
+ if ((timestamp == 0) || (num == 0))
+ return false;
+
+ if (num == 1)
+ return perf_time__skip_sample(&ptime_buf[0], timestamp);
+
+ /*
+ * start/end of multiple time ranges must be valid.
+ */
+ for (i = 0; i < num; i++) {
+ ptime = &ptime_buf[i];
+
+ if (timestamp >= ptime->start &&
+ ((timestamp < ptime->end && i < num - 1) ||
+ (timestamp <= ptime->end && i == num - 1))) {
+ break;
+ }
+ }
+
+ return i == num;
+}
+
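
Putting the pieces together, a hedged end-to-end sketch of the new percent-range API (perf-internal declarations from util/time-utils.h assumed; note the boundary rule above — interior ranges exclude their end time, only the last range includes it):

	struct perf_time_interval *ptime;
	int size, num;
	u64 first = 0, last = 100000000000ULL;	/* session bounds, illustrative */
	u64 timestamp = 15000000000ULL;		/* sample time, illustrative */

	ptime = perf_time__range_alloc("10%/1,10%/2", &size);	/* size == 2 */
	num = perf_time__percent_parse_str(ptime, size, "10%/1,10%/2",
					   first, last);
	if (num > 0 && !perf_time__ranges_skip_sample(ptime, num, timestamp))
		;	/* sample falls inside one of the selected slices */
	free(ptime);
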
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC;
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
index 15b475c50ccf..70b177d2b98c 100644
--- a/tools/perf/util/time-utils.h
+++ b/tools/perf/util/time-utils.h
@@ -13,8 +13,16 @@ int parse_nsec_time(const char *str, u64 *ptime);
int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr);
+int perf_time__percent_parse_str(struct perf_time_interval *ptime_buf, int num,
+ const char *ostr, u64 start, u64 end);
+
+struct perf_time_interval *perf_time__range_alloc(const char *ostr, int *size);
+
bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp);
+bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
+ int num, u64 timestamp);
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
int fetch_current_timestamp(char *buf, size_t sz);
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 2532b558099b..183c91453522 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -76,6 +76,7 @@ struct perf_tool {
bool ordered_events;
bool ordering_requires_timestamps;
bool namespace_events;
+ bool no_warn;
enum show_feature_header show_feat_hdr;
};
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 7a42f703e858..af873044d33a 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -631,9 +631,8 @@ static unw_accessors_t accessors = {
static int _unwind__prepare_access(struct thread *thread)
{
- if (callchain_param.record_mode != CALLCHAIN_DWARF)
+ if (!dwarf_callchain_users)
return 0;
-
thread->addr_space = unw_create_addr_space(&accessors, 0);
if (!thread->addr_space) {
pr_err("unwind: Can't create unwind address space.\n");
@@ -646,17 +645,15 @@ static int _unwind__prepare_access(struct thread *thread)
static void _unwind__flush_access(struct thread *thread)
{
- if (callchain_param.record_mode != CALLCHAIN_DWARF)
+ if (!dwarf_callchain_users)
return;
-
unw_flush_cache(thread->addr_space, 0, 0);
}
static void _unwind__finish_access(struct thread *thread)
{
- if (callchain_param.record_mode != CALLCHAIN_DWARF)
+ if (!dwarf_callchain_users)
return;
-
unw_destroy_addr_space(thread->addr_space);
}
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 647a1e6b4c7b..b029a5e9ae49 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -3,7 +3,7 @@
#include "thread.h"
#include "session.h"
#include "debug.h"
-#include "arch/common.h"
+#include "env.h"
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
@@ -39,7 +39,7 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
if (dso_type == DSO__TYPE_UNKNOWN)
return 0;
- arch = normalize_arch(thread->mg->machine->env->arch);
+ arch = perf_env__arch(thread->mg->machine->env);
if (!strcmp(arch, "x86")) {
if (dso_type != DSO__TYPE_64BIT)
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index a789f952b3e9..443892dabedb 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -210,7 +210,7 @@ static int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64
size -= ret;
off_in += ret;
- off_out -= ret;
+ off_out += ret;
}
munmap(ptr, off_in + size);
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 01434509c2e9..9496365da3d7 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -68,4 +68,14 @@ extern bool perf_singlethreaded;
void perf_set_singlethreaded(void);
void perf_set_multithreaded(void);
+#ifndef O_CLOEXEC
+#ifdef __sparc__
+#define O_CLOEXEC 0x400000
+#elif defined(__alpha__) || defined(__hppa__)
+#define O_CLOEXEC 010000000
+#else
+#define O_CLOEXEC 02000000
+#endif
+#endif
+
#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 22c3b4ee1617..be418fba9441 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -79,7 +79,7 @@ struct ap_dump_action action_table[AP_MAX_ACTIONS];
u32 current_action = 0;
#define AP_UTILITY_NAME "ACPI Binary Table Dump Utility"
-#define AP_SUPPORTED_OPTIONS "?a:bc:f:hn:o:r:svxz"
+#define AP_SUPPORTED_OPTIONS "?a:bc:f:hn:o:r:sv^xz"
/******************************************************************************
*
@@ -100,6 +100,7 @@ static void ap_display_usage(void)
ACPI_OPTION("-r <Address>", "Dump tables from specified RSDP");
ACPI_OPTION("-s", "Print table summaries only");
ACPI_OPTION("-v", "Display version information");
+ ACPI_OPTION("-vd", "Display build date and time");
ACPI_OPTION("-z", "Verbose mode");
ACPI_USAGE_TEXT("\nTable Options:\n");
@@ -231,10 +232,29 @@ static int ap_do_options(int argc, char **argv)
}
continue;
- case 'v': /* Revision/version */
+ case 'v': /* -v: (Version): signon already emitted, just exit */
- acpi_os_printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
- return (1);
+ switch (acpi_gbl_optarg[0]) {
+ case '^': /* -v: (Version) */
+
+ fprintf(stderr,
+ ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+ return (1);
+
+ case 'd':
+
+ fprintf(stderr,
+ ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+ printf(ACPI_COMMON_BUILD_TIME);
+ return (1);
+
+ default:
+
+ printf("Unknown option: -v%s\n",
+ acpi_gbl_optarg);
+ return (-1);
+ }
+ break;
case 'z': /* Verbose mode */
diff --git a/tools/power/cpupower/lib/cpufreq.h b/tools/power/cpupower/lib/cpufreq.h
index 3b005c39f068..60beaf5ed2ea 100644
--- a/tools/power/cpupower/lib/cpufreq.h
+++ b/tools/power/cpupower/lib/cpufreq.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __CPUPOWER_CPUFREQ_H__
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
index 0b24dd9d01ff..29f50d4cfea0 100755
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
@@ -411,6 +411,16 @@ def set_trace_buffer_size():
print('IO error setting trace buffer size ')
quit()
+def free_trace_buffer():
+ """ Free the trace buffer memory """
+
+ try:
+ open('/sys/kernel/debug/tracing/buffer_size_kb'
+ , 'w').write("1")
+ except:
+ print('IO error freeing trace buffer')
+ quit()
+
def read_trace_data(filename):
""" Read and parse trace data """
@@ -583,4 +593,9 @@ for root, dirs, files in os.walk('.'):
for f in files:
fix_ownership(f)
+clear_trace_file()
+# Free the memory
+if interval:
+ free_trace_buffer()
+
os.chdir('../../')
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 3fab179b1aba..fcb3ed0be5f8 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -99,5 +99,6 @@ ifneq ($(silent),1)
QUIET_CLEAN = @printf ' CLEAN %s\n' $1;
QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
+ QUIET_UNINST = @printf ' UNINST %s\n' $1;
endif
endif
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 541d9d7fad5a..1e09d77f1948 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -3,3 +3,10 @@ test_maps
test_lru_map
test_lpm_map
test_tag
+FEATURE-DUMP.libbpf
+fixdep
+test_align
+test_dev_cgroup
+test_progs
+test_verifier_log
+feature
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 9316e648a880..bf05bc5e36e5 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -11,16 +11,19 @@ ifneq ($(wildcard $(GENHDR)),)
endif
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
-LDLIBS += -lcap -lelf -lrt
+LDLIBS += -lcap -lelf -lrt -lpthread
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
- test_align test_verifier_log test_dev_cgroup
+ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user
TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
- sockmap_verdict_prog.o dev_cgroup.o
+ sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
+ test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
+ sample_map_ret0.o test_tcpbpf_kern.o
-TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh
+TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \
+ test_offload.py
include ../lib.mk
@@ -49,8 +52,13 @@ else
CPU ?= generic
endif
+CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \
+ -Wno-compare-distinct-pointer-types
+
+$(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline
+$(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
+
%.o: %.c
- $(CLANG) -I. -I./include/uapi -I../../../include/uapi \
- -Wno-compare-distinct-pointer-types \
+ $(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index fd9a17fa8a8b..dde2c11d7771 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -71,6 +71,8 @@ static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
int optlen) =
(void *) BPF_FUNC_getsockopt;
+static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) =
+ (void *) BPF_FUNC_sock_ops_cb_flags_set;
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
(void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sock_map_update)(void *map, void *key, void *value,
@@ -82,7 +84,8 @@ static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
unsigned int buf_size) =
(void *) BPF_FUNC_perf_prog_read_value;
-
+static int (*bpf_override_return)(void *ctx, unsigned long rc) =
+ (void *) BPF_FUNC_override_return;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 52d53ed08769..983dd25d49f4 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -3,3 +3,5 @@ CONFIG_BPF_SYSCALL=y
CONFIG_NET_CLS_BPF=m
CONFIG_BPF_EVENTS=y
CONFIG_TEST_BPF=m
+CONFIG_CGROUP_BPF=y
+CONFIG_NETDEVSIM=m
diff --git a/tools/testing/selftests/bpf/sample_map_ret0.c b/tools/testing/selftests/bpf/sample_map_ret0.c
new file mode 100644
index 000000000000..0756303676ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/sample_map_ret0.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") htab = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(long),
+ .max_entries = 2,
+};
+
+struct bpf_map_def SEC("maps") array = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(long),
+ .max_entries = 2,
+};
+
+/* Sample program which should always load for testing control paths. */
+SEC(".text") int func()
+{
+ __u64 key64 = 0;
+ __u32 key = 0;
+ long *value;
+
+ value = bpf_map_lookup_elem(&htab, &key);
+ if (!value)
+ return 1;
+ value = bpf_map_lookup_elem(&array, &key64);
+ if (!value)
+ return 1;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/sample_ret0.c b/tools/testing/selftests/bpf/sample_ret0.c
new file mode 100644
index 000000000000..fec99750d6ea
--- /dev/null
+++ b/tools/testing/selftests/bpf/sample_ret0.c
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+
+/* Sample program which should always load for testing control paths. */
+int func()
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py
new file mode 100755
index 000000000000..481dccdf140c
--- /dev/null
+++ b/tools/testing/selftests/bpf/tcp_client.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python2
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+
+import sys, os, os.path, getopt
+import socket, time
+import subprocess
+import select
+
+def read(sock, n):
+ buf = ''
+ while len(buf) < n:
+ rem = n - len(buf)
+ try: s = sock.recv(rem)
+ except (socket.error), e: return ''
+ buf += s
+ return buf
+
+def send(sock, s):
+ total = len(s)
+ count = 0
+ while count < total:
+ try: n = sock.send(s)
+ except (socket.error), e: n = 0
+ if n == 0:
+ return count;
+ count += n
+ return count
+
+
+serverPort = int(sys.argv[1])
+HostName = socket.gethostname()
+
+# create active socket
+sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+try:
+ sock.connect((HostName, serverPort))
+except socket.error as e:
+ sys.exit(1)
+
+buf = ''
+n = 0
+while n < 1000:
+ buf += '+'
+ n += 1
+
+sock.settimeout(1);
+n = send(sock, buf)
+n = read(sock, 500)
+sys.exit(0)
diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py
new file mode 100755
index 000000000000..bc454d7d0be2
--- /dev/null
+++ b/tools/testing/selftests/bpf/tcp_server.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python2
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+
+import sys, os, os.path, getopt
+import socket, time
+import subprocess
+import select
+
+def read(sock, n):
+ buf = ''
+ while len(buf) < n:
+ rem = n - len(buf)
+ try: s = sock.recv(rem)
+ except (socket.error), e: return ''
+ buf += s
+ return buf
+
+def send(sock, s):
+ total = len(s)
+ count = 0
+ while count < total:
+ try: n = sock.send(s)
+ except (socket.error), e: n = 0
+ if n == 0:
+ return count;
+ count += n
+ return count
+
+
+SERVER_PORT = 12877
+MAX_PORTS = 2
+
+serverPort = SERVER_PORT
+serverSocket = None
+
+HostName = socket.gethostname()
+
+# create passive socket
+serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+host = socket.gethostname()
+
+try: serverSocket.bind((host, 0))
+except socket.error as msg:
+ print 'bind fails: ', msg
+
+sn = serverSocket.getsockname()
+serverPort = sn[1]
+
+cmdStr = ("./tcp_client.py %d &") % (serverPort)
+os.system(cmdStr)
+
+buf = ''
+n = 0
+while n < 500:
+ buf += '.'
+ n += 1
+
+serverSocket.listen(MAX_PORTS)
+readList = [serverSocket]
+
+while True:
+ readyRead, readyWrite, inError = \
+ select.select(readList, [], [], 2)
+
+ if len(readyRead) > 0:
+ waitCount = 0
+ for sock in readyRead:
+ if sock == serverSocket:
+ (clientSocket, address) = serverSocket.accept()
+ address = str(address[0])
+ readList.append(clientSocket)
+ else:
+ sock.settimeout(1)
+ s = read(sock, 1000)
+ n = send(sock, buf)
+ sock.close()
+ serverSocket.close()
+ sys.exit(0)
+ else:
+ print 'Select timeout!'
+ sys.exit(1)
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 8591c89c0828..ff8bd7e3e50c 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -64,11 +64,11 @@ static struct bpf_align_test tests[] = {
.matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"},
- {1, "R3=inv2"},
- {2, "R3=inv4"},
- {3, "R3=inv8"},
- {4, "R3=inv16"},
- {5, "R3=inv32"},
+ {1, "R3_w=inv2"},
+ {2, "R3_w=inv4"},
+ {3, "R3_w=inv8"},
+ {4, "R3_w=inv16"},
+ {5, "R3_w=inv32"},
},
},
{
@@ -92,17 +92,17 @@ static struct bpf_align_test tests[] = {
.matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"},
- {1, "R3=inv1"},
- {2, "R3=inv2"},
- {3, "R3=inv4"},
- {4, "R3=inv8"},
- {5, "R3=inv16"},
- {6, "R3=inv1"},
- {7, "R4=inv32"},
- {8, "R4=inv16"},
- {9, "R4=inv8"},
- {10, "R4=inv4"},
- {11, "R4=inv2"},
+ {1, "R3_w=inv1"},
+ {2, "R3_w=inv2"},
+ {3, "R3_w=inv4"},
+ {4, "R3_w=inv8"},
+ {5, "R3_w=inv16"},
+ {6, "R3_w=inv1"},
+ {7, "R4_w=inv32"},
+ {8, "R4_w=inv16"},
+ {9, "R4_w=inv8"},
+ {10, "R4_w=inv4"},
+ {11, "R4_w=inv2"},
},
},
{
@@ -121,12 +121,12 @@ static struct bpf_align_test tests[] = {
.matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"},
- {1, "R3=inv4"},
- {2, "R3=inv8"},
- {3, "R3=inv10"},
- {4, "R4=inv8"},
- {5, "R4=inv12"},
- {6, "R4=inv14"},
+ {1, "R3_w=inv4"},
+ {2, "R3_w=inv8"},
+ {3, "R3_w=inv10"},
+ {4, "R4_w=inv8"},
+ {5, "R4_w=inv12"},
+ {6, "R4_w=inv14"},
},
},
{
@@ -143,10 +143,10 @@ static struct bpf_align_test tests[] = {
.matches = {
{1, "R1=ctx(id=0,off=0,imm=0)"},
{1, "R10=fp0"},
- {1, "R3=inv7"},
- {2, "R3=inv7"},
- {3, "R3=inv14"},
- {4, "R3=inv56"},
+ {1, "R3_w=inv7"},
+ {2, "R3_w=inv7"},
+ {3, "R3_w=inv14"},
+ {4, "R3_w=inv56"},
},
},
@@ -185,18 +185,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
- {7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {8, "R3=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
- {9, "R3=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {10, "R3=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {11, "R3=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+ {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+ {9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+ {11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
{18, "R3=pkt_end(id=0,off=0,imm=0)"},
- {18, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {19, "R4=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
- {20, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
- {21, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {22, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {23, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+ {18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
+ {20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+ {21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+ {22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
},
},
{
@@ -217,16 +217,16 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {8, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {9, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {11, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
- {12, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {13, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {14, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {15, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {16, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+ {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+ {12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+ {16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
},
},
{
@@ -257,14 +257,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
- {5, "R5=pkt(id=0,off=14,r=0,imm=0)"},
- {6, "R4=pkt(id=0,off=14,r=0,imm=0)"},
+ {4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
+ {5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
+ {6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
- {10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {14, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
- {15, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+ {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+ {15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
},
},
{
@@ -320,11 +320,11 @@ static struct bpf_align_test tests[] = {
* alignment of 4.
*/
{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
- {8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
*/
- {11, "R5=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* its total offset is NET_IP_ALIGN + reg->off (0) +
* reg->aux_off (14) which is 16. Then the variable
@@ -336,11 +336,11 @@ static struct bpf_align_test tests[] = {
/* Variable offset is added to R5 packet pointer,
* resulting in auxiliary alignment of 4.
*/
- {18, "R5=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
- {19, "R5=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
@@ -352,18 +352,18 @@ static struct bpf_align_test tests[] = {
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
- {26, "R5=pkt(id=0,off=14,r=8"},
+ {26, "R5_w=pkt(id=0,off=14,r=8"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n).
*/
- {27, "R5=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Constant is added to R5 again, setting reg->off to 18. */
- {28, "R5=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
- {29, "R5=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
+ {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
@@ -410,11 +410,11 @@ static struct bpf_align_test tests[] = {
* alignment of 4.
*/
{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
- {8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
- {9, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
/* Packet pointer has (4n+2) offset */
- {11, "R5=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -426,11 +426,11 @@ static struct bpf_align_test tests[] = {
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
- {18, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
- {19, "R5=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+ {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -446,11 +446,9 @@ static struct bpf_align_test tests[] = {
.insns = {
PREP_PKT_POINTERS,
BPF_MOV64_IMM(BPF_REG_0, 0),
- /* ptr & const => unknown & const */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 0x40),
- /* ptr << const => unknown << const */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ /* (ptr - ptr) << 2 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
/* We have a (4n) value. Let's make a packet offset
* out of it. First add 14, to make it a (4n+2)
@@ -473,20 +471,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
- {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
- /* ptr & 0x40 == either 0 or 0x40 */
- {5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
- /* ptr << 2 == unknown, (4n) */
- {7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
+ {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
+ /* (ptr - ptr) << 2 == unknown, (4n) */
+ {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
- {8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
+ {7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
/* Checked s>=0 */
- {10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
/* packet pointer + nonnegative (4n+2) */
- {12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
- {14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
@@ -494,7 +490,7 @@ static struct bpf_align_test tests[] = {
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
- {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ {15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
}
},
{
@@ -530,11 +526,11 @@ static struct bpf_align_test tests[] = {
* alignment of 4.
*/
{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
- {9, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
- {10, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
/* New unknown value in R7 is (4n) */
- {11, "R7=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Subtracting it from R6 blows our unsigned bounds */
{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
/* Checked s>= 0 */
@@ -583,15 +579,15 @@ static struct bpf_align_test tests[] = {
* alignment of 4.
*/
{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
- {10, "R6=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
+ {10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
/* Adding 14 makes R6 be (4n+2) */
- {11, "R6=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
+ {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
/* Subtracting from packet pointer overflows ubounds */
- {13, "R5=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
+ {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
/* New unknown value in R7 is (4n), >= 76 */
- {15, "R7=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
+ {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
/* Adding it to packet pointer gives nice bounds again */
- {16, "R5=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
+ {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 02c85d6c89b0..3489cc283433 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -10,6 +10,8 @@
#include <string.h>
#include <errno.h>
#include <assert.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
@@ -19,19 +21,23 @@
#define DEV_CGROUP_PROG "./dev_cgroup.o"
-#define TEST_CGROUP "test-bpf-based-device-cgroup/"
+#define TEST_CGROUP "/test-bpf-based-device-cgroup/"
int main(int argc, char **argv)
{
+ struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
struct bpf_object *obj;
int error = EXIT_FAILURE;
int prog_fd, cgroup_fd;
__u32 prog_cnt;
+ if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+ perror("Unable to lift memlock rlimit");
+
if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
&obj, &prog_fd)) {
printf("Failed to load DEV_CGROUP program\n");
- goto err;
+ goto out;
}
if (setup_cgroup_environment()) {
@@ -89,5 +95,6 @@ int main(int argc, char **argv)
err:
cleanup_cgroup_environment();
+out:
return error;
}
diff --git a/tools/testing/selftests/bpf/test_l4lb_noinline.c b/tools/testing/selftests/bpf/test_l4lb_noinline.c
new file mode 100644
index 000000000000..ba44a14e6dc4
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_l4lb_noinline.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "bpf_helpers.h"
+#include "test_iptunnel_common.h"
+#include "bpf_endian.h"
+
+int _version SEC("version") = 1;
+
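+/* rotate left; the (-shift) & 31 form keeps the right-shift amount in
+ * [0, 31], so a shift of 0 avoids undefined behaviour
+ */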
+static __u32 rol32(__u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* Copy-paste of jhash from the kernel sources, to make sure LLVM
+ * can compile it into a valid sequence of BPF instructions.
+ */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= c; a ^= rol32(c, 4); c += b; \
+ b -= a; b ^= rol32(a, 6); a += c; \
+ c -= b; c ^= rol32(b, 8); b += a; \
+ a -= c; a ^= rol32(c, 16); c += b; \
+ b -= a; b ^= rol32(a, 19); a += c; \
+ c -= b; c ^= rol32(b, 4); b += a; \
+}
+
+#define __jhash_final(a, b, c) \
+{ \
+ c ^= b; c -= rol32(b, 14); \
+ a ^= c; a -= rol32(c, 11); \
+ b ^= a; b -= rol32(a, 25); \
+ c ^= b; c -= rol32(b, 16); \
+ a ^= c; a -= rol32(c, 4); \
+ b ^= a; b -= rol32(a, 14); \
+ c ^= b; c -= rol32(b, 24); \
+}
+
+#define JHASH_INITVAL 0xdeadbeef
+
+typedef unsigned int u32;
+
+static u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c;
+ const unsigned char *k = key;
+
+ a = b = c = JHASH_INITVAL + length + initval;
+
+ while (length > 12) {
+ a += *(u32 *)(k);
+ b += *(u32 *)(k + 4);
+ c += *(u32 *)(k + 8);
+ __jhash_mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+ switch (length) {
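+ /* all cases below deliberately fall through to consume the tail bytes */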
+ case 12: c += (u32)k[11]<<24;
+ case 11: c += (u32)k[10]<<16;
+ case 10: c += (u32)k[9]<<8;
+ case 9: c += k[8];
+ case 8: b += (u32)k[7]<<24;
+ case 7: b += (u32)k[6]<<16;
+ case 6: b += (u32)k[5]<<8;
+ case 5: b += k[4];
+ case 4: a += (u32)k[3]<<24;
+ case 3: a += (u32)k[2]<<16;
+ case 2: a += (u32)k[1]<<8;
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
+
+static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += initval;
+ b += initval;
+ c += initval;
+ __jhash_final(a, b, c);
+ return c;
+}
+
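+/* the (2 << 2) salt below is presumably the length in bytes of two 32-bit words */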
+static u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
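+/* 65343 == 0xff3f: iph->frag_off with any fragment bit set, i.e.
+ * htons(IP_MF | IP_OFFSET) as seen on a little-endian host
+ */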
+#define PCKT_FRAGMENTED 65343
+#define IPV4_HDR_LEN_NO_OPT 20
+#define IPV4_PLUS_ICMP_HDR 28
+#define IPV6_PLUS_ICMP_HDR 48
+#define RING_SIZE 2
+#define MAX_VIPS 12
+#define MAX_REALS 5
+#define CTL_MAP_SIZE 16
+#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
+#define F_IPV6 (1 << 0)
+#define F_HASH_NO_SRC_PORT (1 << 0)
+#define F_ICMP (1 << 0)
+#define F_SYN_SET (1 << 1)
+
+struct packet_description {
+ union {
+ __be32 src;
+ __be32 srcv6[4];
+ };
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ union {
+ __u32 ports;
+ __u16 port16[2];
+ };
+ __u8 proto;
+ __u8 flags;
+};
+
+struct ctl_value {
+ union {
+ __u64 value;
+ __u32 ifindex;
+ __u8 mac[6];
+ };
+};
+
+struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+};
+
+struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+};
+
+struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+};
+
+struct eth_hdr {
+ unsigned char eth_dest[ETH_ALEN];
+ unsigned char eth_source[ETH_ALEN];
+ unsigned short eth_proto;
+};
+
+struct bpf_map_def SEC("maps") vip_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct vip),
+ .value_size = sizeof(struct vip_meta),
+ .max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ch_rings = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = CH_RINGS_SIZE,
+};
+
+struct bpf_map_def SEC("maps") reals = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct real_definition),
+ .max_entries = MAX_REALS,
+};
+
+struct bpf_map_def SEC("maps") stats = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct vip_stats),
+ .max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ctl_array = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct ctl_value),
+ .max_entries = CTL_MAP_SIZE,
+};
+
+static __u32 get_packet_hash(struct packet_description *pckt,
+ bool ipv6)
+{
+ if (ipv6)
+ return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
+ pckt->ports, CH_RINGS_SIZE);
+ else
+ return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
+}
+
+static bool get_packet_dst(struct real_definition **real,
+ struct packet_description *pckt,
+ struct vip_meta *vip_info,
+ bool is_ipv6)
+{
+ __u32 hash = get_packet_hash(pckt, is_ipv6);
+ __u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
+ __u32 *real_pos;
+
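+ /* the test feeds fixed packets, so only these two precomputed jhash
+ * values are expected; anything else indicates a hashing regression
+ */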
+ if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
+ hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+ return 0;
+
+ real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+ if (!real_pos)
+ return false;
+ key = *real_pos;
+ *real = bpf_map_lookup_elem(&reals, &key);
+ if (!(*real))
+ return false;
+ return true;
+}
+
+static int parse_icmpv6(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
+{
+ struct icmp6hdr *icmp_hdr;
+ struct ipv6hdr *ip6h;
+
+ icmp_hdr = data + off;
+ if (icmp_hdr + 1 > data_end)
+ return TC_ACT_SHOT;
+ if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
+ return TC_ACT_OK;
+ off += sizeof(struct icmp6hdr);
+ ip6h = data + off;
+ if (ip6h + 1 > data_end)
+ return TC_ACT_SHOT;
+ pckt->proto = ip6h->nexthdr;
+ pckt->flags |= F_ICMP;
+ memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
+ memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
+ return TC_ACT_UNSPEC;
+}
+
+static int parse_icmp(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
+{
+ struct icmphdr *icmp_hdr;
+ struct iphdr *iph;
+
+ icmp_hdr = data + off;
+ if (icmp_hdr + 1 > data_end)
+ return TC_ACT_SHOT;
+ if (icmp_hdr->type != ICMP_DEST_UNREACH ||
+ icmp_hdr->code != ICMP_FRAG_NEEDED)
+ return TC_ACT_OK;
+ off += sizeof(struct icmphdr);
+ iph = data + off;
+ if (iph + 1 > data_end)
+ return TC_ACT_SHOT;
+ if (iph->ihl != 5)
+ return TC_ACT_SHOT;
+ pckt->proto = iph->protocol;
+ pckt->flags |= F_ICMP;
+ pckt->src = iph->daddr;
+ pckt->dst = iph->saddr;
+ return TC_ACT_UNSPEC;
+}
+
+static bool parse_udp(void *data, __u64 off, void *data_end,
+ struct packet_description *pckt)
+{
+ struct udphdr *udp;
+
+ udp = data + off;
+
+ if (udp + 1 > data_end)
+ return false;
+
+ if (!(pckt->flags & F_ICMP)) {
+ pckt->port16[0] = udp->source;
+ pckt->port16[1] = udp->dest;
+ } else {
+ pckt->port16[0] = udp->dest;
+ pckt->port16[1] = udp->source;
+ }
+ return true;
+}
+
+static bool parse_tcp(void *data, __u64 off, void *data_end,
+ struct packet_description *pckt)
+{
+ struct tcphdr *tcp;
+
+ tcp = data + off;
+ if (tcp + 1 > data_end)
+ return false;
+
+ if (tcp->syn)
+ pckt->flags |= F_SYN_SET;
+
+ if (!(pckt->flags & F_ICMP)) {
+ pckt->port16[0] = tcp->source;
+ pckt->port16[1] = tcp->dest;
+ } else {
+ pckt->port16[0] = tcp->dest;
+ pckt->port16[1] = tcp->source;
+ }
+ return true;
+}
+
+static int process_packet(void *data, __u64 off, void *data_end,
+ bool is_ipv6, struct __sk_buff *skb)
+{
+ void *pkt_start = (void *)(long)skb->data;
+ struct packet_description pckt = {};
+ struct eth_hdr *eth = pkt_start;
+ struct bpf_tunnel_key tkey = {};
+ struct vip_stats *data_stats;
+ struct real_definition *dst;
+ struct vip_meta *vip_info;
+ struct ctl_value *cval;
+ __u32 v4_intf_pos = 1;
+ __u32 v6_intf_pos = 2;
+ struct ipv6hdr *ip6h;
+ struct vip vip = {};
+ struct iphdr *iph;
+ int tun_flag = 0;
+ __u16 pkt_bytes;
+ __u64 iph_len;
+ __u32 ifindex;
+ __u8 protocol;
+ __u32 vip_num;
+ int action;
+
+ tkey.tunnel_ttl = 64;
+ if (is_ipv6) {
+ ip6h = data + off;
+ if (ip6h + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ iph_len = sizeof(struct ipv6hdr);
+ protocol = ip6h->nexthdr;
+ pckt.proto = protocol;
+ pkt_bytes = bpf_ntohs(ip6h->payload_len);
+ off += iph_len;
+ if (protocol == IPPROTO_FRAGMENT) {
+ return TC_ACT_SHOT;
+ } else if (protocol == IPPROTO_ICMPV6) {
+ action = parse_icmpv6(data, data_end, off, &pckt);
+ if (action >= 0)
+ return action;
+ off += IPV6_PLUS_ICMP_HDR;
+ } else {
+ memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
+ memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
+ }
+ } else {
+ iph = data + off;
+ if (iph + 1 > data_end)
+ return TC_ACT_SHOT;
+ if (iph->ihl != 5)
+ return TC_ACT_SHOT;
+
+ protocol = iph->protocol;
+ pckt.proto = protocol;
+ pkt_bytes = bpf_ntohs(iph->tot_len);
+ off += IPV4_HDR_LEN_NO_OPT;
+
+ if (iph->frag_off & PCKT_FRAGMENTED)
+ return TC_ACT_SHOT;
+ if (protocol == IPPROTO_ICMP) {
+ action = parse_icmp(data, data_end, off, &pckt);
+ if (action >= 0)
+ return action;
+ off += IPV4_PLUS_ICMP_HDR;
+ } else {
+ pckt.src = iph->saddr;
+ pckt.dst = iph->daddr;
+ }
+ }
+ protocol = pckt.proto;
+
+ if (protocol == IPPROTO_TCP) {
+ if (!parse_tcp(data, off, data_end, &pckt))
+ return TC_ACT_SHOT;
+ } else if (protocol == IPPROTO_UDP) {
+ if (!parse_udp(data, off, data_end, &pckt))
+ return TC_ACT_SHOT;
+ } else {
+ return TC_ACT_SHOT;
+ }
+
+ if (is_ipv6)
+ memcpy(vip.daddr.v6, pckt.dstv6, 16);
+ else
+ vip.daddr.v4 = pckt.dst;
+
+ vip.dport = pckt.port16[1];
+ vip.protocol = pckt.proto;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info) {
+ vip.dport = 0;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info)
+ return TC_ACT_SHOT;
+ pckt.port16[1] = 0;
+ }
+
+ if (vip_info->flags & F_HASH_NO_SRC_PORT)
+ pckt.port16[0] = 0;
+
+ if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
+ return TC_ACT_SHOT;
+
+ if (dst->flags & F_IPV6) {
+ cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
+ if (!cval)
+ return TC_ACT_SHOT;
+ ifindex = cval->ifindex;
+ memcpy(tkey.remote_ipv6, dst->dstv6, 16);
+ tun_flag = BPF_F_TUNINFO_IPV6;
+ } else {
+ cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
+ if (!cval)
+ return TC_ACT_SHOT;
+ ifindex = cval->ifindex;
+ tkey.remote_ipv4 = dst->dst;
+ }
+ vip_num = vip_info->vip_num;
+ data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+ if (!data_stats)
+ return TC_ACT_SHOT;
+ data_stats->pkts++;
+ data_stats->bytes += pkt_bytes;
+ bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
+ *(u32 *)eth->eth_dest = tkey.remote_ipv4;
+ return bpf_redirect(ifindex, 0);
+}
+
+SEC("l4lb-demo")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct eth_hdr *eth = data;
+ __u32 eth_proto;
+ __u32 nh_off;
+
+ nh_off = sizeof(struct eth_hdr);
+ if (data + nh_off > data_end)
+ return TC_ACT_SHOT;
+ eth_proto = eth->eth_proto;
+ if (eth_proto == bpf_htons(ETH_P_IP))
+ return process_packet(data, nh_off, data_end, false, ctx);
+ else if (eth_proto == bpf_htons(ETH_P_IPV6))
+ return process_packet(data, nh_off, data_end, true, ctx);
+ else
+ return TC_ACT_SHOT;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index f61480641b6e..2be87e9ee28d 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -14,6 +14,7 @@
#include <errno.h>
#include <inttypes.h>
#include <linux/bpf.h>
+#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -521,6 +522,218 @@ static void test_lpm_delete(void)
close(map_fd);
}
+static void test_lpm_get_next_key(void)
+{
+ struct bpf_lpm_trie_key *key_p, *next_key_p;
+ size_t key_size;
+ __u32 value = 0;
+ int map_fd;
+
+ key_size = sizeof(*key_p) + sizeof(__u32);
+ key_p = alloca(key_size);
+ next_key_p = alloca(key_size);
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, sizeof(value),
+ 100, BPF_F_NO_PREALLOC);
+ assert(map_fd >= 0);
+
+ /* empty tree. get_next_key should return ENOENT */
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -1 &&
+ errno == ENOENT);
+
+ /* get and verify the first key, get the second one should fail. */
+ key_p->prefixlen = 16;
+ inet_pton(AF_INET, "192.168.0.0", key_p->data);
+ assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+ memset(key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168);
+
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+ errno == ENOENT);
+
+ /* no exact matching key should get the first one in post order. */
+ key_p->prefixlen = 8;
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168);
+
+ /* add one more element (total two) */
+ key_p->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.0.0", key_p->data);
+ assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+ memset(key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168 && key_p->data[2] == 0);
+
+ memset(next_key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+ errno == ENOENT);
+
+ /* Add one more element (total three) */
+ key_p->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.128.0", key_p->data);
+ assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+ memset(key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168 && key_p->data[2] == 0);
+
+ memset(next_key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+ errno == ENOENT);
+
+ /* Add one more element (total four) */
+ key_p->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.1.0", key_p->data);
+ assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+ memset(key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168 && key_p->data[2] == 0);
+
+ memset(next_key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 1);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+ errno == ENOENT);
+
+ /* no exact matching key should return the first one in post order */
+ key_p->prefixlen = 22;
+ inet_pton(AF_INET, "192.168.1.0", key_p->data);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 0);
+
+ close(map_fd);
+}
+
+#define MAX_TEST_KEYS 4
+struct lpm_mt_test_info {
+ int cmd; /* 0: update, 1: delete, 2: lookup, 3: get_next_key */
+ int iter;
+ int map_fd;
+ struct {
+ __u32 prefixlen;
+ __u32 data;
+ } key[MAX_TEST_KEYS];
+};
+
+static void *lpm_test_command(void *arg)
+{
+ int i, j, ret, iter, key_size;
+ struct lpm_mt_test_info *info = arg;
+ struct bpf_lpm_trie_key *key_p;
+
+ key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
+ key_p = alloca(key_size);
+ for (iter = 0; iter < info->iter; iter++)
+ for (i = 0; i < MAX_TEST_KEYS; i++) {
+ /* first half of iterations in forward order,
+ * and second half in backward order.
+ */
+ j = (iter < (info->iter / 2)) ? i : MAX_TEST_KEYS - i - 1;
+ key_p->prefixlen = info->key[j].prefixlen;
+ memcpy(key_p->data, &info->key[j].data, sizeof(__u32));
+ if (info->cmd == 0) {
+ __u32 value = j;
+ /* update must succeed */
+ assert(bpf_map_update_elem(info->map_fd, key_p, &value, 0) == 0);
+ } else if (info->cmd == 1) {
+ ret = bpf_map_delete_elem(info->map_fd, key_p);
+ assert(ret == 0 || errno == ENOENT);
+ } else if (info->cmd == 2) {
+ __u32 value;
+ ret = bpf_map_lookup_elem(info->map_fd, key_p, &value);
+ assert(ret == 0 || errno == ENOENT);
+ } else {
+ struct bpf_lpm_trie_key *next_key_p = alloca(key_size);
+ ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p);
+ assert(ret == 0 || errno == ENOENT || errno == ENOMEM);
+ }
+ }
+
+ /* pass successful exit info back to the main thread */
+ pthread_exit((void *)info);
+}
+
+static void setup_lpm_mt_test_info(struct lpm_mt_test_info *info, int map_fd)
+{
+ info->iter = 2000;
+ info->map_fd = map_fd;
+ info->key[0].prefixlen = 16;
+ inet_pton(AF_INET, "192.168.0.0", &info->key[0].data);
+ info->key[1].prefixlen = 24;
+ inet_pton(AF_INET, "192.168.0.0", &info->key[1].data);
+ info->key[2].prefixlen = 24;
+ inet_pton(AF_INET, "192.168.128.0", &info->key[2].data);
+ info->key[3].prefixlen = 24;
+ inet_pton(AF_INET, "192.168.1.0", &info->key[3].data);
+}
+
+static void test_lpm_multi_thread(void)
+{
+ struct lpm_mt_test_info info[4];
+ size_t key_size, value_size;
+ pthread_t thread_id[4];
+ int i, map_fd;
+ void *ret;
+
+ /* create a trie */
+ value_size = sizeof(__u32);
+ key_size = sizeof(struct bpf_lpm_trie_key) + value_size;
+ map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, value_size,
+ 100, BPF_F_NO_PREALLOC);
+
+ /* create 4 threads to test update, delete, lookup and get_next_key */
+ setup_lpm_mt_test_info(&info[0], map_fd);
+ for (i = 0; i < 4; i++) {
+ if (i != 0)
+ memcpy(&info[i], &info[0], sizeof(info[i]));
+ info[i].cmd = i;
+ assert(pthread_create(&thread_id[i], NULL, &lpm_test_command, &info[i]) == 0);
+ }
+
+ for (i = 0; i < 4; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 && ret == (void *)&info[i]);
+
+ close(map_fd);
+}
+
int main(void)
{
struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
@@ -545,6 +758,10 @@ int main(void)
test_lpm_delete();
+ test_lpm_get_next_key();
+
+ test_lpm_multi_thread();
+
printf("test_lpm: OK\n");
return 0;
}
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 040356ecc862..436c4c72414f 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -242,7 +242,7 @@ static void test_hashmap_percpu(int task, void *data)
static void test_hashmap_walk(int task, void *data)
{
- int fd, i, max_entries = 100000;
+ int fd, i, max_entries = 1000;
long long key, value, next_key;
bool next_key_valid = true;
@@ -463,7 +463,7 @@ static void test_devmap(int task, void *data)
#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
static void test_sockmap(int tasks, void *data)
{
- int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
+ int one = 1, map_fd_rx = 0, map_fd_tx = 0, map_fd_break, s, sc, rc;
struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
int ports[] = {50200, 50201, 50202, 50204};
int err, i, fd, udp, sfd[6] = {0xdeadbeef};
@@ -868,9 +868,12 @@ static void test_sockmap(int tasks, void *data)
goto out_sockmap;
}
- /* Test map close sockets */
- for (i = 0; i < 6; i++)
+ /* Test map close sockets and empty maps */
+ for (i = 0; i < 6; i++) {
+ bpf_map_delete_elem(map_fd_tx, &i);
+ bpf_map_delete_elem(map_fd_rx, &i);
close(sfd[i]);
+ }
close(fd);
close(map_fd_rx);
bpf_object__close(obj);
@@ -881,8 +884,13 @@ out:
printf("Failed to create sockmap '%i:%s'!\n", i, strerror(errno));
exit(1);
out_sockmap:
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++) {
+ if (map_fd_tx)
+ bpf_map_delete_elem(map_fd_tx, &i);
+ if (map_fd_rx)
+ bpf_map_delete_elem(map_fd_rx, &i);
close(sfd[i]);
+ }
close(fd);
exit(1);
}
@@ -931,8 +939,12 @@ static void test_map_large(void)
close(fd);
}
-static void run_parallel(int tasks, void (*fn)(int task, void *data),
- void *data)
+#define run_parallel(N, FN, DATA) \
+ printf("Fork %d tasks to '" #FN "'\n", N); \
+ __run_parallel(N, FN, DATA)
+
+static void __run_parallel(int tasks, void (*fn)(int task, void *data),
+ void *data)
{
pid_t pid[tasks];
int i;
@@ -972,7 +984,7 @@ static void test_map_stress(void)
#define DO_UPDATE 1
#define DO_DELETE 0
-static void do_work(int fn, void *data)
+static void test_update_delete(int fn, void *data)
{
int do_update = ((int *)data)[1];
int fd = ((int *)data)[0];
@@ -1012,7 +1024,7 @@ static void test_map_parallel(void)
*/
data[0] = fd;
data[1] = DO_UPDATE;
- run_parallel(TASKS, do_work, data);
+ run_parallel(TASKS, test_update_delete, data);
/* Check that key=0 is already there. */
assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
@@ -1035,7 +1047,7 @@ static void test_map_parallel(void)
/* Now let's delete all elemenets in parallel. */
data[1] = DO_DELETE;
- run_parallel(TASKS, do_work, data);
+ run_parallel(TASKS, test_update_delete, data);
/* Nothing should be left. */
key = -1;
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
new file mode 100755
index 000000000000..e78aad0a68bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -0,0 +1,1085 @@
+#!/usr/bin/python3
+
+# Copyright (C) 2017 Netronome Systems, Inc.
+#
+# This software is licensed under the GNU General License Version 2,
+# June 1991 as shown in the file COPYING in the top-level directory of this
+# source tree.
+#
+# THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+# WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+# OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+# THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+from datetime import datetime
+import argparse
+import json
+import os
+import pprint
+import random
+import string
+import struct
+import subprocess
+import time
+
+logfile = None
+log_level = 1
+skip_extack = False
+bpf_test_dir = os.path.dirname(os.path.realpath(__file__))
+pp = pprint.PrettyPrinter()
+devs = [] # devices we created for clean up
+files = [] # files to be removed
+netns = [] # net namespaces to be removed
+
+def log_get_sec(level=0):
+ return "*" * (log_level + level)
+
+def log_level_inc(add=1):
+ global log_level
+ log_level += add
+
+def log_level_dec(sub=1):
+ global log_level
+ log_level -= sub
+
+def log_level_set(level):
+ global log_level
+ log_level = level
+
+def log(header, data, level=None):
+ """
+ Output to an optional log.
+ """
+ if logfile is None:
+ return
+ if level is not None:
+ log_level_set(level)
+
+ if not isinstance(data, str):
+ data = pp.pformat(data)
+
+ if len(header):
+ logfile.write("\n" + log_get_sec() + " ")
+ logfile.write(header)
+ if len(header) and len(data.strip()):
+ logfile.write("\n")
+ logfile.write(data)
+
+def skip(cond, msg):
+ if not cond:
+ return
+ print("SKIP: " + msg)
+ log("SKIP: " + msg, "", level=1)
+ os.sys.exit(0)
+
+def fail(cond, msg):
+ if not cond:
+ return
+ print("FAIL: " + msg)
+ log("FAIL: " + msg, "", level=1)
+ os.sys.exit(1)
+
+def start_test(msg):
+ log(msg, "", level=1)
+ log_level_inc()
+ print(msg)
+
+def cmd(cmd, shell=True, include_stderr=False, background=False, fail=True):
+ """
+ Run a command in subprocess and return tuple of (retval, stdout);
+ optionally return stderr as a third value.
+ """
+ proc = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ if background:
+ msg = "%s START: %s" % (log_get_sec(1),
+ datetime.now().strftime("%H:%M:%S.%f"))
+ log("BKG " + proc.args, msg)
+ return proc
+
+ return cmd_result(proc, include_stderr=include_stderr, fail=fail)
+
+def cmd_result(proc, include_stderr=False, fail=False):
+ stdout, stderr = proc.communicate()
+ stdout = stdout.decode("utf-8")
+ stderr = stderr.decode("utf-8")
+ proc.stdout.close()
+ proc.stderr.close()
+
+ stderr = "\n" + stderr
+ if stderr[-1] == "\n":
+ stderr = stderr[:-1]
+
+ sec = log_get_sec(1)
+ log("CMD " + proc.args,
+ "RETCODE: %d\n%s STDOUT:\n%s%s STDERR:%s\n%s END: %s" %
+ (proc.returncode, sec, stdout, sec, stderr,
+ sec, datetime.now().strftime("%H:%M:%S.%f")))
+
+ if proc.returncode != 0 and fail:
+ if len(stderr) > 0 and stderr[-1] == "\n":
+ stderr = stderr[:-1]
+ raise Exception("Command failed: %s\n%s" % (proc.args, stderr))
+
+ if include_stderr:
+ return proc.returncode, stdout, stderr
+ else:
+ return proc.returncode, stdout
+
+def rm(f):
+ cmd("rm -f %s" % (f))
+ if f in files:
+ files.remove(f)
+
+def tool(name, args, flags, JSON=True, ns="", fail=True, include_stderr=False):
+ params = ""
+ if JSON:
+ params += "%s " % (flags["json"])
+
+ if ns != "":
+ ns = "ip netns exec %s " % (ns)
+
+ if include_stderr:
+ ret, stdout, stderr = cmd(ns + name + " " + params + args,
+ fail=fail, include_stderr=True)
+ else:
+ ret, stdout = cmd(ns + name + " " + params + args,
+ fail=fail, include_stderr=False)
+
+ if JSON and len(stdout.strip()) != 0:
+ out = json.loads(stdout)
+ else:
+ out = stdout
+
+ if include_stderr:
+ return ret, out, stderr
+ else:
+ return ret, out
+
+def bpftool(args, JSON=True, ns="", fail=True):
+ return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail)
+
+def bpftool_prog_list(expected=None, ns=""):
+ _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+ if expected is not None:
+ if len(progs) != expected:
+ fail(True, "%d BPF programs loaded, expected %d" %
+ (len(progs), expected))
+ return progs
+
+def bpftool_map_list(expected=None, ns=""):
+ _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+ if expected is not None:
+ if len(maps) != expected:
+ fail(True, "%d BPF maps loaded, expected %d" %
+ (len(maps), expected))
+ return maps
+
+def bpftool_prog_list_wait(expected=0, n_retry=20):
+ for i in range(n_retry):
+ nprogs = len(bpftool_prog_list())
+ if nprogs == expected:
+ return
+ time.sleep(0.05)
+ raise Exception("Time out waiting for program counts to stabilize want %d, have %d" % (expected, nprogs))
+
+def bpftool_map_list_wait(expected=0, n_retry=20):
+ for i in range(n_retry):
+ nmaps = len(bpftool_map_list())
+ if nmaps == expected:
+ return
+ time.sleep(0.05)
+ raise Exception("Time out waiting for map counts to stabilize want %d, have %d" % (expected, nmaps))
+
+def ip(args, force=False, JSON=True, ns="", fail=True, include_stderr=False):
+ if force:
+ args = "-force " + args
+ return tool("ip", args, {"json":"-j"}, JSON=JSON, ns=ns,
+ fail=fail, include_stderr=include_stderr)
+
+def tc(args, JSON=True, ns="", fail=True, include_stderr=False):
+ return tool("tc", args, {"json":"-p"}, JSON=JSON, ns=ns,
+ fail=fail, include_stderr=include_stderr)
+
+def ethtool(dev, opt, args, fail=True):
+ return cmd("ethtool %s %s %s" % (opt, dev["ifname"], args), fail=fail)
+
+def bpf_obj(name, sec=".text", path=bpf_test_dir):
+ return "obj %s sec %s" % (os.path.join(path, name), sec)
+
+def bpf_pinned(name):
+ return "pinned %s" % (name)
+
+def bpf_bytecode(bytecode):
+ return "bytecode \"%s\"" % (bytecode)
+
+def mknetns(n_retry=10):
+ for i in range(n_retry):
+ name = ''.join([random.choice(string.ascii_letters) for i in range(8)])
+ ret, _ = ip("netns add %s" % (name), fail=False)
+ if ret == 0:
+ netns.append(name)
+ return name
+ return None
+
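+# helpers converting between host integers and space-separated byte lists,
+# e.g. int2str("I", 1) -> "1 0 0 0" on a little-endian host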
+def int2str(fmt, val):
+ ret = []
+ for b in struct.pack(fmt, val):
+ ret.append(int(b))
+ return " ".join(map(lambda x: str(x), ret))
+
+def str2int(strtab):
+ inttab = []
+ for i in strtab:
+ inttab.append(int(i, 16))
+ ba = bytearray(inttab)
+ if len(strtab) == 4:
+ fmt = "I"
+ elif len(strtab) == 8:
+ fmt = "Q"
+ else:
+ raise Exception("String array of len %d can't be unpacked to an int" %
+ (len(strtab)))
+ return struct.unpack(fmt, ba)[0]
+
+class DebugfsDir:
+ """
+ Class for accessing DebugFS directories as a dictionary.
+ """
+
+ def __init__(self, path):
+ self.path = path
+ self._dict = self._debugfs_dir_read(path)
+
+ def __len__(self):
+ return len(self._dict.keys())
+
+ def __getitem__(self, key):
+ if type(key) is int:
+ key = list(self._dict.keys())[key]
+ return self._dict[key]
+
+ def __setitem__(self, key, value):
+ log("DebugFS set %s = %s" % (key, value), "")
+ log_level_inc()
+
+ cmd("echo '%s' > %s/%s" % (value, self.path, key))
+ log_level_dec()
+
+ _, out = cmd('cat %s/%s' % (self.path, key))
+ self._dict[key] = out.strip()
+
+ def _debugfs_dir_read(self, path):
+ dfs = {}
+
+ log("DebugFS state for %s" % (path), "")
+ log_level_inc(add=2)
+
+ _, out = cmd('ls ' + path)
+ for f in out.split():
+ p = os.path.join(path, f)
+ if os.path.isfile(p):
+ _, out = cmd('cat %s/%s' % (path, f))
+ dfs[f] = out.strip()
+ elif os.path.isdir(p):
+ dfs[f] = DebugfsDir(p)
+ else:
+ raise Exception("%s is neither file nor directory" % (p))
+
+ log_level_dec()
+ log("DebugFS state", dfs)
+ log_level_dec()
+
+ return dfs
+
+class NetdevSim:
+ """
+ Class for netdevsim netdevice and its attributes.
+ """
+
+ def __init__(self):
+ self.dev = self._netdevsim_create()
+ devs.append(self)
+
+ self.ns = ""
+
+ self.dfs_dir = '/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname'])
+ self.dfs_refresh()
+
+ def __getitem__(self, key):
+ return self.dev[key]
+
+ def _netdevsim_create(self):
+ _, old = ip("link show")
+ ip("link add sim%d type netdevsim")
+ _, new = ip("link show")
+
+ for dev in new:
+ f = filter(lambda x: x["ifname"] == dev["ifname"], old)
+ if len(list(f)) == 0:
+ return dev
+
+ raise Exception("failed to create netdevsim device")
+
+ def remove(self):
+ devs.remove(self)
+ ip("link del dev %s" % (self.dev["ifname"]), ns=self.ns)
+
+ def dfs_refresh(self):
+ self.dfs = DebugfsDir(self.dfs_dir)
+ return self.dfs
+
+ def dfs_num_bound_progs(self):
+ path = os.path.join(self.dfs_dir, "bpf_bound_progs")
+ _, progs = cmd('ls %s' % (path))
+ return len(progs.split())
+
+ def dfs_get_bound_progs(self, expected):
+ progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs"))
+ if expected is not None:
+ if len(progs) != expected:
+ fail(True, "%d BPF programs bound, expected %d" %
+ (len(progs), expected))
+ return progs
+
+ def wait_for_flush(self, bound=0, total=0, n_retry=20):
+ for i in range(n_retry):
+ nbound = self.dfs_num_bound_progs()
+ nprogs = len(bpftool_prog_list())
+ if nbound == bound and nprogs == total:
+ return
+ time.sleep(0.05)
+ raise Exception("Time out waiting for program counts to stabilize want %d/%d, have %d bound, %d loaded" % (bound, total, nbound, nprogs))
+
+ def set_ns(self, ns):
+ name = "1" if ns == "" else ns
+ ip("link set dev %s netns %s" % (self.dev["ifname"], name), ns=self.ns)
+ self.ns = ns
+
+ def set_mtu(self, mtu, fail=True):
+ return ip("link set dev %s mtu %d" % (self.dev["ifname"], mtu),
+ fail=fail)
+
+ def set_xdp(self, bpf, mode, force=False, JSON=True, verbose=False,
+ fail=True, include_stderr=False):
+ if verbose:
+ bpf += " verbose"
+ return ip("link set dev %s xdp%s %s" % (self.dev["ifname"], mode, bpf),
+ force=force, JSON=JSON,
+ fail=fail, include_stderr=include_stderr)
+
+ def unset_xdp(self, mode, force=False, JSON=True,
+ fail=True, include_stderr=False):
+ return ip("link set dev %s xdp%s off" % (self.dev["ifname"], mode),
+ force=force, JSON=JSON,
+ fail=fail, include_stderr=include_stderr)
+
+ def ip_link_show(self, xdp):
+ _, link = ip("link show dev %s" % (self['ifname']))
+ if len(link) > 1:
+ raise Exception("Multiple objects on ip link show")
+ if len(link) < 1:
+ return {}
+ fail(xdp != "xdp" in link,
+ "XDP program not reporting in iplink (reported %s, expected %s)" %
+ ("xdp" in link, xdp))
+ return link[0]
+
+ def tc_add_ingress(self):
+ tc("qdisc add dev %s ingress" % (self['ifname']))
+
+ def tc_del_ingress(self):
+ tc("qdisc del dev %s ingress" % (self['ifname']))
+
+ def tc_flush_filters(self, bound=0, total=0):
+ self.tc_del_ingress()
+ self.tc_add_ingress()
+ self.wait_for_flush(bound=bound, total=total)
+
+ def tc_show_ingress(self, expected=None):
+ # No JSON support, oh well...
+ flags = ["skip_sw", "skip_hw", "in_hw"]
+ named = ["protocol", "pref", "chain", "handle", "id", "tag"]
+
+ args = "-s filter show dev %s ingress" % (self['ifname'])
+ _, out = tc(args, JSON=False)
+
+ filters = []
+ lines = out.split('\n')
+ for line in lines:
+ words = line.split()
+ if "handle" not in words:
+ continue
+ fltr = {}
+ for flag in flags:
+ fltr[flag] = flag in words
+ for name in named:
+ try:
+ idx = words.index(name)
+ fltr[name] = words[idx + 1]
+ except ValueError:
+ pass
+ filters.append(fltr)
+
+ if expected is not None:
+ fail(len(filters) != expected,
+ "%d ingress filters loaded, expected %d" %
+ (len(filters), expected))
+ return filters
+
+ def cls_filter_op(self, op, qdisc="ingress", prio=None, handle=None,
+ chain=None, cls="", params="",
+ fail=True, include_stderr=False):
+ spec = ""
+ if prio is not None:
+ spec += " prio %d" % (prio)
+ if handle:
+ spec += " handle %s" % (handle)
+ if chain is not None:
+ spec += " chain %d" % (chain)
+
+ return tc("filter {op} dev {dev} {qdisc} {spec} {cls} {params}"\
+ .format(op=op, dev=self['ifname'], qdisc=qdisc, spec=spec,
+ cls=cls, params=params),
+ fail=fail, include_stderr=include_stderr)
+
+ def cls_bpf_add_filter(self, bpf, op="add", prio=None, handle=None,
+ chain=None, da=False, verbose=False,
+ skip_sw=False, skip_hw=False,
+ fail=True, include_stderr=False):
+ cls = "bpf " + bpf
+
+ params = ""
+ if da:
+ params += " da"
+ if verbose:
+ params += " verbose"
+ if skip_sw:
+ params += " skip_sw"
+ if skip_hw:
+ params += " skip_hw"
+
+ return self.cls_filter_op(op=op, prio=prio, handle=handle, cls=cls,
+ chain=chain, params=params,
+ fail=fail, include_stderr=include_stderr)
+
+ def set_ethtool_tc_offloads(self, enable, fail=True):
+ args = "hw-tc-offload %s" % ("on" if enable else "off")
+ return ethtool(self, "-K", args, fail=fail)
+
+################################################################################
+def clean_up():
+ global files, netns, devs
+
+ for dev in devs:
+ dev.remove()
+ for f in files:
+ cmd("rm -f %s" % (f))
+ for ns in netns:
+ cmd("ip netns delete %s" % (ns))
+ files = []
+ netns = []
+
+def pin_prog(file_name, idx=0):
+ progs = bpftool_prog_list(expected=(idx + 1))
+ prog = progs[idx]
+ bpftool("prog pin id %d %s" % (prog["id"], file_name))
+ files.append(file_name)
+
+ return file_name, bpf_pinned(file_name)
+
+def pin_map(file_name, idx=0, expected=1):
+ maps = bpftool_map_list(expected=expected)
+ m = maps[idx]
+ bpftool("map pin id %d %s" % (m["id"], file_name))
+ files.append(file_name)
+
+ return file_name, bpf_pinned(file_name)
+
+def check_dev_info_removed(prog_file=None, map_file=None):
+ bpftool_prog_list(expected=0)
+ ret, err = bpftool("prog show pin %s" % (prog_file), fail=False)
+ fail(ret == 0, "Showing prog with removed device did not fail")
+ fail(err["error"].find("No such device") == -1,
+ "Showing prog with removed device expected ENODEV, error is %s" %
+ (err["error"]))
+
+ bpftool_map_list(expected=0)
+ ret, err = bpftool("map show pin %s" % (map_file), fail=False)
+ fail(ret == 0, "Showing map with removed device did not fail")
+ fail(err["error"].find("No such device") == -1,
+ "Showing map with removed device expected ENODEV, error is %s" %
+ (err["error"]))
+
+def check_dev_info(other_ns, ns, prog_file=None, map_file=None, removed=False):
+ progs = bpftool_prog_list(expected=1, ns=ns)
+ prog = progs[0]
+
+ fail("dev" not in prog.keys(), "Device parameters not reported")
+ dev = prog["dev"]
+ fail("ifindex" not in dev.keys(), "Device parameters not reported")
+ fail("ns_dev" not in dev.keys(), "Device parameters not reported")
+ fail("ns_inode" not in dev.keys(), "Device parameters not reported")
+
+ if not other_ns:
+ fail("ifname" not in dev.keys(), "Ifname not reported")
+ fail(dev["ifname"] != sim["ifname"],
+ "Ifname incorrect %s vs %s" % (dev["ifname"], sim["ifname"]))
+ else:
+ fail("ifname" in dev.keys(), "Ifname is reported for other ns")
+
+ maps = bpftool_map_list(expected=2, ns=ns)
+ for m in maps:
+ fail("dev" not in m.keys(), "Device parameters not reported")
+ fail(dev != m["dev"], "Map's device different than program's")
+
+def check_extack(output, reference, args):
+ if skip_extack:
+ return
+ lines = output.split("\n")
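+ # the extack message is expected on the second line of the output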
+ comp = len(lines) >= 2 and lines[1] == reference
+ fail(not comp, "Missing or incorrect netlink extack message")
+
+def check_extack_nsim(output, reference, args):
+ check_extack(output, "Error: netdevsim: " + reference, args)
+
+def check_no_extack(res, needle):
+ fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"),
+ "Found '%s' in command output, leaky extack?" % (needle))
+
+def check_verifier_log(output, reference):
+ lines = output.split("\n")
+ for l in reversed(lines):
+ if l == reference:
+ return
+ fail(True, "Missing or incorrect message from netdevsim in verifier log")
+
+def test_spurios_extack(sim, obj, skip_hw, needle):
+ res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw,
+ include_stderr=True)
+ check_no_extack(res, needle)
+ res = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1,
+ skip_hw=skip_hw, include_stderr=True)
+ check_no_extack(res, needle)
+ res = sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf",
+ include_stderr=True)
+ check_no_extack(res, needle)
+
+
+# Parse command line
+parser = argparse.ArgumentParser()
+parser.add_argument("--log", help="output verbose log to given file")
+args = parser.parse_args()
+if args.log:
+ logfile = open(args.log, 'w+')
+ logfile.write("# -*-Org-*-")
+
+log("Prepare...", "", level=1)
+log_level_inc()
+
+# Check permissions
+skip(os.getuid() != 0, "test must be run as root")
+
+# Check tools
+ret, progs = bpftool("prog", fail=False)
+skip(ret != 0, "bpftool not installed")
+# Check no BPF programs are loaded
+skip(len(progs) != 0, "BPF programs already loaded on the system")
+
+# Check netdevsim
+ret, out = cmd("modprobe netdevsim", fail=False)
+skip(ret != 0, "netdevsim module could not be loaded")
+
+# Check debugfs
+_, out = cmd("mount")
+if out.find("/sys/kernel/debug type debugfs") == -1:
+ cmd("mount -t debugfs none /sys/kernel/debug")
+
+# Check samples are compiled
+samples = ["sample_ret0.o", "sample_map_ret0.o"]
+for s in samples:
+ ret, out = cmd("ls %s/%s" % (bpf_test_dir, s), fail=False)
+ skip(ret != 0, "sample %s/%s not found, please compile it" %
+ (bpf_test_dir, s))
+
+# Check if iproute2 is built with libmnl (needed by extack support)
+_, _, err = cmd("tc qdisc delete dev lo handle 0",
+ fail=False, include_stderr=True)
+if err.find("Error: Failed to find qdisc with specified handle.") == -1:
+ print("Warning: no extack message in iproute2 output, libmnl missing?")
+ log("Warning: no extack message in iproute2 output, libmnl missing?", "")
+ skip_extack = True
+
+# Check if net namespaces seem to work
+ns = mknetns()
+skip(ns is None, "Could not create a net namespace")
+cmd("ip netns delete %s" % (ns))
+netns = []
+
+try:
+ obj = bpf_obj("sample_ret0.o")
+ bytecode = bpf_bytecode("1,6 0 0 4294967295,")
+
+ start_test("Test destruction of generic XDP...")
+ sim = NetdevSim()
+ sim.set_xdp(obj, "generic")
+ sim.remove()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+ sim.tc_add_ingress()
+
+ start_test("Test TC non-offloaded...")
+ ret, _ = sim.cls_bpf_add_filter(obj, skip_hw=True, fail=False)
+ fail(ret != 0, "Software TC filter did not load")
+
+ start_test("Test TC non-offloaded isn't getting bound...")
+ ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
+ fail(ret != 0, "Software TC filter did not load")
+ sim.dfs_get_bound_progs(expected=0)
+
+ sim.tc_flush_filters()
+
+ start_test("Test TC offloads are off by default...")
+ ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "TC filter loaded without enabling TC offloads")
+ check_extack(err, "Error: TC offload is disabled on net device.", args)
+ sim.wait_for_flush()
+
+ sim.set_ethtool_tc_offloads(True)
+ sim.dfs["bpf_tc_non_bound_accept"] = "Y"
+
+ start_test("Test TC offload by default...")
+ ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
+ fail(ret != 0, "Software TC filter did not load")
+ sim.dfs_get_bound_progs(expected=0)
+ ingress = sim.tc_show_ingress(expected=1)
+ fltr = ingress[0]
+ fail(not fltr["in_hw"], "Filter not offloaded by default")
+
+ sim.tc_flush_filters()
+
+ start_test("Test TC cBPF bytecode tries offload by default...")
+ ret, _ = sim.cls_bpf_add_filter(bytecode, fail=False)
+ fail(ret != 0, "Software TC filter did not load")
+ sim.dfs_get_bound_progs(expected=0)
+ ingress = sim.tc_show_ingress(expected=1)
+ fltr = ingress[0]
+ fail(not fltr["in_hw"], "Bytecode not offloaded by default")
+
+ sim.tc_flush_filters()
+ sim.dfs["bpf_tc_non_bound_accept"] = "N"
+
+ start_test("Test TC cBPF unbound bytecode doesn't offload...")
+ ret, _, err = sim.cls_bpf_add_filter(bytecode, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "TC bytecode loaded for offload")
+ check_extack_nsim(err, "netdevsim configured to reject unbound programs.",
+ args)
+ sim.wait_for_flush()
+
+ start_test("Test non-0 chain offload...")
+ ret, _, err = sim.cls_bpf_add_filter(obj, chain=1, prio=1, handle=1,
+ skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Offloaded a filter to chain other than 0")
+ check_extack(err, "Error: Driver supports only offload of chain 0.", args)
+ sim.tc_flush_filters()
+
+ start_test("Test TC replace...")
+ sim.cls_bpf_add_filter(obj, prio=1, handle=1)
+ sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1)
+ sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+ sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_sw=True)
+ sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_sw=True)
+ sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+ sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=True)
+ sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1, skip_hw=True)
+ sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+ start_test("Test TC replace bad flags...")
+ for i in range(3):
+ for j in range(3):
+ ret, _ = sim.cls_bpf_add_filter(obj, op="replace", prio=1, handle=1,
+ skip_sw=(j == 1), skip_hw=(j == 2),
+ fail=False)
+ fail(bool(ret) != bool(j),
+ "Software TC incorrect load in replace test, iteration %d" %
+ (j))
+ sim.cls_filter_op(op="delete", prio=1, handle=1, cls="bpf")
+
+ start_test("Test spurious extack from the driver...")
+ test_spurious_extack(sim, obj, False, "netdevsim")
+ test_spurious_extack(sim, obj, True, "netdevsim")
+
+ sim.set_ethtool_tc_offloads(False)
+
+ test_spurious_extack(sim, obj, False, "TC offload is disabled")
+ test_spurious_extack(sim, obj, True, "TC offload is disabled")
+
+ sim.set_ethtool_tc_offloads(True)
+
+ sim.tc_flush_filters()
+
+ start_test("Test TC offloads work...")
+ ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret != 0, "TC filter did not load with TC offloads enabled")
+ check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+
+ start_test("Test TC offload basics...")
+ dfs = sim.dfs_get_bound_progs(expected=1)
+ progs = bpftool_prog_list(expected=1)
+ ingress = sim.tc_show_ingress(expected=1)
+
+ dprog = dfs[0]
+ prog = progs[0]
+ fltr = ingress[0]
+ fail(fltr["skip_hw"], "TC reports 'skip_hw' on offloaded filter")
+ fail(not fltr["in_hw"], "TC does not report 'in_hw' for offloaded filter")
+ fail(not fltr["skip_sw"], "TC does not report 'skip_sw' back")
+
+ start_test("Test TC offload is device-bound...")
+ fail(str(prog["id"]) != fltr["id"], "Program IDs don't match")
+ fail(prog["tag"] != fltr["tag"], "Program tags don't match")
+ fail(fltr["id"] != dprog["id"], "Program IDs don't match")
+ fail(dprog["state"] != "xlated", "Offloaded program state not translated")
+ fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
+
+ start_test("Test disabling TC offloads is rejected while filters installed...")
+ ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
+ fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
+
+ start_test("Test qdisc removal frees things...")
+ sim.tc_flush_filters()
+ sim.tc_show_ingress(expected=0)
+
+ start_test("Test disabling TC offloads is OK without filters...")
+ ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
+ fail(ret != 0,
+ "Driver refused to disable TC offloads without filters installed...")
+
+ sim.set_ethtool_tc_offloads(True)
+
+ start_test("Test destroying device gets rid of TC filters...")
+ sim.cls_bpf_add_filter(obj, skip_sw=True)
+ sim.remove()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+ sim.set_ethtool_tc_offloads(True)
+
+ start_test("Test destroying device gets rid of XDP...")
+ sim.set_xdp(obj, "offload")
+ sim.remove()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+ sim.set_ethtool_tc_offloads(True)
+
+ start_test("Test XDP prog reporting...")
+ sim.set_xdp(obj, "drv")
+ ipl = sim.ip_link_show(xdp=True)
+ progs = bpftool_prog_list(expected=1)
+ fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
+ "Loaded program has wrong ID")
+
+ start_test("Test XDP prog replace without force...")
+ ret, _ = sim.set_xdp(obj, "drv", fail=False)
+ fail(ret == 0, "Replaced XDP program without -force")
+ sim.wait_for_flush(total=1)
+
+ start_test("Test XDP prog replace with force...")
+ ret, _ = sim.set_xdp(obj, "drv", force=True, fail=False)
+ fail(ret != 0, "Could not replace XDP program with -force")
+ bpftool_prog_list_wait(expected=1)
+ ipl = sim.ip_link_show(xdp=True)
+ progs = bpftool_prog_list(expected=1)
+ fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
+ "Loaded program has wrong ID")
+ fail("dev" in progs[0].keys(),
+ "Device parameters reported for non-offloaded program")
+
+ start_test("Test XDP prog replace with bad flags...")
+ ret, _, err = sim.set_xdp(obj, "offload", force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Replaced XDP program with a program in different mode")
+ check_extack_nsim(err, "program loaded with different flags.", args)
+ ret, _, err = sim.set_xdp(obj, "", force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Replaced XDP program with a program in different mode")
+ check_extack_nsim(err, "program loaded with different flags.", args)
+
+ start_test("Test XDP prog remove with bad flags...")
+ ret, _, err = sim.unset_xdp("offload", force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Removed program with a bad mode")
+ check_extack_nsim(err, "program loaded with different flags.", args)
+ ret, _, err = sim.unset_xdp("", force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Removed program with a bad mode")
+ check_extack_nsim(err, "program loaded with different flags.", args)
+
+ start_test("Test MTU restrictions...")
+ ret, _ = sim.set_mtu(9000, fail=False)
+ fail(ret == 0,
+ "Driver should refuse to increase MTU to 9000 with XDP loaded...")
+ sim.unset_xdp("drv")
+ bpftool_prog_list_wait(expected=0)
+ sim.set_mtu(9000)
+ ret, _, err = sim.set_xdp(obj, "drv", fail=False, include_stderr=True)
+ fail(ret == 0, "Driver should refuse to load program with MTU of 9000...")
+ check_extack_nsim(err, "MTU too large w/ XDP enabled.", args)
+ sim.set_mtu(1500)
+
+ sim.wait_for_flush()
+ start_test("Test XDP offload...")
+ _, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True)
+ ipl = sim.ip_link_show(xdp=True)
+ link_xdp = ipl["xdp"]["prog"]
+ progs = bpftool_prog_list(expected=1)
+ prog = progs[0]
+ fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID")
+ check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+
+ start_test("Test XDP offload is device bound...")
+ dfs = sim.dfs_get_bound_progs(expected=1)
+ dprog = dfs[0]
+
+ fail(prog["id"] != link_xdp["id"], "Program IDs don't match")
+ fail(prog["tag"] != link_xdp["tag"], "Program tags don't match")
+ fail(str(link_xdp["id"]) != dprog["id"], "Program IDs don't match")
+ fail(dprog["state"] != "xlated", "Offloaded program state not translated")
+ fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
+
+ start_test("Test removing XDP program many times...")
+ sim.unset_xdp("offload")
+ sim.unset_xdp("offload")
+ sim.unset_xdp("drv")
+ sim.unset_xdp("drv")
+ sim.unset_xdp("")
+ sim.unset_xdp("")
+ bpftool_prog_list_wait(expected=0)
+
+ start_test("Test attempt to use a program for a wrong device...")
+ sim2 = NetdevSim()
+ sim2.set_xdp(obj, "offload")
+ pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
+
+ ret, _, err = sim.set_xdp(pinned, "offload",
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Pinned program loaded for a different device accepted")
+ check_extack_nsim(err, "program bound to different dev.", args)
+ sim2.remove()
+ ret, _, err = sim.set_xdp(pinned, "offload",
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Pinned program loaded for a removed device accepted")
+ check_extack_nsim(err, "xdpoffload of non-bound program.", args)
+ rm(pin_file)
+ bpftool_prog_list_wait(expected=0)
+
+ start_test("Test mixing of TC and XDP...")
+ sim.tc_add_ingress()
+ sim.set_xdp(obj, "offload")
+ ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Loading TC when XDP active should fail")
+ check_extack_nsim(err, "driver and netdev offload states mismatch.", args)
+ sim.unset_xdp("offload")
+ sim.wait_for_flush()
+
+ sim.cls_bpf_add_filter(obj, skip_sw=True)
+ ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
+ fail(ret == 0, "Loading XDP when TC active should fail")
+ check_extack_nsim(err, "TC program is already loaded.", args)
+
+ start_test("Test binding TC from pinned...")
+ pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
+ sim.tc_flush_filters(bound=1, total=1)
+ sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True)
+ sim.tc_flush_filters(bound=1, total=1)
+
+ start_test("Test binding XDP from pinned...")
+ sim.set_xdp(obj, "offload")
+ pin_file, pinned = pin_prog("/sys/fs/bpf/tmp2", idx=1)
+
+ sim.set_xdp(pinned, "offload", force=True)
+ sim.unset_xdp("offload")
+ sim.set_xdp(pinned, "offload", force=True)
+ sim.unset_xdp("offload")
+
+ start_test("Test offload of wrong type fails...")
+ ret, _ = sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True, fail=False)
+ fail(ret == 0, "Managed to attach XDP program to TC")
+
+ start_test("Test asking for TC offload of two filters...")
+ sim.cls_bpf_add_filter(obj, da=True, skip_sw=True)
+ ret, _, err = sim.cls_bpf_add_filter(obj, da=True, skip_sw=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Managed to offload two TC filters at the same time")
+ check_extack_nsim(err, "driver and netdev offload states mismatch.", args)
+
+ sim.tc_flush_filters(bound=2, total=2)
+
+ start_test("Test if netdev removal waits for translation...")
+ delay_msec = 500
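+ # netdevsim exposes a debugfs knob which delays its verifier callback;
+ # use it so the device can be removed while translation is in flight.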
+ sim.dfs["bpf_bind_verifier_delay"] = delay_msec
+ start = time.time()
+ cmd_line = "tc filter add dev %s ingress bpf %s da skip_sw" % \
+ (sim['ifname'], obj)
+ tc_proc = cmd(cmd_line, background=True, fail=False)
+ # Wait for the verifier to start
+ while sim.dfs_num_bound_progs() <= 2:
+ pass
+ sim.remove()
+ end = time.time()
+ ret, _ = cmd_result(tc_proc, fail=False)
+ time_diff = end - start
+ log("Time", "start:\t%s\nend:\t%s\ndiff:\t%s" % (start, end, time_diff))
+
+ fail(ret == 0, "Managed to load TC filter on an unregistering device")
+ delay_sec = delay_msec * 0.001
+ fail(time_diff < delay_sec, "Removal process took %s, expected %s" %
+ (time_diff, delay_sec))
+
+ # Remove all pinned files and reinstantiate the netdev
+ clean_up()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+ map_obj = bpf_obj("sample_map_ret0.o")
+ start_test("Test loading program with maps...")
+ sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
+
+ start_test("Test bpftool bound info reporting (own ns)...")
+ check_dev_info(False, "")
+
+ start_test("Test bpftool bound info reporting (other ns)...")
+ ns = mknetns()
+ sim.set_ns(ns)
+ check_dev_info(True, "")
+
+ start_test("Test bpftool bound info reporting (remote ns)...")
+ check_dev_info(False, ns)
+
+ start_test("Test bpftool bound info reporting (back to own ns)...")
+ sim.set_ns("")
+ check_dev_info(False, "")
+
+ prog_file, _ = pin_prog("/sys/fs/bpf/tmp_prog")
+ map_file, _ = pin_map("/sys/fs/bpf/tmp_map", idx=1, expected=2)
+ sim.remove()
+
+ start_test("Test bpftool bound info reporting (removed dev)...")
+ check_dev_info_removed(prog_file=prog_file, map_file=map_file)
+
+ # Remove all pinned files and reinstantiate the netdev
+ clean_up()
+ bpftool_prog_list_wait(expected=0)
+
+ sim = NetdevSim()
+
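+ # bpftool map update flags mirror the syscall: no flag means BPF_ANY,
+ # "exist" means BPF_EXIST (key must be present), "noexist" BPF_NOEXIST.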
+ start_test("Test map update (no flags)...")
+ sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
+ maps = bpftool_map_list(expected=2)
+ array = maps[0] if maps[0]["type"] == "array" else maps[1]
+ htab = maps[0] if maps[0]["type"] == "hash" else maps[1]
+ for m in maps:
+ for i in range(2):
+ bpftool("map update id %d key %s value %s" %
+ (m["id"], int2str("I", i), int2str("Q", i * 3)))
+
+ for m in maps:
+ ret, _ = bpftool("map update id %d key %s value %s" %
+ (m["id"], int2str("I", 3), int2str("Q", 3 * 3)),
+ fail=False)
+ fail(ret == 0, "added too many entries")
+
+ start_test("Test map update (exists)...")
+ for m in maps:
+ for i in range(2):
+ bpftool("map update id %d key %s value %s exist" %
+ (m["id"], int2str("I", i), int2str("Q", i * 3)))
+
+ for m in maps:
+ ret, err = bpftool("map update id %d key %s value %s exist" %
+ (m["id"], int2str("I", 3), int2str("Q", 3 * 3)),
+ fail=False)
+ fail(ret == 0, "updated non-existing key")
+ fail(err["error"].find("No such file or directory") == -1,
+ "expected ENOENT, error is '%s'" % (err["error"]))
+
+ start_test("Test map update (noexist)...")
+ for m in maps:
+ for i in range(2):
+ ret, err = bpftool("map update id %d key %s value %s noexist" %
+ (m["id"], int2str("I", i), int2str("Q", i * 3)),
+ fail=False)
+ fail(ret == 0, "updated existing key")
+ fail(err["error"].find("File exists") == -1,
+ "expected EEXIST, error is '%s'" % (err["error"]))
+
+ start_test("Test map dump...")
+ for m in maps:
+ _, entries = bpftool("map dump id %d" % (m["id"]))
+ for i in range(2):
+ key = str2int(entries[i]["key"])
+ fail(key != i, "expected key %d, got %d" % (key, i))
+ val = str2int(entries[i]["value"])
+ fail(val != i * 3, "expected value %d, got %d" % (val, i * 3))
+
+ start_test("Test map getnext...")
+ for m in maps:
+ _, entry = bpftool("map getnext id %d" % (m["id"]))
+ key = str2int(entry["next_key"])
+ fail(key != 0, "next key %d, expected %d" % (key, 0))
+ _, entry = bpftool("map getnext id %d key %s" %
+ (m["id"], int2str("I", 0)))
+ key = str2int(entry["next_key"])
+ fail(key != 1, "next key %d, expected %d" % (key, 1))
+ ret, err = bpftool("map getnext id %d key %s" %
+ (m["id"], int2str("I", 1)), fail=False)
+ fail(ret == 0, "got next key past the end of map")
+ fail(err["error"].find("No such file or directory") == -1,
+ "expected ENOENT, error is '%s'" % (err["error"]))
+
+ start_test("Test map delete (htab)...")
+ for i in range(2):
+ bpftool("map delete id %d key %s" % (htab["id"], int2str("I", i)))
+
+ start_test("Test map delete (array)...")
+ for i in range(2):
+ ret, err = bpftool("map delete id %d key %s" %
+ (htab["id"], int2str("I", i)), fail=False)
+ fail(ret == 0, "removed entry from an array")
+ fail(err["error"].find("No such file or directory") == -1,
+ "expected ENOENT, error is '%s'" % (err["error"]))
+
+ start_test("Test map remove...")
+ sim.unset_xdp("offload")
+ bpftool_map_list_wait(expected=0)
+ sim.remove()
+
+ sim = NetdevSim()
+ sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
+ sim.remove()
+ bpftool_map_list_wait(expected=0)
+
+ start_test("Test map creation fail path...")
+ sim = NetdevSim()
+ sim.dfs["bpf_map_accept"] = "N"
+ ret, _ = sim.set_xdp(map_obj, "offload", JSON=False, fail=False)
+ fail(ret == 0,
+ "netdevsim didn't refuse to create a map with offload disabled")
+
+ print("%s: OK" % (os.path.basename(__file__)))
+
+finally:
+ log("Clean up...", "", level=1)
+ log_level_inc()
+ clean_up()
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 6761be18a91f..b549308abd19 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -21,8 +21,10 @@ typedef __u16 __sum16;
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
+#include <linux/perf_event.h>
#include <linux/unistd.h>
+#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <sys/types.h>
@@ -167,10 +169,9 @@ out:
#define NUM_ITER 100000
#define VIP_NUM 5
-static void test_l4lb(void)
+static void test_l4lb(const char *file)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
- const char *file = "./test_l4lb.o";
struct vip key = {.protocol = 6};
struct vip_meta {
__u32 flags;
@@ -247,6 +248,95 @@ out:
bpf_object__close(obj);
}
+static void test_l4lb_all(void)
+{
+ const char *file1 = "./test_l4lb.o";
+ const char *file2 = "./test_l4lb_noinline.o";
+
+ test_l4lb(file1);
+ test_l4lb(file2);
+}
+
+static void test_xdp_noinline(void)
+{
+ const char *file = "./test_xdp_noinline.o";
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct vip key = {.protocol = 6};
+ struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+ } value = {.vip_num = VIP_NUM};
+ __u32 stats_key = VIP_NUM;
+ struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+ } stats[nr_cpus];
+ struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+ } real_def = {.dst = MAGIC_VAL};
+ __u32 ch_key = 11, real_num = 3;
+ __u32 duration, retval, size;
+ int err, i, prog_fd, map_fd;
+ __u64 bytes = 0, pkts = 0;
+ struct bpf_object *obj;
+ char buf[128];
+ u32 *magic = (u32 *)buf;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip_map");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key, &value, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "ch_rings");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "reals");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || errno || retval != 1 || size != 54 ||
+ *magic != MAGIC_VAL, "ipv4",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || errno || retval != 1 || size != 74 ||
+ *magic != MAGIC_VAL, "ipv6",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ map_fd = bpf_find_map(__func__, obj, "stats");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ for (i = 0; i < nr_cpus; i++) {
+ bytes += stats[i].bytes;
+ pkts += stats[i].pkts;
+ }
+ if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+ error_cnt++;
+ printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+ }
+out:
+ bpf_object__close(obj);
+}
+
static void test_tcp_estats(void)
{
const char *file = "./test_tcp_estats.o";
@@ -617,6 +707,262 @@ static void test_obj_name(void)
}
}
+static void test_tp_attach_query(void)
+{
+ const int num_progs = 3;
+ int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
+ __u32 duration = 0, info_len, saved_prog_ids[num_progs];
+ const char *file = "./test_tracepoint.o";
+ struct perf_event_query_bpf *query;
+ struct perf_event_attr attr = {};
+ struct bpf_object *obj[num_progs];
+ struct bpf_prog_info prog_info;
+ char buf[256];
+
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ return;
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ return;
+
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+
+ query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
+ for (i = 0; i < num_progs; i++) {
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+ &prog_fd[i]);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto cleanup1;
+
+ bzero(&prog_info, sizeof(prog_info));
+ prog_info.jited_prog_len = 0;
+ prog_info.xlated_prog_len = 0;
+ prog_info.nr_map_ids = 0;
+ info_len = sizeof(prog_info);
+ err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
+ if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+ err, errno))
+ goto cleanup1;
+ saved_prog_ids[i] = prog_info.id;
+
+ pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd[i], errno))
+ goto cleanup2;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 0) {
+ /* check NULL prog array query */
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 0,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 1) {
+ /* try to get # of programs only */
+ query->ids_len = 0;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+
+ /* try a few negative tests */
+ /* invalid query pointer */
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
+ (struct perf_event_query_bpf *)0x1);
+ if (CHECK(!err || errno != EFAULT,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d\n", err, errno))
+ goto cleanup3;
+
+ /* no enough space */
+ query->ids_len = 1;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != (i + 1),
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ for (j = 0; j < i + 1; j++)
+ if (CHECK(saved_prog_ids[j] != query->ids[j],
+ "perf_event_ioc_query_bpf",
+ "#%d saved_prog_id %x query prog_id %x\n",
+ j, saved_prog_ids[j], query->ids[j]))
+ goto cleanup3;
+ }
+
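+ /* Error paths jump into the middle of this loop so that only the
+ * resources acquired up to iteration i get released, in reverse order.
+ */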
+ i = num_progs - 1;
+ for (; i >= 0; i--) {
+ cleanup3:
+ ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
+ cleanup2:
+ close(pmu_fd[i]);
+ cleanup1:
+ bpf_object__close(obj[i]);
+ }
+ free(query);
+}
+
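+/* Walk all keys of map1 and verify each can be looked up in map2.
+ * Returns 0 when the iteration ends cleanly (get_next_key sets ENOENT),
+ * non-zero otherwise.
+ */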
+static int compare_map_keys(int map1_fd, int map2_fd)
+{
+ __u32 key, next_key;
+ char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)];
+ int err;
+
+ err = bpf_map_get_next_key(map1_fd, NULL, &key);
+ if (err)
+ return err;
+ err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
+ if (err)
+ return err;
+
+ while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
+ err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
+ if (err)
+ return err;
+
+ key = next_key;
+ }
+ if (errno != ENOENT)
+ return -1;
+
+ return 0;
+}
+
+static void test_stacktrace_map(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd;
+ const char *file = "./test_stacktrace_map.o";
+ int bytes, efd, err, pmu_fd, prog_fd;
+ struct perf_event_attr attr = {};
+ __u32 key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* Get the ID for the sched/sched_switch tracepoint */
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ goto close_prog;
+
+ /* Open the perf event and attach the bpf program */
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* give the bpf program some time to run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* For every element in stackid_hmap there should be a corresponding
+ * one in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ ; /* fall through */
+
+disable_pmu:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+ close(pmu_fd);
+
+close_prog:
+ bpf_object__close(obj);
+
+out:
+ return;
+}
+
int main(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -625,11 +971,14 @@ int main(void)
test_pkt_access();
test_xdp();
- test_l4lb();
+ test_l4lb_all();
+ test_xdp_noinline();
test_tcp_estats();
test_bpf_obj_id();
test_pkt_md_access();
test_obj_name();
+ test_tp_attach_query();
+ test_stacktrace_map();
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/test_stacktrace_map.c
new file mode 100644
index 000000000000..76d85c5d08bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_stacktrace_map.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH 127
+#endif
+
+struct bpf_map_def SEC("maps") control_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") stackid_hmap = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 10000,
+};
+
+struct bpf_map_def SEC("maps") stackmap = {
+ .type = BPF_MAP_TYPE_STACK_TRACE,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
+ .max_entries = 10000,
+};
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+ unsigned long long pad;
+ char prev_comm[16];
+ int prev_pid;
+ int prev_prio;
+ long long prev_state;
+ char next_comm[16];
+ int next_pid;
+ int next_prio;
+};
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+ __u32 key = 0, val = 0, *value_p;
+
+ value_p = bpf_map_lookup_elem(&control_map, &key);
+ if (value_p && *value_p)
+ return 0; /* skip if non-zero *value_p */
+
+ /* The size of stackmap and stackid_hmap should be the same */
+ key = bpf_get_stackid(ctx, &stackmap, 0);
+ if ((int)key >= 0)
+ bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h
new file mode 100644
index 000000000000..2fe43289943c
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpbpf.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _TEST_TCPBPF_H
+#define _TEST_TCPBPF_H
+
+struct tcpbpf_globals {
+ __u32 event_map;
+ __u32 total_retrans;
+ __u32 data_segs_in;
+ __u32 data_segs_out;
+ __u32 bad_cb_test_rv;
+ __u32 good_cb_test_rv;
+ __u64 bytes_received;
+ __u64 bytes_acked;
+};
+#endif
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
new file mode 100644
index 000000000000..57119ad57a3f
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/in6.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/tcp.h>
+#include <netinet/in.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+#include "test_tcpbpf.h"
+
+struct bpf_map_def SEC("maps") global_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct tcpbpf_globals),
+ .max_entries = 2,
+};
+
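+/* Record that sock_ops callback @event fired by setting its bit in the
+ * event_map of the single global_map entry.
+ */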
+static inline void update_event_map(int event)
+{
+ __u32 key = 0;
+ struct tcpbpf_globals g, *gp;
+
+ gp = bpf_map_lookup_elem(&global_map, &key);
+ if (gp == NULL) {
+ struct tcpbpf_globals g = {0};
+
+ g.event_map |= (1 << event);
+ bpf_map_update_elem(&global_map, &key, &g,
+ BPF_ANY);
+ } else {
+ g = *gp;
+ g.event_map |= (1 << event);
+ bpf_map_update_elem(&global_map, &key, &g,
+ BPF_ANY);
+ }
+}
+
+int _version SEC("version") = 1;
+
+SEC("sockops")
+int bpf_testcb(struct bpf_sock_ops *skops)
+{
+ int rv = -1;
+ int bad_call_rv = 0;
+ int good_call_rv = 0;
+ int op;
+ int v = 0;
+
+ op = (int) skops->op;
+
+ update_event_map(op);
+
+ switch (op) {
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ /* Test failure to set largest cb flag (assumes not defined) */
+ bad_call_rv = bpf_sock_ops_cb_flags_set(skops, 0x80);
+ /* Set callback */
+ good_call_rv = bpf_sock_ops_cb_flags_set(skops,
+ BPF_SOCK_OPS_STATE_CB_FLAG);
+ /* Update results */
+ {
+ __u32 key = 0;
+ struct tcpbpf_globals g, *gp;
+
+ gp = bpf_map_lookup_elem(&global_map, &key);
+ if (!gp)
+ break;
+ g = *gp;
+ g.bad_cb_test_rv = bad_call_rv;
+ g.good_cb_test_rv = good_call_rv;
+ bpf_map_update_elem(&global_map, &key, &g,
+ BPF_ANY);
+ }
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ skops->sk_txhash = 0x12345f;
+ v = 0xff;
+ rv = bpf_setsockopt(skops, SOL_IPV6, IPV6_TCLASS, &v,
+ sizeof(v));
+ break;
+ case BPF_SOCK_OPS_RTO_CB:
+ break;
+ case BPF_SOCK_OPS_RETRANS_CB:
+ break;
+ case BPF_SOCK_OPS_STATE_CB:
+ if (skops->args[1] == BPF_TCP_CLOSE) {
+ __u32 key = 0;
+ struct tcpbpf_globals g, *gp;
+
+ gp = bpf_map_lookup_elem(&global_map, &key);
+ if (!gp)
+ break;
+ g = *gp;
+ g.total_retrans = skops->total_retrans;
+ g.data_segs_in = skops->data_segs_in;
+ g.data_segs_out = skops->data_segs_out;
+ g.bytes_received = skops->bytes_received;
+ g.bytes_acked = skops->bytes_acked;
+ bpf_map_update_elem(&global_map, &key, &g,
+ BPF_ANY);
+ }
+ break;
+ default:
+ rv = -1;
+ }
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
new file mode 100644
index 000000000000..95a370f3d378
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+#include "test_tcpbpf.h"
+
+static int bpf_find_map(const char *test, struct bpf_object *obj,
+ const char *name)
+{
+ struct bpf_map *map;
+
+ map = bpf_object__find_map_by_name(obj, name);
+ if (!map) {
+ printf("%s:FAIL:map '%s' not found\n", test, name);
+ return -1;
+ }
+ return bpf_map__fd(map);
+}
+
+#define SYSTEM(CMD) \
+ do { \
+ if (system(CMD)) { \
+ printf("system(%s) FAILS!\n", CMD); \
+ } \
+ } while (0)
+
+int main(int argc, char **argv)
+{
+ const char *file = "test_tcpbpf_kern.o";
+ struct tcpbpf_globals g = {0};
+ int cg_fd, prog_fd, map_fd;
+ bool debug_flag = false;
+ int error = EXIT_FAILURE;
+ struct bpf_object *obj;
+ char cmd[100], *dir;
+ struct stat buffer;
+ __u32 key = 0;
+ int pid;
+ int rv;
+
+ if (argc > 1 && strcmp(argv[1], "-d") == 0)
+ debug_flag = true;
+
+ dir = "/tmp/cgroupv2/foo";
+
+ if (stat(dir, &buffer) != 0) {
+ SYSTEM("mkdir -p /tmp/cgroupv2");
+ SYSTEM("mount -t cgroup2 none /tmp/cgroupv2");
+ SYSTEM("mkdir -p /tmp/cgroupv2/foo");
+ }
+ pid = (int) getpid();
+ sprintf(cmd, "echo %d >> /tmp/cgroupv2/foo/cgroup.procs", pid);
+ SYSTEM(cmd);
+
+ cg_fd = open(dir, O_DIRECTORY | O_RDONLY);
+ if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
+ printf("FAILED: load_bpf_file failed for: %s\n", file);
+ goto err;
+ }
+
+ rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
+ if (rv) {
+ printf("FAILED: bpf_prog_attach: %d (%s)\n",
+ rv, strerror(errno));
+ goto err;
+ }
+
+ SYSTEM("./tcp_server.py");
+
+ map_fd = bpf_find_map(__func__, obj, "global_map");
+ if (map_fd < 0)
+ goto err;
+
+ rv = bpf_map_lookup_elem(map_fd, &key, &g);
+ if (rv != 0) {
+ printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
+ goto err;
+ }
+
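+ /* 0x47e is the bitmap of sock_ops callbacks the program should have
+ * recorded via update_event_map() by the time the connection closes.
+ */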
+ if (g.bytes_received != 501 || g.bytes_acked != 1002 ||
+ g.data_segs_in != 1 || g.data_segs_out != 1 ||
+ (g.event_map ^ 0x47e) != 0 || g.bad_cb_test_rv != 0x80 ||
+ g.good_cb_test_rv != 0) {
+ printf("FAILED: Wrong stats\n");
+ if (debug_flag) {
+ printf("\n");
+ printf("bytes_received: %d (expecting 501)\n",
+ (int)g.bytes_received);
+ printf("bytes_acked: %d (expecting 1002)\n",
+ (int)g.bytes_acked);
+ printf("data_segs_in: %d (expecting 1)\n",
+ g.data_segs_in);
+ printf("data_segs_out: %d (expecting 1)\n",
+ g.data_segs_out);
+ printf("event_map: 0x%x (at least 0x47e)\n",
+ g.event_map);
+ printf("bad_cb_test_rv: 0x%x (expecting 0x80)\n",
+ g.bad_cb_test_rv);
+ printf("good_cb_test_rv:0x%x (expecting 0)\n",
+ g.good_cb_test_rv);
+ }
+ goto err;
+ }
+ printf("PASSED!\n");
+ error = 0;
+err:
+ bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
+ return error;
+
+}
diff --git a/tools/testing/selftests/bpf/test_tracepoint.c b/tools/testing/selftests/bpf/test_tracepoint.c
new file mode 100644
index 000000000000..04bf084517e0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tracepoint.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+ unsigned long long pad;
+ char prev_comm[16];
+ int prev_pid;
+ int prev_prio;
+ long long prev_state;
+ char next_comm[16];
+ int next_pid;
+ int next_prio;
+};
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index b51017404c62..697bd83de295 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2,6 +2,7 @@
* Testsuite for eBPF verifier
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -20,6 +21,7 @@
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
+#include <limits.h>
#include <sys/capability.h>
#include <sys/resource.h>
@@ -28,6 +30,7 @@
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
+#include <linux/if_ether.h>
#include <bpf/bpf.h>
@@ -48,6 +51,8 @@
#define MAX_INSNS 512
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 4
+#define POINTER_VALUE 0xcafe4all
+#define TEST_DATA_LEN 64
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
@@ -61,6 +66,7 @@ struct bpf_test {
int fixup_map_in_map[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
+ uint32_t retval;
enum {
UNDEF,
ACCEPT,
@@ -94,6 +100,326 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .retval = -3,
+ },
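+ /* Runtime semantics of div/mod by zero checked below: BPF_DIV yields
+ * 0 and BPF_MOD leaves the dividend untouched, so none of these may
+ * trap at runtime.
+ */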
+ {
+ "DIV32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "DIV32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "DIV64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "MOD32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "MOD32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "MOD64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "DIV32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 2),
+ BPF_MOV32_IMM(BPF_REG_2, 16),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 8,
+ },
+ {
+ "DIV32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV64 by 0, zero check, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "MOD32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 3),
+ BPF_MOV32_IMM(BPF_REG_2, 5),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+ },
+ {
+ "MOD32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "MOD32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "MOD64 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+ },
+ {
+ "MOD64 by 0, zero check 2, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, -1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = -1,
+ },
+ /* Just make sure that JITs use udiv/umod, as otherwise we would get
+ * an exception from INT_MIN/-1 overflow, similarly to div by zero.
+ */
+ {
+ "DIV32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "DIV64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "MOD32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+ },
+ {
+ "MOD32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+ },
+ {
+ "MOD64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "MOD64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "xor32 zero extend check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_2, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
+ BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "empty prog",
+ .insns = {
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+ },
+ {
+ "only exit insn",
+ .insns = {
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
},
{
"unreachable",
@@ -209,6 +535,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .retval = 1,
},
{
"test8 ld_imm64",
@@ -273,11 +600,51 @@ static struct bpf_test tests[] = {
.result = REJECT,
},
{
+ "arsh32 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "unknown opcode c4",
+ },
+ {
+ "arsh32 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "unknown opcode cc",
+ },
+ {
+ "arsh64 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "arsh64 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
"no bpf_exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
},
- .errstr = "jump out of range",
+ .errstr = "not an exit",
.result = REJECT,
},
{
@@ -367,7 +734,7 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "BPF_CALL uses reserved",
+ .errstr = "unknown opcode 8d",
.result = REJECT,
},
{
@@ -476,6 +843,7 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
+ .retval = POINTER_VALUE,
},
{
"check valid spill/fill, skb mark",
@@ -556,7 +924,7 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "invalid BPF_LD_IMM",
+ .errstr = "unknown opcode 00",
.result = REJECT,
},
{
@@ -574,7 +942,7 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(-1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "invalid BPF_ALU opcode f0",
+ .errstr = "unknown opcode ff",
.result = REJECT,
},
{
@@ -583,7 +951,7 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(-1, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
- .errstr = "invalid BPF_ALU opcode f0",
+ .errstr = "unknown opcode ff",
.result = REJECT,
},
{
@@ -762,6 +1130,7 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
+ .retval = -ENOENT,
},
{
"jump test 4",
@@ -1782,6 +2151,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .retval = 0xfaceb00c,
},
{
"PTR_TO_STACK store/load - bad alignment on off",
@@ -1840,6 +2210,7 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr",
+ .retval = POINTER_VALUE,
},
{
"unpriv: add const to pointer",
@@ -2013,6 +2384,7 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
@@ -2553,6 +2925,29 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "context stores via ST",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ST stores into R1 context is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "context stores via XADD",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
+ BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_XADD stores into R1 context is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"direct packet access: test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2777,6 +3172,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test12 (and, good access)",
@@ -2801,6 +3197,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test13 (branches, good access)",
@@ -2831,6 +3228,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
@@ -2854,6 +3252,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test15 (spill with xadd)",
@@ -3140,6 +3539,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
},
{
"direct packet access: test28 (marking on <=, bad access)",
@@ -4272,7 +4672,8 @@ static struct bpf_test tests[] = {
.fixup_map1 = { 2 },
.errstr_unpriv = "R2 leaks addr into mem",
.result_unpriv = REJECT,
- .result = ACCEPT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 context is not allowed",
},
{
"leak pointer into ctx 2",
@@ -4286,7 +4687,8 @@ static struct bpf_test tests[] = {
},
.errstr_unpriv = "R10 leaks addr into mem",
.result_unpriv = REJECT,
- .result = ACCEPT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 context is not allowed",
},
{
"leak pointer into ctx 3",
@@ -5635,7 +6037,7 @@ static struct bpf_test tests[] = {
"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -5757,6 +6159,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 0 /* csum_diff of 64-byte packet */,
},
{
"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
@@ -5870,7 +6273,7 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
@@ -6125,6 +6528,7 @@ static struct bpf_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
+ .retval = 42 /* ultimate return value */,
},
{
"ld_ind: check calling conv, r1",
@@ -6196,6 +6600,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .retval = 1,
},
{
"check bpf_perf_event_data->sample_period byte load permitted",
@@ -6667,7 +7072,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
.fixup_map1 = { 4 },
- .errstr = "unbounded min value",
+ .errstr = "R0 invalid mem access 'inv'",
.result = REJECT,
},
{
@@ -7183,6 +7588,7 @@ static struct bpf_test tests[] = {
},
.fixup_map1 = { 3 },
.result = ACCEPT,
+ .retval = POINTER_VALUE,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr as return value"
},
@@ -7203,6 +7609,7 @@ static struct bpf_test tests[] = {
},
.fixup_map1 = { 3 },
.result = ACCEPT,
+ .retval = POINTER_VALUE,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr as return value"
},
@@ -7369,7 +7776,7 @@ static struct bpf_test tests[] = {
},
BPF_EXIT_INSN(),
},
- .errstr = "BPF_END uses reserved fields",
+ .errstr = "unknown opcode d7",
.result = REJECT,
},
{
@@ -7644,6 +8051,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .retval = TEST_DATA_LEN,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
@@ -8569,6 +8977,128 @@ static struct bpf_test tests[] = {
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "check deducing bounds from const, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "check deducing bounds from const, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check deducing bounds from const, 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ },
+ {
+ "check deducing bounds from const, 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ },
+ {
+ "check deducing bounds from const, 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+ },
+ {
+ "check deducing bounds from const, 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ /* Marks reg as unknown. */
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+ },
+ {
"bpf_exit with invalid return code. test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
@@ -8640,6 +9170,1959 @@ static struct bpf_test tests[] = {
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
+ {
+ "calls: basic sanity",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ },
+ {
+ "calls: not on unpriviledged",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "calls: div by 0 in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_2, 0),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
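+ /* The test above relies on BPF's defined division semantics:
+ * dividing by zero yields 0 and execution continues rather than
+ * trapping, so the subprog survives the BPF_DIV on a zero divisor
+ * and the program is expected to return 1.
+ */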
+ {
+ "calls: multiple ret types in subprog 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ },
+ {
+ "calls: multiple ret types in subprog 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 16 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+ },
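+ /* "multiple ret types" means the subprog can return either a packet
+ * pointer or a scalar depending on the branch taken; the caller then
+ * holds a value of unknown type ('inv'), which cannot be safely
+ * dereferenced or offset, so both variants are rejected.
+ */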
+ {
+ "calls: overlapping caller/callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn is not an exit or jmp",
+ .result = REJECT,
+ },
+ {
+ "calls: wrong recursive calls",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+ },
+ {
+ "calls: wrong src reg",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+ },
+ {
+ "calls: wrong off value",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+ },
+ {
+ "calls: jump back loop",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn 0 to 0",
+ .result = REJECT,
+ },
+ {
+ "calls: conditional call",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+ },
+ {
+ "calls: conditional call 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ },
+ {
+ "calls: conditional call 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+ },
+ {
+ "calls: conditional call 4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ },
+ {
+ "calls: conditional call 5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+ },
+ {
+ "calls: conditional call 6",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+ },
+ {
+ "calls: using r0 returned by callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ },
+ {
+ "calls: using uninit r0 from callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+ },
+ {
+ "calls: callee is using r1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+ },
+ {
+ "calls: callee using args1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ },
+ {
+ "calls: callee using wrong args2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+ },
+ {
+ "calls: callee using two args",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
+ },
+ {
+ "calls: callee changing pkt pointers",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ /* clear_all_pkt_pointers() has to walk all frames
+ * to make sure that pkt pointers in the caller
+ * are cleared when the callee calls a helper that
+ * adjusts the packet size
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_xdp_adjust_head),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R6 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "calls: two calls with args",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN,
+ },
+ {
+ "calls: calls with stack arith",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "calls: calls with misaligned stack access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+ },
+ {
+ "calls: calls control flow, jump test",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 43,
+ },
+ {
+ "calls: calls control flow, jump test 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "jump out of range from insn 1 to 4",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with bad jump",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range from insn 11 to 9",
+ .result = REJECT,
+ },
+ {
+ "calls: recursive call. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+ },
+ {
+ "calls: recursive call. test2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+ },
+ {
+ "calls: unreachable code",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "unreachable insn 6",
+ .result = REJECT,
+ },
+ {
+ "calls: invalid call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+ },
+ {
+ "calls: invalid call 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+ },
+ {
+ "calls: jumping across function bodies. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+ },
+ {
+ "calls: jumping across function bodies. test2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+ },
+ {
+ "calls: call without exit",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+ },
+ {
+ "calls: call into middle of ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+ },
+ {
+ "calls: call into middle of other call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+ },
+ {
+ "calls: ld_abs with changing ctx data in callee",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_vlan_push),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with bad fallthrough",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with stack read",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ },
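+ /* A callee may dereference its caller's stack through a pointer
+ * argument: the test above passes fp-8 down two call levels and
+ * reads through it, and the next test also writes through such a
+ * pointer. The verifier tracks which frame each stack pointer
+ * belongs to across the calls.
+ */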
+ {
+ "calls: two calls with stack write",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ /* write into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* read from stack frame of main prog */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ },
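+ /* The following tests exercise the stack depth checks: each frame
+ * may use at most MAX_BPF_STACK (512) bytes, and with bpf-to-bpf
+ * calls the combined usage of the deepest call chain is bounded by
+ * the same 512 byte limit ("combined stack size").
+ */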
+ {
+ "calls: stack overflow using two frames (pre-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+ },
+ {
+ "calls: stack overflow using two frames (post-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+ },
+ {
+ "calls: stack depth check using three frames. test1",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=256, stack_B=64
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+ },
+ {
+ "calls: stack depth check using three frames. test2",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=64, stack_B=256
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+ },
+ {
+ "calls: stack depth check using three frames. test3",
+ .insns = {
+ /* main */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ /* B */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=64, stack_A=224, stack_B=256
+ * and max(main+A, main+A+B) > 512
+ */
+ .errstr = "combined stack",
+ .result = REJECT,
+ },
+ {
+ "calls: stack depth check using three frames. test4",
+ /* void main(void) {
+ * func1(0);
+ * func1(1);
+ * func2(1);
+ * }
+ * void func1(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * } else {
+ * func2(alloc_or_recurse);
+ * }
+ * }
+ * void func2(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * }
+ * }
+ */
+ .insns = {
+ /* main */
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = REJECT,
+ .errstr = "combined stack",
+ },
+ {
+ "calls: stack depth check using three frames. test5",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+ BPF_EXIT_INSN(),
+ /* C */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+ BPF_EXIT_INSN(),
+ /* D */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+ BPF_EXIT_INSN(),
+ /* E */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+ BPF_EXIT_INSN(),
+ /* F */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+ BPF_EXIT_INSN(),
+ /* G */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+ BPF_EXIT_INSN(),
+ /* H */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "call stack",
+ .result = REJECT,
+ },
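+ /* test5 above overflows the call depth rather than the stack:
+ * main plus subprogs A through H makes nine nested frames, which
+ * exceeds the verifier's frame limit (MAX_CALL_FRAMES, 8 here),
+ * hence the "call stack" rejection.
+ */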
+ {
+ "calls: spill into caller stack frame",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot spill",
+ .result = REJECT,
+ },
+ {
+ "calls: write into caller stack frame",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ .retval = 42,
+ },
+ {
+ "calls: write into callee stack frame",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot return stack pointer",
+ .result = REJECT,
+ },
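+ /* A pointer into the callee's own stack frame must not escape to
+ * the caller: that frame is gone once the callee exits, so the
+ * test above is rejected with "cannot return stack pointer".
+ */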
+ {
+ "calls: two calls with stack write and void return",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* write into stack frame of main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(), /* void return */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ },
+ {
+ "calls: ambiguous return value",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls that return map_value",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map1 = { 23 },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls that return map_value with bool condition",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map1 = { 23 },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls that return map_value with incorrect bool check",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map1 = { 23 },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -16+0 size 8",
+ },
+ {
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ },
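+ /* The rejection above is an out-of-bounds map access: subprog 2
+ * stores 8 bytes at offset 2 into a map value that is only 8 bytes
+ * long, matching the errstr "value_size=8 off=2 size=8".
+ */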
+ {
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, -30),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -8),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ },
+ {
+ "calls: two calls that receive map_value_ptr_or_null via arg. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls that receive map_value_ptr_or_null via arg. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 0 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map1 = { 12, 22 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ },
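+ /* The "pkt_ptr spill" tests check how packet-pointer range
+ * information interacts with spills to the caller's stack: a
+ * spilled packet pointer may only be dereferenced after the
+ * data_end bounds check has been seen on every path reaching the
+ * use, otherwise the access to the packet is rejected.
+ */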
+ {
+ "calls: pkt_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ /* The spill marking is still kept, but relying on it here is not safe in all cases. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Marking is still kept and safe here. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Check marking propagated. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "same insn cannot be used with different",
+ .result = REJECT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "calls: pkt_ptr spill into caller stack 9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ },
+ {
+ "calls: caller stack init to zero or map_value_or_null",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ /* fetch map_value_or_null or const_zero from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* store into map_value */
+ BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* if (ctx == 0) return; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+ /* else bpf_map_lookup() and *(fp - 8) = r0 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 13 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "calls: stack init to zero and pruning",
+ .insns = {
+ /* first make allocated_stack 16 byte */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ /* now fork the execution such that the false branch
+ * of JGT insn will be verified second and it skips zero
+ * init of fp-8 stack slot. If stack liveness marking
+ * is missing live_read marks from call map_lookup
+ * processing then pruning will incorrectly assume
+ * that fp-8 stack slot was unused in the fall-through
+ * branch and will accept the program incorrectly
+ */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 6 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "search pruning: all branches should be verified (nop operation)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "R6 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "search pruning: all branches should be verified (invalid stack access)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_JMP_A(1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "invalid read from stack off -16+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
@@ -8751,10 +11234,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
int fd_prog, expected_ret, reject_from_alignment;
struct bpf_insn *prog = test->insns;
int prog_len = probe_filter_length(prog);
+ char data_in[TEST_DATA_LEN] = {};
int prog_type = test->prog_type;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
- int i;
+ uint32_t retval;
+ int i, err;
for (i = 0; i < MAX_NR_MAPS; i++)
map_fds[i] = -1;
@@ -8797,6 +11282,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
}
}
+ if (fd_prog >= 0) {
+ err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
+ NULL, NULL, &retval, NULL);
+ if (err && errno != 524 /* ENOTSUPP */ && errno != EPERM) {
+ printf("Unexpected bpf_prog_test_run error\n");
+ goto fail_log;
+ }
+ if (!err && retval != test->retval &&
+ test->retval != POINTER_VALUE) {
+ printf("FAIL retval %d != %d\n", retval, test->retval);
+ goto fail_log;
+ }
+ }
(*passes)++;
printf("OK%s\n", reject_from_alignment ?
" (NOTE: reject due to unknown alignment)" : "");
diff --git a/tools/testing/selftests/bpf/test_xdp_noinline.c b/tools/testing/selftests/bpf/test_xdp_noinline.c
new file mode 100644
index 000000000000..5e4aac74f9d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_noinline.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "bpf_helpers.h"
+
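+/* bpf_trace_printk() needs its format string on the BPF stack, hence the
+ * local copy made by the macro below
+ */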
+#define bpf_printk(fmt, ...) \
+({ \
+ char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+})
+
+static __u32 rol32(__u32 word, unsigned int shift)
+{
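+ /* (-shift) & 31 clamps the right-shift count to 0..31, avoiding
+ * undefined behaviour when shift == 0
+ */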
+ return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy/paste of jhash from kernel sources, to make sure llvm
+ * can compile it into a valid sequence of bpf instructions
+ */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= c; a ^= rol32(c, 4); c += b; \
+ b -= a; b ^= rol32(a, 6); a += c; \
+ c -= b; c ^= rol32(b, 8); b += a; \
+ a -= c; a ^= rol32(c, 16); c += b; \
+ b -= a; b ^= rol32(a, 19); a += c; \
+ c -= b; c ^= rol32(b, 4); b += a; \
+}
+
+#define __jhash_final(a, b, c) \
+{ \
+ c ^= b; c -= rol32(b, 14); \
+ a ^= c; a -= rol32(c, 11); \
+ b ^= a; b -= rol32(a, 25); \
+ c ^= b; c -= rol32(b, 16); \
+ a ^= c; a -= rol32(c, 4); \
+ b ^= a; b -= rol32(a, 14); \
+ c ^= b; c -= rol32(b, 24); \
+}
+
+#define JHASH_INITVAL 0xdeadbeef
+
+typedef unsigned int u32;
+
+static __attribute__ ((noinline))
+u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c;
+ const unsigned char *k = key;
+
+ a = b = c = JHASH_INITVAL + length + initval;
+
+ while (length > 12) {
+ a += *(u32 *)(k);
+ b += *(u32 *)(k + 4);
+ c += *(u32 *)(k + 8);
+ __jhash_mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+ switch (length) {
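+ /* all cases fall through to accumulate the remaining tail bytes */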
+ case 12: c += (u32)k[11]<<24;
+ case 11: c += (u32)k[10]<<16;
+ case 10: c += (u32)k[9]<<8;
+ case 9: c += k[8];
+ case 8: b += (u32)k[7]<<24;
+ case 7: b += (u32)k[6]<<16;
+ case 6: b += (u32)k[5]<<8;
+ case 5: b += k[4];
+ case 4: a += (u32)k[3]<<24;
+ case 3: a += (u32)k[2]<<16;
+ case 2: a += (u32)k[1]<<8;
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
+
+static __attribute__ ((noinline))
+u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += initval;
+ b += initval;
+ c += initval;
+ __jhash_final(a, b, c);
+ return c;
+}
+
+static __attribute__ ((noinline))
+u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
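+ /* bias matches the kernel's jhash_2words(): JHASH_INITVAL plus the
+ * key length in bytes (2 words << 2)
+ */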
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+struct flow_key {
+ union {
+ __be32 src;
+ __be32 srcv6[4];
+ };
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ union {
+ __u32 ports;
+ __u16 port16[2];
+ };
+ __u8 proto;
+};
+
+struct packet_description {
+ struct flow_key flow;
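+ /* bit 0: headers parsed from an ICMP error payload; bit 1: TCP SYN seen */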
+ __u8 flags;
+};
+
+struct ctl_value {
+ union {
+ __u64 value;
+ __u32 ifindex;
+ __u8 mac[6];
+ };
+};
+
+struct vip_definition {
+ union {
+ __be32 vip;
+ __be32 vipv6[4];
+ };
+ __u16 port;
+ __u16 family;
+ __u8 proto;
+};
+
+struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+};
+
+struct real_pos_lru {
+ __u32 pos;
+ __u64 atime;
+};
+
+struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+};
+
+struct lb_stats {
+ __u64 v2;
+ __u64 v1;
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct vip_definition),
+ .value_size = sizeof(struct vip_meta),
+ .max_entries = 512,
+ .map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = {
+ .type = BPF_MAP_TYPE_LRU_HASH,
+ .key_size = sizeof(struct flow_key),
+ .value_size = sizeof(struct real_pos_lru),
+ .max_entries = 300,
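+ /* 1U << 1 == BPF_F_NO_COMMON_LRU */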
+ .map_flags = 1U << 1,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 12 * 655,
+ .map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) reals = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct real_definition),
+ .max_entries = 40,
+ .map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) stats = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct lb_stats),
+ .max_entries = 515,
+ .map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct ctl_value),
+ .max_entries = 16,
+ .map_flags = 0,
+};
+
+struct eth_hdr {
+ unsigned char eth_dest[6];
+ unsigned char eth_source[6];
+ unsigned short eth_proto;
+};
+
+static inline __u64 calc_offset(bool is_ipv6, bool is_icmp)
+{
+ __u64 off = sizeof(struct eth_hdr);
+ if (is_ipv6) {
+ off += sizeof(struct ipv6hdr);
+ if (is_icmp)
+ off += sizeof(struct icmp6hdr) + sizeof(struct ipv6hdr);
+ } else {
+ off += sizeof(struct iphdr);
+ if (is_icmp)
+ off += sizeof(struct icmphdr) + sizeof(struct iphdr);
+ }
+ return off;
+}
+
+static __attribute__ ((noinline))
+bool parse_udp(void *data, void *data_end,
+ bool is_ipv6, struct packet_description *pckt)
+{
+
+ bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
+ __u64 off = calc_offset(is_ipv6, is_icmp);
+ struct udphdr *udp;
+ udp = data + off;
+
+ if (udp + 1 > data_end)
+ return 0;
+ if (!is_icmp) {
+ pckt->flow.port16[0] = udp->source;
+ pckt->flow.port16[1] = udp->dest;
+ } else {
+ pckt->flow.port16[0] = udp->dest;
+ pckt->flow.port16[1] = udp->source;
+ }
+ return 1;
+}
+
+static __attribute__ ((noinline))
+bool parse_tcp(void *data, void *data_end,
+ bool is_ipv6, struct packet_description *pckt)
+{
+
+ bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
+ __u64 off = calc_offset(is_ipv6, is_icmp);
+ struct tcphdr *tcp;
+
+ tcp = data + off;
+ if (tcp + 1 > data_end)
+ return 0;
+ if (tcp->syn)
+ pckt->flags |= (1 << 1);
+ if (!is_icmp) {
+ pckt->flow.port16[0] = tcp->source;
+ pckt->flow.port16[1] = tcp->dest;
+ } else {
+ pckt->flow.port16[0] = tcp->dest;
+ pckt->flow.port16[1] = tcp->source;
+ }
+ return 1;
+}
+
+static __attribute__ ((noinline))
+bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
+ struct packet_description *pckt,
+ struct real_definition *dst, __u32 pkt_bytes)
+{
+ struct eth_hdr *new_eth;
+ struct eth_hdr *old_eth;
+ struct ipv6hdr *ip6h;
+ __u32 ip_suffix;
+ void *data_end;
+ void *data;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
+ return 0;
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+ new_eth = data;
+ ip6h = data + sizeof(struct eth_hdr);
+ old_eth = data + sizeof(struct ipv6hdr);
+ if (new_eth + 1 > data_end ||
+ old_eth + 1 > data_end || ip6h + 1 > data_end)
+ return 0;
+ memcpy(new_eth->eth_dest, cval->mac, 6);
+ memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
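+ /* 56710 == htons(ETH_P_IPV6) */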
+ new_eth->eth_proto = 56710;
+ ip6h->version = 6;
+ ip6h->priority = 0;
+ memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+
+ ip6h->nexthdr = IPPROTO_IPV6;
+ ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
+ ip6h->payload_len =
+ __builtin_bswap16(pkt_bytes + sizeof(struct ipv6hdr));
+ ip6h->hop_limit = 4;
+
+ ip6h->saddr.in6_u.u6_addr32[0] = 1;
+ ip6h->saddr.in6_u.u6_addr32[1] = 2;
+ ip6h->saddr.in6_u.u6_addr32[2] = 3;
+ ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
+ memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
+ return 1;
+}
+
+static __attribute__ ((noinline))
+bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
+ struct packet_description *pckt,
+ struct real_definition *dst, __u32 pkt_bytes)
+{
+
+ __u32 ip_suffix = __builtin_bswap16(pckt->flow.port16[0]);
+ struct eth_hdr *new_eth;
+ struct eth_hdr *old_eth;
+ __u16 *next_iph_u16;
+ struct iphdr *iph;
+ __u32 csum = 0;
+ void *data_end;
+ void *data;
+
+ ip_suffix <<= 15;
+ ip_suffix ^= pckt->flow.src;
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
+ return 0;
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+ new_eth = data;
+ iph = data + sizeof(struct eth_hdr);
+ old_eth = data + sizeof(struct iphdr);
+ if (new_eth + 1 > data_end ||
+ old_eth + 1 > data_end || iph + 1 > data_end)
+ return 0;
+ memcpy(new_eth->eth_dest, cval->mac, 6);
+ memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
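+ /* 8 == htons(ETH_P_IP) */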
+ new_eth->eth_proto = 8;
+ iph->version = 4;
+ iph->ihl = 5;
+ iph->frag_off = 0;
+ iph->protocol = IPPROTO_IPIP;
+ iph->check = 0;
+ iph->tos = 1;
+ iph->tot_len = __builtin_bswap16(pkt_bytes + sizeof(struct iphdr));
+ /* don't update iph->daddr, since it will overwrite old eth_proto
+ * and multiple iterations of bpf_prog_run() will fail
+ */
+
+ iph->saddr = ((0xFFFF0000 & ip_suffix) | 4268) ^ dst->dst;
+ iph->ttl = 4;
+
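+ /* RFC 1071 checksum: sum the header as 16-bit words, fold the carries, then invert */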
+ next_iph_u16 = (__u16 *) iph;
+#pragma clang loop unroll(full)
+ for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
+ csum += *next_iph_u16++;
+ iph->check = ~((csum & 0xffff) + (csum >> 16));
+ if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
+ return 0;
+ return 1;
+}
+
+static __attribute__ ((noinline))
+bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
+{
+ struct eth_hdr *new_eth;
+ struct eth_hdr *old_eth;
+
+ old_eth = *data;
+ new_eth = *data + sizeof(struct ipv6hdr);
+ memcpy(new_eth->eth_source, old_eth->eth_source, 6);
+ memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
+ if (inner_v4)
+ new_eth->eth_proto = 8;
+ else
+ new_eth->eth_proto = 56710;
+ if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
+ return 0;
+ *data = (void *)(long)xdp->data;
+ *data_end = (void *)(long)xdp->data_end;
+ return 1;
+}
+
+static __attribute__ ((noinline))
+bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
+{
+ struct eth_hdr *new_eth;
+ struct eth_hdr *old_eth;
+
+ old_eth = *data;
+ new_eth = *data + sizeof(struct iphdr);
+ memcpy(new_eth->eth_source, old_eth->eth_source, 6);
+ memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
+ new_eth->eth_proto = 8;
+ if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
+ return 0;
+ *data = (void *)(long)xdp->data;
+ *data_end = (void *)(long)xdp->data_end;
+ return 1;
+}
+
+static __attribute__ ((noinline))
+int swap_mac_and_send(void *data, void *data_end)
+{
+ unsigned char tmp_mac[6];
+ struct eth_hdr *eth;
+
+ eth = data;
+ memcpy(tmp_mac, eth->eth_source, 6);
+ memcpy(eth->eth_source, eth->eth_dest, 6);
+ memcpy(eth->eth_dest, tmp_mac, 6);
+ return XDP_TX;
+}
+
+static __attribute__ ((noinline))
+int send_icmp_reply(void *data, void *data_end)
+{
+ struct icmphdr *icmp_hdr;
+ __u16 *next_iph_u16;
+ __u32 tmp_addr = 0;
+ struct iphdr *iph;
+ __u32 csum1 = 0;
+ __u32 csum = 0;
+ __u64 off = 0;
+
+ if (data + sizeof(struct eth_hdr)
+ + sizeof(struct iphdr) + sizeof(struct icmphdr) > data_end)
+ return XDP_DROP;
+ off += sizeof(struct eth_hdr);
+ iph = data + off;
+ off += sizeof(struct iphdr);
+ icmp_hdr = data + off;
+ icmp_hdr->type = 0;
+ icmp_hdr->checksum += 0x0007;
+ iph->ttl = 4;
+ tmp_addr = iph->daddr;
+ iph->daddr = iph->saddr;
+ iph->saddr = tmp_addr;
+ iph->check = 0;
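+ /* recompute the IPv4 header checksum after swapping addresses:
+ * sum 16-bit words, fold the carries, invert
+ */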
+ next_iph_u16 = (__u16 *) iph;
+#pragma clang loop unroll(full)
+ for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
+ csum += *next_iph_u16++;
+ iph->check = ~((csum & 0xffff) + (csum >> 16));
+ return swap_mac_and_send(data, data_end);
+}
+
+static __attribute__ ((noinline))
+int send_icmp6_reply(void *data, void *data_end)
+{
+ struct icmp6hdr *icmp_hdr;
+ struct ipv6hdr *ip6h;
+ __be32 tmp_addr[4];
+ __u64 off = 0;
+
+ if (data + sizeof(struct eth_hdr)
+ + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) > data_end)
+ return XDP_DROP;
+ off += sizeof(struct eth_hdr);
+ ip6h = data + off;
+ off += sizeof(struct ipv6hdr);
+ icmp_hdr = data + off;
+ icmp_hdr->icmp6_type = 129;
+ icmp_hdr->icmp6_cksum -= 0x0001;
+ ip6h->hop_limit = 4;
+ memcpy(tmp_addr, ip6h->saddr.in6_u.u6_addr32, 16);
+ memcpy(ip6h->saddr.in6_u.u6_addr32, ip6h->daddr.in6_u.u6_addr32, 16);
+ memcpy(ip6h->daddr.in6_u.u6_addr32, tmp_addr, 16);
+ return swap_mac_and_send(data, data_end);
+}
+
+static __attribute__ ((noinline))
+int parse_icmpv6(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
+{
+ struct icmp6hdr *icmp_hdr;
+ struct ipv6hdr *ip6h;
+
+ icmp_hdr = data + off;
+ if (icmp_hdr + 1 > data_end)
+ return XDP_DROP;
+ if (icmp_hdr->icmp6_type == 128)
+ return send_icmp6_reply(data, data_end);
+ if (icmp_hdr->icmp6_type != 3)
+ return XDP_PASS;
+ off += sizeof(struct icmp6hdr);
+ ip6h = data + off;
+ if (ip6h + 1 > data_end)
+ return XDP_DROP;
+ pckt->flow.proto = ip6h->nexthdr;
+ pckt->flags |= (1 << 0);
+ memcpy(pckt->flow.srcv6, ip6h->daddr.in6_u.u6_addr32, 16);
+ memcpy(pckt->flow.dstv6, ip6h->saddr.in6_u.u6_addr32, 16);
+ return -1;
+}
+
+static __attribute__ ((noinline))
+int parse_icmp(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
+{
+ struct icmphdr *icmp_hdr;
+ struct iphdr *iph;
+
+ icmp_hdr = data + off;
+ if (icmp_hdr + 1 > data_end)
+ return XDP_DROP;
+ if (icmp_hdr->type == 8)
+ return send_icmp_reply(data, data_end);
+ if ((icmp_hdr->type != 3) || (icmp_hdr->code != 4))
+ return XDP_PASS;
+ off += sizeof(struct icmphdr);
+ iph = data + off;
+ if (iph + 1 > data_end)
+ return XDP_DROP;
+ if (iph->ihl != 5)
+ return XDP_DROP;
+ pckt->flow.proto = iph->protocol;
+ pckt->flags |= (1 << 0);
+ pckt->flow.src = iph->daddr;
+ pckt->flow.dst = iph->saddr;
+ return -1;
+}
+
+static __attribute__ ((noinline))
+__u32 get_packet_hash(struct packet_description *pckt,
+ bool hash_16bytes)
+{
+ if (hash_16bytes)
+ return jhash_2words(jhash(pckt->flow.srcv6, 16, 12),
+ pckt->flow.ports, 24);
+ else
+ return jhash_2words(pckt->flow.src, pckt->flow.ports,
+ 24);
+}
+
+__attribute__ ((noinline))
+static bool get_packet_dst(struct real_definition **real,
+ struct packet_description *pckt,
+ struct vip_meta *vip_info,
+ bool is_ipv6, void *lru_map)
+{
+ struct real_pos_lru new_dst_lru = { };
+ bool hash_16bytes = is_ipv6;
+ __u32 *real_pos, hash, key;
+ __u64 cur_time;
+
+ if (vip_info->flags & (1 << 2))
+ hash_16bytes = 1;
+ if (vip_info->flags & (1 << 3)) {
+ pckt->flow.port16[0] = pckt->flow.port16[1];
+ memset(pckt->flow.srcv6, 0, 16);
+ }
+ hash = get_packet_hash(pckt, hash_16bytes);
+ if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
+ hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+ return 0;
+ key = 2 * vip_info->vip_num + hash % 2;
+ real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+ if (!real_pos)
+ return 0;
+ key = *real_pos;
+ *real = bpf_map_lookup_elem(&reals, &key);
+ if (!(*real))
+ return 0;
+ if (!(vip_info->flags & (1 << 1))) {
+ __u32 conn_rate_key = 512 + 2;
+ struct lb_stats *conn_rate_stats =
+ bpf_map_lookup_elem(&stats, &conn_rate_key);
+
+ if (!conn_rate_stats)
+ return 1;
+ cur_time = bpf_ktime_get_ns();
+ if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffffff) {
+ conn_rate_stats->v1 = 1;
+ conn_rate_stats->v2 = cur_time;
+ } else {
+ conn_rate_stats->v1 += 1;
+ if (conn_rate_stats->v1 >= 1)
+ return 1;
+ }
+ if (pckt->flow.proto == IPPROTO_UDP)
+ new_dst_lru.atime = cur_time;
+ new_dst_lru.pos = key;
+ bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
+ }
+ return 1;
+}
+
+__attribute__ ((noinline))
+static void connection_table_lookup(struct real_definition **real,
+ struct packet_description *pckt,
+ void *lru_map)
+{
+
+ struct real_pos_lru *dst_lru;
+ __u64 cur_time;
+ __u32 key;
+
+ dst_lru = bpf_map_lookup_elem(lru_map, &pckt->flow);
+ if (!dst_lru)
+ return;
+ if (pckt->flow.proto == IPPROTO_UDP) {
+ cur_time = bpf_ktime_get_ns();
+ if (cur_time - dst_lru->atime > 300000)
+ return;
+ dst_lru->atime = cur_time;
+ }
+ key = dst_lru->pos;
+ *real = bpf_map_lookup_elem(&reals, &key);
+}
+
+/* don't believe your eyes!
+ * the function below has 6 arguments, whereas bpf and llvm allow a maximum of 5,
+ * but since it's _static_, llvm can optimize one argument away
+ */
+__attribute__ ((noinline))
+static int process_l3_headers_v6(struct packet_description *pckt,
+ __u8 *protocol, __u64 off,
+ __u16 *pkt_bytes, void *data,
+ void *data_end)
+{
+ struct ipv6hdr *ip6h;
+ __u64 iph_len;
+ int action;
+
+ ip6h = data + off;
+ if (ip6h + 1 > data_end)
+ return XDP_DROP;
+ iph_len = sizeof(struct ipv6hdr);
+ *protocol = ip6h->nexthdr;
+ pckt->flow.proto = *protocol;
+ *pkt_bytes = __builtin_bswap16(ip6h->payload_len);
+ off += iph_len;
+ if (*protocol == 45) {
+ return XDP_DROP;
+ } else if (*protocol == 59) {
+ action = parse_icmpv6(data, data_end, off, pckt);
+ if (action >= 0)
+ return action;
+ } else {
+ memcpy(pckt->flow.srcv6, ip6h->saddr.in6_u.u6_addr32, 16);
+ memcpy(pckt->flow.dstv6, ip6h->daddr.in6_u.u6_addr32, 16);
+ }
+ return -1;
+}
+
+__attribute__ ((noinline))
+static int process_l3_headers_v4(struct packet_description *pckt,
+ __u8 *protocol, __u64 off,
+ __u16 *pkt_bytes, void *data,
+ void *data_end)
+{
+ struct iphdr *iph;
+ __u64 iph_len;
+ int action;
+
+ iph = data + off;
+ if (iph + 1 > data_end)
+ return XDP_DROP;
+ if (iph->ihl != 5)
+ return XDP_DROP;
+ *protocol = iph->protocol;
+ pckt->flow.proto = *protocol;
+ *pkt_bytes = __builtin_bswap16(iph->tot_len);
+ off += 20;
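+ /* 65343 == htons(IP_MF | IP_OFFSET): drop fragmented packets */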
+ if (iph->frag_off & 65343)
+ return XDP_DROP;
+ if (*protocol == IPPROTO_ICMP) {
+ action = parse_icmp(data, data_end, off, pckt);
+ if (action >= 0)
+ return action;
+ } else {
+ pckt->flow.src = iph->saddr;
+ pckt->flow.dst = iph->daddr;
+ }
+ return -1;
+}
+
+__attribute__ ((noinline))
+static int process_packet(void *data, __u64 off, void *data_end,
+ bool is_ipv6, struct xdp_md *xdp)
+{
+
+ struct real_definition *dst = NULL;
+ struct packet_description pckt = { };
+ struct vip_definition vip = { };
+ struct lb_stats *data_stats;
+ struct eth_hdr *eth = data;
+ void *lru_map = &lru_cache;
+ struct vip_meta *vip_info;
+ __u32 lru_stats_key = 513;
+ __u32 mac_addr_pos = 0;
+ __u32 stats_key = 512;
+ struct ctl_value *cval;
+ __u16 pkt_bytes;
+ __u64 iph_len;
+ __u8 protocol;
+ __u32 vip_num;
+ int action;
+
+ if (is_ipv6)
+ action = process_l3_headers_v6(&pckt, &protocol, off,
+ &pkt_bytes, data, data_end);
+ else
+ action = process_l3_headers_v4(&pckt, &protocol, off,
+ &pkt_bytes, data, data_end);
+ if (action >= 0)
+ return action;
+ protocol = pckt.flow.proto;
+ if (protocol == IPPROTO_TCP) {
+ if (!parse_tcp(data, data_end, is_ipv6, &pckt))
+ return XDP_DROP;
+ } else if (protocol == IPPROTO_UDP) {
+ if (!parse_udp(data, data_end, is_ipv6, &pckt))
+ return XDP_DROP;
+ } else {
+ return XDP_TX;
+ }
+
+ if (is_ipv6)
+ memcpy(vip.vipv6, pckt.flow.dstv6, 16);
+ else
+ vip.vip = pckt.flow.dst;
+ vip.port = pckt.flow.port16[1];
+ vip.proto = pckt.flow.proto;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info) {
+ vip.port = 0;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info)
+ return XDP_PASS;
+ if (!(vip_info->flags & (1 << 4)))
+ pckt.flow.port16[1] = 0;
+ }
+ if (data_end - data > 1400)
+ return XDP_DROP;
+ data_stats = bpf_map_lookup_elem(&stats, &stats_key);
+ if (!data_stats)
+ return XDP_DROP;
+ data_stats->v1 += 1;
+ if (!dst) {
+ if (vip_info->flags & (1 << 0))
+ pckt.flow.port16[0] = 0;
+ if (!(pckt.flags & (1 << 1)) && !(vip_info->flags & (1 << 1)))
+ connection_table_lookup(&dst, &pckt, lru_map);
+ if (dst)
+ goto out;
+ if (pckt.flow.proto == IPPROTO_TCP) {
+ struct lb_stats *lru_stats =
+ bpf_map_lookup_elem(&stats, &lru_stats_key);
+
+ if (!lru_stats)
+ return XDP_DROP;
+ if (pckt.flags & (1 << 1))
+ lru_stats->v1 += 1;
+ else
+ lru_stats->v2 += 1;
+ }
+ if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6, lru_map))
+ return XDP_DROP;
+ data_stats->v2 += 1;
+ }
+out:
+ cval = bpf_map_lookup_elem(&ctl_array, &mac_addr_pos);
+ if (!cval)
+ return XDP_DROP;
+ if (dst->flags & (1 << 0)) {
+ if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes))
+ return XDP_DROP;
+ } else {
+ if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes))
+ return XDP_DROP;
+ }
+ vip_num = vip_info->vip_num;
+ data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+ if (!data_stats)
+ return XDP_DROP;
+ data_stats->v1 += 1;
+ data_stats->v2 += pkt_bytes;
+
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+ if (data + 4 > data_end)
+ return XDP_DROP;
+ *(u32 *)data = dst->dst;
+ return XDP_DROP;
+}
+
+__attribute__ ((section("xdp-test"), used))
+int balancer_ingress(struct xdp_md *ctx)
+{
+ void *data = (void *)(long)ctx->data;
+ void *data_end = (void *)(long)ctx->data_end;
+ struct eth_hdr *eth = data;
+ __u32 eth_proto;
+ __u32 nh_off;
+
+ nh_off = sizeof(struct eth_hdr);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+ eth_proto = eth->eth_proto;
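+ /* 8 == htons(ETH_P_IP), 56710 == htons(ETH_P_IPV6) */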
+ if (eth_proto == 8)
+ return process_packet(data, nh_off, data_end, 0, ctx);
+ else if (eth_proto == 56710)
+ return process_packet(data, nh_off, data_end, 1, ctx);
+ else
+ return XDP_DROP;
+}
+
+char _license[] __attribute__ ((section("license"), used)) = "GPL";
+int _version __attribute__ ((section("version"), used)) = 1;
diff --git a/tools/testing/selftests/firmware/fw_fallback.sh b/tools/testing/selftests/firmware/fw_fallback.sh
index 34a42c68ebfb..722cad91df74 100755
--- a/tools/testing/selftests/firmware/fw_fallback.sh
+++ b/tools/testing/selftests/firmware/fw_fallback.sh
@@ -175,96 +175,118 @@ trap "test_finish" EXIT
echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")
-DEVPATH="$DIR"/"nope-$NAME"/loading
-
-# Test failure when doing nothing (timeout works).
-echo -n 2 >/sys/class/firmware/timeout
-echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
-
-# Give the kernel some time to load the loading file, must be less
-# than the timeout above.
-sleep 1
-if [ ! -f $DEVPATH ]; then
- echo "$0: fallback mechanism immediately cancelled"
- echo ""
- echo "The file never appeared: $DEVPATH"
- echo ""
- echo "This might be a distribution udev rule setup by your distribution"
- echo "to immediately cancel all fallback requests, this must be"
- echo "removed before running these tests. To confirm look for"
- echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
- echo "and see if you have something like this:"
- echo ""
- echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
- echo ""
- echo "If you do remove this file or comment out this line before"
- echo "proceeding with these tests."
- exit 1
-fi
-
-if diff -q "$FW" /dev/test_firmware >/dev/null ; then
- echo "$0: firmware was not expected to match" >&2
- exit 1
-else
- echo "$0: timeout works"
-fi
-
-# Put timeout high enough for us to do work but not so long that failures
-# slow down this test too much.
-echo 4 >/sys/class/firmware/timeout
+test_sysfs_timeout()
+{
+ DEVPATH="$DIR"/"nope-$NAME"/loading
+
+ # Test failure when doing nothing (timeout works).
+ echo -n 2 >/sys/class/firmware/timeout
+ echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
+
+ # Give the kernel some time to load the loading file, must be less
+ # than the timeout above.
+ sleep 1
+ if [ ! -f $DEVPATH ]; then
+ echo "$0: fallback mechanism immediately cancelled"
+ echo ""
+ echo "The file never appeared: $DEVPATH"
+ echo ""
+ echo "This might be a distribution udev rule setup by your distribution"
+ echo "to immediately cancel all fallback requests, this must be"
+ echo "removed before running these tests. To confirm look for"
+ echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
+ echo "and see if you have something like this:"
+ echo ""
+ echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
+ echo ""
+ echo "If you do remove this file or comment out this line before"
+ echo "proceeding with these tests."
+ exit 1
+ fi
-# Load this script instead of the desired firmware.
-load_fw "$NAME" "$0"
-if diff -q "$FW" /dev/test_firmware >/dev/null ; then
- echo "$0: firmware was not expected to match" >&2
- exit 1
-else
- echo "$0: firmware comparison works"
-fi
+ if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was not expected to match" >&2
+ exit 1
+ else
+ echo "$0: timeout works"
+ fi
+}
-# Do a proper load, which should work correctly.
-load_fw "$NAME" "$FW"
-if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
- echo "$0: firmware was not loaded" >&2
- exit 1
-else
- echo "$0: fallback mechanism works"
-fi
+run_sysfs_main_tests()
+{
+ test_sysfs_timeout
+ # Put timeout high enough for us to do work but not so long that failures
+ # slow down this test too much.
+ echo 4 >/sys/class/firmware/timeout
-load_fw_cancel "nope-$NAME" "$FW"
-if diff -q "$FW" /dev/test_firmware >/dev/null ; then
- echo "$0: firmware was expected to be cancelled" >&2
- exit 1
-else
- echo "$0: cancelling fallback mechanism works"
-fi
+ # Load this script instead of the desired firmware.
+ load_fw "$NAME" "$0"
+ if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was not expected to match" >&2
+ exit 1
+ else
+ echo "$0: firmware comparison works"
+ fi
-if load_fw_custom "$NAME" "$FW" ; then
+ # Do a proper load, which should work correctly.
+ load_fw "$NAME" "$FW"
if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was not loaded" >&2
exit 1
else
- echo "$0: custom fallback loading mechanism works"
+ echo "$0: fallback mechanism works"
fi
-fi
-if load_fw_custom_cancel "nope-$NAME" "$FW" ; then
+ load_fw_cancel "nope-$NAME" "$FW"
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was expected to be cancelled" >&2
exit 1
else
- echo "$0: cancelling custom fallback mechanism works"
+ echo "$0: cancelling fallback mechanism works"
fi
-fi
-set +e
-load_fw_fallback_with_child "nope-signal-$NAME" "$FW"
-if [ "$?" -eq 0 ]; then
- echo "$0: SIGCHLD on sync ignored as expected" >&2
-else
- echo "$0: error - sync firmware request cancelled due to SIGCHLD" >&2
- exit 1
-fi
-set -e
+ set +e
+ load_fw_fallback_with_child "nope-signal-$NAME" "$FW"
+ if [ "$?" -eq 0 ]; then
+ echo "$0: SIGCHLD on sync ignored as expected" >&2
+ else
+ echo "$0: error - sync firmware request cancelled due to SIGCHLD" >&2
+ exit 1
+ fi
+ set -e
+}
+
+run_sysfs_custom_load_tests()
+{
+ if load_fw_custom "$NAME" "$FW" ; then
+ if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was not loaded" >&2
+ exit 1
+ else
+ echo "$0: custom fallback loading mechanism works"
+ fi
+ fi
+
+ if load_fw_custom "$NAME" "$FW" ; then
+ if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was not loaded" >&2
+ exit 1
+ else
+ echo "$0: custom fallback loading mechanism works"
+ fi
+ fi
+
+ if load_fw_custom_cancel "nope-$NAME" "$FW" ; then
+ if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ echo "$0: firmware was expected to be cancelled" >&2
+ exit 1
+ else
+ echo "$0: cancelling custom fallback mechanism works"
+ fi
+ fi
+}
+
+run_sysfs_main_tests
+run_sysfs_custom_load_tests
exit 0
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index b1f20fef36c7..f9508e1a4058 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -45,7 +45,10 @@ test_finish()
if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
fi
- echo -n "$OLD_PATH" >/sys/module/firmware_class/parameters/path
+ if [ "$OLD_FWPATH" = "" ]; then
+ OLD_FWPATH=" "
+ fi
+ echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
rm -f "$FW"
rmdir "$FWPATH"
}
diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile
index 3926a0409dda..a5276a91dfbf 100644
--- a/tools/testing/selftests/memfd/Makefile
+++ b/tools/testing/selftests/memfd/Makefile
@@ -12,3 +12,8 @@ fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
include ../lib.mk
$(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs)
+
+$(OUTPUT)/memfd_test: memfd_test.c common.o
+$(OUTPUT)/fuse_test: fuse_test.c common.o
+
+EXTRA_CLEAN = common.o
diff --git a/tools/testing/selftests/memfd/common.c b/tools/testing/selftests/memfd/common.c
new file mode 100644
index 000000000000..8eb3d75f6e60
--- /dev/null
+++ b/tools/testing/selftests/memfd/common.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#define __EXPORTED_HEADERS__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/fcntl.h>
+#include <linux/memfd.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+
+#include "common.h"
+
+int hugetlbfs_test = 0;
+
+/*
+ * Copied from mlock2-tests.c
+ */
+unsigned long default_huge_page_size(void)
+{
+ unsigned long hps = 0;
+ char *line = NULL;
+ size_t linelen = 0;
+ FILE *f = fopen("/proc/meminfo", "r");
+
+ if (!f)
+ return 0;
+ while (getline(&line, &linelen, f) > 0) {
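+ /* /proc/meminfo reports "Hugepagesize:" in kB; shift by 10 to get bytes */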
+ if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
+ hps <<= 10;
+ break;
+ }
+ }
+
+ free(line);
+ fclose(f);
+ return hps;
+}
+
+int sys_memfd_create(const char *name, unsigned int flags)
+{
+ if (hugetlbfs_test)
+ flags |= MFD_HUGETLB;
+
+ return syscall(__NR_memfd_create, name, flags);
+}
diff --git a/tools/testing/selftests/memfd/common.h b/tools/testing/selftests/memfd/common.h
new file mode 100644
index 000000000000..522d2c630bd8
--- /dev/null
+++ b/tools/testing/selftests/memfd/common.h
@@ -0,0 +1,9 @@
+#ifndef COMMON_H_
+#define COMMON_H_
+
+extern int hugetlbfs_test;
+
+unsigned long default_huge_page_size(void);
+int sys_memfd_create(const char *name, unsigned int flags);
+
+#endif
diff --git a/tools/testing/selftests/memfd/fuse_test.c b/tools/testing/selftests/memfd/fuse_test.c
index 1ccb7a3eb14b..b018e835737d 100644
--- a/tools/testing/selftests/memfd/fuse_test.c
+++ b/tools/testing/selftests/memfd/fuse_test.c
@@ -33,14 +33,12 @@
#include <sys/wait.h>
#include <unistd.h>
+#include "common.h"
+
#define MFD_DEF_SIZE 8192
#define STACK_SIZE 65536
-static int sys_memfd_create(const char *name,
- unsigned int flags)
-{
- return syscall(__NR_memfd_create, name, flags);
-}
+static size_t mfd_def_size = MFD_DEF_SIZE;
static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
{
@@ -127,7 +125,7 @@ static void *mfd_assert_mmap_shared(int fd)
void *p;
p = mmap(NULL,
- MFD_DEF_SIZE,
+ mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
@@ -145,7 +143,7 @@ static void *mfd_assert_mmap_private(int fd)
void *p;
p = mmap(NULL,
- MFD_DEF_SIZE,
+ mfd_def_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
@@ -178,7 +176,7 @@ static int sealing_thread_fn(void *arg)
usleep(200000);
/* unmount mapping before sealing to avoid i_mmap_writable failures */
- munmap(global_p, MFD_DEF_SIZE);
+ munmap(global_p, mfd_def_size);
/* Try sealing the global file; expect EBUSY or success. Current
* kernels will never succeed, but in the future, kernels might
@@ -228,7 +226,7 @@ static void join_sealing_thread(pid_t pid)
int main(int argc, char **argv)
{
- static const char zero[MFD_DEF_SIZE];
+ char *zero;
int fd, mfd, r;
void *p;
int was_sealed;
@@ -239,6 +237,25 @@ int main(int argc, char **argv)
abort();
}
+ if (argc >= 3) {
+ if (!strcmp(argv[2], "hugetlbfs")) {
+ unsigned long hpage_size = default_huge_page_size();
+
+ if (!hpage_size) {
+ printf("Unable to determine huge page size\n");
+ abort();
+ }
+
+ hugetlbfs_test = 1;
+ mfd_def_size = hpage_size * 2;
+ } else {
+ printf("Unknown option: %s\n", argv[2]);
+ abort();
+ }
+ }
+
+ zero = calloc(sizeof(*zero), mfd_def_size);
+
/* open FUSE memfd file for GUP testing */
printf("opening: %s\n", argv[1]);
fd = open(argv[1], O_RDONLY | O_CLOEXEC);
@@ -249,7 +266,7 @@ int main(int argc, char **argv)
/* create new memfd-object */
mfd = mfd_assert_new("kern_memfd_fuse",
- MFD_DEF_SIZE,
+ mfd_def_size,
MFD_CLOEXEC | MFD_ALLOW_SEALING);
/* mmap memfd-object for writing */
@@ -268,7 +285,7 @@ int main(int argc, char **argv)
* This guarantees that the receive-buffer is pinned for 1s until the
* data is written into it. The racing ADD_SEALS should thus fail as
* the pages are still pinned. */
- r = read(fd, p, MFD_DEF_SIZE);
+ r = read(fd, p, mfd_def_size);
if (r < 0) {
printf("read() failed: %m\n");
abort();
@@ -295,10 +312,10 @@ int main(int argc, char **argv)
* enough to avoid any in-flight writes. */
p = mfd_assert_mmap_private(mfd);
- if (was_sealed && memcmp(p, zero, MFD_DEF_SIZE)) {
+ if (was_sealed && memcmp(p, zero, mfd_def_size)) {
printf("memfd sealed during read() but data not discarded\n");
abort();
- } else if (!was_sealed && !memcmp(p, zero, MFD_DEF_SIZE)) {
+ } else if (!was_sealed && !memcmp(p, zero, mfd_def_size)) {
printf("memfd sealed after read() but data discarded\n");
abort();
}
@@ -307,6 +324,7 @@ int main(int argc, char **argv)
close(fd);
printf("fuse: DONE\n");
+ free(zero);
return 0;
}
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 132a54f74e88..10baa1652fc2 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -19,7 +19,10 @@
#include <sys/wait.h>
#include <unistd.h>
+#include "common.h"
+
#define MEMFD_STR "memfd:"
+#define MEMFD_HUGE_STR "memfd-hugetlb:"
#define SHARED_FT_STR "(shared file-table)"
#define MFD_DEF_SIZE 8192
@@ -28,41 +31,8 @@
/*
* Default is not to test hugetlbfs
*/
-static int hugetlbfs_test;
static size_t mfd_def_size = MFD_DEF_SIZE;
-
-/*
- * Copied from mlock2-tests.c
- */
-static unsigned long default_huge_page_size(void)
-{
- unsigned long hps = 0;
- char *line = NULL;
- size_t linelen = 0;
- FILE *f = fopen("/proc/meminfo", "r");
-
- if (!f)
- return 0;
- while (getline(&line, &linelen, f) > 0) {
- if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
- hps <<= 10;
- break;
- }
- }
-
- free(line);
- fclose(f);
- return hps;
-}
-
-static int sys_memfd_create(const char *name,
- unsigned int flags)
-{
- if (hugetlbfs_test)
- flags |= MFD_HUGETLB;
-
- return syscall(__NR_memfd_create, name, flags);
-}
+static const char *memfd_str = MEMFD_STR;
static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
{
@@ -513,6 +483,10 @@ static void mfd_assert_grow_write(int fd)
static char *buf;
ssize_t l;
+ /* hugetlbfs does not support write */
+ if (hugetlbfs_test)
+ return;
+
buf = malloc(mfd_def_size * 8);
if (!buf) {
printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
@@ -533,6 +507,10 @@ static void mfd_fail_grow_write(int fd)
static char *buf;
ssize_t l;
+ /* hugetlbfs does not support write */
+ if (hugetlbfs_test)
+ return;
+
buf = malloc(mfd_def_size * 8);
if (!buf) {
printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
@@ -598,7 +576,7 @@ static void test_create(void)
char buf[2048];
int fd;
- printf("%s CREATE\n", MEMFD_STR);
+ printf("%s CREATE\n", memfd_str);
/* test NULL name */
mfd_fail_new(NULL, 0);
@@ -627,18 +605,13 @@ static void test_create(void)
fd = mfd_assert_new("", 0, MFD_CLOEXEC);
close(fd);
- if (!hugetlbfs_test) {
- /* verify MFD_ALLOW_SEALING is allowed */
- fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING);
- close(fd);
-
- /* verify MFD_ALLOW_SEALING | MFD_CLOEXEC is allowed */
- fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING | MFD_CLOEXEC);
- close(fd);
- } else {
- /* sealing is not supported on hugetlbfs */
- mfd_fail_new("", MFD_ALLOW_SEALING);
- }
+ /* verify MFD_ALLOW_SEALING is allowed */
+ fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING);
+ close(fd);
+
+ /* verify MFD_ALLOW_SEALING | MFD_CLOEXEC is allowed */
+ fd = mfd_assert_new("", 0, MFD_ALLOW_SEALING | MFD_CLOEXEC);
+ close(fd);
}
/*
@@ -649,11 +622,7 @@ static void test_basic(void)
{
int fd;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s BASIC\n", MEMFD_STR);
+ printf("%s BASIC\n", memfd_str);
fd = mfd_assert_new("kern_memfd_basic",
mfd_def_size,
@@ -698,28 +667,6 @@ static void test_basic(void)
}
/*
- * hugetlbfs doesn't support seals or write, so just verify grow and shrink
- * on a hugetlbfs file created via memfd_create.
- */
-static void test_hugetlbfs_grow_shrink(void)
-{
- int fd;
-
- printf("%s HUGETLBFS-GROW-SHRINK\n", MEMFD_STR);
-
- fd = mfd_assert_new("kern_memfd_seal_write",
- mfd_def_size,
- MFD_CLOEXEC);
-
- mfd_assert_read(fd);
- mfd_assert_write(fd);
- mfd_assert_shrink(fd);
- mfd_assert_grow(fd);
-
- close(fd);
-}
-
-/*
* Test SEAL_WRITE
* Test whether SEAL_WRITE actually prevents modifications.
*/
@@ -727,14 +674,7 @@ static void test_seal_write(void)
{
int fd;
- /*
- * hugetlbfs does not contain sealing or write support. Just test
- * basic grow and shrink via test_hugetlbfs_grow_shrink.
- */
- if (hugetlbfs_test)
- return test_hugetlbfs_grow_shrink();
-
- printf("%s SEAL-WRITE\n", MEMFD_STR);
+ printf("%s SEAL-WRITE\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_write",
mfd_def_size,
@@ -760,11 +700,7 @@ static void test_seal_shrink(void)
{
int fd;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s SEAL-SHRINK\n", MEMFD_STR);
+ printf("%s SEAL-SHRINK\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_shrink",
mfd_def_size,
@@ -790,11 +726,7 @@ static void test_seal_grow(void)
{
int fd;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s SEAL-GROW\n", MEMFD_STR);
+ printf("%s SEAL-GROW\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_grow",
mfd_def_size,
@@ -820,11 +752,7 @@ static void test_seal_resize(void)
{
int fd;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s SEAL-RESIZE\n", MEMFD_STR);
+ printf("%s SEAL-RESIZE\n", memfd_str);
fd = mfd_assert_new("kern_memfd_seal_resize",
mfd_def_size,
@@ -843,32 +771,6 @@ static void test_seal_resize(void)
}
/*
- * hugetlbfs does not support seals. Basic test to dup the memfd created
- * fd and perform some basic operations on it.
- */
-static void hugetlbfs_dup(char *b_suffix)
-{
- int fd, fd2;
-
- printf("%s HUGETLBFS-DUP %s\n", MEMFD_STR, b_suffix);
-
- fd = mfd_assert_new("kern_memfd_share_dup",
- mfd_def_size,
- MFD_CLOEXEC);
-
- fd2 = mfd_assert_dup(fd);
-
- mfd_assert_read(fd);
- mfd_assert_write(fd);
-
- mfd_assert_shrink(fd2);
- mfd_assert_grow(fd2);
-
- close(fd2);
- close(fd);
-}
-
-/*
* Test sharing via dup()
* Test that seals are shared between dupped FDs and they're all equal.
*/
@@ -876,16 +778,7 @@ static void test_share_dup(char *banner, char *b_suffix)
{
int fd, fd2;
- /*
- * hugetlbfs does not contain sealing support. Perform some
- * basic testing on dup'ed fd instead via hugetlbfs_dup.
- */
- if (hugetlbfs_test) {
- hugetlbfs_dup(b_suffix);
- return;
- }
-
- printf("%s %s %s\n", MEMFD_STR, banner, b_suffix);
+ printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_dup",
mfd_def_size,
@@ -927,11 +820,7 @@ static void test_share_mmap(char *banner, char *b_suffix)
int fd;
void *p;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s %s %s\n", MEMFD_STR, banner, b_suffix);
+ printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_mmap",
mfd_def_size,
@@ -956,32 +845,6 @@ static void test_share_mmap(char *banner, char *b_suffix)
}
/*
- * Basic test to make sure we can open the hugetlbfs fd via /proc and
- * perform some simple operations on it.
- */
-static void hugetlbfs_proc_open(char *b_suffix)
-{
- int fd, fd2;
-
- printf("%s HUGETLBFS-PROC-OPEN %s\n", MEMFD_STR, b_suffix);
-
- fd = mfd_assert_new("kern_memfd_share_open",
- mfd_def_size,
- MFD_CLOEXEC);
-
- fd2 = mfd_assert_open(fd, O_RDWR, 0);
-
- mfd_assert_read(fd);
- mfd_assert_write(fd);
-
- mfd_assert_shrink(fd2);
- mfd_assert_grow(fd2);
-
- close(fd2);
- close(fd);
-}
-
-/*
* Test sealing with open(/proc/self/fd/%d)
* Via /proc we can get access to a separate file-context for the same memfd.
* This is *not* like dup(), but like a real separate open(). Make sure the
@@ -991,16 +854,7 @@ static void test_share_open(char *banner, char *b_suffix)
{
int fd, fd2;
- /*
- * hugetlbfs does not contain sealing support. So test basic
- * functionality of using /proc fd via hugetlbfs_proc_open
- */
- if (hugetlbfs_test) {
- hugetlbfs_proc_open(b_suffix);
- return;
- }
-
- printf("%s %s %s\n", MEMFD_STR, banner, b_suffix);
+ printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_open",
mfd_def_size,
@@ -1043,11 +897,7 @@ static void test_share_fork(char *banner, char *b_suffix)
int fd;
pid_t pid;
- /* hugetlbfs does not contain sealing support */
- if (hugetlbfs_test)
- return;
-
- printf("%s %s %s\n", MEMFD_STR, banner, b_suffix);
+ printf("%s %s %s\n", memfd_str, banner, b_suffix);
fd = mfd_assert_new("kern_memfd_share_fork",
mfd_def_size,
@@ -1083,7 +933,11 @@ int main(int argc, char **argv)
}
hugetlbfs_test = 1;
+ memfd_str = MEMFD_HUGE_STR;
mfd_def_size = hpage_size * 2;
+ } else {
+ printf("Unknown option: %s\n", argv[1]);
+ abort();
}
}
diff --git a/tools/testing/selftests/memfd/run_fuse_test.sh b/tools/testing/selftests/memfd/run_fuse_test.sh
index 407df68dfe27..22e572e2d66a 100755
--- a/tools/testing/selftests/memfd/run_fuse_test.sh
+++ b/tools/testing/selftests/memfd/run_fuse_test.sh
@@ -10,6 +10,6 @@ set -e
mkdir mnt
./fuse_mnt ./mnt
-./fuse_test ./mnt/memfd
+./fuse_test ./mnt/memfd "$@"
fusermount -u ./mnt
rmdir ./mnt
diff --git a/tools/testing/selftests/memfd/run_tests.sh b/tools/testing/selftests/memfd/run_tests.sh
index daabb350697c..c2d41ed81b24 100755
--- a/tools/testing/selftests/memfd/run_tests.sh
+++ b/tools/testing/selftests/memfd/run_tests.sh
@@ -60,6 +60,7 @@ fi
# Run the hugetlbfs test
#
./memfd_test hugetlbfs
+./run_fuse_test.sh hugetlbfs
#
# Give back any huge pages allocated for the test
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 500c74db746c..d7c30d366935 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -5,6 +5,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
CFLAGS += -I../../../../usr/include/
TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
+TEST_PROGS += fib_tests.sh
TEST_GEN_FILES = socket
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
new file mode 100755
index 000000000000..a9154eefb2e2
--- /dev/null
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -0,0 +1,429 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for checking IPv4 and IPv6 FIB behavior in response to
+# different events.
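+# "ip route get fibmatch" returns the matching FIB entry itself rather
+# than the fully resolved route, so it fails once the entry is removed.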
+
+ret=0
+
+check_err()
+{
+ if [ $ret -eq 0 ]; then
+ ret=$1
+ fi
+}
+
+check_fail()
+{
+ if [ $1 -eq 0 ]; then
+ ret=1
+ fi
+}
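+# check_err latches the first failing exit status; check_fail flags a
+# command that succeeded where failure was expected.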
+
+netns_create()
+{
+ local testns=$1
+
+ ip netns add $testns
+ ip netns exec $testns ip link set dev lo up
+}
+
+fib_unreg_unicast_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip link del dev dummy0
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_fail $?
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: unicast route test"
+ return 1
+ fi
+ echo "PASS: unicast route test"
+}
+
+fib_unreg_multipath_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip link add dummy1 type dummy
+ ip netns exec testns ip link set dev dummy1 up
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+ ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+ ip netns exec testns ip route add 203.0.113.0/24 \
+ nexthop via 198.51.100.2 dev dummy0 \
+ nexthop via 192.0.2.2 dev dummy1
+ ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+ nexthop via 2001:db8:1::2 dev dummy0 \
+ nexthop via 2001:db8:2::2 dev dummy1
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip link del dev dummy0
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+ # In IPv6 we do not flush the entire multipath route.
+ check_err $?
+
+ ip netns exec testns ip link del dev dummy1
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: multipath route test"
+ return 1
+ fi
+ echo "PASS: multipath route test"
+}
+
+fib_unreg_test()
+{
+ echo "Running netdev unregister tests"
+
+ fib_unreg_unicast_test
+ fib_unreg_multipath_test
+}
+
+fib_down_unicast_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip link set dev dummy0 down
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_fail $?
+
+ ip netns exec testns ip link del dev dummy0
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: unicast route test"
+ return 1
+ fi
+ echo "PASS: unicast route test"
+}
+
+fib_down_multipath_test_do()
+{
+ local down_dev=$1
+ local up_dev=$2
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 \
+ oif $down_dev &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+ oif $down_dev &> /dev/null
+ check_fail $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 \
+ oif $up_dev &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+ oif $up_dev &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+ grep $down_dev | grep -q "dead linkdown"
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+ grep $down_dev | grep -q "dead linkdown"
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+ grep $up_dev | grep -q "dead linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+ grep $up_dev | grep -q "dead linkdown"
+ check_fail $?
+}
+
+fib_down_multipath_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip link add dummy1 type dummy
+ ip netns exec testns ip link set dev dummy1 up
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+ ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+ ip netns exec testns ip route add 203.0.113.0/24 \
+ nexthop via 198.51.100.2 dev dummy0 \
+ nexthop via 192.0.2.2 dev dummy1
+ ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+ nexthop via 2001:db8:1::2 dev dummy0 \
+ nexthop via 2001:db8:2::2 dev dummy1
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip link set dev dummy0 down
+ check_err $?
+
+ fib_down_multipath_test_do "dummy0" "dummy1"
+
+ ip netns exec testns ip link set dev dummy0 up
+ check_err $?
+ ip netns exec testns ip link set dev dummy1 down
+ check_err $?
+
+ fib_down_multipath_test_do "dummy1" "dummy0"
+
+ ip netns exec testns ip link set dev dummy0 down
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+ check_fail $?
+
+ ip netns exec testns ip link del dev dummy1
+ ip netns exec testns ip link del dev dummy0
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: multipath route test"
+ return 1
+ fi
+ echo "PASS: multipath route test"
+}
+
+fib_down_test()
+{
+ echo "Running netdev down tests"
+
+ fib_down_unicast_test
+ fib_down_multipath_test
+}
+
+fib_carrier_local_test()
+{
+ ret=0
+
+ # Local routes should not be affected when carrier changes.
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip link set dev dummy0 carrier on
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+ grep -q "linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+ grep -q "linkdown"
+ check_fail $?
+
+ ip netns exec testns ip link set dev dummy0 carrier off
+
+ ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+ grep -q "linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+ grep -q "linkdown"
+ check_fail $?
+
+ ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 192.0.2.1 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 192.0.2.1 | \
+ grep -q "linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 | \
+ grep -q "linkdown"
+ check_fail $?
+
+ ip netns exec testns ip link del dev dummy0
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: local route carrier test"
+ return 1
+ fi
+ echo "PASS: local route carrier test"
+}
+
+fib_carrier_unicast_test()
+{
+ ret=0
+
+ netns_create "testns"
+
+ ip netns exec testns ip link add dummy0 type dummy
+ ip netns exec testns ip link set dev dummy0 up
+
+ ip netns exec testns ip link set dev dummy0 carrier on
+
+ ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+ grep -q "linkdown"
+ check_fail $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+ grep -q "linkdown"
+ check_fail $?
+
+ ip netns exec testns ip link set dev dummy0 carrier off
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+ grep -q "linkdown"
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+ grep -q "linkdown"
+ check_err $?
+
+ ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+ ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+ ip netns exec testns ip route get fibmatch 192.0.2.2 &> /dev/null
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 &> /dev/null
+ check_err $?
+
+ ip netns exec testns ip route get fibmatch 192.0.2.2 | \
+ grep -q "linkdown"
+ check_err $?
+ ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 | \
+ grep -q "linkdown"
+ check_err $?
+
+ ip netns exec testns ip link del dev dummy0
+
+ ip netns del testns
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: unicast route carrier test"
+ return 1
+ fi
+ echo "PASS: unicast route carrier test"
+}
+
+fib_carrier_test()
+{
+ echo "Running netdev carrier change tests"
+
+ fib_carrier_local_test
+ fib_carrier_unicast_test
+}
+
+fib_test()
+{
+ fib_unreg_test
+ fib_down_test
+ fib_carrier_test
+}
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit 0
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit 0
+fi
+
+ip route help 2>&1 | grep -q fibmatch
+if [ $? -ne 0 ]; then
+ echo "SKIP: iproute2 too old, missing fibmatch"
+ exit 0
+fi
+
+fib_test
+
+exit $ret
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
index 3ab6ec403905..e11fe84de0fd 100644
--- a/tools/testing/selftests/net/msg_zerocopy.c
+++ b/tools/testing/selftests/net/msg_zerocopy.c
@@ -259,22 +259,28 @@ static int setup_ip6h(struct ipv6hdr *ip6h, uint16_t payload_len)
return sizeof(*ip6h);
}
-static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
+
+static void setup_sockaddr(int domain, const char *str_addr,
+ struct sockaddr_storage *sockaddr)
{
struct sockaddr_in6 *addr6 = (void *) sockaddr;
struct sockaddr_in *addr4 = (void *) sockaddr;
switch (domain) {
case PF_INET:
+ memset(addr4, 0, sizeof(*addr4));
addr4->sin_family = AF_INET;
addr4->sin_port = htons(cfg_port);
- if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
+ if (str_addr &&
+ inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
error(1, 0, "ipv4 parse error: %s", str_addr);
break;
case PF_INET6:
+ memset(addr6, 0, sizeof(*addr6));
addr6->sin6_family = AF_INET6;
addr6->sin6_port = htons(cfg_port);
- if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
+ if (str_addr &&
+ inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
error(1, 0, "ipv6 parse error: %s", str_addr);
break;
default:
@@ -603,6 +609,7 @@ static void parse_opts(int argc, char **argv)
sizeof(struct tcphdr) -
40 /* max tcp options */;
int c;
+ char *daddr = NULL, *saddr = NULL;
cfg_payload_len = max_payload_len;
@@ -627,7 +634,7 @@ static void parse_opts(int argc, char **argv)
cfg_cpu = strtol(optarg, NULL, 0);
break;
case 'D':
- setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
+ daddr = optarg;
break;
case 'i':
cfg_ifindex = if_nametoindex(optarg);
@@ -638,7 +645,7 @@ static void parse_opts(int argc, char **argv)
cfg_cork_mixed = true;
break;
case 'p':
- cfg_port = htons(strtoul(optarg, NULL, 0));
+ cfg_port = strtoul(optarg, NULL, 0);
break;
case 'r':
cfg_rx = true;
@@ -647,7 +654,7 @@ static void parse_opts(int argc, char **argv)
cfg_payload_len = strtoul(optarg, NULL, 0);
break;
case 'S':
- setup_sockaddr(cfg_family, optarg, &cfg_src_addr);
+ saddr = optarg;
break;
case 't':
cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000;
@@ -660,6 +667,8 @@ static void parse_opts(int argc, char **argv)
break;
}
}
+ setup_sockaddr(cfg_family, daddr, &cfg_dst_addr);
+ setup_sockaddr(cfg_family, saddr, &cfg_src_addr);
if (cfg_payload_len > max_payload_len)
error(1, 0, "-s: payload exceeds max (%d)", max_payload_len);
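
The reordering above works because setup_sockaddr() applies htons() to cfg_port itself; -p now stores the raw port and the sockaddrs are built only after every option has been parsed, so option order no longer matters. A minimal sketch of the same parse-then-resolve pattern (names here are illustrative, not from the selftest):

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <string.h>

	/* illustrative helper, not part of the selftest */
	static void resolve_v4(const char *ip, unsigned short port,
			       struct sockaddr_in *out)
	{
		memset(out, 0, sizeof(*out));
		out->sin_family = AF_INET;
		out->sin_port = htons(port);	/* byte-swap exactly once, here */
		if (ip)
			inet_pton(AF_INET, ip, &out->sin_addr);
	}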
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 5215493166c9..a622eeecc3a6 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -502,6 +502,231 @@ kci_test_macsec()
echo "PASS: macsec"
}
+kci_test_gretap()
+{
+ testns="testns"
+ DEV_NS=gretap00
+ ret=0
+
+ ip netns add "$testns"
+ if [ $? -ne 0 ]; then
+ echo "SKIP gretap tests: cannot add net namespace $testns"
+ return 1
+ fi
+
+ ip link help gretap 2>&1 | grep -q "^Usage:"
+ if [ $? -ne 0 ]; then
+ echo "SKIP: gretap: iproute2 too old"
+ ip netns del "$testns"
+ return 1
+ fi
+
+ # test native tunnel
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
+ key 102 local 172.16.1.100 remote 172.16.1.200
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test external mode
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ ip netns del "$testns"
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: gretap"
+ return 1
+ fi
+ echo "PASS: gretap"
+}
+
+kci_test_ip6gretap()
+{
+ testns="testns"
+ DEV_NS=ip6gretap00
+ ret=0
+
+ ip netns add "$testns"
+ if [ $? -ne 0 ]; then
+ echo "SKIP ip6gretap tests: cannot add net namespace $testns"
+ return 1
+ fi
+
+ ip link help ip6gretap 2>&1 | grep -q "^Usage:"
+ if [ $? -ne 0 ]; then
+ echo "SKIP: ip6gretap: iproute2 too old"
+ ip netns del "$testns"
+ return 1
+ fi
+
+ # test native tunnel
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
+ key 102 local fc00:100::1 remote fc00:100::2
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test external mode
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ ip netns del "$testns"
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ip6gretap"
+ return 1
+ fi
+ echo "PASS: ip6gretap"
+}
+
+kci_test_erspan()
+{
+ testns="testns"
+ DEV_NS=erspan00
+ ret=0
+
+ ip link help erspan 2>&1 | grep -q "^Usage:"
+ if [ $? -ne 0 ]; then
+ echo "SKIP: erspan: iproute2 too old"
+ return 1
+ fi
+
+ ip netns add "$testns"
+ if [ $? -ne 0 ]; then
+ echo "SKIP erspan tests: cannot add net namespace $testns"
+ return 1
+ fi
+
+ # test native tunnel erspan v1
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ key 102 local 172.16.1.100 remote 172.16.1.200 \
+ erspan_ver 1 erspan 488
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test native tunnel erspan v2
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ key 102 local 172.16.1.100 remote 172.16.1.200 \
+ erspan_ver 2 erspan_dir ingress erspan_hwid 7
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test external mode
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ ip netns del "$testns"
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: erspan"
+ return 1
+ fi
+ echo "PASS: erspan"
+}
+
+kci_test_ip6erspan()
+{
+ testns="testns"
+ DEV_NS=ip6erspan00
+ ret=0
+
+ ip link help ip6erspan 2>&1 | grep -q "^Usage:"
+ if [ $? -ne 0 ]; then
+ echo "SKIP: ip6erspan: iproute2 too old"
+ return 1
+ fi
+
+ ip netns add "$testns"
+ if [ $? -ne 0 ]; then
+ echo "SKIP ip6erspan tests: cannot add net namespace $testns"
+ return 1
+ fi
+
+ # test native tunnel ip6erspan v1
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ key 102 local fc00:100::1 remote fc00:100::2 \
+ erspan_ver 1 erspan 488
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test native tunnel ip6erspan v2
+ ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ key 102 local fc00:100::1 remote fc00:100::2 \
+ erspan_ver 2 erspan_dir ingress erspan_hwid 7
+ check_err $?
+
+ ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ check_err $?
+
+ ip netns exec "$testns" ip link set dev $DEV_NS up
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ # test external mode
+ ip netns exec "$testns" ip link add dev "$DEV_NS" \
+ type ip6erspan external
+ check_err $?
+
+ ip netns exec "$testns" ip link del "$DEV_NS"
+ check_err $?
+
+ ip netns del "$testns"
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ip6erspan"
+ return 1
+ fi
+ echo "PASS: ip6erspan"
+}
+
kci_test_rtnl()
{
kci_add_dummy
@@ -514,6 +739,10 @@ kci_test_rtnl()
kci_test_route_get
kci_test_tc
kci_test_gre
+ kci_test_gretap
+ kci_test_ip6gretap
+ kci_test_erspan
+ kci_test_ip6erspan
kci_test_bridge
kci_test_addrlabel
kci_test_ifalias
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index 5d2eae16f7ee..a5d8f0ab0da0 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -60,9 +60,7 @@ static int clock_adjtime(clockid_t id, struct timex *tx)
static clockid_t get_clockid(int fd)
{
#define CLOCKFD 3
-#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
-
- return FD_TO_CLOCKID(fd);
+ return (((unsigned int) ~fd) << 3) | CLOCKFD;
}
static void handle_alarm(int s)
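
The open-coded expression matches the kernel's dynamic POSIX clock convention: the character-device fd is complemented as an unsigned value, shifted left by 3, and OR-ed with CLOCKFD. Casting to unsigned first avoids the undefined left shift of a negative value that the old macro risked. A hedged sketch of reading a PTP hardware clock this way (the /dev/ptp0 path is an assumption for illustration):

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define CLOCKFD 3
	#define FD_TO_CLOCKID(fd) \
		((clockid_t)((((unsigned int) ~(fd)) << 3) | CLOCKFD))

	int main(void)
	{
		int fd = open("/dev/ptp0", O_RDWR);	/* assumed device path */
		struct timespec ts;

		if (fd < 0)
			return 1;
		if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
			printf("PHC time: %lld.%09ld\n",
			       (long long) ts.tv_sec, ts.tv_nsec);
		close(fd);
		return 0;
	}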
diff --git a/tools/testing/selftests/rcutorture/bin/config2frag.sh b/tools/testing/selftests/rcutorture/bin/config2frag.sh
deleted file mode 100755
index 56f51ae13d73..000000000000
--- a/tools/testing/selftests/rcutorture/bin/config2frag.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-# Usage: config2frag.sh < .config > configfrag
-#
-# Converts the "# CONFIG_XXX is not set" to "CONFIG_XXX=n" so that the
-# resulting file becomes a legitimate Kconfig fragment.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
-# Copyright (C) IBM Corporation, 2013
-#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-
-LANG=C sed -e 's/^# CONFIG_\([a-zA-Z0-9_]*\) is not set$/CONFIG_\1=n/'
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 51f66a7ce876..c15f270e121d 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -51,7 +51,7 @@ then
mkdir $builddir
fi
else
- echo Bad build directory: \"$builddir\"
+ echo Bad build directory: \"$buildloc\"
exit 2
fi
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index fb66d0173638..34d126734cde 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -29,11 +29,6 @@ then
exit 1
fi
builddir=${2}
-if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir"
-then
- echo "kvm-build.sh :$builddir: Not a writable directory, cannot build into it"
- exit 1
-fi
T=${TMPDIR-/tmp}/test-linux.sh.$$
trap 'rm -rf $T' 0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
index 43f764098e50..2de92f43ee8c 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
@@ -23,7 +23,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-if test -d $i
+if test -d "$i" -a -r "$i"
then
:
else
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 559e01ac86be..c2e1bb6d0cba 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -23,14 +23,14 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-if test -d $i
+if test -d "$i" -a -r "$i"
then
:
else
echo Unreadable results directory: $i
exit 1
fi
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
configfile=`echo $i | sed -e 's/^.*\///'`
ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
index f79b0e9e84fc..963f71289d22 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
@@ -26,7 +26,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
if test "`grep -c 'rcu_exp_grace_period.*start' < $i/console.log`" -lt 100
then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
index 8f3121afc716..ccebf772fa1e 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
@@ -23,7 +23,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-if test -d $i
+if test -d "$i" -a -r "$i"
then
:
else
@@ -31,7 +31,7 @@ else
exit 1
fi
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
if kvm-recheck-rcuperf-ftrace.sh $i
then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index f659346d3358..f7e988f369dd 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -25,7 +25,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
for rd in "$@"
do
firsttime=1
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index ab14b97c942c..1b78a12740e5 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -42,7 +42,7 @@ T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
trap 'rm -rf $T' 0
mkdir $T
-. $KVM/bin/functions.sh
+. functions.sh
. $CONFIGFRAG/ver_functions.sh
config_template=${1}
@@ -154,9 +154,7 @@ cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
vcpus=`identify_qemu_vcpus`
if test $cpu_count -gt $vcpus
then
- echo CPU count limited from $cpu_count to $vcpus
- touch $resdir/Warnings
- echo CPU count limited from $cpu_count to $vcpus >> $resdir/Warnings
+ echo CPU count limited from $cpu_count to $vcpus | tee -a $resdir/Warnings
cpu_count=$vcpus
fi
qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index ccd49e958fd2..7d1f607f0f76 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -1,8 +1,7 @@
#!/bin/bash
#
# Run a series of 14 tests under KVM. These are not particularly
-# well-selected or well-tuned, but are the current set. Run from the
-# top level of the source tree.
+# well-selected or well-tuned, but are the current set.
#
# Edit the definitions below to set the locations of the various directories,
# as well as the test duration.
@@ -34,6 +33,8 @@ T=${TMPDIR-/tmp}/kvm.sh.$$
trap 'rm -rf $T' 0
mkdir $T
+cd `dirname $scriptname`/../../../../../
+
dur=$((30*60))
dryrun=""
KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
@@ -70,7 +71,7 @@ usage () {
echo " --kmake-arg kernel-make-arguments"
echo " --mac nn:nn:nn:nn:nn:nn"
echo " --no-initrd"
- echo " --qemu-args qemu-system-..."
+ echo " --qemu-args qemu-arguments"
echo " --qemu-cmd qemu-system-..."
echo " --results absolute-pathname"
echo " --torture rcu"
@@ -150,7 +151,7 @@ do
TORTURE_INITRD=""; export TORTURE_INITRD
;;
--qemu-args|--qemu-arg)
- checkarg --qemu-args "-qemu args" $# "$2" '^-' '^error'
+ checkarg --qemu-args "(qemu arguments)" $# "$2" '^-' '^error'
TORTURE_QEMU_ARG="$2"
shift
;;
@@ -238,7 +239,6 @@ BEGIN {
}
END {
- alldone = 0;
batch = 0;
nc = -1;
@@ -331,8 +331,7 @@ awk < $T/cfgcpu.pack \
# Dump out the scripting required to run one test batch.
function dump(first, pastlast, batchnum)
{
- print "echo ----Start batch " batchnum ": `date`";
- print "echo ----Start batch " batchnum ": `date` >> " rd "/log";
+ print "echo ----Start batch " batchnum ": `date` | tee -a " rd "log";
print "needqemurun="
jn=1
for (j = first; j < pastlast; j++) {
@@ -349,21 +348,18 @@ function dump(first, pastlast, batchnum)
ovf = "-ovf";
else
ovf = "";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date`";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` >> " rd "/log";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` | tee -a " rd "log";
print "rm -f " builddir ".*";
print "touch " builddir ".wait";
print "mkdir " builddir " > /dev/null 2>&1 || :";
print "mkdir " rd cfr[jn] " || :";
print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &"
- print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date`";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` >> " rd "/log";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` | tee -a " rd "log";
print "while test -f " builddir ".wait"
print "do"
print "\tsleep 1"
print "done"
- print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date`";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` >> " rd "/log";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` | tee -a " rd "log";
jn++;
}
for (j = 1; j < jn; j++) {
@@ -371,8 +367,7 @@ function dump(first, pastlast, batchnum)
print "rm -f " builddir ".ready"
print "if test -f \"" rd cfr[j] "/builtkernel\""
print "then"
- print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date`";
- print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date` >> " rd "/log";
+ print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date` | tee -a " rd "log";
print "\tneedqemurun=1"
print "fi"
}
@@ -386,31 +381,26 @@ function dump(first, pastlast, batchnum)
njitter = ja[1];
if (TORTURE_BUILDONLY && njitter != 0) {
njitter = 0;
- print "echo Build-only run, so suppressing jitter >> " rd "/log"
+ print "echo Build-only run, so suppressing jitter | tee -a " rd "log"
}
if (TORTURE_BUILDONLY) {
print "needqemurun="
}
print "if test -n \"$needqemurun\""
print "then"
- print "\techo ---- Starting kernels. `date`";
- print "\techo ---- Starting kernels. `date` >> " rd "/log";
+ print "\techo ---- Starting kernels. `date` | tee -a " rd "log";
for (j = 0; j < njitter; j++)
print "\tjitter.sh " j " " dur " " ja[2] " " ja[3] "&"
print "\twait"
- print "\techo ---- All kernel runs complete. `date`";
- print "\techo ---- All kernel runs complete. `date` >> " rd "/log";
+ print "\techo ---- All kernel runs complete. `date` | tee -a " rd "log";
print "else"
print "\twait"
- print "\techo ---- No kernel runs. `date`";
- print "\techo ---- No kernel runs. `date` >> " rd "/log";
+ print "\techo ---- No kernel runs. `date` | tee -a " rd "log";
print "fi"
for (j = 1; j < jn; j++) {
builddir=KVM "/b" j
- print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:";
- print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: >> " rd "/log";
- print "cat " rd cfr[j] "/kvm-test-1-run.sh.out";
- print "cat " rd cfr[j] "/kvm-test-1-run.sh.out >> " rd "/log";
+ print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: | tee -a " rd "log";
+ print "cat " rd cfr[j] "/kvm-test-1-run.sh.out | tee -a " rd "log";
}
}
diff --git a/tools/testing/selftests/rcutorture/bin/parse-torture.sh b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
index f12c38909b00..5987e50cfeb4 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
@@ -55,7 +55,7 @@ then
exit
fi
-grep --binary-files=text 'torture:.*ver:' $file | grep --binary-files=text -v '(null)' | sed -e 's/^(initramfs)[^]]*] //' -e 's/^\[[^]]*] //' |
+grep --binary-files=text 'torture:.*ver:' $file | egrep --binary-files=text -v '\(null\)|rtc: 000000000* ' | sed -e 's/^(initramfs)[^]]*] //' -e 's/^\[[^]]*] //' |
awk '
BEGIN {
ver = 0;
diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
index 252aae618984..80eb646e1319 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
@@ -38,6 +38,5 @@ per_version_boot_params () {
echo $1 `locktorture_param_onoff "$1" "$2"` \
locktorture.stat_interval=15 \
locktorture.shutdown_secs=$3 \
- locktorture.torture_runnable=1 \
locktorture.verbose=1
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index ffb85ed786fa..24ec91041957 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -51,7 +51,6 @@ per_version_boot_params () {
`rcutorture_param_n_barrier_cbs "$1"` \
rcutorture.stat_interval=15 \
rcutorture.shutdown_secs=$3 \
- rcutorture.torture_runnable=1 \
rcutorture.test_no_idle_hz=1 \
rcutorture.verbose=1
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
index 34f2a1b35ee5..b9603115d7c7 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
@@ -46,7 +46,6 @@ rcuperf_param_nwriters () {
per_version_boot_params () {
echo $1 `rcuperf_param_nreaders "$1"` \
`rcuperf_param_nwriters "$1"` \
- rcuperf.perf_runnable=1 \
rcuperf.shutdown=1 \
rcuperf.verbose=1
}
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 7f45806bd863..fdefa2295ddc 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -8,17 +8,18 @@ endif
CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
LDLIBS = -lrt
TEST_GEN_FILES = compaction_test
+TEST_GEN_FILES += gup_benchmark
TEST_GEN_FILES += hugepage-mmap
TEST_GEN_FILES += hugepage-shm
TEST_GEN_FILES += map_hugetlb
+TEST_GEN_FILES += mlock-random-test
TEST_GEN_FILES += mlock2-tests
TEST_GEN_FILES += on-fault-limit
TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += userfaultfd
-TEST_GEN_FILES += mlock-random-test
+TEST_GEN_FILES += va_128TBswitch
TEST_GEN_FILES += virtual_address_range
-TEST_GEN_FILES += gup_benchmark
TEST_PROGS := run_vmtests
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index cc826326de87..d2561895a021 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -177,4 +177,15 @@ else
echo "[PASS]"
fi
+echo "-----------------------------"
+echo "running virtual address 128TB switch test"
+echo "-----------------------------"
+./va_128TBswitch
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
exit $exitcode
diff --git a/tools/testing/selftests/vm/va_128TBswitch.c b/tools/testing/selftests/vm/va_128TBswitch.c
new file mode 100644
index 000000000000..e7fe734c374f
--- /dev/null
+++ b/tools/testing/selftests/vm/va_128TBswitch.c
@@ -0,0 +1,297 @@
+/*
+ *
+ * Authors: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+ * Authors: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <stdio.h>
+#include <sys/mman.h>
+#include <string.h>
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#ifdef __powerpc64__
+#define PAGE_SIZE (64 << 10)
+/*
+ * This will work with both 16M and 2M hugepage sizes.
+ */
+#define HUGETLB_SIZE (16 << 20)
+#else
+#define PAGE_SIZE (4 << 10)
+#define HUGETLB_SIZE (2 << 20)
+#endif
+
+/*
+ * A hint address at or above 128TB is used to request the
+ * large address space.
+ */
+#define ADDR_SWITCH_HINT (1UL << 47)
+#define LOW_ADDR ((void *) (1UL << 30))
+#define HIGH_ADDR ((void *) (1UL << 48))
+
+struct testcase {
+ void *addr;
+ unsigned long size;
+ unsigned long flags;
+ const char *msg;
+ unsigned int low_addr_required:1;
+ unsigned int keep_mapped:1;
+};
+
+static struct testcase testcases[] = {
+ {
+ /*
+ * If stack is moved, we could possibly allocate
+ * this at the requested address.
+ */
+ .addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
+ .size = PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
+ .low_addr_required = 1,
+ },
+ {
+ /*
+ * We should never allocate at the requested address or above it.
+ * The length crosses the 128TB boundary, so without MAP_FIXED
+ * we will always search in the lower address space.
+ */
+ .addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, (2 * PAGE_SIZE))",
+ .low_addr_required = 1,
+ },
+ {
+ /*
+ * Exact mapping at 128TB; the area is free, so we should get it
+ * even without MAP_FIXED.
+ */
+ .addr = ((void *)(ADDR_SWITCH_HINT)),
+ .size = PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
+ },
+ {
+ .addr = NULL,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = LOW_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(LOW_ADDR)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
+ },
+ {
+ .addr = (void *) -1,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *) -1,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1) again",
+ },
+ {
+ .addr = ((void *)(ADDR_SWITCH_HINT - PAGE_SIZE)),
+ .size = PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, PAGE_SIZE)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2 * PAGE_SIZE)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE / 2),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE/2 , 2 * PAGE_SIZE)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = ((void *)(ADDR_SWITCH_HINT)),
+ .size = PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT, PAGE_SIZE)",
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(ADDR_SWITCH_HINT, 2 * PAGE_SIZE, MAP_FIXED)",
+ },
+};
+
+static struct testcase hugetlb_testcases[] = {
+ {
+ .addr = NULL,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = LOW_ADDR,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
+ },
+ {
+ .addr = (void *) -1,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *) -1,
+ .size = HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB) again",
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT - PAGE_SIZE),
+ .size = 2 * HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(ADDR_SWITCH_HINT - PAGE_SIZE, 2*HUGETLB_SIZE, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)(ADDR_SWITCH_HINT),
+ .size = 2 * HUGETLB_SIZE,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(ADDR_SWITCH_HINT , 2*HUGETLB_SIZE, MAP_FIXED | MAP_HUGETLB)",
+ },
+};
+
+static int run_test(struct testcase *test, int count)
+{
+ void *p;
+ int i, ret = 0;
+
+ for (i = 0; i < count; i++) {
+ struct testcase *t = test + i;
+
+ p = mmap(t->addr, t->size, PROT_READ | PROT_WRITE, t->flags, -1, 0);
+
+ printf("%s: %p - ", t->msg, p);
+
+ if (p == MAP_FAILED) {
+ printf("FAILED\n");
+ ret = 1;
+ continue;
+ }
+
+ if (t->low_addr_required && p >= (void *)(ADDR_SWITCH_HINT)) {
+ printf("FAILED\n");
+ ret = 1;
+ } else {
+ /*
+ * Dereference the returned address so that we catch bugs
+ * in page-fault handling.
+ */
+ memset(p, 0, t->size);
+ printf("OK\n");
+ }
+ if (!t->keep_mapped)
+ munmap(p, t->size);
+ }
+
+ return ret;
+}
+
+static int supported_arch(void)
+{
+#if defined(__powerpc64__)
+ return 1;
+#elif defined(__x86_64__)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ if (!supported_arch())
+ return 0;
+
+ ret = run_test(testcases, ARRAY_SIZE(testcases));
+ if (argc == 2 && !strcmp(argv[1], "--run-hugetlb"))
+ ret = run_test(hugetlb_testcases, ARRAY_SIZE(hugetlb_testcases));
+ return ret;
+}
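
The testcases above all hinge on one rule: an mmap() hint at or above ADDR_SWITCH_HINT opts that allocation into the larger address space, while NULL or low hints keep the process under 128TB. A minimal standalone probe of that behavior (illustrative only; whether the high hint is honored depends on kernel and architecture support):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		void *lo = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		void *hi = mmap((void *)(1UL << 48), 4096,
				PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		/* without large-VA support the high hint is simply ignored */
		printf("default hint: %p, high hint: %p\n", lo, hi);
		return 0;
	}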
diff --git a/tools/testing/selftests/x86/5lvl.c b/tools/testing/selftests/x86/5lvl.c
deleted file mode 100644
index 2eafdcd4c2b3..000000000000
--- a/tools/testing/selftests/x86/5lvl.c
+++ /dev/null
@@ -1,177 +0,0 @@
-#include <stdio.h>
-#include <sys/mman.h>
-
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
-#define PAGE_SIZE 4096
-#define LOW_ADDR ((void *) (1UL << 30))
-#define HIGH_ADDR ((void *) (1UL << 50))
-
-struct testcase {
- void *addr;
- unsigned long size;
- unsigned long flags;
- const char *msg;
- unsigned int low_addr_required:1;
- unsigned int keep_mapped:1;
-};
-
-static struct testcase testcases[] = {
- {
- .addr = NULL,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(NULL)",
- .low_addr_required = 1,
- },
- {
- .addr = LOW_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(LOW_ADDR)",
- .low_addr_required = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR)",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR) again",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
- },
- {
- .addr = (void*) -1,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1)",
- .keep_mapped = 1,
- },
- {
- .addr = (void*) -1,
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1) again",
- },
- {
- .addr = (void *)((1UL << 47) - PAGE_SIZE),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap((1UL << 47), 2 * PAGE_SIZE)",
- .low_addr_required = 1,
- .keep_mapped = 1,
- },
- {
- .addr = (void *)((1UL << 47) - PAGE_SIZE / 2),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap((1UL << 47), 2 * PAGE_SIZE / 2)",
- .low_addr_required = 1,
- .keep_mapped = 1,
- },
- {
- .addr = (void *)((1UL << 47) - PAGE_SIZE),
- .size = 2 * PAGE_SIZE,
- .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap((1UL << 47) - PAGE_SIZE, 2 * PAGE_SIZE, MAP_FIXED)",
- },
- {
- .addr = NULL,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(NULL, MAP_HUGETLB)",
- .low_addr_required = 1,
- },
- {
- .addr = LOW_ADDR,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
- .low_addr_required = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
- .keep_mapped = 1,
- },
- {
- .addr = HIGH_ADDR,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
- },
- {
- .addr = (void*) -1,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1, MAP_HUGETLB)",
- .keep_mapped = 1,
- },
- {
- .addr = (void*) -1,
- .size = 2UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(-1, MAP_HUGETLB) again",
- },
- {
- .addr = (void *)((1UL << 47) - PAGE_SIZE),
- .size = 4UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap((1UL << 47), 4UL << 20, MAP_HUGETLB)",
- .low_addr_required = 1,
- .keep_mapped = 1,
- },
- {
- .addr = (void *)((1UL << 47) - (2UL << 20)),
- .size = 4UL << 20,
- .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- .msg = "mmap((1UL << 47) - (2UL << 20), 4UL << 20, MAP_FIXED | MAP_HUGETLB)",
- },
-};
-
-int main(int argc, char **argv)
-{
- int i;
- void *p;
-
- for (i = 0; i < ARRAY_SIZE(testcases); i++) {
- struct testcase *t = testcases + i;
-
- p = mmap(t->addr, t->size, PROT_NONE, t->flags, -1, 0);
-
- printf("%s: %p - ", t->msg, p);
-
- if (p == MAP_FAILED) {
- printf("FAILED\n");
- continue;
- }
-
- if (t->low_addr_required && p >= (void *)(1UL << 47))
- printf("FAILED\n");
- else
- printf("OK\n");
- if (!t->keep_mapped)
- munmap(p, t->size);
- }
- return 0;
-}
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 939a337128db..5d4f10ac2af2 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -7,7 +7,7 @@ include ../lib.mk
TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
- protection_keys test_vdso
+ protection_keys test_vdso test_vsyscall
TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
vdso_restorer
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
new file mode 100644
index 000000000000..7a744fa7b786
--- /dev/null
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <time.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <dlfcn.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <sys/ucontext.h>
+#include <errno.h>
+#include <err.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <setjmp.h>
+
+#ifdef __x86_64__
+# define VSYS(x) (x)
+#else
+# define VSYS(x) 0
+#endif
+
+#ifndef SYS_getcpu
+# ifdef __x86_64__
+# define SYS_getcpu 309
+# else
+# define SYS_getcpu 318
+# endif
+#endif
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+ int flags)
+{
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handler;
+ sa.sa_flags = SA_SIGINFO | flags;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+}
+
+/* vsyscalls and vDSO */
+bool should_read_vsyscall = false;
+
+typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
+gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
+gtod_t vdso_gtod;
+
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+vgettime_t vdso_gettime;
+
+typedef long (*time_func_t)(time_t *t);
+time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
+time_func_t vdso_time;
+
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
+getcpu_t vdso_getcpu;
+
+static void init_vdso(void)
+{
+ void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+ if (!vdso)
+ vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+ if (!vdso) {
+ printf("[WARN]\tfailed to find vDSO\n");
+ return;
+ }
+
+ vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
+ if (!vdso_gtod)
+ printf("[WARN]\tfailed to find gettimeofday in vDSO\n");
+
+ vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+ if (!vdso_gettime)
+ printf("[WARN]\tfailed to find clock_gettime in vDSO\n");
+
+ vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
+ if (!vdso_time)
+ printf("[WARN]\tfailed to find time in vDSO\n");
+
+ vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
+ if (!vdso_getcpu) {
+ /* getcpu() was never wired up in the 32-bit vDSO. */
+ printf("[%s]\tfailed to find getcpu in vDSO\n",
+ sizeof(long) == 8 ? "WARN" : "NOTE");
+ }
+}
+
+static int init_vsys(void)
+{
+#ifdef __x86_64__
+ int nerrs = 0;
+ FILE *maps;
+ char line[128];
+ bool found = false;
+
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps) {
+ printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n");
+ should_read_vsyscall = true;
+ return 0;
+ }
+
+ while (fgets(line, sizeof(line), maps)) {
+ char r, x;
+ void *start, *end;
+ char name[128];
+ if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
+ &start, &end, &r, &x, name) != 5)
+ continue;
+
+ if (strcmp(name, "[vsyscall]"))
+ continue;
+
+ printf("\tvsyscall map: %s", line);
+
+ if (start != (void *)0xffffffffff600000 ||
+ end != (void *)0xffffffffff601000) {
+ printf("[FAIL]\taddress range is nonsense\n");
+ nerrs++;
+ }
+
+ printf("\tvsyscall permissions are %c-%c\n", r, x);
+ should_read_vsyscall = (r == 'r');
+ if (x != 'x') {
+ vgtod = NULL;
+ vtime = NULL;
+ vgetcpu = NULL;
+ }
+
+ found = true;
+ break;
+ }
+
+ fclose(maps);
+
+ if (!found) {
+ printf("\tno vsyscall map in /proc/self/maps\n");
+ should_read_vsyscall = false;
+ vgtod = NULL;
+ vtime = NULL;
+ vgetcpu = NULL;
+ }
+
+ return nerrs;
+#else
+ return 0;
+#endif
+}
+
+/* syscalls */
+static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
+{
+ return syscall(SYS_gettimeofday, tv, tz);
+}
+
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+ return syscall(SYS_clock_gettime, id, ts);
+}
+
+static inline long sys_time(time_t *t)
+{
+ return syscall(SYS_time, t);
+}
+
+static inline long sys_getcpu(unsigned *cpu, unsigned *node,
+ void *cache)
+{
+ return syscall(SYS_getcpu, cpu, node, cache);
+}
+
+static jmp_buf jmpbuf;
+
+static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
+{
+ siglongjmp(jmpbuf, 1);
+}
+
+static double tv_diff(const struct timeval *a, const struct timeval *b)
+{
+ return (double)(a->tv_sec - b->tv_sec) +
+ (double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
+}
+
+static int check_gtod(const struct timeval *tv_sys1,
+ const struct timeval *tv_sys2,
+ const struct timezone *tz_sys,
+ const char *which,
+ const struct timeval *tv_other,
+ const struct timezone *tz_other)
+{
+ int nerrs = 0;
+ double d1, d2;
+
+ if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) {
+ printf("[FAIL] %s tz mismatch\n", which);
+ nerrs++;
+ }
+
+ d1 = tv_diff(tv_other, tv_sys1);
+ d2 = tv_diff(tv_sys2, tv_other);
+ printf("\t%s time offsets: %lf %lf\n", which, d1, d2);
+
+ if (d1 < 0 || d2 < 0) {
+ printf("[FAIL]\t%s time was inconsistent with the syscall\n", which);
+ nerrs++;
+ } else {
+ printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which);
+ }
+
+ return nerrs;
+}
+
+static int test_gtod(void)
+{
+ struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
+ struct timezone tz_sys, tz_vdso, tz_vsys;
+ long ret_vdso = -1;
+ long ret_vsys = -1;
+ int nerrs = 0;
+
+ printf("[RUN]\ttest gettimeofday()\n");
+
+ if (sys_gtod(&tv_sys1, &tz_sys) != 0)
+ err(1, "syscall gettimeofday");
+ if (vdso_gtod)
+ ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
+ if (vgtod)
+ ret_vsys = vgtod(&tv_vsys, &tz_vsys);
+ if (sys_gtod(&tv_sys2, &tz_sys) != 0)
+ err(1, "syscall gettimeofday");
+
+ if (vdso_gtod) {
+ if (ret_vdso == 0) {
+ nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
+ } else {
+ printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso);
+ nerrs++;
+ }
+ }
+
+ if (vgtod) {
+ if (ret_vsys == 0) {
+ nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
+ } else {
+ printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys);
+ nerrs++;
+ }
+ }
+
+ return nerrs;
+}
+
+static int test_time(void)
+{
+ int nerrs = 0;
+
+ printf("[RUN]\ttest time()\n");
+ long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
+ long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
+ t_sys1 = sys_time(&t2_sys1);
+ if (vdso_time)
+ t_vdso = vdso_time(&t2_vdso);
+ if (vtime)
+ t_vsys = vtime(&t2_vsys);
+ t_sys2 = sys_time(&t2_sys2);
+ if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
+ printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2);
+ nerrs++;
+ return nerrs;
+ }
+
+ if (vdso_time) {
+ if (t_vdso < 0 || t_vdso != t2_vdso) {
+ printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso);
+ nerrs++;
+ } else if (t_vdso < t_sys1 || t_vdso > t_sys2) {
+ printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2);
+ nerrs++;
+ } else {
+ printf("[OK]\tvDSO time() is okay\n");
+ }
+ }
+
+ if (vtime) {
+ if (t_vsys < 0 || t_vsys != t2_vsys) {
+ printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys);
+ nerrs++;
+ } else if (t_vsys < t_sys1 || t_vsys > t_sys2) {
+ printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2);
+ nerrs++;
+ } else {
+ printf("[OK]\tvsyscall time() is okay\n");
+ }
+ }
+
+ return nerrs;
+}
+
+static int test_getcpu(int cpu)
+{
+ int nerrs = 0;
+ long ret_sys, ret_vdso = -1, ret_vsys = -1;
+
+ printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
+
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
+ printf("[SKIP]\tfailed to force CPU %d\n", cpu);
+ return nerrs;
+ }
+
+ unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
+ unsigned node = 0;
+ bool have_node = false;
+ ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
+ if (vdso_getcpu)
+ ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
+ if (vgetcpu)
+ ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
+
+ if (ret_sys == 0) {
+ if (cpu_sys != cpu) {
+ printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu);
+ nerrs++;
+ }
+
+ have_node = true;
+ node = node_sys;
+ }
+
+ if (vdso_getcpu) {
+ if (ret_vdso) {
+ printf("[FAIL]\tvDSO getcpu() failed\n");
+ nerrs++;
+ } else {
+ if (!have_node) {
+ have_node = true;
+ node = node_vdso;
+ }
+
+ if (cpu_vdso != cpu) {
+ printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu);
+ nerrs++;
+ } else {
+ printf("[OK]\tvDSO reported correct CPU\n");
+ }
+
+ if (node_vdso != node) {
+ printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node);
+ nerrs++;
+ } else {
+ printf("[OK]\tvDSO reported correct node\n");
+ }
+ }
+ }
+
+ if (vgetcpu) {
+ if (ret_vsys) {
+ printf("[FAIL]\tvsyscall getcpu() failed\n");
+ nerrs++;
+ } else {
+ if (!have_node) {
+ have_node = true;
+ node = node_vsys;
+ }
+
+ if (cpu_vsys != cpu) {
+ printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu);
+ nerrs++;
+ } else {
+ printf("[OK]\tvsyscall reported correct CPU\n");
+ }
+
+ if (node_vsys != node) {
+ printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node);
+ nerrs++;
+ } else {
+ printf("[OK]\tvsyscall reported correct node\n");
+ }
+ }
+ }
+
+ return nerrs;
+}
+
+static int test_vsys_r(void)
+{
+#ifdef __x86_64__
+ printf("[RUN]\tChecking read access to the vsyscall page\n");
+ bool can_read;
+ if (sigsetjmp(jmpbuf, 1) == 0) {
+ *(volatile int *)0xffffffffff600000;
+ can_read = true;
+ } else {
+ can_read = false;
+ }
+
+ if (can_read && !should_read_vsyscall) {
+ printf("[FAIL]\tWe have read access, but we shouldn't\n");
+ return 1;
+ } else if (!can_read && should_read_vsyscall) {
+ printf("[FAIL]\tWe don't have read access, but we should\n");
+ return 1;
+ } else {
+ printf("[OK]\tgot expected result\n");
+ }
+#endif
+
+ return 0;
+}
+
+
+#ifdef __x86_64__
+#define X86_EFLAGS_TF (1UL << 8)
+static volatile sig_atomic_t num_vsyscall_traps;
+
+static unsigned long get_eflags(void)
+{
+ unsigned long eflags;
+ asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
+ return eflags;
+}
+
+static void set_eflags(unsigned long eflags)
+{
+ asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
+}
+
+static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
+{
+ ucontext_t *ctx = (ucontext_t *)ctx_void;
+ unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
+
+ if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
+ num_vsyscall_traps++;
+}
+
+static int test_native_vsyscall(void)
+{
+ time_t tmp;
+ bool is_native;
+
+ if (!vtime)
+ return 0;
+
+ printf("[RUN]\tchecking for native vsyscall\n");
+ sethandler(SIGTRAP, sigtrap, 0);
+ set_eflags(get_eflags() | X86_EFLAGS_TF);
+ vtime(&tmp);
+ set_eflags(get_eflags() & ~X86_EFLAGS_TF);
+
+ /*
+ * If vsyscalls are emulated, we expect a single trap in the
+ * vsyscall page -- the call instruction will trap with RIP
+ * pointing to the entry point before emulation takes over.
+ * In native mode, we expect two traps, since whatever code
+ * the vsyscall page contains will be more than just a ret
+ * instruction.
+ */
+ is_native = (num_vsyscall_traps > 1);
+
+ printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n",
+ (is_native ? "native" : "emulated"),
+ (int)num_vsyscall_traps);
+
+ return 0;
+}
+#endif
+
+int main(int argc, char **argv)
+{
+ int nerrs = 0;
+
+ init_vdso();
+ nerrs += init_vsys();
+
+ nerrs += test_gtod();
+ nerrs += test_time();
+ nerrs += test_getcpu(0);
+ nerrs += test_getcpu(1);
+
+ sethandler(SIGSEGV, sigsegv, 0);
+ nerrs += test_vsys_r();
+
+#ifdef __x86_64__
+ nerrs += test_native_vsyscall();
+#endif
+
+ return nerrs ? 1 : 0;
+}
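
The trap-counting trick in test_native_vsyscall() generalizes: setting EFLAGS.TF delivers a SIGTRAP after each instruction, so counting traps across a call distinguishes a one-instruction emulation stub from real code. A self-contained x86_64 sketch of the same mechanism (an assumption-laden illustration, not part of the selftest):

	#define _GNU_SOURCE
	#include <signal.h>
	#include <stdio.h>

	#define X86_EFLAGS_TF (1UL << 8)

	static volatile sig_atomic_t traps;

	static void on_trap(int sig)
	{
		(void) sig;
		traps++;
	}

	static unsigned long rdflags(void)
	{
		unsigned long f;
		asm volatile ("pushfq\n\tpopq %0" : "=rm" (f));
		return f;
	}

	static void wrflags(unsigned long f)
	{
		asm volatile ("pushq %0\n\tpopfq" : : "rm" (f) : "flags");
	}

	int main(void)
	{
		signal(SIGTRAP, on_trap);
		wrflags(rdflags() | X86_EFLAGS_TF);	/* start single-stepping */
		asm volatile ("nop; nop; nop");
		wrflags(rdflags() & ~X86_EFLAGS_TF);	/* stop */
		printf("%d single-step traps\n", (int) traps);
		return 0;
	}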
diff --git a/tools/usb/usbip/libsrc/usbip_device_driver.c b/tools/usb/usbip/libsrc/usbip_device_driver.c
index e059b7d1ec5b..ec3a0b794f15 100644
--- a/tools/usb/usbip/libsrc/usbip_device_driver.c
+++ b/tools/usb/usbip/libsrc/usbip_device_driver.c
@@ -77,7 +77,7 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
const char *path, *name;
char filepath[SYSFS_PATH_MAX];
struct usb_device_descriptor descr;
- unsigned i;
+ unsigned int i;
FILE *fd = NULL;
struct udev_device *plat;
const char *speed;
@@ -92,7 +92,7 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
return -1;
ret = fread((char *) &descr, sizeof(descr), 1, fd);
if (ret < 0)
- return -1;
+ goto err;
fclose(fd);
copy_descr_attr(dev, &descr, bDeviceClass);
@@ -124,6 +124,9 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
name = udev_device_get_sysname(plat);
strncpy(dev->busid, name, SYSFS_BUS_ID_SIZE);
return 0;
+err:
+ fclose(fd);
+ return -1;
}
static int is_my_device(struct udev_device *dev)
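
The goto above converts an early return that leaked the open FILE into a single error exit. A minimal sketch of the same cleanup pattern, with illustrative names (note it also checks fread()'s item count, which is a size_t and can never be negative):

	#include <stdio.h>

	static int read_descr(const char *path, void *buf, size_t len)
	{
		FILE *f = fopen(path, "r");

		if (!f)
			return -1;
		if (fread(buf, len, 1, f) != 1)
			goto err;		/* fclose() still runs */
		fclose(f);
		return 0;
	err:
		fclose(f);
		return -1;
	}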
diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
index fa46141ae68b..e121cfb1746a 100644
--- a/tools/usb/usbip/src/usbip_bind.c
+++ b/tools/usb/usbip/src/usbip_bind.c
@@ -144,6 +144,7 @@ static int bind_device(char *busid)
int rc;
struct udev *udev;
struct udev_device *dev;
+ const char *devpath;
/* Check whether the device with this bus ID exists. */
udev = udev_new();
@@ -152,8 +153,16 @@ static int bind_device(char *busid)
err("device with the specified bus ID does not exist");
return -1;
}
+ devpath = udev_device_get_devpath(dev);
udev_unref(udev);
+ /* If the device is already attached to vhci_hcd - bail out */
+ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
+ err("bind loop detected: device: %s is attached to %s\n",
+ devpath, USBIP_VHCI_DRV_NAME);
+ return -1;
+ }
+
rc = unbind_other(busid);
if (rc == UNBIND_ST_FAILED) {
err("could not unbind driver from device on busid %s", busid);
diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
index f1b38e866dd7..d65a9f444174 100644
--- a/tools/usb/usbip/src/usbip_list.c
+++ b/tools/usb/usbip/src/usbip_list.c
@@ -187,6 +187,7 @@ static int list_devices(bool parsable)
const char *busid;
char product_name[128];
int ret = -1;
+ const char *devpath;
/* Create libudev context. */
udev = udev_new();
@@ -209,6 +210,14 @@ static int list_devices(bool parsable)
path = udev_list_entry_get_name(dev_list_entry);
dev = udev_device_new_from_syspath(udev, path);
+ /* Ignore devices attached to vhci_hcd */
+ devpath = udev_device_get_devpath(dev);
+ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
+ dbg("Skip the device %s already attached to %s\n",
+ devpath, USBIP_VHCI_DRV_NAME);
+ continue;
+ }
+
/* Get device information. */
idVendor = udev_device_get_sysattr_value(dev, "idVendor");
idProduct = udev_device_get_sysattr_value(dev, "idProduct");
diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c
index 009afb4a3aae..c6dad2a13c80 100644
--- a/tools/usb/usbip/src/usbipd.c
+++ b/tools/usb/usbip/src/usbipd.c
@@ -456,7 +456,7 @@ static void set_signal(void)
sigaction(SIGTERM, &act, NULL);
sigaction(SIGINT, &act, NULL);
act.sa_handler = SIG_IGN;
- sigaction(SIGCLD, &act, NULL);
+ sigaction(SIGCHLD, &act, NULL);
}
static const char *pid_file;
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 395521a7a8d8..fca8381bbe04 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -118,7 +118,7 @@ static inline void free_page(unsigned long addr)
#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define WARN_ON_ONCE(cond) ((cond) && fprintf (stderr, "WARNING\n"))
+#define WARN_ON_ONCE(cond) ((cond) ? fprintf (stderr, "WARNING\n") : 0)
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
diff --git a/tools/virtio/linux/thread_info.h b/tools/virtio/linux/thread_info.h
new file mode 100644
index 000000000000..e0f610d08006
--- /dev/null
+++ b/tools/virtio/linux/thread_info.h
@@ -0,0 +1 @@
+#define check_copy_size(A, B, C) (1)
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 5706e075adf2..301d59bfcd0a 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -111,7 +111,7 @@ static inline void busy_wait(void)
}
#if defined(__x86_64__) || defined(__i386__)
-#define smp_mb() asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
+#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#else
/*
* Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
@@ -134,4 +134,61 @@ static inline void busy_wait(void)
barrier(); \
} while (0)
+#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
+#define smp_wmb() barrier()
+#else
+#define smp_wmb() smp_release()
+#endif
+
+#ifdef __alpha__
+#define smp_read_barrier_depends() smp_acquire()
+#else
+#define smp_read_barrier_depends() do { } while (0)
+#endif
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
+ case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
+ case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
+ case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
+ default:
+ barrier();
+ __builtin_memcpy((void *)res, (const void *)p, size);
+ barrier();
+ }
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
+ case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
+ case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
+ case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
+ default:
+ barrier();
+ __builtin_memcpy((void *)p, (const void *)res, size);
+ barrier();
+ }
+}
+
+#define READ_ONCE(x) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u; \
+ __read_once_size(&(x), __u.__c, sizeof(x)); \
+ smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
+ __u.__val; \
+})
+
+#define WRITE_ONCE(x, val) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (typeof(x)) (val) }; \
+ __write_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
#endif
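
A hedged usage sketch for the READ_ONCE()/WRITE_ONCE() helpers added above, assuming this main.h is included; the ring type is illustrative and not part of ringtest. Each side snapshots the other's free-running index exactly once, and the producer publishes the slot before advancing its index:

	struct sketch_ring {
		unsigned head;			/* written only by the producer */
		unsigned tail;			/* written only by the consumer */
		void *slot[256];
	};

	static inline int sketch_full(struct sketch_ring *r)
	{
		/* one stable snapshot of the consumer's index */
		return r->head - READ_ONCE(r->tail) == 256;
	}

	static inline void sketch_push(struct sketch_ring *r, void *p)
	{
		/* caller has already checked sketch_full() */
		r->slot[r->head % 256] = p;
		smp_wmb();			/* publish the slot before the index */
		WRITE_ONCE(r->head, r->head + 1);
	}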
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index e6e81305ef46..477899c12c51 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -187,7 +187,7 @@ bool enable_kick()
bool avail_empty()
{
- return !__ptr_ring_peek(&array);
+ return __ptr_ring_empty(&array);
}
bool use_buf(unsigned *lenp, void **bufp)
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index e92903fc7113..a8783f48f77f 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -169,9 +169,10 @@ static int opt_raw; /* for kernel developers */
static int opt_list; /* list pages (in ranges) */
static int opt_no_summary; /* don't show summary */
static pid_t opt_pid; /* process to walk */
-const char * opt_file; /* file or directory path */
+const char *opt_file; /* file or directory path */
static uint64_t opt_cgroup; /* cgroup inode */
static int opt_list_cgroup;/* list page cgroup */
+static const char *opt_kpageflags;/* kpageflags file to parse */
#define MAX_ADDR_RANGES 1024
static int nr_addr_ranges;
@@ -258,7 +259,7 @@ static int checked_open(const char *pathname, int flags)
* pagemap/kpageflags routines
*/
-static unsigned long do_u64_read(int fd, char *name,
+static unsigned long do_u64_read(int fd, const char *name,
uint64_t *buf,
unsigned long index,
unsigned long count)
@@ -283,7 +284,7 @@ static unsigned long kpageflags_read(uint64_t *buf,
unsigned long index,
unsigned long pages)
{
- return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages);
+ return do_u64_read(kpageflags_fd, opt_kpageflags, buf, index, pages);
}
static unsigned long kpagecgroup_read(uint64_t *buf,
@@ -293,7 +294,7 @@ static unsigned long kpagecgroup_read(uint64_t *buf,
if (kpagecgroup_fd < 0)
return pages;
- return do_u64_read(kpagecgroup_fd, PROC_KPAGEFLAGS, buf, index, pages);
+ return do_u64_read(kpagecgroup_fd, opt_kpageflags, buf, index, pages);
}
static unsigned long pagemap_read(uint64_t *buf,
@@ -743,7 +744,7 @@ static void walk_addr_ranges(void)
{
int i;
- kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);
+ kpageflags_fd = checked_open(opt_kpageflags, O_RDONLY);
if (!nr_addr_ranges)
add_addr_range(0, ULONG_MAX);
@@ -790,6 +791,7 @@ static void usage(void)
" -N|--no-summary Don't show summary info\n"
" -X|--hwpoison hwpoison pages\n"
" -x|--unpoison unpoison pages\n"
+" -F|--kpageflags filename kpageflags file to parse\n"
" -h|--help Show this usage message\n"
"flags:\n"
" 0x10 bitfield format, e.g.\n"
@@ -1013,7 +1015,7 @@ static void walk_page_cache(void)
{
struct stat st;
- kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);
+ kpageflags_fd = checked_open(opt_kpageflags, O_RDONLY);
pagemap_fd = checked_open("/proc/self/pagemap", O_RDONLY);
sigaction(SIGBUS, &sigbus_action, NULL);
@@ -1164,6 +1166,11 @@ static void parse_bits_mask(const char *optarg)
add_bits_filter(mask, bits);
}
+static void parse_kpageflags(const char *name)
+{
+ opt_kpageflags = name;
+}
+
static void describe_flags(const char *optarg)
{
uint64_t flags = parse_flag_names(optarg, 0);
@@ -1188,6 +1195,7 @@ static const struct option opts[] = {
{ "no-summary", 0, NULL, 'N' },
{ "hwpoison" , 0, NULL, 'X' },
{ "unpoison" , 0, NULL, 'x' },
+ { "kpageflags", 0, NULL, 'F' },
{ "help" , 0, NULL, 'h' },
{ NULL , 0, NULL, 0 }
};
@@ -1199,7 +1207,7 @@ int main(int argc, char *argv[])
page_size = getpagesize();
while ((c = getopt_long(argc, argv,
- "rp:f:a:b:d:c:ClLNXxh", opts, NULL)) != -1) {
+ "rp:f:a:b:d:c:ClLNXxF:h", opts, NULL)) != -1) {
switch (c) {
case 'r':
opt_raw = 1;
@@ -1242,6 +1250,9 @@ int main(int argc, char *argv[])
opt_unpoison = 1;
prepare_hwpoison_fd();
break;
+ case 'F':
+ parse_kpageflags(optarg);
+ break;
case 'h':
usage();
exit(0);
@@ -1251,6 +1262,9 @@ int main(int argc, char *argv[])
}
}
+ if (!opt_kpageflags)
+ opt_kpageflags = PROC_KPAGEFLAGS;
+
if (opt_cgroup || opt_list_cgroup)
kpagecgroup_fd = checked_open(PROC_KPAGECGROUP, O_RDONLY);
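Note that -F takes a filename (the optstring uses "F:"), so the long-option entry needs has_arg = 1 (required_argument) for --kpageflags to accept its argument; that is corrected in the opts[] hunk above. A standalone sketch of the pattern, as a hypothetical program rather than page-types itself:

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	const char *kpageflags = "/proc/kpageflags";	/* default */
	static const struct option opts[] = {
		/* has_arg = 1: accepts "--kpageflags FILE" and
		 * "--kpageflags=FILE" */
		{ "kpageflags", 1, NULL, 'F' },
		{ NULL,         0, NULL,  0  }
	};
	int c;

	/* "F:" means the short form -F also requires an argument */
	while ((c = getopt_long(argc, argv, "F:", opts, NULL)) != -1) {
		if (c == 'F')
			kpageflags = optarg;
		else
			exit(1);
	}
	printf("parsing %s\n", kpageflags);
	return 0;
}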
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 2e43f9d42bd5..08464b2fba1d 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -53,8 +53,8 @@
__asm__(".arch_extension virt");
#endif
+DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
@@ -354,7 +354,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
+ vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
kvm_arm_set_running_vcpu(vcpu);
kvm_vgic_load(vcpu);
@@ -509,7 +509,7 @@ static void update_vttbr(struct kvm *kvm)
pgd_phys = virt_to_phys(kvm->arch.pgd);
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = pgd_phys | vmid;
+ kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
spin_unlock(&kvm_vmid_lock);
}
@@ -704,9 +704,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
trace_kvm_entry(*vcpu_pc(vcpu));
guest_enter_irqoff();
+ if (has_vhe())
+ kvm_arm_vhe_guest_enter();
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
+ if (has_vhe())
+ kvm_arm_vhe_guest_exit();
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
/*
@@ -759,6 +763,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+ /* Exit types that need handling before we can be preempted */
+ handle_exit_early(vcpu, run, ret);
+
preempt_enable();
ret = handle_exit(vcpu, run, ret);
@@ -1158,7 +1165,7 @@ static void cpu_init_hyp_mode(void *dummy)
pgd_ptr = kvm_mmu_get_httbr();
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
- vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+ vector_ptr = (unsigned long)kvm_get_hyp_vector();
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
@@ -1272,19 +1279,8 @@ static inline void hyp_cpu_pm_exit(void)
}
#endif
-static void teardown_common_resources(void)
-{
- free_percpu(kvm_host_cpu_state);
-}
-
static int init_common_resources(void)
{
- kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
- if (!kvm_host_cpu_state) {
- kvm_err("Cannot allocate host CPU state\n");
- return -ENOMEM;
- }
-
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1403,6 +1399,12 @@ static int init_hyp_mode(void)
goto out_err;
}
+ err = kvm_map_vectors();
+ if (err) {
+ kvm_err("Cannot map vectors\n");
+ goto out_err;
+ }
+
/*
* Map the Hyp stack pages
*/
@@ -1420,7 +1422,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
kvm_cpu_context_t *cpu_ctxt;
- cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
+ cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
if (err) {
@@ -1544,7 +1546,6 @@ out_hyp:
if (!in_hyp_mode)
teardown_hyp_mode();
out_err:
- teardown_common_resources();
return err;
}
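The kvm_host_cpu_state conversion above trades a dynamically allocated per-cpu area for a statically defined one, which is why the allocation-failure path and the teardown_common_resources() hook disappear. A condensed, kernel-only sketch of the two styles (identifiers illustrative, accessor calls shown as comments):

/* Before: dynamic per-cpu storage; must be allocated and freed. */
static kvm_cpu_context_t __percpu *state_dyn;
/*   state_dyn = alloc_percpu(kvm_cpu_context_t);   -- can fail       */
/*   ctxt = this_cpu_ptr(state_dyn);                -- takes pointer  */
/*   free_percpu(state_dyn);                        -- needs teardown */

/* After: static per-cpu storage; exists for the lifetime of the
 * kernel, so there is no error path and nothing to tear down. */
DEFINE_PER_CPU(kvm_cpu_context_t, state_static);
/*   ctxt = this_cpu_ptr(&state_static);            -- note the &     */

The same shift shows up at every call site in the hunks above: this_cpu_ptr() and per_cpu_ptr() now take the address of the variable rather than the saved pointer.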
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index b4b69c2d1012..f8eaf86b740a 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -621,7 +621,7 @@ static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
return 0;
}
-static int __create_hyp_mappings(pgd_t *pgdp,
+static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
unsigned long start, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
@@ -634,7 +634,7 @@ static int __create_hyp_mappings(pgd_t *pgdp,
addr = start & PAGE_MASK;
end = PAGE_ALIGN(end);
do {
- pgd = pgdp + pgd_index(addr);
+ pgd = pgdp + ((addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1));
if (pgd_none(*pgd)) {
pud = pud_alloc_one(NULL, addr);
@@ -697,8 +697,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot)
int err;
phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
- err = __create_hyp_mappings(hyp_pgd, virt_addr,
- virt_addr + PAGE_SIZE,
+ err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
+ virt_addr, virt_addr + PAGE_SIZE,
__phys_to_pfn(phys_addr),
prot);
if (err)
@@ -729,7 +729,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
return -EINVAL;
- return __create_hyp_mappings(hyp_pgd, start, end,
+ return __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD, start, end,
__phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}
@@ -1310,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- if (is_vm_hugetlb_page(vma) && !logging_active) {
+ if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
hugetlb = true;
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
} else {
@@ -1735,7 +1735,7 @@ static int kvm_map_idmap_text(pgd_t *pgd)
int err;
/* Create the idmap in the boot page tables */
- err = __create_hyp_mappings(pgd,
+ err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
hyp_idmap_start, hyp_idmap_end,
__phys_to_pfn(hyp_idmap_start),
PAGE_HYP_EXEC);
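The __create_hyp_mappings() change parameterizes the top-level index calculation: pgd_index() bakes in PTRS_PER_PGD, which is wrong when the idmap page tables use a different number of top-level entries, whereas the open-coded form masks with whatever size the caller passes. A freestanding arithmetic illustration (the constants are demo values, not taken from any particular configuration):

#include <stdio.h>

/* Demo values for a 4-level, 4K-page layout; real values come from
 * the architecture headers. */
#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512UL

/* Hypothetical stand-in for the open-coded index in the patch. */
static unsigned long hyp_pgd_index(unsigned long addr,
				   unsigned long ptrs_per_pgd)
{
	/* ptrs_per_pgd must be a power of two for the mask to work */
	return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
}

int main(void)
{
	unsigned long addr = 0x0000804000000000UL;

	/* Same address, different table geometry, different slot: */
	printf("%lu\n", hyp_pgd_index(addr, PTRS_PER_PGD)); /* 256 */
	printf("%lu\n", hyp_pgd_index(addr, 64));           /* 0   */
	return 0;
}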
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 62310122ee78..743ca5cb05ef 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -285,9 +285,11 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;
- ret = vgic_v4_init(kvm);
- if (ret)
- goto out;
+ if (vgic_has_its(kvm)) {
+ ret = vgic_v4_init(kvm);
+ if (ret)
+ goto out;
+ }
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_enable(vcpu);
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 4a37292855bc..bc4265154bac 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -118,7 +118,7 @@ int vgic_v4_init(struct kvm *kvm)
struct kvm_vcpu *vcpu;
int i, nr_vcpus, ret;
- if (!vgic_supports_direct_msis(kvm))
+ if (!kvm_vgic_global_state.has_gicv4)
return 0; /* Nothing to see here... move along. */
if (dist->its_vm.vpes)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f2ac53ab8243..a334399fafec 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -188,7 +188,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct kvm_kernel_irqfd *irqfd =
container_of(wait, struct kvm_kernel_irqfd, wait);
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
struct kvm_kernel_irq_routing_entry irq;
struct kvm *kvm = irqfd->kvm;
unsigned seq;
@@ -287,7 +287,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
- unsigned int events;
+ __poll_t events;
int idx;
if (!kvm_arch_intc_initialized(kvm))
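__poll_t is a sparse-checkable bitwise type, so the poll mask handed to a wait-queue callback should be unpacked with key_to_poll() instead of being cast to unsigned long, which is all the eventfd hunk above does. A minimal callback sketch (kernel-only, names illustrative):

static int demo_wakeup(wait_queue_entry_t *wait, unsigned mode,
		       int sync, void *key)
{
	__poll_t flags = key_to_poll(key);	/* typed, sparse-clean */

	if (flags & EPOLLIN)
		; /* data available: do the wakeup work here */
	if (flags & EPOLLHUP)
		; /* peer went away: tear down, as irqfd_wakeup does */
	return 0;
}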
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 210bf820385a..35db929f92f0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -476,6 +476,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
@@ -1322,17 +1323,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
-static int get_user_page_nowait(unsigned long start, int write,
- struct page **page)
-{
- int flags = FOLL_NOWAIT | FOLL_HWPOISON;
-
- if (write)
- flags |= FOLL_WRITE;
-
- return get_user_pages(start, 1, flags, page, NULL);
-}
-
static inline int check_user_page_hwpoison(unsigned long addr)
{
int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1381,7 +1371,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool *writable, kvm_pfn_t *pfn)
{
- struct page *page[1];
+ unsigned int flags = FOLL_HWPOISON;
+ struct page *page;
int npages = 0;
might_sleep();
@@ -1389,35 +1380,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
if (writable)
*writable = write_fault;
- if (async) {
- down_read(&current->mm->mmap_sem);
- npages = get_user_page_nowait(addr, write_fault, page);
- up_read(&current->mm->mmap_sem);
- } else {
- unsigned int flags = FOLL_HWPOISON;
-
- if (write_fault)
- flags |= FOLL_WRITE;
+ if (write_fault)
+ flags |= FOLL_WRITE;
+ if (async)
+ flags |= FOLL_NOWAIT;
- npages = get_user_pages_unlocked(addr, 1, page, flags);
- }
+ npages = get_user_pages_unlocked(addr, 1, &page, flags);
if (npages != 1)
return npages;
/* map read fault as writable if possible */
if (unlikely(!write_fault) && writable) {
- struct page *wpage[1];
+ struct page *wpage;
- npages = __get_user_pages_fast(addr, 1, 1, wpage);
- if (npages == 1) {
+ if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
*writable = true;
- put_page(page[0]);
- page[0] = wpage[0];
+ put_page(page);
+ page = wpage;
}
-
- npages = 1;
}
- *pfn = page_to_pfn(page[0]);
+ *pfn = page_to_pfn(page);
return npages;
}
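The hva_to_pfn_slow() rewrite folds the async case into the common gup path: FOLL_NOWAIT asks get_user_pages to fail rather than sleep on I/O, which is exactly what the removed get_user_page_nowait() wrapper open-coded, along with its own mmap_sem handling. The resulting shape, restated as a brief kernel-only sketch with the reasoning in comments:

/* Build the gup flags once, then use one helper for every case. */
unsigned int flags = FOLL_HWPOISON;
struct page *page;
long npages;

if (write_fault)
	flags |= FOLL_WRITE;	/* need a writable mapping */
if (async)
	flags |= FOLL_NOWAIT;	/* do not sleep; the caller retries later */

/* Takes and drops mmap_sem internally, unlike raw get_user_pages(),
 * so the explicit down_read()/up_read() pair above could be dropped. */
npages = get_user_pages_unlocked(addr, 1, &page, flags);

Dropping the single-element page arrays in favor of a plain struct page * is a side benefit of the same consolidation.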